// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/cls_api.c	Packet classifier API.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Changes:
 *
 * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/jhash.h>
#include <linux/rculist.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>
#include <net/tc_act/tc_csum.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_police.h>
#include <net/tc_act/tc_sample.h>
#include <net/tc_act/tc_skbedit.h>
#include <net/tc_act/tc_ct.h>
#include <net/tc_act/tc_mpls.h>
#include <net/tc_act/tc_gate.h>
#include <net/flow_offload.h>

extern const struct nla_policy rtm_tca_policy[TCA_MAX + 1];

/* The list of all installed classifier types */
static LIST_HEAD(tcf_proto_base);

/* Protects list of registered TC modules. It is pure SMP lock. */
static DEFINE_RWLOCK(cls_mod_lock);

static u32 destroy_obj_hashfn(const struct tcf_proto *tp)
{
	return jhash_3words(tp->chain->index, tp->prio,
			    (__force __u32)tp->protocol, 0);
}

static void tcf_proto_signal_destroying(struct tcf_chain *chain,
					struct tcf_proto *tp)
{
	struct tcf_block *block = chain->block;

	mutex_lock(&block->proto_destroy_lock);
	hash_add_rcu(block->proto_destroy_ht, &tp->destroy_ht_node,
		     destroy_obj_hashfn(tp));
	mutex_unlock(&block->proto_destroy_lock);
}

static bool tcf_proto_cmp(const struct tcf_proto *tp1,
			  const struct tcf_proto *tp2)
{
	return tp1->chain->index == tp2->chain->index &&
	       tp1->prio == tp2->prio &&
	       tp1->protocol == tp2->protocol;
}

static bool tcf_proto_exists_destroying(struct tcf_chain *chain,
					struct tcf_proto *tp)
{
	u32 hash = destroy_obj_hashfn(tp);
	struct tcf_proto *iter;
	bool found = false;

	rcu_read_lock();
	hash_for_each_possible_rcu(chain->block->proto_destroy_ht, iter,
				   destroy_ht_node, hash) {
		if (tcf_proto_cmp(tp, iter)) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();

	return found;
}

static void
tcf_proto_signal_destroyed(struct tcf_chain *chain, struct tcf_proto *tp)
{
	struct tcf_block *block = chain->block;

	mutex_lock(&block->proto_destroy_lock);
	if (hash_hashed(&tp->destroy_ht_node))
		hash_del_rcu(&tp->destroy_ht_node);
	mutex_unlock(&block->proto_destroy_lock);
}

/* Find classifier type by string name */

static const struct tcf_proto_ops *__tcf_proto_lookup_ops(const char *kind)
{
	const struct tcf_proto_ops *t, *res = NULL;

	if (kind) {
		read_lock(&cls_mod_lock);
		list_for_each_entry(t, &tcf_proto_base, head) {
			if (strcmp(kind, t->kind) == 0) {
				if (try_module_get(t->owner))
					res = t;
				break;
			}
		}
		read_unlock(&cls_mod_lock);
	}
	return res;
}

static const struct tcf_proto_ops *
tcf_proto_lookup_ops(const char *kind, bool rtnl_held,
		     struct netlink_ext_ack *extack)
{
	const struct tcf_proto_ops *ops;

	ops = __tcf_proto_lookup_ops(kind);
	if (ops)
		return ops;
#ifdef CONFIG_MODULES
	if (rtnl_held)
		rtnl_unlock();
	request_module("cls_%s", kind);
	if (rtnl_held)
		rtnl_lock();
	ops = __tcf_proto_lookup_ops(kind);
	/* We dropped the RTNL semaphore in order to perform
	 * the module load. So, even if we succeeded in loading
	 * the module we have to replay the request. We indicate
	 * this using -EAGAIN.
	 */
	if (ops) {
		module_put(ops->owner);
		return ERR_PTR(-EAGAIN);
	}
#endif
	NL_SET_ERR_MSG(extack, "TC classifier not found");
	return ERR_PTR(-ENOENT);
}
/* Register(unregister) new classifier type */

int register_tcf_proto_ops(struct tcf_proto_ops *ops)
{
	struct tcf_proto_ops *t;
	int rc = -EEXIST;

	write_lock(&cls_mod_lock);
	list_for_each_entry(t, &tcf_proto_base, head)
		if (!strcmp(ops->kind, t->kind))
			goto out;

	list_add_tail(&ops->head, &tcf_proto_base);
	rc = 0;
out:
	write_unlock(&cls_mod_lock);
	return rc;
}
EXPORT_SYMBOL(register_tcf_proto_ops);

static struct workqueue_struct *tc_filter_wq;

int unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
{
	struct tcf_proto_ops *t;
	int rc = -ENOENT;

	/* Wait for outstanding call_rcu()s, if any, from a
	 * tcf_proto_ops's destroy() handler.
	 */
	rcu_barrier();
	flush_workqueue(tc_filter_wq);

	write_lock(&cls_mod_lock);
	list_for_each_entry(t, &tcf_proto_base, head) {
		if (t == ops) {
			list_del(&t->head);
			rc = 0;
			break;
		}
	}
	write_unlock(&cls_mod_lock);
	return rc;
}
EXPORT_SYMBOL(unregister_tcf_proto_ops);

bool tcf_queue_work(struct rcu_work *rwork, work_func_t func)
{
	INIT_RCU_WORK(rwork, func);
	return queue_rcu_work(tc_filter_wq, rwork);
}
EXPORT_SYMBOL(tcf_queue_work);
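/* Illustrative sketch (not part of this file): a classifier module is
 * expected to register its ops from module init and unregister them on
 * exit. The "foo" names below are hypothetical placeholders:
 *
 *	static struct tcf_proto_ops cls_foo_ops __read_mostly = {
 *		.kind		= "foo",
 *		.classify	= foo_classify,
 *		.init		= foo_init,
 *		.destroy	= foo_destroy,
 *		.get		= foo_get,
 *		.change		= foo_change,
 *		.delete		= foo_delete,
 *		.walk		= foo_walk,
 *		.dump		= foo_dump,
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	static int __init cls_foo_init(void)
 *	{
 *		return register_tcf_proto_ops(&cls_foo_ops);
 *	}
 *
 *	static void __exit cls_foo_exit(void)
 *	{
 *		unregister_tcf_proto_ops(&cls_foo_ops);
 *	}
 *
 * Naming the module "cls_foo" matters: tcf_proto_lookup_ops() above
 * autoloads classifiers via request_module("cls_%s", kind).
 */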
/* Select new prio value from the range, managed by kernel. */

static inline u32 tcf_auto_prio(struct tcf_proto *tp)
{
	u32 first = TC_H_MAKE(0xC0000000U, 0U);

	if (tp)
		first = tp->prio - 1;

	return TC_H_MAJ(first);
}
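/* Worked example (illustrative): prio occupies the major 16 bits of the
 * 32-bit value and TC_H_MAJ() masks with 0xFFFF0000. On an empty chain
 * the result is TC_H_MAJ(0xC0000000) == 0xC0000000, i.e. user-visible
 * prio 0xC000 (49152). If the current head already has prio 0xC0000000,
 * the next auto prio is TC_H_MAJ(0xC0000000 - 1) == 0xBFFF0000, i.e.
 * 0xBFFF (49151). Each auto-allocated filter thus gets a numerically
 * lower prio than the current head and is linked in front of it.
 */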
static bool tcf_proto_check_kind(struct nlattr *kind, char *name)
{
	if (kind)
		return nla_strlcpy(name, kind, IFNAMSIZ) >= IFNAMSIZ;
	memset(name, 0, IFNAMSIZ);
	return false;
}

static bool tcf_proto_is_unlocked(const char *kind)
{
	const struct tcf_proto_ops *ops;
	bool ret;

	if (strlen(kind) == 0)
		return false;

	ops = tcf_proto_lookup_ops(kind, false, NULL);
	/* On error return false to take rtnl lock. Proto lookup/create
	 * functions will perform lookup again and properly handle errors.
	 */
	if (IS_ERR(ops))
		return false;

	ret = !!(ops->flags & TCF_PROTO_OPS_DOIT_UNLOCKED);
	module_put(ops->owner);
	return ret;
}

static struct tcf_proto *tcf_proto_create(const char *kind, u32 protocol,
					  u32 prio, struct tcf_chain *chain,
					  bool rtnl_held,
					  struct netlink_ext_ack *extack)
{
	struct tcf_proto *tp;
	int err;

	tp = kzalloc(sizeof(*tp), GFP_KERNEL);
	if (!tp)
		return ERR_PTR(-ENOBUFS);

	tp->ops = tcf_proto_lookup_ops(kind, rtnl_held, extack);
	if (IS_ERR(tp->ops)) {
		err = PTR_ERR(tp->ops);
		goto errout;
	}
	tp->classify = tp->ops->classify;
	tp->protocol = protocol;
	tp->prio = prio;
	tp->chain = chain;
	spin_lock_init(&tp->lock);
	refcount_set(&tp->refcnt, 1);

	err = tp->ops->init(tp);
	if (err) {
		module_put(tp->ops->owner);
		goto errout;
	}
	return tp;

errout:
	kfree(tp);
	return ERR_PTR(err);
}

static void tcf_proto_get(struct tcf_proto *tp)
{
	refcount_inc(&tp->refcnt);
}

static void tcf_chain_put(struct tcf_chain *chain);

static void tcf_proto_destroy(struct tcf_proto *tp, bool rtnl_held,
			      bool sig_destroy, struct netlink_ext_ack *extack)
{
	tp->ops->destroy(tp, rtnl_held, extack);
	if (sig_destroy)
		tcf_proto_signal_destroyed(tp->chain, tp);
	tcf_chain_put(tp->chain);
	module_put(tp->ops->owner);
	kfree_rcu(tp, rcu);
}

static void tcf_proto_put(struct tcf_proto *tp, bool rtnl_held,
			  struct netlink_ext_ack *extack)
{
	if (refcount_dec_and_test(&tp->refcnt))
		tcf_proto_destroy(tp, rtnl_held, true, extack);
}

static bool tcf_proto_check_delete(struct tcf_proto *tp)
{
	if (tp->ops->delete_empty)
		return tp->ops->delete_empty(tp);

	tp->deleting = true;
	return tp->deleting;
}

static void tcf_proto_mark_delete(struct tcf_proto *tp)
{
	spin_lock(&tp->lock);
	tp->deleting = true;
	spin_unlock(&tp->lock);
}

static bool tcf_proto_is_deleting(struct tcf_proto *tp)
{
	bool deleting;

	spin_lock(&tp->lock);
	deleting = tp->deleting;
	spin_unlock(&tp->lock);

	return deleting;
}

#define ASSERT_BLOCK_LOCKED(block)					\
	lockdep_assert_held(&(block)->lock)

struct tcf_filter_chain_list_item {
	struct list_head list;
	tcf_chain_head_change_t *chain_head_change;
	void *chain_head_change_priv;
};

static struct tcf_chain *tcf_chain_create(struct tcf_block *block,
					  u32 chain_index)
{
	struct tcf_chain *chain;

	ASSERT_BLOCK_LOCKED(block);

	chain = kzalloc(sizeof(*chain), GFP_KERNEL);
	if (!chain)
		return NULL;
	list_add_tail_rcu(&chain->list, &block->chain_list);
	mutex_init(&chain->filter_chain_lock);
	chain->block = block;
	chain->index = chain_index;
	chain->refcnt = 1;
	if (!chain->index)
		block->chain0.chain = chain;
	return chain;
}

static void tcf_chain_head_change_item(struct tcf_filter_chain_list_item *item,
				       struct tcf_proto *tp_head)
{
	if (item->chain_head_change)
		item->chain_head_change(tp_head, item->chain_head_change_priv);
}

static void tcf_chain0_head_change(struct tcf_chain *chain,
				   struct tcf_proto *tp_head)
{
	struct tcf_filter_chain_list_item *item;
	struct tcf_block *block = chain->block;

	if (chain->index)
		return;

	mutex_lock(&block->lock);
	list_for_each_entry(item, &block->chain0.filter_chain_list, list)
		tcf_chain_head_change_item(item, tp_head);
	mutex_unlock(&block->lock);
}

/* Returns true if block can be safely freed. */

static bool tcf_chain_detach(struct tcf_chain *chain)
{
	struct tcf_block *block = chain->block;

	ASSERT_BLOCK_LOCKED(block);

	list_del_rcu(&chain->list);
	if (!chain->index)
		block->chain0.chain = NULL;

	if (list_empty(&block->chain_list) &&
	    refcount_read(&block->refcnt) == 0)
		return true;

	return false;
}

static void tcf_block_destroy(struct tcf_block *block)
{
	mutex_destroy(&block->lock);
	mutex_destroy(&block->proto_destroy_lock);
	kfree_rcu(block, rcu);
}

static void tcf_chain_destroy(struct tcf_chain *chain, bool free_block)
{
	struct tcf_block *block = chain->block;

	mutex_destroy(&chain->filter_chain_lock);
	kfree_rcu(chain, rcu);
	if (free_block)
		tcf_block_destroy(block);
}

static void tcf_chain_hold(struct tcf_chain *chain)
{
	ASSERT_BLOCK_LOCKED(chain->block);

	++chain->refcnt;
}

static bool tcf_chain_held_by_acts_only(struct tcf_chain *chain)
{
	ASSERT_BLOCK_LOCKED(chain->block);

	/* In case all the references are action references, this
	 * chain should not be shown to the user.
	 */
	return chain->refcnt == chain->action_refcnt;
}
static struct tcf_chain *tcf_chain_lookup(struct tcf_block *block,
					  u32 chain_index)
{
	struct tcf_chain *chain;

	ASSERT_BLOCK_LOCKED(block);

	list_for_each_entry(chain, &block->chain_list, list) {
		if (chain->index == chain_index)
			return chain;
	}
	return NULL;
}

#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
static struct tcf_chain *tcf_chain_lookup_rcu(const struct tcf_block *block,
					      u32 chain_index)
{
	struct tcf_chain *chain;

	list_for_each_entry_rcu(chain, &block->chain_list, list) {
		if (chain->index == chain_index)
			return chain;
	}
	return NULL;
}
#endif

static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
			   u32 seq, u16 flags, int event, bool unicast);

static struct tcf_chain *__tcf_chain_get(struct tcf_block *block,
					 u32 chain_index, bool create,
					 bool by_act)
{
	struct tcf_chain *chain = NULL;
	bool is_first_reference;

	mutex_lock(&block->lock);
	chain = tcf_chain_lookup(block, chain_index);
	if (chain) {
		tcf_chain_hold(chain);
	} else {
		if (!create)
			goto errout;
		chain = tcf_chain_create(block, chain_index);
		if (!chain)
			goto errout;
	}

	if (by_act)
		++chain->action_refcnt;
	is_first_reference = chain->refcnt - chain->action_refcnt == 1;
	mutex_unlock(&block->lock);

	/* Send notification only in case we got the first
	 * non-action reference. Until then, the chain acts only as
	 * a placeholder for actions pointing to it and user ought
	 * not know about them.
	 */
	if (is_first_reference && !by_act)
		tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
				RTM_NEWCHAIN, false);

	return chain;

errout:
	mutex_unlock(&block->lock);
	return chain;
}

static struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
				       bool create)
{
	return __tcf_chain_get(block, chain_index, create, false);
}

struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block, u32 chain_index)
{
	return __tcf_chain_get(block, chain_index, true, true);
}
EXPORT_SYMBOL(tcf_chain_get_by_act);
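/* Illustrative note (assuming iproute2 syntax): actions that jump to a
 * chain pin it through tcf_chain_get_by_act(), e.g.
 *
 *	tc filter add dev eth0 ingress matchall action goto chain 7
 *
 * may create chain 7 on demand. Since only action references are held at
 * that point, tcf_chain_held_by_acts_only() keeps such a chain hidden
 * from chain dumps until the first filter takes a non-action reference.
 */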
static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops,
			       void *tmplt_priv);
static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops,
				  void *tmplt_priv, u32 chain_index,
				  struct tcf_block *block, struct sk_buff *oskb,
				  u32 seq, u16 flags, bool unicast);

static void __tcf_chain_put(struct tcf_chain *chain, bool by_act,
			    bool explicitly_created)
{
	struct tcf_block *block = chain->block;
	const struct tcf_proto_ops *tmplt_ops;
	bool free_block = false;
	unsigned int refcnt;
	void *tmplt_priv;

	mutex_lock(&block->lock);
	if (explicitly_created) {
		if (!chain->explicitly_created) {
			mutex_unlock(&block->lock);
			return;
		}
		chain->explicitly_created = false;
	}

	if (by_act)
		chain->action_refcnt--;

	/* tc_chain_notify_delete can't be called while holding block lock.
	 * However, when block is unlocked chain can be changed concurrently, so
	 * save these to temporary variables.
	 */
	refcnt = --chain->refcnt;
	tmplt_ops = chain->tmplt_ops;
	tmplt_priv = chain->tmplt_priv;

	/* The last dropped non-action reference will trigger notification. */
	if (refcnt - chain->action_refcnt == 0 && !by_act) {
		tc_chain_notify_delete(tmplt_ops, tmplt_priv, chain->index,
				       block, NULL, 0, 0, false);
		/* Last reference to chain, no need to lock. */
		chain->flushing = false;
	}

	if (refcnt == 0)
		free_block = tcf_chain_detach(chain);
	mutex_unlock(&block->lock);

	if (refcnt == 0) {
		tc_chain_tmplt_del(tmplt_ops, tmplt_priv);
		tcf_chain_destroy(chain, free_block);
	}
}

static void tcf_chain_put(struct tcf_chain *chain)
{
	__tcf_chain_put(chain, false, false);
}

void tcf_chain_put_by_act(struct tcf_chain *chain)
{
	__tcf_chain_put(chain, true, false);
}
EXPORT_SYMBOL(tcf_chain_put_by_act);

static void tcf_chain_put_explicitly_created(struct tcf_chain *chain)
{
	__tcf_chain_put(chain, false, true);
}

static void tcf_chain_flush(struct tcf_chain *chain, bool rtnl_held)
{
	struct tcf_proto *tp, *tp_next;

	mutex_lock(&chain->filter_chain_lock);
	tp = tcf_chain_dereference(chain->filter_chain, chain);
	while (tp) {
		tp_next = rcu_dereference_protected(tp->next, 1);
		tcf_proto_signal_destroying(chain, tp);
		tp = tp_next;
	}
	tp = tcf_chain_dereference(chain->filter_chain, chain);
	RCU_INIT_POINTER(chain->filter_chain, NULL);
	tcf_chain0_head_change(chain, NULL);
	chain->flushing = true;
	mutex_unlock(&chain->filter_chain_lock);

	while (tp) {
		tp_next = rcu_dereference_protected(tp->next, 1);
		tcf_proto_put(tp, rtnl_held, NULL);
		tp = tp_next;
	}
}
static int tcf_block_setup(struct tcf_block *block,
			   struct flow_block_offload *bo);

static void tcf_block_offload_init(struct flow_block_offload *bo,
				   struct net_device *dev,
				   enum flow_block_command command,
				   enum flow_block_binder_type binder_type,
				   struct flow_block *flow_block,
				   bool shared, struct netlink_ext_ack *extack)
{
	bo->net = dev_net(dev);
	bo->command = command;
	bo->binder_type = binder_type;
	bo->block = flow_block;
	bo->block_shared = shared;
	bo->extack = extack;
	INIT_LIST_HEAD(&bo->cb_list);
}

static void tcf_block_unbind(struct tcf_block *block,
			     struct flow_block_offload *bo);

static void tc_block_indr_cleanup(struct flow_block_cb *block_cb)
{
	struct tcf_block *block = block_cb->indr.data;
	struct net_device *dev = block_cb->indr.dev;
	struct netlink_ext_ack extack = {};
	struct flow_block_offload bo;

	tcf_block_offload_init(&bo, dev, FLOW_BLOCK_UNBIND,
			       block_cb->indr.binder_type,
			       &block->flow_block, tcf_block_shared(block),
			       &extack);
	down_write(&block->cb_lock);
	list_del(&block_cb->driver_list);
	list_move(&block_cb->list, &bo.cb_list);
	up_write(&block->cb_lock);
	rtnl_lock();
	tcf_block_unbind(block, &bo);
	rtnl_unlock();
}

static bool tcf_block_offload_in_use(struct tcf_block *block)
{
	return atomic_read(&block->offloadcnt);
}

static int tcf_block_offload_cmd(struct tcf_block *block,
				 struct net_device *dev,
				 struct tcf_block_ext_info *ei,
				 enum flow_block_command command,
				 struct netlink_ext_ack *extack)
{
	struct flow_block_offload bo = {};

	tcf_block_offload_init(&bo, dev, command, ei->binder_type,
			       &block->flow_block, tcf_block_shared(block),
			       extack);

	if (dev->netdev_ops->ndo_setup_tc) {
		int err;

		err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo);
		if (err < 0) {
			if (err != -EOPNOTSUPP)
				NL_SET_ERR_MSG(extack, "Driver ndo_setup_tc failed");
			return err;
		}

		return tcf_block_setup(block, &bo);
	}

	flow_indr_dev_setup_offload(dev, TC_SETUP_BLOCK, block, &bo,
				    tc_block_indr_cleanup);
	tcf_block_setup(block, &bo);

	return -EOPNOTSUPP;
}

static int tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q,
				  struct tcf_block_ext_info *ei,
				  struct netlink_ext_ack *extack)
{
	struct net_device *dev = q->dev_queue->dev;
	int err;

	down_write(&block->cb_lock);

	/* If tc offload feature is disabled and the block we try to bind
	 * to already has some offloaded filters, forbid to bind.
	 */
	if (dev->netdev_ops->ndo_setup_tc &&
	    !tc_can_offload(dev) &&
	    tcf_block_offload_in_use(block)) {
		NL_SET_ERR_MSG(extack, "Bind to offloaded block failed as dev has offload disabled");
		err = -EOPNOTSUPP;
		goto err_unlock;
	}

	err = tcf_block_offload_cmd(block, dev, ei, FLOW_BLOCK_BIND, extack);
	if (err == -EOPNOTSUPP)
		goto no_offload_dev_inc;
	if (err)
		goto err_unlock;

	up_write(&block->cb_lock);
	return 0;

no_offload_dev_inc:
	if (tcf_block_offload_in_use(block))
		goto err_unlock;

	err = 0;
	block->nooffloaddevcnt++;
err_unlock:
	up_write(&block->cb_lock);
	return err;
}

static void tcf_block_offload_unbind(struct tcf_block *block, struct Qdisc *q,
				     struct tcf_block_ext_info *ei)
{
	struct net_device *dev = q->dev_queue->dev;
	int err;

	down_write(&block->cb_lock);
	err = tcf_block_offload_cmd(block, dev, ei, FLOW_BLOCK_UNBIND, NULL);
	if (err == -EOPNOTSUPP)
		goto no_offload_dev_dec;
	up_write(&block->cb_lock);
	return;

no_offload_dev_dec:
	WARN_ON(block->nooffloaddevcnt-- == 0);
	up_write(&block->cb_lock);
}
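/* Illustrative sketch (not part of this file): on the driver side,
 * TC_SETUP_BLOCK is typically answered with flow_block_cb_setup_simple(),
 * which allocates/frees the flow_block_cb entries that tcf_block_bind()
 * below splices onto the block's callback list. The "foo" names are
 * hypothetical:
 *
 *	static int foo_setup_tc(struct net_device *dev,
 *				enum tc_setup_type type, void *type_data)
 *	{
 *		switch (type) {
 *		case TC_SETUP_BLOCK:
 *			return flow_block_cb_setup_simple(type_data,
 *							  &foo_block_cb_list,
 *							  foo_setup_tc_block_cb,
 *							  dev, dev, true);
 *		default:
 *			return -EOPNOTSUPP;
 *		}
 *	}
 */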
static int
tcf_chain0_head_change_cb_add(struct tcf_block *block,
			      struct tcf_block_ext_info *ei,
			      struct netlink_ext_ack *extack)
{
	struct tcf_filter_chain_list_item *item;
	struct tcf_chain *chain0;

	item = kmalloc(sizeof(*item), GFP_KERNEL);
	if (!item) {
		NL_SET_ERR_MSG(extack, "Memory allocation for head change callback item failed");
		return -ENOMEM;
	}
	item->chain_head_change = ei->chain_head_change;
	item->chain_head_change_priv = ei->chain_head_change_priv;

	mutex_lock(&block->lock);
	chain0 = block->chain0.chain;
	if (chain0)
		tcf_chain_hold(chain0);
	else
		list_add(&item->list, &block->chain0.filter_chain_list);
	mutex_unlock(&block->lock);

	if (chain0) {
		struct tcf_proto *tp_head;

		mutex_lock(&chain0->filter_chain_lock);

		tp_head = tcf_chain_dereference(chain0->filter_chain, chain0);
		if (tp_head)
			tcf_chain_head_change_item(item, tp_head);

		mutex_lock(&block->lock);
		list_add(&item->list, &block->chain0.filter_chain_list);
		mutex_unlock(&block->lock);

		mutex_unlock(&chain0->filter_chain_lock);
		tcf_chain_put(chain0);
	}

	return 0;
}

static void
tcf_chain0_head_change_cb_del(struct tcf_block *block,
			      struct tcf_block_ext_info *ei)
{
	struct tcf_filter_chain_list_item *item;

	mutex_lock(&block->lock);
	list_for_each_entry(item, &block->chain0.filter_chain_list, list) {
		if ((!ei->chain_head_change && !ei->chain_head_change_priv) ||
		    (item->chain_head_change == ei->chain_head_change &&
		     item->chain_head_change_priv == ei->chain_head_change_priv)) {
			if (block->chain0.chain)
				tcf_chain_head_change_item(item, NULL);
			list_del(&item->list);
			mutex_unlock(&block->lock);

			kfree(item);
			return;
		}
	}
	mutex_unlock(&block->lock);
	WARN_ON(1);
}

struct tcf_net {
	spinlock_t idr_lock; /* Protects idr */
	struct idr idr;
};

static unsigned int tcf_net_id;

static int tcf_block_insert(struct tcf_block *block, struct net *net,
			    struct netlink_ext_ack *extack)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);
	int err;

	idr_preload(GFP_KERNEL);
	spin_lock(&tn->idr_lock);
	err = idr_alloc_u32(&tn->idr, block, &block->index, block->index,
			    GFP_NOWAIT);
	spin_unlock(&tn->idr_lock);
	idr_preload_end();

	return err;
}

static void tcf_block_remove(struct tcf_block *block, struct net *net)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	spin_lock(&tn->idr_lock);
	idr_remove(&tn->idr, block->index);
	spin_unlock(&tn->idr_lock);
}
static struct tcf_block *tcf_block_create(struct net *net, struct Qdisc *q,
					  u32 block_index,
					  struct netlink_ext_ack *extack)
{
	struct tcf_block *block;

	block = kzalloc(sizeof(*block), GFP_KERNEL);
	if (!block) {
		NL_SET_ERR_MSG(extack, "Memory allocation for block failed");
		return ERR_PTR(-ENOMEM);
	}
	mutex_init(&block->lock);
	mutex_init(&block->proto_destroy_lock);
	init_rwsem(&block->cb_lock);
	flow_block_init(&block->flow_block);
	INIT_LIST_HEAD(&block->chain_list);
	INIT_LIST_HEAD(&block->owner_list);
	INIT_LIST_HEAD(&block->chain0.filter_chain_list);

	refcount_set(&block->refcnt, 1);
	block->net = net;
	block->index = block_index;

	/* Don't store q pointer for blocks which are shared */
	if (!tcf_block_shared(block))
		block->q = q;
	return block;
}

static struct tcf_block *tcf_block_lookup(struct net *net, u32 block_index)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	return idr_find(&tn->idr, block_index);
}

static struct tcf_block *tcf_block_refcnt_get(struct net *net, u32 block_index)
{
	struct tcf_block *block;

	rcu_read_lock();
	block = tcf_block_lookup(net, block_index);
	if (block && !refcount_inc_not_zero(&block->refcnt))
		block = NULL;
	rcu_read_unlock();

	return block;
}
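/* Illustrative note: shared blocks are addressed by index rather than by
 * qdisc. With iproute2 this looks roughly like (hypothetical devices):
 *
 *	tc qdisc add dev eth0 ingress_block 22 ingress
 *	tc qdisc add dev eth1 ingress_block 22 ingress
 *	tc filter add block 22 protocol ip flower ... action drop
 *
 * Filter requests against "block 22" arrive with
 * tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK and tcm_block_index == 22, and
 * are resolved via tcf_block_lookup()/tcf_block_refcnt_get() above.
 */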
static struct tcf_chain *
__tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain)
{
	mutex_lock(&block->lock);
	if (chain)
		chain = list_is_last(&chain->list, &block->chain_list) ?
			NULL : list_next_entry(chain, list);
	else
		chain = list_first_entry_or_null(&block->chain_list,
						 struct tcf_chain, list);

	/* skip all action-only chains */
	while (chain && tcf_chain_held_by_acts_only(chain))
		chain = list_is_last(&chain->list, &block->chain_list) ?
			NULL : list_next_entry(chain, list);

	if (chain)
		tcf_chain_hold(chain);
	mutex_unlock(&block->lock);

	return chain;
}

/* Function to be used by all clients that want to iterate over all chains on
 * block. It properly obtains block->lock and takes reference to chain before
 * returning it. Users of this function must be tolerant to concurrent chain
 * insertion/deletion or ensure that no concurrent chain modification is
 * possible. Note that all netlink dump callbacks cannot guarantee to provide
 * consistent dump because rtnl lock is released each time skb is filled with
 * data and sent to user-space.
 */

struct tcf_chain *
tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain)
{
	struct tcf_chain *chain_next = __tcf_get_next_chain(block, chain);

	if (chain)
		tcf_chain_put(chain);

	return chain_next;
}
EXPORT_SYMBOL(tcf_get_next_chain);

static struct tcf_proto *
__tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp)
{
	u32 prio = 0;

	ASSERT_RTNL();
	mutex_lock(&chain->filter_chain_lock);

	if (!tp) {
		tp = tcf_chain_dereference(chain->filter_chain, chain);
	} else if (tcf_proto_is_deleting(tp)) {
		/* 'deleting' flag is set and chain->filter_chain_lock was
		 * unlocked, which means next pointer could be invalid. Restart
		 * search.
		 */
		prio = tp->prio + 1;
		tp = tcf_chain_dereference(chain->filter_chain, chain);

		for (; tp; tp = tcf_chain_dereference(tp->next, chain))
			if (!tp->deleting && tp->prio >= prio)
				break;
	} else {
		tp = tcf_chain_dereference(tp->next, chain);
	}

	if (tp)
		tcf_proto_get(tp);

	mutex_unlock(&chain->filter_chain_lock);

	return tp;
}

/* Function to be used by all clients that want to iterate over all tp's on
 * chain. Users of this function must be tolerant to concurrent tp
 * insertion/deletion or ensure that no concurrent chain modification is
 * possible. Note that all netlink dump callbacks cannot guarantee to provide
 * consistent dump because rtnl lock is released each time skb is filled with
 * data and sent to user-space.
 */

struct tcf_proto *
tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp,
		   bool rtnl_held)
{
	struct tcf_proto *tp_next = __tcf_get_next_proto(chain, tp);

	if (tp)
		tcf_proto_put(tp, rtnl_held, NULL);

	return tp_next;
}
EXPORT_SYMBOL(tcf_get_next_proto);
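/* Illustrative usage of the iterators above (a minimal sketch): both
 * return the next entry with a reference already taken and release the
 * entry passed in, so a plain for-loop never leaks references:
 *
 *	for (chain = tcf_get_next_chain(block, NULL); chain;
 *	     chain = tcf_get_next_chain(block, chain))
 *		for (tp = tcf_get_next_proto(chain, NULL, true); tp;
 *		     tp = tcf_get_next_proto(chain, tp, true))
 *			...;
 *
 * Breaking out early requires an explicit tcf_chain_put()/tcf_proto_put()
 * on the entry still held.
 */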
static void tcf_block_flush_all_chains(struct tcf_block *block, bool rtnl_held)
{
	struct tcf_chain *chain;

	/* Last reference to block. At this point chains cannot be added or
	 * removed concurrently.
	 */
	for (chain = tcf_get_next_chain(block, NULL);
	     chain;
	     chain = tcf_get_next_chain(block, chain)) {
		tcf_chain_put_explicitly_created(chain);
		tcf_chain_flush(chain, rtnl_held);
	}
}

/* Look up Qdisc and increment its reference counter.
 * Set parent, if necessary.
 */

static int __tcf_qdisc_find(struct net *net, struct Qdisc **q,
			    u32 *parent, int ifindex, bool rtnl_held,
			    struct netlink_ext_ack *extack)
{
	const struct Qdisc_class_ops *cops;
	struct net_device *dev;
	int err = 0;

	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK)
		return 0;

	rcu_read_lock();

	/* Find link */
	dev = dev_get_by_index_rcu(net, ifindex);
	if (!dev) {
		rcu_read_unlock();
		return -ENODEV;
	}

	/* Find qdisc */
	if (!*parent) {
		*q = dev->qdisc;
		*parent = (*q)->handle;
	} else {
		*q = qdisc_lookup_rcu(dev, TC_H_MAJ(*parent));
		if (!*q) {
			NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exist");
			err = -EINVAL;
			goto errout_rcu;
		}
	}

	*q = qdisc_refcount_inc_nz(*q);
	if (!*q) {
		NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exist");
		err = -EINVAL;
		goto errout_rcu;
	}

	/* Is it classful? */
	cops = (*q)->ops->cl_ops;
	if (!cops) {
		NL_SET_ERR_MSG(extack, "Qdisc not classful");
		err = -EINVAL;
		goto errout_qdisc;
	}

	if (!cops->tcf_block) {
		NL_SET_ERR_MSG(extack, "Class doesn't support blocks");
		err = -EOPNOTSUPP;
		goto errout_qdisc;
	}

errout_rcu:
	/* At this point we know that qdisc is not noop_qdisc,
	 * which means that qdisc holds a reference to net_device
	 * and we hold a reference to qdisc, so it is safe to release
	 * rcu read lock.
	 */
	rcu_read_unlock();
	return err;

errout_qdisc:
	rcu_read_unlock();

	if (rtnl_held)
		qdisc_put(*q);
	else
		qdisc_put_unlocked(*q);
	*q = NULL;

	return err;
}
static int __tcf_qdisc_cl_find(struct Qdisc *q, u32 parent, unsigned long *cl,
			       int ifindex, struct netlink_ext_ack *extack)
{
	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK)
		return 0;

	/* Do we search for filter, attached to class? */
	if (TC_H_MIN(parent)) {
		const struct Qdisc_class_ops *cops = q->ops->cl_ops;

		*cl = cops->find(q, parent);
		if (*cl == 0) {
			NL_SET_ERR_MSG(extack, "Specified class doesn't exist");
			return -ENOENT;
		}
	}

	return 0;
}

static struct tcf_block *__tcf_block_find(struct net *net, struct Qdisc *q,
					  unsigned long cl, int ifindex,
					  u32 block_index,
					  struct netlink_ext_ack *extack)
{
	struct tcf_block *block;

	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
		block = tcf_block_refcnt_get(net, block_index);
		if (!block) {
			NL_SET_ERR_MSG(extack, "Block of given index was not found");
			return ERR_PTR(-EINVAL);
		}
	} else {
		const struct Qdisc_class_ops *cops = q->ops->cl_ops;

		block = cops->tcf_block(q, cl, extack);
		if (!block)
			return ERR_PTR(-EINVAL);

		if (tcf_block_shared(block)) {
			NL_SET_ERR_MSG(extack, "This filter block is shared. Please use the block index to manipulate the filters");
			return ERR_PTR(-EOPNOTSUPP);
		}

		/* Always take reference to block in order to support execution
		 * of rules update path of cls API without rtnl lock. Caller
		 * must release block when it is finished using it. The 'if'
		 * branch of this conditional obtains its reference by calling
		 * tcf_block_refcnt_get().
		 */
		refcount_inc(&block->refcnt);
	}

	return block;
}

static void __tcf_block_put(struct tcf_block *block, struct Qdisc *q,
			    struct tcf_block_ext_info *ei, bool rtnl_held)
{
	if (refcount_dec_and_mutex_lock(&block->refcnt, &block->lock)) {
		/* Flushing/putting all chains will cause the block to be
		 * deallocated when last chain is freed. However, if chain_list
		 * is empty, block has to be manually deallocated. After block
		 * reference counter reached 0, it is no longer possible to
		 * increment it or add new chains to block.
		 */
		bool free_block = list_empty(&block->chain_list);

		mutex_unlock(&block->lock);
		if (tcf_block_shared(block))
			tcf_block_remove(block, block->net);

		if (q)
			tcf_block_offload_unbind(block, q, ei);

		if (free_block)
			tcf_block_destroy(block);
		else
			tcf_block_flush_all_chains(block, rtnl_held);
	} else if (q) {
		tcf_block_offload_unbind(block, q, ei);
	}
}

static void tcf_block_refcnt_put(struct tcf_block *block, bool rtnl_held)
{
	__tcf_block_put(block, NULL, NULL, rtnl_held);
}

/* Find tcf block.
 * Set q, parent, cl when appropriate.
 */

static struct tcf_block *tcf_block_find(struct net *net, struct Qdisc **q,
					u32 *parent, unsigned long *cl,
					int ifindex, u32 block_index,
					struct netlink_ext_ack *extack)
{
	struct tcf_block *block;
	int err = 0;

	ASSERT_RTNL();

	err = __tcf_qdisc_find(net, q, parent, ifindex, true, extack);
	if (err)
		goto errout;

	err = __tcf_qdisc_cl_find(*q, *parent, cl, ifindex, extack);
	if (err)
		goto errout_qdisc;

	block = __tcf_block_find(net, *q, *cl, ifindex, block_index, extack);
	if (IS_ERR(block)) {
		err = PTR_ERR(block);
		goto errout_qdisc;
	}

	return block;

errout_qdisc:
	if (*q)
		qdisc_put(*q);
errout:
	*q = NULL;
	return ERR_PTR(err);
}

static void tcf_block_release(struct Qdisc *q, struct tcf_block *block,
			      bool rtnl_held)
{
	if (!IS_ERR_OR_NULL(block))
		tcf_block_refcnt_put(block, rtnl_held);

	if (q) {
		if (rtnl_held)
			qdisc_put(q);
		else
			qdisc_put_unlocked(q);
	}
}
struct tcf_block_owner_item {
	struct list_head list;
	struct Qdisc *q;
	enum flow_block_binder_type binder_type;
};

static void
tcf_block_owner_netif_keep_dst(struct tcf_block *block,
			       struct Qdisc *q,
			       enum flow_block_binder_type binder_type)
{
	if (block->keep_dst &&
	    binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
	    binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
		netif_keep_dst(qdisc_dev(q));
}

void tcf_block_netif_keep_dst(struct tcf_block *block)
{
	struct tcf_block_owner_item *item;

	block->keep_dst = true;
	list_for_each_entry(item, &block->owner_list, list)
		tcf_block_owner_netif_keep_dst(block, item->q,
					       item->binder_type);
}
EXPORT_SYMBOL(tcf_block_netif_keep_dst);

static int tcf_block_owner_add(struct tcf_block *block,
			       struct Qdisc *q,
			       enum flow_block_binder_type binder_type)
{
	struct tcf_block_owner_item *item;

	item = kmalloc(sizeof(*item), GFP_KERNEL);
	if (!item)
		return -ENOMEM;
	item->q = q;
	item->binder_type = binder_type;
	list_add(&item->list, &block->owner_list);
	return 0;
}

static void tcf_block_owner_del(struct tcf_block *block,
				struct Qdisc *q,
				enum flow_block_binder_type binder_type)
{
	struct tcf_block_owner_item *item;

	list_for_each_entry(item, &block->owner_list, list) {
		if (item->q == q && item->binder_type == binder_type) {
			list_del(&item->list);
			kfree(item);
			return;
		}
	}
	WARN_ON(1);
}

int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei,
		      struct netlink_ext_ack *extack)
{
	struct net *net = qdisc_net(q);
	struct tcf_block *block = NULL;
	int err;

	if (ei->block_index)
		/* block_index not 0 means the shared block is requested */
		block = tcf_block_refcnt_get(net, ei->block_index);

	if (!block) {
		block = tcf_block_create(net, q, ei->block_index, extack);
		if (IS_ERR(block))
			return PTR_ERR(block);
		if (tcf_block_shared(block)) {
			err = tcf_block_insert(block, net, extack);
			if (err)
				goto err_block_insert;
		}
	}

	err = tcf_block_owner_add(block, q, ei->binder_type);
	if (err)
		goto err_block_owner_add;

	tcf_block_owner_netif_keep_dst(block, q, ei->binder_type);

	err = tcf_chain0_head_change_cb_add(block, ei, extack);
	if (err)
		goto err_chain0_head_change_cb_add;

	err = tcf_block_offload_bind(block, q, ei, extack);
	if (err)
		goto err_block_offload_bind;

	*p_block = block;
	return 0;

err_block_offload_bind:
	tcf_chain0_head_change_cb_del(block, ei);
err_chain0_head_change_cb_add:
	tcf_block_owner_del(block, q, ei->binder_type);
err_block_owner_add:
err_block_insert:
	tcf_block_refcnt_put(block, true);
	return err;
}
EXPORT_SYMBOL(tcf_block_get_ext);

static void tcf_chain_head_change_dflt(struct tcf_proto *tp_head, void *priv)
{
	struct tcf_proto __rcu **p_filter_chain = priv;

	rcu_assign_pointer(*p_filter_chain, tp_head);
}

int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		  struct netlink_ext_ack *extack)
{
	struct tcf_block_ext_info ei = {
		.chain_head_change = tcf_chain_head_change_dflt,
		.chain_head_change_priv = p_filter_chain,
	};

	WARN_ON(!p_filter_chain);
	return tcf_block_get_ext(p_block, q, &ei, extack);
}
EXPORT_SYMBOL(tcf_block_get);
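/* Illustrative sketch (not part of this file): a qdisc typically calls
 * tcf_block_get() from its ->init() and tcf_block_put() from ->destroy(),
 * similar to what sch_prio does. The "foo" names are hypothetical:
 *
 *	static int foo_init(struct Qdisc *sch, struct nlattr *opt,
 *			    struct netlink_ext_ack *extack)
 *	{
 *		struct foo_sched_data *q = qdisc_priv(sch);
 *
 *		return tcf_block_get(&q->block, &q->filter_list, sch, extack);
 *	}
 *
 * The default chain_head_change callback installed above
 * (tcf_chain_head_change_dflt) then keeps q->filter_list pointing at the
 * current head of chain 0, ready to be passed to tcf_classify().
 */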
/* XXX: Standalone actions are not allowed to jump to any chain, and bound
 * actions should be all removed after flushing.
 */
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
		       struct tcf_block_ext_info *ei)
{
	if (!block)
		return;
	tcf_chain0_head_change_cb_del(block, ei);
	tcf_block_owner_del(block, q, ei->binder_type);

	__tcf_block_put(block, q, ei, true);
}
EXPORT_SYMBOL(tcf_block_put_ext);

void tcf_block_put(struct tcf_block *block)
{
	struct tcf_block_ext_info ei = {0, };

	if (!block)
		return;
	tcf_block_put_ext(block, block->q, &ei);
}
EXPORT_SYMBOL(tcf_block_put);

static int
tcf_block_playback_offloads(struct tcf_block *block, flow_setup_cb_t *cb,
			    void *cb_priv, bool add, bool offload_in_use,
			    struct netlink_ext_ack *extack)
{
	struct tcf_chain *chain, *chain_prev;
	struct tcf_proto *tp, *tp_prev;
	int err;

	lockdep_assert_held(&block->cb_lock);

	for (chain = __tcf_get_next_chain(block, NULL);
	     chain;
	     chain_prev = chain,
		     chain = __tcf_get_next_chain(block, chain),
		     tcf_chain_put(chain_prev)) {
		for (tp = __tcf_get_next_proto(chain, NULL); tp;
		     tp_prev = tp,
			     tp = __tcf_get_next_proto(chain, tp),
			     tcf_proto_put(tp_prev, true, NULL)) {
			if (tp->ops->reoffload) {
				err = tp->ops->reoffload(tp, add, cb, cb_priv,
							 extack);
				if (err && add)
					goto err_playback_remove;
			} else if (add && offload_in_use) {
				err = -EOPNOTSUPP;
				NL_SET_ERR_MSG(extack, "Filter HW offload failed - classifier without re-offloading support");
				goto err_playback_remove;
			}
		}
	}

	return 0;

err_playback_remove:
	tcf_proto_put(tp, true, NULL);
	tcf_chain_put(chain);
	tcf_block_playback_offloads(block, cb, cb_priv, false, offload_in_use,
				    extack);
	return err;
}

static int tcf_block_bind(struct tcf_block *block,
			  struct flow_block_offload *bo)
{
	struct flow_block_cb *block_cb, *next;
	int err, i = 0;

	lockdep_assert_held(&block->cb_lock);

	list_for_each_entry(block_cb, &bo->cb_list, list) {
		err = tcf_block_playback_offloads(block, block_cb->cb,
						  block_cb->cb_priv, true,
						  tcf_block_offload_in_use(block),
						  bo->extack);
		if (err)
			goto err_unroll;
		if (!bo->unlocked_driver_cb)
			block->lockeddevcnt++;

		i++;
	}
	list_splice(&bo->cb_list, &block->flow_block.cb_list);

	return 0;

err_unroll:
	list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
		if (i-- > 0) {
			list_del(&block_cb->list);
			tcf_block_playback_offloads(block, block_cb->cb,
						    block_cb->cb_priv, false,
						    tcf_block_offload_in_use(block),
						    NULL);
			if (!bo->unlocked_driver_cb)
				block->lockeddevcnt--;
		}
		flow_block_cb_free(block_cb);
	}

	return err;
}

static void tcf_block_unbind(struct tcf_block *block,
			     struct flow_block_offload *bo)
{
	struct flow_block_cb *block_cb, *next;

	lockdep_assert_held(&block->cb_lock);

	list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
		tcf_block_playback_offloads(block, block_cb->cb,
					    block_cb->cb_priv, false,
					    tcf_block_offload_in_use(block),
					    NULL);
		list_del(&block_cb->list);
		flow_block_cb_free(block_cb);
		if (!bo->unlocked_driver_cb)
			block->lockeddevcnt--;
	}
}

static int tcf_block_setup(struct tcf_block *block,
			   struct flow_block_offload *bo)
{
	int err;

	switch (bo->command) {
	case FLOW_BLOCK_BIND:
		err = tcf_block_bind(block, bo);
		break;
	case FLOW_BLOCK_UNBIND:
		err = 0;
		tcf_block_unbind(block, bo);
		break;
	default:
		WARN_ON_ONCE(1);
		err = -EOPNOTSUPP;
	}

	return err;
}
/* Main classifier routine: scans classifier chain attached
 * to this qdisc, (optionally) tests for protocol and asks
 * specific classifiers.
 */
static inline int __tcf_classify(struct sk_buff *skb,
				 const struct tcf_proto *tp,
				 const struct tcf_proto *orig_tp,
				 struct tcf_result *res,
				 bool compat_mode,
				 u32 *last_executed_chain)
{
#ifdef CONFIG_NET_CLS_ACT
	const int max_reclassify_loop = 4;
	const struct tcf_proto *first_tp;
	int limit = 0;

reclassify:
#endif
	for (; tp; tp = rcu_dereference_bh(tp->next)) {
		__be16 protocol = skb_protocol(skb, false);
		int err;

		if (tp->protocol != protocol &&
		    tp->protocol != htons(ETH_P_ALL))
			continue;

		err = tp->classify(skb, tp, res);
#ifdef CONFIG_NET_CLS_ACT
		if (unlikely(err == TC_ACT_RECLASSIFY && !compat_mode)) {
			first_tp = orig_tp;
			*last_executed_chain = first_tp->chain->index;
			goto reset;
		} else if (unlikely(TC_ACT_EXT_CMP(err, TC_ACT_GOTO_CHAIN))) {
			first_tp = res->goto_tp;
			*last_executed_chain = err & TC_ACT_EXT_VAL_MASK;
			goto reset;
		}
#endif
		if (err >= 0)
			return err;
	}

	return TC_ACT_UNSPEC; /* signal: continue lookup */
#ifdef CONFIG_NET_CLS_ACT
reset:
	if (unlikely(limit++ >= max_reclassify_loop)) {
		net_notice_ratelimited("%u: reclassify loop, rule prio %u, protocol %02x\n",
				       tp->chain->block->index,
				       tp->prio & 0xffff,
				       ntohs(tp->protocol));
		return TC_ACT_SHOT;
	}

	tp = first_tp;
	goto reclassify;
#endif
}

int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		 struct tcf_result *res, bool compat_mode)
{
	u32 last_executed_chain = 0;

	return __tcf_classify(skb, tp, tp, res, compat_mode,
			      &last_executed_chain);
}
EXPORT_SYMBOL(tcf_classify);
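/* Worked example (illustrative): TC_ACT_GOTO_CHAIN verdicts carry the
 * target chain index in their low bits. An action returning
 * (TC_ACT_GOTO_CHAIN | 7) makes TC_ACT_EXT_CMP(err, TC_ACT_GOTO_CHAIN)
 * true, so __tcf_classify() restarts from res->goto_tp with
 * *last_executed_chain = err & TC_ACT_EXT_VAL_MASK = 7. The
 * max_reclassify_loop limit of 4 bounds such restarts (and
 * TC_ACT_RECLASSIFY loops) before the packet is dropped with TC_ACT_SHOT.
 */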
int tcf_classify_ingress(struct sk_buff *skb,
			 const struct tcf_block *ingress_block,
			 const struct tcf_proto *tp,
			 struct tcf_result *res, bool compat_mode)
{
#if !IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
	u32 last_executed_chain = 0;

	return __tcf_classify(skb, tp, tp, res, compat_mode,
			      &last_executed_chain);
#else
	u32 last_executed_chain = tp ? tp->chain->index : 0;
	const struct tcf_proto *orig_tp = tp;
	struct tc_skb_ext *ext;
	int ret;

	ext = skb_ext_find(skb, TC_SKB_EXT);

	if (ext && ext->chain) {
		struct tcf_chain *fchain;

		fchain = tcf_chain_lookup_rcu(ingress_block, ext->chain);
		if (!fchain)
			return TC_ACT_SHOT;

		/* Consume, so cloned/redirect skbs won't inherit ext */
		skb_ext_del(skb, TC_SKB_EXT);

		tp = rcu_dereference_bh(fchain->filter_chain);
		last_executed_chain = fchain->index;
	}

	ret = __tcf_classify(skb, tp, orig_tp, res, compat_mode,
			     &last_executed_chain);

	/* If we missed on some chain */
	if (ret == TC_ACT_UNSPEC && last_executed_chain) {
		ext = skb_ext_add(skb, TC_SKB_EXT);
		if (WARN_ON_ONCE(!ext))
			return TC_ACT_SHOT;
		ext->chain = last_executed_chain;
	}

	return ret;
#endif
}
EXPORT_SYMBOL(tcf_classify_ingress);

struct tcf_chain_info {
	struct tcf_proto __rcu **pprev;
	struct tcf_proto __rcu *next;
};

static struct tcf_proto *tcf_chain_tp_prev(struct tcf_chain *chain,
					   struct tcf_chain_info *chain_info)
{
	return tcf_chain_dereference(*chain_info->pprev, chain);
}

static int tcf_chain_tp_insert(struct tcf_chain *chain,
			       struct tcf_chain_info *chain_info,
			       struct tcf_proto *tp)
{
	if (chain->flushing)
		return -EAGAIN;

	if (*chain_info->pprev == chain->filter_chain)
		tcf_chain0_head_change(chain, tp);
	tcf_proto_get(tp);
	RCU_INIT_POINTER(tp->next, tcf_chain_tp_prev(chain, chain_info));
	rcu_assign_pointer(*chain_info->pprev, tp);

	return 0;
}

static void tcf_chain_tp_remove(struct tcf_chain *chain,
				struct tcf_chain_info *chain_info,
				struct tcf_proto *tp)
{
	struct tcf_proto *next = tcf_chain_dereference(chain_info->next, chain);

	tcf_proto_mark_delete(tp);
	if (tp == chain->filter_chain)
		tcf_chain0_head_change(chain, next);
	RCU_INIT_POINTER(*chain_info->pprev, next);
}
static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
					   struct tcf_chain_info *chain_info,
					   u32 protocol, u32 prio,
					   bool prio_allocate);

/* Try to insert new proto.
 * If proto with specified priority already exists, free new proto
 * and return existing one.
 */

static struct tcf_proto *tcf_chain_tp_insert_unique(struct tcf_chain *chain,
						    struct tcf_proto *tp_new,
						    u32 protocol, u32 prio,
						    bool rtnl_held)
{
	struct tcf_chain_info chain_info;
	struct tcf_proto *tp;
	int err = 0;

	mutex_lock(&chain->filter_chain_lock);

	if (tcf_proto_exists_destroying(chain, tp_new)) {
		mutex_unlock(&chain->filter_chain_lock);
		tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
		return ERR_PTR(-EAGAIN);
	}

	tp = tcf_chain_tp_find(chain, &chain_info,
			       protocol, prio, false);
	if (!tp)
		err = tcf_chain_tp_insert(chain, &chain_info, tp_new);
	mutex_unlock(&chain->filter_chain_lock);

	if (tp) {
		tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
		tp_new = tp;
	} else if (err) {
		tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
		tp_new = ERR_PTR(err);
	}

	return tp_new;
}

static void tcf_chain_tp_delete_empty(struct tcf_chain *chain,
				      struct tcf_proto *tp, bool rtnl_held,
				      struct netlink_ext_ack *extack)
{
	struct tcf_chain_info chain_info;
	struct tcf_proto *tp_iter;
	struct tcf_proto **pprev;
	struct tcf_proto *next;

	mutex_lock(&chain->filter_chain_lock);

	/* Atomically find and remove tp from chain. */
	for (pprev = &chain->filter_chain;
	     (tp_iter = tcf_chain_dereference(*pprev, chain));
	     pprev = &tp_iter->next) {
		if (tp_iter == tp) {
			chain_info.pprev = pprev;
			chain_info.next = tp_iter->next;
			WARN_ON(tp_iter->deleting);
			break;
		}
	}
	/* Verify that tp still exists and no new filters were inserted
	 * concurrently.
	 * Mark tp for deletion if it is empty.
	 */
	if (!tp_iter || !tcf_proto_check_delete(tp)) {
		mutex_unlock(&chain->filter_chain_lock);
		return;
	}

	tcf_proto_signal_destroying(chain, tp);
	next = tcf_chain_dereference(chain_info.next, chain);
	if (tp == chain->filter_chain)
		tcf_chain0_head_change(chain, next);
	RCU_INIT_POINTER(*chain_info.pprev, next);
	mutex_unlock(&chain->filter_chain_lock);

	tcf_proto_put(tp, rtnl_held, extack);
}

static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
					   struct tcf_chain_info *chain_info,
					   u32 protocol, u32 prio,
					   bool prio_allocate)
{
	struct tcf_proto **pprev;
	struct tcf_proto *tp;

	/* Check the chain for existence of proto-tcf with this priority */
	for (pprev = &chain->filter_chain;
	     (tp = tcf_chain_dereference(*pprev, chain));
	     pprev = &tp->next) {
		if (tp->prio >= prio) {
			if (tp->prio == prio) {
				if (prio_allocate ||
				    (tp->protocol != protocol && protocol))
					return ERR_PTR(-EINVAL);
			} else {
				tp = NULL;
			}
			break;
		}
	}
	chain_info->pprev = pprev;
	if (tp) {
		chain_info->next = tp->next;
		tcf_proto_get(tp);
	} else {
		chain_info->next = NULL;
	}
	return tp;
}
static int tcf_fill_node(struct net *net, struct sk_buff *skb,
			 struct tcf_proto *tp, struct tcf_block *block,
			 struct Qdisc *q, u32 parent, void *fh,
			 u32 portid, u32 seq, u16 flags, int event,
			 bool terse_dump, bool rtnl_held)
{
	struct tcmsg *tcm;
	struct nlmsghdr *nlh;
	unsigned char *b = skb_tail_pointer(skb);

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
	if (!nlh)
		goto out_nlmsg_trim;
	tcm = nlmsg_data(nlh);
	tcm->tcm_family = AF_UNSPEC;
	tcm->tcm__pad1 = 0;
	tcm->tcm__pad2 = 0;
	if (q) {
		tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
		tcm->tcm_parent = parent;
	} else {
		tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
		tcm->tcm_block_index = block->index;
	}
	tcm->tcm_info = TC_H_MAKE(tp->prio, tp->protocol);
	if (nla_put_string(skb, TCA_KIND, tp->ops->kind))
		goto nla_put_failure;
	if (nla_put_u32(skb, TCA_CHAIN, tp->chain->index))
		goto nla_put_failure;
	if (!fh) {
		tcm->tcm_handle = 0;
	} else if (terse_dump) {
		if (tp->ops->terse_dump) {
			if (tp->ops->terse_dump(net, tp, fh, skb, tcm,
						rtnl_held) < 0)
				goto nla_put_failure;
		} else {
			goto cls_op_not_supp;
		}
	} else {
		if (tp->ops->dump &&
		    tp->ops->dump(net, tp, fh, skb, tcm, rtnl_held) < 0)
			goto nla_put_failure;
	}
	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

out_nlmsg_trim:
nla_put_failure:
cls_op_not_supp:
	nlmsg_trim(skb, b);
	return -1;
}

static int tfilter_notify(struct net *net, struct sk_buff *oskb,
			  struct nlmsghdr *n, struct tcf_proto *tp,
			  struct tcf_block *block, struct Qdisc *q,
			  u32 parent, void *fh, int event, bool unicast,
			  bool rtnl_held)
{
	struct sk_buff *skb;
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
	int err = 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
			  n->nlmsg_seq, n->nlmsg_flags, event,
			  false, rtnl_held) <= 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	if (unicast)
		err = netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
	else
		err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
				     n->nlmsg_flags & NLM_F_ECHO);

	if (err > 0)
		err = 0;
	return err;
}

static int tfilter_del_notify(struct net *net, struct sk_buff *oskb,
			      struct nlmsghdr *n, struct tcf_proto *tp,
			      struct tcf_block *block, struct Qdisc *q,
			      u32 parent, void *fh, bool unicast, bool *last,
			      bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct sk_buff *skb;
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
	int err;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
			  n->nlmsg_seq, n->nlmsg_flags, RTM_DELTFILTER,
			  false, rtnl_held) <= 0) {
		NL_SET_ERR_MSG(extack, "Failed to build del event notification");
		kfree_skb(skb);
		return -EINVAL;
	}

	err = tp->ops->delete(tp, fh, last, rtnl_held, extack);
	if (err) {
		kfree_skb(skb);
		return err;
	}

	if (unicast)
		err = netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
	else
		err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
				     n->nlmsg_flags & NLM_F_ECHO);
	if (err < 0)
		NL_SET_ERR_MSG(extack, "Failed to send filter delete notification");

	if (err > 0)
		err = 0;
	return err;
}

static void tfilter_notify_chain(struct net *net, struct sk_buff *oskb,
				 struct tcf_block *block, struct Qdisc *q,
				 u32 parent, struct nlmsghdr *n,
				 struct tcf_chain *chain, int event,
				 bool rtnl_held)
{
	struct tcf_proto *tp;

	for (tp = tcf_get_next_proto(chain, NULL, rtnl_held);
	     tp; tp = tcf_get_next_proto(chain, tp, rtnl_held))
		tfilter_notify(net, oskb, n, tp, block,
			       q, parent, NULL, event, false, rtnl_held);
}

static void tfilter_put(struct tcf_proto *tp, void *fh)
{
	if (tp->ops->put && fh)
		tp->ops->put(tp, fh);
}
static int tc_new_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	char name[IFNAMSIZ];
	struct tcmsg *t;
	u32 protocol;
	u32 prio;
	bool prio_allocate;
	u32 parent;
	u32 chain_index;
	struct Qdisc *q = NULL;
	struct tcf_chain_info chain_info;
	struct tcf_chain *chain = NULL;
	struct tcf_block *block;
	struct tcf_proto *tp;
	unsigned long cl;
	void *fh;
	int err;
	int tp_created;
	bool rtnl_held = false;

	if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

replay:
	tp_created = 0;

	err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
				     rtm_tca_policy, extack);
	if (err < 0)
		return err;

	t = nlmsg_data(n);
	protocol = TC_H_MIN(t->tcm_info);
	prio = TC_H_MAJ(t->tcm_info);
	prio_allocate = false;
	parent = t->tcm_parent;
	tp = NULL;
	cl = 0;
	block = NULL;

	if (prio == 0) {
		/* If no priority is provided by the user,
		 * we allocate one.
		 */
		if (n->nlmsg_flags & NLM_F_CREATE) {
			prio = TC_H_MAKE(0x80000000U, 0U);
			prio_allocate = true;
		} else {
			NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero");
			return -ENOENT;
		}
	}

	/* Find head of filter chain. */

	err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
	if (err)
		return err;

	if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
		NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
		err = -EINVAL;
		goto errout;
	}

	/* Take rtnl mutex if rtnl_held was set to true on previous iteration,
	 * block is shared (no qdisc found), qdisc is not unlocked, classifier
	 * type is not specified, classifier is not unlocked.
	 */
	if (rtnl_held ||
	    (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
	    !tcf_proto_is_unlocked(name)) {
		rtnl_held = true;
		rtnl_lock();
	}

	err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
	if (err)
		goto errout;

	block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
				 extack);
	if (IS_ERR(block)) {
		err = PTR_ERR(block);
		goto errout;
	}
	block->classid = parent;

	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
	if (chain_index > TC_ACT_EXT_VAL_MASK) {
		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
		err = -EINVAL;
		goto errout;
	}
	chain = tcf_chain_get(block, chain_index, true);
	if (!chain) {
		NL_SET_ERR_MSG(extack, "Cannot create specified filter chain");
		err = -ENOMEM;
		goto errout;
	}

	mutex_lock(&chain->filter_chain_lock);
	tp = tcf_chain_tp_find(chain, &chain_info, protocol,
			       prio, prio_allocate);
	if (IS_ERR(tp)) {
		NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
		err = PTR_ERR(tp);
		goto errout_locked;
	}

	if (tp == NULL) {
		struct tcf_proto *tp_new = NULL;

		if (chain->flushing) {
			err = -EAGAIN;
			goto errout_locked;
		}

		/* Proto-tcf does not exist, create new one */

		if (tca[TCA_KIND] == NULL || !protocol) {
			NL_SET_ERR_MSG(extack, "Filter kind and protocol must be specified");
			err = -EINVAL;
			goto errout_locked;
		}

		if (!(n->nlmsg_flags & NLM_F_CREATE)) {
			NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
			err = -ENOENT;
			goto errout_locked;
		}

		if (prio_allocate)
			prio = tcf_auto_prio(tcf_chain_tp_prev(chain,
							       &chain_info));

		mutex_unlock(&chain->filter_chain_lock);
		tp_new = tcf_proto_create(name, protocol, prio, chain,
					  rtnl_held, extack);
		if (IS_ERR(tp_new)) {
			err = PTR_ERR(tp_new);
			goto errout_tp;
		}

		tp_created = 1;
		tp = tcf_chain_tp_insert_unique(chain, tp_new, protocol, prio,
						rtnl_held);
		if (IS_ERR(tp)) {
			err = PTR_ERR(tp);
			goto errout_tp;
		}
	} else {
		mutex_unlock(&chain->filter_chain_lock);
	}

	if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
		NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
		err = -EINVAL;
		goto errout;
	}

	fh = tp->ops->get(tp, t->tcm_handle);

	if (!fh) {
		if (!(n->nlmsg_flags & NLM_F_CREATE)) {
			NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
			err = -ENOENT;
			goto errout;
		}
	} else if (n->nlmsg_flags & NLM_F_EXCL) {
		tfilter_put(tp, fh);
		NL_SET_ERR_MSG(extack, "Filter already exists");
		err = -EEXIST;
		goto errout;
	}

	if (chain->tmplt_ops && chain->tmplt_ops != tp->ops) {
		NL_SET_ERR_MSG(extack, "Chain template is set to a different filter kind");
		err = -EINVAL;
		goto errout;
	}

	err = tp->ops->change(net, skb, tp, cl, t->tcm_handle, tca, &fh,
			      n->nlmsg_flags & NLM_F_CREATE ? TCA_ACT_NOREPLACE : TCA_ACT_REPLACE,
			      rtnl_held, extack);
	if (err == 0) {
		tfilter_notify(net, skb, n, tp, block, q, parent, fh,
			       RTM_NEWTFILTER, false, rtnl_held);
		tfilter_put(tp, fh);
		/* q pointer is NULL for shared blocks */
		if (q)
			q->flags &= ~TCQ_F_CAN_BYPASS;
	}

errout:
	if (err && tp_created)
		tcf_chain_tp_delete_empty(chain, tp, rtnl_held, NULL);
errout_tp:
	if (chain) {
		if (tp && !IS_ERR(tp))
			tcf_proto_put(tp, rtnl_held, NULL);
		if (!tp_created)
			tcf_chain_put(chain);
	}
	tcf_block_release(q, block, rtnl_held);

	if (rtnl_held)
		rtnl_unlock();

	if (err == -EAGAIN) {
		/* Take rtnl lock in case EAGAIN is caused by concurrent flush
		 * of target chain.
		 */
		rtnl_held = true;
		/* Replay the request. */
		goto replay;
	}
	return err;

errout_locked:
	mutex_unlock(&chain->filter_chain_lock);
	goto errout;
}
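/* Illustrative mapping to tc(8), assuming iproute2's usual flag usage
 * (hypothetical device/filter):
 *
 *	tc filter add dev eth0 ingress prio 10 protocol ip u32 ...
 *		-> RTM_NEWTFILTER with NLM_F_CREATE | NLM_F_EXCL
 *	tc filter replace dev eth0 ingress prio 10 protocol ip u32 ...
 *		-> RTM_NEWTFILTER with NLM_F_CREATE
 *	tc filter change dev eth0 ingress prio 10 protocol ip u32 ...
 *		-> RTM_NEWTFILTER without NLM_F_CREATE
 *
 * Omitting "prio" on add asks the kernel to auto-allocate one via
 * tcf_auto_prio(), which requires NLM_F_CREATE per the check above.
 */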
	if (err == 0) {
		tfilter_notify(net, skb, n, tp, block, q, parent, fh,
			       RTM_NEWTFILTER, false, rtnl_held);
		tfilter_put(tp, fh);
		/* q pointer is NULL for shared blocks */
		if (q)
			q->flags &= ~TCQ_F_CAN_BYPASS;
	}

errout:
	if (err && tp_created)
		tcf_chain_tp_delete_empty(chain, tp, rtnl_held, NULL);
errout_tp:
	if (chain) {
		if (tp && !IS_ERR(tp))
			tcf_proto_put(tp, rtnl_held, NULL);
		if (!tp_created)
			tcf_chain_put(chain);
	}
	tcf_block_release(q, block, rtnl_held);

	if (rtnl_held)
		rtnl_unlock();

	if (err == -EAGAIN) {
		/* Take rtnl lock in case EAGAIN is caused by concurrent flush
		 * of target chain.
		 */
		rtnl_held = true;
		/* Replay the request. */
		goto replay;
	}
	return err;

errout_locked:
	mutex_unlock(&chain->filter_chain_lock);
	goto errout;
}
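/* Handle RTM_DELTFILTER requests. Three cases are distinguished below:
 * prio == 0 flushes the whole chain, tcm_handle == 0 removes the entire
 * tcf_proto at the given priority/protocol, and a non-zero handle deletes
 * a single filter instance.
 */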
static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	char name[IFNAMSIZ];
	struct tcmsg *t;
	u32 protocol;
	u32 prio;
	u32 parent;
	u32 chain_index;
	struct Qdisc *q = NULL;
	struct tcf_chain_info chain_info;
	struct tcf_chain *chain = NULL;
	struct tcf_block *block = NULL;
	struct tcf_proto *tp = NULL;
	unsigned long cl = 0;
	void *fh = NULL;
	int err;
	bool rtnl_held = false;

	if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
				     rtm_tca_policy, extack);
	if (err < 0)
		return err;

	t = nlmsg_data(n);
	protocol = TC_H_MIN(t->tcm_info);
	prio = TC_H_MAJ(t->tcm_info);
	parent = t->tcm_parent;

	if (prio == 0 && (protocol || t->tcm_handle || tca[TCA_KIND])) {
		NL_SET_ERR_MSG(extack, "Cannot flush filters with protocol, handle or kind set");
		return -ENOENT;
	}

	/* Find head of filter chain. */

	err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
	if (err)
		return err;

	if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
		NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
		err = -EINVAL;
		goto errout;
	}
	/* Take rtnl mutex if flushing whole chain, block is shared (no qdisc
	 * found), qdisc is not unlocked, classifier type is not specified,
	 * classifier is not unlocked.
	 */
	if (!prio ||
	    !q || !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED) ||
	    !tcf_proto_is_unlocked(name)) {
		rtnl_held = true;
		rtnl_lock();
	}

	err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
	if (err)
		goto errout;

	block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
				 extack);
	if (IS_ERR(block)) {
		err = PTR_ERR(block);
		goto errout;
	}

	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
	if (chain_index > TC_ACT_EXT_VAL_MASK) {
		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
		err = -EINVAL;
		goto errout;
	}
	chain = tcf_chain_get(block, chain_index, false);
	if (!chain) {
		/* User requested flush on non-existent chain. Nothing to do,
		 * so just return success.
		 */
		if (prio == 0) {
			err = 0;
			goto errout;
		}
		NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
		err = -ENOENT;
		goto errout;
	}

	if (prio == 0) {
		tfilter_notify_chain(net, skb, block, q, parent, n,
				     chain, RTM_DELTFILTER, rtnl_held);
		tcf_chain_flush(chain, rtnl_held);
		err = 0;
		goto errout;
	}

	mutex_lock(&chain->filter_chain_lock);
	tp = tcf_chain_tp_find(chain, &chain_info, protocol,
			       prio, false);
	if (!tp || IS_ERR(tp)) {
		NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
		err = tp ? PTR_ERR(tp) : -ENOENT;
		goto errout_locked;
	} else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
		NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
		err = -EINVAL;
		goto errout_locked;
	} else if (t->tcm_handle == 0) {
		tcf_proto_signal_destroying(chain, tp);
		tcf_chain_tp_remove(chain, &chain_info, tp);
		mutex_unlock(&chain->filter_chain_lock);

		tcf_proto_put(tp, rtnl_held, NULL);
		tfilter_notify(net, skb, n, tp, block, q, parent, fh,
			       RTM_DELTFILTER, false, rtnl_held);
		err = 0;
		goto errout;
	}
	mutex_unlock(&chain->filter_chain_lock);

	fh = tp->ops->get(tp, t->tcm_handle);

	if (!fh) {
		NL_SET_ERR_MSG(extack, "Specified filter handle not found");
		err = -ENOENT;
	} else {
		bool last;

		err = tfilter_del_notify(net, skb, n, tp, block,
					 q, parent, fh, false, &last,
					 rtnl_held, extack);

		if (err)
			goto errout;
		if (last)
			tcf_chain_tp_delete_empty(chain, tp, rtnl_held, extack);
	}

errout:
	if (chain) {
		if (tp && !IS_ERR(tp))
			tcf_proto_put(tp, rtnl_held, NULL);
		tcf_chain_put(chain);
	}
	tcf_block_release(q, block, rtnl_held);

	if (rtnl_held)
		rtnl_unlock();

	return err;

errout_locked:
	mutex_unlock(&chain->filter_chain_lock);
	goto errout;
}
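/* Handle RTM_GETTFILTER requests: look up a single filter and unicast it
 * back to the requester as an RTM_NEWTFILTER message. This is a read-only
 * operation, so no CAP_NET_ADMIN check is performed here.
 */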
static int tc_get_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	char name[IFNAMSIZ];
	struct tcmsg *t;
	u32 protocol;
	u32 prio;
	u32 parent;
	u32 chain_index;
	struct Qdisc *q = NULL;
	struct tcf_chain_info chain_info;
	struct tcf_chain *chain = NULL;
	struct tcf_block *block = NULL;
	struct tcf_proto *tp = NULL;
	unsigned long cl = 0;
	void *fh = NULL;
	int err;
	bool rtnl_held = false;

	err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
				     rtm_tca_policy, extack);
	if (err < 0)
		return err;

	t = nlmsg_data(n);
	protocol = TC_H_MIN(t->tcm_info);
	prio = TC_H_MAJ(t->tcm_info);
	parent = t->tcm_parent;

	if (prio == 0) {
		NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero");
		return -ENOENT;
	}

	/* Find head of filter chain. */

	err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
	if (err)
		return err;

	if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
		NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
		err = -EINVAL;
		goto errout;
	}
	/* Take rtnl mutex if block is shared (no qdisc found), qdisc is not
	 * unlocked, classifier type is not specified, classifier is not
	 * unlocked.
	 */
	if (!q || !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED) ||
	    !tcf_proto_is_unlocked(name)) {
		rtnl_held = true;
		rtnl_lock();
	}

	err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
	if (err)
		goto errout;

	block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
				 extack);
	if (IS_ERR(block)) {
		err = PTR_ERR(block);
		goto errout;
	}

	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
	if (chain_index > TC_ACT_EXT_VAL_MASK) {
		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
		err = -EINVAL;
		goto errout;
	}
	chain = tcf_chain_get(block, chain_index, false);
	if (!chain) {
		NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
		err = -EINVAL;
		goto errout;
	}

	mutex_lock(&chain->filter_chain_lock);
	tp = tcf_chain_tp_find(chain, &chain_info, protocol,
			       prio, false);
	mutex_unlock(&chain->filter_chain_lock);
	if (!tp || IS_ERR(tp)) {
		NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
		err = tp ? PTR_ERR(tp) : -ENOENT;
		goto errout;
	} else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
		NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
		err = -EINVAL;
		goto errout;
	}

	fh = tp->ops->get(tp, t->tcm_handle);

	if (!fh) {
		NL_SET_ERR_MSG(extack, "Specified filter handle not found");
		err = -ENOENT;
	} else {
		err = tfilter_notify(net, skb, n, tp, block, q, parent,
				     fh, RTM_NEWTFILTER, true, rtnl_held);
		if (err < 0)
			NL_SET_ERR_MSG(extack, "Failed to send filter notify message");
	}

	tfilter_put(tp, fh);
errout:
	if (chain) {
		if (tp && !IS_ERR(tp))
			tcf_proto_put(tp, rtnl_held, NULL);
		tcf_chain_put(chain);
	}
	tcf_block_release(q, block, rtnl_held);

	if (rtnl_held)
		rtnl_unlock();

	return err;
}
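/* Dump helpers. The netlink_callback argument array is used as a resumption
 * cursor across dump invocations: cb->args[0] tracks the tcf_proto index
 * within the block, cb->args[1] flags whether the tcf_proto header was
 * already dumped, and cb->args[2] carries the classifier's own walk cookie.
 */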
struct tcf_dump_args {
	struct tcf_walker w;
	struct sk_buff *skb;
	struct netlink_callback *cb;
	struct tcf_block *block;
	struct Qdisc *q;
	u32 parent;
	bool terse_dump;
};

static int tcf_node_dump(struct tcf_proto *tp, void *n, struct tcf_walker *arg)
{
	struct tcf_dump_args *a = (void *)arg;
	struct net *net = sock_net(a->skb->sk);

	return tcf_fill_node(net, a->skb, tp, a->block, a->q, a->parent,
			     n, NETLINK_CB(a->cb->skb).portid,
			     a->cb->nlh->nlmsg_seq, NLM_F_MULTI,
			     RTM_NEWTFILTER, a->terse_dump, true);
}

static bool tcf_chain_dump(struct tcf_chain *chain, struct Qdisc *q, u32 parent,
			   struct sk_buff *skb, struct netlink_callback *cb,
			   long index_start, long *p_index, bool terse)
{
	struct net *net = sock_net(skb->sk);
	struct tcf_block *block = chain->block;
	struct tcmsg *tcm = nlmsg_data(cb->nlh);
	struct tcf_proto *tp, *tp_prev;
	struct tcf_dump_args arg;

	for (tp = __tcf_get_next_proto(chain, NULL);
	     tp;
	     tp_prev = tp,
		     tp = __tcf_get_next_proto(chain, tp),
		     tcf_proto_put(tp_prev, true, NULL),
		     (*p_index)++) {
		if (*p_index < index_start)
			continue;
		if (TC_H_MAJ(tcm->tcm_info) &&
		    TC_H_MAJ(tcm->tcm_info) != tp->prio)
			continue;
		if (TC_H_MIN(tcm->tcm_info) &&
		    TC_H_MIN(tcm->tcm_info) != tp->protocol)
			continue;
		if (*p_index > index_start)
			memset(&cb->args[1], 0,
			       sizeof(cb->args) - sizeof(cb->args[0]));
		if (cb->args[1] == 0) {
			if (tcf_fill_node(net, skb, tp, block, q, parent, NULL,
					  NETLINK_CB(cb->skb).portid,
					  cb->nlh->nlmsg_seq, NLM_F_MULTI,
					  RTM_NEWTFILTER, false, true) <= 0)
				goto errout;
			cb->args[1] = 1;
		}
		if (!tp->ops->walk)
			continue;
		arg.w.fn = tcf_node_dump;
		arg.skb = skb;
		arg.cb = cb;
		arg.block = block;
		arg.q = q;
		arg.parent = parent;
		arg.w.stop = 0;
		arg.w.skip = cb->args[1] - 1;
		arg.w.count = 0;
		arg.w.cookie = cb->args[2];
		arg.terse_dump = terse;
		tp->ops->walk(tp, &arg.w, true);
		cb->args[2] = arg.w.cookie;
		cb->args[1] = arg.w.count + 1;
		if (arg.w.stop)
			goto errout;
	}
	return true;

errout:
	tcf_proto_put(tp, true, NULL);
	return false;
}

static const struct nla_policy tcf_tfilter_dump_policy[TCA_MAX + 1] = {
	[TCA_DUMP_FLAGS] = NLA_POLICY_BITFIELD32(TCA_DUMP_FLAGS_TERSE),
};
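/* Dump all filters on a block, either found via qdisc/class or, when
 * tcm_ifindex is TCM_IFINDEX_MAGIC_BLOCK, directly by shared block index.
 */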
/* called with RTNL */
static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct tcf_chain *chain, *chain_prev;
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	struct Qdisc *q = NULL;
	struct tcf_block *block;
	struct tcmsg *tcm = nlmsg_data(cb->nlh);
	bool terse_dump = false;
	long index_start;
	long index;
	u32 parent;
	int err;

	if (nlmsg_len(cb->nlh) < sizeof(*tcm))
		return skb->len;

	err = nlmsg_parse_deprecated(cb->nlh, sizeof(*tcm), tca, TCA_MAX,
				     tcf_tfilter_dump_policy, cb->extack);
	if (err)
		return err;

	if (tca[TCA_DUMP_FLAGS]) {
		struct nla_bitfield32 flags =
			nla_get_bitfield32(tca[TCA_DUMP_FLAGS]);

		terse_dump = flags.value & TCA_DUMP_FLAGS_TERSE;
	}

	if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
		block = tcf_block_refcnt_get(net, tcm->tcm_block_index);
		if (!block)
			goto out;
		/* If we work with block index, q is NULL and parent value
		 * will never be used in the following code. The check
		 * in tcf_fill_node prevents it. However, compiler does not
		 * see that far, so set parent to zero to silence the warning
		 * about parent being uninitialized.
		 */
		parent = 0;
	} else {
		const struct Qdisc_class_ops *cops;
		struct net_device *dev;
		unsigned long cl = 0;

		dev = __dev_get_by_index(net, tcm->tcm_ifindex);
		if (!dev)
			return skb->len;

		parent = tcm->tcm_parent;
		if (!parent)
			q = dev->qdisc;
		else
			q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
		if (!q)
			goto out;
		cops = q->ops->cl_ops;
		if (!cops)
			goto out;
		if (!cops->tcf_block)
			goto out;
		if (TC_H_MIN(tcm->tcm_parent)) {
			cl = cops->find(q, tcm->tcm_parent);
			if (cl == 0)
				goto out;
		}
		block = cops->tcf_block(q, cl, NULL);
		if (!block)
			goto out;
		parent = block->classid;
		if (tcf_block_shared(block))
			q = NULL;
	}

	index_start = cb->args[0];
	index = 0;

	for (chain = __tcf_get_next_chain(block, NULL);
	     chain;
	     chain_prev = chain,
		     chain = __tcf_get_next_chain(block, chain),
		     tcf_chain_put(chain_prev)) {
		if (tca[TCA_CHAIN] &&
		    nla_get_u32(tca[TCA_CHAIN]) != chain->index)
			continue;
		if (!tcf_chain_dump(chain, q, parent, skb, cb,
				    index_start, &index, terse_dump)) {
			tcf_chain_put(chain);
			err = -EMSGSIZE;
			break;
		}
	}

	if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK)
		tcf_block_refcnt_put(block, true);
	cb->args[0] = index;

out:
	/* If we did no progress, the error (EMSGSIZE) is real */
	if (skb->len == 0 && err)
		return err;
	return skb->len;
}

static int tc_chain_fill_node(const struct tcf_proto_ops *tmplt_ops,
			      void *tmplt_priv, u32 chain_index,
			      struct net *net, struct sk_buff *skb,
			      struct tcf_block *block,
			      u32 portid, u32 seq, u16 flags, int event)
{
	unsigned char *b = skb_tail_pointer(skb);
	const struct tcf_proto_ops *ops;
	struct nlmsghdr *nlh;
	struct tcmsg *tcm;
	void *priv;

	ops = tmplt_ops;
	priv = tmplt_priv;

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
	if (!nlh)
		goto out_nlmsg_trim;
	tcm = nlmsg_data(nlh);
	tcm->tcm_family = AF_UNSPEC;
	tcm->tcm__pad1 = 0;
	tcm->tcm__pad2 = 0;
	tcm->tcm_handle = 0;
	if (block->q) {
		tcm->tcm_ifindex = qdisc_dev(block->q)->ifindex;
		tcm->tcm_parent = block->q->handle;
	} else {
		tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
		tcm->tcm_block_index = block->index;
	}

	if (nla_put_u32(skb, TCA_CHAIN, chain_index))
		goto nla_put_failure;

	if (ops) {
		if (nla_put_string(skb, TCA_KIND, ops->kind))
			goto nla_put_failure;
		if (ops->tmplt_dump(skb, net, priv) < 0)
			goto nla_put_failure;
	}

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

out_nlmsg_trim:
nla_put_failure:
	nlmsg_trim(skb, b);
	return -EMSGSIZE;
}
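/* Notify listeners (or unicast to the requester) about chain add/delete
 * events. tc_chain_notify_delete() takes the template ops and priv directly
 * because the chain itself may already be gone by the time the notification
 * is sent.
 */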
static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
			   u32 seq, u16 flags, int event, bool unicast)
{
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
	struct tcf_block *block = chain->block;
	struct net *net = block->net;
	struct sk_buff *skb;
	int err = 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv,
			       chain->index, net, skb, block, portid,
			       seq, flags, event) <= 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	if (unicast)
		err = netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
	else
		err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
				     flags & NLM_F_ECHO);

	if (err > 0)
		err = 0;
	return err;
}

static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops,
				  void *tmplt_priv, u32 chain_index,
				  struct tcf_block *block, struct sk_buff *oskb,
				  u32 seq, u16 flags, bool unicast)
{
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
	struct net *net = block->net;
	struct sk_buff *skb;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tc_chain_fill_node(tmplt_ops, tmplt_priv, chain_index, net, skb,
			       block, portid, seq, flags, RTM_DELCHAIN) <= 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	if (unicast)
		return netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);

	return rtnetlink_send(skb, net, portid, RTNLGRP_TC, flags & NLM_F_ECHO);
}

static int tc_chain_tmplt_add(struct tcf_chain *chain, struct net *net,
			      struct nlattr **tca,
			      struct netlink_ext_ack *extack)
{
	const struct tcf_proto_ops *ops;
	char name[IFNAMSIZ];
	void *tmplt_priv;

	/* If kind is not set, user did not specify template. */
	if (!tca[TCA_KIND])
		return 0;

	if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
		NL_SET_ERR_MSG(extack, "Specified TC chain template name too long");
		return -EINVAL;
	}

	ops = tcf_proto_lookup_ops(name, true, extack);
	if (IS_ERR(ops))
		return PTR_ERR(ops);
	if (!ops->tmplt_create || !ops->tmplt_destroy || !ops->tmplt_dump) {
		NL_SET_ERR_MSG(extack, "Chain templates are not supported with specified classifier");
		module_put(ops->owner);
		return -EOPNOTSUPP;
	}

	tmplt_priv = ops->tmplt_create(net, chain, tca, extack);
	if (IS_ERR(tmplt_priv)) {
		module_put(ops->owner);
		return PTR_ERR(tmplt_priv);
	}
	chain->tmplt_ops = ops;
	chain->tmplt_priv = tmplt_priv;
	return 0;
}

static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops,
			       void *tmplt_priv)
{
	/* If template ops are not set, there is no work to do for us. */
	if (!tmplt_ops)
		return;

	tmplt_ops->tmplt_destroy(tmplt_priv);
	module_put(tmplt_ops->owner);
}

/* Add/delete/get a chain */
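/* RTM_NEWCHAIN creates an explicit chain (optionally with a template that
 * constrains the filters that may be attached to it), RTM_DELCHAIN flushes
 * and removes it, and RTM_GETCHAIN unicasts its current state back to the
 * requester.
 */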
static int tc_ctl_chain(struct sk_buff *skb, struct nlmsghdr *n,
			struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	struct tcmsg *t;
	u32 parent;
	u32 chain_index;
	struct Qdisc *q = NULL;
	struct tcf_chain *chain = NULL;
	struct tcf_block *block;
	unsigned long cl;
	int err;

	if (n->nlmsg_type != RTM_GETCHAIN &&
	    !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

replay:
	err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
				     rtm_tca_policy, extack);
	if (err < 0)
		return err;

	t = nlmsg_data(n);
	parent = t->tcm_parent;
	cl = 0;

	block = tcf_block_find(net, &q, &parent, &cl,
			       t->tcm_ifindex, t->tcm_block_index, extack);
	if (IS_ERR(block))
		return PTR_ERR(block);

	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
	if (chain_index > TC_ACT_EXT_VAL_MASK) {
		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
		err = -EINVAL;
		goto errout_block;
	}

	mutex_lock(&block->lock);
	chain = tcf_chain_lookup(block, chain_index);
	if (n->nlmsg_type == RTM_NEWCHAIN) {
		if (chain) {
			if (tcf_chain_held_by_acts_only(chain)) {
				/* The chain exists only because there is
				 * some action referencing it.
				 */
				tcf_chain_hold(chain);
			} else {
				NL_SET_ERR_MSG(extack, "Filter chain already exists");
				err = -EEXIST;
				goto errout_block_locked;
			}
		} else {
			if (!(n->nlmsg_flags & NLM_F_CREATE)) {
				NL_SET_ERR_MSG(extack, "Need both RTM_NEWCHAIN and NLM_F_CREATE to create a new chain");
				err = -ENOENT;
				goto errout_block_locked;
			}
			chain = tcf_chain_create(block, chain_index);
			if (!chain) {
				NL_SET_ERR_MSG(extack, "Failed to create filter chain");
				err = -ENOMEM;
				goto errout_block_locked;
			}
		}
	} else {
		if (!chain || tcf_chain_held_by_acts_only(chain)) {
			NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
			err = -EINVAL;
			goto errout_block_locked;
		}
		tcf_chain_hold(chain);
	}

	if (n->nlmsg_type == RTM_NEWCHAIN) {
		/* Modifying chain requires holding parent block lock. In case
		 * the chain was successfully added, take a reference to the
		 * chain. This ensures that an empty chain does not disappear at
		 * the end of this function.
		 */
		tcf_chain_hold(chain);
		chain->explicitly_created = true;
	}
	mutex_unlock(&block->lock);

	switch (n->nlmsg_type) {
	case RTM_NEWCHAIN:
		err = tc_chain_tmplt_add(chain, net, tca, extack);
		if (err) {
			tcf_chain_put_explicitly_created(chain);
			goto errout;
		}

		tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
				RTM_NEWCHAIN, false);
		break;
	case RTM_DELCHAIN:
		tfilter_notify_chain(net, skb, block, q, parent, n,
				     chain, RTM_DELTFILTER, true);
		/* Flush the chain first as the user requested chain removal. */
		tcf_chain_flush(chain, true);
		/* In case the chain was successfully deleted, put a reference
		 * to the chain previously taken during addition.
		 */
		tcf_chain_put_explicitly_created(chain);
		break;
	case RTM_GETCHAIN:
		err = tc_chain_notify(chain, skb, n->nlmsg_seq,
				      n->nlmsg_flags, n->nlmsg_type, true);
		if (err < 0)
			NL_SET_ERR_MSG(extack, "Failed to send chain notify message");
		break;
	default:
		err = -EOPNOTSUPP;
		NL_SET_ERR_MSG(extack, "Unsupported message type");
		goto errout;
	}

errout:
	tcf_chain_put(chain);
errout_block:
	tcf_block_release(q, block, true);
	if (err == -EAGAIN)
		/* Replay the request. */
		goto replay;
	return err;

errout_block_locked:
	mutex_unlock(&block->lock);
	goto errout_block;
}
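/* Dump explicitly created chains on a block. Chains that exist only because
 * an action references them are skipped, matching what tc_ctl_chain() exposes.
 */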
/* called with RTNL */
static int tc_dump_chain(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	struct Qdisc *q = NULL;
	struct tcf_block *block;
	struct tcmsg *tcm = nlmsg_data(cb->nlh);
	struct tcf_chain *chain;
	long index_start;
	long index;
	u32 parent;
	int err;

	if (nlmsg_len(cb->nlh) < sizeof(*tcm))
		return skb->len;

	err = nlmsg_parse_deprecated(cb->nlh, sizeof(*tcm), tca, TCA_MAX,
				     rtm_tca_policy, cb->extack);
	if (err)
		return err;

	if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
		block = tcf_block_refcnt_get(net, tcm->tcm_block_index);
		if (!block)
			goto out;
		/* If we work with block index, q is NULL and parent value
		 * will never be used in the following code. The check
		 * in tcf_fill_node prevents it. However, compiler does not
		 * see that far, so set parent to zero to silence the warning
		 * about parent being uninitialized.
		 */
		parent = 0;
	} else {
		const struct Qdisc_class_ops *cops;
		struct net_device *dev;
		unsigned long cl = 0;

		dev = __dev_get_by_index(net, tcm->tcm_ifindex);
		if (!dev)
			return skb->len;

		parent = tcm->tcm_parent;
		if (!parent) {
			q = dev->qdisc;
			parent = q->handle;
		} else {
			q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
		}
		if (!q)
			goto out;
		cops = q->ops->cl_ops;
		if (!cops)
			goto out;
		if (!cops->tcf_block)
			goto out;
		if (TC_H_MIN(tcm->tcm_parent)) {
			cl = cops->find(q, tcm->tcm_parent);
			if (cl == 0)
				goto out;
		}
		block = cops->tcf_block(q, cl, NULL);
		if (!block)
			goto out;
		if (tcf_block_shared(block))
			q = NULL;
	}

	index_start = cb->args[0];
	index = 0;

	mutex_lock(&block->lock);
	list_for_each_entry(chain, &block->chain_list, list) {
		if ((tca[TCA_CHAIN] &&
		     nla_get_u32(tca[TCA_CHAIN]) != chain->index))
			continue;
		if (index < index_start) {
			index++;
			continue;
		}
		if (tcf_chain_held_by_acts_only(chain))
			continue;
		err = tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv,
					 chain->index, net, skb, block,
					 NETLINK_CB(cb->skb).portid,
					 cb->nlh->nlmsg_seq, NLM_F_MULTI,
					 RTM_NEWCHAIN);
		if (err <= 0)
			break;
		index++;
	}
	mutex_unlock(&block->lock);

	if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK)
		tcf_block_refcnt_put(block, true);
	cb->args[0] = index;

out:
	/* If we did no progress, the error (EMSGSIZE) is real */
	if (skb->len == 0 && err)
		return err;
	return skb->len;
}

void tcf_exts_destroy(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	if (exts->actions) {
		tcf_action_destroy(exts->actions, TCA_ACT_UNBIND);
		kfree(exts->actions);
	}
	exts->nr_actions = 0;
#endif
}
EXPORT_SYMBOL(tcf_exts_destroy);
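/* Parse the action attributes attached to a filter. Two encodings are
 * accepted: the legacy single-police attribute (stored as TCA_OLD_COMPAT)
 * and the modern nested action list, roughly (illustrative layout, not a
 * literal dump):
 *
 *	[exts->action]		// nest
 *		[1] -> first action
 *		[2] -> second action
 */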
int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
		      struct nlattr *rate_tlv, struct tcf_exts *exts, bool ovr,
		      bool rtnl_held, struct netlink_ext_ack *extack)
{
#ifdef CONFIG_NET_CLS_ACT
	{
		struct tc_action *act;
		size_t attr_size = 0;

		if (exts->police && tb[exts->police]) {
			act = tcf_action_init_1(net, tp, tb[exts->police],
						rate_tlv, "police", ovr,
						TCA_ACT_BIND, rtnl_held,
						extack);
			if (IS_ERR(act))
				return PTR_ERR(act);

			act->type = exts->type = TCA_OLD_COMPAT;
			exts->actions[0] = act;
			exts->nr_actions = 1;
		} else if (exts->action && tb[exts->action]) {
			int err;

			err = tcf_action_init(net, tp, tb[exts->action],
					      rate_tlv, NULL, ovr, TCA_ACT_BIND,
					      exts->actions, &attr_size,
					      rtnl_held, extack);
			if (err < 0)
				return err;
			exts->nr_actions = err;
		}
	}
#else
	if ((exts->action && tb[exts->action]) ||
	    (exts->police && tb[exts->police])) {
		NL_SET_ERR_MSG(extack, "Classifier actions are not supported per compile options (CONFIG_NET_CLS_ACT)");
		return -EOPNOTSUPP;
	}
#endif

	return 0;
}
EXPORT_SYMBOL(tcf_exts_validate);

void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src)
{
#ifdef CONFIG_NET_CLS_ACT
	struct tcf_exts old = *dst;

	*dst = *src;
	tcf_exts_destroy(&old);
#endif
}
EXPORT_SYMBOL(tcf_exts_change);

#ifdef CONFIG_NET_CLS_ACT
static struct tc_action *tcf_exts_first_act(struct tcf_exts *exts)
{
	if (exts->nr_actions == 0)
		return NULL;
	else
		return exts->actions[0];
}
#endif

int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	struct nlattr *nest;

	if (exts->action && tcf_exts_has_actions(exts)) {
		/*
		 * again for backward compatible mode - we want
		 * to work with both old and new modes of entering
		 * tc data even if iproute2 was newer - jhs
		 */
		if (exts->type != TCA_OLD_COMPAT) {
			nest = nla_nest_start_noflag(skb, exts->action);
			if (nest == NULL)
				goto nla_put_failure;

			if (tcf_action_dump(skb, exts->actions, 0, 0, false)
			    < 0)
				goto nla_put_failure;
			nla_nest_end(skb, nest);
		} else if (exts->police) {
			struct tc_action *act = tcf_exts_first_act(exts);
			nest = nla_nest_start_noflag(skb, exts->police);
			if (nest == NULL || !act)
				goto nla_put_failure;
			if (tcf_action_dump_old(skb, act, 0, 0) < 0)
				goto nla_put_failure;
			nla_nest_end(skb, nest);
		}
	}
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
#else
	return 0;
#endif
}
EXPORT_SYMBOL(tcf_exts_dump);

int tcf_exts_terse_dump(struct sk_buff *skb, struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	struct nlattr *nest;

	if (!exts->action || !tcf_exts_has_actions(exts))
		return 0;

	nest = nla_nest_start_noflag(skb, exts->action);
	if (!nest)
		goto nla_put_failure;

	if (tcf_action_dump(skb, exts->actions, 0, 0, true) < 0)
		goto nla_put_failure;
	nla_nest_end(skb, nest);
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
#else
	return 0;
#endif
}
EXPORT_SYMBOL(tcf_exts_terse_dump);

int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	struct tc_action *a = tcf_exts_first_act(exts);
	if (a != NULL && tcf_action_copy_stats(skb, a, 1) < 0)
		return -1;
#endif
	return 0;
}
EXPORT_SYMBOL(tcf_exts_dump_stats);
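/* Hardware offload accounting. A filter's TCA_CLS_FLAGS_IN_HW flag and the
 * block's offloadcnt are kept in sync with the per-filter in_hw_count: the
 * flag is set when the first callback accepts the filter and cleared when
 * the count drops back to zero.
 */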
static void tcf_block_offload_inc(struct tcf_block *block, u32 *flags)
{
	if (*flags & TCA_CLS_FLAGS_IN_HW)
		return;
	*flags |= TCA_CLS_FLAGS_IN_HW;
	atomic_inc(&block->offloadcnt);
}

static void tcf_block_offload_dec(struct tcf_block *block, u32 *flags)
{
	if (!(*flags & TCA_CLS_FLAGS_IN_HW))
		return;
	*flags &= ~TCA_CLS_FLAGS_IN_HW;
	atomic_dec(&block->offloadcnt);
}

static void tc_cls_offload_cnt_update(struct tcf_block *block,
				      struct tcf_proto *tp, u32 *cnt,
				      u32 *flags, u32 diff, bool add)
{
	lockdep_assert_held(&block->cb_lock);

	spin_lock(&tp->lock);
	if (add) {
		if (!*cnt)
			tcf_block_offload_inc(block, flags);
		*cnt += diff;
	} else {
		*cnt -= diff;
		if (!*cnt)
			tcf_block_offload_dec(block, flags);
	}
	spin_unlock(&tp->lock);
}

static void
tc_cls_offload_cnt_reset(struct tcf_block *block, struct tcf_proto *tp,
			 u32 *cnt, u32 *flags)
{
	lockdep_assert_held(&block->cb_lock);

	spin_lock(&tp->lock);
	tcf_block_offload_dec(block, flags);
	*cnt = 0;
	spin_unlock(&tp->lock);
}

static int
__tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
		   void *type_data, bool err_stop)
{
	struct flow_block_cb *block_cb;
	int ok_count = 0;
	int err;

	list_for_each_entry(block_cb, &block->flow_block.cb_list, list) {
		err = block_cb->cb(type, type_data, block_cb->cb_priv);
		if (err) {
			if (err_stop)
				return err;
		} else {
			ok_count++;
		}
	}
	return ok_count;
}

int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
		     void *type_data, bool err_stop, bool rtnl_held)
{
	bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
	int ok_count;

retry:
	if (take_rtnl)
		rtnl_lock();
	down_read(&block->cb_lock);
	/* Need to obtain rtnl lock if block is bound to devs that require it.
	 * In block bind code cb_lock is obtained while holding rtnl, so we must
	 * obtain the locks in same order here.
	 */
	if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
		up_read(&block->cb_lock);
		take_rtnl = true;
		goto retry;
	}

	ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);

	up_read(&block->cb_lock);
	if (take_rtnl)
		rtnl_unlock();
	return ok_count;
}
EXPORT_SYMBOL(tc_setup_cb_call);
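/* A classifier typically invokes these helpers with its hardware-specific
 * offload descriptor. A rough sketch of the add path as used by callers such
 * as cls_flower (the local variable names are illustrative only):
 *
 *	struct flow_cls_offload cls_flower = { ... };
 *
 *	err = tc_setup_cb_add(block, tp, TC_SETUP_CLSFLOWER, &cls_flower,
 *			      skip_sw, &flags, &in_hw_count, rtnl_held);
 *
 * With err_stop (skip_sw) set, the first callback failure aborts the setup;
 * otherwise the filter stays in software and IN_HW simply remains unset.
 */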
3288 */ 3289 3290 int tc_setup_cb_add(struct tcf_block *block, struct tcf_proto *tp, 3291 enum tc_setup_type type, void *type_data, bool err_stop, 3292 u32 *flags, unsigned int *in_hw_count, bool rtnl_held) 3293 { 3294 bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held; 3295 int ok_count; 3296 3297 retry: 3298 if (take_rtnl) 3299 rtnl_lock(); 3300 down_read(&block->cb_lock); 3301 /* Need to obtain rtnl lock if block is bound to devs that require it. 3302 * In block bind code cb_lock is obtained while holding rtnl, so we must 3303 * obtain the locks in same order here. 3304 */ 3305 if (!rtnl_held && !take_rtnl && block->lockeddevcnt) { 3306 up_read(&block->cb_lock); 3307 take_rtnl = true; 3308 goto retry; 3309 } 3310 3311 /* Make sure all netdevs sharing this block are offload-capable. */ 3312 if (block->nooffloaddevcnt && err_stop) { 3313 ok_count = -EOPNOTSUPP; 3314 goto err_unlock; 3315 } 3316 3317 ok_count = __tc_setup_cb_call(block, type, type_data, err_stop); 3318 if (ok_count < 0) 3319 goto err_unlock; 3320 3321 if (tp->ops->hw_add) 3322 tp->ops->hw_add(tp, type_data); 3323 if (ok_count > 0) 3324 tc_cls_offload_cnt_update(block, tp, in_hw_count, flags, 3325 ok_count, true); 3326 err_unlock: 3327 up_read(&block->cb_lock); 3328 if (take_rtnl) 3329 rtnl_unlock(); 3330 return ok_count < 0 ? ok_count : 0; 3331 } 3332 EXPORT_SYMBOL(tc_setup_cb_add); 3333 3334 /* Destructive filter replace. If filter that wasn't already in hardware is 3335 * successfully offloaded, increment block offload counter. On failure, 3336 * previously offloaded filter is considered to be destroyed and offload counter 3337 * is decremented. 3338 */ 3339 3340 int tc_setup_cb_replace(struct tcf_block *block, struct tcf_proto *tp, 3341 enum tc_setup_type type, void *type_data, bool err_stop, 3342 u32 *old_flags, unsigned int *old_in_hw_count, 3343 u32 *new_flags, unsigned int *new_in_hw_count, 3344 bool rtnl_held) 3345 { 3346 bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held; 3347 int ok_count; 3348 3349 retry: 3350 if (take_rtnl) 3351 rtnl_lock(); 3352 down_read(&block->cb_lock); 3353 /* Need to obtain rtnl lock if block is bound to devs that require it. 3354 * In block bind code cb_lock is obtained while holding rtnl, so we must 3355 * obtain the locks in same order here. 3356 */ 3357 if (!rtnl_held && !take_rtnl && block->lockeddevcnt) { 3358 up_read(&block->cb_lock); 3359 take_rtnl = true; 3360 goto retry; 3361 } 3362 3363 /* Make sure all netdevs sharing this block are offload-capable. */ 3364 if (block->nooffloaddevcnt && err_stop) { 3365 ok_count = -EOPNOTSUPP; 3366 goto err_unlock; 3367 } 3368 3369 tc_cls_offload_cnt_reset(block, tp, old_in_hw_count, old_flags); 3370 if (tp->ops->hw_del) 3371 tp->ops->hw_del(tp, type_data); 3372 3373 ok_count = __tc_setup_cb_call(block, type, type_data, err_stop); 3374 if (ok_count < 0) 3375 goto err_unlock; 3376 3377 if (tp->ops->hw_add) 3378 tp->ops->hw_add(tp, type_data); 3379 if (ok_count > 0) 3380 tc_cls_offload_cnt_update(block, tp, new_in_hw_count, 3381 new_flags, ok_count, true); 3382 err_unlock: 3383 up_read(&block->cb_lock); 3384 if (take_rtnl) 3385 rtnl_unlock(); 3386 return ok_count < 0 ? ok_count : 0; 3387 } 3388 EXPORT_SYMBOL(tc_setup_cb_replace); 3389 3390 /* Destroy filter and decrement block offload counter, if filter was previously 3391 * offloaded. 
3392 */ 3393 3394 int tc_setup_cb_destroy(struct tcf_block *block, struct tcf_proto *tp, 3395 enum tc_setup_type type, void *type_data, bool err_stop, 3396 u32 *flags, unsigned int *in_hw_count, bool rtnl_held) 3397 { 3398 bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held; 3399 int ok_count; 3400 3401 retry: 3402 if (take_rtnl) 3403 rtnl_lock(); 3404 down_read(&block->cb_lock); 3405 /* Need to obtain rtnl lock if block is bound to devs that require it. 3406 * In block bind code cb_lock is obtained while holding rtnl, so we must 3407 * obtain the locks in same order here. 3408 */ 3409 if (!rtnl_held && !take_rtnl && block->lockeddevcnt) { 3410 up_read(&block->cb_lock); 3411 take_rtnl = true; 3412 goto retry; 3413 } 3414 3415 ok_count = __tc_setup_cb_call(block, type, type_data, err_stop); 3416 3417 tc_cls_offload_cnt_reset(block, tp, in_hw_count, flags); 3418 if (tp->ops->hw_del) 3419 tp->ops->hw_del(tp, type_data); 3420 3421 up_read(&block->cb_lock); 3422 if (take_rtnl) 3423 rtnl_unlock(); 3424 return ok_count < 0 ? ok_count : 0; 3425 } 3426 EXPORT_SYMBOL(tc_setup_cb_destroy); 3427 3428 int tc_setup_cb_reoffload(struct tcf_block *block, struct tcf_proto *tp, 3429 bool add, flow_setup_cb_t *cb, 3430 enum tc_setup_type type, void *type_data, 3431 void *cb_priv, u32 *flags, unsigned int *in_hw_count) 3432 { 3433 int err = cb(type, type_data, cb_priv); 3434 3435 if (err) { 3436 if (add && tc_skip_sw(*flags)) 3437 return err; 3438 } else { 3439 tc_cls_offload_cnt_update(block, tp, in_hw_count, flags, 1, 3440 add); 3441 } 3442 3443 return 0; 3444 } 3445 EXPORT_SYMBOL(tc_setup_cb_reoffload); 3446 3447 static int tcf_act_get_cookie(struct flow_action_entry *entry, 3448 const struct tc_action *act) 3449 { 3450 struct tc_cookie *cookie; 3451 int err = 0; 3452 3453 rcu_read_lock(); 3454 cookie = rcu_dereference(act->act_cookie); 3455 if (cookie) { 3456 entry->cookie = flow_action_cookie_create(cookie->data, 3457 cookie->len, 3458 GFP_ATOMIC); 3459 if (!entry->cookie) 3460 err = -ENOMEM; 3461 } 3462 rcu_read_unlock(); 3463 return err; 3464 } 3465 3466 static void tcf_act_put_cookie(struct flow_action_entry *entry) 3467 { 3468 flow_action_cookie_destroy(entry->cookie); 3469 } 3470 3471 void tc_cleanup_flow_action(struct flow_action *flow_action) 3472 { 3473 struct flow_action_entry *entry; 3474 int i; 3475 3476 flow_action_for_each(i, entry, flow_action) { 3477 tcf_act_put_cookie(entry); 3478 if (entry->destructor) 3479 entry->destructor(entry->destructor_priv); 3480 } 3481 } 3482 EXPORT_SYMBOL(tc_cleanup_flow_action); 3483 3484 static void tcf_mirred_get_dev(struct flow_action_entry *entry, 3485 const struct tc_action *act) 3486 { 3487 #ifdef CONFIG_NET_CLS_ACT 3488 entry->dev = act->ops->get_dev(act, &entry->destructor); 3489 if (!entry->dev) 3490 return; 3491 entry->destructor_priv = entry->dev; 3492 #endif 3493 } 3494 3495 static void tcf_tunnel_encap_put_tunnel(void *priv) 3496 { 3497 struct ip_tunnel_info *tunnel = priv; 3498 3499 kfree(tunnel); 3500 } 3501 3502 static int tcf_tunnel_encap_get_tunnel(struct flow_action_entry *entry, 3503 const struct tc_action *act) 3504 { 3505 entry->tunnel = tcf_tunnel_info_copy(act); 3506 if (!entry->tunnel) 3507 return -ENOMEM; 3508 entry->destructor = tcf_tunnel_encap_put_tunnel; 3509 entry->destructor_priv = entry->tunnel; 3510 return 0; 3511 } 3512 3513 static void tcf_sample_get_group(struct flow_action_entry *entry, 3514 const struct tc_action *act) 3515 { 3516 #ifdef CONFIG_NET_CLS_ACT 3517 entry->sample.psample_group = 3518 
int tc_setup_flow_action(struct flow_action *flow_action,
			 const struct tcf_exts *exts)
{
	struct tc_action *act;
	int i, j, k, err = 0;

	BUILD_BUG_ON(TCA_ACT_HW_STATS_ANY != FLOW_ACTION_HW_STATS_ANY);
	BUILD_BUG_ON(TCA_ACT_HW_STATS_IMMEDIATE != FLOW_ACTION_HW_STATS_IMMEDIATE);
	BUILD_BUG_ON(TCA_ACT_HW_STATS_DELAYED != FLOW_ACTION_HW_STATS_DELAYED);

	if (!exts)
		return 0;

	j = 0;
	tcf_exts_for_each_action(i, act, exts) {
		struct flow_action_entry *entry;

		entry = &flow_action->entries[j];
		spin_lock_bh(&act->tcfa_lock);
		err = tcf_act_get_cookie(entry, act);
		if (err)
			goto err_out_locked;

		entry->hw_stats = tc_act_hw_stats(act->hw_stats);

		if (is_tcf_gact_ok(act)) {
			entry->id = FLOW_ACTION_ACCEPT;
		} else if (is_tcf_gact_shot(act)) {
			entry->id = FLOW_ACTION_DROP;
		} else if (is_tcf_gact_trap(act)) {
			entry->id = FLOW_ACTION_TRAP;
		} else if (is_tcf_gact_goto_chain(act)) {
			entry->id = FLOW_ACTION_GOTO;
			entry->chain_index = tcf_gact_goto_chain_index(act);
		} else if (is_tcf_mirred_egress_redirect(act)) {
			entry->id = FLOW_ACTION_REDIRECT;
			tcf_mirred_get_dev(entry, act);
		} else if (is_tcf_mirred_egress_mirror(act)) {
			entry->id = FLOW_ACTION_MIRRED;
			tcf_mirred_get_dev(entry, act);
		} else if (is_tcf_mirred_ingress_redirect(act)) {
			entry->id = FLOW_ACTION_REDIRECT_INGRESS;
			tcf_mirred_get_dev(entry, act);
		} else if (is_tcf_mirred_ingress_mirror(act)) {
			entry->id = FLOW_ACTION_MIRRED_INGRESS;
			tcf_mirred_get_dev(entry, act);
		} else if (is_tcf_vlan(act)) {
			switch (tcf_vlan_action(act)) {
			case TCA_VLAN_ACT_PUSH:
				entry->id = FLOW_ACTION_VLAN_PUSH;
				entry->vlan.vid = tcf_vlan_push_vid(act);
				entry->vlan.proto = tcf_vlan_push_proto(act);
				entry->vlan.prio = tcf_vlan_push_prio(act);
				break;
			case TCA_VLAN_ACT_POP:
				entry->id = FLOW_ACTION_VLAN_POP;
				break;
			case TCA_VLAN_ACT_MODIFY:
				entry->id = FLOW_ACTION_VLAN_MANGLE;
				entry->vlan.vid = tcf_vlan_push_vid(act);
				entry->vlan.proto = tcf_vlan_push_proto(act);
				entry->vlan.prio = tcf_vlan_push_prio(act);
				break;
			default:
				err = -EOPNOTSUPP;
				goto err_out_locked;
			}
		} else if (is_tcf_tunnel_set(act)) {
			entry->id = FLOW_ACTION_TUNNEL_ENCAP;
			err = tcf_tunnel_encap_get_tunnel(entry, act);
			if (err)
				goto err_out_locked;
		} else if (is_tcf_tunnel_release(act)) {
			entry->id = FLOW_ACTION_TUNNEL_DECAP;
		} else if (is_tcf_pedit(act)) {
			for (k = 0; k < tcf_pedit_nkeys(act); k++) {
				switch (tcf_pedit_cmd(act, k)) {
				case TCA_PEDIT_KEY_EX_CMD_SET:
					entry->id = FLOW_ACTION_MANGLE;
					break;
				case TCA_PEDIT_KEY_EX_CMD_ADD:
					entry->id = FLOW_ACTION_ADD;
					break;
				default:
					err = -EOPNOTSUPP;
					goto err_out_locked;
				}
				entry->mangle.htype = tcf_pedit_htype(act, k);
				entry->mangle.mask = tcf_pedit_mask(act, k);
				entry->mangle.val = tcf_pedit_val(act, k);
				entry->mangle.offset = tcf_pedit_offset(act, k);
				entry->hw_stats = tc_act_hw_stats(act->hw_stats);
				entry = &flow_action->entries[++j];
			}
		} else if (is_tcf_csum(act)) {
			entry->id = FLOW_ACTION_CSUM;
			entry->csum_flags = tcf_csum_update_flags(act);
		} else if (is_tcf_skbedit_mark(act)) {
			entry->id = FLOW_ACTION_MARK;
			entry->mark = tcf_skbedit_mark(act);
		} else if (is_tcf_sample(act)) {
			entry->id = FLOW_ACTION_SAMPLE;
			entry->sample.trunc_size = tcf_sample_trunc_size(act);
			entry->sample.truncate = tcf_sample_truncate(act);
			entry->sample.rate = tcf_sample_rate(act);
			tcf_sample_get_group(entry, act);
		} else if (is_tcf_police(act)) {
			entry->id = FLOW_ACTION_POLICE;
			entry->police.burst = tcf_police_tcfp_burst(act);
			entry->police.rate_bytes_ps =
				tcf_police_rate_bytes_ps(act);
		} else if (is_tcf_ct(act)) {
			entry->id = FLOW_ACTION_CT;
			entry->ct.action = tcf_ct_action(act);
			entry->ct.zone = tcf_ct_zone(act);
			entry->ct.flow_table = tcf_ct_ft(act);
		} else if (is_tcf_mpls(act)) {
			switch (tcf_mpls_action(act)) {
			case TCA_MPLS_ACT_PUSH:
				entry->id = FLOW_ACTION_MPLS_PUSH;
				entry->mpls_push.proto = tcf_mpls_proto(act);
				entry->mpls_push.label = tcf_mpls_label(act);
				entry->mpls_push.tc = tcf_mpls_tc(act);
				entry->mpls_push.bos = tcf_mpls_bos(act);
				entry->mpls_push.ttl = tcf_mpls_ttl(act);
				break;
			case TCA_MPLS_ACT_POP:
				entry->id = FLOW_ACTION_MPLS_POP;
				entry->mpls_pop.proto = tcf_mpls_proto(act);
				break;
			case TCA_MPLS_ACT_MODIFY:
				entry->id = FLOW_ACTION_MPLS_MANGLE;
				entry->mpls_mangle.label = tcf_mpls_label(act);
				entry->mpls_mangle.tc = tcf_mpls_tc(act);
				entry->mpls_mangle.bos = tcf_mpls_bos(act);
				entry->mpls_mangle.ttl = tcf_mpls_ttl(act);
				break;
			default:
				err = -EOPNOTSUPP;
				goto err_out_locked;
			}
		} else if (is_tcf_skbedit_ptype(act)) {
			entry->id = FLOW_ACTION_PTYPE;
			entry->ptype = tcf_skbedit_ptype(act);
		} else if (is_tcf_skbedit_priority(act)) {
			entry->id = FLOW_ACTION_PRIORITY;
			entry->priority = tcf_skbedit_priority(act);
		} else if (is_tcf_gate(act)) {
			entry->id = FLOW_ACTION_GATE;
			entry->gate.index = tcf_gate_index(act);
			entry->gate.prio = tcf_gate_prio(act);
			entry->gate.basetime = tcf_gate_basetime(act);
			entry->gate.cycletime = tcf_gate_cycletime(act);
			entry->gate.cycletimeext = tcf_gate_cycletimeext(act);
			entry->gate.num_entries = tcf_gate_num_entries(act);
			err = tcf_gate_get_entries(entry, act);
			if (err)
				goto err_out;
		} else {
			err = -EOPNOTSUPP;
			goto err_out_locked;
		}
		spin_unlock_bh(&act->tcfa_lock);

		if (!is_tcf_pedit(act))
			j++;
	}

err_out:
	if (err)
		tc_cleanup_flow_action(flow_action);

	return err;
err_out_locked:
	spin_unlock_bh(&act->tcfa_lock);
	goto err_out;
}
EXPORT_SYMBOL(tc_setup_flow_action);
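/* Count how many flow_action entries a tcf_exts will expand into; used to
 * size the flow_action array before calling tc_setup_flow_action().
 */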
unsigned int tcf_exts_num_actions(struct tcf_exts *exts)
{
	unsigned int num_acts = 0;
	struct tc_action *act;
	int i;

	tcf_exts_for_each_action(i, act, exts) {
		if (is_tcf_pedit(act))
			num_acts += tcf_pedit_nkeys(act);
		else
			num_acts++;
	}
	return num_acts;
}
EXPORT_SYMBOL(tcf_exts_num_actions);

static __net_init int tcf_net_init(struct net *net)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	spin_lock_init(&tn->idr_lock);
	idr_init(&tn->idr);
	return 0;
}

static void __net_exit tcf_net_exit(struct net *net)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	idr_destroy(&tn->idr);
}

static struct pernet_operations tcf_net_ops = {
	.init = tcf_net_init,
	.exit = tcf_net_exit,
	.id = &tcf_net_id,
	.size = sizeof(struct tcf_net),
};

static int __init tc_filter_init(void)
{
	int err;

	tc_filter_wq = alloc_ordered_workqueue("tc_filter_workqueue", 0);
	if (!tc_filter_wq)
		return -ENOMEM;

	err = register_pernet_subsys(&tcf_net_ops);
	if (err)
		goto err_register_pernet_subsys;

	rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_new_tfilter, NULL,
		      RTNL_FLAG_DOIT_UNLOCKED);
	rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_del_tfilter, NULL,
		      RTNL_FLAG_DOIT_UNLOCKED);
	rtnl_register(PF_UNSPEC, RTM_GETTFILTER, tc_get_tfilter,
		      tc_dump_tfilter, RTNL_FLAG_DOIT_UNLOCKED);
	rtnl_register(PF_UNSPEC, RTM_NEWCHAIN, tc_ctl_chain, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_DELCHAIN, tc_ctl_chain, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_GETCHAIN, tc_ctl_chain,
		      tc_dump_chain, 0);

	return 0;

err_register_pernet_subsys:
	destroy_workqueue(tc_filter_wq);
	return err;
}

subsys_initcall(tc_filter_init);