// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/cls_api.c	Packet classifier API.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Changes:
 *
 * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/jhash.h>
#include <linux/rculist.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>
#include <net/tc_act/tc_csum.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_police.h>
#include <net/tc_act/tc_sample.h>
#include <net/tc_act/tc_skbedit.h>
#include <net/tc_act/tc_ct.h>
#include <net/tc_act/tc_mpls.h>
#include <net/tc_act/tc_gate.h>
#include <net/flow_offload.h>

extern const struct nla_policy rtm_tca_policy[TCA_MAX + 1];

/* The list of all installed classifier types */
static LIST_HEAD(tcf_proto_base);

/* Protects list of registered TC modules. It is pure SMP lock. */
static DEFINE_RWLOCK(cls_mod_lock);

#ifdef CONFIG_NET_CLS_ACT
DEFINE_STATIC_KEY_FALSE(tc_skb_ext_tc);
EXPORT_SYMBOL(tc_skb_ext_tc);

void tc_skb_ext_tc_enable(void)
{
	static_branch_inc(&tc_skb_ext_tc);
}
EXPORT_SYMBOL(tc_skb_ext_tc_enable);

void tc_skb_ext_tc_disable(void)
{
	static_branch_dec(&tc_skb_ext_tc);
}
EXPORT_SYMBOL(tc_skb_ext_tc_disable);
#endif

static u32 destroy_obj_hashfn(const struct tcf_proto *tp)
{
	return jhash_3words(tp->chain->index, tp->prio,
			    (__force __u32)tp->protocol, 0);
}

static void tcf_proto_signal_destroying(struct tcf_chain *chain,
					struct tcf_proto *tp)
{
	struct tcf_block *block = chain->block;

	mutex_lock(&block->proto_destroy_lock);
	hash_add_rcu(block->proto_destroy_ht, &tp->destroy_ht_node,
		     destroy_obj_hashfn(tp));
	mutex_unlock(&block->proto_destroy_lock);
}

static bool tcf_proto_cmp(const struct tcf_proto *tp1,
			  const struct tcf_proto *tp2)
{
	return tp1->chain->index == tp2->chain->index &&
	       tp1->prio == tp2->prio &&
	       tp1->protocol == tp2->protocol;
}

static bool tcf_proto_exists_destroying(struct tcf_chain *chain,
					struct tcf_proto *tp)
{
	u32 hash = destroy_obj_hashfn(tp);
	struct tcf_proto *iter;
	bool found = false;

	rcu_read_lock();
	hash_for_each_possible_rcu(chain->block->proto_destroy_ht, iter,
				   destroy_ht_node, hash) {
		if (tcf_proto_cmp(tp, iter)) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();

	return found;
}

static void
tcf_proto_signal_destroyed(struct tcf_chain *chain, struct tcf_proto *tp)
{
	struct tcf_block *block = chain->block;

	mutex_lock(&block->proto_destroy_lock);
	if (hash_hashed(&tp->destroy_ht_node))
		hash_del_rcu(&tp->destroy_ht_node);
	mutex_unlock(&block->proto_destroy_lock);
}
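/* The proto_destroy hashtable above lets concurrent writers detect that a
 * classifier instance with the same (chain, prio, protocol) triple is still
 * being destroyed: tcf_chain_tp_insert_unique() consults it and returns
 * -EAGAIN, which makes the netlink request handler replay the request once
 * destruction has finished.
 */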
/* Find classifier type by string name */

static const struct tcf_proto_ops *__tcf_proto_lookup_ops(const char *kind)
{
	const struct tcf_proto_ops *t, *res = NULL;

	if (kind) {
		read_lock(&cls_mod_lock);
		list_for_each_entry(t, &tcf_proto_base, head) {
			if (strcmp(kind, t->kind) == 0) {
				if (try_module_get(t->owner))
					res = t;
				break;
			}
		}
		read_unlock(&cls_mod_lock);
	}
	return res;
}

static const struct tcf_proto_ops *
tcf_proto_lookup_ops(const char *kind, bool rtnl_held,
		     struct netlink_ext_ack *extack)
{
	const struct tcf_proto_ops *ops;

	ops = __tcf_proto_lookup_ops(kind);
	if (ops)
		return ops;
#ifdef CONFIG_MODULES
	if (rtnl_held)
		rtnl_unlock();
	request_module("cls_%s", kind);
	if (rtnl_held)
		rtnl_lock();
	ops = __tcf_proto_lookup_ops(kind);
	/* We dropped the RTNL semaphore in order to perform
	 * the module load. So, even if we succeeded in loading
	 * the module we have to replay the request. We indicate
	 * this using -EAGAIN.
	 */
	if (ops) {
		module_put(ops->owner);
		return ERR_PTR(-EAGAIN);
	}
#endif
	NL_SET_ERR_MSG(extack, "TC classifier not found");
	return ERR_PTR(-ENOENT);
}

/* Register (unregister) new classifier type */

int register_tcf_proto_ops(struct tcf_proto_ops *ops)
{
	struct tcf_proto_ops *t;
	int rc = -EEXIST;

	write_lock(&cls_mod_lock);
	list_for_each_entry(t, &tcf_proto_base, head)
		if (!strcmp(ops->kind, t->kind))
			goto out;

	list_add_tail(&ops->head, &tcf_proto_base);
	rc = 0;
out:
	write_unlock(&cls_mod_lock);
	return rc;
}
EXPORT_SYMBOL(register_tcf_proto_ops);

static struct workqueue_struct *tc_filter_wq;

void unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
{
	struct tcf_proto_ops *t;
	int rc = -ENOENT;

	/* Wait for outstanding call_rcu()s, if any, from a
	 * tcf_proto_ops's destroy() handler.
	 */
	rcu_barrier();
	flush_workqueue(tc_filter_wq);

	write_lock(&cls_mod_lock);
	list_for_each_entry(t, &tcf_proto_base, head) {
		if (t == ops) {
			list_del(&t->head);
			rc = 0;
			break;
		}
	}
	write_unlock(&cls_mod_lock);

	WARN(rc, "unregister tc filter kind(%s) failed %d\n", ops->kind, rc);
}
EXPORT_SYMBOL(unregister_tcf_proto_ops);
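/* Usage sketch (illustrative, not part of this file): a classifier module
 * such as cls_basic registers its ops (which must also provide the other
 * mandatory callbacks: get, change, delete, dump, ...) from module init and
 * unregisters them on exit:
 *
 *	static struct tcf_proto_ops cls_example_ops = {
 *		.kind		= "example",
 *		.classify	= example_classify,
 *		.init		= example_init,
 *		.destroy	= example_destroy,
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	static int __init example_module_init(void)
 *	{
 *		return register_tcf_proto_ops(&cls_example_ops);
 *	}
 *
 *	static void __exit example_module_exit(void)
 *	{
 *		unregister_tcf_proto_ops(&cls_example_ops);
 *	}
 *
 * Naming (or aliasing) the module "cls_<kind>" lets request_module() in
 * tcf_proto_lookup_ops() load it on demand.
 */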
bool tcf_queue_work(struct rcu_work *rwork, work_func_t func)
{
	INIT_RCU_WORK(rwork, func);
	return queue_rcu_work(tc_filter_wq, rwork);
}
EXPORT_SYMBOL(tcf_queue_work);

/* Select new prio value from the range managed by the kernel. */

static inline u32 tcf_auto_prio(struct tcf_proto *tp)
{
	u32 first = TC_H_MAKE(0xC0000000U, 0U);

	if (tp)
		first = tp->prio - 1;

	return TC_H_MAJ(first);
}

static bool tcf_proto_check_kind(struct nlattr *kind, char *name)
{
	if (kind)
		return nla_strscpy(name, kind, IFNAMSIZ) < 0;
	memset(name, 0, IFNAMSIZ);
	return false;
}

static bool tcf_proto_is_unlocked(const char *kind)
{
	const struct tcf_proto_ops *ops;
	bool ret;

	if (strlen(kind) == 0)
		return false;

	ops = tcf_proto_lookup_ops(kind, false, NULL);
	/* On error return false to take rtnl lock. Proto lookup/create
	 * functions will perform lookup again and properly handle errors.
	 */
	if (IS_ERR(ops))
		return false;

	ret = !!(ops->flags & TCF_PROTO_OPS_DOIT_UNLOCKED);
	module_put(ops->owner);
	return ret;
}

static struct tcf_proto *tcf_proto_create(const char *kind, u32 protocol,
					  u32 prio, struct tcf_chain *chain,
					  bool rtnl_held,
					  struct netlink_ext_ack *extack)
{
	struct tcf_proto *tp;
	int err;

	tp = kzalloc(sizeof(*tp), GFP_KERNEL);
	if (!tp)
		return ERR_PTR(-ENOBUFS);

	tp->ops = tcf_proto_lookup_ops(kind, rtnl_held, extack);
	if (IS_ERR(tp->ops)) {
		err = PTR_ERR(tp->ops);
		goto errout;
	}
	tp->classify = tp->ops->classify;
	tp->protocol = protocol;
	tp->prio = prio;
	tp->chain = chain;
	spin_lock_init(&tp->lock);
	refcount_set(&tp->refcnt, 1);

	err = tp->ops->init(tp);
	if (err) {
		module_put(tp->ops->owner);
		goto errout;
	}
	return tp;

errout:
	kfree(tp);
	return ERR_PTR(err);
}

static void tcf_proto_get(struct tcf_proto *tp)
{
	refcount_inc(&tp->refcnt);
}

static void tcf_chain_put(struct tcf_chain *chain);

static void tcf_proto_destroy(struct tcf_proto *tp, bool rtnl_held,
			      bool sig_destroy, struct netlink_ext_ack *extack)
{
	tp->ops->destroy(tp, rtnl_held, extack);
	if (sig_destroy)
		tcf_proto_signal_destroyed(tp->chain, tp);
	tcf_chain_put(tp->chain);
	module_put(tp->ops->owner);
	kfree_rcu(tp, rcu);
}

static void tcf_proto_put(struct tcf_proto *tp, bool rtnl_held,
			  struct netlink_ext_ack *extack)
{
	if (refcount_dec_and_test(&tp->refcnt))
		tcf_proto_destroy(tp, rtnl_held, true, extack);
}

static bool tcf_proto_check_delete(struct tcf_proto *tp)
{
	if (tp->ops->delete_empty)
		return tp->ops->delete_empty(tp);

	tp->deleting = true;
	return tp->deleting;
}

static void tcf_proto_mark_delete(struct tcf_proto *tp)
{
	spin_lock(&tp->lock);
	tp->deleting = true;
	spin_unlock(&tp->lock);
}

static bool tcf_proto_is_deleting(struct tcf_proto *tp)
{
	bool deleting;

	spin_lock(&tp->lock);
	deleting = tp->deleting;
	spin_unlock(&tp->lock);

	return deleting;
}

#define ASSERT_BLOCK_LOCKED(block)	\
	lockdep_assert_held(&(block)->lock)

struct tcf_filter_chain_list_item {
	struct list_head list;
	tcf_chain_head_change_t *chain_head_change;
	void *chain_head_change_priv;
};

static struct tcf_chain *tcf_chain_create(struct tcf_block *block,
					  u32 chain_index)
{
	struct tcf_chain *chain;

	ASSERT_BLOCK_LOCKED(block);

	chain = kzalloc(sizeof(*chain), GFP_KERNEL);
	if (!chain)
		return NULL;
	list_add_tail_rcu(&chain->list, &block->chain_list);
	mutex_init(&chain->filter_chain_lock);
	chain->block = block;
	chain->index = chain_index;
	chain->refcnt = 1;
	if (!chain->index)
		block->chain0.chain = chain;
	return chain;
}
static void tcf_chain_head_change_item(struct tcf_filter_chain_list_item *item,
				       struct tcf_proto *tp_head)
{
	if (item->chain_head_change)
		item->chain_head_change(tp_head, item->chain_head_change_priv);
}

static void tcf_chain0_head_change(struct tcf_chain *chain,
				   struct tcf_proto *tp_head)
{
	struct tcf_filter_chain_list_item *item;
	struct tcf_block *block = chain->block;

	if (chain->index)
		return;

	mutex_lock(&block->lock);
	list_for_each_entry(item, &block->chain0.filter_chain_list, list)
		tcf_chain_head_change_item(item, tp_head);
	mutex_unlock(&block->lock);
}

/* Returns true if block can be safely freed. */

static bool tcf_chain_detach(struct tcf_chain *chain)
{
	struct tcf_block *block = chain->block;

	ASSERT_BLOCK_LOCKED(block);

	list_del_rcu(&chain->list);
	if (!chain->index)
		block->chain0.chain = NULL;

	if (list_empty(&block->chain_list) &&
	    refcount_read(&block->refcnt) == 0)
		return true;

	return false;
}

static void tcf_block_destroy(struct tcf_block *block)
{
	mutex_destroy(&block->lock);
	mutex_destroy(&block->proto_destroy_lock);
	kfree_rcu(block, rcu);
}

static void tcf_chain_destroy(struct tcf_chain *chain, bool free_block)
{
	struct tcf_block *block = chain->block;

	mutex_destroy(&chain->filter_chain_lock);
	kfree_rcu(chain, rcu);
	if (free_block)
		tcf_block_destroy(block);
}

static void tcf_chain_hold(struct tcf_chain *chain)
{
	ASSERT_BLOCK_LOCKED(chain->block);

	++chain->refcnt;
}

static bool tcf_chain_held_by_acts_only(struct tcf_chain *chain)
{
	ASSERT_BLOCK_LOCKED(chain->block);

	/* In case all the references are action references, this
	 * chain should not be shown to the user.
	 */
	return chain->refcnt == chain->action_refcnt;
}

static struct tcf_chain *tcf_chain_lookup(struct tcf_block *block,
					  u32 chain_index)
{
	struct tcf_chain *chain;

	ASSERT_BLOCK_LOCKED(block);

	list_for_each_entry(chain, &block->chain_list, list) {
		if (chain->index == chain_index)
			return chain;
	}
	return NULL;
}

#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
static struct tcf_chain *tcf_chain_lookup_rcu(const struct tcf_block *block,
					      u32 chain_index)
{
	struct tcf_chain *chain;

	list_for_each_entry_rcu(chain, &block->chain_list, list) {
		if (chain->index == chain_index)
			return chain;
	}
	return NULL;
}
#endif

static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
			   u32 seq, u16 flags, int event, bool unicast);
static struct tcf_chain *__tcf_chain_get(struct tcf_block *block,
					 u32 chain_index, bool create,
					 bool by_act)
{
	struct tcf_chain *chain = NULL;
	bool is_first_reference;

	mutex_lock(&block->lock);
	chain = tcf_chain_lookup(block, chain_index);
	if (chain) {
		tcf_chain_hold(chain);
	} else {
		if (!create)
			goto errout;
		chain = tcf_chain_create(block, chain_index);
		if (!chain)
			goto errout;
	}

	if (by_act)
		++chain->action_refcnt;
	is_first_reference = chain->refcnt - chain->action_refcnt == 1;
	mutex_unlock(&block->lock);

	/* Send notification only in case we got the first
	 * non-action reference. Until then, the chain acts only as
	 * a placeholder for actions pointing to it and user ought
	 * not know about them.
	 */
	if (is_first_reference && !by_act)
		tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
				RTM_NEWCHAIN, false);

	return chain;

errout:
	mutex_unlock(&block->lock);
	return chain;
}

static struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
				       bool create)
{
	return __tcf_chain_get(block, chain_index, create, false);
}

struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block, u32 chain_index)
{
	return __tcf_chain_get(block, chain_index, true, true);
}
EXPORT_SYMBOL(tcf_chain_get_by_act);

static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops,
			       void *tmplt_priv);
static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops,
				  void *tmplt_priv, u32 chain_index,
				  struct tcf_block *block, struct sk_buff *oskb,
				  u32 seq, u16 flags, bool unicast);
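/* Chains referenced only by actions (e.g. via the goto_chain control) are
 * taken and released through the tcf_chain_get_by_act()/tcf_chain_put_by_act()
 * pair above. While held exclusively by actions a chain is a placeholder:
 * dump iteration skips it and no netlink notification is sent for it (see
 * tcf_chain_held_by_acts_only()).
 */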
static void __tcf_chain_put(struct tcf_chain *chain, bool by_act,
			    bool explicitly_created)
{
	struct tcf_block *block = chain->block;
	const struct tcf_proto_ops *tmplt_ops;
	bool free_block = false;
	unsigned int refcnt;
	void *tmplt_priv;

	mutex_lock(&block->lock);
	if (explicitly_created) {
		if (!chain->explicitly_created) {
			mutex_unlock(&block->lock);
			return;
		}
		chain->explicitly_created = false;
	}

	if (by_act)
		chain->action_refcnt--;

	/* tc_chain_notify_delete can't be called while holding block lock.
	 * However, when block is unlocked chain can be changed concurrently, so
	 * save these to temporary variables.
	 */
	refcnt = --chain->refcnt;
	tmplt_ops = chain->tmplt_ops;
	tmplt_priv = chain->tmplt_priv;

	/* The last dropped non-action reference will trigger notification. */
	if (refcnt - chain->action_refcnt == 0 && !by_act) {
		tc_chain_notify_delete(tmplt_ops, tmplt_priv, chain->index,
				       block, NULL, 0, 0, false);
		/* Last reference to chain, no need to lock. */
		chain->flushing = false;
	}

	if (refcnt == 0)
		free_block = tcf_chain_detach(chain);
	mutex_unlock(&block->lock);

	if (refcnt == 0) {
		tc_chain_tmplt_del(tmplt_ops, tmplt_priv);
		tcf_chain_destroy(chain, free_block);
	}
}

static void tcf_chain_put(struct tcf_chain *chain)
{
	__tcf_chain_put(chain, false, false);
}

void tcf_chain_put_by_act(struct tcf_chain *chain)
{
	__tcf_chain_put(chain, true, false);
}
EXPORT_SYMBOL(tcf_chain_put_by_act);

static void tcf_chain_put_explicitly_created(struct tcf_chain *chain)
{
	__tcf_chain_put(chain, false, true);
}

static void tcf_chain_flush(struct tcf_chain *chain, bool rtnl_held)
{
	struct tcf_proto *tp, *tp_next;

	mutex_lock(&chain->filter_chain_lock);
	tp = tcf_chain_dereference(chain->filter_chain, chain);
	while (tp) {
		tp_next = rcu_dereference_protected(tp->next, 1);
		tcf_proto_signal_destroying(chain, tp);
		tp = tp_next;
	}
	tp = tcf_chain_dereference(chain->filter_chain, chain);
	RCU_INIT_POINTER(chain->filter_chain, NULL);
	tcf_chain0_head_change(chain, NULL);
	chain->flushing = true;
	mutex_unlock(&chain->filter_chain_lock);

	while (tp) {
		tp_next = rcu_dereference_protected(tp->next, 1);
		tcf_proto_put(tp, rtnl_held, NULL);
		tp = tp_next;
	}
}

static int tcf_block_setup(struct tcf_block *block,
			   struct flow_block_offload *bo);

static void tcf_block_offload_init(struct flow_block_offload *bo,
				   struct net_device *dev, struct Qdisc *sch,
				   enum flow_block_command command,
				   enum flow_block_binder_type binder_type,
				   struct flow_block *flow_block,
				   bool shared, struct netlink_ext_ack *extack)
{
	bo->net = dev_net(dev);
	bo->command = command;
	bo->binder_type = binder_type;
	bo->block = flow_block;
	bo->block_shared = shared;
	bo->extack = extack;
	bo->sch = sch;
	bo->cb_list_head = &flow_block->cb_list;
	INIT_LIST_HEAD(&bo->cb_list);
}

static void tcf_block_unbind(struct tcf_block *block,
			     struct flow_block_offload *bo);

static void tc_block_indr_cleanup(struct flow_block_cb *block_cb)
{
	struct tcf_block *block = block_cb->indr.data;
	struct net_device *dev = block_cb->indr.dev;
	struct Qdisc *sch = block_cb->indr.sch;
	struct netlink_ext_ack extack = {};
	struct flow_block_offload bo = {};

	tcf_block_offload_init(&bo, dev, sch, FLOW_BLOCK_UNBIND,
			       block_cb->indr.binder_type,
			       &block->flow_block, tcf_block_shared(block),
			       &extack);
	rtnl_lock();
	down_write(&block->cb_lock);
	list_del(&block_cb->driver_list);
	list_move(&block_cb->list, &bo.cb_list);
	tcf_block_unbind(block, &bo);
	up_write(&block->cb_lock);
	rtnl_unlock();
}

static bool tcf_block_offload_in_use(struct tcf_block *block)
{
	return atomic_read(&block->offloadcnt);
}
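/* Offload plumbing overview (summary of the functions below): a driver that
 * implements ndo_setup_tc() gets a direct TC_SETUP_BLOCK call; all other
 * devices are offered the block through the indirect infrastructure
 * (flow_indr_dev_setup_offload()) and the command reports -EOPNOTSUPP, which
 * bind accounts for by incrementing nooffloaddevcnt instead of failing,
 * unless the block already contains offloaded filters.
 */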
static int tcf_block_offload_cmd(struct tcf_block *block,
				 struct net_device *dev, struct Qdisc *sch,
				 struct tcf_block_ext_info *ei,
				 enum flow_block_command command,
				 struct netlink_ext_ack *extack)
{
	struct flow_block_offload bo = {};

	tcf_block_offload_init(&bo, dev, sch, command, ei->binder_type,
			       &block->flow_block, tcf_block_shared(block),
			       extack);

	if (dev->netdev_ops->ndo_setup_tc) {
		int err;

		err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo);
		if (err < 0) {
			if (err != -EOPNOTSUPP)
				NL_SET_ERR_MSG(extack, "Driver ndo_setup_tc failed");
			return err;
		}

		return tcf_block_setup(block, &bo);
	}

	flow_indr_dev_setup_offload(dev, sch, TC_SETUP_BLOCK, block, &bo,
				    tc_block_indr_cleanup);
	tcf_block_setup(block, &bo);

	return -EOPNOTSUPP;
}

static int tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q,
				  struct tcf_block_ext_info *ei,
				  struct netlink_ext_ack *extack)
{
	struct net_device *dev = q->dev_queue->dev;
	int err;

	down_write(&block->cb_lock);

	/* If tc offload feature is disabled and the block we try to bind
	 * to already has some offloaded filters, refuse the bind.
	 */
	if (dev->netdev_ops->ndo_setup_tc &&
	    !tc_can_offload(dev) &&
	    tcf_block_offload_in_use(block)) {
		NL_SET_ERR_MSG(extack, "Bind to offloaded block failed as dev has offload disabled");
		err = -EOPNOTSUPP;
		goto err_unlock;
	}

	err = tcf_block_offload_cmd(block, dev, q, ei, FLOW_BLOCK_BIND, extack);
	if (err == -EOPNOTSUPP)
		goto no_offload_dev_inc;
	if (err)
		goto err_unlock;

	up_write(&block->cb_lock);
	return 0;

no_offload_dev_inc:
	if (tcf_block_offload_in_use(block))
		goto err_unlock;

	err = 0;
	block->nooffloaddevcnt++;
err_unlock:
	up_write(&block->cb_lock);
	return err;
}

static void tcf_block_offload_unbind(struct tcf_block *block, struct Qdisc *q,
				     struct tcf_block_ext_info *ei)
{
	struct net_device *dev = q->dev_queue->dev;
	int err;

	down_write(&block->cb_lock);
	err = tcf_block_offload_cmd(block, dev, q, ei, FLOW_BLOCK_UNBIND, NULL);
	if (err == -EOPNOTSUPP)
		goto no_offload_dev_dec;
	up_write(&block->cb_lock);
	return;

no_offload_dev_dec:
	WARN_ON(block->nooffloaddevcnt-- == 0);
	up_write(&block->cb_lock);
}

static int
tcf_chain0_head_change_cb_add(struct tcf_block *block,
			      struct tcf_block_ext_info *ei,
			      struct netlink_ext_ack *extack)
{
	struct tcf_filter_chain_list_item *item;
	struct tcf_chain *chain0;

	item = kmalloc(sizeof(*item), GFP_KERNEL);
	if (!item) {
		NL_SET_ERR_MSG(extack, "Memory allocation for head change callback item failed");
		return -ENOMEM;
	}
	item->chain_head_change = ei->chain_head_change;
	item->chain_head_change_priv = ei->chain_head_change_priv;

	mutex_lock(&block->lock);
	chain0 = block->chain0.chain;
	if (chain0)
		tcf_chain_hold(chain0);
	else
		list_add(&item->list, &block->chain0.filter_chain_list);
	mutex_unlock(&block->lock);

	if (chain0) {
		struct tcf_proto *tp_head;

		mutex_lock(&chain0->filter_chain_lock);

		tp_head = tcf_chain_dereference(chain0->filter_chain, chain0);
		if (tp_head)
			tcf_chain_head_change_item(item, tp_head);

		mutex_lock(&block->lock);
		list_add(&item->list, &block->chain0.filter_chain_list);
		mutex_unlock(&block->lock);

		mutex_unlock(&chain0->filter_chain_lock);
		tcf_chain_put(chain0);
	}

	return 0;
}
static void
tcf_chain0_head_change_cb_del(struct tcf_block *block,
			      struct tcf_block_ext_info *ei)
{
	struct tcf_filter_chain_list_item *item;

	mutex_lock(&block->lock);
	list_for_each_entry(item, &block->chain0.filter_chain_list, list) {
		if ((!ei->chain_head_change && !ei->chain_head_change_priv) ||
		    (item->chain_head_change == ei->chain_head_change &&
		     item->chain_head_change_priv == ei->chain_head_change_priv)) {
			if (block->chain0.chain)
				tcf_chain_head_change_item(item, NULL);
			list_del(&item->list);
			mutex_unlock(&block->lock);

			kfree(item);
			return;
		}
	}
	mutex_unlock(&block->lock);
	WARN_ON(1);
}

struct tcf_net {
	spinlock_t idr_lock; /* Protects idr */
	struct idr idr;
};

static unsigned int tcf_net_id;

static int tcf_block_insert(struct tcf_block *block, struct net *net,
			    struct netlink_ext_ack *extack)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);
	int err;

	idr_preload(GFP_KERNEL);
	spin_lock(&tn->idr_lock);
	err = idr_alloc_u32(&tn->idr, block, &block->index, block->index,
			    GFP_NOWAIT);
	spin_unlock(&tn->idr_lock);
	idr_preload_end();

	return err;
}

static void tcf_block_remove(struct tcf_block *block, struct net *net)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	spin_lock(&tn->idr_lock);
	idr_remove(&tn->idr, block->index);
	spin_unlock(&tn->idr_lock);
}

static struct tcf_block *tcf_block_create(struct net *net, struct Qdisc *q,
					  u32 block_index,
					  struct netlink_ext_ack *extack)
{
	struct tcf_block *block;

	block = kzalloc(sizeof(*block), GFP_KERNEL);
	if (!block) {
		NL_SET_ERR_MSG(extack, "Memory allocation for block failed");
		return ERR_PTR(-ENOMEM);
	}
	mutex_init(&block->lock);
	mutex_init(&block->proto_destroy_lock);
	init_rwsem(&block->cb_lock);
	flow_block_init(&block->flow_block);
	INIT_LIST_HEAD(&block->chain_list);
	INIT_LIST_HEAD(&block->owner_list);
	INIT_LIST_HEAD(&block->chain0.filter_chain_list);

	refcount_set(&block->refcnt, 1);
	block->net = net;
	block->index = block_index;

	/* Don't store q pointer for blocks which are shared */
	if (!tcf_block_shared(block))
		block->q = q;
	return block;
}

static struct tcf_block *tcf_block_lookup(struct net *net, u32 block_index)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	return idr_find(&tn->idr, block_index);
}

static struct tcf_block *tcf_block_refcnt_get(struct net *net, u32 block_index)
{
	struct tcf_block *block;

	rcu_read_lock();
	block = tcf_block_lookup(net, block_index);
	if (block && !refcount_inc_not_zero(&block->refcnt))
		block = NULL;
	rcu_read_unlock();

	return block;
}

static struct tcf_chain *
__tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain)
{
	mutex_lock(&block->lock);
	if (chain)
		chain = list_is_last(&chain->list, &block->chain_list) ?
			NULL : list_next_entry(chain, list);
	else
		chain = list_first_entry_or_null(&block->chain_list,
						 struct tcf_chain, list);

	/* skip all action-only chains */
	while (chain && tcf_chain_held_by_acts_only(chain))
		chain = list_is_last(&chain->list, &block->chain_list) ?
			NULL : list_next_entry(chain, list);

	if (chain)
		tcf_chain_hold(chain);
	mutex_unlock(&block->lock);

	return chain;
}
/* Function to be used by all clients that want to iterate over all chains on
 * block. It properly obtains block->lock and takes reference to chain before
 * returning it. Users of this function must be tolerant to concurrent chain
 * insertion/deletion or ensure that no concurrent chain modification is
 * possible. Note that netlink dump callbacks cannot guarantee a consistent
 * dump because the rtnl lock is released each time the skb is filled with
 * data and sent to user-space.
 */

struct tcf_chain *
tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain)
{
	struct tcf_chain *chain_next = __tcf_get_next_chain(block, chain);

	if (chain)
		tcf_chain_put(chain);

	return chain_next;
}
EXPORT_SYMBOL(tcf_get_next_chain);

static struct tcf_proto *
__tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp)
{
	u32 prio = 0;

	ASSERT_RTNL();
	mutex_lock(&chain->filter_chain_lock);

	if (!tp) {
		tp = tcf_chain_dereference(chain->filter_chain, chain);
	} else if (tcf_proto_is_deleting(tp)) {
		/* 'deleting' flag is set and chain->filter_chain_lock was
		 * unlocked, which means next pointer could be invalid. Restart
		 * search.
		 */
		prio = tp->prio + 1;
		tp = tcf_chain_dereference(chain->filter_chain, chain);

		for (; tp; tp = tcf_chain_dereference(tp->next, chain))
			if (!tp->deleting && tp->prio >= prio)
				break;
	} else {
		tp = tcf_chain_dereference(tp->next, chain);
	}

	if (tp)
		tcf_proto_get(tp);

	mutex_unlock(&chain->filter_chain_lock);

	return tp;
}

/* Function to be used by all clients that want to iterate over all tp's on
 * chain. Users of this function must be tolerant to concurrent tp
 * insertion/deletion or ensure that no concurrent chain modification is
 * possible. Note that netlink dump callbacks cannot guarantee a consistent
 * dump because the rtnl lock is released each time the skb is filled with
 * data and sent to user-space.
 */

struct tcf_proto *
tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp)
{
	struct tcf_proto *tp_next = __tcf_get_next_proto(chain, tp);

	if (tp)
		tcf_proto_put(tp, true, NULL);

	return tp_next;
}
EXPORT_SYMBOL(tcf_get_next_proto);
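/* Typical iteration (sketch): both helpers release the previous entry and
 * take a reference to the next one, so a plain for-loop walks safely:
 *
 *	for (chain = tcf_get_next_chain(block, NULL);
 *	     chain;
 *	     chain = tcf_get_next_chain(block, chain)) {
 *		for (tp = tcf_get_next_proto(chain, NULL);
 *		     tp;
 *		     tp = tcf_get_next_proto(chain, tp))
 *			...;
 *	}
 *
 * Breaking out early requires dropping the still-held reference manually
 * with tcf_chain_put()/tcf_proto_put().
 */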
static void tcf_block_flush_all_chains(struct tcf_block *block, bool rtnl_held)
{
	struct tcf_chain *chain;

	/* Last reference to block. At this point chains cannot be added or
	 * removed concurrently.
	 */
	for (chain = tcf_get_next_chain(block, NULL);
	     chain;
	     chain = tcf_get_next_chain(block, chain)) {
		tcf_chain_put_explicitly_created(chain);
		tcf_chain_flush(chain, rtnl_held);
	}
}

/* Look up the Qdisc and increment its reference counter.
 * Set parent, if necessary.
 */

static int __tcf_qdisc_find(struct net *net, struct Qdisc **q,
			    u32 *parent, int ifindex, bool rtnl_held,
			    struct netlink_ext_ack *extack)
{
	const struct Qdisc_class_ops *cops;
	struct net_device *dev;
	int err = 0;

	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK)
		return 0;

	rcu_read_lock();

	/* Find link */
	dev = dev_get_by_index_rcu(net, ifindex);
	if (!dev) {
		rcu_read_unlock();
		return -ENODEV;
	}

	/* Find qdisc */
	if (!*parent) {
		*q = rcu_dereference(dev->qdisc);
		*parent = (*q)->handle;
	} else {
		*q = qdisc_lookup_rcu(dev, TC_H_MAJ(*parent));
		if (!*q) {
			NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exist");
			err = -EINVAL;
			goto errout_rcu;
		}
	}

	*q = qdisc_refcount_inc_nz(*q);
	if (!*q) {
		NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exist");
		err = -EINVAL;
		goto errout_rcu;
	}

	/* Is it classful? */
	cops = (*q)->ops->cl_ops;
	if (!cops) {
		NL_SET_ERR_MSG(extack, "Qdisc not classful");
		err = -EINVAL;
		goto errout_qdisc;
	}

	if (!cops->tcf_block) {
		NL_SET_ERR_MSG(extack, "Class doesn't support blocks");
		err = -EOPNOTSUPP;
		goto errout_qdisc;
	}

errout_rcu:
	/* At this point we know that qdisc is not noop_qdisc,
	 * which means that qdisc holds a reference to net_device
	 * and we hold a reference to qdisc, so it is safe to release
	 * rcu read lock.
	 */
	rcu_read_unlock();
	return err;

errout_qdisc:
	rcu_read_unlock();

	if (rtnl_held)
		qdisc_put(*q);
	else
		qdisc_put_unlocked(*q);
	*q = NULL;

	return err;
}

static int __tcf_qdisc_cl_find(struct Qdisc *q, u32 parent, unsigned long *cl,
			       int ifindex, struct netlink_ext_ack *extack)
{
	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK)
		return 0;

	/* Do we search for filter, attached to class? */
	if (TC_H_MIN(parent)) {
		const struct Qdisc_class_ops *cops = q->ops->cl_ops;

		*cl = cops->find(q, parent);
		if (*cl == 0) {
			NL_SET_ERR_MSG(extack, "Specified class doesn't exist");
			return -ENOENT;
		}
	}

	return 0;
}
static struct tcf_block *__tcf_block_find(struct net *net, struct Qdisc *q,
					  unsigned long cl, int ifindex,
					  u32 block_index,
					  struct netlink_ext_ack *extack)
{
	struct tcf_block *block;

	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
		block = tcf_block_refcnt_get(net, block_index);
		if (!block) {
			NL_SET_ERR_MSG(extack, "Block of given index was not found");
			return ERR_PTR(-EINVAL);
		}
	} else {
		const struct Qdisc_class_ops *cops = q->ops->cl_ops;

		block = cops->tcf_block(q, cl, extack);
		if (!block)
			return ERR_PTR(-EINVAL);

		if (tcf_block_shared(block)) {
			NL_SET_ERR_MSG(extack, "This filter block is shared. Please use the block index to manipulate the filters");
			return ERR_PTR(-EOPNOTSUPP);
		}

		/* Always take reference to block in order to support execution
		 * of rules update path of cls API without rtnl lock. Caller
		 * must release block when it is finished using it. The 'if'
		 * branch of this conditional obtains its reference by calling
		 * tcf_block_refcnt_get().
		 */
		refcount_inc(&block->refcnt);
	}

	return block;
}

static void __tcf_block_put(struct tcf_block *block, struct Qdisc *q,
			    struct tcf_block_ext_info *ei, bool rtnl_held)
{
	if (refcount_dec_and_mutex_lock(&block->refcnt, &block->lock)) {
		/* Flushing/putting all chains will cause the block to be
		 * deallocated when last chain is freed. However, if chain_list
		 * is empty, block has to be manually deallocated. After block
		 * reference counter reached 0, it is no longer possible to
		 * increment it or add new chains to block.
		 */
		bool free_block = list_empty(&block->chain_list);

		mutex_unlock(&block->lock);
		if (tcf_block_shared(block))
			tcf_block_remove(block, block->net);

		if (q)
			tcf_block_offload_unbind(block, q, ei);

		if (free_block)
			tcf_block_destroy(block);
		else
			tcf_block_flush_all_chains(block, rtnl_held);
	} else if (q) {
		tcf_block_offload_unbind(block, q, ei);
	}
}

static void tcf_block_refcnt_put(struct tcf_block *block, bool rtnl_held)
{
	__tcf_block_put(block, NULL, NULL, rtnl_held);
}

/* Find tcf block.
 * Set q, parent, cl when appropriate.
 */

static struct tcf_block *tcf_block_find(struct net *net, struct Qdisc **q,
					u32 *parent, unsigned long *cl,
					int ifindex, u32 block_index,
					struct netlink_ext_ack *extack)
{
	struct tcf_block *block;
	int err = 0;

	ASSERT_RTNL();

	err = __tcf_qdisc_find(net, q, parent, ifindex, true, extack);
	if (err)
		goto errout;

	err = __tcf_qdisc_cl_find(*q, *parent, cl, ifindex, extack);
	if (err)
		goto errout_qdisc;

	block = __tcf_block_find(net, *q, *cl, ifindex, block_index, extack);
	if (IS_ERR(block)) {
		err = PTR_ERR(block);
		goto errout_qdisc;
	}

	return block;

errout_qdisc:
	if (*q)
		qdisc_put(*q);
errout:
	*q = NULL;
	return ERR_PTR(err);
}

static void tcf_block_release(struct Qdisc *q, struct tcf_block *block,
			      bool rtnl_held)
{
	if (!IS_ERR_OR_NULL(block))
		tcf_block_refcnt_put(block, rtnl_held);

	if (q) {
		if (rtnl_held)
			qdisc_put(q);
		else
			qdisc_put_unlocked(q);
	}
}

struct tcf_block_owner_item {
	struct list_head list;
	struct Qdisc *q;
	enum flow_block_binder_type binder_type;
};

static void
tcf_block_owner_netif_keep_dst(struct tcf_block *block,
			       struct Qdisc *q,
			       enum flow_block_binder_type binder_type)
{
	if (block->keep_dst &&
	    binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
	    binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
		netif_keep_dst(qdisc_dev(q));
}

void tcf_block_netif_keep_dst(struct tcf_block *block)
{
	struct tcf_block_owner_item *item;

	block->keep_dst = true;
	list_for_each_entry(item, &block->owner_list, list)
		tcf_block_owner_netif_keep_dst(block, item->q,
					       item->binder_type);
}
EXPORT_SYMBOL(tcf_block_netif_keep_dst);
static int tcf_block_owner_add(struct tcf_block *block,
			       struct Qdisc *q,
			       enum flow_block_binder_type binder_type)
{
	struct tcf_block_owner_item *item;

	item = kmalloc(sizeof(*item), GFP_KERNEL);
	if (!item)
		return -ENOMEM;
	item->q = q;
	item->binder_type = binder_type;
	list_add(&item->list, &block->owner_list);
	return 0;
}

static void tcf_block_owner_del(struct tcf_block *block,
				struct Qdisc *q,
				enum flow_block_binder_type binder_type)
{
	struct tcf_block_owner_item *item;

	list_for_each_entry(item, &block->owner_list, list) {
		if (item->q == q && item->binder_type == binder_type) {
			list_del(&item->list);
			kfree(item);
			return;
		}
	}
	WARN_ON(1);
}

int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei,
		      struct netlink_ext_ack *extack)
{
	struct net *net = qdisc_net(q);
	struct tcf_block *block = NULL;
	int err;

	if (ei->block_index)
		/* block_index not 0 means the shared block is requested */
		block = tcf_block_refcnt_get(net, ei->block_index);

	if (!block) {
		block = tcf_block_create(net, q, ei->block_index, extack);
		if (IS_ERR(block))
			return PTR_ERR(block);
		if (tcf_block_shared(block)) {
			err = tcf_block_insert(block, net, extack);
			if (err)
				goto err_block_insert;
		}
	}

	err = tcf_block_owner_add(block, q, ei->binder_type);
	if (err)
		goto err_block_owner_add;

	tcf_block_owner_netif_keep_dst(block, q, ei->binder_type);

	err = tcf_chain0_head_change_cb_add(block, ei, extack);
	if (err)
		goto err_chain0_head_change_cb_add;

	err = tcf_block_offload_bind(block, q, ei, extack);
	if (err)
		goto err_block_offload_bind;

	*p_block = block;
	return 0;

err_block_offload_bind:
	tcf_chain0_head_change_cb_del(block, ei);
err_chain0_head_change_cb_add:
	tcf_block_owner_del(block, q, ei->binder_type);
err_block_owner_add:
err_block_insert:
	tcf_block_refcnt_put(block, true);
	return err;
}
EXPORT_SYMBOL(tcf_block_get_ext);

static void tcf_chain_head_change_dflt(struct tcf_proto *tp_head, void *priv)
{
	struct tcf_proto __rcu **p_filter_chain = priv;

	rcu_assign_pointer(*p_filter_chain, tp_head);
}

int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		  struct netlink_ext_ack *extack)
{
	struct tcf_block_ext_info ei = {
		.chain_head_change = tcf_chain_head_change_dflt,
		.chain_head_change_priv = p_filter_chain,
	};

	WARN_ON(!p_filter_chain);
	return tcf_block_get_ext(p_block, q, &ei, extack);
}
EXPORT_SYMBOL(tcf_block_get);
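/* Usage sketch (illustrative; field names q->block and q->filter_list are
 * hypothetical qdisc-private members): a classful qdisc typically creates
 * its block in ->init() and releases it in ->destroy():
 *
 *	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
 *	if (err)
 *		return err;
 *	...
 *	tcf_block_put(q->block);
 *
 * Qdiscs that support shared blocks (e.g. ingress/clsact) use
 * tcf_block_get_ext() instead, passing a struct tcf_block_ext_info that
 * carries the binder type and, optionally, a user-supplied block index.
 */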
/* XXX: Standalone actions are not allowed to jump to any chain, and bound
 * actions should be all removed after flushing.
 */
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
		       struct tcf_block_ext_info *ei)
{
	if (!block)
		return;
	tcf_chain0_head_change_cb_del(block, ei);
	tcf_block_owner_del(block, q, ei->binder_type);

	__tcf_block_put(block, q, ei, true);
}
EXPORT_SYMBOL(tcf_block_put_ext);

void tcf_block_put(struct tcf_block *block)
{
	struct tcf_block_ext_info ei = {0, };

	if (!block)
		return;
	tcf_block_put_ext(block, block->q, &ei);
}
EXPORT_SYMBOL(tcf_block_put);

static int
tcf_block_playback_offloads(struct tcf_block *block, flow_setup_cb_t *cb,
			    void *cb_priv, bool add, bool offload_in_use,
			    struct netlink_ext_ack *extack)
{
	struct tcf_chain *chain, *chain_prev;
	struct tcf_proto *tp, *tp_prev;
	int err;

	lockdep_assert_held(&block->cb_lock);

	for (chain = __tcf_get_next_chain(block, NULL);
	     chain;
	     chain_prev = chain,
		     chain = __tcf_get_next_chain(block, chain),
		     tcf_chain_put(chain_prev)) {
		for (tp = __tcf_get_next_proto(chain, NULL); tp;
		     tp_prev = tp,
			     tp = __tcf_get_next_proto(chain, tp),
			     tcf_proto_put(tp_prev, true, NULL)) {
			if (tp->ops->reoffload) {
				err = tp->ops->reoffload(tp, add, cb, cb_priv,
							 extack);
				if (err && add)
					goto err_playback_remove;
			} else if (add && offload_in_use) {
				err = -EOPNOTSUPP;
				NL_SET_ERR_MSG(extack, "Filter HW offload failed - classifier without re-offloading support");
				goto err_playback_remove;
			}
		}
	}

	return 0;

err_playback_remove:
	tcf_proto_put(tp, true, NULL);
	tcf_chain_put(chain);
	tcf_block_playback_offloads(block, cb, cb_priv, false, offload_in_use,
				    extack);
	return err;
}

static int tcf_block_bind(struct tcf_block *block,
			  struct flow_block_offload *bo)
{
	struct flow_block_cb *block_cb, *next;
	int err, i = 0;

	lockdep_assert_held(&block->cb_lock);

	list_for_each_entry(block_cb, &bo->cb_list, list) {
		err = tcf_block_playback_offloads(block, block_cb->cb,
						  block_cb->cb_priv, true,
						  tcf_block_offload_in_use(block),
						  bo->extack);
		if (err)
			goto err_unroll;
		if (!bo->unlocked_driver_cb)
			block->lockeddevcnt++;

		i++;
	}
	list_splice(&bo->cb_list, &block->flow_block.cb_list);

	return 0;

err_unroll:
	list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
		if (i-- > 0) {
			list_del(&block_cb->list);
			tcf_block_playback_offloads(block, block_cb->cb,
						    block_cb->cb_priv, false,
						    tcf_block_offload_in_use(block),
						    NULL);
			if (!bo->unlocked_driver_cb)
				block->lockeddevcnt--;
		}
		flow_block_cb_free(block_cb);
	}

	return err;
}

static void tcf_block_unbind(struct tcf_block *block,
			     struct flow_block_offload *bo)
{
	struct flow_block_cb *block_cb, *next;

	lockdep_assert_held(&block->cb_lock);

	list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
		tcf_block_playback_offloads(block, block_cb->cb,
					    block_cb->cb_priv, false,
					    tcf_block_offload_in_use(block),
					    NULL);
		list_del(&block_cb->list);
		flow_block_cb_free(block_cb);
		if (!bo->unlocked_driver_cb)
			block->lockeddevcnt--;
	}
}
static int tcf_block_setup(struct tcf_block *block,
			   struct flow_block_offload *bo)
{
	int err;

	switch (bo->command) {
	case FLOW_BLOCK_BIND:
		err = tcf_block_bind(block, bo);
		break;
	case FLOW_BLOCK_UNBIND:
		err = 0;
		tcf_block_unbind(block, bo);
		break;
	default:
		WARN_ON_ONCE(1);
		err = -EOPNOTSUPP;
	}

	return err;
}

/* Main classifier routine: scans classifier chain attached
 * to this qdisc, (optionally) tests for protocol and asks
 * specific classifiers.
 */
static inline int __tcf_classify(struct sk_buff *skb,
				 const struct tcf_proto *tp,
				 const struct tcf_proto *orig_tp,
				 struct tcf_result *res,
				 bool compat_mode,
				 u32 *last_executed_chain)
{
#ifdef CONFIG_NET_CLS_ACT
	const int max_reclassify_loop = 16;
	const struct tcf_proto *first_tp;
	int limit = 0;

reclassify:
#endif
	for (; tp; tp = rcu_dereference_bh(tp->next)) {
		__be16 protocol = skb_protocol(skb, false);
		int err;

		if (tp->protocol != protocol &&
		    tp->protocol != htons(ETH_P_ALL))
			continue;

		err = tp->classify(skb, tp, res);
#ifdef CONFIG_NET_CLS_ACT
		if (unlikely(err == TC_ACT_RECLASSIFY && !compat_mode)) {
			first_tp = orig_tp;
			*last_executed_chain = first_tp->chain->index;
			goto reset;
		} else if (unlikely(TC_ACT_EXT_CMP(err, TC_ACT_GOTO_CHAIN))) {
			first_tp = res->goto_tp;
			*last_executed_chain = err & TC_ACT_EXT_VAL_MASK;
			goto reset;
		}
#endif
		if (err >= 0)
			return err;
	}

	return TC_ACT_UNSPEC; /* signal: continue lookup */
#ifdef CONFIG_NET_CLS_ACT
reset:
	if (unlikely(limit++ >= max_reclassify_loop)) {
		net_notice_ratelimited("%u: reclassify loop, rule prio %u, protocol %02x\n",
				       tp->chain->block->index,
				       tp->prio & 0xffff,
				       ntohs(tp->protocol));
		return TC_ACT_SHOT;
	}

	tp = first_tp;
	goto reclassify;
#endif
}
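/* Note on the control results handled above: TC_ACT_RECLASSIFY restarts the
 * walk from the original tp, while TC_ACT_GOTO_CHAIN carries the target
 * chain index in the verdict's lower bits (err & TC_ACT_EXT_VAL_MASK) and
 * restarts from that chain's first classifier. Both paths share the
 * max_reclassify_loop cap of 16, which bounds per-packet processing time.
 */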
int tcf_classify(struct sk_buff *skb,
		 const struct tcf_block *block,
		 const struct tcf_proto *tp,
		 struct tcf_result *res, bool compat_mode)
{
#if !IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
	u32 last_executed_chain = 0;

	return __tcf_classify(skb, tp, tp, res, compat_mode,
			      &last_executed_chain);
#else
	u32 last_executed_chain = tp ? tp->chain->index : 0;
	const struct tcf_proto *orig_tp = tp;
	struct tc_skb_ext *ext;
	int ret;

	if (block) {
		ext = skb_ext_find(skb, TC_SKB_EXT);

		if (ext && ext->chain) {
			struct tcf_chain *fchain;

			fchain = tcf_chain_lookup_rcu(block, ext->chain);
			if (!fchain)
				return TC_ACT_SHOT;

			/* Consume, so cloned/redirect skbs won't inherit ext */
			skb_ext_del(skb, TC_SKB_EXT);

			tp = rcu_dereference_bh(fchain->filter_chain);
			last_executed_chain = fchain->index;
		}
	}

	ret = __tcf_classify(skb, tp, orig_tp, res, compat_mode,
			     &last_executed_chain);

	if (tc_skb_ext_tc_enabled()) {
		/* If we missed on some chain */
		if (ret == TC_ACT_UNSPEC && last_executed_chain) {
			struct tc_skb_cb *cb = tc_skb_cb(skb);

			ext = tc_skb_ext_alloc(skb);
			if (WARN_ON_ONCE(!ext))
				return TC_ACT_SHOT;
			ext->chain = last_executed_chain;
			ext->mru = cb->mru;
			ext->post_ct = cb->post_ct;
			ext->post_ct_snat = cb->post_ct_snat;
			ext->post_ct_dnat = cb->post_ct_dnat;
			ext->zone = cb->zone;
		}
	}

	return ret;
#endif
}
EXPORT_SYMBOL(tcf_classify);

struct tcf_chain_info {
	struct tcf_proto __rcu **pprev;
	struct tcf_proto __rcu *next;
};

static struct tcf_proto *tcf_chain_tp_prev(struct tcf_chain *chain,
					   struct tcf_chain_info *chain_info)
{
	return tcf_chain_dereference(*chain_info->pprev, chain);
}

static int tcf_chain_tp_insert(struct tcf_chain *chain,
			       struct tcf_chain_info *chain_info,
			       struct tcf_proto *tp)
{
	if (chain->flushing)
		return -EAGAIN;

	RCU_INIT_POINTER(tp->next, tcf_chain_tp_prev(chain, chain_info));
	if (*chain_info->pprev == chain->filter_chain)
		tcf_chain0_head_change(chain, tp);
	tcf_proto_get(tp);
	rcu_assign_pointer(*chain_info->pprev, tp);

	return 0;
}

static void tcf_chain_tp_remove(struct tcf_chain *chain,
				struct tcf_chain_info *chain_info,
				struct tcf_proto *tp)
{
	struct tcf_proto *next = tcf_chain_dereference(chain_info->next, chain);

	tcf_proto_mark_delete(tp);
	if (tp == chain->filter_chain)
		tcf_chain0_head_change(chain, next);
	RCU_INIT_POINTER(*chain_info->pprev, next);
}

static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
					   struct tcf_chain_info *chain_info,
					   u32 protocol, u32 prio,
					   bool prio_allocate);
/* Try to insert new proto.
 * If proto with specified priority already exists, free new proto
 * and return existing one.
 */

static struct tcf_proto *tcf_chain_tp_insert_unique(struct tcf_chain *chain,
						    struct tcf_proto *tp_new,
						    u32 protocol, u32 prio,
						    bool rtnl_held)
{
	struct tcf_chain_info chain_info;
	struct tcf_proto *tp;
	int err = 0;

	mutex_lock(&chain->filter_chain_lock);

	if (tcf_proto_exists_destroying(chain, tp_new)) {
		mutex_unlock(&chain->filter_chain_lock);
		tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
		return ERR_PTR(-EAGAIN);
	}

	tp = tcf_chain_tp_find(chain, &chain_info,
			       protocol, prio, false);
	if (!tp)
		err = tcf_chain_tp_insert(chain, &chain_info, tp_new);
	mutex_unlock(&chain->filter_chain_lock);

	if (tp) {
		tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
		tp_new = tp;
	} else if (err) {
		tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
		tp_new = ERR_PTR(err);
	}

	return tp_new;
}

static void tcf_chain_tp_delete_empty(struct tcf_chain *chain,
				      struct tcf_proto *tp, bool rtnl_held,
				      struct netlink_ext_ack *extack)
{
	struct tcf_chain_info chain_info;
	struct tcf_proto *tp_iter;
	struct tcf_proto **pprev;
	struct tcf_proto *next;

	mutex_lock(&chain->filter_chain_lock);

	/* Atomically find and remove tp from chain. */
	for (pprev = &chain->filter_chain;
	     (tp_iter = tcf_chain_dereference(*pprev, chain));
	     pprev = &tp_iter->next) {
		if (tp_iter == tp) {
			chain_info.pprev = pprev;
			chain_info.next = tp_iter->next;
			WARN_ON(tp_iter->deleting);
			break;
		}
	}
	/* Verify that tp still exists and no new filters were inserted
	 * concurrently.
	 * Mark tp for deletion if it is empty.
	 */
	if (!tp_iter || !tcf_proto_check_delete(tp)) {
		mutex_unlock(&chain->filter_chain_lock);
		return;
	}

	tcf_proto_signal_destroying(chain, tp);
	next = tcf_chain_dereference(chain_info.next, chain);
	if (tp == chain->filter_chain)
		tcf_chain0_head_change(chain, next);
	RCU_INIT_POINTER(*chain_info.pprev, next);
	mutex_unlock(&chain->filter_chain_lock);

	tcf_proto_put(tp, rtnl_held, extack);
}

static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
					   struct tcf_chain_info *chain_info,
					   u32 protocol, u32 prio,
					   bool prio_allocate)
{
	struct tcf_proto **pprev;
	struct tcf_proto *tp;

	/* Check the chain for existence of proto-tcf with this priority */
	for (pprev = &chain->filter_chain;
	     (tp = tcf_chain_dereference(*pprev, chain));
	     pprev = &tp->next) {
		if (tp->prio >= prio) {
			if (tp->prio == prio) {
				if (prio_allocate ||
				    (tp->protocol != protocol && protocol))
					return ERR_PTR(-EINVAL);
			} else {
				tp = NULL;
			}
			break;
		}
	}
	chain_info->pprev = pprev;
	if (tp) {
		chain_info->next = tp->next;
		tcf_proto_get(tp);
	} else {
		chain_info->next = NULL;
	}
	return tp;
}
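/* The dump/notify message built by tcf_fill_node() below has this shape
 * (sketch):
 *
 *	struct nlmsghdr		(type = RTM_NEWTFILTER / RTM_DELTFILTER)
 *	struct tcmsg		(ifindex/parent, or the magic block index;
 *				 tcm_info = TC_H_MAKE(prio, protocol))
 *	TCA_KIND		classifier name, e.g. "flower"
 *	TCA_CHAIN		chain index
 *	...			kind-specific attributes from ops->dump()
 *				(or ops->terse_dump() for terse dumps)
 */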
static int tcf_fill_node(struct net *net, struct sk_buff *skb,
			 struct tcf_proto *tp, struct tcf_block *block,
			 struct Qdisc *q, u32 parent, void *fh,
			 u32 portid, u32 seq, u16 flags, int event,
			 bool terse_dump, bool rtnl_held)
{
	struct tcmsg *tcm;
	struct nlmsghdr *nlh;
	unsigned char *b = skb_tail_pointer(skb);

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
	if (!nlh)
		goto out_nlmsg_trim;
	tcm = nlmsg_data(nlh);
	tcm->tcm_family = AF_UNSPEC;
	tcm->tcm__pad1 = 0;
	tcm->tcm__pad2 = 0;
	if (q) {
		tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
		tcm->tcm_parent = parent;
	} else {
		tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
		tcm->tcm_block_index = block->index;
	}
	tcm->tcm_info = TC_H_MAKE(tp->prio, tp->protocol);
	if (nla_put_string(skb, TCA_KIND, tp->ops->kind))
		goto nla_put_failure;
	if (nla_put_u32(skb, TCA_CHAIN, tp->chain->index))
		goto nla_put_failure;
	if (!fh) {
		tcm->tcm_handle = 0;
	} else if (terse_dump) {
		if (tp->ops->terse_dump) {
			if (tp->ops->terse_dump(net, tp, fh, skb, tcm,
						rtnl_held) < 0)
				goto nla_put_failure;
		} else {
			goto cls_op_not_supp;
		}
	} else {
		if (tp->ops->dump &&
		    tp->ops->dump(net, tp, fh, skb, tcm, rtnl_held) < 0)
			goto nla_put_failure;
	}
	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

out_nlmsg_trim:
nla_put_failure:
cls_op_not_supp:
	nlmsg_trim(skb, b);
	return -1;
}

static int tfilter_notify(struct net *net, struct sk_buff *oskb,
			  struct nlmsghdr *n, struct tcf_proto *tp,
			  struct tcf_block *block, struct Qdisc *q,
			  u32 parent, void *fh, int event, bool unicast,
			  bool rtnl_held)
{
	struct sk_buff *skb;
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
	int err = 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
			  n->nlmsg_seq, n->nlmsg_flags, event,
			  false, rtnl_held) <= 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	if (unicast)
		err = rtnl_unicast(skb, net, portid);
	else
		err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
				     n->nlmsg_flags & NLM_F_ECHO);
	return err;
}
static int tfilter_del_notify(struct net *net, struct sk_buff *oskb,
			      struct nlmsghdr *n, struct tcf_proto *tp,
			      struct tcf_block *block, struct Qdisc *q,
			      u32 parent, void *fh, bool unicast, bool *last,
			      bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct sk_buff *skb;
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
	int err;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
			  n->nlmsg_seq, n->nlmsg_flags, RTM_DELTFILTER,
			  false, rtnl_held) <= 0) {
		NL_SET_ERR_MSG(extack, "Failed to build del event notification");
		kfree_skb(skb);
		return -EINVAL;
	}

	err = tp->ops->delete(tp, fh, last, rtnl_held, extack);
	if (err) {
		kfree_skb(skb);
		return err;
	}

	if (unicast)
		err = rtnl_unicast(skb, net, portid);
	else
		err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
				     n->nlmsg_flags & NLM_F_ECHO);
	if (err < 0)
		NL_SET_ERR_MSG(extack, "Failed to send filter delete notification");

	return err;
}

static void tfilter_notify_chain(struct net *net, struct sk_buff *oskb,
				 struct tcf_block *block, struct Qdisc *q,
				 u32 parent, struct nlmsghdr *n,
				 struct tcf_chain *chain, int event)
{
	struct tcf_proto *tp;

	for (tp = tcf_get_next_proto(chain, NULL);
	     tp; tp = tcf_get_next_proto(chain, tp))
		tfilter_notify(net, oskb, n, tp, block,
			       q, parent, NULL, event, false, true);
}

static void tfilter_put(struct tcf_proto *tp, void *fh)
{
	if (tp->ops->put && fh)
		tp->ops->put(tp, fh);
}
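/* In the request handlers below, priority and protocol arrive packed in
 * tcm_info: prio in the major half (TC_H_MAJ), protocol in the minor half
 * (TC_H_MIN). For example, "tc filter add ... prio 10 protocol ip" is sent
 * with tcm_info == TC_H_MAKE(10 << 16, htons(ETH_P_IP)), so the extracted
 * prio keeps the << 16 shift. prio 0 means "auto-allocate a priority" on
 * add and "flush the chain" on delete.
 */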
static int tc_new_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	char name[IFNAMSIZ];
	struct tcmsg *t;
	u32 protocol;
	u32 prio;
	bool prio_allocate;
	u32 parent;
	u32 chain_index;
	struct Qdisc *q;
	struct tcf_chain_info chain_info;
	struct tcf_chain *chain;
	struct tcf_block *block;
	struct tcf_proto *tp;
	unsigned long cl;
	void *fh;
	int err;
	int tp_created;
	bool rtnl_held = false;
	u32 flags;

	if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

replay:
	tp_created = 0;

	err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
				     rtm_tca_policy, extack);
	if (err < 0)
		return err;

	t = nlmsg_data(n);
	protocol = TC_H_MIN(t->tcm_info);
	prio = TC_H_MAJ(t->tcm_info);
	prio_allocate = false;
	parent = t->tcm_parent;
	tp = NULL;
	cl = 0;
	block = NULL;
	q = NULL;
	chain = NULL;
	flags = 0;

	if (prio == 0) {
		/* If no priority is provided by the user,
		 * we allocate one.
		 */
		if (n->nlmsg_flags & NLM_F_CREATE) {
			prio = TC_H_MAKE(0x80000000U, 0U);
			prio_allocate = true;
		} else {
			NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero");
			return -ENOENT;
		}
	}

	/* Find head of filter chain. */

	err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
	if (err)
		return err;

	if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
		NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
		err = -EINVAL;
		goto errout;
	}

	/* Take rtnl mutex if rtnl_held was set to true on previous iteration,
	 * block is shared (no qdisc found), qdisc is not unlocked, classifier
	 * type is not specified, classifier is not unlocked.
	 */
	if (rtnl_held ||
	    (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
	    !tcf_proto_is_unlocked(name)) {
		rtnl_held = true;
		rtnl_lock();
	}

	err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
	if (err)
		goto errout;

	block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
				 extack);
	if (IS_ERR(block)) {
		err = PTR_ERR(block);
		goto errout;
	}
	block->classid = parent;

	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
	if (chain_index > TC_ACT_EXT_VAL_MASK) {
		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
		err = -EINVAL;
		goto errout;
	}
	chain = tcf_chain_get(block, chain_index, true);
	if (!chain) {
		NL_SET_ERR_MSG(extack, "Cannot create specified filter chain");
		err = -ENOMEM;
		goto errout;
	}

	mutex_lock(&chain->filter_chain_lock);
	tp = tcf_chain_tp_find(chain, &chain_info, protocol,
			       prio, prio_allocate);
	if (IS_ERR(tp)) {
		NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
		err = PTR_ERR(tp);
		goto errout_locked;
	}

	if (tp == NULL) {
		struct tcf_proto *tp_new = NULL;

		if (chain->flushing) {
			err = -EAGAIN;
			goto errout_locked;
		}

		/* Proto-tcf does not exist, create new one */

		if (tca[TCA_KIND] == NULL || !protocol) {
			NL_SET_ERR_MSG(extack, "Filter kind and protocol must be specified");
			err = -EINVAL;
			goto errout_locked;
		}

		if (!(n->nlmsg_flags & NLM_F_CREATE)) {
			NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
			err = -ENOENT;
			goto errout_locked;
		}

		if (prio_allocate)
			prio = tcf_auto_prio(tcf_chain_tp_prev(chain,
							       &chain_info));

		mutex_unlock(&chain->filter_chain_lock);
		tp_new = tcf_proto_create(name, protocol, prio, chain,
					  rtnl_held, extack);
		if (IS_ERR(tp_new)) {
			err = PTR_ERR(tp_new);
			goto errout_tp;
		}

		tp_created = 1;
		tp = tcf_chain_tp_insert_unique(chain, tp_new, protocol, prio,
						rtnl_held);
		if (IS_ERR(tp)) {
			err = PTR_ERR(tp);
			goto errout_tp;
		}
	} else {
		mutex_unlock(&chain->filter_chain_lock);
	}

	if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
		NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
		err = -EINVAL;
		goto errout;
	}

	fh = tp->ops->get(tp, t->tcm_handle);

	if (!fh) {
		if (!(n->nlmsg_flags & NLM_F_CREATE)) {
			NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
			err = -ENOENT;
			goto errout;
		}
	} else if (n->nlmsg_flags & NLM_F_EXCL) {
		tfilter_put(tp, fh);
		NL_SET_ERR_MSG(extack, "Filter already exists");
		err = -EEXIST;
		goto errout;
	}

	if (chain->tmplt_ops && chain->tmplt_ops != tp->ops) {
		tfilter_put(tp, fh);
		NL_SET_ERR_MSG(extack, "Chain template is set to a different filter kind");
		err = -EINVAL;
		goto errout;
	}

	if (!(n->nlmsg_flags & NLM_F_CREATE))
		flags |= TCA_ACT_FLAGS_REPLACE;
	if (!rtnl_held)
		flags |= TCA_ACT_FLAGS_NO_RTNL;
	err = tp->ops->change(net, skb, tp, cl, t->tcm_handle, tca, &fh,
			      flags, extack);
	if (err == 0) {
		tfilter_notify(net, skb, n, tp, block, q, parent, fh,
			       RTM_NEWTFILTER, false, rtnl_held);
		tfilter_put(tp, fh);
		/* q pointer is NULL for shared blocks */
		if (q)
			q->flags &= ~TCQ_F_CAN_BYPASS;
	}

errout:
	if (err && tp_created)
		tcf_chain_tp_delete_empty(chain, tp, rtnl_held, NULL);
errout_tp:
	if (chain) {
		if (tp && !IS_ERR(tp))
			tcf_proto_put(tp, rtnl_held, NULL);
		if (!tp_created)
			tcf_chain_put(chain);
	}
	tcf_block_release(q, block, rtnl_held);

	if (rtnl_held)
		rtnl_unlock();

	if (err == -EAGAIN) {
		/* Take rtnl lock in case EAGAIN is caused by concurrent flush
		 * of target chain.
		 */
		rtnl_held = true;
		/* Replay the request. */
		goto replay;
	}
	return err;

errout_locked:
	mutex_unlock(&chain->filter_chain_lock);
	goto errout;
}
RTM_NEWTFILTER, false, rtnl_held); 2155 tfilter_put(tp, fh); 2156 /* q pointer is NULL for shared blocks */ 2157 if (q) 2158 q->flags &= ~TCQ_F_CAN_BYPASS; 2159 } 2160 2161 errout: 2162 if (err && tp_created) 2163 tcf_chain_tp_delete_empty(chain, tp, rtnl_held, NULL); 2164 errout_tp: 2165 if (chain) { 2166 if (tp && !IS_ERR(tp)) 2167 tcf_proto_put(tp, rtnl_held, NULL); 2168 if (!tp_created) 2169 tcf_chain_put(chain); 2170 } 2171 tcf_block_release(q, block, rtnl_held); 2172 2173 if (rtnl_held) 2174 rtnl_unlock(); 2175 2176 if (err == -EAGAIN) { 2177 /* Take rtnl lock in case EAGAIN is caused by concurrent flush 2178 * of target chain. 2179 */ 2180 rtnl_held = true; 2181 /* Replay the request. */ 2182 goto replay; 2183 } 2184 return err; 2185 2186 errout_locked: 2187 mutex_unlock(&chain->filter_chain_lock); 2188 goto errout; 2189 } 2190 2191 static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n, 2192 struct netlink_ext_ack *extack) 2193 { 2194 struct net *net = sock_net(skb->sk); 2195 struct nlattr *tca[TCA_MAX + 1]; 2196 char name[IFNAMSIZ]; 2197 struct tcmsg *t; 2198 u32 protocol; 2199 u32 prio; 2200 u32 parent; 2201 u32 chain_index; 2202 struct Qdisc *q = NULL; 2203 struct tcf_chain_info chain_info; 2204 struct tcf_chain *chain = NULL; 2205 struct tcf_block *block = NULL; 2206 struct tcf_proto *tp = NULL; 2207 unsigned long cl = 0; 2208 void *fh = NULL; 2209 int err; 2210 bool rtnl_held = false; 2211 2212 if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN)) 2213 return -EPERM; 2214 2215 err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX, 2216 rtm_tca_policy, extack); 2217 if (err < 0) 2218 return err; 2219 2220 t = nlmsg_data(n); 2221 protocol = TC_H_MIN(t->tcm_info); 2222 prio = TC_H_MAJ(t->tcm_info); 2223 parent = t->tcm_parent; 2224 2225 if (prio == 0 && (protocol || t->tcm_handle || tca[TCA_KIND])) { 2226 NL_SET_ERR_MSG(extack, "Cannot flush filters with protocol, handle or kind set"); 2227 return -ENOENT; 2228 } 2229 2230 /* Find head of filter chain. */ 2231 2232 err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack); 2233 if (err) 2234 return err; 2235 2236 if (tcf_proto_check_kind(tca[TCA_KIND], name)) { 2237 NL_SET_ERR_MSG(extack, "Specified TC filter name too long"); 2238 err = -EINVAL; 2239 goto errout; 2240 } 2241 /* Take rtnl mutex if flushing whole chain, block is shared (no qdisc 2242 * found), qdisc is not unlocked, classifier type is not specified, 2243 * classifier is not unlocked. 2244 */ 2245 if (!prio || 2246 (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) || 2247 !tcf_proto_is_unlocked(name)) { 2248 rtnl_held = true; 2249 rtnl_lock(); 2250 } 2251 2252 err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack); 2253 if (err) 2254 goto errout; 2255 2256 block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index, 2257 extack); 2258 if (IS_ERR(block)) { 2259 err = PTR_ERR(block); 2260 goto errout; 2261 } 2262 2263 chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0; 2264 if (chain_index > TC_ACT_EXT_VAL_MASK) { 2265 NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit"); 2266 err = -EINVAL; 2267 goto errout; 2268 } 2269 chain = tcf_chain_get(block, chain_index, false); 2270 if (!chain) { 2271 /* User requested flush on non-existent chain. Nothing to do, 2272 * so just return success. 
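 * A flush request is identified by prio == 0, as the check just below
 * shows.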
2273 */ 2274 if (prio == 0) { 2275 err = 0; 2276 goto errout; 2277 } 2278 NL_SET_ERR_MSG(extack, "Cannot find specified filter chain"); 2279 err = -ENOENT; 2280 goto errout; 2281 } 2282 2283 if (prio == 0) { 2284 tfilter_notify_chain(net, skb, block, q, parent, n, 2285 chain, RTM_DELTFILTER); 2286 tcf_chain_flush(chain, rtnl_held); 2287 err = 0; 2288 goto errout; 2289 } 2290 2291 mutex_lock(&chain->filter_chain_lock); 2292 tp = tcf_chain_tp_find(chain, &chain_info, protocol, 2293 prio, false); 2294 if (!tp || IS_ERR(tp)) { 2295 NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found"); 2296 err = tp ? PTR_ERR(tp) : -ENOENT; 2297 goto errout_locked; 2298 } else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) { 2299 NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one"); 2300 err = -EINVAL; 2301 goto errout_locked; 2302 } else if (t->tcm_handle == 0) { 2303 tcf_proto_signal_destroying(chain, tp); 2304 tcf_chain_tp_remove(chain, &chain_info, tp); 2305 mutex_unlock(&chain->filter_chain_lock); 2306 2307 tcf_proto_put(tp, rtnl_held, NULL); 2308 tfilter_notify(net, skb, n, tp, block, q, parent, fh, 2309 RTM_DELTFILTER, false, rtnl_held); 2310 err = 0; 2311 goto errout; 2312 } 2313 mutex_unlock(&chain->filter_chain_lock); 2314 2315 fh = tp->ops->get(tp, t->tcm_handle); 2316 2317 if (!fh) { 2318 NL_SET_ERR_MSG(extack, "Specified filter handle not found"); 2319 err = -ENOENT; 2320 } else { 2321 bool last; 2322 2323 err = tfilter_del_notify(net, skb, n, tp, block, 2324 q, parent, fh, false, &last, 2325 rtnl_held, extack); 2326 2327 if (err) 2328 goto errout; 2329 if (last) 2330 tcf_chain_tp_delete_empty(chain, tp, rtnl_held, extack); 2331 } 2332 2333 errout: 2334 if (chain) { 2335 if (tp && !IS_ERR(tp)) 2336 tcf_proto_put(tp, rtnl_held, NULL); 2337 tcf_chain_put(chain); 2338 } 2339 tcf_block_release(q, block, rtnl_held); 2340 2341 if (rtnl_held) 2342 rtnl_unlock(); 2343 2344 return err; 2345 2346 errout_locked: 2347 mutex_unlock(&chain->filter_chain_lock); 2348 goto errout; 2349 } 2350 2351 static int tc_get_tfilter(struct sk_buff *skb, struct nlmsghdr *n, 2352 struct netlink_ext_ack *extack) 2353 { 2354 struct net *net = sock_net(skb->sk); 2355 struct nlattr *tca[TCA_MAX + 1]; 2356 char name[IFNAMSIZ]; 2357 struct tcmsg *t; 2358 u32 protocol; 2359 u32 prio; 2360 u32 parent; 2361 u32 chain_index; 2362 struct Qdisc *q = NULL; 2363 struct tcf_chain_info chain_info; 2364 struct tcf_chain *chain = NULL; 2365 struct tcf_block *block = NULL; 2366 struct tcf_proto *tp = NULL; 2367 unsigned long cl = 0; 2368 void *fh = NULL; 2369 int err; 2370 bool rtnl_held = false; 2371 2372 err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX, 2373 rtm_tca_policy, extack); 2374 if (err < 0) 2375 return err; 2376 2377 t = nlmsg_data(n); 2378 protocol = TC_H_MIN(t->tcm_info); 2379 prio = TC_H_MAJ(t->tcm_info); 2380 parent = t->tcm_parent; 2381 2382 if (prio == 0) { 2383 NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero"); 2384 return -ENOENT; 2385 } 2386 2387 /* Find head of filter chain. */ 2388 2389 err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack); 2390 if (err) 2391 return err; 2392 2393 if (tcf_proto_check_kind(tca[TCA_KIND], name)) { 2394 NL_SET_ERR_MSG(extack, "Specified TC filter name too long"); 2395 err = -EINVAL; 2396 goto errout; 2397 } 2398 /* Take rtnl mutex if block is shared (no qdisc found), qdisc is not 2399 * unlocked, classifier type is not specified, classifier is not 2400 * unlocked. 
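 * Unlike the add and delete paths, a get request is never replayed, so
 * this decision is made exactly once.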
2401 */ 2402 if ((q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) || 2403 !tcf_proto_is_unlocked(name)) { 2404 rtnl_held = true; 2405 rtnl_lock(); 2406 } 2407 2408 err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack); 2409 if (err) 2410 goto errout; 2411 2412 block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index, 2413 extack); 2414 if (IS_ERR(block)) { 2415 err = PTR_ERR(block); 2416 goto errout; 2417 } 2418 2419 chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0; 2420 if (chain_index > TC_ACT_EXT_VAL_MASK) { 2421 NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit"); 2422 err = -EINVAL; 2423 goto errout; 2424 } 2425 chain = tcf_chain_get(block, chain_index, false); 2426 if (!chain) { 2427 NL_SET_ERR_MSG(extack, "Cannot find specified filter chain"); 2428 err = -EINVAL; 2429 goto errout; 2430 } 2431 2432 mutex_lock(&chain->filter_chain_lock); 2433 tp = tcf_chain_tp_find(chain, &chain_info, protocol, 2434 prio, false); 2435 mutex_unlock(&chain->filter_chain_lock); 2436 if (!tp || IS_ERR(tp)) { 2437 NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found"); 2438 err = tp ? PTR_ERR(tp) : -ENOENT; 2439 goto errout; 2440 } else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) { 2441 NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one"); 2442 err = -EINVAL; 2443 goto errout; 2444 } 2445 2446 fh = tp->ops->get(tp, t->tcm_handle); 2447 2448 if (!fh) { 2449 NL_SET_ERR_MSG(extack, "Specified filter handle not found"); 2450 err = -ENOENT; 2451 } else { 2452 err = tfilter_notify(net, skb, n, tp, block, q, parent, 2453 fh, RTM_NEWTFILTER, true, rtnl_held); 2454 if (err < 0) 2455 NL_SET_ERR_MSG(extack, "Failed to send filter notify message"); 2456 } 2457 2458 tfilter_put(tp, fh); 2459 errout: 2460 if (chain) { 2461 if (tp && !IS_ERR(tp)) 2462 tcf_proto_put(tp, rtnl_held, NULL); 2463 tcf_chain_put(chain); 2464 } 2465 tcf_block_release(q, block, rtnl_held); 2466 2467 if (rtnl_held) 2468 rtnl_unlock(); 2469 2470 return err; 2471 } 2472 2473 struct tcf_dump_args { 2474 struct tcf_walker w; 2475 struct sk_buff *skb; 2476 struct netlink_callback *cb; 2477 struct tcf_block *block; 2478 struct Qdisc *q; 2479 u32 parent; 2480 bool terse_dump; 2481 }; 2482 2483 static int tcf_node_dump(struct tcf_proto *tp, void *n, struct tcf_walker *arg) 2484 { 2485 struct tcf_dump_args *a = (void *)arg; 2486 struct net *net = sock_net(a->skb->sk); 2487 2488 return tcf_fill_node(net, a->skb, tp, a->block, a->q, a->parent, 2489 n, NETLINK_CB(a->cb->skb).portid, 2490 a->cb->nlh->nlmsg_seq, NLM_F_MULTI, 2491 RTM_NEWTFILTER, a->terse_dump, true); 2492 } 2493 2494 static bool tcf_chain_dump(struct tcf_chain *chain, struct Qdisc *q, u32 parent, 2495 struct sk_buff *skb, struct netlink_callback *cb, 2496 long index_start, long *p_index, bool terse) 2497 { 2498 struct net *net = sock_net(skb->sk); 2499 struct tcf_block *block = chain->block; 2500 struct tcmsg *tcm = nlmsg_data(cb->nlh); 2501 struct tcf_proto *tp, *tp_prev; 2502 struct tcf_dump_args arg; 2503 2504 for (tp = __tcf_get_next_proto(chain, NULL); 2505 tp; 2506 tp_prev = tp, 2507 tp = __tcf_get_next_proto(chain, tp), 2508 tcf_proto_put(tp_prev, true, NULL), 2509 (*p_index)++) { 2510 if (*p_index < index_start) 2511 continue; 2512 if (TC_H_MAJ(tcm->tcm_info) && 2513 TC_H_MAJ(tcm->tcm_info) != tp->prio) 2514 continue; 2515 if (TC_H_MIN(tcm->tcm_info) && 2516 TC_H_MIN(tcm->tcm_info) != tp->protocol) 2517 continue; 2518 if (*p_index > index_start) 2519 
memset(&cb->args[1], 0, 2520 sizeof(cb->args) - sizeof(cb->args[0])); 2521 if (cb->args[1] == 0) { 2522 if (tcf_fill_node(net, skb, tp, block, q, parent, NULL, 2523 NETLINK_CB(cb->skb).portid, 2524 cb->nlh->nlmsg_seq, NLM_F_MULTI, 2525 RTM_NEWTFILTER, false, true) <= 0) 2526 goto errout; 2527 cb->args[1] = 1; 2528 } 2529 if (!tp->ops->walk) 2530 continue; 2531 arg.w.fn = tcf_node_dump; 2532 arg.skb = skb; 2533 arg.cb = cb; 2534 arg.block = block; 2535 arg.q = q; 2536 arg.parent = parent; 2537 arg.w.stop = 0; 2538 arg.w.skip = cb->args[1] - 1; 2539 arg.w.count = 0; 2540 arg.w.cookie = cb->args[2]; 2541 arg.terse_dump = terse; 2542 tp->ops->walk(tp, &arg.w, true); 2543 cb->args[2] = arg.w.cookie; 2544 cb->args[1] = arg.w.count + 1; 2545 if (arg.w.stop) 2546 goto errout; 2547 } 2548 return true; 2549 2550 errout: 2551 tcf_proto_put(tp, true, NULL); 2552 return false; 2553 } 2554 2555 static const struct nla_policy tcf_tfilter_dump_policy[TCA_MAX + 1] = { 2556 [TCA_DUMP_FLAGS] = NLA_POLICY_BITFIELD32(TCA_DUMP_FLAGS_TERSE), 2557 }; 2558 2559 /* called with RTNL */ 2560 static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb) 2561 { 2562 struct tcf_chain *chain, *chain_prev; 2563 struct net *net = sock_net(skb->sk); 2564 struct nlattr *tca[TCA_MAX + 1]; 2565 struct Qdisc *q = NULL; 2566 struct tcf_block *block; 2567 struct tcmsg *tcm = nlmsg_data(cb->nlh); 2568 bool terse_dump = false; 2569 long index_start; 2570 long index; 2571 u32 parent; 2572 int err; 2573 2574 if (nlmsg_len(cb->nlh) < sizeof(*tcm)) 2575 return skb->len; 2576 2577 err = nlmsg_parse_deprecated(cb->nlh, sizeof(*tcm), tca, TCA_MAX, 2578 tcf_tfilter_dump_policy, cb->extack); 2579 if (err) 2580 return err; 2581 2582 if (tca[TCA_DUMP_FLAGS]) { 2583 struct nla_bitfield32 flags = 2584 nla_get_bitfield32(tca[TCA_DUMP_FLAGS]); 2585 2586 terse_dump = flags.value & TCA_DUMP_FLAGS_TERSE; 2587 } 2588 2589 if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) { 2590 block = tcf_block_refcnt_get(net, tcm->tcm_block_index); 2591 if (!block) 2592 goto out; 2593 /* If we work with block index, q is NULL and parent value 2594 * will never be used in the following code. The check 2595 * in tcf_fill_node prevents it. However, compiler does not 2596 * see that far, so set parent to zero to silence the warning 2597 * about parent being uninitialized. 
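 * When the block is instead found through a qdisc in the else branch
 * below, parent is overwritten with block->classid.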
2598 */ 2599 parent = 0; 2600 } else { 2601 const struct Qdisc_class_ops *cops; 2602 struct net_device *dev; 2603 unsigned long cl = 0; 2604 2605 dev = __dev_get_by_index(net, tcm->tcm_ifindex); 2606 if (!dev) 2607 return skb->len; 2608 2609 parent = tcm->tcm_parent; 2610 if (!parent) 2611 q = rtnl_dereference(dev->qdisc); 2612 else 2613 q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent)); 2614 if (!q) 2615 goto out; 2616 cops = q->ops->cl_ops; 2617 if (!cops) 2618 goto out; 2619 if (!cops->tcf_block) 2620 goto out; 2621 if (TC_H_MIN(tcm->tcm_parent)) { 2622 cl = cops->find(q, tcm->tcm_parent); 2623 if (cl == 0) 2624 goto out; 2625 } 2626 block = cops->tcf_block(q, cl, NULL); 2627 if (!block) 2628 goto out; 2629 parent = block->classid; 2630 if (tcf_block_shared(block)) 2631 q = NULL; 2632 } 2633 2634 index_start = cb->args[0]; 2635 index = 0; 2636 2637 for (chain = __tcf_get_next_chain(block, NULL); 2638 chain; 2639 chain_prev = chain, 2640 chain = __tcf_get_next_chain(block, chain), 2641 tcf_chain_put(chain_prev)) { 2642 if (tca[TCA_CHAIN] && 2643 nla_get_u32(tca[TCA_CHAIN]) != chain->index) 2644 continue; 2645 if (!tcf_chain_dump(chain, q, parent, skb, cb, 2646 index_start, &index, terse_dump)) { 2647 tcf_chain_put(chain); 2648 err = -EMSGSIZE; 2649 break; 2650 } 2651 } 2652 2653 if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) 2654 tcf_block_refcnt_put(block, true); 2655 cb->args[0] = index; 2656 2657 out: 2658 /* If we did no progress, the error (EMSGSIZE) is real */ 2659 if (skb->len == 0 && err) 2660 return err; 2661 return skb->len; 2662 } 2663 2664 static int tc_chain_fill_node(const struct tcf_proto_ops *tmplt_ops, 2665 void *tmplt_priv, u32 chain_index, 2666 struct net *net, struct sk_buff *skb, 2667 struct tcf_block *block, 2668 u32 portid, u32 seq, u16 flags, int event) 2669 { 2670 unsigned char *b = skb_tail_pointer(skb); 2671 const struct tcf_proto_ops *ops; 2672 struct nlmsghdr *nlh; 2673 struct tcmsg *tcm; 2674 void *priv; 2675 2676 ops = tmplt_ops; 2677 priv = tmplt_priv; 2678 2679 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags); 2680 if (!nlh) 2681 goto out_nlmsg_trim; 2682 tcm = nlmsg_data(nlh); 2683 tcm->tcm_family = AF_UNSPEC; 2684 tcm->tcm__pad1 = 0; 2685 tcm->tcm__pad2 = 0; 2686 tcm->tcm_handle = 0; 2687 if (block->q) { 2688 tcm->tcm_ifindex = qdisc_dev(block->q)->ifindex; 2689 tcm->tcm_parent = block->q->handle; 2690 } else { 2691 tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK; 2692 tcm->tcm_block_index = block->index; 2693 } 2694 2695 if (nla_put_u32(skb, TCA_CHAIN, chain_index)) 2696 goto nla_put_failure; 2697 2698 if (ops) { 2699 if (nla_put_string(skb, TCA_KIND, ops->kind)) 2700 goto nla_put_failure; 2701 if (ops->tmplt_dump(skb, net, priv) < 0) 2702 goto nla_put_failure; 2703 } 2704 2705 nlh->nlmsg_len = skb_tail_pointer(skb) - b; 2706 return skb->len; 2707 2708 out_nlmsg_trim: 2709 nla_put_failure: 2710 nlmsg_trim(skb, b); 2711 return -EMSGSIZE; 2712 } 2713 2714 static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb, 2715 u32 seq, u16 flags, int event, bool unicast) 2716 { 2717 u32 portid = oskb ? 
NETLINK_CB(oskb).portid : 0; 2718 struct tcf_block *block = chain->block; 2719 struct net *net = block->net; 2720 struct sk_buff *skb; 2721 int err = 0; 2722 2723 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); 2724 if (!skb) 2725 return -ENOBUFS; 2726 2727 if (tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv, 2728 chain->index, net, skb, block, portid, 2729 seq, flags, event) <= 0) { 2730 kfree_skb(skb); 2731 return -EINVAL; 2732 } 2733 2734 if (unicast) 2735 err = rtnl_unicast(skb, net, portid); 2736 else 2737 err = rtnetlink_send(skb, net, portid, RTNLGRP_TC, 2738 flags & NLM_F_ECHO); 2739 2740 return err; 2741 } 2742 2743 static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops, 2744 void *tmplt_priv, u32 chain_index, 2745 struct tcf_block *block, struct sk_buff *oskb, 2746 u32 seq, u16 flags, bool unicast) 2747 { 2748 u32 portid = oskb ? NETLINK_CB(oskb).portid : 0; 2749 struct net *net = block->net; 2750 struct sk_buff *skb; 2751 2752 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); 2753 if (!skb) 2754 return -ENOBUFS; 2755 2756 if (tc_chain_fill_node(tmplt_ops, tmplt_priv, chain_index, net, skb, 2757 block, portid, seq, flags, RTM_DELCHAIN) <= 0) { 2758 kfree_skb(skb); 2759 return -EINVAL; 2760 } 2761 2762 if (unicast) 2763 return rtnl_unicast(skb, net, portid); 2764 2765 return rtnetlink_send(skb, net, portid, RTNLGRP_TC, flags & NLM_F_ECHO); 2766 } 2767 2768 static int tc_chain_tmplt_add(struct tcf_chain *chain, struct net *net, 2769 struct nlattr **tca, 2770 struct netlink_ext_ack *extack) 2771 { 2772 const struct tcf_proto_ops *ops; 2773 char name[IFNAMSIZ]; 2774 void *tmplt_priv; 2775 2776 /* If kind is not set, user did not specify template. */ 2777 if (!tca[TCA_KIND]) 2778 return 0; 2779 2780 if (tcf_proto_check_kind(tca[TCA_KIND], name)) { 2781 NL_SET_ERR_MSG(extack, "Specified TC chain template name too long"); 2782 return -EINVAL; 2783 } 2784 2785 ops = tcf_proto_lookup_ops(name, true, extack); 2786 if (IS_ERR(ops)) 2787 return PTR_ERR(ops); 2788 if (!ops->tmplt_create || !ops->tmplt_destroy || !ops->tmplt_dump) { 2789 NL_SET_ERR_MSG(extack, "Chain templates are not supported with specified classifier"); 2790 return -EOPNOTSUPP; 2791 } 2792 2793 tmplt_priv = ops->tmplt_create(net, chain, tca, extack); 2794 if (IS_ERR(tmplt_priv)) { 2795 module_put(ops->owner); 2796 return PTR_ERR(tmplt_priv); 2797 } 2798 chain->tmplt_ops = ops; 2799 chain->tmplt_priv = tmplt_priv; 2800 return 0; 2801 } 2802 2803 static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops, 2804 void *tmplt_priv) 2805 { 2806 /* If template ops were not set, there is nothing for us to do.
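 * The template, if any, was installed by tc_chain_tmplt_add(), which
 * sets tmplt_ops and tmplt_priv together.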
*/ 2807 if (!tmplt_ops) 2808 return; 2809 2810 tmplt_ops->tmplt_destroy(tmplt_priv); 2811 module_put(tmplt_ops->owner); 2812 } 2813 2814 /* Add/delete/get a chain */ 2815 2816 static int tc_ctl_chain(struct sk_buff *skb, struct nlmsghdr *n, 2817 struct netlink_ext_ack *extack) 2818 { 2819 struct net *net = sock_net(skb->sk); 2820 struct nlattr *tca[TCA_MAX + 1]; 2821 struct tcmsg *t; 2822 u32 parent; 2823 u32 chain_index; 2824 struct Qdisc *q; 2825 struct tcf_chain *chain; 2826 struct tcf_block *block; 2827 unsigned long cl; 2828 int err; 2829 2830 if (n->nlmsg_type != RTM_GETCHAIN && 2831 !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN)) 2832 return -EPERM; 2833 2834 replay: 2835 q = NULL; 2836 err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX, 2837 rtm_tca_policy, extack); 2838 if (err < 0) 2839 return err; 2840 2841 t = nlmsg_data(n); 2842 parent = t->tcm_parent; 2843 cl = 0; 2844 2845 block = tcf_block_find(net, &q, &parent, &cl, 2846 t->tcm_ifindex, t->tcm_block_index, extack); 2847 if (IS_ERR(block)) 2848 return PTR_ERR(block); 2849 2850 chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0; 2851 if (chain_index > TC_ACT_EXT_VAL_MASK) { 2852 NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit"); 2853 err = -EINVAL; 2854 goto errout_block; 2855 } 2856 2857 mutex_lock(&block->lock); 2858 chain = tcf_chain_lookup(block, chain_index); 2859 if (n->nlmsg_type == RTM_NEWCHAIN) { 2860 if (chain) { 2861 if (tcf_chain_held_by_acts_only(chain)) { 2862 /* The chain exists only because there is 2863 * some action referencing it. 2864 */ 2865 tcf_chain_hold(chain); 2866 } else { 2867 NL_SET_ERR_MSG(extack, "Filter chain already exists"); 2868 err = -EEXIST; 2869 goto errout_block_locked; 2870 } 2871 } else { 2872 if (!(n->nlmsg_flags & NLM_F_CREATE)) { 2873 NL_SET_ERR_MSG(extack, "Need both RTM_NEWCHAIN and NLM_F_CREATE to create a new chain"); 2874 err = -ENOENT; 2875 goto errout_block_locked; 2876 } 2877 chain = tcf_chain_create(block, chain_index); 2878 if (!chain) { 2879 NL_SET_ERR_MSG(extack, "Failed to create filter chain"); 2880 err = -ENOMEM; 2881 goto errout_block_locked; 2882 } 2883 } 2884 } else { 2885 if (!chain || tcf_chain_held_by_acts_only(chain)) { 2886 NL_SET_ERR_MSG(extack, "Cannot find specified filter chain"); 2887 err = -EINVAL; 2888 goto errout_block_locked; 2889 } 2890 tcf_chain_hold(chain); 2891 } 2892 2893 if (n->nlmsg_type == RTM_NEWCHAIN) { 2894 /* Modifying chain requires holding parent block lock. In case 2895 * the chain was successfully added, take a reference to the 2896 * chain. This ensures that an empty chain does not disappear at 2897 * the end of this function. 2898 */ 2899 tcf_chain_hold(chain); 2900 chain->explicitly_created = true; 2901 } 2902 mutex_unlock(&block->lock); 2903 2904 switch (n->nlmsg_type) { 2905 case RTM_NEWCHAIN: 2906 err = tc_chain_tmplt_add(chain, net, tca, extack); 2907 if (err) { 2908 tcf_chain_put_explicitly_created(chain); 2909 goto errout; 2910 } 2911 2912 tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL, 2913 RTM_NEWCHAIN, false); 2914 break; 2915 case RTM_DELCHAIN: 2916 tfilter_notify_chain(net, skb, block, q, parent, n, 2917 chain, RTM_DELTFILTER); 2918 /* Flush the chain first as the user requested chain removal. */ 2919 tcf_chain_flush(chain, true); 2920 /* In case the chain was successfully deleted, put a reference 2921 * to the chain previously taken during addition. 
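 * That reference was taken under block->lock in the RTM_NEWCHAIN
 * handling above, together with setting chain->explicitly_created.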
2922 */ 2923 tcf_chain_put_explicitly_created(chain); 2924 break; 2925 case RTM_GETCHAIN: 2926 err = tc_chain_notify(chain, skb, n->nlmsg_seq, 2927 n->nlmsg_flags, n->nlmsg_type, true); 2928 if (err < 0) 2929 NL_SET_ERR_MSG(extack, "Failed to send chain notify message"); 2930 break; 2931 default: 2932 err = -EOPNOTSUPP; 2933 NL_SET_ERR_MSG(extack, "Unsupported message type"); 2934 goto errout; 2935 } 2936 2937 errout: 2938 tcf_chain_put(chain); 2939 errout_block: 2940 tcf_block_release(q, block, true); 2941 if (err == -EAGAIN) 2942 /* Replay the request. */ 2943 goto replay; 2944 return err; 2945 2946 errout_block_locked: 2947 mutex_unlock(&block->lock); 2948 goto errout_block; 2949 } 2950 2951 /* called with RTNL */ 2952 static int tc_dump_chain(struct sk_buff *skb, struct netlink_callback *cb) 2953 { 2954 struct net *net = sock_net(skb->sk); 2955 struct nlattr *tca[TCA_MAX + 1]; 2956 struct Qdisc *q = NULL; 2957 struct tcf_block *block; 2958 struct tcmsg *tcm = nlmsg_data(cb->nlh); 2959 struct tcf_chain *chain; 2960 long index_start; 2961 long index; 2962 int err; 2963 2964 if (nlmsg_len(cb->nlh) < sizeof(*tcm)) 2965 return skb->len; 2966 2967 err = nlmsg_parse_deprecated(cb->nlh, sizeof(*tcm), tca, TCA_MAX, 2968 rtm_tca_policy, cb->extack); 2969 if (err) 2970 return err; 2971 2972 if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) { 2973 block = tcf_block_refcnt_get(net, tcm->tcm_block_index); 2974 if (!block) 2975 goto out; 2976 } else { 2977 const struct Qdisc_class_ops *cops; 2978 struct net_device *dev; 2979 unsigned long cl = 0; 2980 2981 dev = __dev_get_by_index(net, tcm->tcm_ifindex); 2982 if (!dev) 2983 return skb->len; 2984 2985 if (!tcm->tcm_parent) 2986 q = rtnl_dereference(dev->qdisc); 2987 else 2988 q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent)); 2989 2990 if (!q) 2991 goto out; 2992 cops = q->ops->cl_ops; 2993 if (!cops) 2994 goto out; 2995 if (!cops->tcf_block) 2996 goto out; 2997 if (TC_H_MIN(tcm->tcm_parent)) { 2998 cl = cops->find(q, tcm->tcm_parent); 2999 if (cl == 0) 3000 goto out; 3001 } 3002 block = cops->tcf_block(q, cl, NULL); 3003 if (!block) 3004 goto out; 3005 if (tcf_block_shared(block)) 3006 q = NULL; 3007 } 3008 3009 index_start = cb->args[0]; 3010 index = 0; 3011 3012 mutex_lock(&block->lock); 3013 list_for_each_entry(chain, &block->chain_list, list) { 3014 if ((tca[TCA_CHAIN] && 3015 nla_get_u32(tca[TCA_CHAIN]) != chain->index)) 3016 continue; 3017 if (index < index_start) { 3018 index++; 3019 continue; 3020 } 3021 if (tcf_chain_held_by_acts_only(chain)) 3022 continue; 3023 err = tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv, 3024 chain->index, net, skb, block, 3025 NETLINK_CB(cb->skb).portid, 3026 cb->nlh->nlmsg_seq, NLM_F_MULTI, 3027 RTM_NEWCHAIN); 3028 if (err <= 0) 3029 break; 3030 index++; 3031 } 3032 mutex_unlock(&block->lock); 3033 3034 if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) 3035 tcf_block_refcnt_put(block, true); 3036 cb->args[0] = index; 3037 3038 out: 3039 /* If we did no progress, the error (EMSGSIZE) is real */ 3040 if (skb->len == 0 && err) 3041 return err; 3042 return skb->len; 3043 } 3044 3045 void tcf_exts_destroy(struct tcf_exts *exts) 3046 { 3047 #ifdef CONFIG_NET_CLS_ACT 3048 if (exts->actions) { 3049 tcf_action_destroy(exts->actions, TCA_ACT_UNBIND); 3050 kfree(exts->actions); 3051 } 3052 exts->nr_actions = 0; 3053 #endif 3054 } 3055 EXPORT_SYMBOL(tcf_exts_destroy); 3056 3057 int tcf_exts_validate_ex(struct net *net, struct tcf_proto *tp, struct nlattr **tb, 3058 struct nlattr *rate_tlv, struct tcf_exts *exts, 3059 
u32 flags, u32 fl_flags, struct netlink_ext_ack *extack) 3060 { 3061 #ifdef CONFIG_NET_CLS_ACT 3062 { 3063 int init_res[TCA_ACT_MAX_PRIO] = {}; 3064 struct tc_action *act; 3065 size_t attr_size = 0; 3066 3067 if (exts->police && tb[exts->police]) { 3068 struct tc_action_ops *a_o; 3069 3070 a_o = tc_action_load_ops(tb[exts->police], true, 3071 !(flags & TCA_ACT_FLAGS_NO_RTNL), 3072 extack); 3073 if (IS_ERR(a_o)) 3074 return PTR_ERR(a_o); 3075 flags |= TCA_ACT_FLAGS_POLICE | TCA_ACT_FLAGS_BIND; 3076 act = tcf_action_init_1(net, tp, tb[exts->police], 3077 rate_tlv, a_o, init_res, flags, 3078 extack); 3079 module_put(a_o->owner); 3080 if (IS_ERR(act)) 3081 return PTR_ERR(act); 3082 3083 act->type = exts->type = TCA_OLD_COMPAT; 3084 exts->actions[0] = act; 3085 exts->nr_actions = 1; 3086 tcf_idr_insert_many(exts->actions); 3087 } else if (exts->action && tb[exts->action]) { 3088 int err; 3089 3090 flags |= TCA_ACT_FLAGS_BIND; 3091 err = tcf_action_init(net, tp, tb[exts->action], 3092 rate_tlv, exts->actions, init_res, 3093 &attr_size, flags, fl_flags, 3094 extack); 3095 if (err < 0) 3096 return err; 3097 exts->nr_actions = err; 3098 } 3099 } 3100 #else 3101 if ((exts->action && tb[exts->action]) || 3102 (exts->police && tb[exts->police])) { 3103 NL_SET_ERR_MSG(extack, "Classifier actions are not supported per compile options (CONFIG_NET_CLS_ACT)"); 3104 return -EOPNOTSUPP; 3105 } 3106 #endif 3107 3108 return 0; 3109 } 3110 EXPORT_SYMBOL(tcf_exts_validate_ex); 3111 3112 int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb, 3113 struct nlattr *rate_tlv, struct tcf_exts *exts, 3114 u32 flags, struct netlink_ext_ack *extack) 3115 { 3116 return tcf_exts_validate_ex(net, tp, tb, rate_tlv, exts, 3117 flags, 0, extack); 3118 } 3119 EXPORT_SYMBOL(tcf_exts_validate); 3120 3121 void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src) 3122 { 3123 #ifdef CONFIG_NET_CLS_ACT 3124 struct tcf_exts old = *dst; 3125 3126 *dst = *src; 3127 tcf_exts_destroy(&old); 3128 #endif 3129 } 3130 EXPORT_SYMBOL(tcf_exts_change); 3131 3132 #ifdef CONFIG_NET_CLS_ACT 3133 static struct tc_action *tcf_exts_first_act(struct tcf_exts *exts) 3134 { 3135 if (exts->nr_actions == 0) 3136 return NULL; 3137 else 3138 return exts->actions[0]; 3139 } 3140 #endif 3141 3142 int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts) 3143 { 3144 #ifdef CONFIG_NET_CLS_ACT 3145 struct nlattr *nest; 3146 3147 if (exts->action && tcf_exts_has_actions(exts)) { 3148 /* 3149 * again for backward compatible mode - we want 3150 * to work with both old and new modes of entering 3151 * tc data even if iproute2 was newer - jhs 3152 */ 3153 if (exts->type != TCA_OLD_COMPAT) { 3154 nest = nla_nest_start_noflag(skb, exts->action); 3155 if (nest == NULL) 3156 goto nla_put_failure; 3157 3158 if (tcf_action_dump(skb, exts->actions, 0, 0, false) 3159 < 0) 3160 goto nla_put_failure; 3161 nla_nest_end(skb, nest); 3162 } else if (exts->police) { 3163 struct tc_action *act = tcf_exts_first_act(exts); 3164 nest = nla_nest_start_noflag(skb, exts->police); 3165 if (nest == NULL || !act) 3166 goto nla_put_failure; 3167 if (tcf_action_dump_old(skb, act, 0, 0) < 0) 3168 goto nla_put_failure; 3169 nla_nest_end(skb, nest); 3170 } 3171 } 3172 return 0; 3173 3174 nla_put_failure: 3175 nla_nest_cancel(skb, nest); 3176 return -1; 3177 #else 3178 return 0; 3179 #endif 3180 } 3181 EXPORT_SYMBOL(tcf_exts_dump); 3182 3183 int tcf_exts_terse_dump(struct sk_buff *skb, struct tcf_exts *exts) 3184 { 3185 #ifdef CONFIG_NET_CLS_ACT 3186 struct 
nlattr *nest; 3187 3188 if (!exts->action || !tcf_exts_has_actions(exts)) 3189 return 0; 3190 3191 nest = nla_nest_start_noflag(skb, exts->action); 3192 if (!nest) 3193 goto nla_put_failure; 3194 3195 if (tcf_action_dump(skb, exts->actions, 0, 0, true) < 0) 3196 goto nla_put_failure; 3197 nla_nest_end(skb, nest); 3198 return 0; 3199 3200 nla_put_failure: 3201 nla_nest_cancel(skb, nest); 3202 return -1; 3203 #else 3204 return 0; 3205 #endif 3206 } 3207 EXPORT_SYMBOL(tcf_exts_terse_dump); 3208 3209 int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts) 3210 { 3211 #ifdef CONFIG_NET_CLS_ACT 3212 struct tc_action *a = tcf_exts_first_act(exts); 3213 if (a != NULL && tcf_action_copy_stats(skb, a, 1) < 0) 3214 return -1; 3215 #endif 3216 return 0; 3217 } 3218 EXPORT_SYMBOL(tcf_exts_dump_stats); 3219 3220 static void tcf_block_offload_inc(struct tcf_block *block, u32 *flags) 3221 { 3222 if (*flags & TCA_CLS_FLAGS_IN_HW) 3223 return; 3224 *flags |= TCA_CLS_FLAGS_IN_HW; 3225 atomic_inc(&block->offloadcnt); 3226 } 3227 3228 static void tcf_block_offload_dec(struct tcf_block *block, u32 *flags) 3229 { 3230 if (!(*flags & TCA_CLS_FLAGS_IN_HW)) 3231 return; 3232 *flags &= ~TCA_CLS_FLAGS_IN_HW; 3233 atomic_dec(&block->offloadcnt); 3234 } 3235 3236 static void tc_cls_offload_cnt_update(struct tcf_block *block, 3237 struct tcf_proto *tp, u32 *cnt, 3238 u32 *flags, u32 diff, bool add) 3239 { 3240 lockdep_assert_held(&block->cb_lock); 3241 3242 spin_lock(&tp->lock); 3243 if (add) { 3244 if (!*cnt) 3245 tcf_block_offload_inc(block, flags); 3246 *cnt += diff; 3247 } else { 3248 *cnt -= diff; 3249 if (!*cnt) 3250 tcf_block_offload_dec(block, flags); 3251 } 3252 spin_unlock(&tp->lock); 3253 } 3254 3255 static void 3256 tc_cls_offload_cnt_reset(struct tcf_block *block, struct tcf_proto *tp, 3257 u32 *cnt, u32 *flags) 3258 { 3259 lockdep_assert_held(&block->cb_lock); 3260 3261 spin_lock(&tp->lock); 3262 tcf_block_offload_dec(block, flags); 3263 *cnt = 0; 3264 spin_unlock(&tp->lock); 3265 } 3266 3267 static int 3268 __tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type, 3269 void *type_data, bool err_stop) 3270 { 3271 struct flow_block_cb *block_cb; 3272 int ok_count = 0; 3273 int err; 3274 3275 list_for_each_entry(block_cb, &block->flow_block.cb_list, list) { 3276 err = block_cb->cb(type, type_data, block_cb->cb_priv); 3277 if (err) { 3278 if (err_stop) 3279 return err; 3280 } else { 3281 ok_count++; 3282 } 3283 } 3284 return ok_count; 3285 } 3286 3287 int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type, 3288 void *type_data, bool err_stop, bool rtnl_held) 3289 { 3290 bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held; 3291 int ok_count; 3292 3293 retry: 3294 if (take_rtnl) 3295 rtnl_lock(); 3296 down_read(&block->cb_lock); 3297 /* Need to obtain rtnl lock if block is bound to devs that require it. 3298 * In block bind code cb_lock is obtained while holding rtnl, so we must 3299 * obtain the locks in same order here. 3300 */ 3301 if (!rtnl_held && !take_rtnl && block->lockeddevcnt) { 3302 up_read(&block->cb_lock); 3303 take_rtnl = true; 3304 goto retry; 3305 } 3306 3307 ok_count = __tc_setup_cb_call(block, type, type_data, err_stop); 3308 3309 up_read(&block->cb_lock); 3310 if (take_rtnl) 3311 rtnl_unlock(); 3312 return ok_count; 3313 } 3314 EXPORT_SYMBOL(tc_setup_cb_call); 3315 3316 /* Non-destructive filter add. If filter that wasn't already in hardware is 3317 * successfully offloaded, increment block offloads counter. 
On failure, 3318 * previously offloaded filter is considered to be intact and offloads counter 3319 * is not decremented. 3320 */ 3321 3322 int tc_setup_cb_add(struct tcf_block *block, struct tcf_proto *tp, 3323 enum tc_setup_type type, void *type_data, bool err_stop, 3324 u32 *flags, unsigned int *in_hw_count, bool rtnl_held) 3325 { 3326 bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held; 3327 int ok_count; 3328 3329 retry: 3330 if (take_rtnl) 3331 rtnl_lock(); 3332 down_read(&block->cb_lock); 3333 /* Need to obtain rtnl lock if block is bound to devs that require it. 3334 * In block bind code cb_lock is obtained while holding rtnl, so we must 3335 * obtain the locks in same order here. 3336 */ 3337 if (!rtnl_held && !take_rtnl && block->lockeddevcnt) { 3338 up_read(&block->cb_lock); 3339 take_rtnl = true; 3340 goto retry; 3341 } 3342 3343 /* Make sure all netdevs sharing this block are offload-capable. */ 3344 if (block->nooffloaddevcnt && err_stop) { 3345 ok_count = -EOPNOTSUPP; 3346 goto err_unlock; 3347 } 3348 3349 ok_count = __tc_setup_cb_call(block, type, type_data, err_stop); 3350 if (ok_count < 0) 3351 goto err_unlock; 3352 3353 if (tp->ops->hw_add) 3354 tp->ops->hw_add(tp, type_data); 3355 if (ok_count > 0) 3356 tc_cls_offload_cnt_update(block, tp, in_hw_count, flags, 3357 ok_count, true); 3358 err_unlock: 3359 up_read(&block->cb_lock); 3360 if (take_rtnl) 3361 rtnl_unlock(); 3362 return min(ok_count, 0); 3363 } 3364 EXPORT_SYMBOL(tc_setup_cb_add); 3365 3366 /* Destructive filter replace. If filter that wasn't already in hardware is 3367 * successfully offloaded, increment block offload counter. On failure, 3368 * previously offloaded filter is considered to be destroyed and offload counter 3369 * is decremented. 3370 */ 3371 3372 int tc_setup_cb_replace(struct tcf_block *block, struct tcf_proto *tp, 3373 enum tc_setup_type type, void *type_data, bool err_stop, 3374 u32 *old_flags, unsigned int *old_in_hw_count, 3375 u32 *new_flags, unsigned int *new_in_hw_count, 3376 bool rtnl_held) 3377 { 3378 bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held; 3379 int ok_count; 3380 3381 retry: 3382 if (take_rtnl) 3383 rtnl_lock(); 3384 down_read(&block->cb_lock); 3385 /* Need to obtain rtnl lock if block is bound to devs that require it. 3386 * In block bind code cb_lock is obtained while holding rtnl, so we must 3387 * obtain the locks in same order here. 3388 */ 3389 if (!rtnl_held && !take_rtnl && block->lockeddevcnt) { 3390 up_read(&block->cb_lock); 3391 take_rtnl = true; 3392 goto retry; 3393 } 3394 3395 /* Make sure all netdevs sharing this block are offload-capable. */ 3396 if (block->nooffloaddevcnt && err_stop) { 3397 ok_count = -EOPNOTSUPP; 3398 goto err_unlock; 3399 } 3400 3401 tc_cls_offload_cnt_reset(block, tp, old_in_hw_count, old_flags); 3402 if (tp->ops->hw_del) 3403 tp->ops->hw_del(tp, type_data); 3404 3405 ok_count = __tc_setup_cb_call(block, type, type_data, err_stop); 3406 if (ok_count < 0) 3407 goto err_unlock; 3408 3409 if (tp->ops->hw_add) 3410 tp->ops->hw_add(tp, type_data); 3411 if (ok_count > 0) 3412 tc_cls_offload_cnt_update(block, tp, new_in_hw_count, 3413 new_flags, ok_count, true); 3414 err_unlock: 3415 up_read(&block->cb_lock); 3416 if (take_rtnl) 3417 rtnl_unlock(); 3418 return min(ok_count, 0); 3419 } 3420 EXPORT_SYMBOL(tc_setup_cb_replace); 3421 3422 /* Destroy filter and decrement block offload counter, if filter was previously 3423 * offloaded. 
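 * Unlike add/replace, the offload counter is reset unconditionally here,
 * even if some driver callbacks fail.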
3424 */ 3425 3426 int tc_setup_cb_destroy(struct tcf_block *block, struct tcf_proto *tp, 3427 enum tc_setup_type type, void *type_data, bool err_stop, 3428 u32 *flags, unsigned int *in_hw_count, bool rtnl_held) 3429 { 3430 bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held; 3431 int ok_count; 3432 3433 retry: 3434 if (take_rtnl) 3435 rtnl_lock(); 3436 down_read(&block->cb_lock); 3437 /* Need to obtain rtnl lock if block is bound to devs that require it. 3438 * In block bind code cb_lock is obtained while holding rtnl, so we must 3439 * obtain the locks in same order here. 3440 */ 3441 if (!rtnl_held && !take_rtnl && block->lockeddevcnt) { 3442 up_read(&block->cb_lock); 3443 take_rtnl = true; 3444 goto retry; 3445 } 3446 3447 ok_count = __tc_setup_cb_call(block, type, type_data, err_stop); 3448 3449 tc_cls_offload_cnt_reset(block, tp, in_hw_count, flags); 3450 if (tp->ops->hw_del) 3451 tp->ops->hw_del(tp, type_data); 3452 3453 up_read(&block->cb_lock); 3454 if (take_rtnl) 3455 rtnl_unlock(); 3456 return min(ok_count, 0); 3457 } 3458 EXPORT_SYMBOL(tc_setup_cb_destroy); 3459 3460 int tc_setup_cb_reoffload(struct tcf_block *block, struct tcf_proto *tp, 3461 bool add, flow_setup_cb_t *cb, 3462 enum tc_setup_type type, void *type_data, 3463 void *cb_priv, u32 *flags, unsigned int *in_hw_count) 3464 { 3465 int err = cb(type, type_data, cb_priv); 3466 3467 if (err) { 3468 if (add && tc_skip_sw(*flags)) 3469 return err; 3470 } else { 3471 tc_cls_offload_cnt_update(block, tp, in_hw_count, flags, 1, 3472 add); 3473 } 3474 3475 return 0; 3476 } 3477 EXPORT_SYMBOL(tc_setup_cb_reoffload); 3478 3479 static int tcf_act_get_cookie(struct flow_action_entry *entry, 3480 const struct tc_action *act) 3481 { 3482 struct tc_cookie *cookie; 3483 int err = 0; 3484 3485 rcu_read_lock(); 3486 cookie = rcu_dereference(act->act_cookie); 3487 if (cookie) { 3488 entry->cookie = flow_action_cookie_create(cookie->data, 3489 cookie->len, 3490 GFP_ATOMIC); 3491 if (!entry->cookie) 3492 err = -ENOMEM; 3493 } 3494 rcu_read_unlock(); 3495 return err; 3496 } 3497 3498 static void tcf_act_put_cookie(struct flow_action_entry *entry) 3499 { 3500 flow_action_cookie_destroy(entry->cookie); 3501 } 3502 3503 void tc_cleanup_offload_action(struct flow_action *flow_action) 3504 { 3505 struct flow_action_entry *entry; 3506 int i; 3507 3508 flow_action_for_each(i, entry, flow_action) { 3509 tcf_act_put_cookie(entry); 3510 if (entry->destructor) 3511 entry->destructor(entry->destructor_priv); 3512 } 3513 } 3514 EXPORT_SYMBOL(tc_cleanup_offload_action); 3515 3516 static int tc_setup_offload_act(struct tc_action *act, 3517 struct flow_action_entry *entry, 3518 u32 *index_inc, 3519 struct netlink_ext_ack *extack) 3520 { 3521 #ifdef CONFIG_NET_CLS_ACT 3522 if (act->ops->offload_act_setup) { 3523 return act->ops->offload_act_setup(act, entry, index_inc, true, 3524 extack); 3525 } else { 3526 NL_SET_ERR_MSG(extack, "Action does not support offload"); 3527 return -EOPNOTSUPP; 3528 } 3529 #else 3530 return 0; 3531 #endif 3532 } 3533 3534 int tc_setup_action(struct flow_action *flow_action, 3535 struct tc_action *actions[], 3536 struct netlink_ext_ack *extack) 3537 { 3538 int i, j, k, index, err = 0; 3539 struct tc_action *act; 3540 3541 BUILD_BUG_ON(TCA_ACT_HW_STATS_ANY != FLOW_ACTION_HW_STATS_ANY); 3542 BUILD_BUG_ON(TCA_ACT_HW_STATS_IMMEDIATE != FLOW_ACTION_HW_STATS_IMMEDIATE); 3543 BUILD_BUG_ON(TCA_ACT_HW_STATS_DELAYED != FLOW_ACTION_HW_STATS_DELAYED); 3544 3545 if (!actions) 3546 return 0; 3547 3548 j = 0; 3549 
tcf_act_for_each_action(i, act, actions) { 3550 struct flow_action_entry *entry; 3551 3552 entry = &flow_action->entries[j]; 3553 spin_lock_bh(&act->tcfa_lock); 3554 err = tcf_act_get_cookie(entry, act); 3555 if (err) 3556 goto err_out_locked; 3557 3558 index = 0; 3559 err = tc_setup_offload_act(act, entry, &index, extack); 3560 if (err) 3561 goto err_out_locked; 3562 3563 for (k = 0; k < index ; k++) { 3564 entry[k].hw_stats = tc_act_hw_stats(act->hw_stats); 3565 entry[k].hw_index = act->tcfa_index; 3566 } 3567 3568 j += index; 3569 3570 spin_unlock_bh(&act->tcfa_lock); 3571 } 3572 3573 err_out: 3574 if (err) 3575 tc_cleanup_offload_action(flow_action); 3576 3577 return err; 3578 err_out_locked: 3579 spin_unlock_bh(&act->tcfa_lock); 3580 goto err_out; 3581 } 3582 3583 int tc_setup_offload_action(struct flow_action *flow_action, 3584 const struct tcf_exts *exts, 3585 struct netlink_ext_ack *extack) 3586 { 3587 #ifdef CONFIG_NET_CLS_ACT 3588 if (!exts) 3589 return 0; 3590 3591 return tc_setup_action(flow_action, exts->actions, extack); 3592 #else 3593 return 0; 3594 #endif 3595 } 3596 EXPORT_SYMBOL(tc_setup_offload_action); 3597 3598 unsigned int tcf_exts_num_actions(struct tcf_exts *exts) 3599 { 3600 unsigned int num_acts = 0; 3601 struct tc_action *act; 3602 int i; 3603 3604 tcf_exts_for_each_action(i, act, exts) { 3605 if (is_tcf_pedit(act)) 3606 num_acts += tcf_pedit_nkeys(act); 3607 else 3608 num_acts++; 3609 } 3610 return num_acts; 3611 } 3612 EXPORT_SYMBOL(tcf_exts_num_actions); 3613 3614 #ifdef CONFIG_NET_CLS_ACT 3615 static int tcf_qevent_parse_block_index(struct nlattr *block_index_attr, 3616 u32 *p_block_index, 3617 struct netlink_ext_ack *extack) 3618 { 3619 *p_block_index = nla_get_u32(block_index_attr); 3620 if (!*p_block_index) { 3621 NL_SET_ERR_MSG(extack, "Block number may not be zero"); 3622 return -EINVAL; 3623 } 3624 3625 return 0; 3626 } 3627 3628 int tcf_qevent_init(struct tcf_qevent *qe, struct Qdisc *sch, 3629 enum flow_block_binder_type binder_type, 3630 struct nlattr *block_index_attr, 3631 struct netlink_ext_ack *extack) 3632 { 3633 u32 block_index; 3634 int err; 3635 3636 if (!block_index_attr) 3637 return 0; 3638 3639 err = tcf_qevent_parse_block_index(block_index_attr, &block_index, extack); 3640 if (err) 3641 return err; 3642 3643 if (!block_index) 3644 return 0; 3645 3646 qe->info.binder_type = binder_type; 3647 qe->info.chain_head_change = tcf_chain_head_change_dflt; 3648 qe->info.chain_head_change_priv = &qe->filter_chain; 3649 qe->info.block_index = block_index; 3650 3651 return tcf_block_get_ext(&qe->block, sch, &qe->info, extack); 3652 } 3653 EXPORT_SYMBOL(tcf_qevent_init); 3654 3655 void tcf_qevent_destroy(struct tcf_qevent *qe, struct Qdisc *sch) 3656 { 3657 if (qe->info.block_index) 3658 tcf_block_put_ext(qe->block, sch, &qe->info); 3659 } 3660 EXPORT_SYMBOL(tcf_qevent_destroy); 3661 3662 int tcf_qevent_validate_change(struct tcf_qevent *qe, struct nlattr *block_index_attr, 3663 struct netlink_ext_ack *extack) 3664 { 3665 u32 block_index; 3666 int err; 3667 3668 if (!block_index_attr) 3669 return 0; 3670 3671 err = tcf_qevent_parse_block_index(block_index_attr, &block_index, extack); 3672 if (err) 3673 return err; 3674 3675 /* Bounce newly-configured block or change in block. 
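 * Once a qevent is bound to a block, switching it to a different block
 * index is rejected.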
*/ 3676 if (block_index != qe->info.block_index) { 3677 NL_SET_ERR_MSG(extack, "Change of blocks is not supported"); 3678 return -EINVAL; 3679 } 3680 3681 return 0; 3682 } 3683 EXPORT_SYMBOL(tcf_qevent_validate_change); 3684 3685 struct sk_buff *tcf_qevent_handle(struct tcf_qevent *qe, struct Qdisc *sch, struct sk_buff *skb, 3686 struct sk_buff **to_free, int *ret) 3687 { 3688 struct tcf_result cl_res; 3689 struct tcf_proto *fl; 3690 3691 if (!qe->info.block_index) 3692 return skb; 3693 3694 fl = rcu_dereference_bh(qe->filter_chain); 3695 3696 switch (tcf_classify(skb, NULL, fl, &cl_res, false)) { 3697 case TC_ACT_SHOT: 3698 qdisc_qstats_drop(sch); 3699 __qdisc_drop(skb, to_free); 3700 *ret = __NET_XMIT_BYPASS; 3701 return NULL; 3702 case TC_ACT_STOLEN: 3703 case TC_ACT_QUEUED: 3704 case TC_ACT_TRAP: 3705 __qdisc_drop(skb, to_free); 3706 *ret = __NET_XMIT_STOLEN; 3707 return NULL; 3708 case TC_ACT_REDIRECT: 3709 skb_do_redirect(skb); 3710 *ret = __NET_XMIT_STOLEN; 3711 return NULL; 3712 } 3713 3714 return skb; 3715 } 3716 EXPORT_SYMBOL(tcf_qevent_handle); 3717 3718 int tcf_qevent_dump(struct sk_buff *skb, int attr_name, struct tcf_qevent *qe) 3719 { 3720 if (!qe->info.block_index) 3721 return 0; 3722 return nla_put_u32(skb, attr_name, qe->info.block_index); 3723 } 3724 EXPORT_SYMBOL(tcf_qevent_dump); 3725 #endif 3726 3727 static __net_init int tcf_net_init(struct net *net) 3728 { 3729 struct tcf_net *tn = net_generic(net, tcf_net_id); 3730 3731 spin_lock_init(&tn->idr_lock); 3732 idr_init(&tn->idr); 3733 return 0; 3734 } 3735 3736 static void __net_exit tcf_net_exit(struct net *net) 3737 { 3738 struct tcf_net *tn = net_generic(net, tcf_net_id); 3739 3740 idr_destroy(&tn->idr); 3741 } 3742 3743 static struct pernet_operations tcf_net_ops = { 3744 .init = tcf_net_init, 3745 .exit = tcf_net_exit, 3746 .id = &tcf_net_id, 3747 .size = sizeof(struct tcf_net), 3748 }; 3749 3750 static int __init tc_filter_init(void) 3751 { 3752 int err; 3753 3754 tc_filter_wq = alloc_ordered_workqueue("tc_filter_workqueue", 0); 3755 if (!tc_filter_wq) 3756 return -ENOMEM; 3757 3758 err = register_pernet_subsys(&tcf_net_ops); 3759 if (err) 3760 goto err_register_pernet_subsys; 3761 3762 rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_new_tfilter, NULL, 3763 RTNL_FLAG_DOIT_UNLOCKED); 3764 rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_del_tfilter, NULL, 3765 RTNL_FLAG_DOIT_UNLOCKED); 3766 rtnl_register(PF_UNSPEC, RTM_GETTFILTER, tc_get_tfilter, 3767 tc_dump_tfilter, RTNL_FLAG_DOIT_UNLOCKED); 3768 rtnl_register(PF_UNSPEC, RTM_NEWCHAIN, tc_ctl_chain, NULL, 0); 3769 rtnl_register(PF_UNSPEC, RTM_DELCHAIN, tc_ctl_chain, NULL, 0); 3770 rtnl_register(PF_UNSPEC, RTM_GETCHAIN, tc_ctl_chain, 3771 tc_dump_chain, 0); 3772 3773 return 0; 3774 3775 err_register_pernet_subsys: 3776 destroy_workqueue(tc_filter_wq); 3777 return err; 3778 } 3779 3780 subsys_initcall(tc_filter_init); 3781
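/* Usage sketch for the qevent helpers above (illustrative only, not part
 * of this file): a qdisc wires a qevent up roughly as follows. Names
 * containing "example" are hypothetical placeholders; the binder type
 * shown is the one used by sch_red's early-drop qevent.
 *
 *	err = tcf_qevent_init(&q->example_qe, sch,
 *			      FLOW_BLOCK_BINDER_TYPE_RED_EARLY_DROP,
 *			      tb[TCA_EXAMPLE_QE_BLOCK], extack);
 *	if (err)
 *		return err;
 *
 *	// in the enqueue path (softirq/RCU-bh context):
 *	skb = tcf_qevent_handle(&q->example_qe, sch, skb, to_free, &ret);
 *	if (!skb)
 *		return ret;
 *
 *	// in ->destroy():
 *	tcf_qevent_destroy(&q->example_qe, sch);
 */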