1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * net/sched/cls_api.c Packet classifier API. 4 * 5 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru> 6 * 7 * Changes: 8 * 9 * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support 10 */ 11 12 #include <linux/module.h> 13 #include <linux/types.h> 14 #include <linux/kernel.h> 15 #include <linux/string.h> 16 #include <linux/errno.h> 17 #include <linux/err.h> 18 #include <linux/skbuff.h> 19 #include <linux/init.h> 20 #include <linux/kmod.h> 21 #include <linux/slab.h> 22 #include <linux/idr.h> 23 #include <linux/jhash.h> 24 #include <linux/rculist.h> 25 #include <net/net_namespace.h> 26 #include <net/sock.h> 27 #include <net/netlink.h> 28 #include <net/pkt_sched.h> 29 #include <net/pkt_cls.h> 30 #include <net/tc_act/tc_pedit.h> 31 #include <net/tc_act/tc_mirred.h> 32 #include <net/tc_act/tc_vlan.h> 33 #include <net/tc_act/tc_tunnel_key.h> 34 #include <net/tc_act/tc_csum.h> 35 #include <net/tc_act/tc_gact.h> 36 #include <net/tc_act/tc_police.h> 37 #include <net/tc_act/tc_sample.h> 38 #include <net/tc_act/tc_skbedit.h> 39 #include <net/tc_act/tc_ct.h> 40 #include <net/tc_act/tc_mpls.h> 41 #include <net/tc_act/tc_gate.h> 42 #include <net/flow_offload.h> 43 44 extern const struct nla_policy rtm_tca_policy[TCA_MAX + 1]; 45 46 /* The list of all installed classifier types */ 47 static LIST_HEAD(tcf_proto_base); 48 49 /* Protects list of registered TC modules. It is pure SMP lock. */ 50 static DEFINE_RWLOCK(cls_mod_lock); 51 52 static u32 destroy_obj_hashfn(const struct tcf_proto *tp) 53 { 54 return jhash_3words(tp->chain->index, tp->prio, 55 (__force __u32)tp->protocol, 0); 56 } 57 58 static void tcf_proto_signal_destroying(struct tcf_chain *chain, 59 struct tcf_proto *tp) 60 { 61 struct tcf_block *block = chain->block; 62 63 mutex_lock(&block->proto_destroy_lock); 64 hash_add_rcu(block->proto_destroy_ht, &tp->destroy_ht_node, 65 destroy_obj_hashfn(tp)); 66 mutex_unlock(&block->proto_destroy_lock); 67 } 68 69 static bool tcf_proto_cmp(const struct tcf_proto *tp1, 70 const struct tcf_proto *tp2) 71 { 72 return tp1->chain->index == tp2->chain->index && 73 tp1->prio == tp2->prio && 74 tp1->protocol == tp2->protocol; 75 } 76 77 static bool tcf_proto_exists_destroying(struct tcf_chain *chain, 78 struct tcf_proto *tp) 79 { 80 u32 hash = destroy_obj_hashfn(tp); 81 struct tcf_proto *iter; 82 bool found = false; 83 84 rcu_read_lock(); 85 hash_for_each_possible_rcu(chain->block->proto_destroy_ht, iter, 86 destroy_ht_node, hash) { 87 if (tcf_proto_cmp(tp, iter)) { 88 found = true; 89 break; 90 } 91 } 92 rcu_read_unlock(); 93 94 return found; 95 } 96 97 static void 98 tcf_proto_signal_destroyed(struct tcf_chain *chain, struct tcf_proto *tp) 99 { 100 struct tcf_block *block = chain->block; 101 102 mutex_lock(&block->proto_destroy_lock); 103 if (hash_hashed(&tp->destroy_ht_node)) 104 hash_del_rcu(&tp->destroy_ht_node); 105 mutex_unlock(&block->proto_destroy_lock); 106 } 107 108 /* Find classifier type by string name */ 109 110 static const struct tcf_proto_ops *__tcf_proto_lookup_ops(const char *kind) 111 { 112 const struct tcf_proto_ops *t, *res = NULL; 113 114 if (kind) { 115 read_lock(&cls_mod_lock); 116 list_for_each_entry(t, &tcf_proto_base, head) { 117 if (strcmp(kind, t->kind) == 0) { 118 if (try_module_get(t->owner)) 119 res = t; 120 break; 121 } 122 } 123 read_unlock(&cls_mod_lock); 124 } 125 return res; 126 } 127 128 static const struct tcf_proto_ops * 129 tcf_proto_lookup_ops(const char *kind, bool rtnl_held, 130 struct netlink_ext_ack *extack) 131 { 132 const struct tcf_proto_ops *ops; 133 134 ops = __tcf_proto_lookup_ops(kind); 135 if (ops) 136 return ops; 137 #ifdef CONFIG_MODULES 138 if (rtnl_held) 139 rtnl_unlock(); 140 request_module("cls_%s", kind); 141 if (rtnl_held) 142 rtnl_lock(); 143 ops = __tcf_proto_lookup_ops(kind); 144 /* We dropped the RTNL semaphore in order to perform 145 * the module load. So, even if we succeeded in loading 146 * the module we have to replay the request. We indicate 147 * this using -EAGAIN. 148 */ 149 if (ops) { 150 module_put(ops->owner); 151 return ERR_PTR(-EAGAIN); 152 } 153 #endif 154 NL_SET_ERR_MSG(extack, "TC classifier not found"); 155 return ERR_PTR(-ENOENT); 156 } 157 158 /* Register(unregister) new classifier type */ 159 160 int register_tcf_proto_ops(struct tcf_proto_ops *ops) 161 { 162 struct tcf_proto_ops *t; 163 int rc = -EEXIST; 164 165 write_lock(&cls_mod_lock); 166 list_for_each_entry(t, &tcf_proto_base, head) 167 if (!strcmp(ops->kind, t->kind)) 168 goto out; 169 170 list_add_tail(&ops->head, &tcf_proto_base); 171 rc = 0; 172 out: 173 write_unlock(&cls_mod_lock); 174 return rc; 175 } 176 EXPORT_SYMBOL(register_tcf_proto_ops); 177 178 static struct workqueue_struct *tc_filter_wq; 179 180 int unregister_tcf_proto_ops(struct tcf_proto_ops *ops) 181 { 182 struct tcf_proto_ops *t; 183 int rc = -ENOENT; 184 185 /* Wait for outstanding call_rcu()s, if any, from a 186 * tcf_proto_ops's destroy() handler. 187 */ 188 rcu_barrier(); 189 flush_workqueue(tc_filter_wq); 190 191 write_lock(&cls_mod_lock); 192 list_for_each_entry(t, &tcf_proto_base, head) { 193 if (t == ops) { 194 list_del(&t->head); 195 rc = 0; 196 break; 197 } 198 } 199 write_unlock(&cls_mod_lock); 200 return rc; 201 } 202 EXPORT_SYMBOL(unregister_tcf_proto_ops); 203 204 bool tcf_queue_work(struct rcu_work *rwork, work_func_t func) 205 { 206 INIT_RCU_WORK(rwork, func); 207 return queue_rcu_work(tc_filter_wq, rwork); 208 } 209 EXPORT_SYMBOL(tcf_queue_work); 210 211 /* Select new prio value from the range, managed by kernel. */ 212 213 static inline u32 tcf_auto_prio(struct tcf_proto *tp) 214 { 215 u32 first = TC_H_MAKE(0xC0000000U, 0U); 216 217 if (tp) 218 first = tp->prio - 1; 219 220 return TC_H_MAJ(first); 221 } 222 223 static bool tcf_proto_check_kind(struct nlattr *kind, char *name) 224 { 225 if (kind) 226 return nla_strscpy(name, kind, IFNAMSIZ) < 0; 227 memset(name, 0, IFNAMSIZ); 228 return false; 229 } 230 231 static bool tcf_proto_is_unlocked(const char *kind) 232 { 233 const struct tcf_proto_ops *ops; 234 bool ret; 235 236 if (strlen(kind) == 0) 237 return false; 238 239 ops = tcf_proto_lookup_ops(kind, false, NULL); 240 /* On error return false to take rtnl lock. Proto lookup/create 241 * functions will perform lookup again and properly handle errors. 242 */ 243 if (IS_ERR(ops)) 244 return false; 245 246 ret = !!(ops->flags & TCF_PROTO_OPS_DOIT_UNLOCKED); 247 module_put(ops->owner); 248 return ret; 249 } 250 251 static struct tcf_proto *tcf_proto_create(const char *kind, u32 protocol, 252 u32 prio, struct tcf_chain *chain, 253 bool rtnl_held, 254 struct netlink_ext_ack *extack) 255 { 256 struct tcf_proto *tp; 257 int err; 258 259 tp = kzalloc(sizeof(*tp), GFP_KERNEL); 260 if (!tp) 261 return ERR_PTR(-ENOBUFS); 262 263 tp->ops = tcf_proto_lookup_ops(kind, rtnl_held, extack); 264 if (IS_ERR(tp->ops)) { 265 err = PTR_ERR(tp->ops); 266 goto errout; 267 } 268 tp->classify = tp->ops->classify; 269 tp->protocol = protocol; 270 tp->prio = prio; 271 tp->chain = chain; 272 spin_lock_init(&tp->lock); 273 refcount_set(&tp->refcnt, 1); 274 275 err = tp->ops->init(tp); 276 if (err) { 277 module_put(tp->ops->owner); 278 goto errout; 279 } 280 return tp; 281 282 errout: 283 kfree(tp); 284 return ERR_PTR(err); 285 } 286 287 static void tcf_proto_get(struct tcf_proto *tp) 288 { 289 refcount_inc(&tp->refcnt); 290 } 291 292 static void tcf_chain_put(struct tcf_chain *chain); 293 294 static void tcf_proto_destroy(struct tcf_proto *tp, bool rtnl_held, 295 bool sig_destroy, struct netlink_ext_ack *extack) 296 { 297 tp->ops->destroy(tp, rtnl_held, extack); 298 if (sig_destroy) 299 tcf_proto_signal_destroyed(tp->chain, tp); 300 tcf_chain_put(tp->chain); 301 module_put(tp->ops->owner); 302 kfree_rcu(tp, rcu); 303 } 304 305 static void tcf_proto_put(struct tcf_proto *tp, bool rtnl_held, 306 struct netlink_ext_ack *extack) 307 { 308 if (refcount_dec_and_test(&tp->refcnt)) 309 tcf_proto_destroy(tp, rtnl_held, true, extack); 310 } 311 312 static bool tcf_proto_check_delete(struct tcf_proto *tp) 313 { 314 if (tp->ops->delete_empty) 315 return tp->ops->delete_empty(tp); 316 317 tp->deleting = true; 318 return tp->deleting; 319 } 320 321 static void tcf_proto_mark_delete(struct tcf_proto *tp) 322 { 323 spin_lock(&tp->lock); 324 tp->deleting = true; 325 spin_unlock(&tp->lock); 326 } 327 328 static bool tcf_proto_is_deleting(struct tcf_proto *tp) 329 { 330 bool deleting; 331 332 spin_lock(&tp->lock); 333 deleting = tp->deleting; 334 spin_unlock(&tp->lock); 335 336 return deleting; 337 } 338 339 #define ASSERT_BLOCK_LOCKED(block) \ 340 lockdep_assert_held(&(block)->lock) 341 342 struct tcf_filter_chain_list_item { 343 struct list_head list; 344 tcf_chain_head_change_t *chain_head_change; 345 void *chain_head_change_priv; 346 }; 347 348 static struct tcf_chain *tcf_chain_create(struct tcf_block *block, 349 u32 chain_index) 350 { 351 struct tcf_chain *chain; 352 353 ASSERT_BLOCK_LOCKED(block); 354 355 chain = kzalloc(sizeof(*chain), GFP_KERNEL); 356 if (!chain) 357 return NULL; 358 list_add_tail_rcu(&chain->list, &block->chain_list); 359 mutex_init(&chain->filter_chain_lock); 360 chain->block = block; 361 chain->index = chain_index; 362 chain->refcnt = 1; 363 if (!chain->index) 364 block->chain0.chain = chain; 365 return chain; 366 } 367 368 static void tcf_chain_head_change_item(struct tcf_filter_chain_list_item *item, 369 struct tcf_proto *tp_head) 370 { 371 if (item->chain_head_change) 372 item->chain_head_change(tp_head, item->chain_head_change_priv); 373 } 374 375 static void tcf_chain0_head_change(struct tcf_chain *chain, 376 struct tcf_proto *tp_head) 377 { 378 struct tcf_filter_chain_list_item *item; 379 struct tcf_block *block = chain->block; 380 381 if (chain->index) 382 return; 383 384 mutex_lock(&block->lock); 385 list_for_each_entry(item, &block->chain0.filter_chain_list, list) 386 tcf_chain_head_change_item(item, tp_head); 387 mutex_unlock(&block->lock); 388 } 389 390 /* Returns true if block can be safely freed. */ 391 392 static bool tcf_chain_detach(struct tcf_chain *chain) 393 { 394 struct tcf_block *block = chain->block; 395 396 ASSERT_BLOCK_LOCKED(block); 397 398 list_del_rcu(&chain->list); 399 if (!chain->index) 400 block->chain0.chain = NULL; 401 402 if (list_empty(&block->chain_list) && 403 refcount_read(&block->refcnt) == 0) 404 return true; 405 406 return false; 407 } 408 409 static void tcf_block_destroy(struct tcf_block *block) 410 { 411 mutex_destroy(&block->lock); 412 mutex_destroy(&block->proto_destroy_lock); 413 kfree_rcu(block, rcu); 414 } 415 416 static void tcf_chain_destroy(struct tcf_chain *chain, bool free_block) 417 { 418 struct tcf_block *block = chain->block; 419 420 mutex_destroy(&chain->filter_chain_lock); 421 kfree_rcu(chain, rcu); 422 if (free_block) 423 tcf_block_destroy(block); 424 } 425 426 static void tcf_chain_hold(struct tcf_chain *chain) 427 { 428 ASSERT_BLOCK_LOCKED(chain->block); 429 430 ++chain->refcnt; 431 } 432 433 static bool tcf_chain_held_by_acts_only(struct tcf_chain *chain) 434 { 435 ASSERT_BLOCK_LOCKED(chain->block); 436 437 /* In case all the references are action references, this 438 * chain should not be shown to the user. 439 */ 440 return chain->refcnt == chain->action_refcnt; 441 } 442 443 static struct tcf_chain *tcf_chain_lookup(struct tcf_block *block, 444 u32 chain_index) 445 { 446 struct tcf_chain *chain; 447 448 ASSERT_BLOCK_LOCKED(block); 449 450 list_for_each_entry(chain, &block->chain_list, list) { 451 if (chain->index == chain_index) 452 return chain; 453 } 454 return NULL; 455 } 456 457 #if IS_ENABLED(CONFIG_NET_TC_SKB_EXT) 458 static struct tcf_chain *tcf_chain_lookup_rcu(const struct tcf_block *block, 459 u32 chain_index) 460 { 461 struct tcf_chain *chain; 462 463 list_for_each_entry_rcu(chain, &block->chain_list, list) { 464 if (chain->index == chain_index) 465 return chain; 466 } 467 return NULL; 468 } 469 #endif 470 471 static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb, 472 u32 seq, u16 flags, int event, bool unicast); 473 474 static struct tcf_chain *__tcf_chain_get(struct tcf_block *block, 475 u32 chain_index, bool create, 476 bool by_act) 477 { 478 struct tcf_chain *chain = NULL; 479 bool is_first_reference; 480 481 mutex_lock(&block->lock); 482 chain = tcf_chain_lookup(block, chain_index); 483 if (chain) { 484 tcf_chain_hold(chain); 485 } else { 486 if (!create) 487 goto errout; 488 chain = tcf_chain_create(block, chain_index); 489 if (!chain) 490 goto errout; 491 } 492 493 if (by_act) 494 ++chain->action_refcnt; 495 is_first_reference = chain->refcnt - chain->action_refcnt == 1; 496 mutex_unlock(&block->lock); 497 498 /* Send notification only in case we got the first 499 * non-action reference. Until then, the chain acts only as 500 * a placeholder for actions pointing to it and user ought 501 * not know about them. 502 */ 503 if (is_first_reference && !by_act) 504 tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL, 505 RTM_NEWCHAIN, false); 506 507 return chain; 508 509 errout: 510 mutex_unlock(&block->lock); 511 return chain; 512 } 513 514 static struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index, 515 bool create) 516 { 517 return __tcf_chain_get(block, chain_index, create, false); 518 } 519 520 struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block, u32 chain_index) 521 { 522 return __tcf_chain_get(block, chain_index, true, true); 523 } 524 EXPORT_SYMBOL(tcf_chain_get_by_act); 525 526 static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops, 527 void *tmplt_priv); 528 static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops, 529 void *tmplt_priv, u32 chain_index, 530 struct tcf_block *block, struct sk_buff *oskb, 531 u32 seq, u16 flags, bool unicast); 532 533 static void __tcf_chain_put(struct tcf_chain *chain, bool by_act, 534 bool explicitly_created) 535 { 536 struct tcf_block *block = chain->block; 537 const struct tcf_proto_ops *tmplt_ops; 538 bool free_block = false; 539 unsigned int refcnt; 540 void *tmplt_priv; 541 542 mutex_lock(&block->lock); 543 if (explicitly_created) { 544 if (!chain->explicitly_created) { 545 mutex_unlock(&block->lock); 546 return; 547 } 548 chain->explicitly_created = false; 549 } 550 551 if (by_act) 552 chain->action_refcnt--; 553 554 /* tc_chain_notify_delete can't be called while holding block lock. 555 * However, when block is unlocked chain can be changed concurrently, so 556 * save these to temporary variables. 557 */ 558 refcnt = --chain->refcnt; 559 tmplt_ops = chain->tmplt_ops; 560 tmplt_priv = chain->tmplt_priv; 561 562 /* The last dropped non-action reference will trigger notification. */ 563 if (refcnt - chain->action_refcnt == 0 && !by_act) { 564 tc_chain_notify_delete(tmplt_ops, tmplt_priv, chain->index, 565 block, NULL, 0, 0, false); 566 /* Last reference to chain, no need to lock. */ 567 chain->flushing = false; 568 } 569 570 if (refcnt == 0) 571 free_block = tcf_chain_detach(chain); 572 mutex_unlock(&block->lock); 573 574 if (refcnt == 0) { 575 tc_chain_tmplt_del(tmplt_ops, tmplt_priv); 576 tcf_chain_destroy(chain, free_block); 577 } 578 } 579 580 static void tcf_chain_put(struct tcf_chain *chain) 581 { 582 __tcf_chain_put(chain, false, false); 583 } 584 585 void tcf_chain_put_by_act(struct tcf_chain *chain) 586 { 587 __tcf_chain_put(chain, true, false); 588 } 589 EXPORT_SYMBOL(tcf_chain_put_by_act); 590 591 static void tcf_chain_put_explicitly_created(struct tcf_chain *chain) 592 { 593 __tcf_chain_put(chain, false, true); 594 } 595 596 static void tcf_chain_flush(struct tcf_chain *chain, bool rtnl_held) 597 { 598 struct tcf_proto *tp, *tp_next; 599 600 mutex_lock(&chain->filter_chain_lock); 601 tp = tcf_chain_dereference(chain->filter_chain, chain); 602 while (tp) { 603 tp_next = rcu_dereference_protected(tp->next, 1); 604 tcf_proto_signal_destroying(chain, tp); 605 tp = tp_next; 606 } 607 tp = tcf_chain_dereference(chain->filter_chain, chain); 608 RCU_INIT_POINTER(chain->filter_chain, NULL); 609 tcf_chain0_head_change(chain, NULL); 610 chain->flushing = true; 611 mutex_unlock(&chain->filter_chain_lock); 612 613 while (tp) { 614 tp_next = rcu_dereference_protected(tp->next, 1); 615 tcf_proto_put(tp, rtnl_held, NULL); 616 tp = tp_next; 617 } 618 } 619 620 static int tcf_block_setup(struct tcf_block *block, 621 struct flow_block_offload *bo); 622 623 static void tcf_block_offload_init(struct flow_block_offload *bo, 624 struct net_device *dev, struct Qdisc *sch, 625 enum flow_block_command command, 626 enum flow_block_binder_type binder_type, 627 struct flow_block *flow_block, 628 bool shared, struct netlink_ext_ack *extack) 629 { 630 bo->net = dev_net(dev); 631 bo->command = command; 632 bo->binder_type = binder_type; 633 bo->block = flow_block; 634 bo->block_shared = shared; 635 bo->extack = extack; 636 bo->sch = sch; 637 bo->cb_list_head = &flow_block->cb_list; 638 INIT_LIST_HEAD(&bo->cb_list); 639 } 640 641 static void tcf_block_unbind(struct tcf_block *block, 642 struct flow_block_offload *bo); 643 644 static void tc_block_indr_cleanup(struct flow_block_cb *block_cb) 645 { 646 struct tcf_block *block = block_cb->indr.data; 647 struct net_device *dev = block_cb->indr.dev; 648 struct Qdisc *sch = block_cb->indr.sch; 649 struct netlink_ext_ack extack = {}; 650 struct flow_block_offload bo = {}; 651 652 tcf_block_offload_init(&bo, dev, sch, FLOW_BLOCK_UNBIND, 653 block_cb->indr.binder_type, 654 &block->flow_block, tcf_block_shared(block), 655 &extack); 656 rtnl_lock(); 657 down_write(&block->cb_lock); 658 list_del(&block_cb->driver_list); 659 list_move(&block_cb->list, &bo.cb_list); 660 tcf_block_unbind(block, &bo); 661 up_write(&block->cb_lock); 662 rtnl_unlock(); 663 } 664 665 static bool tcf_block_offload_in_use(struct tcf_block *block) 666 { 667 return atomic_read(&block->offloadcnt); 668 } 669 670 static int tcf_block_offload_cmd(struct tcf_block *block, 671 struct net_device *dev, struct Qdisc *sch, 672 struct tcf_block_ext_info *ei, 673 enum flow_block_command command, 674 struct netlink_ext_ack *extack) 675 { 676 struct flow_block_offload bo = {}; 677 678 tcf_block_offload_init(&bo, dev, sch, command, ei->binder_type, 679 &block->flow_block, tcf_block_shared(block), 680 extack); 681 682 if (dev->netdev_ops->ndo_setup_tc) { 683 int err; 684 685 err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo); 686 if (err < 0) { 687 if (err != -EOPNOTSUPP) 688 NL_SET_ERR_MSG(extack, "Driver ndo_setup_tc failed"); 689 return err; 690 } 691 692 return tcf_block_setup(block, &bo); 693 } 694 695 flow_indr_dev_setup_offload(dev, sch, TC_SETUP_BLOCK, block, &bo, 696 tc_block_indr_cleanup); 697 tcf_block_setup(block, &bo); 698 699 return -EOPNOTSUPP; 700 } 701 702 static int tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q, 703 struct tcf_block_ext_info *ei, 704 struct netlink_ext_ack *extack) 705 { 706 struct net_device *dev = q->dev_queue->dev; 707 int err; 708 709 down_write(&block->cb_lock); 710 711 /* If tc offload feature is disabled and the block we try to bind 712 * to already has some offloaded filters, forbid to bind. 713 */ 714 if (dev->netdev_ops->ndo_setup_tc && 715 !tc_can_offload(dev) && 716 tcf_block_offload_in_use(block)) { 717 NL_SET_ERR_MSG(extack, "Bind to offloaded block failed as dev has offload disabled"); 718 err = -EOPNOTSUPP; 719 goto err_unlock; 720 } 721 722 err = tcf_block_offload_cmd(block, dev, q, ei, FLOW_BLOCK_BIND, extack); 723 if (err == -EOPNOTSUPP) 724 goto no_offload_dev_inc; 725 if (err) 726 goto err_unlock; 727 728 up_write(&block->cb_lock); 729 return 0; 730 731 no_offload_dev_inc: 732 if (tcf_block_offload_in_use(block)) 733 goto err_unlock; 734 735 err = 0; 736 block->nooffloaddevcnt++; 737 err_unlock: 738 up_write(&block->cb_lock); 739 return err; 740 } 741 742 static void tcf_block_offload_unbind(struct tcf_block *block, struct Qdisc *q, 743 struct tcf_block_ext_info *ei) 744 { 745 struct net_device *dev = q->dev_queue->dev; 746 int err; 747 748 down_write(&block->cb_lock); 749 err = tcf_block_offload_cmd(block, dev, q, ei, FLOW_BLOCK_UNBIND, NULL); 750 if (err == -EOPNOTSUPP) 751 goto no_offload_dev_dec; 752 up_write(&block->cb_lock); 753 return; 754 755 no_offload_dev_dec: 756 WARN_ON(block->nooffloaddevcnt-- == 0); 757 up_write(&block->cb_lock); 758 } 759 760 static int 761 tcf_chain0_head_change_cb_add(struct tcf_block *block, 762 struct tcf_block_ext_info *ei, 763 struct netlink_ext_ack *extack) 764 { 765 struct tcf_filter_chain_list_item *item; 766 struct tcf_chain *chain0; 767 768 item = kmalloc(sizeof(*item), GFP_KERNEL); 769 if (!item) { 770 NL_SET_ERR_MSG(extack, "Memory allocation for head change callback item failed"); 771 return -ENOMEM; 772 } 773 item->chain_head_change = ei->chain_head_change; 774 item->chain_head_change_priv = ei->chain_head_change_priv; 775 776 mutex_lock(&block->lock); 777 chain0 = block->chain0.chain; 778 if (chain0) 779 tcf_chain_hold(chain0); 780 else 781 list_add(&item->list, &block->chain0.filter_chain_list); 782 mutex_unlock(&block->lock); 783 784 if (chain0) { 785 struct tcf_proto *tp_head; 786 787 mutex_lock(&chain0->filter_chain_lock); 788 789 tp_head = tcf_chain_dereference(chain0->filter_chain, chain0); 790 if (tp_head) 791 tcf_chain_head_change_item(item, tp_head); 792 793 mutex_lock(&block->lock); 794 list_add(&item->list, &block->chain0.filter_chain_list); 795 mutex_unlock(&block->lock); 796 797 mutex_unlock(&chain0->filter_chain_lock); 798 tcf_chain_put(chain0); 799 } 800 801 return 0; 802 } 803 804 static void 805 tcf_chain0_head_change_cb_del(struct tcf_block *block, 806 struct tcf_block_ext_info *ei) 807 { 808 struct tcf_filter_chain_list_item *item; 809 810 mutex_lock(&block->lock); 811 list_for_each_entry(item, &block->chain0.filter_chain_list, list) { 812 if ((!ei->chain_head_change && !ei->chain_head_change_priv) || 813 (item->chain_head_change == ei->chain_head_change && 814 item->chain_head_change_priv == ei->chain_head_change_priv)) { 815 if (block->chain0.chain) 816 tcf_chain_head_change_item(item, NULL); 817 list_del(&item->list); 818 mutex_unlock(&block->lock); 819 820 kfree(item); 821 return; 822 } 823 } 824 mutex_unlock(&block->lock); 825 WARN_ON(1); 826 } 827 828 struct tcf_net { 829 spinlock_t idr_lock; /* Protects idr */ 830 struct idr idr; 831 }; 832 833 static unsigned int tcf_net_id; 834 835 static int tcf_block_insert(struct tcf_block *block, struct net *net, 836 struct netlink_ext_ack *extack) 837 { 838 struct tcf_net *tn = net_generic(net, tcf_net_id); 839 int err; 840 841 idr_preload(GFP_KERNEL); 842 spin_lock(&tn->idr_lock); 843 err = idr_alloc_u32(&tn->idr, block, &block->index, block->index, 844 GFP_NOWAIT); 845 spin_unlock(&tn->idr_lock); 846 idr_preload_end(); 847 848 return err; 849 } 850 851 static void tcf_block_remove(struct tcf_block *block, struct net *net) 852 { 853 struct tcf_net *tn = net_generic(net, tcf_net_id); 854 855 spin_lock(&tn->idr_lock); 856 idr_remove(&tn->idr, block->index); 857 spin_unlock(&tn->idr_lock); 858 } 859 860 static struct tcf_block *tcf_block_create(struct net *net, struct Qdisc *q, 861 u32 block_index, 862 struct netlink_ext_ack *extack) 863 { 864 struct tcf_block *block; 865 866 block = kzalloc(sizeof(*block), GFP_KERNEL); 867 if (!block) { 868 NL_SET_ERR_MSG(extack, "Memory allocation for block failed"); 869 return ERR_PTR(-ENOMEM); 870 } 871 mutex_init(&block->lock); 872 mutex_init(&block->proto_destroy_lock); 873 init_rwsem(&block->cb_lock); 874 flow_block_init(&block->flow_block); 875 INIT_LIST_HEAD(&block->chain_list); 876 INIT_LIST_HEAD(&block->owner_list); 877 INIT_LIST_HEAD(&block->chain0.filter_chain_list); 878 879 refcount_set(&block->refcnt, 1); 880 block->net = net; 881 block->index = block_index; 882 883 /* Don't store q pointer for blocks which are shared */ 884 if (!tcf_block_shared(block)) 885 block->q = q; 886 return block; 887 } 888 889 static struct tcf_block *tcf_block_lookup(struct net *net, u32 block_index) 890 { 891 struct tcf_net *tn = net_generic(net, tcf_net_id); 892 893 return idr_find(&tn->idr, block_index); 894 } 895 896 static struct tcf_block *tcf_block_refcnt_get(struct net *net, u32 block_index) 897 { 898 struct tcf_block *block; 899 900 rcu_read_lock(); 901 block = tcf_block_lookup(net, block_index); 902 if (block && !refcount_inc_not_zero(&block->refcnt)) 903 block = NULL; 904 rcu_read_unlock(); 905 906 return block; 907 } 908 909 static struct tcf_chain * 910 __tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain) 911 { 912 mutex_lock(&block->lock); 913 if (chain) 914 chain = list_is_last(&chain->list, &block->chain_list) ? 915 NULL : list_next_entry(chain, list); 916 else 917 chain = list_first_entry_or_null(&block->chain_list, 918 struct tcf_chain, list); 919 920 /* skip all action-only chains */ 921 while (chain && tcf_chain_held_by_acts_only(chain)) 922 chain = list_is_last(&chain->list, &block->chain_list) ? 923 NULL : list_next_entry(chain, list); 924 925 if (chain) 926 tcf_chain_hold(chain); 927 mutex_unlock(&block->lock); 928 929 return chain; 930 } 931 932 /* Function to be used by all clients that want to iterate over all chains on 933 * block. It properly obtains block->lock and takes reference to chain before 934 * returning it. Users of this function must be tolerant to concurrent chain 935 * insertion/deletion or ensure that no concurrent chain modification is 936 * possible. Note that all netlink dump callbacks cannot guarantee to provide 937 * consistent dump because rtnl lock is released each time skb is filled with 938 * data and sent to user-space. 939 */ 940 941 struct tcf_chain * 942 tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain) 943 { 944 struct tcf_chain *chain_next = __tcf_get_next_chain(block, chain); 945 946 if (chain) 947 tcf_chain_put(chain); 948 949 return chain_next; 950 } 951 EXPORT_SYMBOL(tcf_get_next_chain); 952 953 static struct tcf_proto * 954 __tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp) 955 { 956 u32 prio = 0; 957 958 ASSERT_RTNL(); 959 mutex_lock(&chain->filter_chain_lock); 960 961 if (!tp) { 962 tp = tcf_chain_dereference(chain->filter_chain, chain); 963 } else if (tcf_proto_is_deleting(tp)) { 964 /* 'deleting' flag is set and chain->filter_chain_lock was 965 * unlocked, which means next pointer could be invalid. Restart 966 * search. 967 */ 968 prio = tp->prio + 1; 969 tp = tcf_chain_dereference(chain->filter_chain, chain); 970 971 for (; tp; tp = tcf_chain_dereference(tp->next, chain)) 972 if (!tp->deleting && tp->prio >= prio) 973 break; 974 } else { 975 tp = tcf_chain_dereference(tp->next, chain); 976 } 977 978 if (tp) 979 tcf_proto_get(tp); 980 981 mutex_unlock(&chain->filter_chain_lock); 982 983 return tp; 984 } 985 986 /* Function to be used by all clients that want to iterate over all tp's on 987 * chain. Users of this function must be tolerant to concurrent tp 988 * insertion/deletion or ensure that no concurrent chain modification is 989 * possible. Note that all netlink dump callbacks cannot guarantee to provide 990 * consistent dump because rtnl lock is released each time skb is filled with 991 * data and sent to user-space. 992 */ 993 994 struct tcf_proto * 995 tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp) 996 { 997 struct tcf_proto *tp_next = __tcf_get_next_proto(chain, tp); 998 999 if (tp) 1000 tcf_proto_put(tp, true, NULL); 1001 1002 return tp_next; 1003 } 1004 EXPORT_SYMBOL(tcf_get_next_proto); 1005 1006 static void tcf_block_flush_all_chains(struct tcf_block *block, bool rtnl_held) 1007 { 1008 struct tcf_chain *chain; 1009 1010 /* Last reference to block. At this point chains cannot be added or 1011 * removed concurrently. 1012 */ 1013 for (chain = tcf_get_next_chain(block, NULL); 1014 chain; 1015 chain = tcf_get_next_chain(block, chain)) { 1016 tcf_chain_put_explicitly_created(chain); 1017 tcf_chain_flush(chain, rtnl_held); 1018 } 1019 } 1020 1021 /* Lookup Qdisc and increments its reference counter. 1022 * Set parent, if necessary. 1023 */ 1024 1025 static int __tcf_qdisc_find(struct net *net, struct Qdisc **q, 1026 u32 *parent, int ifindex, bool rtnl_held, 1027 struct netlink_ext_ack *extack) 1028 { 1029 const struct Qdisc_class_ops *cops; 1030 struct net_device *dev; 1031 int err = 0; 1032 1033 if (ifindex == TCM_IFINDEX_MAGIC_BLOCK) 1034 return 0; 1035 1036 rcu_read_lock(); 1037 1038 /* Find link */ 1039 dev = dev_get_by_index_rcu(net, ifindex); 1040 if (!dev) { 1041 rcu_read_unlock(); 1042 return -ENODEV; 1043 } 1044 1045 /* Find qdisc */ 1046 if (!*parent) { 1047 *q = dev->qdisc; 1048 *parent = (*q)->handle; 1049 } else { 1050 *q = qdisc_lookup_rcu(dev, TC_H_MAJ(*parent)); 1051 if (!*q) { 1052 NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exists"); 1053 err = -EINVAL; 1054 goto errout_rcu; 1055 } 1056 } 1057 1058 *q = qdisc_refcount_inc_nz(*q); 1059 if (!*q) { 1060 NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exists"); 1061 err = -EINVAL; 1062 goto errout_rcu; 1063 } 1064 1065 /* Is it classful? */ 1066 cops = (*q)->ops->cl_ops; 1067 if (!cops) { 1068 NL_SET_ERR_MSG(extack, "Qdisc not classful"); 1069 err = -EINVAL; 1070 goto errout_qdisc; 1071 } 1072 1073 if (!cops->tcf_block) { 1074 NL_SET_ERR_MSG(extack, "Class doesn't support blocks"); 1075 err = -EOPNOTSUPP; 1076 goto errout_qdisc; 1077 } 1078 1079 errout_rcu: 1080 /* At this point we know that qdisc is not noop_qdisc, 1081 * which means that qdisc holds a reference to net_device 1082 * and we hold a reference to qdisc, so it is safe to release 1083 * rcu read lock. 1084 */ 1085 rcu_read_unlock(); 1086 return err; 1087 1088 errout_qdisc: 1089 rcu_read_unlock(); 1090 1091 if (rtnl_held) 1092 qdisc_put(*q); 1093 else 1094 qdisc_put_unlocked(*q); 1095 *q = NULL; 1096 1097 return err; 1098 } 1099 1100 static int __tcf_qdisc_cl_find(struct Qdisc *q, u32 parent, unsigned long *cl, 1101 int ifindex, struct netlink_ext_ack *extack) 1102 { 1103 if (ifindex == TCM_IFINDEX_MAGIC_BLOCK) 1104 return 0; 1105 1106 /* Do we search for filter, attached to class? */ 1107 if (TC_H_MIN(parent)) { 1108 const struct Qdisc_class_ops *cops = q->ops->cl_ops; 1109 1110 *cl = cops->find(q, parent); 1111 if (*cl == 0) { 1112 NL_SET_ERR_MSG(extack, "Specified class doesn't exist"); 1113 return -ENOENT; 1114 } 1115 } 1116 1117 return 0; 1118 } 1119 1120 static struct tcf_block *__tcf_block_find(struct net *net, struct Qdisc *q, 1121 unsigned long cl, int ifindex, 1122 u32 block_index, 1123 struct netlink_ext_ack *extack) 1124 { 1125 struct tcf_block *block; 1126 1127 if (ifindex == TCM_IFINDEX_MAGIC_BLOCK) { 1128 block = tcf_block_refcnt_get(net, block_index); 1129 if (!block) { 1130 NL_SET_ERR_MSG(extack, "Block of given index was not found"); 1131 return ERR_PTR(-EINVAL); 1132 } 1133 } else { 1134 const struct Qdisc_class_ops *cops = q->ops->cl_ops; 1135 1136 block = cops->tcf_block(q, cl, extack); 1137 if (!block) 1138 return ERR_PTR(-EINVAL); 1139 1140 if (tcf_block_shared(block)) { 1141 NL_SET_ERR_MSG(extack, "This filter block is shared. Please use the block index to manipulate the filters"); 1142 return ERR_PTR(-EOPNOTSUPP); 1143 } 1144 1145 /* Always take reference to block in order to support execution 1146 * of rules update path of cls API without rtnl lock. Caller 1147 * must release block when it is finished using it. 'if' block 1148 * of this conditional obtain reference to block by calling 1149 * tcf_block_refcnt_get(). 1150 */ 1151 refcount_inc(&block->refcnt); 1152 } 1153 1154 return block; 1155 } 1156 1157 static void __tcf_block_put(struct tcf_block *block, struct Qdisc *q, 1158 struct tcf_block_ext_info *ei, bool rtnl_held) 1159 { 1160 if (refcount_dec_and_mutex_lock(&block->refcnt, &block->lock)) { 1161 /* Flushing/putting all chains will cause the block to be 1162 * deallocated when last chain is freed. However, if chain_list 1163 * is empty, block has to be manually deallocated. After block 1164 * reference counter reached 0, it is no longer possible to 1165 * increment it or add new chains to block. 1166 */ 1167 bool free_block = list_empty(&block->chain_list); 1168 1169 mutex_unlock(&block->lock); 1170 if (tcf_block_shared(block)) 1171 tcf_block_remove(block, block->net); 1172 1173 if (q) 1174 tcf_block_offload_unbind(block, q, ei); 1175 1176 if (free_block) 1177 tcf_block_destroy(block); 1178 else 1179 tcf_block_flush_all_chains(block, rtnl_held); 1180 } else if (q) { 1181 tcf_block_offload_unbind(block, q, ei); 1182 } 1183 } 1184 1185 static void tcf_block_refcnt_put(struct tcf_block *block, bool rtnl_held) 1186 { 1187 __tcf_block_put(block, NULL, NULL, rtnl_held); 1188 } 1189 1190 /* Find tcf block. 1191 * Set q, parent, cl when appropriate. 1192 */ 1193 1194 static struct tcf_block *tcf_block_find(struct net *net, struct Qdisc **q, 1195 u32 *parent, unsigned long *cl, 1196 int ifindex, u32 block_index, 1197 struct netlink_ext_ack *extack) 1198 { 1199 struct tcf_block *block; 1200 int err = 0; 1201 1202 ASSERT_RTNL(); 1203 1204 err = __tcf_qdisc_find(net, q, parent, ifindex, true, extack); 1205 if (err) 1206 goto errout; 1207 1208 err = __tcf_qdisc_cl_find(*q, *parent, cl, ifindex, extack); 1209 if (err) 1210 goto errout_qdisc; 1211 1212 block = __tcf_block_find(net, *q, *cl, ifindex, block_index, extack); 1213 if (IS_ERR(block)) { 1214 err = PTR_ERR(block); 1215 goto errout_qdisc; 1216 } 1217 1218 return block; 1219 1220 errout_qdisc: 1221 if (*q) 1222 qdisc_put(*q); 1223 errout: 1224 *q = NULL; 1225 return ERR_PTR(err); 1226 } 1227 1228 static void tcf_block_release(struct Qdisc *q, struct tcf_block *block, 1229 bool rtnl_held) 1230 { 1231 if (!IS_ERR_OR_NULL(block)) 1232 tcf_block_refcnt_put(block, rtnl_held); 1233 1234 if (q) { 1235 if (rtnl_held) 1236 qdisc_put(q); 1237 else 1238 qdisc_put_unlocked(q); 1239 } 1240 } 1241 1242 struct tcf_block_owner_item { 1243 struct list_head list; 1244 struct Qdisc *q; 1245 enum flow_block_binder_type binder_type; 1246 }; 1247 1248 static void 1249 tcf_block_owner_netif_keep_dst(struct tcf_block *block, 1250 struct Qdisc *q, 1251 enum flow_block_binder_type binder_type) 1252 { 1253 if (block->keep_dst && 1254 binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS && 1255 binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) 1256 netif_keep_dst(qdisc_dev(q)); 1257 } 1258 1259 void tcf_block_netif_keep_dst(struct tcf_block *block) 1260 { 1261 struct tcf_block_owner_item *item; 1262 1263 block->keep_dst = true; 1264 list_for_each_entry(item, &block->owner_list, list) 1265 tcf_block_owner_netif_keep_dst(block, item->q, 1266 item->binder_type); 1267 } 1268 EXPORT_SYMBOL(tcf_block_netif_keep_dst); 1269 1270 static int tcf_block_owner_add(struct tcf_block *block, 1271 struct Qdisc *q, 1272 enum flow_block_binder_type binder_type) 1273 { 1274 struct tcf_block_owner_item *item; 1275 1276 item = kmalloc(sizeof(*item), GFP_KERNEL); 1277 if (!item) 1278 return -ENOMEM; 1279 item->q = q; 1280 item->binder_type = binder_type; 1281 list_add(&item->list, &block->owner_list); 1282 return 0; 1283 } 1284 1285 static void tcf_block_owner_del(struct tcf_block *block, 1286 struct Qdisc *q, 1287 enum flow_block_binder_type binder_type) 1288 { 1289 struct tcf_block_owner_item *item; 1290 1291 list_for_each_entry(item, &block->owner_list, list) { 1292 if (item->q == q && item->binder_type == binder_type) { 1293 list_del(&item->list); 1294 kfree(item); 1295 return; 1296 } 1297 } 1298 WARN_ON(1); 1299 } 1300 1301 int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q, 1302 struct tcf_block_ext_info *ei, 1303 struct netlink_ext_ack *extack) 1304 { 1305 struct net *net = qdisc_net(q); 1306 struct tcf_block *block = NULL; 1307 int err; 1308 1309 if (ei->block_index) 1310 /* block_index not 0 means the shared block is requested */ 1311 block = tcf_block_refcnt_get(net, ei->block_index); 1312 1313 if (!block) { 1314 block = tcf_block_create(net, q, ei->block_index, extack); 1315 if (IS_ERR(block)) 1316 return PTR_ERR(block); 1317 if (tcf_block_shared(block)) { 1318 err = tcf_block_insert(block, net, extack); 1319 if (err) 1320 goto err_block_insert; 1321 } 1322 } 1323 1324 err = tcf_block_owner_add(block, q, ei->binder_type); 1325 if (err) 1326 goto err_block_owner_add; 1327 1328 tcf_block_owner_netif_keep_dst(block, q, ei->binder_type); 1329 1330 err = tcf_chain0_head_change_cb_add(block, ei, extack); 1331 if (err) 1332 goto err_chain0_head_change_cb_add; 1333 1334 err = tcf_block_offload_bind(block, q, ei, extack); 1335 if (err) 1336 goto err_block_offload_bind; 1337 1338 *p_block = block; 1339 return 0; 1340 1341 err_block_offload_bind: 1342 tcf_chain0_head_change_cb_del(block, ei); 1343 err_chain0_head_change_cb_add: 1344 tcf_block_owner_del(block, q, ei->binder_type); 1345 err_block_owner_add: 1346 err_block_insert: 1347 tcf_block_refcnt_put(block, true); 1348 return err; 1349 } 1350 EXPORT_SYMBOL(tcf_block_get_ext); 1351 1352 static void tcf_chain_head_change_dflt(struct tcf_proto *tp_head, void *priv) 1353 { 1354 struct tcf_proto __rcu **p_filter_chain = priv; 1355 1356 rcu_assign_pointer(*p_filter_chain, tp_head); 1357 } 1358 1359 int tcf_block_get(struct tcf_block **p_block, 1360 struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q, 1361 struct netlink_ext_ack *extack) 1362 { 1363 struct tcf_block_ext_info ei = { 1364 .chain_head_change = tcf_chain_head_change_dflt, 1365 .chain_head_change_priv = p_filter_chain, 1366 }; 1367 1368 WARN_ON(!p_filter_chain); 1369 return tcf_block_get_ext(p_block, q, &ei, extack); 1370 } 1371 EXPORT_SYMBOL(tcf_block_get); 1372 1373 /* XXX: Standalone actions are not allowed to jump to any chain, and bound 1374 * actions should be all removed after flushing. 1375 */ 1376 void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q, 1377 struct tcf_block_ext_info *ei) 1378 { 1379 if (!block) 1380 return; 1381 tcf_chain0_head_change_cb_del(block, ei); 1382 tcf_block_owner_del(block, q, ei->binder_type); 1383 1384 __tcf_block_put(block, q, ei, true); 1385 } 1386 EXPORT_SYMBOL(tcf_block_put_ext); 1387 1388 void tcf_block_put(struct tcf_block *block) 1389 { 1390 struct tcf_block_ext_info ei = {0, }; 1391 1392 if (!block) 1393 return; 1394 tcf_block_put_ext(block, block->q, &ei); 1395 } 1396 1397 EXPORT_SYMBOL(tcf_block_put); 1398 1399 static int 1400 tcf_block_playback_offloads(struct tcf_block *block, flow_setup_cb_t *cb, 1401 void *cb_priv, bool add, bool offload_in_use, 1402 struct netlink_ext_ack *extack) 1403 { 1404 struct tcf_chain *chain, *chain_prev; 1405 struct tcf_proto *tp, *tp_prev; 1406 int err; 1407 1408 lockdep_assert_held(&block->cb_lock); 1409 1410 for (chain = __tcf_get_next_chain(block, NULL); 1411 chain; 1412 chain_prev = chain, 1413 chain = __tcf_get_next_chain(block, chain), 1414 tcf_chain_put(chain_prev)) { 1415 for (tp = __tcf_get_next_proto(chain, NULL); tp; 1416 tp_prev = tp, 1417 tp = __tcf_get_next_proto(chain, tp), 1418 tcf_proto_put(tp_prev, true, NULL)) { 1419 if (tp->ops->reoffload) { 1420 err = tp->ops->reoffload(tp, add, cb, cb_priv, 1421 extack); 1422 if (err && add) 1423 goto err_playback_remove; 1424 } else if (add && offload_in_use) { 1425 err = -EOPNOTSUPP; 1426 NL_SET_ERR_MSG(extack, "Filter HW offload failed - classifier without re-offloading support"); 1427 goto err_playback_remove; 1428 } 1429 } 1430 } 1431 1432 return 0; 1433 1434 err_playback_remove: 1435 tcf_proto_put(tp, true, NULL); 1436 tcf_chain_put(chain); 1437 tcf_block_playback_offloads(block, cb, cb_priv, false, offload_in_use, 1438 extack); 1439 return err; 1440 } 1441 1442 static int tcf_block_bind(struct tcf_block *block, 1443 struct flow_block_offload *bo) 1444 { 1445 struct flow_block_cb *block_cb, *next; 1446 int err, i = 0; 1447 1448 lockdep_assert_held(&block->cb_lock); 1449 1450 list_for_each_entry(block_cb, &bo->cb_list, list) { 1451 err = tcf_block_playback_offloads(block, block_cb->cb, 1452 block_cb->cb_priv, true, 1453 tcf_block_offload_in_use(block), 1454 bo->extack); 1455 if (err) 1456 goto err_unroll; 1457 if (!bo->unlocked_driver_cb) 1458 block->lockeddevcnt++; 1459 1460 i++; 1461 } 1462 list_splice(&bo->cb_list, &block->flow_block.cb_list); 1463 1464 return 0; 1465 1466 err_unroll: 1467 list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) { 1468 if (i-- > 0) { 1469 list_del(&block_cb->list); 1470 tcf_block_playback_offloads(block, block_cb->cb, 1471 block_cb->cb_priv, false, 1472 tcf_block_offload_in_use(block), 1473 NULL); 1474 if (!bo->unlocked_driver_cb) 1475 block->lockeddevcnt--; 1476 } 1477 flow_block_cb_free(block_cb); 1478 } 1479 1480 return err; 1481 } 1482 1483 static void tcf_block_unbind(struct tcf_block *block, 1484 struct flow_block_offload *bo) 1485 { 1486 struct flow_block_cb *block_cb, *next; 1487 1488 lockdep_assert_held(&block->cb_lock); 1489 1490 list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) { 1491 tcf_block_playback_offloads(block, block_cb->cb, 1492 block_cb->cb_priv, false, 1493 tcf_block_offload_in_use(block), 1494 NULL); 1495 list_del(&block_cb->list); 1496 flow_block_cb_free(block_cb); 1497 if (!bo->unlocked_driver_cb) 1498 block->lockeddevcnt--; 1499 } 1500 } 1501 1502 static int tcf_block_setup(struct tcf_block *block, 1503 struct flow_block_offload *bo) 1504 { 1505 int err; 1506 1507 switch (bo->command) { 1508 case FLOW_BLOCK_BIND: 1509 err = tcf_block_bind(block, bo); 1510 break; 1511 case FLOW_BLOCK_UNBIND: 1512 err = 0; 1513 tcf_block_unbind(block, bo); 1514 break; 1515 default: 1516 WARN_ON_ONCE(1); 1517 err = -EOPNOTSUPP; 1518 } 1519 1520 return err; 1521 } 1522 1523 /* Main classifier routine: scans classifier chain attached 1524 * to this qdisc, (optionally) tests for protocol and asks 1525 * specific classifiers. 1526 */ 1527 static inline int __tcf_classify(struct sk_buff *skb, 1528 const struct tcf_proto *tp, 1529 const struct tcf_proto *orig_tp, 1530 struct tcf_result *res, 1531 bool compat_mode, 1532 u32 *last_executed_chain) 1533 { 1534 #ifdef CONFIG_NET_CLS_ACT 1535 const int max_reclassify_loop = 16; 1536 const struct tcf_proto *first_tp; 1537 int limit = 0; 1538 1539 reclassify: 1540 #endif 1541 for (; tp; tp = rcu_dereference_bh(tp->next)) { 1542 __be16 protocol = skb_protocol(skb, false); 1543 int err; 1544 1545 if (tp->protocol != protocol && 1546 tp->protocol != htons(ETH_P_ALL)) 1547 continue; 1548 1549 err = tp->classify(skb, tp, res); 1550 #ifdef CONFIG_NET_CLS_ACT 1551 if (unlikely(err == TC_ACT_RECLASSIFY && !compat_mode)) { 1552 first_tp = orig_tp; 1553 *last_executed_chain = first_tp->chain->index; 1554 goto reset; 1555 } else if (unlikely(TC_ACT_EXT_CMP(err, TC_ACT_GOTO_CHAIN))) { 1556 first_tp = res->goto_tp; 1557 *last_executed_chain = err & TC_ACT_EXT_VAL_MASK; 1558 goto reset; 1559 } 1560 #endif 1561 if (err >= 0) 1562 return err; 1563 } 1564 1565 return TC_ACT_UNSPEC; /* signal: continue lookup */ 1566 #ifdef CONFIG_NET_CLS_ACT 1567 reset: 1568 if (unlikely(limit++ >= max_reclassify_loop)) { 1569 net_notice_ratelimited("%u: reclassify loop, rule prio %u, protocol %02x\n", 1570 tp->chain->block->index, 1571 tp->prio & 0xffff, 1572 ntohs(tp->protocol)); 1573 return TC_ACT_SHOT; 1574 } 1575 1576 tp = first_tp; 1577 goto reclassify; 1578 #endif 1579 } 1580 1581 int tcf_classify(struct sk_buff *skb, 1582 const struct tcf_block *block, 1583 const struct tcf_proto *tp, 1584 struct tcf_result *res, bool compat_mode) 1585 { 1586 #if !IS_ENABLED(CONFIG_NET_TC_SKB_EXT) 1587 u32 last_executed_chain = 0; 1588 1589 return __tcf_classify(skb, tp, tp, res, compat_mode, 1590 &last_executed_chain); 1591 #else 1592 u32 last_executed_chain = tp ? tp->chain->index : 0; 1593 const struct tcf_proto *orig_tp = tp; 1594 struct tc_skb_ext *ext; 1595 int ret; 1596 1597 if (block) { 1598 ext = skb_ext_find(skb, TC_SKB_EXT); 1599 1600 if (ext && ext->chain) { 1601 struct tcf_chain *fchain; 1602 1603 fchain = tcf_chain_lookup_rcu(block, ext->chain); 1604 if (!fchain) 1605 return TC_ACT_SHOT; 1606 1607 /* Consume, so cloned/redirect skbs won't inherit ext */ 1608 skb_ext_del(skb, TC_SKB_EXT); 1609 1610 tp = rcu_dereference_bh(fchain->filter_chain); 1611 last_executed_chain = fchain->index; 1612 } 1613 } 1614 1615 ret = __tcf_classify(skb, tp, orig_tp, res, compat_mode, 1616 &last_executed_chain); 1617 1618 /* If we missed on some chain */ 1619 if (ret == TC_ACT_UNSPEC && last_executed_chain) { 1620 struct tc_skb_cb *cb = tc_skb_cb(skb); 1621 1622 ext = tc_skb_ext_alloc(skb); 1623 if (WARN_ON_ONCE(!ext)) 1624 return TC_ACT_SHOT; 1625 ext->chain = last_executed_chain; 1626 ext->mru = cb->mru; 1627 ext->post_ct = cb->post_ct; 1628 ext->post_ct_snat = cb->post_ct_snat; 1629 ext->post_ct_dnat = cb->post_ct_dnat; 1630 ext->zone = cb->zone; 1631 } 1632 1633 return ret; 1634 #endif 1635 } 1636 EXPORT_SYMBOL(tcf_classify); 1637 1638 struct tcf_chain_info { 1639 struct tcf_proto __rcu **pprev; 1640 struct tcf_proto __rcu *next; 1641 }; 1642 1643 static struct tcf_proto *tcf_chain_tp_prev(struct tcf_chain *chain, 1644 struct tcf_chain_info *chain_info) 1645 { 1646 return tcf_chain_dereference(*chain_info->pprev, chain); 1647 } 1648 1649 static int tcf_chain_tp_insert(struct tcf_chain *chain, 1650 struct tcf_chain_info *chain_info, 1651 struct tcf_proto *tp) 1652 { 1653 if (chain->flushing) 1654 return -EAGAIN; 1655 1656 if (*chain_info->pprev == chain->filter_chain) 1657 tcf_chain0_head_change(chain, tp); 1658 tcf_proto_get(tp); 1659 RCU_INIT_POINTER(tp->next, tcf_chain_tp_prev(chain, chain_info)); 1660 rcu_assign_pointer(*chain_info->pprev, tp); 1661 1662 return 0; 1663 } 1664 1665 static void tcf_chain_tp_remove(struct tcf_chain *chain, 1666 struct tcf_chain_info *chain_info, 1667 struct tcf_proto *tp) 1668 { 1669 struct tcf_proto *next = tcf_chain_dereference(chain_info->next, chain); 1670 1671 tcf_proto_mark_delete(tp); 1672 if (tp == chain->filter_chain) 1673 tcf_chain0_head_change(chain, next); 1674 RCU_INIT_POINTER(*chain_info->pprev, next); 1675 } 1676 1677 static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain, 1678 struct tcf_chain_info *chain_info, 1679 u32 protocol, u32 prio, 1680 bool prio_allocate); 1681 1682 /* Try to insert new proto. 1683 * If proto with specified priority already exists, free new proto 1684 * and return existing one. 1685 */ 1686 1687 static struct tcf_proto *tcf_chain_tp_insert_unique(struct tcf_chain *chain, 1688 struct tcf_proto *tp_new, 1689 u32 protocol, u32 prio, 1690 bool rtnl_held) 1691 { 1692 struct tcf_chain_info chain_info; 1693 struct tcf_proto *tp; 1694 int err = 0; 1695 1696 mutex_lock(&chain->filter_chain_lock); 1697 1698 if (tcf_proto_exists_destroying(chain, tp_new)) { 1699 mutex_unlock(&chain->filter_chain_lock); 1700 tcf_proto_destroy(tp_new, rtnl_held, false, NULL); 1701 return ERR_PTR(-EAGAIN); 1702 } 1703 1704 tp = tcf_chain_tp_find(chain, &chain_info, 1705 protocol, prio, false); 1706 if (!tp) 1707 err = tcf_chain_tp_insert(chain, &chain_info, tp_new); 1708 mutex_unlock(&chain->filter_chain_lock); 1709 1710 if (tp) { 1711 tcf_proto_destroy(tp_new, rtnl_held, false, NULL); 1712 tp_new = tp; 1713 } else if (err) { 1714 tcf_proto_destroy(tp_new, rtnl_held, false, NULL); 1715 tp_new = ERR_PTR(err); 1716 } 1717 1718 return tp_new; 1719 } 1720 1721 static void tcf_chain_tp_delete_empty(struct tcf_chain *chain, 1722 struct tcf_proto *tp, bool rtnl_held, 1723 struct netlink_ext_ack *extack) 1724 { 1725 struct tcf_chain_info chain_info; 1726 struct tcf_proto *tp_iter; 1727 struct tcf_proto **pprev; 1728 struct tcf_proto *next; 1729 1730 mutex_lock(&chain->filter_chain_lock); 1731 1732 /* Atomically find and remove tp from chain. */ 1733 for (pprev = &chain->filter_chain; 1734 (tp_iter = tcf_chain_dereference(*pprev, chain)); 1735 pprev = &tp_iter->next) { 1736 if (tp_iter == tp) { 1737 chain_info.pprev = pprev; 1738 chain_info.next = tp_iter->next; 1739 WARN_ON(tp_iter->deleting); 1740 break; 1741 } 1742 } 1743 /* Verify that tp still exists and no new filters were inserted 1744 * concurrently. 1745 * Mark tp for deletion if it is empty. 1746 */ 1747 if (!tp_iter || !tcf_proto_check_delete(tp)) { 1748 mutex_unlock(&chain->filter_chain_lock); 1749 return; 1750 } 1751 1752 tcf_proto_signal_destroying(chain, tp); 1753 next = tcf_chain_dereference(chain_info.next, chain); 1754 if (tp == chain->filter_chain) 1755 tcf_chain0_head_change(chain, next); 1756 RCU_INIT_POINTER(*chain_info.pprev, next); 1757 mutex_unlock(&chain->filter_chain_lock); 1758 1759 tcf_proto_put(tp, rtnl_held, extack); 1760 } 1761 1762 static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain, 1763 struct tcf_chain_info *chain_info, 1764 u32 protocol, u32 prio, 1765 bool prio_allocate) 1766 { 1767 struct tcf_proto **pprev; 1768 struct tcf_proto *tp; 1769 1770 /* Check the chain for existence of proto-tcf with this priority */ 1771 for (pprev = &chain->filter_chain; 1772 (tp = tcf_chain_dereference(*pprev, chain)); 1773 pprev = &tp->next) { 1774 if (tp->prio >= prio) { 1775 if (tp->prio == prio) { 1776 if (prio_allocate || 1777 (tp->protocol != protocol && protocol)) 1778 return ERR_PTR(-EINVAL); 1779 } else { 1780 tp = NULL; 1781 } 1782 break; 1783 } 1784 } 1785 chain_info->pprev = pprev; 1786 if (tp) { 1787 chain_info->next = tp->next; 1788 tcf_proto_get(tp); 1789 } else { 1790 chain_info->next = NULL; 1791 } 1792 return tp; 1793 } 1794 1795 static int tcf_fill_node(struct net *net, struct sk_buff *skb, 1796 struct tcf_proto *tp, struct tcf_block *block, 1797 struct Qdisc *q, u32 parent, void *fh, 1798 u32 portid, u32 seq, u16 flags, int event, 1799 bool terse_dump, bool rtnl_held) 1800 { 1801 struct tcmsg *tcm; 1802 struct nlmsghdr *nlh; 1803 unsigned char *b = skb_tail_pointer(skb); 1804 1805 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags); 1806 if (!nlh) 1807 goto out_nlmsg_trim; 1808 tcm = nlmsg_data(nlh); 1809 tcm->tcm_family = AF_UNSPEC; 1810 tcm->tcm__pad1 = 0; 1811 tcm->tcm__pad2 = 0; 1812 if (q) { 1813 tcm->tcm_ifindex = qdisc_dev(q)->ifindex; 1814 tcm->tcm_parent = parent; 1815 } else { 1816 tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK; 1817 tcm->tcm_block_index = block->index; 1818 } 1819 tcm->tcm_info = TC_H_MAKE(tp->prio, tp->protocol); 1820 if (nla_put_string(skb, TCA_KIND, tp->ops->kind)) 1821 goto nla_put_failure; 1822 if (nla_put_u32(skb, TCA_CHAIN, tp->chain->index)) 1823 goto nla_put_failure; 1824 if (!fh) { 1825 tcm->tcm_handle = 0; 1826 } else if (terse_dump) { 1827 if (tp->ops->terse_dump) { 1828 if (tp->ops->terse_dump(net, tp, fh, skb, tcm, 1829 rtnl_held) < 0) 1830 goto nla_put_failure; 1831 } else { 1832 goto cls_op_not_supp; 1833 } 1834 } else { 1835 if (tp->ops->dump && 1836 tp->ops->dump(net, tp, fh, skb, tcm, rtnl_held) < 0) 1837 goto nla_put_failure; 1838 } 1839 nlh->nlmsg_len = skb_tail_pointer(skb) - b; 1840 return skb->len; 1841 1842 out_nlmsg_trim: 1843 nla_put_failure: 1844 cls_op_not_supp: 1845 nlmsg_trim(skb, b); 1846 return -1; 1847 } 1848 1849 static int tfilter_notify(struct net *net, struct sk_buff *oskb, 1850 struct nlmsghdr *n, struct tcf_proto *tp, 1851 struct tcf_block *block, struct Qdisc *q, 1852 u32 parent, void *fh, int event, bool unicast, 1853 bool rtnl_held) 1854 { 1855 struct sk_buff *skb; 1856 u32 portid = oskb ? NETLINK_CB(oskb).portid : 0; 1857 int err = 0; 1858 1859 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); 1860 if (!skb) 1861 return -ENOBUFS; 1862 1863 if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid, 1864 n->nlmsg_seq, n->nlmsg_flags, event, 1865 false, rtnl_held) <= 0) { 1866 kfree_skb(skb); 1867 return -EINVAL; 1868 } 1869 1870 if (unicast) 1871 err = rtnl_unicast(skb, net, portid); 1872 else 1873 err = rtnetlink_send(skb, net, portid, RTNLGRP_TC, 1874 n->nlmsg_flags & NLM_F_ECHO); 1875 return err; 1876 } 1877 1878 static int tfilter_del_notify(struct net *net, struct sk_buff *oskb, 1879 struct nlmsghdr *n, struct tcf_proto *tp, 1880 struct tcf_block *block, struct Qdisc *q, 1881 u32 parent, void *fh, bool unicast, bool *last, 1882 bool rtnl_held, struct netlink_ext_ack *extack) 1883 { 1884 struct sk_buff *skb; 1885 u32 portid = oskb ? NETLINK_CB(oskb).portid : 0; 1886 int err; 1887 1888 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); 1889 if (!skb) 1890 return -ENOBUFS; 1891 1892 if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid, 1893 n->nlmsg_seq, n->nlmsg_flags, RTM_DELTFILTER, 1894 false, rtnl_held) <= 0) { 1895 NL_SET_ERR_MSG(extack, "Failed to build del event notification"); 1896 kfree_skb(skb); 1897 return -EINVAL; 1898 } 1899 1900 err = tp->ops->delete(tp, fh, last, rtnl_held, extack); 1901 if (err) { 1902 kfree_skb(skb); 1903 return err; 1904 } 1905 1906 if (unicast) 1907 err = rtnl_unicast(skb, net, portid); 1908 else 1909 err = rtnetlink_send(skb, net, portid, RTNLGRP_TC, 1910 n->nlmsg_flags & NLM_F_ECHO); 1911 if (err < 0) 1912 NL_SET_ERR_MSG(extack, "Failed to send filter delete notification"); 1913 1914 return err; 1915 } 1916 1917 static void tfilter_notify_chain(struct net *net, struct sk_buff *oskb, 1918 struct tcf_block *block, struct Qdisc *q, 1919 u32 parent, struct nlmsghdr *n, 1920 struct tcf_chain *chain, int event) 1921 { 1922 struct tcf_proto *tp; 1923 1924 for (tp = tcf_get_next_proto(chain, NULL); 1925 tp; tp = tcf_get_next_proto(chain, tp)) 1926 tfilter_notify(net, oskb, n, tp, block, 1927 q, parent, NULL, event, false, true); 1928 } 1929 1930 static void tfilter_put(struct tcf_proto *tp, void *fh) 1931 { 1932 if (tp->ops->put && fh) 1933 tp->ops->put(tp, fh); 1934 } 1935 1936 static int tc_new_tfilter(struct sk_buff *skb, struct nlmsghdr *n, 1937 struct netlink_ext_ack *extack) 1938 { 1939 struct net *net = sock_net(skb->sk); 1940 struct nlattr *tca[TCA_MAX + 1]; 1941 char name[IFNAMSIZ]; 1942 struct tcmsg *t; 1943 u32 protocol; 1944 u32 prio; 1945 bool prio_allocate; 1946 u32 parent; 1947 u32 chain_index; 1948 struct Qdisc *q; 1949 struct tcf_chain_info chain_info; 1950 struct tcf_chain *chain; 1951 struct tcf_block *block; 1952 struct tcf_proto *tp; 1953 unsigned long cl; 1954 void *fh; 1955 int err; 1956 int tp_created; 1957 bool rtnl_held = false; 1958 u32 flags; 1959 1960 if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN)) 1961 return -EPERM; 1962 1963 replay: 1964 tp_created = 0; 1965 1966 err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX, 1967 rtm_tca_policy, extack); 1968 if (err < 0) 1969 return err; 1970 1971 t = nlmsg_data(n); 1972 protocol = TC_H_MIN(t->tcm_info); 1973 prio = TC_H_MAJ(t->tcm_info); 1974 prio_allocate = false; 1975 parent = t->tcm_parent; 1976 tp = NULL; 1977 cl = 0; 1978 block = NULL; 1979 q = NULL; 1980 chain = NULL; 1981 flags = 0; 1982 1983 if (prio == 0) { 1984 /* If no priority is provided by the user, 1985 * we allocate one. 1986 */ 1987 if (n->nlmsg_flags & NLM_F_CREATE) { 1988 prio = TC_H_MAKE(0x80000000U, 0U); 1989 prio_allocate = true; 1990 } else { 1991 NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero"); 1992 return -ENOENT; 1993 } 1994 } 1995 1996 /* Find head of filter chain. */ 1997 1998 err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack); 1999 if (err) 2000 return err; 2001 2002 if (tcf_proto_check_kind(tca[TCA_KIND], name)) { 2003 NL_SET_ERR_MSG(extack, "Specified TC filter name too long"); 2004 err = -EINVAL; 2005 goto errout; 2006 } 2007 2008 /* Take rtnl mutex if rtnl_held was set to true on previous iteration, 2009 * block is shared (no qdisc found), qdisc is not unlocked, classifier 2010 * type is not specified, classifier is not unlocked. 2011 */ 2012 if (rtnl_held || 2013 (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) || 2014 !tcf_proto_is_unlocked(name)) { 2015 rtnl_held = true; 2016 rtnl_lock(); 2017 } 2018 2019 err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack); 2020 if (err) 2021 goto errout; 2022 2023 block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index, 2024 extack); 2025 if (IS_ERR(block)) { 2026 err = PTR_ERR(block); 2027 goto errout; 2028 } 2029 block->classid = parent; 2030 2031 chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0; 2032 if (chain_index > TC_ACT_EXT_VAL_MASK) { 2033 NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit"); 2034 err = -EINVAL; 2035 goto errout; 2036 } 2037 chain = tcf_chain_get(block, chain_index, true); 2038 if (!chain) { 2039 NL_SET_ERR_MSG(extack, "Cannot create specified filter chain"); 2040 err = -ENOMEM; 2041 goto errout; 2042 } 2043 2044 mutex_lock(&chain->filter_chain_lock); 2045 tp = tcf_chain_tp_find(chain, &chain_info, protocol, 2046 prio, prio_allocate); 2047 if (IS_ERR(tp)) { 2048 NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found"); 2049 err = PTR_ERR(tp); 2050 goto errout_locked; 2051 } 2052 2053 if (tp == NULL) { 2054 struct tcf_proto *tp_new = NULL; 2055 2056 if (chain->flushing) { 2057 err = -EAGAIN; 2058 goto errout_locked; 2059 } 2060 2061 /* Proto-tcf does not exist, create new one */ 2062 2063 if (tca[TCA_KIND] == NULL || !protocol) { 2064 NL_SET_ERR_MSG(extack, "Filter kind and protocol must be specified"); 2065 err = -EINVAL; 2066 goto errout_locked; 2067 } 2068 2069 if (!(n->nlmsg_flags & NLM_F_CREATE)) { 2070 NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter"); 2071 err = -ENOENT; 2072 goto errout_locked; 2073 } 2074 2075 if (prio_allocate) 2076 prio = tcf_auto_prio(tcf_chain_tp_prev(chain, 2077 &chain_info)); 2078 2079 mutex_unlock(&chain->filter_chain_lock); 2080 tp_new = tcf_proto_create(name, protocol, prio, chain, 2081 rtnl_held, extack); 2082 if (IS_ERR(tp_new)) { 2083 err = PTR_ERR(tp_new); 2084 goto errout_tp; 2085 } 2086 2087 tp_created = 1; 2088 tp = tcf_chain_tp_insert_unique(chain, tp_new, protocol, prio, 2089 rtnl_held); 2090 if (IS_ERR(tp)) { 2091 err = PTR_ERR(tp); 2092 goto errout_tp; 2093 } 2094 } else { 2095 mutex_unlock(&chain->filter_chain_lock); 2096 } 2097 2098 if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) { 2099 NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one"); 2100 err = -EINVAL; 2101 goto errout; 2102 } 2103 2104 fh = tp->ops->get(tp, t->tcm_handle); 2105 2106 if (!fh) { 2107 if (!(n->nlmsg_flags & NLM_F_CREATE)) { 2108 NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter"); 2109 err = -ENOENT; 2110 goto errout; 2111 } 2112 } else if (n->nlmsg_flags & NLM_F_EXCL) { 2113 tfilter_put(tp, fh); 2114 NL_SET_ERR_MSG(extack, "Filter already exists"); 2115 err = -EEXIST; 2116 goto errout; 2117 } 2118 2119 if (chain->tmplt_ops && chain->tmplt_ops != tp->ops) { 2120 NL_SET_ERR_MSG(extack, "Chain template is set to a different filter kind"); 2121 err = -EINVAL; 2122 goto errout; 2123 } 2124 2125 if (!(n->nlmsg_flags & NLM_F_CREATE)) 2126 flags |= TCA_ACT_FLAGS_REPLACE; 2127 if (!rtnl_held) 2128 flags |= TCA_ACT_FLAGS_NO_RTNL; 2129 err = tp->ops->change(net, skb, tp, cl, t->tcm_handle, tca, &fh, 2130 flags, extack); 2131 if (err == 0) { 2132 tfilter_notify(net, skb, n, tp, block, q, parent, fh, 2133 RTM_NEWTFILTER, false, rtnl_held); 2134 tfilter_put(tp, fh); 2135 /* q pointer is NULL for shared blocks */ 2136 if (q) 2137 q->flags &= ~TCQ_F_CAN_BYPASS; 2138 } 2139 2140 errout: 2141 if (err && tp_created) 2142 tcf_chain_tp_delete_empty(chain, tp, rtnl_held, NULL); 2143 errout_tp: 2144 if (chain) { 2145 if (tp && !IS_ERR(tp)) 2146 tcf_proto_put(tp, rtnl_held, NULL); 2147 if (!tp_created) 2148 tcf_chain_put(chain); 2149 } 2150 tcf_block_release(q, block, rtnl_held); 2151 2152 if (rtnl_held) 2153 rtnl_unlock(); 2154 2155 if (err == -EAGAIN) { 2156 /* Take rtnl lock in case EAGAIN is caused by concurrent flush 2157 * of target chain. 2158 */ 2159 rtnl_held = true; 2160 /* Replay the request. */ 2161 goto replay; 2162 } 2163 return err; 2164 2165 errout_locked: 2166 mutex_unlock(&chain->filter_chain_lock); 2167 goto errout; 2168 } 2169 2170 static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n, 2171 struct netlink_ext_ack *extack) 2172 { 2173 struct net *net = sock_net(skb->sk); 2174 struct nlattr *tca[TCA_MAX + 1]; 2175 char name[IFNAMSIZ]; 2176 struct tcmsg *t; 2177 u32 protocol; 2178 u32 prio; 2179 u32 parent; 2180 u32 chain_index; 2181 struct Qdisc *q = NULL; 2182 struct tcf_chain_info chain_info; 2183 struct tcf_chain *chain = NULL; 2184 struct tcf_block *block = NULL; 2185 struct tcf_proto *tp = NULL; 2186 unsigned long cl = 0; 2187 void *fh = NULL; 2188 int err; 2189 bool rtnl_held = false; 2190 2191 if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN)) 2192 return -EPERM; 2193 2194 err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX, 2195 rtm_tca_policy, extack); 2196 if (err < 0) 2197 return err; 2198 2199 t = nlmsg_data(n); 2200 protocol = TC_H_MIN(t->tcm_info); 2201 prio = TC_H_MAJ(t->tcm_info); 2202 parent = t->tcm_parent; 2203 2204 if (prio == 0 && (protocol || t->tcm_handle || tca[TCA_KIND])) { 2205 NL_SET_ERR_MSG(extack, "Cannot flush filters with protocol, handle or kind set"); 2206 return -ENOENT; 2207 } 2208 2209 /* Find head of filter chain. */ 2210 2211 err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack); 2212 if (err) 2213 return err; 2214 2215 if (tcf_proto_check_kind(tca[TCA_KIND], name)) { 2216 NL_SET_ERR_MSG(extack, "Specified TC filter name too long"); 2217 err = -EINVAL; 2218 goto errout; 2219 } 2220 /* Take rtnl mutex if flushing whole chain, block is shared (no qdisc 2221 * found), qdisc is not unlocked, classifier type is not specified, 2222 * classifier is not unlocked. 2223 */ 2224 if (!prio || 2225 (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) || 2226 !tcf_proto_is_unlocked(name)) { 2227 rtnl_held = true; 2228 rtnl_lock(); 2229 } 2230 2231 err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack); 2232 if (err) 2233 goto errout; 2234 2235 block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index, 2236 extack); 2237 if (IS_ERR(block)) { 2238 err = PTR_ERR(block); 2239 goto errout; 2240 } 2241 2242 chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0; 2243 if (chain_index > TC_ACT_EXT_VAL_MASK) { 2244 NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit"); 2245 err = -EINVAL; 2246 goto errout; 2247 } 2248 chain = tcf_chain_get(block, chain_index, false); 2249 if (!chain) { 2250 /* User requested flush on non-existent chain. Nothing to do, 2251 * so just return success. 2252 */ 2253 if (prio == 0) { 2254 err = 0; 2255 goto errout; 2256 } 2257 NL_SET_ERR_MSG(extack, "Cannot find specified filter chain"); 2258 err = -ENOENT; 2259 goto errout; 2260 } 2261 2262 if (prio == 0) { 2263 tfilter_notify_chain(net, skb, block, q, parent, n, 2264 chain, RTM_DELTFILTER); 2265 tcf_chain_flush(chain, rtnl_held); 2266 err = 0; 2267 goto errout; 2268 } 2269 2270 mutex_lock(&chain->filter_chain_lock); 2271 tp = tcf_chain_tp_find(chain, &chain_info, protocol, 2272 prio, false); 2273 if (!tp || IS_ERR(tp)) { 2274 NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found"); 2275 err = tp ? PTR_ERR(tp) : -ENOENT; 2276 goto errout_locked; 2277 } else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) { 2278 NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one"); 2279 err = -EINVAL; 2280 goto errout_locked; 2281 } else if (t->tcm_handle == 0) { 2282 tcf_proto_signal_destroying(chain, tp); 2283 tcf_chain_tp_remove(chain, &chain_info, tp); 2284 mutex_unlock(&chain->filter_chain_lock); 2285 2286 tcf_proto_put(tp, rtnl_held, NULL); 2287 tfilter_notify(net, skb, n, tp, block, q, parent, fh, 2288 RTM_DELTFILTER, false, rtnl_held); 2289 err = 0; 2290 goto errout; 2291 } 2292 mutex_unlock(&chain->filter_chain_lock); 2293 2294 fh = tp->ops->get(tp, t->tcm_handle); 2295 2296 if (!fh) { 2297 NL_SET_ERR_MSG(extack, "Specified filter handle not found"); 2298 err = -ENOENT; 2299 } else { 2300 bool last; 2301 2302 err = tfilter_del_notify(net, skb, n, tp, block, 2303 q, parent, fh, false, &last, 2304 rtnl_held, extack); 2305 2306 if (err) 2307 goto errout; 2308 if (last) 2309 tcf_chain_tp_delete_empty(chain, tp, rtnl_held, extack); 2310 } 2311 2312 errout: 2313 if (chain) { 2314 if (tp && !IS_ERR(tp)) 2315 tcf_proto_put(tp, rtnl_held, NULL); 2316 tcf_chain_put(chain); 2317 } 2318 tcf_block_release(q, block, rtnl_held); 2319 2320 if (rtnl_held) 2321 rtnl_unlock(); 2322 2323 return err; 2324 2325 errout_locked: 2326 mutex_unlock(&chain->filter_chain_lock); 2327 goto errout; 2328 } 2329 2330 static int tc_get_tfilter(struct sk_buff *skb, struct nlmsghdr *n, 2331 struct netlink_ext_ack *extack) 2332 { 2333 struct net *net = sock_net(skb->sk); 2334 struct nlattr *tca[TCA_MAX + 1]; 2335 char name[IFNAMSIZ]; 2336 struct tcmsg *t; 2337 u32 protocol; 2338 u32 prio; 2339 u32 parent; 2340 u32 chain_index; 2341 struct Qdisc *q = NULL; 2342 struct tcf_chain_info chain_info; 2343 struct tcf_chain *chain = NULL; 2344 struct tcf_block *block = NULL; 2345 struct tcf_proto *tp = NULL; 2346 unsigned long cl = 0; 2347 void *fh = NULL; 2348 int err; 2349 bool rtnl_held = false; 2350 2351 err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX, 2352 rtm_tca_policy, extack); 2353 if (err < 0) 2354 return err; 2355 2356 t = nlmsg_data(n); 2357 protocol = TC_H_MIN(t->tcm_info); 2358 prio = TC_H_MAJ(t->tcm_info); 2359 parent = t->tcm_parent; 2360 2361 if (prio == 0) { 2362 NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero"); 2363 return -ENOENT; 2364 } 2365 2366 /* Find head of filter chain. */ 2367 2368 err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack); 2369 if (err) 2370 return err; 2371 2372 if (tcf_proto_check_kind(tca[TCA_KIND], name)) { 2373 NL_SET_ERR_MSG(extack, "Specified TC filter name too long"); 2374 err = -EINVAL; 2375 goto errout; 2376 } 2377 /* Take rtnl mutex if block is shared (no qdisc found), qdisc is not 2378 * unlocked, classifier type is not specified, classifier is not 2379 * unlocked. 2380 */ 2381 if ((q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) || 2382 !tcf_proto_is_unlocked(name)) { 2383 rtnl_held = true; 2384 rtnl_lock(); 2385 } 2386 2387 err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack); 2388 if (err) 2389 goto errout; 2390 2391 block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index, 2392 extack); 2393 if (IS_ERR(block)) { 2394 err = PTR_ERR(block); 2395 goto errout; 2396 } 2397 2398 chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0; 2399 if (chain_index > TC_ACT_EXT_VAL_MASK) { 2400 NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit"); 2401 err = -EINVAL; 2402 goto errout; 2403 } 2404 chain = tcf_chain_get(block, chain_index, false); 2405 if (!chain) { 2406 NL_SET_ERR_MSG(extack, "Cannot find specified filter chain"); 2407 err = -EINVAL; 2408 goto errout; 2409 } 2410 2411 mutex_lock(&chain->filter_chain_lock); 2412 tp = tcf_chain_tp_find(chain, &chain_info, protocol, 2413 prio, false); 2414 mutex_unlock(&chain->filter_chain_lock); 2415 if (!tp || IS_ERR(tp)) { 2416 NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found"); 2417 err = tp ? PTR_ERR(tp) : -ENOENT; 2418 goto errout; 2419 } else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) { 2420 NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one"); 2421 err = -EINVAL; 2422 goto errout; 2423 } 2424 2425 fh = tp->ops->get(tp, t->tcm_handle); 2426 2427 if (!fh) { 2428 NL_SET_ERR_MSG(extack, "Specified filter handle not found"); 2429 err = -ENOENT; 2430 } else { 2431 err = tfilter_notify(net, skb, n, tp, block, q, parent, 2432 fh, RTM_NEWTFILTER, true, rtnl_held); 2433 if (err < 0) 2434 NL_SET_ERR_MSG(extack, "Failed to send filter notify message"); 2435 } 2436 2437 tfilter_put(tp, fh); 2438 errout: 2439 if (chain) { 2440 if (tp && !IS_ERR(tp)) 2441 tcf_proto_put(tp, rtnl_held, NULL); 2442 tcf_chain_put(chain); 2443 } 2444 tcf_block_release(q, block, rtnl_held); 2445 2446 if (rtnl_held) 2447 rtnl_unlock(); 2448 2449 return err; 2450 } 2451 2452 struct tcf_dump_args { 2453 struct tcf_walker w; 2454 struct sk_buff *skb; 2455 struct netlink_callback *cb; 2456 struct tcf_block *block; 2457 struct Qdisc *q; 2458 u32 parent; 2459 bool terse_dump; 2460 }; 2461 2462 static int tcf_node_dump(struct tcf_proto *tp, void *n, struct tcf_walker *arg) 2463 { 2464 struct tcf_dump_args *a = (void *)arg; 2465 struct net *net = sock_net(a->skb->sk); 2466 2467 return tcf_fill_node(net, a->skb, tp, a->block, a->q, a->parent, 2468 n, NETLINK_CB(a->cb->skb).portid, 2469 a->cb->nlh->nlmsg_seq, NLM_F_MULTI, 2470 RTM_NEWTFILTER, a->terse_dump, true); 2471 } 2472 2473 static bool tcf_chain_dump(struct tcf_chain *chain, struct Qdisc *q, u32 parent, 2474 struct sk_buff *skb, struct netlink_callback *cb, 2475 long index_start, long *p_index, bool terse) 2476 { 2477 struct net *net = sock_net(skb->sk); 2478 struct tcf_block *block = chain->block; 2479 struct tcmsg *tcm = nlmsg_data(cb->nlh); 2480 struct tcf_proto *tp, *tp_prev; 2481 struct tcf_dump_args arg; 2482 2483 for (tp = __tcf_get_next_proto(chain, NULL); 2484 tp; 2485 tp_prev = tp, 2486 tp = __tcf_get_next_proto(chain, tp), 2487 tcf_proto_put(tp_prev, true, NULL), 2488 (*p_index)++) { 2489 if (*p_index < index_start) 2490 continue; 2491 if (TC_H_MAJ(tcm->tcm_info) && 2492 TC_H_MAJ(tcm->tcm_info) != tp->prio) 2493 continue; 2494 if (TC_H_MIN(tcm->tcm_info) && 2495 TC_H_MIN(tcm->tcm_info) != tp->protocol) 2496 continue; 2497 if (*p_index > index_start) 2498 memset(&cb->args[1], 0, 2499 sizeof(cb->args) - sizeof(cb->args[0])); 2500 if (cb->args[1] == 0) { 2501 if (tcf_fill_node(net, skb, tp, block, q, parent, NULL, 2502 NETLINK_CB(cb->skb).portid, 2503 cb->nlh->nlmsg_seq, NLM_F_MULTI, 2504 RTM_NEWTFILTER, false, true) <= 0) 2505 goto errout; 2506 cb->args[1] = 1; 2507 } 2508 if (!tp->ops->walk) 2509 continue; 2510 arg.w.fn = tcf_node_dump; 2511 arg.skb = skb; 2512 arg.cb = cb; 2513 arg.block = block; 2514 arg.q = q; 2515 arg.parent = parent; 2516 arg.w.stop = 0; 2517 arg.w.skip = cb->args[1] - 1; 2518 arg.w.count = 0; 2519 arg.w.cookie = cb->args[2]; 2520 arg.terse_dump = terse; 2521 tp->ops->walk(tp, &arg.w, true); 2522 cb->args[2] = arg.w.cookie; 2523 cb->args[1] = arg.w.count + 1; 2524 if (arg.w.stop) 2525 goto errout; 2526 } 2527 return true; 2528 2529 errout: 2530 tcf_proto_put(tp, true, NULL); 2531 return false; 2532 } 2533 2534 static const struct nla_policy tcf_tfilter_dump_policy[TCA_MAX + 1] = { 2535 [TCA_DUMP_FLAGS] = NLA_POLICY_BITFIELD32(TCA_DUMP_FLAGS_TERSE), 2536 }; 2537 2538 /* called with RTNL */ 2539 static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb) 2540 { 2541 struct tcf_chain *chain, *chain_prev; 2542 struct net *net = sock_net(skb->sk); 2543 struct nlattr *tca[TCA_MAX + 1]; 2544 struct Qdisc *q = NULL; 2545 struct tcf_block *block; 2546 struct tcmsg *tcm = nlmsg_data(cb->nlh); 2547 bool terse_dump = false; 2548 long index_start; 2549 long index; 2550 u32 parent; 2551 int err; 2552 2553 if (nlmsg_len(cb->nlh) < sizeof(*tcm)) 2554 return skb->len; 2555 2556 err = nlmsg_parse_deprecated(cb->nlh, sizeof(*tcm), tca, TCA_MAX, 2557 tcf_tfilter_dump_policy, cb->extack); 2558 if (err) 2559 return err; 2560 2561 if (tca[TCA_DUMP_FLAGS]) { 2562 struct nla_bitfield32 flags = 2563 nla_get_bitfield32(tca[TCA_DUMP_FLAGS]); 2564 2565 terse_dump = flags.value & TCA_DUMP_FLAGS_TERSE; 2566 } 2567 2568 if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) { 2569 block = tcf_block_refcnt_get(net, tcm->tcm_block_index); 2570 if (!block) 2571 goto out; 2572 /* If we work with block index, q is NULL and parent value 2573 * will never be used in the following code. The check 2574 * in tcf_fill_node prevents it. However, compiler does not 2575 * see that far, so set parent to zero to silence the warning 2576 * about parent being uninitialized. 2577 */ 2578 parent = 0; 2579 } else { 2580 const struct Qdisc_class_ops *cops; 2581 struct net_device *dev; 2582 unsigned long cl = 0; 2583 2584 dev = __dev_get_by_index(net, tcm->tcm_ifindex); 2585 if (!dev) 2586 return skb->len; 2587 2588 parent = tcm->tcm_parent; 2589 if (!parent) 2590 q = dev->qdisc; 2591 else 2592 q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent)); 2593 if (!q) 2594 goto out; 2595 cops = q->ops->cl_ops; 2596 if (!cops) 2597 goto out; 2598 if (!cops->tcf_block) 2599 goto out; 2600 if (TC_H_MIN(tcm->tcm_parent)) { 2601 cl = cops->find(q, tcm->tcm_parent); 2602 if (cl == 0) 2603 goto out; 2604 } 2605 block = cops->tcf_block(q, cl, NULL); 2606 if (!block) 2607 goto out; 2608 parent = block->classid; 2609 if (tcf_block_shared(block)) 2610 q = NULL; 2611 } 2612 2613 index_start = cb->args[0]; 2614 index = 0; 2615 2616 for (chain = __tcf_get_next_chain(block, NULL); 2617 chain; 2618 chain_prev = chain, 2619 chain = __tcf_get_next_chain(block, chain), 2620 tcf_chain_put(chain_prev)) { 2621 if (tca[TCA_CHAIN] && 2622 nla_get_u32(tca[TCA_CHAIN]) != chain->index) 2623 continue; 2624 if (!tcf_chain_dump(chain, q, parent, skb, cb, 2625 index_start, &index, terse_dump)) { 2626 tcf_chain_put(chain); 2627 err = -EMSGSIZE; 2628 break; 2629 } 2630 } 2631 2632 if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) 2633 tcf_block_refcnt_put(block, true); 2634 cb->args[0] = index; 2635 2636 out: 2637 /* If we did no progress, the error (EMSGSIZE) is real */ 2638 if (skb->len == 0 && err) 2639 return err; 2640 return skb->len; 2641 } 2642 2643 static int tc_chain_fill_node(const struct tcf_proto_ops *tmplt_ops, 2644 void *tmplt_priv, u32 chain_index, 2645 struct net *net, struct sk_buff *skb, 2646 struct tcf_block *block, 2647 u32 portid, u32 seq, u16 flags, int event) 2648 { 2649 unsigned char *b = skb_tail_pointer(skb); 2650 const struct tcf_proto_ops *ops; 2651 struct nlmsghdr *nlh; 2652 struct tcmsg *tcm; 2653 void *priv; 2654 2655 ops = tmplt_ops; 2656 priv = tmplt_priv; 2657 2658 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags); 2659 if (!nlh) 2660 goto out_nlmsg_trim; 2661 tcm = nlmsg_data(nlh); 2662 tcm->tcm_family = AF_UNSPEC; 2663 tcm->tcm__pad1 = 0; 2664 tcm->tcm__pad2 = 0; 2665 tcm->tcm_handle = 0; 2666 if (block->q) { 2667 tcm->tcm_ifindex = qdisc_dev(block->q)->ifindex; 2668 tcm->tcm_parent = block->q->handle; 2669 } else { 2670 tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK; 2671 tcm->tcm_block_index = block->index; 2672 } 2673 2674 if (nla_put_u32(skb, TCA_CHAIN, chain_index)) 2675 goto nla_put_failure; 2676 2677 if (ops) { 2678 if (nla_put_string(skb, TCA_KIND, ops->kind)) 2679 goto nla_put_failure; 2680 if (ops->tmplt_dump(skb, net, priv) < 0) 2681 goto nla_put_failure; 2682 } 2683 2684 nlh->nlmsg_len = skb_tail_pointer(skb) - b; 2685 return skb->len; 2686 2687 out_nlmsg_trim: 2688 nla_put_failure: 2689 nlmsg_trim(skb, b); 2690 return -EMSGSIZE; 2691 } 2692 2693 static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb, 2694 u32 seq, u16 flags, int event, bool unicast) 2695 { 2696 u32 portid = oskb ? NETLINK_CB(oskb).portid : 0; 2697 struct tcf_block *block = chain->block; 2698 struct net *net = block->net; 2699 struct sk_buff *skb; 2700 int err = 0; 2701 2702 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); 2703 if (!skb) 2704 return -ENOBUFS; 2705 2706 if (tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv, 2707 chain->index, net, skb, block, portid, 2708 seq, flags, event) <= 0) { 2709 kfree_skb(skb); 2710 return -EINVAL; 2711 } 2712 2713 if (unicast) 2714 err = rtnl_unicast(skb, net, portid); 2715 else 2716 err = rtnetlink_send(skb, net, portid, RTNLGRP_TC, 2717 flags & NLM_F_ECHO); 2718 2719 return err; 2720 } 2721 2722 static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops, 2723 void *tmplt_priv, u32 chain_index, 2724 struct tcf_block *block, struct sk_buff *oskb, 2725 u32 seq, u16 flags, bool unicast) 2726 { 2727 u32 portid = oskb ? NETLINK_CB(oskb).portid : 0; 2728 struct net *net = block->net; 2729 struct sk_buff *skb; 2730 2731 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); 2732 if (!skb) 2733 return -ENOBUFS; 2734 2735 if (tc_chain_fill_node(tmplt_ops, tmplt_priv, chain_index, net, skb, 2736 block, portid, seq, flags, RTM_DELCHAIN) <= 0) { 2737 kfree_skb(skb); 2738 return -EINVAL; 2739 } 2740 2741 if (unicast) 2742 return rtnl_unicast(skb, net, portid); 2743 2744 return rtnetlink_send(skb, net, portid, RTNLGRP_TC, flags & NLM_F_ECHO); 2745 } 2746 2747 static int tc_chain_tmplt_add(struct tcf_chain *chain, struct net *net, 2748 struct nlattr **tca, 2749 struct netlink_ext_ack *extack) 2750 { 2751 const struct tcf_proto_ops *ops; 2752 char name[IFNAMSIZ]; 2753 void *tmplt_priv; 2754 2755 /* If kind is not set, user did not specify template. */ 2756 if (!tca[TCA_KIND]) 2757 return 0; 2758 2759 if (tcf_proto_check_kind(tca[TCA_KIND], name)) { 2760 NL_SET_ERR_MSG(extack, "Specified TC chain template name too long"); 2761 return -EINVAL; 2762 } 2763 2764 ops = tcf_proto_lookup_ops(name, true, extack); 2765 if (IS_ERR(ops)) 2766 return PTR_ERR(ops); 2767 if (!ops->tmplt_create || !ops->tmplt_destroy || !ops->tmplt_dump) { 2768 NL_SET_ERR_MSG(extack, "Chain templates are not supported with specified classifier"); 2769 return -EOPNOTSUPP; 2770 } 2771 2772 tmplt_priv = ops->tmplt_create(net, chain, tca, extack); 2773 if (IS_ERR(tmplt_priv)) { 2774 module_put(ops->owner); 2775 return PTR_ERR(tmplt_priv); 2776 } 2777 chain->tmplt_ops = ops; 2778 chain->tmplt_priv = tmplt_priv; 2779 return 0; 2780 } 2781 2782 static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops, 2783 void *tmplt_priv) 2784 { 2785 /* If template ops are set, no work to do for us. */ 2786 if (!tmplt_ops) 2787 return; 2788 2789 tmplt_ops->tmplt_destroy(tmplt_priv); 2790 module_put(tmplt_ops->owner); 2791 } 2792 2793 /* Add/delete/get a chain */ 2794 2795 static int tc_ctl_chain(struct sk_buff *skb, struct nlmsghdr *n, 2796 struct netlink_ext_ack *extack) 2797 { 2798 struct net *net = sock_net(skb->sk); 2799 struct nlattr *tca[TCA_MAX + 1]; 2800 struct tcmsg *t; 2801 u32 parent; 2802 u32 chain_index; 2803 struct Qdisc *q; 2804 struct tcf_chain *chain; 2805 struct tcf_block *block; 2806 unsigned long cl; 2807 int err; 2808 2809 if (n->nlmsg_type != RTM_GETCHAIN && 2810 !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN)) 2811 return -EPERM; 2812 2813 replay: 2814 q = NULL; 2815 err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX, 2816 rtm_tca_policy, extack); 2817 if (err < 0) 2818 return err; 2819 2820 t = nlmsg_data(n); 2821 parent = t->tcm_parent; 2822 cl = 0; 2823 2824 block = tcf_block_find(net, &q, &parent, &cl, 2825 t->tcm_ifindex, t->tcm_block_index, extack); 2826 if (IS_ERR(block)) 2827 return PTR_ERR(block); 2828 2829 chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0; 2830 if (chain_index > TC_ACT_EXT_VAL_MASK) { 2831 NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit"); 2832 err = -EINVAL; 2833 goto errout_block; 2834 } 2835 2836 mutex_lock(&block->lock); 2837 chain = tcf_chain_lookup(block, chain_index); 2838 if (n->nlmsg_type == RTM_NEWCHAIN) { 2839 if (chain) { 2840 if (tcf_chain_held_by_acts_only(chain)) { 2841 /* The chain exists only because there is 2842 * some action referencing it. 2843 */ 2844 tcf_chain_hold(chain); 2845 } else { 2846 NL_SET_ERR_MSG(extack, "Filter chain already exists"); 2847 err = -EEXIST; 2848 goto errout_block_locked; 2849 } 2850 } else { 2851 if (!(n->nlmsg_flags & NLM_F_CREATE)) { 2852 NL_SET_ERR_MSG(extack, "Need both RTM_NEWCHAIN and NLM_F_CREATE to create a new chain"); 2853 err = -ENOENT; 2854 goto errout_block_locked; 2855 } 2856 chain = tcf_chain_create(block, chain_index); 2857 if (!chain) { 2858 NL_SET_ERR_MSG(extack, "Failed to create filter chain"); 2859 err = -ENOMEM; 2860 goto errout_block_locked; 2861 } 2862 } 2863 } else { 2864 if (!chain || tcf_chain_held_by_acts_only(chain)) { 2865 NL_SET_ERR_MSG(extack, "Cannot find specified filter chain"); 2866 err = -EINVAL; 2867 goto errout_block_locked; 2868 } 2869 tcf_chain_hold(chain); 2870 } 2871 2872 if (n->nlmsg_type == RTM_NEWCHAIN) { 2873 /* Modifying chain requires holding parent block lock. In case 2874 * the chain was successfully added, take a reference to the 2875 * chain. This ensures that an empty chain does not disappear at 2876 * the end of this function. 2877 */ 2878 tcf_chain_hold(chain); 2879 chain->explicitly_created = true; 2880 } 2881 mutex_unlock(&block->lock); 2882 2883 switch (n->nlmsg_type) { 2884 case RTM_NEWCHAIN: 2885 err = tc_chain_tmplt_add(chain, net, tca, extack); 2886 if (err) { 2887 tcf_chain_put_explicitly_created(chain); 2888 goto errout; 2889 } 2890 2891 tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL, 2892 RTM_NEWCHAIN, false); 2893 break; 2894 case RTM_DELCHAIN: 2895 tfilter_notify_chain(net, skb, block, q, parent, n, 2896 chain, RTM_DELTFILTER); 2897 /* Flush the chain first as the user requested chain removal. */ 2898 tcf_chain_flush(chain, true); 2899 /* In case the chain was successfully deleted, put a reference 2900 * to the chain previously taken during addition. 2901 */ 2902 tcf_chain_put_explicitly_created(chain); 2903 break; 2904 case RTM_GETCHAIN: 2905 err = tc_chain_notify(chain, skb, n->nlmsg_seq, 2906 n->nlmsg_flags, n->nlmsg_type, true); 2907 if (err < 0) 2908 NL_SET_ERR_MSG(extack, "Failed to send chain notify message"); 2909 break; 2910 default: 2911 err = -EOPNOTSUPP; 2912 NL_SET_ERR_MSG(extack, "Unsupported message type"); 2913 goto errout; 2914 } 2915 2916 errout: 2917 tcf_chain_put(chain); 2918 errout_block: 2919 tcf_block_release(q, block, true); 2920 if (err == -EAGAIN) 2921 /* Replay the request. */ 2922 goto replay; 2923 return err; 2924 2925 errout_block_locked: 2926 mutex_unlock(&block->lock); 2927 goto errout_block; 2928 } 2929 2930 /* called with RTNL */ 2931 static int tc_dump_chain(struct sk_buff *skb, struct netlink_callback *cb) 2932 { 2933 struct net *net = sock_net(skb->sk); 2934 struct nlattr *tca[TCA_MAX + 1]; 2935 struct Qdisc *q = NULL; 2936 struct tcf_block *block; 2937 struct tcmsg *tcm = nlmsg_data(cb->nlh); 2938 struct tcf_chain *chain; 2939 long index_start; 2940 long index; 2941 int err; 2942 2943 if (nlmsg_len(cb->nlh) < sizeof(*tcm)) 2944 return skb->len; 2945 2946 err = nlmsg_parse_deprecated(cb->nlh, sizeof(*tcm), tca, TCA_MAX, 2947 rtm_tca_policy, cb->extack); 2948 if (err) 2949 return err; 2950 2951 if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) { 2952 block = tcf_block_refcnt_get(net, tcm->tcm_block_index); 2953 if (!block) 2954 goto out; 2955 } else { 2956 const struct Qdisc_class_ops *cops; 2957 struct net_device *dev; 2958 unsigned long cl = 0; 2959 2960 dev = __dev_get_by_index(net, tcm->tcm_ifindex); 2961 if (!dev) 2962 return skb->len; 2963 2964 if (!tcm->tcm_parent) 2965 q = dev->qdisc; 2966 else 2967 q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent)); 2968 2969 if (!q) 2970 goto out; 2971 cops = q->ops->cl_ops; 2972 if (!cops) 2973 goto out; 2974 if (!cops->tcf_block) 2975 goto out; 2976 if (TC_H_MIN(tcm->tcm_parent)) { 2977 cl = cops->find(q, tcm->tcm_parent); 2978 if (cl == 0) 2979 goto out; 2980 } 2981 block = cops->tcf_block(q, cl, NULL); 2982 if (!block) 2983 goto out; 2984 if (tcf_block_shared(block)) 2985 q = NULL; 2986 } 2987 2988 index_start = cb->args[0]; 2989 index = 0; 2990 2991 mutex_lock(&block->lock); 2992 list_for_each_entry(chain, &block->chain_list, list) { 2993 if ((tca[TCA_CHAIN] && 2994 nla_get_u32(tca[TCA_CHAIN]) != chain->index)) 2995 continue; 2996 if (index < index_start) { 2997 index++; 2998 continue; 2999 } 3000 if (tcf_chain_held_by_acts_only(chain)) 3001 continue; 3002 err = tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv, 3003 chain->index, net, skb, block, 3004 NETLINK_CB(cb->skb).portid, 3005 cb->nlh->nlmsg_seq, NLM_F_MULTI, 3006 RTM_NEWCHAIN); 3007 if (err <= 0) 3008 break; 3009 index++; 3010 } 3011 mutex_unlock(&block->lock); 3012 3013 if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) 3014 tcf_block_refcnt_put(block, true); 3015 cb->args[0] = index; 3016 3017 out: 3018 /* If we did no progress, the error (EMSGSIZE) is real */ 3019 if (skb->len == 0 && err) 3020 return err; 3021 return skb->len; 3022 } 3023 3024 void tcf_exts_destroy(struct tcf_exts *exts) 3025 { 3026 #ifdef CONFIG_NET_CLS_ACT 3027 if (exts->actions) { 3028 tcf_action_destroy(exts->actions, TCA_ACT_UNBIND); 3029 kfree(exts->actions); 3030 } 3031 exts->nr_actions = 0; 3032 #endif 3033 } 3034 EXPORT_SYMBOL(tcf_exts_destroy); 3035 3036 int tcf_exts_validate_ex(struct net *net, struct tcf_proto *tp, struct nlattr **tb, 3037 struct nlattr *rate_tlv, struct tcf_exts *exts, 3038 u32 flags, u32 fl_flags, struct netlink_ext_ack *extack) 3039 { 3040 #ifdef CONFIG_NET_CLS_ACT 3041 { 3042 int init_res[TCA_ACT_MAX_PRIO] = {}; 3043 struct tc_action *act; 3044 size_t attr_size = 0; 3045 3046 if (exts->police && tb[exts->police]) { 3047 struct tc_action_ops *a_o; 3048 3049 a_o = tc_action_load_ops(tb[exts->police], true, 3050 !(flags & TCA_ACT_FLAGS_NO_RTNL), 3051 extack); 3052 if (IS_ERR(a_o)) 3053 return PTR_ERR(a_o); 3054 flags |= TCA_ACT_FLAGS_POLICE | TCA_ACT_FLAGS_BIND; 3055 act = tcf_action_init_1(net, tp, tb[exts->police], 3056 rate_tlv, a_o, init_res, flags, 3057 extack); 3058 module_put(a_o->owner); 3059 if (IS_ERR(act)) 3060 return PTR_ERR(act); 3061 3062 act->type = exts->type = TCA_OLD_COMPAT; 3063 exts->actions[0] = act; 3064 exts->nr_actions = 1; 3065 tcf_idr_insert_many(exts->actions); 3066 } else if (exts->action && tb[exts->action]) { 3067 int err; 3068 3069 flags |= TCA_ACT_FLAGS_BIND; 3070 err = tcf_action_init(net, tp, tb[exts->action], 3071 rate_tlv, exts->actions, init_res, 3072 &attr_size, flags, fl_flags, 3073 extack); 3074 if (err < 0) 3075 return err; 3076 exts->nr_actions = err; 3077 } 3078 } 3079 #else 3080 if ((exts->action && tb[exts->action]) || 3081 (exts->police && tb[exts->police])) { 3082 NL_SET_ERR_MSG(extack, "Classifier actions are not supported per compile options (CONFIG_NET_CLS_ACT)"); 3083 return -EOPNOTSUPP; 3084 } 3085 #endif 3086 3087 return 0; 3088 } 3089 EXPORT_SYMBOL(tcf_exts_validate_ex); 3090 3091 int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb, 3092 struct nlattr *rate_tlv, struct tcf_exts *exts, 3093 u32 flags, struct netlink_ext_ack *extack) 3094 { 3095 return tcf_exts_validate_ex(net, tp, tb, rate_tlv, exts, 3096 flags, 0, extack); 3097 } 3098 EXPORT_SYMBOL(tcf_exts_validate); 3099 3100 void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src) 3101 { 3102 #ifdef CONFIG_NET_CLS_ACT 3103 struct tcf_exts old = *dst; 3104 3105 *dst = *src; 3106 tcf_exts_destroy(&old); 3107 #endif 3108 } 3109 EXPORT_SYMBOL(tcf_exts_change); 3110 3111 #ifdef CONFIG_NET_CLS_ACT 3112 static struct tc_action *tcf_exts_first_act(struct tcf_exts *exts) 3113 { 3114 if (exts->nr_actions == 0) 3115 return NULL; 3116 else 3117 return exts->actions[0]; 3118 } 3119 #endif 3120 3121 int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts) 3122 { 3123 #ifdef CONFIG_NET_CLS_ACT 3124 struct nlattr *nest; 3125 3126 if (exts->action && tcf_exts_has_actions(exts)) { 3127 /* 3128 * again for backward compatible mode - we want 3129 * to work with both old and new modes of entering 3130 * tc data even if iproute2 was newer - jhs 3131 */ 3132 if (exts->type != TCA_OLD_COMPAT) { 3133 nest = nla_nest_start_noflag(skb, exts->action); 3134 if (nest == NULL) 3135 goto nla_put_failure; 3136 3137 if (tcf_action_dump(skb, exts->actions, 0, 0, false) 3138 < 0) 3139 goto nla_put_failure; 3140 nla_nest_end(skb, nest); 3141 } else if (exts->police) { 3142 struct tc_action *act = tcf_exts_first_act(exts); 3143 nest = nla_nest_start_noflag(skb, exts->police); 3144 if (nest == NULL || !act) 3145 goto nla_put_failure; 3146 if (tcf_action_dump_old(skb, act, 0, 0) < 0) 3147 goto nla_put_failure; 3148 nla_nest_end(skb, nest); 3149 } 3150 } 3151 return 0; 3152 3153 nla_put_failure: 3154 nla_nest_cancel(skb, nest); 3155 return -1; 3156 #else 3157 return 0; 3158 #endif 3159 } 3160 EXPORT_SYMBOL(tcf_exts_dump); 3161 3162 int tcf_exts_terse_dump(struct sk_buff *skb, struct tcf_exts *exts) 3163 { 3164 #ifdef CONFIG_NET_CLS_ACT 3165 struct nlattr *nest; 3166 3167 if (!exts->action || !tcf_exts_has_actions(exts)) 3168 return 0; 3169 3170 nest = nla_nest_start_noflag(skb, exts->action); 3171 if (!nest) 3172 goto nla_put_failure; 3173 3174 if (tcf_action_dump(skb, exts->actions, 0, 0, true) < 0) 3175 goto nla_put_failure; 3176 nla_nest_end(skb, nest); 3177 return 0; 3178 3179 nla_put_failure: 3180 nla_nest_cancel(skb, nest); 3181 return -1; 3182 #else 3183 return 0; 3184 #endif 3185 } 3186 EXPORT_SYMBOL(tcf_exts_terse_dump); 3187 3188 int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts) 3189 { 3190 #ifdef CONFIG_NET_CLS_ACT 3191 struct tc_action *a = tcf_exts_first_act(exts); 3192 if (a != NULL && tcf_action_copy_stats(skb, a, 1) < 0) 3193 return -1; 3194 #endif 3195 return 0; 3196 } 3197 EXPORT_SYMBOL(tcf_exts_dump_stats); 3198 3199 static void tcf_block_offload_inc(struct tcf_block *block, u32 *flags) 3200 { 3201 if (*flags & TCA_CLS_FLAGS_IN_HW) 3202 return; 3203 *flags |= TCA_CLS_FLAGS_IN_HW; 3204 atomic_inc(&block->offloadcnt); 3205 } 3206 3207 static void tcf_block_offload_dec(struct tcf_block *block, u32 *flags) 3208 { 3209 if (!(*flags & TCA_CLS_FLAGS_IN_HW)) 3210 return; 3211 *flags &= ~TCA_CLS_FLAGS_IN_HW; 3212 atomic_dec(&block->offloadcnt); 3213 } 3214 3215 static void tc_cls_offload_cnt_update(struct tcf_block *block, 3216 struct tcf_proto *tp, u32 *cnt, 3217 u32 *flags, u32 diff, bool add) 3218 { 3219 lockdep_assert_held(&block->cb_lock); 3220 3221 spin_lock(&tp->lock); 3222 if (add) { 3223 if (!*cnt) 3224 tcf_block_offload_inc(block, flags); 3225 *cnt += diff; 3226 } else { 3227 *cnt -= diff; 3228 if (!*cnt) 3229 tcf_block_offload_dec(block, flags); 3230 } 3231 spin_unlock(&tp->lock); 3232 } 3233 3234 static void 3235 tc_cls_offload_cnt_reset(struct tcf_block *block, struct tcf_proto *tp, 3236 u32 *cnt, u32 *flags) 3237 { 3238 lockdep_assert_held(&block->cb_lock); 3239 3240 spin_lock(&tp->lock); 3241 tcf_block_offload_dec(block, flags); 3242 *cnt = 0; 3243 spin_unlock(&tp->lock); 3244 } 3245 3246 static int 3247 __tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type, 3248 void *type_data, bool err_stop) 3249 { 3250 struct flow_block_cb *block_cb; 3251 int ok_count = 0; 3252 int err; 3253 3254 list_for_each_entry(block_cb, &block->flow_block.cb_list, list) { 3255 err = block_cb->cb(type, type_data, block_cb->cb_priv); 3256 if (err) { 3257 if (err_stop) 3258 return err; 3259 } else { 3260 ok_count++; 3261 } 3262 } 3263 return ok_count; 3264 } 3265 3266 int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type, 3267 void *type_data, bool err_stop, bool rtnl_held) 3268 { 3269 bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held; 3270 int ok_count; 3271 3272 retry: 3273 if (take_rtnl) 3274 rtnl_lock(); 3275 down_read(&block->cb_lock); 3276 /* Need to obtain rtnl lock if block is bound to devs that require it. 3277 * In block bind code cb_lock is obtained while holding rtnl, so we must 3278 * obtain the locks in same order here. 3279 */ 3280 if (!rtnl_held && !take_rtnl && block->lockeddevcnt) { 3281 up_read(&block->cb_lock); 3282 take_rtnl = true; 3283 goto retry; 3284 } 3285 3286 ok_count = __tc_setup_cb_call(block, type, type_data, err_stop); 3287 3288 up_read(&block->cb_lock); 3289 if (take_rtnl) 3290 rtnl_unlock(); 3291 return ok_count; 3292 } 3293 EXPORT_SYMBOL(tc_setup_cb_call); 3294 3295 /* Non-destructive filter add. If filter that wasn't already in hardware is 3296 * successfully offloaded, increment block offloads counter. On failure, 3297 * previously offloaded filter is considered to be intact and offloads counter 3298 * is not decremented. 3299 */ 3300 3301 int tc_setup_cb_add(struct tcf_block *block, struct tcf_proto *tp, 3302 enum tc_setup_type type, void *type_data, bool err_stop, 3303 u32 *flags, unsigned int *in_hw_count, bool rtnl_held) 3304 { 3305 bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held; 3306 int ok_count; 3307 3308 retry: 3309 if (take_rtnl) 3310 rtnl_lock(); 3311 down_read(&block->cb_lock); 3312 /* Need to obtain rtnl lock if block is bound to devs that require it. 3313 * In block bind code cb_lock is obtained while holding rtnl, so we must 3314 * obtain the locks in same order here. 3315 */ 3316 if (!rtnl_held && !take_rtnl && block->lockeddevcnt) { 3317 up_read(&block->cb_lock); 3318 take_rtnl = true; 3319 goto retry; 3320 } 3321 3322 /* Make sure all netdevs sharing this block are offload-capable. */ 3323 if (block->nooffloaddevcnt && err_stop) { 3324 ok_count = -EOPNOTSUPP; 3325 goto err_unlock; 3326 } 3327 3328 ok_count = __tc_setup_cb_call(block, type, type_data, err_stop); 3329 if (ok_count < 0) 3330 goto err_unlock; 3331 3332 if (tp->ops->hw_add) 3333 tp->ops->hw_add(tp, type_data); 3334 if (ok_count > 0) 3335 tc_cls_offload_cnt_update(block, tp, in_hw_count, flags, 3336 ok_count, true); 3337 err_unlock: 3338 up_read(&block->cb_lock); 3339 if (take_rtnl) 3340 rtnl_unlock(); 3341 return min(ok_count, 0); 3342 } 3343 EXPORT_SYMBOL(tc_setup_cb_add); 3344 3345 /* Destructive filter replace. If filter that wasn't already in hardware is 3346 * successfully offloaded, increment block offload counter. On failure, 3347 * previously offloaded filter is considered to be destroyed and offload counter 3348 * is decremented. 3349 */ 3350 3351 int tc_setup_cb_replace(struct tcf_block *block, struct tcf_proto *tp, 3352 enum tc_setup_type type, void *type_data, bool err_stop, 3353 u32 *old_flags, unsigned int *old_in_hw_count, 3354 u32 *new_flags, unsigned int *new_in_hw_count, 3355 bool rtnl_held) 3356 { 3357 bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held; 3358 int ok_count; 3359 3360 retry: 3361 if (take_rtnl) 3362 rtnl_lock(); 3363 down_read(&block->cb_lock); 3364 /* Need to obtain rtnl lock if block is bound to devs that require it. 3365 * In block bind code cb_lock is obtained while holding rtnl, so we must 3366 * obtain the locks in same order here. 3367 */ 3368 if (!rtnl_held && !take_rtnl && block->lockeddevcnt) { 3369 up_read(&block->cb_lock); 3370 take_rtnl = true; 3371 goto retry; 3372 } 3373 3374 /* Make sure all netdevs sharing this block are offload-capable. */ 3375 if (block->nooffloaddevcnt && err_stop) { 3376 ok_count = -EOPNOTSUPP; 3377 goto err_unlock; 3378 } 3379 3380 tc_cls_offload_cnt_reset(block, tp, old_in_hw_count, old_flags); 3381 if (tp->ops->hw_del) 3382 tp->ops->hw_del(tp, type_data); 3383 3384 ok_count = __tc_setup_cb_call(block, type, type_data, err_stop); 3385 if (ok_count < 0) 3386 goto err_unlock; 3387 3388 if (tp->ops->hw_add) 3389 tp->ops->hw_add(tp, type_data); 3390 if (ok_count > 0) 3391 tc_cls_offload_cnt_update(block, tp, new_in_hw_count, 3392 new_flags, ok_count, true); 3393 err_unlock: 3394 up_read(&block->cb_lock); 3395 if (take_rtnl) 3396 rtnl_unlock(); 3397 return min(ok_count, 0); 3398 } 3399 EXPORT_SYMBOL(tc_setup_cb_replace); 3400 3401 /* Destroy filter and decrement block offload counter, if filter was previously 3402 * offloaded. 3403 */ 3404 3405 int tc_setup_cb_destroy(struct tcf_block *block, struct tcf_proto *tp, 3406 enum tc_setup_type type, void *type_data, bool err_stop, 3407 u32 *flags, unsigned int *in_hw_count, bool rtnl_held) 3408 { 3409 bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held; 3410 int ok_count; 3411 3412 retry: 3413 if (take_rtnl) 3414 rtnl_lock(); 3415 down_read(&block->cb_lock); 3416 /* Need to obtain rtnl lock if block is bound to devs that require it. 3417 * In block bind code cb_lock is obtained while holding rtnl, so we must 3418 * obtain the locks in same order here. 3419 */ 3420 if (!rtnl_held && !take_rtnl && block->lockeddevcnt) { 3421 up_read(&block->cb_lock); 3422 take_rtnl = true; 3423 goto retry; 3424 } 3425 3426 ok_count = __tc_setup_cb_call(block, type, type_data, err_stop); 3427 3428 tc_cls_offload_cnt_reset(block, tp, in_hw_count, flags); 3429 if (tp->ops->hw_del) 3430 tp->ops->hw_del(tp, type_data); 3431 3432 up_read(&block->cb_lock); 3433 if (take_rtnl) 3434 rtnl_unlock(); 3435 return min(ok_count, 0); 3436 } 3437 EXPORT_SYMBOL(tc_setup_cb_destroy); 3438 3439 int tc_setup_cb_reoffload(struct tcf_block *block, struct tcf_proto *tp, 3440 bool add, flow_setup_cb_t *cb, 3441 enum tc_setup_type type, void *type_data, 3442 void *cb_priv, u32 *flags, unsigned int *in_hw_count) 3443 { 3444 int err = cb(type, type_data, cb_priv); 3445 3446 if (err) { 3447 if (add && tc_skip_sw(*flags)) 3448 return err; 3449 } else { 3450 tc_cls_offload_cnt_update(block, tp, in_hw_count, flags, 1, 3451 add); 3452 } 3453 3454 return 0; 3455 } 3456 EXPORT_SYMBOL(tc_setup_cb_reoffload); 3457 3458 static int tcf_act_get_cookie(struct flow_action_entry *entry, 3459 const struct tc_action *act) 3460 { 3461 struct tc_cookie *cookie; 3462 int err = 0; 3463 3464 rcu_read_lock(); 3465 cookie = rcu_dereference(act->act_cookie); 3466 if (cookie) { 3467 entry->cookie = flow_action_cookie_create(cookie->data, 3468 cookie->len, 3469 GFP_ATOMIC); 3470 if (!entry->cookie) 3471 err = -ENOMEM; 3472 } 3473 rcu_read_unlock(); 3474 return err; 3475 } 3476 3477 static void tcf_act_put_cookie(struct flow_action_entry *entry) 3478 { 3479 flow_action_cookie_destroy(entry->cookie); 3480 } 3481 3482 void tc_cleanup_offload_action(struct flow_action *flow_action) 3483 { 3484 struct flow_action_entry *entry; 3485 int i; 3486 3487 flow_action_for_each(i, entry, flow_action) { 3488 tcf_act_put_cookie(entry); 3489 if (entry->destructor) 3490 entry->destructor(entry->destructor_priv); 3491 } 3492 } 3493 EXPORT_SYMBOL(tc_cleanup_offload_action); 3494 3495 static int tc_setup_offload_act(struct tc_action *act, 3496 struct flow_action_entry *entry, 3497 u32 *index_inc) 3498 { 3499 #ifdef CONFIG_NET_CLS_ACT 3500 if (act->ops->offload_act_setup) 3501 return act->ops->offload_act_setup(act, entry, index_inc, true); 3502 else 3503 return -EOPNOTSUPP; 3504 #else 3505 return 0; 3506 #endif 3507 } 3508 3509 int tc_setup_action(struct flow_action *flow_action, 3510 struct tc_action *actions[]) 3511 { 3512 int i, j, index, err = 0; 3513 struct tc_action *act; 3514 3515 BUILD_BUG_ON(TCA_ACT_HW_STATS_ANY != FLOW_ACTION_HW_STATS_ANY); 3516 BUILD_BUG_ON(TCA_ACT_HW_STATS_IMMEDIATE != FLOW_ACTION_HW_STATS_IMMEDIATE); 3517 BUILD_BUG_ON(TCA_ACT_HW_STATS_DELAYED != FLOW_ACTION_HW_STATS_DELAYED); 3518 3519 if (!actions) 3520 return 0; 3521 3522 j = 0; 3523 tcf_act_for_each_action(i, act, actions) { 3524 struct flow_action_entry *entry; 3525 3526 entry = &flow_action->entries[j]; 3527 spin_lock_bh(&act->tcfa_lock); 3528 err = tcf_act_get_cookie(entry, act); 3529 if (err) 3530 goto err_out_locked; 3531 3532 entry->hw_stats = tc_act_hw_stats(act->hw_stats); 3533 entry->hw_index = act->tcfa_index; 3534 index = 0; 3535 err = tc_setup_offload_act(act, entry, &index); 3536 if (!err) 3537 j += index; 3538 else 3539 goto err_out_locked; 3540 spin_unlock_bh(&act->tcfa_lock); 3541 } 3542 3543 err_out: 3544 if (err) 3545 tc_cleanup_offload_action(flow_action); 3546 3547 return err; 3548 err_out_locked: 3549 spin_unlock_bh(&act->tcfa_lock); 3550 goto err_out; 3551 } 3552 3553 int tc_setup_offload_action(struct flow_action *flow_action, 3554 const struct tcf_exts *exts) 3555 { 3556 #ifdef CONFIG_NET_CLS_ACT 3557 if (!exts) 3558 return 0; 3559 3560 return tc_setup_action(flow_action, exts->actions); 3561 #else 3562 return 0; 3563 #endif 3564 } 3565 EXPORT_SYMBOL(tc_setup_offload_action); 3566 3567 unsigned int tcf_exts_num_actions(struct tcf_exts *exts) 3568 { 3569 unsigned int num_acts = 0; 3570 struct tc_action *act; 3571 int i; 3572 3573 tcf_exts_for_each_action(i, act, exts) { 3574 if (is_tcf_pedit(act)) 3575 num_acts += tcf_pedit_nkeys(act); 3576 else 3577 num_acts++; 3578 } 3579 return num_acts; 3580 } 3581 EXPORT_SYMBOL(tcf_exts_num_actions); 3582 3583 #ifdef CONFIG_NET_CLS_ACT 3584 static int tcf_qevent_parse_block_index(struct nlattr *block_index_attr, 3585 u32 *p_block_index, 3586 struct netlink_ext_ack *extack) 3587 { 3588 *p_block_index = nla_get_u32(block_index_attr); 3589 if (!*p_block_index) { 3590 NL_SET_ERR_MSG(extack, "Block number may not be zero"); 3591 return -EINVAL; 3592 } 3593 3594 return 0; 3595 } 3596 3597 int tcf_qevent_init(struct tcf_qevent *qe, struct Qdisc *sch, 3598 enum flow_block_binder_type binder_type, 3599 struct nlattr *block_index_attr, 3600 struct netlink_ext_ack *extack) 3601 { 3602 u32 block_index; 3603 int err; 3604 3605 if (!block_index_attr) 3606 return 0; 3607 3608 err = tcf_qevent_parse_block_index(block_index_attr, &block_index, extack); 3609 if (err) 3610 return err; 3611 3612 if (!block_index) 3613 return 0; 3614 3615 qe->info.binder_type = binder_type; 3616 qe->info.chain_head_change = tcf_chain_head_change_dflt; 3617 qe->info.chain_head_change_priv = &qe->filter_chain; 3618 qe->info.block_index = block_index; 3619 3620 return tcf_block_get_ext(&qe->block, sch, &qe->info, extack); 3621 } 3622 EXPORT_SYMBOL(tcf_qevent_init); 3623 3624 void tcf_qevent_destroy(struct tcf_qevent *qe, struct Qdisc *sch) 3625 { 3626 if (qe->info.block_index) 3627 tcf_block_put_ext(qe->block, sch, &qe->info); 3628 } 3629 EXPORT_SYMBOL(tcf_qevent_destroy); 3630 3631 int tcf_qevent_validate_change(struct tcf_qevent *qe, struct nlattr *block_index_attr, 3632 struct netlink_ext_ack *extack) 3633 { 3634 u32 block_index; 3635 int err; 3636 3637 if (!block_index_attr) 3638 return 0; 3639 3640 err = tcf_qevent_parse_block_index(block_index_attr, &block_index, extack); 3641 if (err) 3642 return err; 3643 3644 /* Bounce newly-configured block or change in block. */ 3645 if (block_index != qe->info.block_index) { 3646 NL_SET_ERR_MSG(extack, "Change of blocks is not supported"); 3647 return -EINVAL; 3648 } 3649 3650 return 0; 3651 } 3652 EXPORT_SYMBOL(tcf_qevent_validate_change); 3653 3654 struct sk_buff *tcf_qevent_handle(struct tcf_qevent *qe, struct Qdisc *sch, struct sk_buff *skb, 3655 struct sk_buff **to_free, int *ret) 3656 { 3657 struct tcf_result cl_res; 3658 struct tcf_proto *fl; 3659 3660 if (!qe->info.block_index) 3661 return skb; 3662 3663 fl = rcu_dereference_bh(qe->filter_chain); 3664 3665 switch (tcf_classify(skb, NULL, fl, &cl_res, false)) { 3666 case TC_ACT_SHOT: 3667 qdisc_qstats_drop(sch); 3668 __qdisc_drop(skb, to_free); 3669 *ret = __NET_XMIT_BYPASS; 3670 return NULL; 3671 case TC_ACT_STOLEN: 3672 case TC_ACT_QUEUED: 3673 case TC_ACT_TRAP: 3674 __qdisc_drop(skb, to_free); 3675 *ret = __NET_XMIT_STOLEN; 3676 return NULL; 3677 case TC_ACT_REDIRECT: 3678 skb_do_redirect(skb); 3679 *ret = __NET_XMIT_STOLEN; 3680 return NULL; 3681 } 3682 3683 return skb; 3684 } 3685 EXPORT_SYMBOL(tcf_qevent_handle); 3686 3687 int tcf_qevent_dump(struct sk_buff *skb, int attr_name, struct tcf_qevent *qe) 3688 { 3689 if (!qe->info.block_index) 3690 return 0; 3691 return nla_put_u32(skb, attr_name, qe->info.block_index); 3692 } 3693 EXPORT_SYMBOL(tcf_qevent_dump); 3694 #endif 3695 3696 static __net_init int tcf_net_init(struct net *net) 3697 { 3698 struct tcf_net *tn = net_generic(net, tcf_net_id); 3699 3700 spin_lock_init(&tn->idr_lock); 3701 idr_init(&tn->idr); 3702 return 0; 3703 } 3704 3705 static void __net_exit tcf_net_exit(struct net *net) 3706 { 3707 struct tcf_net *tn = net_generic(net, tcf_net_id); 3708 3709 idr_destroy(&tn->idr); 3710 } 3711 3712 static struct pernet_operations tcf_net_ops = { 3713 .init = tcf_net_init, 3714 .exit = tcf_net_exit, 3715 .id = &tcf_net_id, 3716 .size = sizeof(struct tcf_net), 3717 }; 3718 3719 static int __init tc_filter_init(void) 3720 { 3721 int err; 3722 3723 tc_filter_wq = alloc_ordered_workqueue("tc_filter_workqueue", 0); 3724 if (!tc_filter_wq) 3725 return -ENOMEM; 3726 3727 err = register_pernet_subsys(&tcf_net_ops); 3728 if (err) 3729 goto err_register_pernet_subsys; 3730 3731 rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_new_tfilter, NULL, 3732 RTNL_FLAG_DOIT_UNLOCKED); 3733 rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_del_tfilter, NULL, 3734 RTNL_FLAG_DOIT_UNLOCKED); 3735 rtnl_register(PF_UNSPEC, RTM_GETTFILTER, tc_get_tfilter, 3736 tc_dump_tfilter, RTNL_FLAG_DOIT_UNLOCKED); 3737 rtnl_register(PF_UNSPEC, RTM_NEWCHAIN, tc_ctl_chain, NULL, 0); 3738 rtnl_register(PF_UNSPEC, RTM_DELCHAIN, tc_ctl_chain, NULL, 0); 3739 rtnl_register(PF_UNSPEC, RTM_GETCHAIN, tc_ctl_chain, 3740 tc_dump_chain, 0); 3741 3742 return 0; 3743 3744 err_register_pernet_subsys: 3745 destroy_workqueue(tc_filter_wq); 3746 return err; 3747 } 3748 3749 subsys_initcall(tc_filter_init); 3750