// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/cls_api.c	Packet classifier API.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Changes:
 *
 * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/rhashtable.h>
#include <linux/jhash.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>
#include <net/tc_act/tc_csum.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_police.h>
#include <net/tc_act/tc_sample.h>
#include <net/tc_act/tc_skbedit.h>
#include <net/tc_act/tc_ct.h>
#include <net/tc_act/tc_mpls.h>
#include <net/flow_offload.h>

extern const struct nla_policy rtm_tca_policy[TCA_MAX + 1];

/* The list of all installed classifier types */
static LIST_HEAD(tcf_proto_base);

/* Protects list of registered TC modules. It is pure SMP lock. */
static DEFINE_RWLOCK(cls_mod_lock);

static u32 destroy_obj_hashfn(const struct tcf_proto *tp)
{
	return jhash_3words(tp->chain->index, tp->prio,
			    (__force __u32)tp->protocol, 0);
}

static void tcf_proto_signal_destroying(struct tcf_chain *chain,
					struct tcf_proto *tp)
{
	struct tcf_block *block = chain->block;

	mutex_lock(&block->proto_destroy_lock);
	hash_add_rcu(block->proto_destroy_ht, &tp->destroy_ht_node,
		     destroy_obj_hashfn(tp));
	mutex_unlock(&block->proto_destroy_lock);
}

static bool tcf_proto_cmp(const struct tcf_proto *tp1,
			  const struct tcf_proto *tp2)
{
	return tp1->chain->index == tp2->chain->index &&
	       tp1->prio == tp2->prio &&
	       tp1->protocol == tp2->protocol;
}

static bool tcf_proto_exists_destroying(struct tcf_chain *chain,
					struct tcf_proto *tp)
{
	u32 hash = destroy_obj_hashfn(tp);
	struct tcf_proto *iter;
	bool found = false;

	rcu_read_lock();
	hash_for_each_possible_rcu(chain->block->proto_destroy_ht, iter,
				   destroy_ht_node, hash) {
		if (tcf_proto_cmp(tp, iter)) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();

	return found;
}

static void
tcf_proto_signal_destroyed(struct tcf_chain *chain, struct tcf_proto *tp)
{
	struct tcf_block *block = chain->block;

	mutex_lock(&block->proto_destroy_lock);
	if (hash_hashed(&tp->destroy_ht_node))
		hash_del_rcu(&tp->destroy_ht_node);
	mutex_unlock(&block->proto_destroy_lock);
}

/* Find classifier type by string name */

static const struct tcf_proto_ops *__tcf_proto_lookup_ops(const char *kind)
{
	const struct tcf_proto_ops *t, *res = NULL;

	if (kind) {
		read_lock(&cls_mod_lock);
		list_for_each_entry(t, &tcf_proto_base, head) {
			if (strcmp(kind, t->kind) == 0) {
				if (try_module_get(t->owner))
					res = t;
				break;
			}
		}
		read_unlock(&cls_mod_lock);
	}
	return res;
}

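/* Look up classifier ops by name, autoloading the "cls_<kind>" module if
 * needed. RTNL may be dropped around the module load; in that case the
 * caller gets -EAGAIN and must replay the whole request, even when the
 * load itself succeeded.
 */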
static const struct tcf_proto_ops *
tcf_proto_lookup_ops(const char *kind, bool rtnl_held,
		     struct netlink_ext_ack *extack)
{
	const struct tcf_proto_ops *ops;

	ops = __tcf_proto_lookup_ops(kind);
	if (ops)
		return ops;
#ifdef CONFIG_MODULES
	if (rtnl_held)
		rtnl_unlock();
	request_module("cls_%s", kind);
	if (rtnl_held)
		rtnl_lock();
	ops = __tcf_proto_lookup_ops(kind);
	/* We dropped the RTNL semaphore in order to perform
	 * the module load. So, even if we succeeded in loading
	 * the module we have to replay the request. We indicate
	 * this using -EAGAIN.
	 */
	if (ops) {
		module_put(ops->owner);
		return ERR_PTR(-EAGAIN);
	}
#endif
	NL_SET_ERR_MSG(extack, "TC classifier not found");
	return ERR_PTR(-ENOENT);
}

/* Register(unregister) new classifier type */

int register_tcf_proto_ops(struct tcf_proto_ops *ops)
{
	struct tcf_proto_ops *t;
	int rc = -EEXIST;

	write_lock(&cls_mod_lock);
	list_for_each_entry(t, &tcf_proto_base, head)
		if (!strcmp(ops->kind, t->kind))
			goto out;

	list_add_tail(&ops->head, &tcf_proto_base);
	rc = 0;
out:
	write_unlock(&cls_mod_lock);
	return rc;
}
EXPORT_SYMBOL(register_tcf_proto_ops);

static struct workqueue_struct *tc_filter_wq;

int unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
{
	struct tcf_proto_ops *t;
	int rc = -ENOENT;

	/* Wait for outstanding call_rcu()s, if any, from a
	 * tcf_proto_ops's destroy() handler.
	 */
	rcu_barrier();
	flush_workqueue(tc_filter_wq);

	write_lock(&cls_mod_lock);
	list_for_each_entry(t, &tcf_proto_base, head) {
		if (t == ops) {
			list_del(&t->head);
			rc = 0;
			break;
		}
	}
	write_unlock(&cls_mod_lock);
	return rc;
}
EXPORT_SYMBOL(unregister_tcf_proto_ops);

bool tcf_queue_work(struct rcu_work *rwork, work_func_t func)
{
	INIT_RCU_WORK(rwork, func);
	return queue_rcu_work(tc_filter_wq, rwork);
}
EXPORT_SYMBOL(tcf_queue_work);

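/* Filter chains are kept sorted by ascending priority, so lower priority
 * values are matched first. tcf_auto_prio() below picks an unused value
 * for filters created without an explicit priority.
 */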
/* Select new prio value from the range, managed by kernel. */

static inline u32 tcf_auto_prio(struct tcf_proto *tp)
{
	u32 first = TC_H_MAKE(0xC0000000U, 0U);

	if (tp)
		first = tp->prio - 1;

	return TC_H_MAJ(first);
}

static bool tcf_proto_check_kind(struct nlattr *kind, char *name)
{
	if (kind)
		return nla_strlcpy(name, kind, IFNAMSIZ) >= IFNAMSIZ;
	memset(name, 0, IFNAMSIZ);
	return false;
}

static bool tcf_proto_is_unlocked(const char *kind)
{
	const struct tcf_proto_ops *ops;
	bool ret;

	if (strlen(kind) == 0)
		return false;

	ops = tcf_proto_lookup_ops(kind, false, NULL);
	/* On error return false to take rtnl lock. Proto lookup/create
	 * functions will perform lookup again and properly handle errors.
	 */
	if (IS_ERR(ops))
		return false;

	ret = !!(ops->flags & TCF_PROTO_OPS_DOIT_UNLOCKED);
	module_put(ops->owner);
	return ret;
}

static struct tcf_proto *tcf_proto_create(const char *kind, u32 protocol,
					  u32 prio, struct tcf_chain *chain,
					  bool rtnl_held,
					  struct netlink_ext_ack *extack)
{
	struct tcf_proto *tp;
	int err;

	tp = kzalloc(sizeof(*tp), GFP_KERNEL);
	if (!tp)
		return ERR_PTR(-ENOBUFS);

	tp->ops = tcf_proto_lookup_ops(kind, rtnl_held, extack);
	if (IS_ERR(tp->ops)) {
		err = PTR_ERR(tp->ops);
		goto errout;
	}
	tp->classify = tp->ops->classify;
	tp->protocol = protocol;
	tp->prio = prio;
	tp->chain = chain;
	spin_lock_init(&tp->lock);
	refcount_set(&tp->refcnt, 1);

	err = tp->ops->init(tp);
	if (err) {
		module_put(tp->ops->owner);
		goto errout;
	}
	return tp;

errout:
	kfree(tp);
	return ERR_PTR(err);
}

static void tcf_proto_get(struct tcf_proto *tp)
{
	refcount_inc(&tp->refcnt);
}

static void tcf_chain_put(struct tcf_chain *chain);

static void tcf_proto_destroy(struct tcf_proto *tp, bool rtnl_held,
			      bool sig_destroy, struct netlink_ext_ack *extack)
{
	tp->ops->destroy(tp, rtnl_held, extack);
	if (sig_destroy)
		tcf_proto_signal_destroyed(tp->chain, tp);
	tcf_chain_put(tp->chain);
	module_put(tp->ops->owner);
	kfree_rcu(tp, rcu);
}

static void tcf_proto_put(struct tcf_proto *tp, bool rtnl_held,
			  struct netlink_ext_ack *extack)
{
	if (refcount_dec_and_test(&tp->refcnt))
		tcf_proto_destroy(tp, rtnl_held, true, extack);
}

static int walker_check_empty(struct tcf_proto *tp, void *fh,
			      struct tcf_walker *arg)
{
	if (fh) {
		arg->nonempty = true;
		return -1;
	}
	return 0;
}

static bool tcf_proto_is_empty(struct tcf_proto *tp, bool rtnl_held)
{
	struct tcf_walker walker = { .fn = walker_check_empty, };

	if (tp->ops->walk) {
		tp->ops->walk(tp, &walker, rtnl_held);
		return !walker.nonempty;
	}
	return true;
}

static bool tcf_proto_check_delete(struct tcf_proto *tp, bool rtnl_held)
{
	spin_lock(&tp->lock);
	if (tcf_proto_is_empty(tp, rtnl_held))
		tp->deleting = true;
	spin_unlock(&tp->lock);
	return tp->deleting;
}

static void tcf_proto_mark_delete(struct tcf_proto *tp)
{
	spin_lock(&tp->lock);
	tp->deleting = true;
	spin_unlock(&tp->lock);
}

static bool tcf_proto_is_deleting(struct tcf_proto *tp)
{
	bool deleting;

	spin_lock(&tp->lock);
	deleting = tp->deleting;
	spin_unlock(&tp->lock);

	return deleting;
}

#define ASSERT_BLOCK_LOCKED(block)					\
	lockdep_assert_held(&(block)->lock)

struct tcf_filter_chain_list_item {
	struct list_head list;
	tcf_chain_head_change_t *chain_head_change;
	void *chain_head_change_priv;
};

static struct tcf_chain *tcf_chain_create(struct tcf_block *block,
					  u32 chain_index)
{
	struct tcf_chain *chain;

	ASSERT_BLOCK_LOCKED(block);

	chain = kzalloc(sizeof(*chain), GFP_KERNEL);
	if (!chain)
		return NULL;
	list_add_tail(&chain->list, &block->chain_list);
	mutex_init(&chain->filter_chain_lock);
	chain->block = block;
	chain->index = chain_index;
	chain->refcnt = 1;
	if (!chain->index)
		block->chain0.chain = chain;
	return chain;
}

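/* Chain 0 is special: qdiscs cache its head pointer for the fast path, so
 * every change of the head must be propagated to the registered
 * chain_head_change callbacks (see tcf_chain0_head_change() below).
 */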
static void tcf_chain_head_change_item(struct tcf_filter_chain_list_item *item,
				       struct tcf_proto *tp_head)
{
	if (item->chain_head_change)
		item->chain_head_change(tp_head, item->chain_head_change_priv);
}

static void tcf_chain0_head_change(struct tcf_chain *chain,
				   struct tcf_proto *tp_head)
{
	struct tcf_filter_chain_list_item *item;
	struct tcf_block *block = chain->block;

	if (chain->index)
		return;

	mutex_lock(&block->lock);
	list_for_each_entry(item, &block->chain0.filter_chain_list, list)
		tcf_chain_head_change_item(item, tp_head);
	mutex_unlock(&block->lock);
}

/* Returns true if block can be safely freed. */

static bool tcf_chain_detach(struct tcf_chain *chain)
{
	struct tcf_block *block = chain->block;

	ASSERT_BLOCK_LOCKED(block);

	list_del(&chain->list);
	if (!chain->index)
		block->chain0.chain = NULL;

	if (list_empty(&block->chain_list) &&
	    refcount_read(&block->refcnt) == 0)
		return true;

	return false;
}

static void tcf_block_destroy(struct tcf_block *block)
{
	mutex_destroy(&block->lock);
	mutex_destroy(&block->proto_destroy_lock);
	kfree_rcu(block, rcu);
}

static void tcf_chain_destroy(struct tcf_chain *chain, bool free_block)
{
	struct tcf_block *block = chain->block;

	mutex_destroy(&chain->filter_chain_lock);
	kfree_rcu(chain, rcu);
	if (free_block)
		tcf_block_destroy(block);
}

static void tcf_chain_hold(struct tcf_chain *chain)
{
	ASSERT_BLOCK_LOCKED(chain->block);

	++chain->refcnt;
}

static bool tcf_chain_held_by_acts_only(struct tcf_chain *chain)
{
	ASSERT_BLOCK_LOCKED(chain->block);

	/* In case all the references are action references, this
	 * chain should not be shown to the user.
	 */
	return chain->refcnt == chain->action_refcnt;
}

static struct tcf_chain *tcf_chain_lookup(struct tcf_block *block,
					  u32 chain_index)
{
	struct tcf_chain *chain;

	ASSERT_BLOCK_LOCKED(block);

	list_for_each_entry(chain, &block->chain_list, list) {
		if (chain->index == chain_index)
			return chain;
	}
	return NULL;
}

static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
			   u32 seq, u16 flags, int event, bool unicast);

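/* Look up a chain by index on the block, optionally creating it. Every
 * user holds one reference; references taken by actions are additionally
 * counted in action_refcnt so that chains used only by actions can stay
 * hidden from the user.
 */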
static struct tcf_chain *__tcf_chain_get(struct tcf_block *block,
					 u32 chain_index, bool create,
					 bool by_act)
{
	struct tcf_chain *chain = NULL;
	bool is_first_reference;

	mutex_lock(&block->lock);
	chain = tcf_chain_lookup(block, chain_index);
	if (chain) {
		tcf_chain_hold(chain);
	} else {
		if (!create)
			goto errout;
		chain = tcf_chain_create(block, chain_index);
		if (!chain)
			goto errout;
	}

	if (by_act)
		++chain->action_refcnt;
	is_first_reference = chain->refcnt - chain->action_refcnt == 1;
	mutex_unlock(&block->lock);

	/* Send notification only in case we got the first
	 * non-action reference. Until then, the chain acts only as
	 * a placeholder for actions pointing to it and user ought
	 * not know about them.
	 */
	if (is_first_reference && !by_act)
		tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
				RTM_NEWCHAIN, false);

	return chain;

errout:
	mutex_unlock(&block->lock);
	return chain;
}

static struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
				       bool create)
{
	return __tcf_chain_get(block, chain_index, create, false);
}

struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block, u32 chain_index)
{
	return __tcf_chain_get(block, chain_index, true, true);
}
EXPORT_SYMBOL(tcf_chain_get_by_act);

static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops,
			       void *tmplt_priv);
static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops,
				  void *tmplt_priv, u32 chain_index,
				  struct tcf_block *block, struct sk_buff *oskb,
				  u32 seq, u16 flags, bool unicast);

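/* Drop one reference to a chain. The last non-action reference triggers
 * an RTM_DELCHAIN notification; once the overall refcount hits zero the
 * template is destroyed and the chain is detached and freed.
 */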
static void __tcf_chain_put(struct tcf_chain *chain, bool by_act,
			    bool explicitly_created)
{
	struct tcf_block *block = chain->block;
	const struct tcf_proto_ops *tmplt_ops;
	bool free_block = false;
	unsigned int refcnt;
	void *tmplt_priv;

	mutex_lock(&block->lock);
	if (explicitly_created) {
		if (!chain->explicitly_created) {
			mutex_unlock(&block->lock);
			return;
		}
		chain->explicitly_created = false;
	}

	if (by_act)
		chain->action_refcnt--;

	/* tc_chain_notify_delete can't be called while holding block lock.
	 * However, when the block is unlocked the chain can be changed
	 * concurrently, so save these values to temporary variables.
	 */
	refcnt = --chain->refcnt;
	tmplt_ops = chain->tmplt_ops;
	tmplt_priv = chain->tmplt_priv;

	/* The last dropped non-action reference will trigger notification. */
	if (refcnt - chain->action_refcnt == 0 && !by_act) {
		tc_chain_notify_delete(tmplt_ops, tmplt_priv, chain->index,
				       block, NULL, 0, 0, false);
		/* Last reference to chain, no need to lock. */
		chain->flushing = false;
	}

	if (refcnt == 0)
		free_block = tcf_chain_detach(chain);
	mutex_unlock(&block->lock);

	if (refcnt == 0) {
		tc_chain_tmplt_del(tmplt_ops, tmplt_priv);
		tcf_chain_destroy(chain, free_block);
	}
}

static void tcf_chain_put(struct tcf_chain *chain)
{
	__tcf_chain_put(chain, false, false);
}

void tcf_chain_put_by_act(struct tcf_chain *chain)
{
	__tcf_chain_put(chain, true, false);
}
EXPORT_SYMBOL(tcf_chain_put_by_act);

static void tcf_chain_put_explicitly_created(struct tcf_chain *chain)
{
	__tcf_chain_put(chain, false, true);
}

static void tcf_chain_flush(struct tcf_chain *chain, bool rtnl_held)
{
	struct tcf_proto *tp, *tp_next;

	mutex_lock(&chain->filter_chain_lock);
	tp = tcf_chain_dereference(chain->filter_chain, chain);
	while (tp) {
		tp_next = rcu_dereference_protected(tp->next, 1);
		tcf_proto_signal_destroying(chain, tp);
		tp = tp_next;
	}
	tp = tcf_chain_dereference(chain->filter_chain, chain);
	RCU_INIT_POINTER(chain->filter_chain, NULL);
	tcf_chain0_head_change(chain, NULL);
	chain->flushing = true;
	mutex_unlock(&chain->filter_chain_lock);

	while (tp) {
		tp_next = rcu_dereference_protected(tp->next, 1);
		tcf_proto_put(tp, rtnl_held, NULL);
		tp = tp_next;
	}
}

static int tcf_block_setup(struct tcf_block *block,
			   struct flow_block_offload *bo);

static void tc_indr_block_cmd(struct net_device *dev, struct tcf_block *block,
			      flow_indr_block_bind_cb_t *cb, void *cb_priv,
			      enum flow_block_command command, bool ingress)
{
	struct flow_block_offload bo = {
		.command	= command,
		.binder_type	= ingress ?
				  FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS :
				  FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS,
		.net		= dev_net(dev),
		.block_shared	= tcf_block_non_null_shared(block),
	};
	INIT_LIST_HEAD(&bo.cb_list);

	if (!block)
		return;

	bo.block = &block->flow_block;

	down_write(&block->cb_lock);
	cb(dev, cb_priv, TC_SETUP_BLOCK, &bo);

	tcf_block_setup(block, &bo);
	up_write(&block->cb_lock);
}

static struct tcf_block *tc_dev_block(struct net_device *dev, bool ingress)
{
	const struct Qdisc_class_ops *cops;
	const struct Qdisc_ops *ops;
	struct Qdisc *qdisc;

	if (!dev_ingress_queue(dev))
		return NULL;

	qdisc = dev_ingress_queue(dev)->qdisc_sleeping;
	if (!qdisc)
		return NULL;

	ops = qdisc->ops;
	if (!ops)
		return NULL;

	if (!ingress && !strcmp("ingress", ops->id))
		return NULL;

	cops = ops->cl_ops;
	if (!cops)
		return NULL;

	if (!cops->tcf_block)
		return NULL;

	return cops->tcf_block(qdisc,
			       ingress ? TC_H_MIN_INGRESS : TC_H_MIN_EGRESS,
			       NULL);
}

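/* Apply an indirect block bind/unbind command to both the ingress and
 * the egress block of @dev, if present.
 */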
static void tc_indr_block_get_and_cmd(struct net_device *dev,
				      flow_indr_block_bind_cb_t *cb,
				      void *cb_priv,
				      enum flow_block_command command)
{
	struct tcf_block *block;

	block = tc_dev_block(dev, true);
	tc_indr_block_cmd(dev, block, cb, cb_priv, command, true);

	block = tc_dev_block(dev, false);
	tc_indr_block_cmd(dev, block, cb, cb_priv, command, false);
}

static void tc_indr_block_call(struct tcf_block *block,
			       struct net_device *dev,
			       struct tcf_block_ext_info *ei,
			       enum flow_block_command command,
			       struct netlink_ext_ack *extack)
{
	struct flow_block_offload bo = {
		.command	= command,
		.binder_type	= ei->binder_type,
		.net		= dev_net(dev),
		.block		= &block->flow_block,
		.block_shared	= tcf_block_shared(block),
		.extack		= extack,
	};
	INIT_LIST_HEAD(&bo.cb_list);

	flow_indr_block_call(dev, &bo, command);
	tcf_block_setup(block, &bo);
}

static bool tcf_block_offload_in_use(struct tcf_block *block)
{
	return atomic_read(&block->offloadcnt);
}

static int tcf_block_offload_cmd(struct tcf_block *block,
				 struct net_device *dev,
				 struct tcf_block_ext_info *ei,
				 enum flow_block_command command,
				 struct netlink_ext_ack *extack)
{
	struct flow_block_offload bo = {};
	int err;

	bo.net = dev_net(dev);
	bo.command = command;
	bo.binder_type = ei->binder_type;
	bo.block = &block->flow_block;
	bo.block_shared = tcf_block_shared(block);
	bo.extack = extack;
	INIT_LIST_HEAD(&bo.cb_list);

	err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo);
	if (err < 0)
		return err;

	return tcf_block_setup(block, &bo);
}

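/* Bind a block to a device for offloading. Devices that cannot offload
 * are tracked via nooffloaddevcnt; binding such a device is refused when
 * the block already contains offloaded filters.
 */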
static int tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q,
				  struct tcf_block_ext_info *ei,
				  struct netlink_ext_ack *extack)
{
	struct net_device *dev = q->dev_queue->dev;
	int err;

	down_write(&block->cb_lock);
	if (!dev->netdev_ops->ndo_setup_tc)
		goto no_offload_dev_inc;

	/* If tc offload feature is disabled and the block we try to bind
	 * to already has some offloaded filters, forbid to bind.
	 */
	if (!tc_can_offload(dev) && tcf_block_offload_in_use(block)) {
		NL_SET_ERR_MSG(extack, "Bind to offloaded block failed as dev has offload disabled");
		err = -EOPNOTSUPP;
		goto err_unlock;
	}

	err = tcf_block_offload_cmd(block, dev, ei, FLOW_BLOCK_BIND, extack);
	if (err == -EOPNOTSUPP)
		goto no_offload_dev_inc;
	if (err)
		goto err_unlock;

	tc_indr_block_call(block, dev, ei, FLOW_BLOCK_BIND, extack);
	up_write(&block->cb_lock);
	return 0;

no_offload_dev_inc:
	if (tcf_block_offload_in_use(block)) {
		err = -EOPNOTSUPP;
		goto err_unlock;
	}
	err = 0;
	block->nooffloaddevcnt++;
	tc_indr_block_call(block, dev, ei, FLOW_BLOCK_BIND, extack);
err_unlock:
	up_write(&block->cb_lock);
	return err;
}

static void tcf_block_offload_unbind(struct tcf_block *block, struct Qdisc *q,
				     struct tcf_block_ext_info *ei)
{
	struct net_device *dev = q->dev_queue->dev;
	int err;

	down_write(&block->cb_lock);
	tc_indr_block_call(block, dev, ei, FLOW_BLOCK_UNBIND, NULL);

	if (!dev->netdev_ops->ndo_setup_tc)
		goto no_offload_dev_dec;
	err = tcf_block_offload_cmd(block, dev, ei, FLOW_BLOCK_UNBIND, NULL);
	if (err == -EOPNOTSUPP)
		goto no_offload_dev_dec;
	up_write(&block->cb_lock);
	return;

no_offload_dev_dec:
	WARN_ON(block->nooffloaddevcnt-- == 0);
	up_write(&block->cb_lock);
}

static int
tcf_chain0_head_change_cb_add(struct tcf_block *block,
			      struct tcf_block_ext_info *ei,
			      struct netlink_ext_ack *extack)
{
	struct tcf_filter_chain_list_item *item;
	struct tcf_chain *chain0;

	item = kmalloc(sizeof(*item), GFP_KERNEL);
	if (!item) {
		NL_SET_ERR_MSG(extack, "Memory allocation for head change callback item failed");
		return -ENOMEM;
	}
	item->chain_head_change = ei->chain_head_change;
	item->chain_head_change_priv = ei->chain_head_change_priv;

	mutex_lock(&block->lock);
	chain0 = block->chain0.chain;
	if (chain0)
		tcf_chain_hold(chain0);
	else
		list_add(&item->list, &block->chain0.filter_chain_list);
	mutex_unlock(&block->lock);

	if (chain0) {
		struct tcf_proto *tp_head;

		mutex_lock(&chain0->filter_chain_lock);

		tp_head = tcf_chain_dereference(chain0->filter_chain, chain0);
		if (tp_head)
			tcf_chain_head_change_item(item, tp_head);

		mutex_lock(&block->lock);
		list_add(&item->list, &block->chain0.filter_chain_list);
		mutex_unlock(&block->lock);

		mutex_unlock(&chain0->filter_chain_lock);
		tcf_chain_put(chain0);
	}

	return 0;
}

static void
tcf_chain0_head_change_cb_del(struct tcf_block *block,
			      struct tcf_block_ext_info *ei)
{
	struct tcf_filter_chain_list_item *item;

	mutex_lock(&block->lock);
	list_for_each_entry(item, &block->chain0.filter_chain_list, list) {
		if ((!ei->chain_head_change && !ei->chain_head_change_priv) ||
		    (item->chain_head_change == ei->chain_head_change &&
		     item->chain_head_change_priv == ei->chain_head_change_priv)) {
			if (block->chain0.chain)
				tcf_chain_head_change_item(item, NULL);
			list_del(&item->list);
			mutex_unlock(&block->lock);

			kfree(item);
			return;
		}
	}
	mutex_unlock(&block->lock);
	WARN_ON(1);
}

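/* Per-netns IDR that maps block indexes to shared blocks. */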
struct tcf_net {
	spinlock_t idr_lock; /* Protects idr */
	struct idr idr;
};

static unsigned int tcf_net_id;

static int tcf_block_insert(struct tcf_block *block, struct net *net,
			    struct netlink_ext_ack *extack)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);
	int err;

	idr_preload(GFP_KERNEL);
	spin_lock(&tn->idr_lock);
	err = idr_alloc_u32(&tn->idr, block, &block->index, block->index,
			    GFP_NOWAIT);
	spin_unlock(&tn->idr_lock);
	idr_preload_end();

	return err;
}

static void tcf_block_remove(struct tcf_block *block, struct net *net)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	spin_lock(&tn->idr_lock);
	idr_remove(&tn->idr, block->index);
	spin_unlock(&tn->idr_lock);
}

static struct tcf_block *tcf_block_create(struct net *net, struct Qdisc *q,
					  u32 block_index,
					  struct netlink_ext_ack *extack)
{
	struct tcf_block *block;

	block = kzalloc(sizeof(*block), GFP_KERNEL);
	if (!block) {
		NL_SET_ERR_MSG(extack, "Memory allocation for block failed");
		return ERR_PTR(-ENOMEM);
	}
	mutex_init(&block->lock);
	mutex_init(&block->proto_destroy_lock);
	init_rwsem(&block->cb_lock);
	flow_block_init(&block->flow_block);
	INIT_LIST_HEAD(&block->chain_list);
	INIT_LIST_HEAD(&block->owner_list);
	INIT_LIST_HEAD(&block->chain0.filter_chain_list);

	refcount_set(&block->refcnt, 1);
	block->net = net;
	block->index = block_index;

	/* Don't store q pointer for blocks which are shared */
	if (!tcf_block_shared(block))
		block->q = q;
	return block;
}

static struct tcf_block *tcf_block_lookup(struct net *net, u32 block_index)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	return idr_find(&tn->idr, block_index);
}

static struct tcf_block *tcf_block_refcnt_get(struct net *net, u32 block_index)
{
	struct tcf_block *block;

	rcu_read_lock();
	block = tcf_block_lookup(net, block_index);
	if (block && !refcount_inc_not_zero(&block->refcnt))
		block = NULL;
	rcu_read_unlock();

	return block;
}

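/* Advance to the next chain on the block, skipping chains held by
 * actions only. Takes block->lock and a reference on the returned chain.
 */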
static struct tcf_chain *
__tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain)
{
	mutex_lock(&block->lock);
	if (chain)
		chain = list_is_last(&chain->list, &block->chain_list) ?
			NULL : list_next_entry(chain, list);
	else
		chain = list_first_entry_or_null(&block->chain_list,
						 struct tcf_chain, list);

	/* skip all action-only chains */
	while (chain && tcf_chain_held_by_acts_only(chain))
		chain = list_is_last(&chain->list, &block->chain_list) ?
			NULL : list_next_entry(chain, list);

	if (chain)
		tcf_chain_hold(chain);
	mutex_unlock(&block->lock);

	return chain;
}

/* Function to be used by all clients that want to iterate over all chains on
 * a block. It properly obtains block->lock and takes a reference to the chain
 * before returning it. Users of this function must be tolerant to concurrent
 * chain insertion/deletion or ensure that no concurrent chain modification is
 * possible. Note that netlink dump callbacks cannot guarantee a consistent
 * dump because the rtnl lock is released each time the skb is filled with
 * data and sent to user-space.
 */

struct tcf_chain *
tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain)
{
	struct tcf_chain *chain_next = __tcf_get_next_chain(block, chain);

	if (chain)
		tcf_chain_put(chain);

	return chain_next;
}
EXPORT_SYMBOL(tcf_get_next_chain);

static struct tcf_proto *
__tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp)
{
	u32 prio = 0;

	ASSERT_RTNL();
	mutex_lock(&chain->filter_chain_lock);

	if (!tp) {
		tp = tcf_chain_dereference(chain->filter_chain, chain);
	} else if (tcf_proto_is_deleting(tp)) {
		/* 'deleting' flag is set and chain->filter_chain_lock was
		 * unlocked, which means next pointer could be invalid. Restart
		 * search.
		 */
		prio = tp->prio + 1;
		tp = tcf_chain_dereference(chain->filter_chain, chain);

		for (; tp; tp = tcf_chain_dereference(tp->next, chain))
			if (!tp->deleting && tp->prio >= prio)
				break;
	} else {
		tp = tcf_chain_dereference(tp->next, chain);
	}

	if (tp)
		tcf_proto_get(tp);

	mutex_unlock(&chain->filter_chain_lock);

	return tp;
}

/* Function to be used by all clients that want to iterate over all tp's on
 * a chain. Users of this function must be tolerant to concurrent tp
 * insertion/deletion or ensure that no concurrent chain modification is
 * possible. Note that netlink dump callbacks cannot guarantee a consistent
 * dump because the rtnl lock is released each time the skb is filled with
 * data and sent to user-space.
 */

struct tcf_proto *
tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp,
		   bool rtnl_held)
{
	struct tcf_proto *tp_next = __tcf_get_next_proto(chain, tp);

	if (tp)
		tcf_proto_put(tp, rtnl_held, NULL);

	return tp_next;
}
EXPORT_SYMBOL(tcf_get_next_proto);

static void tcf_block_flush_all_chains(struct tcf_block *block, bool rtnl_held)
{
	struct tcf_chain *chain;

	/* Last reference to block. At this point chains cannot be added or
	 * removed concurrently.
	 */
	for (chain = tcf_get_next_chain(block, NULL);
	     chain;
	     chain = tcf_get_next_chain(block, chain)) {
		tcf_chain_put_explicitly_created(chain);
		tcf_chain_flush(chain, rtnl_held);
	}
}

/* Look up the Qdisc and increment its reference counter.
 * Set parent, if necessary.
 */

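/* An ifindex of TCM_IFINDEX_MAGIC_BLOCK means the request addresses a
 * shared block by index, so there is no qdisc to look up.
 */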
static int __tcf_qdisc_find(struct net *net, struct Qdisc **q,
			    u32 *parent, int ifindex, bool rtnl_held,
			    struct netlink_ext_ack *extack)
{
	const struct Qdisc_class_ops *cops;
	struct net_device *dev;
	int err = 0;

	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK)
		return 0;

	rcu_read_lock();

	/* Find link */
	dev = dev_get_by_index_rcu(net, ifindex);
	if (!dev) {
		rcu_read_unlock();
		return -ENODEV;
	}

	/* Find qdisc */
	if (!*parent) {
		*q = dev->qdisc;
		*parent = (*q)->handle;
	} else {
		*q = qdisc_lookup_rcu(dev, TC_H_MAJ(*parent));
		if (!*q) {
			NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exist");
			err = -EINVAL;
			goto errout_rcu;
		}
	}

	*q = qdisc_refcount_inc_nz(*q);
	if (!*q) {
		NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exist");
		err = -EINVAL;
		goto errout_rcu;
	}

	/* Is it classful? */
	cops = (*q)->ops->cl_ops;
	if (!cops) {
		NL_SET_ERR_MSG(extack, "Qdisc not classful");
		err = -EINVAL;
		goto errout_qdisc;
	}

	if (!cops->tcf_block) {
		NL_SET_ERR_MSG(extack, "Class doesn't support blocks");
		err = -EOPNOTSUPP;
		goto errout_qdisc;
	}

errout_rcu:
	/* At this point we know that qdisc is not noop_qdisc,
	 * which means that qdisc holds a reference to net_device
	 * and we hold a reference to qdisc, so it is safe to release
	 * rcu read lock.
	 */
	rcu_read_unlock();
	return err;

errout_qdisc:
	rcu_read_unlock();

	if (rtnl_held)
		qdisc_put(*q);
	else
		qdisc_put_unlocked(*q);
	*q = NULL;

	return err;
}

static int __tcf_qdisc_cl_find(struct Qdisc *q, u32 parent, unsigned long *cl,
			       int ifindex, struct netlink_ext_ack *extack)
{
	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK)
		return 0;

	/* Do we search for a filter attached to a class? */
	if (TC_H_MIN(parent)) {
		const struct Qdisc_class_ops *cops = q->ops->cl_ops;

		*cl = cops->find(q, parent);
		if (*cl == 0) {
			NL_SET_ERR_MSG(extack, "Specified class doesn't exist");
			return -ENOENT;
		}
	}

	return 0;
}

static struct tcf_block *__tcf_block_find(struct net *net, struct Qdisc *q,
					  unsigned long cl, int ifindex,
					  u32 block_index,
					  struct netlink_ext_ack *extack)
{
	struct tcf_block *block;

	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
		block = tcf_block_refcnt_get(net, block_index);
		if (!block) {
			NL_SET_ERR_MSG(extack, "Block of given index was not found");
			return ERR_PTR(-EINVAL);
		}
	} else {
		const struct Qdisc_class_ops *cops = q->ops->cl_ops;

		block = cops->tcf_block(q, cl, extack);
		if (!block)
			return ERR_PTR(-EINVAL);

		if (tcf_block_shared(block)) {
			NL_SET_ERR_MSG(extack, "This filter block is shared. Please use the block index to manipulate the filters");
			return ERR_PTR(-EOPNOTSUPP);
		}

		/* Always take a reference to the block so that the rule
		 * update path of the cls API can run without the rtnl lock.
		 * The caller must release the block when finished with it.
		 * The 'if' branch of this conditional obtains its reference
		 * by calling tcf_block_refcnt_get().
		 */
		refcount_inc(&block->refcnt);
	}

	return block;
}

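/* Drop a block reference. The last reference flushes all chains and
 * unbinds the offloads before the block itself is freed.
 */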
static void __tcf_block_put(struct tcf_block *block, struct Qdisc *q,
			    struct tcf_block_ext_info *ei, bool rtnl_held)
{
	if (refcount_dec_and_mutex_lock(&block->refcnt, &block->lock)) {
		/* Flushing/putting all chains will cause the block to be
		 * deallocated when last chain is freed. However, if chain_list
		 * is empty, block has to be manually deallocated. After block
		 * reference counter reached 0, it is no longer possible to
		 * increment it or add new chains to block.
		 */
		bool free_block = list_empty(&block->chain_list);

		mutex_unlock(&block->lock);
		if (tcf_block_shared(block))
			tcf_block_remove(block, block->net);

		if (q)
			tcf_block_offload_unbind(block, q, ei);

		if (free_block)
			tcf_block_destroy(block);
		else
			tcf_block_flush_all_chains(block, rtnl_held);
	} else if (q) {
		tcf_block_offload_unbind(block, q, ei);
	}
}

static void tcf_block_refcnt_put(struct tcf_block *block, bool rtnl_held)
{
	__tcf_block_put(block, NULL, NULL, rtnl_held);
}

/* Find tcf block.
 * Set q, parent, cl when appropriate.
 */

static struct tcf_block *tcf_block_find(struct net *net, struct Qdisc **q,
					u32 *parent, unsigned long *cl,
					int ifindex, u32 block_index,
					struct netlink_ext_ack *extack)
{
	struct tcf_block *block;
	int err = 0;

	ASSERT_RTNL();

	err = __tcf_qdisc_find(net, q, parent, ifindex, true, extack);
	if (err)
		goto errout;

	err = __tcf_qdisc_cl_find(*q, *parent, cl, ifindex, extack);
	if (err)
		goto errout_qdisc;

	block = __tcf_block_find(net, *q, *cl, ifindex, block_index, extack);
	if (IS_ERR(block)) {
		err = PTR_ERR(block);
		goto errout_qdisc;
	}

	return block;

errout_qdisc:
	if (*q)
		qdisc_put(*q);
errout:
	*q = NULL;
	return ERR_PTR(err);
}

static void tcf_block_release(struct Qdisc *q, struct tcf_block *block,
			      bool rtnl_held)
{
	if (!IS_ERR_OR_NULL(block))
		tcf_block_refcnt_put(block, rtnl_held);

	if (q) {
		if (rtnl_held)
			qdisc_put(q);
		else
			qdisc_put_unlocked(q);
	}
}

struct tcf_block_owner_item {
	struct list_head list;
	struct Qdisc *q;
	enum flow_block_binder_type binder_type;
};

static void
tcf_block_owner_netif_keep_dst(struct tcf_block *block,
			       struct Qdisc *q,
			       enum flow_block_binder_type binder_type)
{
	if (block->keep_dst &&
	    binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
	    binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
		netif_keep_dst(qdisc_dev(q));
}

void tcf_block_netif_keep_dst(struct tcf_block *block)
{
	struct tcf_block_owner_item *item;

	block->keep_dst = true;
	list_for_each_entry(item, &block->owner_list, list)
		tcf_block_owner_netif_keep_dst(block, item->q,
					       item->binder_type);
}
EXPORT_SYMBOL(tcf_block_netif_keep_dst);

static int tcf_block_owner_add(struct tcf_block *block,
			       struct Qdisc *q,
			       enum flow_block_binder_type binder_type)
{
	struct tcf_block_owner_item *item;

	item = kmalloc(sizeof(*item), GFP_KERNEL);
	if (!item)
		return -ENOMEM;
	item->q = q;
	item->binder_type = binder_type;
	list_add(&item->list, &block->owner_list);
	return 0;
}

static void tcf_block_owner_del(struct tcf_block *block,
				struct Qdisc *q,
				enum flow_block_binder_type binder_type)
{
	struct tcf_block_owner_item *item;

	list_for_each_entry(item, &block->owner_list, list) {
		if (item->q == q && item->binder_type == binder_type) {
			list_del(&item->list);
			kfree(item);
			return;
		}
	}
	WARN_ON(1);
}

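/* Create or look up the block for a qdisc and wire it up: owner list,
 * chain0 head change callback and hardware offload binding.
 */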
int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei,
		      struct netlink_ext_ack *extack)
{
	struct net *net = qdisc_net(q);
	struct tcf_block *block = NULL;
	int err;

	if (ei->block_index)
		/* block_index not 0 means the shared block is requested */
		block = tcf_block_refcnt_get(net, ei->block_index);

	if (!block) {
		block = tcf_block_create(net, q, ei->block_index, extack);
		if (IS_ERR(block))
			return PTR_ERR(block);
		if (tcf_block_shared(block)) {
			err = tcf_block_insert(block, net, extack);
			if (err)
				goto err_block_insert;
		}
	}

	err = tcf_block_owner_add(block, q, ei->binder_type);
	if (err)
		goto err_block_owner_add;

	tcf_block_owner_netif_keep_dst(block, q, ei->binder_type);

	err = tcf_chain0_head_change_cb_add(block, ei, extack);
	if (err)
		goto err_chain0_head_change_cb_add;

	err = tcf_block_offload_bind(block, q, ei, extack);
	if (err)
		goto err_block_offload_bind;

	*p_block = block;
	return 0;

err_block_offload_bind:
	tcf_chain0_head_change_cb_del(block, ei);
err_chain0_head_change_cb_add:
	tcf_block_owner_del(block, q, ei->binder_type);
err_block_owner_add:
err_block_insert:
	tcf_block_refcnt_put(block, true);
	return err;
}
EXPORT_SYMBOL(tcf_block_get_ext);

static void tcf_chain_head_change_dflt(struct tcf_proto *tp_head, void *priv)
{
	struct tcf_proto __rcu **p_filter_chain = priv;

	rcu_assign_pointer(*p_filter_chain, tp_head);
}

int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		  struct netlink_ext_ack *extack)
{
	struct tcf_block_ext_info ei = {
		.chain_head_change = tcf_chain_head_change_dflt,
		.chain_head_change_priv = p_filter_chain,
	};

	WARN_ON(!p_filter_chain);
	return tcf_block_get_ext(p_block, q, &ei, extack);
}
EXPORT_SYMBOL(tcf_block_get);

/* XXX: Standalone actions are not allowed to jump to any chain, and bound
 * actions should be all removed after flushing.
 */
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
		       struct tcf_block_ext_info *ei)
{
	if (!block)
		return;
	tcf_chain0_head_change_cb_del(block, ei);
	tcf_block_owner_del(block, q, ei->binder_type);

	__tcf_block_put(block, q, ei, true);
}
EXPORT_SYMBOL(tcf_block_put_ext);

void tcf_block_put(struct tcf_block *block)
{
	struct tcf_block_ext_info ei = {0, };

	if (!block)
		return;
	tcf_block_put_ext(block, block->q, &ei);
}
EXPORT_SYMBOL(tcf_block_put);

static int
tcf_block_playback_offloads(struct tcf_block *block, flow_setup_cb_t *cb,
			    void *cb_priv, bool add, bool offload_in_use,
			    struct netlink_ext_ack *extack)
{
	struct tcf_chain *chain, *chain_prev;
	struct tcf_proto *tp, *tp_prev;
	int err;

	lockdep_assert_held(&block->cb_lock);

	for (chain = __tcf_get_next_chain(block, NULL);
	     chain;
	     chain_prev = chain,
		     chain = __tcf_get_next_chain(block, chain),
		     tcf_chain_put(chain_prev)) {
		for (tp = __tcf_get_next_proto(chain, NULL); tp;
		     tp_prev = tp,
			     tp = __tcf_get_next_proto(chain, tp),
			     tcf_proto_put(tp_prev, true, NULL)) {
			if (tp->ops->reoffload) {
				err = tp->ops->reoffload(tp, add, cb, cb_priv,
							 extack);
				if (err && add)
					goto err_playback_remove;
			} else if (add && offload_in_use) {
				err = -EOPNOTSUPP;
				NL_SET_ERR_MSG(extack, "Filter HW offload failed - classifier without re-offloading support");
				goto err_playback_remove;
			}
		}
	}

	return 0;

err_playback_remove:
	tcf_proto_put(tp, true, NULL);
	tcf_chain_put(chain);
	tcf_block_playback_offloads(block, cb, cb_priv, false, offload_in_use,
				    extack);
	return err;
}

static int tcf_block_bind(struct tcf_block *block,
			  struct flow_block_offload *bo)
{
	struct flow_block_cb *block_cb, *next;
	int err, i = 0;

	lockdep_assert_held(&block->cb_lock);

	list_for_each_entry(block_cb, &bo->cb_list, list) {
		err = tcf_block_playback_offloads(block, block_cb->cb,
						  block_cb->cb_priv, true,
						  tcf_block_offload_in_use(block),
						  bo->extack);
		if (err)
			goto err_unroll;
		if (!bo->unlocked_driver_cb)
			block->lockeddevcnt++;

		i++;
	}
	list_splice(&bo->cb_list, &block->flow_block.cb_list);

	return 0;

err_unroll:
	list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
		if (i-- > 0) {
			list_del(&block_cb->list);
			tcf_block_playback_offloads(block, block_cb->cb,
						    block_cb->cb_priv, false,
						    tcf_block_offload_in_use(block),
						    NULL);
			if (!bo->unlocked_driver_cb)
				block->lockeddevcnt--;
		}
		flow_block_cb_free(block_cb);
	}

	return err;
}

static void tcf_block_unbind(struct tcf_block *block,
			     struct flow_block_offload *bo)
{
	struct flow_block_cb *block_cb, *next;

	lockdep_assert_held(&block->cb_lock);

	list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
		tcf_block_playback_offloads(block, block_cb->cb,
					    block_cb->cb_priv, false,
					    tcf_block_offload_in_use(block),
					    NULL);
		list_del(&block_cb->list);
		flow_block_cb_free(block_cb);
		if (!bo->unlocked_driver_cb)
			block->lockeddevcnt--;
	}
}

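/* Commit or release the callbacks collected by a block offload command. */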
static int tcf_block_setup(struct tcf_block *block,
			   struct flow_block_offload *bo)
{
	int err;

	switch (bo->command) {
	case FLOW_BLOCK_BIND:
		err = tcf_block_bind(block, bo);
		break;
	case FLOW_BLOCK_UNBIND:
		err = 0;
		tcf_block_unbind(block, bo);
		break;
	default:
		WARN_ON_ONCE(1);
		err = -EOPNOTSUPP;
	}

	return err;
}

/* Main classifier routine: scans classifier chain attached
 * to this qdisc, (optionally) tests for protocol and asks
 * specific classifiers.
 */
int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		 struct tcf_result *res, bool compat_mode)
{
#ifdef CONFIG_NET_CLS_ACT
	const int max_reclassify_loop = 4;
	const struct tcf_proto *orig_tp = tp;
	const struct tcf_proto *first_tp;
	int limit = 0;

reclassify:
#endif
	for (; tp; tp = rcu_dereference_bh(tp->next)) {
		__be16 protocol = tc_skb_protocol(skb);
		int err;

		if (tp->protocol != protocol &&
		    tp->protocol != htons(ETH_P_ALL))
			continue;

		err = tp->classify(skb, tp, res);
#ifdef CONFIG_NET_CLS_ACT
		if (unlikely(err == TC_ACT_RECLASSIFY && !compat_mode)) {
			first_tp = orig_tp;
			goto reset;
		} else if (unlikely(TC_ACT_EXT_CMP(err, TC_ACT_GOTO_CHAIN))) {
			first_tp = res->goto_tp;

#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
			{
				struct tc_skb_ext *ext;

				ext = skb_ext_add(skb, TC_SKB_EXT);
				if (WARN_ON_ONCE(!ext))
					return TC_ACT_SHOT;

				ext->chain = err & TC_ACT_EXT_VAL_MASK;
			}
#endif
			goto reset;
		}
#endif
		if (err >= 0)
			return err;
	}

	return TC_ACT_UNSPEC; /* signal: continue lookup */
#ifdef CONFIG_NET_CLS_ACT
reset:
	if (unlikely(limit++ >= max_reclassify_loop)) {
		net_notice_ratelimited("%u: reclassify loop, rule prio %u, protocol %02x\n",
				       tp->chain->block->index,
				       tp->prio & 0xffff,
				       ntohs(tp->protocol));
		return TC_ACT_SHOT;
	}

	tp = first_tp;
	goto reclassify;
#endif
}
EXPORT_SYMBOL(tcf_classify);

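/* Cursor into a chain's filter list: pprev is the link that precedes the
 * current position, next is the tp that follows it.
 */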
struct tcf_chain_info {
	struct tcf_proto __rcu **pprev;
	struct tcf_proto __rcu *next;
};

static struct tcf_proto *tcf_chain_tp_prev(struct tcf_chain *chain,
					   struct tcf_chain_info *chain_info)
{
	return tcf_chain_dereference(*chain_info->pprev, chain);
}

static int tcf_chain_tp_insert(struct tcf_chain *chain,
			       struct tcf_chain_info *chain_info,
			       struct tcf_proto *tp)
{
	if (chain->flushing)
		return -EAGAIN;

	if (*chain_info->pprev == chain->filter_chain)
		tcf_chain0_head_change(chain, tp);
	tcf_proto_get(tp);
	RCU_INIT_POINTER(tp->next, tcf_chain_tp_prev(chain, chain_info));
	rcu_assign_pointer(*chain_info->pprev, tp);

	return 0;
}

static void tcf_chain_tp_remove(struct tcf_chain *chain,
				struct tcf_chain_info *chain_info,
				struct tcf_proto *tp)
{
	struct tcf_proto *next = tcf_chain_dereference(chain_info->next, chain);

	tcf_proto_mark_delete(tp);
	if (tp == chain->filter_chain)
		tcf_chain0_head_change(chain, next);
	RCU_INIT_POINTER(*chain_info->pprev, next);
}

static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
					   struct tcf_chain_info *chain_info,
					   u32 protocol, u32 prio,
					   bool prio_allocate);

/* Try to insert new proto.
 * If proto with specified priority already exists, free new proto
 * and return existing one.
 */

static struct tcf_proto *tcf_chain_tp_insert_unique(struct tcf_chain *chain,
						    struct tcf_proto *tp_new,
						    u32 protocol, u32 prio,
						    bool rtnl_held)
{
	struct tcf_chain_info chain_info;
	struct tcf_proto *tp;
	int err = 0;

	mutex_lock(&chain->filter_chain_lock);

	if (tcf_proto_exists_destroying(chain, tp_new)) {
		mutex_unlock(&chain->filter_chain_lock);
		tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
		return ERR_PTR(-EAGAIN);
	}

	tp = tcf_chain_tp_find(chain, &chain_info,
			       protocol, prio, false);
	if (!tp)
		err = tcf_chain_tp_insert(chain, &chain_info, tp_new);
	mutex_unlock(&chain->filter_chain_lock);

	if (tp) {
		tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
		tp_new = tp;
	} else if (err) {
		tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
		tp_new = ERR_PTR(err);
	}

	return tp_new;
}

static void tcf_chain_tp_delete_empty(struct tcf_chain *chain,
				      struct tcf_proto *tp, bool rtnl_held,
				      struct netlink_ext_ack *extack)
{
	struct tcf_chain_info chain_info;
	struct tcf_proto *tp_iter;
	struct tcf_proto **pprev;
	struct tcf_proto *next;

	mutex_lock(&chain->filter_chain_lock);

	/* Atomically find and remove tp from chain. */
	for (pprev = &chain->filter_chain;
	     (tp_iter = tcf_chain_dereference(*pprev, chain));
	     pprev = &tp_iter->next) {
		if (tp_iter == tp) {
			chain_info.pprev = pprev;
			chain_info.next = tp_iter->next;
			WARN_ON(tp_iter->deleting);
			break;
		}
	}
	/* Verify that tp still exists and no new filters were inserted
	 * concurrently.
	 * Mark tp for deletion if it is empty.
	 */
	if (!tp_iter || !tcf_proto_check_delete(tp, rtnl_held)) {
		mutex_unlock(&chain->filter_chain_lock);
		return;
	}

	tcf_proto_signal_destroying(chain, tp);
	next = tcf_chain_dereference(chain_info.next, chain);
	if (tp == chain->filter_chain)
		tcf_chain0_head_change(chain, next);
	RCU_INIT_POINTER(*chain_info.pprev, next);
	mutex_unlock(&chain->filter_chain_lock);

	tcf_proto_put(tp, rtnl_held, extack);
}

static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
					   struct tcf_chain_info *chain_info,
					   u32 protocol, u32 prio,
					   bool prio_allocate)
{
	struct tcf_proto **pprev;
	struct tcf_proto *tp;

	/* Check the chain for existence of proto-tcf with this priority */
	for (pprev = &chain->filter_chain;
	     (tp = tcf_chain_dereference(*pprev, chain));
	     pprev = &tp->next) {
		if (tp->prio >= prio) {
			if (tp->prio == prio) {
				if (prio_allocate ||
				    (tp->protocol != protocol && protocol))
					return ERR_PTR(-EINVAL);
			} else {
				tp = NULL;
			}
			break;
		}
	}
	chain_info->pprev = pprev;
	if (tp) {
		chain_info->next = tp->next;
		tcf_proto_get(tp);
	} else {
		chain_info->next = NULL;
	}
	return tp;
}

static int tcf_fill_node(struct net *net, struct sk_buff *skb,
			 struct tcf_proto *tp, struct tcf_block *block,
			 struct Qdisc *q, u32 parent, void *fh,
			 u32 portid, u32 seq, u16 flags, int event,
			 bool rtnl_held)
{
	struct tcmsg *tcm;
	struct nlmsghdr *nlh;
	unsigned char *b = skb_tail_pointer(skb);

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
	if (!nlh)
		goto out_nlmsg_trim;
	tcm = nlmsg_data(nlh);
	tcm->tcm_family = AF_UNSPEC;
	tcm->tcm__pad1 = 0;
	tcm->tcm__pad2 = 0;
	if (q) {
		tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
		tcm->tcm_parent = parent;
	} else {
		tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
		tcm->tcm_block_index = block->index;
	}
	tcm->tcm_info = TC_H_MAKE(tp->prio, tp->protocol);
	if (nla_put_string(skb, TCA_KIND, tp->ops->kind))
		goto nla_put_failure;
	if (nla_put_u32(skb, TCA_CHAIN, tp->chain->index))
		goto nla_put_failure;
	if (!fh) {
		tcm->tcm_handle = 0;
	} else {
		if (tp->ops->dump &&
		    tp->ops->dump(net, tp, fh, skb, tcm, rtnl_held) < 0)
			goto nla_put_failure;
	}
	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

out_nlmsg_trim:
nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int tfilter_notify(struct net *net, struct sk_buff *oskb,
			  struct nlmsghdr *n, struct tcf_proto *tp,
			  struct tcf_block *block, struct Qdisc *q,
			  u32 parent, void *fh, int event, bool unicast,
			  bool rtnl_held)
{
	struct sk_buff *skb;
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
	int err = 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
			  n->nlmsg_seq, n->nlmsg_flags, event,
			  rtnl_held) <= 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	if (unicast)
		err = netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
	else
		err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
				     n->nlmsg_flags & NLM_F_ECHO);

	if (err > 0)
		err = 0;
	return err;
}

static int tfilter_del_notify(struct net *net, struct sk_buff *oskb,
			      struct nlmsghdr *n, struct tcf_proto *tp,
			      struct tcf_block *block, struct Qdisc *q,
			      u32 parent, void *fh, bool unicast, bool *last,
			      bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct sk_buff *skb;
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
	int err;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
			  n->nlmsg_seq, n->nlmsg_flags, RTM_DELTFILTER,
			  rtnl_held) <= 0) {
		NL_SET_ERR_MSG(extack, "Failed to build del event notification");
		kfree_skb(skb);
		return -EINVAL;
	}

	err = tp->ops->delete(tp, fh, last, rtnl_held, extack);
	if (err) {
		kfree_skb(skb);
		return err;
	}

	if (unicast)
		err = netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
	else
		err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
				     n->nlmsg_flags & NLM_F_ECHO);
	if (err < 0)
		NL_SET_ERR_MSG(extack, "Failed to send filter delete notification");

	if (err > 0)
		err = 0;
	return err;
}

static void tfilter_notify_chain(struct net *net, struct sk_buff *oskb,
				 struct tcf_block *block, struct Qdisc *q,
				 u32 parent, struct nlmsghdr *n,
				 struct tcf_chain *chain, int event,
				 bool rtnl_held)
{
	struct tcf_proto *tp;

	for (tp = tcf_get_next_proto(chain, NULL, rtnl_held);
	     tp; tp = tcf_get_next_proto(chain, tp, rtnl_held))
		tfilter_notify(net, oskb, n, tp, block,
			       q, parent, NULL, event, false, rtnl_held);
}

static void tfilter_put(struct tcf_proto *tp, void *fh)
{
	if (tp->ops->put && fh)
		tp->ops->put(tp, fh);
}

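/* Add or replace a filter (RTM_NEWTFILTER). Runs without RTNL where both
 * the qdisc and the classifier support unlocked execution.
 */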
static int tc_new_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	char name[IFNAMSIZ];
	struct tcmsg *t;
	u32 protocol;
	u32 prio;
	bool prio_allocate;
	u32 parent;
	u32 chain_index;
	struct Qdisc *q = NULL;
	struct tcf_chain_info chain_info;
	struct tcf_chain *chain = NULL;
	struct tcf_block *block;
	struct tcf_proto *tp;
	unsigned long cl;
	void *fh;
	int err;
	int tp_created;
	bool rtnl_held = false;

	if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

replay:
	tp_created = 0;

	err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
				     rtm_tca_policy, extack);
	if (err < 0)
		return err;

	t = nlmsg_data(n);
	protocol = TC_H_MIN(t->tcm_info);
	prio = TC_H_MAJ(t->tcm_info);
	prio_allocate = false;
	parent = t->tcm_parent;
	tp = NULL;
	cl = 0;
	block = NULL;

	if (prio == 0) {
		/* If no priority is provided by the user,
		 * we allocate one.
		 */
		if (n->nlmsg_flags & NLM_F_CREATE) {
			prio = TC_H_MAKE(0x80000000U, 0U);
			prio_allocate = true;
		} else {
			NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero");
			return -ENOENT;
		}
	}

	/* Find head of filter chain. */

	err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
	if (err)
		return err;

	if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
		NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
		err = -EINVAL;
		goto errout;
	}

	/* Take rtnl mutex if rtnl_held was set to true on previous iteration,
	 * block is shared (no qdisc found), qdisc is not unlocked, classifier
	 * type is not specified, classifier is not unlocked.
	 */
	if (rtnl_held ||
	    (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
	    !tcf_proto_is_unlocked(name)) {
		rtnl_held = true;
		rtnl_lock();
	}

	err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
	if (err)
		goto errout;

	block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
				 extack);
	if (IS_ERR(block)) {
		err = PTR_ERR(block);
		goto errout;
	}

	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
	if (chain_index > TC_ACT_EXT_VAL_MASK) {
		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
		err = -EINVAL;
		goto errout;
	}
	chain = tcf_chain_get(block, chain_index, true);
	if (!chain) {
		NL_SET_ERR_MSG(extack, "Cannot create specified filter chain");
		err = -ENOMEM;
		goto errout;
	}

	mutex_lock(&chain->filter_chain_lock);
	tp = tcf_chain_tp_find(chain, &chain_info, protocol,
			       prio, prio_allocate);
	if (IS_ERR(tp)) {
		NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
		err = PTR_ERR(tp);
		goto errout_locked;
	}

	if (tp == NULL) {
		struct tcf_proto *tp_new = NULL;

		if (chain->flushing) {
			err = -EAGAIN;
			goto errout_locked;
		}

		/* Proto-tcf does not exist, create new one */

		if (tca[TCA_KIND] == NULL || !protocol) {
			NL_SET_ERR_MSG(extack, "Filter kind and protocol must be specified");
			err = -EINVAL;
			goto errout_locked;
		}

		if (!(n->nlmsg_flags & NLM_F_CREATE)) {
			NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
			err = -ENOENT;
			goto errout_locked;
		}

		if (prio_allocate)
			prio = tcf_auto_prio(tcf_chain_tp_prev(chain,
							       &chain_info));

		mutex_unlock(&chain->filter_chain_lock);
		tp_new = tcf_proto_create(nla_data(tca[TCA_KIND]),
					  protocol, prio, chain, rtnl_held,
					  extack);
		if (IS_ERR(tp_new)) {
			err = PTR_ERR(tp_new);
			goto errout_tp;
		}

		tp_created = 1;
		tp = tcf_chain_tp_insert_unique(chain, tp_new, protocol, prio,
						rtnl_held);
		if (IS_ERR(tp)) {
			err = PTR_ERR(tp);
			goto errout_tp;
		}
	} else {
		mutex_unlock(&chain->filter_chain_lock);
	}

	if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
		NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
		err = -EINVAL;
		goto errout;
	}

	fh = tp->ops->get(tp, t->tcm_handle);

	if (!fh) {
		if (!(n->nlmsg_flags & NLM_F_CREATE)) {
			NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
			err = -ENOENT;
			goto errout;
		}
	} else if (n->nlmsg_flags & NLM_F_EXCL) {
		tfilter_put(tp, fh);
		NL_SET_ERR_MSG(extack, "Filter already exists");
		err = -EEXIST;
		goto errout;
	}

	if (chain->tmplt_ops && chain->tmplt_ops != tp->ops) {
		NL_SET_ERR_MSG(extack, "Chain template is set to a different filter kind");
		err = -EINVAL;
		goto errout;
	}

	err = tp->ops->change(net, skb, tp, cl, t->tcm_handle, tca, &fh,
			      n->nlmsg_flags & NLM_F_CREATE ? TCA_ACT_NOREPLACE : TCA_ACT_REPLACE,
			      rtnl_held, extack);
	if (err == 0) {
		tfilter_notify(net, skb, n, tp, block, q, parent, fh,
			       RTM_NEWTFILTER, false, rtnl_held);
		tfilter_put(tp, fh);
		/* q pointer is NULL for shared blocks */
		if (q)
			q->flags &= ~TCQ_F_CAN_BYPASS;
	}

errout:
	if (err && tp_created)
		tcf_chain_tp_delete_empty(chain, tp, rtnl_held, NULL);
errout_tp:
	if (chain) {
		if (tp && !IS_ERR(tp))
			tcf_proto_put(tp, rtnl_held, NULL);
		if (!tp_created)
			tcf_chain_put(chain);
	}
	tcf_block_release(q, block, rtnl_held);

	if (rtnl_held)
		rtnl_unlock();

	if (err == -EAGAIN) {
		/* Take rtnl lock in case EAGAIN is caused by concurrent flush
		 * of target chain.
		 */
		rtnl_held = true;
		/* Replay the request. */
		goto replay;
	}
	return err;

errout_locked:
	mutex_unlock(&chain->filter_chain_lock);
	goto errout;
}

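/* Delete a filter, or flush a whole chain when priority is zero
 * (RTM_DELTFILTER).
 */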
static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	char name[IFNAMSIZ];
	struct tcmsg *t;
	u32 protocol;
	u32 prio;
	u32 parent;
	u32 chain_index;
	struct Qdisc *q = NULL;
	struct tcf_chain_info chain_info;
	struct tcf_chain *chain = NULL;
	struct tcf_block *block = NULL;
	struct tcf_proto *tp = NULL;
	unsigned long cl = 0;
	void *fh = NULL;
	int err;
	bool rtnl_held = false;

	if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
				     rtm_tca_policy, extack);
	if (err < 0)
		return err;

	t = nlmsg_data(n);
	protocol = TC_H_MIN(t->tcm_info);
	prio = TC_H_MAJ(t->tcm_info);
	parent = t->tcm_parent;

	if (prio == 0 && (protocol || t->tcm_handle || tca[TCA_KIND])) {
		NL_SET_ERR_MSG(extack, "Cannot flush filters with protocol, handle or kind set");
		return -ENOENT;
	}

	/* Find head of filter chain. */

	err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
	if (err)
		return err;

	if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
		NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
		err = -EINVAL;
		goto errout;
	}
	/* Take rtnl mutex if flushing whole chain, block is shared (no qdisc
	 * found), qdisc is not unlocked, classifier type is not specified,
	 * classifier is not unlocked.
	 */
	if (!prio ||
	    (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
	    !tcf_proto_is_unlocked(name)) {
		rtnl_held = true;
		rtnl_lock();
	}

	err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
	if (err)
		goto errout;

	block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
				 extack);
	if (IS_ERR(block)) {
		err = PTR_ERR(block);
		goto errout;
	}

	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
	if (chain_index > TC_ACT_EXT_VAL_MASK) {
		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
		err = -EINVAL;
		goto errout;
	}
	chain = tcf_chain_get(block, chain_index, false);
	if (!chain) {
		/* User requested flush on non-existent chain. Nothing to do,
		 * so just return success.
		 */
		if (prio == 0) {
			err = 0;
			goto errout;
		}
		NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
		err = -ENOENT;
		goto errout;
	}

	if (prio == 0) {
		tfilter_notify_chain(net, skb, block, q, parent, n,
				     chain, RTM_DELTFILTER, rtnl_held);
		tcf_chain_flush(chain, rtnl_held);
		err = 0;
		goto errout;
	}

	mutex_lock(&chain->filter_chain_lock);
	tp = tcf_chain_tp_find(chain, &chain_info, protocol,
			       prio, false);
	if (!tp || IS_ERR(tp)) {
		NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
		err = tp ? PTR_ERR(tp) : -ENOENT;
		goto errout_locked;
	} else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
		NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
		err = -EINVAL;
		goto errout_locked;
	} else if (t->tcm_handle == 0) {
		tcf_proto_signal_destroying(chain, tp);
		tcf_chain_tp_remove(chain, &chain_info, tp);
		mutex_unlock(&chain->filter_chain_lock);

		tcf_proto_put(tp, rtnl_held, NULL);
		tfilter_notify(net, skb, n, tp, block, q, parent, fh,
			       RTM_DELTFILTER, false, rtnl_held);
		err = 0;
		goto errout;
	}
	mutex_unlock(&chain->filter_chain_lock);

	fh = tp->ops->get(tp, t->tcm_handle);

	if (!fh) {
		NL_SET_ERR_MSG(extack, "Specified filter handle not found");
		err = -ENOENT;
	} else {
		bool last;

		err = tfilter_del_notify(net, skb, n, tp, block,
					 q, parent, fh, false, &last,
					 rtnl_held, extack);

		if (err)
			goto errout;
		if (last)
			tcf_chain_tp_delete_empty(chain, tp, rtnl_held, extack);
	}

errout:
	if (chain) {
		if (tp && !IS_ERR(tp))
			tcf_proto_put(tp, rtnl_held, NULL);
		tcf_chain_put(chain);
	}
	tcf_block_release(q, block, rtnl_held);

	if (rtnl_held)
		rtnl_unlock();

	return err;

errout_locked:
	mutex_unlock(&chain->filter_chain_lock);
	goto errout;
}
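/* Illustration only: a delete request with zero priority is a chain flush,
 * which is why protocol, handle and kind must not be set for it. Roughly,
 * with an example device name:
 *
 *   tc filter del dev eth0 ingress           # prio == 0, flush whole chain
 *   tc filter del dev eth0 ingress prio 1    # drop one tcf_proto
 *
 * Adding a handle to the second form deletes a single filter instance via
 * tp->ops->delete() instead of removing the whole tcf_proto.
 */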
static int tc_get_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	char name[IFNAMSIZ];
	struct tcmsg *t;
	u32 protocol;
	u32 prio;
	u32 parent;
	u32 chain_index;
	struct Qdisc *q = NULL;
	struct tcf_chain_info chain_info;
	struct tcf_chain *chain = NULL;
	struct tcf_block *block = NULL;
	struct tcf_proto *tp = NULL;
	unsigned long cl = 0;
	void *fh = NULL;
	int err;
	bool rtnl_held = false;

	err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
				     rtm_tca_policy, extack);
	if (err < 0)
		return err;

	t = nlmsg_data(n);
	protocol = TC_H_MIN(t->tcm_info);
	prio = TC_H_MAJ(t->tcm_info);
	parent = t->tcm_parent;

	if (prio == 0) {
		NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero");
		return -ENOENT;
	}

	/* Find head of filter chain. */

	err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
	if (err)
		return err;

	if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
		NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
		err = -EINVAL;
		goto errout;
	}
	/* Take rtnl mutex if block is shared (no qdisc found), qdisc is not
	 * unlocked, classifier type is not specified, classifier is not
	 * unlocked.
	 */
	if ((q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
	    !tcf_proto_is_unlocked(name)) {
		rtnl_held = true;
		rtnl_lock();
	}

	err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
	if (err)
		goto errout;

	block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
				 extack);
	if (IS_ERR(block)) {
		err = PTR_ERR(block);
		goto errout;
	}

	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
	if (chain_index > TC_ACT_EXT_VAL_MASK) {
		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
		err = -EINVAL;
		goto errout;
	}
	chain = tcf_chain_get(block, chain_index, false);
	if (!chain) {
		NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
		err = -EINVAL;
		goto errout;
	}

	mutex_lock(&chain->filter_chain_lock);
	tp = tcf_chain_tp_find(chain, &chain_info, protocol,
			       prio, false);
	mutex_unlock(&chain->filter_chain_lock);
	if (!tp || IS_ERR(tp)) {
		NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
		err = tp ? PTR_ERR(tp) : -ENOENT;
		goto errout;
	} else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
		NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
		err = -EINVAL;
		goto errout;
	}

	fh = tp->ops->get(tp, t->tcm_handle);

	if (!fh) {
		NL_SET_ERR_MSG(extack, "Specified filter handle not found");
		err = -ENOENT;
	} else {
		err = tfilter_notify(net, skb, n, tp, block, q, parent,
				     fh, RTM_NEWTFILTER, true, rtnl_held);
		if (err < 0)
			NL_SET_ERR_MSG(extack, "Failed to send filter notify message");
	}

	tfilter_put(tp, fh);
errout:
	if (chain) {
		if (tp && !IS_ERR(tp))
			tcf_proto_put(tp, rtnl_held, NULL);
		tcf_chain_put(chain);
	}
	tcf_block_release(q, block, rtnl_held);

	if (rtnl_held)
		rtnl_unlock();

	return err;
}
struct tcf_dump_args {
	struct tcf_walker w;
	struct sk_buff *skb;
	struct netlink_callback *cb;
	struct tcf_block *block;
	struct Qdisc *q;
	u32 parent;
};

static int tcf_node_dump(struct tcf_proto *tp, void *n, struct tcf_walker *arg)
{
	struct tcf_dump_args *a = (void *)arg;
	struct net *net = sock_net(a->skb->sk);

	return tcf_fill_node(net, a->skb, tp, a->block, a->q, a->parent,
			     n, NETLINK_CB(a->cb->skb).portid,
			     a->cb->nlh->nlmsg_seq, NLM_F_MULTI,
			     RTM_NEWTFILTER, true);
}

static bool tcf_chain_dump(struct tcf_chain *chain, struct Qdisc *q, u32 parent,
			   struct sk_buff *skb, struct netlink_callback *cb,
			   long index_start, long *p_index)
{
	struct net *net = sock_net(skb->sk);
	struct tcf_block *block = chain->block;
	struct tcmsg *tcm = nlmsg_data(cb->nlh);
	struct tcf_proto *tp, *tp_prev;
	struct tcf_dump_args arg;

	for (tp = __tcf_get_next_proto(chain, NULL);
	     tp;
	     tp_prev = tp,
		     tp = __tcf_get_next_proto(chain, tp),
		     tcf_proto_put(tp_prev, true, NULL),
		     (*p_index)++) {
		if (*p_index < index_start)
			continue;
		if (TC_H_MAJ(tcm->tcm_info) &&
		    TC_H_MAJ(tcm->tcm_info) != tp->prio)
			continue;
		if (TC_H_MIN(tcm->tcm_info) &&
		    TC_H_MIN(tcm->tcm_info) != tp->protocol)
			continue;
		if (*p_index > index_start)
			memset(&cb->args[1], 0,
			       sizeof(cb->args) - sizeof(cb->args[0]));
		if (cb->args[1] == 0) {
			if (tcf_fill_node(net, skb, tp, block, q, parent, NULL,
					  NETLINK_CB(cb->skb).portid,
					  cb->nlh->nlmsg_seq, NLM_F_MULTI,
					  RTM_NEWTFILTER, true) <= 0)
				goto errout;
			cb->args[1] = 1;
		}
		if (!tp->ops->walk)
			continue;
		arg.w.fn = tcf_node_dump;
		arg.skb = skb;
		arg.cb = cb;
		arg.block = block;
		arg.q = q;
		arg.parent = parent;
		arg.w.stop = 0;
		arg.w.skip = cb->args[1] - 1;
		arg.w.count = 0;
		arg.w.cookie = cb->args[2];
		tp->ops->walk(tp, &arg.w, true);
		cb->args[2] = arg.w.cookie;
		cb->args[1] = arg.w.count + 1;
		if (arg.w.stop)
			goto errout;
	}
	return true;

errout:
	tcf_proto_put(tp, true, NULL);
	return false;
}
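/* Illustration only (not compiled): a minimal sketch of the walker contract
 * tcf_chain_dump() relies on. A classifier's walk() op is expected to skip
 * arg->skip items, invoke arg->fn for the rest, count every visited item in
 * arg->count and set arg->stop when the callback asks to abort. The names
 * example_head/example_filter and their layout are hypothetical.
 */
#if 0
static void example_walk(struct tcf_proto *tp, struct tcf_walker *arg,
			 bool rtnl_held)
{
	struct example_head *head = tp->root;	/* hypothetical private head */
	struct example_filter *f;

	list_for_each_entry(f, &head->filters, list) {
		if (arg->count < arg->skip) {	/* resume point from cb->args */
			arg->count++;
			continue;
		}
		if (arg->fn(tp, f, arg) < 0) {
			arg->stop = 1;	/* tells tcf_chain_dump() to bail out */
			break;
		}
		arg->count++;
	}
}
#endif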
/* called with RTNL */
static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct tcf_chain *chain, *chain_prev;
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	struct Qdisc *q = NULL;
	struct tcf_block *block;
	struct tcmsg *tcm = nlmsg_data(cb->nlh);
	long index_start;
	long index;
	u32 parent;
	int err;

	if (nlmsg_len(cb->nlh) < sizeof(*tcm))
		return skb->len;

	err = nlmsg_parse_deprecated(cb->nlh, sizeof(*tcm), tca, TCA_MAX,
				     NULL, cb->extack);
	if (err)
		return err;

	if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
		block = tcf_block_refcnt_get(net, tcm->tcm_block_index);
		if (!block)
			goto out;
		/* If we work with block index, q is NULL and parent value
		 * will never be used in the following code. The check
		 * in tcf_fill_node prevents it. However, compiler does not
		 * see that far, so set parent to zero to silence the warning
		 * about parent being uninitialized.
		 */
		parent = 0;
	} else {
		const struct Qdisc_class_ops *cops;
		struct net_device *dev;
		unsigned long cl = 0;

		dev = __dev_get_by_index(net, tcm->tcm_ifindex);
		if (!dev)
			return skb->len;

		parent = tcm->tcm_parent;
		if (!parent) {
			q = dev->qdisc;
			parent = q->handle;
		} else {
			q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
		}
		if (!q)
			goto out;
		cops = q->ops->cl_ops;
		if (!cops)
			goto out;
		if (!cops->tcf_block)
			goto out;
		if (TC_H_MIN(tcm->tcm_parent)) {
			cl = cops->find(q, tcm->tcm_parent);
			if (cl == 0)
				goto out;
		}
		block = cops->tcf_block(q, cl, NULL);
		if (!block)
			goto out;
		if (tcf_block_shared(block))
			q = NULL;
	}

	index_start = cb->args[0];
	index = 0;

	for (chain = __tcf_get_next_chain(block, NULL);
	     chain;
	     chain_prev = chain,
		     chain = __tcf_get_next_chain(block, chain),
		     tcf_chain_put(chain_prev)) {
		if (tca[TCA_CHAIN] &&
		    nla_get_u32(tca[TCA_CHAIN]) != chain->index)
			continue;
		if (!tcf_chain_dump(chain, q, parent, skb, cb,
				    index_start, &index)) {
			tcf_chain_put(chain);
			err = -EMSGSIZE;
			break;
		}
	}

	if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK)
		tcf_block_refcnt_put(block, true);
	cb->args[0] = index;

out:
	/* If we did no progress, the error (EMSGSIZE) is real */
	if (skb->len == 0 && err)
		return err;
	return skb->len;
}
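/* Note on resume state: a multi-message dump is restarted with the state the
 * previous pass stashed in cb->args - args[0] holds the flat tcf_proto index
 * across chains (index_start above), args[1] holds the per-tcf_proto node
 * count plus one (0 meaning the tcf_proto header itself still needs dumping),
 * and args[2] carries the classifier's opaque walk cookie.
 */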
static int tc_chain_fill_node(const struct tcf_proto_ops *tmplt_ops,
			      void *tmplt_priv, u32 chain_index,
			      struct net *net, struct sk_buff *skb,
			      struct tcf_block *block,
			      u32 portid, u32 seq, u16 flags, int event)
{
	unsigned char *b = skb_tail_pointer(skb);
	const struct tcf_proto_ops *ops;
	struct nlmsghdr *nlh;
	struct tcmsg *tcm;
	void *priv;

	ops = tmplt_ops;
	priv = tmplt_priv;

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
	if (!nlh)
		goto out_nlmsg_trim;
	tcm = nlmsg_data(nlh);
	tcm->tcm_family = AF_UNSPEC;
	tcm->tcm__pad1 = 0;
	tcm->tcm__pad2 = 0;
	tcm->tcm_handle = 0;
	if (block->q) {
		tcm->tcm_ifindex = qdisc_dev(block->q)->ifindex;
		tcm->tcm_parent = block->q->handle;
	} else {
		tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
		tcm->tcm_block_index = block->index;
	}

	if (nla_put_u32(skb, TCA_CHAIN, chain_index))
		goto nla_put_failure;

	if (ops) {
		if (nla_put_string(skb, TCA_KIND, ops->kind))
			goto nla_put_failure;
		if (ops->tmplt_dump(skb, net, priv) < 0)
			goto nla_put_failure;
	}

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

out_nlmsg_trim:
nla_put_failure:
	nlmsg_trim(skb, b);
	return -EMSGSIZE;
}

static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
			   u32 seq, u16 flags, int event, bool unicast)
{
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
	struct tcf_block *block = chain->block;
	struct net *net = block->net;
	struct sk_buff *skb;
	int err = 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv,
			       chain->index, net, skb, block, portid,
			       seq, flags, event) <= 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	if (unicast)
		err = netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
	else
		err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
				     flags & NLM_F_ECHO);

	if (err > 0)
		err = 0;
	return err;
}
static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops,
				  void *tmplt_priv, u32 chain_index,
				  struct tcf_block *block, struct sk_buff *oskb,
				  u32 seq, u16 flags, bool unicast)
{
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
	struct net *net = block->net;
	struct sk_buff *skb;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tc_chain_fill_node(tmplt_ops, tmplt_priv, chain_index, net, skb,
			       block, portid, seq, flags, RTM_DELCHAIN) <= 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	if (unicast)
		return netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);

	return rtnetlink_send(skb, net, portid, RTNLGRP_TC, flags & NLM_F_ECHO);
}

static int tc_chain_tmplt_add(struct tcf_chain *chain, struct net *net,
			      struct nlattr **tca,
			      struct netlink_ext_ack *extack)
{
	const struct tcf_proto_ops *ops;
	char name[IFNAMSIZ];
	void *tmplt_priv;

	/* If kind is not set, user did not specify template. */
	if (!tca[TCA_KIND])
		return 0;

	if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
		NL_SET_ERR_MSG(extack, "Specified TC chain template name too long");
		return -EINVAL;
	}

	ops = tcf_proto_lookup_ops(name, true, extack);
	if (IS_ERR(ops))
		return PTR_ERR(ops);
	if (!ops->tmplt_create || !ops->tmplt_destroy || !ops->tmplt_dump) {
		NL_SET_ERR_MSG(extack, "Chain templates are not supported with specified classifier");
		return -EOPNOTSUPP;
	}

	tmplt_priv = ops->tmplt_create(net, chain, tca, extack);
	if (IS_ERR(tmplt_priv)) {
		module_put(ops->owner);
		return PTR_ERR(tmplt_priv);
	}
	chain->tmplt_ops = ops;
	chain->tmplt_priv = tmplt_priv;
	return 0;
}

static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops,
			       void *tmplt_priv)
{
	/* If template ops are not set, there is no template to tear down. */
	if (!tmplt_ops)
		return;

	tmplt_ops->tmplt_destroy(tmplt_priv);
	module_put(tmplt_ops->owner);
}
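/* Illustration only (not compiled): the three template ops validated above
 * are implemented e.g. by cls_flower. A hypothetical classifier would wire
 * them up along these lines; example_tmplt, the parsing step and the ops
 * table below are assumptions for illustration, not part of this file.
 */
#if 0
static void *example_tmplt_create(struct net *net, struct tcf_chain *chain,
				  struct nlattr **tca,
				  struct netlink_ext_ack *extack)
{
	struct example_tmplt *tmplt = kzalloc(sizeof(*tmplt), GFP_KERNEL);

	if (!tmplt)
		return ERR_PTR(-ENOMEM);
	/* parse tca[TCA_OPTIONS] into tmplt here */
	return tmplt;
}

static void example_tmplt_destroy(void *tmplt_priv)
{
	kfree(tmplt_priv);
}

static const struct tcf_proto_ops example_ops = {
	.kind		= "example",
	.tmplt_create	= example_tmplt_create,
	.tmplt_destroy	= example_tmplt_destroy,
	/* .tmplt_dump is also required, plus the usual filter ops */
};
#endif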
/* Add/delete/get a chain */

static int tc_ctl_chain(struct sk_buff *skb, struct nlmsghdr *n,
			struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	struct tcmsg *t;
	u32 parent;
	u32 chain_index;
	struct Qdisc *q = NULL;
	struct tcf_chain *chain = NULL;
	struct tcf_block *block;
	unsigned long cl;
	int err;

	if (n->nlmsg_type != RTM_GETCHAIN &&
	    !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

replay:
	err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
				     rtm_tca_policy, extack);
	if (err < 0)
		return err;

	t = nlmsg_data(n);
	parent = t->tcm_parent;
	cl = 0;

	block = tcf_block_find(net, &q, &parent, &cl,
			       t->tcm_ifindex, t->tcm_block_index, extack);
	if (IS_ERR(block))
		return PTR_ERR(block);

	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
	if (chain_index > TC_ACT_EXT_VAL_MASK) {
		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
		err = -EINVAL;
		goto errout_block;
	}

	mutex_lock(&block->lock);
	chain = tcf_chain_lookup(block, chain_index);
	if (n->nlmsg_type == RTM_NEWCHAIN) {
		if (chain) {
			if (tcf_chain_held_by_acts_only(chain)) {
				/* The chain exists only because there is
				 * some action referencing it.
				 */
				tcf_chain_hold(chain);
			} else {
				NL_SET_ERR_MSG(extack, "Filter chain already exists");
				err = -EEXIST;
				goto errout_block_locked;
			}
		} else {
			if (!(n->nlmsg_flags & NLM_F_CREATE)) {
				NL_SET_ERR_MSG(extack, "Need both RTM_NEWCHAIN and NLM_F_CREATE to create a new chain");
				err = -ENOENT;
				goto errout_block_locked;
			}
			chain = tcf_chain_create(block, chain_index);
			if (!chain) {
				NL_SET_ERR_MSG(extack, "Failed to create filter chain");
				err = -ENOMEM;
				goto errout_block_locked;
			}
		}
	} else {
		if (!chain || tcf_chain_held_by_acts_only(chain)) {
			NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
			err = -EINVAL;
			goto errout_block_locked;
		}
		tcf_chain_hold(chain);
	}

	if (n->nlmsg_type == RTM_NEWCHAIN) {
		/* Modifying chain requires holding parent block lock. In case
		 * the chain was successfully added, take a reference to the
		 * chain. This ensures that an empty chain does not disappear at
		 * the end of this function.
		 */
		tcf_chain_hold(chain);
		chain->explicitly_created = true;
	}
	mutex_unlock(&block->lock);

	switch (n->nlmsg_type) {
	case RTM_NEWCHAIN:
		err = tc_chain_tmplt_add(chain, net, tca, extack);
		if (err) {
			tcf_chain_put_explicitly_created(chain);
			goto errout;
		}

		tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
				RTM_NEWCHAIN, false);
		break;
	case RTM_DELCHAIN:
		tfilter_notify_chain(net, skb, block, q, parent, n,
				     chain, RTM_DELTFILTER, true);
		/* Flush the chain first as the user requested chain removal. */
		tcf_chain_flush(chain, true);
		/* In case the chain was successfully deleted, put a reference
		 * to the chain previously taken during addition.
		 */
		tcf_chain_put_explicitly_created(chain);
		break;
	case RTM_GETCHAIN:
		err = tc_chain_notify(chain, skb, n->nlmsg_seq,
				      n->nlmsg_seq, n->nlmsg_type, true);
		if (err < 0)
			NL_SET_ERR_MSG(extack, "Failed to send chain notify message");
		break;
	default:
		err = -EOPNOTSUPP;
		NL_SET_ERR_MSG(extack, "Unsupported message type");
		goto errout;
	}

errout:
	tcf_chain_put(chain);
errout_block:
	tcf_block_release(q, block, true);
	if (err == -EAGAIN)
		/* Replay the request. */
		goto replay;
	return err;

errout_block_locked:
	mutex_unlock(&block->lock);
	goto errout_block;
}
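/* Illustration only: the three message types handled above back the
 * "tc chain" subcommands, e.g. (device name is an example):
 *
 *   tc chain add dev eth0 ingress chain 1 protocol ip flower
 *   tc chain del dev eth0 ingress chain 1
 *   tc chain get dev eth0 ingress chain 1
 *
 * "add" with a classifier kind and match keys also installs a chain
 * template via tc_chain_tmplt_add() above.
 */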
2886 */ 2887 tcf_chain_put_explicitly_created(chain); 2888 break; 2889 case RTM_GETCHAIN: 2890 err = tc_chain_notify(chain, skb, n->nlmsg_seq, 2891 n->nlmsg_seq, n->nlmsg_type, true); 2892 if (err < 0) 2893 NL_SET_ERR_MSG(extack, "Failed to send chain notify message"); 2894 break; 2895 default: 2896 err = -EOPNOTSUPP; 2897 NL_SET_ERR_MSG(extack, "Unsupported message type"); 2898 goto errout; 2899 } 2900 2901 errout: 2902 tcf_chain_put(chain); 2903 errout_block: 2904 tcf_block_release(q, block, true); 2905 if (err == -EAGAIN) 2906 /* Replay the request. */ 2907 goto replay; 2908 return err; 2909 2910 errout_block_locked: 2911 mutex_unlock(&block->lock); 2912 goto errout_block; 2913 } 2914 2915 /* called with RTNL */ 2916 static int tc_dump_chain(struct sk_buff *skb, struct netlink_callback *cb) 2917 { 2918 struct net *net = sock_net(skb->sk); 2919 struct nlattr *tca[TCA_MAX + 1]; 2920 struct Qdisc *q = NULL; 2921 struct tcf_block *block; 2922 struct tcmsg *tcm = nlmsg_data(cb->nlh); 2923 struct tcf_chain *chain; 2924 long index_start; 2925 long index; 2926 u32 parent; 2927 int err; 2928 2929 if (nlmsg_len(cb->nlh) < sizeof(*tcm)) 2930 return skb->len; 2931 2932 err = nlmsg_parse_deprecated(cb->nlh, sizeof(*tcm), tca, TCA_MAX, 2933 rtm_tca_policy, cb->extack); 2934 if (err) 2935 return err; 2936 2937 if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) { 2938 block = tcf_block_refcnt_get(net, tcm->tcm_block_index); 2939 if (!block) 2940 goto out; 2941 /* If we work with block index, q is NULL and parent value 2942 * will never be used in the following code. The check 2943 * in tcf_fill_node prevents it. However, compiler does not 2944 * see that far, so set parent to zero to silence the warning 2945 * about parent being uninitialized. 2946 */ 2947 parent = 0; 2948 } else { 2949 const struct Qdisc_class_ops *cops; 2950 struct net_device *dev; 2951 unsigned long cl = 0; 2952 2953 dev = __dev_get_by_index(net, tcm->tcm_ifindex); 2954 if (!dev) 2955 return skb->len; 2956 2957 parent = tcm->tcm_parent; 2958 if (!parent) { 2959 q = dev->qdisc; 2960 parent = q->handle; 2961 } else { 2962 q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent)); 2963 } 2964 if (!q) 2965 goto out; 2966 cops = q->ops->cl_ops; 2967 if (!cops) 2968 goto out; 2969 if (!cops->tcf_block) 2970 goto out; 2971 if (TC_H_MIN(tcm->tcm_parent)) { 2972 cl = cops->find(q, tcm->tcm_parent); 2973 if (cl == 0) 2974 goto out; 2975 } 2976 block = cops->tcf_block(q, cl, NULL); 2977 if (!block) 2978 goto out; 2979 if (tcf_block_shared(block)) 2980 q = NULL; 2981 } 2982 2983 index_start = cb->args[0]; 2984 index = 0; 2985 2986 mutex_lock(&block->lock); 2987 list_for_each_entry(chain, &block->chain_list, list) { 2988 if ((tca[TCA_CHAIN] && 2989 nla_get_u32(tca[TCA_CHAIN]) != chain->index)) 2990 continue; 2991 if (index < index_start) { 2992 index++; 2993 continue; 2994 } 2995 if (tcf_chain_held_by_acts_only(chain)) 2996 continue; 2997 err = tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv, 2998 chain->index, net, skb, block, 2999 NETLINK_CB(cb->skb).portid, 3000 cb->nlh->nlmsg_seq, NLM_F_MULTI, 3001 RTM_NEWCHAIN); 3002 if (err <= 0) 3003 break; 3004 index++; 3005 } 3006 mutex_unlock(&block->lock); 3007 3008 if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) 3009 tcf_block_refcnt_put(block, true); 3010 cb->args[0] = index; 3011 3012 out: 3013 /* If we did no progress, the error (EMSGSIZE) is real */ 3014 if (skb->len == 0 && err) 3015 return err; 3016 return skb->len; 3017 } 3018 3019 void tcf_exts_destroy(struct tcf_exts *exts) 3020 { 3021 #ifdef 
int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
		      struct nlattr *rate_tlv, struct tcf_exts *exts, bool ovr,
		      bool rtnl_held, struct netlink_ext_ack *extack)
{
#ifdef CONFIG_NET_CLS_ACT
	{
		struct tc_action *act;
		size_t attr_size = 0;

		if (exts->police && tb[exts->police]) {
			act = tcf_action_init_1(net, tp, tb[exts->police],
						rate_tlv, "police", ovr,
						TCA_ACT_BIND, rtnl_held,
						extack);
			if (IS_ERR(act))
				return PTR_ERR(act);

			act->type = exts->type = TCA_OLD_COMPAT;
			exts->actions[0] = act;
			exts->nr_actions = 1;
		} else if (exts->action && tb[exts->action]) {
			int err;

			err = tcf_action_init(net, tp, tb[exts->action],
					      rate_tlv, NULL, ovr, TCA_ACT_BIND,
					      exts->actions, &attr_size,
					      rtnl_held, extack);
			if (err < 0)
				return err;
			exts->nr_actions = err;
		}
	}
#else
	if ((exts->action && tb[exts->action]) ||
	    (exts->police && tb[exts->police])) {
		NL_SET_ERR_MSG(extack, "Classifier actions are not supported per compile options (CONFIG_NET_CLS_ACT)");
		return -EOPNOTSUPP;
	}
#endif

	return 0;
}
EXPORT_SYMBOL(tcf_exts_validate);

void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src)
{
#ifdef CONFIG_NET_CLS_ACT
	struct tcf_exts old = *dst;

	*dst = *src;
	tcf_exts_destroy(&old);
#endif
}
EXPORT_SYMBOL(tcf_exts_change);

#ifdef CONFIG_NET_CLS_ACT
static struct tc_action *tcf_exts_first_act(struct tcf_exts *exts)
{
	if (exts->nr_actions == 0)
		return NULL;
	else
		return exts->actions[0];
}
#endif

int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	struct nlattr *nest;

	if (exts->action && tcf_exts_has_actions(exts)) {
		/* again for backward compatible mode - we want
		 * to work with both old and new modes of entering
		 * tc data even if iproute2 was newer - jhs
		 */
		if (exts->type != TCA_OLD_COMPAT) {
			nest = nla_nest_start_noflag(skb, exts->action);
			if (nest == NULL)
				goto nla_put_failure;

			if (tcf_action_dump(skb, exts->actions, 0, 0) < 0)
				goto nla_put_failure;
			nla_nest_end(skb, nest);
		} else if (exts->police) {
			struct tc_action *act = tcf_exts_first_act(exts);

			nest = nla_nest_start_noflag(skb, exts->police);
			if (nest == NULL || !act)
				goto nla_put_failure;
			if (tcf_action_dump_old(skb, act, 0, 0) < 0)
				goto nla_put_failure;
			nla_nest_end(skb, nest);
		}
	}
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
#else
	return 0;
#endif
}
EXPORT_SYMBOL(tcf_exts_dump);

int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	struct tc_action *a = tcf_exts_first_act(exts);

	if (a != NULL && tcf_action_copy_stats(skb, a, 1) < 0)
		return -1;
#endif
	return 0;
}
EXPORT_SYMBOL(tcf_exts_dump_stats);
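/* Illustration only (not compiled): a classifier's change() op typically
 * drives the tcf_exts helpers above in this order. example_filter and
 * example_set_parms() are hypothetical names used for illustration.
 */
#if 0
static int example_set_parms(struct net *net, struct tcf_proto *tp,
			     struct example_filter *f, struct nlattr **tb,
			     struct nlattr *est, bool ovr,
			     struct netlink_ext_ack *extack)
{
	int err;

	/* bind TCA_*_ACT / TCA_*_POLICE attributes to actions */
	err = tcf_exts_validate(net, tp, tb, est, &f->exts, ovr, true, extack);
	if (err < 0)
		return err;
	/* ... classifier-specific key parsing goes here ... */
	return 0;
}
/* and on filter destruction: tcf_exts_destroy(&f->exts); */
#endif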
static void tcf_block_offload_inc(struct tcf_block *block, u32 *flags)
{
	if (*flags & TCA_CLS_FLAGS_IN_HW)
		return;
	*flags |= TCA_CLS_FLAGS_IN_HW;
	atomic_inc(&block->offloadcnt);
}

static void tcf_block_offload_dec(struct tcf_block *block, u32 *flags)
{
	if (!(*flags & TCA_CLS_FLAGS_IN_HW))
		return;
	*flags &= ~TCA_CLS_FLAGS_IN_HW;
	atomic_dec(&block->offloadcnt);
}

static void tc_cls_offload_cnt_update(struct tcf_block *block,
				      struct tcf_proto *tp, u32 *cnt,
				      u32 *flags, u32 diff, bool add)
{
	lockdep_assert_held(&block->cb_lock);

	spin_lock(&tp->lock);
	if (add) {
		if (!*cnt)
			tcf_block_offload_inc(block, flags);
		*cnt += diff;
	} else {
		*cnt -= diff;
		if (!*cnt)
			tcf_block_offload_dec(block, flags);
	}
	spin_unlock(&tp->lock);
}

static void
tc_cls_offload_cnt_reset(struct tcf_block *block, struct tcf_proto *tp,
			 u32 *cnt, u32 *flags)
{
	lockdep_assert_held(&block->cb_lock);

	spin_lock(&tp->lock);
	tcf_block_offload_dec(block, flags);
	*cnt = 0;
	spin_unlock(&tp->lock);
}

static int
__tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
		   void *type_data, bool err_stop)
{
	struct flow_block_cb *block_cb;
	int ok_count = 0;
	int err;

	list_for_each_entry(block_cb, &block->flow_block.cb_list, list) {
		err = block_cb->cb(type, type_data, block_cb->cb_priv);
		if (err) {
			if (err_stop)
				return err;
		} else {
			ok_count++;
		}
	}
	return ok_count;
}

int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
		     void *type_data, bool err_stop, bool rtnl_held)
{
	bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
	int ok_count;

retry:
	if (take_rtnl)
		rtnl_lock();
	down_read(&block->cb_lock);
	/* Need to obtain rtnl lock if block is bound to devs that require it.
	 * In block bind code cb_lock is obtained while holding rtnl, so we must
	 * obtain the locks in same order here.
	 */
	if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
		up_read(&block->cb_lock);
		take_rtnl = true;
		goto retry;
	}

	ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);

	up_read(&block->cb_lock);
	if (take_rtnl)
		rtnl_unlock();
	return ok_count;
}
EXPORT_SYMBOL(tc_setup_cb_call);
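/* The retry dance above (repeated in the cb_add/cb_replace/cb_destroy
 * variants below) exists purely for lock ordering: block bind code takes
 * rtnl first and cb_lock second, so an unlocked caller that only discovers
 * lockeddevcnt != 0 after taking cb_lock must back off, take rtnl, and try
 * again rather than acquire the two locks in the opposite order and risk
 * an ABBA deadlock.
 */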
/* Non-destructive filter add. If filter that wasn't already in hardware is
 * successfully offloaded, increment block offloads counter. On failure,
 * previously offloaded filter is considered to be intact and offloads counter
 * is not decremented.
 */

int tc_setup_cb_add(struct tcf_block *block, struct tcf_proto *tp,
		    enum tc_setup_type type, void *type_data, bool err_stop,
		    u32 *flags, unsigned int *in_hw_count, bool rtnl_held)
{
	bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
	int ok_count;

retry:
	if (take_rtnl)
		rtnl_lock();
	down_read(&block->cb_lock);
	/* Need to obtain rtnl lock if block is bound to devs that require it.
	 * In block bind code cb_lock is obtained while holding rtnl, so we must
	 * obtain the locks in same order here.
	 */
	if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
		up_read(&block->cb_lock);
		take_rtnl = true;
		goto retry;
	}

	/* Make sure all netdevs sharing this block are offload-capable. */
	if (block->nooffloaddevcnt && err_stop) {
		ok_count = -EOPNOTSUPP;
		goto err_unlock;
	}

	ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
	if (ok_count < 0)
		goto err_unlock;

	if (tp->ops->hw_add)
		tp->ops->hw_add(tp, type_data);
	if (ok_count > 0)
		tc_cls_offload_cnt_update(block, tp, in_hw_count, flags,
					  ok_count, true);
err_unlock:
	up_read(&block->cb_lock);
	if (take_rtnl)
		rtnl_unlock();
	return ok_count < 0 ? ok_count : 0;
}
EXPORT_SYMBOL(tc_setup_cb_add);

/* Destructive filter replace. If filter that wasn't already in hardware is
 * successfully offloaded, increment block offload counter. On failure,
 * previously offloaded filter is considered to be destroyed and offload counter
 * is decremented.
 */

int tc_setup_cb_replace(struct tcf_block *block, struct tcf_proto *tp,
			enum tc_setup_type type, void *type_data, bool err_stop,
			u32 *old_flags, unsigned int *old_in_hw_count,
			u32 *new_flags, unsigned int *new_in_hw_count,
			bool rtnl_held)
{
	bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
	int ok_count;

retry:
	if (take_rtnl)
		rtnl_lock();
	down_read(&block->cb_lock);
	/* Need to obtain rtnl lock if block is bound to devs that require it.
	 * In block bind code cb_lock is obtained while holding rtnl, so we must
	 * obtain the locks in same order here.
	 */
	if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
		up_read(&block->cb_lock);
		take_rtnl = true;
		goto retry;
	}

	/* Make sure all netdevs sharing this block are offload-capable. */
	if (block->nooffloaddevcnt && err_stop) {
		ok_count = -EOPNOTSUPP;
		goto err_unlock;
	}

	tc_cls_offload_cnt_reset(block, tp, old_in_hw_count, old_flags);
	if (tp->ops->hw_del)
		tp->ops->hw_del(tp, type_data);

	ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
	if (ok_count < 0)
		goto err_unlock;

	if (tp->ops->hw_add)
		tp->ops->hw_add(tp, type_data);
	if (ok_count > 0)
		tc_cls_offload_cnt_update(block, tp, new_in_hw_count,
					  new_flags, ok_count, true);
err_unlock:
	up_read(&block->cb_lock);
	if (take_rtnl)
		rtnl_unlock();
	return ok_count < 0 ? ok_count : 0;
}
EXPORT_SYMBOL(tc_setup_cb_replace);

/* Destroy filter and decrement block offload counter, if filter was previously
 * offloaded.
 */

int tc_setup_cb_destroy(struct tcf_block *block, struct tcf_proto *tp,
			enum tc_setup_type type, void *type_data, bool err_stop,
			u32 *flags, unsigned int *in_hw_count, bool rtnl_held)
{
	bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
	int ok_count;

retry:
	if (take_rtnl)
		rtnl_lock();
	down_read(&block->cb_lock);
	/* Need to obtain rtnl lock if block is bound to devs that require it.
	 * In block bind code cb_lock is obtained while holding rtnl, so we must
	 * obtain the locks in same order here.
	 */
	if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
		up_read(&block->cb_lock);
		take_rtnl = true;
		goto retry;
	}

	ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);

	tc_cls_offload_cnt_reset(block, tp, in_hw_count, flags);
	if (tp->ops->hw_del)
		tp->ops->hw_del(tp, type_data);

	up_read(&block->cb_lock);
	if (take_rtnl)
		rtnl_unlock();
	return ok_count < 0 ? ok_count : 0;
}
EXPORT_SYMBOL(tc_setup_cb_destroy);
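/* Taken together, the three helpers above keep (*flags, *in_hw_count)
 * consistent: add only ever increments, replace resets the old counters
 * before re-adding under the new ones, and destroy unconditionally resets.
 * A filter whose hardware state is unknown after a failed replace is thus
 * treated as gone rather than leaking an offload count.
 */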
int tc_setup_cb_reoffload(struct tcf_block *block, struct tcf_proto *tp,
			  bool add, flow_setup_cb_t *cb,
			  enum tc_setup_type type, void *type_data,
			  void *cb_priv, u32 *flags, unsigned int *in_hw_count)
{
	int err = cb(type, type_data, cb_priv);

	if (err) {
		if (add && tc_skip_sw(*flags))
			return err;
	} else {
		tc_cls_offload_cnt_update(block, tp, in_hw_count, flags, 1,
					  add);
	}

	return 0;
}
EXPORT_SYMBOL(tc_setup_cb_reoffload);

void tc_cleanup_flow_action(struct flow_action *flow_action)
{
	struct flow_action_entry *entry;
	int i;

	flow_action_for_each(i, entry, flow_action)
		if (entry->destructor)
			entry->destructor(entry->destructor_priv);
}
EXPORT_SYMBOL(tc_cleanup_flow_action);

static void tcf_mirred_get_dev(struct flow_action_entry *entry,
			       const struct tc_action *act)
{
#ifdef CONFIG_NET_CLS_ACT
	entry->dev = act->ops->get_dev(act, &entry->destructor);
	if (!entry->dev)
		return;
	entry->destructor_priv = entry->dev;
#endif
}

static void tcf_tunnel_encap_put_tunnel(void *priv)
{
	struct ip_tunnel_info *tunnel = priv;

	kfree(tunnel);
}

static int tcf_tunnel_encap_get_tunnel(struct flow_action_entry *entry,
				       const struct tc_action *act)
{
	entry->tunnel = tcf_tunnel_info_copy(act);
	if (!entry->tunnel)
		return -ENOMEM;
	entry->destructor = tcf_tunnel_encap_put_tunnel;
	entry->destructor_priv = entry->tunnel;
	return 0;
}

static void tcf_sample_get_group(struct flow_action_entry *entry,
				 const struct tc_action *act)
{
#ifdef CONFIG_NET_CLS_ACT
	entry->sample.psample_group =
		act->ops->get_psample_group(act, &entry->destructor);
	entry->destructor_priv = entry->sample.psample_group;
#endif
}
int tc_setup_flow_action(struct flow_action *flow_action,
			 const struct tcf_exts *exts, bool rtnl_held)
{
	const struct tc_action *act;
	int i, j, k, err = 0;

	if (!exts)
		return 0;

	if (!rtnl_held)
		rtnl_lock();

	j = 0;
	tcf_exts_for_each_action(i, act, exts) {
		struct flow_action_entry *entry;

		entry = &flow_action->entries[j];
		if (is_tcf_gact_ok(act)) {
			entry->id = FLOW_ACTION_ACCEPT;
		} else if (is_tcf_gact_shot(act)) {
			entry->id = FLOW_ACTION_DROP;
		} else if (is_tcf_gact_trap(act)) {
			entry->id = FLOW_ACTION_TRAP;
		} else if (is_tcf_gact_goto_chain(act)) {
			entry->id = FLOW_ACTION_GOTO;
			entry->chain_index = tcf_gact_goto_chain_index(act);
		} else if (is_tcf_mirred_egress_redirect(act)) {
			entry->id = FLOW_ACTION_REDIRECT;
			tcf_mirred_get_dev(entry, act);
		} else if (is_tcf_mirred_egress_mirror(act)) {
			entry->id = FLOW_ACTION_MIRRED;
			tcf_mirred_get_dev(entry, act);
		} else if (is_tcf_mirred_ingress_redirect(act)) {
			entry->id = FLOW_ACTION_REDIRECT_INGRESS;
			tcf_mirred_get_dev(entry, act);
		} else if (is_tcf_mirred_ingress_mirror(act)) {
			entry->id = FLOW_ACTION_MIRRED_INGRESS;
			tcf_mirred_get_dev(entry, act);
		} else if (is_tcf_vlan(act)) {
			switch (tcf_vlan_action(act)) {
			case TCA_VLAN_ACT_PUSH:
				entry->id = FLOW_ACTION_VLAN_PUSH;
				entry->vlan.vid = tcf_vlan_push_vid(act);
				entry->vlan.proto = tcf_vlan_push_proto(act);
				entry->vlan.prio = tcf_vlan_push_prio(act);
				break;
			case TCA_VLAN_ACT_POP:
				entry->id = FLOW_ACTION_VLAN_POP;
				break;
			case TCA_VLAN_ACT_MODIFY:
				entry->id = FLOW_ACTION_VLAN_MANGLE;
				entry->vlan.vid = tcf_vlan_push_vid(act);
				entry->vlan.proto = tcf_vlan_push_proto(act);
				entry->vlan.prio = tcf_vlan_push_prio(act);
				break;
			default:
				err = -EOPNOTSUPP;
				goto err_out;
			}
		} else if (is_tcf_tunnel_set(act)) {
			entry->id = FLOW_ACTION_TUNNEL_ENCAP;
			err = tcf_tunnel_encap_get_tunnel(entry, act);
			if (err)
				goto err_out;
		} else if (is_tcf_tunnel_release(act)) {
			entry->id = FLOW_ACTION_TUNNEL_DECAP;
		} else if (is_tcf_pedit(act)) {
			for (k = 0; k < tcf_pedit_nkeys(act); k++) {
				switch (tcf_pedit_cmd(act, k)) {
				case TCA_PEDIT_KEY_EX_CMD_SET:
					entry->id = FLOW_ACTION_MANGLE;
					break;
				case TCA_PEDIT_KEY_EX_CMD_ADD:
					entry->id = FLOW_ACTION_ADD;
					break;
				default:
					err = -EOPNOTSUPP;
					goto err_out;
				}
				entry->mangle.htype = tcf_pedit_htype(act, k);
				entry->mangle.mask = tcf_pedit_mask(act, k);
				entry->mangle.val = tcf_pedit_val(act, k);
				entry->mangle.offset = tcf_pedit_offset(act, k);
				entry = &flow_action->entries[++j];
			}
		} else if (is_tcf_csum(act)) {
			entry->id = FLOW_ACTION_CSUM;
			entry->csum_flags = tcf_csum_update_flags(act);
		} else if (is_tcf_skbedit_mark(act)) {
			entry->id = FLOW_ACTION_MARK;
			entry->mark = tcf_skbedit_mark(act);
		} else if (is_tcf_sample(act)) {
			entry->id = FLOW_ACTION_SAMPLE;
			entry->sample.trunc_size = tcf_sample_trunc_size(act);
			entry->sample.truncate = tcf_sample_truncate(act);
			entry->sample.rate = tcf_sample_rate(act);
			tcf_sample_get_group(entry, act);
		} else if (is_tcf_police(act)) {
			entry->id = FLOW_ACTION_POLICE;
			entry->police.burst = tcf_police_tcfp_burst(act);
			entry->police.rate_bytes_ps =
				tcf_police_rate_bytes_ps(act);
		} else if (is_tcf_ct(act)) {
			entry->id = FLOW_ACTION_CT;
			entry->ct.action = tcf_ct_action(act);
			entry->ct.zone = tcf_ct_zone(act);
		} else if (is_tcf_mpls(act)) {
			switch (tcf_mpls_action(act)) {
			case TCA_MPLS_ACT_PUSH:
				entry->id = FLOW_ACTION_MPLS_PUSH;
				entry->mpls_push.proto = tcf_mpls_proto(act);
				entry->mpls_push.label = tcf_mpls_label(act);
				entry->mpls_push.tc = tcf_mpls_tc(act);
				entry->mpls_push.bos = tcf_mpls_bos(act);
				entry->mpls_push.ttl = tcf_mpls_ttl(act);
				break;
			case TCA_MPLS_ACT_POP:
				entry->id = FLOW_ACTION_MPLS_POP;
				entry->mpls_pop.proto = tcf_mpls_proto(act);
				break;
			case TCA_MPLS_ACT_MODIFY:
				entry->id = FLOW_ACTION_MPLS_MANGLE;
				entry->mpls_mangle.label = tcf_mpls_label(act);
				entry->mpls_mangle.tc = tcf_mpls_tc(act);
				entry->mpls_mangle.bos = tcf_mpls_bos(act);
				entry->mpls_mangle.ttl = tcf_mpls_ttl(act);
				break;
			default:
				/* Report unsupported MPLS sub-commands like
				 * every other unsupported action instead of
				 * falling through with err left at zero.
				 */
				err = -EOPNOTSUPP;
				goto err_out;
			}
		} else if (is_tcf_skbedit_ptype(act)) {
			entry->id = FLOW_ACTION_PTYPE;
			entry->ptype = tcf_skbedit_ptype(act);
		} else {
			err = -EOPNOTSUPP;
			goto err_out;
		}

		if (!is_tcf_pedit(act))
			j++;
	}

err_out:
	if (!rtnl_held)
		rtnl_unlock();

	if (err)
		tc_cleanup_flow_action(flow_action);

	return err;
}
EXPORT_SYMBOL(tc_setup_flow_action);
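/* Illustration only (not compiled): a minimal sketch of how offload code is
 * expected to consume the translation above - size the entry array with
 * tcf_exts_num_actions(), run tc_setup_flow_action(), and drop the per-entry
 * destructor references with tc_cleanup_flow_action() once the entries have
 * been handed to the driver. example_program_hw() is a hypothetical hook.
 */
#if 0
static int example_offload_exts(struct tcf_exts *exts, bool rtnl_held)
{
	unsigned int n = tcf_exts_num_actions(exts);
	struct flow_action *fa;
	int err;

	fa = kzalloc(struct_size(fa, entries, n), GFP_KERNEL);
	if (!fa)
		return -ENOMEM;
	fa->num_entries = n;

	err = tc_setup_flow_action(fa, exts, rtnl_held);
	if (err)
		goto out_free;	/* entries were already cleaned up on failure */

	err = example_program_hw(fa);	/* hypothetical driver hook */
	tc_cleanup_flow_action(fa);	/* run the per-entry destructors */
out_free:
	kfree(fa);
	return err;
}
#endif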
unsigned int tcf_exts_num_actions(struct tcf_exts *exts)
{
	unsigned int num_acts = 0;
	struct tc_action *act;
	int i;

	tcf_exts_for_each_action(i, act, exts) {
		if (is_tcf_pedit(act))
			num_acts += tcf_pedit_nkeys(act);
		else
			num_acts++;
	}
	return num_acts;
}
EXPORT_SYMBOL(tcf_exts_num_actions);

static __net_init int tcf_net_init(struct net *net)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	spin_lock_init(&tn->idr_lock);
	idr_init(&tn->idr);
	return 0;
}

static void __net_exit tcf_net_exit(struct net *net)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	idr_destroy(&tn->idr);
}

static struct pernet_operations tcf_net_ops = {
	.init = tcf_net_init,
	.exit = tcf_net_exit,
	.id = &tcf_net_id,
	.size = sizeof(struct tcf_net),
};

static struct flow_indr_block_entry block_entry = {
	.cb = tc_indr_block_get_and_cmd,
	.list = LIST_HEAD_INIT(block_entry.list),
};

static int __init tc_filter_init(void)
{
	int err;

	tc_filter_wq = alloc_ordered_workqueue("tc_filter_workqueue", 0);
	if (!tc_filter_wq)
		return -ENOMEM;

	err = register_pernet_subsys(&tcf_net_ops);
	if (err)
		goto err_register_pernet_subsys;

	flow_indr_add_block_cb(&block_entry);

	rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_new_tfilter, NULL,
		      RTNL_FLAG_DOIT_UNLOCKED);
	rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_del_tfilter, NULL,
		      RTNL_FLAG_DOIT_UNLOCKED);
	rtnl_register(PF_UNSPEC, RTM_GETTFILTER, tc_get_tfilter,
		      tc_dump_tfilter, RTNL_FLAG_DOIT_UNLOCKED);
	rtnl_register(PF_UNSPEC, RTM_NEWCHAIN, tc_ctl_chain, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_DELCHAIN, tc_ctl_chain, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_GETCHAIN, tc_ctl_chain,
		      tc_dump_chain, 0);

	return 0;

err_register_pernet_subsys:
	destroy_workqueue(tc_filter_wq);
	return err;
}

subsys_initcall(tc_filter_init);