// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/cls_api.c  Packet classifier API.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Changes:
 *
 * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/rhashtable.h>
#include <linux/jhash.h>
#include <linux/rculist.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>
#include <net/tc_act/tc_csum.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_police.h>
#include <net/tc_act/tc_sample.h>
#include <net/tc_act/tc_skbedit.h>
#include <net/tc_act/tc_ct.h>
#include <net/tc_act/tc_mpls.h>
#include <net/flow_offload.h>

extern const struct nla_policy rtm_tca_policy[TCA_MAX + 1];

/* The list of all installed classifier types */
static LIST_HEAD(tcf_proto_base);

/* Protects the list of registered TC modules. It is a pure SMP lock. */
static DEFINE_RWLOCK(cls_mod_lock);

static u32 destroy_obj_hashfn(const struct tcf_proto *tp)
{
	return jhash_3words(tp->chain->index, tp->prio,
			    (__force __u32)tp->protocol, 0);
}

static void tcf_proto_signal_destroying(struct tcf_chain *chain,
					struct tcf_proto *tp)
{
	struct tcf_block *block = chain->block;

	mutex_lock(&block->proto_destroy_lock);
	hash_add_rcu(block->proto_destroy_ht, &tp->destroy_ht_node,
		     destroy_obj_hashfn(tp));
	mutex_unlock(&block->proto_destroy_lock);
}

static bool tcf_proto_cmp(const struct tcf_proto *tp1,
			  const struct tcf_proto *tp2)
{
	return tp1->chain->index == tp2->chain->index &&
	       tp1->prio == tp2->prio &&
	       tp1->protocol == tp2->protocol;
}

static bool tcf_proto_exists_destroying(struct tcf_chain *chain,
					struct tcf_proto *tp)
{
	u32 hash = destroy_obj_hashfn(tp);
	struct tcf_proto *iter;
	bool found = false;

	rcu_read_lock();
	hash_for_each_possible_rcu(chain->block->proto_destroy_ht, iter,
				   destroy_ht_node, hash) {
		if (tcf_proto_cmp(tp, iter)) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();

	return found;
}

static void
tcf_proto_signal_destroyed(struct tcf_chain *chain, struct tcf_proto *tp)
{
	struct tcf_block *block = chain->block;

	mutex_lock(&block->proto_destroy_lock);
	if (hash_hashed(&tp->destroy_ht_node))
		hash_del_rcu(&tp->destroy_ht_node);
	mutex_unlock(&block->proto_destroy_lock);
}

/* Find classifier type by string name */

static const struct tcf_proto_ops *__tcf_proto_lookup_ops(const char *kind)
{
	const struct tcf_proto_ops *t, *res = NULL;

	if (kind) {
		read_lock(&cls_mod_lock);
		list_for_each_entry(t, &tcf_proto_base, head) {
			if (strcmp(kind, t->kind) == 0) {
				if (try_module_get(t->owner))
					res = t;
				break;
			}
		}
		read_unlock(&cls_mod_lock);
	}
	return res;
}
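
/* Like __tcf_proto_lookup_ops(), but if the classifier is not present try to
 * auto-load its module. Note that the RTNL lock is dropped around the module
 * request, so even a successful load returns -EAGAIN to make the caller
 * replay the whole request under consistent locking.
 */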
static const struct tcf_proto_ops *
tcf_proto_lookup_ops(const char *kind, bool rtnl_held,
		     struct netlink_ext_ack *extack)
{
	const struct tcf_proto_ops *ops;

	ops = __tcf_proto_lookup_ops(kind);
	if (ops)
		return ops;
#ifdef CONFIG_MODULES
	if (rtnl_held)
		rtnl_unlock();
	request_module("cls_%s", kind);
	if (rtnl_held)
		rtnl_lock();
	ops = __tcf_proto_lookup_ops(kind);
	/* We dropped the RTNL semaphore in order to perform
	 * the module load. So, even if we succeeded in loading
	 * the module we have to replay the request. We indicate
	 * this using -EAGAIN.
	 */
	if (ops) {
		module_put(ops->owner);
		return ERR_PTR(-EAGAIN);
	}
#endif
	NL_SET_ERR_MSG(extack, "TC classifier not found");
	return ERR_PTR(-ENOENT);
}

/* Register(unregister) new classifier type */

int register_tcf_proto_ops(struct tcf_proto_ops *ops)
{
	struct tcf_proto_ops *t;
	int rc = -EEXIST;

	write_lock(&cls_mod_lock);
	list_for_each_entry(t, &tcf_proto_base, head)
		if (!strcmp(ops->kind, t->kind))
			goto out;

	list_add_tail(&ops->head, &tcf_proto_base);
	rc = 0;
out:
	write_unlock(&cls_mod_lock);
	return rc;
}
EXPORT_SYMBOL(register_tcf_proto_ops);

static struct workqueue_struct *tc_filter_wq;

int unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
{
	struct tcf_proto_ops *t;
	int rc = -ENOENT;

	/* Wait for outstanding call_rcu()s, if any, from a
	 * tcf_proto_ops's destroy() handler.
	 */
	rcu_barrier();
	flush_workqueue(tc_filter_wq);

	write_lock(&cls_mod_lock);
	list_for_each_entry(t, &tcf_proto_base, head) {
		if (t == ops) {
			list_del(&t->head);
			rc = 0;
			break;
		}
	}
	write_unlock(&cls_mod_lock);
	return rc;
}
EXPORT_SYMBOL(unregister_tcf_proto_ops);
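
/* A classifier module typically registers its ops from module_init() and
 * unregisters them on exit. Illustrative sketch only (hypothetical "foo"
 * classifier, not part of this file):
 *
 *	static struct tcf_proto_ops cls_foo_ops __read_mostly = {
 *		.kind		= "foo",
 *		.classify	= foo_classify,
 *		.init		= foo_init,
 *		.destroy	= foo_destroy,
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	static int __init init_foo(void)
 *	{
 *		return register_tcf_proto_ops(&cls_foo_ops);
 *	}
 *
 *	static void __exit exit_foo(void)
 *	{
 *		unregister_tcf_proto_ops(&cls_foo_ops);
 *	}
 */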
bool tcf_queue_work(struct rcu_work *rwork, work_func_t func)
{
	INIT_RCU_WORK(rwork, func);
	return queue_rcu_work(tc_filter_wq, rwork);
}
EXPORT_SYMBOL(tcf_queue_work);

/* Select new prio value from the range managed by the kernel. */

static inline u32 tcf_auto_prio(struct tcf_proto *tp)
{
	u32 first = TC_H_MAKE(0xC0000000U, 0U);

	if (tp)
		first = tp->prio - 1;

	return TC_H_MAJ(first);
}

static bool tcf_proto_check_kind(struct nlattr *kind, char *name)
{
	if (kind)
		return nla_strlcpy(name, kind, IFNAMSIZ) >= IFNAMSIZ;
	memset(name, 0, IFNAMSIZ);
	return false;
}

static bool tcf_proto_is_unlocked(const char *kind)
{
	const struct tcf_proto_ops *ops;
	bool ret;

	if (strlen(kind) == 0)
		return false;

	ops = tcf_proto_lookup_ops(kind, false, NULL);
	/* On error return false to take rtnl lock. Proto lookup/create
	 * functions will perform lookup again and properly handle errors.
	 */
	if (IS_ERR(ops))
		return false;

	ret = !!(ops->flags & TCF_PROTO_OPS_DOIT_UNLOCKED);
	module_put(ops->owner);
	return ret;
}

static struct tcf_proto *tcf_proto_create(const char *kind, u32 protocol,
					  u32 prio, struct tcf_chain *chain,
					  bool rtnl_held,
					  struct netlink_ext_ack *extack)
{
	struct tcf_proto *tp;
	int err;

	tp = kzalloc(sizeof(*tp), GFP_KERNEL);
	if (!tp)
		return ERR_PTR(-ENOBUFS);

	tp->ops = tcf_proto_lookup_ops(kind, rtnl_held, extack);
	if (IS_ERR(tp->ops)) {
		err = PTR_ERR(tp->ops);
		goto errout;
	}
	tp->classify = tp->ops->classify;
	tp->protocol = protocol;
	tp->prio = prio;
	tp->chain = chain;
	spin_lock_init(&tp->lock);
	refcount_set(&tp->refcnt, 1);

	err = tp->ops->init(tp);
	if (err) {
		module_put(tp->ops->owner);
		goto errout;
	}
	return tp;

errout:
	kfree(tp);
	return ERR_PTR(err);
}

static void tcf_proto_get(struct tcf_proto *tp)
{
	refcount_inc(&tp->refcnt);
}

static void tcf_chain_put(struct tcf_chain *chain);

static void tcf_proto_destroy(struct tcf_proto *tp, bool rtnl_held,
			      bool sig_destroy, struct netlink_ext_ack *extack)
{
	tp->ops->destroy(tp, rtnl_held, extack);
	if (sig_destroy)
		tcf_proto_signal_destroyed(tp->chain, tp);
	tcf_chain_put(tp->chain);
	module_put(tp->ops->owner);
	kfree_rcu(tp, rcu);
}

static void tcf_proto_put(struct tcf_proto *tp, bool rtnl_held,
			  struct netlink_ext_ack *extack)
{
	if (refcount_dec_and_test(&tp->refcnt))
		tcf_proto_destroy(tp, rtnl_held, true, extack);
}

static bool tcf_proto_check_delete(struct tcf_proto *tp)
{
	if (tp->ops->delete_empty)
		return tp->ops->delete_empty(tp);

	tp->deleting = true;
	return tp->deleting;
}

static void tcf_proto_mark_delete(struct tcf_proto *tp)
{
	spin_lock(&tp->lock);
	tp->deleting = true;
	spin_unlock(&tp->lock);
}

static bool tcf_proto_is_deleting(struct tcf_proto *tp)
{
	bool deleting;

	spin_lock(&tp->lock);
	deleting = tp->deleting;
	spin_unlock(&tp->lock);

	return deleting;
}

#define ASSERT_BLOCK_LOCKED(block)					\
	lockdep_assert_held(&(block)->lock)

struct tcf_filter_chain_list_item {
	struct list_head list;
	tcf_chain_head_change_t *chain_head_change;
	void *chain_head_change_priv;
};

static struct tcf_chain *tcf_chain_create(struct tcf_block *block,
					  u32 chain_index)
{
	struct tcf_chain *chain;

	ASSERT_BLOCK_LOCKED(block);

	chain = kzalloc(sizeof(*chain), GFP_KERNEL);
	if (!chain)
		return NULL;
	list_add_tail_rcu(&chain->list, &block->chain_list);
	mutex_init(&chain->filter_chain_lock);
	chain->block = block;
	chain->index = chain_index;
	chain->refcnt = 1;
	if (!chain->index)
		block->chain0.chain = chain;
	return chain;
}

static void tcf_chain_head_change_item(struct tcf_filter_chain_list_item *item,
				       struct tcf_proto *tp_head)
{
	if (item->chain_head_change)
		item->chain_head_change(tp_head, item->chain_head_change_priv);
}
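
/* Run all registered head-change callbacks for chain 0 of the block. For any
 * other chain this is a no-op, since only chain 0 heads are tracked by block
 * owners (e.g. to keep a fast-path filter list pointer up to date).
 */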
static void tcf_chain0_head_change(struct tcf_chain *chain,
				   struct tcf_proto *tp_head)
{
	struct tcf_filter_chain_list_item *item;
	struct tcf_block *block = chain->block;

	if (chain->index)
		return;

	mutex_lock(&block->lock);
	list_for_each_entry(item, &block->chain0.filter_chain_list, list)
		tcf_chain_head_change_item(item, tp_head);
	mutex_unlock(&block->lock);
}

/* Returns true if block can be safely freed. */

static bool tcf_chain_detach(struct tcf_chain *chain)
{
	struct tcf_block *block = chain->block;

	ASSERT_BLOCK_LOCKED(block);

	list_del_rcu(&chain->list);
	if (!chain->index)
		block->chain0.chain = NULL;

	if (list_empty(&block->chain_list) &&
	    refcount_read(&block->refcnt) == 0)
		return true;

	return false;
}

static void tcf_block_destroy(struct tcf_block *block)
{
	mutex_destroy(&block->lock);
	mutex_destroy(&block->proto_destroy_lock);
	kfree_rcu(block, rcu);
}

static void tcf_chain_destroy(struct tcf_chain *chain, bool free_block)
{
	struct tcf_block *block = chain->block;

	mutex_destroy(&chain->filter_chain_lock);
	kfree_rcu(chain, rcu);
	if (free_block)
		tcf_block_destroy(block);
}

static void tcf_chain_hold(struct tcf_chain *chain)
{
	ASSERT_BLOCK_LOCKED(chain->block);

	++chain->refcnt;
}

static bool tcf_chain_held_by_acts_only(struct tcf_chain *chain)
{
	ASSERT_BLOCK_LOCKED(chain->block);

	/* In case all the references are action references, this
	 * chain should not be shown to the user.
	 */
	return chain->refcnt == chain->action_refcnt;
}

static struct tcf_chain *tcf_chain_lookup(struct tcf_block *block,
					  u32 chain_index)
{
	struct tcf_chain *chain;

	ASSERT_BLOCK_LOCKED(block);

	list_for_each_entry(chain, &block->chain_list, list) {
		if (chain->index == chain_index)
			return chain;
	}
	return NULL;
}

#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
static struct tcf_chain *tcf_chain_lookup_rcu(const struct tcf_block *block,
					      u32 chain_index)
{
	struct tcf_chain *chain;

	list_for_each_entry_rcu(chain, &block->chain_list, list) {
		if (chain->index == chain_index)
			return chain;
	}
	return NULL;
}
#endif

static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
			   u32 seq, u16 flags, int event, bool unicast);
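
/* Get a reference to the chain with the given index, optionally creating it
 * if it does not exist yet. With by_act set the reference is accounted as an
 * action reference, which keeps the chain invisible to regular filter dumps.
 */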
static struct tcf_chain *__tcf_chain_get(struct tcf_block *block,
					 u32 chain_index, bool create,
					 bool by_act)
{
	struct tcf_chain *chain = NULL;
	bool is_first_reference;

	mutex_lock(&block->lock);
	chain = tcf_chain_lookup(block, chain_index);
	if (chain) {
		tcf_chain_hold(chain);
	} else {
		if (!create)
			goto errout;
		chain = tcf_chain_create(block, chain_index);
		if (!chain)
			goto errout;
	}

	if (by_act)
		++chain->action_refcnt;
	is_first_reference = chain->refcnt - chain->action_refcnt == 1;
	mutex_unlock(&block->lock);

	/* Send notification only in case we got the first
	 * non-action reference. Until then, the chain acts only as
	 * a placeholder for actions pointing to it and user ought
	 * not know about them.
	 */
	if (is_first_reference && !by_act)
		tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
				RTM_NEWCHAIN, false);

	return chain;

errout:
	mutex_unlock(&block->lock);
	return chain;
}

static struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
				       bool create)
{
	return __tcf_chain_get(block, chain_index, create, false);
}

struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block, u32 chain_index)
{
	return __tcf_chain_get(block, chain_index, true, true);
}
EXPORT_SYMBOL(tcf_chain_get_by_act);

static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops,
			       void *tmplt_priv);
static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops,
				  void *tmplt_priv, u32 chain_index,
				  struct tcf_block *block, struct sk_buff *oskb,
				  u32 seq, u16 flags, bool unicast);
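
/* Drop a reference to the chain. When the last non-action reference is
 * dropped, user-space is notified about the chain deletion; when the overall
 * refcount hits zero, the chain template is removed and the chain (and
 * possibly the block) is destroyed.
 */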
static void __tcf_chain_put(struct tcf_chain *chain, bool by_act,
			    bool explicitly_created)
{
	struct tcf_block *block = chain->block;
	const struct tcf_proto_ops *tmplt_ops;
	bool free_block = false;
	unsigned int refcnt;
	void *tmplt_priv;

	mutex_lock(&block->lock);
	if (explicitly_created) {
		if (!chain->explicitly_created) {
			mutex_unlock(&block->lock);
			return;
		}
		chain->explicitly_created = false;
	}

	if (by_act)
		chain->action_refcnt--;

	/* tc_chain_notify_delete can't be called while holding block lock.
	 * However, when block is unlocked chain can be changed concurrently, so
	 * save these to temporary variables.
	 */
	refcnt = --chain->refcnt;
	tmplt_ops = chain->tmplt_ops;
	tmplt_priv = chain->tmplt_priv;

	/* The last dropped non-action reference will trigger notification. */
	if (refcnt - chain->action_refcnt == 0 && !by_act) {
		tc_chain_notify_delete(tmplt_ops, tmplt_priv, chain->index,
				       block, NULL, 0, 0, false);
		/* Last reference to chain, no need to lock. */
		chain->flushing = false;
	}

	if (refcnt == 0)
		free_block = tcf_chain_detach(chain);
	mutex_unlock(&block->lock);

	if (refcnt == 0) {
		tc_chain_tmplt_del(tmplt_ops, tmplt_priv);
		tcf_chain_destroy(chain, free_block);
	}
}

static void tcf_chain_put(struct tcf_chain *chain)
{
	__tcf_chain_put(chain, false, false);
}

void tcf_chain_put_by_act(struct tcf_chain *chain)
{
	__tcf_chain_put(chain, true, false);
}
EXPORT_SYMBOL(tcf_chain_put_by_act);

static void tcf_chain_put_explicitly_created(struct tcf_chain *chain)
{
	__tcf_chain_put(chain, false, true);
}

static void tcf_chain_flush(struct tcf_chain *chain, bool rtnl_held)
{
	struct tcf_proto *tp, *tp_next;

	mutex_lock(&chain->filter_chain_lock);
	tp = tcf_chain_dereference(chain->filter_chain, chain);
	while (tp) {
		tp_next = rcu_dereference_protected(tp->next, 1);
		tcf_proto_signal_destroying(chain, tp);
		tp = tp_next;
	}
	tp = tcf_chain_dereference(chain->filter_chain, chain);
	RCU_INIT_POINTER(chain->filter_chain, NULL);
	tcf_chain0_head_change(chain, NULL);
	chain->flushing = true;
	mutex_unlock(&chain->filter_chain_lock);

	while (tp) {
		tp_next = rcu_dereference_protected(tp->next, 1);
		tcf_proto_put(tp, rtnl_held, NULL);
		tp = tp_next;
	}
}

static int tcf_block_setup(struct tcf_block *block,
			   struct flow_block_offload *bo);

static void tc_indr_block_cmd(struct net_device *dev, struct tcf_block *block,
			      flow_indr_block_bind_cb_t *cb, void *cb_priv,
			      enum flow_block_command command, bool ingress)
{
	struct flow_block_offload bo = {
		.command	= command,
		.binder_type	= ingress ?
				  FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS :
				  FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS,
		.net		= dev_net(dev),
		.block_shared	= tcf_block_non_null_shared(block),
	};
	INIT_LIST_HEAD(&bo.cb_list);

	if (!block)
		return;

	bo.block = &block->flow_block;

	down_write(&block->cb_lock);
	cb(dev, cb_priv, TC_SETUP_BLOCK, &bo);

	tcf_block_setup(block, &bo);
	up_write(&block->cb_lock);
}
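
/* Return the clsact/ingress block of a device, if any. With ingress set the
 * ingress block is looked up, otherwise the egress one (which only exists on
 * clsact, hence the explicit "ingress" qdisc check below).
 */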
static struct tcf_block *tc_dev_block(struct net_device *dev, bool ingress)
{
	const struct Qdisc_class_ops *cops;
	const struct Qdisc_ops *ops;
	struct Qdisc *qdisc;

	if (!dev_ingress_queue(dev))
		return NULL;

	qdisc = dev_ingress_queue(dev)->qdisc_sleeping;
	if (!qdisc)
		return NULL;

	ops = qdisc->ops;
	if (!ops)
		return NULL;

	if (!ingress && !strcmp("ingress", ops->id))
		return NULL;

	cops = ops->cl_ops;
	if (!cops)
		return NULL;

	if (!cops->tcf_block)
		return NULL;

	return cops->tcf_block(qdisc,
			       ingress ? TC_H_MIN_INGRESS : TC_H_MIN_EGRESS,
			       NULL);
}

static void tc_indr_block_get_and_cmd(struct net_device *dev,
				      flow_indr_block_bind_cb_t *cb,
				      void *cb_priv,
				      enum flow_block_command command)
{
	struct tcf_block *block;

	block = tc_dev_block(dev, true);
	tc_indr_block_cmd(dev, block, cb, cb_priv, command, true);

	block = tc_dev_block(dev, false);
	tc_indr_block_cmd(dev, block, cb, cb_priv, command, false);
}

static void tc_indr_block_call(struct tcf_block *block,
			       struct net_device *dev,
			       struct tcf_block_ext_info *ei,
			       enum flow_block_command command,
			       struct netlink_ext_ack *extack)
{
	struct flow_block_offload bo = {
		.command	= command,
		.binder_type	= ei->binder_type,
		.net		= dev_net(dev),
		.block		= &block->flow_block,
		.block_shared	= tcf_block_shared(block),
		.extack		= extack,
	};
	INIT_LIST_HEAD(&bo.cb_list);

	flow_indr_block_call(dev, &bo, command, TC_SETUP_BLOCK);
	tcf_block_setup(block, &bo);
}

static bool tcf_block_offload_in_use(struct tcf_block *block)
{
	return atomic_read(&block->offloadcnt);
}

static int tcf_block_offload_cmd(struct tcf_block *block,
				 struct net_device *dev,
				 struct tcf_block_ext_info *ei,
				 enum flow_block_command command,
				 struct netlink_ext_ack *extack)
{
	struct flow_block_offload bo = {};
	int err;

	bo.net = dev_net(dev);
	bo.command = command;
	bo.binder_type = ei->binder_type;
	bo.block = &block->flow_block;
	bo.block_shared = tcf_block_shared(block);
	bo.extack = extack;
	INIT_LIST_HEAD(&bo.cb_list);

	err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo);
	if (err < 0)
		return err;

	return tcf_block_setup(block, &bo);
}
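
/* Bind the block to its device for hardware offload. Devices without
 * ndo_setup_tc (or that return -EOPNOTSUPP) are counted in nooffloaddevcnt
 * instead, which is only allowed while the block has no offloaded filters.
 */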
static int tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q,
				  struct tcf_block_ext_info *ei,
				  struct netlink_ext_ack *extack)
{
	struct net_device *dev = q->dev_queue->dev;
	int err;

	down_write(&block->cb_lock);
	if (!dev->netdev_ops->ndo_setup_tc)
		goto no_offload_dev_inc;

	/* If tc offload feature is disabled and the block we try to bind
	 * to already has some offloaded filters, refuse to bind.
	 */
	if (!tc_can_offload(dev) && tcf_block_offload_in_use(block)) {
		NL_SET_ERR_MSG(extack, "Bind to offloaded block failed as dev has offload disabled");
		err = -EOPNOTSUPP;
		goto err_unlock;
	}

	err = tcf_block_offload_cmd(block, dev, ei, FLOW_BLOCK_BIND, extack);
	if (err == -EOPNOTSUPP)
		goto no_offload_dev_inc;
	if (err)
		goto err_unlock;

	tc_indr_block_call(block, dev, ei, FLOW_BLOCK_BIND, extack);
	up_write(&block->cb_lock);
	return 0;

no_offload_dev_inc:
	if (tcf_block_offload_in_use(block)) {
		err = -EOPNOTSUPP;
		goto err_unlock;
	}
	err = 0;
	block->nooffloaddevcnt++;
	tc_indr_block_call(block, dev, ei, FLOW_BLOCK_BIND, extack);
err_unlock:
	up_write(&block->cb_lock);
	return err;
}

static void tcf_block_offload_unbind(struct tcf_block *block, struct Qdisc *q,
				     struct tcf_block_ext_info *ei)
{
	struct net_device *dev = q->dev_queue->dev;
	int err;

	down_write(&block->cb_lock);
	tc_indr_block_call(block, dev, ei, FLOW_BLOCK_UNBIND, NULL);

	if (!dev->netdev_ops->ndo_setup_tc)
		goto no_offload_dev_dec;
	err = tcf_block_offload_cmd(block, dev, ei, FLOW_BLOCK_UNBIND, NULL);
	if (err == -EOPNOTSUPP)
		goto no_offload_dev_dec;
	up_write(&block->cb_lock);
	return;

no_offload_dev_dec:
	WARN_ON(block->nooffloaddevcnt-- == 0);
	up_write(&block->cb_lock);
}
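
/* Subscribe to head-change events of chain 0. If the chain already exists,
 * the current head is replayed to the new subscriber before it is added to
 * the list, so the subscriber never observes a stale head.
 */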
static int
tcf_chain0_head_change_cb_add(struct tcf_block *block,
			      struct tcf_block_ext_info *ei,
			      struct netlink_ext_ack *extack)
{
	struct tcf_filter_chain_list_item *item;
	struct tcf_chain *chain0;

	item = kmalloc(sizeof(*item), GFP_KERNEL);
	if (!item) {
		NL_SET_ERR_MSG(extack, "Memory allocation for head change callback item failed");
		return -ENOMEM;
	}
	item->chain_head_change = ei->chain_head_change;
	item->chain_head_change_priv = ei->chain_head_change_priv;

	mutex_lock(&block->lock);
	chain0 = block->chain0.chain;
	if (chain0)
		tcf_chain_hold(chain0);
	else
		list_add(&item->list, &block->chain0.filter_chain_list);
	mutex_unlock(&block->lock);

	if (chain0) {
		struct tcf_proto *tp_head;

		mutex_lock(&chain0->filter_chain_lock);

		tp_head = tcf_chain_dereference(chain0->filter_chain, chain0);
		if (tp_head)
			tcf_chain_head_change_item(item, tp_head);

		mutex_lock(&block->lock);
		list_add(&item->list, &block->chain0.filter_chain_list);
		mutex_unlock(&block->lock);

		mutex_unlock(&chain0->filter_chain_lock);
		tcf_chain_put(chain0);
	}

	return 0;
}

static void
tcf_chain0_head_change_cb_del(struct tcf_block *block,
			      struct tcf_block_ext_info *ei)
{
	struct tcf_filter_chain_list_item *item;

	mutex_lock(&block->lock);
	list_for_each_entry(item, &block->chain0.filter_chain_list, list) {
		if ((!ei->chain_head_change && !ei->chain_head_change_priv) ||
		    (item->chain_head_change == ei->chain_head_change &&
		     item->chain_head_change_priv == ei->chain_head_change_priv)) {
			if (block->chain0.chain)
				tcf_chain_head_change_item(item, NULL);
			list_del(&item->list);
			mutex_unlock(&block->lock);

			kfree(item);
			return;
		}
	}
	mutex_unlock(&block->lock);
	WARN_ON(1);
}

struct tcf_net {
	spinlock_t idr_lock; /* Protects idr */
	struct idr idr;
};

static unsigned int tcf_net_id;

static int tcf_block_insert(struct tcf_block *block, struct net *net,
			    struct netlink_ext_ack *extack)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);
	int err;

	idr_preload(GFP_KERNEL);
	spin_lock(&tn->idr_lock);
	err = idr_alloc_u32(&tn->idr, block, &block->index, block->index,
			    GFP_NOWAIT);
	spin_unlock(&tn->idr_lock);
	idr_preload_end();

	return err;
}

static void tcf_block_remove(struct tcf_block *block, struct net *net)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	spin_lock(&tn->idr_lock);
	idr_remove(&tn->idr, block->index);
	spin_unlock(&tn->idr_lock);
}

static struct tcf_block *tcf_block_create(struct net *net, struct Qdisc *q,
					  u32 block_index,
					  struct netlink_ext_ack *extack)
{
	struct tcf_block *block;

	block = kzalloc(sizeof(*block), GFP_KERNEL);
	if (!block) {
		NL_SET_ERR_MSG(extack, "Memory allocation for block failed");
		return ERR_PTR(-ENOMEM);
	}
	mutex_init(&block->lock);
	mutex_init(&block->proto_destroy_lock);
	init_rwsem(&block->cb_lock);
	flow_block_init(&block->flow_block);
	INIT_LIST_HEAD(&block->chain_list);
	INIT_LIST_HEAD(&block->owner_list);
	INIT_LIST_HEAD(&block->chain0.filter_chain_list);

	refcount_set(&block->refcnt, 1);
	block->net = net;
	block->index = block_index;

	/* Don't store q pointer for blocks which are shared */
	if (!tcf_block_shared(block))
		block->q = q;
	return block;
}

static struct tcf_block *tcf_block_lookup(struct net *net, u32 block_index)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	return idr_find(&tn->idr, block_index);
}

static struct tcf_block *tcf_block_refcnt_get(struct net *net, u32 block_index)
{
	struct tcf_block *block;

	rcu_read_lock();
	block = tcf_block_lookup(net, block_index);
	if (block && !refcount_inc_not_zero(&block->refcnt))
		block = NULL;
	rcu_read_unlock();

	return block;
}

static struct tcf_chain *
__tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain)
{
	mutex_lock(&block->lock);
	if (chain)
		chain = list_is_last(&chain->list, &block->chain_list) ?
			NULL : list_next_entry(chain, list);
	else
		chain = list_first_entry_or_null(&block->chain_list,
						 struct tcf_chain, list);

	/* skip all action-only chains */
	while (chain && tcf_chain_held_by_acts_only(chain))
		chain = list_is_last(&chain->list, &block->chain_list) ?
			NULL : list_next_entry(chain, list);

	if (chain)
		tcf_chain_hold(chain);
	mutex_unlock(&block->lock);

	return chain;
}

/* Function to be used by all clients that want to iterate over all chains on
 * block. It properly obtains block->lock and takes reference to chain before
 * returning it. Users of this function must be tolerant to concurrent chain
 * insertion/deletion or ensure that no concurrent chain modification is
 * possible. Note that a netlink dump callback cannot guarantee a consistent
 * dump, because the rtnl lock is released each time the skb is filled with
 * data and sent to user-space.
 */

struct tcf_chain *
tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain)
{
	struct tcf_chain *chain_next = __tcf_get_next_chain(block, chain);

	if (chain)
		tcf_chain_put(chain);

	return chain_next;
}
EXPORT_SYMBOL(tcf_get_next_chain);

static struct tcf_proto *
__tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp)
{
	u32 prio = 0;

	ASSERT_RTNL();
	mutex_lock(&chain->filter_chain_lock);

	if (!tp) {
		tp = tcf_chain_dereference(chain->filter_chain, chain);
	} else if (tcf_proto_is_deleting(tp)) {
		/* 'deleting' flag is set and chain->filter_chain_lock was
		 * unlocked, which means next pointer could be invalid. Restart
		 * search.
		 */
		prio = tp->prio + 1;
		tp = tcf_chain_dereference(chain->filter_chain, chain);

		for (; tp; tp = tcf_chain_dereference(tp->next, chain))
			if (!tp->deleting && tp->prio >= prio)
				break;
	} else {
		tp = tcf_chain_dereference(tp->next, chain);
	}

	if (tp)
		tcf_proto_get(tp);

	mutex_unlock(&chain->filter_chain_lock);

	return tp;
}

/* Function to be used by all clients that want to iterate over all tp's on
 * chain. Users of this function must be tolerant to concurrent tp
 * insertion/deletion or ensure that no concurrent chain modification is
 * possible. Note that a netlink dump callback cannot guarantee a consistent
 * dump, because the rtnl lock is released each time the skb is filled with
 * data and sent to user-space.
 */

struct tcf_proto *
tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp,
		   bool rtnl_held)
{
	struct tcf_proto *tp_next = __tcf_get_next_proto(chain, tp);

	if (tp)
		tcf_proto_put(tp, rtnl_held, NULL);

	return tp_next;
}
EXPORT_SYMBOL(tcf_get_next_proto);
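
/* The canonical iteration pattern (used by tcf_block_flush_all_chains()
 * below) is:
 *
 *	for (chain = tcf_get_next_chain(block, NULL);
 *	     chain;
 *	     chain = tcf_get_next_chain(block, chain))
 *		...;
 *
 * Each call drops the reference to the previous chain and returns the next
 * one with a new reference held, so breaking out of the loop early requires
 * an explicit put of the current chain.
 */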
static void tcf_block_flush_all_chains(struct tcf_block *block, bool rtnl_held)
{
	struct tcf_chain *chain;

	/* Last reference to block. At this point chains cannot be added or
	 * removed concurrently.
	 */
	for (chain = tcf_get_next_chain(block, NULL);
	     chain;
	     chain = tcf_get_next_chain(block, chain)) {
		tcf_chain_put_explicitly_created(chain);
		tcf_chain_flush(chain, rtnl_held);
	}
}

/* Look up the Qdisc and increment its reference counter.
 * Set parent, if necessary.
 */

static int __tcf_qdisc_find(struct net *net, struct Qdisc **q,
			    u32 *parent, int ifindex, bool rtnl_held,
			    struct netlink_ext_ack *extack)
{
	const struct Qdisc_class_ops *cops;
	struct net_device *dev;
	int err = 0;

	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK)
		return 0;

	rcu_read_lock();

	/* Find link */
	dev = dev_get_by_index_rcu(net, ifindex);
	if (!dev) {
		rcu_read_unlock();
		return -ENODEV;
	}

	/* Find qdisc */
	if (!*parent) {
		*q = dev->qdisc;
		*parent = (*q)->handle;
	} else {
		*q = qdisc_lookup_rcu(dev, TC_H_MAJ(*parent));
		if (!*q) {
			NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exist");
			err = -EINVAL;
			goto errout_rcu;
		}
	}

	*q = qdisc_refcount_inc_nz(*q);
	if (!*q) {
		NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exist");
		err = -EINVAL;
		goto errout_rcu;
	}

	/* Is it classful? */
	cops = (*q)->ops->cl_ops;
	if (!cops) {
		NL_SET_ERR_MSG(extack, "Qdisc not classful");
		err = -EINVAL;
		goto errout_qdisc;
	}

	if (!cops->tcf_block) {
		NL_SET_ERR_MSG(extack, "Class doesn't support blocks");
		err = -EOPNOTSUPP;
		goto errout_qdisc;
	}

errout_rcu:
	/* At this point we know that qdisc is not noop_qdisc,
	 * which means that qdisc holds a reference to net_device
	 * and we hold a reference to qdisc, so it is safe to release
	 * rcu read lock.
	 */
	rcu_read_unlock();
	return err;

errout_qdisc:
	rcu_read_unlock();

	if (rtnl_held)
		qdisc_put(*q);
	else
		qdisc_put_unlocked(*q);
	*q = NULL;

	return err;
}

static int __tcf_qdisc_cl_find(struct Qdisc *q, u32 parent, unsigned long *cl,
			       int ifindex, struct netlink_ext_ack *extack)
{
	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK)
		return 0;

	/* Do we search for filter, attached to class? */
	if (TC_H_MIN(parent)) {
		const struct Qdisc_class_ops *cops = q->ops->cl_ops;

		*cl = cops->find(q, parent);
		if (*cl == 0) {
			NL_SET_ERR_MSG(extack, "Specified class doesn't exist");
			return -ENOENT;
		}
	}

	return 0;
}

static struct tcf_block *__tcf_block_find(struct net *net, struct Qdisc *q,
					  unsigned long cl, int ifindex,
					  u32 block_index,
					  struct netlink_ext_ack *extack)
{
	struct tcf_block *block;

	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
		block = tcf_block_refcnt_get(net, block_index);
		if (!block) {
			NL_SET_ERR_MSG(extack, "Block of given index was not found");
			return ERR_PTR(-EINVAL);
		}
	} else {
		const struct Qdisc_class_ops *cops = q->ops->cl_ops;

		block = cops->tcf_block(q, cl, extack);
		if (!block)
			return ERR_PTR(-EINVAL);

		if (tcf_block_shared(block)) {
			NL_SET_ERR_MSG(extack, "This filter block is shared. Please use the block index to manipulate the filters");
			return ERR_PTR(-EOPNOTSUPP);
		}

		/* Always take reference to block in order to support execution
		 * of rules update path of cls API without rtnl lock. Caller
		 * must release block when it is finished using it. The 'if'
		 * branch of this conditional obtains its reference to the
		 * block by calling tcf_block_refcnt_get().
		 */
		refcount_inc(&block->refcnt);
	}

	return block;
}

static void __tcf_block_put(struct tcf_block *block, struct Qdisc *q,
			    struct tcf_block_ext_info *ei, bool rtnl_held)
{
	if (refcount_dec_and_mutex_lock(&block->refcnt, &block->lock)) {
		/* Flushing/putting all chains will cause the block to be
		 * deallocated when last chain is freed. However, if chain_list
		 * is empty, block has to be manually deallocated. After block
		 * reference counter reached 0, it is no longer possible to
		 * increment it or add new chains to block.
		 */
		bool free_block = list_empty(&block->chain_list);

		mutex_unlock(&block->lock);
		if (tcf_block_shared(block))
			tcf_block_remove(block, block->net);

		if (q)
			tcf_block_offload_unbind(block, q, ei);

		if (free_block)
			tcf_block_destroy(block);
		else
			tcf_block_flush_all_chains(block, rtnl_held);
	} else if (q) {
		tcf_block_offload_unbind(block, q, ei);
	}
}

static void tcf_block_refcnt_put(struct tcf_block *block, bool rtnl_held)
{
	__tcf_block_put(block, NULL, NULL, rtnl_held);
}

/* Find tcf block.
 * Set q, parent, cl when appropriate.
 */

static struct tcf_block *tcf_block_find(struct net *net, struct Qdisc **q,
					u32 *parent, unsigned long *cl,
					int ifindex, u32 block_index,
					struct netlink_ext_ack *extack)
{
	struct tcf_block *block;
	int err = 0;

	ASSERT_RTNL();

	err = __tcf_qdisc_find(net, q, parent, ifindex, true, extack);
	if (err)
		goto errout;

	err = __tcf_qdisc_cl_find(*q, *parent, cl, ifindex, extack);
	if (err)
		goto errout_qdisc;

	block = __tcf_block_find(net, *q, *cl, ifindex, block_index, extack);
	if (IS_ERR(block)) {
		err = PTR_ERR(block);
		goto errout_qdisc;
	}

	return block;

errout_qdisc:
	if (*q)
		qdisc_put(*q);
errout:
	*q = NULL;
	return ERR_PTR(err);
}

static void tcf_block_release(struct Qdisc *q, struct tcf_block *block,
			      bool rtnl_held)
{
	if (!IS_ERR_OR_NULL(block))
		tcf_block_refcnt_put(block, rtnl_held);

	if (q) {
		if (rtnl_held)
			qdisc_put(q);
		else
			qdisc_put_unlocked(q);
	}
}

struct tcf_block_owner_item {
	struct list_head list;
	struct Qdisc *q;
	enum flow_block_binder_type binder_type;
};

static void
tcf_block_owner_netif_keep_dst(struct tcf_block *block,
			       struct Qdisc *q,
			       enum flow_block_binder_type binder_type)
{
	if (block->keep_dst &&
	    binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
	    binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
		netif_keep_dst(qdisc_dev(q));
}

void tcf_block_netif_keep_dst(struct tcf_block *block)
{
	struct tcf_block_owner_item *item;

	block->keep_dst = true;
	list_for_each_entry(item, &block->owner_list, list)
		tcf_block_owner_netif_keep_dst(block, item->q,
					       item->binder_type);
}
EXPORT_SYMBOL(tcf_block_netif_keep_dst);

static int tcf_block_owner_add(struct tcf_block *block,
			       struct Qdisc *q,
			       enum flow_block_binder_type binder_type)
{
	struct tcf_block_owner_item *item;

	item = kmalloc(sizeof(*item), GFP_KERNEL);
	if (!item)
		return -ENOMEM;
	item->q = q;
	item->binder_type = binder_type;
	list_add(&item->list, &block->owner_list);
	return 0;
}

static void tcf_block_owner_del(struct tcf_block *block,
				struct Qdisc *q,
				enum flow_block_binder_type binder_type)
{
	struct tcf_block_owner_item *item;

	list_for_each_entry(item, &block->owner_list, list) {
		if (item->q == q && item->binder_type == binder_type) {
			list_del(&item->list);
			kfree(item);
			return;
		}
	}
	WARN_ON(1);
}
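
/* Get and attach a block: either a shared one looked up (or created) by
 * ei->block_index, or a new private block for this qdisc. On success the
 * block is owner-registered, subscribed to chain0 head changes and bound
 * for offload; tcf_block_put_ext() undoes all of this.
 */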
int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei,
		      struct netlink_ext_ack *extack)
{
	struct net *net = qdisc_net(q);
	struct tcf_block *block = NULL;
	int err;

	if (ei->block_index)
		/* block_index not 0 means the shared block is requested */
		block = tcf_block_refcnt_get(net, ei->block_index);

	if (!block) {
		block = tcf_block_create(net, q, ei->block_index, extack);
		if (IS_ERR(block))
			return PTR_ERR(block);
		if (tcf_block_shared(block)) {
			err = tcf_block_insert(block, net, extack);
			if (err)
				goto err_block_insert;
		}
	}

	err = tcf_block_owner_add(block, q, ei->binder_type);
	if (err)
		goto err_block_owner_add;

	tcf_block_owner_netif_keep_dst(block, q, ei->binder_type);

	err = tcf_chain0_head_change_cb_add(block, ei, extack);
	if (err)
		goto err_chain0_head_change_cb_add;

	err = tcf_block_offload_bind(block, q, ei, extack);
	if (err)
		goto err_block_offload_bind;

	*p_block = block;
	return 0;

err_block_offload_bind:
	tcf_chain0_head_change_cb_del(block, ei);
err_chain0_head_change_cb_add:
	tcf_block_owner_del(block, q, ei->binder_type);
err_block_owner_add:
err_block_insert:
	tcf_block_refcnt_put(block, true);
	return err;
}
EXPORT_SYMBOL(tcf_block_get_ext);

static void tcf_chain_head_change_dflt(struct tcf_proto *tp_head, void *priv)
{
	struct tcf_proto __rcu **p_filter_chain = priv;

	rcu_assign_pointer(*p_filter_chain, tp_head);
}

int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		  struct netlink_ext_ack *extack)
{
	struct tcf_block_ext_info ei = {
		.chain_head_change = tcf_chain_head_change_dflt,
		.chain_head_change_priv = p_filter_chain,
	};

	WARN_ON(!p_filter_chain);
	return tcf_block_get_ext(p_block, q, &ei, extack);
}
EXPORT_SYMBOL(tcf_block_get);
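
/* A classful qdisc typically pairs tcf_block_get() in its ->init() with
 * tcf_block_put() in its ->destroy(). Illustrative sketch only (hypothetical
 * qdisc private struct "q"):
 *
 *	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
 *	if (err)
 *		return err;
 *	...
 *	tcf_block_put(q->block);	(in ->destroy())
 *
 * The default chain0 head-change callback keeps q->filter_list pointing at
 * the current head of chain 0 for the fast path.
 */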
/* XXX: Standalone actions are not allowed to jump to any chain, and bound
 * actions should be all removed after flushing.
 */
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
		       struct tcf_block_ext_info *ei)
{
	if (!block)
		return;
	tcf_chain0_head_change_cb_del(block, ei);
	tcf_block_owner_del(block, q, ei->binder_type);

	__tcf_block_put(block, q, ei, true);
}
EXPORT_SYMBOL(tcf_block_put_ext);

void tcf_block_put(struct tcf_block *block)
{
	struct tcf_block_ext_info ei = {0, };

	if (!block)
		return;
	tcf_block_put_ext(block, block->q, &ei);
}
EXPORT_SYMBOL(tcf_block_put);

static int
tcf_block_playback_offloads(struct tcf_block *block, flow_setup_cb_t *cb,
			    void *cb_priv, bool add, bool offload_in_use,
			    struct netlink_ext_ack *extack)
{
	struct tcf_chain *chain, *chain_prev;
	struct tcf_proto *tp, *tp_prev;
	int err;

	lockdep_assert_held(&block->cb_lock);

	for (chain = __tcf_get_next_chain(block, NULL);
	     chain;
	     chain_prev = chain,
		     chain = __tcf_get_next_chain(block, chain),
		     tcf_chain_put(chain_prev)) {
		for (tp = __tcf_get_next_proto(chain, NULL); tp;
		     tp_prev = tp,
			     tp = __tcf_get_next_proto(chain, tp),
			     tcf_proto_put(tp_prev, true, NULL)) {
			if (tp->ops->reoffload) {
				err = tp->ops->reoffload(tp, add, cb, cb_priv,
							 extack);
				if (err && add)
					goto err_playback_remove;
			} else if (add && offload_in_use) {
				err = -EOPNOTSUPP;
				NL_SET_ERR_MSG(extack, "Filter HW offload failed - classifier without re-offloading support");
				goto err_playback_remove;
			}
		}
	}

	return 0;

err_playback_remove:
	tcf_proto_put(tp, true, NULL);
	tcf_chain_put(chain);
	tcf_block_playback_offloads(block, cb, cb_priv, false, offload_in_use,
				    extack);
	return err;
}

static int tcf_block_bind(struct tcf_block *block,
			  struct flow_block_offload *bo)
{
	struct flow_block_cb *block_cb, *next;
	int err, i = 0;

	lockdep_assert_held(&block->cb_lock);

	list_for_each_entry(block_cb, &bo->cb_list, list) {
		err = tcf_block_playback_offloads(block, block_cb->cb,
						  block_cb->cb_priv, true,
						  tcf_block_offload_in_use(block),
						  bo->extack);
		if (err)
			goto err_unroll;
		if (!bo->unlocked_driver_cb)
			block->lockeddevcnt++;

		i++;
	}
	list_splice(&bo->cb_list, &block->flow_block.cb_list);

	return 0;

err_unroll:
	list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
		if (i-- > 0) {
			list_del(&block_cb->list);
			tcf_block_playback_offloads(block, block_cb->cb,
						    block_cb->cb_priv, false,
						    tcf_block_offload_in_use(block),
						    NULL);
			if (!bo->unlocked_driver_cb)
				block->lockeddevcnt--;
		}
		flow_block_cb_free(block_cb);
	}

	return err;
}

static void tcf_block_unbind(struct tcf_block *block,
			     struct flow_block_offload *bo)
{
	struct flow_block_cb *block_cb, *next;

	lockdep_assert_held(&block->cb_lock);

	list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
		tcf_block_playback_offloads(block, block_cb->cb,
					    block_cb->cb_priv, false,
					    tcf_block_offload_in_use(block),
					    NULL);
		list_del(&block_cb->list);
		flow_block_cb_free(block_cb);
		if (!bo->unlocked_driver_cb)
			block->lockeddevcnt--;
	}
}

static int tcf_block_setup(struct tcf_block *block,
			   struct flow_block_offload *bo)
{
	int err;

	switch (bo->command) {
	case FLOW_BLOCK_BIND:
		err = tcf_block_bind(block, bo);
		break;
	case FLOW_BLOCK_UNBIND:
		err = 0;
		tcf_block_unbind(block, bo);
		break;
	default:
		WARN_ON_ONCE(1);
		err = -EOPNOTSUPP;
	}

	return err;
}

/* Main classifier routine: scans classifier chain attached
 * to this qdisc, (optionally) tests for protocol and asks
 * specific classifiers.
 */
static inline int __tcf_classify(struct sk_buff *skb,
				 const struct tcf_proto *tp,
				 const struct tcf_proto *orig_tp,
				 struct tcf_result *res,
				 bool compat_mode,
				 u32 *last_executed_chain)
{
#ifdef CONFIG_NET_CLS_ACT
	const int max_reclassify_loop = 4;
	const struct tcf_proto *first_tp;
	int limit = 0;

reclassify:
#endif
	for (; tp; tp = rcu_dereference_bh(tp->next)) {
		__be16 protocol = tc_skb_protocol(skb);
		int err;

		if (tp->protocol != protocol &&
		    tp->protocol != htons(ETH_P_ALL))
			continue;

		err = tp->classify(skb, tp, res);
#ifdef CONFIG_NET_CLS_ACT
		if (unlikely(err == TC_ACT_RECLASSIFY && !compat_mode)) {
			first_tp = orig_tp;
			*last_executed_chain = first_tp->chain->index;
			goto reset;
		} else if (unlikely(TC_ACT_EXT_CMP(err, TC_ACT_GOTO_CHAIN))) {
			first_tp = res->goto_tp;
			*last_executed_chain = err & TC_ACT_EXT_VAL_MASK;
			goto reset;
		}
#endif
		if (err >= 0)
			return err;
	}

	return TC_ACT_UNSPEC; /* signal: continue lookup */
#ifdef CONFIG_NET_CLS_ACT
reset:
	if (unlikely(limit++ >= max_reclassify_loop)) {
		net_notice_ratelimited("%u: reclassify loop, rule prio %u, protocol %02x\n",
				       tp->chain->block->index,
				       tp->prio & 0xffff,
				       ntohs(tp->protocol));
		return TC_ACT_SHOT;
	}

	tp = first_tp;
	goto reclassify;
#endif
}

int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		 struct tcf_result *res, bool compat_mode)
{
	u32 last_executed_chain = 0;

	return __tcf_classify(skb, tp, tp, res, compat_mode,
			      &last_executed_chain);
}
EXPORT_SYMBOL(tcf_classify);
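
/* Ingress variant of tcf_classify(). When CONFIG_NET_TC_SKB_EXT is enabled,
 * a TC_SKB_EXT extension on the skb restarts classification at the recorded
 * chain (e.g. after a round trip through hardware or conntrack), and a miss
 * on a non-zero chain is recorded back into the extension.
 */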
int tcf_classify_ingress(struct sk_buff *skb,
			 const struct tcf_block *ingress_block,
			 const struct tcf_proto *tp,
			 struct tcf_result *res, bool compat_mode)
{
#if !IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
	u32 last_executed_chain = 0;

	return __tcf_classify(skb, tp, tp, res, compat_mode,
			      &last_executed_chain);
#else
	u32 last_executed_chain = tp ? tp->chain->index : 0;
	const struct tcf_proto *orig_tp = tp;
	struct tc_skb_ext *ext;
	int ret;

	ext = skb_ext_find(skb, TC_SKB_EXT);

	if (ext && ext->chain) {
		struct tcf_chain *fchain;

		fchain = tcf_chain_lookup_rcu(ingress_block, ext->chain);
		if (!fchain)
			return TC_ACT_SHOT;

		/* Consume, so cloned/redirect skbs won't inherit ext */
		skb_ext_del(skb, TC_SKB_EXT);

		tp = rcu_dereference_bh(fchain->filter_chain);
		last_executed_chain = fchain->index;
	}

	ret = __tcf_classify(skb, tp, orig_tp, res, compat_mode,
			     &last_executed_chain);

	/* If we missed on some chain */
	if (ret == TC_ACT_UNSPEC && last_executed_chain) {
		ext = skb_ext_add(skb, TC_SKB_EXT);
		if (WARN_ON_ONCE(!ext))
			return TC_ACT_SHOT;
		ext->chain = last_executed_chain;
	}

	return ret;
#endif
}
EXPORT_SYMBOL(tcf_classify_ingress);

struct tcf_chain_info {
	struct tcf_proto __rcu **pprev;
	struct tcf_proto __rcu *next;
};

static struct tcf_proto *tcf_chain_tp_prev(struct tcf_chain *chain,
					   struct tcf_chain_info *chain_info)
{
	return tcf_chain_dereference(*chain_info->pprev, chain);
}

static int tcf_chain_tp_insert(struct tcf_chain *chain,
			       struct tcf_chain_info *chain_info,
			       struct tcf_proto *tp)
{
	if (chain->flushing)
		return -EAGAIN;

	if (*chain_info->pprev == chain->filter_chain)
		tcf_chain0_head_change(chain, tp);
	tcf_proto_get(tp);
	RCU_INIT_POINTER(tp->next, tcf_chain_tp_prev(chain, chain_info));
	rcu_assign_pointer(*chain_info->pprev, tp);

	return 0;
}

static void tcf_chain_tp_remove(struct tcf_chain *chain,
				struct tcf_chain_info *chain_info,
				struct tcf_proto *tp)
{
	struct tcf_proto *next = tcf_chain_dereference(chain_info->next, chain);

	tcf_proto_mark_delete(tp);
	if (tp == chain->filter_chain)
		tcf_chain0_head_change(chain, next);
	RCU_INIT_POINTER(*chain_info->pprev, next);
}

static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
					   struct tcf_chain_info *chain_info,
					   u32 protocol, u32 prio,
					   bool prio_allocate);

/* Try to insert new proto.
 * If proto with specified priority already exists, free new proto
 * and return existing one.
 */

static struct tcf_proto *tcf_chain_tp_insert_unique(struct tcf_chain *chain,
						    struct tcf_proto *tp_new,
						    u32 protocol, u32 prio,
						    bool rtnl_held)
{
	struct tcf_chain_info chain_info;
	struct tcf_proto *tp;
	int err = 0;

	mutex_lock(&chain->filter_chain_lock);

	if (tcf_proto_exists_destroying(chain, tp_new)) {
		mutex_unlock(&chain->filter_chain_lock);
		tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
		return ERR_PTR(-EAGAIN);
	}

	tp = tcf_chain_tp_find(chain, &chain_info,
			       protocol, prio, false);
	if (!tp)
		err = tcf_chain_tp_insert(chain, &chain_info, tp_new);
	mutex_unlock(&chain->filter_chain_lock);

	if (tp) {
		tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
		tp_new = tp;
	} else if (err) {
		tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
		tp_new = ERR_PTR(err);
	}

	return tp_new;
}

static void tcf_chain_tp_delete_empty(struct tcf_chain *chain,
				      struct tcf_proto *tp, bool rtnl_held,
				      struct netlink_ext_ack *extack)
{
	struct tcf_chain_info chain_info;
	struct tcf_proto *tp_iter;
	struct tcf_proto **pprev;
	struct tcf_proto *next;

	mutex_lock(&chain->filter_chain_lock);

	/* Atomically find and remove tp from chain. */
	for (pprev = &chain->filter_chain;
	     (tp_iter = tcf_chain_dereference(*pprev, chain));
	     pprev = &tp_iter->next) {
		if (tp_iter == tp) {
			chain_info.pprev = pprev;
			chain_info.next = tp_iter->next;
			WARN_ON(tp_iter->deleting);
			break;
		}
	}
	/* Verify that tp still exists and no new filters were inserted
	 * concurrently.
	 * Mark tp for deletion if it is empty.
	 */
	if (!tp_iter || !tcf_proto_check_delete(tp)) {
		mutex_unlock(&chain->filter_chain_lock);
		return;
	}

	tcf_proto_signal_destroying(chain, tp);
	next = tcf_chain_dereference(chain_info.next, chain);
	if (tp == chain->filter_chain)
		tcf_chain0_head_change(chain, next);
	RCU_INIT_POINTER(*chain_info.pprev, next);
	mutex_unlock(&chain->filter_chain_lock);

	tcf_proto_put(tp, rtnl_held, extack);
}

static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
					   struct tcf_chain_info *chain_info,
					   u32 protocol, u32 prio,
					   bool prio_allocate)
{
	struct tcf_proto **pprev;
	struct tcf_proto *tp;

	/* Check the chain for existence of proto-tcf with this priority */
	for (pprev = &chain->filter_chain;
	     (tp = tcf_chain_dereference(*pprev, chain));
	     pprev = &tp->next) {
		if (tp->prio >= prio) {
			if (tp->prio == prio) {
				if (prio_allocate ||
				    (tp->protocol != protocol && protocol))
					return ERR_PTR(-EINVAL);
			} else {
				tp = NULL;
			}
			break;
		}
	}
	chain_info->pprev = pprev;
	if (tp) {
		chain_info->next = tp->next;
		tcf_proto_get(tp);
	} else {
		chain_info->next = NULL;
	}
	return tp;
}
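
/* Fill a netlink message describing one filter. fh is the classifier-private
 * handle of the filter instance; NULL means the event concerns the whole
 * tcf_proto (e.g. a flush), in which case tcm_handle is reported as zero.
 */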
static int tcf_fill_node(struct net *net, struct sk_buff *skb,
			 struct tcf_proto *tp, struct tcf_block *block,
			 struct Qdisc *q, u32 parent, void *fh,
			 u32 portid, u32 seq, u16 flags, int event,
			 bool rtnl_held)
{
	struct tcmsg *tcm;
	struct nlmsghdr *nlh;
	unsigned char *b = skb_tail_pointer(skb);

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
	if (!nlh)
		goto out_nlmsg_trim;
	tcm = nlmsg_data(nlh);
	tcm->tcm_family = AF_UNSPEC;
	tcm->tcm__pad1 = 0;
	tcm->tcm__pad2 = 0;
	if (q) {
		tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
		tcm->tcm_parent = parent;
	} else {
		tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
		tcm->tcm_block_index = block->index;
	}
	tcm->tcm_info = TC_H_MAKE(tp->prio, tp->protocol);
	if (nla_put_string(skb, TCA_KIND, tp->ops->kind))
		goto nla_put_failure;
	if (nla_put_u32(skb, TCA_CHAIN, tp->chain->index))
		goto nla_put_failure;
	if (!fh) {
		tcm->tcm_handle = 0;
	} else {
		if (tp->ops->dump &&
		    tp->ops->dump(net, tp, fh, skb, tcm, rtnl_held) < 0)
			goto nla_put_failure;
	}
	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

out_nlmsg_trim:
nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int tfilter_notify(struct net *net, struct sk_buff *oskb,
			  struct nlmsghdr *n, struct tcf_proto *tp,
			  struct tcf_block *block, struct Qdisc *q,
			  u32 parent, void *fh, int event, bool unicast,
			  bool rtnl_held)
{
	struct sk_buff *skb;
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
	int err = 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
			  n->nlmsg_seq, n->nlmsg_flags, event,
			  rtnl_held) <= 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	if (unicast)
		err = netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
	else
		err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
				     n->nlmsg_flags & NLM_F_ECHO);

	if (err > 0)
		err = 0;
	return err;
}

static int tfilter_del_notify(struct net *net, struct sk_buff *oskb,
			      struct nlmsghdr *n, struct tcf_proto *tp,
			      struct tcf_block *block, struct Qdisc *q,
			      u32 parent, void *fh, bool unicast, bool *last,
			      bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct sk_buff *skb;
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
	int err;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
			  n->nlmsg_seq, n->nlmsg_flags, RTM_DELTFILTER,
			  rtnl_held) <= 0) {
		NL_SET_ERR_MSG(extack, "Failed to build del event notification");
		kfree_skb(skb);
		return -EINVAL;
	}

	err = tp->ops->delete(tp, fh, last, rtnl_held, extack);
	if (err) {
		kfree_skb(skb);
		return err;
	}

	if (unicast)
		err = netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
	else
		err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
				     n->nlmsg_flags & NLM_F_ECHO);
	if (err < 0)
		NL_SET_ERR_MSG(extack, "Failed to send filter delete notification");

	if (err > 0)
		err = 0;
	return err;
}

static void tfilter_notify_chain(struct net *net, struct sk_buff *oskb,
				 struct tcf_block *block, struct Qdisc *q,
				 u32 parent, struct nlmsghdr *n,
				 struct tcf_chain *chain, int event,
				 bool rtnl_held)
{
	struct tcf_proto *tp;

	for (tp = tcf_get_next_proto(chain, NULL, rtnl_held);
	     tp; tp = tcf_get_next_proto(chain, tp, rtnl_held))
		tfilter_notify(net, oskb, n, tp, block,
			       q, parent, NULL, event, false, rtnl_held);
}

static void tfilter_put(struct tcf_proto *tp, void *fh)
{
	if (tp->ops->put && fh)
		tp->ops->put(tp, fh);
}
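
/* Handler for RTM_NEWTFILTER: create or change a filter. Runs without the
 * rtnl lock when both the qdisc and the classifier are marked unlocked;
 * -EAGAIN from the update path causes the whole request to be replayed
 * under rtnl (see the "replay" label).
 */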
static int tc_new_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	char name[IFNAMSIZ];
	struct tcmsg *t;
	u32 protocol;
	u32 prio;
	bool prio_allocate;
	u32 parent;
	u32 chain_index;
	struct Qdisc *q = NULL;
	struct tcf_chain_info chain_info;
	struct tcf_chain *chain = NULL;
	struct tcf_block *block;
	struct tcf_proto *tp;
	unsigned long cl;
	void *fh;
	int err;
	int tp_created;
	bool rtnl_held = false;

	if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

replay:
	tp_created = 0;

	err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
				     rtm_tca_policy, extack);
	if (err < 0)
		return err;

	t = nlmsg_data(n);
	protocol = TC_H_MIN(t->tcm_info);
	prio = TC_H_MAJ(t->tcm_info);
	prio_allocate = false;
	parent = t->tcm_parent;
	tp = NULL;
	cl = 0;
	block = NULL;

	if (prio == 0) {
		/* If no priority is provided by the user,
		 * we allocate one.
		 */
		if (n->nlmsg_flags & NLM_F_CREATE) {
			prio = TC_H_MAKE(0x80000000U, 0U);
			prio_allocate = true;
		} else {
			NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero");
			return -ENOENT;
		}
	}

	/* Find head of filter chain. */

	err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
	if (err)
		return err;

	if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
		NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
		err = -EINVAL;
		goto errout;
	}

	/* Take rtnl mutex if rtnl_held was set to true on previous iteration,
	 * block is shared (no qdisc found), qdisc is not unlocked, classifier
	 * type is not specified, classifier is not unlocked.
	 */
	if (rtnl_held ||
	    (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
	    !tcf_proto_is_unlocked(name)) {
		rtnl_held = true;
		rtnl_lock();
	}

	err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
	if (err)
		goto errout;

	block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
				 extack);
	if (IS_ERR(block)) {
		err = PTR_ERR(block);
		goto errout;
	}

	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
	if (chain_index > TC_ACT_EXT_VAL_MASK) {
		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
		err = -EINVAL;
		goto errout;
	}
	chain = tcf_chain_get(block, chain_index, true);
	if (!chain) {
		NL_SET_ERR_MSG(extack, "Cannot create specified filter chain");
		err = -ENOMEM;
		goto errout;
	}

	mutex_lock(&chain->filter_chain_lock);
	tp = tcf_chain_tp_find(chain, &chain_info, protocol,
			       prio, prio_allocate);
	if (IS_ERR(tp)) {
		NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
		err = PTR_ERR(tp);
		goto errout_locked;
	}

	if (tp == NULL) {
		struct tcf_proto *tp_new = NULL;

		if (chain->flushing) {
			err = -EAGAIN;
			goto errout_locked;
		}

		/* Proto-tcf does not exist, create new one */

		if (tca[TCA_KIND] == NULL || !protocol) {
			NL_SET_ERR_MSG(extack, "Filter kind and protocol must be specified");
			err = -EINVAL;
			goto errout_locked;
		}

		if (!(n->nlmsg_flags & NLM_F_CREATE)) {
			NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
			err = -ENOENT;
			goto errout_locked;
		}

		if (prio_allocate)
			prio = tcf_auto_prio(tcf_chain_tp_prev(chain,
							       &chain_info));

		mutex_unlock(&chain->filter_chain_lock);
		tp_new = tcf_proto_create(name, protocol, prio, chain,
					  rtnl_held, extack);
		if (IS_ERR(tp_new)) {
			err = PTR_ERR(tp_new);
			goto errout_tp;
		}

		tp_created = 1;
		tp = tcf_chain_tp_insert_unique(chain, tp_new, protocol, prio,
						rtnl_held);
		if (IS_ERR(tp)) {
			err = PTR_ERR(tp);
			goto errout_tp;
		}
	} else {
		mutex_unlock(&chain->filter_chain_lock);
	}

	if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
		NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
		err = -EINVAL;
		goto errout;
	}

	fh = tp->ops->get(tp, t->tcm_handle);

	if (!fh) {
		if (!(n->nlmsg_flags & NLM_F_CREATE)) {
			NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
			err = -ENOENT;
			goto errout;
		}
	} else if (n->nlmsg_flags & NLM_F_EXCL) {
		tfilter_put(tp, fh);
		NL_SET_ERR_MSG(extack, "Filter already exists");
		err = -EEXIST;
		goto errout;
	}

	if (chain->tmplt_ops && chain->tmplt_ops != tp->ops) {
		NL_SET_ERR_MSG(extack, "Chain template is set to a different filter kind");
		err = -EINVAL;
		goto errout;
	}

	err = tp->ops->change(net, skb, tp, cl, t->tcm_handle, tca, &fh,
			      n->nlmsg_flags & NLM_F_CREATE ? TCA_ACT_NOREPLACE : TCA_ACT_REPLACE,
			      rtnl_held, extack);
	if (err == 0) {
		tfilter_notify(net, skb, n, tp, block, q, parent, fh,
			       RTM_NEWTFILTER, false, rtnl_held);
		tfilter_put(tp, fh);
		/* q pointer is NULL for shared blocks */
		if (q)
			q->flags &= ~TCQ_F_CAN_BYPASS;
	}

errout:
	if (err && tp_created)
		tcf_chain_tp_delete_empty(chain, tp, rtnl_held, NULL);
errout_tp:
	if (chain) {
		if (tp && !IS_ERR(tp))
			tcf_proto_put(tp, rtnl_held, NULL);
		if (!tp_created)
			tcf_chain_put(chain);
	}
	tcf_block_release(q, block, rtnl_held);

	if (rtnl_held)
		rtnl_unlock();

	if (err == -EAGAIN) {
		/* Take rtnl lock in case EAGAIN is caused by concurrent flush
		 * of target chain.
		 */
		rtnl_held = true;
		/* Replay the request. */
		goto replay;
	}
	return err;

errout_locked:
	mutex_unlock(&chain->filter_chain_lock);
	goto errout;
}
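
/* Handler for RTM_DELTFILTER: delete one filter instance, a whole tcf_proto
 * (handle 0), or flush an entire chain (priority 0). Flushing requires the
 * rtnl lock to be taken.
 */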
static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	char name[IFNAMSIZ];
	struct tcmsg *t;
	u32 protocol;
	u32 prio;
	u32 parent;
	u32 chain_index;
	struct Qdisc *q = NULL;
	struct tcf_chain_info chain_info;
	struct tcf_chain *chain = NULL;
	struct tcf_block *block = NULL;
	struct tcf_proto *tp = NULL;
	unsigned long cl = 0;
	void *fh = NULL;
	int err;
	bool rtnl_held = false;

	if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
				     rtm_tca_policy, extack);
	if (err < 0)
		return err;

	t = nlmsg_data(n);
	protocol = TC_H_MIN(t->tcm_info);
	prio = TC_H_MAJ(t->tcm_info);
	parent = t->tcm_parent;

	if (prio == 0 && (protocol || t->tcm_handle || tca[TCA_KIND])) {
		NL_SET_ERR_MSG(extack, "Cannot flush filters with protocol, handle or kind set");
		return -ENOENT;
	}

	/* Find head of filter chain. */

	err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
	if (err)
		return err;

	if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
		NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
		err = -EINVAL;
		goto errout;
	}
	/* Take rtnl mutex if flushing whole chain, block is shared (no qdisc
	 * found), qdisc is not unlocked, classifier type is not specified,
	 * classifier is not unlocked.
	 */
	if (!prio ||
	    (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
	    !tcf_proto_is_unlocked(name)) {
		rtnl_held = true;
		rtnl_lock();
	}

	err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
	if (err)
		goto errout;

	block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
				 extack);
	if (IS_ERR(block)) {
		err = PTR_ERR(block);
		goto errout;
	}

	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
	if (chain_index > TC_ACT_EXT_VAL_MASK) {
		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
		err = -EINVAL;
		goto errout;
	}
	chain = tcf_chain_get(block, chain_index, false);
	if (!chain) {
		/* User requested flush on non-existent chain. Nothing to do,
		 * so just return success.
		 */
		if (prio == 0) {
			err = 0;
			goto errout;
		}
		NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
		err = -ENOENT;
		goto errout;
	}

	if (prio == 0) {
		tfilter_notify_chain(net, skb, block, q, parent, n,
				     chain, RTM_DELTFILTER, rtnl_held);
		tcf_chain_flush(chain, rtnl_held);
		err = 0;
		goto errout;
	}

	mutex_lock(&chain->filter_chain_lock);
	tp = tcf_chain_tp_find(chain, &chain_info, protocol,
			       prio, false);
	if (!tp || IS_ERR(tp)) {
		NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
		err = tp ? PTR_ERR(tp) : -ENOENT;
		goto errout_locked;
	} else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
		NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
		err = -EINVAL;
		goto errout_locked;
	} else if (t->tcm_handle == 0) {
		tcf_proto_signal_destroying(chain, tp);
		tcf_chain_tp_remove(chain, &chain_info, tp);
		mutex_unlock(&chain->filter_chain_lock);

		tcf_proto_put(tp, rtnl_held, NULL);
		tfilter_notify(net, skb, n, tp, block, q, parent, fh,
			       RTM_DELTFILTER, false, rtnl_held);
		err = 0;
		goto errout;
	}
	mutex_unlock(&chain->filter_chain_lock);

	fh = tp->ops->get(tp, t->tcm_handle);

	if (!fh) {
		NL_SET_ERR_MSG(extack, "Specified filter handle not found");
		err = -ENOENT;
	} else {
		bool last;

		err = tfilter_del_notify(net, skb, n, tp, block,
					 q, parent, fh, false, &last,
					 rtnl_held, extack);

		if (err)
			goto errout;
		if (last)
			tcf_chain_tp_delete_empty(chain, tp, rtnl_held, extack);
	}

errout:
	if (chain) {
		if (tp && !IS_ERR(tp))
			tcf_proto_put(tp, rtnl_held, NULL);
		tcf_chain_put(chain);
	}
	tcf_block_release(q, block, rtnl_held);

	if (rtnl_held)
		rtnl_unlock();

	return err;

errout_locked:
	mutex_unlock(&chain->filter_chain_lock);
	goto errout;
}
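/* Handle RTM_GETTFILTER: look the filter up and unicast an RTM_NEWTFILTER
 * dump of it back to the requesting socket.
 */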
static int tc_get_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	char name[IFNAMSIZ];
	struct tcmsg *t;
	u32 protocol;
	u32 prio;
	u32 parent;
	u32 chain_index;
	struct Qdisc *q = NULL;
	struct tcf_chain_info chain_info;
	struct tcf_chain *chain = NULL;
	struct tcf_block *block = NULL;
	struct tcf_proto *tp = NULL;
	unsigned long cl = 0;
	void *fh = NULL;
	int err;
	bool rtnl_held = false;

	err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
				     rtm_tca_policy, extack);
	if (err < 0)
		return err;

	t = nlmsg_data(n);
	protocol = TC_H_MIN(t->tcm_info);
	prio = TC_H_MAJ(t->tcm_info);
	parent = t->tcm_parent;

	if (prio == 0) {
		NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero");
		return -ENOENT;
	}

	/* Find head of filter chain. */

	err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
	if (err)
		return err;

	if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
		NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
		err = -EINVAL;
		goto errout;
	}
	/* Take rtnl mutex if block is shared (no qdisc found), qdisc is not
	 * unlocked, classifier type is not specified, classifier is not
	 * unlocked.
	 */
	if ((q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
	    !tcf_proto_is_unlocked(name)) {
		rtnl_held = true;
		rtnl_lock();
	}

	err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
	if (err)
		goto errout;

	block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
				 extack);
	if (IS_ERR(block)) {
		err = PTR_ERR(block);
		goto errout;
	}

	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
	if (chain_index > TC_ACT_EXT_VAL_MASK) {
		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
		err = -EINVAL;
		goto errout;
	}
	chain = tcf_chain_get(block, chain_index, false);
	if (!chain) {
		NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
		err = -EINVAL;
		goto errout;
	}

	mutex_lock(&chain->filter_chain_lock);
	tp = tcf_chain_tp_find(chain, &chain_info, protocol,
			       prio, false);
	mutex_unlock(&chain->filter_chain_lock);
	if (!tp || IS_ERR(tp)) {
		NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
		err = tp ? PTR_ERR(tp) : -ENOENT;
		goto errout;
	} else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
		NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
		err = -EINVAL;
		goto errout;
	}

	fh = tp->ops->get(tp, t->tcm_handle);

	if (!fh) {
		NL_SET_ERR_MSG(extack, "Specified filter handle not found");
		err = -ENOENT;
	} else {
		err = tfilter_notify(net, skb, n, tp, block, q, parent,
				     fh, RTM_NEWTFILTER, true, rtnl_held);
		if (err < 0)
			NL_SET_ERR_MSG(extack, "Failed to send filter notify message");
	}

	tfilter_put(tp, fh);
errout:
	if (chain) {
		if (tp && !IS_ERR(tp))
			tcf_proto_put(tp, rtnl_held, NULL);
		tcf_chain_put(chain);
	}
	tcf_block_release(q, block, rtnl_held);

	if (rtnl_held)
		rtnl_unlock();

	return err;
}
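/* Dump helpers: the classifier's ->walk() callback invokes tcf_node_dump()
 * for every filter node, which emits one RTM_NEWTFILTER message per node
 * into the dump skb.
 */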
struct tcf_dump_args {
	struct tcf_walker w;
	struct sk_buff *skb;
	struct netlink_callback *cb;
	struct tcf_block *block;
	struct Qdisc *q;
	u32 parent;
};

static int tcf_node_dump(struct tcf_proto *tp, void *n, struct tcf_walker *arg)
{
	struct tcf_dump_args *a = (void *)arg;
	struct net *net = sock_net(a->skb->sk);

	return tcf_fill_node(net, a->skb, tp, a->block, a->q, a->parent,
			     n, NETLINK_CB(a->cb->skb).portid,
			     a->cb->nlh->nlmsg_seq, NLM_F_MULTI,
			     RTM_NEWTFILTER, true);
}

static bool tcf_chain_dump(struct tcf_chain *chain, struct Qdisc *q, u32 parent,
			   struct sk_buff *skb, struct netlink_callback *cb,
			   long index_start, long *p_index)
{
	struct net *net = sock_net(skb->sk);
	struct tcf_block *block = chain->block;
	struct tcmsg *tcm = nlmsg_data(cb->nlh);
	struct tcf_proto *tp, *tp_prev;
	struct tcf_dump_args arg;

	for (tp = __tcf_get_next_proto(chain, NULL);
	     tp;
	     tp_prev = tp,
		     tp = __tcf_get_next_proto(chain, tp),
		     tcf_proto_put(tp_prev, true, NULL),
		     (*p_index)++) {
		if (*p_index < index_start)
			continue;
		if (TC_H_MAJ(tcm->tcm_info) &&
		    TC_H_MAJ(tcm->tcm_info) != tp->prio)
			continue;
		if (TC_H_MIN(tcm->tcm_info) &&
		    TC_H_MIN(tcm->tcm_info) != tp->protocol)
			continue;
		if (*p_index > index_start)
			memset(&cb->args[1], 0,
			       sizeof(cb->args) - sizeof(cb->args[0]));
		if (cb->args[1] == 0) {
			if (tcf_fill_node(net, skb, tp, block, q, parent, NULL,
					  NETLINK_CB(cb->skb).portid,
					  cb->nlh->nlmsg_seq, NLM_F_MULTI,
					  RTM_NEWTFILTER, true) <= 0)
				goto errout;
			cb->args[1] = 1;
		}
		if (!tp->ops->walk)
			continue;
		arg.w.fn = tcf_node_dump;
		arg.skb = skb;
		arg.cb = cb;
		arg.block = block;
		arg.q = q;
		arg.parent = parent;
		arg.w.stop = 0;
		arg.w.skip = cb->args[1] - 1;
		arg.w.count = 0;
		arg.w.cookie = cb->args[2];
		tp->ops->walk(tp, &arg.w, true);
		cb->args[2] = arg.w.cookie;
		cb->args[1] = arg.w.count + 1;
		if (arg.w.stop)
			goto errout;
	}
	return true;

errout:
	tcf_proto_put(tp, true, NULL);
	return false;
}
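/* Dump state is carried in cb->args: args[0] counts tcf_proto instances
 * already fully dumped, args[1] is the node position within the current
 * proto (offset by one, zero meaning its header is not dumped yet) and
 * args[2] is an opaque cookie for the classifier's walker, so an
 * interrupted dump resumes where it left off.
 */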
/* called with RTNL */
static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct tcf_chain *chain, *chain_prev;
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	struct Qdisc *q = NULL;
	struct tcf_block *block;
	struct tcmsg *tcm = nlmsg_data(cb->nlh);
	long index_start;
	long index;
	u32 parent;
	int err;

	if (nlmsg_len(cb->nlh) < sizeof(*tcm))
		return skb->len;

	err = nlmsg_parse_deprecated(cb->nlh, sizeof(*tcm), tca, TCA_MAX,
				     NULL, cb->extack);
	if (err)
		return err;

	if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
		block = tcf_block_refcnt_get(net, tcm->tcm_block_index);
		if (!block)
			goto out;
		/* If we work with block index, q is NULL and parent value
		 * will never be used in the following code. The check
		 * in tcf_fill_node prevents it. However, compiler does not
		 * see that far, so set parent to zero to silence the warning
		 * about parent being uninitialized.
		 */
		parent = 0;
	} else {
		const struct Qdisc_class_ops *cops;
		struct net_device *dev;
		unsigned long cl = 0;

		dev = __dev_get_by_index(net, tcm->tcm_ifindex);
		if (!dev)
			return skb->len;

		parent = tcm->tcm_parent;
		if (!parent) {
			q = dev->qdisc;
			parent = q->handle;
		} else {
			q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
		}
		if (!q)
			goto out;
		cops = q->ops->cl_ops;
		if (!cops)
			goto out;
		if (!cops->tcf_block)
			goto out;
		if (TC_H_MIN(tcm->tcm_parent)) {
			cl = cops->find(q, tcm->tcm_parent);
			if (cl == 0)
				goto out;
		}
		block = cops->tcf_block(q, cl, NULL);
		if (!block)
			goto out;
		if (tcf_block_shared(block))
			q = NULL;
	}

	index_start = cb->args[0];
	index = 0;

	for (chain = __tcf_get_next_chain(block, NULL);
	     chain;
	     chain_prev = chain,
		     chain = __tcf_get_next_chain(block, chain),
		     tcf_chain_put(chain_prev)) {
		if (tca[TCA_CHAIN] &&
		    nla_get_u32(tca[TCA_CHAIN]) != chain->index)
			continue;
		if (!tcf_chain_dump(chain, q, parent, skb, cb,
				    index_start, &index)) {
			tcf_chain_put(chain);
			err = -EMSGSIZE;
			break;
		}
	}

	if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK)
		tcf_block_refcnt_put(block, true);
	cb->args[0] = index;

out:
	/* If we did no progress, the error (EMSGSIZE) is real */
	if (skb->len == 0 && err)
		return err;
	return skb->len;
}
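/* Fill one chain notification message. Unlike filter notifications, this
 * carries only the chain index and, when a template is set, the template
 * kind and the classifier's template dump.
 */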
static int tc_chain_fill_node(const struct tcf_proto_ops *tmplt_ops,
			      void *tmplt_priv, u32 chain_index,
			      struct net *net, struct sk_buff *skb,
			      struct tcf_block *block,
			      u32 portid, u32 seq, u16 flags, int event)
{
	unsigned char *b = skb_tail_pointer(skb);
	const struct tcf_proto_ops *ops;
	struct nlmsghdr *nlh;
	struct tcmsg *tcm;
	void *priv;

	ops = tmplt_ops;
	priv = tmplt_priv;

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
	if (!nlh)
		goto out_nlmsg_trim;
	tcm = nlmsg_data(nlh);
	tcm->tcm_family = AF_UNSPEC;
	tcm->tcm__pad1 = 0;
	tcm->tcm__pad2 = 0;
	tcm->tcm_handle = 0;
	if (block->q) {
		tcm->tcm_ifindex = qdisc_dev(block->q)->ifindex;
		tcm->tcm_parent = block->q->handle;
	} else {
		tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
		tcm->tcm_block_index = block->index;
	}

	if (nla_put_u32(skb, TCA_CHAIN, chain_index))
		goto nla_put_failure;

	if (ops) {
		if (nla_put_string(skb, TCA_KIND, ops->kind))
			goto nla_put_failure;
		if (ops->tmplt_dump(skb, net, priv) < 0)
			goto nla_put_failure;
	}

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

out_nlmsg_trim:
nla_put_failure:
	nlmsg_trim(skb, b);
	return -EMSGSIZE;
}

static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
			   u32 seq, u16 flags, int event, bool unicast)
{
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
	struct tcf_block *block = chain->block;
	struct net *net = block->net;
	struct sk_buff *skb;
	int err = 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv,
			       chain->index, net, skb, block, portid,
			       seq, flags, event) <= 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	if (unicast)
		err = netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
	else
		err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
				     flags & NLM_F_ECHO);

	if (err > 0)
		err = 0;
	return err;
}
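/* Chain delete notifications take tmplt_ops/tmplt_priv explicitly instead
 * of a chain pointer, since the chain itself may already be torn down by
 * the time the event is sent; only the template state is kept alive.
 */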
static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops,
				  void *tmplt_priv, u32 chain_index,
				  struct tcf_block *block, struct sk_buff *oskb,
				  u32 seq, u16 flags, bool unicast)
{
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
	struct net *net = block->net;
	struct sk_buff *skb;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tc_chain_fill_node(tmplt_ops, tmplt_priv, chain_index, net, skb,
			       block, portid, seq, flags, RTM_DELCHAIN) <= 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	if (unicast)
		return netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);

	return rtnetlink_send(skb, net, portid, RTNLGRP_TC, flags & NLM_F_ECHO);
}

static int tc_chain_tmplt_add(struct tcf_chain *chain, struct net *net,
			      struct nlattr **tca,
			      struct netlink_ext_ack *extack)
{
	const struct tcf_proto_ops *ops;
	char name[IFNAMSIZ];
	void *tmplt_priv;

	/* If kind is not set, user did not specify template. */
	if (!tca[TCA_KIND])
		return 0;

	if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
		NL_SET_ERR_MSG(extack, "Specified TC chain template name too long");
		return -EINVAL;
	}

	ops = tcf_proto_lookup_ops(name, true, extack);
	if (IS_ERR(ops))
		return PTR_ERR(ops);
	if (!ops->tmplt_create || !ops->tmplt_destroy || !ops->tmplt_dump) {
		NL_SET_ERR_MSG(extack, "Chain templates are not supported with specified classifier");
		module_put(ops->owner);
		return -EOPNOTSUPP;
	}

	tmplt_priv = ops->tmplt_create(net, chain, tca, extack);
	if (IS_ERR(tmplt_priv)) {
		module_put(ops->owner);
		return PTR_ERR(tmplt_priv);
	}
	chain->tmplt_ops = ops;
	chain->tmplt_priv = tmplt_priv;
	return 0;
}
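/* Template create/destroy are paired: tc_chain_tmplt_add() keeps the
 * module reference taken by tcf_proto_lookup_ops() for as long as the
 * template lives, and tc_chain_tmplt_del() drops it together with the
 * template's private state.
 */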
static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops,
			       void *tmplt_priv)
{
	/* If template ops are not set, no work to do for us. */
	if (!tmplt_ops)
		return;

	tmplt_ops->tmplt_destroy(tmplt_priv);
	module_put(tmplt_ops->owner);
}

/* Add/delete/get a chain */
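/* tc_ctl_chain() serves RTM_NEWCHAIN, RTM_DELCHAIN and RTM_GETCHAIN.
 * Chain lookup and creation happen under the block lock; RTM_NEWCHAIN
 * takes an extra reference so an explicitly created, still-empty chain
 * survives until its template (if any) has been attached. Illustrative
 * iproute2 usage (device name is an example only):
 *
 *	tc chain add dev eth0 ingress chain 1 protocol ip flower
 */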
static int tc_ctl_chain(struct sk_buff *skb, struct nlmsghdr *n,
			struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	struct tcmsg *t;
	u32 parent;
	u32 chain_index;
	struct Qdisc *q = NULL;
	struct tcf_chain *chain = NULL;
	struct tcf_block *block;
	unsigned long cl;
	int err;

	if (n->nlmsg_type != RTM_GETCHAIN &&
	    !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

replay:
	err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
				     rtm_tca_policy, extack);
	if (err < 0)
		return err;

	t = nlmsg_data(n);
	parent = t->tcm_parent;
	cl = 0;

	block = tcf_block_find(net, &q, &parent, &cl,
			       t->tcm_ifindex, t->tcm_block_index, extack);
	if (IS_ERR(block))
		return PTR_ERR(block);

	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
	if (chain_index > TC_ACT_EXT_VAL_MASK) {
		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
		err = -EINVAL;
		goto errout_block;
	}

	mutex_lock(&block->lock);
	chain = tcf_chain_lookup(block, chain_index);
	if (n->nlmsg_type == RTM_NEWCHAIN) {
		if (chain) {
			if (tcf_chain_held_by_acts_only(chain)) {
				/* The chain exists only because there is
				 * some action referencing it.
				 */
				tcf_chain_hold(chain);
			} else {
				NL_SET_ERR_MSG(extack, "Filter chain already exists");
				err = -EEXIST;
				goto errout_block_locked;
			}
		} else {
			if (!(n->nlmsg_flags & NLM_F_CREATE)) {
				NL_SET_ERR_MSG(extack, "Need both RTM_NEWCHAIN and NLM_F_CREATE to create a new chain");
				err = -ENOENT;
				goto errout_block_locked;
			}
			chain = tcf_chain_create(block, chain_index);
			if (!chain) {
				NL_SET_ERR_MSG(extack, "Failed to create filter chain");
				err = -ENOMEM;
				goto errout_block_locked;
			}
		}
	} else {
		if (!chain || tcf_chain_held_by_acts_only(chain)) {
			NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
			err = -EINVAL;
			goto errout_block_locked;
		}
		tcf_chain_hold(chain);
	}

	if (n->nlmsg_type == RTM_NEWCHAIN) {
		/* Modifying chain requires holding parent block lock. In case
		 * the chain was successfully added, take a reference to the
		 * chain. This ensures that an empty chain does not disappear
		 * at the end of this function.
		 */
		tcf_chain_hold(chain);
		chain->explicitly_created = true;
	}
	mutex_unlock(&block->lock);

	switch (n->nlmsg_type) {
	case RTM_NEWCHAIN:
		err = tc_chain_tmplt_add(chain, net, tca, extack);
		if (err) {
			tcf_chain_put_explicitly_created(chain);
			goto errout;
		}

		tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
				RTM_NEWCHAIN, false);
		break;
	case RTM_DELCHAIN:
		tfilter_notify_chain(net, skb, block, q, parent, n,
				     chain, RTM_DELTFILTER, true);
		/* Flush the chain first as the user requested chain removal. */
		tcf_chain_flush(chain, true);
		/* In case the chain was successfully deleted, put a reference
		 * to the chain previously taken during addition.
		 */
		tcf_chain_put_explicitly_created(chain);
		break;
	case RTM_GETCHAIN:
		err = tc_chain_notify(chain, skb, n->nlmsg_seq,
				      n->nlmsg_seq, n->nlmsg_type, true);
		if (err < 0)
			NL_SET_ERR_MSG(extack, "Failed to send chain notify message");
		break;
	default:
		err = -EOPNOTSUPP;
		NL_SET_ERR_MSG(extack, "Unsupported message type");
		goto errout;
	}

errout:
	tcf_chain_put(chain);
errout_block:
	tcf_block_release(q, block, true);
	if (err == -EAGAIN)
		/* Replay the request. */
		goto replay;
	return err;

errout_block_locked:
	mutex_unlock(&block->lock);
	goto errout_block;
}

/* called with RTNL */
static int tc_dump_chain(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	struct Qdisc *q = NULL;
	struct tcf_block *block;
	struct tcmsg *tcm = nlmsg_data(cb->nlh);
	struct tcf_chain *chain;
	long index_start;
	long index;
	u32 parent;
	int err;

	if (nlmsg_len(cb->nlh) < sizeof(*tcm))
		return skb->len;

	err = nlmsg_parse_deprecated(cb->nlh, sizeof(*tcm), tca, TCA_MAX,
				     rtm_tca_policy, cb->extack);
	if (err)
		return err;

	if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
		block = tcf_block_refcnt_get(net, tcm->tcm_block_index);
		if (!block)
			goto out;
		/* If we work with block index, q is NULL and parent value
		 * will never be used in the following code. The check
		 * in tcf_fill_node prevents it. However, compiler does not
		 * see that far, so set parent to zero to silence the warning
		 * about parent being uninitialized.
		 */
		parent = 0;
	} else {
		const struct Qdisc_class_ops *cops;
		struct net_device *dev;
		unsigned long cl = 0;

		dev = __dev_get_by_index(net, tcm->tcm_ifindex);
		if (!dev)
			return skb->len;

		parent = tcm->tcm_parent;
		if (!parent) {
			q = dev->qdisc;
			parent = q->handle;
		} else {
			q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
		}
		if (!q)
			goto out;
		cops = q->ops->cl_ops;
		if (!cops)
			goto out;
		if (!cops->tcf_block)
			goto out;
		if (TC_H_MIN(tcm->tcm_parent)) {
			cl = cops->find(q, tcm->tcm_parent);
			if (cl == 0)
				goto out;
		}
		block = cops->tcf_block(q, cl, NULL);
		if (!block)
			goto out;
		if (tcf_block_shared(block))
			q = NULL;
	}

	index_start = cb->args[0];
	index = 0;

	mutex_lock(&block->lock);
	list_for_each_entry(chain, &block->chain_list, list) {
		if ((tca[TCA_CHAIN] &&
		     nla_get_u32(tca[TCA_CHAIN]) != chain->index))
			continue;
		if (index < index_start) {
			index++;
			continue;
		}
		if (tcf_chain_held_by_acts_only(chain))
			continue;
		err = tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv,
					 chain->index, net, skb, block,
					 NETLINK_CB(cb->skb).portid,
					 cb->nlh->nlmsg_seq, NLM_F_MULTI,
					 RTM_NEWCHAIN);
		if (err <= 0)
			break;
		index++;
	}
	mutex_unlock(&block->lock);

	if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK)
		tcf_block_refcnt_put(block, true);
	cb->args[0] = index;

out:
	/* If we did no progress, the error (EMSGSIZE) is real */
	if (skb->len == 0 && err)
		return err;
	return skb->len;
}

void tcf_exts_destroy(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	if (exts->actions) {
		tcf_action_destroy(exts->actions, TCA_ACT_UNBIND);
		kfree(exts->actions);
	}
	exts->nr_actions = 0;
#endif
}
EXPORT_SYMBOL(tcf_exts_destroy);
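/* Parse and bind the actions attached to a filter. The old TCA_*_POLICE
 * attribute is the single-action compatibility interface; TCA_*_ACT
 * carries a list of actions that fills exts->actions directly.
 */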
int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
		      struct nlattr *rate_tlv, struct tcf_exts *exts, bool ovr,
		      bool rtnl_held, struct netlink_ext_ack *extack)
{
#ifdef CONFIG_NET_CLS_ACT
	{
		struct tc_action *act;
		size_t attr_size = 0;

		if (exts->police && tb[exts->police]) {
			act = tcf_action_init_1(net, tp, tb[exts->police],
						rate_tlv, "police", ovr,
						TCA_ACT_BIND, rtnl_held,
						extack);
			if (IS_ERR(act))
				return PTR_ERR(act);

			act->type = exts->type = TCA_OLD_COMPAT;
			exts->actions[0] = act;
			exts->nr_actions = 1;
		} else if (exts->action && tb[exts->action]) {
			int err;

			err = tcf_action_init(net, tp, tb[exts->action],
					      rate_tlv, NULL, ovr, TCA_ACT_BIND,
					      exts->actions, &attr_size,
					      rtnl_held, extack);
			if (err < 0)
				return err;
			exts->nr_actions = err;
		}
	}
#else
	if ((exts->action && tb[exts->action]) ||
	    (exts->police && tb[exts->police])) {
		NL_SET_ERR_MSG(extack, "Classifier actions are not supported per compile options (CONFIG_NET_CLS_ACT)");
		return -EOPNOTSUPP;
	}
#endif

	return 0;
}
EXPORT_SYMBOL(tcf_exts_validate);

void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src)
{
#ifdef CONFIG_NET_CLS_ACT
	struct tcf_exts old = *dst;

	*dst = *src;
	tcf_exts_destroy(&old);
#endif
}
EXPORT_SYMBOL(tcf_exts_change);

#ifdef CONFIG_NET_CLS_ACT
static struct tc_action *tcf_exts_first_act(struct tcf_exts *exts)
{
	if (exts->nr_actions == 0)
		return NULL;
	else
		return exts->actions[0];
}
#endif

int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	struct nlattr *nest;

	if (exts->action && tcf_exts_has_actions(exts)) {
		/*
		 * again for backward compatible mode - we want
		 * to work with both old and new modes of entering
		 * tc data even if iproute2 was newer - jhs
		 */
		if (exts->type != TCA_OLD_COMPAT) {
			nest = nla_nest_start_noflag(skb, exts->action);
			if (nest == NULL)
				goto nla_put_failure;

			if (tcf_action_dump(skb, exts->actions, 0, 0) < 0)
				goto nla_put_failure;
			nla_nest_end(skb, nest);
		} else if (exts->police) {
			struct tc_action *act = tcf_exts_first_act(exts);

			nest = nla_nest_start_noflag(skb, exts->police);
			if (nest == NULL || !act)
				goto nla_put_failure;
			if (tcf_action_dump_old(skb, act, 0, 0) < 0)
				goto nla_put_failure;
			nla_nest_end(skb, nest);
		}
	}
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
#else
	return 0;
#endif
}
EXPORT_SYMBOL(tcf_exts_dump);

int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	struct tc_action *a = tcf_exts_first_act(exts);

	if (a != NULL && tcf_action_copy_stats(skb, a, 1) < 0)
		return -1;
#endif
	return 0;
}
EXPORT_SYMBOL(tcf_exts_dump_stats);
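/* Offload bookkeeping: the block-wide offloadcnt counts filters present
 * in hardware, while the per-filter TCA_CLS_FLAGS_IN_HW flag and the
 * in_hw_count counter track how many block callbacks accepted a filter.
 */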
static void tcf_block_offload_inc(struct tcf_block *block, u32 *flags)
{
	if (*flags & TCA_CLS_FLAGS_IN_HW)
		return;
	*flags |= TCA_CLS_FLAGS_IN_HW;
	atomic_inc(&block->offloadcnt);
}

static void tcf_block_offload_dec(struct tcf_block *block, u32 *flags)
{
	if (!(*flags & TCA_CLS_FLAGS_IN_HW))
		return;
	*flags &= ~TCA_CLS_FLAGS_IN_HW;
	atomic_dec(&block->offloadcnt);
}

static void tc_cls_offload_cnt_update(struct tcf_block *block,
				      struct tcf_proto *tp, u32 *cnt,
				      u32 *flags, u32 diff, bool add)
{
	lockdep_assert_held(&block->cb_lock);

	spin_lock(&tp->lock);
	if (add) {
		if (!*cnt)
			tcf_block_offload_inc(block, flags);
		*cnt += diff;
	} else {
		*cnt -= diff;
		if (!*cnt)
			tcf_block_offload_dec(block, flags);
	}
	spin_unlock(&tp->lock);
}

static void
tc_cls_offload_cnt_reset(struct tcf_block *block, struct tcf_proto *tp,
			 u32 *cnt, u32 *flags)
{
	lockdep_assert_held(&block->cb_lock);

	spin_lock(&tp->lock);
	tcf_block_offload_dec(block, flags);
	*cnt = 0;
	spin_unlock(&tp->lock);
}

static int
__tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
		   void *type_data, bool err_stop)
{
	struct flow_block_cb *block_cb;
	int ok_count = 0;
	int err;

	list_for_each_entry(block_cb, &block->flow_block.cb_list, list) {
		err = block_cb->cb(type, type_data, block_cb->cb_priv);
		if (err) {
			if (err_stop)
				return err;
		} else {
			ok_count++;
		}
	}
	return ok_count;
}

int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
		     void *type_data, bool err_stop, bool rtnl_held)
{
	bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
	int ok_count;

retry:
	if (take_rtnl)
		rtnl_lock();
	down_read(&block->cb_lock);
	/* Need to obtain rtnl lock if block is bound to devs that require it.
	 * In block bind code cb_lock is obtained while holding rtnl, so we must
	 * obtain the locks in same order here.
	 */
	if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
		up_read(&block->cb_lock);
		take_rtnl = true;
		goto retry;
	}

	ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);

	up_read(&block->cb_lock);
	if (take_rtnl)
		rtnl_unlock();
	return ok_count;
}
EXPORT_SYMBOL(tc_setup_cb_call);
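/* The tc_setup_cb_{add,replace,destroy}() helpers below follow the same
 * locking pattern as tc_setup_cb_call(): when any bound device requires
 * RTNL, cb_lock is dropped and the acquisition retried after taking RTNL,
 * so the lock order (rtnl, then cb_lock) matches the block bind path.
 */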
/* Non-destructive filter add. If filter that wasn't already in hardware is
 * successfully offloaded, increment block offloads counter. On failure,
 * previously offloaded filter is considered to be intact and offloads counter
 * is not decremented.
 */

int tc_setup_cb_add(struct tcf_block *block, struct tcf_proto *tp,
		    enum tc_setup_type type, void *type_data, bool err_stop,
		    u32 *flags, unsigned int *in_hw_count, bool rtnl_held)
{
	bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
	int ok_count;

retry:
	if (take_rtnl)
		rtnl_lock();
	down_read(&block->cb_lock);
	/* Need to obtain rtnl lock if block is bound to devs that require it.
	 * In block bind code cb_lock is obtained while holding rtnl, so we must
	 * obtain the locks in same order here.
	 */
	if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
		up_read(&block->cb_lock);
		take_rtnl = true;
		goto retry;
	}

	/* Make sure all netdevs sharing this block are offload-capable. */
	if (block->nooffloaddevcnt && err_stop) {
		ok_count = -EOPNOTSUPP;
		goto err_unlock;
	}

	ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
	if (ok_count < 0)
		goto err_unlock;

	if (tp->ops->hw_add)
		tp->ops->hw_add(tp, type_data);
	if (ok_count > 0)
		tc_cls_offload_cnt_update(block, tp, in_hw_count, flags,
					  ok_count, true);
err_unlock:
	up_read(&block->cb_lock);
	if (take_rtnl)
		rtnl_unlock();
	return ok_count < 0 ? ok_count : 0;
}
EXPORT_SYMBOL(tc_setup_cb_add);

/* Destructive filter replace. If filter that wasn't already in hardware is
 * successfully offloaded, increment block offload counter. On failure,
 * previously offloaded filter is considered to be destroyed and offload counter
 * is decremented.
 */

int tc_setup_cb_replace(struct tcf_block *block, struct tcf_proto *tp,
			enum tc_setup_type type, void *type_data, bool err_stop,
			u32 *old_flags, unsigned int *old_in_hw_count,
			u32 *new_flags, unsigned int *new_in_hw_count,
			bool rtnl_held)
{
	bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
	int ok_count;

retry:
	if (take_rtnl)
		rtnl_lock();
	down_read(&block->cb_lock);
	/* Need to obtain rtnl lock if block is bound to devs that require it.
	 * In block bind code cb_lock is obtained while holding rtnl, so we must
	 * obtain the locks in same order here.
	 */
	if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
		up_read(&block->cb_lock);
		take_rtnl = true;
		goto retry;
	}

	/* Make sure all netdevs sharing this block are offload-capable. */
	if (block->nooffloaddevcnt && err_stop) {
		ok_count = -EOPNOTSUPP;
		goto err_unlock;
	}

	tc_cls_offload_cnt_reset(block, tp, old_in_hw_count, old_flags);
	if (tp->ops->hw_del)
		tp->ops->hw_del(tp, type_data);

	ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
	if (ok_count < 0)
		goto err_unlock;

	if (tp->ops->hw_add)
		tp->ops->hw_add(tp, type_data);
	if (ok_count > 0)
		tc_cls_offload_cnt_update(block, tp, new_in_hw_count,
					  new_flags, ok_count, true);
err_unlock:
	up_read(&block->cb_lock);
	if (take_rtnl)
		rtnl_unlock();
	return ok_count < 0 ? ok_count : 0;
}
EXPORT_SYMBOL(tc_setup_cb_replace);
/* Destroy filter and decrement block offload counter, if filter was previously
 * offloaded.
 */

int tc_setup_cb_destroy(struct tcf_block *block, struct tcf_proto *tp,
			enum tc_setup_type type, void *type_data, bool err_stop,
			u32 *flags, unsigned int *in_hw_count, bool rtnl_held)
{
	bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
	int ok_count;

retry:
	if (take_rtnl)
		rtnl_lock();
	down_read(&block->cb_lock);
	/* Need to obtain rtnl lock if block is bound to devs that require it.
	 * In block bind code cb_lock is obtained while holding rtnl, so we must
	 * obtain the locks in same order here.
	 */
	if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
		up_read(&block->cb_lock);
		take_rtnl = true;
		goto retry;
	}

	ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);

	tc_cls_offload_cnt_reset(block, tp, in_hw_count, flags);
	if (tp->ops->hw_del)
		tp->ops->hw_del(tp, type_data);

	up_read(&block->cb_lock);
	if (take_rtnl)
		rtnl_unlock();
	return ok_count < 0 ? ok_count : 0;
}
EXPORT_SYMBOL(tc_setup_cb_destroy);

int tc_setup_cb_reoffload(struct tcf_block *block, struct tcf_proto *tp,
			  bool add, flow_setup_cb_t *cb,
			  enum tc_setup_type type, void *type_data,
			  void *cb_priv, u32 *flags, unsigned int *in_hw_count)
{
	int err = cb(type, type_data, cb_priv);

	if (err) {
		if (add && tc_skip_sw(*flags))
			return err;
	} else {
		tc_cls_offload_cnt_update(block, tp, in_hw_count, flags, 1,
					  add);
	}

	return 0;
}
EXPORT_SYMBOL(tc_setup_cb_reoffload);

static int tcf_act_get_cookie(struct flow_action_entry *entry,
			      const struct tc_action *act)
{
	struct tc_cookie *cookie;
	int err = 0;

	rcu_read_lock();
	cookie = rcu_dereference(act->act_cookie);
	if (cookie) {
		entry->cookie = flow_action_cookie_create(cookie->data,
							  cookie->len,
							  GFP_ATOMIC);
		if (!entry->cookie)
			err = -ENOMEM;
	}
	rcu_read_unlock();
	return err;
}

static void tcf_act_put_cookie(struct flow_action_entry *entry)
{
	flow_action_cookie_destroy(entry->cookie);
}

void tc_cleanup_flow_action(struct flow_action *flow_action)
{
	struct flow_action_entry *entry;
	int i;

	flow_action_for_each(i, entry, flow_action) {
		tcf_act_put_cookie(entry);
		if (entry->destructor)
			entry->destructor(entry->destructor_priv);
	}
}
EXPORT_SYMBOL(tc_cleanup_flow_action);

static void tcf_mirred_get_dev(struct flow_action_entry *entry,
			       const struct tc_action *act)
{
#ifdef CONFIG_NET_CLS_ACT
	entry->dev = act->ops->get_dev(act, &entry->destructor);
	if (!entry->dev)
		return;
	entry->destructor_priv = entry->dev;
#endif
}

static void tcf_tunnel_encap_put_tunnel(void *priv)
{
	struct ip_tunnel_info *tunnel = priv;

	kfree(tunnel);
}

static int tcf_tunnel_encap_get_tunnel(struct flow_action_entry *entry,
				       const struct tc_action *act)
{
	entry->tunnel = tcf_tunnel_info_copy(act);
	if (!entry->tunnel)
		return -ENOMEM;
	entry->destructor = tcf_tunnel_encap_put_tunnel;
	entry->destructor_priv = entry->tunnel;
	return 0;
}

static void tcf_sample_get_group(struct flow_action_entry *entry,
				 const struct tc_action *act)
{
#ifdef CONFIG_NET_CLS_ACT
	entry->sample.psample_group =
		act->ops->get_psample_group(act, &entry->destructor);
	entry->destructor_priv = entry->sample.psample_group;
#endif
}
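/* Translate a filter's tc actions into the flow_action representation
 * consumed by drivers. Each action maps to one flow_action entry, except
 * pedit, which expands to one entry per key; tcf_exts_num_actions() below
 * counts entries the same way so callers can size the array accordingly.
 */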
int tc_setup_flow_action(struct flow_action *flow_action,
			 const struct tcf_exts *exts)
{
	struct tc_action *act;
	int i, j, k, err = 0;

	BUILD_BUG_ON(TCA_ACT_HW_STATS_ANY != FLOW_ACTION_HW_STATS_ANY);
	BUILD_BUG_ON(TCA_ACT_HW_STATS_IMMEDIATE != FLOW_ACTION_HW_STATS_IMMEDIATE);
	BUILD_BUG_ON(TCA_ACT_HW_STATS_DELAYED != FLOW_ACTION_HW_STATS_DELAYED);

	if (!exts)
		return 0;

	j = 0;
	tcf_exts_for_each_action(i, act, exts) {
		struct flow_action_entry *entry;

		entry = &flow_action->entries[j];
		spin_lock_bh(&act->tcfa_lock);
		err = tcf_act_get_cookie(entry, act);
		if (err)
			goto err_out_locked;

		entry->hw_stats = act->hw_stats;

		if (is_tcf_gact_ok(act)) {
			entry->id = FLOW_ACTION_ACCEPT;
		} else if (is_tcf_gact_shot(act)) {
			entry->id = FLOW_ACTION_DROP;
		} else if (is_tcf_gact_trap(act)) {
			entry->id = FLOW_ACTION_TRAP;
		} else if (is_tcf_gact_goto_chain(act)) {
			entry->id = FLOW_ACTION_GOTO;
			entry->chain_index = tcf_gact_goto_chain_index(act);
		} else if (is_tcf_mirred_egress_redirect(act)) {
			entry->id = FLOW_ACTION_REDIRECT;
			tcf_mirred_get_dev(entry, act);
		} else if (is_tcf_mirred_egress_mirror(act)) {
			entry->id = FLOW_ACTION_MIRRED;
			tcf_mirred_get_dev(entry, act);
		} else if (is_tcf_mirred_ingress_redirect(act)) {
			entry->id = FLOW_ACTION_REDIRECT_INGRESS;
			tcf_mirred_get_dev(entry, act);
		} else if (is_tcf_mirred_ingress_mirror(act)) {
			entry->id = FLOW_ACTION_MIRRED_INGRESS;
			tcf_mirred_get_dev(entry, act);
		} else if (is_tcf_vlan(act)) {
			switch (tcf_vlan_action(act)) {
			case TCA_VLAN_ACT_PUSH:
				entry->id = FLOW_ACTION_VLAN_PUSH;
				entry->vlan.vid = tcf_vlan_push_vid(act);
				entry->vlan.proto = tcf_vlan_push_proto(act);
				entry->vlan.prio = tcf_vlan_push_prio(act);
				break;
			case TCA_VLAN_ACT_POP:
				entry->id = FLOW_ACTION_VLAN_POP;
				break;
			case TCA_VLAN_ACT_MODIFY:
				entry->id = FLOW_ACTION_VLAN_MANGLE;
				entry->vlan.vid = tcf_vlan_push_vid(act);
				entry->vlan.proto = tcf_vlan_push_proto(act);
				entry->vlan.prio = tcf_vlan_push_prio(act);
				break;
			default:
				err = -EOPNOTSUPP;
				goto err_out_locked;
			}
		} else if (is_tcf_tunnel_set(act)) {
			entry->id = FLOW_ACTION_TUNNEL_ENCAP;
			err = tcf_tunnel_encap_get_tunnel(entry, act);
			if (err)
				goto err_out_locked;
		} else if (is_tcf_tunnel_release(act)) {
			entry->id = FLOW_ACTION_TUNNEL_DECAP;
		} else if (is_tcf_pedit(act)) {
			for (k = 0; k < tcf_pedit_nkeys(act); k++) {
				switch (tcf_pedit_cmd(act, k)) {
				case TCA_PEDIT_KEY_EX_CMD_SET:
					entry->id = FLOW_ACTION_MANGLE;
					break;
				case TCA_PEDIT_KEY_EX_CMD_ADD:
					entry->id = FLOW_ACTION_ADD;
					break;
				default:
					err = -EOPNOTSUPP;
					goto err_out_locked;
				}
				entry->mangle.htype = tcf_pedit_htype(act, k);
				entry->mangle.mask = tcf_pedit_mask(act, k);
				entry->mangle.val = tcf_pedit_val(act, k);
				entry->mangle.offset = tcf_pedit_offset(act, k);
				entry->hw_stats = act->hw_stats;
				entry = &flow_action->entries[++j];
			}
		} else if (is_tcf_csum(act)) {
			entry->id = FLOW_ACTION_CSUM;
			entry->csum_flags = tcf_csum_update_flags(act);
		} else if (is_tcf_skbedit_mark(act)) {
			entry->id = FLOW_ACTION_MARK;
			entry->mark = tcf_skbedit_mark(act);
		} else if (is_tcf_sample(act)) {
			entry->id = FLOW_ACTION_SAMPLE;
			entry->sample.trunc_size = tcf_sample_trunc_size(act);
			entry->sample.truncate = tcf_sample_truncate(act);
			entry->sample.rate = tcf_sample_rate(act);
			tcf_sample_get_group(entry, act);
		} else if (is_tcf_police(act)) {
			entry->id = FLOW_ACTION_POLICE;
			entry->police.burst = tcf_police_tcfp_burst(act);
			entry->police.rate_bytes_ps =
				tcf_police_rate_bytes_ps(act);
		} else if (is_tcf_ct(act)) {
			entry->id = FLOW_ACTION_CT;
			entry->ct.action = tcf_ct_action(act);
			entry->ct.zone = tcf_ct_zone(act);
			entry->ct.flow_table = tcf_ct_ft(act);
		} else if (is_tcf_mpls(act)) {
			switch (tcf_mpls_action(act)) {
			case TCA_MPLS_ACT_PUSH:
				entry->id = FLOW_ACTION_MPLS_PUSH;
				entry->mpls_push.proto = tcf_mpls_proto(act);
				entry->mpls_push.label = tcf_mpls_label(act);
				entry->mpls_push.tc = tcf_mpls_tc(act);
				entry->mpls_push.bos = tcf_mpls_bos(act);
				entry->mpls_push.ttl = tcf_mpls_ttl(act);
				break;
			case TCA_MPLS_ACT_POP:
				entry->id = FLOW_ACTION_MPLS_POP;
				entry->mpls_pop.proto = tcf_mpls_proto(act);
				break;
			case TCA_MPLS_ACT_MODIFY:
				entry->id = FLOW_ACTION_MPLS_MANGLE;
				entry->mpls_mangle.label = tcf_mpls_label(act);
				entry->mpls_mangle.tc = tcf_mpls_tc(act);
				entry->mpls_mangle.bos = tcf_mpls_bos(act);
				entry->mpls_mangle.ttl = tcf_mpls_ttl(act);
				break;
			default:
				err = -EOPNOTSUPP;
				goto err_out_locked;
			}
		} else if (is_tcf_skbedit_ptype(act)) {
			entry->id = FLOW_ACTION_PTYPE;
			entry->ptype = tcf_skbedit_ptype(act);
		} else if (is_tcf_skbedit_priority(act)) {
			entry->id = FLOW_ACTION_PRIORITY;
			entry->priority = tcf_skbedit_priority(act);
		} else {
			err = -EOPNOTSUPP;
			goto err_out_locked;
		}
		spin_unlock_bh(&act->tcfa_lock);

		if (!is_tcf_pedit(act))
			j++;
	}

err_out:
	if (err)
		tc_cleanup_flow_action(flow_action);

	return err;
err_out_locked:
	spin_unlock_bh(&act->tcfa_lock);
	goto err_out;
}
EXPORT_SYMBOL(tc_setup_flow_action);

unsigned int tcf_exts_num_actions(struct tcf_exts *exts)
{
	unsigned int num_acts = 0;
	struct tc_action *act;
	int i;

	tcf_exts_for_each_action(i, act, exts) {
		if (is_tcf_pedit(act))
			num_acts += tcf_pedit_nkeys(act);
		else
			num_acts++;
	}
	return num_acts;
}
EXPORT_SYMBOL(tcf_exts_num_actions);

static __net_init int tcf_net_init(struct net *net)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	spin_lock_init(&tn->idr_lock);
	idr_init(&tn->idr);
	return 0;
}

static void __net_exit tcf_net_exit(struct net *net)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	idr_destroy(&tn->idr);
}

static struct pernet_operations tcf_net_ops = {
	.init = tcf_net_init,
	.exit = tcf_net_exit,
	.id = &tcf_net_id,
	.size = sizeof(struct tcf_net),
};

static struct flow_indr_block_entry block_entry = {
	.cb = tc_indr_block_get_and_cmd,
	.list = LIST_HEAD_INIT(block_entry.list),
};
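/* Module init: the filter doit handlers are registered with
 * RTNL_FLAG_DOIT_UNLOCKED and take RTNL themselves only when required,
 * while the chain handlers still run entirely under RTNL.
 */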
static int __init tc_filter_init(void)
{
	int err;

	tc_filter_wq = alloc_ordered_workqueue("tc_filter_workqueue", 0);
	if (!tc_filter_wq)
		return -ENOMEM;

	err = register_pernet_subsys(&tcf_net_ops);
	if (err)
		goto err_register_pernet_subsys;

	flow_indr_add_block_cb(&block_entry);

	rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_new_tfilter, NULL,
		      RTNL_FLAG_DOIT_UNLOCKED);
	rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_del_tfilter, NULL,
		      RTNL_FLAG_DOIT_UNLOCKED);
	rtnl_register(PF_UNSPEC, RTM_GETTFILTER, tc_get_tfilter,
		      tc_dump_tfilter, RTNL_FLAG_DOIT_UNLOCKED);
	rtnl_register(PF_UNSPEC, RTM_NEWCHAIN, tc_ctl_chain, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_DELCHAIN, tc_ctl_chain, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_GETCHAIN, tc_ctl_chain,
		      tc_dump_chain, 0);

	return 0;

err_register_pernet_subsys:
	destroy_workqueue(tc_filter_wq);
	return err;
}

subsys_initcall(tc_filter_init);