// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/act_api.c	Packet action API.
 *
 * Author:	Jamal Hadi Salim
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/err.h>
#include <linux/module.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <net/act_api.h>
#include <net/netlink.h>

#ifdef CONFIG_INET
DEFINE_STATIC_KEY_FALSE(tcf_frag_xmit_count);
EXPORT_SYMBOL_GPL(tcf_frag_xmit_count);
#endif

int tcf_dev_queue_xmit(struct sk_buff *skb, int (*xmit)(struct sk_buff *skb))
{
#ifdef CONFIG_INET
	if (static_branch_unlikely(&tcf_frag_xmit_count))
		return sch_frag_xmit_hook(skb, xmit);
#endif

	return xmit(skb);
}
EXPORT_SYMBOL_GPL(tcf_dev_queue_xmit);

static void tcf_action_goto_chain_exec(const struct tc_action *a,
				       struct tcf_result *res)
{
	const struct tcf_chain *chain = rcu_dereference_bh(a->goto_chain);

	res->goto_tp = rcu_dereference_bh(chain->filter_chain);
}

static void tcf_free_cookie_rcu(struct rcu_head *p)
{
	struct tc_cookie *cookie = container_of(p, struct tc_cookie, rcu);

	kfree(cookie->data);
	kfree(cookie);
}

static void tcf_set_action_cookie(struct tc_cookie __rcu **old_cookie,
				  struct tc_cookie *new_cookie)
{
	struct tc_cookie *old;

	old = xchg((__force struct tc_cookie **)old_cookie, new_cookie);
	if (old)
		call_rcu(&old->rcu, tcf_free_cookie_rcu);
}

int tcf_action_check_ctrlact(int action, struct tcf_proto *tp,
			     struct tcf_chain **newchain,
			     struct netlink_ext_ack *extack)
{
	int opcode = TC_ACT_EXT_OPCODE(action), ret = -EINVAL;
	u32 chain_index;

	if (!opcode)
		ret = action > TC_ACT_VALUE_MAX ? -EINVAL : 0;
	else if (opcode <= TC_ACT_EXT_OPCODE_MAX || action == TC_ACT_UNSPEC)
		ret = 0;
	if (ret) {
		NL_SET_ERR_MSG(extack, "invalid control action");
		goto end;
	}

	if (TC_ACT_EXT_CMP(action, TC_ACT_GOTO_CHAIN)) {
		chain_index = action & TC_ACT_EXT_VAL_MASK;
		if (!tp || !newchain) {
			ret = -EINVAL;
			NL_SET_ERR_MSG(extack,
				       "can't goto NULL proto/chain");
			goto end;
		}
		*newchain = tcf_chain_get_by_act(tp->chain->block, chain_index);
		if (!*newchain) {
			ret = -ENOMEM;
			NL_SET_ERR_MSG(extack,
				       "can't allocate goto_chain");
		}
	}
end:
	return ret;
}
EXPORT_SYMBOL(tcf_action_check_ctrlact);

struct tcf_chain *tcf_action_set_ctrlact(struct tc_action *a, int action,
					 struct tcf_chain *goto_chain)
{
	a->tcfa_action = action;
	goto_chain = rcu_replace_pointer(a->goto_chain, goto_chain, 1);
	return goto_chain;
}
EXPORT_SYMBOL(tcf_action_set_ctrlact);
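/* Illustrative sketch (not part of this file): a typical action ->init()
 * validates the requested control action and installs it under the action
 * lock, dropping the old goto_chain reference outside the lock. This is
 * modeled on how simple actions such as act_gact use the two helpers above;
 * the names "p" and "parm" are hypothetical.
 *
 *	struct tcf_chain *goto_ch = NULL;
 *	int err;
 *
 *	err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
 *	if (err < 0)
 *		return err;
 *
 *	spin_lock_bh(&p->tcfa_lock);
 *	goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
 *	spin_unlock_bh(&p->tcfa_lock);
 *	if (goto_ch)
 *		tcf_chain_put_by_act(goto_ch);
 */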
/* XXX: For standalone actions, we don't need an RCU grace period either,
 * because actions are always connected to filters and filters are already
 * destroyed in RCU callbacks, so after an RCU grace period actions are
 * already disconnected from filters. Readers can no longer find us.
 */
static void free_tcf(struct tc_action *p)
{
	struct tcf_chain *chain = rcu_dereference_protected(p->goto_chain, 1);

	free_percpu(p->cpu_bstats);
	free_percpu(p->cpu_bstats_hw);
	free_percpu(p->cpu_qstats);

	tcf_set_action_cookie(&p->act_cookie, NULL);
	if (chain)
		tcf_chain_put_by_act(chain);

	kfree(p);
}

static void tcf_action_cleanup(struct tc_action *p)
{
	if (p->ops->cleanup)
		p->ops->cleanup(p);

	gen_kill_estimator(&p->tcfa_rate_est);
	free_tcf(p);
}

static int __tcf_action_put(struct tc_action *p, bool bind)
{
	struct tcf_idrinfo *idrinfo = p->idrinfo;

	if (refcount_dec_and_mutex_lock(&p->tcfa_refcnt, &idrinfo->lock)) {
		if (bind)
			atomic_dec(&p->tcfa_bindcnt);
		idr_remove(&idrinfo->action_idr, p->tcfa_index);
		mutex_unlock(&idrinfo->lock);

		tcf_action_cleanup(p);
		return 1;
	}

	if (bind)
		atomic_dec(&p->tcfa_bindcnt);

	return 0;
}

static int __tcf_idr_release(struct tc_action *p, bool bind, bool strict)
{
	int ret = 0;

	/* Release with strict==1 and bind==0 is only called through the act
	 * API interface (classifiers always bind). The only case when an
	 * action with a positive reference count and a zero bind count can
	 * exist is when it was also created via the act API (unbinding the
	 * last classifier destroys the action if it was created by a
	 * classifier). So the only case when the bind count can change after
	 * the initial check is when an unbound action is destroyed via the
	 * act API while a classifier concurrently binds to an action with
	 * the same id. This results either in the creation of a new action
	 * (same behavior as before) or in reusing the existing action if the
	 * concurrent process increments the reference count before the
	 * action is deleted. Both scenarios are acceptable.
	 */
	if (p) {
		if (!bind && strict && atomic_read(&p->tcfa_bindcnt) > 0)
			return -EPERM;

		if (__tcf_action_put(p, bind))
			ret = ACT_P_DELETED;
	}

	return ret;
}

int tcf_idr_release(struct tc_action *a, bool bind)
{
	const struct tc_action_ops *ops = a->ops;
	int ret;

	ret = __tcf_idr_release(a, bind, false);
	if (ret == ACT_P_DELETED)
		module_put(ops->owner);
	return ret;
}
EXPORT_SYMBOL(tcf_idr_release);
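/* Illustrative sketch: tcf_action_cleanup() above invokes the action's
 * optional ->cleanup() callback before freeing the common part. A minimal
 * callback for an action holding a device reference could look like the
 * following (loosely modeled on act_mirred; the "example" names are
 * hypothetical):
 *
 *	static void example_act_release(struct tc_action *a)
 *	{
 *		struct example_act *ex = to_example(a);
 *		struct net_device *dev;
 *
 *		dev = rcu_dereference_protected(ex->dev, 1);
 *		if (dev)
 *			dev_put(dev);
 *	}
 */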
static size_t tcf_action_shared_attrs_size(const struct tc_action *act)
{
	struct tc_cookie *act_cookie;
	u32 cookie_len = 0;

	rcu_read_lock();
	act_cookie = rcu_dereference(act->act_cookie);

	if (act_cookie)
		cookie_len = nla_total_size(act_cookie->len);
	rcu_read_unlock();

	return  nla_total_size(0) /* action number nested */
		+ nla_total_size(IFNAMSIZ) /* TCA_ACT_KIND */
		+ cookie_len /* TCA_ACT_COOKIE */
		+ nla_total_size(sizeof(struct nla_bitfield32)) /* TCA_ACT_HW_STATS */
		+ nla_total_size(0) /* TCA_ACT_STATS nested */
		+ nla_total_size(sizeof(struct nla_bitfield32)) /* TCA_ACT_FLAGS */
		/* TCA_STATS_BASIC */
		+ nla_total_size_64bit(sizeof(struct gnet_stats_basic))
		/* TCA_STATS_PKT64 */
		+ nla_total_size_64bit(sizeof(u64))
		/* TCA_STATS_QUEUE */
		+ nla_total_size_64bit(sizeof(struct gnet_stats_queue))
		+ nla_total_size(0) /* TCA_OPTIONS nested */
		+ nla_total_size(sizeof(struct tcf_t)); /* TCA_GACT_TM */
}

static size_t tcf_action_full_attrs_size(size_t sz)
{
	return NLMSG_HDRLEN			/* struct nlmsghdr */
		+ sizeof(struct tcamsg)
		+ nla_total_size(0)		/* TCA_ACT_TAB nested */
		+ sz;
}

static size_t tcf_action_fill_size(const struct tc_action *act)
{
	size_t sz = tcf_action_shared_attrs_size(act);

	if (act->ops->get_fill_size)
		return act->ops->get_fill_size(act) + sz;
	return sz;
}
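/* Illustrative sketch: an action can provide ->get_fill_size() so the core
 * can size reply skbs precisely; actions without it just get the shared
 * attribute estimate above. Modeled on act_gact's callback, which accounts
 * for its TCA_GACT_PARMS attribute; the "example" name is hypothetical:
 *
 *	static size_t example_get_fill_size(const struct tc_action *act)
 *	{
 *		return nla_total_size(sizeof(struct tc_gact));
 *	}
 */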
static int
tcf_action_dump_terse(struct sk_buff *skb, struct tc_action *a, bool from_act)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_cookie *cookie;

	if (nla_put_string(skb, TCA_KIND, a->ops->kind))
		goto nla_put_failure;
	if (tcf_action_copy_stats(skb, a, 0))
		goto nla_put_failure;
	if (from_act && nla_put_u32(skb, TCA_ACT_INDEX, a->tcfa_index))
		goto nla_put_failure;

	rcu_read_lock();
	cookie = rcu_dereference(a->act_cookie);
	if (cookie) {
		if (nla_put(skb, TCA_ACT_COOKIE, cookie->len, cookie->data)) {
			rcu_read_unlock();
			goto nla_put_failure;
		}
	}
	rcu_read_unlock();

	return 0;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int tcf_dump_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb,
			   struct netlink_callback *cb)
{
	int err = 0, index = -1, s_i = 0, n_i = 0;
	u32 act_flags = cb->args[2];
	unsigned long jiffy_since = cb->args[3];
	struct nlattr *nest;
	struct idr *idr = &idrinfo->action_idr;
	struct tc_action *p;
	unsigned long id = 1;
	unsigned long tmp;

	mutex_lock(&idrinfo->lock);

	s_i = cb->args[0];

	idr_for_each_entry_ul(idr, p, tmp, id) {
		index++;
		if (index < s_i)
			continue;
		if (IS_ERR(p))
			continue;

		if (jiffy_since &&
		    time_after(jiffy_since,
			       (unsigned long)p->tcfa_tm.lastuse))
			continue;

		nest = nla_nest_start_noflag(skb, n_i);
		if (!nest) {
			index--;
			goto nla_put_failure;
		}
		err = (act_flags & TCA_ACT_FLAG_TERSE_DUMP) ?
			tcf_action_dump_terse(skb, p, true) :
			tcf_action_dump_1(skb, p, 0, 0);
		if (err < 0) {
			index--;
			nlmsg_trim(skb, nest);
			goto done;
		}
		nla_nest_end(skb, nest);
		n_i++;
		if (!(act_flags & TCA_ACT_FLAG_LARGE_DUMP_ON) &&
		    n_i >= TCA_ACT_MAX_PRIO)
			goto done;
	}
done:
	if (index >= 0)
		cb->args[0] = index + 1;

	mutex_unlock(&idrinfo->lock);
	if (n_i) {
		if (act_flags & TCA_ACT_FLAG_LARGE_DUMP_ON)
			cb->args[1] = n_i;
	}
	return n_i;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	goto done;
}

static int tcf_idr_release_unsafe(struct tc_action *p)
{
	if (atomic_read(&p->tcfa_bindcnt) > 0)
		return -EPERM;

	if (refcount_dec_and_test(&p->tcfa_refcnt)) {
		idr_remove(&p->idrinfo->action_idr, p->tcfa_index);
		tcf_action_cleanup(p);
		return ACT_P_DELETED;
	}

	return 0;
}

static int tcf_del_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb,
			  const struct tc_action_ops *ops)
{
	struct nlattr *nest;
	int n_i = 0;
	int ret = -EINVAL;
	struct idr *idr = &idrinfo->action_idr;
	struct tc_action *p;
	unsigned long id = 1;
	unsigned long tmp;

	nest = nla_nest_start_noflag(skb, 0);
	if (nest == NULL)
		goto nla_put_failure;
	if (nla_put_string(skb, TCA_KIND, ops->kind))
		goto nla_put_failure;

	mutex_lock(&idrinfo->lock);
	idr_for_each_entry_ul(idr, p, tmp, id) {
		if (IS_ERR(p))
			continue;
		ret = tcf_idr_release_unsafe(p);
		if (ret == ACT_P_DELETED) {
			module_put(ops->owner);
			n_i++;
		} else if (ret < 0) {
			mutex_unlock(&idrinfo->lock);
			goto nla_put_failure;
		}
	}
	mutex_unlock(&idrinfo->lock);

	if (nla_put_u32(skb, TCA_FCNT, n_i))
		goto nla_put_failure;
	nla_nest_end(skb, nest);

	return n_i;
nla_put_failure:
	nla_nest_cancel(skb, nest);
	return ret;
}

int tcf_generic_walker(struct tc_action_net *tn, struct sk_buff *skb,
		       struct netlink_callback *cb, int type,
		       const struct tc_action_ops *ops,
		       struct netlink_ext_ack *extack)
{
	struct tcf_idrinfo *idrinfo = tn->idrinfo;

	if (type == RTM_DELACTION) {
		return tcf_del_walker(idrinfo, skb, ops);
	} else if (type == RTM_GETACTION) {
		return tcf_dump_walker(idrinfo, skb, cb);
	} else {
		WARN(1, "tcf_generic_walker: unknown command %d\n", type);
		NL_SET_ERR_MSG(extack, "tcf_generic_walker: unknown command");
		return -EINVAL;
	}
}
EXPORT_SYMBOL(tcf_generic_walker);

int tcf_idr_search(struct tc_action_net *tn, struct tc_action **a, u32 index)
{
	struct tcf_idrinfo *idrinfo = tn->idrinfo;
	struct tc_action *p;

	mutex_lock(&idrinfo->lock);
	p = idr_find(&idrinfo->action_idr, index);
	if (IS_ERR(p))
		p = NULL;
	else if (p)
		refcount_inc(&p->tcfa_refcnt);
	mutex_unlock(&idrinfo->lock);

	if (p) {
		*a = p;
		return true;
	}
	return false;
}
EXPORT_SYMBOL(tcf_idr_search);
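/* Illustrative sketch: action modules typically implement their ->walk()
 * and ->lookup() ops as thin wrappers around the two helpers above, using
 * their per-netns tc_action_net. Modeled on act_gact; the "example" names
 * are hypothetical:
 *
 *	static int example_walker(struct net *net, struct sk_buff *skb,
 *				  struct netlink_callback *cb, int type,
 *				  const struct tc_action_ops *ops,
 *				  struct netlink_ext_ack *extack)
 *	{
 *		struct tc_action_net *tn = net_generic(net, example_net_id);
 *
 *		return tcf_generic_walker(tn, skb, cb, type, ops, extack);
 *	}
 *
 *	static int example_search(struct net *net, struct tc_action **a,
 *				  u32 index)
 *	{
 *		struct tc_action_net *tn = net_generic(net, example_net_id);
 *
 *		return tcf_idr_search(tn, a, index);
 *	}
 */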
static int tcf_idr_delete_index(struct tcf_idrinfo *idrinfo, u32 index)
{
	struct tc_action *p;
	int ret = 0;

	mutex_lock(&idrinfo->lock);
	p = idr_find(&idrinfo->action_idr, index);
	if (!p) {
		mutex_unlock(&idrinfo->lock);
		return -ENOENT;
	}

	if (!atomic_read(&p->tcfa_bindcnt)) {
		if (refcount_dec_and_test(&p->tcfa_refcnt)) {
			struct module *owner = p->ops->owner;

			WARN_ON(p != idr_remove(&idrinfo->action_idr,
						p->tcfa_index));
			mutex_unlock(&idrinfo->lock);

			tcf_action_cleanup(p);
			module_put(owner);
			return 0;
		}
		ret = 0;
	} else {
		ret = -EPERM;
	}

	mutex_unlock(&idrinfo->lock);
	return ret;
}

int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est,
		   struct tc_action **a, const struct tc_action_ops *ops,
		   int bind, bool cpustats, u32 flags)
{
	struct tc_action *p = kzalloc(ops->size, GFP_KERNEL);
	struct tcf_idrinfo *idrinfo = tn->idrinfo;
	int err = -ENOMEM;

	if (unlikely(!p))
		return -ENOMEM;
	refcount_set(&p->tcfa_refcnt, 1);
	if (bind)
		atomic_set(&p->tcfa_bindcnt, 1);

	if (cpustats) {
		p->cpu_bstats = netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu);
		if (!p->cpu_bstats)
			goto err1;
		p->cpu_bstats_hw = netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu);
		if (!p->cpu_bstats_hw)
			goto err2;
		p->cpu_qstats = alloc_percpu(struct gnet_stats_queue);
		if (!p->cpu_qstats)
			goto err3;
	}
	spin_lock_init(&p->tcfa_lock);
	p->tcfa_index = index;
	p->tcfa_tm.install = jiffies;
	p->tcfa_tm.lastuse = jiffies;
	p->tcfa_tm.firstuse = 0;
	p->tcfa_flags = flags;
	if (est) {
		err = gen_new_estimator(&p->tcfa_bstats, p->cpu_bstats,
					&p->tcfa_rate_est,
					&p->tcfa_lock, NULL, est);
		if (err)
			goto err4;
	}

	p->idrinfo = idrinfo;
	__module_get(ops->owner);
	p->ops = ops;
	*a = p;
	return 0;
err4:
	free_percpu(p->cpu_qstats);
err3:
	free_percpu(p->cpu_bstats_hw);
err2:
	free_percpu(p->cpu_bstats);
err1:
	kfree(p);
	return err;
}
EXPORT_SYMBOL(tcf_idr_create);

int tcf_idr_create_from_flags(struct tc_action_net *tn, u32 index,
			      struct nlattr *est, struct tc_action **a,
			      const struct tc_action_ops *ops, int bind,
			      u32 flags)
{
	/* Set cpustats according to actions flags. */
	return tcf_idr_create(tn, index, est, a, ops, bind,
			      !(flags & TCA_ACT_FLAGS_NO_PERCPU_STATS), flags);
}
EXPORT_SYMBOL(tcf_idr_create_from_flags);

/* Cleanup idr index that was allocated but not initialized. */

void tcf_idr_cleanup(struct tc_action_net *tn, u32 index)
{
	struct tcf_idrinfo *idrinfo = tn->idrinfo;

	mutex_lock(&idrinfo->lock);
	/* Remove ERR_PTR(-EBUSY) allocated by tcf_idr_check_alloc */
	WARN_ON(!IS_ERR(idr_remove(&idrinfo->action_idr, index)));
	mutex_unlock(&idrinfo->lock);
}
EXPORT_SYMBOL(tcf_idr_cleanup);
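/* Illustrative sketch: the tcfa_tm timestamps and (per-cpu) counters set up
 * by tcf_idr_create() are maintained from the action's ->act() callback on
 * the fast path. Loosely modeled on act_gact; "p" is hypothetical:
 *
 *	tcf_lastuse_update(&p->tcfa_tm);
 *	bstats_cpu_update(this_cpu_ptr(p->cpu_bstats), skb);
 */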
/* Check if an action with the specified index exists. If the action is
 * found, increment its reference and bind counters and return 1. Otherwise,
 * insert a temporary error pointer (to prevent concurrent users from
 * inserting actions with the same index) and return 0.
 */

int tcf_idr_check_alloc(struct tc_action_net *tn, u32 *index,
			struct tc_action **a, int bind)
{
	struct tcf_idrinfo *idrinfo = tn->idrinfo;
	struct tc_action *p;
	int ret;

again:
	mutex_lock(&idrinfo->lock);
	if (*index) {
		p = idr_find(&idrinfo->action_idr, *index);
		if (IS_ERR(p)) {
			/* This means that another process allocated
			 * index but did not assign the pointer yet.
			 */
			mutex_unlock(&idrinfo->lock);
			goto again;
		}

		if (p) {
			refcount_inc(&p->tcfa_refcnt);
			if (bind)
				atomic_inc(&p->tcfa_bindcnt);
			*a = p;
			ret = 1;
		} else {
			*a = NULL;
			ret = idr_alloc_u32(&idrinfo->action_idr, NULL, index,
					    *index, GFP_KERNEL);
			if (!ret)
				idr_replace(&idrinfo->action_idr,
					    ERR_PTR(-EBUSY), *index);
		}
	} else {
		*index = 1;
		*a = NULL;
		ret = idr_alloc_u32(&idrinfo->action_idr, NULL, index,
				    UINT_MAX, GFP_KERNEL);
		if (!ret)
			idr_replace(&idrinfo->action_idr, ERR_PTR(-EBUSY),
				    *index);
	}
	mutex_unlock(&idrinfo->lock);
	return ret;
}
EXPORT_SYMBOL(tcf_idr_check_alloc);
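/* Illustrative sketch: the canonical allocation dance in an action ->init(),
 * combining tcf_idr_check_alloc(), tcf_idr_create_from_flags() and
 * tcf_idr_cleanup(). Modeled on act_gact's init path; "example_act_ops" and
 * the surrounding variables are hypothetical:
 *
 *	err = tcf_idr_check_alloc(tn, &index, a, bind);
 *	if (!err) {
 *		// index reserved: create new action behind ERR_PTR(-EBUSY)
 *		ret = tcf_idr_create_from_flags(tn, index, est, a,
 *						&example_act_ops, bind, flags);
 *		if (ret) {
 *			tcf_idr_cleanup(tn, index);
 *			return ret;
 *		}
 *		ret = ACT_P_CREATED;
 *	} else if (err > 0) {
 *		if (bind)	// found an existing action to bind to
 *			return 0;
 *		if (!ovr) {
 *			tcf_idr_release(*a, bind);
 *			return -EEXIST;
 *		}
 *	} else {
 *		return err;
 *	}
 */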
void tcf_idrinfo_destroy(const struct tc_action_ops *ops,
			 struct tcf_idrinfo *idrinfo)
{
	struct idr *idr = &idrinfo->action_idr;
	struct tc_action *p;
	int ret;
	unsigned long id = 1;
	unsigned long tmp;

	idr_for_each_entry_ul(idr, p, tmp, id) {
		ret = __tcf_idr_release(p, false, true);
		if (ret == ACT_P_DELETED)
			module_put(ops->owner);
		else if (ret < 0)
			return;
	}
	idr_destroy(&idrinfo->action_idr);
}
EXPORT_SYMBOL(tcf_idrinfo_destroy);

static LIST_HEAD(act_base);
static DEFINE_RWLOCK(act_mod_lock);

int tcf_register_action(struct tc_action_ops *act,
			struct pernet_operations *ops)
{
	struct tc_action_ops *a;
	int ret;

	if (!act->act || !act->dump || !act->init || !act->walk || !act->lookup)
		return -EINVAL;

	/* We have to register pernet ops before making the action ops visible,
	 * otherwise tcf_action_init_1() could get a partially initialized
	 * netns.
	 */
	ret = register_pernet_subsys(ops);
	if (ret)
		return ret;

	write_lock(&act_mod_lock);
	list_for_each_entry(a, &act_base, head) {
		if (act->id == a->id || (strcmp(act->kind, a->kind) == 0)) {
			write_unlock(&act_mod_lock);
			unregister_pernet_subsys(ops);
			return -EEXIST;
		}
	}
	list_add_tail(&act->head, &act_base);
	write_unlock(&act_mod_lock);

	return 0;
}
EXPORT_SYMBOL(tcf_register_action);

int tcf_unregister_action(struct tc_action_ops *act,
			  struct pernet_operations *ops)
{
	struct tc_action_ops *a;
	int err = -ENOENT;

	write_lock(&act_mod_lock);
	list_for_each_entry(a, &act_base, head) {
		if (a == act) {
			list_del(&act->head);
			err = 0;
			break;
		}
	}
	write_unlock(&act_mod_lock);
	if (!err)
		unregister_pernet_subsys(ops);
	return err;
}
EXPORT_SYMBOL(tcf_unregister_action);
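/* Illustrative sketch: an action module pairs tcf_register_action() with
 * per-netns state sized for a struct tc_action_net. Modeled on act_gact's
 * module init; the "example" identifiers are hypothetical:
 *
 *	static struct pernet_operations example_net_ops = {
 *		.init = example_init_net,
 *		.exit_batch = example_exit_net,
 *		.id = &example_net_id,
 *		.size = sizeof(struct tc_action_net),
 *	};
 *
 *	static int __init example_init_module(void)
 *	{
 *		return tcf_register_action(&act_example_ops, &example_net_ops);
 *	}
 *
 *	static void __exit example_cleanup_module(void)
 *	{
 *		tcf_unregister_action(&act_example_ops, &example_net_ops);
 *	}
 */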
/* lookup by name */
static struct tc_action_ops *tc_lookup_action_n(char *kind)
{
	struct tc_action_ops *a, *res = NULL;

	if (kind) {
		read_lock(&act_mod_lock);
		list_for_each_entry(a, &act_base, head) {
			if (strcmp(kind, a->kind) == 0) {
				if (try_module_get(a->owner))
					res = a;
				break;
			}
		}
		read_unlock(&act_mod_lock);
	}
	return res;
}

/* lookup by nlattr */
static struct tc_action_ops *tc_lookup_action(struct nlattr *kind)
{
	struct tc_action_ops *a, *res = NULL;

	if (kind) {
		read_lock(&act_mod_lock);
		list_for_each_entry(a, &act_base, head) {
			if (nla_strcmp(kind, a->kind) == 0) {
				if (try_module_get(a->owner))
					res = a;
				break;
			}
		}
		read_unlock(&act_mod_lock);
	}
	return res;
}

/* TCA_ACT_MAX_PRIO is 32, so a jump count can be at most 32 */
#define TCA_ACT_MAX_PRIO_MASK 0x1FF
int tcf_action_exec(struct sk_buff *skb, struct tc_action **actions,
		    int nr_actions, struct tcf_result *res)
{
	u32 jmp_prgcnt = 0;
	u32 jmp_ttl = TCA_ACT_MAX_PRIO; /* matches actions per filter */
	int i;
	int ret = TC_ACT_OK;

	if (skb_skip_tc_classify(skb))
		return TC_ACT_OK;

restart_act_graph:
	for (i = 0; i < nr_actions; i++) {
		const struct tc_action *a = actions[i];

		if (jmp_prgcnt > 0) {
			jmp_prgcnt -= 1;
			continue;
		}
repeat:
		ret = a->ops->act(skb, a, res);
		if (ret == TC_ACT_REPEAT)
			goto repeat;	/* we need a ttl - JHS */

		if (TC_ACT_EXT_CMP(ret, TC_ACT_JUMP)) {
			jmp_prgcnt = ret & TCA_ACT_MAX_PRIO_MASK;
			if (!jmp_prgcnt || (jmp_prgcnt > nr_actions)) {
				/* faulty opcode, stop pipeline */
				return TC_ACT_OK;
			} else {
				jmp_ttl -= 1;
				if (jmp_ttl > 0)
					goto restart_act_graph;
				else /* faulty graph, stop pipeline */
					return TC_ACT_OK;
			}
		} else if (TC_ACT_EXT_CMP(ret, TC_ACT_GOTO_CHAIN)) {
			if (unlikely(!rcu_access_pointer(a->goto_chain))) {
				net_warn_ratelimited("can't go to NULL chain!\n");
				return TC_ACT_SHOT;
			}
			tcf_action_goto_chain_exec(a, res);
		}

		if (ret != TC_ACT_PIPE)
			break;
	}

	return ret;
}
EXPORT_SYMBOL(tcf_action_exec);
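/* Illustrative sketch: classifiers run their bound actions through
 * tcf_action_exec() from their ->classify() callback, typically via the
 * tcf_exts wrapper in pkt_cls.h, which does roughly:
 *
 *	ret = tcf_action_exec(skb, exts->actions, exts->nr_actions, res);
 *	if (ret != TC_ACT_PIPE)
 *		return ret;
 */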
int tcf_action_destroy(struct tc_action *actions[], int bind)
{
	const struct tc_action_ops *ops;
	struct tc_action *a;
	int ret = 0, i;

	for (i = 0; i < TCA_ACT_MAX_PRIO && actions[i]; i++) {
		a = actions[i];
		actions[i] = NULL;
		ops = a->ops;
		ret = __tcf_idr_release(a, bind, true);
		if (ret == ACT_P_DELETED)
			module_put(ops->owner);
		else if (ret < 0)
			return ret;
	}
	return ret;
}

static int tcf_action_put(struct tc_action *p)
{
	return __tcf_action_put(p, false);
}

/* Put all actions in this array, skipping NULL entries. */
static void tcf_action_put_many(struct tc_action *actions[])
{
	int i;

	for (i = 0; i < TCA_ACT_MAX_PRIO; i++) {
		struct tc_action *a = actions[i];
		const struct tc_action_ops *ops;

		if (!a)
			continue;
		ops = a->ops;
		if (tcf_action_put(a))
			module_put(ops->owner);
	}
}

int
tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
{
	return a->ops->dump(skb, a, bind, ref);
}

int
tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
{
	int err = -EINVAL;
	unsigned char *b = skb_tail_pointer(skb);
	struct nlattr *nest;

	if (tcf_action_dump_terse(skb, a, false))
		goto nla_put_failure;

	if (a->hw_stats != TCA_ACT_HW_STATS_ANY &&
	    nla_put_bitfield32(skb, TCA_ACT_HW_STATS,
			       a->hw_stats, TCA_ACT_HW_STATS_ANY))
		goto nla_put_failure;

	if (a->used_hw_stats_valid &&
	    nla_put_bitfield32(skb, TCA_ACT_USED_HW_STATS,
			       a->used_hw_stats, TCA_ACT_HW_STATS_ANY))
		goto nla_put_failure;

	if (a->tcfa_flags &&
	    nla_put_bitfield32(skb, TCA_ACT_FLAGS,
			       a->tcfa_flags, a->tcfa_flags))
		goto nla_put_failure;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;
	err = tcf_action_dump_old(skb, a, bind, ref);
	if (err > 0) {
		nla_nest_end(skb, nest);
		return err;
	}

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}
EXPORT_SYMBOL(tcf_action_dump_1);

int tcf_action_dump(struct sk_buff *skb, struct tc_action *actions[],
		    int bind, int ref, bool terse)
{
	struct tc_action *a;
	int err = -EINVAL, i;
	struct nlattr *nest;

	for (i = 0; i < TCA_ACT_MAX_PRIO && actions[i]; i++) {
		a = actions[i];
		nest = nla_nest_start_noflag(skb, i + 1);
		if (nest == NULL)
			goto nla_put_failure;
		err = terse ? tcf_action_dump_terse(skb, a, false) :
			tcf_action_dump_1(skb, a, bind, ref);
		if (err < 0)
			goto errout;
		nla_nest_end(skb, nest);
	}

	return 0;

nla_put_failure:
	err = -EINVAL;
errout:
	nla_nest_cancel(skb, nest);
	return err;
}

static struct tc_cookie *nla_memdup_cookie(struct nlattr **tb)
{
	struct tc_cookie *c = kzalloc(sizeof(*c), GFP_KERNEL);

	if (!c)
		return NULL;

	c->data = nla_memdup(tb[TCA_ACT_COOKIE], GFP_KERNEL);
	if (!c->data) {
		kfree(c);
		return NULL;
	}
	c->len = nla_len(tb[TCA_ACT_COOKIE]);

	return c;
}
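/* Illustrative note: the cookie duplicated above is an opaque user blob;
 * the kernel stores it verbatim and echoes it back on dumps. From iproute2
 * it can be attached to any action, e.g. (example command):
 *
 *	tc actions add action gact drop index 1 cookie aabbccdd
 */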
static u8 tcf_action_hw_stats_get(struct nlattr *hw_stats_attr)
{
	struct nla_bitfield32 hw_stats_bf;

	/* If the user did not pass the attr, that means they do not care
	 * about the type. Return "any" in that case, which has all
	 * supported types set.
	 */
	if (!hw_stats_attr)
		return TCA_ACT_HW_STATS_ANY;
	hw_stats_bf = nla_get_bitfield32(hw_stats_attr);
	return hw_stats_bf.value;
}

static const struct nla_policy tcf_action_policy[TCA_ACT_MAX + 1] = {
	[TCA_ACT_KIND]     = { .type = NLA_STRING },
	[TCA_ACT_INDEX]    = { .type = NLA_U32 },
	[TCA_ACT_COOKIE]   = { .type = NLA_BINARY,
			       .len = TC_COOKIE_MAX_SIZE },
	[TCA_ACT_OPTIONS]  = { .type = NLA_NESTED },
	[TCA_ACT_FLAGS]    = NLA_POLICY_BITFIELD32(TCA_ACT_FLAGS_NO_PERCPU_STATS),
	[TCA_ACT_HW_STATS] = NLA_POLICY_BITFIELD32(TCA_ACT_HW_STATS_ANY),
};

void tcf_idr_insert_many(struct tc_action *actions[])
{
	int i;

	for (i = 0; i < TCA_ACT_MAX_PRIO; i++) {
		struct tc_action *a = actions[i];
		struct tcf_idrinfo *idrinfo;

		if (!a)
			continue;
		idrinfo = a->idrinfo;
		mutex_lock(&idrinfo->lock);
		/* Replace ERR_PTR(-EBUSY) allocated by tcf_idr_check_alloc if
		 * it was just created, otherwise this is just a nop.
		 */
		idr_replace(&idrinfo->action_idr, a, a->tcfa_index);
		mutex_unlock(&idrinfo->lock);
	}
}

struct tc_action_ops *tc_action_load_ops(char *name, struct nlattr *nla,
					 bool rtnl_held,
					 struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_ACT_MAX + 1];
	struct tc_action_ops *a_o;
	char act_name[IFNAMSIZ];
	struct nlattr *kind;
	int err;

	if (name == NULL) {
		err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX, nla,
						  tcf_action_policy, extack);
		if (err < 0)
			return ERR_PTR(err);
		err = -EINVAL;
		kind = tb[TCA_ACT_KIND];
		if (!kind) {
			NL_SET_ERR_MSG(extack, "TC action kind must be specified");
			return ERR_PTR(err);
		}
		if (nla_strscpy(act_name, kind, IFNAMSIZ) < 0) {
			NL_SET_ERR_MSG(extack, "TC action name too long");
			return ERR_PTR(err);
		}
	} else {
		if (strlcpy(act_name, name, IFNAMSIZ) >= IFNAMSIZ) {
			NL_SET_ERR_MSG(extack, "TC action name too long");
			return ERR_PTR(-EINVAL);
		}
	}

	a_o = tc_lookup_action_n(act_name);
	if (a_o == NULL) {
#ifdef CONFIG_MODULES
		if (rtnl_held)
			rtnl_unlock();
		request_module("act_%s", act_name);
		if (rtnl_held)
			rtnl_lock();

		a_o = tc_lookup_action_n(act_name);

		/* We dropped the RTNL semaphore in order to
		 * perform the module load. So, even if we
		 * succeeded in loading the module we have to
		 * tell the caller to replay the request. We
		 * indicate this using -EAGAIN.
		 */
		if (a_o != NULL) {
			module_put(a_o->owner);
			return ERR_PTR(-EAGAIN);
		}
#endif
		NL_SET_ERR_MSG(extack, "Failed to load TC action module");
		return ERR_PTR(-ENOENT);
	}

	return a_o;
}
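/* Illustrative note: the attributes parsed above arrive nested under one
 * priority slot of TCA_ACT_TAB. An RTM_NEWACTION request creating a gact
 * "drop" therefore looks roughly like this (layout sketch, values
 * abbreviated):
 *
 *	RTM_NEWACTION, struct tcamsg
 *	  TCA_ACT_TAB
 *	    1				(priority/order of the action)
 *	      TCA_ACT_KIND "gact"
 *	      TCA_ACT_OPTIONS
 *	        TCA_GACT_PARMS { .action = TC_ACT_SHOT, ... }
 */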
struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
				    struct nlattr *nla, struct nlattr *est,
				    char *name, int ovr, int bind,
				    struct tc_action_ops *a_o, int *init_res,
				    bool rtnl_held,
				    struct netlink_ext_ack *extack)
{
	struct nla_bitfield32 flags = { 0, 0 };
	u8 hw_stats = TCA_ACT_HW_STATS_ANY;
	struct nlattr *tb[TCA_ACT_MAX + 1];
	struct tc_cookie *cookie = NULL;
	struct tc_action *a;
	int err;

	/* backward compatibility for policer */
	if (name == NULL) {
		err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX, nla,
						  tcf_action_policy, extack);
		if (err < 0)
			return ERR_PTR(err);
		if (tb[TCA_ACT_COOKIE]) {
			cookie = nla_memdup_cookie(tb);
			if (!cookie) {
				NL_SET_ERR_MSG(extack, "No memory to generate TC cookie");
				err = -ENOMEM;
				goto err_out;
			}
		}
		hw_stats = tcf_action_hw_stats_get(tb[TCA_ACT_HW_STATS]);
		if (tb[TCA_ACT_FLAGS])
			flags = nla_get_bitfield32(tb[TCA_ACT_FLAGS]);

		err = a_o->init(net, tb[TCA_ACT_OPTIONS], est, &a, ovr, bind,
				rtnl_held, tp, flags.value, extack);
	} else {
		err = a_o->init(net, nla, est, &a, ovr, bind, rtnl_held,
				tp, flags.value, extack);
	}
	if (err < 0)
		goto err_out;
	*init_res = err;

	if (!name && tb[TCA_ACT_COOKIE])
		tcf_set_action_cookie(&a->act_cookie, cookie);

	if (!name)
		a->hw_stats = hw_stats;

	return a;

err_out:
	if (cookie) {
		kfree(cookie->data);
		kfree(cookie);
	}
	return ERR_PTR(err);
}

/* Returns the number of initialized actions or a negative error. */

int tcf_action_init(struct net *net, struct tcf_proto *tp, struct nlattr *nla,
		    struct nlattr *est, char *name, int ovr, int bind,
		    struct tc_action *actions[], int init_res[], size_t *attr_size,
		    bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct tc_action_ops *ops[TCA_ACT_MAX_PRIO] = {};
	struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
	struct tc_action *act;
	size_t sz = 0;
	int err;
	int i;

	err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX_PRIO, nla, NULL,
					  extack);
	if (err < 0)
		return err;

	for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
		struct tc_action_ops *a_o;

		a_o = tc_action_load_ops(name, tb[i], rtnl_held, extack);
		if (IS_ERR(a_o)) {
			err = PTR_ERR(a_o);
			goto err_mod;
		}
		ops[i - 1] = a_o;
	}

	for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
		act = tcf_action_init_1(net, tp, tb[i], est, name, ovr, bind,
					ops[i - 1], &init_res[i - 1], rtnl_held,
					extack);
		if (IS_ERR(act)) {
			err = PTR_ERR(act);
			goto err;
		}
		sz += tcf_action_fill_size(act);
		/* Start from index 0 */
		actions[i - 1] = act;
	}

	/* We have to commit them all together, because if any error happens
	 * in between, we cannot handle the failure gracefully.
	 */
	tcf_idr_insert_many(actions);

	*attr_size = tcf_action_full_attrs_size(sz);
	err = i - 1;
	goto err_mod;

err:
	tcf_action_destroy(actions, bind);
err_mod:
	for (i = 0; i < TCA_ACT_MAX_PRIO; i++) {
		if (ops[i])
			module_put(ops[i]->owner);
	}
	return err;
}

void tcf_action_update_stats(struct tc_action *a, u64 bytes, u64 packets,
			     u64 drops, bool hw)
{
	if (a->cpu_bstats) {
		_bstats_cpu_update(this_cpu_ptr(a->cpu_bstats), bytes, packets);

		this_cpu_ptr(a->cpu_qstats)->drops += drops;

		if (hw)
			_bstats_cpu_update(this_cpu_ptr(a->cpu_bstats_hw),
					   bytes, packets);
		return;
	}

	_bstats_update(&a->tcfa_bstats, bytes, packets);
	a->tcfa_qstats.drops += drops;
	if (hw)
		_bstats_update(&a->tcfa_bstats_hw, bytes, packets);
}
EXPORT_SYMBOL(tcf_action_update_stats);
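/* Illustrative sketch: offloading drivers feed hardware counters back
 * through the action's ->stats_update() callback, which typically wraps
 * tcf_action_update_stats(). Modeled on act_gact's callback; the "example"
 * name is hypothetical:
 *
 *	static void example_stats_update(struct tc_action *a, u64 bytes,
 *					 u64 packets, u64 drops, u64 lastuse,
 *					 bool hw)
 *	{
 *		struct tcf_t *tm = &a->tcfa_tm;
 *
 *		tcf_action_update_stats(a, bytes, packets, drops, hw);
 *		tm->lastuse = max_t(u64, tm->lastuse, lastuse);
 *	}
 */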
int tcf_action_copy_stats(struct sk_buff *skb, struct tc_action *p,
			  int compat_mode)
{
	int err = 0;
	struct gnet_dump d;

	if (p == NULL)
		goto errout;

	/* compat_mode being true specifies a call that is supposed
	 * to add additional backward compatibility statistic TLVs.
	 */
	if (compat_mode) {
		if (p->type == TCA_OLD_COMPAT)
			err = gnet_stats_start_copy_compat(skb, 0,
							   TCA_STATS,
							   TCA_XSTATS,
							   &p->tcfa_lock, &d,
							   TCA_PAD);
		else
			return 0;
	} else
		err = gnet_stats_start_copy(skb, TCA_ACT_STATS,
					    &p->tcfa_lock, &d, TCA_ACT_PAD);

	if (err < 0)
		goto errout;

	if (gnet_stats_copy_basic(NULL, &d, p->cpu_bstats, &p->tcfa_bstats) < 0 ||
	    gnet_stats_copy_basic_hw(NULL, &d, p->cpu_bstats_hw,
				     &p->tcfa_bstats_hw) < 0 ||
	    gnet_stats_copy_rate_est(&d, &p->tcfa_rate_est) < 0 ||
	    gnet_stats_copy_queue(&d, p->cpu_qstats,
				  &p->tcfa_qstats,
				  p->tcfa_qstats.qlen) < 0)
		goto errout;

	if (gnet_stats_finish_copy(&d) < 0)
		goto errout;

	return 0;

errout:
	return -1;
}

static int tca_get_fill(struct sk_buff *skb, struct tc_action *actions[],
			u32 portid, u32 seq, u16 flags, int event, int bind,
			int ref)
{
	struct tcamsg *t;
	struct nlmsghdr *nlh;
	unsigned char *b = skb_tail_pointer(skb);
	struct nlattr *nest;

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*t), flags);
	if (!nlh)
		goto out_nlmsg_trim;
	t = nlmsg_data(nlh);
	t->tca_family = AF_UNSPEC;
	t->tca__pad1 = 0;
	t->tca__pad2 = 0;

	nest = nla_nest_start_noflag(skb, TCA_ACT_TAB);
	if (!nest)
		goto out_nlmsg_trim;

	if (tcf_action_dump(skb, actions, bind, ref, false) < 0)
		goto out_nlmsg_trim;

	nla_nest_end(skb, nest);

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

out_nlmsg_trim:
	nlmsg_trim(skb, b);
	return -1;
}

static int
tcf_get_notify(struct net *net, u32 portid, struct nlmsghdr *n,
	       struct tc_action *actions[], int event,
	       struct netlink_ext_ack *extack)
{
	struct sk_buff *skb;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;
	if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, 0, event,
			 0, 1) <= 0) {
		NL_SET_ERR_MSG(extack, "Failed to fill netlink attributes while adding TC action");
		kfree_skb(skb);
		return -EINVAL;
	}

	return rtnl_unicast(skb, net, portid);
}

static struct tc_action *tcf_action_get_1(struct net *net, struct nlattr *nla,
					  struct nlmsghdr *n, u32 portid,
					  struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_ACT_MAX + 1];
	const struct tc_action_ops *ops;
	struct tc_action *a;
	int index;
	int err;

	err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX, nla,
					  tcf_action_policy, extack);
	if (err < 0)
		goto err_out;

	err = -EINVAL;
	if (tb[TCA_ACT_INDEX] == NULL ||
	    nla_len(tb[TCA_ACT_INDEX]) < sizeof(index)) {
		NL_SET_ERR_MSG(extack, "Invalid TC action index value");
		goto err_out;
	}
	index = nla_get_u32(tb[TCA_ACT_INDEX]);

	err = -EINVAL;
	ops = tc_lookup_action(tb[TCA_ACT_KIND]);
	if (!ops) { /* can happen in a batch of actions */
		NL_SET_ERR_MSG(extack, "Specified TC action kind not found");
		goto err_out;
	}
	err = -ENOENT;
	if (ops->lookup(net, &a, index) == 0) {
		NL_SET_ERR_MSG(extack, "TC action with specified index not found");
		goto err_mod;
	}

	module_put(ops->owner);
	return a;

err_mod:
	module_put(ops->owner);
err_out:
	return ERR_PTR(err);
}
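/* Illustrative note: tcf_action_get_1() serves both RTM_GETACTION and
 * RTM_DELACTION lookups by kind + index, which correspond to iproute2
 * commands such as (example commands):
 *
 *	tc actions get action gact index 1
 *	tc actions del action gact index 1
 */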
static int tca_action_flush(struct net *net, struct nlattr *nla,
			    struct nlmsghdr *n, u32 portid,
			    struct netlink_ext_ack *extack)
{
	struct sk_buff *skb;
	unsigned char *b;
	struct nlmsghdr *nlh;
	struct tcamsg *t;
	struct netlink_callback dcb;
	struct nlattr *nest;
	struct nlattr *tb[TCA_ACT_MAX + 1];
	const struct tc_action_ops *ops;
	struct nlattr *kind;
	int err = -ENOMEM;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return err;

	b = skb_tail_pointer(skb);

	err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX, nla,
					  tcf_action_policy, extack);
	if (err < 0)
		goto err_out;

	err = -EINVAL;
	kind = tb[TCA_ACT_KIND];
	ops = tc_lookup_action(kind);
	if (!ops) { /* flush request for an unknown action */
		NL_SET_ERR_MSG(extack, "Cannot flush unknown TC action");
		goto err_out;
	}

	nlh = nlmsg_put(skb, portid, n->nlmsg_seq, RTM_DELACTION,
			sizeof(*t), 0);
	if (!nlh) {
		NL_SET_ERR_MSG(extack, "Failed to create TC action flush notification");
		goto out_module_put;
	}
	t = nlmsg_data(nlh);
	t->tca_family = AF_UNSPEC;
	t->tca__pad1 = 0;
	t->tca__pad2 = 0;

	nest = nla_nest_start_noflag(skb, TCA_ACT_TAB);
	if (!nest) {
		NL_SET_ERR_MSG(extack, "Failed to add new netlink message");
		goto out_module_put;
	}

	err = ops->walk(net, skb, &dcb, RTM_DELACTION, ops, extack);
	if (err <= 0) {
		nla_nest_cancel(skb, nest);
		goto out_module_put;
	}

	nla_nest_end(skb, nest);

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	nlh->nlmsg_flags |= NLM_F_ROOT;
	module_put(ops->owner);
	err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
			     n->nlmsg_flags & NLM_F_ECHO);
	if (err > 0)
		return 0;
	if (err < 0)
		NL_SET_ERR_MSG(extack, "Failed to send TC action flush notification");

	return err;

out_module_put:
	module_put(ops->owner);
err_out:
	kfree_skb(skb);
	return err;
}

static int tcf_action_delete(struct net *net, struct tc_action *actions[])
{
	int i;

	for (i = 0; i < TCA_ACT_MAX_PRIO && actions[i]; i++) {
		struct tc_action *a = actions[i];
		const struct tc_action_ops *ops = a->ops;
		/* Actions can be deleted concurrently so we must save their
		 * type and id to search again after reference is released.
		 */
		struct tcf_idrinfo *idrinfo = a->idrinfo;
		u32 act_index = a->tcfa_index;

		actions[i] = NULL;
		if (tcf_action_put(a)) {
			/* last reference, action was deleted concurrently */
			module_put(ops->owner);
		} else {
			int ret;

			/* now do the delete */
			ret = tcf_idr_delete_index(idrinfo, act_index);
			if (ret < 0)
				return ret;
		}
	}
	return 0;
}

static int
tcf_del_notify(struct net *net, struct nlmsghdr *n, struct tc_action *actions[],
	       u32 portid, size_t attr_size, struct netlink_ext_ack *extack)
{
	int ret;
	struct sk_buff *skb;

	skb = alloc_skb(attr_size <= NLMSG_GOODSIZE ? NLMSG_GOODSIZE : attr_size,
			GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, 0, RTM_DELACTION,
			 0, 2) <= 0) {
		NL_SET_ERR_MSG(extack, "Failed to fill netlink TC action attributes");
		kfree_skb(skb);
		return -EINVAL;
	}

	/* now do the delete */
	ret = tcf_action_delete(net, actions);
	if (ret < 0) {
		NL_SET_ERR_MSG(extack, "Failed to delete TC action");
		kfree_skb(skb);
		return ret;
	}

	ret = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
			     n->nlmsg_flags & NLM_F_ECHO);
	if (ret > 0)
		return 0;
	return ret;
}
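/* Illustrative note: a flush targets every action of one kind and maps to
 * the iproute2 command (example):
 *
 *	tc actions flush action gact
 */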
static int
tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
	      u32 portid, int event, struct netlink_ext_ack *extack)
{
	int i, ret;
	struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
	struct tc_action *act;
	size_t attr_size = 0;
	struct tc_action *actions[TCA_ACT_MAX_PRIO] = {};

	ret = nla_parse_nested_deprecated(tb, TCA_ACT_MAX_PRIO, nla, NULL,
					  extack);
	if (ret < 0)
		return ret;

	if (event == RTM_DELACTION && n->nlmsg_flags & NLM_F_ROOT) {
		if (tb[1])
			return tca_action_flush(net, tb[1], n, portid, extack);

		NL_SET_ERR_MSG(extack, "Invalid netlink attributes while flushing TC action");
		return -EINVAL;
	}

	for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
		act = tcf_action_get_1(net, tb[i], n, portid, extack);
		if (IS_ERR(act)) {
			ret = PTR_ERR(act);
			goto err;
		}
		attr_size += tcf_action_fill_size(act);
		actions[i - 1] = act;
	}

	attr_size = tcf_action_full_attrs_size(attr_size);

	if (event == RTM_GETACTION)
		ret = tcf_get_notify(net, portid, n, actions, event, extack);
	else { /* delete */
		ret = tcf_del_notify(net, n, actions, portid, attr_size, extack);
		if (ret)
			goto err;
		return 0;
	}
err:
	tcf_action_put_many(actions);
	return ret;
}

static int
tcf_add_notify(struct net *net, struct nlmsghdr *n, struct tc_action *actions[],
	       u32 portid, size_t attr_size, struct netlink_ext_ack *extack)
{
	struct sk_buff *skb;
	int err = 0;

	skb = alloc_skb(attr_size <= NLMSG_GOODSIZE ? NLMSG_GOODSIZE : attr_size,
			GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, n->nlmsg_flags,
			 RTM_NEWACTION, 0, 0) <= 0) {
		NL_SET_ERR_MSG(extack, "Failed to fill netlink attributes while adding TC action");
		kfree_skb(skb);
		return -EINVAL;
	}

	err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
			     n->nlmsg_flags & NLM_F_ECHO);
	if (err > 0)
		err = 0;
	return err;
}

static int tcf_action_add(struct net *net, struct nlattr *nla,
			  struct nlmsghdr *n, u32 portid, int ovr,
			  struct netlink_ext_ack *extack)
{
	size_t attr_size = 0;
	int loop, ret, i;
	struct tc_action *actions[TCA_ACT_MAX_PRIO] = {};
	int init_res[TCA_ACT_MAX_PRIO] = {};

	for (loop = 0; loop < 10; loop++) {
		ret = tcf_action_init(net, NULL, nla, NULL, NULL, ovr, 0,
				      actions, init_res, &attr_size, true, extack);
		if (ret != -EAGAIN)
			break;
	}

	if (ret < 0)
		return ret;
	ret = tcf_add_notify(net, n, actions, portid, attr_size, extack);

	/* only put existing actions */
	for (i = 0; i < TCA_ACT_MAX_PRIO; i++)
		if (init_res[i] == ACT_P_CREATED)
			actions[i] = NULL;
	tcf_action_put_many(actions);

	return ret;
}

static const struct nla_policy tcaa_policy[TCA_ROOT_MAX + 1] = {
	[TCA_ROOT_FLAGS] = NLA_POLICY_BITFIELD32(TCA_ACT_FLAG_LARGE_DUMP_ON |
						 TCA_ACT_FLAG_TERSE_DUMP),
	[TCA_ROOT_TIME_DELTA] = { .type = NLA_U32 },
};
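/* Illustrative note: the TCA_ROOT_* attributes above modify RTM_GETACTION
 * dump requests. A dump asking for large batches of a single kind looks
 * roughly like this (layout sketch):
 *
 *	RTM_GETACTION (NLM_F_REQUEST | NLM_F_DUMP), struct tcamsg
 *	  TCA_ACT_TAB
 *	    1
 *	      TCA_ACT_KIND "gact"
 *	  TCA_ROOT_FLAGS bitfield32 { TCA_ACT_FLAG_LARGE_DUMP_ON }
 *	  TCA_ROOT_TIME_DELTA <msecs; only actions used since then>
 */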
static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n,
			 struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_ROOT_MAX + 1];
	u32 portid = NETLINK_CB(skb).portid;
	int ret = 0, ovr = 0;

	if ((n->nlmsg_type != RTM_GETACTION) &&
	    !netlink_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	ret = nlmsg_parse_deprecated(n, sizeof(struct tcamsg), tca,
				     TCA_ROOT_MAX, NULL, extack);
	if (ret < 0)
		return ret;

	if (tca[TCA_ACT_TAB] == NULL) {
		NL_SET_ERR_MSG(extack, "Netlink action attributes missing");
		return -EINVAL;
	}

	/* n->nlmsg_flags & NLM_F_CREATE */
	switch (n->nlmsg_type) {
	case RTM_NEWACTION:
		/* We are going to assume that all other flags imply
		 * "create only if it doesn't exist". Note that
		 * CREATE | EXCL implies that, but since we want to avoid
		 * ambiguity (e.g. when flags are zero) we only treat
		 * NLM_F_REPLACE as a request to override.
		 */
		if (n->nlmsg_flags & NLM_F_REPLACE)
			ovr = 1;
		ret = tcf_action_add(net, tca[TCA_ACT_TAB], n, portid, ovr,
				     extack);
		break;
	case RTM_DELACTION:
		ret = tca_action_gd(net, tca[TCA_ACT_TAB], n,
				    portid, RTM_DELACTION, extack);
		break;
	case RTM_GETACTION:
		ret = tca_action_gd(net, tca[TCA_ACT_TAB], n,
				    portid, RTM_GETACTION, extack);
		break;
	default:
		BUG();
	}

	return ret;
}

static struct nlattr *find_dump_kind(struct nlattr **nla)
{
	struct nlattr *tb1, *tb2[TCA_ACT_MAX + 1];
	struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
	struct nlattr *kind;

	tb1 = nla[TCA_ACT_TAB];
	if (tb1 == NULL)
		return NULL;

	if (nla_parse_deprecated(tb, TCA_ACT_MAX_PRIO, nla_data(tb1),
				 NLMSG_ALIGN(nla_len(tb1)), NULL, NULL) < 0)
		return NULL;

	if (tb[1] == NULL)
		return NULL;
	if (nla_parse_nested_deprecated(tb2, TCA_ACT_MAX, tb[1],
					tcf_action_policy, NULL) < 0)
		return NULL;
	kind = tb2[TCA_ACT_KIND];

	return kind;
}

static int tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct nlmsghdr *nlh;
	unsigned char *b = skb_tail_pointer(skb);
	struct nlattr *nest;
	struct tc_action_ops *a_o;
	int ret = 0;
	struct tcamsg *t = (struct tcamsg *) nlmsg_data(cb->nlh);
	struct nlattr *tb[TCA_ROOT_MAX + 1];
	struct nlattr *count_attr = NULL;
	unsigned long jiffy_since = 0;
	struct nlattr *kind = NULL;
	struct nla_bitfield32 bf;
	u32 msecs_since = 0;
	u32 act_count = 0;

	ret = nlmsg_parse_deprecated(cb->nlh, sizeof(struct tcamsg), tb,
				     TCA_ROOT_MAX, tcaa_policy, cb->extack);
	if (ret < 0)
		return ret;

	kind = find_dump_kind(tb);
	if (kind == NULL) {
		pr_info("tc_dump_action: action bad kind\n");
		return 0;
	}

	a_o = tc_lookup_action(kind);
	if (a_o == NULL)
		return 0;

	cb->args[2] = 0;
	if (tb[TCA_ROOT_FLAGS]) {
		bf = nla_get_bitfield32(tb[TCA_ROOT_FLAGS]);
		cb->args[2] = bf.value;
	}

	if (tb[TCA_ROOT_TIME_DELTA]) {
		msecs_since = nla_get_u32(tb[TCA_ROOT_TIME_DELTA]);
	}

	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			cb->nlh->nlmsg_type, sizeof(*t), 0);
	if (!nlh)
		goto out_module_put;

	if (msecs_since)
		jiffy_since = jiffies - msecs_to_jiffies(msecs_since);

	t = nlmsg_data(nlh);
	t->tca_family = AF_UNSPEC;
	t->tca__pad1 = 0;
	t->tca__pad2 = 0;
	cb->args[3] = jiffy_since;
	count_attr = nla_reserve(skb, TCA_ROOT_COUNT, sizeof(u32));
	if (!count_attr)
		goto out_module_put;

	nest = nla_nest_start_noflag(skb, TCA_ACT_TAB);
	if (nest == NULL)
		goto out_module_put;

	ret = a_o->walk(net, skb, cb, RTM_GETACTION, a_o, NULL);
	if (ret < 0)
		goto out_module_put;

	if (ret > 0) {
		nla_nest_end(skb, nest);
		ret = skb->len;
		act_count = cb->args[1];
		memcpy(nla_data(count_attr), &act_count, sizeof(u32));
		cb->args[1] = 0;
	} else
		nlmsg_trim(skb, b);

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	if (NETLINK_CB(cb->skb).portid && ret)
		nlh->nlmsg_flags |= NLM_F_MULTI;
	module_put(a_o->owner);
	return skb->len;

out_module_put:
	module_put(a_o->owner);
	nlmsg_trim(skb, b);
	return skb->len;
}

static int __init tc_action_init(void)
{
	rtnl_register(PF_UNSPEC, RTM_NEWACTION, tc_ctl_action, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_DELACTION, tc_ctl_action, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_GETACTION, tc_ctl_action, tc_dump_action,
		      0);

	return 0;
}

subsys_initcall(tc_action_init);