// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/act_api.c	Packet action API.
 *
 * Author:	Jamal Hadi Salim
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/err.h>
#include <linux/module.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <net/act_api.h>
#include <net/netlink.h>

static void tcf_action_goto_chain_exec(const struct tc_action *a,
				       struct tcf_result *res)
{
	const struct tcf_chain *chain = rcu_dereference_bh(a->goto_chain);

	res->goto_tp = rcu_dereference_bh(chain->filter_chain);
}

static void tcf_free_cookie_rcu(struct rcu_head *p)
{
	struct tc_cookie *cookie = container_of(p, struct tc_cookie, rcu);

	kfree(cookie->data);
	kfree(cookie);
}

static void tcf_set_action_cookie(struct tc_cookie __rcu **old_cookie,
				  struct tc_cookie *new_cookie)
{
	struct tc_cookie *old;

	old = xchg((__force struct tc_cookie **)old_cookie, new_cookie);
	if (old)
		call_rcu(&old->rcu, tcf_free_cookie_rcu);
}

int tcf_action_check_ctrlact(int action, struct tcf_proto *tp,
			     struct tcf_chain **newchain,
			     struct netlink_ext_ack *extack)
{
	int opcode = TC_ACT_EXT_OPCODE(action), ret = -EINVAL;
	u32 chain_index;

	if (!opcode)
		ret = action > TC_ACT_VALUE_MAX ? -EINVAL : 0;
	else if (opcode <= TC_ACT_EXT_OPCODE_MAX || action == TC_ACT_UNSPEC)
		ret = 0;
	if (ret) {
		NL_SET_ERR_MSG(extack, "invalid control action");
		goto end;
	}

	if (TC_ACT_EXT_CMP(action, TC_ACT_GOTO_CHAIN)) {
		chain_index = action & TC_ACT_EXT_VAL_MASK;
		if (!tp || !newchain) {
			ret = -EINVAL;
			NL_SET_ERR_MSG(extack,
				       "can't goto NULL proto/chain");
			goto end;
		}
		*newchain = tcf_chain_get_by_act(tp->chain->block, chain_index);
		if (!*newchain) {
			ret = -ENOMEM;
			NL_SET_ERR_MSG(extack,
				       "can't allocate goto_chain");
		}
	}
end:
	return ret;
}
EXPORT_SYMBOL(tcf_action_check_ctrlact);

struct tcf_chain *tcf_action_set_ctrlact(struct tc_action *a, int action,
					 struct tcf_chain *goto_chain)
{
	a->tcfa_action = action;
	goto_chain = rcu_replace_pointer(a->goto_chain, goto_chain, 1);
	return goto_chain;
}
EXPORT_SYMBOL(tcf_action_set_ctrlact);
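
/* A control action is either a plain verdict (TC_ACT_OK, TC_ACT_SHOT, ...)
 * or an extended opcode with a value packed into the low bits.  A sketch,
 * assuming the uapi definitions in <linux/pkt_cls.h>:
 *
 *	int act = TC_ACT_GOTO_CHAIN | 7;	// "goto chain 7"
 *
 *	TC_ACT_EXT_CMP(act, TC_ACT_GOTO_CHAIN)	// true: opcode matches
 *	act & TC_ACT_EXT_VAL_MASK		// 7: the chain index
 *
 * tcf_action_check_ctrlact() validates exactly this split and, for
 * goto_chain, resolves the destination chain at configuration time so the
 * datapath never has to allocate.
 */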
/* XXX: For standalone actions, we don't need an RCU grace period either,
 * because actions are always connected to filters and filters are already
 * destroyed in RCU callbacks, so after an RCU grace period actions are
 * already disconnected from filters. Readers later cannot find us.
 */
static void free_tcf(struct tc_action *p)
{
	struct tcf_chain *chain = rcu_dereference_protected(p->goto_chain, 1);

	free_percpu(p->cpu_bstats);
	free_percpu(p->cpu_bstats_hw);
	free_percpu(p->cpu_qstats);

	tcf_set_action_cookie(&p->act_cookie, NULL);
	if (chain)
		tcf_chain_put_by_act(chain);

	kfree(p);
}

static void tcf_action_cleanup(struct tc_action *p)
{
	if (p->ops->cleanup)
		p->ops->cleanup(p);

	gen_kill_estimator(&p->tcfa_rate_est);
	free_tcf(p);
}

static int __tcf_action_put(struct tc_action *p, bool bind)
{
	struct tcf_idrinfo *idrinfo = p->idrinfo;

	if (refcount_dec_and_mutex_lock(&p->tcfa_refcnt, &idrinfo->lock)) {
		if (bind)
			atomic_dec(&p->tcfa_bindcnt);
		idr_remove(&idrinfo->action_idr, p->tcfa_index);
		mutex_unlock(&idrinfo->lock);

		tcf_action_cleanup(p);
		return 1;
	}

	if (bind)
		atomic_dec(&p->tcfa_bindcnt);

	return 0;
}
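
/* Lifetime in a nutshell: tcfa_refcnt counts every holder of the action
 * (classifiers and act API users alike), while tcfa_bindcnt counts only
 * classifier bindings.  A minimal sketch of the two ways into
 * __tcf_action_put():
 *
 *	__tcf_action_put(a, true);	// classifier unbind: drop both counts
 *	__tcf_action_put(a, false);	// act API put: drop the reference only
 *
 * The last reference wins the refcount_dec_and_mutex_lock() race and is the
 * one that unlinks the action from the IDR and frees it.
 */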
int __tcf_idr_release(struct tc_action *p, bool bind, bool strict)
{
	int ret = 0;

	/* Release with strict==1 and bind==0 is only called through the act
	 * API interface (classifiers always bind). The only case when an
	 * action with a positive reference count and zero bind count can
	 * exist is when it was also created via the act API (unbinding the
	 * last classifier destroys the action if it was created by a
	 * classifier). So the only case when the bind count can change after
	 * the initial check is when an unbound action is destroyed by the
	 * act API while a classifier concurrently binds to an action with
	 * the same id. This results either in creation of a new action (same
	 * behavior as before), or in reuse of the existing action if the
	 * concurrent process increments the reference count before the
	 * action is deleted. Both scenarios are acceptable.
	 */
	if (p) {
		if (!bind && strict && atomic_read(&p->tcfa_bindcnt) > 0)
			return -EPERM;

		if (__tcf_action_put(p, bind))
			ret = ACT_P_DELETED;
	}

	return ret;
}
EXPORT_SYMBOL(__tcf_idr_release);

static size_t tcf_action_shared_attrs_size(const struct tc_action *act)
{
	struct tc_cookie *act_cookie;
	u32 cookie_len = 0;

	rcu_read_lock();
	act_cookie = rcu_dereference(act->act_cookie);

	if (act_cookie)
		cookie_len = nla_total_size(act_cookie->len);
	rcu_read_unlock();

	return  nla_total_size(0) /* action number nested */
		+ nla_total_size(IFNAMSIZ) /* TCA_ACT_KIND */
		+ cookie_len /* TCA_ACT_COOKIE */
		+ nla_total_size(sizeof(struct nla_bitfield32)) /* TCA_ACT_HW_STATS */
		+ nla_total_size(0) /* TCA_ACT_STATS nested */
		+ nla_total_size(sizeof(struct nla_bitfield32)) /* TCA_ACT_FLAGS */
		/* TCA_STATS_BASIC */
		+ nla_total_size_64bit(sizeof(struct gnet_stats_basic))
		/* TCA_STATS_PKT64 */
		+ nla_total_size_64bit(sizeof(u64))
		/* TCA_STATS_QUEUE */
		+ nla_total_size_64bit(sizeof(struct gnet_stats_queue))
		+ nla_total_size(0) /* TCA_OPTIONS nested */
		+ nla_total_size(sizeof(struct tcf_t)); /* TCA_GACT_TM */
}

static size_t tcf_action_full_attrs_size(size_t sz)
{
	return NLMSG_HDRLEN			/* struct nlmsghdr */
		+ sizeof(struct tcamsg)
		+ nla_total_size(0)		/* TCA_ACT_TAB nested */
		+ sz;
}

static size_t tcf_action_fill_size(const struct tc_action *act)
{
	size_t sz = tcf_action_shared_attrs_size(act);

	if (act->ops->get_fill_size)
		return act->ops->get_fill_size(act) + sz;
	return sz;
}

static int tcf_dump_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb,
			   struct netlink_callback *cb)
{
	int err = 0, index = -1, s_i = 0, n_i = 0;
	u32 act_flags = cb->args[2];
	unsigned long jiffy_since = cb->args[3];
	struct nlattr *nest;
	struct idr *idr = &idrinfo->action_idr;
	struct tc_action *p;
	unsigned long id = 1;
	unsigned long tmp;

	mutex_lock(&idrinfo->lock);

	s_i = cb->args[0];

	idr_for_each_entry_ul(idr, p, tmp, id) {
		index++;
		if (index < s_i)
			continue;
		if (IS_ERR(p))
			continue;

		if (jiffy_since &&
		    time_after(jiffy_since,
			       (unsigned long)p->tcfa_tm.lastuse))
			continue;

		nest = nla_nest_start_noflag(skb, n_i);
		if (!nest) {
			index--;
			goto nla_put_failure;
		}
		err = tcf_action_dump_1(skb, p, 0, 0);
		if (err < 0) {
			index--;
			nlmsg_trim(skb, nest);
			goto done;
		}
		nla_nest_end(skb, nest);
		n_i++;
		if (!(act_flags & TCA_FLAG_LARGE_DUMP_ON) &&
		    n_i >= TCA_ACT_MAX_PRIO)
			goto done;
	}
done:
	if (index >= 0)
		cb->args[0] = index + 1;

	mutex_unlock(&idrinfo->lock);
	if (n_i) {
		if (act_flags & TCA_FLAG_LARGE_DUMP_ON)
			cb->args[1] = n_i;
	}
	return n_i;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	goto done;
}

static int tcf_idr_release_unsafe(struct tc_action *p)
{
	if (atomic_read(&p->tcfa_bindcnt) > 0)
		return -EPERM;

	if (refcount_dec_and_test(&p->tcfa_refcnt)) {
		idr_remove(&p->idrinfo->action_idr, p->tcfa_index);
		tcf_action_cleanup(p);
		return ACT_P_DELETED;
	}

	return 0;
}
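
/* Netlink dumps are restartable: the kernel fills one skb, returns to user
 * space, and resumes on the next recvmsg() with state stashed in cb->args[].
 * As used by tcf_dump_walker() above: args[0] is the index to resume from,
 * args[1] the number of actions dumped so far (large-dump mode), args[2] the
 * TCA_ROOT_FLAGS value and args[3] the "active since" cutoff in jiffies.  A
 * hypothetical second pass after the first one stopped past index 2 would
 * therefore see s_i == 3 and silently skip entries 0..2 before dumping again.
 */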
static int tcf_del_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb,
			  const struct tc_action_ops *ops)
{
	struct nlattr *nest;
	int n_i = 0;
	int ret = -EINVAL;
	struct idr *idr = &idrinfo->action_idr;
	struct tc_action *p;
	unsigned long id = 1;
	unsigned long tmp;

	nest = nla_nest_start_noflag(skb, 0);
	if (nest == NULL)
		goto nla_put_failure;
	if (nla_put_string(skb, TCA_KIND, ops->kind))
		goto nla_put_failure;

	mutex_lock(&idrinfo->lock);
	idr_for_each_entry_ul(idr, p, tmp, id) {
		if (IS_ERR(p))
			continue;
		ret = tcf_idr_release_unsafe(p);
		if (ret == ACT_P_DELETED) {
			module_put(ops->owner);
			n_i++;
		} else if (ret < 0) {
			mutex_unlock(&idrinfo->lock);
			goto nla_put_failure;
		}
	}
	mutex_unlock(&idrinfo->lock);

	if (nla_put_u32(skb, TCA_FCNT, n_i))
		goto nla_put_failure;
	nla_nest_end(skb, nest);

	return n_i;
nla_put_failure:
	nla_nest_cancel(skb, nest);
	return ret;
}

int tcf_generic_walker(struct tc_action_net *tn, struct sk_buff *skb,
		       struct netlink_callback *cb, int type,
		       const struct tc_action_ops *ops,
		       struct netlink_ext_ack *extack)
{
	struct tcf_idrinfo *idrinfo = tn->idrinfo;

	if (type == RTM_DELACTION) {
		return tcf_del_walker(idrinfo, skb, ops);
	} else if (type == RTM_GETACTION) {
		return tcf_dump_walker(idrinfo, skb, cb);
	} else {
		WARN(1, "tcf_generic_walker: unknown command %d\n", type);
		NL_SET_ERR_MSG(extack, "tcf_generic_walker: unknown command");
		return -EINVAL;
	}
}
EXPORT_SYMBOL(tcf_generic_walker);

int tcf_idr_search(struct tc_action_net *tn, struct tc_action **a, u32 index)
{
	struct tcf_idrinfo *idrinfo = tn->idrinfo;
	struct tc_action *p;

	mutex_lock(&idrinfo->lock);
	p = idr_find(&idrinfo->action_idr, index);
	if (IS_ERR(p))
		p = NULL;
	else if (p)
		refcount_inc(&p->tcfa_refcnt);
	mutex_unlock(&idrinfo->lock);

	if (p) {
		*a = p;
		return true;
	}
	return false;
}
EXPORT_SYMBOL(tcf_idr_search);

static int tcf_idr_delete_index(struct tcf_idrinfo *idrinfo, u32 index)
{
	struct tc_action *p;
	int ret = 0;

	mutex_lock(&idrinfo->lock);
	p = idr_find(&idrinfo->action_idr, index);
	if (!p) {
		mutex_unlock(&idrinfo->lock);
		return -ENOENT;
	}

	if (!atomic_read(&p->tcfa_bindcnt)) {
		if (refcount_dec_and_test(&p->tcfa_refcnt)) {
			struct module *owner = p->ops->owner;

			WARN_ON(p != idr_remove(&idrinfo->action_idr,
						p->tcfa_index));
			mutex_unlock(&idrinfo->lock);

			tcf_action_cleanup(p);
			module_put(owner);
			return 0;
		}
		ret = 0;
	} else {
		ret = -EPERM;
	}

	mutex_unlock(&idrinfo->lock);
	return ret;
}
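
/* tcf_generic_walker() and tcf_idr_search() exist so individual action
 * modules don't reimplement table iteration.  A minimal sketch of how a
 * hypothetical action "foo" would wire them into its ops (the net-id
 * variable name is an assumption):
 *
 *	static int tcf_foo_walker(struct net *net, struct sk_buff *skb,
 *				  struct netlink_callback *cb, int type,
 *				  const struct tc_action_ops *ops,
 *				  struct netlink_ext_ack *extack)
 *	{
 *		struct tc_action_net *tn = net_generic(net, foo_net_id);
 *
 *		return tcf_generic_walker(tn, skb, cb, type, ops, extack);
 *	}
 *
 *	static int tcf_foo_search(struct net *net, struct tc_action **a,
 *				  u32 index)
 *	{
 *		struct tc_action_net *tn = net_generic(net, foo_net_id);
 *
 *		return tcf_idr_search(tn, a, index);
 *	}
 */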
int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est,
		   struct tc_action **a, const struct tc_action_ops *ops,
		   int bind, bool cpustats, u32 flags)
{
	struct tc_action *p = kzalloc(ops->size, GFP_KERNEL);
	struct tcf_idrinfo *idrinfo = tn->idrinfo;
	int err = -ENOMEM;

	if (unlikely(!p))
		return -ENOMEM;
	refcount_set(&p->tcfa_refcnt, 1);
	if (bind)
		atomic_set(&p->tcfa_bindcnt, 1);

	if (cpustats) {
		p->cpu_bstats = netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu);
		if (!p->cpu_bstats)
			goto err1;
		p->cpu_bstats_hw = netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu);
		if (!p->cpu_bstats_hw)
			goto err2;
		p->cpu_qstats = alloc_percpu(struct gnet_stats_queue);
		if (!p->cpu_qstats)
			goto err3;
	}
	spin_lock_init(&p->tcfa_lock);
	p->tcfa_index = index;
	p->tcfa_tm.install = jiffies;
	p->tcfa_tm.lastuse = jiffies;
	p->tcfa_tm.firstuse = 0;
	p->tcfa_flags = flags;
	if (est) {
		err = gen_new_estimator(&p->tcfa_bstats, p->cpu_bstats,
					&p->tcfa_rate_est,
					&p->tcfa_lock, NULL, est);
		if (err)
			goto err4;
	}

	p->idrinfo = idrinfo;
	p->ops = ops;
	*a = p;
	return 0;
err4:
	free_percpu(p->cpu_qstats);
err3:
	free_percpu(p->cpu_bstats_hw);
err2:
	free_percpu(p->cpu_bstats);
err1:
	kfree(p);
	return err;
}
EXPORT_SYMBOL(tcf_idr_create);

int tcf_idr_create_from_flags(struct tc_action_net *tn, u32 index,
			      struct nlattr *est, struct tc_action **a,
			      const struct tc_action_ops *ops, int bind,
			      u32 flags)
{
	/* Set cpustats according to action flags. */
	return tcf_idr_create(tn, index, est, a, ops, bind,
			      !(flags & TCA_ACT_FLAGS_NO_PERCPU_STATS), flags);
}
EXPORT_SYMBOL(tcf_idr_create_from_flags);

/* Clean up an idr index that was allocated but not initialized. */

void tcf_idr_cleanup(struct tc_action_net *tn, u32 index)
{
	struct tcf_idrinfo *idrinfo = tn->idrinfo;

	mutex_lock(&idrinfo->lock);
	/* Remove ERR_PTR(-EBUSY) allocated by tcf_idr_check_alloc */
	WARN_ON(!IS_ERR(idr_remove(&idrinfo->action_idr, index)));
	mutex_unlock(&idrinfo->lock);
}
EXPORT_SYMBOL(tcf_idr_cleanup);
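
/* The helpers above are meant to be used together from an action's ->init()
 * callback.  A condensed sketch (error handling trimmed; the action name
 * "foo" and its ops are illustrative only):
 *
 *	err = tcf_idr_check_alloc(tn, &index, a, bind);
 *	if (!err) {				// index newly reserved
 *		err = tcf_idr_create(tn, index, est, a, &act_foo_ops,
 *				     bind, true, flags);
 *		if (err)
 *			tcf_idr_cleanup(tn, index); // drop the placeholder
 *		ret = ACT_P_CREATED;
 *	} else if (err > 0) {			// existing action found
 *		// update or put the action depending on ovr/bind
 *	}
 *
 * The ERR_PTR(-EBUSY) placeholder reserved by tcf_idr_check_alloc() is only
 * replaced with the real pointer later, by tcf_idr_insert_many().
 */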
/* Check if an action with the specified index exists. If the action is
 * found, increment its reference and bind counters and return 1. Otherwise
 * insert a temporary error pointer (to prevent concurrent users from
 * inserting actions with the same index) and return 0.
 */

int tcf_idr_check_alloc(struct tc_action_net *tn, u32 *index,
			struct tc_action **a, int bind)
{
	struct tcf_idrinfo *idrinfo = tn->idrinfo;
	struct tc_action *p;
	int ret;

again:
	mutex_lock(&idrinfo->lock);
	if (*index) {
		p = idr_find(&idrinfo->action_idr, *index);
		if (IS_ERR(p)) {
			/* This means that another process allocated
			 * index but did not assign the pointer yet.
			 */
			mutex_unlock(&idrinfo->lock);
			goto again;
		}

		if (p) {
			refcount_inc(&p->tcfa_refcnt);
			if (bind)
				atomic_inc(&p->tcfa_bindcnt);
			*a = p;
			ret = 1;
		} else {
			*a = NULL;
			ret = idr_alloc_u32(&idrinfo->action_idr, NULL, index,
					    *index, GFP_KERNEL);
			if (!ret)
				idr_replace(&idrinfo->action_idr,
					    ERR_PTR(-EBUSY), *index);
		}
	} else {
		*index = 1;
		*a = NULL;
		ret = idr_alloc_u32(&idrinfo->action_idr, NULL, index,
				    UINT_MAX, GFP_KERNEL);
		if (!ret)
			idr_replace(&idrinfo->action_idr, ERR_PTR(-EBUSY),
				    *index);
	}
	mutex_unlock(&idrinfo->lock);
	return ret;
}
EXPORT_SYMBOL(tcf_idr_check_alloc);

void tcf_idrinfo_destroy(const struct tc_action_ops *ops,
			 struct tcf_idrinfo *idrinfo)
{
	struct idr *idr = &idrinfo->action_idr;
	struct tc_action *p;
	int ret;
	unsigned long id = 1;
	unsigned long tmp;

	idr_for_each_entry_ul(idr, p, tmp, id) {
		ret = __tcf_idr_release(p, false, true);
		if (ret == ACT_P_DELETED)
			module_put(ops->owner);
		else if (ret < 0)
			return;
	}
	idr_destroy(&idrinfo->action_idr);
}
EXPORT_SYMBOL(tcf_idrinfo_destroy);

static LIST_HEAD(act_base);
static DEFINE_RWLOCK(act_mod_lock);

int tcf_register_action(struct tc_action_ops *act,
			struct pernet_operations *ops)
{
	struct tc_action_ops *a;
	int ret;

	if (!act->act || !act->dump || !act->init || !act->walk || !act->lookup)
		return -EINVAL;

	/* We have to register pernet ops before making the action ops visible,
	 * otherwise tcf_action_init_1() could get a partially initialized
	 * netns.
	 */
	ret = register_pernet_subsys(ops);
	if (ret)
		return ret;

	write_lock(&act_mod_lock);
	list_for_each_entry(a, &act_base, head) {
		if (act->id == a->id || (strcmp(act->kind, a->kind) == 0)) {
			write_unlock(&act_mod_lock);
			unregister_pernet_subsys(ops);
			return -EEXIST;
		}
	}
	list_add_tail(&act->head, &act_base);
	write_unlock(&act_mod_lock);

	return 0;
}
EXPORT_SYMBOL(tcf_register_action);

int tcf_unregister_action(struct tc_action_ops *act,
			  struct pernet_operations *ops)
{
	struct tc_action_ops *a;
	int err = -ENOENT;

	write_lock(&act_mod_lock);
	list_for_each_entry(a, &act_base, head) {
		if (a == act) {
			list_del(&act->head);
			err = 0;
			break;
		}
	}
	write_unlock(&act_mod_lock);
	if (!err)
		unregister_pernet_subsys(ops);
	return err;
}
EXPORT_SYMBOL(tcf_unregister_action);

/* lookup by name */
static struct tc_action_ops *tc_lookup_action_n(char *kind)
{
	struct tc_action_ops *a, *res = NULL;

	if (kind) {
		read_lock(&act_mod_lock);
		list_for_each_entry(a, &act_base, head) {
			if (strcmp(kind, a->kind) == 0) {
				if (try_module_get(a->owner))
					res = a;
				break;
			}
		}
		read_unlock(&act_mod_lock);
	}
	return res;
}

/* lookup by nlattr */
static struct tc_action_ops *tc_lookup_action(struct nlattr *kind)
{
	struct tc_action_ops *a, *res = NULL;

	if (kind) {
		read_lock(&act_mod_lock);
		list_for_each_entry(a, &act_base, head) {
			if (nla_strcmp(kind, a->kind) == 0) {
				if (try_module_get(a->owner))
					res = a;
				break;
			}
		}
		read_unlock(&act_mod_lock);
	}
	return res;
}
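
/* An action module registers its ops and pernet bookkeeping in one call.  A
 * minimal sketch for a hypothetical "foo" action (the id and the pernet ops
 * variable are illustrative; fields beyond the mandatory callbacks omitted):
 *
 *	static struct tc_action_ops act_foo_ops = {
 *		.kind	= "foo",
 *		.id	= TCA_ID_FOO,		// illustrative id
 *		.owner	= THIS_MODULE,
 *		.act	= tcf_foo_act,
 *		.dump	= tcf_foo_dump,
 *		.init	= tcf_foo_init,
 *		.walk	= tcf_foo_walker,
 *		.lookup	= tcf_foo_search,
 *		.size	= sizeof(struct tcf_foo),
 *	};
 *
 *	static int __init foo_init_module(void)
 *	{
 *		return tcf_register_action(&act_foo_ops, &foo_net_ops);
 *	}
 *
 * tcf_register_action() rejects ops missing any of the five mandatory
 * callbacks checked above, and -EEXIST guards against duplicate ids/kinds.
 */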
/* TCA_ACT_MAX_PRIO is 32, so jump counts go up to 32 */
#define TCA_ACT_MAX_PRIO_MASK 0x1FF
int tcf_action_exec(struct sk_buff *skb, struct tc_action **actions,
		    int nr_actions, struct tcf_result *res)
{
	u32 jmp_prgcnt = 0;
	u32 jmp_ttl = TCA_ACT_MAX_PRIO; /* matches actions per filter */
	int i;
	int ret = TC_ACT_OK;

	if (skb_skip_tc_classify(skb))
		return TC_ACT_OK;

restart_act_graph:
	for (i = 0; i < nr_actions; i++) {
		const struct tc_action *a = actions[i];

		if (jmp_prgcnt > 0) {
			jmp_prgcnt -= 1;
			continue;
		}
repeat:
		ret = a->ops->act(skb, a, res);
		if (ret == TC_ACT_REPEAT)
			goto repeat;	/* we need a ttl - JHS */

		if (TC_ACT_EXT_CMP(ret, TC_ACT_JUMP)) {
			jmp_prgcnt = ret & TCA_ACT_MAX_PRIO_MASK;
			if (!jmp_prgcnt || (jmp_prgcnt > nr_actions)) {
				/* faulty opcode, stop pipeline */
				return TC_ACT_OK;
			} else {
				jmp_ttl -= 1;
				if (jmp_ttl > 0)
					goto restart_act_graph;
				else /* faulty graph, stop pipeline */
					return TC_ACT_OK;
			}
		} else if (TC_ACT_EXT_CMP(ret, TC_ACT_GOTO_CHAIN)) {
			if (unlikely(!rcu_access_pointer(a->goto_chain))) {
				net_warn_ratelimited("can't go to NULL chain!\n");
				return TC_ACT_SHOT;
			}
			tcf_action_goto_chain_exec(a, res);
		}

		if (ret != TC_ACT_PIPE)
			break;
	}

	return ret;
}
EXPORT_SYMBOL(tcf_action_exec);

int tcf_action_destroy(struct tc_action *actions[], int bind)
{
	const struct tc_action_ops *ops;
	struct tc_action *a;
	int ret = 0, i;

	for (i = 0; i < TCA_ACT_MAX_PRIO && actions[i]; i++) {
		a = actions[i];
		actions[i] = NULL;
		ops = a->ops;
		ret = __tcf_idr_release(a, bind, true);
		if (ret == ACT_P_DELETED)
			module_put(ops->owner);
		else if (ret < 0)
			return ret;
	}
	return ret;
}

static int tcf_action_put(struct tc_action *p)
{
	return __tcf_action_put(p, false);
}
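
/* Jump handling above, spelled out: a verdict of (TC_ACT_JUMP | n) restarts
 * the walk at the top of the array with jmp_prgcnt = n, so the first n
 * entries are consumed silently and execution resumes at actions[n].  Each
 * restart spends one unit of jmp_ttl, bounding a packet to TCA_ACT_MAX_PRIO
 * jumps, and a count that is zero or exceeds nr_actions ends the pipeline
 * with TC_ACT_OK rather than looping.
 */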
/* Put all actions in this array, skipping NULL entries. */
static void tcf_action_put_many(struct tc_action *actions[])
{
	int i;

	for (i = 0; i < TCA_ACT_MAX_PRIO; i++) {
		struct tc_action *a = actions[i];
		const struct tc_action_ops *ops;

		if (!a)
			continue;
		ops = a->ops;
		if (tcf_action_put(a))
			module_put(ops->owner);
	}
}

int
tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
{
	return a->ops->dump(skb, a, bind, ref);
}

static int
tcf_action_dump_terse(struct sk_buff *skb, struct tc_action *a)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_cookie *cookie;

	if (nla_put_string(skb, TCA_KIND, a->ops->kind))
		goto nla_put_failure;
	if (tcf_action_copy_stats(skb, a, 0))
		goto nla_put_failure;

	rcu_read_lock();
	cookie = rcu_dereference(a->act_cookie);
	if (cookie) {
		if (nla_put(skb, TCA_ACT_COOKIE, cookie->len, cookie->data)) {
			rcu_read_unlock();
			goto nla_put_failure;
		}
	}
	rcu_read_unlock();

	return 0;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

int
tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
{
	int err = -EINVAL;
	unsigned char *b = skb_tail_pointer(skb);
	struct nlattr *nest;

	if (tcf_action_dump_terse(skb, a))
		goto nla_put_failure;

	if (a->hw_stats != TCA_ACT_HW_STATS_ANY &&
	    nla_put_bitfield32(skb, TCA_ACT_HW_STATS,
			       a->hw_stats, TCA_ACT_HW_STATS_ANY))
		goto nla_put_failure;

	if (a->used_hw_stats_valid &&
	    nla_put_bitfield32(skb, TCA_ACT_USED_HW_STATS,
			       a->used_hw_stats, TCA_ACT_HW_STATS_ANY))
		goto nla_put_failure;

	if (a->tcfa_flags &&
	    nla_put_bitfield32(skb, TCA_ACT_FLAGS,
			       a->tcfa_flags, a->tcfa_flags))
		goto nla_put_failure;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;
	err = tcf_action_dump_old(skb, a, bind, ref);
	if (err > 0) {
		nla_nest_end(skb, nest);
		return err;
	}

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}
EXPORT_SYMBOL(tcf_action_dump_1);

int tcf_action_dump(struct sk_buff *skb, struct tc_action *actions[],
		    int bind, int ref, bool terse)
{
	struct tc_action *a;
	int err = -EINVAL, i;
	struct nlattr *nest;

	for (i = 0; i < TCA_ACT_MAX_PRIO && actions[i]; i++) {
		a = actions[i];
		nest = nla_nest_start_noflag(skb, i + 1);
		if (nest == NULL)
			goto nla_put_failure;
		err = terse ? tcf_action_dump_terse(skb, a) :
			tcf_action_dump_1(skb, a, bind, ref);
		if (err < 0)
			goto errout;
		nla_nest_end(skb, nest);
	}

	return 0;

nla_put_failure:
	err = -EINVAL;
errout:
	nla_nest_cancel(skb, nest);
	return err;
}

static struct tc_cookie *nla_memdup_cookie(struct nlattr **tb)
{
	struct tc_cookie *c = kzalloc(sizeof(*c), GFP_KERNEL);

	if (!c)
		return NULL;

	c->data = nla_memdup(tb[TCA_ACT_COOKIE], GFP_KERNEL);
	if (!c->data) {
		kfree(c);
		return NULL;
	}
	c->len = nla_len(tb[TCA_ACT_COOKIE]);

	return c;
}
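
/* The dump helpers above produce a nested attribute layout roughly like the
 * following (a sketch of one action at priority 1; exact contents depend on
 * the action kind and on terse vs. full mode):
 *
 *	TCA_ACT_TAB
 *	  nest 1				// action priority/order
 *	    TCA_ACT_KIND	"gact"
 *	    TCA_ACT_STATS	...		// via tcf_action_copy_stats()
 *	    TCA_ACT_COOKIE	<opaque bytes>	// only if one was set
 *	    TCA_ACT_HW_STATS	<bitfield32>	// only if not "any"
 *	    TCA_OPTIONS
 *	      ...				// kind-specific, ops->dump()
 */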
static u8 tcf_action_hw_stats_get(struct nlattr *hw_stats_attr)
{
	struct nla_bitfield32 hw_stats_bf;

	/* If the user did not pass the attr, that means they do not care
	 * about the type. Return "any" in that case, which enables all
	 * supported types.
	 */
	if (!hw_stats_attr)
		return TCA_ACT_HW_STATS_ANY;
	hw_stats_bf = nla_get_bitfield32(hw_stats_attr);
	return hw_stats_bf.value;
}

static const struct nla_policy tcf_action_policy[TCA_ACT_MAX + 1] = {
	[TCA_ACT_KIND]		= { .type = NLA_STRING },
	[TCA_ACT_INDEX]		= { .type = NLA_U32 },
	[TCA_ACT_COOKIE]	= { .type = NLA_BINARY,
				    .len = TC_COOKIE_MAX_SIZE },
	[TCA_ACT_OPTIONS]	= { .type = NLA_NESTED },
	[TCA_ACT_FLAGS]		= NLA_POLICY_BITFIELD32(TCA_ACT_FLAGS_NO_PERCPU_STATS),
	[TCA_ACT_HW_STATS]	= NLA_POLICY_BITFIELD32(TCA_ACT_HW_STATS_ANY),
};

static void tcf_idr_insert_many(struct tc_action *actions[])
{
	int i;

	for (i = 0; i < TCA_ACT_MAX_PRIO; i++) {
		struct tc_action *a = actions[i];
		struct tcf_idrinfo *idrinfo;

		if (!a)
			continue;
		idrinfo = a->idrinfo;
		mutex_lock(&idrinfo->lock);
		/* Replace ERR_PTR(-EBUSY) allocated by tcf_idr_check_alloc if
		 * it was just created, otherwise this is just a nop.
		 */
		idr_replace(&idrinfo->action_idr, a, a->tcfa_index);
		mutex_unlock(&idrinfo->lock);
	}
}
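
/* Insertion is therefore a two-phase commit: tcf_idr_check_alloc() parks an
 * ERR_PTR(-EBUSY) placeholder under the chosen index, the action is fully
 * initialized off to the side, and only tcf_idr_insert_many() publishes the
 * real pointers, for all actions of a batch at once.  Concurrent lookups in
 * the window see the ERR_PTR and either retry (tcf_idr_check_alloc) or skip
 * the entry (the walkers above), so a half-built action is never exposed.
 */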
struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
				    struct nlattr *nla, struct nlattr *est,
				    char *name, int ovr, int bind,
				    bool rtnl_held,
				    struct netlink_ext_ack *extack)
{
	struct nla_bitfield32 flags = { 0, 0 };
	u8 hw_stats = TCA_ACT_HW_STATS_ANY;
	struct tc_action *a;
	struct tc_action_ops *a_o;
	struct tc_cookie *cookie = NULL;
	char act_name[IFNAMSIZ];
	struct nlattr *tb[TCA_ACT_MAX + 1];
	struct nlattr *kind;
	int err;

	if (name == NULL) {
		err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX, nla,
						  tcf_action_policy, extack);
		if (err < 0)
			goto err_out;
		err = -EINVAL;
		kind = tb[TCA_ACT_KIND];
		if (!kind) {
			NL_SET_ERR_MSG(extack, "TC action kind must be specified");
			goto err_out;
		}
		if (nla_strlcpy(act_name, kind, IFNAMSIZ) >= IFNAMSIZ) {
			NL_SET_ERR_MSG(extack, "TC action name too long");
			goto err_out;
		}
		if (tb[TCA_ACT_COOKIE]) {
			cookie = nla_memdup_cookie(tb);
			if (!cookie) {
				NL_SET_ERR_MSG(extack, "No memory to generate TC cookie");
				err = -ENOMEM;
				goto err_out;
			}
		}
		hw_stats = tcf_action_hw_stats_get(tb[TCA_ACT_HW_STATS]);
		if (tb[TCA_ACT_FLAGS])
			flags = nla_get_bitfield32(tb[TCA_ACT_FLAGS]);
	} else {
		if (strlcpy(act_name, name, IFNAMSIZ) >= IFNAMSIZ) {
			NL_SET_ERR_MSG(extack, "TC action name too long");
			err = -EINVAL;
			goto err_out;
		}
	}

	a_o = tc_lookup_action_n(act_name);
	if (a_o == NULL) {
#ifdef CONFIG_MODULES
		if (rtnl_held)
			rtnl_unlock();
		request_module("act_%s", act_name);
		if (rtnl_held)
			rtnl_lock();

		a_o = tc_lookup_action_n(act_name);

		/* We dropped the RTNL semaphore in order to
		 * perform the module load. So, even if we
		 * succeeded in loading the module we have to
		 * tell the caller to replay the request. We
		 * indicate this using -EAGAIN.
		 */
		if (a_o != NULL) {
			err = -EAGAIN;
			goto err_mod;
		}
#endif
		NL_SET_ERR_MSG(extack, "Failed to load TC action module");
		err = -ENOENT;
		goto err_free;
	}

	/* backward compatibility for policer */
	if (name == NULL)
		err = a_o->init(net, tb[TCA_ACT_OPTIONS], est, &a, ovr, bind,
				rtnl_held, tp, flags.value, extack);
	else
		err = a_o->init(net, nla, est, &a, ovr, bind, rtnl_held,
				tp, flags.value, extack);
	if (err < 0)
		goto err_mod;

	if (!name && tb[TCA_ACT_COOKIE])
		tcf_set_action_cookie(&a->act_cookie, cookie);

	if (!name)
		a->hw_stats = hw_stats;

	/* The module count goes up only when a brand new policy is created;
	 * if it already exists and is only bound to in a_o->init(), then
	 * ACT_P_CREATED is not returned (zero is).
	 */
	if (err != ACT_P_CREATED)
		module_put(a_o->owner);

	return a;

err_mod:
	module_put(a_o->owner);
err_free:
	if (cookie) {
		kfree(cookie->data);
		kfree(cookie);
	}
err_out:
	return ERR_PTR(err);
}

/* Returns the number of initialized actions or a negative error. */

int tcf_action_init(struct net *net, struct tcf_proto *tp, struct nlattr *nla,
		    struct nlattr *est, char *name, int ovr, int bind,
		    struct tc_action *actions[], size_t *attr_size,
		    bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
	struct tc_action *act;
	size_t sz = 0;
	int err;
	int i;

	err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX_PRIO, nla, NULL,
					  extack);
	if (err < 0)
		return err;

	for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
		act = tcf_action_init_1(net, tp, tb[i], est, name, ovr, bind,
					rtnl_held, extack);
		if (IS_ERR(act)) {
			err = PTR_ERR(act);
			goto err;
		}
		sz += tcf_action_fill_size(act);
		/* Start from index 0 */
		actions[i - 1] = act;
	}

	/* We have to commit them all together, because if any error happened
	 * in between, we could not handle the failure gracefully.
	 */
	tcf_idr_insert_many(actions);

	*attr_size = tcf_action_full_attrs_size(sz);
	return i - 1;

err:
	tcf_action_destroy(actions, bind);
	return err;
}

void tcf_action_update_stats(struct tc_action *a, u64 bytes, u64 packets,
			     u64 drops, bool hw)
{
	if (a->cpu_bstats) {
		_bstats_cpu_update(this_cpu_ptr(a->cpu_bstats), bytes, packets);

		this_cpu_ptr(a->cpu_qstats)->drops += drops;

		if (hw)
			_bstats_cpu_update(this_cpu_ptr(a->cpu_bstats_hw),
					   bytes, packets);
		return;
	}

	_bstats_update(&a->tcfa_bstats, bytes, packets);
	a->tcfa_qstats.drops += drops;
	if (hw)
		_bstats_update(&a->tcfa_bstats_hw, bytes, packets);
}
EXPORT_SYMBOL(tcf_action_update_stats);
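
/* Two bookkeeping paths coexist above: actions created with cpustats use
 * lockless per-CPU counters (cpu_bstats/cpu_qstats), everything else falls
 * back to the tcfa_lock-protected scalars.  A sketch of a hypothetical
 * caller accounting one packet that was handled in hardware (the arguments
 * are illustrative):
 *
 *	tcf_action_update_stats(a, skb->len, 1, 0, true);
 *
 * The hw flag routes the update into the _hw mirror counters so software
 * and hardware byte/packet counts remain separable in dumps.
 */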
int tcf_action_copy_stats(struct sk_buff *skb, struct tc_action *p,
			  int compat_mode)
{
	int err = 0;
	struct gnet_dump d;

	if (p == NULL)
		goto errout;

	/* compat_mode being true specifies a call that is supposed
	 * to add additional backward compatibility statistic TLVs.
	 */
	if (compat_mode) {
		if (p->type == TCA_OLD_COMPAT)
			err = gnet_stats_start_copy_compat(skb, 0,
							   TCA_STATS,
							   TCA_XSTATS,
							   &p->tcfa_lock, &d,
							   TCA_PAD);
		else
			return 0;
	} else
		err = gnet_stats_start_copy(skb, TCA_ACT_STATS,
					    &p->tcfa_lock, &d, TCA_ACT_PAD);

	if (err < 0)
		goto errout;

	if (gnet_stats_copy_basic(NULL, &d, p->cpu_bstats, &p->tcfa_bstats) < 0 ||
	    gnet_stats_copy_basic_hw(NULL, &d, p->cpu_bstats_hw,
				     &p->tcfa_bstats_hw) < 0 ||
	    gnet_stats_copy_rate_est(&d, &p->tcfa_rate_est) < 0 ||
	    gnet_stats_copy_queue(&d, p->cpu_qstats,
				  &p->tcfa_qstats,
				  p->tcfa_qstats.qlen) < 0)
		goto errout;

	if (gnet_stats_finish_copy(&d) < 0)
		goto errout;

	return 0;

errout:
	return -1;
}

static int tca_get_fill(struct sk_buff *skb, struct tc_action *actions[],
			u32 portid, u32 seq, u16 flags, int event, int bind,
			int ref)
{
	struct tcamsg *t;
	struct nlmsghdr *nlh;
	unsigned char *b = skb_tail_pointer(skb);
	struct nlattr *nest;

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*t), flags);
	if (!nlh)
		goto out_nlmsg_trim;
	t = nlmsg_data(nlh);
	t->tca_family = AF_UNSPEC;
	t->tca__pad1 = 0;
	t->tca__pad2 = 0;

	nest = nla_nest_start_noflag(skb, TCA_ACT_TAB);
	if (!nest)
		goto out_nlmsg_trim;

	if (tcf_action_dump(skb, actions, bind, ref, false) < 0)
		goto out_nlmsg_trim;

	nla_nest_end(skb, nest);

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

out_nlmsg_trim:
	nlmsg_trim(skb, b);
	return -1;
}

static int
tcf_get_notify(struct net *net, u32 portid, struct nlmsghdr *n,
	       struct tc_action *actions[], int event,
	       struct netlink_ext_ack *extack)
{
	struct sk_buff *skb;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;
	if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, 0, event,
			 0, 1) <= 0) {
		NL_SET_ERR_MSG(extack, "Failed to fill netlink attributes while adding TC action");
		kfree_skb(skb);
		return -EINVAL;
	}

	return rtnl_unicast(skb, net, portid);
}

static struct tc_action *tcf_action_get_1(struct net *net, struct nlattr *nla,
					  struct nlmsghdr *n, u32 portid,
					  struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_ACT_MAX + 1];
	const struct tc_action_ops *ops;
	struct tc_action *a;
	int index;
	int err;

	err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX, nla,
					  tcf_action_policy, extack);
	if (err < 0)
		goto err_out;

	err = -EINVAL;
	if (tb[TCA_ACT_INDEX] == NULL ||
	    nla_len(tb[TCA_ACT_INDEX]) < sizeof(index)) {
		NL_SET_ERR_MSG(extack, "Invalid TC action index value");
		goto err_out;
	}
	index = nla_get_u32(tb[TCA_ACT_INDEX]);

	err = -EINVAL;
	ops = tc_lookup_action(tb[TCA_ACT_KIND]);
	if (!ops) { /* could happen in batch of actions */
		NL_SET_ERR_MSG(extack, "Specified TC action kind not found");
		goto err_out;
	}
	err = -ENOENT;
	if (ops->lookup(net, &a, index) == 0) {
		NL_SET_ERR_MSG(extack, "TC action with specified index not found");
		goto err_mod;
	}

	module_put(ops->owner);
	return a;

err_mod:
	module_put(ops->owner);
err_out:
	return ERR_PTR(err);
}
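
/* Note the reference choreography in tcf_action_get_1(): tc_lookup_action()
 * pins the module, ops->lookup() (typically tcf_idr_search() above) takes a
 * reference on the action itself, and the module pin is dropped before
 * returning.  The caller therefore owns exactly one action reference, which
 * tca_action_gd() later releases through tcf_action_put_many().
 */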
static int tca_action_flush(struct net *net, struct nlattr *nla,
			    struct nlmsghdr *n, u32 portid,
			    struct netlink_ext_ack *extack)
{
	struct sk_buff *skb;
	unsigned char *b;
	struct nlmsghdr *nlh;
	struct tcamsg *t;
	struct netlink_callback dcb;
	struct nlattr *nest;
	struct nlattr *tb[TCA_ACT_MAX + 1];
	const struct tc_action_ops *ops;
	struct nlattr *kind;
	int err = -ENOMEM;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return err;

	b = skb_tail_pointer(skb);

	err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX, nla,
					  tcf_action_policy, extack);
	if (err < 0)
		goto err_out;

	err = -EINVAL;
	kind = tb[TCA_ACT_KIND];
	ops = tc_lookup_action(kind);
	if (!ops) { /* someone is trying to flush an unknown action */
		NL_SET_ERR_MSG(extack, "Cannot flush unknown TC action");
		goto err_out;
	}

	nlh = nlmsg_put(skb, portid, n->nlmsg_seq, RTM_DELACTION,
			sizeof(*t), 0);
	if (!nlh) {
		NL_SET_ERR_MSG(extack, "Failed to create TC action flush notification");
		goto out_module_put;
	}
	t = nlmsg_data(nlh);
	t->tca_family = AF_UNSPEC;
	t->tca__pad1 = 0;
	t->tca__pad2 = 0;

	nest = nla_nest_start_noflag(skb, TCA_ACT_TAB);
	if (!nest) {
		NL_SET_ERR_MSG(extack, "Failed to add new netlink message");
		goto out_module_put;
	}

	err = ops->walk(net, skb, &dcb, RTM_DELACTION, ops, extack);
	if (err <= 0) {
		nla_nest_cancel(skb, nest);
		goto out_module_put;
	}

	nla_nest_end(skb, nest);

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	nlh->nlmsg_flags |= NLM_F_ROOT;
	module_put(ops->owner);
	err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
			     n->nlmsg_flags & NLM_F_ECHO);
	if (err > 0)
		return 0;
	if (err < 0)
		NL_SET_ERR_MSG(extack, "Failed to send TC action flush notification");

	return err;

out_module_put:
	module_put(ops->owner);
err_out:
	kfree_skb(skb);
	return err;
}

static int tcf_action_delete(struct net *net, struct tc_action *actions[])
{
	int i;

	for (i = 0; i < TCA_ACT_MAX_PRIO && actions[i]; i++) {
		struct tc_action *a = actions[i];
		const struct tc_action_ops *ops = a->ops;
		/* Actions can be deleted concurrently so we must save their
		 * type and id to search again after reference is released.
		 */
		struct tcf_idrinfo *idrinfo = a->idrinfo;
		u32 act_index = a->tcfa_index;

		actions[i] = NULL;
		if (tcf_action_put(a)) {
			/* last reference, action was deleted concurrently */
			module_put(ops->owner);
		} else {
			int ret;

			/* now do the delete */
			ret = tcf_idr_delete_index(idrinfo, act_index);
			if (ret < 0)
				return ret;
		}
	}
	return 0;
}
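
/* A flush, as implemented by tca_action_flush() above, is just the delete
 * walker run over the whole table: ops->walk() with RTM_DELACTION releases
 * every unbound action of the given kind while recording the kind and the
 * removed count (TCA_FCNT) into the notification skb, so user space learns
 * how many actions went away.  A bound action makes the walker bail out with
 * -EPERM and the flush fails as a whole.
 */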
static int
tcf_del_notify(struct net *net, struct nlmsghdr *n, struct tc_action *actions[],
	       u32 portid, size_t attr_size, struct netlink_ext_ack *extack)
{
	int ret;
	struct sk_buff *skb;

	skb = alloc_skb(attr_size <= NLMSG_GOODSIZE ? NLMSG_GOODSIZE : attr_size,
			GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, 0, RTM_DELACTION,
			 0, 2) <= 0) {
		NL_SET_ERR_MSG(extack, "Failed to fill netlink TC action attributes");
		kfree_skb(skb);
		return -EINVAL;
	}

	/* now do the delete */
	ret = tcf_action_delete(net, actions);
	if (ret < 0) {
		NL_SET_ERR_MSG(extack, "Failed to delete TC action");
		kfree_skb(skb);
		return ret;
	}

	ret = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
			     n->nlmsg_flags & NLM_F_ECHO);
	if (ret > 0)
		return 0;
	return ret;
}

static int
tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
	      u32 portid, int event, struct netlink_ext_ack *extack)
{
	int i, ret;
	struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
	struct tc_action *act;
	size_t attr_size = 0;
	struct tc_action *actions[TCA_ACT_MAX_PRIO] = {};

	ret = nla_parse_nested_deprecated(tb, TCA_ACT_MAX_PRIO, nla, NULL,
					  extack);
	if (ret < 0)
		return ret;

	if (event == RTM_DELACTION && n->nlmsg_flags & NLM_F_ROOT) {
		if (tb[1])
			return tca_action_flush(net, tb[1], n, portid, extack);

		NL_SET_ERR_MSG(extack, "Invalid netlink attributes while flushing TC action");
		return -EINVAL;
	}

	for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
		act = tcf_action_get_1(net, tb[i], n, portid, extack);
		if (IS_ERR(act)) {
			ret = PTR_ERR(act);
			goto err;
		}
		attr_size += tcf_action_fill_size(act);
		actions[i - 1] = act;
	}

	attr_size = tcf_action_full_attrs_size(attr_size);

	if (event == RTM_GETACTION)
		ret = tcf_get_notify(net, portid, n, actions, event, extack);
	else { /* delete */
		ret = tcf_del_notify(net, n, actions, portid, attr_size, extack);
		if (ret)
			goto err;
		return 0;
	}
err:
	tcf_action_put_many(actions);
	return ret;
}
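
/* For orientation, the three request shapes that funnel into tca_action_gd()
 * (the tc(8) commands are illustrative):
 *
 *	RTM_GETACTION, per-index list	// tc actions get action gact index 1
 *	RTM_DELACTION, per-index list	// tc actions del action gact index 1
 *	RTM_DELACTION | NLM_F_ROOT	// tc actions flush action gact
 *
 * The first two resolve each TCA_ACT_TAB entry through tcf_action_get_1();
 * only the NLM_F_ROOT variant takes the tca_action_flush() shortcut.
 */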
static int
tcf_add_notify(struct net *net, struct nlmsghdr *n, struct tc_action *actions[],
	       u32 portid, size_t attr_size, struct netlink_ext_ack *extack)
{
	struct sk_buff *skb;
	int err = 0;

	skb = alloc_skb(attr_size <= NLMSG_GOODSIZE ? NLMSG_GOODSIZE : attr_size,
			GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, n->nlmsg_flags,
			 RTM_NEWACTION, 0, 0) <= 0) {
		NL_SET_ERR_MSG(extack, "Failed to fill netlink attributes while adding TC action");
		kfree_skb(skb);
		return -EINVAL;
	}

	err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
			     n->nlmsg_flags & NLM_F_ECHO);
	if (err > 0)
		err = 0;
	return err;
}

static int tcf_action_add(struct net *net, struct nlattr *nla,
			  struct nlmsghdr *n, u32 portid, int ovr,
			  struct netlink_ext_ack *extack)
{
	size_t attr_size = 0;
	int loop, ret;
	struct tc_action *actions[TCA_ACT_MAX_PRIO] = {};

	for (loop = 0; loop < 10; loop++) {
		ret = tcf_action_init(net, NULL, nla, NULL, NULL, ovr, 0,
				      actions, &attr_size, true, extack);
		if (ret != -EAGAIN)
			break;
	}

	if (ret < 0)
		return ret;
	ret = tcf_add_notify(net, n, actions, portid, attr_size, extack);
	if (ovr)
		tcf_action_put_many(actions);

	return ret;
}

static const struct nla_policy tcaa_policy[TCA_ROOT_MAX + 1] = {
	[TCA_ROOT_FLAGS]	= NLA_POLICY_BITFIELD32(TCA_FLAG_LARGE_DUMP_ON),
	[TCA_ROOT_TIME_DELTA]	= { .type = NLA_U32 },
};

static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n,
			 struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_ROOT_MAX + 1];
	u32 portid = NETLINK_CB(skb).portid;
	int ret = 0, ovr = 0;

	if ((n->nlmsg_type != RTM_GETACTION) &&
	    !netlink_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	ret = nlmsg_parse_deprecated(n, sizeof(struct tcamsg), tca,
				     TCA_ROOT_MAX, NULL, extack);
	if (ret < 0)
		return ret;

	if (tca[TCA_ACT_TAB] == NULL) {
		NL_SET_ERR_MSG(extack, "Netlink action attributes missing");
		return -EINVAL;
	}

	/* n->nlmsg_flags & NLM_F_CREATE */
	switch (n->nlmsg_type) {
	case RTM_NEWACTION:
		/* We assume that any other flags imply "create only if it
		 * doesn't exist". Note that CREATE | EXCL implies that
		 * already, but since we want to avoid ambiguity (e.g. when
		 * flags is zero) we only honor NLM_F_REPLACE explicitly.
		 */
		if (n->nlmsg_flags & NLM_F_REPLACE)
			ovr = 1;
		ret = tcf_action_add(net, tca[TCA_ACT_TAB], n, portid, ovr,
				     extack);
		break;
	case RTM_DELACTION:
		ret = tca_action_gd(net, tca[TCA_ACT_TAB], n,
				    portid, RTM_DELACTION, extack);
		break;
	case RTM_GETACTION:
		ret = tca_action_gd(net, tca[TCA_ACT_TAB], n,
				    portid, RTM_GETACTION, extack);
		break;
	default:
		BUG();
	}

	return ret;
}

static struct nlattr *find_dump_kind(struct nlattr **nla)
{
	struct nlattr *tb1, *tb2[TCA_ACT_MAX + 1];
	struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
	struct nlattr *kind;

	tb1 = nla[TCA_ACT_TAB];
	if (tb1 == NULL)
		return NULL;

	if (nla_parse_deprecated(tb, TCA_ACT_MAX_PRIO, nla_data(tb1),
				 NLMSG_ALIGN(nla_len(tb1)), NULL, NULL) < 0)
		return NULL;

	if (tb[1] == NULL)
		return NULL;
	if (nla_parse_nested_deprecated(tb2, TCA_ACT_MAX, tb[1],
					tcf_action_policy, NULL) < 0)
		return NULL;
	kind = tb2[TCA_ACT_KIND];

	return kind;
}
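
/* tcf_action_add() retries tcf_action_init() because loading an action
 * module forces a replay: request_module() in tcf_action_init_1() must drop
 * the RTNL lock, so the whole transaction is restarted with -EAGAIN once the
 * module is loaded.  Each replay resolves at least one missing module, and
 * the loop is capped at ten iterations as a safety net against a replay
 * that never converges.
 */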
static int tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct nlmsghdr *nlh;
	unsigned char *b = skb_tail_pointer(skb);
	struct nlattr *nest;
	struct tc_action_ops *a_o;
	int ret = 0;
	struct tcamsg *t = (struct tcamsg *) nlmsg_data(cb->nlh);
	struct nlattr *tb[TCA_ROOT_MAX + 1];
	struct nlattr *count_attr = NULL;
	unsigned long jiffy_since = 0;
	struct nlattr *kind = NULL;
	struct nla_bitfield32 bf;
	u32 msecs_since = 0;
	u32 act_count = 0;

	ret = nlmsg_parse_deprecated(cb->nlh, sizeof(struct tcamsg), tb,
				     TCA_ROOT_MAX, tcaa_policy, cb->extack);
	if (ret < 0)
		return ret;

	kind = find_dump_kind(tb);
	if (kind == NULL) {
		pr_info("tc_dump_action: action bad kind\n");
		return 0;
	}

	a_o = tc_lookup_action(kind);
	if (a_o == NULL)
		return 0;

	cb->args[2] = 0;
	if (tb[TCA_ROOT_FLAGS]) {
		bf = nla_get_bitfield32(tb[TCA_ROOT_FLAGS]);
		cb->args[2] = bf.value;
	}

	if (tb[TCA_ROOT_TIME_DELTA]) {
		msecs_since = nla_get_u32(tb[TCA_ROOT_TIME_DELTA]);
	}

	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			cb->nlh->nlmsg_type, sizeof(*t), 0);
	if (!nlh)
		goto out_module_put;

	if (msecs_since)
		jiffy_since = jiffies - msecs_to_jiffies(msecs_since);

	t = nlmsg_data(nlh);
	t->tca_family = AF_UNSPEC;
	t->tca__pad1 = 0;
	t->tca__pad2 = 0;
	cb->args[3] = jiffy_since;
	count_attr = nla_reserve(skb, TCA_ROOT_COUNT, sizeof(u32));
	if (!count_attr)
		goto out_module_put;

	nest = nla_nest_start_noflag(skb, TCA_ACT_TAB);
	if (nest == NULL)
		goto out_module_put;

	ret = a_o->walk(net, skb, cb, RTM_GETACTION, a_o, NULL);
	if (ret < 0)
		goto out_module_put;

	if (ret > 0) {
		nla_nest_end(skb, nest);
		ret = skb->len;
		act_count = cb->args[1];
		memcpy(nla_data(count_attr), &act_count, sizeof(u32));
		cb->args[1] = 0;
	} else
		nlmsg_trim(skb, b);

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	if (NETLINK_CB(cb->skb).portid && ret)
		nlh->nlmsg_flags |= NLM_F_MULTI;
	module_put(a_o->owner);
	return skb->len;

out_module_put:
	module_put(a_o->owner);
	nlmsg_trim(skb, b);
	return skb->len;
}

static int __init tc_action_init(void)
{
	rtnl_register(PF_UNSPEC, RTM_NEWACTION, tc_ctl_action, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_DELACTION, tc_ctl_action, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_GETACTION, tc_ctl_action, tc_dump_action,
		      0);

	return 0;
}

subsys_initcall(tc_action_init);