// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/act_api.c	Packet action API.
 *
 * Author:	Jamal Hadi Salim
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/err.h>
#include <linux/module.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <net/act_api.h>
#include <net/netlink.h>

static void tcf_action_goto_chain_exec(const struct tc_action *a,
				       struct tcf_result *res)
{
	const struct tcf_chain *chain = rcu_dereference_bh(a->goto_chain);

	res->goto_tp = rcu_dereference_bh(chain->filter_chain);
}

static void tcf_free_cookie_rcu(struct rcu_head *p)
{
	struct tc_cookie *cookie = container_of(p, struct tc_cookie, rcu);

	kfree(cookie->data);
	kfree(cookie);
}

static void tcf_set_action_cookie(struct tc_cookie __rcu **old_cookie,
				  struct tc_cookie *new_cookie)
{
	struct tc_cookie *old;

	old = xchg((__force struct tc_cookie **)old_cookie, new_cookie);
	if (old)
		call_rcu(&old->rcu, tcf_free_cookie_rcu);
}

int tcf_action_check_ctrlact(int action, struct tcf_proto *tp,
			     struct tcf_chain **newchain,
			     struct netlink_ext_ack *extack)
{
	int opcode = TC_ACT_EXT_OPCODE(action), ret = -EINVAL;
	u32 chain_index;

	if (!opcode)
		ret = action > TC_ACT_VALUE_MAX ? -EINVAL : 0;
	else if (opcode <= TC_ACT_EXT_OPCODE_MAX || action == TC_ACT_UNSPEC)
		ret = 0;
	if (ret) {
		NL_SET_ERR_MSG(extack, "invalid control action");
		goto end;
	}

	if (TC_ACT_EXT_CMP(action, TC_ACT_GOTO_CHAIN)) {
		chain_index = action & TC_ACT_EXT_VAL_MASK;
		if (!tp || !newchain) {
			ret = -EINVAL;
			NL_SET_ERR_MSG(extack,
				       "can't goto NULL proto/chain");
			goto end;
		}
		*newchain = tcf_chain_get_by_act(tp->chain->block, chain_index);
		if (!*newchain) {
			ret = -ENOMEM;
			NL_SET_ERR_MSG(extack,
				       "can't allocate goto_chain");
		}
	}
end:
	return ret;
}
EXPORT_SYMBOL(tcf_action_check_ctrlact);

struct tcf_chain *tcf_action_set_ctrlact(struct tc_action *a, int action,
					 struct tcf_chain *goto_chain)
{
	a->tcfa_action = action;
	goto_chain = rcu_replace_pointer(a->goto_chain, goto_chain, 1);
	return goto_chain;
}
EXPORT_SYMBOL(tcf_action_set_ctrlact);
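
/* A minimal usage sketch (modeled on in-tree actions such as act_gact):
 * an action's ->init() first validates the control action and resolves a
 * possible goto_chain target, then commits both once the action itself
 * is set up:
 *
 *	err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
 *	if (err < 0)
 *		return err;
 *	...
 *	goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
 *	if (goto_ch)
 *		tcf_chain_put_by_act(goto_ch);
 */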

/* XXX: For standalone actions, we don't need an RCU grace period either,
 * because actions are always connected to filters and filters are already
 * destroyed in RCU callbacks, so after an RCU grace period actions are
 * already disconnected from filters. Readers can no longer find us
 * afterwards.
 */
static void free_tcf(struct tc_action *p)
{
	struct tcf_chain *chain = rcu_dereference_protected(p->goto_chain, 1);

	free_percpu(p->cpu_bstats);
	free_percpu(p->cpu_bstats_hw);
	free_percpu(p->cpu_qstats);

	tcf_set_action_cookie(&p->act_cookie, NULL);
	if (chain)
		tcf_chain_put_by_act(chain);

	kfree(p);
}

static void tcf_action_cleanup(struct tc_action *p)
{
	if (p->ops->cleanup)
		p->ops->cleanup(p);

	gen_kill_estimator(&p->tcfa_rate_est);
	free_tcf(p);
}

static int __tcf_action_put(struct tc_action *p, bool bind)
{
	struct tcf_idrinfo *idrinfo = p->idrinfo;

	if (refcount_dec_and_mutex_lock(&p->tcfa_refcnt, &idrinfo->lock)) {
		if (bind)
			atomic_dec(&p->tcfa_bindcnt);
		idr_remove(&idrinfo->action_idr, p->tcfa_index);
		mutex_unlock(&idrinfo->lock);

		tcf_action_cleanup(p);
		return 1;
	}

	if (bind)
		atomic_dec(&p->tcfa_bindcnt);

	return 0;
}
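
/* Reference counting recap (derived from the code above): tcfa_refcnt counts
 * every user of an action, tcfa_bindcnt the subset that are classifier
 * bindings.  An action created through the act API starts at refcnt 1,
 * bindcnt 0; each classifier bind bumps both.  __tcf_action_put() drops the
 * last reference while holding idrinfo->lock, so the idr entry disappears
 * atomically with respect to concurrent index lookups.
 */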

int __tcf_idr_release(struct tc_action *p, bool bind, bool strict)
{
	int ret = 0;

	/* Release with strict==1 and bind==0 is only called through the act
	 * API interface (classifiers always bind). The only case when an
	 * action with a positive reference count and zero bind count can
	 * exist is when it was also created with the act API (unbinding the
	 * last classifier will destroy an action that was created by a
	 * classifier). So the only case when the bind count can change after
	 * the initial check is when an unbound action is destroyed by the
	 * act API while a classifier concurrently binds to an action with
	 * the same id. This results either in creation of a new action (same
	 * behavior as before), or in reuse of the existing action if the
	 * concurrent process increments the reference count before the
	 * action is deleted. Both scenarios are acceptable.
	 */
	if (p) {
		if (!bind && strict && atomic_read(&p->tcfa_bindcnt) > 0)
			return -EPERM;

		if (__tcf_action_put(p, bind))
			ret = ACT_P_DELETED;
	}

	return ret;
}
EXPORT_SYMBOL(__tcf_idr_release);

static size_t tcf_action_shared_attrs_size(const struct tc_action *act)
{
	struct tc_cookie *act_cookie;
	u32 cookie_len = 0;

	rcu_read_lock();
	act_cookie = rcu_dereference(act->act_cookie);

	if (act_cookie)
		cookie_len = nla_total_size(act_cookie->len);
	rcu_read_unlock();

	return  nla_total_size(0) /* action number nested */
		+ nla_total_size(IFNAMSIZ) /* TCA_ACT_KIND */
		+ cookie_len /* TCA_ACT_COOKIE */
		+ nla_total_size(0) /* TCA_ACT_STATS nested */
		/* TCA_STATS_BASIC */
		+ nla_total_size_64bit(sizeof(struct gnet_stats_basic))
		/* TCA_STATS_PKT64 */
		+ nla_total_size_64bit(sizeof(u64))
		/* TCA_STATS_QUEUE */
		+ nla_total_size_64bit(sizeof(struct gnet_stats_queue))
		+ nla_total_size(0) /* TCA_OPTIONS nested */
		+ nla_total_size(sizeof(struct tcf_t)); /* TCA_GACT_TM */
}

static size_t tcf_action_full_attrs_size(size_t sz)
{
	return NLMSG_HDRLEN			/* struct nlmsghdr */
		+ sizeof(struct tcamsg)
		+ nla_total_size(0)		/* TCA_ACT_TAB nested */
		+ sz;
}

static size_t tcf_action_fill_size(const struct tc_action *act)
{
	size_t sz = tcf_action_shared_attrs_size(act);

	if (act->ops->get_fill_size)
		return act->ops->get_fill_size(act) + sz;
	return sz;
}
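
/* Dump state is carried in netlink_callback::args (see also
 * tc_dump_action() below): args[0] is the resume index within the idr,
 * args[1] returns the number of dumped actions when large dumps are
 * enabled, args[2] holds the TCA_ROOT_FLAGS value and args[3] the
 * "used since" cutoff in jiffies.
 */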

static int tcf_dump_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb,
			   struct netlink_callback *cb)
{
	int err = 0, index = -1, s_i = 0, n_i = 0;
	u32 act_flags = cb->args[2];
	unsigned long jiffy_since = cb->args[3];
	struct nlattr *nest;
	struct idr *idr = &idrinfo->action_idr;
	struct tc_action *p;
	unsigned long id = 1;
	unsigned long tmp;

	mutex_lock(&idrinfo->lock);

	s_i = cb->args[0];

	idr_for_each_entry_ul(idr, p, tmp, id) {
		index++;
		if (index < s_i)
			continue;

		if (jiffy_since &&
		    time_after(jiffy_since,
			       (unsigned long)p->tcfa_tm.lastuse))
			continue;

		nest = nla_nest_start_noflag(skb, n_i);
		if (!nest) {
			index--;
			goto nla_put_failure;
		}
		err = tcf_action_dump_1(skb, p, 0, 0);
		if (err < 0) {
			index--;
			nlmsg_trim(skb, nest);
			goto done;
		}
		nla_nest_end(skb, nest);
		n_i++;
		if (!(act_flags & TCA_FLAG_LARGE_DUMP_ON) &&
		    n_i >= TCA_ACT_MAX_PRIO)
			goto done;
	}
done:
	if (index >= 0)
		cb->args[0] = index + 1;

	mutex_unlock(&idrinfo->lock);
	if (n_i) {
		if (act_flags & TCA_FLAG_LARGE_DUMP_ON)
			cb->args[1] = n_i;
	}
	return n_i;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	goto done;
}

static int tcf_idr_release_unsafe(struct tc_action *p)
{
	if (atomic_read(&p->tcfa_bindcnt) > 0)
		return -EPERM;

	if (refcount_dec_and_test(&p->tcfa_refcnt)) {
		idr_remove(&p->idrinfo->action_idr, p->tcfa_index);
		tcf_action_cleanup(p);
		return ACT_P_DELETED;
	}

	return 0;
}

static int tcf_del_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb,
			  const struct tc_action_ops *ops)
{
	struct nlattr *nest;
	int n_i = 0;
	int ret = -EINVAL;
	struct idr *idr = &idrinfo->action_idr;
	struct tc_action *p;
	unsigned long id = 1;
	unsigned long tmp;

	nest = nla_nest_start_noflag(skb, 0);
	if (nest == NULL)
		goto nla_put_failure;
	if (nla_put_string(skb, TCA_KIND, ops->kind))
		goto nla_put_failure;

	mutex_lock(&idrinfo->lock);
	idr_for_each_entry_ul(idr, p, tmp, id) {
		ret = tcf_idr_release_unsafe(p);
		if (ret == ACT_P_DELETED) {
			module_put(ops->owner);
			n_i++;
		} else if (ret < 0) {
			mutex_unlock(&idrinfo->lock);
			goto nla_put_failure;
		}
	}
	mutex_unlock(&idrinfo->lock);

	if (nla_put_u32(skb, TCA_FCNT, n_i))
		goto nla_put_failure;
	nla_nest_end(skb, nest);

	return n_i;
nla_put_failure:
	nla_nest_cancel(skb, nest);
	return ret;
}

int tcf_generic_walker(struct tc_action_net *tn, struct sk_buff *skb,
		       struct netlink_callback *cb, int type,
		       const struct tc_action_ops *ops,
		       struct netlink_ext_ack *extack)
{
	struct tcf_idrinfo *idrinfo = tn->idrinfo;

	if (type == RTM_DELACTION) {
		return tcf_del_walker(idrinfo, skb, ops);
	} else if (type == RTM_GETACTION) {
		return tcf_dump_walker(idrinfo, skb, cb);
	} else {
		WARN(1, "tcf_generic_walker: unknown command %d\n", type);
		NL_SET_ERR_MSG(extack, "tcf_generic_walker: unknown command");
		return -EINVAL;
	}
}
EXPORT_SYMBOL(tcf_generic_walker);

int tcf_idr_search(struct tc_action_net *tn, struct tc_action **a, u32 index)
{
	struct tcf_idrinfo *idrinfo = tn->idrinfo;
	struct tc_action *p;

	mutex_lock(&idrinfo->lock);
	p = idr_find(&idrinfo->action_idr, index);
	if (IS_ERR(p))
		p = NULL;
	else if (p)
		refcount_inc(&p->tcfa_refcnt);
	mutex_unlock(&idrinfo->lock);

	if (p) {
		*a = p;
		return true;
	}
	return false;
}
EXPORT_SYMBOL(tcf_idr_search);

static int tcf_idr_delete_index(struct tcf_idrinfo *idrinfo, u32 index)
{
	struct tc_action *p;
	int ret = 0;

	mutex_lock(&idrinfo->lock);
	p = idr_find(&idrinfo->action_idr, index);
	if (!p) {
		mutex_unlock(&idrinfo->lock);
		return -ENOENT;
	}

	if (!atomic_read(&p->tcfa_bindcnt)) {
		if (refcount_dec_and_test(&p->tcfa_refcnt)) {
			struct module *owner = p->ops->owner;

			WARN_ON(p != idr_remove(&idrinfo->action_idr,
						p->tcfa_index));
			mutex_unlock(&idrinfo->lock);

			tcf_action_cleanup(p);
			module_put(owner);
			return 0;
		}
		ret = 0;
	} else {
		ret = -EPERM;
	}

	mutex_unlock(&idrinfo->lock);
	return ret;
}

int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est,
		   struct tc_action **a, const struct tc_action_ops *ops,
		   int bind, bool cpustats, u32 flags)
{
	struct tc_action *p = kzalloc(ops->size, GFP_KERNEL);
	struct tcf_idrinfo *idrinfo = tn->idrinfo;
	int err = -ENOMEM;

	if (unlikely(!p))
		return -ENOMEM;
	refcount_set(&p->tcfa_refcnt, 1);
	if (bind)
		atomic_set(&p->tcfa_bindcnt, 1);

	if (cpustats) {
		p->cpu_bstats = netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu);
		if (!p->cpu_bstats)
			goto err1;
		p->cpu_bstats_hw = netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu);
		if (!p->cpu_bstats_hw)
			goto err2;
		p->cpu_qstats = alloc_percpu(struct gnet_stats_queue);
		if (!p->cpu_qstats)
			goto err3;
	}
	spin_lock_init(&p->tcfa_lock);
	p->tcfa_index = index;
	p->tcfa_tm.install = jiffies;
	p->tcfa_tm.lastuse = jiffies;
	p->tcfa_tm.firstuse = 0;
	p->tcfa_flags = flags;
	if (est) {
		err = gen_new_estimator(&p->tcfa_bstats, p->cpu_bstats,
					&p->tcfa_rate_est,
					&p->tcfa_lock, NULL, est);
		if (err)
			goto err4;
	}

	p->idrinfo = idrinfo;
	p->ops = ops;
	*a = p;
	return 0;
err4:
	free_percpu(p->cpu_qstats);
err3:
	free_percpu(p->cpu_bstats_hw);
err2:
	free_percpu(p->cpu_bstats);
err1:
	kfree(p);
	return err;
}
EXPORT_SYMBOL(tcf_idr_create);

int tcf_idr_create_from_flags(struct tc_action_net *tn, u32 index,
			      struct nlattr *est, struct tc_action **a,
			      const struct tc_action_ops *ops, int bind,
			      u32 flags)
{
	/* Set cpustats according to actions flags. */
	return tcf_idr_create(tn, index, est, a, ops, bind,
			      !(flags & TCA_ACT_FLAGS_NO_PERCPU_STATS), flags);
}
EXPORT_SYMBOL(tcf_idr_create_from_flags);

void tcf_idr_insert(struct tc_action_net *tn, struct tc_action *a)
{
	struct tcf_idrinfo *idrinfo = tn->idrinfo;

	mutex_lock(&idrinfo->lock);
	/* Replace ERR_PTR(-EBUSY) allocated by tcf_idr_check_alloc */
	WARN_ON(!IS_ERR(idr_replace(&idrinfo->action_idr, a, a->tcfa_index)));
	mutex_unlock(&idrinfo->lock);
}
EXPORT_SYMBOL(tcf_idr_insert);

/* Cleanup idr index that was allocated but not initialized. */

void tcf_idr_cleanup(struct tc_action_net *tn, u32 index)
{
	struct tcf_idrinfo *idrinfo = tn->idrinfo;

	mutex_lock(&idrinfo->lock);
	/* Remove ERR_PTR(-EBUSY) allocated by tcf_idr_check_alloc */
	WARN_ON(!IS_ERR(idr_remove(&idrinfo->action_idr, index)));
	mutex_unlock(&idrinfo->lock);
}
EXPORT_SYMBOL(tcf_idr_cleanup);

/* Check if an action with the specified index exists. If the action is
 * found, increment its reference and bind counters and return 1. Otherwise
 * insert a temporary error pointer (to prevent concurrent users from
 * inserting actions with the same index) and return 0.
 */

int tcf_idr_check_alloc(struct tc_action_net *tn, u32 *index,
			struct tc_action **a, int bind)
{
	struct tcf_idrinfo *idrinfo = tn->idrinfo;
	struct tc_action *p;
	int ret;

again:
	mutex_lock(&idrinfo->lock);
	if (*index) {
		p = idr_find(&idrinfo->action_idr, *index);
		if (IS_ERR(p)) {
			/* This means that another process allocated
			 * index but did not assign the pointer yet.
			 */
			mutex_unlock(&idrinfo->lock);
			goto again;
		}

		if (p) {
			refcount_inc(&p->tcfa_refcnt);
			if (bind)
				atomic_inc(&p->tcfa_bindcnt);
			*a = p;
			ret = 1;
		} else {
			*a = NULL;
			ret = idr_alloc_u32(&idrinfo->action_idr, NULL, index,
					    *index, GFP_KERNEL);
			if (!ret)
				idr_replace(&idrinfo->action_idr,
					    ERR_PTR(-EBUSY), *index);
		}
	} else {
		*index = 1;
		*a = NULL;
		ret = idr_alloc_u32(&idrinfo->action_idr, NULL, index,
				    UINT_MAX, GFP_KERNEL);
		if (!ret)
			idr_replace(&idrinfo->action_idr, ERR_PTR(-EBUSY),
				    *index);
	}
	mutex_unlock(&idrinfo->lock);
	return ret;
}
EXPORT_SYMBOL(tcf_idr_check_alloc);
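
/* A sketch of the typical ->init() flow built on these helpers (modeled on
 * in-tree actions; error handling abridged):
 *
 *	err = tcf_idr_check_alloc(tn, &index, a, bind);
 *	if (!err) {				// index reserved for us
 *		ret = tcf_idr_create(tn, index, est, a, ops, bind,
 *				     true, flags);
 *		if (ret) {
 *			tcf_idr_cleanup(tn, index);
 *			return ret;
 *		}
 *		...				// parse action parameters
 *		tcf_idr_insert(tn, *a);		// make the action visible
 *	} else if (err > 0) {			// existing action was bound
 *		...				// update it, or -EEXIST
 *	}
 */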

void tcf_idrinfo_destroy(const struct tc_action_ops *ops,
			 struct tcf_idrinfo *idrinfo)
{
	struct idr *idr = &idrinfo->action_idr;
	struct tc_action *p;
	int ret;
	unsigned long id = 1;
	unsigned long tmp;

	idr_for_each_entry_ul(idr, p, tmp, id) {
		ret = __tcf_idr_release(p, false, true);
		if (ret == ACT_P_DELETED)
			module_put(ops->owner);
		else if (ret < 0)
			return;
	}
	idr_destroy(&idrinfo->action_idr);
}
EXPORT_SYMBOL(tcf_idrinfo_destroy);

static LIST_HEAD(act_base);
static DEFINE_RWLOCK(act_mod_lock);

int tcf_register_action(struct tc_action_ops *act,
			struct pernet_operations *ops)
{
	struct tc_action_ops *a;
	int ret;

	if (!act->act || !act->dump || !act->init || !act->walk || !act->lookup)
		return -EINVAL;

	/* We have to register pernet ops before making the action ops visible,
	 * otherwise tcf_action_init_1() could get a partially initialized
	 * netns.
	 */
	ret = register_pernet_subsys(ops);
	if (ret)
		return ret;

	write_lock(&act_mod_lock);
	list_for_each_entry(a, &act_base, head) {
		if (act->id == a->id || (strcmp(act->kind, a->kind) == 0)) {
			write_unlock(&act_mod_lock);
			unregister_pernet_subsys(ops);
			return -EEXIST;
		}
	}
	list_add_tail(&act->head, &act_base);
	write_unlock(&act_mod_lock);

	return 0;
}
EXPORT_SYMBOL(tcf_register_action);

int tcf_unregister_action(struct tc_action_ops *act,
			  struct pernet_operations *ops)
{
	struct tc_action_ops *a;
	int err = -ENOENT;

	write_lock(&act_mod_lock);
	list_for_each_entry(a, &act_base, head) {
		if (a == act) {
			list_del(&act->head);
			err = 0;
			break;
		}
	}
	write_unlock(&act_mod_lock);
	if (!err)
		unregister_pernet_subsys(ops);
	return err;
}
EXPORT_SYMBOL(tcf_unregister_action);

/* lookup by name */
static struct tc_action_ops *tc_lookup_action_n(char *kind)
{
	struct tc_action_ops *a, *res = NULL;

	if (kind) {
		read_lock(&act_mod_lock);
		list_for_each_entry(a, &act_base, head) {
			if (strcmp(kind, a->kind) == 0) {
				if (try_module_get(a->owner))
					res = a;
				break;
			}
		}
		read_unlock(&act_mod_lock);
	}
	return res;
}

/* lookup by nlattr */
static struct tc_action_ops *tc_lookup_action(struct nlattr *kind)
{
	struct tc_action_ops *a, *res = NULL;

	if (kind) {
		read_lock(&act_mod_lock);
		list_for_each_entry(a, &act_base, head) {
			if (nla_strcmp(kind, a->kind) == 0) {
				if (try_module_get(a->owner))
					res = a;
				break;
			}
		}
		read_unlock(&act_mod_lock);
	}
	return res;
}

/* TCA_ACT_MAX_PRIO is 32; jump counts therefore go up to 32 */
#define TCA_ACT_MAX_PRIO_MASK 0x1FF
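
/* Pipeline semantics (summarizing the loop below): TC_ACT_REPEAT re-runs
 * the same action, TC_ACT_PIPE continues with the next one, and any other
 * plain verdict ends the pipeline.  An extended TC_ACT_JUMP verdict encodes
 * a count n in its low bits: the walk restarts at the top of the array with
 * the first n actions skipped.  jmp_ttl bounds the number of such restarts
 * so a faulty action graph cannot loop forever.
 */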

int tcf_action_exec(struct sk_buff *skb, struct tc_action **actions,
		    int nr_actions, struct tcf_result *res)
{
	u32 jmp_prgcnt = 0;
	u32 jmp_ttl = TCA_ACT_MAX_PRIO; /* matches actions per filter */
	int i;
	int ret = TC_ACT_OK;

	if (skb_skip_tc_classify(skb))
		return TC_ACT_OK;

restart_act_graph:
	for (i = 0; i < nr_actions; i++) {
		const struct tc_action *a = actions[i];

		if (jmp_prgcnt > 0) {
			jmp_prgcnt -= 1;
			continue;
		}
repeat:
		ret = a->ops->act(skb, a, res);
		if (ret == TC_ACT_REPEAT)
			goto repeat;	/* we need a ttl - JHS */

		if (TC_ACT_EXT_CMP(ret, TC_ACT_JUMP)) {
			jmp_prgcnt = ret & TCA_ACT_MAX_PRIO_MASK;
			if (!jmp_prgcnt || (jmp_prgcnt > nr_actions)) {
				/* faulty opcode, stop pipeline */
				return TC_ACT_OK;
			} else {
				jmp_ttl -= 1;
				if (jmp_ttl > 0)
					goto restart_act_graph;
				else /* faulty graph, stop pipeline */
					return TC_ACT_OK;
			}
		} else if (TC_ACT_EXT_CMP(ret, TC_ACT_GOTO_CHAIN)) {
			if (unlikely(!rcu_access_pointer(a->goto_chain))) {
				net_warn_ratelimited("can't go to NULL chain!\n");
				return TC_ACT_SHOT;
			}
			tcf_action_goto_chain_exec(a, res);
		}

		if (ret != TC_ACT_PIPE)
			break;
	}

	return ret;
}
EXPORT_SYMBOL(tcf_action_exec);

int tcf_action_destroy(struct tc_action *actions[], int bind)
{
	const struct tc_action_ops *ops;
	struct tc_action *a;
	int ret = 0, i;

	for (i = 0; i < TCA_ACT_MAX_PRIO && actions[i]; i++) {
		a = actions[i];
		actions[i] = NULL;
		ops = a->ops;
		ret = __tcf_idr_release(a, bind, true);
		if (ret == ACT_P_DELETED)
			module_put(ops->owner);
		else if (ret < 0)
			return ret;
	}
	return ret;
}

static int tcf_action_destroy_1(struct tc_action *a, int bind)
{
	struct tc_action *actions[] = { a, NULL };

	return tcf_action_destroy(actions, bind);
}

static int tcf_action_put(struct tc_action *p)
{
	return __tcf_action_put(p, false);
}

/* Put all actions in this array, skipping NULL entries. */
static void tcf_action_put_many(struct tc_action *actions[])
{
	int i;

	for (i = 0; i < TCA_ACT_MAX_PRIO; i++) {
		struct tc_action *a = actions[i];
		const struct tc_action_ops *ops;

		if (!a)
			continue;
		ops = a->ops;
		if (tcf_action_put(a))
			module_put(ops->owner);
	}
}

int
tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
{
	return a->ops->dump(skb, a, bind, ref);
}

int
tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
{
	int err = -EINVAL;
	unsigned char *b = skb_tail_pointer(skb);
	struct nlattr *nest;
	struct tc_cookie *cookie;

	if (nla_put_string(skb, TCA_KIND, a->ops->kind))
		goto nla_put_failure;
	if (tcf_action_copy_stats(skb, a, 0))
		goto nla_put_failure;

	rcu_read_lock();
	cookie = rcu_dereference(a->act_cookie);
	if (cookie) {
		if (nla_put(skb, TCA_ACT_COOKIE, cookie->len, cookie->data)) {
			rcu_read_unlock();
			goto nla_put_failure;
		}
	}
	rcu_read_unlock();

	if (a->tcfa_flags) {
		struct nla_bitfield32 flags = { a->tcfa_flags,
						a->tcfa_flags, };

		if (nla_put(skb, TCA_ACT_FLAGS, sizeof(flags), &flags))
			goto nla_put_failure;
	}

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;
	err = tcf_action_dump_old(skb, a, bind, ref);
	if (err > 0) {
		nla_nest_end(skb, nest);
		return err;
	}

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}
EXPORT_SYMBOL(tcf_action_dump_1);

int tcf_action_dump(struct sk_buff *skb, struct tc_action *actions[],
		    int bind, int ref)
{
	struct tc_action *a;
	int err = -EINVAL, i;
	struct nlattr *nest;

	for (i = 0; i < TCA_ACT_MAX_PRIO && actions[i]; i++) {
		a = actions[i];
		nest = nla_nest_start_noflag(skb, i + 1);
		if (nest == NULL)
			goto nla_put_failure;
		err = tcf_action_dump_1(skb, a, bind, ref);
		if (err < 0)
			goto errout;
		nla_nest_end(skb, nest);
	}

	return 0;

nla_put_failure:
	err = -EINVAL;
errout:
	nla_nest_cancel(skb, nest);
	return err;
}

static struct tc_cookie *nla_memdup_cookie(struct nlattr **tb)
{
	struct tc_cookie *c = kzalloc(sizeof(*c), GFP_KERNEL);
	if (!c)
		return NULL;

	c->data = nla_memdup(tb[TCA_ACT_COOKIE], GFP_KERNEL);
	if (!c->data) {
		kfree(c);
		return NULL;
	}
	c->len = nla_len(tb[TCA_ACT_COOKIE]);

	return c;
}

static const u32 tca_act_flags_allowed = TCA_ACT_FLAGS_NO_PERCPU_STATS;
static const struct nla_policy tcf_action_policy[TCA_ACT_MAX + 1] = {
	[TCA_ACT_KIND]		= { .type = NLA_STRING },
	[TCA_ACT_INDEX]		= { .type = NLA_U32 },
	[TCA_ACT_COOKIE]	= { .type = NLA_BINARY,
				    .len = TC_COOKIE_MAX_SIZE },
	[TCA_ACT_OPTIONS]	= { .type = NLA_NESTED },
	[TCA_ACT_FLAGS]		= { .type = NLA_BITFIELD32,
				    .validation_data = &tca_act_flags_allowed },
};
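
/* Attribute layout handled below (derived from the parse/dump code): a
 * request carries TCA_ACT_TAB, which nests up to TCA_ACT_MAX_PRIO actions
 * numbered 1..n; each action nest in turn carries TCA_ACT_KIND plus
 * optional TCA_ACT_INDEX, TCA_ACT_COOKIE, TCA_ACT_FLAGS and the
 * kind-specific TCA_ACT_OPTIONS payload.
 */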

struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
				    struct nlattr *nla, struct nlattr *est,
				    char *name, int ovr, int bind,
				    bool rtnl_held,
				    struct netlink_ext_ack *extack)
{
	struct nla_bitfield32 flags = { 0, 0 };
	struct tc_action *a;
	struct tc_action_ops *a_o;
	struct tc_cookie *cookie = NULL;
	char act_name[IFNAMSIZ];
	struct nlattr *tb[TCA_ACT_MAX + 1];
	struct nlattr *kind;
	int err;

	if (name == NULL) {
		err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX, nla,
						  tcf_action_policy, extack);
		if (err < 0)
			goto err_out;
		err = -EINVAL;
		kind = tb[TCA_ACT_KIND];
		if (!kind) {
			NL_SET_ERR_MSG(extack, "TC action kind must be specified");
			goto err_out;
		}
		if (nla_strlcpy(act_name, kind, IFNAMSIZ) >= IFNAMSIZ) {
			NL_SET_ERR_MSG(extack, "TC action name too long");
			goto err_out;
		}
		if (tb[TCA_ACT_COOKIE]) {
			cookie = nla_memdup_cookie(tb);
			if (!cookie) {
				NL_SET_ERR_MSG(extack, "No memory to generate TC cookie");
				err = -ENOMEM;
				goto err_out;
			}
		}
		if (tb[TCA_ACT_FLAGS])
			flags = nla_get_bitfield32(tb[TCA_ACT_FLAGS]);
	} else {
		if (strlcpy(act_name, name, IFNAMSIZ) >= IFNAMSIZ) {
			NL_SET_ERR_MSG(extack, "TC action name too long");
			err = -EINVAL;
			goto err_out;
		}
	}

	a_o = tc_lookup_action_n(act_name);
	if (a_o == NULL) {
#ifdef CONFIG_MODULES
		if (rtnl_held)
			rtnl_unlock();
		request_module("act_%s", act_name);
		if (rtnl_held)
			rtnl_lock();

		a_o = tc_lookup_action_n(act_name);

		/* We dropped the RTNL semaphore in order to
		 * perform the module load. So, even if we
		 * succeeded in loading the module we have to
		 * tell the caller to replay the request. We
		 * indicate this using -EAGAIN.
		 */
		if (a_o != NULL) {
			err = -EAGAIN;
			goto err_mod;
		}
#endif
		NL_SET_ERR_MSG(extack, "Failed to load TC action module");
		err = -ENOENT;
		goto err_out;
	}

	/* backward compatibility for policer */
	if (name == NULL)
		err = a_o->init(net, tb[TCA_ACT_OPTIONS], est, &a, ovr, bind,
				rtnl_held, tp, flags.value, extack);
	else
		err = a_o->init(net, nla, est, &a, ovr, bind, rtnl_held,
				tp, flags.value, extack);
	if (err < 0)
		goto err_mod;

	if (!name && tb[TCA_ACT_COOKIE])
		tcf_set_action_cookie(&a->act_cookie, cookie);

	/* The module count goes up only when a brand new policy is created;
	 * if it already exists and is only bound to in a_o->init(), then
	 * ACT_P_CREATED is not returned (a zero is).
	 */
	if (err != ACT_P_CREATED)
		module_put(a_o->owner);

	if (TC_ACT_EXT_CMP(a->tcfa_action, TC_ACT_GOTO_CHAIN) &&
	    !rcu_access_pointer(a->goto_chain)) {
		tcf_action_destroy_1(a, bind);
		NL_SET_ERR_MSG(extack, "can't use goto chain with NULL chain");
		return ERR_PTR(-EINVAL);
	}

	return a;

err_mod:
	module_put(a_o->owner);
err_out:
	if (cookie) {
		kfree(cookie->data);
		kfree(cookie);
	}
	return ERR_PTR(err);
}
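
/* The -EAGAIN returned after a successful module autoload above is not an
 * error: callers are expected to replay the whole request, see the retry
 * loop in tcf_action_add() below.
 */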

/* Returns the number of initialized actions or a negative error. */

int tcf_action_init(struct net *net, struct tcf_proto *tp, struct nlattr *nla,
		    struct nlattr *est, char *name, int ovr, int bind,
		    struct tc_action *actions[], size_t *attr_size,
		    bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
	struct tc_action *act;
	size_t sz = 0;
	int err;
	int i;

	err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX_PRIO, nla, NULL,
					  extack);
	if (err < 0)
		return err;

	for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
		act = tcf_action_init_1(net, tp, tb[i], est, name, ovr, bind,
					rtnl_held, extack);
		if (IS_ERR(act)) {
			err = PTR_ERR(act);
			goto err;
		}
		sz += tcf_action_fill_size(act);
		/* Start from index 0 */
		actions[i - 1] = act;
	}

	*attr_size = tcf_action_full_attrs_size(sz);
	return i - 1;

err:
	tcf_action_destroy(actions, bind);
	return err;
}

void tcf_action_update_stats(struct tc_action *a, u64 bytes, u32 packets,
			     bool drop, bool hw)
{
	if (a->cpu_bstats) {
		_bstats_cpu_update(this_cpu_ptr(a->cpu_bstats), bytes, packets);

		if (drop)
			this_cpu_ptr(a->cpu_qstats)->drops += packets;

		if (hw)
			_bstats_cpu_update(this_cpu_ptr(a->cpu_bstats_hw),
					   bytes, packets);
		return;
	}

	_bstats_update(&a->tcfa_bstats, bytes, packets);
	if (drop)
		a->tcfa_qstats.drops += packets;
	if (hw)
		_bstats_update(&a->tcfa_bstats_hw, bytes, packets);
}
EXPORT_SYMBOL(tcf_action_update_stats);
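
/* A hedged usage sketch (the actual call sites live in the individual
 * actions, e.g. act_gact; skb->len and the verdict test here are
 * illustrative): an action's ->act() would account a packet with something
 * like
 *
 *	tcf_action_update_stats(a, skb->len, 1, ret == TC_ACT_SHOT, false);
 *
 * taking the per-CPU fast path whenever the action was created with
 * per-CPU counters.
 */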
1053 */ 1054 if (compat_mode) { 1055 if (p->type == TCA_OLD_COMPAT) 1056 err = gnet_stats_start_copy_compat(skb, 0, 1057 TCA_STATS, 1058 TCA_XSTATS, 1059 &p->tcfa_lock, &d, 1060 TCA_PAD); 1061 else 1062 return 0; 1063 } else 1064 err = gnet_stats_start_copy(skb, TCA_ACT_STATS, 1065 &p->tcfa_lock, &d, TCA_ACT_PAD); 1066 1067 if (err < 0) 1068 goto errout; 1069 1070 if (gnet_stats_copy_basic(NULL, &d, p->cpu_bstats, &p->tcfa_bstats) < 0 || 1071 gnet_stats_copy_basic_hw(NULL, &d, p->cpu_bstats_hw, 1072 &p->tcfa_bstats_hw) < 0 || 1073 gnet_stats_copy_rate_est(&d, &p->tcfa_rate_est) < 0 || 1074 gnet_stats_copy_queue(&d, p->cpu_qstats, 1075 &p->tcfa_qstats, 1076 p->tcfa_qstats.qlen) < 0) 1077 goto errout; 1078 1079 if (gnet_stats_finish_copy(&d) < 0) 1080 goto errout; 1081 1082 return 0; 1083 1084 errout: 1085 return -1; 1086 } 1087 1088 static int tca_get_fill(struct sk_buff *skb, struct tc_action *actions[], 1089 u32 portid, u32 seq, u16 flags, int event, int bind, 1090 int ref) 1091 { 1092 struct tcamsg *t; 1093 struct nlmsghdr *nlh; 1094 unsigned char *b = skb_tail_pointer(skb); 1095 struct nlattr *nest; 1096 1097 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*t), flags); 1098 if (!nlh) 1099 goto out_nlmsg_trim; 1100 t = nlmsg_data(nlh); 1101 t->tca_family = AF_UNSPEC; 1102 t->tca__pad1 = 0; 1103 t->tca__pad2 = 0; 1104 1105 nest = nla_nest_start_noflag(skb, TCA_ACT_TAB); 1106 if (!nest) 1107 goto out_nlmsg_trim; 1108 1109 if (tcf_action_dump(skb, actions, bind, ref) < 0) 1110 goto out_nlmsg_trim; 1111 1112 nla_nest_end(skb, nest); 1113 1114 nlh->nlmsg_len = skb_tail_pointer(skb) - b; 1115 return skb->len; 1116 1117 out_nlmsg_trim: 1118 nlmsg_trim(skb, b); 1119 return -1; 1120 } 1121 1122 static int 1123 tcf_get_notify(struct net *net, u32 portid, struct nlmsghdr *n, 1124 struct tc_action *actions[], int event, 1125 struct netlink_ext_ack *extack) 1126 { 1127 struct sk_buff *skb; 1128 1129 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); 1130 if (!skb) 1131 return -ENOBUFS; 1132 if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, 0, event, 1133 0, 1) <= 0) { 1134 NL_SET_ERR_MSG(extack, "Failed to fill netlink attributes while adding TC action"); 1135 kfree_skb(skb); 1136 return -EINVAL; 1137 } 1138 1139 return rtnl_unicast(skb, net, portid); 1140 } 1141 1142 static struct tc_action *tcf_action_get_1(struct net *net, struct nlattr *nla, 1143 struct nlmsghdr *n, u32 portid, 1144 struct netlink_ext_ack *extack) 1145 { 1146 struct nlattr *tb[TCA_ACT_MAX + 1]; 1147 const struct tc_action_ops *ops; 1148 struct tc_action *a; 1149 int index; 1150 int err; 1151 1152 err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX, nla, 1153 tcf_action_policy, extack); 1154 if (err < 0) 1155 goto err_out; 1156 1157 err = -EINVAL; 1158 if (tb[TCA_ACT_INDEX] == NULL || 1159 nla_len(tb[TCA_ACT_INDEX]) < sizeof(index)) { 1160 NL_SET_ERR_MSG(extack, "Invalid TC action index value"); 1161 goto err_out; 1162 } 1163 index = nla_get_u32(tb[TCA_ACT_INDEX]); 1164 1165 err = -EINVAL; 1166 ops = tc_lookup_action(tb[TCA_ACT_KIND]); 1167 if (!ops) { /* could happen in batch of actions */ 1168 NL_SET_ERR_MSG(extack, "Specified TC action kind not found"); 1169 goto err_out; 1170 } 1171 err = -ENOENT; 1172 if (ops->lookup(net, &a, index) == 0) { 1173 NL_SET_ERR_MSG(extack, "TC action with specified index not found"); 1174 goto err_mod; 1175 } 1176 1177 module_put(ops->owner); 1178 return a; 1179 1180 err_mod: 1181 module_put(ops->owner); 1182 err_out: 1183 return ERR_PTR(err); 1184 } 1185 1186 static int tca_action_flush(struct net 

static int tca_action_flush(struct net *net, struct nlattr *nla,
			    struct nlmsghdr *n, u32 portid,
			    struct netlink_ext_ack *extack)
{
	struct sk_buff *skb;
	unsigned char *b;
	struct nlmsghdr *nlh;
	struct tcamsg *t;
	struct netlink_callback dcb;
	struct nlattr *nest;
	struct nlattr *tb[TCA_ACT_MAX + 1];
	const struct tc_action_ops *ops;
	struct nlattr *kind;
	int err = -ENOMEM;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return err;

	b = skb_tail_pointer(skb);

	err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX, nla,
					  tcf_action_policy, extack);
	if (err < 0)
		goto err_out;

	err = -EINVAL;
	kind = tb[TCA_ACT_KIND];
	ops = tc_lookup_action(kind);
	if (!ops) { /* someone is trying to flush an unknown action */
		NL_SET_ERR_MSG(extack, "Cannot flush unknown TC action");
		goto err_out;
	}

	nlh = nlmsg_put(skb, portid, n->nlmsg_seq, RTM_DELACTION,
			sizeof(*t), 0);
	if (!nlh) {
		NL_SET_ERR_MSG(extack, "Failed to create TC action flush notification");
		goto out_module_put;
	}
	t = nlmsg_data(nlh);
	t->tca_family = AF_UNSPEC;
	t->tca__pad1 = 0;
	t->tca__pad2 = 0;

	nest = nla_nest_start_noflag(skb, TCA_ACT_TAB);
	if (!nest) {
		NL_SET_ERR_MSG(extack, "Failed to add new netlink message");
		goto out_module_put;
	}

	err = ops->walk(net, skb, &dcb, RTM_DELACTION, ops, extack);
	if (err <= 0) {
		nla_nest_cancel(skb, nest);
		goto out_module_put;
	}

	nla_nest_end(skb, nest);

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	nlh->nlmsg_flags |= NLM_F_ROOT;
	module_put(ops->owner);
	err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
			     n->nlmsg_flags & NLM_F_ECHO);
	if (err > 0)
		return 0;
	if (err < 0)
		NL_SET_ERR_MSG(extack, "Failed to send TC action flush notification");

	return err;

out_module_put:
	module_put(ops->owner);
err_out:
	kfree_skb(skb);
	return err;
}

static int tcf_action_delete(struct net *net, struct tc_action *actions[])
{
	int i;

	for (i = 0; i < TCA_ACT_MAX_PRIO && actions[i]; i++) {
		struct tc_action *a = actions[i];
		const struct tc_action_ops *ops = a->ops;
		/* Actions can be deleted concurrently so we must save their
		 * type and id to search again after reference is released.
		 */
		struct tcf_idrinfo *idrinfo = a->idrinfo;
		u32 act_index = a->tcfa_index;

		actions[i] = NULL;
		if (tcf_action_put(a)) {
			/* last reference, action was deleted concurrently */
			module_put(ops->owner);
		} else {
			int ret;

			/* now do the delete */
			ret = tcf_idr_delete_index(idrinfo, act_index);
			if (ret < 0)
				return ret;
		}
	}
	return 0;
}

static int
tcf_del_notify(struct net *net, struct nlmsghdr *n, struct tc_action *actions[],
	       u32 portid, size_t attr_size, struct netlink_ext_ack *extack)
{
	int ret;
	struct sk_buff *skb;

	skb = alloc_skb(attr_size <= NLMSG_GOODSIZE ? NLMSG_GOODSIZE : attr_size,
			GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, 0, RTM_DELACTION,
			 0, 2) <= 0) {
		NL_SET_ERR_MSG(extack, "Failed to fill netlink TC action attributes");
		kfree_skb(skb);
		return -EINVAL;
	}

	/* now do the delete */
	ret = tcf_action_delete(net, actions);
	if (ret < 0) {
		NL_SET_ERR_MSG(extack, "Failed to delete TC action");
		kfree_skb(skb);
		return ret;
	}

	ret = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
			     n->nlmsg_flags & NLM_F_ECHO);
	if (ret > 0)
		return 0;
	return ret;
}

static int
tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
	      u32 portid, int event, struct netlink_ext_ack *extack)
{
	int i, ret;
	struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
	struct tc_action *act;
	size_t attr_size = 0;
	struct tc_action *actions[TCA_ACT_MAX_PRIO] = {};

	ret = nla_parse_nested_deprecated(tb, TCA_ACT_MAX_PRIO, nla, NULL,
					  extack);
	if (ret < 0)
		return ret;

	if (event == RTM_DELACTION && n->nlmsg_flags & NLM_F_ROOT) {
		if (tb[1])
			return tca_action_flush(net, tb[1], n, portid, extack);

		NL_SET_ERR_MSG(extack, "Invalid netlink attributes while flushing TC action");
		return -EINVAL;
	}

	for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
		act = tcf_action_get_1(net, tb[i], n, portid, extack);
		if (IS_ERR(act)) {
			ret = PTR_ERR(act);
			goto err;
		}
		attr_size += tcf_action_fill_size(act);
		actions[i - 1] = act;
	}

	attr_size = tcf_action_full_attrs_size(attr_size);

	if (event == RTM_GETACTION)
		ret = tcf_get_notify(net, portid, n, actions, event, extack);
	else { /* delete */
		ret = tcf_del_notify(net, n, actions, portid, attr_size, extack);
		if (ret)
			goto err;
		return 0;
	}
err:
	tcf_action_put_many(actions);
	return ret;
}

static int
tcf_add_notify(struct net *net, struct nlmsghdr *n, struct tc_action *actions[],
	       u32 portid, size_t attr_size, struct netlink_ext_ack *extack)
{
	struct sk_buff *skb;
	int err = 0;

	skb = alloc_skb(attr_size <= NLMSG_GOODSIZE ? NLMSG_GOODSIZE : attr_size,
			GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, n->nlmsg_flags,
			 RTM_NEWACTION, 0, 0) <= 0) {
		NL_SET_ERR_MSG(extack, "Failed to fill netlink attributes while adding TC action");
		kfree_skb(skb);
		return -EINVAL;
	}

	err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
			     n->nlmsg_flags & NLM_F_ECHO);
	if (err > 0)
		err = 0;
	return err;
}

static int tcf_action_add(struct net *net, struct nlattr *nla,
			  struct nlmsghdr *n, u32 portid, int ovr,
			  struct netlink_ext_ack *extack)
{
	size_t attr_size = 0;
	int loop, ret;
	struct tc_action *actions[TCA_ACT_MAX_PRIO] = {};

	for (loop = 0; loop < 10; loop++) {
		ret = tcf_action_init(net, NULL, nla, NULL, NULL, ovr, 0,
				      actions, &attr_size, true, extack);
		if (ret != -EAGAIN)
			break;
	}

	if (ret < 0)
		return ret;
	ret = tcf_add_notify(net, n, actions, portid, attr_size, extack);
	if (ovr)
		tcf_action_put_many(actions);

	return ret;
}

static u32 tcaa_root_flags_allowed = TCA_FLAG_LARGE_DUMP_ON;
static const struct nla_policy tcaa_policy[TCA_ROOT_MAX + 1] = {
	[TCA_ROOT_FLAGS]	= { .type = NLA_BITFIELD32,
				    .validation_data = &tcaa_root_flags_allowed },
	[TCA_ROOT_TIME_DELTA]	= { .type = NLA_U32 },
};

static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n,
			 struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_ROOT_MAX + 1];
	u32 portid = skb ? NETLINK_CB(skb).portid : 0;
	int ret = 0, ovr = 0;

	if ((n->nlmsg_type != RTM_GETACTION) &&
	    !netlink_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	ret = nlmsg_parse_deprecated(n, sizeof(struct tcamsg), tca,
				     TCA_ROOT_MAX, NULL, extack);
	if (ret < 0)
		return ret;

	if (tca[TCA_ACT_TAB] == NULL) {
		NL_SET_ERR_MSG(extack, "Netlink action attributes missing");
		return -EINVAL;
	}

	/* n->nlmsg_flags & NLM_F_CREATE */
	switch (n->nlmsg_type) {
	case RTM_NEWACTION:
		/* We are going to assume that all other flags imply
		 * "create only if it doesn't exist". Note that CREATE | EXCL
		 * implies that already, but since we want to avoid ambiguity
		 * (e.g. when flags is zero) we just set ovr explicitly.
		 */
		if (n->nlmsg_flags & NLM_F_REPLACE)
			ovr = 1;
		ret = tcf_action_add(net, tca[TCA_ACT_TAB], n, portid, ovr,
				     extack);
		break;
	case RTM_DELACTION:
		ret = tca_action_gd(net, tca[TCA_ACT_TAB], n,
				    portid, RTM_DELACTION, extack);
		break;
	case RTM_GETACTION:
		ret = tca_action_gd(net, tca[TCA_ACT_TAB], n,
				    portid, RTM_GETACTION, extack);
		break;
	default:
		BUG();
	}

	return ret;
}

static struct nlattr *find_dump_kind(struct nlattr **nla)
{
	struct nlattr *tb1, *tb2[TCA_ACT_MAX + 1];
	struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
	struct nlattr *kind;

	tb1 = nla[TCA_ACT_TAB];
	if (tb1 == NULL)
		return NULL;

	if (nla_parse_deprecated(tb, TCA_ACT_MAX_PRIO, nla_data(tb1),
				 NLMSG_ALIGN(nla_len(tb1)), NULL, NULL) < 0)
		return NULL;

	if (tb[1] == NULL)
		return NULL;
	if (nla_parse_nested_deprecated(tb2, TCA_ACT_MAX, tb[1],
					tcf_action_policy, NULL) < 0)
		return NULL;
	kind = tb2[TCA_ACT_KIND];

	return kind;
}
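
/* Dump entry point: a dump request may set TCA_ROOT_FLAGS with
 * TCA_FLAG_LARGE_DUMP_ON to lift the TCA_ACT_MAX_PRIO-per-message limit,
 * and TCA_ROOT_TIME_DELTA (in msecs) to restrict the dump to actions used
 * within that window; the total count is echoed back in TCA_ROOT_COUNT.
 */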
static int tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct nlmsghdr *nlh;
	unsigned char *b = skb_tail_pointer(skb);
	struct nlattr *nest;
	struct tc_action_ops *a_o;
	int ret = 0;
	struct tcamsg *t = (struct tcamsg *) nlmsg_data(cb->nlh);
	struct nlattr *tb[TCA_ROOT_MAX + 1];
	struct nlattr *count_attr = NULL;
	unsigned long jiffy_since = 0;
	struct nlattr *kind = NULL;
	struct nla_bitfield32 bf;
	u32 msecs_since = 0;
	u32 act_count = 0;

	ret = nlmsg_parse_deprecated(cb->nlh, sizeof(struct tcamsg), tb,
				     TCA_ROOT_MAX, tcaa_policy, cb->extack);
	if (ret < 0)
		return ret;

	kind = find_dump_kind(tb);
	if (kind == NULL) {
		pr_info("tc_dump_action: action bad kind\n");
		return 0;
	}

	a_o = tc_lookup_action(kind);
	if (a_o == NULL)
		return 0;

	cb->args[2] = 0;
	if (tb[TCA_ROOT_FLAGS]) {
		bf = nla_get_bitfield32(tb[TCA_ROOT_FLAGS]);
		cb->args[2] = bf.value;
	}

	if (tb[TCA_ROOT_TIME_DELTA]) {
		msecs_since = nla_get_u32(tb[TCA_ROOT_TIME_DELTA]);
	}

	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			cb->nlh->nlmsg_type, sizeof(*t), 0);
	if (!nlh)
		goto out_module_put;

	if (msecs_since)
		jiffy_since = jiffies - msecs_to_jiffies(msecs_since);

	t = nlmsg_data(nlh);
	t->tca_family = AF_UNSPEC;
	t->tca__pad1 = 0;
	t->tca__pad2 = 0;
	cb->args[3] = jiffy_since;
	count_attr = nla_reserve(skb, TCA_ROOT_COUNT, sizeof(u32));
	if (!count_attr)
		goto out_module_put;

	nest = nla_nest_start_noflag(skb, TCA_ACT_TAB);
	if (nest == NULL)
		goto out_module_put;

	ret = a_o->walk(net, skb, cb, RTM_GETACTION, a_o, NULL);
	if (ret < 0)
		goto out_module_put;

	if (ret > 0) {
		nla_nest_end(skb, nest);
		ret = skb->len;
		act_count = cb->args[1];
		memcpy(nla_data(count_attr), &act_count, sizeof(u32));
		cb->args[1] = 0;
	} else
		nlmsg_trim(skb, b);

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	if (NETLINK_CB(cb->skb).portid && ret)
		nlh->nlmsg_flags |= NLM_F_MULTI;
	module_put(a_o->owner);
	return skb->len;

out_module_put:
	module_put(a_o->owner);
	nlmsg_trim(skb, b);
	return skb->len;
}

static int __init tc_action_init(void)
{
	rtnl_register(PF_UNSPEC, RTM_NEWACTION, tc_ctl_action, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_DELACTION, tc_ctl_action, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_GETACTION, tc_ctl_action, tc_dump_action,
		      0);

	return 0;
}

subsys_initcall(tc_action_init);