/*
 * net/sched/act_api.c	Packet action API.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Author:	Jamal Hadi Salim
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/rhashtable.h>
#include <linux/list.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <net/act_api.h>
#include <net/netlink.h>

static int tcf_action_goto_chain_init(struct tc_action *a, struct tcf_proto *tp)
{
	u32 chain_index = a->tcfa_action & TC_ACT_EXT_VAL_MASK;

	if (!tp)
		return -EINVAL;
	a->goto_chain = tcf_chain_get(tp->chain->block, chain_index, true);
	if (!a->goto_chain)
		return -ENOMEM;
	return 0;
}

static void tcf_action_goto_chain_fini(struct tc_action *a)
{
	tcf_chain_put(a->goto_chain);
}

static void tcf_action_goto_chain_exec(const struct tc_action *a,
				       struct tcf_result *res)
{
	const struct tcf_chain *chain = a->goto_chain;

	res->goto_tp = rcu_dereference_bh(chain->filter_chain);
}

static void tcf_free_cookie_rcu(struct rcu_head *p)
{
	struct tc_cookie *cookie = container_of(p, struct tc_cookie, rcu);

	kfree(cookie->data);
	kfree(cookie);
}

static void tcf_set_action_cookie(struct tc_cookie __rcu **old_cookie,
				  struct tc_cookie *new_cookie)
{
	struct tc_cookie *old;

	old = xchg(old_cookie, new_cookie);
	if (old)
		call_rcu(&old->rcu, tcf_free_cookie_rcu);
}

/* XXX: For standalone actions, we don't need a RCU grace period either, because
 * actions are always connected to filters and filters are already destroyed in
 * RCU callbacks, so after a RCU grace period actions are already disconnected
 * from filters. Readers later can not find us.
 */
static void free_tcf(struct tc_action *p)
{
	free_percpu(p->cpu_bstats);
	free_percpu(p->cpu_qstats);

	tcf_set_action_cookie(&p->act_cookie, NULL);
	if (p->goto_chain)
		tcf_action_goto_chain_fini(p);

	kfree(p);
}

static void tcf_action_cleanup(struct tc_action *p)
{
	if (p->ops->cleanup)
		p->ops->cleanup(p);

	gen_kill_estimator(&p->tcfa_rate_est);
	free_tcf(p);
}
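/* Drop a reference to the action. Returns 1 if the last reference was dropped
 * and the action was unhashed and destroyed, 0 otherwise. The final decrement
 * is done under idrinfo->lock so that a concurrent lookup cannot revive an
 * action that is about to be freed.
 */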
static int __tcf_action_put(struct tc_action *p, bool bind)
{
	struct tcf_idrinfo *idrinfo = p->idrinfo;

	if (refcount_dec_and_lock(&p->tcfa_refcnt, &idrinfo->lock)) {
		if (bind)
			atomic_dec(&p->tcfa_bindcnt);
		idr_remove(&idrinfo->action_idr, p->tcfa_index);
		spin_unlock(&idrinfo->lock);

		tcf_action_cleanup(p);
		return 1;
	}

	if (bind)
		atomic_dec(&p->tcfa_bindcnt);

	return 0;
}

int __tcf_idr_release(struct tc_action *p, bool bind, bool strict)
{
	int ret = 0;

	/* Release with strict==1 and bind==0 is only called through act API
	 * interface (classifiers always bind). The only case when an action
	 * with positive reference count and zero bind count can exist is when
	 * it was also created through the act API (unbinding the last
	 * classifier will destroy an action that was created by a classifier).
	 * So the only case when the bind count can change after the initial
	 * check is when an unbound action is destroyed through the act API
	 * while a classifier concurrently binds to an action with the same id.
	 * This results either in creation of a new action (same behavior as
	 * before), or in reuse of the existing action if the concurrent
	 * process increments the reference count before the action is deleted.
	 * Both scenarios are acceptable.
	 */
	if (p) {
		if (!bind && strict && atomic_read(&p->tcfa_bindcnt) > 0)
			return -EPERM;

		if (__tcf_action_put(p, bind))
			ret = ACT_P_DELETED;
	}

	return ret;
}
EXPORT_SYMBOL(__tcf_idr_release);

static size_t tcf_action_shared_attrs_size(const struct tc_action *act)
{
	u32 cookie_len = 0;

	if (act->act_cookie)
		cookie_len = nla_total_size(act->act_cookie->len);

	return  nla_total_size(0) /* action number nested */
		+ nla_total_size(IFNAMSIZ) /* TCA_ACT_KIND */
		+ cookie_len /* TCA_ACT_COOKIE */
		+ nla_total_size(0) /* TCA_ACT_STATS nested */
		/* TCA_STATS_BASIC */
		+ nla_total_size_64bit(sizeof(struct gnet_stats_basic))
		/* TCA_STATS_QUEUE */
		+ nla_total_size_64bit(sizeof(struct gnet_stats_queue))
		+ nla_total_size(0) /* TCA_OPTIONS nested */
		+ nla_total_size(sizeof(struct tcf_t)); /* TCA_GACT_TM */
}

static size_t tcf_action_full_attrs_size(size_t sz)
{
	return NLMSG_HDRLEN			/* struct nlmsghdr */
		+ sizeof(struct tcamsg)
		+ nla_total_size(0)		/* TCA_ACT_TAB nested */
		+ sz;
}

static size_t tcf_action_fill_size(const struct tc_action *act)
{
	size_t sz = tcf_action_shared_attrs_size(act);

	if (act->ops->get_fill_size)
		return act->ops->get_fill_size(act) + sz;
	return sz;
}

static int tcf_dump_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb,
			   struct netlink_callback *cb)
{
	int err = 0, index = -1, s_i = 0, n_i = 0;
	u32 act_flags = cb->args[2];
	unsigned long jiffy_since = cb->args[3];
	struct nlattr *nest;
	struct idr *idr = &idrinfo->action_idr;
	struct tc_action *p;
	unsigned long id = 1;

	spin_lock(&idrinfo->lock);

	s_i = cb->args[0];

	idr_for_each_entry_ul(idr, p, id) {
		index++;
		if (index < s_i)
			continue;

		if (jiffy_since &&
		    time_after(jiffy_since,
			       (unsigned long)p->tcfa_tm.lastuse))
			continue;

		nest = nla_nest_start(skb, n_i);
		if (!nest) {
			index--;
			goto nla_put_failure;
		}
		err = tcf_action_dump_1(skb, p, 0, 0);
		if (err < 0) {
			index--;
			nlmsg_trim(skb, nest);
			goto done;
		}
		nla_nest_end(skb, nest);
		n_i++;
		if (!(act_flags & TCA_FLAG_LARGE_DUMP_ON) &&
		    n_i >= TCA_ACT_MAX_PRIO)
			goto done;
	}
done:
	if (index >= 0)
		cb->args[0] = index + 1;

	spin_unlock(&idrinfo->lock);
	if (n_i) {
		if (act_flags & TCA_FLAG_LARGE_DUMP_ON)
			cb->args[1] = n_i;
	}
	return n_i;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	goto done;
}
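/* Walk all actions of one kind and release them as if deleted through the act
 * API (strict mode); used by RTM_DELACTION flush. The number of actions
 * actually deleted is reported back in a TCA_FCNT attribute.
 */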
static int tcf_del_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb,
			  const struct tc_action_ops *ops)
{
	struct nlattr *nest;
	int n_i = 0;
	int ret = -EINVAL;
	struct idr *idr = &idrinfo->action_idr;
	struct tc_action *p;
	unsigned long id = 1;

	nest = nla_nest_start(skb, 0);
	if (nest == NULL)
		goto nla_put_failure;
	if (nla_put_string(skb, TCA_KIND, ops->kind))
		goto nla_put_failure;

	idr_for_each_entry_ul(idr, p, id) {
		ret = __tcf_idr_release(p, false, true);
		if (ret == ACT_P_DELETED) {
			module_put(ops->owner);
			n_i++;
		} else if (ret < 0) {
			goto nla_put_failure;
		}
	}
	if (nla_put_u32(skb, TCA_FCNT, n_i))
		goto nla_put_failure;
	nla_nest_end(skb, nest);

	return n_i;
nla_put_failure:
	nla_nest_cancel(skb, nest);
	return ret;
}

int tcf_generic_walker(struct tc_action_net *tn, struct sk_buff *skb,
		       struct netlink_callback *cb, int type,
		       const struct tc_action_ops *ops,
		       struct netlink_ext_ack *extack)
{
	struct tcf_idrinfo *idrinfo = tn->idrinfo;

	if (type == RTM_DELACTION) {
		return tcf_del_walker(idrinfo, skb, ops);
	} else if (type == RTM_GETACTION) {
		return tcf_dump_walker(idrinfo, skb, cb);
	} else {
		WARN(1, "tcf_generic_walker: unknown command %d\n", type);
		NL_SET_ERR_MSG(extack, "tcf_generic_walker: unknown command");
		return -EINVAL;
	}
}
EXPORT_SYMBOL(tcf_generic_walker);

static bool __tcf_idr_check(struct tc_action_net *tn, u32 index,
			    struct tc_action **a, int bind)
{
	struct tcf_idrinfo *idrinfo = tn->idrinfo;
	struct tc_action *p;

	spin_lock(&idrinfo->lock);
	p = idr_find(&idrinfo->action_idr, index);
	if (p) {
		refcount_inc(&p->tcfa_refcnt);
		if (bind)
			atomic_inc(&p->tcfa_bindcnt);
	}
	spin_unlock(&idrinfo->lock);

	if (p) {
		*a = p;
		return true;
	}
	return false;
}

int tcf_idr_search(struct tc_action_net *tn, struct tc_action **a, u32 index)
{
	return __tcf_idr_check(tn, index, a, 0);
}
EXPORT_SYMBOL(tcf_idr_search);

bool tcf_idr_check(struct tc_action_net *tn, u32 index, struct tc_action **a,
		   int bind)
{
	return __tcf_idr_check(tn, index, a, bind);
}
EXPORT_SYMBOL(tcf_idr_check);

int tcf_idr_delete_index(struct tc_action_net *tn, u32 index)
{
	struct tcf_idrinfo *idrinfo = tn->idrinfo;
	struct tc_action *p;
	int ret = 0;

	spin_lock(&idrinfo->lock);
	p = idr_find(&idrinfo->action_idr, index);
	if (!p) {
		spin_unlock(&idrinfo->lock);
		return -ENOENT;
	}

	if (!atomic_read(&p->tcfa_bindcnt)) {
		if (refcount_dec_and_test(&p->tcfa_refcnt)) {
			struct module *owner = p->ops->owner;

			WARN_ON(p != idr_remove(&idrinfo->action_idr,
						p->tcfa_index));
			spin_unlock(&idrinfo->lock);

			tcf_action_cleanup(p);
			module_put(owner);
			return 0;
		}
		ret = 0;
	} else {
		ret = -EPERM;
	}

	spin_unlock(&idrinfo->lock);
	return ret;
}
EXPORT_SYMBOL(tcf_idr_delete_index);
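/* Allocate a new action and reserve its index. The idr slot is first filled
 * with a NULL placeholder so that concurrent lookups cannot see a partially
 * initialized action; tcf_idr_insert() later replaces the placeholder with
 * the fully set-up action to make it visible.
 */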
int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est,
		   struct tc_action **a, const struct tc_action_ops *ops,
		   int bind, bool cpustats)
{
	struct tc_action *p = kzalloc(ops->size, GFP_KERNEL);
	struct tcf_idrinfo *idrinfo = tn->idrinfo;
	struct idr *idr = &idrinfo->action_idr;
	int err = -ENOMEM;

	if (unlikely(!p))
		return -ENOMEM;
	refcount_set(&p->tcfa_refcnt, 1);
	if (bind)
		atomic_set(&p->tcfa_bindcnt, 1);

	if (cpustats) {
		p->cpu_bstats = netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu);
		if (!p->cpu_bstats)
			goto err1;
		p->cpu_qstats = alloc_percpu(struct gnet_stats_queue);
		if (!p->cpu_qstats)
			goto err2;
	}
	spin_lock_init(&p->tcfa_lock);
	idr_preload(GFP_KERNEL);
	spin_lock(&idrinfo->lock);
	/* user doesn't specify an index */
	if (!index) {
		index = 1;
		err = idr_alloc_u32(idr, NULL, &index, UINT_MAX, GFP_ATOMIC);
	} else {
		err = idr_alloc_u32(idr, NULL, &index, index, GFP_ATOMIC);
	}
	spin_unlock(&idrinfo->lock);
	idr_preload_end();
	if (err)
		goto err3;

	p->tcfa_index = index;
	p->tcfa_tm.install = jiffies;
	p->tcfa_tm.lastuse = jiffies;
	p->tcfa_tm.firstuse = 0;
	if (est) {
		err = gen_new_estimator(&p->tcfa_bstats, p->cpu_bstats,
					&p->tcfa_rate_est,
					&p->tcfa_lock, NULL, est);
		if (err)
			goto err4;
	}

	p->idrinfo = idrinfo;
	p->ops = ops;
	INIT_LIST_HEAD(&p->list);
	*a = p;
	return 0;
err4:
	idr_remove(idr, index);
err3:
	free_percpu(p->cpu_qstats);
err2:
	free_percpu(p->cpu_bstats);
err1:
	kfree(p);
	return err;
}
EXPORT_SYMBOL(tcf_idr_create);

void tcf_idr_insert(struct tc_action_net *tn, struct tc_action *a)
{
	struct tcf_idrinfo *idrinfo = tn->idrinfo;

	spin_lock(&idrinfo->lock);
	idr_replace(&idrinfo->action_idr, a, a->tcfa_index);
	spin_unlock(&idrinfo->lock);
}
EXPORT_SYMBOL(tcf_idr_insert);

void tcf_idrinfo_destroy(const struct tc_action_ops *ops,
			 struct tcf_idrinfo *idrinfo)
{
	struct idr *idr = &idrinfo->action_idr;
	struct tc_action *p;
	int ret;
	unsigned long id = 1;

	idr_for_each_entry_ul(idr, p, id) {
		ret = __tcf_idr_release(p, false, true);
		if (ret == ACT_P_DELETED)
			module_put(ops->owner);
		else if (ret < 0)
			return;
	}
	idr_destroy(&idrinfo->action_idr);
}
EXPORT_SYMBOL(tcf_idrinfo_destroy);
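/* Registry of all loaded action kinds. Action modules add their tc_action_ops
 * here via tcf_register_action(); lookups by kind string or netlink attribute
 * take the lock for reading and grab a module reference on success.
 */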
static LIST_HEAD(act_base);
static DEFINE_RWLOCK(act_mod_lock);

int tcf_register_action(struct tc_action_ops *act,
			struct pernet_operations *ops)
{
	struct tc_action_ops *a;
	int ret;

	if (!act->act || !act->dump || !act->init || !act->walk || !act->lookup)
		return -EINVAL;

	/* We have to register pernet ops before making the action ops visible,
	 * otherwise tcf_action_init_1() could get a partially initialized
	 * netns.
	 */
	ret = register_pernet_subsys(ops);
	if (ret)
		return ret;

	write_lock(&act_mod_lock);
	list_for_each_entry(a, &act_base, head) {
		if (act->type == a->type || (strcmp(act->kind, a->kind) == 0)) {
			write_unlock(&act_mod_lock);
			unregister_pernet_subsys(ops);
			return -EEXIST;
		}
	}
	list_add_tail(&act->head, &act_base);
	write_unlock(&act_mod_lock);

	return 0;
}
EXPORT_SYMBOL(tcf_register_action);

int tcf_unregister_action(struct tc_action_ops *act,
			  struct pernet_operations *ops)
{
	struct tc_action_ops *a;
	int err = -ENOENT;

	write_lock(&act_mod_lock);
	list_for_each_entry(a, &act_base, head) {
		if (a == act) {
			list_del(&act->head);
			err = 0;
			break;
		}
	}
	write_unlock(&act_mod_lock);
	if (!err)
		unregister_pernet_subsys(ops);
	return err;
}
EXPORT_SYMBOL(tcf_unregister_action);

/* lookup by name */
static struct tc_action_ops *tc_lookup_action_n(char *kind)
{
	struct tc_action_ops *a, *res = NULL;

	if (kind) {
		read_lock(&act_mod_lock);
		list_for_each_entry(a, &act_base, head) {
			if (strcmp(kind, a->kind) == 0) {
				if (try_module_get(a->owner))
					res = a;
				break;
			}
		}
		read_unlock(&act_mod_lock);
	}
	return res;
}

/* lookup by nlattr */
static struct tc_action_ops *tc_lookup_action(struct nlattr *kind)
{
	struct tc_action_ops *a, *res = NULL;

	if (kind) {
		read_lock(&act_mod_lock);
		list_for_each_entry(a, &act_base, head) {
			if (nla_strcmp(kind, a->kind) == 0) {
				if (try_module_get(a->owner))
					res = a;
				break;
			}
		}
		read_unlock(&act_mod_lock);
	}
	return res;
}

/* TCA_ACT_MAX_PRIO is 32; a jump opcode can therefore index up to 32 actions */
#define TCA_ACT_MAX_PRIO_MASK 0x1FF
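/* Run the action pipeline for one skb. TC_ACT_PIPE continues to the next
 * action; a TC_ACT_JUMP result encodes the number of actions to skip in its
 * low bits (masked by TCA_ACT_MAX_PRIO_MASK), bounded by jmp_ttl restarts to
 * guard against looping graphs; TC_ACT_GOTO_CHAIN hands the result over to
 * another filter chain. Any other verdict terminates the loop.
 */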
int tcf_action_exec(struct sk_buff *skb, struct tc_action **actions,
		    int nr_actions, struct tcf_result *res)
{
	u32 jmp_prgcnt = 0;
	u32 jmp_ttl = TCA_ACT_MAX_PRIO; /* matches actions per filter */
	int i;
	int ret = TC_ACT_OK;

	if (skb_skip_tc_classify(skb))
		return TC_ACT_OK;

restart_act_graph:
	for (i = 0; i < nr_actions; i++) {
		const struct tc_action *a = actions[i];

		if (jmp_prgcnt > 0) {
			jmp_prgcnt -= 1;
			continue;
		}
repeat:
		ret = a->ops->act(skb, a, res);
		if (ret == TC_ACT_REPEAT)
			goto repeat;	/* we need a ttl - JHS */

		if (TC_ACT_EXT_CMP(ret, TC_ACT_JUMP)) {
			jmp_prgcnt = ret & TCA_ACT_MAX_PRIO_MASK;
			if (!jmp_prgcnt || (jmp_prgcnt > nr_actions)) {
				/* faulty opcode, stop pipeline */
				return TC_ACT_OK;
			} else {
				jmp_ttl -= 1;
				if (jmp_ttl > 0)
					goto restart_act_graph;
				else /* faulty graph, stop pipeline */
					return TC_ACT_OK;
			}
		} else if (TC_ACT_EXT_CMP(ret, TC_ACT_GOTO_CHAIN)) {
			tcf_action_goto_chain_exec(a, res);
		}

		if (ret != TC_ACT_PIPE)
			break;
	}

	return ret;
}
EXPORT_SYMBOL(tcf_action_exec);

int tcf_action_destroy(struct list_head *actions, int bind)
{
	const struct tc_action_ops *ops;
	struct tc_action *a, *tmp;
	int ret = 0;

	list_for_each_entry_safe(a, tmp, actions, list) {
		ops = a->ops;
		ret = __tcf_idr_release(a, bind, true);
		if (ret == ACT_P_DELETED)
			module_put(ops->owner);
		else if (ret < 0)
			return ret;
	}
	return ret;
}

static int tcf_action_put(struct tc_action *p)
{
	return __tcf_action_put(p, false);
}

int
tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
{
	return a->ops->dump(skb, a, bind, ref);
}

int
tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
{
	int err = -EINVAL;
	unsigned char *b = skb_tail_pointer(skb);
	struct nlattr *nest;
	struct tc_cookie *cookie;

	if (nla_put_string(skb, TCA_KIND, a->ops->kind))
		goto nla_put_failure;
	if (tcf_action_copy_stats(skb, a, 0))
		goto nla_put_failure;

	rcu_read_lock();
	cookie = rcu_dereference(a->act_cookie);
	if (cookie) {
		if (nla_put(skb, TCA_ACT_COOKIE, cookie->len, cookie->data)) {
			rcu_read_unlock();
			goto nla_put_failure;
		}
	}
	rcu_read_unlock();

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;
	err = tcf_action_dump_old(skb, a, bind, ref);
	if (err > 0) {
		nla_nest_end(skb, nest);
		return err;
	}

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}
EXPORT_SYMBOL(tcf_action_dump_1);

int tcf_action_dump(struct sk_buff *skb, struct list_head *actions,
		    int bind, int ref)
{
	struct tc_action *a;
	int err = -EINVAL;
	struct nlattr *nest;

	list_for_each_entry(a, actions, list) {
		nest = nla_nest_start(skb, a->order);
		if (nest == NULL)
			goto nla_put_failure;
		err = tcf_action_dump_1(skb, a, bind, ref);
		if (err < 0)
			goto errout;
		nla_nest_end(skb, nest);
	}

	return 0;

nla_put_failure:
	err = -EINVAL;
errout:
	nla_nest_cancel(skb, nest);
	return err;
}

static struct tc_cookie *nla_memdup_cookie(struct nlattr **tb)
{
	struct tc_cookie *c = kzalloc(sizeof(*c), GFP_KERNEL);

	if (!c)
		return NULL;

	c->data = nla_memdup(tb[TCA_ACT_COOKIE], GFP_KERNEL);
	if (!c->data) {
		kfree(c);
		return NULL;
	}
	c->len = nla_len(tb[TCA_ACT_COOKIE]);

	return c;
}
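/* Create or look up a single action from its netlink attributes. If the kind
 * is not registered, try to load the matching "act_<kind>" module; because
 * that may mean dropping the RTNL lock, a successful load is reported as
 * -EAGAIN so the caller replays the whole request against the now-registered
 * ops.
 */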
struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
				    struct nlattr *nla, struct nlattr *est,
				    char *name, int ovr, int bind,
				    bool rtnl_held,
				    struct netlink_ext_ack *extack)
{
	struct tc_action *a;
	struct tc_action_ops *a_o;
	struct tc_cookie *cookie = NULL;
	char act_name[IFNAMSIZ];
	struct nlattr *tb[TCA_ACT_MAX + 1];
	struct nlattr *kind;
	int err;

	if (name == NULL) {
		err = nla_parse_nested(tb, TCA_ACT_MAX, nla, NULL, extack);
		if (err < 0)
			goto err_out;
		err = -EINVAL;
		kind = tb[TCA_ACT_KIND];
		if (!kind) {
			NL_SET_ERR_MSG(extack, "TC action kind must be specified");
			goto err_out;
		}
		if (nla_strlcpy(act_name, kind, IFNAMSIZ) >= IFNAMSIZ) {
			NL_SET_ERR_MSG(extack, "TC action name too long");
			goto err_out;
		}
		if (tb[TCA_ACT_COOKIE]) {
			int cklen = nla_len(tb[TCA_ACT_COOKIE]);

			if (cklen > TC_COOKIE_MAX_SIZE) {
				NL_SET_ERR_MSG(extack, "TC cookie size above the maximum");
				goto err_out;
			}

			cookie = nla_memdup_cookie(tb);
			if (!cookie) {
				NL_SET_ERR_MSG(extack, "No memory to generate TC cookie");
				err = -ENOMEM;
				goto err_out;
			}
		}
	} else {
		if (strlcpy(act_name, name, IFNAMSIZ) >= IFNAMSIZ) {
			NL_SET_ERR_MSG(extack, "TC action name too long");
			err = -EINVAL;
			goto err_out;
		}
	}

	a_o = tc_lookup_action_n(act_name);
	if (a_o == NULL) {
#ifdef CONFIG_MODULES
		if (rtnl_held)
			rtnl_unlock();
		request_module("act_%s", act_name);
		if (rtnl_held)
			rtnl_lock();

		a_o = tc_lookup_action_n(act_name);

		/* We dropped the RTNL semaphore in order to
		 * perform the module load. So, even if we
		 * succeeded in loading the module we have to
		 * tell the caller to replay the request. We
		 * indicate this using -EAGAIN.
		 */
		if (a_o != NULL) {
			err = -EAGAIN;
			goto err_mod;
		}
#endif
		NL_SET_ERR_MSG(extack, "Failed to load TC action module");
		err = -ENOENT;
		goto err_out;
	}

	/* backward compatibility for policer */
	if (name == NULL)
		err = a_o->init(net, tb[TCA_ACT_OPTIONS], est, &a, ovr, bind,
				rtnl_held, extack);
	else
		err = a_o->init(net, nla, est, &a, ovr, bind, rtnl_held,
				extack);
	if (err < 0)
		goto err_mod;

	if (!name && tb[TCA_ACT_COOKIE])
		tcf_set_action_cookie(&a->act_cookie, cookie);

	/* The module count goes up only when a brand new policy is created;
	 * if it already exists and is only bound to in a_o->init(), then
	 * ACT_P_CREATED is not returned (a zero is).
	 */
	if (err != ACT_P_CREATED)
		module_put(a_o->owner);

	if (TC_ACT_EXT_CMP(a->tcfa_action, TC_ACT_GOTO_CHAIN)) {
		err = tcf_action_goto_chain_init(a, tp);
		if (err) {
			LIST_HEAD(actions);

			list_add_tail(&a->list, &actions);
			tcf_action_destroy(&actions, bind);
			NL_SET_ERR_MSG(extack, "Failed to init TC action chain");
			return ERR_PTR(err);
		}
	}

	return a;

err_mod:
	module_put(a_o->owner);
err_out:
	if (cookie) {
		kfree(cookie->data);
		kfree(cookie);
	}
	return ERR_PTR(err);
}

static void cleanup_a(struct list_head *actions, int ovr)
{
	struct tc_action *a;

	if (!ovr)
		return;

	list_for_each_entry(a, actions, list)
		refcount_dec(&a->tcfa_refcnt);
}

int tcf_action_init(struct net *net, struct tcf_proto *tp, struct nlattr *nla,
		    struct nlattr *est, char *name, int ovr, int bind,
		    struct list_head *actions, size_t *attr_size,
		    bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
	struct tc_action *act;
	size_t sz = 0;
	int err;
	int i;

	err = nla_parse_nested(tb, TCA_ACT_MAX_PRIO, nla, NULL, extack);
	if (err < 0)
		return err;

	for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
		act = tcf_action_init_1(net, tp, tb[i], est, name, ovr, bind,
					rtnl_held, extack);
		if (IS_ERR(act)) {
			err = PTR_ERR(act);
			goto err;
		}
		act->order = i;
		sz += tcf_action_fill_size(act);
		if (ovr)
			refcount_inc(&act->tcfa_refcnt);
		list_add_tail(&act->list, actions);
	}

	*attr_size = tcf_action_full_attrs_size(sz);

	/* Remove the temp refcnt which was necessary to protect against
	 * destroying an existing action which was being replaced
	 */
	cleanup_a(actions, ovr);
	return 0;

err:
	tcf_action_destroy(actions, bind);
	return err;
}
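/* Copy an action's statistics into a netlink dump. With compat_mode set (and
 * an old-style TCA_OLD_COMPAT action) the legacy TCA_STATS/TCA_XSTATS layout
 * is used; otherwise everything is nested under TCA_ACT_STATS.
 */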
int tcf_action_copy_stats(struct sk_buff *skb, struct tc_action *p,
			  int compat_mode)
{
	int err = 0;
	struct gnet_dump d;

	if (p == NULL)
		goto errout;

	/* compat_mode being true specifies a call that is supposed
	 * to add additional backward compatibility statistic TLVs.
	 */
	if (compat_mode) {
		if (p->type == TCA_OLD_COMPAT)
			err = gnet_stats_start_copy_compat(skb, 0,
							   TCA_STATS,
							   TCA_XSTATS,
							   &p->tcfa_lock, &d,
							   TCA_PAD);
		else
			return 0;
	} else
		err = gnet_stats_start_copy(skb, TCA_ACT_STATS,
					    &p->tcfa_lock, &d, TCA_ACT_PAD);

	if (err < 0)
		goto errout;

	if (gnet_stats_copy_basic(NULL, &d, p->cpu_bstats, &p->tcfa_bstats) < 0 ||
	    gnet_stats_copy_rate_est(&d, &p->tcfa_rate_est) < 0 ||
	    gnet_stats_copy_queue(&d, p->cpu_qstats,
				  &p->tcfa_qstats,
				  p->tcfa_qstats.qlen) < 0)
		goto errout;

	if (gnet_stats_finish_copy(&d) < 0)
		goto errout;

	return 0;

errout:
	return -1;
}

static int tca_get_fill(struct sk_buff *skb, struct list_head *actions,
			u32 portid, u32 seq, u16 flags, int event, int bind,
			int ref)
{
	struct tcamsg *t;
	struct nlmsghdr *nlh;
	unsigned char *b = skb_tail_pointer(skb);
	struct nlattr *nest;

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*t), flags);
	if (!nlh)
		goto out_nlmsg_trim;
	t = nlmsg_data(nlh);
	t->tca_family = AF_UNSPEC;
	t->tca__pad1 = 0;
	t->tca__pad2 = 0;

	nest = nla_nest_start(skb, TCA_ACT_TAB);
	if (!nest)
		goto out_nlmsg_trim;

	if (tcf_action_dump(skb, actions, bind, ref) < 0)
		goto out_nlmsg_trim;

	nla_nest_end(skb, nest);

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

out_nlmsg_trim:
	nlmsg_trim(skb, b);
	return -1;
}

static int
tcf_get_notify(struct net *net, u32 portid, struct nlmsghdr *n,
	       struct list_head *actions, int event,
	       struct netlink_ext_ack *extack)
{
	struct sk_buff *skb;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;
	if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, 0, event,
			 0, 1) <= 0) {
		NL_SET_ERR_MSG(extack, "Failed to fill netlink attributes while adding TC action");
		kfree_skb(skb);
		return -EINVAL;
	}

	return rtnl_unicast(skb, net, portid);
}

static struct tc_action *tcf_action_get_1(struct net *net, struct nlattr *nla,
					  struct nlmsghdr *n, u32 portid,
					  struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_ACT_MAX + 1];
	const struct tc_action_ops *ops;
	struct tc_action *a;
	int index;
	int err;

	err = nla_parse_nested(tb, TCA_ACT_MAX, nla, NULL, extack);
	if (err < 0)
		goto err_out;

	err = -EINVAL;
	if (tb[TCA_ACT_INDEX] == NULL ||
	    nla_len(tb[TCA_ACT_INDEX]) < sizeof(index)) {
		NL_SET_ERR_MSG(extack, "Invalid TC action index value");
		goto err_out;
	}
	index = nla_get_u32(tb[TCA_ACT_INDEX]);

	err = -EINVAL;
	ops = tc_lookup_action(tb[TCA_ACT_KIND]);
	if (!ops) { /* could happen in batch of actions */
		NL_SET_ERR_MSG(extack, "Specified TC action not found");
		goto err_out;
	}
	err = -ENOENT;
	if (ops->lookup(net, &a, index, extack) == 0)
		goto err_mod;

	module_put(ops->owner);
	return a;

err_mod:
	module_put(ops->owner);
err_out:
	return ERR_PTR(err);
}
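/* Flush every action of one kind in response to RTM_DELACTION with NLM_F_ROOT
 * set. The deletions are performed by the kind's walk() callback via
 * tcf_del_walker(), and the same skb doubles as the notification that is
 * multicast to RTNLGRP_TC.
 */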
static int tca_action_flush(struct net *net, struct nlattr *nla,
			    struct nlmsghdr *n, u32 portid,
			    struct netlink_ext_ack *extack)
{
	struct sk_buff *skb;
	unsigned char *b;
	struct nlmsghdr *nlh;
	struct tcamsg *t;
	struct netlink_callback dcb;
	struct nlattr *nest;
	struct nlattr *tb[TCA_ACT_MAX + 1];
	const struct tc_action_ops *ops;
	struct nlattr *kind;
	int err = -ENOMEM;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return err;

	b = skb_tail_pointer(skb);

	err = nla_parse_nested(tb, TCA_ACT_MAX, nla, NULL, extack);
	if (err < 0)
		goto err_out;

	err = -EINVAL;
	kind = tb[TCA_ACT_KIND];
	ops = tc_lookup_action(kind);
	if (!ops) { /* some idjot trying to flush unknown action */
		NL_SET_ERR_MSG(extack, "Cannot flush unknown TC action");
		goto err_out;
	}

	nlh = nlmsg_put(skb, portid, n->nlmsg_seq, RTM_DELACTION,
			sizeof(*t), 0);
	if (!nlh) {
		NL_SET_ERR_MSG(extack, "Failed to create TC action flush notification");
		goto out_module_put;
	}
	t = nlmsg_data(nlh);
	t->tca_family = AF_UNSPEC;
	t->tca__pad1 = 0;
	t->tca__pad2 = 0;

	nest = nla_nest_start(skb, TCA_ACT_TAB);
	if (!nest) {
		NL_SET_ERR_MSG(extack, "Failed to add new netlink message");
		goto out_module_put;
	}

	err = ops->walk(net, skb, &dcb, RTM_DELACTION, ops, extack);
	if (err <= 0) {
		nla_nest_cancel(skb, nest);
		goto out_module_put;
	}

	nla_nest_end(skb, nest);

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	nlh->nlmsg_flags |= NLM_F_ROOT;
	module_put(ops->owner);
	err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
			     n->nlmsg_flags & NLM_F_ECHO);
	if (err > 0)
		return 0;
	if (err < 0)
		NL_SET_ERR_MSG(extack, "Failed to send TC action flush notification");

	return err;

out_module_put:
	module_put(ops->owner);
err_out:
	kfree_skb(skb);
	return err;
}

static int tcf_action_delete(struct net *net, struct list_head *actions,
			     struct netlink_ext_ack *extack)
{
	struct tc_action *a, *tmp;
	u32 act_index;
	int ret;

	list_for_each_entry_safe(a, tmp, actions, list) {
		const struct tc_action_ops *ops = a->ops;

		/* Actions can be deleted concurrently so we must save their
		 * type and id to search again after reference is released.
		 */
		act_index = a->tcfa_index;

		list_del(&a->list);
		if (tcf_action_put(a)) {
			/* last reference, action was deleted concurrently */
			module_put(ops->owner);
		} else {
			/* now do the delete */
			ret = ops->delete(net, act_index);
			if (ret < 0)
				return ret;
		}
	}
	return 0;
}
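/* Build the RTM_DELACTION notification before actually deleting, using the
 * attribute size precomputed by tcf_action_fill_size(); the skb is allocated
 * large enough up front so that a batched delete cannot overflow
 * NLMSG_GOODSIZE.
 */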
static int
tcf_del_notify(struct net *net, struct nlmsghdr *n, struct list_head *actions,
	       u32 portid, size_t attr_size, struct netlink_ext_ack *extack)
{
	int ret;
	struct sk_buff *skb;

	skb = alloc_skb(attr_size <= NLMSG_GOODSIZE ? NLMSG_GOODSIZE : attr_size,
			GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, 0, RTM_DELACTION,
			 0, 2) <= 0) {
		NL_SET_ERR_MSG(extack, "Failed to fill netlink TC action attributes");
		kfree_skb(skb);
		return -EINVAL;
	}

	/* now do the delete */
	ret = tcf_action_delete(net, actions, extack);
	if (ret < 0) {
		NL_SET_ERR_MSG(extack, "Failed to delete TC action");
		kfree_skb(skb);
		return ret;
	}

	ret = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
			     n->nlmsg_flags & NLM_F_ECHO);
	if (ret > 0)
		return 0;
	return ret;
}

static int
tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
	      u32 portid, int event, struct netlink_ext_ack *extack)
{
	int i, ret;
	struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
	struct tc_action *act;
	size_t attr_size = 0;
	LIST_HEAD(actions);

	ret = nla_parse_nested(tb, TCA_ACT_MAX_PRIO, nla, NULL, extack);
	if (ret < 0)
		return ret;

	if (event == RTM_DELACTION && n->nlmsg_flags & NLM_F_ROOT) {
		if (tb[1])
			return tca_action_flush(net, tb[1], n, portid, extack);

		NL_SET_ERR_MSG(extack, "Invalid netlink attributes while flushing TC action");
		return -EINVAL;
	}

	for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
		act = tcf_action_get_1(net, tb[i], n, portid, extack);
		if (IS_ERR(act)) {
			ret = PTR_ERR(act);
			goto err;
		}
		act->order = i;
		attr_size += tcf_action_fill_size(act);
		list_add_tail(&act->list, &actions);
	}

	attr_size = tcf_action_full_attrs_size(attr_size);

	if (event == RTM_GETACTION)
		ret = tcf_get_notify(net, portid, n, &actions, event, extack);
	else { /* delete */
		ret = tcf_del_notify(net, n, &actions, portid, attr_size, extack);
		if (ret)
			goto err;
		return ret;
	}
err:
	tcf_action_destroy(&actions, 0);
	return ret;
}
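/* Echo a successful RTM_NEWACTION back to userspace and to RTNLGRP_TC
 * listeners; like tcf_del_notify(), the skb is sized from the precomputed
 * attribute size so large batches fit.
 */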
static int
tcf_add_notify(struct net *net, struct nlmsghdr *n, struct list_head *actions,
	       u32 portid, size_t attr_size, struct netlink_ext_ack *extack)
{
	struct sk_buff *skb;
	int err = 0;

	skb = alloc_skb(attr_size <= NLMSG_GOODSIZE ? NLMSG_GOODSIZE : attr_size,
			GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, n->nlmsg_flags,
			 RTM_NEWACTION, 0, 0) <= 0) {
		NL_SET_ERR_MSG(extack, "Failed to fill netlink attributes while adding TC action");
		kfree_skb(skb);
		return -EINVAL;
	}

	err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
			     n->nlmsg_flags & NLM_F_ECHO);
	if (err > 0)
		err = 0;
	return err;
}

static int tcf_action_add(struct net *net, struct nlattr *nla,
			  struct nlmsghdr *n, u32 portid, int ovr,
			  struct netlink_ext_ack *extack)
{
	size_t attr_size = 0;
	int ret = 0;
	LIST_HEAD(actions);

	ret = tcf_action_init(net, NULL, nla, NULL, NULL, ovr, 0, &actions,
			      &attr_size, true, extack);
	if (ret)
		return ret;

	return tcf_add_notify(net, n, &actions, portid, attr_size, extack);
}

static u32 tcaa_root_flags_allowed = TCA_FLAG_LARGE_DUMP_ON;
static const struct nla_policy tcaa_policy[TCA_ROOT_MAX + 1] = {
	[TCA_ROOT_FLAGS] = { .type = NLA_BITFIELD32,
			     .validation_data = &tcaa_root_flags_allowed },
	[TCA_ROOT_TIME_DELTA] = { .type = NLA_U32 },
};

static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n,
			 struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_ROOT_MAX + 1];
	u32 portid = skb ? NETLINK_CB(skb).portid : 0;
	int ret = 0, ovr = 0;

	if ((n->nlmsg_type != RTM_GETACTION) &&
	    !netlink_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	ret = nlmsg_parse(n, sizeof(struct tcamsg), tca, TCA_ROOT_MAX, NULL,
			  extack);
	if (ret < 0)
		return ret;

	if (tca[TCA_ACT_TAB] == NULL) {
		NL_SET_ERR_MSG(extack, "Netlink action attributes missing");
		return -EINVAL;
	}

	/* n->nlmsg_flags & NLM_F_CREATE */
	switch (n->nlmsg_type) {
	case RTM_NEWACTION:
		/* We are going to assume all other flags imply "create only
		 * if it doesn't exist". Note that CREATE | EXCL implies that,
		 * but since we want to avoid ambiguity (e.g. when flags is
		 * zero) we just set this explicitly.
		 */
		if (n->nlmsg_flags & NLM_F_REPLACE)
			ovr = 1;
replay:
		ret = tcf_action_add(net, tca[TCA_ACT_TAB], n, portid, ovr,
				     extack);
		if (ret == -EAGAIN)
			goto replay;
		break;
	case RTM_DELACTION:
		ret = tca_action_gd(net, tca[TCA_ACT_TAB], n,
				    portid, RTM_DELACTION, extack);
		break;
	case RTM_GETACTION:
		ret = tca_action_gd(net, tca[TCA_ACT_TAB], n,
				    portid, RTM_GETACTION, extack);
		break;
	default:
		BUG();
	}

	return ret;
}

static struct nlattr *find_dump_kind(struct nlattr **nla)
{
	struct nlattr *tb1, *tb2[TCA_ACT_MAX + 1];
	struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
	struct nlattr *kind;

	tb1 = nla[TCA_ACT_TAB];
	if (tb1 == NULL)
		return NULL;

	if (nla_parse(tb, TCA_ACT_MAX_PRIO, nla_data(tb1),
		      NLMSG_ALIGN(nla_len(tb1)), NULL, NULL) < 0)
		return NULL;

	if (tb[1] == NULL)
		return NULL;
	if (nla_parse_nested(tb2, TCA_ACT_MAX, tb[1], NULL, NULL) < 0)
		return NULL;
	kind = tb2[TCA_ACT_KIND];

	return kind;
}
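/* Dump all actions of the kind named in the request. Dump state is carried in
 * cb->args: [0] is the walker's resume index, [1] returns the action count
 * when TCA_FLAG_LARGE_DUMP_ON is set, [2] holds the requested root flags and
 * [3] the "changed since" cutoff in jiffies derived from TCA_ROOT_TIME_DELTA.
 */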
static int tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct nlmsghdr *nlh;
	unsigned char *b = skb_tail_pointer(skb);
	struct nlattr *nest;
	struct tc_action_ops *a_o;
	int ret = 0;
	struct tcamsg *t = (struct tcamsg *) nlmsg_data(cb->nlh);
	struct nlattr *tb[TCA_ROOT_MAX + 1];
	struct nlattr *count_attr = NULL;
	unsigned long jiffy_since = 0;
	struct nlattr *kind = NULL;
	struct nla_bitfield32 bf;
	u32 msecs_since = 0;
	u32 act_count = 0;

	ret = nlmsg_parse(cb->nlh, sizeof(struct tcamsg), tb, TCA_ROOT_MAX,
			  tcaa_policy, NULL);
	if (ret < 0)
		return ret;

	kind = find_dump_kind(tb);
	if (kind == NULL) {
		pr_info("tc_dump_action: action bad kind\n");
		return 0;
	}

	a_o = tc_lookup_action(kind);
	if (a_o == NULL)
		return 0;

	cb->args[2] = 0;
	if (tb[TCA_ROOT_FLAGS]) {
		bf = nla_get_bitfield32(tb[TCA_ROOT_FLAGS]);
		cb->args[2] = bf.value;
	}

	if (tb[TCA_ROOT_TIME_DELTA])
		msecs_since = nla_get_u32(tb[TCA_ROOT_TIME_DELTA]);

	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			cb->nlh->nlmsg_type, sizeof(*t), 0);
	if (!nlh)
		goto out_module_put;

	if (msecs_since)
		jiffy_since = jiffies - msecs_to_jiffies(msecs_since);

	t = nlmsg_data(nlh);
	t->tca_family = AF_UNSPEC;
	t->tca__pad1 = 0;
	t->tca__pad2 = 0;
	cb->args[3] = jiffy_since;
	count_attr = nla_reserve(skb, TCA_ROOT_COUNT, sizeof(u32));
	if (!count_attr)
		goto out_module_put;

	nest = nla_nest_start(skb, TCA_ACT_TAB);
	if (nest == NULL)
		goto out_module_put;

	ret = a_o->walk(net, skb, cb, RTM_GETACTION, a_o, NULL);
	if (ret < 0)
		goto out_module_put;

	if (ret > 0) {
		nla_nest_end(skb, nest);
		ret = skb->len;
		act_count = cb->args[1];
		memcpy(nla_data(count_attr), &act_count, sizeof(u32));
		cb->args[1] = 0;
	} else
		nlmsg_trim(skb, b);

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	if (NETLINK_CB(cb->skb).portid && ret)
		nlh->nlmsg_flags |= NLM_F_MULTI;
	module_put(a_o->owner);
	return skb->len;

out_module_put:
	module_put(a_o->owner);
	nlmsg_trim(skb, b);
	return skb->len;
}

struct tcf_action_net {
	struct rhashtable egdev_ht;
};

static unsigned int tcf_action_net_id;

struct tcf_action_egdev_cb {
	struct list_head list;
	tc_setup_cb_t *cb;
	void *cb_priv;
};

struct tcf_action_egdev {
	struct rhash_head ht_node;
	const struct net_device *dev;
	unsigned int refcnt;
	struct list_head cb_list;
};

static const struct rhashtable_params tcf_action_egdev_ht_params = {
	.key_offset = offsetof(struct tcf_action_egdev, dev),
	.head_offset = offsetof(struct tcf_action_egdev, ht_node),
	.key_len = sizeof(const struct net_device *),
};

static struct tcf_action_egdev *
tcf_action_egdev_lookup(const struct net_device *dev)
{
	struct net *net = dev_net(dev);
	struct tcf_action_net *tan = net_generic(net, tcf_action_net_id);

	return rhashtable_lookup_fast(&tan->egdev_ht, &dev,
				      tcf_action_egdev_ht_params);
}
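/* Look up the egdev entry for @dev, creating it on first use. Entries are
 * refcounted per registered callback; serialization is assumed to come from
 * RTNL, which the public register/unregister wrappers below take explicitly.
 */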
static struct tcf_action_egdev *
tcf_action_egdev_get(const struct net_device *dev)
{
	struct tcf_action_egdev *egdev;
	struct tcf_action_net *tan;

	egdev = tcf_action_egdev_lookup(dev);
	if (egdev)
		goto inc_ref;

	egdev = kzalloc(sizeof(*egdev), GFP_KERNEL);
	if (!egdev)
		return NULL;
	INIT_LIST_HEAD(&egdev->cb_list);
	egdev->dev = dev;
	tan = net_generic(dev_net(dev), tcf_action_net_id);
	rhashtable_insert_fast(&tan->egdev_ht, &egdev->ht_node,
			       tcf_action_egdev_ht_params);

inc_ref:
	egdev->refcnt++;
	return egdev;
}

static void tcf_action_egdev_put(struct tcf_action_egdev *egdev)
{
	struct tcf_action_net *tan;

	if (--egdev->refcnt)
		return;
	tan = net_generic(dev_net(egdev->dev), tcf_action_net_id);
	rhashtable_remove_fast(&tan->egdev_ht, &egdev->ht_node,
			       tcf_action_egdev_ht_params);
	kfree(egdev);
}

static struct tcf_action_egdev_cb *
tcf_action_egdev_cb_lookup(struct tcf_action_egdev *egdev,
			   tc_setup_cb_t *cb, void *cb_priv)
{
	struct tcf_action_egdev_cb *egdev_cb;

	list_for_each_entry(egdev_cb, &egdev->cb_list, list)
		if (egdev_cb->cb == cb && egdev_cb->cb_priv == cb_priv)
			return egdev_cb;
	return NULL;
}

static int tcf_action_egdev_cb_call(struct tcf_action_egdev *egdev,
				    enum tc_setup_type type,
				    void *type_data, bool err_stop)
{
	struct tcf_action_egdev_cb *egdev_cb;
	int ok_count = 0;
	int err;

	list_for_each_entry(egdev_cb, &egdev->cb_list, list) {
		err = egdev_cb->cb(type, type_data, egdev_cb->cb_priv);
		if (err) {
			if (err_stop)
				return err;
		} else {
			ok_count++;
		}
	}
	return ok_count;
}

static int tcf_action_egdev_cb_add(struct tcf_action_egdev *egdev,
				   tc_setup_cb_t *cb, void *cb_priv)
{
	struct tcf_action_egdev_cb *egdev_cb;

	egdev_cb = tcf_action_egdev_cb_lookup(egdev, cb, cb_priv);
	if (WARN_ON(egdev_cb))
		return -EEXIST;
	egdev_cb = kzalloc(sizeof(*egdev_cb), GFP_KERNEL);
	if (!egdev_cb)
		return -ENOMEM;
	egdev_cb->cb = cb;
	egdev_cb->cb_priv = cb_priv;
	list_add(&egdev_cb->list, &egdev->cb_list);
	return 0;
}

static void tcf_action_egdev_cb_del(struct tcf_action_egdev *egdev,
				    tc_setup_cb_t *cb, void *cb_priv)
{
	struct tcf_action_egdev_cb *egdev_cb;

	egdev_cb = tcf_action_egdev_cb_lookup(egdev, cb, cb_priv);
	if (WARN_ON(!egdev_cb))
		return;
	list_del(&egdev_cb->list);
	kfree(egdev_cb);
}

static int __tc_setup_cb_egdev_register(const struct net_device *dev,
					tc_setup_cb_t *cb, void *cb_priv)
{
	struct tcf_action_egdev *egdev = tcf_action_egdev_get(dev);
	int err;

	if (!egdev)
		return -ENOMEM;
	err = tcf_action_egdev_cb_add(egdev, cb, cb_priv);
	if (err)
		goto err_cb_add;
	return 0;

err_cb_add:
	tcf_action_egdev_put(egdev);
	return err;
}

int tc_setup_cb_egdev_register(const struct net_device *dev,
			       tc_setup_cb_t *cb, void *cb_priv)
{
	int err;

	rtnl_lock();
	err = __tc_setup_cb_egdev_register(dev, cb, cb_priv);
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL_GPL(tc_setup_cb_egdev_register);
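/* Counterpart of tc_setup_cb_egdev_register(), also under RTNL. The call path
 * below (tc_setup_cb_egdev_call) fans an offload request out to every callback
 * registered for the egress device and reports how many accepted it, or the
 * first error when err_stop is set.
 */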
static void __tc_setup_cb_egdev_unregister(const struct net_device *dev,
					   tc_setup_cb_t *cb, void *cb_priv)
{
	struct tcf_action_egdev *egdev = tcf_action_egdev_lookup(dev);

	if (WARN_ON(!egdev))
		return;
	tcf_action_egdev_cb_del(egdev, cb, cb_priv);
	tcf_action_egdev_put(egdev);
}

void tc_setup_cb_egdev_unregister(const struct net_device *dev,
				  tc_setup_cb_t *cb, void *cb_priv)
{
	rtnl_lock();
	__tc_setup_cb_egdev_unregister(dev, cb, cb_priv);
	rtnl_unlock();
}
EXPORT_SYMBOL_GPL(tc_setup_cb_egdev_unregister);

int tc_setup_cb_egdev_call(const struct net_device *dev,
			   enum tc_setup_type type, void *type_data,
			   bool err_stop)
{
	struct tcf_action_egdev *egdev = tcf_action_egdev_lookup(dev);

	if (!egdev)
		return 0;
	return tcf_action_egdev_cb_call(egdev, type, type_data, err_stop);
}
EXPORT_SYMBOL_GPL(tc_setup_cb_egdev_call);

static __net_init int tcf_action_net_init(struct net *net)
{
	struct tcf_action_net *tan = net_generic(net, tcf_action_net_id);

	return rhashtable_init(&tan->egdev_ht, &tcf_action_egdev_ht_params);
}

static void __net_exit tcf_action_net_exit(struct net *net)
{
	struct tcf_action_net *tan = net_generic(net, tcf_action_net_id);

	rhashtable_destroy(&tan->egdev_ht);
}

static struct pernet_operations tcf_action_net_ops = {
	.init = tcf_action_net_init,
	.exit = tcf_action_net_exit,
	.id = &tcf_action_net_id,
	.size = sizeof(struct tcf_action_net),
};

static int __init tc_action_init(void)
{
	int err;

	err = register_pernet_subsys(&tcf_action_net_ops);
	if (err)
		return err;

	rtnl_register(PF_UNSPEC, RTM_NEWACTION, tc_ctl_action, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_DELACTION, tc_ctl_action, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_GETACTION, tc_ctl_action, tc_dump_action,
		      0);

	return 0;
}

subsys_initcall(tc_action_init);