/*
 * net/sched/act_api.c	Packet action API.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Author:	Jamal Hadi Salim
 *
 *
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/err.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/sch_generic.h>
#include <net/act_api.h>
#include <net/netlink.h>

void tcf_hash_destroy(struct tcf_common *p, struct tcf_hashinfo *hinfo)
{
	unsigned int h = tcf_hash(p->tcfc_index, hinfo->hmask);
	struct tcf_common **p1p;

	for (p1p = &hinfo->htab[h]; *p1p; p1p = &(*p1p)->tcfc_next) {
		if (*p1p == p) {
			write_lock_bh(hinfo->lock);
			*p1p = p->tcfc_next;
			write_unlock_bh(hinfo->lock);
			gen_kill_estimator(&p->tcfc_bstats,
					   &p->tcfc_rate_est);
			kfree(p);
			return;
		}
	}
	WARN_ON(1);
}
EXPORT_SYMBOL(tcf_hash_destroy);

int tcf_hash_release(struct tcf_common *p, int bind,
		     struct tcf_hashinfo *hinfo)
{
	int ret = 0;

	if (p) {
		if (bind)
			p->tcfc_bindcnt--;

		p->tcfc_refcnt--;
		if (p->tcfc_bindcnt <= 0 && p->tcfc_refcnt <= 0) {
			tcf_hash_destroy(p, hinfo);
			ret = 1;
		}
	}
	return ret;
}
EXPORT_SYMBOL(tcf_hash_release);

static int tcf_dump_walker(struct sk_buff *skb, struct netlink_callback *cb,
			   struct tc_action *a, struct tcf_hashinfo *hinfo)
{
	struct tcf_common *p;
	int err = 0, index = -1, i = 0, s_i = 0, n_i = 0;
	struct nlattr *nest;

	read_lock_bh(hinfo->lock);

	s_i = cb->args[0];

	for (i = 0; i < (hinfo->hmask + 1); i++) {
		p = hinfo->htab[tcf_hash(i, hinfo->hmask)];

		for (; p; p = p->tcfc_next) {
			index++;
			if (index < s_i)
				continue;
			a->priv = p;
			a->order = n_i;

			nest = nla_nest_start(skb, a->order);
			if (nest == NULL)
				goto nla_put_failure;
			err = tcf_action_dump_1(skb, a, 0, 0);
			if (err < 0) {
				index--;
				nlmsg_trim(skb, nest);
				goto done;
			}
			nla_nest_end(skb, nest);
			n_i++;
			if (n_i >= TCA_ACT_MAX_PRIO)
				goto done;
		}
	}
done:
	read_unlock_bh(hinfo->lock);
	if (n_i)
		cb->args[0] += n_i;
	return n_i;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	goto done;
}

static int tcf_del_walker(struct sk_buff *skb, struct tc_action *a,
			  struct tcf_hashinfo *hinfo)
{
	struct tcf_common *p, *s_p;
	struct nlattr *nest;
	int i = 0, n_i = 0;

	nest = nla_nest_start(skb, a->order);
	if (nest == NULL)
		goto nla_put_failure;
	NLA_PUT_STRING(skb, TCA_KIND, a->ops->kind);
	for (i = 0; i < (hinfo->hmask + 1); i++) {
		p = hinfo->htab[tcf_hash(i, hinfo->hmask)];

		while (p != NULL) {
			s_p = p->tcfc_next;
			if (ACT_P_DELETED == tcf_hash_release(p, 0, hinfo))
				module_put(a->ops->owner);
			n_i++;
			p = s_p;
		}
	}
	NLA_PUT_U32(skb, TCA_FCNT, n_i);
	nla_nest_end(skb, nest);

	return n_i;
nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EINVAL;
}

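/*
 * Generic table walker for action modules that keep their instances in a
 * tcf_hashinfo hash: RTM_DELACTION flushes every entry via tcf_del_walker(),
 * RTM_GETACTION dumps up to TCA_ACT_MAX_PRIO entries per netlink callback
 * via tcf_dump_walker().  Action modules normally just point the .walk
 * member of their tc_action_ops at this helper, roughly (sketch, "act_xxx"
 * is a placeholder, not a symbol defined here):
 *
 *	static struct tc_action_ops act_xxx_ops = {
 *		...
 *		.walk	= tcf_generic_walker,
 *	};
 */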
int tcf_generic_walker(struct sk_buff *skb, struct netlink_callback *cb,
		       int type, struct tc_action *a)
{
	struct tcf_hashinfo *hinfo = a->ops->hinfo;

	if (type == RTM_DELACTION) {
		return tcf_del_walker(skb, a, hinfo);
	} else if (type == RTM_GETACTION) {
		return tcf_dump_walker(skb, cb, a, hinfo);
	} else {
		printk("tcf_generic_walker: unknown action %d\n", type);
		return -EINVAL;
	}
}
EXPORT_SYMBOL(tcf_generic_walker);

struct tcf_common *tcf_hash_lookup(u32 index, struct tcf_hashinfo *hinfo)
{
	struct tcf_common *p;

	read_lock_bh(hinfo->lock);
	for (p = hinfo->htab[tcf_hash(index, hinfo->hmask)]; p;
	     p = p->tcfc_next) {
		if (p->tcfc_index == index)
			break;
	}
	read_unlock_bh(hinfo->lock);

	return p;
}
EXPORT_SYMBOL(tcf_hash_lookup);

u32 tcf_hash_new_index(u32 *idx_gen, struct tcf_hashinfo *hinfo)
{
	u32 val = *idx_gen;

	do {
		if (++val == 0)
			val = 1;
	} while (tcf_hash_lookup(val, hinfo));

	return (*idx_gen = val);
}
EXPORT_SYMBOL(tcf_hash_new_index);

int tcf_hash_search(struct tc_action *a, u32 index)
{
	struct tcf_hashinfo *hinfo = a->ops->hinfo;
	struct tcf_common *p = tcf_hash_lookup(index, hinfo);

	if (p) {
		a->priv = p;
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL(tcf_hash_search);

struct tcf_common *tcf_hash_check(u32 index, struct tc_action *a, int bind,
				  struct tcf_hashinfo *hinfo)
{
	struct tcf_common *p = NULL;
	if (index && (p = tcf_hash_lookup(index, hinfo)) != NULL) {
		if (bind) {
			p->tcfc_bindcnt++;
			p->tcfc_refcnt++;
		}
		a->priv = p;
	}
	return p;
}
EXPORT_SYMBOL(tcf_hash_check);

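/*
 * Allocate and initialise a new action instance of 'size' bytes.  The caller
 * holds one reference (plus a bind reference when 'bind' is set); the entry
 * only becomes visible to lookups once tcf_hash_insert() is called.  A
 * typical action module init path looks roughly like this (sketch, the
 * xxx_* names are per-module placeholders, not symbols defined here):
 *
 *	p = tcf_hash_check(index, a, bind, &xxx_hash_info);
 *	if (!p)
 *		p = tcf_hash_create(index, est, a, sizeof(struct tcf_xxx),
 *				    bind, &xxx_idx_gen, &xxx_hash_info);
 *	...
 *	tcf_hash_insert(p, &xxx_hash_info);
 */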
struct tcf_common *tcf_hash_create(u32 index, struct nlattr *est, struct tc_action *a, int size, int bind, u32 *idx_gen, struct tcf_hashinfo *hinfo)
{
	struct tcf_common *p = kzalloc(size, GFP_KERNEL);

	if (unlikely(!p))
		return p;
	p->tcfc_refcnt = 1;
	if (bind)
		p->tcfc_bindcnt = 1;

	spin_lock_init(&p->tcfc_lock);
	p->tcfc_index = index ? index : tcf_hash_new_index(idx_gen, hinfo);
	p->tcfc_tm.install = jiffies;
	p->tcfc_tm.lastuse = jiffies;
	if (est)
		gen_new_estimator(&p->tcfc_bstats, &p->tcfc_rate_est,
				  &p->tcfc_lock, est);
	a->priv = (void *) p;
	return p;
}
EXPORT_SYMBOL(tcf_hash_create);

void tcf_hash_insert(struct tcf_common *p, struct tcf_hashinfo *hinfo)
{
	unsigned int h = tcf_hash(p->tcfc_index, hinfo->hmask);

	write_lock_bh(hinfo->lock);
	p->tcfc_next = hinfo->htab[h];
	hinfo->htab[h] = p;
	write_unlock_bh(hinfo->lock);
}
EXPORT_SYMBOL(tcf_hash_insert);

static struct tc_action_ops *act_base = NULL;
static DEFINE_RWLOCK(act_mod_lock);

int tcf_register_action(struct tc_action_ops *act)
{
	struct tc_action_ops *a, **ap;

	write_lock(&act_mod_lock);
	for (ap = &act_base; (a = *ap) != NULL; ap = &a->next) {
		if (act->type == a->type || (strcmp(act->kind, a->kind) == 0)) {
			write_unlock(&act_mod_lock);
			return -EEXIST;
		}
	}
	act->next = NULL;
	*ap = act;
	write_unlock(&act_mod_lock);
	return 0;
}
EXPORT_SYMBOL(tcf_register_action);

int tcf_unregister_action(struct tc_action_ops *act)
{
	struct tc_action_ops *a, **ap;
	int err = -ENOENT;

	write_lock(&act_mod_lock);
	for (ap = &act_base; (a = *ap) != NULL; ap = &a->next)
		if (a == act)
			break;
	if (a) {
		*ap = a->next;
		a->next = NULL;
		err = 0;
	}
	write_unlock(&act_mod_lock);
	return err;
}
EXPORT_SYMBOL(tcf_unregister_action);

/* lookup by name */
static struct tc_action_ops *tc_lookup_action_n(char *kind)
{
	struct tc_action_ops *a = NULL;

	if (kind) {
		read_lock(&act_mod_lock);
		for (a = act_base; a; a = a->next) {
			if (strcmp(kind, a->kind) == 0) {
				if (!try_module_get(a->owner)) {
					read_unlock(&act_mod_lock);
					return NULL;
				}
				break;
			}
		}
		read_unlock(&act_mod_lock);
	}
	return a;
}

/* lookup by nlattr */
static struct tc_action_ops *tc_lookup_action(struct nlattr *kind)
{
	struct tc_action_ops *a = NULL;

	if (kind) {
		read_lock(&act_mod_lock);
		for (a = act_base; a; a = a->next) {
			if (nla_strcmp(kind, a->kind) == 0) {
				if (!try_module_get(a->owner)) {
					read_unlock(&act_mod_lock);
					return NULL;
				}
				break;
			}
		}
		read_unlock(&act_mod_lock);
	}
	return a;
}

#if 0
/* lookup by id */
static struct tc_action_ops *tc_lookup_action_id(u32 type)
{
	struct tc_action_ops *a = NULL;

	if (type) {
		read_lock(&act_mod_lock);
		for (a = act_base; a; a = a->next) {
			if (a->type == type) {
				if (!try_module_get(a->owner)) {
					read_unlock(&act_mod_lock);
					return NULL;
				}
				break;
			}
		}
		read_unlock(&act_mod_lock);
	}
	return a;
}
#endif

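/*
 * Run a chain of actions on the skb.  TC_ACT_REPEAT re-runs the same action,
 * TC_ACT_PIPE falls through to the next action in the chain, and any other
 * verdict stops the walk and is returned to the caller.
 */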
int tcf_action_exec(struct sk_buff *skb, struct tc_action *act,
		    struct tcf_result *res)
{
	struct tc_action *a;
	int ret = -1;

	if (skb->tc_verd & TC_NCLS) {
		skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
		ret = TC_ACT_OK;
		goto exec_done;
	}
	while ((a = act) != NULL) {
repeat:
		if (a->ops && a->ops->act) {
			ret = a->ops->act(skb, a, res);
			if (TC_MUNGED & skb->tc_verd) {
				/* copied already, allow trampling */
				skb->tc_verd = SET_TC_OK2MUNGE(skb->tc_verd);
				skb->tc_verd = CLR_TC_MUNGED(skb->tc_verd);
			}
			if (ret == TC_ACT_REPEAT)
				goto repeat;	/* we need a ttl - JHS */
			if (ret != TC_ACT_PIPE)
				goto exec_done;
		}
		act = a->next;
	}
exec_done:
	return ret;
}
EXPORT_SYMBOL(tcf_action_exec);

void tcf_action_destroy(struct tc_action *act, int bind)
{
	struct tc_action *a;

	for (a = act; a; a = act) {
		if (a->ops && a->ops->cleanup) {
			if (a->ops->cleanup(a, bind) == ACT_P_DELETED)
				module_put(a->ops->owner);
			act = act->next;
			kfree(a);
		} else { /*FIXME: Remove later - catch insertion bugs*/
			printk("tcf_action_destroy: BUG? destroying NULL ops\n");
			act = act->next;
			kfree(a);
		}
	}
}

int
tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
{
	int err = -EINVAL;

	if (a->ops == NULL || a->ops->dump == NULL)
		return err;
	return a->ops->dump(skb, a, bind, ref);
}

int
tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
{
	int err = -EINVAL;
	unsigned char *b = skb_tail_pointer(skb);
	struct nlattr *nest;

	if (a->ops == NULL || a->ops->dump == NULL)
		return err;

	NLA_PUT_STRING(skb, TCA_KIND, a->ops->kind);
	if (tcf_action_copy_stats(skb, a, 0))
		goto nla_put_failure;
	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;
	if ((err = tcf_action_dump_old(skb, a, bind, ref)) > 0) {
		nla_nest_end(skb, nest);
		return err;
	}

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}
EXPORT_SYMBOL(tcf_action_dump_1);

int
tcf_action_dump(struct sk_buff *skb, struct tc_action *act, int bind, int ref)
{
	struct tc_action *a;
	int err = -EINVAL;
	struct nlattr *nest;

	while ((a = act) != NULL) {
		act = a->next;
		nest = nla_nest_start(skb, a->order);
		if (nest == NULL)
			goto nla_put_failure;
		err = tcf_action_dump_1(skb, a, bind, ref);
		if (err < 0)
			goto errout;
		nla_nest_end(skb, nest);
	}

	return 0;

nla_put_failure:
	err = -EINVAL;
errout:
	nla_nest_cancel(skb, nest);
	return err;
}

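/*
 * Instantiate a single action from its netlink attributes.  If the kind is
 * not registered yet, try to auto-load the matching "act_<kind>" module;
 * because the RTNL lock is dropped around the module load, -EAGAIN is
 * returned in that case so the caller replays the whole request.
 */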
struct tc_action *tcf_action_init_1(struct nlattr *nla, struct nlattr *est,
				    char *name, int ovr, int bind)
{
	struct tc_action *a;
	struct tc_action_ops *a_o;
	char act_name[IFNAMSIZ];
	struct nlattr *tb[TCA_ACT_MAX+1];
	struct nlattr *kind;
	int err;

	if (name == NULL) {
		err = nla_parse_nested(tb, TCA_ACT_MAX, nla, NULL);
		if (err < 0)
			goto err_out;
		err = -EINVAL;
		kind = tb[TCA_ACT_KIND];
		if (kind == NULL)
			goto err_out;
		if (nla_strlcpy(act_name, kind, IFNAMSIZ) >= IFNAMSIZ)
			goto err_out;
	} else {
		err = -EINVAL;
		if (strlcpy(act_name, name, IFNAMSIZ) >= IFNAMSIZ)
			goto err_out;
	}

	a_o = tc_lookup_action_n(act_name);
	if (a_o == NULL) {
#ifdef CONFIG_KMOD
		rtnl_unlock();
		request_module("act_%s", act_name);
		rtnl_lock();

		a_o = tc_lookup_action_n(act_name);

		/* We dropped the RTNL semaphore in order to
		 * perform the module load.  So, even if we
		 * succeeded in loading the module we have to
		 * tell the caller to replay the request.  We
		 * indicate this using -EAGAIN.
		 */
		if (a_o != NULL) {
			err = -EAGAIN;
			goto err_mod;
		}
#endif
		err = -ENOENT;
		goto err_out;
	}

	err = -ENOMEM;
	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (a == NULL)
		goto err_mod;

	/* backward compatibility for policer */
	if (name == NULL)
		err = a_o->init(tb[TCA_ACT_OPTIONS], est, a, ovr, bind);
	else
		err = a_o->init(nla, est, a, ovr, bind);
	if (err < 0)
		goto err_free;

	/* module count goes up only when a brand new policy is created;
	 * if it exists and is only bound to in a_o->init() then
	 * ACT_P_CREATED is not returned (a zero is).
	 */
	if (err != ACT_P_CREATED)
		module_put(a_o->owner);
	a->ops = a_o;

	return a;

err_free:
	kfree(a);
err_mod:
	module_put(a_o->owner);
err_out:
	return ERR_PTR(err);
}

struct tc_action *tcf_action_init(struct nlattr *nla, struct nlattr *est,
				  char *name, int ovr, int bind)
{
	struct nlattr *tb[TCA_ACT_MAX_PRIO+1];
	struct tc_action *head = NULL, *act, *act_prev = NULL;
	int err;
	int i;

	err = nla_parse_nested(tb, TCA_ACT_MAX_PRIO, nla, NULL);
	if (err < 0)
		return ERR_PTR(err);

	for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
		act = tcf_action_init_1(tb[i], est, name, ovr, bind);
		if (IS_ERR(act))
			goto err;
		act->order = i;

		if (head == NULL)
			head = act;
		else
			act_prev->next = act;
		act_prev = act;
	}
	return head;

err:
	if (head != NULL)
		tcf_action_destroy(head, bind);
	return act;
}

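/*
 * Append the standard statistics TLVs (basic, rate estimator, queue) for one
 * action.  With compat_mode set and an old style (TCA_OLD_COMPAT) action,
 * the statistics are emitted as TCA_STATS/TCA_XSTATS instead of
 * TCA_ACT_STATS, for backward compatibility.
 */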
int tcf_action_copy_stats(struct sk_buff *skb, struct tc_action *a,
			  int compat_mode)
{
	int err = 0;
	struct gnet_dump d;
	struct tcf_act_hdr *h = a->priv;

	if (h == NULL)
		goto errout;

	/* compat_mode being true specifies a call that is supposed
	 * to add additional backward compatibility statistic TLVs.
	 */
	if (compat_mode) {
		if (a->type == TCA_OLD_COMPAT)
			err = gnet_stats_start_copy_compat(skb, 0,
				TCA_STATS, TCA_XSTATS, &h->tcf_lock, &d);
		else
			return 0;
	} else
		err = gnet_stats_start_copy(skb, TCA_ACT_STATS,
					    &h->tcf_lock, &d);

	if (err < 0)
		goto errout;

	if (a->ops != NULL && a->ops->get_stats != NULL)
		if (a->ops->get_stats(skb, a) < 0)
			goto errout;

	if (gnet_stats_copy_basic(&d, &h->tcf_bstats) < 0 ||
	    gnet_stats_copy_rate_est(&d, &h->tcf_rate_est) < 0 ||
	    gnet_stats_copy_queue(&d, &h->tcf_qstats) < 0)
		goto errout;

	if (gnet_stats_finish_copy(&d) < 0)
		goto errout;

	return 0;

errout:
	return -1;
}

static int
tca_get_fill(struct sk_buff *skb, struct tc_action *a, u32 pid, u32 seq,
	     u16 flags, int event, int bind, int ref)
{
	struct tcamsg *t;
	struct nlmsghdr *nlh;
	unsigned char *b = skb_tail_pointer(skb);
	struct nlattr *nest;

	nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*t), flags);

	t = NLMSG_DATA(nlh);
	t->tca_family = AF_UNSPEC;
	t->tca__pad1 = 0;
	t->tca__pad2 = 0;

	nest = nla_nest_start(skb, TCA_ACT_TAB);
	if (nest == NULL)
		goto nla_put_failure;

	if (tcf_action_dump(skb, a, bind, ref) < 0)
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

nla_put_failure:
nlmsg_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int
act_get_notify(u32 pid, struct nlmsghdr *n, struct tc_action *a, int event)
{
	struct sk_buff *skb;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;
	if (tca_get_fill(skb, a, pid, n->nlmsg_seq, 0, event, 0, 0) <= 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	return rtnl_unicast(skb, &init_net, pid);
}

static struct tc_action *
tcf_action_get_1(struct nlattr *nla, struct nlmsghdr *n, u32 pid)
{
	struct nlattr *tb[TCA_ACT_MAX+1];
	struct tc_action *a;
	int index;
	int err;

	err = nla_parse_nested(tb, TCA_ACT_MAX, nla, NULL);
	if (err < 0)
		goto err_out;

	err = -EINVAL;
	if (tb[TCA_ACT_INDEX] == NULL ||
	    nla_len(tb[TCA_ACT_INDEX]) < sizeof(index))
		goto err_out;
	index = nla_get_u32(tb[TCA_ACT_INDEX]);

	err = -ENOMEM;
	a = kzalloc(sizeof(struct tc_action), GFP_KERNEL);
	if (a == NULL)
		goto err_out;

	err = -EINVAL;
	a->ops = tc_lookup_action(tb[TCA_ACT_KIND]);
	if (a->ops == NULL)
		goto err_free;
	if (a->ops->lookup == NULL)
		goto err_mod;
	err = -ENOENT;
	if (a->ops->lookup(a, index) == 0)
		goto err_mod;

	module_put(a->ops->owner);
	return a;

err_mod:
	module_put(a->ops->owner);
err_free:
	kfree(a);
err_out:
	return ERR_PTR(err);
}

static void cleanup_a(struct tc_action *act)
{
	struct tc_action *a;

	for (a = act; a; a = act) {
		act = a->next;
		kfree(a);
	}
}

static struct tc_action *create_a(int i)
{
	struct tc_action *act;

	act = kzalloc(sizeof(*act), GFP_KERNEL);
	if (act == NULL) {
		printk("create_a: failed to alloc!\n");
		return NULL;
	}
	act->order = i;
	return act;
}

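/*
 * Flush all actions of one kind: walk the module's table with RTM_DELACTION,
 * deleting every entry, and notify RTNLGRP_TC listeners with the number of
 * deleted entries (TCA_FCNT).
 */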
static int tca_action_flush(struct nlattr *nla, struct nlmsghdr *n, u32 pid)
{
	struct sk_buff *skb;
	unsigned char *b;
	struct nlmsghdr *nlh;
	struct tcamsg *t;
	struct netlink_callback dcb;
	struct nlattr *nest;
	struct nlattr *tb[TCA_ACT_MAX+1];
	struct nlattr *kind;
	struct tc_action *a = create_a(0);
	int err = -EINVAL;

	if (a == NULL) {
		printk("tca_action_flush: couldnt create tc_action\n");
		return err;
	}

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb) {
		printk("tca_action_flush: failed skb alloc\n");
		kfree(a);
		return -ENOBUFS;
	}

	b = skb_tail_pointer(skb);

	err = nla_parse_nested(tb, TCA_ACT_MAX, nla, NULL);
	if (err < 0)
		goto err_out;

	err = -EINVAL;
	kind = tb[TCA_ACT_KIND];
	a->ops = tc_lookup_action(kind);
	if (a->ops == NULL)
		goto err_out;

	nlh = NLMSG_PUT(skb, pid, n->nlmsg_seq, RTM_DELACTION, sizeof(*t));
	t = NLMSG_DATA(nlh);
	t->tca_family = AF_UNSPEC;
	t->tca__pad1 = 0;
	t->tca__pad2 = 0;

	nest = nla_nest_start(skb, TCA_ACT_TAB);
	if (nest == NULL)
		goto nla_put_failure;

	err = a->ops->walk(skb, &dcb, RTM_DELACTION, a);
	if (err < 0)
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	nlh->nlmsg_flags |= NLM_F_ROOT;
	module_put(a->ops->owner);
	kfree(a);
	err = rtnetlink_send(skb, &init_net, pid, RTNLGRP_TC, n->nlmsg_flags&NLM_F_ECHO);
	if (err > 0)
		return 0;

	return err;

nla_put_failure:
nlmsg_failure:
	module_put(a->ops->owner);
err_out:
	kfree_skb(skb);
	kfree(a);
	return err;
}

static int
tca_action_gd(struct nlattr *nla, struct nlmsghdr *n, u32 pid, int event)
{
	int i, ret;
	struct nlattr *tb[TCA_ACT_MAX_PRIO+1];
	struct tc_action *head = NULL, *act, *act_prev = NULL;

	ret = nla_parse_nested(tb, TCA_ACT_MAX_PRIO, nla, NULL);
	if (ret < 0)
		return ret;

	if (event == RTM_DELACTION && n->nlmsg_flags&NLM_F_ROOT) {
		if (tb[0] != NULL && tb[1] == NULL)
			return tca_action_flush(tb[0], n, pid);
	}

	for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
		act = tcf_action_get_1(tb[i], n, pid);
		if (IS_ERR(act)) {
			ret = PTR_ERR(act);
			goto err;
		}
		act->order = i;

		if (head == NULL)
			head = act;
		else
			act_prev->next = act;
		act_prev = act;
	}

	if (event == RTM_GETACTION)
		ret = act_get_notify(pid, n, head, event);
	else { /* delete */
		struct sk_buff *skb;

		skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
		if (!skb) {
			ret = -ENOBUFS;
			goto err;
		}

		if (tca_get_fill(skb, head, pid, n->nlmsg_seq, 0, event,
				 0, 1) <= 0) {
			kfree_skb(skb);
			ret = -EINVAL;
			goto err;
		}

		/* now do the delete */
		tcf_action_destroy(head, 0);
		ret = rtnetlink_send(skb, &init_net, pid, RTNLGRP_TC,
				     n->nlmsg_flags&NLM_F_ECHO);
		if (ret > 0)
			return 0;
		return ret;
	}
err:
	cleanup_a(head);
	return ret;
}

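/*
 * Build and send an RTM_NEWACTION notification for a freshly added chain of
 * actions to RTNLGRP_TC listeners.
 */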
static int tcf_add_notify(struct tc_action *a, u32 pid, u32 seq, int event,
			  u16 flags)
{
	struct tcamsg *t;
	struct nlmsghdr *nlh;
	struct sk_buff *skb;
	struct nlattr *nest;
	unsigned char *b;
	int err = 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	b = skb_tail_pointer(skb);

	nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*t), flags);
	t = NLMSG_DATA(nlh);
	t->tca_family = AF_UNSPEC;
	t->tca__pad1 = 0;
	t->tca__pad2 = 0;

	nest = nla_nest_start(skb, TCA_ACT_TAB);
	if (nest == NULL)
		goto nla_put_failure;

	if (tcf_action_dump(skb, a, 0, 0) < 0)
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	NETLINK_CB(skb).dst_group = RTNLGRP_TC;

	err = rtnetlink_send(skb, &init_net, pid, RTNLGRP_TC, flags&NLM_F_ECHO);
	if (err > 0)
		err = 0;
	return err;

nla_put_failure:
nlmsg_failure:
	kfree_skb(skb);
	return -1;
}


static int
tcf_action_add(struct nlattr *nla, struct nlmsghdr *n, u32 pid, int ovr)
{
	int ret = 0;
	struct tc_action *act;
	struct tc_action *a;
	u32 seq = n->nlmsg_seq;

	act = tcf_action_init(nla, NULL, NULL, ovr, 0);
	if (act == NULL)
		goto done;
	if (IS_ERR(act)) {
		ret = PTR_ERR(act);
		goto done;
	}

	/* dump then free all the actions after update; inserted policy
	 * stays intact
	 */
	ret = tcf_add_notify(act, pid, seq, RTM_NEWACTION, n->nlmsg_flags);
	for (a = act; a; a = act) {
		act = a->next;
		kfree(a);
	}
done:
	return ret;
}

static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_ACT_MAX + 1];
	u32 pid = skb ? NETLINK_CB(skb).pid : 0;
	int ret = 0, ovr = 0;

	if (net != &init_net)
		return -EINVAL;

	ret = nlmsg_parse(n, sizeof(struct tcamsg), tca, TCA_ACT_MAX, NULL);
	if (ret < 0)
		return ret;

	if (tca[TCA_ACT_TAB] == NULL) {
		printk("tc_ctl_action: received NO action attribs\n");
		return -EINVAL;
	}

	/* n->nlmsg_flags & NLM_F_CREATE */
	switch (n->nlmsg_type) {
	case RTM_NEWACTION:
		/* we are going to assume all other flags
		 * imply create only if it doesn't exist.
		 * Note that CREATE | EXCL implies that,
		 * but since we want to avoid ambiguity (e.g. when flags
		 * is zero) then just set this
		 */
		if (n->nlmsg_flags&NLM_F_REPLACE)
			ovr = 1;
replay:
		ret = tcf_action_add(tca[TCA_ACT_TAB], n, pid, ovr);
		if (ret == -EAGAIN)
			goto replay;
		break;
	case RTM_DELACTION:
		ret = tca_action_gd(tca[TCA_ACT_TAB], n, pid, RTM_DELACTION);
		break;
	case RTM_GETACTION:
		ret = tca_action_gd(tca[TCA_ACT_TAB], n, pid, RTM_GETACTION);
		break;
	default:
		BUG();
	}

	return ret;
}

static struct nlattr *
find_dump_kind(struct nlmsghdr *n)
{
	struct nlattr *tb1, *tb2[TCA_ACT_MAX+1];
	struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
	struct nlattr *nla[TCAA_MAX + 1];
	struct nlattr *kind;

	if (nlmsg_parse(n, sizeof(struct tcamsg), nla, TCAA_MAX, NULL) < 0)
		return NULL;
	tb1 = nla[TCA_ACT_TAB];
	if (tb1 == NULL)
		return NULL;

	if (nla_parse(tb, TCA_ACT_MAX_PRIO, nla_data(tb1),
		      NLMSG_ALIGN(nla_len(tb1)), NULL) < 0)
		return NULL;

	if (tb[1] == NULL)
		return NULL;
	if (nla_parse(tb2, TCA_ACT_MAX, nla_data(tb[1]),
		      nla_len(tb[1]), NULL) < 0)
		return NULL;
	kind = tb2[TCA_ACT_KIND];

	return kind;
}

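/*
 * Netlink dump callback for RTM_GETACTION: find_dump_kind() above pulls the
 * TCA_ACT_KIND attribute out of the nested request so that the matching
 * module's walker can be used to dump its whole table.
 */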
static int
tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct nlmsghdr *nlh;
	unsigned char *b = skb_tail_pointer(skb);
	struct nlattr *nest;
	struct tc_action_ops *a_o;
	struct tc_action a;
	int ret = 0;
	struct tcamsg *t = (struct tcamsg *) NLMSG_DATA(cb->nlh);
	struct nlattr *kind = find_dump_kind(cb->nlh);

	if (net != &init_net)
		return 0;

	if (kind == NULL) {
		printk("tc_dump_action: action bad kind\n");
		return 0;
	}

	a_o = tc_lookup_action(kind);
	if (a_o == NULL) {
		return 0;
	}

	memset(&a, 0, sizeof(struct tc_action));
	a.ops = a_o;

	if (a_o->walk == NULL) {
		printk("tc_dump_action: %s !capable of dumping table\n", a_o->kind);
		goto nla_put_failure;
	}

	nlh = NLMSG_PUT(skb, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq,
			cb->nlh->nlmsg_type, sizeof(*t));
	t = NLMSG_DATA(nlh);
	t->tca_family = AF_UNSPEC;
	t->tca__pad1 = 0;
	t->tca__pad2 = 0;

	nest = nla_nest_start(skb, TCA_ACT_TAB);
	if (nest == NULL)
		goto nla_put_failure;

	ret = a_o->walk(skb, cb, RTM_GETACTION, &a);
	if (ret < 0)
		goto nla_put_failure;

	if (ret > 0) {
		nla_nest_end(skb, nest);
		ret = skb->len;
	} else
		nla_nest_cancel(skb, nest);

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	if (NETLINK_CB(cb->skb).pid && ret)
		nlh->nlmsg_flags |= NLM_F_MULTI;
	module_put(a_o->owner);
	return skb->len;

nla_put_failure:
nlmsg_failure:
	module_put(a_o->owner);
	nlmsg_trim(skb, b);
	return skb->len;
}

static int __init tc_action_init(void)
{
	rtnl_register(PF_UNSPEC, RTM_NEWACTION, tc_ctl_action, NULL);
	rtnl_register(PF_UNSPEC, RTM_DELACTION, tc_ctl_action, NULL);
	rtnl_register(PF_UNSPEC, RTM_GETACTION, tc_ctl_action, tc_dump_action);

	return 0;
}

subsys_initcall(tc_action_init);