/*
 * net/sched/act_api.c	Packet action API.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Author:	Jamal Hadi Salim
 *
 *
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/err.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/sch_generic.h>
#include <net/act_api.h>
#include <net/netlink.h>

void tcf_hash_destroy(struct tcf_common *p, struct tcf_hashinfo *hinfo)
{
	unsigned int h = tcf_hash(p->tcfc_index, hinfo->hmask);
	struct tcf_common **p1p;

	for (p1p = &hinfo->htab[h]; *p1p; p1p = &(*p1p)->tcfc_next) {
		if (*p1p == p) {
			write_lock_bh(hinfo->lock);
			*p1p = p->tcfc_next;
			write_unlock_bh(hinfo->lock);
			gen_kill_estimator(&p->tcfc_bstats,
					   &p->tcfc_rate_est);
			kfree(p);
			return;
		}
	}
	WARN_ON(1);
}
EXPORT_SYMBOL(tcf_hash_destroy);

int tcf_hash_release(struct tcf_common *p, int bind,
		     struct tcf_hashinfo *hinfo)
{
	int ret = 0;

	if (p) {
		if (bind)
			p->tcfc_bindcnt--;

		p->tcfc_refcnt--;
		if (p->tcfc_bindcnt <= 0 && p->tcfc_refcnt <= 0) {
			tcf_hash_destroy(p, hinfo);
			ret = 1;
		}
	}
	return ret;
}
EXPORT_SYMBOL(tcf_hash_release);

static int tcf_dump_walker(struct sk_buff *skb, struct netlink_callback *cb,
			   struct tc_action *a, struct tcf_hashinfo *hinfo)
{
	struct tcf_common *p;
	int err = 0, index = -1, i = 0, s_i = 0, n_i = 0;
	struct nlattr *nest;

	read_lock_bh(hinfo->lock);

	s_i = cb->args[0];

	for (i = 0; i < (hinfo->hmask + 1); i++) {
		p = hinfo->htab[tcf_hash(i, hinfo->hmask)];

		for (; p; p = p->tcfc_next) {
			index++;
			if (index < s_i)
				continue;
			a->priv = p;
			a->order = n_i;

			nest = nla_nest_start(skb, a->order);
			if (nest == NULL)
				goto nla_put_failure;
			err = tcf_action_dump_1(skb, a, 0, 0);
			if (err < 0) {
				index--;
				nlmsg_trim(skb, nest);
				goto done;
			}
			nla_nest_end(skb, nest);
			n_i++;
			if (n_i >= TCA_ACT_MAX_PRIO)
				goto done;
		}
	}
done:
	read_unlock_bh(hinfo->lock);
	if (n_i)
		cb->args[0] += n_i;
	return n_i;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	goto done;
}

static int tcf_del_walker(struct sk_buff *skb, struct tc_action *a,
			  struct tcf_hashinfo *hinfo)
{
	struct tcf_common *p, *s_p;
	struct nlattr *nest;
	int i = 0, n_i = 0;

	nest = nla_nest_start(skb, a->order);
	if (nest == NULL)
		goto nla_put_failure;
	NLA_PUT_STRING(skb, TCA_KIND, a->ops->kind);
	for (i = 0; i < (hinfo->hmask + 1); i++) {
		p = hinfo->htab[tcf_hash(i, hinfo->hmask)];

		while (p != NULL) {
			s_p = p->tcfc_next;
			if (ACT_P_DELETED == tcf_hash_release(p, 0, hinfo))
				module_put(a->ops->owner);
			n_i++;
			p = s_p;
		}
	}
	NLA_PUT_U32(skb, TCA_FCNT, n_i);
	nla_nest_end(skb, nest);

	return n_i;
nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EINVAL;
}

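/*
 * Walk an action's hash table on behalf of a netlink request:
 * RTM_DELACTION releases every entry, RTM_GETACTION dumps them.
 */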
int tcf_generic_walker(struct sk_buff *skb, struct netlink_callback *cb,
		       int type, struct tc_action *a)
{
	struct tcf_hashinfo *hinfo = a->ops->hinfo;

	if (type == RTM_DELACTION) {
		return tcf_del_walker(skb, a, hinfo);
	} else if (type == RTM_GETACTION) {
		return tcf_dump_walker(skb, cb, a, hinfo);
	} else {
		printk("tcf_generic_walker: unknown action %d\n", type);
		return -EINVAL;
	}
}
EXPORT_SYMBOL(tcf_generic_walker);

struct tcf_common *tcf_hash_lookup(u32 index, struct tcf_hashinfo *hinfo)
{
	struct tcf_common *p;

	read_lock_bh(hinfo->lock);
	for (p = hinfo->htab[tcf_hash(index, hinfo->hmask)]; p;
	     p = p->tcfc_next) {
		if (p->tcfc_index == index)
			break;
	}
	read_unlock_bh(hinfo->lock);

	return p;
}
EXPORT_SYMBOL(tcf_hash_lookup);

u32 tcf_hash_new_index(u32 *idx_gen, struct tcf_hashinfo *hinfo)
{
	u32 val = *idx_gen;

	do {
		if (++val == 0)
			val = 1;
	} while (tcf_hash_lookup(val, hinfo));

	return (*idx_gen = val);
}
EXPORT_SYMBOL(tcf_hash_new_index);

int tcf_hash_search(struct tc_action *a, u32 index)
{
	struct tcf_hashinfo *hinfo = a->ops->hinfo;
	struct tcf_common *p = tcf_hash_lookup(index, hinfo);

	if (p) {
		a->priv = p;
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL(tcf_hash_search);

struct tcf_common *tcf_hash_check(u32 index, struct tc_action *a, int bind,
				  struct tcf_hashinfo *hinfo)
{
	struct tcf_common *p = NULL;

	if (index && (p = tcf_hash_lookup(index, hinfo)) != NULL) {
		if (bind)
			p->tcfc_bindcnt++;
		p->tcfc_refcnt++;
		a->priv = p;
	}
	return p;
}
EXPORT_SYMBOL(tcf_hash_check);

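/*
 * Allocate and initialize the common part of a new action instance.
 * A zero index means "allocate the next free one"; a rate estimator is
 * attached when an estimator attribute was supplied.
 */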
struct tcf_common *tcf_hash_create(u32 index, struct nlattr *est,
				   struct tc_action *a, int size, int bind,
				   u32 *idx_gen, struct tcf_hashinfo *hinfo)
{
	struct tcf_common *p = kzalloc(size, GFP_KERNEL);

	if (unlikely(!p))
		return p;
	p->tcfc_refcnt = 1;
	if (bind)
		p->tcfc_bindcnt = 1;

	spin_lock_init(&p->tcfc_lock);
	p->tcfc_index = index ? index : tcf_hash_new_index(idx_gen, hinfo);
	p->tcfc_tm.install = jiffies;
	p->tcfc_tm.lastuse = jiffies;
	if (est)
		gen_new_estimator(&p->tcfc_bstats, &p->tcfc_rate_est,
				  &p->tcfc_lock, est);
	a->priv = (void *) p;
	return p;
}
EXPORT_SYMBOL(tcf_hash_create);

void tcf_hash_insert(struct tcf_common *p, struct tcf_hashinfo *hinfo)
{
	unsigned int h = tcf_hash(p->tcfc_index, hinfo->hmask);

	write_lock_bh(hinfo->lock);
	p->tcfc_next = hinfo->htab[h];
	hinfo->htab[h] = p;
	write_unlock_bh(hinfo->lock);
}
EXPORT_SYMBOL(tcf_hash_insert);

static struct tc_action_ops *act_base = NULL;
static DEFINE_RWLOCK(act_mod_lock);

int tcf_register_action(struct tc_action_ops *act)
{
	struct tc_action_ops *a, **ap;

	write_lock(&act_mod_lock);
	for (ap = &act_base; (a = *ap) != NULL; ap = &a->next) {
		if (act->type == a->type || (strcmp(act->kind, a->kind) == 0)) {
			write_unlock(&act_mod_lock);
			return -EEXIST;
		}
	}
	act->next = NULL;
	*ap = act;
	write_unlock(&act_mod_lock);
	return 0;
}
EXPORT_SYMBOL(tcf_register_action);

int tcf_unregister_action(struct tc_action_ops *act)
{
	struct tc_action_ops *a, **ap;
	int err = -ENOENT;

	write_lock(&act_mod_lock);
	for (ap = &act_base; (a = *ap) != NULL; ap = &a->next)
		if (a == act)
			break;
	if (a) {
		*ap = a->next;
		a->next = NULL;
		err = 0;
	}
	write_unlock(&act_mod_lock);
	return err;
}
EXPORT_SYMBOL(tcf_unregister_action);

/* lookup by name */
static struct tc_action_ops *tc_lookup_action_n(char *kind)
{
	struct tc_action_ops *a = NULL;

	if (kind) {
		read_lock(&act_mod_lock);
		for (a = act_base; a; a = a->next) {
			if (strcmp(kind, a->kind) == 0) {
				if (!try_module_get(a->owner)) {
					read_unlock(&act_mod_lock);
					return NULL;
				}
				break;
			}
		}
		read_unlock(&act_mod_lock);
	}
	return a;
}

/* lookup by nlattr */
static struct tc_action_ops *tc_lookup_action(struct nlattr *kind)
{
	struct tc_action_ops *a = NULL;

	if (kind) {
		read_lock(&act_mod_lock);
		for (a = act_base; a; a = a->next) {
			if (nla_strcmp(kind, a->kind) == 0) {
				if (!try_module_get(a->owner)) {
					read_unlock(&act_mod_lock);
					return NULL;
				}
				break;
			}
		}
		read_unlock(&act_mod_lock);
	}
	return a;
}

#if 0
/* lookup by id */
static struct tc_action_ops *tc_lookup_action_id(u32 type)
{
	struct tc_action_ops *a = NULL;

	if (type) {
		read_lock(&act_mod_lock);
		for (a = act_base; a; a = a->next) {
			if (a->type == type) {
				if (!try_module_get(a->owner)) {
					read_unlock(&act_mod_lock);
					return NULL;
				}
				break;
			}
		}
		read_unlock(&act_mod_lock);
	}
	return a;
}
#endif

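/*
 * Run a chain of actions on a packet.  TC_ACT_PIPE moves on to the next
 * action in the list, TC_ACT_REPEAT re-runs the current one, and any
 * other verdict stops the walk and is returned to the caller.
 */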
int tcf_action_exec(struct sk_buff *skb, struct tc_action *act,
		    struct tcf_result *res)
{
	struct tc_action *a;
	int ret = -1;

	if (skb->tc_verd & TC_NCLS) {
		skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
		ret = TC_ACT_OK;
		goto exec_done;
	}
	while ((a = act) != NULL) {
repeat:
		if (a->ops && a->ops->act) {
			ret = a->ops->act(skb, a, res);
			if (TC_MUNGED & skb->tc_verd) {
				/* copied already, allow trampling */
				skb->tc_verd = SET_TC_OK2MUNGE(skb->tc_verd);
				skb->tc_verd = CLR_TC_MUNGED(skb->tc_verd);
			}
			if (ret == TC_ACT_REPEAT)
				goto repeat;	/* we need a ttl - JHS */
			if (ret != TC_ACT_PIPE)
				goto exec_done;
		}
		act = a->next;
	}
exec_done:
	return ret;
}
EXPORT_SYMBOL(tcf_action_exec);

void tcf_action_destroy(struct tc_action *act, int bind)
{
	struct tc_action *a;

	for (a = act; a; a = act) {
		if (a->ops && a->ops->cleanup) {
			if (a->ops->cleanup(a, bind) == ACT_P_DELETED)
				module_put(a->ops->owner);
			act = act->next;
			kfree(a);
		} else { /* FIXME: Remove later - catch insertion bugs */
			printk("tcf_action_destroy: BUG? destroying NULL ops\n");
			act = act->next;
			kfree(a);
		}
	}
}

int
tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
{
	int err = -EINVAL;

	if (a->ops == NULL || a->ops->dump == NULL)
		return err;
	return a->ops->dump(skb, a, bind, ref);
}

int
tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
{
	int err = -EINVAL;
	unsigned char *b = skb_tail_pointer(skb);
	struct nlattr *nest;

	if (a->ops == NULL || a->ops->dump == NULL)
		return err;

	NLA_PUT_STRING(skb, TCA_KIND, a->ops->kind);
	if (tcf_action_copy_stats(skb, a, 0))
		goto nla_put_failure;
	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;
	if ((err = tcf_action_dump_old(skb, a, bind, ref)) > 0) {
		nla_nest_end(skb, nest);
		return err;
	}

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}
EXPORT_SYMBOL(tcf_action_dump_1);

int
tcf_action_dump(struct sk_buff *skb, struct tc_action *act, int bind, int ref)
{
	struct tc_action *a;
	int err = -EINVAL;
	struct nlattr *nest;

	while ((a = act) != NULL) {
		act = a->next;
		nest = nla_nest_start(skb, a->order);
		if (nest == NULL)
			goto nla_put_failure;
		err = tcf_action_dump_1(skb, a, bind, ref);
		if (err < 0)
			goto errout;
		nla_nest_end(skb, nest);
	}

	return 0;

nla_put_failure:
	err = -EINVAL;
errout:
	nla_nest_cancel(skb, nest);
	return err;
}

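/*
 * Create a single action from its netlink attributes.  If the matching
 * act_* module is not loaded yet, drop the RTNL lock, try to load it and
 * return -EAGAIN so the caller replays the request.
 */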
struct tc_action *tcf_action_init_1(struct nlattr *nla, struct nlattr *est,
				    char *name, int ovr, int bind)
{
	struct tc_action *a;
	struct tc_action_ops *a_o;
	char act_name[IFNAMSIZ];
	struct nlattr *tb[TCA_ACT_MAX+1];
	struct nlattr *kind;
	int err;

	if (name == NULL) {
		err = nla_parse_nested(tb, TCA_ACT_MAX, nla, NULL);
		if (err < 0)
			goto err_out;
		err = -EINVAL;
		kind = tb[TCA_ACT_KIND];
		if (kind == NULL)
			goto err_out;
		if (nla_strlcpy(act_name, kind, IFNAMSIZ) >= IFNAMSIZ)
			goto err_out;
	} else {
		err = -EINVAL;
		if (strlcpy(act_name, name, IFNAMSIZ) >= IFNAMSIZ)
			goto err_out;
	}

	a_o = tc_lookup_action_n(act_name);
	if (a_o == NULL) {
#ifdef CONFIG_MODULES
		rtnl_unlock();
		request_module("act_%s", act_name);
		rtnl_lock();

		a_o = tc_lookup_action_n(act_name);

		/* We dropped the RTNL semaphore in order to
		 * perform the module load.  So, even if we
		 * succeeded in loading the module we have to
		 * tell the caller to replay the request.  We
		 * indicate this using -EAGAIN.
		 */
		if (a_o != NULL) {
			err = -EAGAIN;
			goto err_mod;
		}
#endif
		err = -ENOENT;
		goto err_out;
	}

	err = -ENOMEM;
	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (a == NULL)
		goto err_mod;

	/* backward compatibility for policer */
	if (name == NULL)
		err = a_o->init(tb[TCA_ACT_OPTIONS], est, a, ovr, bind);
	else
		err = a_o->init(nla, est, a, ovr, bind);
	if (err < 0)
		goto err_free;

	/* The module count goes up only when a brand new policy is created;
	 * if it already exists and is only bound to in a_o->init(), then
	 * ACT_P_CREATED is not returned (zero is).
	 */
	if (err != ACT_P_CREATED)
		module_put(a_o->owner);
	a->ops = a_o;

	return a;

err_free:
	kfree(a);
err_mod:
	module_put(a_o->owner);
err_out:
	return ERR_PTR(err);
}

struct tc_action *tcf_action_init(struct nlattr *nla, struct nlattr *est,
				  char *name, int ovr, int bind)
{
	struct nlattr *tb[TCA_ACT_MAX_PRIO+1];
	struct tc_action *head = NULL, *act, *act_prev = NULL;
	int err;
	int i;

	err = nla_parse_nested(tb, TCA_ACT_MAX_PRIO, nla, NULL);
	if (err < 0)
		return ERR_PTR(err);

	for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
		act = tcf_action_init_1(tb[i], est, name, ovr, bind);
		if (IS_ERR(act))
			goto err;
		act->order = i;

		if (head == NULL)
			head = act;
		else
			act_prev->next = act;
		act_prev = act;
	}
	return head;

err:
	if (head != NULL)
		tcf_action_destroy(head, bind);
	return act;
}

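/*
 * Append the action's basic, rate-estimator and queue statistics to a
 * dump.  In compat mode the old TCA_STATS/TCA_XSTATS TLVs are used
 * instead of TCA_ACT_STATS.
 */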
int tcf_action_copy_stats(struct sk_buff *skb, struct tc_action *a,
			  int compat_mode)
{
	int err = 0;
	struct gnet_dump d;
	struct tcf_act_hdr *h = a->priv;

	if (h == NULL)
		goto errout;

	/* compat_mode being true specifies a call that is supposed
	 * to add additional backward compatibility statistic TLVs.
	 */
	if (compat_mode) {
		if (a->type == TCA_OLD_COMPAT)
			err = gnet_stats_start_copy_compat(skb, 0,
				TCA_STATS, TCA_XSTATS, &h->tcf_lock, &d);
		else
			return 0;
	} else
		err = gnet_stats_start_copy(skb, TCA_ACT_STATS,
					    &h->tcf_lock, &d);

	if (err < 0)
		goto errout;

	if (a->ops != NULL && a->ops->get_stats != NULL)
		if (a->ops->get_stats(skb, a) < 0)
			goto errout;

	if (gnet_stats_copy_basic(&d, &h->tcf_bstats) < 0 ||
	    gnet_stats_copy_rate_est(&d, &h->tcf_rate_est) < 0 ||
	    gnet_stats_copy_queue(&d, &h->tcf_qstats) < 0)
		goto errout;

	if (gnet_stats_finish_copy(&d) < 0)
		goto errout;

	return 0;

errout:
	return -1;
}

static int
tca_get_fill(struct sk_buff *skb, struct tc_action *a, u32 pid, u32 seq,
	     u16 flags, int event, int bind, int ref)
{
	struct tcamsg *t;
	struct nlmsghdr *nlh;
	unsigned char *b = skb_tail_pointer(skb);
	struct nlattr *nest;

	nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*t), flags);

	t = NLMSG_DATA(nlh);
	t->tca_family = AF_UNSPEC;
	t->tca__pad1 = 0;
	t->tca__pad2 = 0;

	nest = nla_nest_start(skb, TCA_ACT_TAB);
	if (nest == NULL)
		goto nla_put_failure;

	if (tcf_action_dump(skb, a, bind, ref) < 0)
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

nla_put_failure:
nlmsg_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int
act_get_notify(u32 pid, struct nlmsghdr *n, struct tc_action *a, int event)
{
	struct sk_buff *skb;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;
	if (tca_get_fill(skb, a, pid, n->nlmsg_seq, 0, event, 0, 0) <= 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	return rtnl_unicast(skb, &init_net, pid);
}

static struct tc_action *
tcf_action_get_1(struct nlattr *nla, struct nlmsghdr *n, u32 pid)
{
	struct nlattr *tb[TCA_ACT_MAX+1];
	struct tc_action *a;
	int index;
	int err;

	err = nla_parse_nested(tb, TCA_ACT_MAX, nla, NULL);
	if (err < 0)
		goto err_out;

	err = -EINVAL;
	if (tb[TCA_ACT_INDEX] == NULL ||
	    nla_len(tb[TCA_ACT_INDEX]) < sizeof(index))
		goto err_out;
	index = nla_get_u32(tb[TCA_ACT_INDEX]);

	err = -ENOMEM;
	a = kzalloc(sizeof(struct tc_action), GFP_KERNEL);
	if (a == NULL)
		goto err_out;

	err = -EINVAL;
	a->ops = tc_lookup_action(tb[TCA_ACT_KIND]);
	if (a->ops == NULL)
		goto err_free;
	if (a->ops->lookup == NULL)
		goto err_mod;
	err = -ENOENT;
	if (a->ops->lookup(a, index) == 0)
		goto err_mod;

	module_put(a->ops->owner);
	return a;

err_mod:
	module_put(a->ops->owner);
err_free:
	kfree(a);
err_out:
	return ERR_PTR(err);
}

static void cleanup_a(struct tc_action *act)
{
	struct tc_action *a;

	for (a = act; a; a = act) {
		act = a->next;
		kfree(a);
	}
}

static struct tc_action *create_a(int i)
{
	struct tc_action *act;

	act = kzalloc(sizeof(*act), GFP_KERNEL);
	if (act == NULL) {
		printk("create_a: failed to alloc!\n");
		return NULL;
	}
	act->order = i;
	return act;
}

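/*
 * RTM_DELACTION with NLM_F_ROOT set: delete every instance of the given
 * action kind and notify userspace of the flush.
 */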
static int tca_action_flush(struct nlattr *nla, struct nlmsghdr *n, u32 pid)
{
	struct sk_buff *skb;
	unsigned char *b;
	struct nlmsghdr *nlh;
	struct tcamsg *t;
	struct netlink_callback dcb;
	struct nlattr *nest;
	struct nlattr *tb[TCA_ACT_MAX+1];
	struct nlattr *kind;
	struct tc_action *a = create_a(0);
	int err = -ENOMEM;

	if (a == NULL) {
		printk("tca_action_flush: couldn't create tc_action\n");
		return err;
	}

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb) {
		printk("tca_action_flush: failed skb alloc\n");
		kfree(a);
		return err;
	}

	b = skb_tail_pointer(skb);

	err = nla_parse_nested(tb, TCA_ACT_MAX, nla, NULL);
	if (err < 0)
		goto err_out;

	err = -EINVAL;
	kind = tb[TCA_ACT_KIND];
	a->ops = tc_lookup_action(kind);
	if (a->ops == NULL)
		goto err_out;

	nlh = NLMSG_PUT(skb, pid, n->nlmsg_seq, RTM_DELACTION, sizeof(*t));
	t = NLMSG_DATA(nlh);
	t->tca_family = AF_UNSPEC;
	t->tca__pad1 = 0;
	t->tca__pad2 = 0;

	nest = nla_nest_start(skb, TCA_ACT_TAB);
	if (nest == NULL)
		goto nla_put_failure;

	err = a->ops->walk(skb, &dcb, RTM_DELACTION, a);
	if (err < 0)
		goto nla_put_failure;
	if (err == 0)
		goto noflush_out;

	nla_nest_end(skb, nest);

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	nlh->nlmsg_flags |= NLM_F_ROOT;
	module_put(a->ops->owner);
	kfree(a);
	err = rtnetlink_send(skb, &init_net, pid, RTNLGRP_TC,
			     n->nlmsg_flags & NLM_F_ECHO);
	if (err > 0)
		return 0;

	return err;

nla_put_failure:
nlmsg_failure:
	module_put(a->ops->owner);
err_out:
noflush_out:
	kfree_skb(skb);
	kfree(a);
	return err;
}

static int
tca_action_gd(struct nlattr *nla, struct nlmsghdr *n, u32 pid, int event)
{
	int i, ret;
	struct nlattr *tb[TCA_ACT_MAX_PRIO+1];
	struct tc_action *head = NULL, *act, *act_prev = NULL;

	ret = nla_parse_nested(tb, TCA_ACT_MAX_PRIO, nla, NULL);
	if (ret < 0)
		return ret;

	if (event == RTM_DELACTION && n->nlmsg_flags & NLM_F_ROOT) {
		if (tb[1] != NULL)
			return tca_action_flush(tb[1], n, pid);
		else
			return -EINVAL;
	}

	for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
		act = tcf_action_get_1(tb[i], n, pid);
		if (IS_ERR(act)) {
			ret = PTR_ERR(act);
			goto err;
		}
		act->order = i;

		if (head == NULL)
			head = act;
		else
			act_prev->next = act;
		act_prev = act;
	}

	if (event == RTM_GETACTION)
		ret = act_get_notify(pid, n, head, event);
	else { /* delete */
		struct sk_buff *skb;

		skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
		if (!skb) {
			ret = -ENOBUFS;
			goto err;
		}

		if (tca_get_fill(skb, head, pid, n->nlmsg_seq, 0, event,
				 0, 1) <= 0) {
			kfree_skb(skb);
			ret = -EINVAL;
			goto err;
		}

		/* now do the delete */
		tcf_action_destroy(head, 0);
		ret = rtnetlink_send(skb, &init_net, pid, RTNLGRP_TC,
				     n->nlmsg_flags & NLM_F_ECHO);
		if (ret > 0)
			return 0;
		return ret;
	}
err:
	cleanup_a(head);
	return ret;
}

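/*
 * Notify the RTNLGRP_TC group about a newly installed chain of actions.
 */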
static int tcf_add_notify(struct tc_action *a, u32 pid, u32 seq, int event,
			  u16 flags)
{
	struct tcamsg *t;
	struct nlmsghdr *nlh;
	struct sk_buff *skb;
	struct nlattr *nest;
	unsigned char *b;
	int err = 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	b = skb_tail_pointer(skb);

	nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*t), flags);
	t = NLMSG_DATA(nlh);
	t->tca_family = AF_UNSPEC;
	t->tca__pad1 = 0;
	t->tca__pad2 = 0;

	nest = nla_nest_start(skb, TCA_ACT_TAB);
	if (nest == NULL)
		goto nla_put_failure;

	if (tcf_action_dump(skb, a, 0, 0) < 0)
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	NETLINK_CB(skb).dst_group = RTNLGRP_TC;

	err = rtnetlink_send(skb, &init_net, pid, RTNLGRP_TC,
			     flags & NLM_F_ECHO);
	if (err > 0)
		err = 0;
	return err;

nla_put_failure:
nlmsg_failure:
	kfree_skb(skb);
	return -1;
}

static int
tcf_action_add(struct nlattr *nla, struct nlmsghdr *n, u32 pid, int ovr)
{
	int ret = 0;
	struct tc_action *act;
	struct tc_action *a;
	u32 seq = n->nlmsg_seq;

	act = tcf_action_init(nla, NULL, NULL, ovr, 0);
	if (act == NULL)
		goto done;
	if (IS_ERR(act)) {
		ret = PTR_ERR(act);
		goto done;
	}

	/* dump then free all the actions after update; inserted policy
	 * stays intact
	 */
	ret = tcf_add_notify(act, pid, seq, RTM_NEWACTION, n->nlmsg_flags);
	for (a = act; a; a = act) {
		act = a->next;
		kfree(a);
	}
done:
	return ret;
}

static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_ACT_MAX + 1];
	u32 pid = skb ? NETLINK_CB(skb).pid : 0;
	int ret = 0, ovr = 0;

	if (net != &init_net)
		return -EINVAL;

	ret = nlmsg_parse(n, sizeof(struct tcamsg), tca, TCA_ACT_MAX, NULL);
	if (ret < 0)
		return ret;

	if (tca[TCA_ACT_TAB] == NULL) {
		printk("tc_ctl_action: received NO action attribs\n");
		return -EINVAL;
	}

	/* n->nlmsg_flags & NLM_F_CREATE */
	switch (n->nlmsg_type) {
	case RTM_NEWACTION:
		/* we are going to assume all other flags
		 * imply create only if it doesn't exist.
		 * Note that CREATE | EXCL implies that,
		 * but since we want to avoid ambiguity (e.g. when flags
		 * is zero) then just set this
		 */
		if (n->nlmsg_flags & NLM_F_REPLACE)
			ovr = 1;
replay:
		ret = tcf_action_add(tca[TCA_ACT_TAB], n, pid, ovr);
		if (ret == -EAGAIN)
			goto replay;
		break;
	case RTM_DELACTION:
		ret = tca_action_gd(tca[TCA_ACT_TAB], n, pid, RTM_DELACTION);
		break;
	case RTM_GETACTION:
		ret = tca_action_gd(tca[TCA_ACT_TAB], n, pid, RTM_GETACTION);
		break;
	default:
		BUG();
	}

	return ret;
}

static struct nlattr *
find_dump_kind(struct nlmsghdr *n)
{
	struct nlattr *tb1, *tb2[TCA_ACT_MAX+1];
	struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
	struct nlattr *nla[TCAA_MAX + 1];
	struct nlattr *kind;

	if (nlmsg_parse(n, sizeof(struct tcamsg), nla, TCAA_MAX, NULL) < 0)
		return NULL;
	tb1 = nla[TCA_ACT_TAB];
	if (tb1 == NULL)
		return NULL;

	if (nla_parse(tb, TCA_ACT_MAX_PRIO, nla_data(tb1),
		      NLMSG_ALIGN(nla_len(tb1)), NULL) < 0)
		return NULL;

	if (tb[1] == NULL)
		return NULL;
	if (nla_parse(tb2, TCA_ACT_MAX, nla_data(tb[1]),
		      nla_len(tb[1]), NULL) < 0)
		return NULL;
	kind = tb2[TCA_ACT_KIND];

	return kind;
}

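/*
 * Netlink dump callback: work out which action kind is being asked for
 * and let that kind's ->walk() fill in the dump.
 */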
static int
tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct nlmsghdr *nlh;
	unsigned char *b = skb_tail_pointer(skb);
	struct nlattr *nest;
	struct tc_action_ops *a_o;
	struct tc_action a;
	int ret = 0;
	struct tcamsg *t = (struct tcamsg *) NLMSG_DATA(cb->nlh);
	struct nlattr *kind = find_dump_kind(cb->nlh);

	if (net != &init_net)
		return 0;

	if (kind == NULL) {
		printk("tc_dump_action: action bad kind\n");
		return 0;
	}

	a_o = tc_lookup_action(kind);
	if (a_o == NULL)
		return 0;

	memset(&a, 0, sizeof(struct tc_action));
	a.ops = a_o;

	if (a_o->walk == NULL) {
		printk("tc_dump_action: %s !capable of dumping table\n",
		       a_o->kind);
		goto nla_put_failure;
	}

	nlh = NLMSG_PUT(skb, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq,
			cb->nlh->nlmsg_type, sizeof(*t));
	t = NLMSG_DATA(nlh);
	t->tca_family = AF_UNSPEC;
	t->tca__pad1 = 0;
	t->tca__pad2 = 0;

	nest = nla_nest_start(skb, TCA_ACT_TAB);
	if (nest == NULL)
		goto nla_put_failure;

	ret = a_o->walk(skb, cb, RTM_GETACTION, &a);
	if (ret < 0)
		goto nla_put_failure;

	if (ret > 0) {
		nla_nest_end(skb, nest);
		ret = skb->len;
	} else
		nla_nest_cancel(skb, nest);

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	if (NETLINK_CB(cb->skb).pid && ret)
		nlh->nlmsg_flags |= NLM_F_MULTI;
	module_put(a_o->owner);
	return skb->len;

nla_put_failure:
nlmsg_failure:
	module_put(a_o->owner);
	nlmsg_trim(skb, b);
	return skb->len;
}

static int __init tc_action_init(void)
{
	rtnl_register(PF_UNSPEC, RTM_NEWACTION, tc_ctl_action, NULL);
	rtnl_register(PF_UNSPEC, RTM_DELACTION, tc_ctl_action, NULL);
	rtnl_register(PF_UNSPEC, RTM_GETACTION, tc_ctl_action, tc_dump_action);

	return 0;
}

subsys_initcall(tc_action_init);