/*
 * net/sched/cls_api.c	Packet classifier API.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Changes:
 *
 * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
 *
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>

/* The list of all installed classifier types */
static LIST_HEAD(tcf_proto_base);

/* Protects the list of registered TC modules. It is a pure SMP lock. */
static DEFINE_RWLOCK(cls_mod_lock);

/* Find a classifier type by string name */

static const struct tcf_proto_ops *tcf_proto_lookup_ops(const char *kind)
{
	const struct tcf_proto_ops *t, *res = NULL;

	if (kind) {
		read_lock(&cls_mod_lock);
		list_for_each_entry(t, &tcf_proto_base, head) {
			if (strcmp(kind, t->kind) == 0) {
				if (try_module_get(t->owner))
					res = t;
				break;
			}
		}
		read_unlock(&cls_mod_lock);
	}
	return res;
}

/* Register (unregister) a new classifier type */

int register_tcf_proto_ops(struct tcf_proto_ops *ops)
{
	struct tcf_proto_ops *t;
	int rc = -EEXIST;

	write_lock(&cls_mod_lock);
	list_for_each_entry(t, &tcf_proto_base, head)
		if (!strcmp(ops->kind, t->kind))
			goto out;

	list_add_tail(&ops->head, &tcf_proto_base);
	rc = 0;
out:
	write_unlock(&cls_mod_lock);
	return rc;
}
EXPORT_SYMBOL(register_tcf_proto_ops);

int unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
{
	struct tcf_proto_ops *t;
	int rc = -ENOENT;

	/* Wait for outstanding call_rcu()s, if any, from a
	 * tcf_proto_ops's destroy() handler.
	 */
	rcu_barrier();

	write_lock(&cls_mod_lock);
	list_for_each_entry(t, &tcf_proto_base, head) {
		if (t == ops) {
			list_del(&t->head);
			rc = 0;
			break;
		}
	}
	write_unlock(&cls_mod_lock);
	return rc;
}
EXPORT_SYMBOL(unregister_tcf_proto_ops);
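/* Usage sketch (not part of this file): a classifier module registers its
 * ops at module load and unregisters them at unload. The "foo" names below
 * are hypothetical.
 *
 *	static struct tcf_proto_ops cls_foo_ops __read_mostly = {
 *		.kind		= "foo",
 *		.classify	= foo_classify,
 *		.init		= foo_init,
 *		.destroy	= foo_destroy,
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	static int __init cls_foo_module_init(void)
 *	{
 *		return register_tcf_proto_ops(&cls_foo_ops);
 *	}
 *
 *	static void __exit cls_foo_module_exit(void)
 *	{
 *		unregister_tcf_proto_ops(&cls_foo_ops);
 *	}
 */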
/* Select a new prio value from the range managed by the kernel. */

static inline u32 tcf_auto_prio(struct tcf_proto *tp)
{
	u32 first = TC_H_MAKE(0xC0000000U, 0U);

	if (tp)
		first = tp->prio - 1;

	return TC_H_MAJ(first);
}

static struct tcf_proto *tcf_proto_create(const char *kind, u32 protocol,
					  u32 prio, u32 parent, struct Qdisc *q,
					  struct tcf_chain *chain)
{
	struct tcf_proto *tp;
	int err;

	tp = kzalloc(sizeof(*tp), GFP_KERNEL);
	if (!tp)
		return ERR_PTR(-ENOBUFS);

	err = -ENOENT;
	tp->ops = tcf_proto_lookup_ops(kind);
	if (!tp->ops) {
#ifdef CONFIG_MODULES
		rtnl_unlock();
		request_module("cls_%s", kind);
		rtnl_lock();
		tp->ops = tcf_proto_lookup_ops(kind);
		/* We dropped the RTNL semaphore in order to perform
		 * the module load. So, even if we succeeded in loading
		 * the module we have to replay the request. We indicate
		 * this using -EAGAIN.
		 */
		if (tp->ops) {
			module_put(tp->ops->owner);
			err = -EAGAIN;
		} else {
			err = -ENOENT;
		}
#endif
		/* Without CONFIG_MODULES we fail here with -ENOENT. */
		goto errout;
	}
	tp->classify = tp->ops->classify;
	tp->protocol = protocol;
	tp->prio = prio;
	tp->classid = parent;
	tp->q = q;
	tp->chain = chain;

	err = tp->ops->init(tp);
	if (err) {
		module_put(tp->ops->owner);
		goto errout;
	}
	return tp;

errout:
	kfree(tp);
	return ERR_PTR(err);
}

static void tcf_proto_destroy(struct tcf_proto *tp)
{
	tp->ops->destroy(tp);
	module_put(tp->ops->owner);
	kfree_rcu(tp, rcu);
}

static struct tcf_chain *tcf_chain_create(struct tcf_block *block,
					  u32 chain_index)
{
	struct tcf_chain *chain;

	chain = kzalloc(sizeof(*chain), GFP_KERNEL);
	if (!chain)
		return NULL;
	list_add_tail(&chain->list, &block->chain_list);
	chain->block = block;
	chain->index = chain_index;
	chain->refcnt = 0;
	return chain;
}

static void tcf_chain_flush(struct tcf_chain *chain)
{
	struct tcf_proto *tp;

	if (chain->p_filter_chain)
		RCU_INIT_POINTER(*chain->p_filter_chain, NULL);
	while ((tp = rtnl_dereference(chain->filter_chain)) != NULL) {
		RCU_INIT_POINTER(chain->filter_chain, tp->next);
		tcf_proto_destroy(tp);
	}
}

static void tcf_chain_destroy(struct tcf_chain *chain)
{
	/* May already have been removed from the list by a previous call. */
	if (!list_empty(&chain->list))
		list_del_init(&chain->list);

	/* There might still be a reference held when we get here from
	 * tcf_block_put. Wait for the user to drop it before freeing.
	 */
	if (!chain->refcnt)
		kfree(chain);
}

struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
				bool create)
{
	struct tcf_chain *chain;

	list_for_each_entry(chain, &block->chain_list, list) {
		if (chain->index == chain_index)
			goto incref;
	}
	chain = create ? tcf_chain_create(block, chain_index) : NULL;

incref:
	if (chain)
		chain->refcnt++;
	return chain;
}
EXPORT_SYMBOL(tcf_chain_get);

void tcf_chain_put(struct tcf_chain *chain)
{
	/* Destroy an unused chain, with the exception of chain 0, which is
	 * the default one and always has to be present.
	 */
	if (--chain->refcnt == 0 && !chain->filter_chain && chain->index != 0)
		tcf_chain_destroy(chain);
}
EXPORT_SYMBOL(tcf_chain_put);
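/* Usage sketch: tcf_chain_get() and tcf_chain_put() pair up around a chain
 * reference, e.g. when an action resolves a "goto chain" target. A
 * hypothetical caller, running under RTNL like the rest of this API:
 *
 *	chain = tcf_chain_get(block, chain_index, true);
 *	if (!chain)
 *		return -ENOMEM;
 *	...
 *	tcf_chain_put(chain);
 */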
static void
tcf_chain_filter_chain_ptr_set(struct tcf_chain *chain,
			       struct tcf_proto __rcu **p_filter_chain)
{
	chain->p_filter_chain = p_filter_chain;
}

int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain)
{
	struct tcf_block *block = kzalloc(sizeof(*block), GFP_KERNEL);
	struct tcf_chain *chain;
	int err;

	if (!block)
		return -ENOMEM;
	INIT_LIST_HEAD(&block->chain_list);
	/* Create chain 0 by default; it always has to be present. */
	chain = tcf_chain_create(block, 0);
	if (!chain) {
		err = -ENOMEM;
		goto err_chain_create;
	}
	tcf_chain_filter_chain_ptr_set(chain, p_filter_chain);
	*p_block = block;
	return 0;

err_chain_create:
	kfree(block);
	return err;
}
EXPORT_SYMBOL(tcf_block_get);

void tcf_block_put(struct tcf_block *block)
{
	struct tcf_chain *chain, *tmp;

	if (!block)
		return;

	list_for_each_entry_safe(chain, tmp, &block->chain_list, list) {
		tcf_chain_flush(chain);
		tcf_chain_destroy(chain);
	}
	kfree(block);
}
EXPORT_SYMBOL(tcf_block_put);
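/* Usage sketch: a classful qdisc typically allocates its block in ->init
 * and releases it in ->destroy. The foo_sched_data layout below is
 * hypothetical.
 *
 *	struct foo_sched_data {
 *		struct tcf_block *block;
 *		struct tcf_proto __rcu *filter_list;
 *	};
 *
 *	static int foo_init(struct Qdisc *sch, struct nlattr *opt)
 *	{
 *		struct foo_sched_data *q = qdisc_priv(sch);
 *
 *		return tcf_block_get(&q->block, &q->filter_list);
 *	}
 *
 *	static void foo_destroy(struct Qdisc *sch)
 *	{
 *		struct foo_sched_data *q = qdisc_priv(sch);
 *
 *		tcf_block_put(q->block);
 *	}
 */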
/* Main classifier routine: scans classifier chain attached
 * to this qdisc, (optionally) tests for protocol and asks
 * specific classifiers.
 */
int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		 struct tcf_result *res, bool compat_mode)
{
	__be16 protocol = tc_skb_protocol(skb);
#ifdef CONFIG_NET_CLS_ACT
	const int max_reclassify_loop = 4;
	const struct tcf_proto *orig_tp = tp;
	const struct tcf_proto *first_tp;
	int limit = 0;

reclassify:
#endif
	for (; tp; tp = rcu_dereference_bh(tp->next)) {
		int err;

		if (tp->protocol != protocol &&
		    tp->protocol != htons(ETH_P_ALL))
			continue;

		err = tp->classify(skb, tp, res);
#ifdef CONFIG_NET_CLS_ACT
		if (unlikely(err == TC_ACT_RECLASSIFY && !compat_mode)) {
			first_tp = orig_tp;
			goto reset;
		} else if (unlikely(TC_ACT_EXT_CMP(err, TC_ACT_GOTO_CHAIN))) {
			first_tp = res->goto_tp;
			goto reset;
		}
#endif
		if (err >= 0)
			return err;
	}

	return TC_ACT_UNSPEC; /* signal: continue lookup */
#ifdef CONFIG_NET_CLS_ACT
reset:
	if (unlikely(limit++ >= max_reclassify_loop)) {
		net_notice_ratelimited("%s: reclassify loop, rule prio %u, protocol %02x\n",
				       tp->q->ops->id, tp->prio & 0xffff,
				       ntohs(tp->protocol));
		return TC_ACT_SHOT;
	}

	tp = first_tp;
	protocol = tc_skb_protocol(skb);
	goto reclassify;
#endif
}
EXPORT_SYMBOL(tcf_classify);
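/* Usage sketch: a qdisc classifies on the fast path roughly like this
 * (simplified from the pattern classful qdiscs use; error handling and
 * the CONFIG_NET_CLS_ACT result codes are omitted):
 *
 *	struct tcf_result res;
 *	struct tcf_proto *fl = rcu_dereference_bh(q->filter_list);
 *	int result = tcf_classify(skb, fl, &res, false);
 *
 *	if (result >= 0)
 *		... map res.classid to a class/queue ...
 */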
struct tcf_chain_info {
	struct tcf_proto __rcu **pprev;
	struct tcf_proto __rcu *next;
};

static struct tcf_proto *tcf_chain_tp_prev(struct tcf_chain_info *chain_info)
{
	return rtnl_dereference(*chain_info->pprev);
}

static void tcf_chain_tp_insert(struct tcf_chain *chain,
				struct tcf_chain_info *chain_info,
				struct tcf_proto *tp)
{
	if (chain->p_filter_chain &&
	    *chain_info->pprev == chain->filter_chain)
		rcu_assign_pointer(*chain->p_filter_chain, tp);
	RCU_INIT_POINTER(tp->next, tcf_chain_tp_prev(chain_info));
	rcu_assign_pointer(*chain_info->pprev, tp);
}

static void tcf_chain_tp_remove(struct tcf_chain *chain,
				struct tcf_chain_info *chain_info,
				struct tcf_proto *tp)
{
	struct tcf_proto *next = rtnl_dereference(chain_info->next);

	if (chain->p_filter_chain && tp == chain->filter_chain)
		RCU_INIT_POINTER(*chain->p_filter_chain, next);
	RCU_INIT_POINTER(*chain_info->pprev, next);
}

static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
					   struct tcf_chain_info *chain_info,
					   u32 protocol, u32 prio,
					   bool prio_allocate)
{
	struct tcf_proto __rcu **pprev;
	struct tcf_proto *tp;

	/* Check the chain for existence of a proto-tcf with this priority */
	for (pprev = &chain->filter_chain;
	     (tp = rtnl_dereference(*pprev)); pprev = &tp->next) {
		if (tp->prio >= prio) {
			if (tp->prio == prio) {
				if (prio_allocate ||
				    (tp->protocol != protocol && protocol))
					return ERR_PTR(-EINVAL);
			} else {
				tp = NULL;
			}
			break;
		}
	}
	chain_info->pprev = pprev;
	chain_info->next = tp ? tp->next : NULL;
	return tp;
}
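/* The filters on a chain form a singly linked list sorted by ascending
 * prio, e.g.:
 *
 *	chain->filter_chain --> tp(prio 10) --> tp(prio 20) --> NULL
 *
 * chain_info->pprev then points at the link to update on insert or
 * remove, and chain_info->next caches the element after the match.
 */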
static int tcf_fill_node(struct net *net, struct sk_buff *skb,
			 struct tcf_proto *tp, void *fh, u32 portid,
			 u32 seq, u16 flags, int event)
{
	struct tcmsg *tcm;
	struct nlmsghdr *nlh;
	unsigned char *b = skb_tail_pointer(skb);

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
	if (!nlh)
		goto out_nlmsg_trim;
	tcm = nlmsg_data(nlh);
	tcm->tcm_family = AF_UNSPEC;
	tcm->tcm__pad1 = 0;
	tcm->tcm__pad2 = 0;
	tcm->tcm_ifindex = qdisc_dev(tp->q)->ifindex;
	tcm->tcm_parent = tp->classid;
	tcm->tcm_info = TC_H_MAKE(tp->prio, tp->protocol);
	if (nla_put_string(skb, TCA_KIND, tp->ops->kind))
		goto nla_put_failure;
	if (nla_put_u32(skb, TCA_CHAIN, tp->chain->index))
		goto nla_put_failure;
	if (!fh) {
		tcm->tcm_handle = 0;
	} else {
		if (tp->ops->dump && tp->ops->dump(net, tp, fh, skb, tcm) < 0)
			goto nla_put_failure;
	}
	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

out_nlmsg_trim:
nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int tfilter_notify(struct net *net, struct sk_buff *oskb,
			  struct nlmsghdr *n, struct tcf_proto *tp,
			  void *fh, int event, bool unicast)
{
	struct sk_buff *skb;
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tcf_fill_node(net, skb, tp, fh, portid, n->nlmsg_seq,
			  n->nlmsg_flags, event) <= 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	if (unicast)
		return netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);

	return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
			      n->nlmsg_flags & NLM_F_ECHO);
}

static int tfilter_del_notify(struct net *net, struct sk_buff *oskb,
			      struct nlmsghdr *n, struct tcf_proto *tp,
			      void *fh, bool unicast, bool *last)
{
	struct sk_buff *skb;
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
	int err;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tcf_fill_node(net, skb, tp, fh, portid, n->nlmsg_seq,
			  n->nlmsg_flags, RTM_DELTFILTER) <= 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	err = tp->ops->delete(tp, fh, last);
	if (err) {
		kfree_skb(skb);
		return err;
	}

	if (unicast)
		return netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);

	return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
			      n->nlmsg_flags & NLM_F_ECHO);
}

static void tfilter_notify_chain(struct net *net, struct sk_buff *oskb,
				 struct nlmsghdr *n,
				 struct tcf_chain *chain, int event)
{
	struct tcf_proto *tp;

	for (tp = rtnl_dereference(chain->filter_chain);
	     tp; tp = rtnl_dereference(tp->next))
		tfilter_notify(net, oskb, n, tp, NULL, event, false);
}

/* Add/change/delete/get a filter node */

static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	struct tcmsg *t;
	u32 protocol;
	u32 prio;
	bool prio_allocate;
	u32 parent;
	u32 chain_index;
	struct net_device *dev;
	struct Qdisc *q;
	struct tcf_chain_info chain_info;
	struct tcf_chain *chain = NULL;
	struct tcf_block *block;
	struct tcf_proto *tp;
	const struct Qdisc_class_ops *cops;
	unsigned long cl;
	void *fh;
	int err;
	int tp_created;

	if ((n->nlmsg_type != RTM_GETTFILTER) &&
	    !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

replay:
	tp_created = 0;

	err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, NULL, extack);
	if (err < 0)
		return err;

	t = nlmsg_data(n);
	protocol = TC_H_MIN(t->tcm_info);
	prio = TC_H_MAJ(t->tcm_info);
	prio_allocate = false;
	parent = t->tcm_parent;
	cl = 0;

	if (prio == 0) {
		switch (n->nlmsg_type) {
		case RTM_DELTFILTER:
			if (protocol || t->tcm_handle || tca[TCA_KIND])
				return -ENOENT;
			break;
		case RTM_NEWTFILTER:
			/* If no priority is provided by the user,
			 * we allocate one.
			 */
			if (n->nlmsg_flags & NLM_F_CREATE) {
				prio = TC_H_MAKE(0x80000000U, 0U);
				prio_allocate = true;
				break;
			}
			/* fall-through */
		default:
			return -ENOENT;
		}
	}

	/* Find head of filter chain. */

	/* Find link */
	dev = __dev_get_by_index(net, t->tcm_ifindex);
	if (dev == NULL)
		return -ENODEV;

	/* Find qdisc */
	if (!parent) {
		q = dev->qdisc;
		parent = q->handle;
	} else {
		q = qdisc_lookup(dev, TC_H_MAJ(t->tcm_parent));
		if (q == NULL)
			return -EINVAL;
	}

	/* Is it classful? */
	cops = q->ops->cl_ops;
	if (!cops)
		return -EINVAL;

	if (!cops->tcf_block)
		return -EOPNOTSUPP;

	/* Do we search for a filter attached to a class? */
	if (TC_H_MIN(parent)) {
		cl = cops->find(q, parent);
		if (cl == 0)
			return -ENOENT;
	}

	/* Finally, grab the filter block. */
	block = cops->tcf_block(q, cl);
	if (!block) {
		err = -EINVAL;
		goto errout;
	}

	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
	if (chain_index > TC_ACT_EXT_VAL_MASK) {
		err = -EINVAL;
		goto errout;
	}
	chain = tcf_chain_get(block, chain_index,
			      n->nlmsg_type == RTM_NEWTFILTER);
	if (!chain) {
		err = n->nlmsg_type == RTM_NEWTFILTER ? -ENOMEM : -EINVAL;
		goto errout;
	}

	if (n->nlmsg_type == RTM_DELTFILTER && prio == 0) {
		tfilter_notify_chain(net, skb, n, chain, RTM_DELTFILTER);
		tcf_chain_flush(chain);
		err = 0;
		goto errout;
	}

	tp = tcf_chain_tp_find(chain, &chain_info, protocol,
			       prio, prio_allocate);
	if (IS_ERR(tp)) {
		err = PTR_ERR(tp);
		goto errout;
	}

	if (tp == NULL) {
		/* Proto-tcf does not exist, create a new one */

		if (tca[TCA_KIND] == NULL || !protocol) {
			err = -EINVAL;
			goto errout;
		}

		if (n->nlmsg_type != RTM_NEWTFILTER ||
		    !(n->nlmsg_flags & NLM_F_CREATE)) {
			err = -ENOENT;
			goto errout;
		}

		if (prio_allocate)
			prio = tcf_auto_prio(tcf_chain_tp_prev(&chain_info));

		tp = tcf_proto_create(nla_data(tca[TCA_KIND]),
				      protocol, prio, parent, q, chain);
		if (IS_ERR(tp)) {
			err = PTR_ERR(tp);
			goto errout;
		}
		tp_created = 1;
	} else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
		err = -EINVAL;
		goto errout;
	}

	fh = tp->ops->get(tp, t->tcm_handle);

	if (!fh) {
		if (n->nlmsg_type == RTM_DELTFILTER && t->tcm_handle == 0) {
			tcf_chain_tp_remove(chain, &chain_info, tp);
			tfilter_notify(net, skb, n, tp, fh,
				       RTM_DELTFILTER, false);
			tcf_proto_destroy(tp);
			err = 0;
			goto errout;
		}

		if (n->nlmsg_type != RTM_NEWTFILTER ||
		    !(n->nlmsg_flags & NLM_F_CREATE)) {
			err = -ENOENT;
			goto errout;
		}
	} else {
		bool last;

		switch (n->nlmsg_type) {
		case RTM_NEWTFILTER:
			if (n->nlmsg_flags & NLM_F_EXCL) {
				if (tp_created)
					tcf_proto_destroy(tp);
				err = -EEXIST;
				goto errout;
			}
			break;
		case RTM_DELTFILTER:
			err = tfilter_del_notify(net, skb, n, tp, fh, false,
						 &last);
			if (err)
				goto errout;
			if (last) {
				tcf_chain_tp_remove(chain, &chain_info, tp);
				tcf_proto_destroy(tp);
			}
			goto errout;
		case RTM_GETTFILTER:
			err = tfilter_notify(net, skb, n, tp, fh,
					     RTM_NEWTFILTER, true);
			goto errout;
		default:
			err = -EINVAL;
			goto errout;
		}
	}

	err = tp->ops->change(net, skb, tp, cl, t->tcm_handle, tca, &fh,
			      n->nlmsg_flags & NLM_F_CREATE ? TCA_ACT_NOREPLACE :
							      TCA_ACT_REPLACE);
	if (err == 0) {
		if (tp_created)
			tcf_chain_tp_insert(chain, &chain_info, tp);
		tfilter_notify(net, skb, n, tp, fh, RTM_NEWTFILTER, false);
	} else {
		if (tp_created)
			tcf_proto_destroy(tp);
	}

errout:
	if (chain)
		tcf_chain_put(chain);
	if (err == -EAGAIN)
		/* Replay the request. */
		goto replay;
	return err;
}
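/* For reference, iproute2 requests map onto this handler roughly as
 * follows (device and match below are illustrative):
 *
 *	tc filter add dev eth0 parent 1: protocol ip prio 10 \
 *		u32 match ip dst 10.0.0.1/32 flowid 1:1	->	RTM_NEWTFILTER
 *	tc filter del dev eth0 parent 1: prio 10	->	RTM_DELTFILTER
 */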
struct tcf_dump_args {
	struct tcf_walker w;
	struct sk_buff *skb;
	struct netlink_callback *cb;
};

static int tcf_node_dump(struct tcf_proto *tp, void *n, struct tcf_walker *arg)
{
	struct tcf_dump_args *a = (void *)arg;
	struct net *net = sock_net(a->skb->sk);

	return tcf_fill_node(net, a->skb, tp, n, NETLINK_CB(a->cb->skb).portid,
			     a->cb->nlh->nlmsg_seq, NLM_F_MULTI,
			     RTM_NEWTFILTER);
}

static bool tcf_chain_dump(struct tcf_chain *chain, struct sk_buff *skb,
			   struct netlink_callback *cb,
			   long index_start, long *p_index)
{
	struct net *net = sock_net(skb->sk);
	struct tcmsg *tcm = nlmsg_data(cb->nlh);
	struct tcf_dump_args arg;
	struct tcf_proto *tp;

	for (tp = rtnl_dereference(chain->filter_chain);
	     tp; tp = rtnl_dereference(tp->next), (*p_index)++) {
		if (*p_index < index_start)
			continue;
		if (TC_H_MAJ(tcm->tcm_info) &&
		    TC_H_MAJ(tcm->tcm_info) != tp->prio)
			continue;
		if (TC_H_MIN(tcm->tcm_info) &&
		    TC_H_MIN(tcm->tcm_info) != tp->protocol)
			continue;
		if (*p_index > index_start)
			memset(&cb->args[1], 0,
			       sizeof(cb->args) - sizeof(cb->args[0]));
		if (cb->args[1] == 0) {
			if (tcf_fill_node(net, skb, tp, NULL,
					  NETLINK_CB(cb->skb).portid,
					  cb->nlh->nlmsg_seq, NLM_F_MULTI,
					  RTM_NEWTFILTER) <= 0)
				return false;

			cb->args[1] = 1;
		}
		if (!tp->ops->walk)
			continue;
		arg.w.fn = tcf_node_dump;
		arg.skb = skb;
		arg.cb = cb;
		arg.w.stop = 0;
		arg.w.skip = cb->args[1] - 1;
		arg.w.count = 0;
		tp->ops->walk(tp, &arg.w);
		cb->args[1] = arg.w.count + 1;
		if (arg.w.stop)
			return false;
	}
	return true;
}

/* called with RTNL */
static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	struct net_device *dev;
	struct Qdisc *q;
	struct tcf_block *block;
	struct tcf_chain *chain;
	struct tcmsg *tcm = nlmsg_data(cb->nlh);
	unsigned long cl = 0;
	const struct Qdisc_class_ops *cops;
	long index_start;
	long index;
	int err;

	if (nlmsg_len(cb->nlh) < sizeof(*tcm))
		return skb->len;

	err = nlmsg_parse(cb->nlh, sizeof(*tcm), tca, TCA_MAX, NULL, NULL);
	if (err)
		return err;

	dev = __dev_get_by_index(net, tcm->tcm_ifindex);
	if (!dev)
		return skb->len;

	if (!tcm->tcm_parent)
		q = dev->qdisc;
	else
		q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
	if (!q)
		goto out;
	cops = q->ops->cl_ops;
	if (!cops)
		goto out;
	if (!cops->tcf_block)
		goto out;
	if (TC_H_MIN(tcm->tcm_parent)) {
		cl = cops->find(q, tcm->tcm_parent);
		if (cl == 0)
			goto out;
	}
	block = cops->tcf_block(q, cl);
	if (!block)
		goto out;

	index_start = cb->args[0];
	index = 0;

	list_for_each_entry(chain, &block->chain_list, list) {
		if (tca[TCA_CHAIN] &&
		    nla_get_u32(tca[TCA_CHAIN]) != chain->index)
			continue;
		if (!tcf_chain_dump(chain, skb, cb, index_start, &index))
			break;
	}

	cb->args[0] = index;

out:
	return skb->len;
}
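/* For reference, "tc filter show dev eth0 parent 1:" drives this dump.
 * cb->args[0] carries the flattened filter index across dump calls and
 * cb->args[1] the per-tp walker position, so a large dump resumes where
 * the previous skb filled up.
 */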
void tcf_exts_destroy(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	LIST_HEAD(actions);

	tcf_exts_to_list(exts, &actions);
	tcf_action_destroy(&actions, TCA_ACT_UNBIND);
	kfree(exts->actions);
	exts->nr_actions = 0;
#endif
}
EXPORT_SYMBOL(tcf_exts_destroy);

int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
		      struct nlattr *rate_tlv, struct tcf_exts *exts, bool ovr)
{
#ifdef CONFIG_NET_CLS_ACT
	{
		struct tc_action *act;

		if (exts->police && tb[exts->police]) {
			act = tcf_action_init_1(net, tp, tb[exts->police],
						rate_tlv, "police", ovr,
						TCA_ACT_BIND);
			if (IS_ERR(act))
				return PTR_ERR(act);

			act->type = exts->type = TCA_OLD_COMPAT;
			exts->actions[0] = act;
			exts->nr_actions = 1;
		} else if (exts->action && tb[exts->action]) {
			LIST_HEAD(actions);
			int err, i = 0;

			err = tcf_action_init(net, tp, tb[exts->action],
					      rate_tlv, NULL, ovr, TCA_ACT_BIND,
					      &actions);
			if (err)
				return err;
			list_for_each_entry(act, &actions, list)
				exts->actions[i++] = act;
			exts->nr_actions = i;
		}
	}
#else
	if ((exts->action && tb[exts->action]) ||
	    (exts->police && tb[exts->police]))
		return -EOPNOTSUPP;
#endif

	return 0;
}
EXPORT_SYMBOL(tcf_exts_validate);

void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src)
{
#ifdef CONFIG_NET_CLS_ACT
	struct tcf_exts old = *dst;

	*dst = *src;
	tcf_exts_destroy(&old);
#endif
}
EXPORT_SYMBOL(tcf_exts_change);

#ifdef CONFIG_NET_CLS_ACT
static struct tc_action *tcf_exts_first_act(struct tcf_exts *exts)
{
	if (exts->nr_actions == 0)
		return NULL;
	else
		return exts->actions[0];
}
#endif

int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	struct nlattr *nest;

	if (exts->action && tcf_exts_has_actions(exts)) {
		/*
		 * Again for backward compatibility: we want
		 * to work with both old and new modes of entering
		 * tc data even if iproute2 was newer - jhs
		 */
		if (exts->type != TCA_OLD_COMPAT) {
			LIST_HEAD(actions);

			nest = nla_nest_start(skb, exts->action);
			if (nest == NULL)
				goto nla_put_failure;

			tcf_exts_to_list(exts, &actions);
			if (tcf_action_dump(skb, &actions, 0, 0) < 0)
				goto nla_put_failure;
			nla_nest_end(skb, nest);
		} else if (exts->police) {
			struct tc_action *act = tcf_exts_first_act(exts);

			nest = nla_nest_start(skb, exts->police);
			if (nest == NULL || !act)
				goto nla_put_failure;
			if (tcf_action_dump_old(skb, act, 0, 0) < 0)
				goto nla_put_failure;
			nla_nest_end(skb, nest);
		}
	}
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
#else
	return 0;
#endif
}
EXPORT_SYMBOL(tcf_exts_dump);

int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	struct tc_action *a = tcf_exts_first_act(exts);

	if (a != NULL && tcf_action_copy_stats(skb, a, 1) < 0)
		return -1;
#endif
	return 0;
}
EXPORT_SYMBOL(tcf_exts_dump_stats);

int tcf_exts_get_dev(struct net_device *dev, struct tcf_exts *exts,
		     struct net_device **hw_dev)
{
#ifdef CONFIG_NET_CLS_ACT
	const struct tc_action *a;
	LIST_HEAD(actions);

	if (!tcf_exts_has_actions(exts))
		return -EINVAL;

	tcf_exts_to_list(exts, &actions);
	list_for_each_entry(a, &actions, list) {
		if (a->ops->get_dev) {
			a->ops->get_dev(a, dev_net(dev), hw_dev);
			break;
		}
	}
	if (*hw_dev)
		return 0;
#endif
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(tcf_exts_get_dev);
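/* Usage sketch: a classifier's ->change() typically validates actions into
 * a temporary tcf_exts and then commits it atomically. The foo classifier
 * attributes below (TCA_FOO_ACT, TCA_FOO_POLICE, f->exts) are hypothetical;
 * error unwinding is omitted.
 *
 *	struct tcf_exts e;
 *	int err;
 *
 *	err = tcf_exts_init(&e, TCA_FOO_ACT, TCA_FOO_POLICE);
 *	if (err < 0)
 *		return err;
 *	err = tcf_exts_validate(net, tp, tb, est, &e, ovr);
 *	if (err < 0)
 *		return err;
 *	tcf_exts_change(&f->exts, &e);
 */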
static int __init tc_filter_init(void)
{
	rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_ctl_tfilter, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_ctl_tfilter, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_GETTFILTER, tc_ctl_tfilter,
		      tc_dump_tfilter, 0);

	return 0;
}

subsys_initcall(tc_filter_init);
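/* tc_filter_init() runs at subsys_initcall time, before device drivers and
 * qdisc/classifier modules initialize, so the RTM_*TFILTER handlers above
 * are registered before anyone can issue such a request. The rtnetlink
 * core takes the RTNL mutex before invoking them.
 */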