1 /* 2 * net/sched/cls_api.c Packet classifier API. 3 * 4 * This program is free software; you can redistribute it and/or 5 * modify it under the terms of the GNU General Public License 6 * as published by the Free Software Foundation; either version 7 * 2 of the License, or (at your option) any later version. 8 * 9 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru> 10 * 11 * Changes: 12 * 13 * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support 14 * 15 */ 16 17 #include <linux/module.h> 18 #include <linux/types.h> 19 #include <linux/kernel.h> 20 #include <linux/string.h> 21 #include <linux/errno.h> 22 #include <linux/err.h> 23 #include <linux/skbuff.h> 24 #include <linux/init.h> 25 #include <linux/kmod.h> 26 #include <linux/err.h> 27 #include <linux/slab.h> 28 #include <net/net_namespace.h> 29 #include <net/sock.h> 30 #include <net/netlink.h> 31 #include <net/pkt_sched.h> 32 #include <net/pkt_cls.h> 33 34 /* The list of all installed classifier types */ 35 static LIST_HEAD(tcf_proto_base); 36 37 /* Protects list of registered TC modules. It is pure SMP lock. */ 38 static DEFINE_RWLOCK(cls_mod_lock); 39 40 /* Find classifier type by string name */ 41 42 static const struct tcf_proto_ops *tcf_proto_lookup_ops(const char *kind) 43 { 44 const struct tcf_proto_ops *t, *res = NULL; 45 46 if (kind) { 47 read_lock(&cls_mod_lock); 48 list_for_each_entry(t, &tcf_proto_base, head) { 49 if (strcmp(kind, t->kind) == 0) { 50 if (try_module_get(t->owner)) 51 res = t; 52 break; 53 } 54 } 55 read_unlock(&cls_mod_lock); 56 } 57 return res; 58 } 59 60 /* Register(unregister) new classifier type */ 61 62 int register_tcf_proto_ops(struct tcf_proto_ops *ops) 63 { 64 struct tcf_proto_ops *t; 65 int rc = -EEXIST; 66 67 write_lock(&cls_mod_lock); 68 list_for_each_entry(t, &tcf_proto_base, head) 69 if (!strcmp(ops->kind, t->kind)) 70 goto out; 71 72 list_add_tail(&ops->head, &tcf_proto_base); 73 rc = 0; 74 out: 75 write_unlock(&cls_mod_lock); 76 return rc; 77 } 78 EXPORT_SYMBOL(register_tcf_proto_ops); 79 80 static struct workqueue_struct *tc_filter_wq; 81 82 int unregister_tcf_proto_ops(struct tcf_proto_ops *ops) 83 { 84 struct tcf_proto_ops *t; 85 int rc = -ENOENT; 86 87 /* Wait for outstanding call_rcu()s, if any, from a 88 * tcf_proto_ops's destroy() handler. 89 */ 90 rcu_barrier(); 91 flush_workqueue(tc_filter_wq); 92 93 write_lock(&cls_mod_lock); 94 list_for_each_entry(t, &tcf_proto_base, head) { 95 if (t == ops) { 96 list_del(&t->head); 97 rc = 0; 98 break; 99 } 100 } 101 write_unlock(&cls_mod_lock); 102 return rc; 103 } 104 EXPORT_SYMBOL(unregister_tcf_proto_ops); 105 106 bool tcf_queue_work(struct work_struct *work) 107 { 108 return queue_work(tc_filter_wq, work); 109 } 110 EXPORT_SYMBOL(tcf_queue_work); 111 112 /* Select new prio value from the range, managed by kernel. */ 113 114 static inline u32 tcf_auto_prio(struct tcf_proto *tp) 115 { 116 u32 first = TC_H_MAKE(0xC0000000U, 0U); 117 118 if (tp) 119 first = tp->prio - 1; 120 121 return TC_H_MAJ(first); 122 } 123 124 static struct tcf_proto *tcf_proto_create(const char *kind, u32 protocol, 125 u32 prio, u32 parent, struct Qdisc *q, 126 struct tcf_chain *chain) 127 { 128 struct tcf_proto *tp; 129 int err; 130 131 tp = kzalloc(sizeof(*tp), GFP_KERNEL); 132 if (!tp) 133 return ERR_PTR(-ENOBUFS); 134 135 err = -ENOENT; 136 tp->ops = tcf_proto_lookup_ops(kind); 137 if (!tp->ops) { 138 #ifdef CONFIG_MODULES 139 rtnl_unlock(); 140 request_module("cls_%s", kind); 141 rtnl_lock(); 142 tp->ops = tcf_proto_lookup_ops(kind); 143 /* We dropped the RTNL semaphore in order to perform 144 * the module load. So, even if we succeeded in loading 145 * the module we have to replay the request. We indicate 146 * this using -EAGAIN. 147 */ 148 if (tp->ops) { 149 module_put(tp->ops->owner); 150 err = -EAGAIN; 151 } else { 152 err = -ENOENT; 153 } 154 goto errout; 155 #endif 156 } 157 tp->classify = tp->ops->classify; 158 tp->protocol = protocol; 159 tp->prio = prio; 160 tp->classid = parent; 161 tp->q = q; 162 tp->chain = chain; 163 164 err = tp->ops->init(tp); 165 if (err) { 166 module_put(tp->ops->owner); 167 goto errout; 168 } 169 return tp; 170 171 errout: 172 kfree(tp); 173 return ERR_PTR(err); 174 } 175 176 static void tcf_proto_destroy(struct tcf_proto *tp) 177 { 178 tp->ops->destroy(tp); 179 module_put(tp->ops->owner); 180 kfree_rcu(tp, rcu); 181 } 182 183 static struct tcf_chain *tcf_chain_create(struct tcf_block *block, 184 u32 chain_index) 185 { 186 struct tcf_chain *chain; 187 188 chain = kzalloc(sizeof(*chain), GFP_KERNEL); 189 if (!chain) 190 return NULL; 191 list_add_tail(&chain->list, &block->chain_list); 192 chain->block = block; 193 chain->index = chain_index; 194 chain->refcnt = 1; 195 return chain; 196 } 197 198 static void tcf_chain_head_change(struct tcf_chain *chain, 199 struct tcf_proto *tp_head) 200 { 201 if (chain->chain_head_change) 202 chain->chain_head_change(tp_head, 203 chain->chain_head_change_priv); 204 } 205 206 static void tcf_chain_flush(struct tcf_chain *chain) 207 { 208 struct tcf_proto *tp = rtnl_dereference(chain->filter_chain); 209 210 tcf_chain_head_change(chain, NULL); 211 while (tp) { 212 RCU_INIT_POINTER(chain->filter_chain, tp->next); 213 tcf_proto_destroy(tp); 214 tp = rtnl_dereference(chain->filter_chain); 215 tcf_chain_put(chain); 216 } 217 } 218 219 static void tcf_chain_destroy(struct tcf_chain *chain) 220 { 221 list_del(&chain->list); 222 kfree(chain); 223 } 224 225 static void tcf_chain_hold(struct tcf_chain *chain) 226 { 227 ++chain->refcnt; 228 } 229 230 struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index, 231 bool create) 232 { 233 struct tcf_chain *chain; 234 235 list_for_each_entry(chain, &block->chain_list, list) { 236 if (chain->index == chain_index) { 237 tcf_chain_hold(chain); 238 return chain; 239 } 240 } 241 242 return create ? tcf_chain_create(block, chain_index) : NULL; 243 } 244 EXPORT_SYMBOL(tcf_chain_get); 245 246 void tcf_chain_put(struct tcf_chain *chain) 247 { 248 if (--chain->refcnt == 0) 249 tcf_chain_destroy(chain); 250 } 251 EXPORT_SYMBOL(tcf_chain_put); 252 253 static void tcf_block_offload_cmd(struct tcf_block *block, struct Qdisc *q, 254 struct tcf_block_ext_info *ei, 255 enum tc_block_command command) 256 { 257 struct net_device *dev = q->dev_queue->dev; 258 struct tc_block_offload bo = {}; 259 260 if (!dev->netdev_ops->ndo_setup_tc) 261 return; 262 bo.command = command; 263 bo.binder_type = ei->binder_type; 264 bo.block = block; 265 dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo); 266 } 267 268 static void tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q, 269 struct tcf_block_ext_info *ei) 270 { 271 tcf_block_offload_cmd(block, q, ei, TC_BLOCK_BIND); 272 } 273 274 static void tcf_block_offload_unbind(struct tcf_block *block, struct Qdisc *q, 275 struct tcf_block_ext_info *ei) 276 { 277 tcf_block_offload_cmd(block, q, ei, TC_BLOCK_UNBIND); 278 } 279 280 int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q, 281 struct tcf_block_ext_info *ei) 282 { 283 struct tcf_block *block = kzalloc(sizeof(*block), GFP_KERNEL); 284 struct tcf_chain *chain; 285 int err; 286 287 if (!block) 288 return -ENOMEM; 289 INIT_LIST_HEAD(&block->chain_list); 290 INIT_LIST_HEAD(&block->cb_list); 291 292 /* Create chain 0 by default, it has to be always present. */ 293 chain = tcf_chain_create(block, 0); 294 if (!chain) { 295 err = -ENOMEM; 296 goto err_chain_create; 297 } 298 WARN_ON(!ei->chain_head_change); 299 chain->chain_head_change = ei->chain_head_change; 300 chain->chain_head_change_priv = ei->chain_head_change_priv; 301 block->net = qdisc_net(q); 302 block->q = q; 303 tcf_block_offload_bind(block, q, ei); 304 *p_block = block; 305 return 0; 306 307 err_chain_create: 308 kfree(block); 309 return err; 310 } 311 EXPORT_SYMBOL(tcf_block_get_ext); 312 313 static void tcf_chain_head_change_dflt(struct tcf_proto *tp_head, void *priv) 314 { 315 struct tcf_proto __rcu **p_filter_chain = priv; 316 317 rcu_assign_pointer(*p_filter_chain, tp_head); 318 } 319 320 int tcf_block_get(struct tcf_block **p_block, 321 struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q) 322 { 323 struct tcf_block_ext_info ei = { 324 .chain_head_change = tcf_chain_head_change_dflt, 325 .chain_head_change_priv = p_filter_chain, 326 }; 327 328 WARN_ON(!p_filter_chain); 329 return tcf_block_get_ext(p_block, q, &ei); 330 } 331 EXPORT_SYMBOL(tcf_block_get); 332 333 static void tcf_block_put_final(struct work_struct *work) 334 { 335 struct tcf_block *block = container_of(work, struct tcf_block, work); 336 struct tcf_chain *chain, *tmp; 337 338 rtnl_lock(); 339 340 /* At this point, all the chains should have refcnt == 1. */ 341 list_for_each_entry_safe(chain, tmp, &block->chain_list, list) 342 tcf_chain_put(chain); 343 rtnl_unlock(); 344 kfree(block); 345 } 346 347 /* XXX: Standalone actions are not allowed to jump to any chain, and bound 348 * actions should be all removed after flushing. 349 */ 350 void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q, 351 struct tcf_block_ext_info *ei) 352 { 353 struct tcf_chain *chain; 354 355 /* Hold a refcnt for all chains, except 0, so that they don't disappear 356 * while we are iterating. 357 */ 358 list_for_each_entry(chain, &block->chain_list, list) 359 if (chain->index) 360 tcf_chain_hold(chain); 361 362 list_for_each_entry(chain, &block->chain_list, list) 363 tcf_chain_flush(chain); 364 365 tcf_block_offload_unbind(block, q, ei); 366 367 INIT_WORK(&block->work, tcf_block_put_final); 368 /* Wait for existing RCU callbacks to cool down, make sure their works 369 * have been queued before this. We can not flush pending works here 370 * because we are holding the RTNL lock. 371 */ 372 rcu_barrier(); 373 tcf_queue_work(&block->work); 374 } 375 EXPORT_SYMBOL(tcf_block_put_ext); 376 377 void tcf_block_put(struct tcf_block *block) 378 { 379 struct tcf_block_ext_info ei = {0, }; 380 381 if (!block) 382 return; 383 tcf_block_put_ext(block, block->q, &ei); 384 } 385 386 EXPORT_SYMBOL(tcf_block_put); 387 388 struct tcf_block_cb { 389 struct list_head list; 390 tc_setup_cb_t *cb; 391 void *cb_ident; 392 void *cb_priv; 393 unsigned int refcnt; 394 }; 395 396 void *tcf_block_cb_priv(struct tcf_block_cb *block_cb) 397 { 398 return block_cb->cb_priv; 399 } 400 EXPORT_SYMBOL(tcf_block_cb_priv); 401 402 struct tcf_block_cb *tcf_block_cb_lookup(struct tcf_block *block, 403 tc_setup_cb_t *cb, void *cb_ident) 404 { struct tcf_block_cb *block_cb; 405 406 list_for_each_entry(block_cb, &block->cb_list, list) 407 if (block_cb->cb == cb && block_cb->cb_ident == cb_ident) 408 return block_cb; 409 return NULL; 410 } 411 EXPORT_SYMBOL(tcf_block_cb_lookup); 412 413 void tcf_block_cb_incref(struct tcf_block_cb *block_cb) 414 { 415 block_cb->refcnt++; 416 } 417 EXPORT_SYMBOL(tcf_block_cb_incref); 418 419 unsigned int tcf_block_cb_decref(struct tcf_block_cb *block_cb) 420 { 421 return --block_cb->refcnt; 422 } 423 EXPORT_SYMBOL(tcf_block_cb_decref); 424 425 struct tcf_block_cb *__tcf_block_cb_register(struct tcf_block *block, 426 tc_setup_cb_t *cb, void *cb_ident, 427 void *cb_priv) 428 { 429 struct tcf_block_cb *block_cb; 430 431 block_cb = kzalloc(sizeof(*block_cb), GFP_KERNEL); 432 if (!block_cb) 433 return NULL; 434 block_cb->cb = cb; 435 block_cb->cb_ident = cb_ident; 436 block_cb->cb_priv = cb_priv; 437 list_add(&block_cb->list, &block->cb_list); 438 return block_cb; 439 } 440 EXPORT_SYMBOL(__tcf_block_cb_register); 441 442 int tcf_block_cb_register(struct tcf_block *block, 443 tc_setup_cb_t *cb, void *cb_ident, 444 void *cb_priv) 445 { 446 struct tcf_block_cb *block_cb; 447 448 block_cb = __tcf_block_cb_register(block, cb, cb_ident, cb_priv); 449 return block_cb ? 0 : -ENOMEM; 450 } 451 EXPORT_SYMBOL(tcf_block_cb_register); 452 453 void __tcf_block_cb_unregister(struct tcf_block_cb *block_cb) 454 { 455 list_del(&block_cb->list); 456 kfree(block_cb); 457 } 458 EXPORT_SYMBOL(__tcf_block_cb_unregister); 459 460 void tcf_block_cb_unregister(struct tcf_block *block, 461 tc_setup_cb_t *cb, void *cb_ident) 462 { 463 struct tcf_block_cb *block_cb; 464 465 block_cb = tcf_block_cb_lookup(block, cb, cb_ident); 466 if (!block_cb) 467 return; 468 __tcf_block_cb_unregister(block_cb); 469 } 470 EXPORT_SYMBOL(tcf_block_cb_unregister); 471 472 static int tcf_block_cb_call(struct tcf_block *block, enum tc_setup_type type, 473 void *type_data, bool err_stop) 474 { 475 struct tcf_block_cb *block_cb; 476 int ok_count = 0; 477 int err; 478 479 list_for_each_entry(block_cb, &block->cb_list, list) { 480 err = block_cb->cb(type, type_data, block_cb->cb_priv); 481 if (err) { 482 if (err_stop) 483 return err; 484 } else { 485 ok_count++; 486 } 487 } 488 return ok_count; 489 } 490 491 /* Main classifier routine: scans classifier chain attached 492 * to this qdisc, (optionally) tests for protocol and asks 493 * specific classifiers. 494 */ 495 int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp, 496 struct tcf_result *res, bool compat_mode) 497 { 498 __be16 protocol = tc_skb_protocol(skb); 499 #ifdef CONFIG_NET_CLS_ACT 500 const int max_reclassify_loop = 4; 501 const struct tcf_proto *orig_tp = tp; 502 const struct tcf_proto *first_tp; 503 int limit = 0; 504 505 reclassify: 506 #endif 507 for (; tp; tp = rcu_dereference_bh(tp->next)) { 508 int err; 509 510 if (tp->protocol != protocol && 511 tp->protocol != htons(ETH_P_ALL)) 512 continue; 513 514 err = tp->classify(skb, tp, res); 515 #ifdef CONFIG_NET_CLS_ACT 516 if (unlikely(err == TC_ACT_RECLASSIFY && !compat_mode)) { 517 first_tp = orig_tp; 518 goto reset; 519 } else if (unlikely(TC_ACT_EXT_CMP(err, TC_ACT_GOTO_CHAIN))) { 520 first_tp = res->goto_tp; 521 goto reset; 522 } 523 #endif 524 if (err >= 0) 525 return err; 526 } 527 528 return TC_ACT_UNSPEC; /* signal: continue lookup */ 529 #ifdef CONFIG_NET_CLS_ACT 530 reset: 531 if (unlikely(limit++ >= max_reclassify_loop)) { 532 net_notice_ratelimited("%s: reclassify loop, rule prio %u, protocol %02x\n", 533 tp->q->ops->id, tp->prio & 0xffff, 534 ntohs(tp->protocol)); 535 return TC_ACT_SHOT; 536 } 537 538 tp = first_tp; 539 protocol = tc_skb_protocol(skb); 540 goto reclassify; 541 #endif 542 } 543 EXPORT_SYMBOL(tcf_classify); 544 545 struct tcf_chain_info { 546 struct tcf_proto __rcu **pprev; 547 struct tcf_proto __rcu *next; 548 }; 549 550 static struct tcf_proto *tcf_chain_tp_prev(struct tcf_chain_info *chain_info) 551 { 552 return rtnl_dereference(*chain_info->pprev); 553 } 554 555 static void tcf_chain_tp_insert(struct tcf_chain *chain, 556 struct tcf_chain_info *chain_info, 557 struct tcf_proto *tp) 558 { 559 if (*chain_info->pprev == chain->filter_chain) 560 tcf_chain_head_change(chain, tp); 561 RCU_INIT_POINTER(tp->next, tcf_chain_tp_prev(chain_info)); 562 rcu_assign_pointer(*chain_info->pprev, tp); 563 tcf_chain_hold(chain); 564 } 565 566 static void tcf_chain_tp_remove(struct tcf_chain *chain, 567 struct tcf_chain_info *chain_info, 568 struct tcf_proto *tp) 569 { 570 struct tcf_proto *next = rtnl_dereference(chain_info->next); 571 572 if (tp == chain->filter_chain) 573 tcf_chain_head_change(chain, next); 574 RCU_INIT_POINTER(*chain_info->pprev, next); 575 tcf_chain_put(chain); 576 } 577 578 static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain, 579 struct tcf_chain_info *chain_info, 580 u32 protocol, u32 prio, 581 bool prio_allocate) 582 { 583 struct tcf_proto **pprev; 584 struct tcf_proto *tp; 585 586 /* Check the chain for existence of proto-tcf with this priority */ 587 for (pprev = &chain->filter_chain; 588 (tp = rtnl_dereference(*pprev)); pprev = &tp->next) { 589 if (tp->prio >= prio) { 590 if (tp->prio == prio) { 591 if (prio_allocate || 592 (tp->protocol != protocol && protocol)) 593 return ERR_PTR(-EINVAL); 594 } else { 595 tp = NULL; 596 } 597 break; 598 } 599 } 600 chain_info->pprev = pprev; 601 chain_info->next = tp ? tp->next : NULL; 602 return tp; 603 } 604 605 static int tcf_fill_node(struct net *net, struct sk_buff *skb, 606 struct tcf_proto *tp, struct Qdisc *q, u32 parent, 607 void *fh, u32 portid, u32 seq, u16 flags, int event) 608 { 609 struct tcmsg *tcm; 610 struct nlmsghdr *nlh; 611 unsigned char *b = skb_tail_pointer(skb); 612 613 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags); 614 if (!nlh) 615 goto out_nlmsg_trim; 616 tcm = nlmsg_data(nlh); 617 tcm->tcm_family = AF_UNSPEC; 618 tcm->tcm__pad1 = 0; 619 tcm->tcm__pad2 = 0; 620 tcm->tcm_ifindex = qdisc_dev(q)->ifindex; 621 tcm->tcm_parent = parent; 622 tcm->tcm_info = TC_H_MAKE(tp->prio, tp->protocol); 623 if (nla_put_string(skb, TCA_KIND, tp->ops->kind)) 624 goto nla_put_failure; 625 if (nla_put_u32(skb, TCA_CHAIN, tp->chain->index)) 626 goto nla_put_failure; 627 if (!fh) { 628 tcm->tcm_handle = 0; 629 } else { 630 if (tp->ops->dump && tp->ops->dump(net, tp, fh, skb, tcm) < 0) 631 goto nla_put_failure; 632 } 633 nlh->nlmsg_len = skb_tail_pointer(skb) - b; 634 return skb->len; 635 636 out_nlmsg_trim: 637 nla_put_failure: 638 nlmsg_trim(skb, b); 639 return -1; 640 } 641 642 static int tfilter_notify(struct net *net, struct sk_buff *oskb, 643 struct nlmsghdr *n, struct tcf_proto *tp, 644 struct Qdisc *q, u32 parent, 645 void *fh, int event, bool unicast) 646 { 647 struct sk_buff *skb; 648 u32 portid = oskb ? NETLINK_CB(oskb).portid : 0; 649 650 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); 651 if (!skb) 652 return -ENOBUFS; 653 654 if (tcf_fill_node(net, skb, tp, q, parent, fh, portid, n->nlmsg_seq, 655 n->nlmsg_flags, event) <= 0) { 656 kfree_skb(skb); 657 return -EINVAL; 658 } 659 660 if (unicast) 661 return netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT); 662 663 return rtnetlink_send(skb, net, portid, RTNLGRP_TC, 664 n->nlmsg_flags & NLM_F_ECHO); 665 } 666 667 static int tfilter_del_notify(struct net *net, struct sk_buff *oskb, 668 struct nlmsghdr *n, struct tcf_proto *tp, 669 struct Qdisc *q, u32 parent, 670 void *fh, bool unicast, bool *last) 671 { 672 struct sk_buff *skb; 673 u32 portid = oskb ? NETLINK_CB(oskb).portid : 0; 674 int err; 675 676 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); 677 if (!skb) 678 return -ENOBUFS; 679 680 if (tcf_fill_node(net, skb, tp, q, parent, fh, portid, n->nlmsg_seq, 681 n->nlmsg_flags, RTM_DELTFILTER) <= 0) { 682 kfree_skb(skb); 683 return -EINVAL; 684 } 685 686 err = tp->ops->delete(tp, fh, last); 687 if (err) { 688 kfree_skb(skb); 689 return err; 690 } 691 692 if (unicast) 693 return netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT); 694 695 return rtnetlink_send(skb, net, portid, RTNLGRP_TC, 696 n->nlmsg_flags & NLM_F_ECHO); 697 } 698 699 static void tfilter_notify_chain(struct net *net, struct sk_buff *oskb, 700 struct Qdisc *q, u32 parent, 701 struct nlmsghdr *n, 702 struct tcf_chain *chain, int event) 703 { 704 struct tcf_proto *tp; 705 706 for (tp = rtnl_dereference(chain->filter_chain); 707 tp; tp = rtnl_dereference(tp->next)) 708 tfilter_notify(net, oskb, n, tp, q, parent, 0, event, false); 709 } 710 711 /* Add/change/delete/get a filter node */ 712 713 static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n, 714 struct netlink_ext_ack *extack) 715 { 716 struct net *net = sock_net(skb->sk); 717 struct nlattr *tca[TCA_MAX + 1]; 718 struct tcmsg *t; 719 u32 protocol; 720 u32 prio; 721 bool prio_allocate; 722 u32 parent; 723 u32 chain_index; 724 struct net_device *dev; 725 struct Qdisc *q; 726 struct tcf_chain_info chain_info; 727 struct tcf_chain *chain = NULL; 728 struct tcf_block *block; 729 struct tcf_proto *tp; 730 const struct Qdisc_class_ops *cops; 731 unsigned long cl; 732 void *fh; 733 int err; 734 int tp_created; 735 736 if ((n->nlmsg_type != RTM_GETTFILTER) && 737 !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN)) 738 return -EPERM; 739 740 replay: 741 tp_created = 0; 742 743 err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, NULL, extack); 744 if (err < 0) 745 return err; 746 747 t = nlmsg_data(n); 748 protocol = TC_H_MIN(t->tcm_info); 749 prio = TC_H_MAJ(t->tcm_info); 750 prio_allocate = false; 751 parent = t->tcm_parent; 752 cl = 0; 753 754 if (prio == 0) { 755 switch (n->nlmsg_type) { 756 case RTM_DELTFILTER: 757 if (protocol || t->tcm_handle || tca[TCA_KIND]) 758 return -ENOENT; 759 break; 760 case RTM_NEWTFILTER: 761 /* If no priority is provided by the user, 762 * we allocate one. 763 */ 764 if (n->nlmsg_flags & NLM_F_CREATE) { 765 prio = TC_H_MAKE(0x80000000U, 0U); 766 prio_allocate = true; 767 break; 768 } 769 /* fall-through */ 770 default: 771 return -ENOENT; 772 } 773 } 774 775 /* Find head of filter chain. */ 776 777 /* Find link */ 778 dev = __dev_get_by_index(net, t->tcm_ifindex); 779 if (dev == NULL) 780 return -ENODEV; 781 782 /* Find qdisc */ 783 if (!parent) { 784 q = dev->qdisc; 785 parent = q->handle; 786 } else { 787 q = qdisc_lookup(dev, TC_H_MAJ(t->tcm_parent)); 788 if (q == NULL) 789 return -EINVAL; 790 } 791 792 /* Is it classful? */ 793 cops = q->ops->cl_ops; 794 if (!cops) 795 return -EINVAL; 796 797 if (!cops->tcf_block) 798 return -EOPNOTSUPP; 799 800 /* Do we search for filter, attached to class? */ 801 if (TC_H_MIN(parent)) { 802 cl = cops->find(q, parent); 803 if (cl == 0) 804 return -ENOENT; 805 } 806 807 /* And the last stroke */ 808 block = cops->tcf_block(q, cl); 809 if (!block) { 810 err = -EINVAL; 811 goto errout; 812 } 813 814 chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0; 815 if (chain_index > TC_ACT_EXT_VAL_MASK) { 816 err = -EINVAL; 817 goto errout; 818 } 819 chain = tcf_chain_get(block, chain_index, 820 n->nlmsg_type == RTM_NEWTFILTER); 821 if (!chain) { 822 err = n->nlmsg_type == RTM_NEWTFILTER ? -ENOMEM : -EINVAL; 823 goto errout; 824 } 825 826 if (n->nlmsg_type == RTM_DELTFILTER && prio == 0) { 827 tfilter_notify_chain(net, skb, q, parent, n, 828 chain, RTM_DELTFILTER); 829 tcf_chain_flush(chain); 830 err = 0; 831 goto errout; 832 } 833 834 tp = tcf_chain_tp_find(chain, &chain_info, protocol, 835 prio, prio_allocate); 836 if (IS_ERR(tp)) { 837 err = PTR_ERR(tp); 838 goto errout; 839 } 840 841 if (tp == NULL) { 842 /* Proto-tcf does not exist, create new one */ 843 844 if (tca[TCA_KIND] == NULL || !protocol) { 845 err = -EINVAL; 846 goto errout; 847 } 848 849 if (n->nlmsg_type != RTM_NEWTFILTER || 850 !(n->nlmsg_flags & NLM_F_CREATE)) { 851 err = -ENOENT; 852 goto errout; 853 } 854 855 if (prio_allocate) 856 prio = tcf_auto_prio(tcf_chain_tp_prev(&chain_info)); 857 858 tp = tcf_proto_create(nla_data(tca[TCA_KIND]), 859 protocol, prio, parent, q, chain); 860 if (IS_ERR(tp)) { 861 err = PTR_ERR(tp); 862 goto errout; 863 } 864 tp_created = 1; 865 } else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) { 866 err = -EINVAL; 867 goto errout; 868 } 869 870 fh = tp->ops->get(tp, t->tcm_handle); 871 872 if (!fh) { 873 if (n->nlmsg_type == RTM_DELTFILTER && t->tcm_handle == 0) { 874 tcf_chain_tp_remove(chain, &chain_info, tp); 875 tfilter_notify(net, skb, n, tp, q, parent, fh, 876 RTM_DELTFILTER, false); 877 tcf_proto_destroy(tp); 878 err = 0; 879 goto errout; 880 } 881 882 if (n->nlmsg_type != RTM_NEWTFILTER || 883 !(n->nlmsg_flags & NLM_F_CREATE)) { 884 err = -ENOENT; 885 goto errout; 886 } 887 } else { 888 bool last; 889 890 switch (n->nlmsg_type) { 891 case RTM_NEWTFILTER: 892 if (n->nlmsg_flags & NLM_F_EXCL) { 893 if (tp_created) 894 tcf_proto_destroy(tp); 895 err = -EEXIST; 896 goto errout; 897 } 898 break; 899 case RTM_DELTFILTER: 900 err = tfilter_del_notify(net, skb, n, tp, q, parent, 901 fh, false, &last); 902 if (err) 903 goto errout; 904 if (last) { 905 tcf_chain_tp_remove(chain, &chain_info, tp); 906 tcf_proto_destroy(tp); 907 } 908 goto errout; 909 case RTM_GETTFILTER: 910 err = tfilter_notify(net, skb, n, tp, q, parent, fh, 911 RTM_NEWTFILTER, true); 912 goto errout; 913 default: 914 err = -EINVAL; 915 goto errout; 916 } 917 } 918 919 err = tp->ops->change(net, skb, tp, cl, t->tcm_handle, tca, &fh, 920 n->nlmsg_flags & NLM_F_CREATE ? TCA_ACT_NOREPLACE : TCA_ACT_REPLACE); 921 if (err == 0) { 922 if (tp_created) 923 tcf_chain_tp_insert(chain, &chain_info, tp); 924 tfilter_notify(net, skb, n, tp, q, parent, fh, 925 RTM_NEWTFILTER, false); 926 } else { 927 if (tp_created) 928 tcf_proto_destroy(tp); 929 } 930 931 errout: 932 if (chain) 933 tcf_chain_put(chain); 934 if (err == -EAGAIN) 935 /* Replay the request. */ 936 goto replay; 937 return err; 938 } 939 940 struct tcf_dump_args { 941 struct tcf_walker w; 942 struct sk_buff *skb; 943 struct netlink_callback *cb; 944 struct Qdisc *q; 945 u32 parent; 946 }; 947 948 static int tcf_node_dump(struct tcf_proto *tp, void *n, struct tcf_walker *arg) 949 { 950 struct tcf_dump_args *a = (void *)arg; 951 struct net *net = sock_net(a->skb->sk); 952 953 return tcf_fill_node(net, a->skb, tp, a->q, a->parent, 954 n, NETLINK_CB(a->cb->skb).portid, 955 a->cb->nlh->nlmsg_seq, NLM_F_MULTI, 956 RTM_NEWTFILTER); 957 } 958 959 static bool tcf_chain_dump(struct tcf_chain *chain, struct Qdisc *q, u32 parent, 960 struct sk_buff *skb, struct netlink_callback *cb, 961 long index_start, long *p_index) 962 { 963 struct net *net = sock_net(skb->sk); 964 struct tcmsg *tcm = nlmsg_data(cb->nlh); 965 struct tcf_dump_args arg; 966 struct tcf_proto *tp; 967 968 for (tp = rtnl_dereference(chain->filter_chain); 969 tp; tp = rtnl_dereference(tp->next), (*p_index)++) { 970 if (*p_index < index_start) 971 continue; 972 if (TC_H_MAJ(tcm->tcm_info) && 973 TC_H_MAJ(tcm->tcm_info) != tp->prio) 974 continue; 975 if (TC_H_MIN(tcm->tcm_info) && 976 TC_H_MIN(tcm->tcm_info) != tp->protocol) 977 continue; 978 if (*p_index > index_start) 979 memset(&cb->args[1], 0, 980 sizeof(cb->args) - sizeof(cb->args[0])); 981 if (cb->args[1] == 0) { 982 if (tcf_fill_node(net, skb, tp, q, parent, 0, 983 NETLINK_CB(cb->skb).portid, 984 cb->nlh->nlmsg_seq, NLM_F_MULTI, 985 RTM_NEWTFILTER) <= 0) 986 return false; 987 988 cb->args[1] = 1; 989 } 990 if (!tp->ops->walk) 991 continue; 992 arg.w.fn = tcf_node_dump; 993 arg.skb = skb; 994 arg.cb = cb; 995 arg.q = q; 996 arg.parent = parent; 997 arg.w.stop = 0; 998 arg.w.skip = cb->args[1] - 1; 999 arg.w.count = 0; 1000 tp->ops->walk(tp, &arg.w); 1001 cb->args[1] = arg.w.count + 1; 1002 if (arg.w.stop) 1003 return false; 1004 } 1005 return true; 1006 } 1007 1008 /* called with RTNL */ 1009 static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb) 1010 { 1011 struct net *net = sock_net(skb->sk); 1012 struct nlattr *tca[TCA_MAX + 1]; 1013 struct net_device *dev; 1014 struct Qdisc *q; 1015 struct tcf_block *block; 1016 struct tcf_chain *chain; 1017 struct tcmsg *tcm = nlmsg_data(cb->nlh); 1018 unsigned long cl = 0; 1019 const struct Qdisc_class_ops *cops; 1020 long index_start; 1021 long index; 1022 u32 parent; 1023 int err; 1024 1025 if (nlmsg_len(cb->nlh) < sizeof(*tcm)) 1026 return skb->len; 1027 1028 err = nlmsg_parse(cb->nlh, sizeof(*tcm), tca, TCA_MAX, NULL, NULL); 1029 if (err) 1030 return err; 1031 1032 dev = __dev_get_by_index(net, tcm->tcm_ifindex); 1033 if (!dev) 1034 return skb->len; 1035 1036 parent = tcm->tcm_parent; 1037 if (!parent) { 1038 q = dev->qdisc; 1039 parent = q->handle; 1040 } else { 1041 q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent)); 1042 } 1043 if (!q) 1044 goto out; 1045 cops = q->ops->cl_ops; 1046 if (!cops) 1047 goto out; 1048 if (!cops->tcf_block) 1049 goto out; 1050 if (TC_H_MIN(tcm->tcm_parent)) { 1051 cl = cops->find(q, tcm->tcm_parent); 1052 if (cl == 0) 1053 goto out; 1054 } 1055 block = cops->tcf_block(q, cl); 1056 if (!block) 1057 goto out; 1058 1059 index_start = cb->args[0]; 1060 index = 0; 1061 1062 list_for_each_entry(chain, &block->chain_list, list) { 1063 if (tca[TCA_CHAIN] && 1064 nla_get_u32(tca[TCA_CHAIN]) != chain->index) 1065 continue; 1066 if (!tcf_chain_dump(chain, q, parent, skb, cb, 1067 index_start, &index)) 1068 break; 1069 } 1070 1071 cb->args[0] = index; 1072 1073 out: 1074 return skb->len; 1075 } 1076 1077 void tcf_exts_destroy(struct tcf_exts *exts) 1078 { 1079 #ifdef CONFIG_NET_CLS_ACT 1080 LIST_HEAD(actions); 1081 1082 ASSERT_RTNL(); 1083 tcf_exts_to_list(exts, &actions); 1084 tcf_action_destroy(&actions, TCA_ACT_UNBIND); 1085 kfree(exts->actions); 1086 exts->nr_actions = 0; 1087 #endif 1088 } 1089 EXPORT_SYMBOL(tcf_exts_destroy); 1090 1091 int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb, 1092 struct nlattr *rate_tlv, struct tcf_exts *exts, bool ovr) 1093 { 1094 #ifdef CONFIG_NET_CLS_ACT 1095 { 1096 struct tc_action *act; 1097 1098 if (exts->police && tb[exts->police]) { 1099 act = tcf_action_init_1(net, tp, tb[exts->police], 1100 rate_tlv, "police", ovr, 1101 TCA_ACT_BIND); 1102 if (IS_ERR(act)) 1103 return PTR_ERR(act); 1104 1105 act->type = exts->type = TCA_OLD_COMPAT; 1106 exts->actions[0] = act; 1107 exts->nr_actions = 1; 1108 } else if (exts->action && tb[exts->action]) { 1109 LIST_HEAD(actions); 1110 int err, i = 0; 1111 1112 err = tcf_action_init(net, tp, tb[exts->action], 1113 rate_tlv, NULL, ovr, TCA_ACT_BIND, 1114 &actions); 1115 if (err) 1116 return err; 1117 list_for_each_entry(act, &actions, list) 1118 exts->actions[i++] = act; 1119 exts->nr_actions = i; 1120 } 1121 exts->net = net; 1122 } 1123 #else 1124 if ((exts->action && tb[exts->action]) || 1125 (exts->police && tb[exts->police])) 1126 return -EOPNOTSUPP; 1127 #endif 1128 1129 return 0; 1130 } 1131 EXPORT_SYMBOL(tcf_exts_validate); 1132 1133 void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src) 1134 { 1135 #ifdef CONFIG_NET_CLS_ACT 1136 struct tcf_exts old = *dst; 1137 1138 *dst = *src; 1139 tcf_exts_destroy(&old); 1140 #endif 1141 } 1142 EXPORT_SYMBOL(tcf_exts_change); 1143 1144 #ifdef CONFIG_NET_CLS_ACT 1145 static struct tc_action *tcf_exts_first_act(struct tcf_exts *exts) 1146 { 1147 if (exts->nr_actions == 0) 1148 return NULL; 1149 else 1150 return exts->actions[0]; 1151 } 1152 #endif 1153 1154 int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts) 1155 { 1156 #ifdef CONFIG_NET_CLS_ACT 1157 struct nlattr *nest; 1158 1159 if (exts->action && tcf_exts_has_actions(exts)) { 1160 /* 1161 * again for backward compatible mode - we want 1162 * to work with both old and new modes of entering 1163 * tc data even if iproute2 was newer - jhs 1164 */ 1165 if (exts->type != TCA_OLD_COMPAT) { 1166 LIST_HEAD(actions); 1167 1168 nest = nla_nest_start(skb, exts->action); 1169 if (nest == NULL) 1170 goto nla_put_failure; 1171 1172 tcf_exts_to_list(exts, &actions); 1173 if (tcf_action_dump(skb, &actions, 0, 0) < 0) 1174 goto nla_put_failure; 1175 nla_nest_end(skb, nest); 1176 } else if (exts->police) { 1177 struct tc_action *act = tcf_exts_first_act(exts); 1178 nest = nla_nest_start(skb, exts->police); 1179 if (nest == NULL || !act) 1180 goto nla_put_failure; 1181 if (tcf_action_dump_old(skb, act, 0, 0) < 0) 1182 goto nla_put_failure; 1183 nla_nest_end(skb, nest); 1184 } 1185 } 1186 return 0; 1187 1188 nla_put_failure: 1189 nla_nest_cancel(skb, nest); 1190 return -1; 1191 #else 1192 return 0; 1193 #endif 1194 } 1195 EXPORT_SYMBOL(tcf_exts_dump); 1196 1197 1198 int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts) 1199 { 1200 #ifdef CONFIG_NET_CLS_ACT 1201 struct tc_action *a = tcf_exts_first_act(exts); 1202 if (a != NULL && tcf_action_copy_stats(skb, a, 1) < 0) 1203 return -1; 1204 #endif 1205 return 0; 1206 } 1207 EXPORT_SYMBOL(tcf_exts_dump_stats); 1208 1209 static int tc_exts_setup_cb_egdev_call(struct tcf_exts *exts, 1210 enum tc_setup_type type, 1211 void *type_data, bool err_stop) 1212 { 1213 int ok_count = 0; 1214 #ifdef CONFIG_NET_CLS_ACT 1215 const struct tc_action *a; 1216 struct net_device *dev; 1217 int i, ret; 1218 1219 if (!tcf_exts_has_actions(exts)) 1220 return 0; 1221 1222 for (i = 0; i < exts->nr_actions; i++) { 1223 a = exts->actions[i]; 1224 if (!a->ops->get_dev) 1225 continue; 1226 dev = a->ops->get_dev(a); 1227 if (!dev) 1228 continue; 1229 ret = tc_setup_cb_egdev_call(dev, type, type_data, err_stop); 1230 if (ret < 0) 1231 return ret; 1232 ok_count += ret; 1233 } 1234 #endif 1235 return ok_count; 1236 } 1237 1238 int tc_setup_cb_call(struct tcf_block *block, struct tcf_exts *exts, 1239 enum tc_setup_type type, void *type_data, bool err_stop) 1240 { 1241 int ok_count; 1242 int ret; 1243 1244 ret = tcf_block_cb_call(block, type, type_data, err_stop); 1245 if (ret < 0) 1246 return ret; 1247 ok_count = ret; 1248 1249 if (!exts) 1250 return ok_count; 1251 ret = tc_exts_setup_cb_egdev_call(exts, type, type_data, err_stop); 1252 if (ret < 0) 1253 return ret; 1254 ok_count += ret; 1255 1256 return ok_count; 1257 } 1258 EXPORT_SYMBOL(tc_setup_cb_call); 1259 1260 static int __init tc_filter_init(void) 1261 { 1262 tc_filter_wq = alloc_ordered_workqueue("tc_filter_workqueue", 0); 1263 if (!tc_filter_wq) 1264 return -ENOMEM; 1265 1266 rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_ctl_tfilter, NULL, 0); 1267 rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_ctl_tfilter, NULL, 0); 1268 rtnl_register(PF_UNSPEC, RTM_GETTFILTER, tc_ctl_tfilter, 1269 tc_dump_tfilter, 0); 1270 1271 return 0; 1272 } 1273 1274 subsys_initcall(tc_filter_init); 1275