/*
 * net/sched/cls_api.c	Packet classifier API.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Changes:
 *
 * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
 *
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>

/* The list of all installed classifier types */
static LIST_HEAD(tcf_proto_base);

/* Protects the list of registered TC modules. It is a pure SMP lock. */
static DEFINE_RWLOCK(cls_mod_lock);

/* Find classifier type by string name */

static const struct tcf_proto_ops *tcf_proto_lookup_ops(const char *kind)
{
	const struct tcf_proto_ops *t, *res = NULL;

	if (kind) {
		read_lock(&cls_mod_lock);
		list_for_each_entry(t, &tcf_proto_base, head) {
			if (strcmp(kind, t->kind) == 0) {
				if (try_module_get(t->owner))
					res = t;
				break;
			}
		}
		read_unlock(&cls_mod_lock);
	}
	return res;
}

/* Register (unregister) new classifier type */

int register_tcf_proto_ops(struct tcf_proto_ops *ops)
{
	struct tcf_proto_ops *t;
	int rc = -EEXIST;

	write_lock(&cls_mod_lock);
	list_for_each_entry(t, &tcf_proto_base, head)
		if (!strcmp(ops->kind, t->kind))
			goto out;

	list_add_tail(&ops->head, &tcf_proto_base);
	rc = 0;
out:
	write_unlock(&cls_mod_lock);
	return rc;
}
EXPORT_SYMBOL(register_tcf_proto_ops);

static struct workqueue_struct *tc_filter_wq;

int unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
{
	struct tcf_proto_ops *t;
	int rc = -ENOENT;

	/* Wait for outstanding call_rcu()s, if any, from a
	 * tcf_proto_ops's destroy() handler.
	 */
	rcu_barrier();
	flush_workqueue(tc_filter_wq);

	write_lock(&cls_mod_lock);
	list_for_each_entry(t, &tcf_proto_base, head) {
		if (t == ops) {
			list_del(&t->head);
			rc = 0;
			break;
		}
	}
	write_unlock(&cls_mod_lock);
	return rc;
}
EXPORT_SYMBOL(unregister_tcf_proto_ops);

bool tcf_queue_work(struct work_struct *work)
{
	return queue_work(tc_filter_wq, work);
}
EXPORT_SYMBOL(tcf_queue_work);
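
/* Usage sketch (illustrative only; "foo" and the foo_* callbacks are
 * hypothetical): a classifier module fills in a tcf_proto_ops and registers
 * it from its module init, unregistering symmetrically on exit:
 *
 *	static struct tcf_proto_ops cls_foo_ops __read_mostly = {
 *		.kind		= "foo",
 *		.classify	= foo_classify,
 *		.init		= foo_init,
 *		.destroy	= foo_destroy,
 *		.get		= foo_get,
 *		.change		= foo_change,
 *		.delete		= foo_delete,
 *		.walk		= foo_walk,
 *		.dump		= foo_dump,
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	static int __init cls_foo_init(void)
 *	{
 *		return register_tcf_proto_ops(&cls_foo_ops);
 *	}
 *
 * The request_module("cls_%s", ...) call below is what lets a request for
 * an unknown kind "foo" autoload a module named cls_foo.
 */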

/* Select new prio value from the range managed by the kernel. */

static inline u32 tcf_auto_prio(struct tcf_proto *tp)
{
	u32 first = TC_H_MAKE(0xC0000000U, 0U);

	if (tp)
		first = tp->prio - 1;

	return TC_H_MAJ(first);
}

static struct tcf_proto *tcf_proto_create(const char *kind, u32 protocol,
					  u32 prio, u32 parent, struct Qdisc *q,
					  struct tcf_chain *chain)
{
	struct tcf_proto *tp;
	int err;

	tp = kzalloc(sizeof(*tp), GFP_KERNEL);
	if (!tp)
		return ERR_PTR(-ENOBUFS);

	err = -ENOENT;
	tp->ops = tcf_proto_lookup_ops(kind);
	if (!tp->ops) {
#ifdef CONFIG_MODULES
		rtnl_unlock();
		request_module("cls_%s", kind);
		rtnl_lock();
		tp->ops = tcf_proto_lookup_ops(kind);
		/* We dropped the RTNL semaphore in order to perform
		 * the module load. So, even if we succeeded in loading
		 * the module we have to replay the request. We indicate
		 * this using -EAGAIN.
		 */
		if (tp->ops) {
			module_put(tp->ops->owner);
			err = -EAGAIN;
		} else {
			err = -ENOENT;
		}
#endif
		goto errout;
	}
	tp->classify = tp->ops->classify;
	tp->protocol = protocol;
	tp->prio = prio;
	tp->classid = parent;
	tp->q = q;
	tp->chain = chain;

	err = tp->ops->init(tp);
	if (err) {
		module_put(tp->ops->owner);
		goto errout;
	}
	return tp;

errout:
	kfree(tp);
	return ERR_PTR(err);
}

static void tcf_proto_destroy(struct tcf_proto *tp)
{
	tp->ops->destroy(tp);
	module_put(tp->ops->owner);
	kfree_rcu(tp, rcu);
}

static struct tcf_chain *tcf_chain_create(struct tcf_block *block,
					  u32 chain_index)
{
	struct tcf_chain *chain;

	chain = kzalloc(sizeof(*chain), GFP_KERNEL);
	if (!chain)
		return NULL;
	list_add_tail(&chain->list, &block->chain_list);
	chain->block = block;
	chain->index = chain_index;
	chain->refcnt = 1;
	return chain;
}

static void tcf_chain_head_change(struct tcf_chain *chain,
				  struct tcf_proto *tp_head)
{
	if (chain->chain_head_change)
		chain->chain_head_change(tp_head,
					 chain->chain_head_change_priv);
}

static void tcf_chain_flush(struct tcf_chain *chain)
{
	struct tcf_proto *tp;

	tcf_chain_head_change(chain, NULL);
	while ((tp = rtnl_dereference(chain->filter_chain)) != NULL) {
		RCU_INIT_POINTER(chain->filter_chain, tp->next);
		tcf_chain_put(chain);
		tcf_proto_destroy(tp);
	}
}

static void tcf_chain_destroy(struct tcf_chain *chain)
{
	list_del(&chain->list);
	kfree(chain);
}

static void tcf_chain_hold(struct tcf_chain *chain)
{
	++chain->refcnt;
}
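
/* Chains are reference counted: the implicit chain 0 is created together
 * with its block and holds one reference from birth, tcf_chain_get() takes
 * a reference on behalf of each user, and every filter linked into the
 * chain holds another one (see tcf_chain_tp_insert() below). The chain is
 * freed when the last reference is dropped in tcf_chain_put().
 */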

struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
				bool create)
{
	struct tcf_chain *chain;

	list_for_each_entry(chain, &block->chain_list, list) {
		if (chain->index == chain_index) {
			tcf_chain_hold(chain);
			return chain;
		}
	}

	return create ? tcf_chain_create(block, chain_index) : NULL;
}
EXPORT_SYMBOL(tcf_chain_get);

void tcf_chain_put(struct tcf_chain *chain)
{
	if (--chain->refcnt == 0)
		tcf_chain_destroy(chain);
}
EXPORT_SYMBOL(tcf_chain_put);

static void tcf_block_offload_cmd(struct tcf_block *block, struct Qdisc *q,
				  struct tcf_block_ext_info *ei,
				  enum tc_block_command command)
{
	struct net_device *dev = q->dev_queue->dev;
	struct tc_block_offload bo = {};

	if (!dev->netdev_ops->ndo_setup_tc)
		return;
	bo.command = command;
	bo.binder_type = ei->binder_type;
	bo.block = block;
	dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo);
}

static void tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q,
				   struct tcf_block_ext_info *ei)
{
	tcf_block_offload_cmd(block, q, ei, TC_BLOCK_BIND);
}

static void tcf_block_offload_unbind(struct tcf_block *block, struct Qdisc *q,
				     struct tcf_block_ext_info *ei)
{
	tcf_block_offload_cmd(block, q, ei, TC_BLOCK_UNBIND);
}

int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei)
{
	struct tcf_block *block = kzalloc(sizeof(*block), GFP_KERNEL);
	struct tcf_chain *chain;
	int err;

	if (!block)
		return -ENOMEM;
	INIT_LIST_HEAD(&block->chain_list);
	INIT_LIST_HEAD(&block->cb_list);

	/* Create chain 0 by default; it must always be present. */
	chain = tcf_chain_create(block, 0);
	if (!chain) {
		err = -ENOMEM;
		goto err_chain_create;
	}
	WARN_ON(!ei->chain_head_change);
	chain->chain_head_change = ei->chain_head_change;
	chain->chain_head_change_priv = ei->chain_head_change_priv;
	block->net = qdisc_net(q);
	block->q = q;
	tcf_block_offload_bind(block, q, ei);
	*p_block = block;
	return 0;

err_chain_create:
	kfree(block);
	return err;
}
EXPORT_SYMBOL(tcf_block_get_ext);

static void tcf_chain_head_change_dflt(struct tcf_proto *tp_head, void *priv)
{
	struct tcf_proto __rcu **p_filter_chain = priv;

	rcu_assign_pointer(*p_filter_chain, tp_head);
}

int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q)
{
	struct tcf_block_ext_info ei = {
		.chain_head_change = tcf_chain_head_change_dflt,
		.chain_head_change_priv = p_filter_chain,
	};

	WARN_ON(!p_filter_chain);
	return tcf_block_get_ext(p_block, q, &ei);
}
EXPORT_SYMBOL(tcf_block_get);
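
/* Usage sketch (illustrative; "q" is the qdisc's private data and
 * "filter_list" a tcf_proto __rcu * member, both hypothetical here):
 * a classful qdisc acquires its block in ->init() and releases it in
 * ->destroy():
 *
 *	err = tcf_block_get(&q->block, &q->filter_list, sch);
 *	if (err)
 *		return err;
 *	...
 *	tcf_block_put(q->block);
 *
 * With the default chain_head_change callback installed above,
 * q->filter_list always tracks the head of chain 0, so the fast path can
 * start tcf_classify() from it without extra indirection.
 */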

static void tcf_block_put_final(struct work_struct *work)
{
	struct tcf_block *block = container_of(work, struct tcf_block, work);
	struct tcf_chain *chain, *tmp;

	rtnl_lock();
	/* Only chain 0 should still be here. */
	list_for_each_entry_safe(chain, tmp, &block->chain_list, list)
		tcf_chain_put(chain);
	rtnl_unlock();
	kfree(block);
}

/* XXX: Standalone actions are not allowed to jump to any chain, and bound
 * actions should all be removed after flushing. However, filters are now
 * destroyed on the tc filter workqueue with the RTNL lock held, so they
 * cannot race here.
 */
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
		       struct tcf_block_ext_info *ei)
{
	struct tcf_chain *chain, *tmp;

	list_for_each_entry_safe(chain, tmp, &block->chain_list, list)
		tcf_chain_flush(chain);

	tcf_block_offload_unbind(block, q, ei);

	INIT_WORK(&block->work, tcf_block_put_final);
	/* Wait for existing RCU callbacks to cool down; this makes sure
	 * their work items have been queued before ours. We cannot flush
	 * pending work items here because we are holding the RTNL lock.
	 */
	rcu_barrier();
	tcf_queue_work(&block->work);
}
EXPORT_SYMBOL(tcf_block_put_ext);

void tcf_block_put(struct tcf_block *block)
{
	struct tcf_block_ext_info ei = {0, };

	if (!block)
		return;
	tcf_block_put_ext(block, block->q, &ei);
}
EXPORT_SYMBOL(tcf_block_put);

struct tcf_block_cb {
	struct list_head list;
	tc_setup_cb_t *cb;
	void *cb_ident;
	void *cb_priv;
	unsigned int refcnt;
};

void *tcf_block_cb_priv(struct tcf_block_cb *block_cb)
{
	return block_cb->cb_priv;
}
EXPORT_SYMBOL(tcf_block_cb_priv);

struct tcf_block_cb *tcf_block_cb_lookup(struct tcf_block *block,
					 tc_setup_cb_t *cb, void *cb_ident)
{
	struct tcf_block_cb *block_cb;

	list_for_each_entry(block_cb, &block->cb_list, list)
		if (block_cb->cb == cb && block_cb->cb_ident == cb_ident)
			return block_cb;
	return NULL;
}
EXPORT_SYMBOL(tcf_block_cb_lookup);

void tcf_block_cb_incref(struct tcf_block_cb *block_cb)
{
	block_cb->refcnt++;
}
EXPORT_SYMBOL(tcf_block_cb_incref);

unsigned int tcf_block_cb_decref(struct tcf_block_cb *block_cb)
{
	return --block_cb->refcnt;
}
EXPORT_SYMBOL(tcf_block_cb_decref);

struct tcf_block_cb *__tcf_block_cb_register(struct tcf_block *block,
					     tc_setup_cb_t *cb, void *cb_ident,
					     void *cb_priv)
{
	struct tcf_block_cb *block_cb;

	block_cb = kzalloc(sizeof(*block_cb), GFP_KERNEL);
	if (!block_cb)
		return NULL;
	block_cb->cb = cb;
	block_cb->cb_ident = cb_ident;
	block_cb->cb_priv = cb_priv;
	list_add(&block_cb->list, &block->cb_list);
	return block_cb;
}
EXPORT_SYMBOL(__tcf_block_cb_register);
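
/* Usage sketch (illustrative; the foo_* names are hypothetical): an
 * offload-capable driver binds one callback per block from its
 * ndo_setup_tc(TC_SETUP_BLOCK) handler, where "f" is the received
 * struct tc_block_offload:
 *
 *	case TC_BLOCK_BIND:
 *		return tcf_block_cb_register(f->block, foo_setup_block_cb,
 *					     priv, priv);
 *	case TC_BLOCK_UNBIND:
 *		tcf_block_cb_unregister(f->block, foo_setup_block_cb, priv);
 *		return 0;
 *
 * cb_ident distinguishes multiple registrations of the same function; the
 * __tcf_block_cb_* variants together with incref/decref let a driver share
 * a single registration between several binders.
 */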

int tcf_block_cb_register(struct tcf_block *block,
			  tc_setup_cb_t *cb, void *cb_ident,
			  void *cb_priv)
{
	struct tcf_block_cb *block_cb;

	block_cb = __tcf_block_cb_register(block, cb, cb_ident, cb_priv);
	return block_cb ? 0 : -ENOMEM;
}
EXPORT_SYMBOL(tcf_block_cb_register);

void __tcf_block_cb_unregister(struct tcf_block_cb *block_cb)
{
	list_del(&block_cb->list);
	kfree(block_cb);
}
EXPORT_SYMBOL(__tcf_block_cb_unregister);

void tcf_block_cb_unregister(struct tcf_block *block,
			     tc_setup_cb_t *cb, void *cb_ident)
{
	struct tcf_block_cb *block_cb;

	block_cb = tcf_block_cb_lookup(block, cb, cb_ident);
	if (!block_cb)
		return;
	__tcf_block_cb_unregister(block_cb);
}
EXPORT_SYMBOL(tcf_block_cb_unregister);

static int tcf_block_cb_call(struct tcf_block *block, enum tc_setup_type type,
			     void *type_data, bool err_stop)
{
	struct tcf_block_cb *block_cb;
	int ok_count = 0;
	int err;

	list_for_each_entry(block_cb, &block->cb_list, list) {
		err = block_cb->cb(type, type_data, block_cb->cb_priv);
		if (err) {
			if (err_stop)
				return err;
		} else {
			ok_count++;
		}
	}
	return ok_count;
}

/* Main classifier routine: scans the classifier chain attached
 * to this qdisc, (optionally) tests for protocol and asks
 * specific classifiers.
 */
int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		 struct tcf_result *res, bool compat_mode)
{
	__be16 protocol = tc_skb_protocol(skb);
#ifdef CONFIG_NET_CLS_ACT
	const int max_reclassify_loop = 4;
	const struct tcf_proto *orig_tp = tp;
	const struct tcf_proto *first_tp;
	int limit = 0;

reclassify:
#endif
	for (; tp; tp = rcu_dereference_bh(tp->next)) {
		int err;

		if (tp->protocol != protocol &&
		    tp->protocol != htons(ETH_P_ALL))
			continue;

		err = tp->classify(skb, tp, res);
#ifdef CONFIG_NET_CLS_ACT
		if (unlikely(err == TC_ACT_RECLASSIFY && !compat_mode)) {
			first_tp = orig_tp;
			goto reset;
		} else if (unlikely(TC_ACT_EXT_CMP(err, TC_ACT_GOTO_CHAIN))) {
			first_tp = res->goto_tp;
			goto reset;
		}
#endif
		if (err >= 0)
			return err;
	}

	return TC_ACT_UNSPEC; /* signal: continue lookup */
#ifdef CONFIG_NET_CLS_ACT
reset:
	if (unlikely(limit++ >= max_reclassify_loop)) {
		net_notice_ratelimited("%s: reclassify loop, rule prio %u, protocol %02x\n",
				       tp->q->ops->id, tp->prio & 0xffff,
				       ntohs(tp->protocol));
		return TC_ACT_SHOT;
	}

	tp = first_tp;
	protocol = tc_skb_protocol(skb);
	goto reclassify;
#endif
}
EXPORT_SYMBOL(tcf_classify);
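
/* Filters on a chain live in a singly linked list sorted by ascending prio.
 * tcf_chain_tp_find() records the position it found in tcf_chain_info
 * (pprev/next) so that a later tcf_chain_tp_insert() or
 * tcf_chain_tp_remove() can splice the list at that point without walking
 * it again.
 */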

struct tcf_chain_info {
	struct tcf_proto __rcu **pprev;
	struct tcf_proto __rcu *next;
};

static struct tcf_proto *tcf_chain_tp_prev(struct tcf_chain_info *chain_info)
{
	return rtnl_dereference(*chain_info->pprev);
}

static void tcf_chain_tp_insert(struct tcf_chain *chain,
				struct tcf_chain_info *chain_info,
				struct tcf_proto *tp)
{
	if (*chain_info->pprev == chain->filter_chain)
		tcf_chain_head_change(chain, tp);
	RCU_INIT_POINTER(tp->next, tcf_chain_tp_prev(chain_info));
	rcu_assign_pointer(*chain_info->pprev, tp);
	tcf_chain_hold(chain);
}

static void tcf_chain_tp_remove(struct tcf_chain *chain,
				struct tcf_chain_info *chain_info,
				struct tcf_proto *tp)
{
	struct tcf_proto *next = rtnl_dereference(chain_info->next);

	if (tp == chain->filter_chain)
		tcf_chain_head_change(chain, next);
	RCU_INIT_POINTER(*chain_info->pprev, next);
	tcf_chain_put(chain);
}

static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
					   struct tcf_chain_info *chain_info,
					   u32 protocol, u32 prio,
					   bool prio_allocate)
{
	struct tcf_proto **pprev;
	struct tcf_proto *tp;

	/* Check the chain for existence of proto-tcf with this priority */
	for (pprev = &chain->filter_chain;
	     (tp = rtnl_dereference(*pprev)); pprev = &tp->next) {
		if (tp->prio >= prio) {
			if (tp->prio == prio) {
				if (prio_allocate ||
				    (tp->protocol != protocol && protocol))
					return ERR_PTR(-EINVAL);
			} else {
				tp = NULL;
			}
			break;
		}
	}
	chain_info->pprev = pprev;
	chain_info->next = tp ? tp->next : NULL;
	return tp;
}

static int tcf_fill_node(struct net *net, struct sk_buff *skb,
			 struct tcf_proto *tp, struct Qdisc *q, u32 parent,
			 void *fh, u32 portid, u32 seq, u16 flags, int event)
{
	struct tcmsg *tcm;
	struct nlmsghdr *nlh;
	unsigned char *b = skb_tail_pointer(skb);

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
	if (!nlh)
		goto out_nlmsg_trim;
	tcm = nlmsg_data(nlh);
	tcm->tcm_family = AF_UNSPEC;
	tcm->tcm__pad1 = 0;
	tcm->tcm__pad2 = 0;
	tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
	tcm->tcm_parent = parent;
	tcm->tcm_info = TC_H_MAKE(tp->prio, tp->protocol);
	if (nla_put_string(skb, TCA_KIND, tp->ops->kind))
		goto nla_put_failure;
	if (nla_put_u32(skb, TCA_CHAIN, tp->chain->index))
		goto nla_put_failure;
	if (!fh) {
		tcm->tcm_handle = 0;
	} else {
		if (tp->ops->dump && tp->ops->dump(net, tp, fh, skb, tcm) < 0)
			goto nla_put_failure;
	}
	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

out_nlmsg_trim:
nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int tfilter_notify(struct net *net, struct sk_buff *oskb,
			  struct nlmsghdr *n, struct tcf_proto *tp,
			  struct Qdisc *q, u32 parent,
			  void *fh, int event, bool unicast)
{
	struct sk_buff *skb;
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tcf_fill_node(net, skb, tp, q, parent, fh, portid, n->nlmsg_seq,
			  n->nlmsg_flags, event) <= 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	if (unicast)
		return netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);

	return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
			      n->nlmsg_flags & NLM_F_ECHO);
}
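
/* For deletes the notification skb is filled in *before* ->delete() runs:
 * once the filter is gone its handle can no longer be dumped, so the
 * message must be built while it is still valid. @last reports whether the
 * tcf_proto became empty, letting the caller unlink and destroy it.
 */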

static int tfilter_del_notify(struct net *net, struct sk_buff *oskb,
			      struct nlmsghdr *n, struct tcf_proto *tp,
			      struct Qdisc *q, u32 parent,
			      void *fh, bool unicast, bool *last)
{
	struct sk_buff *skb;
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
	int err;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tcf_fill_node(net, skb, tp, q, parent, fh, portid, n->nlmsg_seq,
			  n->nlmsg_flags, RTM_DELTFILTER) <= 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	err = tp->ops->delete(tp, fh, last);
	if (err) {
		kfree_skb(skb);
		return err;
	}

	if (unicast)
		return netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);

	return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
			      n->nlmsg_flags & NLM_F_ECHO);
}

static void tfilter_notify_chain(struct net *net, struct sk_buff *oskb,
				 struct Qdisc *q, u32 parent,
				 struct nlmsghdr *n,
				 struct tcf_chain *chain, int event)
{
	struct tcf_proto *tp;

	for (tp = rtnl_dereference(chain->filter_chain);
	     tp; tp = rtnl_dereference(tp->next))
		tfilter_notify(net, oskb, n, tp, q, parent, 0, event, false);
}
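
/* In {new,del,get}tfilter requests, tcm_info packs both lookup keys of a
 * tcf_proto: the upper 16 bits (TC_H_MAJ) carry the priority, the lower
 * 16 bits (TC_H_MIN) the match protocol, e.g. htons(ETH_P_IP) or
 * htons(ETH_P_ALL). Priority 0 means "kernel, pick one" on create (with
 * NLM_F_CREATE) and "flush the whole chain" on delete.
 */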

/* Add/change/delete/get a filter node */

static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	struct tcmsg *t;
	u32 protocol;
	u32 prio;
	bool prio_allocate;
	u32 parent;
	u32 chain_index;
	struct net_device *dev;
	struct Qdisc *q;
	struct tcf_chain_info chain_info;
	struct tcf_chain *chain = NULL;
	struct tcf_block *block;
	struct tcf_proto *tp;
	const struct Qdisc_class_ops *cops;
	unsigned long cl;
	void *fh;
	int err;
	int tp_created;

	if ((n->nlmsg_type != RTM_GETTFILTER) &&
	    !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

replay:
	tp_created = 0;

	err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, NULL, extack);
	if (err < 0)
		return err;

	t = nlmsg_data(n);
	protocol = TC_H_MIN(t->tcm_info);
	prio = TC_H_MAJ(t->tcm_info);
	prio_allocate = false;
	parent = t->tcm_parent;
	cl = 0;

	if (prio == 0) {
		switch (n->nlmsg_type) {
		case RTM_DELTFILTER:
			if (protocol || t->tcm_handle || tca[TCA_KIND])
				return -ENOENT;
			break;
		case RTM_NEWTFILTER:
			/* If no priority is provided by the user,
			 * we allocate one.
			 */
			if (n->nlmsg_flags & NLM_F_CREATE) {
				prio = TC_H_MAKE(0x80000000U, 0U);
				prio_allocate = true;
				break;
			}
			/* fall-through */
		default:
			return -ENOENT;
		}
	}

	/* Find head of filter chain. */

	/* Find link */
	dev = __dev_get_by_index(net, t->tcm_ifindex);
	if (dev == NULL)
		return -ENODEV;

	/* Find qdisc */
	if (!parent) {
		q = dev->qdisc;
		parent = q->handle;
	} else {
		q = qdisc_lookup(dev, TC_H_MAJ(t->tcm_parent));
		if (q == NULL)
			return -EINVAL;
	}

	/* Is it classful? */
	cops = q->ops->cl_ops;
	if (!cops)
		return -EINVAL;

	if (!cops->tcf_block)
		return -EOPNOTSUPP;

	/* Do we search for filter, attached to class? */
	if (TC_H_MIN(parent)) {
		cl = cops->find(q, parent);
		if (cl == 0)
			return -ENOENT;
	}

	/* And the last stroke */
	block = cops->tcf_block(q, cl);
	if (!block) {
		err = -EINVAL;
		goto errout;
	}

	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
	if (chain_index > TC_ACT_EXT_VAL_MASK) {
		err = -EINVAL;
		goto errout;
	}
	chain = tcf_chain_get(block, chain_index,
			      n->nlmsg_type == RTM_NEWTFILTER);
	if (!chain) {
		err = n->nlmsg_type == RTM_NEWTFILTER ? -ENOMEM : -EINVAL;
		goto errout;
	}

	if (n->nlmsg_type == RTM_DELTFILTER && prio == 0) {
		tfilter_notify_chain(net, skb, q, parent, n,
				     chain, RTM_DELTFILTER);
		tcf_chain_flush(chain);
		err = 0;
		goto errout;
	}

	tp = tcf_chain_tp_find(chain, &chain_info, protocol,
			       prio, prio_allocate);
	if (IS_ERR(tp)) {
		err = PTR_ERR(tp);
		goto errout;
	}

	if (tp == NULL) {
		/* Proto-tcf does not exist, create new one */

		if (tca[TCA_KIND] == NULL || !protocol) {
			err = -EINVAL;
			goto errout;
		}

		if (n->nlmsg_type != RTM_NEWTFILTER ||
		    !(n->nlmsg_flags & NLM_F_CREATE)) {
			err = -ENOENT;
			goto errout;
		}

		if (prio_allocate)
			prio = tcf_auto_prio(tcf_chain_tp_prev(&chain_info));

		tp = tcf_proto_create(nla_data(tca[TCA_KIND]),
				      protocol, prio, parent, q, chain);
		if (IS_ERR(tp)) {
			err = PTR_ERR(tp);
			goto errout;
		}
		tp_created = 1;
	} else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
		err = -EINVAL;
		goto errout;
	}

	fh = tp->ops->get(tp, t->tcm_handle);

	if (!fh) {
		if (n->nlmsg_type == RTM_DELTFILTER && t->tcm_handle == 0) {
			tcf_chain_tp_remove(chain, &chain_info, tp);
			tfilter_notify(net, skb, n, tp, q, parent, fh,
				       RTM_DELTFILTER, false);
			tcf_proto_destroy(tp);
			err = 0;
			goto errout;
		}

		if (n->nlmsg_type != RTM_NEWTFILTER ||
		    !(n->nlmsg_flags & NLM_F_CREATE)) {
			err = -ENOENT;
			goto errout;
		}
	} else {
		bool last;

		switch (n->nlmsg_type) {
		case RTM_NEWTFILTER:
			if (n->nlmsg_flags & NLM_F_EXCL) {
				if (tp_created)
					tcf_proto_destroy(tp);
				err = -EEXIST;
				goto errout;
			}
			break;
		case RTM_DELTFILTER:
			err = tfilter_del_notify(net, skb, n, tp, q, parent,
						 fh, false, &last);
			if (err)
				goto errout;
			if (last) {
				tcf_chain_tp_remove(chain, &chain_info, tp);
				tcf_proto_destroy(tp);
			}
			goto errout;
		case RTM_GETTFILTER:
			err = tfilter_notify(net, skb, n, tp, q, parent, fh,
					     RTM_NEWTFILTER, true);
			goto errout;
		default:
			err = -EINVAL;
			goto errout;
		}
	}

	err = tp->ops->change(net, skb, tp, cl, t->tcm_handle, tca, &fh,
			      n->nlmsg_flags & NLM_F_CREATE ? TCA_ACT_NOREPLACE :
							      TCA_ACT_REPLACE);
	if (err == 0) {
		if (tp_created)
			tcf_chain_tp_insert(chain, &chain_info, tp);
		tfilter_notify(net, skb, n, tp, q, parent, fh,
			       RTM_NEWTFILTER, false);
	} else {
		if (tp_created)
			tcf_proto_destroy(tp);
	}

errout:
	if (chain)
		tcf_chain_put(chain);
	if (err == -EAGAIN)
		/* Replay the request. */
		goto replay;
	return err;
}
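
/* Dump side: tc_dump_tfilter() walks every chain of the block. cb->args[0]
 * holds the flat index of the tcf_proto the previous dump call stopped in,
 * and cb->args[1..] the walker position inside it, so a dump that filled
 * one skb resumes exactly where it left off on the next call.
 */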

struct tcf_dump_args {
	struct tcf_walker w;
	struct sk_buff *skb;
	struct netlink_callback *cb;
	struct Qdisc *q;
	u32 parent;
};

static int tcf_node_dump(struct tcf_proto *tp, void *n, struct tcf_walker *arg)
{
	struct tcf_dump_args *a = (void *)arg;
	struct net *net = sock_net(a->skb->sk);

	return tcf_fill_node(net, a->skb, tp, a->q, a->parent,
			     n, NETLINK_CB(a->cb->skb).portid,
			     a->cb->nlh->nlmsg_seq, NLM_F_MULTI,
			     RTM_NEWTFILTER);
}

static bool tcf_chain_dump(struct tcf_chain *chain, struct Qdisc *q, u32 parent,
			   struct sk_buff *skb, struct netlink_callback *cb,
			   long index_start, long *p_index)
{
	struct net *net = sock_net(skb->sk);
	struct tcmsg *tcm = nlmsg_data(cb->nlh);
	struct tcf_dump_args arg;
	struct tcf_proto *tp;

	for (tp = rtnl_dereference(chain->filter_chain);
	     tp; tp = rtnl_dereference(tp->next), (*p_index)++) {
		if (*p_index < index_start)
			continue;
		if (TC_H_MAJ(tcm->tcm_info) &&
		    TC_H_MAJ(tcm->tcm_info) != tp->prio)
			continue;
		if (TC_H_MIN(tcm->tcm_info) &&
		    TC_H_MIN(tcm->tcm_info) != tp->protocol)
			continue;
		if (*p_index > index_start)
			memset(&cb->args[1], 0,
			       sizeof(cb->args) - sizeof(cb->args[0]));
		if (cb->args[1] == 0) {
			if (tcf_fill_node(net, skb, tp, q, parent, 0,
					  NETLINK_CB(cb->skb).portid,
					  cb->nlh->nlmsg_seq, NLM_F_MULTI,
					  RTM_NEWTFILTER) <= 0)
				return false;

			cb->args[1] = 1;
		}
		if (!tp->ops->walk)
			continue;
		arg.w.fn = tcf_node_dump;
		arg.skb = skb;
		arg.cb = cb;
		arg.q = q;
		arg.parent = parent;
		arg.w.stop = 0;
		arg.w.skip = cb->args[1] - 1;
		arg.w.count = 0;
		tp->ops->walk(tp, &arg.w);
		cb->args[1] = arg.w.count + 1;
		if (arg.w.stop)
			return false;
	}
	return true;
}

/* called with RTNL */
static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	struct net_device *dev;
	struct Qdisc *q;
	struct tcf_block *block;
	struct tcf_chain *chain;
	struct tcmsg *tcm = nlmsg_data(cb->nlh);
	unsigned long cl = 0;
	const struct Qdisc_class_ops *cops;
	long index_start;
	long index;
	u32 parent;
	int err;

	if (nlmsg_len(cb->nlh) < sizeof(*tcm))
		return skb->len;

	err = nlmsg_parse(cb->nlh, sizeof(*tcm), tca, TCA_MAX, NULL, NULL);
	if (err)
		return err;

	dev = __dev_get_by_index(net, tcm->tcm_ifindex);
	if (!dev)
		return skb->len;

	parent = tcm->tcm_parent;
	if (!parent) {
		q = dev->qdisc;
		parent = q->handle;
	} else {
		q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
	}
	if (!q)
		goto out;
	cops = q->ops->cl_ops;
	if (!cops)
		goto out;
	if (!cops->tcf_block)
		goto out;
	if (TC_H_MIN(tcm->tcm_parent)) {
		cl = cops->find(q, tcm->tcm_parent);
		if (cl == 0)
			goto out;
	}
	block = cops->tcf_block(q, cl);
	if (!block)
		goto out;

	index_start = cb->args[0];
	index = 0;

	list_for_each_entry(chain, &block->chain_list, list) {
		if (tca[TCA_CHAIN] &&
		    nla_get_u32(tca[TCA_CHAIN]) != chain->index)
			continue;
		if (!tcf_chain_dump(chain, q, parent, skb, cb,
				    index_start, &index))
			break;
	}

	cb->args[0] = index;

out:
	return skb->len;
}
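
/* Extension (action) handling: a tcf_exts holds the array of actions bound
 * to one filter instance. Two netlink layouts are supported below: the
 * modern nested action list (attribute index exts->action) and the legacy
 * single binary "police" attribute (exts->police, marked TCA_OLD_COMPAT).
 */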

void tcf_exts_destroy(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	LIST_HEAD(actions);

	ASSERT_RTNL();
	tcf_exts_to_list(exts, &actions);
	tcf_action_destroy(&actions, TCA_ACT_UNBIND);
	kfree(exts->actions);
	exts->nr_actions = 0;
#endif
}
EXPORT_SYMBOL(tcf_exts_destroy);

int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
		      struct nlattr *rate_tlv, struct tcf_exts *exts, bool ovr)
{
#ifdef CONFIG_NET_CLS_ACT
	{
		struct tc_action *act;

		if (exts->police && tb[exts->police]) {
			act = tcf_action_init_1(net, tp, tb[exts->police],
						rate_tlv, "police", ovr,
						TCA_ACT_BIND);
			if (IS_ERR(act))
				return PTR_ERR(act);

			act->type = exts->type = TCA_OLD_COMPAT;
			exts->actions[0] = act;
			exts->nr_actions = 1;
		} else if (exts->action && tb[exts->action]) {
			LIST_HEAD(actions);
			int err, i = 0;

			err = tcf_action_init(net, tp, tb[exts->action],
					      rate_tlv, NULL, ovr, TCA_ACT_BIND,
					      &actions);
			if (err)
				return err;
			list_for_each_entry(act, &actions, list)
				exts->actions[i++] = act;
			exts->nr_actions = i;
		}
	}
#else
	if ((exts->action && tb[exts->action]) ||
	    (exts->police && tb[exts->police]))
		return -EOPNOTSUPP;
#endif

	return 0;
}
EXPORT_SYMBOL(tcf_exts_validate);

void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src)
{
#ifdef CONFIG_NET_CLS_ACT
	struct tcf_exts old = *dst;

	*dst = *src;
	tcf_exts_destroy(&old);
#endif
}
EXPORT_SYMBOL(tcf_exts_change);

#ifdef CONFIG_NET_CLS_ACT
static struct tc_action *tcf_exts_first_act(struct tcf_exts *exts)
{
	if (exts->nr_actions == 0)
		return NULL;
	else
		return exts->actions[0];
}
#endif

int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	struct nlattr *nest;

	if (exts->action && tcf_exts_has_actions(exts)) {
		/*
		 * again for backward compatible mode - we want
		 * to work with both old and new modes of entering
		 * tc data even if iproute2 was newer - jhs
		 */
		if (exts->type != TCA_OLD_COMPAT) {
			LIST_HEAD(actions);

			nest = nla_nest_start(skb, exts->action);
			if (nest == NULL)
				goto nla_put_failure;

			tcf_exts_to_list(exts, &actions);
			if (tcf_action_dump(skb, &actions, 0, 0) < 0)
				goto nla_put_failure;
			nla_nest_end(skb, nest);
		} else if (exts->police) {
			struct tc_action *act = tcf_exts_first_act(exts);

			nest = nla_nest_start(skb, exts->police);
			if (nest == NULL || !act)
				goto nla_put_failure;
			if (tcf_action_dump_old(skb, act, 0, 0) < 0)
				goto nla_put_failure;
			nla_nest_end(skb, nest);
		}
	}
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
#else
	return 0;
#endif
}
EXPORT_SYMBOL(tcf_exts_dump);

int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	struct tc_action *a = tcf_exts_first_act(exts);

	if (a != NULL && tcf_action_copy_stats(skb, a, 1) < 0)
		return -1;
#endif
	return 0;
}
EXPORT_SYMBOL(tcf_exts_dump_stats);
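
/* Hardware offload fan-out: tc_setup_cb_call() first offers the command to
 * every callback registered on the block (the device the block is bound
 * to), then, via the egdev helper below, to each egress device referenced
 * by the filter's actions (e.g. a mirred target). The result is the number
 * of callbacks that accepted the command, or the first error when
 * @err_stop is set.
 */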

static int tc_exts_setup_cb_egdev_call(struct tcf_exts *exts,
				       enum tc_setup_type type,
				       void *type_data, bool err_stop)
{
	int ok_count = 0;
#ifdef CONFIG_NET_CLS_ACT
	const struct tc_action *a;
	struct net_device *dev;
	int i, ret;

	if (!tcf_exts_has_actions(exts))
		return 0;

	for (i = 0; i < exts->nr_actions; i++) {
		a = exts->actions[i];
		if (!a->ops->get_dev)
			continue;
		dev = a->ops->get_dev(a);
		if (!dev)
			continue;
		ret = tc_setup_cb_egdev_call(dev, type, type_data, err_stop);
		if (ret < 0)
			return ret;
		ok_count += ret;
	}
#endif
	return ok_count;
}

int tc_setup_cb_call(struct tcf_block *block, struct tcf_exts *exts,
		     enum tc_setup_type type, void *type_data, bool err_stop)
{
	int ok_count;
	int ret;

	ret = tcf_block_cb_call(block, type, type_data, err_stop);
	if (ret < 0)
		return ret;
	ok_count = ret;

	if (!exts)
		return ok_count;
	ret = tc_exts_setup_cb_egdev_call(exts, type, type_data, err_stop);
	if (ret < 0)
		return ret;
	ok_count += ret;

	return ok_count;
}
EXPORT_SYMBOL(tc_setup_cb_call);

static int __init tc_filter_init(void)
{
	tc_filter_wq = alloc_ordered_workqueue("tc_filter_workqueue", 0);
	if (!tc_filter_wq)
		return -ENOMEM;

	rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_ctl_tfilter, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_ctl_tfilter, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_GETTFILTER, tc_ctl_tfilter,
		      tc_dump_tfilter, 0);

	return 0;
}

subsys_initcall(tc_filter_init);