// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/sch_api.c	Packet scheduler API.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Fixes:
 *
 * Rani Assaf <rani@magic.metawire.com> :980802: JIFFIES and CPU clock sources are repaired.
 * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
 * Jamal Hadi Salim <hadi@nortelnetworks.com>: 990601: ingress support
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/hrtimer.h>
#include <linux/slab.h>
#include <linux/hashtable.h>

#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>

#include <trace/events/qdisc.h>

/*

   Short review.
   -------------

   This file consists of two interrelated parts:

   1. The queueing discipline manager frontend.
   2. The traffic class manager frontend.

   Generally, a queueing discipline ("qdisc") is a black box which is
   able to enqueue packets and to dequeue them (when the device is
   ready to send something), in an order and at times determined by the
   algorithm hidden inside it.

   qdiscs are divided into two categories:
   - "queues", which have no internal structure visible from the outside.
   - "schedulers", which split all the packets into "traffic classes",
     using "packet classifiers" (see cls_api.c).

   In turn, classes may have child qdiscs (as a rule, queues)
   attached to them, and so on recursively.

   The goal of the routines in this file is to translate the
   information supplied by the user in the form of handles into a form
   more intelligible to the kernel, to perform some sanity checks and
   the parts of the work common to all qdiscs, and to provide
   rtnetlink notifications.

   All the real intelligent work is done inside the qdisc modules.



   Every discipline has two major routines: enqueue and dequeue.

   ---dequeue

   dequeue usually returns an skb to send. It is allowed to return NULL,
   but this does not mean that the queue is empty; it just means that
   the discipline does not want to send anything at this time.
   The queue is really empty only if q->q.qlen == 0.
   For complicated disciplines with multiple queues, q->q is not the
   real packet queue, but q->q.qlen must nevertheless be valid.

   ---enqueue

   enqueue returns 0 if the packet was enqueued successfully.
   If a packet (this one or another one) was dropped, it returns
   a non-zero error code:
   NET_XMIT_DROP	- this packet was dropped.
     Expected action: do not back off, but wait until the queue clears.
   NET_XMIT_CN		- probably this packet was enqueued, but another one was dropped.
     Expected action: back off or ignore.

   Auxiliary routines:

   ---peek

   like dequeue, but without removing a packet from the queue.

   ---reset

   returns the qdisc to its initial state: purges all buffers, clears all
   timers, counters (except statistics), etc.

   ---init

   initializes a newly created qdisc.

   ---destroy

   destroys resources allocated by init and during the lifetime of the qdisc.

   ---change

   changes qdisc parameters.
 */
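
/*
 * Illustrative sketch (not part of this file): the smallest qdisc
 * honouring the enqueue/dequeue contract above is a bounded FIFO.
 * example_enqueue/example_dequeue are hypothetical names; the helpers
 * they use (qdisc_enqueue_tail, qdisc_dequeue_head, qdisc_drop) are the
 * real ones from sch_generic.h:
 *
 *	static int example_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 *				   struct sk_buff **to_free)
 *	{
 *		if (likely(sch->q.qlen < sch->limit))
 *			return qdisc_enqueue_tail(skb, sch);	// NET_XMIT_SUCCESS
 *		return qdisc_drop(skb, sch, to_free);		// NET_XMIT_DROP
 *	}
 *
 *	static struct sk_buff *example_dequeue(struct Qdisc *sch)
 *	{
 *		return qdisc_dequeue_head(sch);			// NULL when empty
 *	}
 */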

/* Protects the list of registered TC modules. It is a pure SMP lock. */
static DEFINE_RWLOCK(qdisc_mod_lock);


/************************************************
 *	Queueing disciplines manipulation.	*
 ************************************************/


/* The list of all installed queueing disciplines. */

static struct Qdisc_ops *qdisc_base;

/* Register/unregister queueing discipline */

int register_qdisc(struct Qdisc_ops *qops)
{
	struct Qdisc_ops *q, **qp;
	int rc = -EEXIST;

	write_lock(&qdisc_mod_lock);
	for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
		if (!strcmp(qops->id, q->id))
			goto out;

	if (qops->enqueue == NULL)
		qops->enqueue = noop_qdisc_ops.enqueue;
	if (qops->peek == NULL) {
		if (qops->dequeue == NULL)
			qops->peek = noop_qdisc_ops.peek;
		else
			goto out_einval;
	}
	if (qops->dequeue == NULL)
		qops->dequeue = noop_qdisc_ops.dequeue;

	if (qops->cl_ops) {
		const struct Qdisc_class_ops *cops = qops->cl_ops;

		if (!(cops->find && cops->walk && cops->leaf))
			goto out_einval;

		if (cops->tcf_block && !(cops->bind_tcf && cops->unbind_tcf))
			goto out_einval;
	}

	qops->next = NULL;
	*qp = qops;
	rc = 0;
out:
	write_unlock(&qdisc_mod_lock);
	return rc;

out_einval:
	rc = -EINVAL;
	goto out;
}
EXPORT_SYMBOL(register_qdisc);

int unregister_qdisc(struct Qdisc_ops *qops)
{
	struct Qdisc_ops *q, **qp;
	int err = -ENOENT;

	write_lock(&qdisc_mod_lock);
	for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
		if (q == qops)
			break;
	if (q) {
		*qp = q->next;
		q->next = NULL;
		err = 0;
	}
	write_unlock(&qdisc_mod_lock);
	return err;
}
EXPORT_SYMBOL(unregister_qdisc);

/* Get the default qdisc if not otherwise specified */
void qdisc_get_default(char *name, size_t len)
{
	read_lock(&qdisc_mod_lock);
	strlcpy(name, default_qdisc_ops->id, len);
	read_unlock(&qdisc_mod_lock);
}

static struct Qdisc_ops *qdisc_lookup_default(const char *name)
{
	struct Qdisc_ops *q = NULL;

	for (q = qdisc_base; q; q = q->next) {
		if (!strcmp(name, q->id)) {
			if (!try_module_get(q->owner))
				q = NULL;
			break;
		}
	}

	return q;
}

/* Set new default qdisc to use */
int qdisc_set_default(const char *name)
{
	const struct Qdisc_ops *ops;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	write_lock(&qdisc_mod_lock);
	ops = qdisc_lookup_default(name);
	if (!ops) {
		/* Not found, drop lock and try to load module */
		write_unlock(&qdisc_mod_lock);
		request_module("sch_%s", name);
		write_lock(&qdisc_mod_lock);

		ops = qdisc_lookup_default(name);
	}

	if (ops) {
		/* Set new default */
		module_put(default_qdisc_ops->owner);
		default_qdisc_ops = ops;
	}
	write_unlock(&qdisc_mod_lock);

	return ops ? 0 : -ENOENT;
}

#ifdef CONFIG_NET_SCH_DEFAULT
/* Set default value from kernel config */
static int __init sch_default_qdisc(void)
{
	return qdisc_set_default(CONFIG_DEFAULT_NET_SCH);
}
late_initcall(sch_default_qdisc);
#endif
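
/*
 * Usage sketch (illustrative, with a hypothetical example_qdisc_ops): a
 * qdisc module registers its ops on load and unregisters them on unload:
 *
 *	static int __init example_module_init(void)
 *	{
 *		return register_qdisc(&example_qdisc_ops);
 *	}
 *
 *	static void __exit example_module_exit(void)
 *	{
 *		unregister_qdisc(&example_qdisc_ops);
 *	}
 *	module_init(example_module_init);
 *	module_exit(example_module_exit);
 *
 * register_qdisc() returns -EEXIST if the id is already taken and
 * -EINVAL if the ops are internally inconsistent (e.g. a dequeue
 * without a matching peek, or class ops missing find/walk/leaf).
 */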

/* We know the handle. Find the qdisc among all of the qdiscs attached to
 * the device (the root qdisc, all its children, children of children, etc.)
 * Note: the caller holds either RTNL or rcu_read_lock().
 */

static struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle)
{
	struct Qdisc *q;

	if (!qdisc_dev(root))
		return (root->handle == handle ? root : NULL);

	if (!(root->flags & TCQ_F_BUILTIN) &&
	    root->handle == handle)
		return root;

	hash_for_each_possible_rcu(qdisc_dev(root)->qdisc_hash, q, hash, handle,
				   lockdep_rtnl_is_held()) {
		if (q->handle == handle)
			return q;
	}
	return NULL;
}

void qdisc_hash_add(struct Qdisc *q, bool invisible)
{
	if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
		ASSERT_RTNL();
		hash_add_rcu(qdisc_dev(q)->qdisc_hash, &q->hash, q->handle);
		if (invisible)
			q->flags |= TCQ_F_INVISIBLE;
	}
}
EXPORT_SYMBOL(qdisc_hash_add);

void qdisc_hash_del(struct Qdisc *q)
{
	if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
		ASSERT_RTNL();
		hash_del_rcu(&q->hash);
	}
}
EXPORT_SYMBOL(qdisc_hash_del);

struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
{
	struct Qdisc *q;

	if (!handle)
		return NULL;
	q = qdisc_match_from_root(dev->qdisc, handle);
	if (q)
		goto out;

	if (dev_ingress_queue(dev))
		q = qdisc_match_from_root(
			dev_ingress_queue(dev)->qdisc_sleeping,
			handle);
out:
	return q;
}

struct Qdisc *qdisc_lookup_rcu(struct net_device *dev, u32 handle)
{
	struct netdev_queue *nq;
	struct Qdisc *q;

	if (!handle)
		return NULL;
	q = qdisc_match_from_root(dev->qdisc, handle);
	if (q)
		goto out;

	nq = dev_ingress_queue_rcu(dev);
	if (nq)
		q = qdisc_match_from_root(nq->qdisc_sleeping, handle);
out:
	return q;
}

static struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid)
{
	unsigned long cl;
	const struct Qdisc_class_ops *cops = p->ops->cl_ops;

	if (cops == NULL)
		return NULL;
	cl = cops->find(p, classid);

	if (cl == 0)
		return NULL;
	return cops->leaf(p, cl);
}

/* Find queueing discipline by name */

static struct Qdisc_ops *qdisc_lookup_ops(struct nlattr *kind)
{
	struct Qdisc_ops *q = NULL;

	if (kind) {
		read_lock(&qdisc_mod_lock);
		for (q = qdisc_base; q; q = q->next) {
			if (nla_strcmp(kind, q->id) == 0) {
				if (!try_module_get(q->owner))
					q = NULL;
				break;
			}
		}
		read_unlock(&qdisc_mod_lock);
	}
	return q;
}
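
/*
 * For reference (illustrative): handles are 32-bit "major:minor" values,
 * 16 bits each, built with TC_H_MAKE().  "1:10" in tc syntax is
 *
 *	u32 h = TC_H_MAKE(0x00010000, 0x10);	// == 0x00010010
 *
 * TC_H_MAJ(h) is the qdisc part (0x00010000) and TC_H_MIN(h) the class
 * part (0x00000010).  Qdiscs always have minor 0, which is why
 * qdisc_lookup() above compares full handles while classes are resolved
 * through cops->find() with the complete major:minor value.
 */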

/* Older versions of iproute2 did not transfer the linklayer setting, and
 * the rate table lookup system has been dropped from the kernel. To stay
 * backward compatible with older iproute2 tc utilities, we detect the
 * linklayer setting by checking whether the rate table was modified.
 *
 * For linklayer ATM table entries, the rate table is aligned to 48
 * bytes, so some table entries contain the same value. The mpu (min
 * packet unit) is also encoded into the old rate table, so starting
 * from the mpu we find the low and high table entries that map this
 * cell. If these entries contain the same value, then the rate table
 * was modified for linklayer ATM.
 *
 * This is done by rounding mpu up to the nearest 48-byte cell/entry,
 * then rounding up to the next cell, calculating the table entry one
 * below, and comparing.
 */
static __u8 __detect_linklayer(struct tc_ratespec *r, __u32 *rtab)
{
	int low       = roundup(r->mpu, 48);
	int high      = roundup(low+1, 48);
	int cell_low  = low >> r->cell_log;
	int cell_high = (high >> r->cell_log) - 1;

	/* rtab is too inaccurate at rates > 100Mbit/s */
	if ((r->rate > (100000000/8)) || (rtab[0] == 0)) {
		pr_debug("TC linklayer: Giving up ATM detection\n");
		return TC_LINKLAYER_ETHERNET;
	}

	if ((cell_high > cell_low) && (cell_high < 256)
	    && (rtab[cell_low] == rtab[cell_high])) {
		pr_debug("TC linklayer: Detected ATM, low(%d)=high(%d)=%u\n",
			 cell_low, cell_high, rtab[cell_high]);
		return TC_LINKLAYER_ATM;
	}
	return TC_LINKLAYER_ETHERNET;
}
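
/*
 * Worked example (illustrative numbers): with the default mpu = 0 and
 * cell_log = 3 (8-byte table cells),
 *
 *	low       = roundup(0, 48) = 0
 *	high      = roundup(1, 48) = 48
 *	cell_low  = 0 >> 3         = 0
 *	cell_high = (48 >> 3) - 1  = 5
 *
 * rtab[0]..rtab[5] cover packet sizes 0..47 bytes.  On an ATM-aligned
 * table all of them cost exactly one 48-byte cell, so rtab[0] ==
 * rtab[5] and TC_LINKLAYER_ATM is returned; on a plain Ethernet table
 * the entries grow with packet size and the comparison fails.
 */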

static struct qdisc_rate_table *qdisc_rtab_list;

struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r,
					struct nlattr *tab,
					struct netlink_ext_ack *extack)
{
	struct qdisc_rate_table *rtab;

	if (tab == NULL || r->rate == 0 ||
	    r->cell_log == 0 || r->cell_log >= 32 ||
	    nla_len(tab) != TC_RTAB_SIZE) {
		NL_SET_ERR_MSG(extack, "Invalid rate table parameters for searching");
		return NULL;
	}

	for (rtab = qdisc_rtab_list; rtab; rtab = rtab->next) {
		if (!memcmp(&rtab->rate, r, sizeof(struct tc_ratespec)) &&
		    !memcmp(&rtab->data, nla_data(tab), 1024)) {
			rtab->refcnt++;
			return rtab;
		}
	}

	rtab = kmalloc(sizeof(*rtab), GFP_KERNEL);
	if (rtab) {
		rtab->rate = *r;
		rtab->refcnt = 1;
		memcpy(rtab->data, nla_data(tab), 1024);
		if (r->linklayer == TC_LINKLAYER_UNAWARE)
			r->linklayer = __detect_linklayer(r, rtab->data);
		rtab->next = qdisc_rtab_list;
		qdisc_rtab_list = rtab;
	} else {
		NL_SET_ERR_MSG(extack, "Failed to allocate new qdisc rate table");
	}
	return rtab;
}
EXPORT_SYMBOL(qdisc_get_rtab);

void qdisc_put_rtab(struct qdisc_rate_table *tab)
{
	struct qdisc_rate_table *rtab, **rtabp;

	if (!tab || --tab->refcnt)
		return;

	for (rtabp = &qdisc_rtab_list;
	     (rtab = *rtabp) != NULL;
	     rtabp = &rtab->next) {
		if (rtab == tab) {
			*rtabp = rtab->next;
			kfree(rtab);
			return;
		}
	}
}
EXPORT_SYMBOL(qdisc_put_rtab);

static LIST_HEAD(qdisc_stab_list);

static const struct nla_policy stab_policy[TCA_STAB_MAX + 1] = {
	[TCA_STAB_BASE]	= { .len = sizeof(struct tc_sizespec) },
	[TCA_STAB_DATA] = { .type = NLA_BINARY },
};

static struct qdisc_size_table *qdisc_get_stab(struct nlattr *opt,
					       struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_STAB_MAX + 1];
	struct qdisc_size_table *stab;
	struct tc_sizespec *s;
	unsigned int tsize = 0;
	u16 *tab = NULL;
	int err;

	err = nla_parse_nested_deprecated(tb, TCA_STAB_MAX, opt, stab_policy,
					  extack);
	if (err < 0)
		return ERR_PTR(err);
	if (!tb[TCA_STAB_BASE]) {
		NL_SET_ERR_MSG(extack, "Size table base attribute is missing");
		return ERR_PTR(-EINVAL);
	}

	s = nla_data(tb[TCA_STAB_BASE]);

	if (s->tsize > 0) {
		if (!tb[TCA_STAB_DATA]) {
			NL_SET_ERR_MSG(extack, "Size table data attribute is missing");
			return ERR_PTR(-EINVAL);
		}
		tab = nla_data(tb[TCA_STAB_DATA]);
		tsize = nla_len(tb[TCA_STAB_DATA]) / sizeof(u16);
	}

	if (tsize != s->tsize || (!tab && tsize > 0)) {
		NL_SET_ERR_MSG(extack, "Invalid size of size table");
		return ERR_PTR(-EINVAL);
	}

	list_for_each_entry(stab, &qdisc_stab_list, list) {
		if (memcmp(&stab->szopts, s, sizeof(*s)))
			continue;
		if (tsize > 0 &&
		    memcmp(stab->data, tab, flex_array_size(stab, data, tsize)))
			continue;
		stab->refcnt++;
		return stab;
	}

	if (s->size_log > STAB_SIZE_LOG_MAX ||
	    s->cell_log > STAB_SIZE_LOG_MAX) {
		NL_SET_ERR_MSG(extack, "Invalid logarithmic size of size table");
		return ERR_PTR(-EINVAL);
	}

	stab = kmalloc(struct_size(stab, data, tsize), GFP_KERNEL);
	if (!stab)
		return ERR_PTR(-ENOMEM);

	stab->refcnt = 1;
	stab->szopts = *s;
	if (tsize > 0)
		memcpy(stab->data, tab, flex_array_size(stab, data, tsize));

	list_add_tail(&stab->list, &qdisc_stab_list);

	return stab;
}

void qdisc_put_stab(struct qdisc_size_table *tab)
{
	if (!tab)
		return;

	if (--tab->refcnt == 0) {
		list_del(&tab->list);
		kfree_rcu(tab, rcu);
	}
}
EXPORT_SYMBOL(qdisc_put_stab);

static int qdisc_dump_stab(struct sk_buff *skb, struct qdisc_size_table *stab)
{
	struct nlattr *nest;

	nest = nla_nest_start_noflag(skb, TCA_STAB);
	if (nest == NULL)
		goto nla_put_failure;
	if (nla_put(skb, TCA_STAB_BASE, sizeof(stab->szopts), &stab->szopts))
		goto nla_put_failure;
	nla_nest_end(skb, nest);

	return skb->len;

nla_put_failure:
	return -1;
}

void __qdisc_calculate_pkt_len(struct sk_buff *skb,
			       const struct qdisc_size_table *stab)
{
	int pkt_len, slot;

	pkt_len = skb->len + stab->szopts.overhead;
	if (unlikely(!stab->szopts.tsize))
		goto out;

	slot = pkt_len + stab->szopts.cell_align;
	if (unlikely(slot < 0))
		slot = 0;

	slot >>= stab->szopts.cell_log;
	if (likely(slot < stab->szopts.tsize))
		pkt_len = stab->data[slot];
	else
		pkt_len = stab->data[stab->szopts.tsize - 1] *
				(slot / stab->szopts.tsize) +
				stab->data[slot % stab->szopts.tsize];

	pkt_len <<= stab->szopts.size_log;
out:
	if (unlikely(pkt_len < 1))
		pkt_len = 1;
	qdisc_skb_cb(skb)->pkt_len = pkt_len;
}
EXPORT_SYMBOL(__qdisc_calculate_pkt_len);
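
/*
 * Worked example (illustrative numbers): with szopts.overhead = 3,
 * cell_align = 0, cell_log = 6 and size_log = 6 (a hypothetical link
 * carrying 64-byte cells), a 1000-byte skb gives
 *
 *	pkt_len = 1000 + 3 = 1003
 *	slot    = 1003 >> 6 = 15
 *	pkt_len = stab->data[15] << 6
 *
 * so if data[15] holds 16 (cells), the skb is accounted as 1024 bytes.
 * Slots beyond tsize are extrapolated from the last table entry, as in
 * the code above.
 */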

void qdisc_warn_nonwc(const char *txt, struct Qdisc *qdisc)
{
	if (!(qdisc->flags & TCQ_F_WARN_NONWC)) {
		pr_warn("%s: %s qdisc %X: is non-work-conserving?\n",
			txt, qdisc->ops->id, qdisc->handle >> 16);
		qdisc->flags |= TCQ_F_WARN_NONWC;
	}
}
EXPORT_SYMBOL(qdisc_warn_nonwc);

static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer)
{
	struct qdisc_watchdog *wd = container_of(timer, struct qdisc_watchdog,
						 timer);

	rcu_read_lock();
	__netif_schedule(qdisc_root(wd->qdisc));
	rcu_read_unlock();

	return HRTIMER_NORESTART;
}

void qdisc_watchdog_init_clockid(struct qdisc_watchdog *wd, struct Qdisc *qdisc,
				 clockid_t clockid)
{
	hrtimer_init(&wd->timer, clockid, HRTIMER_MODE_ABS_PINNED);
	wd->timer.function = qdisc_watchdog;
	wd->qdisc = qdisc;
}
EXPORT_SYMBOL(qdisc_watchdog_init_clockid);

void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc)
{
	qdisc_watchdog_init_clockid(wd, qdisc, CLOCK_MONOTONIC);
}
EXPORT_SYMBOL(qdisc_watchdog_init);

void qdisc_watchdog_schedule_range_ns(struct qdisc_watchdog *wd, u64 expires,
				      u64 delta_ns)
{
	if (test_bit(__QDISC_STATE_DEACTIVATED,
		     &qdisc_root_sleeping(wd->qdisc)->state))
		return;

	if (hrtimer_is_queued(&wd->timer)) {
		/* If timer is already set in [expires, expires + delta_ns],
		 * do not reprogram it.
		 */
		if (wd->last_expires - expires <= delta_ns)
			return;
	}

	wd->last_expires = expires;
	hrtimer_start_range_ns(&wd->timer,
			       ns_to_ktime(expires),
			       delta_ns,
			       HRTIMER_MODE_ABS_PINNED);
}
EXPORT_SYMBOL(qdisc_watchdog_schedule_range_ns);

void qdisc_watchdog_cancel(struct qdisc_watchdog *wd)
{
	hrtimer_cancel(&wd->timer);
}
EXPORT_SYMBOL(qdisc_watchdog_cancel);
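
/*
 * Usage sketch (illustrative, assuming a hypothetical private struct
 * with an embedded "struct qdisc_watchdog watchdog" member): a
 * non-work-conserving qdisc arms the watchdog when the head packet may
 * not be sent yet:
 *
 *	qdisc_watchdog_init(&q->watchdog, sch);		// in ->init()
 *
 *	// in ->dequeue(), when nothing may be sent before time t:
 *	qdisc_watchdog_schedule_ns(&q->watchdog, t);
 *	return NULL;
 *
 *	qdisc_watchdog_cancel(&q->watchdog);		// in ->reset()/->destroy()
 *
 * When the hrtimer fires, qdisc_watchdog() above simply reschedules the
 * root qdisc, so ->dequeue() will be called again.
 */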

static struct hlist_head *qdisc_class_hash_alloc(unsigned int n)
{
	struct hlist_head *h;
	unsigned int i;

	h = kvmalloc_array(n, sizeof(struct hlist_head), GFP_KERNEL);

	if (h != NULL) {
		for (i = 0; i < n; i++)
			INIT_HLIST_HEAD(&h[i]);
	}
	return h;
}

void qdisc_class_hash_grow(struct Qdisc *sch, struct Qdisc_class_hash *clhash)
{
	struct Qdisc_class_common *cl;
	struct hlist_node *next;
	struct hlist_head *nhash, *ohash;
	unsigned int nsize, nmask, osize;
	unsigned int i, h;

	/* Rehash when load factor exceeds 0.75 */
	if (clhash->hashelems * 4 <= clhash->hashsize * 3)
		return;
	nsize = clhash->hashsize * 2;
	nmask = nsize - 1;
	nhash = qdisc_class_hash_alloc(nsize);
	if (nhash == NULL)
		return;

	ohash = clhash->hash;
	osize = clhash->hashsize;

	sch_tree_lock(sch);
	for (i = 0; i < osize; i++) {
		hlist_for_each_entry_safe(cl, next, &ohash[i], hnode) {
			h = qdisc_class_hash(cl->classid, nmask);
			hlist_add_head(&cl->hnode, &nhash[h]);
		}
	}
	clhash->hash     = nhash;
	clhash->hashsize = nsize;
	clhash->hashmask = nmask;
	sch_tree_unlock(sch);

	kvfree(ohash);
}
EXPORT_SYMBOL(qdisc_class_hash_grow);

int qdisc_class_hash_init(struct Qdisc_class_hash *clhash)
{
	unsigned int size = 4;

	clhash->hash = qdisc_class_hash_alloc(size);
	if (!clhash->hash)
		return -ENOMEM;
	clhash->hashsize  = size;
	clhash->hashmask  = size - 1;
	clhash->hashelems = 0;
	return 0;
}
EXPORT_SYMBOL(qdisc_class_hash_init);

void qdisc_class_hash_destroy(struct Qdisc_class_hash *clhash)
{
	kvfree(clhash->hash);
}
EXPORT_SYMBOL(qdisc_class_hash_destroy);

void qdisc_class_hash_insert(struct Qdisc_class_hash *clhash,
			     struct Qdisc_class_common *cl)
{
	unsigned int h;

	INIT_HLIST_NODE(&cl->hnode);
	h = qdisc_class_hash(cl->classid, clhash->hashmask);
	hlist_add_head(&cl->hnode, &clhash->hash[h]);
	clhash->hashelems++;
}
EXPORT_SYMBOL(qdisc_class_hash_insert);

void qdisc_class_hash_remove(struct Qdisc_class_hash *clhash,
			     struct Qdisc_class_common *cl)
{
	hlist_del(&cl->hnode);
	clhash->hashelems--;
}
EXPORT_SYMBOL(qdisc_class_hash_remove);

/* Allocate a unique handle from the space managed by the kernel.
 * Possible range is [8000-FFFF]:0000 (0x8000 values)
 */
static u32 qdisc_alloc_handle(struct net_device *dev)
{
	int i = 0x8000;
	static u32 autohandle = TC_H_MAKE(0x80000000U, 0);

	do {
		autohandle += TC_H_MAKE(0x10000U, 0);
		if (autohandle == TC_H_MAKE(TC_H_ROOT, 0))
			autohandle = TC_H_MAKE(0x80000000U, 0);
		if (!qdisc_lookup(dev, autohandle))
			return autohandle;
		cond_resched();
	} while	(--i > 0);

	return 0;
}
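
/*
 * Usage sketch (illustrative): a classful qdisc embeds a
 * struct Qdisc_class_common in each class, keyed by classid:
 *
 *	err = qdisc_class_hash_init(&q->clhash);	// in ->init()
 *
 *	cl->common.classid = classid;
 *	sch_tree_lock(sch);
 *	qdisc_class_hash_insert(&q->clhash, &cl->common);
 *	sch_tree_unlock(sch);
 *	qdisc_class_hash_grow(sch, &q->clhash);		// takes the lock itself
 *
 * and looks classes up with qdisc_class_find(&q->clhash, classid).
 */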

void qdisc_tree_reduce_backlog(struct Qdisc *sch, int n, int len)
{
	bool qdisc_is_offloaded = sch->flags & TCQ_F_OFFLOADED;
	const struct Qdisc_class_ops *cops;
	unsigned long cl;
	u32 parentid;
	bool notify;
	int drops;

	if (n == 0 && len == 0)
		return;
	drops = max_t(int, n, 0);
	rcu_read_lock();
	while ((parentid = sch->parent)) {
		if (TC_H_MAJ(parentid) == TC_H_MAJ(TC_H_INGRESS))
			break;

		if (sch->flags & TCQ_F_NOPARENT)
			break;
		/* Notify parent qdisc only if child qdisc becomes empty.
		 *
		 * If child was empty even before update then backlog
		 * counter is screwed and we skip notification because
		 * parent class is already passive.
		 *
		 * If the original child was offloaded then it is allowed
		 * to be seen as empty, so the parent is notified anyway.
		 */
		notify = !sch->q.qlen && !WARN_ON_ONCE(!n &&
						       !qdisc_is_offloaded);
		/* TODO: perform the search on a per txq basis */
		sch = qdisc_lookup(qdisc_dev(sch), TC_H_MAJ(parentid));
		if (sch == NULL) {
			WARN_ON_ONCE(parentid != TC_H_ROOT);
			break;
		}
		cops = sch->ops->cl_ops;
		if (notify && cops->qlen_notify) {
			cl = cops->find(sch, parentid);
			cops->qlen_notify(sch, cl);
		}
		sch->q.qlen -= n;
		sch->qstats.backlog -= len;
		__qdisc_qstats_drop(sch, drops);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(qdisc_tree_reduce_backlog);
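
/*
 * Usage note (illustrative): a child qdisc that drops or removes packets
 * outside of the usual enqueue/dequeue path -- say n packets totalling
 * len bytes flushed from a timer -- must propagate the change upwards so
 * that now-passive parent classes get a qlen_notify() and the ancestors'
 * counters stay consistent:
 *
 *	qdisc_tree_reduce_backlog(sch, n, len);
 *
 * A pure byte-count correction passes n == 0; only positive n is
 * accounted as drops, as the max_t() above shows.
 */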

int qdisc_offload_dump_helper(struct Qdisc *sch, enum tc_setup_type type,
			      void *type_data)
{
	struct net_device *dev = qdisc_dev(sch);
	int err;

	sch->flags &= ~TCQ_F_OFFLOADED;
	if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
		return 0;

	err = dev->netdev_ops->ndo_setup_tc(dev, type, type_data);
	if (err == -EOPNOTSUPP)
		return 0;

	if (!err)
		sch->flags |= TCQ_F_OFFLOADED;

	return err;
}
EXPORT_SYMBOL(qdisc_offload_dump_helper);

void qdisc_offload_graft_helper(struct net_device *dev, struct Qdisc *sch,
				struct Qdisc *new, struct Qdisc *old,
				enum tc_setup_type type, void *type_data,
				struct netlink_ext_ack *extack)
{
	bool any_qdisc_is_offloaded;
	int err;

	if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
		return;

	err = dev->netdev_ops->ndo_setup_tc(dev, type, type_data);

	/* Don't report error if the graft is part of destroy operation. */
	if (!err || !new || new == &noop_qdisc)
		return;

	/* Don't report error if the parent, the old child and the new
	 * one are not offloaded.
	 */
	any_qdisc_is_offloaded = new->flags & TCQ_F_OFFLOADED;
	any_qdisc_is_offloaded |= sch && sch->flags & TCQ_F_OFFLOADED;
	any_qdisc_is_offloaded |= old && old->flags & TCQ_F_OFFLOADED;

	if (any_qdisc_is_offloaded)
		NL_SET_ERR_MSG(extack, "Offloading graft operation failed.");
}
EXPORT_SYMBOL(qdisc_offload_graft_helper);

static void qdisc_offload_graft_root(struct net_device *dev,
				     struct Qdisc *new, struct Qdisc *old,
				     struct netlink_ext_ack *extack)
{
	struct tc_root_qopt_offload graft_offload = {
		.command	= TC_ROOT_GRAFT,
		.handle		= new ? new->handle : 0,
		.ingress	= (new && new->flags & TCQ_F_INGRESS) ||
				  (old && old->flags & TCQ_F_INGRESS),
	};

	qdisc_offload_graft_helper(dev, NULL, new, old,
				   TC_SETUP_ROOT_QDISC, &graft_offload, extack);
}

static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
			 u32 portid, u32 seq, u16 flags, int event)
{
	struct gnet_stats_basic_cpu __percpu *cpu_bstats = NULL;
	struct gnet_stats_queue __percpu *cpu_qstats = NULL;
	struct tcmsg *tcm;
	struct nlmsghdr  *nlh;
	unsigned char *b = skb_tail_pointer(skb);
	struct gnet_dump d;
	struct qdisc_size_table *stab;
	u32 block_index;
	__u32 qlen;

	cond_resched();
	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
	if (!nlh)
		goto out_nlmsg_trim;
	tcm = nlmsg_data(nlh);
	tcm->tcm_family = AF_UNSPEC;
	tcm->tcm__pad1 = 0;
	tcm->tcm__pad2 = 0;
	tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
	tcm->tcm_parent = clid;
	tcm->tcm_handle = q->handle;
	tcm->tcm_info = refcount_read(&q->refcnt);
	if (nla_put_string(skb, TCA_KIND, q->ops->id))
		goto nla_put_failure;
	if (q->ops->ingress_block_get) {
		block_index = q->ops->ingress_block_get(q);
		if (block_index &&
		    nla_put_u32(skb, TCA_INGRESS_BLOCK, block_index))
			goto nla_put_failure;
	}
	if (q->ops->egress_block_get) {
		block_index = q->ops->egress_block_get(q);
		if (block_index &&
		    nla_put_u32(skb, TCA_EGRESS_BLOCK, block_index))
			goto nla_put_failure;
	}
	if (q->ops->dump && q->ops->dump(q, skb) < 0)
		goto nla_put_failure;
	if (nla_put_u8(skb, TCA_HW_OFFLOAD, !!(q->flags & TCQ_F_OFFLOADED)))
		goto nla_put_failure;
	qlen = qdisc_qlen_sum(q);

	stab = rtnl_dereference(q->stab);
	if (stab && qdisc_dump_stab(skb, stab) < 0)
		goto nla_put_failure;

	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
					 NULL, &d, TCA_PAD) < 0)
		goto nla_put_failure;

	if (q->ops->dump_stats && q->ops->dump_stats(q, &d) < 0)
		goto nla_put_failure;

	if (qdisc_is_percpu_stats(q)) {
		cpu_bstats = q->cpu_bstats;
		cpu_qstats = q->cpu_qstats;
	}

	if (gnet_stats_copy_basic(qdisc_root_sleeping_running(q),
				  &d, cpu_bstats, &q->bstats) < 0 ||
	    gnet_stats_copy_rate_est(&d, &q->rate_est) < 0 ||
	    gnet_stats_copy_queue(&d, cpu_qstats, &q->qstats, qlen) < 0)
		goto nla_put_failure;

	if (gnet_stats_finish_copy(&d) < 0)
		goto nla_put_failure;

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

out_nlmsg_trim:
nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static bool tc_qdisc_dump_ignore(struct Qdisc *q, bool dump_invisible)
{
	if (q->flags & TCQ_F_BUILTIN)
		return true;
	if ((q->flags & TCQ_F_INVISIBLE) && !dump_invisible)
		return true;

	return false;
}

static int qdisc_notify(struct net *net, struct sk_buff *oskb,
			struct nlmsghdr *n, u32 clid,
			struct Qdisc *old, struct Qdisc *new)
{
	struct sk_buff *skb;
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (old && !tc_qdisc_dump_ignore(old, false)) {
		if (tc_fill_qdisc(skb, old, clid, portid, n->nlmsg_seq,
				  0, RTM_DELQDISC) < 0)
			goto err_out;
	}
	if (new && !tc_qdisc_dump_ignore(new, false)) {
		if (tc_fill_qdisc(skb, new, clid, portid, n->nlmsg_seq,
				  old ? NLM_F_REPLACE : 0, RTM_NEWQDISC) < 0)
			goto err_out;
	}

	if (skb->len)
		return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
				      n->nlmsg_flags & NLM_F_ECHO);

err_out:
	kfree_skb(skb);
	return -EINVAL;
}

static void notify_and_destroy(struct net *net, struct sk_buff *skb,
			       struct nlmsghdr *n, u32 clid,
			       struct Qdisc *old, struct Qdisc *new)
{
	if (new || old)
		qdisc_notify(net, skb, n, clid, old, new);

	if (old)
		qdisc_put(old);
}

static void qdisc_clear_nolock(struct Qdisc *sch)
{
	sch->flags &= ~TCQ_F_NOLOCK;
	if (!(sch->flags & TCQ_F_CPUSTATS))
		return;

	free_percpu(sch->cpu_bstats);
	free_percpu(sch->cpu_qstats);
	sch->cpu_bstats = NULL;
	sch->cpu_qstats = NULL;
	sch->flags &= ~TCQ_F_CPUSTATS;
}

/* Graft qdisc "new" to class "classid" of qdisc "parent" or
 * to device "dev".
 *
 * When appropriate, send a netlink notification using "skb"
 * and "n".
 *
 * On success, destroy the old qdisc.
 */

static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
		       struct sk_buff *skb, struct nlmsghdr *n, u32 classid,
		       struct Qdisc *new, struct Qdisc *old,
		       struct netlink_ext_ack *extack)
{
	struct Qdisc *q = old;
	struct net *net = dev_net(dev);

	if (parent == NULL) {
		unsigned int i, num_q, ingress;

		ingress = 0;
		num_q = dev->num_tx_queues;
		if ((q && q->flags & TCQ_F_INGRESS) ||
		    (new && new->flags & TCQ_F_INGRESS)) {
			num_q = 1;
			ingress = 1;
			if (!dev_ingress_queue(dev)) {
				NL_SET_ERR_MSG(extack, "Device does not have an ingress queue");
				return -ENOENT;
			}
		}

		if (dev->flags & IFF_UP)
			dev_deactivate(dev);

		qdisc_offload_graft_root(dev, new, old, extack);

		if (new && new->ops->attach)
			goto skip;

		for (i = 0; i < num_q; i++) {
			struct netdev_queue *dev_queue = dev_ingress_queue(dev);

			if (!ingress)
				dev_queue = netdev_get_tx_queue(dev, i);

			old = dev_graft_qdisc(dev_queue, new);
			if (new && i > 0)
				qdisc_refcount_inc(new);

			if (!ingress)
				qdisc_put(old);
		}

skip:
		if (!ingress) {
			notify_and_destroy(net, skb, n, classid,
					   dev->qdisc, new);
			if (new && !new->ops->attach)
				qdisc_refcount_inc(new);
			dev->qdisc = new ? : &noop_qdisc;

			if (new && new->ops->attach)
				new->ops->attach(new);
		} else {
			notify_and_destroy(net, skb, n, classid, old, new);
		}

		if (dev->flags & IFF_UP)
			dev_activate(dev);
	} else {
		const struct Qdisc_class_ops *cops = parent->ops->cl_ops;
		unsigned long cl;
		int err;

		/* Only support running class lockless if parent is lockless */
		if (new && (new->flags & TCQ_F_NOLOCK) && !(parent->flags & TCQ_F_NOLOCK))
			qdisc_clear_nolock(new);

		if (!cops || !cops->graft)
			return -EOPNOTSUPP;

		cl = cops->find(parent, classid);
		if (!cl) {
			NL_SET_ERR_MSG(extack, "Specified class not found");
			return -ENOENT;
		}

		err = cops->graft(parent, cl, new, &old, extack);
		if (err)
			return err;
		notify_and_destroy(net, skb, n, classid, old, new);
	}
	return 0;
}

static int qdisc_block_indexes_set(struct Qdisc *sch, struct nlattr **tca,
				   struct netlink_ext_ack *extack)
{
	u32 block_index;

	if (tca[TCA_INGRESS_BLOCK]) {
		block_index = nla_get_u32(tca[TCA_INGRESS_BLOCK]);

		if (!block_index) {
			NL_SET_ERR_MSG(extack, "Ingress block index cannot be 0");
			return -EINVAL;
		}
		if (!sch->ops->ingress_block_set) {
			NL_SET_ERR_MSG(extack, "Ingress block sharing is not supported");
			return -EOPNOTSUPP;
		}
		sch->ops->ingress_block_set(sch, block_index);
	}
	if (tca[TCA_EGRESS_BLOCK]) {
		block_index = nla_get_u32(tca[TCA_EGRESS_BLOCK]);

		if (!block_index) {
			NL_SET_ERR_MSG(extack, "Egress block index cannot be 0");
			return -EINVAL;
		}
		if (!sch->ops->egress_block_set) {
			NL_SET_ERR_MSG(extack, "Egress block sharing is not supported");
			return -EOPNOTSUPP;
		}
		sch->ops->egress_block_set(sch, block_index);
	}
	return 0;
}

/*
   Allocate and initialize a new qdisc.

   Parameters are passed via opt.
 */

static struct Qdisc *qdisc_create(struct net_device *dev,
				  struct netdev_queue *dev_queue,
				  struct Qdisc *p, u32 parent, u32 handle,
				  struct nlattr **tca, int *errp,
				  struct netlink_ext_ack *extack)
{
	int err;
	struct nlattr *kind = tca[TCA_KIND];
	struct Qdisc *sch;
	struct Qdisc_ops *ops;
	struct qdisc_size_table *stab;

	ops = qdisc_lookup_ops(kind);
#ifdef CONFIG_MODULES
	if (ops == NULL && kind != NULL) {
		char name[IFNAMSIZ];

		if (nla_strscpy(name, kind, IFNAMSIZ) >= 0) {
			/* We dropped the RTNL semaphore in order to
			 * perform the module load.  So, even if we
			 * succeeded in loading the module we have to
			 * tell the caller to replay the request.  We
			 * indicate this using -EAGAIN.
			 * We replay the request because the device may
			 * go away in the mean time.
			 */
			rtnl_unlock();
			request_module("sch_%s", name);
			rtnl_lock();
			ops = qdisc_lookup_ops(kind);
			if (ops != NULL) {
				/* We will try qdisc_lookup_ops() again,
				 * so don't keep a reference.
				 */
				module_put(ops->owner);
				err = -EAGAIN;
				goto err_out;
			}
		}
	}
#endif

	err = -ENOENT;
	if (!ops) {
		NL_SET_ERR_MSG(extack, "Specified qdisc not found");
		goto err_out;
	}

	sch = qdisc_alloc(dev_queue, ops, extack);
	if (IS_ERR(sch)) {
		err = PTR_ERR(sch);
		goto err_out2;
	}

	sch->parent = parent;

	if (handle == TC_H_INGRESS) {
		sch->flags |= TCQ_F_INGRESS;
		handle = TC_H_MAKE(TC_H_INGRESS, 0);
	} else {
		if (handle == 0) {
			handle = qdisc_alloc_handle(dev);
			if (handle == 0) {
				NL_SET_ERR_MSG(extack, "Maximum number of qdisc handles was exceeded");
				err = -ENOSPC;
				goto err_out3;
			}
		}
		if (!netif_is_multiqueue(dev))
			sch->flags |= TCQ_F_ONETXQUEUE;
	}

	sch->handle = handle;

	/* This exists to keep backward compatibility with a userspace
	 * loophole, which allowed userspace to get the IFF_NO_QUEUE
	 * facility on older kernels by setting tx_queue_len=0 (prior
	 * to qdisc init) and then forgetting to reinit tx_queue_len
	 * before attaching a qdisc again.
	 */
	if ((dev->priv_flags & IFF_NO_QUEUE) && (dev->tx_queue_len == 0)) {
		dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;
		netdev_info(dev, "Caught tx_queue_len zero misconfig\n");
	}

	err = qdisc_block_indexes_set(sch, tca, extack);
	if (err)
		goto err_out3;

	if (ops->init) {
		err = ops->init(sch, tca[TCA_OPTIONS], extack);
		if (err != 0)
			goto err_out5;
	}

	if (tca[TCA_STAB]) {
		stab = qdisc_get_stab(tca[TCA_STAB], extack);
		if (IS_ERR(stab)) {
			err = PTR_ERR(stab);
			goto err_out4;
		}
		rcu_assign_pointer(sch->stab, stab);
	}
	if (tca[TCA_RATE]) {
		seqcount_t *running;

		err = -EOPNOTSUPP;
		if (sch->flags & TCQ_F_MQROOT) {
			NL_SET_ERR_MSG(extack, "Cannot attach rate estimator to a multi-queue root qdisc");
			goto err_out4;
		}

		if (sch->parent != TC_H_ROOT &&
		    !(sch->flags & TCQ_F_INGRESS) &&
		    (!p || !(p->flags & TCQ_F_MQROOT)))
			running = qdisc_root_sleeping_running(sch);
		else
			running = &sch->running;

		err = gen_new_estimator(&sch->bstats,
					sch->cpu_bstats,
					&sch->rate_est,
					NULL,
					running,
					tca[TCA_RATE]);
		if (err) {
			NL_SET_ERR_MSG(extack, "Failed to generate new estimator");
			goto err_out4;
		}
	}

	qdisc_hash_add(sch, false);
	trace_qdisc_create(ops, dev, parent);

	return sch;

err_out5:
	/* ops->init() failed, we call ->destroy() like qdisc_create_dflt() */
	if (ops->destroy)
		ops->destroy(sch);
err_out3:
	dev_put(dev);
	qdisc_free(sch);
err_out2:
	module_put(ops->owner);
err_out:
	*errp = err;
	return NULL;

err_out4:
	/*
	 * Any broken qdiscs that would require a ops->reset() here?
	 * The qdisc was never in action so it shouldn't be necessary.
	 */
	qdisc_put_stab(rtnl_dereference(sch->stab));
	if (ops->destroy)
		ops->destroy(sch);
	goto err_out3;
}

static int qdisc_change(struct Qdisc *sch, struct nlattr **tca,
			struct netlink_ext_ack *extack)
{
	struct qdisc_size_table *ostab, *stab = NULL;
	int err = 0;

	if (tca[TCA_OPTIONS]) {
		if (!sch->ops->change) {
			NL_SET_ERR_MSG(extack, "Change operation not supported by specified qdisc");
			return -EINVAL;
		}
		if (tca[TCA_INGRESS_BLOCK] || tca[TCA_EGRESS_BLOCK]) {
			NL_SET_ERR_MSG(extack, "Change of blocks is not supported");
			return -EOPNOTSUPP;
		}
		err = sch->ops->change(sch, tca[TCA_OPTIONS], extack);
		if (err)
			return err;
	}

	if (tca[TCA_STAB]) {
		stab = qdisc_get_stab(tca[TCA_STAB], extack);
		if (IS_ERR(stab))
			return PTR_ERR(stab);
	}

	ostab = rtnl_dereference(sch->stab);
	rcu_assign_pointer(sch->stab, stab);
	qdisc_put_stab(ostab);

	if (tca[TCA_RATE]) {
		/* NB: ignores errors from replace_estimator
		 * because change can't be undone.
		 */
		if (sch->flags & TCQ_F_MQROOT)
			goto out;
		gen_replace_estimator(&sch->bstats,
				      sch->cpu_bstats,
				      &sch->rate_est,
				      NULL,
				      qdisc_root_sleeping_running(sch),
				      tca[TCA_RATE]);
	}
out:
	return 0;
}

struct check_loop_arg {
	struct qdisc_walker	w;
	struct Qdisc		*p;
	int			depth;
};

static int check_loop_fn(struct Qdisc *q, unsigned long cl,
			 struct qdisc_walker *w);

static int check_loop(struct Qdisc *q, struct Qdisc *p, int depth)
{
	struct check_loop_arg	arg;

	if (q->ops->cl_ops == NULL)
		return 0;

	arg.w.stop = arg.w.skip = arg.w.count = 0;
	arg.w.fn = check_loop_fn;
	arg.depth = depth;
	arg.p = p;
	q->ops->cl_ops->walk(q, &arg.w);
	return arg.w.stop ? -ELOOP : 0;
}

static int
check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w)
{
	struct Qdisc *leaf;
	const struct Qdisc_class_ops *cops = q->ops->cl_ops;
	struct check_loop_arg *arg = (struct check_loop_arg *)w;

	leaf = cops->leaf(q, cl);
	if (leaf) {
		if (leaf == arg->p || arg->depth > 7)
			return -ELOOP;
		return check_loop(leaf, arg->p, arg->depth + 1);
	}
	return 0;
}

const struct nla_policy rtm_tca_policy[TCA_MAX + 1] = {
	[TCA_KIND]		= { .type = NLA_STRING },
	[TCA_RATE]		= { .type = NLA_BINARY,
				    .len = sizeof(struct tc_estimator) },
	[TCA_STAB]		= { .type = NLA_NESTED },
	[TCA_DUMP_INVISIBLE]	= { .type = NLA_FLAG },
	[TCA_CHAIN]		= { .type = NLA_U32 },
	[TCA_INGRESS_BLOCK]	= { .type = NLA_U32 },
	[TCA_EGRESS_BLOCK]	= { .type = NLA_U32 },
};

/*
 * Delete/get qdisc.
 */

static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
			struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct tcmsg *tcm = nlmsg_data(n);
	struct nlattr *tca[TCA_MAX + 1];
	struct net_device *dev;
	u32 clid;
	struct Qdisc *q = NULL;
	struct Qdisc *p = NULL;
	int err;

	if ((n->nlmsg_type != RTM_GETQDISC) &&
	    !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	err = nlmsg_parse_deprecated(n, sizeof(*tcm), tca, TCA_MAX,
				     rtm_tca_policy, extack);
	if (err < 0)
		return err;

	dev = __dev_get_by_index(net, tcm->tcm_ifindex);
	if (!dev)
		return -ENODEV;

	clid = tcm->tcm_parent;
	if (clid) {
		if (clid != TC_H_ROOT) {
			if (TC_H_MAJ(clid) != TC_H_MAJ(TC_H_INGRESS)) {
				p = qdisc_lookup(dev, TC_H_MAJ(clid));
				if (!p) {
					NL_SET_ERR_MSG(extack, "Failed to find qdisc with specified classid");
					return -ENOENT;
				}
				q = qdisc_leaf(p, clid);
			} else if (dev_ingress_queue(dev)) {
				q = dev_ingress_queue(dev)->qdisc_sleeping;
			}
		} else {
			q = dev->qdisc;
		}
		if (!q) {
			NL_SET_ERR_MSG(extack, "Cannot find specified qdisc on specified device");
			return -ENOENT;
		}

		if (tcm->tcm_handle && q->handle != tcm->tcm_handle) {
			NL_SET_ERR_MSG(extack, "Invalid handle");
			return -EINVAL;
		}
	} else {
		q = qdisc_lookup(dev, tcm->tcm_handle);
		if (!q) {
			NL_SET_ERR_MSG(extack, "Failed to find qdisc with specified handle");
			return -ENOENT;
		}
	}

	if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id)) {
		NL_SET_ERR_MSG(extack, "Invalid qdisc name");
		return -EINVAL;
	}

	if (n->nlmsg_type == RTM_DELQDISC) {
		if (!clid) {
			NL_SET_ERR_MSG(extack, "Classid cannot be zero");
			return -EINVAL;
		}
		if (q->handle == 0) {
			NL_SET_ERR_MSG(extack, "Cannot delete qdisc with handle of zero");
			return -ENOENT;
		}
		err = qdisc_graft(dev, p, skb, n, clid, NULL, q, extack);
		if (err != 0)
			return err;
	} else {
		qdisc_notify(net, skb, n, clid, NULL, q);
	}
	return 0;
}
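
/*
 * For reference (illustrative): "tc qdisc del dev eth0 root" arrives
 * here as RTM_DELQDISC with tcm_parent == TC_H_ROOT and tcm_handle == 0;
 * the root qdisc is then grafted away by passing new == NULL to
 * qdisc_graft().  A request with tcm_parent == 0 instead looks the
 * qdisc up directly by tcm_handle.
 */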

/*
 * Create/change qdisc.
 */

static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
			   struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct tcmsg *tcm;
	struct nlattr *tca[TCA_MAX + 1];
	struct net_device *dev;
	u32 clid;
	struct Qdisc *q, *p;
	int err;

	if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

replay:
	/* Reinit, just in case something touches this. */
	err = nlmsg_parse_deprecated(n, sizeof(*tcm), tca, TCA_MAX,
				     rtm_tca_policy, extack);
	if (err < 0)
		return err;

	tcm = nlmsg_data(n);
	clid = tcm->tcm_parent;
	q = p = NULL;

	dev = __dev_get_by_index(net, tcm->tcm_ifindex);
	if (!dev)
		return -ENODEV;


	if (clid) {
		if (clid != TC_H_ROOT) {
			if (clid != TC_H_INGRESS) {
				p = qdisc_lookup(dev, TC_H_MAJ(clid));
				if (!p) {
					NL_SET_ERR_MSG(extack, "Failed to find specified qdisc");
					return -ENOENT;
				}
				q = qdisc_leaf(p, clid);
			} else if (dev_ingress_queue_create(dev)) {
				q = dev_ingress_queue(dev)->qdisc_sleeping;
			}
		} else {
			q = dev->qdisc;
		}

		/* It may be the default qdisc; ignore it */
		if (q && q->handle == 0)
			q = NULL;

		if (!q || !tcm->tcm_handle || q->handle != tcm->tcm_handle) {
			if (tcm->tcm_handle) {
				if (q && !(n->nlmsg_flags & NLM_F_REPLACE)) {
					NL_SET_ERR_MSG(extack, "NLM_F_REPLACE needed to override");
					return -EEXIST;
				}
				if (TC_H_MIN(tcm->tcm_handle)) {
					NL_SET_ERR_MSG(extack, "Invalid minor handle");
					return -EINVAL;
				}
				q = qdisc_lookup(dev, tcm->tcm_handle);
				if (!q)
					goto create_n_graft;
				if (n->nlmsg_flags & NLM_F_EXCL) {
					NL_SET_ERR_MSG(extack, "Exclusivity flag on, cannot override");
					return -EEXIST;
				}
				if (tca[TCA_KIND] &&
				    nla_strcmp(tca[TCA_KIND], q->ops->id)) {
					NL_SET_ERR_MSG(extack, "Invalid qdisc name");
					return -EINVAL;
				}
				if (q == p ||
				    (p && check_loop(q, p, 0))) {
					NL_SET_ERR_MSG(extack, "Qdisc parent/child loop detected");
					return -ELOOP;
				}
				qdisc_refcount_inc(q);
				goto graft;
			} else {
				if (!q)
					goto create_n_graft;

				/* This magic test requires explanation.
				 *
				 * We know that some child q is already
				 * attached to this parent and we have a
				 * choice: either to change it or to
				 * create/graft a new one.
				 *
				 * 1. We are allowed to create/graft only
				 * if both CREATE and REPLACE flags are set.
				 *
				 * 2. If EXCL is set, the requester wanted
				 * to say that the qdisc tcm_handle is not
				 * expected to exist, so we choose
				 * create/graft too.
				 *
				 * 3. The last case is when no flags are
				 * set. Alas, it is a sort of hole in the
				 * API; we cannot decide what to do
				 * unambiguously. For now we select
				 * create/graft if the user gave a KIND
				 * that does not match the existing one.
				 */
				if ((n->nlmsg_flags & NLM_F_CREATE) &&
				    (n->nlmsg_flags & NLM_F_REPLACE) &&
				    ((n->nlmsg_flags & NLM_F_EXCL) ||
				     (tca[TCA_KIND] &&
				      nla_strcmp(tca[TCA_KIND], q->ops->id))))
					goto create_n_graft;
			}
		}
	} else {
		if (!tcm->tcm_handle) {
			NL_SET_ERR_MSG(extack, "Handle cannot be zero");
			return -EINVAL;
		}
		q = qdisc_lookup(dev, tcm->tcm_handle);
	}

	/* Change qdisc parameters */
	if (!q) {
		NL_SET_ERR_MSG(extack, "Specified qdisc not found");
		return -ENOENT;
	}
	if (n->nlmsg_flags & NLM_F_EXCL) {
		NL_SET_ERR_MSG(extack, "Exclusivity flag on, cannot modify");
		return -EEXIST;
	}
	if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id)) {
		NL_SET_ERR_MSG(extack, "Invalid qdisc name");
		return -EINVAL;
	}
	err = qdisc_change(q, tca, extack);
	if (err == 0)
		qdisc_notify(net, skb, n, clid, NULL, q);
	return err;

create_n_graft:
	if (!(n->nlmsg_flags & NLM_F_CREATE)) {
		NL_SET_ERR_MSG(extack, "Qdisc not found. To create specify NLM_F_CREATE flag");
		return -ENOENT;
	}
	if (clid == TC_H_INGRESS) {
		if (dev_ingress_queue(dev)) {
			q = qdisc_create(dev, dev_ingress_queue(dev), p,
					 tcm->tcm_parent, tcm->tcm_parent,
					 tca, &err, extack);
		} else {
			NL_SET_ERR_MSG(extack, "Cannot find ingress queue for specified device");
			err = -ENOENT;
		}
	} else {
		struct netdev_queue *dev_queue;

		if (p && p->ops->cl_ops && p->ops->cl_ops->select_queue)
			dev_queue = p->ops->cl_ops->select_queue(p, tcm);
		else if (p)
			dev_queue = p->dev_queue;
		else
			dev_queue = netdev_get_tx_queue(dev, 0);

		q = qdisc_create(dev, dev_queue, p,
				 tcm->tcm_parent, tcm->tcm_handle,
				 tca, &err, extack);
	}
	if (q == NULL) {
		if (err == -EAGAIN)
			goto replay;
		return err;
	}

graft:
	err = qdisc_graft(dev, p, skb, n, clid, q, NULL, extack);
	if (err) {
		if (q)
			qdisc_put(q);
		return err;
	}

	return 0;
}
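
/*
 * For reference (illustrative): iproute2 maps its commands onto the
 * flag combinations tested above roughly as follows:
 *
 *	tc qdisc add		NLM_F_CREATE | NLM_F_EXCL
 *	tc qdisc replace	NLM_F_CREATE | NLM_F_REPLACE
 *	tc qdisc change		(no flags)
 *	tc qdisc link		NLM_F_REPLACE
 *
 * so "add" on an existing qdisc fails with -EEXIST, "change" on a
 * missing one fails with -ENOENT, and "replace" works either way.
 */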

static int tc_dump_qdisc_root(struct Qdisc *root, struct sk_buff *skb,
			      struct netlink_callback *cb,
			      int *q_idx_p, int s_q_idx, bool recur,
			      bool dump_invisible)
{
	int ret = 0, q_idx = *q_idx_p;
	struct Qdisc *q;
	int b;

	if (!root)
		return 0;

	q = root;
	if (q_idx < s_q_idx) {
		q_idx++;
	} else {
		if (!tc_qdisc_dump_ignore(q, dump_invisible) &&
		    tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid,
				  cb->nlh->nlmsg_seq, NLM_F_MULTI,
				  RTM_NEWQDISC) <= 0)
			goto done;
		q_idx++;
	}

	/* If dumping singletons, there is no qdisc_dev(root) and the singleton
	 * itself has already been dumped.
	 *
	 * If we've already dumped the top-level (ingress) qdisc above and the
	 * global qdisc hashtable, we don't want to hit it again.
	 */
	if (!qdisc_dev(root) || !recur)
		goto out;

	hash_for_each(qdisc_dev(root)->qdisc_hash, b, q, hash) {
		if (q_idx < s_q_idx) {
			q_idx++;
			continue;
		}
		if (!tc_qdisc_dump_ignore(q, dump_invisible) &&
		    tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid,
				  cb->nlh->nlmsg_seq, NLM_F_MULTI,
				  RTM_NEWQDISC) <= 0)
			goto done;
		q_idx++;
	}

out:
	*q_idx_p = q_idx;
	return ret;
done:
	ret = -1;
	goto out;
}

static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	int idx, q_idx;
	int s_idx, s_q_idx;
	struct net_device *dev;
	const struct nlmsghdr *nlh = cb->nlh;
	struct nlattr *tca[TCA_MAX + 1];
	int err;

	s_idx = cb->args[0];
	s_q_idx = q_idx = cb->args[1];

	idx = 0;
	ASSERT_RTNL();

	err = nlmsg_parse_deprecated(nlh, sizeof(struct tcmsg), tca, TCA_MAX,
				     rtm_tca_policy, cb->extack);
	if (err < 0)
		return err;

	for_each_netdev(net, dev) {
		struct netdev_queue *dev_queue;

		if (idx < s_idx)
			goto cont;
		if (idx > s_idx)
			s_q_idx = 0;
		q_idx = 0;

		if (tc_dump_qdisc_root(dev->qdisc, skb, cb, &q_idx, s_q_idx,
				       true, tca[TCA_DUMP_INVISIBLE]) < 0)
			goto done;

		dev_queue = dev_ingress_queue(dev);
		if (dev_queue &&
		    tc_dump_qdisc_root(dev_queue->qdisc_sleeping, skb, cb,
				       &q_idx, s_q_idx, false,
				       tca[TCA_DUMP_INVISIBLE]) < 0)
			goto done;

cont:
		idx++;
	}

done:
	cb->args[0] = idx;
	cb->args[1] = q_idx;

	return skb->len;
}



/************************************************
 *	Traffic classes manipulation.		*
 ************************************************/

static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q,
			  unsigned long cl,
			  u32 portid, u32 seq, u16 flags, int event)
{
	struct tcmsg *tcm;
	struct nlmsghdr  *nlh;
	unsigned char *b = skb_tail_pointer(skb);
	struct gnet_dump d;
	const struct Qdisc_class_ops *cl_ops = q->ops->cl_ops;

	cond_resched();
	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
	if (!nlh)
		goto out_nlmsg_trim;
	tcm = nlmsg_data(nlh);
	tcm->tcm_family = AF_UNSPEC;
	tcm->tcm__pad1 = 0;
	tcm->tcm__pad2 = 0;
	tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
	tcm->tcm_parent = q->handle;
	tcm->tcm_handle = q->handle;
	tcm->tcm_info = 0;
	if (nla_put_string(skb, TCA_KIND, q->ops->id))
		goto nla_put_failure;
	if (cl_ops->dump && cl_ops->dump(q, cl, skb, tcm) < 0)
		goto nla_put_failure;

	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
					 NULL, &d, TCA_PAD) < 0)
		goto nla_put_failure;

	if (cl_ops->dump_stats && cl_ops->dump_stats(q, cl, &d) < 0)
		goto nla_put_failure;

	if (gnet_stats_finish_copy(&d) < 0)
		goto nla_put_failure;

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

out_nlmsg_trim:
nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int tclass_notify(struct net *net, struct sk_buff *oskb,
			 struct nlmsghdr *n, struct Qdisc *q,
			 unsigned long cl, int event)
{
	struct sk_buff *skb;
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tc_fill_tclass(skb, q, cl, portid, n->nlmsg_seq, 0, event) < 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
			      n->nlmsg_flags & NLM_F_ECHO);
}

static int tclass_del_notify(struct net *net,
			     const struct Qdisc_class_ops *cops,
			     struct sk_buff *oskb, struct nlmsghdr *n,
			     struct Qdisc *q, unsigned long cl,
			     struct netlink_ext_ack *extack)
{
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
	struct sk_buff *skb;
	int err = 0;

	if (!cops->delete)
		return -EOPNOTSUPP;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tc_fill_tclass(skb, q, cl, portid, n->nlmsg_seq, 0,
			   RTM_DELTCLASS) < 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	err = cops->delete(q, cl, extack);
	if (err) {
		kfree_skb(skb);
		return err;
	}

	err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
			     n->nlmsg_flags & NLM_F_ECHO);
	return err;
}

#ifdef CONFIG_NET_CLS

struct tcf_bind_args {
	struct tcf_walker w;
	unsigned long base;
	unsigned long cl;
	u32 classid;
};

static int tcf_node_bind(struct tcf_proto *tp, void *n, struct tcf_walker *arg)
{
	struct tcf_bind_args *a = (void *)arg;

	if (tp->ops->bind_class) {
		struct Qdisc *q = tcf_block_q(tp->chain->block);

		sch_tree_lock(q);
		tp->ops->bind_class(n, a->classid, a->cl, q, a->base);
		sch_tree_unlock(q);
	}
	return 0;
}

struct tc_bind_class_args {
	struct qdisc_walker w;
	unsigned long new_cl;
	u32 portid;
	u32 clid;
};

static int tc_bind_class_walker(struct Qdisc *q, unsigned long cl,
				struct qdisc_walker *w)
{
	struct tc_bind_class_args *a = (struct tc_bind_class_args *)w;
	const struct Qdisc_class_ops *cops = q->ops->cl_ops;
	struct tcf_block *block;
	struct tcf_chain *chain;

	block = cops->tcf_block(q, cl, NULL);
	if (!block)
		return 0;
	for (chain = tcf_get_next_chain(block, NULL);
	     chain;
	     chain = tcf_get_next_chain(block, chain)) {
		struct tcf_proto *tp;

		for (tp = tcf_get_next_proto(chain, NULL);
		     tp; tp = tcf_get_next_proto(chain, tp)) {
			struct tcf_bind_args arg = {};

			arg.w.fn = tcf_node_bind;
			arg.classid = a->clid;
			arg.base = cl;
			arg.cl = a->new_cl;
			tp->ops->walk(tp, &arg.w, true);
		}
	}

	return 0;
}

static void tc_bind_tclass(struct Qdisc *q, u32 portid, u32 clid,
			   unsigned long new_cl)
{
	const struct Qdisc_class_ops *cops = q->ops->cl_ops;
	struct tc_bind_class_args args = {};

	if (!cops->tcf_block)
		return;
	args.portid = portid;
	args.clid = clid;
	args.new_cl = new_cl;
	args.w.fn = tc_bind_class_walker;
	q->ops->cl_ops->walk(q, &args.w);
}

#else

static void tc_bind_tclass(struct Qdisc *q, u32 portid, u32 clid,
			   unsigned long new_cl)
{
}

#endif

static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n,
			 struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct tcmsg *tcm = nlmsg_data(n);
	struct nlattr *tca[TCA_MAX + 1];
	struct net_device *dev;
	struct Qdisc *q = NULL;
	const struct Qdisc_class_ops *cops;
	unsigned long cl = 0;
	unsigned long new_cl;
	u32 portid;
	u32 clid;
	u32 qid;
	int err;

	if ((n->nlmsg_type != RTM_GETTCLASS) &&
	    !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	err = nlmsg_parse_deprecated(n, sizeof(*tcm), tca, TCA_MAX,
				     rtm_tca_policy, extack);
	if (err < 0)
		return err;

	dev = __dev_get_by_index(net, tcm->tcm_ifindex);
	if (!dev)
		return -ENODEV;

	/*
	   parent == TC_H_UNSPEC - unspecified parent.
	   parent == TC_H_ROOT   - class is root, which has no parent.
	   parent == X:0	 - parent is root class.
	   parent == X:Y	 - parent is a node in hierarchy.
	   parent == 0:Y	 - parent is X:Y, where X:0 is qdisc.

	   handle == 0:0	 - generate handle from kernel pool.
	   handle == 0:Y	 - class is X:Y, where X:0 is qdisc.
	   handle == X:Y	 - clear.
	   handle == X:0	 - root class.
	 */

	/* Step 1. Determine qdisc handle X:0 */

	portid = tcm->tcm_parent;
	clid = tcm->tcm_handle;
	qid = TC_H_MAJ(clid);

	if (portid != TC_H_ROOT) {
		u32 qid1 = TC_H_MAJ(portid);

		if (qid && qid1) {
			/* If both majors are known, they must be identical. */
			if (qid != qid1)
				return -EINVAL;
		} else if (qid1) {
			qid = qid1;
		} else if (qid == 0)
			qid = dev->qdisc->handle;

		/* Now qid is a genuine qdisc handle consistent with
		 * both parent and child.
		 *
		 * TC_H_MAJ(portid) still may be unspecified, complete it now.
		 */
		if (portid)
			portid = TC_H_MAKE(qid, portid);
	} else {
		if (qid == 0)
			qid = dev->qdisc->handle;
	}

	/* OK. Locate qdisc */
	q = qdisc_lookup(dev, qid);
	if (!q)
		return -ENOENT;

	/* And check that it supports classes */
	cops = q->ops->cl_ops;
	if (cops == NULL)
		return -EINVAL;

	/* Now try to get class */
	if (clid == 0) {
		if (portid == TC_H_ROOT)
			clid = qid;
	} else
		clid = TC_H_MAKE(qid, clid);

	if (clid)
		cl = cops->find(q, clid);

	if (cl == 0) {
		err = -ENOENT;
		if (n->nlmsg_type != RTM_NEWTCLASS ||
		    !(n->nlmsg_flags & NLM_F_CREATE))
			goto out;
	} else {
		switch (n->nlmsg_type) {
		case RTM_NEWTCLASS:
			err = -EEXIST;
			if (n->nlmsg_flags & NLM_F_EXCL)
				goto out;
			break;
		case RTM_DELTCLASS:
			err = tclass_del_notify(net, cops, skb, n, q, cl, extack);
			/* Unbind the class from filters by binding them to 0 */
			tc_bind_tclass(q, portid, clid, 0);
			goto out;
		case RTM_GETTCLASS:
			err = tclass_notify(net, skb, n, q, cl, RTM_NEWTCLASS);
			goto out;
		default:
			err = -EINVAL;
			goto out;
		}
	}

	if (tca[TCA_INGRESS_BLOCK] || tca[TCA_EGRESS_BLOCK]) {
		NL_SET_ERR_MSG(extack, "Shared blocks are not supported for classes");
		return -EOPNOTSUPP;
	}

	new_cl = cl;
	err = -EOPNOTSUPP;
	if (cops->change)
		err = cops->change(q, clid, portid, tca, &new_cl, extack);
	if (err == 0) {
		tclass_notify(net, skb, n, q, new_cl, RTM_NEWTCLASS);
		/* We just created a new class; we need to do the reverse
		 * binding.
		 */
		if (cl != new_cl)
			tc_bind_tclass(q, portid, clid, new_cl);
	}
out:
	return err;
}
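
/*
 * Worked example (illustrative): "tc class add dev eth0 parent 1:
 * classid 1:10 ..." arrives with tcm_parent == 0x00010000 and
 * tcm_handle == 0x00010010.  Step 1 keeps qid = 0x00010000 (both
 * majors agree), portid remains 0x00010000, clid becomes
 * TC_H_MAKE(qid, 0x10) == 0x00010010, and cops->find() then resolves
 * that classid within qdisc 1:.
 */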

struct qdisc_dump_args {
	struct qdisc_walker	w;
	struct sk_buff		*skb;
	struct netlink_callback	*cb;
};

static int qdisc_class_dump(struct Qdisc *q, unsigned long cl,
			    struct qdisc_walker *arg)
{
	struct qdisc_dump_args *a = (struct qdisc_dump_args *)arg;

	return tc_fill_tclass(a->skb, q, cl, NETLINK_CB(a->cb->skb).portid,
			      a->cb->nlh->nlmsg_seq, NLM_F_MULTI,
			      RTM_NEWTCLASS);
}

static int tc_dump_tclass_qdisc(struct Qdisc *q, struct sk_buff *skb,
				struct tcmsg *tcm, struct netlink_callback *cb,
				int *t_p, int s_t)
{
	struct qdisc_dump_args arg;

	if (tc_qdisc_dump_ignore(q, false) ||
	    *t_p < s_t || !q->ops->cl_ops ||
	    (tcm->tcm_parent &&
	     TC_H_MAJ(tcm->tcm_parent) != q->handle)) {
		(*t_p)++;
		return 0;
	}
	if (*t_p > s_t)
		memset(&cb->args[1], 0, sizeof(cb->args)-sizeof(cb->args[0]));
	arg.w.fn = qdisc_class_dump;
	arg.skb = skb;
	arg.cb = cb;
	arg.w.stop  = 0;
	arg.w.skip = cb->args[1];
	arg.w.count = 0;
	q->ops->cl_ops->walk(q, &arg.w);
	cb->args[1] = arg.w.count;
	if (arg.w.stop)
		return -1;
	(*t_p)++;
	return 0;
}

static int tc_dump_tclass_root(struct Qdisc *root, struct sk_buff *skb,
			       struct tcmsg *tcm, struct netlink_callback *cb,
			       int *t_p, int s_t, bool recur)
{
	struct Qdisc *q;
	int b;

	if (!root)
		return 0;

	if (tc_dump_tclass_qdisc(root, skb, tcm, cb, t_p, s_t) < 0)
		return -1;

	if (!qdisc_dev(root) || !recur)
		return 0;

	if (tcm->tcm_parent) {
		q = qdisc_match_from_root(root, TC_H_MAJ(tcm->tcm_parent));
		if (q && q != root &&
		    tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0)
			return -1;
		return 0;
	}
	hash_for_each(qdisc_dev(root)->qdisc_hash, b, q, hash) {
		if (tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0)
			return -1;
	}

	return 0;
}

static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct tcmsg *tcm = nlmsg_data(cb->nlh);
	struct net *net = sock_net(skb->sk);
	struct netdev_queue *dev_queue;
	struct net_device *dev;
	int t, s_t;

	if (nlmsg_len(cb->nlh) < sizeof(*tcm))
		return 0;
	dev = dev_get_by_index(net, tcm->tcm_ifindex);
	if (!dev)
		return 0;

	s_t = cb->args[0];
	t = 0;

	if (tc_dump_tclass_root(dev->qdisc, skb, tcm, cb, &t, s_t, true) < 0)
		goto done;

	dev_queue = dev_ingress_queue(dev);
	if (dev_queue &&
	    tc_dump_tclass_root(dev_queue->qdisc_sleeping, skb, tcm, cb,
				&t, s_t, false) < 0)
		goto done;

done:
	cb->args[0] = t;

	dev_put(dev);
	return skb->len;
}

#ifdef CONFIG_PROC_FS
static int psched_show(struct seq_file *seq, void *v)
{
	seq_printf(seq, "%08x %08x %08x %08x\n",
		   (u32)NSEC_PER_USEC, (u32)PSCHED_TICKS2NS(1),
		   1000000,
		   (u32)NSEC_PER_SEC / hrtimer_resolution);

	return 0;
}
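
/*
 * For reference (illustrative): on a typical system with
 * high-resolution timers, reading /proc/net/psched yields
 *
 *	000003e8 00000040 000f4240 3b9aca00
 *
 * i.e. nanoseconds per microsecond (1000), nanoseconds per psched tick
 * (64 with PSCHED_SHIFT == 6), the historical constant 1000000, and the
 * timer frequency NSEC_PER_SEC / hrtimer_resolution (10^9 when
 * hrtimer_resolution == 1 ns).  iproute2 parses these values to convert
 * its time units into kernel ticks.
 */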
remove_proc_entry("psched", net->proc_net); 2259 } 2260 #else 2261 static int __net_init psched_net_init(struct net *net) 2262 { 2263 return 0; 2264 } 2265 2266 static void __net_exit psched_net_exit(struct net *net) 2267 { 2268 } 2269 #endif 2270 2271 static struct pernet_operations psched_net_ops = { 2272 .init = psched_net_init, 2273 .exit = psched_net_exit, 2274 }; 2275 2276 static int __init pktsched_init(void) 2277 { 2278 int err; 2279 2280 err = register_pernet_subsys(&psched_net_ops); 2281 if (err) { 2282 pr_err("pktsched_init: " 2283 "cannot initialize per netns operations\n"); 2284 return err; 2285 } 2286 2287 register_qdisc(&pfifo_fast_ops); 2288 register_qdisc(&pfifo_qdisc_ops); 2289 register_qdisc(&bfifo_qdisc_ops); 2290 register_qdisc(&pfifo_head_drop_qdisc_ops); 2291 register_qdisc(&mq_qdisc_ops); 2292 register_qdisc(&noqueue_qdisc_ops); 2293 2294 rtnl_register(PF_UNSPEC, RTM_NEWQDISC, tc_modify_qdisc, NULL, 0); 2295 rtnl_register(PF_UNSPEC, RTM_DELQDISC, tc_get_qdisc, NULL, 0); 2296 rtnl_register(PF_UNSPEC, RTM_GETQDISC, tc_get_qdisc, tc_dump_qdisc, 2297 0); 2298 rtnl_register(PF_UNSPEC, RTM_NEWTCLASS, tc_ctl_tclass, NULL, 0); 2299 rtnl_register(PF_UNSPEC, RTM_DELTCLASS, tc_ctl_tclass, NULL, 0); 2300 rtnl_register(PF_UNSPEC, RTM_GETTCLASS, tc_ctl_tclass, tc_dump_tclass, 2301 0); 2302 2303 return 0; 2304 } 2305 2306 subsys_initcall(pktsched_init); 2307