// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/sch_api.c	Packet scheduler API.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Fixes:
 *
 * Rani Assaf <rani@magic.metawire.com> :980802: JIFFIES and CPU clock sources are repaired.
 * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
 * Jamal Hadi Salim <hadi@nortelnetworks.com>: 990601: ingress support
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/hrtimer.h>
#include <linux/slab.h>
#include <linux/hashtable.h>

#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>

#include <trace/events/qdisc.h>

/*

   Short review.
   -------------

   This file consists of two interrelated parts:

	1. queueing disciplines manager frontend.
	2. traffic classes manager frontend.

   Generally, a queueing discipline ("qdisc") is a black box that is able
   to enqueue packets and to dequeue them (when the device is ready to
   send something), in an order and at times determined by the algorithm
   hidden inside it.

   Qdiscs are divided into two categories:
   - "queues", which have no internal structure visible from the outside.
   - "schedulers", which split all packets into "traffic classes",
     using "packet classifiers" (see cls_api.c).

   In turn, classes may have child qdiscs (as a rule, queues)
   attached to them, and so on.

   The goal of the routines in this file is to translate the information
   supplied by the user in the form of handles into a form more
   intelligible to the kernel, to perform some sanity checks and the part
   of the work that is common to all qdiscs, and to provide rtnetlink
   notifications.

   All the real intelligent work is done inside the qdisc modules.



   Every discipline has two major routines: enqueue and dequeue.

   ---dequeue

   dequeue usually returns a skb to send. It is allowed to return NULL,
   but that does not mean the queue is empty; it just means that the
   discipline does not want to send anything at this time. The queue is
   really empty if q->q.qlen == 0.
   For complicated disciplines with multiple queues, q->q is not the
   real packet queue, but q->q.qlen must still be valid.

   ---enqueue

   enqueue returns 0 if the packet was enqueued successfully.
   If a packet (this one or another one) was dropped, it returns
   a non-zero error code:
   NET_XMIT_DROP	- this packet was dropped.
     Expected action: do not back off, but wait until the queue clears.
   NET_XMIT_CN		- this packet was probably enqueued, but another one was dropped.
     Expected action: back off or ignore.

   Auxiliary routines:

   ---peek

   like dequeue, but without removing the packet from the queue.

   ---reset

   returns the qdisc to its initial state: purge all buffers, clear all
   timers, counters (except for statistics), etc.

   ---init

   initializes a newly created qdisc.

   ---destroy

   destroys the resources allocated by init and during the lifetime of the qdisc.

   ---change

   changes qdisc parameters.
 */
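
/*
 * Illustrative sketch only (not part of this file's API): the contract
 * above, expressed as a minimal FIFO-like discipline. The "example_*"
 * names are hypothetical; the helpers are the stock ones from
 * sch_generic.h.
 *
 *	static int example_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 *				   struct sk_buff **to_free)
 *	{
 *		return qdisc_enqueue_tail(skb, sch);	// 0 == NET_XMIT_SUCCESS
 *	}
 *
 *	static struct sk_buff *example_dequeue(struct Qdisc *sch)
 *	{
 *		return qdisc_dequeue_head(sch);		// NULL: nothing to send now
 *	}
 *
 *	static struct Qdisc_ops example_qdisc_ops = {
 *		.id		= "example",
 *		.enqueue	= example_enqueue,
 *		.dequeue	= example_dequeue,
 *		.peek		= qdisc_peek_head,
 *		.owner		= THIS_MODULE,
 *	};
 *
 * A module would then pass &example_qdisc_ops to register_qdisc() below.
 */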

/* Protects list of registered TC modules. It is pure SMP lock. */
static DEFINE_RWLOCK(qdisc_mod_lock);


/************************************************
 *	Queueing disciplines manipulation.	*
 ************************************************/


/* The list of all installed queueing disciplines. */

static struct Qdisc_ops *qdisc_base;

/* Register/unregister queueing discipline */

int register_qdisc(struct Qdisc_ops *qops)
{
	struct Qdisc_ops *q, **qp;
	int rc = -EEXIST;

	write_lock(&qdisc_mod_lock);
	for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
		if (!strcmp(qops->id, q->id))
			goto out;

	if (qops->enqueue == NULL)
		qops->enqueue = noop_qdisc_ops.enqueue;
	if (qops->peek == NULL) {
		if (qops->dequeue == NULL)
			qops->peek = noop_qdisc_ops.peek;
		else
			goto out_einval;
	}
	if (qops->dequeue == NULL)
		qops->dequeue = noop_qdisc_ops.dequeue;

	if (qops->cl_ops) {
		const struct Qdisc_class_ops *cops = qops->cl_ops;

		if (!(cops->find && cops->walk && cops->leaf))
			goto out_einval;

		if (cops->tcf_block && !(cops->bind_tcf && cops->unbind_tcf))
			goto out_einval;
	}

	qops->next = NULL;
	*qp = qops;
	rc = 0;
out:
	write_unlock(&qdisc_mod_lock);
	return rc;

out_einval:
	rc = -EINVAL;
	goto out;
}
EXPORT_SYMBOL(register_qdisc);

void unregister_qdisc(struct Qdisc_ops *qops)
{
	struct Qdisc_ops *q, **qp;
	int err = -ENOENT;

	write_lock(&qdisc_mod_lock);
	for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
		if (q == qops)
			break;
	if (q) {
		*qp = q->next;
		q->next = NULL;
		err = 0;
	}
	write_unlock(&qdisc_mod_lock);

	WARN(err, "unregister qdisc(%s) failed\n", qops->id);
}
EXPORT_SYMBOL(unregister_qdisc);

/* Get default qdisc if not otherwise specified */
void qdisc_get_default(char *name, size_t len)
{
	read_lock(&qdisc_mod_lock);
	strlcpy(name, default_qdisc_ops->id, len);
	read_unlock(&qdisc_mod_lock);
}

static struct Qdisc_ops *qdisc_lookup_default(const char *name)
{
	struct Qdisc_ops *q = NULL;

	for (q = qdisc_base; q; q = q->next) {
		if (!strcmp(name, q->id)) {
			if (!try_module_get(q->owner))
				q = NULL;
			break;
		}
	}

	return q;
}

/* Set new default qdisc to use */
int qdisc_set_default(const char *name)
{
	const struct Qdisc_ops *ops;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	write_lock(&qdisc_mod_lock);
	ops = qdisc_lookup_default(name);
	if (!ops) {
		/* Not found, drop lock and try to load module */
		write_unlock(&qdisc_mod_lock);
		request_module("sch_%s", name);
		write_lock(&qdisc_mod_lock);

		ops = qdisc_lookup_default(name);
	}

	if (ops) {
		/* Set new default */
		module_put(default_qdisc_ops->owner);
		default_qdisc_ops = ops;
	}
	write_unlock(&qdisc_mod_lock);

	return ops ? 0 : -ENOENT;
}

#ifdef CONFIG_NET_SCH_DEFAULT
/* Set default value from kernel config */
static int __init sch_default_qdisc(void)
{
	return qdisc_set_default(CONFIG_DEFAULT_NET_SCH);
}
late_initcall(sch_default_qdisc);
#endif
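
/* Usage note (illustrative): the default qdisc selected above is what gets
 * attached to devices when nothing is configured explicitly; e.g.
 * "sysctl -w net.core.default_qdisc=fq" ends up in qdisc_set_default("fq"),
 * auto-loading sch_fq if needed via the "sch_<name>" module alias
 * convention used with request_module() above.
 */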

/* We know handle. Find qdisc among all qdiscs attached to device
 * (root qdisc, all its children, children of children etc.)
 * Note: caller either uses rtnl or rcu_read_lock()
 */

static struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle)
{
	struct Qdisc *q;

	if (!qdisc_dev(root))
		return (root->handle == handle ? root : NULL);

	if (!(root->flags & TCQ_F_BUILTIN) &&
	    root->handle == handle)
		return root;

	hash_for_each_possible_rcu(qdisc_dev(root)->qdisc_hash, q, hash, handle,
				   lockdep_rtnl_is_held()) {
		if (q->handle == handle)
			return q;
	}
	return NULL;
}

void qdisc_hash_add(struct Qdisc *q, bool invisible)
{
	if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
		ASSERT_RTNL();
		hash_add_rcu(qdisc_dev(q)->qdisc_hash, &q->hash, q->handle);
		if (invisible)
			q->flags |= TCQ_F_INVISIBLE;
	}
}
EXPORT_SYMBOL(qdisc_hash_add);

void qdisc_hash_del(struct Qdisc *q)
{
	if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
		ASSERT_RTNL();
		hash_del_rcu(&q->hash);
	}
}
EXPORT_SYMBOL(qdisc_hash_del);

struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
{
	struct Qdisc *q;

	if (!handle)
		return NULL;
	q = qdisc_match_from_root(rtnl_dereference(dev->qdisc), handle);
	if (q)
		goto out;

	if (dev_ingress_queue(dev))
		q = qdisc_match_from_root(
			dev_ingress_queue(dev)->qdisc_sleeping,
			handle);
out:
	return q;
}

struct Qdisc *qdisc_lookup_rcu(struct net_device *dev, u32 handle)
{
	struct netdev_queue *nq;
	struct Qdisc *q;

	if (!handle)
		return NULL;
	q = qdisc_match_from_root(rcu_dereference(dev->qdisc), handle);
	if (q)
		goto out;

	nq = dev_ingress_queue_rcu(dev);
	if (nq)
		q = qdisc_match_from_root(nq->qdisc_sleeping, handle);
out:
	return q;
}

static struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid)
{
	unsigned long cl;
	const struct Qdisc_class_ops *cops = p->ops->cl_ops;

	if (cops == NULL)
		return NULL;
	cl = cops->find(p, classid);

	if (cl == 0)
		return NULL;
	return cops->leaf(p, cl);
}

/* Find queueing discipline by name */

static struct Qdisc_ops *qdisc_lookup_ops(struct nlattr *kind)
{
	struct Qdisc_ops *q = NULL;

	if (kind) {
		read_lock(&qdisc_mod_lock);
		for (q = qdisc_base; q; q = q->next) {
			if (nla_strcmp(kind, q->id) == 0) {
				if (!try_module_get(q->owner))
					q = NULL;
				break;
			}
		}
		read_unlock(&qdisc_mod_lock);
	}
	return q;
}

/* The linklayer setting was not transferred from iproute2, in older
 * versions, and the rate table lookup system has been dropped from
 * the kernel. To stay backward compatible with older iproute2 tc
 * utils, we detect the linklayer setting by checking whether the rate
 * table was modified.
 *
 * For linklayer ATM table entries, the rate table will be aligned to
 * 48 bytes, thus some table entries will contain the same value. The
 * mpu (min packet unit) is also encoded into the old rate table, thus
 * starting from the mpu, we find the low and high table entries for
 * mapping this cell. If these entries contain the same value, then
 * the rate table has been modified for linklayer ATM.
 *
 * This is done by rounding mpu to the nearest 48-byte cell/entry,
 * then rounding up to the next cell, calculating the table entry one
 * below, and comparing.
 */
static __u8 __detect_linklayer(struct tc_ratespec *r, __u32 *rtab)
{
	int low       = roundup(r->mpu, 48);
	int high      = roundup(low+1, 48);
	int cell_low  = low >> r->cell_log;
	int cell_high = (high >> r->cell_log) - 1;

	/* rtab is too inaccurate at rates > 100Mbit/s */
	if ((r->rate > (100000000/8)) || (rtab[0] == 0)) {
		pr_debug("TC linklayer: Giving up ATM detection\n");
		return TC_LINKLAYER_ETHERNET;
	}

	if ((cell_high > cell_low) && (cell_high < 256)
	    && (rtab[cell_low] == rtab[cell_high])) {
		pr_debug("TC linklayer: Detected ATM, low(%d)=high(%d)=%u\n",
			 cell_low, cell_high, rtab[cell_high]);
		return TC_LINKLAYER_ATM;
	}
	return TC_LINKLAYER_ETHERNET;
}
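
/* Worked example for the detection above (hypothetical numbers): with
 * mpu = 0 and cell_log = 3 (one rtab slot per 8 bytes), low = 0 and
 * high = 48, so cell_low = 0 and cell_high = 5. Slots 0..5 describe
 * packet sizes up to 48 bytes, which all occupy a single ATM cell, so an
 * ATM-adjusted table stores the same transmit time in rtab[0] and rtab[5]
 * and we return TC_LINKLAYER_ATM; a plain Ethernet table grows across
 * those slots and fails the comparison.
 */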

static struct qdisc_rate_table *qdisc_rtab_list;

struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r,
					struct nlattr *tab,
					struct netlink_ext_ack *extack)
{
	struct qdisc_rate_table *rtab;

	if (tab == NULL || r->rate == 0 ||
	    r->cell_log == 0 || r->cell_log >= 32 ||
	    nla_len(tab) != TC_RTAB_SIZE) {
		NL_SET_ERR_MSG(extack, "Invalid rate table parameters for searching");
		return NULL;
	}

	for (rtab = qdisc_rtab_list; rtab; rtab = rtab->next) {
		if (!memcmp(&rtab->rate, r, sizeof(struct tc_ratespec)) &&
		    !memcmp(&rtab->data, nla_data(tab), 1024)) {
			rtab->refcnt++;
			return rtab;
		}
	}

	rtab = kmalloc(sizeof(*rtab), GFP_KERNEL);
	if (rtab) {
		rtab->rate = *r;
		rtab->refcnt = 1;
		memcpy(rtab->data, nla_data(tab), 1024);
		if (r->linklayer == TC_LINKLAYER_UNAWARE)
			r->linklayer = __detect_linklayer(r, rtab->data);
		rtab->next = qdisc_rtab_list;
		qdisc_rtab_list = rtab;
	} else {
		NL_SET_ERR_MSG(extack, "Failed to allocate new qdisc rate table");
	}
	return rtab;
}
EXPORT_SYMBOL(qdisc_get_rtab);

void qdisc_put_rtab(struct qdisc_rate_table *tab)
{
	struct qdisc_rate_table *rtab, **rtabp;

	if (!tab || --tab->refcnt)
		return;

	for (rtabp = &qdisc_rtab_list;
	     (rtab = *rtabp) != NULL;
	     rtabp = &rtab->next) {
		if (rtab == tab) {
			*rtabp = rtab->next;
			kfree(rtab);
			return;
		}
	}
}
EXPORT_SYMBOL(qdisc_put_rtab);

static LIST_HEAD(qdisc_stab_list);

static const struct nla_policy stab_policy[TCA_STAB_MAX + 1] = {
	[TCA_STAB_BASE]	= { .len = sizeof(struct tc_sizespec) },
	[TCA_STAB_DATA] = { .type = NLA_BINARY },
};

static struct qdisc_size_table *qdisc_get_stab(struct nlattr *opt,
					       struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_STAB_MAX + 1];
	struct qdisc_size_table *stab;
	struct tc_sizespec *s;
	unsigned int tsize = 0;
	u16 *tab = NULL;
	int err;

	err = nla_parse_nested_deprecated(tb, TCA_STAB_MAX, opt, stab_policy,
					  extack);
	if (err < 0)
		return ERR_PTR(err);
	if (!tb[TCA_STAB_BASE]) {
		NL_SET_ERR_MSG(extack, "Size table base attribute is missing");
		return ERR_PTR(-EINVAL);
	}

	s = nla_data(tb[TCA_STAB_BASE]);

	if (s->tsize > 0) {
		if (!tb[TCA_STAB_DATA]) {
			NL_SET_ERR_MSG(extack, "Size table data attribute is missing");
			return ERR_PTR(-EINVAL);
		}
		tab = nla_data(tb[TCA_STAB_DATA]);
		tsize = nla_len(tb[TCA_STAB_DATA]) / sizeof(u16);
	}

	if (tsize != s->tsize || (!tab && tsize > 0)) {
		NL_SET_ERR_MSG(extack, "Invalid size of size table");
		return ERR_PTR(-EINVAL);
	}

	list_for_each_entry(stab, &qdisc_stab_list, list) {
		if (memcmp(&stab->szopts, s, sizeof(*s)))
			continue;
		if (tsize > 0 &&
		    memcmp(stab->data, tab, flex_array_size(stab, data, tsize)))
			continue;
		stab->refcnt++;
		return stab;
	}

	if (s->size_log > STAB_SIZE_LOG_MAX ||
	    s->cell_log > STAB_SIZE_LOG_MAX) {
		NL_SET_ERR_MSG(extack, "Invalid logarithmic size of size table");
		return ERR_PTR(-EINVAL);
	}

	stab = kmalloc(struct_size(stab, data, tsize), GFP_KERNEL);
	if (!stab)
		return ERR_PTR(-ENOMEM);

	stab->refcnt = 1;
	stab->szopts = *s;
	if (tsize > 0)
		memcpy(stab->data, tab, flex_array_size(stab, data, tsize));

	list_add_tail(&stab->list, &qdisc_stab_list);

	return stab;
}

void qdisc_put_stab(struct qdisc_size_table *tab)
{
	if (!tab)
		return;

	if (--tab->refcnt == 0) {
		list_del(&tab->list);
		kfree_rcu(tab, rcu);
	}
}
EXPORT_SYMBOL(qdisc_put_stab);

static int qdisc_dump_stab(struct sk_buff *skb, struct qdisc_size_table *stab)
{
	struct nlattr *nest;

	nest = nla_nest_start_noflag(skb, TCA_STAB);
	if (nest == NULL)
		goto nla_put_failure;
	if (nla_put(skb, TCA_STAB_BASE, sizeof(stab->szopts), &stab->szopts))
		goto nla_put_failure;
	nla_nest_end(skb, nest);

	return skb->len;

nla_put_failure:
	return -1;
}

void __qdisc_calculate_pkt_len(struct sk_buff *skb,
			       const struct qdisc_size_table *stab)
{
	int pkt_len, slot;

	pkt_len = skb->len + stab->szopts.overhead;
	if (unlikely(!stab->szopts.tsize))
		goto out;

	slot = pkt_len + stab->szopts.cell_align;
	if (unlikely(slot < 0))
		slot = 0;

	slot >>= stab->szopts.cell_log;
	if (likely(slot < stab->szopts.tsize))
		pkt_len = stab->data[slot];
	else
		pkt_len = stab->data[stab->szopts.tsize - 1] *
				(slot / stab->szopts.tsize) +
				stab->data[slot % stab->szopts.tsize];

	pkt_len <<= stab->szopts.size_log;
out:
	if (unlikely(pkt_len < 1))
		pkt_len = 1;
	qdisc_skb_cb(skb)->pkt_len = pkt_len;
}
EXPORT_SYMBOL(__qdisc_calculate_pkt_len);
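
/* Worked example for __qdisc_calculate_pkt_len() (hypothetical sizespec):
 * with overhead = 24, cell_align = 0, cell_log = 6 and size_log = 0, a
 * 40-byte skb gives pkt_len = 40 + 24 = 64, slot = 64 >> 6 = 1, and the
 * accounted length becomes stab->data[1]. A slot beyond tsize takes the
 * overflow branch above: the last entry is scaled by slot / tsize and a
 * wrapped entry is added.
 */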

void qdisc_warn_nonwc(const char *txt, struct Qdisc *qdisc)
{
	if (!(qdisc->flags & TCQ_F_WARN_NONWC)) {
		pr_warn("%s: %s qdisc %X: is non-work-conserving?\n",
			txt, qdisc->ops->id, qdisc->handle >> 16);
		qdisc->flags |= TCQ_F_WARN_NONWC;
	}
}
EXPORT_SYMBOL(qdisc_warn_nonwc);

static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer)
{
	struct qdisc_watchdog *wd = container_of(timer, struct qdisc_watchdog,
						 timer);

	rcu_read_lock();
	__netif_schedule(qdisc_root(wd->qdisc));
	rcu_read_unlock();

	return HRTIMER_NORESTART;
}

void qdisc_watchdog_init_clockid(struct qdisc_watchdog *wd, struct Qdisc *qdisc,
				 clockid_t clockid)
{
	hrtimer_init(&wd->timer, clockid, HRTIMER_MODE_ABS_PINNED);
	wd->timer.function = qdisc_watchdog;
	wd->qdisc = qdisc;
}
EXPORT_SYMBOL(qdisc_watchdog_init_clockid);

void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc)
{
	qdisc_watchdog_init_clockid(wd, qdisc, CLOCK_MONOTONIC);
}
EXPORT_SYMBOL(qdisc_watchdog_init);

void qdisc_watchdog_schedule_range_ns(struct qdisc_watchdog *wd, u64 expires,
				      u64 delta_ns)
{
	if (test_bit(__QDISC_STATE_DEACTIVATED,
		     &qdisc_root_sleeping(wd->qdisc)->state))
		return;

	if (hrtimer_is_queued(&wd->timer)) {
		/* If timer is already set in [expires, expires + delta_ns],
		 * do not reprogram it.
		 */
		if (wd->last_expires - expires <= delta_ns)
			return;
	}

	wd->last_expires = expires;
	hrtimer_start_range_ns(&wd->timer,
			       ns_to_ktime(expires),
			       delta_ns,
			       HRTIMER_MODE_ABS_PINNED);
}
EXPORT_SYMBOL(qdisc_watchdog_schedule_range_ns);

void qdisc_watchdog_cancel(struct qdisc_watchdog *wd)
{
	hrtimer_cancel(&wd->timer);
}
EXPORT_SYMBOL(qdisc_watchdog_cancel);
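
/* Usage sketch (illustrative, not part of this file): a shaping qdisc that
 * cannot transmit yet typically returns NULL from ->dequeue() and arms the
 * watchdog for the moment the next packet becomes eligible, e.g.:
 *
 *	static struct sk_buff *example_dequeue(struct Qdisc *sch)
 *	{
 *		struct example_sched_data *q = qdisc_priv(sch);
 *
 *		if (q->next_tx_time > ktime_get_ns()) {
 *			qdisc_watchdog_schedule_ns(&q->watchdog,
 *						   q->next_tx_time);
 *			return NULL;	// not empty, just not ready yet
 *		}
 *		...
 *	}
 *
 * The expiring timer runs qdisc_watchdog() above, which reschedules the
 * root qdisc so ->dequeue() is tried again.
 */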

static struct hlist_head *qdisc_class_hash_alloc(unsigned int n)
{
	struct hlist_head *h;
	unsigned int i;

	h = kvmalloc_array(n, sizeof(struct hlist_head), GFP_KERNEL);

	if (h != NULL) {
		for (i = 0; i < n; i++)
			INIT_HLIST_HEAD(&h[i]);
	}
	return h;
}

void qdisc_class_hash_grow(struct Qdisc *sch, struct Qdisc_class_hash *clhash)
{
	struct Qdisc_class_common *cl;
	struct hlist_node *next;
	struct hlist_head *nhash, *ohash;
	unsigned int nsize, nmask, osize;
	unsigned int i, h;

	/* Rehash when load factor exceeds 0.75 */
	if (clhash->hashelems * 4 <= clhash->hashsize * 3)
		return;
	nsize = clhash->hashsize * 2;
	nmask = nsize - 1;
	nhash = qdisc_class_hash_alloc(nsize);
	if (nhash == NULL)
		return;

	ohash = clhash->hash;
	osize = clhash->hashsize;

	sch_tree_lock(sch);
	for (i = 0; i < osize; i++) {
		hlist_for_each_entry_safe(cl, next, &ohash[i], hnode) {
			h = qdisc_class_hash(cl->classid, nmask);
			hlist_add_head(&cl->hnode, &nhash[h]);
		}
	}
	clhash->hash = nhash;
	clhash->hashsize = nsize;
	clhash->hashmask = nmask;
	sch_tree_unlock(sch);

	kvfree(ohash);
}
EXPORT_SYMBOL(qdisc_class_hash_grow);

int qdisc_class_hash_init(struct Qdisc_class_hash *clhash)
{
	unsigned int size = 4;

	clhash->hash = qdisc_class_hash_alloc(size);
	if (!clhash->hash)
		return -ENOMEM;
	clhash->hashsize = size;
	clhash->hashmask = size - 1;
	clhash->hashelems = 0;
	return 0;
}
EXPORT_SYMBOL(qdisc_class_hash_init);

void qdisc_class_hash_destroy(struct Qdisc_class_hash *clhash)
{
	kvfree(clhash->hash);
}
EXPORT_SYMBOL(qdisc_class_hash_destroy);

void qdisc_class_hash_insert(struct Qdisc_class_hash *clhash,
			     struct Qdisc_class_common *cl)
{
	unsigned int h;

	INIT_HLIST_NODE(&cl->hnode);
	h = qdisc_class_hash(cl->classid, clhash->hashmask);
	hlist_add_head(&cl->hnode, &clhash->hash[h]);
	clhash->hashelems++;
}
EXPORT_SYMBOL(qdisc_class_hash_insert);

void qdisc_class_hash_remove(struct Qdisc_class_hash *clhash,
			     struct Qdisc_class_common *cl)
{
	hlist_del(&cl->hnode);
	clhash->hashelems--;
}
EXPORT_SYMBOL(qdisc_class_hash_remove);
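
/* Usage sketch (illustrative): a classful qdisc embeds a
 * struct Qdisc_class_common in its per-class structure and drives the
 * helpers above roughly as follows:
 *
 *	struct example_class {
 *		struct Qdisc_class_common common;
 *		...
 *	};
 *
 *	cl->common.classid = classid;
 *	qdisc_class_hash_insert(&q->clhash, &cl->common);
 *	qdisc_class_hash_grow(sch, &q->clhash);	// may rehash under load
 *
 * Lookup is then qdisc_class_find(&q->clhash, classid) plus
 * container_of() back to the private structure.
 */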

/* Allocate a unique handle from space managed by kernel
 * Possible range is [8000-FFFF]:0000 (0x8000 values)
 */
static u32 qdisc_alloc_handle(struct net_device *dev)
{
	int i = 0x8000;
	static u32 autohandle = TC_H_MAKE(0x80000000U, 0);

	do {
		autohandle += TC_H_MAKE(0x10000U, 0);
		if (autohandle == TC_H_MAKE(TC_H_ROOT, 0))
			autohandle = TC_H_MAKE(0x80000000U, 0);
		if (!qdisc_lookup(dev, autohandle))
			return autohandle;
		cond_resched();
	} while (--i > 0);

	return 0;
}

void qdisc_tree_reduce_backlog(struct Qdisc *sch, int n, int len)
{
	bool qdisc_is_offloaded = sch->flags & TCQ_F_OFFLOADED;
	const struct Qdisc_class_ops *cops;
	unsigned long cl;
	u32 parentid;
	bool notify;
	int drops;

	if (n == 0 && len == 0)
		return;
	drops = max_t(int, n, 0);
	rcu_read_lock();
	while ((parentid = sch->parent)) {
		if (TC_H_MAJ(parentid) == TC_H_MAJ(TC_H_INGRESS))
			break;

		if (sch->flags & TCQ_F_NOPARENT)
			break;
		/* Notify parent qdisc only if child qdisc becomes empty.
		 *
		 * If child was empty even before update then backlog
		 * counter is screwed and we skip notification because
		 * parent class is already passive.
		 *
		 * If the original child was offloaded then it is allowed
		 * to be seen as empty, so the parent is notified anyway.
		 */
		notify = !sch->q.qlen && !WARN_ON_ONCE(!n &&
						       !qdisc_is_offloaded);
		/* TODO: perform the search on a per txq basis */
		sch = qdisc_lookup(qdisc_dev(sch), TC_H_MAJ(parentid));
		if (sch == NULL) {
			WARN_ON_ONCE(parentid != TC_H_ROOT);
			break;
		}
		cops = sch->ops->cl_ops;
		if (notify && cops->qlen_notify) {
			cl = cops->find(sch, parentid);
			cops->qlen_notify(sch, cl);
		}
		sch->q.qlen -= n;
		sch->qstats.backlog -= len;
		__qdisc_qstats_drop(sch, drops);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(qdisc_tree_reduce_backlog);

int qdisc_offload_dump_helper(struct Qdisc *sch, enum tc_setup_type type,
			      void *type_data)
{
	struct net_device *dev = qdisc_dev(sch);
	int err;

	sch->flags &= ~TCQ_F_OFFLOADED;
	if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
		return 0;

	err = dev->netdev_ops->ndo_setup_tc(dev, type, type_data);
	if (err == -EOPNOTSUPP)
		return 0;

	if (!err)
		sch->flags |= TCQ_F_OFFLOADED;

	return err;
}
EXPORT_SYMBOL(qdisc_offload_dump_helper);

void qdisc_offload_graft_helper(struct net_device *dev, struct Qdisc *sch,
				struct Qdisc *new, struct Qdisc *old,
				enum tc_setup_type type, void *type_data,
				struct netlink_ext_ack *extack)
{
	bool any_qdisc_is_offloaded;
	int err;

	if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
		return;

	err = dev->netdev_ops->ndo_setup_tc(dev, type, type_data);

	/* Don't report error if the graft is part of destroy operation. */
	if (!err || !new || new == &noop_qdisc)
		return;

	/* Don't report error if the parent, the old child and the new
	 * one are not offloaded.
	 */
	any_qdisc_is_offloaded = new->flags & TCQ_F_OFFLOADED;
	any_qdisc_is_offloaded |= sch && sch->flags & TCQ_F_OFFLOADED;
	any_qdisc_is_offloaded |= old && old->flags & TCQ_F_OFFLOADED;

	if (any_qdisc_is_offloaded)
		NL_SET_ERR_MSG(extack, "Offloading graft operation failed.");
}
EXPORT_SYMBOL(qdisc_offload_graft_helper);

static void qdisc_offload_graft_root(struct net_device *dev,
				     struct Qdisc *new, struct Qdisc *old,
				     struct netlink_ext_ack *extack)
{
	struct tc_root_qopt_offload graft_offload = {
		.command	= TC_ROOT_GRAFT,
		.handle		= new ? new->handle : 0,
		.ingress	= (new && new->flags & TCQ_F_INGRESS) ||
				  (old && old->flags & TCQ_F_INGRESS),
	};

	qdisc_offload_graft_helper(dev, NULL, new, old,
				   TC_SETUP_ROOT_QDISC, &graft_offload, extack);
}

static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
			 u32 portid, u32 seq, u16 flags, int event)
{
	struct gnet_stats_basic_sync __percpu *cpu_bstats = NULL;
	struct gnet_stats_queue __percpu *cpu_qstats = NULL;
	struct tcmsg *tcm;
	struct nlmsghdr *nlh;
	unsigned char *b = skb_tail_pointer(skb);
	struct gnet_dump d;
	struct qdisc_size_table *stab;
	u32 block_index;
	__u32 qlen;

	cond_resched();
	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
	if (!nlh)
		goto out_nlmsg_trim;
	tcm = nlmsg_data(nlh);
	tcm->tcm_family = AF_UNSPEC;
	tcm->tcm__pad1 = 0;
	tcm->tcm__pad2 = 0;
	tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
	tcm->tcm_parent = clid;
	tcm->tcm_handle = q->handle;
	tcm->tcm_info = refcount_read(&q->refcnt);
	if (nla_put_string(skb, TCA_KIND, q->ops->id))
		goto nla_put_failure;
	if (q->ops->ingress_block_get) {
		block_index = q->ops->ingress_block_get(q);
		if (block_index &&
		    nla_put_u32(skb, TCA_INGRESS_BLOCK, block_index))
			goto nla_put_failure;
	}
	if (q->ops->egress_block_get) {
		block_index = q->ops->egress_block_get(q);
		if (block_index &&
		    nla_put_u32(skb, TCA_EGRESS_BLOCK, block_index))
			goto nla_put_failure;
	}
	if (q->ops->dump && q->ops->dump(q, skb) < 0)
		goto nla_put_failure;
	if (nla_put_u8(skb, TCA_HW_OFFLOAD, !!(q->flags & TCQ_F_OFFLOADED)))
		goto nla_put_failure;
	qlen = qdisc_qlen_sum(q);

	stab = rtnl_dereference(q->stab);
	if (stab && qdisc_dump_stab(skb, stab) < 0)
		goto nla_put_failure;

	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
					 NULL, &d, TCA_PAD) < 0)
		goto nla_put_failure;

	if (q->ops->dump_stats && q->ops->dump_stats(q, &d) < 0)
		goto nla_put_failure;

	if (qdisc_is_percpu_stats(q)) {
		cpu_bstats = q->cpu_bstats;
		cpu_qstats = q->cpu_qstats;
	}

	if (gnet_stats_copy_basic(&d, cpu_bstats, &q->bstats, true) < 0 ||
	    gnet_stats_copy_rate_est(&d, &q->rate_est) < 0 ||
	    gnet_stats_copy_queue(&d, cpu_qstats, &q->qstats, qlen) < 0)
		goto nla_put_failure;

	if (gnet_stats_finish_copy(&d) < 0)
		goto nla_put_failure;

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

out_nlmsg_trim:
nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static bool tc_qdisc_dump_ignore(struct Qdisc *q, bool dump_invisible)
{
	if (q->flags & TCQ_F_BUILTIN)
		return true;
	if ((q->flags & TCQ_F_INVISIBLE) && !dump_invisible)
		return true;

	return false;
}

static int qdisc_notify(struct net *net, struct sk_buff *oskb,
			struct nlmsghdr *n, u32 clid,
			struct Qdisc *old, struct Qdisc *new)
{
	struct sk_buff *skb;
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (old && !tc_qdisc_dump_ignore(old, false)) {
		if (tc_fill_qdisc(skb, old, clid, portid, n->nlmsg_seq,
				  0, RTM_DELQDISC) < 0)
			goto err_out;
	}
	if (new && !tc_qdisc_dump_ignore(new, false)) {
		if (tc_fill_qdisc(skb, new, clid, portid, n->nlmsg_seq,
				  old ? NLM_F_REPLACE : 0, RTM_NEWQDISC) < 0)
			goto err_out;
	}

	if (skb->len)
		return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
				      n->nlmsg_flags & NLM_F_ECHO);

err_out:
	kfree_skb(skb);
	return -EINVAL;
}

static void notify_and_destroy(struct net *net, struct sk_buff *skb,
			       struct nlmsghdr *n, u32 clid,
			       struct Qdisc *old, struct Qdisc *new)
{
	if (new || old)
		qdisc_notify(net, skb, n, clid, old, new);

	if (old)
		qdisc_put(old);
}

static void qdisc_clear_nolock(struct Qdisc *sch)
{
	sch->flags &= ~TCQ_F_NOLOCK;
	if (!(sch->flags & TCQ_F_CPUSTATS))
		return;

	free_percpu(sch->cpu_bstats);
	free_percpu(sch->cpu_qstats);
	sch->cpu_bstats = NULL;
	sch->cpu_qstats = NULL;
	sch->flags &= ~TCQ_F_CPUSTATS;
}

/* Graft qdisc "new" to class "classid" of qdisc "parent" or
 * to device "dev".
 *
 * When appropriate send a netlink notification using 'skb'
 * and "n".
 *
 * On success, destroy old qdisc.
 */

static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
		       struct sk_buff *skb, struct nlmsghdr *n, u32 classid,
		       struct Qdisc *new, struct Qdisc *old,
		       struct netlink_ext_ack *extack)
{
	struct Qdisc *q = old;
	struct net *net = dev_net(dev);

	if (parent == NULL) {
		unsigned int i, num_q, ingress;

		ingress = 0;
		num_q = dev->num_tx_queues;
		if ((q && q->flags & TCQ_F_INGRESS) ||
		    (new && new->flags & TCQ_F_INGRESS)) {
			num_q = 1;
			ingress = 1;
			if (!dev_ingress_queue(dev)) {
				NL_SET_ERR_MSG(extack, "Device does not have an ingress queue");
				return -ENOENT;
			}
		}

		if (dev->flags & IFF_UP)
			dev_deactivate(dev);

		qdisc_offload_graft_root(dev, new, old, extack);

		if (new && new->ops->attach && !ingress)
			goto skip;

		for (i = 0; i < num_q; i++) {
			struct netdev_queue *dev_queue = dev_ingress_queue(dev);

			if (!ingress)
				dev_queue = netdev_get_tx_queue(dev, i);

			old = dev_graft_qdisc(dev_queue, new);
			if (new && i > 0)
				qdisc_refcount_inc(new);

			if (!ingress)
				qdisc_put(old);
		}

skip:
		if (!ingress) {
			notify_and_destroy(net, skb, n, classid,
					   rtnl_dereference(dev->qdisc), new);
			if (new && !new->ops->attach)
				qdisc_refcount_inc(new);
			rcu_assign_pointer(dev->qdisc, new ? : &noop_qdisc);

			if (new && new->ops->attach)
				new->ops->attach(new);
		} else {
			notify_and_destroy(net, skb, n, classid, old, new);
		}

		if (dev->flags & IFF_UP)
			dev_activate(dev);
	} else {
		const struct Qdisc_class_ops *cops = parent->ops->cl_ops;
		unsigned long cl;
		int err;

		/* Only support running class lockless if parent is lockless */
		if (new && (new->flags & TCQ_F_NOLOCK) && !(parent->flags & TCQ_F_NOLOCK))
			qdisc_clear_nolock(new);

		if (!cops || !cops->graft)
			return -EOPNOTSUPP;

		cl = cops->find(parent, classid);
		if (!cl) {
			NL_SET_ERR_MSG(extack, "Specified class not found");
			return -ENOENT;
		}

		err = cops->graft(parent, cl, new, &old, extack);
		if (err)
			return err;
		notify_and_destroy(net, skb, n, classid, old, new);
	}
	return 0;
}

static int qdisc_block_indexes_set(struct Qdisc *sch, struct nlattr **tca,
				   struct netlink_ext_ack *extack)
{
	u32 block_index;

	if (tca[TCA_INGRESS_BLOCK]) {
		block_index = nla_get_u32(tca[TCA_INGRESS_BLOCK]);

		if (!block_index) {
			NL_SET_ERR_MSG(extack, "Ingress block index cannot be 0");
			return -EINVAL;
		}
		if (!sch->ops->ingress_block_set) {
			NL_SET_ERR_MSG(extack, "Ingress block sharing is not supported");
			return -EOPNOTSUPP;
		}
		sch->ops->ingress_block_set(sch, block_index);
	}
	if (tca[TCA_EGRESS_BLOCK]) {
		block_index = nla_get_u32(tca[TCA_EGRESS_BLOCK]);

		if (!block_index) {
			NL_SET_ERR_MSG(extack, "Egress block index cannot be 0");
			return -EINVAL;
		}
		if (!sch->ops->egress_block_set) {
			NL_SET_ERR_MSG(extack, "Egress block sharing is not supported");
			return -EOPNOTSUPP;
		}
		sch->ops->egress_block_set(sch, block_index);
	}
	return 0;
}

/*
   Allocate and initialize new qdisc.

   Parameters are passed via opt.
 */

static struct Qdisc *qdisc_create(struct net_device *dev,
				  struct netdev_queue *dev_queue,
				  u32 parent, u32 handle,
				  struct nlattr **tca, int *errp,
				  struct netlink_ext_ack *extack)
{
	int err;
	struct nlattr *kind = tca[TCA_KIND];
	struct Qdisc *sch;
	struct Qdisc_ops *ops;
	struct qdisc_size_table *stab;

	ops = qdisc_lookup_ops(kind);
#ifdef CONFIG_MODULES
	if (ops == NULL && kind != NULL) {
		char name[IFNAMSIZ];
		if (nla_strscpy(name, kind, IFNAMSIZ) >= 0) {
			/* We dropped the RTNL semaphore in order to
			 * perform the module load. So, even if we
			 * succeeded in loading the module we have to
			 * tell the caller to replay the request. We
			 * indicate this using -EAGAIN.
			 * We replay the request because the device may
			 * go away in the mean time.
			 */
			rtnl_unlock();
			request_module("sch_%s", name);
			rtnl_lock();
			ops = qdisc_lookup_ops(kind);
			if (ops != NULL) {
				/* We will try again qdisc_lookup_ops,
				 * so don't keep a reference.
				 */
				module_put(ops->owner);
				err = -EAGAIN;
				goto err_out;
			}
		}
	}
#endif

	err = -ENOENT;
	if (!ops) {
		NL_SET_ERR_MSG(extack, "Specified qdisc kind is unknown");
		goto err_out;
	}

	sch = qdisc_alloc(dev_queue, ops, extack);
	if (IS_ERR(sch)) {
		err = PTR_ERR(sch);
		goto err_out2;
	}

	sch->parent = parent;

	if (handle == TC_H_INGRESS) {
		sch->flags |= TCQ_F_INGRESS;
		handle = TC_H_MAKE(TC_H_INGRESS, 0);
	} else {
		if (handle == 0) {
			handle = qdisc_alloc_handle(dev);
			if (handle == 0) {
				NL_SET_ERR_MSG(extack, "Maximum number of qdisc handles was exceeded");
				err = -ENOSPC;
				goto err_out3;
			}
		}
		if (!netif_is_multiqueue(dev))
			sch->flags |= TCQ_F_ONETXQUEUE;
	}

	sch->handle = handle;

	/* This exists to keep backward compatible with a userspace
	 * loophole that allowed userspace to get IFF_NO_QUEUE
	 * behaviour on older kernels by setting tx_queue_len=0 (prior
	 * to qdisc init) and then forgetting to reinit tx_queue_len
	 * before again attaching a qdisc.
	 */
	if ((dev->priv_flags & IFF_NO_QUEUE) && (dev->tx_queue_len == 0)) {
		dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;
		netdev_info(dev, "Caught tx_queue_len zero misconfig\n");
	}

	err = qdisc_block_indexes_set(sch, tca, extack);
	if (err)
		goto err_out3;

	if (ops->init) {
		err = ops->init(sch, tca[TCA_OPTIONS], extack);
		if (err != 0)
			goto err_out5;
	}

	if (tca[TCA_STAB]) {
		stab = qdisc_get_stab(tca[TCA_STAB], extack);
		if (IS_ERR(stab)) {
			err = PTR_ERR(stab);
			goto err_out4;
		}
		rcu_assign_pointer(sch->stab, stab);
	}
	if (tca[TCA_RATE]) {
		err = -EOPNOTSUPP;
		if (sch->flags & TCQ_F_MQROOT) {
			NL_SET_ERR_MSG(extack, "Cannot attach rate estimator to a multi-queue root qdisc");
			goto err_out4;
		}

		err = gen_new_estimator(&sch->bstats,
					sch->cpu_bstats,
					&sch->rate_est,
					NULL,
					true,
					tca[TCA_RATE]);
		if (err) {
			NL_SET_ERR_MSG(extack, "Failed to generate new estimator");
			goto err_out4;
		}
	}

	qdisc_hash_add(sch, false);
	trace_qdisc_create(ops, dev, parent);

	return sch;

err_out5:
	/* ops->init() failed, we call ->destroy() like qdisc_create_dflt() */
	if (ops->destroy)
		ops->destroy(sch);
err_out3:
	netdev_put(dev, &sch->dev_tracker);
	qdisc_free(sch);
err_out2:
	module_put(ops->owner);
err_out:
	*errp = err;
	return NULL;

err_out4:
	/*
	 * Any broken qdiscs that would require a ops->reset() here?
	 * The qdisc was never in action so it shouldn't be necessary.
	 */
	qdisc_put_stab(rtnl_dereference(sch->stab));
	if (ops->destroy)
		ops->destroy(sch);
	goto err_out3;
}

static int qdisc_change(struct Qdisc *sch, struct nlattr **tca,
			struct netlink_ext_ack *extack)
{
	struct qdisc_size_table *ostab, *stab = NULL;
	int err = 0;

	if (tca[TCA_OPTIONS]) {
		if (!sch->ops->change) {
			NL_SET_ERR_MSG(extack, "Change operation not supported by specified qdisc");
			return -EINVAL;
		}
		if (tca[TCA_INGRESS_BLOCK] || tca[TCA_EGRESS_BLOCK]) {
			NL_SET_ERR_MSG(extack, "Change of blocks is not supported");
			return -EOPNOTSUPP;
		}
		err = sch->ops->change(sch, tca[TCA_OPTIONS], extack);
		if (err)
			return err;
	}

	if (tca[TCA_STAB]) {
		stab = qdisc_get_stab(tca[TCA_STAB], extack);
		if (IS_ERR(stab))
			return PTR_ERR(stab);
	}

	ostab = rtnl_dereference(sch->stab);
	rcu_assign_pointer(sch->stab, stab);
	qdisc_put_stab(ostab);

	if (tca[TCA_RATE]) {
		/* NB: ignores errors from replace_estimator
		   because change can't be undone. */
		if (sch->flags & TCQ_F_MQROOT)
			goto out;
		gen_replace_estimator(&sch->bstats,
				      sch->cpu_bstats,
				      &sch->rate_est,
				      NULL,
				      true,
				      tca[TCA_RATE]);
	}
out:
	return 0;
}

struct check_loop_arg {
	struct qdisc_walker	w;
	struct Qdisc		*p;
	int			depth;
};

static int check_loop_fn(struct Qdisc *q, unsigned long cl,
			 struct qdisc_walker *w);

static int check_loop(struct Qdisc *q, struct Qdisc *p, int depth)
{
	struct check_loop_arg arg;

	if (q->ops->cl_ops == NULL)
		return 0;

	arg.w.stop = arg.w.skip = arg.w.count = 0;
	arg.w.fn = check_loop_fn;
	arg.depth = depth;
	arg.p = p;
	q->ops->cl_ops->walk(q, &arg.w);
	return arg.w.stop ? -ELOOP : 0;
}

static int
check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w)
{
	struct Qdisc *leaf;
	const struct Qdisc_class_ops *cops = q->ops->cl_ops;
	struct check_loop_arg *arg = (struct check_loop_arg *)w;

	leaf = cops->leaf(q, cl);
	if (leaf) {
		if (leaf == arg->p || arg->depth > 7)
			return -ELOOP;
		return check_loop(leaf, arg->p, arg->depth + 1);
	}
	return 0;
}

const struct nla_policy rtm_tca_policy[TCA_MAX + 1] = {
	[TCA_KIND]		= { .type = NLA_STRING },
	[TCA_RATE]		= { .type = NLA_BINARY,
				    .len = sizeof(struct tc_estimator) },
	[TCA_STAB]		= { .type = NLA_NESTED },
	[TCA_DUMP_INVISIBLE]	= { .type = NLA_FLAG },
	[TCA_CHAIN]		= { .type = NLA_U32 },
	[TCA_INGRESS_BLOCK]	= { .type = NLA_U32 },
	[TCA_EGRESS_BLOCK]	= { .type = NLA_U32 },
};

/*
 * Delete/get qdisc.
 */

static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
			struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct tcmsg *tcm = nlmsg_data(n);
	struct nlattr *tca[TCA_MAX + 1];
	struct net_device *dev;
	u32 clid;
	struct Qdisc *q = NULL;
	struct Qdisc *p = NULL;
	int err;

	if ((n->nlmsg_type != RTM_GETQDISC) &&
	    !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	err = nlmsg_parse_deprecated(n, sizeof(*tcm), tca, TCA_MAX,
				     rtm_tca_policy, extack);
	if (err < 0)
		return err;

	dev = __dev_get_by_index(net, tcm->tcm_ifindex);
	if (!dev)
		return -ENODEV;

	clid = tcm->tcm_parent;
	if (clid) {
		if (clid != TC_H_ROOT) {
			if (TC_H_MAJ(clid) != TC_H_MAJ(TC_H_INGRESS)) {
				p = qdisc_lookup(dev, TC_H_MAJ(clid));
				if (!p) {
					NL_SET_ERR_MSG(extack, "Failed to find qdisc with specified classid");
					return -ENOENT;
				}
				q = qdisc_leaf(p, clid);
			} else if (dev_ingress_queue(dev)) {
				q = dev_ingress_queue(dev)->qdisc_sleeping;
			}
		} else {
			q = rtnl_dereference(dev->qdisc);
		}
		if (!q) {
			NL_SET_ERR_MSG(extack, "Cannot find specified qdisc on specified device");
			return -ENOENT;
		}

		if (tcm->tcm_handle && q->handle != tcm->tcm_handle) {
			NL_SET_ERR_MSG(extack, "Invalid handle");
			return -EINVAL;
		}
	} else {
		q = qdisc_lookup(dev, tcm->tcm_handle);
		if (!q) {
			NL_SET_ERR_MSG(extack, "Failed to find qdisc with specified handle");
			return -ENOENT;
		}
	}

	if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id)) {
		NL_SET_ERR_MSG(extack, "Invalid qdisc name");
		return -EINVAL;
	}

	if (n->nlmsg_type == RTM_DELQDISC) {
		if (!clid) {
			NL_SET_ERR_MSG(extack, "Classid cannot be zero");
			return -EINVAL;
		}
		if (q->handle == 0) {
			NL_SET_ERR_MSG(extack, "Cannot delete qdisc with handle of zero");
			return -ENOENT;
		}
		err = qdisc_graft(dev, p, skb, n, clid, NULL, q, extack);
		if (err != 0)
			return err;
	} else {
		qdisc_notify(net, skb, n, clid, NULL, q);
	}
	return 0;
}
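
/* For orientation (illustrative, userspace side): the handlers above and
 * below back the familiar tc(8) commands, e.g.
 *
 *	tc qdisc add dev eth0 root handle 1: htb
 *		-> RTM_NEWQDISC, NLM_F_CREATE | NLM_F_EXCL
 *	tc qdisc replace dev eth0 root fq_codel
 *		-> RTM_NEWQDISC, NLM_F_CREATE | NLM_F_REPLACE
 *	tc qdisc del dev eth0 root
 *		-> RTM_DELQDISC
 *
 * with tcm_parent = TC_H_ROOT for "root" and TC_H_INGRESS for "ingress".
 */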

/*
 * Create/change qdisc.
 */

static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
			   struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct tcmsg *tcm;
	struct nlattr *tca[TCA_MAX + 1];
	struct net_device *dev;
	u32 clid;
	struct Qdisc *q, *p;
	int err;

	if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

replay:
	/* Reinit, just in case something touches this. */
	err = nlmsg_parse_deprecated(n, sizeof(*tcm), tca, TCA_MAX,
				     rtm_tca_policy, extack);
	if (err < 0)
		return err;

	tcm = nlmsg_data(n);
	clid = tcm->tcm_parent;
	q = p = NULL;

	dev = __dev_get_by_index(net, tcm->tcm_ifindex);
	if (!dev)
		return -ENODEV;


	if (clid) {
		if (clid != TC_H_ROOT) {
			if (clid != TC_H_INGRESS) {
				p = qdisc_lookup(dev, TC_H_MAJ(clid));
				if (!p) {
					NL_SET_ERR_MSG(extack, "Failed to find specified qdisc");
					return -ENOENT;
				}
				q = qdisc_leaf(p, clid);
			} else if (dev_ingress_queue_create(dev)) {
				q = dev_ingress_queue(dev)->qdisc_sleeping;
			}
		} else {
			q = rtnl_dereference(dev->qdisc);
		}

		/* It may be default qdisc, ignore it */
		if (q && q->handle == 0)
			q = NULL;

		if (!q || !tcm->tcm_handle || q->handle != tcm->tcm_handle) {
			if (tcm->tcm_handle) {
				if (q && !(n->nlmsg_flags & NLM_F_REPLACE)) {
					NL_SET_ERR_MSG(extack, "NLM_F_REPLACE needed to override");
					return -EEXIST;
				}
				if (TC_H_MIN(tcm->tcm_handle)) {
					NL_SET_ERR_MSG(extack, "Invalid minor handle");
					return -EINVAL;
				}
				q = qdisc_lookup(dev, tcm->tcm_handle);
				if (!q)
					goto create_n_graft;
				if (n->nlmsg_flags & NLM_F_EXCL) {
					NL_SET_ERR_MSG(extack, "Exclusivity flag on, cannot override");
					return -EEXIST;
				}
				if (tca[TCA_KIND] &&
				    nla_strcmp(tca[TCA_KIND], q->ops->id)) {
					NL_SET_ERR_MSG(extack, "Invalid qdisc name");
					return -EINVAL;
				}
				if (q == p ||
				    (p && check_loop(q, p, 0))) {
					NL_SET_ERR_MSG(extack, "Qdisc parent/child loop detected");
					return -ELOOP;
				}
				qdisc_refcount_inc(q);
				goto graft;
			} else {
				if (!q)
					goto create_n_graft;

				/* This magic test requires explanation.
				 *
				 * We know that some child qdisc is already
				 * attached to this parent and have a choice:
				 * either to change it or to create/graft a
				 * new one.
				 *
				 * 1. We are allowed to create/graft only
				 * if both CREATE and REPLACE flags are set.
				 *
				 * 2. If EXCL is set, the requestor meant that
				 * a qdisc with this tcm_handle is not expected
				 * to exist, so we choose create/graft too.
				 *
				 * 3. The last case is when no flags are set.
				 * Alas, it is a sort of hole in the API; we
				 * cannot decide what to do unambiguously.
				 * For now we select create/graft if the
				 * user gave a KIND that does not match the
				 * existing one.
				 */
				if ((n->nlmsg_flags & NLM_F_CREATE) &&
				    (n->nlmsg_flags & NLM_F_REPLACE) &&
				    ((n->nlmsg_flags & NLM_F_EXCL) ||
				     (tca[TCA_KIND] &&
				      nla_strcmp(tca[TCA_KIND], q->ops->id))))
					goto create_n_graft;
			}
		}
	} else {
		if (!tcm->tcm_handle) {
			NL_SET_ERR_MSG(extack, "Handle cannot be zero");
			return -EINVAL;
		}
		q = qdisc_lookup(dev, tcm->tcm_handle);
	}

	/* Change qdisc parameters */
	if (!q) {
		NL_SET_ERR_MSG(extack, "Specified qdisc not found");
		return -ENOENT;
	}
	if (n->nlmsg_flags & NLM_F_EXCL) {
		NL_SET_ERR_MSG(extack, "Exclusivity flag on, cannot modify");
		return -EEXIST;
	}
	if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id)) {
		NL_SET_ERR_MSG(extack, "Invalid qdisc name");
		return -EINVAL;
	}
	err = qdisc_change(q, tca, extack);
	if (err == 0)
		qdisc_notify(net, skb, n, clid, NULL, q);
	return err;

create_n_graft:
	if (!(n->nlmsg_flags & NLM_F_CREATE)) {
		NL_SET_ERR_MSG(extack, "Qdisc not found. To create specify NLM_F_CREATE flag");
		return -ENOENT;
	}
	if (clid == TC_H_INGRESS) {
		if (dev_ingress_queue(dev)) {
			q = qdisc_create(dev, dev_ingress_queue(dev),
					 tcm->tcm_parent, tcm->tcm_parent,
					 tca, &err, extack);
		} else {
			NL_SET_ERR_MSG(extack, "Cannot find ingress queue for specified device");
			err = -ENOENT;
		}
	} else {
		struct netdev_queue *dev_queue;

		if (p && p->ops->cl_ops && p->ops->cl_ops->select_queue)
			dev_queue = p->ops->cl_ops->select_queue(p, tcm);
		else if (p)
			dev_queue = p->dev_queue;
		else
			dev_queue = netdev_get_tx_queue(dev, 0);

		q = qdisc_create(dev, dev_queue,
				 tcm->tcm_parent, tcm->tcm_handle,
				 tca, &err, extack);
	}
	if (q == NULL) {
		if (err == -EAGAIN)
			goto replay;
		return err;
	}

graft:
	err = qdisc_graft(dev, p, skb, n, clid, q, NULL, extack);
	if (err) {
		if (q)
			qdisc_put(q);
		return err;
	}

	return 0;
}

static int tc_dump_qdisc_root(struct Qdisc *root, struct sk_buff *skb,
			      struct netlink_callback *cb,
			      int *q_idx_p, int s_q_idx, bool recur,
			      bool dump_invisible)
{
	int ret = 0, q_idx = *q_idx_p;
	struct Qdisc *q;
	int b;

	if (!root)
		return 0;

	q = root;
	if (q_idx < s_q_idx) {
		q_idx++;
	} else {
		if (!tc_qdisc_dump_ignore(q, dump_invisible) &&
		    tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid,
				  cb->nlh->nlmsg_seq, NLM_F_MULTI,
				  RTM_NEWQDISC) <= 0)
			goto done;
		q_idx++;
	}

	/* If dumping singletons, there is no qdisc_dev(root) and the singleton
	 * itself has already been dumped.
	 *
	 * If we've already dumped the top-level (ingress) qdisc above and the global
	 * qdisc hashtable, we don't want to hit it again
	 */
	if (!qdisc_dev(root) || !recur)
		goto out;

	hash_for_each(qdisc_dev(root)->qdisc_hash, b, q, hash) {
		if (q_idx < s_q_idx) {
			q_idx++;
			continue;
		}
		if (!tc_qdisc_dump_ignore(q, dump_invisible) &&
		    tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid,
				  cb->nlh->nlmsg_seq, NLM_F_MULTI,
				  RTM_NEWQDISC) <= 0)
			goto done;
		q_idx++;
	}

out:
	*q_idx_p = q_idx;
	return ret;
done:
	ret = -1;
	goto out;
}

static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	int idx, q_idx;
	int s_idx, s_q_idx;
	struct net_device *dev;
	const struct nlmsghdr *nlh = cb->nlh;
	struct nlattr *tca[TCA_MAX + 1];
	int err;

	s_idx = cb->args[0];
	s_q_idx = q_idx = cb->args[1];

	idx = 0;
	ASSERT_RTNL();

	err = nlmsg_parse_deprecated(nlh, sizeof(struct tcmsg), tca, TCA_MAX,
				     rtm_tca_policy, cb->extack);
	if (err < 0)
		return err;

	for_each_netdev(net, dev) {
		struct netdev_queue *dev_queue;

		if (idx < s_idx)
			goto cont;
		if (idx > s_idx)
			s_q_idx = 0;
		q_idx = 0;

		if (tc_dump_qdisc_root(rtnl_dereference(dev->qdisc),
				       skb, cb, &q_idx, s_q_idx,
				       true, tca[TCA_DUMP_INVISIBLE]) < 0)
			goto done;

		dev_queue = dev_ingress_queue(dev);
		if (dev_queue &&
		    tc_dump_qdisc_root(dev_queue->qdisc_sleeping, skb, cb,
				       &q_idx, s_q_idx, false,
				       tca[TCA_DUMP_INVISIBLE]) < 0)
			goto done;

cont:
		idx++;
	}

done:
	cb->args[0] = idx;
	cb->args[1] = q_idx;

	return skb->len;
}



/************************************************
 *	Traffic classes manipulation.		*
 ************************************************/

static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q,
			  unsigned long cl,
			  u32 portid, u32 seq, u16 flags, int event)
{
	struct tcmsg *tcm;
	struct nlmsghdr *nlh;
	unsigned char *b = skb_tail_pointer(skb);
	struct gnet_dump d;
	const struct Qdisc_class_ops *cl_ops = q->ops->cl_ops;

	cond_resched();
	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
	if (!nlh)
		goto out_nlmsg_trim;
	tcm = nlmsg_data(nlh);
	tcm->tcm_family = AF_UNSPEC;
	tcm->tcm__pad1 = 0;
	tcm->tcm__pad2 = 0;
	tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
	tcm->tcm_parent = q->handle;
	tcm->tcm_handle = q->handle;
	tcm->tcm_info = 0;
	if (nla_put_string(skb, TCA_KIND, q->ops->id))
		goto nla_put_failure;
	if (cl_ops->dump && cl_ops->dump(q, cl, skb, tcm) < 0)
		goto nla_put_failure;

	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
					 NULL, &d, TCA_PAD) < 0)
		goto nla_put_failure;

	if (cl_ops->dump_stats && cl_ops->dump_stats(q, cl, &d) < 0)
		goto nla_put_failure;

	if (gnet_stats_finish_copy(&d) < 0)
		goto nla_put_failure;

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

out_nlmsg_trim:
nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int tclass_notify(struct net *net, struct sk_buff *oskb,
			 struct nlmsghdr *n, struct Qdisc *q,
			 unsigned long cl, int event)
{
	struct sk_buff *skb;
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tc_fill_tclass(skb, q, cl, portid, n->nlmsg_seq, 0, event) < 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
			      n->nlmsg_flags & NLM_F_ECHO);
}

static int tclass_del_notify(struct net *net,
			     const struct Qdisc_class_ops *cops,
			     struct sk_buff *oskb, struct nlmsghdr *n,
			     struct Qdisc *q, unsigned long cl,
			     struct netlink_ext_ack *extack)
{
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
	struct sk_buff *skb;
	int err = 0;

	if (!cops->delete)
		return -EOPNOTSUPP;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tc_fill_tclass(skb, q, cl, portid, n->nlmsg_seq, 0,
			   RTM_DELTCLASS) < 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	err = cops->delete(q, cl, extack);
	if (err) {
		kfree_skb(skb);
		return err;
	}

	err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
			     n->nlmsg_flags & NLM_F_ECHO);
	return err;
}

#ifdef CONFIG_NET_CLS

struct tcf_bind_args {
	struct tcf_walker w;
	unsigned long base;
	unsigned long cl;
	u32 classid;
};

static int tcf_node_bind(struct tcf_proto *tp, void *n, struct tcf_walker *arg)
{
	struct tcf_bind_args *a = (void *)arg;

	if (tp->ops->bind_class) {
		struct Qdisc *q = tcf_block_q(tp->chain->block);

		sch_tree_lock(q);
		tp->ops->bind_class(n, a->classid, a->cl, q, a->base);
		sch_tree_unlock(q);
	}
	return 0;
}

struct tc_bind_class_args {
	struct qdisc_walker w;
	unsigned long new_cl;
	u32 portid;
	u32 clid;
};

static int tc_bind_class_walker(struct Qdisc *q, unsigned long cl,
				struct qdisc_walker *w)
{
	struct tc_bind_class_args *a = (struct tc_bind_class_args *)w;
	const struct Qdisc_class_ops *cops = q->ops->cl_ops;
	struct tcf_block *block;
	struct tcf_chain *chain;

	block = cops->tcf_block(q, cl, NULL);
	if (!block)
		return 0;
	for (chain = tcf_get_next_chain(block, NULL);
	     chain;
	     chain = tcf_get_next_chain(block, chain)) {
		struct tcf_proto *tp;

		for (tp = tcf_get_next_proto(chain, NULL);
		     tp; tp = tcf_get_next_proto(chain, tp)) {
			struct tcf_bind_args arg = {};

			arg.w.fn = tcf_node_bind;
			arg.classid = a->clid;
			arg.base = cl;
			arg.cl = a->new_cl;
			tp->ops->walk(tp, &arg.w, true);
		}
	}

	return 0;
}

static void tc_bind_tclass(struct Qdisc *q, u32 portid, u32 clid,
			   unsigned long new_cl)
{
	const struct Qdisc_class_ops *cops = q->ops->cl_ops;
	struct tc_bind_class_args args = {};

	if (!cops->tcf_block)
		return;
	args.portid = portid;
	args.clid = clid;
	args.new_cl = new_cl;
	args.w.fn = tc_bind_class_walker;
	q->ops->cl_ops->walk(q, &args.w);
}

#else

static void tc_bind_tclass(struct Qdisc *q, u32 portid, u32 clid,
			   unsigned long new_cl)
{
}

#endif

static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n,
			 struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct tcmsg *tcm = nlmsg_data(n);
	struct nlattr *tca[TCA_MAX + 1];
	struct net_device *dev;
	struct Qdisc *q = NULL;
	const struct Qdisc_class_ops *cops;
	unsigned long cl = 0;
	unsigned long new_cl;
	u32 portid;
	u32 clid;
	u32 qid;
	int err;

	if ((n->nlmsg_type != RTM_GETTCLASS) &&
	    !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	err = nlmsg_parse_deprecated(n, sizeof(*tcm), tca, TCA_MAX,
				     rtm_tca_policy, extack);
	if (err < 0)
		return err;

	dev = __dev_get_by_index(net, tcm->tcm_ifindex);
	if (!dev)
		return -ENODEV;

	/*
	   parent == TC_H_UNSPEC - unspecified parent.
	   parent == TC_H_ROOT   - class is root, which has no parent.
	   parent == X:0	 - parent is root class.
	   parent == X:Y	 - parent is a node in hierarchy.
	   parent == 0:Y	 - parent is X:Y, where X:0 is qdisc.

	   handle == 0:0	 - generate handle from kernel pool.
	   handle == 0:Y	 - class is X:Y, where X:0 is qdisc.
	   handle == X:Y	 - clear.
	   handle == X:0	 - root class.
	 */
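
	/* Concrete example (illustrative): handles are 32 bit, 16-bit
	 * major:minor, so "1:10" in tc syntax is 0x0001000a. For
	 * "tc class add dev eth0 parent 1: classid 1:10 ...", userspace
	 * sends tcm_parent = 0x00010000 (the qdisc 1:0) and
	 * tcm_handle = 0x0001000a; the code below reconciles the majors.
	 */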

	/* Step 1. Determine qdisc handle X:0 */

	portid = tcm->tcm_parent;
	clid = tcm->tcm_handle;
	qid = TC_H_MAJ(clid);

	if (portid != TC_H_ROOT) {
		u32 qid1 = TC_H_MAJ(portid);

		if (qid && qid1) {
			/* If both majors are known, they must be identical. */
			if (qid != qid1)
				return -EINVAL;
		} else if (qid1) {
			qid = qid1;
		} else if (qid == 0)
			qid = rtnl_dereference(dev->qdisc)->handle;

		/* Now qid is genuine qdisc handle consistent
		 * both with parent and child.
		 *
		 * TC_H_MAJ(portid) still may be unspecified, complete it now.
		 */
		if (portid)
			portid = TC_H_MAKE(qid, portid);
	} else {
		if (qid == 0)
			qid = rtnl_dereference(dev->qdisc)->handle;
	}

	/* OK. Locate qdisc */
	q = qdisc_lookup(dev, qid);
	if (!q)
		return -ENOENT;

	/* And check that it supports classes */
	cops = q->ops->cl_ops;
	if (cops == NULL)
		return -EINVAL;

	/* Now try to get class */
	if (clid == 0) {
		if (portid == TC_H_ROOT)
			clid = qid;
	} else
		clid = TC_H_MAKE(qid, clid);

	if (clid)
		cl = cops->find(q, clid);

	if (cl == 0) {
		err = -ENOENT;
		if (n->nlmsg_type != RTM_NEWTCLASS ||
		    !(n->nlmsg_flags & NLM_F_CREATE))
			goto out;
	} else {
		switch (n->nlmsg_type) {
		case RTM_NEWTCLASS:
			err = -EEXIST;
			if (n->nlmsg_flags & NLM_F_EXCL)
				goto out;
			break;
		case RTM_DELTCLASS:
			err = tclass_del_notify(net, cops, skb, n, q, cl, extack);
			/* Unbind the class from its filters (bind to class 0) */
			tc_bind_tclass(q, portid, clid, 0);
			goto out;
		case RTM_GETTCLASS:
			err = tclass_notify(net, skb, n, q, cl, RTM_NEWTCLASS);
			goto out;
		default:
			err = -EINVAL;
			goto out;
		}
	}

	if (tca[TCA_INGRESS_BLOCK] || tca[TCA_EGRESS_BLOCK]) {
		NL_SET_ERR_MSG(extack, "Shared blocks are not supported for classes");
		return -EOPNOTSUPP;
	}

	new_cl = cl;
	err = -EOPNOTSUPP;
	if (cops->change)
		err = cops->change(q, clid, portid, tca, &new_cl, extack);
	if (err == 0) {
		tclass_notify(net, skb, n, q, new_cl, RTM_NEWTCLASS);
		/* We just created a new class; do the reverse binding. */
		if (cl != new_cl)
			tc_bind_tclass(q, portid, clid, new_cl);
	}
out:
	return err;
}

struct qdisc_dump_args {
	struct qdisc_walker	w;
	struct sk_buff		*skb;
	struct netlink_callback	*cb;
};

static int qdisc_class_dump(struct Qdisc *q, unsigned long cl,
			    struct qdisc_walker *arg)
{
	struct qdisc_dump_args *a = (struct qdisc_dump_args *)arg;

	return tc_fill_tclass(a->skb, q, cl, NETLINK_CB(a->cb->skb).portid,
			      a->cb->nlh->nlmsg_seq, NLM_F_MULTI,
			      RTM_NEWTCLASS);
}

static int tc_dump_tclass_qdisc(struct Qdisc *q, struct sk_buff *skb,
				struct tcmsg *tcm, struct netlink_callback *cb,
				int *t_p, int s_t)
{
	struct qdisc_dump_args arg;

	if (tc_qdisc_dump_ignore(q, false) ||
	    *t_p < s_t || !q->ops->cl_ops ||
	    (tcm->tcm_parent &&
	     TC_H_MAJ(tcm->tcm_parent) != q->handle)) {
		(*t_p)++;
		return 0;
	}
	if (*t_p > s_t)
		memset(&cb->args[1], 0, sizeof(cb->args)-sizeof(cb->args[0]));
	arg.w.fn = qdisc_class_dump;
	arg.skb = skb;
	arg.cb = cb;
	arg.w.stop = 0;
	arg.w.skip = cb->args[1];
	arg.w.count = 0;
	q->ops->cl_ops->walk(q, &arg.w);
	cb->args[1] = arg.w.count;
	if (arg.w.stop)
		return -1;
	(*t_p)++;
	return 0;
}

static int tc_dump_tclass_root(struct Qdisc *root, struct sk_buff *skb,
			       struct tcmsg *tcm, struct netlink_callback *cb,
			       int *t_p, int s_t, bool recur)
{
	struct Qdisc *q;
	int b;

	if (!root)
		return 0;

	if (tc_dump_tclass_qdisc(root, skb, tcm, cb, t_p, s_t) < 0)
		return -1;

	if (!qdisc_dev(root) || !recur)
		return 0;

	if (tcm->tcm_parent) {
		q = qdisc_match_from_root(root, TC_H_MAJ(tcm->tcm_parent));
		if (q && q != root &&
		    tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0)
			return -1;
		return 0;
	}
	hash_for_each(qdisc_dev(root)->qdisc_hash, b, q, hash) {
		if (tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0)
			return -1;
	}

	return 0;
}

static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct tcmsg *tcm = nlmsg_data(cb->nlh);
	struct net *net = sock_net(skb->sk);
	struct netdev_queue *dev_queue;
	struct net_device *dev;
	int t, s_t;

	if (nlmsg_len(cb->nlh) < sizeof(*tcm))
		return 0;
	dev = dev_get_by_index(net, tcm->tcm_ifindex);
	if (!dev)
		return 0;

	s_t = cb->args[0];
	t = 0;

	if (tc_dump_tclass_root(rtnl_dereference(dev->qdisc),
				skb, tcm, cb, &t, s_t, true) < 0)
		goto done;

	dev_queue = dev_ingress_queue(dev);
	if (dev_queue &&
	    tc_dump_tclass_root(dev_queue->qdisc_sleeping, skb, tcm, cb,
				&t, s_t, false) < 0)
		goto done;

done:
	cb->args[0] = t;

	dev_put(dev);
	return skb->len;
}

#ifdef CONFIG_PROC_FS
static int psched_show(struct seq_file *seq, void *v)
{
	seq_printf(seq, "%08x %08x %08x %08x\n",
		   (u32)NSEC_PER_USEC, (u32)PSCHED_TICKS2NS(1),
		   1000000,
		   (u32)NSEC_PER_SEC / hrtimer_resolution);

	return 0;
}

static int __net_init psched_net_init(struct net *net)
{
	struct proc_dir_entry *e;

	e = proc_create_single("psched", 0, net->proc_net, psched_show);
	if (e == NULL)
		return -ENOMEM;

	return 0;
}

static void __net_exit psched_net_exit(struct net *net)
{
	remove_proc_entry("psched", net->proc_net);
}
#else
static int __net_init psched_net_init(struct net *net)
{
	return 0;
}

static void __net_exit psched_net_exit(struct net *net)
{
}
#endif

static struct pernet_operations psched_net_ops = {
	.init = psched_net_init,
	.exit = psched_net_exit,
};

static int __init pktsched_init(void)
{
	int err;

	err = register_pernet_subsys(&psched_net_ops);
	if (err) {
		pr_err("pktsched_init: "
		       "cannot initialize per netns operations\n");
		return err;
	}

	register_qdisc(&pfifo_fast_ops);
	register_qdisc(&pfifo_qdisc_ops);
	register_qdisc(&bfifo_qdisc_ops);
	register_qdisc(&pfifo_head_drop_qdisc_ops);
	register_qdisc(&mq_qdisc_ops);
	register_qdisc(&noqueue_qdisc_ops);

	rtnl_register(PF_UNSPEC, RTM_NEWQDISC, tc_modify_qdisc, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_DELQDISC, tc_get_qdisc, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_GETQDISC, tc_get_qdisc, tc_dump_qdisc,
		      0);
	rtnl_register(PF_UNSPEC, RTM_NEWTCLASS, tc_ctl_tclass, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_DELTCLASS, tc_ctl_tclass, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_GETTCLASS, tc_ctl_tclass, tc_dump_tclass,
		      0);

	return 0;
}

subsys_initcall(pktsched_init);