/*
 * net/sched/sch_generic.c	Generic packet scheduler routines.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *		Jamal Hadi Salim, <hadi@cyberus.ca> 990601
 *		- Ingress support
 */

#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/rcupdate.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/if_vlan.h>
#include <linux/skb_array.h>
#include <linux/if_macvlan.h>
#include <net/sch_generic.h>
#include <net/pkt_sched.h>
#include <net/dst.h>
#include <trace/events/qdisc.h>
#include <net/xfrm.h>

/* Qdisc to use by default */
const struct Qdisc_ops *default_qdisc_ops = &pfifo_fast_ops;
EXPORT_SYMBOL(default_qdisc_ops);

/* Main transmission queue. */

/* Modifications to data participating in scheduling must be protected with
 * qdisc_lock(qdisc) spinlock.
 *
 * The idea is the following:
 * - enqueue, dequeue are serialized via qdisc root lock
 * - ingress filtering is also serialized via qdisc root lock
 * - updates to tree and tree walking are only done under the rtnl mutex.
 */

static inline struct sk_buff *__skb_dequeue_bad_txq(struct Qdisc *q)
{
	const struct netdev_queue *txq = q->dev_queue;
	spinlock_t *lock = NULL;
	struct sk_buff *skb;

	if (q->flags & TCQ_F_NOLOCK) {
		lock = qdisc_lock(q);
		spin_lock(lock);
	}

	skb = skb_peek(&q->skb_bad_txq);
	if (skb) {
		/* check the reason for requeuing without tx lock first */
		txq = skb_get_tx_queue(txq->dev, skb);
		if (!netif_xmit_frozen_or_stopped(txq)) {
			skb = __skb_dequeue(&q->skb_bad_txq);
			if (qdisc_is_percpu_stats(q)) {
				qdisc_qstats_cpu_backlog_dec(q, skb);
				qdisc_qstats_cpu_qlen_dec(q);
			} else {
				qdisc_qstats_backlog_dec(q, skb);
				q->q.qlen--;
			}
		} else {
			skb = NULL;
		}
	}

	if (lock)
		spin_unlock(lock);

	return skb;
}

static inline struct sk_buff *qdisc_dequeue_skb_bad_txq(struct Qdisc *q)
{
	struct sk_buff *skb = skb_peek(&q->skb_bad_txq);

	if (unlikely(skb))
		skb = __skb_dequeue_bad_txq(q);

	return skb;
}

static inline void qdisc_enqueue_skb_bad_txq(struct Qdisc *q,
					     struct sk_buff *skb)
{
	spinlock_t *lock = NULL;

	if (q->flags & TCQ_F_NOLOCK) {
		lock = qdisc_lock(q);
		spin_lock(lock);
	}

	__skb_queue_tail(&q->skb_bad_txq, skb);

	if (lock)
		spin_unlock(lock);
}
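/* Note: the skb_bad_txq helpers above park packets whose chosen tx queue
 * turned out to be frozen or stopped at dequeue time; they are retried by
 * qdisc_dequeue_skb_bad_txq() ahead of the qdisc's own ->dequeue(). The
 * requeue helpers below handle the related case of packets the driver
 * could not accept, pushing them back onto the gso_skb list while keeping
 * them accounted in qlen/backlog.
 */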
static inline int __dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
{
	while (skb) {
		struct sk_buff *next = skb->next;

		__skb_queue_tail(&q->gso_skb, skb);
		q->qstats.requeues++;
		qdisc_qstats_backlog_inc(q, skb);
		q->q.qlen++;	/* it's still part of the queue */

		skb = next;
	}
	__netif_schedule(q);

	return 0;
}

static inline int dev_requeue_skb_locked(struct sk_buff *skb, struct Qdisc *q)
{
	spinlock_t *lock = qdisc_lock(q);

	spin_lock(lock);
	while (skb) {
		struct sk_buff *next = skb->next;

		__skb_queue_tail(&q->gso_skb, skb);

		qdisc_qstats_cpu_requeues_inc(q);
		qdisc_qstats_cpu_backlog_inc(q, skb);
		qdisc_qstats_cpu_qlen_inc(q);

		skb = next;
	}
	spin_unlock(lock);

	__netif_schedule(q);

	return 0;
}

static inline int dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
{
	if (q->flags & TCQ_F_NOLOCK)
		return dev_requeue_skb_locked(skb, q);
	else
		return __dev_requeue_skb(skb, q);
}

static void try_bulk_dequeue_skb(struct Qdisc *q,
				 struct sk_buff *skb,
				 const struct netdev_queue *txq,
				 int *packets)
{
	int bytelimit = qdisc_avail_bulklimit(txq) - skb->len;

	while (bytelimit > 0) {
		struct sk_buff *nskb = q->dequeue(q);

		if (!nskb)
			break;

		bytelimit -= nskb->len; /* covers GSO len */
		skb->next = nskb;
		skb = nskb;
		(*packets)++; /* GSO counts as one pkt */
	}
	skb->next = NULL;
}

/* This variant of try_bulk_dequeue_skb() makes sure
 * all skbs in the chain are for the same txq
 */
static void try_bulk_dequeue_skb_slow(struct Qdisc *q,
				      struct sk_buff *skb,
				      int *packets)
{
	int mapping = skb_get_queue_mapping(skb);
	struct sk_buff *nskb;
	int cnt = 0;

	do {
		nskb = q->dequeue(q);
		if (!nskb)
			break;
		if (unlikely(skb_get_queue_mapping(nskb) != mapping)) {
			qdisc_enqueue_skb_bad_txq(q, nskb);

			if (qdisc_is_percpu_stats(q)) {
				qdisc_qstats_cpu_backlog_inc(q, nskb);
				qdisc_qstats_cpu_qlen_inc(q);
			} else {
				qdisc_qstats_backlog_inc(q, nskb);
				q->q.qlen++;
			}
			break;
		}
		skb->next = nskb;
		skb = nskb;
	} while (++cnt < 8);
	(*packets) += cnt;
	skb->next = NULL;
}
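/* Illustrative numbers (not normative): bytelimit above starts as the
 * txq's remaining BQL budget minus the first skb's length, so with e.g.
 * ~64KB of budget left and 1500-byte packets, roughly 40 more skbs can be
 * chained in one bulk dequeue. The _slow() variant additionally caps the
 * chain at 8 skbs and stops at the first skb mapped to a different txq.
 */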
238 */ 239 if (!skb) { 240 if (lock) 241 spin_unlock(lock); 242 goto validate; 243 } 244 245 /* skb in gso_skb were already validated */ 246 *validate = false; 247 if (xfrm_offload(skb)) 248 *validate = true; 249 /* check the reason of requeuing without tx lock first */ 250 txq = skb_get_tx_queue(txq->dev, skb); 251 if (!netif_xmit_frozen_or_stopped(txq)) { 252 skb = __skb_dequeue(&q->gso_skb); 253 if (qdisc_is_percpu_stats(q)) { 254 qdisc_qstats_cpu_backlog_dec(q, skb); 255 qdisc_qstats_cpu_qlen_dec(q); 256 } else { 257 qdisc_qstats_backlog_dec(q, skb); 258 q->q.qlen--; 259 } 260 } else { 261 skb = NULL; 262 } 263 if (lock) 264 spin_unlock(lock); 265 goto trace; 266 } 267 validate: 268 *validate = true; 269 270 if ((q->flags & TCQ_F_ONETXQUEUE) && 271 netif_xmit_frozen_or_stopped(txq)) 272 return skb; 273 274 skb = qdisc_dequeue_skb_bad_txq(q); 275 if (unlikely(skb)) 276 goto bulk; 277 skb = q->dequeue(q); 278 if (skb) { 279 bulk: 280 if (qdisc_may_bulk(q)) 281 try_bulk_dequeue_skb(q, skb, txq, packets); 282 else 283 try_bulk_dequeue_skb_slow(q, skb, packets); 284 } 285 trace: 286 trace_qdisc_dequeue(q, txq, *packets, skb); 287 return skb; 288 } 289 290 /* 291 * Transmit possibly several skbs, and handle the return status as 292 * required. Owning running seqcount bit guarantees that 293 * only one CPU can execute this function. 294 * 295 * Returns to the caller: 296 * false - hardware queue frozen backoff 297 * true - feel free to send more pkts 298 */ 299 bool sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q, 300 struct net_device *dev, struct netdev_queue *txq, 301 spinlock_t *root_lock, bool validate) 302 { 303 int ret = NETDEV_TX_BUSY; 304 bool again = false; 305 306 /* And release qdisc */ 307 if (root_lock) 308 spin_unlock(root_lock); 309 310 /* Note that we validate skb (GSO, checksum, ...) outside of locks */ 311 if (validate) 312 skb = validate_xmit_skb_list(skb, dev, &again); 313 314 #ifdef CONFIG_XFRM_OFFLOAD 315 if (unlikely(again)) { 316 if (root_lock) 317 spin_lock(root_lock); 318 319 dev_requeue_skb(skb, q); 320 return false; 321 } 322 #endif 323 324 if (likely(skb)) { 325 HARD_TX_LOCK(dev, txq, smp_processor_id()); 326 if (!netif_xmit_frozen_or_stopped(txq)) 327 skb = dev_hard_start_xmit(skb, dev, txq, &ret); 328 329 HARD_TX_UNLOCK(dev, txq); 330 } else { 331 if (root_lock) 332 spin_lock(root_lock); 333 return true; 334 } 335 336 if (root_lock) 337 spin_lock(root_lock); 338 339 if (!dev_xmit_complete(ret)) { 340 /* Driver returned NETDEV_TX_BUSY - requeue skb */ 341 if (unlikely(ret != NETDEV_TX_BUSY)) 342 net_warn_ratelimited("BUG %s code %d qlen %d\n", 343 dev->name, ret, q->q.qlen); 344 345 dev_requeue_skb(skb, q); 346 return false; 347 } 348 349 if (ret && netif_xmit_frozen_or_stopped(txq)) 350 return false; 351 352 return true; 353 } 354 355 /* 356 * NOTE: Called under qdisc_lock(q) with locally disabled BH. 357 * 358 * running seqcount guarantees only one CPU can process 359 * this qdisc at a time. qdisc_lock(q) serializes queue accesses for 360 * this queue. 361 * 362 * netif_tx_lock serializes accesses to device driver. 363 * 364 * qdisc_lock(q) and netif_tx_lock are mutually exclusive, 365 * if one is grabbed, another must be free. 366 * 367 * Note, that this procedure can be called by a watchdog timer 368 * 369 * Returns to the caller: 370 * 0 - queue is empty or throttled. 371 * >0 - queue is not empty. 
372 * 373 */ 374 static inline bool qdisc_restart(struct Qdisc *q, int *packets) 375 { 376 spinlock_t *root_lock = NULL; 377 struct netdev_queue *txq; 378 struct net_device *dev; 379 struct sk_buff *skb; 380 bool validate; 381 382 /* Dequeue packet */ 383 skb = dequeue_skb(q, &validate, packets); 384 if (unlikely(!skb)) 385 return false; 386 387 if (!(q->flags & TCQ_F_NOLOCK)) 388 root_lock = qdisc_lock(q); 389 390 dev = qdisc_dev(q); 391 txq = skb_get_tx_queue(dev, skb); 392 393 return sch_direct_xmit(skb, q, dev, txq, root_lock, validate); 394 } 395 396 void __qdisc_run(struct Qdisc *q) 397 { 398 int quota = dev_tx_weight; 399 int packets; 400 401 while (qdisc_restart(q, &packets)) { 402 /* 403 * Ordered by possible occurrence: Postpone processing if 404 * 1. we've exceeded packet quota 405 * 2. another process needs the CPU; 406 */ 407 quota -= packets; 408 if (quota <= 0 || need_resched()) { 409 __netif_schedule(q); 410 break; 411 } 412 } 413 } 414 415 unsigned long dev_trans_start(struct net_device *dev) 416 { 417 unsigned long val, res; 418 unsigned int i; 419 420 if (is_vlan_dev(dev)) 421 dev = vlan_dev_real_dev(dev); 422 else if (netif_is_macvlan(dev)) 423 dev = macvlan_dev_real_dev(dev); 424 res = netdev_get_tx_queue(dev, 0)->trans_start; 425 for (i = 1; i < dev->num_tx_queues; i++) { 426 val = netdev_get_tx_queue(dev, i)->trans_start; 427 if (val && time_after(val, res)) 428 res = val; 429 } 430 431 return res; 432 } 433 EXPORT_SYMBOL(dev_trans_start); 434 435 static void dev_watchdog(struct timer_list *t) 436 { 437 struct net_device *dev = from_timer(dev, t, watchdog_timer); 438 439 netif_tx_lock(dev); 440 if (!qdisc_tx_is_noop(dev)) { 441 if (netif_device_present(dev) && 442 netif_running(dev) && 443 netif_carrier_ok(dev)) { 444 int some_queue_timedout = 0; 445 unsigned int i; 446 unsigned long trans_start; 447 448 for (i = 0; i < dev->num_tx_queues; i++) { 449 struct netdev_queue *txq; 450 451 txq = netdev_get_tx_queue(dev, i); 452 trans_start = txq->trans_start; 453 if (netif_xmit_stopped(txq) && 454 time_after(jiffies, (trans_start + 455 dev->watchdog_timeo))) { 456 some_queue_timedout = 1; 457 txq->trans_timeout++; 458 break; 459 } 460 } 461 462 if (some_queue_timedout) { 463 WARN_ONCE(1, KERN_INFO "NETDEV WATCHDOG: %s (%s): transmit queue %u timed out\n", 464 dev->name, netdev_drivername(dev), i); 465 dev->netdev_ops->ndo_tx_timeout(dev); 466 } 467 if (!mod_timer(&dev->watchdog_timer, 468 round_jiffies(jiffies + 469 dev->watchdog_timeo))) 470 dev_hold(dev); 471 } 472 } 473 netif_tx_unlock(dev); 474 475 dev_put(dev); 476 } 477 478 void __netdev_watchdog_up(struct net_device *dev) 479 { 480 if (dev->netdev_ops->ndo_tx_timeout) { 481 if (dev->watchdog_timeo <= 0) 482 dev->watchdog_timeo = 5*HZ; 483 if (!mod_timer(&dev->watchdog_timer, 484 round_jiffies(jiffies + dev->watchdog_timeo))) 485 dev_hold(dev); 486 } 487 } 488 489 static void dev_watchdog_up(struct net_device *dev) 490 { 491 __netdev_watchdog_up(dev); 492 } 493 494 static void dev_watchdog_down(struct net_device *dev) 495 { 496 netif_tx_lock_bh(dev); 497 if (del_timer(&dev->watchdog_timer)) 498 dev_put(dev); 499 netif_tx_unlock_bh(dev); 500 } 501 502 /** 503 * netif_carrier_on - set carrier 504 * @dev: network device 505 * 506 * Device has detected that carrier. 
507 */ 508 void netif_carrier_on(struct net_device *dev) 509 { 510 if (test_and_clear_bit(__LINK_STATE_NOCARRIER, &dev->state)) { 511 if (dev->reg_state == NETREG_UNINITIALIZED) 512 return; 513 atomic_inc(&dev->carrier_up_count); 514 linkwatch_fire_event(dev); 515 if (netif_running(dev)) 516 __netdev_watchdog_up(dev); 517 } 518 } 519 EXPORT_SYMBOL(netif_carrier_on); 520 521 /** 522 * netif_carrier_off - clear carrier 523 * @dev: network device 524 * 525 * Device has detected loss of carrier. 526 */ 527 void netif_carrier_off(struct net_device *dev) 528 { 529 if (!test_and_set_bit(__LINK_STATE_NOCARRIER, &dev->state)) { 530 if (dev->reg_state == NETREG_UNINITIALIZED) 531 return; 532 atomic_inc(&dev->carrier_down_count); 533 linkwatch_fire_event(dev); 534 } 535 } 536 EXPORT_SYMBOL(netif_carrier_off); 537 538 /* "NOOP" scheduler: the best scheduler, recommended for all interfaces 539 under all circumstances. It is difficult to invent anything faster or 540 cheaper. 541 */ 542 543 static int noop_enqueue(struct sk_buff *skb, struct Qdisc *qdisc, 544 struct sk_buff **to_free) 545 { 546 __qdisc_drop(skb, to_free); 547 return NET_XMIT_CN; 548 } 549 550 static struct sk_buff *noop_dequeue(struct Qdisc *qdisc) 551 { 552 return NULL; 553 } 554 555 struct Qdisc_ops noop_qdisc_ops __read_mostly = { 556 .id = "noop", 557 .priv_size = 0, 558 .enqueue = noop_enqueue, 559 .dequeue = noop_dequeue, 560 .peek = noop_dequeue, 561 .owner = THIS_MODULE, 562 }; 563 564 static struct netdev_queue noop_netdev_queue = { 565 .qdisc = &noop_qdisc, 566 .qdisc_sleeping = &noop_qdisc, 567 }; 568 569 struct Qdisc noop_qdisc = { 570 .enqueue = noop_enqueue, 571 .dequeue = noop_dequeue, 572 .flags = TCQ_F_BUILTIN, 573 .ops = &noop_qdisc_ops, 574 .q.lock = __SPIN_LOCK_UNLOCKED(noop_qdisc.q.lock), 575 .dev_queue = &noop_netdev_queue, 576 .running = SEQCNT_ZERO(noop_qdisc.running), 577 .busylock = __SPIN_LOCK_UNLOCKED(noop_qdisc.busylock), 578 }; 579 EXPORT_SYMBOL(noop_qdisc); 580 581 static int noqueue_init(struct Qdisc *qdisc, struct nlattr *opt, 582 struct netlink_ext_ack *extack) 583 { 584 /* register_qdisc() assigns a default of noop_enqueue if unset, 585 * but __dev_queue_xmit() treats noqueue only as such 586 * if this is NULL - so clear it here. */ 587 qdisc->enqueue = NULL; 588 return 0; 589 } 590 591 struct Qdisc_ops noqueue_qdisc_ops __read_mostly = { 592 .id = "noqueue", 593 .priv_size = 0, 594 .init = noqueue_init, 595 .enqueue = noop_enqueue, 596 .dequeue = noop_dequeue, 597 .peek = noop_dequeue, 598 .owner = THIS_MODULE, 599 }; 600 601 static const u8 prio2band[TC_PRIO_MAX + 1] = { 602 1, 2, 2, 2, 1, 2, 0, 0 , 1, 1, 1, 1, 1, 1, 1, 1 603 }; 604 605 /* 3-band FIFO queue: old style, but should be a bit faster than 606 generic prio+fifo combination. 
607 */ 608 609 #define PFIFO_FAST_BANDS 3 610 611 /* 612 * Private data for a pfifo_fast scheduler containing: 613 * - rings for priority bands 614 */ 615 struct pfifo_fast_priv { 616 struct skb_array q[PFIFO_FAST_BANDS]; 617 }; 618 619 static inline struct skb_array *band2list(struct pfifo_fast_priv *priv, 620 int band) 621 { 622 return &priv->q[band]; 623 } 624 625 static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc, 626 struct sk_buff **to_free) 627 { 628 int band = prio2band[skb->priority & TC_PRIO_MAX]; 629 struct pfifo_fast_priv *priv = qdisc_priv(qdisc); 630 struct skb_array *q = band2list(priv, band); 631 int err; 632 633 err = skb_array_produce(q, skb); 634 635 if (unlikely(err)) 636 return qdisc_drop_cpu(skb, qdisc, to_free); 637 638 qdisc_qstats_cpu_qlen_inc(qdisc); 639 qdisc_qstats_cpu_backlog_inc(qdisc, skb); 640 return NET_XMIT_SUCCESS; 641 } 642 643 static struct sk_buff *pfifo_fast_dequeue(struct Qdisc *qdisc) 644 { 645 struct pfifo_fast_priv *priv = qdisc_priv(qdisc); 646 struct sk_buff *skb = NULL; 647 int band; 648 649 for (band = 0; band < PFIFO_FAST_BANDS && !skb; band++) { 650 struct skb_array *q = band2list(priv, band); 651 652 if (__skb_array_empty(q)) 653 continue; 654 655 skb = skb_array_consume_bh(q); 656 } 657 if (likely(skb)) { 658 qdisc_qstats_cpu_backlog_dec(qdisc, skb); 659 qdisc_bstats_cpu_update(qdisc, skb); 660 qdisc_qstats_cpu_qlen_dec(qdisc); 661 } 662 663 return skb; 664 } 665 666 static struct sk_buff *pfifo_fast_peek(struct Qdisc *qdisc) 667 { 668 struct pfifo_fast_priv *priv = qdisc_priv(qdisc); 669 struct sk_buff *skb = NULL; 670 int band; 671 672 for (band = 0; band < PFIFO_FAST_BANDS && !skb; band++) { 673 struct skb_array *q = band2list(priv, band); 674 675 skb = __skb_array_peek(q); 676 } 677 678 return skb; 679 } 680 681 static void pfifo_fast_reset(struct Qdisc *qdisc) 682 { 683 int i, band; 684 struct pfifo_fast_priv *priv = qdisc_priv(qdisc); 685 686 for (band = 0; band < PFIFO_FAST_BANDS; band++) { 687 struct skb_array *q = band2list(priv, band); 688 struct sk_buff *skb; 689 690 /* NULL ring is possible if destroy path is due to a failed 691 * skb_array_init() in pfifo_fast_init() case. 
692 */ 693 if (!q->ring.queue) 694 continue; 695 696 while ((skb = skb_array_consume_bh(q)) != NULL) 697 kfree_skb(skb); 698 } 699 700 for_each_possible_cpu(i) { 701 struct gnet_stats_queue *q = per_cpu_ptr(qdisc->cpu_qstats, i); 702 703 q->backlog = 0; 704 q->qlen = 0; 705 } 706 } 707 708 static int pfifo_fast_dump(struct Qdisc *qdisc, struct sk_buff *skb) 709 { 710 struct tc_prio_qopt opt = { .bands = PFIFO_FAST_BANDS }; 711 712 memcpy(&opt.priomap, prio2band, TC_PRIO_MAX + 1); 713 if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt)) 714 goto nla_put_failure; 715 return skb->len; 716 717 nla_put_failure: 718 return -1; 719 } 720 721 static int pfifo_fast_init(struct Qdisc *qdisc, struct nlattr *opt, 722 struct netlink_ext_ack *extack) 723 { 724 unsigned int qlen = qdisc_dev(qdisc)->tx_queue_len; 725 struct pfifo_fast_priv *priv = qdisc_priv(qdisc); 726 int prio; 727 728 /* guard against zero length rings */ 729 if (!qlen) 730 return -EINVAL; 731 732 for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) { 733 struct skb_array *q = band2list(priv, prio); 734 int err; 735 736 err = skb_array_init(q, qlen, GFP_KERNEL); 737 if (err) 738 return -ENOMEM; 739 } 740 741 /* Can by-pass the queue discipline */ 742 qdisc->flags |= TCQ_F_CAN_BYPASS; 743 return 0; 744 } 745 746 static void pfifo_fast_destroy(struct Qdisc *sch) 747 { 748 struct pfifo_fast_priv *priv = qdisc_priv(sch); 749 int prio; 750 751 for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) { 752 struct skb_array *q = band2list(priv, prio); 753 754 /* NULL ring is possible if destroy path is due to a failed 755 * skb_array_init() in pfifo_fast_init() case. 756 */ 757 if (!q->ring.queue) 758 continue; 759 /* Destroy ring but no need to kfree_skb because a call to 760 * pfifo_fast_reset() has already done that work. 
761 */ 762 ptr_ring_cleanup(&q->ring, NULL); 763 } 764 } 765 766 struct Qdisc_ops pfifo_fast_ops __read_mostly = { 767 .id = "pfifo_fast", 768 .priv_size = sizeof(struct pfifo_fast_priv), 769 .enqueue = pfifo_fast_enqueue, 770 .dequeue = pfifo_fast_dequeue, 771 .peek = pfifo_fast_peek, 772 .init = pfifo_fast_init, 773 .destroy = pfifo_fast_destroy, 774 .reset = pfifo_fast_reset, 775 .dump = pfifo_fast_dump, 776 .owner = THIS_MODULE, 777 .static_flags = TCQ_F_NOLOCK | TCQ_F_CPUSTATS, 778 }; 779 EXPORT_SYMBOL(pfifo_fast_ops); 780 781 static struct lock_class_key qdisc_tx_busylock; 782 static struct lock_class_key qdisc_running_key; 783 784 struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue, 785 const struct Qdisc_ops *ops, 786 struct netlink_ext_ack *extack) 787 { 788 void *p; 789 struct Qdisc *sch; 790 unsigned int size = QDISC_ALIGN(sizeof(*sch)) + ops->priv_size; 791 int err = -ENOBUFS; 792 struct net_device *dev; 793 794 if (!dev_queue) { 795 NL_SET_ERR_MSG(extack, "No device queue given"); 796 err = -EINVAL; 797 goto errout; 798 } 799 800 dev = dev_queue->dev; 801 p = kzalloc_node(size, GFP_KERNEL, 802 netdev_queue_numa_node_read(dev_queue)); 803 804 if (!p) 805 goto errout; 806 sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p); 807 /* if we got non aligned memory, ask more and do alignment ourself */ 808 if (sch != p) { 809 kfree(p); 810 p = kzalloc_node(size + QDISC_ALIGNTO - 1, GFP_KERNEL, 811 netdev_queue_numa_node_read(dev_queue)); 812 if (!p) 813 goto errout; 814 sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p); 815 sch->padded = (char *) sch - (char *) p; 816 } 817 __skb_queue_head_init(&sch->gso_skb); 818 __skb_queue_head_init(&sch->skb_bad_txq); 819 qdisc_skb_head_init(&sch->q); 820 spin_lock_init(&sch->q.lock); 821 822 if (ops->static_flags & TCQ_F_CPUSTATS) { 823 sch->cpu_bstats = 824 netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu); 825 if (!sch->cpu_bstats) 826 goto errout1; 827 828 sch->cpu_qstats = alloc_percpu(struct gnet_stats_queue); 829 if (!sch->cpu_qstats) { 830 free_percpu(sch->cpu_bstats); 831 goto errout1; 832 } 833 } 834 835 spin_lock_init(&sch->busylock); 836 lockdep_set_class(&sch->busylock, 837 dev->qdisc_tx_busylock ?: &qdisc_tx_busylock); 838 839 seqcount_init(&sch->running); 840 lockdep_set_class(&sch->running, 841 dev->qdisc_running_key ?: &qdisc_running_key); 842 843 sch->ops = ops; 844 sch->flags = ops->static_flags; 845 sch->enqueue = ops->enqueue; 846 sch->dequeue = ops->dequeue; 847 sch->dev_queue = dev_queue; 848 dev_hold(dev); 849 refcount_set(&sch->refcnt, 1); 850 851 return sch; 852 errout1: 853 kfree(p); 854 errout: 855 return ERR_PTR(err); 856 } 857 858 struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue, 859 const struct Qdisc_ops *ops, 860 unsigned int parentid, 861 struct netlink_ext_ack *extack) 862 { 863 struct Qdisc *sch; 864 865 if (!try_module_get(ops->owner)) { 866 NL_SET_ERR_MSG(extack, "Failed to increase module reference counter"); 867 return NULL; 868 } 869 870 sch = qdisc_alloc(dev_queue, ops, extack); 871 if (IS_ERR(sch)) { 872 module_put(ops->owner); 873 return NULL; 874 } 875 sch->parent = parentid; 876 877 if (!ops->init || ops->init(sch, NULL, extack) == 0) 878 return sch; 879 880 qdisc_destroy(sch); 881 return NULL; 882 } 883 EXPORT_SYMBOL(qdisc_create_dflt); 884 885 /* Under qdisc_lock(qdisc) and BH! 
/* Under qdisc_lock(qdisc) and BH! */

void qdisc_reset(struct Qdisc *qdisc)
{
	const struct Qdisc_ops *ops = qdisc->ops;
	struct sk_buff *skb, *tmp;

	if (ops->reset)
		ops->reset(qdisc);

	skb_queue_walk_safe(&qdisc->gso_skb, skb, tmp) {
		__skb_unlink(skb, &qdisc->gso_skb);
		kfree_skb_list(skb);
	}

	skb_queue_walk_safe(&qdisc->skb_bad_txq, skb, tmp) {
		__skb_unlink(skb, &qdisc->skb_bad_txq);
		kfree_skb_list(skb);
	}

	qdisc->q.qlen = 0;
	qdisc->qstats.backlog = 0;
}
EXPORT_SYMBOL(qdisc_reset);

void qdisc_free(struct Qdisc *qdisc)
{
	if (qdisc_is_percpu_stats(qdisc)) {
		free_percpu(qdisc->cpu_bstats);
		free_percpu(qdisc->cpu_qstats);
	}

	kfree((char *) qdisc - qdisc->padded);
}

void qdisc_destroy(struct Qdisc *qdisc)
{
	const struct Qdisc_ops *ops = qdisc->ops;
	struct sk_buff *skb, *tmp;

	if (qdisc->flags & TCQ_F_BUILTIN ||
	    !refcount_dec_and_test(&qdisc->refcnt))
		return;

#ifdef CONFIG_NET_SCHED
	qdisc_hash_del(qdisc);

	qdisc_put_stab(rtnl_dereference(qdisc->stab));
#endif
	gen_kill_estimator(&qdisc->rate_est);
	if (ops->reset)
		ops->reset(qdisc);
	if (ops->destroy)
		ops->destroy(qdisc);

	module_put(ops->owner);
	dev_put(qdisc_dev(qdisc));

	skb_queue_walk_safe(&qdisc->gso_skb, skb, tmp) {
		__skb_unlink(skb, &qdisc->gso_skb);
		kfree_skb_list(skb);
	}

	skb_queue_walk_safe(&qdisc->skb_bad_txq, skb, tmp) {
		__skb_unlink(skb, &qdisc->skb_bad_txq);
		kfree_skb_list(skb);
	}

	qdisc_free(qdisc);
}
EXPORT_SYMBOL(qdisc_destroy);
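/* Note: qdisc_destroy() is a no-op for builtin qdiscs (TCQ_F_BUILTIN,
 * e.g. noop_qdisc) and otherwise tears the qdisc down only once the last
 * reference is dropped, per the refcount_dec_and_test() above.
 */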
/* Attach toplevel qdisc to device queue. */
struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
			      struct Qdisc *qdisc)
{
	struct Qdisc *oqdisc = dev_queue->qdisc_sleeping;
	spinlock_t *root_lock;

	root_lock = qdisc_lock(oqdisc);
	spin_lock_bh(root_lock);

	/* ... and graft new one */
	if (qdisc == NULL)
		qdisc = &noop_qdisc;
	dev_queue->qdisc_sleeping = qdisc;
	rcu_assign_pointer(dev_queue->qdisc, &noop_qdisc);

	spin_unlock_bh(root_lock);

	return oqdisc;
}
EXPORT_SYMBOL(dev_graft_qdisc);

static void attach_one_default_qdisc(struct net_device *dev,
				     struct netdev_queue *dev_queue,
				     void *_unused)
{
	struct Qdisc *qdisc;
	const struct Qdisc_ops *ops = default_qdisc_ops;

	if (dev->priv_flags & IFF_NO_QUEUE)
		ops = &noqueue_qdisc_ops;

	qdisc = qdisc_create_dflt(dev_queue, ops, TC_H_ROOT, NULL);
	if (!qdisc) {
		netdev_info(dev, "activation failed\n");
		return;
	}
	if (!netif_is_multiqueue(dev))
		qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
	dev_queue->qdisc_sleeping = qdisc;
}

static void attach_default_qdiscs(struct net_device *dev)
{
	struct netdev_queue *txq;
	struct Qdisc *qdisc;

	txq = netdev_get_tx_queue(dev, 0);

	if (!netif_is_multiqueue(dev) ||
	    dev->priv_flags & IFF_NO_QUEUE) {
		netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL);
		dev->qdisc = txq->qdisc_sleeping;
		qdisc_refcount_inc(dev->qdisc);
	} else {
		qdisc = qdisc_create_dflt(txq, &mq_qdisc_ops, TC_H_ROOT, NULL);
		if (qdisc) {
			dev->qdisc = qdisc;
			qdisc->ops->attach(qdisc);
		}
	}
#ifdef CONFIG_NET_SCHED
	if (dev->qdisc != &noop_qdisc)
		qdisc_hash_add(dev->qdisc, false);
#endif
}

static void transition_one_qdisc(struct net_device *dev,
				 struct netdev_queue *dev_queue,
				 void *_need_watchdog)
{
	struct Qdisc *new_qdisc = dev_queue->qdisc_sleeping;
	int *need_watchdog_p = _need_watchdog;

	if (!(new_qdisc->flags & TCQ_F_BUILTIN))
		clear_bit(__QDISC_STATE_DEACTIVATED, &new_qdisc->state);

	rcu_assign_pointer(dev_queue->qdisc, new_qdisc);
	if (need_watchdog_p) {
		dev_queue->trans_start = 0;
		*need_watchdog_p = 1;
	}
}

void dev_activate(struct net_device *dev)
{
	int need_watchdog;

	/* No queueing discipline is attached to device;
	 * create a default one for devices which need queueing
	 * and noqueue_qdisc for virtual interfaces
	 */

	if (dev->qdisc == &noop_qdisc)
		attach_default_qdiscs(dev);

	if (!netif_carrier_ok(dev))
		/* Delay activation until next carrier-on event */
		return;

	need_watchdog = 0;
	netdev_for_each_tx_queue(dev, transition_one_qdisc, &need_watchdog);
	if (dev_ingress_queue(dev))
		transition_one_qdisc(dev, dev_ingress_queue(dev), NULL);

	if (need_watchdog) {
		netif_trans_update(dev);
		dev_watchdog_up(dev);
	}
}
EXPORT_SYMBOL(dev_activate);

static void dev_deactivate_queue(struct net_device *dev,
				 struct netdev_queue *dev_queue,
				 void *_qdisc_default)
{
	struct Qdisc *qdisc_default = _qdisc_default;
	struct Qdisc *qdisc;

	qdisc = rtnl_dereference(dev_queue->qdisc);
	if (qdisc) {
		spin_lock_bh(qdisc_lock(qdisc));

		if (!(qdisc->flags & TCQ_F_BUILTIN))
			set_bit(__QDISC_STATE_DEACTIVATED, &qdisc->state);

		rcu_assign_pointer(dev_queue->qdisc, qdisc_default);
		qdisc_reset(qdisc);

		spin_unlock_bh(qdisc_lock(qdisc));
	}
}
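/* Deactivation protocol: dev_deactivate_many() below marks each qdisc
 * __QDISC_STATE_DEACTIVATED and swaps noop_qdisc in, then spins in
 * some_qdisc_is_busy() until no CPU is still running or has scheduled
 * the old qdisc before its state is finally unwound.
 */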
static bool some_qdisc_is_busy(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *dev_queue;
		spinlock_t *root_lock;
		struct Qdisc *q;
		int val;

		dev_queue = netdev_get_tx_queue(dev, i);
		q = dev_queue->qdisc_sleeping;

		if (q->flags & TCQ_F_NOLOCK) {
			val = test_bit(__QDISC_STATE_SCHED, &q->state);
		} else {
			root_lock = qdisc_lock(q);
			spin_lock_bh(root_lock);

			val = (qdisc_is_running(q) ||
			       test_bit(__QDISC_STATE_SCHED, &q->state));

			spin_unlock_bh(root_lock);
		}

		if (val)
			return true;
	}
	return false;
}

static void dev_qdisc_reset(struct net_device *dev,
			    struct netdev_queue *dev_queue,
			    void *none)
{
	struct Qdisc *qdisc = dev_queue->qdisc_sleeping;

	if (qdisc)
		qdisc_reset(qdisc);
}

/**
 * dev_deactivate_many - deactivate transmissions on several devices
 * @head: list of devices to deactivate
 *
 * This function returns only when all outstanding transmissions
 * have completed, unless all devices are in dismantle phase.
 */
void dev_deactivate_many(struct list_head *head)
{
	struct net_device *dev;

	list_for_each_entry(dev, head, close_list) {
		netdev_for_each_tx_queue(dev, dev_deactivate_queue,
					 &noop_qdisc);
		if (dev_ingress_queue(dev))
			dev_deactivate_queue(dev, dev_ingress_queue(dev),
					     &noop_qdisc);

		dev_watchdog_down(dev);
	}

	/* Wait for outstanding qdisc-less dev_queue_xmit calls.
	 * This is avoided if all devices are in dismantle phase:
	 * caller will call synchronize_net() for us.
	 */
	synchronize_net();

	/* Wait for outstanding qdisc_run calls. */
	list_for_each_entry(dev, head, close_list) {
		while (some_qdisc_is_busy(dev))
			yield();
		/* The new qdisc is assigned at this point so we can safely
		 * unwind stale skb lists and qdisc statistics
		 */
		netdev_for_each_tx_queue(dev, dev_qdisc_reset, NULL);
		if (dev_ingress_queue(dev))
			dev_qdisc_reset(dev, dev_ingress_queue(dev), NULL);
	}
}

void dev_deactivate(struct net_device *dev)
{
	LIST_HEAD(single);

	list_add(&dev->close_list, &single);
	dev_deactivate_many(&single);
	list_del(&single);
}
EXPORT_SYMBOL(dev_deactivate);

static void dev_init_scheduler_queue(struct net_device *dev,
				     struct netdev_queue *dev_queue,
				     void *_qdisc)
{
	struct Qdisc *qdisc = _qdisc;

	rcu_assign_pointer(dev_queue->qdisc, qdisc);
	dev_queue->qdisc_sleeping = qdisc;
	__skb_queue_head_init(&qdisc->gso_skb);
	__skb_queue_head_init(&qdisc->skb_bad_txq);
}

void dev_init_scheduler(struct net_device *dev)
{
	dev->qdisc = &noop_qdisc;
	netdev_for_each_tx_queue(dev, dev_init_scheduler_queue, &noop_qdisc);
	if (dev_ingress_queue(dev))
		dev_init_scheduler_queue(dev, dev_ingress_queue(dev),
					 &noop_qdisc);

	timer_setup(&dev->watchdog_timer, dev_watchdog, 0);
}

static void shutdown_scheduler_queue(struct net_device *dev,
				     struct netdev_queue *dev_queue,
				     void *_qdisc_default)
{
	struct Qdisc *qdisc = dev_queue->qdisc_sleeping;
	struct Qdisc *qdisc_default = _qdisc_default;

	if (qdisc) {
		rcu_assign_pointer(dev_queue->qdisc, qdisc_default);
		dev_queue->qdisc_sleeping = qdisc_default;

		qdisc_destroy(qdisc);
	}
}

void dev_shutdown(struct net_device *dev)
{
	netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc);
	if (dev_ingress_queue(dev))
		shutdown_scheduler_queue(dev, dev_ingress_queue(dev),
					 &noop_qdisc);
	qdisc_destroy(dev->qdisc);
	dev->qdisc = &noop_qdisc;

	WARN_ON(timer_pending(&dev->watchdog_timer));
}

void psched_ratecfg_precompute(struct psched_ratecfg *r,
			       const struct tc_ratespec *conf,
			       u64 rate64)
{
	memset(r, 0, sizeof(*r));
	r->overhead = conf->overhead;
	r->rate_bytes_ps = max_t(u64, conf->rate, rate64);
	r->linklayer = (conf->linklayer & TC_LINKLAYER_MASK);
	r->mult = 1;
	/*
	 * The deal here is to replace a divide by a reciprocal one
	 * in fast path (a reciprocal divide is a multiply and a shift)
	 *
	 * Normal formula would be:
	 *	time_in_ns = (NSEC_PER_SEC * len) / rate_bps
	 *
	 * We compute mult/shift to use instead:
	 *	time_in_ns = (len * mult) >> shift;
	 *
	 * We try to get the highest possible mult value for accuracy,
	 * but have to make sure no overflows will ever happen.
	 */
	if (r->rate_bytes_ps > 0) {
		u64 factor = NSEC_PER_SEC;

		for (;;) {
			r->mult = div64_u64(factor, r->rate_bytes_ps);
			if (r->mult & (1U << 31) || factor & (1ULL << 63))
				break;
			factor <<= 1;
			r->shift++;
		}
	}
}
EXPORT_SYMBOL(psched_ratecfg_precompute);
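/* Worked example (approximate): for a 1 Mbit/s rate, rate_bytes_ps is
 * 125000, i.e. 8000 ns per byte. The loop above doubles factor until mult
 * has bit 31 set, yielding a mult/shift pair with
 * (len * mult) >> shift ~= len * 8000, so a 1500-byte packet costs about
 * 12 ms of transmit time.
 */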
static void mini_qdisc_rcu_func(struct rcu_head *head)
{
}

void mini_qdisc_pair_swap(struct mini_Qdisc_pair *miniqp,
			  struct tcf_proto *tp_head)
{
	struct mini_Qdisc *miniq_old = rtnl_dereference(*miniqp->p_miniq);
	struct mini_Qdisc *miniq;

	if (!tp_head) {
		RCU_INIT_POINTER(*miniqp->p_miniq, NULL);
		/* Wait for any in-flight RCU callback before it is freed. */
		rcu_barrier_bh();
		return;
	}

	miniq = !miniq_old || miniq_old == &miniqp->miniq2 ?
		&miniqp->miniq1 : &miniqp->miniq2;

	/* We need to make sure that readers won't see the miniq
	 * we are about to modify. So wait until previous call_rcu_bh callback
	 * is done.
	 */
	rcu_barrier_bh();
	miniq->filter_list = tp_head;
	rcu_assign_pointer(*miniqp->p_miniq, miniq);

	if (miniq_old)
		/* This is the counterpart of the rcu barriers above. We need
		 * to block potential new users of miniq_old until all readers
		 * have stopped seeing it.
		 */
		call_rcu_bh(&miniq_old->rcu, mini_qdisc_rcu_func);
}
EXPORT_SYMBOL(mini_qdisc_pair_swap);

void mini_qdisc_pair_init(struct mini_Qdisc_pair *miniqp, struct Qdisc *qdisc,
			  struct mini_Qdisc __rcu **p_miniq)
{
	miniqp->miniq1.cpu_bstats = qdisc->cpu_bstats;
	miniqp->miniq1.cpu_qstats = qdisc->cpu_qstats;
	miniqp->miniq2.cpu_bstats = qdisc->cpu_bstats;
	miniqp->miniq2.cpu_qstats = qdisc->cpu_qstats;
	miniqp->p_miniq = p_miniq;
}
EXPORT_SYMBOL(mini_qdisc_pair_init);