/*
 * net/sched/sch_generic.c	Generic packet scheduler routines.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *		Jamal Hadi Salim, <hadi@cyberus.ca> 990601
 *		- Ingress support
 */

#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/rcupdate.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/if_vlan.h>
#include <net/sch_generic.h>
#include <net/pkt_sched.h>
#include <net/dst.h>

/* Qdisc to use by default */
const struct Qdisc_ops *default_qdisc_ops = &pfifo_fast_ops;
EXPORT_SYMBOL(default_qdisc_ops);

/* Main transmission queue. */

/* Modifications to data participating in scheduling must be protected with
 * qdisc_lock(qdisc) spinlock.
 *
 * The idea is the following:
 * - enqueue, dequeue are serialized via qdisc root lock
 * - ingress filtering is also serialized via qdisc root lock
 * - updates to tree and tree walking are only done under the rtnl mutex.
 */

static inline int dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
{
        q->gso_skb = skb;
        q->qstats.requeues++;
        q->q.qlen++;	/* it's still part of the queue */
        __netif_schedule(q);

        return 0;
}

static void try_bulk_dequeue_skb(struct Qdisc *q,
                                 struct sk_buff *skb,
                                 const struct netdev_queue *txq,
                                 int *packets)
{
        int bytelimit = qdisc_avail_bulklimit(txq) - skb->len;

        while (bytelimit > 0) {
                struct sk_buff *nskb = q->dequeue(q);

                if (!nskb)
                        break;

                bytelimit -= nskb->len; /* covers GSO len */
                skb->next = nskb;
                skb = nskb;
                (*packets)++; /* GSO counts as one pkt */
        }
        skb->next = NULL;
}
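
/* Bulk dequeue amortizes the cost of taking the driver TX lock and ringing
 * the hardware doorbell: as long as the byte budget returned by
 * qdisc_avail_bulklimit() allows it (on BQL-enabled queues this should be
 * roughly the remaining BQL headroom of the TX queue), further packets are
 * chained via skb->next and handed to the driver in one sch_direct_xmit()
 * call.  Note that a GSO skb still counts as a single packet against the
 * caller's quota.
 */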

/* Note that dequeue_skb can possibly return a SKB list (via skb->next).
 * A requeued skb (via q->gso_skb) can also be a SKB list.
 */
static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate,
                                   int *packets)
{
        struct sk_buff *skb = q->gso_skb;
        const struct netdev_queue *txq = q->dev_queue;

        *packets = 1;
        *validate = true;
        if (unlikely(skb)) {
                /* check the reason for requeuing without the tx lock first */
                txq = skb_get_tx_queue(txq->dev, skb);
                if (!netif_xmit_frozen_or_stopped(txq)) {
                        q->gso_skb = NULL;
                        q->q.qlen--;
                } else
                        skb = NULL;
                /* skbs in gso_skb have already been validated */
                *validate = false;
        } else {
                if (!(q->flags & TCQ_F_ONETXQUEUE) ||
                    !netif_xmit_frozen_or_stopped(txq)) {
                        skb = q->dequeue(q);
                        if (skb && qdisc_may_bulk(q))
                                try_bulk_dequeue_skb(q, skb, txq, packets);
                }
        }
        return skb;
}

static inline int handle_dev_cpu_collision(struct sk_buff *skb,
                                           struct netdev_queue *dev_queue,
                                           struct Qdisc *q)
{
        int ret;

        if (unlikely(dev_queue->xmit_lock_owner == smp_processor_id())) {
                /*
                 * Same CPU holding the lock. It may be a transient
                 * configuration error, when hard_start_xmit() recurses. We
                 * detect it by checking the xmit owner, and drop the packet
                 * when such a dead loop is detected. Return OK to try the
                 * next skb.
                 */
                kfree_skb_list(skb);
                net_warn_ratelimited("Dead loop on netdevice %s, fix it urgently!\n",
                                     dev_queue->dev->name);
                ret = qdisc_qlen(q);
        } else {
                /*
                 * Another CPU is holding the lock; requeue and delay xmits
                 * for some time.
                 */
                __this_cpu_inc(softnet_data.cpu_collision);
                ret = dev_requeue_skb(skb, q);
        }

        return ret;
}

/*
 * Transmit possibly several skbs, and handle the return status as
 * required. Holding the __QDISC___STATE_RUNNING bit guarantees that
 * only one CPU can execute this function.
 *
 * Returns to the caller:
 *	0  - queue is empty or throttled.
 *	>0 - queue is not empty.
 */
int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
                    struct net_device *dev, struct netdev_queue *txq,
                    spinlock_t *root_lock, bool validate)
{
        int ret = NETDEV_TX_BUSY;

        /* And release qdisc */
        spin_unlock(root_lock);

        /* Note that we validate skb (GSO, checksum, ...) outside of locks */
        if (validate)
                skb = validate_xmit_skb_list(skb, dev);

        if (likely(skb)) {
                HARD_TX_LOCK(dev, txq, smp_processor_id());
                if (!netif_xmit_frozen_or_stopped(txq))
                        skb = dev_hard_start_xmit(skb, dev, txq, &ret);

                HARD_TX_UNLOCK(dev, txq);
        } else {
                spin_lock(root_lock);
                return qdisc_qlen(q);
        }
        spin_lock(root_lock);

        if (dev_xmit_complete(ret)) {
                /* Driver sent out skb successfully or skb was consumed */
                ret = qdisc_qlen(q);
        } else if (ret == NETDEV_TX_LOCKED) {
                /* Driver try lock failed */
                ret = handle_dev_cpu_collision(skb, txq, q);
        } else {
                /* Driver returned NETDEV_TX_BUSY - requeue skb */
                if (unlikely(ret != NETDEV_TX_BUSY))
                        net_warn_ratelimited("BUG %s code %d qlen %d\n",
                                             dev->name, ret, q->q.qlen);

                ret = dev_requeue_skb(skb, q);
        }

        if (ret && netif_xmit_frozen_or_stopped(txq))
                ret = 0;

        return ret;
}

/*
 * NOTE: Called under qdisc_lock(q) with locally disabled BH.
 *
 * __QDISC___STATE_RUNNING guarantees only one CPU can process
 * this qdisc at a time. qdisc_lock(q) serializes queue accesses for
 * this queue.
 *
 * netif_tx_lock serializes accesses to the device driver.
 *
 * qdisc_lock(q) and netif_tx_lock are mutually exclusive:
 * if one is grabbed, the other must be free.
 *
 * Note that this procedure can be called by a watchdog timer.
 *
 * Returns to the caller:
 *	0  - queue is empty or throttled.
 *	>0 - queue is not empty.
 */
static inline int qdisc_restart(struct Qdisc *q, int *packets)
{
        struct netdev_queue *txq;
        struct net_device *dev;
        spinlock_t *root_lock;
        struct sk_buff *skb;
        bool validate;

        /* Dequeue packet */
        skb = dequeue_skb(q, &validate, packets);
        if (unlikely(!skb))
                return 0;

        root_lock = qdisc_lock(q);
        dev = qdisc_dev(q);
        txq = skb_get_tx_queue(dev, skb);

        return sch_direct_xmit(skb, q, dev, txq, root_lock, validate);
}

void __qdisc_run(struct Qdisc *q)
{
        int quota = weight_p;
        int packets;

        while (qdisc_restart(q, &packets)) {
                /*
                 * Ordered by possible occurrence: postpone processing if
                 * 1. we've exceeded the packet quota (weight_p, tunable
                 *    via the net.core.dev_weight sysctl), or
                 * 2. another process needs the CPU.
                 */
                quota -= packets;
                if (quota <= 0 || need_resched()) {
                        __netif_schedule(q);
                        break;
                }
        }

        qdisc_run_end(q);
}
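
/**
 * dev_trans_start - most recent start-of-transmission time
 * @dev: network device
 *
 * Returns the trans_start timestamp (in jiffies) of the most recently
 * used TX queue of @dev, resolving a VLAN device to its real device
 * first, and caches the result in dev->trans_start.
 */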
unsigned long dev_trans_start(struct net_device *dev)
{
        unsigned long val, res;
        unsigned int i;

        if (is_vlan_dev(dev))
                dev = vlan_dev_real_dev(dev);
        res = dev->trans_start;
        for (i = 0; i < dev->num_tx_queues; i++) {
                val = netdev_get_tx_queue(dev, i)->trans_start;
                if (val && time_after(val, res))
                        res = val;
        }
        dev->trans_start = res;

        return res;
}
EXPORT_SYMBOL(dev_trans_start);
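
/* Per-device TX watchdog.  While the device is present, running and has a
 * carrier, this timer refires every dev->watchdog_timeo jiffies; if some
 * stopped TX queue has not transmitted for at least watchdog_timeo, the
 * driver is notified via ndo_tx_timeout() so it can recover the hung queue.
 */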
static void dev_watchdog(unsigned long arg)
{
        struct net_device *dev = (struct net_device *)arg;

        netif_tx_lock(dev);
        if (!qdisc_tx_is_noop(dev)) {
                if (netif_device_present(dev) &&
                    netif_running(dev) &&
                    netif_carrier_ok(dev)) {
                        int some_queue_timedout = 0;
                        unsigned int i;
                        unsigned long trans_start;

                        for (i = 0; i < dev->num_tx_queues; i++) {
                                struct netdev_queue *txq;

                                txq = netdev_get_tx_queue(dev, i);
                                /*
                                 * old device drivers set dev->trans_start
                                 */
                                trans_start = txq->trans_start ? : dev->trans_start;
                                if (netif_xmit_stopped(txq) &&
                                    time_after(jiffies, (trans_start +
                                                         dev->watchdog_timeo))) {
                                        some_queue_timedout = 1;
                                        txq->trans_timeout++;
                                        break;
                                }
                        }

                        if (some_queue_timedout) {
                                WARN_ONCE(1, KERN_INFO "NETDEV WATCHDOG: %s (%s): transmit queue %u timed out\n",
                                          dev->name, netdev_drivername(dev), i);
                                dev->netdev_ops->ndo_tx_timeout(dev);
                        }
                        if (!mod_timer(&dev->watchdog_timer,
                                       round_jiffies(jiffies +
                                                     dev->watchdog_timeo)))
                                dev_hold(dev);
                }
        }
        netif_tx_unlock(dev);

        dev_put(dev);
}

void __netdev_watchdog_up(struct net_device *dev)
{
        if (dev->netdev_ops->ndo_tx_timeout) {
                if (dev->watchdog_timeo <= 0)
                        dev->watchdog_timeo = 5*HZ;
                if (!mod_timer(&dev->watchdog_timer,
                               round_jiffies(jiffies + dev->watchdog_timeo)))
                        dev_hold(dev);
        }
}

static void dev_watchdog_up(struct net_device *dev)
{
        __netdev_watchdog_up(dev);
}

static void dev_watchdog_down(struct net_device *dev)
{
        netif_tx_lock_bh(dev);
        if (del_timer(&dev->watchdog_timer))
                dev_put(dev);
        netif_tx_unlock_bh(dev);
}

/**
 *	netif_carrier_on - set carrier
 *	@dev: network device
 *
 *	Device has detected acquisition of carrier.
 */
void netif_carrier_on(struct net_device *dev)
{
        if (test_and_clear_bit(__LINK_STATE_NOCARRIER, &dev->state)) {
                if (dev->reg_state == NETREG_UNINITIALIZED)
                        return;
                atomic_inc(&dev->carrier_changes);
                linkwatch_fire_event(dev);
                if (netif_running(dev))
                        __netdev_watchdog_up(dev);
        }
}
EXPORT_SYMBOL(netif_carrier_on);

/**
 *	netif_carrier_off - clear carrier
 *	@dev: network device
 *
 *	Device has detected loss of carrier.
 */
void netif_carrier_off(struct net_device *dev)
{
        if (!test_and_set_bit(__LINK_STATE_NOCARRIER, &dev->state)) {
                if (dev->reg_state == NETREG_UNINITIALIZED)
                        return;
                atomic_inc(&dev->carrier_changes);
                linkwatch_fire_event(dev);
        }
}
EXPORT_SYMBOL(netif_carrier_off);

/* "NOOP" scheduler: the best scheduler, recommended for all interfaces
   under all circumstances. It is difficult to invent anything faster or
   cheaper.
 */

static int noop_enqueue(struct sk_buff *skb, struct Qdisc *qdisc)
{
        kfree_skb(skb);
        return NET_XMIT_CN;
}

static struct sk_buff *noop_dequeue(struct Qdisc *qdisc)
{
        return NULL;
}

struct Qdisc_ops noop_qdisc_ops __read_mostly = {
        .id		=	"noop",
        .priv_size	=	0,
        .enqueue	=	noop_enqueue,
        .dequeue	=	noop_dequeue,
        .peek		=	noop_dequeue,
        .owner		=	THIS_MODULE,
};

static struct netdev_queue noop_netdev_queue = {
        .qdisc		=	&noop_qdisc,
        .qdisc_sleeping	=	&noop_qdisc,
};

struct Qdisc noop_qdisc = {
        .enqueue	=	noop_enqueue,
        .dequeue	=	noop_dequeue,
        .flags		=	TCQ_F_BUILTIN,
        .ops		=	&noop_qdisc_ops,
        .list		=	LIST_HEAD_INIT(noop_qdisc.list),
        .q.lock		=	__SPIN_LOCK_UNLOCKED(noop_qdisc.q.lock),
        .dev_queue	=	&noop_netdev_queue,
        .busylock	=	__SPIN_LOCK_UNLOCKED(noop_qdisc.busylock),
};
EXPORT_SYMBOL(noop_qdisc);

static int noqueue_init(struct Qdisc *qdisc, struct nlattr *opt)
{
        /* register_qdisc() assigns a default of noop_enqueue if unset,
         * but __dev_queue_xmit() treats noqueue only as such
         * if this is NULL - so clear it here. */
        qdisc->enqueue = NULL;
        return 0;
}

struct Qdisc_ops noqueue_qdisc_ops __read_mostly = {
        .id		=	"noqueue",
        .priv_size	=	0,
        .init		=	noqueue_init,
        .enqueue	=	noop_enqueue,
        .dequeue	=	noop_dequeue,
        .peek		=	noop_dequeue,
        .owner		=	THIS_MODULE,
};

static const u8 prio2band[TC_PRIO_MAX + 1] = {
        1, 2, 2, 2, 1, 2, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1
};
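
/* The table is indexed by the TC_PRIO_* value of the skb (typically derived
 * from the IP TOS field) and selects one of the three pfifo_fast bands.
 * For example, TC_PRIO_BESTEFFORT (0) maps to band 1, TC_PRIO_BULK (2) to
 * the lowest-priority band 2, and TC_PRIO_INTERACTIVE (6) and
 * TC_PRIO_CONTROL (7) to the highest-priority band 0.
 */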

/* 3-band FIFO queue: old style, but should be a bit faster than
   generic prio+fifo combination.
 */

#define PFIFO_FAST_BANDS 3

/*
 * Private data for a pfifo_fast scheduler containing:
 *	- queues for the three bands
 *	- bitmap indicating which of the bands contain skbs
 */
struct pfifo_fast_priv {
        u32 bitmap;
        struct sk_buff_head q[PFIFO_FAST_BANDS];
};

/*
 * Convert a bitmap to the first band number where an skb is queued, where:
 *	bitmap=0 means there are no skbs on any band.
 *	bitmap=1 means there is an skb on band 0.
 *	bitmap=7 means there are skbs on all 3 bands, etc.
 */
static const int bitmap2band[] = {-1, 0, 1, 0, 2, 0, 1, 0};
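
/* In other words, bitmap2band[b] is the index of the least significant set
 * bit of b (like __ffs()), i.e. the highest-priority non-empty band, or -1
 * when all bands are empty.  E.g. bitmap=6 (binary 110: bands 1 and 2 hold
 * packets) yields band 1, so band 1 is drained before band 2.
 */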

static inline struct sk_buff_head *band2list(struct pfifo_fast_priv *priv,
                                             int band)
{
        return priv->q + band;
}

static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc)
{
        if (skb_queue_len(&qdisc->q) < qdisc_dev(qdisc)->tx_queue_len) {
                int band = prio2band[skb->priority & TC_PRIO_MAX];
                struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
                struct sk_buff_head *list = band2list(priv, band);

                priv->bitmap |= (1 << band);
                qdisc->q.qlen++;
                return __qdisc_enqueue_tail(skb, qdisc, list);
        }

        return qdisc_drop(skb, qdisc);
}

static struct sk_buff *pfifo_fast_dequeue(struct Qdisc *qdisc)
{
        struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
        int band = bitmap2band[priv->bitmap];

        if (likely(band >= 0)) {
                struct sk_buff_head *list = band2list(priv, band);
                struct sk_buff *skb = __qdisc_dequeue_head(qdisc, list);

                qdisc->q.qlen--;
                if (skb_queue_empty(list))
                        priv->bitmap &= ~(1 << band);

                return skb;
        }

        return NULL;
}

static struct sk_buff *pfifo_fast_peek(struct Qdisc *qdisc)
{
        struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
        int band = bitmap2band[priv->bitmap];

        if (band >= 0) {
                struct sk_buff_head *list = band2list(priv, band);

                return skb_peek(list);
        }

        return NULL;
}

static void pfifo_fast_reset(struct Qdisc *qdisc)
{
        int prio;
        struct pfifo_fast_priv *priv = qdisc_priv(qdisc);

        for (prio = 0; prio < PFIFO_FAST_BANDS; prio++)
                __qdisc_reset_queue(qdisc, band2list(priv, prio));

        priv->bitmap = 0;
        qdisc->qstats.backlog = 0;
        qdisc->q.qlen = 0;
}

static int pfifo_fast_dump(struct Qdisc *qdisc, struct sk_buff *skb)
{
        struct tc_prio_qopt opt = { .bands = PFIFO_FAST_BANDS };

        memcpy(&opt.priomap, prio2band, TC_PRIO_MAX + 1);
        if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
                goto nla_put_failure;
        return skb->len;

nla_put_failure:
        return -1;
}

static int pfifo_fast_init(struct Qdisc *qdisc, struct nlattr *opt)
{
        int prio;
        struct pfifo_fast_priv *priv = qdisc_priv(qdisc);

        for (prio = 0; prio < PFIFO_FAST_BANDS; prio++)
                __skb_queue_head_init(band2list(priv, prio));

        /* Can bypass the queue discipline */
        qdisc->flags |= TCQ_F_CAN_BYPASS;
        return 0;
}

struct Qdisc_ops pfifo_fast_ops __read_mostly = {
        .id		=	"pfifo_fast",
        .priv_size	=	sizeof(struct pfifo_fast_priv),
        .enqueue	=	pfifo_fast_enqueue,
        .dequeue	=	pfifo_fast_dequeue,
        .peek		=	pfifo_fast_peek,
        .init		=	pfifo_fast_init,
        .reset		=	pfifo_fast_reset,
        .dump		=	pfifo_fast_dump,
        .owner		=	THIS_MODULE,
};
EXPORT_SYMBOL(pfifo_fast_ops);

static struct lock_class_key qdisc_tx_busylock;
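
/* A qdisc and its private data are carved out of a single kzalloc_node()
 * allocation on the NUMA node of the TX queue.  The struct Qdisc is placed
 * at a QDISC_ALIGNTO-aligned offset within that allocation; sch->padded
 * records the offset so qdisc_rcu_free() can free the original pointer.
 */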
struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
                          const struct Qdisc_ops *ops)
{
        void *p;
        struct Qdisc *sch;
        unsigned int size = QDISC_ALIGN(sizeof(*sch)) + ops->priv_size;
        int err = -ENOBUFS;
        struct net_device *dev = dev_queue->dev;

        p = kzalloc_node(size, GFP_KERNEL,
                         netdev_queue_numa_node_read(dev_queue));

        if (!p)
                goto errout;
        sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p);
        /* if we got unaligned memory, ask for more and do the alignment
         * ourselves */
        if (sch != p) {
                kfree(p);
                p = kzalloc_node(size + QDISC_ALIGNTO - 1, GFP_KERNEL,
                                 netdev_queue_numa_node_read(dev_queue));
                if (!p)
                        goto errout;
                sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p);
                sch->padded = (char *) sch - (char *) p;
        }
        INIT_LIST_HEAD(&sch->list);
        skb_queue_head_init(&sch->q);

        spin_lock_init(&sch->busylock);
        lockdep_set_class(&sch->busylock,
                          dev->qdisc_tx_busylock ?: &qdisc_tx_busylock);

        sch->ops = ops;
        sch->enqueue = ops->enqueue;
        sch->dequeue = ops->dequeue;
        sch->dev_queue = dev_queue;
        dev_hold(dev);
        atomic_set(&sch->refcnt, 1);

        return sch;
errout:
        return ERR_PTR(err);
}

struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
                                const struct Qdisc_ops *ops,
                                unsigned int parentid)
{
        struct Qdisc *sch;

        if (!try_module_get(ops->owner))
                goto errout;

        sch = qdisc_alloc(dev_queue, ops);
        if (IS_ERR(sch))
                goto errout;
        sch->parent = parentid;

        if (!ops->init || ops->init(sch, NULL) == 0)
                return sch;

        qdisc_destroy(sch);
errout:
        return NULL;
}
EXPORT_SYMBOL(qdisc_create_dflt);

/* Under qdisc_lock(qdisc) and BH! */

void qdisc_reset(struct Qdisc *qdisc)
{
        const struct Qdisc_ops *ops = qdisc->ops;

        if (ops->reset)
                ops->reset(qdisc);

        if (qdisc->gso_skb) {
                kfree_skb_list(qdisc->gso_skb);
                qdisc->gso_skb = NULL;
                qdisc->q.qlen = 0;
        }
}
EXPORT_SYMBOL(qdisc_reset);

static void qdisc_rcu_free(struct rcu_head *head)
{
        struct Qdisc *qdisc = container_of(head, struct Qdisc, rcu_head);

        if (qdisc_is_percpu_stats(qdisc)) {
                free_percpu(qdisc->cpu_bstats);
                free_percpu(qdisc->cpu_qstats);
        }

        kfree((char *) qdisc - qdisc->padded);
}

void qdisc_destroy(struct Qdisc *qdisc)
{
        const struct Qdisc_ops *ops = qdisc->ops;

        if (qdisc->flags & TCQ_F_BUILTIN ||
            !atomic_dec_and_test(&qdisc->refcnt))
                return;

#ifdef CONFIG_NET_SCHED
        qdisc_list_del(qdisc);

        qdisc_put_stab(rtnl_dereference(qdisc->stab));
#endif
        gen_kill_estimator(&qdisc->bstats, &qdisc->rate_est);
        if (ops->reset)
                ops->reset(qdisc);
        if (ops->destroy)
                ops->destroy(qdisc);

        module_put(ops->owner);
        dev_put(qdisc_dev(qdisc));

        kfree_skb_list(qdisc->gso_skb);
        /*
         * gen_estimator est_timer() might access qdisc->q.lock,
         * wait an RCU grace period before freeing qdisc.
         */
        call_rcu(&qdisc->rcu_head, qdisc_rcu_free);
}
EXPORT_SYMBOL(qdisc_destroy);

/* Attach toplevel qdisc to device queue. */
struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
                              struct Qdisc *qdisc)
{
        struct Qdisc *oqdisc = dev_queue->qdisc_sleeping;
        spinlock_t *root_lock;

        root_lock = qdisc_lock(oqdisc);
        spin_lock_bh(root_lock);

        /* Prune old scheduler */
        if (oqdisc && atomic_read(&oqdisc->refcnt) <= 1)
                qdisc_reset(oqdisc);

        /* ... and graft new one */
        if (qdisc == NULL)
                qdisc = &noop_qdisc;
        dev_queue->qdisc_sleeping = qdisc;
        rcu_assign_pointer(dev_queue->qdisc, &noop_qdisc);

        spin_unlock_bh(root_lock);

        return oqdisc;
}
EXPORT_SYMBOL(dev_graft_qdisc);

static void attach_one_default_qdisc(struct net_device *dev,
                                     struct netdev_queue *dev_queue,
                                     void *_unused)
{
        struct Qdisc *qdisc;
        const struct Qdisc_ops *ops = default_qdisc_ops;

        if (dev->priv_flags & IFF_NO_QUEUE)
                ops = &noqueue_qdisc_ops;

        qdisc = qdisc_create_dflt(dev_queue, ops, TC_H_ROOT);
        if (!qdisc) {
                netdev_info(dev, "activation failed\n");
                return;
        }
        if (!netif_is_multiqueue(dev))
                qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
        dev_queue->qdisc_sleeping = qdisc;
}
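
/* Single-queue devices (and IFF_NO_QUEUE devices) get the default qdisc
 * attached directly to their TX queue.  Real multiqueue devices instead get
 * an mq qdisc as root, whose ->attach() then installs a per-TX-queue child
 * qdisc, so each hardware queue is scheduled independently.
 */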
static void attach_default_qdiscs(struct net_device *dev)
{
        struct netdev_queue *txq;
        struct Qdisc *qdisc;

        txq = netdev_get_tx_queue(dev, 0);

        if (!netif_is_multiqueue(dev) ||
            dev->priv_flags & IFF_NO_QUEUE) {
                netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL);
                dev->qdisc = txq->qdisc_sleeping;
                atomic_inc(&dev->qdisc->refcnt);
        } else {
                qdisc = qdisc_create_dflt(txq, &mq_qdisc_ops, TC_H_ROOT);
                if (qdisc) {
                        dev->qdisc = qdisc;
                        qdisc->ops->attach(qdisc);
                }
        }
}

static void transition_one_qdisc(struct net_device *dev,
                                 struct netdev_queue *dev_queue,
                                 void *_need_watchdog)
{
        struct Qdisc *new_qdisc = dev_queue->qdisc_sleeping;
        int *need_watchdog_p = _need_watchdog;

        if (!(new_qdisc->flags & TCQ_F_BUILTIN))
                clear_bit(__QDISC_STATE_DEACTIVATED, &new_qdisc->state);

        rcu_assign_pointer(dev_queue->qdisc, new_qdisc);
        if (need_watchdog_p) {
                dev_queue->trans_start = 0;
                *need_watchdog_p = 1;
        }
}

void dev_activate(struct net_device *dev)
{
        int need_watchdog;

        /* No queueing discipline is attached to the device yet;
         * create a default one for devices which need queueing,
         * and a noqueue qdisc for virtual interfaces.
         */

        if (dev->qdisc == &noop_qdisc)
                attach_default_qdiscs(dev);

        if (!netif_carrier_ok(dev))
                /* Delay activation until next carrier-on event */
                return;

        need_watchdog = 0;
        netdev_for_each_tx_queue(dev, transition_one_qdisc, &need_watchdog);
        if (dev_ingress_queue(dev))
                transition_one_qdisc(dev, dev_ingress_queue(dev), NULL);

        if (need_watchdog) {
                dev->trans_start = jiffies;
                dev_watchdog_up(dev);
        }
}
EXPORT_SYMBOL(dev_activate);

static void dev_deactivate_queue(struct net_device *dev,
                                 struct netdev_queue *dev_queue,
                                 void *_qdisc_default)
{
        struct Qdisc *qdisc_default = _qdisc_default;
        struct Qdisc *qdisc;

        qdisc = rtnl_dereference(dev_queue->qdisc);
        if (qdisc) {
                spin_lock_bh(qdisc_lock(qdisc));

                if (!(qdisc->flags & TCQ_F_BUILTIN))
                        set_bit(__QDISC_STATE_DEACTIVATED, &qdisc->state);

                rcu_assign_pointer(dev_queue->qdisc, qdisc_default);
                qdisc_reset(qdisc);

                spin_unlock_bh(qdisc_lock(qdisc));
        }
}

static bool some_qdisc_is_busy(struct net_device *dev)
{
        unsigned int i;

        for (i = 0; i < dev->num_tx_queues; i++) {
                struct netdev_queue *dev_queue;
                spinlock_t *root_lock;
                struct Qdisc *q;
                int val;

                dev_queue = netdev_get_tx_queue(dev, i);
                q = dev_queue->qdisc_sleeping;
                root_lock = qdisc_lock(q);

                spin_lock_bh(root_lock);

                val = (qdisc_is_running(q) ||
                       test_bit(__QDISC_STATE_SCHED, &q->state));

                spin_unlock_bh(root_lock);

                if (val)
                        return true;
        }
        return false;
}

/**
 * dev_deactivate_many - deactivate transmissions on several devices
 * @head: list of devices to deactivate
 *
 * This function returns only when all outstanding transmissions
 * have completed, unless all devices are in dismantle phase.
 */
void dev_deactivate_many(struct list_head *head)
{
        struct net_device *dev;
        bool sync_needed = false;

        list_for_each_entry(dev, head, close_list) {
                netdev_for_each_tx_queue(dev, dev_deactivate_queue,
                                         &noop_qdisc);
                if (dev_ingress_queue(dev))
                        dev_deactivate_queue(dev, dev_ingress_queue(dev),
                                             &noop_qdisc);

                dev_watchdog_down(dev);
                sync_needed |= !dev->dismantle;
        }

        /* Wait for outstanding qdisc-less dev_queue_xmit calls.
         * This is avoided if all devices are in dismantle phase:
         * the caller will call synchronize_net() for us.
         */
        if (sync_needed)
                synchronize_net();

        /* Wait for outstanding qdisc_run calls. */
        list_for_each_entry(dev, head, close_list)
                while (some_qdisc_is_busy(dev))
                        yield();
}

void dev_deactivate(struct net_device *dev)
{
        LIST_HEAD(single);

        list_add(&dev->close_list, &single);
        dev_deactivate_many(&single);
        list_del(&single);
}
EXPORT_SYMBOL(dev_deactivate);

static void dev_init_scheduler_queue(struct net_device *dev,
                                     struct netdev_queue *dev_queue,
                                     void *_qdisc)
{
        struct Qdisc *qdisc = _qdisc;

        rcu_assign_pointer(dev_queue->qdisc, qdisc);
        dev_queue->qdisc_sleeping = qdisc;
}

void dev_init_scheduler(struct net_device *dev)
{
        dev->qdisc = &noop_qdisc;
        netdev_for_each_tx_queue(dev, dev_init_scheduler_queue, &noop_qdisc);
        if (dev_ingress_queue(dev))
                dev_init_scheduler_queue(dev, dev_ingress_queue(dev), &noop_qdisc);

        setup_timer(&dev->watchdog_timer, dev_watchdog, (unsigned long)dev);
}

static void shutdown_scheduler_queue(struct net_device *dev,
                                     struct netdev_queue *dev_queue,
                                     void *_qdisc_default)
{
        struct Qdisc *qdisc = dev_queue->qdisc_sleeping;
        struct Qdisc *qdisc_default = _qdisc_default;

        if (qdisc) {
                rcu_assign_pointer(dev_queue->qdisc, qdisc_default);
                dev_queue->qdisc_sleeping = qdisc_default;

                qdisc_destroy(qdisc);
        }
}

void dev_shutdown(struct net_device *dev)
{
        netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc);
        if (dev_ingress_queue(dev))
                shutdown_scheduler_queue(dev, dev_ingress_queue(dev), &noop_qdisc);
        qdisc_destroy(dev->qdisc);
        dev->qdisc = &noop_qdisc;

        WARN_ON(timer_pending(&dev->watchdog_timer));
}

void psched_ratecfg_precompute(struct psched_ratecfg *r,
                               const struct tc_ratespec *conf,
                               u64 rate64)
{
        memset(r, 0, sizeof(*r));
        r->overhead = conf->overhead;
        r->rate_bytes_ps = max_t(u64, conf->rate, rate64);
        r->linklayer = (conf->linklayer & TC_LINKLAYER_MASK);
        r->mult = 1;
        /*
         * The deal here is to replace a divide by a reciprocal one
         * in fast path (a reciprocal divide is a multiply and a shift)
         *
         * Normal formula would be :
         *  time_in_ns = (NSEC_PER_SEC * len) / rate_bps
         *
         * We compute mult/shift to use instead :
         *  time_in_ns = (len * mult) >> shift;
         *
         * We try to get the highest possible mult value for accuracy,
         * but have to make sure no overflows will ever happen.
         */
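        /* Worked example: for rate_bytes_ps = 125,000,000 (1 Gbit/s) the
         * loop below settles on mult = 1U << 31 and shift = 28, so
         * time_in_ns = (len << 31) >> 28 = len * 8, i.e. 8 ns per byte;
         * a 1500 byte packet then costs 12000 ns of link time.
         */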
        if (r->rate_bytes_ps > 0) {
                u64 factor = NSEC_PER_SEC;

                for (;;) {
                        r->mult = div64_u64(factor, r->rate_bytes_ps);
                        if (r->mult & (1U << 31) || factor & (1ULL << 63))
                                break;
                        factor <<= 1;
                        r->shift++;
                }
        }
}
EXPORT_SYMBOL(psched_ratecfg_precompute);