/*
 * net/sched/sch_generic.c	Generic packet scheduler routines.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *			Jamal Hadi Salim, <hadi@cyberus.ca> 990601
 *			- Ingress support
 */

#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/rcupdate.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <net/pkt_sched.h>
#include <net/dst.h>

/* Main transmission queue. */

/* Modifications to data participating in scheduling must be protected with
 * qdisc_lock(qdisc) spinlock.
 *
 * The idea is the following:
 * - enqueue, dequeue are serialized via qdisc root lock
 * - ingress filtering is also serialized via qdisc root lock
 * - updates to tree and tree walking are only done under the rtnl mutex.
 */

static inline int dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
{
	skb_dst_force(skb);
	q->gso_skb = skb;
	q->qstats.requeues++;
	q->q.qlen++;	/* it's still part of the queue */
	__netif_schedule(q);

	return 0;
}

static inline struct sk_buff *dequeue_skb(struct Qdisc *q)
{
	struct sk_buff *skb = q->gso_skb;

	if (unlikely(skb)) {
		struct net_device *dev = qdisc_dev(q);
		struct netdev_queue *txq;

		/* check the reason for requeuing, without taking the tx lock first */
		txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
		if (!netif_tx_queue_stopped(txq) &&
		    !netif_tx_queue_frozen(txq)) {
			q->gso_skb = NULL;
			q->q.qlen--;
		} else
			skb = NULL;
	} else {
		skb = q->dequeue(q);
	}

	return skb;
}

static inline int handle_dev_cpu_collision(struct sk_buff *skb,
					   struct netdev_queue *dev_queue,
					   struct Qdisc *q)
{
	int ret;

	if (unlikely(dev_queue->xmit_lock_owner == smp_processor_id())) {
		/*
		 * Same CPU holding the lock. It may be a transient
		 * configuration error, when hard_start_xmit() recurses. We
		 * detect it by checking xmit owner and drop the packet when
		 * deadloop is detected. Return OK to try the next skb.
		 */
		kfree_skb(skb);
		if (net_ratelimit())
			printk(KERN_WARNING "Dead loop on netdevice %s, "
			       "fix it urgently!\n", dev_queue->dev->name);
		ret = qdisc_qlen(q);
	} else {
		/*
		 * Another cpu is holding the lock; requeue & delay xmits for
		 * some time.
		 */
		__this_cpu_inc(softnet_data.cpu_collision);
		ret = dev_requeue_skb(skb, q);
	}

	return ret;
}
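/*
 * Worked trace of the two paths above (illustration only): if the driver's
 * hard_start_xmit() recurses into dev_queue_xmit() on the same device,
 * xmit_lock_owner equals this CPU and the skb is dropped to break the dead
 * loop.  If instead another CPU merely holds the tx lock, dev_requeue_skb()
 * parks the skb in q->gso_skb, keeps it counted in q->q.qlen, and
 * reschedules the qdisc; dequeue_skb() hands that same skb back on the next
 * qdisc_restart() pass, once the tx queue is neither stopped nor frozen.
 */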
/*
 * Transmit one skb, and handle the return status as required. Holding the
 * __QDISC_STATE_RUNNING bit guarantees that only one CPU can execute this
 * function.
 *
 * Returns to the caller:
 *	0  - queue is empty or throttled.
 *	>0 - queue is not empty.
 */
int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
		    struct net_device *dev, struct netdev_queue *txq,
		    spinlock_t *root_lock)
{
	int ret = NETDEV_TX_BUSY;

	/* And release qdisc */
	spin_unlock(root_lock);

	HARD_TX_LOCK(dev, txq, smp_processor_id());
	if (!netif_tx_queue_stopped(txq) && !netif_tx_queue_frozen(txq))
		ret = dev_hard_start_xmit(skb, dev, txq);

	HARD_TX_UNLOCK(dev, txq);

	spin_lock(root_lock);

	if (dev_xmit_complete(ret)) {
		/* Driver sent out skb successfully or skb was consumed */
		ret = qdisc_qlen(q);
	} else if (ret == NETDEV_TX_LOCKED) {
		/* Driver's try-lock failed */
		ret = handle_dev_cpu_collision(skb, txq, q);
	} else {
		/* Driver returned NETDEV_TX_BUSY - requeue skb */
		if (unlikely(ret != NETDEV_TX_BUSY && net_ratelimit()))
			printk(KERN_WARNING "BUG %s code %d qlen %d\n",
			       dev->name, ret, q->q.qlen);

		ret = dev_requeue_skb(skb, q);
	}

	if (ret && (netif_tx_queue_stopped(txq) ||
		    netif_tx_queue_frozen(txq)))
		ret = 0;

	return ret;
}

/*
 * NOTE: Called under qdisc_lock(q) with locally disabled BH.
 *
 * __QDISC_STATE_RUNNING guarantees only one CPU can process
 * this qdisc at a time. qdisc_lock(q) serializes queue accesses for
 * this queue.
 *
 * netif_tx_lock serializes accesses to the device driver.
 *
 * qdisc_lock(q) and netif_tx_lock are mutually exclusive:
 * if one is grabbed, the other must be free.
 *
 * Note that this procedure can be called by a watchdog timer.
 *
 * Returns to the caller:
 *	0  - queue is empty or throttled.
 *	>0 - queue is not empty.
 */
static inline int qdisc_restart(struct Qdisc *q)
{
	struct netdev_queue *txq;
	struct net_device *dev;
	spinlock_t *root_lock;
	struct sk_buff *skb;

	/* Dequeue packet */
	skb = dequeue_skb(q);
	if (unlikely(!skb))
		return 0;
	WARN_ON_ONCE(skb_dst_is_noref(skb));
	root_lock = qdisc_lock(q);
	dev = qdisc_dev(q);
	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

	return sch_direct_xmit(skb, q, dev, txq, root_lock);
}

void __qdisc_run(struct Qdisc *q)
{
	unsigned long start_time = jiffies;

	while (qdisc_restart(q)) {
		/*
		 * Postpone processing if
		 * 1. another process needs the CPU;
		 * 2. we've been doing it for too long.
		 */
		if (need_resched() || jiffies != start_time) {
			__netif_schedule(q);
			break;
		}
	}

	qdisc_run_end(q);
}

unsigned long dev_trans_start(struct net_device *dev)
{
	unsigned long val, res = dev->trans_start;
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		val = netdev_get_tx_queue(dev, i)->trans_start;
		if (val && time_after(val, res))
			res = val;
	}
	dev->trans_start = res;
	return res;
}
EXPORT_SYMBOL(dev_trans_start);
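/*
 * Timing sketch for the watchdog below (illustration only; the numbers are
 * assumptions, not taken from any driver): with the default watchdog_timeo
 * of 5*HZ set in __netdev_watchdog_up(), a queue that stopped transmitting
 * at txq->trans_start == T is reported as hung once time_after(jiffies,
 * T + 5*HZ) holds, i.e. roughly five seconds later regardless of HZ.
 * round_jiffies() batches the timer re-arm onto whole-second boundaries so
 * periodic timers across devices tend to expire together.
 */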
static void dev_watchdog(unsigned long arg)
{
	struct net_device *dev = (struct net_device *)arg;

	netif_tx_lock(dev);
	if (!qdisc_tx_is_noop(dev)) {
		if (netif_device_present(dev) &&
		    netif_running(dev) &&
		    netif_carrier_ok(dev)) {
			int some_queue_timedout = 0;
			unsigned int i;
			unsigned long trans_start;

			for (i = 0; i < dev->num_tx_queues; i++) {
				struct netdev_queue *txq;

				txq = netdev_get_tx_queue(dev, i);
				/*
				 * old device drivers set dev->trans_start
				 */
				trans_start = txq->trans_start ? : dev->trans_start;
				if (netif_tx_queue_stopped(txq) &&
				    time_after(jiffies, (trans_start +
							 dev->watchdog_timeo))) {
					some_queue_timedout = 1;
					break;
				}
			}

			if (some_queue_timedout) {
				char drivername[64];
				WARN_ONCE(1, KERN_INFO "NETDEV WATCHDOG: %s (%s): transmit queue %u timed out\n",
					  dev->name, netdev_drivername(dev, drivername, 64), i);
				dev->netdev_ops->ndo_tx_timeout(dev);
			}
			if (!mod_timer(&dev->watchdog_timer,
				       round_jiffies(jiffies +
						     dev->watchdog_timeo)))
				dev_hold(dev);
		}
	}
	netif_tx_unlock(dev);

	dev_put(dev);
}

void __netdev_watchdog_up(struct net_device *dev)
{
	if (dev->netdev_ops->ndo_tx_timeout) {
		if (dev->watchdog_timeo <= 0)
			dev->watchdog_timeo = 5*HZ;
		if (!mod_timer(&dev->watchdog_timer,
			       round_jiffies(jiffies + dev->watchdog_timeo)))
			dev_hold(dev);
	}
}

static void dev_watchdog_up(struct net_device *dev)
{
	__netdev_watchdog_up(dev);
}

static void dev_watchdog_down(struct net_device *dev)
{
	netif_tx_lock_bh(dev);
	if (del_timer(&dev->watchdog_timer))
		dev_put(dev);
	netif_tx_unlock_bh(dev);
}

/**
 *	netif_carrier_on - set carrier
 *	@dev: network device
 *
 *	Device has detected acquisition of carrier.
 */
void netif_carrier_on(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_NOCARRIER, &dev->state)) {
		if (dev->reg_state == NETREG_UNINITIALIZED)
			return;
		linkwatch_fire_event(dev);
		if (netif_running(dev))
			__netdev_watchdog_up(dev);
	}
}
EXPORT_SYMBOL(netif_carrier_on);

/**
 *	netif_carrier_off - clear carrier
 *	@dev: network device
 *
 *	Device has detected loss of carrier.
 */
void netif_carrier_off(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_NOCARRIER, &dev->state)) {
		if (dev->reg_state == NETREG_UNINITIALIZED)
			return;
		linkwatch_fire_event(dev);
	}
}
EXPORT_SYMBOL(netif_carrier_off);

/**
 *	netif_notify_peers - notify network peers about existence of @dev
 *	@dev: network device
 *
 *	Generate traffic such that interested network peers are aware of
 *	@dev, such as by generating a gratuitous ARP. This may be used when
 *	a device wants to inform the rest of the network about some sort of
 *	reconfiguration such as a failover event or virtual machine
 *	migration.
 */
void netif_notify_peers(struct net_device *dev)
{
	rtnl_lock();
	call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
	rtnl_unlock();
}
EXPORT_SYMBOL(netif_notify_peers);
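/*
 * Typical driver usage of the carrier helpers above, as a minimal sketch.
 * Everything named example_* below is hypothetical and only illustrates the
 * calling convention; it is not part of this file or of any real driver.
 */
#if 0
static irqreturn_t example_link_irq(int irq, void *data)
{
	struct example_priv *priv = data;	/* hypothetical driver state */

	if (example_phy_link_up(priv))		/* hypothetical PHY query */
		/* fires a linkwatch event and re-arms the tx watchdog */
		netif_carrier_on(priv->netdev);
	else
		netif_carrier_off(priv->netdev);

	return IRQ_HANDLED;
}
#endif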
/* "NOOP" scheduler: the best scheduler, recommended for all interfaces
   under all circumstances. It is difficult to invent anything faster or
   cheaper.
 */

static int noop_enqueue(struct sk_buff *skb, struct Qdisc *qdisc)
{
	kfree_skb(skb);
	return NET_XMIT_CN;
}

static struct sk_buff *noop_dequeue(struct Qdisc *qdisc)
{
	return NULL;
}

struct Qdisc_ops noop_qdisc_ops __read_mostly = {
	.id		=	"noop",
	.priv_size	=	0,
	.enqueue	=	noop_enqueue,
	.dequeue	=	noop_dequeue,
	.peek		=	noop_dequeue,
	.owner		=	THIS_MODULE,
};

static struct netdev_queue noop_netdev_queue = {
	.qdisc		=	&noop_qdisc,
	.qdisc_sleeping	=	&noop_qdisc,
};

struct Qdisc noop_qdisc = {
	.enqueue	=	noop_enqueue,
	.dequeue	=	noop_dequeue,
	.flags		=	TCQ_F_BUILTIN,
	.ops		=	&noop_qdisc_ops,
	.list		=	LIST_HEAD_INIT(noop_qdisc.list),
	.q.lock		=	__SPIN_LOCK_UNLOCKED(noop_qdisc.q.lock),
	.dev_queue	=	&noop_netdev_queue,
	.busylock	=	__SPIN_LOCK_UNLOCKED(noop_qdisc.busylock),
};
EXPORT_SYMBOL(noop_qdisc);

static struct Qdisc_ops noqueue_qdisc_ops __read_mostly = {
	.id		=	"noqueue",
	.priv_size	=	0,
	.enqueue	=	noop_enqueue,
	.dequeue	=	noop_dequeue,
	.peek		=	noop_dequeue,
	.owner		=	THIS_MODULE,
};

static struct Qdisc noqueue_qdisc;
static struct netdev_queue noqueue_netdev_queue = {
	.qdisc		=	&noqueue_qdisc,
	.qdisc_sleeping	=	&noqueue_qdisc,
};

static struct Qdisc noqueue_qdisc = {
	.enqueue	=	NULL,
	.dequeue	=	noop_dequeue,
	.flags		=	TCQ_F_BUILTIN,
	.ops		=	&noqueue_qdisc_ops,
	.list		=	LIST_HEAD_INIT(noqueue_qdisc.list),
	.q.lock		=	__SPIN_LOCK_UNLOCKED(noqueue_qdisc.q.lock),
	.dev_queue	=	&noqueue_netdev_queue,
	.busylock	=	__SPIN_LOCK_UNLOCKED(noqueue_qdisc.busylock),
};


static const u8 prio2band[TC_PRIO_MAX + 1] =
	{ 1, 2, 2, 2, 1, 2, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1 };
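/*
 * Worked example for the prio2band table above: skb->priority is masked
 * with TC_PRIO_MAX (15) at enqueue time, so TC_PRIO_CONTROL (7) and
 * TC_PRIO_INTERACTIVE (6) land in band 0, TC_PRIO_BESTEFFORT (0) in
 * band 1, and TC_PRIO_BULK (2) in band 2.  Lower-numbered bands are
 * dequeued first by the pfifo_fast scheduler below.
 */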
/* 3-band FIFO queue: old style, but should be a bit faster than
   the generic prio+fifo combination.
 */

#define PFIFO_FAST_BANDS 3

/*
 * Private data for a pfifo_fast scheduler containing:
 *	- queues for the three bands
 *	- bitmap indicating which of the bands contain skbs
 */
struct pfifo_fast_priv {
	u32 bitmap;
	struct sk_buff_head q[PFIFO_FAST_BANDS];
};

/*
 * Convert a bitmap to the first band number where an skb is queued, where:
 *	bitmap=0 means there are no skbs on any band.
 *	bitmap=1 means there is an skb on band 0.
 *	bitmap=7 means there are skbs on all 3 bands, etc.
 */
static const int bitmap2band[] = {-1, 0, 1, 0, 2, 0, 1, 0};

static inline struct sk_buff_head *band2list(struct pfifo_fast_priv *priv,
					     int band)
{
	return priv->q + band;
}

static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc)
{
	if (skb_queue_len(&qdisc->q) < qdisc_dev(qdisc)->tx_queue_len) {
		int band = prio2band[skb->priority & TC_PRIO_MAX];
		struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
		struct sk_buff_head *list = band2list(priv, band);

		priv->bitmap |= (1 << band);
		qdisc->q.qlen++;
		return __qdisc_enqueue_tail(skb, qdisc, list);
	}

	return qdisc_drop(skb, qdisc);
}

static struct sk_buff *pfifo_fast_dequeue(struct Qdisc *qdisc)
{
	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
	int band = bitmap2band[priv->bitmap];

	if (likely(band >= 0)) {
		struct sk_buff_head *list = band2list(priv, band);
		struct sk_buff *skb = __qdisc_dequeue_head(qdisc, list);

		qdisc->q.qlen--;
		if (skb_queue_empty(list))
			priv->bitmap &= ~(1 << band);

		return skb;
	}

	return NULL;
}

static struct sk_buff *pfifo_fast_peek(struct Qdisc *qdisc)
{
	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
	int band = bitmap2band[priv->bitmap];

	if (band >= 0) {
		struct sk_buff_head *list = band2list(priv, band);

		return skb_peek(list);
	}

	return NULL;
}

static void pfifo_fast_reset(struct Qdisc *qdisc)
{
	int prio;
	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);

	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++)
		__qdisc_reset_queue(qdisc, band2list(priv, prio));

	priv->bitmap = 0;
	qdisc->qstats.backlog = 0;
	qdisc->q.qlen = 0;
}

static int pfifo_fast_dump(struct Qdisc *qdisc, struct sk_buff *skb)
{
	struct tc_prio_qopt opt = { .bands = PFIFO_FAST_BANDS };

	memcpy(&opt.priomap, prio2band, TC_PRIO_MAX + 1);
	NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
	return skb->len;

nla_put_failure:
	return -1;
}

static int pfifo_fast_init(struct Qdisc *qdisc, struct nlattr *opt)
{
	int prio;
	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);

	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++)
		skb_queue_head_init(band2list(priv, prio));

	return 0;
}

struct Qdisc_ops pfifo_fast_ops __read_mostly = {
	.id		=	"pfifo_fast",
	.priv_size	=	sizeof(struct pfifo_fast_priv),
	.enqueue	=	pfifo_fast_enqueue,
	.dequeue	=	pfifo_fast_dequeue,
	.peek		=	pfifo_fast_peek,
	.init		=	pfifo_fast_init,
	.reset		=	pfifo_fast_reset,
	.dump		=	pfifo_fast_dump,
	.owner		=	THIS_MODULE,
};
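/*
 * Worked example for the bitmap logic above (illustration only): after
 * enqueueing one skb on band 1 and one on band 2, priv->bitmap is
 * 0b110 == 6 and bitmap2band[6] == 1, so the band 1 skb is dequeued first.
 * Once band 1 drains, the bitmap drops to 0b100 == 4 and bitmap2band[4] == 2
 * selects the remaining band.  The table simply encodes find-first-set for
 * all eight 3-bit values, avoiding an ffs() call on the hot path.
 */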
struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
			  struct Qdisc_ops *ops)
{
	void *p;
	struct Qdisc *sch;
	unsigned int size;
	int err = -ENOBUFS;

	/* ensure that the Qdisc and the private data are 64-byte aligned */
	size = QDISC_ALIGN(sizeof(*sch));
	size += ops->priv_size + (QDISC_ALIGNTO - 1);

	p = kzalloc(size, GFP_KERNEL);
	if (!p)
		goto errout;
	sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p);
	sch->padded = (char *) sch - (char *) p;

	INIT_LIST_HEAD(&sch->list);
	skb_queue_head_init(&sch->q);
	spin_lock_init(&sch->busylock);
	sch->ops = ops;
	sch->enqueue = ops->enqueue;
	sch->dequeue = ops->dequeue;
	sch->dev_queue = dev_queue;
	dev_hold(qdisc_dev(sch));
	atomic_set(&sch->refcnt, 1);

	return sch;
errout:
	return ERR_PTR(err);
}

struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
				struct Qdisc_ops *ops, unsigned int parentid)
{
	struct Qdisc *sch;

	sch = qdisc_alloc(dev_queue, ops);
	if (IS_ERR(sch))
		goto errout;
	sch->parent = parentid;

	if (!ops->init || ops->init(sch, NULL) == 0)
		return sch;

	qdisc_destroy(sch);
errout:
	return NULL;
}
EXPORT_SYMBOL(qdisc_create_dflt);

/* Under qdisc_lock(qdisc) and BH! */

void qdisc_reset(struct Qdisc *qdisc)
{
	const struct Qdisc_ops *ops = qdisc->ops;

	if (ops->reset)
		ops->reset(qdisc);

	if (qdisc->gso_skb) {
		kfree_skb(qdisc->gso_skb);
		qdisc->gso_skb = NULL;
		qdisc->q.qlen = 0;
	}
}
EXPORT_SYMBOL(qdisc_reset);

static void qdisc_rcu_free(struct rcu_head *head)
{
	struct Qdisc *qdisc = container_of(head, struct Qdisc, rcu_head);

	kfree((char *) qdisc - qdisc->padded);
}

void qdisc_destroy(struct Qdisc *qdisc)
{
	const struct Qdisc_ops *ops = qdisc->ops;

	if (qdisc->flags & TCQ_F_BUILTIN ||
	    !atomic_dec_and_test(&qdisc->refcnt))
		return;

#ifdef CONFIG_NET_SCHED
	qdisc_list_del(qdisc);

	qdisc_put_stab(qdisc->stab);
#endif
	gen_kill_estimator(&qdisc->bstats, &qdisc->rate_est);
	if (ops->reset)
		ops->reset(qdisc);
	if (ops->destroy)
		ops->destroy(qdisc);

	module_put(ops->owner);
	dev_put(qdisc_dev(qdisc));

	kfree_skb(qdisc->gso_skb);
	/*
	 * gen_estimator est_timer() might access qdisc->q.lock,
	 * wait a RCU grace period before freeing qdisc.
	 */
	call_rcu(&qdisc->rcu_head, qdisc_rcu_free);
}
EXPORT_SYMBOL(qdisc_destroy);
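/*
 * Lifetime sketch for the helpers above (a summary derived from this file,
 * not new mechanism): qdisc_alloc() hands back a qdisc with refcnt == 1 and
 * a device reference held; attach_default_qdiscs() below takes an extra
 * reference when it publishes a queue's qdisc as dev->qdisc.  For built-in
 * qdiscs, or while references remain, qdisc_destroy() is a no-op; on the
 * final put it unlinks the qdisc, kills the rate estimator, runs
 * ->reset()/->destroy(), and defers the actual kfree() by one RCU grace
 * period because est_timer() may still be touching qdisc->q.lock.
 */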
/* Attach toplevel qdisc to device queue. */
struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
			      struct Qdisc *qdisc)
{
	struct Qdisc *oqdisc = dev_queue->qdisc_sleeping;
	spinlock_t *root_lock;

	root_lock = qdisc_lock(oqdisc);
	spin_lock_bh(root_lock);

	/* Prune old scheduler */
	if (oqdisc && atomic_read(&oqdisc->refcnt) <= 1)
		qdisc_reset(oqdisc);

	/* ... and graft new one */
	if (qdisc == NULL)
		qdisc = &noop_qdisc;
	dev_queue->qdisc_sleeping = qdisc;
	rcu_assign_pointer(dev_queue->qdisc, &noop_qdisc);

	spin_unlock_bh(root_lock);

	return oqdisc;
}

static void attach_one_default_qdisc(struct net_device *dev,
				     struct netdev_queue *dev_queue,
				     void *_unused)
{
	struct Qdisc *qdisc;

	if (dev->tx_queue_len) {
		qdisc = qdisc_create_dflt(dev_queue,
					  &pfifo_fast_ops, TC_H_ROOT);
		if (!qdisc) {
			printk(KERN_INFO "%s: activation failed\n", dev->name);
			return;
		}

		/* Can bypass the queue discipline for the default qdisc */
		qdisc->flags |= TCQ_F_CAN_BYPASS;
	} else {
		qdisc = &noqueue_qdisc;
	}
	dev_queue->qdisc_sleeping = qdisc;
}

static void attach_default_qdiscs(struct net_device *dev)
{
	struct netdev_queue *txq;
	struct Qdisc *qdisc;

	txq = netdev_get_tx_queue(dev, 0);

	if (!netif_is_multiqueue(dev) || dev->tx_queue_len == 0) {
		netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL);
		dev->qdisc = txq->qdisc_sleeping;
		atomic_inc(&dev->qdisc->refcnt);
	} else {
		qdisc = qdisc_create_dflt(txq, &mq_qdisc_ops, TC_H_ROOT);
		if (qdisc) {
			qdisc->ops->attach(qdisc);
			dev->qdisc = qdisc;
		}
	}
}

static void transition_one_qdisc(struct net_device *dev,
				 struct netdev_queue *dev_queue,
				 void *_need_watchdog)
{
	struct Qdisc *new_qdisc = dev_queue->qdisc_sleeping;
	int *need_watchdog_p = _need_watchdog;

	if (!(new_qdisc->flags & TCQ_F_BUILTIN))
		clear_bit(__QDISC_STATE_DEACTIVATED, &new_qdisc->state);

	rcu_assign_pointer(dev_queue->qdisc, new_qdisc);
	if (need_watchdog_p && new_qdisc != &noqueue_qdisc) {
		dev_queue->trans_start = 0;
		*need_watchdog_p = 1;
	}
}
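/*
 * Note on the two-pointer scheme used above and in dev_activate() below
 * (a summary of this file's convention, not new mechanism):
 * dev_queue->qdisc_sleeping holds the configured qdisc, while
 * dev_queue->qdisc is what the transmit hot path actually dereferences.
 * dev_graft_qdisc() deliberately points the hot path at noop_qdisc, so
 * packets are dropped until dev_activate() publishes the sleeping qdisc
 * via transition_one_qdisc().
 */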
void dev_activate(struct net_device *dev)
{
	int need_watchdog;

	/* No queueing discipline is attached to the device;
	   create a default one, i.e. pfifo_fast for devices
	   which need queueing and noqueue_qdisc for
	   virtual interfaces.
	 */

	if (dev->qdisc == &noop_qdisc)
		attach_default_qdiscs(dev);

	if (!netif_carrier_ok(dev))
		/* Delay activation until next carrier-on event */
		return;

	need_watchdog = 0;
	netdev_for_each_tx_queue(dev, transition_one_qdisc, &need_watchdog);
	if (dev_ingress_queue(dev))
		transition_one_qdisc(dev, dev_ingress_queue(dev), NULL);

	if (need_watchdog) {
		dev->trans_start = jiffies;
		dev_watchdog_up(dev);
	}
}

static void dev_deactivate_queue(struct net_device *dev,
				 struct netdev_queue *dev_queue,
				 void *_qdisc_default)
{
	struct Qdisc *qdisc_default = _qdisc_default;
	struct Qdisc *qdisc;

	qdisc = dev_queue->qdisc;
	if (qdisc) {
		spin_lock_bh(qdisc_lock(qdisc));

		if (!(qdisc->flags & TCQ_F_BUILTIN))
			set_bit(__QDISC_STATE_DEACTIVATED, &qdisc->state);

		rcu_assign_pointer(dev_queue->qdisc, qdisc_default);
		qdisc_reset(qdisc);

		spin_unlock_bh(qdisc_lock(qdisc));
	}
}

static bool some_qdisc_is_busy(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *dev_queue;
		spinlock_t *root_lock;
		struct Qdisc *q;
		int val;

		dev_queue = netdev_get_tx_queue(dev, i);
		q = dev_queue->qdisc_sleeping;
		root_lock = qdisc_lock(q);

		spin_lock_bh(root_lock);

		val = (qdisc_is_running(q) ||
		       test_bit(__QDISC_STATE_SCHED, &q->state));

		spin_unlock_bh(root_lock);

		if (val)
			return true;
	}
	return false;
}

void dev_deactivate(struct net_device *dev)
{
	netdev_for_each_tx_queue(dev, dev_deactivate_queue, &noop_qdisc);
	if (dev_ingress_queue(dev))
		dev_deactivate_queue(dev, dev_ingress_queue(dev), &noop_qdisc);

	dev_watchdog_down(dev);

	/* Wait for outstanding qdisc-less dev_queue_xmit calls. */
	synchronize_rcu();

	/* Wait for outstanding qdisc_run calls. */
	while (some_qdisc_is_busy(dev))
		yield();
}

static void dev_init_scheduler_queue(struct net_device *dev,
				     struct netdev_queue *dev_queue,
				     void *_qdisc)
{
	struct Qdisc *qdisc = _qdisc;

	dev_queue->qdisc = qdisc;
	dev_queue->qdisc_sleeping = qdisc;
}

void dev_init_scheduler(struct net_device *dev)
{
	dev->qdisc = &noop_qdisc;
	netdev_for_each_tx_queue(dev, dev_init_scheduler_queue, &noop_qdisc);
	if (dev_ingress_queue(dev))
		dev_init_scheduler_queue(dev, dev_ingress_queue(dev), &noop_qdisc);

	setup_timer(&dev->watchdog_timer, dev_watchdog, (unsigned long)dev);
}

static void shutdown_scheduler_queue(struct net_device *dev,
				     struct netdev_queue *dev_queue,
				     void *_qdisc_default)
{
	struct Qdisc *qdisc = dev_queue->qdisc_sleeping;
	struct Qdisc *qdisc_default = _qdisc_default;

	if (qdisc) {
		rcu_assign_pointer(dev_queue->qdisc, qdisc_default);
		dev_queue->qdisc_sleeping = qdisc_default;

		qdisc_destroy(qdisc);
	}
}

void dev_shutdown(struct net_device *dev)
{
	netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc);
	if (dev_ingress_queue(dev))
		shutdown_scheduler_queue(dev, dev_ingress_queue(dev), &noop_qdisc);
	qdisc_destroy(dev->qdisc);
	dev->qdisc = &noop_qdisc;

	WARN_ON(timer_pending(&dev->watchdog_timer));
}
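/*
 * Lifecycle summary for the entry points above (documentation aid, derived
 * from this file): dev_init_scheduler() runs at register time and parks
 * every queue on noop_qdisc; dev_activate() installs the defaults and arms
 * the watchdog; dev_deactivate() swaps noop_qdisc back in and waits out
 * in-flight qdisc_run() calls; dev_shutdown() finally drops the references
 * with qdisc_destroy().
 */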