/*
 * net/sched/sch_generic.c     Generic packet scheduler routines.
 *
 *             This program is free software; you can redistribute it and/or
 *             modify it under the terms of the GNU General Public License
 *             as published by the Free Software Foundation; either version
 *             2 of the License, or (at your option) any later version.
 *
 * Authors:    Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *             Jamal Hadi Salim, <hadi@cyberus.ca> 990601
 *             - Ingress support
 */

#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/rcupdate.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <net/pkt_sched.h>
#include <net/dst.h>

/* Main transmission queue. */

/* Modifications to data participating in scheduling must be protected with
 * qdisc_lock(qdisc) spinlock.
 *
 * The idea is the following:
 * - enqueue, dequeue are serialized via qdisc root lock
 * - ingress filtering is also serialized via qdisc root lock
 * - updates to tree and tree walking are only done under the rtnl mutex.
 */

static inline int dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
{
        skb_dst_force(skb);
        q->gso_skb = skb;
        q->qstats.requeues++;
        q->q.qlen++;    /* it's still part of the queue */
        __netif_schedule(q);

        return 0;
}

static inline struct sk_buff *dequeue_skb(struct Qdisc *q)
{
        struct sk_buff *skb = q->gso_skb;

        if (unlikely(skb)) {
                struct net_device *dev = qdisc_dev(q);
                struct netdev_queue *txq;

                /* check the reason of requeuing without tx lock first */
                txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
                if (!netif_tx_queue_frozen_or_stopped(txq)) {
                        q->gso_skb = NULL;
                        q->q.qlen--;
                } else
                        skb = NULL;
        } else {
                skb = q->dequeue(q);
        }

        return skb;
}

static inline int handle_dev_cpu_collision(struct sk_buff *skb,
                                           struct netdev_queue *dev_queue,
                                           struct Qdisc *q)
{
        int ret;

        if (unlikely(dev_queue->xmit_lock_owner == smp_processor_id())) {
                /*
                 * Same CPU holding the lock. It may be a transient
                 * configuration error, when hard_start_xmit() recurses. We
                 * detect it by checking xmit owner and drop the packet when
                 * deadloop is detected. Return OK to try the next skb.
                 */
                kfree_skb(skb);
                if (net_ratelimit())
                        printk(KERN_WARNING "Dead loop on netdevice %s, "
                               "fix it urgently!\n", dev_queue->dev->name);
                ret = qdisc_qlen(q);
        } else {
                /*
                 * Another cpu is holding lock, requeue & delay xmits for
                 * some time.
                 */
                __this_cpu_inc(softnet_data.cpu_collision);
                ret = dev_requeue_skb(skb, q);
        }

        return ret;
}

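/*
 * A sketch of the common transmit path, for orientation (not an exhaustive
 * list of callers): dev_queue_xmit() takes the qdisc root lock, enqueues the
 * skb and calls qdisc_run(); __qdisc_run() -> qdisc_restart() ->
 * sch_direct_xmit() then drops the root lock, takes the driver's
 * HARD_TX_LOCK() for the actual transmit and re-acquires the root lock
 * afterwards.
 */
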
/*
 * Transmit one skb, and handle the return status as required. Holding the
 * __QDISC_STATE_RUNNING bit guarantees that only one CPU can execute this
 * function.
 *
 * Returns to the caller:
 *      0  - queue is empty or throttled.
 *      >0 - queue is not empty.
 */
int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
                    struct net_device *dev, struct netdev_queue *txq,
                    spinlock_t *root_lock)
{
        int ret = NETDEV_TX_BUSY;

        /* And release qdisc */
        spin_unlock(root_lock);

        HARD_TX_LOCK(dev, txq, smp_processor_id());
        if (!netif_tx_queue_frozen_or_stopped(txq))
                ret = dev_hard_start_xmit(skb, dev, txq);

        HARD_TX_UNLOCK(dev, txq);

        spin_lock(root_lock);

        if (dev_xmit_complete(ret)) {
                /* Driver sent out skb successfully or skb was consumed */
                ret = qdisc_qlen(q);
        } else if (ret == NETDEV_TX_LOCKED) {
                /* Driver try lock failed */
                ret = handle_dev_cpu_collision(skb, txq, q);
        } else {
                /* Driver returned NETDEV_TX_BUSY - requeue skb */
                if (unlikely(ret != NETDEV_TX_BUSY && net_ratelimit()))
                        printk(KERN_WARNING "BUG %s code %d qlen %d\n",
                               dev->name, ret, q->q.qlen);

                ret = dev_requeue_skb(skb, q);
        }

        if (ret && netif_tx_queue_frozen_or_stopped(txq))
                ret = 0;

        return ret;
}

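/*
 * Besides qdisc_restart() below, sch_direct_xmit() is also used by the
 * enqueue-free bypass path of dev_queue_xmit() (see __dev_xmit_skb() in
 * net/core/dev.c), which sends a packet straight to the driver when an
 * empty qdisc has TCQ_F_CAN_BYPASS set.
 */
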
/*
 * NOTE: Called under qdisc_lock(q) with locally disabled BH.
 *
 * __QDISC_STATE_RUNNING guarantees only one CPU can process
 * this qdisc at a time. qdisc_lock(q) serializes queue accesses for
 * this queue.
 *
 * netif_tx_lock serializes accesses to device driver.
 *
 * qdisc_lock(q) and netif_tx_lock are mutually exclusive,
 * if one is grabbed, another must be free.
 *
 * Note, that this procedure can be called by a watchdog timer
 *
 * Returns to the caller:
 *      0  - queue is empty or throttled.
 *      >0 - queue is not empty.
 *
 */
static inline int qdisc_restart(struct Qdisc *q)
{
        struct netdev_queue *txq;
        struct net_device *dev;
        spinlock_t *root_lock;
        struct sk_buff *skb;

        /* Dequeue packet */
        skb = dequeue_skb(q);
        if (unlikely(!skb))
                return 0;
        WARN_ON_ONCE(skb_dst_is_noref(skb));
        root_lock = qdisc_lock(q);
        dev = qdisc_dev(q);
        txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

        return sch_direct_xmit(skb, q, dev, txq, root_lock);
}

void __qdisc_run(struct Qdisc *q)
{
        unsigned long start_time = jiffies;

        while (qdisc_restart(q)) {
                /*
                 * Postpone processing if
                 * 1. another process needs the CPU;
                 * 2. we've been doing it for too long.
                 */
                if (need_resched() || jiffies != start_time) {
                        __netif_schedule(q);
                        break;
                }
        }

        qdisc_run_end(q);
}

unsigned long dev_trans_start(struct net_device *dev)
{
        unsigned long val, res = dev->trans_start;
        unsigned int i;

        for (i = 0; i < dev->num_tx_queues; i++) {
                val = netdev_get_tx_queue(dev, i)->trans_start;
                if (val && time_after(val, res))
                        res = val;
        }
        dev->trans_start = res;
        return res;
}
EXPORT_SYMBOL(dev_trans_start);

static void dev_watchdog(unsigned long arg)
{
        struct net_device *dev = (struct net_device *)arg;

        netif_tx_lock(dev);
        if (!qdisc_tx_is_noop(dev)) {
                if (netif_device_present(dev) &&
                    netif_running(dev) &&
                    netif_carrier_ok(dev)) {
                        int some_queue_timedout = 0;
                        unsigned int i;
                        unsigned long trans_start;

                        for (i = 0; i < dev->num_tx_queues; i++) {
                                struct netdev_queue *txq;

                                txq = netdev_get_tx_queue(dev, i);
                                /*
                                 * old device drivers set dev->trans_start
                                 */
                                trans_start = txq->trans_start ? : dev->trans_start;
                                if (netif_tx_queue_stopped(txq) &&
                                    time_after(jiffies, (trans_start +
                                                         dev->watchdog_timeo))) {
                                        some_queue_timedout = 1;
                                        break;
                                }
                        }

                        if (some_queue_timedout) {
                                char drivername[64];
                                WARN_ONCE(1, KERN_INFO "NETDEV WATCHDOG: %s (%s): transmit queue %u timed out\n",
                                          dev->name, netdev_drivername(dev, drivername, 64), i);
                                dev->netdev_ops->ndo_tx_timeout(dev);
                        }
                        if (!mod_timer(&dev->watchdog_timer,
                                       round_jiffies(jiffies +
                                                     dev->watchdog_timeo)))
                                dev_hold(dev);
                }
        }
        netif_tx_unlock(dev);

        dev_put(dev);
}

void __netdev_watchdog_up(struct net_device *dev)
{
        if (dev->netdev_ops->ndo_tx_timeout) {
                if (dev->watchdog_timeo <= 0)
                        dev->watchdog_timeo = 5*HZ;
                if (!mod_timer(&dev->watchdog_timer,
                               round_jiffies(jiffies + dev->watchdog_timeo)))
                        dev_hold(dev);
        }
}

static void dev_watchdog_up(struct net_device *dev)
{
        __netdev_watchdog_up(dev);
}

static void dev_watchdog_down(struct net_device *dev)
{
        netif_tx_lock_bh(dev);
        if (del_timer(&dev->watchdog_timer))
                dev_put(dev);
        netif_tx_unlock_bh(dev);
}

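/*
 * A sketch of the watchdog lifetime, summarising the helpers above:
 * dev_activate() arms the timer via dev_watchdog_up(), dev_watchdog()
 * re-arms itself while the device is up and carrier is present, and
 * dev_watchdog_down() stops it on deactivation.  Every pending timer
 * holds a device reference (dev_hold()/dev_put()), so the netdev cannot
 * disappear while the callback may still run.
 */
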
/**
 *      netif_carrier_on - set carrier
 *      @dev: network device
 *
 *      Device has detected acquisition of carrier.
 */
void netif_carrier_on(struct net_device *dev)
{
        if (test_and_clear_bit(__LINK_STATE_NOCARRIER, &dev->state)) {
                if (dev->reg_state == NETREG_UNINITIALIZED)
                        return;
                linkwatch_fire_event(dev);
                if (netif_running(dev))
                        __netdev_watchdog_up(dev);
        }
}
EXPORT_SYMBOL(netif_carrier_on);

/**
 *      netif_carrier_off - clear carrier
 *      @dev: network device
 *
 *      Device has detected loss of carrier.
 */
void netif_carrier_off(struct net_device *dev)
{
        if (!test_and_set_bit(__LINK_STATE_NOCARRIER, &dev->state)) {
                if (dev->reg_state == NETREG_UNINITIALIZED)
                        return;
                linkwatch_fire_event(dev);
        }
}
EXPORT_SYMBOL(netif_carrier_off);

/**
 *      netif_notify_peers - notify network peers about existence of @dev
 *      @dev: network device
 *
 *      Generate traffic such that interested network peers are aware of
 *      @dev, such as by generating a gratuitous ARP. This may be used when
 *      a device wants to inform the rest of the network about some sort of
 *      reconfiguration such as a failover event or virtual machine
 *      migration.
 */
void netif_notify_peers(struct net_device *dev)
{
        rtnl_lock();
        call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
        rtnl_unlock();
}
EXPORT_SYMBOL(netif_notify_peers);

/* "NOOP" scheduler: the best scheduler, recommended for all interfaces
   under all circumstances. It is difficult to invent anything faster or
   cheaper.
 */

static int noop_enqueue(struct sk_buff *skb, struct Qdisc *qdisc)
{
        kfree_skb(skb);
        return NET_XMIT_CN;
}

static struct sk_buff *noop_dequeue(struct Qdisc *qdisc)
{
        return NULL;
}

struct Qdisc_ops noop_qdisc_ops __read_mostly = {
        .id             = "noop",
        .priv_size      = 0,
        .enqueue        = noop_enqueue,
        .dequeue        = noop_dequeue,
        .peek           = noop_dequeue,
        .owner          = THIS_MODULE,
};

static struct netdev_queue noop_netdev_queue = {
        .qdisc          = &noop_qdisc,
        .qdisc_sleeping = &noop_qdisc,
};

struct Qdisc noop_qdisc = {
        .enqueue        = noop_enqueue,
        .dequeue        = noop_dequeue,
        .flags          = TCQ_F_BUILTIN,
        .ops            = &noop_qdisc_ops,
        .list           = LIST_HEAD_INIT(noop_qdisc.list),
        .q.lock         = __SPIN_LOCK_UNLOCKED(noop_qdisc.q.lock),
        .dev_queue      = &noop_netdev_queue,
        .busylock       = __SPIN_LOCK_UNLOCKED(noop_qdisc.busylock),
};
EXPORT_SYMBOL(noop_qdisc);

static struct Qdisc_ops noqueue_qdisc_ops __read_mostly = {
        .id             = "noqueue",
        .priv_size      = 0,
        .enqueue        = noop_enqueue,
        .dequeue        = noop_dequeue,
        .peek           = noop_dequeue,
        .owner          = THIS_MODULE,
};

static struct Qdisc noqueue_qdisc;
static struct netdev_queue noqueue_netdev_queue = {
        .qdisc          = &noqueue_qdisc,
        .qdisc_sleeping = &noqueue_qdisc,
};

static struct Qdisc noqueue_qdisc = {
        .enqueue        = NULL,
        .dequeue        = noop_dequeue,
        .flags          = TCQ_F_BUILTIN,
        .ops            = &noqueue_qdisc_ops,
        .list           = LIST_HEAD_INIT(noqueue_qdisc.list),
        .q.lock         = __SPIN_LOCK_UNLOCKED(noqueue_qdisc.q.lock),
        .dev_queue      = &noqueue_netdev_queue,
        .busylock       = __SPIN_LOCK_UNLOCKED(noqueue_qdisc.busylock),
};


static const u8 prio2band[TC_PRIO_MAX + 1] =
        { 1, 2, 2, 2, 1, 2, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1 };

/* 3-band FIFO queue: old style, but should be a bit faster than
   generic prio+fifo combination.
 */

#define PFIFO_FAST_BANDS 3

/*
 * Private data for a pfifo_fast scheduler containing:
 *      - queues for the three bands
 *      - bitmap indicating which of the bands contain skbs
 */
struct pfifo_fast_priv {
        u32 bitmap;
        struct sk_buff_head q[PFIFO_FAST_BANDS];
};

/*
 * Convert a bitmap to the first band number where an skb is queued, where:
 *      bitmap=0 means there are no skbs on any band.
 *      bitmap=1 means there is an skb on band 0.
 *      bitmap=7 means there are skbs on all 3 bands, etc.
 */
static const int bitmap2band[] = {-1, 0, 1, 0, 2, 0, 1, 0};

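/*
 * Worked example, for illustration: a packet with skb->priority ==
 * TC_PRIO_INTERACTIVE (6) maps via prio2band[] to band 0, the highest
 * priority band, while TC_PRIO_BULK (2) maps to band 2.  If bands 1 and 2
 * both hold packets, bitmap == 0x6 and bitmap2band[6] == 1, so band 1 is
 * served first.
 */
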
static inline struct sk_buff_head *band2list(struct pfifo_fast_priv *priv,
                                             int band)
{
        return priv->q + band;
}

static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc)
{
        if (skb_queue_len(&qdisc->q) < qdisc_dev(qdisc)->tx_queue_len) {
                int band = prio2band[skb->priority & TC_PRIO_MAX];
                struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
                struct sk_buff_head *list = band2list(priv, band);

                priv->bitmap |= (1 << band);
                qdisc->q.qlen++;
                return __qdisc_enqueue_tail(skb, qdisc, list);
        }

        return qdisc_drop(skb, qdisc);
}

static struct sk_buff *pfifo_fast_dequeue(struct Qdisc *qdisc)
{
        struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
        int band = bitmap2band[priv->bitmap];

        if (likely(band >= 0)) {
                struct sk_buff_head *list = band2list(priv, band);
                struct sk_buff *skb = __qdisc_dequeue_head(qdisc, list);

                qdisc->q.qlen--;
                if (skb_queue_empty(list))
                        priv->bitmap &= ~(1 << band);

                return skb;
        }

        return NULL;
}

static struct sk_buff *pfifo_fast_peek(struct Qdisc *qdisc)
{
        struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
        int band = bitmap2band[priv->bitmap];

        if (band >= 0) {
                struct sk_buff_head *list = band2list(priv, band);

                return skb_peek(list);
        }

        return NULL;
}

static void pfifo_fast_reset(struct Qdisc *qdisc)
{
        int prio;
        struct pfifo_fast_priv *priv = qdisc_priv(qdisc);

        for (prio = 0; prio < PFIFO_FAST_BANDS; prio++)
                __qdisc_reset_queue(qdisc, band2list(priv, prio));

        priv->bitmap = 0;
        qdisc->qstats.backlog = 0;
        qdisc->q.qlen = 0;
}

static int pfifo_fast_dump(struct Qdisc *qdisc, struct sk_buff *skb)
{
        struct tc_prio_qopt opt = { .bands = PFIFO_FAST_BANDS };

        memcpy(&opt.priomap, prio2band, TC_PRIO_MAX + 1);
        NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
        return skb->len;

nla_put_failure:
        return -1;
}

static int pfifo_fast_init(struct Qdisc *qdisc, struct nlattr *opt)
{
        int prio;
        struct pfifo_fast_priv *priv = qdisc_priv(qdisc);

        for (prio = 0; prio < PFIFO_FAST_BANDS; prio++)
                skb_queue_head_init(band2list(priv, prio));

        return 0;
}

struct Qdisc_ops pfifo_fast_ops __read_mostly = {
        .id             = "pfifo_fast",
        .priv_size      = sizeof(struct pfifo_fast_priv),
        .enqueue        = pfifo_fast_enqueue,
        .dequeue        = pfifo_fast_dequeue,
        .peek           = pfifo_fast_peek,
        .init           = pfifo_fast_init,
        .reset          = pfifo_fast_reset,
        .dump           = pfifo_fast_dump,
        .owner          = THIS_MODULE,
};
EXPORT_SYMBOL(pfifo_fast_ops);

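/*
 * Layout note, a summary of the allocation below: qdisc_alloc()
 * over-allocates by QDISC_ALIGNTO - 1 bytes and rounds the struct Qdisc
 * pointer up to a QDISC_ALIGNTO boundary, so the private area returned by
 * qdisc_priv() ends up aligned as well.  sch->padded remembers the offset
 * back to the start of the allocation; qdisc_rcu_free() uses it to hand
 * the original pointer to kfree().
 */
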
struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
                          struct Qdisc_ops *ops)
{
        void *p;
        struct Qdisc *sch;
        unsigned int size;
        int err = -ENOBUFS;

        /* ensure that the Qdisc and the private data are 64-byte aligned */
        size = QDISC_ALIGN(sizeof(*sch));
        size += ops->priv_size + (QDISC_ALIGNTO - 1);

        p = kzalloc_node(size, GFP_KERNEL,
                         netdev_queue_numa_node_read(dev_queue));

        if (!p)
                goto errout;
        sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p);
        sch->padded = (char *) sch - (char *) p;

        INIT_LIST_HEAD(&sch->list);
        skb_queue_head_init(&sch->q);
        spin_lock_init(&sch->busylock);
        sch->ops = ops;
        sch->enqueue = ops->enqueue;
        sch->dequeue = ops->dequeue;
        sch->dev_queue = dev_queue;
        dev_hold(qdisc_dev(sch));
        atomic_set(&sch->refcnt, 1);

        return sch;
errout:
        return ERR_PTR(err);
}

struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
                                struct Qdisc_ops *ops, unsigned int parentid)
{
        struct Qdisc *sch;

        sch = qdisc_alloc(dev_queue, ops);
        if (IS_ERR(sch))
                goto errout;
        sch->parent = parentid;

        if (!ops->init || ops->init(sch, NULL) == 0)
                return sch;

        qdisc_destroy(sch);
errout:
        return NULL;
}
EXPORT_SYMBOL(qdisc_create_dflt);

/* Under qdisc_lock(qdisc) and BH! */

void qdisc_reset(struct Qdisc *qdisc)
{
        const struct Qdisc_ops *ops = qdisc->ops;

        if (ops->reset)
                ops->reset(qdisc);

        if (qdisc->gso_skb) {
                kfree_skb(qdisc->gso_skb);
                qdisc->gso_skb = NULL;
                qdisc->q.qlen = 0;
        }
}
EXPORT_SYMBOL(qdisc_reset);

static void qdisc_rcu_free(struct rcu_head *head)
{
        struct Qdisc *qdisc = container_of(head, struct Qdisc, rcu_head);

        kfree((char *) qdisc - qdisc->padded);
}

void qdisc_destroy(struct Qdisc *qdisc)
{
        const struct Qdisc_ops *ops = qdisc->ops;

        if (qdisc->flags & TCQ_F_BUILTIN ||
            !atomic_dec_and_test(&qdisc->refcnt))
                return;

#ifdef CONFIG_NET_SCHED
        qdisc_list_del(qdisc);

        qdisc_put_stab(qdisc->stab);
#endif
        gen_kill_estimator(&qdisc->bstats, &qdisc->rate_est);
        if (ops->reset)
                ops->reset(qdisc);
        if (ops->destroy)
                ops->destroy(qdisc);

        module_put(ops->owner);
        dev_put(qdisc_dev(qdisc));

        kfree_skb(qdisc->gso_skb);
        /*
         * gen_estimator est_timer() might access qdisc->q.lock,
         * wait a RCU grace period before freeing qdisc.
         */
        call_rcu(&qdisc->rcu_head, qdisc_rcu_free);
}
EXPORT_SYMBOL(qdisc_destroy);

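/*
 * Note on grafting, summarising dev_graft_qdisc() below: the new qdisc only
 * becomes qdisc_sleeping here, while the active dev_queue->qdisc is pointed
 * at noop_qdisc.  The replacement is not used for transmit until
 * dev_activate() installs it via transition_one_qdisc().
 */
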
/* Attach toplevel qdisc to device queue. */
struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
                              struct Qdisc *qdisc)
{
        struct Qdisc *oqdisc = dev_queue->qdisc_sleeping;
        spinlock_t *root_lock;

        root_lock = qdisc_lock(oqdisc);
        spin_lock_bh(root_lock);

        /* Prune old scheduler */
        if (oqdisc && atomic_read(&oqdisc->refcnt) <= 1)
                qdisc_reset(oqdisc);

        /* ... and graft new one */
        if (qdisc == NULL)
                qdisc = &noop_qdisc;
        dev_queue->qdisc_sleeping = qdisc;
        rcu_assign_pointer(dev_queue->qdisc, &noop_qdisc);

        spin_unlock_bh(root_lock);

        return oqdisc;
}
EXPORT_SYMBOL(dev_graft_qdisc);

static void attach_one_default_qdisc(struct net_device *dev,
                                     struct netdev_queue *dev_queue,
                                     void *_unused)
{
        struct Qdisc *qdisc;

        if (dev->tx_queue_len) {
                qdisc = qdisc_create_dflt(dev_queue,
                                          &pfifo_fast_ops, TC_H_ROOT);
                if (!qdisc) {
                        printk(KERN_INFO "%s: activation failed\n", dev->name);
                        return;
                }

                /* Can by-pass the queue discipline for default qdisc */
                qdisc->flags |= TCQ_F_CAN_BYPASS;
        } else {
                qdisc = &noqueue_qdisc;
        }
        dev_queue->qdisc_sleeping = qdisc;
}

static void attach_default_qdiscs(struct net_device *dev)
{
        struct netdev_queue *txq;
        struct Qdisc *qdisc;

        txq = netdev_get_tx_queue(dev, 0);

        if (!netif_is_multiqueue(dev) || dev->tx_queue_len == 0) {
                netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL);
                dev->qdisc = txq->qdisc_sleeping;
                atomic_inc(&dev->qdisc->refcnt);
        } else {
                qdisc = qdisc_create_dflt(txq, &mq_qdisc_ops, TC_H_ROOT);
                if (qdisc) {
                        qdisc->ops->attach(qdisc);
                        dev->qdisc = qdisc;
                }
        }
}

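/*
 * For real multiqueue devices attach_default_qdiscs() uses the "mq"
 * scheduler (see sch_mq.c) as the root; its ->attach() callback grafts an
 * independent per-queue child qdisc onto every TX queue, so each queue is
 * scheduled under its own root lock.
 */
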
static void transition_one_qdisc(struct net_device *dev,
                                 struct netdev_queue *dev_queue,
                                 void *_need_watchdog)
{
        struct Qdisc *new_qdisc = dev_queue->qdisc_sleeping;
        int *need_watchdog_p = _need_watchdog;

        if (!(new_qdisc->flags & TCQ_F_BUILTIN))
                clear_bit(__QDISC_STATE_DEACTIVATED, &new_qdisc->state);

        rcu_assign_pointer(dev_queue->qdisc, new_qdisc);
        if (need_watchdog_p && new_qdisc != &noqueue_qdisc) {
                dev_queue->trans_start = 0;
                *need_watchdog_p = 1;
        }
}

void dev_activate(struct net_device *dev)
{
        int need_watchdog;

        /* No queueing discipline is attached to device;
           create a default one, i.e. pfifo_fast for devices
           which need queueing, and noqueue_qdisc for
           virtual interfaces.
         */

        if (dev->qdisc == &noop_qdisc)
                attach_default_qdiscs(dev);

        if (!netif_carrier_ok(dev))
                /* Delay activation until next carrier-on event */
                return;

        need_watchdog = 0;
        netdev_for_each_tx_queue(dev, transition_one_qdisc, &need_watchdog);
        if (dev_ingress_queue(dev))
                transition_one_qdisc(dev, dev_ingress_queue(dev), NULL);

        if (need_watchdog) {
                dev->trans_start = jiffies;
                dev_watchdog_up(dev);
        }
}
EXPORT_SYMBOL(dev_activate);

static void dev_deactivate_queue(struct net_device *dev,
                                 struct netdev_queue *dev_queue,
                                 void *_qdisc_default)
{
        struct Qdisc *qdisc_default = _qdisc_default;
        struct Qdisc *qdisc;

        qdisc = dev_queue->qdisc;
        if (qdisc) {
                spin_lock_bh(qdisc_lock(qdisc));

                if (!(qdisc->flags & TCQ_F_BUILTIN))
                        set_bit(__QDISC_STATE_DEACTIVATED, &qdisc->state);

                rcu_assign_pointer(dev_queue->qdisc, qdisc_default);
                qdisc_reset(qdisc);

                spin_unlock_bh(qdisc_lock(qdisc));
        }
}

static bool some_qdisc_is_busy(struct net_device *dev)
{
        unsigned int i;

        for (i = 0; i < dev->num_tx_queues; i++) {
                struct netdev_queue *dev_queue;
                spinlock_t *root_lock;
                struct Qdisc *q;
                int val;

                dev_queue = netdev_get_tx_queue(dev, i);
                q = dev_queue->qdisc_sleeping;
                root_lock = qdisc_lock(q);

                spin_lock_bh(root_lock);

                val = (qdisc_is_running(q) ||
                       test_bit(__QDISC_STATE_SCHED, &q->state));

                spin_unlock_bh(root_lock);

                if (val)
                        return true;
        }
        return false;
}

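/*
 * Deactivation happens in two steps, a summary of the code below: every
 * queue is switched to noop_qdisc and its old qdisc reset under
 * qdisc_lock(), then synchronize_rcu() waits for lockless readers of
 * dev_queue->qdisc, and finally we spin until no qdisc of the device is
 * still running or scheduled.
 */
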
void dev_deactivate_many(struct list_head *head)
{
        struct net_device *dev;

        list_for_each_entry(dev, head, unreg_list) {
                netdev_for_each_tx_queue(dev, dev_deactivate_queue,
                                         &noop_qdisc);
                if (dev_ingress_queue(dev))
                        dev_deactivate_queue(dev, dev_ingress_queue(dev),
                                             &noop_qdisc);

                dev_watchdog_down(dev);
        }

        /* Wait for outstanding qdisc-less dev_queue_xmit calls. */
        synchronize_rcu();

        /* Wait for outstanding qdisc_run calls. */
        list_for_each_entry(dev, head, unreg_list)
                while (some_qdisc_is_busy(dev))
                        yield();
}

void dev_deactivate(struct net_device *dev)
{
        LIST_HEAD(single);

        list_add(&dev->unreg_list, &single);
        dev_deactivate_many(&single);
}
EXPORT_SYMBOL(dev_deactivate);

static void dev_init_scheduler_queue(struct net_device *dev,
                                     struct netdev_queue *dev_queue,
                                     void *_qdisc)
{
        struct Qdisc *qdisc = _qdisc;

        dev_queue->qdisc = qdisc;
        dev_queue->qdisc_sleeping = qdisc;
}

void dev_init_scheduler(struct net_device *dev)
{
        dev->qdisc = &noop_qdisc;
        netdev_for_each_tx_queue(dev, dev_init_scheduler_queue, &noop_qdisc);
        if (dev_ingress_queue(dev))
                dev_init_scheduler_queue(dev, dev_ingress_queue(dev), &noop_qdisc);

        setup_timer(&dev->watchdog_timer, dev_watchdog, (unsigned long)dev);
}

static void shutdown_scheduler_queue(struct net_device *dev,
                                     struct netdev_queue *dev_queue,
                                     void *_qdisc_default)
{
        struct Qdisc *qdisc = dev_queue->qdisc_sleeping;
        struct Qdisc *qdisc_default = _qdisc_default;

        if (qdisc) {
                rcu_assign_pointer(dev_queue->qdisc, qdisc_default);
                dev_queue->qdisc_sleeping = qdisc_default;

                qdisc_destroy(qdisc);
        }
}

void dev_shutdown(struct net_device *dev)
{
        netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc);
        if (dev_ingress_queue(dev))
                shutdown_scheduler_queue(dev, dev_ingress_queue(dev), &noop_qdisc);
        qdisc_destroy(dev->qdisc);
        dev->qdisc = &noop_qdisc;

        WARN_ON(timer_pending(&dev->watchdog_timer));
}