/*
 * net/sched/sch_generic.c	Generic packet scheduler routines.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *		Jamal Hadi Salim, <hadi@cyberus.ca> 990601
 *		- Ingress support
 */

#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/rcupdate.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <net/pkt_sched.h>

/* Main transmission queue. */

/* Modifications to data participating in scheduling must be protected with
 * qdisc_lock(qdisc) spinlock.
 *
 * The idea is the following:
 * - enqueue, dequeue are serialized via qdisc root lock
 * - ingress filtering is also serialized via qdisc root lock
 * - updates to tree and tree walking are only done under the rtnl mutex.
 */

static inline int dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
{
	q->gso_skb = skb;
	q->qstats.requeues++;
	q->q.qlen++;	/* it's still part of the queue */
	__netif_schedule(q);

	return 0;
}

static inline struct sk_buff *dequeue_skb(struct Qdisc *q)
{
	struct sk_buff *skb = q->gso_skb;

	if (unlikely(skb)) {
		struct net_device *dev = qdisc_dev(q);
		struct netdev_queue *txq;

		/* check the reason for requeuing without tx lock first */
		txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
		if (!netif_tx_queue_stopped(txq) &&
		    !netif_tx_queue_frozen(txq)) {
			q->gso_skb = NULL;
			q->q.qlen--;
		} else
			skb = NULL;
	} else {
		skb = q->dequeue(q);
	}

	return skb;
}

static inline int handle_dev_cpu_collision(struct sk_buff *skb,
					   struct netdev_queue *dev_queue,
					   struct Qdisc *q)
{
	int ret;

	if (unlikely(dev_queue->xmit_lock_owner == smp_processor_id())) {
		/*
		 * Same CPU holding the lock. It may be a transient
		 * configuration error, when hard_start_xmit() recurses. We
		 * detect it by checking xmit owner and drop the packet when
		 * deadloop is detected. Return OK to try the next skb.
		 */
		kfree_skb(skb);
		if (net_ratelimit())
			printk(KERN_WARNING "Dead loop on netdevice %s, "
			       "fix it urgently!\n", dev_queue->dev->name);
		ret = qdisc_qlen(q);
	} else {
		/*
		 * Another cpu is holding lock, requeue & delay xmits for
		 * some time.
		 */
		__get_cpu_var(netdev_rx_stat).cpu_collision++;
		ret = dev_requeue_skb(skb, q);
	}

	return ret;
}

/*
 * Transmit one skb, and handle the return status as required. Holding the
 * __QDISC_STATE_RUNNING bit guarantees that only one CPU can execute this
 * function.
 *
 * Returns to the caller:
 *	0  - queue is empty or throttled.
 *	>0 - queue is not empty.
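 *
 * Three driver outcomes are handled below: dev_xmit_complete() means the
 * skb was sent out or consumed, NETDEV_TX_LOCKED means another CPU holds
 * the driver's TX lock (a collision), and anything else is treated as
 * NETDEV_TX_BUSY and the skb is requeued.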
 */
int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
		    struct net_device *dev, struct netdev_queue *txq,
		    spinlock_t *root_lock)
{
	int ret = NETDEV_TX_BUSY;

	/* And release qdisc */
	spin_unlock(root_lock);

	HARD_TX_LOCK(dev, txq, smp_processor_id());
	if (!netif_tx_queue_stopped(txq) && !netif_tx_queue_frozen(txq))
		ret = dev_hard_start_xmit(skb, dev, txq);

	HARD_TX_UNLOCK(dev, txq);

	spin_lock(root_lock);

	if (dev_xmit_complete(ret)) {
		/* Driver sent out skb successfully or skb was consumed */
		ret = qdisc_qlen(q);
	} else if (ret == NETDEV_TX_LOCKED) {
		/* Driver try lock failed */
		ret = handle_dev_cpu_collision(skb, txq, q);
	} else {
		/* Driver returned NETDEV_TX_BUSY - requeue skb */
		if (unlikely(ret != NETDEV_TX_BUSY && net_ratelimit()))
			printk(KERN_WARNING "BUG %s code %d qlen %d\n",
			       dev->name, ret, q->q.qlen);

		ret = dev_requeue_skb(skb, q);
	}

	if (ret && (netif_tx_queue_stopped(txq) ||
		    netif_tx_queue_frozen(txq)))
		ret = 0;

	return ret;
}

/*
 * NOTE: Called under qdisc_lock(q) with locally disabled BH.
 *
 * __QDISC_STATE_RUNNING guarantees only one CPU can process
 * this qdisc at a time. qdisc_lock(q) serializes queue accesses for
 * this queue.
 *
 * netif_tx_lock serializes accesses to device driver.
 *
 * qdisc_lock(q) and netif_tx_lock are mutually exclusive,
 * if one is grabbed, another must be free.
 *
 * Note that this procedure can be called by a watchdog timer.
 *
 * Returns to the caller:
 *	0  - queue is empty or throttled.
 *	>0 - queue is not empty.
 *
 */
static inline int qdisc_restart(struct Qdisc *q)
{
	struct netdev_queue *txq;
	struct net_device *dev;
	spinlock_t *root_lock;
	struct sk_buff *skb;

	/* Dequeue packet */
	skb = dequeue_skb(q);
	if (unlikely(!skb))
		return 0;

	root_lock = qdisc_lock(q);
	dev = qdisc_dev(q);
	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

	return sch_direct_xmit(skb, q, dev, txq, root_lock);
}

void __qdisc_run(struct Qdisc *q)
{
	unsigned long start_time = jiffies;

	while (qdisc_restart(q)) {
		/*
		 * Postpone processing if
		 * 1. another process needs the CPU;
		 * 2. we've been doing it for too long.
		 */
		if (need_resched() || jiffies != start_time) {
			__netif_schedule(q);
			break;
		}
	}

	clear_bit(__QDISC_STATE_RUNNING, &q->state);
}

unsigned long dev_trans_start(struct net_device *dev)
{
	unsigned long val, res = dev->trans_start;
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		val = netdev_get_tx_queue(dev, i)->trans_start;
		if (val && time_after(val, res))
			res = val;
	}
	dev->trans_start = res;
	return res;
}
EXPORT_SYMBOL(dev_trans_start);

static void dev_watchdog(unsigned long arg)
{
	struct net_device *dev = (struct net_device *)arg;

	netif_tx_lock(dev);
	if (!qdisc_tx_is_noop(dev)) {
		if (netif_device_present(dev) &&
		    netif_running(dev) &&
		    netif_carrier_ok(dev)) {
			int some_queue_timedout = 0;
			unsigned int i;
			unsigned long trans_start;

			for (i = 0; i < dev->num_tx_queues; i++) {
				struct netdev_queue *txq;

				txq = netdev_get_tx_queue(dev, i);
				/*
				 * old device drivers set dev->trans_start
				 */
				trans_start = txq->trans_start ?
					: dev->trans_start;
				if (netif_tx_queue_stopped(txq) &&
				    time_after(jiffies, (trans_start +
							 dev->watchdog_timeo))) {
					some_queue_timedout = 1;
					break;
				}
			}

			if (some_queue_timedout) {
				char drivername[64];
				WARN_ONCE(1, KERN_INFO "NETDEV WATCHDOG: %s (%s): transmit queue %u timed out\n",
					  dev->name, netdev_drivername(dev, drivername, 64), i);
				dev->netdev_ops->ndo_tx_timeout(dev);
			}
			if (!mod_timer(&dev->watchdog_timer,
				       round_jiffies(jiffies +
						     dev->watchdog_timeo)))
				dev_hold(dev);
		}
	}
	netif_tx_unlock(dev);

	dev_put(dev);
}

void __netdev_watchdog_up(struct net_device *dev)
{
	if (dev->netdev_ops->ndo_tx_timeout) {
		if (dev->watchdog_timeo <= 0)
			dev->watchdog_timeo = 5*HZ;
		if (!mod_timer(&dev->watchdog_timer,
			       round_jiffies(jiffies + dev->watchdog_timeo)))
			dev_hold(dev);
	}
}

static void dev_watchdog_up(struct net_device *dev)
{
	__netdev_watchdog_up(dev);
}

static void dev_watchdog_down(struct net_device *dev)
{
	netif_tx_lock_bh(dev);
	if (del_timer(&dev->watchdog_timer))
		dev_put(dev);
	netif_tx_unlock_bh(dev);
}

/**
 *	netif_carrier_on - set carrier
 *	@dev: network device
 *
 * Device has detected acquisition of carrier.
 */
void netif_carrier_on(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_NOCARRIER, &dev->state)) {
		if (dev->reg_state == NETREG_UNINITIALIZED)
			return;
		linkwatch_fire_event(dev);
		if (netif_running(dev))
			__netdev_watchdog_up(dev);
	}
}
EXPORT_SYMBOL(netif_carrier_on);

/**
 *	netif_carrier_off - clear carrier
 *	@dev: network device
 *
 * Device has detected loss of carrier.
 */
void netif_carrier_off(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_NOCARRIER, &dev->state)) {
		if (dev->reg_state == NETREG_UNINITIALIZED)
			return;
		linkwatch_fire_event(dev);
	}
}
EXPORT_SYMBOL(netif_carrier_off);

/* "NOOP" scheduler: the best scheduler, recommended for all interfaces
   under all circumstances. It is difficult to invent anything faster or
   cheaper.
 */

static int noop_enqueue(struct sk_buff *skb, struct Qdisc *qdisc)
{
	kfree_skb(skb);
	return NET_XMIT_CN;
}

static struct sk_buff *noop_dequeue(struct Qdisc *qdisc)
{
	return NULL;
}

struct Qdisc_ops noop_qdisc_ops __read_mostly = {
	.id		= "noop",
	.priv_size	= 0,
	.enqueue	= noop_enqueue,
	.dequeue	= noop_dequeue,
	.peek		= noop_dequeue,
	.owner		= THIS_MODULE,
};

static struct netdev_queue noop_netdev_queue = {
	.qdisc		= &noop_qdisc,
	.qdisc_sleeping	= &noop_qdisc,
};

struct Qdisc noop_qdisc = {
	.enqueue	= noop_enqueue,
	.dequeue	= noop_dequeue,
	.flags		= TCQ_F_BUILTIN,
	.ops		= &noop_qdisc_ops,
	.list		= LIST_HEAD_INIT(noop_qdisc.list),
	.q.lock		= __SPIN_LOCK_UNLOCKED(noop_qdisc.q.lock),
	.dev_queue	= &noop_netdev_queue,
};
EXPORT_SYMBOL(noop_qdisc);

static struct Qdisc_ops noqueue_qdisc_ops __read_mostly = {
	.id		= "noqueue",
	.priv_size	= 0,
	.enqueue	= noop_enqueue,
	.dequeue	= noop_dequeue,
	.peek		= noop_dequeue,
	.owner		= THIS_MODULE,
};

static struct Qdisc noqueue_qdisc;
static struct netdev_queue noqueue_netdev_queue = {
	.qdisc		= &noqueue_qdisc,
	.qdisc_sleeping	= &noqueue_qdisc,
};

static struct Qdisc noqueue_qdisc = {
	.enqueue	= NULL,
	.dequeue	= noop_dequeue,
	.flags		= TCQ_F_BUILTIN,
	.ops		= &noqueue_qdisc_ops,
	.list		= LIST_HEAD_INIT(noqueue_qdisc.list),
	.q.lock		= __SPIN_LOCK_UNLOCKED(noqueue_qdisc.q.lock),
	.dev_queue	= &noqueue_netdev_queue,
};


static const u8 prio2band[TC_PRIO_MAX + 1] =
	{ 1, 2, 2, 2, 1, 2, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1 };

/* 3-band FIFO queue: old style, but should be a bit faster than
   generic prio+fifo combination.
 */

#define PFIFO_FAST_BANDS 3

/*
 * Private data for a pfifo_fast scheduler containing:
 *	- queues for the three bands
 *	- bitmap indicating which of the bands contain skbs
 */
struct pfifo_fast_priv {
	u32 bitmap;
	struct sk_buff_head q[PFIFO_FAST_BANDS];
};

/*
 * Convert a bitmap to the first band number where an skb is queued, where:
 *	bitmap=0 means there are no skbs on any band.
 *	bitmap=1 means there is an skb on band 0.
 *	bitmap=7 means there are skbs on all 3 bands, etc.
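 *
 * For example, bitmap=6 (binary 110) means bands 1 and 2 hold skbs;
 * bitmap2band[6] is 1, so band 1 (the higher-priority of the two) is
 * served first.  The table below is simply a precomputed "lowest set
 * bit" (ffs(bitmap) - 1) lookup for the 3-bit bitmap.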
 */
static const int bitmap2band[] = {-1, 0, 1, 0, 2, 0, 1, 0};

static inline struct sk_buff_head *band2list(struct pfifo_fast_priv *priv,
					     int band)
{
	return priv->q + band;
}

static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc)
{
	if (skb_queue_len(&qdisc->q) < qdisc_dev(qdisc)->tx_queue_len) {
		int band = prio2band[skb->priority & TC_PRIO_MAX];
		struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
		struct sk_buff_head *list = band2list(priv, band);

		priv->bitmap |= (1 << band);
		qdisc->q.qlen++;
		return __qdisc_enqueue_tail(skb, qdisc, list);
	}

	return qdisc_drop(skb, qdisc);
}

static struct sk_buff *pfifo_fast_dequeue(struct Qdisc *qdisc)
{
	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
	int band = bitmap2band[priv->bitmap];

	if (likely(band >= 0)) {
		struct sk_buff_head *list = band2list(priv, band);
		struct sk_buff *skb = __qdisc_dequeue_head(qdisc, list);

		qdisc->q.qlen--;
		if (skb_queue_empty(list))
			priv->bitmap &= ~(1 << band);

		return skb;
	}

	return NULL;
}

static struct sk_buff *pfifo_fast_peek(struct Qdisc *qdisc)
{
	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
	int band = bitmap2band[priv->bitmap];

	if (band >= 0) {
		struct sk_buff_head *list = band2list(priv, band);

		return skb_peek(list);
	}

	return NULL;
}

static void pfifo_fast_reset(struct Qdisc *qdisc)
{
	int prio;
	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);

	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++)
		__qdisc_reset_queue(qdisc, band2list(priv, prio));

	priv->bitmap = 0;
	qdisc->qstats.backlog = 0;
	qdisc->q.qlen = 0;
}

static int pfifo_fast_dump(struct Qdisc *qdisc, struct sk_buff *skb)
{
	struct tc_prio_qopt opt = { .bands = PFIFO_FAST_BANDS };

	memcpy(&opt.priomap, prio2band, TC_PRIO_MAX + 1);
	NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
	return skb->len;

nla_put_failure:
	return -1;
}

static int pfifo_fast_init(struct Qdisc *qdisc, struct nlattr *opt)
{
	int prio;
	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);

	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++)
		skb_queue_head_init(band2list(priv, prio));

	return 0;
}

struct Qdisc_ops pfifo_fast_ops __read_mostly = {
	.id		= "pfifo_fast",
	.priv_size	= sizeof(struct pfifo_fast_priv),
	.enqueue	= pfifo_fast_enqueue,
	.dequeue	= pfifo_fast_dequeue,
	.peek		= pfifo_fast_peek,
	.init		= pfifo_fast_init,
	.reset		= pfifo_fast_reset,
	.dump		= pfifo_fast_dump,
	.owner		= THIS_MODULE,
};

struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
			  struct Qdisc_ops *ops)
{
	void *p;
	struct Qdisc *sch;
	unsigned int size;
	int err = -ENOBUFS;

	/* ensure that the Qdisc and the private data are 64-byte aligned */
	size = QDISC_ALIGN(sizeof(*sch));
	size += ops->priv_size + (QDISC_ALIGNTO - 1);

	p = kzalloc(size, GFP_KERNEL);
	if (!p)
		goto errout;
	sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p);
	sch->padded = (char *) sch - (char *) p;

	INIT_LIST_HEAD(&sch->list);
	skb_queue_head_init(&sch->q);
	sch->ops = ops;
	sch->enqueue = ops->enqueue;
	sch->dequeue = ops->dequeue;
	sch->dev_queue = dev_queue;
	dev_hold(qdisc_dev(sch));
	atomic_set(&sch->refcnt, 1);

	return sch;
errout:
	return ERR_PTR(err);
}

struct Qdisc *qdisc_create_dflt(struct net_device *dev,
				struct netdev_queue *dev_queue,
				struct Qdisc_ops *ops,
				unsigned int parentid)
{
	struct Qdisc *sch;

	sch = qdisc_alloc(dev_queue, ops);
	if (IS_ERR(sch))
		goto errout;
	sch->parent = parentid;

	if (!ops->init || ops->init(sch, NULL) == 0)
		return sch;

	qdisc_destroy(sch);
errout:
	return NULL;
}
EXPORT_SYMBOL(qdisc_create_dflt);

/* Under qdisc_lock(qdisc) and BH! */

void qdisc_reset(struct Qdisc *qdisc)
{
	const struct Qdisc_ops *ops = qdisc->ops;

	if (ops->reset)
		ops->reset(qdisc);

	if (qdisc->gso_skb) {
		kfree_skb(qdisc->gso_skb);
		qdisc->gso_skb = NULL;
		qdisc->q.qlen = 0;
	}
}
EXPORT_SYMBOL(qdisc_reset);

static void qdisc_rcu_free(struct rcu_head *head)
{
	struct Qdisc *qdisc = container_of(head, struct Qdisc, rcu_head);

	kfree((char *) qdisc - qdisc->padded);
}

void qdisc_destroy(struct Qdisc *qdisc)
{
	const struct Qdisc_ops *ops = qdisc->ops;

	if (qdisc->flags & TCQ_F_BUILTIN ||
	    !atomic_dec_and_test(&qdisc->refcnt))
		return;

#ifdef CONFIG_NET_SCHED
	qdisc_list_del(qdisc);

	qdisc_put_stab(qdisc->stab);
#endif
	gen_kill_estimator(&qdisc->bstats, &qdisc->rate_est);
	if (ops->reset)
		ops->reset(qdisc);
	if (ops->destroy)
		ops->destroy(qdisc);

	module_put(ops->owner);
	dev_put(qdisc_dev(qdisc));

	kfree_skb(qdisc->gso_skb);
	/*
	 * gen_estimator est_timer() might access qdisc->q.lock,
	 * wait a RCU grace period before freeing qdisc.
	 */
	call_rcu(&qdisc->rcu_head, qdisc_rcu_free);
}
EXPORT_SYMBOL(qdisc_destroy);

/* Attach toplevel qdisc to device queue. */
struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
			      struct Qdisc *qdisc)
{
	struct Qdisc *oqdisc = dev_queue->qdisc_sleeping;
	spinlock_t *root_lock;

	root_lock = qdisc_lock(oqdisc);
	spin_lock_bh(root_lock);

	/* Prune old scheduler */
	if (oqdisc && atomic_read(&oqdisc->refcnt) <= 1)
		qdisc_reset(oqdisc);

	/* ... and graft new one */
	if (qdisc == NULL)
		qdisc = &noop_qdisc;
	dev_queue->qdisc_sleeping = qdisc;
	rcu_assign_pointer(dev_queue->qdisc, &noop_qdisc);

	spin_unlock_bh(root_lock);

	return oqdisc;
}

static void attach_one_default_qdisc(struct net_device *dev,
				     struct netdev_queue *dev_queue,
				     void *_unused)
{
	struct Qdisc *qdisc;

	if (dev->tx_queue_len) {
		qdisc = qdisc_create_dflt(dev, dev_queue,
					  &pfifo_fast_ops, TC_H_ROOT);
		if (!qdisc) {
			printk(KERN_INFO "%s: activation failed\n", dev->name);
			return;
		}

		/* Can by-pass the queue discipline for default qdisc */
		qdisc->flags |= TCQ_F_CAN_BYPASS;
	} else {
		qdisc = &noqueue_qdisc;
	}
	dev_queue->qdisc_sleeping = qdisc;
}

static void attach_default_qdiscs(struct net_device *dev)
{
	struct netdev_queue *txq;
	struct Qdisc *qdisc;

	txq = netdev_get_tx_queue(dev, 0);

	if (!netif_is_multiqueue(dev) || dev->tx_queue_len == 0) {
		netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL);
		dev->qdisc = txq->qdisc_sleeping;
		atomic_inc(&dev->qdisc->refcnt);
	} else {
		qdisc = qdisc_create_dflt(dev, txq, &mq_qdisc_ops, TC_H_ROOT);
		if (qdisc) {
			qdisc->ops->attach(qdisc);
			dev->qdisc = qdisc;
		}
	}
}

static void transition_one_qdisc(struct net_device *dev,
				 struct netdev_queue *dev_queue,
				 void *_need_watchdog)
{
	struct Qdisc *new_qdisc = dev_queue->qdisc_sleeping;
	int *need_watchdog_p = _need_watchdog;

	if (!(new_qdisc->flags & TCQ_F_BUILTIN))
		clear_bit(__QDISC_STATE_DEACTIVATED, &new_qdisc->state);

	rcu_assign_pointer(dev_queue->qdisc, new_qdisc);
	if (need_watchdog_p && new_qdisc != &noqueue_qdisc) {
		dev_queue->trans_start = 0;
		*need_watchdog_p = 1;
	}
}

void dev_activate(struct net_device *dev)
{
	int need_watchdog;

	/* No queueing discipline is attached to device;
	   create default one i.e.
	   pfifo_fast for devices,
	   which need queueing and noqueue_qdisc for
	   virtual interfaces
	 */

	if (dev->qdisc == &noop_qdisc)
		attach_default_qdiscs(dev);

	if (!netif_carrier_ok(dev))
		/* Delay activation until next carrier-on event */
		return;

	need_watchdog = 0;
	netdev_for_each_tx_queue(dev, transition_one_qdisc, &need_watchdog);
	transition_one_qdisc(dev, &dev->rx_queue, NULL);

	if (need_watchdog) {
		dev->trans_start = jiffies;
		dev_watchdog_up(dev);
	}
}

static void dev_deactivate_queue(struct net_device *dev,
				 struct netdev_queue *dev_queue,
				 void *_qdisc_default)
{
	struct Qdisc *qdisc_default = _qdisc_default;
	struct Qdisc *qdisc;

	qdisc = dev_queue->qdisc;
	if (qdisc) {
		spin_lock_bh(qdisc_lock(qdisc));

		if (!(qdisc->flags & TCQ_F_BUILTIN))
			set_bit(__QDISC_STATE_DEACTIVATED, &qdisc->state);

		rcu_assign_pointer(dev_queue->qdisc, qdisc_default);
		qdisc_reset(qdisc);

		spin_unlock_bh(qdisc_lock(qdisc));
	}
}

static bool some_qdisc_is_busy(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *dev_queue;
		spinlock_t *root_lock;
		struct Qdisc *q;
		int val;

		dev_queue = netdev_get_tx_queue(dev, i);
		q = dev_queue->qdisc_sleeping;
		root_lock = qdisc_lock(q);

		spin_lock_bh(root_lock);

		val = (test_bit(__QDISC_STATE_RUNNING, &q->state) ||
		       test_bit(__QDISC_STATE_SCHED, &q->state));

		spin_unlock_bh(root_lock);

		if (val)
			return true;
	}
	return false;
}

void dev_deactivate(struct net_device *dev)
{
	netdev_for_each_tx_queue(dev, dev_deactivate_queue, &noop_qdisc);
	dev_deactivate_queue(dev, &dev->rx_queue, &noop_qdisc);

	dev_watchdog_down(dev);

	/* Wait for outstanding qdisc-less dev_queue_xmit calls. */
	synchronize_rcu();

	/* Wait for outstanding qdisc_run calls. */
	while (some_qdisc_is_busy(dev))
		yield();
}

static void dev_init_scheduler_queue(struct net_device *dev,
				     struct netdev_queue *dev_queue,
				     void *_qdisc)
{
	struct Qdisc *qdisc = _qdisc;

	dev_queue->qdisc = qdisc;
	dev_queue->qdisc_sleeping = qdisc;
}

void dev_init_scheduler(struct net_device *dev)
{
	dev->qdisc = &noop_qdisc;
	netdev_for_each_tx_queue(dev, dev_init_scheduler_queue, &noop_qdisc);
	dev_init_scheduler_queue(dev, &dev->rx_queue, &noop_qdisc);

	setup_timer(&dev->watchdog_timer, dev_watchdog, (unsigned long)dev);
}

static void shutdown_scheduler_queue(struct net_device *dev,
				     struct netdev_queue *dev_queue,
				     void *_qdisc_default)
{
	struct Qdisc *qdisc = dev_queue->qdisc_sleeping;
	struct Qdisc *qdisc_default = _qdisc_default;

	if (qdisc) {
		rcu_assign_pointer(dev_queue->qdisc, qdisc_default);
		dev_queue->qdisc_sleeping = qdisc_default;

		qdisc_destroy(qdisc);
	}
}

void dev_shutdown(struct net_device *dev)
{
	netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc);
	shutdown_scheduler_queue(dev, &dev->rx_queue, &noop_qdisc);
	qdisc_destroy(dev->qdisc);
	dev->qdisc = &noop_qdisc;

	WARN_ON(timer_pending(&dev->watchdog_timer));
}
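
/*
 * Illustrative sketch (not part of the original file): a minimal
 * single-band FIFO qdisc built from the same helpers pfifo_fast uses
 * above (qdisc_priv, __qdisc_enqueue_tail, __qdisc_dequeue_head,
 * __qdisc_reset_queue).  The "example_fifo" name is hypothetical; a
 * real qdisc would live in its own module, also implement .dump, and
 * be registered with register_qdisc() from sch_api.c rather than
 * being compiled into this file.
 */
struct example_fifo_priv {
	struct sk_buff_head q;		/* a single band */
};

static int example_fifo_enqueue(struct sk_buff *skb, struct Qdisc *qdisc)
{
	struct example_fifo_priv *priv = qdisc_priv(qdisc);

	/* same length bound pfifo_fast applies: the device tx_queue_len */
	if (skb_queue_len(&qdisc->q) < qdisc_dev(qdisc)->tx_queue_len) {
		qdisc->q.qlen++;
		return __qdisc_enqueue_tail(skb, qdisc, &priv->q);
	}

	return qdisc_drop(skb, qdisc);
}

static struct sk_buff *example_fifo_dequeue(struct Qdisc *qdisc)
{
	struct example_fifo_priv *priv = qdisc_priv(qdisc);
	struct sk_buff *skb = __qdisc_dequeue_head(qdisc, &priv->q);

	if (skb)
		qdisc->q.qlen--;
	return skb;
}

static struct sk_buff *example_fifo_peek(struct Qdisc *qdisc)
{
	struct example_fifo_priv *priv = qdisc_priv(qdisc);

	return skb_peek(&priv->q);
}

static void example_fifo_reset(struct Qdisc *qdisc)
{
	struct example_fifo_priv *priv = qdisc_priv(qdisc);

	__qdisc_reset_queue(qdisc, &priv->q);
	qdisc->qstats.backlog = 0;
	qdisc->q.qlen = 0;
}

static int example_fifo_init(struct Qdisc *qdisc, struct nlattr *opt)
{
	struct example_fifo_priv *priv = qdisc_priv(qdisc);

	skb_queue_head_init(&priv->q);
	return 0;
}

static struct Qdisc_ops example_fifo_ops __read_mostly = {
	.id		= "example_fifo",
	.priv_size	= sizeof(struct example_fifo_priv),
	.enqueue	= example_fifo_enqueue,
	.dequeue	= example_fifo_dequeue,
	.peek		= example_fifo_peek,
	.init		= example_fifo_init,
	.reset		= example_fifo_reset,
	.owner		= THIS_MODULE,
};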