/*
 * net/sched/sch_generic.c	Generic packet scheduler routines.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *		Jamal Hadi Salim, <hadi@cyberus.ca> 990601
 *		- Ingress support
 */

#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/rcupdate.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/if_vlan.h>
#include <net/sch_generic.h>
#include <net/pkt_sched.h>
#include <net/dst.h>

/* Qdisc to use by default */
const struct Qdisc_ops *default_qdisc_ops = &pfifo_fast_ops;
EXPORT_SYMBOL(default_qdisc_ops);

/* Main transmission queue. */

/* Modifications to data participating in scheduling must be protected with
 * the qdisc_lock(qdisc) spinlock.
 *
 * The idea is the following:
 * - enqueue, dequeue are serialized via qdisc root lock
 * - ingress filtering is also serialized via qdisc root lock
 * - updates to tree and tree walking are only done under the rtnl mutex.
 */

static inline int dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
{
	q->gso_skb = skb;
	q->qstats.requeues++;
	q->q.qlen++;	/* it's still part of the queue */
	__netif_schedule(q);

	return 0;
}

static void try_bulk_dequeue_skb(struct Qdisc *q,
				 struct sk_buff *skb,
				 const struct netdev_queue *txq,
				 int *packets)
{
	int bytelimit = qdisc_avail_bulklimit(txq) - skb->len;

	while (bytelimit > 0) {
		struct sk_buff *nskb = q->dequeue(q);

		if (!nskb)
			break;

		bytelimit -= nskb->len; /* covers GSO len */
		skb->next = nskb;
		skb = nskb;
		(*packets)++; /* GSO counts as one pkt */
	}
	skb->next = NULL;
}

/* Note that dequeue_skb() can possibly return an SKB list (via skb->next).
 * A requeued skb (via q->gso_skb) can also be an SKB list.
 */
static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate,
				   int *packets)
{
	struct sk_buff *skb = q->gso_skb;
	const struct netdev_queue *txq = q->dev_queue;

	*packets = 1;
	*validate = true;
	if (unlikely(skb)) {
		/* check the reason for requeuing without tx lock first */
		txq = skb_get_tx_queue(txq->dev, skb);
		if (!netif_xmit_frozen_or_stopped(txq)) {
			q->gso_skb = NULL;
			q->q.qlen--;
		} else
			skb = NULL;
		/* skbs in gso_skb were already validated */
		*validate = false;
	} else {
		if (!(q->flags & TCQ_F_ONETXQUEUE) ||
		    !netif_xmit_frozen_or_stopped(txq)) {
			skb = q->dequeue(q);
			if (skb && qdisc_may_bulk(q))
				try_bulk_dequeue_skb(q, skb, txq, packets);
		}
	}
	return skb;
}
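/* Worked example for the bulk dequeue above (added illustration, not part
 * of the original source): with qdisc_avail_bulklimit(txq) returning a BQL
 * budget of 10000 bytes and a first skb of 1500 bytes,
 * try_bulk_dequeue_skb() starts with bytelimit = 8500 and keeps linking
 * packets through skb->next until the budget is spent or the qdisc runs
 * empty, e.g.:
 *
 *	skb(1500) -> nskb(1500) -> nskb(7000, GSO) -> NULL
 *
 * The 7000 byte GSO skb still counts as a single packet in *packets.
 */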
/*
 * Transmit possibly several skbs, and handle the return status as
 * required.  Owning the running seqcount guarantees that only one CPU
 * can execute this function.
 *
 * Returns to the caller:
 *	0  - queue is empty or throttled.
 *	>0 - queue is not empty.
 */
int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
		    struct net_device *dev, struct netdev_queue *txq,
		    spinlock_t *root_lock, bool validate)
{
	int ret = NETDEV_TX_BUSY;

	/* And release qdisc */
	spin_unlock(root_lock);

	/* Note that we validate skb (GSO, checksum, ...) outside of locks */
	if (validate)
		skb = validate_xmit_skb_list(skb, dev);

	if (likely(skb)) {
		HARD_TX_LOCK(dev, txq, smp_processor_id());
		if (!netif_xmit_frozen_or_stopped(txq))
			skb = dev_hard_start_xmit(skb, dev, txq, &ret);

		HARD_TX_UNLOCK(dev, txq);
	} else {
		spin_lock(root_lock);
		return qdisc_qlen(q);
	}
	spin_lock(root_lock);

	if (dev_xmit_complete(ret)) {
		/* Driver sent out skb successfully or skb was consumed */
		ret = qdisc_qlen(q);
	} else {
		/* Driver returned NETDEV_TX_BUSY - requeue skb */
		if (unlikely(ret != NETDEV_TX_BUSY))
			net_warn_ratelimited("BUG %s code %d qlen %d\n",
					     dev->name, ret, q->q.qlen);

		ret = dev_requeue_skb(skb, q);
	}

	if (ret && netif_xmit_frozen_or_stopped(txq))
		ret = 0;

	return ret;
}

/*
 * NOTE: Called under qdisc_lock(q) with locally disabled BH.
 *
 * The running seqcount guarantees that only one CPU can process
 * this qdisc at a time.  qdisc_lock(q) serializes queue accesses for
 * this queue.
 *
 * netif_tx_lock serializes accesses to the device driver.
 *
 * qdisc_lock(q) and netif_tx_lock are mutually exclusive:
 * if one is grabbed, the other must be free.
 *
 * Note that this procedure can be called by a watchdog timer.
 *
 * Returns to the caller:
 *	0  - queue is empty or throttled.
 *	>0 - queue is not empty.
 */
static inline int qdisc_restart(struct Qdisc *q, int *packets)
{
	struct netdev_queue *txq;
	struct net_device *dev;
	spinlock_t *root_lock;
	struct sk_buff *skb;
	bool validate;

	/* Dequeue packet */
	skb = dequeue_skb(q, &validate, packets);
	if (unlikely(!skb))
		return 0;

	root_lock = qdisc_lock(q);
	dev = qdisc_dev(q);
	txq = skb_get_tx_queue(dev, skb);

	return sch_direct_xmit(skb, q, dev, txq, root_lock, validate);
}
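/* Lock hand-off in sch_direct_xmit(), summarized (added note):
 *
 *	spin_unlock(root_lock);		// qdisc root lock dropped
 *	validate_xmit_skb_list();	// GSO/csum work done lock-free
 *	HARD_TX_LOCK(dev, txq, cpu);	// driver serialized per tx queue
 *	dev_hard_start_xmit();
 *	HARD_TX_UNLOCK(dev, txq);
 *	spin_lock(root_lock);		// root lock retaken before return
 *
 * This keeps the expensive skb validation outside both locks and never
 * holds the root lock and the tx lock at the same time.
 */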
void __qdisc_run(struct Qdisc *q)
{
	int quota = weight_p;
	int packets;

	while (qdisc_restart(q, &packets)) {
		/*
		 * Ordered by possible occurrence: postpone processing if
		 * 1. we've exceeded the packet quota;
		 * 2. another process needs the CPU.
		 */
		quota -= packets;
		if (quota <= 0 || need_resched()) {
			__netif_schedule(q);
			break;
		}
	}

	qdisc_run_end(q);
}

unsigned long dev_trans_start(struct net_device *dev)
{
	unsigned long val, res;
	unsigned int i;

	if (is_vlan_dev(dev))
		dev = vlan_dev_real_dev(dev);
	res = netdev_get_tx_queue(dev, 0)->trans_start;
	for (i = 1; i < dev->num_tx_queues; i++) {
		val = netdev_get_tx_queue(dev, i)->trans_start;
		if (val && time_after(val, res))
			res = val;
	}

	return res;
}
EXPORT_SYMBOL(dev_trans_start);

static void dev_watchdog(unsigned long arg)
{
	struct net_device *dev = (struct net_device *)arg;

	netif_tx_lock(dev);
	if (!qdisc_tx_is_noop(dev)) {
		if (netif_device_present(dev) &&
		    netif_running(dev) &&
		    netif_carrier_ok(dev)) {
			int some_queue_timedout = 0;
			unsigned int i;
			unsigned long trans_start;

			for (i = 0; i < dev->num_tx_queues; i++) {
				struct netdev_queue *txq;

				txq = netdev_get_tx_queue(dev, i);
				trans_start = txq->trans_start;
				if (netif_xmit_stopped(txq) &&
				    time_after(jiffies, (trans_start +
							 dev->watchdog_timeo))) {
					some_queue_timedout = 1;
					txq->trans_timeout++;
					break;
				}
			}

			if (some_queue_timedout) {
				WARN_ONCE(1, KERN_INFO "NETDEV WATCHDOG: %s (%s): transmit queue %u timed out\n",
					  dev->name, netdev_drivername(dev), i);
				dev->netdev_ops->ndo_tx_timeout(dev);
			}
			if (!mod_timer(&dev->watchdog_timer,
				       round_jiffies(jiffies +
						     dev->watchdog_timeo)))
				dev_hold(dev);
		}
	}
	netif_tx_unlock(dev);

	dev_put(dev);
}

void __netdev_watchdog_up(struct net_device *dev)
{
	if (dev->netdev_ops->ndo_tx_timeout) {
		if (dev->watchdog_timeo <= 0)
			dev->watchdog_timeo = 5*HZ;
		if (!mod_timer(&dev->watchdog_timer,
			       round_jiffies(jiffies + dev->watchdog_timeo)))
			dev_hold(dev);
	}
}

static void dev_watchdog_up(struct net_device *dev)
{
	__netdev_watchdog_up(dev);
}

static void dev_watchdog_down(struct net_device *dev)
{
	netif_tx_lock_bh(dev);
	if (del_timer(&dev->watchdog_timer))
		dev_put(dev);
	netif_tx_unlock_bh(dev);
}

/**
 *	netif_carrier_on - set carrier
 *	@dev: network device
 *
 *	Device has detected acquisition of carrier.
 */
void netif_carrier_on(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_NOCARRIER, &dev->state)) {
		if (dev->reg_state == NETREG_UNINITIALIZED)
			return;
		atomic_inc(&dev->carrier_changes);
		linkwatch_fire_event(dev);
		if (netif_running(dev))
			__netdev_watchdog_up(dev);
	}
}
EXPORT_SYMBOL(netif_carrier_on);

/**
 *	netif_carrier_off - clear carrier
 *	@dev: network device
 *
 *	Device has detected loss of carrier.
 */
void netif_carrier_off(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_NOCARRIER, &dev->state)) {
		if (dev->reg_state == NETREG_UNINITIALIZED)
			return;
		atomic_inc(&dev->carrier_changes);
		linkwatch_fire_event(dev);
	}
}
EXPORT_SYMBOL(netif_carrier_off);
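/* Typical driver usage of the carrier helpers (illustrative sketch with
 * hypothetical foo_* names, not from this file):
 *
 *	static void foo_link_change_irq(struct foo_priv *fp)
 *	{
 *		if (foo_phy_link_up(fp))
 *			netif_carrier_on(fp->netdev);
 *		else
 *			netif_carrier_off(fp->netdev);
 *	}
 *
 * Both helpers are edge-triggered on __LINK_STATE_NOCARRIER: only a real
 * state change bumps carrier_changes and fires a linkwatch event.
 */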
/* "NOOP" scheduler: the best scheduler, recommended for all interfaces
   under all circumstances. It is difficult to invent anything faster or
   cheaper.
 */

static int noop_enqueue(struct sk_buff *skb, struct Qdisc *qdisc)
{
	kfree_skb(skb);
	return NET_XMIT_CN;
}

static struct sk_buff *noop_dequeue(struct Qdisc *qdisc)
{
	return NULL;
}

struct Qdisc_ops noop_qdisc_ops __read_mostly = {
	.id		=	"noop",
	.priv_size	=	0,
	.enqueue	=	noop_enqueue,
	.dequeue	=	noop_dequeue,
	.peek		=	noop_dequeue,
	.owner		=	THIS_MODULE,
};

static struct netdev_queue noop_netdev_queue = {
	.qdisc		=	&noop_qdisc,
	.qdisc_sleeping	=	&noop_qdisc,
};

struct Qdisc noop_qdisc = {
	.enqueue	=	noop_enqueue,
	.dequeue	=	noop_dequeue,
	.flags		=	TCQ_F_BUILTIN,
	.ops		=	&noop_qdisc_ops,
	.list		=	LIST_HEAD_INIT(noop_qdisc.list),
	.q.lock		=	__SPIN_LOCK_UNLOCKED(noop_qdisc.q.lock),
	.dev_queue	=	&noop_netdev_queue,
	.running	=	SEQCNT_ZERO(noop_qdisc.running),
	.busylock	=	__SPIN_LOCK_UNLOCKED(noop_qdisc.busylock),
};
EXPORT_SYMBOL(noop_qdisc);

static int noqueue_init(struct Qdisc *qdisc, struct nlattr *opt)
{
	/* register_qdisc() assigns a default of noop_enqueue if unset,
	 * but __dev_queue_xmit() treats noqueue only as such
	 * if this is NULL - so clear it here.
	 */
	qdisc->enqueue = NULL;
	return 0;
}

struct Qdisc_ops noqueue_qdisc_ops __read_mostly = {
	.id		=	"noqueue",
	.priv_size	=	0,
	.init		=	noqueue_init,
	.enqueue	=	noop_enqueue,
	.dequeue	=	noop_dequeue,
	.peek		=	noop_dequeue,
	.owner		=	THIS_MODULE,
};

static const u8 prio2band[TC_PRIO_MAX + 1] = {
	1, 2, 2, 2, 1, 2, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1
};

/* 3-band FIFO queue: old style, but should be a bit faster than
   generic prio+fifo combination.
 */

#define PFIFO_FAST_BANDS 3

/*
 * Private data for a pfifo_fast scheduler containing:
 *	- queues for the three bands
 *	- bitmap indicating which of the bands contain skbs
 */
struct pfifo_fast_priv {
	u32 bitmap;
	struct sk_buff_head q[PFIFO_FAST_BANDS];
};
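/* Worked example (added note): TC_PRIO_BESTEFFORT (0) maps to band 1 via
 * prio2band[], TC_PRIO_INTERACTIVE (6) to band 0 and TC_PRIO_BULK (2) to
 * band 2.  If bands 0 and 2 hold skbs, bitmap == 0b101 == 5 and
 * bitmap2band[5] below resolves to band 0, i.e. the highest-priority
 * non-empty band is found with a single table lookup.
 */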
/*
 * Convert a bitmap to the first band number where an skb is queued, where:
 *	bitmap=0 means there are no skbs on any band.
 *	bitmap=1 means there is an skb on band 0.
 *	bitmap=7 means there are skbs on all 3 bands, etc.
 */
static const int bitmap2band[] = {-1, 0, 1, 0, 2, 0, 1, 0};

static inline struct sk_buff_head *band2list(struct pfifo_fast_priv *priv,
					     int band)
{
	return priv->q + band;
}

static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc)
{
	if (skb_queue_len(&qdisc->q) < qdisc_dev(qdisc)->tx_queue_len) {
		int band = prio2band[skb->priority & TC_PRIO_MAX];
		struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
		struct sk_buff_head *list = band2list(priv, band);

		priv->bitmap |= (1 << band);
		qdisc->q.qlen++;
		return __qdisc_enqueue_tail(skb, qdisc, list);
	}

	return qdisc_drop(skb, qdisc);
}

static struct sk_buff *pfifo_fast_dequeue(struct Qdisc *qdisc)
{
	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
	int band = bitmap2band[priv->bitmap];

	if (likely(band >= 0)) {
		struct sk_buff_head *list = band2list(priv, band);
		struct sk_buff *skb = __qdisc_dequeue_head(qdisc, list);

		qdisc->q.qlen--;
		if (skb_queue_empty(list))
			priv->bitmap &= ~(1 << band);

		return skb;
	}

	return NULL;
}

static struct sk_buff *pfifo_fast_peek(struct Qdisc *qdisc)
{
	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
	int band = bitmap2band[priv->bitmap];

	if (band >= 0) {
		struct sk_buff_head *list = band2list(priv, band);

		return skb_peek(list);
	}

	return NULL;
}

static void pfifo_fast_reset(struct Qdisc *qdisc)
{
	int prio;
	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);

	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++)
		__qdisc_reset_queue(qdisc, band2list(priv, prio));

	priv->bitmap = 0;
	qdisc->qstats.backlog = 0;
	qdisc->q.qlen = 0;
}

static int pfifo_fast_dump(struct Qdisc *qdisc, struct sk_buff *skb)
{
	struct tc_prio_qopt opt = { .bands = PFIFO_FAST_BANDS };

	memcpy(&opt.priomap, prio2band, TC_PRIO_MAX + 1);
	if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
		goto nla_put_failure;
	return skb->len;

nla_put_failure:
	return -1;
}

static int pfifo_fast_init(struct Qdisc *qdisc, struct nlattr *opt)
{
	int prio;
	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);

	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++)
		__skb_queue_head_init(band2list(priv, prio));

	/* Can by-pass the queue discipline */
	qdisc->flags |= TCQ_F_CAN_BYPASS;
	return 0;
}

struct Qdisc_ops pfifo_fast_ops __read_mostly = {
	.id		=	"pfifo_fast",
	.priv_size	=	sizeof(struct pfifo_fast_priv),
	.enqueue	=	pfifo_fast_enqueue,
	.dequeue	=	pfifo_fast_dequeue,
	.peek		=	pfifo_fast_peek,
	.init		=	pfifo_fast_init,
	.reset		=	pfifo_fast_reset,
	.dump		=	pfifo_fast_dump,
	.owner		=	THIS_MODULE,
};
EXPORT_SYMBOL(pfifo_fast_ops);
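/* Note on TCQ_F_CAN_BYPASS (added): with the flag set in
 * pfifo_fast_init(), the core transmit path may hand an skb straight to
 * the driver when this qdisc is empty and not running, skipping the
 * enqueue/dequeue round trip; under contention it falls back to the
 * normal enqueue path.
 */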
static struct lock_class_key qdisc_tx_busylock;
static struct lock_class_key qdisc_running_key;

struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
			  const struct Qdisc_ops *ops)
{
	void *p;
	struct Qdisc *sch;
	unsigned int size = QDISC_ALIGN(sizeof(*sch)) + ops->priv_size;
	int err = -ENOBUFS;
	struct net_device *dev = dev_queue->dev;

	p = kzalloc_node(size, GFP_KERNEL,
			 netdev_queue_numa_node_read(dev_queue));

	if (!p)
		goto errout;
	sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p);
	/* if we got non-aligned memory, ask for more and do the alignment
	 * ourselves
	 */
	if (sch != p) {
		kfree(p);
		p = kzalloc_node(size + QDISC_ALIGNTO - 1, GFP_KERNEL,
				 netdev_queue_numa_node_read(dev_queue));
		if (!p)
			goto errout;
		sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p);
		sch->padded = (char *) sch - (char *) p;
	}
	INIT_LIST_HEAD(&sch->list);
	skb_queue_head_init(&sch->q);

	spin_lock_init(&sch->busylock);
	lockdep_set_class(&sch->busylock,
			  dev->qdisc_tx_busylock ?: &qdisc_tx_busylock);

	seqcount_init(&sch->running);
	lockdep_set_class(&sch->running,
			  dev->qdisc_running_key ?: &qdisc_running_key);

	sch->ops = ops;
	sch->enqueue = ops->enqueue;
	sch->dequeue = ops->dequeue;
	sch->dev_queue = dev_queue;
	dev_hold(dev);
	atomic_set(&sch->refcnt, 1);

	return sch;
errout:
	return ERR_PTR(err);
}

struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
				const struct Qdisc_ops *ops,
				unsigned int parentid)
{
	struct Qdisc *sch;

	if (!try_module_get(ops->owner))
		goto errout;

	sch = qdisc_alloc(dev_queue, ops);
	if (IS_ERR(sch))
		goto errout;
	sch->parent = parentid;

	if (!ops->init || ops->init(sch, NULL) == 0)
		return sch;

	qdisc_destroy(sch);
errout:
	return NULL;
}
EXPORT_SYMBOL(qdisc_create_dflt);

/* Under qdisc_lock(qdisc) and BH! */

void qdisc_reset(struct Qdisc *qdisc)
{
	const struct Qdisc_ops *ops = qdisc->ops;

	if (ops->reset)
		ops->reset(qdisc);

	if (qdisc->gso_skb) {
		kfree_skb_list(qdisc->gso_skb);
		qdisc->gso_skb = NULL;
		qdisc->q.qlen = 0;
	}
}
EXPORT_SYMBOL(qdisc_reset);

static void qdisc_rcu_free(struct rcu_head *head)
{
	struct Qdisc *qdisc = container_of(head, struct Qdisc, rcu_head);

	if (qdisc_is_percpu_stats(qdisc)) {
		free_percpu(qdisc->cpu_bstats);
		free_percpu(qdisc->cpu_qstats);
	}

	kfree((char *) qdisc - qdisc->padded);
}

void qdisc_destroy(struct Qdisc *qdisc)
{
	const struct Qdisc_ops *ops = qdisc->ops;

	if (qdisc->flags & TCQ_F_BUILTIN ||
	    !atomic_dec_and_test(&qdisc->refcnt))
		return;

#ifdef CONFIG_NET_SCHED
	qdisc_list_del(qdisc);

	qdisc_put_stab(rtnl_dereference(qdisc->stab));
#endif
	gen_kill_estimator(&qdisc->bstats, &qdisc->rate_est);
	if (ops->reset)
		ops->reset(qdisc);
	if (ops->destroy)
		ops->destroy(qdisc);

	module_put(ops->owner);
	dev_put(qdisc_dev(qdisc));

	kfree_skb_list(qdisc->gso_skb);
	/*
	 * gen_estimator est_timer() might access qdisc->q.lock,
	 * so wait an RCU grace period before freeing qdisc.
	 */
	call_rcu(&qdisc->rcu_head, qdisc_rcu_free);
}
EXPORT_SYMBOL(qdisc_destroy);
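/* Illustrative call sequence (added sketch, error handling elided):
 *
 *	struct Qdisc *q;
 *
 *	q = qdisc_create_dflt(dev_queue, &pfifo_fast_ops, TC_H_ROOT);
 *	if (!q)
 *		return;			// allocation or ->init() failed
 *	...
 *	qdisc_destroy(q);		// drops the ref from qdisc_alloc()
 *
 * qdisc_destroy() defers the final kfree() to an RCU callback so that
 * concurrent users such as the gen_estimator timer stay safe.
 */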
/* Attach toplevel qdisc to device queue. */
struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
			      struct Qdisc *qdisc)
{
	struct Qdisc *oqdisc = dev_queue->qdisc_sleeping;
	spinlock_t *root_lock;

	root_lock = qdisc_lock(oqdisc);
	spin_lock_bh(root_lock);

	/* Prune old scheduler */
	if (oqdisc && atomic_read(&oqdisc->refcnt) <= 1)
		qdisc_reset(oqdisc);

	/* ... and graft new one */
	if (qdisc == NULL)
		qdisc = &noop_qdisc;
	dev_queue->qdisc_sleeping = qdisc;
	rcu_assign_pointer(dev_queue->qdisc, &noop_qdisc);

	spin_unlock_bh(root_lock);

	return oqdisc;
}
EXPORT_SYMBOL(dev_graft_qdisc);

static void attach_one_default_qdisc(struct net_device *dev,
				     struct netdev_queue *dev_queue,
				     void *_unused)
{
	struct Qdisc *qdisc;
	const struct Qdisc_ops *ops = default_qdisc_ops;

	if (dev->priv_flags & IFF_NO_QUEUE)
		ops = &noqueue_qdisc_ops;

	qdisc = qdisc_create_dflt(dev_queue, ops, TC_H_ROOT);
	if (!qdisc) {
		netdev_info(dev, "activation failed\n");
		return;
	}
	if (!netif_is_multiqueue(dev))
		qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
	dev_queue->qdisc_sleeping = qdisc;
}

static void attach_default_qdiscs(struct net_device *dev)
{
	struct netdev_queue *txq;
	struct Qdisc *qdisc;

	txq = netdev_get_tx_queue(dev, 0);

	if (!netif_is_multiqueue(dev) ||
	    dev->priv_flags & IFF_NO_QUEUE) {
		netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL);
		dev->qdisc = txq->qdisc_sleeping;
		atomic_inc(&dev->qdisc->refcnt);
	} else {
		qdisc = qdisc_create_dflt(txq, &mq_qdisc_ops, TC_H_ROOT);
		if (qdisc) {
			dev->qdisc = qdisc;
			qdisc->ops->attach(qdisc);
		}
	}
}

static void transition_one_qdisc(struct net_device *dev,
				 struct netdev_queue *dev_queue,
				 void *_need_watchdog)
{
	struct Qdisc *new_qdisc = dev_queue->qdisc_sleeping;
	int *need_watchdog_p = _need_watchdog;

	if (!(new_qdisc->flags & TCQ_F_BUILTIN))
		clear_bit(__QDISC_STATE_DEACTIVATED, &new_qdisc->state);

	rcu_assign_pointer(dev_queue->qdisc, new_qdisc);
	if (need_watchdog_p) {
		dev_queue->trans_start = 0;
		*need_watchdog_p = 1;
	}
}

void dev_activate(struct net_device *dev)
{
	int need_watchdog;

	/* No queueing discipline is attached to device;
	 * create a default one for devices that need queueing,
	 * and noqueue_qdisc for virtual interfaces.
	 */

	if (dev->qdisc == &noop_qdisc)
		attach_default_qdiscs(dev);

	if (!netif_carrier_ok(dev))
		/* Delay activation until next carrier-on event */
		return;

	need_watchdog = 0;
	netdev_for_each_tx_queue(dev, transition_one_qdisc, &need_watchdog);
	if (dev_ingress_queue(dev))
		transition_one_qdisc(dev, dev_ingress_queue(dev), NULL);

	if (need_watchdog) {
		netif_trans_update(dev);
		dev_watchdog_up(dev);
	}
}
EXPORT_SYMBOL(dev_activate);

static void dev_deactivate_queue(struct net_device *dev,
				 struct netdev_queue *dev_queue,
				 void *_qdisc_default)
{
	struct Qdisc *qdisc_default = _qdisc_default;
	struct Qdisc *qdisc;

	qdisc = rtnl_dereference(dev_queue->qdisc);
	if (qdisc) {
		spin_lock_bh(qdisc_lock(qdisc));

		if (!(qdisc->flags & TCQ_F_BUILTIN))
			set_bit(__QDISC_STATE_DEACTIVATED, &qdisc->state);

		rcu_assign_pointer(dev_queue->qdisc, qdisc_default);
		qdisc_reset(qdisc);

		spin_unlock_bh(qdisc_lock(qdisc));
	}
}
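/* Sequencing note (added): transition_one_qdisc() publishes
 * dev_queue->qdisc_sleeping into dev_queue->qdisc at activation time;
 * dev_deactivate_queue() above points dev_queue->qdisc back at noop_qdisc
 * (so newly arriving packets are dropped) while qdisc_sleeping keeps the
 * configuration for the next dev_activate().
 */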
static bool some_qdisc_is_busy(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *dev_queue;
		spinlock_t *root_lock;
		struct Qdisc *q;
		int val;

		dev_queue = netdev_get_tx_queue(dev, i);
		q = dev_queue->qdisc_sleeping;
		root_lock = qdisc_lock(q);

		spin_lock_bh(root_lock);

		val = (qdisc_is_running(q) ||
		       test_bit(__QDISC_STATE_SCHED, &q->state));

		spin_unlock_bh(root_lock);

		if (val)
			return true;
	}
	return false;
}

/**
 *	dev_deactivate_many - deactivate transmissions on several devices
 *	@head: list of devices to deactivate
 *
 *	This function returns only when all outstanding transmissions
 *	have completed, unless all devices are in dismantle phase.
 */
void dev_deactivate_many(struct list_head *head)
{
	struct net_device *dev;
	bool sync_needed = false;

	list_for_each_entry(dev, head, close_list) {
		netdev_for_each_tx_queue(dev, dev_deactivate_queue,
					 &noop_qdisc);
		if (dev_ingress_queue(dev))
			dev_deactivate_queue(dev, dev_ingress_queue(dev),
					     &noop_qdisc);

		dev_watchdog_down(dev);
		sync_needed |= !dev->dismantle;
	}

	/* Wait for outstanding qdisc-less dev_queue_xmit calls.
	 * This is avoided if all devices are in dismantle phase:
	 * the caller will call synchronize_net() for us.
	 */
	if (sync_needed)
		synchronize_net();

	/* Wait for outstanding qdisc_run calls. */
	list_for_each_entry(dev, head, close_list)
		while (some_qdisc_is_busy(dev))
			yield();
}

void dev_deactivate(struct net_device *dev)
{
	LIST_HEAD(single);

	list_add(&dev->close_list, &single);
	dev_deactivate_many(&single);
	list_del(&single);
}
EXPORT_SYMBOL(dev_deactivate);

static void dev_init_scheduler_queue(struct net_device *dev,
				     struct netdev_queue *dev_queue,
				     void *_qdisc)
{
	struct Qdisc *qdisc = _qdisc;

	rcu_assign_pointer(dev_queue->qdisc, qdisc);
	dev_queue->qdisc_sleeping = qdisc;
}

void dev_init_scheduler(struct net_device *dev)
{
	dev->qdisc = &noop_qdisc;
	netdev_for_each_tx_queue(dev, dev_init_scheduler_queue, &noop_qdisc);
	if (dev_ingress_queue(dev))
		dev_init_scheduler_queue(dev, dev_ingress_queue(dev),
					 &noop_qdisc);

	setup_timer(&dev->watchdog_timer, dev_watchdog, (unsigned long)dev);
}

static void shutdown_scheduler_queue(struct net_device *dev,
				     struct netdev_queue *dev_queue,
				     void *_qdisc_default)
{
	struct Qdisc *qdisc = dev_queue->qdisc_sleeping;
	struct Qdisc *qdisc_default = _qdisc_default;

	if (qdisc) {
		rcu_assign_pointer(dev_queue->qdisc, qdisc_default);
		dev_queue->qdisc_sleeping = qdisc_default;

		qdisc_destroy(qdisc);
	}
}

void dev_shutdown(struct net_device *dev)
{
	netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc);
	if (dev_ingress_queue(dev))
		shutdown_scheduler_queue(dev, dev_ingress_queue(dev),
					 &noop_qdisc);
	qdisc_destroy(dev->qdisc);
	dev->qdisc = &noop_qdisc;

	WARN_ON(timer_pending(&dev->watchdog_timer));
}
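/* Lifecycle summary of the helpers above (added note):
 *
 *	dev_init_scheduler()	- attach noop_qdisc at register time
 *	dev_activate()		- install defaults, publish qdisc_sleeping
 *	dev_deactivate()	- swap in noop_qdisc, drain running qdiscs
 *	dev_shutdown()		- destroy all qdiscs at unregister time
 */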
void psched_ratecfg_precompute(struct psched_ratecfg *r,
			       const struct tc_ratespec *conf,
			       u64 rate64)
{
	memset(r, 0, sizeof(*r));
	r->overhead = conf->overhead;
	r->rate_bytes_ps = max_t(u64, conf->rate, rate64);
	r->linklayer = (conf->linklayer & TC_LINKLAYER_MASK);
	r->mult = 1;
	/*
	 * The deal here is to replace a divide by a reciprocal one
	 * in the fast path (a reciprocal divide is a multiply and a shift).
	 *
	 * The normal formula would be:
	 *	time_in_ns = (NSEC_PER_SEC * len) / rate_bps
	 *
	 * We compute mult/shift to use instead:
	 *	time_in_ns = (len * mult) >> shift;
	 *
	 * We try to get the highest possible mult value for accuracy,
	 * but have to make sure no overflows will ever happen.
	 */
	if (r->rate_bytes_ps > 0) {
		u64 factor = NSEC_PER_SEC;

		for (;;) {
			r->mult = div64_u64(factor, r->rate_bytes_ps);
			if (r->mult & (1U << 31) || factor & (1ULL << 63))
				break;
			factor <<= 1;
			r->shift++;
		}
	}
}
EXPORT_SYMBOL(psched_ratecfg_precompute);
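/* Worked example for the mult/shift search above (added note): for a
 * 1 Gbit/s rate, rate_bytes_ps == 125000000 and the loop settles on
 * mult == 1U << 31 and shift == 28, so a 1500 byte frame costs
 *
 *	(1500 * (u64)(1U << 31)) >> 28 == 1500 * 8 == 12000 ns
 *
 * which matches the exact formula: 1500 * 8 bits / 1 Gbit/s == 12 us.
 */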