// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/sch_generic.c	Generic packet scheduler routines.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *		Jamal Hadi Salim, <hadi@cyberus.ca> 990601
 *		- Ingress support
 */

#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/rcupdate.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/if_vlan.h>
#include <linux/skb_array.h>
#include <linux/if_macvlan.h>
#include <net/sch_generic.h>
#include <net/pkt_sched.h>
#include <net/dst.h>
#include <trace/events/qdisc.h>
#include <trace/events/net.h>
#include <net/xfrm.h>

/* Qdisc to use by default */
const struct Qdisc_ops *default_qdisc_ops = &pfifo_fast_ops;
EXPORT_SYMBOL(default_qdisc_ops);

/* Main transmission queue. */

/* Modifications to data participating in scheduling must be protected with
 * qdisc_lock(qdisc) spinlock.
 *
 * The idea is the following:
 * - enqueue, dequeue are serialized via qdisc root lock
 * - ingress filtering is also serialized via qdisc root lock
 * - updates to tree and tree walking are only done under the rtnl mutex.
 */

#define SKB_XOFF_MAGIC ((struct sk_buff *)1UL)

static inline struct sk_buff *__skb_dequeue_bad_txq(struct Qdisc *q)
{
	const struct netdev_queue *txq = q->dev_queue;
	spinlock_t *lock = NULL;
	struct sk_buff *skb;

	if (q->flags & TCQ_F_NOLOCK) {
		lock = qdisc_lock(q);
		spin_lock(lock);
	}

	skb = skb_peek(&q->skb_bad_txq);
	if (skb) {
		/* check the reason of requeuing without tx lock first */
		txq = skb_get_tx_queue(txq->dev, skb);
		if (!netif_xmit_frozen_or_stopped(txq)) {
			skb = __skb_dequeue(&q->skb_bad_txq);
			if (qdisc_is_percpu_stats(q)) {
				qdisc_qstats_cpu_backlog_dec(q, skb);
				qdisc_qstats_cpu_qlen_dec(q);
			} else {
				qdisc_qstats_backlog_dec(q, skb);
				q->q.qlen--;
			}
		} else {
			skb = SKB_XOFF_MAGIC;
		}
	}

	if (lock)
		spin_unlock(lock);

	return skb;
}

static inline struct sk_buff *qdisc_dequeue_skb_bad_txq(struct Qdisc *q)
{
	struct sk_buff *skb = skb_peek(&q->skb_bad_txq);

	if (unlikely(skb))
		skb = __skb_dequeue_bad_txq(q);

	return skb;
}

static inline void qdisc_enqueue_skb_bad_txq(struct Qdisc *q,
					     struct sk_buff *skb)
{
	spinlock_t *lock = NULL;

	if (q->flags & TCQ_F_NOLOCK) {
		lock = qdisc_lock(q);
		spin_lock(lock);
	}

	__skb_queue_tail(&q->skb_bad_txq, skb);

	if (qdisc_is_percpu_stats(q)) {
		qdisc_qstats_cpu_backlog_inc(q, skb);
		qdisc_qstats_cpu_qlen_inc(q);
	} else {
		qdisc_qstats_backlog_inc(q, skb);
		q->q.qlen++;
	}

	if (lock)
		spin_unlock(lock);
}

static inline void dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
{
	spinlock_t *lock = NULL;

	if (q->flags & TCQ_F_NOLOCK) {
		lock = qdisc_lock(q);
		spin_lock(lock);
	}

	while (skb) {
		struct sk_buff *next = skb->next;

		__skb_queue_tail(&q->gso_skb, skb);

		/* it's still part of the queue */
		if (qdisc_is_percpu_stats(q)) {
			qdisc_qstats_cpu_requeues_inc(q);
			qdisc_qstats_cpu_backlog_inc(q, skb);
			qdisc_qstats_cpu_qlen_inc(q);
		} else {
			q->qstats.requeues++;
			qdisc_qstats_backlog_inc(q, skb);
			q->q.qlen++;
		}

		skb = next;
	}
	if (lock)
		spin_unlock(lock);
	__netif_schedule(q);
}

static void try_bulk_dequeue_skb(struct Qdisc *q,
				 struct sk_buff *skb,
				 const struct netdev_queue *txq,
				 int *packets)
{
	int bytelimit = qdisc_avail_bulklimit(txq) - skb->len;

	while (bytelimit > 0) {
		struct sk_buff *nskb = q->dequeue(q);

		if (!nskb)
			break;

		bytelimit -= nskb->len; /* covers GSO len */
		skb->next = nskb;
		skb = nskb;
		(*packets)++; /* GSO counts as one pkt */
	}
	skb_mark_not_on_list(skb);
}

/* This variant of try_bulk_dequeue_skb() makes sure
 * all skbs in the chain are for the same txq
 */
static void try_bulk_dequeue_skb_slow(struct Qdisc *q,
				      struct sk_buff *skb,
				      int *packets)
{
	int mapping = skb_get_queue_mapping(skb);
	struct sk_buff *nskb;
	int cnt = 0;

	do {
		nskb = q->dequeue(q);
		if (!nskb)
			break;
		if (unlikely(skb_get_queue_mapping(nskb) != mapping)) {
			qdisc_enqueue_skb_bad_txq(q, nskb);
			break;
		}
		skb->next = nskb;
		skb = nskb;
	} while (++cnt < 8);
	(*packets) += cnt;
	skb_mark_not_on_list(skb);
}

/* Note that dequeue_skb can possibly return a SKB list (via skb->next).
 * A requeued skb (via q->gso_skb) can also be a SKB list.
 */
static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate,
				   int *packets)
{
	const struct netdev_queue *txq = q->dev_queue;
	struct sk_buff *skb = NULL;

	*packets = 1;
	if (unlikely(!skb_queue_empty(&q->gso_skb))) {
		spinlock_t *lock = NULL;

		if (q->flags & TCQ_F_NOLOCK) {
			lock = qdisc_lock(q);
			spin_lock(lock);
		}

		skb = skb_peek(&q->gso_skb);

		/* skb may be null if another cpu pulls gso_skb off in between
		 * empty check and lock.
		 */
		if (!skb) {
			if (lock)
				spin_unlock(lock);
			goto validate;
		}

		/* skb in gso_skb were already validated */
		*validate = false;
		if (xfrm_offload(skb))
			*validate = true;
		/* check the reason of requeuing without tx lock first */
		txq = skb_get_tx_queue(txq->dev, skb);
		if (!netif_xmit_frozen_or_stopped(txq)) {
			skb = __skb_dequeue(&q->gso_skb);
			if (qdisc_is_percpu_stats(q)) {
				qdisc_qstats_cpu_backlog_dec(q, skb);
				qdisc_qstats_cpu_qlen_dec(q);
			} else {
				qdisc_qstats_backlog_dec(q, skb);
				q->q.qlen--;
			}
		} else {
			skb = NULL;
		}
		if (lock)
			spin_unlock(lock);
		goto trace;
	}
validate:
	*validate = true;

	if ((q->flags & TCQ_F_ONETXQUEUE) &&
	    netif_xmit_frozen_or_stopped(txq))
		return skb;

	skb = qdisc_dequeue_skb_bad_txq(q);
	if (unlikely(skb)) {
		if (skb == SKB_XOFF_MAGIC)
			return NULL;
		goto bulk;
	}
	skb = q->dequeue(q);
	if (skb) {
bulk:
		if (qdisc_may_bulk(q))
			try_bulk_dequeue_skb(q, skb, txq, packets);
		else
			try_bulk_dequeue_skb_slow(q, skb, packets);
	}
trace:
	trace_qdisc_dequeue(q, txq, *packets, skb);
	return skb;
}

/*
 * Transmit possibly several skbs, and handle the return status as
 * required. Owning running seqcount bit guarantees that
 * only one CPU can execute this function.
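 *
 * The qdisc root lock, when the caller passes one, is dropped while the
 * skb list is validated and handed to the driver, and re-acquired before
 * returning, so other CPUs may enqueue to the qdisc in the meantime.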
 *
 * Returns to the caller:
 *				false  - hardware queue frozen backoff
 *				true   - feel free to send more pkts
 */
bool sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
		     struct net_device *dev, struct netdev_queue *txq,
		     spinlock_t *root_lock, bool validate)
{
	int ret = NETDEV_TX_BUSY;
	bool again = false;

	/* And release qdisc */
	if (root_lock)
		spin_unlock(root_lock);

	/* Note that we validate skb (GSO, checksum, ...) outside of locks */
	if (validate)
		skb = validate_xmit_skb_list(skb, dev, &again);

#ifdef CONFIG_XFRM_OFFLOAD
	if (unlikely(again)) {
		if (root_lock)
			spin_lock(root_lock);

		dev_requeue_skb(skb, q);
		return false;
	}
#endif

	if (likely(skb)) {
		HARD_TX_LOCK(dev, txq, smp_processor_id());
		if (!netif_xmit_frozen_or_stopped(txq))
			skb = dev_hard_start_xmit(skb, dev, txq, &ret);

		HARD_TX_UNLOCK(dev, txq);
	} else {
		if (root_lock)
			spin_lock(root_lock);
		return true;
	}

	if (root_lock)
		spin_lock(root_lock);

	if (!dev_xmit_complete(ret)) {
		/* Driver returned NETDEV_TX_BUSY - requeue skb */
		if (unlikely(ret != NETDEV_TX_BUSY))
			net_warn_ratelimited("BUG %s code %d qlen %d\n",
					     dev->name, ret, q->q.qlen);

		dev_requeue_skb(skb, q);
		return false;
	}

	return true;
}

/*
 * NOTE: Called under qdisc_lock(q) with locally disabled BH.
 *
 * running seqcount guarantees only one CPU can process
 * this qdisc at a time. qdisc_lock(q) serializes queue accesses for
 * this queue.
 *
 * netif_tx_lock serializes accesses to device driver.
 *
 * qdisc_lock(q) and netif_tx_lock are mutually exclusive,
 * if one is grabbed, another must be free.
 *
 * Note, that this procedure can be called by a watchdog timer
 *
 * Returns to the caller:
 *				false  - queue is empty or throttled.
 *				true   - queue is not empty.
 *
 */
static inline bool qdisc_restart(struct Qdisc *q, int *packets)
{
	spinlock_t *root_lock = NULL;
	struct netdev_queue *txq;
	struct net_device *dev;
	struct sk_buff *skb;
	bool validate;

	/* Dequeue packet */
	skb = dequeue_skb(q, &validate, packets);
	if (unlikely(!skb))
		return false;

	if (!(q->flags & TCQ_F_NOLOCK))
		root_lock = qdisc_lock(q);

	dev = qdisc_dev(q);
	txq = skb_get_tx_queue(dev, skb);

	return sch_direct_xmit(skb, q, dev, txq, root_lock, validate);
}

void __qdisc_run(struct Qdisc *q)
{
	int quota = dev_tx_weight;
	int packets;

	while (qdisc_restart(q, &packets)) {
		quota -= packets;
		if (quota <= 0) {
			__netif_schedule(q);
			break;
		}
	}
}

unsigned long dev_trans_start(struct net_device *dev)
{
	unsigned long val, res;
	unsigned int i;

	if (is_vlan_dev(dev))
		dev = vlan_dev_real_dev(dev);
	else if (netif_is_macvlan(dev))
		dev = macvlan_dev_real_dev(dev);
	res = netdev_get_tx_queue(dev, 0)->trans_start;
	for (i = 1; i < dev->num_tx_queues; i++) {
		val = netdev_get_tx_queue(dev, i)->trans_start;
		if (val && time_after(val, res))
			res = val;
	}

	return res;
}
EXPORT_SYMBOL(dev_trans_start);

static void dev_watchdog(struct timer_list *t)
{
	struct net_device *dev = from_timer(dev, t, watchdog_timer);

	netif_tx_lock(dev);
	if (!qdisc_tx_is_noop(dev)) {
		if (netif_device_present(dev) &&
		    netif_running(dev) &&
		    netif_carrier_ok(dev)) {
			int some_queue_timedout = 0;
			unsigned int i;
			unsigned long trans_start;

			for (i = 0; i < dev->num_tx_queues; i++) {
				struct netdev_queue *txq;

				txq = netdev_get_tx_queue(dev, i);
				trans_start = txq->trans_start;
				if (netif_xmit_stopped(txq) &&
				    time_after(jiffies, (trans_start +
							 dev->watchdog_timeo))) {
					some_queue_timedout = 1;
					txq->trans_timeout++;
					break;
				}
			}

			if (some_queue_timedout) {
				trace_net_dev_xmit_timeout(dev, i);
				WARN_ONCE(1, KERN_INFO "NETDEV WATCHDOG: %s (%s): transmit queue %u timed out\n",
					  dev->name, netdev_drivername(dev), i);
				dev->netdev_ops->ndo_tx_timeout(dev, i);
			}
			if (!mod_timer(&dev->watchdog_timer,
				       round_jiffies(jiffies +
						     dev->watchdog_timeo)))
				dev_hold(dev);
		}
	}
	netif_tx_unlock(dev);

	dev_put(dev);
}

void __netdev_watchdog_up(struct net_device *dev)
{
	if (dev->netdev_ops->ndo_tx_timeout) {
		if (dev->watchdog_timeo <= 0)
			dev->watchdog_timeo = 5*HZ;
		if (!mod_timer(&dev->watchdog_timer,
			       round_jiffies(jiffies + dev->watchdog_timeo)))
			dev_hold(dev);
	}
}

static void dev_watchdog_up(struct net_device *dev)
{
	__netdev_watchdog_up(dev);
}

static void dev_watchdog_down(struct net_device *dev)
{
	netif_tx_lock_bh(dev);
	if (del_timer(&dev->watchdog_timer))
		dev_put(dev);
	netif_tx_unlock_bh(dev);
}

/**
 *	netif_carrier_on - set carrier
 *	@dev: network device
 *
 *	Device has detected acquisition of carrier.
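 *
 *	A linkwatch event is scheduled to report the change and, if the
 *	device is running, the transmit watchdog is re-armed.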
 */
void netif_carrier_on(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_NOCARRIER, &dev->state)) {
		if (dev->reg_state == NETREG_UNINITIALIZED)
			return;
		atomic_inc(&dev->carrier_up_count);
		linkwatch_fire_event(dev);
		if (netif_running(dev))
			__netdev_watchdog_up(dev);
	}
}
EXPORT_SYMBOL(netif_carrier_on);

/**
 *	netif_carrier_off - clear carrier
 *	@dev: network device
 *
 *	Device has detected loss of carrier.
 */
void netif_carrier_off(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_NOCARRIER, &dev->state)) {
		if (dev->reg_state == NETREG_UNINITIALIZED)
			return;
		atomic_inc(&dev->carrier_down_count);
		linkwatch_fire_event(dev);
	}
}
EXPORT_SYMBOL(netif_carrier_off);

/* "NOOP" scheduler: the best scheduler, recommended for all interfaces
   under all circumstances. It is difficult to invent anything faster or
   cheaper.
 */

static int noop_enqueue(struct sk_buff *skb, struct Qdisc *qdisc,
			struct sk_buff **to_free)
{
	__qdisc_drop(skb, to_free);
	return NET_XMIT_CN;
}

static struct sk_buff *noop_dequeue(struct Qdisc *qdisc)
{
	return NULL;
}

struct Qdisc_ops noop_qdisc_ops __read_mostly = {
	.id		=	"noop",
	.priv_size	=	0,
	.enqueue	=	noop_enqueue,
	.dequeue	=	noop_dequeue,
	.peek		=	noop_dequeue,
	.owner		=	THIS_MODULE,
};

static struct netdev_queue noop_netdev_queue = {
	RCU_POINTER_INITIALIZER(qdisc, &noop_qdisc),
	.qdisc_sleeping	=	&noop_qdisc,
};

struct Qdisc noop_qdisc = {
	.enqueue	=	noop_enqueue,
	.dequeue	=	noop_dequeue,
	.flags		=	TCQ_F_BUILTIN,
	.ops		=	&noop_qdisc_ops,
	.q.lock		=	__SPIN_LOCK_UNLOCKED(noop_qdisc.q.lock),
	.dev_queue	=	&noop_netdev_queue,
	.running	=	SEQCNT_ZERO(noop_qdisc.running),
	.busylock	=	__SPIN_LOCK_UNLOCKED(noop_qdisc.busylock),
	.gso_skb = {
		.next = (struct sk_buff *)&noop_qdisc.gso_skb,
		.prev = (struct sk_buff *)&noop_qdisc.gso_skb,
		.qlen = 0,
		.lock = __SPIN_LOCK_UNLOCKED(noop_qdisc.gso_skb.lock),
	},
	.skb_bad_txq = {
		.next = (struct sk_buff *)&noop_qdisc.skb_bad_txq,
		.prev = (struct sk_buff *)&noop_qdisc.skb_bad_txq,
		.qlen = 0,
		.lock = __SPIN_LOCK_UNLOCKED(noop_qdisc.skb_bad_txq.lock),
	},
};
EXPORT_SYMBOL(noop_qdisc);

static int noqueue_init(struct Qdisc *qdisc, struct nlattr *opt,
			struct netlink_ext_ack *extack)
{
	/* register_qdisc() assigns a default of noop_enqueue if unset,
	 * but __dev_queue_xmit() treats noqueue only as such
	 * if this is NULL - so clear it here. */
	qdisc->enqueue = NULL;
	return 0;
}

struct Qdisc_ops noqueue_qdisc_ops __read_mostly = {
	.id		=	"noqueue",
	.priv_size	=	0,
	.init		=	noqueue_init,
	.enqueue	=	noop_enqueue,
	.dequeue	=	noop_dequeue,
	.peek		=	noop_dequeue,
	.owner		=	THIS_MODULE,
};

static const u8 prio2band[TC_PRIO_MAX + 1] = {
	1, 2, 2, 2, 1, 2, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1
};

/* 3-band FIFO queue: old style, but should be a bit faster than
   generic prio+fifo combination.
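
   The prio2band[] table above is indexed by (skb->priority & TC_PRIO_MAX):
   for example TC_PRIO_CONTROL (7) and TC_PRIO_INTERACTIVE (6) map to
   band 0, which is dequeued first, TC_PRIO_BESTEFFORT (0) to band 1, and
   TC_PRIO_BULK (2) to band 2, which is dequeued last.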
 */

#define PFIFO_FAST_BANDS 3

/*
 * Private data for a pfifo_fast scheduler containing:
 *	- rings for priority bands
 */
struct pfifo_fast_priv {
	struct skb_array q[PFIFO_FAST_BANDS];
};

static inline struct skb_array *band2list(struct pfifo_fast_priv *priv,
					  int band)
{
	return &priv->q[band];
}

static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc,
			      struct sk_buff **to_free)
{
	int band = prio2band[skb->priority & TC_PRIO_MAX];
	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
	struct skb_array *q = band2list(priv, band);
	unsigned int pkt_len = qdisc_pkt_len(skb);
	int err;

	err = skb_array_produce(q, skb);

	if (unlikely(err)) {
		if (qdisc_is_percpu_stats(qdisc))
			return qdisc_drop_cpu(skb, qdisc, to_free);
		else
			return qdisc_drop(skb, qdisc, to_free);
	}

	qdisc_update_stats_at_enqueue(qdisc, pkt_len);
	return NET_XMIT_SUCCESS;
}

static struct sk_buff *pfifo_fast_dequeue(struct Qdisc *qdisc)
{
	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
	struct sk_buff *skb = NULL;
	int band;

	for (band = 0; band < PFIFO_FAST_BANDS && !skb; band++) {
		struct skb_array *q = band2list(priv, band);

		if (__skb_array_empty(q))
			continue;

		skb = __skb_array_consume(q);
	}
	if (likely(skb)) {
		qdisc_update_stats_at_dequeue(qdisc, skb);
	} else {
		WRITE_ONCE(qdisc->empty, true);
	}

	return skb;
}

static struct sk_buff *pfifo_fast_peek(struct Qdisc *qdisc)
{
	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
	struct sk_buff *skb = NULL;
	int band;

	for (band = 0; band < PFIFO_FAST_BANDS && !skb; band++) {
		struct skb_array *q = band2list(priv, band);

		skb = __skb_array_peek(q);
	}

	return skb;
}

static void pfifo_fast_reset(struct Qdisc *qdisc)
{
	int i, band;
	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);

	for (band = 0; band < PFIFO_FAST_BANDS; band++) {
		struct skb_array *q = band2list(priv, band);
		struct sk_buff *skb;

		/* NULL ring is possible if destroy path is due to a failed
		 * skb_array_init() in pfifo_fast_init() case.
		 */
		if (!q->ring.queue)
			continue;

		while ((skb = __skb_array_consume(q)) != NULL)
			kfree_skb(skb);
	}

	if (qdisc_is_percpu_stats(qdisc)) {
		for_each_possible_cpu(i) {
			struct gnet_stats_queue *q;

			q = per_cpu_ptr(qdisc->cpu_qstats, i);
			q->backlog = 0;
			q->qlen = 0;
		}
	}
}

static int pfifo_fast_dump(struct Qdisc *qdisc, struct sk_buff *skb)
{
	struct tc_prio_qopt opt = { .bands = PFIFO_FAST_BANDS };

	memcpy(&opt.priomap, prio2band, TC_PRIO_MAX + 1);
	if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
		goto nla_put_failure;
	return skb->len;

nla_put_failure:
	return -1;
}

static int pfifo_fast_init(struct Qdisc *qdisc, struct nlattr *opt,
			   struct netlink_ext_ack *extack)
{
	unsigned int qlen = qdisc_dev(qdisc)->tx_queue_len;
	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
	int prio;

	/* guard against zero length rings */
	if (!qlen)
		return -EINVAL;

	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) {
		struct skb_array *q = band2list(priv, prio);
		int err;

		err = skb_array_init(q, qlen, GFP_KERNEL);
		if (err)
			return -ENOMEM;
	}

	/* Can by-pass the queue discipline */
	qdisc->flags |= TCQ_F_CAN_BYPASS;
	return 0;
}

static void pfifo_fast_destroy(struct Qdisc *sch)
{
	struct pfifo_fast_priv *priv = qdisc_priv(sch);
	int prio;

	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) {
		struct skb_array *q = band2list(priv, prio);

		/* NULL ring is possible if destroy path is due to a failed
		 * skb_array_init() in pfifo_fast_init() case.
		 */
		if (!q->ring.queue)
			continue;
		/* Destroy ring but no need to kfree_skb because a call to
		 * pfifo_fast_reset() has already done that work.
		 */
		ptr_ring_cleanup(&q->ring, NULL);
	}
}

static int pfifo_fast_change_tx_queue_len(struct Qdisc *sch,
					  unsigned int new_len)
{
	struct pfifo_fast_priv *priv = qdisc_priv(sch);
	struct skb_array *bands[PFIFO_FAST_BANDS];
	int prio;

	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) {
		struct skb_array *q = band2list(priv, prio);

		bands[prio] = q;
	}

	return skb_array_resize_multiple(bands, PFIFO_FAST_BANDS, new_len,
					 GFP_KERNEL);
}

struct Qdisc_ops pfifo_fast_ops __read_mostly = {
	.id		=	"pfifo_fast",
	.priv_size	=	sizeof(struct pfifo_fast_priv),
	.enqueue	=	pfifo_fast_enqueue,
	.dequeue	=	pfifo_fast_dequeue,
	.peek		=	pfifo_fast_peek,
	.init		=	pfifo_fast_init,
	.destroy	=	pfifo_fast_destroy,
	.reset		=	pfifo_fast_reset,
	.dump		=	pfifo_fast_dump,
	.change_tx_queue_len =  pfifo_fast_change_tx_queue_len,
	.owner		=	THIS_MODULE,
	.static_flags	=	TCQ_F_NOLOCK | TCQ_F_CPUSTATS,
};
EXPORT_SYMBOL(pfifo_fast_ops);

static struct lock_class_key qdisc_tx_busylock;
static struct lock_class_key qdisc_running_key;

struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
			  const struct Qdisc_ops *ops,
			  struct netlink_ext_ack *extack)
{
	void *p;
	struct Qdisc *sch;
	unsigned int size = QDISC_ALIGN(sizeof(*sch)) + ops->priv_size;
	int err = -ENOBUFS;
	struct net_device *dev;

	if (!dev_queue) {
		NL_SET_ERR_MSG(extack, "No device queue given");
		err = -EINVAL;
		goto errout;
	}

	dev = dev_queue->dev;
	p = kzalloc_node(size, GFP_KERNEL,
			 netdev_queue_numa_node_read(dev_queue));

	if (!p)
		goto errout;
	sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p);
	/* if we got non aligned memory, ask more and do alignment ourself */
	if (sch != p) {
		kfree(p);
		p = kzalloc_node(size + QDISC_ALIGNTO - 1, GFP_KERNEL,
				 netdev_queue_numa_node_read(dev_queue));
		if (!p)
			goto errout;
		sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p);
		sch->padded = (char *) sch - (char *) p;
	}
	__skb_queue_head_init(&sch->gso_skb);
	__skb_queue_head_init(&sch->skb_bad_txq);
	qdisc_skb_head_init(&sch->q);
	spin_lock_init(&sch->q.lock);

	if (ops->static_flags & TCQ_F_CPUSTATS) {
		sch->cpu_bstats =
			netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu);
		if (!sch->cpu_bstats)
			goto errout1;

		sch->cpu_qstats = alloc_percpu(struct gnet_stats_queue);
		if (!sch->cpu_qstats) {
			free_percpu(sch->cpu_bstats);
			goto errout1;
		}
	}

	spin_lock_init(&sch->busylock);
	lockdep_set_class(&sch->busylock,
			  dev->qdisc_tx_busylock ?: &qdisc_tx_busylock);

	/* seqlock has the same scope of busylock, for NOLOCK qdisc */
	spin_lock_init(&sch->seqlock);
	lockdep_set_class(&sch->busylock,
			  dev->qdisc_tx_busylock ?: &qdisc_tx_busylock);

	seqcount_init(&sch->running);
	lockdep_set_class(&sch->running,
			  dev->qdisc_running_key ?: &qdisc_running_key);

	sch->ops = ops;
	sch->flags = ops->static_flags;
	sch->enqueue = ops->enqueue;
	sch->dequeue = ops->dequeue;
	sch->dev_queue = dev_queue;
	sch->empty = true;
	dev_hold(dev);
	refcount_set(&sch->refcnt, 1);

	return sch;
errout1:
	kfree(p);
errout:
	return ERR_PTR(err);
}

struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
				const struct Qdisc_ops *ops,
				unsigned int parentid,
				struct netlink_ext_ack *extack)
{
	struct Qdisc *sch;

	if (!try_module_get(ops->owner)) {
		NL_SET_ERR_MSG(extack, "Failed to increase module reference counter");
		return NULL;
	}

	sch = qdisc_alloc(dev_queue, ops, extack);
	if (IS_ERR(sch)) {
		module_put(ops->owner);
		return NULL;
	}
	sch->parent = parentid;

	if (!ops->init || ops->init(sch, NULL, extack) == 0) {
		trace_qdisc_create(ops, dev_queue->dev, parentid);
		return sch;
	}

	qdisc_put(sch);
	return NULL;
}
EXPORT_SYMBOL(qdisc_create_dflt);

/* Under qdisc_lock(qdisc) and BH! */

void qdisc_reset(struct Qdisc *qdisc)
{
	const struct Qdisc_ops *ops = qdisc->ops;
	struct sk_buff *skb, *tmp;

	trace_qdisc_reset(qdisc);

	if (ops->reset)
		ops->reset(qdisc);

	skb_queue_walk_safe(&qdisc->gso_skb, skb, tmp) {
		__skb_unlink(skb, &qdisc->gso_skb);
		kfree_skb_list(skb);
	}

	skb_queue_walk_safe(&qdisc->skb_bad_txq, skb, tmp) {
		__skb_unlink(skb, &qdisc->skb_bad_txq);
		kfree_skb_list(skb);
	}

	qdisc->q.qlen = 0;
	qdisc->qstats.backlog = 0;
}
EXPORT_SYMBOL(qdisc_reset);

void qdisc_free(struct Qdisc *qdisc)
{
	if (qdisc_is_percpu_stats(qdisc)) {
		free_percpu(qdisc->cpu_bstats);
		free_percpu(qdisc->cpu_qstats);
	}

	kfree((char *) qdisc - qdisc->padded);
}

static void qdisc_free_cb(struct rcu_head *head)
{
	struct Qdisc *q = container_of(head, struct Qdisc, rcu);

	qdisc_free(q);
}

static void qdisc_destroy(struct Qdisc *qdisc)
{
	const struct Qdisc_ops *ops = qdisc->ops;

#ifdef CONFIG_NET_SCHED
	qdisc_hash_del(qdisc);

	qdisc_put_stab(rtnl_dereference(qdisc->stab));
#endif
	gen_kill_estimator(&qdisc->rate_est);

	qdisc_reset(qdisc);

	if (ops->destroy)
		ops->destroy(qdisc);

	module_put(ops->owner);
	dev_put(qdisc_dev(qdisc));

	trace_qdisc_destroy(qdisc);

	call_rcu(&qdisc->rcu, qdisc_free_cb);
}

void qdisc_put(struct Qdisc *qdisc)
{
	if (!qdisc)
		return;

	if (qdisc->flags & TCQ_F_BUILTIN ||
	    !refcount_dec_and_test(&qdisc->refcnt))
		return;

	qdisc_destroy(qdisc);
}
EXPORT_SYMBOL(qdisc_put);

/* Version of qdisc_put() that is called with rtnl mutex unlocked.
 * Intended to be used as optimization, this function only takes rtnl lock if
 * qdisc reference counter reached zero.
 */

void qdisc_put_unlocked(struct Qdisc *qdisc)
{
	if (qdisc->flags & TCQ_F_BUILTIN ||
	    !refcount_dec_and_rtnl_lock(&qdisc->refcnt))
		return;

	qdisc_destroy(qdisc);
	rtnl_unlock();
}
EXPORT_SYMBOL(qdisc_put_unlocked);

/* Attach toplevel qdisc to device queue. */
struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
			      struct Qdisc *qdisc)
{
	struct Qdisc *oqdisc = dev_queue->qdisc_sleeping;
	spinlock_t *root_lock;

	root_lock = qdisc_lock(oqdisc);
	spin_lock_bh(root_lock);

	/* ... and graft new one */
	if (qdisc == NULL)
		qdisc = &noop_qdisc;
	dev_queue->qdisc_sleeping = qdisc;
	rcu_assign_pointer(dev_queue->qdisc, &noop_qdisc);

	spin_unlock_bh(root_lock);

	return oqdisc;
}
EXPORT_SYMBOL(dev_graft_qdisc);

static void attach_one_default_qdisc(struct net_device *dev,
				     struct netdev_queue *dev_queue,
				     void *_unused)
{
	struct Qdisc *qdisc;
	const struct Qdisc_ops *ops = default_qdisc_ops;

	if (dev->priv_flags & IFF_NO_QUEUE)
		ops = &noqueue_qdisc_ops;
	else if (dev->type == ARPHRD_CAN)
		ops = &pfifo_fast_ops;

	qdisc = qdisc_create_dflt(dev_queue, ops, TC_H_ROOT, NULL);
	if (!qdisc)
		return;

	if (!netif_is_multiqueue(dev))
		qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
	dev_queue->qdisc_sleeping = qdisc;
}

static void attach_default_qdiscs(struct net_device *dev)
{
	struct netdev_queue *txq;
	struct Qdisc *qdisc;

	txq = netdev_get_tx_queue(dev, 0);

	if (!netif_is_multiqueue(dev) ||
	    dev->priv_flags & IFF_NO_QUEUE) {
		netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL);
		dev->qdisc = txq->qdisc_sleeping;
		qdisc_refcount_inc(dev->qdisc);
	} else {
		qdisc = qdisc_create_dflt(txq, &mq_qdisc_ops, TC_H_ROOT, NULL);
		if (qdisc) {
			dev->qdisc = qdisc;
			qdisc->ops->attach(qdisc);
		}
	}

	/* Detect default qdisc setup/init failed and fallback to "noqueue" */
	if (dev->qdisc == &noop_qdisc) {
		netdev_warn(dev, "default qdisc (%s) fail, fallback to %s\n",
			    default_qdisc_ops->id, noqueue_qdisc_ops.id);
		dev->priv_flags |= IFF_NO_QUEUE;
		netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL);
		dev->qdisc = txq->qdisc_sleeping;
		qdisc_refcount_inc(dev->qdisc);
		dev->priv_flags ^= IFF_NO_QUEUE;
	}

#ifdef CONFIG_NET_SCHED
	if (dev->qdisc != &noop_qdisc)
		qdisc_hash_add(dev->qdisc, false);
#endif
}

static void transition_one_qdisc(struct net_device *dev,
				 struct netdev_queue *dev_queue,
				 void *_need_watchdog)
{
	struct Qdisc *new_qdisc = dev_queue->qdisc_sleeping;
	int *need_watchdog_p = _need_watchdog;

	if (!(new_qdisc->flags & TCQ_F_BUILTIN))
		clear_bit(__QDISC_STATE_DEACTIVATED, &new_qdisc->state);

	rcu_assign_pointer(dev_queue->qdisc, new_qdisc);
	if (need_watchdog_p) {
		dev_queue->trans_start = 0;
		*need_watchdog_p = 1;
	}
}

void dev_activate(struct net_device *dev)
{
	int need_watchdog;

	/* No queueing discipline is attached to device;
	 * create default one for devices, which need queueing
	 * and noqueue_qdisc for virtual interfaces
	 */

	if (dev->qdisc == &noop_qdisc)
		attach_default_qdiscs(dev);

	if (!netif_carrier_ok(dev))
		/* Delay activation until next carrier-on event */
		return;

	need_watchdog = 0;
	netdev_for_each_tx_queue(dev, transition_one_qdisc, &need_watchdog);
	if (dev_ingress_queue(dev))
		transition_one_qdisc(dev, dev_ingress_queue(dev), NULL);

	if (need_watchdog) {
		netif_trans_update(dev);
		dev_watchdog_up(dev);
	}
}
EXPORT_SYMBOL(dev_activate);

static void qdisc_deactivate(struct Qdisc *qdisc)
{
	bool nolock = qdisc->flags & TCQ_F_NOLOCK;

	if (qdisc->flags & TCQ_F_BUILTIN)
		return;
	if (test_bit(__QDISC_STATE_DEACTIVATED,
		     &qdisc->state))
		return;

	if (nolock)
		spin_lock_bh(&qdisc->seqlock);
	spin_lock_bh(qdisc_lock(qdisc));

	set_bit(__QDISC_STATE_DEACTIVATED, &qdisc->state);

	qdisc_reset(qdisc);

	spin_unlock_bh(qdisc_lock(qdisc));
	if (nolock)
		spin_unlock_bh(&qdisc->seqlock);
}

static void dev_deactivate_queue(struct net_device *dev,
				 struct netdev_queue *dev_queue,
				 void *_qdisc_default)
{
	struct Qdisc *qdisc_default = _qdisc_default;
	struct Qdisc *qdisc;

	qdisc = rtnl_dereference(dev_queue->qdisc);
	if (qdisc) {
		qdisc_deactivate(qdisc);
		rcu_assign_pointer(dev_queue->qdisc, qdisc_default);
	}
}

static bool some_qdisc_is_busy(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *dev_queue;
		spinlock_t *root_lock;
		struct Qdisc *q;
		int val;

		dev_queue = netdev_get_tx_queue(dev, i);
		q = dev_queue->qdisc_sleeping;

		root_lock = qdisc_lock(q);
		spin_lock_bh(root_lock);

		val = (qdisc_is_running(q) ||
		       test_bit(__QDISC_STATE_SCHED, &q->state));

		spin_unlock_bh(root_lock);

		if (val)
			return true;
	}
	return false;
}

/**
 * dev_deactivate_many - deactivate transmissions on several devices
 * @head: list of devices to deactivate
 *
 * This function returns only when all outstanding transmissions
 * have completed, unless all devices are in dismantle phase.
 */
void dev_deactivate_many(struct list_head *head)
{
	struct net_device *dev;

	list_for_each_entry(dev, head, close_list) {
		netdev_for_each_tx_queue(dev, dev_deactivate_queue,
					 &noop_qdisc);
		if (dev_ingress_queue(dev))
			dev_deactivate_queue(dev, dev_ingress_queue(dev),
					     &noop_qdisc);

		dev_watchdog_down(dev);
	}

	/* Wait for outstanding qdisc-less dev_queue_xmit calls.
	 * This is avoided if all devices are in dismantle phase :
	 * Caller will call synchronize_net() for us
	 */
	synchronize_net();

	/* Wait for outstanding qdisc_run calls. */
	list_for_each_entry(dev, head, close_list) {
		while (some_qdisc_is_busy(dev)) {
			/* wait_event() would avoid this sleep-loop but would
			 * require expensive checks in the fast paths of packet
			 * processing which isn't worth it.
			 */
			schedule_timeout_uninterruptible(1);
		}
	}
}

void dev_deactivate(struct net_device *dev)
{
	LIST_HEAD(single);

	list_add(&dev->close_list, &single);
	dev_deactivate_many(&single);
	list_del(&single);
}
EXPORT_SYMBOL(dev_deactivate);

static int qdisc_change_tx_queue_len(struct net_device *dev,
				     struct netdev_queue *dev_queue)
{
	struct Qdisc *qdisc = dev_queue->qdisc_sleeping;
	const struct Qdisc_ops *ops = qdisc->ops;

	if (ops->change_tx_queue_len)
		return ops->change_tx_queue_len(qdisc, dev->tx_queue_len);
	return 0;
}

int dev_qdisc_change_tx_queue_len(struct net_device *dev)
{
	bool up = dev->flags & IFF_UP;
	unsigned int i;
	int ret = 0;

	if (up)
		dev_deactivate(dev);

	for (i = 0; i < dev->num_tx_queues; i++) {
		ret = qdisc_change_tx_queue_len(dev, &dev->_tx[i]);

		/* TODO: revert changes on a partial failure */
		if (ret)
			break;
	}

	if (up)
		dev_activate(dev);
	return ret;
}

static void dev_init_scheduler_queue(struct net_device *dev,
				     struct netdev_queue *dev_queue,
				     void *_qdisc)
{
	struct Qdisc *qdisc = _qdisc;

	rcu_assign_pointer(dev_queue->qdisc, qdisc);
	dev_queue->qdisc_sleeping = qdisc;
}

void dev_init_scheduler(struct net_device *dev)
{
	dev->qdisc = &noop_qdisc;
	netdev_for_each_tx_queue(dev, dev_init_scheduler_queue, &noop_qdisc);
	if (dev_ingress_queue(dev))
		dev_init_scheduler_queue(dev, dev_ingress_queue(dev), &noop_qdisc);

	timer_setup(&dev->watchdog_timer, dev_watchdog, 0);
}

static void shutdown_scheduler_queue(struct net_device *dev,
				     struct netdev_queue *dev_queue,
				     void *_qdisc_default)
{
	struct Qdisc *qdisc = dev_queue->qdisc_sleeping;
	struct Qdisc *qdisc_default = _qdisc_default;

	if (qdisc) {
		rcu_assign_pointer(dev_queue->qdisc, qdisc_default);
		dev_queue->qdisc_sleeping = qdisc_default;

		qdisc_put(qdisc);
	}
}

void dev_shutdown(struct net_device *dev)
{
	netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc);
	if (dev_ingress_queue(dev))
		shutdown_scheduler_queue(dev, dev_ingress_queue(dev), &noop_qdisc);
	qdisc_put(dev->qdisc);
	dev->qdisc = &noop_qdisc;

	WARN_ON(timer_pending(&dev->watchdog_timer));
}

void psched_ratecfg_precompute(struct psched_ratecfg *r,
			       const struct tc_ratespec *conf,
			       u64 rate64)
{
	memset(r, 0, sizeof(*r));
	r->overhead = conf->overhead;
	r->rate_bytes_ps = max_t(u64, conf->rate, rate64);
	r->linklayer = (conf->linklayer & TC_LINKLAYER_MASK);
	r->mult = 1;
	/*
	 * The deal here is to replace a divide by a reciprocal one
	 * in fast path (a reciprocal divide is a multiply and a shift)
	 *
	 * Normal formula would be :
	 *  time_in_ns = (NSEC_PER_SEC * len) / rate_bps
	 *
	 * We compute mult/shift to use instead :
	 *  time_in_ns = (len * mult) >> shift;
	 *
	 * We try to get the highest possible mult value for accuracy,
	 * but have to make sure no overflows will ever happen.
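	 *
	 * For example, with rate_bytes_ps = 125,000,000 (1 Gbit/s) the loop
	 * below ends with mult = 1 << 31 and shift = 28, so
	 *  time_in_ns = (len << 31) >> 28 = len * 8
	 * i.e. 8 ns per byte, matching NSEC_PER_SEC / rate_bytes_ps.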
	 */
	if (r->rate_bytes_ps > 0) {
		u64 factor = NSEC_PER_SEC;

		for (;;) {
			r->mult = div64_u64(factor, r->rate_bytes_ps);
			if (r->mult & (1U << 31) || factor & (1ULL << 63))
				break;
			factor <<= 1;
			r->shift++;
		}
	}
}
EXPORT_SYMBOL(psched_ratecfg_precompute);

static void mini_qdisc_rcu_func(struct rcu_head *head)
{
}

void mini_qdisc_pair_swap(struct mini_Qdisc_pair *miniqp,
			  struct tcf_proto *tp_head)
{
	/* Protected with chain0->filter_chain_lock.
	 * Can't access chain directly because tp_head can be NULL.
	 */
	struct mini_Qdisc *miniq_old =
		rcu_dereference_protected(*miniqp->p_miniq, 1);
	struct mini_Qdisc *miniq;

	if (!tp_head) {
		RCU_INIT_POINTER(*miniqp->p_miniq, NULL);
		/* Wait for flying RCU callback before it is freed. */
		rcu_barrier();
		return;
	}

	miniq = !miniq_old || miniq_old == &miniqp->miniq2 ?
		&miniqp->miniq1 : &miniqp->miniq2;

	/* We need to make sure that readers won't see the miniq
	 * we are about to modify. So wait until previous call_rcu callback
	 * is done.
	 */
	rcu_barrier();
	miniq->filter_list = tp_head;
	rcu_assign_pointer(*miniqp->p_miniq, miniq);

	if (miniq_old)
		/* This is counterpart of the rcu barriers above. We need to
		 * block potential new user of miniq_old until all readers
		 * are not seeing it.
		 */
		call_rcu(&miniq_old->rcu, mini_qdisc_rcu_func);
}
EXPORT_SYMBOL(mini_qdisc_pair_swap);

void mini_qdisc_pair_block_init(struct mini_Qdisc_pair *miniqp,
				struct tcf_block *block)
{
	miniqp->miniq1.block = block;
	miniqp->miniq2.block = block;
}
EXPORT_SYMBOL(mini_qdisc_pair_block_init);

void mini_qdisc_pair_init(struct mini_Qdisc_pair *miniqp, struct Qdisc *qdisc,
			  struct mini_Qdisc __rcu **p_miniq)
{
	miniqp->miniq1.cpu_bstats = qdisc->cpu_bstats;
	miniqp->miniq1.cpu_qstats = qdisc->cpu_qstats;
	miniqp->miniq2.cpu_bstats = qdisc->cpu_bstats;
	miniqp->miniq2.cpu_qstats = qdisc->cpu_qstats;
	miniqp->p_miniq = p_miniq;
}
EXPORT_SYMBOL(mini_qdisc_pair_init);