--- net/sched/sch_generic.c	(245050c287a9176cee9f98109df101909c1eeef4)
+++ net/sched/sch_generic.c	(1b5c5493e3e68181be344cb51bf9df192d05ffc2)
 /*
  * net/sched/sch_generic.c	Generic packet scheduler routines.
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
  * as published by the Free Software Foundation; either version
  * 2 of the License, or (at your option) any later version.
  *

--- 98 unchanged lines hidden ---

 				try_bulk_dequeue_skb(q, skb, txq, packets);
 		}
 	}
 	return skb;
 }

 /*
  * Transmit possibly several skbs, and handle the return status as
- * required. Holding the __QDISC___STATE_RUNNING bit guarantees that
+ * required. Owning running seqcount bit guarantees that
  * only one CPU can execute this function.
  *
  * Returns to the caller:
  *	0  - queue is empty or throttled.
  *	>0 - queue is not empty.
  */
 int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
 		    struct net_device *dev, struct netdev_queue *txq,

--- 36 unchanged lines hidden ---

 		ret = 0;

 	return ret;
 }

 /*
  * NOTE: Called under qdisc_lock(q) with locally disabled BH.
  *
- * __QDISC___STATE_RUNNING guarantees only one CPU can process
+ * running seqcount guarantees only one CPU can process
  * this qdisc at a time. qdisc_lock(q) serializes queue accesses for
  * this queue.
  *
  * netif_tx_lock serializes accesses to device driver.
  *
  * qdisc_lock(q) and netif_tx_lock are mutually exclusive,
  * if one is grabbed, another must be free.
  *

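The comment changes above reflect the conversion of the qdisc "running" state from the __QDISC___STATE_RUNNING bit to a per-qdisc seqcount (Qdisc::running). The helpers that take and release ownership of that seqcount live in include/net/sch_generic.h and are not part of this diff; the sketch below, built only on the seqcount shown in these hunks, illustrates what "owning the running seqcount" means: an odd sequence value marks the qdisc as owned by one CPU.

/* Sketch, not the header's verbatim contents. */
static inline bool qdisc_is_running(const struct Qdisc *qdisc)
{
	return (raw_read_seqcount(&qdisc->running) & 1) ? true : false;
}

static inline bool qdisc_run_begin(struct Qdisc *qdisc)
{
	if (qdisc_is_running(qdisc))
		return false;		/* another CPU already owns this qdisc */
	/* lockdep-quiet variant of write_seqcount_begin(); the write side
	 * is serialized by the caller holding qdisc_lock(q).
	 */
	raw_write_seqcount_begin(&qdisc->running);
	return true;
}

static inline void qdisc_run_end(struct Qdisc *qdisc)
{
	write_seqcount_end(&qdisc->running);
}

The calling pattern is unchanged: qdisc_run() only enters __qdisc_run() when qdisc_run_begin() succeeds, and __qdisc_run() drops ownership with qdisc_run_end() once its transmit loop is done.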
--- 199 unchanged lines hidden ---

 struct Qdisc noop_qdisc = {
 	.enqueue	=	noop_enqueue,
 	.dequeue	=	noop_dequeue,
 	.flags		=	TCQ_F_BUILTIN,
 	.ops		=	&noop_qdisc_ops,
 	.list		=	LIST_HEAD_INIT(noop_qdisc.list),
 	.q.lock		=	__SPIN_LOCK_UNLOCKED(noop_qdisc.q.lock),
 	.dev_queue	=	&noop_netdev_queue,
+	.running	=	SEQCNT_ZERO(noop_qdisc.running),
 	.busylock	=	__SPIN_LOCK_UNLOCKED(noop_qdisc.busylock),
 };
 EXPORT_SYMBOL(noop_qdisc);
 
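noop_qdisc is a compile-time object, so its new running member has to be initialized statically with SEQCNT_ZERO(); qdiscs created through qdisc_alloc() get the run-time equivalent via seqcount_init() later in this diff. A small self-contained illustration of the two forms, using an invented example_qdisc container (only SEQCNT_ZERO(), seqcount_init() and the running field come from the diff):

#include <linux/seqlock.h>

struct example_qdisc {			/* hypothetical stand-in for struct Qdisc */
	seqcount_t running;
};

/* compile-time initialization, as noop_qdisc does above */
static struct example_qdisc example_static = {
	.running = SEQCNT_ZERO(example_static.running),
};

/* run-time initialization, as qdisc_alloc() does further down */
static void example_init(struct example_qdisc *q)
{
	seqcount_init(&q->running);
}

With lockdep enabled, both forms also set up the seqcount's dependency map, which qdisc_alloc() then reclassifies with lockdep_set_class() in the last hunk of this diff.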
 static int noqueue_init(struct Qdisc *qdisc, struct nlattr *opt)
 {
 	/* register_qdisc() assigns a default of noop_enqueue if unset,
 	 * but __dev_queue_xmit() treats noqueue only as such

--- 95 unchanged lines hidden ---

 }

 static void pfifo_fast_reset(struct Qdisc *qdisc)
 {
 	int prio;
 	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);

 	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++)
-		__qdisc_reset_queue(qdisc, band2list(priv, prio));
+		__qdisc_reset_queue(band2list(priv, prio));
 
 	priv->bitmap = 0;
 	qdisc->qstats.backlog = 0;
 	qdisc->q.qlen = 0;
 }
 
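The only change to pfifo_fast_reset() is that __qdisc_reset_queue() lost its qdisc argument; as the unchanged lines above show, clearing qstats.backlog and q.qlen stays with the caller. The helper itself is defined in include/net/sch_generic.h and is not shown here; ignoring any deferred-freeing refinements the real version may have, it amounts to roughly the following sketch.

#include <linux/skbuff.h>

/* Sketch only: drop every skb queued on one per-band list.  Byte and
 * packet accounting is left to the caller, as pfifo_fast_reset() above
 * demonstrates by zeroing backlog and qlen itself.
 */
static inline void __qdisc_reset_queue(struct sk_buff_head *list)
{
	if (!skb_queue_empty(list))
		__skb_queue_purge(list);
}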
 static int pfifo_fast_dump(struct Qdisc *qdisc, struct sk_buff *skb)
 {

--- 30 unchanged lines hidden ---

 	.init		=	pfifo_fast_init,
 	.reset		=	pfifo_fast_reset,
 	.dump		=	pfifo_fast_dump,
 	.owner		=	THIS_MODULE,
 };
 EXPORT_SYMBOL(pfifo_fast_ops);
 
 static struct lock_class_key qdisc_tx_busylock;
+static struct lock_class_key qdisc_running_key;
 
 struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
 			  const struct Qdisc_ops *ops)
 {
 	void *p;
 	struct Qdisc *sch;
 	unsigned int size = QDISC_ALIGN(sizeof(*sch)) + ops->priv_size;
 	int err = -ENOBUFS;

--- 17 unchanged lines hidden ---

 	}
 	INIT_LIST_HEAD(&sch->list);
 	skb_queue_head_init(&sch->q);
 
 	spin_lock_init(&sch->busylock);
 	lockdep_set_class(&sch->busylock,
 			  dev->qdisc_tx_busylock ?: &qdisc_tx_busylock);
 
+	seqcount_init(&sch->running);
+	lockdep_set_class(&sch->running,
+			  dev->qdisc_running_key ?: &qdisc_running_key);
+
 	sch->ops = ops;
 	sch->enqueue = ops->enqueue;
 	sch->dequeue = ops->dequeue;
 	sch->dev_queue = dev_queue;
 	dev_hold(dev);
 	atomic_set(&sch->refcnt, 1);
 
 	return sch;

--- 374 unchanged lines hidden ---
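The qdisc_alloc() hunk above mirrors the existing busylock handling for the new seqcount: seqcount_init() initializes it, and lockdep_set_class() assigns either the device-supplied key (dev->qdisc_running_key) or the file-static qdisc_running_key fallback. The per-device override exists mainly for stacked devices, whose qdiscs would otherwise share a single lockdep class and produce false recursion reports. A purely hypothetical setup function showing how such a device could supply its own keys (the my_* names are invented; only the two net_device fields appear in this diff):

#include <linux/netdevice.h>

static struct lock_class_key my_tx_busylock_key;	/* hypothetical */
static struct lock_class_key my_running_key;		/* hypothetical */

static void my_stacked_dev_setup(struct net_device *dev)	/* hypothetical */
{
	/* give this device's qdiscs their own lockdep classes */
	dev->qdisc_tx_busylock = &my_tx_busylock_key;
	dev->qdisc_running_key = &my_running_key;
}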