sch_generic.h: diff of 245050c287a9176cee9f98109df101909c1eeef4 (old) against 1b5c5493e3e68181be344cb51bf9df192d05ffc2 (new); removed lines are prefixed with -, added lines with +.
 #ifndef __NET_SCHED_GENERIC_H
 #define __NET_SCHED_GENERIC_H

 #include <linux/netdevice.h>
 #include <linux/types.h>
 #include <linux/rcupdate.h>
 #include <linux/pkt_sched.h>
 #include <linux/pkt_cls.h>

 --- 12 unchanged lines hidden ---

 	u32		data[256];
 	struct qdisc_rate_table *next;
 	int		refcnt;
 };

 enum qdisc_state_t {
 	__QDISC_STATE_SCHED,
 	__QDISC_STATE_DEACTIVATED,
-	__QDISC_STATE_THROTTLED,
 };

-/*
- * following bits are only changed while qdisc lock is held
- */
-enum qdisc___state_t {
-	__QDISC___STATE_RUNNING = 1,
-};
-
 struct qdisc_size_table {
 	struct rcu_head		rcu;
 	struct list_head	list;
 	struct tc_sizespec	szopts;
 	int			refcnt;
 	u16			data[];
 };

 --- 18 unchanged lines hidden ---

 	 * qdisc_tree_decrease_qlen() should stop.
 	 */
 	u32			limit;
 	const struct Qdisc_ops	*ops;
 	struct qdisc_size_table	__rcu *stab;
 	struct list_head	list;
 	u32			handle;
 	u32			parent;
-	int			(*reshape_fail)(struct sk_buff *skb,
-						struct Qdisc *q);
-
 	void			*u32_node;

-	/* This field is deprecated, but it is still used by CBQ
-	 * and it will live until better solution will be invented.
-	 */
-	struct Qdisc		*__parent;
 	struct netdev_queue	*dev_queue;

 	struct gnet_stats_rate_est64	rate_est;
 	struct gnet_stats_basic_cpu __percpu *cpu_bstats;
 	struct gnet_stats_queue	__percpu *cpu_qstats;

-	struct Qdisc		*next_sched;
-	struct sk_buff		*gso_skb;
 	/*
 	 * For performance sake on SMP, we put highly modified fields at the end
 	 */
+	struct Qdisc		*next_sched ____cacheline_aligned_in_smp;
+	struct sk_buff		*gso_skb;
 	unsigned long		state;
 	struct sk_buff_head	q;
 	struct gnet_stats_basic_packed bstats;
-	unsigned int		__state;
+	seqcount_t		running;
 	struct gnet_stats_queue	qstats;
 	struct rcu_head		rcu_head;
 	int			padded;
 	atomic_t		refcnt;

 	spinlock_t		busylock ____cacheline_aligned_in_smp;
 };

 static inline bool qdisc_is_running(const struct Qdisc *qdisc)
 {
-	return (qdisc->__state & __QDISC___STATE_RUNNING) ? true : false;
+	return (raw_read_seqcount(&qdisc->running) & 1) ? true : false;
 }

 static inline bool qdisc_run_begin(struct Qdisc *qdisc)
 {
 	if (qdisc_is_running(qdisc))
 		return false;
-	qdisc->__state |= __QDISC___STATE_RUNNING;
+	/* Variant of write_seqcount_begin() telling lockdep a trylock
+	 * was attempted.
+	 */
+	raw_write_seqcount_begin(&qdisc->running);
+	seqcount_acquire(&qdisc->running.dep_map, 0, 1, _RET_IP_);
 	return true;
 }

 static inline void qdisc_run_end(struct Qdisc *qdisc)
 {
-	qdisc->__state &= ~__QDISC___STATE_RUNNING;
+	write_seqcount_end(&qdisc->running);
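
The running state thus moves from a lock-protected bit (__QDISC___STATE_RUNNING in the removed __state word) to a seqcount_t: the count is odd while the dequeue path owns the qdisc and even when it is idle, which is exactly what the new qdisc_is_running() tests. qdisc_run_begin()/qdisc_run_end() therefore behave like a trylock, and the same seqcount lets readers sample statistics consistently (the field is presumably seqcount_init()ed at allocation time; that hunk is not shown here). A hedged usage sketch in the style of the core transmit path, where dev_transmit_sketch() is a hypothetical stand-in for handing the packet to the driver:

	if (qdisc_run_begin(q)) {
		/* we are the exclusive owner of the dequeue path */
		struct sk_buff *skb;

		while ((skb = q->dequeue(q)) != NULL)
			dev_transmit_sketch(skb);	/* hypothetical */
		qdisc_run_end(q);
	}
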
 }

 static inline bool qdisc_may_bulk(const struct Qdisc *qdisc)
 {
 	return qdisc->flags & TCQ_F_ONETXQUEUE;
 }

 static inline int qdisc_avail_bulklimit(const struct netdev_queue *txq)
 {
 #ifdef CONFIG_BQL
 	/* Non-BQL migrated drivers will return 0, too. */
 	return dql_avail(&txq->dql);
 #else
 	return 0;
 #endif
 }

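These unchanged helpers gate bulk dequeue: qdisc_may_bulk() requires a qdisc that owns a single TX queue (TCQ_F_ONETXQUEUE), and qdisc_avail_bulklimit() reports the remaining Byte Queue Limits budget in bytes. A sketch of how a dequeue loop might combine them (illustrative only; qdisc_pkt_len() is the helper used elsewhere in this header):

	int budget = qdisc_avail_bulklimit(txq);

	while (qdisc_may_bulk(q) && budget > 0) {
		struct sk_buff *skb = q->dequeue(q);

		if (!skb)
			break;
		budget -= qdisc_pkt_len(skb);
		/* ... chain skb so the driver gets a single xmit call ... */
	}
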
-static inline bool qdisc_is_throttled(const struct Qdisc *qdisc)
-{
-	return test_bit(__QDISC_STATE_THROTTLED, &qdisc->state) ? true : false;
-}
-
-static inline void qdisc_throttled(struct Qdisc *qdisc)
-{
-	set_bit(__QDISC_STATE_THROTTLED, &qdisc->state);
-}
-
-static inline void qdisc_unthrottled(struct Qdisc *qdisc)
-{
-	clear_bit(__QDISC_STATE_THROTTLED, &qdisc->state);
-}
-
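With __QDISC_STATE_THROTTLED gone from qdisc_state_t above, its three accessors are removed as well; throttling bookkeeping is left to individual qdiscs instead of the generic layer. A qdisc that still wants such a marker could keep one in its private data, for example (hypothetical structure name):

	struct my_sched_data {
		bool throttled;	/* private flag, protected by the qdisc lock */
		/* ... */
	};
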
 struct Qdisc_class_ops {
 	/* Child qdisc manipulation */
 	struct netdev_queue *	(*select_queue)(struct Qdisc *, struct tcmsg *);
 	int			(*graft)(struct Qdisc *, unsigned long cl,
 					 struct Qdisc *, struct Qdisc **);
 	struct Qdisc *		(*leaf)(struct Qdisc *, unsigned long cl);
 	void			(*qlen_notify)(struct Qdisc *, unsigned long);

 --- 23 unchanged lines hidden ---

 	struct Qdisc_ops	*next;
 	const struct Qdisc_class_ops	*cl_ops;
 	char			id[IFNAMSIZ];
 	int			priv_size;

 	int			(*enqueue)(struct sk_buff *, struct Qdisc *);
 	struct sk_buff *	(*dequeue)(struct Qdisc *);
 	struct sk_buff *	(*peek)(struct Qdisc *);
-	unsigned int		(*drop)(struct Qdisc *);

 	int			(*init)(struct Qdisc *, struct nlattr *arg);
 	void			(*reset)(struct Qdisc *);
 	void			(*destroy)(struct Qdisc *);
 	int			(*change)(struct Qdisc *, struct nlattr *arg);
 	void			(*attach)(struct Qdisc *);

 	int			(*dump)(struct Qdisc *, struct sk_buff *);

 --- 116 unchanged lines hidden ---

 static inline spinlock_t *qdisc_root_sleeping_lock(const struct Qdisc *qdisc)
 {
 	struct Qdisc *root = qdisc_root_sleeping(qdisc);

 	ASSERT_RTNL();
 	return qdisc_lock(root);
 }

+static inline seqcount_t *qdisc_root_sleeping_running(const struct Qdisc *qdisc)
+{
+	struct Qdisc *root = qdisc_root_sleeping(qdisc);
+
+	ASSERT_RTNL();
+	return &root->running;
+}
+
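The new qdisc_root_sleeping_running() is the seqcount counterpart of qdisc_root_sleeping_lock() above, giving stats and estimator code a handle on the root qdisc's running seqcount. A sketch of a retry-loop reader, assuming RTNL is held as the helper itself asserts:

	seqcount_t *running = qdisc_root_sleeping_running(q);
	unsigned int seq;
	u64 bytes;

	do {	/* retry if the dequeue path was active meanwhile */
		seq = read_seqcount_begin(running);
		bytes = q->bstats.bytes;
	} while (read_seqcount_retry(running, seq));
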
 static inline struct net_device *qdisc_dev(const struct Qdisc *qdisc)
 {
 	return qdisc->dev_queue->dev;
 }

 static inline void sch_tree_lock(const struct Qdisc *q)
 {
 	spin_lock_bh(qdisc_root_sleeping_lock(q));

 --- 327 unchanged lines hidden ---

 	return 0;
 }

 static inline unsigned int qdisc_queue_drop_head(struct Qdisc *sch)
 {
 	return __qdisc_queue_drop_head(sch, &sch->q);
 }

-static inline struct sk_buff *__qdisc_dequeue_tail(struct Qdisc *sch,
-						   struct sk_buff_head *list)
-{
-	struct sk_buff *skb = __skb_dequeue_tail(list);
-
-	if (likely(skb != NULL))
-		qdisc_qstats_backlog_dec(sch, skb);
-
-	return skb;
-}
-
-static inline struct sk_buff *qdisc_dequeue_tail(struct Qdisc *sch)
-{
-	return __qdisc_dequeue_tail(sch, &sch->q);
-}
-
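These tail-dequeue helpers backed the ->drop() callback that this diff removes from Qdisc_ops; with no remaining users they go away too. A qdisc that still needs tail drop can open-code the same two steps the removed bodies performed (sketch):

	struct sk_buff *skb = __skb_dequeue_tail(&sch->q);

	if (skb) {
		qdisc_qstats_backlog_dec(sch, skb);
		kfree_skb(skb);	/* or defer via rtnl_kfree_skbs() under RTNL */
	}
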
 static inline struct sk_buff *qdisc_peek_head(struct Qdisc *sch)
 {
 	return skb_peek(&sch->q);
 }

 /* generic pseudo peek method for non-work-conserving qdisc */
 static inline struct sk_buff *qdisc_peek_dequeued(struct Qdisc *sch)
 {

 --- 21 unchanged lines hidden ---

 		sch->q.qlen--;
 	} else {
 		skb = sch->dequeue(sch);
 	}

 	return skb;
 }

-static inline void __qdisc_reset_queue(struct Qdisc *sch,
-				       struct sk_buff_head *list)
+static inline void __qdisc_reset_queue(struct sk_buff_head *list)
 {
 	/*
 	 * We do not know the backlog in bytes of this list, it
 	 * is up to the caller to correct it
 	 */
-	__skb_queue_purge(list);
+	if (!skb_queue_empty(list)) {
+		rtnl_kfree_skbs(list->next, list->prev);
+		__skb_queue_head_init(list);
+	}
 }

 static inline void qdisc_reset_queue(struct Qdisc *sch)
 {
-	__qdisc_reset_queue(sch, &sch->q);
+	__qdisc_reset_queue(&sch->q);
 	sch->qstats.backlog = 0;
 }

 static inline struct Qdisc *qdisc_replace(struct Qdisc *sch, struct Qdisc *new,
 					  struct Qdisc **pold)
 {
 	struct Qdisc *old;

 --- 4 unchanged lines hidden ---

 		qdisc_tree_reduce_backlog(old, old->q.qlen, old->qstats.backlog);
 		qdisc_reset(old);
 	}
 	sch_tree_unlock(sch);

 	return old;
 }

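__qdisc_reset_queue() drops its unused *sch argument and no longer frees packets on the spot with __skb_queue_purge(); instead it hands the whole chain to rtnl_kfree_skbs(), deferring the frees until after the RTNL lock is released so that resetting a long queue does not stall other configuration work. A sketch of a qdisc reset using it on a private list (my_qdisc_reset and backlog_list are illustrative names):

	static void my_qdisc_reset(struct Qdisc *sch)
	{
		struct my_sched_data *q = qdisc_priv(sch);

		__qdisc_reset_queue(&q->backlog_list);
		sch->qstats.backlog = 0;
		sch->q.qlen = 0;
	}
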
-static inline unsigned int __qdisc_queue_drop(struct Qdisc *sch,
-					      struct sk_buff_head *list)
+static inline void rtnl_qdisc_drop(struct sk_buff *skb, struct Qdisc *sch)
 {
-	struct sk_buff *skb = __qdisc_dequeue_tail(sch, list);
-
-	if (likely(skb != NULL)) {
-		unsigned int len = qdisc_pkt_len(skb);
-		kfree_skb(skb);
-		return len;
-	}
-
-	return 0;
+	rtnl_kfree_skbs(skb, skb);
+	qdisc_qstats_drop(sch);
 }

-static inline unsigned int qdisc_queue_drop(struct Qdisc *sch)
-{
-	return __qdisc_queue_drop(sch, &sch->q);
-}
-
 static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch)
 {
 	kfree_skb(skb);
 	qdisc_qstats_drop(sch);

 	return NET_XMIT_DROP;
 }

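The __qdisc_queue_drop()/qdisc_queue_drop() pair (tail drop for the removed ->drop() op) gives way to rtnl_qdisc_drop(), which counts the drop and defers the free as above; qdisc_drop() remains for fast-path callers, where an immediate kfree_skb() is fine. A sketch of trimming an over-limit queue from a change() handler, which runs under RTNL (backlog accounting elided for brevity):

	while (sch->q.qlen > sch->limit) {
		struct sk_buff *skb = __skb_dequeue(&sch->q);

		rtnl_qdisc_drop(skb, sch);
	}
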
-static inline int qdisc_reshape_fail(struct sk_buff *skb, struct Qdisc *sch)
-{
-	qdisc_qstats_drop(sch);
-
-#ifdef CONFIG_NET_CLS_ACT
-	if (sch->reshape_fail == NULL || sch->reshape_fail(skb, sch))
-		goto drop;
-
-	return NET_XMIT_SUCCESS;
-
-drop:
-#endif
-	kfree_skb(skb);
-	return NET_XMIT_DROP;
-}
-
 /* Length to Time (L2T) lookup in a qdisc_rate_table, to determine how
    long it will take to send a packet given its size.
  */
 static inline u32 qdisc_l2t(struct qdisc_rate_table* rtab, unsigned int pktlen)
 {
 	int slot = pktlen + rtab->rate.cell_align + rtab->rate.overhead;
 	if (slot < 0)
 		slot = 0;

 --- 45 unchanged lines hidden ---
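
The visible prologue of qdisc_l2t() computes a byte "slot" from the packet length plus cell alignment and overhead; the hidden remainder presumably shifts it by rate.cell_log and indexes the 256-entry data[] table of per-size transmit times declared at the top of the file. A sketch of that lookup, under that assumption:

	slot >>= rtab->rate.cell_log;
	if (slot > 255)
		return rtab->data[255] * (slot >> 8) +
		       rtab->data[slot & 0xFF];
	return rtab->data[slot];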