net/sched/sch_red.c: 14f0290ba44de6ed435fea24bba26e7868421c66 -> cc7ec456f82da7f89a5b376e613b3ac4311b3e9a
/*
 * net/sched/sch_red.c	Random Early Detection queue.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *

--- 22 unchanged lines hidden ---

	Hard limit on queue length, should be chosen >qth_max
	to allow packet bursts. This parameter does not
	affect the algorithm's behaviour and can be chosen
	arbitrarily high (well, less than ram size).
	Really, this limit will never be reached
	if RED works correctly.
 */

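/* Illustrative userspace configuration (values are examples only, in the
 * style of the tc-red(8) man page; "limit" is the hard byte limit
 * described above, min/max are the RED thresholds):
 *
 *   tc qdisc add dev eth0 parent 1:1 handle 10: red \
 *      limit 400000 min 30000 max 90000 avpkt 1000 burst 55 \
 *      ecn bandwidth 10Mbit probability 0.02
 */

/* Per-qdisc RED state: the configured hard limit and flags, the RED
 * parameters and running averages (struct red_parms), the drop/mark
 * counters (struct red_stats), and the child qdisc that actually
 * holds the packets.
 */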
struct red_sched_data {
	u32			limit;		/* HARD maximal queue length */
	unsigned char		flags;
	struct red_parms	parms;
	struct red_stats	stats;
	struct Qdisc		*qdisc;
};

static inline int red_use_ecn(struct red_sched_data *q)
{
	return q->flags & TC_RED_ECN;
}

static inline int red_use_harddrop(struct red_sched_data *q)
{
	return q->flags & TC_RED_HARDDROP;
}
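
/* Enqueue path: update the average queue size from the child's backlog,
 * let red_action() classify it against the min/max thresholds, and then
 * pass the packet through unmarked, ECN-mark it, or drop it. With ECN
 * enabled, packets are CE-marked instead of dropped, unless the
 * harddrop flag forces a drop above the max threshold.
 */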
static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;
	int ret;

	q->parms.qavg = red_calc_qavg(&q->parms, child->qstats.backlog);

	if (red_is_idling(&q->parms))
		red_end_of_idle_period(&q->parms);

	switch (red_action(&q->parms, q->parms.qavg)) {
	case RED_DONT_MARK:
		break;

	case RED_PROB_MARK:
		sch->qstats.overlimits++;
		if (!red_use_ecn(q) || !INET_ECN_set_ce(skb)) {
			q->stats.prob_drop++;
			goto congestion_drop;
		}

		q->stats.prob_mark++;
		break;

	case RED_HARD_MARK:
		sch->qstats.overlimits++;
		if (red_use_harddrop(q) || !red_use_ecn(q) ||
		    !INET_ECN_set_ce(skb)) {
			q->stats.forced_drop++;
			goto congestion_drop;
		}

		q->stats.forced_mark++;
		break;
	}

	ret = qdisc_enqueue(skb, child);
	if (likely(ret == NET_XMIT_SUCCESS)) {
		qdisc_bstats_update(sch, skb);
		sch->q.qlen++;
	} else if (net_xmit_drop_count(ret)) {
		q->stats.pdrop++;
		sch->qstats.drops++;
	}
	return ret;

congestion_drop:
	qdisc_drop(skb, sch);
	return NET_XMIT_CN;
}
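
/* Dequeue from the child qdisc; when the queue drains empty, start an
 * idle period so the average queue size decays while no traffic flows.
 */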
static struct sk_buff *red_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;

	skb = child->dequeue(child);
	if (skb)
		sch->q.qlen--;
	else if (!red_is_idling(&q->parms))
		red_start_of_idle_period(&q->parms);

	return skb;
}
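
/* Peek delegates straight to the child qdisc; no RED state changes. */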
static struct sk_buff *red_peek(struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;

	return child->ops->peek(child);
}
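
/* ->drop callback: ask the child qdisc to shed one packet; such drops
 * are counted as "other" rather than as RED marks. If nothing could be
 * dropped, begin an idle period so the average queue size decays.
 */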
static unsigned int red_drop(struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;
	unsigned int len;

	if (child->ops->drop && (len = child->ops->drop(child)) > 0) {
		q->stats.other++;
		sch->qstats.drops++;
		sch->q.qlen--;
		return len;
	}

	if (!red_is_idling(&q->parms))
		red_start_of_idle_period(&q->parms);

	return 0;
}
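
/* Reset: flush the child qdisc and restart the RED averaging state. */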
static void red_reset(struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);

	qdisc_reset(q->qdisc);
	sch->q.qlen = 0;
	red_restart(&q->parms);
}

--- 50 unchanged lines hidden ---

	if (skb_queue_empty(&sch->q))
		red_end_of_idle_period(&q->parms);

	sch_tree_unlock(sch);
	return 0;
}
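
/* Init: start with the no-op child qdisc; red_change() installs the
 * real child qdisc once parameters arrive from userspace.
 */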
static int red_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct red_sched_data *q = qdisc_priv(sch);

	q->qdisc = &noop_qdisc;
	return red_change(sch, opt);
}

static int red_dump(struct Qdisc *sch, struct sk_buff *skb)

--- 132 unchanged lines hidden ---