net/sched/sch_gred.c: diff of f43dc23d5ea91fca257be02138a255f02d98e806 (old) against cc7ec456f82da7f89a5b376e613b3ac4311b3e9a (new)
/*
 * net/sched/sch_gred.c Generic Random Early Detection queue.
 *
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.

--- 18 unchanged lines hidden ---

#include <net/red.h>

#define GRED_DEF_PRIO (MAX_DPs / 2)
#define GRED_VQ_MASK (MAX_DPs - 1)

struct gred_sched_data;
struct gred_sched;
-struct gred_sched_data
-{
+struct gred_sched_data {
        u32             limit;          /* HARD maximal queue length */
        u32             DP;             /* the drop pramaters */
        u32             bytesin;        /* bytes seen on virtualQ so far*/
        u32             packetsin;      /* packets seen on virtualQ so far*/
        u32             backlog;        /* bytes on the virtualQ */
        u8              prio;           /* the prio of this vq */

        struct red_parms parms;
        struct red_stats stats;
};

enum {
        GRED_WRED_MODE = 1,
        GRED_RIO_MODE,
};
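The two mode bits above are stored in the flags word of the per-qdisc gred_sched structure defined next. The helpers that test and flip them fall inside the hidden region of this diff; a minimal sketch of what they presumably look like, inferred from the enum and from the callers visible in gred_enqueue() and the table-change path below, is:

static inline int gred_wred_mode(struct gred_sched *table)
{
        /* WRED: all virtual queues share one set of RED state */
        return test_bit(GRED_WRED_MODE, &table->flags);
}

static inline void gred_enable_wred_mode(struct gred_sched *table)
{
        __set_bit(GRED_WRED_MODE, &table->flags);
}

static inline int gred_rio_mode(struct gred_sched *table)
{
        /* RIO: a virtual queue's average also counts higher-priority queues */
        return test_bit(GRED_RIO_MODE, &table->flags);
}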
-struct gred_sched
-{
+struct gred_sched {
        struct gred_sched_data *tab[MAX_DPs];
        unsigned long   flags;
        u32             red_flags;
        u32             DPs;
        u32             def;
        struct red_parms wred_set;
};

--- 82 unchanged lines hidden ---

        return t->red_flags & TC_RED_ECN;
}

static inline int gred_use_harddrop(struct gred_sched *t)
{
        return t->red_flags & TC_RED_HARDDROP;
}
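gred_enqueue() below, and the dequeue and drop paths after it, select the virtual queue with tc_index_to_dp(), which is defined in the hidden region of the file. Given the GRED_VQ_MASK define above, it presumably just masks the skb's tc_index; a sketch, not quoted from this diff:

static inline u16 tc_index_to_dp(struct sk_buff *skb)
{
        /* the low bits of skb->tc_index, as set by a classifier,
         * pick one of the MAX_DPs virtual queues (DPs) */
        return skb->tc_index & GRED_VQ_MASK;
}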
-static int gred_enqueue(struct sk_buff *skb, struct Qdisc* sch)
+static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
-        struct gred_sched_data *q=NULL;
-        struct gred_sched *t= qdisc_priv(sch);
+        struct gred_sched_data *q = NULL;
+        struct gred_sched *t = qdisc_priv(sch);
        unsigned long qavg = 0;
        u16 dp = tc_index_to_dp(skb);

        if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
                dp = t->def;

-                if ((q = t->tab[dp]) == NULL) {
+                q = t->tab[dp];
+                if (!q) {
                        /* Pass through packets not assigned to a DP
                         * if no default DP has been configured. This
                         * allows for DP flows to be left untouched.
                         */
                        if (skb_queue_len(&sch->q) < qdisc_dev(sch)->tx_queue_len)
                                return qdisc_enqueue_tail(skb, sch);
                        else
                                goto drop;

--- 6 unchanged lines hidden ---

        /* sum up all the qaves of prios <= to ours to get the new qave */
        if (!gred_wred_mode(t) && gred_rio_mode(t)) {
                int i;

                for (i = 0; i < t->DPs; i++) {
                        if (t->tab[i] && t->tab[i]->prio < q->prio &&
                            !red_is_idling(&t->tab[i]->parms))
-                                qavg +=t->tab[i]->parms.qavg;
+                                qavg += t->tab[i]->parms.qavg;
                }

        }

        q->packetsin++;
        q->bytesin += qdisc_pkt_len(skb);

        if (gred_wred_mode(t))
                gred_load_wred_set(t, q);

        q->parms.qavg = red_calc_qavg(&q->parms, gred_backlog(t, q, sch));

        if (red_is_idling(&q->parms))
                red_end_of_idle_period(&q->parms);

        if (gred_wred_mode(t))
                gred_store_wred_set(t, q);

        switch (red_action(&q->parms, q->parms.qavg + qavg)) {
        case RED_DONT_MARK:
                break;

        case RED_PROB_MARK:
                sch->qstats.overlimits++;
                if (!gred_use_ecn(t) || !INET_ECN_set_ce(skb)) {
                        q->stats.prob_drop++;
                        goto congestion_drop;
                }

                q->stats.prob_mark++;
                break;

        case RED_HARD_MARK:
                sch->qstats.overlimits++;
                if (gred_use_harddrop(t) || !gred_use_ecn(t) ||
                    !INET_ECN_set_ce(skb)) {
                        q->stats.forced_drop++;
                        goto congestion_drop;
                }
                q->stats.forced_mark++;
                break;
        }

        if (q->backlog + qdisc_pkt_len(skb) <= q->limit) {
                q->backlog += qdisc_pkt_len(skb);
                return qdisc_enqueue_tail(skb, sch);
        }

        q->stats.pdrop++;
drop:
        return qdisc_drop(skb, sch);

congestion_drop:
        qdisc_drop(skb, sch);
        return NET_XMIT_CN;
}
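The mark/drop decision in gred_enqueue() is driven by the average backlog that red_calc_qavg() maintains in <net/red.h>. The toy program below is not kernel code from this diff; it is a self-contained sketch of the fixed-point EWMA that red_calc_qavg() is understood to apply while the queue is not idling: qavg carries its binary point at Wlog bits, so qavg += backlog - (qavg >> Wlog) approximates qavg = (1 - W) * qavg + W * backlog with W = 2^-Wlog.

#include <stdio.h>

/* Assumed model of one RED averaging step (mirrors the idea behind
 * red_calc_qavg_no_idle_time() in <net/red.h>; illustrative only). */
static unsigned long red_avg_step(unsigned long qavg, unsigned int backlog,
                                  unsigned int Wlog)
{
        /* qavg is fixed point, scaled by 2^Wlog */
        return qavg + (backlog - (qavg >> Wlog));
}

int main(void)
{
        unsigned long qavg = 0;         /* starts from an empty queue */
        unsigned int Wlog = 9;          /* weight W = 1/512, illustrative */
        int i;

        /* offer a constant backlog of 1000 bytes; qavg converges toward it */
        for (i = 0; i < 10000; i++)
                qavg = red_avg_step(qavg, 1000, Wlog);

        printf("average backlog ~ %lu bytes\n", qavg >> Wlog);
        return 0;
}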
-static struct sk_buff *gred_dequeue(struct Qdisc* sch)
+static struct sk_buff *gred_dequeue(struct Qdisc *sch)
{
        struct sk_buff *skb;
        struct gred_sched *t = qdisc_priv(sch);

        skb = qdisc_dequeue_head(sch);

        if (skb) {
                struct gred_sched_data *q;
                u16 dp = tc_index_to_dp(skb);

                if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
                        if (net_ratelimit())
-                                printk(KERN_WARNING "GRED: Unable to relocate "
-                                       "VQ 0x%x after dequeue, screwing up "
-                                       "backlog.\n", tc_index_to_dp(skb));
+                                pr_warning("GRED: Unable to relocate VQ 0x%x "
+                                           "after dequeue, screwing up "
+                                           "backlog.\n", tc_index_to_dp(skb));
                } else {
                        q->backlog -= qdisc_pkt_len(skb);

                        if (!q->backlog && !gred_wred_mode(t))
                                red_start_of_idle_period(&q->parms);
                }

                return skb;
        }

        if (gred_wred_mode(t) && !red_is_idling(&t->wred_set))
                red_start_of_idle_period(&t->wred_set);

        return NULL;
}
-static unsigned int gred_drop(struct Qdisc* sch)
+static unsigned int gred_drop(struct Qdisc *sch)
{
        struct sk_buff *skb;
        struct gred_sched *t = qdisc_priv(sch);

        skb = qdisc_dequeue_tail(sch);
        if (skb) {
                unsigned int len = qdisc_pkt_len(skb);
                struct gred_sched_data *q;
                u16 dp = tc_index_to_dp(skb);

                if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
                        if (net_ratelimit())
-                                printk(KERN_WARNING "GRED: Unable to relocate "
-                                       "VQ 0x%x while dropping, screwing up "
-                                       "backlog.\n", tc_index_to_dp(skb));
+                                pr_warning("GRED: Unable to relocate VQ 0x%x "
+                                           "while dropping, screwing up "
+                                           "backlog.\n", tc_index_to_dp(skb));
                } else {
                        q->backlog -= len;
                        q->stats.other++;

                        if (!q->backlog && !gred_wred_mode(t))
                                red_start_of_idle_period(&q->parms);
                }

                qdisc_drop(skb, sch);
                return len;
        }

        if (gred_wred_mode(t) && !red_is_idling(&t->wred_set))
                red_start_of_idle_period(&t->wred_set);

        return 0;

}
-static void gred_reset(struct Qdisc* sch)
+static void gred_reset(struct Qdisc *sch)
{
        int i;
        struct gred_sched *t = qdisc_priv(sch);

        qdisc_reset_queue(sch);

        for (i = 0; i < t->DPs; i++) {
                struct gred_sched_data *q = t->tab[i];

--- 44 unchanged lines hidden ---

                gred_enable_wred_mode(table);
        } else {
                gred_disable_rio_mode(table);
                gred_disable_wred_mode(table);
        }

        for (i = table->DPs; i < MAX_DPs; i++) {
                if (table->tab[i]) {
-                        printk(KERN_WARNING "GRED: Warning: Destroying "
-                               "shadowed VQ 0x%x\n", i);
+                        pr_warning("GRED: Warning: Destroying "
+                                   "shadowed VQ 0x%x\n", i);
                        gred_destroy_vq(table->tab[i]);
                        table->tab[i] = NULL;
                }
        }

        return 0;
}

--- 228 unchanged lines hidden ---