/*
 * net/sched/sch_gred.c	Generic Random Early Detection queue.
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 *
 * Authors:    J Hadi Salim (hadi@cyberus.ca) 1998-2002
 *
 *             991129: -  Bug fix with grio mode
 *		       - a better single AvgQ mode with Grio (WRED)
 *		       - A finer grained VQ dequeue based on suggestion
 *		         from Ren Liu
 *		       - More error checks
 *
 *  For all the glorious comments look at include/net/red.h
 */

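/*
 * Rough userspace sketch (not part of this file): how GRED is commonly
 * configured with the iproute2 "tc" tool. The device name, rates and
 * thresholds below are illustrative values only, not defaults.
 *
 *	# create the table: 4 virtual queues (DPs), DP 0 as default, RIO mode
 *	tc qdisc add dev eth0 root gred setup DPs 4 default 0 grio
 *
 *	# set the RED parameters of one virtual queue (DP 0)
 *	tc qdisc change dev eth0 root gred limit 60KB min 15KB max 45KB \
 *		burst 20 avpkt 1000 bandwidth 10Mbit DP 0 probability 0.02 prio 2
 *
 * Packets are steered into a DP via skb->tc_index (see tc_index_to_dp()
 * below), typically set by the dsmark qdisc or a classifier action.
 */
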
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <net/pkt_sched.h>
#include <net/red.h>

#define GRED_DEF_PRIO (MAX_DPs / 2)
#define GRED_VQ_MASK (MAX_DPs - 1)

struct gred_sched_data;
struct gred_sched;

struct gred_sched_data {
	u32		limit;		/* HARD maximal queue length	*/
	u32		DP;		/* the drop parameters */
	u32		bytesin;	/* bytes seen on virtualQ so far */
	u32		packetsin;	/* packets seen on virtualQ so far */
	u32		backlog;	/* bytes on the virtualQ */
	u8		prio;		/* the prio of this vq */

	struct red_parms parms;
	struct red_vars  vars;
	struct red_stats stats;
};

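/*
 * Operating modes, kept in gred_sched->flags. RIO mode means per-VQ
 * priorities are in use ("grio"); WRED mode is layered on top of RIO
 * mode whenever two or more VQs share a priority, in which case all VQs
 * also share one set of RED averaging state and the qdisc-wide backlog.
 */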
enum {
	GRED_WRED_MODE = 1,
	GRED_RIO_MODE,
};

struct gred_sched {
	struct gred_sched_data *tab[MAX_DPs];
	unsigned long	flags;
	u32		red_flags;
	u32		DPs;
	u32		def;
	struct red_vars wred_set;
};

static inline int gred_wred_mode(struct gred_sched *table)
{
	return test_bit(GRED_WRED_MODE, &table->flags);
}

static inline void gred_enable_wred_mode(struct gred_sched *table)
{
	__set_bit(GRED_WRED_MODE, &table->flags);
}

static inline void gred_disable_wred_mode(struct gred_sched *table)
{
	__clear_bit(GRED_WRED_MODE, &table->flags);
}

static inline int gred_rio_mode(struct gred_sched *table)
{
	return test_bit(GRED_RIO_MODE, &table->flags);
}

static inline void gred_enable_rio_mode(struct gred_sched *table)
{
	__set_bit(GRED_RIO_MODE, &table->flags);
}

static inline void gred_disable_rio_mode(struct gred_sched *table)
{
	__clear_bit(GRED_RIO_MODE, &table->flags);
}

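/*
 * Returns 1 if at least two virtual queues share a priority, in which
 * case the caller switches the qdisc into WRED mode. Only ever run from
 * the (re)configuration paths, so the quadratic scan is tolerable.
 */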
static inline int gred_wred_mode_check(struct Qdisc *sch)
{
	struct gred_sched *table = qdisc_priv(sch);
	int i;

	/* Really ugly O(n^2), but it shouldn't be needed too frequently. */
	for (i = 0; i < table->DPs; i++) {
		struct gred_sched_data *q = table->tab[i];
		int n;

		if (q == NULL)
			continue;

		for (n = i + 1; n < table->DPs; n++)
			if (table->tab[n] && table->tab[n]->prio == q->prio)
				return 1;
	}

	return 0;
}

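/* In WRED mode all virtual queues share the qdisc-wide backlog;
 * otherwise each VQ accounts for its own bytes.
 */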
static inline unsigned int gred_backlog(struct gred_sched *table,
					struct gred_sched_data *q,
					struct Qdisc *sch)
{
	if (gred_wred_mode(table))
		return sch->qstats.backlog;
	else
		return q->backlog;
}

static inline u16 tc_index_to_dp(struct sk_buff *skb)
{
	return skb->tc_index & GRED_VQ_MASK;
}

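/* In WRED mode a single set of RED averaging state (table->wred_set) is
 * shared by all VQs; it is copied into and out of the per-VQ red_vars
 * around each RED computation.
 */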
static inline void gred_load_wred_set(const struct gred_sched *table,
				      struct gred_sched_data *q)
{
	q->vars.qavg = table->wred_set.qavg;
	q->vars.qidlestart = table->wred_set.qidlestart;
}

static inline void gred_store_wred_set(struct gred_sched *table,
				       struct gred_sched_data *q)
{
	table->wred_set.qavg = q->vars.qavg;
	table->wred_set.qidlestart = q->vars.qidlestart;
}

static inline int gred_use_ecn(struct gred_sched *t)
{
	return t->red_flags & TC_RED_ECN;
}

static inline int gred_use_harddrop(struct gred_sched *t)
{
	return t->red_flags & TC_RED_HARDDROP;
}

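/*
 * Enqueue: map skb->tc_index to a virtual queue, falling back to the
 * default DP (or passing the packet straight through when no default is
 * configured). The RED average is computed, with the averages of all
 * VQs of numerically lower prio added on top in RIO mode, and
 * red_action() then decides between accepting, ECN-marking or dropping.
 * Accepted packets are tail-enqueued subject to the per-VQ byte limit.
 */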
static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			struct sk_buff **to_free)
{
	struct gred_sched_data *q = NULL;
	struct gred_sched *t = qdisc_priv(sch);
	unsigned long qavg = 0;
	u16 dp = tc_index_to_dp(skb);

	if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
		dp = t->def;

		q = t->tab[dp];
		if (!q) {
			/* Pass through packets not assigned to a DP
			 * if no default DP has been configured. This
			 * allows for DP flows to be left untouched.
			 */
			if (likely(sch->qstats.backlog + qdisc_pkt_len(skb) <=
					sch->limit))
				return qdisc_enqueue_tail(skb, sch);
			else
				goto drop;
		}

		/* Fix up tc_index: could be controversial, but it is
		 * needed for requeueing.
		 */
		skb->tc_index = (skb->tc_index & ~GRED_VQ_MASK) | dp;
	}

	/* Sum up all the qavgs of prios < ours to get the new qavg. */
	if (!gred_wred_mode(t) && gred_rio_mode(t)) {
		int i;

		for (i = 0; i < t->DPs; i++) {
			if (t->tab[i] && t->tab[i]->prio < q->prio &&
			    !red_is_idling(&t->tab[i]->vars))
				qavg += t->tab[i]->vars.qavg;
		}
	}

	q->packetsin++;
	q->bytesin += qdisc_pkt_len(skb);

	if (gred_wred_mode(t))
		gred_load_wred_set(t, q);

	q->vars.qavg = red_calc_qavg(&q->parms,
				     &q->vars,
				     gred_backlog(t, q, sch));

	if (red_is_idling(&q->vars))
		red_end_of_idle_period(&q->vars);

	if (gred_wred_mode(t))
		gred_store_wred_set(t, q);

	switch (red_action(&q->parms, &q->vars, q->vars.qavg + qavg)) {
	case RED_DONT_MARK:
		break;

	case RED_PROB_MARK:
		qdisc_qstats_overlimit(sch);
		if (!gred_use_ecn(t) || !INET_ECN_set_ce(skb)) {
			q->stats.prob_drop++;
			goto congestion_drop;
		}

		q->stats.prob_mark++;
		break;

	case RED_HARD_MARK:
		qdisc_qstats_overlimit(sch);
		if (gred_use_harddrop(t) || !gred_use_ecn(t) ||
		    !INET_ECN_set_ce(skb)) {
			q->stats.forced_drop++;
			goto congestion_drop;
		}
		q->stats.forced_mark++;
		break;
	}

	if (gred_backlog(t, q, sch) + qdisc_pkt_len(skb) <= q->limit) {
		q->backlog += qdisc_pkt_len(skb);
		return qdisc_enqueue_tail(skb, sch);
	}

	q->stats.pdrop++;
drop:
	return qdisc_drop(skb, sch, to_free);

congestion_drop:
	qdisc_drop(skb, sch, to_free);
	return NET_XMIT_CN;
}

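/*
 * Dequeue from the shared FIFO, then credit the packet back to the VQ it
 * was accounted to and start a RED idle period once that VQ (or, in WRED
 * mode, the whole qdisc) drains.
 */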
static struct sk_buff *gred_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct gred_sched *t = qdisc_priv(sch);

	skb = qdisc_dequeue_head(sch);

	if (skb) {
		struct gred_sched_data *q;
		u16 dp = tc_index_to_dp(skb);

		if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
			net_warn_ratelimited("GRED: Unable to relocate VQ 0x%x after dequeue, screwing up backlog\n",
					     tc_index_to_dp(skb));
		} else {
			q->backlog -= qdisc_pkt_len(skb);

			if (gred_wred_mode(t)) {
				if (!sch->qstats.backlog)
					red_start_of_idle_period(&t->wred_set);
			} else {
				if (!q->backlog)
					red_start_of_idle_period(&q->vars);
			}
		}

		return skb;
	}

	return NULL;
}

static void gred_reset(struct Qdisc *sch)
{
	int i;
	struct gred_sched *t = qdisc_priv(sch);

	qdisc_reset_queue(sch);

	for (i = 0; i < t->DPs; i++) {
		struct gred_sched_data *q = t->tab[i];

		if (!q)
			continue;

		red_restart(&q->vars);
		q->backlog = 0;
	}
}

static inline void gred_destroy_vq(struct gred_sched_data *q)
{
	kfree(q);
}

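/*
 * Handle TCA_GRED_DPS: (re)size the DP table, set the default DP and the
 * RED flags, recompute the operating mode, and destroy any VQs that now
 * lie beyond the table size ("shadowed" VQs).
 */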
static inline int gred_change_table_def(struct Qdisc *sch, struct nlattr *dps)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct tc_gred_sopt *sopt;
	int i;

	if (dps == NULL)
		return -EINVAL;

	sopt = nla_data(dps);

	if (sopt->DPs > MAX_DPs || sopt->DPs == 0 || sopt->def_DP >= sopt->DPs)
		return -EINVAL;

	sch_tree_lock(sch);
	table->DPs = sopt->DPs;
	table->def = sopt->def_DP;
	table->red_flags = sopt->flags;

	/*
	 * Every entry point to GRED is synchronized with the above code
	 * and the DP is checked against DPs, i.e. shadowed VQs can no
	 * longer be found so we can unlock right here.
	 */
	sch_tree_unlock(sch);

	if (sopt->grio) {
		gred_enable_rio_mode(table);
		gred_disable_wred_mode(table);
		if (gred_wred_mode_check(sch))
			gred_enable_wred_mode(table);
	} else {
		gred_disable_rio_mode(table);
		gred_disable_wred_mode(table);
	}

	for (i = table->DPs; i < MAX_DPs; i++) {
		if (table->tab[i]) {
			pr_warn("GRED: Warning: Destroying shadowed VQ 0x%x\n",
				i);
			gred_destroy_vq(table->tab[i]);
			table->tab[i] = NULL;
		}
	}

	return 0;
}

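/*
 * Create or update a single virtual queue and load its RED parameters.
 * Runs under the qdisc tree lock, so the caller preallocates the
 * gred_sched_data with GFP_KERNEL beforehand and passes it in via
 * *prealloc.
 */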
static inline int gred_change_vq(struct Qdisc *sch, int dp,
				 struct tc_gred_qopt *ctl, int prio,
				 u8 *stab, u32 max_P,
				 struct gred_sched_data **prealloc)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct gred_sched_data *q = table->tab[dp];

	if (!q) {
		table->tab[dp] = q = *prealloc;
		*prealloc = NULL;
		if (!q)
			return -ENOMEM;
	}

	q->DP = dp;
	q->prio = prio;
	if (ctl->limit > sch->limit)
		q->limit = sch->limit;
	else
		q->limit = ctl->limit;

	if (q->backlog == 0)
		red_end_of_idle_period(&q->vars);

	red_set_parms(&q->parms,
		      ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Plog,
		      ctl->Scell_log, stab, max_P);
	red_set_vars(&q->vars);
	return 0;
}

static const struct nla_policy gred_policy[TCA_GRED_MAX + 1] = {
	[TCA_GRED_PARMS]	= { .len = sizeof(struct tc_gred_qopt) },
	[TCA_GRED_STAB]		= { .len = 256 },
	[TCA_GRED_DPS]		= { .len = sizeof(struct tc_gred_sopt) },
	[TCA_GRED_MAX_P]	= { .type = NLA_U32 },
	[TCA_GRED_LIMIT]	= { .type = NLA_U32 },
};

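/*
 * Two request shapes are accepted here: table-level setup (TCA_GRED_DPS,
 * optionally with TCA_GRED_LIMIT) or the parameters of one virtual queue
 * (TCA_GRED_PARMS plus TCA_GRED_STAB), never a mix of the two.
 */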
static int gred_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct tc_gred_qopt *ctl;
	struct nlattr *tb[TCA_GRED_MAX + 1];
	int err, prio = GRED_DEF_PRIO;
	u8 *stab;
	u32 max_P;
	struct gred_sched_data *prealloc;

	if (opt == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_GRED_MAX, opt, gred_policy, NULL);
	if (err < 0)
		return err;

	if (tb[TCA_GRED_PARMS] == NULL && tb[TCA_GRED_STAB] == NULL) {
		if (tb[TCA_GRED_LIMIT] != NULL)
			sch->limit = nla_get_u32(tb[TCA_GRED_LIMIT]);
		/* gred_change_table_def() expects the TCA_GRED_DPS
		 * attribute, not the whole TCA_OPTIONS nest.
		 */
		return gred_change_table_def(sch, tb[TCA_GRED_DPS]);
	}

	if (tb[TCA_GRED_PARMS] == NULL ||
	    tb[TCA_GRED_STAB] == NULL ||
	    tb[TCA_GRED_LIMIT] != NULL)
		return -EINVAL;

	max_P = tb[TCA_GRED_MAX_P] ? nla_get_u32(tb[TCA_GRED_MAX_P]) : 0;

	err = -EINVAL;
	ctl = nla_data(tb[TCA_GRED_PARMS]);
	stab = nla_data(tb[TCA_GRED_STAB]);

	if (ctl->DP >= table->DPs)
		goto errout;

	if (gred_rio_mode(table)) {
		if (ctl->prio == 0) {
			int def_prio = GRED_DEF_PRIO;

			if (table->tab[table->def])
				def_prio = table->tab[table->def]->prio;

			printk(KERN_DEBUG "GRED: DP %u does not have a prio, setting default to %d\n",
			       ctl->DP, def_prio);

			prio = def_prio;
		} else {
			prio = ctl->prio;
		}
	}

	prealloc = kzalloc(sizeof(*prealloc), GFP_KERNEL);
	sch_tree_lock(sch);

	err = gred_change_vq(sch, ctl->DP, ctl, prio, stab, max_P, &prealloc);
	if (err < 0)
		goto errout_locked;

	if (gred_rio_mode(table)) {
		gred_disable_wred_mode(table);
		if (gred_wred_mode_check(sch))
			gred_enable_wred_mode(table);
	}

	err = 0;

errout_locked:
	sch_tree_unlock(sch);
	kfree(prealloc);
errout:
	return err;
}

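/*
 * Initial setup accepts only table-level attributes; per-VQ parameters
 * have to come through a subsequent change request.
 */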
static int gred_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct nlattr *tb[TCA_GRED_MAX + 1];
	int err;

	if (opt == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_GRED_MAX, opt, gred_policy, NULL);
	if (err < 0)
		return err;

	if (tb[TCA_GRED_PARMS] || tb[TCA_GRED_STAB])
		return -EINVAL;

	if (tb[TCA_GRED_LIMIT])
		sch->limit = nla_get_u32(tb[TCA_GRED_LIMIT]);
	else
		sch->limit = qdisc_dev(sch)->tx_queue_len *
			     psched_mtu(qdisc_dev(sch));

	return gred_change_table_def(sch, tb[TCA_GRED_DPS]);
}

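/*
 * Dump the table-level options plus one tc_gred_qopt per possible DP.
 * Empty slots are marked by reporting DP as MAX_DPs + i, which tells tc
 * that no VQ is configured at that index.
 */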
static int gred_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct nlattr *parms, *opts = NULL;
	int i;
	u32 max_p[MAX_DPs];
	struct tc_gred_sopt sopt = {
		.DPs	= table->DPs,
		.def_DP	= table->def,
		.grio	= gred_rio_mode(table),
		.flags	= table->red_flags,
	};

	opts = nla_nest_start(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;
	if (nla_put(skb, TCA_GRED_DPS, sizeof(sopt), &sopt))
		goto nla_put_failure;

	for (i = 0; i < MAX_DPs; i++) {
		struct gred_sched_data *q = table->tab[i];

		max_p[i] = q ? q->parms.max_P : 0;
	}
	if (nla_put(skb, TCA_GRED_MAX_P, sizeof(max_p), max_p))
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_GRED_LIMIT, sch->limit))
		goto nla_put_failure;

	parms = nla_nest_start(skb, TCA_GRED_PARMS);
	if (parms == NULL)
		goto nla_put_failure;

	for (i = 0; i < MAX_DPs; i++) {
		struct gred_sched_data *q = table->tab[i];
		struct tc_gred_qopt opt;
		unsigned long qavg;

		memset(&opt, 0, sizeof(opt));

		if (!q) {
			/* hack -- fix at some point with a proper message:
			 * this is how we indicate to tc that there is no
			 * VQ at this DP.
			 */
			opt.DP = MAX_DPs + i;
			goto append_opt;
		}

		opt.limit	= q->limit;
		opt.DP		= q->DP;
		opt.backlog	= gred_backlog(table, q, sch);
		opt.prio	= q->prio;
		opt.qth_min	= q->parms.qth_min >> q->parms.Wlog;
		opt.qth_max	= q->parms.qth_max >> q->parms.Wlog;
		opt.Wlog	= q->parms.Wlog;
		opt.Plog	= q->parms.Plog;
		opt.Scell_log	= q->parms.Scell_log;
		opt.other	= q->stats.other;
		opt.early	= q->stats.prob_drop;
		opt.forced	= q->stats.forced_drop;
		opt.pdrop	= q->stats.pdrop;
		opt.packets	= q->packetsin;
		opt.bytesin	= q->bytesin;

		if (gred_wred_mode(table))
			gred_load_wred_set(table, q);

		qavg = red_calc_qavg(&q->parms, &q->vars,
				     q->vars.qavg >> q->parms.Wlog);
		opt.qave = qavg >> q->parms.Wlog;

append_opt:
		if (nla_append(skb, sizeof(opt), &opt) < 0)
			goto nla_put_failure;
	}

	nla_nest_end(skb, parms);

	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}

static void gred_destroy(struct Qdisc *sch)
{
	struct gred_sched *table = qdisc_priv(sch);
	int i;

	for (i = 0; i < table->DPs; i++) {
		if (table->tab[i])
			gred_destroy_vq(table->tab[i]);
	}
}

static struct Qdisc_ops gred_qdisc_ops __read_mostly = {
	.id		=	"gred",
	.priv_size	=	sizeof(struct gred_sched),
	.enqueue	=	gred_enqueue,
	.dequeue	=	gred_dequeue,
	.peek		=	qdisc_peek_head,
	.init		=	gred_init,
	.reset		=	gred_reset,
	.destroy	=	gred_destroy,
	.change		=	gred_change,
	.dump		=	gred_dump,
	.owner		=	THIS_MODULE,
};

static int __init gred_module_init(void)
{
	return register_qdisc(&gred_qdisc_ops);
}

static void __exit gred_module_exit(void)
{
	unregister_qdisc(&gred_qdisc_ops);
}

module_init(gred_module_init)
module_exit(gred_module_exit)

MODULE_LICENSE("GPL");