/*
 * net/sched/sch_red.c	Random Early Detection queue.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Changes:
 * J Hadi Salim 980914:	computation fixes
 * Alexey Makarenko <makar@phoenix.kharkov.ua> 990814: qave on idle link was calculated incorrectly.
 * J Hadi Salim 980816:  ECN support
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <net/pkt_sched.h>
#include <net/inet_ecn.h>
#include <net/red.h>


/*	Parameters, settable by user:
	-----------------------------

	limit		- bytes (must be > qth_max + burst)

	Hard limit on queue length: it should be chosen larger than
	qth_max to allow packet bursts. This parameter does not
	affect the algorithm's behaviour and can be chosen
	arbitrarily high (well, less than RAM size).
	Really, this limit will never be reached
	if RED works correctly.
 */
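
/*	Example configuration (illustrative values only, following the
	classic tc-red(8) example; adjust for the link in question):

		tc qdisc add dev eth0 root red limit 400000 \
			min 30000 max 90000 avpkt 1000 burst 55 \
			ecn bandwidth 10Mbit probability 0.02

	Here limit (400000 bytes) comfortably exceeds qth_max plus the
	expected burst, as required above.
 */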

struct red_sched_data
{
	u32			limit;		/* HARD maximal queue length */
	unsigned char		flags;		/* TC_RED_ECN, TC_RED_HARDDROP */
	struct red_parms	parms;		/* RED parameters and EWMA state */
	struct red_stats	stats;		/* mark/drop counters */
	struct Qdisc		*qdisc;		/* child qdisc (bfifo by default) */
};

static inline int red_use_ecn(struct red_sched_data *q)
{
	return q->flags & TC_RED_ECN;
}

static inline int red_use_harddrop(struct red_sched_data *q)
{
	return q->flags & TC_RED_HARDDROP;
}

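/* Enqueue: update the average queue length (EWMA over the child's
 * backlog), then let red_action() decide between passing the packet
 * through, a probabilistic mark/drop, or a forced mark/drop. ECN
 * marking replaces dropping only when TC_RED_ECN is set and the
 * packet is ECN-capable; TC_RED_HARDDROP forces a drop even then at
 * the hard-mark point.
 */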
static int red_enqueue(struct sk_buff *skb, struct Qdisc* sch)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;
	int ret;

	q->parms.qavg = red_calc_qavg(&q->parms, child->qstats.backlog);

	if (red_is_idling(&q->parms))
		red_end_of_idle_period(&q->parms);

	switch (red_action(&q->parms, q->parms.qavg)) {
		case RED_DONT_MARK:
			break;

		case RED_PROB_MARK:
			sch->qstats.overlimits++;
			if (!red_use_ecn(q) || !INET_ECN_set_ce(skb)) {
				q->stats.prob_drop++;
				goto congestion_drop;
			}

			q->stats.prob_mark++;
			break;

		case RED_HARD_MARK:
			sch->qstats.overlimits++;
			if (red_use_harddrop(q) || !red_use_ecn(q) ||
			    !INET_ECN_set_ce(skb)) {
				q->stats.forced_drop++;
				goto congestion_drop;
			}

			q->stats.forced_mark++;
			break;
	}

	ret = child->enqueue(skb, child);
	if (likely(ret == NET_XMIT_SUCCESS)) {
		sch->bstats.bytes += skb->len;
		sch->bstats.packets++;
		sch->q.qlen++;
	} else {
		q->stats.pdrop++;
		sch->qstats.drops++;
	}
	return ret;

congestion_drop:
	qdisc_drop(skb, sch);
	return NET_XMIT_CN;
}

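/* Requeue a packet at the head of the child qdisc, e.g. when the
 * driver could not accept it after dequeue; renewed activity also
 * ends any idle period.
 */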
static int red_requeue(struct sk_buff *skb, struct Qdisc* sch)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;
	int ret;

	if (red_is_idling(&q->parms))
		red_end_of_idle_period(&q->parms);

	ret = child->ops->requeue(skb, child);
	if (likely(ret == NET_XMIT_SUCCESS)) {
		sch->qstats.requeues++;
		sch->q.qlen++;
	}
	return ret;
}

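/* Dequeue from the child; when the queue runs empty, record the start
 * of an idle period so that the average queue length keeps decaying
 * while the link is idle (see the 990814 fix noted above).
 */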
static struct sk_buff * red_dequeue(struct Qdisc* sch)
{
	struct sk_buff *skb;
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;

	skb = child->dequeue(child);
	if (skb)
		sch->q.qlen--;
	else if (!red_is_idling(&q->parms))
		red_start_of_idle_period(&q->parms);

	return skb;
}

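/* ->drop: ask the child to shed one packet, accounting it as an
 * "other" (non-RED) drop; an empty child marks the start of an idle
 * period, as in dequeue.
 */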
static unsigned int red_drop(struct Qdisc* sch)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;
	unsigned int len;

	if (child->ops->drop && (len = child->ops->drop(child)) > 0) {
		q->stats.other++;
		sch->qstats.drops++;
		sch->q.qlen--;
		return len;
	}

	if (!red_is_idling(&q->parms))
		red_start_of_idle_period(&q->parms);

	return 0;
}

static void red_reset(struct Qdisc* sch)
{
	struct red_sched_data *q = qdisc_priv(sch);

	qdisc_reset(q->qdisc);
	sch->q.qlen = 0;
	red_restart(&q->parms);
}

static void red_destroy(struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);
	qdisc_destroy(q->qdisc);
}

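/* Create the default child qdisc: a bfifo sized to the configured
 * byte limit, configured via a locally built netlink attribute.
 */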
static struct Qdisc *red_create_dflt(struct Qdisc *sch, u32 limit)
{
	struct Qdisc *q;
	struct nlattr *nla;
	int ret;

	q = qdisc_create_dflt(sch->dev, &bfifo_qdisc_ops,
			      TC_H_MAKE(sch->handle, 1));
	if (q) {
		nla = kmalloc(nla_attr_size(sizeof(struct tc_fifo_qopt)),
			      GFP_KERNEL);
		if (nla) {
			nla->nla_type = RTM_NEWQDISC;
			nla->nla_len = nla_attr_size(sizeof(struct tc_fifo_qopt));
			((struct tc_fifo_qopt *)nla_data(nla))->limit = limit;

			ret = q->ops->change(q, nla);
			kfree(nla);

			if (ret == 0)
				return q;
		}
		qdisc_destroy(q);
	}
	return NULL;
}

static const struct nla_policy red_policy[TCA_RED_MAX + 1] = {
	[TCA_RED_PARMS]	= { .len = sizeof(struct tc_red_qopt) },
	[TCA_RED_STAB]	= { .len = RED_STAB_SIZE },
};

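/* Parse TCA_RED_PARMS and TCA_RED_STAB from netlink, (re)create the
 * bfifo child when a byte limit is given, and install the new RED
 * parameters under the qdisc tree lock.
 */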
static int red_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_RED_MAX + 1];
	struct tc_red_qopt *ctl;
	struct Qdisc *child = NULL;
	int err;

	if (opt == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_RED_MAX, opt, red_policy);
	if (err < 0)
		return err;

	if (tb[TCA_RED_PARMS] == NULL ||
	    tb[TCA_RED_STAB] == NULL)
		return -EINVAL;

	ctl = nla_data(tb[TCA_RED_PARMS]);

	if (ctl->limit > 0) {
		child = red_create_dflt(sch, ctl->limit);
		if (child == NULL)
			return -ENOMEM;
	}

	sch_tree_lock(sch);
	q->flags = ctl->flags;
	q->limit = ctl->limit;
	if (child) {
		qdisc_tree_decrease_qlen(q->qdisc, q->qdisc->q.qlen);
		qdisc_destroy(xchg(&q->qdisc, child));
	}

	red_set_parms(&q->parms, ctl->qth_min, ctl->qth_max, ctl->Wlog,
				 ctl->Plog, ctl->Scell_log,
				 nla_data(tb[TCA_RED_STAB]));

	if (skb_queue_empty(&sch->q))
		red_end_of_idle_period(&q->parms);

	sch_tree_unlock(sch);
	return 0;
}

static int red_init(struct Qdisc* sch, struct nlattr *opt)
{
	struct red_sched_data *q = qdisc_priv(sch);

	q->qdisc = &noop_qdisc;
	return red_change(sch, opt);
}

static int red_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts = NULL;
	struct tc_red_qopt opt = {
		.limit		= q->limit,
		.flags		= q->flags,
		.qth_min	= q->parms.qth_min >> q->parms.Wlog,
		.qth_max	= q->parms.qth_max >> q->parms.Wlog,
		.Wlog		= q->parms.Wlog,
		.Plog		= q->parms.Plog,
		.Scell_log	= q->parms.Scell_log,
	};

	opts = nla_nest_start(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;
	NLA_PUT(skb, TCA_RED_PARMS, sizeof(opt), &opt);
	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}

static int red_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct tc_red_xstats st = {
		.early	= q->stats.prob_drop + q->stats.forced_drop,
		.pdrop	= q->stats.pdrop,
		.other	= q->stats.other,
		.marked	= q->stats.prob_mark + q->stats.forced_mark,
	};

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

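/* RED exposes its single child qdisc as pseudo-class 1; the class
 * operations below exist chiefly so that the child can be grafted,
 * walked and dumped.
 */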
static int red_dump_class(struct Qdisc *sch, unsigned long cl,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct red_sched_data *q = qdisc_priv(sch);

	if (cl != 1)
		return -ENOENT;
	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;
	return 0;
}

static int red_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old)
{
	struct red_sched_data *q = qdisc_priv(sch);

	if (new == NULL)
		new = &noop_qdisc;

	sch_tree_lock(sch);
	*old = xchg(&q->qdisc, new);
	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
	qdisc_reset(*old);
	sch_tree_unlock(sch);
	return 0;
}

static struct Qdisc *red_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct red_sched_data *q = qdisc_priv(sch);
	return q->qdisc;
}

static unsigned long red_get(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void red_put(struct Qdisc *sch, unsigned long arg)
{
	return;
}

static int red_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
			    struct nlattr **tca, unsigned long *arg)
{
	return -ENOSYS;
}

static int red_delete(struct Qdisc *sch, unsigned long cl)
{
	return -ENOSYS;
}

static void red_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}

static struct tcf_proto **red_find_tcf(struct Qdisc *sch, unsigned long cl)
{
	return NULL;
}

static const struct Qdisc_class_ops red_class_ops = {
	.graft		=	red_graft,
	.leaf		=	red_leaf,
	.get		=	red_get,
	.put		=	red_put,
	.change		=	red_change_class,
	.delete		=	red_delete,
	.walk		=	red_walk,
	.tcf_chain	=	red_find_tcf,
	.dump		=	red_dump_class,
};

static struct Qdisc_ops red_qdisc_ops __read_mostly = {
	.id		=	"red",
	.priv_size	=	sizeof(struct red_sched_data),
	.cl_ops		=	&red_class_ops,
	.enqueue	=	red_enqueue,
	.dequeue	=	red_dequeue,
	.requeue	=	red_requeue,
	.drop		=	red_drop,
	.init		=	red_init,
	.reset		=	red_reset,
	.destroy	=	red_destroy,
	.change		=	red_change,
	.dump		=	red_dump,
	.dump_stats	=	red_dump_stats,
	.owner		=	THIS_MODULE,
};

static int __init red_module_init(void)
{
	return register_qdisc(&red_qdisc_ops);
}

static void __exit red_module_exit(void)
{
	unregister_qdisc(&red_qdisc_ops);
}

module_init(red_module_init)
module_exit(red_module_exit)

MODULE_LICENSE("GPL");