/*
 * net/sched/sch_red.c	Random Early Detection queue.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Changes:
 * J Hadi Salim 980914:	computation fixes
 * Alexey Makarenko <makar@phoenix.kharkov.ua> 990814: qave on idle link was calculated incorrectly.
 * J Hadi Salim 980816:  ECN support
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <net/pkt_sched.h>
#include <net/inet_ecn.h>
#include <net/red.h>


/*	Parameters, settable by user:
	-----------------------------

	limit		- bytes (must be > qth_max + burst)

	Hard limit on queue length, should be chosen > qth_max
	to allow packet bursts. This parameter does not
	affect the algorithm's behaviour and can be chosen
	arbitrarily high (well, less than RAM size).
	Really, this limit will never be reached
	if RED works correctly.
 */

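/*
 * For illustration only (the values below are arbitrary examples, not
 * tuning advice): with iproute2, this qdisc is typically installed with
 * something like
 *
 *	tc qdisc add dev eth0 root red limit 400000 min 30000 max 90000 \
 *		avpkt 1000 burst 55 ecn probability 0.02 bandwidth 10Mbit
 *
 * tc converts these options into the tc_red_qopt parameters and the
 * stab lookup table parsed by red_change() below.
 */
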
struct red_sched_data {
	u32			limit;		/* HARD maximal queue length */
	unsigned char		flags;		/* TC_RED_ECN, TC_RED_HARDDROP */
	struct red_parms	parms;		/* RED algorithm state (qavg etc.) */
	struct red_stats	stats;		/* mark/drop counters for stats dump */
	struct Qdisc		*qdisc;		/* child qdisc holding the packets */
};

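/* Did userspace ask for ECN marking instead of dropping? */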
static inline int red_use_ecn(struct red_sched_data *q)
{
	return q->flags & TC_RED_ECN;
}

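/* Did userspace ask for unconditional drops above qth_max, even with ECN? */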
static inline int red_use_harddrop(struct red_sched_data *q)
{
	return q->flags & TC_RED_HARDDROP;
}

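/*
 * Enqueue decision.  The average queue size is an EWMA over the child
 * backlog in bytes, roughly qavg = (1 - W) * qavg + W * backlog with
 * W = 2^-Wlog (see red_calc_qavg() in include/net/red.h).  red_action()
 * then returns DONT_MARK below qth_min, PROB_MARK with a probability
 * that rises between qth_min and qth_max, and HARD_MARK above qth_max.
 */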
static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;
	int ret;

	q->parms.qavg = red_calc_qavg(&q->parms, child->qstats.backlog);

	if (red_is_idling(&q->parms))
		red_end_of_idle_period(&q->parms);

	switch (red_action(&q->parms, q->parms.qavg)) {
	case RED_DONT_MARK:
		break;

	case RED_PROB_MARK:
		sch->qstats.overlimits++;
		if (!red_use_ecn(q) || !INET_ECN_set_ce(skb)) {
			q->stats.prob_drop++;
			goto congestion_drop;
		}

		q->stats.prob_mark++;
		break;

	case RED_HARD_MARK:
		sch->qstats.overlimits++;
		if (red_use_harddrop(q) || !red_use_ecn(q) ||
		    !INET_ECN_set_ce(skb)) {
			q->stats.forced_drop++;
			goto congestion_drop;
		}

		q->stats.forced_mark++;
		break;
	}

	ret = qdisc_enqueue(skb, child);
	if (likely(ret == NET_XMIT_SUCCESS)) {
		qdisc_bstats_update(sch, skb);
		sch->q.qlen++;
	} else if (net_xmit_drop_count(ret)) {
		q->stats.pdrop++;
		sch->qstats.drops++;
	}
	return ret;

congestion_drop:
	qdisc_drop(skb, sch);
	return NET_XMIT_CN;
}

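/*
 * Dequeue from the child; when the child goes empty, start the idle
 * period so that qavg decays while the link sits unused.
 */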
static struct sk_buff *red_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;

	skb = child->dequeue(child);
	if (skb)
		sch->q.qlen--;
	else if (!red_is_idling(&q->parms))
		red_start_of_idle_period(&q->parms);

	return skb;
}

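/* Peeking is delegated to the child; nothing is dequeued here. */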
static struct sk_buff *red_peek(struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;

	return child->ops->peek(child);
}

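/*
 * Reclaim one packet from the child on demand; when the child cannot
 * drop anything, start the idle period so qavg decays.
 */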
static unsigned int red_drop(struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;
	unsigned int len;

	if (child->ops->drop && (len = child->ops->drop(child)) > 0) {
		q->stats.other++;
		sch->qstats.drops++;
		sch->q.qlen--;
		return len;
	}

	if (!red_is_idling(&q->parms))
		red_start_of_idle_period(&q->parms);

	return 0;
}

static void red_reset(struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);

	qdisc_reset(q->qdisc);
	sch->q.qlen = 0;
	red_restart(&q->parms);
}

static void red_destroy(struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);

	qdisc_destroy(q->qdisc);
}

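/*
 * Netlink policy: fixed-size RED parameters plus the RED_STAB_SIZE
 * (256 byte) cell log lookup table supplied by userspace.
 */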
static const struct nla_policy red_policy[TCA_RED_MAX + 1] = {
	[TCA_RED_PARMS]	= { .len = sizeof(struct tc_red_qopt) },
	[TCA_RED_STAB]	= { .len = RED_STAB_SIZE },
};

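/*
 * Apply a new configuration.  A fresh bfifo child sized to the byte
 * limit is created outside the tree lock; the old child is then swapped
 * out and the RED parameters updated atomically with respect to the
 * data path.
 */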
static int red_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_RED_MAX + 1];
	struct tc_red_qopt *ctl;
	struct Qdisc *child = NULL;
	int err;

	if (opt == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_RED_MAX, opt, red_policy);
	if (err < 0)
		return err;

	if (tb[TCA_RED_PARMS] == NULL ||
	    tb[TCA_RED_STAB] == NULL)
		return -EINVAL;

	ctl = nla_data(tb[TCA_RED_PARMS]);

	if (ctl->limit > 0) {
		child = fifo_create_dflt(sch, &bfifo_qdisc_ops, ctl->limit);
		if (IS_ERR(child))
			return PTR_ERR(child);
	}

	sch_tree_lock(sch);
	q->flags = ctl->flags;
	q->limit = ctl->limit;
	if (child) {
		qdisc_tree_decrease_qlen(q->qdisc, q->qdisc->q.qlen);
		qdisc_destroy(q->qdisc);
		q->qdisc = child;
	}

	red_set_parms(&q->parms, ctl->qth_min, ctl->qth_max, ctl->Wlog,
				 ctl->Plog, ctl->Scell_log,
				 nla_data(tb[TCA_RED_STAB]));

	if (skb_queue_empty(&sch->q))
		red_end_of_idle_period(&q->parms);

	sch_tree_unlock(sch);
	return 0;
}

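/* Start with a noop child; red_change() installs the real bfifo. */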
static int red_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct red_sched_data *q = qdisc_priv(sch);

	q->qdisc = &noop_qdisc;
	return red_change(sch, opt);
}

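/*
 * Dump the configuration.  qth_min/qth_max are stored left-shifted by
 * Wlog (see red_set_parms()), so shift them back for userspace.
 */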
static int red_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts = NULL;
	struct tc_red_qopt opt = {
		.limit		= q->limit,
		.flags		= q->flags,
		.qth_min	= q->parms.qth_min >> q->parms.Wlog,
		.qth_max	= q->parms.qth_max >> q->parms.Wlog,
		.Wlog		= q->parms.Wlog,
		.Plog		= q->parms.Plog,
		.Scell_log	= q->parms.Scell_log,
	};

	sch->qstats.backlog = q->qdisc->qstats.backlog;
	opts = nla_nest_start(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;
	NLA_PUT(skb, TCA_RED_PARMS, sizeof(opt), &opt);
	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}

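/*
 * Extended stats: "early" counts RED drops (probabilistic + forced),
 * "pdrop" tail drops at the hard limit, "marked" ECN marks.
 */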
static int red_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct tc_red_xstats st = {
		.early	= q->stats.prob_drop + q->stats.forced_drop,
		.pdrop	= q->stats.pdrop,
		.other	= q->stats.other,
		.marked	= q->stats.prob_mark + q->stats.forced_mark,
	};

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

static int red_dump_class(struct Qdisc *sch, unsigned long cl,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct red_sched_data *q = qdisc_priv(sch);

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;
	return 0;
}

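/* Replace the child qdisc under the tree lock; RED has a single class. */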
static int red_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old)
{
	struct red_sched_data *q = qdisc_priv(sch);

	if (new == NULL)
		new = &noop_qdisc;

	sch_tree_lock(sch);
	*old = q->qdisc;
	q->qdisc = new;
	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
	qdisc_reset(*old);
	sch_tree_unlock(sch);
	return 0;
}

static struct Qdisc *red_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct red_sched_data *q = qdisc_priv(sch);

	return q->qdisc;
}

static unsigned long red_get(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void red_put(struct Qdisc *sch, unsigned long arg)
{
}

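/* Walk the single pseudo-class exposed by this qdisc. */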
static void red_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}

static const struct Qdisc_class_ops red_class_ops = {
	.graft		=	red_graft,
	.leaf		=	red_leaf,
	.get		=	red_get,
	.put		=	red_put,
	.walk		=	red_walk,
	.dump		=	red_dump_class,
};

static struct Qdisc_ops red_qdisc_ops __read_mostly = {
	.id		=	"red",
	.priv_size	=	sizeof(struct red_sched_data),
	.cl_ops		=	&red_class_ops,
	.enqueue	=	red_enqueue,
	.dequeue	=	red_dequeue,
	.peek		=	red_peek,
	.drop		=	red_drop,
	.init		=	red_init,
	.reset		=	red_reset,
	.destroy	=	red_destroy,
	.change		=	red_change,
	.dump		=	red_dump,
	.dump_stats	=	red_dump_stats,
	.owner		=	THIS_MODULE,
};

static int __init red_module_init(void)
{
	return register_qdisc(&red_qdisc_ops);
}

static void __exit red_module_exit(void)
{
	unregister_qdisc(&red_qdisc_ops);
}

module_init(red_module_init)
module_exit(red_module_exit)

MODULE_LICENSE("GPL");
359