/*
 * net/sched/sch_red.c	Random Early Detection queue.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Changes:
 * J Hadi Salim 980914:	computation fixes
 * Alexey Makarenko <makar@phoenix.kharkov.ua> 990814: qave on idle link was calculated incorrectly.
 * J Hadi Salim 980816:  ECN support
 */
16 
17 #include <linux/module.h>
18 #include <linux/types.h>
19 #include <linux/kernel.h>
20 #include <linux/skbuff.h>
21 #include <net/pkt_sched.h>
22 #include <net/inet_ecn.h>
23 #include <net/red.h>
24 
25 
/*	Parameters, settable by user:
	-----------------------------

	limit		- bytes (must be > qth_max + burst)

	Hard limit on queue length, which should be chosen > qth_max
	to allow packet bursts. This parameter does not affect the
	algorithm's behaviour and can be chosen arbitrarily high
	(well, less than RAM size). If RED works correctly, this
	limit should never actually be reached.
 */

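/*
 * For orientation only; the authoritative fixed-point arithmetic lives
 * in include/net/red.h.  The average queue size is an EWMA with weight
 * W = 2^(-Wlog):
 *
 *	qavg = (1 - W) * qavg + W * backlog
 *
 * and while qth_min <= qavg < qth_max the mark/drop probability grows
 * roughly linearly,
 *
 *	p ~= max_P * (qavg - qth_min) / (qth_max - qth_min)
 *
 * as described in Floyd & Jacobson, "Random Early Detection Gateways
 * for Congestion Avoidance" (1993).  A typical userspace configuration
 * (values purely illustrative) might be:
 *
 *	tc qdisc add dev eth0 root red limit 400000 min 30000 \
 *		max 90000 avpkt 1000 burst 55 ecn bandwidth 10Mbit \
 *		probability 0.02
 */
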
struct red_sched_data {
	u32			limit;		/* HARD maximal queue length */
	unsigned char		flags;
	struct red_parms	parms;
	struct red_stats	stats;
	struct Qdisc		*qdisc;
};

static inline int red_use_ecn(struct red_sched_data *q)
{
	return q->flags & TC_RED_ECN;
}

static inline int red_use_harddrop(struct red_sched_data *q)
{
	return q->flags & TC_RED_HARDDROP;
}

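/*
 * Enqueue path: recompute the average queue size from the child's byte
 * backlog, then let red_action() classify it.  PROB_MARK and HARD_MARK
 * either set the ECN CE codepoint (when the qdisc was configured with
 * the "ecn" flag and the packet is ECN-capable) or drop the packet;
 * HARD_MARK drops unconditionally when "harddrop" is set.  Packets
 * that survive are handed to the child qdisc.
 */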
static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;
	int ret;

	q->parms.qavg = red_calc_qavg(&q->parms, child->qstats.backlog);

	if (red_is_idling(&q->parms))
		red_end_of_idle_period(&q->parms);

	switch (red_action(&q->parms, q->parms.qavg)) {
	case RED_DONT_MARK:
		break;

	case RED_PROB_MARK:
		sch->qstats.overlimits++;
		if (!red_use_ecn(q) || !INET_ECN_set_ce(skb)) {
			q->stats.prob_drop++;
			goto congestion_drop;
		}

		q->stats.prob_mark++;
		break;

	case RED_HARD_MARK:
		sch->qstats.overlimits++;
		if (red_use_harddrop(q) || !red_use_ecn(q) ||
		    !INET_ECN_set_ce(skb)) {
			q->stats.forced_drop++;
			goto congestion_drop;
		}

		q->stats.forced_mark++;
		break;
	}

	ret = qdisc_enqueue(skb, child);
	if (likely(ret == NET_XMIT_SUCCESS)) {
		sch->q.qlen++;
	} else if (net_xmit_drop_count(ret)) {
		q->stats.pdrop++;
		sch->qstats.drops++;
	}
	return ret;

congestion_drop:
	qdisc_drop(skb, sch);
	return NET_XMIT_CN;
}

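/*
 * When the child runs dry we enter an "idle period": red_calc_qavg()
 * will then decay qavg over the idle time instead of freezing it
 * (the Makarenko fix noted in the header above).
 */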
static struct sk_buff *red_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;

	skb = child->dequeue(child);
	if (skb) {
		qdisc_bstats_update(sch, skb);
		sch->q.qlen--;
	} else {
		if (!red_is_idling(&q->parms))
			red_start_of_idle_period(&q->parms);
	}
	return skb;
}

static struct sk_buff *red_peek(struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;

	return child->ops->peek(child);
}

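/*
 * ->drop is typically invoked by a parent qdisc that needs to reclaim
 * queue space; we delegate to the child and account the loss as
 * "other" (neither an early drop nor a queue-limit drop).
 */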
static unsigned int red_drop(struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;
	unsigned int len;

	if (child->ops->drop && (len = child->ops->drop(child)) > 0) {
		q->stats.other++;
		sch->qstats.drops++;
		sch->q.qlen--;
		return len;
	}

	if (!red_is_idling(&q->parms))
		red_start_of_idle_period(&q->parms);

	return 0;
}

static void red_reset(struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);

	qdisc_reset(q->qdisc);
	sch->q.qlen = 0;
	red_restart(&q->parms);
}

static void red_destroy(struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);

	qdisc_destroy(q->qdisc);
}

static const struct nla_policy red_policy[TCA_RED_MAX + 1] = {
	[TCA_RED_PARMS]	= { .len = sizeof(struct tc_red_qopt) },
	[TCA_RED_STAB]	= { .len = RED_STAB_SIZE },
};

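/*
 * Parse and apply configuration.  Both TCA_RED_PARMS (thresholds,
 * fixed-point shifts, flags) and TCA_RED_STAB (the idle-decay lookup
 * table computed by userspace) are mandatory.  A byte-limited bfifo
 * child is created up front so the old child is only swapped out
 * under the qdisc tree lock.
 */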
static int red_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_RED_MAX + 1];
	struct tc_red_qopt *ctl;
	struct Qdisc *child = NULL;
	int err;

	if (opt == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_RED_MAX, opt, red_policy);
	if (err < 0)
		return err;

	if (tb[TCA_RED_PARMS] == NULL ||
	    tb[TCA_RED_STAB] == NULL)
		return -EINVAL;

	ctl = nla_data(tb[TCA_RED_PARMS]);

	if (ctl->limit > 0) {
		child = fifo_create_dflt(sch, &bfifo_qdisc_ops, ctl->limit);
		if (IS_ERR(child))
			return PTR_ERR(child);
	}

	sch_tree_lock(sch);
	q->flags = ctl->flags;
	q->limit = ctl->limit;
	if (child) {
		qdisc_tree_decrease_qlen(q->qdisc, q->qdisc->q.qlen);
		qdisc_destroy(q->qdisc);
		q->qdisc = child;
	}

	red_set_parms(&q->parms, ctl->qth_min, ctl->qth_max, ctl->Wlog,
				 ctl->Plog, ctl->Scell_log,
				 nla_data(tb[TCA_RED_STAB]));

	if (skb_queue_empty(&sch->q))
		red_end_of_idle_period(&q->parms);

	sch_tree_unlock(sch);
	return 0;
}

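/* Initial creation is just a reconfiguration on top of a noop child. */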
static int red_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct red_sched_data *q = qdisc_priv(sch);

	q->qdisc = &noop_qdisc;
	return red_change(sch, opt);
}

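/*
 * red_set_parms() stores qth_min/qth_max left-shifted by Wlog for the
 * fixed-point average, so the dump shifts them back to report the byte
 * values the user originally configured.
 */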
static int red_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts = NULL;
	struct tc_red_qopt opt = {
		.limit		= q->limit,
		.flags		= q->flags,
		.qth_min	= q->parms.qth_min >> q->parms.Wlog,
		.qth_max	= q->parms.qth_max >> q->parms.Wlog,
		.Wlog		= q->parms.Wlog,
		.Plog		= q->parms.Plog,
		.Scell_log	= q->parms.Scell_log,
	};

	sch->qstats.backlog = q->qdisc->qstats.backlog;
	opts = nla_nest_start(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;
	NLA_PUT(skb, TCA_RED_PARMS, sizeof(opt), &opt);
	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}

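/*
 * Export the xstats: "early" aggregates probabilistic and forced
 * drops, "marked" the corresponding ECN marks, "pdrop" counts packets
 * refused by the child fifo, and "other" covers ->drop reclaims.
 */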
static int red_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct tc_red_xstats st = {
		.early	= q->stats.prob_drop + q->stats.forced_drop,
		.pdrop	= q->stats.pdrop,
		.other	= q->stats.other,
		.marked	= q->stats.prob_mark + q->stats.forced_mark,
	};

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

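/*
 * Class interface: RED exposes exactly one pseudo-class (minor 1) that
 * stands for the attached child qdisc, so the get/put/walk operations
 * below are mostly trivial.
 */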
static int red_dump_class(struct Qdisc *sch, unsigned long cl,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct red_sched_data *q = qdisc_priv(sch);

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;
	return 0;
}

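/*
 * Replace the child qdisc under the tree lock; the displaced child is
 * reset and handed back to the caller, which drops the final
 * reference.
 */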
static int red_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old)
{
	struct red_sched_data *q = qdisc_priv(sch);

	if (new == NULL)
		new = &noop_qdisc;

	sch_tree_lock(sch);
	*old = q->qdisc;
	q->qdisc = new;
	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
	qdisc_reset(*old);
	sch_tree_unlock(sch);
	return 0;
}

static struct Qdisc *red_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct red_sched_data *q = qdisc_priv(sch);

	return q->qdisc;
}

static unsigned long red_get(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void red_put(struct Qdisc *sch, unsigned long arg)
{
}

static void red_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}

static const struct Qdisc_class_ops red_class_ops = {
	.graft		=	red_graft,
	.leaf		=	red_leaf,
	.get		=	red_get,
	.put		=	red_put,
	.walk		=	red_walk,
	.dump		=	red_dump_class,
};

static struct Qdisc_ops red_qdisc_ops __read_mostly = {
	.id		=	"red",
	.priv_size	=	sizeof(struct red_sched_data),
	.cl_ops		=	&red_class_ops,
	.enqueue	=	red_enqueue,
	.dequeue	=	red_dequeue,
	.peek		=	red_peek,
	.drop		=	red_drop,
	.init		=	red_init,
	.reset		=	red_reset,
	.destroy	=	red_destroy,
	.change		=	red_change,
	.dump		=	red_dump,
	.dump_stats	=	red_dump_stats,
	.owner		=	THIS_MODULE,
};

static int __init red_module_init(void)
{
	return register_qdisc(&red_qdisc_ops);
}

static void __exit red_module_exit(void)
{
	unregister_qdisc(&red_qdisc_ops);
}

module_init(red_module_init)
module_exit(red_module_exit)

MODULE_LICENSE("GPL");