/*
 * net/sched/sch_tbf.c	Token Bucket Filter queue.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *		Dmitry Torokhov <dtor@mail.ru> - allow attaching inner qdiscs -
 *						 original idea by Martin Devera
 *
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>


/*	Simple Token Bucket Filter.
	=======================================

	SOURCE.
	-------

	None.

	Description.
	------------

	A data flow obeys TBF with rate R and depth B, if for any
	time interval t_i...t_f the number of transmitted bits
	does not exceed B + R*(t_f-t_i).

	Packetized version of this definition:
	The sequence of packets of sizes s_i served at moments t_i
	obeys TBF, if for any i<=k:

	s_i+....+s_k <= B + R*(t_k - t_i)
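
	(E.g., with B = 3000 bits and R = 1000 bits/sec, any
	one-second interval may carry at most B + R*1 = 4000 bits.)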

	Algorithm.
	----------

	Let N(t_i) be B/R initially and N(t) grow continuously with time as:

	N(t+delta) = min{B/R, N(t) + delta}

	If the first packet in queue has length S, it may be
	transmitted only at the time t_* when S/R <= N(t_*),
	and in this case N(t) jumps:

	N(t_* + 0) = N(t_* - 0) - S/R.



	Actually, QoS requires two TBFs to be applied to a data stream.
	One of them controls the steady-state burst size, and the other,
	with rate P (peak rate) and depth M (equal to link MTU),
	limits bursts on a smaller time scale.

	It is easy to see that P > R and B > M. If P is infinity, this
	double TBF is equivalent to a single one.

	When TBF works in reshaping mode, latency is estimated as:

	lat = max ((L-B)/R, (L-M)/P)
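
	For example (illustrative numbers only): with limit L = 10 Kbytes,
	burst B = 2 Kbytes, rate R = 1 Kbyte/msec, peak bucket M = 1.5
	Kbytes and peak rate P = 4 Kbytes/msec, the two terms give 8 msec
	and ~2.1 msec respectively, so lat = 8 msec.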


	NOTES.
	------

	If TBF throttles, it starts a watchdog timer, which will wake it up
	when it is ready to transmit.
	Note that the minimal timer resolution is 1/HZ.
	If no new packets arrive during this period,
	or if the device is not awakened by EOI for some previous packet,
	TBF can stop its activity for 1/HZ.


	This means that, with depth B, the maximal rate is

	R_crit = B*HZ

	E.g., for 10Mbit Ethernet and HZ=100 the minimal allowed B is ~10 Kbytes.

	Note that the peak rate TBF is much tougher: with MTU 1500
	P_crit = 150 Kbytes/sec. So, if you need greater peak
	rates, use Alpha with HZ=1000 :-)

	With classful TBF, limit is kept only for backwards compatibility.
	It is passed to the default bfifo qdisc - once the inner qdisc is
	changed, the limit is no longer effective.
*/
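
/* Illustration only (not compiled as part of the qdisc): a minimal
 * userspace sketch of the dual token bucket described above. All names
 * here are hypothetical, and it keeps tokens in bytes and time in
 * seconds, whereas the code below works in scheduler ticks driven by
 * rate-table lookups.
 *
 *	struct tbf_model {
 *		double B, R;		// bucket depth (bytes), rate (bytes/sec)
 *		double M, P;		// peak depth (~MTU) and peak rate
 *		double tokens, ptokens;	// current token levels (bytes)
 *		double t_c;		// time of last update (sec)
 *	};
 *
 *	// May a packet of 'len' bytes be sent at time 'now'?
 *	static bool tbf_model_may_send(struct tbf_model *m, double now,
 *				       double len)
 *	{
 *		double toks = fmin(m->B, m->tokens + m->R * (now - m->t_c));
 *		double ptoks = fmin(m->M, m->ptokens + m->P * (now - m->t_c));
 *
 *		if (toks < len || ptoks < len)
 *			return false;		// throttle: wait for refill
 *		m->t_c = now;
 *		m->tokens = toks - len;		// both buckets pay for the send
 *		m->ptokens = ptoks - len;
 *		return true;
 *	}
 */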

struct tbf_sched_data {
/* Parameters */
	u32		limit;		/* Maximal length of backlog: bytes */
	u32		buffer;		/* Token bucket depth/rate: MUST BE >= MTU/B */
	u32		mtu;		/* Depth of the peak-rate bucket, as transmit time */
	u32		max_size;	/* Largest packet we accept: bytes */
	struct qdisc_rate_table	*R_tab;
	struct qdisc_rate_table	*P_tab;

/* Variables */
	long	tokens;			/* Current number of B tokens */
	long	ptokens;		/* Current number of P tokens */
	psched_time_t	t_c;		/* Time check-point */
	struct Qdisc	*qdisc;		/* Inner qdisc, default - bfifo queue */
	struct qdisc_watchdog watchdog;	/* Watchdog timer */
};

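/* Convert a packet length L to the time it takes to transmit it at the
 * configured rate (R_tab) or peak rate (P_tab), via the rate tables
 * supplied by userspace. */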
#define L2T(q, L)   qdisc_l2t((q)->R_tab, L)
#define L2T_P(q, L) qdisc_l2t((q)->P_tab, L)

static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	int ret;

	/* A packet larger than max_size could never accumulate enough
	 * tokens, so it must not enter the queue at all. */
	if (qdisc_pkt_len(skb) > q->max_size)
		return qdisc_reshape_fail(skb, sch);

	ret = qdisc_enqueue(skb, q->qdisc);
	if (ret != 0) {
		if (net_xmit_drop_count(ret))
			sch->qstats.drops++;
		return ret;
	}

	sch->q.qlen++;
	sch->bstats.bytes += qdisc_pkt_len(skb);
	sch->bstats.packets++;
	return 0;
}

static unsigned int tbf_drop(struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	unsigned int len = 0;

	if (q->qdisc->ops->drop && (len = q->qdisc->ops->drop(q->qdisc)) != 0) {
		sch->q.qlen--;
		sch->qstats.drops++;
	}
	return len;
}

static struct sk_buff *tbf_dequeue(struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

	skb = q->qdisc->ops->peek(q->qdisc);

	if (skb) {
		psched_time_t now;
		long toks;
		long ptoks = 0;
		unsigned int len = qdisc_pkt_len(skb);

		now = psched_get_time();
		toks = psched_tdiff_bounded(now, q->t_c, q->buffer);

		if (q->P_tab) {
			ptoks = toks + q->ptokens;
			if (ptoks > (long)q->mtu)
				ptoks = q->mtu;
			ptoks -= L2T_P(q, len);
		}
		toks += q->tokens;
		if (toks > (long)q->buffer)
			toks = q->buffer;
		toks -= L2T(q, len);

		/* Both values are non-negative iff their bitwise OR has the
		 * sign bit clear, so this tests toks >= 0 && ptoks >= 0. */
		if ((toks | ptoks) >= 0) {
			skb = qdisc_dequeue_peeked(q->qdisc);
			if (unlikely(!skb))
				return NULL;

			q->t_c = now;
			q->tokens = toks;
			q->ptokens = ptoks;
			sch->q.qlen--;
			sch->flags &= ~TCQ_F_THROTTLED;
			return skb;
		}

		/* Sleep until the more deeply depleted bucket has refilled
		 * enough for this packet. */
		qdisc_watchdog_schedule(&q->watchdog,
					now + max_t(long, -toks, -ptoks));

		/* Maybe we have a shorter packet in the queue,
		   which can be sent now. It sounds cool,
		   but, however, this is wrong in principle.
		   We MUST NOT reorder packets under these circumstances.

		   Really, if we split the flow into independent
		   subflows, it would be a very good solution.
		   This is the main idea of all FQ algorithms
		   (cf. CSZ, HPFQ, HFSC)
		 */

		sch->qstats.overlimits++;
	}
	return NULL;
}

static void tbf_reset(struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	qdisc_reset(q->qdisc);
	sch->q.qlen = 0;
	q->t_c = psched_get_time();
	q->tokens = q->buffer;
	q->ptokens = q->mtu;
	qdisc_watchdog_cancel(&q->watchdog);
}

static const struct nla_policy tbf_policy[TCA_TBF_MAX + 1] = {
	[TCA_TBF_PARMS]	= { .len = sizeof(struct tc_tbf_qopt) },
	[TCA_TBF_RTAB]	= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
	[TCA_TBF_PTAB]	= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
};

static int tbf_change(struct Qdisc *sch, struct nlattr *opt)
{
	int err;
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_TBF_PTAB + 1];
	struct tc_tbf_qopt *qopt;
	struct qdisc_rate_table *rtab = NULL;
	struct qdisc_rate_table *ptab = NULL;
	struct qdisc_rate_table *tmp;
	struct Qdisc *child = NULL;
	int max_size, n;

	err = nla_parse_nested(tb, TCA_TBF_PTAB, opt, tbf_policy);
	if (err < 0)
		return err;

	err = -EINVAL;
	if (tb[TCA_TBF_PARMS] == NULL)
		goto done;

	qopt = nla_data(tb[TCA_TBF_PARMS]);
	rtab = qdisc_get_rtab(&qopt->rate, tb[TCA_TBF_RTAB]);
	if (rtab == NULL)
		goto done;

	if (qopt->peakrate.rate) {
		if (qopt->peakrate.rate > qopt->rate.rate)
			ptab = qdisc_get_rtab(&qopt->peakrate, tb[TCA_TBF_PTAB]);
		if (ptab == NULL)
			goto done;
	}

	/* rtab->data[n] holds the time needed to transmit a packet of up
	 * to about (n + 1) << cell_log bytes; stop at the first slot whose
	 * cost exceeds a full bucket to find the largest packet we can
	 * ever pay for. */
	for (n = 0; n < 256; n++)
		if (rtab->data[n] > qopt->buffer)
			break;
	max_size = (n << qopt->rate.cell_log) - 1;
	if (ptab) {
		int size;

		for (n = 0; n < 256; n++)
			if (ptab->data[n] > qopt->mtu)
				break;
		size = (n << qopt->peakrate.cell_log) - 1;
		if (size < max_size)
			max_size = size;
	}
	if (max_size < 0)
		goto done;

	if (qopt->limit > 0) {
		child = fifo_create_dflt(sch, &bfifo_qdisc_ops, qopt->limit);
		if (IS_ERR(child)) {
			err = PTR_ERR(child);
			goto done;
		}
	}

	sch_tree_lock(sch);
	if (child) {
		qdisc_tree_decrease_qlen(q->qdisc, q->qdisc->q.qlen);
		qdisc_destroy(q->qdisc);
		q->qdisc = child;
	}
	q->limit = qopt->limit;
	q->mtu = qopt->mtu;
	q->max_size = max_size;
	q->buffer = qopt->buffer;
	q->tokens = q->buffer;
	q->ptokens = q->mtu;

	/* Swap the old tables out under the tree lock; the previous ones
	 * (now in rtab/ptab) are released below. */
	tmp = q->R_tab;
	q->R_tab = rtab;
	rtab = tmp;

	tmp = q->P_tab;
	q->P_tab = ptab;
	ptab = tmp;
	sch_tree_unlock(sch);
	err = 0;
done:
	if (rtab)
		qdisc_put_rtab(rtab);
	if (ptab)
		qdisc_put_rtab(ptab);
	return err;
}

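/* For reference, the parameters parsed above are normally supplied by
 * tc(8). A typical invocation (illustrative values; see tc-tbf(8)):
 *
 *	tc qdisc add dev eth0 root tbf rate 0.5mbit burst 5kb \
 *		latency 70ms peakrate 1mbit minburst 1540
 *
 * tc converts rate/burst into qopt->rate and qopt->buffer, latency into
 * the byte limit for the inner bfifo, and peakrate/minburst into
 * qopt->peakrate and qopt->mtu.
 */
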
static int tbf_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	if (opt == NULL)
		return -EINVAL;

	q->t_c = psched_get_time();
	qdisc_watchdog_init(&q->watchdog, sch);
	q->qdisc = &noop_qdisc;

	return tbf_change(sch, opt);
}

static void tbf_destroy(struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	qdisc_watchdog_cancel(&q->watchdog);

	if (q->P_tab)
		qdisc_put_rtab(q->P_tab);
	if (q->R_tab)
		qdisc_put_rtab(q->R_tab);

	qdisc_destroy(q->qdisc);
}

static int tbf_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct nlattr *nest;
	struct tc_tbf_qopt opt;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	opt.limit = q->limit;
	opt.rate = q->R_tab->rate;
	if (q->P_tab)
		opt.peakrate = q->P_tab->rate;
	else
		memset(&opt.peakrate, 0, sizeof(opt.peakrate));
	opt.mtu = q->mtu;
	opt.buffer = q->buffer;
	NLA_PUT(skb, TCA_TBF_PARMS, sizeof(opt), &opt);

	nla_nest_end(skb, nest);
	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static int tbf_dump_class(struct Qdisc *sch, unsigned long cl,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	if (cl != 1)	/* only one class */
		return -ENOENT;

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;

	return 0;
}

static int tbf_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	if (new == NULL)
		new = &noop_qdisc;

	sch_tree_lock(sch);
	*old = q->qdisc;
	q->qdisc = new;
	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
	qdisc_reset(*old);
	sch_tree_unlock(sch);

	return 0;
}

static struct Qdisc *tbf_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	return q->qdisc;
}

static unsigned long tbf_get(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void tbf_put(struct Qdisc *sch, unsigned long arg)
{
}

static int tbf_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
			    struct nlattr **tca, unsigned long *arg)
{
	return -ENOSYS;
}

static int tbf_delete(struct Qdisc *sch, unsigned long arg)
{
	return -ENOSYS;
}

static void tbf_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}

static struct tcf_proto **tbf_find_tcf(struct Qdisc *sch, unsigned long cl)
{
	return NULL;
}

static const struct Qdisc_class_ops tbf_class_ops = {
	.graft		=	tbf_graft,
	.leaf		=	tbf_leaf,
	.get		=	tbf_get,
	.put		=	tbf_put,
	.change		=	tbf_change_class,
	.delete		=	tbf_delete,
	.walk		=	tbf_walk,
	.tcf_chain	=	tbf_find_tcf,
	.dump		=	tbf_dump_class,
};

static struct Qdisc_ops tbf_qdisc_ops __read_mostly = {
	.next		=	NULL,
	.cl_ops		=	&tbf_class_ops,
	.id		=	"tbf",
	.priv_size	=	sizeof(struct tbf_sched_data),
	.enqueue	=	tbf_enqueue,
	.dequeue	=	tbf_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.drop		=	tbf_drop,
	.init		=	tbf_init,
	.reset		=	tbf_reset,
	.destroy	=	tbf_destroy,
	.change		=	tbf_change,
	.dump		=	tbf_dump,
	.owner		=	THIS_MODULE,
};

static int __init tbf_module_init(void)
{
	return register_qdisc(&tbf_qdisc_ops);
}

static void __exit tbf_module_exit(void)
{
	unregister_qdisc(&tbf_qdisc_ops);
}
module_init(tbf_module_init)
module_exit(tbf_module_exit)
MODULE_LICENSE("GPL");