/*
 * net/sched/sch_netem.c	Network emulator
 *
 * 		This program is free software; you can redistribute it and/or
 * 		modify it under the terms of the GNU General Public License
 * 		as published by the Free Software Foundation; either version
 * 		2 of the License.
 *
 *		Many of the algorithms and ideas for this came from
 *		NIST Net, which is not copyrighted.
 *
 * Authors:	Stephen Hemminger <shemminger@osdl.org>
 *		Catalin(ux aka Dino) BOIE <catab at umbrella dot ro>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>

#include <net/netlink.h>
#include <net/pkt_sched.h>

#define VERSION "1.2"

/*	Network Emulation Queuing algorithm.
	====================================

	Sources: [1] Mark Carson, Darrin Santay, "NIST Net - A Linux-based
		 Network Emulation Tool"
		 [2] Luigi Rizzo, DummyNet for FreeBSD

	 ----------------------------------------------------------------

	 This started out as a simple way to delay outgoing packets to
	 test TCP but has grown to include most of the functionality
	 of a full blown network emulator like NISTnet. It can delay
	 packets and add random jitter (and correlation). The random
	 distribution can be loaded from a table as well to provide
	 normal, Pareto, or experimental curves. Packet loss,
	 duplication, and reordering can also be emulated.

	 This qdisc does not do classification; that can be handled by
	 layering other disciplines.  It does not need to do bandwidth
	 control either, since that can be handled by using token
	 bucket or other rate control.
*/
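
/*
 * Example usage, as a rough sketch, assuming the iproute2 'tc' tool is
 * used to drive this qdisc from userspace:
 *
 *	# 100ms delay with 10ms jitter (25% correlated), plus 0.5% loss
 *	tc qdisc add dev eth0 root netem delay 100ms 10ms 25% loss 0.5%
 *
 * Rate control is deliberately left to other disciplines, e.g. by
 * stacking netem with tbf.
 */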

struct netem_sched_data {
	struct Qdisc	*qdisc;
	struct qdisc_watchdog watchdog;

	psched_tdiff_t latency;
	psched_tdiff_t jitter;

	u32 loss;
	u32 limit;
	u32 counter;
	u32 gap;
	u32 duplicate;
	u32 reorder;
	u32 corrupt;

	struct crndstate {
		u32 last;
		u32 rho;
	} delay_cor, loss_cor, dup_cor, reorder_cor, corrupt_cor;

	struct disttable {
		u32  size;
		s16 table[0];
	} *delay_dist;
};

/* Time stamp put into socket buffer control block */
struct netem_skb_cb {
	psched_time_t	time_to_send;
};

static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(skb->cb) <
		sizeof(struct qdisc_skb_cb) + sizeof(struct netem_skb_cb));
	return (struct netem_skb_cb *)qdisc_skb_cb(skb)->data;
}

/* init_crandom - initialize correlated random number generator
 * Use entropy source for initial seed.
 */
static void init_crandom(struct crndstate *state, unsigned long rho)
{
	state->rho = rho;
	state->last = net_random();
}

/* get_crandom - correlated random number generator
 * Next number depends on last value.
 * rho is scaled to avoid floating point.
 */
static u32 get_crandom(struct crndstate *state)
{
	u64 value, rho;
	unsigned long answer;

	if (state->rho == 0)	/* no correlation */
		return net_random();

	value = net_random();
	rho = (u64)state->rho + 1;
	answer = (value * ((1ull<<32) - rho) + state->last * rho) >> 32;
	state->last = answer;
	return answer;
}
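
/*
 * Worked example of the fixed-point blend above (illustrative only):
 * a correlation of 50% arrives from userspace scaled to rho ~= 2^31,
 * so each call computes roughly
 *
 *	answer = value * (2^32 - 2^31)/2^32 + last * 2^31/2^32
 *	       = value/2 + last/2
 *
 * i.e. every new sample keeps about half of the previous one, a
 * first-order autoregressive sequence in 32-bit fixed point.
 */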

/* tabledist - return a pseudo-randomly distributed value with mean mu and
 * std deviation sigma.  Uses table lookup to approximate the desired
 * distribution, and a uniformly-distributed pseudo-random source.
 */
static psched_tdiff_t tabledist(psched_tdiff_t mu, psched_tdiff_t sigma,
				struct crndstate *state,
				const struct disttable *dist)
{
	psched_tdiff_t x;
	long t;
	u32 rnd;

	if (sigma == 0)
		return mu;

	rnd = get_crandom(state);

	/* default uniform distribution */
	if (dist == NULL)
		return (rnd % (2*sigma)) - sigma + mu;

	t = dist->table[rnd % dist->size];
	x = (sigma % NETEM_DIST_SCALE) * t;
	if (x >= 0)
		x += NETEM_DIST_SCALE/2;
	else
		x -= NETEM_DIST_SCALE/2;

	return x / NETEM_DIST_SCALE + (sigma / NETEM_DIST_SCALE) * t + mu;
}
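
/*
 * The return expression above is mu + sigma * t / NETEM_DIST_SCALE,
 * split into two halves to avoid overflow and rounded to nearest.
 * A sketch with made-up numbers (NETEM_DIST_SCALE is 8192): take
 * sigma = 10000 ticks and a table entry t = 4096 (i.e. +0.5 sigma):
 *
 *	x = (10000 % 8192) * 4096 = 1808 * 4096 = 7405568
 *	x += 8192/2	(round to nearest)	 = 7409664
 *	result = 7409664/8192 + (10000/8192) * 4096 + mu
 *	       = 904 + 4096 + mu = mu + 5000
 *
 * which matches the intended mu + 0.5 * sigma.
 */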

/*
 * Insert one skb into qdisc.
 * Note: parent depends on return value to account for queue length.
 *	NET_XMIT_DROP: queue length didn't change.
 *	NET_XMIT_SUCCESS: one skb was queued.
 */
static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	/* We don't fill cb now as skb_unshare() may invalidate it */
	struct netem_skb_cb *cb;
	struct sk_buff *skb2;
	int ret;
	int count = 1;

	pr_debug("netem_enqueue skb=%p\n", skb);

	/* Random duplication */
	if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
		++count;

	/* Random packet drop 0 => none, ~0 => all */
	if (q->loss && q->loss >= get_crandom(&q->loss_cor))
		--count;

	if (count == 0) {
		sch->qstats.drops++;
		kfree_skb(skb);
		return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	}

	skb_orphan(skb);

	/*
	 * If we need to duplicate the packet, re-insert the clone at the
	 * top of the qdisc tree, since the parent queuer expects that
	 * only one skb will be queued here.
	 */
	if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
		struct Qdisc *rootq = qdisc_root(sch);
		u32 dupsave = q->duplicate; /* prevent duplicating a dup... */
		q->duplicate = 0;

		qdisc_enqueue_root(skb2, rootq);
		q->duplicate = dupsave;
	}

	/*
	 * Randomized packet corruption.
	 * Make a copy if needed since we are modifying the data.
	 * If the packet is going to be hardware checksummed, then
	 * do the checksum now in software before we mangle it.
	 */
	if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
		if (!(skb = skb_unshare(skb, GFP_ATOMIC)) ||
		    (skb->ip_summed == CHECKSUM_PARTIAL &&
		     skb_checksum_help(skb))) {
			sch->qstats.drops++;
			return NET_XMIT_DROP;
		}

		skb->data[net_random() % skb_headlen(skb)] ^= 1<<(net_random() % 8);
	}

	cb = netem_skb_cb(skb);
	if (q->gap == 0 ||		/* not doing reordering */
	    q->counter < q->gap ||	/* inside last reordering gap */
	    q->reorder < get_crandom(&q->reorder_cor)) {
		psched_time_t now;
		psched_tdiff_t delay;

		delay = tabledist(q->latency, q->jitter,
				  &q->delay_cor, q->delay_dist);

		now = psched_get_time();
		cb->time_to_send = now + delay;
		++q->counter;
		ret = qdisc_enqueue(skb, q->qdisc);
	} else {
		/*
		 * Do re-ordering by putting one out of N packets at the front
		 * of the queue.
		 */
		cb->time_to_send = psched_get_time();
		q->counter = 0;

		__skb_queue_head(&q->qdisc->q, skb);
		q->qdisc->qstats.backlog += qdisc_pkt_len(skb);
		q->qdisc->qstats.requeues++;
		ret = NET_XMIT_SUCCESS;
	}

	if (likely(ret == NET_XMIT_SUCCESS)) {
		sch->q.qlen++;
	} else if (net_xmit_drop_count(ret)) {
		sch->qstats.drops++;
	}

	pr_debug("netem: enqueue ret %d\n", ret);
	return ret;
}

static unsigned int netem_drop(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	unsigned int len = 0;

	if (q->qdisc->ops->drop && (len = q->qdisc->ops->drop(q->qdisc)) != 0) {
		sch->q.qlen--;
		sch->qstats.drops++;
	}
	return len;
}

static struct sk_buff *netem_dequeue(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

	if (sch->flags & TCQ_F_THROTTLED)
		return NULL;

	skb = q->qdisc->ops->peek(q->qdisc);
	if (skb) {
		const struct netem_skb_cb *cb = netem_skb_cb(skb);
		psched_time_t now = psched_get_time();

		/* send it out now if its scheduled time has been reached */
		if (cb->time_to_send <= now) {
			skb = qdisc_dequeue_peeked(q->qdisc);
			if (unlikely(!skb))
				return NULL;

#ifdef CONFIG_NET_CLS_ACT
			/*
			 * If it's at ingress let's pretend the delay is
			 * from the network (tstamp will be updated).
			 */
			if (G_TC_FROM(skb->tc_verd) & AT_INGRESS)
				skb->tstamp.tv64 = 0;
#endif
			pr_debug("netem_dequeue: return skb=%p\n", skb);
			qdisc_bstats_update(sch, skb);
			sch->q.qlen--;
			return skb;
		}

		/* not yet due: arm the watchdog and report nothing to send */
		qdisc_watchdog_schedule(&q->watchdog, cb->time_to_send);
	}

	return NULL;
}

static void netem_reset(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_reset(q->qdisc);
	sch->q.qlen = 0;
	qdisc_watchdog_cancel(&q->watchdog);
}

/*
 * Distribution data is a variable-size payload containing
 * signed 16 bit values.
 */
static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	unsigned long n = nla_len(attr)/sizeof(__s16);
	const __s16 *data = nla_data(attr);
	spinlock_t *root_lock;
	struct disttable *d;
	int i;

	if (n > 65536)
		return -EINVAL;

	d = kmalloc(sizeof(*d) + n*sizeof(d->table[0]), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	d->size = n;
	for (i = 0; i < n; i++)
		d->table[i] = data[i];

	root_lock = qdisc_root_sleeping_lock(sch);

	spin_lock_bh(root_lock);
	kfree(q->delay_dist);
	q->delay_dist = d;
	spin_unlock_bh(root_lock);
	return 0;
}
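
/*
 * How the table is used, as a hedged sketch: each entry is a sample of
 * the inverse CDF of the desired delay distribution, scaled so that a
 * value of NETEM_DIST_SCALE means "one standard deviation" (see
 * tabledist() above).  iproute2 ships ready-made tables (e.g.
 * normal.dist, pareto.dist) that userspace passes down in
 * TCA_NETEM_DELAY_DIST, typically via:
 *
 *	tc qdisc add dev eth0 root netem delay 100ms 20ms distribution normal
 */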

static void get_correlation(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct tc_netem_corr *c = nla_data(attr);

	init_crandom(&q->delay_cor, c->delay_corr);
	init_crandom(&q->loss_cor, c->loss_corr);
	init_crandom(&q->dup_cor, c->dup_corr);
}

static void get_reorder(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct tc_netem_reorder *r = nla_data(attr);

	q->reorder = r->probability;
	init_crandom(&q->reorder_cor, r->correlation);
}

static void get_corrupt(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct tc_netem_corrupt *r = nla_data(attr);

	q->corrupt = r->probability;
	init_crandom(&q->corrupt_cor, r->correlation);
}

static const struct nla_policy netem_policy[TCA_NETEM_MAX + 1] = {
	[TCA_NETEM_CORR]	= { .len = sizeof(struct tc_netem_corr) },
	[TCA_NETEM_REORDER]	= { .len = sizeof(struct tc_netem_reorder) },
	[TCA_NETEM_CORRUPT]	= { .len = sizeof(struct tc_netem_corrupt) },
};

static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
		      const struct nla_policy *policy, int len)
{
	int nested_len = nla_len(nla) - NLA_ALIGN(len);

	if (nested_len < 0)
		return -EINVAL;
	if (nested_len >= nla_attr_size(0))
		return nla_parse(tb, maxtype, nla_data(nla) + NLA_ALIGN(len),
				 nested_len, policy);
	memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
	return 0;
}
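
/*
 * The TCA_OPTIONS payload parsed above therefore looks like this on the
 * wire (a sketch of the layout, not dumped from a real message):
 *
 *	+----------------------+ <- nla_data(opt)
 *	| struct tc_netem_qopt |    fixed header: latency, limit, loss, ...
 *	+----------------------+ <- nla_data(opt) + NLA_ALIGN(sizeof(qopt))
 *	| nlattr TCA_NETEM_CORR, TCA_NETEM_REORDER, ...   (all optional)
 *	+----------------------+
 *
 * which keeps the newer attribute-based options compatible with the old
 * fixed-struct-only format.
 */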

/* Parse netlink message to set options */
static int netem_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_NETEM_MAX + 1];
	struct tc_netem_qopt *qopt;
	int ret;

	if (opt == NULL)
		return -EINVAL;

	qopt = nla_data(opt);
	ret = parse_attr(tb, TCA_NETEM_MAX, opt, netem_policy, sizeof(*qopt));
	if (ret < 0)
		return ret;

	ret = fifo_set_limit(q->qdisc, qopt->limit);
	if (ret) {
		pr_debug("netem: can't set fifo limit\n");
		return ret;
	}

	q->latency = qopt->latency;
	q->jitter = qopt->jitter;
	q->limit = qopt->limit;
	q->gap = qopt->gap;
	q->counter = 0;
	q->loss = qopt->loss;
	q->duplicate = qopt->duplicate;

	/* For compatibility with earlier versions:
	 * if gap is set, assume 100% reorder probability.
	 */
	if (q->gap)
		q->reorder = ~0;

	if (tb[TCA_NETEM_CORR])
		get_correlation(sch, tb[TCA_NETEM_CORR]);

	if (tb[TCA_NETEM_DELAY_DIST]) {
		ret = get_dist_table(sch, tb[TCA_NETEM_DELAY_DIST]);
		if (ret)
			return ret;
	}

	if (tb[TCA_NETEM_REORDER])
		get_reorder(sch, tb[TCA_NETEM_REORDER]);

	if (tb[TCA_NETEM_CORRUPT])
		get_corrupt(sch, tb[TCA_NETEM_CORRUPT]);

	return 0;
}

/*
 * Special-case version of FIFO queue for use by netem.
 * It queues packets in order based on the timestamps in their skbs.
 */
struct fifo_sched_data {
	u32 limit;
	psched_time_t oldest;
};

static int tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
{
	struct fifo_sched_data *q = qdisc_priv(sch);
	struct sk_buff_head *list = &sch->q;
	psched_time_t tnext = netem_skb_cb(nskb)->time_to_send;
	struct sk_buff *skb;

	if (likely(skb_queue_len(list) < q->limit)) {
		/* Optimize for add at tail */
		if (likely(skb_queue_empty(list) || tnext >= q->oldest)) {
			q->oldest = tnext;
			return qdisc_enqueue_tail(nskb, sch);
		}

		skb_queue_reverse_walk(list, skb) {
			const struct netem_skb_cb *cb = netem_skb_cb(skb);

			if (tnext >= cb->time_to_send)
				break;
		}

		__skb_queue_after(list, skb, nskb);

		sch->qstats.backlog += qdisc_pkt_len(nskb);

		return NET_XMIT_SUCCESS;
	}

	return qdisc_reshape_fail(nskb, sch);
}
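
/*
 * An illustration of the insertion policy above (made-up timestamps):
 * with a queue holding send times [10, 20, 30], a packet due at 25 is
 * found by the reverse walk and placed after the "20" entry, giving
 * [10, 20, 25, 30].  The common in-order case (due at >= 30) takes the
 * O(1) tail path; only jittered out-of-order packets pay the O(n) walk.
 */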

static int tfifo_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct fifo_sched_data *q = qdisc_priv(sch);

	if (opt) {
		struct tc_fifo_qopt *ctl = nla_data(opt);

		if (nla_len(opt) < sizeof(*ctl))
			return -EINVAL;

		q->limit = ctl->limit;
	} else
		q->limit = max_t(u32, qdisc_dev(sch)->tx_queue_len, 1);

	q->oldest = PSCHED_PASTPERFECT;
	return 0;
}

static int tfifo_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct fifo_sched_data *q = qdisc_priv(sch);
	struct tc_fifo_qopt opt = { .limit = q->limit };

	NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
	return skb->len;

nla_put_failure:
	return -1;
}

static struct Qdisc_ops tfifo_qdisc_ops __read_mostly = {
	.id		=	"tfifo",
	.priv_size	=	sizeof(struct fifo_sched_data),
	.enqueue	=	tfifo_enqueue,
	.dequeue	=	qdisc_dequeue_head,
	.peek		=	qdisc_peek_head,
	.drop		=	qdisc_queue_drop,
	.init		=	tfifo_init,
	.reset		=	qdisc_reset_queue,
	.change		=	tfifo_init,
	.dump		=	tfifo_dump,
};

static int netem_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	int ret;

	if (!opt)
		return -EINVAL;

	qdisc_watchdog_init(&q->watchdog, sch);

	q->qdisc = qdisc_create_dflt(sch->dev_queue, &tfifo_qdisc_ops,
				     TC_H_MAKE(sch->handle, 1));
	if (!q->qdisc) {
		pr_debug("netem: qdisc create failed\n");
		return -ENOMEM;
	}

	ret = netem_change(sch, opt);
	if (ret) {
		pr_debug("netem: change failed\n");
		qdisc_destroy(q->qdisc);
	}
	return ret;
}

static void netem_destroy(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_watchdog_cancel(&q->watchdog);
	qdisc_destroy(q->qdisc);
	kfree(q->delay_dist);
}

static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	const struct netem_sched_data *q = qdisc_priv(sch);
	unsigned char *b = skb_tail_pointer(skb);
	struct nlattr *nla = (struct nlattr *) b;
	struct tc_netem_qopt qopt;
	struct tc_netem_corr cor;
	struct tc_netem_reorder reorder;
	struct tc_netem_corrupt corrupt;

	qopt.latency = q->latency;
	qopt.jitter = q->jitter;
	qopt.limit = q->limit;
	qopt.loss = q->loss;
	qopt.gap = q->gap;
	qopt.duplicate = q->duplicate;
	NLA_PUT(skb, TCA_OPTIONS, sizeof(qopt), &qopt);

	cor.delay_corr = q->delay_cor.rho;
	cor.loss_corr = q->loss_cor.rho;
	cor.dup_corr = q->dup_cor.rho;
	NLA_PUT(skb, TCA_NETEM_CORR, sizeof(cor), &cor);

	reorder.probability = q->reorder;
	reorder.correlation = q->reorder_cor.rho;
	NLA_PUT(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder);

	corrupt.probability = q->corrupt;
	corrupt.correlation = q->corrupt_cor.rho;
	NLA_PUT(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt);

	nla->nla_len = skb_tail_pointer(skb) - b;

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static struct Qdisc_ops netem_qdisc_ops __read_mostly = {
	.id		=	"netem",
	.priv_size	=	sizeof(struct netem_sched_data),
	.enqueue	=	netem_enqueue,
	.dequeue	=	netem_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.drop		=	netem_drop,
	.init		=	netem_init,
	.reset		=	netem_reset,
	.destroy	=	netem_destroy,
	.change		=	netem_change,
	.dump		=	netem_dump,
	.owner		=	THIS_MODULE,
};

static int __init netem_module_init(void)
{
	pr_info("netem: version " VERSION "\n");
	return register_qdisc(&netem_qdisc_ops);
}

static void __exit netem_module_exit(void)
{
	unregister_qdisc(&netem_qdisc_ops);
}

module_init(netem_module_init)
module_exit(netem_module_exit)
MODULE_LICENSE("GPL");