/*
 * net/sched/sch_netem.c	Network emulator
 *
 * 		This program is free software; you can redistribute it and/or
 * 		modify it under the terms of the GNU General Public License
 * 		as published by the Free Software Foundation; either version
 * 		2 of the License.
 *
 *  		Many of the algorithms and ideas for this came from
 *		NIST Net which is not copyrighted.
 *
 * Authors:	Stephen Hemminger <shemminger@osdl.org>
 *		Catalin(ux aka Dino) BOIE <catab at umbrella dot ro>
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/rtnetlink.h>
#include <linux/reciprocal_div.h>
#include <linux/rbtree.h>

#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/inet_ecn.h>

#define VERSION "1.3"

/*	Network Emulation Queuing algorithm.
	====================================

	Sources: [1] Mark Carson, Darrin Santay, "NIST Net - A Linux-based
		 Network Emulation Tool"
		 [2] Luigi Rizzo, DummyNet for FreeBSD

	 ----------------------------------------------------------------

	 This started out as a simple way to delay outgoing packets to
	 test TCP but has grown to include most of the functionality
	 of a full blown network emulator like NISTnet. It can delay
	 packets and add random jitter (and correlation). The random
	 distribution can be loaded from a table as well to provide
	 normal, Pareto, or experimental curves. Packet loss,
	 duplication, and reordering can also be emulated.

	 This qdisc does not do classification; that can be handled by
	 layering other disciplines. It does not need to do bandwidth
	 control either, since that can be handled by using token
	 bucket or other rate control.

     Correlated Loss Generator models

	Added generation of correlated loss according to a 4-state
	Markov chain (the GI model) and the 2-state "Gilbert-Elliot"
	model.

	References:
	[1] NetemCLG Home http://netgroup.uniroma2.it/NetemCLG
	[2] S. Salsano, F. Ludovici, A. Ordine, "Definition of a general
	and intuitive loss model for packet networks and its implementation
	in the Netem module in the Linux kernel", available in [1]

	Authors: Stefano Salsano <stefano.salsano at uniroma2.it>
		 Fabio Ludovici <fabio.ludovici at yahoo.it>
*/
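
/* Example configuration through the tc(8) front end (illustrative only;
 * "eth0" is a placeholder and the full grammar is in tc-netem(8)):
 *
 *	# 100ms delay with +/- 10ms jitter, 25% correlated with the
 *	# previous sample, plus 0.3% random loss and 1% duplication
 *	tc qdisc add dev eth0 root netem delay 100ms 10ms 25% \
 *		loss 0.3% duplicate 1%
 *
 * Each percentage is turned into a 32-bit fixed-point probability
 * (~0U meaning 100%) before it reaches the fields below.
 */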

struct disttable {
	u32  size;
	s16 table[];
};

struct netem_sched_data {
	/* internal t(ime)fifo qdisc uses t_root and sch->limit */
	struct rb_root t_root;

	/* optional qdisc for classful handling (NULL at netem init) */
	struct Qdisc	*qdisc;

	struct qdisc_watchdog watchdog;

	s64 latency;
	s64 jitter;

	u32 loss;
	u32 ecn;
	u32 limit;
	u32 counter;
	u32 gap;
	u32 duplicate;
	u32 reorder;
	u32 corrupt;
	u64 rate;
	s32 packet_overhead;
	u32 cell_size;
	struct reciprocal_value cell_size_reciprocal;
	s32 cell_overhead;

	struct crndstate {
		u32 last;
		u32 rho;
	} delay_cor, loss_cor, dup_cor, reorder_cor, corrupt_cor;

	struct disttable *delay_dist;

	enum {
		CLG_RANDOM,
		CLG_4_STATES,
		CLG_GILB_ELL,
	} loss_model;

	enum {
		TX_IN_GAP_PERIOD = 1,
		TX_IN_BURST_PERIOD,
		LOST_IN_GAP_PERIOD,
		LOST_IN_BURST_PERIOD,
	} _4_state_model;

	enum {
		GOOD_STATE = 1,
		BAD_STATE,
	} GE_state_model;

	/* Correlated Loss Generation models */
	struct clgstate {
		/* state of the Markov chain */
		u8 state;

		/* 4-states and Gilbert-Elliot models */
		u32 a1;	/* p13 for 4-states or p for GE */
		u32 a2;	/* p31 for 4-states or r for GE */
		u32 a3;	/* p32 for 4-states or h for GE */
		u32 a4;	/* p14 for 4-states or 1-k for GE */
		u32 a5; /* p23 used only in 4-states */
	} clg;

	struct tc_netem_slot slot_config;
	struct slotstate {
		u64 slot_next;
		s32 packets_left;
		s32 bytes_left;
	} slot;

	struct disttable *slot_dist;
};

/* Time stamp put into socket buffer control block
 * Only valid when skbs are in our internal t(ime)fifo queue.
 *
 * As skb->rbnode uses the same storage as skb->next, skb->prev and
 * skb->tstamp, and skb->next & skb->prev are scratch space for a qdisc,
 * we save the skb->tstamp value in skb->cb[] before destroying it.
 */
struct netem_skb_cb {
	u64	        time_to_send;
};

static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb)
{
	/* we assume we can use skb next/prev/tstamp as storage for rb_node */
	qdisc_cb_private_validate(skb, sizeof(struct netem_skb_cb));
	return (struct netem_skb_cb *)qdisc_skb_cb(skb)->data;
}

/* init_crandom - initialize correlated random number generator
 * Use entropy source for initial seed.
 */
static void init_crandom(struct crndstate *state, unsigned long rho)
{
	state->rho = rho;
	state->last = prandom_u32();
}

/* get_crandom - correlated random number generator
 * Next number depends on last value.
 * rho is scaled to avoid floating point.
 */
static u32 get_crandom(struct crndstate *state)
{
	u64 value, rho;
	unsigned long answer;

	if (!state || state->rho == 0)	/* no correlation */
		return prandom_u32();

	value = prandom_u32();
	rho = (u64)state->rho + 1;
	answer = (value * ((1ull<<32) - rho) + state->last * rho) >> 32;
	state->last = answer;
	return answer;
}
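
/* The update above is a fixed-point exponentially weighted average: with
 * w = rho / 2^32, the next sample is roughly (1 - w) * uniform + w * last.
 * A minimal userspace sketch of the same arithmetic (rand32() standing in
 * for prandom_u32(); it is not a kernel API):
 *
 *	uint32_t crandom_next(uint32_t *last, uint32_t rho)
 *	{
 *		uint64_t value = rand32();
 *		uint64_t r = (uint64_t)rho + 1;
 *
 *		*last = (value * ((1ULL << 32) - r) + *last * r) >> 32;
 *		return *last;
 *	}
 *
 * rho == ~0U makes each sample repeat the previous one exactly; in the
 * kernel function rho == 0 bypasses the recurrence and returns a plain
 * uniform value.
 */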

/* loss_4state - 4-state model loss generator
 * Generates losses according to the 4-state Markov chain adopted in
 * the GI (General and Intuitive) loss model.
 */
static bool loss_4state(struct netem_sched_data *q)
{
	struct clgstate *clg = &q->clg;
	u32 rnd = prandom_u32();

	/*
	 * Makes a comparison between rnd and the transition
	 * probabilities outgoing from the current state, then decides the
	 * next state and whether the next packet has to be transmitted or
	 * lost. The four states correspond to:
	 *   TX_IN_GAP_PERIOD => successfully transmitted packets within a gap period
	 *   LOST_IN_BURST_PERIOD => isolated losses within a gap period
	 *   LOST_IN_GAP_PERIOD => lost packets within a burst period
	 *   TX_IN_BURST_PERIOD => successfully transmitted packets within a burst period
	 */
	switch (clg->state) {
	case TX_IN_GAP_PERIOD:
		if (rnd < clg->a4) {
			clg->state = LOST_IN_BURST_PERIOD;
			return true;
		} else if (clg->a4 < rnd && rnd < clg->a1 + clg->a4) {
			clg->state = LOST_IN_GAP_PERIOD;
			return true;
		} else if (clg->a1 + clg->a4 < rnd) {
			clg->state = TX_IN_GAP_PERIOD;
		}

		break;
	case TX_IN_BURST_PERIOD:
		if (rnd < clg->a5) {
			clg->state = LOST_IN_GAP_PERIOD;
			return true;
		} else {
			clg->state = TX_IN_BURST_PERIOD;
		}

		break;
	case LOST_IN_GAP_PERIOD:
		if (rnd < clg->a3)
			clg->state = TX_IN_BURST_PERIOD;
		else if (clg->a3 < rnd && rnd < clg->a2 + clg->a3) {
			clg->state = TX_IN_GAP_PERIOD;
		} else if (clg->a2 + clg->a3 < rnd) {
			clg->state = LOST_IN_GAP_PERIOD;
			return true;
		}
		break;
	case LOST_IN_BURST_PERIOD:
		clg->state = TX_IN_GAP_PERIOD;
		break;
	}

	return false;
}
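
/* The five transition probabilities arrive as q->clg.a1..a5 (p13, p31,
 * p32, p14, p23 respectively), each scaled so that ~0U means 100%. With
 * the tc(8) front end they would be supplied as, e.g. (illustrative
 * values; see tc-netem(8) for the argument order):
 *
 *	tc qdisc add dev eth0 root netem loss state 1% 10% 15% 6% 0.1%
 *
 * Note that rnd is compared against cumulative thresholds (a4, then
 * a1 + a4): one uniform draw selects among several outgoing transitions.
 */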

/* loss_gilb_ell - Gilbert-Elliot model loss generator
 * Generates losses according to the Gilbert-Elliot loss model or
 * its special cases (Gilbert or Simple Gilbert)
 *
 * Makes a comparison between random number and the transition
 * probabilities outgoing from the current state, then decides the
 * next state. A second random number is extracted and the comparison
 * with the loss probability of the current state decides if the next
 * packet will be transmitted or lost.
 */
static bool loss_gilb_ell(struct netem_sched_data *q)
{
	struct clgstate *clg = &q->clg;

	switch (clg->state) {
	case GOOD_STATE:
		if (prandom_u32() < clg->a1)
			clg->state = BAD_STATE;
		if (prandom_u32() < clg->a4)
			return true;
		break;
	case BAD_STATE:
		if (prandom_u32() < clg->a2)
			clg->state = GOOD_STATE;
		if (prandom_u32() > clg->a3)
			return true;
	}

	return false;
}
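
/* Here a1/a2 are the good->bad and bad->good transition probabilities
 * (p and r), a4 is the loss probability in the good state (1-k), and a3
 * holds h, the probability that a packet in the bad state is still
 * transmitted, hence the inverted comparison in BAD_STATE. A
 * hypothetical tc(8) invocation (parameters p, r, 1-h, 1-k per
 * tc-netem(8)):
 *
 *	tc qdisc add dev eth0 root netem loss gemodel 1% 10% 70% 0.1%
 */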

static bool loss_event(struct netem_sched_data *q)
{
	switch (q->loss_model) {
	case CLG_RANDOM:
		/* Random packet drop 0 => none, ~0 => all */
		return q->loss && q->loss >= get_crandom(&q->loss_cor);

	case CLG_4_STATES:
		/* 4-state loss model algorithm (used also for the GI
		 * model): ask the 4-state Markov loss generator whether
		 * the next packet should be lost.
		 */
		return loss_4state(q);

	case CLG_GILB_ELL:
		/* Gilbert-Elliot loss model algorithm: ask the
		 * Gilbert-Elliot loss generator whether the next packet
		 * should be lost.
		 */
		return loss_gilb_ell(q);
	}

	return false;	/* not reached */
}


/* tabledist - return a pseudo-randomly distributed value with mean mu and
 * std deviation sigma.  Uses table lookup to approximate the desired
 * distribution, and a uniformly-distributed pseudo-random source.
 */
static s64 tabledist(s64 mu, s32 sigma,
		     struct crndstate *state,
		     const struct disttable *dist)
{
	s64 x;
	long t;
	u32 rnd;

	if (sigma == 0)
		return mu;

	rnd = get_crandom(state);

	/* default uniform distribution */
	if (dist == NULL)
		return ((rnd % (2 * sigma)) + mu) - sigma;

	t = dist->table[rnd % dist->size];
	x = (sigma % NETEM_DIST_SCALE) * t;
	if (x >= 0)
		x += NETEM_DIST_SCALE/2;
	else
		x -= NETEM_DIST_SCALE/2;

	return x / NETEM_DIST_SCALE + (sigma / NETEM_DIST_SCALE) * t + mu;
}
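
/* Worked example: the table holds samples of the target distribution
 * scaled by NETEM_DIST_SCALE (8192, from pkt_sched.h), so the return
 * value is mu + sigma * t / NETEM_DIST_SCALE computed with integer
 * rounding. With sigma = 10ms and a table entry t = 8192 the result is
 * mu + 10ms; t = -4096 gives mu - 5ms. Splitting sigma into quotient
 * and remainder keeps the division exact apart from the final rounding.
 */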

static u64 packet_time_ns(u64 len, const struct netem_sched_data *q)
{
	len += q->packet_overhead;

	if (q->cell_size) {
		u32 cells = reciprocal_divide(len, q->cell_size_reciprocal);

		if (len > cells * q->cell_size)	/* extra cell needed for remainder */
			cells++;
		len = cells * (q->cell_size + q->cell_overhead);
	}

	return div64_u64(len * NSEC_PER_SEC, q->rate);
}
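
/* Worked example (rate is in bytes per second here): emulating an ATM
 * link with cell_size = 48 and cell_overhead = 5, a 1500-byte packet
 * needs ceil(1500 / 48) = 32 cells, i.e. 32 * 53 = 1696 bytes on the
 * wire; at rate = 125000 (1 Mbit/s) that is 1696 * 10^9 / 125000 ns,
 * about 13.6 ms of serialization delay.
 */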

static void tfifo_reset(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct rb_node *p = rb_first(&q->t_root);

	while (p) {
		struct sk_buff *skb = rb_to_skb(p);

		p = rb_next(p);
		rb_erase(&skb->rbnode, &q->t_root);
		rtnl_kfree_skbs(skb, skb);
	}
}

static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	u64 tnext = netem_skb_cb(nskb)->time_to_send;
	struct rb_node **p = &q->t_root.rb_node, *parent = NULL;

	while (*p) {
		struct sk_buff *skb;

		parent = *p;
		skb = rb_to_skb(parent);
		if (tnext >= netem_skb_cb(skb)->time_to_send)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&nskb->rbnode, parent, p);
	rb_insert_color(&nskb->rbnode, &q->t_root);
	sch->q.qlen++;
}

/* netem can't properly corrupt a megapacket (like we get from GSO), so when
 * we statistically choose to corrupt one, we instead segment it, returning
 * the first packet to be corrupted, and re-enqueue the remaining frames
 */
static struct sk_buff *netem_segment(struct sk_buff *skb, struct Qdisc *sch,
				     struct sk_buff **to_free)
{
	struct sk_buff *segs;
	netdev_features_t features = netif_skb_features(skb);

	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);

	if (IS_ERR_OR_NULL(segs)) {
		qdisc_drop(skb, sch, to_free);
		return NULL;
	}
	consume_skb(skb);
	return segs;
}

/*
 * Insert one skb into qdisc.
 * Note: parent depends on return value to account for queue length.
 * 	NET_XMIT_DROP: queue length didn't change.
 *      NET_XMIT_SUCCESS: one skb was queued.
 */
static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			 struct sk_buff **to_free)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	/* We don't fill cb now as skb_unshare() may invalidate it */
	struct netem_skb_cb *cb;
	struct sk_buff *skb2;
	struct sk_buff *segs = NULL;
	unsigned int len = 0, last_len, prev_len = qdisc_pkt_len(skb);
	int nb = 0;
	int count = 1;
	int rc = NET_XMIT_SUCCESS;

	/* Do not fool qdisc_drop_all() */
	skb->prev = NULL;

	/* Random duplication */
	if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
		++count;

	/* Drop packet? */
	if (loss_event(q)) {
		if (q->ecn && INET_ECN_set_ce(skb))
			qdisc_qstats_drop(sch); /* mark packet */
		else
			--count;
	}
	if (count == 0) {
		qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	}

	/* If a delay is expected, orphan the skb. (orphaning usually takes
	 * place at TX completion time, so _before_ the link transit delay)
	 */
	if (q->latency || q->jitter || q->rate)
		skb_orphan_partial(skb);

	/*
	 * If we need to duplicate the packet, re-insert the duplicate at the
	 * top of the qdisc tree, since the parent queuer expects that only
	 * one skb will be queued.
	 */
	if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
		struct Qdisc *rootq = qdisc_root(sch);
		u32 dupsave = q->duplicate; /* prevent duplicating a dup... */

		q->duplicate = 0;
		rootq->enqueue(skb2, rootq, to_free);
		q->duplicate = dupsave;
	}

	/*
	 * Randomized packet corruption.
	 * Make a copy if needed since we are modifying the data.
	 * If the packet is going to be hardware checksummed, then
	 * do it now in software before we mangle it.
	 */
	if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
		if (skb_is_gso(skb)) {
			segs = netem_segment(skb, sch, to_free);
			if (!segs)
				return NET_XMIT_DROP;
		} else {
			segs = skb;
		}

		skb = segs;
		segs = segs->next;

		skb = skb_unshare(skb, GFP_ATOMIC);
		if (unlikely(!skb)) {
			qdisc_qstats_drop(sch);
			goto finish_segs;
		}
		if (skb->ip_summed == CHECKSUM_PARTIAL &&
		    skb_checksum_help(skb)) {
			qdisc_drop(skb, sch, to_free);
			goto finish_segs;
		}

		skb->data[prandom_u32() % skb_headlen(skb)] ^=
			1<<(prandom_u32() % 8);
	}

	if (unlikely(sch->q.qlen >= sch->limit))
		return qdisc_drop_all(skb, sch, to_free);

	qdisc_qstats_backlog_inc(sch, skb);

	cb = netem_skb_cb(skb);
	if (q->gap == 0 ||		/* not doing reordering */
	    q->counter < q->gap - 1 ||	/* inside last reordering gap */
	    q->reorder < get_crandom(&q->reorder_cor)) {
		u64 now;
		s64 delay;

		delay = tabledist(q->latency, q->jitter,
				  &q->delay_cor, q->delay_dist);

		now = ktime_get_ns();

		if (q->rate) {
			struct netem_skb_cb *last = NULL;

			if (sch->q.tail)
				last = netem_skb_cb(sch->q.tail);
			if (q->t_root.rb_node) {
				struct sk_buff *t_skb;
				struct netem_skb_cb *t_last;

				t_skb = skb_rb_last(&q->t_root);
				t_last = netem_skb_cb(t_skb);
				if (!last ||
				    t_last->time_to_send > last->time_to_send) {
					last = t_last;
				}
			}

			if (last) {
				/*
				 * The last packet in the queue is the
				 * reference point (now); calculate this
				 * time bonus and subtract it from the delay.
				 */
				delay -= last->time_to_send - now;
				delay = max_t(s64, 0, delay);
				now = last->time_to_send;
			}

			delay += packet_time_ns(qdisc_pkt_len(skb), q);
		}

		cb->time_to_send = now + delay;
		++q->counter;
		tfifo_enqueue(skb, sch);
	} else {
		/*
		 * Do re-ordering by putting one out of N packets at the front
		 * of the queue.
		 */
		cb->time_to_send = ktime_get_ns();
		q->counter = 0;

		__qdisc_enqueue_head(skb, &sch->q);
		sch->qstats.requeues++;
	}

finish_segs:
	if (segs) {
		while (segs) {
			skb2 = segs->next;
			skb_mark_not_on_list(segs);
			qdisc_skb_cb(segs)->pkt_len = segs->len;
			last_len = segs->len;
			rc = qdisc_enqueue(segs, sch, to_free);
			if (rc != NET_XMIT_SUCCESS) {
				if (net_xmit_drop_count(rc))
					qdisc_qstats_drop(sch);
			} else {
				nb++;
				len += last_len;
			}
			segs = skb2;
		}
		sch->q.qlen += nb;
		if (nb > 1)
			qdisc_tree_reduce_backlog(sch, 1 - nb, prev_len - len);
	}
	return NET_XMIT_SUCCESS;
}

/* Delay the next round by choosing a new future slot, and reset the
 * slot's byte and packet budgets.
 */

static void get_slot_next(struct netem_sched_data *q, u64 now)
{
	s64 next_delay;

	if (!q->slot_dist)
		next_delay = q->slot_config.min_delay +
				(prandom_u32() *
				 (q->slot_config.max_delay -
				  q->slot_config.min_delay) >> 32);
	else
		next_delay = tabledist(q->slot_config.dist_delay,
				       (s32)(q->slot_config.dist_jitter),
				       NULL, q->slot_dist);

	q->slot.slot_next = now + next_delay;
	q->slot.packets_left = q->slot_config.max_packets;
	q->slot.bytes_left = q->slot_config.max_bytes;
}
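
/* The no-distribution branch maps a 32-bit random value uniformly onto
 * [min_delay, max_delay): multiplying a u32 by the range and shifting
 * right by 32 is the usual fixed-point alternative to a modulo. E.g.
 * with a 1ms range, prandom_u32() == 0x80000000 yields roughly 0.5ms.
 */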

static struct sk_buff *netem_dequeue(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	struct rb_node *p;

tfifo_dequeue:
	skb = __qdisc_dequeue_head(&sch->q);
	if (skb) {
		qdisc_qstats_backlog_dec(sch, skb);
deliver:
		qdisc_bstats_update(sch, skb);
		return skb;
	}
	p = rb_first(&q->t_root);
	if (p) {
		u64 time_to_send;
		u64 now = ktime_get_ns();

		skb = rb_to_skb(p);

		/* is it time to send the earliest packet? */
		time_to_send = netem_skb_cb(skb)->time_to_send;
		if (q->slot.slot_next && q->slot.slot_next < time_to_send)
			get_slot_next(q, now);

		if (time_to_send <= now && q->slot.slot_next <= now) {
			rb_erase(p, &q->t_root);
			sch->q.qlen--;
			qdisc_qstats_backlog_dec(sch, skb);
			skb->next = NULL;
			skb->prev = NULL;
			/* skb->dev shares skb->rbnode area,
			 * we need to restore its value.
			 */
			skb->dev = qdisc_dev(sch);

			if (q->slot.slot_next) {
				q->slot.packets_left--;
				q->slot.bytes_left -= qdisc_pkt_len(skb);
				if (q->slot.packets_left <= 0 ||
				    q->slot.bytes_left <= 0)
					get_slot_next(q, now);
			}

			if (q->qdisc) {
				unsigned int pkt_len = qdisc_pkt_len(skb);
				struct sk_buff *to_free = NULL;
				int err;

				err = qdisc_enqueue(skb, q->qdisc, &to_free);
				kfree_skb_list(to_free);
				if (err != NET_XMIT_SUCCESS &&
				    net_xmit_drop_count(err)) {
					qdisc_qstats_drop(sch);
					qdisc_tree_reduce_backlog(sch, 1,
								  pkt_len);
				}
				goto tfifo_dequeue;
			}
			goto deliver;
		}

		if (q->qdisc) {
			skb = q->qdisc->ops->dequeue(q->qdisc);
			if (skb)
				goto deliver;
		}

		qdisc_watchdog_schedule_ns(&q->watchdog,
					   max(time_to_send,
					       q->slot.slot_next));
	}

	if (q->qdisc) {
		skb = q->qdisc->ops->dequeue(q->qdisc);
		if (skb)
			goto deliver;
	}
	return NULL;
}

static void netem_reset(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_reset_queue(sch);
	tfifo_reset(sch);
	if (q->qdisc)
		qdisc_reset(q->qdisc);
	qdisc_watchdog_cancel(&q->watchdog);
}

static void dist_free(struct disttable *d)
{
	kvfree(d);
}

/*
 * Distribution data is a variable size payload containing
 * signed 16 bit values.
 */

static int get_dist_table(struct Qdisc *sch, struct disttable **tbl,
			  const struct nlattr *attr)
{
	size_t n = nla_len(attr)/sizeof(__s16);
	const __s16 *data = nla_data(attr);
	spinlock_t *root_lock;
	struct disttable *d;
	int i;

	if (n > NETEM_DIST_MAX)
		return -EINVAL;

	d = kvmalloc(sizeof(struct disttable) + n * sizeof(s16), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	d->size = n;
	for (i = 0; i < n; i++)
		d->table[i] = data[i];

	root_lock = qdisc_root_sleeping_lock(sch);

	spin_lock_bh(root_lock);
	swap(*tbl, d);
	spin_unlock_bh(root_lock);

	dist_free(d);
	return 0;
}
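
/* The table is typically generated offline from measured delay data (the
 * iproute2 tree ships a maketable tool plus ready-made normal, pareto and
 * paretonormal tables) and is loaded through TCA_NETEM_DELAY_DIST, e.g.
 * (illustrative):
 *
 *	tc qdisc add dev eth0 root netem delay 100ms 20ms distribution normal
 */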

static void get_slot(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_slot *c = nla_data(attr);

	q->slot_config = *c;
	if (q->slot_config.max_packets == 0)
		q->slot_config.max_packets = INT_MAX;
	if (q->slot_config.max_bytes == 0)
		q->slot_config.max_bytes = INT_MAX;
	q->slot.packets_left = q->slot_config.max_packets;
	q->slot.bytes_left = q->slot_config.max_bytes;
	if (q->slot_config.min_delay | q->slot_config.max_delay |
	    q->slot_config.dist_jitter)
		q->slot.slot_next = ktime_get_ns();
	else
		q->slot.slot_next = 0;
}
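
/* Slots model bursty links (e.g. WiFi aggregation or DOCSIS grants) where
 * delivery happens in scheduled windows. A hypothetical configuration
 * delivering at most 32 packets or 64000 bytes every 0.8-1ms:
 *
 *	tc qdisc add dev eth0 root netem slot 800us 1ms packets 32 bytes 64000
 */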

static void get_correlation(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_corr *c = nla_data(attr);

	init_crandom(&q->delay_cor, c->delay_corr);
	init_crandom(&q->loss_cor, c->loss_corr);
	init_crandom(&q->dup_cor, c->dup_corr);
}

static void get_reorder(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_reorder *r = nla_data(attr);

	q->reorder = r->probability;
	init_crandom(&q->reorder_cor, r->correlation);
}

static void get_corrupt(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_corrupt *r = nla_data(attr);

	q->corrupt = r->probability;
	init_crandom(&q->corrupt_cor, r->correlation);
}

static void get_rate(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_rate *r = nla_data(attr);

	q->rate = r->rate;
	q->packet_overhead = r->packet_overhead;
	q->cell_size = r->cell_size;
	q->cell_overhead = r->cell_overhead;
	if (q->cell_size)
		q->cell_size_reciprocal = reciprocal_value(q->cell_size);
	else
		q->cell_size_reciprocal = (struct reciprocal_value) { 0 };
}

static int get_loss_clg(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct nlattr *la;
	int rem;

	nla_for_each_nested(la, attr, rem) {
		u16 type = nla_type(la);

		switch (type) {
		case NETEM_LOSS_GI: {
			const struct tc_netem_gimodel *gi = nla_data(la);

			if (nla_len(la) < sizeof(struct tc_netem_gimodel)) {
				pr_info("netem: incorrect gi model size\n");
				return -EINVAL;
			}

			q->loss_model = CLG_4_STATES;

			q->clg.state = TX_IN_GAP_PERIOD;
			q->clg.a1 = gi->p13;
			q->clg.a2 = gi->p31;
			q->clg.a3 = gi->p32;
			q->clg.a4 = gi->p14;
			q->clg.a5 = gi->p23;
			break;
		}

		case NETEM_LOSS_GE: {
			const struct tc_netem_gemodel *ge = nla_data(la);

			if (nla_len(la) < sizeof(struct tc_netem_gemodel)) {
				pr_info("netem: incorrect ge model size\n");
				return -EINVAL;
			}

			q->loss_model = CLG_GILB_ELL;
			q->clg.state = GOOD_STATE;
			q->clg.a1 = ge->p;
			q->clg.a2 = ge->r;
			q->clg.a3 = ge->h;
			q->clg.a4 = ge->k1;
			break;
		}

		default:
			pr_info("netem: unknown loss type %u\n", type);
			return -EINVAL;
		}
	}

	return 0;
}

static const struct nla_policy netem_policy[TCA_NETEM_MAX + 1] = {
	[TCA_NETEM_CORR]	= { .len = sizeof(struct tc_netem_corr) },
	[TCA_NETEM_REORDER]	= { .len = sizeof(struct tc_netem_reorder) },
	[TCA_NETEM_CORRUPT]	= { .len = sizeof(struct tc_netem_corrupt) },
	[TCA_NETEM_RATE]	= { .len = sizeof(struct tc_netem_rate) },
	[TCA_NETEM_LOSS]	= { .type = NLA_NESTED },
	[TCA_NETEM_ECN]		= { .type = NLA_U32 },
	[TCA_NETEM_RATE64]	= { .type = NLA_U64 },
	[TCA_NETEM_LATENCY64]	= { .type = NLA_S64 },
	[TCA_NETEM_JITTER64]	= { .type = NLA_S64 },
	[TCA_NETEM_SLOT]	= { .len = sizeof(struct tc_netem_slot) },
};

static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
		      const struct nla_policy *policy, int len)
{
	int nested_len = nla_len(nla) - NLA_ALIGN(len);

	if (nested_len < 0) {
		pr_info("netem: invalid attributes len %d\n", nested_len);
		return -EINVAL;
	}

	if (nested_len >= nla_attr_size(0))
		return nla_parse(tb, maxtype, nla_data(nla) + NLA_ALIGN(len),
				 nested_len, policy, NULL);

	memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
	return 0;
}
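
/* netem's TCA_OPTIONS payload is a legacy layout: a fixed struct
 * tc_netem_qopt header followed (optionally) by ordinary netlink
 * attributes, which is why parse_attr() skips NLA_ALIGN(len) bytes
 * before calling nla_parse():
 *
 *	+------------------------+
 *	| struct tc_netem_qopt   |  fixed-size header (len bytes)
 *	+------------------------+
 *	| nlattr | nlattr | ...  |  parsed against netem_policy
 *	+------------------------+
 */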

/* Parse netlink message to set options */
static int netem_change(struct Qdisc *sch, struct nlattr *opt,
			struct netlink_ext_ack *extack)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_NETEM_MAX + 1];
	struct tc_netem_qopt *qopt;
	struct clgstate old_clg;
	int old_loss_model = CLG_RANDOM;
	int ret;

	if (opt == NULL)
		return -EINVAL;

	qopt = nla_data(opt);
	ret = parse_attr(tb, TCA_NETEM_MAX, opt, netem_policy, sizeof(*qopt));
	if (ret < 0)
		return ret;

	/* backup q->clg and q->loss_model */
	old_clg = q->clg;
	old_loss_model = q->loss_model;

	if (tb[TCA_NETEM_LOSS]) {
		ret = get_loss_clg(q, tb[TCA_NETEM_LOSS]);
		if (ret) {
			q->loss_model = old_loss_model;
			return ret;
		}
	} else {
		q->loss_model = CLG_RANDOM;
	}

	if (tb[TCA_NETEM_DELAY_DIST]) {
		ret = get_dist_table(sch, &q->delay_dist,
				     tb[TCA_NETEM_DELAY_DIST]);
		if (ret)
			goto get_table_failure;
	}

	if (tb[TCA_NETEM_SLOT_DIST]) {
		ret = get_dist_table(sch, &q->slot_dist,
				     tb[TCA_NETEM_SLOT_DIST]);
		if (ret)
			goto get_table_failure;
	}

	sch->limit = qopt->limit;

	q->latency = PSCHED_TICKS2NS(qopt->latency);
	q->jitter = PSCHED_TICKS2NS(qopt->jitter);
	q->limit = qopt->limit;
	q->gap = qopt->gap;
	q->counter = 0;
	q->loss = qopt->loss;
	q->duplicate = qopt->duplicate;

	/* for compatibility with earlier versions:
	 * if gap is set, we need to assume 100% reorder probability
	 */
	if (q->gap)
		q->reorder = ~0;

	if (tb[TCA_NETEM_CORR])
		get_correlation(q, tb[TCA_NETEM_CORR]);

	if (tb[TCA_NETEM_REORDER])
		get_reorder(q, tb[TCA_NETEM_REORDER]);

	if (tb[TCA_NETEM_CORRUPT])
		get_corrupt(q, tb[TCA_NETEM_CORRUPT]);

	if (tb[TCA_NETEM_RATE])
		get_rate(q, tb[TCA_NETEM_RATE]);

	if (tb[TCA_NETEM_RATE64])
		q->rate = max_t(u64, q->rate,
				nla_get_u64(tb[TCA_NETEM_RATE64]));

	if (tb[TCA_NETEM_LATENCY64])
		q->latency = nla_get_s64(tb[TCA_NETEM_LATENCY64]);

	if (tb[TCA_NETEM_JITTER64])
		q->jitter = nla_get_s64(tb[TCA_NETEM_JITTER64]);

	if (tb[TCA_NETEM_ECN])
		q->ecn = nla_get_u32(tb[TCA_NETEM_ECN]);

	if (tb[TCA_NETEM_SLOT])
		get_slot(q, tb[TCA_NETEM_SLOT]);

	return ret;

get_table_failure:
	/* recover clg and loss_model, in case q->clg and q->loss_model
	 * were modified in get_loss_clg()
	 */
	q->clg = old_clg;
	q->loss_model = old_loss_model;
	return ret;
}

static int netem_init(struct Qdisc *sch, struct nlattr *opt,
		      struct netlink_ext_ack *extack)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	int ret;

	qdisc_watchdog_init(&q->watchdog, sch);

	if (!opt)
		return -EINVAL;

	q->loss_model = CLG_RANDOM;
	ret = netem_change(sch, opt, extack);
	if (ret)
		pr_info("netem: change failed\n");
	return ret;
}

static void netem_destroy(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_watchdog_cancel(&q->watchdog);
	if (q->qdisc)
		qdisc_put(q->qdisc);
	dist_free(q->delay_dist);
	dist_free(q->slot_dist);
}

static int dump_loss_model(const struct netem_sched_data *q,
			   struct sk_buff *skb)
{
	struct nlattr *nest;

	nest = nla_nest_start(skb, TCA_NETEM_LOSS);
	if (nest == NULL)
		goto nla_put_failure;

	switch (q->loss_model) {
	case CLG_RANDOM:
		/* legacy loss model */
		nla_nest_cancel(skb, nest);
		return 0;	/* no data */

	case CLG_4_STATES: {
		struct tc_netem_gimodel gi = {
			.p13 = q->clg.a1,
			.p31 = q->clg.a2,
			.p32 = q->clg.a3,
			.p14 = q->clg.a4,
			.p23 = q->clg.a5,
		};

		if (nla_put(skb, NETEM_LOSS_GI, sizeof(gi), &gi))
			goto nla_put_failure;
		break;
	}
	case CLG_GILB_ELL: {
		struct tc_netem_gemodel ge = {
			.p = q->clg.a1,
			.r = q->clg.a2,
			.h = q->clg.a3,
			.k1 = q->clg.a4,
		};

		if (nla_put(skb, NETEM_LOSS_GE, sizeof(ge), &ge))
			goto nla_put_failure;
		break;
	}
	}

	nla_nest_end(skb, nest);
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	const struct netem_sched_data *q = qdisc_priv(sch);
	struct nlattr *nla = (struct nlattr *) skb_tail_pointer(skb);
	struct tc_netem_qopt qopt;
	struct tc_netem_corr cor;
	struct tc_netem_reorder reorder;
	struct tc_netem_corrupt corrupt;
	struct tc_netem_rate rate;
	struct tc_netem_slot slot;

	qopt.latency = min_t(psched_tdiff_t, PSCHED_NS2TICKS(q->latency),
			     UINT_MAX);
	qopt.jitter = min_t(psched_tdiff_t, PSCHED_NS2TICKS(q->jitter),
			    UINT_MAX);
	qopt.limit = q->limit;
	qopt.loss = q->loss;
	qopt.gap = q->gap;
	qopt.duplicate = q->duplicate;
	if (nla_put(skb, TCA_OPTIONS, sizeof(qopt), &qopt))
		goto nla_put_failure;

	if (nla_put(skb, TCA_NETEM_LATENCY64, sizeof(q->latency), &q->latency))
		goto nla_put_failure;

	if (nla_put(skb, TCA_NETEM_JITTER64, sizeof(q->jitter), &q->jitter))
		goto nla_put_failure;

	cor.delay_corr = q->delay_cor.rho;
	cor.loss_corr = q->loss_cor.rho;
	cor.dup_corr = q->dup_cor.rho;
	if (nla_put(skb, TCA_NETEM_CORR, sizeof(cor), &cor))
		goto nla_put_failure;

	reorder.probability = q->reorder;
	reorder.correlation = q->reorder_cor.rho;
	if (nla_put(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder))
		goto nla_put_failure;

	corrupt.probability = q->corrupt;
	corrupt.correlation = q->corrupt_cor.rho;
	if (nla_put(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt))
		goto nla_put_failure;

	if (q->rate >= (1ULL << 32)) {
		if (nla_put_u64_64bit(skb, TCA_NETEM_RATE64, q->rate,
				      TCA_NETEM_PAD))
			goto nla_put_failure;
		rate.rate = ~0U;
	} else {
		rate.rate = q->rate;
	}
	rate.packet_overhead = q->packet_overhead;
	rate.cell_size = q->cell_size;
	rate.cell_overhead = q->cell_overhead;
	if (nla_put(skb, TCA_NETEM_RATE, sizeof(rate), &rate))
		goto nla_put_failure;

	if (q->ecn && nla_put_u32(skb, TCA_NETEM_ECN, q->ecn))
		goto nla_put_failure;

	if (dump_loss_model(q, skb) != 0)
		goto nla_put_failure;

	if (q->slot_config.min_delay | q->slot_config.max_delay |
	    q->slot_config.dist_jitter) {
		slot = q->slot_config;
		if (slot.max_packets == INT_MAX)
			slot.max_packets = 0;
		if (slot.max_bytes == INT_MAX)
			slot.max_bytes = 0;
		if (nla_put(skb, TCA_NETEM_SLOT, sizeof(slot), &slot))
			goto nla_put_failure;
	}

	return nla_nest_end(skb, nla);

nla_put_failure:
	nlmsg_trim(skb, nla);
	return -1;
}

static int netem_dump_class(struct Qdisc *sch, unsigned long cl,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	if (cl != 1 || !q->qdisc)	/* only one class */
		return -ENOENT;

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;

	return 0;
}

static int netem_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old, struct netlink_ext_ack *extack)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	*old = qdisc_replace(sch, new, &q->qdisc);
	return 0;
}

static struct Qdisc *netem_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	return q->qdisc;
}

static unsigned long netem_find(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void netem_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}

static const struct Qdisc_class_ops netem_class_ops = {
	.graft		=	netem_graft,
	.leaf		=	netem_leaf,
	.find		=	netem_find,
	.walk		=	netem_walk,
	.dump		=	netem_dump_class,
};

static struct Qdisc_ops netem_qdisc_ops __read_mostly = {
	.id		=	"netem",
	.cl_ops		=	&netem_class_ops,
	.priv_size	=	sizeof(struct netem_sched_data),
	.enqueue	=	netem_enqueue,
	.dequeue	=	netem_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.init		=	netem_init,
	.reset		=	netem_reset,
	.destroy	=	netem_destroy,
	.change		=	netem_change,
	.dump		=	netem_dump,
	.owner		=	THIS_MODULE,
};


static int __init netem_module_init(void)
{
	pr_info("netem: version " VERSION "\n");
	return register_qdisc(&netem_qdisc_ops);
}

static void __exit netem_module_exit(void)
{
	unregister_qdisc(&netem_qdisc_ops);
}

module_init(netem_module_init)
module_exit(netem_module_exit)
MODULE_LICENSE("GPL");