// SPDX-License-Identifier: GPL-2.0-only
/*
 * net/sched/sch_netem.c	Network emulator
 *
 *		Many of the algorithms and ideas for this came from
 *		NIST Net which is not copyrighted.
 *
 * Authors:	Stephen Hemminger <shemminger@osdl.org>
 *		Catalin(ux aka Dino) BOIE <catab at umbrella dot ro>
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/rtnetlink.h>
#include <linux/reciprocal_div.h>
#include <linux/rbtree.h>

#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/inet_ecn.h>

#define VERSION "1.3"

/*	Network Emulation Queuing algorithm.
	====================================

	Sources: [1] Mark Carson, Darrin Santay, "NIST Net - A Linux-based
		 Network Emulation Tool"
		 [2] Luigi Rizzo, DummyNet for FreeBSD

	 ----------------------------------------------------------------

	 This started out as a simple way to delay outgoing packets to
	 test TCP but has grown to include most of the functionality
	 of a full-blown network emulator like NISTnet. It can delay
	 packets and add random jitter (and correlation). The random
	 distribution can also be loaded from a table to provide
	 normal, Pareto, or experimental curves. Packet loss,
	 duplication, and reordering can also be emulated.

	 This qdisc does not do classification; that can be handled by
	 layering other disciplines. It does not need to do bandwidth
	 control either, since that can be handled by using token
	 bucket or other rate control.

	 Correlated Loss Generator models

	 Added generation of correlated losses according to the 4-state
	 Markov chain of the GI model and the 2-state "Gilbert-Elliot"
	 model.

	 References:
	 [1] NetemCLG Home http://netgroup.uniroma2.it/NetemCLG
	 [2] S. Salsano, F. Ludovici, A. Ordine, "Definition of a general
	 and intuitive loss model for packet networks and its implementation
	 in the Netem module in the Linux kernel", available in [1]

	 Authors: Stefano Salsano <stefano.salsano at uniroma2.it>
		  Fabio Ludovici <fabio.ludovici at yahoo.it>
*/
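
/* Example configurations (illustrative only; see the tc-netem(8) man
 * page shipped with iproute2 for the authoritative syntax):
 *
 *	# 100ms delay with 10ms jitter, 25% correlated with the last delay
 *	tc qdisc add dev eth0 root netem delay 100ms 10ms 25%
 *
 *	# 0.3% random loss plus 1% duplication
 *	tc qdisc add dev eth0 root netem loss 0.3% duplicate 1%
 *
 * Link-rate emulation (the 'rate' option, handled by get_rate() and
 * packet_time_ns() below) can be combined with these as well.
 */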

struct disttable {
	u32  size;
	s16 table[];
};

struct netem_sched_data {
	/* internal t(ime)fifo qdisc uses t_root and sch->limit */
	struct rb_root t_root;

	/* a linear queue; reduces rbtree rebalancing when jitter is low */
	struct sk_buff	*t_head;
	struct sk_buff	*t_tail;

	/* optional qdisc for classful handling (NULL at netem init) */
	struct Qdisc	*qdisc;

	struct qdisc_watchdog watchdog;

	s64 latency;
	s64 jitter;

	u32 loss;
	u32 ecn;
	u32 limit;
	u32 counter;
	u32 gap;
	u32 duplicate;
	u32 reorder;
	u32 corrupt;
	u64 rate;
	s32 packet_overhead;
	u32 cell_size;
	struct reciprocal_value cell_size_reciprocal;
	s32 cell_overhead;

	struct crndstate {
		u32 last;
		u32 rho;
	} delay_cor, loss_cor, dup_cor, reorder_cor, corrupt_cor;

	struct disttable *delay_dist;

	enum {
		CLG_RANDOM,
		CLG_4_STATES,
		CLG_GILB_ELL,
	} loss_model;

	enum {
		TX_IN_GAP_PERIOD = 1,
		TX_IN_BURST_PERIOD,
		LOST_IN_GAP_PERIOD,
		LOST_IN_BURST_PERIOD,
	} _4_state_model;

	enum {
		GOOD_STATE = 1,
		BAD_STATE,
	} GE_state_model;

	/* Correlated Loss Generation models */
	struct clgstate {
		/* state of the Markov chain */
		u8 state;

		/* 4-states and Gilbert-Elliot models */
		u32 a1;	/* p13 for 4-states or p for GE */
		u32 a2;	/* p31 for 4-states or r for GE */
		u32 a3;	/* p32 for 4-states or h for GE */
		u32 a4;	/* p14 for 4-states or 1-k for GE */
		u32 a5; /* p23 used only in 4-states */
	} clg;

	struct tc_netem_slot slot_config;
	struct slotstate {
		u64 slot_next;
		s32 packets_left;
		s32 bytes_left;
	} slot;

	struct disttable *slot_dist;
};

/* Time stamp put into socket buffer control block
 * Only valid when skbs are in our internal t(ime)fifo queue.
 *
 * As skb->rbnode uses the same storage as skb->next, skb->prev and skb->tstamp,
 * and skb->next & skb->prev are scratch space for a qdisc,
 * we save the skb->tstamp value in skb->cb[] before destroying it.
 */
struct netem_skb_cb {
	u64	        time_to_send;
};

static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb)
{
	/* we assume we can use skb next/prev/tstamp as storage for rb_node */
	qdisc_cb_private_validate(skb, sizeof(struct netem_skb_cb));
	return (struct netem_skb_cb *)qdisc_skb_cb(skb)->data;
}

/* init_crandom - initialize correlated random number generator
 * Use entropy source for initial seed.
 */
static void init_crandom(struct crndstate *state, unsigned long rho)
{
	state->rho = rho;
	state->last = prandom_u32();
}

/* get_crandom - correlated random number generator
 * Next number depends on last value.
 * rho is scaled to avoid floating point.
 */
static u32 get_crandom(struct crndstate *state)
{
	u64 value, rho;
	unsigned long answer;

	if (!state || state->rho == 0)	/* no correlation */
		return prandom_u32();

	value = prandom_u32();
	rho = (u64)state->rho + 1;
	answer = (value * ((1ull<<32) - rho) + state->last * rho) >> 32;
	state->last = answer;
	return answer;
}
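
/* For intuition: with rho expressed as a fraction of 2^32, the fixed
 * point update above computes
 *
 *	next = (1 - rho) * uniform + rho * last
 *
 * so rho == 0 yields independent uniform values while rho close to 1
 * makes each value track its predecessor. A floating-point sketch of
 * the same recurrence (illustration only, not kernel code):
 *
 *	double crandom_next(double *last, double rho)
 *	{
 *		double u = rand() / (double)RAND_MAX;	// uniform in [0, 1]
 *
 *		*last = (1.0 - rho) * u + rho * *last;
 *		return *last;
 *	}
 */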

/* loss_4state - 4-state model loss generator
 * Generates losses according to the 4-state Markov chain adopted in
 * the GI (General and Intuitive) loss model.
 */
static bool loss_4state(struct netem_sched_data *q)
{
	struct clgstate *clg = &q->clg;
	u32 rnd = prandom_u32();

	/*
	 * Makes a comparison between rnd and the transition
	 * probabilities outgoing from the current state, then decides the
	 * next state and if the next packet has to be transmitted or lost.
	 * The four states correspond to:
	 *   TX_IN_GAP_PERIOD => successfully transmitted packets within a gap period
	 *   LOST_IN_BURST_PERIOD => isolated losses within a gap period
	 *   LOST_IN_GAP_PERIOD => lost packets within a burst period
	 *   TX_IN_BURST_PERIOD => successfully transmitted packets within a burst period
	 */
	switch (clg->state) {
	case TX_IN_GAP_PERIOD:
		if (rnd < clg->a4) {
			clg->state = LOST_IN_BURST_PERIOD;
			return true;
		} else if (clg->a4 < rnd && rnd < clg->a1 + clg->a4) {
			clg->state = LOST_IN_GAP_PERIOD;
			return true;
		} else if (clg->a1 + clg->a4 < rnd) {
			clg->state = TX_IN_GAP_PERIOD;
		}

		break;
	case TX_IN_BURST_PERIOD:
		if (rnd < clg->a5) {
			clg->state = LOST_IN_GAP_PERIOD;
			return true;
		} else {
			clg->state = TX_IN_BURST_PERIOD;
		}

		break;
	case LOST_IN_GAP_PERIOD:
		if (rnd < clg->a3) {
			clg->state = TX_IN_BURST_PERIOD;
		} else if (clg->a3 < rnd && rnd < clg->a2 + clg->a3) {
			clg->state = TX_IN_GAP_PERIOD;
		} else if (clg->a2 + clg->a3 < rnd) {
			clg->state = LOST_IN_GAP_PERIOD;
			return true;
		}
		break;
	case LOST_IN_BURST_PERIOD:
		clg->state = TX_IN_GAP_PERIOD;
		break;
	}

	return false;
}
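
/* From user space the 4-state model is selected with a command of the
 * form (illustrative; the arguments are the transition probabilities
 * p13 p31 p32 p23 p14, trailing ones optional per tc-netem(8)):
 *
 *	tc qdisc add dev eth0 root netem loss state 1% 10% 70% 10% 0.1%
 */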

/* loss_gilb_ell - Gilbert-Elliot model loss generator
 * Generates losses according to the Gilbert-Elliot loss model or
 * its special cases (Gilbert or Simple Gilbert)
 *
 * Makes a comparison between random number and the transition
 * probabilities outgoing from the current state, then decides the
 * next state. A second random number is extracted and the comparison
 * with the loss probability of the current state decides if the next
 * packet will be transmitted or lost.
 */
static bool loss_gilb_ell(struct netem_sched_data *q)
{
	struct clgstate *clg = &q->clg;

	switch (clg->state) {
	case GOOD_STATE:
		if (prandom_u32() < clg->a1)
			clg->state = BAD_STATE;
		if (prandom_u32() < clg->a4)
			return true;
		break;
	case BAD_STATE:
		if (prandom_u32() < clg->a2)
			clg->state = GOOD_STATE;
		if (prandom_u32() > clg->a3)
			return true;
	}

	return false;
}
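
/* The matching user-space form is roughly (illustrative; per
 * tc-netem(8) the arguments are p, r, 1-h and 1-k, and giving fewer of
 * them selects the simpler special cases, e.g. p and r alone give
 * Simple Gilbert):
 *
 *	tc qdisc add dev eth0 root netem loss gemodel 1% 10% 70% 0.1%
 */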

static bool loss_event(struct netem_sched_data *q)
{
	switch (q->loss_model) {
	case CLG_RANDOM:
		/* Random packet drop 0 => none, ~0 => all */
		return q->loss && q->loss >= get_crandom(&q->loss_cor);

	case CLG_4_STATES:
		/* 4-state loss model algorithm (used also for the GI model):
		 * extracts a value from the Markov 4-state loss generator;
		 * a true result means this packet is lost.
		 */
		return loss_4state(q);

	case CLG_GILB_ELL:
		/* Gilbert-Elliot loss model algorithm:
		 * extracts a value from the Gilbert-Elliot loss generator;
		 * a true result means this packet is lost.
		 */
		return loss_gilb_ell(q);
	}

	return false;	/* not reached */
}


/* tabledist - return a pseudo-randomly distributed value with mean mu and
 * std deviation sigma.  Uses table lookup to approximate the desired
 * distribution, and a uniformly-distributed pseudo-random source.
 */
static s64 tabledist(s64 mu, s32 sigma,
		     struct crndstate *state,
		     const struct disttable *dist)
{
	s64 x;
	long t;
	u32 rnd;

	if (sigma == 0)
		return mu;

	rnd = get_crandom(state);

	/* default uniform distribution */
	if (dist == NULL)
		return ((rnd % (2 * sigma)) + mu) - sigma;

	t = dist->table[rnd % dist->size];
	x = (sigma % NETEM_DIST_SCALE) * t;
	if (x >= 0)
		x += NETEM_DIST_SCALE/2;
	else
		x -= NETEM_DIST_SCALE/2;

	return x / NETEM_DIST_SCALE + (sigma / NETEM_DIST_SCALE) * t + mu;
}
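
/* Worked example: mu = 100ms and sigma = 10ms (in ns), with a table of
 * normal deviates scaled by NETEM_DIST_SCALE (8192). A table entry of
 * t = 8192, i.e. one standard deviation above the mean, yields exactly
 * mu + sigma = 110ms. The sigma * t product is split into
 * (sigma / SCALE) * t plus a (sigma % SCALE) * t / SCALE remainder so
 * the low-order part can be rounded to nearest instead of truncated.
 */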

static u64 packet_time_ns(u64 len, const struct netem_sched_data *q)
{
	len += q->packet_overhead;

	if (q->cell_size) {
		u32 cells = reciprocal_divide(len, q->cell_size_reciprocal);

		if (len > cells * q->cell_size)	/* extra cell needed for remainder */
			cells++;
		len = cells * (q->cell_size + q->cell_overhead);
	}

	return div64_u64(len * NSEC_PER_SEC, q->rate);
}
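
/* Worked example: with rate = 125000 bytes/sec (a 1 Mbit/s link) and no
 * overheads, a 1500 byte packet occupies the link for
 * 1500 * NSEC_PER_SEC / 125000 = 12,000,000 ns = 12 ms. A non-zero
 * cell_size models cell-based links such as ATM, where a partly filled
 * final cell still consumes a whole cell slot on the wire.
 */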

static void tfifo_reset(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct rb_node *p = rb_first(&q->t_root);

	while (p) {
		struct sk_buff *skb = rb_to_skb(p);

		p = rb_next(p);
		rb_erase(&skb->rbnode, &q->t_root);
		rtnl_kfree_skbs(skb, skb);
	}

	rtnl_kfree_skbs(q->t_head, q->t_tail);
	q->t_head = NULL;
	q->t_tail = NULL;
}

static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	u64 tnext = netem_skb_cb(nskb)->time_to_send;

	if (!q->t_tail || tnext >= netem_skb_cb(q->t_tail)->time_to_send) {
		if (q->t_tail)
			q->t_tail->next = nskb;
		else
			q->t_head = nskb;
		q->t_tail = nskb;
	} else {
		struct rb_node **p = &q->t_root.rb_node, *parent = NULL;

		while (*p) {
			struct sk_buff *skb;

			parent = *p;
			skb = rb_to_skb(parent);
			if (tnext >= netem_skb_cb(skb)->time_to_send)
				p = &parent->rb_right;
			else
				p = &parent->rb_left;
		}
		rb_link_node(&nskb->rbnode, parent, p);
		rb_insert_color(&nskb->rbnode, &q->t_root);
	}
	sch->q.qlen++;
}
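
/* Ordering note: a packet whose send time is >= the current tail's is
 * appended to the t_head/t_tail list in O(1), the common case when
 * jitter is small relative to packet spacing; only packets that
 * actually need reordering in time pay for an O(log n) rbtree insert.
 */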

/* netem can't properly corrupt a megapacket (like we get from GSO), so when
 * we statistically choose to corrupt one, we instead segment it, returning
 * the first packet to be corrupted, and re-enqueue the remaining frames
 */
static struct sk_buff *netem_segment(struct sk_buff *skb, struct Qdisc *sch,
				     struct sk_buff **to_free)
{
	struct sk_buff *segs;
	netdev_features_t features = netif_skb_features(skb);

	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);

	if (IS_ERR_OR_NULL(segs)) {
		qdisc_drop(skb, sch, to_free);
		return NULL;
	}
	consume_skb(skb);
	return segs;
}

/*
 * Insert one skb into qdisc.
 * Note: parent depends on return value to account for queue length.
 *	NET_XMIT_DROP: queue length didn't change.
 *	NET_XMIT_SUCCESS: one skb was queued.
 */
static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			 struct sk_buff **to_free)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	/* We don't fill cb now as skb_unshare() may invalidate it */
	struct netem_skb_cb *cb;
	struct sk_buff *skb2;
	struct sk_buff *segs = NULL;
	unsigned int len = 0, last_len, prev_len = qdisc_pkt_len(skb);
	int nb = 0;
	int count = 1;
	int rc = NET_XMIT_SUCCESS;
	int rc_drop = NET_XMIT_DROP;

	/* Do not fool qdisc_drop_all() */
	skb->prev = NULL;

	/* Random duplication */
	if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
		++count;

	/* Drop packet? */
	if (loss_event(q)) {
		if (q->ecn && INET_ECN_set_ce(skb))
			qdisc_qstats_drop(sch); /* mark packet */
		else
			--count;
	}
	if (count == 0) {
		qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	}

	/* If a delay is expected, orphan the skb. (orphaning usually takes
	 * place at TX completion time, so _before_ the link transit delay)
	 */
	if (q->latency || q->jitter || q->rate)
		skb_orphan_partial(skb);

	/*
	 * If we need to duplicate packet, then re-insert at top of the
	 * qdisc tree, since parent queuer expects that only one
	 * skb will be queued.
	 */
	if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
		struct Qdisc *rootq = qdisc_root(sch);
		u32 dupsave = q->duplicate; /* prevent duplicating a dup... */

		q->duplicate = 0;
		rootq->enqueue(skb2, rootq, to_free);
		q->duplicate = dupsave;
		rc_drop = NET_XMIT_SUCCESS;
	}

	/*
	 * Randomized packet corruption.
	 * Make a copy if needed since we are modifying the data.
	 * If packet is going to be hardware checksummed, then
	 * do it now in software before we mangle it.
	 */
	if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
		if (skb_is_gso(skb)) {
			segs = netem_segment(skb, sch, to_free);
			if (!segs)
				return rc_drop;
		} else {
			segs = skb;
		}

		skb = segs;
		segs = segs->next;

		skb = skb_unshare(skb, GFP_ATOMIC);
		if (unlikely(!skb)) {
			qdisc_qstats_drop(sch);
			goto finish_segs;
		}
		if (skb->ip_summed == CHECKSUM_PARTIAL &&
		    skb_checksum_help(skb)) {
			qdisc_drop(skb, sch, to_free);
			goto finish_segs;
		}

		skb->data[prandom_u32() % skb_headlen(skb)] ^=
			1<<(prandom_u32() % 8);
	}

	if (unlikely(sch->q.qlen >= sch->limit)) {
		qdisc_drop_all(skb, sch, to_free);
		return rc_drop;
	}

	qdisc_qstats_backlog_inc(sch, skb);

	cb = netem_skb_cb(skb);
	if (q->gap == 0 ||		/* not doing reordering */
	    q->counter < q->gap - 1 ||	/* inside last reordering gap */
	    q->reorder < get_crandom(&q->reorder_cor)) {
		u64 now;
		s64 delay;

		delay = tabledist(q->latency, q->jitter,
				  &q->delay_cor, q->delay_dist);

		now = ktime_get_ns();

		if (q->rate) {
			struct netem_skb_cb *last = NULL;

			if (sch->q.tail)
				last = netem_skb_cb(sch->q.tail);
			if (q->t_root.rb_node) {
				struct sk_buff *t_skb;
				struct netem_skb_cb *t_last;

				t_skb = skb_rb_last(&q->t_root);
				t_last = netem_skb_cb(t_skb);
				if (!last ||
				    t_last->time_to_send > last->time_to_send)
					last = t_last;
			}
			if (q->t_tail) {
				struct netem_skb_cb *t_last =
					netem_skb_cb(q->t_tail);

				if (!last ||
				    t_last->time_to_send > last->time_to_send)
					last = t_last;
			}

			if (last) {
				/*
				 * Last packet in queue is reference point (now),
				 * calculate this time bonus and subtract
				 * from delay.
				 */
				delay -= last->time_to_send - now;
				delay = max_t(s64, 0, delay);
				now = last->time_to_send;
			}

			delay += packet_time_ns(qdisc_pkt_len(skb), q);
		}

		cb->time_to_send = now + delay;
		++q->counter;
		tfifo_enqueue(skb, sch);
	} else {
		/*
		 * Do re-ordering by putting one out of N packets at the front
		 * of the queue.
		 */
		cb->time_to_send = ktime_get_ns();
		q->counter = 0;

		__qdisc_enqueue_head(skb, &sch->q);
		sch->qstats.requeues++;
	}

finish_segs:
	if (segs) {
		while (segs) {
			skb2 = segs->next;
			skb_mark_not_on_list(segs);
			qdisc_skb_cb(segs)->pkt_len = segs->len;
			last_len = segs->len;
			rc = qdisc_enqueue(segs, sch, to_free);
			if (rc != NET_XMIT_SUCCESS) {
				if (net_xmit_drop_count(rc))
					qdisc_qstats_drop(sch);
			} else {
				nb++;
				len += last_len;
			}
			segs = skb2;
		}
		sch->q.qlen += nb;
		if (nb > 1)
			qdisc_tree_reduce_backlog(sch, 1 - nb, prev_len - len);
	}
	return NET_XMIT_SUCCESS;
}

/* Begin the next delivery slot at some future time, refreshing the
 * packet and byte budgets.
 */

static void get_slot_next(struct netem_sched_data *q, u64 now)
{
	s64 next_delay;

	if (!q->slot_dist)
		next_delay = q->slot_config.min_delay +
				(prandom_u32() *
				 (q->slot_config.max_delay -
				  q->slot_config.min_delay) >> 32);
	else
		next_delay = tabledist(q->slot_config.dist_delay,
				       (s32)(q->slot_config.dist_jitter),
				       NULL, q->slot_dist);

	q->slot.slot_next = now + next_delay;
	q->slot.packets_left = q->slot_config.max_packets;
	q->slot.bytes_left = q->slot_config.max_bytes;
}
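
/* Without a distribution table, (prandom_u32() * range) >> 32 picks a
 * uniform value in [0, range), so next_delay falls between min_delay
 * and max_delay. Slots are configured from user space with something
 * like (illustrative; see tc-netem(8)):
 *
 *	tc qdisc add dev eth0 root netem slot 800us 1ms packets 32
 */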

static struct sk_buff *netem_peek(struct netem_sched_data *q)
{
	struct sk_buff *skb = skb_rb_first(&q->t_root);
	u64 t1, t2;

	if (!skb)
		return q->t_head;
	if (!q->t_head)
		return skb;

	t1 = netem_skb_cb(skb)->time_to_send;
	t2 = netem_skb_cb(q->t_head)->time_to_send;
	if (t1 < t2)
		return skb;
	return q->t_head;
}

static void netem_erase_head(struct netem_sched_data *q, struct sk_buff *skb)
{
	if (skb == q->t_head) {
		q->t_head = skb->next;
		if (!q->t_head)
			q->t_tail = NULL;
	} else {
		rb_erase(&skb->rbnode, &q->t_root);
	}
}

static struct sk_buff *netem_dequeue(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

tfifo_dequeue:
	skb = __qdisc_dequeue_head(&sch->q);
	if (skb) {
		qdisc_qstats_backlog_dec(sch, skb);
deliver:
		qdisc_bstats_update(sch, skb);
		return skb;
	}
	skb = netem_peek(q);
	if (skb) {
		u64 time_to_send;
		u64 now = ktime_get_ns();

		/* has this packet's scheduled send time been reached? */
		time_to_send = netem_skb_cb(skb)->time_to_send;
		if (q->slot.slot_next && q->slot.slot_next < time_to_send)
			get_slot_next(q, now);

		if (time_to_send <= now && q->slot.slot_next <= now) {
			netem_erase_head(q, skb);
			sch->q.qlen--;
			qdisc_qstats_backlog_dec(sch, skb);
			skb->next = NULL;
			skb->prev = NULL;
			/* skb->dev shares skb->rbnode area,
			 * we need to restore its value.
			 */
			skb->dev = qdisc_dev(sch);

			if (q->slot.slot_next) {
				q->slot.packets_left--;
				q->slot.bytes_left -= qdisc_pkt_len(skb);
				if (q->slot.packets_left <= 0 ||
				    q->slot.bytes_left <= 0)
					get_slot_next(q, now);
			}

			if (q->qdisc) {
				unsigned int pkt_len = qdisc_pkt_len(skb);
				struct sk_buff *to_free = NULL;
				int err;

				err = qdisc_enqueue(skb, q->qdisc, &to_free);
				kfree_skb_list(to_free);
				if (err != NET_XMIT_SUCCESS &&
				    net_xmit_drop_count(err)) {
					qdisc_qstats_drop(sch);
					qdisc_tree_reduce_backlog(sch, 1,
								  pkt_len);
				}
				goto tfifo_dequeue;
			}
			goto deliver;
		}

		if (q->qdisc) {
			skb = q->qdisc->ops->dequeue(q->qdisc);
			if (skb)
				goto deliver;
		}

		qdisc_watchdog_schedule_ns(&q->watchdog,
					   max(time_to_send,
					       q->slot.slot_next));
	}

	if (q->qdisc) {
		skb = q->qdisc->ops->dequeue(q->qdisc);
		if (skb)
			goto deliver;
	}
	return NULL;
}

static void netem_reset(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_reset_queue(sch);
	tfifo_reset(sch);
	if (q->qdisc)
		qdisc_reset(q->qdisc);
	qdisc_watchdog_cancel(&q->watchdog);
}

static void dist_free(struct disttable *d)
{
	kvfree(d);
}

/*
 * Distribution data is a variable-size payload containing
 * signed 16-bit values.
 */

static int get_dist_table(struct Qdisc *sch, struct disttable **tbl,
			  const struct nlattr *attr)
{
	size_t n = nla_len(attr)/sizeof(__s16);
	const __s16 *data = nla_data(attr);
	spinlock_t *root_lock;
	struct disttable *d;
	int i;

	if (n > NETEM_DIST_MAX)
		return -EINVAL;

	d = kvmalloc(sizeof(struct disttable) + n * sizeof(s16), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	d->size = n;
	for (i = 0; i < n; i++)
		d->table[i] = data[i];

	root_lock = qdisc_root_sleeping_lock(sch);

	spin_lock_bh(root_lock);
	swap(*tbl, d);
	spin_unlock_bh(root_lock);

	dist_free(d);
	return 0;
}
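
/* The tables themselves are built off line: iproute2 ships normal,
 * pareto and paretonormal tables plus a maketable tool for generating
 * custom ones from measured delays, and selects them with e.g.
 * (illustrative):
 *
 *	tc qdisc add dev eth0 root netem delay 100ms 10ms distribution normal
 */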

static void get_slot(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_slot *c = nla_data(attr);

	q->slot_config = *c;
	if (q->slot_config.max_packets == 0)
		q->slot_config.max_packets = INT_MAX;
	if (q->slot_config.max_bytes == 0)
		q->slot_config.max_bytes = INT_MAX;
	q->slot.packets_left = q->slot_config.max_packets;
	q->slot.bytes_left = q->slot_config.max_bytes;
	if (q->slot_config.min_delay | q->slot_config.max_delay |
	    q->slot_config.dist_jitter)
		q->slot.slot_next = ktime_get_ns();
	else
		q->slot.slot_next = 0;
}

static void get_correlation(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_corr *c = nla_data(attr);

	init_crandom(&q->delay_cor, c->delay_corr);
	init_crandom(&q->loss_cor, c->loss_corr);
	init_crandom(&q->dup_cor, c->dup_corr);
}

static void get_reorder(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_reorder *r = nla_data(attr);

	q->reorder = r->probability;
	init_crandom(&q->reorder_cor, r->correlation);
}

static void get_corrupt(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_corrupt *r = nla_data(attr);

	q->corrupt = r->probability;
	init_crandom(&q->corrupt_cor, r->correlation);
}

static void get_rate(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_rate *r = nla_data(attr);

	q->rate = r->rate;
	q->packet_overhead = r->packet_overhead;
	q->cell_size = r->cell_size;
	q->cell_overhead = r->cell_overhead;
	if (q->cell_size)
		q->cell_size_reciprocal = reciprocal_value(q->cell_size);
	else
		q->cell_size_reciprocal = (struct reciprocal_value) { 0 };
}

static int get_loss_clg(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct nlattr *la;
	int rem;

	nla_for_each_nested(la, attr, rem) {
		u16 type = nla_type(la);

		switch (type) {
		case NETEM_LOSS_GI: {
			const struct tc_netem_gimodel *gi = nla_data(la);

			if (nla_len(la) < sizeof(struct tc_netem_gimodel)) {
				pr_info("netem: incorrect gi model size\n");
				return -EINVAL;
			}

			q->loss_model = CLG_4_STATES;

			q->clg.state = TX_IN_GAP_PERIOD;
			q->clg.a1 = gi->p13;
			q->clg.a2 = gi->p31;
			q->clg.a3 = gi->p32;
			q->clg.a4 = gi->p14;
			q->clg.a5 = gi->p23;
			break;
		}

		case NETEM_LOSS_GE: {
			const struct tc_netem_gemodel *ge = nla_data(la);

			if (nla_len(la) < sizeof(struct tc_netem_gemodel)) {
				pr_info("netem: incorrect ge model size\n");
				return -EINVAL;
			}

			q->loss_model = CLG_GILB_ELL;
			q->clg.state = GOOD_STATE;
			q->clg.a1 = ge->p;
			q->clg.a2 = ge->r;
			q->clg.a3 = ge->h;
			q->clg.a4 = ge->k1;
			break;
		}

		default:
			pr_info("netem: unknown loss type %u\n", type);
			return -EINVAL;
		}
	}

	return 0;
}

static const struct nla_policy netem_policy[TCA_NETEM_MAX + 1] = {
	[TCA_NETEM_CORR]	= { .len = sizeof(struct tc_netem_corr) },
	[TCA_NETEM_REORDER]	= { .len = sizeof(struct tc_netem_reorder) },
	[TCA_NETEM_CORRUPT]	= { .len = sizeof(struct tc_netem_corrupt) },
	[TCA_NETEM_RATE]	= { .len = sizeof(struct tc_netem_rate) },
	[TCA_NETEM_LOSS]	= { .type = NLA_NESTED },
	[TCA_NETEM_ECN]		= { .type = NLA_U32 },
	[TCA_NETEM_RATE64]	= { .type = NLA_U64 },
	[TCA_NETEM_LATENCY64]	= { .type = NLA_S64 },
	[TCA_NETEM_JITTER64]	= { .type = NLA_S64 },
	[TCA_NETEM_SLOT]	= { .len = sizeof(struct tc_netem_slot) },
};

static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
		      const struct nla_policy *policy, int len)
{
	int nested_len = nla_len(nla) - NLA_ALIGN(len);

	if (nested_len < 0) {
		pr_info("netem: invalid attributes len %d\n", nested_len);
		return -EINVAL;
	}

	if (nested_len >= nla_attr_size(0))
		return nla_parse_deprecated(tb, maxtype,
					    nla_data(nla) + NLA_ALIGN(len),
					    nested_len, policy, NULL);

	memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
	return 0;
}
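
/* Note: unlike most qdiscs, netem's TCA_OPTIONS payload begins with a
 * fixed struct tc_netem_qopt (the original ABI); any nested attributes
 * follow it, which is why parse_attr() skips NLA_ALIGN(len) bytes
 * before parsing.
 */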

/* Parse netlink message to set options */
static int netem_change(struct Qdisc *sch, struct nlattr *opt,
			struct netlink_ext_ack *extack)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_NETEM_MAX + 1];
	struct tc_netem_qopt *qopt;
	struct clgstate old_clg;
	int old_loss_model = CLG_RANDOM;
	int ret;

	if (opt == NULL)
		return -EINVAL;

	qopt = nla_data(opt);
	ret = parse_attr(tb, TCA_NETEM_MAX, opt, netem_policy, sizeof(*qopt));
	if (ret < 0)
		return ret;

	/* backup q->clg and q->loss_model */
	old_clg = q->clg;
	old_loss_model = q->loss_model;

	if (tb[TCA_NETEM_LOSS]) {
		ret = get_loss_clg(q, tb[TCA_NETEM_LOSS]);
		if (ret) {
			q->loss_model = old_loss_model;
			return ret;
		}
	} else {
		q->loss_model = CLG_RANDOM;
	}

	if (tb[TCA_NETEM_DELAY_DIST]) {
		ret = get_dist_table(sch, &q->delay_dist,
				     tb[TCA_NETEM_DELAY_DIST]);
		if (ret)
			goto get_table_failure;
	}

	if (tb[TCA_NETEM_SLOT_DIST]) {
		ret = get_dist_table(sch, &q->slot_dist,
				     tb[TCA_NETEM_SLOT_DIST]);
		if (ret)
			goto get_table_failure;
	}

	sch->limit = qopt->limit;

	q->latency = PSCHED_TICKS2NS(qopt->latency);
	q->jitter = PSCHED_TICKS2NS(qopt->jitter);
	q->limit = qopt->limit;
	q->gap = qopt->gap;
	q->counter = 0;
	q->loss = qopt->loss;
	q->duplicate = qopt->duplicate;

	/* for compatibility with earlier versions.
	 * if gap is set, need to assume 100% probability
	 */
	if (q->gap)
		q->reorder = ~0;

	if (tb[TCA_NETEM_CORR])
		get_correlation(q, tb[TCA_NETEM_CORR]);

	if (tb[TCA_NETEM_REORDER])
		get_reorder(q, tb[TCA_NETEM_REORDER]);

	if (tb[TCA_NETEM_CORRUPT])
		get_corrupt(q, tb[TCA_NETEM_CORRUPT]);

	if (tb[TCA_NETEM_RATE])
		get_rate(q, tb[TCA_NETEM_RATE]);

	if (tb[TCA_NETEM_RATE64])
		q->rate = max_t(u64, q->rate,
				nla_get_u64(tb[TCA_NETEM_RATE64]));

	if (tb[TCA_NETEM_LATENCY64])
		q->latency = nla_get_s64(tb[TCA_NETEM_LATENCY64]);

	if (tb[TCA_NETEM_JITTER64])
		q->jitter = nla_get_s64(tb[TCA_NETEM_JITTER64]);

	if (tb[TCA_NETEM_ECN])
		q->ecn = nla_get_u32(tb[TCA_NETEM_ECN]);

	if (tb[TCA_NETEM_SLOT])
		get_slot(q, tb[TCA_NETEM_SLOT]);

	return ret;

get_table_failure:
	/* recover clg and loss_model, in case q->clg and
	 * q->loss_model were modified in get_loss_clg()
	 */
	q->clg = old_clg;
	q->loss_model = old_loss_model;
	return ret;
}

static int netem_init(struct Qdisc *sch, struct nlattr *opt,
		      struct netlink_ext_ack *extack)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	int ret;

	qdisc_watchdog_init(&q->watchdog, sch);

	if (!opt)
		return -EINVAL;

	q->loss_model = CLG_RANDOM;
	ret = netem_change(sch, opt, extack);
	if (ret)
		pr_info("netem: change failed\n");
	return ret;
}

static void netem_destroy(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_watchdog_cancel(&q->watchdog);
	if (q->qdisc)
		qdisc_put(q->qdisc);
	dist_free(q->delay_dist);
	dist_free(q->slot_dist);
}

static int dump_loss_model(const struct netem_sched_data *q,
			   struct sk_buff *skb)
{
	struct nlattr *nest;

	nest = nla_nest_start_noflag(skb, TCA_NETEM_LOSS);
	if (nest == NULL)
		goto nla_put_failure;

	switch (q->loss_model) {
	case CLG_RANDOM:
		/* legacy loss model */
		nla_nest_cancel(skb, nest);
		return 0;	/* no data */

	case CLG_4_STATES: {
		struct tc_netem_gimodel gi = {
			.p13 = q->clg.a1,
			.p31 = q->clg.a2,
			.p32 = q->clg.a3,
			.p14 = q->clg.a4,
			.p23 = q->clg.a5,
		};

		if (nla_put(skb, NETEM_LOSS_GI, sizeof(gi), &gi))
			goto nla_put_failure;
		break;
	}
	case CLG_GILB_ELL: {
		struct tc_netem_gemodel ge = {
			.p = q->clg.a1,
			.r = q->clg.a2,
			.h = q->clg.a3,
			.k1 = q->clg.a4,
		};

		if (nla_put(skb, NETEM_LOSS_GE, sizeof(ge), &ge))
			goto nla_put_failure;
		break;
	}
	}

	nla_nest_end(skb, nest);
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	const struct netem_sched_data *q = qdisc_priv(sch);
	struct nlattr *nla = (struct nlattr *) skb_tail_pointer(skb);
	struct tc_netem_qopt qopt;
	struct tc_netem_corr cor;
	struct tc_netem_reorder reorder;
	struct tc_netem_corrupt corrupt;
	struct tc_netem_rate rate;
	struct tc_netem_slot slot;

	qopt.latency = min_t(psched_tdiff_t, PSCHED_NS2TICKS(q->latency),
			     UINT_MAX);
	qopt.jitter = min_t(psched_tdiff_t, PSCHED_NS2TICKS(q->jitter),
			    UINT_MAX);
	qopt.limit = q->limit;
	qopt.loss = q->loss;
	qopt.gap = q->gap;
	qopt.duplicate = q->duplicate;
	if (nla_put(skb, TCA_OPTIONS, sizeof(qopt), &qopt))
		goto nla_put_failure;

	if (nla_put(skb, TCA_NETEM_LATENCY64, sizeof(q->latency), &q->latency))
		goto nla_put_failure;

	if (nla_put(skb, TCA_NETEM_JITTER64, sizeof(q->jitter), &q->jitter))
		goto nla_put_failure;

	cor.delay_corr = q->delay_cor.rho;
	cor.loss_corr = q->loss_cor.rho;
	cor.dup_corr = q->dup_cor.rho;
	if (nla_put(skb, TCA_NETEM_CORR, sizeof(cor), &cor))
		goto nla_put_failure;

	reorder.probability = q->reorder;
	reorder.correlation = q->reorder_cor.rho;
	if (nla_put(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder))
		goto nla_put_failure;

	corrupt.probability = q->corrupt;
	corrupt.correlation = q->corrupt_cor.rho;
	if (nla_put(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt))
		goto nla_put_failure;

	if (q->rate >= (1ULL << 32)) {
		if (nla_put_u64_64bit(skb, TCA_NETEM_RATE64, q->rate,
				      TCA_NETEM_PAD))
			goto nla_put_failure;
		rate.rate = ~0U;
	} else {
		rate.rate = q->rate;
	}
	rate.packet_overhead = q->packet_overhead;
	rate.cell_size = q->cell_size;
	rate.cell_overhead = q->cell_overhead;
	if (nla_put(skb, TCA_NETEM_RATE, sizeof(rate), &rate))
		goto nla_put_failure;

	if (q->ecn && nla_put_u32(skb, TCA_NETEM_ECN, q->ecn))
		goto nla_put_failure;

	if (dump_loss_model(q, skb) != 0)
		goto nla_put_failure;

	if (q->slot_config.min_delay | q->slot_config.max_delay |
	    q->slot_config.dist_jitter) {
		slot = q->slot_config;
		if (slot.max_packets == INT_MAX)
			slot.max_packets = 0;
		if (slot.max_bytes == INT_MAX)
			slot.max_bytes = 0;
		if (nla_put(skb, TCA_NETEM_SLOT, sizeof(slot), &slot))
			goto nla_put_failure;
	}

	return nla_nest_end(skb, nla);

nla_put_failure:
	nlmsg_trim(skb, nla);
	return -1;
}

static int netem_dump_class(struct Qdisc *sch, unsigned long cl,
			    struct sk_buff *skb, struct tcmsg *tcm)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	if (cl != 1 || !q->qdisc)	/* only one class */
		return -ENOENT;

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;

	return 0;
}

static int netem_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		       struct Qdisc **old, struct netlink_ext_ack *extack)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	*old = qdisc_replace(sch, new, &q->qdisc);
	return 0;
}

static struct Qdisc *netem_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	return q->qdisc;
}

static unsigned long netem_find(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void netem_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}

static const struct Qdisc_class_ops netem_class_ops = {
	.graft		=	netem_graft,
	.leaf		=	netem_leaf,
	.find		=	netem_find,
	.walk		=	netem_walk,
	.dump		=	netem_dump_class,
};

static struct Qdisc_ops netem_qdisc_ops __read_mostly = {
	.id		=	"netem",
	.cl_ops		=	&netem_class_ops,
	.priv_size	=	sizeof(struct netem_sched_data),
	.enqueue	=	netem_enqueue,
	.dequeue	=	netem_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.init		=	netem_init,
	.reset		=	netem_reset,
	.destroy	=	netem_destroy,
	.change		=	netem_change,
	.dump		=	netem_dump,
	.owner		=	THIS_MODULE,
};


static int __init netem_module_init(void)
{
	pr_info("netem: version " VERSION "\n");
	return register_qdisc(&netem_qdisc_ops);
}
static void __exit netem_module_exit(void)
{
	unregister_qdisc(&netem_qdisc_ops);
}
module_init(netem_module_init)
module_exit(netem_module_exit)
MODULE_LICENSE("GPL");