1 /*
2  * net/sched/sch_netem.c	Network emulator
3  *
4  * 		This program is free software; you can redistribute it and/or
5  * 		modify it under the terms of the GNU General Public License
6  * 		as published by the Free Software Foundation; either version
7  * 		2 of the License.
8  *
9  *  		Many of the algorithms and ideas for this came from
10  *		NIST Net, which is not copyrighted.
11  *
12  * Authors:	Stephen Hemminger <shemminger@osdl.org>
13  *		Catalin(ux aka Dino) BOIE <catab at umbrella dot ro>
14  */
15 
16 #include <linux/module.h>
17 #include <linux/types.h>
18 #include <linux/kernel.h>
19 #include <linux/errno.h>
20 #include <linux/skbuff.h>
21 #include <linux/rtnetlink.h>
22 
23 #include <net/netlink.h>
24 #include <net/pkt_sched.h>
25 
26 #define VERSION "1.2"
27 
28 /*	Network Emulation Queuing algorithm.
29 	====================================
30 
31 	Sources: [1] Mark Carson, Darrin Santay, "NIST Net - A Linux-based
32 		 Network Emulation Tool"
33 		 [2] Luigi Rizzo, DummyNet for FreeBSD
34 
35 	 ----------------------------------------------------------------
36 
37 	 This started out as a simple way to delay outgoing packets to
38 	 test TCP but has grown to include most of the functionality
39 	 of a full blown network emulator like NISTnet. It can delay
40 	 packets and add random jitter (and correlation). The random
41 	 distribution can be loaded from a table as well to provide
42 	 normal, Pareto, or experimental curves. Packet loss,
43 	 duplication, and reordering can also be emulated.
44 
45 	 This qdisc does not do classification; that can be handled by
46 	 layering other disciplines.  It does not need to do bandwidth
47 	 control either, since that can be handled by using a token
48 	 bucket or other rate control.
49 
50 	 The simulator is limited by the Linux timer resolution
51 	 and will create packet bursts on the HZ boundary (1ms).
52 */
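
/*
 * Example configuration from user space (illustrative only; the exact
 * option syntax depends on the installed iproute2 version, and the
 * interface and numbers below are arbitrary):
 *
 *	# 100ms delay with 10ms jitter, plus 0.3% random packet loss
 *	tc qdisc add dev eth0 root netem delay 100ms 10ms loss 0.3%
 */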
53 
54 struct netem_sched_data {
55 	struct Qdisc	*qdisc;
56 	struct qdisc_watchdog watchdog;
57 
58 	psched_tdiff_t latency;
59 	psched_tdiff_t jitter;
60 
61 	u32 loss;
62 	u32 limit;
63 	u32 counter;
64 	u32 gap;
65 	u32 duplicate;
66 	u32 reorder;
67 	u32 corrupt;
68 
69 	struct crndstate {
70 		u32 last;
71 		u32 rho;
72 	} delay_cor, loss_cor, dup_cor, reorder_cor, corrupt_cor;
73 
74 	struct disttable {
75 		u32  size;
76 		s16 table[0];
77 	} *delay_dist;
78 };
79 
80 /* Time stamp put into socket buffer control block */
81 struct netem_skb_cb {
82 	psched_time_t	time_to_send;
83 };
84 
85 static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb)
86 {
87 	BUILD_BUG_ON(sizeof(skb->cb) <
88 		sizeof(struct qdisc_skb_cb) + sizeof(struct netem_skb_cb));
89 	return (struct netem_skb_cb *)qdisc_skb_cb(skb)->data;
90 }
91 
92 /* init_crandom - initialize correlated random number generator
93  * Use entropy source for initial seed.
94  */
95 static void init_crandom(struct crndstate *state, unsigned long rho)
96 {
97 	state->rho = rho;
98 	state->last = net_random();
99 }
100 
101 /* get_crandom - correlated random number generator
102  * Next number depends on last value.
103  * rho is scaled to avoid floating point.
104  */
105 static u32 get_crandom(struct crndstate *state)
106 {
107 	u64 value, rho;
108 	unsigned long answer;
109 
110 	if (state->rho == 0)	/* no correlation */
111 		return net_random();
112 
113 	value = net_random();
114 	rho = (u64)state->rho + 1;
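	/*
	 * Weighted average in 32.32 fixed point, roughly:
	 *	answer = (1 - rho/2^32) * value + (rho/2^32) * last
	 * so a small rho gives nearly independent values and a rho close
	 * to ~0 keeps the output close to the previous one.
	 */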
115 	answer = (value * ((1ull<<32) - rho) + state->last * rho) >> 32;
116 	state->last = answer;
117 	return answer;
118 }
119 
120 /* tabledist - return a pseudo-randomly distributed value with mean mu and
121  * std deviation sigma.  Uses table lookup to approximate the desired
122  * distribution, and a uniformly-distributed pseudo-random source.
123  */
124 static psched_tdiff_t tabledist(psched_tdiff_t mu, psched_tdiff_t sigma,
125 				struct crndstate *state,
126 				const struct disttable *dist)
127 {
128 	psched_tdiff_t x;
129 	long t;
130 	u32 rnd;
131 
132 	if (sigma == 0)
133 		return mu;
134 
135 	rnd = get_crandom(state);
136 
137 	/* default uniform distribution */
138 	if (dist == NULL)
139 		return (rnd % (2*sigma)) - sigma + mu;
140 
141 	t = dist->table[rnd % dist->size];
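	/*
	 * Scale the table entry by sigma without overflowing: the result
	 * below is sigma * t / NETEM_DIST_SCALE + mu, computed as a
	 * (sigma / NETEM_DIST_SCALE) * t part plus a rounded
	 * (sigma % NETEM_DIST_SCALE) * t / NETEM_DIST_SCALE remainder.
	 */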
142 	x = (sigma % NETEM_DIST_SCALE) * t;
143 	if (x >= 0)
144 		x += NETEM_DIST_SCALE/2;
145 	else
146 		x -= NETEM_DIST_SCALE/2;
147 
148 	return  x / NETEM_DIST_SCALE + (sigma / NETEM_DIST_SCALE) * t + mu;
149 }
150 
151 /*
152  * Insert one skb into qdisc.
153  * Note: parent depends on return value to account for queue length.
154  * 	NET_XMIT_DROP: queue length didn't change.
155  *      NET_XMIT_SUCCESS: one skb was queued.
156  */
157 static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
158 {
159 	struct netem_sched_data *q = qdisc_priv(sch);
160 	/* We don't fill cb now as skb_unshare() may invalidate it */
161 	struct netem_skb_cb *cb;
162 	struct sk_buff *skb2;
163 	int ret;
164 	int count = 1;
165 
166 	pr_debug("netem_enqueue skb=%p\n", skb);
167 
168 	/* Random duplication */
169 	if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
170 		++count;
171 
172 	/* Random packet drop 0 => none, ~0 => all */
173 	if (q->loss && q->loss >= get_crandom(&q->loss_cor))
174 		--count;
175 
176 	if (count == 0) {
177 		sch->qstats.drops++;
178 		kfree_skb(skb);
179 		return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
180 	}
181 
182 	skb_orphan(skb);
183 
184 	/*
185 	 * If we need to duplicate the packet, re-inject the copy at the
186 	 * top of the qdisc tree, since the parent qdisc expects that
187 	 * only one skb will be queued.
188 	 */
189 	if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
190 		struct Qdisc *rootq = qdisc_root(sch);
191 		u32 dupsave = q->duplicate; /* prevent duplicating a dup... */
192 		q->duplicate = 0;
193 
194 		qdisc_enqueue_root(skb2, rootq);
195 		q->duplicate = dupsave;
196 	}
197 
198 	/*
199 	 * Randomized packet corruption.
200 	 * Make a copy if needed, since we are modifying the data.
201 	 * If the packet is going to be hardware checksummed, then
202 	 * do the checksum now in software before we mangle it.
203 	 */
204 	if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
205 		if (!(skb = skb_unshare(skb, GFP_ATOMIC))
206 		    || (skb->ip_summed == CHECKSUM_PARTIAL
207 			&& skb_checksum_help(skb))) {
208 			sch->qstats.drops++;
209 			return NET_XMIT_DROP;
210 		}
211 
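		/* flip one random bit somewhere in the linear data area */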
212 		skb->data[net_random() % skb_headlen(skb)] ^= 1<<(net_random() % 8);
213 	}
214 
215 	cb = netem_skb_cb(skb);
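	/*
	 * Reordering decision: once at least q->gap packets have been
	 * given a normal delay, a packet may (with probability
	 * q->reorder) be sent immediately to the head of the queue,
	 * i.e. ahead of packets still waiting out their delay.
	 */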
216 	if (q->gap == 0 		/* not doing reordering */
217 	    || q->counter < q->gap 	/* inside last reordering gap */
218 	    || q->reorder < get_crandom(&q->reorder_cor)) {
219 		psched_time_t now;
220 		psched_tdiff_t delay;
221 
222 		delay = tabledist(q->latency, q->jitter,
223 				  &q->delay_cor, q->delay_dist);
224 
225 		now = psched_get_time();
226 		cb->time_to_send = now + delay;
227 		++q->counter;
228 		ret = qdisc_enqueue(skb, q->qdisc);
229 	} else {
230 		/*
231 		 * Do re-ordering by putting one out of N packets at the front
232 		 * of the queue.
233 		 */
234 		cb->time_to_send = psched_get_time();
235 		q->counter = 0;
236 		ret = q->qdisc->ops->requeue(skb, q->qdisc);
237 	}
238 
239 	if (likely(ret == NET_XMIT_SUCCESS)) {
240 		sch->q.qlen++;
241 		sch->bstats.bytes += qdisc_pkt_len(skb);
242 		sch->bstats.packets++;
243 	} else if (net_xmit_drop_count(ret)) {
244 		sch->qstats.drops++;
245 	}
246 
247 	pr_debug("netem: enqueue ret %d\n", ret);
248 	return ret;
249 }
250 
251 /* Requeue packets but don't change time stamp */
252 static int netem_requeue(struct sk_buff *skb, struct Qdisc *sch)
253 {
254 	struct netem_sched_data *q = qdisc_priv(sch);
255 	int ret;
256 
257 	if ((ret = q->qdisc->ops->requeue(skb, q->qdisc)) == 0) {
258 		sch->q.qlen++;
259 		sch->qstats.requeues++;
260 	}
261 
262 	return ret;
263 }
264 
265 static unsigned int netem_drop(struct Qdisc* sch)
266 {
267 	struct netem_sched_data *q = qdisc_priv(sch);
268 	unsigned int len = 0;
269 
270 	if (q->qdisc->ops->drop && (len = q->qdisc->ops->drop(q->qdisc)) != 0) {
271 		sch->q.qlen--;
272 		sch->qstats.drops++;
273 	}
274 	return len;
275 }
276 
277 static struct sk_buff *netem_dequeue(struct Qdisc *sch)
278 {
279 	struct netem_sched_data *q = qdisc_priv(sch);
280 	struct sk_buff *skb;
281 
282 	smp_mb();
283 	if (sch->flags & TCQ_F_THROTTLED)
284 		return NULL;
285 
286 	skb = q->qdisc->dequeue(q->qdisc);
287 	if (skb) {
288 		const struct netem_skb_cb *cb = netem_skb_cb(skb);
289 		psched_time_t now = psched_get_time();
290 
291 		/* is the packet due to be sent yet? */
292 		if (cb->time_to_send <= now) {
293 			pr_debug("netem_dequeue: return skb=%p\n", skb);
294 			sch->q.qlen--;
295 			return skb;
296 		}
297 
298 		if (unlikely(q->qdisc->ops->requeue(skb, q->qdisc) != NET_XMIT_SUCCESS)) {
299 			qdisc_tree_decrease_qlen(q->qdisc, 1);
300 			sch->qstats.drops++;
301 			printk(KERN_ERR "netem: %s could not requeue\n",
302 			       q->qdisc->ops->id);
303 		}
304 
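		/*
		 * Not due yet: the packet was put back on the child qdisc
		 * above, so arm the watchdog to wake us at its time_to_send.
		 */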
305 		qdisc_watchdog_schedule(&q->watchdog, cb->time_to_send);
306 	}
307 
308 	return NULL;
309 }
310 
311 static void netem_reset(struct Qdisc *sch)
312 {
313 	struct netem_sched_data *q = qdisc_priv(sch);
314 
315 	qdisc_reset(q->qdisc);
316 	sch->q.qlen = 0;
317 	qdisc_watchdog_cancel(&q->watchdog);
318 }
319 
320 /*
321  * Distribution data is a variable size payload containing
322  * signed 16 bit values.
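 * The table itself is normally generated off-line (for example by
 * iproute2's maketable) and passed in by tc when a non-uniform delay
 * distribution such as "normal" or "pareto" is requested.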
323  */
324 static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr)
325 {
326 	struct netem_sched_data *q = qdisc_priv(sch);
327 	unsigned long n = nla_len(attr)/sizeof(__s16);
328 	const __s16 *data = nla_data(attr);
329 	spinlock_t *root_lock;
330 	struct disttable *d;
331 	int i;
332 
333 	if (n > 65536)
334 		return -EINVAL;
335 
336 	d = kmalloc(sizeof(*d) + n*sizeof(d->table[0]), GFP_KERNEL);
337 	if (!d)
338 		return -ENOMEM;
339 
340 	d->size = n;
341 	for (i = 0; i < n; i++)
342 		d->table[i] = data[i];
343 
344 	root_lock = qdisc_root_sleeping_lock(sch);
345 
346 	spin_lock_bh(root_lock);
347 	d = xchg(&q->delay_dist, d);
348 	spin_unlock_bh(root_lock);
349 
350 	kfree(d);
351 	return 0;
352 }
353 
354 static int get_correlation(struct Qdisc *sch, const struct nlattr *attr)
355 {
356 	struct netem_sched_data *q = qdisc_priv(sch);
357 	const struct tc_netem_corr *c = nla_data(attr);
358 
359 	init_crandom(&q->delay_cor, c->delay_corr);
360 	init_crandom(&q->loss_cor, c->loss_corr);
361 	init_crandom(&q->dup_cor, c->dup_corr);
362 	return 0;
363 }
364 
365 static int get_reorder(struct Qdisc *sch, const struct nlattr *attr)
366 {
367 	struct netem_sched_data *q = qdisc_priv(sch);
368 	const struct tc_netem_reorder *r = nla_data(attr);
369 
370 	q->reorder = r->probability;
371 	init_crandom(&q->reorder_cor, r->correlation);
372 	return 0;
373 }
374 
375 static int get_corrupt(struct Qdisc *sch, const struct nlattr *attr)
376 {
377 	struct netem_sched_data *q = qdisc_priv(sch);
378 	const struct tc_netem_corrupt *r = nla_data(attr);
379 
380 	q->corrupt = r->probability;
381 	init_crandom(&q->corrupt_cor, r->correlation);
382 	return 0;
383 }
384 
385 static const struct nla_policy netem_policy[TCA_NETEM_MAX + 1] = {
386 	[TCA_NETEM_CORR]	= { .len = sizeof(struct tc_netem_corr) },
387 	[TCA_NETEM_REORDER]	= { .len = sizeof(struct tc_netem_reorder) },
388 	[TCA_NETEM_CORRUPT]	= { .len = sizeof(struct tc_netem_corrupt) },
389 };
390 
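/*
 * Netem options start with a struct tc_netem_qopt; any netlink
 * attributes (correlation, delay distribution, reorder, corrupt)
 * follow that fixed-size header.  Parse whatever trails the header,
 * or clear the attribute table if nothing does.
 */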
391 static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
392 		      const struct nla_policy *policy, int len)
393 {
394 	int nested_len = nla_len(nla) - NLA_ALIGN(len);
395 
396 	if (nested_len < 0)
397 		return -EINVAL;
398 	if (nested_len >= nla_attr_size(0))
399 		return nla_parse(tb, maxtype, nla_data(nla) + NLA_ALIGN(len),
400 				 nested_len, policy);
401 	memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
402 	return 0;
403 }
404 
405 /* Parse netlink message to set options */
406 static int netem_change(struct Qdisc *sch, struct nlattr *opt)
407 {
408 	struct netem_sched_data *q = qdisc_priv(sch);
409 	struct nlattr *tb[TCA_NETEM_MAX + 1];
410 	struct tc_netem_qopt *qopt;
411 	int ret;
412 
413 	if (opt == NULL)
414 		return -EINVAL;
415 
416 	qopt = nla_data(opt);
417 	ret = parse_attr(tb, TCA_NETEM_MAX, opt, netem_policy, sizeof(*qopt));
418 	if (ret < 0)
419 		return ret;
420 
421 	ret = fifo_set_limit(q->qdisc, qopt->limit);
422 	if (ret) {
423 		pr_debug("netem: can't set fifo limit\n");
424 		return ret;
425 	}
426 
427 	q->latency = qopt->latency;
428 	q->jitter = qopt->jitter;
429 	q->limit = qopt->limit;
430 	q->gap = qopt->gap;
431 	q->counter = 0;
432 	q->loss = qopt->loss;
433 	q->duplicate = qopt->duplicate;
434 
435 	/* For compatibility with earlier versions:
436 	 * if gap is set, assume 100% reordering probability.
437 	 */
438 	if (q->gap)
439 		q->reorder = ~0;
440 
441 	if (tb[TCA_NETEM_CORR]) {
442 		ret = get_correlation(sch, tb[TCA_NETEM_CORR]);
443 		if (ret)
444 			return ret;
445 	}
446 
447 	if (tb[TCA_NETEM_DELAY_DIST]) {
448 		ret = get_dist_table(sch, tb[TCA_NETEM_DELAY_DIST]);
449 		if (ret)
450 			return ret;
451 	}
452 
453 	if (tb[TCA_NETEM_REORDER]) {
454 		ret = get_reorder(sch, tb[TCA_NETEM_REORDER]);
455 		if (ret)
456 			return ret;
457 	}
458 
459 	if (tb[TCA_NETEM_CORRUPT]) {
460 		ret = get_corrupt(sch, tb[TCA_NETEM_CORRUPT]);
461 		if (ret)
462 			return ret;
463 	}
464 
465 	return 0;
466 }
467 
468 /*
469  * Special case version of FIFO queue for use by netem.
470  * It queues packets in order of the time_to_send timestamp in each skb.
471  */
472 struct fifo_sched_data {
473 	u32 limit;
474 	psched_time_t oldest;
475 };
476 
477 static int tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
478 {
479 	struct fifo_sched_data *q = qdisc_priv(sch);
480 	struct sk_buff_head *list = &sch->q;
481 	psched_time_t tnext = netem_skb_cb(nskb)->time_to_send;
482 	struct sk_buff *skb;
483 
484 	if (likely(skb_queue_len(list) < q->limit)) {
485 		/* Optimize for add at tail */
486 		if (likely(skb_queue_empty(list) || tnext >= q->oldest)) {
487 			q->oldest = tnext;
488 			return qdisc_enqueue_tail(nskb, sch);
489 		}
490 
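		/*
		 * Walk backwards from the tail and insert the new packet
		 * just after the last one that is due no later than it,
		 * keeping the queue sorted by time_to_send.
		 */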
491 		skb_queue_reverse_walk(list, skb) {
492 			const struct netem_skb_cb *cb = netem_skb_cb(skb);
493 
494 			if (tnext >= cb->time_to_send)
495 				break;
496 		}
497 
498 		__skb_queue_after(list, skb, nskb);
499 
500 		sch->qstats.backlog += qdisc_pkt_len(nskb);
501 		sch->bstats.bytes += qdisc_pkt_len(nskb);
502 		sch->bstats.packets++;
503 
504 		return NET_XMIT_SUCCESS;
505 	}
506 
507 	return qdisc_reshape_fail(nskb, sch);
508 }
509 
510 static int tfifo_init(struct Qdisc *sch, struct nlattr *opt)
511 {
512 	struct fifo_sched_data *q = qdisc_priv(sch);
513 
514 	if (opt) {
515 		struct tc_fifo_qopt *ctl = nla_data(opt);
516 		if (nla_len(opt) < sizeof(*ctl))
517 			return -EINVAL;
518 
519 		q->limit = ctl->limit;
520 	} else
521 		q->limit = max_t(u32, qdisc_dev(sch)->tx_queue_len, 1);
522 
523 	q->oldest = PSCHED_PASTPERFECT;
524 	return 0;
525 }
526 
527 static int tfifo_dump(struct Qdisc *sch, struct sk_buff *skb)
528 {
529 	struct fifo_sched_data *q = qdisc_priv(sch);
530 	struct tc_fifo_qopt opt = { .limit = q->limit };
531 
532 	NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
533 	return skb->len;
534 
535 nla_put_failure:
536 	return -1;
537 }
538 
539 static struct Qdisc_ops tfifo_qdisc_ops __read_mostly = {
540 	.id		=	"tfifo",
541 	.priv_size	=	sizeof(struct fifo_sched_data),
542 	.enqueue	=	tfifo_enqueue,
543 	.dequeue	=	qdisc_dequeue_head,
544 	.requeue	=	qdisc_requeue,
545 	.drop		=	qdisc_queue_drop,
546 	.init		=	tfifo_init,
547 	.reset		=	qdisc_reset_queue,
548 	.change		=	tfifo_init,
549 	.dump		=	tfifo_dump,
550 };
551 
552 static int netem_init(struct Qdisc *sch, struct nlattr *opt)
553 {
554 	struct netem_sched_data *q = qdisc_priv(sch);
555 	int ret;
556 
557 	if (!opt)
558 		return -EINVAL;
559 
560 	qdisc_watchdog_init(&q->watchdog, sch);
561 
562 	q->qdisc = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
563 				     &tfifo_qdisc_ops,
564 				     TC_H_MAKE(sch->handle, 1));
565 	if (!q->qdisc) {
566 		pr_debug("netem: qdisc create failed\n");
567 		return -ENOMEM;
568 	}
569 
570 	ret = netem_change(sch, opt);
571 	if (ret) {
572 		pr_debug("netem: change failed\n");
573 		qdisc_destroy(q->qdisc);
574 	}
575 	return ret;
576 }
577 
578 static void netem_destroy(struct Qdisc *sch)
579 {
580 	struct netem_sched_data *q = qdisc_priv(sch);
581 
582 	qdisc_watchdog_cancel(&q->watchdog);
583 	qdisc_destroy(q->qdisc);
584 	kfree(q->delay_dist);
585 }
586 
587 static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
588 {
589 	const struct netem_sched_data *q = qdisc_priv(sch);
590 	unsigned char *b = skb_tail_pointer(skb);
591 	struct nlattr *nla = (struct nlattr *) b;
592 	struct tc_netem_qopt qopt;
593 	struct tc_netem_corr cor;
594 	struct tc_netem_reorder reorder;
595 	struct tc_netem_corrupt corrupt;
596 
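	/*
	 * TCA_OPTIONS is written first with just the qopt payload; once
	 * the remaining netem attributes have been appended, nla->nla_len
	 * below is patched so TCA_OPTIONS covers the whole blob.
	 */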
597 	qopt.latency = q->latency;
598 	qopt.jitter = q->jitter;
599 	qopt.limit = q->limit;
600 	qopt.loss = q->loss;
601 	qopt.gap = q->gap;
602 	qopt.duplicate = q->duplicate;
603 	NLA_PUT(skb, TCA_OPTIONS, sizeof(qopt), &qopt);
604 
605 	cor.delay_corr = q->delay_cor.rho;
606 	cor.loss_corr = q->loss_cor.rho;
607 	cor.dup_corr = q->dup_cor.rho;
608 	NLA_PUT(skb, TCA_NETEM_CORR, sizeof(cor), &cor);
609 
610 	reorder.probability = q->reorder;
611 	reorder.correlation = q->reorder_cor.rho;
612 	NLA_PUT(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder);
613 
614 	corrupt.probability = q->corrupt;
615 	corrupt.correlation = q->corrupt_cor.rho;
616 	NLA_PUT(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt);
617 
618 	nla->nla_len = skb_tail_pointer(skb) - b;
619 
620 	return skb->len;
621 
622 nla_put_failure:
623 	nlmsg_trim(skb, b);
624 	return -1;
625 }
626 
627 static int netem_dump_class(struct Qdisc *sch, unsigned long cl,
628 			  struct sk_buff *skb, struct tcmsg *tcm)
629 {
630 	struct netem_sched_data *q = qdisc_priv(sch);
631 
632 	if (cl != 1) 	/* only one class */
633 		return -ENOENT;
634 
635 	tcm->tcm_handle |= TC_H_MIN(1);
636 	tcm->tcm_info = q->qdisc->handle;
637 
638 	return 0;
639 }
640 
641 static int netem_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
642 		     struct Qdisc **old)
643 {
644 	struct netem_sched_data *q = qdisc_priv(sch);
645 
646 	if (new == NULL)
647 		new = &noop_qdisc;
648 
649 	sch_tree_lock(sch);
650 	*old = xchg(&q->qdisc, new);
651 	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
652 	qdisc_reset(*old);
653 	sch_tree_unlock(sch);
654 
655 	return 0;
656 }
657 
658 static struct Qdisc *netem_leaf(struct Qdisc *sch, unsigned long arg)
659 {
660 	struct netem_sched_data *q = qdisc_priv(sch);
661 	return q->qdisc;
662 }
663 
664 static unsigned long netem_get(struct Qdisc *sch, u32 classid)
665 {
666 	return 1;
667 }
668 
669 static void netem_put(struct Qdisc *sch, unsigned long arg)
670 {
671 }
672 
673 static int netem_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
674 			    struct nlattr **tca, unsigned long *arg)
675 {
676 	return -ENOSYS;
677 }
678 
679 static int netem_delete(struct Qdisc *sch, unsigned long arg)
680 {
681 	return -ENOSYS;
682 }
683 
684 static void netem_walk(struct Qdisc *sch, struct qdisc_walker *walker)
685 {
686 	if (!walker->stop) {
687 		if (walker->count >= walker->skip)
688 			if (walker->fn(sch, 1, walker) < 0) {
689 				walker->stop = 1;
690 				return;
691 			}
692 		walker->count++;
693 	}
694 }
695 
696 static struct tcf_proto **netem_find_tcf(struct Qdisc *sch, unsigned long cl)
697 {
698 	return NULL;
699 }
700 
701 static const struct Qdisc_class_ops netem_class_ops = {
702 	.graft		=	netem_graft,
703 	.leaf		=	netem_leaf,
704 	.get		=	netem_get,
705 	.put		=	netem_put,
706 	.change		=	netem_change_class,
707 	.delete		=	netem_delete,
708 	.walk		=	netem_walk,
709 	.tcf_chain	=	netem_find_tcf,
710 	.dump		=	netem_dump_class,
711 };
712 
713 static struct Qdisc_ops netem_qdisc_ops __read_mostly = {
714 	.id		=	"netem",
715 	.cl_ops		=	&netem_class_ops,
716 	.priv_size	=	sizeof(struct netem_sched_data),
717 	.enqueue	=	netem_enqueue,
718 	.dequeue	=	netem_dequeue,
719 	.requeue	=	netem_requeue,
720 	.drop		=	netem_drop,
721 	.init		=	netem_init,
722 	.reset		=	netem_reset,
723 	.destroy	=	netem_destroy,
724 	.change		=	netem_change,
725 	.dump		=	netem_dump,
726 	.owner		=	THIS_MODULE,
727 };
728 
729 
730 static int __init netem_module_init(void)
731 {
732 	pr_info("netem: version " VERSION "\n");
733 	return register_qdisc(&netem_qdisc_ops);
734 }
735 static void __exit netem_module_exit(void)
736 {
737 	unregister_qdisc(&netem_qdisc_ops);
738 }
739 module_init(netem_module_init)
740 module_exit(netem_module_exit)
741 MODULE_LICENSE("GPL");
742