/*
 * net/sched/sch_netem.c	Network emulator
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License.
 *
 *		Many of the algorithms and ideas for this came from
 *		NIST Net which is not copyrighted.
 *
 * Authors:	Stephen Hemminger <shemminger@osdl.org>
 *		Catalin(ux aka Dino) BOIE <catab at umbrella dot ro>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>

#include <net/netlink.h>
#include <net/pkt_sched.h>

#define VERSION "1.2"

/*	Network Emulation Queuing algorithm.
	====================================

	Sources: [1] Mark Carson, Darrin Santay, "NIST Net - A Linux-based
		 Network Emulation Tool"
		 [2] Luigi Rizzo, DummyNet for FreeBSD

	 ----------------------------------------------------------------

	 This started out as a simple way to delay outgoing packets to
	 test TCP but has grown to include most of the functionality
	 of a full blown network emulator like NISTnet. It can delay
	 packets and add random jitter (and correlation). The random
	 distribution can also be loaded from a table to provide
	 normal, Pareto, or experimental curves. Packet loss,
	 duplication, and reordering can also be emulated.

	 This qdisc does not do classification; that can be handled by
	 layering other disciplines.  It does not need to do bandwidth
	 control either, since that can be handled by using token
	 bucket or other rate control.

	 The emulator is limited by the Linux timer resolution
	 and will create packet bursts on the HZ boundary (1ms).
*/
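
/* Example usage (illustrative), configured from user space with the tc
 * utility from iproute2:
 *
 *	tc qdisc add dev eth0 root netem delay 100ms 10ms 25%
 *
 * This adds 100ms of base delay with +/- 10ms of jitter, each delay
 * value 25% correlated with the previous one.
 */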

struct netem_sched_data {
	struct Qdisc	*qdisc;
	struct qdisc_watchdog watchdog;

	psched_tdiff_t latency;
	psched_tdiff_t jitter;

	u32 loss;
	u32 limit;
	u32 counter;
	u32 gap;
	u32 duplicate;
	u32 reorder;
	u32 corrupt;

	struct crndstate {
		u32 last;
		u32 rho;
	} delay_cor, loss_cor, dup_cor, reorder_cor, corrupt_cor;

	struct disttable {
		u32  size;
		s16 table[0];
	} *delay_dist;
};

/* Time stamp put into socket buffer control block */
struct netem_skb_cb {
	psched_time_t	time_to_send;
};

/* init_crandom - initialize correlated random number generator
 * Use entropy source for initial seed.
 */
static void init_crandom(struct crndstate *state, unsigned long rho)
{
	state->rho = rho;
	state->last = net_random();
}

/* get_crandom - correlated random number generator
 * Next number depends on last value.
 * rho is scaled to avoid floating point.
 */
static u32 get_crandom(struct crndstate *state)
{
	u64 value, rho;
	unsigned long answer;

	if (state->rho == 0)	/* no correlation */
		return net_random();

	value = net_random();
	rho = (u64)state->rho + 1;
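	/* Fixed-point blend of fresh entropy with the previous output:
	 * answer = ((2^32 - rho) * value + rho * last) >> 32,
	 * so rho == 0 gives pure noise and rho near 2^32 mostly
	 * repeats the previous value.
	 */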
	answer = (value * ((1ull<<32) - rho) + state->last * rho) >> 32;
	state->last = answer;
	return answer;
}

/* tabledist - return a pseudo-randomly distributed value with mean mu and
 * std deviation sigma.  Uses table lookup to approximate the desired
 * distribution, and a uniformly-distributed pseudo-random source.
 */
static psched_tdiff_t tabledist(psched_tdiff_t mu, psched_tdiff_t sigma,
				struct crndstate *state,
				const struct disttable *dist)
{
	psched_tdiff_t x;
	long t;
	u32 rnd;

	if (sigma == 0)
		return mu;

	rnd = get_crandom(state);

	/* default uniform distribution */
	if (dist == NULL)
		return (rnd % (2*sigma)) - sigma + mu;

	t = dist->table[rnd % dist->size];
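	/* Compute sigma * t / NETEM_DIST_SCALE without overflow by
	 * splitting sigma into quotient and remainder parts; the
	 * +/- NETEM_DIST_SCALE/2 adjustment below rounds the division
	 * to the nearest integer instead of truncating.
	 */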
	x = (sigma % NETEM_DIST_SCALE) * t;
	if (x >= 0)
		x += NETEM_DIST_SCALE/2;
	else
		x -= NETEM_DIST_SCALE/2;

	return x / NETEM_DIST_SCALE + (sigma / NETEM_DIST_SCALE) * t + mu;
}

/*
 * Insert one skb into qdisc.
 * Note: parent depends on return value to account for queue length.
 *	NET_XMIT_DROP: queue length didn't change.
 *	NET_XMIT_SUCCESS: one skb was queued.
 */
static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	/* We don't fill cb now as skb_unshare() may invalidate it */
	struct netem_skb_cb *cb;
	struct sk_buff *skb2;
	int ret;
	int count = 1;

	pr_debug("netem_enqueue skb=%p\n", skb);

	/* Random duplication */
	if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
		++count;

	/* Random packet drop: 0 => none, ~0 => all */
	if (q->loss && q->loss >= get_crandom(&q->loss_cor))
		--count;

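	/* A packet may be picked for both duplication and loss; the two
	 * adjustments cancel and exactly one copy is queued.
	 */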
	if (count == 0) {
		sch->qstats.drops++;
		kfree_skb(skb);
		return NET_XMIT_BYPASS;
	}

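	/*
	 * Orphan the skb now: an emulated delay can hold the packet for
	 * a long time, and we don't want it pinning the sending socket's
	 * buffer accounting meanwhile (which would throttle the sender
	 * and distort the emulation).
	 */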
	skb_orphan(skb);

	/*
	 * If we need to duplicate the packet, re-insert it at the top of
	 * the qdisc tree, since the parent queuer expects that only one
	 * skb will be queued.
	 */
	if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
		struct Qdisc *rootq = sch->dev->qdisc;
		u32 dupsave = q->duplicate; /* prevent duplicating a dup... */
		q->duplicate = 0;

		rootq->enqueue(skb2, rootq);
		q->duplicate = dupsave;
	}

	/*
	 * Randomized packet corruption.
	 * Make a copy if needed, since we are going to modify the data.
	 * If the packet is going to be hardware checksummed, then
	 * do it now in software before we mangle it.
	 */
	if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
		if (!(skb = skb_unshare(skb, GFP_ATOMIC))
		    || (skb->ip_summed == CHECKSUM_PARTIAL
			&& skb_checksum_help(skb))) {
			sch->qstats.drops++;
			return NET_XMIT_DROP;
		}

		skb->data[net_random() % skb_headlen(skb)] ^= 1<<(net_random() % 8);
	}

	cb = (struct netem_skb_cb *)skb->cb;
	if (q->gap == 0			/* not doing reordering */
	    || q->counter < q->gap	/* inside last reordering gap */
	    || q->reorder < get_crandom(&q->reorder_cor)) {
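		/* Common case: delay the packet; it becomes eligible to
		 * be sent at now + tabledist(latency, jitter).
		 */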
		psched_time_t now;
		psched_tdiff_t delay;

		delay = tabledist(q->latency, q->jitter,
				  &q->delay_cor, q->delay_dist);

		now = psched_get_time();
		cb->time_to_send = now + delay;
		++q->counter;
		ret = q->qdisc->enqueue(skb, q->qdisc);
	} else {
		/*
		 * Do re-ordering by putting one out of N packets at the front
		 * of the queue.
		 */
		cb->time_to_send = psched_get_time();
		q->counter = 0;
		ret = q->qdisc->ops->requeue(skb, q->qdisc);
	}

	if (likely(ret == NET_XMIT_SUCCESS)) {
		sch->q.qlen++;
		sch->bstats.bytes += skb->len;
		sch->bstats.packets++;
	} else
		sch->qstats.drops++;

	pr_debug("netem: enqueue ret %d\n", ret);
	return ret;
}

/* Requeue packets but don't change time stamp */
static int netem_requeue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	int ret;

	if ((ret = q->qdisc->ops->requeue(skb, q->qdisc)) == 0) {
		sch->q.qlen++;
		sch->qstats.requeues++;
	}

	return ret;
}

static unsigned int netem_drop(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	unsigned int len = 0;

	if (q->qdisc->ops->drop && (len = q->qdisc->ops->drop(q->qdisc)) != 0) {
		sch->q.qlen--;
		sch->qstats.drops++;
	}
	return len;
}

static struct sk_buff *netem_dequeue(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

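	/* TCQ_F_THROTTLED is set while the watchdog timer is pending;
	 * until it fires, nothing in the queue can be due yet.
	 */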
	smp_mb();
	if (sch->flags & TCQ_F_THROTTLED)
		return NULL;

	skb = q->qdisc->dequeue(q->qdisc);
	if (skb) {
		const struct netem_skb_cb *cb
			= (const struct netem_skb_cb *)skb->cb;
		psched_time_t now = psched_get_time();

		/* is it time to send this packet? */
		if (cb->time_to_send <= now) {
			pr_debug("netem_dequeue: return skb=%p\n", skb);
			sch->q.qlen--;
			return skb;
		}

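		/* Not due yet: put the packet back and arm the watchdog
		 * to fire at its time_to_send.
		 */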
		if (unlikely(q->qdisc->ops->requeue(skb, q->qdisc) != NET_XMIT_SUCCESS)) {
			qdisc_tree_decrease_qlen(q->qdisc, 1);
			sch->qstats.drops++;
			printk(KERN_ERR "netem: %s could not requeue\n",
			       q->qdisc->ops->id);
		}

		qdisc_watchdog_schedule(&q->watchdog, cb->time_to_send);
	}

	return NULL;
}

static void netem_reset(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_reset(q->qdisc);
	sch->q.qlen = 0;
	qdisc_watchdog_cancel(&q->watchdog);
}

/* Pass size change message down to embedded FIFO */
static int set_fifo_limit(struct Qdisc *q, int limit)
{
	struct rtattr *rta;
	int ret = -ENOMEM;

	/* Hack to avoid sending change message to non-FIFO */
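	/* (matching "fifo" at offset 1 covers bfifo, pfifo and tfifo) */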
	if (strncmp(q->ops->id + 1, "fifo", 4) != 0)
		return 0;

	rta = kmalloc(RTA_LENGTH(sizeof(struct tc_fifo_qopt)), GFP_KERNEL);
	if (rta) {
		rta->rta_type = RTM_NEWQDISC;
		rta->rta_len = RTA_LENGTH(sizeof(struct tc_fifo_qopt));
		((struct tc_fifo_qopt *)RTA_DATA(rta))->limit = limit;

		ret = q->ops->change(q, rta);
		kfree(rta);
	}
	return ret;
}

/*
 * Distribution data is a variable size payload containing
 * signed 16 bit values.
 */
static int get_dist_table(struct Qdisc *sch, const struct rtattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	unsigned long n = RTA_PAYLOAD(attr)/sizeof(__s16);
	const __s16 *data = RTA_DATA(attr);
	struct disttable *d;
	int i;

	if (n > 65536)
		return -EINVAL;

	d = kmalloc(sizeof(*d) + n*sizeof(d->table[0]), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	d->size = n;
	for (i = 0; i < n; i++)
		d->table[i] = data[i];

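	/* Swap the new table in under the device queue lock, so enqueue
	 * never sees a partially initialized table; the old table is
	 * freed once the lock is dropped.
	 */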
	spin_lock_bh(&sch->dev->queue_lock);
	d = xchg(&q->delay_dist, d);
	spin_unlock_bh(&sch->dev->queue_lock);

	kfree(d);
	return 0;
}

static int get_correlation(struct Qdisc *sch, const struct rtattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct tc_netem_corr *c = RTA_DATA(attr);

	if (RTA_PAYLOAD(attr) != sizeof(*c))
		return -EINVAL;

	init_crandom(&q->delay_cor, c->delay_corr);
	init_crandom(&q->loss_cor, c->loss_corr);
	init_crandom(&q->dup_cor, c->dup_corr);
	return 0;
}

static int get_reorder(struct Qdisc *sch, const struct rtattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct tc_netem_reorder *r = RTA_DATA(attr);

	if (RTA_PAYLOAD(attr) != sizeof(*r))
		return -EINVAL;

	q->reorder = r->probability;
	init_crandom(&q->reorder_cor, r->correlation);
	return 0;
}

static int get_corrupt(struct Qdisc *sch, const struct rtattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct tc_netem_corrupt *r = RTA_DATA(attr);

	if (RTA_PAYLOAD(attr) != sizeof(*r))
		return -EINVAL;

	q->corrupt = r->probability;
	init_crandom(&q->corrupt_cor, r->correlation);
	return 0;
}

/* Parse netlink message to set options */
static int netem_change(struct Qdisc *sch, struct rtattr *opt)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct tc_netem_qopt *qopt;
	int ret;

	if (opt == NULL || RTA_PAYLOAD(opt) < sizeof(*qopt))
		return -EINVAL;

	qopt = RTA_DATA(opt);
	ret = set_fifo_limit(q->qdisc, qopt->limit);
	if (ret) {
		pr_debug("netem: can't set fifo limit\n");
		return ret;
	}

	q->latency = qopt->latency;
	q->jitter = qopt->jitter;
	q->limit = qopt->limit;
	q->gap = qopt->gap;
	q->counter = 0;
	q->loss = qopt->loss;
	q->duplicate = qopt->duplicate;

	/* For compatibility with earlier versions:
	 * if gap is set, assume 100% reorder probability.
	 */
	if (q->gap)
		q->reorder = ~0;

	/* Handle nested options after initial queue options.
	 * Should have put all options in nested format but too late now.
	 */
	if (RTA_PAYLOAD(opt) > sizeof(*qopt)) {
		struct rtattr *tb[TCA_NETEM_MAX];
		if (rtattr_parse(tb, TCA_NETEM_MAX,
				 RTA_DATA(opt) + sizeof(*qopt),
				 RTA_PAYLOAD(opt) - sizeof(*qopt)))
			return -EINVAL;

		if (tb[TCA_NETEM_CORR-1]) {
			ret = get_correlation(sch, tb[TCA_NETEM_CORR-1]);
			if (ret)
				return ret;
		}

		if (tb[TCA_NETEM_DELAY_DIST-1]) {
			ret = get_dist_table(sch, tb[TCA_NETEM_DELAY_DIST-1]);
			if (ret)
				return ret;
		}

		if (tb[TCA_NETEM_REORDER-1]) {
			ret = get_reorder(sch, tb[TCA_NETEM_REORDER-1]);
			if (ret)
				return ret;
		}

		if (tb[TCA_NETEM_CORRUPT-1]) {
			ret = get_corrupt(sch, tb[TCA_NETEM_CORRUPT-1]);
			if (ret)
				return ret;
		}
	}

	return 0;
}

/*
 * Special case version of FIFO queue for use by netem.
 * It queues packets in order of the time_to_send stamp in each skb's cb.
 */
struct fifo_sched_data {
	u32 limit;
	psched_time_t oldest;
};

static int tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
{
	struct fifo_sched_data *q = qdisc_priv(sch);
	struct sk_buff_head *list = &sch->q;
	psched_time_t tnext = ((struct netem_skb_cb *)nskb->cb)->time_to_send;
	struct sk_buff *skb;

	if (likely(skb_queue_len(list) < q->limit)) {
		/* Optimize for add at tail */
		if (likely(skb_queue_empty(list) || tnext >= q->oldest)) {
			q->oldest = tnext;
			return qdisc_enqueue_tail(nskb, sch);
		}

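		/* Walk from the tail, since new packets usually belong
		 * near it; stop at the first packet due no later than
		 * tnext and insert behind it, keeping the queue sorted
		 * by time_to_send.
		 */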
		skb_queue_reverse_walk(list, skb) {
			const struct netem_skb_cb *cb
				= (const struct netem_skb_cb *)skb->cb;

			if (tnext >= cb->time_to_send)
				break;
		}

		__skb_queue_after(list, skb, nskb);

		sch->qstats.backlog += nskb->len;
		sch->bstats.bytes += nskb->len;
		sch->bstats.packets++;

		return NET_XMIT_SUCCESS;
	}

	return qdisc_reshape_fail(nskb, sch);
}

static int tfifo_init(struct Qdisc *sch, struct rtattr *opt)
{
	struct fifo_sched_data *q = qdisc_priv(sch);

	if (opt) {
		struct tc_fifo_qopt *ctl = RTA_DATA(opt);
		if (RTA_PAYLOAD(opt) < sizeof(*ctl))
			return -EINVAL;

		q->limit = ctl->limit;
	} else
		q->limit = max_t(u32, sch->dev->tx_queue_len, 1);

	q->oldest = PSCHED_PASTPERFECT;
	return 0;
}

static int tfifo_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct fifo_sched_data *q = qdisc_priv(sch);
	struct tc_fifo_qopt opt = { .limit = q->limit };

	RTA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
	return skb->len;

rtattr_failure:
	return -1;
}

static struct Qdisc_ops tfifo_qdisc_ops __read_mostly = {
	.id		=	"tfifo",
	.priv_size	=	sizeof(struct fifo_sched_data),
	.enqueue	=	tfifo_enqueue,
	.dequeue	=	qdisc_dequeue_head,
	.requeue	=	qdisc_requeue,
	.drop		=	qdisc_queue_drop,
	.init		=	tfifo_init,
	.reset		=	qdisc_reset_queue,
	.change		=	tfifo_init,
	.dump		=	tfifo_dump,
};

static int netem_init(struct Qdisc *sch, struct rtattr *opt)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	int ret;

	if (!opt)
		return -EINVAL;

	qdisc_watchdog_init(&q->watchdog, sch);

	q->qdisc = qdisc_create_dflt(sch->dev, &tfifo_qdisc_ops,
				     TC_H_MAKE(sch->handle, 1));
	if (!q->qdisc) {
		pr_debug("netem: qdisc create failed\n");
		return -ENOMEM;
	}

	ret = netem_change(sch, opt);
	if (ret) {
		pr_debug("netem: change failed\n");
		qdisc_destroy(q->qdisc);
	}
	return ret;
}

static void netem_destroy(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_watchdog_cancel(&q->watchdog);
	qdisc_destroy(q->qdisc);
	kfree(q->delay_dist);
}

static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	const struct netem_sched_data *q = qdisc_priv(sch);
	unsigned char *b = skb_tail_pointer(skb);
	struct rtattr *rta = (struct rtattr *) b;
	struct tc_netem_qopt qopt;
	struct tc_netem_corr cor;
	struct tc_netem_reorder reorder;
	struct tc_netem_corrupt corrupt;

	qopt.latency = q->latency;
	qopt.jitter = q->jitter;
	qopt.limit = q->limit;
	qopt.loss = q->loss;
	qopt.gap = q->gap;
	qopt.duplicate = q->duplicate;
	RTA_PUT(skb, TCA_OPTIONS, sizeof(qopt), &qopt);

	cor.delay_corr = q->delay_cor.rho;
	cor.loss_corr = q->loss_cor.rho;
	cor.dup_corr = q->dup_cor.rho;
	RTA_PUT(skb, TCA_NETEM_CORR, sizeof(cor), &cor);

	reorder.probability = q->reorder;
	reorder.correlation = q->reorder_cor.rho;
	RTA_PUT(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder);

	corrupt.probability = q->corrupt;
	corrupt.correlation = q->corrupt_cor.rho;
	RTA_PUT(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt);

	rta->rta_len = skb_tail_pointer(skb) - b;

	return skb->len;

rtattr_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int netem_dump_class(struct Qdisc *sch, unsigned long cl,
			    struct sk_buff *skb, struct tcmsg *tcm)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	if (cl != 1)	/* only one class */
		return -ENOENT;

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;

	return 0;
}

static int netem_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		       struct Qdisc **old)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	if (new == NULL)
		new = &noop_qdisc;

	sch_tree_lock(sch);
	*old = xchg(&q->qdisc, new);
	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
	qdisc_reset(*old);
	sch_tree_unlock(sch);

	return 0;
}

static struct Qdisc *netem_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	return q->qdisc;
}

static unsigned long netem_get(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void netem_put(struct Qdisc *sch, unsigned long arg)
{
}

static int netem_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
			      struct rtattr **tca, unsigned long *arg)
{
	return -ENOSYS;
}

static int netem_delete(struct Qdisc *sch, unsigned long arg)
{
	return -ENOSYS;
}

static void netem_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}

static struct tcf_proto **netem_find_tcf(struct Qdisc *sch, unsigned long cl)
{
	return NULL;
}

static const struct Qdisc_class_ops netem_class_ops = {
	.graft		=	netem_graft,
	.leaf		=	netem_leaf,
	.get		=	netem_get,
	.put		=	netem_put,
	.change		=	netem_change_class,
	.delete		=	netem_delete,
	.walk		=	netem_walk,
	.tcf_chain	=	netem_find_tcf,
	.dump		=	netem_dump_class,
};

static struct Qdisc_ops netem_qdisc_ops __read_mostly = {
	.id		=	"netem",
	.cl_ops		=	&netem_class_ops,
	.priv_size	=	sizeof(struct netem_sched_data),
	.enqueue	=	netem_enqueue,
	.dequeue	=	netem_dequeue,
	.requeue	=	netem_requeue,
	.drop		=	netem_drop,
	.init		=	netem_init,
	.reset		=	netem_reset,
	.destroy	=	netem_destroy,
	.change		=	netem_change,
	.dump		=	netem_dump,
	.owner		=	THIS_MODULE,
};

static int __init netem_module_init(void)
{
	pr_info("netem: version " VERSION "\n");
	return register_qdisc(&netem_qdisc_ops);
}

static void __exit netem_module_exit(void)
{
	unregister_qdisc(&netem_qdisc_ops);
}

module_init(netem_module_init)
module_exit(netem_module_exit)
MODULE_LICENSE("GPL");