/*
 * net/sched/sch_netem.c	Network emulator
 *
 * 		This program is free software; you can redistribute it and/or
 * 		modify it under the terms of the GNU General Public License
 * 		as published by the Free Software Foundation; either version
 * 		2 of the License.
 *
 * 		Many of the algorithms and ideas for this came from
 *		NIST Net which is not copyrighted.
 *
 * Authors:	Stephen Hemminger <shemminger@osdl.org>
 *		Catalin(ux aka Dino) BOIE <catab at umbrella dot ro>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>

#include <net/netlink.h>
#include <net/pkt_sched.h>

#define VERSION "1.2"

/*	Network Emulation Queuing algorithm.
	====================================

	Sources: [1] Mark Carson, Darrin Santay, "NIST Net - A Linux-based
		 Network Emulation Tool"
		 [2] Luigi Rizzo, DummyNet for FreeBSD

	 ----------------------------------------------------------------

	 This started out as a simple way to delay outgoing packets to
	 test TCP but has grown to include most of the functionality
	 of a full blown network emulator like NISTnet. It can delay
	 packets and add random jitter (and correlation). The random
	 distribution can also be loaded from a table to provide
	 normal, Pareto, or experimental curves. Packet loss,
	 duplication, and reordering can also be emulated.

	 This qdisc does not do classification; that can be handled by
	 layering other disciplines. It does not need to do bandwidth
	 control either, since that can be handled by using token
	 bucket or other rate control.

	 The simulator is limited by the Linux timer resolution
	 and will create packet bursts on the HZ boundary (1ms).
*/
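
/*	Example usage (a sketch, not part of the original file; exact
	option syntax depends on the iproute2 version installed):

	# 100ms delay, +/- 10ms jitter, 25% correlation between samples
	tc qdisc add dev eth0 root netem delay 100ms 10ms 25%

	# additionally drop 0.5% and duplicate 1% of packets
	tc qdisc change dev eth0 root netem delay 100ms 10ms 25% \
		loss 0.5% duplicate 1%
*/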

struct netem_sched_data {
	struct Qdisc	*qdisc;
	struct qdisc_watchdog watchdog;

	psched_tdiff_t latency;
	psched_tdiff_t jitter;

	u32 loss;
	u32 limit;
	u32 counter;
	u32 gap;
	u32 duplicate;
	u32 reorder;
	u32 corrupt;

	struct crndstate {
		u32 last;
		u32 rho;
	} delay_cor, loss_cor, dup_cor, reorder_cor, corrupt_cor;

	struct disttable {
		u32  size;
		s16 table[0];
	} *delay_dist;
};

/* Time stamp put into socket buffer control block */
struct netem_skb_cb {
	psched_time_t	time_to_send;
};

/* init_crandom - initialize correlated random number generator
 * Use entropy source for initial seed.
 */
static void init_crandom(struct crndstate *state, unsigned long rho)
{
	state->rho = rho;
	state->last = net_random();
}

/* get_crandom - correlated random number generator
 * Next number depends on last value.
 * rho is scaled to avoid floating point.
 */
static u32 get_crandom(struct crndstate *state)
{
	u64 value, rho;
	unsigned long answer;

	if (state->rho == 0)	/* no correlation */
		return net_random();

	value = net_random();
	rho = (u64)state->rho + 1;
	answer = (value * ((1ull<<32) - rho) + state->last * rho) >> 32;
	state->last = answer;
	return answer;
}
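
/* Worked example (a sketch, not part of the original file): with
 * rho = 0xC0000000 (~0.75 scaled to 2^32), the update above computes
 * roughly answer = 0.25 * value + 0.75 * last, i.e. an exponentially
 * weighted blend of the fresh uniform sample and the previous output,
 * done entirely in fixed point as
 * ((2^32 - rho) * value + rho * last) >> 32.
 */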

/* tabledist - return a pseudo-randomly distributed value with mean mu and
 * std deviation sigma.  Uses table lookup to approximate the desired
 * distribution, and a uniformly-distributed pseudo-random source.
 */
static psched_tdiff_t tabledist(psched_tdiff_t mu, psched_tdiff_t sigma,
				struct crndstate *state,
				const struct disttable *dist)
{
	psched_tdiff_t x;
	long t;
	u32 rnd;

	if (sigma == 0)
		return mu;

	rnd = get_crandom(state);

	/* default uniform distribution */
	if (dist == NULL)
		return (rnd % (2*sigma)) - sigma + mu;

	t = dist->table[rnd % dist->size];
	x = (sigma % NETEM_DIST_SCALE) * t;
	if (x >= 0)
		x += NETEM_DIST_SCALE/2;
	else
		x -= NETEM_DIST_SCALE/2;

	return x / NETEM_DIST_SCALE + (sigma / NETEM_DIST_SCALE) * t + mu;
}
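
/* Numeric sketch (not part of the original file): the return value is
 * effectively mu + sigma * t / NETEM_DIST_SCALE, with t a signed table
 * entry expressing the deviation in units of NETEM_DIST_SCALE.  sigma
 * is split into (sigma / SCALE) and (sigma % SCALE) so the multiply by
 * t cannot overflow, and SCALE/2 is added to (or subtracted from) the
 * remainder term so the division rounds to nearest rather than toward
 * zero.
 */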

/*
 * Insert one skb into qdisc.
 * Note: parent depends on return value to account for queue length.
 *	NET_XMIT_DROP: queue length didn't change.
 *	NET_XMIT_SUCCESS: one skb was queued.
 */
static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	/* We don't fill cb now as skb_unshare() may invalidate it */
	struct netem_skb_cb *cb;
	struct sk_buff *skb2;
	int ret;
	int count = 1;

	pr_debug("netem_enqueue skb=%p\n", skb);

	/* Random duplication */
	if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
		++count;

	/* Random packet drop 0 => none, ~0 => all */
	if (q->loss && q->loss >= get_crandom(&q->loss_cor))
		--count;

	if (count == 0) {
		sch->qstats.drops++;
		kfree_skb(skb);
		return NET_XMIT_BYPASS;
	}

	skb_orphan(skb);

	/*
	 * If we need to duplicate packet, then re-insert at top of the
	 * qdisc tree, since parent queuer expects that only one
	 * skb will be queued.
	 */
	if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
		struct Qdisc *rootq = sch->dev->qdisc;
		u32 dupsave = q->duplicate; /* prevent duplicating a dup... */
		q->duplicate = 0;

		rootq->enqueue(skb2, rootq);
		q->duplicate = dupsave;
	}

	/*
	 * Randomized packet corruption.
	 * Make a copy if needed since we are going to modify the data.
	 * If the packet is going to be hardware checksummed, then
	 * do it now in software before we mangle it.
	 */
	if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
		if (!(skb = skb_unshare(skb, GFP_ATOMIC))
		    || (skb->ip_summed == CHECKSUM_PARTIAL
			&& skb_checksum_help(skb))) {
			sch->qstats.drops++;
			return NET_XMIT_DROP;
		}

		/* flip a single random bit somewhere in the linear data */
		skb->data[net_random() % skb_headlen(skb)] ^= 1<<(net_random() % 8);
	}

	cb = (struct netem_skb_cb *)skb->cb;
	if (q->gap == 0			/* not doing reordering */
	    || q->counter < q->gap	/* inside last reordering gap */
	    || q->reorder < get_crandom(&q->reorder_cor)) {
		psched_time_t now;
		psched_tdiff_t delay;

		delay = tabledist(q->latency, q->jitter,
				  &q->delay_cor, q->delay_dist);

		now = psched_get_time();
		cb->time_to_send = now + delay;
		++q->counter;
		ret = q->qdisc->enqueue(skb, q->qdisc);
	} else {
		/*
		 * Do re-ordering by putting one out of N packets at the front
		 * of the queue.
		 */
		cb->time_to_send = psched_get_time();
		q->counter = 0;
		ret = q->qdisc->ops->requeue(skb, q->qdisc);
	}

	if (likely(ret == NET_XMIT_SUCCESS)) {
		sch->q.qlen++;
		sch->bstats.bytes += skb->len;
		sch->bstats.packets++;
	} else
		sch->qstats.drops++;

	pr_debug("netem: enqueue ret %d\n", ret);
	return ret;
}
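
/* Reordering sketch (an illustration, not part of the original file):
 * with gap 5 and reorder probability 25%, the first five packets take
 * the delayed path while q->counter climbs to the gap; after that each
 * packet is a reorder candidate, and with probability ~25% it is put
 * at the head of the queue with an immediate time_to_send (resetting
 * the counter), letting it overtake packets still waiting out their
 * delay.
 */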

/* Requeue packets but don't change time stamp */
static int netem_requeue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	int ret;

	if ((ret = q->qdisc->ops->requeue(skb, q->qdisc)) == 0) {
		sch->q.qlen++;
		sch->qstats.requeues++;
	}

	return ret;
}

static unsigned int netem_drop(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	unsigned int len = 0;

	if (q->qdisc->ops->drop && (len = q->qdisc->ops->drop(q->qdisc)) != 0) {
		sch->q.qlen--;
		sch->qstats.drops++;
	}
	return len;
}

static struct sk_buff *netem_dequeue(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

	smp_mb();
	if (sch->flags & TCQ_F_THROTTLED)
		return NULL;

	skb = q->qdisc->dequeue(q->qdisc);
	if (skb) {
		const struct netem_skb_cb *cb
			= (const struct netem_skb_cb *)skb->cb;
		psched_time_t now = psched_get_time();

		/* has the packet's send time been reached? */
		if (cb->time_to_send <= now) {
			pr_debug("netem_dequeue: return skb=%p\n", skb);
			sch->q.qlen--;
			return skb;
		}

		if (unlikely(q->qdisc->ops->requeue(skb, q->qdisc) != NET_XMIT_SUCCESS)) {
			qdisc_tree_decrease_qlen(q->qdisc, 1);
			sch->qstats.drops++;
			printk(KERN_ERR "netem: %s could not requeue\n",
			       q->qdisc->ops->id);
		}

		qdisc_watchdog_schedule(&q->watchdog, cb->time_to_send);
	}

	return NULL;
}

static void netem_reset(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_reset(q->qdisc);
	sch->q.qlen = 0;
	qdisc_watchdog_cancel(&q->watchdog);
}

/* Pass size change message down to embedded FIFO */
static int set_fifo_limit(struct Qdisc *q, int limit)
{
	struct nlattr *nla;
	int ret = -ENOMEM;

	/* Hack to avoid sending change message to non-FIFO */
	if (strncmp(q->ops->id + 1, "fifo", 4) != 0)
		return 0;

	nla = kmalloc(nla_attr_size(sizeof(struct tc_fifo_qopt)), GFP_KERNEL);
	if (nla) {
		nla->nla_type = RTM_NEWQDISC;
		nla->nla_len = nla_attr_size(sizeof(struct tc_fifo_qopt));
		((struct tc_fifo_qopt *)nla_data(nla))->limit = limit;

		ret = q->ops->change(q, nla);
		kfree(nla);
	}
	return ret;
}

/*
 * Distribution data is a variable size payload containing
 * signed 16 bit values.
 */
static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	unsigned long n = nla_len(attr)/sizeof(__s16);
	const __s16 *data = nla_data(attr);
	struct disttable *d;
	int i;

	if (n > 65536)
		return -EINVAL;

	d = kmalloc(sizeof(*d) + n*sizeof(d->table[0]), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	d->size = n;
	for (i = 0; i < n; i++)
		d->table[i] = data[i];

	spin_lock_bh(&sch->dev->queue_lock);
	d = xchg(&q->delay_dist, d);
	spin_unlock_bh(&sch->dev->queue_lock);

	kfree(d);
	return 0;
}

static int get_correlation(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct tc_netem_corr *c = nla_data(attr);

	init_crandom(&q->delay_cor, c->delay_corr);
	init_crandom(&q->loss_cor, c->loss_corr);
	init_crandom(&q->dup_cor, c->dup_corr);
	return 0;
}

static int get_reorder(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct tc_netem_reorder *r = nla_data(attr);

	q->reorder = r->probability;
	init_crandom(&q->reorder_cor, r->correlation);
	return 0;
}

static int get_corrupt(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct tc_netem_corrupt *r = nla_data(attr);

	q->corrupt = r->probability;
	init_crandom(&q->corrupt_cor, r->correlation);
	return 0;
}

static const struct nla_policy netem_policy[TCA_NETEM_MAX + 1] = {
	[TCA_NETEM_CORR]	= { .len = sizeof(struct tc_netem_corr) },
	[TCA_NETEM_REORDER]	= { .len = sizeof(struct tc_netem_reorder) },
	[TCA_NETEM_CORRUPT]	= { .len = sizeof(struct tc_netem_corrupt) },
};

/* Parse netlink message to set options */
static int netem_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_NETEM_MAX + 1];
	struct tc_netem_qopt *qopt;
	int ret;

	if (opt == NULL)
		return -EINVAL;

	ret = nla_parse_nested_compat(tb, TCA_NETEM_MAX, opt, netem_policy,
				      qopt, sizeof(*qopt));
	if (ret < 0)
		return ret;

	ret = set_fifo_limit(q->qdisc, qopt->limit);
	if (ret) {
		pr_debug("netem: can't set fifo limit\n");
		return ret;
	}

	q->latency = qopt->latency;
	q->jitter = qopt->jitter;
	q->limit = qopt->limit;
	q->gap = qopt->gap;
	q->counter = 0;
	q->loss = qopt->loss;
	q->duplicate = qopt->duplicate;

	/* For compatibility with earlier versions:
	 * if gap is set, assume 100% reorder probability.
	 */

	if (q->gap)
		q->reorder = ~0;

	if (tb[TCA_NETEM_CORR]) {
		ret = get_correlation(sch, tb[TCA_NETEM_CORR]);
		if (ret)
			return ret;
	}

	if (tb[TCA_NETEM_DELAY_DIST]) {
		ret = get_dist_table(sch, tb[TCA_NETEM_DELAY_DIST]);
		if (ret)
			return ret;
	}

	if (tb[TCA_NETEM_REORDER]) {
		ret = get_reorder(sch, tb[TCA_NETEM_REORDER]);
		if (ret)
			return ret;
	}

	if (tb[TCA_NETEM_CORRUPT]) {
		ret = get_corrupt(sch, tb[TCA_NETEM_CORRUPT]);
		if (ret)
			return ret;
	}

	return 0;
}

/*
 * Special case version of FIFO queue for use by netem.
 * It queues packets in order of the time stamps in their control blocks.
 */
struct fifo_sched_data {
	u32 limit;
	psched_time_t oldest;
};

static int tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
{
	struct fifo_sched_data *q = qdisc_priv(sch);
	struct sk_buff_head *list = &sch->q;
	psched_time_t tnext = ((struct netem_skb_cb *)nskb->cb)->time_to_send;
	struct sk_buff *skb;

	if (likely(skb_queue_len(list) < q->limit)) {
		/* Optimize for add at tail */
		if (likely(skb_queue_empty(list) || tnext >= q->oldest)) {
			q->oldest = tnext;
			return qdisc_enqueue_tail(nskb, sch);
		}

		skb_queue_reverse_walk(list, skb) {
			const struct netem_skb_cb *cb
				= (const struct netem_skb_cb *)skb->cb;

			if (tnext >= cb->time_to_send)
				break;
		}

		__skb_queue_after(list, skb, nskb);

		sch->qstats.backlog += nskb->len;
		sch->bstats.bytes += nskb->len;
		sch->bstats.packets++;

		return NET_XMIT_SUCCESS;
	}

	return qdisc_reshape_fail(nskb, sch);
}
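
/* Insertion sketch (an illustration, not part of the original file):
 * with queued send times [10, 20, 30] and an incoming skb whose
 * time_to_send is 25, the reverse walk stops at the entry stamped 20
 * and __skb_queue_after() places the new skb between 20 and 30.  The
 * queue thus stays sorted by send time, so qdisc_dequeue_head() always
 * yields the earliest deadline.
 */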

static int tfifo_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct fifo_sched_data *q = qdisc_priv(sch);

	if (opt) {
		struct tc_fifo_qopt *ctl = nla_data(opt);
		if (nla_len(opt) < sizeof(*ctl))
			return -EINVAL;

		q->limit = ctl->limit;
	} else
		q->limit = max_t(u32, sch->dev->tx_queue_len, 1);

	q->oldest = PSCHED_PASTPERFECT;
	return 0;
}

static int tfifo_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct fifo_sched_data *q = qdisc_priv(sch);
	struct tc_fifo_qopt opt = { .limit = q->limit };

	NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
	return skb->len;

nla_put_failure:
	return -1;
}

static struct Qdisc_ops tfifo_qdisc_ops __read_mostly = {
	.id		=	"tfifo",
	.priv_size	=	sizeof(struct fifo_sched_data),
	.enqueue	=	tfifo_enqueue,
	.dequeue	=	qdisc_dequeue_head,
	.requeue	=	qdisc_requeue,
	.drop		=	qdisc_queue_drop,
	.init		=	tfifo_init,
	.reset		=	qdisc_reset_queue,
	.change		=	tfifo_init,
	.dump		=	tfifo_dump,
};

static int netem_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	int ret;

	if (!opt)
		return -EINVAL;

	qdisc_watchdog_init(&q->watchdog, sch);

	q->qdisc = qdisc_create_dflt(sch->dev, &tfifo_qdisc_ops,
				     TC_H_MAKE(sch->handle, 1));
	if (!q->qdisc) {
		pr_debug("netem: qdisc create failed\n");
		return -ENOMEM;
	}

	ret = netem_change(sch, opt);
	if (ret) {
		pr_debug("netem: change failed\n");
		qdisc_destroy(q->qdisc);
	}
	return ret;
}

static void netem_destroy(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_watchdog_cancel(&q->watchdog);
	qdisc_destroy(q->qdisc);
	kfree(q->delay_dist);
}

static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	const struct netem_sched_data *q = qdisc_priv(sch);
	unsigned char *b = skb_tail_pointer(skb);
	struct nlattr *nla = (struct nlattr *) b;
	struct tc_netem_qopt qopt;
	struct tc_netem_corr cor;
	struct tc_netem_reorder reorder;
	struct tc_netem_corrupt corrupt;

	qopt.latency = q->latency;
	qopt.jitter = q->jitter;
	qopt.limit = q->limit;
	qopt.loss = q->loss;
	qopt.gap = q->gap;
	qopt.duplicate = q->duplicate;
	NLA_PUT(skb, TCA_OPTIONS, sizeof(qopt), &qopt);

	cor.delay_corr = q->delay_cor.rho;
	cor.loss_corr = q->loss_cor.rho;
	cor.dup_corr = q->dup_cor.rho;
	NLA_PUT(skb, TCA_NETEM_CORR, sizeof(cor), &cor);

	reorder.probability = q->reorder;
	reorder.correlation = q->reorder_cor.rho;
	NLA_PUT(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder);

	corrupt.probability = q->corrupt;
	corrupt.correlation = q->corrupt_cor.rho;
	NLA_PUT(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt);

	nla->nla_len = skb_tail_pointer(skb) - b;

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int netem_dump_class(struct Qdisc *sch, unsigned long cl,
			    struct sk_buff *skb, struct tcmsg *tcm)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	if (cl != 1)	/* only one class */
		return -ENOENT;

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;

	return 0;
}

static int netem_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		       struct Qdisc **old)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	if (new == NULL)
		new = &noop_qdisc;

	sch_tree_lock(sch);
	*old = xchg(&q->qdisc, new);
	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
	qdisc_reset(*old);
	sch_tree_unlock(sch);

	return 0;
}

static struct Qdisc *netem_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	return q->qdisc;
}

static unsigned long netem_get(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void netem_put(struct Qdisc *sch, unsigned long arg)
{
}

static int netem_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
			      struct nlattr **tca, unsigned long *arg)
{
	return -ENOSYS;
}

static int netem_delete(struct Qdisc *sch, unsigned long arg)
{
	return -ENOSYS;
}

static void netem_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}

static struct tcf_proto **netem_find_tcf(struct Qdisc *sch, unsigned long cl)
{
	return NULL;
}

static const struct Qdisc_class_ops netem_class_ops = {
	.graft		=	netem_graft,
	.leaf		=	netem_leaf,
	.get		=	netem_get,
	.put		=	netem_put,
	.change		=	netem_change_class,
	.delete		=	netem_delete,
	.walk		=	netem_walk,
	.tcf_chain	=	netem_find_tcf,
	.dump		=	netem_dump_class,
};

static struct Qdisc_ops netem_qdisc_ops __read_mostly = {
	.id		=	"netem",
	.cl_ops		=	&netem_class_ops,
	.priv_size	=	sizeof(struct netem_sched_data),
	.enqueue	=	netem_enqueue,
	.dequeue	=	netem_dequeue,
	.requeue	=	netem_requeue,
	.drop		=	netem_drop,
	.init		=	netem_init,
	.reset		=	netem_reset,
	.destroy	=	netem_destroy,
	.change		=	netem_change,
	.dump		=	netem_dump,
	.owner		=	THIS_MODULE,
};


static int __init netem_module_init(void)
{
	pr_info("netem: version " VERSION "\n");
	return register_qdisc(&netem_qdisc_ops);
}

static void __exit netem_module_exit(void)
{
	unregister_qdisc(&netem_qdisc_ops);
}

module_init(netem_module_init)
module_exit(netem_module_exit)
MODULE_LICENSE("GPL");