/*
 * net/sched/sch_sfb.c	  Stochastic Fair Blue
 *
 * Copyright (c) 2008-2011 Juliusz Chroboczek <jch@pps.jussieu.fr>
 * Copyright (c) 2011 Eric Dumazet <eric.dumazet@gmail.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * W. Feng, D. Kandlur, D. Saha, K. Shin. Blue:
 * A New Class of Active Queue Management Algorithms.
 * U. Michigan CSE-TR-387-99, April 1999.
 *
 * http://www.thefengs.com/wuchang/blue/CSE-TR-387-99.pdf
 *
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <net/ip.h>
#include <net/pkt_sched.h>
#include <net/inet_ecn.h>
#include <net/flow_keys.h>

/*
 * SFB uses two B[l][n] : L x N arrays of bins (L levels, N bins per level)
 * This implementation uses L = 8 and N = 16
 * This permits us to split one 32bit hash (provided per packet by rxhash or
 * external classifier) into 8 subhashes of 4 bits.
 */
#define SFB_BUCKET_SHIFT 4
#define SFB_NUMBUCKETS	(1 << SFB_BUCKET_SHIFT) /* N bins per Level */
#define SFB_BUCKET_MASK (SFB_NUMBUCKETS - 1)
#define SFB_LEVELS	(32 / SFB_BUCKET_SHIFT) /* L */
#define SFB_MAX_PROB	0xFFFF /* probabilities are Q0.16 fixed-point */

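/* Example of the hash split: hash 0xABCD1234 selects bin 0x4 at level 0,
 * 0x3 at level 1, ..., and 0xA at level 7 (low nibble is used first).
 */
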
/* The SFB algorithm maintains a virtual queue, named "bin", per bucket */
struct sfb_bucket {
	u16		qlen; /* length of virtual queue */
	u16		p_mark; /* marking probability */
};

/* We use double buffering right before a hash change
 * (Section 4.4 of SFB reference : moving hash functions)
 * During the warmup window, packets of flows detected as inelastic are
 * also accounted in the standby slot, so its bins are already trained
 * when sfb_swap_slot() makes it active.
 */
struct sfb_bins {
	u32		  perturbation; /* jhash perturbation */
	struct sfb_bucket bins[SFB_LEVELS][SFB_NUMBUCKETS];
};
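
/* Each bucket is 4 bytes, so one slot of bins is 8 * 16 * 4 = 512 bytes;
 * the two slots together use about 1 KB per qdisc instance.
 */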

struct sfb_sched_data {
	struct Qdisc	*qdisc;
	struct tcf_proto *filter_list;
	unsigned long	rehash_interval;
	unsigned long	warmup_time;	/* double buffering warmup time in jiffies */
	u32		max;		/* bucketdrop threshold on the min bin qlen */
	u32		bin_size;	/* maximum queue length per bin */
	u32		increment;	/* d1 */
	u32		decrement;	/* d2 */
	u32		limit;		/* HARD maximal queue length */
	u32		penalty_rate;
	u32		penalty_burst;
	u32		tokens_avail;
	unsigned long	rehash_time;
	unsigned long	token_time;

	u8		slot;		/* current active bins (0 or 1) */
	bool		double_buffering;
	struct sfb_bins bins[2];

	struct {
		u32	earlydrop;
		u32	penaltydrop;
		u32	bucketdrop;
		u32	queuedrop;
		u32	childdrop;	/* drops in child qdisc */
		u32	marked;		/* ECN mark */
	} stats;
};

/*
 * Each queued skb might be hashed into the bins of one or both slots.
 * We store the two hash values in skb_cb.
 * (A zero value means the corresponding slot was not used)
 */
struct sfb_skb_cb {
	u32 hashes[2];
};

static inline struct sfb_skb_cb *sfb_skb_cb(const struct sk_buff *skb)
{
	qdisc_cb_private_validate(skb, sizeof(struct sfb_skb_cb));
	return (struct sfb_skb_cb *)qdisc_skb_cb(skb)->data;
}

/*
 * If using the 'internal' SFB flow classifier, the hash comes from skb rxhash
 * If using an external classifier, the hash comes from the classid.
 */
static u32 sfb_hash(const struct sk_buff *skb, u32 slot)
{
	return sfb_skb_cb(skb)->hashes[slot];
}

/* Probabilities are coded as Q0.16 fixed-point values,
 * with 0xFFFF representing 65535/65536 (almost 1.0)
 * Addition and subtraction are saturating in [0, 65535]
 */
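/* Example: 0.5 is coded as 0x8000; prob_plus(0x8000, 0xC000) saturates
 * to 0xFFFF instead of wrapping, and prob_minus(0x4000, 0x8000) clamps
 * to 0.
 */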
static u32 prob_plus(u32 p1, u32 p2)
{
	u32 res = p1 + p2;

	return min_t(u32, res, SFB_MAX_PROB);
}

static u32 prob_minus(u32 p1, u32 p2)
{
	return p1 > p2 ? p1 - p2 : 0;
}

static void increment_one_qlen(u32 sfbhash, u32 slot, struct sfb_sched_data *q)
{
	int i;
	struct sfb_bucket *b = &q->bins[slot].bins[0][0];

	for (i = 0; i < SFB_LEVELS; i++) {
		u32 hash = sfbhash & SFB_BUCKET_MASK;

		sfbhash >>= SFB_BUCKET_SHIFT;
		if (b[hash].qlen < 0xFFFF)
			b[hash].qlen++;
		b += SFB_NUMBUCKETS; /* next level */
	}
}

static void increment_qlen(const struct sk_buff *skb, struct sfb_sched_data *q)
{
	u32 sfbhash;

	sfbhash = sfb_hash(skb, 0);
	if (sfbhash)
		increment_one_qlen(sfbhash, 0, q);

	sfbhash = sfb_hash(skb, 1);
	if (sfbhash)
		increment_one_qlen(sfbhash, 1, q);
}

static void decrement_one_qlen(u32 sfbhash, u32 slot,
			       struct sfb_sched_data *q)
{
	int i;
	struct sfb_bucket *b = &q->bins[slot].bins[0][0];

	for (i = 0; i < SFB_LEVELS; i++) {
		u32 hash = sfbhash & SFB_BUCKET_MASK;

		sfbhash >>= SFB_BUCKET_SHIFT;
		if (b[hash].qlen > 0)
			b[hash].qlen--;
		b += SFB_NUMBUCKETS; /* next level */
	}
}

static void decrement_qlen(const struct sk_buff *skb, struct sfb_sched_data *q)
{
	u32 sfbhash;

	sfbhash = sfb_hash(skb, 0);
	if (sfbhash)
		decrement_one_qlen(sfbhash, 0, q);

	sfbhash = sfb_hash(skb, 1);
	if (sfbhash)
		decrement_one_qlen(sfbhash, 1, q);
}

static void decrement_prob(struct sfb_bucket *b, struct sfb_sched_data *q)
{
	b->p_mark = prob_minus(b->p_mark, q->decrement);
}

static void increment_prob(struct sfb_bucket *b, struct sfb_sched_data *q)
{
	b->p_mark = prob_plus(b->p_mark, q->increment);
}

static void sfb_zero_all_buckets(struct sfb_sched_data *q)
{
	memset(&q->bins, 0, sizeof(q->bins));
}

/*
 * compute max qlen, max p_mark, and avg p_mark
 */
static u32 sfb_compute_qlen(u32 *prob_r, u32 *avgpm_r, const struct sfb_sched_data *q)
{
	int i;
	u32 qlen = 0, prob = 0, totalpm = 0;
	const struct sfb_bucket *b = &q->bins[q->slot].bins[0][0];

	for (i = 0; i < SFB_LEVELS * SFB_NUMBUCKETS; i++) {
		if (qlen < b->qlen)
			qlen = b->qlen;
		totalpm += b->p_mark;
		if (prob < b->p_mark)
			prob = b->p_mark;
		b++;
	}
	*prob_r = prob;
	*avgpm_r = totalpm / (SFB_LEVELS * SFB_NUMBUCKETS);
	return qlen;
}

static void sfb_init_perturbation(u32 slot, struct sfb_sched_data *q)
{
	q->bins[slot].perturbation = net_random();
}

/* Re-seed the perturbation of the slot being retired, then make the other
 * slot (trained during the warmup window) active.
 */
static void sfb_swap_slot(struct sfb_sched_data *q)
{
	sfb_init_perturbation(q->slot, q);
	q->slot ^= 1;
	q->double_buffering = false;
}

/* Non-elastic flows are allowed to use part of the bandwidth, expressed
 * in "penalty_rate" packets per second, with a "penalty_burst" packet burst
 */
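/* Example: with the default penalty_rate = 10 and penalty_burst = 20, an
 * inelastic flow idle for 500 ms earns 5 tokens (age * rate / HZ) and may
 * then send 5 packets; tokens never accumulate past the 20 packet burst.
 */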
/* Returns true when the packet should be penalty-dropped */
static bool sfb_rate_limit(struct sk_buff *skb, struct sfb_sched_data *q)
{
	if (q->penalty_rate == 0 || q->penalty_burst == 0)
		return true;

	if (q->tokens_avail < 1) {
		unsigned long age = min(10UL * HZ, jiffies - q->token_time);

		q->tokens_avail = (age * q->penalty_rate) / HZ;
		if (q->tokens_avail > q->penalty_burst)
			q->tokens_avail = q->penalty_burst;
		q->token_time = jiffies;
		if (q->tokens_avail < 1)
			return true;
	}

	q->tokens_avail--;
	return false;
}

static bool sfb_classify(struct sk_buff *skb, struct sfb_sched_data *q,
			 int *qerr, u32 *salt)
{
	struct tcf_result res;
	int result;

	result = tc_classify(skb, q->filter_list, &res);
	if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_STOLEN:
		case TC_ACT_QUEUED:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
			/* fall through */
		case TC_ACT_SHOT:
			return false;
		}
#endif
		*salt = TC_H_MIN(res.classid);
		return true;
	}
	return false;
}

static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct sfb_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;
	int i;
	u32 p_min = ~0;
	u32 minqlen = ~0;
	u32 r, slot, salt, sfbhash;
	int ret = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	struct flow_keys keys;

	if (unlikely(sch->q.qlen >= q->limit)) {
		sch->qstats.overlimits++;
		q->stats.queuedrop++;
		goto drop;
	}

	if (q->rehash_interval > 0) {
		unsigned long limit = q->rehash_time + q->rehash_interval;

		if (unlikely(time_after(jiffies, limit))) {
			sfb_swap_slot(q);
			q->rehash_time = jiffies;
		} else if (unlikely(!q->double_buffering && q->warmup_time > 0 &&
				    time_after(jiffies, limit - q->warmup_time))) {
			q->double_buffering = true;
		}
	}

	if (q->filter_list) {
		/* If using external classifiers, get result and record it. */
		if (!sfb_classify(skb, q, &ret, &salt))
			goto other_drop;
		keys.src = salt;
		keys.dst = 0;
		keys.ports = 0;
	} else {
		skb_flow_dissect(skb, &keys);
	}

	slot = q->slot;

	sfbhash = jhash_3words((__force u32)keys.dst,
			       (__force u32)keys.src,
			       (__force u32)keys.ports,
			       q->bins[slot].perturbation);
	if (!sfbhash)
		sfbhash = 1;
	sfb_skb_cb(skb)->hashes[slot] = sfbhash;

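	/* Blue update rules: an empty bin means its virtual queue is
	 * underutilized, so decrease its marking probability by d2; a bin
	 * at or above bin_size means congestion, so increase it by d1.
	 * Also track the minimum qlen and p_mark across all levels.
	 */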
	for (i = 0; i < SFB_LEVELS; i++) {
		u32 hash = sfbhash & SFB_BUCKET_MASK;
		struct sfb_bucket *b = &q->bins[slot].bins[i][hash];

		sfbhash >>= SFB_BUCKET_SHIFT;
		if (b->qlen == 0)
			decrement_prob(b, q);
		else if (b->qlen >= q->bin_size)
			increment_prob(b, q);
		if (minqlen > b->qlen)
			minqlen = b->qlen;
		if (p_min > b->p_mark)
			p_min = b->p_mark;
	}

	slot ^= 1;
	sfb_skb_cb(skb)->hashes[slot] = 0;

	if (unlikely(minqlen >= q->max)) {
		sch->qstats.overlimits++;
		q->stats.bucketdrop++;
		goto drop;
	}

	if (unlikely(p_min >= SFB_MAX_PROB)) {
		/* Inelastic flow */
		if (q->double_buffering) {
			sfbhash = jhash_3words((__force u32)keys.dst,
					       (__force u32)keys.src,
					       (__force u32)keys.ports,
					       q->bins[slot].perturbation);
			if (!sfbhash)
				sfbhash = 1;
			sfb_skb_cb(skb)->hashes[slot] = sfbhash;

			for (i = 0; i < SFB_LEVELS; i++) {
				u32 hash = sfbhash & SFB_BUCKET_MASK;
				struct sfb_bucket *b = &q->bins[slot].bins[i][hash];

				sfbhash >>= SFB_BUCKET_SHIFT;
				if (b->qlen == 0)
					decrement_prob(b, q);
				else if (b->qlen >= q->bin_size)
					increment_prob(b, q);
			}
		}
		if (sfb_rate_limit(skb, q)) {
			sch->qstats.overlimits++;
			q->stats.penaltydrop++;
			goto drop;
		}
		goto enqueue;
	}

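	/* Mark (or drop) with probability p_min: draw a uniform Q0.16
	 * value and compare.  Once p_min exceeds 1/2, early drops are
	 * phased in linearly, approaching certain drop as p_min nears 1.
	 */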
	r = net_random() & SFB_MAX_PROB;

	if (unlikely(r < p_min)) {
		if (unlikely(p_min > SFB_MAX_PROB / 2)) {
			/* If we're marking that many packets, then either
			 * this flow is unresponsive, or we're badly congested.
			 * In either case, we want to start dropping packets.
			 */
			if (r < (p_min - SFB_MAX_PROB / 2) * 2) {
				q->stats.earlydrop++;
				goto drop;
			}
		}
		if (INET_ECN_set_ce(skb)) {
			q->stats.marked++;
		} else {
			q->stats.earlydrop++;
			goto drop;
		}
	}

enqueue:
	ret = qdisc_enqueue(skb, child);
	if (likely(ret == NET_XMIT_SUCCESS)) {
		sch->q.qlen++;
		increment_qlen(skb, q);
	} else if (net_xmit_drop_count(ret)) {
		q->stats.childdrop++;
		sch->qstats.drops++;
	}
	return ret;

drop:
	qdisc_drop(skb, sch);
	return NET_XMIT_CN;
other_drop:
	if (ret & __NET_XMIT_BYPASS)
		sch->qstats.drops++;
	kfree_skb(skb);
	return ret;
}

static struct sk_buff *sfb_dequeue(struct Qdisc *sch)
{
	struct sfb_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;
	struct sk_buff *skb;

	skb = child->dequeue(q->qdisc);

	if (skb) {
		qdisc_bstats_update(sch, skb);
		sch->q.qlen--;
		decrement_qlen(skb, q);
	}

	return skb;
}

static struct sk_buff *sfb_peek(struct Qdisc *sch)
{
	struct sfb_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;

	return child->ops->peek(child);
}

/* No sfb_drop -- impossible since the child doesn't return the dropped skb. */

static void sfb_reset(struct Qdisc *sch)
{
	struct sfb_sched_data *q = qdisc_priv(sch);

	qdisc_reset(q->qdisc);
	sch->q.qlen = 0;
	q->slot = 0;
	q->double_buffering = false;
	sfb_zero_all_buckets(q);
	sfb_init_perturbation(0, q);
}

static void sfb_destroy(struct Qdisc *sch)
{
	struct sfb_sched_data *q = qdisc_priv(sch);

	tcf_destroy_chain(&q->filter_list);
	qdisc_destroy(q->qdisc);
}

static const struct nla_policy sfb_policy[TCA_SFB_MAX + 1] = {
	[TCA_SFB_PARMS]	= { .len = sizeof(struct tc_sfb_qopt) },
};

static const struct tc_sfb_qopt sfb_default_ops = {
	.rehash_interval = 600 * MSEC_PER_SEC,
	.warmup_time = 60 * MSEC_PER_SEC,
	.limit = 0,
	.max = 25,
	.bin_size = 20,
	.increment = (SFB_MAX_PROB + 500) / 1000, /* 0.1 % */
	.decrement = (SFB_MAX_PROB + 3000) / 6000, /* ~0.017 % */
	.penalty_rate = 10,
	.penalty_burst = 20,
};
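
/* With SFB_MAX_PROB == 0xFFFF, the defaults above give an increment of
 * 66/65536 per packet seen in a congested bin and a decrement of 11/65536
 * per packet seen in an empty bin, so p_mark grows about six times faster
 * than it decays.
 */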

static int sfb_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct sfb_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child;
	struct nlattr *tb[TCA_SFB_MAX + 1];
	const struct tc_sfb_qopt *ctl = &sfb_default_ops;
	u32 limit;
	int err;

	if (opt) {
		err = nla_parse_nested(tb, TCA_SFB_MAX, opt, sfb_policy);
		if (err < 0)
			return -EINVAL;

		if (tb[TCA_SFB_PARMS] == NULL)
			return -EINVAL;

		ctl = nla_data(tb[TCA_SFB_PARMS]);
	}

	limit = ctl->limit;
	if (limit == 0)
		limit = max_t(u32, qdisc_dev(sch)->tx_queue_len, 1);

	child = fifo_create_dflt(sch, &pfifo_qdisc_ops, limit);
	if (IS_ERR(child))
		return PTR_ERR(child);

	sch_tree_lock(sch);

	qdisc_tree_decrease_qlen(q->qdisc, q->qdisc->q.qlen);
	qdisc_destroy(q->qdisc);
	q->qdisc = child;

	q->rehash_interval = msecs_to_jiffies(ctl->rehash_interval);
	q->warmup_time = msecs_to_jiffies(ctl->warmup_time);
	q->rehash_time = jiffies;
	q->limit = limit;
	q->increment = ctl->increment;
	q->decrement = ctl->decrement;
	q->max = ctl->max;
	q->bin_size = ctl->bin_size;
	q->penalty_rate = ctl->penalty_rate;
	q->penalty_burst = ctl->penalty_burst;
	q->tokens_avail = ctl->penalty_burst;
	q->token_time = jiffies;

	q->slot = 0;
	q->double_buffering = false;
	sfb_zero_all_buckets(q);
	sfb_init_perturbation(0, q);
	sfb_init_perturbation(1, q);

	sch_tree_unlock(sch);

	return 0;
}

static int sfb_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct sfb_sched_data *q = qdisc_priv(sch);

	q->qdisc = &noop_qdisc;
	return sfb_change(sch, opt);
}

static int sfb_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct sfb_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts;
	struct tc_sfb_qopt opt = {
		.rehash_interval = jiffies_to_msecs(q->rehash_interval),
		.warmup_time = jiffies_to_msecs(q->warmup_time),
		.limit = q->limit,
		.max = q->max,
		.bin_size = q->bin_size,
		.increment = q->increment,
		.decrement = q->decrement,
		.penalty_rate = q->penalty_rate,
		.penalty_burst = q->penalty_burst,
	};

	sch->qstats.backlog = q->qdisc->qstats.backlog;
	opts = nla_nest_start(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;
	if (nla_put(skb, TCA_SFB_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;
	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}

static int sfb_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct sfb_sched_data *q = qdisc_priv(sch);
	struct tc_sfb_xstats st = {
		.earlydrop = q->stats.earlydrop,
		.penaltydrop = q->stats.penaltydrop,
		.bucketdrop = q->stats.bucketdrop,
		.queuedrop = q->stats.queuedrop,
		.childdrop = q->stats.childdrop,
		.marked = q->stats.marked,
	};

	st.maxqlen = sfb_compute_qlen(&st.maxprob, &st.avgprob, q);

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

static int sfb_dump_class(struct Qdisc *sch, unsigned long cl,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	return -ENOSYS;
}

static int sfb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old)
{
	struct sfb_sched_data *q = qdisc_priv(sch);

	if (new == NULL)
		new = &noop_qdisc;

	sch_tree_lock(sch);
	*old = q->qdisc;
	q->qdisc = new;
	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
	qdisc_reset(*old);
	sch_tree_unlock(sch);
	return 0;
}

static struct Qdisc *sfb_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct sfb_sched_data *q = qdisc_priv(sch);

	return q->qdisc;
}

static unsigned long sfb_get(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void sfb_put(struct Qdisc *sch, unsigned long arg)
{
}

static int sfb_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
			    struct nlattr **tca, unsigned long *arg)
{
	return -ENOSYS;
}

static int sfb_delete(struct Qdisc *sch, unsigned long cl)
{
	return -ENOSYS;
}

static void sfb_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}

static struct tcf_proto **sfb_find_tcf(struct Qdisc *sch, unsigned long cl)
{
	struct sfb_sched_data *q = qdisc_priv(sch);

	if (cl)
		return NULL;
	return &q->filter_list;
}

static unsigned long sfb_bind(struct Qdisc *sch, unsigned long parent,
			      u32 classid)
{
	return 0;
}

static const struct Qdisc_class_ops sfb_class_ops = {
	.graft		=	sfb_graft,
	.leaf		=	sfb_leaf,
	.get		=	sfb_get,
	.put		=	sfb_put,
	.change		=	sfb_change_class,
	.delete		=	sfb_delete,
	.walk		=	sfb_walk,
	.tcf_chain	=	sfb_find_tcf,
	.bind_tcf	=	sfb_bind,
	.unbind_tcf	=	sfb_put,
	.dump		=	sfb_dump_class,
};

static struct Qdisc_ops sfb_qdisc_ops __read_mostly = {
	.id		=	"sfb",
	.priv_size	=	sizeof(struct sfb_sched_data),
	.cl_ops		=	&sfb_class_ops,
	.enqueue	=	sfb_enqueue,
	.dequeue	=	sfb_dequeue,
	.peek		=	sfb_peek,
	.init		=	sfb_init,
	.reset		=	sfb_reset,
	.destroy	=	sfb_destroy,
	.change		=	sfb_change,
	.dump		=	sfb_dump,
	.dump_stats	=	sfb_dump_stats,
	.owner		=	THIS_MODULE,
};

static int __init sfb_module_init(void)
{
	return register_qdisc(&sfb_qdisc_ops);
}

static void __exit sfb_module_exit(void)
{
	unregister_qdisc(&sfb_qdisc_ops);
}

module_init(sfb_module_init)
module_exit(sfb_module_exit)

MODULE_DESCRIPTION("Stochastic Fair Blue queue discipline");
MODULE_AUTHOR("Juliusz Chroboczek");
MODULE_AUTHOR("Eric Dumazet");
MODULE_LICENSE("GPL");