// SPDX-License-Identifier: GPL-2.0-only
/*
 * net/sched/sch_sfb.c	  Stochastic Fair Blue
 *
 * Copyright (c) 2008-2011 Juliusz Chroboczek <jch@pps.jussieu.fr>
 * Copyright (c) 2011 Eric Dumazet <eric.dumazet@gmail.com>
 *
 * W. Feng, D. Kandlur, D. Saha, K. Shin. Blue:
 * A New Class of Active Queue Management Algorithms.
 * U. Michigan CSE-TR-387-99, April 1999.
 *
 * http://www.thefengs.com/wuchang/blue/CSE-TR-387-99.pdf
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/random.h>
#include <linux/siphash.h>
#include <net/ip.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/inet_ecn.h>

/*
 * SFB uses two B[l][n] : L x N arrays of bins (L levels, N bins per level)
 * This implementation uses L = 8 and N = 16
 * This permits us to split one 32-bit hash (provided per packet by rxhash or
 * an external classifier) into 8 subhashes of 4 bits.
 */
#define SFB_BUCKET_SHIFT 4
#define SFB_NUMBUCKETS	(1 << SFB_BUCKET_SHIFT) /* N bins per Level */
#define SFB_BUCKET_MASK (SFB_NUMBUCKETS - 1)
#define SFB_LEVELS	(32 / SFB_BUCKET_SHIFT) /* L */
#define SFB_MAX_PROB	0xFFFF /* marking probability ~1.0, in Q0.16 */
38 /* SFB algo uses a virtual queue, named "bin" */
39 struct sfb_bucket {
40 	u16		qlen; /* length of virtual queue */
41 	u16		p_mark; /* marking probability */
42 };
43 
44 /* We use a double buffering right before hash change
45  * (Section 4.4 of SFB reference : moving hash functions)
46  */
47 struct sfb_bins {
48 	siphash_key_t	  perturbation; /* siphash key */
49 	struct sfb_bucket bins[SFB_LEVELS][SFB_NUMBUCKETS];
50 };
51 
struct sfb_sched_data {
	struct Qdisc	*qdisc;
	struct tcf_proto __rcu *filter_list;
	struct tcf_block *block;
	unsigned long	rehash_interval;
	unsigned long	warmup_time;	/* double buffering warmup time in jiffies */
	u32		max;
	u32		bin_size;	/* maximum queue length per bin */
	u32		increment;	/* d1 */
	u32		decrement;	/* d2 */
	u32		limit;		/* HARD maximal queue length */
	u32		penalty_rate;
	u32		penalty_burst;
	u32		tokens_avail;
	unsigned long	rehash_time;
	unsigned long	token_time;

	u8		slot;		/* current active bins (0 or 1) */
	bool		double_buffering;
	struct sfb_bins bins[2];

	struct {
		u32	earlydrop;
		u32	penaltydrop;
		u32	bucketdrop;
		u32	queuedrop;
		u32	childdrop;	/* drops in child qdisc */
		u32	marked;		/* ECN mark */
	} stats;
};

/*
 * Each queued skb might be hashed into one or two bins
 * We store the two hash values in skb_cb.
 * (A zero value means double buffering was not used)
 */
struct sfb_skb_cb {
	u32 hashes[2];
};

static inline struct sfb_skb_cb *sfb_skb_cb(const struct sk_buff *skb)
{
	qdisc_cb_private_validate(skb, sizeof(struct sfb_skb_cb));
	return (struct sfb_skb_cb *)qdisc_skb_cb(skb)->data;
}

/*
 * If using the 'internal' SFB flow classifier, the hash comes from skb rxhash
 * If using an external classifier, the hash comes from the classid.
 */
static u32 sfb_hash(const struct sk_buff *skb, u32 slot)
{
	return sfb_skb_cb(skb)->hashes[slot];
}

/* Probabilities are coded as Q0.16 fixed-point values,
 * with 0xFFFF representing 65535/65536 (almost 1.0)
 * Addition and subtraction are saturating in [0, 65535]
 */
static u32 prob_plus(u32 p1, u32 p2)
{
	u32 res = p1 + p2;

	return min_t(u32, res, SFB_MAX_PROB);
}

static u32 prob_minus(u32 p1, u32 p2)
{
	return p1 > p2 ? p1 - p2 : 0;
}
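
/* A worked example of the saturating arithmetic (values are Q0.16):
 * prob_plus(0xFF00, 0x0200)  -> 0xFFFF  (sum 0x10100 clamps to ~1.0)
 * prob_minus(0x0100, 0x0200) -> 0       (would go negative, clamps to 0.0)
 */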

/* Add one packet to the flow's bucket at each of the SFB_LEVELS levels
 * of the given slot; qlen is u16, so saturate at 0xFFFF instead of
 * wrapping.
 */
static void increment_one_qlen(u32 sfbhash, u32 slot, struct sfb_sched_data *q)
{
	int i;
	struct sfb_bucket *b = &q->bins[slot].bins[0][0];

	for (i = 0; i < SFB_LEVELS; i++) {
		u32 hash = sfbhash & SFB_BUCKET_MASK;

		sfbhash >>= SFB_BUCKET_SHIFT;
		if (b[hash].qlen < 0xFFFF)
			b[hash].qlen++;
		b += SFB_NUMBUCKETS; /* next level */
	}
}

static void increment_qlen(const struct sk_buff *skb, struct sfb_sched_data *q)
{
	u32 sfbhash;

	sfbhash = sfb_hash(skb, 0);
	if (sfbhash)
		increment_one_qlen(sfbhash, 0, q);

	sfbhash = sfb_hash(skb, 1);
	if (sfbhash)
		increment_one_qlen(sfbhash, 1, q);
}

/* Inverse of increment_one_qlen(): called when a packet leaves the qdisc. */
static void decrement_one_qlen(u32 sfbhash, u32 slot,
			       struct sfb_sched_data *q)
{
	int i;
	struct sfb_bucket *b = &q->bins[slot].bins[0][0];

	for (i = 0; i < SFB_LEVELS; i++) {
		u32 hash = sfbhash & SFB_BUCKET_MASK;

		sfbhash >>= SFB_BUCKET_SHIFT;
		if (b[hash].qlen > 0)
			b[hash].qlen--;
		b += SFB_NUMBUCKETS; /* next level */
	}
}

static void decrement_qlen(const struct sk_buff *skb, struct sfb_sched_data *q)
{
	u32 sfbhash;

	sfbhash = sfb_hash(skb, 0);
	if (sfbhash)
		decrement_one_qlen(sfbhash, 0, q);

	sfbhash = sfb_hash(skb, 1);
	if (sfbhash)
		decrement_one_qlen(sfbhash, 1, q);
}

static void decrement_prob(struct sfb_bucket *b, struct sfb_sched_data *q)
{
	b->p_mark = prob_minus(b->p_mark, q->decrement);
}

static void increment_prob(struct sfb_bucket *b, struct sfb_sched_data *q)
{
	b->p_mark = prob_plus(b->p_mark, q->increment);
}

static void sfb_zero_all_buckets(struct sfb_sched_data *q)
{
	memset(&q->bins, 0, sizeof(q->bins));
}

/*
 * compute max qlen, max p_mark, and avg p_mark
 */
static u32 sfb_compute_qlen(u32 *prob_r, u32 *avgpm_r, const struct sfb_sched_data *q)
{
	int i;
	u32 qlen = 0, prob = 0, totalpm = 0;
	const struct sfb_bucket *b = &q->bins[q->slot].bins[0][0];

	for (i = 0; i < SFB_LEVELS * SFB_NUMBUCKETS; i++) {
		if (qlen < b->qlen)
			qlen = b->qlen;
		totalpm += b->p_mark;
		if (prob < b->p_mark)
			prob = b->p_mark;
		b++;
	}
	*prob_r = prob;
	*avgpm_r = totalpm / (SFB_LEVELS * SFB_NUMBUCKETS);
	return qlen;
}

static void sfb_init_perturbation(u32 slot, struct sfb_sched_data *q)
{
	get_random_bytes(&q->bins[slot].perturbation,
			 sizeof(q->bins[slot].perturbation));
}

/* Swap the active slot at rehash time: give the retiring slot a fresh
 * perturbation key, then activate the other slot and stop double buffering.
 */
static void sfb_swap_slot(struct sfb_sched_data *q)
{
	sfb_init_perturbation(q->slot, q);
	q->slot ^= 1;
	q->double_buffering = false;
}

/* Non-elastic flows are allowed to use part of the bandwidth, expressed
 * in "penalty_rate" packets per second, with "penalty_burst" burst
 */
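/* For example, with the defaults below (penalty_rate = 10,
 * penalty_burst = 20), a flow flagged as inelastic may send an initial
 * burst of up to 20 packets, then is held to 10 packets per second;
 * sfb_rate_limit() returns true when a packet exceeds that budget.
 */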
static bool sfb_rate_limit(struct sk_buff *skb, struct sfb_sched_data *q)
{
	if (q->penalty_rate == 0 || q->penalty_burst == 0)
		return true;

	if (q->tokens_avail < 1) {
		unsigned long age = min(10UL * HZ, jiffies - q->token_time);

		q->tokens_avail = (age * q->penalty_rate) / HZ;
		if (q->tokens_avail > q->penalty_burst)
			q->tokens_avail = q->penalty_burst;
		q->token_time = jiffies;
		if (q->tokens_avail < 1)
			return true;
	}

	q->tokens_avail--;
	return false;
}

static bool sfb_classify(struct sk_buff *skb, struct tcf_proto *fl,
			 int *qerr, u32 *salt)
{
	struct tcf_result res;
	int result;

	result = tcf_classify(skb, NULL, fl, &res, false);
	if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_STOLEN:
		case TC_ACT_QUEUED:
		case TC_ACT_TRAP:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
			fallthrough;
		case TC_ACT_SHOT:
			return false;
		}
#endif
		*salt = TC_H_MIN(res.classid);
		return true;
	}
	return false;
}

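/* Enqueue decision flow: enforce the hard limit, handle periodic rehash
 * and double-buffering warmup, then hash the packet and update the
 * bucket it maps to at every level, tracking the minimum qlen and
 * minimum p_mark.  A flow whose minimum p_mark has reached ~1.0 is
 * treated as inelastic and rate-limited; otherwise the packet is
 * ECN-marked or early-dropped with probability p_min.
 */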
static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
		       struct sk_buff **to_free)
{
	struct sfb_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;
	struct tcf_proto *fl;
	int i;
	u32 p_min = ~0;
	u32 minqlen = ~0;
	u32 r, sfbhash;
	u32 slot = q->slot;
	int ret = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;

	if (unlikely(sch->q.qlen >= q->limit)) {
		qdisc_qstats_overlimit(sch);
		q->stats.queuedrop++;
		goto drop;
	}

	if (q->rehash_interval > 0) {
		unsigned long limit = q->rehash_time + q->rehash_interval;

		if (unlikely(time_after(jiffies, limit))) {
			sfb_swap_slot(q);
			q->rehash_time = jiffies;
		} else if (unlikely(!q->double_buffering && q->warmup_time > 0 &&
				    time_after(jiffies, limit - q->warmup_time))) {
			q->double_buffering = true;
		}
	}

	fl = rcu_dereference_bh(q->filter_list);
	if (fl) {
		u32 salt;

		/* If using external classifiers, get result and record it. */
		if (!sfb_classify(skb, fl, &ret, &salt))
			goto other_drop;
		sfbhash = siphash_1u32(salt, &q->bins[slot].perturbation);
	} else {
		sfbhash = skb_get_hash_perturb(skb, &q->bins[slot].perturbation);
	}

	if (!sfbhash)
		sfbhash = 1;
	sfb_skb_cb(skb)->hashes[slot] = sfbhash;

	for (i = 0; i < SFB_LEVELS; i++) {
		u32 hash = sfbhash & SFB_BUCKET_MASK;
		struct sfb_bucket *b = &q->bins[slot].bins[i][hash];

		sfbhash >>= SFB_BUCKET_SHIFT;
		if (b->qlen == 0)
			decrement_prob(b, q);
		else if (b->qlen >= q->bin_size)
			increment_prob(b, q);
		if (minqlen > b->qlen)
			minqlen = b->qlen;
		if (p_min > b->p_mark)
			p_min = b->p_mark;
	}

	slot ^= 1;
	sfb_skb_cb(skb)->hashes[slot] = 0;

	if (unlikely(minqlen >= q->max)) {
		qdisc_qstats_overlimit(sch);
		q->stats.bucketdrop++;
		goto drop;
	}

	if (unlikely(p_min >= SFB_MAX_PROB)) {
		/* Inelastic flow */
		if (q->double_buffering) {
			sfbhash = skb_get_hash_perturb(skb,
			    &q->bins[slot].perturbation);
			if (!sfbhash)
				sfbhash = 1;
			sfb_skb_cb(skb)->hashes[slot] = sfbhash;

			for (i = 0; i < SFB_LEVELS; i++) {
				u32 hash = sfbhash & SFB_BUCKET_MASK;
				struct sfb_bucket *b = &q->bins[slot].bins[i][hash];

				sfbhash >>= SFB_BUCKET_SHIFT;
				if (b->qlen == 0)
					decrement_prob(b, q);
				else if (b->qlen >= q->bin_size)
					increment_prob(b, q);
			}
		}
		if (sfb_rate_limit(skb, q)) {
			qdisc_qstats_overlimit(sch);
			q->stats.penaltydrop++;
			goto drop;
		}
		goto enqueue;
	}

	r = prandom_u32() & SFB_MAX_PROB;

	if (unlikely(r < p_min)) {
		if (unlikely(p_min > SFB_MAX_PROB / 2)) {
			/* If we're marking that many packets, then either
			 * this flow is unresponsive, or we're badly congested.
			 * In either case, we want to start dropping packets.
			 */
			if (r < (p_min - SFB_MAX_PROB / 2) * 2) {
				q->stats.earlydrop++;
				goto drop;
			}
		}
		if (INET_ECN_set_ce(skb)) {
			q->stats.marked++;
		} else {
			q->stats.earlydrop++;
			goto drop;
		}
	}

enqueue:
	ret = qdisc_enqueue(skb, child, to_free);
	if (likely(ret == NET_XMIT_SUCCESS)) {
		qdisc_qstats_backlog_inc(sch, skb);
		sch->q.qlen++;
		increment_qlen(skb, q);
	} else if (net_xmit_drop_count(ret)) {
		q->stats.childdrop++;
		qdisc_qstats_drop(sch);
	}
	return ret;

drop:
	qdisc_drop(skb, sch, to_free);
	return NET_XMIT_CN;
other_drop:
	if (ret & __NET_XMIT_BYPASS)
		qdisc_qstats_drop(sch);
	kfree_skb(skb);
	return ret;
}
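
/* Note on return codes: packets dropped by SFB itself return NET_XMIT_CN,
 * signalling congestion to the caller rather than a hard error, while
 * packets stolen or shot by an external classifier propagate the
 * classifier's verdict through 'ret'.
 */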

static struct sk_buff *sfb_dequeue(struct Qdisc *sch)
{
	struct sfb_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;
	struct sk_buff *skb;

	skb = child->dequeue(q->qdisc);

	if (skb) {
		qdisc_bstats_update(sch, skb);
		qdisc_qstats_backlog_dec(sch, skb);
		sch->q.qlen--;
		decrement_qlen(skb, q);
	}

	return skb;
}

static struct sk_buff *sfb_peek(struct Qdisc *sch)
{
	struct sfb_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;

	return child->ops->peek(child);
}

/* No sfb_drop -- impossible since the child doesn't return the dropped skb. */

static void sfb_reset(struct Qdisc *sch)
{
	struct sfb_sched_data *q = qdisc_priv(sch);

	qdisc_reset(q->qdisc);
	sch->qstats.backlog = 0;
	sch->q.qlen = 0;
	q->slot = 0;
	q->double_buffering = false;
	sfb_zero_all_buckets(q);
	sfb_init_perturbation(0, q);
}

static void sfb_destroy(struct Qdisc *sch)
{
	struct sfb_sched_data *q = qdisc_priv(sch);

	tcf_block_put(q->block);
	qdisc_put(q->qdisc);
}

static const struct nla_policy sfb_policy[TCA_SFB_MAX + 1] = {
	[TCA_SFB_PARMS]	= { .len = sizeof(struct tc_sfb_qopt) },
};

static const struct tc_sfb_qopt sfb_default_ops = {
	.rehash_interval = 600 * MSEC_PER_SEC,
	.warmup_time = 60 * MSEC_PER_SEC,
	.limit = 0,
	.max = 25,
	.bin_size = 20,
	.increment = (SFB_MAX_PROB + 500) / 1000, /* 0.1 % */
	.decrement = (SFB_MAX_PROB + 3000) / 6000, /* ~0.017 % */
	.penalty_rate = 10,
	.penalty_burst = 20,
};
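
/* With these defaults, p_mark climbs roughly six times faster than it
 * decays: each packet that sees a full bin adds ~0.1 % while each packet
 * that sees an empty bin subtracts ~0.017 %, so only persistently
 * congested bins approach a marking probability of 1.0.
 */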

static int sfb_change(struct Qdisc *sch, struct nlattr *opt,
		      struct netlink_ext_ack *extack)
{
	struct sfb_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child, *old;
	struct nlattr *tb[TCA_SFB_MAX + 1];
	const struct tc_sfb_qopt *ctl = &sfb_default_ops;
	u32 limit;
	int err;

	if (opt) {
		err = nla_parse_nested_deprecated(tb, TCA_SFB_MAX, opt,
						  sfb_policy, NULL);
		if (err < 0)
			return -EINVAL;

		if (tb[TCA_SFB_PARMS] == NULL)
			return -EINVAL;

		ctl = nla_data(tb[TCA_SFB_PARMS]);
	}

	limit = ctl->limit;
	if (limit == 0)
		limit = qdisc_dev(sch)->tx_queue_len;

	child = fifo_create_dflt(sch, &pfifo_qdisc_ops, limit, extack);
	if (IS_ERR(child))
		return PTR_ERR(child);

	if (child != &noop_qdisc)
		qdisc_hash_add(child, true);
	sch_tree_lock(sch);

	qdisc_purge_queue(q->qdisc);
	old = q->qdisc;
	q->qdisc = child;

	q->rehash_interval = msecs_to_jiffies(ctl->rehash_interval);
	q->warmup_time = msecs_to_jiffies(ctl->warmup_time);
	q->rehash_time = jiffies;
	q->limit = limit;
	q->increment = ctl->increment;
	q->decrement = ctl->decrement;
	q->max = ctl->max;
	q->bin_size = ctl->bin_size;
	q->penalty_rate = ctl->penalty_rate;
	q->penalty_burst = ctl->penalty_burst;
	q->tokens_avail = ctl->penalty_burst;
	q->token_time = jiffies;

	q->slot = 0;
	q->double_buffering = false;
	sfb_zero_all_buckets(q);
	sfb_init_perturbation(0, q);
	sfb_init_perturbation(1, q);

	sch_tree_unlock(sch);
	qdisc_put(old);

	return 0;
}

static int sfb_init(struct Qdisc *sch, struct nlattr *opt,
		    struct netlink_ext_ack *extack)
{
	struct sfb_sched_data *q = qdisc_priv(sch);
	int err;

	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
	if (err)
		return err;

	q->qdisc = &noop_qdisc;
	return sfb_change(sch, opt, extack);
}

static int sfb_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct sfb_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts;
	struct tc_sfb_qopt opt = {
		.rehash_interval = jiffies_to_msecs(q->rehash_interval),
		.warmup_time = jiffies_to_msecs(q->warmup_time),
		.limit = q->limit,
		.max = q->max,
		.bin_size = q->bin_size,
		.increment = q->increment,
		.decrement = q->decrement,
		.penalty_rate = q->penalty_rate,
		.penalty_burst = q->penalty_burst,
	};

	sch->qstats.backlog = q->qdisc->qstats.backlog;
	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;
	if (nla_put(skb, TCA_SFB_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;
	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}

static int sfb_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct sfb_sched_data *q = qdisc_priv(sch);
	struct tc_sfb_xstats st = {
		.earlydrop = q->stats.earlydrop,
		.penaltydrop = q->stats.penaltydrop,
		.bucketdrop = q->stats.bucketdrop,
		.queuedrop = q->stats.queuedrop,
		.childdrop = q->stats.childdrop,
		.marked = q->stats.marked,
	};

	st.maxqlen = sfb_compute_qlen(&st.maxprob, &st.avgprob, q);

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

static int sfb_dump_class(struct Qdisc *sch, unsigned long cl,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	return -ENOSYS;
}

static int sfb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old, struct netlink_ext_ack *extack)
{
	struct sfb_sched_data *q = qdisc_priv(sch);

	if (new == NULL)
		new = &noop_qdisc;

	*old = qdisc_replace(sch, new, &q->qdisc);
	return 0;
}

static struct Qdisc *sfb_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct sfb_sched_data *q = qdisc_priv(sch);

	return q->qdisc;
}

static unsigned long sfb_find(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void sfb_unbind(struct Qdisc *sch, unsigned long arg)
{
}

static int sfb_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
			    struct nlattr **tca, unsigned long *arg,
			    struct netlink_ext_ack *extack)
{
	return -ENOSYS;
}

static int sfb_delete(struct Qdisc *sch, unsigned long cl,
		      struct netlink_ext_ack *extack)
{
	return -ENOSYS;
}

static void sfb_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}

static struct tcf_block *sfb_tcf_block(struct Qdisc *sch, unsigned long cl,
				       struct netlink_ext_ack *extack)
{
	struct sfb_sched_data *q = qdisc_priv(sch);

	if (cl)
		return NULL;
	return q->block;
}

static unsigned long sfb_bind(struct Qdisc *sch, unsigned long parent,
			      u32 classid)
{
	return 0;
}

static const struct Qdisc_class_ops sfb_class_ops = {
	.graft		=	sfb_graft,
	.leaf		=	sfb_leaf,
	.find		=	sfb_find,
	.change		=	sfb_change_class,
	.delete		=	sfb_delete,
	.walk		=	sfb_walk,
	.tcf_block	=	sfb_tcf_block,
	.bind_tcf	=	sfb_bind,
	.unbind_tcf	=	sfb_unbind,
	.dump		=	sfb_dump_class,
};

static struct Qdisc_ops sfb_qdisc_ops __read_mostly = {
	.id		=	"sfb",
	.priv_size	=	sizeof(struct sfb_sched_data),
	.cl_ops		=	&sfb_class_ops,
	.enqueue	=	sfb_enqueue,
	.dequeue	=	sfb_dequeue,
	.peek		=	sfb_peek,
	.init		=	sfb_init,
	.reset		=	sfb_reset,
	.destroy	=	sfb_destroy,
	.change		=	sfb_change,
	.dump		=	sfb_dump,
	.dump_stats	=	sfb_dump_stats,
	.owner		=	THIS_MODULE,
};

static int __init sfb_module_init(void)
{
	return register_qdisc(&sfb_qdisc_ops);
}

static void __exit sfb_module_exit(void)
{
	unregister_qdisc(&sfb_qdisc_ops);
}

module_init(sfb_module_init)
module_exit(sfb_module_exit)

MODULE_DESCRIPTION("Stochastic Fair Blue queue discipline");
MODULE_AUTHOR("Juliusz Chroboczek");
MODULE_AUTHOR("Eric Dumazet");
MODULE_LICENSE("GPL");
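
/* A minimal usage sketch, assuming an iproute2 tc binary built with SFB
 * support (option names per tc-sfb(8)):
 *
 *   tc qdisc add dev eth0 root sfb
 *
 * attaches the qdisc with the defaults above: rehash every 600 s with a
 * 60 s double-buffering warmup, bin_size 20 packets, max 25, and the
 * hard limit taken from the device tx_queue_len.
 */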