// SPDX-License-Identifier: GPL-2.0-only
/*
 * net/sched/sch_sfb.c	  Stochastic Fair Blue
 *
 * Copyright (c) 2008-2011 Juliusz Chroboczek <jch@pps.jussieu.fr>
 * Copyright (c) 2011 Eric Dumazet <eric.dumazet@gmail.com>
 *
 * W. Feng, D. Kandlur, D. Saha, K. Shin. Blue:
 * A New Class of Active Queue Management Algorithms.
 * U. Michigan CSE-TR-387-99, April 1999.
 *
 * http://www.thefengs.com/wuchang/blue/CSE-TR-387-99.pdf
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <net/ip.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/inet_ecn.h>

/*
 * SFB uses two B[l][n] : L x N arrays of bins (L levels, N bins per level)
 * This implementation uses L = 8 and N = 16
 * This permits us to split one 32-bit hash (provided per packet by rxhash or
 * external classifier) into 8 subhashes of 4 bits.
 */
#define SFB_BUCKET_SHIFT 4
#define SFB_NUMBUCKETS	(1 << SFB_BUCKET_SHIFT) /* N bins per Level */
#define SFB_BUCKET_MASK (SFB_NUMBUCKETS - 1)
#define SFB_LEVELS	(32 / SFB_BUCKET_SHIFT) /* L */
#define SFB_MAX_PROB	0xFFFF /* Q0.16 representation of (almost) 1.0 */

/* SFB algo uses a virtual queue, named "bin" */
struct sfb_bucket {
	u16	qlen; /* length of virtual queue */
	u16	p_mark; /* marking probability */
};

/* We use double buffering right before a hash change
 * (Section 4.4 of SFB reference : moving hash functions)
 */
struct sfb_bins {
	u32	perturbation; /* jhash perturbation */
	struct	sfb_bucket bins[SFB_LEVELS][SFB_NUMBUCKETS];
};

struct sfb_sched_data {
	struct Qdisc	*qdisc;
	struct tcf_proto __rcu *filter_list;
	struct tcf_block *block;
	unsigned long	rehash_interval;
	unsigned long	warmup_time;	/* double buffering warmup time in jiffies */
	u32		max;
	u32		bin_size;	/* maximum queue length per bin */
	u32		increment;	/* d1 */
	u32		decrement;	/* d2 */
	u32		limit;		/* HARD maximal queue length */
	u32		penalty_rate;
	u32		penalty_burst;
	u32		tokens_avail;
	unsigned long	rehash_time;
	unsigned long	token_time;

	u8		slot;		/* current active bins (0 or 1) */
	bool		double_buffering;
	struct	sfb_bins bins[2];

	struct {
		u32	earlydrop;
		u32	penaltydrop;
		u32	bucketdrop;
		u32	queuedrop;
		u32	childdrop;	/* drops in child qdisc */
		u32	marked;		/* ECN mark */
	} stats;
};

/*
 * Each queued skb might be hashed on one or two bins
 * We store in skb_cb the two hash values.
 * (A zero value means double buffering was not used)
 */
struct sfb_skb_cb {
	u32 hashes[2];
};

static inline struct sfb_skb_cb *sfb_skb_cb(const struct sk_buff *skb)
{
	qdisc_cb_private_validate(skb, sizeof(struct sfb_skb_cb));
	return (struct sfb_skb_cb *)qdisc_skb_cb(skb)->data;
}

/*
 * If using 'internal' SFB flow classifier, hash comes from skb rxhash
 * If using external classifier, hash comes from the classid.
 */
static u32 sfb_hash(const struct sk_buff *skb, u32 slot)
{
	return sfb_skb_cb(skb)->hashes[slot];
}

/* Probabilities are coded as Q0.16 fixed-point values,
 * with 0xFFFF representing 65535/65536 (almost 1.0)
 * Addition and subtraction are saturating in [0, 65535]
 */
static u32 prob_plus(u32 p1, u32 p2)
{
	u32 res = p1 + p2;

	return min_t(u32, res, SFB_MAX_PROB);
}

static u32 prob_minus(u32 p1, u32 p2)
{
	return p1 > p2 ? p1 - p2 : 0;
}
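/* Worked example of the Q0.16 saturating arithmetic above (illustrative
 * values only, not part of the algorithm): with the default increment
 * d1 = 66 (~0.1 %), prob_plus(0xFFD0, 66) computes 65488 + 66 = 65554,
 * which saturates to SFB_MAX_PROB (65535); likewise prob_minus(10, 66)
 * returns 0 instead of wrapping below zero.
 */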
static void increment_one_qlen(u32 sfbhash, u32 slot, struct sfb_sched_data *q)
{
	int i;
	struct sfb_bucket *b = &q->bins[slot].bins[0][0];

	for (i = 0; i < SFB_LEVELS; i++) {
		u32 hash = sfbhash & SFB_BUCKET_MASK;

		sfbhash >>= SFB_BUCKET_SHIFT;
		if (b[hash].qlen < 0xFFFF)
			b[hash].qlen++;
		b += SFB_NUMBUCKETS; /* next level */
	}
}

static void increment_qlen(const struct sk_buff *skb, struct sfb_sched_data *q)
{
	u32 sfbhash;

	sfbhash = sfb_hash(skb, 0);
	if (sfbhash)
		increment_one_qlen(sfbhash, 0, q);

	sfbhash = sfb_hash(skb, 1);
	if (sfbhash)
		increment_one_qlen(sfbhash, 1, q);
}

static void decrement_one_qlen(u32 sfbhash, u32 slot,
			       struct sfb_sched_data *q)
{
	int i;
	struct sfb_bucket *b = &q->bins[slot].bins[0][0];

	for (i = 0; i < SFB_LEVELS; i++) {
		u32 hash = sfbhash & SFB_BUCKET_MASK;

		sfbhash >>= SFB_BUCKET_SHIFT;
		if (b[hash].qlen > 0)
			b[hash].qlen--;
		b += SFB_NUMBUCKETS; /* next level */
	}
}

static void decrement_qlen(const struct sk_buff *skb, struct sfb_sched_data *q)
{
	u32 sfbhash;

	sfbhash = sfb_hash(skb, 0);
	if (sfbhash)
		decrement_one_qlen(sfbhash, 0, q);

	sfbhash = sfb_hash(skb, 1);
	if (sfbhash)
		decrement_one_qlen(sfbhash, 1, q);
}

static void decrement_prob(struct sfb_bucket *b, struct sfb_sched_data *q)
{
	b->p_mark = prob_minus(b->p_mark, q->decrement);
}

static void increment_prob(struct sfb_bucket *b, struct sfb_sched_data *q)
{
	b->p_mark = prob_plus(b->p_mark, q->increment);
}

static void sfb_zero_all_buckets(struct sfb_sched_data *q)
{
	memset(&q->bins, 0, sizeof(q->bins));
}

/*
 * compute max qlen, max p_mark, and avg p_mark
 */
static u32 sfb_compute_qlen(u32 *prob_r, u32 *avgpm_r, const struct sfb_sched_data *q)
{
	int i;
	u32 qlen = 0, prob = 0, totalpm = 0;
	const struct sfb_bucket *b = &q->bins[q->slot].bins[0][0];

	for (i = 0; i < SFB_LEVELS * SFB_NUMBUCKETS; i++) {
		if (qlen < b->qlen)
			qlen = b->qlen;
		totalpm += b->p_mark;
		if (prob < b->p_mark)
			prob = b->p_mark;
		b++;
	}
	*prob_r = prob;
	*avgpm_r = totalpm / (SFB_LEVELS * SFB_NUMBUCKETS);
	return qlen;
}

static void sfb_init_perturbation(u32 slot, struct sfb_sched_data *q)
{
	q->bins[slot].perturbation = prandom_u32();
}

static void sfb_swap_slot(struct sfb_sched_data *q)
{
	sfb_init_perturbation(q->slot, q);
	q->slot ^= 1;
	q->double_buffering = false;
}

/* Non-elastic flows are allowed to use part of the bandwidth, expressed
 * in "penalty_rate" packets per second, with "penalty_burst" burst
 */
static bool sfb_rate_limit(struct sk_buff *skb, struct sfb_sched_data *q)
{
	if (q->penalty_rate == 0 || q->penalty_burst == 0)
		return true;

	if (q->tokens_avail < 1) {
		unsigned long age = min(10UL * HZ, jiffies - q->token_time);

		q->tokens_avail = (age * q->penalty_rate) / HZ;
		if (q->tokens_avail > q->penalty_burst)
			q->tokens_avail = q->penalty_burst;
		q->token_time = jiffies;
		if (q->tokens_avail < 1)
			return true;
	}

	q->tokens_avail--;
	return false;
}
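/* Worked example of the token bucket in sfb_rate_limit() above
 * (illustrative numbers): with penalty_rate = 10 pps and penalty_burst =
 * 20, a bucket that has been idle for 2 * HZ jiffies refills to
 * min((2 * HZ * 10) / HZ, 20) = 20 tokens, so at most 20 back-to-back
 * packets from inelastic flows are admitted before the 10
 * packets-per-second trickle takes over.
 */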
static bool sfb_classify(struct sk_buff *skb, struct tcf_proto *fl,
			 int *qerr, u32 *salt)
{
	struct tcf_result res;
	int result;

	result = tcf_classify(skb, fl, &res, false);
	if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_STOLEN:
		case TC_ACT_QUEUED:
		case TC_ACT_TRAP:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
			/* fall through */
		case TC_ACT_SHOT:
			return false;
		}
#endif
		*salt = TC_H_MIN(res.classid);
		return true;
	}
	return false;
}
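/* Illustration of the hash-to-bucket mapping used by sfb_enqueue() below
 * and by increment_one_qlen() above (example value only): a 32-bit hash
 * such as 0xA4C19E37 is consumed 4 bits at a time from the low end, so
 * the packet maps to bucket 0x7 at level 0, 0x3 at level 1, 0xE at
 * level 2, ... and 0xA at level 7 -- one bucket per level, eight in all.
 */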
static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
		       struct sk_buff **to_free)
{
	struct sfb_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;
	struct tcf_proto *fl;
	int i;
	u32 p_min = ~0;
	u32 minqlen = ~0;
	u32 r, sfbhash;
	u32 slot = q->slot;
	int ret = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;

	if (unlikely(sch->q.qlen >= q->limit)) {
		qdisc_qstats_overlimit(sch);
		q->stats.queuedrop++;
		goto drop;
	}

	if (q->rehash_interval > 0) {
		unsigned long limit = q->rehash_time + q->rehash_interval;

		if (unlikely(time_after(jiffies, limit))) {
			sfb_swap_slot(q);
			q->rehash_time = jiffies;
		} else if (unlikely(!q->double_buffering && q->warmup_time > 0 &&
				    time_after(jiffies, limit - q->warmup_time))) {
			q->double_buffering = true;
		}
	}

	fl = rcu_dereference_bh(q->filter_list);
	if (fl) {
		u32 salt;

		/* If using external classifiers, get result and record it. */
		if (!sfb_classify(skb, fl, &ret, &salt))
			goto other_drop;
		sfbhash = jhash_1word(salt, q->bins[slot].perturbation);
	} else {
		sfbhash = skb_get_hash_perturb(skb, q->bins[slot].perturbation);
	}

	if (!sfbhash)
		sfbhash = 1;
	sfb_skb_cb(skb)->hashes[slot] = sfbhash;

	for (i = 0; i < SFB_LEVELS; i++) {
		u32 hash = sfbhash & SFB_BUCKET_MASK;
		struct sfb_bucket *b = &q->bins[slot].bins[i][hash];

		sfbhash >>= SFB_BUCKET_SHIFT;
		if (b->qlen == 0)
			decrement_prob(b, q);
		else if (b->qlen >= q->bin_size)
			increment_prob(b, q);
		if (minqlen > b->qlen)
			minqlen = b->qlen;
		if (p_min > b->p_mark)
			p_min = b->p_mark;
	}

	slot ^= 1;
	sfb_skb_cb(skb)->hashes[slot] = 0;

	if (unlikely(minqlen >= q->max)) {
		qdisc_qstats_overlimit(sch);
		q->stats.bucketdrop++;
		goto drop;
	}

	if (unlikely(p_min >= SFB_MAX_PROB)) {
		/* Inelastic flow */
		if (q->double_buffering) {
			sfbhash = skb_get_hash_perturb(skb,
						       q->bins[slot].perturbation);
			if (!sfbhash)
				sfbhash = 1;
			sfb_skb_cb(skb)->hashes[slot] = sfbhash;

			for (i = 0; i < SFB_LEVELS; i++) {
				u32 hash = sfbhash & SFB_BUCKET_MASK;
				struct sfb_bucket *b = &q->bins[slot].bins[i][hash];

				sfbhash >>= SFB_BUCKET_SHIFT;
				if (b->qlen == 0)
					decrement_prob(b, q);
				else if (b->qlen >= q->bin_size)
					increment_prob(b, q);
			}
		}
		if (sfb_rate_limit(skb, q)) {
			qdisc_qstats_overlimit(sch);
			q->stats.penaltydrop++;
			goto drop;
		}
		goto enqueue;
	}

	r = prandom_u32() & SFB_MAX_PROB;

	if (unlikely(r < p_min)) {
		if (unlikely(p_min > SFB_MAX_PROB / 2)) {
			/* If we're marking that many packets, then either
			 * this flow is unresponsive, or we're badly congested.
			 * In either case, we want to start dropping packets.
			 */
			if (r < (p_min - SFB_MAX_PROB / 2) * 2) {
				q->stats.earlydrop++;
				goto drop;
			}
		}
		if (INET_ECN_set_ce(skb)) {
			q->stats.marked++;
		} else {
			q->stats.earlydrop++;
			goto drop;
		}
	}

enqueue:
	ret = qdisc_enqueue(skb, child, to_free);
	if (likely(ret == NET_XMIT_SUCCESS)) {
		qdisc_qstats_backlog_inc(sch, skb);
		sch->q.qlen++;
		increment_qlen(skb, q);
	} else if (net_xmit_drop_count(ret)) {
		q->stats.childdrop++;
		qdisc_qstats_drop(sch);
	}
	return ret;

drop:
	qdisc_drop(skb, sch, to_free);
	return NET_XMIT_CN;
other_drop:
	if (ret & __NET_XMIT_BYPASS)
		qdisc_qstats_drop(sch);
	kfree_skb(skb);
	return ret;
}

static struct sk_buff *sfb_dequeue(struct Qdisc *sch)
{
	struct sfb_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;
	struct sk_buff *skb;

	skb = child->dequeue(q->qdisc);

	if (skb) {
		qdisc_bstats_update(sch, skb);
		qdisc_qstats_backlog_dec(sch, skb);
		sch->q.qlen--;
		decrement_qlen(skb, q);
	}

	return skb;
}

static struct sk_buff *sfb_peek(struct Qdisc *sch)
{
	struct sfb_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;

	return child->ops->peek(child);
}

/* No sfb_drop -- impossible since the child doesn't return the dropped skb. */

static void sfb_reset(struct Qdisc *sch)
{
	struct sfb_sched_data *q = qdisc_priv(sch);

	qdisc_reset(q->qdisc);
	sch->qstats.backlog = 0;
	sch->q.qlen = 0;
	q->slot = 0;
	q->double_buffering = false;
	sfb_zero_all_buckets(q);
	sfb_init_perturbation(0, q);
}

static void sfb_destroy(struct Qdisc *sch)
{
	struct sfb_sched_data *q = qdisc_priv(sch);

	tcf_block_put(q->block);
	qdisc_put(q->qdisc);
}

static const struct nla_policy sfb_policy[TCA_SFB_MAX + 1] = {
	[TCA_SFB_PARMS]	= { .len = sizeof(struct tc_sfb_qopt) },
};

static const struct tc_sfb_qopt sfb_default_ops = {
	.rehash_interval = 600 * MSEC_PER_SEC,
	.warmup_time = 60 * MSEC_PER_SEC,
	.limit = 0,
	.max = 25,
	.bin_size = 20,
	.increment = (SFB_MAX_PROB + 500) / 1000, /* 0.1 % */
	.decrement = (SFB_MAX_PROB + 3000) / 6000,
	.penalty_rate = 10,
	.penalty_burst = 20,
};
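/* Worked arithmetic for the defaults above (for reference; the reading of
 * the ratio is our interpretation): increment = (65535 + 500) / 1000 = 66,
 * i.e. 66/65536 ~= 0.1 % per update, while decrement = (65535 + 3000) /
 * 6000 = 11, i.e. ~0.017 %, so p_mark rises roughly six times faster than
 * it decays (d1 > d2, as the Blue paper suggests).
 */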
static int sfb_change(struct Qdisc *sch, struct nlattr *opt,
		      struct netlink_ext_ack *extack)
{
	struct sfb_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child;
	struct nlattr *tb[TCA_SFB_MAX + 1];
	const struct tc_sfb_qopt *ctl = &sfb_default_ops;
	u32 limit;
	int err;

	if (opt) {
		err = nla_parse_nested_deprecated(tb, TCA_SFB_MAX, opt,
						  sfb_policy, NULL);
		if (err < 0)
			return -EINVAL;

		if (tb[TCA_SFB_PARMS] == NULL)
			return -EINVAL;

		ctl = nla_data(tb[TCA_SFB_PARMS]);
	}

	limit = ctl->limit;
	if (limit == 0)
		limit = qdisc_dev(sch)->tx_queue_len;

	child = fifo_create_dflt(sch, &pfifo_qdisc_ops, limit, extack);
	if (IS_ERR(child))
		return PTR_ERR(child);

	if (child != &noop_qdisc)
		qdisc_hash_add(child, true);
	sch_tree_lock(sch);

	qdisc_tree_flush_backlog(q->qdisc);
	qdisc_put(q->qdisc);
	q->qdisc = child;

	q->rehash_interval = msecs_to_jiffies(ctl->rehash_interval);
	q->warmup_time = msecs_to_jiffies(ctl->warmup_time);
	q->rehash_time = jiffies;
	q->limit = limit;
	q->increment = ctl->increment;
	q->decrement = ctl->decrement;
	q->max = ctl->max;
	q->bin_size = ctl->bin_size;
	q->penalty_rate = ctl->penalty_rate;
	q->penalty_burst = ctl->penalty_burst;
	q->tokens_avail = ctl->penalty_burst;
	q->token_time = jiffies;

	q->slot = 0;
	q->double_buffering = false;
	sfb_zero_all_buckets(q);
	sfb_init_perturbation(0, q);
	sfb_init_perturbation(1, q);

	sch_tree_unlock(sch);

	return 0;
}

static int sfb_init(struct Qdisc *sch, struct nlattr *opt,
		    struct netlink_ext_ack *extack)
{
	struct sfb_sched_data *q = qdisc_priv(sch);
	int err;

	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
	if (err)
		return err;

	q->qdisc = &noop_qdisc;
	return sfb_change(sch, opt, extack);
}

static int sfb_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct sfb_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts;
	struct tc_sfb_qopt opt = {
		.rehash_interval = jiffies_to_msecs(q->rehash_interval),
		.warmup_time = jiffies_to_msecs(q->warmup_time),
		.limit = q->limit,
		.max = q->max,
		.bin_size = q->bin_size,
		.increment = q->increment,
		.decrement = q->decrement,
		.penalty_rate = q->penalty_rate,
		.penalty_burst = q->penalty_burst,
	};

	sch->qstats.backlog = q->qdisc->qstats.backlog;
	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;
	if (nla_put(skb, TCA_SFB_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;
	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}

static int sfb_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct sfb_sched_data *q = qdisc_priv(sch);
	struct tc_sfb_xstats st = {
		.earlydrop = q->stats.earlydrop,
		.penaltydrop = q->stats.penaltydrop,
		.bucketdrop = q->stats.bucketdrop,
		.queuedrop = q->stats.queuedrop,
		.childdrop = q->stats.childdrop,
		.marked = q->stats.marked,
	};

	st.maxqlen = sfb_compute_qlen(&st.maxprob, &st.avgprob, q);

	return gnet_stats_copy_app(d, &st, sizeof(st));
}
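/* Example userspace configuration (illustrative; parameter names follow
 * the iproute2 tc-sfb(8) syntax as we understand it, and the values are
 * arbitrary):
 *
 *	tc qdisc add dev eth0 root handle 1: sfb limit 200 max 25 \
 *		target 20 penalty_rate 10 penalty_burst 20
 *
 * Unset parameters fall back to sfb_default_ops; "target" corresponds to
 * bin_size above. Statistics appear under "tc -s qdisc show dev eth0".
 */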
static int sfb_dump_class(struct Qdisc *sch, unsigned long cl,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	return -ENOSYS;
}

static int sfb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old, struct netlink_ext_ack *extack)
{
	struct sfb_sched_data *q = qdisc_priv(sch);

	if (new == NULL)
		new = &noop_qdisc;

	*old = qdisc_replace(sch, new, &q->qdisc);
	return 0;
}

static struct Qdisc *sfb_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct sfb_sched_data *q = qdisc_priv(sch);

	return q->qdisc;
}

static unsigned long sfb_find(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void sfb_unbind(struct Qdisc *sch, unsigned long arg)
{
}

static int sfb_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
			    struct nlattr **tca, unsigned long *arg,
			    struct netlink_ext_ack *extack)
{
	return -ENOSYS;
}

static int sfb_delete(struct Qdisc *sch, unsigned long cl)
{
	return -ENOSYS;
}

static void sfb_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}

static struct tcf_block *sfb_tcf_block(struct Qdisc *sch, unsigned long cl,
				       struct netlink_ext_ack *extack)
{
	struct sfb_sched_data *q = qdisc_priv(sch);

	if (cl)
		return NULL;
	return q->block;
}

static unsigned long sfb_bind(struct Qdisc *sch, unsigned long parent,
			      u32 classid)
{
	return 0;
}

static const struct Qdisc_class_ops sfb_class_ops = {
	.graft		= sfb_graft,
	.leaf		= sfb_leaf,
	.find		= sfb_find,
	.change		= sfb_change_class,
	.delete		= sfb_delete,
	.walk		= sfb_walk,
	.tcf_block	= sfb_tcf_block,
	.bind_tcf	= sfb_bind,
	.unbind_tcf	= sfb_unbind,
	.dump		= sfb_dump_class,
};

static struct Qdisc_ops sfb_qdisc_ops __read_mostly = {
	.id		= "sfb",
	.priv_size	= sizeof(struct sfb_sched_data),
	.cl_ops		= &sfb_class_ops,
	.enqueue	= sfb_enqueue,
	.dequeue	= sfb_dequeue,
	.peek		= sfb_peek,
	.init		= sfb_init,
	.reset		= sfb_reset,
	.destroy	= sfb_destroy,
	.change		= sfb_change,
	.dump		= sfb_dump,
	.dump_stats	= sfb_dump_stats,
	.owner		= THIS_MODULE,
};

static int __init sfb_module_init(void)
{
	return register_qdisc(&sfb_qdisc_ops);
}

static void __exit sfb_module_exit(void)
{
	unregister_qdisc(&sfb_qdisc_ops);
}

module_init(sfb_module_init)
module_exit(sfb_module_exit)

MODULE_DESCRIPTION("Stochastic Fair Blue queue discipline");
MODULE_AUTHOR("Juliusz Chroboczek");
MODULE_AUTHOR("Eric Dumazet");
MODULE_LICENSE("GPL");
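/* Build/usage note (assuming a typical kernel configuration): with
 * CONFIG_NET_SCH_SFB=m this file builds as sch_sfb.ko, and the qdisc core
 * auto-loads it on "tc qdisc add ... sfb" via the "sfb" id registered in
 * sfb_qdisc_ops above.
 */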