/*
 * net/sched/sch_sfb.c	  Stochastic Fair Blue
 *
 * Copyright (c) 2008-2011 Juliusz Chroboczek <jch@pps.jussieu.fr>
 * Copyright (c) 2011 Eric Dumazet <eric.dumazet@gmail.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * W. Feng, D. Kandlur, D. Saha, K. Shin. Blue:
 * A New Class of Active Queue Management Algorithms.
 * U. Michigan CSE-TR-387-99, April 1999.
 *
 * http://www.thefengs.com/wuchang/blue/CSE-TR-387-99.pdf
 *
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <net/ip.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/inet_ecn.h>

/*
 * SFB uses two B[l][n] : L x N arrays of bins (L levels, N bins per level)
 * This implementation uses L = 8 and N = 16
 * This permits us to split one 32bit hash (provided per packet by rxhash or
 * external classifier) into 8 subhashes of 4 bits.
 */
#define SFB_BUCKET_SHIFT 4
#define SFB_NUMBUCKETS	(1 << SFB_BUCKET_SHIFT) /* N bins per Level */
#define SFB_BUCKET_MASK (SFB_NUMBUCKETS - 1)
#define SFB_LEVELS	(32 / SFB_BUCKET_SHIFT) /* L */

/* SFB algo uses a virtual queue, named "bin" */
struct sfb_bucket {
	u16	qlen;	/* length of virtual queue */
	u16	p_mark;	/* marking probability */
};

/* We use double buffering right before a hash change
 * (Section 4.4 of SFB reference : moving hash functions)
 */
struct sfb_bins {
	u32		  perturbation;	/* jhash perturbation */
	struct sfb_bucket bins[SFB_LEVELS][SFB_NUMBUCKETS];
};

struct sfb_sched_data {
	struct Qdisc	*qdisc;
	struct tcf_proto __rcu *filter_list;
	unsigned long	rehash_interval;
	unsigned long	warmup_time;	/* double buffering warmup time in jiffies */
	u32		max;
	u32		bin_size;	/* maximum queue length per bin */
	u32		increment;	/* d1 */
	u32		decrement;	/* d2 */
	u32		limit;		/* HARD maximal queue length */
	u32		penalty_rate;
	u32		penalty_burst;
	u32		tokens_avail;
	unsigned long	rehash_time;
	unsigned long	token_time;

	u8		slot;		/* current active bins (0 or 1) */
	bool		double_buffering;
	struct sfb_bins bins[2];

	struct {
		u32	earlydrop;
		u32	penaltydrop;
		u32	bucketdrop;
		u32	queuedrop;
		u32	childdrop;	/* drops in child qdisc */
		u32	marked;		/* ECN mark */
	} stats;
};

/*
 * Each queued skb might be hashed on one or two bins
 * We store in skb_cb the two hash values.
 * (A zero value means double buffering was not used)
 */
struct sfb_skb_cb {
	u32 hashes[2];
};

static inline struct sfb_skb_cb *sfb_skb_cb(const struct sk_buff *skb)
{
	qdisc_cb_private_validate(skb, sizeof(struct sfb_skb_cb));
	return (struct sfb_skb_cb *)qdisc_skb_cb(skb)->data;
}
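/* Worked example (illustrative values): with SFB_BUCKET_SHIFT == 4 and
 * SFB_LEVELS == 8, a 32bit flow hash such as 0xA3B1C2D4 is consumed
 * four bits at a time, lowest bits first, giving per-level bucket
 * indexes:
 *
 *	level :  0  1  2  3  4  5  6  7
 *	bucket:  4  D  2  C  1  B  3  A
 *
 * Every flow thus maps to exactly one bucket per level, and two flows
 * must share buckets on all SFB_LEVELS levels before they can be
 * mistaken for each other.
 */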
/*
 * If using 'internal' SFB flow classifier, hash comes from skb rxhash
 * If using external classifier, hash comes from the classid.
 */
static u32 sfb_hash(const struct sk_buff *skb, u32 slot)
{
	return sfb_skb_cb(skb)->hashes[slot];
}

/* Probabilities are coded as Q0.16 fixed-point values,
 * with 0xFFFF representing 65535/65536 (almost 1.0)
 * Addition and subtraction are saturating in [0, 65535]
 */
static u32 prob_plus(u32 p1, u32 p2)
{
	u32 res = p1 + p2;

	return min_t(u32, res, SFB_MAX_PROB);
}

static u32 prob_minus(u32 p1, u32 p2)
{
	return p1 > p2 ? p1 - p2 : 0;
}

static void increment_one_qlen(u32 sfbhash, u32 slot, struct sfb_sched_data *q)
{
	int i;
	struct sfb_bucket *b = &q->bins[slot].bins[0][0];

	for (i = 0; i < SFB_LEVELS; i++) {
		u32 hash = sfbhash & SFB_BUCKET_MASK;

		sfbhash >>= SFB_BUCKET_SHIFT;
		if (b[hash].qlen < 0xFFFF)
			b[hash].qlen++;
		b += SFB_NUMBUCKETS; /* next level */
	}
}

static void increment_qlen(const struct sk_buff *skb, struct sfb_sched_data *q)
{
	u32 sfbhash;

	sfbhash = sfb_hash(skb, 0);
	if (sfbhash)
		increment_one_qlen(sfbhash, 0, q);

	sfbhash = sfb_hash(skb, 1);
	if (sfbhash)
		increment_one_qlen(sfbhash, 1, q);
}

static void decrement_one_qlen(u32 sfbhash, u32 slot,
			       struct sfb_sched_data *q)
{
	int i;
	struct sfb_bucket *b = &q->bins[slot].bins[0][0];

	for (i = 0; i < SFB_LEVELS; i++) {
		u32 hash = sfbhash & SFB_BUCKET_MASK;

		sfbhash >>= SFB_BUCKET_SHIFT;
		if (b[hash].qlen > 0)
			b[hash].qlen--;
		b += SFB_NUMBUCKETS; /* next level */
	}
}

static void decrement_qlen(const struct sk_buff *skb, struct sfb_sched_data *q)
{
	u32 sfbhash;

	sfbhash = sfb_hash(skb, 0);
	if (sfbhash)
		decrement_one_qlen(sfbhash, 0, q);

	sfbhash = sfb_hash(skb, 1);
	if (sfbhash)
		decrement_one_qlen(sfbhash, 1, q);
}

static void decrement_prob(struct sfb_bucket *b, struct sfb_sched_data *q)
{
	b->p_mark = prob_minus(b->p_mark, q->decrement);
}

static void increment_prob(struct sfb_bucket *b, struct sfb_sched_data *q)
{
	b->p_mark = prob_plus(b->p_mark, q->increment);
}

static void sfb_zero_all_buckets(struct sfb_sched_data *q)
{
	memset(&q->bins, 0, sizeof(q->bins));
}

/*
 * compute max qlen, max p_mark, and avg p_mark
 */
static u32 sfb_compute_qlen(u32 *prob_r, u32 *avgpm_r,
			    const struct sfb_sched_data *q)
{
	int i;
	u32 qlen = 0, prob = 0, totalpm = 0;
	const struct sfb_bucket *b = &q->bins[q->slot].bins[0][0];

	for (i = 0; i < SFB_LEVELS * SFB_NUMBUCKETS; i++) {
		if (qlen < b->qlen)
			qlen = b->qlen;
		totalpm += b->p_mark;
		if (prob < b->p_mark)
			prob = b->p_mark;
		b++;
	}
	*prob_r = prob;
	*avgpm_r = totalpm / (SFB_LEVELS * SFB_NUMBUCKETS);
	return qlen;
}

static void sfb_init_perturbation(u32 slot, struct sfb_sched_data *q)
{
	q->bins[slot].perturbation = prandom_u32();
}

static void sfb_swap_slot(struct sfb_sched_data *q)
{
	sfb_init_perturbation(q->slot, q);
	q->slot ^= 1;
	q->double_buffering = false;
}
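/* Moving hash functions (Section 4.4 of the SFB reference), sketched
 * here with the compiled-in defaults (rehash_interval = 600 s,
 * warmup_time = 60 s); the timing is driven from sfb_enqueue():
 *
 *	t = 540 s : double_buffering = true; packets are additionally
 *		    hashed into the inactive slot so its bins warm up
 *	t = 600 s : sfb_swap_slot() re-seeds the old slot's perturbation
 *		    and makes the warmed-up spare slot active
 */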
/* Non-elastic flows are allowed to use part of the bandwidth, expressed
 * in "penalty_rate" packets per second, with "penalty_burst" burst
 */
static bool sfb_rate_limit(struct sk_buff *skb, struct sfb_sched_data *q)
{
	if (q->penalty_rate == 0 || q->penalty_burst == 0)
		return true;

	if (q->tokens_avail < 1) {
		unsigned long age = min(10UL * HZ, jiffies - q->token_time);

		q->tokens_avail = (age * q->penalty_rate) / HZ;
		if (q->tokens_avail > q->penalty_burst)
			q->tokens_avail = q->penalty_burst;
		q->token_time = jiffies;
		if (q->tokens_avail < 1)
			return true;
	}

	q->tokens_avail--;
	return false;
}

static bool sfb_classify(struct sk_buff *skb, struct tcf_proto *fl,
			 int *qerr, u32 *salt)
{
	struct tcf_result res;
	int result;

	result = tc_classify(skb, fl, &res, false);
	if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_STOLEN:
		case TC_ACT_QUEUED:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
			/* fall through */
		case TC_ACT_SHOT:
			return false;
		}
#endif
		*salt = TC_H_MIN(res.classid);
		return true;
	}
	return false;
}
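/* Enqueue decision summary for sfb_enqueue() below, in order:
 *
 * 1. sch->q.qlen >= limit            -> queuedrop (hard limit)
 * 2. minqlen over all levels >= max  -> bucketdrop (every bucket full)
 * 3. p_min saturated at SFB_MAX_PROB -> flow deemed inelastic; only the
 *    sfb_rate_limit() token bucket above lets its packets through
 * 4. otherwise mark (or drop) with probability p_min; if p_min exceeds
 *    1/2 the excess becomes early drops, e.g. p_min = 0.75 yields a
 *    drop probability of 0.5 and a mark probability of 0.25.
 */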
static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
		       struct sk_buff **to_free)
{
	struct sfb_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;
	struct tcf_proto *fl;
	int i;
	u32 p_min = ~0;
	u32 minqlen = ~0;
	u32 r, sfbhash;
	u32 slot = q->slot;
	int ret = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;

	if (unlikely(sch->q.qlen >= q->limit)) {
		qdisc_qstats_overlimit(sch);
		q->stats.queuedrop++;
		goto drop;
	}

	if (q->rehash_interval > 0) {
		unsigned long limit = q->rehash_time + q->rehash_interval;

		if (unlikely(time_after(jiffies, limit))) {
			sfb_swap_slot(q);
			q->rehash_time = jiffies;
		} else if (unlikely(!q->double_buffering && q->warmup_time > 0 &&
				    time_after(jiffies,
					       limit - q->warmup_time))) {
			q->double_buffering = true;
		}
	}

	fl = rcu_dereference_bh(q->filter_list);
	if (fl) {
		u32 salt;

		/* If using external classifiers, get result and record it. */
		if (!sfb_classify(skb, fl, &ret, &salt))
			goto other_drop;
		sfbhash = jhash_1word(salt, q->bins[slot].perturbation);
	} else {
		sfbhash = skb_get_hash_perturb(skb, q->bins[slot].perturbation);
	}

	if (!sfbhash)
		sfbhash = 1;
	sfb_skb_cb(skb)->hashes[slot] = sfbhash;

	for (i = 0; i < SFB_LEVELS; i++) {
		u32 hash = sfbhash & SFB_BUCKET_MASK;
		struct sfb_bucket *b = &q->bins[slot].bins[i][hash];

		sfbhash >>= SFB_BUCKET_SHIFT;
		if (b->qlen == 0)
			decrement_prob(b, q);
		else if (b->qlen >= q->bin_size)
			increment_prob(b, q);
		if (minqlen > b->qlen)
			minqlen = b->qlen;
		if (p_min > b->p_mark)
			p_min = b->p_mark;
	}

	slot ^= 1;
	sfb_skb_cb(skb)->hashes[slot] = 0;

	if (unlikely(minqlen >= q->max)) {
		qdisc_qstats_overlimit(sch);
		q->stats.bucketdrop++;
		goto drop;
	}

	if (unlikely(p_min >= SFB_MAX_PROB)) {
		/* Inelastic flow */
		if (q->double_buffering) {
			sfbhash = skb_get_hash_perturb(skb,
						       q->bins[slot].perturbation);
			if (!sfbhash)
				sfbhash = 1;
			sfb_skb_cb(skb)->hashes[slot] = sfbhash;

			for (i = 0; i < SFB_LEVELS; i++) {
				u32 hash = sfbhash & SFB_BUCKET_MASK;
				struct sfb_bucket *b = &q->bins[slot].bins[i][hash];

				sfbhash >>= SFB_BUCKET_SHIFT;
				if (b->qlen == 0)
					decrement_prob(b, q);
				else if (b->qlen >= q->bin_size)
					increment_prob(b, q);
			}
		}
		if (sfb_rate_limit(skb, q)) {
			qdisc_qstats_overlimit(sch);
			q->stats.penaltydrop++;
			goto drop;
		}
		goto enqueue;
	}

	r = prandom_u32() & SFB_MAX_PROB;

	if (unlikely(r < p_min)) {
		if (unlikely(p_min > SFB_MAX_PROB / 2)) {
			/* If we're marking that many packets, then either
			 * this flow is unresponsive, or we're badly congested.
			 * In either case, we want to start dropping packets.
			 */
			if (r < (p_min - SFB_MAX_PROB / 2) * 2) {
				q->stats.earlydrop++;
				goto drop;
			}
		}
		if (INET_ECN_set_ce(skb)) {
			q->stats.marked++;
		} else {
			q->stats.earlydrop++;
			goto drop;
		}
	}

enqueue:
	ret = qdisc_enqueue(skb, child, to_free);
	if (likely(ret == NET_XMIT_SUCCESS)) {
		qdisc_qstats_backlog_inc(sch, skb);
		sch->q.qlen++;
		increment_qlen(skb, q);
	} else if (net_xmit_drop_count(ret)) {
		q->stats.childdrop++;
		qdisc_qstats_drop(sch);
	}
	return ret;

drop:
	qdisc_drop(skb, sch, to_free);
	return NET_XMIT_CN;
other_drop:
	if (ret & __NET_XMIT_BYPASS)
		qdisc_qstats_drop(sch);
	kfree_skb(skb);
	return ret;
}

static struct sk_buff *sfb_dequeue(struct Qdisc *sch)
{
	struct sfb_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;
	struct sk_buff *skb;

	skb = child->dequeue(q->qdisc);

	if (skb) {
		qdisc_bstats_update(sch, skb);
		qdisc_qstats_backlog_dec(sch, skb);
		sch->q.qlen--;
		decrement_qlen(skb, q);
	}

	return skb;
}
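/* Note: dequeue accounting relies on the one or two hashes saved in the
 * skb cb at enqueue time, so decrement_qlen() hits exactly the buckets
 * that increment_qlen() touched, even if a slot swap happened while the
 * packet sat in the child qdisc.
 */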
static struct sk_buff *sfb_peek(struct Qdisc *sch)
{
	struct sfb_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;

	return child->ops->peek(child);
}

/* No sfb_drop -- impossible since the child doesn't return the dropped skb.
 */

static void sfb_reset(struct Qdisc *sch)
{
	struct sfb_sched_data *q = qdisc_priv(sch);

	qdisc_reset(q->qdisc);
	sch->qstats.backlog = 0;
	sch->q.qlen = 0;
	q->slot = 0;
	q->double_buffering = false;
	sfb_zero_all_buckets(q);
	sfb_init_perturbation(0, q);
}

static void sfb_destroy(struct Qdisc *sch)
{
	struct sfb_sched_data *q = qdisc_priv(sch);

	tcf_destroy_chain(&q->filter_list);
	qdisc_destroy(q->qdisc);
}

static const struct nla_policy sfb_policy[TCA_SFB_MAX + 1] = {
	[TCA_SFB_PARMS]	= { .len = sizeof(struct tc_sfb_qopt) },
};

static const struct tc_sfb_qopt sfb_default_ops = {
	.rehash_interval = 600 * MSEC_PER_SEC,
	.warmup_time = 60 * MSEC_PER_SEC,
	.limit = 0,
	.max = 25,
	.bin_size = 20,
	.increment = (SFB_MAX_PROB + 500) / 1000, /* 0.1 % */
	.decrement = (SFB_MAX_PROB + 3000) / 6000, /* ~0.017 % */
	.penalty_rate = 10,
	.penalty_burst = 20,
};
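/* With the defaults above, p_mark moves in Q0.16 steps of
 * increment = (0xFFFF + 500) / 1000 = 66   (66/65536 ~= 0.1 %) and
 * decrement = (0xFFFF + 3000) / 6000 = 11  (11/65536 ~= 0.017 %),
 * so a bucket that stays above bin_size saturates its marking
 * probability after roughly 65536/66 ~= 1000 over-limit packets.
 */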
static int sfb_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct sfb_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child;
	struct nlattr *tb[TCA_SFB_MAX + 1];
	const struct tc_sfb_qopt *ctl = &sfb_default_ops;
	u32 limit;
	int err;

	if (opt) {
		err = nla_parse_nested(tb, TCA_SFB_MAX, opt, sfb_policy);
		if (err < 0)
			return -EINVAL;

		if (tb[TCA_SFB_PARMS] == NULL)
			return -EINVAL;

		ctl = nla_data(tb[TCA_SFB_PARMS]);
	}

	limit = ctl->limit;
	if (limit == 0)
		limit = qdisc_dev(sch)->tx_queue_len;

	child = fifo_create_dflt(sch, &pfifo_qdisc_ops, limit);
	if (IS_ERR(child))
		return PTR_ERR(child);

	sch_tree_lock(sch);

	qdisc_tree_reduce_backlog(q->qdisc, q->qdisc->q.qlen,
				  q->qdisc->qstats.backlog);
	qdisc_destroy(q->qdisc);
	q->qdisc = child;

	q->rehash_interval = msecs_to_jiffies(ctl->rehash_interval);
	q->warmup_time = msecs_to_jiffies(ctl->warmup_time);
	q->rehash_time = jiffies;
	q->limit = limit;
	q->increment = ctl->increment;
	q->decrement = ctl->decrement;
	q->max = ctl->max;
	q->bin_size = ctl->bin_size;
	q->penalty_rate = ctl->penalty_rate;
	q->penalty_burst = ctl->penalty_burst;
	q->tokens_avail = ctl->penalty_burst;
	q->token_time = jiffies;

	q->slot = 0;
	q->double_buffering = false;
	sfb_zero_all_buckets(q);
	sfb_init_perturbation(0, q);
	sfb_init_perturbation(1, q);

	sch_tree_unlock(sch);

	return 0;
}

static int sfb_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct sfb_sched_data *q = qdisc_priv(sch);

	q->qdisc = &noop_qdisc;
	return sfb_change(sch, opt);
}

static int sfb_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct sfb_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts;
	struct tc_sfb_qopt opt = {
		.rehash_interval = jiffies_to_msecs(q->rehash_interval),
		.warmup_time = jiffies_to_msecs(q->warmup_time),
		.limit = q->limit,
		.max = q->max,
		.bin_size = q->bin_size,
		.increment = q->increment,
		.decrement = q->decrement,
		.penalty_rate = q->penalty_rate,
		.penalty_burst = q->penalty_burst,
	};

	sch->qstats.backlog = q->qdisc->qstats.backlog;
	opts = nla_nest_start(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;
	if (nla_put(skb, TCA_SFB_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;
	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}

static int sfb_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct sfb_sched_data *q = qdisc_priv(sch);
	struct tc_sfb_xstats st = {
		.earlydrop = q->stats.earlydrop,
		.penaltydrop = q->stats.penaltydrop,
		.bucketdrop = q->stats.bucketdrop,
		.queuedrop = q->stats.queuedrop,
		.childdrop = q->stats.childdrop,
		.marked = q->stats.marked,
	};

	st.maxqlen = sfb_compute_qlen(&st.maxprob, &st.avgprob, q);

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

static int sfb_dump_class(struct Qdisc *sch, unsigned long cl,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	return -ENOSYS;
}

static int sfb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old)
{
	struct sfb_sched_data *q = qdisc_priv(sch);

	if (new == NULL)
		new = &noop_qdisc;

	*old = qdisc_replace(sch, new, &q->qdisc);
	return 0;
}

static struct Qdisc *sfb_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct sfb_sched_data *q = qdisc_priv(sch);

	return q->qdisc;
}

static unsigned long sfb_get(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void sfb_put(struct Qdisc *sch, unsigned long arg)
{
}

static int sfb_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
			    struct nlattr **tca, unsigned long *arg)
{
	return -ENOSYS;
}

static int sfb_delete(struct Qdisc *sch, unsigned long cl)
{
	return -ENOSYS;
}

static void sfb_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}

static struct tcf_proto __rcu **sfb_find_tcf(struct Qdisc *sch,
					     unsigned long cl)
{
	struct sfb_sched_data *q = qdisc_priv(sch);

	if (cl)
		return NULL;
	return &q->filter_list;
}

static unsigned long sfb_bind(struct Qdisc *sch, unsigned long parent,
			      u32 classid)
{
	return 0;
}

static const struct Qdisc_class_ops sfb_class_ops = {
	.graft		= sfb_graft,
	.leaf		= sfb_leaf,
	.get		= sfb_get,
	.put		= sfb_put,
	.change		= sfb_change_class,
	.delete		= sfb_delete,
	.walk		= sfb_walk,
	.tcf_chain	= sfb_find_tcf,
	.bind_tcf	= sfb_bind,
	.unbind_tcf	= sfb_put,
	.dump		= sfb_dump_class,
};

static struct Qdisc_ops sfb_qdisc_ops __read_mostly = {
	.id		= "sfb",
	.priv_size	= sizeof(struct sfb_sched_data),
	.cl_ops		= &sfb_class_ops,
	.enqueue	= sfb_enqueue,
	.dequeue	= sfb_dequeue,
	.peek		= sfb_peek,
	.init		= sfb_init,
	.reset		= sfb_reset,
	.destroy	= sfb_destroy,
	.change		= sfb_change,
	.dump		= sfb_dump,
	.dump_stats	= sfb_dump_stats,
	.owner		= THIS_MODULE,
};

static int __init sfb_module_init(void)
{
	return register_qdisc(&sfb_qdisc_ops);
}

static void __exit sfb_module_exit(void)
{
	unregister_qdisc(&sfb_qdisc_ops);
}

module_init(sfb_module_init)
module_exit(sfb_module_exit)

MODULE_DESCRIPTION("Stochastic Fair Blue queue discipline");
MODULE_AUTHOR("Juliusz Chroboczek");
MODULE_AUTHOR("Eric Dumazet");
MODULE_LICENSE("GPL");
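/*
 * Example setup from user space (illustrative only; option names follow
 * iproute2's tc-sfb(8) and may differ between versions):
 *
 *	tc qdisc add dev eth0 root sfb limit 1000 max 25 target 20 \
 *		penalty_rate 10 penalty_burst 20
 */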