/*
 * net/sched/sch_sfb.c	  Stochastic Fair Blue
 *
 * Copyright (c) 2008-2011 Juliusz Chroboczek <jch@pps.jussieu.fr>
 * Copyright (c) 2011 Eric Dumazet <eric.dumazet@gmail.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * W. Feng, D. Kandlur, D. Saha, K. Shin. Blue:
 * A New Class of Active Queue Management Algorithms.
 * U. Michigan CSE-TR-387-99, April 1999.
 *
 * http://www.thefengs.com/wuchang/blue/CSE-TR-387-99.pdf
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <net/ip.h>
#include <net/pkt_sched.h>
#include <net/inet_ecn.h>

/*
 * SFB uses two B[l][n] : L x N arrays of bins (L levels, N bins per level)
 * This implementation uses L = 8 and N = 16
 * This permits us to split one 32bit hash (provided per packet by rxhash or
 * external classifier) into 8 subhashes of 4 bits.
 */
#define SFB_BUCKET_SHIFT 4
#define SFB_NUMBUCKETS	(1 << SFB_BUCKET_SHIFT) /* N bins per level */
#define SFB_BUCKET_MASK (SFB_NUMBUCKETS - 1)
#define SFB_LEVELS	(32 / SFB_BUCKET_SHIFT) /* L */
#define SFB_MAX_PROB	0xFFFF	/* maximum marking probability, Q0.16 */
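/* Worked example of the per-level bucket indexing (illustrative hash value,
 * not taken from a real packet): a 32-bit flow hash such as 0xA31B07C4 is
 * consumed 4 bits at a time, least-significant nibble first, so the packet
 * is accounted in bucket 0x4 at level 0, bucket 0xC at level 1, bucket 0x7
 * at level 2, ... and bucket 0xA at level 7. A flow is treated as inelastic
 * only when the buckets it maps to are saturated at *every* level, so a
 * well-behaved flow is misclassified only if it collides with misbehaving
 * traffic in all SFB_LEVELS levels at once.
 */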
/* SFB algo uses a virtual queue, named "bin" */
struct sfb_bucket {
	u16		qlen; /* length of virtual queue */
	u16		p_mark; /* marking probability */
};

/* We use double buffering right before a hash change
 * (Section 4.4 of SFB reference : moving hash functions)
 */
struct sfb_bins {
	u32		  perturbation; /* jhash perturbation */
	struct sfb_bucket bins[SFB_LEVELS][SFB_NUMBUCKETS];
};

struct sfb_sched_data {
	struct Qdisc	*qdisc;
	struct tcf_proto __rcu *filter_list;
	unsigned long	rehash_interval;
	unsigned long	warmup_time;	/* double buffering warmup time in jiffies */
	u32		max;
	u32		bin_size;	/* maximum queue length per bin */
	u32		increment;	/* d1 */
	u32		decrement;	/* d2 */
	u32		limit;		/* HARD maximal queue length */
	u32		penalty_rate;
	u32		penalty_burst;
	u32		tokens_avail;
	unsigned long	rehash_time;
	unsigned long	token_time;

	u8		slot;		/* current active bins (0 or 1) */
	bool		double_buffering;
	struct sfb_bins bins[2];

	struct {
		u32	earlydrop;
		u32	penaltydrop;
		u32	bucketdrop;
		u32	queuedrop;
		u32	childdrop;	/* drops in child qdisc */
		u32	marked;		/* ECN mark */
	} stats;
};

/*
 * Each queued skb might be hashed on one or two bins
 * We store in skb_cb the two hash values.
 * (A zero value means double buffering was not used)
 */
struct sfb_skb_cb {
	u32 hashes[2];
};

static inline struct sfb_skb_cb *sfb_skb_cb(const struct sk_buff *skb)
{
	qdisc_cb_private_validate(skb, sizeof(struct sfb_skb_cb));
	return (struct sfb_skb_cb *)qdisc_skb_cb(skb)->data;
}

/*
 * If using 'internal' SFB flow classifier, hash comes from skb rxhash
 * If using external classifier, hash comes from the classid.
 */
static u32 sfb_hash(const struct sk_buff *skb, u32 slot)
{
	return sfb_skb_cb(skb)->hashes[slot];
}

/* Probabilities are coded as Q0.16 fixed-point values,
 * with 0xFFFF representing 65535/65536 (almost 1.0)
 * Addition and subtraction are saturating in [0, 65535]
 */
static u32 prob_plus(u32 p1, u32 p2)
{
	u32 res = p1 + p2;

	return min_t(u32, res, SFB_MAX_PROB);
}

static u32 prob_minus(u32 p1, u32 p2)
{
	return p1 > p2 ? p1 - p2 : 0;
}
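/* Worked example of the Q0.16 arithmetic above, using the defaults from
 * sfb_default_ops further down: increment = (0xFFFF + 500) / 1000 = 66,
 * so each packet arriving at a full bucket raises p_mark by 66/65536,
 * roughly 0.1%; decrement = (0xFFFF + 3000) / 6000 = 11, roughly 0.017%
 * per packet seen by an empty bucket. Both operations saturate, e.g.
 * prob_plus(65500, 66) yields 0xFFFF, not 65566.
 */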
static void increment_one_qlen(u32 sfbhash, u32 slot, struct sfb_sched_data *q)
{
	int i;
	struct sfb_bucket *b = &q->bins[slot].bins[0][0];

	for (i = 0; i < SFB_LEVELS; i++) {
		u32 hash = sfbhash & SFB_BUCKET_MASK;

		sfbhash >>= SFB_BUCKET_SHIFT;
		if (b[hash].qlen < 0xFFFF)
			b[hash].qlen++;
		b += SFB_NUMBUCKETS; /* next level */
	}
}

static void increment_qlen(const struct sk_buff *skb, struct sfb_sched_data *q)
{
	u32 sfbhash;

	sfbhash = sfb_hash(skb, 0);
	if (sfbhash)
		increment_one_qlen(sfbhash, 0, q);

	sfbhash = sfb_hash(skb, 1);
	if (sfbhash)
		increment_one_qlen(sfbhash, 1, q);
}

static void decrement_one_qlen(u32 sfbhash, u32 slot,
			       struct sfb_sched_data *q)
{
	int i;
	struct sfb_bucket *b = &q->bins[slot].bins[0][0];

	for (i = 0; i < SFB_LEVELS; i++) {
		u32 hash = sfbhash & SFB_BUCKET_MASK;

		sfbhash >>= SFB_BUCKET_SHIFT;
		if (b[hash].qlen > 0)
			b[hash].qlen--;
		b += SFB_NUMBUCKETS; /* next level */
	}
}

static void decrement_qlen(const struct sk_buff *skb, struct sfb_sched_data *q)
{
	u32 sfbhash;

	sfbhash = sfb_hash(skb, 0);
	if (sfbhash)
		decrement_one_qlen(sfbhash, 0, q);

	sfbhash = sfb_hash(skb, 1);
	if (sfbhash)
		decrement_one_qlen(sfbhash, 1, q);
}

static void decrement_prob(struct sfb_bucket *b, struct sfb_sched_data *q)
{
	b->p_mark = prob_minus(b->p_mark, q->decrement);
}

static void increment_prob(struct sfb_bucket *b, struct sfb_sched_data *q)
{
	b->p_mark = prob_plus(b->p_mark, q->increment);
}

static void sfb_zero_all_buckets(struct sfb_sched_data *q)
{
	memset(&q->bins, 0, sizeof(q->bins));
}

/*
 * compute max qlen, max p_mark, and avg p_mark
 */
static u32 sfb_compute_qlen(u32 *prob_r, u32 *avgpm_r, const struct sfb_sched_data *q)
{
	int i;
	u32 qlen = 0, prob = 0, totalpm = 0;
	const struct sfb_bucket *b = &q->bins[q->slot].bins[0][0];

	for (i = 0; i < SFB_LEVELS * SFB_NUMBUCKETS; i++) {
		if (qlen < b->qlen)
			qlen = b->qlen;
		totalpm += b->p_mark;
		if (prob < b->p_mark)
			prob = b->p_mark;
		b++;
	}
	*prob_r = prob;
	*avgpm_r = totalpm / (SFB_LEVELS * SFB_NUMBUCKETS);
	return qlen;
}

static void sfb_init_perturbation(u32 slot, struct sfb_sched_data *q)
{
	q->bins[slot].perturbation = prandom_u32();
}

static void sfb_swap_slot(struct sfb_sched_data *q)
{
	sfb_init_perturbation(q->slot, q);
	q->slot ^= 1;
	q->double_buffering = false;
}

/* Non elastic flows are allowed to use part of the bandwidth, expressed
 * in "penalty_rate" packets per second, with "penalty_burst" burst
 */
static bool sfb_rate_limit(struct sk_buff *skb, struct sfb_sched_data *q)
{
	if (q->penalty_rate == 0 || q->penalty_burst == 0)
		return true;

	if (q->tokens_avail < 1) {
		unsigned long age = min(10UL * HZ, jiffies - q->token_time);

		q->tokens_avail = (age * q->penalty_rate) / HZ;
		if (q->tokens_avail > q->penalty_burst)
			q->tokens_avail = q->penalty_burst;
		q->token_time = jiffies;
		if (q->tokens_avail < 1)
			return true;
	}

	q->tokens_avail--;
	return false;
}
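/* Worked example of the token bucket in sfb_rate_limit() above, using the
 * defaults (penalty_rate = 10, penalty_burst = 20): if an inelastic flow
 * exhausted its tokens 2 seconds ago, the refill computes age = 2 * HZ,
 * then tokens_avail = (2 * HZ * 10) / HZ = 20, which is clamped to
 * penalty_burst = 20. The flow may therefore send a burst of up to 20
 * packets, after which it is held to roughly 10 packets per second.
 */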
static bool sfb_classify(struct sk_buff *skb, struct tcf_proto *fl,
			 int *qerr, u32 *salt)
{
	struct tcf_result res;
	int result;

	result = tc_classify(skb, fl, &res, false);
	if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_STOLEN:
		case TC_ACT_QUEUED:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
		case TC_ACT_SHOT:
			return false;
		}
#endif
		*salt = TC_H_MIN(res.classid);
		return true;
	}
	return false;
}

static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct sfb_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;
	struct tcf_proto *fl;
	int i;
	u32 p_min = ~0;
	u32 minqlen = ~0;
	u32 r, sfbhash;
	u32 slot = q->slot;
	int ret = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;

	if (unlikely(sch->q.qlen >= q->limit)) {
		qdisc_qstats_overlimit(sch);
		q->stats.queuedrop++;
		goto drop;
	}

	if (q->rehash_interval > 0) {
		unsigned long limit = q->rehash_time + q->rehash_interval;

		if (unlikely(time_after(jiffies, limit))) {
			sfb_swap_slot(q);
			q->rehash_time = jiffies;
		} else if (unlikely(!q->double_buffering && q->warmup_time > 0 &&
				    time_after(jiffies, limit - q->warmup_time))) {
			q->double_buffering = true;
		}
	}

	fl = rcu_dereference_bh(q->filter_list);
	if (fl) {
		u32 salt;

		/* If using external classifiers, get result and record it. */
		if (!sfb_classify(skb, fl, &ret, &salt))
			goto other_drop;
		sfbhash = jhash_1word(salt, q->bins[slot].perturbation);
	} else {
		sfbhash = skb_get_hash_perturb(skb, q->bins[slot].perturbation);
	}

	if (!sfbhash)
		sfbhash = 1;
	sfb_skb_cb(skb)->hashes[slot] = sfbhash;

	for (i = 0; i < SFB_LEVELS; i++) {
		u32 hash = sfbhash & SFB_BUCKET_MASK;
		struct sfb_bucket *b = &q->bins[slot].bins[i][hash];

		sfbhash >>= SFB_BUCKET_SHIFT;
		if (b->qlen == 0)
			decrement_prob(b, q);
		else if (b->qlen >= q->bin_size)
			increment_prob(b, q);
		if (minqlen > b->qlen)
			minqlen = b->qlen;
		if (p_min > b->p_mark)
			p_min = b->p_mark;
	}

	slot ^= 1;
	sfb_skb_cb(skb)->hashes[slot] = 0;

	if (unlikely(minqlen >= q->max)) {
		qdisc_qstats_overlimit(sch);
		q->stats.bucketdrop++;
		goto drop;
	}

	if (unlikely(p_min >= SFB_MAX_PROB)) {
		/* Inelastic flow */
		if (q->double_buffering) {
			sfbhash = skb_get_hash_perturb(skb,
						       q->bins[slot].perturbation);
			if (!sfbhash)
				sfbhash = 1;
			sfb_skb_cb(skb)->hashes[slot] = sfbhash;

			for (i = 0; i < SFB_LEVELS; i++) {
				u32 hash = sfbhash & SFB_BUCKET_MASK;
				struct sfb_bucket *b = &q->bins[slot].bins[i][hash];

				sfbhash >>= SFB_BUCKET_SHIFT;
				if (b->qlen == 0)
					decrement_prob(b, q);
				else if (b->qlen >= q->bin_size)
					increment_prob(b, q);
			}
		}
		if (sfb_rate_limit(skb, q)) {
			qdisc_qstats_overlimit(sch);
			q->stats.penaltydrop++;
			goto drop;
		}
		goto enqueue;
	}

	r = prandom_u32() & SFB_MAX_PROB;

	if (unlikely(r < p_min)) {
		if (unlikely(p_min > SFB_MAX_PROB / 2)) {
			/* If we're marking that many packets, then either
			 * this flow is unresponsive, or we're badly congested.
			 * In either case, we want to start dropping packets.
			 */
			if (r < (p_min - SFB_MAX_PROB / 2) * 2) {
				q->stats.earlydrop++;
				goto drop;
			}
		}
		if (INET_ECN_set_ce(skb)) {
			q->stats.marked++;
		} else {
			q->stats.earlydrop++;
			goto drop;
		}
	}

enqueue:
	ret = qdisc_enqueue(skb, child);
	if (likely(ret == NET_XMIT_SUCCESS)) {
		sch->q.qlen++;
		increment_qlen(skb, q);
	} else if (net_xmit_drop_count(ret)) {
		q->stats.childdrop++;
		qdisc_qstats_drop(sch);
	}
	return ret;

drop:
	qdisc_drop(skb, sch);
	return NET_XMIT_CN;
other_drop:
	if (ret & __NET_XMIT_BYPASS)
		qdisc_qstats_drop(sch);
	kfree_skb(skb);
	return ret;
}
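/* Worked example of the early-drop ramp in sfb_enqueue() above: once p_min
 * exceeds 1/2 (32768 in Q0.16), drop probability rises linearly from 0 to 1
 * via r < (p_min - SFB_MAX_PROB / 2) * 2. For p_min = 49152 (0.75), a packet
 * is a mark candidate when r < 49152, and of those the ones with
 * r < (49152 - 32768) * 2 = 32768 are dropped instead of marked, i.e.
 * roughly 50% dropped, 25% ECN-marked, and 25% passed through untouched.
 */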
static struct sk_buff *sfb_dequeue(struct Qdisc *sch)
{
	struct sfb_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;
	struct sk_buff *skb;

	skb = child->dequeue(q->qdisc);

	if (skb) {
		qdisc_bstats_update(sch, skb);
		sch->q.qlen--;
		decrement_qlen(skb, q);
	}

	return skb;
}

static struct sk_buff *sfb_peek(struct Qdisc *sch)
{
	struct sfb_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;

	return child->ops->peek(child);
}

/* No sfb_drop -- impossible since the child doesn't return the dropped skb. */

static void sfb_reset(struct Qdisc *sch)
{
	struct sfb_sched_data *q = qdisc_priv(sch);

	qdisc_reset(q->qdisc);
	sch->q.qlen = 0;
	q->slot = 0;
	q->double_buffering = false;
	sfb_zero_all_buckets(q);
	sfb_init_perturbation(0, q);
}

static void sfb_destroy(struct Qdisc *sch)
{
	struct sfb_sched_data *q = qdisc_priv(sch);

	tcf_destroy_chain(&q->filter_list);
	qdisc_destroy(q->qdisc);
}

static const struct nla_policy sfb_policy[TCA_SFB_MAX + 1] = {
	[TCA_SFB_PARMS]	= { .len = sizeof(struct tc_sfb_qopt) },
};

static const struct tc_sfb_qopt sfb_default_ops = {
	.rehash_interval = 600 * MSEC_PER_SEC,
	.warmup_time = 60 * MSEC_PER_SEC,
	.limit = 0,
	.max = 25,
	.bin_size = 20,
	.increment = (SFB_MAX_PROB + 500) / 1000, /* 0.1 % */
	.decrement = (SFB_MAX_PROB + 3000) / 6000,
	.penalty_rate = 10,
	.penalty_burst = 20,
};
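/* These defaults are what userspace sees through tc(8). A minimal usage
 * sketch (device name illustrative; see tc-sfb(8) for the parameter
 * keywords, which are not defined in this file):
 *
 *   tc qdisc add dev eth0 root sfb
 *   tc -s qdisc show dev eth0
 *
 * The second command prints the tc_sfb_xstats counters filled in by
 * sfb_dump_stats() below. With limit left at 0, sfb_change() falls back
 * to the device's tx_queue_len for the hard queue limit.
 */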
static int sfb_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct sfb_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child;
	struct nlattr *tb[TCA_SFB_MAX + 1];
	const struct tc_sfb_qopt *ctl = &sfb_default_ops;
	u32 limit;
	int err;

	if (opt) {
		err = nla_parse_nested(tb, TCA_SFB_MAX, opt, sfb_policy);
		if (err < 0)
			return -EINVAL;

		if (tb[TCA_SFB_PARMS] == NULL)
			return -EINVAL;

		ctl = nla_data(tb[TCA_SFB_PARMS]);
	}

	limit = ctl->limit;
	if (limit == 0)
		limit = qdisc_dev(sch)->tx_queue_len;

	child = fifo_create_dflt(sch, &pfifo_qdisc_ops, limit);
	if (IS_ERR(child))
		return PTR_ERR(child);

	sch_tree_lock(sch);

	qdisc_tree_reduce_backlog(q->qdisc, q->qdisc->q.qlen,
				  q->qdisc->qstats.backlog);
	qdisc_destroy(q->qdisc);
	q->qdisc = child;

	q->rehash_interval = msecs_to_jiffies(ctl->rehash_interval);
	q->warmup_time = msecs_to_jiffies(ctl->warmup_time);
	q->rehash_time = jiffies;
	q->limit = limit;
	q->increment = ctl->increment;
	q->decrement = ctl->decrement;
	q->max = ctl->max;
	q->bin_size = ctl->bin_size;
	q->penalty_rate = ctl->penalty_rate;
	q->penalty_burst = ctl->penalty_burst;
	q->tokens_avail = ctl->penalty_burst;
	q->token_time = jiffies;

	q->slot = 0;
	q->double_buffering = false;
	sfb_zero_all_buckets(q);
	sfb_init_perturbation(0, q);
	sfb_init_perturbation(1, q);

	sch_tree_unlock(sch);

	return 0;
}

static int sfb_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct sfb_sched_data *q = qdisc_priv(sch);

	q->qdisc = &noop_qdisc;
	return sfb_change(sch, opt);
}

static int sfb_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct sfb_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts;
	struct tc_sfb_qopt opt = {
		.rehash_interval = jiffies_to_msecs(q->rehash_interval),
		.warmup_time = jiffies_to_msecs(q->warmup_time),
		.limit = q->limit,
		.max = q->max,
		.bin_size = q->bin_size,
		.increment = q->increment,
		.decrement = q->decrement,
		.penalty_rate = q->penalty_rate,
		.penalty_burst = q->penalty_burst,
	};

	sch->qstats.backlog = q->qdisc->qstats.backlog;
	opts = nla_nest_start(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;
	if (nla_put(skb, TCA_SFB_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;
	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}

static int sfb_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct sfb_sched_data *q = qdisc_priv(sch);
	struct tc_sfb_xstats st = {
		.earlydrop = q->stats.earlydrop,
		.penaltydrop = q->stats.penaltydrop,
		.bucketdrop = q->stats.bucketdrop,
		.queuedrop = q->stats.queuedrop,
		.childdrop = q->stats.childdrop,
		.marked = q->stats.marked,
	};

	st.maxqlen = sfb_compute_qlen(&st.maxprob, &st.avgprob, q);

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

static int sfb_dump_class(struct Qdisc *sch, unsigned long cl,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	return -ENOSYS;
}

static int sfb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old)
{
	struct sfb_sched_data *q = qdisc_priv(sch);

	if (new == NULL)
		new = &noop_qdisc;

	*old = qdisc_replace(sch, new, &q->qdisc);
	return 0;
}

static struct Qdisc *sfb_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct sfb_sched_data *q = qdisc_priv(sch);

	return q->qdisc;
}

static unsigned long sfb_get(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void sfb_put(struct Qdisc *sch, unsigned long arg)
{
}

static int sfb_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
			    struct nlattr **tca, unsigned long *arg)
{
	return -ENOSYS;
}

static int sfb_delete(struct Qdisc *sch, unsigned long cl)
{
	return -ENOSYS;
}

static void sfb_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}

static struct tcf_proto __rcu **sfb_find_tcf(struct Qdisc *sch,
					     unsigned long cl)
{
	struct sfb_sched_data *q = qdisc_priv(sch);

	if (cl)
		return NULL;
	return &q->filter_list;
}

static unsigned long sfb_bind(struct Qdisc *sch, unsigned long parent,
			      u32 classid)
{
	return 0;
}

static const struct Qdisc_class_ops sfb_class_ops = {
	.graft		= sfb_graft,
	.leaf		= sfb_leaf,
	.get		= sfb_get,
	.put		= sfb_put,
	.change		= sfb_change_class,
	.delete		= sfb_delete,
	.walk		= sfb_walk,
	.tcf_chain	= sfb_find_tcf,
	.bind_tcf	= sfb_bind,
	.unbind_tcf	= sfb_put,
	.dump		= sfb_dump_class,
};

static struct Qdisc_ops sfb_qdisc_ops __read_mostly = {
	.id		= "sfb",
	.priv_size	= sizeof(struct sfb_sched_data),
	.cl_ops		= &sfb_class_ops,
	.enqueue	= sfb_enqueue,
	.dequeue	= sfb_dequeue,
	.peek		= sfb_peek,
	.init		= sfb_init,
	.reset		= sfb_reset,
	.destroy	= sfb_destroy,
	.change		= sfb_change,
	.dump		= sfb_dump,
	.dump_stats	= sfb_dump_stats,
	.owner		= THIS_MODULE,
};

static int __init sfb_module_init(void)
{
	return register_qdisc(&sfb_qdisc_ops);
}

static void __exit sfb_module_exit(void)
{
	unregister_qdisc(&sfb_qdisc_ops);
}

module_init(sfb_module_init)
module_exit(sfb_module_exit)

MODULE_DESCRIPTION("Stochastic Fair Blue queue discipline");
MODULE_AUTHOR("Juliusz Chroboczek");
MODULE_AUTHOR("Eric Dumazet");
MODULE_LICENSE("GPL");