/*
 * net/sched/sch_fq.c Fair Queue Packet Scheduler (per flow pacing)
 *
 *  Copyright (C) 2013 Eric Dumazet <edumazet@google.com>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *  Meant to be used mostly for locally generated traffic:
 *  Fast classification depends on skb->sk being set before reaching us.
 *  If not (router workload), we use rxhash as fallback, with a 32 bit wide hash.
 *  All packets belonging to a socket are considered as a 'flow'.
 *
 *  Flows are dynamically allocated and stored in a hash table of RB trees.
 *  They are also part of one Round Robin list (new or old flows).
 *
 *  Burst avoidance (aka pacing) capability:
 *
 *  Transport (eg TCP) can set in sk->sk_pacing_rate a rate, enqueue a
 *  bunch of packets, and this packet scheduler adds delay between
 *  packets to respect rate limitation.
 *
 *  enqueue():
 *   - lookup one RB tree (out of 1024 or more) to find the flow.
 *     If the flow does not exist, create it and add it to the tree.
 *     Add the skb to the per flow list of skbs (fifo).
 *   - Use a special fifo for high prio packets.
 *
 *  dequeue(): serves flows in Round Robin.
 *  Note: when a flow becomes empty, we do not immediately remove it from
 *  the rb trees, for performance reasons (it is expected to send additional
 *  packets, or the SLAB cache will reuse the socket for another flow).
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/hash.h>
#include <linux/prefetch.h>
#include <linux/vmalloc.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/sock.h>
#include <net/tcp_states.h>

/*
 * Per flow structure, dynamically allocated
 */
struct fq_flow {
	struct sk_buff	*head;		/* list of skbs for this flow: first skb */
	union {
		struct sk_buff *tail;	/* last skb in the list */
		unsigned long  age;	/* jiffies when flow was emptied, for gc */
	};
	struct rb_node	fq_node;	/* anchor in fq_root[] trees */
	struct sock	*sk;
	int		qlen;		/* number of packets in flow queue */
	int		credit;
	u32		socket_hash;	/* sk_hash */
	struct fq_flow *next;		/* next pointer in RR lists, or &detached */

	struct rb_node	rate_node;	/* anchor in q->delayed tree */
	u64		time_next_packet;
};

struct fq_flow_head {
	struct fq_flow *first;
	struct fq_flow *last;
};

struct fq_sched_data {
	struct fq_flow_head new_flows;

	struct fq_flow_head old_flows;

	struct rb_root	delayed;	/* for rate limited flows */
	u64		time_next_delayed_flow;

	struct fq_flow	internal;	/* for non classified or high prio packets */
	u32		quantum;
	u32		initial_quantum;
	u32		flow_refill_delay;
	u32		flow_max_rate;	/* optional max rate per flow */
	u32		flow_plimit;	/* max packets per flow */
	struct rb_root	*fq_root;
	u8		rate_enable;
	u8		fq_trees_log;

	u32		flows;
	u32		inactive_flows;
	u32		throttled_flows;

	u64		stat_gc_flows;
	u64		stat_internal_packets;
	u64		stat_tcp_retrans;
	u64		stat_throttled;
	u64		stat_flows_plimit;
	u64		stat_pkts_too_long;
	u64		stat_allocation_errors;
	struct qdisc_watchdog watchdog;
};
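/* Rough sizing note, assuming a 64-bit build: with the default
 * fq_trees_log of ilog2(1024), fq_root points at 1024 struct rb_root
 * buckets, and since an rb_root is a single pointer the whole table is
 * only 8 KB. Each flow then costs one struct fq_flow allocated from
 * fq_flow_cachep, so the buckets are cheap next to the flows themselves.
 */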
/* special value to mark a detached flow (not on old/new list) */
static struct fq_flow detached, throttled;

static void fq_flow_set_detached(struct fq_flow *f)
{
	f->next = &detached;
	f->age = jiffies;
}

static bool fq_flow_is_detached(const struct fq_flow *f)
{
	return f->next == &detached;
}

static void fq_flow_set_throttled(struct fq_sched_data *q, struct fq_flow *f)
{
	struct rb_node **p = &q->delayed.rb_node, *parent = NULL;

	while (*p) {
		struct fq_flow *aux;

		parent = *p;
		aux = container_of(parent, struct fq_flow, rate_node);
		if (f->time_next_packet >= aux->time_next_packet)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&f->rate_node, parent, p);
	rb_insert_color(&f->rate_node, &q->delayed);
	q->throttled_flows++;
	q->stat_throttled++;

	f->next = &throttled;
	if (q->time_next_delayed_flow > f->time_next_packet)
		q->time_next_delayed_flow = f->time_next_packet;
}


static struct kmem_cache *fq_flow_cachep __read_mostly;

static void fq_flow_add_tail(struct fq_flow_head *head, struct fq_flow *flow)
{
	if (head->first)
		head->last->next = flow;
	else
		head->first = flow;
	head->last = flow;
	flow->next = NULL;
}

/* limit number of collected flows per round */
#define FQ_GC_MAX 8
#define FQ_GC_AGE (3*HZ)

static bool fq_gc_candidate(const struct fq_flow *f)
{
	return fq_flow_is_detached(f) &&
	       time_after(jiffies, f->age + FQ_GC_AGE);
}

static void fq_gc(struct fq_sched_data *q,
		  struct rb_root *root,
		  struct sock *sk)
{
	struct fq_flow *f, *tofree[FQ_GC_MAX];
	struct rb_node **p, *parent;
	int fcnt = 0;

	p = &root->rb_node;
	parent = NULL;
	while (*p) {
		parent = *p;

		f = container_of(parent, struct fq_flow, fq_node);
		if (f->sk == sk)
			break;

		if (fq_gc_candidate(f)) {
			tofree[fcnt++] = f;
			if (fcnt == FQ_GC_MAX)
				break;
		}

		if (f->sk > sk)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}

	q->flows -= fcnt;
	q->inactive_flows -= fcnt;
	q->stat_gc_flows += fcnt;
	while (fcnt) {
		struct fq_flow *f = tofree[--fcnt];

		rb_erase(&f->fq_node, root);
		kmem_cache_free(fq_flow_cachep, f);
	}
}
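/* Garbage collection is opportunistic: fq_classify() below calls fq_gc()
 * only when the table looks oversized (q->flows is at least twice the
 * number of buckets) and more than half of the flows are inactive. One
 * walk then reclaims at most FQ_GC_MAX (8) flows that have been detached
 * for longer than FQ_GC_AGE (3 seconds), keeping lookup cost bounded.
 */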
static struct fq_flow *fq_classify(struct sk_buff *skb, struct fq_sched_data *q)
{
	struct rb_node **p, *parent;
	struct sock *sk = skb->sk;
	struct rb_root *root;
	struct fq_flow *f;

	/* warning: no starvation prevention... */
	if (unlikely((skb->priority & TC_PRIO_MAX) == TC_PRIO_CONTROL))
		return &q->internal;

	if (unlikely(!sk)) {
		/* By forcing low order bit to 1, we make sure to not
		 * collide with a local flow (socket pointers are word aligned)
		 */
		sk = (struct sock *)(skb_get_hash(skb) | 1L);
	}

	root = &q->fq_root[hash_32((u32)(long)sk, q->fq_trees_log)];

	if (q->flows >= (2U << q->fq_trees_log) &&
	    q->inactive_flows > q->flows/2)
		fq_gc(q, root, sk);

	p = &root->rb_node;
	parent = NULL;
	while (*p) {
		parent = *p;

		f = container_of(parent, struct fq_flow, fq_node);
		if (f->sk == sk) {
			/* socket might have been reallocated, so check
			 * if its sk_hash is the same.
			 * If not, we need to refill credit with
			 * initial quantum
			 */
			if (unlikely(skb->sk &&
				     f->socket_hash != sk->sk_hash)) {
				f->credit = q->initial_quantum;
				f->socket_hash = sk->sk_hash;
				f->time_next_packet = 0ULL;
			}
			return f;
		}
		if (f->sk > sk)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}

	f = kmem_cache_zalloc(fq_flow_cachep, GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!f)) {
		q->stat_allocation_errors++;
		return &q->internal;
	}
	fq_flow_set_detached(f);
	f->sk = sk;
	if (skb->sk)
		f->socket_hash = sk->sk_hash;
	f->credit = q->initial_quantum;

	rb_link_node(&f->fq_node, parent, p);
	rb_insert_color(&f->fq_node, root);

	q->flows++;
	q->inactive_flows++;
	return f;
}


/* remove one skb from head of flow queue */
static struct sk_buff *fq_dequeue_head(struct Qdisc *sch, struct fq_flow *flow)
{
	struct sk_buff *skb = flow->head;

	if (skb) {
		flow->head = skb->next;
		skb->next = NULL;
		flow->qlen--;
		qdisc_qstats_backlog_dec(sch, skb);
		sch->q.qlen--;
	}
	return skb;
}

/* We might add in the future detection of retransmits
 * For the time being, just return false
 */
static bool skb_is_retransmit(struct sk_buff *skb)
{
	return false;
}

/* add skb to flow queue
 * flow queue is a linked list, kind of FIFO, except for TCP retransmits
 * We special case tcp retransmits to be transmitted before other packets.
 * We rely on the fact that TCP retransmits are unlikely, so we do not waste
 * a separate queue or a pointer.
 * head->  [retrans pkt 1]
 *         [retrans pkt 2]
 *         [ normal pkt 1]
 *         [ normal pkt 2]
 *         [ normal pkt 3]
 * tail->  [ normal pkt 4]
 */
static void flow_queue_add(struct fq_flow *flow, struct sk_buff *skb)
{
	struct sk_buff *prev, *head = flow->head;

	skb->next = NULL;
	if (!head) {
		flow->head = skb;
		flow->tail = skb;
		return;
	}
	if (likely(!skb_is_retransmit(skb))) {
		flow->tail->next = skb;
		flow->tail = skb;
		return;
	}

	/* This skb is a tcp retransmit,
	 * find the last retrans packet in the queue
	 */
	prev = NULL;
	while (skb_is_retransmit(head)) {
		prev = head;
		head = head->next;
		if (!head)
			break;
	}
	if (!prev) {	/* no rtx packet in queue, become the new head */
		skb->next = flow->head;
		flow->head = skb;
	} else {
		if (prev == flow->tail)
			flow->tail = skb;
		else
			skb->next = prev->next;
		prev->next = skb;
	}
}
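/* Worked example for the refill logic in fq_enqueue() below, using the
 * fq_init() defaults: a flow that sat detached for more than
 * flow_refill_delay (40 ms) re-enters new_flows with its credit topped
 * up to at least one quantum (2 * psched_mtu, i.e. 3028 bytes for a
 * 1514 byte MTU), so an idle flow is not charged for credit it never used.
 */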
static int fq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct fq_sched_data *q = qdisc_priv(sch);
	struct fq_flow *f;

	if (unlikely(sch->q.qlen >= sch->limit))
		return qdisc_drop(skb, sch);

	f = fq_classify(skb, q);
	if (unlikely(f->qlen >= q->flow_plimit && f != &q->internal)) {
		q->stat_flows_plimit++;
		return qdisc_drop(skb, sch);
	}

	f->qlen++;
	if (skb_is_retransmit(skb))
		q->stat_tcp_retrans++;
	qdisc_qstats_backlog_inc(sch, skb);
	if (fq_flow_is_detached(f)) {
		fq_flow_add_tail(&q->new_flows, f);
		if (time_after(jiffies, f->age + q->flow_refill_delay))
			f->credit = max_t(u32, f->credit, q->quantum);
		q->inactive_flows--;
	}

	/* Note: this overwrites f->age */
	flow_queue_add(f, skb);

	if (unlikely(f == &q->internal))
		q->stat_internal_packets++;
	sch->q.qlen++;

	return NET_XMIT_SUCCESS;
}

static void fq_check_throttled(struct fq_sched_data *q, u64 now)
{
	struct rb_node *p;

	if (q->time_next_delayed_flow > now)
		return;

	q->time_next_delayed_flow = ~0ULL;
	while ((p = rb_first(&q->delayed)) != NULL) {
		struct fq_flow *f = container_of(p, struct fq_flow, rate_node);

		if (f->time_next_packet > now) {
			q->time_next_delayed_flow = f->time_next_packet;
			break;
		}
		rb_erase(p, &q->delayed);
		q->throttled_flows--;
		fq_flow_add_tail(&q->old_flows, f);
	}
}
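/* Pacing arithmetic used by fq_dequeue() below, shown here with the
 * default settings as an illustration: the inter-packet delay is
 * plen * NSEC_PER_SEC / rate, where plen is at least one quantum.
 * For a 1514 byte packet, quantum 3028 and sk_pacing_rate 1250000
 * bytes/sec (10 Mbit/s):
 *
 *	len = 3028 * NSEC_PER_SEC / 1250000 = 2422400 ns ~= 2.4 ms
 *
 * so the flow is throttled for about 2.4 ms before its next packet.
 * Delays are clamped to 125 ms because sk_pacing_rate can change while
 * the flow sits in the q->delayed tree.
 */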
static struct sk_buff *fq_dequeue(struct Qdisc *sch)
{
	struct fq_sched_data *q = qdisc_priv(sch);
	u64 now = ktime_get_ns();
	struct fq_flow_head *head;
	struct sk_buff *skb;
	struct fq_flow *f;
	u32 rate;

	skb = fq_dequeue_head(sch, &q->internal);
	if (skb)
		goto out;
	fq_check_throttled(q, now);
begin:
	head = &q->new_flows;
	if (!head->first) {
		head = &q->old_flows;
		if (!head->first) {
			if (q->time_next_delayed_flow != ~0ULL)
				qdisc_watchdog_schedule_ns(&q->watchdog,
							   q->time_next_delayed_flow,
							   false);
			return NULL;
		}
	}
	f = head->first;

	if (f->credit <= 0) {
		f->credit += q->quantum;
		head->first = f->next;
		fq_flow_add_tail(&q->old_flows, f);
		goto begin;
	}

	if (unlikely(f->head && now < f->time_next_packet)) {
		head->first = f->next;
		fq_flow_set_throttled(q, f);
		goto begin;
	}

	skb = fq_dequeue_head(sch, f);
	if (!skb) {
		head->first = f->next;
		/* force a pass through old_flows to prevent starvation */
		if ((head == &q->new_flows) && q->old_flows.first) {
			fq_flow_add_tail(&q->old_flows, f);
		} else {
			fq_flow_set_detached(f);
			q->inactive_flows++;
		}
		goto begin;
	}
	prefetch(&skb->end);
	f->time_next_packet = now;
	f->credit -= qdisc_pkt_len(skb);

	if (f->credit > 0 || !q->rate_enable)
		goto out;

	rate = q->flow_max_rate;
	if (skb->sk && skb->sk->sk_state != TCP_TIME_WAIT)
		rate = min(skb->sk->sk_pacing_rate, rate);

	if (rate != ~0U) {
		u32 plen = max(qdisc_pkt_len(skb), q->quantum);
		u64 len = (u64)plen * NSEC_PER_SEC;

		if (likely(rate))
			do_div(len, rate);
		/* Since the socket rate can change later,
		 * clamp the delay to 125 ms.
		 * TODO: maybe segment the too big skb, as in commit
		 * e43ac79a4bc ("sch_tbf: segment too big GSO packets")
		 */
		if (unlikely(len > 125 * NSEC_PER_MSEC)) {
			len = 125 * NSEC_PER_MSEC;
			q->stat_pkts_too_long++;
		}

		f->time_next_packet = now + len;
	}
out:
	qdisc_bstats_update(sch, skb);
	return skb;
}

static void fq_reset(struct Qdisc *sch)
{
	struct fq_sched_data *q = qdisc_priv(sch);
	struct rb_root *root;
	struct sk_buff *skb;
	struct rb_node *p;
	struct fq_flow *f;
	unsigned int idx;

	while ((skb = fq_dequeue_head(sch, &q->internal)) != NULL)
		kfree_skb(skb);

	if (!q->fq_root)
		return;

	for (idx = 0; idx < (1U << q->fq_trees_log); idx++) {
		root = &q->fq_root[idx];
		while ((p = rb_first(root)) != NULL) {
			f = container_of(p, struct fq_flow, fq_node);
			rb_erase(p, root);

			while ((skb = fq_dequeue_head(sch, f)) != NULL)
				kfree_skb(skb);

			kmem_cache_free(fq_flow_cachep, f);
		}
	}
	q->new_flows.first = NULL;
	q->old_flows.first = NULL;
	q->delayed = RB_ROOT;
	q->flows = 0;
	q->inactive_flows = 0;
	q->throttled_flows = 0;
}

static void fq_rehash(struct fq_sched_data *q,
		      struct rb_root *old_array, u32 old_log,
		      struct rb_root *new_array, u32 new_log)
{
	struct rb_node *op, **np, *parent;
	struct rb_root *oroot, *nroot;
	struct fq_flow *of, *nf;
	int fcnt = 0;
	u32 idx;

	for (idx = 0; idx < (1U << old_log); idx++) {
		oroot = &old_array[idx];
		while ((op = rb_first(oroot)) != NULL) {
			rb_erase(op, oroot);
			of = container_of(op, struct fq_flow, fq_node);
			if (fq_gc_candidate(of)) {
				fcnt++;
				kmem_cache_free(fq_flow_cachep, of);
				continue;
			}
			nroot = &new_array[hash_32((u32)(long)of->sk, new_log)];

			np = &nroot->rb_node;
			parent = NULL;
			while (*np) {
				parent = *np;

				nf = container_of(parent, struct fq_flow, fq_node);
				BUG_ON(nf->sk == of->sk);

				if (nf->sk > of->sk)
					np = &parent->rb_right;
				else
					np = &parent->rb_left;
			}

			rb_link_node(&of->fq_node, parent, np);
			rb_insert_color(&of->fq_node, nroot);
		}
	}
	q->flows -= fcnt;
	q->inactive_flows -= fcnt;
	q->stat_gc_flows += fcnt;
}

static void *fq_alloc_node(size_t sz, int node)
{
	void *ptr;

	ptr = kmalloc_node(sz, GFP_KERNEL | __GFP_REPEAT | __GFP_NOWARN, node);
	if (!ptr)
		ptr = vmalloc_node(sz, node);
	return ptr;
}

static void fq_free(void *addr)
{
	kvfree(addr);
}

static int fq_resize(struct Qdisc *sch, u32 log)
{
	struct fq_sched_data *q = qdisc_priv(sch);
	struct rb_root *array;
	void *old_fq_root;
	u32 idx;

	if (q->fq_root && log == q->fq_trees_log)
		return 0;

	/* If XPS was set up, we can allocate memory on the right NUMA node */
	array = fq_alloc_node(sizeof(struct rb_root) << log,
			      netdev_queue_numa_node_read(sch->dev_queue));
	if (!array)
		return -ENOMEM;

	for (idx = 0; idx < (1U << log); idx++)
		array[idx] = RB_ROOT;

	sch_tree_lock(sch);

	old_fq_root = q->fq_root;
	if (old_fq_root)
		fq_rehash(q, old_fq_root, q->fq_trees_log, array, log);

	q->fq_root = array;
	q->fq_trees_log = log;

	sch_tree_unlock(sch);

	fq_free(old_fq_root);

	return 0;
}
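/* Bounds check context for fq_change() below: TCA_FQ_BUCKETS_LOG is
 * accepted between 1 and ilog2(256*1024) = 18, i.e. 2 to 262144 buckets.
 * At the upper bound the bucket array is sizeof(struct rb_root) << 18,
 * which is 2 MB on a 64-bit build; this is why fq_alloc_node() above
 * falls back to vmalloc_node() when kmalloc_node() fails.
 */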
static const struct nla_policy fq_policy[TCA_FQ_MAX + 1] = {
	[TCA_FQ_PLIMIT]			= { .type = NLA_U32 },
	[TCA_FQ_FLOW_PLIMIT]		= { .type = NLA_U32 },
	[TCA_FQ_QUANTUM]		= { .type = NLA_U32 },
	[TCA_FQ_INITIAL_QUANTUM]	= { .type = NLA_U32 },
	[TCA_FQ_RATE_ENABLE]		= { .type = NLA_U32 },
	[TCA_FQ_FLOW_DEFAULT_RATE]	= { .type = NLA_U32 },
	[TCA_FQ_FLOW_MAX_RATE]		= { .type = NLA_U32 },
	[TCA_FQ_BUCKETS_LOG]		= { .type = NLA_U32 },
	[TCA_FQ_FLOW_REFILL_DELAY]	= { .type = NLA_U32 },
};

static int fq_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct fq_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_FQ_MAX + 1];
	int err, drop_count = 0;
	u32 fq_log;

	if (!opt)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_FQ_MAX, opt, fq_policy);
	if (err < 0)
		return err;

	sch_tree_lock(sch);

	fq_log = q->fq_trees_log;

	if (tb[TCA_FQ_BUCKETS_LOG]) {
		u32 nval = nla_get_u32(tb[TCA_FQ_BUCKETS_LOG]);

		if (nval >= 1 && nval <= ilog2(256*1024))
			fq_log = nval;
		else
			err = -EINVAL;
	}
	if (tb[TCA_FQ_PLIMIT])
		sch->limit = nla_get_u32(tb[TCA_FQ_PLIMIT]);

	if (tb[TCA_FQ_FLOW_PLIMIT])
		q->flow_plimit = nla_get_u32(tb[TCA_FQ_FLOW_PLIMIT]);

	if (tb[TCA_FQ_QUANTUM])
		q->quantum = nla_get_u32(tb[TCA_FQ_QUANTUM]);

	if (tb[TCA_FQ_INITIAL_QUANTUM])
		q->initial_quantum = nla_get_u32(tb[TCA_FQ_INITIAL_QUANTUM]);

	if (tb[TCA_FQ_FLOW_DEFAULT_RATE])
		pr_warn_ratelimited("sch_fq: defrate %u ignored.\n",
				    nla_get_u32(tb[TCA_FQ_FLOW_DEFAULT_RATE]));

	if (tb[TCA_FQ_FLOW_MAX_RATE])
		q->flow_max_rate = nla_get_u32(tb[TCA_FQ_FLOW_MAX_RATE]);

	if (tb[TCA_FQ_RATE_ENABLE]) {
		u32 enable = nla_get_u32(tb[TCA_FQ_RATE_ENABLE]);

		if (enable <= 1)
			q->rate_enable = enable;
		else
			err = -EINVAL;
	}

	if (tb[TCA_FQ_FLOW_REFILL_DELAY]) {
		u32 usecs_delay = nla_get_u32(tb[TCA_FQ_FLOW_REFILL_DELAY]);

		q->flow_refill_delay = usecs_to_jiffies(usecs_delay);
	}

	if (!err) {
		sch_tree_unlock(sch);
		err = fq_resize(sch, fq_log);
		sch_tree_lock(sch);
	}
	while (sch->q.qlen > sch->limit) {
		struct sk_buff *skb = fq_dequeue(sch);

		if (!skb)
			break;
		kfree_skb(skb);
		drop_count++;
	}
	qdisc_tree_decrease_qlen(sch, drop_count);

	sch_tree_unlock(sch);
	return err;
}

static void fq_destroy(struct Qdisc *sch)
{
	struct fq_sched_data *q = qdisc_priv(sch);

	fq_reset(sch);
	fq_free(q->fq_root);
	qdisc_watchdog_cancel(&q->watchdog);
}

static int fq_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct fq_sched_data *q = qdisc_priv(sch);
	int err;

	sch->limit = 10000;
	q->flow_plimit = 100;
	q->quantum = 2 * psched_mtu(qdisc_dev(sch));
	q->initial_quantum = 10 * psched_mtu(qdisc_dev(sch));
	q->flow_refill_delay = msecs_to_jiffies(40);
	q->flow_max_rate = ~0U;
	q->rate_enable = 1;
	q->new_flows.first = NULL;
	q->old_flows.first = NULL;
	q->delayed = RB_ROOT;
	q->fq_root = NULL;
	q->fq_trees_log = ilog2(1024);
	qdisc_watchdog_init(&q->watchdog, sch);

	if (opt)
		err = fq_change(sch, opt);
	else
		err = fq_resize(sch, q->fq_trees_log);

	return err;
}
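/* With the fq_init() defaults above, a brand new flow starts with
 * initial_quantum = 10 * psched_mtu of credit (15140 bytes for a
 * 1514 byte MTU), so roughly ten full-size packets can leave back to
 * back before f->credit goes negative and the pacing code in
 * fq_dequeue() starts spacing out the flow's packets.
 */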
static int fq_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct fq_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts;

	opts = nla_nest_start(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;

	/* TCA_FQ_FLOW_DEFAULT_RATE is not used anymore */

	if (nla_put_u32(skb, TCA_FQ_PLIMIT, sch->limit) ||
	    nla_put_u32(skb, TCA_FQ_FLOW_PLIMIT, q->flow_plimit) ||
	    nla_put_u32(skb, TCA_FQ_QUANTUM, q->quantum) ||
	    nla_put_u32(skb, TCA_FQ_INITIAL_QUANTUM, q->initial_quantum) ||
	    nla_put_u32(skb, TCA_FQ_RATE_ENABLE, q->rate_enable) ||
	    nla_put_u32(skb, TCA_FQ_FLOW_MAX_RATE, q->flow_max_rate) ||
	    nla_put_u32(skb, TCA_FQ_FLOW_REFILL_DELAY,
			jiffies_to_usecs(q->flow_refill_delay)) ||
	    nla_put_u32(skb, TCA_FQ_BUCKETS_LOG, q->fq_trees_log))
		goto nla_put_failure;

	return nla_nest_end(skb, opts);

nla_put_failure:
	return -1;
}

static int fq_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct fq_sched_data *q = qdisc_priv(sch);
	u64 now = ktime_get_ns();
	struct tc_fq_qd_stats st = {
		.gc_flows		= q->stat_gc_flows,
		.highprio_packets	= q->stat_internal_packets,
		.tcp_retrans		= q->stat_tcp_retrans,
		.throttled		= q->stat_throttled,
		.flows_plimit		= q->stat_flows_plimit,
		.pkts_too_long		= q->stat_pkts_too_long,
		.allocation_errors	= q->stat_allocation_errors,
		.flows			= q->flows,
		.inactive_flows		= q->inactive_flows,
		.throttled_flows	= q->throttled_flows,
		.time_next_delayed_flow	= q->time_next_delayed_flow - now,
	};

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

static struct Qdisc_ops fq_qdisc_ops __read_mostly = {
	.id		= "fq",
	.priv_size	= sizeof(struct fq_sched_data),

	.enqueue	= fq_enqueue,
	.dequeue	= fq_dequeue,
	.peek		= qdisc_peek_dequeued,
	.init		= fq_init,
	.reset		= fq_reset,
	.destroy	= fq_destroy,
	.change		= fq_change,
	.dump		= fq_dump,
	.dump_stats	= fq_dump_stats,
	.owner		= THIS_MODULE,
};

static int __init fq_module_init(void)
{
	int ret;

	fq_flow_cachep = kmem_cache_create("fq_flow_cache",
					   sizeof(struct fq_flow),
					   0, 0, NULL);
	if (!fq_flow_cachep)
		return -ENOMEM;

	ret = register_qdisc(&fq_qdisc_ops);
	if (ret)
		kmem_cache_destroy(fq_flow_cachep);
	return ret;
}

static void __exit fq_module_exit(void)
{
	unregister_qdisc(&fq_qdisc_ops);
	kmem_cache_destroy(fq_flow_cachep);
}

module_init(fq_module_init)
module_exit(fq_module_exit)
MODULE_AUTHOR("Eric Dumazet");
MODULE_LICENSE("GPL");
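/* Example setup, assuming the fq support in iproute2's tc (option names
 * below follow iproute2 and may vary between versions):
 *
 *	tc qdisc add dev eth0 root fq
 *	tc qdisc change dev eth0 root fq limit 10000 flow_limit 100 \
 *		quantum 3028 initial_quantum 15140 maxrate 1gbit
 *	tc -s qdisc show dev eth0
 *
 * The -s statistics come from the struct tc_fq_qd_stats filled in by
 * fq_dump_stats() above (gc_flows, throttled, flows_plimit, ...).
 */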