/*
 * net/sched/sch_fq.c Fair Queue Packet Scheduler (per flow pacing)
 *
 * Copyright (C) 2013-2015 Eric Dumazet <edumazet@google.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Meant to be mostly used for locally generated traffic:
 * Fast classification depends on skb->sk being set before reaching us.
 * If not (router workload), we use rxhash as a fallback, with a 32-bit wide hash.
 * All packets belonging to a socket are considered a 'flow'.
 *
 * Flows are dynamically allocated and stored in a hash table of RB trees.
 * They are also part of one Round Robin list ('new' or 'old' flows).
 *
 * Burst avoidance (aka pacing) capability:
 *
 * Transport (e.g. TCP) can set a rate in sk->sk_pacing_rate, enqueue a
 * bunch of packets, and this packet scheduler adds delay between
 * packets to respect the rate limitation.
 *
 * enqueue():
 * - look up one RB tree (out of 1024 or more) to find the flow.
 *   If the flow does not exist, create it and add it to the tree.
 *   Add the skb to the per-flow list of skbs (FIFO).
 * - Use a special FIFO for high-prio packets.
 *
 * dequeue(): serves flows in Round Robin.
 * Note: when a flow becomes empty, we do not immediately remove it from
 * the RB trees, for performance reasons (it is expected to send additional
 * packets, or the SLAB cache will reuse the socket for another flow).
 */
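/* Illustrative userspace sketch (not part of this scheduler): an application
 * can cap the pacing rate that fq enforces for its socket with
 * SO_MAX_PACING_RATE; TCP otherwise sets sk->sk_pacing_rate on its own:
 *
 *	unsigned int rate = 1000000;	(bytes per second)
 *	setsockopt(fd, SOL_SOCKET, SO_MAX_PACING_RATE, &rate, sizeof(rate));
 */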
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/hash.h>
#include <linux/prefetch.h>
#include <linux/vmalloc.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <net/tcp.h>

/*
 * Per flow structure, dynamically allocated
 */
struct fq_flow {
	struct sk_buff	*head;		/* list of skbs for this flow: first skb */
	union {
		struct sk_buff *tail;	/* last skb in the list */
		unsigned long  age;	/* jiffies when flow was emptied, for gc */
	};
	struct rb_node	fq_node;	/* anchor in fq_root[] trees */
	struct sock	*sk;
	int		qlen;		/* number of packets in flow queue */
	int		credit;
	u32		socket_hash;	/* sk_hash */
	struct fq_flow *next;		/* next pointer in RR lists, or &detached */

	struct rb_node	rate_node;	/* anchor in q->delayed tree */
	u64		time_next_packet;
};

struct fq_flow_head {
	struct fq_flow *first;
	struct fq_flow *last;
};

struct fq_sched_data {
	struct fq_flow_head new_flows;

	struct fq_flow_head old_flows;

	struct rb_root	delayed;	/* for rate limited flows */
	u64		time_next_delayed_flow;
	unsigned long	unthrottle_latency_ns;

	struct fq_flow	internal;	/* for unclassified or high-prio packets */
	u32		quantum;
	u32		initial_quantum;
	u32		flow_refill_delay;
	u32		flow_max_rate;	/* optional max rate per flow */
	u32		flow_plimit;	/* max packets per flow */
	u32		orphan_mask;	/* mask for orphaned skb */
	u32		low_rate_threshold;
	struct rb_root	*fq_root;
	u8		rate_enable;
	u8		fq_trees_log;

	u32		flows;
	u32		inactive_flows;
	u32		throttled_flows;

	u64		stat_gc_flows;
	u64		stat_internal_packets;
	u64		stat_tcp_retrans;
	u64		stat_throttled;
	u64		stat_flows_plimit;
	u64		stat_pkts_too_long;
	u64		stat_allocation_errors;
	struct qdisc_watchdog watchdog;
};

/* special value to mark a detached flow (not on old/new list) */
static struct fq_flow detached, throttled;

static void fq_flow_set_detached(struct fq_flow *f)
{
	f->next = &detached;
	f->age = jiffies;
}

static bool fq_flow_is_detached(const struct fq_flow *f)
{
	return f->next == &detached;
}

static void fq_flow_set_throttled(struct fq_sched_data *q, struct fq_flow *f)
{
	struct rb_node **p = &q->delayed.rb_node, *parent = NULL;

	while (*p) {
		struct fq_flow *aux;

		parent = *p;
		aux = rb_entry(parent, struct fq_flow, rate_node);
		if (f->time_next_packet >= aux->time_next_packet)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&f->rate_node, parent, p);
	rb_insert_color(&f->rate_node, &q->delayed);
	q->throttled_flows++;
	q->stat_throttled++;

	f->next = &throttled;
	if (q->time_next_delayed_flow > f->time_next_packet)
		q->time_next_delayed_flow = f->time_next_packet;
}


static struct kmem_cache *fq_flow_cachep __read_mostly;

static void fq_flow_add_tail(struct fq_flow_head *head, struct fq_flow *flow)
{
	if (head->first)
		head->last->next = flow;
	else
		head->first = flow;
	head->last = flow;
	flow->next = NULL;
}

/* limit number of collected flows per round */
#define FQ_GC_MAX 8
#define FQ_GC_AGE (3*HZ)

static bool fq_gc_candidate(const struct fq_flow *f)
{
	return fq_flow_is_detached(f) &&
	       time_after(jiffies, f->age + FQ_GC_AGE);
}

static void fq_gc(struct fq_sched_data *q,
		  struct rb_root *root,
		  struct sock *sk)
{
	struct fq_flow *f, *tofree[FQ_GC_MAX];
	struct rb_node **p, *parent;
	int fcnt = 0;

	p = &root->rb_node;
	parent = NULL;
	while (*p) {
		parent = *p;

		f = rb_entry(parent, struct fq_flow, fq_node);
		if (f->sk == sk)
			break;

		if (fq_gc_candidate(f)) {
			tofree[fcnt++] = f;
			if (fcnt == FQ_GC_MAX)
				break;
		}

		if (f->sk > sk)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}

	q->flows -= fcnt;
	q->inactive_flows -= fcnt;
	q->stat_gc_flows += fcnt;
	while (fcnt) {
		struct fq_flow *f = tofree[--fcnt];

		rb_erase(&f->fq_node, root);
		kmem_cache_free(fq_flow_cachep, f);
	}
}
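/* Worked example (illustrative): with HZ = 1000, FQ_GC_AGE is 3000 jiffies,
 * so a flow that has been detached (empty) for more than three seconds is a
 * gc candidate:
 *
 *	f->age = jiffies;		(set when the flow was emptied)
 *	... more than 3 * HZ jiffies pass ...
 *	fq_gc_candidate(f);		(now true; fq_gc() frees at most
 *					 FQ_GC_MAX such flows per lookup)
 */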
static struct fq_flow *fq_classify(struct sk_buff *skb, struct fq_sched_data *q)
{
	struct rb_node **p, *parent;
	struct sock *sk = skb->sk;
	struct rb_root *root;
	struct fq_flow *f;

	/* warning: no starvation prevention... */
	if (unlikely((skb->priority & TC_PRIO_MAX) == TC_PRIO_CONTROL))
		return &q->internal;

	/* SYNACK messages are attached to a TCP_NEW_SYN_RECV request socket
	 * or a listener (SYNCOOKIE mode)
	 * 1) request sockets are not full blown,
	 *    they do not contain sk_pacing_rate
	 * 2) They are not part of a 'flow' yet
	 * 3) We do not want to rate limit them (e.g. SYNFLOOD attack),
	 *    especially if the listener set SO_MAX_PACING_RATE
	 * 4) We pretend they are orphaned
	 */
	if (!sk || sk_listener(sk)) {
		unsigned long hash = skb_get_hash(skb) & q->orphan_mask;

		/* By forcing low order bit to 1, we make sure to not
		 * collide with a local flow (socket pointers are word aligned)
		 */
		sk = (struct sock *)((hash << 1) | 1UL);
		skb_orphan(skb);
	}

	root = &q->fq_root[hash_ptr(sk, q->fq_trees_log)];

	if (q->flows >= (2U << q->fq_trees_log) &&
	    q->inactive_flows > q->flows/2)
		fq_gc(q, root, sk);

	p = &root->rb_node;
	parent = NULL;
	while (*p) {
		parent = *p;

		f = rb_entry(parent, struct fq_flow, fq_node);
		if (f->sk == sk) {
			/* socket might have been reallocated, so check
			 * if its sk_hash is the same.
			 * If not, we need to refill credit with
			 * initial quantum
			 */
			if (unlikely(skb->sk &&
				     f->socket_hash != sk->sk_hash)) {
				f->credit = q->initial_quantum;
				f->socket_hash = sk->sk_hash;
				f->time_next_packet = 0ULL;
			}
			return f;
		}
		if (f->sk > sk)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}

	f = kmem_cache_zalloc(fq_flow_cachep, GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!f)) {
		q->stat_allocation_errors++;
		return &q->internal;
	}
	fq_flow_set_detached(f);
	f->sk = sk;
	if (skb->sk)
		f->socket_hash = sk->sk_hash;
	f->credit = q->initial_quantum;

	rb_link_node(&f->fq_node, parent, p);
	rb_insert_color(&f->fq_node, root);

	q->flows++;
	q->inactive_flows++;
	return f;
}
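/* Worked example (illustrative): with the default orphan_mask = 1023 and
 * skb_get_hash(skb) = 0x12345, fq_classify() above computes
 *
 *	hash = 0x12345 & 1023 = 0x345
 *	sk   = (struct sock *)((0x345 << 1) | 1) = (struct sock *)0x68b
 *
 * an odd "pointer" that can never equal a real, word-aligned struct sock
 * address, so orphaned traffic gets its own set of 1024 flow buckets.
 */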
/* remove one skb from head of flow queue */
static struct sk_buff *fq_dequeue_head(struct Qdisc *sch, struct fq_flow *flow)
{
	struct sk_buff *skb = flow->head;

	if (skb) {
		flow->head = skb->next;
		skb->next = NULL;
		flow->qlen--;
		qdisc_qstats_backlog_dec(sch, skb);
		sch->q.qlen--;
	}
	return skb;
}

/* We might add in the future detection of retransmits
 * For the time being, just return false
 */
static bool skb_is_retransmit(struct sk_buff *skb)
{
	return false;
}

/* add skb to flow queue
 * flow queue is a linked list, kind of FIFO, except for TCP retransmits
 * We special case tcp retransmits to be transmitted before other packets.
 * We rely on the fact that TCP retransmits are unlikely, so we do not waste
 * a separate queue or a pointer.
 * head->	[retrans pkt 1]
 *		[retrans pkt 2]
 *		[ normal pkt 1]
 *		[ normal pkt 2]
 *		[ normal pkt 3]
 * tail->	[ normal pkt 4]
 */
static void flow_queue_add(struct fq_flow *flow, struct sk_buff *skb)
{
	struct sk_buff *prev, *head = flow->head;

	skb->next = NULL;
	if (!head) {
		flow->head = skb;
		flow->tail = skb;
		return;
	}
	if (likely(!skb_is_retransmit(skb))) {
		flow->tail->next = skb;
		flow->tail = skb;
		return;
	}

	/* This skb is a tcp retransmit,
	 * find the last retrans packet in the queue
	 */
	prev = NULL;
	while (skb_is_retransmit(head)) {
		prev = head;
		head = head->next;
		if (!head)
			break;
	}
	if (!prev) {	/* no rtx packet in queue, become the new head */
		skb->next = flow->head;
		flow->head = skb;
	} else {
		if (prev == flow->tail)
			flow->tail = skb;
		else
			skb->next = prev->next;
		prev->next = skb;
	}
}

static int fq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
		      struct sk_buff **to_free)
{
	struct fq_sched_data *q = qdisc_priv(sch);
	struct fq_flow *f;

	if (unlikely(sch->q.qlen >= sch->limit))
		return qdisc_drop(skb, sch, to_free);

	f = fq_classify(skb, q);
	if (unlikely(f->qlen >= q->flow_plimit && f != &q->internal)) {
		q->stat_flows_plimit++;
		return qdisc_drop(skb, sch, to_free);
	}

	f->qlen++;
	if (skb_is_retransmit(skb))
		q->stat_tcp_retrans++;
	qdisc_qstats_backlog_inc(sch, skb);
	if (fq_flow_is_detached(f)) {
		fq_flow_add_tail(&q->new_flows, f);
		if (time_after(jiffies, f->age + q->flow_refill_delay))
			f->credit = max_t(u32, f->credit, q->quantum);
		q->inactive_flows--;
	}

	/* Note: this overwrites f->age */
	flow_queue_add(f, skb);

	if (unlikely(f == &q->internal)) {
		q->stat_internal_packets++;
	}
	sch->q.qlen++;

	return NET_XMIT_SUCCESS;
}
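/* Worked example (illustrative): with the default quantum = 2 * psched_mtu()
 * (3028 bytes for a 1500 byte MTU), a flow can send roughly two full-size
 * packets per round-robin pass before its credit goes non-positive and
 * fq_dequeue() rotates it to the tail of old_flows:
 *
 *	credit: 3028 -> 1514 -> 0	(two 1514 byte packets sent)
 *
 * A flow that stayed empty longer than flow_refill_delay (40 ms by default)
 * gets its credit topped back up to one quantum on re-activation above.
 */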
static void fq_check_throttled(struct fq_sched_data *q, u64 now)
{
	unsigned long sample;
	struct rb_node *p;

	if (q->time_next_delayed_flow > now)
		return;

	/* Update unthrottle latency EWMA.
	 * This is cheap and can help in diagnosing timer/latency problems.
	 */
	sample = (unsigned long)(now - q->time_next_delayed_flow);
	q->unthrottle_latency_ns -= q->unthrottle_latency_ns >> 3;
	q->unthrottle_latency_ns += sample >> 3;

	q->time_next_delayed_flow = ~0ULL;
	while ((p = rb_first(&q->delayed)) != NULL) {
		struct fq_flow *f = rb_entry(p, struct fq_flow, rate_node);

		if (f->time_next_packet > now) {
			q->time_next_delayed_flow = f->time_next_packet;
			break;
		}
		rb_erase(p, &q->delayed);
		q->throttled_flows--;
		fq_flow_add_tail(&q->old_flows, f);
	}
}
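/* Worked example (illustrative): the EWMA above computes
 *
 *	new = old - old/8 + sample/8	(roughly (7 * old + sample) / 8)
 *
 * so with old = 80000 ns and sample = 16000 ns:
 *
 *	new = 80000 - 10000 + 2000 = 72000 ns
 */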
static struct sk_buff *fq_dequeue(struct Qdisc *sch)
{
	struct fq_sched_data *q = qdisc_priv(sch);
	u64 now = ktime_get_ns();
	struct fq_flow_head *head;
	struct sk_buff *skb;
	struct fq_flow *f;
	u32 rate, plen;

	skb = fq_dequeue_head(sch, &q->internal);
	if (skb)
		goto out;
	fq_check_throttled(q, now);
begin:
	head = &q->new_flows;
	if (!head->first) {
		head = &q->old_flows;
		if (!head->first) {
			if (q->time_next_delayed_flow != ~0ULL)
				qdisc_watchdog_schedule_ns(&q->watchdog,
							   q->time_next_delayed_flow);
			return NULL;
		}
	}
	f = head->first;

	if (f->credit <= 0) {
		f->credit += q->quantum;
		head->first = f->next;
		fq_flow_add_tail(&q->old_flows, f);
		goto begin;
	}

	skb = f->head;
	if (unlikely(skb && now < f->time_next_packet &&
		     !skb_is_tcp_pure_ack(skb))) {
		head->first = f->next;
		fq_flow_set_throttled(q, f);
		goto begin;
	}

	skb = fq_dequeue_head(sch, f);
	if (!skb) {
		head->first = f->next;
		/* force a pass through old_flows to prevent starvation */
		if ((head == &q->new_flows) && q->old_flows.first) {
			fq_flow_add_tail(&q->old_flows, f);
		} else {
			fq_flow_set_detached(f);
			q->inactive_flows++;
		}
		goto begin;
	}
	prefetch(&skb->end);
	f->credit -= qdisc_pkt_len(skb);

	if (!q->rate_enable)
		goto out;

	/* Do not pace locally generated ack packets */
	if (skb_is_tcp_pure_ack(skb))
		goto out;

	rate = q->flow_max_rate;
	if (skb->sk)
		rate = min(skb->sk->sk_pacing_rate, rate);

	if (rate <= q->low_rate_threshold) {
		f->credit = 0;
		plen = qdisc_pkt_len(skb);
	} else {
		plen = max(qdisc_pkt_len(skb), q->quantum);
		if (f->credit > 0)
			goto out;
	}
	if (rate != ~0U) {
		u64 len = (u64)plen * NSEC_PER_SEC;

		if (likely(rate))
			do_div(len, rate);
		/* Since socket rate can change later,
		 * clamp the delay to 1 second.
		 * Really, providers of too big packets should be fixed!
		 */
		if (unlikely(len > NSEC_PER_SEC)) {
			len = NSEC_PER_SEC;
			q->stat_pkts_too_long++;
		}
		/* Account for scheduler/timer drifts.
		 * f->time_next_packet was set when prior packet was sent,
		 * and current time (@now) can be too late by tens of us.
		 */
		if (f->time_next_packet)
			len -= min(len/2, now - f->time_next_packet);
		f->time_next_packet = now + len;
	}
out:
	qdisc_bstats_update(sch, skb);
	return skb;
}
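/* Worked example (illustrative): for a 1514 byte packet on a flow paced at
 * sk_pacing_rate = 12500000 bytes/s (100 Mbit/s), fq_dequeue() above spaces
 * packets by
 *
 *	len = 1514 * NSEC_PER_SEC / 12500000 = 121120 ns
 *
 * clamped to at most one second, then shortened by up to len/2 to absorb
 * scheduling drift since the previous transmission.
 */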
static void fq_flow_purge(struct fq_flow *flow)
{
	rtnl_kfree_skbs(flow->head, flow->tail);
	flow->head = NULL;
	flow->qlen = 0;
}

static void fq_reset(struct Qdisc *sch)
{
	struct fq_sched_data *q = qdisc_priv(sch);
	struct rb_root *root;
	struct rb_node *p;
	struct fq_flow *f;
	unsigned int idx;

	sch->q.qlen = 0;
	sch->qstats.backlog = 0;

	fq_flow_purge(&q->internal);

	if (!q->fq_root)
		return;

	for (idx = 0; idx < (1U << q->fq_trees_log); idx++) {
		root = &q->fq_root[idx];
		while ((p = rb_first(root)) != NULL) {
			f = rb_entry(p, struct fq_flow, fq_node);
			rb_erase(p, root);

			fq_flow_purge(f);

			kmem_cache_free(fq_flow_cachep, f);
		}
	}
	q->new_flows.first = NULL;
	q->old_flows.first = NULL;
	q->delayed = RB_ROOT;
	q->flows = 0;
	q->inactive_flows = 0;
	q->throttled_flows = 0;
}

static void fq_rehash(struct fq_sched_data *q,
		      struct rb_root *old_array, u32 old_log,
		      struct rb_root *new_array, u32 new_log)
{
	struct rb_node *op, **np, *parent;
	struct rb_root *oroot, *nroot;
	struct fq_flow *of, *nf;
	int fcnt = 0;
	u32 idx;

	for (idx = 0; idx < (1U << old_log); idx++) {
		oroot = &old_array[idx];
		while ((op = rb_first(oroot)) != NULL) {
			rb_erase(op, oroot);
			of = rb_entry(op, struct fq_flow, fq_node);
			if (fq_gc_candidate(of)) {
				fcnt++;
				kmem_cache_free(fq_flow_cachep, of);
				continue;
			}
			nroot = &new_array[hash_ptr(of->sk, new_log)];

			np = &nroot->rb_node;
			parent = NULL;
			while (*np) {
				parent = *np;

				nf = rb_entry(parent, struct fq_flow, fq_node);
				BUG_ON(nf->sk == of->sk);

				if (nf->sk > of->sk)
					np = &parent->rb_right;
				else
					np = &parent->rb_left;
			}

			rb_link_node(&of->fq_node, parent, np);
			rb_insert_color(&of->fq_node, nroot);
		}
	}
	q->flows -= fcnt;
	q->inactive_flows -= fcnt;
	q->stat_gc_flows += fcnt;
}

static void fq_free(void *addr)
{
	kvfree(addr);
}

static int fq_resize(struct Qdisc *sch, u32 log)
{
	struct fq_sched_data *q = qdisc_priv(sch);
	struct rb_root *array;
	void *old_fq_root;
	u32 idx;

	if (q->fq_root && log == q->fq_trees_log)
		return 0;

	/* If XPS was set up, we can allocate memory on the right NUMA node */
	array = kvmalloc_node(sizeof(struct rb_root) << log, GFP_KERNEL | __GFP_REPEAT,
			      netdev_queue_numa_node_read(sch->dev_queue));
	if (!array)
		return -ENOMEM;

	for (idx = 0; idx < (1U << log); idx++)
		array[idx] = RB_ROOT;

	sch_tree_lock(sch);

	old_fq_root = q->fq_root;
	if (old_fq_root)
		fq_rehash(q, old_fq_root, q->fq_trees_log, array, log);

	q->fq_root = array;
	q->fq_trees_log = log;

	sch_tree_unlock(sch);

	fq_free(old_fq_root);

	return 0;
}

static const struct nla_policy fq_policy[TCA_FQ_MAX + 1] = {
	[TCA_FQ_PLIMIT]			= { .type = NLA_U32 },
	[TCA_FQ_FLOW_PLIMIT]		= { .type = NLA_U32 },
	[TCA_FQ_QUANTUM]		= { .type = NLA_U32 },
	[TCA_FQ_INITIAL_QUANTUM]	= { .type = NLA_U32 },
	[TCA_FQ_RATE_ENABLE]		= { .type = NLA_U32 },
	[TCA_FQ_FLOW_DEFAULT_RATE]	= { .type = NLA_U32 },
	[TCA_FQ_FLOW_MAX_RATE]		= { .type = NLA_U32 },
	[TCA_FQ_BUCKETS_LOG]		= { .type = NLA_U32 },
	[TCA_FQ_FLOW_REFILL_DELAY]	= { .type = NLA_U32 },
	[TCA_FQ_ORPHAN_MASK]		= { .type = NLA_U32 },
	[TCA_FQ_LOW_RATE_THRESHOLD]	= { .type = NLA_U32 },
};

static int fq_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct fq_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_FQ_MAX + 1];
	int err, drop_count = 0;
	unsigned drop_len = 0;
	u32 fq_log;

	if (!opt)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_FQ_MAX, opt, fq_policy, NULL);
	if (err < 0)
		return err;

	sch_tree_lock(sch);

	fq_log = q->fq_trees_log;

	if (tb[TCA_FQ_BUCKETS_LOG]) {
		u32 nval = nla_get_u32(tb[TCA_FQ_BUCKETS_LOG]);

		if (nval >= 1 && nval <= ilog2(256*1024))
			fq_log = nval;
		else
			err = -EINVAL;
	}
	if (tb[TCA_FQ_PLIMIT])
		sch->limit = nla_get_u32(tb[TCA_FQ_PLIMIT]);

	if (tb[TCA_FQ_FLOW_PLIMIT])
		q->flow_plimit = nla_get_u32(tb[TCA_FQ_FLOW_PLIMIT]);

	if (tb[TCA_FQ_QUANTUM]) {
		u32 quantum = nla_get_u32(tb[TCA_FQ_QUANTUM]);

		if (quantum > 0)
			q->quantum = quantum;
		else
			err = -EINVAL;
	}

	if (tb[TCA_FQ_INITIAL_QUANTUM])
		q->initial_quantum = nla_get_u32(tb[TCA_FQ_INITIAL_QUANTUM]);

	if (tb[TCA_FQ_FLOW_DEFAULT_RATE])
		pr_warn_ratelimited("sch_fq: defrate %u ignored.\n",
				    nla_get_u32(tb[TCA_FQ_FLOW_DEFAULT_RATE]));

	if (tb[TCA_FQ_FLOW_MAX_RATE])
		q->flow_max_rate = nla_get_u32(tb[TCA_FQ_FLOW_MAX_RATE]);

	if (tb[TCA_FQ_LOW_RATE_THRESHOLD])
		q->low_rate_threshold =
			nla_get_u32(tb[TCA_FQ_LOW_RATE_THRESHOLD]);

	if (tb[TCA_FQ_RATE_ENABLE]) {
		u32 enable = nla_get_u32(tb[TCA_FQ_RATE_ENABLE]);

		if (enable <= 1)
			q->rate_enable = enable;
		else
			err = -EINVAL;
	}

	if (tb[TCA_FQ_FLOW_REFILL_DELAY]) {
		u32 usecs_delay = nla_get_u32(tb[TCA_FQ_FLOW_REFILL_DELAY]);

		q->flow_refill_delay = usecs_to_jiffies(usecs_delay);
	}

	if (tb[TCA_FQ_ORPHAN_MASK])
		q->orphan_mask = nla_get_u32(tb[TCA_FQ_ORPHAN_MASK]);

	if (!err) {
		sch_tree_unlock(sch);
		err = fq_resize(sch, fq_log);
		sch_tree_lock(sch);
	}
	while (sch->q.qlen > sch->limit) {
		struct sk_buff *skb = fq_dequeue(sch);

		if (!skb)
			break;
		drop_len += qdisc_pkt_len(skb);
		rtnl_kfree_skbs(skb, skb);
		drop_count++;
	}
	qdisc_tree_reduce_backlog(sch, drop_count, drop_len);

	sch_tree_unlock(sch);
	return err;
}

static void fq_destroy(struct Qdisc *sch)
{
	struct fq_sched_data *q = qdisc_priv(sch);

	fq_reset(sch);
	fq_free(q->fq_root);
	qdisc_watchdog_cancel(&q->watchdog);
}

static int fq_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct fq_sched_data *q = qdisc_priv(sch);
	int err;

	sch->limit		= 10000;
	q->flow_plimit		= 100;
	q->quantum		= 2 * psched_mtu(qdisc_dev(sch));
	q->initial_quantum	= 10 * psched_mtu(qdisc_dev(sch));
	q->flow_refill_delay	= msecs_to_jiffies(40);
	q->flow_max_rate	= ~0U;
	q->time_next_delayed_flow = ~0ULL;
	q->rate_enable		= 1;
	q->new_flows.first	= NULL;
	q->old_flows.first	= NULL;
	q->delayed		= RB_ROOT;
	q->fq_root		= NULL;
	q->fq_trees_log		= ilog2(1024);
	q->orphan_mask		= 1024 - 1;
	q->low_rate_threshold	= 550000 / 8;
	qdisc_watchdog_init(&q->watchdog, sch);

	if (opt)
		err = fq_change(sch, opt);
	else
		err = fq_resize(sch, q->fq_trees_log);

	return err;
}
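/* Illustrative configuration (userspace, via the tc tool): the defaults set
 * in fq_init() above roughly correspond to
 *
 *	tc qdisc add dev eth0 root fq limit 10000 flow_limit 100 \
 *		quantum 3028 initial_quantum 15140 refill_delay 40ms
 *
 * assuming a 1500 byte MTU (psched_mtu() = 1514). low_rate_threshold
 * defaults to 550000 / 8 = 68750 bytes/s, i.e. 550 kbit/s.
 */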
static int fq_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct fq_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts;

	opts = nla_nest_start(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;

	/* TCA_FQ_FLOW_DEFAULT_RATE is not used anymore */

	if (nla_put_u32(skb, TCA_FQ_PLIMIT, sch->limit) ||
	    nla_put_u32(skb, TCA_FQ_FLOW_PLIMIT, q->flow_plimit) ||
	    nla_put_u32(skb, TCA_FQ_QUANTUM, q->quantum) ||
	    nla_put_u32(skb, TCA_FQ_INITIAL_QUANTUM, q->initial_quantum) ||
	    nla_put_u32(skb, TCA_FQ_RATE_ENABLE, q->rate_enable) ||
	    nla_put_u32(skb, TCA_FQ_FLOW_MAX_RATE, q->flow_max_rate) ||
	    nla_put_u32(skb, TCA_FQ_FLOW_REFILL_DELAY,
			jiffies_to_usecs(q->flow_refill_delay)) ||
	    nla_put_u32(skb, TCA_FQ_ORPHAN_MASK, q->orphan_mask) ||
	    nla_put_u32(skb, TCA_FQ_LOW_RATE_THRESHOLD,
			q->low_rate_threshold) ||
	    nla_put_u32(skb, TCA_FQ_BUCKETS_LOG, q->fq_trees_log))
		goto nla_put_failure;

	return nla_nest_end(skb, opts);

nla_put_failure:
	return -1;
}

static int fq_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct fq_sched_data *q = qdisc_priv(sch);
	struct tc_fq_qd_stats st;

	sch_tree_lock(sch);

	st.gc_flows		  = q->stat_gc_flows;
	st.highprio_packets	  = q->stat_internal_packets;
	st.tcp_retrans		  = q->stat_tcp_retrans;
	st.throttled		  = q->stat_throttled;
	st.flows_plimit		  = q->stat_flows_plimit;
	st.pkts_too_long	  = q->stat_pkts_too_long;
	st.allocation_errors	  = q->stat_allocation_errors;
	st.time_next_delayed_flow = q->time_next_delayed_flow - ktime_get_ns();
	st.flows		  = q->flows;
	st.inactive_flows	  = q->inactive_flows;
	st.throttled_flows	  = q->throttled_flows;
	st.unthrottle_latency_ns  = min_t(unsigned long,
					  q->unthrottle_latency_ns, ~0U);
	sch_tree_unlock(sch);

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

static struct Qdisc_ops fq_qdisc_ops __read_mostly = {
	.id		=	"fq",
	.priv_size	=	sizeof(struct fq_sched_data),

	.enqueue	=	fq_enqueue,
	.dequeue	=	fq_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.init		=	fq_init,
	.reset		=	fq_reset,
	.destroy	=	fq_destroy,
	.change		=	fq_change,
	.dump		=	fq_dump,
	.dump_stats	=	fq_dump_stats,
	.owner		=	THIS_MODULE,
};

static int __init fq_module_init(void)
{
	int ret;

	fq_flow_cachep = kmem_cache_create("fq_flow_cache",
					   sizeof(struct fq_flow),
					   0, 0, NULL);
	if (!fq_flow_cachep)
		return -ENOMEM;

	ret = register_qdisc(&fq_qdisc_ops);
	if (ret)
		kmem_cache_destroy(fq_flow_cachep);
	return ret;
}

static void __exit fq_module_exit(void)
{
	unregister_qdisc(&fq_qdisc_ops);
	kmem_cache_destroy(fq_flow_cachep);
}

module_init(fq_module_init)
module_exit(fq_module_exit)
MODULE_AUTHOR("Eric Dumazet");
MODULE_LICENSE("GPL");