/*
 * net/sched/sch_fq.c Fair Queue Packet Scheduler (per flow pacing)
 *
 * Copyright (C) 2013-2015 Eric Dumazet <edumazet@google.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Meant to be mostly used for locally generated traffic:
 * Fast classification depends on skb->sk being set before reaching us.
 * If not (router workload), we use rxhash as a fallback, with a 32 bit wide hash.
 * All packets belonging to a socket are considered as a 'flow'.
 *
 * Flows are dynamically allocated and stored in a hash table of RB trees.
 * They are also part of one Round Robin list (new or old flows).
 *
 * Burst avoidance (aka pacing) capability:
 *
 * A transport (e.g. TCP) can set a rate in sk->sk_pacing_rate, then enqueue
 * a bunch of packets, and this packet scheduler adds delay between packets
 * to respect that rate limitation.
 *
 * enqueue():
 *   - look up one RB tree (out of 1024 or more) to find the flow.
 *     If the flow does not exist, create it and add it to the tree.
 *     Add the skb to the per flow list of skbs (fifo).
 *   - Use a special fifo for high priority packets.
 *
 * dequeue(): serves flows in Round Robin.
 * Note: when a flow becomes empty, we do not immediately remove it from
 * the rb trees, for performance reasons (it is expected to send additional
 * packets, or the SLAB cache will reuse the socket for another flow).
 */

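/* Pacing example with illustrative numbers: if sk_pacing_rate is
 * 12,500,000 bytes/sec (100 Mbit/s), a 1500 byte packet makes the flow
 * eligible again only after 1500 * NSEC_PER_SEC / 12,500,000 = 120 usec
 * (see fq_dequeue(), which also clamps this delay to one second).
 */
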
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/hash.h>
#include <linux/prefetch.h>
#include <linux/vmalloc.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <net/tcp.h>

struct fq_skb_cb {
	u64	time_to_send;
};

static inline struct fq_skb_cb *fq_skb_cb(struct sk_buff *skb)
{
	qdisc_cb_private_validate(skb, sizeof(struct fq_skb_cb));
	return (struct fq_skb_cb *)qdisc_skb_cb(skb)->data;
}

/*
 * Per flow structure, dynamically allocated.
 * If packets have monotonically increasing time_to_send, they are placed
 * in O(1) in a linear list (head, tail), otherwise they are placed in a
 * rbtree (t_root).
 */
struct fq_flow {
	struct rb_root	t_root;
	struct sk_buff	*head;		/* list of skbs for this flow : first skb */
	union {
		struct sk_buff *tail;	/* last skb in the list */
		unsigned long  age;	/* jiffies when flow was emptied, for gc */
	};
	struct rb_node	fq_node;	/* anchor in fq_root[] trees */
	struct sock	*sk;
	int		qlen;		/* number of packets in flow queue */
	int		credit;
	u32		socket_hash;	/* sk_hash */
	struct fq_flow	*next;		/* next pointer in RR lists, or &detached */

	struct rb_node	rate_node;	/* anchor in q->delayed tree */
	u64		time_next_packet;
};

struct fq_flow_head {
	struct fq_flow *first;
	struct fq_flow *last;
};

struct fq_sched_data {
	struct fq_flow_head new_flows;

	struct fq_flow_head old_flows;

	struct rb_root	delayed;	/* for rate limited flows */
	u64		time_next_delayed_flow;
	unsigned long	unthrottle_latency_ns;

	struct fq_flow	internal;	/* for non classified or high prio packets */
	u32		quantum;
	u32		initial_quantum;
	u32		flow_refill_delay;
	u32		flow_plimit;	/* max packets per flow */
	unsigned long	flow_max_rate;	/* optional max rate per flow */
	u64		ce_threshold;
	u32		orphan_mask;	/* mask for orphaned skb */
	u32		low_rate_threshold;
	struct rb_root	*fq_root;
	u8		rate_enable;
	u8		fq_trees_log;

	u32		flows;
	u32		inactive_flows;
	u32		throttled_flows;

	u64		stat_gc_flows;
	u64		stat_internal_packets;
	u64		stat_throttled;
	u64		stat_ce_mark;
	u64		stat_flows_plimit;
	u64		stat_pkts_too_long;
	u64		stat_allocation_errors;
	struct qdisc_watchdog watchdog;
};

/* special value to mark a detached flow (not on old/new list) */
static struct fq_flow detached, throttled;

static void fq_flow_set_detached(struct fq_flow *f)
{
	f->next = &detached;
	f->age = jiffies;
}

static bool fq_flow_is_detached(const struct fq_flow *f)
{
	return f->next == &detached;
}

static bool fq_flow_is_throttled(const struct fq_flow *f)
{
	return f->next == &throttled;
}

static void fq_flow_add_tail(struct fq_flow_head *head, struct fq_flow *flow)
{
	if (head->first)
		head->last->next = flow;
	else
		head->first = flow;
	head->last = flow;
	flow->next = NULL;
}

static void fq_flow_unset_throttled(struct fq_sched_data *q, struct fq_flow *f)
{
	rb_erase(&f->rate_node, &q->delayed);
	q->throttled_flows--;
	fq_flow_add_tail(&q->old_flows, f);
}

static void fq_flow_set_throttled(struct fq_sched_data *q, struct fq_flow *f)
{
	struct rb_node **p = &q->delayed.rb_node, *parent = NULL;

	while (*p) {
		struct fq_flow *aux;

		parent = *p;
		aux = rb_entry(parent, struct fq_flow, rate_node);
		if (f->time_next_packet >= aux->time_next_packet)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&f->rate_node, parent, p);
	rb_insert_color(&f->rate_node, &q->delayed);
	q->throttled_flows++;
	q->stat_throttled++;

	f->next = &throttled;
	if (q->time_next_delayed_flow > f->time_next_packet)
		q->time_next_delayed_flow = f->time_next_packet;
}


static struct kmem_cache *fq_flow_cachep __read_mostly;


/* limit number of collected flows per round */
#define FQ_GC_MAX 8
#define FQ_GC_AGE (3*HZ)

static bool fq_gc_candidate(const struct fq_flow *f)
{
	return fq_flow_is_detached(f) &&
	       time_after(jiffies, f->age + FQ_GC_AGE);
}

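/* Garbage collect at most FQ_GC_MAX flows that have been detached for more
 * than FQ_GC_AGE jiffies, while walking @root toward the node matching @sk.
 */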
static void fq_gc(struct fq_sched_data *q,
		  struct rb_root *root,
		  struct sock *sk)
{
	struct fq_flow *f, *tofree[FQ_GC_MAX];
	struct rb_node **p, *parent;
	int fcnt = 0;

	p = &root->rb_node;
	parent = NULL;
	while (*p) {
		parent = *p;

		f = rb_entry(parent, struct fq_flow, fq_node);
		if (f->sk == sk)
			break;

		if (fq_gc_candidate(f)) {
			tofree[fcnt++] = f;
			if (fcnt == FQ_GC_MAX)
				break;
		}

		if (f->sk > sk)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}

	q->flows -= fcnt;
	q->inactive_flows -= fcnt;
	q->stat_gc_flows += fcnt;
	while (fcnt) {
		struct fq_flow *f = tofree[--fcnt];

		rb_erase(&f->fq_node, root);
		kmem_cache_free(fq_flow_cachep, f);
	}
}

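/* Map an skb to its flow: use skb->sk when available, otherwise hash the
 * packet and build a fake socket pointer with the low order bit set, so it
 * cannot collide with a real (word aligned) socket pointer.
 * TC_PRIO_CONTROL packets bypass classification and use the internal queue.
 */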
static struct fq_flow *fq_classify(struct sk_buff *skb, struct fq_sched_data *q)
{
	struct rb_node **p, *parent;
	struct sock *sk = skb->sk;
	struct rb_root *root;
	struct fq_flow *f;

	/* warning: no starvation prevention... */
	if (unlikely((skb->priority & TC_PRIO_MAX) == TC_PRIO_CONTROL))
		return &q->internal;

	/* SYNACK messages are attached to a TCP_NEW_SYN_RECV request socket
	 * or a listener (SYNCOOKIE mode)
	 * 1) request sockets are not full blown,
	 *    they do not contain sk_pacing_rate
	 * 2) They are not part of a 'flow' yet
	 * 3) We do not want to rate limit them (eg SYNFLOOD attack),
	 *    especially if the listener set SO_MAX_PACING_RATE
	 * 4) We pretend they are orphaned
	 */
	if (!sk || sk_listener(sk)) {
		unsigned long hash = skb_get_hash(skb) & q->orphan_mask;

		/* By forcing low order bit to 1, we make sure to not
		 * collide with a local flow (socket pointers are word aligned)
		 */
		sk = (struct sock *)((hash << 1) | 1UL);
		skb_orphan(skb);
	} else if (sk->sk_state == TCP_CLOSE) {
		unsigned long hash = skb_get_hash(skb) & q->orphan_mask;
		/*
		 * Sockets in TCP_CLOSE are not connected.
		 * Typical use case is UDP sockets, they can send packets
		 * with sendto() to many different destinations.
		 * We probably could use a generic bit advertising
		 * non connected sockets, instead of sk_state == TCP_CLOSE,
		 * if we care enough.
		 */
		sk = (struct sock *)((hash << 1) | 1UL);
	}

	root = &q->fq_root[hash_ptr(sk, q->fq_trees_log)];

	if (q->flows >= (2U << q->fq_trees_log) &&
	    q->inactive_flows > q->flows/2)
		fq_gc(q, root, sk);

	p = &root->rb_node;
	parent = NULL;
	while (*p) {
		parent = *p;

		f = rb_entry(parent, struct fq_flow, fq_node);
		if (f->sk == sk) {
			/* socket might have been reallocated, so check
			 * if its sk_hash is the same.
			 * If not, we need to refill credit with
			 * initial quantum
			 */
			if (unlikely(skb->sk == sk &&
				     f->socket_hash != sk->sk_hash)) {
				f->credit = q->initial_quantum;
				f->socket_hash = sk->sk_hash;
				if (fq_flow_is_throttled(f))
					fq_flow_unset_throttled(q, f);
				f->time_next_packet = 0ULL;
			}
			return f;
		}
		if (f->sk > sk)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}

	f = kmem_cache_zalloc(fq_flow_cachep, GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!f)) {
		q->stat_allocation_errors++;
		return &q->internal;
	}
	/* f->t_root is already zeroed after kmem_cache_zalloc() */

	fq_flow_set_detached(f);
	f->sk = sk;
	if (skb->sk == sk)
		f->socket_hash = sk->sk_hash;
	f->credit = q->initial_quantum;

	rb_link_node(&f->fq_node, parent, p);
	rb_insert_color(&f->fq_node, root);

	q->flows++;
	q->inactive_flows++;
	return f;
}

static struct sk_buff *fq_peek(struct fq_flow *flow)
{
	struct sk_buff *skb = skb_rb_first(&flow->t_root);
	struct sk_buff *head = flow->head;

	if (!skb)
		return head;

	if (!head)
		return skb;

	if (fq_skb_cb(skb)->time_to_send < fq_skb_cb(head)->time_to_send)
		return skb;
	return head;
}

static void fq_erase_head(struct Qdisc *sch, struct fq_flow *flow,
			  struct sk_buff *skb)
{
	if (skb == flow->head) {
		flow->head = skb->next;
	} else {
		rb_erase(&skb->rbnode, &flow->t_root);
		skb->dev = qdisc_dev(sch);
	}
}

/* remove one skb from head of flow queue */
static struct sk_buff *fq_dequeue_head(struct Qdisc *sch, struct fq_flow *flow)
{
	struct sk_buff *skb = fq_peek(flow);

	if (skb) {
		fq_erase_head(sch, flow, skb);
		skb_mark_not_on_list(skb);
		flow->qlen--;
		qdisc_qstats_backlog_dec(sch, skb);
		sch->q.qlen--;
	}
	return skb;
}

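/* Add one skb to the flow queue.
 * Packets whose time_to_send is in order go at the tail of the (head, tail)
 * linked list in O(1); packets carrying an earlier time_to_send are stored
 * in the t_root rbtree, and fq_peek() returns whichever of the two heads has
 * the earliest time_to_send.
 */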
static void flow_queue_add(struct fq_flow *flow, struct sk_buff *skb)
{
	struct rb_node **p, *parent;
	struct sk_buff *head, *aux;

	fq_skb_cb(skb)->time_to_send = skb->tstamp ?: ktime_get_ns();

	head = flow->head;
	if (!head ||
	    fq_skb_cb(skb)->time_to_send >= fq_skb_cb(flow->tail)->time_to_send) {
		if (!head)
			flow->head = skb;
		else
			flow->tail->next = skb;
		flow->tail = skb;
		skb->next = NULL;
		return;
	}

	p = &flow->t_root.rb_node;
	parent = NULL;

	while (*p) {
		parent = *p;
		aux = rb_to_skb(parent);
		if (fq_skb_cb(skb)->time_to_send >= fq_skb_cb(aux)->time_to_send)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&skb->rbnode, parent, p);
	rb_insert_color(&skb->rbnode, &flow->t_root);
}

static int fq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
		      struct sk_buff **to_free)
{
	struct fq_sched_data *q = qdisc_priv(sch);
	struct fq_flow *f;

	if (unlikely(sch->q.qlen >= sch->limit))
		return qdisc_drop(skb, sch, to_free);

	f = fq_classify(skb, q);
	if (unlikely(f->qlen >= q->flow_plimit && f != &q->internal)) {
		q->stat_flows_plimit++;
		return qdisc_drop(skb, sch, to_free);
	}

	f->qlen++;
	qdisc_qstats_backlog_inc(sch, skb);
	if (fq_flow_is_detached(f)) {
		struct sock *sk = skb->sk;

		fq_flow_add_tail(&q->new_flows, f);
		if (time_after(jiffies, f->age + q->flow_refill_delay))
			f->credit = max_t(u32, f->credit, q->quantum);
		if (sk && q->rate_enable) {
			if (unlikely(smp_load_acquire(&sk->sk_pacing_status) !=
				     SK_PACING_FQ))
				smp_store_release(&sk->sk_pacing_status,
						  SK_PACING_FQ);
		}
		q->inactive_flows--;
	}

	/* Note: this overwrites f->age */
	flow_queue_add(f, skb);

	if (unlikely(f == &q->internal)) {
		q->stat_internal_packets++;
	}
	sch->q.qlen++;

	return NET_XMIT_SUCCESS;
}

static void fq_check_throttled(struct fq_sched_data *q, u64 now)
{
	unsigned long sample;
	struct rb_node *p;

	if (q->time_next_delayed_flow > now)
		return;

	/* Update unthrottle latency EWMA.
	 * This is cheap and can help diagnose timer/latency problems.
	 */
	sample = (unsigned long)(now - q->time_next_delayed_flow);
	q->unthrottle_latency_ns -= q->unthrottle_latency_ns >> 3;
	q->unthrottle_latency_ns += sample >> 3;

	q->time_next_delayed_flow = ~0ULL;
	while ((p = rb_first(&q->delayed)) != NULL) {
		struct fq_flow *f = rb_entry(p, struct fq_flow, rate_node);

		if (f->time_next_packet > now) {
			q->time_next_delayed_flow = f->time_next_packet;
			break;
		}
		fq_flow_unset_throttled(q, f);
	}
}

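/* Dequeue one packet, serving flows in Round Robin.
 * The internal (high priority) queue is served first, then new flows, then
 * old flows. A flow whose credit is exhausted receives a fresh quantum and
 * moves to the old flows list; a flow whose next packet is not yet eligible
 * is parked in the q->delayed rbtree until fq_check_throttled() releases it.
 */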
static struct sk_buff *fq_dequeue(struct Qdisc *sch)
{
	struct fq_sched_data *q = qdisc_priv(sch);
	struct fq_flow_head *head;
	struct sk_buff *skb;
	struct fq_flow *f;
	unsigned long rate;
	u32 plen;
	u64 now;

	if (!sch->q.qlen)
		return NULL;

	skb = fq_dequeue_head(sch, &q->internal);
	if (skb)
		goto out;

	now = ktime_get_ns();
	fq_check_throttled(q, now);
begin:
	head = &q->new_flows;
	if (!head->first) {
		head = &q->old_flows;
		if (!head->first) {
			if (q->time_next_delayed_flow != ~0ULL)
				qdisc_watchdog_schedule_ns(&q->watchdog,
							   q->time_next_delayed_flow);
			return NULL;
		}
	}
	f = head->first;

	if (f->credit <= 0) {
		f->credit += q->quantum;
		head->first = f->next;
		fq_flow_add_tail(&q->old_flows, f);
		goto begin;
	}

	skb = fq_peek(f);
	if (skb) {
		u64 time_next_packet = max_t(u64, fq_skb_cb(skb)->time_to_send,
					     f->time_next_packet);

		if (now < time_next_packet) {
			head->first = f->next;
			f->time_next_packet = time_next_packet;
			fq_flow_set_throttled(q, f);
			goto begin;
		}
		if (time_next_packet &&
		    (s64)(now - time_next_packet - q->ce_threshold) > 0) {
			INET_ECN_set_ce(skb);
			q->stat_ce_mark++;
		}
	}

	skb = fq_dequeue_head(sch, f);
	if (!skb) {
		head->first = f->next;
		/* force a pass through old_flows to prevent starvation */
		if ((head == &q->new_flows) && q->old_flows.first) {
			fq_flow_add_tail(&q->old_flows, f);
		} else {
			fq_flow_set_detached(f);
			q->inactive_flows++;
		}
		goto begin;
	}
	prefetch(&skb->end);
	plen = qdisc_pkt_len(skb);
	f->credit -= plen;

	if (!q->rate_enable)
		goto out;

	rate = q->flow_max_rate;

	/* If EDT time was provided for this skb, we need to
	 * update f->time_next_packet only if this qdisc enforces
	 * a flow max rate.
	 */
	if (!skb->tstamp) {
		if (skb->sk)
			rate = min(skb->sk->sk_pacing_rate, rate);

		if (rate <= q->low_rate_threshold) {
			f->credit = 0;
		} else {
			plen = max(plen, q->quantum);
			if (f->credit > 0)
				goto out;
		}
	}
	if (rate != ~0UL) {
		u64 len = (u64)plen * NSEC_PER_SEC;

		if (likely(rate))
			len = div64_ul(len, rate);
		/* Since socket rate can change later,
		 * clamp the delay to 1 second.
		 * Really, providers of too big packets should be fixed!
		 */
		if (unlikely(len > NSEC_PER_SEC)) {
			len = NSEC_PER_SEC;
			q->stat_pkts_too_long++;
		}
		/* Account for scheduler/timer drift.
		 * f->time_next_packet was set when the prior packet was sent,
		 * and current time (@now) can be too late by tens of usec.
		 */
		if (f->time_next_packet)
			len -= min(len/2, now - f->time_next_packet);
		f->time_next_packet = now + len;
	}
out:
	qdisc_bstats_update(sch, skb);
	return skb;
}

static void fq_flow_purge(struct fq_flow *flow)
{
	struct rb_node *p = rb_first(&flow->t_root);

	while (p) {
		struct sk_buff *skb = rb_to_skb(p);

		p = rb_next(p);
		rb_erase(&skb->rbnode, &flow->t_root);
		rtnl_kfree_skbs(skb, skb);
	}
	rtnl_kfree_skbs(flow->head, flow->tail);
	flow->head = NULL;
	flow->qlen = 0;
}

static void fq_reset(struct Qdisc *sch)
{
	struct fq_sched_data *q = qdisc_priv(sch);
	struct rb_root *root;
	struct rb_node *p;
	struct fq_flow *f;
	unsigned int idx;

	sch->q.qlen = 0;
	sch->qstats.backlog = 0;

	fq_flow_purge(&q->internal);

	if (!q->fq_root)
		return;

	for (idx = 0; idx < (1U << q->fq_trees_log); idx++) {
		root = &q->fq_root[idx];
		while ((p = rb_first(root)) != NULL) {
			f = rb_entry(p, struct fq_flow, fq_node);
			rb_erase(p, root);

			fq_flow_purge(f);

			kmem_cache_free(fq_flow_cachep, f);
		}
	}
	q->new_flows.first = NULL;
	q->old_flows.first = NULL;
	q->delayed = RB_ROOT;
	q->flows = 0;
	q->inactive_flows = 0;
	q->throttled_flows = 0;
}

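/* Move all flows from the old hash array to the new one when the number of
 * buckets changes. Flows that are garbage collection candidates are freed
 * instead of being rehashed.
 */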
static void fq_rehash(struct fq_sched_data *q,
		      struct rb_root *old_array, u32 old_log,
		      struct rb_root *new_array, u32 new_log)
{
	struct rb_node *op, **np, *parent;
	struct rb_root *oroot, *nroot;
	struct fq_flow *of, *nf;
	int fcnt = 0;
	u32 idx;

	for (idx = 0; idx < (1U << old_log); idx++) {
		oroot = &old_array[idx];
		while ((op = rb_first(oroot)) != NULL) {
			rb_erase(op, oroot);
			of = rb_entry(op, struct fq_flow, fq_node);
			if (fq_gc_candidate(of)) {
				fcnt++;
				kmem_cache_free(fq_flow_cachep, of);
				continue;
			}
			nroot = &new_array[hash_ptr(of->sk, new_log)];

			np = &nroot->rb_node;
			parent = NULL;
			while (*np) {
				parent = *np;

				nf = rb_entry(parent, struct fq_flow, fq_node);
				BUG_ON(nf->sk == of->sk);

				if (nf->sk > of->sk)
					np = &parent->rb_right;
				else
					np = &parent->rb_left;
			}

			rb_link_node(&of->fq_node, parent, np);
			rb_insert_color(&of->fq_node, nroot);
		}
	}
	q->flows -= fcnt;
	q->inactive_flows -= fcnt;
	q->stat_gc_flows += fcnt;
}

static void fq_free(void *addr)
{
	kvfree(addr);
}

static int fq_resize(struct Qdisc *sch, u32 log)
{
	struct fq_sched_data *q = qdisc_priv(sch);
	struct rb_root *array;
	void *old_fq_root;
	u32 idx;

	if (q->fq_root && log == q->fq_trees_log)
		return 0;

	/* If XPS was set up, we can allocate memory on the right NUMA node */
	array = kvmalloc_node(sizeof(struct rb_root) << log,
			      GFP_KERNEL | __GFP_RETRY_MAYFAIL,
			      netdev_queue_numa_node_read(sch->dev_queue));
	if (!array)
		return -ENOMEM;

	for (idx = 0; idx < (1U << log); idx++)
		array[idx] = RB_ROOT;

	sch_tree_lock(sch);

	old_fq_root = q->fq_root;
	if (old_fq_root)
		fq_rehash(q, old_fq_root, q->fq_trees_log, array, log);

	q->fq_root = array;
	q->fq_trees_log = log;

	sch_tree_unlock(sch);

	fq_free(old_fq_root);

	return 0;
}

static const struct nla_policy fq_policy[TCA_FQ_MAX + 1] = {
	[TCA_FQ_PLIMIT]			= { .type = NLA_U32 },
	[TCA_FQ_FLOW_PLIMIT]		= { .type = NLA_U32 },
	[TCA_FQ_QUANTUM]		= { .type = NLA_U32 },
	[TCA_FQ_INITIAL_QUANTUM]	= { .type = NLA_U32 },
	[TCA_FQ_RATE_ENABLE]		= { .type = NLA_U32 },
	[TCA_FQ_FLOW_DEFAULT_RATE]	= { .type = NLA_U32 },
	[TCA_FQ_FLOW_MAX_RATE]		= { .type = NLA_U32 },
	[TCA_FQ_BUCKETS_LOG]		= { .type = NLA_U32 },
	[TCA_FQ_FLOW_REFILL_DELAY]	= { .type = NLA_U32 },
	[TCA_FQ_LOW_RATE_THRESHOLD]	= { .type = NLA_U32 },
	[TCA_FQ_CE_THRESHOLD]		= { .type = NLA_U32 },
};

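/* Configuration is received as nested TCA_FQ_* netlink attributes.
 * Example (assumed iproute2 syntax, shown for illustration only):
 *   tc qdisc replace dev eth0 root fq limit 10000 flow_limit 100 \
 *      quantum 3028 initial_quantum 15140 maxrate 1gbit buckets 1024
 * maps to TCA_FQ_PLIMIT, TCA_FQ_FLOW_PLIMIT, TCA_FQ_QUANTUM,
 * TCA_FQ_INITIAL_QUANTUM, TCA_FQ_FLOW_MAX_RATE and TCA_FQ_BUCKETS_LOG.
 */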
static int fq_change(struct Qdisc *sch, struct nlattr *opt,
		     struct netlink_ext_ack *extack)
{
	struct fq_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_FQ_MAX + 1];
	int err, drop_count = 0;
	unsigned drop_len = 0;
	u32 fq_log;

	if (!opt)
		return -EINVAL;

	err = nla_parse_nested_deprecated(tb, TCA_FQ_MAX, opt, fq_policy,
					  NULL);
	if (err < 0)
		return err;

	sch_tree_lock(sch);

	fq_log = q->fq_trees_log;

	if (tb[TCA_FQ_BUCKETS_LOG]) {
		u32 nval = nla_get_u32(tb[TCA_FQ_BUCKETS_LOG]);

		if (nval >= 1 && nval <= ilog2(256*1024))
			fq_log = nval;
		else
			err = -EINVAL;
	}
	if (tb[TCA_FQ_PLIMIT])
		sch->limit = nla_get_u32(tb[TCA_FQ_PLIMIT]);

	if (tb[TCA_FQ_FLOW_PLIMIT])
		q->flow_plimit = nla_get_u32(tb[TCA_FQ_FLOW_PLIMIT]);

	if (tb[TCA_FQ_QUANTUM]) {
		u32 quantum = nla_get_u32(tb[TCA_FQ_QUANTUM]);

		if (quantum > 0)
			q->quantum = quantum;
		else
			err = -EINVAL;
	}

	if (tb[TCA_FQ_INITIAL_QUANTUM])
		q->initial_quantum = nla_get_u32(tb[TCA_FQ_INITIAL_QUANTUM]);

	if (tb[TCA_FQ_FLOW_DEFAULT_RATE])
		pr_warn_ratelimited("sch_fq: defrate %u ignored.\n",
				    nla_get_u32(tb[TCA_FQ_FLOW_DEFAULT_RATE]));

	if (tb[TCA_FQ_FLOW_MAX_RATE]) {
		u32 rate = nla_get_u32(tb[TCA_FQ_FLOW_MAX_RATE]);

		q->flow_max_rate = (rate == ~0U) ? ~0UL : rate;
	}
	if (tb[TCA_FQ_LOW_RATE_THRESHOLD])
		q->low_rate_threshold =
			nla_get_u32(tb[TCA_FQ_LOW_RATE_THRESHOLD]);

	if (tb[TCA_FQ_RATE_ENABLE]) {
		u32 enable = nla_get_u32(tb[TCA_FQ_RATE_ENABLE]);

		if (enable <= 1)
			q->rate_enable = enable;
		else
			err = -EINVAL;
	}

	if (tb[TCA_FQ_FLOW_REFILL_DELAY]) {
		u32 usecs_delay = nla_get_u32(tb[TCA_FQ_FLOW_REFILL_DELAY]);

		q->flow_refill_delay = usecs_to_jiffies(usecs_delay);
	}

	if (tb[TCA_FQ_ORPHAN_MASK])
		q->orphan_mask = nla_get_u32(tb[TCA_FQ_ORPHAN_MASK]);

	if (tb[TCA_FQ_CE_THRESHOLD])
		q->ce_threshold = (u64)NSEC_PER_USEC *
				  nla_get_u32(tb[TCA_FQ_CE_THRESHOLD]);

	if (!err) {
		sch_tree_unlock(sch);
		err = fq_resize(sch, fq_log);
		sch_tree_lock(sch);
	}
	while (sch->q.qlen > sch->limit) {
		struct sk_buff *skb = fq_dequeue(sch);

		if (!skb)
			break;
		drop_len += qdisc_pkt_len(skb);
		rtnl_kfree_skbs(skb, skb);
		drop_count++;
	}
	qdisc_tree_reduce_backlog(sch, drop_count, drop_len);

	sch_tree_unlock(sch);
	return err;
}

static void fq_destroy(struct Qdisc *sch)
{
	struct fq_sched_data *q = qdisc_priv(sch);

	fq_reset(sch);
	fq_free(q->fq_root);
	qdisc_watchdog_cancel(&q->watchdog);
}

static int fq_init(struct Qdisc *sch, struct nlattr *opt,
		   struct netlink_ext_ack *extack)
{
	struct fq_sched_data *q = qdisc_priv(sch);
	int err;

	sch->limit		= 10000;
	q->flow_plimit		= 100;
	q->quantum		= 2 * psched_mtu(qdisc_dev(sch));
	q->initial_quantum	= 10 * psched_mtu(qdisc_dev(sch));
	q->flow_refill_delay	= msecs_to_jiffies(40);
	q->flow_max_rate	= ~0UL;
	q->time_next_delayed_flow = ~0ULL;
	q->rate_enable		= 1;
	q->new_flows.first	= NULL;
	q->old_flows.first	= NULL;
	q->delayed		= RB_ROOT;
	q->fq_root		= NULL;
	q->fq_trees_log		= ilog2(1024);
	q->orphan_mask		= 1024 - 1;
	q->low_rate_threshold	= 550000 / 8;

	/* Default ce_threshold of 4294 seconds */
	q->ce_threshold		= (u64)NSEC_PER_USEC * ~0U;

	qdisc_watchdog_init_clockid(&q->watchdog, sch, CLOCK_MONOTONIC);

	if (opt)
		err = fq_change(sch, opt, extack);
	else
		err = fq_resize(sch, q->fq_trees_log);

	return err;
}

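/* Report the current configuration. Internally ce_threshold is kept in
 * nanoseconds and flow_max_rate in an unsigned long; both are converted
 * back to the 32bit usec / bytes-per-second values used on the netlink
 * side.
 */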
static int fq_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct fq_sched_data *q = qdisc_priv(sch);
	u64 ce_threshold = q->ce_threshold;
	struct nlattr *opts;

	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;

	/* TCA_FQ_FLOW_DEFAULT_RATE is not used anymore */

	do_div(ce_threshold, NSEC_PER_USEC);

	if (nla_put_u32(skb, TCA_FQ_PLIMIT, sch->limit) ||
	    nla_put_u32(skb, TCA_FQ_FLOW_PLIMIT, q->flow_plimit) ||
	    nla_put_u32(skb, TCA_FQ_QUANTUM, q->quantum) ||
	    nla_put_u32(skb, TCA_FQ_INITIAL_QUANTUM, q->initial_quantum) ||
	    nla_put_u32(skb, TCA_FQ_RATE_ENABLE, q->rate_enable) ||
	    nla_put_u32(skb, TCA_FQ_FLOW_MAX_RATE,
			min_t(unsigned long, q->flow_max_rate, ~0U)) ||
	    nla_put_u32(skb, TCA_FQ_FLOW_REFILL_DELAY,
			jiffies_to_usecs(q->flow_refill_delay)) ||
	    nla_put_u32(skb, TCA_FQ_ORPHAN_MASK, q->orphan_mask) ||
	    nla_put_u32(skb, TCA_FQ_LOW_RATE_THRESHOLD,
			q->low_rate_threshold) ||
	    nla_put_u32(skb, TCA_FQ_CE_THRESHOLD, (u32)ce_threshold) ||
	    nla_put_u32(skb, TCA_FQ_BUCKETS_LOG, q->fq_trees_log))
		goto nla_put_failure;

	return nla_nest_end(skb, opts);

nla_put_failure:
	return -1;
}

static int fq_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct fq_sched_data *q = qdisc_priv(sch);
	struct tc_fq_qd_stats st;

	sch_tree_lock(sch);

	st.gc_flows		  = q->stat_gc_flows;
	st.highprio_packets	  = q->stat_internal_packets;
	st.tcp_retrans		  = 0;
	st.throttled		  = q->stat_throttled;
	st.flows_plimit		  = q->stat_flows_plimit;
	st.pkts_too_long	  = q->stat_pkts_too_long;
	st.allocation_errors	  = q->stat_allocation_errors;
	st.time_next_delayed_flow = q->time_next_delayed_flow - ktime_get_ns();
	st.flows		  = q->flows;
	st.inactive_flows	  = q->inactive_flows;
	st.throttled_flows	  = q->throttled_flows;
	st.unthrottle_latency_ns  = min_t(unsigned long,
					  q->unthrottle_latency_ns, ~0U);
	st.ce_mark		  = q->stat_ce_mark;
	sch_tree_unlock(sch);

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

static struct Qdisc_ops fq_qdisc_ops __read_mostly = {
	.id		=	"fq",
	.priv_size	=	sizeof(struct fq_sched_data),

	.enqueue	=	fq_enqueue,
	.dequeue	=	fq_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.init		=	fq_init,
	.reset		=	fq_reset,
	.destroy	=	fq_destroy,
	.change		=	fq_change,
	.dump		=	fq_dump,
	.dump_stats	=	fq_dump_stats,
	.owner		=	THIS_MODULE,
};

static int __init fq_module_init(void)
{
	int ret;

	fq_flow_cachep = kmem_cache_create("fq_flow_cache",
					   sizeof(struct fq_flow),
					   0, 0, NULL);
	if (!fq_flow_cachep)
		return -ENOMEM;

	ret = register_qdisc(&fq_qdisc_ops);
	if (ret)
		kmem_cache_destroy(fq_flow_cachep);
	return ret;
}

static void __exit fq_module_exit(void)
{
	unregister_qdisc(&fq_qdisc_ops);
	kmem_cache_destroy(fq_flow_cachep);
}

module_init(fq_module_init)
module_exit(fq_module_exit)
MODULE_AUTHOR("Eric Dumazet");
MODULE_LICENSE("GPL");