/*
 * Fair Queue CoDel discipline
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Copyright (C) 2012,2015 Eric Dumazet <edumazet@google.com>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/jhash.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/codel.h>

/* Fair Queue CoDel.
 *
 * Principles:
 * Packets are classified (internal classifier or external) on flows.
 * This is a stochastic model (as we use a hash, several flows
 * might be hashed to the same slot).
 * Each flow has a CoDel managed queue.
 * Flows are linked onto two (Round Robin) lists,
 * so that new flows have priority over old ones.
 *
 * For a given flow, packets are not reordered (CoDel uses a FIFO),
 * head drops only.
 * ECN capability is on by default.
 * Low memory footprint (64 bytes per flow).
 */

struct fq_codel_flow {
        struct sk_buff    *head;
        struct sk_buff    *tail;
        struct list_head  flowchain;
        int               deficit;
        u32               dropped; /* number of drops (or ECN marks) on this flow */
        struct codel_vars cvars;
}; /* please try to keep this structure <= 64 bytes */

struct fq_codel_sched_data {
        struct tcf_proto __rcu *filter_list; /* optional external classifier */
        struct fq_codel_flow *flows;    /* Flows table [flows_cnt] */
        u32             *backlogs;      /* backlog table [flows_cnt] */
        u32             flows_cnt;      /* number of flows */
        u32             perturbation;   /* hash perturbation */
        u32             quantum;        /* psched_mtu(qdisc_dev(sch)); */
        struct codel_params cparams;
        struct codel_stats cstats;
        u32             drop_overlimit;
        u32             new_flow_count;

        struct list_head new_flows;     /* list of new flows */
        struct list_head old_flows;     /* list of old flows */
};

static unsigned int fq_codel_hash(const struct fq_codel_sched_data *q,
                                  struct sk_buff *skb)
{
        u32 hash = skb_get_hash_perturb(skb, q->perturbation);

        return reciprocal_scale(hash, q->flows_cnt);
}
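/* Note on the bucket mapping above: reciprocal_scale() (linux/kernel.h)
 * computes ((u64)hash * flows_cnt) >> 32, spreading a 32-bit hash
 * uniformly over [0, flows_cnt) without a modulo. For example, with the
 * default flows_cnt of 1024, hash 0x80000000 lands in bucket 512. The
 * per-qdisc random perturbation makes the mapping unpredictable, so a
 * sender cannot deliberately collide its traffic with another flow.
 */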
static unsigned int fq_codel_classify(struct sk_buff *skb, struct Qdisc *sch,
                                      int *qerr)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);
        struct tcf_proto *filter;
        struct tcf_result res;
        int result;

        /* Returned index is 1-based: 0 means "drop this packet". */
        if (TC_H_MAJ(skb->priority) == sch->handle &&
            TC_H_MIN(skb->priority) > 0 &&
            TC_H_MIN(skb->priority) <= q->flows_cnt)
                return TC_H_MIN(skb->priority);

        filter = rcu_dereference_bh(q->filter_list);
        if (!filter)
                return fq_codel_hash(q, skb) + 1;

        *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
        result = tc_classify(skb, filter, &res);
        if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
                switch (result) {
                case TC_ACT_STOLEN:
                case TC_ACT_QUEUED:
                        *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
                        /* fall through */
                case TC_ACT_SHOT:
                        return 0;
                }
#endif
                if (TC_H_MIN(res.classid) <= q->flows_cnt)
                        return TC_H_MIN(res.classid);
        }
        return 0;
}

/* helper functions : might be changed when/if skb use a standard list_head */

/* remove one skb from head of slot queue */
static inline struct sk_buff *dequeue_head(struct fq_codel_flow *flow)
{
        struct sk_buff *skb = flow->head;

        flow->head = skb->next;
        skb->next = NULL;
        return skb;
}

/* add skb to flow queue (tail add) */
static inline void flow_queue_add(struct fq_codel_flow *flow,
                                  struct sk_buff *skb)
{
        if (flow->head == NULL)
                flow->head = skb;
        else
                flow->tail->next = skb;
        flow->tail = skb;
        skb->next = NULL;
}

static unsigned int fq_codel_drop(struct Qdisc *sch)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);
        struct sk_buff *skb;
        unsigned int maxbacklog = 0, idx = 0, i, len;
        struct fq_codel_flow *flow;

        /* Queue is full! Find the fat flow and drop a packet from it.
         * This might sound expensive, but with 1024 flows, we scan
         * 4KB of memory, and we don't need to handle a complex tree
         * in fast path (packet queue/enqueue) with many cache misses.
         */
        for (i = 0; i < q->flows_cnt; i++) {
                if (q->backlogs[i] > maxbacklog) {
                        maxbacklog = q->backlogs[i];
                        idx = i;
                }
        }
        flow = &q->flows[idx];
        skb = dequeue_head(flow);
        len = qdisc_pkt_len(skb);
        q->backlogs[idx] -= len;
        sch->q.qlen--;
        qdisc_qstats_drop(sch);
        /* update stats that look at the skb before freeing it */
        qdisc_qstats_backlog_dec(sch, skb);
        kfree_skb(skb);
        flow->dropped++;
        return idx;
}
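/* Enqueue path, in short: classify the packet to a bucket (index 0 from
 * the classifier means drop), timestamp it for CoDel, append it to the
 * bucket's FIFO, and give a brand-new flow one quantum of DRR credit on
 * the new_flows list. If the shared packet limit is then exceeded, steal
 * a packet from the fattest flow; NET_XMIT_CN is returned only when the
 * victim is the flow that just enqueued, since only then did this flow
 * lose a packet of its own.
 */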
static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);
        unsigned int idx;
        struct fq_codel_flow *flow;
        int uninitialized_var(ret);

        idx = fq_codel_classify(skb, sch, &ret);
        if (idx == 0) {
                if (ret & __NET_XMIT_BYPASS)
                        qdisc_qstats_drop(sch);
                kfree_skb(skb);
                return ret;
        }
        idx--;

        codel_set_enqueue_time(skb);
        flow = &q->flows[idx];
        flow_queue_add(flow, skb);
        q->backlogs[idx] += qdisc_pkt_len(skb);
        qdisc_qstats_backlog_inc(sch, skb);

        if (list_empty(&flow->flowchain)) {
                list_add_tail(&flow->flowchain, &q->new_flows);
                q->new_flow_count++;
                flow->deficit = q->quantum;
                flow->dropped = 0;
        }
        if (++sch->q.qlen <= sch->limit)
                return NET_XMIT_SUCCESS;

        q->drop_overlimit++;
        /* Return Congestion Notification only if we dropped a packet
         * from this flow.
         */
        if (fq_codel_drop(sch) == idx)
                return NET_XMIT_CN;

        /* As we dropped a packet, better let upper stack know this */
        qdisc_tree_decrease_qlen(sch, 1);
        return NET_XMIT_SUCCESS;
}

/* This is the specific function called from codel_dequeue()
 * to dequeue a packet from queue. Note: backlog is handled in
 * codel, we don't need to reduce it here.
 */
static struct sk_buff *dequeue(struct codel_vars *vars, struct Qdisc *sch)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);
        struct fq_codel_flow *flow;
        struct sk_buff *skb = NULL;

        /* codel_dequeue() hands us the per-flow codel_vars; recover the
         * enclosing flow from it.
         */
        flow = container_of(vars, struct fq_codel_flow, cvars);
        if (flow->head) {
                skb = dequeue_head(flow);
                q->backlogs[flow - q->flows] -= qdisc_pkt_len(skb);
                sch->q.qlen--;
        }
        return skb;
}

static struct sk_buff *fq_codel_dequeue(struct Qdisc *sch)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);
        struct sk_buff *skb;
        struct fq_codel_flow *flow;
        struct list_head *head;
        u32 prev_drop_count, prev_ecn_mark;

begin:
        head = &q->new_flows;
        if (list_empty(head)) {
                head = &q->old_flows;
                if (list_empty(head))
                        return NULL;
        }
        flow = list_first_entry(head, struct fq_codel_flow, flowchain);

        if (flow->deficit <= 0) {
                flow->deficit += q->quantum;
                list_move_tail(&flow->flowchain, &q->old_flows);
                goto begin;
        }

        prev_drop_count = q->cstats.drop_count;
        prev_ecn_mark = q->cstats.ecn_mark;

        skb = codel_dequeue(sch, &q->cparams, &flow->cvars, &q->cstats,
                            dequeue);

        flow->dropped += q->cstats.drop_count - prev_drop_count;
        flow->dropped += q->cstats.ecn_mark - prev_ecn_mark;

        if (!skb) {
                /* force a pass through old_flows to prevent starvation */
                if ((head == &q->new_flows) && !list_empty(&q->old_flows))
                        list_move_tail(&flow->flowchain, &q->old_flows);
                else
                        list_del_init(&flow->flowchain);
                goto begin;
        }
        qdisc_bstats_update(sch, skb);
        flow->deficit -= qdisc_pkt_len(skb);
        /* We can't call qdisc_tree_decrease_qlen() if our qlen is 0,
         * or HTB crashes. Defer it for next round.
         */
        if (q->cstats.drop_count && sch->q.qlen) {
                qdisc_tree_decrease_qlen(sch, q->cstats.drop_count);
                q->cstats.drop_count = 0;
        }
        return skb;
}

static void fq_codel_reset(struct Qdisc *sch)
{
        struct sk_buff *skb;

        /* Drain everything through the normal dequeue path; this also
         * unlinks every active flow from the round-robin lists.
         */
        while ((skb = fq_codel_dequeue(sch)) != NULL)
                kfree_skb(skb);
}
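/* Netlink knobs and their units: TARGET, INTERVAL and CE_THRESHOLD are
 * given in microseconds (converted below to codel's internal time units
 * via (usec * NSEC_PER_USEC) >> CODEL_SHIFT), LIMIT is a packet count,
 * FLOWS a bucket count, QUANTUM a byte count (clamped to >= 256) and
 * ECN a boolean.
 */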
static const struct nla_policy fq_codel_policy[TCA_FQ_CODEL_MAX + 1] = {
        [TCA_FQ_CODEL_TARGET]       = { .type = NLA_U32 },
        [TCA_FQ_CODEL_LIMIT]        = { .type = NLA_U32 },
        [TCA_FQ_CODEL_INTERVAL]     = { .type = NLA_U32 },
        [TCA_FQ_CODEL_ECN]          = { .type = NLA_U32 },
        [TCA_FQ_CODEL_FLOWS]        = { .type = NLA_U32 },
        [TCA_FQ_CODEL_QUANTUM]      = { .type = NLA_U32 },
        [TCA_FQ_CODEL_CE_THRESHOLD] = { .type = NLA_U32 },
};

static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);
        struct nlattr *tb[TCA_FQ_CODEL_MAX + 1];
        int err;

        if (!opt)
                return -EINVAL;

        err = nla_parse_nested(tb, TCA_FQ_CODEL_MAX, opt, fq_codel_policy);
        if (err < 0)
                return err;
        if (tb[TCA_FQ_CODEL_FLOWS]) {
                if (q->flows)
                        return -EINVAL;
                q->flows_cnt = nla_get_u32(tb[TCA_FQ_CODEL_FLOWS]);
                if (!q->flows_cnt ||
                    q->flows_cnt > 65536)
                        return -EINVAL;
        }
        sch_tree_lock(sch);

        if (tb[TCA_FQ_CODEL_TARGET]) {
                u64 target = nla_get_u32(tb[TCA_FQ_CODEL_TARGET]);

                q->cparams.target = (target * NSEC_PER_USEC) >> CODEL_SHIFT;
        }

        if (tb[TCA_FQ_CODEL_CE_THRESHOLD]) {
                u64 val = nla_get_u32(tb[TCA_FQ_CODEL_CE_THRESHOLD]);

                q->cparams.ce_threshold = (val * NSEC_PER_USEC) >> CODEL_SHIFT;
        }

        if (tb[TCA_FQ_CODEL_INTERVAL]) {
                u64 interval = nla_get_u32(tb[TCA_FQ_CODEL_INTERVAL]);

                q->cparams.interval = (interval * NSEC_PER_USEC) >> CODEL_SHIFT;
        }

        if (tb[TCA_FQ_CODEL_LIMIT])
                sch->limit = nla_get_u32(tb[TCA_FQ_CODEL_LIMIT]);

        if (tb[TCA_FQ_CODEL_ECN])
                q->cparams.ecn = !!nla_get_u32(tb[TCA_FQ_CODEL_ECN]);

        if (tb[TCA_FQ_CODEL_QUANTUM])
                q->quantum = max(256U, nla_get_u32(tb[TCA_FQ_CODEL_QUANTUM]));

        while (sch->q.qlen > sch->limit) {
                struct sk_buff *skb = fq_codel_dequeue(sch);

                kfree_skb(skb);
                q->cstats.drop_count++;
        }
        qdisc_tree_decrease_qlen(sch, q->cstats.drop_count);
        q->cstats.drop_count = 0;

        sch_tree_unlock(sch);
        return 0;
}
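/* The flow table can be large: at the 65536-flow maximum it needs
 * 65536 * sizeof(struct fq_codel_flow) (~4MB) of contiguous memory,
 * which kzalloc() may not be able to provide. Fall back to vzalloc()
 * in that case; kvfree() frees either kind of allocation.
 */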
static void *fq_codel_zalloc(size_t sz)
{
        void *ptr = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN);

        if (!ptr)
                ptr = vzalloc(sz);
        return ptr;
}

static void fq_codel_free(void *addr)
{
        kvfree(addr);
}

static void fq_codel_destroy(struct Qdisc *sch)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);

        tcf_destroy_chain(&q->filter_list);
        fq_codel_free(q->backlogs);
        fq_codel_free(q->flows);
}

static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);
        int i;

        sch->limit = 10*1024;
        q->flows_cnt = 1024;
        q->quantum = psched_mtu(qdisc_dev(sch));
        q->perturbation = prandom_u32();
        INIT_LIST_HEAD(&q->new_flows);
        INIT_LIST_HEAD(&q->old_flows);
        codel_params_init(&q->cparams, sch);
        codel_stats_init(&q->cstats);
        q->cparams.ecn = true;

        if (opt) {
                int err = fq_codel_change(sch, opt);

                if (err)
                        return err;
        }

        if (!q->flows) {
                q->flows = fq_codel_zalloc(q->flows_cnt *
                                           sizeof(struct fq_codel_flow));
                if (!q->flows)
                        return -ENOMEM;
                q->backlogs = fq_codel_zalloc(q->flows_cnt * sizeof(u32));
                if (!q->backlogs) {
                        fq_codel_free(q->flows);
                        return -ENOMEM;
                }
                for (i = 0; i < q->flows_cnt; i++) {
                        struct fq_codel_flow *flow = q->flows + i;

                        INIT_LIST_HEAD(&flow->flowchain);
                        codel_vars_init(&flow->cvars);
                }
        }
        if (sch->limit >= 1)
                sch->flags |= TCQ_F_CAN_BYPASS;
        else
                sch->flags &= ~TCQ_F_CAN_BYPASS;
        return 0;
}

static int fq_codel_dump(struct Qdisc *sch, struct sk_buff *skb)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);
        struct nlattr *opts;

        opts = nla_nest_start(skb, TCA_OPTIONS);
        if (opts == NULL)
                goto nla_put_failure;

        if (nla_put_u32(skb, TCA_FQ_CODEL_TARGET,
                        codel_time_to_us(q->cparams.target)) ||
            nla_put_u32(skb, TCA_FQ_CODEL_LIMIT,
                        sch->limit) ||
            nla_put_u32(skb, TCA_FQ_CODEL_INTERVAL,
                        codel_time_to_us(q->cparams.interval)) ||
            nla_put_u32(skb, TCA_FQ_CODEL_ECN,
                        q->cparams.ecn) ||
            nla_put_u32(skb, TCA_FQ_CODEL_QUANTUM,
                        q->quantum) ||
            nla_put_u32(skb, TCA_FQ_CODEL_FLOWS,
                        q->flows_cnt))
                goto nla_put_failure;

        if (q->cparams.ce_threshold != CODEL_DISABLED_THRESHOLD &&
            nla_put_u32(skb, TCA_FQ_CODEL_CE_THRESHOLD,
                        codel_time_to_us(q->cparams.ce_threshold)))
                goto nla_put_failure;

        return nla_nest_end(skb, opts);

nla_put_failure:
        return -1;
}

static int fq_codel_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);
        struct tc_fq_codel_xstats st = {
                .type = TCA_FQ_CODEL_XSTATS_QDISC,
        };
        struct list_head *pos;

        st.qdisc_stats.maxpacket = q->cstats.maxpacket;
        st.qdisc_stats.drop_overlimit = q->drop_overlimit;
        st.qdisc_stats.ecn_mark = q->cstats.ecn_mark;
        st.qdisc_stats.new_flow_count = q->new_flow_count;
        st.qdisc_stats.ce_mark = q->cstats.ce_mark;

        list_for_each(pos, &q->new_flows)
                st.qdisc_stats.new_flows_len++;

        list_for_each(pos, &q->old_flows)
                st.qdisc_stats.old_flows_len++;

        return gnet_stats_copy_app(d, &st, sizeof(st));
}
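/* fq_codel exposes each hash bucket as a read-only pseudo "class" so tc
 * can attach filters and dump per-flow statistics; classes cannot have
 * child qdiscs (leaf() returns NULL) and cannot be reconfigured. Binding
 * a filter clears TCQ_F_CAN_BYPASS, since every packet must then go
 * through the classifier.
 */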
static struct Qdisc *fq_codel_leaf(struct Qdisc *sch, unsigned long arg)
{
        return NULL;
}

static unsigned long fq_codel_get(struct Qdisc *sch, u32 classid)
{
        return 0;
}

static unsigned long fq_codel_bind(struct Qdisc *sch, unsigned long parent,
                                   u32 classid)
{
        /* we cannot bypass queue discipline anymore */
        sch->flags &= ~TCQ_F_CAN_BYPASS;
        return 0;
}

static void fq_codel_put(struct Qdisc *q, unsigned long cl)
{
}

static struct tcf_proto __rcu **fq_codel_find_tcf(struct Qdisc *sch,
                                                  unsigned long cl)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);

        if (cl)
                return NULL;
        return &q->filter_list;
}

static int fq_codel_dump_class(struct Qdisc *sch, unsigned long cl,
                               struct sk_buff *skb, struct tcmsg *tcm)
{
        tcm->tcm_handle |= TC_H_MIN(cl);
        return 0;
}

static int fq_codel_dump_class_stats(struct Qdisc *sch, unsigned long cl,
                                     struct gnet_dump *d)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);
        u32 idx = cl - 1;
        struct gnet_stats_queue qs = { 0 };
        struct tc_fq_codel_xstats xstats;

        if (idx < q->flows_cnt) {
                const struct fq_codel_flow *flow = &q->flows[idx];
                const struct sk_buff *skb = flow->head;

                memset(&xstats, 0, sizeof(xstats));
                xstats.type = TCA_FQ_CODEL_XSTATS_CLASS;
                xstats.class_stats.deficit = flow->deficit;
                xstats.class_stats.ldelay =
                        codel_time_to_us(flow->cvars.ldelay);
                xstats.class_stats.count = flow->cvars.count;
                xstats.class_stats.lastcount = flow->cvars.lastcount;
                xstats.class_stats.dropping = flow->cvars.dropping;
                if (flow->cvars.dropping) {
                        codel_tdiff_t delta = flow->cvars.drop_next -
                                              codel_get_time();

                        xstats.class_stats.drop_next = (delta >= 0) ?
                                codel_time_to_us(delta) :
                                -codel_time_to_us(-delta);
                }
                while (skb) {
                        qs.qlen++;
                        skb = skb->next;
                }
                qs.backlog = q->backlogs[idx];
                qs.drops = flow->dropped;
        }
        if (gnet_stats_copy_queue(d, NULL, &qs, 0) < 0)
                return -1;
        if (idx < q->flows_cnt)
                return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
        return 0;
}

static void fq_codel_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);
        unsigned int i;

        if (arg->stop)
                return;

        for (i = 0; i < q->flows_cnt; i++) {
                if (list_empty(&q->flows[i].flowchain) ||
                    arg->count < arg->skip) {
                        arg->count++;
                        continue;
                }
                if (arg->fn(sch, i + 1, arg) < 0) {
                        arg->stop = 1;
                        break;
                }
                arg->count++;
        }
}

static const struct Qdisc_class_ops fq_codel_class_ops = {
        .leaf           = fq_codel_leaf,
        .get            = fq_codel_get,
        .put            = fq_codel_put,
        .tcf_chain      = fq_codel_find_tcf,
        .bind_tcf       = fq_codel_bind,
        .unbind_tcf     = fq_codel_put,
        .dump           = fq_codel_dump_class,
        .dump_stats     = fq_codel_dump_class_stats,
        .walk           = fq_codel_walk,
};

static struct Qdisc_ops fq_codel_qdisc_ops __read_mostly = {
        .cl_ops         = &fq_codel_class_ops,
        .id             = "fq_codel",
        .priv_size      = sizeof(struct fq_codel_sched_data),
        .enqueue        = fq_codel_enqueue,
        .dequeue        = fq_codel_dequeue,
        .peek           = qdisc_peek_dequeued,
        .drop           = fq_codel_drop,
        .init           = fq_codel_init,
        .reset          = fq_codel_reset,
        .destroy        = fq_codel_destroy,
        .change         = fq_codel_change,
        .dump           = fq_codel_dump,
        .dump_stats     = fq_codel_dump_stats,
        .owner          = THIS_MODULE,
};

static int __init fq_codel_module_init(void)
{
        return register_qdisc(&fq_codel_qdisc_ops);
}

static void __exit fq_codel_module_exit(void)
{
        unregister_qdisc(&fq_codel_qdisc_ops);
}

module_init(fq_codel_module_init)
module_exit(fq_codel_module_exit)
MODULE_AUTHOR("Eric Dumazet");
MODULE_LICENSE("GPL");
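/* Example configuration from user space (a sketch; exact option spelling
 * depends on the installed iproute2 version):
 *
 *   tc qdisc add dev eth0 root fq_codel limit 10240 flows 1024 \
 *           target 5ms interval 100ms quantum 1514 ecn
 *   tc -s qdisc show dev eth0
 *
 * The defaults set in fq_codel_init() (10240 packets, 1024 flows, one
 * MTU of quantum, ECN on) are what a bare
 * "tc qdisc add dev eth0 root fq_codel" gives you.
 */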