/*
 * Fair Queue CoDel discipline
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Copyright (C) 2012,2015 Eric Dumazet <edumazet@google.com>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/jhash.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/codel.h>
#include <net/codel_impl.h>
#include <net/codel_qdisc.h>

/* Fair Queue CoDel.
 *
 * Principles :
 * Packets are classified (internal classifier or external) on flows.
 * This is a stochastic model (as we use a hash, several flows
 * might be hashed to the same slot).
 * Each flow has a CoDel managed queue.
 * Flows are linked onto two (Round Robin) lists,
 * so that new flows have priority over old ones.
 *
 * For a given flow, packets are not reordered (CoDel uses a FIFO),
 * head drops only.
 * ECN capability is on by default.
 * Low memory footprint (64 bytes per flow).
 */

struct fq_codel_flow {
        struct sk_buff    *head;
        struct sk_buff    *tail;
        struct list_head  flowchain;
        int               deficit;
        u32               dropped; /* number of drops (or ECN marks) on this flow */
        struct codel_vars cvars;
}; /* please try to keep this structure <= 64 bytes */

struct fq_codel_sched_data {
        struct tcf_proto __rcu *filter_list; /* optional external classifier */
        struct tcf_block *block;
        struct fq_codel_flow *flows;    /* Flows table [flows_cnt] */
        u32             *backlogs;      /* backlog table [flows_cnt] */
        u32             flows_cnt;      /* number of flows */
        u32             quantum;        /* psched_mtu(qdisc_dev(sch)); */
        u32             drop_batch_size;
        u32             memory_limit;
        struct codel_params cparams;
        struct codel_stats cstats;
        u32             memory_usage;
        u32             drop_overmemory;
        u32             drop_overlimit;
        u32             new_flow_count;

        struct list_head new_flows;     /* list of new flows */
        struct list_head old_flows;     /* list of old flows */
};

static unsigned int fq_codel_hash(const struct fq_codel_sched_data *q,
                                  struct sk_buff *skb)
{
        return reciprocal_scale(skb_get_hash(skb), q->flows_cnt);
}

static unsigned int fq_codel_classify(struct sk_buff *skb, struct Qdisc *sch,
                                      int *qerr)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);
        struct tcf_proto *filter;
        struct tcf_result res;
        int result;

        if (TC_H_MAJ(skb->priority) == sch->handle &&
            TC_H_MIN(skb->priority) > 0 &&
            TC_H_MIN(skb->priority) <= q->flows_cnt)
                return TC_H_MIN(skb->priority);

        filter = rcu_dereference_bh(q->filter_list);
        if (!filter)
                return fq_codel_hash(q, skb) + 1;

        *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
        result = tcf_classify(skb, filter, &res, false);
        if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
                switch (result) {
                case TC_ACT_STOLEN:
                case TC_ACT_QUEUED:
                case TC_ACT_TRAP:
                        *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
                        /* fall through */
                case TC_ACT_SHOT:
                        return 0;
                }
#endif
                if (TC_H_MIN(res.classid) <= q->flows_cnt)
                        return TC_H_MIN(res.classid);
        }
        return 0;
}

/* helper functions : might be changed when/if skb use a standard list_head */
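/* A note on flow selection (editorial, not from the original source):
 * fq_codel_classify() returns 0 to signal a drop, or a 1-based flow index
 * (hence the "+ 1" above and the "idx--" in fq_codel_enqueue()).  Without an
 * external filter, the slot comes from reciprocal_scale(), which maps a
 * 32-bit hash h into [0, flows_cnt) as ((u64)h * flows_cnt) >> 32, trading
 * the division of "h % flows_cnt" for a single multiply.  With the default
 * flows_cnt of 1024, a hash of 0x80000000 (the midpoint of the u32 range)
 * lands in slot 512, for example.
 */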
/* remove one skb from head of slot queue */
static inline struct sk_buff *dequeue_head(struct fq_codel_flow *flow)
{
        struct sk_buff *skb = flow->head;

        flow->head = skb->next;
        skb_mark_not_on_list(skb);
        return skb;
}

/* add skb to flow queue (tail add) */
static inline void flow_queue_add(struct fq_codel_flow *flow,
                                  struct sk_buff *skb)
{
        if (flow->head == NULL)
                flow->head = skb;
        else
                flow->tail->next = skb;
        flow->tail = skb;
        skb->next = NULL;
}

static unsigned int fq_codel_drop(struct Qdisc *sch, unsigned int max_packets,
                                  struct sk_buff **to_free)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);
        struct sk_buff *skb;
        unsigned int maxbacklog = 0, idx = 0, i, len;
        struct fq_codel_flow *flow;
        unsigned int threshold;
        unsigned int mem = 0;

        /* Queue is full! Find the fat flow and drop packet(s) from it.
         * This might sound expensive, but with 1024 flows, we scan
         * 4KB of memory, and we don't need to handle a complex tree
         * in fast path (packet queue/enqueue) with many cache misses.
         * In stress mode, we'll try to drop 64 packets from the flow,
         * amortizing this linear lookup to one cache line per drop.
         */
        for (i = 0; i < q->flows_cnt; i++) {
                if (q->backlogs[i] > maxbacklog) {
                        maxbacklog = q->backlogs[i];
                        idx = i;
                }
        }

        /* Our goal is to drop half of this fat flow backlog */
        threshold = maxbacklog >> 1;

        flow = &q->flows[idx];
        len = 0;
        i = 0;
        do {
                skb = dequeue_head(flow);
                len += qdisc_pkt_len(skb);
                mem += get_codel_cb(skb)->mem_usage;
                __qdisc_drop(skb, to_free);
        } while (++i < max_packets && len < threshold);

        flow->dropped += i;
        q->backlogs[idx] -= len;
        q->memory_usage -= mem;
        sch->qstats.drops += i;
        sch->qstats.backlog -= len;
        sch->q.qlen -= i;
        return idx;
}
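/* Worked numbers behind the comment in fq_codel_drop() above: the backlog
 * table is flows_cnt * sizeof(u32) bytes, so the default 1024 flows occupy
 * 4KB, i.e. 64 cache lines of 64 bytes each.  One scan per batch of up to
 * max_packets drops (drop_batch_size, 64 by default) therefore costs about
 * one cache line per dropped packet.  And since threshold is half of the
 * fattest backlog, a single call drops at most about half of that flow's
 * queue (or drop_batch_size packets, whichever limit is hit first), rather
 * than emptying it.
 */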
static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                            struct sk_buff **to_free)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);
        unsigned int idx, prev_backlog, prev_qlen;
        struct fq_codel_flow *flow;
        int uninitialized_var(ret);
        unsigned int pkt_len;
        bool memory_limited;

        idx = fq_codel_classify(skb, sch, &ret);
        if (idx == 0) {
                if (ret & __NET_XMIT_BYPASS)
                        qdisc_qstats_drop(sch);
                __qdisc_drop(skb, to_free);
                return ret;
        }
        idx--;

        codel_set_enqueue_time(skb);
        flow = &q->flows[idx];
        flow_queue_add(flow, skb);
        q->backlogs[idx] += qdisc_pkt_len(skb);
        qdisc_qstats_backlog_inc(sch, skb);

        if (list_empty(&flow->flowchain)) {
                list_add_tail(&flow->flowchain, &q->new_flows);
                q->new_flow_count++;
                flow->deficit = q->quantum;
                flow->dropped = 0;
        }
        get_codel_cb(skb)->mem_usage = skb->truesize;
        q->memory_usage += get_codel_cb(skb)->mem_usage;
        memory_limited = q->memory_usage > q->memory_limit;
        if (++sch->q.qlen <= sch->limit && !memory_limited)
                return NET_XMIT_SUCCESS;

        prev_backlog = sch->qstats.backlog;
        prev_qlen = sch->q.qlen;

        /* save this packet length as it might be dropped by fq_codel_drop() */
        pkt_len = qdisc_pkt_len(skb);
        /* fq_codel_drop() is quite expensive, as it performs a linear search
         * in q->backlogs[] to find a fat flow.
         * So instead of dropping a single packet, drop half of its backlog
         * with a limit of 64 packets, to avoid adding too big a cpu spike here.
         */
        ret = fq_codel_drop(sch, q->drop_batch_size, to_free);

        prev_qlen -= sch->q.qlen;
        prev_backlog -= sch->qstats.backlog;
        q->drop_overlimit += prev_qlen;
        if (memory_limited)
                q->drop_overmemory += prev_qlen;

        /* As we dropped packet(s), better let upper stack know this.
         * If we dropped a packet for this flow, return NET_XMIT_CN,
         * but in this case, our parents won't increase their backlogs.
         */
        if (ret == idx) {
                qdisc_tree_reduce_backlog(sch, prev_qlen - 1,
                                          prev_backlog - pkt_len);
                return NET_XMIT_CN;
        }
        qdisc_tree_reduce_backlog(sch, prev_qlen, prev_backlog);
        return NET_XMIT_SUCCESS;
}

/* This is the specific function called from codel_dequeue()
 * to dequeue a packet from queue. Note: backlog is handled in
 * codel, we don't need to reduce it here.
 */
static struct sk_buff *dequeue_func(struct codel_vars *vars, void *ctx)
{
        struct Qdisc *sch = ctx;
        struct fq_codel_sched_data *q = qdisc_priv(sch);
        struct fq_codel_flow *flow;
        struct sk_buff *skb = NULL;

        flow = container_of(vars, struct fq_codel_flow, cvars);
        if (flow->head) {
                skb = dequeue_head(flow);
                q->backlogs[flow - q->flows] -= qdisc_pkt_len(skb);
                q->memory_usage -= get_codel_cb(skb)->mem_usage;
                sch->q.qlen--;
                sch->qstats.backlog -= qdisc_pkt_len(skb);
        }
        return skb;
}

static void drop_func(struct sk_buff *skb, void *ctx)
{
        struct Qdisc *sch = ctx;

        kfree_skb(skb);
        qdisc_qstats_drop(sch);
}

static struct sk_buff *fq_codel_dequeue(struct Qdisc *sch)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);
        struct sk_buff *skb;
        struct fq_codel_flow *flow;
        struct list_head *head;
        u32 prev_drop_count, prev_ecn_mark;

begin:
        head = &q->new_flows;
        if (list_empty(head)) {
                head = &q->old_flows;
                if (list_empty(head))
                        return NULL;
        }
        flow = list_first_entry(head, struct fq_codel_flow, flowchain);

        if (flow->deficit <= 0) {
                flow->deficit += q->quantum;
                list_move_tail(&flow->flowchain, &q->old_flows);
                goto begin;
        }

        prev_drop_count = q->cstats.drop_count;
        prev_ecn_mark = q->cstats.ecn_mark;

        skb = codel_dequeue(sch, &sch->qstats.backlog, &q->cparams,
                            &flow->cvars, &q->cstats, qdisc_pkt_len,
                            codel_get_enqueue_time, drop_func, dequeue_func);

        flow->dropped += q->cstats.drop_count - prev_drop_count;
        flow->dropped += q->cstats.ecn_mark - prev_ecn_mark;

        if (!skb) {
                /* force a pass through old_flows to prevent starvation */
                if ((head == &q->new_flows) && !list_empty(&q->old_flows))
                        list_move_tail(&flow->flowchain, &q->old_flows);
                else
                        list_del_init(&flow->flowchain);
                goto begin;
        }
        qdisc_bstats_update(sch, skb);
        flow->deficit -= qdisc_pkt_len(skb);
        /* We can't call qdisc_tree_reduce_backlog() if our qlen is 0,
         * or HTB crashes. Defer it for next round.
         */
        if (q->cstats.drop_count && sch->q.qlen) {
                qdisc_tree_reduce_backlog(sch, q->cstats.drop_count,
                                          q->cstats.drop_len);
                q->cstats.drop_count = 0;
                q->cstats.drop_len = 0;
        }
        return skb;
}
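/* Scheduling example for the deficit round robin above, assuming the default
 * quantum of one MTU (say 1514 bytes) and a flow queuing 1000-byte packets:
 * the flow starts with deficit 1514, sends one packet (deficit 514), sends a
 * second one because the deficit is still positive (deficit -486), then on
 * the next pass is credited another quantum (deficit 1028) and moved to the
 * tail of old_flows.  New flows are serviced first, but a new flow that runs
 * empty is rotated once into old_flows (rather than deleted) so it cannot
 * monopolize the scheduler by oscillating between empty and non-empty.
 */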
static void fq_codel_flow_purge(struct fq_codel_flow *flow)
{
        rtnl_kfree_skbs(flow->head, flow->tail);
        flow->head = NULL;
}

static void fq_codel_reset(struct Qdisc *sch)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);
        int i;

        INIT_LIST_HEAD(&q->new_flows);
        INIT_LIST_HEAD(&q->old_flows);
        for (i = 0; i < q->flows_cnt; i++) {
                struct fq_codel_flow *flow = q->flows + i;

                fq_codel_flow_purge(flow);
                INIT_LIST_HEAD(&flow->flowchain);
                codel_vars_init(&flow->cvars);
        }
        memset(q->backlogs, 0, q->flows_cnt * sizeof(u32));
        sch->q.qlen = 0;
        sch->qstats.backlog = 0;
        q->memory_usage = 0;
}

static const struct nla_policy fq_codel_policy[TCA_FQ_CODEL_MAX + 1] = {
        [TCA_FQ_CODEL_TARGET]   = { .type = NLA_U32 },
        [TCA_FQ_CODEL_LIMIT]    = { .type = NLA_U32 },
        [TCA_FQ_CODEL_INTERVAL] = { .type = NLA_U32 },
        [TCA_FQ_CODEL_ECN]      = { .type = NLA_U32 },
        [TCA_FQ_CODEL_FLOWS]    = { .type = NLA_U32 },
        [TCA_FQ_CODEL_QUANTUM]  = { .type = NLA_U32 },
        [TCA_FQ_CODEL_CE_THRESHOLD] = { .type = NLA_U32 },
        [TCA_FQ_CODEL_DROP_BATCH_SIZE] = { .type = NLA_U32 },
        [TCA_FQ_CODEL_MEMORY_LIMIT] = { .type = NLA_U32 },
};

static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt,
                           struct netlink_ext_ack *extack)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);
        struct nlattr *tb[TCA_FQ_CODEL_MAX + 1];
        int err;

        if (!opt)
                return -EINVAL;

        err = nla_parse_nested(tb, TCA_FQ_CODEL_MAX, opt, fq_codel_policy,
                               NULL);
        if (err < 0)
                return err;
        if (tb[TCA_FQ_CODEL_FLOWS]) {
                if (q->flows)
                        return -EINVAL;
                q->flows_cnt = nla_get_u32(tb[TCA_FQ_CODEL_FLOWS]);
                if (!q->flows_cnt ||
                    q->flows_cnt > 65536)
                        return -EINVAL;
        }
        sch_tree_lock(sch);

        if (tb[TCA_FQ_CODEL_TARGET]) {
                u64 target = nla_get_u32(tb[TCA_FQ_CODEL_TARGET]);

                q->cparams.target = (target * NSEC_PER_USEC) >> CODEL_SHIFT;
        }

        if (tb[TCA_FQ_CODEL_CE_THRESHOLD]) {
                u64 val = nla_get_u32(tb[TCA_FQ_CODEL_CE_THRESHOLD]);

                q->cparams.ce_threshold = (val * NSEC_PER_USEC) >> CODEL_SHIFT;
        }

        if (tb[TCA_FQ_CODEL_INTERVAL]) {
                u64 interval = nla_get_u32(tb[TCA_FQ_CODEL_INTERVAL]);

                q->cparams.interval = (interval * NSEC_PER_USEC) >> CODEL_SHIFT;
        }

        if (tb[TCA_FQ_CODEL_LIMIT])
                sch->limit = nla_get_u32(tb[TCA_FQ_CODEL_LIMIT]);

        if (tb[TCA_FQ_CODEL_ECN])
                q->cparams.ecn = !!nla_get_u32(tb[TCA_FQ_CODEL_ECN]);

        if (tb[TCA_FQ_CODEL_QUANTUM])
                q->quantum = max(256U, nla_get_u32(tb[TCA_FQ_CODEL_QUANTUM]));

        /* clamp to at least 1: a batch size of 0 would make the drop loop
         * in fq_codel_drop() pointless (this must be max(), not min(),
         * otherwise every configured value is capped to a batch of 1)
         */
        if (tb[TCA_FQ_CODEL_DROP_BATCH_SIZE])
                q->drop_batch_size = max(1U, nla_get_u32(tb[TCA_FQ_CODEL_DROP_BATCH_SIZE]));

        if (tb[TCA_FQ_CODEL_MEMORY_LIMIT])
                q->memory_limit = min(1U << 31, nla_get_u32(tb[TCA_FQ_CODEL_MEMORY_LIMIT]));

        while (sch->q.qlen > sch->limit ||
               q->memory_usage > q->memory_limit) {
                struct sk_buff *skb = fq_codel_dequeue(sch);

                q->cstats.drop_len += qdisc_pkt_len(skb);
                rtnl_kfree_skbs(skb, skb);
                q->cstats.drop_count++;
        }
        qdisc_tree_reduce_backlog(sch, q->cstats.drop_count, q->cstats.drop_len);
        q->cstats.drop_count = 0;
        q->cstats.drop_len = 0;

        sch_tree_unlock(sch);
        return 0;
}
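/* Unit handling in fq_codel_change() above: TCA_FQ_CODEL_TARGET, _INTERVAL
 * and _CE_THRESHOLD arrive over netlink in microseconds, while codel_time_t
 * counts nanoseconds right-shifted by CODEL_SHIFT (10 in the codel headers
 * this file builds against), i.e. units of 1.024 us.  A 5000 us target thus
 * becomes (5000 * NSEC_PER_USEC) >> 10 = 4882 internal units, and
 * codel_time_to_us() performs the inverse conversion when dumping.
 */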
static void fq_codel_destroy(struct Qdisc *sch)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);

        tcf_block_put(q->block);
        kvfree(q->backlogs);
        kvfree(q->flows);
}

static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt,
                         struct netlink_ext_ack *extack)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);
        int i;
        int err;

        sch->limit = 10*1024;
        q->flows_cnt = 1024;
        q->memory_limit = 32 << 20; /* 32 MBytes */
        q->drop_batch_size = 64;
        q->quantum = psched_mtu(qdisc_dev(sch));
        INIT_LIST_HEAD(&q->new_flows);
        INIT_LIST_HEAD(&q->old_flows);
        codel_params_init(&q->cparams);
        codel_stats_init(&q->cstats);
        q->cparams.ecn = true;
        q->cparams.mtu = psched_mtu(qdisc_dev(sch));

        if (opt) {
                err = fq_codel_change(sch, opt, extack);
                if (err)
                        goto init_failure;
        }

        err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
        if (err)
                goto init_failure;

        if (!q->flows) {
                q->flows = kvcalloc(q->flows_cnt,
                                    sizeof(struct fq_codel_flow),
                                    GFP_KERNEL);
                if (!q->flows) {
                        err = -ENOMEM;
                        goto init_failure;
                }
                q->backlogs = kvcalloc(q->flows_cnt, sizeof(u32), GFP_KERNEL);
                if (!q->backlogs) {
                        err = -ENOMEM;
                        goto alloc_failure;
                }
                for (i = 0; i < q->flows_cnt; i++) {
                        struct fq_codel_flow *flow = q->flows + i;

                        INIT_LIST_HEAD(&flow->flowchain);
                        codel_vars_init(&flow->cvars);
                }
        }
        if (sch->limit >= 1)
                sch->flags |= TCQ_F_CAN_BYPASS;
        else
                sch->flags &= ~TCQ_F_CAN_BYPASS;
        return 0;

alloc_failure:
        kvfree(q->flows);
        q->flows = NULL;
init_failure:
        q->flows_cnt = 0;
        return err;
}

static int fq_codel_dump(struct Qdisc *sch, struct sk_buff *skb)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);
        struct nlattr *opts;

        opts = nla_nest_start(skb, TCA_OPTIONS);
        if (opts == NULL)
                goto nla_put_failure;

        if (nla_put_u32(skb, TCA_FQ_CODEL_TARGET,
                        codel_time_to_us(q->cparams.target)) ||
            nla_put_u32(skb, TCA_FQ_CODEL_LIMIT,
                        sch->limit) ||
            nla_put_u32(skb, TCA_FQ_CODEL_INTERVAL,
                        codel_time_to_us(q->cparams.interval)) ||
            nla_put_u32(skb, TCA_FQ_CODEL_ECN,
                        q->cparams.ecn) ||
            nla_put_u32(skb, TCA_FQ_CODEL_QUANTUM,
                        q->quantum) ||
            nla_put_u32(skb, TCA_FQ_CODEL_DROP_BATCH_SIZE,
                        q->drop_batch_size) ||
            nla_put_u32(skb, TCA_FQ_CODEL_MEMORY_LIMIT,
                        q->memory_limit) ||
            nla_put_u32(skb, TCA_FQ_CODEL_FLOWS,
                        q->flows_cnt))
                goto nla_put_failure;

        if (q->cparams.ce_threshold != CODEL_DISABLED_THRESHOLD &&
            nla_put_u32(skb, TCA_FQ_CODEL_CE_THRESHOLD,
                        codel_time_to_us(q->cparams.ce_threshold)))
                goto nla_put_failure;

        return nla_nest_end(skb, opts);

nla_put_failure:
        return -1;
}

static int fq_codel_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);
        struct tc_fq_codel_xstats st = {
                .type = TCA_FQ_CODEL_XSTATS_QDISC,
        };
        struct list_head *pos;

        st.qdisc_stats.maxpacket = q->cstats.maxpacket;
        st.qdisc_stats.drop_overlimit = q->drop_overlimit;
        st.qdisc_stats.ecn_mark = q->cstats.ecn_mark;
        st.qdisc_stats.new_flow_count = q->new_flow_count;
        st.qdisc_stats.ce_mark = q->cstats.ce_mark;
        st.qdisc_stats.memory_usage = q->memory_usage;
        st.qdisc_stats.drop_overmemory = q->drop_overmemory;

        sch_tree_lock(sch);
        list_for_each(pos, &q->new_flows)
                st.qdisc_stats.new_flows_len++;

        list_for_each(pos, &q->old_flows)
                st.qdisc_stats.old_flows_len++;
        sch_tree_unlock(sch);

        return gnet_stats_copy_app(d, &st, sizeof(st));
}
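/* The class ops below expose each flow as a pseudo class whose minor id is
 * the flow index plus one (see "idx = cl - 1" in fq_codel_dump_class_stats()).
 * There are no attachable leaf qdiscs; the interface only exists so that
 * filters can be bound and so that a class dump (e.g. via something like
 * "tc -s class show") can report per-flow deficit, CoDel state and backlog.
 */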
static struct Qdisc *fq_codel_leaf(struct Qdisc *sch, unsigned long arg)
{
        return NULL;
}

static unsigned long fq_codel_find(struct Qdisc *sch, u32 classid)
{
        return 0;
}

static unsigned long fq_codel_bind(struct Qdisc *sch, unsigned long parent,
                                   u32 classid)
{
        /* we cannot bypass queue discipline anymore */
        sch->flags &= ~TCQ_F_CAN_BYPASS;
        return 0;
}

static void fq_codel_unbind(struct Qdisc *q, unsigned long cl)
{
}

static struct tcf_block *fq_codel_tcf_block(struct Qdisc *sch, unsigned long cl,
                                            struct netlink_ext_ack *extack)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);

        if (cl)
                return NULL;
        return q->block;
}

static int fq_codel_dump_class(struct Qdisc *sch, unsigned long cl,
                               struct sk_buff *skb, struct tcmsg *tcm)
{
        tcm->tcm_handle |= TC_H_MIN(cl);
        return 0;
}

static int fq_codel_dump_class_stats(struct Qdisc *sch, unsigned long cl,
                                     struct gnet_dump *d)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);
        u32 idx = cl - 1;
        struct gnet_stats_queue qs = { 0 };
        struct tc_fq_codel_xstats xstats;

        if (idx < q->flows_cnt) {
                const struct fq_codel_flow *flow = &q->flows[idx];
                const struct sk_buff *skb;

                memset(&xstats, 0, sizeof(xstats));
                xstats.type = TCA_FQ_CODEL_XSTATS_CLASS;
                xstats.class_stats.deficit = flow->deficit;
                xstats.class_stats.ldelay =
                        codel_time_to_us(flow->cvars.ldelay);
                xstats.class_stats.count = flow->cvars.count;
                xstats.class_stats.lastcount = flow->cvars.lastcount;
                xstats.class_stats.dropping = flow->cvars.dropping;
                if (flow->cvars.dropping) {
                        codel_tdiff_t delta = flow->cvars.drop_next -
                                              codel_get_time();

                        xstats.class_stats.drop_next = (delta >= 0) ?
                                codel_time_to_us(delta) :
                                -codel_time_to_us(-delta);
                }
                if (flow->head) {
                        sch_tree_lock(sch);
                        skb = flow->head;
                        while (skb) {
                                qs.qlen++;
                                skb = skb->next;
                        }
                        sch_tree_unlock(sch);
                }
                qs.backlog = q->backlogs[idx];
                qs.drops = flow->dropped;
        }
        if (gnet_stats_copy_queue(d, NULL, &qs, qs.qlen) < 0)
                return -1;
        if (idx < q->flows_cnt)
                return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
        return 0;
}
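/* fq_codel_walk() below backs the class listing: it skips flows whose
 * flowchain node is unlinked, i.e. flows that are on neither new_flows nor
 * old_flows, so a class dump only enumerates currently active flows rather
 * than all flows_cnt slots.
 */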
static void fq_codel_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);
        unsigned int i;

        if (arg->stop)
                return;

        for (i = 0; i < q->flows_cnt; i++) {
                if (list_empty(&q->flows[i].flowchain) ||
                    arg->count < arg->skip) {
                        arg->count++;
                        continue;
                }
                if (arg->fn(sch, i + 1, arg) < 0) {
                        arg->stop = 1;
                        break;
                }
                arg->count++;
        }
}

static const struct Qdisc_class_ops fq_codel_class_ops = {
        .leaf           =       fq_codel_leaf,
        .find           =       fq_codel_find,
        .tcf_block      =       fq_codel_tcf_block,
        .bind_tcf       =       fq_codel_bind,
        .unbind_tcf     =       fq_codel_unbind,
        .dump           =       fq_codel_dump_class,
        .dump_stats     =       fq_codel_dump_class_stats,
        .walk           =       fq_codel_walk,
};

static struct Qdisc_ops fq_codel_qdisc_ops __read_mostly = {
        .cl_ops         =       &fq_codel_class_ops,
        .id             =       "fq_codel",
        .priv_size      =       sizeof(struct fq_codel_sched_data),
        .enqueue        =       fq_codel_enqueue,
        .dequeue        =       fq_codel_dequeue,
        .peek           =       qdisc_peek_dequeued,
        .init           =       fq_codel_init,
        .reset          =       fq_codel_reset,
        .destroy        =       fq_codel_destroy,
        .change         =       fq_codel_change,
        .dump           =       fq_codel_dump,
        .dump_stats     =       fq_codel_dump_stats,
        .owner          =       THIS_MODULE,
};

static int __init fq_codel_module_init(void)
{
        return register_qdisc(&fq_codel_qdisc_ops);
}

static void __exit fq_codel_module_exit(void)
{
        unregister_qdisc(&fq_codel_qdisc_ops);
}

module_init(fq_codel_module_init)
module_exit(fq_codel_module_exit)
MODULE_AUTHOR("Eric Dumazet");
MODULE_LICENSE("GPL");