// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Fair Queue CoDel discipline
 *
 *  Copyright (C) 2012,2015 Eric Dumazet <edumazet@google.com>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/codel.h>
#include <net/codel_impl.h>
#include <net/codel_qdisc.h>

/*	Fair Queue CoDel.
 *
 * Principles:
 * Packets are classified (by the internal classifier or an external one)
 * into flows.
 * This is a stochastic model (as we use a hash, several flows
 *			       might be hashed to the same slot).
 * Each flow has a CoDel managed queue.
 * Flows are linked onto two (Round Robin) lists,
 * so that new flows have priority over old ones.
 *
 * For a given flow, packets are not reordered (CoDel uses a FIFO);
 * only head drops are done.
 * ECN capability is on by default.
 * Low memory footprint (64 bytes per flow).
 */

struct fq_codel_flow {
	struct sk_buff	  *head;
	struct sk_buff	  *tail;
	struct list_head  flowchain;
	int		  deficit;
	struct codel_vars cvars;
}; /* please try to keep this structure <= 64 bytes */

struct fq_codel_sched_data {
	struct tcf_proto __rcu *filter_list; /* optional external classifier */
	struct tcf_block *block;
	struct fq_codel_flow *flows;	/* Flows table [flows_cnt] */
	u32		*backlogs;	/* backlog table [flows_cnt] */
	u32		flows_cnt;	/* number of flows */
	u32		quantum;	/* psched_mtu(qdisc_dev(sch)); */
	u32		drop_batch_size;
	u32		memory_limit;
	struct codel_params cparams;
	struct codel_stats cstats;
	u32		memory_usage;
	u32		drop_overmemory;
	u32		drop_overlimit;
	u32		new_flow_count;

	struct list_head new_flows;	/* list of new flows */
	struct list_head old_flows;	/* list of old flows */
};

static unsigned int fq_codel_hash(const struct fq_codel_sched_data *q,
				  struct sk_buff *skb)
{
	return reciprocal_scale(skb_get_hash(skb), q->flows_cnt);
}
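
/* Note: reciprocal_scale() (from linux/kernel.h) maps the 32-bit flow hash
 * onto [0, flows_cnt) without a modulo, essentially
 * ((u64)hash * flows_cnt) >> 32. A minimal sketch of the idea, for
 * illustration only (not part of the original logic):
 *
 *	u32 bucket = ((u64)skb_get_hash(skb) * q->flows_cnt) >> 32;
 *
 * e.g. flows_cnt = 1024 and hash = 0x80000000 yield bucket 512.
 */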

static unsigned int fq_codel_classify(struct sk_buff *skb, struct Qdisc *sch,
				      int *qerr)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct tcf_proto *filter;
	struct tcf_result res;
	int result;

	if (TC_H_MAJ(skb->priority) == sch->handle &&
	    TC_H_MIN(skb->priority) > 0 &&
	    TC_H_MIN(skb->priority) <= q->flows_cnt)
		return TC_H_MIN(skb->priority);

	filter = rcu_dereference_bh(q->filter_list);
	if (!filter)
		return fq_codel_hash(q, skb) + 1;

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	result = tcf_classify(skb, NULL, filter, &res, false);
	if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_STOLEN:
		case TC_ACT_QUEUED:
		case TC_ACT_TRAP:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
			fallthrough;
		case TC_ACT_SHOT:
			return 0;
		}
#endif
		if (TC_H_MIN(res.classid) <= q->flows_cnt)
			return TC_H_MIN(res.classid);
	}
	return 0;
}
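
/* The value returned by fq_codel_classify() is a 1-based class id: 0 means
 * "drop/unclassified", 1..flows_cnt selects a flow, and the enqueue path
 * converts it to a 0-based array index with idx--. Illustrative example
 * (assuming the qdisc handle is 8001:): a packet whose skb->priority is
 * 0x80010005 (e.g. set via SO_PRIORITY) is pinned to flow 5 (array index 4)
 * without consulting the hash or any attached filter.
 */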

/* helper functions: might be changed when/if skbs use a standard list_head */

/* remove one skb from head of slot queue */
static inline struct sk_buff *dequeue_head(struct fq_codel_flow *flow)
{
	struct sk_buff *skb = flow->head;

	flow->head = skb->next;
	skb_mark_not_on_list(skb);
	return skb;
}

/* add skb to flow queue (tail add) */
static inline void flow_queue_add(struct fq_codel_flow *flow,
				  struct sk_buff *skb)
{
	if (flow->head == NULL)
		flow->head = skb;
	else
		flow->tail->next = skb;
	flow->tail = skb;
	skb->next = NULL;
}

static unsigned int fq_codel_drop(struct Qdisc *sch, unsigned int max_packets,
				  struct sk_buff **to_free)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	unsigned int maxbacklog = 0, idx = 0, i, len;
	struct fq_codel_flow *flow;
	unsigned int threshold;
	unsigned int mem = 0;

	/* Queue is full! Find the fat flow and drop packet(s) from it.
	 * This might sound expensive, but with 1024 flows, we scan
	 * 4KB of memory, and we don't need to handle a complex tree
	 * in the fast path (packet enqueue/dequeue) with many cache misses.
	 * In stress mode, we'll try to drop 64 packets from the flow,
	 * amortizing this linear lookup to one cache line per drop.
	 */
	for (i = 0; i < q->flows_cnt; i++) {
		if (q->backlogs[i] > maxbacklog) {
			maxbacklog = q->backlogs[i];
			idx = i;
		}
	}

	/* Our goal is to drop half of this fat flow backlog */
	threshold = maxbacklog >> 1;

	flow = &q->flows[idx];
	len = 0;
	i = 0;
	do {
		skb = dequeue_head(flow);
		len += qdisc_pkt_len(skb);
		mem += get_codel_cb(skb)->mem_usage;
		__qdisc_drop(skb, to_free);
	} while (++i < max_packets && len < threshold);

	/* Tell codel to increase its signal strength also */
	flow->cvars.count += i;
	q->backlogs[idx] -= len;
	q->memory_usage -= mem;
	sch->qstats.drops += i;
	sch->qstats.backlog -= len;
	sch->q.qlen -= i;
	return idx;
}
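
/* Worked example (numbers for illustration only): with the default
 * drop_batch_size of 64, if the fattest flow holds 150 packets of
 * 1500 bytes (maxbacklog = 225000, threshold = 112500), the loop stops
 * at the 64-packet batch limit after freeing 96000 bytes, and
 * flow->cvars.count is bumped by 64 so CoDel reacts more aggressively
 * on that flow afterwards.
 */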

static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			    struct sk_buff **to_free)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	unsigned int idx, prev_backlog, prev_qlen;
	struct fq_codel_flow *flow;
	int ret;
	unsigned int pkt_len;
	bool memory_limited;

	idx = fq_codel_classify(skb, sch, &ret);
	if (idx == 0) {
		if (ret & __NET_XMIT_BYPASS)
			qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return ret;
	}
	idx--;

	codel_set_enqueue_time(skb);
	flow = &q->flows[idx];
	flow_queue_add(flow, skb);
	q->backlogs[idx] += qdisc_pkt_len(skb);
	qdisc_qstats_backlog_inc(sch, skb);

	if (list_empty(&flow->flowchain)) {
		list_add_tail(&flow->flowchain, &q->new_flows);
		q->new_flow_count++;
		flow->deficit = q->quantum;
	}
	get_codel_cb(skb)->mem_usage = skb->truesize;
	q->memory_usage += get_codel_cb(skb)->mem_usage;
	memory_limited = q->memory_usage > q->memory_limit;
	if (++sch->q.qlen <= sch->limit && !memory_limited)
		return NET_XMIT_SUCCESS;

	prev_backlog = sch->qstats.backlog;
	prev_qlen = sch->q.qlen;

	/* save this packet length as it might be dropped by fq_codel_drop() */
	pkt_len = qdisc_pkt_len(skb);
	/* fq_codel_drop() is quite expensive, as it performs a linear search
	 * in q->backlogs[] to find a fat flow.
	 * So instead of dropping a single packet, drop half of its backlog
	 * with a 64-packet limit to avoid adding too big a CPU spike here.
	 */
	ret = fq_codel_drop(sch, q->drop_batch_size, to_free);

	prev_qlen -= sch->q.qlen;
	prev_backlog -= sch->qstats.backlog;
	q->drop_overlimit += prev_qlen;
	if (memory_limited)
		q->drop_overmemory += prev_qlen;

	/* As we dropped packet(s), better let the upper stack know this.
	 * If we dropped a packet for this flow, return NET_XMIT_CN,
	 * but in this case, our parents won't increase their backlogs.
	 */
	if (ret == idx) {
		qdisc_tree_reduce_backlog(sch, prev_qlen - 1,
					  prev_backlog - pkt_len);
		return NET_XMIT_CN;
	}
	qdisc_tree_reduce_backlog(sch, prev_qlen, prev_backlog);
	return NET_XMIT_SUCCESS;
}
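
/* A worked overlimit scenario (illustrative numbers): prev_qlen and
 * prev_backlog are sampled before fq_codel_drop(), so after the call
 * "prev_qlen -= sch->q.qlen" leaves the number of packets actually dropped.
 * Say the batch removes 3 packets / 4500 bytes from the fat flow, and the
 * packet we just enqueued (pkt_len = 1000) went to that same flow
 * (ret == idx): the tree backlog is reduced by 2 packets / 3500 bytes and
 * NET_XMIT_CN tells the parent qdisc not to account the new packet either.
 */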

/* This is the specific function called from codel_dequeue()
 * to dequeue a packet from the queue. Note: backlog is handled in
 * codel; we don't need to reduce it here.
 */
static struct sk_buff *dequeue_func(struct codel_vars *vars, void *ctx)
{
	struct Qdisc *sch = ctx;
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct fq_codel_flow *flow;
	struct sk_buff *skb = NULL;

	flow = container_of(vars, struct fq_codel_flow, cvars);
	if (flow->head) {
		skb = dequeue_head(flow);
		q->backlogs[flow - q->flows] -= qdisc_pkt_len(skb);
		q->memory_usage -= get_codel_cb(skb)->mem_usage;
		sch->q.qlen--;
		sch->qstats.backlog -= qdisc_pkt_len(skb);
	}
	return skb;
}

static void drop_func(struct sk_buff *skb, void *ctx)
{
	struct Qdisc *sch = ctx;

	kfree_skb(skb);
	qdisc_qstats_drop(sch);
}

static struct sk_buff *fq_codel_dequeue(struct Qdisc *sch)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	struct fq_codel_flow *flow;
	struct list_head *head;

begin:
	head = &q->new_flows;
	if (list_empty(head)) {
		head = &q->old_flows;
		if (list_empty(head))
			return NULL;
	}
	flow = list_first_entry(head, struct fq_codel_flow, flowchain);

	if (flow->deficit <= 0) {
		flow->deficit += q->quantum;
		list_move_tail(&flow->flowchain, &q->old_flows);
		goto begin;
	}

	skb = codel_dequeue(sch, &sch->qstats.backlog, &q->cparams,
			    &flow->cvars, &q->cstats, qdisc_pkt_len,
			    codel_get_enqueue_time, drop_func, dequeue_func);

	if (!skb) {
		/* force a pass through old_flows to prevent starvation */
		if ((head == &q->new_flows) && !list_empty(&q->old_flows))
			list_move_tail(&flow->flowchain, &q->old_flows);
		else
			list_del_init(&flow->flowchain);
		goto begin;
	}
	qdisc_bstats_update(sch, skb);
	flow->deficit -= qdisc_pkt_len(skb);
	/* We can't call qdisc_tree_reduce_backlog() if our qlen is 0,
	 * or HTB crashes. Defer it to the next round.
	 */
	if (q->cstats.drop_count && sch->q.qlen) {
		qdisc_tree_reduce_backlog(sch, q->cstats.drop_count,
					  q->cstats.drop_len);
		q->cstats.drop_count = 0;
		q->cstats.drop_len = 0;
	}
	return skb;
}
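
/* Deficit round robin in a nutshell (worked trace, illustrative numbers):
 * with quantum = 1514 (psched_mtu() of a standard Ethernet device), a flow
 * freshly added to new_flows starts with deficit = 1514. Dequeuing one
 * 1514-byte packet drives the deficit to 0, so on the next visit the flow
 * is topped up by another quantum and moved to the tail of old_flows.
 * This is what gives new (sparse) flows roughly one quantum of priority
 * before they join the regular round robin with the old flows.
 */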

static void fq_codel_flow_purge(struct fq_codel_flow *flow)
{
	rtnl_kfree_skbs(flow->head, flow->tail);
	flow->head = NULL;
}

static void fq_codel_reset(struct Qdisc *sch)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	int i;

	INIT_LIST_HEAD(&q->new_flows);
	INIT_LIST_HEAD(&q->old_flows);
	for (i = 0; i < q->flows_cnt; i++) {
		struct fq_codel_flow *flow = q->flows + i;

		fq_codel_flow_purge(flow);
		INIT_LIST_HEAD(&flow->flowchain);
		codel_vars_init(&flow->cvars);
	}
	memset(q->backlogs, 0, q->flows_cnt * sizeof(u32));
	q->memory_usage = 0;
}

static const struct nla_policy fq_codel_policy[TCA_FQ_CODEL_MAX + 1] = {
	[TCA_FQ_CODEL_TARGET]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_LIMIT]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_INTERVAL]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_ECN]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_FLOWS]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_QUANTUM]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_CE_THRESHOLD] = { .type = NLA_U32 },
	[TCA_FQ_CODEL_DROP_BATCH_SIZE] = { .type = NLA_U32 },
	[TCA_FQ_CODEL_MEMORY_LIMIT] = { .type = NLA_U32 },
	[TCA_FQ_CODEL_CE_THRESHOLD_SELECTOR] = { .type = NLA_U8 },
	[TCA_FQ_CODEL_CE_THRESHOLD_MASK] = { .type = NLA_U8 },
};

static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt,
			   struct netlink_ext_ack *extack)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_FQ_CODEL_MAX + 1];
	u32 quantum = 0;
	int err;

	err = nla_parse_nested_deprecated(tb, TCA_FQ_CODEL_MAX, opt,
					  fq_codel_policy, NULL);
	if (err < 0)
		return err;
	if (tb[TCA_FQ_CODEL_FLOWS]) {
		if (q->flows)
			return -EINVAL;
		q->flows_cnt = nla_get_u32(tb[TCA_FQ_CODEL_FLOWS]);
		if (!q->flows_cnt ||
		    q->flows_cnt > 65536)
			return -EINVAL;
	}
	if (tb[TCA_FQ_CODEL_QUANTUM]) {
		quantum = max(256U, nla_get_u32(tb[TCA_FQ_CODEL_QUANTUM]));
		if (quantum > FQ_CODEL_QUANTUM_MAX) {
			NL_SET_ERR_MSG(extack, "Invalid quantum");
			return -EINVAL;
		}
	}
	sch_tree_lock(sch);

	if (tb[TCA_FQ_CODEL_TARGET]) {
		u64 target = nla_get_u32(tb[TCA_FQ_CODEL_TARGET]);

		q->cparams.target = (target * NSEC_PER_USEC) >> CODEL_SHIFT;
	}

	if (tb[TCA_FQ_CODEL_CE_THRESHOLD]) {
		u64 val = nla_get_u32(tb[TCA_FQ_CODEL_CE_THRESHOLD]);

		q->cparams.ce_threshold = (val * NSEC_PER_USEC) >> CODEL_SHIFT;
	}

	if (tb[TCA_FQ_CODEL_CE_THRESHOLD_SELECTOR])
		q->cparams.ce_threshold_selector = nla_get_u8(tb[TCA_FQ_CODEL_CE_THRESHOLD_SELECTOR]);
	if (tb[TCA_FQ_CODEL_CE_THRESHOLD_MASK])
		q->cparams.ce_threshold_mask = nla_get_u8(tb[TCA_FQ_CODEL_CE_THRESHOLD_MASK]);

	if (tb[TCA_FQ_CODEL_INTERVAL]) {
		u64 interval = nla_get_u32(tb[TCA_FQ_CODEL_INTERVAL]);

		q->cparams.interval = (interval * NSEC_PER_USEC) >> CODEL_SHIFT;
	}

	if (tb[TCA_FQ_CODEL_LIMIT])
		sch->limit = nla_get_u32(tb[TCA_FQ_CODEL_LIMIT]);

	if (tb[TCA_FQ_CODEL_ECN])
		q->cparams.ecn = !!nla_get_u32(tb[TCA_FQ_CODEL_ECN]);

	if (quantum)
		q->quantum = quantum;

	if (tb[TCA_FQ_CODEL_DROP_BATCH_SIZE])
		q->drop_batch_size = max(1U, nla_get_u32(tb[TCA_FQ_CODEL_DROP_BATCH_SIZE]));

	if (tb[TCA_FQ_CODEL_MEMORY_LIMIT])
		q->memory_limit = min(1U << 31, nla_get_u32(tb[TCA_FQ_CODEL_MEMORY_LIMIT]));

	while (sch->q.qlen > sch->limit ||
	       q->memory_usage > q->memory_limit) {
		struct sk_buff *skb = fq_codel_dequeue(sch);

		q->cstats.drop_len += qdisc_pkt_len(skb);
		rtnl_kfree_skbs(skb, skb);
		q->cstats.drop_count++;
	}
	qdisc_tree_reduce_backlog(sch, q->cstats.drop_count, q->cstats.drop_len);
	q->cstats.drop_count = 0;
	q->cstats.drop_len = 0;

	sch_tree_unlock(sch);
	return 0;
}
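
/* Netlink attribute units: TCA_FQ_CODEL_TARGET, _INTERVAL and _CE_THRESHOLD
 * are supplied in microseconds; "(usec * NSEC_PER_USEC) >> CODEL_SHIFT"
 * converts them to codel_time_t ticks of 2^CODEL_SHIFT ns (1024 ns with
 * CODEL_SHIFT = 10 in include/net/codel.h). For example, the usual 5 ms
 * target becomes 5000 * 1000 >> 10 = 4882 ticks; codel_time_to_us()
 * performs the reverse conversion in the dump path below.
 */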

static void fq_codel_destroy(struct Qdisc *sch)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);

	tcf_block_put(q->block);
	kvfree(q->backlogs);
	kvfree(q->flows);
}

static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt,
			 struct netlink_ext_ack *extack)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	int i;
	int err;

	sch->limit = 10*1024;
	q->flows_cnt = 1024;
	q->memory_limit = 32 << 20; /* 32 MBytes */
	q->drop_batch_size = 64;
	q->quantum = psched_mtu(qdisc_dev(sch));
	INIT_LIST_HEAD(&q->new_flows);
	INIT_LIST_HEAD(&q->old_flows);
	codel_params_init(&q->cparams);
	codel_stats_init(&q->cstats);
	q->cparams.ecn = true;
	q->cparams.mtu = psched_mtu(qdisc_dev(sch));

	if (opt) {
		err = fq_codel_change(sch, opt, extack);
		if (err)
			return err;
	}

	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
	if (err)
		return err;

	if (!q->flows) {
		q->flows = kvcalloc(q->flows_cnt,
				    sizeof(struct fq_codel_flow),
				    GFP_KERNEL);
		if (!q->flows)
			return -ENOMEM;

		q->backlogs = kvcalloc(q->flows_cnt, sizeof(u32), GFP_KERNEL);
		if (!q->backlogs)
			return -ENOMEM;

		for (i = 0; i < q->flows_cnt; i++) {
			struct fq_codel_flow *flow = q->flows + i;

			INIT_LIST_HEAD(&flow->flowchain);
			codel_vars_init(&flow->cvars);
		}
	}
	if (sch->limit >= 1)
		sch->flags |= TCQ_F_CAN_BYPASS;
	else
		sch->flags &= ~TCQ_F_CAN_BYPASS;
	return 0;
}
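
/* Defaults chosen above: a 10240-packet limit, 1024 flows, a 32 MB memory
 * cap, a 64-packet drop batch and a one-MTU quantum. With 1024 flows the
 * static footprint is roughly 64 KB for the flows table (64 bytes per flow,
 * see the struct comment) plus 4 KB for the backlogs array; everything else
 * is per-packet skb memory accounted in q->memory_usage.
 */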

static int fq_codel_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts;

	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_FQ_CODEL_TARGET,
			codel_time_to_us(q->cparams.target)) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_LIMIT,
			sch->limit) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_INTERVAL,
			codel_time_to_us(q->cparams.interval)) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_ECN,
			q->cparams.ecn) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_QUANTUM,
			q->quantum) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_DROP_BATCH_SIZE,
			q->drop_batch_size) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_MEMORY_LIMIT,
			q->memory_limit) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_FLOWS,
			q->flows_cnt))
		goto nla_put_failure;

	if (q->cparams.ce_threshold != CODEL_DISABLED_THRESHOLD) {
		if (nla_put_u32(skb, TCA_FQ_CODEL_CE_THRESHOLD,
				codel_time_to_us(q->cparams.ce_threshold)))
			goto nla_put_failure;
		if (nla_put_u8(skb, TCA_FQ_CODEL_CE_THRESHOLD_SELECTOR, q->cparams.ce_threshold_selector))
			goto nla_put_failure;
		if (nla_put_u8(skb, TCA_FQ_CODEL_CE_THRESHOLD_MASK, q->cparams.ce_threshold_mask))
			goto nla_put_failure;
	}

	return nla_nest_end(skb, opts);

nla_put_failure:
	return -1;
}

static int fq_codel_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct tc_fq_codel_xstats st = {
		.type				= TCA_FQ_CODEL_XSTATS_QDISC,
	};
	struct list_head *pos;

	st.qdisc_stats.maxpacket = q->cstats.maxpacket;
	st.qdisc_stats.drop_overlimit = q->drop_overlimit;
	st.qdisc_stats.ecn_mark = q->cstats.ecn_mark;
	st.qdisc_stats.new_flow_count = q->new_flow_count;
	st.qdisc_stats.ce_mark = q->cstats.ce_mark;
	st.qdisc_stats.memory_usage = q->memory_usage;
	st.qdisc_stats.drop_overmemory = q->drop_overmemory;

	sch_tree_lock(sch);
	list_for_each(pos, &q->new_flows)
		st.qdisc_stats.new_flows_len++;

	list_for_each(pos, &q->old_flows)
		st.qdisc_stats.old_flows_len++;
	sch_tree_unlock(sch);

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

static struct Qdisc *fq_codel_leaf(struct Qdisc *sch, unsigned long arg)
{
	return NULL;
}

static unsigned long fq_codel_find(struct Qdisc *sch, u32 classid)
{
	return 0;
}

static unsigned long fq_codel_bind(struct Qdisc *sch, unsigned long parent,
				   u32 classid)
{
	return 0;
}

static void fq_codel_unbind(struct Qdisc *q, unsigned long cl)
{
}

static struct tcf_block *fq_codel_tcf_block(struct Qdisc *sch, unsigned long cl,
					    struct netlink_ext_ack *extack)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);

	if (cl)
		return NULL;
	return q->block;
}

static int fq_codel_dump_class(struct Qdisc *sch, unsigned long cl,
			       struct sk_buff *skb, struct tcmsg *tcm)
{
	tcm->tcm_handle |= TC_H_MIN(cl);
	return 0;
}

static int fq_codel_dump_class_stats(struct Qdisc *sch, unsigned long cl,
				     struct gnet_dump *d)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	u32 idx = cl - 1;
	struct gnet_stats_queue qs = { 0 };
	struct tc_fq_codel_xstats xstats;

	if (idx < q->flows_cnt) {
		const struct fq_codel_flow *flow = &q->flows[idx];
		const struct sk_buff *skb;

		memset(&xstats, 0, sizeof(xstats));
		xstats.type = TCA_FQ_CODEL_XSTATS_CLASS;
		xstats.class_stats.deficit = flow->deficit;
		xstats.class_stats.ldelay =
			codel_time_to_us(flow->cvars.ldelay);
		xstats.class_stats.count = flow->cvars.count;
		xstats.class_stats.lastcount = flow->cvars.lastcount;
		xstats.class_stats.dropping = flow->cvars.dropping;
		if (flow->cvars.dropping) {
			codel_tdiff_t delta = flow->cvars.drop_next -
					      codel_get_time();

			xstats.class_stats.drop_next = (delta >= 0) ?
				codel_time_to_us(delta) :
				-codel_time_to_us(-delta);
		}
		if (flow->head) {
			sch_tree_lock(sch);
			skb = flow->head;
			while (skb) {
				qs.qlen++;
				skb = skb->next;
			}
			sch_tree_unlock(sch);
		}
		qs.backlog = q->backlogs[idx];
		qs.drops = 0;
	}
	if (gnet_stats_copy_queue(d, NULL, &qs, qs.qlen) < 0)
		return -1;
	if (idx < q->flows_cnt)
		return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
	return 0;
}

static void fq_codel_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	unsigned int i;

	if (arg->stop)
		return;

	for (i = 0; i < q->flows_cnt; i++) {
		if (list_empty(&q->flows[i].flowchain)) {
			arg->count++;
			continue;
		}
		if (!tc_qdisc_stats_dump(sch, i + 1, arg))
			break;
	}
}

static const struct Qdisc_class_ops fq_codel_class_ops = {
	.leaf		=	fq_codel_leaf,
	.find		=	fq_codel_find,
	.tcf_block	=	fq_codel_tcf_block,
	.bind_tcf	=	fq_codel_bind,
	.unbind_tcf	=	fq_codel_unbind,
	.dump		=	fq_codel_dump_class,
	.dump_stats	=	fq_codel_dump_class_stats,
	.walk		=	fq_codel_walk,
};

static struct Qdisc_ops fq_codel_qdisc_ops __read_mostly = {
	.cl_ops		=	&fq_codel_class_ops,
	.id		=	"fq_codel",
	.priv_size	=	sizeof(struct fq_codel_sched_data),
	.enqueue	=	fq_codel_enqueue,
	.dequeue	=	fq_codel_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.init		=	fq_codel_init,
	.reset		=	fq_codel_reset,
	.destroy	=	fq_codel_destroy,
	.change		=	fq_codel_change,
	.dump		=	fq_codel_dump,
	.dump_stats	=	fq_codel_dump_stats,
	.owner		=	THIS_MODULE,
};

static int __init fq_codel_module_init(void)
{
	return register_qdisc(&fq_codel_qdisc_ops);
}

static void __exit fq_codel_module_exit(void)
{
	unregister_qdisc(&fq_codel_qdisc_ops);
}

module_init(fq_codel_module_init)
module_exit(fq_codel_module_exit)
MODULE_AUTHOR("Eric Dumazet");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Fair Queue CoDel discipline");