xref: /openbmc/linux/net/sched/sch_fq_codel.c (revision 92b19ff5)
/*
 * Fair Queue CoDel discipline
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *  Copyright (C) 2012,2015 Eric Dumazet <edumazet@google.com>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/jhash.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/codel.h>

/*	Fair Queue CoDel.
 *
 * Principles:
 * Packets are classified on flows (by the internal hash classifier or an
 * external one). This is a stochastic model (as we use a hash, several
 * flows might be hashed to the same slot).
 * Each flow has a CoDel-managed queue.
 * Flows are linked onto two (Round Robin) lists,
 * so that new flows have priority over old ones.
 *
 * For a given flow, packets are not reordered (CoDel uses a FIFO);
 * drops happen at the head only.
 * ECN capability is on by default.
 * Low memory footprint (64 bytes per flow).
 */
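
/* A typical configuration, for illustration only (the interface name and
 * values here are hypothetical; anything omitted falls back to the
 * defaults set up in fq_codel_init() below):
 *
 *	tc qdisc add dev eth0 root fq_codel limit 10240 flows 1024 \
 *		target 5ms interval 100ms quantum 1514 ecn
 *
 * Each keyword maps onto one of the TCA_FQ_CODEL_* attributes parsed in
 * fq_codel_change().
 */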

struct fq_codel_flow {
	struct sk_buff	  *head;
	struct sk_buff	  *tail;
	struct list_head  flowchain;
	int		  deficit;
	u32		  dropped; /* number of drops (or ECN marks) on this flow */
	struct codel_vars cvars;
}; /* please try to keep this structure <= 64 bytes */
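
/* Rough accounting toward that 64-byte goal on a 64-bit build, assuming
 * the codel_vars layout of include/net/codel.h at this revision is about
 * 24 bytes:
 *
 *	head + tail:        2 * 8 = 16 bytes
 *	flowchain:                  16 bytes
 *	deficit + dropped:  2 * 4 =  8 bytes
 *	cvars:                     ~24 bytes
 *
 * i.e. roughly one cache line per flow, which keeps the fast path cheap.
 */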

struct fq_codel_sched_data {
	struct tcf_proto __rcu *filter_list; /* optional external classifier */
	struct fq_codel_flow *flows;	/* Flows table [flows_cnt] */
	u32		*backlogs;	/* backlog table [flows_cnt] */
	u32		flows_cnt;	/* number of flows */
	u32		perturbation;	/* hash perturbation */
	u32		quantum;	/* psched_mtu(qdisc_dev(sch)); */
	struct codel_params cparams;
	struct codel_stats cstats;
	u32		drop_overlimit;
	u32		new_flow_count;

	struct list_head new_flows;	/* list of new flows */
	struct list_head old_flows;	/* list of old flows */
};

static unsigned int fq_codel_hash(const struct fq_codel_sched_data *q,
				  struct sk_buff *skb)
{
	u32 hash = skb_get_hash_perturb(skb, q->perturbation);

	return reciprocal_scale(hash, q->flows_cnt);
}
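
/* A sketch of what reciprocal_scale() (include/linux/kernel.h) is doing
 * here: it maps the 32-bit hash into [0, flows_cnt) with a multiply and a
 * shift instead of a modulo.  The helper below is illustrative only, not
 * part of this file:
 *
 *	static inline u32 hash_to_slot(u32 hash, u32 flows_cnt)
 *	{
 *		// high 32 bits of the 32x32 -> 64 bit product
 *		return (u32)(((u64)hash * flows_cnt) >> 32);
 *	}
 *
 * The perturbation mixed into the hash keeps the flow-to-slot mapping
 * unpredictable across qdisc instances.
 */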

static unsigned int fq_codel_classify(struct sk_buff *skb, struct Qdisc *sch,
				      int *qerr)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct tcf_proto *filter;
	struct tcf_result res;
	int result;

	if (TC_H_MAJ(skb->priority) == sch->handle &&
	    TC_H_MIN(skb->priority) > 0 &&
	    TC_H_MIN(skb->priority) <= q->flows_cnt)
		return TC_H_MIN(skb->priority);

	filter = rcu_dereference_bh(q->filter_list);
	if (!filter)
		return fq_codel_hash(q, skb) + 1;

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	result = tc_classify(skb, filter, &res);
	if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_STOLEN:
		case TC_ACT_QUEUED:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
			/* fall through */
		case TC_ACT_SHOT:
			return 0;
		}
#endif
		if (TC_H_MIN(res.classid) <= q->flows_cnt)
			return TC_H_MIN(res.classid);
	}
	return 0;
}
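
/* Example of the index convention above (numbers hypothetical): a return
 * of 0 means "drop this packet", while 1..flows_cnt selects a flow, and
 * fq_codel_enqueue() subtracts one before indexing.  With flows_cnt =
 * 1024, an application that sets skb->priority (e.g. via SO_PRIORITY) to
 * TC_H_MAKE(sch->handle, 5) bypasses both the hash and any attached
 * filters, and its packets land in q->flows[4].
 */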

/* helper functions: might be changed when/if skbs use a standard list_head */

/* remove one skb from head of slot queue */
static inline struct sk_buff *dequeue_head(struct fq_codel_flow *flow)
{
	struct sk_buff *skb = flow->head;

	flow->head = skb->next;
	skb->next = NULL;
	return skb;
}

/* add skb to flow queue (tail add) */
static inline void flow_queue_add(struct fq_codel_flow *flow,
				  struct sk_buff *skb)
{
	if (flow->head == NULL)
		flow->head = skb;
	else
		flow->tail->next = skb;
	flow->tail = skb;
	skb->next = NULL;
}

static unsigned int fq_codel_drop(struct Qdisc *sch)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	unsigned int maxbacklog = 0, idx = 0, i, len;
	struct fq_codel_flow *flow;

	/* Queue is full! Find the fat flow and drop a packet from it.
	 * This might sound expensive, but with 1024 flows, we scan
	 * 4KB of memory, and we don't need to handle a complex tree
	 * in the fast path (packet enqueue/dequeue) with many cache misses.
	 */
	for (i = 0; i < q->flows_cnt; i++) {
		if (q->backlogs[i] > maxbacklog) {
			maxbacklog = q->backlogs[i];
			idx = i;
		}
	}
	flow = &q->flows[idx];
	skb = dequeue_head(flow);
	len = qdisc_pkt_len(skb);
	q->backlogs[idx] -= len;
	sch->q.qlen--;
	qdisc_qstats_drop(sch);
	qdisc_qstats_backlog_dec(sch, skb);
	kfree_skb(skb);
	flow->dropped++;
	return idx;
}

static unsigned int fq_codel_qdisc_drop(struct Qdisc *sch)
{
	unsigned int prev_backlog;

	prev_backlog = sch->qstats.backlog;
	fq_codel_drop(sch);
	return prev_backlog - sch->qstats.backlog;
}

static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	unsigned int idx;
	struct fq_codel_flow *flow;
	int uninitialized_var(ret);

	idx = fq_codel_classify(skb, sch, &ret);
	if (idx == 0) {
		if (ret & __NET_XMIT_BYPASS)
			qdisc_qstats_drop(sch);
		kfree_skb(skb);
		return ret;
	}
	idx--;

	codel_set_enqueue_time(skb);
	flow = &q->flows[idx];
	flow_queue_add(flow, skb);
	q->backlogs[idx] += qdisc_pkt_len(skb);
	qdisc_qstats_backlog_inc(sch, skb);

	if (list_empty(&flow->flowchain)) {
		list_add_tail(&flow->flowchain, &q->new_flows);
		q->new_flow_count++;
		flow->deficit = q->quantum;
		flow->dropped = 0;
	}
	if (++sch->q.qlen <= sch->limit)
		return NET_XMIT_SUCCESS;

	q->drop_overlimit++;
	/* Return Congestion Notification only if we dropped a packet
	 * from this flow.
	 */
	if (fq_codel_drop(sch) == idx)
		return NET_XMIT_CN;

	/* As we dropped a packet, better let the upper stack know this */
	qdisc_tree_decrease_qlen(sch, 1);
	return NET_XMIT_SUCCESS;
}
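
/* A worked example of the NET_XMIT_CN distinction above (flow index
 * hypothetical): suppose a packet for flow 7 pushes qlen past sch->limit.
 * If fq_codel_drop() finds that flow 7 is itself the fattest flow and
 * sheds one of its packets, the caller gets NET_XMIT_CN: its own traffic
 * was trimmed.  If the drop lands on some other flow, the new packet
 * really was queued, so we return NET_XMIT_SUCCESS and only tell parent
 * qdiscs that one packet vanished via qdisc_tree_decrease_qlen().
 */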

/* This is the specific function called from codel_dequeue()
 * to dequeue a packet from the queue. Note: backlog is handled in
 * codel, we don't need to reduce it here.
 */
static struct sk_buff *dequeue(struct codel_vars *vars, struct Qdisc *sch)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct fq_codel_flow *flow;
	struct sk_buff *skb = NULL;

	flow = container_of(vars, struct fq_codel_flow, cvars);
	if (flow->head) {
		skb = dequeue_head(flow);
		q->backlogs[flow - q->flows] -= qdisc_pkt_len(skb);
		sch->q.qlen--;
	}
	return skb;
}
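
/* A sketch of the pointer arithmetic above, assuming only C semantics:
 * cvars is embedded in struct fq_codel_flow, so container_of() recovers
 * the flow by subtracting the member offset, and because q->flows is one
 * contiguous array, the pointer difference is the flow index, reusable
 * for the parallel q->backlogs[] array:
 *
 *	flow = (void *)vars - offsetof(struct fq_codel_flow, cvars);
 *	idx  = flow - q->flows;		// same index into q->backlogs[]
 */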

static struct sk_buff *fq_codel_dequeue(struct Qdisc *sch)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	struct fq_codel_flow *flow;
	struct list_head *head;
	u32 prev_drop_count, prev_ecn_mark;

begin:
	head = &q->new_flows;
	if (list_empty(head)) {
		head = &q->old_flows;
		if (list_empty(head))
			return NULL;
	}
	flow = list_first_entry(head, struct fq_codel_flow, flowchain);

	if (flow->deficit <= 0) {
		flow->deficit += q->quantum;
		list_move_tail(&flow->flowchain, &q->old_flows);
		goto begin;
	}

	prev_drop_count = q->cstats.drop_count;
	prev_ecn_mark = q->cstats.ecn_mark;

	skb = codel_dequeue(sch, &q->cparams, &flow->cvars, &q->cstats,
			    dequeue);

	flow->dropped += q->cstats.drop_count - prev_drop_count;
	flow->dropped += q->cstats.ecn_mark - prev_ecn_mark;

	if (!skb) {
		/* force a pass through old_flows to prevent starvation */
		if ((head == &q->new_flows) && !list_empty(&q->old_flows))
			list_move_tail(&flow->flowchain, &q->old_flows);
		else
			list_del_init(&flow->flowchain);
		goto begin;
	}
	qdisc_bstats_update(sch, skb);
	flow->deficit -= qdisc_pkt_len(skb);
	/* We can't call qdisc_tree_decrease_qlen() if our qlen is 0,
	 * or HTB crashes. Defer it for next round.
	 */
	if (q->cstats.drop_count && sch->q.qlen) {
		qdisc_tree_decrease_qlen(sch, q->cstats.drop_count);
		q->cstats.drop_count = 0;
	}
	return skb;
}
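
/* A worked DRR example for the dequeue loop above (numbers hypothetical):
 * with quantum = 1514, a freshly activated flow starts with deficit 1514.
 * One full-size 1514-byte packet drives the deficit to 0, so on the next
 * visit the flow is recharged by a quantum and rotated to old_flows.  A
 * flow of 300-byte packets instead sends about five packets per quantum,
 * so link share is divided by bytes, not by packet count.
 */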

static void fq_codel_reset(struct Qdisc *sch)
{
	struct sk_buff *skb;

	while ((skb = fq_codel_dequeue(sch)) != NULL)
		kfree_skb(skb);
}

static const struct nla_policy fq_codel_policy[TCA_FQ_CODEL_MAX + 1] = {
	[TCA_FQ_CODEL_TARGET]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_LIMIT]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_INTERVAL]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_ECN]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_FLOWS]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_QUANTUM]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_CE_THRESHOLD] = { .type = NLA_U32 },
};

static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_FQ_CODEL_MAX + 1];
	int err;

	if (!opt)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_FQ_CODEL_MAX, opt, fq_codel_policy);
	if (err < 0)
		return err;
	if (tb[TCA_FQ_CODEL_FLOWS]) {
		if (q->flows)
			return -EINVAL;
		q->flows_cnt = nla_get_u32(tb[TCA_FQ_CODEL_FLOWS]);
		if (!q->flows_cnt ||
		    q->flows_cnt > 65536)
			return -EINVAL;
	}
	sch_tree_lock(sch);

	if (tb[TCA_FQ_CODEL_TARGET]) {
		u64 target = nla_get_u32(tb[TCA_FQ_CODEL_TARGET]);

		q->cparams.target = (target * NSEC_PER_USEC) >> CODEL_SHIFT;
	}

	if (tb[TCA_FQ_CODEL_CE_THRESHOLD]) {
		u64 val = nla_get_u32(tb[TCA_FQ_CODEL_CE_THRESHOLD]);

		q->cparams.ce_threshold = (val * NSEC_PER_USEC) >> CODEL_SHIFT;
	}

	if (tb[TCA_FQ_CODEL_INTERVAL]) {
		u64 interval = nla_get_u32(tb[TCA_FQ_CODEL_INTERVAL]);

		q->cparams.interval = (interval * NSEC_PER_USEC) >> CODEL_SHIFT;
	}

	if (tb[TCA_FQ_CODEL_LIMIT])
		sch->limit = nla_get_u32(tb[TCA_FQ_CODEL_LIMIT]);

	if (tb[TCA_FQ_CODEL_ECN])
		q->cparams.ecn = !!nla_get_u32(tb[TCA_FQ_CODEL_ECN]);

	if (tb[TCA_FQ_CODEL_QUANTUM])
		q->quantum = max(256U, nla_get_u32(tb[TCA_FQ_CODEL_QUANTUM]));

	while (sch->q.qlen > sch->limit) {
		struct sk_buff *skb = fq_codel_dequeue(sch);

		kfree_skb(skb);
		q->cstats.drop_count++;
	}
	qdisc_tree_decrease_qlen(sch, q->cstats.drop_count);
	q->cstats.drop_count = 0;

	sch_tree_unlock(sch);
	return 0;
}
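
/* A worked example of the time conversions above, assuming CODEL_SHIFT
 * is 10 as defined in include/net/codel.h (codel time ticks of 1024 ns):
 * a target of 5000 us arrives from userspace as a u32, and
 *
 *	(5000 * NSEC_PER_USEC) >> CODEL_SHIFT = 5000000 >> 10 = 4882
 *
 * ticks, i.e. ~5 ms.  The u64 intermediates keep the multiplications
 * from overflowing for large userspace values.
 */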

static void *fq_codel_zalloc(size_t sz)
{
	void *ptr = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN);

	if (!ptr)
		ptr = vzalloc(sz);
	return ptr;
}

static void fq_codel_free(void *addr)
{
	kvfree(addr);
}
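
/* Why the vmalloc() fallback above matters, by way of example: the
 * default table of 1024 flows (~64 bytes each) is a modest ~64 KB
 * kzalloc, but the maximum of 65536 flows needs ~4 MB of contiguous
 * flow state, which a high-order kzalloc may well fail to provide
 * (hence __GFP_NOWARN); vzalloc() then satisfies it from vmalloc space.
 * kvfree() in fq_codel_free() frees either kind of pointer.
 */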

static void fq_codel_destroy(struct Qdisc *sch)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);

	tcf_destroy_chain(&q->filter_list);
	fq_codel_free(q->backlogs);
	fq_codel_free(q->flows);
}

static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	int i;

	sch->limit = 10*1024;
	q->flows_cnt = 1024;
	q->quantum = psched_mtu(qdisc_dev(sch));
	q->perturbation = prandom_u32();
	INIT_LIST_HEAD(&q->new_flows);
	INIT_LIST_HEAD(&q->old_flows);
	codel_params_init(&q->cparams, sch);
	codel_stats_init(&q->cstats);
	q->cparams.ecn = true;

	if (opt) {
		int err = fq_codel_change(sch, opt);

		if (err)
			return err;
	}

	if (!q->flows) {
		q->flows = fq_codel_zalloc(q->flows_cnt *
					   sizeof(struct fq_codel_flow));
		if (!q->flows)
			return -ENOMEM;
		q->backlogs = fq_codel_zalloc(q->flows_cnt * sizeof(u32));
		if (!q->backlogs) {
			fq_codel_free(q->flows);
			return -ENOMEM;
		}
		for (i = 0; i < q->flows_cnt; i++) {
			struct fq_codel_flow *flow = q->flows + i;

			INIT_LIST_HEAD(&flow->flowchain);
			codel_vars_init(&flow->cvars);
		}
	}
	if (sch->limit >= 1)
		sch->flags |= TCQ_F_CAN_BYPASS;
	else
		sch->flags &= ~TCQ_F_CAN_BYPASS;
	return 0;
}

static int fq_codel_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts;

	opts = nla_nest_start(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_FQ_CODEL_TARGET,
			codel_time_to_us(q->cparams.target)) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_LIMIT,
			sch->limit) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_INTERVAL,
			codel_time_to_us(q->cparams.interval)) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_ECN,
			q->cparams.ecn) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_QUANTUM,
			q->quantum) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_FLOWS,
			q->flows_cnt))
		goto nla_put_failure;

	if (q->cparams.ce_threshold != CODEL_DISABLED_THRESHOLD &&
	    nla_put_u32(skb, TCA_FQ_CODEL_CE_THRESHOLD,
			codel_time_to_us(q->cparams.ce_threshold)))
		goto nla_put_failure;

	return nla_nest_end(skb, opts);

nla_put_failure:
	return -1;
}

static int fq_codel_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct tc_fq_codel_xstats st = {
		.type				= TCA_FQ_CODEL_XSTATS_QDISC,
	};
	struct list_head *pos;

	st.qdisc_stats.maxpacket = q->cstats.maxpacket;
	st.qdisc_stats.drop_overlimit = q->drop_overlimit;
	st.qdisc_stats.ecn_mark = q->cstats.ecn_mark;
	st.qdisc_stats.new_flow_count = q->new_flow_count;
	st.qdisc_stats.ce_mark = q->cstats.ce_mark;

	list_for_each(pos, &q->new_flows)
		st.qdisc_stats.new_flows_len++;

	list_for_each(pos, &q->old_flows)
		st.qdisc_stats.old_flows_len++;

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

static struct Qdisc *fq_codel_leaf(struct Qdisc *sch, unsigned long arg)
{
	return NULL;
}

static unsigned long fq_codel_get(struct Qdisc *sch, u32 classid)
{
	return 0;
}

static unsigned long fq_codel_bind(struct Qdisc *sch, unsigned long parent,
				   u32 classid)
{
	/* we cannot bypass queue discipline anymore */
	sch->flags &= ~TCQ_F_CAN_BYPASS;
	return 0;
}

static void fq_codel_put(struct Qdisc *q, unsigned long cl)
{
}

static struct tcf_proto __rcu **fq_codel_find_tcf(struct Qdisc *sch,
						  unsigned long cl)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);

	if (cl)
		return NULL;
	return &q->filter_list;
}

static int fq_codel_dump_class(struct Qdisc *sch, unsigned long cl,
			       struct sk_buff *skb, struct tcmsg *tcm)
{
	tcm->tcm_handle |= TC_H_MIN(cl);
	return 0;
}

static int fq_codel_dump_class_stats(struct Qdisc *sch, unsigned long cl,
				     struct gnet_dump *d)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	u32 idx = cl - 1;
	struct gnet_stats_queue qs = { 0 };
	struct tc_fq_codel_xstats xstats;

	if (idx < q->flows_cnt) {
		const struct fq_codel_flow *flow = &q->flows[idx];
		const struct sk_buff *skb = flow->head;

		memset(&xstats, 0, sizeof(xstats));
		xstats.type = TCA_FQ_CODEL_XSTATS_CLASS;
		xstats.class_stats.deficit = flow->deficit;
		xstats.class_stats.ldelay =
			codel_time_to_us(flow->cvars.ldelay);
		xstats.class_stats.count = flow->cvars.count;
		xstats.class_stats.lastcount = flow->cvars.lastcount;
		xstats.class_stats.dropping = flow->cvars.dropping;
		if (flow->cvars.dropping) {
			codel_tdiff_t delta = flow->cvars.drop_next -
					      codel_get_time();

			xstats.class_stats.drop_next = (delta >= 0) ?
				codel_time_to_us(delta) :
				-codel_time_to_us(-delta);
		}
		while (skb) {
			qs.qlen++;
			skb = skb->next;
		}
		qs.backlog = q->backlogs[idx];
		qs.drops = flow->dropped;
	}
	if (gnet_stats_copy_queue(d, NULL, &qs, 0) < 0)
		return -1;
	if (idx < q->flows_cnt)
		return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
	return 0;
}

static void fq_codel_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	unsigned int i;

	if (arg->stop)
		return;

	for (i = 0; i < q->flows_cnt; i++) {
		if (list_empty(&q->flows[i].flowchain) ||
		    arg->count < arg->skip) {
			arg->count++;
			continue;
		}
		if (arg->fn(sch, i + 1, arg) < 0) {
			arg->stop = 1;
			break;
		}
		arg->count++;
	}
}

static const struct Qdisc_class_ops fq_codel_class_ops = {
	.leaf		=	fq_codel_leaf,
	.get		=	fq_codel_get,
	.put		=	fq_codel_put,
	.tcf_chain	=	fq_codel_find_tcf,
	.bind_tcf	=	fq_codel_bind,
	.unbind_tcf	=	fq_codel_put,
	.dump		=	fq_codel_dump_class,
	.dump_stats	=	fq_codel_dump_class_stats,
	.walk		=	fq_codel_walk,
};

static struct Qdisc_ops fq_codel_qdisc_ops __read_mostly = {
	.cl_ops		=	&fq_codel_class_ops,
	.id		=	"fq_codel",
	.priv_size	=	sizeof(struct fq_codel_sched_data),
	.enqueue	=	fq_codel_enqueue,
	.dequeue	=	fq_codel_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.drop		=	fq_codel_qdisc_drop,
	.init		=	fq_codel_init,
	.reset		=	fq_codel_reset,
	.destroy	=	fq_codel_destroy,
	.change		=	fq_codel_change,
	.dump		=	fq_codel_dump,
	.dump_stats	=	fq_codel_dump_stats,
	.owner		=	THIS_MODULE,
};

static int __init fq_codel_module_init(void)
{
	return register_qdisc(&fq_codel_qdisc_ops);
}

static void __exit fq_codel_module_exit(void)
{
	unregister_qdisc(&fq_codel_qdisc_ops);
}

module_init(fq_codel_module_init)
module_exit(fq_codel_module_exit)
MODULE_AUTHOR("Eric Dumazet");
MODULE_LICENSE("GPL");