/*
 * net/sched/sch_sfq.c	Stochastic Fairness Queueing discipline.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/ipv6.h>
#include <linux/skbuff.h>
#include <linux/jhash.h>
#include <linux/slab.h>
#include <net/ip.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>


/*	Stochastic Fairness Queuing algorithm.
	=======================================

	Source:
	Paul E. McKenney "Stochastic Fairness Queuing",
	IEEE INFOCOM'90 Proceedings, San Francisco, 1990.

	Paul E. McKenney "Stochastic Fairness Queuing",
	"Interworking: Research and Experience", v.2, 1991, p.113-131.


	See also:
	M. Shreedhar and George Varghese "Efficient Fair
	Queuing using Deficit Round Robin", Proc. SIGCOMM 95.


	This is not the thing that is usually called (W)FQ nowadays.
	It does not use any timestamp mechanism, but instead
	processes queues in round-robin order.

	ADVANTAGE:

	- It is very cheap. Both CPU and memory requirements are minimal.

	DRAWBACKS:

	- "Stochastic" -> It is not 100% fair.
	When hash collisions occur, several flows are considered as one.

	- "Round-robin" -> It introduces larger delays than virtual clock
	based schemes, and should not be used for isolating interactive
	traffic from non-interactive traffic. This means that this scheduler
	should be used as a leaf of CBQ or P3, which puts interactive traffic
	into the higher priority band.

	We still need true WFQ for the top level CSZ, but using WFQ
	for best effort traffic is absolutely pointless:
	SFQ is superior for this purpose.

	IMPLEMENTATION:
	This implementation limits the maximal queue length to 128 packets,
	the max mtu to 2^18-1, the number of flows to 128 and the number of
	hash buckets to 1024. The only goal of these restrictions was that
	all data fit into one 4K page on 32-bit arches.

	It is easy to increase these values, but not at run time.  */

#define SFQ_DEPTH		128 /* max number of packets per flow */
#define SFQ_SLOTS		128 /* max number of flows */
#define SFQ_EMPTY_SLOT		255
#define SFQ_HASH_DIVISOR	1024
/* We use 16 bits to store allot, and want to handle packets up to 64K.
 * Scale allot by 8 (1<<3) so that no overflow occurs.
 */
#define SFQ_ALLOT_SHIFT		3
#define SFQ_ALLOT_SIZE(X)	DIV_ROUND_UP(X, 1 << SFQ_ALLOT_SHIFT)
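
/*
 * Worked example of the scaling above (illustrative arithmetic only):
 * with a typical Ethernet quantum of 1514 bytes,
 *	SFQ_ALLOT_SIZE(1514) = DIV_ROUND_UP(1514, 8) = 190
 * and a worst-case 64K packet costs
 *	SFQ_ALLOT_SIZE(65535) = 8192,
 * which fits comfortably in the 16-bit signed 'allot' field.
 */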

/* This type should contain at least SFQ_DEPTH + SFQ_SLOTS values */
typedef unsigned char sfq_index;

/*
 * We don't use pointers, to save space.
 * Small indexes [0 ... SFQ_SLOTS - 1] are 'pointers' to the slots[] array,
 * while the following values [SFQ_SLOTS ... SFQ_SLOTS + SFQ_DEPTH - 1]
 * are 'pointers' to the dep[] array.
 */
struct sfq_head {
	sfq_index	next;
	sfq_index	prev;
};

struct sfq_slot {
	struct sk_buff	*skblist_next;
	struct sk_buff	*skblist_prev;
	sfq_index	qlen; /* number of skbs in skblist */
	sfq_index	next; /* next slot in sfq chain */
	struct sfq_head dep; /* anchor in dep[] chains */
	unsigned short	hash; /* hash value (index in ht[]) */
	short		allot; /* credit for this slot */
};

struct sfq_sched_data {
/* Parameters */
	int		perturb_period;
	unsigned	quantum;	/* Allotment per round: MUST BE >= MTU */
	int		limit;

/* Variables */
	struct tcf_proto *filter_list;
	struct timer_list perturb_timer;
	u32		perturbation;
	sfq_index	cur_depth;	/* depth of longest slot */
	unsigned short  scaled_quantum; /* SFQ_ALLOT_SIZE(quantum) */
	struct sfq_slot *tail;		/* current slot in round */
	sfq_index	ht[SFQ_HASH_DIVISOR];	/* Hash table */
	struct sfq_slot	slots[SFQ_SLOTS];
	struct sfq_head	dep[SFQ_DEPTH];	/* Linked list of slots, indexed by depth */
};

/*
 * An sfq_head is either embedded in an sfq_slot or lives in the dep[] array
 */
static inline struct sfq_head *sfq_dep_head(struct sfq_sched_data *q, sfq_index val)
{
	if (val < SFQ_SLOTS)
		return &q->slots[val].dep;
	return &q->dep[val - SFQ_SLOTS];
}
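
/*
 * Example of the mapping above (assuming the default SFQ_SLOTS == 128):
 *	sfq_dep_head(q, 5)   == &q->slots[5].dep	(an active slot)
 *	sfq_dep_head(q, 130) == &q->dep[130 - 128]	(the depth-2 anchor)
 */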

static __inline__ unsigned sfq_fold_hash(struct sfq_sched_data *q, u32 h, u32 h1)
{
	return jhash_2words(h, h1, q->perturbation) & (SFQ_HASH_DIVISOR - 1);
}

static unsigned sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb)
{
	u32 h, h2;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
	{
		const struct iphdr *iph;
		int poff;

		if (!pskb_network_may_pull(skb, sizeof(*iph)))
			goto err;
		iph = ip_hdr(skb);
		h = (__force u32)iph->daddr;
		h2 = (__force u32)iph->saddr ^ iph->protocol;
		if (iph->frag_off & htons(IP_MF|IP_OFFSET))
			break;
		poff = proto_ports_offset(iph->protocol);
		if (poff >= 0 &&
		    pskb_network_may_pull(skb, iph->ihl * 4 + 4 + poff)) {
			iph = ip_hdr(skb);
			h2 ^= *(u32 *)((void *)iph + iph->ihl * 4 + poff);
		}
		break;
	}
	case htons(ETH_P_IPV6):
	{
		struct ipv6hdr *iph;
		int poff;

		if (!pskb_network_may_pull(skb, sizeof(*iph)))
			goto err;
		iph = ipv6_hdr(skb);
		h = (__force u32)iph->daddr.s6_addr32[3];
		h2 = (__force u32)iph->saddr.s6_addr32[3] ^ iph->nexthdr;
		poff = proto_ports_offset(iph->nexthdr);
		if (poff >= 0 &&
		    pskb_network_may_pull(skb, sizeof(*iph) + 4 + poff)) {
			iph = ipv6_hdr(skb);
			h2 ^= *(u32 *)((void *)iph + sizeof(*iph) + poff);
		}
		break;
	}
	default:
err:
		h = (unsigned long)skb_dst(skb) ^ (__force u32)skb->protocol;
		h2 = (unsigned long)skb->sk;
	}

	return sfq_fold_hash(q, h, h2);
}
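
/*
 * For an unfragmented IPv4 packet the bucket computed above boils down
 * to (a sketch of the math, not additional code):
 *	jhash_2words(daddr, saddr ^ protocol ^ ports, perturbation) & 1023
 * Fragments skip the port mixing, and anything unparseable falls back
 * to (dst entry, protocol, socket), so even unknown traffic is still
 * spread over several buckets.
 */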

static unsigned int sfq_classify(struct sk_buff *skb, struct Qdisc *sch,
				 int *qerr)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	struct tcf_result res;
	int result;

	if (TC_H_MAJ(skb->priority) == sch->handle &&
	    TC_H_MIN(skb->priority) > 0 &&
	    TC_H_MIN(skb->priority) <= SFQ_HASH_DIVISOR)
		return TC_H_MIN(skb->priority);

	if (!q->filter_list)
		return sfq_hash(q, skb) + 1;

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	result = tc_classify(skb, q->filter_list, &res);
	if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_STOLEN:
		case TC_ACT_QUEUED:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
			/* fall through */
		case TC_ACT_SHOT:
			return 0;
		}
#endif
		if (TC_H_MIN(res.classid) <= SFQ_HASH_DIVISOR)
			return TC_H_MIN(res.classid);
	}
	return 0;
}
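
/*
 * Both paths above return a 1-based bucket number; 0 means drop.
 * For example, setting skb->priority to (sch->handle | 7) would pin
 * the packet to bucket 6 (minor id 7, used as 7 - 1 by sfq_enqueue())
 * without consulting any filter, given the minor id range check above.
 */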

/*
 * x : slot number [0 .. SFQ_SLOTS - 1]
 */
static inline void sfq_link(struct sfq_sched_data *q, sfq_index x)
{
	sfq_index p, n;
	int qlen = q->slots[x].qlen;

	p = qlen + SFQ_SLOTS;
	n = q->dep[qlen].next;

	q->slots[x].dep.next = n;
	q->slots[x].dep.prev = p;

	q->dep[qlen].next = x;		/* sfq_dep_head(q, p)->next = x */
	sfq_dep_head(q, n)->prev = x;
}

#define sfq_unlink(q, x, n, p)			\
	n = q->slots[x].dep.next;		\
	p = q->slots[x].dep.prev;		\
	sfq_dep_head(q, p)->next = n;		\
	sfq_dep_head(q, n)->prev = p


static inline void sfq_dec(struct sfq_sched_data *q, sfq_index x)
{
	sfq_index p, n;
	int d;

	sfq_unlink(q, x, n, p);

	d = q->slots[x].qlen--;
	if (n == p && q->cur_depth == d)
		q->cur_depth--;
	sfq_link(q, x);
}

static inline void sfq_inc(struct sfq_sched_data *q, sfq_index x)
{
	sfq_index p, n;
	int d;

	sfq_unlink(q, x, n, p);

	d = ++q->slots[x].qlen;
	if (q->cur_depth < d)
		q->cur_depth = d;
	sfq_link(q, x);
}
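
/*
 * Worked example of the bookkeeping above (a sketch): if slot x holds
 * two packets and receives a third, sfq_inc() unlinks x from the
 * qlen == 2 chain anchored at dep[2] and relinks it at dep[3],
 * raising cur_depth to 3 if x is now the longest flow. This is what
 * lets sfq_drop() find a longest slot in O(1) via q->dep[d].next.
 */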

/* helper functions : might be changed when/if skbs use a standard list_head */

/* remove one skb from tail of slot queue */
static inline struct sk_buff *slot_dequeue_tail(struct sfq_slot *slot)
{
	struct sk_buff *skb = slot->skblist_prev;

	slot->skblist_prev = skb->prev;
	skb->prev->next = (struct sk_buff *)slot;
	skb->next = skb->prev = NULL;
	return skb;
}

/* remove one skb from head of slot queue */
static inline struct sk_buff *slot_dequeue_head(struct sfq_slot *slot)
{
	struct sk_buff *skb = slot->skblist_next;

	slot->skblist_next = skb->next;
	skb->next->prev = (struct sk_buff *)slot;
	skb->next = skb->prev = NULL;
	return skb;
}

static inline void slot_queue_init(struct sfq_slot *slot)
{
	slot->skblist_prev = slot->skblist_next = (struct sk_buff *)slot;
}

/* add skb to slot queue (tail add) */
static inline void slot_queue_add(struct sfq_slot *slot, struct sk_buff *skb)
{
	skb->prev = slot->skblist_prev;
	skb->next = (struct sk_buff *)slot;
	slot->skblist_prev->next = skb;
	slot->skblist_prev = skb;
}

#define	slot_queue_walk(slot, skb)		\
	for (skb = slot->skblist_next;		\
	     skb != (struct sk_buff *)slot;	\
	     skb = skb->next)
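
/*
 * Note on the (struct sk_buff *)slot casts above: they rely on
 * skblist_next/skblist_prev being the first two members of struct
 * sfq_slot, mirroring the next/prev pointers at the start of struct
 * sk_buff, so the slot itself doubles as the list head. Reordering
 * the fields of struct sfq_slot would break this silently.
 */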

static unsigned int sfq_drop(struct Qdisc *sch)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	sfq_index x, d = q->cur_depth;
	struct sk_buff *skb;
	unsigned int len;
	struct sfq_slot *slot;

	/* Queue is full! Find the longest slot and drop tail packet from it */
	if (d > 1) {
		x = q->dep[d].next;
		slot = &q->slots[x];
drop:
		skb = slot_dequeue_tail(slot);
		len = qdisc_pkt_len(skb);
		sfq_dec(q, x);
		kfree_skb(skb);
		sch->q.qlen--;
		sch->qstats.drops++;
		sch->qstats.backlog -= len;
		return len;
	}

	if (d == 1) {
		/* It is difficult to believe, but ALL THE SLOTS HAVE LENGTH 1. */
		x = q->tail->next;
		slot = &q->slots[x];
		q->tail->next = slot->next;
		q->ht[slot->hash] = SFQ_EMPTY_SLOT;
		goto drop;
	}

	return 0;
}

static int
sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	unsigned int hash;
	sfq_index x;
	struct sfq_slot *slot;
	int uninitialized_var(ret);

	hash = sfq_classify(skb, sch, &ret);
	if (hash == 0) {
		if (ret & __NET_XMIT_BYPASS)
			sch->qstats.drops++;
		kfree_skb(skb);
		return ret;
	}
	hash--;

	x = q->ht[hash];
	slot = &q->slots[x];
	if (x == SFQ_EMPTY_SLOT) {
		x = q->dep[0].next; /* get a free slot */
		q->ht[hash] = x;
		slot = &q->slots[x];
		slot->hash = hash;
	}

	/* If selected queue has length q->limit, do simple tail drop,
	 * i.e. drop _this_ packet.
	 */
	if (slot->qlen >= q->limit)
		return qdisc_drop(skb, sch);

	sch->qstats.backlog += qdisc_pkt_len(skb);
	slot_queue_add(slot, skb);
	sfq_inc(q, x);
	if (slot->qlen == 1) {		/* The flow is new */
		if (q->tail == NULL) {	/* It is the first flow */
			slot->next = x;
		} else {
			slot->next = q->tail->next;
			q->tail->next = x;
		}
		q->tail = slot;
		slot->allot = q->scaled_quantum;
	}
	if (++sch->q.qlen <= q->limit) {
		sch->bstats.bytes += qdisc_pkt_len(skb);
		sch->bstats.packets++;
		return NET_XMIT_SUCCESS;
	}

	sfq_drop(sch);
	return NET_XMIT_CN;
}
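
/*
 * Example of the tail splicing above (illustrative): with two active
 * flows where q->tail points at B and B->next == A (A is served
 * next), a new flow C is linked as B->next = C, C->next = A, and
 * q->tail = C. The round order A, B is preserved and C gets its
 * first service at the end of the current round, with a full
 * scaled_quantum of credit.
 */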

static struct sk_buff *
sfq_peek(struct Qdisc *sch)
{
	struct sfq_sched_data *q = qdisc_priv(sch);

	/* No active slots */
	if (q->tail == NULL)
		return NULL;

	return q->slots[q->tail->next].skblist_next;
}

static struct sk_buff *
sfq_dequeue(struct Qdisc *sch)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	sfq_index a, next_a;
	struct sfq_slot *slot;

	/* No active slots */
	if (q->tail == NULL)
		return NULL;

next_slot:
	a = q->tail->next;
	slot = &q->slots[a];
	if (slot->allot <= 0) {
		q->tail = slot;
		slot->allot += q->scaled_quantum;
		goto next_slot;
	}
	skb = slot_dequeue_head(slot);
	sfq_dec(q, a);
	sch->q.qlen--;
	sch->qstats.backlog -= qdisc_pkt_len(skb);

	/* Is the slot empty? */
	if (slot->qlen == 0) {
		q->ht[slot->hash] = SFQ_EMPTY_SLOT;
		next_a = slot->next;
		if (a == next_a) {
			q->tail = NULL; /* no more active slots */
			return skb;
		}
		q->tail->next = next_a;
	} else {
		slot->allot -= SFQ_ALLOT_SIZE(qdisc_pkt_len(skb));
	}
	return skb;
}
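
/*
 * Deficit accounting example (a sketch, assuming quantum = 1514 and
 * hence scaled_quantum = 190): dequeuing a 1000-byte packet costs
 * SFQ_ALLOT_SIZE(1000) = 125 units of credit. Once allot reaches 0
 * or goes negative, the slot becomes the new tail and is refilled
 * with another 190 units, so any overdraft carries into the next
 * round exactly as in Deficit Round Robin.
 */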

static void
sfq_reset(struct Qdisc *sch)
{
	struct sk_buff *skb;

	while ((skb = sfq_dequeue(sch)) != NULL)
		kfree_skb(skb);
}

static void sfq_perturbation(unsigned long arg)
{
	struct Qdisc *sch = (struct Qdisc *)arg;
	struct sfq_sched_data *q = qdisc_priv(sch);

	q->perturbation = net_random();

	if (q->perturb_period)
		mod_timer(&q->perturb_timer, jiffies + q->perturb_period);
}

static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	struct tc_sfq_qopt *ctl = nla_data(opt);
	unsigned int qlen;

	if (opt->nla_len < nla_attr_size(sizeof(*ctl)))
		return -EINVAL;

	sch_tree_lock(sch);
	q->quantum = ctl->quantum ? : psched_mtu(qdisc_dev(sch));
	q->scaled_quantum = SFQ_ALLOT_SIZE(q->quantum);
	q->perturb_period = ctl->perturb_period * HZ;
	if (ctl->limit)
		q->limit = min_t(u32, ctl->limit, SFQ_DEPTH - 1);

	qlen = sch->q.qlen;
	while (sch->q.qlen > q->limit)
		sfq_drop(sch);
	qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen);

	del_timer(&q->perturb_timer);
	if (q->perturb_period) {
		mod_timer(&q->perturb_timer, jiffies + q->perturb_period);
		q->perturbation = net_random();
	}
	sch_tree_unlock(sch);
	return 0;
}
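
/*
 * A typical way to reach this function from user space (illustrative
 * tc(8) invocation, not part of this file):
 *	tc qdisc add dev eth0 root sfq perturb 10
 * which arrives here with ctl->perturb_period == 10 (rescaled to
 * jiffies above) and ctl->quantum == 0, so the quantum defaults to
 * the device MTU via psched_mtu().
 */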

static int sfq_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	int i;

	q->perturb_timer.function = sfq_perturbation;
	q->perturb_timer.data = (unsigned long)sch;
	init_timer_deferrable(&q->perturb_timer);

	for (i = 0; i < SFQ_HASH_DIVISOR; i++)
		q->ht[i] = SFQ_EMPTY_SLOT;

	for (i = 0; i < SFQ_DEPTH; i++) {
		q->dep[i].next = i + SFQ_SLOTS;
		q->dep[i].prev = i + SFQ_SLOTS;
	}

	q->limit = SFQ_DEPTH - 1;
	q->cur_depth = 0;
	q->tail = NULL;
	if (opt == NULL) {
		q->quantum = psched_mtu(qdisc_dev(sch));
		q->scaled_quantum = SFQ_ALLOT_SIZE(q->quantum);
		q->perturb_period = 0;
		q->perturbation = net_random();
	} else {
		int err = sfq_change(sch, opt);
		if (err)
			return err;
	}

	for (i = 0; i < SFQ_SLOTS; i++) {
		slot_queue_init(&q->slots[i]);
		sfq_link(q, i);
	}
	return 0;
}
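
/*
 * After sfq_init() every slot has qlen == 0 and is therefore linked
 * on the dep[0] chain; that chain doubles as the free list, which is
 * why sfq_enqueue() can grab an unused slot with a plain
 * q->dep[0].next lookup.
 */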

static void sfq_destroy(struct Qdisc *sch)
{
	struct sfq_sched_data *q = qdisc_priv(sch);

	tcf_destroy_chain(&q->filter_list);
	q->perturb_period = 0;
	del_timer_sync(&q->perturb_timer);
}

static int sfq_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_sfq_qopt opt;

	opt.quantum = q->quantum;
	opt.perturb_period = q->perturb_period / HZ;

	opt.limit = q->limit;
	opt.divisor = SFQ_HASH_DIVISOR;
	opt.flows = q->limit;

	NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static struct Qdisc *sfq_leaf(struct Qdisc *sch, unsigned long arg)
{
	return NULL;
}

static unsigned long sfq_get(struct Qdisc *sch, u32 classid)
{
	return 0;
}

static unsigned long sfq_bind(struct Qdisc *sch, unsigned long parent,
			      u32 classid)
{
	return 0;
}

static void sfq_put(struct Qdisc *q, unsigned long cl)
{
}

static struct tcf_proto **sfq_find_tcf(struct Qdisc *sch, unsigned long cl)
{
	struct sfq_sched_data *q = qdisc_priv(sch);

	if (cl)
		return NULL;
	return &q->filter_list;
}

static int sfq_dump_class(struct Qdisc *sch, unsigned long cl,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	tcm->tcm_handle |= TC_H_MIN(cl);
	return 0;
}

static int sfq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
				struct gnet_dump *d)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	sfq_index idx = q->ht[cl - 1];
	struct gnet_stats_queue qs = { 0 };
	struct tc_sfq_xstats xstats = { 0 };
	struct sk_buff *skb;

	if (idx != SFQ_EMPTY_SLOT) {
		const struct sfq_slot *slot = &q->slots[idx];

		xstats.allot = slot->allot << SFQ_ALLOT_SHIFT;
		qs.qlen = slot->qlen;
		slot_queue_walk(slot, skb)
			qs.backlog += qdisc_pkt_len(skb);
	}
	if (gnet_stats_copy_queue(d, &qs) < 0)
		return -1;
	return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
}
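
/*
 * Note the << SFQ_ALLOT_SHIFT above: allot is kept internally in
 * 8-byte units, so it is scaled back to bytes before being reported
 * to user space. The value may legitimately be negative (a deficit)
 * between refills.
 */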

static void sfq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	unsigned int i;

	if (arg->stop)
		return;

	for (i = 0; i < SFQ_HASH_DIVISOR; i++) {
		if (q->ht[i] == SFQ_EMPTY_SLOT ||
		    arg->count < arg->skip) {
			arg->count++;
			continue;
		}
		if (arg->fn(sch, i + 1, arg) < 0) {
			arg->stop = 1;
			break;
		}
		arg->count++;
	}
}

static const struct Qdisc_class_ops sfq_class_ops = {
	.leaf		=	sfq_leaf,
	.get		=	sfq_get,
	.put		=	sfq_put,
	.tcf_chain	=	sfq_find_tcf,
	.bind_tcf	=	sfq_bind,
	.unbind_tcf	=	sfq_put,
	.dump		=	sfq_dump_class,
	.dump_stats	=	sfq_dump_class_stats,
	.walk		=	sfq_walk,
};

static struct Qdisc_ops sfq_qdisc_ops __read_mostly = {
	.cl_ops		=	&sfq_class_ops,
	.id		=	"sfq",
	.priv_size	=	sizeof(struct sfq_sched_data),
	.enqueue	=	sfq_enqueue,
	.dequeue	=	sfq_dequeue,
	.peek		=	sfq_peek,
	.drop		=	sfq_drop,
	.init		=	sfq_init,
	.reset		=	sfq_reset,
	.destroy	=	sfq_destroy,
	.change		=	NULL,
	.dump		=	sfq_dump,
	.owner		=	THIS_MODULE,
};

static int __init sfq_module_init(void)
{
	return register_qdisc(&sfq_qdisc_ops);
}
static void __exit sfq_module_exit(void)
{
	unregister_qdisc(&sfq_qdisc_ops);
}
module_init(sfq_module_init)
module_exit(sfq_module_exit)
MODULE_LICENSE("GPL");