/*
 * net/sched/sch_qfq.c         Quick Fair Queueing Scheduler.
 *
 * Copyright (c) 2009 Fabio Checconi, Luigi Rizzo, and Paolo Valente.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/pkt_sched.h>
#include <net/sch_generic.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>


/*  Quick Fair Queueing
    ===================

    Sources:

    Fabio Checconi, Luigi Rizzo, and Paolo Valente: "QFQ: Efficient
    Packet Scheduling with Tight Bandwidth Distribution Guarantees."

    See also:
    http://retis.sssup.it/~fabio/linux/qfq/
 */

/*

  Virtual time computations.

  S, F and V are all computed in fixed point arithmetic with
  FRAC_BITS fractional bits.

  QFQ_MAX_INDEX is the maximum index allowed for a group. We need
  one bit per index.
  QFQ_MAX_WSHIFT is the maximum power of two supported as a weight.

  The layout of the bits is as below:

                   [ MTU_SHIFT ][      FRAC_BITS    ]
                   [ MAX_INDEX    ][ MIN_SLOT_SHIFT ]
				 ^.__grp->index = 0
				 *.__grp->slot_shift

  where MIN_SLOT_SHIFT is derived by difference from the others.

  The max group index corresponds to Lmax/w_min, where
  Lmax = 1<<MTU_SHIFT and w_min = 1.
  From this, and knowing how many groups (MAX_INDEX) we want,
  we can derive the shift corresponding to each group.

  Because we often need to compute
	F = S + len/w_i  and V = V + len/wsum
  instead of storing w_i we store the value
	inv_w = (1<<FRAC_BITS)/w_i
  so we can compute F = S + len * inv_w. Likewise, V is advanced
  using the precomputed IWSUM = ONE_FP/wsum, which makes it easy to
  move between a static and an adaptive weight sum.

  The per-scheduler-instance data contain all the data structures
  for the scheduler: bitmaps and bucket lists.

 */
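
/*
 * Worked fixed-point example (illustrative numbers, not from the
 * original source): with FRAC_BITS = 30, a class of weight w_i = 4
 * stores inv_w = (1 << 30) / 4 = 1 << 28. Serving a len = 1000 byte
 * packet then advances its finish time by
 *	len * inv_w = 1000 << 28 = 250 << 30,
 * i.e. len/w_i = 250 units of virtual time, scaled by ONE_FP.
 */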

/*
 * Maximum number of consecutive slots occupied by backlogged classes
 * inside a group.
 */
#define QFQ_MAX_SLOTS	32

/*
 * Shifts used for class<->group mapping.  We allow class weights that are
 * in the range [1, 2^MAX_WSHIFT], and we try to map each class i to the
 * group with the smallest index that can support the L_i / r_i configured
 * for the class.
 *
 * grp->index is the index of the group; and grp->slot_shift
 * is the shift for the corresponding (scaled) sigma_i.
 */
#define QFQ_MAX_INDEX		19
#define QFQ_MAX_WSHIFT		16

#define	QFQ_MAX_WEIGHT		(1<<QFQ_MAX_WSHIFT)
#define QFQ_MAX_WSUM		(2*QFQ_MAX_WEIGHT)

#define FRAC_BITS		30	/* fixed point arithmetic */
#define ONE_FP			(1UL << FRAC_BITS)
#define IWSUM			(ONE_FP/QFQ_MAX_WSUM)

#define QFQ_MTU_SHIFT		11
#define QFQ_MIN_SLOT_SHIFT	(FRAC_BITS + QFQ_MTU_SHIFT - QFQ_MAX_INDEX)
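
/*
 * A quick check of the arithmetic above with the default constants
 * (a sketch of the numbers, not additional configuration):
 *	QFQ_MIN_SLOT_SHIFT = 30 + 11 - 19 = 22
 *	ONE_FP       = 1 << 30
 *	QFQ_MAX_WSUM = 2 * (1 << 16) = 1 << 17
 *	IWSUM        = (1 << 30) / (1 << 17) = 1 << 13 = 8192
 * so every byte dequeued advances V by 8192 fixed-point units (see
 * qfq_dequeue(), where q->V += len * IWSUM).
 */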

/*
 * Possible group states.  These values are used as indexes for the bitmaps
 * array of struct qfq_sched.
 */
enum qfq_state { ER, IR, EB, IB, QFQ_MAX_STATE };

struct qfq_group;

struct qfq_class {
	struct Qdisc_class_common common;

	unsigned int refcnt;
	unsigned int filter_cnt;

	struct gnet_stats_basic_packed bstats;
	struct gnet_stats_queue qstats;
	struct gnet_stats_rate_est rate_est;
	struct Qdisc *qdisc;

	struct hlist_node next;	/* Link for the slot list. */
	u64 S, F;		/* flow timestamps (exact) */

	/* group we belong to. In principle we would need the index,
	 * which is log_2(lmax/weight), but we never reference it
	 * directly, only the group.
	 */
	struct qfq_group *grp;

	/* these are copied from the class configuration. */
	u32	inv_w;		/* ONE_FP/weight */
	u32	lmax;		/* Max packet size for this flow. */
};

struct qfq_group {
	u64 S, F;			/* group timestamps (approx). */
	unsigned int slot_shift;	/* Slot shift. */
	unsigned int index;		/* Group index. */
	unsigned int front;		/* Index of the front slot. */
	unsigned long full_slots;	/* non-empty slots */

	/* Array of RR lists of active classes. */
	struct hlist_head slots[QFQ_MAX_SLOTS];
};

struct qfq_sched {
	struct tcf_proto *filter_list;
	struct Qdisc_class_hash clhash;

	u64		V;		/* Precise virtual time. */
	u32		wsum;		/* weight sum */

	unsigned long bitmaps[QFQ_MAX_STATE];	    /* Group bitmaps. */
	struct qfq_group groups[QFQ_MAX_INDEX + 1]; /* The groups. */
};

static struct qfq_class *qfq_find_class(struct Qdisc *sch, u32 classid)
{
	struct qfq_sched *q = qdisc_priv(sch);
	struct Qdisc_class_common *clc;

	clc = qdisc_class_find(&q->clhash, classid);
	if (clc == NULL)
		return NULL;
	return container_of(clc, struct qfq_class, common);
}

static void qfq_purge_queue(struct qfq_class *cl)
{
	unsigned int len = cl->qdisc->q.qlen;

	qdisc_reset(cl->qdisc);
	qdisc_tree_decrease_qlen(cl->qdisc, len);
}

static const struct nla_policy qfq_policy[TCA_QFQ_MAX + 1] = {
	[TCA_QFQ_WEIGHT] = { .type = NLA_U32 },
	[TCA_QFQ_LMAX] = { .type = NLA_U32 },
};

/*
 * Calculate a flow index, given its weight and maximum packet length.
 * index = log_2(maxlen/weight) but we need to apply the scaling.
 * This is used only once at flow creation.
 */
static int qfq_calc_index(u32 inv_w, unsigned int maxlen)
{
	u64 slot_size = (u64)maxlen * inv_w;
	unsigned long size_map;
	int index = 0;

	size_map = slot_size >> QFQ_MIN_SLOT_SHIFT;
	if (!size_map)
		goto out;

	index = __fls(size_map) + 1;	/* basically a log_2 */
	index -= !(slot_size - (1ULL << (index + QFQ_MIN_SLOT_SHIFT - 1)));

	if (index < 0)
		index = 0;
out:
	pr_debug("qfq calc_index: W = %lu, L = %u, I = %d\n",
		 (unsigned long) ONE_FP/inv_w, maxlen, index);

	return index;
}
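
/*
 * Worked example of qfq_calc_index() (illustrative, assuming the
 * default constants): for weight = 1 and maxlen = 2048 we have
 * inv_w = 1 << 30, so
 *	slot_size = 2048ULL * (1 << 30)  = 1ULL << 41
 *	size_map  = slot_size >> 22      = 1 << 19
 *	index     = __fls(1 << 19) + 1   = 20
 * and, because slot_size is an exact power of two, the correction
 * line subtracts 1, giving index = 19 = QFQ_MAX_INDEX.
 */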

/* Length of the next packet (0 if the queue is empty). */
static unsigned int qdisc_peek_len(struct Qdisc *sch)
{
	struct sk_buff *skb;

	skb = sch->ops->peek(sch);
	return skb ? qdisc_pkt_len(skb) : 0;
}

static void qfq_deactivate_class(struct qfq_sched *, struct qfq_class *);
static void qfq_activate_class(struct qfq_sched *q, struct qfq_class *cl,
			       unsigned int len);

static void qfq_update_class_params(struct qfq_sched *q, struct qfq_class *cl,
				    u32 lmax, u32 inv_w, int delta_w)
{
	int i;

	/* update qfq-specific data */
	cl->lmax = lmax;
	cl->inv_w = inv_w;
	i = qfq_calc_index(cl->inv_w, cl->lmax);

	cl->grp = &q->groups[i];

	q->wsum += delta_w;
}

static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
			    struct nlattr **tca, unsigned long *arg)
{
	struct qfq_sched *q = qdisc_priv(sch);
	struct qfq_class *cl = (struct qfq_class *)*arg;
	struct nlattr *tb[TCA_QFQ_MAX + 1];
	u32 weight, lmax, inv_w;
	int i, err;
	int delta_w;

	if (tca[TCA_OPTIONS] == NULL) {
		pr_notice("qfq: no options\n");
		return -EINVAL;
	}

	err = nla_parse_nested(tb, TCA_QFQ_MAX, tca[TCA_OPTIONS], qfq_policy);
	if (err < 0)
		return err;

	if (tb[TCA_QFQ_WEIGHT]) {
		weight = nla_get_u32(tb[TCA_QFQ_WEIGHT]);
		if (!weight || weight > (1UL << QFQ_MAX_WSHIFT)) {
			pr_notice("qfq: invalid weight %u\n", weight);
			return -EINVAL;
		}
	} else
		weight = 1;

	inv_w = ONE_FP / weight;
	weight = ONE_FP / inv_w;
	delta_w = weight - (cl ? ONE_FP / cl->inv_w : 0);
	if (q->wsum + delta_w > QFQ_MAX_WSUM) {
		pr_notice("qfq: total weight out of range (%u + %u)\n",
			  delta_w, q->wsum);
		return -EINVAL;
	}

	if (tb[TCA_QFQ_LMAX]) {
		lmax = nla_get_u32(tb[TCA_QFQ_LMAX]);
		if (!lmax || lmax > (1UL << QFQ_MTU_SHIFT)) {
			pr_notice("qfq: invalid max length %u\n", lmax);
			return -EINVAL;
		}
	} else
		lmax = 1UL << QFQ_MTU_SHIFT;

	if (cl != NULL) {
		bool need_reactivation = false;

		if (tca[TCA_RATE]) {
			err = gen_replace_estimator(&cl->bstats, &cl->rate_est,
						    qdisc_root_sleeping_lock(sch),
						    tca[TCA_RATE]);
			if (err)
				return err;
		}

		if (lmax == cl->lmax && inv_w == cl->inv_w)
			return 0; /* nothing to update */

		i = qfq_calc_index(inv_w, lmax);
		sch_tree_lock(sch);
		if (&q->groups[i] != cl->grp && cl->qdisc->q.qlen > 0) {
			/*
			 * shift cl->F back, to not charge the
			 * class for the not-yet-served head
			 * packet
			 */
			cl->F = cl->S;
			/* remove class from its slot in the old group */
			qfq_deactivate_class(q, cl);
			need_reactivation = true;
		}

		qfq_update_class_params(q, cl, lmax, inv_w, delta_w);

		if (need_reactivation) /* activate in new group */
			qfq_activate_class(q, cl, qdisc_peek_len(cl->qdisc));
		sch_tree_unlock(sch);

		return 0;
	}

	cl = kzalloc(sizeof(struct qfq_class), GFP_KERNEL);
	if (cl == NULL)
		return -ENOBUFS;

	cl->refcnt = 1;
	cl->common.classid = classid;

	qfq_update_class_params(q, cl, lmax, inv_w, delta_w);

	cl->qdisc = qdisc_create_dflt(sch->dev_queue,
				      &pfifo_qdisc_ops, classid);
	if (cl->qdisc == NULL)
		cl->qdisc = &noop_qdisc;

	if (tca[TCA_RATE]) {
		err = gen_new_estimator(&cl->bstats, &cl->rate_est,
					qdisc_root_sleeping_lock(sch),
					tca[TCA_RATE]);
		if (err) {
			qdisc_destroy(cl->qdisc);
			kfree(cl);
			return err;
		}
	}

	sch_tree_lock(sch);
	qdisc_class_hash_insert(&q->clhash, &cl->common);
	sch_tree_unlock(sch);

	qdisc_class_hash_grow(sch, &q->clhash);

	*arg = (unsigned long)cl;
	return 0;
}

static void qfq_destroy_class(struct Qdisc *sch, struct qfq_class *cl)
{
	struct qfq_sched *q = qdisc_priv(sch);

	if (cl->inv_w) {
		q->wsum -= ONE_FP / cl->inv_w;
		cl->inv_w = 0;
	}

	gen_kill_estimator(&cl->bstats, &cl->rate_est);
	qdisc_destroy(cl->qdisc);
	kfree(cl);
}

static int qfq_delete_class(struct Qdisc *sch, unsigned long arg)
{
	struct qfq_sched *q = qdisc_priv(sch);
	struct qfq_class *cl = (struct qfq_class *)arg;

	if (cl->filter_cnt > 0)
		return -EBUSY;

	sch_tree_lock(sch);

	qfq_purge_queue(cl);
	qdisc_class_hash_remove(&q->clhash, &cl->common);

	BUG_ON(--cl->refcnt == 0);
	/*
	 * This shouldn't happen: we "hold" one cops->get() when called
	 * from tc_ctl_tclass; the destroy method is done from cops->put().
	 */

	sch_tree_unlock(sch);
	return 0;
}

static unsigned long qfq_get_class(struct Qdisc *sch, u32 classid)
{
	struct qfq_class *cl = qfq_find_class(sch, classid);

	if (cl != NULL)
		cl->refcnt++;

	return (unsigned long)cl;
}

static void qfq_put_class(struct Qdisc *sch, unsigned long arg)
{
	struct qfq_class *cl = (struct qfq_class *)arg;

	if (--cl->refcnt == 0)
		qfq_destroy_class(sch, cl);
}

static struct tcf_proto **qfq_tcf_chain(struct Qdisc *sch, unsigned long cl)
{
	struct qfq_sched *q = qdisc_priv(sch);

	if (cl)
		return NULL;

	return &q->filter_list;
}

static unsigned long qfq_bind_tcf(struct Qdisc *sch, unsigned long parent,
				  u32 classid)
{
	struct qfq_class *cl = qfq_find_class(sch, classid);

	if (cl != NULL)
		cl->filter_cnt++;

	return (unsigned long)cl;
}

static void qfq_unbind_tcf(struct Qdisc *sch, unsigned long arg)
{
	struct qfq_class *cl = (struct qfq_class *)arg;

	cl->filter_cnt--;
}

static int qfq_graft_class(struct Qdisc *sch, unsigned long arg,
			   struct Qdisc *new, struct Qdisc **old)
{
	struct qfq_class *cl = (struct qfq_class *)arg;

	if (new == NULL) {
		new = qdisc_create_dflt(sch->dev_queue,
					&pfifo_qdisc_ops, cl->common.classid);
		if (new == NULL)
			new = &noop_qdisc;
	}

	sch_tree_lock(sch);
	qfq_purge_queue(cl);
	*old = cl->qdisc;
	cl->qdisc = new;
	sch_tree_unlock(sch);
	return 0;
}

static struct Qdisc *qfq_class_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct qfq_class *cl = (struct qfq_class *)arg;

	return cl->qdisc;
}

static int qfq_dump_class(struct Qdisc *sch, unsigned long arg,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct qfq_class *cl = (struct qfq_class *)arg;
	struct nlattr *nest;

	tcm->tcm_parent	= TC_H_ROOT;
	tcm->tcm_handle	= cl->common.classid;
	tcm->tcm_info	= cl->qdisc->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;
	if (nla_put_u32(skb, TCA_QFQ_WEIGHT, ONE_FP/cl->inv_w) ||
	    nla_put_u32(skb, TCA_QFQ_LMAX, cl->lmax))
		goto nla_put_failure;
	return nla_nest_end(skb, nest);

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

static int qfq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
				struct gnet_dump *d)
{
	struct qfq_class *cl = (struct qfq_class *)arg;
	struct tc_qfq_stats xstats;

	memset(&xstats, 0, sizeof(xstats));
	cl->qdisc->qstats.qlen = cl->qdisc->q.qlen;

	xstats.weight = ONE_FP/cl->inv_w;
	xstats.lmax = cl->lmax;

	if (gnet_stats_copy_basic(d, &cl->bstats) < 0 ||
	    gnet_stats_copy_rate_est(d, &cl->bstats, &cl->rate_est) < 0 ||
	    gnet_stats_copy_queue(d, &cl->qdisc->qstats) < 0)
		return -1;

	return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
}

static void qfq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct qfq_sched *q = qdisc_priv(sch);
	struct qfq_class *cl;
	struct hlist_node *n;
	unsigned int i;

	if (arg->stop)
		return;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode) {
			if (arg->count < arg->skip) {
				arg->count++;
				continue;
			}
			if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
				arg->stop = 1;
				return;
			}
			arg->count++;
		}
	}
}

static struct qfq_class *qfq_classify(struct sk_buff *skb, struct Qdisc *sch,
				      int *qerr)
{
	struct qfq_sched *q = qdisc_priv(sch);
	struct qfq_class *cl;
	struct tcf_result res;
	int result;

	if (TC_H_MAJ(skb->priority ^ sch->handle) == 0) {
		pr_debug("qfq_classify: found %d\n", skb->priority);
		cl = qfq_find_class(sch, skb->priority);
		if (cl != NULL)
			return cl;
	}

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	result = tc_classify(skb, q->filter_list, &res);
	if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_QUEUED:
		case TC_ACT_STOLEN:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
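			/* fall through */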
		case TC_ACT_SHOT:
			return NULL;
		}
#endif
		cl = (struct qfq_class *)res.class;
		if (cl == NULL)
			cl = qfq_find_class(sch, res.classid);
		return cl;
	}

	return NULL;
}

/* Generic comparison function, handling wraparound. */
static inline int qfq_gt(u64 a, u64 b)
{
	return (s64)(a - b) > 0;
}
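
/*
 * The signed difference makes the comparison robust to u64 wraparound;
 * e.g. qfq_gt(1, ~0ULL) is true, since (s64)(1 - ~0ULL) == 2 > 0, so a
 * timestamp just past the wrap point still compares as "later".
 */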

/* Round a precise timestamp to its slotted value. */
static inline u64 qfq_round_down(u64 ts, unsigned int shift)
{
	return ts & ~((1ULL << shift) - 1);
}
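
/*
 * For example, qfq_round_down(0x12345678, 16) == 0x12340000: the low
 * "shift" bits are cleared, snapping ts down to its slot boundary.
 */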

/* return the pointer to the group with lowest index in the bitmap */
static inline struct qfq_group *qfq_ffs(struct qfq_sched *q,
					unsigned long bitmap)
{
	int index = __ffs(bitmap);
	return &q->groups[index];
}

/* Calculate a mask to mimic what would be ffs_from(). */
static inline unsigned long mask_from(unsigned long bitmap, int from)
{
	return bitmap & ~((1UL << from) - 1);
}

/*
 * The state computation relies on ER=0, IR=1, EB=2, IB=3.
 * First compute eligibility comparing grp->S, q->V,
 * then check if someone is blocking us and possibly add EB.
 */
static int qfq_calc_state(struct qfq_sched *q, const struct qfq_group *grp)
{
	/* if S > V we are not eligible */
	unsigned int state = qfq_gt(grp->S, q->V);
	unsigned long mask = mask_from(q->bitmaps[ER], grp->index);
	struct qfq_group *next;

	if (mask) {
		next = qfq_ffs(q, mask);
		if (qfq_gt(grp->F, next->F))
			state |= EB;
	}

	return state;
}
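
/*
 * Recap of the encoding the function above relies on: bit 0 of the
 * state means "ineligible" (S > V) and bit 1 means "blocked", hence
 *	ER = 0 (eligible, ready),   IR = 1 (ineligible, ready),
 *	EB = 2 (eligible, blocked), IB = 3 (ineligible, blocked),
 * which lets qfq_calc_state() start from the boolean qfq_gt() result
 * and simply OR in EB.
 */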


/*
 * In principle
 *	q->bitmaps[dst] |= q->bitmaps[src] & mask;
 *	q->bitmaps[src] &= ~mask;
 * but we should make sure that src != dst
 */
static inline void qfq_move_groups(struct qfq_sched *q, unsigned long mask,
				   int src, int dst)
{
	q->bitmaps[dst] |= q->bitmaps[src] & mask;
	q->bitmaps[src] &= ~mask;
}

static void qfq_unblock_groups(struct qfq_sched *q, int index, u64 old_F)
{
	unsigned long mask = mask_from(q->bitmaps[ER], index + 1);
	struct qfq_group *next;

	if (mask) {
		next = qfq_ffs(q, mask);
		if (!qfq_gt(next->F, old_F))
			return;
	}

	mask = (1UL << index) - 1;
	qfq_move_groups(q, mask, EB, ER);
	qfq_move_groups(q, mask, IB, IR);
}

/*
 * perhaps
 *
	old_V ^= q->V;
	old_V >>= QFQ_MIN_SLOT_SHIFT;
	if (old_V) {
		...
	}
 *
 */
static void qfq_make_eligible(struct qfq_sched *q, u64 old_V)
{
	unsigned long vslot = q->V >> QFQ_MIN_SLOT_SHIFT;
	unsigned long old_vslot = old_V >> QFQ_MIN_SLOT_SHIFT;

	if (vslot != old_vslot) {
		unsigned long mask = (1UL << fls(vslot ^ old_vslot)) - 1;
		qfq_move_groups(q, mask, IR, ER);
		qfq_move_groups(q, mask, IB, EB);
	}
}
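
/*
 * Example of the mask above (illustrative numbers): if old_V and V
 * fall in virtual-time slots old_vslot = 0x7 and vslot = 0x8, then
 * vslot ^ old_vslot = 0xf, fls() returns 4 and mask = 0xf, so groups
 * 0..3 move from the ineligible (IR/IB) to the eligible (ER/EB)
 * bitmaps.
 */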


/*
 * XXX we should make sure that slot becomes less than 32.
 * This is guaranteed by the input values.
 * roundedS is always cl->S rounded on grp->slot_shift bits.
 */
static void qfq_slot_insert(struct qfq_group *grp, struct qfq_class *cl,
			    u64 roundedS)
{
	u64 slot = (roundedS - grp->S) >> grp->slot_shift;
	unsigned int i = (grp->front + slot) % QFQ_MAX_SLOTS;

	hlist_add_head(&cl->next, &grp->slots[i]);
	__set_bit(slot, &grp->full_slots);
}
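
/*
 * Insertion example (illustrative numbers): with grp->S = 0 and
 * slot_shift = 24, a class whose roundedS = 3 << 24 goes to logical
 * slot 3; if grp->front = 30, that is physical bucket
 * (30 + 3) % 32 = 1. full_slots is indexed by the logical slot,
 * hence __set_bit(slot, ...).
 */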

/* Maybe introduce hlist_first_entry?? */
static struct qfq_class *qfq_slot_head(struct qfq_group *grp)
{
	return hlist_entry(grp->slots[grp->front].first,
			   struct qfq_class, next);
}

/*
 * remove the entry from the slot
 */
static void qfq_front_slot_remove(struct qfq_group *grp)
{
	struct qfq_class *cl = qfq_slot_head(grp);

	BUG_ON(!cl);
	hlist_del(&cl->next);
	if (hlist_empty(&grp->slots[grp->front]))
		__clear_bit(0, &grp->full_slots);
}

/*
 * Returns the first full queue in a group. As a side effect,
 * adjusts the bucket list so that the first non-empty bucket is at
 * position 0 in full_slots.
 */
static struct qfq_class *qfq_slot_scan(struct qfq_group *grp)
{
	unsigned int i;

	pr_debug("qfq slot_scan: grp %u full %#lx\n",
		 grp->index, grp->full_slots);

	if (grp->full_slots == 0)
		return NULL;

	i = __ffs(grp->full_slots);  /* zero based */
	if (i > 0) {
		grp->front = (grp->front + i) % QFQ_MAX_SLOTS;
		grp->full_slots >>= i;
	}

	return qfq_slot_head(grp);
}

/*
 * adjust the bucket list. When the start time of a group decreases,
 * we move the index down (modulo QFQ_MAX_SLOTS) so we don't need to
 * move the objects. The mask of occupied slots must be shifted
 * because we use ffs() to find the first non-empty slot.
 * This covers decreases in the group's start time, but what about
 * increases of the start time?
 * Here too we should make sure that i is less than 32.
 */
static void qfq_slot_rotate(struct qfq_group *grp, u64 roundedS)
{
	unsigned int i = (grp->S - roundedS) >> grp->slot_shift;

	grp->full_slots <<= i;
	grp->front = (grp->front - i) % QFQ_MAX_SLOTS;
}

static void qfq_update_eligible(struct qfq_sched *q, u64 old_V)
{
	struct qfq_group *grp;
	unsigned long ineligible;

	ineligible = q->bitmaps[IR] | q->bitmaps[IB];
	if (ineligible) {
		if (!q->bitmaps[ER]) {
			grp = qfq_ffs(q, ineligible);
			if (qfq_gt(grp->S, q->V))
				q->V = grp->S;
		}
		qfq_make_eligible(q, old_V);
	}
}

/*
 * Updates the class; returns true if the group needs to be updated
 * as well.
 */
static bool qfq_update_class(struct qfq_group *grp, struct qfq_class *cl)
{
	unsigned int len = qdisc_peek_len(cl->qdisc);

	cl->S = cl->F;
	if (!len)
		qfq_front_slot_remove(grp);	/* queue is empty */
	else {
		u64 roundedS;

		cl->F = cl->S + (u64)len * cl->inv_w;
		roundedS = qfq_round_down(cl->S, grp->slot_shift);
		if (roundedS == grp->S)
			return false;

		qfq_front_slot_remove(grp);
		qfq_slot_insert(grp, cl, roundedS);
	}

	return true;
}

static struct sk_buff *qfq_dequeue(struct Qdisc *sch)
{
	struct qfq_sched *q = qdisc_priv(sch);
	struct qfq_group *grp;
	struct qfq_class *cl;
	struct sk_buff *skb;
	unsigned int len;
	u64 old_V;

	if (!q->bitmaps[ER])
		return NULL;

	grp = qfq_ffs(q, q->bitmaps[ER]);

	cl = qfq_slot_head(grp);
	skb = qdisc_dequeue_peeked(cl->qdisc);
	if (!skb) {
		WARN_ONCE(1, "qfq_dequeue: non-workconserving leaf\n");
		return NULL;
	}

	sch->q.qlen--;
	qdisc_bstats_update(sch, skb);

	old_V = q->V;
	len = qdisc_pkt_len(skb);
	q->V += (u64)len * IWSUM;
	pr_debug("qfq dequeue: len %u F %lld now %lld\n",
		 len, (unsigned long long) cl->F, (unsigned long long) q->V);

	if (qfq_update_class(grp, cl)) {
		u64 old_F = grp->F;

		cl = qfq_slot_scan(grp);
		if (!cl)
			__clear_bit(grp->index, &q->bitmaps[ER]);
		else {
			u64 roundedS = qfq_round_down(cl->S, grp->slot_shift);
			unsigned int s;

			if (grp->S == roundedS)
				goto skip_unblock;
			grp->S = roundedS;
			grp->F = roundedS + (2ULL << grp->slot_shift);
			__clear_bit(grp->index, &q->bitmaps[ER]);
			s = qfq_calc_state(q, grp);
			__set_bit(grp->index, &q->bitmaps[s]);
		}

		qfq_unblock_groups(q, grp->index, old_F);
	}

skip_unblock:
	qfq_update_eligible(q, old_V);

	return skb;
}

/*
 * Assign a reasonable start time for a new flow k in group i.
 * Admissible values for \hat{F} are multiples of \sigma_i
 * no greater than V + \sigma_i; larger values mean that
 * we had a wraparound, so we consider the timestamp to be stale.
 *
 * If F is not stale and F >= V then we set S = F.
 * Otherwise we should assign S = V, but this may violate
 * the ordering in ER. So, if we have groups in ER, set S to
 * the F_j of the first group j which would be blocking us.
 * We are guaranteed not to move S backward because
 * otherwise our group i would still be blocked.
 */
static void qfq_update_start(struct qfq_sched *q, struct qfq_class *cl)
{
	unsigned long mask;
	u64 limit, roundedF;
	int slot_shift = cl->grp->slot_shift;

	roundedF = qfq_round_down(cl->F, slot_shift);
	limit = qfq_round_down(q->V, slot_shift) + (1ULL << slot_shift);

	if (!qfq_gt(cl->F, q->V) || qfq_gt(roundedF, limit)) {
		/* timestamp was stale */
		mask = mask_from(q->bitmaps[ER], cl->grp->index);
		if (mask) {
			struct qfq_group *next = qfq_ffs(q, mask);
			if (qfq_gt(roundedF, next->F)) {
				cl->S = next->F;
				return;
			}
		}
		cl->S = q->V;
	} else  /* timestamp is not stale */
		cl->S = cl->F;
}
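
/*
 * Illustrative run of the staleness test above (numbers made up):
 * with slot_shift = 22, sigma = 1 << 22. If q->V = 10 * sigma + 100,
 * then limit = 11 * sigma; a finish time whose roundedF = 13 * sigma
 * exceeds limit, is treated as stale, and S falls back to q->V (or
 * to the F_j of the first blocking group in ER).
 */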

static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct qfq_sched *q = qdisc_priv(sch);
	struct qfq_class *cl;
	int err;

	cl = qfq_classify(skb, sch, &err);
	if (cl == NULL) {
		if (err & __NET_XMIT_BYPASS)
			sch->qstats.drops++;
		kfree_skb(skb);
		return err;
	}
	pr_debug("qfq_enqueue: cl = %x\n", cl->common.classid);

	err = qdisc_enqueue(skb, cl->qdisc);
	if (unlikely(err != NET_XMIT_SUCCESS)) {
		pr_debug("qfq_enqueue: enqueue failed %d\n", err);
		if (net_xmit_drop_count(err)) {
			cl->qstats.drops++;
			sch->qstats.drops++;
		}
		return err;
	}

	bstats_update(&cl->bstats, skb);
	++sch->q.qlen;

	/* If the new skb is not the head of the queue, we are done here. */
	if (cl->qdisc->q.qlen != 1)
		return err;

	/* If we reach this point, the queue was idle and this skb made
	 * the class backlogged again.
	 */
	qfq_activate_class(q, cl, qdisc_pkt_len(skb));

	return err;
}

/*
 * Handle class switch from idle to backlogged.
 */
static void qfq_activate_class(struct qfq_sched *q, struct qfq_class *cl,
			       unsigned int pkt_len)
{
	struct qfq_group *grp = cl->grp;
	u64 roundedS;
	int s;

	qfq_update_start(q, cl);

	/* compute new finish time and rounded start. */
	cl->F = cl->S + (u64)pkt_len * cl->inv_w;
	roundedS = qfq_round_down(cl->S, grp->slot_shift);

	/*
	 * insert cl in the correct bucket.
	 * If cl->S >= grp->S we don't need to adjust the
	 * bucket list and simply go to the insertion phase.
	 * Otherwise grp->S is decreasing, we must make room
	 * in the bucket list, and also recompute the group state.
	 * Finally, if there were no flows in this group and nobody
	 * was in ER make sure to adjust V.
	 */
	if (grp->full_slots) {
		if (!qfq_gt(grp->S, cl->S))
			goto skip_update;

		/* create a slot for this cl->S */
		qfq_slot_rotate(grp, roundedS);
		/* group was surely ineligible, remove it from both
		 * ineligible bitmaps
		 */
		__clear_bit(grp->index, &q->bitmaps[IR]);
		__clear_bit(grp->index, &q->bitmaps[IB]);
	} else if (!q->bitmaps[ER] && qfq_gt(roundedS, q->V))
		q->V = roundedS;

	grp->S = roundedS;
	grp->F = roundedS + (2ULL << grp->slot_shift);
	s = qfq_calc_state(q, grp);
	__set_bit(grp->index, &q->bitmaps[s]);

	pr_debug("qfq enqueue: new state %d %#lx S %lld F %lld V %lld\n",
		 s, q->bitmaps[s],
		 (unsigned long long) cl->S,
		 (unsigned long long) cl->F,
		 (unsigned long long) q->V);

skip_update:
	qfq_slot_insert(grp, cl, roundedS);
}


static void qfq_slot_remove(struct qfq_sched *q, struct qfq_group *grp,
			    struct qfq_class *cl)
{
	unsigned int i, offset;
	u64 roundedS;

	roundedS = qfq_round_down(cl->S, grp->slot_shift);
	offset = (roundedS - grp->S) >> grp->slot_shift;
	i = (grp->front + offset) % QFQ_MAX_SLOTS;

	hlist_del(&cl->next);
	if (hlist_empty(&grp->slots[i]))
		__clear_bit(offset, &grp->full_slots);
}

/*
 * Called to forcibly deactivate a class (e.g., when its queue is
 * purged). If the class is not in the front bucket, or if the front
 * bucket contains other classes, we can simply remove the class with
 * no other side effects.
 * Otherwise we must propagate the event up.
 */
static void qfq_deactivate_class(struct qfq_sched *q, struct qfq_class *cl)
{
	struct qfq_group *grp = cl->grp;
	unsigned long mask;
	u64 roundedS;
	int s;

	cl->F = cl->S;
	qfq_slot_remove(q, grp, cl);

	if (!grp->full_slots) {
		__clear_bit(grp->index, &q->bitmaps[IR]);
		__clear_bit(grp->index, &q->bitmaps[EB]);
		__clear_bit(grp->index, &q->bitmaps[IB]);

		if (test_bit(grp->index, &q->bitmaps[ER]) &&
		    !(q->bitmaps[ER] & ~((1UL << grp->index) - 1))) {
			mask = q->bitmaps[ER] & ((1UL << grp->index) - 1);
			if (mask)
				mask = ~((1UL << __fls(mask)) - 1);
			else
				mask = ~0UL;
			qfq_move_groups(q, mask, EB, ER);
			qfq_move_groups(q, mask, IB, IR);
		}
		__clear_bit(grp->index, &q->bitmaps[ER]);
	} else if (hlist_empty(&grp->slots[grp->front])) {
		cl = qfq_slot_scan(grp);
		roundedS = qfq_round_down(cl->S, grp->slot_shift);
		if (grp->S != roundedS) {
			__clear_bit(grp->index, &q->bitmaps[ER]);
			__clear_bit(grp->index, &q->bitmaps[IR]);
			__clear_bit(grp->index, &q->bitmaps[EB]);
			__clear_bit(grp->index, &q->bitmaps[IB]);
			grp->S = roundedS;
			grp->F = roundedS + (2ULL << grp->slot_shift);
			s = qfq_calc_state(q, grp);
			__set_bit(grp->index, &q->bitmaps[s]);
		}
	}

	qfq_update_eligible(q, q->V);
}

static void qfq_qlen_notify(struct Qdisc *sch, unsigned long arg)
{
	struct qfq_sched *q = qdisc_priv(sch);
	struct qfq_class *cl = (struct qfq_class *)arg;

	if (cl->qdisc->q.qlen == 0)
		qfq_deactivate_class(q, cl);
}

static unsigned int qfq_drop(struct Qdisc *sch)
{
	struct qfq_sched *q = qdisc_priv(sch);
	struct qfq_group *grp;
	unsigned int i, j, len;

	for (i = 0; i <= QFQ_MAX_INDEX; i++) {
		grp = &q->groups[i];
		for (j = 0; j < QFQ_MAX_SLOTS; j++) {
			struct qfq_class *cl;
			struct hlist_node *n;

			hlist_for_each_entry(cl, n, &grp->slots[j], next) {

				if (!cl->qdisc->ops->drop)
					continue;

				len = cl->qdisc->ops->drop(cl->qdisc);
				if (len > 0) {
					sch->q.qlen--;
					if (!cl->qdisc->q.qlen)
						qfq_deactivate_class(q, cl);

					return len;
				}
			}
		}
	}

	return 0;
}

static int qfq_init_qdisc(struct Qdisc *sch, struct nlattr *opt)
{
	struct qfq_sched *q = qdisc_priv(sch);
	struct qfq_group *grp;
	int i, j, err;

	err = qdisc_class_hash_init(&q->clhash);
	if (err < 0)
		return err;

	for (i = 0; i <= QFQ_MAX_INDEX; i++) {
		grp = &q->groups[i];
		grp->index = i;
		grp->slot_shift = QFQ_MTU_SHIFT + FRAC_BITS
				   - (QFQ_MAX_INDEX - i);
		for (j = 0; j < QFQ_MAX_SLOTS; j++)
			INIT_HLIST_HEAD(&grp->slots[j]);
	}

	return 0;
}

static void qfq_reset_qdisc(struct Qdisc *sch)
{
	struct qfq_sched *q = qdisc_priv(sch);
	struct qfq_group *grp;
	struct qfq_class *cl;
	struct hlist_node *n, *tmp;
	unsigned int i, j;

	for (i = 0; i <= QFQ_MAX_INDEX; i++) {
		grp = &q->groups[i];
		for (j = 0; j < QFQ_MAX_SLOTS; j++) {
			hlist_for_each_entry_safe(cl, n, tmp,
						  &grp->slots[j], next) {
				qfq_deactivate_class(q, cl);
			}
		}
	}

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode)
			qdisc_reset(cl->qdisc);
	}
	sch->q.qlen = 0;
}

static void qfq_destroy_qdisc(struct Qdisc *sch)
{
	struct qfq_sched *q = qdisc_priv(sch);
	struct qfq_class *cl;
	struct hlist_node *n, *next;
	unsigned int i;

	tcf_destroy_chain(&q->filter_list);

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry_safe(cl, n, next, &q->clhash.hash[i],
					  common.hnode) {
			qfq_destroy_class(sch, cl);
		}
	}
	qdisc_class_hash_destroy(&q->clhash);
}

static const struct Qdisc_class_ops qfq_class_ops = {
	.change		= qfq_change_class,
	.delete		= qfq_delete_class,
	.get		= qfq_get_class,
	.put		= qfq_put_class,
	.tcf_chain	= qfq_tcf_chain,
	.bind_tcf	= qfq_bind_tcf,
	.unbind_tcf	= qfq_unbind_tcf,
	.graft		= qfq_graft_class,
	.leaf		= qfq_class_leaf,
	.qlen_notify	= qfq_qlen_notify,
	.dump		= qfq_dump_class,
	.dump_stats	= qfq_dump_class_stats,
	.walk		= qfq_walk,
};

static struct Qdisc_ops qfq_qdisc_ops __read_mostly = {
	.cl_ops		= &qfq_class_ops,
	.id		= "qfq",
	.priv_size	= sizeof(struct qfq_sched),
	.enqueue	= qfq_enqueue,
	.dequeue	= qfq_dequeue,
	.peek		= qdisc_peek_dequeued,
	.drop		= qfq_drop,
	.init		= qfq_init_qdisc,
	.reset		= qfq_reset_qdisc,
	.destroy	= qfq_destroy_qdisc,
	.owner		= THIS_MODULE,
};

static int __init qfq_init(void)
{
	return register_qdisc(&qfq_qdisc_ops);
}

static void __exit qfq_exit(void)
{
	unregister_qdisc(&qfq_qdisc_ops);
}

module_init(qfq_init);
module_exit(qfq_exit);
MODULE_LICENSE("GPL");
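
/*
 * Userspace usage sketch (hedged: the exact option spellings depend
 * on the iproute2 version; they correspond to the TCA_QFQ_WEIGHT and
 * TCA_QFQ_LMAX attributes parsed above):
 *
 *	tc qdisc add dev eth0 root handle 1: qfq
 *	tc class add dev eth0 parent 1: classid 1:1 qfq weight 10
 *	tc class add dev eth0 parent 1: classid 1:2 qfq weight 1 maxpkt 1514
 *	tc filter add dev eth0 parent 1: protocol ip u32 \
 *		match ip dport 80 0xffff flowid 1:1
 *
 * Backlogged classes then share link bandwidth in proportion to
 * their weights.
 */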