1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * net/sched/sch_qfq.c Quick Fair Queueing Plus Scheduler.
4 *
5 * Copyright (c) 2009 Fabio Checconi, Luigi Rizzo, and Paolo Valente.
6 * Copyright (c) 2012 Paolo Valente.
7 */
8
9 #include <linux/module.h>
10 #include <linux/init.h>
11 #include <linux/bitops.h>
12 #include <linux/errno.h>
13 #include <linux/netdevice.h>
14 #include <linux/pkt_sched.h>
15 #include <net/sch_generic.h>
16 #include <net/pkt_sched.h>
17 #include <net/pkt_cls.h>
18
19
20 /* Quick Fair Queueing Plus
21 ========================
22
23 Sources:
24
25 [1] Paolo Valente,
26 "Reducing the Execution Time of Fair-Queueing Schedulers."
27 http://algo.ing.unimo.it/people/paolo/agg-sched/agg-sched.pdf
28
29 Sources for QFQ:
30
31 [2] Fabio Checconi, Luigi Rizzo, and Paolo Valente: "QFQ: Efficient
32 Packet Scheduling with Tight Bandwidth Distribution Guarantees."
33
34 See also:
35 http://retis.sssup.it/~fabio/linux/qfq/
36 */
37
38 /*
39
40 QFQ+ divides classes into aggregates of at most MAX_AGG_CLASSES
41 classes. Each aggregate is timestamped with a virtual start time S
42 and a virtual finish time F, and scheduled according to its
43 timestamps. S and F are computed as a function of a system virtual
44 time function V. The classes within each aggregate are instead
45 scheduled with DRR.
46
47 To speed up operations, QFQ+ also divides aggregates into a limited
48 number of groups. Which group a class belongs to depends on the
49 ratio between the maximum packet length for the class and the weight
50 of the class. Groups have their own S and F. In the end, QFQ+
51 schedules groups, then aggregates within groups, then classes within
52 aggregates. See [1] and [2] for a full description.
53
54 Virtual time computations.
55
56 S, F and V are all computed in fixed point arithmetic with
57 FRAC_BITS decimal bits.
58
59 QFQ_MAX_INDEX is the maximum index allowed for a group. We need
60 one bit per index.
61 QFQ_MAX_WSHIFT is the maximum power of two supported as a weight.
62
63 The layout of the bits is as below:
64
65 [ MTU_SHIFT ][ FRAC_BITS ]
66 [ MAX_INDEX ][ MIN_SLOT_SHIFT ]
67 ^.__grp->index = 0
68 *.__grp->slot_shift
69
70 where MIN_SLOT_SHIFT is derived by difference from the others.
71
72 The max group index corresponds to Lmax/w_min, where
73 Lmax=1<<MTU_SHIFT, w_min = 1 .
74 From this, and knowing how many groups (MAX_INDEX) we want,
75 we can derive the shift corresponding to each group.
76
77 Because we often need to compute
78 F = S + len/w_i and V = V + len/wsum
79 instead of storing w_i we store the value
80 inv_w = (1<<FRAC_BITS)/w_i
81 so we can do F = S + len * inv_w.
82 We use the weight sum (wsum) in the formulas so we can easily
83 move between static and adaptive weight sum.
84
85 The per-scheduler-instance data contain all the data structures
86 for the scheduler: bitmaps and bucket lists.
87
88 */
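/*
 * A worked example of the fixed-point arithmetic above (a sketch with
 * illustrative numbers): with FRAC_BITS = 30, a class of weight
 * w_i = 4 stores inv_w = ONE_FP/4 = 1<<28. Serving a len = 1000 byte
 * packet then advances its finish time by
 *	len * inv_w = 1000 * (1<<28) = 250 * (1<<30),
 * i.e. by exactly len/w_i = 250 units of virtual time.
 */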
89
90 /*
91 * Maximum number of consecutive slots occupied by backlogged classes
92 * inside a group.
93 */
94 #define QFQ_MAX_SLOTS 32
95
96 /*
97 * Shifts used for aggregate<->group mapping. We allow class weights that are
98 * in the range [1, 2^MAX_WSHIFT], and we try to map each aggregate i to the
99 * group with the smallest index that can support the L_i / r_i configured
100 * for the classes in the aggregate.
101 *
102 * grp->index is the index of the group; and grp->slot_shift
103 * is the shift for the corresponding (scaled) sigma_i.
104 */
105 #define QFQ_MAX_INDEX 24
106 #define QFQ_MAX_WSHIFT 10
107
108 #define QFQ_MAX_WEIGHT (1<<QFQ_MAX_WSHIFT) /* see qfq_slot_insert */
109 #define QFQ_MAX_WSUM (64*QFQ_MAX_WEIGHT)
110
111 #define FRAC_BITS 30 /* fixed point arithmetic */
112 #define ONE_FP (1UL << FRAC_BITS)
113
114 #define QFQ_MTU_SHIFT 16 /* to support TSO/GSO */
115 #define QFQ_MIN_LMAX 512 /* see qfq_slot_insert */
116 #define QFQ_MAX_LMAX (1UL << QFQ_MTU_SHIFT)
117
118 #define QFQ_MAX_AGG_CLASSES 8 /* max num classes per aggregate allowed */
119
120 /*
121 * Possible group states. These values are used as indexes for the bitmaps
122 * array of struct qfq_sched.
123 */
124 enum qfq_state { ER, IR, EB, IB, QFQ_MAX_STATE };
125
126 struct qfq_group;
127
128 struct qfq_aggregate;
129
130 struct qfq_class {
131 struct Qdisc_class_common common;
132
133 struct gnet_stats_basic_sync bstats;
134 struct gnet_stats_queue qstats;
135 struct net_rate_estimator __rcu *rate_est;
136 struct Qdisc *qdisc;
137 struct list_head alist; /* Link for active-classes list. */
138 struct qfq_aggregate *agg; /* Parent aggregate. */
139 int deficit; /* DRR deficit counter. */
140 };
141
142 struct qfq_aggregate {
143 struct hlist_node next; /* Link for the slot list. */
144 u64 S, F; /* flow timestamps (exact) */
145
146 /* group we belong to. In principle we would need the index,
147 * which is log_2(lmax/weight), but we never reference it
148 * directly, only the group.
149 */
150 struct qfq_group *grp;
151
152 /* these are copied from the flowset. */
153 u32 class_weight; /* Weight of each class in this aggregate. */
154 /* Max pkt size for the classes in this aggregate, DRR quantum. */
155 int lmax;
156
157 u32 inv_w; /* ONE_FP/(sum of weights of classes in aggr.). */
158 u32 budgetmax; /* Max budget for this aggregate. */
159 u32 initial_budget, budget; /* Initial and current budget. */
160
161 int num_classes; /* Number of classes in this aggr. */
162 struct list_head active; /* DRR queue of active classes. */
163
164 struct hlist_node nonfull_next; /* See nonfull_aggs in qfq_sched. */
165 };
166
167 struct qfq_group {
168 u64 S, F; /* group timestamps (approx). */
169 unsigned int slot_shift; /* Slot shift. */
170 unsigned int index; /* Group index. */
171 unsigned int front; /* Index of the front slot. */
172 unsigned long full_slots; /* non-empty slots */
173
174 /* Array of RR lists of active aggregates. */
175 struct hlist_head slots[QFQ_MAX_SLOTS];
176 };
177
178 struct qfq_sched {
179 struct tcf_proto __rcu *filter_list;
180 struct tcf_block *block;
181 struct Qdisc_class_hash clhash;
182
183 u64 oldV, V; /* Precise virtual times. */
184 struct qfq_aggregate *in_serv_agg; /* Aggregate being served. */
185 u32 wsum; /* weight sum */
186 u32 iwsum; /* inverse weight sum */
187
188 unsigned long bitmaps[QFQ_MAX_STATE]; /* Group bitmaps. */
189 struct qfq_group groups[QFQ_MAX_INDEX + 1]; /* The groups. */
190 u32 min_slot_shift; /* Index of the group-0 bit in the bitmaps. */
191
192 u32 max_agg_classes; /* Max number of classes per aggr. */
193 struct hlist_head nonfull_aggs; /* Aggs with room for more classes. */
194 };
195
196 /*
197 * Possible reasons why the timestamps of an aggregate are updated
198 * enqueue: the aggregate switches from idle to active and must be scheduled
199 * for service
200 * requeue: the aggregate finishes its budget, so it stops being served and
201 * must be rescheduled for service
202 */
203 enum update_reason {enqueue, requeue};
204
205 static bool cl_is_active(struct qfq_class *cl)
206 {
207 return !list_empty(&cl->alist);
208 }
209
210 static struct qfq_class *qfq_find_class(struct Qdisc *sch, u32 classid)
211 {
212 struct qfq_sched *q = qdisc_priv(sch);
213 struct Qdisc_class_common *clc;
214
215 clc = qdisc_class_find(&q->clhash, classid);
216 if (clc == NULL)
217 return NULL;
218 return container_of(clc, struct qfq_class, common);
219 }
220
221 static struct netlink_range_validation lmax_range = {
222 .min = QFQ_MIN_LMAX,
223 .max = QFQ_MAX_LMAX,
224 };
225
226 static const struct nla_policy qfq_policy[TCA_QFQ_MAX + 1] = {
227 [TCA_QFQ_WEIGHT] = NLA_POLICY_RANGE(NLA_U32, 1, QFQ_MAX_WEIGHT),
228 [TCA_QFQ_LMAX] = NLA_POLICY_FULL_RANGE(NLA_U32, &lmax_range),
229 };
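/*
 * For reference, these attributes are what a user-space configuration
 * such as the following (illustrative device and values) ends up
 * carrying, with "weight" mapped to TCA_QFQ_WEIGHT and "maxpkt" to
 * TCA_QFQ_LMAX:
 *	tc qdisc add dev eth0 root handle 1: qfq
 *	tc class add dev eth0 parent 1: classid 1:1 qfq weight 10 maxpkt 1514
 */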
230
231 /*
232 * Calculate a flow index, given its weight and maximum packet length.
233 * index = log_2(maxlen/weight) but we need to apply the scaling.
234 * This is used only once at flow creation.
235 */
236 static int qfq_calc_index(u32 inv_w, unsigned int maxlen, u32 min_slot_shift)
237 {
238 u64 slot_size = (u64)maxlen * inv_w;
239 unsigned long size_map;
240 int index = 0;
241
242 size_map = slot_size >> min_slot_shift;
243 if (!size_map)
244 goto out;
245
246 index = __fls(size_map) + 1; /* basically a log_2 */
247 index -= !(slot_size - (1ULL << (index + min_slot_shift - 1)));
248
249 if (index < 0)
250 index = 0;
251 out:
252 pr_debug("qfq calc_index: W = %lu, L = %u, I = %d\n",
253 (unsigned long) ONE_FP/inv_w, maxlen, index);
254
255 return index;
256 }
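/*
 * A sketch of the computation above, assuming the min_slot_shift = 25
 * that follows from the defaults (FRAC_BITS + QFQ_MTU_SHIFT +
 * max_cl_shift - QFQ_MAX_INDEX = 30 + 16 + 3 - 24, see
 * qfq_init_qdisc()): for weight 1 (inv_w = 1<<30) and maxlen = 2048,
 *	slot_size = 2048ULL << 30 = 1ULL << 41,
 *	size_map  = slot_size >> 25 = 1 << 16,
 * so index starts as __fls(1 << 16) + 1 = 17; since slot_size equals
 * exactly 1ULL << (17 + 25 - 1), the correction lowers it to 16, the
 * smallest group whose slot size (1ULL << 41) still fits maxlen/weight.
 */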
257
258 static void qfq_deactivate_agg(struct qfq_sched *, struct qfq_aggregate *);
259 static void qfq_activate_agg(struct qfq_sched *, struct qfq_aggregate *,
260 enum update_reason);
261
262 static void qfq_init_agg(struct qfq_sched *q, struct qfq_aggregate *agg,
263 u32 lmax, u32 weight)
264 {
265 INIT_LIST_HEAD(&agg->active);
266 hlist_add_head(&agg->nonfull_next, &q->nonfull_aggs);
267
268 agg->lmax = lmax;
269 agg->class_weight = weight;
270 }
271
272 static struct qfq_aggregate *qfq_find_agg(struct qfq_sched *q,
273 u32 lmax, u32 weight)
274 {
275 struct qfq_aggregate *agg;
276
277 hlist_for_each_entry(agg, &q->nonfull_aggs, nonfull_next)
278 if (agg->lmax == lmax && agg->class_weight == weight)
279 return agg;
280
281 return NULL;
282 }
283
284
285 /* Update aggregate as a function of the new number of classes. */
286 static void qfq_update_agg(struct qfq_sched *q, struct qfq_aggregate *agg,
287 int new_num_classes)
288 {
289 u32 new_agg_weight;
290
291 if (new_num_classes == q->max_agg_classes)
292 hlist_del_init(&agg->nonfull_next);
293
294 if (agg->num_classes > new_num_classes &&
295 new_num_classes == q->max_agg_classes - 1) /* agg no more full */
296 hlist_add_head(&agg->nonfull_next, &q->nonfull_aggs);
297
298 /* The next assignment may let
299 * agg->initial_budget > agg->budgetmax
300 * hold, we will take it into account in charge_actual_service().
301 */
302 agg->budgetmax = new_num_classes * agg->lmax;
303 new_agg_weight = agg->class_weight * new_num_classes;
304 agg->inv_w = ONE_FP/new_agg_weight;
305
306 if (agg->grp == NULL) {
307 int i = qfq_calc_index(agg->inv_w, agg->budgetmax,
308 q->min_slot_shift);
309 agg->grp = &q->groups[i];
310 }
311
312 q->wsum +=
313 (int) agg->class_weight * (new_num_classes - agg->num_classes);
314 q->iwsum = ONE_FP / q->wsum;
315
316 agg->num_classes = new_num_classes;
317 }
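/*
 * Numeric sketch of the update above (illustrative values): growing an
 * aggregate with class_weight = 2 from 3 to 4 classes makes
 * budgetmax = 4 * lmax, the aggregate weight 2 * 4 = 8 (so
 * inv_w = ONE_FP/8), and adds 2 * (4 - 3) = 2 to wsum.
 */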
318
319 /* Add class to aggregate. */
320 static void qfq_add_to_agg(struct qfq_sched *q,
321 struct qfq_aggregate *agg,
322 struct qfq_class *cl)
323 {
324 cl->agg = agg;
325
326 qfq_update_agg(q, agg, agg->num_classes+1);
327 if (cl->qdisc->q.qlen > 0) { /* adding an active class */
328 list_add_tail(&cl->alist, &agg->active);
329 if (list_first_entry(&agg->active, struct qfq_class, alist) ==
330 cl && q->in_serv_agg != agg) /* agg was inactive */
331 qfq_activate_agg(q, agg, enqueue); /* schedule agg */
332 }
333 }
334
335 static struct qfq_aggregate *qfq_choose_next_agg(struct qfq_sched *);
336
337 static void qfq_destroy_agg(struct qfq_sched *q, struct qfq_aggregate *agg)
338 {
339 hlist_del_init(&agg->nonfull_next);
340 q->wsum -= agg->class_weight;
341 if (q->wsum != 0)
342 q->iwsum = ONE_FP / q->wsum;
343
344 if (q->in_serv_agg == agg)
345 q->in_serv_agg = qfq_choose_next_agg(q);
346 kfree(agg);
347 }
348
349 /* Deschedule class from within its parent aggregate. */
350 static void qfq_deactivate_class(struct qfq_sched *q, struct qfq_class *cl)
351 {
352 struct qfq_aggregate *agg = cl->agg;
353
354
355 list_del_init(&cl->alist); /* remove from RR queue of the aggregate */
356 if (list_empty(&agg->active)) /* agg is now inactive */
357 qfq_deactivate_agg(q, agg);
358 }
359
360 /* Remove class from its parent aggregate. */
361 static void qfq_rm_from_agg(struct qfq_sched *q, struct qfq_class *cl)
362 {
363 struct qfq_aggregate *agg = cl->agg;
364
365 cl->agg = NULL;
366 if (agg->num_classes == 1) { /* agg being emptied, destroy it */
367 qfq_destroy_agg(q, agg);
368 return;
369 }
370 qfq_update_agg(q, agg, agg->num_classes-1);
371 }
372
373 /* Deschedule class and remove it from its parent aggregate. */
374 static void qfq_deact_rm_from_agg(struct qfq_sched *q, struct qfq_class *cl)
375 {
376 if (cl->qdisc->q.qlen > 0) /* class is active */
377 qfq_deactivate_class(q, cl);
378
379 qfq_rm_from_agg(q, cl);
380 }
381
382 /* Move class to a new aggregate, matching the new class weight and/or lmax */
383 static int qfq_change_agg(struct Qdisc *sch, struct qfq_class *cl, u32 weight,
384 u32 lmax)
385 {
386 struct qfq_sched *q = qdisc_priv(sch);
387 struct qfq_aggregate *new_agg;
388
389 /* 'lmax' can range from [QFQ_MIN_LMAX, pktlen + stab overhead] */
390 if (lmax > QFQ_MAX_LMAX)
391 return -EINVAL;
392
393 new_agg = qfq_find_agg(q, lmax, weight);
394 if (new_agg == NULL) { /* create new aggregate */
395 new_agg = kzalloc(sizeof(*new_agg), GFP_ATOMIC);
396 if (new_agg == NULL)
397 return -ENOBUFS;
398 qfq_init_agg(q, new_agg, lmax, weight);
399 }
400 qfq_deact_rm_from_agg(q, cl);
401 qfq_add_to_agg(q, new_agg, cl);
402
403 return 0;
404 }
405
406 static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
407 struct nlattr **tca, unsigned long *arg,
408 struct netlink_ext_ack *extack)
409 {
410 struct qfq_sched *q = qdisc_priv(sch);
411 struct qfq_class *cl = (struct qfq_class *)*arg;
412 bool existing = false;
413 struct nlattr *tb[TCA_QFQ_MAX + 1];
414 struct qfq_aggregate *new_agg = NULL;
415 u32 weight, lmax, inv_w, old_weight, old_lmax;
416 int err;
417 int delta_w;
418
419 if (NL_REQ_ATTR_CHECK(extack, NULL, tca, TCA_OPTIONS)) {
420 NL_SET_ERR_MSG_MOD(extack, "missing options");
421 return -EINVAL;
422 }
423
424 err = nla_parse_nested_deprecated(tb, TCA_QFQ_MAX, tca[TCA_OPTIONS],
425 qfq_policy, extack);
426 if (err < 0)
427 return err;
428
429 if (tb[TCA_QFQ_WEIGHT])
430 weight = nla_get_u32(tb[TCA_QFQ_WEIGHT]);
431 else
432 weight = 1;
433
434 if (tb[TCA_QFQ_LMAX]) {
435 lmax = nla_get_u32(tb[TCA_QFQ_LMAX]);
436 } else {
437 /* MTU size is user controlled */
438 lmax = psched_mtu(qdisc_dev(sch));
439 if (lmax < QFQ_MIN_LMAX || lmax > QFQ_MAX_LMAX) {
440 NL_SET_ERR_MSG_MOD(extack,
441 "MTU size out of bounds for qfq");
442 return -EINVAL;
443 }
444 }
445
446 inv_w = ONE_FP / weight;
447 weight = ONE_FP / inv_w;
448
449 if (cl != NULL) {
450 sch_tree_lock(sch);
451 old_weight = cl->agg->class_weight;
452 old_lmax = cl->agg->lmax;
453 sch_tree_unlock(sch);
454 if (lmax == old_lmax && weight == old_weight)
455 return 0; /* nothing to change */
456 }
457
458 delta_w = weight - (cl ? old_weight : 0);
459
460 if (q->wsum + delta_w > QFQ_MAX_WSUM) {
461 NL_SET_ERR_MSG_FMT_MOD(extack,
462 "total weight out of range (%d + %u)\n",
463 delta_w, q->wsum);
464 return -EINVAL;
465 }
466
467 if (cl != NULL) { /* modify existing class */
468 if (tca[TCA_RATE]) {
469 err = gen_replace_estimator(&cl->bstats, NULL,
470 &cl->rate_est,
471 NULL,
472 true,
473 tca[TCA_RATE]);
474 if (err)
475 return err;
476 }
477 existing = true;
478 goto set_change_agg;
479 }
480
481 /* create and init new class */
482 cl = kzalloc(sizeof(struct qfq_class), GFP_KERNEL);
483 if (cl == NULL)
484 return -ENOBUFS;
485
486 gnet_stats_basic_sync_init(&cl->bstats);
487 cl->common.classid = classid;
488 cl->deficit = lmax;
489 INIT_LIST_HEAD(&cl->alist);
490
491 cl->qdisc = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
492 classid, NULL);
493 if (cl->qdisc == NULL)
494 cl->qdisc = &noop_qdisc;
495
496 if (tca[TCA_RATE]) {
497 err = gen_new_estimator(&cl->bstats, NULL,
498 &cl->rate_est,
499 NULL,
500 true,
501 tca[TCA_RATE]);
502 if (err)
503 goto destroy_class;
504 }
505
506 if (cl->qdisc != &noop_qdisc)
507 qdisc_hash_add(cl->qdisc, true);
508
509 set_change_agg:
510 sch_tree_lock(sch);
511 new_agg = qfq_find_agg(q, lmax, weight);
512 if (new_agg == NULL) { /* create new aggregate */
513 sch_tree_unlock(sch);
514 new_agg = kzalloc(sizeof(*new_agg), GFP_KERNEL);
515 if (new_agg == NULL) {
516 err = -ENOBUFS;
517 gen_kill_estimator(&cl->rate_est);
518 goto destroy_class;
519 }
520 sch_tree_lock(sch);
521 qfq_init_agg(q, new_agg, lmax, weight);
522 }
523 if (existing)
524 qfq_deact_rm_from_agg(q, cl);
525 else
526 qdisc_class_hash_insert(&q->clhash, &cl->common);
527 qfq_add_to_agg(q, new_agg, cl);
528 sch_tree_unlock(sch);
529 qdisc_class_hash_grow(sch, &q->clhash);
530
531 *arg = (unsigned long)cl;
532 return 0;
533
534 destroy_class:
535 qdisc_put(cl->qdisc);
536 kfree(cl);
537 return err;
538 }
539
540 static void qfq_destroy_class(struct Qdisc *sch, struct qfq_class *cl)
541 {
542 struct qfq_sched *q = qdisc_priv(sch);
543
544 qfq_rm_from_agg(q, cl);
545 gen_kill_estimator(&cl->rate_est);
546 qdisc_put(cl->qdisc);
547 kfree(cl);
548 }
549
550 static int qfq_delete_class(struct Qdisc *sch, unsigned long arg,
551 struct netlink_ext_ack *extack)
552 {
553 struct qfq_sched *q = qdisc_priv(sch);
554 struct qfq_class *cl = (struct qfq_class *)arg;
555
556 if (qdisc_class_in_use(&cl->common)) {
557 NL_SET_ERR_MSG_MOD(extack, "QFQ class in use");
558 return -EBUSY;
559 }
560
561 sch_tree_lock(sch);
562
563 qdisc_purge_queue(cl->qdisc);
564 qdisc_class_hash_remove(&q->clhash, &cl->common);
565 qfq_destroy_class(sch, cl);
566
567 sch_tree_unlock(sch);
568
569 return 0;
570 }
571
572 static unsigned long qfq_search_class(struct Qdisc *sch, u32 classid)
573 {
574 return (unsigned long)qfq_find_class(sch, classid);
575 }
576
577 static struct tcf_block *qfq_tcf_block(struct Qdisc *sch, unsigned long cl,
578 struct netlink_ext_ack *extack)
579 {
580 struct qfq_sched *q = qdisc_priv(sch);
581
582 if (cl)
583 return NULL;
584
585 return q->block;
586 }
587
588 static unsigned long qfq_bind_tcf(struct Qdisc *sch, unsigned long parent,
589 u32 classid)
590 {
591 struct qfq_class *cl = qfq_find_class(sch, classid);
592
593 if (cl)
594 qdisc_class_get(&cl->common);
595
596 return (unsigned long)cl;
597 }
598
599 static void qfq_unbind_tcf(struct Qdisc *sch, unsigned long arg)
600 {
601 struct qfq_class *cl = (struct qfq_class *)arg;
602
603 qdisc_class_put(&cl->common);
604 }
605
606 static int qfq_graft_class(struct Qdisc *sch, unsigned long arg,
607 struct Qdisc *new, struct Qdisc **old,
608 struct netlink_ext_ack *extack)
609 {
610 struct qfq_class *cl = (struct qfq_class *)arg;
611
612 if (new == NULL) {
613 new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
614 cl->common.classid, NULL);
615 if (new == NULL)
616 new = &noop_qdisc;
617 }
618
619 *old = qdisc_replace(sch, new, &cl->qdisc);
620 return 0;
621 }
622
623 static struct Qdisc *qfq_class_leaf(struct Qdisc *sch, unsigned long arg)
624 {
625 struct qfq_class *cl = (struct qfq_class *)arg;
626
627 return cl->qdisc;
628 }
629
630 static int qfq_dump_class(struct Qdisc *sch, unsigned long arg,
631 struct sk_buff *skb, struct tcmsg *tcm)
632 {
633 struct qfq_class *cl = (struct qfq_class *)arg;
634 struct nlattr *nest;
635 u32 class_weight, lmax;
636
637 tcm->tcm_parent = TC_H_ROOT;
638 tcm->tcm_handle = cl->common.classid;
639 tcm->tcm_info = cl->qdisc->handle;
640
641 nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
642 if (nest == NULL)
643 goto nla_put_failure;
644
645 sch_tree_lock(sch);
646 class_weight = cl->agg->class_weight;
647 lmax = cl->agg->lmax;
648 sch_tree_unlock(sch);
649 if (nla_put_u32(skb, TCA_QFQ_WEIGHT, class_weight) ||
650 nla_put_u32(skb, TCA_QFQ_LMAX, lmax))
651 goto nla_put_failure;
652 return nla_nest_end(skb, nest);
653
654 nla_put_failure:
655 nla_nest_cancel(skb, nest);
656 return -EMSGSIZE;
657 }
658
659 static int qfq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
660 struct gnet_dump *d)
661 {
662 struct qfq_class *cl = (struct qfq_class *)arg;
663 struct tc_qfq_stats xstats;
664
665 memset(&xstats, 0, sizeof(xstats));
666
667 sch_tree_lock(sch);
668 xstats.weight = cl->agg->class_weight;
669 xstats.lmax = cl->agg->lmax;
670 sch_tree_unlock(sch);
671
672 if (gnet_stats_copy_basic(d, NULL, &cl->bstats, true) < 0 ||
673 gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
674 qdisc_qstats_copy(d, cl->qdisc) < 0)
675 return -1;
676
677 return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
678 }
679
680 static void qfq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
681 {
682 struct qfq_sched *q = qdisc_priv(sch);
683 struct qfq_class *cl;
684 unsigned int i;
685
686 if (arg->stop)
687 return;
688
689 for (i = 0; i < q->clhash.hashsize; i++) {
690 hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
691 if (!tc_qdisc_stats_dump(sch, (unsigned long)cl, arg))
692 return;
693 }
694 }
695 }
696
697 static struct qfq_class *qfq_classify(struct sk_buff *skb, struct Qdisc *sch,
698 int *qerr)
699 {
700 struct qfq_sched *q = qdisc_priv(sch);
701 struct qfq_class *cl;
702 struct tcf_result res;
703 struct tcf_proto *fl;
704 int result;
705
706 if (TC_H_MAJ(skb->priority ^ sch->handle) == 0) {
707 pr_debug("qfq_classify: found %d\n", skb->priority);
708 cl = qfq_find_class(sch, skb->priority);
709 if (cl != NULL)
710 return cl;
711 }
712
713 *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
714 fl = rcu_dereference_bh(q->filter_list);
715 result = tcf_classify(skb, NULL, fl, &res, false);
716 if (result >= 0) {
717 #ifdef CONFIG_NET_CLS_ACT
718 switch (result) {
719 case TC_ACT_QUEUED:
720 case TC_ACT_STOLEN:
721 case TC_ACT_TRAP:
722 *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
723 fallthrough;
724 case TC_ACT_SHOT:
725 return NULL;
726 }
727 #endif
728 cl = (struct qfq_class *)res.class;
729 if (cl == NULL)
730 cl = qfq_find_class(sch, res.classid);
731 return cl;
732 }
733
734 return NULL;
735 }
736
737 /* Generic comparison function, handling wraparound. */
738 static inline int qfq_gt(u64 a, u64 b)
739 {
740 return (s64)(a - b) > 0;
741 }
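/*
 * E.g. with b = ~0ULL - 5 and a = 10 (a computed after the 64-bit
 * virtual time wrapped), a - b = 16 and the signed test reports a > b:
 * the intended "a is later" answer, although a < b as plain unsigned
 * values.
 */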
742
743 /* Round a precise timestamp to its slotted value. */
744 static inline u64 qfq_round_down(u64 ts, unsigned int shift)
745 {
746 return ts & ~((1ULL << shift) - 1);
747 }
748
749 /* return the pointer to the group with lowest index in the bitmap */
750 static inline struct qfq_group *qfq_ffs(struct qfq_sched *q,
751 unsigned long bitmap)
752 {
753 int index = __ffs(bitmap);
754 return &q->groups[index];
755 }
756 /* Calculate a mask to mimic what would be ffs_from(). */
757 static inline unsigned long mask_from(unsigned long bitmap, int from)
758 {
759 return bitmap & ~((1UL << from) - 1);
760 }
761
762 /*
763 * The state computation relies on ER=0, IR=1, EB=2, IB=3
764 * First compute eligibility comparing grp->S, q->V,
765 * then check if someone is blocking us and possibly add EB
766 */
767 static int qfq_calc_state(struct qfq_sched *q, const struct qfq_group *grp)
768 {
769 /* if S > V we are not eligible */
770 unsigned int state = qfq_gt(grp->S, q->V);
771 unsigned long mask = mask_from(q->bitmaps[ER], grp->index);
772 struct qfq_group *next;
773
774 if (mask) {
775 next = qfq_ffs(q, mask);
776 if (qfq_gt(grp->F, next->F))
777 state |= EB;
778 }
779
780 return state;
781 }
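/*
 * Example of the encoding at work: an eligible group (S <= V) whose F
 * exceeds the F of the first ER group at index >= its own gets
 * 0 | EB = EB; were it ineligible (S > V) it would get 1 | EB = IB.
 */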
782
783
784 /*
785 * In principle
786 * q->bitmaps[dst] |= q->bitmaps[src] & mask;
787 * q->bitmaps[src] &= ~mask;
788 * but we should make sure that src != dst
789 */
790 static inline void qfq_move_groups(struct qfq_sched *q, unsigned long mask,
791 int src, int dst)
792 {
793 q->bitmaps[dst] |= q->bitmaps[src] & mask;
794 q->bitmaps[src] &= ~mask;
795 }
796
797 static void qfq_unblock_groups(struct qfq_sched *q, int index, u64 old_F)
798 {
799 unsigned long mask = mask_from(q->bitmaps[ER], index + 1);
800 struct qfq_group *next;
801
802 if (mask) {
803 next = qfq_ffs(q, mask);
804 if (!qfq_gt(next->F, old_F))
805 return;
806 }
807
808 mask = (1UL << index) - 1;
809 qfq_move_groups(q, mask, EB, ER);
810 qfq_move_groups(q, mask, IB, IR);
811 }
812
813 /*
814 * perhaps
815 *
816 old_V ^= q->V;
817 old_V >>= q->min_slot_shift;
818 if (old_V) {
819 ...
820 }
821 *
822 */
823 static void qfq_make_eligible(struct qfq_sched *q)
824 {
825 unsigned long vslot = q->V >> q->min_slot_shift;
826 unsigned long old_vslot = q->oldV >> q->min_slot_shift;
827
828 if (vslot != old_vslot) {
829 unsigned long mask;
830 int last_flip_pos = fls(vslot ^ old_vslot);
831
832 if (last_flip_pos > 31) /* higher than the number of groups */
833 mask = ~0UL; /* make all groups eligible */
834 else
835 mask = (1UL << last_flip_pos) - 1;
836
837 qfq_move_groups(q, mask, IR, ER);
838 qfq_move_groups(q, mask, IB, EB);
839 }
840 }
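/*
 * Mask example for the function above: if old_vslot = 0x7 and
 * vslot = 0x8, then vslot ^ old_vslot = 0xf, fls(0xf) = 4 and
 * mask = (1UL << 4) - 1 = 0xf, so groups 0-3 (whose slot boundaries V
 * has just crossed) move from IR to ER and from IB to EB.
 */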
841
842 /*
843 * The index of the slot in which the input aggregate agg is to be
844 * inserted must not be higher than QFQ_MAX_SLOTS-2. There is a '-2'
845 * and not a '-1' because the start time of the group may be moved
846 * backward by one slot after the aggregate has been inserted, and
847 * this would cause non-empty slots to be right-shifted by one
848 * position.
849 *
850 * QFQ+ fully satisfies this bound to the slot index if the parameters
851 * of the classes are not changed dynamically, and if QFQ+ never
852 * happens to postpone the service of agg unjustly, i.e., it never
853 * happens that the aggregate becomes backlogged and eligible, or just
854 * eligible, while an aggregate with a higher approximated finish time
855 * is being served. In particular, in this case QFQ+ guarantees that
856 * the timestamps of agg are low enough that the slot index is never
857 * higher than 2. Unfortunately, QFQ+ cannot provide the same
858 * guarantee if it happens to unjustly postpone the service of agg, or
859 * if the parameters of some class are changed.
860 *
861 * As for the first event, i.e., an out-of-order service, the
862 * upper bound to the slot index guaranteed by QFQ+ grows to
863 * 2 +
864 * QFQ_MAX_AGG_CLASSES * ((1<<QFQ_MTU_SHIFT)/QFQ_MIN_LMAX) *
865 * (current_max_weight/current_wsum) <= 2 + 8 * 128 * 1.
866 *
867 * The following function deals with this problem by backward-shifting
868 * the timestamps of agg, if needed, so as to guarantee that the slot
869 * index is never higher than QFQ_MAX_SLOTS-2. This backward-shift may
870 * cause the service of other aggregates to be postponed, yet the
871 * worst-case guarantees of these aggregates are not violated. In
872 * fact, in case of no out-of-order service, the timestamps of agg
873 * would have been even lower than they are after the backward shift,
874 * because QFQ+ would have guaranteed a maximum value equal to 2 for
875 * the slot index, and 2 < QFQ_MAX_SLOTS-2. Hence the aggregates whose
876 * service is postponed because of the backward-shift would have
877 * however waited for the service of agg before being served.
878 *
879 * The other event that may cause the slot index to be higher than 2
880 * for agg is a recent change of the parameters of some class. If the
881 * weight of a class is increased or the lmax (max_pkt_size) of the
882 * class is decreased, then a new aggregate with smaller slot size
883 * than the original parent aggregate of the class may happen to be
884 * activated. The activation of this aggregate should be properly
885 * delayed to when the service of the class has finished in the ideal
886 * system tracked by QFQ+. If the activation of the aggregate is not
887 * delayed to this reference time instant, then this aggregate may be
888 * unjustly served before other aggregates waiting for service. This
889 * may cause the above bound to the slot index to be violated for some
890 * of these unlucky aggregates.
891 *
892 * Instead of delaying the activation of the new aggregate, which is
893 * quite complex, the above-discussed capping of the slot index is
894 * used to handle also the consequences of a change of the parameters
895 * of a class.
896 */
897 static void qfq_slot_insert(struct qfq_group *grp, struct qfq_aggregate *agg,
898 u64 roundedS)
899 {
900 u64 slot = (roundedS - grp->S) >> grp->slot_shift;
901 unsigned int i; /* slot index in the bucket list */
902
903 if (unlikely(slot > QFQ_MAX_SLOTS - 2)) {
904 u64 deltaS = roundedS - grp->S -
905 ((u64)(QFQ_MAX_SLOTS - 2)<<grp->slot_shift);
906 agg->S -= deltaS;
907 agg->F -= deltaS;
908 slot = QFQ_MAX_SLOTS - 2;
909 }
910
911 i = (grp->front + slot) % QFQ_MAX_SLOTS;
912
913 hlist_add_head(&agg->next, &grp->slots[i]);
914 __set_bit(slot, &grp->full_slots);
915 }
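/*
 * Bucket arithmetic sketch for the function above (illustrative
 * numbers): with grp->slot_shift = 41 and roundedS - grp->S =
 * 3ULL << 41, the aggregate lands in logical slot 3, i.e. bucket
 * (grp->front + 3) % QFQ_MAX_SLOTS; only a logical slot beyond
 * QFQ_MAX_SLOTS - 2 = 30 triggers the backward shift of S and F.
 */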
916
917 /* Maybe introduce hlist_first_entry?? */
918 static struct qfq_aggregate *qfq_slot_head(struct qfq_group *grp)
919 {
920 return hlist_entry(grp->slots[grp->front].first,
921 struct qfq_aggregate, next);
922 }
923
924 /*
925 * remove the entry from the slot
926 */
927 static void qfq_front_slot_remove(struct qfq_group *grp)
928 {
929 struct qfq_aggregate *agg = qfq_slot_head(grp);
930
931 BUG_ON(!agg);
932 hlist_del(&agg->next);
933 if (hlist_empty(&grp->slots[grp->front]))
934 __clear_bit(0, &grp->full_slots);
935 }
936
937 /*
938 * Returns the first aggregate in the first non-empty bucket of the
939 * group. As a side effect, adjusts the bucket list so the first
940 * non-empty bucket is at position 0 in full_slots.
941 */
942 static struct qfq_aggregate *qfq_slot_scan(struct qfq_group *grp)
943 {
944 unsigned int i;
945
946 pr_debug("qfq slot_scan: grp %u full %#lx\n",
947 grp->index, grp->full_slots);
948
949 if (grp->full_slots == 0)
950 return NULL;
951
952 i = __ffs(grp->full_slots); /* zero based */
953 if (i > 0) {
954 grp->front = (grp->front + i) % QFQ_MAX_SLOTS;
955 grp->full_slots >>= i;
956 }
957
958 return qfq_slot_head(grp);
959 }
960
961 /*
962 * adjust the bucket list. When the start time of a group decreases,
963 * we move the index down (modulo QFQ_MAX_SLOTS) so we don't need to
964 * move the objects. The mask of occupied slots must be shifted
965 * because we use ffs() to find the first non-empty slot.
966 * This covers decreases in the group's start time, but what about
967 * increases of the start time?
968 * Here too we should make sure that i is less than 32
969 */
970 static void qfq_slot_rotate(struct qfq_group *grp, u64 roundedS)
971 {
972 unsigned int i = (grp->S - roundedS) >> grp->slot_shift;
973
974 grp->full_slots <<= i;
975 grp->front = (grp->front - i) % QFQ_MAX_SLOTS;
976 }
977
978 static void qfq_update_eligible(struct qfq_sched *q)
979 {
980 struct qfq_group *grp;
981 unsigned long ineligible;
982
983 ineligible = q->bitmaps[IR] | q->bitmaps[IB];
984 if (ineligible) {
985 if (!q->bitmaps[ER]) {
986 grp = qfq_ffs(q, ineligible);
987 if (qfq_gt(grp->S, q->V))
988 q->V = grp->S;
989 }
990 qfq_make_eligible(q);
991 }
992 }
993
994 /* Dequeue head packet of the head class in the DRR queue of the aggregate. */
995 static struct sk_buff *agg_dequeue(struct qfq_aggregate *agg,
996 struct qfq_class *cl, unsigned int len)
997 {
998 struct sk_buff *skb = qdisc_dequeue_peeked(cl->qdisc);
999
1000 if (!skb)
1001 return NULL;
1002
1003 cl->deficit -= (int) len;
1004
1005 if (cl->qdisc->q.qlen == 0) /* no more packets, remove from list */
1006 list_del_init(&cl->alist);
1007 else if (cl->deficit < qdisc_pkt_len(cl->qdisc->ops->peek(cl->qdisc))) {
1008 cl->deficit += agg->lmax;
1009 list_move_tail(&cl->alist, &agg->active);
1010 }
1011
1012 return skb;
1013 }
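/*
 * DRR bookkeeping example for the function above: with
 * agg->lmax = 1500 and cl->deficit = 1500, dequeueing a 1000-byte
 * packet leaves deficit = 500; if the next packet of cl is 600 bytes,
 * the class moves to the tail with deficit = 500 + 1500 = 2000 and
 * sends that packet in a later round.
 */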
1014
1015 static inline struct sk_buff *qfq_peek_skb(struct qfq_aggregate *agg,
1016 struct qfq_class **cl,
1017 unsigned int *len)
1018 {
1019 struct sk_buff *skb;
1020
1021 *cl = list_first_entry(&agg->active, struct qfq_class, alist);
1022 skb = (*cl)->qdisc->ops->peek((*cl)->qdisc);
1023 if (skb == NULL)
1024 WARN_ONCE(1, "qfq_dequeue: non-workconserving leaf\n");
1025 else
1026 *len = qdisc_pkt_len(skb);
1027
1028 return skb;
1029 }
1030
1031 /* Update F according to the actual service received by the aggregate. */
1032 static inline void charge_actual_service(struct qfq_aggregate *agg)
1033 {
1034 /* Compute the service received by the aggregate, taking into
1035 * account that, after decreasing the number of classes in
1036 * agg, it may happen that
1037 * agg->initial_budget - agg->budget > agg->budgetmax
1038 */
1039 u32 service_received = min(agg->budgetmax,
1040 agg->initial_budget - agg->budget);
1041
1042 agg->F = agg->S + (u64)service_received * agg->inv_w;
1043 }
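/*
 * E.g. if budget dropped from an initial 4500 to 1500 while in service
 * (and budgetmax was not lowered below 3000 meanwhile),
 * service_received = 3000 and F advances by 3000 * inv_w: the bytes
 * actually sent, scaled by the inverse aggregate weight.
 */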
1044
1045 /* Assign a reasonable start time for a new aggregate in group i.
1046 * Admissible values for \hat(F) are multiples of \sigma_i
1047 * no greater than V+\sigma_i . Larger values mean that
1048 * we had a wraparound so we consider the timestamp to be stale.
1049 *
1050 * If F is not stale and F >= V then we set S = F.
1051 * Otherwise we should assign S = V, but this may violate
1052 * the ordering in EB (see [2]). So, if we have groups in ER,
1053 * set S to the F_j of the first group j which would be blocking us.
1054 * We are guaranteed not to move S backward because
1055 * otherwise our group i would still be blocked.
1056 */
1057 static void qfq_update_start(struct qfq_sched *q, struct qfq_aggregate *agg)
1058 {
1059 unsigned long mask;
1060 u64 limit, roundedF;
1061 int slot_shift = agg->grp->slot_shift;
1062
1063 roundedF = qfq_round_down(agg->F, slot_shift);
1064 limit = qfq_round_down(q->V, slot_shift) + (1ULL << slot_shift);
1065
1066 if (!qfq_gt(agg->F, q->V) || qfq_gt(roundedF, limit)) {
1067 /* timestamp was stale */
1068 mask = mask_from(q->bitmaps[ER], agg->grp->index);
1069 if (mask) {
1070 struct qfq_group *next = qfq_ffs(q, mask);
1071 if (qfq_gt(roundedF, next->F)) {
1072 if (qfq_gt(limit, next->F))
1073 agg->S = next->F;
1074 else /* preserve timestamp correctness */
1075 agg->S = limit;
1076 return;
1077 }
1078 }
1079 agg->S = q->V;
1080 } else /* timestamp is not stale */
1081 agg->S = agg->F;
1082 }
1083
1084 /* Update the timestamps of agg before scheduling/rescheduling it for
1085 * service. In particular, assign to agg->F its maximum possible
1086 * value, i.e., the virtual finish time with which the aggregate
1087 * should be labeled if it used all its budget once in service.
1088 */
1089 static inline void
1090 qfq_update_agg_ts(struct qfq_sched *q,
1091 struct qfq_aggregate *agg, enum update_reason reason)
1092 {
1093 if (reason != requeue)
1094 qfq_update_start(q, agg);
1095 else /* just charge agg for the service received */
1096 agg->S = agg->F;
1097
1098 agg->F = agg->S + (u64)agg->budgetmax * agg->inv_w;
1099 }
1100
1101 static void qfq_schedule_agg(struct qfq_sched *q, struct qfq_aggregate *agg);
1102
1103 static struct sk_buff *qfq_dequeue(struct Qdisc *sch)
1104 {
1105 struct qfq_sched *q = qdisc_priv(sch);
1106 struct qfq_aggregate *in_serv_agg = q->in_serv_agg;
1107 struct qfq_class *cl;
1108 struct sk_buff *skb = NULL;
1109 /* next-packet len, 0 means no more active classes in in-service agg */
1110 unsigned int len = 0;
1111
1112 if (in_serv_agg == NULL)
1113 return NULL;
1114
1115 if (!list_empty(&in_serv_agg->active))
1116 skb = qfq_peek_skb(in_serv_agg, &cl, &len);
1117
1118 /*
1119 * If there are no active classes in the in-service aggregate,
1120 * or if the aggregate has not enough budget to serve its next
1121 * class, then choose the next aggregate to serve.
1122 */
1123 if (len == 0 || in_serv_agg->budget < len) {
1124 charge_actual_service(in_serv_agg);
1125
1126 /* recharge the budget of the aggregate */
1127 in_serv_agg->initial_budget = in_serv_agg->budget =
1128 in_serv_agg->budgetmax;
1129
1130 if (!list_empty(&in_serv_agg->active)) {
1131 /*
1132 * Still active: reschedule for
1133 * service. Possible optimization: if no other
1134 * aggregate is active, then there is no point
1135 * in rescheduling this aggregate, and we can
1136 * just keep it as the in-service one. This
1137 * should be however a corner case, and to
1138 * handle it, we would need to maintain an
1139 * extra num_active_aggs field.
1140 */
1141 qfq_update_agg_ts(q, in_serv_agg, requeue);
1142 qfq_schedule_agg(q, in_serv_agg);
1143 } else if (sch->q.qlen == 0) { /* no aggregate to serve */
1144 q->in_serv_agg = NULL;
1145 return NULL;
1146 }
1147
1148 /*
1149 * If we get here, there are other aggregates queued:
1150 * choose the new aggregate to serve.
1151 */
1152 in_serv_agg = q->in_serv_agg = qfq_choose_next_agg(q);
1153 skb = qfq_peek_skb(in_serv_agg, &cl, &len);
1154 }
1155 if (!skb)
1156 return NULL;
1157
1158 sch->q.qlen--;
1159
1160 skb = agg_dequeue(in_serv_agg, cl, len);
1161
1162 if (!skb) {
1163 sch->q.qlen++;
1164 return NULL;
1165 }
1166
1167 qdisc_qstats_backlog_dec(sch, skb);
1168 qdisc_bstats_update(sch, skb);
1169
1170 /* If lmax is lowered, through qfq_change_class, for a class
1171 * owning pending packets with larger size than the new value
1172 * of lmax, then the following condition may hold.
1173 */
1174 if (unlikely(in_serv_agg->budget < len))
1175 in_serv_agg->budget = 0;
1176 else
1177 in_serv_agg->budget -= len;
1178
1179 q->V += (u64)len * q->iwsum;
1180 pr_debug("qfq dequeue: len %u F %lld now %lld\n",
1181 len, (unsigned long long) in_serv_agg->F,
1182 (unsigned long long) q->V);
1183
1184 return skb;
1185 }
1186
1187 static struct qfq_aggregate *qfq_choose_next_agg(struct qfq_sched *q)
1188 {
1189 struct qfq_group *grp;
1190 struct qfq_aggregate *agg, *new_front_agg;
1191 u64 old_F;
1192
1193 qfq_update_eligible(q);
1194 q->oldV = q->V;
1195
1196 if (!q->bitmaps[ER])
1197 return NULL;
1198
1199 grp = qfq_ffs(q, q->bitmaps[ER]);
1200 old_F = grp->F;
1201
1202 agg = qfq_slot_head(grp);
1203
1204 /* agg starts to be served, remove it from schedule */
1205 qfq_front_slot_remove(grp);
1206
1207 new_front_agg = qfq_slot_scan(grp);
1208
1209 if (new_front_agg == NULL) /* group is now inactive, remove from ER */
1210 __clear_bit(grp->index, &q->bitmaps[ER]);
1211 else {
1212 u64 roundedS = qfq_round_down(new_front_agg->S,
1213 grp->slot_shift);
1214 unsigned int s;
1215
1216 if (grp->S == roundedS)
1217 return agg;
1218 grp->S = roundedS;
1219 grp->F = roundedS + (2ULL << grp->slot_shift);
1220 __clear_bit(grp->index, &q->bitmaps[ER]);
1221 s = qfq_calc_state(q, grp);
1222 __set_bit(grp->index, &q->bitmaps[s]);
1223 }
1224
1225 qfq_unblock_groups(q, grp->index, old_F);
1226
1227 return agg;
1228 }
1229
1230 static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
1231 struct sk_buff **to_free)
1232 {
1233 unsigned int len = qdisc_pkt_len(skb), gso_segs;
1234 struct qfq_sched *q = qdisc_priv(sch);
1235 struct qfq_class *cl;
1236 struct qfq_aggregate *agg;
1237 int err = 0;
1238
1239 cl = qfq_classify(skb, sch, &err);
1240 if (cl == NULL) {
1241 if (err & __NET_XMIT_BYPASS)
1242 qdisc_qstats_drop(sch);
1243 __qdisc_drop(skb, to_free);
1244 return err;
1245 }
1246 pr_debug("qfq_enqueue: cl = %x\n", cl->common.classid);
1247
1248 if (unlikely(cl->agg->lmax < len)) {
1249 pr_debug("qfq: increasing maxpkt from %u to %u for class %u",
1250 cl->agg->lmax, len, cl->common.classid);
1251 err = qfq_change_agg(sch, cl, cl->agg->class_weight, len);
1252 if (err) {
1253 cl->qstats.drops++;
1254 return qdisc_drop(skb, sch, to_free);
1255 }
1256 }
1257
1258 gso_segs = skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
1259 err = qdisc_enqueue(skb, cl->qdisc, to_free);
1260 if (unlikely(err != NET_XMIT_SUCCESS)) {
1261 pr_debug("qfq_enqueue: enqueue failed %d\n", err);
1262 if (net_xmit_drop_count(err)) {
1263 cl->qstats.drops++;
1264 qdisc_qstats_drop(sch);
1265 }
1266 return err;
1267 }
1268
1269 _bstats_update(&cl->bstats, len, gso_segs);
1270 sch->qstats.backlog += len;
1271 ++sch->q.qlen;
1272
1273 agg = cl->agg;
1274 /* if the class is active, then done here */
1275 if (cl_is_active(cl)) {
1276 if (unlikely(skb == cl->qdisc->ops->peek(cl->qdisc)) &&
1277 list_first_entry(&agg->active, struct qfq_class, alist)
1278 == cl && cl->deficit < len)
1279 list_move_tail(&cl->alist, &agg->active);
1280
1281 return err;
1282 }
1283
1284 /* schedule class for service within the aggregate */
1285 cl->deficit = agg->lmax;
1286 list_add_tail(&cl->alist, &agg->active);
1287
1288 if (list_first_entry(&agg->active, struct qfq_class, alist) != cl ||
1289 q->in_serv_agg == agg)
1290 return err; /* non-empty or in service, nothing else to do */
1291
1292 qfq_activate_agg(q, agg, enqueue);
1293
1294 return err;
1295 }
1296
1297 /*
1298 * Schedule aggregate according to its timestamps.
1299 */
1300 static void qfq_schedule_agg(struct qfq_sched *q, struct qfq_aggregate *agg)
1301 {
1302 struct qfq_group *grp = agg->grp;
1303 u64 roundedS;
1304 int s;
1305
1306 roundedS = qfq_round_down(agg->S, grp->slot_shift);
1307
1308 /*
1309 * Insert agg in the correct bucket.
1310 * If agg->S >= grp->S we don't need to adjust the
1311 * bucket list and simply go to the insertion phase.
1312 * Otherwise grp->S is decreasing, we must make room
1313 * in the bucket list, and also recompute the group state.
1314 * Finally, if there were no flows in this group and nobody
1315 * was in ER make sure to adjust V.
1316 */
1317 if (grp->full_slots) {
1318 if (!qfq_gt(grp->S, agg->S))
1319 goto skip_update;
1320
1321 /* create a slot for this agg->S */
1322 qfq_slot_rotate(grp, roundedS);
1323 /* group was surely ineligible, remove */
1324 __clear_bit(grp->index, &q->bitmaps[IR]);
1325 __clear_bit(grp->index, &q->bitmaps[IB]);
1326 } else if (!q->bitmaps[ER] && qfq_gt(roundedS, q->V) &&
1327 q->in_serv_agg == NULL)
1328 q->V = roundedS;
1329
1330 grp->S = roundedS;
1331 grp->F = roundedS + (2ULL << grp->slot_shift);
1332 s = qfq_calc_state(q, grp);
1333 __set_bit(grp->index, &q->bitmaps[s]);
1334
1335 pr_debug("qfq enqueue: new state %d %#lx S %lld F %lld V %lld\n",
1336 s, q->bitmaps[s],
1337 (unsigned long long) agg->S,
1338 (unsigned long long) agg->F,
1339 (unsigned long long) q->V);
1340
1341 skip_update:
1342 qfq_slot_insert(grp, agg, roundedS);
1343 }
1344
1345
1346 /* Update agg ts and schedule agg for service */
1347 static void qfq_activate_agg(struct qfq_sched *q, struct qfq_aggregate *agg,
1348 enum update_reason reason)
1349 {
1350 agg->initial_budget = agg->budget = agg->budgetmax; /* recharge budg. */
1351
1352 qfq_update_agg_ts(q, agg, reason);
1353 if (q->in_serv_agg == NULL) { /* no aggr. in service or scheduled */
1354 q->in_serv_agg = agg; /* start serving this aggregate */
1355 /* update V: to be in service, agg must be eligible */
1356 q->oldV = q->V = agg->S;
1357 } else if (agg != q->in_serv_agg)
1358 qfq_schedule_agg(q, agg);
1359 }
1360
1361 static void qfq_slot_remove(struct qfq_sched *q, struct qfq_group *grp,
1362 struct qfq_aggregate *agg)
1363 {
1364 unsigned int i, offset;
1365 u64 roundedS;
1366
1367 roundedS = qfq_round_down(agg->S, grp->slot_shift);
1368 offset = (roundedS - grp->S) >> grp->slot_shift;
1369
1370 i = (grp->front + offset) % QFQ_MAX_SLOTS;
1371
1372 hlist_del(&agg->next);
1373 if (hlist_empty(&grp->slots[i]))
1374 __clear_bit(offset, &grp->full_slots);
1375 }
1376
1377 /*
1378 * Called to forcibly deschedule an aggregate. If the aggregate is
1379 * not in the front bucket, or if the latter has other aggregates in
1380 * the front bucket, we can simply remove the aggregate with no other
1381 * side effects.
1382 * Otherwise we must propagate the event up.
1383 */
1384 static void qfq_deactivate_agg(struct qfq_sched *q, struct qfq_aggregate *agg)
1385 {
1386 struct qfq_group *grp = agg->grp;
1387 unsigned long mask;
1388 u64 roundedS;
1389 int s;
1390
1391 if (agg == q->in_serv_agg) {
1392 charge_actual_service(agg);
1393 q->in_serv_agg = qfq_choose_next_agg(q);
1394 return;
1395 }
1396
1397 agg->F = agg->S;
1398 qfq_slot_remove(q, grp, agg);
1399
1400 if (!grp->full_slots) {
1401 __clear_bit(grp->index, &q->bitmaps[IR]);
1402 __clear_bit(grp->index, &q->bitmaps[EB]);
1403 __clear_bit(grp->index, &q->bitmaps[IB]);
1404
1405 if (test_bit(grp->index, &q->bitmaps[ER]) &&
1406 !(q->bitmaps[ER] & ~((1UL << grp->index) - 1))) {
1407 mask = q->bitmaps[ER] & ((1UL << grp->index) - 1);
1408 if (mask)
1409 mask = ~((1UL << __fls(mask)) - 1);
1410 else
1411 mask = ~0UL;
1412 qfq_move_groups(q, mask, EB, ER);
1413 qfq_move_groups(q, mask, IB, IR);
1414 }
1415 __clear_bit(grp->index, &q->bitmaps[ER]);
1416 } else if (hlist_empty(&grp->slots[grp->front])) {
1417 agg = qfq_slot_scan(grp);
1418 roundedS = qfq_round_down(agg->S, grp->slot_shift);
1419 if (grp->S != roundedS) {
1420 __clear_bit(grp->index, &q->bitmaps[ER]);
1421 __clear_bit(grp->index, &q->bitmaps[IR]);
1422 __clear_bit(grp->index, &q->bitmaps[EB]);
1423 __clear_bit(grp->index, &q->bitmaps[IB]);
1424 grp->S = roundedS;
1425 grp->F = roundedS + (2ULL << grp->slot_shift);
1426 s = qfq_calc_state(q, grp);
1427 __set_bit(grp->index, &q->bitmaps[s]);
1428 }
1429 }
1430 }
1431
1432 static void qfq_qlen_notify(struct Qdisc *sch, unsigned long arg)
1433 {
1434 struct qfq_sched *q = qdisc_priv(sch);
1435 struct qfq_class *cl = (struct qfq_class *)arg;
1436
1437 if (list_empty(&cl->alist))
1438 return;
1439 qfq_deactivate_class(q, cl);
1440 }
1441
1442 static int qfq_init_qdisc(struct Qdisc *sch, struct nlattr *opt,
1443 struct netlink_ext_ack *extack)
1444 {
1445 struct qfq_sched *q = qdisc_priv(sch);
1446 struct qfq_group *grp;
1447 int i, j, err;
1448 u32 max_cl_shift, maxbudg_shift, max_classes;
1449
1450 err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
1451 if (err)
1452 return err;
1453
1454 err = qdisc_class_hash_init(&q->clhash);
1455 if (err < 0)
1456 return err;
1457
1458 max_classes = min_t(u64, (u64)qdisc_dev(sch)->tx_queue_len + 1,
1459 QFQ_MAX_AGG_CLASSES);
1460 /* max_cl_shift = floor(log_2(max_classes)) */
1461 max_cl_shift = __fls(max_classes);
1462 q->max_agg_classes = 1<<max_cl_shift;
1463
1464 /* maxbudg_shift = log2(max_len * max_classes_per_agg) */
1465 maxbudg_shift = QFQ_MTU_SHIFT + max_cl_shift;
1466 q->min_slot_shift = FRAC_BITS + maxbudg_shift - QFQ_MAX_INDEX;
1467
1468 for (i = 0; i <= QFQ_MAX_INDEX; i++) {
1469 grp = &q->groups[i];
1470 grp->index = i;
1471 grp->slot_shift = q->min_slot_shift + i;
1472 for (j = 0; j < QFQ_MAX_SLOTS; j++)
1473 INIT_HLIST_HEAD(&grp->slots[j]);
1474 }
1475
1476 INIT_HLIST_HEAD(&q->nonfull_aggs);
1477
1478 return 0;
1479 }
1480
1481 static void qfq_reset_qdisc(struct Qdisc *sch)
1482 {
1483 struct qfq_sched *q = qdisc_priv(sch);
1484 struct qfq_class *cl;
1485 unsigned int i;
1486
1487 for (i = 0; i < q->clhash.hashsize; i++) {
1488 hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
1489 if (cl->qdisc->q.qlen > 0)
1490 qfq_deactivate_class(q, cl);
1491
1492 qdisc_reset(cl->qdisc);
1493 }
1494 }
1495 }
1496
1497 static void qfq_destroy_qdisc(struct Qdisc *sch)
1498 {
1499 struct qfq_sched *q = qdisc_priv(sch);
1500 struct qfq_class *cl;
1501 struct hlist_node *next;
1502 unsigned int i;
1503
1504 tcf_block_put(q->block);
1505
1506 for (i = 0; i < q->clhash.hashsize; i++) {
1507 hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i],
1508 common.hnode) {
1509 qfq_destroy_class(sch, cl);
1510 }
1511 }
1512 qdisc_class_hash_destroy(&q->clhash);
1513 }
1514
1515 static const struct Qdisc_class_ops qfq_class_ops = {
1516 .change = qfq_change_class,
1517 .delete = qfq_delete_class,
1518 .find = qfq_search_class,
1519 .tcf_block = qfq_tcf_block,
1520 .bind_tcf = qfq_bind_tcf,
1521 .unbind_tcf = qfq_unbind_tcf,
1522 .graft = qfq_graft_class,
1523 .leaf = qfq_class_leaf,
1524 .qlen_notify = qfq_qlen_notify,
1525 .dump = qfq_dump_class,
1526 .dump_stats = qfq_dump_class_stats,
1527 .walk = qfq_walk,
1528 };
1529
1530 static struct Qdisc_ops qfq_qdisc_ops __read_mostly = {
1531 .cl_ops = &qfq_class_ops,
1532 .id = "qfq",
1533 .priv_size = sizeof(struct qfq_sched),
1534 .enqueue = qfq_enqueue,
1535 .dequeue = qfq_dequeue,
1536 .peek = qdisc_peek_dequeued,
1537 .init = qfq_init_qdisc,
1538 .reset = qfq_reset_qdisc,
1539 .destroy = qfq_destroy_qdisc,
1540 .owner = THIS_MODULE,
1541 };
1542
1543 static int __init qfq_init(void)
1544 {
1545 return register_qdisc(&qfq_qdisc_ops);
1546 }
1547
1548 static void __exit qfq_exit(void)
1549 {
1550 unregister_qdisc(&qfq_qdisc_ops);
1551 }
1552
1553 module_init(qfq_init);
1554 module_exit(qfq_exit);
1555 MODULE_LICENSE("GPL");
1556