// SPDX-License-Identifier: GPL-2.0-only
/*
 * net/sched/sch_qfq.c         Quick Fair Queueing Plus Scheduler.
 *
 * Copyright (c) 2009 Fabio Checconi, Luigi Rizzo, and Paolo Valente.
 * Copyright (c) 2012 Paolo Valente.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/pkt_sched.h>
#include <net/sch_generic.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>

/*  Quick Fair Queueing Plus
    ========================

    Sources:

    [1] Paolo Valente,
    "Reducing the Execution Time of Fair-Queueing Schedulers."
    http://algo.ing.unimo.it/people/paolo/agg-sched/agg-sched.pdf

    Sources for QFQ:

    [2] Fabio Checconi, Luigi Rizzo, and Paolo Valente: "QFQ: Efficient
    Packet Scheduling with Tight Bandwidth Distribution Guarantees."

    See also:
    http://retis.sssup.it/~fabio/linux/qfq/
 */

/*

  QFQ+ divides classes into aggregates of at most MAX_AGG_CLASSES
  classes. Each aggregate is timestamped with a virtual start time S
  and a virtual finish time F, and scheduled according to its
  timestamps. S and F are computed as a function of a system virtual
  time function V. The classes within each aggregate are instead
  scheduled with DRR.

  To speed up operations, QFQ+ also divides aggregates into a limited
  number of groups. Which group a class belongs to depends on the
  ratio between the maximum packet length for the class and the weight
  of the class. Groups have their own S and F. In the end, QFQ+
  schedules groups, then aggregates within groups, then classes within
  aggregates. See [1] and [2] for a full description.

  Virtual time computations.

  S, F and V are all computed in fixed point arithmetic with
  FRAC_BITS fractional bits.

  QFQ_MAX_INDEX is the maximum index allowed for a group. We need
	one bit per index.
  QFQ_MAX_WSHIFT is the maximum power of two supported as a weight.

  The layout of the bits is as below:
                   [ MTU_SHIFT ][      FRAC_BITS    ]
                   [ MAX_INDEX    ][ MIN_SLOT_SHIFT ]
				 ^.__grp->index = 0
				 *.__grp->slot_shift

  where MIN_SLOT_SHIFT is derived by difference from the others.

  The max group index corresponds to Lmax/w_min, where
  Lmax=1<<MTU_SHIFT, w_min = 1.
  From this, and knowing how many groups (MAX_INDEX) we want,
  we can derive the shift corresponding to each group.

  Because we often need to compute
	F = S + len/w_i  and V = V + len/wsum
  instead of storing w_i we store the value
	inv_w = (1<<FRAC_BITS)/w_i
  so we can do F = S + len * inv_w.  We also keep the inverse of the
  weight sum, iwsum = (1<<FRAC_BITS)/wsum, so that V = V + len * iwsum.
  Keeping the weight sum in a variable lets us easily move between a
  static and an adaptive weight sum.

  The per-scheduler-instance data contain all the data structures
  for the scheduler: bitmaps and bucket lists.

 */
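/*
 * Worked example of the fixed-point arithmetic above (the numbers are
 * purely illustrative, not taken from any particular configuration):
 * with FRAC_BITS = 30, a class of weight w = 4 stores
 * inv_w = ONE_FP/4 = 1<<28.  Serving a len = 1500 byte packet advances
 * its finish time by len * inv_w = 1500 * (1<<28) = 375 * (1<<30),
 * i.e. by 375 units of virtual time, which indeed equals len/w = 1500/4.
 */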

/*
 * Maximum number of consecutive slots occupied by backlogged classes
 * inside a group.
 */
#define QFQ_MAX_SLOTS	32

/*
 * Shifts used for aggregate<->group mapping. We allow class weights that are
 * in the range [1, 2^MAX_WSHIFT], and we try to map each aggregate i to the
 * group with the smallest index that can support the L_i / r_i configured
 * for the classes in the aggregate.
 *
 * grp->index is the index of the group; and grp->slot_shift
 * is the shift for the corresponding (scaled) sigma_i.
 */
#define QFQ_MAX_INDEX		24
#define QFQ_MAX_WSHIFT		10

#define QFQ_MAX_WEIGHT		(1<<QFQ_MAX_WSHIFT) /* see qfq_slot_insert */
#define QFQ_MAX_WSUM		(64*QFQ_MAX_WEIGHT)

#define FRAC_BITS		30	/* fixed point arithmetic */
#define ONE_FP			(1UL << FRAC_BITS)

#define QFQ_MTU_SHIFT		16	/* to support TSO/GSO */
#define QFQ_MIN_LMAX		512	/* see qfq_slot_insert */
#define QFQ_MAX_LMAX		(1UL << QFQ_MTU_SHIFT)

#define QFQ_MAX_AGG_CLASSES	8 /* max num classes per aggregate allowed */

/*
 * Possible group states. These values are used as indexes for the bitmaps
 * array of struct qfq_sched.
 */
enum qfq_state { ER, IR, EB, IB, QFQ_MAX_STATE };

struct qfq_group;

struct qfq_aggregate;

struct qfq_class {
	struct Qdisc_class_common common;

	struct gnet_stats_basic_sync bstats;
	struct gnet_stats_queue qstats;
	struct net_rate_estimator __rcu *rate_est;
	struct Qdisc *qdisc;
	struct list_head alist;		/* Link for active-classes list. */
	struct qfq_aggregate *agg;	/* Parent aggregate. */
	int deficit;			/* DRR deficit counter. */
};

struct qfq_aggregate {
	struct hlist_node next;	/* Link for the slot list. */
	u64 S, F;		/* flow timestamps (exact) */

	/* group we belong to. In principle we would need the index,
	 * which is log_2(lmax/weight), but we never reference it
	 * directly, only the group.
	 */
	struct qfq_group *grp;

	/* these are copied from the flowset. */
	u32 class_weight; /* Weight of each class in this aggregate. */
	/* Max pkt size for the classes in this aggregate, DRR quantum. */
	int lmax;

	u32 inv_w;	    /* ONE_FP/(sum of weights of classes in aggr.). */
	u32 budgetmax;	    /* Max budget for this aggregate. */
	u32 initial_budget, budget;	/* Initial and current budget. */

	int num_classes;		/* Number of classes in this aggr. */
	struct list_head active;	/* DRR queue of active classes. */

	struct hlist_node nonfull_next;	/* See nonfull_aggs in qfq_sched. */
};

struct qfq_group {
	u64 S, F;			/* group timestamps (approx). */
	unsigned int slot_shift;	/* Slot shift. */
	unsigned int index;		/* Group index. */
	unsigned int front;		/* Index of the front slot. */
	unsigned long full_slots;	/* non-empty slots */

	/* Array of RR lists of active aggregates. */
	struct hlist_head slots[QFQ_MAX_SLOTS];
};

struct qfq_sched {
	struct tcf_proto __rcu *filter_list;
	struct tcf_block	*block;
	struct Qdisc_class_hash clhash;

	u64			oldV, V;	/* Precise virtual times. */
	struct qfq_aggregate	*in_serv_agg;	/* Aggregate being served. */
	u32			wsum;		/* weight sum */
	u32			iwsum;		/* inverse weight sum */

	unsigned long bitmaps[QFQ_MAX_STATE];	    /* Group bitmaps. */
	struct qfq_group groups[QFQ_MAX_INDEX + 1]; /* The groups. */
	u32 min_slot_shift;	/* Index of the group-0 bit in the bitmaps. */

	u32 max_agg_classes;		/* Max number of classes per aggr. */
	struct hlist_head nonfull_aggs; /* Aggs with room for more classes. */
};

/*
 * Possible reasons why the timestamps of an aggregate are updated:
 * enqueue: the aggregate switches from idle to active and must be scheduled
 *	    for service
 * requeue: the aggregate finishes its budget, so it stops being served and
 *	    must be rescheduled for service
 */
enum update_reason {enqueue, requeue};

static struct qfq_class *qfq_find_class(struct Qdisc *sch, u32 classid)
{
	struct qfq_sched *q = qdisc_priv(sch);
	struct Qdisc_class_common *clc;

	clc = qdisc_class_find(&q->clhash, classid);
	if (clc == NULL)
		return NULL;
	return container_of(clc, struct qfq_class, common);
}

static struct netlink_range_validation lmax_range = {
	.min = QFQ_MIN_LMAX,
	.max = QFQ_MAX_LMAX,
};

static const struct nla_policy qfq_policy[TCA_QFQ_MAX + 1] = {
	[TCA_QFQ_WEIGHT] = NLA_POLICY_RANGE(NLA_U32, 1, QFQ_MAX_WEIGHT),
	[TCA_QFQ_LMAX] = NLA_POLICY_FULL_RANGE(NLA_U32, &lmax_range),
};
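/*
 * For reference, an illustrative iproute2 invocation (not part of this
 * file) that ends up in the attributes validated above could be:
 *	tc class add dev eth0 parent 1: classid 1:1 qfq weight 10 maxpkt 1514
 * which sets TCA_QFQ_WEIGHT = 10 and TCA_QFQ_LMAX = 1514; both values must
 * fall within the ranges enforced by qfq_policy.
 */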

/*
 * Calculate a flow index, given its weight and maximum packet length.
 * index = log_2(maxlen/weight) but we need to apply the scaling.
 * This is used only once at flow creation.
 */
static int qfq_calc_index(u32 inv_w, unsigned int maxlen, u32 min_slot_shift)
{
	u64 slot_size = (u64)maxlen * inv_w;
	unsigned long size_map;
	int index = 0;

	size_map = slot_size >> min_slot_shift;
	if (!size_map)
		goto out;

	index = __fls(size_map) + 1;	/* basically a log_2 */
	index -= !(slot_size - (1ULL << (index + min_slot_shift - 1)));

	if (index < 0)
		index = 0;
out:
	pr_debug("qfq calc_index: W = %lu, L = %u, I = %d\n",
		 (unsigned long) ONE_FP/inv_w, maxlen, index);

	return index;
}
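/*
 * Worked example with made-up numbers: assume min_slot_shift = 25 (the
 * real value is computed at qdisc init time; 25 is only illustrative).
 * A class with weight 1 has inv_w = ONE_FP = 1<<30; with maxlen = 1<<14
 * the slot size is 1ULL<<44, so size_map = 1<<19 and __fls(size_map) + 1
 * gives index = 20.  Because the slot size is exactly a power of two,
 * 1ULL << (19 + min_slot_shift), the final correction lowers the index
 * to 19.
 */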

static void qfq_deactivate_agg(struct qfq_sched *, struct qfq_aggregate *);
static void qfq_activate_agg(struct qfq_sched *, struct qfq_aggregate *,
			     enum update_reason);

static void qfq_init_agg(struct qfq_sched *q, struct qfq_aggregate *agg,
			 u32 lmax, u32 weight)
{
	INIT_LIST_HEAD(&agg->active);
	hlist_add_head(&agg->nonfull_next, &q->nonfull_aggs);

	agg->lmax = lmax;
	agg->class_weight = weight;
}

static struct qfq_aggregate *qfq_find_agg(struct qfq_sched *q,
					  u32 lmax, u32 weight)
{
	struct qfq_aggregate *agg;

	hlist_for_each_entry(agg, &q->nonfull_aggs, nonfull_next)
		if (agg->lmax == lmax && agg->class_weight == weight)
			return agg;

	return NULL;
}

/* Update aggregate as a function of the new number of classes. */
static void qfq_update_agg(struct qfq_sched *q, struct qfq_aggregate *agg,
			   int new_num_classes)
{
	u32 new_agg_weight;

	if (new_num_classes == q->max_agg_classes)
		hlist_del_init(&agg->nonfull_next);

	if (agg->num_classes > new_num_classes &&
	    new_num_classes == q->max_agg_classes - 1) /* agg no more full */
		hlist_add_head(&agg->nonfull_next, &q->nonfull_aggs);

	/* The next assignment may let
	 * agg->initial_budget > agg->budgetmax
	 * hold; we will take it into account in charge_actual_service().
	 */
	agg->budgetmax = new_num_classes * agg->lmax;
	new_agg_weight = agg->class_weight * new_num_classes;
	agg->inv_w = ONE_FP/new_agg_weight;

	if (agg->grp == NULL) {
		int i = qfq_calc_index(agg->inv_w, agg->budgetmax,
				       q->min_slot_shift);
		agg->grp = &q->groups[i];
	}

	q->wsum +=
		(int) agg->class_weight * (new_num_classes - agg->num_classes);
	q->iwsum = ONE_FP / q->wsum;

	agg->num_classes = new_num_classes;
}
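/*
 * Illustrative numbers only: an aggregate whose classes have
 * class_weight = 2 and lmax = 1500, growing from 2 to 3 classes, ends up
 * with budgetmax = 3 * 1500 = 4500 bytes and an aggregate weight of
 * 2 * 3 = 6, hence inv_w = ONE_FP/6; q->wsum grows by 2 * (3 - 2) = 2 and
 * q->iwsum is recomputed accordingly.
 */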

/* Add class to aggregate. */
static void qfq_add_to_agg(struct qfq_sched *q,
			   struct qfq_aggregate *agg,
			   struct qfq_class *cl)
{
	cl->agg = agg;

	qfq_update_agg(q, agg, agg->num_classes+1);
	if (cl->qdisc->q.qlen > 0) { /* adding an active class */
		list_add_tail(&cl->alist, &agg->active);
		if (list_first_entry(&agg->active, struct qfq_class, alist) ==
		    cl && q->in_serv_agg != agg) /* agg was inactive */
			qfq_activate_agg(q, agg, enqueue); /* schedule agg */
	}
}

static struct qfq_aggregate *qfq_choose_next_agg(struct qfq_sched *);

static void qfq_destroy_agg(struct qfq_sched *q, struct qfq_aggregate *agg)
{
	hlist_del_init(&agg->nonfull_next);
	q->wsum -= agg->class_weight;
	if (q->wsum != 0)
		q->iwsum = ONE_FP / q->wsum;

	if (q->in_serv_agg == agg)
		q->in_serv_agg = qfq_choose_next_agg(q);
	kfree(agg);
}

/* Deschedule class from within its parent aggregate. */
static void qfq_deactivate_class(struct qfq_sched *q, struct qfq_class *cl)
{
	struct qfq_aggregate *agg = cl->agg;

	list_del(&cl->alist); /* remove from RR queue of the aggregate */
	if (list_empty(&agg->active)) /* agg is now inactive */
		qfq_deactivate_agg(q, agg);
}

/* Remove class from its parent aggregate. */
static void qfq_rm_from_agg(struct qfq_sched *q, struct qfq_class *cl)
{
	struct qfq_aggregate *agg = cl->agg;

	cl->agg = NULL;
	if (agg->num_classes == 1) { /* agg being emptied, destroy it */
		qfq_destroy_agg(q, agg);
		return;
	}
	qfq_update_agg(q, agg, agg->num_classes-1);
}

/* Deschedule class and remove it from its parent aggregate. */
static void qfq_deact_rm_from_agg(struct qfq_sched *q, struct qfq_class *cl)
{
	if (cl->qdisc->q.qlen > 0) /* class is active */
		qfq_deactivate_class(q, cl);

	qfq_rm_from_agg(q, cl);
}

/* Move class to a new aggregate, matching the new class weight and/or lmax */
static int qfq_change_agg(struct Qdisc *sch, struct qfq_class *cl, u32 weight,
			  u32 lmax)
{
	struct qfq_sched *q = qdisc_priv(sch);
	struct qfq_aggregate *new_agg;

	/* 'lmax' can range from [QFQ_MIN_LMAX, pktlen + stab overhead] */
	if (lmax > QFQ_MAX_LMAX)
		return -EINVAL;

	new_agg = qfq_find_agg(q, lmax, weight);
	if (new_agg == NULL) { /* create new aggregate */
		new_agg = kzalloc(sizeof(*new_agg), GFP_ATOMIC);
		if (new_agg == NULL)
			return -ENOBUFS;
		qfq_init_agg(q, new_agg, lmax, weight);
	}
	qfq_deact_rm_from_agg(q, cl);
	qfq_add_to_agg(q, new_agg, cl);

	return 0;
}

static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
			    struct nlattr **tca, unsigned long *arg,
			    struct netlink_ext_ack *extack)
{
	struct qfq_sched *q = qdisc_priv(sch);
	struct qfq_class *cl = (struct qfq_class *)*arg;
	bool existing = false;
	struct nlattr *tb[TCA_QFQ_MAX + 1];
	struct qfq_aggregate *new_agg = NULL;
	u32 weight, lmax, inv_w;
	int err;
	int delta_w;

	if (NL_REQ_ATTR_CHECK(extack, NULL, tca, TCA_OPTIONS)) {
		NL_SET_ERR_MSG_MOD(extack, "missing options");
		return -EINVAL;
	}

	err = nla_parse_nested_deprecated(tb, TCA_QFQ_MAX, tca[TCA_OPTIONS],
					  qfq_policy, extack);
	if (err < 0)
		return err;

	if (tb[TCA_QFQ_WEIGHT])
		weight = nla_get_u32(tb[TCA_QFQ_WEIGHT]);
	else
		weight = 1;

	if (tb[TCA_QFQ_LMAX]) {
		lmax = nla_get_u32(tb[TCA_QFQ_LMAX]);
	} else {
		/* MTU size is user controlled */
		lmax = psched_mtu(qdisc_dev(sch));
		if (lmax < QFQ_MIN_LMAX || lmax > QFQ_MAX_LMAX) {
			NL_SET_ERR_MSG_MOD(extack,
					   "MTU size out of bounds for qfq");
			return -EINVAL;
		}
	}

	inv_w = ONE_FP / weight;
	weight = ONE_FP / inv_w;

	if (cl != NULL &&
	    lmax == cl->agg->lmax &&
	    weight == cl->agg->class_weight)
		return 0; /* nothing to change */

	delta_w = weight - (cl ? cl->agg->class_weight : 0);

	if (q->wsum + delta_w > QFQ_MAX_WSUM) {
		NL_SET_ERR_MSG_FMT_MOD(extack,
				       "total weight out of range (%d + %u)\n",
				       delta_w, q->wsum);
		return -EINVAL;
	}

	if (cl != NULL) { /* modify existing class */
		if (tca[TCA_RATE]) {
			err = gen_replace_estimator(&cl->bstats, NULL,
						    &cl->rate_est,
						    NULL,
						    true,
						    tca[TCA_RATE]);
			if (err)
				return err;
		}
		existing = true;
		goto set_change_agg;
	}

	/* create and init new class */
	cl = kzalloc(sizeof(struct qfq_class), GFP_KERNEL);
	if (cl == NULL)
		return -ENOBUFS;

	gnet_stats_basic_sync_init(&cl->bstats);
	cl->common.classid = classid;
	cl->deficit = lmax;

	cl->qdisc = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
				      classid, NULL);
	if (cl->qdisc == NULL)
		cl->qdisc = &noop_qdisc;

	if (tca[TCA_RATE]) {
		err = gen_new_estimator(&cl->bstats, NULL,
					&cl->rate_est,
					NULL,
					true,
					tca[TCA_RATE]);
		if (err)
			goto destroy_class;
	}

	if (cl->qdisc != &noop_qdisc)
		qdisc_hash_add(cl->qdisc, true);

set_change_agg:
	sch_tree_lock(sch);
	new_agg = qfq_find_agg(q, lmax, weight);
	if (new_agg == NULL) { /* create new aggregate */
		sch_tree_unlock(sch);
		new_agg = kzalloc(sizeof(*new_agg), GFP_KERNEL);
		if (new_agg == NULL) {
			err = -ENOBUFS;
			gen_kill_estimator(&cl->rate_est);
			goto destroy_class;
		}
		sch_tree_lock(sch);
		qfq_init_agg(q, new_agg, lmax, weight);
	}
	if (existing)
		qfq_deact_rm_from_agg(q, cl);
	else
		qdisc_class_hash_insert(&q->clhash, &cl->common);
	qfq_add_to_agg(q, new_agg, cl);
	sch_tree_unlock(sch);
	qdisc_class_hash_grow(sch, &q->clhash);

	*arg = (unsigned long)cl;
	return 0;

destroy_class:
	qdisc_put(cl->qdisc);
	kfree(cl);
	return err;
}

static void qfq_destroy_class(struct Qdisc *sch, struct qfq_class *cl)
{
	struct qfq_sched *q = qdisc_priv(sch);

	qfq_rm_from_agg(q, cl);
	gen_kill_estimator(&cl->rate_est);
	qdisc_put(cl->qdisc);
	kfree(cl);
}

static int qfq_delete_class(struct Qdisc *sch, unsigned long arg,
			    struct netlink_ext_ack *extack)
{
	struct qfq_sched *q = qdisc_priv(sch);
	struct qfq_class *cl = (struct qfq_class *)arg;

	if (qdisc_class_in_use(&cl->common)) {
		NL_SET_ERR_MSG_MOD(extack, "QFQ class in use");
		return -EBUSY;
	}

	sch_tree_lock(sch);

	qdisc_purge_queue(cl->qdisc);
	qdisc_class_hash_remove(&q->clhash, &cl->common);

	sch_tree_unlock(sch);

	qfq_destroy_class(sch, cl);
	return 0;
}

static unsigned long qfq_search_class(struct Qdisc *sch, u32 classid)
{
	return (unsigned long)qfq_find_class(sch, classid);
}

static struct tcf_block *qfq_tcf_block(struct Qdisc *sch, unsigned long cl,
				       struct netlink_ext_ack *extack)
{
	struct qfq_sched *q = qdisc_priv(sch);

	if (cl)
		return NULL;

	return q->block;
}

static unsigned long qfq_bind_tcf(struct Qdisc *sch, unsigned long parent,
				  u32 classid)
{
	struct qfq_class *cl = qfq_find_class(sch, classid);

	if (cl)
		qdisc_class_get(&cl->common);

	return (unsigned long)cl;
}

static void qfq_unbind_tcf(struct Qdisc *sch, unsigned long arg)
{
	struct qfq_class *cl = (struct qfq_class *)arg;

	qdisc_class_put(&cl->common);
}

static int qfq_graft_class(struct Qdisc *sch, unsigned long arg,
			   struct Qdisc *new, struct Qdisc **old,
			   struct netlink_ext_ack *extack)
{
	struct qfq_class *cl = (struct qfq_class *)arg;

	if (new == NULL) {
		new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
					cl->common.classid, NULL);
		if (new == NULL)
			new = &noop_qdisc;
	}

	*old = qdisc_replace(sch, new, &cl->qdisc);
	return 0;
}

static struct Qdisc *qfq_class_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct qfq_class *cl = (struct qfq_class *)arg;

	return cl->qdisc;
}

static int qfq_dump_class(struct Qdisc *sch, unsigned long arg,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct qfq_class *cl = (struct qfq_class *)arg;
	struct nlattr *nest;

	tcm->tcm_parent	= TC_H_ROOT;
	tcm->tcm_handle	= cl->common.classid;
	tcm->tcm_info	= cl->qdisc->handle;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;
	if (nla_put_u32(skb, TCA_QFQ_WEIGHT, cl->agg->class_weight) ||
	    nla_put_u32(skb, TCA_QFQ_LMAX, cl->agg->lmax))
		goto nla_put_failure;
	return nla_nest_end(skb, nest);

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

static int qfq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
				struct gnet_dump *d)
{
	struct qfq_class *cl = (struct qfq_class *)arg;
	struct tc_qfq_stats xstats;

	memset(&xstats, 0, sizeof(xstats));

	xstats.weight = cl->agg->class_weight;
	xstats.lmax = cl->agg->lmax;

	if (gnet_stats_copy_basic(d, NULL, &cl->bstats, true) < 0 ||
	    gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
	    qdisc_qstats_copy(d, cl->qdisc) < 0)
		return -1;

	return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
}

static void qfq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct qfq_sched *q = qdisc_priv(sch);
	struct qfq_class *cl;
	unsigned int i;

	if (arg->stop)
		return;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
			if (!tc_qdisc_stats_dump(sch, (unsigned long)cl, arg))
				return;
		}
	}
}

static struct qfq_class *qfq_classify(struct sk_buff *skb, struct Qdisc *sch,
				      int *qerr)
{
	struct qfq_sched *q = qdisc_priv(sch);
	struct qfq_class *cl;
	struct tcf_result res;
	struct tcf_proto *fl;
	int result;

	if (TC_H_MAJ(skb->priority ^ sch->handle) == 0) {
		pr_debug("qfq_classify: found %d\n", skb->priority);
		cl = qfq_find_class(sch, skb->priority);
		if (cl != NULL)
			return cl;
	}

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	fl = rcu_dereference_bh(q->filter_list);
	result = tcf_classify(skb, NULL, fl, &res, false);
	if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_QUEUED:
		case TC_ACT_STOLEN:
		case TC_ACT_TRAP:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
			fallthrough;
		case TC_ACT_SHOT:
			return NULL;
		}
#endif
		cl = (struct qfq_class *)res.class;
		if (cl == NULL)
			cl = qfq_find_class(sch, res.classid);
		return cl;
	}

	return NULL;
}

/* Generic comparison function, handling wraparound. */
static inline int qfq_gt(u64 a, u64 b)
{
	return (s64)(a - b) > 0;
}
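/*
 * Example of the wraparound handling (values made up): with a = 1 and
 * b = ULLONG_MAX - 2, the unsigned difference a - b is 4, so
 * (s64)(a - b) > 0 and qfq_gt(a, b) correctly reports that a is the more
 * recent timestamp even though it is numerically smaller.
 */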

/* Round a precise timestamp to its slotted value. */
static inline u64 qfq_round_down(u64 ts, unsigned int shift)
{
	return ts & ~((1ULL << shift) - 1);
}

/* return the pointer to the group with lowest index in the bitmap */
static inline struct qfq_group *qfq_ffs(struct qfq_sched *q,
					unsigned long bitmap)
{
	int index = __ffs(bitmap);
	return &q->groups[index];
}
/* Calculate a mask to mimic what would be ffs_from(). */
static inline unsigned long mask_from(unsigned long bitmap, int from)
{
	return bitmap & ~((1UL << from) - 1);
}

/*
 * The state computation relies on ER=0, IR=1, EB=2, IB=3.
 * First compute eligibility comparing grp->S, q->V,
 * then check if someone is blocking us and possibly add EB
 */
static int qfq_calc_state(struct qfq_sched *q, const struct qfq_group *grp)
{
	/* if S > V we are not eligible */
	unsigned int state = qfq_gt(grp->S, q->V);
	unsigned long mask = mask_from(q->bitmaps[ER], grp->index);
	struct qfq_group *next;

	if (mask) {
		next = qfq_ffs(q, mask);
		if (qfq_gt(grp->F, next->F))
			state |= EB;
	}

	return state;
}


/*
 * In principle
 *	q->bitmaps[dst] |= q->bitmaps[src] & mask;
 *	q->bitmaps[src] &= ~mask;
 * but we should make sure that src != dst
 */
static inline void qfq_move_groups(struct qfq_sched *q, unsigned long mask,
				   int src, int dst)
{
	q->bitmaps[dst] |= q->bitmaps[src] & mask;
	q->bitmaps[src] &= ~mask;
}

static void qfq_unblock_groups(struct qfq_sched *q, int index, u64 old_F)
{
	unsigned long mask = mask_from(q->bitmaps[ER], index + 1);
	struct qfq_group *next;

	if (mask) {
		next = qfq_ffs(q, mask);
		if (!qfq_gt(next->F, old_F))
			return;
	}

	mask = (1UL << index) - 1;
	qfq_move_groups(q, mask, EB, ER);
	qfq_move_groups(q, mask, IB, IR);
}

/*
 * perhaps
 *
	old_V ^= q->V;
	old_V >>= q->min_slot_shift;
	if (old_V) {
		...
	}
 *
 */
static void qfq_make_eligible(struct qfq_sched *q)
{
	unsigned long vslot = q->V >> q->min_slot_shift;
	unsigned long old_vslot = q->oldV >> q->min_slot_shift;

	if (vslot != old_vslot) {
		unsigned long mask;
		int last_flip_pos = fls(vslot ^ old_vslot);

		if (last_flip_pos > 31) /* higher than the number of groups */
			mask = ~0UL; /* make all groups eligible */
		else
			mask = (1UL << last_flip_pos) - 1;

		qfq_move_groups(q, mask, IR, ER);
		qfq_move_groups(q, mask, IB, EB);
	}
}

/*
 * The index of the slot in which the input aggregate agg is to be
 * inserted must not be higher than QFQ_MAX_SLOTS-2. There is a '-2'
 * and not a '-1' because the start time of the group may be moved
 * backward by one slot after the aggregate has been inserted, and
 * this would cause non-empty slots to be right-shifted by one
 * position.
 *
 * QFQ+ fully satisfies this bound to the slot index if the parameters
 * of the classes are not changed dynamically, and if QFQ+ never
 * happens to postpone the service of agg unjustly, i.e., it never
 * happens that the aggregate becomes backlogged and eligible, or just
 * eligible, while an aggregate with a higher approximated finish time
 * is being served. In particular, in this case QFQ+ guarantees that
 * the timestamps of agg are low enough that the slot index is never
 * higher than 2. Unfortunately, QFQ+ cannot provide the same
 * guarantee if it happens to unjustly postpone the service of agg, or
 * if the parameters of some class are changed.
 *
 * As for the first event, i.e., an out-of-order service, the
 * upper bound to the slot index guaranteed by QFQ+ grows to
 * 2 +
 * QFQ_MAX_AGG_CLASSES * ((1<<QFQ_MTU_SHIFT)/QFQ_MIN_LMAX) *
 * (current_max_weight/current_wsum) <= 2 + 8 * 128 * 1.
 *
 * The following function deals with this problem by backward-shifting
 * the timestamps of agg, if needed, so as to guarantee that the slot
 * index is never higher than QFQ_MAX_SLOTS-2. This backward-shift may
 * cause the service of other aggregates to be postponed, yet the
 * worst-case guarantees of these aggregates are not violated. In
 * fact, in case of no out-of-order service, the timestamps of agg
 * would have been even lower than they are after the backward shift,
 * because QFQ+ would have guaranteed a maximum value equal to 2 for
 * the slot index, and 2 < QFQ_MAX_SLOTS-2. Hence the aggregates whose
 * service is postponed because of the backward-shift would nevertheless
 * have waited for the service of agg before being served.
 *
 * The other event that may cause the slot index to be higher than 2
 * for agg is a recent change of the parameters of some class. If the
 * weight of a class is increased or the lmax (max_pkt_size) of the
 * class is decreased, then a new aggregate with smaller slot size
 * than the original parent aggregate of the class may happen to be
 * activated. The activation of this aggregate should be properly
 * delayed to when the service of the class has finished in the ideal
 * system tracked by QFQ+. If the activation of the aggregate is not
 * delayed to this reference time instant, then this aggregate may be
 * unjustly served before other aggregates waiting for service. This
 * may cause the above bound to the slot index to be violated for some
 * of these unlucky aggregates.
 *
 * Instead of delaying the activation of the new aggregate, which is
 * quite complex, the above-discussed capping of the slot index is
 * also used to handle the consequences of a change of the parameters
 * of a class.
 */
static void qfq_slot_insert(struct qfq_group *grp, struct qfq_aggregate *agg,
			    u64 roundedS)
{
	u64 slot = (roundedS - grp->S) >> grp->slot_shift;
	unsigned int i; /* slot index in the bucket list */

	if (unlikely(slot > QFQ_MAX_SLOTS - 2)) {
		u64 deltaS = roundedS - grp->S -
			((u64)(QFQ_MAX_SLOTS - 2)<<grp->slot_shift);
		agg->S -= deltaS;
		agg->F -= deltaS;
		slot = QFQ_MAX_SLOTS - 2;
	}

	i = (grp->front + slot) % QFQ_MAX_SLOTS;

	hlist_add_head(&agg->next, &grp->slots[i]);
	__set_bit(slot, &grp->full_slots);
}
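/*
 * Numeric illustration (values chosen only for the example): with
 * slot_shift = 32 and grp->S = 0, an aggregate with roundedS = 3ULL << 32
 * goes into logical slot 3, i.e. bucket (grp->front + 3) % QFQ_MAX_SLOTS.
 * With roundedS = 40ULL << 32 the slot would exceed QFQ_MAX_SLOTS - 2 = 30,
 * so S and F are shifted backward by (40 - 30) << 32 and the aggregate is
 * queued in logical slot 30 instead.
 */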

/* Maybe introduce hlist_first_entry?? */
static struct qfq_aggregate *qfq_slot_head(struct qfq_group *grp)
{
	return hlist_entry(grp->slots[grp->front].first,
			   struct qfq_aggregate, next);
}

/*
 * remove the entry from the slot
 */
static void qfq_front_slot_remove(struct qfq_group *grp)
{
	struct qfq_aggregate *agg = qfq_slot_head(grp);

	BUG_ON(!agg);
	hlist_del(&agg->next);
	if (hlist_empty(&grp->slots[grp->front]))
		__clear_bit(0, &grp->full_slots);
}

/*
 * Returns the first aggregate in the first non-empty bucket of the
 * group. As a side effect, adjusts the bucket list so the first
 * non-empty bucket is at position 0 in full_slots.
 */
static struct qfq_aggregate *qfq_slot_scan(struct qfq_group *grp)
{
	unsigned int i;

	pr_debug("qfq slot_scan: grp %u full %#lx\n",
		 grp->index, grp->full_slots);

	if (grp->full_slots == 0)
		return NULL;

	i = __ffs(grp->full_slots);  /* zero based */
	if (i > 0) {
		grp->front = (grp->front + i) % QFQ_MAX_SLOTS;
		grp->full_slots >>= i;
	}

	return qfq_slot_head(grp);
}

/*
 * adjust the bucket list. When the start time of a group decreases,
 * we move the index down (modulo QFQ_MAX_SLOTS) so we don't need to
 * move the objects. The mask of occupied slots must be shifted
 * because we use ffs() to find the first non-empty slot.
 * This covers decreases in the group's start time, but what about
 * increases of the start time?
 * Here too we should make sure that i is less than 32
 */
static void qfq_slot_rotate(struct qfq_group *grp, u64 roundedS)
{
	unsigned int i = (grp->S - roundedS) >> grp->slot_shift;

	grp->full_slots <<= i;
	grp->front = (grp->front - i) % QFQ_MAX_SLOTS;
}

static void qfq_update_eligible(struct qfq_sched *q)
{
	struct qfq_group *grp;
	unsigned long ineligible;

	ineligible = q->bitmaps[IR] | q->bitmaps[IB];
	if (ineligible) {
		if (!q->bitmaps[ER]) {
			grp = qfq_ffs(q, ineligible);
			if (qfq_gt(grp->S, q->V))
				q->V = grp->S;
		}
		qfq_make_eligible(q);
	}
}

976462dbc91SPaolo Valente /* Dequeue head packet of the head class in the DRR queue of the aggregate. */
agg_dequeue(struct qfq_aggregate * agg,struct qfq_class * cl,unsigned int len)977*8fc134feSvalis static struct sk_buff *agg_dequeue(struct qfq_aggregate *agg,
978462dbc91SPaolo Valente struct qfq_class *cl, unsigned int len)
9790545a303Sstephen hemminger {
980*8fc134feSvalis struct sk_buff *skb = qdisc_dequeue_peeked(cl->qdisc);
981*8fc134feSvalis
982*8fc134feSvalis if (!skb)
983*8fc134feSvalis return NULL;
9840545a303Sstephen hemminger
985462dbc91SPaolo Valente cl->deficit -= (int) len;
9860545a303Sstephen hemminger
987462dbc91SPaolo Valente if (cl->qdisc->q.qlen == 0) /* no more packets, remove from list */
988462dbc91SPaolo Valente list_del(&cl->alist);
989462dbc91SPaolo Valente else if (cl->deficit < qdisc_pkt_len(cl->qdisc->ops->peek(cl->qdisc))) {
990462dbc91SPaolo Valente cl->deficit += agg->lmax;
991462dbc91SPaolo Valente list_move_tail(&cl->alist, &agg->active);
992462dbc91SPaolo Valente }
993*8fc134feSvalis
994*8fc134feSvalis return skb;
9950545a303Sstephen hemminger }
9960545a303Sstephen hemminger
997462dbc91SPaolo Valente static inline struct sk_buff *qfq_peek_skb(struct qfq_aggregate *agg,
998462dbc91SPaolo Valente struct qfq_class **cl,
999462dbc91SPaolo Valente unsigned int *len)
1000462dbc91SPaolo Valente {
1001462dbc91SPaolo Valente struct sk_buff *skb;
1002462dbc91SPaolo Valente
1003462dbc91SPaolo Valente *cl = list_first_entry(&agg->active, struct qfq_class, alist);
1004462dbc91SPaolo Valente skb = (*cl)->qdisc->ops->peek((*cl)->qdisc);
1005462dbc91SPaolo Valente if (skb == NULL)
1006462dbc91SPaolo Valente WARN_ONCE(1, "qfq_dequeue: non-workconserving leaf\n");
1007462dbc91SPaolo Valente else
1008462dbc91SPaolo Valente *len = qdisc_pkt_len(skb);
1009462dbc91SPaolo Valente
1010462dbc91SPaolo Valente return skb;
1011462dbc91SPaolo Valente }
1012462dbc91SPaolo Valente
1013462dbc91SPaolo Valente /* Update F according to the actual service received by the aggregate. */
1014462dbc91SPaolo Valente static inline void charge_actual_service(struct qfq_aggregate *agg)
1015462dbc91SPaolo Valente {
10169b99b7e9SPaolo Valente /* Compute the service received by the aggregate, taking into
10179b99b7e9SPaolo Valente * account that, after decreasing the number of classes in
10189b99b7e9SPaolo Valente * agg, it may happen that
10199b99b7e9SPaolo Valente * agg->initial_budget - agg->budget > agg->budgetmax
10209b99b7e9SPaolo Valente */
10219b99b7e9SPaolo Valente u32 service_received = min(agg->budgetmax,
10229b99b7e9SPaolo Valente agg->initial_budget - agg->budget);
1023462dbc91SPaolo Valente
1024462dbc91SPaolo Valente agg->F = agg->S + (u64)service_received * agg->inv_w;
10250545a303Sstephen hemminger }
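
/* Example (hypothetical numbers): if the aggregate started the round with
 * initial_budget = budgetmax = 3000 bytes and ends it with budget = 800,
 * then service_received = min(3000, 2200) = 2200 and F is set to
 * S + 2200 * inv_w rather than S + budgetmax * inv_w: the finish time
 * reflects only the bytes actually transmitted.
 */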
10260545a303Sstephen hemminger
102788d4f419SPaolo Valente /* Assign a reasonable start time for a new aggregate in group i.
102888d4f419SPaolo Valente * Admissible values for \hat(F) are multiples of \sigma_i
102988d4f419SPaolo Valente * no greater than V+\sigma_i. Larger values mean that
103088d4f419SPaolo Valente * we had a wraparound so we consider the timestamp to be stale.
103188d4f419SPaolo Valente *
103288d4f419SPaolo Valente * If F is not stale and F >= V then we set S = F.
103388d4f419SPaolo Valente * Otherwise we should assign S = V, but this may violate
103488d4f419SPaolo Valente * the ordering in EB (see [2]). So, if we have groups in ER,
103588d4f419SPaolo Valente * set S to the F_j of the first group j which would be blocking us.
103688d4f419SPaolo Valente * We are guaranteed not to move S backward because
103788d4f419SPaolo Valente * otherwise our group i would still be blocked.
103888d4f419SPaolo Valente */
103988d4f419SPaolo Valente static void qfq_update_start(struct qfq_sched *q, struct qfq_aggregate *agg)
104088d4f419SPaolo Valente {
104188d4f419SPaolo Valente unsigned long mask;
104288d4f419SPaolo Valente u64 limit, roundedF;
104388d4f419SPaolo Valente int slot_shift = agg->grp->slot_shift;
104488d4f419SPaolo Valente
104588d4f419SPaolo Valente roundedF = qfq_round_down(agg->F, slot_shift);
104688d4f419SPaolo Valente limit = qfq_round_down(q->V, slot_shift) + (1ULL << slot_shift);
104788d4f419SPaolo Valente
104888d4f419SPaolo Valente if (!qfq_gt(agg->F, q->V) || qfq_gt(roundedF, limit)) {
104988d4f419SPaolo Valente /* timestamp was stale */
105088d4f419SPaolo Valente mask = mask_from(q->bitmaps[ER], agg->grp->index);
105188d4f419SPaolo Valente if (mask) {
105288d4f419SPaolo Valente struct qfq_group *next = qfq_ffs(q, mask);
105388d4f419SPaolo Valente if (qfq_gt(roundedF, next->F)) {
105488d4f419SPaolo Valente if (qfq_gt(limit, next->F))
105588d4f419SPaolo Valente agg->S = next->F;
105688d4f419SPaolo Valente else /* preserve timestamp correctness */
105788d4f419SPaolo Valente agg->S = limit;
105888d4f419SPaolo Valente return;
105988d4f419SPaolo Valente }
106088d4f419SPaolo Valente }
106188d4f419SPaolo Valente agg->S = q->V;
106288d4f419SPaolo Valente } else /* timestamp is not stale */
106388d4f419SPaolo Valente agg->S = agg->F;
106488d4f419SPaolo Valente }
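
/* Worked example (hypothetical values): with slot_shift = 10 (so
 * \sigma_i = 1024) and q->V = 5000, limit = 4096 + 1024 = 5120.
 * - agg->F = 5200: F > V and roundedF = 5120 does not exceed limit,
 *   so the timestamp is fresh and S = F = 5200.
 * - agg->F = 7000: roundedF = 6144 > 5120, the timestamp is stale and
 *   S falls back to V = 5000, unless the first ER group at our index
 *   or above has F < roundedF, in which case S is set to that group's
 *   F (capped at limit) to preserve the EB ordering.
 */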
106588d4f419SPaolo Valente
106688d4f419SPaolo Valente /* Update the timestamps of agg before scheduling/rescheduling it for
106788d4f419SPaolo Valente * service. In particular, assign to agg->F its maximum possible
106888d4f419SPaolo Valente * value, i.e., the virtual finish time with which the aggregate
106988d4f419SPaolo Valente * should be labeled if it used all its budget once in service.
107088d4f419SPaolo Valente */
107188d4f419SPaolo Valente static inline void
107288d4f419SPaolo Valente qfq_update_agg_ts(struct qfq_sched *q,
107388d4f419SPaolo Valente struct qfq_aggregate *agg, enum update_reason reason)
107488d4f419SPaolo Valente {
107588d4f419SPaolo Valente if (reason != requeue)
107688d4f419SPaolo Valente qfq_update_start(q, agg);
107788d4f419SPaolo Valente else /* just charge agg for the service received */
107888d4f419SPaolo Valente agg->S = agg->F;
107988d4f419SPaolo Valente
108088d4f419SPaolo Valente agg->F = agg->S + (u64)agg->budgetmax * agg->inv_w;
108188d4f419SPaolo Valente }
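
/* Either way, F ends up budgetmax * inv_w virtual-time units after S:
 * on a requeue S simply takes the previous F (already adjusted by
 * charge_actual_service() for the bytes actually sent), while in the
 * other cases qfq_update_start() picks S from the old F, from V, or
 * from a blocking group's F as described above.
 */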
10822f3b89a1SPaolo Valente
10832f3b89a1SPaolo Valente static void qfq_schedule_agg(struct qfq_sched *q, struct qfq_aggregate *agg);
10842f3b89a1SPaolo Valente
10850545a303Sstephen hemminger static struct sk_buff *qfq_dequeue(struct Qdisc *sch)
10860545a303Sstephen hemminger {
10870545a303Sstephen hemminger struct qfq_sched *q = qdisc_priv(sch);
1088462dbc91SPaolo Valente struct qfq_aggregate *in_serv_agg = q->in_serv_agg;
10890545a303Sstephen hemminger struct qfq_class *cl;
1090462dbc91SPaolo Valente struct sk_buff *skb = NULL;
1091462dbc91SPaolo Valente /* next-packet len, 0 means no more active classes in in-service agg */
1092462dbc91SPaolo Valente unsigned int len = 0;
1093462dbc91SPaolo Valente
1094462dbc91SPaolo Valente if (in_serv_agg == NULL)
1095462dbc91SPaolo Valente return NULL;
1096462dbc91SPaolo Valente
1097462dbc91SPaolo Valente if (!list_empty(&in_serv_agg->active))
1098462dbc91SPaolo Valente skb = qfq_peek_skb(in_serv_agg, &cl, &len);
1099462dbc91SPaolo Valente
1100462dbc91SPaolo Valente /*
1101462dbc91SPaolo Valente * If there are no active classes in the in-service aggregate,
1102462dbc91SPaolo Valente * or if the aggregate does not have enough budget to serve its next
1103462dbc91SPaolo Valente * class, then choose the next aggregate to serve.
1104462dbc91SPaolo Valente */
1105462dbc91SPaolo Valente if (len == 0 || in_serv_agg->budget < len) {
1106462dbc91SPaolo Valente charge_actual_service(in_serv_agg);
1107462dbc91SPaolo Valente
1108462dbc91SPaolo Valente /* recharge the budget of the aggregate */
1109462dbc91SPaolo Valente in_serv_agg->initial_budget = in_serv_agg->budget =
1110462dbc91SPaolo Valente in_serv_agg->budgetmax;
1111462dbc91SPaolo Valente
11122f3b89a1SPaolo Valente if (!list_empty(&in_serv_agg->active)) {
1113462dbc91SPaolo Valente /*
1114462dbc91SPaolo Valente * Still active: reschedule for
1115462dbc91SPaolo Valente * service. Possible optimization: if no other
1116462dbc91SPaolo Valente * aggregate is active, then there is no point
1117462dbc91SPaolo Valente * in rescheduling this aggregate, and we can
1118462dbc91SPaolo Valente * just keep it as the in-service one. However, this
1119462dbc91SPaolo Valente * should be a corner case, and to
1120462dbc91SPaolo Valente * handle it, we would need to maintain an
1121462dbc91SPaolo Valente * extra num_active_aggs field.
1122462dbc91SPaolo Valente */
11232f3b89a1SPaolo Valente qfq_update_agg_ts(q, in_serv_agg, requeue);
11242f3b89a1SPaolo Valente qfq_schedule_agg(q, in_serv_agg);
11252f3b89a1SPaolo Valente } else if (sch->q.qlen == 0) { /* no aggregate to serve */
1126462dbc91SPaolo Valente q->in_serv_agg = NULL;
1127462dbc91SPaolo Valente return NULL;
1128462dbc91SPaolo Valente }
1129462dbc91SPaolo Valente
1130462dbc91SPaolo Valente /*
1131462dbc91SPaolo Valente * If we get here, there are other aggregates queued:
1132462dbc91SPaolo Valente * choose the new aggregate to serve.
1133462dbc91SPaolo Valente */
1134462dbc91SPaolo Valente in_serv_agg = q->in_serv_agg = qfq_choose_next_agg(q);
1135462dbc91SPaolo Valente skb = qfq_peek_skb(in_serv_agg, &cl, &len);
1136462dbc91SPaolo Valente }
1137462dbc91SPaolo Valente if (!skb)
1138462dbc91SPaolo Valente return NULL;
1139462dbc91SPaolo Valente
1140462dbc91SPaolo Valente sch->q.qlen--;
1141*8fc134feSvalis
1142*8fc134feSvalis skb = agg_dequeue(in_serv_agg, cl, len);
1143*8fc134feSvalis
1144*8fc134feSvalis if (!skb) {
1145*8fc134feSvalis sch->q.qlen++;
1146*8fc134feSvalis return NULL;
1147*8fc134feSvalis }
1148*8fc134feSvalis
1149*8fc134feSvalis qdisc_qstats_backlog_dec(sch, skb);
1150462dbc91SPaolo Valente qdisc_bstats_update(sch, skb);
1151462dbc91SPaolo Valente
1152a0143efaSPaolo Valente /* If lmax is lowered, through qfq_change_class, for a class
1153a0143efaSPaolo Valente * that still owns pending packets larger than the new value
1154a0143efaSPaolo Valente * of lmax, then the following condition may hold.
1155a0143efaSPaolo Valente */
1156a0143efaSPaolo Valente if (unlikely(in_serv_agg->budget < len))
1157a0143efaSPaolo Valente in_serv_agg->budget = 0;
1158a0143efaSPaolo Valente else
1159462dbc91SPaolo Valente in_serv_agg->budget -= len;
1160a0143efaSPaolo Valente
116187f40dd6SPaolo Valente q->V += (u64)len * q->iwsum;
1162462dbc91SPaolo Valente pr_debug("qfq dequeue: len %u F %lld now %lld\n",
1163462dbc91SPaolo Valente len, (unsigned long long) in_serv_agg->F,
1164462dbc91SPaolo Valente (unsigned long long) q->V);
1165462dbc91SPaolo Valente
1166462dbc91SPaolo Valente return skb;
1167462dbc91SPaolo Valente }
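
/* Dequeue walk-through (hypothetical numbers): suppose the in-service
 * aggregate has 600 bytes of budget left and its head packet is 1000
 * bytes long.  Since budget < len, the aggregate is charged for the
 * service it actually received, its budget is recharged to budgetmax,
 * it is rescheduled (requeue) if it still has active classes, and
 * qfq_choose_next_agg() picks the new in-service aggregate, possibly
 * the same one.  The packet actually dequeued then decreases the new
 * in-service aggregate's budget by its length and advances V by
 * len * q->iwsum.
 */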
1168462dbc91SPaolo Valente
1169462dbc91SPaolo Valente static struct qfq_aggregate *qfq_choose_next_agg(struct qfq_sched *q)
1170462dbc91SPaolo Valente {
1171462dbc91SPaolo Valente struct qfq_group *grp;
1172462dbc91SPaolo Valente struct qfq_aggregate *agg, *new_front_agg;
1173462dbc91SPaolo Valente u64 old_F;
1174462dbc91SPaolo Valente
1175462dbc91SPaolo Valente qfq_update_eligible(q);
1176462dbc91SPaolo Valente q->oldV = q->V;
11770545a303Sstephen hemminger
11780545a303Sstephen hemminger if (!q->bitmaps[ER])
11790545a303Sstephen hemminger return NULL;
11800545a303Sstephen hemminger
11810545a303Sstephen hemminger grp = qfq_ffs(q, q->bitmaps[ER]);
1182462dbc91SPaolo Valente old_F = grp->F;
11830545a303Sstephen hemminger
1184462dbc91SPaolo Valente agg = qfq_slot_head(grp);
11850545a303Sstephen hemminger
1186462dbc91SPaolo Valente /* agg starts to be served, remove it from schedule */
1187462dbc91SPaolo Valente qfq_front_slot_remove(grp);
11880545a303Sstephen hemminger
1189462dbc91SPaolo Valente new_front_agg = qfq_slot_scan(grp);
11900545a303Sstephen hemminger
1191462dbc91SPaolo Valente if (new_front_agg == NULL) /* group is now inactive, remove from ER */
11920545a303Sstephen hemminger __clear_bit(grp->index, &q->bitmaps[ER]);
11930545a303Sstephen hemminger else {
1194462dbc91SPaolo Valente u64 roundedS = qfq_round_down(new_front_agg->S,
1195462dbc91SPaolo Valente grp->slot_shift);
11960545a303Sstephen hemminger unsigned int s;
11970545a303Sstephen hemminger
11980545a303Sstephen hemminger if (grp->S == roundedS)
1199462dbc91SPaolo Valente return agg;
12000545a303Sstephen hemminger grp->S = roundedS;
12010545a303Sstephen hemminger grp->F = roundedS + (2ULL << grp->slot_shift);
12020545a303Sstephen hemminger __clear_bit(grp->index, &q->bitmaps[ER]);
12030545a303Sstephen hemminger s = qfq_calc_state(q, grp);
12040545a303Sstephen hemminger __set_bit(grp->index, &q->bitmaps[s]);
12050545a303Sstephen hemminger }
12060545a303Sstephen hemminger
12070545a303Sstephen hemminger qfq_unblock_groups(q, grp->index, old_F);
12080545a303Sstephen hemminger
1209462dbc91SPaolo Valente return agg;
12100545a303Sstephen hemminger }
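
/* Notes on the group update above: if the new head aggregate starts in
 * the same slot (grp->S == roundedS), the group's timestamps and state
 * are left untouched; otherwise the group gets fresh timestamps
 * S = roundedS and F = roundedS + 2 * \sigma_i, its state bit is
 * recomputed, and old_F is used by qfq_unblock_groups() to release
 * groups that were blocked by this group's previous finish time.
 */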
12110545a303Sstephen hemminger
1212ac5c66f2SPetr Machata static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
1213520ac30fSEric Dumazet struct sk_buff **to_free)
12140545a303Sstephen hemminger {
1215f6bab199SToke Høiland-Jørgensen unsigned int len = qdisc_pkt_len(skb), gso_segs;
12160545a303Sstephen hemminger struct qfq_sched *q = qdisc_priv(sch);
12170545a303Sstephen hemminger struct qfq_class *cl;
1218462dbc91SPaolo Valente struct qfq_aggregate *agg;
1219f54ba779SDavid S. Miller int err = 0;
122037d9cf1aSToke Høiland-Jørgensen bool first;
12210545a303Sstephen hemminger
12220545a303Sstephen hemminger cl = qfq_classify(skb, sch, &err);
12230545a303Sstephen hemminger if (cl == NULL) {
12240545a303Sstephen hemminger if (err & __NET_XMIT_BYPASS)
122525331d6cSJohn Fastabend qdisc_qstats_drop(sch);
122639ad1297SGao Feng __qdisc_drop(skb, to_free);
12270545a303Sstephen hemminger return err;
12280545a303Sstephen hemminger }
12290545a303Sstephen hemminger pr_debug("qfq_enqueue: cl = %x\n", cl->common.classid);
12300545a303Sstephen hemminger
1231f6bab199SToke Høiland-Jørgensen if (unlikely(cl->agg->lmax < len)) {
12323015f3d2SPaolo Valente pr_debug("qfq: increasing maxpkt from %u to %u for class %u",
1233f6bab199SToke Høiland-Jørgensen cl->agg->lmax, len, cl->common.classid);
1234f6bab199SToke Høiland-Jørgensen err = qfq_change_agg(sch, cl, cl->agg->class_weight, len);
12359b15350fSFlorian Westphal if (err) {
12369b15350fSFlorian Westphal cl->qstats.drops++;
1237520ac30fSEric Dumazet return qdisc_drop(skb, sch, to_free);
12389b15350fSFlorian Westphal }
12393015f3d2SPaolo Valente }
12403015f3d2SPaolo Valente
1241f6bab199SToke Høiland-Jørgensen gso_segs = skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
124237d9cf1aSToke Høiland-Jørgensen first = !cl->qdisc->q.qlen;
1243ac5c66f2SPetr Machata err = qdisc_enqueue(skb, cl->qdisc, to_free);
12440545a303Sstephen hemminger if (unlikely(err != NET_XMIT_SUCCESS)) {
12450545a303Sstephen hemminger pr_debug("qfq_enqueue: enqueue failed %d\n", err);
12460545a303Sstephen hemminger if (net_xmit_drop_count(err)) {
12470545a303Sstephen hemminger cl->qstats.drops++;
124825331d6cSJohn Fastabend qdisc_qstats_drop(sch);
12490545a303Sstephen hemminger }
12500545a303Sstephen hemminger return err;
12510545a303Sstephen hemminger }
12520545a303Sstephen hemminger
1253f56940daSAhmed S. Darwish _bstats_update(&cl->bstats, len, gso_segs);
1254f6bab199SToke Høiland-Jørgensen sch->qstats.backlog += len;
12550545a303Sstephen hemminger ++sch->q.qlen;
12560545a303Sstephen hemminger
1257462dbc91SPaolo Valente agg = cl->agg;
1258462dbc91SPaolo Valente /* if the queue was not empty, then done here */
125937d9cf1aSToke Høiland-Jørgensen if (!first) {
1260462dbc91SPaolo Valente if (unlikely(skb == cl->qdisc->ops->peek(cl->qdisc)) &&
1261462dbc91SPaolo Valente list_first_entry(&agg->active, struct qfq_class, alist)
1262f6bab199SToke Høiland-Jørgensen == cl && cl->deficit < len)
1263462dbc91SPaolo Valente list_move_tail(&cl->alist, &agg->active);
12640545a303Sstephen hemminger
1265462dbc91SPaolo Valente return err;
1266462dbc91SPaolo Valente }
1267462dbc91SPaolo Valente
1268462dbc91SPaolo Valente /* schedule class for service within the aggregate */
1269462dbc91SPaolo Valente cl->deficit = agg->lmax;
1270462dbc91SPaolo Valente list_add_tail(&cl->alist, &agg->active);
1271462dbc91SPaolo Valente
12722f3b89a1SPaolo Valente if (list_first_entry(&agg->active, struct qfq_class, alist) != cl ||
12732f3b89a1SPaolo Valente q->in_serv_agg == agg)
12742f3b89a1SPaolo Valente return err; /* non-empty or in service, nothing else to do */
1275462dbc91SPaolo Valente
12762f3b89a1SPaolo Valente qfq_activate_agg(q, agg, enqueue);
1277be72f63bSPaolo Valente
1278be72f63bSPaolo Valente return err;
1279be72f63bSPaolo Valente }
1280be72f63bSPaolo Valente
1281be72f63bSPaolo Valente /*
1282462dbc91SPaolo Valente * Schedule aggregate according to its timestamps.
1283be72f63bSPaolo Valente */
1284462dbc91SPaolo Valente static void qfq_schedule_agg(struct qfq_sched *q, struct qfq_aggregate *agg)
1285be72f63bSPaolo Valente {
1286462dbc91SPaolo Valente struct qfq_group *grp = agg->grp;
1287be72f63bSPaolo Valente u64 roundedS;
1288be72f63bSPaolo Valente int s;
1289be72f63bSPaolo Valente
1290462dbc91SPaolo Valente roundedS = qfq_round_down(agg->S, grp->slot_shift);
12910545a303Sstephen hemminger
12920545a303Sstephen hemminger /*
1293462dbc91SPaolo Valente * Insert agg in the correct bucket.
1294462dbc91SPaolo Valente * If agg->S >= grp->S we don't need to adjust the
12950545a303Sstephen hemminger * bucket list and simply go to the insertion phase.
12960545a303Sstephen hemminger * Otherwise grp->S is decreasing, we must make room
12970545a303Sstephen hemminger * in the bucket list, and also recompute the group state.
12980545a303Sstephen hemminger * Finally, if there were no flows in this group and nobody
12990545a303Sstephen hemminger * was in ER make sure to adjust V.
13000545a303Sstephen hemminger */
13010545a303Sstephen hemminger if (grp->full_slots) {
1302462dbc91SPaolo Valente if (!qfq_gt(grp->S, agg->S))
13030545a303Sstephen hemminger goto skip_update;
13040545a303Sstephen hemminger
1305462dbc91SPaolo Valente /* create a slot for this agg->S */
13060545a303Sstephen hemminger qfq_slot_rotate(grp, roundedS);
13070545a303Sstephen hemminger /* group was surely ineligible, remove */
13080545a303Sstephen hemminger __clear_bit(grp->index, &q->bitmaps[IR]);
13090545a303Sstephen hemminger __clear_bit(grp->index, &q->bitmaps[IB]);
131040dd2d54SPaolo Valente } else if (!q->bitmaps[ER] && qfq_gt(roundedS, q->V) &&
131140dd2d54SPaolo Valente q->in_serv_agg == NULL)
13120545a303Sstephen hemminger q->V = roundedS;
13130545a303Sstephen hemminger
13140545a303Sstephen hemminger grp->S = roundedS;
13150545a303Sstephen hemminger grp->F = roundedS + (2ULL << grp->slot_shift);
13160545a303Sstephen hemminger s = qfq_calc_state(q, grp);
13170545a303Sstephen hemminger __set_bit(grp->index, &q->bitmaps[s]);
13180545a303Sstephen hemminger
13190545a303Sstephen hemminger pr_debug("qfq enqueue: new state %d %#lx S %lld F %lld V %lld\n",
13200545a303Sstephen hemminger s, q->bitmaps[s],
1321462dbc91SPaolo Valente (unsigned long long) agg->S,
1322462dbc91SPaolo Valente (unsigned long long) agg->F,
13230545a303Sstephen hemminger (unsigned long long) q->V);
13240545a303Sstephen hemminger
13250545a303Sstephen hemminger skip_update:
1326462dbc91SPaolo Valente qfq_slot_insert(grp, agg, roundedS);
13270545a303Sstephen hemminger }
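
/* Insertion example (hypothetical values): with slot_shift = 10, an
 * aggregate with S = 5200 rounds down to roundedS = 5120.  If the group
 * already holds slots and grp->S = 6144 > 5200, qfq_slot_rotate() shifts
 * the bucket list by (6144 - 5120) >> 10 = 1 slot; the group then takes
 * S = 5120, F = 5120 + 2048 = 7168, and its bitmap state is recomputed
 * before the aggregate is inserted in the bucket for roundedS.  If
 * instead the group was empty, no group is in ER, no aggregate is in
 * service and roundedS is ahead of V, V itself jumps forward to roundedS.
 */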
13280545a303Sstephen hemminger
13290545a303Sstephen hemminger
1330462dbc91SPaolo Valente /* Update agg ts and schedule agg for service */
1331462dbc91SPaolo Valente static void qfq_activate_agg(struct qfq_sched *q, struct qfq_aggregate *agg,
1332462dbc91SPaolo Valente enum update_reason reason)
1333462dbc91SPaolo Valente {
13342f3b89a1SPaolo Valente agg->initial_budget = agg->budget = agg->budgetmax; /* recharge budg. */
13352f3b89a1SPaolo Valente
1336462dbc91SPaolo Valente qfq_update_agg_ts(q, agg, reason);
13372f3b89a1SPaolo Valente if (q->in_serv_agg == NULL) { /* no aggr. in service or scheduled */
13382f3b89a1SPaolo Valente q->in_serv_agg = agg; /* start serving this aggregate */
13392f3b89a1SPaolo Valente /* update V: to be in service, agg must be eligible */
13402f3b89a1SPaolo Valente q->oldV = q->V = agg->S;
13412f3b89a1SPaolo Valente } else if (agg != q->in_serv_agg)
1342462dbc91SPaolo Valente qfq_schedule_agg(q, agg);
1343462dbc91SPaolo Valente }
1344462dbc91SPaolo Valente
13450545a303Sstephen hemminger static void qfq_slot_remove(struct qfq_sched *q, struct qfq_group *grp,
1346462dbc91SPaolo Valente struct qfq_aggregate *agg)
13470545a303Sstephen hemminger {
13480545a303Sstephen hemminger unsigned int i, offset;
13490545a303Sstephen hemminger u64 roundedS;
13500545a303Sstephen hemminger
1351462dbc91SPaolo Valente roundedS = qfq_round_down(agg->S, grp->slot_shift);
13520545a303Sstephen hemminger offset = (roundedS - grp->S) >> grp->slot_shift;
1353462dbc91SPaolo Valente
13540545a303Sstephen hemminger i = (grp->front + offset) % QFQ_MAX_SLOTS;
13550545a303Sstephen hemminger
1356462dbc91SPaolo Valente hlist_del(&agg->next);
13570545a303Sstephen hemminger if (hlist_empty(&grp->slots[i]))
13580545a303Sstephen hemminger __clear_bit(offset, &grp->full_slots);
13590545a303Sstephen hemminger }
13600545a303Sstephen hemminger
13610545a303Sstephen hemminger /*
1362462dbc91SPaolo Valente * Called to forcibly deschedule an aggregate. If the aggregate is
1363462dbc91SPaolo Valente * not in the front bucket, or if the front bucket holds other
1364462dbc91SPaolo Valente * aggregates as well, we can simply remove the aggregate with no other
1365462dbc91SPaolo Valente * side effects.
13660545a303Sstephen hemminger * Otherwise we must propagate the event up.
13670545a303Sstephen hemminger */
1368462dbc91SPaolo Valente static void qfq_deactivate_agg(struct qfq_sched *q, struct qfq_aggregate *agg)
13690545a303Sstephen hemminger {
1370462dbc91SPaolo Valente struct qfq_group *grp = agg->grp;
13710545a303Sstephen hemminger unsigned long mask;
13720545a303Sstephen hemminger u64 roundedS;
13730545a303Sstephen hemminger int s;
13740545a303Sstephen hemminger
1375462dbc91SPaolo Valente if (agg == q->in_serv_agg) {
1376462dbc91SPaolo Valente charge_actual_service(agg);
1377462dbc91SPaolo Valente q->in_serv_agg = qfq_choose_next_agg(q);
1378462dbc91SPaolo Valente return;
1379462dbc91SPaolo Valente }
1380462dbc91SPaolo Valente
1381462dbc91SPaolo Valente agg->F = agg->S;
1382462dbc91SPaolo Valente qfq_slot_remove(q, grp, agg);
13830545a303Sstephen hemminger
13840545a303Sstephen hemminger if (!grp->full_slots) {
13850545a303Sstephen hemminger __clear_bit(grp->index, &q->bitmaps[IR]);
13860545a303Sstephen hemminger __clear_bit(grp->index, &q->bitmaps[EB]);
13870545a303Sstephen hemminger __clear_bit(grp->index, &q->bitmaps[IB]);
13880545a303Sstephen hemminger
13890545a303Sstephen hemminger if (test_bit(grp->index, &q->bitmaps[ER]) &&
13900545a303Sstephen hemminger !(q->bitmaps[ER] & ~((1UL << grp->index) - 1))) {
13910545a303Sstephen hemminger mask = q->bitmaps[ER] & ((1UL << grp->index) - 1);
13920545a303Sstephen hemminger if (mask)
13930545a303Sstephen hemminger mask = ~((1UL << __fls(mask)) - 1);
13940545a303Sstephen hemminger else
13950545a303Sstephen hemminger mask = ~0UL;
13960545a303Sstephen hemminger qfq_move_groups(q, mask, EB, ER);
13970545a303Sstephen hemminger qfq_move_groups(q, mask, IB, IR);
13980545a303Sstephen hemminger }
13990545a303Sstephen hemminger __clear_bit(grp->index, &q->bitmaps[ER]);
14000545a303Sstephen hemminger } else if (hlist_empty(&grp->slots[grp->front])) {
1401462dbc91SPaolo Valente agg = qfq_slot_scan(grp);
1402462dbc91SPaolo Valente roundedS = qfq_round_down(agg->S, grp->slot_shift);
14030545a303Sstephen hemminger if (grp->S != roundedS) {
14040545a303Sstephen hemminger __clear_bit(grp->index, &q->bitmaps[ER]);
14050545a303Sstephen hemminger __clear_bit(grp->index, &q->bitmaps[IR]);
14060545a303Sstephen hemminger __clear_bit(grp->index, &q->bitmaps[EB]);
14070545a303Sstephen hemminger __clear_bit(grp->index, &q->bitmaps[IB]);
14080545a303Sstephen hemminger grp->S = roundedS;
14090545a303Sstephen hemminger grp->F = roundedS + (2ULL << grp->slot_shift);
14100545a303Sstephen hemminger s = qfq_calc_state(q, grp);
14110545a303Sstephen hemminger __set_bit(grp->index, &q->bitmaps[s]);
14120545a303Sstephen hemminger }
14130545a303Sstephen hemminger }
14140545a303Sstephen hemminger }
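
/* Unblocking sketch for the branch above: mask selects every group from
 * the highest-index ER group below this one upward (or all groups, ~0UL,
 * when no lower-index group is in ER), and the selected groups are moved
 * from EB back to ER and from IB back to IR, since the finish time that
 * was holding them blocked is gone.
 */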
14150545a303Sstephen hemminger
14160545a303Sstephen hemminger static void qfq_qlen_notify(struct Qdisc *sch, unsigned long arg)
14170545a303Sstephen hemminger {
14180545a303Sstephen hemminger struct qfq_sched *q = qdisc_priv(sch);
14190545a303Sstephen hemminger struct qfq_class *cl = (struct qfq_class *)arg;
14200545a303Sstephen hemminger
14210545a303Sstephen hemminger qfq_deactivate_class(q, cl);
14220545a303Sstephen hemminger }
14230545a303Sstephen hemminger
1424e63d7dfdSAlexander Aring static int qfq_init_qdisc(struct Qdisc *sch, struct nlattr *opt,
1425e63d7dfdSAlexander Aring struct netlink_ext_ack *extack)
14260545a303Sstephen hemminger {
14270545a303Sstephen hemminger struct qfq_sched *q = qdisc_priv(sch);
14280545a303Sstephen hemminger struct qfq_group *grp;
14290545a303Sstephen hemminger int i, j, err;
1430462dbc91SPaolo Valente u32 max_cl_shift, maxbudg_shift, max_classes;
14310545a303Sstephen hemminger
14328d1a77f9SAlexander Aring err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
14336529eabaSJiri Pirko if (err)
14346529eabaSJiri Pirko return err;
14356529eabaSJiri Pirko
14360545a303Sstephen hemminger err = qdisc_class_hash_init(&q->clhash);
14370545a303Sstephen hemminger if (err < 0)
14380545a303Sstephen hemminger return err;
14390545a303Sstephen hemminger
14407d18a078SEric Dumazet max_classes = min_t(u64, (u64)qdisc_dev(sch)->tx_queue_len + 1,
14417d18a078SEric Dumazet QFQ_MAX_AGG_CLASSES);
1442462dbc91SPaolo Valente /* max_cl_shift = floor(log_2(max_classes)) */
1443462dbc91SPaolo Valente max_cl_shift = __fls(max_classes);
1444462dbc91SPaolo Valente q->max_agg_classes = 1<<max_cl_shift;
1445462dbc91SPaolo Valente
1446462dbc91SPaolo Valente /* maxbudg_shift = log2(max_len * max_classes_per_agg) */
1447462dbc91SPaolo Valente maxbudg_shift = QFQ_MTU_SHIFT + max_cl_shift;
1448462dbc91SPaolo Valente q->min_slot_shift = FRAC_BITS + maxbudg_shift - QFQ_MAX_INDEX;
1449462dbc91SPaolo Valente
14500545a303Sstephen hemminger for (i = 0; i <= QFQ_MAX_INDEX; i++) {
14510545a303Sstephen hemminger grp = &q->groups[i];
14520545a303Sstephen hemminger grp->index = i;
1453462dbc91SPaolo Valente grp->slot_shift = q->min_slot_shift + i;
14540545a303Sstephen hemminger for (j = 0; j < QFQ_MAX_SLOTS; j++)
14550545a303Sstephen hemminger INIT_HLIST_HEAD(&grp->slots[j]);
14560545a303Sstephen hemminger }
14570545a303Sstephen hemminger
1458462dbc91SPaolo Valente INIT_HLIST_HEAD(&q->nonfull_aggs);
1459462dbc91SPaolo Valente
14600545a303Sstephen hemminger return 0;
14610545a303Sstephen hemminger }
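
/* Sizing sketch (assuming the constants defined earlier in this file,
 * e.g. QFQ_MAX_AGG_CLASSES = 8): with tx_queue_len = 1000,
 * max_classes = min(1001, 8) = 8 and max_cl_shift = __fls(8) = 3, so up
 * to 8 classes fit in one aggregate; maxbudg_shift = QFQ_MTU_SHIFT + 3,
 * min_slot_shift follows from FRAC_BITS and QFQ_MAX_INDEX as computed
 * above, and group i uses slot_shift = min_slot_shift + i.
 */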
14620545a303Sstephen hemminger
14630545a303Sstephen hemminger static void qfq_reset_qdisc(struct Qdisc *sch)
14640545a303Sstephen hemminger {
14650545a303Sstephen hemminger struct qfq_sched *q = qdisc_priv(sch);
14660545a303Sstephen hemminger struct qfq_class *cl;
1467462dbc91SPaolo Valente unsigned int i;
14680545a303Sstephen hemminger
14690545a303Sstephen hemminger for (i = 0; i < q->clhash.hashsize; i++) {
1470b67bfe0dSSasha Levin hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
1471462dbc91SPaolo Valente if (cl->qdisc->q.qlen > 0)
1472462dbc91SPaolo Valente qfq_deactivate_class(q, cl);
1473462dbc91SPaolo Valente
14740545a303Sstephen hemminger qdisc_reset(cl->qdisc);
14750545a303Sstephen hemminger }
1476462dbc91SPaolo Valente }
14770545a303Sstephen hemminger }
14780545a303Sstephen hemminger
14790545a303Sstephen hemminger static void qfq_destroy_qdisc(struct Qdisc *sch)
14800545a303Sstephen hemminger {
14810545a303Sstephen hemminger struct qfq_sched *q = qdisc_priv(sch);
14820545a303Sstephen hemminger struct qfq_class *cl;
1483b67bfe0dSSasha Levin struct hlist_node *next;
14840545a303Sstephen hemminger unsigned int i;
14850545a303Sstephen hemminger
14866529eabaSJiri Pirko tcf_block_put(q->block);
14870545a303Sstephen hemminger
14880545a303Sstephen hemminger for (i = 0; i < q->clhash.hashsize; i++) {
1489b67bfe0dSSasha Levin hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i],
14900545a303Sstephen hemminger common.hnode) {
14910545a303Sstephen hemminger qfq_destroy_class(sch, cl);
14920545a303Sstephen hemminger }
14930545a303Sstephen hemminger }
14940545a303Sstephen hemminger qdisc_class_hash_destroy(&q->clhash);
14950545a303Sstephen hemminger }
14960545a303Sstephen hemminger
14970545a303Sstephen hemminger static const struct Qdisc_class_ops qfq_class_ops = {
14980545a303Sstephen hemminger .change = qfq_change_class,
14990545a303Sstephen hemminger .delete = qfq_delete_class,
1500143976ceSWANG Cong .find = qfq_search_class,
15016529eabaSJiri Pirko .tcf_block = qfq_tcf_block,
15020545a303Sstephen hemminger .bind_tcf = qfq_bind_tcf,
15030545a303Sstephen hemminger .unbind_tcf = qfq_unbind_tcf,
15040545a303Sstephen hemminger .graft = qfq_graft_class,
15050545a303Sstephen hemminger .leaf = qfq_class_leaf,
15060545a303Sstephen hemminger .qlen_notify = qfq_qlen_notify,
15070545a303Sstephen hemminger .dump = qfq_dump_class,
15080545a303Sstephen hemminger .dump_stats = qfq_dump_class_stats,
15090545a303Sstephen hemminger .walk = qfq_walk,
15100545a303Sstephen hemminger };
15110545a303Sstephen hemminger
15120545a303Sstephen hemminger static struct Qdisc_ops qfq_qdisc_ops __read_mostly = {
15130545a303Sstephen hemminger .cl_ops = &qfq_class_ops,
15140545a303Sstephen hemminger .id = "qfq",
15150545a303Sstephen hemminger .priv_size = sizeof(struct qfq_sched),
15160545a303Sstephen hemminger .enqueue = qfq_enqueue,
15170545a303Sstephen hemminger .dequeue = qfq_dequeue,
15180545a303Sstephen hemminger .peek = qdisc_peek_dequeued,
15190545a303Sstephen hemminger .init = qfq_init_qdisc,
15200545a303Sstephen hemminger .reset = qfq_reset_qdisc,
15210545a303Sstephen hemminger .destroy = qfq_destroy_qdisc,
15220545a303Sstephen hemminger .owner = THIS_MODULE,
15230545a303Sstephen hemminger };
15240545a303Sstephen hemminger
15250545a303Sstephen hemminger static int __init qfq_init(void)
15260545a303Sstephen hemminger {
15270545a303Sstephen hemminger return register_qdisc(&qfq_qdisc_ops);
15280545a303Sstephen hemminger }
15290545a303Sstephen hemminger
15300545a303Sstephen hemminger static void __exit qfq_exit(void)
15310545a303Sstephen hemminger {
15320545a303Sstephen hemminger unregister_qdisc(&qfq_qdisc_ops);
15330545a303Sstephen hemminger }
15340545a303Sstephen hemminger
15350545a303Sstephen hemminger module_init(qfq_init);
15360545a303Sstephen hemminger module_exit(qfq_exit);
15370545a303Sstephen hemminger MODULE_LICENSE("GPL");
1538