// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/sch_htb.c	Hierarchical token bucket, feed tree version
 *
 * Authors:	Martin Devera, <devik@cdi.cz>
 *
 * Credits (in time order) for older HTB versions:
 *		Stef Coene <stef.coene@docum.org>
 *			HTB support at LARTC mailing list
 *		Ondrej Kraus, <krauso@barr.cz>
 *			found missing INIT_QDISC(htb)
 *		Vladimir Smelhaus, Aamer Akhter, Bert Hubert
 *			helped a lot to locate nasty class stall bug
 *		Andi Kleen, Jamal Hadi, Bert Hubert
 *			code review and helpful comments on shaping
 *		Tomasz Wrona, <tw@eter.tym.pl>
 *			created test case so that I was able to fix nasty bug
 *		Wilfried Weissmann
 *			spotted bug in dequeue code and helped with fix
 *		Jiri Fojtasek
 *			fixed requeue routine
 *		and many others. thanks.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/list.h>
#include <linux/compiler.h>
#include <linux/rbtree.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <net/netlink.h>
#include <net/sch_generic.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>

/* HTB algorithm.
    Author: devik@cdi.cz
    ========================================================================
    HTB is like TBF with multiple classes. It is also similar to CBQ because
    it allows assigning a priority to each class in the hierarchy.
    In fact it is another implementation of Floyd's formal sharing.

    Levels:
    Each class is assigned a level. A leaf ALWAYS has level 0 and root
    classes have level TC_HTB_MAXDEPTH-1. Interior nodes have a level
    one less than their parent.
*/
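/* Illustrative example (hypothetical setup, not part of this file): a
 * two-level hierarchy built with tc. Leaves 1:10 and 1:11 sit at level 0
 * and may borrow from their parent 1:1, which sits one level above:
 *
 *   tc qdisc add dev eth0 root handle 1: htb default 10
 *   tc class add dev eth0 parent 1:  classid 1:1  htb rate 100mbit
 *   tc class add dev eth0 parent 1:1 classid 1:10 htb rate 60mbit ceil 100mbit
 *   tc class add dev eth0 parent 1:1 classid 1:11 htb rate 40mbit ceil 100mbit
 */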

static int htb_hysteresis __read_mostly = 0; /* whether to use mode hysteresis for speedup */
#define HTB_VER 0x30011		/* major must be matched with number supplied by TC as version */

#if HTB_VER >> 16 != TC_HTB_PROTOVER
#error "Mismatched sch_htb.c and pkt_sch.h"
#endif

/* Module parameter and sysfs export */
module_param(htb_hysteresis, int, 0640);
MODULE_PARM_DESC(htb_hysteresis, "Hysteresis mode, less CPU load, less accurate");

static int htb_rate_est = 0; /* htb classes have a default rate estimator */
module_param(htb_rate_est, int, 0640);
MODULE_PARM_DESC(htb_rate_est, "setup a default rate estimator (4sec 16sec) for htb classes");
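/* Usage sketch (assuming the standard module_param sysfs layout): both
 * knobs are adjustable at runtime once the module is loaded, e.g.
 *
 *   echo 1 > /sys/module/sch_htb/parameters/htb_hysteresis
 *   cat /sys/module/sch_htb/parameters/htb_rate_est
 */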

/* used internally to keep status of single class */
enum htb_cmode {
	HTB_CANT_SEND,		/* class can't send and can't borrow */
	HTB_MAY_BORROW,		/* class can't send but may borrow */
	HTB_CAN_SEND		/* class can send */
};
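/* Informal intuition (summary, not from the original source): CAN_SEND
 * means the class is within its own assured rate; MAY_BORROW means its
 * own tokens ran out but it is still under ceil, so it may compete for
 * spare bandwidth lent by ancestors; CANT_SEND means even ceil has been
 * exceeded and the class must wait for tokens to accumulate.
 */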

struct htb_prio {
	union {
		struct rb_root	row;
		struct rb_root	feed;
	};
	struct rb_node	*ptr;
	/* When class changes from state 1->2 and disconnects from
	 * parent's feed then we lose the ptr value and start from the
	 * first child again. Here we store classid of the
	 * last valid ptr (used when ptr is NULL).
	 */
	u32		last_ptr_id;
};

/* interior & leaf nodes; props specific to leaves are marked L:
 * To reduce false sharing, place mostly read fields at beginning,
 * and mostly written ones at the end.
 */
struct htb_class {
	struct Qdisc_class_common common;
	struct psched_ratecfg	rate;
	struct psched_ratecfg	ceil;
	s64			buffer, cbuffer;/* token bucket depth/rate */
	s64			mbuffer;	/* max wait time */
	u32			prio;		/* these two are used only by leaves... */
	int			quantum;	/* but stored for parent-to-leaf return */

	struct tcf_proto __rcu	*filter_list;	/* class attached filters */
	struct tcf_block	*block;

	int			level;		/* our level (see above) */
	unsigned int		children;
	struct htb_class	*parent;	/* parent class */

	struct net_rate_estimator __rcu *rate_est;

	/*
	 * Written often fields
	 */
	struct gnet_stats_basic_sync bstats;
	struct gnet_stats_basic_sync bstats_bias;
	struct tc_htb_xstats	xstats;	/* our special stats */

	/* token bucket parameters */
	s64			tokens, ctokens;/* current number of tokens */
	s64			t_c;		/* checkpoint time */

	union {
		struct htb_class_leaf {
			int		deficit[TC_HTB_MAXDEPTH];
			struct Qdisc	*q;
			struct netdev_queue *offload_queue;
		} leaf;
		struct htb_class_inner {
			struct htb_prio clprio[TC_HTB_NUMPRIO];
		} inner;
	};
	s64			pq_key;

	int			prio_activity;	/* for which prios are we active */
	enum htb_cmode		cmode;		/* current mode of the class */
	struct rb_node		pq_node;	/* node for event queue */
	struct rb_node		node[TC_HTB_NUMPRIO];	/* node for self or feed tree */

	unsigned int drops ____cacheline_aligned_in_smp;
	unsigned int		overlimits;
};

struct htb_level {
	struct rb_root	wait_pq;
	struct htb_prio hprio[TC_HTB_NUMPRIO];
};

struct htb_sched {
	struct Qdisc_class_hash clhash;
	int			defcls;		/* class where unclassified flows go to */
	int			rate2quantum;	/* quant = rate / rate2quantum */

	/* filters for qdisc itself */
	struct tcf_proto __rcu	*filter_list;
	struct tcf_block	*block;

#define HTB_WARN_TOOMANYEVENTS	0x1
	unsigned int		warned;	/* only one warning */
	int			direct_qlen;
	struct work_struct	work;

	/* non shaped skbs; let them go directly thru */
	struct qdisc_skb_head	direct_queue;
	u32			direct_pkts;
	u32			overlimits;

	struct qdisc_watchdog	watchdog;

	s64			now;	/* cached dequeue time */

	/* time of nearest event per level (row) */
	s64			near_ev_cache[TC_HTB_MAXDEPTH];

	int			row_mask[TC_HTB_MAXDEPTH];

	struct htb_level	hlevel[TC_HTB_MAXDEPTH];

	struct Qdisc		**direct_qdiscs;
	unsigned int		num_direct_qdiscs;

	bool			offload;
};

/* find class in global hash table using given handle */
static inline struct htb_class *htb_find(u32 handle, struct Qdisc *sch)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct Qdisc_class_common *clc;

	clc = qdisc_class_find(&q->clhash, handle);
	if (clc == NULL)
		return NULL;
	return container_of(clc, struct htb_class, common);
}

static unsigned long htb_search(struct Qdisc *sch, u32 handle)
{
	return (unsigned long)htb_find(handle, sch);
}

#define HTB_DIRECT ((struct htb_class *)-1L)

/**
 * htb_classify - classify a packet into class
 * @skb: the socket buffer
 * @sch: the active queue discipline
 * @qerr: pointer for returned status code
 *
 * It returns NULL if the packet should be dropped or -1 if the packet
 * should be passed directly thru. In all other cases leaf class is returned.
 * We allow direct class selection by classid in priority. Then we examine
 * filters in qdisc and in inner nodes (if higher filter points to the inner
 * node). If we end up with classid MAJOR:0 we enqueue the skb into special
 * internal fifo (direct). These packets then go directly thru. If we still
 * have no valid leaf we try to use MAJOR:default leaf. If still unsuccessful
 * then finish and return direct queue.
 */
static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch,
				      int *qerr)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl;
	struct tcf_result res;
	struct tcf_proto *tcf;
	int result;

	/* allow to select class by setting skb->priority to valid classid;
	 * note that nfmark can be used too by attaching filter fw with no
	 * rules in it
	 */
	if (skb->priority == sch->handle)
		return HTB_DIRECT;	/* X:0 (direct flow) selected */
	cl = htb_find(skb->priority, sch);
	if (cl) {
		if (cl->level == 0)
			return cl;
		/* Start with inner filter chain if a non-leaf class is selected */
		tcf = rcu_dereference_bh(cl->filter_list);
	} else {
		tcf = rcu_dereference_bh(q->filter_list);
	}

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	while (tcf && (result = tcf_classify(skb, NULL, tcf, &res, false)) >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_QUEUED:
		case TC_ACT_STOLEN:
		case TC_ACT_TRAP:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
			fallthrough;
		case TC_ACT_SHOT:
			return NULL;
		}
#endif
		cl = (void *)res.class;
		if (!cl) {
			if (res.classid == sch->handle)
				return HTB_DIRECT;	/* X:0 (direct flow) */
			cl = htb_find(res.classid, sch);
			if (!cl)
				break;	/* filter selected invalid classid */
		}
		if (!cl->level)
			return cl;	/* we hit leaf; return it */

		/* we have got inner class; apply inner filter chain */
		tcf = rcu_dereference_bh(cl->filter_list);
	}
	/* classification failed; try to use default class */
	cl = htb_find(TC_H_MAKE(TC_H_MAJ(sch->handle), q->defcls), sch);
	if (!cl || cl->level)
		return HTB_DIRECT;	/* bad default .. this is safe bet */
	return cl;
}
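/* Illustrative shortcut for the skb->priority fast path above
 * (hypothetical userspace snippet, assuming classid 1:10 == 0x00010010):
 * a privileged socket can pin its traffic to that class with no filters
 * at all:
 *
 *   int prio = 0x00010010;
 *   setsockopt(fd, SOL_SOCKET, SO_PRIORITY, &prio, sizeof(prio));
 */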

/**
 * htb_add_to_id_tree - adds class to the round robin list
 * @root: the root of the tree
 * @cl: the class to add
 * @prio: the given prio in class
 *
 * Routine adds class to the list (actually tree) sorted by classid.
 * Make sure that class is not already on such list for given prio.
 */
static void htb_add_to_id_tree(struct rb_root *root,
			       struct htb_class *cl, int prio)
{
	struct rb_node **p = &root->rb_node, *parent = NULL;

	while (*p) {
		struct htb_class *c;
		parent = *p;
		c = rb_entry(parent, struct htb_class, node[prio]);

		if (cl->common.classid > c->common.classid)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&cl->node[prio], parent, p);
	rb_insert_color(&cl->node[prio], root);
}

/**
 * htb_add_to_wait_tree - adds class to the event queue with delay
 * @q: the priority event queue
 * @cl: the class to add
 * @delay: delay in nanoseconds
 *
 * The class is added to priority event queue to indicate that class will
 * change its mode in cl->pq_key nanoseconds. Make sure that class is not
 * already in the queue.
 */
static void htb_add_to_wait_tree(struct htb_sched *q,
				 struct htb_class *cl, s64 delay)
{
	struct rb_node **p = &q->hlevel[cl->level].wait_pq.rb_node, *parent = NULL;

	cl->pq_key = q->now + delay;
	if (cl->pq_key == q->now)
		cl->pq_key++;

	/* update the nearest event cache */
	if (q->near_ev_cache[cl->level] > cl->pq_key)
		q->near_ev_cache[cl->level] = cl->pq_key;

	while (*p) {
		struct htb_class *c;
		parent = *p;
		c = rb_entry(parent, struct htb_class, pq_node);
		if (cl->pq_key >= c->pq_key)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&cl->pq_node, parent, p);
	rb_insert_color(&cl->pq_node, &q->hlevel[cl->level].wait_pq);
}

/**
 * htb_next_rb_node - finds next node in binary tree
 * @n: the current node in binary tree
 *
 * When we are past last key we return NULL.
 * Average complexity is 2 steps per call.
 */
static inline void htb_next_rb_node(struct rb_node **n)
{
	*n = rb_next(*n);
}

/**
 * htb_add_class_to_row - add class to its row
 * @q: the priority event queue
 * @cl: the class to add
 * @mask: the given priorities in class in bitmap
 *
 * The class is added to row at priorities marked in mask.
 * It does nothing if mask == 0.
 */
static inline void htb_add_class_to_row(struct htb_sched *q,
					struct htb_class *cl, int mask)
{
	q->row_mask[cl->level] |= mask;
	while (mask) {
		int prio = ffz(~mask);
		mask &= ~(1 << prio);
		htb_add_to_id_tree(&q->hlevel[cl->level].hprio[prio].row, cl, prio);
	}
}
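/* Worked example of the mask walk above (illustrative numbers): with
 * mask = 0x5 (prios 0 and 2), ffz(~0x5) = 0, so the class is inserted
 * at prio 0 and mask becomes 0x4; ffz(~0x4) = 2 inserts it at prio 2
 * and mask becomes 0, ending the loop. ffz(~mask) is simply the index
 * of the lowest set bit of mask.
 */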

/* If this triggers, it is a bug in this code, but it need not be fatal */
static void htb_safe_rb_erase(struct rb_node *rb, struct rb_root *root)
{
	if (RB_EMPTY_NODE(rb)) {
		WARN_ON(1);
	} else {
		rb_erase(rb, root);
		RB_CLEAR_NODE(rb);
	}
}


/**
 * htb_remove_class_from_row - removes class from its row
 * @q: the priority event queue
 * @cl: the class to remove
 * @mask: the given priorities in class in bitmap
 *
 * The class is removed from row at priorities marked in mask.
 * It does nothing if mask == 0.
 */
static inline void htb_remove_class_from_row(struct htb_sched *q,
					     struct htb_class *cl, int mask)
{
	int m = 0;
	struct htb_level *hlevel = &q->hlevel[cl->level];

	while (mask) {
		int prio = ffz(~mask);
		struct htb_prio *hprio = &hlevel->hprio[prio];

		mask &= ~(1 << prio);
		if (hprio->ptr == cl->node + prio)
			htb_next_rb_node(&hprio->ptr);

		htb_safe_rb_erase(cl->node + prio, &hprio->row);
		if (!hprio->row.rb_node)
			m |= 1 << prio;
	}
	q->row_mask[cl->level] &= ~m;
}

/**
 * htb_activate_prios - creates active class's feed chain
 * @q: the priority event queue
 * @cl: the class to activate
 *
 * The class is connected to ancestors and/or appropriate rows
 * for priorities it is participating in. cl->cmode must be new
 * (activated) mode. It does nothing if cl->prio_activity == 0.
 */
static void htb_activate_prios(struct htb_sched *q, struct htb_class *cl)
{
	struct htb_class *p = cl->parent;
	long m, mask = cl->prio_activity;

	while (cl->cmode == HTB_MAY_BORROW && p && mask) {
		m = mask;
		while (m) {
			unsigned int prio = ffz(~m);

			if (WARN_ON_ONCE(prio >= ARRAY_SIZE(p->inner.clprio)))
				break;
			m &= ~(1 << prio);

			if (p->inner.clprio[prio].feed.rb_node)
				/* parent already has its feed in use so that
				 * reset bit in mask as parent is already ok
				 */
				mask &= ~(1 << prio);

			htb_add_to_id_tree(&p->inner.clprio[prio].feed, cl, prio);
		}
		p->prio_activity |= mask;
		cl = p;
		p = cl->parent;
	}
	if (cl->cmode == HTB_CAN_SEND && mask)
		htb_add_class_to_row(q, cl, mask);
}
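/* Illustrative walk (hypothetical hierarchy): leaf 1:10 active on prio 1
 * under inner class 1:1. While 1:10 is in HTB_MAY_BORROW it is linked
 * into 1:1's prio-1 feed tree; if that feed was previously empty, 1:1
 * newly becomes active on prio 1 and the climb repeats one level up.
 * The first ancestor reached in HTB_CAN_SEND mode is placed into the
 * self row for its level instead, ending the climb.
 */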

/**
 * htb_deactivate_prios - remove class from feed chain
 * @q: the priority event queue
 * @cl: the class to deactivate
 *
 * cl->cmode must represent old mode (before deactivation). It does
 * nothing if cl->prio_activity == 0. Class is removed from all feed
 * chains and rows.
 */
static void htb_deactivate_prios(struct htb_sched *q, struct htb_class *cl)
{
	struct htb_class *p = cl->parent;
	long m, mask = cl->prio_activity;

	while (cl->cmode == HTB_MAY_BORROW && p && mask) {
		m = mask;
		mask = 0;
		while (m) {
			int prio = ffz(~m);
			m &= ~(1 << prio);

			if (p->inner.clprio[prio].ptr == cl->node + prio) {
				/* we are removing child which is pointed to from
				 * parent feed - forget the pointer but remember
				 * classid
				 */
				p->inner.clprio[prio].last_ptr_id = cl->common.classid;
				p->inner.clprio[prio].ptr = NULL;
			}

			htb_safe_rb_erase(cl->node + prio,
					  &p->inner.clprio[prio].feed);

			if (!p->inner.clprio[prio].feed.rb_node)
				mask |= 1 << prio;
		}

		p->prio_activity &= ~mask;
		cl = p;
		p = cl->parent;
	}
	if (cl->cmode == HTB_CAN_SEND && mask)
		htb_remove_class_from_row(q, cl, mask);
}

static inline s64 htb_lowater(const struct htb_class *cl)
{
	if (htb_hysteresis)
		return cl->cmode != HTB_CANT_SEND ? -cl->cbuffer : 0;
	else
		return 0;
}
static inline s64 htb_hiwater(const struct htb_class *cl)
{
	if (htb_hysteresis)
		return cl->cmode == HTB_CAN_SEND ? -cl->buffer : 0;
	else
		return 0;
}


/**
 * htb_class_mode - computes and returns current class mode
 * @cl: the target class
 * @diff: diff time in nanoseconds
 *
 * It computes cl's mode at time cl->t_c+diff and returns it. If mode
 * is not HTB_CAN_SEND then cl->pq_key is updated to time difference
 * from now to time when cl will change its state.
 * It is also worth noting that class mode doesn't change simply
 * at cl->{c,}tokens == 0 but there can rather be hysteresis of
 * 0 .. -cl->{c,}buffer range. It is meant to limit number of
 * mode transitions per time unit. The speed gain is about 1/6.
 */
static inline enum htb_cmode
htb_class_mode(struct htb_class *cl, s64 *diff)
{
	s64 toks;

	if ((toks = (cl->ctokens + *diff)) < htb_lowater(cl)) {
		*diff = -toks;
		return HTB_CANT_SEND;
	}

	if ((toks = (cl->tokens + *diff)) >= htb_hiwater(cl))
		return HTB_CAN_SEND;

	*diff = -toks;
	return HTB_MAY_BORROW;
}
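/* Worked example (illustrative numbers, hysteresis off so both water
 * marks are 0): with cl->ctokens = -5000 and *diff = 2000, toks =
 * -3000 < 0, so the class is HTB_CANT_SEND and *diff becomes 3000,
 * the time until the ceil bucket refills past zero. With hysteresis
 * on and the class not already in HTB_CANT_SEND, the low water mark
 * drops to -cl->cbuffer, so the class keeps its mode until a full
 * cbuffer of deficit has built up.
 */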

/**
 * htb_change_class_mode - changes class's mode
 * @q: the priority event queue
 * @cl: the target class
 * @diff: diff time in nanoseconds
 *
 * This should be the only way to change a class's mode under normal
 * circumstances. Routine will update feed lists linkage, change mode
 * and add class to the wait event queue if appropriate. New mode should
 * be different from old one and cl->pq_key has to be valid if changing
 * to mode other than HTB_CAN_SEND (see htb_add_to_wait_tree).
 */
static void
htb_change_class_mode(struct htb_sched *q, struct htb_class *cl, s64 *diff)
{
	enum htb_cmode new_mode = htb_class_mode(cl, diff);

	if (new_mode == cl->cmode)
		return;

	if (new_mode == HTB_CANT_SEND) {
		cl->overlimits++;
		q->overlimits++;
	}

	if (cl->prio_activity) {	/* not necessary: speed optimization */
		if (cl->cmode != HTB_CANT_SEND)
			htb_deactivate_prios(q, cl);
		cl->cmode = new_mode;
		if (new_mode != HTB_CANT_SEND)
			htb_activate_prios(q, cl);
	} else
		cl->cmode = new_mode;
}

/**
 * htb_activate - inserts leaf cl into appropriate active feeds
 * @q: the priority event queue
 * @cl: the target class
 *
 * Routine learns (new) priority of leaf and activates feed chain
 * for the prio. It can be called on already active leaf safely.
 */
static inline void htb_activate(struct htb_sched *q, struct htb_class *cl)
{
	WARN_ON(cl->level || !cl->leaf.q || !cl->leaf.q->q.qlen);

	if (!cl->prio_activity) {
		cl->prio_activity = 1 << cl->prio;
		htb_activate_prios(q, cl);
	}
}

/**
 * htb_deactivate - remove leaf cl from active feeds
 * @q: the priority event queue
 * @cl: the target class
 *
 * Make sure that leaf is active. In other words it can't be called
 * with non-active leaf.
 */
static inline void htb_deactivate(struct htb_sched *q, struct htb_class *cl)
{
	WARN_ON(!cl->prio_activity);

	htb_deactivate_prios(q, cl);
	cl->prio_activity = 0;
}

static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
		       struct sk_buff **to_free)
{
	int ret;
	unsigned int len = qdisc_pkt_len(skb);
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl = htb_classify(skb, sch, &ret);

	if (cl == HTB_DIRECT) {
		/* enqueue to helper queue */
		if (q->direct_queue.qlen < q->direct_qlen) {
			__qdisc_enqueue_tail(skb, &q->direct_queue);
			q->direct_pkts++;
		} else {
			return qdisc_drop(skb, sch, to_free);
		}
#ifdef CONFIG_NET_CLS_ACT
	} else if (!cl) {
		if (ret & __NET_XMIT_BYPASS)
			qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return ret;
#endif
	} else if ((ret = qdisc_enqueue(skb, cl->leaf.q,
					to_free)) != NET_XMIT_SUCCESS) {
		if (net_xmit_drop_count(ret)) {
			qdisc_qstats_drop(sch);
			cl->drops++;
		}
		return ret;
	} else {
		htb_activate(q, cl);
	}

	sch->qstats.backlog += len;
	sch->q.qlen++;
	return NET_XMIT_SUCCESS;
}
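/* Illustrative direct-path note (hypothetical handle): with the qdisc
 * installed as "handle 1:", an skb whose priority equals the bare
 * handle value 0x00010000 is classified HTB_DIRECT above and bypasses
 * all shaping; it is only bounded by q->direct_qlen and counted in
 * q->direct_pkts.
 */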

static inline void htb_accnt_tokens(struct htb_class *cl, int bytes, s64 diff)
{
	s64 toks = diff + cl->tokens;

	if (toks > cl->buffer)
		toks = cl->buffer;
	toks -= (s64) psched_l2t_ns(&cl->rate, bytes);
	if (toks <= -cl->mbuffer)
		toks = 1 - cl->mbuffer;

	cl->tokens = toks;
}

static inline void htb_accnt_ctokens(struct htb_class *cl, int bytes, s64 diff)
{
	s64 toks = diff + cl->ctokens;

	if (toks > cl->cbuffer)
		toks = cl->cbuffer;
	toks -= (s64) psched_l2t_ns(&cl->ceil, bytes);
	if (toks <= -cl->mbuffer)
		toks = 1 - cl->mbuffer;

	cl->ctokens = toks;
}
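/* Illustrative arithmetic (assumed numbers): take a 1000-byte packet
 * that psched_l2t_ns() converts to 8000 ns at the class rate, with
 * diff = 10000 ns elapsed since the last checkpoint and buffer = 20000.
 * Then toks = min(cl->tokens + 10000, 20000) - 8000. The two clamps
 * bound the bucket to at most one full burst (buffer) and at least
 * 1 - mbuffer, so long idle periods cannot bank unlimited credit.
 */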

/**
 * htb_charge_class - charges amount "bytes" to leaf and ancestors
 * @q: the priority event queue
 * @cl: the class to start iterate
 * @level: the minimum level to account
 * @skb: the socket buffer
 *
 * Routine assumes that packet "bytes" long was dequeued from leaf cl
 * borrowing from "level". It accounts bytes to ceil leaky bucket for
 * leaf and all ancestors and to rate bucket for ancestors at levels
 * "level" and higher. It also handles possible change of mode resulting
 * from the update. Note that mode can also increase here (MAY_BORROW to
 * CAN_SEND) because we can use a more precise clock than the event queue
 * here. In such case we remove class from event queue first.
 */
static void htb_charge_class(struct htb_sched *q, struct htb_class *cl,
			     int level, struct sk_buff *skb)
{
	int bytes = qdisc_pkt_len(skb);
	enum htb_cmode old_mode;
	s64 diff;

	while (cl) {
		diff = min_t(s64, q->now - cl->t_c, cl->mbuffer);
		if (cl->level >= level) {
			if (cl->level == level)
				cl->xstats.lends++;
			htb_accnt_tokens(cl, bytes, diff);
		} else {
			cl->xstats.borrows++;
			cl->tokens += diff;	/* we moved t_c; update tokens */
		}
		htb_accnt_ctokens(cl, bytes, diff);
		cl->t_c = q->now;

		old_mode = cl->cmode;
		diff = 0;
		htb_change_class_mode(q, cl, &diff);
		if (old_mode != cl->cmode) {
			if (old_mode != HTB_CAN_SEND)
				htb_safe_rb_erase(&cl->pq_node, &q->hlevel[cl->level].wait_pq);
			if (cl->cmode != HTB_CAN_SEND)
				htb_add_to_wait_tree(q, cl, diff);
		}

		/* update basic stats except for leaves which are already updated */
		if (cl->level)
			bstats_update(&cl->bstats, skb);

		cl = cl->parent;
	}
}

/**
 * htb_do_events - make mode changes to classes at the level
 * @q: the priority event queue
 * @level: which wait_pq in 'q->hlevel'
 * @start: start jiffies
 *
 * Scans event queue for pending events and applies them. Returns time of
 * next pending event (0 for no event in pq, q->now for too many events).
 * Note: Applied are events whose cl->pq_key <= q->now.
 */
static s64 htb_do_events(struct htb_sched *q, const int level,
			 unsigned long start)
{
	/* don't run for longer than 2 jiffies; 2 is used instead of
	 * 1 to simplify things when jiffy is going to be incremented
	 * too soon
	 */
	unsigned long stop_at = start + 2;
	struct rb_root *wait_pq = &q->hlevel[level].wait_pq;

	while (time_before(jiffies, stop_at)) {
		struct htb_class *cl;
		s64 diff;
		struct rb_node *p = rb_first(wait_pq);

		if (!p)
			return 0;

		cl = rb_entry(p, struct htb_class, pq_node);
		if (cl->pq_key > q->now)
			return cl->pq_key;

		htb_safe_rb_erase(p, wait_pq);
		diff = min_t(s64, q->now - cl->t_c, cl->mbuffer);
		htb_change_class_mode(q, cl, &diff);
		if (cl->cmode != HTB_CAN_SEND)
			htb_add_to_wait_tree(q, cl, diff);
	}

	/* too much load - let's continue after a break for scheduling */
	if (!(q->warned & HTB_WARN_TOOMANYEVENTS)) {
		pr_warn("htb: too many events!\n");
		q->warned |= HTB_WARN_TOOMANYEVENTS;
	}

	return q->now;
}

/* Returns class->node+prio from id-tree where class's id is >= id. NULL
 * if no such one exists.
 */
static struct rb_node *htb_id_find_next_upper(int prio, struct rb_node *n,
					      u32 id)
{
	struct rb_node *r = NULL;
	while (n) {
		struct htb_class *cl =
			rb_entry(n, struct htb_class, node[prio]);

		if (id > cl->common.classid) {
			n = n->rb_right;
		} else if (id < cl->common.classid) {
			r = n;
			n = n->rb_left;
		} else {
			return n;
		}
	}
	return r;
}
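/* Example lookup (illustrative classids): with {0x10010, 0x10020,
 * 0x10030} in the prio tree, a search for id 0x10015 walks right past
 * 0x10010, records 0x10020 as the best candidate while descending left,
 * and returns it: the smallest classid >= id.
 */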

/**
 * htb_lookup_leaf - returns next leaf class in DRR order
 * @hprio: the current one
 * @prio: which prio in class
 *
 * Find leaf where the current feed pointer points to.
 */
static struct htb_class *htb_lookup_leaf(struct htb_prio *hprio, const int prio)
{
	int i;
	struct {
		struct rb_node *root;
		struct rb_node **pptr;
		u32 *pid;
	} stk[TC_HTB_MAXDEPTH], *sp = stk;

	BUG_ON(!hprio->row.rb_node);
	sp->root = hprio->row.rb_node;
	sp->pptr = &hprio->ptr;
	sp->pid = &hprio->last_ptr_id;

	for (i = 0; i < 65535; i++) {
		if (!*sp->pptr && *sp->pid) {
			/* ptr was invalidated but id is valid - try to recover
			 * the original or next ptr
			 */
			*sp->pptr =
			    htb_id_find_next_upper(prio, sp->root, *sp->pid);
		}
		*sp->pid = 0;	/* ptr is valid now so that remove this hint as it
				 * can become out of date quickly
				 */
		if (!*sp->pptr) {	/* we are at right end; rewind & go up */
			*sp->pptr = sp->root;
			while ((*sp->pptr)->rb_left)
				*sp->pptr = (*sp->pptr)->rb_left;
			if (sp > stk) {
				sp--;
				if (!*sp->pptr) {
					WARN_ON(1);
					return NULL;
				}
				htb_next_rb_node(sp->pptr);
			}
		} else {
			struct htb_class *cl;
			struct htb_prio *clp;

			cl = rb_entry(*sp->pptr, struct htb_class, node[prio]);
			if (!cl->level)
				return cl;
			clp = &cl->inner.clprio[prio];
			(++sp)->root = clp->feed.rb_node;
			sp->pptr = &clp->ptr;
			sp->pid = &clp->last_ptr_id;
		}
	}
	WARN_ON(1);
	return NULL;
}

/* dequeues packet at given priority and level; call only if
 * you are sure that there is active class at prio/level
 */
static struct sk_buff *htb_dequeue_tree(struct htb_sched *q, const int prio,
					const int level)
{
	struct sk_buff *skb = NULL;
	struct htb_class *cl, *start;
	struct htb_level *hlevel = &q->hlevel[level];
	struct htb_prio *hprio = &hlevel->hprio[prio];

	/* look initial class up in the row */
	start = cl = htb_lookup_leaf(hprio, prio);

	do {
next:
		if (unlikely(!cl))
			return NULL;

		/* class can be empty - it is unlikely but can be true if leaf
		 * qdisc drops packets in enqueue routine or if someone used
		 * graft operation on the leaf since last dequeue;
		 * simply deactivate and skip such class
		 */
		if (unlikely(cl->leaf.q->q.qlen == 0)) {
			struct htb_class *next;
			htb_deactivate(q, cl);

			/* row/level might become empty */
			if ((q->row_mask[level] & (1 << prio)) == 0)
				return NULL;

			next = htb_lookup_leaf(hprio, prio);

			if (cl == start)	/* fix start if we just deleted it */
				start = next;
			cl = next;
			goto next;
		}

		skb = cl->leaf.q->dequeue(cl->leaf.q);
		if (likely(skb != NULL))
			break;

		qdisc_warn_nonwc("htb", cl->leaf.q);
		htb_next_rb_node(level ? &cl->parent->inner.clprio[prio].ptr :
					 &q->hlevel[0].hprio[prio].ptr);
		cl = htb_lookup_leaf(hprio, prio);

	} while (cl != start);

	if (likely(skb != NULL)) {
		bstats_update(&cl->bstats, skb);
		cl->leaf.deficit[level] -= qdisc_pkt_len(skb);
		if (cl->leaf.deficit[level] < 0) {
			cl->leaf.deficit[level] += cl->quantum;
			htb_next_rb_node(level ? &cl->parent->inner.clprio[prio].ptr :
						 &q->hlevel[0].hprio[prio].ptr);
		}
		/* this used to be after charge_class but this constellation
		 * gives us slightly better performance
		 */
		if (!cl->leaf.q->q.qlen)
			htb_deactivate(q, cl);
		htb_charge_class(q, cl, level, skb);
	}
	return skb;
}
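/* DRR illustration (hypothetical values): a leaf with quantum 1500
 * dequeues a 1000-byte skb, so deficit[level] drops by 1000. Once the
 * deficit goes negative, a full quantum is added back and the round
 * robin pointer advances to the next leaf; over time, active leaves at
 * the same prio share bandwidth roughly in proportion to their quantums.
 */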

static struct sk_buff *htb_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct htb_sched *q = qdisc_priv(sch);
	int level;
	s64 next_event;
	unsigned long start_at;

	/* try to dequeue direct packets as high prio (!) to minimize cpu work */
	skb = __qdisc_dequeue_head(&q->direct_queue);
	if (skb != NULL) {
ok:
		qdisc_bstats_update(sch, skb);
		qdisc_qstats_backlog_dec(sch, skb);
		sch->q.qlen--;
		return skb;
	}

	if (!sch->q.qlen)
		goto fin;
	q->now = ktime_get_ns();
	start_at = jiffies;

	next_event = q->now + 5LLU * NSEC_PER_SEC;

	for (level = 0; level < TC_HTB_MAXDEPTH; level++) {
		/* common case optimization - skip event handler quickly */
		int m;
		s64 event = q->near_ev_cache[level];

		if (q->now >= event) {
			event = htb_do_events(q, level, start_at);
			if (!event)
				event = q->now + NSEC_PER_SEC;
			q->near_ev_cache[level] = event;
		}

		if (next_event > event)
			next_event = event;

		m = ~q->row_mask[level];
		while (m != (int)(-1)) {
			int prio = ffz(m);

			m |= 1 << prio;
			skb = htb_dequeue_tree(q, prio, level);
			if (likely(skb != NULL))
				goto ok;
		}
	}
	if (likely(next_event > q->now))
		qdisc_watchdog_schedule_ns(&q->watchdog, next_event);
	else
		schedule_work(&q->work);
fin:
	return skb;
}

/* reset all classes */
/* always called under BH & queue lock */
static void htb_reset(struct Qdisc *sch)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl;
	unsigned int i;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
			if (cl->level)
				memset(&cl->inner, 0, sizeof(cl->inner));
			else {
				if (cl->leaf.q && !q->offload)
					qdisc_reset(cl->leaf.q);
			}
			cl->prio_activity = 0;
			cl->cmode = HTB_CAN_SEND;
		}
	}
	qdisc_watchdog_cancel(&q->watchdog);
	__qdisc_reset_queue(&q->direct_queue);
	memset(q->hlevel, 0, sizeof(q->hlevel));
	memset(q->row_mask, 0, sizeof(q->row_mask));
}

static const struct nla_policy htb_policy[TCA_HTB_MAX + 1] = {
	[TCA_HTB_PARMS]	= { .len = sizeof(struct tc_htb_opt) },
	[TCA_HTB_INIT]	= { .len = sizeof(struct tc_htb_glob) },
	[TCA_HTB_CTAB]	= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
	[TCA_HTB_RTAB]	= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
	[TCA_HTB_DIRECT_QLEN] = { .type = NLA_U32 },
	[TCA_HTB_RATE64] = { .type = NLA_U64 },
	[TCA_HTB_CEIL64] = { .type = NLA_U64 },
	[TCA_HTB_OFFLOAD] = { .type = NLA_FLAG },
};

static void htb_work_func(struct work_struct *work)
{
	struct htb_sched *q = container_of(work, struct htb_sched, work);
	struct Qdisc *sch = q->watchdog.qdisc;

	rcu_read_lock();
	__netif_schedule(qdisc_root(sch));
	rcu_read_unlock();
}

static int htb_offload(struct net_device *dev, struct tc_htb_qopt_offload *opt)
{
	return dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_HTB, opt);
}

static int htb_init(struct Qdisc *sch, struct nlattr *opt,
		    struct netlink_ext_ack *extack)
{
	struct net_device *dev = qdisc_dev(sch);
	struct tc_htb_qopt_offload offload_opt;
	struct htb_sched *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_HTB_MAX + 1];
	struct tc_htb_glob *gopt;
	unsigned int ntx;
	bool offload;
	int err;

	qdisc_watchdog_init(&q->watchdog, sch);
	INIT_WORK(&q->work, htb_work_func);

	if (!opt)
		return -EINVAL;

	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
	if (err)
		return err;

	err = nla_parse_nested_deprecated(tb, TCA_HTB_MAX, opt, htb_policy,
					  NULL);
	if (err < 0)
		return err;

	if (!tb[TCA_HTB_INIT])
		return -EINVAL;

	gopt = nla_data(tb[TCA_HTB_INIT]);
	if (gopt->version != HTB_VER >> 16)
		return -EINVAL;

	offload = nla_get_flag(tb[TCA_HTB_OFFLOAD]);

	if (offload) {
		if (sch->parent != TC_H_ROOT) {
			NL_SET_ERR_MSG(extack, "HTB must be the root qdisc to use offload");
			return -EOPNOTSUPP;
		}

		if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc) {
			NL_SET_ERR_MSG(extack, "hw-tc-offload ethtool feature flag must be on");
			return -EOPNOTSUPP;
		}

		q->num_direct_qdiscs = dev->real_num_tx_queues;
		q->direct_qdiscs = kcalloc(q->num_direct_qdiscs,
					   sizeof(*q->direct_qdiscs),
					   GFP_KERNEL);
		if (!q->direct_qdiscs)
			return -ENOMEM;
	}

	err = qdisc_class_hash_init(&q->clhash);
	if (err < 0)
		return err;

	if (tb[TCA_HTB_DIRECT_QLEN])
		q->direct_qlen = nla_get_u32(tb[TCA_HTB_DIRECT_QLEN]);
	else
		q->direct_qlen = qdisc_dev(sch)->tx_queue_len;

	if ((q->rate2quantum = gopt->rate2quantum) < 1)
		q->rate2quantum = 1;
	q->defcls = gopt->defcls;

	if (!offload)
		return 0;

	for (ntx = 0; ntx < q->num_direct_qdiscs; ntx++) {
		struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, ntx);
		struct Qdisc *qdisc;

		qdisc = qdisc_create_dflt(dev_queue, &pfifo_qdisc_ops,
1123d03b195bSMaxim Mikityanskiy TC_H_MAKE(sch->handle, 0), extack);
1124d03b195bSMaxim Mikityanskiy if (!qdisc) {
1125d59f4e1dSZhengchao Shao return -ENOMEM;
1126d03b195bSMaxim Mikityanskiy }
1127d03b195bSMaxim Mikityanskiy
1128d03b195bSMaxim Mikityanskiy q->direct_qdiscs[ntx] = qdisc;
1129d03b195bSMaxim Mikityanskiy qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
1130d03b195bSMaxim Mikityanskiy }
1131d03b195bSMaxim Mikityanskiy
1132d03b195bSMaxim Mikityanskiy sch->flags |= TCQ_F_MQROOT;
1133d03b195bSMaxim Mikityanskiy
1134d03b195bSMaxim Mikityanskiy offload_opt = (struct tc_htb_qopt_offload) {
1135d03b195bSMaxim Mikityanskiy .command = TC_HTB_CREATE,
1136d03b195bSMaxim Mikityanskiy .parent_classid = TC_H_MAJ(sch->handle) >> 16,
1137d03b195bSMaxim Mikityanskiy .classid = TC_H_MIN(q->defcls),
1138d03b195bSMaxim Mikityanskiy .extack = extack,
1139d03b195bSMaxim Mikityanskiy };
1140d03b195bSMaxim Mikityanskiy err = htb_offload(dev, &offload_opt);
1141d03b195bSMaxim Mikityanskiy if (err)
1142d59f4e1dSZhengchao Shao return err;
1143d03b195bSMaxim Mikityanskiy
1144fb3a3e37SMaxim Mikityanskiy /* Defer this assignment, so that htb_destroy skips offload-related
1145fb3a3e37SMaxim Mikityanskiy * parts (especially calling ndo_setup_tc) on errors.
1146fb3a3e37SMaxim Mikityanskiy */
1147fb3a3e37SMaxim Mikityanskiy q->offload = true;
1148fb3a3e37SMaxim Mikityanskiy
1149d03b195bSMaxim Mikityanskiy return 0;
1150d03b195bSMaxim Mikityanskiy }
1151d03b195bSMaxim Mikityanskiy
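/* Offload mode: graft the per-queue direct pfifos created in htb_init
 * onto their tx queues, detach qdiscs from any remaining queues, and
 * free the temporary array.
 */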
1152d03b195bSMaxim Mikityanskiy static void htb_attach_offload(struct Qdisc *sch)
1153d03b195bSMaxim Mikityanskiy {
1154d03b195bSMaxim Mikityanskiy struct net_device *dev = qdisc_dev(sch);
1155d03b195bSMaxim Mikityanskiy struct htb_sched *q = qdisc_priv(sch);
1156d03b195bSMaxim Mikityanskiy unsigned int ntx;
1157d03b195bSMaxim Mikityanskiy
1158d03b195bSMaxim Mikityanskiy for (ntx = 0; ntx < q->num_direct_qdiscs; ntx++) {
1159d03b195bSMaxim Mikityanskiy struct Qdisc *old, *qdisc = q->direct_qdiscs[ntx];
1160d03b195bSMaxim Mikityanskiy
1161d03b195bSMaxim Mikityanskiy old = dev_graft_qdisc(qdisc->dev_queue, qdisc);
1162d03b195bSMaxim Mikityanskiy qdisc_put(old);
1163d03b195bSMaxim Mikityanskiy qdisc_hash_add(qdisc, false);
1164d03b195bSMaxim Mikityanskiy }
1165d03b195bSMaxim Mikityanskiy for (ntx = q->num_direct_qdiscs; ntx < dev->num_tx_queues; ntx++) {
1166d03b195bSMaxim Mikityanskiy struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, ntx);
1167d03b195bSMaxim Mikityanskiy struct Qdisc *old = dev_graft_qdisc(dev_queue, NULL);
1168d03b195bSMaxim Mikityanskiy
1169d03b195bSMaxim Mikityanskiy qdisc_put(old);
1170d03b195bSMaxim Mikityanskiy }
1171d03b195bSMaxim Mikityanskiy
1172d03b195bSMaxim Mikityanskiy kfree(q->direct_qdiscs);
1173d03b195bSMaxim Mikityanskiy q->direct_qdiscs = NULL;
1174d03b195bSMaxim Mikityanskiy }
1175d03b195bSMaxim Mikityanskiy
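/* Software mode: attach this qdisc to every tx queue, mirroring
 * qdisc_graft(); each queue takes its own reference on sch.
 */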
1176d03b195bSMaxim Mikityanskiy static void htb_attach_software(struct Qdisc *sch)
1177d03b195bSMaxim Mikityanskiy {
1178d03b195bSMaxim Mikityanskiy struct net_device *dev = qdisc_dev(sch);
1179d03b195bSMaxim Mikityanskiy unsigned int ntx;
1180d03b195bSMaxim Mikityanskiy
1181d03b195bSMaxim Mikityanskiy /* Resemble qdisc_graft behavior. */
1182d03b195bSMaxim Mikityanskiy for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
1183d03b195bSMaxim Mikityanskiy struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, ntx);
1184d03b195bSMaxim Mikityanskiy struct Qdisc *old = dev_graft_qdisc(dev_queue, sch);
1185d03b195bSMaxim Mikityanskiy
1186d03b195bSMaxim Mikityanskiy qdisc_refcount_inc(sch);
1187d03b195bSMaxim Mikityanskiy
1188d03b195bSMaxim Mikityanskiy qdisc_put(old);
1189d03b195bSMaxim Mikityanskiy }
1190d03b195bSMaxim Mikityanskiy }
1191d03b195bSMaxim Mikityanskiy
1192d03b195bSMaxim Mikityanskiy static void htb_attach(struct Qdisc *sch)
1193d03b195bSMaxim Mikityanskiy {
1194d03b195bSMaxim Mikityanskiy struct htb_sched *q = qdisc_priv(sch);
1195d03b195bSMaxim Mikityanskiy
1196d03b195bSMaxim Mikityanskiy if (q->offload)
1197d03b195bSMaxim Mikityanskiy htb_attach_offload(sch);
1198d03b195bSMaxim Mikityanskiy else
1199d03b195bSMaxim Mikityanskiy htb_attach_software(sch);
12001da177e4SLinus Torvalds }
12011da177e4SLinus Torvalds
12021da177e4SLinus Torvalds static int htb_dump(struct Qdisc *sch, struct sk_buff *skb)
12031da177e4SLinus Torvalds {
12041da177e4SLinus Torvalds struct htb_sched *q = qdisc_priv(sch);
12054b3550efSPatrick McHardy struct nlattr *nest;
12061da177e4SLinus Torvalds struct tc_htb_glob gopt;
12071da177e4SLinus Torvalds
1208d03b195bSMaxim Mikityanskiy if (q->offload)
1209d03b195bSMaxim Mikityanskiy sch->flags |= TCQ_F_OFFLOADED;
1210d03b195bSMaxim Mikityanskiy else
1211d03b195bSMaxim Mikityanskiy sch->flags &= ~TCQ_F_OFFLOADED;
1212d03b195bSMaxim Mikityanskiy
1213b362487aSCong Wang sch->qstats.overlimits = q->overlimits;
12146f542efcSEric Dumazet /* It's safe not to acquire the qdisc lock. As we hold RTNL,
12156f542efcSEric Dumazet * no change can happen to the qdisc parameters.
12166f542efcSEric Dumazet */
12174b3550efSPatrick McHardy
12184b3550efSPatrick McHardy gopt.direct_pkts = q->direct_pkts;
12191da177e4SLinus Torvalds gopt.version = HTB_VER;
12201da177e4SLinus Torvalds gopt.rate2quantum = q->rate2quantum;
12211da177e4SLinus Torvalds gopt.defcls = q->defcls;
12223bf72957SStephen Hemminger gopt.debug = 0;
12234b3550efSPatrick McHardy
1224ae0be8deSMichal Kubecek nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
12254b3550efSPatrick McHardy if (nest == NULL)
12264b3550efSPatrick McHardy goto nla_put_failure;
12276906f4edSEric Dumazet if (nla_put(skb, TCA_HTB_INIT, sizeof(gopt), &gopt) ||
12286906f4edSEric Dumazet nla_put_u32(skb, TCA_HTB_DIRECT_QLEN, q->direct_qlen))
12291b34ec43SDavid S. Miller goto nla_put_failure;
1230d03b195bSMaxim Mikityanskiy if (q->offload && nla_put_flag(skb, TCA_HTB_OFFLOAD))
1231d03b195bSMaxim Mikityanskiy goto nla_put_failure;
12324b3550efSPatrick McHardy
12336f542efcSEric Dumazet return nla_nest_end(skb, nest);
12344b3550efSPatrick McHardy
12351e90474cSPatrick McHardy nla_put_failure:
12364b3550efSPatrick McHardy nla_nest_cancel(skb, nest);
12371da177e4SLinus Torvalds return -1;
12381da177e4SLinus Torvalds }
12391da177e4SLinus Torvalds
12401da177e4SLinus Torvalds static int htb_dump_class(struct Qdisc *sch, unsigned long arg,
12411da177e4SLinus Torvalds struct sk_buff *skb, struct tcmsg *tcm)
12421da177e4SLinus Torvalds {
12431da177e4SLinus Torvalds struct htb_class *cl = (struct htb_class *)arg;
124483271586SMaxim Mikityanskiy struct htb_sched *q = qdisc_priv(sch);
12454b3550efSPatrick McHardy struct nlattr *nest;
12461da177e4SLinus Torvalds struct tc_htb_opt opt;
12471da177e4SLinus Torvalds
12486f542efcSEric Dumazet /* It's safe not to acquire the qdisc lock. As we hold RTNL,
12496f542efcSEric Dumazet * no change can happen to the class parameters.
12506f542efcSEric Dumazet */
1251f4c1f3e0SPatrick McHardy tcm->tcm_parent = cl->parent ? cl->parent->common.classid : TC_H_ROOT;
1252f4c1f3e0SPatrick McHardy tcm->tcm_handle = cl->common.classid;
125311957be2SCong Wang if (!cl->level && cl->leaf.q)
125411957be2SCong Wang tcm->tcm_info = cl->leaf.q->handle;
12551da177e4SLinus Torvalds
1256ae0be8deSMichal Kubecek nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
12574b3550efSPatrick McHardy if (nest == NULL)
12584b3550efSPatrick McHardy goto nla_put_failure;
12591da177e4SLinus Torvalds
12601da177e4SLinus Torvalds memset(&opt, 0, sizeof(opt));
12611da177e4SLinus Torvalds
126201cb71d2SEric Dumazet psched_ratecfg_getrate(&opt.rate, &cl->rate);
12639c10f411SJiri Pirko opt.buffer = PSCHED_NS2TICKS(cl->buffer);
126401cb71d2SEric Dumazet psched_ratecfg_getrate(&opt.ceil, &cl->ceil);
12659c10f411SJiri Pirko opt.cbuffer = PSCHED_NS2TICKS(cl->cbuffer);
1266c19f7a34SJarek Poplawski opt.quantum = cl->quantum;
1267c19f7a34SJarek Poplawski opt.prio = cl->prio;
12681da177e4SLinus Torvalds opt.level = cl->level;
12691b34ec43SDavid S. Miller if (nla_put(skb, TCA_HTB_PARMS, sizeof(opt), &opt))
12701b34ec43SDavid S. Miller goto nla_put_failure;
127183271586SMaxim Mikityanskiy if (q->offload && nla_put_flag(skb, TCA_HTB_OFFLOAD))
127283271586SMaxim Mikityanskiy goto nla_put_failure;
1273df62cdf3SEric Dumazet if ((cl->rate.rate_bytes_ps >= (1ULL << 32)) &&
12742a51c1e8SNicolas Dichtel nla_put_u64_64bit(skb, TCA_HTB_RATE64, cl->rate.rate_bytes_ps,
12752a51c1e8SNicolas Dichtel TCA_HTB_PAD))
1276df62cdf3SEric Dumazet goto nla_put_failure;
1277df62cdf3SEric Dumazet if ((cl->ceil.rate_bytes_ps >= (1ULL << 32)) &&
12782a51c1e8SNicolas Dichtel nla_put_u64_64bit(skb, TCA_HTB_CEIL64, cl->ceil.rate_bytes_ps,
12792a51c1e8SNicolas Dichtel TCA_HTB_PAD))
1280df62cdf3SEric Dumazet goto nla_put_failure;
12814b3550efSPatrick McHardy
12826f542efcSEric Dumazet return nla_nest_end(skb, nest);
12834b3550efSPatrick McHardy
12841e90474cSPatrick McHardy nla_put_failure:
12854b3550efSPatrick McHardy nla_nest_cancel(skb, nest);
12861da177e4SLinus Torvalds return -1;
12871da177e4SLinus Torvalds }
12881da177e4SLinus Torvalds
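/* With offload, only leaves carry hardware counters, so an inner
 * class's bstats are rebuilt by summing the bias counters and leaf
 * qdisc counters of all classes below it.
 */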
128983271586SMaxim Mikityanskiy static void htb_offload_aggregate_stats(struct htb_sched *q,
129083271586SMaxim Mikityanskiy struct htb_class *cl)
129183271586SMaxim Mikityanskiy {
1292f56940daSAhmed S. Darwish u64 bytes = 0, packets = 0;
129383271586SMaxim Mikityanskiy struct htb_class *c;
129483271586SMaxim Mikityanskiy unsigned int i;
129583271586SMaxim Mikityanskiy
129650dc9a85SAhmed S. Darwish gnet_stats_basic_sync_init(&cl->bstats);
129783271586SMaxim Mikityanskiy
129883271586SMaxim Mikityanskiy for (i = 0; i < q->clhash.hashsize; i++) {
129983271586SMaxim Mikityanskiy hlist_for_each_entry(c, &q->clhash.hash[i], common.hnode) {
130083271586SMaxim Mikityanskiy struct htb_class *p = c;
130183271586SMaxim Mikityanskiy
130283271586SMaxim Mikityanskiy while (p && p->level < cl->level)
130383271586SMaxim Mikityanskiy p = p->parent;
130483271586SMaxim Mikityanskiy
130583271586SMaxim Mikityanskiy if (p != cl)
130683271586SMaxim Mikityanskiy continue;
130783271586SMaxim Mikityanskiy
130850dc9a85SAhmed S. Darwish bytes += u64_stats_read(&c->bstats_bias.bytes);
130950dc9a85SAhmed S. Darwish packets += u64_stats_read(&c->bstats_bias.packets);
131083271586SMaxim Mikityanskiy if (c->level == 0) {
131150dc9a85SAhmed S. Darwish bytes += u64_stats_read(&c->leaf.q->bstats.bytes);
131250dc9a85SAhmed S. Darwish packets += u64_stats_read(&c->leaf.q->bstats.packets);
131383271586SMaxim Mikityanskiy }
131483271586SMaxim Mikityanskiy }
131583271586SMaxim Mikityanskiy }
1316f56940daSAhmed S. Darwish _bstats_update(&cl->bstats, bytes, packets);
131783271586SMaxim Mikityanskiy }
131883271586SMaxim Mikityanskiy
13191da177e4SLinus Torvalds static int
132087990467SStephen Hemminger htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d)
13211da177e4SLinus Torvalds {
13221da177e4SLinus Torvalds struct htb_class *cl = (struct htb_class *)arg;
132383271586SMaxim Mikityanskiy struct htb_sched *q = qdisc_priv(sch);
1324338ed9b4SEric Dumazet struct gnet_stats_queue qs = {
1325338ed9b4SEric Dumazet .drops = cl->drops,
13263c75f6eeSEric Dumazet .overlimits = cl->overlimits,
1327338ed9b4SEric Dumazet };
132864015853SJohn Fastabend __u32 qlen = 0;
13291da177e4SLinus Torvalds
13305dd431b6SPaolo Abeni if (!cl->level && cl->leaf.q)
13315dd431b6SPaolo Abeni qdisc_qstats_qlen_backlog(cl->leaf.q, &qlen, &qs.backlog);
13325dd431b6SPaolo Abeni
13330564bf0aSKonstantin Khlebnikov cl->xstats.tokens = clamp_t(s64, PSCHED_NS2TICKS(cl->tokens),
13340564bf0aSKonstantin Khlebnikov INT_MIN, INT_MAX);
13350564bf0aSKonstantin Khlebnikov cl->xstats.ctokens = clamp_t(s64, PSCHED_NS2TICKS(cl->ctokens),
13360564bf0aSKonstantin Khlebnikov INT_MIN, INT_MAX);
13371da177e4SLinus Torvalds
133883271586SMaxim Mikityanskiy if (q->offload) {
133983271586SMaxim Mikityanskiy if (!cl->level) {
134083271586SMaxim Mikityanskiy if (cl->leaf.q)
134183271586SMaxim Mikityanskiy cl->bstats = cl->leaf.q->bstats;
134283271586SMaxim Mikityanskiy else
134350dc9a85SAhmed S. Darwish gnet_stats_basic_sync_init(&cl->bstats);
1344f56940daSAhmed S. Darwish _bstats_update(&cl->bstats,
134550dc9a85SAhmed S. Darwish u64_stats_read(&cl->bstats_bias.bytes),
134650dc9a85SAhmed S. Darwish u64_stats_read(&cl->bstats_bias.packets));
134783271586SMaxim Mikityanskiy } else {
134883271586SMaxim Mikityanskiy htb_offload_aggregate_stats(q, cl);
134983271586SMaxim Mikityanskiy }
135083271586SMaxim Mikityanskiy }
135183271586SMaxim Mikityanskiy
135229cbcd85SAhmed S. Darwish if (gnet_stats_copy_basic(d, NULL, &cl->bstats, true) < 0 ||
13531c0d32fdSEric Dumazet gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
1354338ed9b4SEric Dumazet gnet_stats_copy_queue(d, NULL, &qs, qlen) < 0)
13551da177e4SLinus Torvalds return -1;
13561da177e4SLinus Torvalds
13571da177e4SLinus Torvalds return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats));
13581da177e4SLinus Torvalds }
13591da177e4SLinus Torvalds
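/* For offloaded HTB, ask the driver which tx queue backs the parent
 * classid so that a new leaf qdisc can be grafted onto it; software
 * HTB always uses sch->dev_queue.
 */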
1360d03b195bSMaxim Mikityanskiy static struct netdev_queue *
1361d03b195bSMaxim Mikityanskiy htb_select_queue(struct Qdisc *sch, struct tcmsg *tcm)
1362d03b195bSMaxim Mikityanskiy {
1363d03b195bSMaxim Mikityanskiy struct net_device *dev = qdisc_dev(sch);
1364d03b195bSMaxim Mikityanskiy struct tc_htb_qopt_offload offload_opt;
136593bde210SMaxim Mikityanskiy struct htb_sched *q = qdisc_priv(sch);
1366d03b195bSMaxim Mikityanskiy int err;
1367d03b195bSMaxim Mikityanskiy
136893bde210SMaxim Mikityanskiy if (!q->offload)
136993bde210SMaxim Mikityanskiy return sch->dev_queue;
137093bde210SMaxim Mikityanskiy
1371d03b195bSMaxim Mikityanskiy offload_opt = (struct tc_htb_qopt_offload) {
1372d03b195bSMaxim Mikityanskiy .command = TC_HTB_LEAF_QUERY_QUEUE,
1373d03b195bSMaxim Mikityanskiy .classid = TC_H_MIN(tcm->tcm_parent),
1374d03b195bSMaxim Mikityanskiy };
1375d03b195bSMaxim Mikityanskiy err = htb_offload(dev, &offload_opt);
1376d03b195bSMaxim Mikityanskiy if (err || offload_opt.qid >= dev->num_tx_queues)
1377d03b195bSMaxim Mikityanskiy return NULL;
1378d03b195bSMaxim Mikityanskiy return netdev_get_tx_queue(dev, offload_opt.qid);
1379d03b195bSMaxim Mikityanskiy }
1380d03b195bSMaxim Mikityanskiy
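/* Graft new_q onto dev_queue with the device quiesced and return the
 * old qdisc. TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT mark it as a per-queue
 * root, as in an mq setup.
 */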
1381d03b195bSMaxim Mikityanskiy static struct Qdisc *
1382d03b195bSMaxim Mikityanskiy htb_graft_helper(struct netdev_queue *dev_queue, struct Qdisc *new_q)
1383d03b195bSMaxim Mikityanskiy {
1384d03b195bSMaxim Mikityanskiy struct net_device *dev = dev_queue->dev;
1385d03b195bSMaxim Mikityanskiy struct Qdisc *old_q;
1386d03b195bSMaxim Mikityanskiy
1387d03b195bSMaxim Mikityanskiy if (dev->flags & IFF_UP)
1388d03b195bSMaxim Mikityanskiy dev_deactivate(dev);
1389d03b195bSMaxim Mikityanskiy old_q = dev_graft_qdisc(dev_queue, new_q);
1390d03b195bSMaxim Mikityanskiy if (new_q)
1391d03b195bSMaxim Mikityanskiy new_q->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
1392d03b195bSMaxim Mikityanskiy if (dev->flags & IFF_UP)
1393d03b195bSMaxim Mikityanskiy dev_activate(dev);
1394d03b195bSMaxim Mikityanskiy
1395d03b195bSMaxim Mikityanskiy return old_q;
1396d03b195bSMaxim Mikityanskiy }
1397d03b195bSMaxim Mikityanskiy
1398ca49bfd9SMaxim Mikityanskiy static struct netdev_queue *htb_offload_get_queue(struct htb_class *cl)
1399ca49bfd9SMaxim Mikityanskiy {
1400ca49bfd9SMaxim Mikityanskiy struct netdev_queue *queue;
1401ca49bfd9SMaxim Mikityanskiy
1402ca49bfd9SMaxim Mikityanskiy queue = cl->leaf.offload_queue;
1403ca49bfd9SMaxim Mikityanskiy if (!(cl->leaf.q->flags & TCQ_F_BUILTIN))
1404ca49bfd9SMaxim Mikityanskiy WARN_ON(cl->leaf.q->dev_queue != queue);
1405ca49bfd9SMaxim Mikityanskiy
1406ca49bfd9SMaxim Mikityanskiy return queue;
1407ca49bfd9SMaxim Mikityanskiy }
1408ca49bfd9SMaxim Mikityanskiy
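/* The driver may reuse the queue of a deleted leaf for another class.
 * Move cl_old's leaf qdisc over to the queue of cl_new, regrafting it
 * unless the whole qdisc is being destroyed.
 */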
1409ca49bfd9SMaxim Mikityanskiy static void htb_offload_move_qdisc(struct Qdisc *sch, struct htb_class *cl_old,
1410ca49bfd9SMaxim Mikityanskiy struct htb_class *cl_new, bool destroying)
1411d03b195bSMaxim Mikityanskiy {
1412d03b195bSMaxim Mikityanskiy struct netdev_queue *queue_old, *queue_new;
1413d03b195bSMaxim Mikityanskiy struct net_device *dev = qdisc_dev(sch);
1414d03b195bSMaxim Mikityanskiy
1415ca49bfd9SMaxim Mikityanskiy queue_old = htb_offload_get_queue(cl_old);
1416ca49bfd9SMaxim Mikityanskiy queue_new = htb_offload_get_queue(cl_new);
1417ca49bfd9SMaxim Mikityanskiy
1418ca49bfd9SMaxim Mikityanskiy if (!destroying) {
1419ca49bfd9SMaxim Mikityanskiy struct Qdisc *qdisc;
1420d03b195bSMaxim Mikityanskiy
1421d03b195bSMaxim Mikityanskiy if (dev->flags & IFF_UP)
1422d03b195bSMaxim Mikityanskiy dev_deactivate(dev);
1423d03b195bSMaxim Mikityanskiy qdisc = dev_graft_qdisc(queue_old, NULL);
1424ca49bfd9SMaxim Mikityanskiy WARN_ON(qdisc != cl_old->leaf.q);
1425ca49bfd9SMaxim Mikityanskiy }
1426ca49bfd9SMaxim Mikityanskiy
1427ca49bfd9SMaxim Mikityanskiy if (!(cl_old->leaf.q->flags & TCQ_F_BUILTIN))
1428ca49bfd9SMaxim Mikityanskiy cl_old->leaf.q->dev_queue = queue_new;
1429ca49bfd9SMaxim Mikityanskiy cl_old->leaf.offload_queue = queue_new;
1430ca49bfd9SMaxim Mikityanskiy
1431ca49bfd9SMaxim Mikityanskiy if (!destroying) {
1432ca49bfd9SMaxim Mikityanskiy struct Qdisc *qdisc;
1433ca49bfd9SMaxim Mikityanskiy
1434ca49bfd9SMaxim Mikityanskiy qdisc = dev_graft_qdisc(queue_new, cl_old->leaf.q);
1435d03b195bSMaxim Mikityanskiy if (dev->flags & IFF_UP)
1436d03b195bSMaxim Mikityanskiy dev_activate(dev);
1437d03b195bSMaxim Mikityanskiy WARN_ON(!(qdisc->flags & TCQ_F_BUILTIN));
1438d03b195bSMaxim Mikityanskiy }
1439ca49bfd9SMaxim Mikityanskiy }
1440d03b195bSMaxim Mikityanskiy
14411da177e4SLinus Torvalds static int htb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
1442653d6fd6SAlexander Aring struct Qdisc **old, struct netlink_ext_ack *extack)
14431da177e4SLinus Torvalds {
1444d03b195bSMaxim Mikityanskiy struct netdev_queue *dev_queue = sch->dev_queue;
14451da177e4SLinus Torvalds struct htb_class *cl = (struct htb_class *)arg;
1446d03b195bSMaxim Mikityanskiy struct htb_sched *q = qdisc_priv(sch);
1447d03b195bSMaxim Mikityanskiy struct Qdisc *old_q;
14481da177e4SLinus Torvalds
14495b9a9ccfSPatrick McHardy if (cl->level)
14505b9a9ccfSPatrick McHardy return -EINVAL;
1451d03b195bSMaxim Mikityanskiy
1452ca49bfd9SMaxim Mikityanskiy if (q->offload)
1453ca49bfd9SMaxim Mikityanskiy dev_queue = htb_offload_get_queue(cl);
1454d03b195bSMaxim Mikityanskiy
1455d03b195bSMaxim Mikityanskiy if (!new) {
1456d03b195bSMaxim Mikityanskiy new = qdisc_create_dflt(dev_queue, &pfifo_qdisc_ops,
1457d03b195bSMaxim Mikityanskiy cl->common.classid, extack);
1458d03b195bSMaxim Mikityanskiy if (!new)
14591da177e4SLinus Torvalds return -ENOBUFS;
1460d03b195bSMaxim Mikityanskiy }
1461d03b195bSMaxim Mikityanskiy
1462d03b195bSMaxim Mikityanskiy if (q->offload) {
1463d03b195bSMaxim Mikityanskiy /* One ref for cl->leaf.q, the other for dev_queue->qdisc. */
1464d03b195bSMaxim Mikityanskiy qdisc_refcount_inc(new);
1465d03b195bSMaxim Mikityanskiy old_q = htb_graft_helper(dev_queue, new);
1466d03b195bSMaxim Mikityanskiy }
14675b9a9ccfSPatrick McHardy
146811957be2SCong Wang *old = qdisc_replace(sch, new, &cl->leaf.q);
1469d03b195bSMaxim Mikityanskiy
1470d03b195bSMaxim Mikityanskiy if (q->offload) {
1471d03b195bSMaxim Mikityanskiy WARN_ON(old_q != *old);
1472d03b195bSMaxim Mikityanskiy qdisc_put(old_q);
1473d03b195bSMaxim Mikityanskiy }
1474d03b195bSMaxim Mikityanskiy
14751da177e4SLinus Torvalds return 0;
14761da177e4SLinus Torvalds }
14771da177e4SLinus Torvalds
14781da177e4SLinus Torvalds static struct Qdisc *htb_leaf(struct Qdisc *sch, unsigned long arg)
14791da177e4SLinus Torvalds {
14801da177e4SLinus Torvalds struct htb_class *cl = (struct htb_class *)arg;
148111957be2SCong Wang return !cl->level ? cl->leaf.q : NULL;
14821da177e4SLinus Torvalds }
14831da177e4SLinus Torvalds
1484256d61b8SPatrick McHardy static void htb_qlen_notify(struct Qdisc *sch, unsigned long arg)
1485256d61b8SPatrick McHardy {
1486256d61b8SPatrick McHardy struct htb_class *cl = (struct htb_class *)arg;
1487256d61b8SPatrick McHardy
1488256d61b8SPatrick McHardy htb_deactivate(qdisc_priv(sch), cl);
1489256d61b8SPatrick McHardy }
1490256d61b8SPatrick McHardy
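/* True when cl is its parent's only child, i.e. deleting cl turns the
 * parent back into a leaf.
 */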
1491160d5e10SJarek Poplawski static inline int htb_parent_last_child(struct htb_class *cl)
1492160d5e10SJarek Poplawski {
1493160d5e10SJarek Poplawski if (!cl->parent)
1494160d5e10SJarek Poplawski /* the root class */
1495160d5e10SJarek Poplawski return 0;
149642077599SPatrick McHardy if (cl->parent->children > 1)
1497160d5e10SJarek Poplawski /* not the last child */
1498160d5e10SJarek Poplawski return 0;
1499160d5e10SJarek Poplawski return 1;
1500160d5e10SJarek Poplawski }
1501160d5e10SJarek Poplawski
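/* Convert an inner class that just lost its last child back into a
 * leaf: remove it from the wait queue if needed, give it new_q (or
 * noop_qdisc) and reset its tokens and mode.
 */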
1502d03b195bSMaxim Mikityanskiy static void htb_parent_to_leaf(struct Qdisc *sch, struct htb_class *cl,
15033ba08b00SJarek Poplawski struct Qdisc *new_q)
1504160d5e10SJarek Poplawski {
1505d03b195bSMaxim Mikityanskiy struct htb_sched *q = qdisc_priv(sch);
1506160d5e10SJarek Poplawski struct htb_class *parent = cl->parent;
1507160d5e10SJarek Poplawski
150811957be2SCong Wang WARN_ON(cl->level || !cl->leaf.q || cl->prio_activity);
1509160d5e10SJarek Poplawski
15103ba08b00SJarek Poplawski if (parent->cmode != HTB_CAN_SEND)
1511c9364636SEric Dumazet htb_safe_rb_erase(&parent->pq_node,
1512c9364636SEric Dumazet &q->hlevel[parent->level].wait_pq);
15133ba08b00SJarek Poplawski
1514160d5e10SJarek Poplawski parent->level = 0;
151511957be2SCong Wang memset(&parent->inner, 0, sizeof(parent->inner));
151611957be2SCong Wang parent->leaf.q = new_q ? new_q : &noop_qdisc;
1517160d5e10SJarek Poplawski parent->tokens = parent->buffer;
1518160d5e10SJarek Poplawski parent->ctokens = parent->cbuffer;
1519d2de875cSEric Dumazet parent->t_c = ktime_get_ns();
1520160d5e10SJarek Poplawski parent->cmode = HTB_CAN_SEND;
1521ca49bfd9SMaxim Mikityanskiy if (q->offload)
1522ca49bfd9SMaxim Mikityanskiy parent->leaf.offload_queue = cl->leaf.offload_queue;
1523160d5e10SJarek Poplawski }
1524160d5e10SJarek Poplawski
1525d03b195bSMaxim Mikityanskiy static void htb_parent_to_leaf_offload(struct Qdisc *sch,
1526d03b195bSMaxim Mikityanskiy struct netdev_queue *dev_queue,
1527d03b195bSMaxim Mikityanskiy struct Qdisc *new_q)
1528d03b195bSMaxim Mikityanskiy {
1529d03b195bSMaxim Mikityanskiy struct Qdisc *old_q;
1530d03b195bSMaxim Mikityanskiy
1531d03b195bSMaxim Mikityanskiy /* One ref for cl->leaf.q, the other for dev_queue->qdisc. */
1532944d671dSYunjian Wang if (new_q)
1533d03b195bSMaxim Mikityanskiy qdisc_refcount_inc(new_q);
1534d03b195bSMaxim Mikityanskiy old_q = htb_graft_helper(dev_queue, new_q);
1535d03b195bSMaxim Mikityanskiy WARN_ON(!(old_q->flags & TCQ_F_BUILTIN));
1536d03b195bSMaxim Mikityanskiy }
1537d03b195bSMaxim Mikityanskiy
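/* Tear down the hardware state of a leaf: ungraft its qdisc (unless
 * the caller is destroying the whole hierarchy), fold its counters
 * into the parent's bias, and tell the driver to delete the node. The
 * driver may report that another classid was moved onto the freed
 * queue; that case is handled via htb_offload_move_qdisc().
 */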
1538d03b195bSMaxim Mikityanskiy static int htb_destroy_class_offload(struct Qdisc *sch, struct htb_class *cl,
1539d03b195bSMaxim Mikityanskiy bool last_child, bool destroying,
1540d03b195bSMaxim Mikityanskiy struct netlink_ext_ack *extack)
1541d03b195bSMaxim Mikityanskiy {
1542d03b195bSMaxim Mikityanskiy struct tc_htb_qopt_offload offload_opt;
1543ca49bfd9SMaxim Mikityanskiy struct netdev_queue *dev_queue;
1544d03b195bSMaxim Mikityanskiy struct Qdisc *q = cl->leaf.q;
1545a22b7388SRahul Rameshbabu struct Qdisc *old;
1546d03b195bSMaxim Mikityanskiy int err;
1547d03b195bSMaxim Mikityanskiy
1548d03b195bSMaxim Mikityanskiy if (cl->level)
1549d03b195bSMaxim Mikityanskiy return -EINVAL;
1550d03b195bSMaxim Mikityanskiy
1551d03b195bSMaxim Mikityanskiy WARN_ON(!q);
1552ca49bfd9SMaxim Mikityanskiy dev_queue = htb_offload_get_queue(cl);
1553a22b7388SRahul Rameshbabu /* When destroying, caller qdisc_graft grafts the new qdisc and invokes
1554a22b7388SRahul Rameshbabu * qdisc_put for the qdisc being destroyed. htb_destroy_class_offload
1555a22b7388SRahul Rameshbabu * does not need to graft or qdisc_put the qdisc being destroyed.
1556d03b195bSMaxim Mikityanskiy */
1557a22b7388SRahul Rameshbabu if (!destroying) {
1558a22b7388SRahul Rameshbabu old = htb_graft_helper(dev_queue, NULL);
1559a22b7388SRahul Rameshbabu /* Last qdisc grafted should be the same as cl->leaf.q when
1560a22b7388SRahul Rameshbabu * calling htb_delete.
1561a22b7388SRahul Rameshbabu */
1562d03b195bSMaxim Mikityanskiy WARN_ON(old != q);
1563a22b7388SRahul Rameshbabu }
1564d03b195bSMaxim Mikityanskiy
156583271586SMaxim Mikityanskiy if (cl->parent) {
1566f56940daSAhmed S. Darwish _bstats_update(&cl->parent->bstats_bias,
156750dc9a85SAhmed S. Darwish u64_stats_read(&q->bstats.bytes),
156850dc9a85SAhmed S. Darwish u64_stats_read(&q->bstats.packets));
156983271586SMaxim Mikityanskiy }
157083271586SMaxim Mikityanskiy
1571d03b195bSMaxim Mikityanskiy offload_opt = (struct tc_htb_qopt_offload) {
1572d03b195bSMaxim Mikityanskiy .command = !last_child ? TC_HTB_LEAF_DEL :
1573d03b195bSMaxim Mikityanskiy destroying ? TC_HTB_LEAF_DEL_LAST_FORCE :
1574d03b195bSMaxim Mikityanskiy TC_HTB_LEAF_DEL_LAST,
1575d03b195bSMaxim Mikityanskiy .classid = cl->common.classid,
1576d03b195bSMaxim Mikityanskiy .extack = extack,
1577d03b195bSMaxim Mikityanskiy };
1578d03b195bSMaxim Mikityanskiy err = htb_offload(qdisc_dev(sch), &offload_opt);
1579d03b195bSMaxim Mikityanskiy
1580a22b7388SRahul Rameshbabu if (!destroying) {
1581a22b7388SRahul Rameshbabu if (!err)
1582d03b195bSMaxim Mikityanskiy qdisc_put(old);
1583d03b195bSMaxim Mikityanskiy else
1584ca49bfd9SMaxim Mikityanskiy htb_graft_helper(dev_queue, old);
1585a22b7388SRahul Rameshbabu }
1586d03b195bSMaxim Mikityanskiy
1587d03b195bSMaxim Mikityanskiy if (last_child)
1588d03b195bSMaxim Mikityanskiy return err;
1589d03b195bSMaxim Mikityanskiy
1590ca49bfd9SMaxim Mikityanskiy if (!err && offload_opt.classid != TC_H_MIN(cl->common.classid)) {
1591ca49bfd9SMaxim Mikityanskiy u32 classid = TC_H_MAJ(sch->handle) |
1592ca49bfd9SMaxim Mikityanskiy TC_H_MIN(offload_opt.classid);
1593ca49bfd9SMaxim Mikityanskiy struct htb_class *moved_cl = htb_find(classid, sch);
1594ca49bfd9SMaxim Mikityanskiy
1595ca49bfd9SMaxim Mikityanskiy htb_offload_move_qdisc(sch, moved_cl, cl, destroying);
1596d03b195bSMaxim Mikityanskiy }
1597d03b195bSMaxim Mikityanskiy
1598d03b195bSMaxim Mikityanskiy return err;
1599d03b195bSMaxim Mikityanskiy }
1600d03b195bSMaxim Mikityanskiy
16011da177e4SLinus Torvalds static void htb_destroy_class(struct Qdisc *sch, struct htb_class *cl)
16021da177e4SLinus Torvalds {
16031da177e4SLinus Torvalds if (!cl->level) {
160411957be2SCong Wang WARN_ON(!cl->leaf.q);
160586bd446bSVlad Buslov qdisc_put(cl->leaf.q);
16061da177e4SLinus Torvalds }
16071c0d32fdSEric Dumazet gen_kill_estimator(&cl->rate_est);
16086529eabaSJiri Pirko tcf_block_put(cl->block);
16091da177e4SLinus Torvalds kfree(cl);
16101da177e4SLinus Torvalds }
16111da177e4SLinus Torvalds
16121da177e4SLinus Torvalds static void htb_destroy(struct Qdisc *sch)
16131da177e4SLinus Torvalds {
1614d03b195bSMaxim Mikityanskiy struct net_device *dev = qdisc_dev(sch);
1615d03b195bSMaxim Mikityanskiy struct tc_htb_qopt_offload offload_opt;
16161da177e4SLinus Torvalds struct htb_sched *q = qdisc_priv(sch);
1617b67bfe0dSSasha Levin struct hlist_node *next;
1618d03b195bSMaxim Mikityanskiy bool nonempty, changed;
1619fbd8f137SPatrick McHardy struct htb_class *cl;
1620fbd8f137SPatrick McHardy unsigned int i;
16211da177e4SLinus Torvalds
16221224736dSJarek Poplawski cancel_work_sync(&q->work);
1623fb983d45SPatrick McHardy qdisc_watchdog_cancel(&q->watchdog);
16241da177e4SLinus Torvalds /* This line used to be after the htb_destroy_class call below
1625cc7ec456SEric Dumazet * and surprisingly it worked in 2.4. But it must precede it
1626cc7ec456SEric Dumazet * because filters need their target class alive to be able to call
1627cc7ec456SEric Dumazet * unbind_filter on it (without an Oops).
1628cc7ec456SEric Dumazet */
16296529eabaSJiri Pirko tcf_block_put(q->block);
16301da177e4SLinus Torvalds
1631f4c1f3e0SPatrick McHardy for (i = 0; i < q->clhash.hashsize; i++) {
163289890422SKonstantin Khlebnikov hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
16336529eabaSJiri Pirko tcf_block_put(cl->block);
163489890422SKonstantin Khlebnikov cl->block = NULL;
163589890422SKonstantin Khlebnikov }
1636fbd8f137SPatrick McHardy }
1637d03b195bSMaxim Mikityanskiy
1638d03b195bSMaxim Mikityanskiy do {
1639d03b195bSMaxim Mikityanskiy nonempty = false;
1640d03b195bSMaxim Mikityanskiy changed = false;
1641f4c1f3e0SPatrick McHardy for (i = 0; i < q->clhash.hashsize; i++) {
1642b67bfe0dSSasha Levin hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i],
1643d03b195bSMaxim Mikityanskiy common.hnode) {
1644d03b195bSMaxim Mikityanskiy bool last_child;
1645d03b195bSMaxim Mikityanskiy
1646d03b195bSMaxim Mikityanskiy if (!q->offload) {
1647d03b195bSMaxim Mikityanskiy htb_destroy_class(sch, cl);
1648d03b195bSMaxim Mikityanskiy continue;
1649d03b195bSMaxim Mikityanskiy }
1650d03b195bSMaxim Mikityanskiy
1651d03b195bSMaxim Mikityanskiy nonempty = true;
1652d03b195bSMaxim Mikityanskiy
1653d03b195bSMaxim Mikityanskiy if (cl->level)
1654d03b195bSMaxim Mikityanskiy continue;
1655d03b195bSMaxim Mikityanskiy
1656d03b195bSMaxim Mikityanskiy changed = true;
1657d03b195bSMaxim Mikityanskiy
1658d03b195bSMaxim Mikityanskiy last_child = htb_parent_last_child(cl);
1659d03b195bSMaxim Mikityanskiy htb_destroy_class_offload(sch, cl, last_child,
1660d03b195bSMaxim Mikityanskiy true, NULL);
1661d03b195bSMaxim Mikityanskiy qdisc_class_hash_remove(&q->clhash,
1662d03b195bSMaxim Mikityanskiy &cl->common);
1663d03b195bSMaxim Mikityanskiy if (cl->parent)
1664d03b195bSMaxim Mikityanskiy cl->parent->children--;
1665d03b195bSMaxim Mikityanskiy if (last_child)
1666d03b195bSMaxim Mikityanskiy htb_parent_to_leaf(sch, cl, NULL);
1667fbd8f137SPatrick McHardy htb_destroy_class(sch, cl);
1668fbd8f137SPatrick McHardy }
1669d03b195bSMaxim Mikityanskiy }
1670d03b195bSMaxim Mikityanskiy } while (changed);
1671d03b195bSMaxim Mikityanskiy WARN_ON(nonempty);
1672d03b195bSMaxim Mikityanskiy
1673f4c1f3e0SPatrick McHardy qdisc_class_hash_destroy(&q->clhash);
1674a5a9f534SEric Dumazet __qdisc_reset_queue(&q->direct_queue);
1675d03b195bSMaxim Mikityanskiy
1676d59f4e1dSZhengchao Shao if (q->offload) {
1677d03b195bSMaxim Mikityanskiy offload_opt = (struct tc_htb_qopt_offload) {
1678d03b195bSMaxim Mikityanskiy .command = TC_HTB_DESTROY,
1679d03b195bSMaxim Mikityanskiy };
1680d03b195bSMaxim Mikityanskiy htb_offload(dev, &offload_opt);
1681d59f4e1dSZhengchao Shao }
1682d03b195bSMaxim Mikityanskiy
1683d03b195bSMaxim Mikityanskiy if (!q->direct_qdiscs)
1684d03b195bSMaxim Mikityanskiy return;
1685d03b195bSMaxim Mikityanskiy for (i = 0; i < q->num_direct_qdiscs && q->direct_qdiscs[i]; i++)
1686d03b195bSMaxim Mikityanskiy qdisc_put(q->direct_qdiscs[i]);
1687d03b195bSMaxim Mikityanskiy kfree(q->direct_qdiscs);
16881da177e4SLinus Torvalds }
16891da177e4SLinus Torvalds
16904dd78a73SMaxim Mikityanskiy static int htb_delete(struct Qdisc *sch, unsigned long arg,
16914dd78a73SMaxim Mikityanskiy struct netlink_ext_ack *extack)
16921da177e4SLinus Torvalds {
16931da177e4SLinus Torvalds struct htb_sched *q = qdisc_priv(sch);
16941da177e4SLinus Torvalds struct htb_class *cl = (struct htb_class *)arg;
1695160d5e10SJarek Poplawski struct Qdisc *new_q = NULL;
1696160d5e10SJarek Poplawski int last_child = 0;
1697d03b195bSMaxim Mikityanskiy int err;
16981da177e4SLinus Torvalds
1699a071d272SYang Yingliang /* TODO: why don't we allow deleting a subtree? references? does the
1700a071d272SYang Yingliang * tc subsys guarantee us that in htb_destroy it holds no class
1701a071d272SYang Yingliang * refs so that we can remove children safely there?
1702a071d272SYang Yingliang */
17037118f56eSPedro Tammela if (cl->children || qdisc_class_in_use(&cl->common)) {
17047118f56eSPedro Tammela NL_SET_ERR_MSG(extack, "HTB class in use");
17051da177e4SLinus Torvalds return -EBUSY;
17067118f56eSPedro Tammela }
17071da177e4SLinus Torvalds
1708d03b195bSMaxim Mikityanskiy if (!cl->level && htb_parent_last_child(cl))
1709d03b195bSMaxim Mikityanskiy last_child = 1;
1710d03b195bSMaxim Mikityanskiy
1711d03b195bSMaxim Mikityanskiy if (q->offload) {
1712d03b195bSMaxim Mikityanskiy err = htb_destroy_class_offload(sch, cl, last_child, false,
1713d03b195bSMaxim Mikityanskiy extack);
1714d03b195bSMaxim Mikityanskiy if (err)
1715d03b195bSMaxim Mikityanskiy return err;
1716d03b195bSMaxim Mikityanskiy }
1717d03b195bSMaxim Mikityanskiy
1718d03b195bSMaxim Mikityanskiy if (last_child) {
1719ca49bfd9SMaxim Mikityanskiy struct netdev_queue *dev_queue = sch->dev_queue;
1720d03b195bSMaxim Mikityanskiy
1721ca49bfd9SMaxim Mikityanskiy if (q->offload)
1722ca49bfd9SMaxim Mikityanskiy dev_queue = htb_offload_get_queue(cl);
1723ca49bfd9SMaxim Mikityanskiy
1724d03b195bSMaxim Mikityanskiy new_q = qdisc_create_dflt(dev_queue, &pfifo_qdisc_ops,
1725a38a9882SAlexander Aring cl->parent->common.classid,
1726a38a9882SAlexander Aring NULL);
1727*6d8b2c52SDavide Caratti if (q->offload)
1728d03b195bSMaxim Mikityanskiy htb_parent_to_leaf_offload(sch, dev_queue, new_q);
1729d03b195bSMaxim Mikityanskiy }
1730160d5e10SJarek Poplawski
17311da177e4SLinus Torvalds sch_tree_lock(sch);
17321da177e4SLinus Torvalds
1733e5f0e8f8SPaolo Abeni if (!cl->level)
1734e5f0e8f8SPaolo Abeni qdisc_purge_queue(cl->leaf.q);
1735814a175eSPatrick McHardy
1736f4c1f3e0SPatrick McHardy /* delete from hash and active; remainder in destroy_class */
1737f4c1f3e0SPatrick McHardy qdisc_class_hash_remove(&q->clhash, &cl->common);
173826b284deSJarek Poplawski if (cl->parent)
173942077599SPatrick McHardy cl->parent->children--;
1740c38c83cbSPatrick McHardy
17411da177e4SLinus Torvalds if (cl->prio_activity)
17421da177e4SLinus Torvalds htb_deactivate(q, cl);
17431da177e4SLinus Torvalds
1744fbd8f137SPatrick McHardy if (cl->cmode != HTB_CAN_SEND)
1745c9364636SEric Dumazet htb_safe_rb_erase(&cl->pq_node,
1746c9364636SEric Dumazet &q->hlevel[cl->level].wait_pq);
1747fbd8f137SPatrick McHardy
1748160d5e10SJarek Poplawski if (last_child)
1749d03b195bSMaxim Mikityanskiy htb_parent_to_leaf(sch, cl, new_q);
1750160d5e10SJarek Poplawski
17511da177e4SLinus Torvalds sch_tree_unlock(sch);
17521da177e4SLinus Torvalds
17531da177e4SLinus Torvalds htb_destroy_class(sch, cl);
1754143976ceSWANG Cong return 0;
17551da177e4SLinus Torvalds }
17561da177e4SLinus Torvalds
17571da177e4SLinus Torvalds static int htb_change_class(struct Qdisc *sch, u32 classid,
17581e90474cSPatrick McHardy u32 parentid, struct nlattr **tca,
1759793d81d6SAlexander Aring unsigned long *arg, struct netlink_ext_ack *extack)
17601da177e4SLinus Torvalds {
17611da177e4SLinus Torvalds int err = -EINVAL;
17621da177e4SLinus Torvalds struct htb_sched *q = qdisc_priv(sch);
17631da177e4SLinus Torvalds struct htb_class *cl = (struct htb_class *)*arg, *parent;
1764d03b195bSMaxim Mikityanskiy struct tc_htb_qopt_offload offload_opt;
17651e90474cSPatrick McHardy struct nlattr *opt = tca[TCA_OPTIONS];
17666906f4edSEric Dumazet struct nlattr *tb[TCA_HTB_MAX + 1];
17674ce70b4aSVlad Buslov struct Qdisc *parent_qdisc = NULL;
1768d03b195bSMaxim Mikityanskiy struct netdev_queue *dev_queue;
17691da177e4SLinus Torvalds struct tc_htb_opt *hopt;
1770df62cdf3SEric Dumazet u64 rate64, ceil64;
1771da01ec4eSLi RongQing int warn = 0;
17721da177e4SLinus Torvalds
17731da177e4SLinus Torvalds /* extract all subattrs from opt attr */
1774cee63723SPatrick McHardy if (!opt)
1775cee63723SPatrick McHardy goto failure;
1776cee63723SPatrick McHardy
17778cb08174SJohannes Berg err = nla_parse_nested_deprecated(tb, TCA_HTB_MAX, opt, htb_policy,
1778807cfdedSPedro Tammela extack);
1779cee63723SPatrick McHardy if (err < 0)
1780cee63723SPatrick McHardy goto failure;
1781cee63723SPatrick McHardy
1782cee63723SPatrick McHardy err = -EINVAL;
178327a3421eSPatrick McHardy if (tb[TCA_HTB_PARMS] == NULL)
17841da177e4SLinus Torvalds goto failure;
17851da177e4SLinus Torvalds
17861da177e4SLinus Torvalds parent = parentid == TC_H_ROOT ? NULL : htb_find(parentid, sch);
17871da177e4SLinus Torvalds
17881e90474cSPatrick McHardy hopt = nla_data(tb[TCA_HTB_PARMS]);
1789196d97f6SEric Dumazet if (!hopt->rate.rate || !hopt->ceil.rate)
179087990467SStephen Hemminger goto failure;
17911da177e4SLinus Torvalds
1792429c3be8SMaxim Mikityanskiy if (q->offload) {
1793429c3be8SMaxim Mikityanskiy /* Options not supported by the offload. */
1794429c3be8SMaxim Mikityanskiy if (hopt->rate.overhead || hopt->ceil.overhead) {
1795429c3be8SMaxim Mikityanskiy NL_SET_ERR_MSG(extack, "HTB offload doesn't support the overhead parameter");
1796429c3be8SMaxim Mikityanskiy goto failure;
1797429c3be8SMaxim Mikityanskiy }
1798429c3be8SMaxim Mikityanskiy if (hopt->rate.mpu || hopt->ceil.mpu) {
1799429c3be8SMaxim Mikityanskiy NL_SET_ERR_MSG(extack, "HTB offload doesn't support the mpu parameter");
1800429c3be8SMaxim Mikityanskiy goto failure;
1801429c3be8SMaxim Mikityanskiy }
1802429c3be8SMaxim Mikityanskiy }
1803429c3be8SMaxim Mikityanskiy
18048a8e3d84SJesper Dangaard Brouer /* Keeping backward compatibility with rate_table based iproute2 tc */
18056b1dd856SYang Yingliang if (hopt->rate.linklayer == TC_LINKLAYER_UNAWARE)
1806e9bc3fa2SAlexander Aring qdisc_put_rtab(qdisc_get_rtab(&hopt->rate, tb[TCA_HTB_RTAB],
1807e9bc3fa2SAlexander Aring NULL));
18086b1dd856SYang Yingliang
18096b1dd856SYang Yingliang if (hopt->ceil.linklayer == TC_LINKLAYER_UNAWARE)
1810e9bc3fa2SAlexander Aring qdisc_put_rtab(qdisc_get_rtab(&hopt->ceil, tb[TCA_HTB_CTAB],
1811e9bc3fa2SAlexander Aring NULL));
18128a8e3d84SJesper Dangaard Brouer
1813d03b195bSMaxim Mikityanskiy rate64 = tb[TCA_HTB_RATE64] ? nla_get_u64(tb[TCA_HTB_RATE64]) : 0;
1814d03b195bSMaxim Mikityanskiy ceil64 = tb[TCA_HTB_CEIL64] ? nla_get_u64(tb[TCA_HTB_CEIL64]) : 0;
1815d03b195bSMaxim Mikityanskiy
18161da177e4SLinus Torvalds if (!cl) { /* new class */
1817d03b195bSMaxim Mikityanskiy struct net_device *dev = qdisc_dev(sch);
1818d03b195bSMaxim Mikityanskiy struct Qdisc *new_q, *old_q;
18193696f625SStephen Hemminger int prio;
1820ee39e10cSPatrick McHardy struct {
18211e90474cSPatrick McHardy struct nlattr nla;
1822ee39e10cSPatrick McHardy struct gnet_estimator opt;
1823ee39e10cSPatrick McHardy } est = {
18241e90474cSPatrick McHardy .nla = {
18251e90474cSPatrick McHardy .nla_len = nla_attr_size(sizeof(est.opt)),
18261e90474cSPatrick McHardy .nla_type = TCA_RATE,
1827ee39e10cSPatrick McHardy },
1828ee39e10cSPatrick McHardy .opt = {
1829ee39e10cSPatrick McHardy /* 4s interval, 16s averaging constant */
1830ee39e10cSPatrick McHardy .interval = 2,
1831ee39e10cSPatrick McHardy .ewma_log = 2,
1832ee39e10cSPatrick McHardy },
1833ee39e10cSPatrick McHardy };
18343696f625SStephen Hemminger
18351da177e4SLinus Torvalds /* check for valid classid */
1836f64f9e71SJoe Perches if (!classid || TC_H_MAJ(classid ^ sch->handle) ||
1837f64f9e71SJoe Perches htb_find(classid, sch))
18381da177e4SLinus Torvalds goto failure;
18391da177e4SLinus Torvalds
18401da177e4SLinus Torvalds /* check maximal depth */
18411da177e4SLinus Torvalds if (parent && parent->parent && parent->parent->level < 2) {
1842807cfdedSPedro Tammela NL_SET_ERR_MSG_MOD(extack, "tree is too deep");
18431da177e4SLinus Torvalds goto failure;
18441da177e4SLinus Torvalds }
18451da177e4SLinus Torvalds err = -ENOBUFS;
1846cc7ec456SEric Dumazet cl = kzalloc(sizeof(*cl), GFP_KERNEL);
1847cc7ec456SEric Dumazet if (!cl)
18481da177e4SLinus Torvalds goto failure;
18491da177e4SLinus Torvalds
185050dc9a85SAhmed S. Darwish gnet_stats_basic_sync_init(&cl->bstats);
185150dc9a85SAhmed S. Darwish gnet_stats_basic_sync_init(&cl->bstats_bias);
185267c9e627SAhmed S. Darwish
18538d1a77f9SAlexander Aring err = tcf_block_get(&cl->block, &cl->filter_list, sch, extack);
18546529eabaSJiri Pirko if (err) {
18556529eabaSJiri Pirko kfree(cl);
18566529eabaSJiri Pirko goto failure;
18576529eabaSJiri Pirko }
185864153ce0SEric Dumazet if (htb_rate_est || tca[TCA_RATE]) {
185922e0f8b9SJohn Fastabend err = gen_new_estimator(&cl->bstats, NULL,
186022e0f8b9SJohn Fastabend &cl->rate_est,
1861edb09eb1SEric Dumazet NULL,
186229cbcd85SAhmed S. Darwish true,
18631e90474cSPatrick McHardy tca[TCA_RATE] ? : &est.nla);
1864d03b195bSMaxim Mikityanskiy if (err)
1865d03b195bSMaxim Mikityanskiy goto err_block_put;
186664153ce0SEric Dumazet }
186771bcb09aSStephen Hemminger
186842077599SPatrick McHardy cl->children = 0;
18693696f625SStephen Hemminger RB_CLEAR_NODE(&cl->pq_node);
18703696f625SStephen Hemminger
18713696f625SStephen Hemminger for (prio = 0; prio < TC_HTB_NUMPRIO; prio++)
18723696f625SStephen Hemminger RB_CLEAR_NODE(&cl->node[prio]);
18731da177e4SLinus Torvalds
1874d03b195bSMaxim Mikityanskiy cl->common.classid = classid;
1875d03b195bSMaxim Mikityanskiy
1876d03b195bSMaxim Mikityanskiy /* Make sure nothing interrupts us between two
1877d03b195bSMaxim Mikityanskiy * ndo_setup_tc calls.
1878d03b195bSMaxim Mikityanskiy */
1879d03b195bSMaxim Mikityanskiy ASSERT_RTNL();
1880d03b195bSMaxim Mikityanskiy
18811da177e4SLinus Torvalds /* create the leaf qdisc early because it uses kmalloc(GFP_KERNEL),
1882cc7ec456SEric Dumazet * which can't be used inside of sch_tree_lock
1883cc7ec456SEric Dumazet * -- thanks to Karlis Peisenieks
1884cc7ec456SEric Dumazet */
1885d03b195bSMaxim Mikityanskiy if (!q->offload) {
1886d03b195bSMaxim Mikityanskiy dev_queue = sch->dev_queue;
1887d03b195bSMaxim Mikityanskiy } else if (!(parent && !parent->level)) {
1888d03b195bSMaxim Mikityanskiy /* Assign a dev_queue to this classid. */
1889d03b195bSMaxim Mikityanskiy offload_opt = (struct tc_htb_qopt_offload) {
1890d03b195bSMaxim Mikityanskiy .command = TC_HTB_LEAF_ALLOC_QUEUE,
1891d03b195bSMaxim Mikityanskiy .classid = cl->common.classid,
1892d03b195bSMaxim Mikityanskiy .parent_classid = parent ?
1893d03b195bSMaxim Mikityanskiy TC_H_MIN(parent->common.classid) :
1894d03b195bSMaxim Mikityanskiy TC_HTB_CLASSID_ROOT,
1895d03b195bSMaxim Mikityanskiy .rate = max_t(u64, hopt->rate.rate, rate64),
1896d03b195bSMaxim Mikityanskiy .ceil = max_t(u64, hopt->ceil.rate, ceil64),
189712e7789aSNaveen Mamindlapalli .prio = hopt->prio,
18989fe63d5fSNaveen Mamindlapalli .quantum = hopt->quantum,
1899d03b195bSMaxim Mikityanskiy .extack = extack,
1900d03b195bSMaxim Mikityanskiy };
1901d03b195bSMaxim Mikityanskiy err = htb_offload(dev, &offload_opt);
1902d03b195bSMaxim Mikityanskiy if (err) {
1903807cfdedSPedro Tammela NL_SET_ERR_MSG_WEAK(extack,
1904807cfdedSPedro Tammela "Failed to offload TC_HTB_LEAF_ALLOC_QUEUE");
1905d03b195bSMaxim Mikityanskiy goto err_kill_estimator;
1906d03b195bSMaxim Mikityanskiy }
1907d03b195bSMaxim Mikityanskiy dev_queue = netdev_get_tx_queue(dev, offload_opt.qid);
1908d03b195bSMaxim Mikityanskiy } else { /* First child. */
1909ca49bfd9SMaxim Mikityanskiy dev_queue = htb_offload_get_queue(parent);
1910d03b195bSMaxim Mikityanskiy old_q = htb_graft_helper(dev_queue, NULL);
1911d03b195bSMaxim Mikityanskiy WARN_ON(old_q != parent->leaf.q);
1912d03b195bSMaxim Mikityanskiy offload_opt = (struct tc_htb_qopt_offload) {
1913d03b195bSMaxim Mikityanskiy .command = TC_HTB_LEAF_TO_INNER,
1914d03b195bSMaxim Mikityanskiy .classid = cl->common.classid,
1915d03b195bSMaxim Mikityanskiy .parent_classid =
1916d03b195bSMaxim Mikityanskiy TC_H_MIN(parent->common.classid),
1917d03b195bSMaxim Mikityanskiy .rate = max_t(u64, hopt->rate.rate, rate64),
1918d03b195bSMaxim Mikityanskiy .ceil = max_t(u64, hopt->ceil.rate, ceil64),
191912e7789aSNaveen Mamindlapalli .prio = hopt->prio,
19209fe63d5fSNaveen Mamindlapalli .quantum = hopt->quantum,
1921d03b195bSMaxim Mikityanskiy .extack = extack,
1922d03b195bSMaxim Mikityanskiy };
1923d03b195bSMaxim Mikityanskiy err = htb_offload(dev, &offload_opt);
1924d03b195bSMaxim Mikityanskiy if (err) {
1925807cfdedSPedro Tammela NL_SET_ERR_MSG_WEAK(extack,
1926807cfdedSPedro Tammela "Failed to offload TC_HTB_LEAF_TO_INNER");
1927d03b195bSMaxim Mikityanskiy htb_graft_helper(dev_queue, old_q);
1928d03b195bSMaxim Mikityanskiy goto err_kill_estimator;
1929d03b195bSMaxim Mikityanskiy }
1930f56940daSAhmed S. Darwish _bstats_update(&parent->bstats_bias,
193150dc9a85SAhmed S. Darwish u64_stats_read(&old_q->bstats.bytes),
193250dc9a85SAhmed S. Darwish u64_stats_read(&old_q->bstats.packets));
1933d03b195bSMaxim Mikityanskiy qdisc_put(old_q);
1934d03b195bSMaxim Mikityanskiy }
1935d03b195bSMaxim Mikityanskiy new_q = qdisc_create_dflt(dev_queue, &pfifo_qdisc_ops,
1936a38a9882SAlexander Aring classid, NULL);
1937d03b195bSMaxim Mikityanskiy if (q->offload) {
1938*6d8b2c52SDavide Caratti /* One ref for cl->leaf.q, the other for dev_queue->qdisc. */
1939*6d8b2c52SDavide Caratti if (new_q)
1940d03b195bSMaxim Mikityanskiy qdisc_refcount_inc(new_q);
1941d03b195bSMaxim Mikityanskiy old_q = htb_graft_helper(dev_queue, new_q);
1942d03b195bSMaxim Mikityanskiy /* No qdisc_put needed. */
1943d03b195bSMaxim Mikityanskiy WARN_ON(!(old_q->flags & TCQ_F_BUILTIN));
1944d03b195bSMaxim Mikityanskiy }
19451da177e4SLinus Torvalds sch_tree_lock(sch);
19461da177e4SLinus Torvalds if (parent && !parent->level) {
19471da177e4SLinus Torvalds /* turn parent into inner node */
1948e5f0e8f8SPaolo Abeni qdisc_purge_queue(parent->leaf.q);
19494ce70b4aSVlad Buslov parent_qdisc = parent->leaf.q;
19501da177e4SLinus Torvalds if (parent->prio_activity)
19511da177e4SLinus Torvalds htb_deactivate(q, parent);
19521da177e4SLinus Torvalds
19531da177e4SLinus Torvalds /* remove from evt list because of level change */
19541da177e4SLinus Torvalds if (parent->cmode != HTB_CAN_SEND) {
1955c9364636SEric Dumazet htb_safe_rb_erase(&parent->pq_node, &q->hlevel[0].wait_pq);
19561da177e4SLinus Torvalds parent->cmode = HTB_CAN_SEND;
19571da177e4SLinus Torvalds }
19581da177e4SLinus Torvalds parent->level = (parent->parent ? parent->parent->level
19591da177e4SLinus Torvalds : TC_HTB_MAXDEPTH) - 1;
196011957be2SCong Wang memset(&parent->inner, 0, sizeof(parent->inner));
19611da177e4SLinus Torvalds }
1962d03b195bSMaxim Mikityanskiy
19631da177e4SLinus Torvalds /* leaf (we) needs an elementary qdisc */
196411957be2SCong Wang cl->leaf.q = new_q ? new_q : &noop_qdisc;
1965ca49bfd9SMaxim Mikityanskiy if (q->offload)
1966ca49bfd9SMaxim Mikityanskiy cl->leaf.offload_queue = dev_queue;
19671da177e4SLinus Torvalds
196887990467SStephen Hemminger cl->parent = parent;
19691da177e4SLinus Torvalds
19701da177e4SLinus Torvalds /* set class to be in HTB_CAN_SEND state */
1971b9a7afdeSJiri Pirko cl->tokens = PSCHED_TICKS2NS(hopt->buffer);
1972b9a7afdeSJiri Pirko cl->ctokens = PSCHED_TICKS2NS(hopt->cbuffer);
19735343a7f8SEric Dumazet cl->mbuffer = 60ULL * NSEC_PER_SEC; /* 1min */
1974d2de875cSEric Dumazet cl->t_c = ktime_get_ns();
19751da177e4SLinus Torvalds cl->cmode = HTB_CAN_SEND;
19761da177e4SLinus Torvalds
19771da177e4SLinus Torvalds /* attach to the hash list and parent's family */
1978f4c1f3e0SPatrick McHardy qdisc_class_hash_insert(&q->clhash, &cl->common);
197942077599SPatrick McHardy if (parent)
198042077599SPatrick McHardy parent->children++;
198111957be2SCong Wang if (cl->leaf.q != &noop_qdisc)
198211957be2SCong Wang qdisc_hash_add(cl->leaf.q, true);
1983ee39e10cSPatrick McHardy } else {
198471bcb09aSStephen Hemminger if (tca[TCA_RATE]) {
198522e0f8b9SJohn Fastabend err = gen_replace_estimator(&cl->bstats, NULL,
198622e0f8b9SJohn Fastabend &cl->rate_est,
1987edb09eb1SEric Dumazet NULL,
198829cbcd85SAhmed S. Darwish true,
19891e90474cSPatrick McHardy tca[TCA_RATE]);
199071bcb09aSStephen Hemminger if (err)
199171bcb09aSStephen Hemminger return err;
199271bcb09aSStephen Hemminger }
1993d03b195bSMaxim Mikityanskiy
1994d03b195bSMaxim Mikityanskiy if (q->offload) {
1995d03b195bSMaxim Mikityanskiy struct net_device *dev = qdisc_dev(sch);
1996d03b195bSMaxim Mikityanskiy
1997d03b195bSMaxim Mikityanskiy offload_opt = (struct tc_htb_qopt_offload) {
1998d03b195bSMaxim Mikityanskiy .command = TC_HTB_NODE_MODIFY,
1999d03b195bSMaxim Mikityanskiy .classid = cl->common.classid,
2000d03b195bSMaxim Mikityanskiy .rate = max_t(u64, hopt->rate.rate, rate64),
2001d03b195bSMaxim Mikityanskiy .ceil = max_t(u64, hopt->ceil.rate, ceil64),
200212e7789aSNaveen Mamindlapalli .prio = hopt->prio,
20039fe63d5fSNaveen Mamindlapalli .quantum = hopt->quantum,
2004d03b195bSMaxim Mikityanskiy .extack = extack,
2005d03b195bSMaxim Mikityanskiy };
2006d03b195bSMaxim Mikityanskiy err = htb_offload(dev, &offload_opt);
2007d03b195bSMaxim Mikityanskiy if (err)
2008d03b195bSMaxim Mikityanskiy /* Estimator was replaced, and rollback may fail
2009d03b195bSMaxim Mikityanskiy * as well, so we don't try to recover it, and
2010d03b195bSMaxim Mikityanskiy * the estimator won't work properly with the
2011d03b195bSMaxim Mikityanskiy * offload anyway, because bstats are updated
2012d03b195bSMaxim Mikityanskiy * only when the stats are queried.
2013d03b195bSMaxim Mikityanskiy */
2014d03b195bSMaxim Mikityanskiy return err;
2015ee39e10cSPatrick McHardy }
20161da177e4SLinus Torvalds
2017d03b195bSMaxim Mikityanskiy sch_tree_lock(sch);
2018d03b195bSMaxim Mikityanskiy }
20191598f7cbSYang Yingliang
20201598f7cbSYang Yingliang psched_ratecfg_precompute(&cl->rate, &hopt->rate, rate64);
20211598f7cbSYang Yingliang psched_ratecfg_precompute(&cl->ceil, &hopt->ceil, ceil64);
20221598f7cbSYang Yingliang
20231da177e4SLinus Torvalds /* there used to be a nasty bug here: we have to check that the node
202411957be2SCong Wang * is really a leaf before changing cl->leaf !
2025cc7ec456SEric Dumazet */
20261da177e4SLinus Torvalds if (!cl->level) {
20271598f7cbSYang Yingliang u64 quantum = cl->rate.rate_bytes_ps;
20281598f7cbSYang Yingliang
20291598f7cbSYang Yingliang do_div(quantum, q->rate2quantum);
20301598f7cbSYang Yingliang cl->quantum = min_t(u64, quantum, INT_MAX);
20311598f7cbSYang Yingliang
2032c19f7a34SJarek Poplawski if (!hopt->quantum && cl->quantum < 1000) {
2033da01ec4eSLi RongQing warn = -1;
2034c19f7a34SJarek Poplawski cl->quantum = 1000;
20351da177e4SLinus Torvalds }
2036c19f7a34SJarek Poplawski if (!hopt->quantum && cl->quantum > 200000) {
2037da01ec4eSLi RongQing warn = 1;
2038c19f7a34SJarek Poplawski cl->quantum = 200000;
20391da177e4SLinus Torvalds }
20401da177e4SLinus Torvalds if (hopt->quantum)
2041c19f7a34SJarek Poplawski cl->quantum = hopt->quantum;
2042c19f7a34SJarek Poplawski if ((cl->prio = hopt->prio) >= TC_HTB_NUMPRIO)
2043c19f7a34SJarek Poplawski cl->prio = TC_HTB_NUMPRIO - 1;
20441da177e4SLinus Torvalds }
20451da177e4SLinus Torvalds
2046324f5aa5SJiri Pirko cl->buffer = PSCHED_TICKS2NS(hopt->buffer);
2047f3ad857eSVimalkumar cl->cbuffer = PSCHED_TICKS2NS(hopt->cbuffer);
204856b765b7SVimalkumar
20491da177e4SLinus Torvalds sch_tree_unlock(sch);
20504ce70b4aSVlad Buslov qdisc_put(parent_qdisc);
20511da177e4SLinus Torvalds
2052da01ec4eSLi RongQing if (warn)
2053807cfdedSPedro Tammela NL_SET_ERR_MSG_FMT_MOD(extack,
2054807cfdedSPedro Tammela "quantum of class %X is %s. Consider r2q change.",
2055da01ec4eSLi RongQing cl->common.classid, (warn == -1 ? "small" : "big"));
2056da01ec4eSLi RongQing
2057f4c1f3e0SPatrick McHardy qdisc_class_hash_grow(sch, &q->clhash);
2058f4c1f3e0SPatrick McHardy
20591da177e4SLinus Torvalds *arg = (unsigned long)cl;
20601da177e4SLinus Torvalds return 0;
20611da177e4SLinus Torvalds
2062d03b195bSMaxim Mikityanskiy err_kill_estimator:
2063d03b195bSMaxim Mikityanskiy gen_kill_estimator(&cl->rate_est);
2064d03b195bSMaxim Mikityanskiy err_block_put:
2065d03b195bSMaxim Mikityanskiy tcf_block_put(cl->block);
2066d03b195bSMaxim Mikityanskiy kfree(cl);
20671da177e4SLinus Torvalds failure:
20681da177e4SLinus Torvalds return err;
20691da177e4SLinus Torvalds }
20701da177e4SLinus Torvalds
2071cbaacc4eSAlexander Aring static struct tcf_block *htb_tcf_block(struct Qdisc *sch, unsigned long arg,
2072cbaacc4eSAlexander Aring struct netlink_ext_ack *extack)
20731da177e4SLinus Torvalds {
20741da177e4SLinus Torvalds struct htb_sched *q = qdisc_priv(sch);
20751da177e4SLinus Torvalds struct htb_class *cl = (struct htb_class *)arg;
20763bf72957SStephen Hemminger
20776529eabaSJiri Pirko return cl ? cl->block : q->block;
20781da177e4SLinus Torvalds }
20791da177e4SLinus Torvalds
20801da177e4SLinus Torvalds static unsigned long htb_bind_filter(struct Qdisc *sch, unsigned long parent,
20811da177e4SLinus Torvalds u32 classid)
20821da177e4SLinus Torvalds {
20831da177e4SLinus Torvalds struct htb_class *cl = htb_find(classid, sch);
20843bf72957SStephen Hemminger
	/* if (cl && !cl->level) return 0;
	 * The line above used to be here to prevent attaching filters to
	 * leaves. But at least the tc_index filter uses this just to look up
	 * the class, so we have to allow it.
	 * ----
	 * 19.6.2002 As Werner explained, this is OK: binding a filter is just
	 * another way to "lock" the class; unlike "get", this lock can be
	 * broken by the class during destroy, IIUC.
	 */
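	/* Illustrative userspace trigger for this path (hypothetical
	 * handles):
	 *   tc filter add dev eth0 parent 1: protocol ip u32 \
	 *      match ip dport 80 0xffff flowid 1:10
	 * binds the filter result to class 1:10, taking a reference below.
	 */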
	if (cl)
		qdisc_class_get(&cl->common);
	return (unsigned long)cl;
}

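/* Drop the reference taken in htb_bind_filter() once the filter is gone. */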
static void htb_unbind_filter(struct Qdisc *sch, unsigned long arg)
{
	struct htb_class *cl = (struct htb_class *)arg;

	qdisc_class_put(&cl->common);
}

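/* Walk every class in the hash table for dumping, e.g. "tc class show". */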
static void htb_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl;
	unsigned int i;

	if (arg->stop)
		return;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
			if (!tc_qdisc_stats_dump(sch, (unsigned long)cl, arg))
				return;
		}
	}
}

static const struct Qdisc_class_ops htb_class_ops = {
	.select_queue	= htb_select_queue,
	.graft		= htb_graft,
	.leaf		= htb_leaf,
	.qlen_notify	= htb_qlen_notify,
	.find		= htb_search,
	.change		= htb_change_class,
	.delete		= htb_delete,
	.walk		= htb_walk,
	.tcf_block	= htb_tcf_block,
	.bind_tcf	= htb_bind_filter,
	.unbind_tcf	= htb_unbind_filter,
	.dump		= htb_dump_class,
	.dump_stats	= htb_dump_class_stats,
};

static struct Qdisc_ops htb_qdisc_ops __read_mostly = {
	.cl_ops		= &htb_class_ops,
	.id		= "htb",
	.priv_size	= sizeof(struct htb_sched),
	.enqueue	= htb_enqueue,
	.dequeue	= htb_dequeue,
	.peek		= qdisc_peek_dequeued,
	.init		= htb_init,
	.attach		= htb_attach,
	.reset		= htb_reset,
	.destroy	= htb_destroy,
	.dump		= htb_dump,
	.owner		= THIS_MODULE,
};

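/* Illustrative userspace setup of the qdisc registered below (device names
 * and rates are examples only):
 *   tc qdisc add dev eth0 root handle 1: htb default 20
 *   tc class add dev eth0 parent 1: classid 1:1 htb rate 100mbit
 *   tc class add dev eth0 parent 1:1 classid 1:20 htb rate 30mbit ceil 100mbit
 */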
static int __init htb_module_init(void)
{
	return register_qdisc(&htb_qdisc_ops);
}
static void __exit htb_module_exit(void)
{
	unregister_qdisc(&htb_qdisc_ops);
}

module_init(htb_module_init)
module_exit(htb_module_exit)
MODULE_LICENSE("GPL");