1 /*
2  * net/sched/sch_htb.c	Hierarchical token bucket, feed tree version
3  *
4  *		This program is free software; you can redistribute it and/or
5  *		modify it under the terms of the GNU General Public License
6  *		as published by the Free Software Foundation; either version
7  *		2 of the License, or (at your option) any later version.
8  *
9  * Authors:	Martin Devera, <devik@cdi.cz>
10  *
11  * Credits (in time order) for older HTB versions:
12  *              Stef Coene <stef.coene@docum.org>
13  *			HTB support at LARTC mailing list
14  *		Ondrej Kraus, <krauso@barr.cz>
15  *			found missing INIT_QDISC(htb)
16  *		Vladimir Smelhaus, Aamer Akhter, Bert Hubert
17  *			helped a lot to locate nasty class stall bug
18  *		Andi Kleen, Jamal Hadi, Bert Hubert
19  *			code review and helpful comments on shaping
20  *		Tomasz Wrona, <tw@eter.tym.pl>
21  *			created test case so that I was able to fix nasty bug
22  *		Wilfried Weissmann
23  *			spotted bug in dequeue code and helped with fix
24  *		Jiri Fojtasek
25  *			fixed requeue routine
26  *		and many others. thanks.
27  */
28 #include <linux/module.h>
29 #include <linux/moduleparam.h>
30 #include <linux/types.h>
31 #include <linux/kernel.h>
32 #include <linux/string.h>
33 #include <linux/errno.h>
34 #include <linux/skbuff.h>
35 #include <linux/list.h>
36 #include <linux/compiler.h>
37 #include <linux/rbtree.h>
38 #include <linux/workqueue.h>
39 #include <linux/slab.h>
40 #include <net/netlink.h>
41 #include <net/sch_generic.h>
42 #include <net/pkt_sched.h>
43 #include <net/pkt_cls.h>
44 
45 /* HTB algorithm.
46     Author: devik@cdi.cz
47     ========================================================================
48     HTB is like TBF with multiple classes. It is also similar to CBQ because
49     it allows a priority to be assigned to each class in the hierarchy.
50     In fact it is another implementation of Floyd's formal sharing.
51 
52     Levels:
53     Each class is assigned a level. A leaf ALWAYS has level 0 and root
54     classes have level TC_HTB_MAXDEPTH-1. Interior nodes have a level
55     one less than their parent.
56 */
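
/*
 * Editor's illustration (not part of the original source): a minimal HTB
 * hierarchy as it would be built from user space with iproute2's tc.
 * The device name and rates are arbitrary examples.
 *
 *	tc qdisc add dev eth0 root handle 1: htb default 20
 *	tc class add dev eth0 parent 1:  classid 1:1  htb rate 1mbit ceil 1mbit
 *	tc class add dev eth0 parent 1:1 classid 1:10 htb rate 600kbit ceil 1mbit
 *	tc class add dev eth0 parent 1:1 classid 1:20 htb rate 400kbit ceil 1mbit
 *
 * 1:10 and 1:20 are leaves (level 0), 1:1 is an interior node, and
 * unclassified traffic falls back to the "default 20" leaf, i.e. 1:20.
 */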
57 
58 static int htb_hysteresis __read_mostly = 0; /* whether to use mode hysteresis for speedup */
59 #define HTB_VER 0x30011		/* major must be matched with number supplied by TC as version */
60 
61 #if HTB_VER >> 16 != TC_HTB_PROTOVER
62 #error "Mismatched sch_htb.c and pkt_sch.h"
63 #endif
64 
65 /* Module parameter and sysfs export */
66 module_param(htb_hysteresis, int, 0640);
67 MODULE_PARM_DESC(htb_hysteresis, "Hysteresis mode, less CPU load, less accurate");
68 
69 static int htb_rate_est = 0; /* whether htb classes get a default rate estimator */
70 module_param(htb_rate_est, int, 0640);
71 MODULE_PARM_DESC(htb_rate_est, "setup a default rate estimator (4sec 16sec) for htb classes");
72 
73 /* used internally to keep the status of a single class */
74 enum htb_cmode {
75 	HTB_CANT_SEND,		/* class can't send and can't borrow */
76 	HTB_MAY_BORROW,		/* class can't send but may borrow */
77 	HTB_CAN_SEND		/* class can send */
78 };
79 
80 struct htb_prio {
81 	union {
82 		struct rb_root	row;
83 		struct rb_root	feed;
84 	};
85 	struct rb_node	*ptr;
86 	/* When a class changes from state 1->2 and disconnects from
87 	 * its parent's feed, we lose the ptr value and start from the
88 	 * first child again. Here we store the classid of the
89 	 * last valid ptr (used when ptr is NULL).
90 	 */
91 	u32		last_ptr_id;
92 };
93 
94 /* interior & leaf nodes; props specific to leaves are noted inline:
95  * To reduce false sharing, place mostly read fields at beginning,
96  * and mostly written ones at the end.
97  */
98 struct htb_class {
99 	struct Qdisc_class_common common;
100 	struct psched_ratecfg	rate;
101 	struct psched_ratecfg	ceil;
102 	s64			buffer, cbuffer;/* token bucket depth/rate */
103 	s64			mbuffer;	/* max wait time */
104 	u32			prio;		/* these two are used only by leaves... */
105 	int			quantum;	/* but stored for parent-to-leaf return */
106 
107 	struct tcf_proto __rcu	*filter_list;	/* class attached filters */
108 	struct tcf_block	*block;
109 	int			filter_cnt;
110 
111 	int			level;		/* our level (see above) */
112 	unsigned int		children;
113 	struct htb_class	*parent;	/* parent class */
114 
115 	struct net_rate_estimator __rcu *rate_est;
116 
117 	/*
118 	 * Frequently written fields
119 	 */
120 	struct gnet_stats_basic_packed bstats;
121 	struct tc_htb_xstats	xstats;	/* our special stats */
122 
123 	/* token bucket parameters */
124 	s64			tokens, ctokens;/* current number of tokens */
125 	s64			t_c;		/* checkpoint time */
126 
127 	union {
128 		struct htb_class_leaf {
129 			int		deficit[TC_HTB_MAXDEPTH];
130 			struct Qdisc	*q;
131 		} leaf;
132 		struct htb_class_inner {
133 			struct htb_prio clprio[TC_HTB_NUMPRIO];
134 		} inner;
135 	};
136 	s64			pq_key;
137 
138 	int			prio_activity;	/* for which prios are we active */
139 	enum htb_cmode		cmode;		/* current mode of the class */
140 	struct rb_node		pq_node;	/* node for event queue */
141 	struct rb_node		node[TC_HTB_NUMPRIO];	/* node for self or feed tree */
142 
143 	unsigned int drops ____cacheline_aligned_in_smp;
144 	unsigned int		overlimits;
145 };
146 
147 struct htb_level {
148 	struct rb_root	wait_pq;
149 	struct htb_prio hprio[TC_HTB_NUMPRIO];
150 };
151 
152 struct htb_sched {
153 	struct Qdisc_class_hash clhash;
154 	int			defcls;		/* class where unclassified flows go to */
155 	int			rate2quantum;	/* quant = rate / rate2quantum */
156 
157 	/* filters for qdisc itself */
158 	struct tcf_proto __rcu	*filter_list;
159 	struct tcf_block	*block;
160 
161 #define HTB_WARN_TOOMANYEVENTS	0x1
162 	unsigned int		warned;	/* only one warning */
163 	int			direct_qlen;
164 	struct work_struct	work;
165 
166 	/* non-shaped skbs; let them pass directly through */
167 	struct qdisc_skb_head	direct_queue;
168 	long			direct_pkts;
169 
170 	struct qdisc_watchdog	watchdog;
171 
172 	s64			now;	/* cached dequeue time */
173 
174 	/* time of nearest event per level (row) */
175 	s64			near_ev_cache[TC_HTB_MAXDEPTH];
176 
177 	int			row_mask[TC_HTB_MAXDEPTH];
178 
179 	struct htb_level	hlevel[TC_HTB_MAXDEPTH];
180 };
181 
182 /* find class in global hash table using given handle */
183 static inline struct htb_class *htb_find(u32 handle, struct Qdisc *sch)
184 {
185 	struct htb_sched *q = qdisc_priv(sch);
186 	struct Qdisc_class_common *clc;
187 
188 	clc = qdisc_class_find(&q->clhash, handle);
189 	if (clc == NULL)
190 		return NULL;
191 	return container_of(clc, struct htb_class, common);
192 }
193 
194 static unsigned long htb_search(struct Qdisc *sch, u32 handle)
195 {
196 	return (unsigned long)htb_find(handle, sch);
197 }
198 /**
199  * htb_classify - classify a packet into a class
200  *
201  * It returns NULL if the packet should be dropped or -1 if the packet
202  * should be passed directly through. In all other cases a leaf class is
203  * returned. We allow direct class selection by a classid in skb->priority.
204  * Then we examine filters in the qdisc and in inner nodes (if a higher
205  * filter points to an inner node). If we end up with classid MAJOR:0 we
206  * enqueue the skb into the special internal fifo (direct); these packets
207  * then go directly through. If we still have no valid leaf we try to use
208  * the MAJOR:default leaf. If that is still unsuccessful, we return the direct queue.
209  */
210 #define HTB_DIRECT ((struct htb_class *)-1L)
211 
212 static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch,
213 				      int *qerr)
214 {
215 	struct htb_sched *q = qdisc_priv(sch);
216 	struct htb_class *cl;
217 	struct tcf_result res;
218 	struct tcf_proto *tcf;
219 	int result;
220 
221 	/* allow selecting a class by setting skb->priority to a valid classid;
222 	 * note that nfmark can be used too by attaching an fw filter with no
223 	 * rules in it
224 	 */
225 	if (skb->priority == sch->handle)
226 		return HTB_DIRECT;	/* X:0 (direct flow) selected */
227 	cl = htb_find(skb->priority, sch);
228 	if (cl) {
229 		if (cl->level == 0)
230 			return cl;
231 		/* Start with inner filter chain if a non-leaf class is selected */
232 		tcf = rcu_dereference_bh(cl->filter_list);
233 	} else {
234 		tcf = rcu_dereference_bh(q->filter_list);
235 	}
236 
237 	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
238 	while (tcf && (result = tcf_classify(skb, tcf, &res, false)) >= 0) {
239 #ifdef CONFIG_NET_CLS_ACT
240 		switch (result) {
241 		case TC_ACT_QUEUED:
242 		case TC_ACT_STOLEN:
243 		case TC_ACT_TRAP:
244 			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
245 			/* fall through */
246 		case TC_ACT_SHOT:
247 			return NULL;
248 		}
249 #endif
250 		cl = (void *)res.class;
251 		if (!cl) {
252 			if (res.classid == sch->handle)
253 				return HTB_DIRECT;	/* X:0 (direct flow) */
254 			cl = htb_find(res.classid, sch);
255 			if (!cl)
256 				break;	/* filter selected invalid classid */
257 		}
258 		if (!cl->level)
259 			return cl;	/* we hit leaf; return it */
260 
261 		/* we have got inner class; apply inner filter chain */
262 		tcf = rcu_dereference_bh(cl->filter_list);
263 	}
264 	/* classification failed; try to use default class */
265 	cl = htb_find(TC_H_MAKE(TC_H_MAJ(sch->handle), q->defcls), sch);
266 	if (!cl || cl->level)
267 		return HTB_DIRECT;	/* bad default .. this is safe bet */
268 	return cl;
269 }
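
/*
 * Editor's illustration (an assumption about user-space usage, not part
 * of the original source): the skb->priority shortcut above can be
 * exercised via SO_PRIORITY. With the example hierarchy from the header
 * comment, setting the socket priority to the classid 1:10 (major 1,
 * minor 0x10) bypasses all filters; note that priorities outside 0..6
 * require CAP_NET_ADMIN:
 *
 *	int prio = (1 << 16) | 0x10;	// classid 1:10
 *	setsockopt(fd, SOL_SOCKET, SO_PRIORITY, &prio, sizeof(prio));
 */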
270 
271 /**
272  * htb_add_to_id_tree - adds class to the round robin list
273  *
274  * Routine adds the class to the list (actually tree) sorted by classid.
275  * Make sure that the class is not already on such a list for the given prio.
276  */
277 static void htb_add_to_id_tree(struct rb_root *root,
278 			       struct htb_class *cl, int prio)
279 {
280 	struct rb_node **p = &root->rb_node, *parent = NULL;
281 
282 	while (*p) {
283 		struct htb_class *c;
284 		parent = *p;
285 		c = rb_entry(parent, struct htb_class, node[prio]);
286 
287 		if (cl->common.classid > c->common.classid)
288 			p = &parent->rb_right;
289 		else
290 			p = &parent->rb_left;
291 	}
292 	rb_link_node(&cl->node[prio], parent, p);
293 	rb_insert_color(&cl->node[prio], root);
294 }
295 
296 /**
297  * htb_add_to_wait_tree - adds class to the event queue with delay
298  *
299  * The class is added to the priority event queue to indicate that the
300  * class will change its mode at time cl->pq_key (in nanoseconds). Make
301  * sure that the class is not already in the queue.
302  */
303 static void htb_add_to_wait_tree(struct htb_sched *q,
304 				 struct htb_class *cl, s64 delay)
305 {
306 	struct rb_node **p = &q->hlevel[cl->level].wait_pq.rb_node, *parent = NULL;
307 
308 	cl->pq_key = q->now + delay;
309 	if (cl->pq_key == q->now)
310 		cl->pq_key++;
311 
312 	/* update the nearest event cache */
313 	if (q->near_ev_cache[cl->level] > cl->pq_key)
314 		q->near_ev_cache[cl->level] = cl->pq_key;
315 
316 	while (*p) {
317 		struct htb_class *c;
318 		parent = *p;
319 		c = rb_entry(parent, struct htb_class, pq_node);
320 		if (cl->pq_key >= c->pq_key)
321 			p = &parent->rb_right;
322 		else
323 			p = &parent->rb_left;
324 	}
325 	rb_link_node(&cl->pq_node, parent, p);
326 	rb_insert_color(&cl->pq_node, &q->hlevel[cl->level].wait_pq);
327 }
328 
329 /**
330  * htb_next_rb_node - advance *n to the next node in the rbtree
331  *
332  * When we are past the last key, *n becomes NULL.
333  * Average complexity is 2 steps per call.
334  */
335 static inline void htb_next_rb_node(struct rb_node **n)
336 {
337 	*n = rb_next(*n);
338 }
339 
340 /**
341  * htb_add_class_to_row - add class to its row
342  *
343  * The class is added to row at priorities marked in mask.
344  * It does nothing if mask == 0.
345  */
346 static inline void htb_add_class_to_row(struct htb_sched *q,
347 					struct htb_class *cl, int mask)
348 {
349 	q->row_mask[cl->level] |= mask;
350 	while (mask) {
351 		int prio = ffz(~mask);
352 		mask &= ~(1 << prio);
353 		htb_add_to_id_tree(&q->hlevel[cl->level].hprio[prio].row, cl, prio);
354 	}
355 }
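
/*
 * Editor's note on the bit-walking idiom above (illustrative): ffz(~mask)
 * is the index of the lowest set bit of mask, so the loop handles each
 * active priority exactly once. For mask == 0x5 (binary 101) it visits
 * prio 0, then prio 2, clearing each bit as it goes.
 */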
356 
357 /* If this triggers, it is a bug in this code, but it need not be fatal */
358 static void htb_safe_rb_erase(struct rb_node *rb, struct rb_root *root)
359 {
360 	if (RB_EMPTY_NODE(rb)) {
361 		WARN_ON(1);
362 	} else {
363 		rb_erase(rb, root);
364 		RB_CLEAR_NODE(rb);
365 	}
366 }
367 
368 
369 /**
370  * htb_remove_class_from_row - removes class from its row
371  *
372  * The class is removed from row at priorities marked in mask.
373  * It does nothing if mask == 0.
374  */
375 static inline void htb_remove_class_from_row(struct htb_sched *q,
376 						 struct htb_class *cl, int mask)
377 {
378 	int m = 0;
379 	struct htb_level *hlevel = &q->hlevel[cl->level];
380 
381 	while (mask) {
382 		int prio = ffz(~mask);
383 		struct htb_prio *hprio = &hlevel->hprio[prio];
384 
385 		mask &= ~(1 << prio);
386 		if (hprio->ptr == cl->node + prio)
387 			htb_next_rb_node(&hprio->ptr);
388 
389 		htb_safe_rb_erase(cl->node + prio, &hprio->row);
390 		if (!hprio->row.rb_node)
391 			m |= 1 << prio;
392 	}
393 	q->row_mask[cl->level] &= ~m;
394 }
395 
396 /**
397  * htb_activate_prios - creates the active class's feed chain
398  *
399  * The class is connected to its ancestors and/or the appropriate rows
400  * for the priorities in which it participates. cl->cmode must be the new
401  * (activated) mode. It does nothing if cl->prio_activity == 0.
402  */
403 static void htb_activate_prios(struct htb_sched *q, struct htb_class *cl)
404 {
405 	struct htb_class *p = cl->parent;
406 	long m, mask = cl->prio_activity;
407 
408 	while (cl->cmode == HTB_MAY_BORROW && p && mask) {
409 		m = mask;
410 		while (m) {
411 			int prio = ffz(~m);
412 			m &= ~(1 << prio);
413 
414 			if (p->inner.clprio[prio].feed.rb_node)
415 				/* parent already has its feed in use, so
416 				 * reset the bit in mask as parent is already ok
417 				 */
418 				mask &= ~(1 << prio);
419 
420 			htb_add_to_id_tree(&p->inner.clprio[prio].feed, cl, prio);
421 		}
422 		p->prio_activity |= mask;
423 		cl = p;
424 		p = cl->parent;
425 
426 	}
427 	if (cl->cmode == HTB_CAN_SEND && mask)
428 		htb_add_class_to_row(q, cl, mask);
429 }
430 
431 /**
432  * htb_deactivate_prios - remove class from feed chain
433  *
434  * cl->cmode must represent the old mode (before deactivation). It does
435  * nothing if cl->prio_activity == 0. The class is removed from all feed
436  * chains and rows.
437  */
438 static void htb_deactivate_prios(struct htb_sched *q, struct htb_class *cl)
439 {
440 	struct htb_class *p = cl->parent;
441 	long m, mask = cl->prio_activity;
442 
443 	while (cl->cmode == HTB_MAY_BORROW && p && mask) {
444 		m = mask;
445 		mask = 0;
446 		while (m) {
447 			int prio = ffz(~m);
448 			m &= ~(1 << prio);
449 
450 			if (p->inner.clprio[prio].ptr == cl->node + prio) {
451 				/* we are removing a child which is pointed to
452 				 * from the parent feed - forget the pointer but
453 				 * remember the classid
454 				 */
455 				p->inner.clprio[prio].last_ptr_id = cl->common.classid;
456 				p->inner.clprio[prio].ptr = NULL;
457 			}
458 
459 			htb_safe_rb_erase(cl->node + prio,
460 					  &p->inner.clprio[prio].feed);
461 
462 			if (!p->inner.clprio[prio].feed.rb_node)
463 				mask |= 1 << prio;
464 		}
465 
466 		p->prio_activity &= ~mask;
467 		cl = p;
468 		p = cl->parent;
469 
470 	}
471 	if (cl->cmode == HTB_CAN_SEND && mask)
472 		htb_remove_class_from_row(q, cl, mask);
473 }
474 
475 static inline s64 htb_lowater(const struct htb_class *cl)
476 {
477 	if (htb_hysteresis)
478 		return cl->cmode != HTB_CANT_SEND ? -cl->cbuffer : 0;
479 	else
480 		return 0;
481 }
482 static inline s64 htb_hiwater(const struct htb_class *cl)
483 {
484 	if (htb_hysteresis)
485 		return cl->cmode == HTB_CAN_SEND ? -cl->buffer : 0;
486 	else
487 		return 0;
488 }
489 
490 
491 /**
492  * htb_class_mode - computes and returns current class mode
493  *
494  * It computes cl's mode at time cl->t_c+diff and returns it. If the mode
495  * is not HTB_CAN_SEND then *diff is set to the time difference from now
496  * to the time when cl will change its state.
497  * It is also worth noting that the class mode doesn't change simply
498  * at cl->{c,}tokens == 0; rather, there can be hysteresis over the
499  * 0 .. -cl->{c,}buffer range. It is meant to limit the number of
500  * mode transitions per time unit. The speed gain is about 1/6.
501  */
502 static inline enum htb_cmode
503 htb_class_mode(struct htb_class *cl, s64 *diff)
504 {
505 	s64 toks;
506 
507 	if ((toks = (cl->ctokens + *diff)) < htb_lowater(cl)) {
508 		*diff = -toks;
509 		return HTB_CANT_SEND;
510 	}
511 
512 	if ((toks = (cl->tokens + *diff)) >= htb_hiwater(cl))
513 		return HTB_CAN_SEND;
514 
515 	*diff = -toks;
516 	return HTB_MAY_BORROW;
517 }
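
/*
 * Editor's worked example (illustrative): suppose htb_hysteresis is set
 * and cl is currently in HTB_CAN_SEND. htb_hiwater() then returns
 * -cl->buffer, so the class stays in HTB_CAN_SEND as long as
 * cl->tokens + diff >= -cl->buffer rather than >= 0. Small oscillations
 * of the token count around zero therefore do not flip the mode on
 * every packet; this is the hysteresis and ~1/6 speed gain described
 * in the comment above.
 */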
518 
519 /**
520  * htb_change_class_mode - changes the class's mode
521  *
522  * This should be the only way to change a class's mode under normal
523  * circumstances. The routine will update the feed list linkage and mode,
524  * and add the class to the wait event queue if appropriate. The new mode
525  * should differ from the old one and cl->pq_key has to be valid if changing
526  * to a mode other than HTB_CAN_SEND (see htb_add_to_wait_tree).
527  */
528 static void
529 htb_change_class_mode(struct htb_sched *q, struct htb_class *cl, s64 *diff)
530 {
531 	enum htb_cmode new_mode = htb_class_mode(cl, diff);
532 
533 	if (new_mode == cl->cmode)
534 		return;
535 
536 	if (new_mode == HTB_CANT_SEND)
537 		cl->overlimits++;
538 
539 	if (cl->prio_activity) {	/* not necessary: speed optimization */
540 		if (cl->cmode != HTB_CANT_SEND)
541 			htb_deactivate_prios(q, cl);
542 		cl->cmode = new_mode;
543 		if (new_mode != HTB_CANT_SEND)
544 			htb_activate_prios(q, cl);
545 	} else
546 		cl->cmode = new_mode;
547 }
548 
549 /**
550  * htb_activate - inserts leaf cl into appropriate active feeds
551  *
552  * Routine learns the (new) priority of the leaf and activates the feed
553  * chain for the prio. It can safely be called on an already active
554  * leaf.
555  */
556 static inline void htb_activate(struct htb_sched *q, struct htb_class *cl)
557 {
558 	WARN_ON(cl->level || !cl->leaf.q || !cl->leaf.q->q.qlen);
559 
560 	if (!cl->prio_activity) {
561 		cl->prio_activity = 1 << cl->prio;
562 		htb_activate_prios(q, cl);
563 	}
564 }
565 
566 /**
567  * htb_deactivate - remove leaf cl from active feeds
568  *
569  * Make sure that the leaf is active. In other words, it can't be called
570  * with a non-active leaf.
571  */
572 static inline void htb_deactivate(struct htb_sched *q, struct htb_class *cl)
573 {
574 	WARN_ON(!cl->prio_activity);
575 
576 	htb_deactivate_prios(q, cl);
577 	cl->prio_activity = 0;
578 }
579 
580 static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
581 		       struct sk_buff **to_free)
582 {
583 	int uninitialized_var(ret);
584 	struct htb_sched *q = qdisc_priv(sch);
585 	struct htb_class *cl = htb_classify(skb, sch, &ret);
586 
587 	if (cl == HTB_DIRECT) {
588 		/* enqueue to helper queue */
589 		if (q->direct_queue.qlen < q->direct_qlen) {
590 			__qdisc_enqueue_tail(skb, &q->direct_queue);
591 			q->direct_pkts++;
592 		} else {
593 			return qdisc_drop(skb, sch, to_free);
594 		}
595 #ifdef CONFIG_NET_CLS_ACT
596 	} else if (!cl) {
597 		if (ret & __NET_XMIT_BYPASS)
598 			qdisc_qstats_drop(sch);
599 		__qdisc_drop(skb, to_free);
600 		return ret;
601 #endif
602 	} else if ((ret = qdisc_enqueue(skb, cl->leaf.q,
603 					to_free)) != NET_XMIT_SUCCESS) {
604 		if (net_xmit_drop_count(ret)) {
605 			qdisc_qstats_drop(sch);
606 			cl->drops++;
607 		}
608 		return ret;
609 	} else {
610 		htb_activate(q, cl);
611 	}
612 
613 	qdisc_qstats_backlog_inc(sch, skb);
614 	sch->q.qlen++;
615 	return NET_XMIT_SUCCESS;
616 }
617 
618 static inline void htb_accnt_tokens(struct htb_class *cl, int bytes, s64 diff)
619 {
620 	s64 toks = diff + cl->tokens;
621 
622 	if (toks > cl->buffer)
623 		toks = cl->buffer;
624 	toks -= (s64) psched_l2t_ns(&cl->rate, bytes);
625 	if (toks <= -cl->mbuffer)
626 		toks = 1 - cl->mbuffer;
627 
628 	cl->tokens = toks;
629 }
630 
631 static inline void htb_accnt_ctokens(struct htb_class *cl, int bytes, s64 diff)
632 {
633 	s64 toks = diff + cl->ctokens;
634 
635 	if (toks > cl->cbuffer)
636 		toks = cl->cbuffer;
637 	toks -= (s64) psched_l2t_ns(&cl->ceil, bytes);
638 	if (toks <= -cl->mbuffer)
639 		toks = 1 - cl->mbuffer;
640 
641 	cl->ctokens = toks;
642 }
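
/*
 * Editor's worked example (illustrative numbers): psched_l2t_ns()
 * converts a packet length into transmit time at the configured rate.
 * At 1 Mbit/s (125000 bytes/s), a 1500 byte packet costs
 * 1500 / 125000 s = 12 ms = 12,000,000 ns worth of tokens, so a
 * "buffer" of 12 ms admits exactly one such packet as a burst.
 */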
643 
644 /**
645  * htb_charge_class - charges amount "bytes" to leaf and ancestors
646  *
647  * Routine assumes that a packet "bytes" long was dequeued from leaf cl
648  * borrowing from "level". It accounts bytes to the ceil leaky bucket for
649  * the leaf and all ancestors and to the rate bucket for ancestors at
650  * levels "level" and higher. It also handles a possible change of mode
651  * resulting from the update. Note that the mode can also increase here
652  * (MAY_BORROW to CAN_SEND) because we can use a more precise clock than
653  * the event queue here. In such a case we remove the class from the event queue first.
654  */
655 static void htb_charge_class(struct htb_sched *q, struct htb_class *cl,
656 			     int level, struct sk_buff *skb)
657 {
658 	int bytes = qdisc_pkt_len(skb);
659 	enum htb_cmode old_mode;
660 	s64 diff;
661 
662 	while (cl) {
663 		diff = min_t(s64, q->now - cl->t_c, cl->mbuffer);
664 		if (cl->level >= level) {
665 			if (cl->level == level)
666 				cl->xstats.lends++;
667 			htb_accnt_tokens(cl, bytes, diff);
668 		} else {
669 			cl->xstats.borrows++;
670 			cl->tokens += diff;	/* we moved t_c; update tokens */
671 		}
672 		htb_accnt_ctokens(cl, bytes, diff);
673 		cl->t_c = q->now;
674 
675 		old_mode = cl->cmode;
676 		diff = 0;
677 		htb_change_class_mode(q, cl, &diff);
678 		if (old_mode != cl->cmode) {
679 			if (old_mode != HTB_CAN_SEND)
680 				htb_safe_rb_erase(&cl->pq_node, &q->hlevel[cl->level].wait_pq);
681 			if (cl->cmode != HTB_CAN_SEND)
682 				htb_add_to_wait_tree(q, cl, diff);
683 		}
684 
685 		/* update basic stats except for leaves which are already updated */
686 		if (cl->level)
687 			bstats_update(&cl->bstats, skb);
688 
689 		cl = cl->parent;
690 	}
691 }
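
/*
 * Editor's worked example (illustrative): a leaf that dequeued a packet
 * while borrowing from its level-1 parent reaches here with level == 1.
 * The leaf (level 0 < 1) is charged ceil tokens only and counts a
 * borrow; the parent (level 1 == 1) is charged rate tokens and counts
 * a lend; higher ancestors are charged rate and ceil tokens as well,
 * without counting further lends.
 */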
692 
693 /**
694  * htb_do_events - make mode changes to classes at the level
695  *
696  * Scans event queue for pending events and applies them. Returns time of
697  * next pending event (0 for no event in pq, q->now for too many events).
698  * Note: Only events with cl->pq_key <= q->now are applied.
699  */
700 static s64 htb_do_events(struct htb_sched *q, const int level,
701 			 unsigned long start)
702 {
703 	/* don't run for longer than 2 jiffies; 2 is used instead of
704 	 * 1 to simplify things when the jiffy counter is about to be
705 	 * incremented too soon
706 	 */
707 	unsigned long stop_at = start + 2;
708 	struct rb_root *wait_pq = &q->hlevel[level].wait_pq;
709 
710 	while (time_before(jiffies, stop_at)) {
711 		struct htb_class *cl;
712 		s64 diff;
713 		struct rb_node *p = rb_first(wait_pq);
714 
715 		if (!p)
716 			return 0;
717 
718 		cl = rb_entry(p, struct htb_class, pq_node);
719 		if (cl->pq_key > q->now)
720 			return cl->pq_key;
721 
722 		htb_safe_rb_erase(p, wait_pq);
723 		diff = min_t(s64, q->now - cl->t_c, cl->mbuffer);
724 		htb_change_class_mode(q, cl, &diff);
725 		if (cl->cmode != HTB_CAN_SEND)
726 			htb_add_to_wait_tree(q, cl, diff);
727 	}
728 
729 	/* too much load - let's continue after a break for scheduling */
730 	if (!(q->warned & HTB_WARN_TOOMANYEVENTS)) {
731 		pr_warn("htb: too many events!\n");
732 		q->warned |= HTB_WARN_TOOMANYEVENTS;
733 	}
734 
735 	return q->now;
736 }
737 
738 /* Returns class->node+prio from the id-tree where the class's id is >= id,
739  * or NULL if no such one exists.
740  */
741 static struct rb_node *htb_id_find_next_upper(int prio, struct rb_node *n,
742 					      u32 id)
743 {
744 	struct rb_node *r = NULL;
745 	while (n) {
746 		struct htb_class *cl =
747 		    rb_entry(n, struct htb_class, node[prio]);
748 
749 		if (id > cl->common.classid) {
750 			n = n->rb_right;
751 		} else if (id < cl->common.classid) {
752 			r = n;
753 			n = n->rb_left;
754 		} else {
755 			return n;
756 		}
757 	}
758 	return r;
759 }
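
/*
 * Editor's worked example (illustrative): in a tree holding classids
 * {10, 20, 30}, a lookup with id == 15 walks right past 10, records 20
 * as a candidate and returns its node - the smallest classid >= id.
 * With id == 31 the walk falls off the right edge and NULL is returned.
 */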
760 
761 /**
762  * htb_lookup_leaf - returns next leaf class in DRR order
763  *
764  * Find the leaf that the current feed pointer points to.
765  */
766 static struct htb_class *htb_lookup_leaf(struct htb_prio *hprio, const int prio)
767 {
768 	int i;
769 	struct {
770 		struct rb_node *root;
771 		struct rb_node **pptr;
772 		u32 *pid;
773 	} stk[TC_HTB_MAXDEPTH], *sp = stk;
774 
775 	BUG_ON(!hprio->row.rb_node);
776 	sp->root = hprio->row.rb_node;
777 	sp->pptr = &hprio->ptr;
778 	sp->pid = &hprio->last_ptr_id;
779 
780 	for (i = 0; i < 65535; i++) {
781 		if (!*sp->pptr && *sp->pid) {
782 			/* ptr was invalidated but id is valid - try to recover
783 			 * the original or next ptr
784 			 */
785 			*sp->pptr =
786 			    htb_id_find_next_upper(prio, sp->root, *sp->pid);
787 		}
788 		*sp->pid = 0;	/* ptr is valid now, so remove this hint as it
789 				 * can become out of date quickly
790 				 */
791 		if (!*sp->pptr) {	/* we are at right end; rewind & go up */
792 			*sp->pptr = sp->root;
793 			while ((*sp->pptr)->rb_left)
794 				*sp->pptr = (*sp->pptr)->rb_left;
795 			if (sp > stk) {
796 				sp--;
797 				if (!*sp->pptr) {
798 					WARN_ON(1);
799 					return NULL;
800 				}
801 				htb_next_rb_node(sp->pptr);
802 			}
803 		} else {
804 			struct htb_class *cl;
805 			struct htb_prio *clp;
806 
807 			cl = rb_entry(*sp->pptr, struct htb_class, node[prio]);
808 			if (!cl->level)
809 				return cl;
810 			clp = &cl->inner.clprio[prio];
811 			(++sp)->root = clp->feed.rb_node;
812 			sp->pptr = &clp->ptr;
813 			sp->pid = &clp->last_ptr_id;
814 		}
815 	}
816 	WARN_ON(1);
817 	return NULL;
818 }
819 
820 /* dequeues packet at given priority and level; call only if
821  * you are sure that there is an active class at prio/level
822  */
823 static struct sk_buff *htb_dequeue_tree(struct htb_sched *q, const int prio,
824 					const int level)
825 {
826 	struct sk_buff *skb = NULL;
827 	struct htb_class *cl, *start;
828 	struct htb_level *hlevel = &q->hlevel[level];
829 	struct htb_prio *hprio = &hlevel->hprio[prio];
830 
831 	/* look up the initial class in the row */
832 	start = cl = htb_lookup_leaf(hprio, prio);
833 
834 	do {
835 next:
836 		if (unlikely(!cl))
837 			return NULL;
838 
839 		/* class can be empty - it is unlikely but can be true if the leaf
840 		 * qdisc drops packets in its enqueue routine or if someone used
841 		 * a graft operation on the leaf since the last dequeue;
842 		 * simply deactivate and skip such a class
843 		 */
844 		if (unlikely(cl->leaf.q->q.qlen == 0)) {
845 			struct htb_class *next;
846 			htb_deactivate(q, cl);
847 
848 			/* row/level might become empty */
849 			if ((q->row_mask[level] & (1 << prio)) == 0)
850 				return NULL;
851 
852 			next = htb_lookup_leaf(hprio, prio);
853 
854 			if (cl == start)	/* fix start if we just deleted it */
855 				start = next;
856 			cl = next;
857 			goto next;
858 		}
859 
860 		skb = cl->leaf.q->dequeue(cl->leaf.q);
861 		if (likely(skb != NULL))
862 			break;
863 
864 		qdisc_warn_nonwc("htb", cl->leaf.q);
865 		htb_next_rb_node(level ? &cl->parent->inner.clprio[prio].ptr:
866 					 &q->hlevel[0].hprio[prio].ptr);
867 		cl = htb_lookup_leaf(hprio, prio);
868 
869 	} while (cl != start);
870 
871 	if (likely(skb != NULL)) {
872 		bstats_update(&cl->bstats, skb);
873 		cl->leaf.deficit[level] -= qdisc_pkt_len(skb);
874 		if (cl->leaf.deficit[level] < 0) {
875 			cl->leaf.deficit[level] += cl->quantum;
876 			htb_next_rb_node(level ? &cl->parent->inner.clprio[prio].ptr :
877 						 &q->hlevel[0].hprio[prio].ptr);
878 		}
879 		/* this used to be after charge_class but this constellation
880 		 * gives us slightly better performance
881 		 */
882 		if (!cl->leaf.q->q.qlen)
883 			htb_deactivate(q, cl);
884 		htb_charge_class(q, cl, level, skb);
885 	}
886 	return skb;
887 }
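
/*
 * Editor's note on the deficit round-robin above (illustrative): with
 * cl->quantum == 1500 and 1000-byte packets, the stored deficit after
 * successive dequeues runs 500, 1000, 0, 500, ... Whenever subtracting
 * the packet length makes it negative, one quantum is added and the
 * feed pointer advances, so in the long run every active class at this
 * prio gets about "quantum" bytes per round-robin pass.
 */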
888 
889 static struct sk_buff *htb_dequeue(struct Qdisc *sch)
890 {
891 	struct sk_buff *skb;
892 	struct htb_sched *q = qdisc_priv(sch);
893 	int level;
894 	s64 next_event;
895 	unsigned long start_at;
896 
897 	/* try to dequeue direct packets as high prio (!) to minimize cpu work */
898 	skb = __qdisc_dequeue_head(&q->direct_queue);
899 	if (skb != NULL) {
900 ok:
901 		qdisc_bstats_update(sch, skb);
902 		qdisc_qstats_backlog_dec(sch, skb);
903 		sch->q.qlen--;
904 		return skb;
905 	}
906 
907 	if (!sch->q.qlen)
908 		goto fin;
909 	q->now = ktime_get_ns();
910 	start_at = jiffies;
911 
912 	next_event = q->now + 5LLU * NSEC_PER_SEC;
913 
914 	for (level = 0; level < TC_HTB_MAXDEPTH; level++) {
915 		/* common case optimization - skip event handler quickly */
916 		int m;
917 		s64 event = q->near_ev_cache[level];
918 
919 		if (q->now >= event) {
920 			event = htb_do_events(q, level, start_at);
921 			if (!event)
922 				event = q->now + NSEC_PER_SEC;
923 			q->near_ev_cache[level] = event;
924 		}
925 
926 		if (next_event > event)
927 			next_event = event;
928 
929 		m = ~q->row_mask[level];
930 		while (m != (int)(-1)) {
931 			int prio = ffz(m);
932 
933 			m |= 1 << prio;
934 			skb = htb_dequeue_tree(q, prio, level);
935 			if (likely(skb != NULL))
936 				goto ok;
937 		}
938 	}
939 	qdisc_qstats_overlimit(sch);
940 	if (likely(next_event > q->now))
941 		qdisc_watchdog_schedule_ns(&q->watchdog, next_event);
942 	else
943 		schedule_work(&q->work);
944 fin:
945 	return skb;
946 }
947 
948 /* reset all classes */
949 /* always called under BH & queue lock */
950 static void htb_reset(struct Qdisc *sch)
951 {
952 	struct htb_sched *q = qdisc_priv(sch);
953 	struct htb_class *cl;
954 	unsigned int i;
955 
956 	for (i = 0; i < q->clhash.hashsize; i++) {
957 		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
958 			if (cl->level)
959 				memset(&cl->inner, 0, sizeof(cl->inner));
960 			else {
961 				if (cl->leaf.q)
962 					qdisc_reset(cl->leaf.q);
963 			}
964 			cl->prio_activity = 0;
965 			cl->cmode = HTB_CAN_SEND;
966 		}
967 	}
968 	qdisc_watchdog_cancel(&q->watchdog);
969 	__qdisc_reset_queue(&q->direct_queue);
970 	sch->q.qlen = 0;
971 	sch->qstats.backlog = 0;
972 	memset(q->hlevel, 0, sizeof(q->hlevel));
973 	memset(q->row_mask, 0, sizeof(q->row_mask));
974 }
975 
976 static const struct nla_policy htb_policy[TCA_HTB_MAX + 1] = {
977 	[TCA_HTB_PARMS]	= { .len = sizeof(struct tc_htb_opt) },
978 	[TCA_HTB_INIT]	= { .len = sizeof(struct tc_htb_glob) },
979 	[TCA_HTB_CTAB]	= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
980 	[TCA_HTB_RTAB]	= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
981 	[TCA_HTB_DIRECT_QLEN] = { .type = NLA_U32 },
982 	[TCA_HTB_RATE64] = { .type = NLA_U64 },
983 	[TCA_HTB_CEIL64] = { .type = NLA_U64 },
984 };
985 
986 static void htb_work_func(struct work_struct *work)
987 {
988 	struct htb_sched *q = container_of(work, struct htb_sched, work);
989 	struct Qdisc *sch = q->watchdog.qdisc;
990 
991 	rcu_read_lock();
992 	__netif_schedule(qdisc_root(sch));
993 	rcu_read_unlock();
994 }
995 
996 static int htb_init(struct Qdisc *sch, struct nlattr *opt,
997 		    struct netlink_ext_ack *extack)
998 {
999 	struct htb_sched *q = qdisc_priv(sch);
1000 	struct nlattr *tb[TCA_HTB_MAX + 1];
1001 	struct tc_htb_glob *gopt;
1002 	int err;
1003 
1004 	qdisc_watchdog_init(&q->watchdog, sch);
1005 	INIT_WORK(&q->work, htb_work_func);
1006 
1007 	if (!opt)
1008 		return -EINVAL;
1009 
1010 	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
1011 	if (err)
1012 		return err;
1013 
1014 	err = nla_parse_nested(tb, TCA_HTB_MAX, opt, htb_policy, NULL);
1015 	if (err < 0)
1016 		return err;
1017 
1018 	if (!tb[TCA_HTB_INIT])
1019 		return -EINVAL;
1020 
1021 	gopt = nla_data(tb[TCA_HTB_INIT]);
1022 	if (gopt->version != HTB_VER >> 16)
1023 		return -EINVAL;
1024 
1025 	err = qdisc_class_hash_init(&q->clhash);
1026 	if (err < 0)
1027 		return err;
1028 
1029 	qdisc_skb_head_init(&q->direct_queue);
1030 
1031 	if (tb[TCA_HTB_DIRECT_QLEN])
1032 		q->direct_qlen = nla_get_u32(tb[TCA_HTB_DIRECT_QLEN]);
1033 	else
1034 		q->direct_qlen = qdisc_dev(sch)->tx_queue_len;
1035 
1036 	if ((q->rate2quantum = gopt->rate2quantum) < 1)
1037 		q->rate2quantum = 1;
1038 	q->defcls = gopt->defcls;
1039 
1040 	return 0;
1041 }
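
/*
 * Editor's illustration (an assumption about iproute2, not part of the
 * original source): rate2quantum is tc's "r2q" option and direct_qlen
 * can be set explicitly:
 *
 *	tc qdisc add dev eth0 root handle 1: htb default 20 r2q 10
 *
 * With r2q 10, a class whose rate is 125000 bytes/s gets a default
 * quantum of 12500 bytes (see htb_change_class below).
 */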
1042 
1043 static int htb_dump(struct Qdisc *sch, struct sk_buff *skb)
1044 {
1045 	struct htb_sched *q = qdisc_priv(sch);
1046 	struct nlattr *nest;
1047 	struct tc_htb_glob gopt;
1048 
1049 	/* It's safe to not acquire the qdisc lock. As we hold RTNL,
1050 	 * no change can happen on the qdisc parameters.
1051 	 */
1052 
1053 	gopt.direct_pkts = q->direct_pkts;
1054 	gopt.version = HTB_VER;
1055 	gopt.rate2quantum = q->rate2quantum;
1056 	gopt.defcls = q->defcls;
1057 	gopt.debug = 0;
1058 
1059 	nest = nla_nest_start(skb, TCA_OPTIONS);
1060 	if (nest == NULL)
1061 		goto nla_put_failure;
1062 	if (nla_put(skb, TCA_HTB_INIT, sizeof(gopt), &gopt) ||
1063 	    nla_put_u32(skb, TCA_HTB_DIRECT_QLEN, q->direct_qlen))
1064 		goto nla_put_failure;
1065 
1066 	return nla_nest_end(skb, nest);
1067 
1068 nla_put_failure:
1069 	nla_nest_cancel(skb, nest);
1070 	return -1;
1071 }
1072 
1073 static int htb_dump_class(struct Qdisc *sch, unsigned long arg,
1074 			  struct sk_buff *skb, struct tcmsg *tcm)
1075 {
1076 	struct htb_class *cl = (struct htb_class *)arg;
1077 	struct nlattr *nest;
1078 	struct tc_htb_opt opt;
1079 
1080 	/* It's safe to not acquire the qdisc lock. As we hold RTNL,
1081 	 * no change can happen on the class parameters.
1082 	 */
1083 	tcm->tcm_parent = cl->parent ? cl->parent->common.classid : TC_H_ROOT;
1084 	tcm->tcm_handle = cl->common.classid;
1085 	if (!cl->level && cl->leaf.q)
1086 		tcm->tcm_info = cl->leaf.q->handle;
1087 
1088 	nest = nla_nest_start(skb, TCA_OPTIONS);
1089 	if (nest == NULL)
1090 		goto nla_put_failure;
1091 
1092 	memset(&opt, 0, sizeof(opt));
1093 
1094 	psched_ratecfg_getrate(&opt.rate, &cl->rate);
1095 	opt.buffer = PSCHED_NS2TICKS(cl->buffer);
1096 	psched_ratecfg_getrate(&opt.ceil, &cl->ceil);
1097 	opt.cbuffer = PSCHED_NS2TICKS(cl->cbuffer);
1098 	opt.quantum = cl->quantum;
1099 	opt.prio = cl->prio;
1100 	opt.level = cl->level;
1101 	if (nla_put(skb, TCA_HTB_PARMS, sizeof(opt), &opt))
1102 		goto nla_put_failure;
1103 	if ((cl->rate.rate_bytes_ps >= (1ULL << 32)) &&
1104 	    nla_put_u64_64bit(skb, TCA_HTB_RATE64, cl->rate.rate_bytes_ps,
1105 			      TCA_HTB_PAD))
1106 		goto nla_put_failure;
1107 	if ((cl->ceil.rate_bytes_ps >= (1ULL << 32)) &&
1108 	    nla_put_u64_64bit(skb, TCA_HTB_CEIL64, cl->ceil.rate_bytes_ps,
1109 			      TCA_HTB_PAD))
1110 		goto nla_put_failure;
1111 
1112 	return nla_nest_end(skb, nest);
1113 
1114 nla_put_failure:
1115 	nla_nest_cancel(skb, nest);
1116 	return -1;
1117 }
1118 
1119 static int
1120 htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d)
1121 {
1122 	struct htb_class *cl = (struct htb_class *)arg;
1123 	struct gnet_stats_queue qs = {
1124 		.drops = cl->drops,
1125 		.overlimits = cl->overlimits,
1126 	};
1127 	__u32 qlen = 0;
1128 
1129 	if (!cl->level && cl->leaf.q) {
1130 		qlen = cl->leaf.q->q.qlen;
1131 		qs.backlog = cl->leaf.q->qstats.backlog;
1132 	}
1133 	cl->xstats.tokens = clamp_t(s64, PSCHED_NS2TICKS(cl->tokens),
1134 				    INT_MIN, INT_MAX);
1135 	cl->xstats.ctokens = clamp_t(s64, PSCHED_NS2TICKS(cl->ctokens),
1136 				     INT_MIN, INT_MAX);
1137 
1138 	if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
1139 				  d, NULL, &cl->bstats) < 0 ||
1140 	    gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
1141 	    gnet_stats_copy_queue(d, NULL, &qs, qlen) < 0)
1142 		return -1;
1143 
1144 	return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats));
1145 }
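
/*
 * Editor's note (illustrative; the exact output format may differ by
 * iproute2 version): the xstats exported above are what tc prints per
 * class, with made-up numbers here:
 *
 *	tc -s class show dev eth0
 *	... lended: 1234 borrowed: 56 giants: 0
 *	... tokens: 191250 ctokens: 193750
 */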
1146 
1147 static int htb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
1148 		     struct Qdisc **old, struct netlink_ext_ack *extack)
1149 {
1150 	struct htb_class *cl = (struct htb_class *)arg;
1151 
1152 	if (cl->level)
1153 		return -EINVAL;
1154 	if (new == NULL &&
1155 	    (new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
1156 				     cl->common.classid, extack)) == NULL)
1157 		return -ENOBUFS;
1158 
1159 	*old = qdisc_replace(sch, new, &cl->leaf.q);
1160 	return 0;
1161 }
1162 
1163 static struct Qdisc *htb_leaf(struct Qdisc *sch, unsigned long arg)
1164 {
1165 	struct htb_class *cl = (struct htb_class *)arg;
1166 	return !cl->level ? cl->leaf.q : NULL;
1167 }
1168 
1169 static void htb_qlen_notify(struct Qdisc *sch, unsigned long arg)
1170 {
1171 	struct htb_class *cl = (struct htb_class *)arg;
1172 
1173 	htb_deactivate(qdisc_priv(sch), cl);
1174 }
1175 
1176 static inline int htb_parent_last_child(struct htb_class *cl)
1177 {
1178 	if (!cl->parent)
1179 		/* the root class */
1180 		return 0;
1181 	if (cl->parent->children > 1)
1182 		/* not the last child */
1183 		return 0;
1184 	return 1;
1185 }
1186 
1187 static void htb_parent_to_leaf(struct htb_sched *q, struct htb_class *cl,
1188 			       struct Qdisc *new_q)
1189 {
1190 	struct htb_class *parent = cl->parent;
1191 
1192 	WARN_ON(cl->level || !cl->leaf.q || cl->prio_activity);
1193 
1194 	if (parent->cmode != HTB_CAN_SEND)
1195 		htb_safe_rb_erase(&parent->pq_node,
1196 				  &q->hlevel[parent->level].wait_pq);
1197 
1198 	parent->level = 0;
1199 	memset(&parent->inner, 0, sizeof(parent->inner));
1200 	parent->leaf.q = new_q ? new_q : &noop_qdisc;
1201 	parent->tokens = parent->buffer;
1202 	parent->ctokens = parent->cbuffer;
1203 	parent->t_c = ktime_get_ns();
1204 	parent->cmode = HTB_CAN_SEND;
1205 }
1206 
1207 static void htb_destroy_class(struct Qdisc *sch, struct htb_class *cl)
1208 {
1209 	if (!cl->level) {
1210 		WARN_ON(!cl->leaf.q);
1211 		qdisc_put(cl->leaf.q);
1212 	}
1213 	gen_kill_estimator(&cl->rate_est);
1214 	tcf_block_put(cl->block);
1215 	kfree(cl);
1216 }
1217 
1218 static void htb_destroy(struct Qdisc *sch)
1219 {
1220 	struct htb_sched *q = qdisc_priv(sch);
1221 	struct hlist_node *next;
1222 	struct htb_class *cl;
1223 	unsigned int i;
1224 
1225 	cancel_work_sync(&q->work);
1226 	qdisc_watchdog_cancel(&q->watchdog);
1227 	/* This line used to be after htb_destroy_class call below
1228 	 * and surprisingly it worked in 2.4. But it must precede it
1229 	 * because a filter needs its target class alive to be able to call
1230 	 * unbind_filter on it (without an Oops).
1231 	 */
1232 	tcf_block_put(q->block);
1233 
1234 	for (i = 0; i < q->clhash.hashsize; i++) {
1235 		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
1236 			tcf_block_put(cl->block);
1237 			cl->block = NULL;
1238 		}
1239 	}
1240 	for (i = 0; i < q->clhash.hashsize; i++) {
1241 		hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i],
1242 					  common.hnode)
1243 			htb_destroy_class(sch, cl);
1244 	}
1245 	qdisc_class_hash_destroy(&q->clhash);
1246 	__qdisc_reset_queue(&q->direct_queue);
1247 }
1248 
1249 static int htb_delete(struct Qdisc *sch, unsigned long arg)
1250 {
1251 	struct htb_sched *q = qdisc_priv(sch);
1252 	struct htb_class *cl = (struct htb_class *)arg;
1253 	struct Qdisc *new_q = NULL;
1254 	int last_child = 0;
1255 
1256 	/* TODO: why don't we allow deleting a subtree? references? does the
1257 	 * tc subsys guarantee us that in htb_destroy it holds no class
1258 	 * refs so that we can remove children safely there?
1259 	 */
1260 	if (cl->children || cl->filter_cnt)
1261 		return -EBUSY;
1262 
1263 	if (!cl->level && htb_parent_last_child(cl)) {
1264 		new_q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
1265 					  cl->parent->common.classid,
1266 					  NULL);
1267 		last_child = 1;
1268 	}
1269 
1270 	sch_tree_lock(sch);
1271 
1272 	if (!cl->level) {
1273 		unsigned int qlen = cl->leaf.q->q.qlen;
1274 		unsigned int backlog = cl->leaf.q->qstats.backlog;
1275 
1276 		qdisc_reset(cl->leaf.q);
1277 		qdisc_tree_reduce_backlog(cl->leaf.q, qlen, backlog);
1278 	}
1279 
1280 	/* delete from hash and active; remainder in destroy_class */
1281 	qdisc_class_hash_remove(&q->clhash, &cl->common);
1282 	if (cl->parent)
1283 		cl->parent->children--;
1284 
1285 	if (cl->prio_activity)
1286 		htb_deactivate(q, cl);
1287 
1288 	if (cl->cmode != HTB_CAN_SEND)
1289 		htb_safe_rb_erase(&cl->pq_node,
1290 				  &q->hlevel[cl->level].wait_pq);
1291 
1292 	if (last_child)
1293 		htb_parent_to_leaf(q, cl, new_q);
1294 
1295 	sch_tree_unlock(sch);
1296 
1297 	htb_destroy_class(sch, cl);
1298 	return 0;
1299 }
1300 
1301 static int htb_change_class(struct Qdisc *sch, u32 classid,
1302 			    u32 parentid, struct nlattr **tca,
1303 			    unsigned long *arg, struct netlink_ext_ack *extack)
1304 {
1305 	int err = -EINVAL;
1306 	struct htb_sched *q = qdisc_priv(sch);
1307 	struct htb_class *cl = (struct htb_class *)*arg, *parent;
1308 	struct nlattr *opt = tca[TCA_OPTIONS];
1309 	struct nlattr *tb[TCA_HTB_MAX + 1];
1310 	struct tc_htb_opt *hopt;
1311 	u64 rate64, ceil64;
1312 	int warn = 0;
1313 
1314 	/* extract all subattrs from opt attr */
1315 	if (!opt)
1316 		goto failure;
1317 
1318 	err = nla_parse_nested(tb, TCA_HTB_MAX, opt, htb_policy, NULL);
1319 	if (err < 0)
1320 		goto failure;
1321 
1322 	err = -EINVAL;
1323 	if (tb[TCA_HTB_PARMS] == NULL)
1324 		goto failure;
1325 
1326 	parent = parentid == TC_H_ROOT ? NULL : htb_find(parentid, sch);
1327 
1328 	hopt = nla_data(tb[TCA_HTB_PARMS]);
1329 	if (!hopt->rate.rate || !hopt->ceil.rate)
1330 		goto failure;
1331 
1332 	/* Keep backward compatibility with rate_table based iproute2 tc */
1333 	if (hopt->rate.linklayer == TC_LINKLAYER_UNAWARE)
1334 		qdisc_put_rtab(qdisc_get_rtab(&hopt->rate, tb[TCA_HTB_RTAB],
1335 					      NULL));
1336 
1337 	if (hopt->ceil.linklayer == TC_LINKLAYER_UNAWARE)
1338 		qdisc_put_rtab(qdisc_get_rtab(&hopt->ceil, tb[TCA_HTB_CTAB],
1339 					      NULL));
1340 
1341 	if (!cl) {		/* new class */
1342 		struct Qdisc *new_q;
1343 		int prio;
1344 		struct {
1345 			struct nlattr		nla;
1346 			struct gnet_estimator	opt;
1347 		} est = {
1348 			.nla = {
1349 				.nla_len	= nla_attr_size(sizeof(est.opt)),
1350 				.nla_type	= TCA_RATE,
1351 			},
1352 			.opt = {
1353 				/* 4s interval, 16s averaging constant */
1354 				.interval	= 2,
1355 				.ewma_log	= 2,
1356 			},
1357 		};
1358 
1359 		/* check for valid classid */
1360 		if (!classid || TC_H_MAJ(classid ^ sch->handle) ||
1361 		    htb_find(classid, sch))
1362 			goto failure;
1363 
1364 		/* check maximal depth */
1365 		if (parent && parent->parent && parent->parent->level < 2) {
1366 			pr_err("htb: tree is too deep\n");
1367 			goto failure;
1368 		}
1369 		err = -ENOBUFS;
1370 		cl = kzalloc(sizeof(*cl), GFP_KERNEL);
1371 		if (!cl)
1372 			goto failure;
1373 
1374 		err = tcf_block_get(&cl->block, &cl->filter_list, sch, extack);
1375 		if (err) {
1376 			kfree(cl);
1377 			goto failure;
1378 		}
1379 		if (htb_rate_est || tca[TCA_RATE]) {
1380 			err = gen_new_estimator(&cl->bstats, NULL,
1381 						&cl->rate_est,
1382 						NULL,
1383 						qdisc_root_sleeping_running(sch),
1384 						tca[TCA_RATE] ? : &est.nla);
1385 			if (err) {
1386 				tcf_block_put(cl->block);
1387 				kfree(cl);
1388 				goto failure;
1389 			}
1390 		}
1391 
1392 		cl->children = 0;
1393 		RB_CLEAR_NODE(&cl->pq_node);
1394 
1395 		for (prio = 0; prio < TC_HTB_NUMPRIO; prio++)
1396 			RB_CLEAR_NODE(&cl->node[prio]);
1397 
1398 		/* create leaf qdisc early because it uses kmalloc(GFP_KERNEL),
1399 		 * which can't be used inside of sch_tree_lock
1400 		 * -- thanks to Karlis Peisenieks
1401 		 */
1402 		new_q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
1403 					  classid, NULL);
1404 		sch_tree_lock(sch);
1405 		if (parent && !parent->level) {
1406 			unsigned int qlen = parent->leaf.q->q.qlen;
1407 			unsigned int backlog = parent->leaf.q->qstats.backlog;
1408 
1409 			/* turn parent into inner node */
1410 			qdisc_reset(parent->leaf.q);
1411 			qdisc_tree_reduce_backlog(parent->leaf.q, qlen, backlog);
1412 			qdisc_put(parent->leaf.q);
1413 			if (parent->prio_activity)
1414 				htb_deactivate(q, parent);
1415 
1416 			/* remove from evt list because of level change */
1417 			if (parent->cmode != HTB_CAN_SEND) {
1418 				htb_safe_rb_erase(&parent->pq_node, &q->hlevel[0].wait_pq);
1419 				parent->cmode = HTB_CAN_SEND;
1420 			}
1421 			parent->level = (parent->parent ? parent->parent->level
1422 					 : TC_HTB_MAXDEPTH) - 1;
1423 			memset(&parent->inner, 0, sizeof(parent->inner));
1424 		}
1425 		/* the leaf (us) needs an elementary qdisc */
1426 		cl->leaf.q = new_q ? new_q : &noop_qdisc;
1427 
1428 		cl->common.classid = classid;
1429 		cl->parent = parent;
1430 
1431 		/* set class to be in HTB_CAN_SEND state */
1432 		cl->tokens = PSCHED_TICKS2NS(hopt->buffer);
1433 		cl->ctokens = PSCHED_TICKS2NS(hopt->cbuffer);
1434 		cl->mbuffer = 60ULL * NSEC_PER_SEC;	/* 1min */
1435 		cl->t_c = ktime_get_ns();
1436 		cl->cmode = HTB_CAN_SEND;
1437 
1438 		/* attach to the hash list and parent's family */
1439 		qdisc_class_hash_insert(&q->clhash, &cl->common);
1440 		if (parent)
1441 			parent->children++;
1442 		if (cl->leaf.q != &noop_qdisc)
1443 			qdisc_hash_add(cl->leaf.q, true);
1444 	} else {
1445 		if (tca[TCA_RATE]) {
1446 			err = gen_replace_estimator(&cl->bstats, NULL,
1447 						    &cl->rate_est,
1448 						    NULL,
1449 						    qdisc_root_sleeping_running(sch),
1450 						    tca[TCA_RATE]);
1451 			if (err)
1452 				return err;
1453 		}
1454 		sch_tree_lock(sch);
1455 	}
1456 
1457 	rate64 = tb[TCA_HTB_RATE64] ? nla_get_u64(tb[TCA_HTB_RATE64]) : 0;
1458 
1459 	ceil64 = tb[TCA_HTB_CEIL64] ? nla_get_u64(tb[TCA_HTB_CEIL64]) : 0;
1460 
1461 	psched_ratecfg_precompute(&cl->rate, &hopt->rate, rate64);
1462 	psched_ratecfg_precompute(&cl->ceil, &hopt->ceil, ceil64);
1463 
1464 	/* there used to be a nasty bug here: we have to check that the node
1465 	 * is really a leaf before changing cl->leaf!
1466 	 */
1467 	if (!cl->level) {
1468 		u64 quantum = cl->rate.rate_bytes_ps;
1469 
1470 		do_div(quantum, q->rate2quantum);
1471 		cl->quantum = min_t(u64, quantum, INT_MAX);
1472 
1473 		if (!hopt->quantum && cl->quantum < 1000) {
1474 			warn = -1;
1475 			cl->quantum = 1000;
1476 		}
1477 		if (!hopt->quantum && cl->quantum > 200000) {
1478 			warn = 1;
1479 			cl->quantum = 200000;
1480 		}
1481 		if (hopt->quantum)
1482 			cl->quantum = hopt->quantum;
1483 		if ((cl->prio = hopt->prio) >= TC_HTB_NUMPRIO)
1484 			cl->prio = TC_HTB_NUMPRIO - 1;
1485 	}
1486 
1487 	cl->buffer = PSCHED_TICKS2NS(hopt->buffer);
1488 	cl->cbuffer = PSCHED_TICKS2NS(hopt->cbuffer);
1489 
1490 	sch_tree_unlock(sch);
1491 
1492 	if (warn)
1493 		pr_warn("HTB: quantum of class %X is %s. Consider r2q change.\n",
1494 			    cl->common.classid, (warn == -1 ? "small" : "big"));
1495 
1496 	qdisc_class_hash_grow(sch, &q->clhash);
1497 
1498 	*arg = (unsigned long)cl;
1499 	return 0;
1500 
1501 failure:
1502 	return err;
1503 }
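
/*
 * Editor's worked example for the quantum logic above (illustrative
 * numbers, assuming iproute2's default r2q of 10): a class configured
 * with "rate 1mbit" has rate_bytes_ps == 125000, giving quantum ==
 * 12500. A 10kbit class would compute quantum == 125, which is clamped
 * to 1000 and triggers the "Consider r2q change" warning unless an
 * explicit quantum was supplied.
 */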
1504 
1505 static struct tcf_block *htb_tcf_block(struct Qdisc *sch, unsigned long arg,
1506 				       struct netlink_ext_ack *extack)
1507 {
1508 	struct htb_sched *q = qdisc_priv(sch);
1509 	struct htb_class *cl = (struct htb_class *)arg;
1510 
1511 	return cl ? cl->block : q->block;
1512 }
1513 
1514 static unsigned long htb_bind_filter(struct Qdisc *sch, unsigned long parent,
1515 				     u32 classid)
1516 {
1517 	struct htb_class *cl = htb_find(classid, sch);
1518 
1519 	/*if (cl && !cl->level) return 0;
1520 	 * The line above used to be there to prevent attaching filters to
1521 	 * leaves. But at least the tc_index filter uses this just to get the
1522 	 * class for other reasons, so we have to allow for it.
1523 	 * ----
1524 	 * 19.6.2002 As Werner explained it is ok - bind filter is just
1525 	 * another way to "lock" the class - unlike "get" this lock can
1526 	 * be broken by class during destroy IIUC.
1527 	 */
1528 	if (cl)
1529 		cl->filter_cnt++;
1530 	return (unsigned long)cl;
1531 }
1532 
1533 static void htb_unbind_filter(struct Qdisc *sch, unsigned long arg)
1534 {
1535 	struct htb_class *cl = (struct htb_class *)arg;
1536 
1537 	if (cl)
1538 		cl->filter_cnt--;
1539 }
1540 
1541 static void htb_walk(struct Qdisc *sch, struct qdisc_walker *arg)
1542 {
1543 	struct htb_sched *q = qdisc_priv(sch);
1544 	struct htb_class *cl;
1545 	unsigned int i;
1546 
1547 	if (arg->stop)
1548 		return;
1549 
1550 	for (i = 0; i < q->clhash.hashsize; i++) {
1551 		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
1552 			if (arg->count < arg->skip) {
1553 				arg->count++;
1554 				continue;
1555 			}
1556 			if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
1557 				arg->stop = 1;
1558 				return;
1559 			}
1560 			arg->count++;
1561 		}
1562 	}
1563 }
1564 
1565 static const struct Qdisc_class_ops htb_class_ops = {
1566 	.graft		=	htb_graft,
1567 	.leaf		=	htb_leaf,
1568 	.qlen_notify	=	htb_qlen_notify,
1569 	.find		=	htb_search,
1570 	.change		=	htb_change_class,
1571 	.delete		=	htb_delete,
1572 	.walk		=	htb_walk,
1573 	.tcf_block	=	htb_tcf_block,
1574 	.bind_tcf	=	htb_bind_filter,
1575 	.unbind_tcf	=	htb_unbind_filter,
1576 	.dump		=	htb_dump_class,
1577 	.dump_stats	=	htb_dump_class_stats,
1578 };
1579 
1580 static struct Qdisc_ops htb_qdisc_ops __read_mostly = {
1581 	.cl_ops		=	&htb_class_ops,
1582 	.id		=	"htb",
1583 	.priv_size	=	sizeof(struct htb_sched),
1584 	.enqueue	=	htb_enqueue,
1585 	.dequeue	=	htb_dequeue,
1586 	.peek		=	qdisc_peek_dequeued,
1587 	.init		=	htb_init,
1588 	.reset		=	htb_reset,
1589 	.destroy	=	htb_destroy,
1590 	.dump		=	htb_dump,
1591 	.owner		=	THIS_MODULE,
1592 };
1593 
1594 static int __init htb_module_init(void)
1595 {
1596 	return register_qdisc(&htb_qdisc_ops);
1597 }
1598 static void __exit htb_module_exit(void)
1599 {
1600 	unregister_qdisc(&htb_qdisc_ops);
1601 }
1602 
1603 module_init(htb_module_init)
1604 module_exit(htb_module_exit)
1605 MODULE_LICENSE("GPL");
1606