xref: /openbmc/linux/net/sched/sch_htb.c (revision 2fa49589)
1 /*
2  * net/sched/sch_htb.c	Hierarchical token bucket, feed tree version
3  *
4  *		This program is free software; you can redistribute it and/or
5  *		modify it under the terms of the GNU General Public License
6  *		as published by the Free Software Foundation; either version
7  *		2 of the License, or (at your option) any later version.
8  *
9  * Authors:	Martin Devera, <devik@cdi.cz>
10  *
11  * Credits (in time order) for older HTB versions:
12  *              Stef Coene <stef.coene@docum.org>
13  *			HTB support at LARTC mailing list
14  *		Ondrej Kraus, <krauso@barr.cz>
15  *			found missing INIT_QDISC(htb)
16  *		Vladimir Smelhaus, Aamer Akhter, Bert Hubert
17  *			helped a lot to locate nasty class stall bug
18  *		Andi Kleen, Jamal Hadi, Bert Hubert
19  *			code review and helpful comments on shaping
20  *		Tomasz Wrona, <tw@eter.tym.pl>
21  *			created test case so that I was able to fix nasty bug
22  *		Wilfried Weissmann
23  *			spotted bug in dequeue code and helped with fix
24  *		Jiri Fojtasek
25  *			fixed requeue routine
26  *		and many others. thanks.
27  */
28 #include <linux/module.h>
29 #include <linux/moduleparam.h>
30 #include <linux/types.h>
31 #include <linux/kernel.h>
32 #include <linux/string.h>
33 #include <linux/errno.h>
34 #include <linux/skbuff.h>
35 #include <linux/list.h>
36 #include <linux/compiler.h>
37 #include <linux/rbtree.h>
38 #include <linux/workqueue.h>
39 #include <linux/slab.h>
40 #include <net/netlink.h>
41 #include <net/sch_generic.h>
42 #include <net/pkt_sched.h>
43 #include <net/pkt_cls.h>
44 
45 /* HTB algorithm.
46     Author: devik@cdi.cz
47     ========================================================================
48     HTB is like TBF with multiple classes. It is also similar to CBQ because
49     it allows assigning a priority to each class in the hierarchy.
50     In fact it is another implementation of Floyd's formal sharing.
51 
52     Levels:
53     Each class is assigned a level. A leaf ALWAYS has level 0 and root
54     classes have level TC_HTB_MAXDEPTH-1. Interior nodes have a level
55     one less than their parent.
56 */
57 
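/* For orientation, a typical two-level hierarchy built with iproute2
 * (the device name and rates are illustrative only):
 *
 *	tc qdisc add dev eth0 root handle 1: htb default 20
 *	tc class add dev eth0 parent 1:  classid 1:1  htb rate 100mbit
 *	tc class add dev eth0 parent 1:1 classid 1:10 htb rate 30mbit ceil 100mbit
 *	tc class add dev eth0 parent 1:1 classid 1:20 htb rate 70mbit ceil 100mbit
 *
 * Here 1:10 and 1:20 are leaves (level 0) that borrow from their parent
 * 1:1 (a root class), and unclassified traffic falls back to the
 * "default 20" leaf.
 */
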
58 static int htb_hysteresis __read_mostly = 0; /* whether to use mode hysteresis for speedup */
59 #define HTB_VER 0x30011		/* major must match the version number supplied by TC */
60 
61 #if HTB_VER >> 16 != TC_HTB_PROTOVER
62 #error "Mismatched sch_htb.c and pkt_sch.h"
63 #endif
64 
65 /* Module parameter and sysfs export */
66 module_param    (htb_hysteresis, int, 0640);
67 MODULE_PARM_DESC(htb_hysteresis, "Hysteresis mode, less CPU load, less accurate");
68 
69 static int htb_rate_est = 0; /* whether htb classes get a default rate estimator */
70 module_param(htb_rate_est, int, 0640);
71 MODULE_PARM_DESC(htb_rate_est, "set up a default rate estimator (4sec 16sec) for htb classes");
72 
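/* Both knobs can be set at module load time or, given the 0640 mode,
 * adjusted later through sysfs (paths assume HTB is built as the
 * sch_htb module):
 *
 *	modprobe sch_htb htb_hysteresis=1
 *	echo 1 > /sys/module/sch_htb/parameters/htb_rate_est
 */
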
73 /* used internally to keep the status of a single class */
74 enum htb_cmode {
75 	HTB_CANT_SEND,		/* class can't send and can't borrow */
76 	HTB_MAY_BORROW,		/* class can't send but may borrow */
77 	HTB_CAN_SEND		/* class can send */
78 };
79 
80 struct htb_prio {
81 	union {
82 		struct rb_root	row;
83 		struct rb_root	feed;
84 	};
85 	struct rb_node	*ptr;
86 	/* When a class changes from state 1->2 and disconnects from
87 	 * its parent's feed, we lose the ptr value and start from the
88 	 * first child again. Here we store the classid of the
89 	 * last valid ptr (used when ptr is NULL).
90 	 */
91 	u32		last_ptr_id;
92 };
93 
94 /* interior & leaf nodes; props specific to leaves are marked L:
95  * To reduce false sharing, place mostly read fields at beginning,
96  * and mostly written ones at the end.
97  */
98 struct htb_class {
99 	struct Qdisc_class_common common;
100 	struct psched_ratecfg	rate;
101 	struct psched_ratecfg	ceil;
102 	s64			buffer, cbuffer;/* token bucket depth/rate */
103 	s64			mbuffer;	/* max wait time */
104 	u32			prio;		/* these two are used only by leaves... */
105 	int			quantum;	/* but stored for parent-to-leaf return */
106 
107 	struct tcf_proto __rcu	*filter_list;	/* class attached filters */
108 	struct tcf_block	*block;
109 	int			filter_cnt;
110 
111 	int			level;		/* our level (see above) */
112 	unsigned int		children;
113 	struct htb_class	*parent;	/* parent class */
114 
115 	struct net_rate_estimator __rcu *rate_est;
116 
117 	/*
118 	 * Written often fields
119 	 */
120 	struct gnet_stats_basic_packed bstats;
121 	struct tc_htb_xstats	xstats;	/* our special stats */
122 
123 	/* token bucket parameters */
124 	s64			tokens, ctokens;/* current number of tokens */
125 	s64			t_c;		/* checkpoint time */
126 
127 	union {
128 		struct htb_class_leaf {
129 			int		deficit[TC_HTB_MAXDEPTH];
130 			struct Qdisc	*q;
131 		} leaf;
132 		struct htb_class_inner {
133 			struct htb_prio clprio[TC_HTB_NUMPRIO];
134 		} inner;
135 	};
136 	s64			pq_key;
137 
138 	int			prio_activity;	/* for which prios are we active */
139 	enum htb_cmode		cmode;		/* current mode of the class */
140 	struct rb_node		pq_node;	/* node for event queue */
141 	struct rb_node		node[TC_HTB_NUMPRIO];	/* node for self or feed tree */
142 
143 	unsigned int drops ____cacheline_aligned_in_smp;
144 	unsigned int		overlimits;
145 };
146 
147 struct htb_level {
148 	struct rb_root	wait_pq;
149 	struct htb_prio hprio[TC_HTB_NUMPRIO];
150 };
151 
152 struct htb_sched {
153 	struct Qdisc_class_hash clhash;
154 	int			defcls;		/* class where unclassified flows go */
155 	int			rate2quantum;	/* quant = rate / rate2quantum */
156 
157 	/* filters for qdisc itself */
158 	struct tcf_proto __rcu	*filter_list;
159 	struct tcf_block	*block;
160 
161 #define HTB_WARN_TOOMANYEVENTS	0x1
162 	unsigned int		warned;	/* only one warning */
163 	int			direct_qlen;
164 	struct work_struct	work;
165 
166 	/* non-shaped skbs; let them go directly through */
167 	struct qdisc_skb_head	direct_queue;
168 	long			direct_pkts;
169 
170 	struct qdisc_watchdog	watchdog;
171 
172 	s64			now;	/* cached dequeue time */
173 
174 	/* time of nearest event per level (row) */
175 	s64			near_ev_cache[TC_HTB_MAXDEPTH];
176 
177 	int			row_mask[TC_HTB_MAXDEPTH];
178 
179 	struct htb_level	hlevel[TC_HTB_MAXDEPTH];
180 };
181 
182 /* find class in global hash table using given handle */
183 static inline struct htb_class *htb_find(u32 handle, struct Qdisc *sch)
184 {
185 	struct htb_sched *q = qdisc_priv(sch);
186 	struct Qdisc_class_common *clc;
187 
188 	clc = qdisc_class_find(&q->clhash, handle);
189 	if (clc == NULL)
190 		return NULL;
191 	return container_of(clc, struct htb_class, common);
192 }
193 
194 static unsigned long htb_search(struct Qdisc *sch, u32 handle)
195 {
196 	return (unsigned long)htb_find(handle, sch);
197 }
198 /**
199  * htb_classify - classify a packet into a class
200  *
201  * It returns NULL if the packet should be dropped or -1 if the packet
202  * should be passed directly through. In all other cases a leaf class is
203  * returned. We allow direct class selection by classid in priority. Then we
204  * examine filters in the qdisc and in inner nodes (if a higher filter points
205  * to the inner node). If we end up with classid MAJOR:0 we enqueue the skb
206  * into the special internal fifo (direct). These packets then go directly
207  * through. If we still have no valid leaf we try to use the MAJOR:default
208  * leaf. If that is still unsuccessful, we finish and return the direct queue.
209  */
210 #define HTB_DIRECT ((struct htb_class *)-1L)
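
/* A minimal userspace sketch of the direct selection path above: with an
 * HTB root of handle 1: and an existing leaf 1:10, a process with
 * CAP_NET_ADMIN can steer a socket straight to that leaf, bypassing all
 * filters (the classids here are illustrative):
 *
 *	__u32 prio = TC_H_MAKE(1 << 16, 10);	// classid 1:10
 *	setsockopt(fd, SOL_SOCKET, SO_PRIORITY, &prio, sizeof(prio));
 *
 * Setting skb->priority to sch->handle itself (1:0) selects HTB_DIRECT.
 */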
211 
212 static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch,
213 				      int *qerr)
214 {
215 	struct htb_sched *q = qdisc_priv(sch);
216 	struct htb_class *cl;
217 	struct tcf_result res;
218 	struct tcf_proto *tcf;
219 	int result;
220 
221 	/* allow selecting a class by setting skb->priority to a valid classid;
222 	 * note that nfmark can be used too, by attaching an fw filter with no
223 	 * rules in it
224 	 */
225 	if (skb->priority == sch->handle)
226 		return HTB_DIRECT;	/* X:0 (direct flow) selected */
227 	cl = htb_find(skb->priority, sch);
228 	if (cl) {
229 		if (cl->level == 0)
230 			return cl;
231 		/* Start with inner filter chain if a non-leaf class is selected */
232 		tcf = rcu_dereference_bh(cl->filter_list);
233 	} else {
234 		tcf = rcu_dereference_bh(q->filter_list);
235 	}
236 
237 	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
238 	while (tcf && (result = tcf_classify(skb, tcf, &res, false)) >= 0) {
239 #ifdef CONFIG_NET_CLS_ACT
240 		switch (result) {
241 		case TC_ACT_QUEUED:
242 		case TC_ACT_STOLEN:
243 		case TC_ACT_TRAP:
244 			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
245 			/* fall through */
246 		case TC_ACT_SHOT:
247 			return NULL;
248 		}
249 #endif
250 		cl = (void *)res.class;
251 		if (!cl) {
252 			if (res.classid == sch->handle)
253 				return HTB_DIRECT;	/* X:0 (direct flow) */
254 			cl = htb_find(res.classid, sch);
255 			if (!cl)
256 				break;	/* filter selected invalid classid */
257 		}
258 		if (!cl->level)
259 			return cl;	/* we hit leaf; return it */
260 
261 		/* we have got inner class; apply inner filter chain */
262 		tcf = rcu_dereference_bh(cl->filter_list);
263 	}
264 	/* classification failed; try to use default class */
265 	cl = htb_find(TC_H_MAKE(TC_H_MAJ(sch->handle), q->defcls), sch);
266 	if (!cl || cl->level)
267 		return HTB_DIRECT;	/* bad default .. this is a safe bet */
268 	return cl;
269 }
270 
271 /**
272  * htb_add_to_id_tree - adds class to the round robin list
273  *
274  * Routine adds the class to the list (actually tree) sorted by classid.
275  * Make sure that the class is not already on such a list for the given prio.
276  */
277 static void htb_add_to_id_tree(struct rb_root *root,
278 			       struct htb_class *cl, int prio)
279 {
280 	struct rb_node **p = &root->rb_node, *parent = NULL;
281 
282 	while (*p) {
283 		struct htb_class *c;
284 		parent = *p;
285 		c = rb_entry(parent, struct htb_class, node[prio]);
286 
287 		if (cl->common.classid > c->common.classid)
288 			p = &parent->rb_right;
289 		else
290 			p = &parent->rb_left;
291 	}
292 	rb_link_node(&cl->node[prio], parent, p);
293 	rb_insert_color(&cl->node[prio], root);
294 }
295 
296 /**
297  * htb_add_to_wait_tree - adds class to the event queue with delay
298  *
299  * The class is added to the priority event queue to indicate that the
300  * class will change its mode at time cl->pq_key (in nanoseconds). Make
301  * sure that the class is not already in the queue.
302  */
303 static void htb_add_to_wait_tree(struct htb_sched *q,
304 				 struct htb_class *cl, s64 delay)
305 {
306 	struct rb_node **p = &q->hlevel[cl->level].wait_pq.rb_node, *parent = NULL;
307 
308 	cl->pq_key = q->now + delay;
309 	if (cl->pq_key == q->now)
310 		cl->pq_key++;
311 
312 	/* update the nearest event cache */
313 	if (q->near_ev_cache[cl->level] > cl->pq_key)
314 		q->near_ev_cache[cl->level] = cl->pq_key;
315 
316 	while (*p) {
317 		struct htb_class *c;
318 		parent = *p;
319 		c = rb_entry(parent, struct htb_class, pq_node);
320 		if (cl->pq_key >= c->pq_key)
321 			p = &parent->rb_right;
322 		else
323 			p = &parent->rb_left;
324 	}
325 	rb_link_node(&cl->pq_node, parent, p);
326 	rb_insert_color(&cl->pq_node, &q->hlevel[cl->level].wait_pq);
327 }
328 
329 /**
330  * htb_next_rb_node - finds next node in binary tree
331  *
332  * When we are past the last key, *n is set to NULL.
333  * Average complexity is 2 steps per call.
334  */
335 static inline void htb_next_rb_node(struct rb_node **n)
336 {
337 	*n = rb_next(*n);
338 }
339 
340 /**
341  * htb_add_class_to_row - add class to its row
342  *
343  * The class is added to the row at the priorities marked in mask.
344  * It does nothing if mask == 0.
345  */
346 static inline void htb_add_class_to_row(struct htb_sched *q,
347 					struct htb_class *cl, int mask)
348 {
349 	q->row_mask[cl->level] |= mask;
350 	while (mask) {
351 		int prio = ffz(~mask);
352 		mask &= ~(1 << prio);
353 		htb_add_to_id_tree(&q->hlevel[cl->level].hprio[prio].row, cl, prio);
354 	}
355 }
356 
357 /* If this triggers, it is a bug in this code, but it need not be fatal */
358 static void htb_safe_rb_erase(struct rb_node *rb, struct rb_root *root)
359 {
360 	if (RB_EMPTY_NODE(rb)) {
361 		WARN_ON(1);
362 	} else {
363 		rb_erase(rb, root);
364 		RB_CLEAR_NODE(rb);
365 	}
366 }
367 
368 
369 /**
370  * htb_remove_class_from_row - removes class from its row
371  *
372  * The class is removed from the row at the priorities marked in mask.
373  * It does nothing if mask == 0.
374  */
375 static inline void htb_remove_class_from_row(struct htb_sched *q,
376 						 struct htb_class *cl, int mask)
377 {
378 	int m = 0;
379 	struct htb_level *hlevel = &q->hlevel[cl->level];
380 
381 	while (mask) {
382 		int prio = ffz(~mask);
383 		struct htb_prio *hprio = &hlevel->hprio[prio];
384 
385 		mask &= ~(1 << prio);
386 		if (hprio->ptr == cl->node + prio)
387 			htb_next_rb_node(&hprio->ptr);
388 
389 		htb_safe_rb_erase(cl->node + prio, &hprio->row);
390 		if (!hprio->row.rb_node)
391 			m |= 1 << prio;
392 	}
393 	q->row_mask[cl->level] &= ~m;
394 }
395 
396 /**
397  * htb_activate_prios - creates an active class's feed chain
398  *
399  * The class is connected to its ancestors and/or the appropriate rows
400  * for the priorities it participates in. cl->cmode must be the new
401  * (activated) mode. It does nothing if cl->prio_activity == 0.
402  */
403 static void htb_activate_prios(struct htb_sched *q, struct htb_class *cl)
404 {
405 	struct htb_class *p = cl->parent;
406 	long m, mask = cl->prio_activity;
407 
408 	while (cl->cmode == HTB_MAY_BORROW && p && mask) {
409 		m = mask;
410 		while (m) {
411 			int prio = ffz(~m);
412 			m &= ~(1 << prio);
413 
414 			if (p->inner.clprio[prio].feed.rb_node)
415 				/* parent already has its feed in use, so
416 				 * reset the bit in mask as the parent is already ok
417 				 */
418 				mask &= ~(1 << prio);
419 
420 			htb_add_to_id_tree(&p->inner.clprio[prio].feed, cl, prio);
421 		}
422 		p->prio_activity |= mask;
423 		cl = p;
424 		p = cl->parent;
425 
426 	}
427 	if (cl->cmode == HTB_CAN_SEND && mask)
428 		htb_add_class_to_row(q, cl, mask);
429 }
430 
431 /**
432  * htb_deactivate_prios - remove the class from its feed chain
433  *
434  * cl->cmode must represent the old mode (before deactivation). It does
435  * nothing if cl->prio_activity == 0. The class is removed from all feed
436  * chains and rows.
437  */
438 static void htb_deactivate_prios(struct htb_sched *q, struct htb_class *cl)
439 {
440 	struct htb_class *p = cl->parent;
441 	long m, mask = cl->prio_activity;
442 
443 	while (cl->cmode == HTB_MAY_BORROW && p && mask) {
444 		m = mask;
445 		mask = 0;
446 		while (m) {
447 			int prio = ffz(~m);
448 			m &= ~(1 << prio);
449 
450 			if (p->inner.clprio[prio].ptr == cl->node + prio) {
451 				/* we are removing a child which is pointed to from
452 				 * the parent feed - forget the pointer but remember
453 				 * the classid
454 				 */
455 				p->inner.clprio[prio].last_ptr_id = cl->common.classid;
456 				p->inner.clprio[prio].ptr = NULL;
457 			}
458 
459 			htb_safe_rb_erase(cl->node + prio,
460 					  &p->inner.clprio[prio].feed);
461 
462 			if (!p->inner.clprio[prio].feed.rb_node)
463 				mask |= 1 << prio;
464 		}
465 
466 		p->prio_activity &= ~mask;
467 		cl = p;
468 		p = cl->parent;
469 
470 	}
471 	if (cl->cmode == HTB_CAN_SEND && mask)
472 		htb_remove_class_from_row(q, cl, mask);
473 }
474 
475 static inline s64 htb_lowater(const struct htb_class *cl)
476 {
477 	if (htb_hysteresis)
478 		return cl->cmode != HTB_CANT_SEND ? -cl->cbuffer : 0;
479 	else
480 		return 0;
481 }
482 static inline s64 htb_hiwater(const struct htb_class *cl)
483 {
484 	if (htb_hysteresis)
485 		return cl->cmode == HTB_CAN_SEND ? -cl->buffer : 0;
486 	else
487 		return 0;
488 }
489 
490 
491 /**
492  * htb_class_mode - computes and returns current class mode
493  *
494  * It computes cl's mode at time cl->t_c+diff and returns it. If the mode
495  * is not HTB_CAN_SEND then *diff is set to the time difference from now
496  * to the time when cl will change its state (used to set cl->pq_key).
497  * Also it is worth noting that the class mode doesn't change simply
498  * at cl->{c,}tokens == 0; rather, there can be a hysteresis in the
499  * range 0 .. -cl->{c,}buffer. It is meant to limit the number of
500  * mode transitions per time unit. The speed gain is about 1/6.
501  */
502 static inline enum htb_cmode
503 htb_class_mode(struct htb_class *cl, s64 *diff)
504 {
505 	s64 toks;
506 
507 	if ((toks = (cl->ctokens + *diff)) < htb_lowater(cl)) {
508 		*diff = -toks;
509 		return HTB_CANT_SEND;
510 	}
511 
512 	if ((toks = (cl->tokens + *diff)) >= htb_hiwater(cl))
513 		return HTB_CAN_SEND;
514 
515 	*diff = -toks;
516 	return HTB_MAY_BORROW;
517 }
518 
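/* A worked instance of the computation above, with hysteresis off
 * (lowater = hiwater = 0) and illustrative numbers: if at cl->t_c+diff
 * the buckets hold cl->ctokens + *diff = -200 and cl->tokens + *diff =
 * 300, the ceil bucket fails the first test (-200 < 0), so the class is
 * HTB_CANT_SEND and *diff becomes 200, i.e. the time left until the
 * ceil bucket refills to zero.  With a positive ceil bucket instead,
 * tokens = 300 >= 0 would yield HTB_CAN_SEND.
 */
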
519 /**
520  * htb_change_class_mode - changes class's mode
521  *
522  * This should be the only way to change a class's mode under normal
523  * circumstances. The routine will update the feed list linkage, change the
524  * mode and add the class to the wait event queue if appropriate. The new
525  * mode should be different from the old one, and cl->pq_key has to be valid
526  * if changing to a mode other than HTB_CAN_SEND (see htb_add_to_wait_tree).
527  */
528 static void
529 htb_change_class_mode(struct htb_sched *q, struct htb_class *cl, s64 *diff)
530 {
531 	enum htb_cmode new_mode = htb_class_mode(cl, diff);
532 
533 	if (new_mode == cl->cmode)
534 		return;
535 
536 	if (new_mode == HTB_CANT_SEND)
537 		cl->overlimits++;
538 
539 	if (cl->prio_activity) {	/* not necessary: speed optimization */
540 		if (cl->cmode != HTB_CANT_SEND)
541 			htb_deactivate_prios(q, cl);
542 		cl->cmode = new_mode;
543 		if (new_mode != HTB_CANT_SEND)
544 			htb_activate_prios(q, cl);
545 	} else
546 		cl->cmode = new_mode;
547 }
548 
549 /**
550  * htb_activate - inserts leaf cl into appropriate active feeds
551  *
552  * Routine learns the (new) priority of the leaf and activates the feed
553  * chain for that prio. It can safely be called on an already active
554  * leaf.
555  */
556 static inline void htb_activate(struct htb_sched *q, struct htb_class *cl)
557 {
558 	WARN_ON(cl->level || !cl->leaf.q || !cl->leaf.q->q.qlen);
559 
560 	if (!cl->prio_activity) {
561 		cl->prio_activity = 1 << cl->prio;
562 		htb_activate_prios(q, cl);
563 	}
564 }
565 
566 /**
567  * htb_deactivate - remove leaf cl from active feeds
568  *
569  * Make sure that the leaf is active. In other words, it can't be called
570  * with a non-active leaf.
571  */
572 static inline void htb_deactivate(struct htb_sched *q, struct htb_class *cl)
573 {
574 	WARN_ON(!cl->prio_activity);
575 
576 	htb_deactivate_prios(q, cl);
577 	cl->prio_activity = 0;
578 }
579 
580 static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
581 		       struct sk_buff **to_free)
582 {
583 	int uninitialized_var(ret);
584 	unsigned int len = qdisc_pkt_len(skb);
585 	struct htb_sched *q = qdisc_priv(sch);
586 	struct htb_class *cl = htb_classify(skb, sch, &ret);
587 
588 	if (cl == HTB_DIRECT) {
589 		/* enqueue to helper queue */
590 		if (q->direct_queue.qlen < q->direct_qlen) {
591 			__qdisc_enqueue_tail(skb, &q->direct_queue);
592 			q->direct_pkts++;
593 		} else {
594 			return qdisc_drop(skb, sch, to_free);
595 		}
596 #ifdef CONFIG_NET_CLS_ACT
597 	} else if (!cl) {
598 		if (ret & __NET_XMIT_BYPASS)
599 			qdisc_qstats_drop(sch);
600 		__qdisc_drop(skb, to_free);
601 		return ret;
602 #endif
603 	} else if ((ret = qdisc_enqueue(skb, cl->leaf.q,
604 					to_free)) != NET_XMIT_SUCCESS) {
605 		if (net_xmit_drop_count(ret)) {
606 			qdisc_qstats_drop(sch);
607 			cl->drops++;
608 		}
609 		return ret;
610 	} else {
611 		htb_activate(q, cl);
612 	}
613 
614 	sch->qstats.backlog += len;
615 	sch->q.qlen++;
616 	return NET_XMIT_SUCCESS;
617 }
618 
619 static inline void htb_accnt_tokens(struct htb_class *cl, int bytes, s64 diff)
620 {
621 	s64 toks = diff + cl->tokens;
622 
623 	if (toks > cl->buffer)
624 		toks = cl->buffer;
625 	toks -= (s64) psched_l2t_ns(&cl->rate, bytes);
626 	if (toks <= -cl->mbuffer)
627 		toks = 1 - cl->mbuffer;
628 
629 	cl->tokens = toks;
630 }
631 
632 static inline void htb_accnt_ctokens(struct htb_class *cl, int bytes, s64 diff)
633 {
634 	s64 toks = diff + cl->ctokens;
635 
636 	if (toks > cl->cbuffer)
637 		toks = cl->cbuffer;
638 	toks -= (s64) psched_l2t_ns(&cl->ceil, bytes);
639 	if (toks <= -cl->mbuffer)
640 		toks = 1 - cl->mbuffer;
641 
642 	cl->ctokens = toks;
643 }
644 
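/* A numeric sketch of the accounting above (figures are illustrative):
 * at a rate of 1 Mbyte/s, psched_l2t_ns() prices a 1500-byte packet at
 * roughly 1,500,000 ns of tokens.  A class that was idle for diff =
 * 2,000,000 ns with buffer = 1,000,000 ns is first clipped to a full
 * bucket (toks = 1,000,000), then charged for the packet, leaving
 * tokens = -500,000: it is half a millisecond in debt against its rate.
 */
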
645 /**
646  * htb_charge_class - charges amount "bytes" to leaf and ancestors
647  *
648  * Routine assumes that packet "bytes" long was dequeued from leaf cl
649  * borrowing from "level". It accounts bytes to ceil leaky bucket for
650  * leaf and all ancestors and to rate bucket for ancestors at levels
651  * "level" and higher. It also handles possible change of mode resulting
652  * from the update. Note that mode can also increase here (MAY_BORROW to
653  * CAN_SEND) because we can use more precise clock that event queue here.
654  * In such case we remove class from event queue first.
655  */
656 static void htb_charge_class(struct htb_sched *q, struct htb_class *cl,
657 			     int level, struct sk_buff *skb)
658 {
659 	int bytes = qdisc_pkt_len(skb);
660 	enum htb_cmode old_mode;
661 	s64 diff;
662 
663 	while (cl) {
664 		diff = min_t(s64, q->now - cl->t_c, cl->mbuffer);
665 		if (cl->level >= level) {
666 			if (cl->level == level)
667 				cl->xstats.lends++;
668 			htb_accnt_tokens(cl, bytes, diff);
669 		} else {
670 			cl->xstats.borrows++;
671 			cl->tokens += diff;	/* we moved t_c; update tokens */
672 		}
673 		htb_accnt_ctokens(cl, bytes, diff);
674 		cl->t_c = q->now;
675 
676 		old_mode = cl->cmode;
677 		diff = 0;
678 		htb_change_class_mode(q, cl, &diff);
679 		if (old_mode != cl->cmode) {
680 			if (old_mode != HTB_CAN_SEND)
681 				htb_safe_rb_erase(&cl->pq_node, &q->hlevel[cl->level].wait_pq);
682 			if (cl->cmode != HTB_CAN_SEND)
683 				htb_add_to_wait_tree(q, cl, diff);
684 		}
685 
686 		/* update basic stats except for leaves which are already updated */
687 		if (cl->level)
688 			bstats_update(&cl->bstats, skb);
689 
690 		cl = cl->parent;
691 	}
692 }
693 
694 /**
695  * htb_do_events - make mode changes to classes at the level
696  *
697  * Scans event queue for pending events and applies them. Returns time of
698  * next pending event (0 for no event in pq, q->now for too many events).
699  * Note: Applied are events which have cl->pq_key <= q->now.
700  */
701 static s64 htb_do_events(struct htb_sched *q, const int level,
702 			 unsigned long start)
703 {
704 	/* don't run for longer than 2 jiffies; 2 is used instead of
705 	 * 1 to simplify things when jiffy is going to be incremented
706 	 * too soon
707 	 */
708 	unsigned long stop_at = start + 2;
709 	struct rb_root *wait_pq = &q->hlevel[level].wait_pq;
710 
711 	while (time_before(jiffies, stop_at)) {
712 		struct htb_class *cl;
713 		s64 diff;
714 		struct rb_node *p = rb_first(wait_pq);
715 
716 		if (!p)
717 			return 0;
718 
719 		cl = rb_entry(p, struct htb_class, pq_node);
720 		if (cl->pq_key > q->now)
721 			return cl->pq_key;
722 
723 		htb_safe_rb_erase(p, wait_pq);
724 		diff = min_t(s64, q->now - cl->t_c, cl->mbuffer);
725 		htb_change_class_mode(q, cl, &diff);
726 		if (cl->cmode != HTB_CAN_SEND)
727 			htb_add_to_wait_tree(q, cl, diff);
728 	}
729 
730 	/* too much load - let's continue after a break for scheduling */
731 	if (!(q->warned & HTB_WARN_TOOMANYEVENTS)) {
732 		pr_warn("htb: too many events!\n");
733 		q->warned |= HTB_WARN_TOOMANYEVENTS;
734 	}
735 
736 	return q->now;
737 }
738 
739 /* Returns class->node+prio from the id-tree where the class's id is >= id.
740  * NULL if no such one exists.
741  */
742 static struct rb_node *htb_id_find_next_upper(int prio, struct rb_node *n,
743 					      u32 id)
744 {
745 	struct rb_node *r = NULL;
746 	while (n) {
747 		struct htb_class *cl =
748 		    rb_entry(n, struct htb_class, node[prio]);
749 
750 		if (id > cl->common.classid) {
751 			n = n->rb_right;
752 		} else if (id < cl->common.classid) {
753 			r = n;
754 			n = n->rb_left;
755 		} else {
756 			return n;
757 		}
758 	}
759 	return r;
760 }
761 
762 /**
763  * htb_lookup_leaf - returns next leaf class in DRR order
764  *
765  * Find the leaf the current feed pointer points to.
766  */
767 static struct htb_class *htb_lookup_leaf(struct htb_prio *hprio, const int prio)
768 {
769 	int i;
770 	struct {
771 		struct rb_node *root;
772 		struct rb_node **pptr;
773 		u32 *pid;
774 	} stk[TC_HTB_MAXDEPTH], *sp = stk;
775 
776 	BUG_ON(!hprio->row.rb_node);
777 	sp->root = hprio->row.rb_node;
778 	sp->pptr = &hprio->ptr;
779 	sp->pid = &hprio->last_ptr_id;
780 
781 	for (i = 0; i < 65535; i++) {
782 		if (!*sp->pptr && *sp->pid) {
783 			/* ptr was invalidated but id is valid - try to recover
784 			 * the original or next ptr
785 			 */
786 			*sp->pptr =
787 			    htb_id_find_next_upper(prio, sp->root, *sp->pid);
788 		}
789 		*sp->pid = 0;	/* ptr is valid now, so remove this hint as it
790 				 * can become out of date quickly
791 				 */
792 		if (!*sp->pptr) {	/* we are at right end; rewind & go up */
793 			*sp->pptr = sp->root;
794 			while ((*sp->pptr)->rb_left)
795 				*sp->pptr = (*sp->pptr)->rb_left;
796 			if (sp > stk) {
797 				sp--;
798 				if (!*sp->pptr) {
799 					WARN_ON(1);
800 					return NULL;
801 				}
802 				htb_next_rb_node(sp->pptr);
803 			}
804 		} else {
805 			struct htb_class *cl;
806 			struct htb_prio *clp;
807 
808 			cl = rb_entry(*sp->pptr, struct htb_class, node[prio]);
809 			if (!cl->level)
810 				return cl;
811 			clp = &cl->inner.clprio[prio];
812 			(++sp)->root = clp->feed.rb_node;
813 			sp->pptr = &clp->ptr;
814 			sp->pid = &clp->last_ptr_id;
815 		}
816 	}
817 	WARN_ON(1);
818 	return NULL;
819 }
820 
821 /* dequeues packet at given priority and level; call only if
822  * you are sure that there is an active class at prio/level
823  */
824 static struct sk_buff *htb_dequeue_tree(struct htb_sched *q, const int prio,
825 					const int level)
826 {
827 	struct sk_buff *skb = NULL;
828 	struct htb_class *cl, *start;
829 	struct htb_level *hlevel = &q->hlevel[level];
830 	struct htb_prio *hprio = &hlevel->hprio[prio];
831 
832 	/* look initial class up in the row */
833 	start = cl = htb_lookup_leaf(hprio, prio);
834 
835 	do {
836 next:
837 		if (unlikely(!cl))
838 			return NULL;
839 
840 		/* class can be empty - it is unlikely but can be true if the leaf
841 		 * qdisc drops packets in its enqueue routine or if someone used
842 		 * the graft operation on the leaf since the last dequeue;
843 		 * simply deactivate and skip such a class
844 		 */
845 		if (unlikely(cl->leaf.q->q.qlen == 0)) {
846 			struct htb_class *next;
847 			htb_deactivate(q, cl);
848 
849 			/* row/level might become empty */
850 			if ((q->row_mask[level] & (1 << prio)) == 0)
851 				return NULL;
852 
853 			next = htb_lookup_leaf(hprio, prio);
854 
855 			if (cl == start)	/* fix start if we just deleted it */
856 				start = next;
857 			cl = next;
858 			goto next;
859 		}
860 
861 		skb = cl->leaf.q->dequeue(cl->leaf.q);
862 		if (likely(skb != NULL))
863 			break;
864 
865 		qdisc_warn_nonwc("htb", cl->leaf.q);
866 		htb_next_rb_node(level ? &cl->parent->inner.clprio[prio].ptr:
867 					 &q->hlevel[0].hprio[prio].ptr);
868 		cl = htb_lookup_leaf(hprio, prio);
869 
870 	} while (cl != start);
871 
872 	if (likely(skb != NULL)) {
873 		bstats_update(&cl->bstats, skb);
874 		cl->leaf.deficit[level] -= qdisc_pkt_len(skb);
875 		if (cl->leaf.deficit[level] < 0) {
876 			cl->leaf.deficit[level] += cl->quantum;
877 			htb_next_rb_node(level ? &cl->parent->inner.clprio[prio].ptr :
878 						 &q->hlevel[0].hprio[prio].ptr);
879 		}
880 		/* this used to be after charge_class but this constellation
881 		 * gives us slightly better performance
882 		 */
883 		if (!cl->leaf.q->q.qlen)
884 			htb_deactivate(q, cl);
885 		htb_charge_class(q, cl, level, skb);
886 	}
887 	return skb;
888 }
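
/* The deficit bookkeeping above is classic DRR: each dequeued packet is
 * charged against the leaf's per-level deficit, and once the deficit
 * goes negative one quantum is added back and the round-robin pointer
 * advances.  With quantum = 1500 and 1000-byte packets, for example, a
 * leaf yields the pointer after one packet on most turns but keeps it
 * for two on others, averaging one quantum (1500 bytes) per round.
 */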
889 
890 static struct sk_buff *htb_dequeue(struct Qdisc *sch)
891 {
892 	struct sk_buff *skb;
893 	struct htb_sched *q = qdisc_priv(sch);
894 	int level;
895 	s64 next_event;
896 	unsigned long start_at;
897 
898 	/* try to dequeue direct packets as high prio (!) to minimize cpu work */
899 	skb = __qdisc_dequeue_head(&q->direct_queue);
900 	if (skb != NULL) {
901 ok:
902 		qdisc_bstats_update(sch, skb);
903 		qdisc_qstats_backlog_dec(sch, skb);
904 		sch->q.qlen--;
905 		return skb;
906 	}
907 
908 	if (!sch->q.qlen)
909 		goto fin;
910 	q->now = ktime_get_ns();
911 	start_at = jiffies;
912 
913 	next_event = q->now + 5LLU * NSEC_PER_SEC;
914 
915 	for (level = 0; level < TC_HTB_MAXDEPTH; level++) {
916 		/* common case optimization - skip event handler quickly */
917 		int m;
918 		s64 event = q->near_ev_cache[level];
919 
920 		if (q->now >= event) {
921 			event = htb_do_events(q, level, start_at);
922 			if (!event)
923 				event = q->now + NSEC_PER_SEC;
924 			q->near_ev_cache[level] = event;
925 		}
926 
927 		if (next_event > event)
928 			next_event = event;
929 
930 		m = ~q->row_mask[level];
931 		while (m != (int)(-1)) {
932 			int prio = ffz(m);
933 
934 			m |= 1 << prio;
935 			skb = htb_dequeue_tree(q, prio, level);
936 			if (likely(skb != NULL))
937 				goto ok;
938 		}
939 	}
940 	qdisc_qstats_overlimit(sch);
941 	if (likely(next_event > q->now))
942 		qdisc_watchdog_schedule_ns(&q->watchdog, next_event);
943 	else
944 		schedule_work(&q->work);
945 fin:
946 	return skb;
947 }
948 
949 /* reset all classes */
950 /* always called under BH & queue lock */
951 static void htb_reset(struct Qdisc *sch)
952 {
953 	struct htb_sched *q = qdisc_priv(sch);
954 	struct htb_class *cl;
955 	unsigned int i;
956 
957 	for (i = 0; i < q->clhash.hashsize; i++) {
958 		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
959 			if (cl->level)
960 				memset(&cl->inner, 0, sizeof(cl->inner));
961 			else {
962 				if (cl->leaf.q)
963 					qdisc_reset(cl->leaf.q);
964 			}
965 			cl->prio_activity = 0;
966 			cl->cmode = HTB_CAN_SEND;
967 		}
968 	}
969 	qdisc_watchdog_cancel(&q->watchdog);
970 	__qdisc_reset_queue(&q->direct_queue);
971 	sch->q.qlen = 0;
972 	sch->qstats.backlog = 0;
973 	memset(q->hlevel, 0, sizeof(q->hlevel));
974 	memset(q->row_mask, 0, sizeof(q->row_mask));
975 }
976 
977 static const struct nla_policy htb_policy[TCA_HTB_MAX + 1] = {
978 	[TCA_HTB_PARMS]	= { .len = sizeof(struct tc_htb_opt) },
979 	[TCA_HTB_INIT]	= { .len = sizeof(struct tc_htb_glob) },
980 	[TCA_HTB_CTAB]	= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
981 	[TCA_HTB_RTAB]	= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
982 	[TCA_HTB_DIRECT_QLEN] = { .type = NLA_U32 },
983 	[TCA_HTB_RATE64] = { .type = NLA_U64 },
984 	[TCA_HTB_CEIL64] = { .type = NLA_U64 },
985 };
986 
987 static void htb_work_func(struct work_struct *work)
988 {
989 	struct htb_sched *q = container_of(work, struct htb_sched, work);
990 	struct Qdisc *sch = q->watchdog.qdisc;
991 
992 	rcu_read_lock();
993 	__netif_schedule(qdisc_root(sch));
994 	rcu_read_unlock();
995 }
996 
997 static int htb_init(struct Qdisc *sch, struct nlattr *opt,
998 		    struct netlink_ext_ack *extack)
999 {
1000 	struct htb_sched *q = qdisc_priv(sch);
1001 	struct nlattr *tb[TCA_HTB_MAX + 1];
1002 	struct tc_htb_glob *gopt;
1003 	int err;
1004 
1005 	qdisc_watchdog_init(&q->watchdog, sch);
1006 	INIT_WORK(&q->work, htb_work_func);
1007 
1008 	if (!opt)
1009 		return -EINVAL;
1010 
1011 	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
1012 	if (err)
1013 		return err;
1014 
1015 	err = nla_parse_nested(tb, TCA_HTB_MAX, opt, htb_policy, NULL);
1016 	if (err < 0)
1017 		return err;
1018 
1019 	if (!tb[TCA_HTB_INIT])
1020 		return -EINVAL;
1021 
1022 	gopt = nla_data(tb[TCA_HTB_INIT]);
1023 	if (gopt->version != HTB_VER >> 16)
1024 		return -EINVAL;
1025 
1026 	err = qdisc_class_hash_init(&q->clhash);
1027 	if (err < 0)
1028 		return err;
1029 
1030 	qdisc_skb_head_init(&q->direct_queue);
1031 
1032 	if (tb[TCA_HTB_DIRECT_QLEN])
1033 		q->direct_qlen = nla_get_u32(tb[TCA_HTB_DIRECT_QLEN]);
1034 	else
1035 		q->direct_qlen = qdisc_dev(sch)->tx_queue_len;
1036 
1037 	if ((q->rate2quantum = gopt->rate2quantum) < 1)
1038 		q->rate2quantum = 1;
1039 	q->defcls = gopt->defcls;
1040 
1041 	return 0;
1042 }
1043 
1044 static int htb_dump(struct Qdisc *sch, struct sk_buff *skb)
1045 {
1046 	struct htb_sched *q = qdisc_priv(sch);
1047 	struct nlattr *nest;
1048 	struct tc_htb_glob gopt;
1049 
1050 	/* It's safe not to acquire the qdisc lock. As we hold RTNL,
1051 	 * no change can happen on the qdisc parameters.
1052 	 */
1053 
1054 	gopt.direct_pkts = q->direct_pkts;
1055 	gopt.version = HTB_VER;
1056 	gopt.rate2quantum = q->rate2quantum;
1057 	gopt.defcls = q->defcls;
1058 	gopt.debug = 0;
1059 
1060 	nest = nla_nest_start(skb, TCA_OPTIONS);
1061 	if (nest == NULL)
1062 		goto nla_put_failure;
1063 	if (nla_put(skb, TCA_HTB_INIT, sizeof(gopt), &gopt) ||
1064 	    nla_put_u32(skb, TCA_HTB_DIRECT_QLEN, q->direct_qlen))
1065 		goto nla_put_failure;
1066 
1067 	return nla_nest_end(skb, nest);
1068 
1069 nla_put_failure:
1070 	nla_nest_cancel(skb, nest);
1071 	return -1;
1072 }
1073 
1074 static int htb_dump_class(struct Qdisc *sch, unsigned long arg,
1075 			  struct sk_buff *skb, struct tcmsg *tcm)
1076 {
1077 	struct htb_class *cl = (struct htb_class *)arg;
1078 	struct nlattr *nest;
1079 	struct tc_htb_opt opt;
1080 
1081 	/* It's safe not to acquire the qdisc lock. As we hold RTNL,
1082 	 * no change can happen on the class parameters.
1083 	 */
1084 	tcm->tcm_parent = cl->parent ? cl->parent->common.classid : TC_H_ROOT;
1085 	tcm->tcm_handle = cl->common.classid;
1086 	if (!cl->level && cl->leaf.q)
1087 		tcm->tcm_info = cl->leaf.q->handle;
1088 
1089 	nest = nla_nest_start(skb, TCA_OPTIONS);
1090 	if (nest == NULL)
1091 		goto nla_put_failure;
1092 
1093 	memset(&opt, 0, sizeof(opt));
1094 
1095 	psched_ratecfg_getrate(&opt.rate, &cl->rate);
1096 	opt.buffer = PSCHED_NS2TICKS(cl->buffer);
1097 	psched_ratecfg_getrate(&opt.ceil, &cl->ceil);
1098 	opt.cbuffer = PSCHED_NS2TICKS(cl->cbuffer);
1099 	opt.quantum = cl->quantum;
1100 	opt.prio = cl->prio;
1101 	opt.level = cl->level;
1102 	if (nla_put(skb, TCA_HTB_PARMS, sizeof(opt), &opt))
1103 		goto nla_put_failure;
1104 	if ((cl->rate.rate_bytes_ps >= (1ULL << 32)) &&
1105 	    nla_put_u64_64bit(skb, TCA_HTB_RATE64, cl->rate.rate_bytes_ps,
1106 			      TCA_HTB_PAD))
1107 		goto nla_put_failure;
1108 	if ((cl->ceil.rate_bytes_ps >= (1ULL << 32)) &&
1109 	    nla_put_u64_64bit(skb, TCA_HTB_CEIL64, cl->ceil.rate_bytes_ps,
1110 			      TCA_HTB_PAD))
1111 		goto nla_put_failure;
1112 
1113 	return nla_nest_end(skb, nest);
1114 
1115 nla_put_failure:
1116 	nla_nest_cancel(skb, nest);
1117 	return -1;
1118 }
1119 
1120 static int
1121 htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d)
1122 {
1123 	struct htb_class *cl = (struct htb_class *)arg;
1124 	struct gnet_stats_queue qs = {
1125 		.drops = cl->drops,
1126 		.overlimits = cl->overlimits,
1127 	};
1128 	__u32 qlen = 0;
1129 
1130 	if (!cl->level && cl->leaf.q) {
1131 		qlen = cl->leaf.q->q.qlen;
1132 		qs.backlog = cl->leaf.q->qstats.backlog;
1133 	}
1134 	cl->xstats.tokens = clamp_t(s64, PSCHED_NS2TICKS(cl->tokens),
1135 				    INT_MIN, INT_MAX);
1136 	cl->xstats.ctokens = clamp_t(s64, PSCHED_NS2TICKS(cl->ctokens),
1137 				     INT_MIN, INT_MAX);
1138 
1139 	if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
1140 				  d, NULL, &cl->bstats) < 0 ||
1141 	    gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
1142 	    gnet_stats_copy_queue(d, NULL, &qs, qlen) < 0)
1143 		return -1;
1144 
1145 	return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats));
1146 }
1147 
1148 static int htb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
1149 		     struct Qdisc **old, struct netlink_ext_ack *extack)
1150 {
1151 	struct htb_class *cl = (struct htb_class *)arg;
1152 
1153 	if (cl->level)
1154 		return -EINVAL;
1155 	if (new == NULL &&
1156 	    (new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
1157 				     cl->common.classid, extack)) == NULL)
1158 		return -ENOBUFS;
1159 
1160 	*old = qdisc_replace(sch, new, &cl->leaf.q);
1161 	return 0;
1162 }
1163 
1164 static struct Qdisc *htb_leaf(struct Qdisc *sch, unsigned long arg)
1165 {
1166 	struct htb_class *cl = (struct htb_class *)arg;
1167 	return !cl->level ? cl->leaf.q : NULL;
1168 }
1169 
1170 static void htb_qlen_notify(struct Qdisc *sch, unsigned long arg)
1171 {
1172 	struct htb_class *cl = (struct htb_class *)arg;
1173 
1174 	htb_deactivate(qdisc_priv(sch), cl);
1175 }
1176 
1177 static inline int htb_parent_last_child(struct htb_class *cl)
1178 {
1179 	if (!cl->parent)
1180 		/* the root class */
1181 		return 0;
1182 	if (cl->parent->children > 1)
1183 		/* not the last child */
1184 		return 0;
1185 	return 1;
1186 }
1187 
1188 static void htb_parent_to_leaf(struct htb_sched *q, struct htb_class *cl,
1189 			       struct Qdisc *new_q)
1190 {
1191 	struct htb_class *parent = cl->parent;
1192 
1193 	WARN_ON(cl->level || !cl->leaf.q || cl->prio_activity);
1194 
1195 	if (parent->cmode != HTB_CAN_SEND)
1196 		htb_safe_rb_erase(&parent->pq_node,
1197 				  &q->hlevel[parent->level].wait_pq);
1198 
1199 	parent->level = 0;
1200 	memset(&parent->inner, 0, sizeof(parent->inner));
1201 	parent->leaf.q = new_q ? new_q : &noop_qdisc;
1202 	parent->tokens = parent->buffer;
1203 	parent->ctokens = parent->cbuffer;
1204 	parent->t_c = ktime_get_ns();
1205 	parent->cmode = HTB_CAN_SEND;
1206 }
1207 
1208 static void htb_destroy_class(struct Qdisc *sch, struct htb_class *cl)
1209 {
1210 	if (!cl->level) {
1211 		WARN_ON(!cl->leaf.q);
1212 		qdisc_put(cl->leaf.q);
1213 	}
1214 	gen_kill_estimator(&cl->rate_est);
1215 	tcf_block_put(cl->block);
1216 	kfree(cl);
1217 }
1218 
1219 static void htb_destroy(struct Qdisc *sch)
1220 {
1221 	struct htb_sched *q = qdisc_priv(sch);
1222 	struct hlist_node *next;
1223 	struct htb_class *cl;
1224 	unsigned int i;
1225 
1226 	cancel_work_sync(&q->work);
1227 	qdisc_watchdog_cancel(&q->watchdog);
1228 	/* This line used to be after htb_destroy_class call below
1229 	 * and surprisingly it worked in 2.4. But it must precede it
1230 	 * because filters need their target class alive to be able to call
1231 	 * unbind_filter on it (without an Oops).
1232 	 */
1233 	tcf_block_put(q->block);
1234 
1235 	for (i = 0; i < q->clhash.hashsize; i++) {
1236 		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
1237 			tcf_block_put(cl->block);
1238 			cl->block = NULL;
1239 		}
1240 	}
1241 	for (i = 0; i < q->clhash.hashsize; i++) {
1242 		hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i],
1243 					  common.hnode)
1244 			htb_destroy_class(sch, cl);
1245 	}
1246 	qdisc_class_hash_destroy(&q->clhash);
1247 	__qdisc_reset_queue(&q->direct_queue);
1248 }
1249 
1250 static int htb_delete(struct Qdisc *sch, unsigned long arg)
1251 {
1252 	struct htb_sched *q = qdisc_priv(sch);
1253 	struct htb_class *cl = (struct htb_class *)arg;
1254 	struct Qdisc *new_q = NULL;
1255 	int last_child = 0;
1256 
1257 	/* TODO: why don't we allow deleting a subtree? references? does the
1258 	 * tc subsystem guarantee us that in htb_destroy it holds no class
1259 	 * refs so that we can remove children safely there?
1260 	 */
1261 	if (cl->children || cl->filter_cnt)
1262 		return -EBUSY;
1263 
1264 	if (!cl->level && htb_parent_last_child(cl)) {
1265 		new_q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
1266 					  cl->parent->common.classid,
1267 					  NULL);
1268 		last_child = 1;
1269 	}
1270 
1271 	sch_tree_lock(sch);
1272 
1273 	if (!cl->level) {
1274 		unsigned int qlen = cl->leaf.q->q.qlen;
1275 		unsigned int backlog = cl->leaf.q->qstats.backlog;
1276 
1277 		qdisc_reset(cl->leaf.q);
1278 		qdisc_tree_reduce_backlog(cl->leaf.q, qlen, backlog);
1279 	}
1280 
1281 	/* delete from hash and active; remainder in destroy_class */
1282 	qdisc_class_hash_remove(&q->clhash, &cl->common);
1283 	if (cl->parent)
1284 		cl->parent->children--;
1285 
1286 	if (cl->prio_activity)
1287 		htb_deactivate(q, cl);
1288 
1289 	if (cl->cmode != HTB_CAN_SEND)
1290 		htb_safe_rb_erase(&cl->pq_node,
1291 				  &q->hlevel[cl->level].wait_pq);
1292 
1293 	if (last_child)
1294 		htb_parent_to_leaf(q, cl, new_q);
1295 
1296 	sch_tree_unlock(sch);
1297 
1298 	htb_destroy_class(sch, cl);
1299 	return 0;
1300 }
1301 
1302 static int htb_change_class(struct Qdisc *sch, u32 classid,
1303 			    u32 parentid, struct nlattr **tca,
1304 			    unsigned long *arg, struct netlink_ext_ack *extack)
1305 {
1306 	int err = -EINVAL;
1307 	struct htb_sched *q = qdisc_priv(sch);
1308 	struct htb_class *cl = (struct htb_class *)*arg, *parent;
1309 	struct nlattr *opt = tca[TCA_OPTIONS];
1310 	struct nlattr *tb[TCA_HTB_MAX + 1];
1311 	struct tc_htb_opt *hopt;
1312 	u64 rate64, ceil64;
1313 	int warn = 0;
1314 
1315 	/* extract all subattrs from opt attr */
1316 	if (!opt)
1317 		goto failure;
1318 
1319 	err = nla_parse_nested(tb, TCA_HTB_MAX, opt, htb_policy, NULL);
1320 	if (err < 0)
1321 		goto failure;
1322 
1323 	err = -EINVAL;
1324 	if (tb[TCA_HTB_PARMS] == NULL)
1325 		goto failure;
1326 
1327 	parent = parentid == TC_H_ROOT ? NULL : htb_find(parentid, sch);
1328 
1329 	hopt = nla_data(tb[TCA_HTB_PARMS]);
1330 	if (!hopt->rate.rate || !hopt->ceil.rate)
1331 		goto failure;
1332 
1333 	/* Keep backward compatibility with rate_table based iproute2 tc */
1334 	if (hopt->rate.linklayer == TC_LINKLAYER_UNAWARE)
1335 		qdisc_put_rtab(qdisc_get_rtab(&hopt->rate, tb[TCA_HTB_RTAB],
1336 					      NULL));
1337 
1338 	if (hopt->ceil.linklayer == TC_LINKLAYER_UNAWARE)
1339 		qdisc_put_rtab(qdisc_get_rtab(&hopt->ceil, tb[TCA_HTB_CTAB],
1340 					      NULL));
1341 
1342 	if (!cl) {		/* new class */
1343 		struct Qdisc *new_q;
1344 		int prio;
1345 		struct {
1346 			struct nlattr		nla;
1347 			struct gnet_estimator	opt;
1348 		} est = {
1349 			.nla = {
1350 				.nla_len	= nla_attr_size(sizeof(est.opt)),
1351 				.nla_type	= TCA_RATE,
1352 			},
1353 			.opt = {
1354 				/* 4s interval, 16s averaging constant */
1355 				.interval	= 2,
1356 				.ewma_log	= 2,
1357 			},
1358 		};
1359 
1360 		/* check for valid classid */
1361 		if (!classid || TC_H_MAJ(classid ^ sch->handle) ||
1362 		    htb_find(classid, sch))
1363 			goto failure;
1364 
1365 		/* check maximal depth */
1366 		if (parent && parent->parent && parent->parent->level < 2) {
1367 			pr_err("htb: tree is too deep\n");
1368 			goto failure;
1369 		}
1370 		err = -ENOBUFS;
1371 		cl = kzalloc(sizeof(*cl), GFP_KERNEL);
1372 		if (!cl)
1373 			goto failure;
1374 
1375 		err = tcf_block_get(&cl->block, &cl->filter_list, sch, extack);
1376 		if (err) {
1377 			kfree(cl);
1378 			goto failure;
1379 		}
1380 		if (htb_rate_est || tca[TCA_RATE]) {
1381 			err = gen_new_estimator(&cl->bstats, NULL,
1382 						&cl->rate_est,
1383 						NULL,
1384 						qdisc_root_sleeping_running(sch),
1385 						tca[TCA_RATE] ? : &est.nla);
1386 			if (err) {
1387 				tcf_block_put(cl->block);
1388 				kfree(cl);
1389 				goto failure;
1390 			}
1391 		}
1392 
1393 		cl->children = 0;
1394 		RB_CLEAR_NODE(&cl->pq_node);
1395 
1396 		for (prio = 0; prio < TC_HTB_NUMPRIO; prio++)
1397 			RB_CLEAR_NODE(&cl->node[prio]);
1398 
1399 		/* create leaf qdisc early because it uses kmalloc(GFP_KERNEL)
1400 		 * so it can't be used inside of sch_tree_lock
1401 		 * -- thanks to Karlis Peisenieks
1402 		 */
1403 		new_q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
1404 					  classid, NULL);
1405 		sch_tree_lock(sch);
1406 		if (parent && !parent->level) {
1407 			unsigned int qlen = parent->leaf.q->q.qlen;
1408 			unsigned int backlog = parent->leaf.q->qstats.backlog;
1409 
1410 			/* turn parent into inner node */
1411 			qdisc_reset(parent->leaf.q);
1412 			qdisc_tree_reduce_backlog(parent->leaf.q, qlen, backlog);
1413 			qdisc_put(parent->leaf.q);
1414 			if (parent->prio_activity)
1415 				htb_deactivate(q, parent);
1416 
1417 			/* remove from evt list because of level change */
1418 			if (parent->cmode != HTB_CAN_SEND) {
1419 				htb_safe_rb_erase(&parent->pq_node, &q->hlevel[0].wait_pq);
1420 				parent->cmode = HTB_CAN_SEND;
1421 			}
1422 			parent->level = (parent->parent ? parent->parent->level
1423 					 : TC_HTB_MAXDEPTH) - 1;
1424 			memset(&parent->inner, 0, sizeof(parent->inner));
1425 		}
1426 		/* leaf (we) needs elementary qdisc */
1427 		cl->leaf.q = new_q ? new_q : &noop_qdisc;
1428 
1429 		cl->common.classid = classid;
1430 		cl->parent = parent;
1431 
1432 		/* set class to be in HTB_CAN_SEND state */
1433 		cl->tokens = PSCHED_TICKS2NS(hopt->buffer);
1434 		cl->ctokens = PSCHED_TICKS2NS(hopt->cbuffer);
1435 		cl->mbuffer = 60ULL * NSEC_PER_SEC;	/* 1min */
1436 		cl->t_c = ktime_get_ns();
1437 		cl->cmode = HTB_CAN_SEND;
1438 
1439 		/* attach to the hash list and parent's family */
1440 		qdisc_class_hash_insert(&q->clhash, &cl->common);
1441 		if (parent)
1442 			parent->children++;
1443 		if (cl->leaf.q != &noop_qdisc)
1444 			qdisc_hash_add(cl->leaf.q, true);
1445 	} else {
1446 		if (tca[TCA_RATE]) {
1447 			err = gen_replace_estimator(&cl->bstats, NULL,
1448 						    &cl->rate_est,
1449 						    NULL,
1450 						    qdisc_root_sleeping_running(sch),
1451 						    tca[TCA_RATE]);
1452 			if (err)
1453 				return err;
1454 		}
1455 		sch_tree_lock(sch);
1456 	}
1457 
1458 	rate64 = tb[TCA_HTB_RATE64] ? nla_get_u64(tb[TCA_HTB_RATE64]) : 0;
1459 
1460 	ceil64 = tb[TCA_HTB_CEIL64] ? nla_get_u64(tb[TCA_HTB_CEIL64]) : 0;
1461 
1462 	psched_ratecfg_precompute(&cl->rate, &hopt->rate, rate64);
1463 	psched_ratecfg_precompute(&cl->ceil, &hopt->ceil, ceil64);
1464 
1465 	/* there used to be a nasty bug here: we have to check that the node
1466 	 * is really a leaf before changing cl->leaf !
1467 	 */
1468 	if (!cl->level) {
1469 		u64 quantum = cl->rate.rate_bytes_ps;
1470 
1471 		do_div(quantum, q->rate2quantum);
1472 		cl->quantum = min_t(u64, quantum, INT_MAX);
1473 
1474 		if (!hopt->quantum && cl->quantum < 1000) {
1475 			warn = -1;
1476 			cl->quantum = 1000;
1477 		}
1478 		if (!hopt->quantum && cl->quantum > 200000) {
1479 			warn = 1;
1480 			cl->quantum = 200000;
1481 		}
1482 		if (hopt->quantum)
1483 			cl->quantum = hopt->quantum;
1484 		if ((cl->prio = hopt->prio) >= TC_HTB_NUMPRIO)
1485 			cl->prio = TC_HTB_NUMPRIO - 1;
1486 	}
1487 
1488 	cl->buffer = PSCHED_TICKS2NS(hopt->buffer);
1489 	cl->cbuffer = PSCHED_TICKS2NS(hopt->cbuffer);
1490 
1491 	sch_tree_unlock(sch);
1492 
1493 	if (warn)
1494 		pr_warn("HTB: quantum of class %X is %s. Consider r2q change.\n",
1495 			    cl->common.classid, (warn == -1 ? "small" : "big"));
1496 
1497 	qdisc_class_hash_grow(sch, &q->clhash);
1498 
1499 	*arg = (unsigned long)cl;
1500 	return 0;
1501 
1502 failure:
1503 	return err;
1504 }
1505 
1506 static struct tcf_block *htb_tcf_block(struct Qdisc *sch, unsigned long arg,
1507 				       struct netlink_ext_ack *extack)
1508 {
1509 	struct htb_sched *q = qdisc_priv(sch);
1510 	struct htb_class *cl = (struct htb_class *)arg;
1511 
1512 	return cl ? cl->block : q->block;
1513 }
1514 
1515 static unsigned long htb_bind_filter(struct Qdisc *sch, unsigned long parent,
1516 				     u32 classid)
1517 {
1518 	struct htb_class *cl = htb_find(classid, sch);
1519 
1520 	/*if (cl && !cl->level) return 0;
1521 	 * The line above used to be there to prevent attaching filters to
1522 	 * leaves. But at least the tc_index filter uses this just to get the
1523 	 * class for other reasons, so we have to allow for it.
1524 	 * ----
1525 	 * 19.6.2002 As Werner explained it is ok - bind filter is just
1526 	 * another way to "lock" the class - unlike "get" this lock can
1527 	 * be broken by class during destroy IIUC.
1528 	 */
1529 	if (cl)
1530 		cl->filter_cnt++;
1531 	return (unsigned long)cl;
1532 }
1533 
1534 static void htb_unbind_filter(struct Qdisc *sch, unsigned long arg)
1535 {
1536 	struct htb_class *cl = (struct htb_class *)arg;
1537 
1538 	if (cl)
1539 		cl->filter_cnt--;
1540 }
1541 
1542 static void htb_walk(struct Qdisc *sch, struct qdisc_walker *arg)
1543 {
1544 	struct htb_sched *q = qdisc_priv(sch);
1545 	struct htb_class *cl;
1546 	unsigned int i;
1547 
1548 	if (arg->stop)
1549 		return;
1550 
1551 	for (i = 0; i < q->clhash.hashsize; i++) {
1552 		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
1553 			if (arg->count < arg->skip) {
1554 				arg->count++;
1555 				continue;
1556 			}
1557 			if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
1558 				arg->stop = 1;
1559 				return;
1560 			}
1561 			arg->count++;
1562 		}
1563 	}
1564 }
1565 
1566 static const struct Qdisc_class_ops htb_class_ops = {
1567 	.graft		=	htb_graft,
1568 	.leaf		=	htb_leaf,
1569 	.qlen_notify	=	htb_qlen_notify,
1570 	.find		=	htb_search,
1571 	.change		=	htb_change_class,
1572 	.delete		=	htb_delete,
1573 	.walk		=	htb_walk,
1574 	.tcf_block	=	htb_tcf_block,
1575 	.bind_tcf	=	htb_bind_filter,
1576 	.unbind_tcf	=	htb_unbind_filter,
1577 	.dump		=	htb_dump_class,
1578 	.dump_stats	=	htb_dump_class_stats,
1579 };
1580 
1581 static struct Qdisc_ops htb_qdisc_ops __read_mostly = {
1582 	.cl_ops		=	&htb_class_ops,
1583 	.id		=	"htb",
1584 	.priv_size	=	sizeof(struct htb_sched),
1585 	.enqueue	=	htb_enqueue,
1586 	.dequeue	=	htb_dequeue,
1587 	.peek		=	qdisc_peek_dequeued,
1588 	.init		=	htb_init,
1589 	.reset		=	htb_reset,
1590 	.destroy	=	htb_destroy,
1591 	.dump		=	htb_dump,
1592 	.owner		=	THIS_MODULE,
1593 };
1594 
1595 static int __init htb_module_init(void)
1596 {
1597 	return register_qdisc(&htb_qdisc_ops);
1598 }
1599 static void __exit htb_module_exit(void)
1600 {
1601 	unregister_qdisc(&htb_qdisc_ops);
1602 }
1603 
1604 module_init(htb_module_init)
1605 module_exit(htb_module_exit)
1606 MODULE_LICENSE("GPL");
1607