xref: /openbmc/linux/net/sched/sch_htb.c (revision 752beb5e)
1 /*
2  * net/sched/sch_htb.c	Hierarchical token bucket, feed tree version
3  *
4  *		This program is free software; you can redistribute it and/or
5  *		modify it under the terms of the GNU General Public License
6  *		as published by the Free Software Foundation; either version
7  *		2 of the License, or (at your option) any later version.
8  *
9  * Authors:	Martin Devera, <devik@cdi.cz>
10  *
11  * Credits (in time order) for older HTB versions:
12  *              Stef Coene <stef.coene@docum.org>
13  *			HTB support at LARTC mailing list
14  *		Ondrej Kraus, <krauso@barr.cz>
15  *			found missing INIT_QDISC(htb)
16  *		Vladimir Smelhaus, Aamer Akhter, Bert Hubert
17  *			helped a lot to locate nasty class stall bug
18  *		Andi Kleen, Jamal Hadi, Bert Hubert
19  *			code review and helpful comments on shaping
20  *		Tomasz Wrona, <tw@eter.tym.pl>
21  *			created test case so that I was able to fix nasty bug
22  *		Wilfried Weissmann
23  *			spotted bug in dequeue code and helped with fix
24  *		Jiri Fojtasek
25  *			fixed requeue routine
26  *		and many others. thanks.
27  */
28 #include <linux/module.h>
29 #include <linux/moduleparam.h>
30 #include <linux/types.h>
31 #include <linux/kernel.h>
32 #include <linux/string.h>
33 #include <linux/errno.h>
34 #include <linux/skbuff.h>
35 #include <linux/list.h>
36 #include <linux/compiler.h>
37 #include <linux/rbtree.h>
38 #include <linux/workqueue.h>
39 #include <linux/slab.h>
40 #include <net/netlink.h>
41 #include <net/sch_generic.h>
42 #include <net/pkt_sched.h>
43 #include <net/pkt_cls.h>
44 
45 /* HTB algorithm.
46     Author: devik@cdi.cz
47     ========================================================================
48     HTB is like TBF with multiple classes. It is also similar to CBQ because
49     it allows assigning a priority to each class in the hierarchy.
50     In fact it is another implementation of Floyd's formal sharing.
51 
52     Levels:
53     Each class is assigned a level. Leaves are ALWAYS at level 0 and root
54     classes are at level TC_HTB_MAXDEPTH-1. Interior nodes have a level
55     one less than their parent.
56 */
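
/* An illustrative userspace setup for the hierarchy described above (a
 * sketch only; the device name and the rates are assumptions, not taken
 * from this file):
 *
 *	tc qdisc add dev eth0 root handle 1: htb default 20
 *	tc class add dev eth0 parent 1: classid 1:1 htb rate 1mbit ceil 1mbit
 *	tc class add dev eth0 parent 1:1 classid 1:10 htb rate 600kbit ceil 1mbit prio 0
 *	tc class add dev eth0 parent 1:1 classid 1:20 htb rate 400kbit ceil 1mbit prio 1
 *
 * Here 1:10 and 1:20 are leaves (level 0) that may borrow from 1:1, and
 * unclassified traffic falls back to the "default 20" leaf.
 */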
57 
58 static int htb_hysteresis __read_mostly = 0; /* whether to use mode hysteresis for speedup */
59 #define HTB_VER 0x30011		/* major must match the version number supplied by TC */
60 
61 #if HTB_VER >> 16 != TC_HTB_PROTOVER
62 #error "Mismatched sch_htb.c and pkt_sch.h"
63 #endif
64 
65 /* Module parameter and sysfs export */
66 module_param    (htb_hysteresis, int, 0640);
67 MODULE_PARM_DESC(htb_hysteresis, "Hysteresis mode, less CPU load, less accurate");
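
/* With the 0640 mode above, the knob is also exposed through sysfs, so
 * hysteresis can be toggled at runtime (an illustrative shell one-liner):
 *
 *	echo 1 > /sys/module/sch_htb/parameters/htb_hysteresis
 */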
68 
69 static int htb_rate_est = 0; /* whether htb classes get a default rate estimator */
70 module_param(htb_rate_est, int, 0640);
71 MODULE_PARM_DESC(htb_rate_est, "setup a default rate estimator (4sec 16sec) for htb classes");
72 
73 /* used internally to keep the status of a single class */
74 enum htb_cmode {
75 	HTB_CANT_SEND,		/* class can't send and can't borrow */
76 	HTB_MAY_BORROW,		/* class can't send but may borrow */
77 	HTB_CAN_SEND		/* class can send */
78 };
79 
80 struct htb_prio {
81 	union {
82 		struct rb_root	row;
83 		struct rb_root	feed;
84 	};
85 	struct rb_node	*ptr;
86 	/* When a class changes from state 1->2 and disconnects from its
87 	 * parent's feed, we lose the ptr value and start from the
88 	 * first child again. Here we store the classid of the
89 	 * last valid ptr (used when ptr is NULL).
90 	 */
91 	u32		last_ptr_id;
92 };
93 
94 /* interior & leaf nodes; props specific to leaves are marked L:
95  * To reduce false sharing, place mostly read fields at beginning,
96  * and mostly written ones at the end.
97  */
98 struct htb_class {
99 	struct Qdisc_class_common common;
100 	struct psched_ratecfg	rate;
101 	struct psched_ratecfg	ceil;
102 	s64			buffer, cbuffer;/* token bucket depth/rate */
103 	s64			mbuffer;	/* max wait time */
104 	u32			prio;		/* these two are used only by leaves... */
105 	int			quantum;	/* but stored for parent-to-leaf return */
106 
107 	struct tcf_proto __rcu	*filter_list;	/* class attached filters */
108 	struct tcf_block	*block;
109 	int			filter_cnt;
110 
111 	int			level;		/* our level (see above) */
112 	unsigned int		children;
113 	struct htb_class	*parent;	/* parent class */
114 
115 	struct net_rate_estimator __rcu *rate_est;
116 
117 	/*
118 	 * Often-written fields
119 	 */
120 	struct gnet_stats_basic_packed bstats;
121 	struct tc_htb_xstats	xstats;	/* our special stats */
122 
123 	/* token bucket parameters */
124 	s64			tokens, ctokens;/* current number of tokens */
125 	s64			t_c;		/* checkpoint time */
126 
127 	union {
128 		struct htb_class_leaf {
129 			int		deficit[TC_HTB_MAXDEPTH];
130 			struct Qdisc	*q;
131 		} leaf;
132 		struct htb_class_inner {
133 			struct htb_prio clprio[TC_HTB_NUMPRIO];
134 		} inner;
135 	};
136 	s64			pq_key;
137 
138 	int			prio_activity;	/* for which prios are we active */
139 	enum htb_cmode		cmode;		/* current mode of the class */
140 	struct rb_node		pq_node;	/* node for event queue */
141 	struct rb_node		node[TC_HTB_NUMPRIO];	/* node for self or feed tree */
142 
143 	unsigned int drops ____cacheline_aligned_in_smp;
144 	unsigned int		overlimits;
145 };
146 
147 struct htb_level {
148 	struct rb_root	wait_pq;
149 	struct htb_prio hprio[TC_HTB_NUMPRIO];
150 };
151 
152 struct htb_sched {
153 	struct Qdisc_class_hash clhash;
154 	int			defcls;		/* class where unclassified flows go to */
155 	int			rate2quantum;	/* quant = rate / rate2quantum */
156 
157 	/* filters for qdisc itself */
158 	struct tcf_proto __rcu	*filter_list;
159 	struct tcf_block	*block;
160 
161 #define HTB_WARN_TOOMANYEVENTS	0x1
162 	unsigned int		warned;	/* only one warning */
163 	int			direct_qlen;
164 	struct work_struct	work;
165 
166 	/* non-shaped skbs; let them go directly through */
167 	struct qdisc_skb_head	direct_queue;
168 	u32			direct_pkts;
169 	u32			overlimits;
170 
171 	struct qdisc_watchdog	watchdog;
172 
173 	s64			now;	/* cached dequeue time */
174 
175 	/* time of nearest event per level (row) */
176 	s64			near_ev_cache[TC_HTB_MAXDEPTH];
177 
178 	int			row_mask[TC_HTB_MAXDEPTH];
179 
180 	struct htb_level	hlevel[TC_HTB_MAXDEPTH];
181 };
182 
183 /* find class in global hash table using given handle */
184 static inline struct htb_class *htb_find(u32 handle, struct Qdisc *sch)
185 {
186 	struct htb_sched *q = qdisc_priv(sch);
187 	struct Qdisc_class_common *clc;
188 
189 	clc = qdisc_class_find(&q->clhash, handle);
190 	if (clc == NULL)
191 		return NULL;
192 	return container_of(clc, struct htb_class, common);
193 }
194 
195 static unsigned long htb_search(struct Qdisc *sch, u32 handle)
196 {
197 	return (unsigned long)htb_find(handle, sch);
198 }
199 /**
200  * htb_classify - classify a packet into a class
201  *
202  * It returns NULL if the packet should be dropped or HTB_DIRECT (-1) if it
203  * should be passed directly through. In all other cases a leaf class is
204  * returned. We allow direct class selection by classid in skb->priority.
205  * Then we examine filters in the qdisc and in inner nodes (if a higher
206  * filter points to an inner node). If we end up with classid MAJOR:0 we
207  * enqueue the skb into the special internal fifo (direct); such packets go
208  * directly through. If we still have no valid leaf we try the MAJOR:default
209  * leaf; if that too fails, we finish and return the direct queue.
210  */
211 #define HTB_DIRECT ((struct htb_class *)-1L)
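
/* A minimal userspace sketch of the "classid in skb->priority" fast path
 * described above. Assumptions (not from this file): an HTB qdisc with
 * handle 1: on the egress device and a leaf class 1:10; tc handles are
 * hexadecimal, so "1:10" is the value 0x00010010:
 *
 *	int prio = 0x00010010;
 *	setsockopt(fd, SOL_SOCKET, SO_PRIORITY, &prio, sizeof(prio));
 *
 * Setting it to the qdisc handle itself (0x00010000) selects HTB_DIRECT.
 * Note that SO_PRIORITY values above 6 require CAP_NET_ADMIN.
 */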
212 
213 static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch,
214 				      int *qerr)
215 {
216 	struct htb_sched *q = qdisc_priv(sch);
217 	struct htb_class *cl;
218 	struct tcf_result res;
219 	struct tcf_proto *tcf;
220 	int result;
221 
222 	/* allow selecting a class by setting skb->priority to a valid classid;
223 	 * note that nfmark can be used too, by attaching an fw filter with no
224 	 * rules in it
225 	 */
226 	if (skb->priority == sch->handle)
227 		return HTB_DIRECT;	/* X:0 (direct flow) selected */
228 	cl = htb_find(skb->priority, sch);
229 	if (cl) {
230 		if (cl->level == 0)
231 			return cl;
232 		/* Start with inner filter chain if a non-leaf class is selected */
233 		tcf = rcu_dereference_bh(cl->filter_list);
234 	} else {
235 		tcf = rcu_dereference_bh(q->filter_list);
236 	}
237 
238 	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
239 	while (tcf && (result = tcf_classify(skb, tcf, &res, false)) >= 0) {
240 #ifdef CONFIG_NET_CLS_ACT
241 		switch (result) {
242 		case TC_ACT_QUEUED:
243 		case TC_ACT_STOLEN:
244 		case TC_ACT_TRAP:
245 			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
246 			/* fall through */
247 		case TC_ACT_SHOT:
248 			return NULL;
249 		}
250 #endif
251 		cl = (void *)res.class;
252 		if (!cl) {
253 			if (res.classid == sch->handle)
254 				return HTB_DIRECT;	/* X:0 (direct flow) */
255 			cl = htb_find(res.classid, sch);
256 			if (!cl)
257 				break;	/* filter selected invalid classid */
258 		}
259 		if (!cl->level)
260 			return cl;	/* we hit leaf; return it */
261 
262 		/* we have got inner class; apply inner filter chain */
263 		tcf = rcu_dereference_bh(cl->filter_list);
264 	}
265 	/* classification failed; try to use default class */
266 	cl = htb_find(TC_H_MAKE(TC_H_MAJ(sch->handle), q->defcls), sch);
267 	if (!cl || cl->level)
268 		return HTB_DIRECT;	/* bad default .. this is a safe bet */
269 	return cl;
270 }
271 
272 /**
273  * htb_add_to_id_tree - adds class to the round robin list
274  *
275  * Routine adds the class to the list (actually a tree) sorted by classid.
276  * Make sure that the class is not already on such a list for the given prio.
277  */
278 static void htb_add_to_id_tree(struct rb_root *root,
279 			       struct htb_class *cl, int prio)
280 {
281 	struct rb_node **p = &root->rb_node, *parent = NULL;
282 
283 	while (*p) {
284 		struct htb_class *c;
285 		parent = *p;
286 		c = rb_entry(parent, struct htb_class, node[prio]);
287 
288 		if (cl->common.classid > c->common.classid)
289 			p = &parent->rb_right;
290 		else
291 			p = &parent->rb_left;
292 	}
293 	rb_link_node(&cl->node[prio], parent, p);
294 	rb_insert_color(&cl->node[prio], root);
295 }
296 
297 /**
298  * htb_add_to_wait_tree - adds class to the event queue with delay
299  *
300  * The class is added to the priority event queue to indicate that the
301  * class will change its mode at the absolute time cl->pq_key (nanoseconds).
302  * Make sure that the class is not already in the queue.
303  */
304 static void htb_add_to_wait_tree(struct htb_sched *q,
305 				 struct htb_class *cl, s64 delay)
306 {
307 	struct rb_node **p = &q->hlevel[cl->level].wait_pq.rb_node, *parent = NULL;
308 
309 	cl->pq_key = q->now + delay;
310 	if (cl->pq_key == q->now)
311 		cl->pq_key++;
312 
313 	/* update the nearest event cache */
314 	if (q->near_ev_cache[cl->level] > cl->pq_key)
315 		q->near_ev_cache[cl->level] = cl->pq_key;
316 
317 	while (*p) {
318 		struct htb_class *c;
319 		parent = *p;
320 		c = rb_entry(parent, struct htb_class, pq_node);
321 		if (cl->pq_key >= c->pq_key)
322 			p = &parent->rb_right;
323 		else
324 			p = &parent->rb_left;
325 	}
326 	rb_link_node(&cl->pq_node, parent, p);
327 	rb_insert_color(&cl->pq_node, &q->hlevel[cl->level].wait_pq);
328 }
329 
330 /**
331  * htb_next_rb_node - finds next node in binary tree
332  *
333  * When we are past the last key, *n is set to NULL.
334  * Average complexity is 2 steps per call.
335  */
336 static inline void htb_next_rb_node(struct rb_node **n)
337 {
338 	*n = rb_next(*n);
339 }
340 
341 /**
342  * htb_add_class_to_row - add class to its row
343  *
344  * The class is added to row at priorities marked in mask.
345  * It does nothing if mask == 0.
346  */
347 static inline void htb_add_class_to_row(struct htb_sched *q,
348 					struct htb_class *cl, int mask)
349 {
350 	q->row_mask[cl->level] |= mask;
351 	while (mask) {
352 		int prio = ffz(~mask);
353 		mask &= ~(1 << prio);
354 		htb_add_to_id_tree(&q->hlevel[cl->level].hprio[prio].row, cl, prio);
355 	}
356 }
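
/* A plain-C sketch of the mask walk used above: ffz(~mask) is the index of
 * the lowest set bit of mask (for nonzero m, a userspace analogue is
 * __builtin_ctz(m)):
 *
 *	unsigned int m = 0x0a;               // prios 1 and 3 set
 *	while (m) {
 *		int prio = __builtin_ctz(m); // yields 1, then 3
 *		m &= ~(1U << prio);          // clear the bit just handled
 *	}
 */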
357 
358 /* If this triggers, it is a bug in this code, but it need not be fatal */
359 static void htb_safe_rb_erase(struct rb_node *rb, struct rb_root *root)
360 {
361 	if (RB_EMPTY_NODE(rb)) {
362 		WARN_ON(1);
363 	} else {
364 		rb_erase(rb, root);
365 		RB_CLEAR_NODE(rb);
366 	}
367 }
368 
369 
370 /**
371  * htb_remove_class_from_row - removes class from its row
372  *
373  * The class is removed from row at priorities marked in mask.
374  * It does nothing if mask == 0.
375  */
376 static inline void htb_remove_class_from_row(struct htb_sched *q,
377 						 struct htb_class *cl, int mask)
378 {
379 	int m = 0;
380 	struct htb_level *hlevel = &q->hlevel[cl->level];
381 
382 	while (mask) {
383 		int prio = ffz(~mask);
384 		struct htb_prio *hprio = &hlevel->hprio[prio];
385 
386 		mask &= ~(1 << prio);
387 		if (hprio->ptr == cl->node + prio)
388 			htb_next_rb_node(&hprio->ptr);
389 
390 		htb_safe_rb_erase(cl->node + prio, &hprio->row);
391 		if (!hprio->row.rb_node)
392 			m |= 1 << prio;
393 	}
394 	q->row_mask[cl->level] &= ~m;
395 }
396 
397 /**
398  * htb_activate_prios - creates the active class's feed chain
399  *
400  * The class is connected to its ancestors and/or the appropriate rows
401  * for the priorities it participates in. cl->cmode must be the new
402  * (activated) mode. It does nothing if cl->prio_activity == 0.
403  */
404 static void htb_activate_prios(struct htb_sched *q, struct htb_class *cl)
405 {
406 	struct htb_class *p = cl->parent;
407 	long m, mask = cl->prio_activity;
408 
409 	while (cl->cmode == HTB_MAY_BORROW && p && mask) {
410 		m = mask;
411 		while (m) {
412 			int prio = ffz(~m);
413 			m &= ~(1 << prio);
414 
415 			if (p->inner.clprio[prio].feed.rb_node)
416 				/* parent already has its feed in use, so
417 				 * reset the bit in mask; the parent is already ok
418 				 */
419 				mask &= ~(1 << prio);
420 
421 			htb_add_to_id_tree(&p->inner.clprio[prio].feed, cl, prio);
422 		}
423 		p->prio_activity |= mask;
424 		cl = p;
425 		p = cl->parent;
426 
427 	}
428 	if (cl->cmode == HTB_CAN_SEND && mask)
429 		htb_add_class_to_row(q, cl, mask);
430 }
431 
432 /**
433  * htb_deactivate_prios - remove class from feed chain
434  *
435  * cl->cmode must represent the old mode (before deactivation). It does
436  * nothing if cl->prio_activity == 0. The class is removed from all feed
437  * chains and rows.
438  */
439 static void htb_deactivate_prios(struct htb_sched *q, struct htb_class *cl)
440 {
441 	struct htb_class *p = cl->parent;
442 	long m, mask = cl->prio_activity;
443 
444 	while (cl->cmode == HTB_MAY_BORROW && p && mask) {
445 		m = mask;
446 		mask = 0;
447 		while (m) {
448 			int prio = ffz(~m);
449 			m &= ~(1 << prio);
450 
451 			if (p->inner.clprio[prio].ptr == cl->node + prio) {
452 				/* we are removing a child which is pointed to
453 				 * from the parent feed - forget the pointer but
454 				 * remember the classid
455 				 */
456 				p->inner.clprio[prio].last_ptr_id = cl->common.classid;
457 				p->inner.clprio[prio].ptr = NULL;
458 			}
459 
460 			htb_safe_rb_erase(cl->node + prio,
461 					  &p->inner.clprio[prio].feed);
462 
463 			if (!p->inner.clprio[prio].feed.rb_node)
464 				mask |= 1 << prio;
465 		}
466 
467 		p->prio_activity &= ~mask;
468 		cl = p;
469 		p = cl->parent;
470 
471 	}
472 	if (cl->cmode == HTB_CAN_SEND && mask)
473 		htb_remove_class_from_row(q, cl, mask);
474 }
475 
476 static inline s64 htb_lowater(const struct htb_class *cl)
477 {
478 	if (htb_hysteresis)
479 		return cl->cmode != HTB_CANT_SEND ? -cl->cbuffer : 0;
480 	else
481 		return 0;
482 }
483 static inline s64 htb_hiwater(const struct htb_class *cl)
484 {
485 	if (htb_hysteresis)
486 		return cl->cmode == HTB_CAN_SEND ? -cl->buffer : 0;
487 	else
488 		return 0;
489 }
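
/* How the watermarks above play out with htb_hysteresis enabled: a class
 * currently in HTB_CAN_SEND keeps that mode while tokens >= -cl->buffer
 * (instead of >= 0), and only becomes HTB_CANT_SEND once ctokens drops
 * below -cl->cbuffer (instead of below 0). With htb_hysteresis == 0 both
 * watermarks are 0, so modes flip exactly at token exhaustion; the
 * hysteresis trades some shaping accuracy for fewer mode transitions.
 */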
490 
491 
492 /**
493  * htb_class_mode - computes and returns current class mode
494  *
495  * It computes cl's mode at time cl->t_c+diff and returns it. If the mode
496  * is not HTB_CAN_SEND then *diff is set to the time difference from now
497  * to the time when cl will change its state.
498  * Also, it is worth noting that the class mode doesn't change simply
499  * at cl->{c,}tokens == 0; rather, there can be hysteresis over the
500  * 0 .. -cl->{c,}buffer range. It is meant to limit the number of
501  * mode transitions per time unit. The speed gain is about 1/6.
502  */
503 static inline enum htb_cmode
504 htb_class_mode(struct htb_class *cl, s64 *diff)
505 {
506 	s64 toks;
507 
508 	if ((toks = (cl->ctokens + *diff)) < htb_lowater(cl)) {
509 		*diff = -toks;
510 		return HTB_CANT_SEND;
511 	}
512 
513 	if ((toks = (cl->tokens + *diff)) >= htb_hiwater(cl))
514 		return HTB_CAN_SEND;
515 
516 	*diff = -toks;
517 	return HTB_MAY_BORROW;
518 }
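
/* Decision summary for htb_class_mode() with hysteresis off (both
 * watermarks 0):
 *
 *	ctokens + diff < 0   ->  HTB_CANT_SEND   (ceil exceeded)
 *	tokens  + diff >= 0  ->  HTB_CAN_SEND    (within rate)
 *	otherwise            ->  HTB_MAY_BORROW  (rate exceeded, ceil not)
 *
 * In the first and last cases *diff is overwritten with the time (in ns)
 * the class needs to accumulate enough tokens to leave that mode.
 */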
519 
520 /**
521  * htb_change_class_mode - changes the class's mode
522  *
523  * This should be the only way to change a class's mode under normal
524  * circumstances. The routine updates the feed list linkage, changes the
525  * mode and adds the class to the wait event queue if appropriate. The new
526  * mode should differ from the old one and cl->pq_key has to be valid if
527  * changing to a mode other than HTB_CAN_SEND (see htb_add_to_wait_tree).
528  */
529 static void
530 htb_change_class_mode(struct htb_sched *q, struct htb_class *cl, s64 *diff)
531 {
532 	enum htb_cmode new_mode = htb_class_mode(cl, diff);
533 
534 	if (new_mode == cl->cmode)
535 		return;
536 
537 	if (new_mode == HTB_CANT_SEND) {
538 		cl->overlimits++;
539 		q->overlimits++;
540 	}
541 
542 	if (cl->prio_activity) {	/* not necessary: speed optimization */
543 		if (cl->cmode != HTB_CANT_SEND)
544 			htb_deactivate_prios(q, cl);
545 		cl->cmode = new_mode;
546 		if (new_mode != HTB_CANT_SEND)
547 			htb_activate_prios(q, cl);
548 	} else
549 		cl->cmode = new_mode;
550 }
551 
552 /**
553  * htb_activate - inserts leaf cl into appropriate active feeds
554  *
555  * Routine learns the (new) priority of the leaf and activates the feed
556  * chain for that prio. It can safely be called on an already active
557  * leaf.
558  */
559 static inline void htb_activate(struct htb_sched *q, struct htb_class *cl)
560 {
561 	WARN_ON(cl->level || !cl->leaf.q || !cl->leaf.q->q.qlen);
562 
563 	if (!cl->prio_activity) {
564 		cl->prio_activity = 1 << cl->prio;
565 		htb_activate_prios(q, cl);
566 	}
567 }
568 
569 /**
570  * htb_deactivate - remove leaf cl from active feeds
571  *
572  * Make sure that the leaf is active. In other words, it can't be called
573  * with a non-active leaf.
574  */
575 static inline void htb_deactivate(struct htb_sched *q, struct htb_class *cl)
576 {
577 	WARN_ON(!cl->prio_activity);
578 
579 	htb_deactivate_prios(q, cl);
580 	cl->prio_activity = 0;
581 }
582 
583 static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
584 		       struct sk_buff **to_free)
585 {
586 	int uninitialized_var(ret);
587 	unsigned int len = qdisc_pkt_len(skb);
588 	struct htb_sched *q = qdisc_priv(sch);
589 	struct htb_class *cl = htb_classify(skb, sch, &ret);
590 
591 	if (cl == HTB_DIRECT) {
592 		/* enqueue to helper queue */
593 		if (q->direct_queue.qlen < q->direct_qlen) {
594 			__qdisc_enqueue_tail(skb, &q->direct_queue);
595 			q->direct_pkts++;
596 		} else {
597 			return qdisc_drop(skb, sch, to_free);
598 		}
599 #ifdef CONFIG_NET_CLS_ACT
600 	} else if (!cl) {
601 		if (ret & __NET_XMIT_BYPASS)
602 			qdisc_qstats_drop(sch);
603 		__qdisc_drop(skb, to_free);
604 		return ret;
605 #endif
606 	} else if ((ret = qdisc_enqueue(skb, cl->leaf.q,
607 					to_free)) != NET_XMIT_SUCCESS) {
608 		if (net_xmit_drop_count(ret)) {
609 			qdisc_qstats_drop(sch);
610 			cl->drops++;
611 		}
612 		return ret;
613 	} else {
614 		htb_activate(q, cl);
615 	}
616 
617 	sch->qstats.backlog += len;
618 	sch->q.qlen++;
619 	return NET_XMIT_SUCCESS;
620 }
621 
622 static inline void htb_accnt_tokens(struct htb_class *cl, int bytes, s64 diff)
623 {
624 	s64 toks = diff + cl->tokens;
625 
626 	if (toks > cl->buffer)
627 		toks = cl->buffer;
628 	toks -= (s64) psched_l2t_ns(&cl->rate, bytes);
629 	if (toks <= -cl->mbuffer)
630 		toks = 1 - cl->mbuffer;
631 
632 	cl->tokens = toks;
633 }
634 
635 static inline void htb_accnt_ctokens(struct htb_class *cl, int bytes, s64 diff)
636 {
637 	s64 toks = diff + cl->ctokens;
638 
639 	if (toks > cl->cbuffer)
640 		toks = cl->cbuffer;
641 	toks -= (s64) psched_l2t_ns(&cl->ceil, bytes);
642 	if (toks <= -cl->mbuffer)
643 		toks = 1 - cl->mbuffer;
644 
645 	cl->ctokens = toks;
646 }
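
/* Worked numbers for the helpers above: tokens are kept in nanoseconds of
 * transmission time, so psched_l2t_ns() converts a packet length into the
 * time it occupies the link at the configured rate. At 1 Mbit/s
 * (125000 bytes/s), for example, a 1500 byte packet costs
 * 1500 / 125000 s = 12 ms = 12,000,000 ns worth of tokens; the running
 * total is then clamped to the [1 - cl->mbuffer, cl->buffer] window as
 * shown above.
 */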
647 
648 /**
649  * htb_charge_class - charges amount "bytes" to leaf and ancestors
650  *
651  * Routine assumes that a packet "bytes" long was dequeued from leaf cl,
652  * borrowing from "level". It accounts bytes to the ceil leaky bucket for
653  * the leaf and all ancestors, and to the rate bucket for ancestors at
654  * levels "level" and higher. It also handles a possible change of mode
655  * resulting from the update. Note that the mode can also increase here
656  * (MAY_BORROW to CAN_SEND) because we can use a more precise clock than
657  * the event queue. In such a case we remove the class from the event queue first.
658  */
659 static void htb_charge_class(struct htb_sched *q, struct htb_class *cl,
660 			     int level, struct sk_buff *skb)
661 {
662 	int bytes = qdisc_pkt_len(skb);
663 	enum htb_cmode old_mode;
664 	s64 diff;
665 
666 	while (cl) {
667 		diff = min_t(s64, q->now - cl->t_c, cl->mbuffer);
668 		if (cl->level >= level) {
669 			if (cl->level == level)
670 				cl->xstats.lends++;
671 			htb_accnt_tokens(cl, bytes, diff);
672 		} else {
673 			cl->xstats.borrows++;
674 			cl->tokens += diff;	/* we moved t_c; update tokens */
675 		}
676 		htb_accnt_ctokens(cl, bytes, diff);
677 		cl->t_c = q->now;
678 
679 		old_mode = cl->cmode;
680 		diff = 0;
681 		htb_change_class_mode(q, cl, &diff);
682 		if (old_mode != cl->cmode) {
683 			if (old_mode != HTB_CAN_SEND)
684 				htb_safe_rb_erase(&cl->pq_node, &q->hlevel[cl->level].wait_pq);
685 			if (cl->cmode != HTB_CAN_SEND)
686 				htb_add_to_wait_tree(q, cl, diff);
687 		}
688 
689 		/* update basic stats except for leaves which are already updated */
690 		if (cl->level)
691 			bstats_update(&cl->bstats, skb);
692 
693 		cl = cl->parent;
694 	}
695 }
696 
697 /**
698  * htb_do_events - make mode changes to classes at the level
699  *
700  * Scans the event queue for pending events and applies them. Returns the
701  * time of the next pending event (0 if the pq is empty, q->now if there
702  * are too many events). Note: only events with cl->pq_key <= q->now are applied.
703  */
704 static s64 htb_do_events(struct htb_sched *q, const int level,
705 			 unsigned long start)
706 {
707 	/* don't run for longer than 2 jiffies; 2 is used instead of
708 	 * 1 to simplify things when the jiffies counter is about to
709 	 * tick
710 	 */
711 	unsigned long stop_at = start + 2;
712 	struct rb_root *wait_pq = &q->hlevel[level].wait_pq;
713 
714 	while (time_before(jiffies, stop_at)) {
715 		struct htb_class *cl;
716 		s64 diff;
717 		struct rb_node *p = rb_first(wait_pq);
718 
719 		if (!p)
720 			return 0;
721 
722 		cl = rb_entry(p, struct htb_class, pq_node);
723 		if (cl->pq_key > q->now)
724 			return cl->pq_key;
725 
726 		htb_safe_rb_erase(p, wait_pq);
727 		diff = min_t(s64, q->now - cl->t_c, cl->mbuffer);
728 		htb_change_class_mode(q, cl, &diff);
729 		if (cl->cmode != HTB_CAN_SEND)
730 			htb_add_to_wait_tree(q, cl, diff);
731 	}
732 
733 	/* too much load - let's continue after a break for scheduling */
734 	if (!(q->warned & HTB_WARN_TOOMANYEVENTS)) {
735 		pr_warn("htb: too many events!\n");
736 		q->warned |= HTB_WARN_TOOMANYEVENTS;
737 	}
738 
739 	return q->now;
740 }
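
/* A note on the 2-jiffy budget above: with HZ=250, a common configuration,
 * a jiffy is 4 ms, so at most roughly 8 ms is spent applying events before
 * the function gives up and returns q->now; the caller then treats the
 * level as still having pending events and defers further work to the
 * work item instead of stalling the dequeue path here.
 */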
741 
742 /* Returns class->node+prio from the id-tree where the class's id is >= id.
743  * NULL if no such one exists.
744  */
745 static struct rb_node *htb_id_find_next_upper(int prio, struct rb_node *n,
746 					      u32 id)
747 {
748 	struct rb_node *r = NULL;
749 	while (n) {
750 		struct htb_class *cl =
751 		    rb_entry(n, struct htb_class, node[prio]);
752 
753 		if (id > cl->common.classid) {
754 			n = n->rb_right;
755 		} else if (id < cl->common.classid) {
756 			r = n;
757 			n = n->rb_left;
758 		} else {
759 			return n;
760 		}
761 	}
762 	return r;
763 }
764 
765 /**
766  * htb_lookup_leaf - returns next leaf class in DRR order
767  *
768  * Find the leaf that the current feed pointer points to.
769  */
770 static struct htb_class *htb_lookup_leaf(struct htb_prio *hprio, const int prio)
771 {
772 	int i;
773 	struct {
774 		struct rb_node *root;
775 		struct rb_node **pptr;
776 		u32 *pid;
777 	} stk[TC_HTB_MAXDEPTH], *sp = stk;
778 
779 	BUG_ON(!hprio->row.rb_node);
780 	sp->root = hprio->row.rb_node;
781 	sp->pptr = &hprio->ptr;
782 	sp->pid = &hprio->last_ptr_id;
783 
784 	for (i = 0; i < 65535; i++) {
785 		if (!*sp->pptr && *sp->pid) {
786 			/* ptr was invalidated but id is valid - try to recover
787 			 * the original or next ptr
788 			 */
789 			*sp->pptr =
790 			    htb_id_find_next_upper(prio, sp->root, *sp->pid);
791 		}
792 		*sp->pid = 0;	/* ptr is valid now, so drop this hint as it
793 				 * can become out of date quickly
794 				 */
795 		if (!*sp->pptr) {	/* we are at right end; rewind & go up */
796 			*sp->pptr = sp->root;
797 			while ((*sp->pptr)->rb_left)
798 				*sp->pptr = (*sp->pptr)->rb_left;
799 			if (sp > stk) {
800 				sp--;
801 				if (!*sp->pptr) {
802 					WARN_ON(1);
803 					return NULL;
804 				}
805 				htb_next_rb_node(sp->pptr);
806 			}
807 		} else {
808 			struct htb_class *cl;
809 			struct htb_prio *clp;
810 
811 			cl = rb_entry(*sp->pptr, struct htb_class, node[prio]);
812 			if (!cl->level)
813 				return cl;
814 			clp = &cl->inner.clprio[prio];
815 			(++sp)->root = clp->feed.rb_node;
816 			sp->pptr = &clp->ptr;
817 			sp->pid = &clp->last_ptr_id;
818 		}
819 	}
820 	WARN_ON(1);
821 	return NULL;
822 }
823 
824 /* dequeues packet at given priority and level; call only if
825  * you are sure that there is an active class at prio/level
826  */
827 static struct sk_buff *htb_dequeue_tree(struct htb_sched *q, const int prio,
828 					const int level)
829 {
830 	struct sk_buff *skb = NULL;
831 	struct htb_class *cl, *start;
832 	struct htb_level *hlevel = &q->hlevel[level];
833 	struct htb_prio *hprio = &hlevel->hprio[prio];
834 
835 	/* look up the initial class in the row */
836 	start = cl = htb_lookup_leaf(hprio, prio);
837 
838 	do {
839 next:
840 		if (unlikely(!cl))
841 			return NULL;
842 
843 		/* class can be empty - it is unlikely but can be true if leaf
844 		 * qdisc drops packets in its enqueue routine or if someone used
845 		 * the graft operation on the leaf since the last dequeue;
846 		 * simply deactivate and skip such a class
847 		 */
848 		if (unlikely(cl->leaf.q->q.qlen == 0)) {
849 			struct htb_class *next;
850 			htb_deactivate(q, cl);
851 
852 			/* row/level might become empty */
853 			if ((q->row_mask[level] & (1 << prio)) == 0)
854 				return NULL;
855 
856 			next = htb_lookup_leaf(hprio, prio);
857 
858 			if (cl == start)	/* fix start if we just deleted it */
859 				start = next;
860 			cl = next;
861 			goto next;
862 		}
863 
864 		skb = cl->leaf.q->dequeue(cl->leaf.q);
865 		if (likely(skb != NULL))
866 			break;
867 
868 		qdisc_warn_nonwc("htb", cl->leaf.q);
869 		htb_next_rb_node(level ? &cl->parent->inner.clprio[prio].ptr:
870 					 &q->hlevel[0].hprio[prio].ptr);
871 		cl = htb_lookup_leaf(hprio, prio);
872 
873 	} while (cl != start);
874 
875 	if (likely(skb != NULL)) {
876 		bstats_update(&cl->bstats, skb);
877 		cl->leaf.deficit[level] -= qdisc_pkt_len(skb);
878 		if (cl->leaf.deficit[level] < 0) {
879 			cl->leaf.deficit[level] += cl->quantum;
880 			htb_next_rb_node(level ? &cl->parent->inner.clprio[prio].ptr :
881 						 &q->hlevel[0].hprio[prio].ptr);
882 		}
883 		/* this used to be after charge_class but this constellation
884 		 * gives us slightly better performance
885 		 */
886 		if (!cl->leaf.q->q.qlen)
887 			htb_deactivate(q, cl);
888 		htb_charge_class(q, cl, level, skb);
889 	}
890 	return skb;
891 }
892 
893 static struct sk_buff *htb_dequeue(struct Qdisc *sch)
894 {
895 	struct sk_buff *skb;
896 	struct htb_sched *q = qdisc_priv(sch);
897 	int level;
898 	s64 next_event;
899 	unsigned long start_at;
900 
901 	/* try to dequeue direct packets as high prio (!) to minimize cpu work */
902 	skb = __qdisc_dequeue_head(&q->direct_queue);
903 	if (skb != NULL) {
904 ok:
905 		qdisc_bstats_update(sch, skb);
906 		qdisc_qstats_backlog_dec(sch, skb);
907 		sch->q.qlen--;
908 		return skb;
909 	}
910 
911 	if (!sch->q.qlen)
912 		goto fin;
913 	q->now = ktime_get_ns();
914 	start_at = jiffies;
915 
916 	next_event = q->now + 5LLU * NSEC_PER_SEC;
917 
918 	for (level = 0; level < TC_HTB_MAXDEPTH; level++) {
919 		/* common case optimization - skip event handler quickly */
920 		int m;
921 		s64 event = q->near_ev_cache[level];
922 
923 		if (q->now >= event) {
924 			event = htb_do_events(q, level, start_at);
925 			if (!event)
926 				event = q->now + NSEC_PER_SEC;
927 			q->near_ev_cache[level] = event;
928 		}
929 
930 		if (next_event > event)
931 			next_event = event;
932 
933 		m = ~q->row_mask[level];
934 		while (m != (int)(-1)) {
935 			int prio = ffz(m);
936 
937 			m |= 1 << prio;
938 			skb = htb_dequeue_tree(q, prio, level);
939 			if (likely(skb != NULL))
940 				goto ok;
941 		}
942 	}
943 	if (likely(next_event > q->now))
944 		qdisc_watchdog_schedule_ns(&q->watchdog, next_event);
945 	else
946 		schedule_work(&q->work);
947 fin:
948 	return skb;
949 }
950 
951 /* reset all classes */
952 /* always called under BH & queue lock */
953 static void htb_reset(struct Qdisc *sch)
954 {
955 	struct htb_sched *q = qdisc_priv(sch);
956 	struct htb_class *cl;
957 	unsigned int i;
958 
959 	for (i = 0; i < q->clhash.hashsize; i++) {
960 		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
961 			if (cl->level)
962 				memset(&cl->inner, 0, sizeof(cl->inner));
963 			else {
964 				if (cl->leaf.q)
965 					qdisc_reset(cl->leaf.q);
966 			}
967 			cl->prio_activity = 0;
968 			cl->cmode = HTB_CAN_SEND;
969 		}
970 	}
971 	qdisc_watchdog_cancel(&q->watchdog);
972 	__qdisc_reset_queue(&q->direct_queue);
973 	sch->q.qlen = 0;
974 	sch->qstats.backlog = 0;
975 	memset(q->hlevel, 0, sizeof(q->hlevel));
976 	memset(q->row_mask, 0, sizeof(q->row_mask));
977 }
978 
979 static const struct nla_policy htb_policy[TCA_HTB_MAX + 1] = {
980 	[TCA_HTB_PARMS]	= { .len = sizeof(struct tc_htb_opt) },
981 	[TCA_HTB_INIT]	= { .len = sizeof(struct tc_htb_glob) },
982 	[TCA_HTB_CTAB]	= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
983 	[TCA_HTB_RTAB]	= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
984 	[TCA_HTB_DIRECT_QLEN] = { .type = NLA_U32 },
985 	[TCA_HTB_RATE64] = { .type = NLA_U64 },
986 	[TCA_HTB_CEIL64] = { .type = NLA_U64 },
987 };
988 
989 static void htb_work_func(struct work_struct *work)
990 {
991 	struct htb_sched *q = container_of(work, struct htb_sched, work);
992 	struct Qdisc *sch = q->watchdog.qdisc;
993 
994 	rcu_read_lock();
995 	__netif_schedule(qdisc_root(sch));
996 	rcu_read_unlock();
997 }
998 
999 static int htb_init(struct Qdisc *sch, struct nlattr *opt,
1000 		    struct netlink_ext_ack *extack)
1001 {
1002 	struct htb_sched *q = qdisc_priv(sch);
1003 	struct nlattr *tb[TCA_HTB_MAX + 1];
1004 	struct tc_htb_glob *gopt;
1005 	int err;
1006 
1007 	qdisc_watchdog_init(&q->watchdog, sch);
1008 	INIT_WORK(&q->work, htb_work_func);
1009 
1010 	if (!opt)
1011 		return -EINVAL;
1012 
1013 	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
1014 	if (err)
1015 		return err;
1016 
1017 	err = nla_parse_nested_deprecated(tb, TCA_HTB_MAX, opt, htb_policy,
1018 					  NULL);
1019 	if (err < 0)
1020 		return err;
1021 
1022 	if (!tb[TCA_HTB_INIT])
1023 		return -EINVAL;
1024 
1025 	gopt = nla_data(tb[TCA_HTB_INIT]);
1026 	if (gopt->version != HTB_VER >> 16)
1027 		return -EINVAL;
1028 
1029 	err = qdisc_class_hash_init(&q->clhash);
1030 	if (err < 0)
1031 		return err;
1032 
1033 	qdisc_skb_head_init(&q->direct_queue);
1034 
1035 	if (tb[TCA_HTB_DIRECT_QLEN])
1036 		q->direct_qlen = nla_get_u32(tb[TCA_HTB_DIRECT_QLEN]);
1037 	else
1038 		q->direct_qlen = qdisc_dev(sch)->tx_queue_len;
1039 
1040 	if ((q->rate2quantum = gopt->rate2quantum) < 1)
1041 		q->rate2quantum = 1;
1042 	q->defcls = gopt->defcls;
1043 
1044 	return 0;
1045 }
1046 
1047 static int htb_dump(struct Qdisc *sch, struct sk_buff *skb)
1048 {
1049 	struct htb_sched *q = qdisc_priv(sch);
1050 	struct nlattr *nest;
1051 	struct tc_htb_glob gopt;
1052 
1053 	sch->qstats.overlimits = q->overlimits;
1054 	/* It's safe not to acquire the qdisc lock. As we hold RTNL,
1055 	 * no change can happen on the qdisc parameters.
1056 	 */
1057 
1058 	gopt.direct_pkts = q->direct_pkts;
1059 	gopt.version = HTB_VER;
1060 	gopt.rate2quantum = q->rate2quantum;
1061 	gopt.defcls = q->defcls;
1062 	gopt.debug = 0;
1063 
1064 	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
1065 	if (nest == NULL)
1066 		goto nla_put_failure;
1067 	if (nla_put(skb, TCA_HTB_INIT, sizeof(gopt), &gopt) ||
1068 	    nla_put_u32(skb, TCA_HTB_DIRECT_QLEN, q->direct_qlen))
1069 		goto nla_put_failure;
1070 
1071 	return nla_nest_end(skb, nest);
1072 
1073 nla_put_failure:
1074 	nla_nest_cancel(skb, nest);
1075 	return -1;
1076 }
1077 
1078 static int htb_dump_class(struct Qdisc *sch, unsigned long arg,
1079 			  struct sk_buff *skb, struct tcmsg *tcm)
1080 {
1081 	struct htb_class *cl = (struct htb_class *)arg;
1082 	struct nlattr *nest;
1083 	struct tc_htb_opt opt;
1084 
1085 	/* It's safe not to acquire the qdisc lock. As we hold RTNL,
1086 	 * no change can happen on the class parameters.
1087 	 */
1088 	tcm->tcm_parent = cl->parent ? cl->parent->common.classid : TC_H_ROOT;
1089 	tcm->tcm_handle = cl->common.classid;
1090 	if (!cl->level && cl->leaf.q)
1091 		tcm->tcm_info = cl->leaf.q->handle;
1092 
1093 	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
1094 	if (nest == NULL)
1095 		goto nla_put_failure;
1096 
1097 	memset(&opt, 0, sizeof(opt));
1098 
1099 	psched_ratecfg_getrate(&opt.rate, &cl->rate);
1100 	opt.buffer = PSCHED_NS2TICKS(cl->buffer);
1101 	psched_ratecfg_getrate(&opt.ceil, &cl->ceil);
1102 	opt.cbuffer = PSCHED_NS2TICKS(cl->cbuffer);
1103 	opt.quantum = cl->quantum;
1104 	opt.prio = cl->prio;
1105 	opt.level = cl->level;
1106 	if (nla_put(skb, TCA_HTB_PARMS, sizeof(opt), &opt))
1107 		goto nla_put_failure;
1108 	if ((cl->rate.rate_bytes_ps >= (1ULL << 32)) &&
1109 	    nla_put_u64_64bit(skb, TCA_HTB_RATE64, cl->rate.rate_bytes_ps,
1110 			      TCA_HTB_PAD))
1111 		goto nla_put_failure;
1112 	if ((cl->ceil.rate_bytes_ps >= (1ULL << 32)) &&
1113 	    nla_put_u64_64bit(skb, TCA_HTB_CEIL64, cl->ceil.rate_bytes_ps,
1114 			      TCA_HTB_PAD))
1115 		goto nla_put_failure;
1116 
1117 	return nla_nest_end(skb, nest);
1118 
1119 nla_put_failure:
1120 	nla_nest_cancel(skb, nest);
1121 	return -1;
1122 }
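
/* Why the 64-bit attributes above exist: struct tc_ratespec carries the
 * rate in a 32-bit field, which saturates at 2^32 bytes/s (about
 * 34 Gbit/s), so rates at or above that are dumped in the separate
 * TCA_HTB_RATE64/TCA_HTB_CEIL64 attributes instead.
 */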
1123 
1124 static int
1125 htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d)
1126 {
1127 	struct htb_class *cl = (struct htb_class *)arg;
1128 	struct gnet_stats_queue qs = {
1129 		.drops = cl->drops,
1130 		.overlimits = cl->overlimits,
1131 	};
1132 	__u32 qlen = 0;
1133 
1134 	if (!cl->level && cl->leaf.q)
1135 		qdisc_qstats_qlen_backlog(cl->leaf.q, &qlen, &qs.backlog);
1136 
1137 	cl->xstats.tokens = clamp_t(s64, PSCHED_NS2TICKS(cl->tokens),
1138 				    INT_MIN, INT_MAX);
1139 	cl->xstats.ctokens = clamp_t(s64, PSCHED_NS2TICKS(cl->ctokens),
1140 				     INT_MIN, INT_MAX);
1141 
1142 	if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
1143 				  d, NULL, &cl->bstats) < 0 ||
1144 	    gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
1145 	    gnet_stats_copy_queue(d, NULL, &qs, qlen) < 0)
1146 		return -1;
1147 
1148 	return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats));
1149 }
1150 
1151 static int htb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
1152 		     struct Qdisc **old, struct netlink_ext_ack *extack)
1153 {
1154 	struct htb_class *cl = (struct htb_class *)arg;
1155 
1156 	if (cl->level)
1157 		return -EINVAL;
1158 	if (new == NULL &&
1159 	    (new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
1160 				     cl->common.classid, extack)) == NULL)
1161 		return -ENOBUFS;
1162 
1163 	*old = qdisc_replace(sch, new, &cl->leaf.q);
1164 	return 0;
1165 }
1166 
1167 static struct Qdisc *htb_leaf(struct Qdisc *sch, unsigned long arg)
1168 {
1169 	struct htb_class *cl = (struct htb_class *)arg;
1170 	return !cl->level ? cl->leaf.q : NULL;
1171 }
1172 
1173 static void htb_qlen_notify(struct Qdisc *sch, unsigned long arg)
1174 {
1175 	struct htb_class *cl = (struct htb_class *)arg;
1176 
1177 	htb_deactivate(qdisc_priv(sch), cl);
1178 }
1179 
1180 static inline int htb_parent_last_child(struct htb_class *cl)
1181 {
1182 	if (!cl->parent)
1183 		/* the root class */
1184 		return 0;
1185 	if (cl->parent->children > 1)
1186 		/* not the last child */
1187 		return 0;
1188 	return 1;
1189 }
1190 
1191 static void htb_parent_to_leaf(struct htb_sched *q, struct htb_class *cl,
1192 			       struct Qdisc *new_q)
1193 {
1194 	struct htb_class *parent = cl->parent;
1195 
1196 	WARN_ON(cl->level || !cl->leaf.q || cl->prio_activity);
1197 
1198 	if (parent->cmode != HTB_CAN_SEND)
1199 		htb_safe_rb_erase(&parent->pq_node,
1200 				  &q->hlevel[parent->level].wait_pq);
1201 
1202 	parent->level = 0;
1203 	memset(&parent->inner, 0, sizeof(parent->inner));
1204 	parent->leaf.q = new_q ? new_q : &noop_qdisc;
1205 	parent->tokens = parent->buffer;
1206 	parent->ctokens = parent->cbuffer;
1207 	parent->t_c = ktime_get_ns();
1208 	parent->cmode = HTB_CAN_SEND;
1209 }
1210 
1211 static void htb_destroy_class(struct Qdisc *sch, struct htb_class *cl)
1212 {
1213 	if (!cl->level) {
1214 		WARN_ON(!cl->leaf.q);
1215 		qdisc_put(cl->leaf.q);
1216 	}
1217 	gen_kill_estimator(&cl->rate_est);
1218 	tcf_block_put(cl->block);
1219 	kfree(cl);
1220 }
1221 
1222 static void htb_destroy(struct Qdisc *sch)
1223 {
1224 	struct htb_sched *q = qdisc_priv(sch);
1225 	struct hlist_node *next;
1226 	struct htb_class *cl;
1227 	unsigned int i;
1228 
1229 	cancel_work_sync(&q->work);
1230 	qdisc_watchdog_cancel(&q->watchdog);
1231 	/* This line used to be after htb_destroy_class call below
1232 	 * and surprisingly it worked in 2.4. But it must precede it
1233 	 * because filters need their target class alive to be able to call
1234 	 * unbind_filter on it (without Oops).
1235 	 */
1236 	tcf_block_put(q->block);
1237 
1238 	for (i = 0; i < q->clhash.hashsize; i++) {
1239 		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
1240 			tcf_block_put(cl->block);
1241 			cl->block = NULL;
1242 		}
1243 	}
1244 	for (i = 0; i < q->clhash.hashsize; i++) {
1245 		hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i],
1246 					  common.hnode)
1247 			htb_destroy_class(sch, cl);
1248 	}
1249 	qdisc_class_hash_destroy(&q->clhash);
1250 	__qdisc_reset_queue(&q->direct_queue);
1251 }
1252 
1253 static int htb_delete(struct Qdisc *sch, unsigned long arg)
1254 {
1255 	struct htb_sched *q = qdisc_priv(sch);
1256 	struct htb_class *cl = (struct htb_class *)arg;
1257 	struct Qdisc *new_q = NULL;
1258 	int last_child = 0;
1259 
1260 	/* TODO: why don't we allow deleting a subtree? references? does the
1261 	 * tc subsys guarantee us that in htb_destroy it holds no class
1262 	 * refs, so that we can remove children safely there?
1263 	 */
1264 	if (cl->children || cl->filter_cnt)
1265 		return -EBUSY;
1266 
1267 	if (!cl->level && htb_parent_last_child(cl)) {
1268 		new_q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
1269 					  cl->parent->common.classid,
1270 					  NULL);
1271 		last_child = 1;
1272 	}
1273 
1274 	sch_tree_lock(sch);
1275 
1276 	if (!cl->level)
1277 		qdisc_purge_queue(cl->leaf.q);
1278 
1279 	/* delete from hash and active; remainder in destroy_class */
1280 	qdisc_class_hash_remove(&q->clhash, &cl->common);
1281 	if (cl->parent)
1282 		cl->parent->children--;
1283 
1284 	if (cl->prio_activity)
1285 		htb_deactivate(q, cl);
1286 
1287 	if (cl->cmode != HTB_CAN_SEND)
1288 		htb_safe_rb_erase(&cl->pq_node,
1289 				  &q->hlevel[cl->level].wait_pq);
1290 
1291 	if (last_child)
1292 		htb_parent_to_leaf(q, cl, new_q);
1293 
1294 	sch_tree_unlock(sch);
1295 
1296 	htb_destroy_class(sch, cl);
1297 	return 0;
1298 }
1299 
1300 static int htb_change_class(struct Qdisc *sch, u32 classid,
1301 			    u32 parentid, struct nlattr **tca,
1302 			    unsigned long *arg, struct netlink_ext_ack *extack)
1303 {
1304 	int err = -EINVAL;
1305 	struct htb_sched *q = qdisc_priv(sch);
1306 	struct htb_class *cl = (struct htb_class *)*arg, *parent;
1307 	struct nlattr *opt = tca[TCA_OPTIONS];
1308 	struct nlattr *tb[TCA_HTB_MAX + 1];
1309 	struct tc_htb_opt *hopt;
1310 	u64 rate64, ceil64;
1311 	int warn = 0;
1312 
1313 	/* extract all subattrs from opt attr */
1314 	if (!opt)
1315 		goto failure;
1316 
1317 	err = nla_parse_nested_deprecated(tb, TCA_HTB_MAX, opt, htb_policy,
1318 					  NULL);
1319 	if (err < 0)
1320 		goto failure;
1321 
1322 	err = -EINVAL;
1323 	if (tb[TCA_HTB_PARMS] == NULL)
1324 		goto failure;
1325 
1326 	parent = parentid == TC_H_ROOT ? NULL : htb_find(parentid, sch);
1327 
1328 	hopt = nla_data(tb[TCA_HTB_PARMS]);
1329 	if (!hopt->rate.rate || !hopt->ceil.rate)
1330 		goto failure;
1331 
1332 	/* Keep backward compatibility with rate_table based iproute2 tc */
1333 	if (hopt->rate.linklayer == TC_LINKLAYER_UNAWARE)
1334 		qdisc_put_rtab(qdisc_get_rtab(&hopt->rate, tb[TCA_HTB_RTAB],
1335 					      NULL));
1336 
1337 	if (hopt->ceil.linklayer == TC_LINKLAYER_UNAWARE)
1338 		qdisc_put_rtab(qdisc_get_rtab(&hopt->ceil, tb[TCA_HTB_CTAB],
1339 					      NULL));
1340 
1341 	if (!cl) {		/* new class */
1342 		struct Qdisc *new_q;
1343 		int prio;
1344 		struct {
1345 			struct nlattr		nla;
1346 			struct gnet_estimator	opt;
1347 		} est = {
1348 			.nla = {
1349 				.nla_len	= nla_attr_size(sizeof(est.opt)),
1350 				.nla_type	= TCA_RATE,
1351 			},
1352 			.opt = {
1353 				/* 4s interval, 16s averaging constant */
1354 				.interval	= 2,
1355 				.ewma_log	= 2,
1356 			},
1357 		};
1358 
1359 		/* check for valid classid */
1360 		if (!classid || TC_H_MAJ(classid ^ sch->handle) ||
1361 		    htb_find(classid, sch))
1362 			goto failure;
1363 
1364 		/* check maximal depth */
1365 		if (parent && parent->parent && parent->parent->level < 2) {
1366 			pr_err("htb: tree is too deep\n");
1367 			goto failure;
1368 		}
1369 		err = -ENOBUFS;
1370 		cl = kzalloc(sizeof(*cl), GFP_KERNEL);
1371 		if (!cl)
1372 			goto failure;
1373 
1374 		err = tcf_block_get(&cl->block, &cl->filter_list, sch, extack);
1375 		if (err) {
1376 			kfree(cl);
1377 			goto failure;
1378 		}
1379 		if (htb_rate_est || tca[TCA_RATE]) {
1380 			err = gen_new_estimator(&cl->bstats, NULL,
1381 						&cl->rate_est,
1382 						NULL,
1383 						qdisc_root_sleeping_running(sch),
1384 						tca[TCA_RATE] ? : &est.nla);
1385 			if (err) {
1386 				tcf_block_put(cl->block);
1387 				kfree(cl);
1388 				goto failure;
1389 			}
1390 		}
1391 
1392 		cl->children = 0;
1393 		RB_CLEAR_NODE(&cl->pq_node);
1394 
1395 		for (prio = 0; prio < TC_HTB_NUMPRIO; prio++)
1396 			RB_CLEAR_NODE(&cl->node[prio]);
1397 
1398 		/* create leaf qdisc early because it uses kmalloc(GFP_KERNEL)
1399 		 * so it can't be used inside of sch_tree_lock
1400 		 * -- thanks to Karlis Peisenieks
1401 		 */
1402 		new_q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
1403 					  classid, NULL);
1404 		sch_tree_lock(sch);
1405 		if (parent && !parent->level) {
1406 			/* turn parent into inner node */
1407 			qdisc_purge_queue(parent->leaf.q);
1408 			qdisc_put(parent->leaf.q);
1409 			if (parent->prio_activity)
1410 				htb_deactivate(q, parent);
1411 
1412 			/* remove from evt list because of level change */
1413 			if (parent->cmode != HTB_CAN_SEND) {
1414 				htb_safe_rb_erase(&parent->pq_node, &q->hlevel[0].wait_pq);
1415 				parent->cmode = HTB_CAN_SEND;
1416 			}
1417 			parent->level = (parent->parent ? parent->parent->level
1418 					 : TC_HTB_MAXDEPTH) - 1;
1419 			memset(&parent->inner, 0, sizeof(parent->inner));
1420 		}
1421 		/* leaf (we) needs elementary qdisc */
1422 		cl->leaf.q = new_q ? new_q : &noop_qdisc;
1423 
1424 		cl->common.classid = classid;
1425 		cl->parent = parent;
1426 
1427 		/* set class to be in HTB_CAN_SEND state */
1428 		cl->tokens = PSCHED_TICKS2NS(hopt->buffer);
1429 		cl->ctokens = PSCHED_TICKS2NS(hopt->cbuffer);
1430 		cl->mbuffer = 60ULL * NSEC_PER_SEC;	/* 1min */
1431 		cl->t_c = ktime_get_ns();
1432 		cl->cmode = HTB_CAN_SEND;
1433 
1434 		/* attach to the hash list and parent's family */
1435 		qdisc_class_hash_insert(&q->clhash, &cl->common);
1436 		if (parent)
1437 			parent->children++;
1438 		if (cl->leaf.q != &noop_qdisc)
1439 			qdisc_hash_add(cl->leaf.q, true);
1440 	} else {
1441 		if (tca[TCA_RATE]) {
1442 			err = gen_replace_estimator(&cl->bstats, NULL,
1443 						    &cl->rate_est,
1444 						    NULL,
1445 						    qdisc_root_sleeping_running(sch),
1446 						    tca[TCA_RATE]);
1447 			if (err)
1448 				return err;
1449 		}
1450 		sch_tree_lock(sch);
1451 	}
1452 
1453 	rate64 = tb[TCA_HTB_RATE64] ? nla_get_u64(tb[TCA_HTB_RATE64]) : 0;
1454 
1455 	ceil64 = tb[TCA_HTB_CEIL64] ? nla_get_u64(tb[TCA_HTB_CEIL64]) : 0;
1456 
1457 	psched_ratecfg_precompute(&cl->rate, &hopt->rate, rate64);
1458 	psched_ratecfg_precompute(&cl->ceil, &hopt->ceil, ceil64);
1459 
1460 	/* there used to be a nasty bug here: we have to check that the node
1461 	 * is really a leaf before changing cl->leaf !
1462 	 */
1463 	if (!cl->level) {
1464 		u64 quantum = cl->rate.rate_bytes_ps;
1465 
1466 		do_div(quantum, q->rate2quantum);
1467 		cl->quantum = min_t(u64, quantum, INT_MAX);
1468 
1469 		if (!hopt->quantum && cl->quantum < 1000) {
1470 			warn = -1;
1471 			cl->quantum = 1000;
1472 		}
1473 		if (!hopt->quantum && cl->quantum > 200000) {
1474 			warn = 1;
1475 			cl->quantum = 200000;
1476 		}
1477 		if (hopt->quantum)
1478 			cl->quantum = hopt->quantum;
1479 		if ((cl->prio = hopt->prio) >= TC_HTB_NUMPRIO)
1480 			cl->prio = TC_HTB_NUMPRIO - 1;
1481 	}
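
	/* Worked example of the r2q heuristic above (illustrative numbers):
	 * a rate of 10 Mbit/s is 1,250,000 bytes/s; with the default r2q of
	 * 10 the DRR quantum becomes 125,000 bytes, inside the sane
	 * [1000, 200000] window, so no warning fires. A 1 Gbit/s class with
	 * the same r2q would be clamped to 200000 and warned about, hence
	 * "Consider r2q change" below.
	 */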
1482 
1483 	cl->buffer = PSCHED_TICKS2NS(hopt->buffer);
1484 	cl->cbuffer = PSCHED_TICKS2NS(hopt->cbuffer);
1485 
1486 	sch_tree_unlock(sch);
1487 
1488 	if (warn)
1489 		pr_warn("HTB: quantum of class %X is %s. Consider r2q change.\n",
1490 			    cl->common.classid, (warn == -1 ? "small" : "big"));
1491 
1492 	qdisc_class_hash_grow(sch, &q->clhash);
1493 
1494 	*arg = (unsigned long)cl;
1495 	return 0;
1496 
1497 failure:
1498 	return err;
1499 }
1500 
1501 static struct tcf_block *htb_tcf_block(struct Qdisc *sch, unsigned long arg,
1502 				       struct netlink_ext_ack *extack)
1503 {
1504 	struct htb_sched *q = qdisc_priv(sch);
1505 	struct htb_class *cl = (struct htb_class *)arg;
1506 
1507 	return cl ? cl->block : q->block;
1508 }
1509 
1510 static unsigned long htb_bind_filter(struct Qdisc *sch, unsigned long parent,
1511 				     u32 classid)
1512 {
1513 	struct htb_class *cl = htb_find(classid, sch);
1514 
1515 	/*if (cl && !cl->level) return 0;
1516 	 * The line above used to be there to prevent attaching filters to
1517 	 * leaves. But at least the tc_index filter uses this just to get the
1518 	 * class for other reasons, so we have to allow it.
1519 	 * ----
1520 	 * 19.6.2002 As Werner explained it is ok - bind filter is just
1521 	 * another way to "lock" the class - unlike "get" this lock can
1522 	 * be broken by class during destroy IIUC.
1523 	 */
1524 	if (cl)
1525 		cl->filter_cnt++;
1526 	return (unsigned long)cl;
1527 }
1528 
1529 static void htb_unbind_filter(struct Qdisc *sch, unsigned long arg)
1530 {
1531 	struct htb_class *cl = (struct htb_class *)arg;
1532 
1533 	if (cl)
1534 		cl->filter_cnt--;
1535 }
1536 
1537 static void htb_walk(struct Qdisc *sch, struct qdisc_walker *arg)
1538 {
1539 	struct htb_sched *q = qdisc_priv(sch);
1540 	struct htb_class *cl;
1541 	unsigned int i;
1542 
1543 	if (arg->stop)
1544 		return;
1545 
1546 	for (i = 0; i < q->clhash.hashsize; i++) {
1547 		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
1548 			if (arg->count < arg->skip) {
1549 				arg->count++;
1550 				continue;
1551 			}
1552 			if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
1553 				arg->stop = 1;
1554 				return;
1555 			}
1556 			arg->count++;
1557 		}
1558 	}
1559 }
1560 
1561 static const struct Qdisc_class_ops htb_class_ops = {
1562 	.graft		=	htb_graft,
1563 	.leaf		=	htb_leaf,
1564 	.qlen_notify	=	htb_qlen_notify,
1565 	.find		=	htb_search,
1566 	.change		=	htb_change_class,
1567 	.delete		=	htb_delete,
1568 	.walk		=	htb_walk,
1569 	.tcf_block	=	htb_tcf_block,
1570 	.bind_tcf	=	htb_bind_filter,
1571 	.unbind_tcf	=	htb_unbind_filter,
1572 	.dump		=	htb_dump_class,
1573 	.dump_stats	=	htb_dump_class_stats,
1574 };
1575 
1576 static struct Qdisc_ops htb_qdisc_ops __read_mostly = {
1577 	.cl_ops		=	&htb_class_ops,
1578 	.id		=	"htb",
1579 	.priv_size	=	sizeof(struct htb_sched),
1580 	.enqueue	=	htb_enqueue,
1581 	.dequeue	=	htb_dequeue,
1582 	.peek		=	qdisc_peek_dequeued,
1583 	.init		=	htb_init,
1584 	.reset		=	htb_reset,
1585 	.destroy	=	htb_destroy,
1586 	.dump		=	htb_dump,
1587 	.owner		=	THIS_MODULE,
1588 };
1589 
1590 static int __init htb_module_init(void)
1591 {
1592 	return register_qdisc(&htb_qdisc_ops);
1593 }
1594 static void __exit htb_module_exit(void)
1595 {
1596 	unregister_qdisc(&htb_qdisc_ops);
1597 }
1598 
1599 module_init(htb_module_init)
1600 module_exit(htb_module_exit)
1601 MODULE_LICENSE("GPL");
1602