xref: /openbmc/linux/net/sched/sch_htb.c (revision b68e31d0)
1 /*
2  * net/sched/sch_htb.c	Hierarchical token bucket, feed tree version
3  *
4  *		This program is free software; you can redistribute it and/or
5  *		modify it under the terms of the GNU General Public License
6  *		as published by the Free Software Foundation; either version
7  *		2 of the License, or (at your option) any later version.
8  *
9  * Authors:	Martin Devera, <devik@cdi.cz>
10  *
11  * Credits (in time order) for older HTB versions:
12  *              Stef Coene <stef.coene@docum.org>
13  *			HTB support at LARTC mailing list
14  *		Ondrej Kraus, <krauso@barr.cz>
15  *			found missing INIT_QDISC(htb)
16  *		Vladimir Smelhaus, Aamer Akhter, Bert Hubert
17  *			helped a lot to locate nasty class stall bug
18  *		Andi Kleen, Jamal Hadi, Bert Hubert
19  *			code review and helpful comments on shaping
20  *		Tomasz Wrona, <tw@eter.tym.pl>
21  *			created test case so that I was able to fix nasty bug
22  *		Wilfried Weissmann
23  *			spotted bug in dequeue code and helped with fix
24  *		Jiri Fojtasek
25  *			fixed requeue routine
26  *		and many others. thanks.
27  *
28  * $Id: sch_htb.c,v 1.25 2003/12/07 11:08:25 devik Exp devik $
29  */
30 #include <linux/module.h>
31 #include <asm/uaccess.h>
32 #include <asm/system.h>
33 #include <linux/bitops.h>
34 #include <linux/types.h>
35 #include <linux/kernel.h>
36 #include <linux/sched.h>
37 #include <linux/string.h>
38 #include <linux/mm.h>
39 #include <linux/socket.h>
40 #include <linux/sockios.h>
41 #include <linux/in.h>
42 #include <linux/errno.h>
43 #include <linux/interrupt.h>
44 #include <linux/if_ether.h>
45 #include <linux/inet.h>
46 #include <linux/netdevice.h>
47 #include <linux/etherdevice.h>
48 #include <linux/notifier.h>
49 #include <net/ip.h>
50 #include <net/route.h>
51 #include <linux/skbuff.h>
52 #include <linux/list.h>
53 #include <linux/compiler.h>
54 #include <net/sock.h>
55 #include <net/pkt_sched.h>
56 #include <linux/rbtree.h>
57 
58 /* HTB algorithm.
59     Author: devik@cdi.cz
60     ========================================================================
61     HTB is like TBF with multiple classes. It is also similar to CBQ because
62     it allows to assign priority to each class in hierarchy.
63     In fact it is another implementation of Floyd's formal sharing.
64 
65     Levels:
66     Each class is assigned level. Leaf has ALWAYS level 0 and root
67     classes have level TC_HTB_MAXDEPTH-1. Interior nodes has level
68     one less than their parent.
69 */
70 
71 #define HTB_HSIZE 16		/* classid hash size */
72 #define HTB_EWMAC 2		/* rate average over HTB_EWMAC*HTB_HSIZE sec */
73 #define HTB_RATECM 1		/* whether to use rate computer */
74 #define HTB_HYSTERESIS 1	/* whether to use mode hysteresis for speedup */
75 #define HTB_VER 0x30011		/* major must be matched with number suplied by TC as version */
76 
77 #if HTB_VER >> 16 != TC_HTB_PROTOVER
78 #error "Mismatched sch_htb.c and pkt_sch.h"
79 #endif
80 
81 /* used internaly to keep status of single class */
82 enum htb_cmode {
83 	HTB_CANT_SEND,		/* class can't send and can't borrow */
84 	HTB_MAY_BORROW,		/* class can't send but may borrow */
85 	HTB_CAN_SEND		/* class can send */
86 };
87 
88 /* interior & leaf nodes; props specific to leaves are marked L: */
89 struct htb_class {
90 	/* general class parameters */
91 	u32 classid;
92 	struct gnet_stats_basic bstats;
93 	struct gnet_stats_queue qstats;
94 	struct gnet_stats_rate_est rate_est;
95 	struct tc_htb_xstats xstats;	/* our special stats */
96 	int refcnt;		/* usage count of this class */
97 
98 #ifdef HTB_RATECM
99 	/* rate measurement counters */
100 	unsigned long rate_bytes, sum_bytes;
101 	unsigned long rate_packets, sum_packets;
102 #endif
103 
104 	/* topology */
105 	int level;		/* our level (see above) */
106 	struct htb_class *parent;	/* parent class */
107 	struct hlist_node hlist;	/* classid hash list item */
108 	struct list_head sibling;	/* sibling list item */
109 	struct list_head children;	/* children list */
110 
111 	union {
112 		struct htb_class_leaf {
113 			struct Qdisc *q;
114 			int prio;
115 			int aprio;
116 			int quantum;
117 			int deficit[TC_HTB_MAXDEPTH];
118 			struct list_head drop_list;
119 		} leaf;
120 		struct htb_class_inner {
121 			struct rb_root feed[TC_HTB_NUMPRIO];	/* feed trees */
122 			struct rb_node *ptr[TC_HTB_NUMPRIO];	/* current class ptr */
123 			/* When class changes from state 1->2 and disconnects from
124 			   parent's feed then we lost ptr value and start from the
125 			   first child again. Here we store classid of the
126 			   last valid ptr (used when ptr is NULL). */
127 			u32 last_ptr_id[TC_HTB_NUMPRIO];
128 		} inner;
129 	} un;
130 	struct rb_node node[TC_HTB_NUMPRIO];	/* node for self or feed tree */
131 	struct rb_node pq_node;	/* node for event queue */
132 	unsigned long pq_key;	/* the same type as jiffies global */
133 
134 	int prio_activity;	/* for which prios are we active */
135 	enum htb_cmode cmode;	/* current mode of the class */
136 
137 	/* class attached filters */
138 	struct tcf_proto *filter_list;
139 	int filter_cnt;
140 
141 	int warned;		/* only one warning about non work conserving .. */
142 
143 	/* token bucket parameters */
144 	struct qdisc_rate_table *rate;	/* rate table of the class itself */
145 	struct qdisc_rate_table *ceil;	/* ceiling rate (limits borrows too) */
146 	long buffer, cbuffer;	/* token bucket depth/rate */
147 	psched_tdiff_t mbuffer;	/* max wait time */
148 	long tokens, ctokens;	/* current number of tokens */
149 	psched_time_t t_c;	/* checkpoint time */
150 };
151 
152 /* TODO: maybe compute rate when size is too large .. or drop ? */
153 static inline long L2T(struct htb_class *cl, struct qdisc_rate_table *rate,
154 			   int size)
155 {
156 	int slot = size >> rate->rate.cell_log;
157 	if (slot > 255) {
158 		cl->xstats.giants++;
159 		slot = 255;
160 	}
161 	return rate->data[slot];
162 }
163 
164 struct htb_sched {
165 	struct list_head root;	/* root classes list */
166 	struct hlist_head hash[HTB_HSIZE];	/* hashed by classid */
167 	struct list_head drops[TC_HTB_NUMPRIO];/* active leaves (for drops) */
168 
169 	/* self list - roots of self generating tree */
170 	struct rb_root row[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO];
171 	int row_mask[TC_HTB_MAXDEPTH];
172 	struct rb_node *ptr[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO];
173 	u32 last_ptr_id[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO];
174 
175 	/* self wait list - roots of wait PQs per row */
176 	struct rb_root wait_pq[TC_HTB_MAXDEPTH];
177 
178 	/* time of nearest event per level (row) */
179 	unsigned long near_ev_cache[TC_HTB_MAXDEPTH];
180 
181 	/* cached value of jiffies in dequeue */
182 	unsigned long jiffies;
183 
184 	/* whether we hit non-work conserving class during this dequeue; we use */
185 	int nwc_hit;		/* this to disable mindelay complaint in dequeue */
186 
187 	int defcls;		/* class where unclassified flows go to */
188 
189 	/* filters for qdisc itself */
190 	struct tcf_proto *filter_list;
191 	int filter_cnt;
192 
193 	int rate2quantum;	/* quant = rate / rate2quantum */
194 	psched_time_t now;	/* cached dequeue time */
195 	struct timer_list timer;	/* send delay timer */
196 #ifdef HTB_RATECM
197 	struct timer_list rttim;	/* rate computer timer */
198 	int recmp_bucket;	/* which hash bucket to recompute next */
199 #endif
200 
201 	/* non shaped skbs; let them go directly thru */
202 	struct sk_buff_head direct_queue;
203 	int direct_qlen;	/* max qlen of above */
204 
205 	long direct_pkts;
206 };
207 
208 /* compute hash of size HTB_HSIZE for given handle */
209 static inline int htb_hash(u32 h)
210 {
211 #if HTB_HSIZE != 16
212 #error "Declare new hash for your HTB_HSIZE"
213 #endif
214 	h ^= h >> 8;		/* stolen from cbq_hash */
215 	h ^= h >> 4;
216 	return h & 0xf;
217 }
218 
219 /* find class in global hash table using given handle */
220 static inline struct htb_class *htb_find(u32 handle, struct Qdisc *sch)
221 {
222 	struct htb_sched *q = qdisc_priv(sch);
223 	struct hlist_node *p;
224 	struct htb_class *cl;
225 
226 	if (TC_H_MAJ(handle) != sch->handle)
227 		return NULL;
228 
229 	hlist_for_each_entry(cl, p, q->hash + htb_hash(handle), hlist) {
230 		if (cl->classid == handle)
231 			return cl;
232 	}
233 	return NULL;
234 }
235 
236 /**
237  * htb_classify - classify a packet into class
238  *
239  * It returns NULL if the packet should be dropped or -1 if the packet
240  * should be passed directly thru. In all other cases leaf class is returned.
241  * We allow direct class selection by classid in priority. The we examine
242  * filters in qdisc and in inner nodes (if higher filter points to the inner
243  * node). If we end up with classid MAJOR:0 we enqueue the skb into special
244  * internal fifo (direct). These packets then go directly thru. If we still
245  * have no valid leaf we try to use MAJOR:default leaf. It still unsuccessfull
246  * then finish and return direct queue.
247  */
248 #define HTB_DIRECT (struct htb_class*)-1
249 static inline u32 htb_classid(struct htb_class *cl)
250 {
251 	return (cl && cl != HTB_DIRECT) ? cl->classid : TC_H_UNSPEC;
252 }
253 
254 static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch,
255 				      int *qerr)
256 {
257 	struct htb_sched *q = qdisc_priv(sch);
258 	struct htb_class *cl;
259 	struct tcf_result res;
260 	struct tcf_proto *tcf;
261 	int result;
262 
263 	/* allow to select class by setting skb->priority to valid classid;
264 	   note that nfmark can be used too by attaching filter fw with no
265 	   rules in it */
266 	if (skb->priority == sch->handle)
267 		return HTB_DIRECT;	/* X:0 (direct flow) selected */
268 	if ((cl = htb_find(skb->priority, sch)) != NULL && cl->level == 0)
269 		return cl;
270 
271 	*qerr = NET_XMIT_BYPASS;
272 	tcf = q->filter_list;
273 	while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) {
274 #ifdef CONFIG_NET_CLS_ACT
275 		switch (result) {
276 		case TC_ACT_QUEUED:
277 		case TC_ACT_STOLEN:
278 			*qerr = NET_XMIT_SUCCESS;
279 		case TC_ACT_SHOT:
280 			return NULL;
281 		}
282 #elif defined(CONFIG_NET_CLS_POLICE)
283 		if (result == TC_POLICE_SHOT)
284 			return HTB_DIRECT;
285 #endif
286 		if ((cl = (void *)res.class) == NULL) {
287 			if (res.classid == sch->handle)
288 				return HTB_DIRECT;	/* X:0 (direct flow) */
289 			if ((cl = htb_find(res.classid, sch)) == NULL)
290 				break;	/* filter selected invalid classid */
291 		}
292 		if (!cl->level)
293 			return cl;	/* we hit leaf; return it */
294 
295 		/* we have got inner class; apply inner filter chain */
296 		tcf = cl->filter_list;
297 	}
298 	/* classification failed; try to use default class */
299 	cl = htb_find(TC_H_MAKE(TC_H_MAJ(sch->handle), q->defcls), sch);
300 	if (!cl || cl->level)
301 		return HTB_DIRECT;	/* bad default .. this is safe bet */
302 	return cl;
303 }
304 
305 /**
306  * htb_add_to_id_tree - adds class to the round robin list
307  *
308  * Routine adds class to the list (actually tree) sorted by classid.
309  * Make sure that class is not already on such list for given prio.
310  */
311 static void htb_add_to_id_tree(struct rb_root *root,
312 			       struct htb_class *cl, int prio)
313 {
314 	struct rb_node **p = &root->rb_node, *parent = NULL;
315 
316 	while (*p) {
317 		struct htb_class *c;
318 		parent = *p;
319 		c = rb_entry(parent, struct htb_class, node[prio]);
320 
321 		if (cl->classid > c->classid)
322 			p = &parent->rb_right;
323 		else
324 			p = &parent->rb_left;
325 	}
326 	rb_link_node(&cl->node[prio], parent, p);
327 	rb_insert_color(&cl->node[prio], root);
328 }
329 
330 /**
331  * htb_add_to_wait_tree - adds class to the event queue with delay
332  *
333  * The class is added to priority event queue to indicate that class will
334  * change its mode in cl->pq_key microseconds. Make sure that class is not
335  * already in the queue.
336  */
337 static void htb_add_to_wait_tree(struct htb_sched *q,
338 				 struct htb_class *cl, long delay)
339 {
340 	struct rb_node **p = &q->wait_pq[cl->level].rb_node, *parent = NULL;
341 
342 	cl->pq_key = q->jiffies + PSCHED_US2JIFFIE(delay);
343 	if (cl->pq_key == q->jiffies)
344 		cl->pq_key++;
345 
346 	/* update the nearest event cache */
347 	if (time_after(q->near_ev_cache[cl->level], cl->pq_key))
348 		q->near_ev_cache[cl->level] = cl->pq_key;
349 
350 	while (*p) {
351 		struct htb_class *c;
352 		parent = *p;
353 		c = rb_entry(parent, struct htb_class, pq_node);
354 		if (time_after_eq(cl->pq_key, c->pq_key))
355 			p = &parent->rb_right;
356 		else
357 			p = &parent->rb_left;
358 	}
359 	rb_link_node(&cl->pq_node, parent, p);
360 	rb_insert_color(&cl->pq_node, &q->wait_pq[cl->level]);
361 }
362 
363 /**
364  * htb_next_rb_node - finds next node in binary tree
365  *
366  * When we are past last key we return NULL.
367  * Average complexity is 2 steps per call.
368  */
369 static inline void htb_next_rb_node(struct rb_node **n)
370 {
371 	*n = rb_next(*n);
372 }
373 
374 /**
375  * htb_add_class_to_row - add class to its row
376  *
377  * The class is added to row at priorities marked in mask.
378  * It does nothing if mask == 0.
379  */
380 static inline void htb_add_class_to_row(struct htb_sched *q,
381 					struct htb_class *cl, int mask)
382 {
383 	q->row_mask[cl->level] |= mask;
384 	while (mask) {
385 		int prio = ffz(~mask);
386 		mask &= ~(1 << prio);
387 		htb_add_to_id_tree(q->row[cl->level] + prio, cl, prio);
388 	}
389 }
390 
391 /* If this triggers, it is a bug in this code, but it need not be fatal */
392 static void htb_safe_rb_erase(struct rb_node *rb, struct rb_root *root)
393 {
394 	if (!RB_EMPTY_NODE(rb)) {
395 		WARN_ON(1);
396 	} else {
397 		rb_erase(rb, root);
398 		RB_CLEAR_NODE(rb);
399 	}
400 }
401 
402 
403 /**
404  * htb_remove_class_from_row - removes class from its row
405  *
406  * The class is removed from row at priorities marked in mask.
407  * It does nothing if mask == 0.
408  */
409 static inline void htb_remove_class_from_row(struct htb_sched *q,
410 						 struct htb_class *cl, int mask)
411 {
412 	int m = 0;
413 
414 	while (mask) {
415 		int prio = ffz(~mask);
416 
417 		mask &= ~(1 << prio);
418 		if (q->ptr[cl->level][prio] == cl->node + prio)
419 			htb_next_rb_node(q->ptr[cl->level] + prio);
420 
421 		htb_safe_rb_erase(cl->node + prio, q->row[cl->level] + prio);
422 		if (!q->row[cl->level][prio].rb_node)
423 			m |= 1 << prio;
424 	}
425 	q->row_mask[cl->level] &= ~m;
426 }
427 
428 /**
429  * htb_activate_prios - creates active classe's feed chain
430  *
431  * The class is connected to ancestors and/or appropriate rows
432  * for priorities it is participating on. cl->cmode must be new
433  * (activated) mode. It does nothing if cl->prio_activity == 0.
434  */
435 static void htb_activate_prios(struct htb_sched *q, struct htb_class *cl)
436 {
437 	struct htb_class *p = cl->parent;
438 	long m, mask = cl->prio_activity;
439 
440 	while (cl->cmode == HTB_MAY_BORROW && p && mask) {
441 		m = mask;
442 		while (m) {
443 			int prio = ffz(~m);
444 			m &= ~(1 << prio);
445 
446 			if (p->un.inner.feed[prio].rb_node)
447 				/* parent already has its feed in use so that
448 				   reset bit in mask as parent is already ok */
449 				mask &= ~(1 << prio);
450 
451 			htb_add_to_id_tree(p->un.inner.feed + prio, cl, prio);
452 		}
453 		p->prio_activity |= mask;
454 		cl = p;
455 		p = cl->parent;
456 
457 	}
458 	if (cl->cmode == HTB_CAN_SEND && mask)
459 		htb_add_class_to_row(q, cl, mask);
460 }
461 
462 /**
463  * htb_deactivate_prios - remove class from feed chain
464  *
465  * cl->cmode must represent old mode (before deactivation). It does
466  * nothing if cl->prio_activity == 0. Class is removed from all feed
467  * chains and rows.
468  */
469 static void htb_deactivate_prios(struct htb_sched *q, struct htb_class *cl)
470 {
471 	struct htb_class *p = cl->parent;
472 	long m, mask = cl->prio_activity;
473 
474 	while (cl->cmode == HTB_MAY_BORROW && p && mask) {
475 		m = mask;
476 		mask = 0;
477 		while (m) {
478 			int prio = ffz(~m);
479 			m &= ~(1 << prio);
480 
481 			if (p->un.inner.ptr[prio] == cl->node + prio) {
482 				/* we are removing child which is pointed to from
483 				   parent feed - forget the pointer but remember
484 				   classid */
485 				p->un.inner.last_ptr_id[prio] = cl->classid;
486 				p->un.inner.ptr[prio] = NULL;
487 			}
488 
489 			htb_safe_rb_erase(cl->node + prio, p->un.inner.feed + prio);
490 
491 			if (!p->un.inner.feed[prio].rb_node)
492 				mask |= 1 << prio;
493 		}
494 
495 		p->prio_activity &= ~mask;
496 		cl = p;
497 		p = cl->parent;
498 
499 	}
500 	if (cl->cmode == HTB_CAN_SEND && mask)
501 		htb_remove_class_from_row(q, cl, mask);
502 }
503 
504 #if HTB_HYSTERESIS
505 static inline long htb_lowater(const struct htb_class *cl)
506 {
507 	return cl->cmode != HTB_CANT_SEND ? -cl->cbuffer : 0;
508 }
509 static inline long htb_hiwater(const struct htb_class *cl)
510 {
511 	return cl->cmode == HTB_CAN_SEND ? -cl->buffer : 0;
512 }
513 #else
514 #define htb_lowater(cl)	(0)
515 #define htb_hiwater(cl)	(0)
516 #endif
517 
518 /**
519  * htb_class_mode - computes and returns current class mode
520  *
521  * It computes cl's mode at time cl->t_c+diff and returns it. If mode
522  * is not HTB_CAN_SEND then cl->pq_key is updated to time difference
523  * from now to time when cl will change its state.
524  * Also it is worth to note that class mode doesn't change simply
525  * at cl->{c,}tokens == 0 but there can rather be hysteresis of
526  * 0 .. -cl->{c,}buffer range. It is meant to limit number of
527  * mode transitions per time unit. The speed gain is about 1/6.
528  */
529 static inline enum htb_cmode
530 htb_class_mode(struct htb_class *cl, long *diff)
531 {
532 	long toks;
533 
534 	if ((toks = (cl->ctokens + *diff)) < htb_lowater(cl)) {
535 		*diff = -toks;
536 		return HTB_CANT_SEND;
537 	}
538 
539 	if ((toks = (cl->tokens + *diff)) >= htb_hiwater(cl))
540 		return HTB_CAN_SEND;
541 
542 	*diff = -toks;
543 	return HTB_MAY_BORROW;
544 }
545 
546 /**
547  * htb_change_class_mode - changes classe's mode
548  *
549  * This should be the only way how to change classe's mode under normal
550  * cirsumstances. Routine will update feed lists linkage, change mode
551  * and add class to the wait event queue if appropriate. New mode should
552  * be different from old one and cl->pq_key has to be valid if changing
553  * to mode other than HTB_CAN_SEND (see htb_add_to_wait_tree).
554  */
555 static void
556 htb_change_class_mode(struct htb_sched *q, struct htb_class *cl, long *diff)
557 {
558 	enum htb_cmode new_mode = htb_class_mode(cl, diff);
559 
560 	if (new_mode == cl->cmode)
561 		return;
562 
563 	if (cl->prio_activity) {	/* not necessary: speed optimization */
564 		if (cl->cmode != HTB_CANT_SEND)
565 			htb_deactivate_prios(q, cl);
566 		cl->cmode = new_mode;
567 		if (new_mode != HTB_CANT_SEND)
568 			htb_activate_prios(q, cl);
569 	} else
570 		cl->cmode = new_mode;
571 }
572 
573 /**
574  * htb_activate - inserts leaf cl into appropriate active feeds
575  *
576  * Routine learns (new) priority of leaf and activates feed chain
577  * for the prio. It can be called on already active leaf safely.
578  * It also adds leaf into droplist.
579  */
580 static inline void htb_activate(struct htb_sched *q, struct htb_class *cl)
581 {
582 	BUG_TRAP(!cl->level && cl->un.leaf.q && cl->un.leaf.q->q.qlen);
583 
584 	if (!cl->prio_activity) {
585 		cl->prio_activity = 1 << (cl->un.leaf.aprio = cl->un.leaf.prio);
586 		htb_activate_prios(q, cl);
587 		list_add_tail(&cl->un.leaf.drop_list,
588 			      q->drops + cl->un.leaf.aprio);
589 	}
590 }
591 
592 /**
593  * htb_deactivate - remove leaf cl from active feeds
594  *
595  * Make sure that leaf is active. In the other words it can't be called
596  * with non-active leaf. It also removes class from the drop list.
597  */
598 static inline void htb_deactivate(struct htb_sched *q, struct htb_class *cl)
599 {
600 	BUG_TRAP(cl->prio_activity);
601 
602 	htb_deactivate_prios(q, cl);
603 	cl->prio_activity = 0;
604 	list_del_init(&cl->un.leaf.drop_list);
605 }
606 
607 static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
608 {
609 	int ret;
610 	struct htb_sched *q = qdisc_priv(sch);
611 	struct htb_class *cl = htb_classify(skb, sch, &ret);
612 
613 	if (cl == HTB_DIRECT) {
614 		/* enqueue to helper queue */
615 		if (q->direct_queue.qlen < q->direct_qlen) {
616 			__skb_queue_tail(&q->direct_queue, skb);
617 			q->direct_pkts++;
618 		} else {
619 			kfree_skb(skb);
620 			sch->qstats.drops++;
621 			return NET_XMIT_DROP;
622 		}
623 #ifdef CONFIG_NET_CLS_ACT
624 	} else if (!cl) {
625 		if (ret == NET_XMIT_BYPASS)
626 			sch->qstats.drops++;
627 		kfree_skb(skb);
628 		return ret;
629 #endif
630 	} else if (cl->un.leaf.q->enqueue(skb, cl->un.leaf.q) !=
631 		   NET_XMIT_SUCCESS) {
632 		sch->qstats.drops++;
633 		cl->qstats.drops++;
634 		return NET_XMIT_DROP;
635 	} else {
636 		cl->bstats.packets++;
637 		cl->bstats.bytes += skb->len;
638 		htb_activate(q, cl);
639 	}
640 
641 	sch->q.qlen++;
642 	sch->bstats.packets++;
643 	sch->bstats.bytes += skb->len;
644 	return NET_XMIT_SUCCESS;
645 }
646 
647 /* TODO: requeuing packet charges it to policers again !! */
648 static int htb_requeue(struct sk_buff *skb, struct Qdisc *sch)
649 {
650 	struct htb_sched *q = qdisc_priv(sch);
651 	int ret = NET_XMIT_SUCCESS;
652 	struct htb_class *cl = htb_classify(skb, sch, &ret);
653 	struct sk_buff *tskb;
654 
655 	if (cl == HTB_DIRECT || !cl) {
656 		/* enqueue to helper queue */
657 		if (q->direct_queue.qlen < q->direct_qlen && cl) {
658 			__skb_queue_head(&q->direct_queue, skb);
659 		} else {
660 			__skb_queue_head(&q->direct_queue, skb);
661 			tskb = __skb_dequeue_tail(&q->direct_queue);
662 			kfree_skb(tskb);
663 			sch->qstats.drops++;
664 			return NET_XMIT_CN;
665 		}
666 	} else if (cl->un.leaf.q->ops->requeue(skb, cl->un.leaf.q) !=
667 		   NET_XMIT_SUCCESS) {
668 		sch->qstats.drops++;
669 		cl->qstats.drops++;
670 		return NET_XMIT_DROP;
671 	} else
672 		htb_activate(q, cl);
673 
674 	sch->q.qlen++;
675 	sch->qstats.requeues++;
676 	return NET_XMIT_SUCCESS;
677 }
678 
679 static void htb_timer(unsigned long arg)
680 {
681 	struct Qdisc *sch = (struct Qdisc *)arg;
682 	sch->flags &= ~TCQ_F_THROTTLED;
683 	wmb();
684 	netif_schedule(sch->dev);
685 }
686 
687 #ifdef HTB_RATECM
688 #define RT_GEN(D,R) R+=D-(R/HTB_EWMAC);D=0
689 static void htb_rate_timer(unsigned long arg)
690 {
691 	struct Qdisc *sch = (struct Qdisc *)arg;
692 	struct htb_sched *q = qdisc_priv(sch);
693 	struct hlist_node *p;
694 	struct htb_class *cl;
695 
696 
697 	/* lock queue so that we can muck with it */
698 	spin_lock_bh(&sch->dev->queue_lock);
699 
700 	q->rttim.expires = jiffies + HZ;
701 	add_timer(&q->rttim);
702 
703 	/* scan and recompute one bucket at time */
704 	if (++q->recmp_bucket >= HTB_HSIZE)
705 		q->recmp_bucket = 0;
706 
707 	hlist_for_each_entry(cl,p, q->hash + q->recmp_bucket, hlist) {
708 		RT_GEN(cl->sum_bytes, cl->rate_bytes);
709 		RT_GEN(cl->sum_packets, cl->rate_packets);
710 	}
711 	spin_unlock_bh(&sch->dev->queue_lock);
712 }
713 #endif
714 
715 /**
716  * htb_charge_class - charges amount "bytes" to leaf and ancestors
717  *
718  * Routine assumes that packet "bytes" long was dequeued from leaf cl
719  * borrowing from "level". It accounts bytes to ceil leaky bucket for
720  * leaf and all ancestors and to rate bucket for ancestors at levels
721  * "level" and higher. It also handles possible change of mode resulting
722  * from the update. Note that mode can also increase here (MAY_BORROW to
723  * CAN_SEND) because we can use more precise clock that event queue here.
724  * In such case we remove class from event queue first.
725  */
726 static void htb_charge_class(struct htb_sched *q, struct htb_class *cl,
727 			     int level, int bytes)
728 {
729 	long toks, diff;
730 	enum htb_cmode old_mode;
731 
732 #define HTB_ACCNT(T,B,R) toks = diff + cl->T; \
733 	if (toks > cl->B) toks = cl->B; \
734 	toks -= L2T(cl, cl->R, bytes); \
735 	if (toks <= -cl->mbuffer) toks = 1-cl->mbuffer; \
736 	cl->T = toks
737 
738 	while (cl) {
739 		diff = PSCHED_TDIFF_SAFE(q->now, cl->t_c, (u32) cl->mbuffer);
740 		if (cl->level >= level) {
741 			if (cl->level == level)
742 				cl->xstats.lends++;
743 			HTB_ACCNT(tokens, buffer, rate);
744 		} else {
745 			cl->xstats.borrows++;
746 			cl->tokens += diff;	/* we moved t_c; update tokens */
747 		}
748 		HTB_ACCNT(ctokens, cbuffer, ceil);
749 		cl->t_c = q->now;
750 
751 		old_mode = cl->cmode;
752 		diff = 0;
753 		htb_change_class_mode(q, cl, &diff);
754 		if (old_mode != cl->cmode) {
755 			if (old_mode != HTB_CAN_SEND)
756 				htb_safe_rb_erase(&cl->pq_node, q->wait_pq + cl->level);
757 			if (cl->cmode != HTB_CAN_SEND)
758 				htb_add_to_wait_tree(q, cl, diff);
759 		}
760 #ifdef HTB_RATECM
761 		/* update rate counters */
762 		cl->sum_bytes += bytes;
763 		cl->sum_packets++;
764 #endif
765 
766 		/* update byte stats except for leaves which are already updated */
767 		if (cl->level) {
768 			cl->bstats.bytes += bytes;
769 			cl->bstats.packets++;
770 		}
771 		cl = cl->parent;
772 	}
773 }
774 
775 /**
776  * htb_do_events - make mode changes to classes at the level
777  *
778  * Scans event queue for pending events and applies them. Returns jiffies to
779  * next pending event (0 for no event in pq).
780  * Note: Aplied are events whose have cl->pq_key <= jiffies.
781  */
782 static long htb_do_events(struct htb_sched *q, int level)
783 {
784 	int i;
785 
786 	for (i = 0; i < 500; i++) {
787 		struct htb_class *cl;
788 		long diff;
789 		struct rb_node *p = q->wait_pq[level].rb_node;
790 		if (!p)
791 			return 0;
792 		while (p->rb_left)
793 			p = p->rb_left;
794 
795 		cl = rb_entry(p, struct htb_class, pq_node);
796 		if (time_after(cl->pq_key, q->jiffies)) {
797 			return cl->pq_key - q->jiffies;
798 		}
799 		htb_safe_rb_erase(p, q->wait_pq + level);
800 		diff = PSCHED_TDIFF_SAFE(q->now, cl->t_c, (u32) cl->mbuffer);
801 		htb_change_class_mode(q, cl, &diff);
802 		if (cl->cmode != HTB_CAN_SEND)
803 			htb_add_to_wait_tree(q, cl, diff);
804 	}
805 	if (net_ratelimit())
806 		printk(KERN_WARNING "htb: too many events !\n");
807 	return HZ / 10;
808 }
809 
810 /* Returns class->node+prio from id-tree where classe's id is >= id. NULL
811    is no such one exists. */
812 static struct rb_node *htb_id_find_next_upper(int prio, struct rb_node *n,
813 					      u32 id)
814 {
815 	struct rb_node *r = NULL;
816 	while (n) {
817 		struct htb_class *cl =
818 		    rb_entry(n, struct htb_class, node[prio]);
819 		if (id == cl->classid)
820 			return n;
821 
822 		if (id > cl->classid) {
823 			n = n->rb_right;
824 		} else {
825 			r = n;
826 			n = n->rb_left;
827 		}
828 	}
829 	return r;
830 }
831 
832 /**
833  * htb_lookup_leaf - returns next leaf class in DRR order
834  *
835  * Find leaf where current feed pointers points to.
836  */
837 static struct htb_class *htb_lookup_leaf(struct rb_root *tree, int prio,
838 					 struct rb_node **pptr, u32 * pid)
839 {
840 	int i;
841 	struct {
842 		struct rb_node *root;
843 		struct rb_node **pptr;
844 		u32 *pid;
845 	} stk[TC_HTB_MAXDEPTH], *sp = stk;
846 
847 	BUG_TRAP(tree->rb_node);
848 	sp->root = tree->rb_node;
849 	sp->pptr = pptr;
850 	sp->pid = pid;
851 
852 	for (i = 0; i < 65535; i++) {
853 		if (!*sp->pptr && *sp->pid) {
854 			/* ptr was invalidated but id is valid - try to recover
855 			   the original or next ptr */
856 			*sp->pptr =
857 			    htb_id_find_next_upper(prio, sp->root, *sp->pid);
858 		}
859 		*sp->pid = 0;	/* ptr is valid now so that remove this hint as it
860 				   can become out of date quickly */
861 		if (!*sp->pptr) {	/* we are at right end; rewind & go up */
862 			*sp->pptr = sp->root;
863 			while ((*sp->pptr)->rb_left)
864 				*sp->pptr = (*sp->pptr)->rb_left;
865 			if (sp > stk) {
866 				sp--;
867 				BUG_TRAP(*sp->pptr);
868 				if (!*sp->pptr)
869 					return NULL;
870 				htb_next_rb_node(sp->pptr);
871 			}
872 		} else {
873 			struct htb_class *cl;
874 			cl = rb_entry(*sp->pptr, struct htb_class, node[prio]);
875 			if (!cl->level)
876 				return cl;
877 			(++sp)->root = cl->un.inner.feed[prio].rb_node;
878 			sp->pptr = cl->un.inner.ptr + prio;
879 			sp->pid = cl->un.inner.last_ptr_id + prio;
880 		}
881 	}
882 	BUG_TRAP(0);
883 	return NULL;
884 }
885 
886 /* dequeues packet at given priority and level; call only if
887    you are sure that there is active class at prio/level */
888 static struct sk_buff *htb_dequeue_tree(struct htb_sched *q, int prio,
889 					int level)
890 {
891 	struct sk_buff *skb = NULL;
892 	struct htb_class *cl, *start;
893 	/* look initial class up in the row */
894 	start = cl = htb_lookup_leaf(q->row[level] + prio, prio,
895 				     q->ptr[level] + prio,
896 				     q->last_ptr_id[level] + prio);
897 
898 	do {
899 next:
900 		BUG_TRAP(cl);
901 		if (!cl)
902 			return NULL;
903 
904 		/* class can be empty - it is unlikely but can be true if leaf
905 		   qdisc drops packets in enqueue routine or if someone used
906 		   graft operation on the leaf since last dequeue;
907 		   simply deactivate and skip such class */
908 		if (unlikely(cl->un.leaf.q->q.qlen == 0)) {
909 			struct htb_class *next;
910 			htb_deactivate(q, cl);
911 
912 			/* row/level might become empty */
913 			if ((q->row_mask[level] & (1 << prio)) == 0)
914 				return NULL;
915 
916 			next = htb_lookup_leaf(q->row[level] + prio,
917 					       prio, q->ptr[level] + prio,
918 					       q->last_ptr_id[level] + prio);
919 
920 			if (cl == start)	/* fix start if we just deleted it */
921 				start = next;
922 			cl = next;
923 			goto next;
924 		}
925 
926 		skb = cl->un.leaf.q->dequeue(cl->un.leaf.q);
927 		if (likely(skb != NULL))
928 			break;
929 		if (!cl->warned) {
930 			printk(KERN_WARNING
931 			       "htb: class %X isn't work conserving ?!\n",
932 			       cl->classid);
933 			cl->warned = 1;
934 		}
935 		q->nwc_hit++;
936 		htb_next_rb_node((level ? cl->parent->un.inner.ptr : q->
937 				  ptr[0]) + prio);
938 		cl = htb_lookup_leaf(q->row[level] + prio, prio,
939 				     q->ptr[level] + prio,
940 				     q->last_ptr_id[level] + prio);
941 
942 	} while (cl != start);
943 
944 	if (likely(skb != NULL)) {
945 		if ((cl->un.leaf.deficit[level] -= skb->len) < 0) {
946 			cl->un.leaf.deficit[level] += cl->un.leaf.quantum;
947 			htb_next_rb_node((level ? cl->parent->un.inner.ptr : q->
948 					  ptr[0]) + prio);
949 		}
950 		/* this used to be after charge_class but this constelation
951 		   gives us slightly better performance */
952 		if (!cl->un.leaf.q->q.qlen)
953 			htb_deactivate(q, cl);
954 		htb_charge_class(q, cl, level, skb->len);
955 	}
956 	return skb;
957 }
958 
959 static void htb_delay_by(struct Qdisc *sch, long delay)
960 {
961 	struct htb_sched *q = qdisc_priv(sch);
962 	if (delay <= 0)
963 		delay = 1;
964 	if (unlikely(delay > 5 * HZ)) {
965 		if (net_ratelimit())
966 			printk(KERN_INFO "HTB delay %ld > 5sec\n", delay);
967 		delay = 5 * HZ;
968 	}
969 	/* why don't use jiffies here ? because expires can be in past */
970 	mod_timer(&q->timer, q->jiffies + delay);
971 	sch->flags |= TCQ_F_THROTTLED;
972 	sch->qstats.overlimits++;
973 }
974 
975 static struct sk_buff *htb_dequeue(struct Qdisc *sch)
976 {
977 	struct sk_buff *skb = NULL;
978 	struct htb_sched *q = qdisc_priv(sch);
979 	int level;
980 	long min_delay;
981 
982 	q->jiffies = jiffies;
983 
984 	/* try to dequeue direct packets as high prio (!) to minimize cpu work */
985 	skb = __skb_dequeue(&q->direct_queue);
986 	if (skb != NULL) {
987 		sch->flags &= ~TCQ_F_THROTTLED;
988 		sch->q.qlen--;
989 		return skb;
990 	}
991 
992 	if (!sch->q.qlen)
993 		goto fin;
994 	PSCHED_GET_TIME(q->now);
995 
996 	min_delay = LONG_MAX;
997 	q->nwc_hit = 0;
998 	for (level = 0; level < TC_HTB_MAXDEPTH; level++) {
999 		/* common case optimization - skip event handler quickly */
1000 		int m;
1001 		long delay;
1002 		if (time_after_eq(q->jiffies, q->near_ev_cache[level])) {
1003 			delay = htb_do_events(q, level);
1004 			q->near_ev_cache[level] =
1005 			    q->jiffies + (delay ? delay : HZ);
1006 		} else
1007 			delay = q->near_ev_cache[level] - q->jiffies;
1008 
1009 		if (delay && min_delay > delay)
1010 			min_delay = delay;
1011 		m = ~q->row_mask[level];
1012 		while (m != (int)(-1)) {
1013 			int prio = ffz(m);
1014 			m |= 1 << prio;
1015 			skb = htb_dequeue_tree(q, prio, level);
1016 			if (likely(skb != NULL)) {
1017 				sch->q.qlen--;
1018 				sch->flags &= ~TCQ_F_THROTTLED;
1019 				goto fin;
1020 			}
1021 		}
1022 	}
1023 	htb_delay_by(sch, min_delay > 5 * HZ ? 5 * HZ : min_delay);
1024 fin:
1025 	return skb;
1026 }
1027 
1028 /* try to drop from each class (by prio) until one succeed */
1029 static unsigned int htb_drop(struct Qdisc *sch)
1030 {
1031 	struct htb_sched *q = qdisc_priv(sch);
1032 	int prio;
1033 
1034 	for (prio = TC_HTB_NUMPRIO - 1; prio >= 0; prio--) {
1035 		struct list_head *p;
1036 		list_for_each(p, q->drops + prio) {
1037 			struct htb_class *cl = list_entry(p, struct htb_class,
1038 							  un.leaf.drop_list);
1039 			unsigned int len;
1040 			if (cl->un.leaf.q->ops->drop &&
1041 			    (len = cl->un.leaf.q->ops->drop(cl->un.leaf.q))) {
1042 				sch->q.qlen--;
1043 				if (!cl->un.leaf.q->q.qlen)
1044 					htb_deactivate(q, cl);
1045 				return len;
1046 			}
1047 		}
1048 	}
1049 	return 0;
1050 }
1051 
1052 /* reset all classes */
1053 /* always caled under BH & queue lock */
1054 static void htb_reset(struct Qdisc *sch)
1055 {
1056 	struct htb_sched *q = qdisc_priv(sch);
1057 	int i;
1058 
1059 	for (i = 0; i < HTB_HSIZE; i++) {
1060 		struct hlist_node *p;
1061 		struct htb_class *cl;
1062 
1063 		hlist_for_each_entry(cl, p, q->hash + i, hlist) {
1064 			if (cl->level)
1065 				memset(&cl->un.inner, 0, sizeof(cl->un.inner));
1066 			else {
1067 				if (cl->un.leaf.q)
1068 					qdisc_reset(cl->un.leaf.q);
1069 				INIT_LIST_HEAD(&cl->un.leaf.drop_list);
1070 			}
1071 			cl->prio_activity = 0;
1072 			cl->cmode = HTB_CAN_SEND;
1073 
1074 		}
1075 	}
1076 	sch->flags &= ~TCQ_F_THROTTLED;
1077 	del_timer(&q->timer);
1078 	__skb_queue_purge(&q->direct_queue);
1079 	sch->q.qlen = 0;
1080 	memset(q->row, 0, sizeof(q->row));
1081 	memset(q->row_mask, 0, sizeof(q->row_mask));
1082 	memset(q->wait_pq, 0, sizeof(q->wait_pq));
1083 	memset(q->ptr, 0, sizeof(q->ptr));
1084 	for (i = 0; i < TC_HTB_NUMPRIO; i++)
1085 		INIT_LIST_HEAD(q->drops + i);
1086 }
1087 
1088 static int htb_init(struct Qdisc *sch, struct rtattr *opt)
1089 {
1090 	struct htb_sched *q = qdisc_priv(sch);
1091 	struct rtattr *tb[TCA_HTB_INIT];
1092 	struct tc_htb_glob *gopt;
1093 	int i;
1094 	if (!opt || rtattr_parse_nested(tb, TCA_HTB_INIT, opt) ||
1095 	    tb[TCA_HTB_INIT - 1] == NULL ||
1096 	    RTA_PAYLOAD(tb[TCA_HTB_INIT - 1]) < sizeof(*gopt)) {
1097 		printk(KERN_ERR "HTB: hey probably you have bad tc tool ?\n");
1098 		return -EINVAL;
1099 	}
1100 	gopt = RTA_DATA(tb[TCA_HTB_INIT - 1]);
1101 	if (gopt->version != HTB_VER >> 16) {
1102 		printk(KERN_ERR
1103 		       "HTB: need tc/htb version %d (minor is %d), you have %d\n",
1104 		       HTB_VER >> 16, HTB_VER & 0xffff, gopt->version);
1105 		return -EINVAL;
1106 	}
1107 
1108 	INIT_LIST_HEAD(&q->root);
1109 	for (i = 0; i < HTB_HSIZE; i++)
1110 		INIT_HLIST_HEAD(q->hash + i);
1111 	for (i = 0; i < TC_HTB_NUMPRIO; i++)
1112 		INIT_LIST_HEAD(q->drops + i);
1113 
1114 	init_timer(&q->timer);
1115 	skb_queue_head_init(&q->direct_queue);
1116 
1117 	q->direct_qlen = sch->dev->tx_queue_len;
1118 	if (q->direct_qlen < 2)	/* some devices have zero tx_queue_len */
1119 		q->direct_qlen = 2;
1120 	q->timer.function = htb_timer;
1121 	q->timer.data = (unsigned long)sch;
1122 
1123 #ifdef HTB_RATECM
1124 	init_timer(&q->rttim);
1125 	q->rttim.function = htb_rate_timer;
1126 	q->rttim.data = (unsigned long)sch;
1127 	q->rttim.expires = jiffies + HZ;
1128 	add_timer(&q->rttim);
1129 #endif
1130 	if ((q->rate2quantum = gopt->rate2quantum) < 1)
1131 		q->rate2quantum = 1;
1132 	q->defcls = gopt->defcls;
1133 
1134 	return 0;
1135 }
1136 
1137 static int htb_dump(struct Qdisc *sch, struct sk_buff *skb)
1138 {
1139 	struct htb_sched *q = qdisc_priv(sch);
1140 	unsigned char *b = skb->tail;
1141 	struct rtattr *rta;
1142 	struct tc_htb_glob gopt;
1143 	spin_lock_bh(&sch->dev->queue_lock);
1144 	gopt.direct_pkts = q->direct_pkts;
1145 
1146 	gopt.version = HTB_VER;
1147 	gopt.rate2quantum = q->rate2quantum;
1148 	gopt.defcls = q->defcls;
1149 	gopt.debug = 0;
1150 	rta = (struct rtattr *)b;
1151 	RTA_PUT(skb, TCA_OPTIONS, 0, NULL);
1152 	RTA_PUT(skb, TCA_HTB_INIT, sizeof(gopt), &gopt);
1153 	rta->rta_len = skb->tail - b;
1154 	spin_unlock_bh(&sch->dev->queue_lock);
1155 	return skb->len;
1156 rtattr_failure:
1157 	spin_unlock_bh(&sch->dev->queue_lock);
1158 	skb_trim(skb, skb->tail - skb->data);
1159 	return -1;
1160 }
1161 
1162 static int htb_dump_class(struct Qdisc *sch, unsigned long arg,
1163 			  struct sk_buff *skb, struct tcmsg *tcm)
1164 {
1165 	struct htb_class *cl = (struct htb_class *)arg;
1166 	unsigned char *b = skb->tail;
1167 	struct rtattr *rta;
1168 	struct tc_htb_opt opt;
1169 
1170 	spin_lock_bh(&sch->dev->queue_lock);
1171 	tcm->tcm_parent = cl->parent ? cl->parent->classid : TC_H_ROOT;
1172 	tcm->tcm_handle = cl->classid;
1173 	if (!cl->level && cl->un.leaf.q)
1174 		tcm->tcm_info = cl->un.leaf.q->handle;
1175 
1176 	rta = (struct rtattr *)b;
1177 	RTA_PUT(skb, TCA_OPTIONS, 0, NULL);
1178 
1179 	memset(&opt, 0, sizeof(opt));
1180 
1181 	opt.rate = cl->rate->rate;
1182 	opt.buffer = cl->buffer;
1183 	opt.ceil = cl->ceil->rate;
1184 	opt.cbuffer = cl->cbuffer;
1185 	opt.quantum = cl->un.leaf.quantum;
1186 	opt.prio = cl->un.leaf.prio;
1187 	opt.level = cl->level;
1188 	RTA_PUT(skb, TCA_HTB_PARMS, sizeof(opt), &opt);
1189 	rta->rta_len = skb->tail - b;
1190 	spin_unlock_bh(&sch->dev->queue_lock);
1191 	return skb->len;
1192 rtattr_failure:
1193 	spin_unlock_bh(&sch->dev->queue_lock);
1194 	skb_trim(skb, b - skb->data);
1195 	return -1;
1196 }
1197 
1198 static int
1199 htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d)
1200 {
1201 	struct htb_class *cl = (struct htb_class *)arg;
1202 
1203 #ifdef HTB_RATECM
1204 	cl->rate_est.bps = cl->rate_bytes / (HTB_EWMAC * HTB_HSIZE);
1205 	cl->rate_est.pps = cl->rate_packets / (HTB_EWMAC * HTB_HSIZE);
1206 #endif
1207 
1208 	if (!cl->level && cl->un.leaf.q)
1209 		cl->qstats.qlen = cl->un.leaf.q->q.qlen;
1210 	cl->xstats.tokens = cl->tokens;
1211 	cl->xstats.ctokens = cl->ctokens;
1212 
1213 	if (gnet_stats_copy_basic(d, &cl->bstats) < 0 ||
1214 	    gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
1215 	    gnet_stats_copy_queue(d, &cl->qstats) < 0)
1216 		return -1;
1217 
1218 	return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats));
1219 }
1220 
1221 static int htb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
1222 		     struct Qdisc **old)
1223 {
1224 	struct htb_class *cl = (struct htb_class *)arg;
1225 
1226 	if (cl && !cl->level) {
1227 		if (new == NULL && (new = qdisc_create_dflt(sch->dev,
1228 							    &pfifo_qdisc_ops))
1229 		    == NULL)
1230 			return -ENOBUFS;
1231 		sch_tree_lock(sch);
1232 		if ((*old = xchg(&cl->un.leaf.q, new)) != NULL) {
1233 			if (cl->prio_activity)
1234 				htb_deactivate(qdisc_priv(sch), cl);
1235 
1236 			/* TODO: is it correct ? Why CBQ doesn't do it ? */
1237 			sch->q.qlen -= (*old)->q.qlen;
1238 			qdisc_reset(*old);
1239 		}
1240 		sch_tree_unlock(sch);
1241 		return 0;
1242 	}
1243 	return -ENOENT;
1244 }
1245 
1246 static struct Qdisc *htb_leaf(struct Qdisc *sch, unsigned long arg)
1247 {
1248 	struct htb_class *cl = (struct htb_class *)arg;
1249 	return (cl && !cl->level) ? cl->un.leaf.q : NULL;
1250 }
1251 
1252 static unsigned long htb_get(struct Qdisc *sch, u32 classid)
1253 {
1254 	struct htb_class *cl = htb_find(classid, sch);
1255 	if (cl)
1256 		cl->refcnt++;
1257 	return (unsigned long)cl;
1258 }
1259 
1260 static void htb_destroy_filters(struct tcf_proto **fl)
1261 {
1262 	struct tcf_proto *tp;
1263 
1264 	while ((tp = *fl) != NULL) {
1265 		*fl = tp->next;
1266 		tcf_destroy(tp);
1267 	}
1268 }
1269 
1270 static void htb_destroy_class(struct Qdisc *sch, struct htb_class *cl)
1271 {
1272 	struct htb_sched *q = qdisc_priv(sch);
1273 	if (!cl->level) {
1274 		BUG_TRAP(cl->un.leaf.q);
1275 		sch->q.qlen -= cl->un.leaf.q->q.qlen;
1276 		qdisc_destroy(cl->un.leaf.q);
1277 	}
1278 	qdisc_put_rtab(cl->rate);
1279 	qdisc_put_rtab(cl->ceil);
1280 
1281 	htb_destroy_filters(&cl->filter_list);
1282 
1283 	while (!list_empty(&cl->children))
1284 		htb_destroy_class(sch, list_entry(cl->children.next,
1285 						  struct htb_class, sibling));
1286 
1287 	/* note: this delete may happen twice (see htb_delete) */
1288 	if (!hlist_unhashed(&cl->hlist))
1289 		hlist_del(&cl->hlist);
1290 	list_del(&cl->sibling);
1291 
1292 	if (cl->prio_activity)
1293 		htb_deactivate(q, cl);
1294 
1295 	if (cl->cmode != HTB_CAN_SEND)
1296 		htb_safe_rb_erase(&cl->pq_node, q->wait_pq + cl->level);
1297 
1298 	kfree(cl);
1299 }
1300 
1301 /* always caled under BH & queue lock */
1302 static void htb_destroy(struct Qdisc *sch)
1303 {
1304 	struct htb_sched *q = qdisc_priv(sch);
1305 
1306 	del_timer_sync(&q->timer);
1307 #ifdef HTB_RATECM
1308 	del_timer_sync(&q->rttim);
1309 #endif
1310 	/* This line used to be after htb_destroy_class call below
1311 	   and surprisingly it worked in 2.4. But it must precede it
1312 	   because filter need its target class alive to be able to call
1313 	   unbind_filter on it (without Oops). */
1314 	htb_destroy_filters(&q->filter_list);
1315 
1316 	while (!list_empty(&q->root))
1317 		htb_destroy_class(sch, list_entry(q->root.next,
1318 						  struct htb_class, sibling));
1319 
1320 	__skb_queue_purge(&q->direct_queue);
1321 }
1322 
1323 static int htb_delete(struct Qdisc *sch, unsigned long arg)
1324 {
1325 	struct htb_sched *q = qdisc_priv(sch);
1326 	struct htb_class *cl = (struct htb_class *)arg;
1327 
1328 	// TODO: why don't allow to delete subtree ? references ? does
1329 	// tc subsys quarantee us that in htb_destroy it holds no class
1330 	// refs so that we can remove children safely there ?
1331 	if (!list_empty(&cl->children) || cl->filter_cnt)
1332 		return -EBUSY;
1333 
1334 	sch_tree_lock(sch);
1335 
1336 	/* delete from hash and active; remainder in destroy_class */
1337 	if (!hlist_unhashed(&cl->hlist))
1338 		hlist_del(&cl->hlist);
1339 
1340 	if (cl->prio_activity)
1341 		htb_deactivate(q, cl);
1342 
1343 	if (--cl->refcnt == 0)
1344 		htb_destroy_class(sch, cl);
1345 
1346 	sch_tree_unlock(sch);
1347 	return 0;
1348 }
1349 
1350 static void htb_put(struct Qdisc *sch, unsigned long arg)
1351 {
1352 	struct htb_class *cl = (struct htb_class *)arg;
1353 
1354 	if (--cl->refcnt == 0)
1355 		htb_destroy_class(sch, cl);
1356 }
1357 
1358 static int htb_change_class(struct Qdisc *sch, u32 classid,
1359 			    u32 parentid, struct rtattr **tca,
1360 			    unsigned long *arg)
1361 {
1362 	int err = -EINVAL;
1363 	struct htb_sched *q = qdisc_priv(sch);
1364 	struct htb_class *cl = (struct htb_class *)*arg, *parent;
1365 	struct rtattr *opt = tca[TCA_OPTIONS - 1];
1366 	struct qdisc_rate_table *rtab = NULL, *ctab = NULL;
1367 	struct rtattr *tb[TCA_HTB_RTAB];
1368 	struct tc_htb_opt *hopt;
1369 
1370 	/* extract all subattrs from opt attr */
1371 	if (!opt || rtattr_parse_nested(tb, TCA_HTB_RTAB, opt) ||
1372 	    tb[TCA_HTB_PARMS - 1] == NULL ||
1373 	    RTA_PAYLOAD(tb[TCA_HTB_PARMS - 1]) < sizeof(*hopt))
1374 		goto failure;
1375 
1376 	parent = parentid == TC_H_ROOT ? NULL : htb_find(parentid, sch);
1377 
1378 	hopt = RTA_DATA(tb[TCA_HTB_PARMS - 1]);
1379 
1380 	rtab = qdisc_get_rtab(&hopt->rate, tb[TCA_HTB_RTAB - 1]);
1381 	ctab = qdisc_get_rtab(&hopt->ceil, tb[TCA_HTB_CTAB - 1]);
1382 	if (!rtab || !ctab)
1383 		goto failure;
1384 
1385 	if (!cl) {		/* new class */
1386 		struct Qdisc *new_q;
1387 		int prio;
1388 
1389 		/* check for valid classid */
1390 		if (!classid || TC_H_MAJ(classid ^ sch->handle)
1391 		    || htb_find(classid, sch))
1392 			goto failure;
1393 
1394 		/* check maximal depth */
1395 		if (parent && parent->parent && parent->parent->level < 2) {
1396 			printk(KERN_ERR "htb: tree is too deep\n");
1397 			goto failure;
1398 		}
1399 		err = -ENOBUFS;
1400 		if ((cl = kzalloc(sizeof(*cl), GFP_KERNEL)) == NULL)
1401 			goto failure;
1402 
1403 		cl->refcnt = 1;
1404 		INIT_LIST_HEAD(&cl->sibling);
1405 		INIT_HLIST_NODE(&cl->hlist);
1406 		INIT_LIST_HEAD(&cl->children);
1407 		INIT_LIST_HEAD(&cl->un.leaf.drop_list);
1408 		RB_CLEAR_NODE(&cl->pq_node);
1409 
1410 		for (prio = 0; prio < TC_HTB_NUMPRIO; prio++)
1411 			RB_CLEAR_NODE(&cl->node[prio]);
1412 
1413 		/* create leaf qdisc early because it uses kmalloc(GFP_KERNEL)
1414 		   so that can't be used inside of sch_tree_lock
1415 		   -- thanks to Karlis Peisenieks */
1416 		new_q = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops);
1417 		sch_tree_lock(sch);
1418 		if (parent && !parent->level) {
1419 			/* turn parent into inner node */
1420 			sch->q.qlen -= parent->un.leaf.q->q.qlen;
1421 			qdisc_destroy(parent->un.leaf.q);
1422 			if (parent->prio_activity)
1423 				htb_deactivate(q, parent);
1424 
1425 			/* remove from evt list because of level change */
1426 			if (parent->cmode != HTB_CAN_SEND) {
1427 				htb_safe_rb_erase(&parent->pq_node, q->wait_pq);
1428 				parent->cmode = HTB_CAN_SEND;
1429 			}
1430 			parent->level = (parent->parent ? parent->parent->level
1431 					 : TC_HTB_MAXDEPTH) - 1;
1432 			memset(&parent->un.inner, 0, sizeof(parent->un.inner));
1433 		}
1434 		/* leaf (we) needs elementary qdisc */
1435 		cl->un.leaf.q = new_q ? new_q : &noop_qdisc;
1436 
1437 		cl->classid = classid;
1438 		cl->parent = parent;
1439 
1440 		/* set class to be in HTB_CAN_SEND state */
1441 		cl->tokens = hopt->buffer;
1442 		cl->ctokens = hopt->cbuffer;
1443 		cl->mbuffer = PSCHED_JIFFIE2US(HZ * 60);	/* 1min */
1444 		PSCHED_GET_TIME(cl->t_c);
1445 		cl->cmode = HTB_CAN_SEND;
1446 
1447 		/* attach to the hash list and parent's family */
1448 		hlist_add_head(&cl->hlist, q->hash + htb_hash(classid));
1449 		list_add_tail(&cl->sibling,
1450 			      parent ? &parent->children : &q->root);
1451 	} else
1452 		sch_tree_lock(sch);
1453 
1454 	/* it used to be a nasty bug here, we have to check that node
1455 	   is really leaf before changing cl->un.leaf ! */
1456 	if (!cl->level) {
1457 		cl->un.leaf.quantum = rtab->rate.rate / q->rate2quantum;
1458 		if (!hopt->quantum && cl->un.leaf.quantum < 1000) {
1459 			printk(KERN_WARNING
1460 			       "HTB: quantum of class %X is small. Consider r2q change.\n",
1461 			       cl->classid);
1462 			cl->un.leaf.quantum = 1000;
1463 		}
1464 		if (!hopt->quantum && cl->un.leaf.quantum > 200000) {
1465 			printk(KERN_WARNING
1466 			       "HTB: quantum of class %X is big. Consider r2q change.\n",
1467 			       cl->classid);
1468 			cl->un.leaf.quantum = 200000;
1469 		}
1470 		if (hopt->quantum)
1471 			cl->un.leaf.quantum = hopt->quantum;
1472 		if ((cl->un.leaf.prio = hopt->prio) >= TC_HTB_NUMPRIO)
1473 			cl->un.leaf.prio = TC_HTB_NUMPRIO - 1;
1474 	}
1475 
1476 	cl->buffer = hopt->buffer;
1477 	cl->cbuffer = hopt->cbuffer;
1478 	if (cl->rate)
1479 		qdisc_put_rtab(cl->rate);
1480 	cl->rate = rtab;
1481 	if (cl->ceil)
1482 		qdisc_put_rtab(cl->ceil);
1483 	cl->ceil = ctab;
1484 	sch_tree_unlock(sch);
1485 
1486 	*arg = (unsigned long)cl;
1487 	return 0;
1488 
1489 failure:
1490 	if (rtab)
1491 		qdisc_put_rtab(rtab);
1492 	if (ctab)
1493 		qdisc_put_rtab(ctab);
1494 	return err;
1495 }
1496 
1497 static struct tcf_proto **htb_find_tcf(struct Qdisc *sch, unsigned long arg)
1498 {
1499 	struct htb_sched *q = qdisc_priv(sch);
1500 	struct htb_class *cl = (struct htb_class *)arg;
1501 	struct tcf_proto **fl = cl ? &cl->filter_list : &q->filter_list;
1502 
1503 	return fl;
1504 }
1505 
1506 static unsigned long htb_bind_filter(struct Qdisc *sch, unsigned long parent,
1507 				     u32 classid)
1508 {
1509 	struct htb_sched *q = qdisc_priv(sch);
1510 	struct htb_class *cl = htb_find(classid, sch);
1511 
1512 	/*if (cl && !cl->level) return 0;
1513 	   The line above used to be there to prevent attaching filters to
1514 	   leaves. But at least tc_index filter uses this just to get class
1515 	   for other reasons so that we have to allow for it.
1516 	   ----
1517 	   19.6.2002 As Werner explained it is ok - bind filter is just
1518 	   another way to "lock" the class - unlike "get" this lock can
1519 	   be broken by class during destroy IIUC.
1520 	 */
1521 	if (cl)
1522 		cl->filter_cnt++;
1523 	else
1524 		q->filter_cnt++;
1525 	return (unsigned long)cl;
1526 }
1527 
1528 static void htb_unbind_filter(struct Qdisc *sch, unsigned long arg)
1529 {
1530 	struct htb_sched *q = qdisc_priv(sch);
1531 	struct htb_class *cl = (struct htb_class *)arg;
1532 
1533 	if (cl)
1534 		cl->filter_cnt--;
1535 	else
1536 		q->filter_cnt--;
1537 }
1538 
1539 static void htb_walk(struct Qdisc *sch, struct qdisc_walker *arg)
1540 {
1541 	struct htb_sched *q = qdisc_priv(sch);
1542 	int i;
1543 
1544 	if (arg->stop)
1545 		return;
1546 
1547 	for (i = 0; i < HTB_HSIZE; i++) {
1548 		struct hlist_node *p;
1549 		struct htb_class *cl;
1550 
1551 		hlist_for_each_entry(cl, p, q->hash + i, hlist) {
1552 			if (arg->count < arg->skip) {
1553 				arg->count++;
1554 				continue;
1555 			}
1556 			if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
1557 				arg->stop = 1;
1558 				return;
1559 			}
1560 			arg->count++;
1561 		}
1562 	}
1563 }
1564 
1565 static struct Qdisc_class_ops htb_class_ops = {
1566 	.graft		=	htb_graft,
1567 	.leaf		=	htb_leaf,
1568 	.get		=	htb_get,
1569 	.put		=	htb_put,
1570 	.change		=	htb_change_class,
1571 	.delete		=	htb_delete,
1572 	.walk		=	htb_walk,
1573 	.tcf_chain	=	htb_find_tcf,
1574 	.bind_tcf	=	htb_bind_filter,
1575 	.unbind_tcf	=	htb_unbind_filter,
1576 	.dump		=	htb_dump_class,
1577 	.dump_stats	=	htb_dump_class_stats,
1578 };
1579 
1580 static struct Qdisc_ops htb_qdisc_ops = {
1581 	.next		=	NULL,
1582 	.cl_ops		=	&htb_class_ops,
1583 	.id		=	"htb",
1584 	.priv_size	=	sizeof(struct htb_sched),
1585 	.enqueue	=	htb_enqueue,
1586 	.dequeue	=	htb_dequeue,
1587 	.requeue	=	htb_requeue,
1588 	.drop		=	htb_drop,
1589 	.init		=	htb_init,
1590 	.reset		=	htb_reset,
1591 	.destroy	=	htb_destroy,
1592 	.change		=	NULL /* htb_change */,
1593 	.dump		=	htb_dump,
1594 	.owner		=	THIS_MODULE,
1595 };
1596 
1597 static int __init htb_module_init(void)
1598 {
1599 	return register_qdisc(&htb_qdisc_ops);
1600 }
1601 static void __exit htb_module_exit(void)
1602 {
1603 	unregister_qdisc(&htb_qdisc_ops);
1604 }
1605 
1606 module_init(htb_module_init)
1607 module_exit(htb_module_exit)
1608 MODULE_LICENSE("GPL");
1609