xref: /openbmc/linux/net/sched/sch_htb.c (revision c537b994)
1 /*
2  * net/sched/sch_htb.c	Hierarchical token bucket, feed tree version
3  *
4  *		This program is free software; you can redistribute it and/or
5  *		modify it under the terms of the GNU General Public License
6  *		as published by the Free Software Foundation; either version
7  *		2 of the License, or (at your option) any later version.
8  *
9  * Authors:	Martin Devera, <devik@cdi.cz>
10  *
11  * Credits (in time order) for older HTB versions:
12  *              Stef Coene <stef.coene@docum.org>
13  *			HTB support at LARTC mailing list
14  *		Ondrej Kraus, <krauso@barr.cz>
15  *			found missing INIT_QDISC(htb)
16  *		Vladimir Smelhaus, Aamer Akhter, Bert Hubert
17  *			helped a lot to locate nasty class stall bug
18  *		Andi Kleen, Jamal Hadi, Bert Hubert
19  *			code review and helpful comments on shaping
20  *		Tomasz Wrona, <tw@eter.tym.pl>
21  *			created test case so that I was able to fix nasty bug
22  *		Wilfried Weissmann
23  *			spotted bug in dequeue code and helped with fix
24  *		Jiri Fojtasek
25  *			fixed requeue routine
26  *		and many others. thanks.
27  *
28  * $Id: sch_htb.c,v 1.25 2003/12/07 11:08:25 devik Exp devik $
29  */
30 #include <linux/module.h>
31 #include <asm/uaccess.h>
32 #include <asm/system.h>
33 #include <linux/bitops.h>
34 #include <linux/types.h>
35 #include <linux/kernel.h>
36 #include <linux/string.h>
37 #include <linux/mm.h>
38 #include <linux/socket.h>
39 #include <linux/sockios.h>
40 #include <linux/in.h>
41 #include <linux/errno.h>
42 #include <linux/interrupt.h>
43 #include <linux/if_ether.h>
44 #include <linux/inet.h>
45 #include <linux/netdevice.h>
46 #include <linux/etherdevice.h>
47 #include <linux/notifier.h>
48 #include <net/ip.h>
49 #include <net/route.h>
50 #include <linux/skbuff.h>
51 #include <linux/list.h>
52 #include <linux/compiler.h>
53 #include <net/sock.h>
54 #include <net/pkt_sched.h>
55 #include <linux/rbtree.h>
56 
57 /* HTB algorithm.
58     Author: devik@cdi.cz
59     ========================================================================
60     HTB is like TBF with multiple classes. It is also similar to CBQ because
61     it allows assigning a priority to each class in the hierarchy.
62     In fact it is another implementation of Floyd's formal sharing.
63 
64     Levels:
65     Each class is assigned a level. Leaves ALWAYS have level 0 and root
66     classes have level TC_HTB_MAXDEPTH-1. Interior nodes have a level
67     one less than their parent.
68 */
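/* Illustrative example (assuming TC_HTB_MAXDEPTH == 8): in a hierarchy
   such as

	1:1   root class    -> level 7
	1:10  inner class   -> level 6
	1:100 leaf class    -> level 0

   interior levels are assigned top-down from the root, while every leaf
   sits at level 0 regardless of how deep it is attached. */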
69 
70 #define HTB_HSIZE 16		/* classid hash size */
71 #define HTB_EWMAC 2		/* rate average over HTB_EWMAC*HTB_HSIZE sec */
72 #define HTB_RATECM 1		/* whether to use rate computer */
73 #define HTB_HYSTERESIS 1	/* whether to use mode hysteresis for speedup */
74 #define HTB_VER 0x30011		/* major must match the version number supplied by TC */
75 
76 #if HTB_VER >> 16 != TC_HTB_PROTOVER
77 #error "Mismatched sch_htb.c and pkt_sch.h"
78 #endif
79 
80 /* used internally to keep the status of a single class */
81 enum htb_cmode {
82 	HTB_CANT_SEND,		/* class can't send and can't borrow */
83 	HTB_MAY_BORROW,		/* class can't send but may borrow */
84 	HTB_CAN_SEND		/* class can send */
85 };
86 
87 /* interior & leaf nodes; props specific to leaves are marked L: */
88 struct htb_class {
89 	/* general class parameters */
90 	u32 classid;
91 	struct gnet_stats_basic bstats;
92 	struct gnet_stats_queue qstats;
93 	struct gnet_stats_rate_est rate_est;
94 	struct tc_htb_xstats xstats;	/* our special stats */
95 	int refcnt;		/* usage count of this class */
96 
97 #ifdef HTB_RATECM
98 	/* rate measurement counters */
99 	unsigned long rate_bytes, sum_bytes;
100 	unsigned long rate_packets, sum_packets;
101 #endif
102 
103 	/* topology */
104 	int level;		/* our level (see above) */
105 	struct htb_class *parent;	/* parent class */
106 	struct hlist_node hlist;	/* classid hash list item */
107 	struct list_head sibling;	/* sibling list item */
108 	struct list_head children;	/* children list */
109 
110 	union {
111 		struct htb_class_leaf {
112 			struct Qdisc *q;
113 			int prio;
114 			int aprio;
115 			int quantum;
116 			int deficit[TC_HTB_MAXDEPTH];
117 			struct list_head drop_list;
118 		} leaf;
119 		struct htb_class_inner {
120 			struct rb_root feed[TC_HTB_NUMPRIO];	/* feed trees */
121 			struct rb_node *ptr[TC_HTB_NUMPRIO];	/* current class ptr */
122 			/* When a class changes from state 1->2 and disconnects from
123 			   its parent's feed, we lose the ptr value and would start from
124 			   the first child again. Here we store the classid of the
125 			   last valid ptr (used when ptr is NULL). */
126 			u32 last_ptr_id[TC_HTB_NUMPRIO];
127 		} inner;
128 	} un;
129 	struct rb_node node[TC_HTB_NUMPRIO];	/* node for self or feed tree */
130 	struct rb_node pq_node;	/* node for event queue */
131 	unsigned long pq_key;	/* the same type as jiffies global */
132 
133 	int prio_activity;	/* for which prios are we active */
134 	enum htb_cmode cmode;	/* current mode of the class */
135 
136 	/* class attached filters */
137 	struct tcf_proto *filter_list;
138 	int filter_cnt;
139 
140 	int warned;		/* print only one warning about a non-work-conserving leaf */
141 
142 	/* token bucket parameters */
143 	struct qdisc_rate_table *rate;	/* rate table of the class itself */
144 	struct qdisc_rate_table *ceil;	/* ceiling rate (limits borrows too) */
145 	long buffer, cbuffer;	/* token bucket depth/rate */
146 	psched_tdiff_t mbuffer;	/* max wait time */
147 	long tokens, ctokens;	/* current number of tokens */
148 	psched_time_t t_c;	/* checkpoint time */
149 
150 	int prio;		/* backup of un.leaf.prio so a parent can be turned back into a leaf */
151 	int quantum;		/* backup of un.leaf.quantum; eventually a full replacement */
152 				/* of the un.leaf originals should be done. */
153 };
154 
155 /* TODO: maybe compute rate when size is too large .. or drop ? */
156 static inline long L2T(struct htb_class *cl, struct qdisc_rate_table *rate,
157 			   int size)
158 {
159 	int slot = size >> rate->rate.cell_log;
160 	if (slot > 255) {
161 		cl->xstats.giants++;
162 		slot = 255;
163 	}
164 	return rate->data[slot];
165 }
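/* A small worked example of the lookup above (illustrative numbers):
   with rate->rate.cell_log == 3 an 800 byte packet indexes slot
   800 >> 3 == 100, and rate->data[100] holds the precomputed time needed
   to send 800 bytes at this rate. Anything beyond slot 255 is counted as
   a giant and charged as if it occupied slot 255. */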
166 
167 struct htb_sched {
168 	struct list_head root;	/* root classes list */
169 	struct hlist_head hash[HTB_HSIZE];	/* hashed by classid */
170 	struct list_head drops[TC_HTB_NUMPRIO];/* active leaves (for drops) */
171 
172 	/* self list - roots of self generating tree */
173 	struct rb_root row[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO];
174 	int row_mask[TC_HTB_MAXDEPTH];
175 	struct rb_node *ptr[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO];
176 	u32 last_ptr_id[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO];
177 
178 	/* self wait list - roots of wait PQs per row */
179 	struct rb_root wait_pq[TC_HTB_MAXDEPTH];
180 
181 	/* time of nearest event per level (row) */
182 	unsigned long near_ev_cache[TC_HTB_MAXDEPTH];
183 
184 	/* cached value of jiffies in dequeue */
185 	unsigned long jiffies;
186 
187 	/* set when we hit a non-work-conserving class during this dequeue; */
188 	int nwc_hit;		/* used to suppress the mindelay complaint in dequeue */
189 
190 	int defcls;		/* class where unclassified flows go to */
191 
192 	/* filters for qdisc itself */
193 	struct tcf_proto *filter_list;
194 	int filter_cnt;
195 
196 	int rate2quantum;	/* quant = rate / rate2quantum */
197 	psched_time_t now;	/* cached dequeue time */
198 	struct timer_list timer;	/* send delay timer */
199 #ifdef HTB_RATECM
200 	struct timer_list rttim;	/* rate computer timer */
201 	int recmp_bucket;	/* which hash bucket to recompute next */
202 #endif
203 
204 	/* non-shaped skbs; let them go through directly */
205 	struct sk_buff_head direct_queue;
206 	int direct_qlen;	/* max qlen of above */
207 
208 	long direct_pkts;
209 };
210 
211 /* compute hash of size HTB_HSIZE for given handle */
212 static inline int htb_hash(u32 h)
213 {
214 #if HTB_HSIZE != 16
215 #error "Declare new hash for your HTB_HSIZE"
216 #endif
217 	h ^= h >> 8;		/* stolen from cbq_hash */
218 	h ^= h >> 4;
219 	return h & 0xf;
220 }
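/* Worked example (illustrative): htb_hash(0x0001000A), i.e. classid 1:a,
     h ^= h >> 8;   ->  0x0001010A
     h ^= h >> 4;   ->  0x0001111A
     h & 0xf        ->  0xA, bucket 10
   so for the usual small minor numbers the bucket is simply the low
   nibble of the minor id. */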
221 
222 /* find class in global hash table using given handle */
223 static inline struct htb_class *htb_find(u32 handle, struct Qdisc *sch)
224 {
225 	struct htb_sched *q = qdisc_priv(sch);
226 	struct hlist_node *p;
227 	struct htb_class *cl;
228 
229 	if (TC_H_MAJ(handle) != sch->handle)
230 		return NULL;
231 
232 	hlist_for_each_entry(cl, p, q->hash + htb_hash(handle), hlist) {
233 		if (cl->classid == handle)
234 			return cl;
235 	}
236 	return NULL;
237 }
238 
239 /**
240  * htb_classify - classify a packet into class
241  *
242  * It returns NULL if the packet should be dropped or HTB_DIRECT (-1) if the
243  * packet should be passed directly through. In all other cases a leaf class
244  * is returned. We allow direct class selection by a classid in skb->priority.
245  * Then we examine filters in the qdisc and in inner nodes (if a higher filter
246  * points to an inner node). If we end up with classid MAJOR:0 we enqueue the
247  * skb into the special internal fifo (direct). These packets then go directly
248  * through. If we still have no valid leaf we try to use the MAJOR:default
249  * leaf. If that is still unsuccessful we finish and return the direct queue.
250  */
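/* Illustrative ways to reach a leaf, assuming the qdisc handle is 1: and
   a leaf 1:a exists (example values only):
     - a socket sets SO_PRIORITY to 0x0001000A, so skb->priority selects
       class 1:a directly;
     - a filter such as
         tc filter add dev eth0 parent 1: protocol ip u32 \
             match ip dport 80 0xffff flowid 1:a
       leads tc_classify() to the same leaf;
     - everything else falls back to 1:<defcls>, or to the direct queue if
       that class does not exist or is not a leaf. */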
251 #define HTB_DIRECT (struct htb_class*)-1
252 static inline u32 htb_classid(struct htb_class *cl)
253 {
254 	return (cl && cl != HTB_DIRECT) ? cl->classid : TC_H_UNSPEC;
255 }
256 
257 static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch,
258 				      int *qerr)
259 {
260 	struct htb_sched *q = qdisc_priv(sch);
261 	struct htb_class *cl;
262 	struct tcf_result res;
263 	struct tcf_proto *tcf;
264 	int result;
265 
266 	/* allow selecting a class by setting skb->priority to a valid classid;
267 	   note that nfmark can be used too, by attaching an fw filter with no
268 	   rules in it */
269 	if (skb->priority == sch->handle)
270 		return HTB_DIRECT;	/* X:0 (direct flow) selected */
271 	if ((cl = htb_find(skb->priority, sch)) != NULL && cl->level == 0)
272 		return cl;
273 
274 	*qerr = NET_XMIT_BYPASS;
275 	tcf = q->filter_list;
276 	while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) {
277 #ifdef CONFIG_NET_CLS_ACT
278 		switch (result) {
279 		case TC_ACT_QUEUED:
280 		case TC_ACT_STOLEN:
281 			*qerr = NET_XMIT_SUCCESS;
282 		case TC_ACT_SHOT:
283 			return NULL;
284 		}
285 #elif defined(CONFIG_NET_CLS_POLICE)
286 		if (result == TC_POLICE_SHOT)
287 			return HTB_DIRECT;
288 #endif
289 		if ((cl = (void *)res.class) == NULL) {
290 			if (res.classid == sch->handle)
291 				return HTB_DIRECT;	/* X:0 (direct flow) */
292 			if ((cl = htb_find(res.classid, sch)) == NULL)
293 				break;	/* filter selected invalid classid */
294 		}
295 		if (!cl->level)
296 			return cl;	/* we hit leaf; return it */
297 
298 		/* we have got inner class; apply inner filter chain */
299 		tcf = cl->filter_list;
300 	}
301 	/* classification failed; try to use default class */
302 	cl = htb_find(TC_H_MAKE(TC_H_MAJ(sch->handle), q->defcls), sch);
303 	if (!cl || cl->level)
304 		return HTB_DIRECT;	/* bad default .. this is safe bet */
305 	return cl;
306 }
307 
308 /**
309  * htb_add_to_id_tree - adds class to the round robin list
310  *
311  * The routine adds the class to the list (actually a tree) sorted by classid.
312  * Make sure that the class is not already on such a list for the given prio.
313  */
314 static void htb_add_to_id_tree(struct rb_root *root,
315 			       struct htb_class *cl, int prio)
316 {
317 	struct rb_node **p = &root->rb_node, *parent = NULL;
318 
319 	while (*p) {
320 		struct htb_class *c;
321 		parent = *p;
322 		c = rb_entry(parent, struct htb_class, node[prio]);
323 
324 		if (cl->classid > c->classid)
325 			p = &parent->rb_right;
326 		else
327 			p = &parent->rb_left;
328 	}
329 	rb_link_node(&cl->node[prio], parent, p);
330 	rb_insert_color(&cl->node[prio], root);
331 }
332 
333 /**
334  * htb_add_to_wait_tree - adds class to the event queue with delay
335  *
336  * The class is added to the priority event queue to indicate that it will
337  * change its mode after 'delay' (converted to jiffies in cl->pq_key).
338  * Make sure that the class is not already in the queue.
339  */
340 static void htb_add_to_wait_tree(struct htb_sched *q,
341 				 struct htb_class *cl, long delay)
342 {
343 	struct rb_node **p = &q->wait_pq[cl->level].rb_node, *parent = NULL;
344 
345 	cl->pq_key = q->jiffies + PSCHED_US2JIFFIE(delay);
346 	if (cl->pq_key == q->jiffies)
347 		cl->pq_key++;
348 
349 	/* update the nearest event cache */
350 	if (time_after(q->near_ev_cache[cl->level], cl->pq_key))
351 		q->near_ev_cache[cl->level] = cl->pq_key;
352 
353 	while (*p) {
354 		struct htb_class *c;
355 		parent = *p;
356 		c = rb_entry(parent, struct htb_class, pq_node);
357 		if (time_after_eq(cl->pq_key, c->pq_key))
358 			p = &parent->rb_right;
359 		else
360 			p = &parent->rb_left;
361 	}
362 	rb_link_node(&cl->pq_node, parent, p);
363 	rb_insert_color(&cl->pq_node, &q->wait_pq[cl->level]);
364 }
365 
366 /**
367  * htb_next_rb_node - finds next node in binary tree
368  *
369  * Advances *n; when we are past the last key, *n becomes NULL.
370  * Average complexity is 2 steps per call.
371  */
372 static inline void htb_next_rb_node(struct rb_node **n)
373 {
374 	*n = rb_next(*n);
375 }
376 
377 /**
378  * htb_add_class_to_row - add class to its row
379  *
380  * The class is added to row at priorities marked in mask.
381  * It does nothing if mask == 0.
382  */
383 static inline void htb_add_class_to_row(struct htb_sched *q,
384 					struct htb_class *cl, int mask)
385 {
386 	q->row_mask[cl->level] |= mask;
387 	while (mask) {
388 		int prio = ffz(~mask);
389 		mask &= ~(1 << prio);
390 		htb_add_to_id_tree(q->row[cl->level] + prio, cl, prio);
391 	}
392 }
393 
394 /* If this triggers, it is a bug in this code, but it need not be fatal */
395 static void htb_safe_rb_erase(struct rb_node *rb, struct rb_root *root)
396 {
397 	if (RB_EMPTY_NODE(rb)) {
398 		WARN_ON(1);
399 	} else {
400 		rb_erase(rb, root);
401 		RB_CLEAR_NODE(rb);
402 	}
403 }
404 
405 
406 /**
407  * htb_remove_class_from_row - removes class from its row
408  *
409  * The class is removed from row at priorities marked in mask.
410  * It does nothing if mask == 0.
411  */
412 static inline void htb_remove_class_from_row(struct htb_sched *q,
413 						 struct htb_class *cl, int mask)
414 {
415 	int m = 0;
416 
417 	while (mask) {
418 		int prio = ffz(~mask);
419 
420 		mask &= ~(1 << prio);
421 		if (q->ptr[cl->level][prio] == cl->node + prio)
422 			htb_next_rb_node(q->ptr[cl->level] + prio);
423 
424 		htb_safe_rb_erase(cl->node + prio, q->row[cl->level] + prio);
425 		if (!q->row[cl->level][prio].rb_node)
426 			m |= 1 << prio;
427 	}
428 	q->row_mask[cl->level] &= ~m;
429 }
430 
431 /**
432  * htb_activate_prios - creates the active class's feed chain
433  *
434  * The class is connected to its ancestors and/or the appropriate rows
435  * for the priorities it participates in. cl->cmode must be the new
436  * (activated) mode. It does nothing if cl->prio_activity == 0.
437  */
438 static void htb_activate_prios(struct htb_sched *q, struct htb_class *cl)
439 {
440 	struct htb_class *p = cl->parent;
441 	long m, mask = cl->prio_activity;
442 
443 	while (cl->cmode == HTB_MAY_BORROW && p && mask) {
444 		m = mask;
445 		while (m) {
446 			int prio = ffz(~m);
447 			m &= ~(1 << prio);
448 
449 			if (p->un.inner.feed[prio].rb_node)
450 				/* parent already has its feed in use, so reset
451 				   the bit in mask as the parent is already ok */
452 				mask &= ~(1 << prio);
453 
454 			htb_add_to_id_tree(p->un.inner.feed + prio, cl, prio);
455 		}
456 		p->prio_activity |= mask;
457 		cl = p;
458 		p = cl->parent;
459 
460 	}
461 	if (cl->cmode == HTB_CAN_SEND && mask)
462 		htb_add_class_to_row(q, cl, mask);
463 }
464 
465 /**
466  * htb_deactivate_prios - remove class from feed chain
467  *
468  * cl->cmode must represent old mode (before deactivation). It does
469  * nothing if cl->prio_activity == 0. Class is removed from all feed
470  * chains and rows.
471  */
472 static void htb_deactivate_prios(struct htb_sched *q, struct htb_class *cl)
473 {
474 	struct htb_class *p = cl->parent;
475 	long m, mask = cl->prio_activity;
476 
477 	while (cl->cmode == HTB_MAY_BORROW && p && mask) {
478 		m = mask;
479 		mask = 0;
480 		while (m) {
481 			int prio = ffz(~m);
482 			m &= ~(1 << prio);
483 
484 			if (p->un.inner.ptr[prio] == cl->node + prio) {
485 				/* we are removing a child that the parent's feed
486 				   pointer points to - forget the pointer but
487 				   remember its classid */
488 				p->un.inner.last_ptr_id[prio] = cl->classid;
489 				p->un.inner.ptr[prio] = NULL;
490 			}
491 
492 			htb_safe_rb_erase(cl->node + prio, p->un.inner.feed + prio);
493 
494 			if (!p->un.inner.feed[prio].rb_node)
495 				mask |= 1 << prio;
496 		}
497 
498 		p->prio_activity &= ~mask;
499 		cl = p;
500 		p = cl->parent;
501 
502 	}
503 	if (cl->cmode == HTB_CAN_SEND && mask)
504 		htb_remove_class_from_row(q, cl, mask);
505 }
506 
507 #if HTB_HYSTERESIS
508 static inline long htb_lowater(const struct htb_class *cl)
509 {
510 	return cl->cmode != HTB_CANT_SEND ? -cl->cbuffer : 0;
511 }
512 static inline long htb_hiwater(const struct htb_class *cl)
513 {
514 	return cl->cmode == HTB_CAN_SEND ? -cl->buffer : 0;
515 }
516 #else
517 #define htb_lowater(cl)	(0)
518 #define htb_hiwater(cl)	(0)
519 #endif
520 
521 /**
522  * htb_class_mode - computes and returns current class mode
523  *
524  * It computes cl's mode at time cl->t_c+diff and returns it. If mode
525  * is not HTB_CAN_SEND then *diff is set to the time difference from now
526  * to the time when cl will change its state.
527  * It is also worth noting that the class mode doesn't change simply
528  * at cl->{c,}tokens == 0; rather, there can be a hysteresis in the
529  * 0 .. -cl->{c,}buffer range. It is meant to limit the number of
530  * mode transitions per time unit. The speed gain is about 1/6.
531  */
532 static inline enum htb_cmode
533 htb_class_mode(struct htb_class *cl, long *diff)
534 {
535 	long toks;
536 
537 	if ((toks = (cl->ctokens + *diff)) < htb_lowater(cl)) {
538 		*diff = -toks;
539 		return HTB_CANT_SEND;
540 	}
541 
542 	if ((toks = (cl->tokens + *diff)) >= htb_hiwater(cl))
543 		return HTB_CAN_SEND;
544 
545 	*diff = -toks;
546 	return HTB_MAY_BORROW;
547 }
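/* Illustration of the computation above (assuming hysteresis is compiled
   out, so both watermarks are 0): with cl->ctokens == -30 and *diff == 10,
   toks == -20 < 0, the class is HTB_CANT_SEND and *diff becomes 20, i.e.
   the time remaining until the ceil bucket climbs back to zero. If the
   ceil bucket is fine but cl->tokens + *diff is still negative, the class
   may only borrow (HTB_MAY_BORROW) and *diff again reports how long until
   it could send on its own. */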
548 
549 /**
550  * htb_change_class_mode - changes a class's mode
551  *
552  * This should be the only way to change a class's mode under normal
553  * circumstances. The routine will update the feed list linkage, change the
554  * mode and add the class to the wait event queue if appropriate. The new
555  * mode should differ from the old one and cl->pq_key has to be valid if
556  * changing to a mode other than HTB_CAN_SEND (see htb_add_to_wait_tree).
557  */
558 static void
559 htb_change_class_mode(struct htb_sched *q, struct htb_class *cl, long *diff)
560 {
561 	enum htb_cmode new_mode = htb_class_mode(cl, diff);
562 
563 	if (new_mode == cl->cmode)
564 		return;
565 
566 	if (cl->prio_activity) {	/* not necessary: speed optimization */
567 		if (cl->cmode != HTB_CANT_SEND)
568 			htb_deactivate_prios(q, cl);
569 		cl->cmode = new_mode;
570 		if (new_mode != HTB_CANT_SEND)
571 			htb_activate_prios(q, cl);
572 	} else
573 		cl->cmode = new_mode;
574 }
575 
576 /**
577  * htb_activate - inserts leaf cl into appropriate active feeds
578  *
579  * The routine learns the (new) priority of the leaf and activates the feed
580  * chain for that prio. It can safely be called on an already active leaf.
581  * It also adds the leaf to the drop list.
582  */
583 static inline void htb_activate(struct htb_sched *q, struct htb_class *cl)
584 {
585 	BUG_TRAP(!cl->level && cl->un.leaf.q && cl->un.leaf.q->q.qlen);
586 
587 	if (!cl->prio_activity) {
588 		cl->prio_activity = 1 << (cl->un.leaf.aprio = cl->un.leaf.prio);
589 		htb_activate_prios(q, cl);
590 		list_add_tail(&cl->un.leaf.drop_list,
591 			      q->drops + cl->un.leaf.aprio);
592 	}
593 }
594 
595 /**
596  * htb_deactivate - remove leaf cl from active feeds
597  *
598  * Make sure that the leaf is active. In other words, it can't be called
599  * with a non-active leaf. It also removes the class from the drop list.
600  */
601 static inline void htb_deactivate(struct htb_sched *q, struct htb_class *cl)
602 {
603 	BUG_TRAP(cl->prio_activity);
604 
605 	htb_deactivate_prios(q, cl);
606 	cl->prio_activity = 0;
607 	list_del_init(&cl->un.leaf.drop_list);
608 }
609 
610 static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
611 {
612 	int ret;
613 	struct htb_sched *q = qdisc_priv(sch);
614 	struct htb_class *cl = htb_classify(skb, sch, &ret);
615 
616 	if (cl == HTB_DIRECT) {
617 		/* enqueue to helper queue */
618 		if (q->direct_queue.qlen < q->direct_qlen) {
619 			__skb_queue_tail(&q->direct_queue, skb);
620 			q->direct_pkts++;
621 		} else {
622 			kfree_skb(skb);
623 			sch->qstats.drops++;
624 			return NET_XMIT_DROP;
625 		}
626 #ifdef CONFIG_NET_CLS_ACT
627 	} else if (!cl) {
628 		if (ret == NET_XMIT_BYPASS)
629 			sch->qstats.drops++;
630 		kfree_skb(skb);
631 		return ret;
632 #endif
633 	} else if (cl->un.leaf.q->enqueue(skb, cl->un.leaf.q) !=
634 		   NET_XMIT_SUCCESS) {
635 		sch->qstats.drops++;
636 		cl->qstats.drops++;
637 		return NET_XMIT_DROP;
638 	} else {
639 		cl->bstats.packets++;
640 		cl->bstats.bytes += skb->len;
641 		htb_activate(q, cl);
642 	}
643 
644 	sch->q.qlen++;
645 	sch->bstats.packets++;
646 	sch->bstats.bytes += skb->len;
647 	return NET_XMIT_SUCCESS;
648 }
649 
650 /* TODO: requeuing packet charges it to policers again !! */
651 static int htb_requeue(struct sk_buff *skb, struct Qdisc *sch)
652 {
653 	struct htb_sched *q = qdisc_priv(sch);
654 	int ret = NET_XMIT_SUCCESS;
655 	struct htb_class *cl = htb_classify(skb, sch, &ret);
656 	struct sk_buff *tskb;
657 
658 	if (cl == HTB_DIRECT || !cl) {
659 		/* enqueue to helper queue */
660 		if (q->direct_queue.qlen < q->direct_qlen && cl) {
661 			__skb_queue_head(&q->direct_queue, skb);
662 		} else {
663 			__skb_queue_head(&q->direct_queue, skb);
664 			tskb = __skb_dequeue_tail(&q->direct_queue);
665 			kfree_skb(tskb);
666 			sch->qstats.drops++;
667 			return NET_XMIT_CN;
668 		}
669 	} else if (cl->un.leaf.q->ops->requeue(skb, cl->un.leaf.q) !=
670 		   NET_XMIT_SUCCESS) {
671 		sch->qstats.drops++;
672 		cl->qstats.drops++;
673 		return NET_XMIT_DROP;
674 	} else
675 		htb_activate(q, cl);
676 
677 	sch->q.qlen++;
678 	sch->qstats.requeues++;
679 	return NET_XMIT_SUCCESS;
680 }
681 
682 static void htb_timer(unsigned long arg)
683 {
684 	struct Qdisc *sch = (struct Qdisc *)arg;
685 	sch->flags &= ~TCQ_F_THROTTLED;
686 	wmb();
687 	netif_schedule(sch->dev);
688 }
689 
690 #ifdef HTB_RATECM
691 #define RT_GEN(D,R) R+=D-(R/HTB_EWMAC);D=0
692 static void htb_rate_timer(unsigned long arg)
693 {
694 	struct Qdisc *sch = (struct Qdisc *)arg;
695 	struct htb_sched *q = qdisc_priv(sch);
696 	struct hlist_node *p;
697 	struct htb_class *cl;
698 
699 
700 	/* lock queue so that we can muck with it */
701 	spin_lock_bh(&sch->dev->queue_lock);
702 
703 	q->rttim.expires = jiffies + HZ;
704 	add_timer(&q->rttim);
705 
706 	/* scan and recompute one bucket at a time */
707 	if (++q->recmp_bucket >= HTB_HSIZE)
708 		q->recmp_bucket = 0;
709 
710 	hlist_for_each_entry(cl,p, q->hash + q->recmp_bucket, hlist) {
711 		RT_GEN(cl->sum_bytes, cl->rate_bytes);
712 		RT_GEN(cl->sum_packets, cl->rate_packets);
713 	}
714 	spin_unlock_bh(&sch->dev->queue_lock);
715 }
716 #endif
717 
718 /**
719  * htb_charge_class - charges amount "bytes" to leaf and ancestors
720  *
721  * The routine assumes that a packet "bytes" long was dequeued from leaf cl
722  * borrowing from "level". It accounts the bytes to the ceil leaky bucket of
723  * the leaf and all its ancestors and to the rate bucket of ancestors at
724  * levels "level" and higher. It also handles a possible change of mode
725  * resulting from the update. Note that the mode can also increase here
726  * (MAY_BORROW to CAN_SEND) because we can use a more precise clock than the
727  * event queue here. In such a case we remove the class from the event queue first.
728  */
729 static void htb_charge_class(struct htb_sched *q, struct htb_class *cl,
730 			     int level, int bytes)
731 {
732 	long toks, diff;
733 	enum htb_cmode old_mode;
734 
735 #define HTB_ACCNT(T,B,R) toks = diff + cl->T; \
736 	if (toks > cl->B) toks = cl->B; \
737 	toks -= L2T(cl, cl->R, bytes); \
738 	if (toks <= -cl->mbuffer) toks = 1-cl->mbuffer; \
739 	cl->T = toks
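	/* Worked example of HTB_ACCNT(tokens, buffer, rate), with illustrative
	   numbers: cl->tokens == 9000, diff == 2000, cl->buffer == 10000 and
	   L2T() == 1500 gives toks = min(9000 + 2000, 10000) - 1500 == 8500.
	   The lower clamp at 1 - cl->mbuffer keeps a long over-limit class
	   from accumulating an unbounded token debt. */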
740 
741 	while (cl) {
742 		diff = PSCHED_TDIFF_SAFE(q->now, cl->t_c, (u32) cl->mbuffer);
743 		if (cl->level >= level) {
744 			if (cl->level == level)
745 				cl->xstats.lends++;
746 			HTB_ACCNT(tokens, buffer, rate);
747 		} else {
748 			cl->xstats.borrows++;
749 			cl->tokens += diff;	/* we moved t_c; update tokens */
750 		}
751 		HTB_ACCNT(ctokens, cbuffer, ceil);
752 		cl->t_c = q->now;
753 
754 		old_mode = cl->cmode;
755 		diff = 0;
756 		htb_change_class_mode(q, cl, &diff);
757 		if (old_mode != cl->cmode) {
758 			if (old_mode != HTB_CAN_SEND)
759 				htb_safe_rb_erase(&cl->pq_node, q->wait_pq + cl->level);
760 			if (cl->cmode != HTB_CAN_SEND)
761 				htb_add_to_wait_tree(q, cl, diff);
762 		}
763 #ifdef HTB_RATECM
764 		/* update rate counters */
765 		cl->sum_bytes += bytes;
766 		cl->sum_packets++;
767 #endif
768 
769 		/* update byte stats except for leaves which are already updated */
770 		if (cl->level) {
771 			cl->bstats.bytes += bytes;
772 			cl->bstats.packets++;
773 		}
774 		cl = cl->parent;
775 	}
776 }
777 
778 /**
779  * htb_do_events - make mode changes to classes at the level
780  *
781  * Scans the event queue for pending events and applies them. Returns the
782  * number of jiffies to the next pending event (0 if there is no event in the pq).
783  * Note: only events with cl->pq_key <= jiffies are applied.
784  */
785 static long htb_do_events(struct htb_sched *q, int level)
786 {
787 	int i;
788 
789 	for (i = 0; i < 500; i++) {
790 		struct htb_class *cl;
791 		long diff;
792 		struct rb_node *p = rb_first(&q->wait_pq[level]);
793 
794 		if (!p)
795 			return 0;
796 
797 		cl = rb_entry(p, struct htb_class, pq_node);
798 		if (time_after(cl->pq_key, q->jiffies)) {
799 			return cl->pq_key - q->jiffies;
800 		}
801 		htb_safe_rb_erase(p, q->wait_pq + level);
802 		diff = PSCHED_TDIFF_SAFE(q->now, cl->t_c, (u32) cl->mbuffer);
803 		htb_change_class_mode(q, cl, &diff);
804 		if (cl->cmode != HTB_CAN_SEND)
805 			htb_add_to_wait_tree(q, cl, diff);
806 	}
807 	if (net_ratelimit())
808 		printk(KERN_WARNING "htb: too many events !\n");
809 	return HZ / 10;
810 }
811 
812 /* Returns class->node+prio from the id-tree where the class's id is >= id.
813    Returns NULL if no such class exists. */
814 static struct rb_node *htb_id_find_next_upper(int prio, struct rb_node *n,
815 					      u32 id)
816 {
817 	struct rb_node *r = NULL;
818 	while (n) {
819 		struct htb_class *cl =
820 		    rb_entry(n, struct htb_class, node[prio]);
821 		if (id == cl->classid)
822 			return n;
823 
824 		if (id > cl->classid) {
825 			n = n->rb_right;
826 		} else {
827 			r = n;
828 			n = n->rb_left;
829 		}
830 	}
831 	return r;
832 }
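/* Example (illustrative classids): in a prio subtree holding 1:2, 1:5 and
   1:9, a lookup with id == 1:4 returns the node of 1:5 (the smallest
   classid >= id), id == 1:5 returns 1:5 itself, and anything above 1:9
   returns NULL. */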
833 
834 /**
835  * htb_lookup_leaf - returns next leaf class in DRR order
836  *
837  * Find the leaf the current feed pointer points to.
838  */
839 static struct htb_class *htb_lookup_leaf(struct rb_root *tree, int prio,
840 					 struct rb_node **pptr, u32 * pid)
841 {
842 	int i;
843 	struct {
844 		struct rb_node *root;
845 		struct rb_node **pptr;
846 		u32 *pid;
847 	} stk[TC_HTB_MAXDEPTH], *sp = stk;
848 
849 	BUG_TRAP(tree->rb_node);
850 	sp->root = tree->rb_node;
851 	sp->pptr = pptr;
852 	sp->pid = pid;
853 
854 	for (i = 0; i < 65535; i++) {
855 		if (!*sp->pptr && *sp->pid) {
856 			/* ptr was invalidated but id is valid - try to recover
857 			   the original or next ptr */
858 			*sp->pptr =
859 			    htb_id_find_next_upper(prio, sp->root, *sp->pid);
860 		}
861 		*sp->pid = 0;	/* ptr is valid now, so remove this hint as it
862 				   can become out of date quickly */
863 		if (!*sp->pptr) {	/* we are at right end; rewind & go up */
864 			*sp->pptr = sp->root;
865 			while ((*sp->pptr)->rb_left)
866 				*sp->pptr = (*sp->pptr)->rb_left;
867 			if (sp > stk) {
868 				sp--;
869 				BUG_TRAP(*sp->pptr);
870 				if (!*sp->pptr)
871 					return NULL;
872 				htb_next_rb_node(sp->pptr);
873 			}
874 		} else {
875 			struct htb_class *cl;
876 			cl = rb_entry(*sp->pptr, struct htb_class, node[prio]);
877 			if (!cl->level)
878 				return cl;
879 			(++sp)->root = cl->un.inner.feed[prio].rb_node;
880 			sp->pptr = cl->un.inner.ptr + prio;
881 			sp->pid = cl->un.inner.last_ptr_id + prio;
882 		}
883 	}
884 	BUG_TRAP(0);
885 	return NULL;
886 }
887 
888 /* dequeues a packet at the given priority and level; call only if
889    you are sure that there is an active class at prio/level */
890 static struct sk_buff *htb_dequeue_tree(struct htb_sched *q, int prio,
891 					int level)
892 {
893 	struct sk_buff *skb = NULL;
894 	struct htb_class *cl, *start;
895 	/* look initial class up in the row */
896 	start = cl = htb_lookup_leaf(q->row[level] + prio, prio,
897 				     q->ptr[level] + prio,
898 				     q->last_ptr_id[level] + prio);
899 
900 	do {
901 next:
902 		BUG_TRAP(cl);
903 		if (!cl)
904 			return NULL;
905 
906 		/* the class can be empty - it is unlikely but can be true if the
907 		   leaf qdisc drops packets in its enqueue routine or if someone
908 		   used the graft operation on the leaf since the last dequeue;
909 		   simply deactivate and skip such a class */
910 		if (unlikely(cl->un.leaf.q->q.qlen == 0)) {
911 			struct htb_class *next;
912 			htb_deactivate(q, cl);
913 
914 			/* row/level might become empty */
915 			if ((q->row_mask[level] & (1 << prio)) == 0)
916 				return NULL;
917 
918 			next = htb_lookup_leaf(q->row[level] + prio,
919 					       prio, q->ptr[level] + prio,
920 					       q->last_ptr_id[level] + prio);
921 
922 			if (cl == start)	/* fix start if we just deleted it */
923 				start = next;
924 			cl = next;
925 			goto next;
926 		}
927 
928 		skb = cl->un.leaf.q->dequeue(cl->un.leaf.q);
929 		if (likely(skb != NULL))
930 			break;
931 		if (!cl->warned) {
932 			printk(KERN_WARNING
933 			       "htb: class %X isn't work conserving ?!\n",
934 			       cl->classid);
935 			cl->warned = 1;
936 		}
937 		q->nwc_hit++;
938 		htb_next_rb_node((level ? cl->parent->un.inner.ptr : q->
939 				  ptr[0]) + prio);
940 		cl = htb_lookup_leaf(q->row[level] + prio, prio,
941 				     q->ptr[level] + prio,
942 				     q->last_ptr_id[level] + prio);
943 
944 	} while (cl != start);
945 
946 	if (likely(skb != NULL)) {
947 		if ((cl->un.leaf.deficit[level] -= skb->len) < 0) {
948 			cl->un.leaf.deficit[level] += cl->un.leaf.quantum;
949 			htb_next_rb_node((level ? cl->parent->un.inner.ptr : q->
950 					  ptr[0]) + prio);
951 		}
952 		/* this used to be after charge_class but this constellation
953 		   gives us slightly better performance */
954 		if (!cl->un.leaf.q->q.qlen)
955 			htb_deactivate(q, cl);
956 		htb_charge_class(q, cl, level, skb->len);
957 	}
958 	return skb;
959 }
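/* DRR illustration for the deficit handling above (made-up numbers): a
   leaf with quantum 1500 and deficit[level] == 200 that just sent a 1000
   byte packet drops to -800, gets its quantum added back (700) and the
   round robin pointer advances, so over time each active leaf receives
   service proportional to its quantum. */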
960 
961 static void htb_delay_by(struct Qdisc *sch, long delay)
962 {
963 	struct htb_sched *q = qdisc_priv(sch);
964 	if (delay <= 0)
965 		delay = 1;
966 	if (unlikely(delay > 5 * HZ)) {
967 		if (net_ratelimit())
968 			printk(KERN_INFO "HTB delay %ld > 5sec\n", delay);
969 		delay = 5 * HZ;
970 	}
971 	/* why don't we use jiffies here? because expires can be in the past */
972 	mod_timer(&q->timer, q->jiffies + delay);
973 	sch->flags |= TCQ_F_THROTTLED;
974 	sch->qstats.overlimits++;
975 }
976 
977 static struct sk_buff *htb_dequeue(struct Qdisc *sch)
978 {
979 	struct sk_buff *skb = NULL;
980 	struct htb_sched *q = qdisc_priv(sch);
981 	int level;
982 	long min_delay;
983 
984 	q->jiffies = jiffies;
985 
986 	/* try to dequeue direct packets as high prio (!) to minimize cpu work */
987 	skb = __skb_dequeue(&q->direct_queue);
988 	if (skb != NULL) {
989 		sch->flags &= ~TCQ_F_THROTTLED;
990 		sch->q.qlen--;
991 		return skb;
992 	}
993 
994 	if (!sch->q.qlen)
995 		goto fin;
996 	PSCHED_GET_TIME(q->now);
997 
998 	min_delay = LONG_MAX;
999 	q->nwc_hit = 0;
1000 	for (level = 0; level < TC_HTB_MAXDEPTH; level++) {
1001 		/* common case optimization - skip event handler quickly */
1002 		int m;
1003 		long delay;
1004 		if (time_after_eq(q->jiffies, q->near_ev_cache[level])) {
1005 			delay = htb_do_events(q, level);
1006 			q->near_ev_cache[level] =
1007 			    q->jiffies + (delay ? delay : HZ);
1008 		} else
1009 			delay = q->near_ev_cache[level] - q->jiffies;
1010 
1011 		if (delay && min_delay > delay)
1012 			min_delay = delay;
1013 		m = ~q->row_mask[level];
1014 		while (m != (int)(-1)) {
1015 			int prio = ffz(m);
1016 			m |= 1 << prio;
1017 			skb = htb_dequeue_tree(q, prio, level);
1018 			if (likely(skb != NULL)) {
1019 				sch->q.qlen--;
1020 				sch->flags &= ~TCQ_F_THROTTLED;
1021 				goto fin;
1022 			}
1023 		}
1024 	}
1025 	htb_delay_by(sch, min_delay > 5 * HZ ? 5 * HZ : min_delay);
1026 fin:
1027 	return skb;
1028 }
1029 
1030 /* try to drop from each class (by prio) until one succeeds */
1031 static unsigned int htb_drop(struct Qdisc *sch)
1032 {
1033 	struct htb_sched *q = qdisc_priv(sch);
1034 	int prio;
1035 
1036 	for (prio = TC_HTB_NUMPRIO - 1; prio >= 0; prio--) {
1037 		struct list_head *p;
1038 		list_for_each(p, q->drops + prio) {
1039 			struct htb_class *cl = list_entry(p, struct htb_class,
1040 							  un.leaf.drop_list);
1041 			unsigned int len;
1042 			if (cl->un.leaf.q->ops->drop &&
1043 			    (len = cl->un.leaf.q->ops->drop(cl->un.leaf.q))) {
1044 				sch->q.qlen--;
1045 				if (!cl->un.leaf.q->q.qlen)
1046 					htb_deactivate(q, cl);
1047 				return len;
1048 			}
1049 		}
1050 	}
1051 	return 0;
1052 }
1053 
1054 /* reset all classes */
1055 /* always called under BH & queue lock */
1056 static void htb_reset(struct Qdisc *sch)
1057 {
1058 	struct htb_sched *q = qdisc_priv(sch);
1059 	int i;
1060 
1061 	for (i = 0; i < HTB_HSIZE; i++) {
1062 		struct hlist_node *p;
1063 		struct htb_class *cl;
1064 
1065 		hlist_for_each_entry(cl, p, q->hash + i, hlist) {
1066 			if (cl->level)
1067 				memset(&cl->un.inner, 0, sizeof(cl->un.inner));
1068 			else {
1069 				if (cl->un.leaf.q)
1070 					qdisc_reset(cl->un.leaf.q);
1071 				INIT_LIST_HEAD(&cl->un.leaf.drop_list);
1072 			}
1073 			cl->prio_activity = 0;
1074 			cl->cmode = HTB_CAN_SEND;
1075 
1076 		}
1077 	}
1078 	sch->flags &= ~TCQ_F_THROTTLED;
1079 	del_timer(&q->timer);
1080 	__skb_queue_purge(&q->direct_queue);
1081 	sch->q.qlen = 0;
1082 	memset(q->row, 0, sizeof(q->row));
1083 	memset(q->row_mask, 0, sizeof(q->row_mask));
1084 	memset(q->wait_pq, 0, sizeof(q->wait_pq));
1085 	memset(q->ptr, 0, sizeof(q->ptr));
1086 	for (i = 0; i < TC_HTB_NUMPRIO; i++)
1087 		INIT_LIST_HEAD(q->drops + i);
1088 }
1089 
1090 static int htb_init(struct Qdisc *sch, struct rtattr *opt)
1091 {
1092 	struct htb_sched *q = qdisc_priv(sch);
1093 	struct rtattr *tb[TCA_HTB_INIT];
1094 	struct tc_htb_glob *gopt;
1095 	int i;
1096 	if (!opt || rtattr_parse_nested(tb, TCA_HTB_INIT, opt) ||
1097 	    tb[TCA_HTB_INIT - 1] == NULL ||
1098 	    RTA_PAYLOAD(tb[TCA_HTB_INIT - 1]) < sizeof(*gopt)) {
1099 		printk(KERN_ERR "HTB: hey probably you have bad tc tool ?\n");
1100 		return -EINVAL;
1101 	}
1102 	gopt = RTA_DATA(tb[TCA_HTB_INIT - 1]);
1103 	if (gopt->version != HTB_VER >> 16) {
1104 		printk(KERN_ERR
1105 		       "HTB: need tc/htb version %d (minor is %d), you have %d\n",
1106 		       HTB_VER >> 16, HTB_VER & 0xffff, gopt->version);
1107 		return -EINVAL;
1108 	}
1109 
1110 	INIT_LIST_HEAD(&q->root);
1111 	for (i = 0; i < HTB_HSIZE; i++)
1112 		INIT_HLIST_HEAD(q->hash + i);
1113 	for (i = 0; i < TC_HTB_NUMPRIO; i++)
1114 		INIT_LIST_HEAD(q->drops + i);
1115 
1116 	init_timer(&q->timer);
1117 	skb_queue_head_init(&q->direct_queue);
1118 
1119 	q->direct_qlen = sch->dev->tx_queue_len;
1120 	if (q->direct_qlen < 2)	/* some devices have zero tx_queue_len */
1121 		q->direct_qlen = 2;
1122 	q->timer.function = htb_timer;
1123 	q->timer.data = (unsigned long)sch;
1124 
1125 #ifdef HTB_RATECM
1126 	init_timer(&q->rttim);
1127 	q->rttim.function = htb_rate_timer;
1128 	q->rttim.data = (unsigned long)sch;
1129 	q->rttim.expires = jiffies + HZ;
1130 	add_timer(&q->rttim);
1131 #endif
1132 	if ((q->rate2quantum = gopt->rate2quantum) < 1)
1133 		q->rate2quantum = 1;
1134 	q->defcls = gopt->defcls;
1135 
1136 	return 0;
1137 }
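/* Typical userspace setup that exercises this init path (illustrative tc
   commands; interface name and rates are examples only):

     tc qdisc add dev eth0 root handle 1: htb default 20 r2q 10
     tc class add dev eth0 parent 1:  classid 1:1  htb rate 1mbit ceil 1mbit
     tc class add dev eth0 parent 1:1 classid 1:20 htb rate 500kbit ceil 1mbit

   "default 20" arrives here as gopt->defcls and "r2q 10" as
   gopt->rate2quantum; unclassified traffic is later looked up as
   MAJOR:defcls in htb_classify(). */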
1138 
1139 static int htb_dump(struct Qdisc *sch, struct sk_buff *skb)
1140 {
1141 	struct htb_sched *q = qdisc_priv(sch);
1142 	unsigned char *b = skb->tail;
1143 	struct rtattr *rta;
1144 	struct tc_htb_glob gopt;
1145 	spin_lock_bh(&sch->dev->queue_lock);
1146 	gopt.direct_pkts = q->direct_pkts;
1147 
1148 	gopt.version = HTB_VER;
1149 	gopt.rate2quantum = q->rate2quantum;
1150 	gopt.defcls = q->defcls;
1151 	gopt.debug = 0;
1152 	rta = (struct rtattr *)b;
1153 	RTA_PUT(skb, TCA_OPTIONS, 0, NULL);
1154 	RTA_PUT(skb, TCA_HTB_INIT, sizeof(gopt), &gopt);
1155 	rta->rta_len = skb->tail - b;
1156 	spin_unlock_bh(&sch->dev->queue_lock);
1157 	return skb->len;
1158 rtattr_failure:
1159 	spin_unlock_bh(&sch->dev->queue_lock);
1160 	skb_trim(skb, skb->tail - skb->data);
1161 	return -1;
1162 }
1163 
1164 static int htb_dump_class(struct Qdisc *sch, unsigned long arg,
1165 			  struct sk_buff *skb, struct tcmsg *tcm)
1166 {
1167 	struct htb_class *cl = (struct htb_class *)arg;
1168 	unsigned char *b = skb->tail;
1169 	struct rtattr *rta;
1170 	struct tc_htb_opt opt;
1171 
1172 	spin_lock_bh(&sch->dev->queue_lock);
1173 	tcm->tcm_parent = cl->parent ? cl->parent->classid : TC_H_ROOT;
1174 	tcm->tcm_handle = cl->classid;
1175 	if (!cl->level && cl->un.leaf.q)
1176 		tcm->tcm_info = cl->un.leaf.q->handle;
1177 
1178 	rta = (struct rtattr *)b;
1179 	RTA_PUT(skb, TCA_OPTIONS, 0, NULL);
1180 
1181 	memset(&opt, 0, sizeof(opt));
1182 
1183 	opt.rate = cl->rate->rate;
1184 	opt.buffer = cl->buffer;
1185 	opt.ceil = cl->ceil->rate;
1186 	opt.cbuffer = cl->cbuffer;
1187 	opt.quantum = cl->un.leaf.quantum;
1188 	opt.prio = cl->un.leaf.prio;
1189 	opt.level = cl->level;
1190 	RTA_PUT(skb, TCA_HTB_PARMS, sizeof(opt), &opt);
1191 	rta->rta_len = skb->tail - b;
1192 	spin_unlock_bh(&sch->dev->queue_lock);
1193 	return skb->len;
1194 rtattr_failure:
1195 	spin_unlock_bh(&sch->dev->queue_lock);
1196 	skb_trim(skb, b - skb->data);
1197 	return -1;
1198 }
1199 
1200 static int
1201 htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d)
1202 {
1203 	struct htb_class *cl = (struct htb_class *)arg;
1204 
1205 #ifdef HTB_RATECM
1206 	cl->rate_est.bps = cl->rate_bytes / (HTB_EWMAC * HTB_HSIZE);
1207 	cl->rate_est.pps = cl->rate_packets / (HTB_EWMAC * HTB_HSIZE);
1208 #endif
1209 
1210 	if (!cl->level && cl->un.leaf.q)
1211 		cl->qstats.qlen = cl->un.leaf.q->q.qlen;
1212 	cl->xstats.tokens = cl->tokens;
1213 	cl->xstats.ctokens = cl->ctokens;
1214 
1215 	if (gnet_stats_copy_basic(d, &cl->bstats) < 0 ||
1216 	    gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
1217 	    gnet_stats_copy_queue(d, &cl->qstats) < 0)
1218 		return -1;
1219 
1220 	return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats));
1221 }
1222 
1223 static int htb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
1224 		     struct Qdisc **old)
1225 {
1226 	struct htb_class *cl = (struct htb_class *)arg;
1227 
1228 	if (cl && !cl->level) {
1229 		if (new == NULL &&
1230 		    (new = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops,
1231 					     cl->classid))
1232 		    == NULL)
1233 			return -ENOBUFS;
1234 		sch_tree_lock(sch);
1235 		if ((*old = xchg(&cl->un.leaf.q, new)) != NULL) {
1236 			qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
1237 			qdisc_reset(*old);
1238 		}
1239 		sch_tree_unlock(sch);
1240 		return 0;
1241 	}
1242 	return -ENOENT;
1243 }
1244 
1245 static struct Qdisc *htb_leaf(struct Qdisc *sch, unsigned long arg)
1246 {
1247 	struct htb_class *cl = (struct htb_class *)arg;
1248 	return (cl && !cl->level) ? cl->un.leaf.q : NULL;
1249 }
1250 
1251 static void htb_qlen_notify(struct Qdisc *sch, unsigned long arg)
1252 {
1253 	struct htb_class *cl = (struct htb_class *)arg;
1254 
1255 	if (cl->un.leaf.q->q.qlen == 0)
1256 		htb_deactivate(qdisc_priv(sch), cl);
1257 }
1258 
1259 static unsigned long htb_get(struct Qdisc *sch, u32 classid)
1260 {
1261 	struct htb_class *cl = htb_find(classid, sch);
1262 	if (cl)
1263 		cl->refcnt++;
1264 	return (unsigned long)cl;
1265 }
1266 
1267 static void htb_destroy_filters(struct tcf_proto **fl)
1268 {
1269 	struct tcf_proto *tp;
1270 
1271 	while ((tp = *fl) != NULL) {
1272 		*fl = tp->next;
1273 		tcf_destroy(tp);
1274 	}
1275 }
1276 
1277 static inline int htb_parent_last_child(struct htb_class *cl)
1278 {
1279 	if (!cl->parent)
1280 		/* the root class */
1281 		return 0;
1282 
1283 	if (!(cl->parent->children.next == &cl->sibling &&
1284 		cl->parent->children.prev == &cl->sibling))
1285 		/* not the last child */
1286 		return 0;
1287 
1288 	return 1;
1289 }
1290 
1291 static void htb_parent_to_leaf(struct htb_class *cl, struct Qdisc *new_q)
1292 {
1293 	struct htb_class *parent = cl->parent;
1294 
1295 	BUG_TRAP(!cl->level && cl->un.leaf.q && !cl->prio_activity);
1296 
1297 	parent->level = 0;
1298 	memset(&parent->un.inner, 0, sizeof(parent->un.inner));
1299 	INIT_LIST_HEAD(&parent->un.leaf.drop_list);
1300 	parent->un.leaf.q = new_q ? new_q : &noop_qdisc;
1301 	parent->un.leaf.quantum = parent->quantum;
1302 	parent->un.leaf.prio = parent->prio;
1303 	parent->tokens = parent->buffer;
1304 	parent->ctokens = parent->cbuffer;
1305 	PSCHED_GET_TIME(parent->t_c);
1306 	parent->cmode = HTB_CAN_SEND;
1307 }
1308 
1309 static void htb_destroy_class(struct Qdisc *sch, struct htb_class *cl)
1310 {
1311 	struct htb_sched *q = qdisc_priv(sch);
1312 
1313 	if (!cl->level) {
1314 		BUG_TRAP(cl->un.leaf.q);
1315 		qdisc_destroy(cl->un.leaf.q);
1316 	}
1317 	qdisc_put_rtab(cl->rate);
1318 	qdisc_put_rtab(cl->ceil);
1319 
1320 	htb_destroy_filters(&cl->filter_list);
1321 
1322 	while (!list_empty(&cl->children))
1323 		htb_destroy_class(sch, list_entry(cl->children.next,
1324 						  struct htb_class, sibling));
1325 
1326 	/* note: this delete may happen twice (see htb_delete) */
1327 	hlist_del_init(&cl->hlist);
1328 	list_del(&cl->sibling);
1329 
1330 	if (cl->prio_activity)
1331 		htb_deactivate(q, cl);
1332 
1333 	if (cl->cmode != HTB_CAN_SEND)
1334 		htb_safe_rb_erase(&cl->pq_node, q->wait_pq + cl->level);
1335 
1336 	kfree(cl);
1337 }
1338 
1339 /* always called under BH & queue lock */
1340 static void htb_destroy(struct Qdisc *sch)
1341 {
1342 	struct htb_sched *q = qdisc_priv(sch);
1343 
1344 	del_timer_sync(&q->timer);
1345 #ifdef HTB_RATECM
1346 	del_timer_sync(&q->rttim);
1347 #endif
1348 	/* This line used to be after htb_destroy_class call below
1349 	   and surprisingly it worked in 2.4. But it must precede it
1350 	   because filters need their target class alive to be able to call
1351 	   unbind_filter on it (without an Oops). */
1352 	htb_destroy_filters(&q->filter_list);
1353 
1354 	while (!list_empty(&q->root))
1355 		htb_destroy_class(sch, list_entry(q->root.next,
1356 						  struct htb_class, sibling));
1357 
1358 	__skb_queue_purge(&q->direct_queue);
1359 }
1360 
1361 static int htb_delete(struct Qdisc *sch, unsigned long arg)
1362 {
1363 	struct htb_sched *q = qdisc_priv(sch);
1364 	struct htb_class *cl = (struct htb_class *)arg;
1365 	unsigned int qlen;
1366 	struct Qdisc *new_q = NULL;
1367 	int last_child = 0;
1368 
1369 	// TODO: why don't we allow deleting a subtree? references? does the
1370 	// tc subsystem guarantee us that in htb_destroy it holds no class
1371 	// refs so that we can remove children safely there?
1372 	if (!list_empty(&cl->children) || cl->filter_cnt)
1373 		return -EBUSY;
1374 
1375 	if (!cl->level && htb_parent_last_child(cl)) {
1376 		new_q = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops,
1377 						cl->parent->classid);
1378 		last_child = 1;
1379 	}
1380 
1381 	sch_tree_lock(sch);
1382 
1383 	/* delete from hash and active; remainder in destroy_class */
1384 	hlist_del_init(&cl->hlist);
1385 
1386 	if (!cl->level) {
1387 		qlen = cl->un.leaf.q->q.qlen;
1388 		qdisc_reset(cl->un.leaf.q);
1389 		qdisc_tree_decrease_qlen(cl->un.leaf.q, qlen);
1390 	}
1391 
1392 	if (cl->prio_activity)
1393 		htb_deactivate(q, cl);
1394 
1395 	if (last_child)
1396 		htb_parent_to_leaf(cl, new_q);
1397 
1398 	if (--cl->refcnt == 0)
1399 		htb_destroy_class(sch, cl);
1400 
1401 	sch_tree_unlock(sch);
1402 	return 0;
1403 }
1404 
1405 static void htb_put(struct Qdisc *sch, unsigned long arg)
1406 {
1407 	struct htb_class *cl = (struct htb_class *)arg;
1408 
1409 	if (--cl->refcnt == 0)
1410 		htb_destroy_class(sch, cl);
1411 }
1412 
1413 static int htb_change_class(struct Qdisc *sch, u32 classid,
1414 			    u32 parentid, struct rtattr **tca,
1415 			    unsigned long *arg)
1416 {
1417 	int err = -EINVAL;
1418 	struct htb_sched *q = qdisc_priv(sch);
1419 	struct htb_class *cl = (struct htb_class *)*arg, *parent;
1420 	struct rtattr *opt = tca[TCA_OPTIONS - 1];
1421 	struct qdisc_rate_table *rtab = NULL, *ctab = NULL;
1422 	struct rtattr *tb[TCA_HTB_RTAB];
1423 	struct tc_htb_opt *hopt;
1424 
1425 	/* extract all subattrs from opt attr */
1426 	if (!opt || rtattr_parse_nested(tb, TCA_HTB_RTAB, opt) ||
1427 	    tb[TCA_HTB_PARMS - 1] == NULL ||
1428 	    RTA_PAYLOAD(tb[TCA_HTB_PARMS - 1]) < sizeof(*hopt))
1429 		goto failure;
1430 
1431 	parent = parentid == TC_H_ROOT ? NULL : htb_find(parentid, sch);
1432 
1433 	hopt = RTA_DATA(tb[TCA_HTB_PARMS - 1]);
1434 
1435 	rtab = qdisc_get_rtab(&hopt->rate, tb[TCA_HTB_RTAB - 1]);
1436 	ctab = qdisc_get_rtab(&hopt->ceil, tb[TCA_HTB_CTAB - 1]);
1437 	if (!rtab || !ctab)
1438 		goto failure;
1439 
1440 	if (!cl) {		/* new class */
1441 		struct Qdisc *new_q;
1442 		int prio;
1443 
1444 		/* check for valid classid */
1445 		if (!classid || TC_H_MAJ(classid ^ sch->handle)
1446 		    || htb_find(classid, sch))
1447 			goto failure;
1448 
1449 		/* check maximal depth */
1450 		if (parent && parent->parent && parent->parent->level < 2) {
1451 			printk(KERN_ERR "htb: tree is too deep\n");
1452 			goto failure;
1453 		}
1454 		err = -ENOBUFS;
1455 		if ((cl = kzalloc(sizeof(*cl), GFP_KERNEL)) == NULL)
1456 			goto failure;
1457 
1458 		cl->refcnt = 1;
1459 		INIT_LIST_HEAD(&cl->sibling);
1460 		INIT_HLIST_NODE(&cl->hlist);
1461 		INIT_LIST_HEAD(&cl->children);
1462 		INIT_LIST_HEAD(&cl->un.leaf.drop_list);
1463 		RB_CLEAR_NODE(&cl->pq_node);
1464 
1465 		for (prio = 0; prio < TC_HTB_NUMPRIO; prio++)
1466 			RB_CLEAR_NODE(&cl->node[prio]);
1467 
1468 		/* create leaf qdisc early because it uses kmalloc(GFP_KERNEL)
1469 		   so it can't be used inside of sch_tree_lock
1470 		   -- thanks to Karlis Peisenieks */
1471 		new_q = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops, classid);
1472 		sch_tree_lock(sch);
1473 		if (parent && !parent->level) {
1474 			unsigned int qlen = parent->un.leaf.q->q.qlen;
1475 
1476 			/* turn parent into inner node */
1477 			qdisc_reset(parent->un.leaf.q);
1478 			qdisc_tree_decrease_qlen(parent->un.leaf.q, qlen);
1479 			qdisc_destroy(parent->un.leaf.q);
1480 			if (parent->prio_activity)
1481 				htb_deactivate(q, parent);
1482 
1483 			/* remove from evt list because of level change */
1484 			if (parent->cmode != HTB_CAN_SEND) {
1485 				htb_safe_rb_erase(&parent->pq_node, q->wait_pq);
1486 				parent->cmode = HTB_CAN_SEND;
1487 			}
1488 			parent->level = (parent->parent ? parent->parent->level
1489 					 : TC_HTB_MAXDEPTH) - 1;
1490 			memset(&parent->un.inner, 0, sizeof(parent->un.inner));
1491 		}
1492 		/* leaf (we) needs elementary qdisc */
1493 		cl->un.leaf.q = new_q ? new_q : &noop_qdisc;
1494 
1495 		cl->classid = classid;
1496 		cl->parent = parent;
1497 
1498 		/* set class to be in HTB_CAN_SEND state */
1499 		cl->tokens = hopt->buffer;
1500 		cl->ctokens = hopt->cbuffer;
1501 		cl->mbuffer = PSCHED_JIFFIE2US(HZ * 60);	/* 1min */
1502 		PSCHED_GET_TIME(cl->t_c);
1503 		cl->cmode = HTB_CAN_SEND;
1504 
1505 		/* attach to the hash list and parent's family */
1506 		hlist_add_head(&cl->hlist, q->hash + htb_hash(classid));
1507 		list_add_tail(&cl->sibling,
1508 			      parent ? &parent->children : &q->root);
1509 	} else
1510 		sch_tree_lock(sch);
1511 
1512 	/* there used to be a nasty bug here: we have to check that the node
1513 	   is really a leaf before changing cl->un.leaf ! */
1514 	if (!cl->level) {
1515 		cl->un.leaf.quantum = rtab->rate.rate / q->rate2quantum;
1516 		if (!hopt->quantum && cl->un.leaf.quantum < 1000) {
1517 			printk(KERN_WARNING
1518 			       "HTB: quantum of class %X is small. Consider r2q change.\n",
1519 			       cl->classid);
1520 			cl->un.leaf.quantum = 1000;
1521 		}
1522 		if (!hopt->quantum && cl->un.leaf.quantum > 200000) {
1523 			printk(KERN_WARNING
1524 			       "HTB: quantum of class %X is big. Consider r2q change.\n",
1525 			       cl->classid);
1526 			cl->un.leaf.quantum = 200000;
1527 		}
1528 		if (hopt->quantum)
1529 			cl->un.leaf.quantum = hopt->quantum;
1530 		if ((cl->un.leaf.prio = hopt->prio) >= TC_HTB_NUMPRIO)
1531 			cl->un.leaf.prio = TC_HTB_NUMPRIO - 1;
1532 
1533 		/* backup for htb_parent_to_leaf */
1534 		cl->quantum = cl->un.leaf.quantum;
1535 		cl->prio = cl->un.leaf.prio;
1536 	}
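	/* Quantum arithmetic with illustrative numbers: a leaf configured with
	   rate 800kbit (100000 bytes/s) under r2q == 10 gets a quantum of
	   10000 bytes, while a 56kbit leaf would compute only 700 and be
	   clamped to 1000 with the warning above - which is why very slow
	   classes usually want an explicit "quantum" or a smaller r2q. */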
1537 
1538 	cl->buffer = hopt->buffer;
1539 	cl->cbuffer = hopt->cbuffer;
1540 	if (cl->rate)
1541 		qdisc_put_rtab(cl->rate);
1542 	cl->rate = rtab;
1543 	if (cl->ceil)
1544 		qdisc_put_rtab(cl->ceil);
1545 	cl->ceil = ctab;
1546 	sch_tree_unlock(sch);
1547 
1548 	*arg = (unsigned long)cl;
1549 	return 0;
1550 
1551 failure:
1552 	if (rtab)
1553 		qdisc_put_rtab(rtab);
1554 	if (ctab)
1555 		qdisc_put_rtab(ctab);
1556 	return err;
1557 }
1558 
1559 static struct tcf_proto **htb_find_tcf(struct Qdisc *sch, unsigned long arg)
1560 {
1561 	struct htb_sched *q = qdisc_priv(sch);
1562 	struct htb_class *cl = (struct htb_class *)arg;
1563 	struct tcf_proto **fl = cl ? &cl->filter_list : &q->filter_list;
1564 
1565 	return fl;
1566 }
1567 
1568 static unsigned long htb_bind_filter(struct Qdisc *sch, unsigned long parent,
1569 				     u32 classid)
1570 {
1571 	struct htb_sched *q = qdisc_priv(sch);
1572 	struct htb_class *cl = htb_find(classid, sch);
1573 
1574 	/*if (cl && !cl->level) return 0;
1575 	   The line above used to be there to prevent attaching filters to
1576 	   leaves. But at least the tc_index filter uses this just to get the
1577 	   class for other reasons, so we have to allow for it.
1578 	   ----
1579 	   19.6.2002 As Werner explained, it is ok - bind_filter is just
1580 	   another way to "lock" the class - unlike "get", this lock can
1581 	   be broken by the class during destroy, IIUC.
1582 	 */
1583 	if (cl)
1584 		cl->filter_cnt++;
1585 	else
1586 		q->filter_cnt++;
1587 	return (unsigned long)cl;
1588 }
1589 
1590 static void htb_unbind_filter(struct Qdisc *sch, unsigned long arg)
1591 {
1592 	struct htb_sched *q = qdisc_priv(sch);
1593 	struct htb_class *cl = (struct htb_class *)arg;
1594 
1595 	if (cl)
1596 		cl->filter_cnt--;
1597 	else
1598 		q->filter_cnt--;
1599 }
1600 
1601 static void htb_walk(struct Qdisc *sch, struct qdisc_walker *arg)
1602 {
1603 	struct htb_sched *q = qdisc_priv(sch);
1604 	int i;
1605 
1606 	if (arg->stop)
1607 		return;
1608 
1609 	for (i = 0; i < HTB_HSIZE; i++) {
1610 		struct hlist_node *p;
1611 		struct htb_class *cl;
1612 
1613 		hlist_for_each_entry(cl, p, q->hash + i, hlist) {
1614 			if (arg->count < arg->skip) {
1615 				arg->count++;
1616 				continue;
1617 			}
1618 			if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
1619 				arg->stop = 1;
1620 				return;
1621 			}
1622 			arg->count++;
1623 		}
1624 	}
1625 }
1626 
1627 static struct Qdisc_class_ops htb_class_ops = {
1628 	.graft		=	htb_graft,
1629 	.leaf		=	htb_leaf,
1630 	.qlen_notify	=	htb_qlen_notify,
1631 	.get		=	htb_get,
1632 	.put		=	htb_put,
1633 	.change		=	htb_change_class,
1634 	.delete		=	htb_delete,
1635 	.walk		=	htb_walk,
1636 	.tcf_chain	=	htb_find_tcf,
1637 	.bind_tcf	=	htb_bind_filter,
1638 	.unbind_tcf	=	htb_unbind_filter,
1639 	.dump		=	htb_dump_class,
1640 	.dump_stats	=	htb_dump_class_stats,
1641 };
1642 
1643 static struct Qdisc_ops htb_qdisc_ops = {
1644 	.next		=	NULL,
1645 	.cl_ops		=	&htb_class_ops,
1646 	.id		=	"htb",
1647 	.priv_size	=	sizeof(struct htb_sched),
1648 	.enqueue	=	htb_enqueue,
1649 	.dequeue	=	htb_dequeue,
1650 	.requeue	=	htb_requeue,
1651 	.drop		=	htb_drop,
1652 	.init		=	htb_init,
1653 	.reset		=	htb_reset,
1654 	.destroy	=	htb_destroy,
1655 	.change		=	NULL /* htb_change */,
1656 	.dump		=	htb_dump,
1657 	.owner		=	THIS_MODULE,
1658 };
1659 
1660 static int __init htb_module_init(void)
1661 {
1662 	return register_qdisc(&htb_qdisc_ops);
1663 }
1664 static void __exit htb_module_exit(void)
1665 {
1666 	unregister_qdisc(&htb_qdisc_ops);
1667 }
1668 
1669 module_init(htb_module_init)
1670 module_exit(htb_module_exit)
1671 MODULE_LICENSE("GPL");
1672