xref: /openbmc/linux/net/sched/sch_hfsc.c (revision 9b9c2cd4)
/*
 * Copyright (c) 2003 Patrick McHardy, <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * 2003-10-17 - Ported from altq
 */
/*
 * Copyright (c) 1997-1999 Carnegie Mellon University. All Rights Reserved.
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation is hereby granted (including for commercial or
 * for-profit use), provided that both the copyright notice and this
 * permission notice appear in all copies of the software, derivative
 * works, or modified versions, and any portions thereof.
 *
 * THIS SOFTWARE IS EXPERIMENTAL AND IS KNOWN TO HAVE BUGS, SOME OF
 * WHICH MAY HAVE SERIOUS CONSEQUENCES.  CARNEGIE MELLON PROVIDES THIS
 * SOFTWARE IN ITS ``AS IS'' CONDITION, AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 * Carnegie Mellon encourages (but does not require) users of this
 * software to return any improvements or extensions that they make,
 * and to grant Carnegie Mellon the rights to redistribute these
 * changes without encumbrance.
 */
/*
 * H-FSC is described in Proceedings of SIGCOMM'97,
 * "A Hierarchical Fair Service Curve Algorithm for Link-Sharing,
 * Real-Time and Priority Service"
 * by Ion Stoica, Hui Zhang, and T. S. Eugene Ng.
 *
 * Oleg Cherevko <olwi@aq.ml.com.ua> added the upperlimit for link-sharing.
 * when a class has an upperlimit, the fit-time is computed from the
 * upperlimit service curve.  the link-sharing scheduler does not schedule
 * a class whose fit-time exceeds the current time.
 */
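/*
 * Example (for illustration only, not part of the original source):
 * with the iproute2 "tc" front end, the three curve types handled by
 * this scheduler map onto the rt/ls/ul keywords, each taking a
 * two-piece linear curve [m1 d] m2, along these lines:
 *
 *	tc qdisc add dev eth0 root handle 1: hfsc default 10
 *	tc class add dev eth0 parent 1: classid 1:10 hfsc \
 *		rt m1 2mbit d 10ms m2 1mbit ls m2 500kbit ul m2 2mbit
 *
 * "rt" installs the real-time service curve (HFSC_RSC below), "ls" the
 * link-sharing curve (HFSC_FSC) and "ul" the upperlimit curve
 * (HFSC_USC); m1 is the slope of the first segment, d the duration of
 * that segment and m2 the slope of the second segment.
 */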

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/compiler.h>
#include <linux/spinlock.h>
#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/init.h>
#include <linux/rtnetlink.h>
#include <linux/pkt_sched.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <asm/div64.h>

/*
 * kernel internal service curve representation:
 *   coordinates are given by 64-bit unsigned integers.
 *   x-axis: unit is clock count.
 *   y-axis: unit is byte.
 *
 *   The service curve parameters are converted to the internal
 *   representation. The slope values are scaled to avoid overflow.
 *   The inverse slope values as well as the y-projection of the 1st
 *   segment are kept in order to avoid 64-bit divide operations
 *   that are expensive on 32-bit architectures.
 */

struct internal_sc {
	u64	sm1;	/* scaled slope of the 1st segment */
	u64	ism1;	/* scaled inverse-slope of the 1st segment */
	u64	dx;	/* the x-projection of the 1st segment */
	u64	dy;	/* the y-projection of the 1st segment */
	u64	sm2;	/* scaled slope of the 2nd segment */
	u64	ism2;	/* scaled inverse-slope of the 2nd segment */
};
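
/*
 * A two-segment curve as stored above (rough sketch, added for
 * illustration): starting at (x, y), the curve follows slope sm1 for
 * dx clock ticks, gaining dy bytes, then continues with slope sm2:
 *
 *	bytes
 *	  |                      .-'
 *	  |                   .-'   slope sm2
 *	y+dy . . . . . . . .-'
 *	  |              /
 *	  |             /   slope sm1
 *	  |            /
 *	y . . . . . . /
 *	  |           .
 *	  +-----------+------+-------> clock count
 *	              x     x+dx
 */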

/* runtime service curve */
struct runtime_sc {
	u64	x;	/* current starting position on x-axis */
	u64	y;	/* current starting position on y-axis */
	u64	sm1;	/* scaled slope of the 1st segment */
	u64	ism1;	/* scaled inverse-slope of the 1st segment */
	u64	dx;	/* the x-projection of the 1st segment */
	u64	dy;	/* the y-projection of the 1st segment */
	u64	sm2;	/* scaled slope of the 2nd segment */
	u64	ism2;	/* scaled inverse-slope of the 2nd segment */
};

enum hfsc_class_flags {
	HFSC_RSC = 0x1,
	HFSC_FSC = 0x2,
	HFSC_USC = 0x4
};

struct hfsc_class {
	struct Qdisc_class_common cl_common;
	unsigned int	refcnt;		/* usage count */

	struct gnet_stats_basic_packed bstats;
	struct gnet_stats_queue qstats;
	struct gnet_stats_rate_est64 rate_est;
	unsigned int	level;		/* class level in hierarchy */
	struct tcf_proto __rcu *filter_list; /* filter list */
	unsigned int	filter_cnt;	/* filter count */

	struct hfsc_sched *sched;	/* scheduler data */
	struct hfsc_class *cl_parent;	/* parent class */
	struct list_head siblings;	/* sibling classes */
	struct list_head children;	/* child classes */
	struct Qdisc	*qdisc;		/* leaf qdisc */

	struct rb_node el_node;		/* qdisc's eligible tree member */
	struct rb_root vt_tree;		/* active children sorted by cl_vt */
	struct rb_node vt_node;		/* parent's vt_tree member */
	struct rb_root cf_tree;		/* active children sorted by cl_f */
	struct rb_node cf_node;		/* parent's cf_heap member */
	struct list_head dlist;		/* drop list member */

	u64	cl_total;		/* total work in bytes */
	u64	cl_cumul;		/* cumulative work in bytes done by
					   real-time criteria */

	u64	cl_d;			/* deadline */
	u64	cl_e;			/* eligible time */
	u64	cl_vt;			/* virtual time */
	u64	cl_f;			/* time when this class will fit for
					   link-sharing, max(myf, cfmin) */
	u64	cl_myf;			/* my fit-time (calculated from this
					   class's own upperlimit curve) */
	u64	cl_myfadj;		/* my fit-time adjustment (to cancel
					   history dependence) */
	u64	cl_cfmin;		/* earliest children's fit-time (used
					   with cl_myf to obtain cl_f) */
	u64	cl_cvtmin;		/* minimal virtual time among the
					   children fit for link-sharing
					   (monotonic within a period) */
	u64	cl_vtadj;		/* intra-period cumulative vt
					   adjustment */
	u64	cl_vtoff;		/* inter-period cumulative vt offset */
	u64	cl_cvtmax;		/* max child's vt in the last period */
	u64	cl_cvtoff;		/* cumulative cvtmax of all periods */
	u64	cl_pcvtoff;		/* parent's cvtoff at initialization
					   time */

	struct internal_sc cl_rsc;	/* internal real-time service curve */
	struct internal_sc cl_fsc;	/* internal fair service curve */
	struct internal_sc cl_usc;	/* internal upperlimit service curve */
	struct runtime_sc cl_deadline;	/* deadline curve */
	struct runtime_sc cl_eligible;	/* eligible curve */
	struct runtime_sc cl_virtual;	/* virtual curve */
	struct runtime_sc cl_ulimit;	/* upperlimit curve */

	unsigned long	cl_flags;	/* which curves are valid */
	unsigned long	cl_vtperiod;	/* vt period sequence number */
	unsigned long	cl_parentperiod;/* parent's vt period sequence number*/
	unsigned long	cl_nactive;	/* number of active children */
};

struct hfsc_sched {
	u16	defcls;				/* default class id */
	struct hfsc_class root;			/* root class */
	struct Qdisc_class_hash clhash;		/* class hash */
	struct rb_root eligible;		/* eligible tree */
	struct list_head droplist;		/* active leaf class list (for
						   dropping) */
	struct qdisc_watchdog watchdog;		/* watchdog timer */
};

#define	HT_INFINITY	0xffffffffffffffffULL	/* infinite time value */


/*
 * eligible tree holds backlogged classes sorted by their eligible times.
 * there is one eligible tree per hfsc instance.
 */

static void
eltree_insert(struct hfsc_class *cl)
{
	struct rb_node **p = &cl->sched->eligible.rb_node;
	struct rb_node *parent = NULL;
	struct hfsc_class *cl1;

	while (*p != NULL) {
		parent = *p;
		cl1 = rb_entry(parent, struct hfsc_class, el_node);
		if (cl->cl_e >= cl1->cl_e)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&cl->el_node, parent, p);
	rb_insert_color(&cl->el_node, &cl->sched->eligible);
}

static inline void
eltree_remove(struct hfsc_class *cl)
{
	rb_erase(&cl->el_node, &cl->sched->eligible);
}

static inline void
eltree_update(struct hfsc_class *cl)
{
	eltree_remove(cl);
	eltree_insert(cl);
}

/* find the class with the minimum deadline among the eligible classes */
static inline struct hfsc_class *
eltree_get_mindl(struct hfsc_sched *q, u64 cur_time)
{
	struct hfsc_class *p, *cl = NULL;
	struct rb_node *n;

	for (n = rb_first(&q->eligible); n != NULL; n = rb_next(n)) {
		p = rb_entry(n, struct hfsc_class, el_node);
		if (p->cl_e > cur_time)
			break;
		if (cl == NULL || p->cl_d < cl->cl_d)
			cl = p;
	}
	return cl;
}

/* find the class with minimum eligible time among the eligible classes */
static inline struct hfsc_class *
eltree_get_minel(struct hfsc_sched *q)
{
	struct rb_node *n;

	n = rb_first(&q->eligible);
	if (n == NULL)
		return NULL;
	return rb_entry(n, struct hfsc_class, el_node);
}

/*
 * vttree holds backlogged child classes sorted by their virtual time.
 * each intermediate class has one vttree.
 */
static void
vttree_insert(struct hfsc_class *cl)
{
	struct rb_node **p = &cl->cl_parent->vt_tree.rb_node;
	struct rb_node *parent = NULL;
	struct hfsc_class *cl1;

	while (*p != NULL) {
		parent = *p;
		cl1 = rb_entry(parent, struct hfsc_class, vt_node);
		if (cl->cl_vt >= cl1->cl_vt)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&cl->vt_node, parent, p);
	rb_insert_color(&cl->vt_node, &cl->cl_parent->vt_tree);
}

static inline void
vttree_remove(struct hfsc_class *cl)
{
	rb_erase(&cl->vt_node, &cl->cl_parent->vt_tree);
}

static inline void
vttree_update(struct hfsc_class *cl)
{
	vttree_remove(cl);
	vttree_insert(cl);
}

static inline struct hfsc_class *
vttree_firstfit(struct hfsc_class *cl, u64 cur_time)
{
	struct hfsc_class *p;
	struct rb_node *n;

	for (n = rb_first(&cl->vt_tree); n != NULL; n = rb_next(n)) {
		p = rb_entry(n, struct hfsc_class, vt_node);
		if (p->cl_f <= cur_time)
			return p;
	}
	return NULL;
}

/*
 * get the leaf class with the minimum vt in the hierarchy
 */
static struct hfsc_class *
vttree_get_minvt(struct hfsc_class *cl, u64 cur_time)
{
	/* if root-class's cfmin is bigger than cur_time nothing to do */
	if (cl->cl_cfmin > cur_time)
		return NULL;

	while (cl->level > 0) {
		cl = vttree_firstfit(cl, cur_time);
		if (cl == NULL)
			return NULL;
		/*
		 * update parent's cl_cvtmin.
		 */
		if (cl->cl_parent->cl_cvtmin < cl->cl_vt)
			cl->cl_parent->cl_cvtmin = cl->cl_vt;
	}
	return cl;
}

static void
cftree_insert(struct hfsc_class *cl)
{
	struct rb_node **p = &cl->cl_parent->cf_tree.rb_node;
	struct rb_node *parent = NULL;
	struct hfsc_class *cl1;

	while (*p != NULL) {
		parent = *p;
		cl1 = rb_entry(parent, struct hfsc_class, cf_node);
		if (cl->cl_f >= cl1->cl_f)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&cl->cf_node, parent, p);
	rb_insert_color(&cl->cf_node, &cl->cl_parent->cf_tree);
}

static inline void
cftree_remove(struct hfsc_class *cl)
{
	rb_erase(&cl->cf_node, &cl->cl_parent->cf_tree);
}

static inline void
cftree_update(struct hfsc_class *cl)
{
	cftree_remove(cl);
	cftree_insert(cl);
}

/*
 * service curve support functions
 *
 *  external service curve parameters
 *	m: bps
 *	d: us
 *  internal service curve parameters
 *	sm: (bytes/psched_us) << SM_SHIFT
 *	ism: (psched_us/byte) << ISM_SHIFT
 *	dx: psched_us
 *
 * The clock source resolution with ktime and PSCHED_SHIFT 10 is 1.024us.
 *
 * sm and ism are scaled in order to keep effective digits.
 * SM_SHIFT and ISM_SHIFT are selected to keep at least 4 effective
 * digits in decimal using the following table.
 *
 *  bits/sec      100Kbps     1Mbps     10Mbps     100Mbps    1Gbps
 *  ------------+-------------------------------------------------------
 *  bytes/1.024us 12.8e-3    128e-3     1280e-3    12800e-3   128000e-3
 *
 *  1.024us/byte  78.125     7.8125     0.78125    0.078125   0.0078125
 *
 * So, for PSCHED_SHIFT 10 we need: SM_SHIFT 20, ISM_SHIFT 18.
 */
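/*
 * Worked example (added for clarity; assumes PSCHED_SHIFT == 10, i.e.
 * PSCHED_TICKS_PER_SEC == 10^9 >> 10 == 976562, SM_SHIFT == 20 and
 * ISM_SHIFT == 18): a rate of m = 125000 bytes/sec (1 Mbit/s) gives
 *
 *	m2sm(125000) == (125000 << 20) / 976562 =~ 134218
 *	             == ~0.128 bytes/tick << SM_SHIFT
 *
 * matching the 128e-3 entry in the bytes/1.024us row above, and
 *
 *	m2ism(125000) == (976562 << 18) / 125000 =~ 2048000
 *	              == ~7.8125 ticks/byte << ISM_SHIFT
 *
 * matching the 7.8125 entry in the 1.024us/byte row.
 */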
#define	SM_SHIFT	(30 - PSCHED_SHIFT)
#define	ISM_SHIFT	(8 + PSCHED_SHIFT)

#define	SM_MASK		((1ULL << SM_SHIFT) - 1)
#define	ISM_MASK	((1ULL << ISM_SHIFT) - 1)

static inline u64
seg_x2y(u64 x, u64 sm)
{
	u64 y;

	/*
	 * compute
	 *	y = x * sm >> SM_SHIFT
	 * but divide it for the upper and lower bits to avoid overflow
	 */
	y = (x >> SM_SHIFT) * sm + (((x & SM_MASK) * sm) >> SM_SHIFT);
	return y;
}
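/*
 * Note on the split above (added explanation): computing x * sm in one
 * go can overflow 64 bits long before x itself does.  Splitting x into
 * its upper bits (x >> SM_SHIFT) and lower bits (x & SM_MASK) keeps
 * each partial product within 64 bits while the sum still equals
 * (x * sm) >> SM_SHIFT, up to rounding of the low-order term.
 * seg_y2x() below applies the same trick with ISM_SHIFT/ISM_MASK.
 */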

static inline u64
seg_y2x(u64 y, u64 ism)
{
	u64 x;

	if (y == 0)
		x = 0;
	else if (ism == HT_INFINITY)
		x = HT_INFINITY;
	else {
		x = (y >> ISM_SHIFT) * ism
		    + (((y & ISM_MASK) * ism) >> ISM_SHIFT);
	}
	return x;
}

/* convert m (bps) into sm (bytes/psched us) */
static u64
m2sm(u32 m)
{
	u64 sm;

	sm = ((u64)m << SM_SHIFT);
	sm += PSCHED_TICKS_PER_SEC - 1;
	do_div(sm, PSCHED_TICKS_PER_SEC);
	return sm;
}

/* convert m (bps) into ism (psched us/byte) */
static u64
m2ism(u32 m)
{
	u64 ism;

	if (m == 0)
		ism = HT_INFINITY;
	else {
		ism = ((u64)PSCHED_TICKS_PER_SEC << ISM_SHIFT);
		ism += m - 1;
		do_div(ism, m);
	}
	return ism;
}

/* convert d (us) into dx (psched us) */
static u64
d2dx(u32 d)
{
	u64 dx;

	dx = ((u64)d * PSCHED_TICKS_PER_SEC);
	dx += USEC_PER_SEC - 1;
	do_div(dx, USEC_PER_SEC);
	return dx;
}

/* convert sm (bytes/psched us) into m (bps) */
static u32
sm2m(u64 sm)
{
	u64 m;

	m = (sm * PSCHED_TICKS_PER_SEC) >> SM_SHIFT;
	return (u32)m;
}

/* convert dx (psched us) into d (us) */
static u32
dx2d(u64 dx)
{
	u64 d;

	d = dx * USEC_PER_SEC;
	do_div(d, PSCHED_TICKS_PER_SEC);
	return (u32)d;
}

static void
sc2isc(struct tc_service_curve *sc, struct internal_sc *isc)
{
	isc->sm1  = m2sm(sc->m1);
	isc->ism1 = m2ism(sc->m1);
	isc->dx   = d2dx(sc->d);
	isc->dy   = seg_x2y(isc->dx, isc->sm1);
	isc->sm2  = m2sm(sc->m2);
	isc->ism2 = m2ism(sc->m2);
}

/*
 * initialize the runtime service curve with the given internal
 * service curve starting at (x, y).
 */
static void
rtsc_init(struct runtime_sc *rtsc, struct internal_sc *isc, u64 x, u64 y)
{
	rtsc->x	   = x;
	rtsc->y    = y;
	rtsc->sm1  = isc->sm1;
	rtsc->ism1 = isc->ism1;
	rtsc->dx   = isc->dx;
	rtsc->dy   = isc->dy;
	rtsc->sm2  = isc->sm2;
	rtsc->ism2 = isc->ism2;
}

/*
 * calculate the x-projection of the runtime service curve for the
 * given y-value (the inverse of rtsc_x2y() below)
 */
static u64
rtsc_y2x(struct runtime_sc *rtsc, u64 y)
{
	u64 x;

	if (y < rtsc->y)
		x = rtsc->x;
	else if (y <= rtsc->y + rtsc->dy) {
		/* x belongs to the 1st segment */
		if (rtsc->dy == 0)
			x = rtsc->x + rtsc->dx;
		else
			x = rtsc->x + seg_y2x(y - rtsc->y, rtsc->ism1);
	} else {
		/* x belongs to the 2nd segment */
		x = rtsc->x + rtsc->dx
		    + seg_y2x(y - rtsc->y - rtsc->dy, rtsc->ism2);
	}
	return x;
}

static u64
rtsc_x2y(struct runtime_sc *rtsc, u64 x)
{
	u64 y;

	if (x <= rtsc->x)
		y = rtsc->y;
	else if (x <= rtsc->x + rtsc->dx)
		/* y belongs to the 1st segment */
		y = rtsc->y + seg_x2y(x - rtsc->x, rtsc->sm1);
	else
		/* y belongs to the 2nd segment */
		y = rtsc->y + rtsc->dy
		    + seg_x2y(x - rtsc->x - rtsc->dx, rtsc->sm2);
	return y;
}

/*
 * update the runtime service curve by taking the minimum of the current
 * runtime service curve and the service curve starting at (x, y).
 */
static void
rtsc_min(struct runtime_sc *rtsc, struct internal_sc *isc, u64 x, u64 y)
{
	u64 y1, y2, dx, dy;
	u32 dsm;

	if (isc->sm1 <= isc->sm2) {
		/* service curve is convex */
		y1 = rtsc_x2y(rtsc, x);
		if (y1 < y)
			/* the current rtsc is smaller */
			return;
		rtsc->x = x;
		rtsc->y = y;
		return;
	}

	/*
	 * service curve is concave
	 * compute the two y values of the current rtsc
	 *	y1: at x
	 *	y2: at (x + dx)
	 */
	y1 = rtsc_x2y(rtsc, x);
	if (y1 <= y) {
		/* rtsc is below isc, no change to rtsc */
		return;
	}

	y2 = rtsc_x2y(rtsc, x + isc->dx);
	if (y2 >= y + isc->dy) {
		/* rtsc is above isc, replace rtsc by isc */
		rtsc->x = x;
		rtsc->y = y;
		rtsc->dx = isc->dx;
		rtsc->dy = isc->dy;
		return;
	}

	/*
	 * the two curves intersect
	 * compute the offsets (dx, dy) using the reverse
	 * function of seg_x2y()
	 *	seg_x2y(dx, sm1) == seg_x2y(dx, sm2) + (y1 - y)
	 */
	dx = (y1 - y) << SM_SHIFT;
	dsm = isc->sm1 - isc->sm2;
	do_div(dx, dsm);
	/*
	 * check if (x, y1) belongs to the 1st segment of rtsc.
	 * if so, add the offset.
	 */
	if (rtsc->x + rtsc->dx > x)
		dx += rtsc->x + rtsc->dx - x;
	dy = seg_x2y(dx, isc->sm1);

	rtsc->x = x;
	rtsc->y = y;
	rtsc->dx = dx;
	rtsc->dy = dy;
}
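/*
 * Derivation of the intersection offset used above (added note):
 * both curves rise from the crossing region with slopes sm1 and sm2,
 * so dx must satisfy
 *
 *	seg_x2y(dx, sm1) == seg_x2y(dx, sm2) + (y1 - y)
 *	(dx * sm1) >> SM_SHIFT == (dx * sm2) >> SM_SHIFT + (y1 - y)
 *
 * which solves to
 *
 *	dx == ((y1 - y) << SM_SHIFT) / (sm1 - sm2)
 *
 * exactly the do_div() computation in rtsc_min().
 */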

static void
init_ed(struct hfsc_class *cl, unsigned int next_len)
{
	u64 cur_time = psched_get_time();

	/* update the deadline curve */
	rtsc_min(&cl->cl_deadline, &cl->cl_rsc, cur_time, cl->cl_cumul);

	/*
	 * update the eligible curve.
	 * for concave, it is equal to the deadline curve.
	 * for convex, it is a linear curve with slope m2.
	 */
	cl->cl_eligible = cl->cl_deadline;
	if (cl->cl_rsc.sm1 <= cl->cl_rsc.sm2) {
		cl->cl_eligible.dx = 0;
		cl->cl_eligible.dy = 0;
	}

	/* compute e and d */
	cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);

	eltree_insert(cl);
}

static void
update_ed(struct hfsc_class *cl, unsigned int next_len)
{
	cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);

	eltree_update(cl);
}

static inline void
update_d(struct hfsc_class *cl, unsigned int next_len)
{
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);
}

static inline void
update_cfmin(struct hfsc_class *cl)
{
	struct rb_node *n = rb_first(&cl->cf_tree);
	struct hfsc_class *p;

	if (n == NULL) {
		cl->cl_cfmin = 0;
		return;
	}
	p = rb_entry(n, struct hfsc_class, cf_node);
	cl->cl_cfmin = p->cl_f;
}

static void
init_vf(struct hfsc_class *cl, unsigned int len)
{
	struct hfsc_class *max_cl;
	struct rb_node *n;
	u64 vt, f, cur_time;
	int go_active;

	cur_time = 0;
	go_active = 1;
	for (; cl->cl_parent != NULL; cl = cl->cl_parent) {
		if (go_active && cl->cl_nactive++ == 0)
			go_active = 1;
		else
			go_active = 0;

		if (go_active) {
			n = rb_last(&cl->cl_parent->vt_tree);
			if (n != NULL) {
				max_cl = rb_entry(n, struct hfsc_class, vt_node);
				/*
				 * set vt to the average of the min and max
				 * classes.  if the parent's period didn't
				 * change, don't decrease vt of the class.
				 */
				vt = max_cl->cl_vt;
				if (cl->cl_parent->cl_cvtmin != 0)
					vt = (cl->cl_parent->cl_cvtmin + vt)/2;

				if (cl->cl_parent->cl_vtperiod !=
				    cl->cl_parentperiod || vt > cl->cl_vt)
					cl->cl_vt = vt;
			} else {
				/*
				 * first child for a new parent backlog period.
				 * add parent's cvtmax to cvtoff to make a new
				 * vt (vtoff + vt) larger than the vt in the
				 * last period for all children.
				 */
				vt = cl->cl_parent->cl_cvtmax;
				cl->cl_parent->cl_cvtoff += vt;
				cl->cl_parent->cl_cvtmax = 0;
				cl->cl_parent->cl_cvtmin = 0;
				cl->cl_vt = 0;
			}

			cl->cl_vtoff = cl->cl_parent->cl_cvtoff -
							cl->cl_pcvtoff;

			/* update the virtual curve */
			vt = cl->cl_vt + cl->cl_vtoff;
			rtsc_min(&cl->cl_virtual, &cl->cl_fsc, vt,
						      cl->cl_total);
			if (cl->cl_virtual.x == vt) {
				cl->cl_virtual.x -= cl->cl_vtoff;
				cl->cl_vtoff = 0;
			}
			cl->cl_vtadj = 0;

			cl->cl_vtperiod++;  /* increment vt period */
			cl->cl_parentperiod = cl->cl_parent->cl_vtperiod;
			if (cl->cl_parent->cl_nactive == 0)
				cl->cl_parentperiod++;
			cl->cl_f = 0;

			vttree_insert(cl);
			cftree_insert(cl);

			if (cl->cl_flags & HFSC_USC) {
				/* class has upper limit curve */
				if (cur_time == 0)
					cur_time = psched_get_time();

				/* update the ulimit curve */
				rtsc_min(&cl->cl_ulimit, &cl->cl_usc, cur_time,
					 cl->cl_total);
				/* compute myf */
				cl->cl_myf = rtsc_y2x(&cl->cl_ulimit,
						      cl->cl_total);
				cl->cl_myfadj = 0;
			}
		}

		f = max(cl->cl_myf, cl->cl_cfmin);
		if (f != cl->cl_f) {
			cl->cl_f = f;
			cftree_update(cl);
		}
		update_cfmin(cl->cl_parent);
	}
}

static void
update_vf(struct hfsc_class *cl, unsigned int len, u64 cur_time)
{
	u64 f; /* , myf_bound, delta; */
	int go_passive = 0;

	if (cl->qdisc->q.qlen == 0 && cl->cl_flags & HFSC_FSC)
		go_passive = 1;

	for (; cl->cl_parent != NULL; cl = cl->cl_parent) {
		cl->cl_total += len;

		if (!(cl->cl_flags & HFSC_FSC) || cl->cl_nactive == 0)
			continue;

		if (go_passive && --cl->cl_nactive == 0)
			go_passive = 1;
		else
			go_passive = 0;

		if (go_passive) {
			/* no more active child, going passive */

			/* update cvtmax of the parent class */
			if (cl->cl_vt > cl->cl_parent->cl_cvtmax)
				cl->cl_parent->cl_cvtmax = cl->cl_vt;

			/* remove this class from the vt tree */
			vttree_remove(cl);

			cftree_remove(cl);
			update_cfmin(cl->cl_parent);

			continue;
		}

		/*
		 * update vt and f
		 */
		cl->cl_vt = rtsc_y2x(&cl->cl_virtual, cl->cl_total)
			    - cl->cl_vtoff + cl->cl_vtadj;

		/*
		 * if vt of the class is smaller than cvtmin,
		 * the class was skipped in the past due to non-fit.
		 * if so, we need to adjust vtadj.
		 */
		if (cl->cl_vt < cl->cl_parent->cl_cvtmin) {
			cl->cl_vtadj += cl->cl_parent->cl_cvtmin - cl->cl_vt;
			cl->cl_vt = cl->cl_parent->cl_cvtmin;
		}

		/* update the vt tree */
		vttree_update(cl);

		if (cl->cl_flags & HFSC_USC) {
			cl->cl_myf = cl->cl_myfadj + rtsc_y2x(&cl->cl_ulimit,
							      cl->cl_total);
#if 0
			/*
			 * This code causes classes to stay way under their
			 * limit when multiple classes are used at gigabit
			 * speed. needs investigation. -kaber
			 */
			/*
			 * if myf lags behind by more than one clock tick
			 * from the current time, adjust myfadj to prevent
			 * a rate-limited class from going greedy.
			 * in a steady state under rate-limiting, myf
			 * fluctuates within one clock tick.
			 */
			myf_bound = cur_time - PSCHED_JIFFIE2US(1);
			if (cl->cl_myf < myf_bound) {
				delta = cur_time - cl->cl_myf;
				cl->cl_myfadj += delta;
				cl->cl_myf += delta;
			}
#endif
		}

		f = max(cl->cl_myf, cl->cl_cfmin);
		if (f != cl->cl_f) {
			cl->cl_f = f;
			cftree_update(cl);
			update_cfmin(cl->cl_parent);
		}
	}
}

static void
set_active(struct hfsc_class *cl, unsigned int len)
{
	if (cl->cl_flags & HFSC_RSC)
		init_ed(cl, len);
	if (cl->cl_flags & HFSC_FSC)
		init_vf(cl, len);

	list_add_tail(&cl->dlist, &cl->sched->droplist);
}

static void
set_passive(struct hfsc_class *cl)
{
	if (cl->cl_flags & HFSC_RSC)
		eltree_remove(cl);

	list_del(&cl->dlist);

	/*
	 * vttree is now handled in update_vf() so that update_vf(cl, 0, 0)
	 * needs to be called explicitly to remove a class from vttree.
	 */
}

static unsigned int
qdisc_peek_len(struct Qdisc *sch)
{
	struct sk_buff *skb;
	unsigned int len;

	skb = sch->ops->peek(sch);
	if (skb == NULL) {
		qdisc_warn_nonwc("qdisc_peek_len", sch);
		return 0;
	}
	len = qdisc_pkt_len(skb);

	return len;
}

static void
hfsc_purge_queue(struct Qdisc *sch, struct hfsc_class *cl)
{
	unsigned int len = cl->qdisc->q.qlen;

	qdisc_reset(cl->qdisc);
	qdisc_tree_decrease_qlen(cl->qdisc, len);
}

static void
hfsc_adjust_levels(struct hfsc_class *cl)
{
	struct hfsc_class *p;
	unsigned int level;

	do {
		level = 0;
		list_for_each_entry(p, &cl->children, siblings) {
			if (p->level >= level)
				level = p->level + 1;
		}
		cl->level = level;
	} while ((cl = cl->cl_parent) != NULL);
}

static inline struct hfsc_class *
hfsc_find_class(u32 classid, struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct Qdisc_class_common *clc;

	clc = qdisc_class_find(&q->clhash, classid);
	if (clc == NULL)
		return NULL;
	return container_of(clc, struct hfsc_class, cl_common);
}

static void
hfsc_change_rsc(struct hfsc_class *cl, struct tc_service_curve *rsc,
		u64 cur_time)
{
	sc2isc(rsc, &cl->cl_rsc);
	rtsc_init(&cl->cl_deadline, &cl->cl_rsc, cur_time, cl->cl_cumul);
	cl->cl_eligible = cl->cl_deadline;
	if (cl->cl_rsc.sm1 <= cl->cl_rsc.sm2) {
		cl->cl_eligible.dx = 0;
		cl->cl_eligible.dy = 0;
	}
	cl->cl_flags |= HFSC_RSC;
}

static void
hfsc_change_fsc(struct hfsc_class *cl, struct tc_service_curve *fsc)
{
	sc2isc(fsc, &cl->cl_fsc);
	rtsc_init(&cl->cl_virtual, &cl->cl_fsc, cl->cl_vt, cl->cl_total);
	cl->cl_flags |= HFSC_FSC;
}

static void
hfsc_change_usc(struct hfsc_class *cl, struct tc_service_curve *usc,
		u64 cur_time)
{
	sc2isc(usc, &cl->cl_usc);
	rtsc_init(&cl->cl_ulimit, &cl->cl_usc, cur_time, cl->cl_total);
	cl->cl_flags |= HFSC_USC;
}

static const struct nla_policy hfsc_policy[TCA_HFSC_MAX + 1] = {
	[TCA_HFSC_RSC]	= { .len = sizeof(struct tc_service_curve) },
	[TCA_HFSC_FSC]	= { .len = sizeof(struct tc_service_curve) },
	[TCA_HFSC_USC]	= { .len = sizeof(struct tc_service_curve) },
};

static int
hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
		  struct nlattr **tca, unsigned long *arg)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl = (struct hfsc_class *)*arg;
	struct hfsc_class *parent = NULL;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_HFSC_MAX + 1];
	struct tc_service_curve *rsc = NULL, *fsc = NULL, *usc = NULL;
	u64 cur_time;
	int err;

	if (opt == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_HFSC_MAX, opt, hfsc_policy);
	if (err < 0)
		return err;

	if (tb[TCA_HFSC_RSC]) {
		rsc = nla_data(tb[TCA_HFSC_RSC]);
		if (rsc->m1 == 0 && rsc->m2 == 0)
			rsc = NULL;
	}

	if (tb[TCA_HFSC_FSC]) {
		fsc = nla_data(tb[TCA_HFSC_FSC]);
		if (fsc->m1 == 0 && fsc->m2 == 0)
			fsc = NULL;
	}

	if (tb[TCA_HFSC_USC]) {
		usc = nla_data(tb[TCA_HFSC_USC]);
		if (usc->m1 == 0 && usc->m2 == 0)
			usc = NULL;
	}

	if (cl != NULL) {
		if (parentid) {
			if (cl->cl_parent &&
			    cl->cl_parent->cl_common.classid != parentid)
				return -EINVAL;
			if (cl->cl_parent == NULL && parentid != TC_H_ROOT)
				return -EINVAL;
		}
		cur_time = psched_get_time();

		if (tca[TCA_RATE]) {
			spinlock_t *lock = qdisc_root_sleeping_lock(sch);

			err = gen_replace_estimator(&cl->bstats, NULL,
						    &cl->rate_est,
						    lock,
						    tca[TCA_RATE]);
			if (err)
				return err;
		}

		sch_tree_lock(sch);
		if (rsc != NULL)
			hfsc_change_rsc(cl, rsc, cur_time);
		if (fsc != NULL)
			hfsc_change_fsc(cl, fsc);
		if (usc != NULL)
			hfsc_change_usc(cl, usc, cur_time);

		if (cl->qdisc->q.qlen != 0) {
			if (cl->cl_flags & HFSC_RSC)
				update_ed(cl, qdisc_peek_len(cl->qdisc));
			if (cl->cl_flags & HFSC_FSC)
				update_vf(cl, 0, cur_time);
		}
		sch_tree_unlock(sch);

		return 0;
	}

	if (parentid == TC_H_ROOT)
		return -EEXIST;

	parent = &q->root;
	if (parentid) {
		parent = hfsc_find_class(parentid, sch);
		if (parent == NULL)
			return -ENOENT;
	}

	if (classid == 0 || TC_H_MAJ(classid ^ sch->handle) != 0)
		return -EINVAL;
	if (hfsc_find_class(classid, sch))
		return -EEXIST;

	if (rsc == NULL && fsc == NULL)
		return -EINVAL;

	cl = kzalloc(sizeof(struct hfsc_class), GFP_KERNEL);
	if (cl == NULL)
		return -ENOBUFS;

	if (tca[TCA_RATE]) {
		err = gen_new_estimator(&cl->bstats, NULL, &cl->rate_est,
					qdisc_root_sleeping_lock(sch),
					tca[TCA_RATE]);
		if (err) {
			kfree(cl);
			return err;
		}
	}

	if (rsc != NULL)
		hfsc_change_rsc(cl, rsc, 0);
	if (fsc != NULL)
		hfsc_change_fsc(cl, fsc);
	if (usc != NULL)
		hfsc_change_usc(cl, usc, 0);

	cl->cl_common.classid = classid;
	cl->refcnt    = 1;
	cl->sched     = q;
	cl->cl_parent = parent;
	cl->qdisc = qdisc_create_dflt(sch->dev_queue,
				      &pfifo_qdisc_ops, classid);
	if (cl->qdisc == NULL)
		cl->qdisc = &noop_qdisc;
	INIT_LIST_HEAD(&cl->children);
	cl->vt_tree = RB_ROOT;
	cl->cf_tree = RB_ROOT;

	sch_tree_lock(sch);
	qdisc_class_hash_insert(&q->clhash, &cl->cl_common);
	list_add_tail(&cl->siblings, &parent->children);
	if (parent->level == 0)
		hfsc_purge_queue(sch, parent);
	hfsc_adjust_levels(parent);
	cl->cl_pcvtoff = parent->cl_cvtoff;
	sch_tree_unlock(sch);

	qdisc_class_hash_grow(sch, &q->clhash);

	*arg = (unsigned long)cl;
	return 0;
}

static void
hfsc_destroy_class(struct Qdisc *sch, struct hfsc_class *cl)
{
	struct hfsc_sched *q = qdisc_priv(sch);

	tcf_destroy_chain(&cl->filter_list);
	qdisc_destroy(cl->qdisc);
	gen_kill_estimator(&cl->bstats, &cl->rate_est);
	if (cl != &q->root)
		kfree(cl);
}

static int
hfsc_delete_class(struct Qdisc *sch, unsigned long arg)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	if (cl->level > 0 || cl->filter_cnt > 0 || cl == &q->root)
		return -EBUSY;

	sch_tree_lock(sch);

	list_del(&cl->siblings);
	hfsc_adjust_levels(cl->cl_parent);

	hfsc_purge_queue(sch, cl);
	qdisc_class_hash_remove(&q->clhash, &cl->cl_common);

	BUG_ON(--cl->refcnt == 0);
	/*
	 * This shouldn't happen: we "hold" one cops->get() when called
	 * from tc_ctl_tclass; the destroy method is done from cops->put().
	 */

	sch_tree_unlock(sch);
	return 0;
}

static struct hfsc_class *
hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *head, *cl;
	struct tcf_result res;
	struct tcf_proto *tcf;
	int result;

	if (TC_H_MAJ(skb->priority ^ sch->handle) == 0 &&
	    (cl = hfsc_find_class(skb->priority, sch)) != NULL)
		if (cl->level == 0)
			return cl;

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	head = &q->root;
	tcf = rcu_dereference_bh(q->root.filter_list);
	while (tcf && (result = tc_classify(skb, tcf, &res, false)) >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_QUEUED:
		case TC_ACT_STOLEN:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
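			/* fall through */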
		case TC_ACT_SHOT:
			return NULL;
		}
#endif
		cl = (struct hfsc_class *)res.class;
		if (!cl) {
			cl = hfsc_find_class(res.classid, sch);
			if (!cl)
				break; /* filter selected invalid classid */
			if (cl->level >= head->level)
				break; /* filter may only point downwards */
		}

		if (cl->level == 0)
			return cl; /* hit leaf class */

		/* apply inner filter chain */
		tcf = rcu_dereference_bh(cl->filter_list);
		head = cl;
	}

	/* classification failed, try default class */
	cl = hfsc_find_class(TC_H_MAKE(TC_H_MAJ(sch->handle), q->defcls), sch);
	if (cl == NULL || cl->level > 0)
		return NULL;

	return cl;
}

static int
hfsc_graft_class(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		 struct Qdisc **old)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	if (cl->level > 0)
		return -EINVAL;
	if (new == NULL) {
		new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
					cl->cl_common.classid);
		if (new == NULL)
			new = &noop_qdisc;
	}

	sch_tree_lock(sch);
	hfsc_purge_queue(sch, cl);
	*old = cl->qdisc;
	cl->qdisc = new;
	sch_tree_unlock(sch);
	return 0;
}

static struct Qdisc *
hfsc_class_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	if (cl->level == 0)
		return cl->qdisc;

	return NULL;
}

static void
hfsc_qlen_notify(struct Qdisc *sch, unsigned long arg)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	if (cl->qdisc->q.qlen == 0) {
		update_vf(cl, 0, 0);
		set_passive(cl);
	}
}

static unsigned long
hfsc_get_class(struct Qdisc *sch, u32 classid)
{
	struct hfsc_class *cl = hfsc_find_class(classid, sch);

	if (cl != NULL)
		cl->refcnt++;

	return (unsigned long)cl;
}

static void
hfsc_put_class(struct Qdisc *sch, unsigned long arg)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	if (--cl->refcnt == 0)
		hfsc_destroy_class(sch, cl);
}

static unsigned long
hfsc_bind_tcf(struct Qdisc *sch, unsigned long parent, u32 classid)
{
	struct hfsc_class *p = (struct hfsc_class *)parent;
	struct hfsc_class *cl = hfsc_find_class(classid, sch);

	if (cl != NULL) {
		if (p != NULL && p->level <= cl->level)
			return 0;
		cl->filter_cnt++;
	}

	return (unsigned long)cl;
}

static void
hfsc_unbind_tcf(struct Qdisc *sch, unsigned long arg)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	cl->filter_cnt--;
}

static struct tcf_proto __rcu **
hfsc_tcf_chain(struct Qdisc *sch, unsigned long arg)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	if (cl == NULL)
		cl = &q->root;

	return &cl->filter_list;
}

static int
hfsc_dump_sc(struct sk_buff *skb, int attr, struct internal_sc *sc)
{
	struct tc_service_curve tsc;

	tsc.m1 = sm2m(sc->sm1);
	tsc.d  = dx2d(sc->dx);
	tsc.m2 = sm2m(sc->sm2);
	if (nla_put(skb, attr, sizeof(tsc), &tsc))
		goto nla_put_failure;

	return skb->len;

 nla_put_failure:
	return -1;
}

static int
hfsc_dump_curves(struct sk_buff *skb, struct hfsc_class *cl)
{
	if ((cl->cl_flags & HFSC_RSC) &&
	    (hfsc_dump_sc(skb, TCA_HFSC_RSC, &cl->cl_rsc) < 0))
		goto nla_put_failure;

	if ((cl->cl_flags & HFSC_FSC) &&
	    (hfsc_dump_sc(skb, TCA_HFSC_FSC, &cl->cl_fsc) < 0))
		goto nla_put_failure;

	if ((cl->cl_flags & HFSC_USC) &&
	    (hfsc_dump_sc(skb, TCA_HFSC_USC, &cl->cl_usc) < 0))
		goto nla_put_failure;

	return skb->len;

 nla_put_failure:
	return -1;
}

static int
hfsc_dump_class(struct Qdisc *sch, unsigned long arg, struct sk_buff *skb,
		struct tcmsg *tcm)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;
	struct nlattr *nest;

	tcm->tcm_parent = cl->cl_parent ? cl->cl_parent->cl_common.classid :
					  TC_H_ROOT;
	tcm->tcm_handle = cl->cl_common.classid;
	if (cl->level == 0)
		tcm->tcm_info = cl->qdisc->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;
	if (hfsc_dump_curves(skb, cl) < 0)
		goto nla_put_failure;
	return nla_nest_end(skb, nest);

 nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

static int
hfsc_dump_class_stats(struct Qdisc *sch, unsigned long arg,
	struct gnet_dump *d)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;
	struct tc_hfsc_stats xstats;

	cl->qstats.backlog = cl->qdisc->qstats.backlog;
	xstats.level   = cl->level;
	xstats.period  = cl->cl_vtperiod;
	xstats.work    = cl->cl_total;
	xstats.rtwork  = cl->cl_cumul;

	if (gnet_stats_copy_basic(d, NULL, &cl->bstats) < 0 ||
	    gnet_stats_copy_rate_est(d, &cl->bstats, &cl->rate_est) < 0 ||
	    gnet_stats_copy_queue(d, NULL, &cl->qstats, cl->qdisc->q.qlen) < 0)
		return -1;

	return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
}



static void
hfsc_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl;
	unsigned int i;

	if (arg->stop)
		return;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, &q->clhash.hash[i],
				     cl_common.hnode) {
			if (arg->count < arg->skip) {
				arg->count++;
				continue;
			}
			if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
				arg->stop = 1;
				return;
			}
			arg->count++;
		}
	}
}

static void
hfsc_schedule_watchdog(struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl;
	u64 next_time = 0;

	cl = eltree_get_minel(q);
	if (cl)
		next_time = cl->cl_e;
	if (q->root.cl_cfmin != 0) {
		if (next_time == 0 || next_time > q->root.cl_cfmin)
			next_time = q->root.cl_cfmin;
	}
	WARN_ON(next_time == 0);
	qdisc_watchdog_schedule(&q->watchdog, next_time);
}

static int
hfsc_init_qdisc(struct Qdisc *sch, struct nlattr *opt)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct tc_hfsc_qopt *qopt;
	int err;

	if (opt == NULL || nla_len(opt) < sizeof(*qopt))
		return -EINVAL;
	qopt = nla_data(opt);

	q->defcls = qopt->defcls;
	err = qdisc_class_hash_init(&q->clhash);
	if (err < 0)
		return err;
	q->eligible = RB_ROOT;
	INIT_LIST_HEAD(&q->droplist);

	q->root.cl_common.classid = sch->handle;
	q->root.refcnt  = 1;
	q->root.sched   = q;
	q->root.qdisc = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
					  sch->handle);
	if (q->root.qdisc == NULL)
		q->root.qdisc = &noop_qdisc;
	INIT_LIST_HEAD(&q->root.children);
	q->root.vt_tree = RB_ROOT;
	q->root.cf_tree = RB_ROOT;

	qdisc_class_hash_insert(&q->clhash, &q->root.cl_common);
	qdisc_class_hash_grow(sch, &q->clhash);

	qdisc_watchdog_init(&q->watchdog, sch);

	return 0;
}

static int
hfsc_change_qdisc(struct Qdisc *sch, struct nlattr *opt)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct tc_hfsc_qopt *qopt;

	if (opt == NULL || nla_len(opt) < sizeof(*qopt))
		return -EINVAL;
	qopt = nla_data(opt);

	sch_tree_lock(sch);
	q->defcls = qopt->defcls;
	sch_tree_unlock(sch);

	return 0;
}

static void
hfsc_reset_class(struct hfsc_class *cl)
{
	cl->cl_total        = 0;
	cl->cl_cumul        = 0;
	cl->cl_d            = 0;
	cl->cl_e            = 0;
	cl->cl_vt           = 0;
	cl->cl_vtadj        = 0;
	cl->cl_vtoff        = 0;
	cl->cl_cvtmin       = 0;
	cl->cl_cvtmax       = 0;
	cl->cl_cvtoff       = 0;
	cl->cl_pcvtoff      = 0;
	cl->cl_vtperiod     = 0;
	cl->cl_parentperiod = 0;
	cl->cl_f            = 0;
	cl->cl_myf          = 0;
	cl->cl_myfadj       = 0;
	cl->cl_cfmin        = 0;
	cl->cl_nactive      = 0;

	cl->vt_tree = RB_ROOT;
	cl->cf_tree = RB_ROOT;
	qdisc_reset(cl->qdisc);

	if (cl->cl_flags & HFSC_RSC)
		rtsc_init(&cl->cl_deadline, &cl->cl_rsc, 0, 0);
	if (cl->cl_flags & HFSC_FSC)
		rtsc_init(&cl->cl_virtual, &cl->cl_fsc, 0, 0);
	if (cl->cl_flags & HFSC_USC)
		rtsc_init(&cl->cl_ulimit, &cl->cl_usc, 0, 0);
}

static void
hfsc_reset_qdisc(struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl;
	unsigned int i;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, &q->clhash.hash[i], cl_common.hnode)
			hfsc_reset_class(cl);
	}
	q->eligible = RB_ROOT;
	INIT_LIST_HEAD(&q->droplist);
	qdisc_watchdog_cancel(&q->watchdog);
	sch->q.qlen = 0;
}

static void
hfsc_destroy_qdisc(struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hlist_node *next;
	struct hfsc_class *cl;
	unsigned int i;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, &q->clhash.hash[i], cl_common.hnode)
			tcf_destroy_chain(&cl->filter_list);
	}
	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i],
					  cl_common.hnode)
			hfsc_destroy_class(sch, cl);
	}
	qdisc_class_hash_destroy(&q->clhash);
	qdisc_watchdog_cancel(&q->watchdog);
}

static int
hfsc_dump_qdisc(struct Qdisc *sch, struct sk_buff *skb)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_hfsc_qopt qopt;
	struct hfsc_class *cl;
	unsigned int i;

	sch->qstats.backlog = 0;
	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, &q->clhash.hash[i], cl_common.hnode)
			sch->qstats.backlog += cl->qdisc->qstats.backlog;
	}

	qopt.defcls = q->defcls;
	if (nla_put(skb, TCA_OPTIONS, sizeof(qopt), &qopt))
		goto nla_put_failure;
	return skb->len;

 nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int
hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct hfsc_class *cl;
	int uninitialized_var(err);

	cl = hfsc_classify(skb, sch, &err);
	if (cl == NULL) {
		if (err & __NET_XMIT_BYPASS)
			qdisc_qstats_drop(sch);
		kfree_skb(skb);
		return err;
	}

	err = qdisc_enqueue(skb, cl->qdisc);
	if (unlikely(err != NET_XMIT_SUCCESS)) {
		if (net_xmit_drop_count(err)) {
			cl->qstats.drops++;
			qdisc_qstats_drop(sch);
		}
		return err;
	}

	if (cl->qdisc->q.qlen == 1)
		set_active(cl, qdisc_pkt_len(skb));

	sch->q.qlen++;

	return NET_XMIT_SUCCESS;
}

static struct sk_buff *
hfsc_dequeue(struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl;
	struct sk_buff *skb;
	u64 cur_time;
	unsigned int next_len;
	int realtime = 0;

	if (sch->q.qlen == 0)
		return NULL;

	cur_time = psched_get_time();

	/*
	 * if there are eligible classes, use real-time criteria.
	 * find the class with the minimum deadline among
	 * the eligible classes.
	 */
	cl = eltree_get_mindl(q, cur_time);
	if (cl) {
		realtime = 1;
	} else {
		/*
		 * use link-sharing criteria
		 * get the class with the minimum vt in the hierarchy
		 */
		cl = vttree_get_minvt(&q->root, cur_time);
		if (cl == NULL) {
			qdisc_qstats_overlimit(sch);
			hfsc_schedule_watchdog(sch);
			return NULL;
		}
	}

	skb = qdisc_dequeue_peeked(cl->qdisc);
	if (skb == NULL) {
		qdisc_warn_nonwc("HFSC", cl->qdisc);
		return NULL;
	}

	bstats_update(&cl->bstats, skb);
	update_vf(cl, qdisc_pkt_len(skb), cur_time);
	if (realtime)
		cl->cl_cumul += qdisc_pkt_len(skb);

	if (cl->qdisc->q.qlen != 0) {
		if (cl->cl_flags & HFSC_RSC) {
			/* update ed */
			next_len = qdisc_peek_len(cl->qdisc);
			if (realtime)
				update_ed(cl, next_len);
			else
				update_d(cl, next_len);
		}
	} else {
		/* the class becomes passive */
		set_passive(cl);
	}

	qdisc_unthrottled(sch);
	qdisc_bstats_update(sch, skb);
	sch->q.qlen--;

	return skb;
}

static unsigned int
hfsc_drop(struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl;
	unsigned int len;

	list_for_each_entry(cl, &q->droplist, dlist) {
		if (cl->qdisc->ops->drop != NULL &&
		    (len = cl->qdisc->ops->drop(cl->qdisc)) > 0) {
			if (cl->qdisc->q.qlen == 0) {
				update_vf(cl, 0, 0);
				set_passive(cl);
			} else {
				list_move_tail(&cl->dlist, &q->droplist);
			}
			cl->qstats.drops++;
			qdisc_qstats_drop(sch);
			sch->q.qlen--;
			return len;
		}
	}
	return 0;
}

static const struct Qdisc_class_ops hfsc_class_ops = {
	.change		= hfsc_change_class,
	.delete		= hfsc_delete_class,
	.graft		= hfsc_graft_class,
	.leaf		= hfsc_class_leaf,
	.qlen_notify	= hfsc_qlen_notify,
	.get		= hfsc_get_class,
	.put		= hfsc_put_class,
	.bind_tcf	= hfsc_bind_tcf,
	.unbind_tcf	= hfsc_unbind_tcf,
	.tcf_chain	= hfsc_tcf_chain,
	.dump		= hfsc_dump_class,
	.dump_stats	= hfsc_dump_class_stats,
	.walk		= hfsc_walk
};

static struct Qdisc_ops hfsc_qdisc_ops __read_mostly = {
	.id		= "hfsc",
	.init		= hfsc_init_qdisc,
	.change		= hfsc_change_qdisc,
	.reset		= hfsc_reset_qdisc,
	.destroy	= hfsc_destroy_qdisc,
	.dump		= hfsc_dump_qdisc,
	.enqueue	= hfsc_enqueue,
	.dequeue	= hfsc_dequeue,
	.peek		= qdisc_peek_dequeued,
	.drop		= hfsc_drop,
	.cl_ops		= &hfsc_class_ops,
	.priv_size	= sizeof(struct hfsc_sched),
	.owner		= THIS_MODULE
};

static int __init
hfsc_init(void)
{
	return register_qdisc(&hfsc_qdisc_ops);
}

static void __exit
hfsc_cleanup(void)
{
	unregister_qdisc(&hfsc_qdisc_ops);
}

MODULE_LICENSE("GPL");
module_init(hfsc_init);
module_exit(hfsc_cleanup);
1755