xref: /openbmc/linux/net/sched/sch_hfsc.c (revision 5b394b2d)
1 /*
2  * Copyright (c) 2003 Patrick McHardy, <kaber@trash.net>
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public License
6  * as published by the Free Software Foundation; either version 2
7  * of the License, or (at your option) any later version.
8  *
9  * 2003-10-17 - Ported from altq
10  */
11 /*
12  * Copyright (c) 1997-1999 Carnegie Mellon University. All Rights Reserved.
13  *
14  * Permission to use, copy, modify, and distribute this software and
15  * its documentation is hereby granted (including for commercial or
16  * for-profit use), provided that both the copyright notice and this
17  * permission notice appear in all copies of the software, derivative
18  * works, or modified versions, and any portions thereof.
19  *
20  * THIS SOFTWARE IS EXPERIMENTAL AND IS KNOWN TO HAVE BUGS, SOME OF
21  * WHICH MAY HAVE SERIOUS CONSEQUENCES.  CARNEGIE MELLON PROVIDES THIS
22  * SOFTWARE IN ITS ``AS IS'' CONDITION, AND ANY EXPRESS OR IMPLIED
23  * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
24  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
25  * DISCLAIMED.  IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY BE LIABLE
26  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
28  * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
29  * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
30  * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
32  * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
33  * DAMAGE.
34  *
35  * Carnegie Mellon encourages (but does not require) users of this
36  * software to return any improvements or extensions that they make,
37  * and to grant Carnegie Mellon the rights to redistribute these
38  * changes without encumbrance.
39  */
40 /*
41  * H-FSC is described in Proceedings of SIGCOMM'97,
42  * "A Hierarchical Fair Service Curve Algorithm for Link-Sharing,
43  * Real-Time and Priority Service"
44  * by Ion Stoica, Hui Zhang, and T. S. Eugene Ng.
45  *
46  * Oleg Cherevko <olwi@aq.ml.com.ua> added the upperlimit for link-sharing.
47  * when a class has an upperlimit, the fit-time is computed from the
48  * upperlimit service curve.  the link-sharing scheduler does not schedule
49  * a class whose fit-time exceeds the current time.
50  */
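/*
 * Illustrative usage (added note, not part of the original header): from
 * user space these curves are typically configured with tc, roughly like
 *
 *	tc qdisc add dev eth0 root handle 1: hfsc default 10
 *	tc class add dev eth0 parent 1: classid 1:10 hfsc \
 *		sc rate 5mbit ul rate 10mbit
 *
 * where "sc" supplies the real-time and link-sharing curves together and
 * "ul" the upperlimit curve mentioned above; see tc-hfsc(8) for the exact
 * syntax, the device name and rates here are placeholders only.
 */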
51 
52 #include <linux/kernel.h>
53 #include <linux/module.h>
54 #include <linux/types.h>
55 #include <linux/errno.h>
56 #include <linux/compiler.h>
57 #include <linux/spinlock.h>
58 #include <linux/skbuff.h>
59 #include <linux/string.h>
60 #include <linux/slab.h>
61 #include <linux/list.h>
62 #include <linux/rbtree.h>
63 #include <linux/init.h>
64 #include <linux/rtnetlink.h>
65 #include <linux/pkt_sched.h>
66 #include <net/netlink.h>
67 #include <net/pkt_sched.h>
68 #include <net/pkt_cls.h>
69 #include <asm/div64.h>
70 
71 /*
72  * kernel internal service curve representation:
73  *   coordinates are given by 64 bit unsigned integers.
74  *   x-axis: unit is clock count.
75  *   y-axis: unit is byte.
76  *
77  *   The service curve parameters are converted to the internal
78  *   representation. The slope values are scaled to avoid overflow.
79  *   the inverse slope values as well as the y-projection of the 1st
80  *   segment are kept in order to avoid 64-bit divide operations
81  *   that are expensive on 32-bit architectures.
82  */
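/*
 * Illustrative example (added for clarity): a service curve given as
 * m1 = 2 Mbit/s, d = 10 ms, m2 = 1 Mbit/s is stored as two linear
 * segments.  The first segment has slope sm1 (the scaled form of
 * 250000 bytes/s), spans dx = 10 ms worth of clock ticks and rises by
 * dy = 2500 bytes; from there on the curve continues with slope sm2
 * (the scaled form of 125000 bytes/s).  ism1/ism2 hold the precomputed
 * inverse slopes so that time-for-bytes lookups avoid 64-bit divisions.
 */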
83 
84 struct internal_sc {
85 	u64	sm1;	/* scaled slope of the 1st segment */
86 	u64	ism1;	/* scaled inverse-slope of the 1st segment */
87 	u64	dx;	/* the x-projection of the 1st segment */
88 	u64	dy;	/* the y-projection of the 1st segment */
89 	u64	sm2;	/* scaled slope of the 2nd segment */
90 	u64	ism2;	/* scaled inverse-slope of the 2nd segment */
91 };
92 
93 /* runtime service curve */
94 struct runtime_sc {
95 	u64	x;	/* current starting position on x-axis */
96 	u64	y;	/* current starting position on y-axis */
97 	u64	sm1;	/* scaled slope of the 1st segment */
98 	u64	ism1;	/* scaled inverse-slope of the 1st segment */
99 	u64	dx;	/* the x-projection of the 1st segment */
100 	u64	dy;	/* the y-projection of the 1st segment */
101 	u64	sm2;	/* scaled slope of the 2nd segment */
102 	u64	ism2;	/* scaled inverse-slope of the 2nd segment */
103 };
104 
105 enum hfsc_class_flags {
106 	HFSC_RSC = 0x1,
107 	HFSC_FSC = 0x2,
108 	HFSC_USC = 0x4
109 };
110 
111 struct hfsc_class {
112 	struct Qdisc_class_common cl_common;
113 
114 	struct gnet_stats_basic_packed bstats;
115 	struct gnet_stats_queue qstats;
116 	struct net_rate_estimator __rcu *rate_est;
117 	struct tcf_proto __rcu *filter_list; /* filter list */
118 	struct tcf_block *block;
119 	unsigned int	filter_cnt;	/* filter count */
120 	unsigned int	level;		/* class level in hierarchy */
121 
122 	struct hfsc_sched *sched;	/* scheduler data */
123 	struct hfsc_class *cl_parent;	/* parent class */
124 	struct list_head siblings;	/* sibling classes */
125 	struct list_head children;	/* child classes */
126 	struct Qdisc	*qdisc;		/* leaf qdisc */
127 
128 	struct rb_node el_node;		/* qdisc's eligible tree member */
129 	struct rb_root vt_tree;		/* active children sorted by cl_vt */
130 	struct rb_node vt_node;		/* parent's vt_tree member */
131 	struct rb_root cf_tree;		/* active children sorted by cl_f */
132 	struct rb_node cf_node;		/* parent's cf_tree member */
133 
134 	u64	cl_total;		/* total work in bytes */
135 	u64	cl_cumul;		/* cumulative work in bytes done by
136 					   real-time criteria */
137 
138 	u64	cl_d;			/* deadline */
139 	u64	cl_e;			/* eligible time */
140 	u64	cl_vt;			/* virtual time */
141 	u64	cl_f;			/* time when this class will fit for
142 					   link-sharing, max(myf, cfmin) */
143 	u64	cl_myf;			/* my fit-time (calculated from this
144 					   class's own upperlimit curve) */
145 	u64	cl_cfmin;		/* earliest children's fit-time (used
146 					   with cl_myf to obtain cl_f) */
147 	u64	cl_cvtmin;		/* minimal virtual time among the
148 					   children fit for link-sharing
149 					   (monotonic within a period) */
150 	u64	cl_vtadj;		/* intra-period cumulative vt
151 					   adjustment */
152 	u64	cl_cvtoff;		/* largest virtual time seen among
153 					   the children */
154 
155 	struct internal_sc cl_rsc;	/* internal real-time service curve */
156 	struct internal_sc cl_fsc;	/* internal fair service curve */
157 	struct internal_sc cl_usc;	/* internal upperlimit service curve */
158 	struct runtime_sc cl_deadline;	/* deadline curve */
159 	struct runtime_sc cl_eligible;	/* eligible curve */
160 	struct runtime_sc cl_virtual;	/* virtual curve */
161 	struct runtime_sc cl_ulimit;	/* upperlimit curve */
162 
163 	u8		cl_flags;	/* which curves are valid */
164 	u32		cl_vtperiod;	/* vt period sequence number */
165 	u32		cl_parentperiod;/* parent's vt period sequence number*/
166 	u32		cl_nactive;	/* number of active children */
167 };
168 
169 struct hfsc_sched {
170 	u16	defcls;				/* default class id */
171 	struct hfsc_class root;			/* root class */
172 	struct Qdisc_class_hash clhash;		/* class hash */
173 	struct rb_root eligible;		/* eligible tree */
174 	struct qdisc_watchdog watchdog;		/* watchdog timer */
175 };
176 
177 #define	HT_INFINITY	0xffffffffffffffffULL	/* infinite time value */
178 
179 
180 /*
181  * eligible tree holds backlogged classes sorted by their eligible times.
182  * there is one eligible tree per hfsc instance.
183  */
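/*
 * Worked example (illustration only): suppose three backlogged classes
 * have (cl_e, cl_d) pairs of (2, 9), (5, 7) and (8, 6) and cur_time is 6.
 * The eligible set is then {(2, 9), (5, 7)}: eltree_get_mindl() below
 * returns the class with cl_d == 7, the smallest deadline among the
 * eligible classes, while eltree_get_minel() returns the class with
 * cl_e == 2, the leftmost node of the tree.
 */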
184 
185 static void
186 eltree_insert(struct hfsc_class *cl)
187 {
188 	struct rb_node **p = &cl->sched->eligible.rb_node;
189 	struct rb_node *parent = NULL;
190 	struct hfsc_class *cl1;
191 
192 	while (*p != NULL) {
193 		parent = *p;
194 		cl1 = rb_entry(parent, struct hfsc_class, el_node);
195 		if (cl->cl_e >= cl1->cl_e)
196 			p = &parent->rb_right;
197 		else
198 			p = &parent->rb_left;
199 	}
200 	rb_link_node(&cl->el_node, parent, p);
201 	rb_insert_color(&cl->el_node, &cl->sched->eligible);
202 }
203 
204 static inline void
205 eltree_remove(struct hfsc_class *cl)
206 {
207 	rb_erase(&cl->el_node, &cl->sched->eligible);
208 }
209 
210 static inline void
211 eltree_update(struct hfsc_class *cl)
212 {
213 	eltree_remove(cl);
214 	eltree_insert(cl);
215 }
216 
217 /* find the class with the minimum deadline among the eligible classes */
218 static inline struct hfsc_class *
219 eltree_get_mindl(struct hfsc_sched *q, u64 cur_time)
220 {
221 	struct hfsc_class *p, *cl = NULL;
222 	struct rb_node *n;
223 
224 	for (n = rb_first(&q->eligible); n != NULL; n = rb_next(n)) {
225 		p = rb_entry(n, struct hfsc_class, el_node);
226 		if (p->cl_e > cur_time)
227 			break;
228 		if (cl == NULL || p->cl_d < cl->cl_d)
229 			cl = p;
230 	}
231 	return cl;
232 }
233 
234 /* find the class with minimum eligible time among the eligible classes */
235 static inline struct hfsc_class *
236 eltree_get_minel(struct hfsc_sched *q)
237 {
238 	struct rb_node *n;
239 
240 	n = rb_first(&q->eligible);
241 	if (n == NULL)
242 		return NULL;
243 	return rb_entry(n, struct hfsc_class, el_node);
244 }
245 
246 /*
247  * vttree holds backlogged child classes sorted by their virtual
248  * time. each intermediate class has one vttree.
249  */
250 static void
251 vttree_insert(struct hfsc_class *cl)
252 {
253 	struct rb_node **p = &cl->cl_parent->vt_tree.rb_node;
254 	struct rb_node *parent = NULL;
255 	struct hfsc_class *cl1;
256 
257 	while (*p != NULL) {
258 		parent = *p;
259 		cl1 = rb_entry(parent, struct hfsc_class, vt_node);
260 		if (cl->cl_vt >= cl1->cl_vt)
261 			p = &parent->rb_right;
262 		else
263 			p = &parent->rb_left;
264 	}
265 	rb_link_node(&cl->vt_node, parent, p);
266 	rb_insert_color(&cl->vt_node, &cl->cl_parent->vt_tree);
267 }
268 
269 static inline void
270 vttree_remove(struct hfsc_class *cl)
271 {
272 	rb_erase(&cl->vt_node, &cl->cl_parent->vt_tree);
273 }
274 
275 static inline void
276 vttree_update(struct hfsc_class *cl)
277 {
278 	vttree_remove(cl);
279 	vttree_insert(cl);
280 }
281 
282 static inline struct hfsc_class *
283 vttree_firstfit(struct hfsc_class *cl, u64 cur_time)
284 {
285 	struct hfsc_class *p;
286 	struct rb_node *n;
287 
288 	for (n = rb_first(&cl->vt_tree); n != NULL; n = rb_next(n)) {
289 		p = rb_entry(n, struct hfsc_class, vt_node);
290 		if (p->cl_f <= cur_time)
291 			return p;
292 	}
293 	return NULL;
294 }
295 
296 /*
297  * get the leaf class with the minimum vt in the hierarchy
298  */
299 static struct hfsc_class *
300 vttree_get_minvt(struct hfsc_class *cl, u64 cur_time)
301 {
302 	/* if the root class's cfmin is bigger than cur_time, nothing to do */
303 	if (cl->cl_cfmin > cur_time)
304 		return NULL;
305 
306 	while (cl->level > 0) {
307 		cl = vttree_firstfit(cl, cur_time);
308 		if (cl == NULL)
309 			return NULL;
310 		/*
311 		 * update parent's cl_cvtmin.
312 		 */
313 		if (cl->cl_parent->cl_cvtmin < cl->cl_vt)
314 			cl->cl_parent->cl_cvtmin = cl->cl_vt;
315 	}
316 	return cl;
317 }
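/*
 * Illustrative walk-through of vttree_get_minvt() above (added note):
 * provided the root's cl_cfmin has passed, each loop iteration descends
 * one level of the hierarchy, picking the child with the smallest cl_vt
 * among those whose fit-time cl_f has already been reached
 * (vttree_firstfit() scans the vt-ordered tree and returns the first such
 * child).  The parent's cl_cvtmin is raised to the chosen child's vt on
 * the way down, keeping it monotonic within the backlog period.
 */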
318 
319 static void
320 cftree_insert(struct hfsc_class *cl)
321 {
322 	struct rb_node **p = &cl->cl_parent->cf_tree.rb_node;
323 	struct rb_node *parent = NULL;
324 	struct hfsc_class *cl1;
325 
326 	while (*p != NULL) {
327 		parent = *p;
328 		cl1 = rb_entry(parent, struct hfsc_class, cf_node);
329 		if (cl->cl_f >= cl1->cl_f)
330 			p = &parent->rb_right;
331 		else
332 			p = &parent->rb_left;
333 	}
334 	rb_link_node(&cl->cf_node, parent, p);
335 	rb_insert_color(&cl->cf_node, &cl->cl_parent->cf_tree);
336 }
337 
338 static inline void
339 cftree_remove(struct hfsc_class *cl)
340 {
341 	rb_erase(&cl->cf_node, &cl->cl_parent->cf_tree);
342 }
343 
344 static inline void
345 cftree_update(struct hfsc_class *cl)
346 {
347 	cftree_remove(cl);
348 	cftree_insert(cl);
349 }
350 
351 /*
352  * service curve support functions
353  *
354  *  external service curve parameters
355  *	m: bps
356  *	d: us
357  *  internal service curve parameters
358  *	sm: (bytes/psched_us) << SM_SHIFT
359  *	ism: (psched_us/byte) << ISM_SHIFT
360  *	dx: psched_us
361  *
362  * The clock source resolution with ktime and PSCHED_SHIFT 10 is 1.024us.
363  *
364  * sm and ism are scaled in order to keep effective digits.
365  * SM_SHIFT and ISM_SHIFT are selected so that at least 4 significant
366  * decimal digits are preserved, as shown in the following table.
367  *
368  *  bits/sec      100Kbps     1Mbps     10Mbps     100Mbps    1Gbps
369  *  ------------+-------------------------------------------------------
370  *  bytes/1.024us 12.8e-3    128e-3     1280e-3    12800e-3   128000e-3
371  *
372  *  1.024us/byte  78.125     7.8125     0.78125    0.078125   0.0078125
373  *
374  * So, for PSCHED_SHIFT 10 we need: SM_SHIFT 20, ISM_SHIFT 18.
375  */
376 #define	SM_SHIFT	(30 - PSCHED_SHIFT)
377 #define	ISM_SHIFT	(8 + PSCHED_SHIFT)
378 
379 #define	SM_MASK		((1ULL << SM_SHIFT) - 1)
380 #define	ISM_MASK	((1ULL << ISM_SHIFT) - 1)
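/*
 * Worked example (illustration, again assuming PSCHED_SHIFT 10): at
 * 10 Mbit/s a class is entitled to about 1.28 bytes per 1.024us tick.
 * Stored unscaled that would truncate to 1 byte/tick and lose most of the
 * precision; scaled by SM_SHIFT it becomes roughly 1.28 << 20 ~= 1342177.
 * The inverse slope is about 0.78125 ticks/byte, i.e. roughly
 * 0.78125 << 18 == 204800 after ISM_SHIFT.
 */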
381 
382 static inline u64
383 seg_x2y(u64 x, u64 sm)
384 {
385 	u64 y;
386 
387 	/*
388 	 * compute
389 	 *	y = x * sm >> SM_SHIFT
390 	 * but split x into its upper and lower bits to avoid overflow
391 	 */
392 	y = (x >> SM_SHIFT) * sm + (((x & SM_MASK) * sm) >> SM_SHIFT);
393 	return y;
394 }
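/*
 * Why the split in seg_x2y() above is exact (explanatory note, not part of
 * the original): writing x = a * 2^SM_SHIFT + b with b = x & SM_MASK,
 * the term a * sm * 2^SM_SHIFT has no bits below SM_SHIFT, so
 *
 *	(x * sm) >> SM_SHIFT == a * sm + ((b * sm) >> SM_SHIFT)
 *
 * holds exactly, while (x & SM_MASK) * sm stays within 64 bits for any
 * realistic rate because the masked factor is below 2^SM_SHIFT.
 */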
395 
396 static inline u64
397 seg_y2x(u64 y, u64 ism)
398 {
399 	u64 x;
400 
401 	if (y == 0)
402 		x = 0;
403 	else if (ism == HT_INFINITY)
404 		x = HT_INFINITY;
405 	else {
406 		x = (y >> ISM_SHIFT) * ism
407 		    + (((y & ISM_MASK) * ism) >> ISM_SHIFT);
408 	}
409 	return x;
410 }
411 
412 /* convert m (bps) into sm (bytes/psched us) */
413 static u64
414 m2sm(u32 m)
415 {
416 	u64 sm;
417 
418 	sm = ((u64)m << SM_SHIFT);
419 	sm += PSCHED_TICKS_PER_SEC - 1;
420 	do_div(sm, PSCHED_TICKS_PER_SEC);
421 	return sm;
422 }
423 
424 /* convert m (bps) into ism (psched us/byte) */
425 static u64
426 m2ism(u32 m)
427 {
428 	u64 ism;
429 
430 	if (m == 0)
431 		ism = HT_INFINITY;
432 	else {
433 		ism = ((u64)PSCHED_TICKS_PER_SEC << ISM_SHIFT);
434 		ism += m - 1;
435 		do_div(ism, m);
436 	}
437 	return ism;
438 }
439 
440 /* convert d (us) into dx (psched us) */
441 static u64
442 d2dx(u32 d)
443 {
444 	u64 dx;
445 
446 	dx = ((u64)d * PSCHED_TICKS_PER_SEC);
447 	dx += USEC_PER_SEC - 1;
448 	do_div(dx, USEC_PER_SEC);
449 	return dx;
450 }
451 
452 /* convert sm (bytes/psched us) into m (bps) */
453 static u32
454 sm2m(u64 sm)
455 {
456 	u64 m;
457 
458 	m = (sm * PSCHED_TICKS_PER_SEC) >> SM_SHIFT;
459 	return (u32)m;
460 }
461 
462 /* convert dx (psched us) into d (us) */
463 static u32
464 dx2d(u64 dx)
465 {
466 	u64 d;
467 
468 	d = dx * USEC_PER_SEC;
469 	do_div(d, PSCHED_TICKS_PER_SEC);
470 	return (u32)d;
471 }
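/*
 * Worked conversions (illustration only, assuming PSCHED_SHIFT 10):
 * m = 1 Mbit/s (125000 bytes/s) is about 0.128 bytes per 1.024us tick, so
 * m2sm() yields roughly 0.128 << 20 ~= 134218 and m2ism() roughly
 * 7.8125 << 18 == 2048000; d2dx() turns d = 10000 us into about 9766 ticks.
 * All three round up rather than truncate, as the "+= ... - 1" before each
 * do_div() shows.
 */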
472 
473 static void
474 sc2isc(struct tc_service_curve *sc, struct internal_sc *isc)
475 {
476 	isc->sm1  = m2sm(sc->m1);
477 	isc->ism1 = m2ism(sc->m1);
478 	isc->dx   = d2dx(sc->d);
479 	isc->dy   = seg_x2y(isc->dx, isc->sm1);
480 	isc->sm2  = m2sm(sc->m2);
481 	isc->ism2 = m2ism(sc->m2);
482 }
483 
484 /*
485  * initialize the runtime service curve with the given internal
486  * service curve starting at (x, y).
487  */
488 static void
489 rtsc_init(struct runtime_sc *rtsc, struct internal_sc *isc, u64 x, u64 y)
490 {
491 	rtsc->x	   = x;
492 	rtsc->y    = y;
493 	rtsc->sm1  = isc->sm1;
494 	rtsc->ism1 = isc->ism1;
495 	rtsc->dx   = isc->dx;
496 	rtsc->dy   = isc->dy;
497 	rtsc->sm2  = isc->sm2;
498 	rtsc->ism2 = isc->ism2;
499 }
500 
501 /*
502  * calculate the x-projection of the runtime service curve for the
503  * given y-projection value
504  */
505 static u64
506 rtsc_y2x(struct runtime_sc *rtsc, u64 y)
507 {
508 	u64 x;
509 
510 	if (y < rtsc->y)
511 		x = rtsc->x;
512 	else if (y <= rtsc->y + rtsc->dy) {
513 		/* x belongs to the 1st segment */
514 		if (rtsc->dy == 0)
515 			x = rtsc->x + rtsc->dx;
516 		else
517 			x = rtsc->x + seg_y2x(y - rtsc->y, rtsc->ism1);
518 	} else {
519 		/* x belongs to the 2nd segment */
520 		x = rtsc->x + rtsc->dx
521 		    + seg_y2x(y - rtsc->y - rtsc->dy, rtsc->ism2);
522 	}
523 	return x;
524 }
525 
526 static u64
527 rtsc_x2y(struct runtime_sc *rtsc, u64 x)
528 {
529 	u64 y;
530 
531 	if (x <= rtsc->x)
532 		y = rtsc->y;
533 	else if (x <= rtsc->x + rtsc->dx)
534 		/* y belongs to the 1st segment */
535 		y = rtsc->y + seg_x2y(x - rtsc->x, rtsc->sm1);
536 	else
537 		/* y belongs to the 2nd segment */
538 		y = rtsc->y + rtsc->dy
539 		    + seg_x2y(x - rtsc->x - rtsc->dx, rtsc->sm2);
540 	return y;
541 }
542 
543 /*
544  * update the runtime service curve by taking the minimum of the current
545  * runtime service curve and the service curve starting at (x, y).
546  */
547 static void
548 rtsc_min(struct runtime_sc *rtsc, struct internal_sc *isc, u64 x, u64 y)
549 {
550 	u64 y1, y2, dx, dy;
551 	u32 dsm;
552 
553 	if (isc->sm1 <= isc->sm2) {
554 		/* service curve is convex */
555 		y1 = rtsc_x2y(rtsc, x);
556 		if (y1 < y)
557 			/* the current rtsc is smaller */
558 			return;
559 		rtsc->x = x;
560 		rtsc->y = y;
561 		return;
562 	}
563 
564 	/*
565 	 * service curve is concave
566 	 * compute the two y values of the current rtsc
567 	 *	y1: at x
568 	 *	y2: at (x + dx)
569 	 */
570 	y1 = rtsc_x2y(rtsc, x);
571 	if (y1 <= y) {
572 		/* rtsc is below isc, no change to rtsc */
573 		return;
574 	}
575 
576 	y2 = rtsc_x2y(rtsc, x + isc->dx);
577 	if (y2 >= y + isc->dy) {
578 		/* rtsc is above isc, replace rtsc by isc */
579 		rtsc->x = x;
580 		rtsc->y = y;
581 		rtsc->dx = isc->dx;
582 		rtsc->dy = isc->dy;
583 		return;
584 	}
585 
586 	/*
587 	 * the two curves intersect
588 	 * compute the offsets (dx, dy) using the reverse
589 	 * function of seg_x2y()
590 	 *	seg_x2y(dx, sm1) == seg_x2y(dx, sm2) + (y1 - y)
591 	 */
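	/*
	 * Explanatory note (added): approximating seg_x2y(t, sm) by
	 * (t * sm) >> SM_SHIFT, the condition above becomes
	 *
	 *	dx * sm1 == dx * sm2 + ((y1 - y) << SM_SHIFT)
	 *
	 * hence dx == ((y1 - y) << SM_SHIFT) / (sm1 - sm2), which is what the
	 * next three statements compute with do_div().
	 */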
592 	dx = (y1 - y) << SM_SHIFT;
593 	dsm = isc->sm1 - isc->sm2;
594 	do_div(dx, dsm);
595 	/*
596 	 * check if (x, y1) belongs to the 1st segment of rtsc.
597 	 * if so, add the offset.
598 	 */
599 	if (rtsc->x + rtsc->dx > x)
600 		dx += rtsc->x + rtsc->dx - x;
601 	dy = seg_x2y(dx, isc->sm1);
602 
603 	rtsc->x = x;
604 	rtsc->y = y;
605 	rtsc->dx = dx;
606 	rtsc->dy = dy;
607 }
608 
609 static void
610 init_ed(struct hfsc_class *cl, unsigned int next_len)
611 {
612 	u64 cur_time = psched_get_time();
613 
614 	/* update the deadline curve */
615 	rtsc_min(&cl->cl_deadline, &cl->cl_rsc, cur_time, cl->cl_cumul);
616 
617 	/*
618 	 * update the eligible curve.
619 	 * for concave, it is equal to the deadline curve.
620 	 * for convex, it is a linear curve with slope m2.
621 	 */
622 	cl->cl_eligible = cl->cl_deadline;
623 	if (cl->cl_rsc.sm1 <= cl->cl_rsc.sm2) {
624 		cl->cl_eligible.dx = 0;
625 		cl->cl_eligible.dy = 0;
626 	}
627 
628 	/* compute e and d */
629 	cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
630 	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);
631 
632 	eltree_insert(cl);
633 }
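/*
 * Illustration for the eligible curve set up in init_ed() above (added
 * note): for a concave real-time curve (sm1 > sm2, a burst allowance
 * followed by a lower sustained rate) the eligible curve is simply the
 * deadline curve.  For a convex one (sm1 <= sm2) the first, slower segment
 * is dropped (dx = dy = 0 above), leaving a straight line of slope sm2
 * through the same origin, so the class may become eligible earlier than
 * the two-segment deadline curve alone would allow.
 */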
634 
635 static void
636 update_ed(struct hfsc_class *cl, unsigned int next_len)
637 {
638 	cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
639 	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);
640 
641 	eltree_update(cl);
642 }
643 
644 static inline void
645 update_d(struct hfsc_class *cl, unsigned int next_len)
646 {
647 	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);
648 }
649 
650 static inline void
651 update_cfmin(struct hfsc_class *cl)
652 {
653 	struct rb_node *n = rb_first(&cl->cf_tree);
654 	struct hfsc_class *p;
655 
656 	if (n == NULL) {
657 		cl->cl_cfmin = 0;
658 		return;
659 	}
660 	p = rb_entry(n, struct hfsc_class, cf_node);
661 	cl->cl_cfmin = p->cl_f;
662 }
663 
664 static void
665 init_vf(struct hfsc_class *cl, unsigned int len)
666 {
667 	struct hfsc_class *max_cl;
668 	struct rb_node *n;
669 	u64 vt, f, cur_time;
670 	int go_active;
671 
672 	cur_time = 0;
673 	go_active = 1;
674 	for (; cl->cl_parent != NULL; cl = cl->cl_parent) {
675 		if (go_active && cl->cl_nactive++ == 0)
676 			go_active = 1;
677 		else
678 			go_active = 0;
679 
680 		if (go_active) {
681 			n = rb_last(&cl->cl_parent->vt_tree);
682 			if (n != NULL) {
683 				max_cl = rb_entry(n, struct hfsc_class, vt_node);
684 				/*
685 				 * set vt to the average of the min and max
686 				 * sibling virtual times.  if the parent's period
687 				 * didn't change, don't decrease vt of the class.
688 				 */
689 				vt = max_cl->cl_vt;
690 				if (cl->cl_parent->cl_cvtmin != 0)
691 					vt = (cl->cl_parent->cl_cvtmin + vt)/2;
692 
693 				if (cl->cl_parent->cl_vtperiod !=
694 				    cl->cl_parentperiod || vt > cl->cl_vt)
695 					cl->cl_vt = vt;
696 			} else {
697 				/*
698 				 * first child for a new parent backlog period.
699 				 * initialize cl_vt to the highest value seen
700 				 * among the siblings. this is analogous to
701 				 * what cur_time would provide in the realtime case.
702 				 */
703 				cl->cl_vt = cl->cl_parent->cl_cvtoff;
704 				cl->cl_parent->cl_cvtmin = 0;
705 			}
706 
707 			/* update the virtual curve */
708 			rtsc_min(&cl->cl_virtual, &cl->cl_fsc, cl->cl_vt, cl->cl_total);
709 			cl->cl_vtadj = 0;
710 
711 			cl->cl_vtperiod++;  /* increment vt period */
712 			cl->cl_parentperiod = cl->cl_parent->cl_vtperiod;
713 			if (cl->cl_parent->cl_nactive == 0)
714 				cl->cl_parentperiod++;
715 			cl->cl_f = 0;
716 
717 			vttree_insert(cl);
718 			cftree_insert(cl);
719 
720 			if (cl->cl_flags & HFSC_USC) {
721 				/* class has upper limit curve */
722 				if (cur_time == 0)
723 					cur_time = psched_get_time();
724 
725 				/* update the ulimit curve */
726 				rtsc_min(&cl->cl_ulimit, &cl->cl_usc, cur_time,
727 					 cl->cl_total);
728 				/* compute myf */
729 				cl->cl_myf = rtsc_y2x(&cl->cl_ulimit,
730 						      cl->cl_total);
731 			}
732 		}
733 
734 		f = max(cl->cl_myf, cl->cl_cfmin);
735 		if (f != cl->cl_f) {
736 			cl->cl_f = f;
737 			cftree_update(cl);
738 		}
739 		update_cfmin(cl->cl_parent);
740 	}
741 }
742 
743 static void
744 update_vf(struct hfsc_class *cl, unsigned int len, u64 cur_time)
745 {
746 	u64 f; /* , myf_bound, delta; */
747 	int go_passive = 0;
748 
749 	if (cl->qdisc->q.qlen == 0 && cl->cl_flags & HFSC_FSC)
750 		go_passive = 1;
751 
752 	for (; cl->cl_parent != NULL; cl = cl->cl_parent) {
753 		cl->cl_total += len;
754 
755 		if (!(cl->cl_flags & HFSC_FSC) || cl->cl_nactive == 0)
756 			continue;
757 
758 		if (go_passive && --cl->cl_nactive == 0)
759 			go_passive = 1;
760 		else
761 			go_passive = 0;
762 
763 		/* update vt */
764 		cl->cl_vt = rtsc_y2x(&cl->cl_virtual, cl->cl_total) + cl->cl_vtadj;
765 
766 		/*
767 		 * if vt of the class is smaller than cvtmin,
768 		 * the class was skipped in the past due to non-fit.
769 		 * if so, we need to adjust vtadj.
770 		 */
771 		if (cl->cl_vt < cl->cl_parent->cl_cvtmin) {
772 			cl->cl_vtadj += cl->cl_parent->cl_cvtmin - cl->cl_vt;
773 			cl->cl_vt = cl->cl_parent->cl_cvtmin;
774 		}
775 
776 		if (go_passive) {
777 			/* no more active child, going passive */
778 
779 			/* update cvtoff of the parent class */
780 			if (cl->cl_vt > cl->cl_parent->cl_cvtoff)
781 				cl->cl_parent->cl_cvtoff = cl->cl_vt;
782 
783 			/* remove this class from the vt tree */
784 			vttree_remove(cl);
785 
786 			cftree_remove(cl);
787 			update_cfmin(cl->cl_parent);
788 
789 			continue;
790 		}
791 
792 		/* update the vt tree */
793 		vttree_update(cl);
794 
795 		/* update f */
796 		if (cl->cl_flags & HFSC_USC) {
797 			cl->cl_myf = rtsc_y2x(&cl->cl_ulimit, cl->cl_total);
798 #if 0
799 			cl->cl_myf = cl->cl_myfadj + rtsc_y2x(&cl->cl_ulimit,
800 							      cl->cl_total);
801 			/*
802 			 * This code causes classes to stay way under their
803 			 * limit when multiple classes are used at gigabit
804 			 * speed. needs investigation. -kaber
805 			 */
806 			/*
807 			 * if myf lags behind by more than one clock tick
808 			 * from the current time, adjust myfadj to prevent
809 			 * a rate-limited class from going greedy.
810 			 * in a steady state under rate-limiting, myf
811 			 * fluctuates within one clock tick.
812 			 */
813 			myf_bound = cur_time - PSCHED_JIFFIE2US(1);
814 			if (cl->cl_myf < myf_bound) {
815 				delta = cur_time - cl->cl_myf;
816 				cl->cl_myfadj += delta;
817 				cl->cl_myf += delta;
818 			}
819 #endif
820 		}
821 
822 		f = max(cl->cl_myf, cl->cl_cfmin);
823 		if (f != cl->cl_f) {
824 			cl->cl_f = f;
825 			cftree_update(cl);
826 			update_cfmin(cl->cl_parent);
827 		}
828 	}
829 }
830 
831 static unsigned int
832 qdisc_peek_len(struct Qdisc *sch)
833 {
834 	struct sk_buff *skb;
835 	unsigned int len;
836 
837 	skb = sch->ops->peek(sch);
838 	if (unlikely(skb == NULL)) {
839 		qdisc_warn_nonwc("qdisc_peek_len", sch);
840 		return 0;
841 	}
842 	len = qdisc_pkt_len(skb);
843 
844 	return len;
845 }
846 
847 static void
848 hfsc_purge_queue(struct Qdisc *sch, struct hfsc_class *cl)
849 {
850 	unsigned int len = cl->qdisc->q.qlen;
851 	unsigned int backlog = cl->qdisc->qstats.backlog;
852 
853 	qdisc_reset(cl->qdisc);
854 	qdisc_tree_reduce_backlog(cl->qdisc, len, backlog);
855 }
856 
857 static void
858 hfsc_adjust_levels(struct hfsc_class *cl)
859 {
860 	struct hfsc_class *p;
861 	unsigned int level;
862 
863 	do {
864 		level = 0;
865 		list_for_each_entry(p, &cl->children, siblings) {
866 			if (p->level >= level)
867 				level = p->level + 1;
868 		}
869 		cl->level = level;
870 	} while ((cl = cl->cl_parent) != NULL);
871 }
872 
873 static inline struct hfsc_class *
874 hfsc_find_class(u32 classid, struct Qdisc *sch)
875 {
876 	struct hfsc_sched *q = qdisc_priv(sch);
877 	struct Qdisc_class_common *clc;
878 
879 	clc = qdisc_class_find(&q->clhash, classid);
880 	if (clc == NULL)
881 		return NULL;
882 	return container_of(clc, struct hfsc_class, cl_common);
883 }
884 
885 static void
886 hfsc_change_rsc(struct hfsc_class *cl, struct tc_service_curve *rsc,
887 		u64 cur_time)
888 {
889 	sc2isc(rsc, &cl->cl_rsc);
890 	rtsc_init(&cl->cl_deadline, &cl->cl_rsc, cur_time, cl->cl_cumul);
891 	cl->cl_eligible = cl->cl_deadline;
892 	if (cl->cl_rsc.sm1 <= cl->cl_rsc.sm2) {
893 		cl->cl_eligible.dx = 0;
894 		cl->cl_eligible.dy = 0;
895 	}
896 	cl->cl_flags |= HFSC_RSC;
897 }
898 
899 static void
900 hfsc_change_fsc(struct hfsc_class *cl, struct tc_service_curve *fsc)
901 {
902 	sc2isc(fsc, &cl->cl_fsc);
903 	rtsc_init(&cl->cl_virtual, &cl->cl_fsc, cl->cl_vt, cl->cl_total);
904 	cl->cl_flags |= HFSC_FSC;
905 }
906 
907 static void
908 hfsc_change_usc(struct hfsc_class *cl, struct tc_service_curve *usc,
909 		u64 cur_time)
910 {
911 	sc2isc(usc, &cl->cl_usc);
912 	rtsc_init(&cl->cl_ulimit, &cl->cl_usc, cur_time, cl->cl_total);
913 	cl->cl_flags |= HFSC_USC;
914 }
915 
916 static const struct nla_policy hfsc_policy[TCA_HFSC_MAX + 1] = {
917 	[TCA_HFSC_RSC]	= { .len = sizeof(struct tc_service_curve) },
918 	[TCA_HFSC_FSC]	= { .len = sizeof(struct tc_service_curve) },
919 	[TCA_HFSC_USC]	= { .len = sizeof(struct tc_service_curve) },
920 };
921 
922 static int
923 hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
924 		  struct nlattr **tca, unsigned long *arg,
925 		  struct netlink_ext_ack *extack)
926 {
927 	struct hfsc_sched *q = qdisc_priv(sch);
928 	struct hfsc_class *cl = (struct hfsc_class *)*arg;
929 	struct hfsc_class *parent = NULL;
930 	struct nlattr *opt = tca[TCA_OPTIONS];
931 	struct nlattr *tb[TCA_HFSC_MAX + 1];
932 	struct tc_service_curve *rsc = NULL, *fsc = NULL, *usc = NULL;
933 	u64 cur_time;
934 	int err;
935 
936 	if (opt == NULL)
937 		return -EINVAL;
938 
939 	err = nla_parse_nested(tb, TCA_HFSC_MAX, opt, hfsc_policy, NULL);
940 	if (err < 0)
941 		return err;
942 
943 	if (tb[TCA_HFSC_RSC]) {
944 		rsc = nla_data(tb[TCA_HFSC_RSC]);
945 		if (rsc->m1 == 0 && rsc->m2 == 0)
946 			rsc = NULL;
947 	}
948 
949 	if (tb[TCA_HFSC_FSC]) {
950 		fsc = nla_data(tb[TCA_HFSC_FSC]);
951 		if (fsc->m1 == 0 && fsc->m2 == 0)
952 			fsc = NULL;
953 	}
954 
955 	if (tb[TCA_HFSC_USC]) {
956 		usc = nla_data(tb[TCA_HFSC_USC]);
957 		if (usc->m1 == 0 && usc->m2 == 0)
958 			usc = NULL;
959 	}
960 
961 	if (cl != NULL) {
962 		int old_flags;
963 
964 		if (parentid) {
965 			if (cl->cl_parent &&
966 			    cl->cl_parent->cl_common.classid != parentid)
967 				return -EINVAL;
968 			if (cl->cl_parent == NULL && parentid != TC_H_ROOT)
969 				return -EINVAL;
970 		}
971 		cur_time = psched_get_time();
972 
973 		if (tca[TCA_RATE]) {
974 			err = gen_replace_estimator(&cl->bstats, NULL,
975 						    &cl->rate_est,
976 						    NULL,
977 						    qdisc_root_sleeping_running(sch),
978 						    tca[TCA_RATE]);
979 			if (err)
980 				return err;
981 		}
982 
983 		sch_tree_lock(sch);
984 		old_flags = cl->cl_flags;
985 
986 		if (rsc != NULL)
987 			hfsc_change_rsc(cl, rsc, cur_time);
988 		if (fsc != NULL)
989 			hfsc_change_fsc(cl, fsc);
990 		if (usc != NULL)
991 			hfsc_change_usc(cl, usc, cur_time);
992 
993 		if (cl->qdisc->q.qlen != 0) {
994 			int len = qdisc_peek_len(cl->qdisc);
995 
996 			if (cl->cl_flags & HFSC_RSC) {
997 				if (old_flags & HFSC_RSC)
998 					update_ed(cl, len);
999 				else
1000 					init_ed(cl, len);
1001 			}
1002 
1003 			if (cl->cl_flags & HFSC_FSC) {
1004 				if (old_flags & HFSC_FSC)
1005 					update_vf(cl, 0, cur_time);
1006 				else
1007 					init_vf(cl, len);
1008 			}
1009 		}
1010 		sch_tree_unlock(sch);
1011 
1012 		return 0;
1013 	}
1014 
1015 	if (parentid == TC_H_ROOT)
1016 		return -EEXIST;
1017 
1018 	parent = &q->root;
1019 	if (parentid) {
1020 		parent = hfsc_find_class(parentid, sch);
1021 		if (parent == NULL)
1022 			return -ENOENT;
1023 	}
1024 
1025 	if (classid == 0 || TC_H_MAJ(classid ^ sch->handle) != 0)
1026 		return -EINVAL;
1027 	if (hfsc_find_class(classid, sch))
1028 		return -EEXIST;
1029 
1030 	if (rsc == NULL && fsc == NULL)
1031 		return -EINVAL;
1032 
1033 	cl = kzalloc(sizeof(struct hfsc_class), GFP_KERNEL);
1034 	if (cl == NULL)
1035 		return -ENOBUFS;
1036 
1037 	err = tcf_block_get(&cl->block, &cl->filter_list, sch, extack);
1038 	if (err) {
1039 		kfree(cl);
1040 		return err;
1041 	}
1042 
1043 	if (tca[TCA_RATE]) {
1044 		err = gen_new_estimator(&cl->bstats, NULL, &cl->rate_est,
1045 					NULL,
1046 					qdisc_root_sleeping_running(sch),
1047 					tca[TCA_RATE]);
1048 		if (err) {
1049 			tcf_block_put(cl->block);
1050 			kfree(cl);
1051 			return err;
1052 		}
1053 	}
1054 
1055 	if (rsc != NULL)
1056 		hfsc_change_rsc(cl, rsc, 0);
1057 	if (fsc != NULL)
1058 		hfsc_change_fsc(cl, fsc);
1059 	if (usc != NULL)
1060 		hfsc_change_usc(cl, usc, 0);
1061 
1062 	cl->cl_common.classid = classid;
1063 	cl->sched     = q;
1064 	cl->cl_parent = parent;
1065 	cl->qdisc = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
1066 				      classid, NULL);
1067 	if (cl->qdisc == NULL)
1068 		cl->qdisc = &noop_qdisc;
1069 	else
1070 		qdisc_hash_add(cl->qdisc, true);
1071 	INIT_LIST_HEAD(&cl->children);
1072 	cl->vt_tree = RB_ROOT;
1073 	cl->cf_tree = RB_ROOT;
1074 
1075 	sch_tree_lock(sch);
1076 	qdisc_class_hash_insert(&q->clhash, &cl->cl_common);
1077 	list_add_tail(&cl->siblings, &parent->children);
1078 	if (parent->level == 0)
1079 		hfsc_purge_queue(sch, parent);
1080 	hfsc_adjust_levels(parent);
1081 	sch_tree_unlock(sch);
1082 
1083 	qdisc_class_hash_grow(sch, &q->clhash);
1084 
1085 	*arg = (unsigned long)cl;
1086 	return 0;
1087 }
1088 
1089 static void
1090 hfsc_destroy_class(struct Qdisc *sch, struct hfsc_class *cl)
1091 {
1092 	struct hfsc_sched *q = qdisc_priv(sch);
1093 
1094 	tcf_block_put(cl->block);
1095 	qdisc_destroy(cl->qdisc);
1096 	gen_kill_estimator(&cl->rate_est);
1097 	if (cl != &q->root)
1098 		kfree(cl);
1099 }
1100 
1101 static int
1102 hfsc_delete_class(struct Qdisc *sch, unsigned long arg)
1103 {
1104 	struct hfsc_sched *q = qdisc_priv(sch);
1105 	struct hfsc_class *cl = (struct hfsc_class *)arg;
1106 
1107 	if (cl->level > 0 || cl->filter_cnt > 0 || cl == &q->root)
1108 		return -EBUSY;
1109 
1110 	sch_tree_lock(sch);
1111 
1112 	list_del(&cl->siblings);
1113 	hfsc_adjust_levels(cl->cl_parent);
1114 
1115 	hfsc_purge_queue(sch, cl);
1116 	qdisc_class_hash_remove(&q->clhash, &cl->cl_common);
1117 
1118 	sch_tree_unlock(sch);
1119 
1120 	hfsc_destroy_class(sch, cl);
1121 	return 0;
1122 }
1123 
1124 static struct hfsc_class *
1125 hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
1126 {
1127 	struct hfsc_sched *q = qdisc_priv(sch);
1128 	struct hfsc_class *head, *cl;
1129 	struct tcf_result res;
1130 	struct tcf_proto *tcf;
1131 	int result;
1132 
1133 	if (TC_H_MAJ(skb->priority ^ sch->handle) == 0 &&
1134 	    (cl = hfsc_find_class(skb->priority, sch)) != NULL)
1135 		if (cl->level == 0)
1136 			return cl;
1137 
1138 	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
1139 	head = &q->root;
1140 	tcf = rcu_dereference_bh(q->root.filter_list);
1141 	while (tcf && (result = tcf_classify(skb, tcf, &res, false)) >= 0) {
1142 #ifdef CONFIG_NET_CLS_ACT
1143 		switch (result) {
1144 		case TC_ACT_QUEUED:
1145 		case TC_ACT_STOLEN:
1146 		case TC_ACT_TRAP:
1147 			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
1148 			/* fall through */
1149 		case TC_ACT_SHOT:
1150 			return NULL;
1151 		}
1152 #endif
1153 		cl = (struct hfsc_class *)res.class;
1154 		if (!cl) {
1155 			cl = hfsc_find_class(res.classid, sch);
1156 			if (!cl)
1157 				break; /* filter selected invalid classid */
1158 			if (cl->level >= head->level)
1159 				break; /* filter may only point downwards */
1160 		}
1161 
1162 		if (cl->level == 0)
1163 			return cl; /* hit leaf class */
1164 
1165 		/* apply inner filter chain */
1166 		tcf = rcu_dereference_bh(cl->filter_list);
1167 		head = cl;
1168 	}
1169 
1170 	/* classification failed, try default class */
1171 	cl = hfsc_find_class(TC_H_MAKE(TC_H_MAJ(sch->handle), q->defcls), sch);
1172 	if (cl == NULL || cl->level > 0)
1173 		return NULL;
1174 
1175 	return cl;
1176 }
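/*
 * Example for hfsc_classify() above (illustrative values): with
 * sch->handle 1:0 and defcls 0x10, a packet that matches no filter and
 * carries no usable classid in skb->priority falls back to class 1:10;
 * if that class does not exist or is not a leaf, NULL is returned and the
 * caller drops the packet.
 */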
1177 
1178 static int
1179 hfsc_graft_class(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
1180 		 struct Qdisc **old, struct netlink_ext_ack *extack)
1181 {
1182 	struct hfsc_class *cl = (struct hfsc_class *)arg;
1183 
1184 	if (cl->level > 0)
1185 		return -EINVAL;
1186 	if (new == NULL) {
1187 		new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
1188 					cl->cl_common.classid, NULL);
1189 		if (new == NULL)
1190 			new = &noop_qdisc;
1191 	}
1192 
1193 	*old = qdisc_replace(sch, new, &cl->qdisc);
1194 	return 0;
1195 }
1196 
1197 static struct Qdisc *
1198 hfsc_class_leaf(struct Qdisc *sch, unsigned long arg)
1199 {
1200 	struct hfsc_class *cl = (struct hfsc_class *)arg;
1201 
1202 	if (cl->level == 0)
1203 		return cl->qdisc;
1204 
1205 	return NULL;
1206 }
1207 
1208 static void
1209 hfsc_qlen_notify(struct Qdisc *sch, unsigned long arg)
1210 {
1211 	struct hfsc_class *cl = (struct hfsc_class *)arg;
1212 
1213 	/* the vttree is handled in update_vf(), so update_vf(cl, 0, 0) needs
1214 	 * to be called explicitly to remove a class from the vttree.
1215 	 */
1216 	update_vf(cl, 0, 0);
1217 	if (cl->cl_flags & HFSC_RSC)
1218 		eltree_remove(cl);
1219 }
1220 
1221 static unsigned long
1222 hfsc_search_class(struct Qdisc *sch, u32 classid)
1223 {
1224 	return (unsigned long)hfsc_find_class(classid, sch);
1225 }
1226 
1227 static unsigned long
1228 hfsc_bind_tcf(struct Qdisc *sch, unsigned long parent, u32 classid)
1229 {
1230 	struct hfsc_class *p = (struct hfsc_class *)parent;
1231 	struct hfsc_class *cl = hfsc_find_class(classid, sch);
1232 
1233 	if (cl != NULL) {
1234 		if (p != NULL && p->level <= cl->level)
1235 			return 0;
1236 		cl->filter_cnt++;
1237 	}
1238 
1239 	return (unsigned long)cl;
1240 }
1241 
1242 static void
1243 hfsc_unbind_tcf(struct Qdisc *sch, unsigned long arg)
1244 {
1245 	struct hfsc_class *cl = (struct hfsc_class *)arg;
1246 
1247 	cl->filter_cnt--;
1248 }
1249 
1250 static struct tcf_block *hfsc_tcf_block(struct Qdisc *sch, unsigned long arg,
1251 					struct netlink_ext_ack *extack)
1252 {
1253 	struct hfsc_sched *q = qdisc_priv(sch);
1254 	struct hfsc_class *cl = (struct hfsc_class *)arg;
1255 
1256 	if (cl == NULL)
1257 		cl = &q->root;
1258 
1259 	return cl->block;
1260 }
1261 
1262 static int
1263 hfsc_dump_sc(struct sk_buff *skb, int attr, struct internal_sc *sc)
1264 {
1265 	struct tc_service_curve tsc;
1266 
1267 	tsc.m1 = sm2m(sc->sm1);
1268 	tsc.d  = dx2d(sc->dx);
1269 	tsc.m2 = sm2m(sc->sm2);
1270 	if (nla_put(skb, attr, sizeof(tsc), &tsc))
1271 		goto nla_put_failure;
1272 
1273 	return skb->len;
1274 
1275  nla_put_failure:
1276 	return -1;
1277 }
1278 
1279 static int
1280 hfsc_dump_curves(struct sk_buff *skb, struct hfsc_class *cl)
1281 {
1282 	if ((cl->cl_flags & HFSC_RSC) &&
1283 	    (hfsc_dump_sc(skb, TCA_HFSC_RSC, &cl->cl_rsc) < 0))
1284 		goto nla_put_failure;
1285 
1286 	if ((cl->cl_flags & HFSC_FSC) &&
1287 	    (hfsc_dump_sc(skb, TCA_HFSC_FSC, &cl->cl_fsc) < 0))
1288 		goto nla_put_failure;
1289 
1290 	if ((cl->cl_flags & HFSC_USC) &&
1291 	    (hfsc_dump_sc(skb, TCA_HFSC_USC, &cl->cl_usc) < 0))
1292 		goto nla_put_failure;
1293 
1294 	return skb->len;
1295 
1296  nla_put_failure:
1297 	return -1;
1298 }
1299 
1300 static int
1301 hfsc_dump_class(struct Qdisc *sch, unsigned long arg, struct sk_buff *skb,
1302 		struct tcmsg *tcm)
1303 {
1304 	struct hfsc_class *cl = (struct hfsc_class *)arg;
1305 	struct nlattr *nest;
1306 
1307 	tcm->tcm_parent = cl->cl_parent ? cl->cl_parent->cl_common.classid :
1308 					  TC_H_ROOT;
1309 	tcm->tcm_handle = cl->cl_common.classid;
1310 	if (cl->level == 0)
1311 		tcm->tcm_info = cl->qdisc->handle;
1312 
1313 	nest = nla_nest_start(skb, TCA_OPTIONS);
1314 	if (nest == NULL)
1315 		goto nla_put_failure;
1316 	if (hfsc_dump_curves(skb, cl) < 0)
1317 		goto nla_put_failure;
1318 	return nla_nest_end(skb, nest);
1319 
1320  nla_put_failure:
1321 	nla_nest_cancel(skb, nest);
1322 	return -EMSGSIZE;
1323 }
1324 
1325 static int
1326 hfsc_dump_class_stats(struct Qdisc *sch, unsigned long arg,
1327 	struct gnet_dump *d)
1328 {
1329 	struct hfsc_class *cl = (struct hfsc_class *)arg;
1330 	struct tc_hfsc_stats xstats;
1331 
1332 	cl->qstats.backlog = cl->qdisc->qstats.backlog;
1333 	xstats.level   = cl->level;
1334 	xstats.period  = cl->cl_vtperiod;
1335 	xstats.work    = cl->cl_total;
1336 	xstats.rtwork  = cl->cl_cumul;
1337 
1338 	if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch), d, NULL, &cl->bstats) < 0 ||
1339 	    gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
1340 	    gnet_stats_copy_queue(d, NULL, &cl->qstats, cl->qdisc->q.qlen) < 0)
1341 		return -1;
1342 
1343 	return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
1344 }
1345 
1346 
1347 
1348 static void
1349 hfsc_walk(struct Qdisc *sch, struct qdisc_walker *arg)
1350 {
1351 	struct hfsc_sched *q = qdisc_priv(sch);
1352 	struct hfsc_class *cl;
1353 	unsigned int i;
1354 
1355 	if (arg->stop)
1356 		return;
1357 
1358 	for (i = 0; i < q->clhash.hashsize; i++) {
1359 		hlist_for_each_entry(cl, &q->clhash.hash[i],
1360 				     cl_common.hnode) {
1361 			if (arg->count < arg->skip) {
1362 				arg->count++;
1363 				continue;
1364 			}
1365 			if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
1366 				arg->stop = 1;
1367 				return;
1368 			}
1369 			arg->count++;
1370 		}
1371 	}
1372 }
1373 
1374 static void
1375 hfsc_schedule_watchdog(struct Qdisc *sch)
1376 {
1377 	struct hfsc_sched *q = qdisc_priv(sch);
1378 	struct hfsc_class *cl;
1379 	u64 next_time = 0;
1380 
1381 	cl = eltree_get_minel(q);
1382 	if (cl)
1383 		next_time = cl->cl_e;
1384 	if (q->root.cl_cfmin != 0) {
1385 		if (next_time == 0 || next_time > q->root.cl_cfmin)
1386 			next_time = q->root.cl_cfmin;
1387 	}
1388 	if (next_time)
1389 		qdisc_watchdog_schedule(&q->watchdog, next_time);
1390 }
1391 
1392 static int
1393 hfsc_init_qdisc(struct Qdisc *sch, struct nlattr *opt,
1394 		struct netlink_ext_ack *extack)
1395 {
1396 	struct hfsc_sched *q = qdisc_priv(sch);
1397 	struct tc_hfsc_qopt *qopt;
1398 	int err;
1399 
1400 	qdisc_watchdog_init(&q->watchdog, sch);
1401 
1402 	if (!opt || nla_len(opt) < sizeof(*qopt))
1403 		return -EINVAL;
1404 	qopt = nla_data(opt);
1405 
1406 	q->defcls = qopt->defcls;
1407 	err = qdisc_class_hash_init(&q->clhash);
1408 	if (err < 0)
1409 		return err;
1410 	q->eligible = RB_ROOT;
1411 
1412 	err = tcf_block_get(&q->root.block, &q->root.filter_list, sch, extack);
1413 	if (err)
1414 		return err;
1415 
1416 	q->root.cl_common.classid = sch->handle;
1417 	q->root.sched   = q;
1418 	q->root.qdisc = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
1419 					  sch->handle, NULL);
1420 	if (q->root.qdisc == NULL)
1421 		q->root.qdisc = &noop_qdisc;
1422 	else
1423 		qdisc_hash_add(q->root.qdisc, true);
1424 	INIT_LIST_HEAD(&q->root.children);
1425 	q->root.vt_tree = RB_ROOT;
1426 	q->root.cf_tree = RB_ROOT;
1427 
1428 	qdisc_class_hash_insert(&q->clhash, &q->root.cl_common);
1429 	qdisc_class_hash_grow(sch, &q->clhash);
1430 
1431 	return 0;
1432 }
1433 
1434 static int
1435 hfsc_change_qdisc(struct Qdisc *sch, struct nlattr *opt,
1436 		  struct netlink_ext_ack *extack)
1437 {
1438 	struct hfsc_sched *q = qdisc_priv(sch);
1439 	struct tc_hfsc_qopt *qopt;
1440 
1441 	if (opt == NULL || nla_len(opt) < sizeof(*qopt))
1442 		return -EINVAL;
1443 	qopt = nla_data(opt);
1444 
1445 	sch_tree_lock(sch);
1446 	q->defcls = qopt->defcls;
1447 	sch_tree_unlock(sch);
1448 
1449 	return 0;
1450 }
1451 
1452 static void
1453 hfsc_reset_class(struct hfsc_class *cl)
1454 {
1455 	cl->cl_total        = 0;
1456 	cl->cl_cumul        = 0;
1457 	cl->cl_d            = 0;
1458 	cl->cl_e            = 0;
1459 	cl->cl_vt           = 0;
1460 	cl->cl_vtadj        = 0;
1461 	cl->cl_cvtmin       = 0;
1462 	cl->cl_cvtoff       = 0;
1463 	cl->cl_vtperiod     = 0;
1464 	cl->cl_parentperiod = 0;
1465 	cl->cl_f            = 0;
1466 	cl->cl_myf          = 0;
1467 	cl->cl_cfmin        = 0;
1468 	cl->cl_nactive      = 0;
1469 
1470 	cl->vt_tree = RB_ROOT;
1471 	cl->cf_tree = RB_ROOT;
1472 	qdisc_reset(cl->qdisc);
1473 
1474 	if (cl->cl_flags & HFSC_RSC)
1475 		rtsc_init(&cl->cl_deadline, &cl->cl_rsc, 0, 0);
1476 	if (cl->cl_flags & HFSC_FSC)
1477 		rtsc_init(&cl->cl_virtual, &cl->cl_fsc, 0, 0);
1478 	if (cl->cl_flags & HFSC_USC)
1479 		rtsc_init(&cl->cl_ulimit, &cl->cl_usc, 0, 0);
1480 }
1481 
1482 static void
1483 hfsc_reset_qdisc(struct Qdisc *sch)
1484 {
1485 	struct hfsc_sched *q = qdisc_priv(sch);
1486 	struct hfsc_class *cl;
1487 	unsigned int i;
1488 
1489 	for (i = 0; i < q->clhash.hashsize; i++) {
1490 		hlist_for_each_entry(cl, &q->clhash.hash[i], cl_common.hnode)
1491 			hfsc_reset_class(cl);
1492 	}
1493 	q->eligible = RB_ROOT;
1494 	qdisc_watchdog_cancel(&q->watchdog);
1495 	sch->qstats.backlog = 0;
1496 	sch->q.qlen = 0;
1497 }
1498 
1499 static void
1500 hfsc_destroy_qdisc(struct Qdisc *sch)
1501 {
1502 	struct hfsc_sched *q = qdisc_priv(sch);
1503 	struct hlist_node *next;
1504 	struct hfsc_class *cl;
1505 	unsigned int i;
1506 
1507 	for (i = 0; i < q->clhash.hashsize; i++) {
1508 		hlist_for_each_entry(cl, &q->clhash.hash[i], cl_common.hnode) {
1509 			tcf_block_put(cl->block);
1510 			cl->block = NULL;
1511 		}
1512 	}
1513 	for (i = 0; i < q->clhash.hashsize; i++) {
1514 		hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i],
1515 					  cl_common.hnode)
1516 			hfsc_destroy_class(sch, cl);
1517 	}
1518 	qdisc_class_hash_destroy(&q->clhash);
1519 	qdisc_watchdog_cancel(&q->watchdog);
1520 }
1521 
1522 static int
1523 hfsc_dump_qdisc(struct Qdisc *sch, struct sk_buff *skb)
1524 {
1525 	struct hfsc_sched *q = qdisc_priv(sch);
1526 	unsigned char *b = skb_tail_pointer(skb);
1527 	struct tc_hfsc_qopt qopt;
1528 
1529 	qopt.defcls = q->defcls;
1530 	if (nla_put(skb, TCA_OPTIONS, sizeof(qopt), &qopt))
1531 		goto nla_put_failure;
1532 	return skb->len;
1533 
1534  nla_put_failure:
1535 	nlmsg_trim(skb, b);
1536 	return -1;
1537 }
1538 
1539 static int
1540 hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
1541 {
1542 	struct hfsc_class *cl;
1543 	int uninitialized_var(err);
1544 
1545 	cl = hfsc_classify(skb, sch, &err);
1546 	if (cl == NULL) {
1547 		if (err & __NET_XMIT_BYPASS)
1548 			qdisc_qstats_drop(sch);
1549 		__qdisc_drop(skb, to_free);
1550 		return err;
1551 	}
1552 
1553 	err = qdisc_enqueue(skb, cl->qdisc, to_free);
1554 	if (unlikely(err != NET_XMIT_SUCCESS)) {
1555 		if (net_xmit_drop_count(err)) {
1556 			cl->qstats.drops++;
1557 			qdisc_qstats_drop(sch);
1558 		}
1559 		return err;
1560 	}
1561 
1562 	if (cl->qdisc->q.qlen == 1) {
1563 		unsigned int len = qdisc_pkt_len(skb);
1564 
1565 		if (cl->cl_flags & HFSC_RSC)
1566 			init_ed(cl, len);
1567 		if (cl->cl_flags & HFSC_FSC)
1568 			init_vf(cl, len);
1569 		/*
1570 		 * If this is the first packet, isolate the head so a possible
1571 		 * head drop before the first dequeue operation has no chance
1572 		 * to invalidate the deadline.
1573 		 */
1574 		if (cl->cl_flags & HFSC_RSC)
1575 			cl->qdisc->ops->peek(cl->qdisc);
1576 
1577 	}
1578 
1579 	qdisc_qstats_backlog_inc(sch, skb);
1580 	sch->q.qlen++;
1581 
1582 	return NET_XMIT_SUCCESS;
1583 }
1584 
1585 static struct sk_buff *
1586 hfsc_dequeue(struct Qdisc *sch)
1587 {
1588 	struct hfsc_sched *q = qdisc_priv(sch);
1589 	struct hfsc_class *cl;
1590 	struct sk_buff *skb;
1591 	u64 cur_time;
1592 	unsigned int next_len;
1593 	int realtime = 0;
1594 
1595 	if (sch->q.qlen == 0)
1596 		return NULL;
1597 
1598 	cur_time = psched_get_time();
1599 
1600 	/*
1601 	 * if there are eligible classes, use real-time criteria.
1602 	 * find the class with the minimum deadline among
1603 	 * the eligible classes.
1604 	 */
1605 	cl = eltree_get_mindl(q, cur_time);
1606 	if (cl) {
1607 		realtime = 1;
1608 	} else {
1609 		/*
1610 		 * use link-sharing criteria
1611 		 * get the class with the minimum vt in the hierarchy
1612 		 */
1613 		cl = vttree_get_minvt(&q->root, cur_time);
1614 		if (cl == NULL) {
1615 			qdisc_qstats_overlimit(sch);
1616 			hfsc_schedule_watchdog(sch);
1617 			return NULL;
1618 		}
1619 	}
1620 
1621 	skb = qdisc_dequeue_peeked(cl->qdisc);
1622 	if (skb == NULL) {
1623 		qdisc_warn_nonwc("HFSC", cl->qdisc);
1624 		return NULL;
1625 	}
1626 
1627 	bstats_update(&cl->bstats, skb);
1628 	update_vf(cl, qdisc_pkt_len(skb), cur_time);
1629 	if (realtime)
1630 		cl->cl_cumul += qdisc_pkt_len(skb);
1631 
1632 	if (cl->cl_flags & HFSC_RSC) {
1633 		if (cl->qdisc->q.qlen != 0) {
1634 			/* update ed */
1635 			next_len = qdisc_peek_len(cl->qdisc);
1636 			if (realtime)
1637 				update_ed(cl, next_len);
1638 			else
1639 				update_d(cl, next_len);
1640 		} else {
1641 			/* the class becomes passive */
1642 			eltree_remove(cl);
1643 		}
1644 	}
1645 
1646 	qdisc_bstats_update(sch, skb);
1647 	qdisc_qstats_backlog_dec(sch, skb);
1648 	sch->q.qlen--;
1649 
1650 	return skb;
1651 }
1652 
1653 static const struct Qdisc_class_ops hfsc_class_ops = {
1654 	.change		= hfsc_change_class,
1655 	.delete		= hfsc_delete_class,
1656 	.graft		= hfsc_graft_class,
1657 	.leaf		= hfsc_class_leaf,
1658 	.qlen_notify	= hfsc_qlen_notify,
1659 	.find		= hfsc_search_class,
1660 	.bind_tcf	= hfsc_bind_tcf,
1661 	.unbind_tcf	= hfsc_unbind_tcf,
1662 	.tcf_block	= hfsc_tcf_block,
1663 	.dump		= hfsc_dump_class,
1664 	.dump_stats	= hfsc_dump_class_stats,
1665 	.walk		= hfsc_walk
1666 };
1667 
1668 static struct Qdisc_ops hfsc_qdisc_ops __read_mostly = {
1669 	.id		= "hfsc",
1670 	.init		= hfsc_init_qdisc,
1671 	.change		= hfsc_change_qdisc,
1672 	.reset		= hfsc_reset_qdisc,
1673 	.destroy	= hfsc_destroy_qdisc,
1674 	.dump		= hfsc_dump_qdisc,
1675 	.enqueue	= hfsc_enqueue,
1676 	.dequeue	= hfsc_dequeue,
1677 	.peek		= qdisc_peek_dequeued,
1678 	.cl_ops		= &hfsc_class_ops,
1679 	.priv_size	= sizeof(struct hfsc_sched),
1680 	.owner		= THIS_MODULE
1681 };
1682 
1683 static int __init
1684 hfsc_init(void)
1685 {
1686 	return register_qdisc(&hfsc_qdisc_ops);
1687 }
1688 
1689 static void __exit
1690 hfsc_cleanup(void)
1691 {
1692 	unregister_qdisc(&hfsc_qdisc_ops);
1693 }
1694 
1695 MODULE_LICENSE("GPL");
1696 module_init(hfsc_init);
1697 module_exit(hfsc_cleanup);
1698