/*
 * Copyright (c) 2003 Patrick McHardy, <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * 2003-10-17 - Ported from altq
 */
/*
 * Copyright (c) 1997-1999 Carnegie Mellon University. All Rights Reserved.
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation is hereby granted (including for commercial or
 * for-profit use), provided that both the copyright notice and this
 * permission notice appear in all copies of the software, derivative
 * works, or modified versions, and any portions thereof.
 *
 * THIS SOFTWARE IS EXPERIMENTAL AND IS KNOWN TO HAVE BUGS, SOME OF
 * WHICH MAY HAVE SERIOUS CONSEQUENCES.  CARNEGIE MELLON PROVIDES THIS
 * SOFTWARE IN ITS ``AS IS'' CONDITION, AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 * Carnegie Mellon encourages (but does not require) users of this
 * software to return any improvements or extensions that they make,
 * and to grant Carnegie Mellon the rights to redistribute these
 * changes without encumbrance.
 */
/*
 * H-FSC is described in Proceedings of SIGCOMM'97,
 * "A Hierarchical Fair Service Curve Algorithm for Link-Sharing,
 * Real-Time and Priority Service"
 * by Ion Stoica, Hui Zhang, and T. S. Eugene Ng.
 *
 * Oleg Cherevko <olwi@aq.ml.com.ua> added the upperlimit for link-sharing.
 * When a class has an upperlimit, the fit-time is computed from the
 * upperlimit service curve.  The link-sharing scheduler does not schedule
 * a class whose fit-time exceeds the current time.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/compiler.h>
#include <linux/spinlock.h>
#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/init.h>
#include <linux/rtnetlink.h>
#include <linux/pkt_sched.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <asm/div64.h>

/*
 * kernel internal service curve representation:
 *   coordinates are given by 64 bit unsigned integers.
 *   x-axis: unit is clock count.
 *   y-axis: unit is byte.
 *
 *   The service curve parameters are converted to the internal
 *   representation.  The slope values are scaled to avoid overflow.
 *   The inverse slope values as well as the y-projection of the 1st
 *   segment are kept in order to avoid 64-bit divide operations
 *   that are expensive on 32-bit architectures.
 */

struct internal_sc {
	u64	sm1;	/* scaled slope of the 1st segment */
	u64	ism1;	/* scaled inverse-slope of the 1st segment */
	u64	dx;	/* the x-projection of the 1st segment */
	u64	dy;	/* the y-projection of the 1st segment */
	u64	sm2;	/* scaled slope of the 2nd segment */
	u64	ism2;	/* scaled inverse-slope of the 2nd segment */
};

/* runtime service curve */
struct runtime_sc {
	u64	x;	/* current starting position on x-axis */
	u64	y;	/* current starting position on y-axis */
	u64	sm1;	/* scaled slope of the 1st segment */
	u64	ism1;	/* scaled inverse-slope of the 1st segment */
	u64	dx;	/* the x-projection of the 1st segment */
	u64	dy;	/* the y-projection of the 1st segment */
	u64	sm2;	/* scaled slope of the 2nd segment */
	u64	ism2;	/* scaled inverse-slope of the 2nd segment */
};

enum hfsc_class_flags {
	HFSC_RSC = 0x1,
	HFSC_FSC = 0x2,
	HFSC_USC = 0x4
};

struct hfsc_class {
	struct Qdisc_class_common cl_common;

	struct gnet_stats_basic_sync bstats;
	struct gnet_stats_queue qstats;
	struct net_rate_estimator __rcu *rate_est;
	struct tcf_proto __rcu *filter_list; /* filter list */
	struct tcf_block *block;
	unsigned int	level;		/* class level in hierarchy */

	struct hfsc_sched *sched;	/* scheduler data */
	struct hfsc_class *cl_parent;	/* parent class */
	struct list_head siblings;	/* sibling classes */
	struct list_head children;	/* child classes */
	struct Qdisc	*qdisc;		/* leaf qdisc */

	struct rb_node el_node;		/* qdisc's eligible tree member */
	struct rb_root vt_tree;		/* active children sorted by cl_vt */
	struct rb_node vt_node;		/* parent's vt_tree member */
	struct rb_root cf_tree;		/* active children sorted by cl_f */
	struct rb_node cf_node;		/* parent's cf_heap member */

	u64	cl_total;		/* total work in bytes */
	u64	cl_cumul;		/* cumulative work in bytes done by
					   real-time criteria */

	u64	cl_d;			/* deadline */
	u64	cl_e;			/* eligible time */
	u64	cl_vt;			/* virtual time */
	u64	cl_f;			/* time when this class will fit for
					   link-sharing, max(myf, cfmin) */
	u64	cl_myf;			/* my fit-time (calculated from this
					   class's own upperlimit curve) */
	u64	cl_cfmin;		/* earliest children's fit-time (used
					   with cl_myf to obtain cl_f) */
	u64	cl_cvtmin;		/* minimal virtual time among the
					   children fit for link-sharing
					   (monotonic within a period) */
	u64	cl_vtadj;		/* intra-period cumulative vt
					   adjustment */
	u64	cl_cvtoff;		/* largest virtual time seen among
					   the children */

	struct internal_sc cl_rsc;	/* internal real-time service curve */
	struct internal_sc cl_fsc;	/* internal fair service curve */
	struct internal_sc cl_usc;	/* internal upperlimit service curve */
	struct runtime_sc cl_deadline;	/* deadline curve */
	struct runtime_sc cl_eligible;	/* eligible curve */
	struct runtime_sc cl_virtual;	/* virtual curve */
	struct runtime_sc cl_ulimit;	/* upperlimit curve */

	u8		cl_flags;	/* which curves are valid */
	u32		cl_vtperiod;	/* vt period sequence number */
	u32		cl_parentperiod;/* parent's vt period sequence number */
	u32		cl_nactive;	/* number of active children */
};

struct hfsc_sched {
	u16	defcls;				/* default class id */
	struct hfsc_class root;			/* root class */
	struct Qdisc_class_hash clhash;		/* class hash */
	struct rb_root eligible;		/* eligible tree */
	struct qdisc_watchdog watchdog;		/* watchdog timer */
};

#define	HT_INFINITY	0xffffffffffffffffULL	/* infinite time value */


/*
 * the eligible tree holds backlogged classes sorted by their eligible
 * times.  there is one eligible tree per hfsc instance.
 */

static void
eltree_insert(struct hfsc_class *cl)
{
	struct rb_node **p = &cl->sched->eligible.rb_node;
	struct rb_node *parent = NULL;
	struct hfsc_class *cl1;

	while (*p != NULL) {
		parent = *p;
		cl1 = rb_entry(parent, struct hfsc_class, el_node);
		if (cl->cl_e >= cl1->cl_e)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&cl->el_node, parent, p);
	rb_insert_color(&cl->el_node, &cl->sched->eligible);
}

static inline void
eltree_remove(struct hfsc_class *cl)
{
	rb_erase(&cl->el_node, &cl->sched->eligible);
}

static inline void
eltree_update(struct hfsc_class *cl)
{
	eltree_remove(cl);
	eltree_insert(cl);
}

/* find the class with the minimum deadline among the eligible classes */
static inline struct hfsc_class *
eltree_get_mindl(struct hfsc_sched *q, u64 cur_time)
{
	struct hfsc_class *p, *cl = NULL;
	struct rb_node *n;

	for (n = rb_first(&q->eligible); n != NULL; n = rb_next(n)) {
		p = rb_entry(n, struct hfsc_class, el_node);
		if (p->cl_e > cur_time)
			break;
		if (cl == NULL || p->cl_d < cl->cl_d)
			cl = p;
	}
	return cl;
}

/* find the class with minimum eligible time among the eligible classes */
static inline struct hfsc_class *
eltree_get_minel(struct hfsc_sched *q)
{
	struct rb_node *n;

	n = rb_first(&q->eligible);
	if (n == NULL)
		return NULL;
	return rb_entry(n, struct hfsc_class, el_node);
}

/*
 * vttree holds backlogged child classes sorted by their virtual time.
 * each intermediate class has one vttree.
 */
static void
vttree_insert(struct hfsc_class *cl)
{
	struct rb_node **p = &cl->cl_parent->vt_tree.rb_node;
	struct rb_node *parent = NULL;
	struct hfsc_class *cl1;

	while (*p != NULL) {
		parent = *p;
		cl1 = rb_entry(parent, struct hfsc_class, vt_node);
		if (cl->cl_vt >= cl1->cl_vt)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&cl->vt_node, parent, p);
	rb_insert_color(&cl->vt_node, &cl->cl_parent->vt_tree);
}

static inline void
vttree_remove(struct hfsc_class *cl)
{
	rb_erase(&cl->vt_node, &cl->cl_parent->vt_tree);
}

static inline void
vttree_update(struct hfsc_class *cl)
{
	vttree_remove(cl);
	vttree_insert(cl);
}

static inline struct hfsc_class *
vttree_firstfit(struct hfsc_class *cl, u64 cur_time)
{
	struct hfsc_class *p;
	struct rb_node *n;

	for (n = rb_first(&cl->vt_tree); n != NULL; n = rb_next(n)) {
		p = rb_entry(n, struct hfsc_class, vt_node);
		if (p->cl_f <= cur_time)
			return p;
	}
	return NULL;
}

/*
 * get the leaf class with the minimum vt in the hierarchy
 */
static struct hfsc_class *
vttree_get_minvt(struct hfsc_class *cl, u64 cur_time)
{
	/* if the root class's cfmin is bigger than cur_time, nothing to do */
	if (cl->cl_cfmin > cur_time)
		return NULL;

	while (cl->level > 0) {
		cl = vttree_firstfit(cl, cur_time);
		if (cl == NULL)
			return NULL;
		/*
		 * update parent's cl_cvtmin.
		 */
		if (cl->cl_parent->cl_cvtmin < cl->cl_vt)
			cl->cl_parent->cl_cvtmin = cl->cl_vt;
	}
	return cl;
}

static void
cftree_insert(struct hfsc_class *cl)
{
	struct rb_node **p = &cl->cl_parent->cf_tree.rb_node;
	struct rb_node *parent = NULL;
	struct hfsc_class *cl1;

	while (*p != NULL) {
		parent = *p;
		cl1 = rb_entry(parent, struct hfsc_class, cf_node);
		if (cl->cl_f >= cl1->cl_f)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&cl->cf_node, parent, p);
	rb_insert_color(&cl->cf_node, &cl->cl_parent->cf_tree);
}

static inline void
cftree_remove(struct hfsc_class *cl)
{
	rb_erase(&cl->cf_node, &cl->cl_parent->cf_tree);
}

static inline void
cftree_update(struct hfsc_class *cl)
{
	cftree_remove(cl);
	cftree_insert(cl);
}

/*
 * service curve support functions
 *
 *  external service curve parameters
 *	m: bps
 *	d: us
 *  internal service curve parameters
 *	sm: (bytes/psched_us) << SM_SHIFT
 *	ism: (psched_us/byte) << ISM_SHIFT
 *	dx: psched_us
 *
 * The clock source resolution with ktime and PSCHED_SHIFT 10 is 1.024us.
 *
 * sm and ism are scaled in order to keep effective digits.
 * SM_SHIFT and ISM_SHIFT are selected so that at least 4 effective
 * decimal digits are kept, as the following table shows.
 *
 *  bits/sec      100Kbps     1Mbps     10Mbps     100Mbps    1Gbps
 *  ------------+-------------------------------------------------------
 *  bytes/1.024us 12.8e-3    128e-3     1280e-3    12800e-3   128000e-3
 *
 *  1.024us/byte  78.125     7.8125     0.78125    0.078125   0.0078125
 *
 * So, for PSCHED_SHIFT 10 we need: SM_SHIFT 20, ISM_SHIFT 18.
 */
#define	SM_SHIFT	(30 - PSCHED_SHIFT)
#define	ISM_SHIFT	(8 + PSCHED_SHIFT)

#define	SM_MASK		((1ULL << SM_SHIFT) - 1)
#define	ISM_MASK	((1ULL << ISM_SHIFT) - 1)
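
/*
 * A worked example of the scaling (illustrative only; it assumes
 * PSCHED_SHIFT == 10, i.e. one psched tick is 1.024us and
 * PSCHED_TICKS_PER_SEC is roughly 976562):
 *
 *   m = 1Mbit/s = 125000 bytes/s
 *   sm  = (125000 << SM_SHIFT) / PSCHED_TICKS_PER_SEC ~= 134218
 *         (0.128 bytes/tick, scaled by 2^20)
 *   ism = (PSCHED_TICKS_PER_SEC << ISM_SHIFT) / 125000 ~= 2048000
 *         (7.8125 ticks/byte, scaled by 2^18)
 *
 * With these fixed-point values, seg_x2y()/seg_y2x() below convert
 * between time and service using only multiplies and shifts.
 */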

static inline u64
seg_x2y(u64 x, u64 sm)
{
	u64 y;

	/*
	 * compute
	 *	y = x * sm >> SM_SHIFT
	 * but split the multiplication into upper and lower bits
	 * to avoid overflow
	 */
	y = (x >> SM_SHIFT) * sm + (((x & SM_MASK) * sm) >> SM_SHIFT);
	return y;
}

static inline u64
seg_y2x(u64 y, u64 ism)
{
	u64 x;

	if (y == 0)
		x = 0;
	else if (ism == HT_INFINITY)
		x = HT_INFINITY;
	else {
		x = (y >> ISM_SHIFT) * ism
		    + (((y & ISM_MASK) * ism) >> ISM_SHIFT);
	}
	return x;
}

/* convert m (bps) into sm (bytes/psched us) */
static u64
m2sm(u32 m)
{
	u64 sm;

	sm = ((u64)m << SM_SHIFT);
	sm += PSCHED_TICKS_PER_SEC - 1;
	do_div(sm, PSCHED_TICKS_PER_SEC);
	return sm;
}

/* convert m (bps) into ism (psched us/byte) */
static u64
m2ism(u32 m)
{
	u64 ism;

	if (m == 0)
		ism = HT_INFINITY;
	else {
		ism = ((u64)PSCHED_TICKS_PER_SEC << ISM_SHIFT);
		ism += m - 1;
		do_div(ism, m);
	}
	return ism;
}

/* convert d (us) into dx (psched us) */
static u64
d2dx(u32 d)
{
	u64 dx;

	dx = ((u64)d * PSCHED_TICKS_PER_SEC);
	dx += USEC_PER_SEC - 1;
	do_div(dx, USEC_PER_SEC);
	return dx;
}

/* convert sm (bytes/psched us) into m (bps) */
static u32
sm2m(u64 sm)
{
	u64 m;

	m = (sm * PSCHED_TICKS_PER_SEC) >> SM_SHIFT;
	return (u32)m;
}

/* convert dx (psched us) into d (us) */
static u32
dx2d(u64 dx)
{
	u64 d;

	d = dx * USEC_PER_SEC;
	do_div(d, PSCHED_TICKS_PER_SEC);
	return (u32)d;
}
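
/*
 * Note: sm2m() and dx2d() are the inverse conversions; hfsc_dump_sc()
 * uses them to report curves back to userspace.  m2sm()/m2ism()/d2dx()
 * round up while sm2m()/dx2d() truncate, so a dumped curve may differ
 * slightly from the values originally configured.
 */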

static void
sc2isc(struct tc_service_curve *sc, struct internal_sc *isc)
{
	isc->sm1  = m2sm(sc->m1);
	isc->ism1 = m2ism(sc->m1);
	isc->dx   = d2dx(sc->d);
	isc->dy   = seg_x2y(isc->dx, isc->sm1);
	isc->sm2  = m2sm(sc->m2);
	isc->ism2 = m2ism(sc->m2);
}

/*
 * initialize the runtime service curve with the given internal
 * service curve starting at (x, y).
 */
static void
rtsc_init(struct runtime_sc *rtsc, struct internal_sc *isc, u64 x, u64 y)
{
	rtsc->x	   = x;
	rtsc->y    = y;
	rtsc->sm1  = isc->sm1;
	rtsc->ism1 = isc->ism1;
	rtsc->dx   = isc->dx;
	rtsc->dy   = isc->dy;
	rtsc->sm2  = isc->sm2;
	rtsc->ism2 = isc->ism2;
}

/*
 * calculate the x-projection (time) of the runtime service curve for
 * the given y-projection (amount of service) value
 */
static u64
rtsc_y2x(struct runtime_sc *rtsc, u64 y)
{
	u64 x;

	if (y < rtsc->y)
		x = rtsc->x;
	else if (y <= rtsc->y + rtsc->dy) {
		/* x belongs to the 1st segment */
		if (rtsc->dy == 0)
			x = rtsc->x + rtsc->dx;
		else
			x = rtsc->x + seg_y2x(y - rtsc->y, rtsc->ism1);
	} else {
		/* x belongs to the 2nd segment */
		x = rtsc->x + rtsc->dx
		    + seg_y2x(y - rtsc->y - rtsc->dy, rtsc->ism2);
	}
	return x;
}

static u64
rtsc_x2y(struct runtime_sc *rtsc, u64 x)
{
	u64 y;

	if (x <= rtsc->x)
		y = rtsc->y;
	else if (x <= rtsc->x + rtsc->dx)
		/* y belongs to the 1st segment */
		y = rtsc->y + seg_x2y(x - rtsc->x, rtsc->sm1);
	else
		/* y belongs to the 2nd segment */
		y = rtsc->y + rtsc->dy
		    + seg_x2y(x - rtsc->x - rtsc->dx, rtsc->sm2);
	return y;
}
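
/*
 * Both helpers above evaluate the same two-segment curve anchored at
 * (rtsc->x, rtsc->y).  In unscaled terms, for an offset t = x - rtsc->x:
 *
 *	y(t) = rtsc->y + sm1 * t			for 0 <= t <= dx
 *	y(t) = rtsc->y + dy + sm2 * (t - dx)		for t > dx
 *
 * rtsc_y2x() is the inverse mapping; it uses the precomputed inverse
 * slopes ism1/ism2 so that no 64-bit division is required.
 */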

/*
 * update the runtime service curve by taking the minimum of the current
 * runtime service curve and the service curve starting at (x, y).
 */
static void
rtsc_min(struct runtime_sc *rtsc, struct internal_sc *isc, u64 x, u64 y)
{
	u64 y1, y2, dx, dy;
	u32 dsm;

	if (isc->sm1 <= isc->sm2) {
		/* service curve is convex */
		y1 = rtsc_x2y(rtsc, x);
		if (y1 < y)
			/* the current rtsc is smaller */
			return;
		rtsc->x = x;
		rtsc->y = y;
		return;
	}

	/*
	 * service curve is concave
	 * compute the two y values of the current rtsc
	 *	y1: at x
	 *	y2: at (x + dx)
	 */
	y1 = rtsc_x2y(rtsc, x);
	if (y1 <= y) {
		/* rtsc is below isc, no change to rtsc */
		return;
	}

	y2 = rtsc_x2y(rtsc, x + isc->dx);
	if (y2 >= y + isc->dy) {
		/* rtsc is above isc, replace rtsc by isc */
		rtsc->x = x;
		rtsc->y = y;
		rtsc->dx = isc->dx;
		rtsc->dy = isc->dy;
		return;
	}

	/*
	 * the two curves intersect
	 * compute the offsets (dx, dy) using the reverse
	 * function of seg_x2y()
	 *	seg_x2y(dx, sm1) == seg_x2y(dx, sm2) + (y1 - y)
	 * which solves to dx == (y1 - y) / (sm1 - sm2)
	 */
	dx = (y1 - y) << SM_SHIFT;
	dsm = isc->sm1 - isc->sm2;
	do_div(dx, dsm);
	/*
	 * check if (x, y1) belongs to the 1st segment of rtsc.
	 * if so, add the offset.
	 */
	if (rtsc->x + rtsc->dx > x)
		dx += rtsc->x + rtsc->dx - x;
	dy = seg_x2y(dx, isc->sm1);

	rtsc->x = x;
	rtsc->y = y;
	rtsc->dx = dx;
	rtsc->dy = dy;
}

static void
init_ed(struct hfsc_class *cl, unsigned int next_len)
{
	u64 cur_time = psched_get_time();

	/* update the deadline curve */
	rtsc_min(&cl->cl_deadline, &cl->cl_rsc, cur_time, cl->cl_cumul);

	/*
	 * update the eligible curve.
	 * for concave, it is equal to the deadline curve.
	 * for convex, it is a linear curve with slope m2.
	 */
	cl->cl_eligible = cl->cl_deadline;
	if (cl->cl_rsc.sm1 <= cl->cl_rsc.sm2) {
		cl->cl_eligible.dx = 0;
		cl->cl_eligible.dy = 0;
	}

	/* compute e and d */
	cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);

	eltree_insert(cl);
}

static void
update_ed(struct hfsc_class *cl, unsigned int next_len)
{
	cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);

	eltree_update(cl);
}

static inline void
update_d(struct hfsc_class *cl, unsigned int next_len)
{
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);
}

static inline void
update_cfmin(struct hfsc_class *cl)
{
	struct rb_node *n = rb_first(&cl->cf_tree);
	struct hfsc_class *p;

	if (n == NULL) {
		cl->cl_cfmin = 0;
		return;
	}
	p = rb_entry(n, struct hfsc_class, cf_node);
	cl->cl_cfmin = p->cl_f;
}

static void
init_vf(struct hfsc_class *cl, unsigned int len)
{
	struct hfsc_class *max_cl;
	struct rb_node *n;
	u64 vt, f, cur_time;
	int go_active;

	cur_time = 0;
	go_active = 1;
	for (; cl->cl_parent != NULL; cl = cl->cl_parent) {
		if (go_active && cl->cl_nactive++ == 0)
			go_active = 1;
		else
			go_active = 0;

		if (go_active) {
			n = rb_last(&cl->cl_parent->vt_tree);
			if (n != NULL) {
				max_cl = rb_entry(n, struct hfsc_class, vt_node);
				/*
				 * set vt to the average of the min and max
				 * classes.  if the parent's period didn't
				 * change, don't decrease vt of the class.
				 */
				vt = max_cl->cl_vt;
				if (cl->cl_parent->cl_cvtmin != 0)
					vt = (cl->cl_parent->cl_cvtmin + vt)/2;

				if (cl->cl_parent->cl_vtperiod !=
				    cl->cl_parentperiod || vt > cl->cl_vt)
					cl->cl_vt = vt;
			} else {
				/*
				 * first child for a new parent backlog period.
				 * initialize cl_vt to the highest value seen
				 * among the siblings.  this is analogous to
				 * what cur_time would provide in the realtime
				 * case.
				 */
				cl->cl_vt = cl->cl_parent->cl_cvtoff;
				cl->cl_parent->cl_cvtmin = 0;
			}

			/* update the virtual curve */
			rtsc_min(&cl->cl_virtual, &cl->cl_fsc, cl->cl_vt, cl->cl_total);
			cl->cl_vtadj = 0;

			cl->cl_vtperiod++;  /* increment vt period */
			cl->cl_parentperiod = cl->cl_parent->cl_vtperiod;
			if (cl->cl_parent->cl_nactive == 0)
				cl->cl_parentperiod++;
			cl->cl_f = 0;

			vttree_insert(cl);
			cftree_insert(cl);

			if (cl->cl_flags & HFSC_USC) {
				/* class has upper limit curve */
				if (cur_time == 0)
					cur_time = psched_get_time();

				/* update the ulimit curve */
				rtsc_min(&cl->cl_ulimit, &cl->cl_usc, cur_time,
					 cl->cl_total);
				/* compute myf */
				cl->cl_myf = rtsc_y2x(&cl->cl_ulimit,
						      cl->cl_total);
			}
		}

		f = max(cl->cl_myf, cl->cl_cfmin);
		if (f != cl->cl_f) {
			cl->cl_f = f;
			cftree_update(cl);
		}
		update_cfmin(cl->cl_parent);
	}
}

static void
update_vf(struct hfsc_class *cl, unsigned int len, u64 cur_time)
{
	u64 f; /* , myf_bound, delta; */
	int go_passive = 0;

	if (cl->qdisc->q.qlen == 0 && cl->cl_flags & HFSC_FSC)
		go_passive = 1;

	for (; cl->cl_parent != NULL; cl = cl->cl_parent) {
		cl->cl_total += len;

		if (!(cl->cl_flags & HFSC_FSC) || cl->cl_nactive == 0)
			continue;

		if (go_passive && --cl->cl_nactive == 0)
			go_passive = 1;
		else
			go_passive = 0;

		/* update vt */
		cl->cl_vt = rtsc_y2x(&cl->cl_virtual, cl->cl_total) + cl->cl_vtadj;

		/*
		 * if vt of the class is smaller than cvtmin,
		 * the class was skipped in the past due to non-fit.
		 * if so, we need to adjust vtadj.
		 */
		if (cl->cl_vt < cl->cl_parent->cl_cvtmin) {
			cl->cl_vtadj += cl->cl_parent->cl_cvtmin - cl->cl_vt;
			cl->cl_vt = cl->cl_parent->cl_cvtmin;
		}

		if (go_passive) {
			/* no more active child, going passive */

			/* update cvtoff of the parent class */
			if (cl->cl_vt > cl->cl_parent->cl_cvtoff)
				cl->cl_parent->cl_cvtoff = cl->cl_vt;

			/* remove this class from the vt tree */
			vttree_remove(cl);

			cftree_remove(cl);
			update_cfmin(cl->cl_parent);

			continue;
		}

		/* update the vt tree */
		vttree_update(cl);

		/* update f */
		if (cl->cl_flags & HFSC_USC) {
			cl->cl_myf = rtsc_y2x(&cl->cl_ulimit, cl->cl_total);
#if 0
			cl->cl_myf = cl->cl_myfadj + rtsc_y2x(&cl->cl_ulimit,
							      cl->cl_total);
			/*
			 * This code causes classes to stay way under their
			 * limit when multiple classes are used at gigabit
			 * speed. needs investigation. -kaber
			 */
			/*
			 * if myf lags behind by more than one clock tick
			 * from the current time, adjust myfadj to prevent
			 * a rate-limited class from going greedy.
			 * in a steady state under rate-limiting, myf
			 * fluctuates within one clock tick.
			 */
			myf_bound = cur_time - PSCHED_JIFFIE2US(1);
			if (cl->cl_myf < myf_bound) {
				delta = cur_time - cl->cl_myf;
				cl->cl_myfadj += delta;
				cl->cl_myf += delta;
			}
#endif
		}

		f = max(cl->cl_myf, cl->cl_cfmin);
		if (f != cl->cl_f) {
			cl->cl_f = f;
			cftree_update(cl);
			update_cfmin(cl->cl_parent);
		}
	}
}

static unsigned int
qdisc_peek_len(struct Qdisc *sch)
{
	struct sk_buff *skb;
	unsigned int len;

	skb = sch->ops->peek(sch);
	if (unlikely(skb == NULL)) {
		qdisc_warn_nonwc("qdisc_peek_len", sch);
		return 0;
	}
	len = qdisc_pkt_len(skb);

	return len;
}

static void
hfsc_adjust_levels(struct hfsc_class *cl)
{
	struct hfsc_class *p;
	unsigned int level;

	do {
		level = 0;
		list_for_each_entry(p, &cl->children, siblings) {
			if (p->level >= level)
				level = p->level + 1;
		}
		cl->level = level;
	} while ((cl = cl->cl_parent) != NULL);
}

static inline struct hfsc_class *
hfsc_find_class(u32 classid, struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct Qdisc_class_common *clc;

	clc = qdisc_class_find(&q->clhash, classid);
	if (clc == NULL)
		return NULL;
	return container_of(clc, struct hfsc_class, cl_common);
}

static void
hfsc_change_rsc(struct hfsc_class *cl, struct tc_service_curve *rsc,
		u64 cur_time)
{
	sc2isc(rsc, &cl->cl_rsc);
	rtsc_init(&cl->cl_deadline, &cl->cl_rsc, cur_time, cl->cl_cumul);
	cl->cl_eligible = cl->cl_deadline;
	if (cl->cl_rsc.sm1 <= cl->cl_rsc.sm2) {
		cl->cl_eligible.dx = 0;
		cl->cl_eligible.dy = 0;
	}
	cl->cl_flags |= HFSC_RSC;
}

static void
hfsc_change_fsc(struct hfsc_class *cl, struct tc_service_curve *fsc)
{
	sc2isc(fsc, &cl->cl_fsc);
	rtsc_init(&cl->cl_virtual, &cl->cl_fsc, cl->cl_vt, cl->cl_total);
	cl->cl_flags |= HFSC_FSC;
}

static void
hfsc_change_usc(struct hfsc_class *cl, struct tc_service_curve *usc,
		u64 cur_time)
{
	sc2isc(usc, &cl->cl_usc);
	rtsc_init(&cl->cl_ulimit, &cl->cl_usc, cur_time, cl->cl_total);
	cl->cl_flags |= HFSC_USC;
}

static void
hfsc_upgrade_rt(struct hfsc_class *cl)
{
	cl->cl_fsc = cl->cl_rsc;
	rtsc_init(&cl->cl_virtual, &cl->cl_fsc, cl->cl_vt, cl->cl_total);
	cl->cl_flags |= HFSC_FSC;
}

static const struct nla_policy hfsc_policy[TCA_HFSC_MAX + 1] = {
	[TCA_HFSC_RSC]	= { .len = sizeof(struct tc_service_curve) },
	[TCA_HFSC_FSC]	= { .len = sizeof(struct tc_service_curve) },
	[TCA_HFSC_USC]	= { .len = sizeof(struct tc_service_curve) },
};
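
/*
 * Example of how these attributes are typically filled in from
 * userspace (illustrative only; the device, handles and rates are
 * placeholders, and iproute2 tc syntax is assumed):
 *
 *	tc qdisc add dev eth0 root handle 1: hfsc default 10
 *	tc class add dev eth0 parent 1: classid 1:10 hfsc \
 *		sc m1 100kbit d 10ms m2 1mbit ul m2 2mbit
 *
 * "rt" maps to TCA_HFSC_RSC, "ls" to TCA_HFSC_FSC and "ul" to
 * TCA_HFSC_USC; "sc" sets both the rt and ls curves.  Each attribute
 * carries a two-slope (m1, d, m2) struct tc_service_curve.
 */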

static int
hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
		  struct nlattr **tca, unsigned long *arg,
		  struct netlink_ext_ack *extack)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl = (struct hfsc_class *)*arg;
	struct hfsc_class *parent = NULL;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_HFSC_MAX + 1];
	struct tc_service_curve *rsc = NULL, *fsc = NULL, *usc = NULL;
	u64 cur_time;
	int err;

	if (opt == NULL)
		return -EINVAL;

	err = nla_parse_nested_deprecated(tb, TCA_HFSC_MAX, opt, hfsc_policy,
					  NULL);
	if (err < 0)
		return err;

	if (tb[TCA_HFSC_RSC]) {
		rsc = nla_data(tb[TCA_HFSC_RSC]);
		if (rsc->m1 == 0 && rsc->m2 == 0)
			rsc = NULL;
	}

	if (tb[TCA_HFSC_FSC]) {
		fsc = nla_data(tb[TCA_HFSC_FSC]);
		if (fsc->m1 == 0 && fsc->m2 == 0)
			fsc = NULL;
	}

	if (tb[TCA_HFSC_USC]) {
		usc = nla_data(tb[TCA_HFSC_USC]);
		if (usc->m1 == 0 && usc->m2 == 0)
			usc = NULL;
	}

	if (cl != NULL) {
		int old_flags;

		if (parentid) {
			if (cl->cl_parent &&
			    cl->cl_parent->cl_common.classid != parentid)
				return -EINVAL;
			if (cl->cl_parent == NULL && parentid != TC_H_ROOT)
				return -EINVAL;
		}
		cur_time = psched_get_time();

		if (tca[TCA_RATE]) {
			err = gen_replace_estimator(&cl->bstats, NULL,
						    &cl->rate_est,
						    NULL,
						    true,
						    tca[TCA_RATE]);
			if (err)
				return err;
		}

		sch_tree_lock(sch);
		old_flags = cl->cl_flags;

		if (rsc != NULL)
			hfsc_change_rsc(cl, rsc, cur_time);
		if (fsc != NULL)
			hfsc_change_fsc(cl, fsc);
		if (usc != NULL)
			hfsc_change_usc(cl, usc, cur_time);

		if (cl->qdisc->q.qlen != 0) {
			int len = qdisc_peek_len(cl->qdisc);

			if (cl->cl_flags & HFSC_RSC) {
				if (old_flags & HFSC_RSC)
					update_ed(cl, len);
				else
					init_ed(cl, len);
			}

			if (cl->cl_flags & HFSC_FSC) {
				if (old_flags & HFSC_FSC)
					update_vf(cl, 0, cur_time);
				else
					init_vf(cl, len);
			}
		}
		sch_tree_unlock(sch);

		return 0;
	}

	if (parentid == TC_H_ROOT)
		return -EEXIST;

	parent = &q->root;
	if (parentid) {
		parent = hfsc_find_class(parentid, sch);
		if (parent == NULL)
			return -ENOENT;
	}

	if (classid == 0 || TC_H_MAJ(classid ^ sch->handle) != 0)
		return -EINVAL;
	if (hfsc_find_class(classid, sch))
		return -EEXIST;

	if (rsc == NULL && fsc == NULL)
		return -EINVAL;

	cl = kzalloc(sizeof(struct hfsc_class), GFP_KERNEL);
	if (cl == NULL)
		return -ENOBUFS;

	err = tcf_block_get(&cl->block, &cl->filter_list, sch, extack);
	if (err) {
		kfree(cl);
		return err;
	}

	if (tca[TCA_RATE]) {
		err = gen_new_estimator(&cl->bstats, NULL, &cl->rate_est,
					NULL, true, tca[TCA_RATE]);
		if (err) {
			tcf_block_put(cl->block);
			kfree(cl);
			return err;
		}
	}

	if (rsc != NULL)
		hfsc_change_rsc(cl, rsc, 0);
	if (fsc != NULL)
		hfsc_change_fsc(cl, fsc);
	if (usc != NULL)
		hfsc_change_usc(cl, usc, 0);

	cl->cl_common.classid = classid;
	cl->sched     = q;
	cl->cl_parent = parent;
	cl->qdisc = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
				      classid, NULL);
	if (cl->qdisc == NULL)
		cl->qdisc = &noop_qdisc;
	else
		qdisc_hash_add(cl->qdisc, true);
	INIT_LIST_HEAD(&cl->children);
	cl->vt_tree = RB_ROOT;
	cl->cf_tree = RB_ROOT;

	sch_tree_lock(sch);
	/* Check if the inner class is a misconfigured 'rt' */
	if (!(parent->cl_flags & HFSC_FSC) && parent != &q->root) {
		NL_SET_ERR_MSG(extack,
			       "Forced curve change on parent 'rt' to 'sc'");
		hfsc_upgrade_rt(parent);
	}
	qdisc_class_hash_insert(&q->clhash, &cl->cl_common);
	list_add_tail(&cl->siblings, &parent->children);
	if (parent->level == 0)
		qdisc_purge_queue(parent->qdisc);
	hfsc_adjust_levels(parent);
	sch_tree_unlock(sch);

	qdisc_class_hash_grow(sch, &q->clhash);

	*arg = (unsigned long)cl;
	return 0;
}

static void
hfsc_destroy_class(struct Qdisc *sch, struct hfsc_class *cl)
{
	struct hfsc_sched *q = qdisc_priv(sch);

	tcf_block_put(cl->block);
	qdisc_put(cl->qdisc);
	gen_kill_estimator(&cl->rate_est);
	if (cl != &q->root)
		kfree(cl);
}

static int
hfsc_delete_class(struct Qdisc *sch, unsigned long arg,
		  struct netlink_ext_ack *extack)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	if (cl->level > 0 || qdisc_class_in_use(&cl->cl_common) ||
	    cl == &q->root) {
		NL_SET_ERR_MSG(extack, "HFSC class in use");
		return -EBUSY;
	}

	sch_tree_lock(sch);

	list_del(&cl->siblings);
	hfsc_adjust_levels(cl->cl_parent);

	qdisc_purge_queue(cl->qdisc);
	qdisc_class_hash_remove(&q->clhash, &cl->cl_common);

	sch_tree_unlock(sch);

	hfsc_destroy_class(sch, cl);
	return 0;
}

static struct hfsc_class *
hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *head, *cl;
	struct tcf_result res;
	struct tcf_proto *tcf;
	int result;

	if (TC_H_MAJ(skb->priority ^ sch->handle) == 0 &&
	    (cl = hfsc_find_class(skb->priority, sch)) != NULL)
		if (cl->level == 0)
			return cl;

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	head = &q->root;
	tcf = rcu_dereference_bh(q->root.filter_list);
	while (tcf && (result = tcf_classify(skb, NULL, tcf, &res, false)) >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_QUEUED:
		case TC_ACT_STOLEN:
		case TC_ACT_TRAP:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
			fallthrough;
		case TC_ACT_SHOT:
			return NULL;
		}
#endif
		cl = (struct hfsc_class *)res.class;
		if (!cl) {
			cl = hfsc_find_class(res.classid, sch);
			if (!cl)
				break; /* filter selected invalid classid */
			if (cl->level >= head->level)
				break; /* filter may only point downwards */
		}

		if (cl->level == 0)
			return cl; /* hit leaf class */

		/* apply inner filter chain */
		tcf = rcu_dereference_bh(cl->filter_list);
		head = cl;
	}

	/* classification failed, try default class */
	cl = hfsc_find_class(TC_H_MAKE(TC_H_MAJ(sch->handle), q->defcls), sch);
	if (cl == NULL || cl->level > 0)
		return NULL;

	return cl;
}

static int
hfsc_graft_class(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		 struct Qdisc **old, struct netlink_ext_ack *extack)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	if (cl->level > 0)
		return -EINVAL;
	if (new == NULL) {
		new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
					cl->cl_common.classid, NULL);
		if (new == NULL)
			new = &noop_qdisc;
	}

	*old = qdisc_replace(sch, new, &cl->qdisc);
	return 0;
}

static struct Qdisc *
hfsc_class_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	if (cl->level == 0)
		return cl->qdisc;

	return NULL;
}

static void
hfsc_qlen_notify(struct Qdisc *sch, unsigned long arg)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	/* vttree removal is handled in update_vf(), so update_vf(cl, 0, 0)
	 * must be called explicitly to remove a class from the vttree.
	 */
	update_vf(cl, 0, 0);
	if (cl->cl_flags & HFSC_RSC)
		eltree_remove(cl);
}

static unsigned long
hfsc_search_class(struct Qdisc *sch, u32 classid)
{
	return (unsigned long)hfsc_find_class(classid, sch);
}

static unsigned long
hfsc_bind_tcf(struct Qdisc *sch, unsigned long parent, u32 classid)
{
	struct hfsc_class *p = (struct hfsc_class *)parent;
	struct hfsc_class *cl = hfsc_find_class(classid, sch);

	if (cl != NULL) {
		if (p != NULL && p->level <= cl->level)
			return 0;
		qdisc_class_get(&cl->cl_common);
	}

	return (unsigned long)cl;
}

static void
hfsc_unbind_tcf(struct Qdisc *sch, unsigned long arg)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	qdisc_class_put(&cl->cl_common);
}

static struct tcf_block *hfsc_tcf_block(struct Qdisc *sch, unsigned long arg,
					struct netlink_ext_ack *extack)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	if (cl == NULL)
		cl = &q->root;

	return cl->block;
}

static int
hfsc_dump_sc(struct sk_buff *skb, int attr, struct internal_sc *sc)
{
	struct tc_service_curve tsc;

	tsc.m1 = sm2m(sc->sm1);
	tsc.d  = dx2d(sc->dx);
	tsc.m2 = sm2m(sc->sm2);
	if (nla_put(skb, attr, sizeof(tsc), &tsc))
		goto nla_put_failure;

	return skb->len;

 nla_put_failure:
	return -1;
}

static int
hfsc_dump_curves(struct sk_buff *skb, struct hfsc_class *cl)
{
	if ((cl->cl_flags & HFSC_RSC) &&
	    (hfsc_dump_sc(skb, TCA_HFSC_RSC, &cl->cl_rsc) < 0))
		goto nla_put_failure;

	if ((cl->cl_flags & HFSC_FSC) &&
	    (hfsc_dump_sc(skb, TCA_HFSC_FSC, &cl->cl_fsc) < 0))
		goto nla_put_failure;

	if ((cl->cl_flags & HFSC_USC) &&
	    (hfsc_dump_sc(skb, TCA_HFSC_USC, &cl->cl_usc) < 0))
		goto nla_put_failure;

	return skb->len;

 nla_put_failure:
	return -1;
}

static int
hfsc_dump_class(struct Qdisc *sch, unsigned long arg, struct sk_buff *skb,
		struct tcmsg *tcm)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;
	struct nlattr *nest;

	tcm->tcm_parent = cl->cl_parent ? cl->cl_parent->cl_common.classid :
					  TC_H_ROOT;
	tcm->tcm_handle = cl->cl_common.classid;
	if (cl->level == 0)
		tcm->tcm_info = cl->qdisc->handle;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;
	if (hfsc_dump_curves(skb, cl) < 0)
		goto nla_put_failure;
	return nla_nest_end(skb, nest);

 nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

static int
hfsc_dump_class_stats(struct Qdisc *sch, unsigned long arg,
	struct gnet_dump *d)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;
	struct tc_hfsc_stats xstats;
	__u32 qlen;

	qdisc_qstats_qlen_backlog(cl->qdisc, &qlen, &cl->qstats.backlog);
	xstats.level   = cl->level;
	xstats.period  = cl->cl_vtperiod;
	xstats.work    = cl->cl_total;
	xstats.rtwork  = cl->cl_cumul;

	if (gnet_stats_copy_basic(d, NULL, &cl->bstats, true) < 0 ||
	    gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
	    gnet_stats_copy_queue(d, NULL, &cl->qstats, qlen) < 0)
		return -1;

	return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
}

static void
hfsc_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl;
	unsigned int i;

	if (arg->stop)
		return;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, &q->clhash.hash[i],
				     cl_common.hnode) {
			if (!tc_qdisc_stats_dump(sch, (unsigned long)cl, arg))
				return;
		}
	}
}

static void
hfsc_schedule_watchdog(struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl;
	u64 next_time = 0;

	cl = eltree_get_minel(q);
	if (cl)
		next_time = cl->cl_e;
	if (q->root.cl_cfmin != 0) {
		if (next_time == 0 || next_time > q->root.cl_cfmin)
			next_time = q->root.cl_cfmin;
	}
	if (next_time)
		qdisc_watchdog_schedule(&q->watchdog, next_time);
}

static int
hfsc_init_qdisc(struct Qdisc *sch, struct nlattr *opt,
		struct netlink_ext_ack *extack)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct tc_hfsc_qopt *qopt;
	int err;

	qdisc_watchdog_init(&q->watchdog, sch);

	if (!opt || nla_len(opt) < sizeof(*qopt))
		return -EINVAL;
	qopt = nla_data(opt);

	q->defcls = qopt->defcls;
	err = qdisc_class_hash_init(&q->clhash);
	if (err < 0)
		return err;
	q->eligible = RB_ROOT;

	err = tcf_block_get(&q->root.block, &q->root.filter_list, sch, extack);
	if (err)
		return err;

	gnet_stats_basic_sync_init(&q->root.bstats);
	q->root.cl_common.classid = sch->handle;
	q->root.sched   = q;
	q->root.qdisc = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
					  sch->handle, NULL);
	if (q->root.qdisc == NULL)
		q->root.qdisc = &noop_qdisc;
	else
		qdisc_hash_add(q->root.qdisc, true);
	INIT_LIST_HEAD(&q->root.children);
	q->root.vt_tree = RB_ROOT;
	q->root.cf_tree = RB_ROOT;

	qdisc_class_hash_insert(&q->clhash, &q->root.cl_common);
	qdisc_class_hash_grow(sch, &q->clhash);

	return 0;
}

static int
hfsc_change_qdisc(struct Qdisc *sch, struct nlattr *opt,
		  struct netlink_ext_ack *extack)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct tc_hfsc_qopt *qopt;

	if (nla_len(opt) < sizeof(*qopt))
		return -EINVAL;
	qopt = nla_data(opt);

	sch_tree_lock(sch);
	q->defcls = qopt->defcls;
	sch_tree_unlock(sch);

	return 0;
}

static void
hfsc_reset_class(struct hfsc_class *cl)
{
	cl->cl_total        = 0;
	cl->cl_cumul        = 0;
	cl->cl_d            = 0;
	cl->cl_e            = 0;
	cl->cl_vt           = 0;
	cl->cl_vtadj        = 0;
	cl->cl_cvtmin       = 0;
	cl->cl_cvtoff       = 0;
	cl->cl_vtperiod     = 0;
	cl->cl_parentperiod = 0;
	cl->cl_f            = 0;
	cl->cl_myf          = 0;
	cl->cl_cfmin        = 0;
	cl->cl_nactive      = 0;

	cl->vt_tree = RB_ROOT;
	cl->cf_tree = RB_ROOT;
	qdisc_reset(cl->qdisc);

	if (cl->cl_flags & HFSC_RSC)
		rtsc_init(&cl->cl_deadline, &cl->cl_rsc, 0, 0);
	if (cl->cl_flags & HFSC_FSC)
		rtsc_init(&cl->cl_virtual, &cl->cl_fsc, 0, 0);
	if (cl->cl_flags & HFSC_USC)
		rtsc_init(&cl->cl_ulimit, &cl->cl_usc, 0, 0);
}

static void
hfsc_reset_qdisc(struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl;
	unsigned int i;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, &q->clhash.hash[i], cl_common.hnode)
			hfsc_reset_class(cl);
	}
	q->eligible = RB_ROOT;
	qdisc_watchdog_cancel(&q->watchdog);
}

static void
hfsc_destroy_qdisc(struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hlist_node *next;
	struct hfsc_class *cl;
	unsigned int i;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, &q->clhash.hash[i], cl_common.hnode) {
			tcf_block_put(cl->block);
			cl->block = NULL;
		}
	}
	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i],
					  cl_common.hnode)
			hfsc_destroy_class(sch, cl);
	}
	qdisc_class_hash_destroy(&q->clhash);
	qdisc_watchdog_cancel(&q->watchdog);
}

static int
hfsc_dump_qdisc(struct Qdisc *sch, struct sk_buff *skb)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_hfsc_qopt qopt;

	qopt.defcls = q->defcls;
	if (nla_put(skb, TCA_OPTIONS, sizeof(qopt), &qopt))
		goto nla_put_failure;
	return skb->len;

 nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int
hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
{
	unsigned int len = qdisc_pkt_len(skb);
	struct hfsc_class *cl;
	int err;
	bool first;

	cl = hfsc_classify(skb, sch, &err);
	if (cl == NULL) {
		if (err & __NET_XMIT_BYPASS)
			qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return err;
	}

	first = !cl->qdisc->q.qlen;
	err = qdisc_enqueue(skb, cl->qdisc, to_free);
	if (unlikely(err != NET_XMIT_SUCCESS)) {
		if (net_xmit_drop_count(err)) {
			cl->qstats.drops++;
			qdisc_qstats_drop(sch);
		}
		return err;
	}

	if (first) {
		if (cl->cl_flags & HFSC_RSC)
			init_ed(cl, len);
		if (cl->cl_flags & HFSC_FSC)
			init_vf(cl, len);
		/*
		 * If this is the first packet, isolate the head so an eventual
		 * head drop before the first dequeue operation has no chance
		 * to invalidate the deadline.
		 */
		if (cl->cl_flags & HFSC_RSC)
			cl->qdisc->ops->peek(cl->qdisc);
	}

	sch->qstats.backlog += len;
	sch->q.qlen++;

	return NET_XMIT_SUCCESS;
}

static struct sk_buff *
hfsc_dequeue(struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl;
	struct sk_buff *skb;
	u64 cur_time;
	unsigned int next_len;
	int realtime = 0;

	if (sch->q.qlen == 0)
		return NULL;

	cur_time = psched_get_time();

	/*
	 * if there are eligible classes, use real-time criteria.
	 * find the class with the minimum deadline among
	 * the eligible classes.
	 */
	cl = eltree_get_mindl(q, cur_time);
	if (cl) {
		realtime = 1;
	} else {
		/*
		 * use link-sharing criteria
		 * get the class with the minimum vt in the hierarchy
		 */
		cl = vttree_get_minvt(&q->root, cur_time);
		if (cl == NULL) {
			qdisc_qstats_overlimit(sch);
			hfsc_schedule_watchdog(sch);
			return NULL;
		}
	}

	skb = qdisc_dequeue_peeked(cl->qdisc);
	if (skb == NULL) {
		qdisc_warn_nonwc("HFSC", cl->qdisc);
		return NULL;
	}

	bstats_update(&cl->bstats, skb);
	update_vf(cl, qdisc_pkt_len(skb), cur_time);
	if (realtime)
		cl->cl_cumul += qdisc_pkt_len(skb);

	if (cl->cl_flags & HFSC_RSC) {
		if (cl->qdisc->q.qlen != 0) {
			/* update ed */
			next_len = qdisc_peek_len(cl->qdisc);
			if (realtime)
				update_ed(cl, next_len);
			else
				update_d(cl, next_len);
		} else {
			/* the class becomes passive */
			eltree_remove(cl);
		}
	}

	qdisc_bstats_update(sch, skb);
	qdisc_qstats_backlog_dec(sch, skb);
	sch->q.qlen--;

	return skb;
}
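
/*
 * To summarize the dequeue path above: an eligible class (cl_e <=
 * cur_time) is served by the real-time criterion using its minimum
 * deadline, and only when no class is eligible does the link-sharing
 * criterion pick the minimum-vt leaf from the vttree hierarchy.
 * cl_cumul is credited only for real-time service, which keeps the
 * two criteria consistent with each other.
 */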

static const struct Qdisc_class_ops hfsc_class_ops = {
	.change		= hfsc_change_class,
	.delete		= hfsc_delete_class,
	.graft		= hfsc_graft_class,
	.leaf		= hfsc_class_leaf,
	.qlen_notify	= hfsc_qlen_notify,
	.find		= hfsc_search_class,
	.bind_tcf	= hfsc_bind_tcf,
	.unbind_tcf	= hfsc_unbind_tcf,
	.tcf_block	= hfsc_tcf_block,
	.dump		= hfsc_dump_class,
	.dump_stats	= hfsc_dump_class_stats,
	.walk		= hfsc_walk
};

static struct Qdisc_ops hfsc_qdisc_ops __read_mostly = {
	.id		= "hfsc",
	.init		= hfsc_init_qdisc,
	.change		= hfsc_change_qdisc,
	.reset		= hfsc_reset_qdisc,
	.destroy	= hfsc_destroy_qdisc,
	.dump		= hfsc_dump_qdisc,
	.enqueue	= hfsc_enqueue,
	.dequeue	= hfsc_dequeue,
	.peek		= qdisc_peek_dequeued,
	.cl_ops		= &hfsc_class_ops,
	.priv_size	= sizeof(struct hfsc_sched),
	.owner		= THIS_MODULE
};

static int __init
hfsc_init(void)
{
	return register_qdisc(&hfsc_qdisc_ops);
}

static void __exit
hfsc_cleanup(void)
{
	unregister_qdisc(&hfsc_qdisc_ops);
}

MODULE_LICENSE("GPL");
module_init(hfsc_init);
module_exit(hfsc_cleanup);