xref: /openbmc/linux/include/net/red.h (revision 4f2c0a4acffbec01079c28f839422e64ddeff004)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NET_SCHED_RED_H
#define __NET_SCHED_RED_H

#include <linux/types.h>
#include <linux/bug.h>
#include <net/pkt_sched.h>
#include <net/inet_ecn.h>
#include <net/dsfield.h>
#include <linux/reciprocal_div.h>

/*	Random Early Detection (RED) algorithm.
	=======================================

	Source: Sally Floyd and Van Jacobson, "Random Early Detection Gateways
	for Congestion Avoidance", 1993, IEEE/ACM Transactions on Networking.

	This file implements a "divisionless" version of the RED algorithm
	as written down in Fig.17 of the paper.

	Short description.
	------------------

	When a new packet arrives we calculate the average queue length:

	avg = (1-W)*avg + W*current_queue_len,

	W is the filter time constant (chosen as 2^(-Wlog)); it controls
	the inertia of the algorithm. To allow larger bursts, W should be
	decreased.

	if (avg > th_max) -> packet marked (dropped).
	if (avg < th_min) -> packet passes.
	if (th_min < avg < th_max) we calculate probability:

	Pb = max_P * (avg - th_min)/(th_max-th_min)

	and mark (drop) packet with this probability.
	Pb changes from 0 (at avg==th_min) to max_P (at avg==th_max).
	max_P should be small (not 1); usually 0.01..0.02 is a good value.

	max_P is chosen as a number such that max_P/(th_max-th_min)
	is a negative power of two, so that the arithmetic contains
	only shifts.


	Parameters, settable by user:
	-----------------------------

	qth_min		- bytes (should be < qth_max/2)
	qth_max		- bytes (should be at least 2*qth_min and less than limit)
	Wlog	       	- bits (<32) log(1/W).
	Plog	       	- bits (<32)

	Plog is related to max_P by the formula:

	max_P = (qth_max-qth_min)/2^Plog;

	F.e. if qth_max=128K and qth_min=32K, then Plog=22
	corresponds to max_P=0.02

	Scell_log
	Stab

	Lookup table for log((1-W)^(t/t_ave)).


	NOTES:

	Upper bound on W.
	-----------------

	If you want to allow bursts of L packets of size S,
	you should choose W:

	L + 1 - th_min/S < (1-(1-W)^L)/W

	th_min/S = 32         th_min/S = 4

	log(W)	L
	-1	33
	-2	35
	-3	39
	-4	46
	-5	57
	-6	75
	-7	101
	-8	135
	-9	190
	etc.
 */
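
/* For illustration only: a plain floating point sketch of the decision
 * described above. The helpers pass(), mark_or_drop() and random_0_1()
 * are hypothetical; the real code below avoids divisions and floating
 * point entirely by relying on the Wlog/Plog shifts and the Stab[] table.
 *
 *	avg = (1 - W) * avg + W * backlog;
 *	if (avg < th_min)
 *		pass();
 *	else if (avg >= th_max)
 *		mark_or_drop();
 *	else {
 *		Pb = max_P * (avg - th_min) / (th_max - th_min);
 *		if (random_0_1() < Pb)
 *			mark_or_drop();
 *	}
 */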

/*
 * Adaptive RED : An Algorithm for Increasing the Robustness of RED's AQM
 * (Sally Floyd, Ramakrishna Gummadi, and Scott Shenker) August 2001
 *
 * Every 500 ms:
 *  if (avg > target and max_p <= 0.5)
 *   increase max_p : max_p += alpha;
 *  else if (avg < target and max_p >= 0.01)
 *   decrease max_p : max_p *= beta;
 *
 * target :[qth_min + 0.4*(qth_max - qth_min),
 *          qth_min + 0.6*(qth_max - qth_min)].
 * alpha : min(0.01, max_p / 4)
 * beta : 0.9
 * max_P is a Q0.32 fixed point number (with a 32-bit mantissa)
 * max_P between 0.01 and 0.5 (1% - 50%) [ It is no longer a negative power of two ]
 */
#define RED_ONE_PERCENT ((u32)DIV_ROUND_CLOSEST(1ULL<<32, 100))

#define MAX_P_MIN (1 * RED_ONE_PERCENT)
#define MAX_P_MAX (50 * RED_ONE_PERCENT)
#define MAX_P_ALPHA(val) min(MAX_P_MIN, val / 4)
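
/* Worked example (for illustration): in Q0.32 fixed point a probability of
 * 1% is stored as roughly 0.01 * 2^32 ~= 42949673, so RED_ONE_PERCENT
 * evaluates to that value and MAX_P_MAX (50%) to roughly 2^31.
 */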

#define RED_STAB_SIZE	256
#define RED_STAB_MASK	(RED_STAB_SIZE - 1)

struct red_stats {
	u32		prob_drop;	/* Early probability drops */
	u32		prob_mark;	/* Early probability marks */
	u32		forced_drop;	/* Forced drops, qavg > max_thresh */
	u32		forced_mark;	/* Forced marks, qavg > max_thresh */
	u32		pdrop;          /* Drops due to queue limits */
};

struct red_parms {
	/* Parameters */
	u32		qth_min;	/* Min avg length threshold: Wlog scaled */
	u32		qth_max;	/* Max avg length threshold: Wlog scaled */
	u32		Scell_max;
	u32		max_P;		/* probability, [0 .. 1.0] 32 scaled */
	/* reciprocal_value(max_P / qth_delta) */
	struct reciprocal_value	max_P_reciprocal;
	u32		qth_delta;	/* max_th - min_th */
	u32		target_min;	/* min_th + 0.4*(max_th - min_th) */
	u32		target_max;	/* min_th + 0.6*(max_th - min_th) */
	u8		Scell_log;
	u8		Wlog;		/* log(W)		*/
	u8		Plog;		/* random number bits	*/
	u8		Stab[RED_STAB_SIZE];
};

struct red_vars {
	/* Variables */
	int		qcount;		/* Number of packets since last random
					   number generation */
	u32		qR;		/* Cached random number */

	unsigned long	qavg;		/* Average queue length: Wlog scaled */
	ktime_t		qidlestart;	/* Start of current idle period */
};

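/* Users of this header (e.g. the RED and GRED qdiscs) typically keep one
 * struct red_parms (configuration), one struct red_vars (state) and one
 * struct red_stats per managed queue. A minimal sketch; the struct name
 * below is purely illustrative:
 *
 *	struct my_red_queue {
 *		struct red_parms parms;
 *		struct red_vars  vars;
 *		struct red_stats stats;
 *	};
 */
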
static inline u32 red_maxp(u8 Plog)
{
	return Plog < 32 ? (~0U >> Plog) : ~0U;
}

static inline void red_set_vars(struct red_vars *v)
{
	/* Reset average queue length, the value is strictly bound
	 * to the parameters below, resetting hurts a bit but leaving
	 * it might result in an unreasonable qavg for a while. --TGR
	 */
	v->qavg		= 0;

	v->qcount	= -1;
}

static inline bool red_check_params(u32 qth_min, u32 qth_max, u8 Wlog,
				    u8 Scell_log, u8 *stab)
{
	if (fls(qth_min) + Wlog >= 32)
		return false;
	if (fls(qth_max) + Wlog >= 32)
		return false;
	if (Scell_log >= 32)
		return false;
	if (qth_max < qth_min)
		return false;
	if (stab) {
		int i;

		for (i = 0; i < RED_STAB_SIZE; i++)
			if (stab[i] >= 32)
				return false;
	}
	return true;
}
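
/* A sketch of typical use from a qdisc's netlink configuration path
 * ("ctl" is assumed to be a struct tc_red_qopt taken from the PARMS
 * attribute and "stab" the RED_STAB_SIZE byte idle-time table):
 *
 *	if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog,
 *			      ctl->Scell_log, stab))
 *		return -EINVAL;
 */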

static inline int red_get_flags(unsigned char qopt_flags,
				unsigned char historic_mask,
				struct nlattr *flags_attr,
				unsigned char supported_mask,
				struct nla_bitfield32 *p_flags,
				unsigned char *p_userbits,
				struct netlink_ext_ack *extack)
{
	struct nla_bitfield32 flags;

	if (qopt_flags && flags_attr) {
		NL_SET_ERR_MSG_MOD(extack, "flags should be passed either through qopt, or through a dedicated attribute");
		return -EINVAL;
	}

	if (flags_attr) {
		flags = nla_get_bitfield32(flags_attr);
	} else {
		flags.selector = historic_mask;
		flags.value = qopt_flags & historic_mask;
	}

	*p_flags = flags;
	*p_userbits = qopt_flags & ~historic_mask;
	return 0;
}

static inline int red_validate_flags(unsigned char flags,
				     struct netlink_ext_ack *extack)
{
	if ((flags & TC_RED_NODROP) && !(flags & TC_RED_ECN)) {
		NL_SET_ERR_MSG_MOD(extack, "nodrop mode is only meaningful with ECN");
		return -EINVAL;
	}

	return 0;
}
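
/* Sketch of how a qdisc would combine the legacy qopt flags byte with the
 * newer bitfield32 netlink attribute (HISTORIC_MASK, SUPPORTED_MASK and
 * TCA_..._FLAGS stand in for the qdisc's own definitions):
 *
 *	struct nla_bitfield32 flags_bf;
 *	unsigned char userbits;
 *	int err;
 *
 *	err = red_get_flags(ctl->flags, HISTORIC_MASK, tb[TCA_..._FLAGS],
 *			    SUPPORTED_MASK, &flags_bf, &userbits, extack);
 *	if (err)
 *		return err;
 *	err = red_validate_flags(flags_bf.value, extack);
 *	if (err)
 *		return err;
 */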

static inline void red_set_parms(struct red_parms *p,
				 u32 qth_min, u32 qth_max, u8 Wlog, u8 Plog,
				 u8 Scell_log, u8 *stab, u32 max_P)
{
	int delta = qth_max - qth_min;
	u32 max_p_delta;

	p->qth_min	= qth_min << Wlog;
	p->qth_max	= qth_max << Wlog;
	p->Wlog		= Wlog;
	p->Plog		= Plog;
	if (delta <= 0)
		delta = 1;
	p->qth_delta	= delta;
	if (!max_P) {
		max_P = red_maxp(Plog);
		max_P *= delta; /* max_P = (qth_max - qth_min)/2^Plog */
	}
	p->max_P = max_P;
	max_p_delta = max_P / delta;
	max_p_delta = max(max_p_delta, 1U);
	p->max_P_reciprocal  = reciprocal_value(max_p_delta);

	/* RED Adaptive target :
	 * [min_th + 0.4*(max_th - min_th),
	 *  min_th + 0.6*(max_th - min_th)].
	 */
	delta /= 5;
	p->target_min = qth_min + 2*delta;
	p->target_max = qth_min + 3*delta;

	p->Scell_log	= Scell_log;
	p->Scell_max	= (255 << Scell_log);

	if (stab)
		memcpy(p->Stab, stab, sizeof(p->Stab));
}
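
/* Illustrative call (a sketch, not lifted from any particular qdisc):
 * the parameters usually come straight from a struct tc_red_qopt plus an
 * optional max_P attribute (0 if absent):
 *
 *	red_set_parms(&q->parms, ctl->qth_min, ctl->qth_max, ctl->Wlog,
 *		      ctl->Plog, ctl->Scell_log, stab, max_P);
 *	red_set_vars(&q->vars);
 */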

static inline int red_is_idling(const struct red_vars *v)
{
	return v->qidlestart != 0;
}

static inline void red_start_of_idle_period(struct red_vars *v)
{
	v->qidlestart = ktime_get();
}

static inline void red_end_of_idle_period(struct red_vars *v)
{
	v->qidlestart = 0;
}

static inline void red_restart(struct red_vars *v)
{
	red_end_of_idle_period(v);
	v->qavg = 0;
	v->qcount = -1;
}

static inline unsigned long red_calc_qavg_from_idle_time(const struct red_parms *p,
							 const struct red_vars *v)
{
	s64 delta = ktime_us_delta(ktime_get(), v->qidlestart);
	long us_idle = min_t(s64, delta, p->Scell_max);
	int  shift;

	/*
	 * The problem: ideally, average queue length recalculation should
	 * be done over constant clock intervals. This is too expensive, so
	 * that the calculation is driven by outgoing packets.
	 * When the queue is idle we have to model this clock by hand.
	 *
	 * SF+VJ proposed to "generate":
	 *
	 *	m = idletime / (average_pkt_size / bandwidth)
	 *
	 * dummy packets as a burst after idle time, i.e.
	 *
	 * 	v->qavg *= (1-W)^m
	 *
	 * This is an apparently overcomplicated solution (f.e. we have to
	 * precompute a table to make this calculation in reasonable time).
	 * I believe that a simpler model may be used here,
	 * but it is a field for experiments.
	 */

	shift = p->Stab[(us_idle >> p->Scell_log) & RED_STAB_MASK];

	if (shift)
		return v->qavg >> shift;
	else {
		/* Approximate initial part of exponent with linear function:
		 *
		 * 	(1-W)^m ~= 1-mW + ...
		 *
		 * It seems to be the best solution to the
		 * problem of too coarse exponent tabulation.
		 */
		us_idle = (v->qavg * (u64)us_idle) >> p->Scell_log;

		if (us_idle < (v->qavg >> 1))
			return v->qavg - us_idle;
		else
			return v->qavg >> 1;
	}
}

static inline unsigned long red_calc_qavg_no_idle_time(const struct red_parms *p,
						       const struct red_vars *v,
						       unsigned int backlog)
{
	/*
	 * NOTE: v->qavg is a fixed point number with the point at Wlog.
	 * The formula below is equivalent to the floating point
	 * version:
	 *
	 * 	qavg = qavg*(1-W) + backlog*W;
	 *
	 * --ANK (980924)
	 */
	return v->qavg + (backlog - (v->qavg >> p->Wlog));
}

static inline unsigned long red_calc_qavg(const struct red_parms *p,
					  const struct red_vars *v,
					  unsigned int backlog)
{
	if (!red_is_idling(v))
		return red_calc_qavg_no_idle_time(p, v, backlog);
	else
		return red_calc_qavg_from_idle_time(p, v);
}


static inline u32 red_random(const struct red_parms *p)
{
	return reciprocal_divide(get_random_u32(), p->max_P_reciprocal);
}

static inline int red_mark_probability(const struct red_parms *p,
				       const struct red_vars *v,
				       unsigned long qavg)
{
	/* The formula used below causes questions.

	   OK. qR is a random number in the interval
		(0..1/max_P)*(qth_max-qth_min)
	   i.e. 0..(2^Plog). If we used floating point
	   arithmetics, it would be: (2^Plog)*rnd_num,
	   where rnd_num is less than 1.

	   Taking into account that qavg has a fixed
	   point at Wlog, the two lines
	   below have the following floating point equivalent:

	   max_P*(qavg - qth_min)/(qth_max-qth_min) < rnd/qcount

	   Any questions? --ANK (980924)
	 */
	return !(((qavg - p->qth_min) >> p->Wlog) * v->qcount < v->qR);
}

enum {
	RED_BELOW_MIN_THRESH,
	RED_BETWEEN_TRESH,
	RED_ABOVE_MAX_TRESH,
};

static inline int red_cmp_thresh(const struct red_parms *p, unsigned long qavg)
{
	if (qavg < p->qth_min)
		return RED_BELOW_MIN_THRESH;
	else if (qavg >= p->qth_max)
		return RED_ABOVE_MAX_TRESH;
	else
		return RED_BETWEEN_TRESH;
}

enum {
	RED_DONT_MARK,
	RED_PROB_MARK,
	RED_HARD_MARK,
};

static inline int red_action(const struct red_parms *p,
			     struct red_vars *v,
			     unsigned long qavg)
{
	switch (red_cmp_thresh(p, qavg)) {
		case RED_BELOW_MIN_THRESH:
			v->qcount = -1;
			return RED_DONT_MARK;

		case RED_BETWEEN_TRESH:
			if (++v->qcount) {
				if (red_mark_probability(p, v, qavg)) {
					v->qcount = 0;
					v->qR = red_random(p);
					return RED_PROB_MARK;
				}
			} else
				v->qR = red_random(p);

			return RED_DONT_MARK;

		case RED_ABOVE_MAX_TRESH:
			v->qcount = -1;
			return RED_HARD_MARK;
	}

	BUG();
	return RED_DONT_MARK;
}
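
/* A condensed sketch of how an enqueue path strings these helpers together
 * (modelled loosely on the RED qdisc; ECN marking, statistics for marks and
 * error handling are omitted, and "q"/"sch" are illustrative):
 *
 *	q->vars.qavg = red_calc_qavg(&q->parms, &q->vars,
 *				     sch->qstats.backlog);
 *	if (red_is_idling(&q->vars))
 *		red_end_of_idle_period(&q->vars);
 *
 *	switch (red_action(&q->parms, &q->vars, q->vars.qavg)) {
 *	case RED_DONT_MARK:
 *		break;
 *	case RED_PROB_MARK:
 *		q->stats.prob_drop++;
 *		goto congestion_drop;
 *	case RED_HARD_MARK:
 *		q->stats.forced_drop++;
 *		goto congestion_drop;
 *	}
 */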

static inline void red_adaptative_algo(struct red_parms *p, struct red_vars *v)
{
	unsigned long qavg;
	u32 max_p_delta;

	qavg = v->qavg;
	if (red_is_idling(v))
		qavg = red_calc_qavg_from_idle_time(p, v);

	/* v->qavg is a fixed point number with the point at Wlog */
	qavg >>= p->Wlog;

	if (qavg > p->target_max && p->max_P <= MAX_P_MAX)
		p->max_P += MAX_P_ALPHA(p->max_P); /* maxp = maxp + alpha */
	else if (qavg < p->target_min && p->max_P >= MAX_P_MIN)
		p->max_P = (p->max_P/10)*9; /* maxp = maxp * beta */

	max_p_delta = DIV_ROUND_CLOSEST(p->max_P, p->qth_delta);
	max_p_delta = max(max_p_delta, 1U);
	p->max_P_reciprocal = reciprocal_value(max_p_delta);
}
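
/* Adaptive RED users re-run this adjustment periodically; a sketch of the
 * usual pattern (the 500 ms period comes from the paper cited above, the
 * timer callback and field names are illustrative):
 *
 *	static void my_adapt_timer(struct timer_list *t)
 *	{
 *		...
 *		spin_lock(root_lock);
 *		red_adaptative_algo(&q->parms, &q->vars);
 *		mod_timer(&q->adapt_timer, jiffies + HZ/2);
 *		spin_unlock(root_lock);
 *	}
 */
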
#endif