#ifndef __NET_SCHED_CODEL_IMPL_H
#define __NET_SCHED_CODEL_IMPL_H

/*
 * Codel - The Controlled-Delay Active Queue Management algorithm
 *
 *  Copyright (C) 2011-2012 Kathleen Nichols <nichols@pollere.com>
 *  Copyright (C) 2011-2012 Van Jacobson <van@pollere.net>
 *  Copyright (C) 2012 Michael D. Taht <dave.taht@bufferbloat.net>
 *  Copyright (C) 2012,2015 Eric Dumazet <edumazet@google.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the authors may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * Alternatively, provided that this notice is retained in full, this
 * software may be distributed under the terms of the GNU General
 * Public License ("GPL") version 2, in which case the provisions of the
 * GPL apply INSTEAD OF those given above.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 */

/* Controlling Queue Delay (CoDel) algorithm
 * =========================================
 * Source: Kathleen Nichols and Van Jacobson
 * http://queue.acm.org/detail.cfm?id=2209336
 *
 * Implemented on Linux by Dave Taht and Eric Dumazet
 */

#include <net/inet_ecn.h>

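/* Defaults recommended by the CoDel authors: a 100 ms interval and a 5 ms
 * target sojourn time. CE marking above a threshold and ECN marking instead
 * of dropping are both disabled by default. Note that params->mtu is left
 * for the caller to set.
 */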
static void codel_params_init(struct codel_params *params)
{
	params->interval = MS2TIME(100);
	params->target = MS2TIME(5);
	params->ce_threshold = CODEL_DISABLED_THRESHOLD;
	params->ce_threshold_mask = 0;
	params->ce_threshold_selector = 0;
	params->ecn = false;
}

static void codel_vars_init(struct codel_vars *vars)
{
	memset(vars, 0, sizeof(*vars));
}

static void codel_stats_init(struct codel_stats *stats)
{
	stats->maxpacket = 0;
}

/*
 * http://en.wikipedia.org/wiki/Methods_of_computing_square_roots#Iterative_methods_for_reciprocal_square_roots
 * new_invsqrt = (invsqrt / 2) * (3 - count * invsqrt^2)
 *
 * Here, invsqrt is a fixed-point number (< 1.0) with a 32-bit mantissa, aka Q0.32
 */
static void codel_Newton_step(struct codel_vars *vars)
{
	u32 invsqrt = ((u32)vars->rec_inv_sqrt) << REC_INV_SQRT_SHIFT;
	u32 invsqrt2 = ((u64)invsqrt * invsqrt) >> 32;
	u64 val = (3LL << 32) - ((u64)vars->count * invsqrt2);

	val >>= 2; /* avoid overflow in following multiply */
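	/* val now holds (3 - count * invsqrt^2) scaled by 2^30; multiplying
	 * by the Q0.32 invsqrt gives a 2^62 scale, so shifting right by 30
	 * restores Q0.32 and one extra bit is the divide by two of the
	 * Newton formula, hence the shift of 32 - 2 + 1 below.
	 */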
	val = (val * invsqrt) >> (32 - 2 + 1);

	vars->rec_inv_sqrt = val >> REC_INV_SQRT_SHIFT;
}

/*
 * The CoDel control_law is t + interval/sqrt(count).
 * We maintain in rec_inv_sqrt the reciprocal value of sqrt(count) to avoid
 * both a sqrt() and a divide operation.
 */
static codel_time_t codel_control_law(codel_time_t t,
				      codel_time_t interval,
				      u32 rec_inv_sqrt)
{
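	/* reciprocal_scale() computes ((u64)interval * X) >> 32, with X
	 * being rec_inv_sqrt widened back to Q0.32, so the result is
	 * approximately interval / sqrt(count).
	 */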
	return t + reciprocal_scale(interval, rec_inv_sqrt << REC_INV_SQRT_SHIFT);
}

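/* Record the sojourn time of the packet at the head of the queue in
 * vars->ldelay and return true when it is OK to drop (or mark) it: the
 * sojourn time has stayed above target for at least one interval while
 * the backlog exceeds one MTU. A NULL skb resets the first_above_time
 * state and returns false.
 */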
static bool codel_should_drop(const struct sk_buff *skb,
			      void *ctx,
			      struct codel_vars *vars,
			      struct codel_params *params,
			      struct codel_stats *stats,
			      codel_skb_len_t skb_len_func,
			      codel_skb_time_t skb_time_func,
			      u32 *backlog,
			      codel_time_t now)
{
	bool ok_to_drop;
	u32 skb_len;

	if (!skb) {
		vars->first_above_time = 0;
		return false;
	}

	skb_len = skb_len_func(skb);
	vars->ldelay = now - skb_time_func(skb);

	if (unlikely(skb_len > stats->maxpacket))
		stats->maxpacket = skb_len;

	if (codel_time_before(vars->ldelay, params->target) ||
	    *backlog <= params->mtu) {
		/* went below - stay below for at least interval */
		vars->first_above_time = 0;
		return false;
	}
	ok_to_drop = false;
	if (vars->first_above_time == 0) {
		/* just went above target from below. If we stay above
		 * for at least one interval we'll say it's OK to drop
		 */
		vars->first_above_time = now + params->interval;
	} else if (codel_time_after(now, vars->first_above_time)) {
		ok_to_drop = true;
	}
	return ok_to_drop;
}

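/* Dequeue the next packet to transmit, dropping or ECN-marking packets
 * according to the CoDel state machine, and CE-marking above the optional
 * ce_threshold. All queue access goes through the caller-supplied hooks;
 * returns NULL when the queue is empty.
 */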
static struct sk_buff *codel_dequeue(void *ctx,
				     u32 *backlog,
				     struct codel_params *params,
				     struct codel_vars *vars,
				     struct codel_stats *stats,
				     codel_skb_len_t skb_len_func,
				     codel_skb_time_t skb_time_func,
				     codel_skb_drop_t drop_func,
				     codel_skb_dequeue_t dequeue_func)
{
	struct sk_buff *skb = dequeue_func(vars, ctx);
	codel_time_t now;
	bool drop;

	if (!skb) {
		vars->dropping = false;
		return skb;
	}
	now = codel_get_time();
	drop = codel_should_drop(skb, ctx, vars, params, stats,
				 skb_len_func, skb_time_func, backlog, now);
	if (vars->dropping) {
		if (!drop) {
			/* sojourn time below target - leave dropping state */
			vars->dropping = false;
		} else if (codel_time_after_eq(now, vars->drop_next)) {
			/* It's time for the next drop. Drop the current
			 * packet and dequeue the next. The dequeue might
			 * take us out of dropping state.
			 * If not, schedule the next drop.
			 * A large backlog might result in drop rates so high
			 * that the next drop should happen now,
			 * hence the while loop.
			 */
			while (vars->dropping &&
			       codel_time_after_eq(now, vars->drop_next)) {
				vars->count++; /* don't care about a possible wrap,
						* there is no divide anymore
						*/
				codel_Newton_step(vars);
				if (params->ecn && INET_ECN_set_ce(skb)) {
					stats->ecn_mark++;
					vars->drop_next =
						codel_control_law(vars->drop_next,
								  params->interval,
								  vars->rec_inv_sqrt);
					goto end;
				}
				stats->drop_len += skb_len_func(skb);
				drop_func(skb, ctx);
				stats->drop_count++;
				skb = dequeue_func(vars, ctx);
				if (!codel_should_drop(skb, ctx,
						       vars, params, stats,
						       skb_len_func,
						       skb_time_func,
						       backlog, now)) {
					/* leave dropping state */
					vars->dropping = false;
				} else {
					/* and schedule the next drop */
					vars->drop_next =
						codel_control_law(vars->drop_next,
								  params->interval,
								  vars->rec_inv_sqrt);
				}
			}
		}
	} else if (drop) {
		u32 delta;

		if (params->ecn && INET_ECN_set_ce(skb)) {
			stats->ecn_mark++;
		} else {
			stats->drop_len += skb_len_func(skb);
			drop_func(skb, ctx);
			stats->drop_count++;

			skb = dequeue_func(vars, ctx);
			drop = codel_should_drop(skb, ctx, vars, params,
						 stats, skb_len_func,
						 skb_time_func, backlog, now);
		}
		vars->dropping = true;
		/* if min went above target close to when we last went below it
		 * assume that the drop rate that controlled the queue on the
		 * last cycle is a good starting point to control it now.
		 */
		delta = vars->count - vars->lastcount;
		if (delta > 1 &&
		    codel_time_before(now - vars->drop_next,
				      16 * params->interval)) {
			vars->count = delta;
			/* we don't care if the rec_inv_sqrt approximation
			 * is not very precise:
			 * the next Newton steps will correct it quadratically.
			 */
			codel_Newton_step(vars);
		} else {
			vars->count = 1;
			vars->rec_inv_sqrt = ~0U >> REC_INV_SQRT_SHIFT;
		}
		vars->lastcount = vars->count;
		vars->drop_next = codel_control_law(now, params->interval,
						    vars->rec_inv_sqrt);
	}
end:
	if (skb && codel_time_after(vars->ldelay, params->ce_threshold)) {
		bool set_ce = true;

		if (params->ce_threshold_mask) {
			int dsfield = skb_get_dsfield(skb);

			set_ce = (dsfield >= 0 &&
				  (((u8)dsfield & params->ce_threshold_mask) ==
				   params->ce_threshold_selector));
		}
		if (set_ce && INET_ECN_set_ce(skb))
			stats->ce_mark++;
	}
	return skb;
}
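
/* A minimal usage sketch, loosely modeled on what a qdisc does. Everything
 * named "example_*" below is illustrative only and not part of this header;
 * real callers (net/sched/sch_codel.c, net/sched/sch_fq_codel.c, mac80211)
 * provide their own queue context, enqueue timestamps and drop accounting.
 */
struct example_queue {
	struct sk_buff_head	list;
	u32			backlog;	/* bytes currently queued */
	struct codel_params	params;
	struct codel_vars	vars;
	struct codel_stats	stats;
};

static u32 example_skb_len(const struct sk_buff *skb)
{
	return skb->len;
}

static codel_time_t example_skb_time(const struct sk_buff *skb)
{
	/* assumes the enqueue path stored a codel_time_t in skb->cb */
	return *(const codel_time_t *)skb->cb;
}

static void example_drop(struct sk_buff *skb, void *ctx)
{
	kfree_skb(skb);
}

static struct sk_buff *example_dequeue_one(struct codel_vars *vars, void *ctx)
{
	struct example_queue *q = ctx;
	struct sk_buff *skb = __skb_dequeue(&q->list);

	if (skb)
		q->backlog -= skb->len;
	return skb;
}

static void example_init(struct example_queue *q)
{
	__skb_queue_head_init(&q->list);
	q->backlog = 0;
	codel_params_init(&q->params);
	q->params.mtu = 1500;	/* a real caller uses the device MTU */
	codel_vars_init(&q->vars);
	codel_stats_init(&q->stats);
}

static struct sk_buff *example_codel_dequeue(struct example_queue *q)
{
	return codel_dequeue(q, &q->backlog, &q->params, &q->vars, &q->stats,
			     example_skb_len, example_skb_time,
			     example_drop, example_dequeue_one);
}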

#endif