#ifndef __NET_SCHED_CODEL_H
#define __NET_SCHED_CODEL_H

/*
 * Codel - The Controlled-Delay Active Queue Management algorithm
 *
 * Copyright (C) 2011-2012 Kathleen Nichols <nichols@pollere.com>
 * Copyright (C) 2011-2012 Van Jacobson <van@pollere.net>
 * Copyright (C) 2012 Michael D. Taht <dave.taht@bufferbloat.net>
 * Copyright (C) 2012,2015 Eric Dumazet <edumazet@google.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the authors may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * Alternatively, provided that this notice is retained in full, this
 * software may be distributed under the terms of the GNU General
 * Public License ("GPL") version 2, in which case the provisions of the
 * GPL apply INSTEAD OF those given above.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 */

#include <linux/types.h>
#include <linux/ktime.h>
#include <linux/skbuff.h>
#include <net/pkt_sched.h>
#include <net/inet_ecn.h>

/* Controlling Queue Delay (CoDel) algorithm
 * =========================================
 * Source: Kathleen Nichols and Van Jacobson
 * http://queue.acm.org/detail.cfm?id=2209336
 *
 * Implemented on Linux by Dave Taht and Eric Dumazet
 */


/* CoDel uses a 1024 nsec clock, encoded in u32
 * This gives a range of 2199 seconds, because of signed compares
 */
typedef u32 codel_time_t;
typedef s32 codel_tdiff_t;
#define CODEL_SHIFT 10
#define MS2TIME(a) (((a) * NSEC_PER_MSEC) >> CODEL_SHIFT)

static inline codel_time_t codel_get_time(void)
{
	u64 ns = ktime_get_ns();

	return ns >> CODEL_SHIFT;
}

/* Dealing with timer wrapping, according to RFC 1982, as described on Wikipedia:
 * https://en.wikipedia.org/wiki/Serial_number_arithmetic#General_Solution
 * codel_time_after(a,b) returns true if the time a is after time b.
 */
#define codel_time_after(a, b)						\
	(typecheck(codel_time_t, a) &&					\
	 typecheck(codel_time_t, b) &&					\
	 ((s32)((a) - (b)) > 0))
#define codel_time_before(a, b)		codel_time_after(b, a)

#define codel_time_after_eq(a, b)					\
	(typecheck(codel_time_t, a) &&					\
	 typecheck(codel_time_t, b) &&					\
	 ((s32)((a) - (b)) >= 0))
#define codel_time_before_eq(a, b)	codel_time_after_eq(b, a)
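
/* Worked example of the wrap-safe comparison above: timestamps are unsigned
 * 1024 ns ticks and eventually wrap. With a = (codel_time_t)2 and
 * b = (codel_time_t)0xfffffffe, the difference (s32)(a - b) == 4 > 0, so
 * codel_time_after(a, b) is true even though a < b as plain u32 values.
 * The ordering therefore stays correct across a wrap, as long as the two
 * timestamps are less than 2^31 ticks (about 2199 seconds) apart.
 */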

/* Qdiscs using codel plugin must use codel_skb_cb in their own cb[] */
struct codel_skb_cb {
	codel_time_t enqueue_time;
};

static struct codel_skb_cb *get_codel_cb(const struct sk_buff *skb)
{
	qdisc_cb_private_validate(skb, sizeof(struct codel_skb_cb));
	return (struct codel_skb_cb *)qdisc_skb_cb(skb)->data;
}

static codel_time_t codel_get_enqueue_time(const struct sk_buff *skb)
{
	return get_codel_cb(skb)->enqueue_time;
}

static void codel_set_enqueue_time(struct sk_buff *skb)
{
	get_codel_cb(skb)->enqueue_time = codel_get_time();
}

static inline u32 codel_time_to_us(codel_time_t val)
{
	u64 valns = ((u64)val << CODEL_SHIFT);

	do_div(valns, NSEC_PER_USEC);
	return (u32)valns;
}

/**
 * struct codel_params - contains codel parameters
 * @target:	target queue size (in time units)
 * @ce_threshold:  threshold for marking packets with ECN CE
 * @interval:	width of moving time window
 * @mtu:	device mtu, or minimal queue backlog in bytes.
 * @ecn:	is Explicit Congestion Notification enabled
 */
struct codel_params {
	codel_time_t	target;
	codel_time_t	ce_threshold;
	codel_time_t	interval;
	u32		mtu;
	bool		ecn;
};

/**
 * struct codel_vars - contains codel variables
 * @count:		how many drops we've done since the last time we
 *			entered dropping state
 * @lastcount:		count at entry to dropping state
 * @dropping:		set to true if in dropping state
 * @rec_inv_sqrt:	reciprocal value of sqrt(count) >> 1
 * @first_above_time:	when we went (or will go) continuously above target
 *			for interval
 * @drop_next:		time to drop next packet, or when we dropped last
 * @ldelay:		sojourn time of last dequeued packet
 */
struct codel_vars {
	u32		count;
	u32		lastcount;
	bool		dropping;
	u16		rec_inv_sqrt;
	codel_time_t	first_above_time;
	codel_time_t	drop_next;
	codel_time_t	ldelay;
};

#define REC_INV_SQRT_BITS (8 * sizeof(u16)) /* or sizeof_in_bits(rec_inv_sqrt) */
/* needed shift to get a Q0.32 number from rec_inv_sqrt */
#define REC_INV_SQRT_SHIFT (32 - REC_INV_SQRT_BITS)

/**
 * struct codel_stats - contains codel shared variables and stats
 * @maxpacket:	largest packet we've seen so far
 * @drop_count:	temp count of dropped packets in dequeue()
 * @ecn_mark:	number of packets we ECN marked instead of dropping
 * @ce_mark:	number of packets CE marked because sojourn time was above ce_threshold
 */
struct codel_stats {
	u32		maxpacket;
	u32		drop_count;
	u32		ecn_mark;
	u32		ce_mark;
};

#define CODEL_DISABLED_THRESHOLD INT_MAX

static void codel_params_init(struct codel_params *params,
			      const struct Qdisc *sch)
{
	params->interval = MS2TIME(100);
	params->target = MS2TIME(5);
	params->mtu = psched_mtu(qdisc_dev(sch));
	params->ce_threshold = CODEL_DISABLED_THRESHOLD;
	params->ecn = false;
}
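
/* The defaults above are the usual CoDel recommendation of a 5 ms target
 * within a 100 ms interval. In 1024 ns ticks, MS2TIME(5) = (5 * 1000000) >> 10
 * = 4882 and MS2TIME(100) = (100 * 1000000) >> 10 = 97656, i.e. roughly 5 ms
 * and 100 ms once the rounding from the shift is taken into account.
 */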

static void codel_vars_init(struct codel_vars *vars)
{
	memset(vars, 0, sizeof(*vars));
}

static void codel_stats_init(struct codel_stats *stats)
{
	stats->maxpacket = 0;
}

/*
 * http://en.wikipedia.org/wiki/Methods_of_computing_square_roots#Iterative_methods_for_reciprocal_square_roots
 * new_invsqrt = (invsqrt / 2) * (3 - count * invsqrt^2)
 *
 * Here, invsqrt is a fixed point number (< 1.0), 32bit mantissa, aka Q0.32
 */
static void codel_Newton_step(struct codel_vars *vars)
{
	u32 invsqrt = ((u32)vars->rec_inv_sqrt) << REC_INV_SQRT_SHIFT;
	u32 invsqrt2 = ((u64)invsqrt * invsqrt) >> 32;
	u64 val = (3LL << 32) - ((u64)vars->count * invsqrt2);

	val >>= 2; /* avoid overflow in following multiply */
	val = (val * invsqrt) >> (32 - 2 + 1);

	vars->rec_inv_sqrt = val >> REC_INV_SQRT_SHIFT;
}

/*
 * CoDel control_law is t + interval/sqrt(count)
 * We maintain in rec_inv_sqrt the reciprocal value of sqrt(count) to avoid
 * both sqrt() and divide operations.
 */
static codel_time_t codel_control_law(codel_time_t t,
				      codel_time_t interval,
				      u32 rec_inv_sqrt)
{
	return t + reciprocal_scale(interval, rec_inv_sqrt << REC_INV_SQRT_SHIFT);
}
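
/* For example: codel_dequeue() below seeds rec_inv_sqrt with
 * ~0U >> REC_INV_SQRT_SHIFT (roughly 1.0 in Q0.16) when it (re)starts the
 * dropping state, so the first drop is scheduled about one interval after
 * "now". Once count has converged to 4, rec_inv_sqrt holds roughly 0.5 in
 * Q0.16 (about 0x8000) and reciprocal_scale() yields interval / 2: drops get
 * closer together as the square root of the drop count, as the control law
 * above requires, without ever computing a sqrt() or a divide.
 */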

static bool codel_should_drop(const struct sk_buff *skb,
			      struct Qdisc *sch,
			      struct codel_vars *vars,
			      struct codel_params *params,
			      struct codel_stats *stats,
			      codel_time_t now)
{
	bool ok_to_drop;

	if (!skb) {
		vars->first_above_time = 0;
		return false;
	}

	vars->ldelay = now - codel_get_enqueue_time(skb);
	sch->qstats.backlog -= qdisc_pkt_len(skb);

	if (unlikely(qdisc_pkt_len(skb) > stats->maxpacket))
		stats->maxpacket = qdisc_pkt_len(skb);

	if (codel_time_before(vars->ldelay, params->target) ||
	    sch->qstats.backlog <= params->mtu) {
		/* went below - stay below for at least interval */
		vars->first_above_time = 0;
		return false;
	}
	ok_to_drop = false;
	if (vars->first_above_time == 0) {
		/* just went above from below. If we stay above
		 * for at least interval we'll say it's ok to drop
		 */
		vars->first_above_time = now + params->interval;
	} else if (codel_time_after(now, vars->first_above_time)) {
		ok_to_drop = true;
	}
	return ok_to_drop;
}

typedef struct sk_buff * (*codel_skb_dequeue_t)(struct codel_vars *vars,
						struct Qdisc *sch);

static struct sk_buff *codel_dequeue(struct Qdisc *sch,
				     struct codel_params *params,
				     struct codel_vars *vars,
				     struct codel_stats *stats,
				     codel_skb_dequeue_t dequeue_func)
{
	struct sk_buff *skb = dequeue_func(vars, sch);
	codel_time_t now;
	bool drop;

	if (!skb) {
		vars->dropping = false;
		return skb;
	}
	now = codel_get_time();
	drop = codel_should_drop(skb, sch, vars, params, stats, now);
	if (vars->dropping) {
		if (!drop) {
			/* sojourn time below target - leave dropping state */
			vars->dropping = false;
		} else if (codel_time_after_eq(now, vars->drop_next)) {
			/* It's time for the next drop. Drop the current
			 * packet and dequeue the next. The dequeue might
			 * take us out of dropping state.
			 * If not, schedule the next drop.
			 * A large backlog might result in drop rates so high
			 * that the next drop should happen now,
			 * hence the while loop.
			 */
			while (vars->dropping &&
			       codel_time_after_eq(now, vars->drop_next)) {
				vars->count++; /* don't care about a possible
						* wrap since there is no
						* divide anymore
						*/
				codel_Newton_step(vars);
				if (params->ecn && INET_ECN_set_ce(skb)) {
					stats->ecn_mark++;
					vars->drop_next =
						codel_control_law(vars->drop_next,
								  params->interval,
								  vars->rec_inv_sqrt);
					goto end;
				}
				qdisc_drop(skb, sch);
				stats->drop_count++;
				skb = dequeue_func(vars, sch);
				if (!codel_should_drop(skb, sch,
						       vars, params, stats, now)) {
					/* leave dropping state */
					vars->dropping = false;
				} else {
					/* and schedule the next drop */
					vars->drop_next =
						codel_control_law(vars->drop_next,
								  params->interval,
								  vars->rec_inv_sqrt);
				}
			}
		}
	} else if (drop) {
		u32 delta;

		if (params->ecn && INET_ECN_set_ce(skb)) {
			stats->ecn_mark++;
		} else {
			qdisc_drop(skb, sch);
			stats->drop_count++;

			skb = dequeue_func(vars, sch);
			drop = codel_should_drop(skb, sch, vars, params,
						 stats, now);
		}
		vars->dropping = true;
		/* if min went above target close to when we last went below it
		 * assume that the drop rate that controlled the queue on the
		 * last cycle is a good starting point to control it now.
		 */
		delta = vars->count - vars->lastcount;
		if (delta > 1 &&
		    codel_time_before(now - vars->drop_next,
				      16 * params->interval)) {
			vars->count = delta;
			/* we don't care if the rec_inv_sqrt approximation
			 * is not very precise:
			 * the next Newton steps will correct it quadratically.
			 */
			codel_Newton_step(vars);
		} else {
			vars->count = 1;
			vars->rec_inv_sqrt = ~0U >> REC_INV_SQRT_SHIFT;
		}
		vars->lastcount = vars->count;
		vars->drop_next = codel_control_law(now, params->interval,
						    vars->rec_inv_sqrt);
	}
end:
	if (skb && codel_time_after(vars->ldelay, params->ce_threshold) &&
	    INET_ECN_set_ce(skb))
		stats->ce_mark++;
	return skb;
}
#endif