/* Copyright (C) 2013 Cisco Systems, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Author: Vijay Subramanian <vijaynsu@cisco.com>
 * Author: Mythili Prabhu <mysuryan@cisco.com>
 *
 * ECN support is added by Naeem Khademi <naeemk@ifi.uio.no>
 * University of Oslo, Norway.
 *
 * References:
 * IETF draft submission: http://tools.ietf.org/html/draft-pan-aqm-pie-00
 * IEEE Conference on High Performance Switching and Routing 2013:
 * "PIE: A Lightweight Control Scheme to Address the Bufferbloat Problem"
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/pkt_sched.h>
#include <net/inet_ecn.h>

#define QUEUE_THRESHOLD 10000
#define DQCOUNT_INVALID -1
#define MAX_PROB 0xffffffff
#define PIE_SCALE 8

/* parameters used */
struct pie_params {
	psched_time_t target;	/* user specified target delay in pschedtime */
	u32 tupdate;		/* timer frequency (in jiffies) */
	u32 limit;		/* number of packets that can be enqueued */
	u32 alpha;		/* alpha and beta are between 0 and 32 */
	u32 beta;		/* and are used for shift relative to 1 */
	bool ecn;		/* true if ecn is enabled */
	bool bytemode;		/* to scale drop early prob based on pkt size */
};

/* variables used */
struct pie_vars {
	u32 prob;		/* probability but scaled by u32 limit. */
	psched_time_t burst_time;
	psched_time_t qdelay;
	psched_time_t qdelay_old;
	u64 dq_count;		/* measured in bytes */
	psched_time_t dq_tstamp;	/* start of current measurement cycle */
	u32 avg_dq_rate;	/* bytes per pschedtime tick, scaled */
	u32 qlen_old;		/* in bytes */
};

/* statistics gathering */
struct pie_stats {
	u32 packets_in;		/* total number of packets enqueued */
	u32 dropped;		/* packets dropped due to pie_action */
	u32 overlimit;		/* dropped due to lack of space in queue */
	u32 maxq;		/* maximum queue size */
	u32 ecn_mark;		/* packets marked with ECN */
};

/* private data for the Qdisc */
struct pie_sched_data {
	struct pie_params params;
	struct pie_vars vars;
	struct pie_stats stats;
	struct timer_list adapt_timer;
};

static void pie_params_init(struct pie_params *params)
{
	params->alpha = 2;
	params->beta = 20;
	params->tupdate = usecs_to_jiffies(30 * USEC_PER_MSEC);	/* 30 ms */
	params->limit = 1000;	/* default of 1000 packets */
	params->target = PSCHED_NS2TICKS(20 * NSEC_PER_MSEC);	/* 20 ms */
	params->ecn = false;
	params->bytemode = false;
}

static void pie_vars_init(struct pie_vars *vars)
{
	vars->dq_count = DQCOUNT_INVALID;
	vars->avg_dq_rate = 0;
	/* default of 100 ms in pschedtime */
	vars->burst_time = PSCHED_NS2TICKS(100 * NSEC_PER_MSEC);
}
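
/* drop_early() decides, probabilistically, whether an arriving packet
 * should be dropped (or ECN-marked) before it is enqueued. It declines to
 * drop while burst allowance remains, while both the queue delay and the
 * drop probability are low, and while the backlog is below two MTUs;
 * otherwise it drops with probability vars.prob, optionally scaled by
 * packet size when bytemode is enabled.
 */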

static bool drop_early(struct Qdisc *sch, u32 packet_size)
{
	struct pie_sched_data *q = qdisc_priv(sch);
	u32 rnd;
	u32 local_prob = q->vars.prob;
	u32 mtu = psched_mtu(qdisc_dev(sch));

	/* If there is still burst allowance left, skip random early drop */
	if (q->vars.burst_time > 0)
		return false;

	/* If current delay is less than half of target, and
	 * if drop prob is low already, disable early_drop
	 */
	if ((q->vars.qdelay < q->params.target / 2) &&
	    (q->vars.prob < MAX_PROB / 5))
		return false;

	/* If we have fewer than 2 mtu-sized packets, disable drop_early,
	 * similar to min_th in RED
	 */
	if (sch->qstats.backlog < 2 * mtu)
		return false;

	/* If bytemode is turned on, use packet size to compute the new
	 * probability. Smaller packets have a lower drop prob in this case.
	 */
	if (q->params.bytemode && packet_size <= mtu)
		local_prob = (local_prob / mtu) * packet_size;
	else
		local_prob = q->vars.prob;

	rnd = prandom_u32();
	if (rnd < local_prob)
		return true;

	return false;
}

static int pie_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			     struct sk_buff **to_free)
{
	struct pie_sched_data *q = qdisc_priv(sch);
	bool enqueue = false;

	if (unlikely(qdisc_qlen(sch) >= sch->limit)) {
		q->stats.overlimit++;
		goto out;
	}

	if (!drop_early(sch, skb->len)) {
		enqueue = true;
	} else if (q->params.ecn && (q->vars.prob <= MAX_PROB / 10) &&
		   INET_ECN_set_ce(skb)) {
		/* If packet is ecn capable, mark it if drop probability
		 * is lower than 10%, else drop it.
		 */
		q->stats.ecn_mark++;
		enqueue = true;
	}

	/* we can enqueue the packet */
	if (enqueue) {
		q->stats.packets_in++;
		if (qdisc_qlen(sch) > q->stats.maxq)
			q->stats.maxq = qdisc_qlen(sch);

		return qdisc_enqueue_tail(skb, sch);
	}

out:
	q->stats.dropped++;
	return qdisc_drop(skb, sch, to_free);
}
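
/* The netlink attributes below correspond to the tc(8) options for this
 * qdisc. An illustrative (not normative) invocation:
 *
 *   tc qdisc add dev eth0 root pie limit 1000 target 20ms tupdate 30ms ecn
 *
 * target and tupdate are time values; they arrive here in microseconds
 * via TCA_PIE_TARGET and TCA_PIE_TUPDATE.
 */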

static const struct nla_policy pie_policy[TCA_PIE_MAX + 1] = {
	[TCA_PIE_TARGET] = {.type = NLA_U32},
	[TCA_PIE_LIMIT] = {.type = NLA_U32},
	[TCA_PIE_TUPDATE] = {.type = NLA_U32},
	[TCA_PIE_ALPHA] = {.type = NLA_U32},
	[TCA_PIE_BETA] = {.type = NLA_U32},
	[TCA_PIE_ECN] = {.type = NLA_U32},
	[TCA_PIE_BYTEMODE] = {.type = NLA_U32},
};

static int pie_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct pie_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_PIE_MAX + 1];
	unsigned int qlen, dropped = 0;
	int err;

	if (!opt)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_PIE_MAX, opt, pie_policy, NULL);
	if (err < 0)
		return err;

	sch_tree_lock(sch);

	/* convert from microseconds to pschedtime */
	if (tb[TCA_PIE_TARGET]) {
		/* target is in us */
		u32 target = nla_get_u32(tb[TCA_PIE_TARGET]);

		/* convert to pschedtime */
		q->params.target = PSCHED_NS2TICKS((u64)target * NSEC_PER_USEC);
	}

	/* tupdate is in jiffies */
	if (tb[TCA_PIE_TUPDATE])
		q->params.tupdate = usecs_to_jiffies(nla_get_u32(tb[TCA_PIE_TUPDATE]));

	if (tb[TCA_PIE_LIMIT]) {
		u32 limit = nla_get_u32(tb[TCA_PIE_LIMIT]);

		q->params.limit = limit;
		sch->limit = limit;
	}

	if (tb[TCA_PIE_ALPHA])
		q->params.alpha = nla_get_u32(tb[TCA_PIE_ALPHA]);

	if (tb[TCA_PIE_BETA])
		q->params.beta = nla_get_u32(tb[TCA_PIE_BETA]);

	if (tb[TCA_PIE_ECN])
		q->params.ecn = nla_get_u32(tb[TCA_PIE_ECN]);

	if (tb[TCA_PIE_BYTEMODE])
		q->params.bytemode = nla_get_u32(tb[TCA_PIE_BYTEMODE]);

	/* Drop excess packets if new limit is lower */
	qlen = sch->q.qlen;
	while (sch->q.qlen > sch->limit) {
		struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);

		dropped += qdisc_pkt_len(skb);
		qdisc_qstats_backlog_dec(sch, skb);
		rtnl_qdisc_drop(skb, sch);
	}
	qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, dropped);

	sch_tree_unlock(sch);
	return 0;
}

static void pie_process_dequeue(struct Qdisc *sch, struct sk_buff *skb)
{
	struct pie_sched_data *q = qdisc_priv(sch);
	int qlen = sch->qstats.backlog;	/* current queue size in bytes */

	/* If the current queue is about 10 packets or more and dq_count is
	 * unset, we have enough packets to calculate the drain rate. Save
	 * the current time as dq_tstamp and start a measurement cycle.
	 */
	if (qlen >= QUEUE_THRESHOLD && q->vars.dq_count == DQCOUNT_INVALID) {
		q->vars.dq_tstamp = psched_get_time();
		q->vars.dq_count = 0;
	}

	/* Calculate the average drain rate from this value. If the queue
	 * length has receded to a small value, viz. <= QUEUE_THRESHOLD bytes,
	 * reset dq_count to -1, as we no longer have enough packets to
	 * calculate the drain rate. The following if block is entered only
	 * when we have a substantial queue built up (QUEUE_THRESHOLD bytes
	 * or more), and we calculate the drain rate for the threshold here.
	 * dq_count is in bytes, the time difference in psched_time, hence
	 * the rate is in bytes/psched_time.
	 */
	if (q->vars.dq_count != DQCOUNT_INVALID) {
		q->vars.dq_count += skb->len;

		if (q->vars.dq_count >= QUEUE_THRESHOLD) {
			psched_time_t now = psched_get_time();
			u32 dtime = now - q->vars.dq_tstamp;
			u32 count = q->vars.dq_count << PIE_SCALE;

			if (dtime == 0)
				return;

			count = count / dtime;

			if (q->vars.avg_dq_rate == 0)
				q->vars.avg_dq_rate = count;
			else
				q->vars.avg_dq_rate =
				    (q->vars.avg_dq_rate -
				     (q->vars.avg_dq_rate >> 3)) + (count >> 3);

			/* If the queue has receded below the threshold, we
			 * hold on to the last drain rate calculated, else we
			 * reset dq_count to 0 to re-enter the if block when
			 * the next packet is dequeued
			 */
			if (qlen < QUEUE_THRESHOLD)
				q->vars.dq_count = DQCOUNT_INVALID;
			else {
				q->vars.dq_count = 0;
				q->vars.dq_tstamp = psched_get_time();
			}

			if (q->vars.burst_time > 0) {
				if (q->vars.burst_time > dtime)
					q->vars.burst_time -= dtime;
				else
					q->vars.burst_time = 0;
			}
		}
	}
}
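
/* A sketch of the drain-rate arithmetic above, with illustrative numbers:
 * once QUEUE_THRESHOLD (10000) bytes have been dequeued over dtime ticks,
 * count = (dq_count << PIE_SCALE) / dtime is the drain rate in bytes per
 * pschedtime tick, left-shifted by 8 for precision. The estimate is then
 * smoothed with an EWMA of weight 1/8:
 *
 *   avg_dq_rate = 7/8 * avg_dq_rate + 1/8 * count
 *
 * which is what (avg - (avg >> 3)) + (count >> 3) computes above.
 */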

static void calculate_probability(struct Qdisc *sch)
{
	struct pie_sched_data *q = qdisc_priv(sch);
	u32 qlen = sch->qstats.backlog;	/* queue size in bytes */
	psched_time_t qdelay = 0;	/* in pschedtime */
	psched_time_t qdelay_old = q->vars.qdelay;	/* in pschedtime */
	s32 delta = 0;		/* determines the change in probability */
	u32 oldprob;
	u32 alpha, beta;
	bool update_prob = true;

	q->vars.qdelay_old = q->vars.qdelay;

	if (q->vars.avg_dq_rate > 0)
		qdelay = (qlen << PIE_SCALE) / q->vars.avg_dq_rate;
	else
		qdelay = 0;

	/* If qdelay is zero and qlen is not, it means qlen is very small,
	 * less than dequeue_rate, so we do not update the probability in
	 * this round.
	 */
	if (qdelay == 0 && qlen != 0)
		update_prob = false;

	/* In the algorithm, alpha and beta are between 0 and 2 with a
	 * typical value for alpha of 0.125. In this implementation, we use
	 * values 0-32 passed from user space to represent this. Also, alpha
	 * and beta have a unit of HZ and need to be scaled before they can
	 * be used to update the probability. alpha/beta are updated locally
	 * below by 1) scaling them appropriately, 2) scaling down by 16 to
	 * come to the 0-2 range. Please see the paper for details.
	 *
	 * We scale alpha and beta differently depending on whether we are in
	 * light, medium or high dropping mode.
	 */
	if (q->vars.prob < MAX_PROB / 100) {
		alpha =
		    (q->params.alpha * (MAX_PROB / PSCHED_TICKS_PER_SEC)) >> 7;
		beta =
		    (q->params.beta * (MAX_PROB / PSCHED_TICKS_PER_SEC)) >> 7;
	} else if (q->vars.prob < MAX_PROB / 10) {
		alpha =
		    (q->params.alpha * (MAX_PROB / PSCHED_TICKS_PER_SEC)) >> 5;
		beta =
		    (q->params.beta * (MAX_PROB / PSCHED_TICKS_PER_SEC)) >> 5;
	} else {
		alpha =
		    (q->params.alpha * (MAX_PROB / PSCHED_TICKS_PER_SEC)) >> 4;
		beta =
		    (q->params.beta * (MAX_PROB / PSCHED_TICKS_PER_SEC)) >> 4;
	}

	/* alpha and beta should be between 0 and 32, in multiples of 1/16 */
	delta += alpha * ((qdelay - q->params.target));
	delta += beta * ((qdelay - qdelay_old));

	oldprob = q->vars.prob;

	/* to ensure we increase probability in steps of no more than 2% */
	if (delta > (s32) (MAX_PROB / (100 / 2)) &&
	    q->vars.prob >= MAX_PROB / 10)
		delta = (MAX_PROB / 100) * 2;

	/* Non-linear drop:
	 * Tune drop probability to increase quickly for high delays
	 * (>= 250ms). 250ms is derived through experiments and provides
	 * error protection.
	 */
	if (qdelay > (PSCHED_NS2TICKS(250 * NSEC_PER_MSEC)))
		delta += MAX_PROB / (100 / 2);

	q->vars.prob += delta;

	if (delta > 0) {
		/* prevent overflow */
		if (q->vars.prob < oldprob) {
			q->vars.prob = MAX_PROB;
			/* Prevent normalization error. If probability is at
			 * maximum value already, we normalize it here, and
			 * skip the check to do a non-linear drop in the next
			 * section.
			 */
			update_prob = false;
		}
	} else {
		/* prevent underflow */
		if (q->vars.prob > oldprob)
			q->vars.prob = 0;
	}

	/* Non-linear drop in probability: Reduce drop probability quickly if
	 * the delay has been 0 for 2 consecutive Tupdate periods.
	 */
	if ((qdelay == 0) && (qdelay_old == 0) && update_prob)
		q->vars.prob = (q->vars.prob * 98) / 100;

	q->vars.qdelay = qdelay;
	q->vars.qlen_old = qlen;

	/* We restart the measurement cycle if the following conditions are met
	 * 1. If the delay has been low for 2 consecutive Tupdate periods
	 * 2. Calculated drop probability is zero
	 * 3. We have at least one estimate for the avg_dq_rate, i.e. it
	 *    is a non-zero value
	 */
	if ((q->vars.qdelay < q->params.target / 2) &&
	    (q->vars.qdelay_old < q->params.target / 2) &&
	    (q->vars.prob == 0) &&
	    (q->vars.avg_dq_rate > 0))
		pie_vars_init(&q->vars);
}
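
/* The update above is the PI control law at the heart of PIE:
 *
 *   prob += alpha * (qdelay - target) + beta * (qdelay - qdelay_old)
 *
 * With the defaults (alpha = 2, beta = 20, in multiples of 1/16) this
 * corresponds to alpha = 0.125 and beta = 1.25 in the units of the
 * algorithm: the first term pulls the delay toward the target, the
 * second damps the trend in the delay.
 */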

static void pie_timer(unsigned long arg)
{
	struct Qdisc *sch = (struct Qdisc *)arg;
	struct pie_sched_data *q = qdisc_priv(sch);
	spinlock_t *root_lock = qdisc_lock(qdisc_root_sleeping(sch));

	spin_lock(root_lock);
	calculate_probability(sch);

	/* reset the timer to fire after 'tupdate'. tupdate is in jiffies. */
	if (q->params.tupdate)
		mod_timer(&q->adapt_timer, jiffies + q->params.tupdate);
	spin_unlock(root_lock);
}

static int pie_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct pie_sched_data *q = qdisc_priv(sch);

	pie_params_init(&q->params);
	pie_vars_init(&q->vars);
	sch->limit = q->params.limit;

	setup_timer(&q->adapt_timer, pie_timer, (unsigned long)sch);

	if (opt) {
		int err = pie_change(sch, opt);

		if (err)
			return err;
	}

	mod_timer(&q->adapt_timer, jiffies + HZ / 2);
	return 0;
}

static int pie_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct pie_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts;

	opts = nla_nest_start(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;

	/* convert target from pschedtime to us */
	if (nla_put_u32(skb, TCA_PIE_TARGET,
			((u32) PSCHED_TICKS2NS(q->params.target)) /
			NSEC_PER_USEC) ||
	    nla_put_u32(skb, TCA_PIE_LIMIT, sch->limit) ||
	    nla_put_u32(skb, TCA_PIE_TUPDATE, jiffies_to_usecs(q->params.tupdate)) ||
	    nla_put_u32(skb, TCA_PIE_ALPHA, q->params.alpha) ||
	    nla_put_u32(skb, TCA_PIE_BETA, q->params.beta) ||
	    nla_put_u32(skb, TCA_PIE_ECN, q->params.ecn) ||
	    nla_put_u32(skb, TCA_PIE_BYTEMODE, q->params.bytemode))
		goto nla_put_failure;

	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -1;
}
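
/* avg_dq_rate is stored as (bytes per pschedtime tick) << PIE_SCALE, so
 * multiplying by PSCHED_TICKS_PER_SEC and shifting right by PIE_SCALE
 * below yields the rate in bytes per second for user-space stats.
 */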

static int pie_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct pie_sched_data *q = qdisc_priv(sch);
	struct tc_pie_xstats st = {
		.prob = q->vars.prob,
		.delay = ((u32) PSCHED_TICKS2NS(q->vars.qdelay)) /
			 NSEC_PER_USEC,
		/* unscale and return dq_rate in bytes per sec */
		.avg_dq_rate = q->vars.avg_dq_rate *
			       (PSCHED_TICKS_PER_SEC) >> PIE_SCALE,
		.packets_in = q->stats.packets_in,
		.overlimit = q->stats.overlimit,
		.maxq = q->stats.maxq,
		.dropped = q->stats.dropped,
		.ecn_mark = q->stats.ecn_mark,
	};

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

static struct sk_buff *pie_qdisc_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb;

	skb = qdisc_dequeue_head(sch);
	if (!skb)
		return NULL;

	pie_process_dequeue(sch, skb);
	return skb;
}

static void pie_reset(struct Qdisc *sch)
{
	struct pie_sched_data *q = qdisc_priv(sch);

	qdisc_reset_queue(sch);
	pie_vars_init(&q->vars);
}

static void pie_destroy(struct Qdisc *sch)
{
	struct pie_sched_data *q = qdisc_priv(sch);

	/* Clearing tupdate stops pie_timer() from re-arming itself before
	 * the timer is removed.
	 */
	q->params.tupdate = 0;
	del_timer_sync(&q->adapt_timer);
}

static struct Qdisc_ops pie_qdisc_ops __read_mostly = {
	.id = "pie",
	.priv_size = sizeof(struct pie_sched_data),
	.enqueue = pie_qdisc_enqueue,
	.dequeue = pie_qdisc_dequeue,
	.peek = qdisc_peek_dequeued,
	.init = pie_init,
	.destroy = pie_destroy,
	.reset = pie_reset,
	.change = pie_change,
	.dump = pie_dump,
	.dump_stats = pie_dump_stats,
	.owner = THIS_MODULE,
};

static int __init pie_module_init(void)
{
	return register_qdisc(&pie_qdisc_ops);
}

static void __exit pie_module_exit(void)
{
	unregister_qdisc(&pie_qdisc_ops);
}

module_init(pie_module_init);
module_exit(pie_module_exit);
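
/* Module metadata. The qdisc registers under the id "pie"; when built as a
 * module, the kernel typically auto-loads it on first use, since qdisc
 * modules are requested by "sch_<id>" name.
 */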
Prabhu"); 568 MODULE_LICENSE("GPL"); 569