/*
 * net/sched/sch_netem.c	Network emulator
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License.
 *
 *		Many of the algorithms and ideas for this came from
 *		NIST Net which is not copyrighted.
 *
 * Authors:	Stephen Hemminger <shemminger@osdl.org>
 *		Catalin(ux aka Dino) BOIE <catab at umbrella dot ro>
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/rtnetlink.h>
#include <linux/reciprocal_div.h>
#include <linux/rbtree.h>

#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/inet_ecn.h>

#define VERSION "1.3"

/*	Network Emulation Queuing algorithm.
	====================================

	Sources: [1] Mark Carson, Darrin Santay, "NIST Net - A Linux-based
		 Network Emulation Tool"
		 [2] Luigi Rizzo, DummyNet for FreeBSD

	 ----------------------------------------------------------------

	 This started out as a simple way to delay outgoing packets to
	 test TCP but has grown to include most of the functionality
	 of a full blown network emulator like NISTnet. It can delay
	 packets and add random jitter (and correlation). The random
	 distribution can be loaded from a table as well to provide
	 normal, Pareto, or experimental curves. Packet loss,
	 duplication, and reordering can also be emulated.

	 This qdisc does not do classification; that can be handled by
	 layering other disciplines. It does not need to do bandwidth
	 control either, since that can be handled by using token
	 bucket or other rate control.

     Correlated Loss Generator models

	Added generation of correlated loss according to the
	"Gilbert-Elliot" model, a 4-state Markov model.

	References:
	[1] NetemCLG Home http://netgroup.uniroma2.it/NetemCLG
	[2] S. Salsano, F. Ludovici, A. Ordine, "Definition of a general
	and intuitive loss model for packet networks and its implementation
	in the Netem module in the Linux kernel", available in [1]

	Authors: Stefano Salsano <stefano.salsano at uniroma2.it>
		 Fabio Ludovici <fabio.ludovici at yahoo.it>
*/
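/* Example configuration via iproute2's tc (for illustration only; the
 * exact option set depends on the userspace version):
 *
 *	tc qdisc add dev eth0 root netem delay 100ms 10ms 25% loss 0.3%
 *
 * adds 100ms of delay with +/-10ms of 25%-correlated jitter and drops
 * roughly 0.3% of packets; the netlink attributes parsed below carry
 * those option values into this module.
 */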
struct disttable {
	u32  size;
	s16 table[0];
};

struct netem_sched_data {
	/* internal t(ime)fifo qdisc uses t_root and sch->limit */
	struct rb_root t_root;

	/* optional qdisc for classful handling (NULL at netem init) */
	struct Qdisc	*qdisc;

	struct qdisc_watchdog watchdog;

	s64 latency;
	s64 jitter;

	u32 loss;
	u32 ecn;
	u32 limit;
	u32 counter;
	u32 gap;
	u32 duplicate;
	u32 reorder;
	u32 corrupt;
	u64 rate;
	s32 packet_overhead;
	u32 cell_size;
	struct reciprocal_value cell_size_reciprocal;
	s32 cell_overhead;

	struct crndstate {
		u32 last;
		u32 rho;
	} delay_cor, loss_cor, dup_cor, reorder_cor, corrupt_cor;

	struct disttable *delay_dist;

	enum {
		CLG_RANDOM,
		CLG_4_STATES,
		CLG_GILB_ELL,
	} loss_model;

	enum {
		TX_IN_GAP_PERIOD = 1,
		TX_IN_BURST_PERIOD,
		LOST_IN_GAP_PERIOD,
		LOST_IN_BURST_PERIOD,
	} _4_state_model;

	enum {
		GOOD_STATE = 1,
		BAD_STATE,
	} GE_state_model;

	/* Correlated Loss Generation models */
	struct clgstate {
		/* state of the Markov chain */
		u8 state;

		/* 4-states and Gilbert-Elliot models */
		u32 a1;	/* p13 for 4-states or p for GE */
		u32 a2;	/* p31 for 4-states or r for GE */
		u32 a3;	/* p32 for 4-states or h for GE */
		u32 a4;	/* p14 for 4-states or 1-k for GE */
		u32 a5; /* p23 used only in 4-states */
	} clg;

	struct tc_netem_slot slot_config;
	struct slotstate {
		u64 slot_next;
		s32 packets_left;
		s32 bytes_left;
	} slot;

	struct disttable *slot_dist;
};

/* Time stamp put into socket buffer control block
 * Only valid when skbs are in our internal t(ime)fifo queue.
 *
 * As skb->rbnode uses the same storage as skb->next, skb->prev and skb->tstamp,
 * and skb->next & skb->prev are scratch space for a qdisc,
 * we save skb->tstamp value in skb->cb[] before destroying it.
 */
struct netem_skb_cb {
	u64		time_to_send;
};

static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb)
{
	/* we assume we can use skb next/prev/tstamp as storage for rb_node */
	qdisc_cb_private_validate(skb, sizeof(struct netem_skb_cb));
	return (struct netem_skb_cb *)qdisc_skb_cb(skb)->data;
}

/* init_crandom - initialize correlated random number generator
 * Use entropy source for initial seed.
 */
static void init_crandom(struct crndstate *state, unsigned long rho)
{
	state->rho = rho;
	state->last = prandom_u32();
}

/* get_crandom - correlated random number generator
 * Next number depends on last value.
 * rho is scaled to avoid floating point.
 */
static u32 get_crandom(struct crndstate *state)
{
	u64 value, rho;
	unsigned long answer;

	if (!state || state->rho == 0)	/* no correlation */
		return prandom_u32();

	value = prandom_u32();
	rho = (u64)state->rho + 1;
	answer = (value * ((1ull<<32) - rho) + state->last * rho) >> 32;
	state->last = answer;
	return answer;
}
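/* For illustration: rho is the correlation coefficient scaled to 2^32, so
 * with rho ~= 0.25 * 2^32 each new value is roughly
 * 0.75 * (fresh random draw) + 0.25 * (previous value); the fixed-point
 * expression above computes exactly that without floating point.
 */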
/* loss_4state - 4-state model loss generator
 * Generates losses according to the 4-state Markov chain adopted in
 * the GI (General and Intuitive) loss model.
 */
static bool loss_4state(struct netem_sched_data *q)
{
	struct clgstate *clg = &q->clg;
	u32 rnd = prandom_u32();

	/*
	 * Makes a comparison between rnd and the transition
	 * probabilities outgoing from the current state, then decides the
	 * next state and if the next packet has to be transmitted or lost.
	 * The four states correspond to:
	 *   TX_IN_GAP_PERIOD => successfully transmitted packets within a gap period
	 *   LOST_IN_BURST_PERIOD => isolated losses within a gap period
	 *   LOST_IN_GAP_PERIOD => lost packets within a burst period
	 *   TX_IN_BURST_PERIOD => successfully transmitted packets within a burst period
	 */
	switch (clg->state) {
	case TX_IN_GAP_PERIOD:
		if (rnd < clg->a4) {
			clg->state = LOST_IN_BURST_PERIOD;
			return true;
		} else if (clg->a4 < rnd && rnd < clg->a1 + clg->a4) {
			clg->state = LOST_IN_GAP_PERIOD;
			return true;
		} else if (clg->a1 + clg->a4 < rnd) {
			clg->state = TX_IN_GAP_PERIOD;
		}

		break;
	case TX_IN_BURST_PERIOD:
		if (rnd < clg->a5) {
			clg->state = LOST_IN_GAP_PERIOD;
			return true;
		} else {
			clg->state = TX_IN_BURST_PERIOD;
		}

		break;
	case LOST_IN_GAP_PERIOD:
		if (rnd < clg->a3)
			clg->state = TX_IN_BURST_PERIOD;
		else if (clg->a3 < rnd && rnd < clg->a2 + clg->a3) {
			clg->state = TX_IN_GAP_PERIOD;
		} else if (clg->a2 + clg->a3 < rnd) {
			clg->state = LOST_IN_GAP_PERIOD;
			return true;
		}
		break;
	case LOST_IN_BURST_PERIOD:
		clg->state = TX_IN_GAP_PERIOD;
		break;
	}

	return false;
}
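/* For illustration: the a1..a5 thresholds are probabilities scaled to 2^32.
 * With a4 (p14) == 0 and a1 (p13) ~= 0.05 * 2^32, a packet sent while in
 * TX_IN_GAP_PERIOD is lost (and the chain moves to LOST_IN_GAP_PERIOD)
 * about once every 20 packets on average.
 */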
/* loss_gilb_ell - Gilbert-Elliot model loss generator
 * Generates losses according to the Gilbert-Elliot loss model or
 * its special cases (Gilbert or Simple Gilbert)
 *
 * Makes a comparison between random number and the transition
 * probabilities outgoing from the current state, then decides the
 * next state. A second random number is extracted and the comparison
 * with the loss probability of the current state decides if the next
 * packet will be transmitted or lost.
 */
static bool loss_gilb_ell(struct netem_sched_data *q)
{
	struct clgstate *clg = &q->clg;

	switch (clg->state) {
	case GOOD_STATE:
		if (prandom_u32() < clg->a1)
			clg->state = BAD_STATE;
		if (prandom_u32() < clg->a4)
			return true;
		break;
	case BAD_STATE:
		if (prandom_u32() < clg->a2)
			clg->state = GOOD_STATE;
		if (prandom_u32() > clg->a3)
			return true;
	}

	return false;
}

static bool loss_event(struct netem_sched_data *q)
{
	switch (q->loss_model) {
	case CLG_RANDOM:
		/* Random packet drop 0 => none, ~0 => all */
		return q->loss && q->loss >= get_crandom(&q->loss_cor);

	case CLG_4_STATES:
		/* 4state loss model algorithm (used also for GI model)
		 * Extracts a value from the markov 4 state loss generator,
		 * if it is 1 drops a packet and if needed writes the event in
		 * the kernel logs
		 */
		return loss_4state(q);

	case CLG_GILB_ELL:
		/* Gilbert-Elliot loss model algorithm
		 * Extracts a value from the Gilbert-Elliot loss generator,
		 * if it is 1 drops a packet and if needed writes the event in
		 * the kernel logs
		 */
		return loss_gilb_ell(q);
	}

	return false;	/* not reached */
}


/* tabledist - return a pseudo-randomly distributed value with mean mu and
 * std deviation sigma.  Uses table lookup to approximate the desired
 * distribution, and a uniformly-distributed pseudo-random source.
 */
static s64 tabledist(s64 mu, s32 sigma,
		     struct crndstate *state,
		     const struct disttable *dist)
{
	s64 x;
	long t;
	u32 rnd;

	if (sigma == 0)
		return mu;

	rnd = get_crandom(state);

	/* default uniform distribution */
	if (dist == NULL)
		return ((rnd % (2 * sigma)) + mu) - sigma;

	t = dist->table[rnd % dist->size];
	x = (sigma % NETEM_DIST_SCALE) * t;
	if (x >= 0)
		x += NETEM_DIST_SCALE/2;
	else
		x -= NETEM_DIST_SCALE/2;

	return x / NETEM_DIST_SCALE + (sigma / NETEM_DIST_SCALE) * t + mu;
}

static u64 packet_time_ns(u64 len, const struct netem_sched_data *q)
{
	len += q->packet_overhead;

	if (q->cell_size) {
		u32 cells = reciprocal_divide(len, q->cell_size_reciprocal);

		if (len > cells * q->cell_size) /* extra cell needed for remainder */
			cells++;
		len = cells * (q->cell_size + q->cell_overhead);
	}

	return div64_u64(len * NSEC_PER_SEC, q->rate);
}

static void tfifo_reset(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct rb_node *p = rb_first(&q->t_root);

	while (p) {
		struct sk_buff *skb = rb_to_skb(p);

		p = rb_next(p);
		rb_erase(&skb->rbnode, &q->t_root);
		rtnl_kfree_skbs(skb, skb);
	}
}

static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	u64 tnext = netem_skb_cb(nskb)->time_to_send;
	struct rb_node **p = &q->t_root.rb_node, *parent = NULL;

	while (*p) {
		struct sk_buff *skb;

		parent = *p;
		skb = rb_to_skb(parent);
		if (tnext >= netem_skb_cb(skb)->time_to_send)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&nskb->rbnode, parent, p);
	rb_insert_color(&nskb->rbnode, &q->t_root);
	sch->q.qlen++;
}
/* netem can't properly corrupt a megapacket (like we get from GSO), so when
 * we statistically choose to corrupt one, we instead segment it, returning
 * the first packet to be corrupted, and re-enqueue the remaining frames
 */
static struct sk_buff *netem_segment(struct sk_buff *skb, struct Qdisc *sch,
				     struct sk_buff **to_free)
{
	struct sk_buff *segs;
	netdev_features_t features = netif_skb_features(skb);

	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);

	if (IS_ERR_OR_NULL(segs)) {
		qdisc_drop(skb, sch, to_free);
		return NULL;
	}
	consume_skb(skb);
	return segs;
}

/*
 * Insert one skb into qdisc.
 * Note: parent depends on return value to account for queue length.
 *	NET_XMIT_DROP: queue length didn't change.
 *	NET_XMIT_SUCCESS: one skb was queued.
 */
static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			 struct sk_buff **to_free)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	/* We don't fill cb now as skb_unshare() may invalidate it */
	struct netem_skb_cb *cb;
	struct sk_buff *skb2;
	struct sk_buff *segs = NULL;
	unsigned int len = 0, last_len, prev_len = qdisc_pkt_len(skb);
	int nb = 0;
	int count = 1;
	int rc = NET_XMIT_SUCCESS;

	/* Random duplication */
	if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
		++count;

	/* Drop packet? */
	if (loss_event(q)) {
		if (q->ecn && INET_ECN_set_ce(skb))
			qdisc_qstats_drop(sch); /* mark packet */
		else
			--count;
	}
	if (count == 0) {
		qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	}

	/* If a delay is expected, orphan the skb. (orphaning usually takes
	 * place at TX completion time, so _before_ the link transit delay)
	 */
	if (q->latency || q->jitter || q->rate)
		skb_orphan_partial(skb);

	/*
	 * If we need to duplicate packet, then re-insert at top of the
	 * qdisc tree, since parent queuer expects that only one
	 * skb will be queued.
	 */
	if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
		struct Qdisc *rootq = qdisc_root(sch);
		u32 dupsave = q->duplicate; /* prevent duplicating a dup... */

		q->duplicate = 0;
		rootq->enqueue(skb2, rootq, to_free);
		q->duplicate = dupsave;
	}

	/*
	 * Randomized packet corruption.
	 * Make copy if needed since we are modifying
	 * If packet is going to be hardware checksummed, then
	 * do it now in software before we mangle it.
	 */
	if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
		if (skb_is_gso(skb)) {
			segs = netem_segment(skb, sch, to_free);
			if (!segs)
				return NET_XMIT_DROP;
		} else {
			segs = skb;
		}

		skb = segs;
		segs = segs->next;

		skb = skb_unshare(skb, GFP_ATOMIC);
		if (unlikely(!skb)) {
			qdisc_qstats_drop(sch);
			goto finish_segs;
		}
		if (skb->ip_summed == CHECKSUM_PARTIAL &&
		    skb_checksum_help(skb)) {
			qdisc_drop(skb, sch, to_free);
			goto finish_segs;
		}

		skb->data[prandom_u32() % skb_headlen(skb)] ^=
			1<<(prandom_u32() % 8);
	}

	if (unlikely(sch->q.qlen >= sch->limit))
		return qdisc_drop_all(skb, sch, to_free);

	qdisc_qstats_backlog_inc(sch, skb);

	cb = netem_skb_cb(skb);
	if (q->gap == 0 ||		/* not doing reordering */
	    q->counter < q->gap - 1 ||	/* inside last reordering gap */
	    q->reorder < get_crandom(&q->reorder_cor)) {
		u64 now;
		s64 delay;

		delay = tabledist(q->latency, q->jitter,
				  &q->delay_cor, q->delay_dist);

		now = ktime_get_ns();

		if (q->rate) {
			struct netem_skb_cb *last = NULL;

			if (sch->q.tail)
				last = netem_skb_cb(sch->q.tail);
			if (q->t_root.rb_node) {
				struct sk_buff *t_skb;
				struct netem_skb_cb *t_last;

				t_skb = skb_rb_last(&q->t_root);
				t_last = netem_skb_cb(t_skb);
				if (!last ||
				    t_last->time_to_send > last->time_to_send) {
					last = t_last;
				}
			}

			if (last) {
				/*
				 * Last packet in queue is reference point (now),
				 * calculate this time bonus and subtract
				 * from delay.
				 */
				delay -= last->time_to_send - now;
				delay = max_t(s64, 0, delay);
				now = last->time_to_send;
			}

			delay += packet_time_ns(qdisc_pkt_len(skb), q);
		}

		cb->time_to_send = now + delay;
		++q->counter;
		tfifo_enqueue(skb, sch);
	} else {
		/*
		 * Do re-ordering by putting one out of N packets at the front
		 * of the queue.
		 */
		cb->time_to_send = ktime_get_ns();
		q->counter = 0;

		__qdisc_enqueue_head(skb, &sch->q);
		sch->qstats.requeues++;
	}

finish_segs:
	if (segs) {
		while (segs) {
			skb2 = segs->next;
			skb_mark_not_on_list(segs);
			qdisc_skb_cb(segs)->pkt_len = segs->len;
			last_len = segs->len;
			rc = qdisc_enqueue(segs, sch, to_free);
			if (rc != NET_XMIT_SUCCESS) {
				if (net_xmit_drop_count(rc))
					qdisc_qstats_drop(sch);
			} else {
				nb++;
				len += last_len;
			}
			segs = skb2;
		}
		sch->q.qlen += nb;
		if (nb > 1)
			qdisc_tree_reduce_backlog(sch, 1 - nb, prev_len - len);
	}
	return NET_XMIT_SUCCESS;
}
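/* For illustration: slotting can be used to approximate shared or
 * aggregating media. Delayed packets are released in bursts of at most
 * slot_config.max_packets packets or max_bytes bytes; once a slot's budget
 * is exhausted (or the slot expires), the next slot opens after a delay
 * drawn uniformly from [min_delay, max_delay], or from the optional slot
 * distribution table.
 */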
/* Set up the next delivery slot: pick its start time and reset the
 * per-slot byte and packet budgets.
 */

static void get_slot_next(struct netem_sched_data *q, u64 now)
{
	s64 next_delay;

	if (!q->slot_dist)
		next_delay = q->slot_config.min_delay +
				(prandom_u32() *
				 (q->slot_config.max_delay -
				  q->slot_config.min_delay) >> 32);
	else
		next_delay = tabledist(q->slot_config.dist_delay,
				       (s32)(q->slot_config.dist_jitter),
				       NULL, q->slot_dist);

	q->slot.slot_next = now + next_delay;
	q->slot.packets_left = q->slot_config.max_packets;
	q->slot.bytes_left = q->slot_config.max_bytes;
}

static struct sk_buff *netem_dequeue(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	struct rb_node *p;

tfifo_dequeue:
	skb = __qdisc_dequeue_head(&sch->q);
	if (skb) {
		qdisc_qstats_backlog_dec(sch, skb);
deliver:
		qdisc_bstats_update(sch, skb);
		return skb;
	}
	p = rb_first(&q->t_root);
	if (p) {
		u64 time_to_send;
		u64 now = ktime_get_ns();

		skb = rb_to_skb(p);

		/* if more time remaining? */
		time_to_send = netem_skb_cb(skb)->time_to_send;
		if (q->slot.slot_next && q->slot.slot_next < time_to_send)
			get_slot_next(q, now);

		if (time_to_send <= now && q->slot.slot_next <= now) {
			rb_erase(p, &q->t_root);
			sch->q.qlen--;
			qdisc_qstats_backlog_dec(sch, skb);
			skb->next = NULL;
			skb->prev = NULL;
			/* skb->dev shares skb->rbnode area,
			 * we need to restore its value.
			 */
			skb->dev = qdisc_dev(sch);

#ifdef CONFIG_NET_CLS_ACT
			/*
			 * If it's at ingress let's pretend the delay is
			 * from the network (tstamp will be updated).
			 */
			if (skb->tc_redirected && skb->tc_from_ingress)
				skb->tstamp = 0;
#endif

			if (q->slot.slot_next) {
				q->slot.packets_left--;
				q->slot.bytes_left -= qdisc_pkt_len(skb);
				if (q->slot.packets_left <= 0 ||
				    q->slot.bytes_left <= 0)
					get_slot_next(q, now);
			}

			if (q->qdisc) {
				unsigned int pkt_len = qdisc_pkt_len(skb);
				struct sk_buff *to_free = NULL;
				int err;

				err = qdisc_enqueue(skb, q->qdisc, &to_free);
				kfree_skb_list(to_free);
				if (err != NET_XMIT_SUCCESS &&
				    net_xmit_drop_count(err)) {
					qdisc_qstats_drop(sch);
					qdisc_tree_reduce_backlog(sch, 1,
								  pkt_len);
				}
				goto tfifo_dequeue;
			}
			goto deliver;
		}

		if (q->qdisc) {
			skb = q->qdisc->ops->dequeue(q->qdisc);
			if (skb)
				goto deliver;
		}

		qdisc_watchdog_schedule_ns(&q->watchdog,
					   max(time_to_send,
					       q->slot.slot_next));
	}

	if (q->qdisc) {
		skb = q->qdisc->ops->dequeue(q->qdisc);
		if (skb)
			goto deliver;
	}
	return NULL;
}

static void netem_reset(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_reset_queue(sch);
	tfifo_reset(sch);
	if (q->qdisc)
		qdisc_reset(q->qdisc);
	qdisc_watchdog_cancel(&q->watchdog);
}

static void dist_free(struct disttable *d)
{
	kvfree(d);
}

/*
 * Distribution data is a variable size payload containing
 * signed 16 bit values.
 */
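/* For illustration: each table entry is a sample of the desired delay
 * distribution, zero-centred and scaled by NETEM_DIST_SCALE. tabledist()
 * picks an entry at (correlated) random and multiplies it by sigma, so a
 * table built from a normal distribution yields delays of roughly
 * mu +/- sigma. iproute2 ships pre-built normal, pareto and paretonormal
 * tables that are loaded through TCA_NETEM_DELAY_DIST.
 */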
static int get_dist_table(struct Qdisc *sch, struct disttable **tbl,
			  const struct nlattr *attr)
{
	size_t n = nla_len(attr)/sizeof(__s16);
	const __s16 *data = nla_data(attr);
	spinlock_t *root_lock;
	struct disttable *d;
	int i;

	if (n > NETEM_DIST_MAX)
		return -EINVAL;

	d = kvmalloc(sizeof(struct disttable) + n * sizeof(s16), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	d->size = n;
	for (i = 0; i < n; i++)
		d->table[i] = data[i];

	root_lock = qdisc_root_sleeping_lock(sch);

	spin_lock_bh(root_lock);
	swap(*tbl, d);
	spin_unlock_bh(root_lock);

	dist_free(d);
	return 0;
}

static void get_slot(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_slot *c = nla_data(attr);

	q->slot_config = *c;
	if (q->slot_config.max_packets == 0)
		q->slot_config.max_packets = INT_MAX;
	if (q->slot_config.max_bytes == 0)
		q->slot_config.max_bytes = INT_MAX;
	q->slot.packets_left = q->slot_config.max_packets;
	q->slot.bytes_left = q->slot_config.max_bytes;
	if (q->slot_config.min_delay | q->slot_config.max_delay |
	    q->slot_config.dist_jitter)
		q->slot.slot_next = ktime_get_ns();
	else
		q->slot.slot_next = 0;
}

static void get_correlation(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_corr *c = nla_data(attr);

	init_crandom(&q->delay_cor, c->delay_corr);
	init_crandom(&q->loss_cor, c->loss_corr);
	init_crandom(&q->dup_cor, c->dup_corr);
}

static void get_reorder(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_reorder *r = nla_data(attr);

	q->reorder = r->probability;
	init_crandom(&q->reorder_cor, r->correlation);
}

static void get_corrupt(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_corrupt *r = nla_data(attr);

	q->corrupt = r->probability;
	init_crandom(&q->corrupt_cor, r->correlation);
}

static void get_rate(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_rate *r = nla_data(attr);

	q->rate = r->rate;
	q->packet_overhead = r->packet_overhead;
	q->cell_size = r->cell_size;
	q->cell_overhead = r->cell_overhead;
	if (q->cell_size)
		q->cell_size_reciprocal = reciprocal_value(q->cell_size);
	else
		q->cell_size_reciprocal = (struct reciprocal_value) { 0 };
}
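/* For illustration: with rate = 125000 (bytes per second, i.e. 1 Mbit/s),
 * cell_size = 48 and cell_overhead = 5 (roughly ATM-style framing), a
 * 1000 byte packet is rounded up to 21 cells of 53 bytes = 1113 bytes,
 * so packet_time_ns() charges 1113 * 10^9 / 125000 ns ~= 8.9 ms of
 * transmission time on top of any configured delay.
 */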
static int get_loss_clg(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct nlattr *la;
	int rem;

	nla_for_each_nested(la, attr, rem) {
		u16 type = nla_type(la);

		switch (type) {
		case NETEM_LOSS_GI: {
			const struct tc_netem_gimodel *gi = nla_data(la);

			if (nla_len(la) < sizeof(struct tc_netem_gimodel)) {
				pr_info("netem: incorrect gi model size\n");
				return -EINVAL;
			}

			q->loss_model = CLG_4_STATES;

			q->clg.state = TX_IN_GAP_PERIOD;
			q->clg.a1 = gi->p13;
			q->clg.a2 = gi->p31;
			q->clg.a3 = gi->p32;
			q->clg.a4 = gi->p14;
			q->clg.a5 = gi->p23;
			break;
		}

		case NETEM_LOSS_GE: {
			const struct tc_netem_gemodel *ge = nla_data(la);

			if (nla_len(la) < sizeof(struct tc_netem_gemodel)) {
				pr_info("netem: incorrect ge model size\n");
				return -EINVAL;
			}

			q->loss_model = CLG_GILB_ELL;
			q->clg.state = GOOD_STATE;
			q->clg.a1 = ge->p;
			q->clg.a2 = ge->r;
			q->clg.a3 = ge->h;
			q->clg.a4 = ge->k1;
			break;
		}

		default:
			pr_info("netem: unknown loss type %u\n", type);
			return -EINVAL;
		}
	}

	return 0;
}

static const struct nla_policy netem_policy[TCA_NETEM_MAX + 1] = {
	[TCA_NETEM_CORR]	= { .len = sizeof(struct tc_netem_corr) },
	[TCA_NETEM_REORDER]	= { .len = sizeof(struct tc_netem_reorder) },
	[TCA_NETEM_CORRUPT]	= { .len = sizeof(struct tc_netem_corrupt) },
	[TCA_NETEM_RATE]	= { .len = sizeof(struct tc_netem_rate) },
	[TCA_NETEM_LOSS]	= { .type = NLA_NESTED },
	[TCA_NETEM_ECN]		= { .type = NLA_U32 },
	[TCA_NETEM_RATE64]	= { .type = NLA_U64 },
	[TCA_NETEM_LATENCY64]	= { .type = NLA_S64 },
	[TCA_NETEM_JITTER64]	= { .type = NLA_S64 },
	[TCA_NETEM_SLOT]	= { .len = sizeof(struct tc_netem_slot) },
};

static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
		      const struct nla_policy *policy, int len)
{
	int nested_len = nla_len(nla) - NLA_ALIGN(len);

	if (nested_len < 0) {
		pr_info("netem: invalid attributes len %d\n", nested_len);
		return -EINVAL;
	}

	if (nested_len >= nla_attr_size(0))
		return nla_parse(tb, maxtype, nla_data(nla) + NLA_ALIGN(len),
				 nested_len, policy, NULL);

	memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
	return 0;
}

/* Parse netlink message to set options */
static int netem_change(struct Qdisc *sch, struct nlattr *opt,
			struct netlink_ext_ack *extack)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_NETEM_MAX + 1];
	struct tc_netem_qopt *qopt;
	struct clgstate old_clg;
	int old_loss_model = CLG_RANDOM;
	int ret;

	if (opt == NULL)
		return -EINVAL;

	qopt = nla_data(opt);
	ret = parse_attr(tb, TCA_NETEM_MAX, opt, netem_policy, sizeof(*qopt));
	if (ret < 0)
		return ret;

	/* backup q->clg and q->loss_model */
	old_clg = q->clg;
	old_loss_model = q->loss_model;

	if (tb[TCA_NETEM_LOSS]) {
		ret = get_loss_clg(q, tb[TCA_NETEM_LOSS]);
		if (ret) {
			q->loss_model = old_loss_model;
			return ret;
		}
	} else {
		q->loss_model = CLG_RANDOM;
	}

	if (tb[TCA_NETEM_DELAY_DIST]) {
		ret = get_dist_table(sch, &q->delay_dist,
				     tb[TCA_NETEM_DELAY_DIST]);
		if (ret)
			goto get_table_failure;
	}

	if (tb[TCA_NETEM_SLOT_DIST]) {
		ret = get_dist_table(sch, &q->slot_dist,
				     tb[TCA_NETEM_SLOT_DIST]);
		if (ret)
			goto get_table_failure;
	}

	sch->limit = qopt->limit;

	q->latency = PSCHED_TICKS2NS(qopt->latency);
	q->jitter = PSCHED_TICKS2NS(qopt->jitter);
	q->limit = qopt->limit;
	q->gap = qopt->gap;
	q->counter = 0;
	q->loss = qopt->loss;
	q->duplicate = qopt->duplicate;
	/* for compatibility with earlier versions.
	 * if gap is set, need to assume 100% probability
	 */
	if (q->gap)
		q->reorder = ~0;

	if (tb[TCA_NETEM_CORR])
		get_correlation(q, tb[TCA_NETEM_CORR]);

	if (tb[TCA_NETEM_REORDER])
		get_reorder(q, tb[TCA_NETEM_REORDER]);

	if (tb[TCA_NETEM_CORRUPT])
		get_corrupt(q, tb[TCA_NETEM_CORRUPT]);

	if (tb[TCA_NETEM_RATE])
		get_rate(q, tb[TCA_NETEM_RATE]);

	if (tb[TCA_NETEM_RATE64])
		q->rate = max_t(u64, q->rate,
				nla_get_u64(tb[TCA_NETEM_RATE64]));

	if (tb[TCA_NETEM_LATENCY64])
		q->latency = nla_get_s64(tb[TCA_NETEM_LATENCY64]);

	if (tb[TCA_NETEM_JITTER64])
		q->jitter = nla_get_s64(tb[TCA_NETEM_JITTER64]);

	if (tb[TCA_NETEM_ECN])
		q->ecn = nla_get_u32(tb[TCA_NETEM_ECN]);

	if (tb[TCA_NETEM_SLOT])
		get_slot(q, tb[TCA_NETEM_SLOT]);

	return ret;

get_table_failure:
	/* recover clg and loss_model, in case they were
	 * modified in get_loss_clg()
	 */
	q->clg = old_clg;
	q->loss_model = old_loss_model;
	return ret;
}

static int netem_init(struct Qdisc *sch, struct nlattr *opt,
		      struct netlink_ext_ack *extack)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	int ret;

	qdisc_watchdog_init(&q->watchdog, sch);

	if (!opt)
		return -EINVAL;

	q->loss_model = CLG_RANDOM;
	ret = netem_change(sch, opt, extack);
	if (ret)
		pr_info("netem: change failed\n");
	return ret;
}

static void netem_destroy(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_watchdog_cancel(&q->watchdog);
	if (q->qdisc)
		qdisc_put(q->qdisc);
	dist_free(q->delay_dist);
	dist_free(q->slot_dist);
}

static int dump_loss_model(const struct netem_sched_data *q,
			   struct sk_buff *skb)
{
	struct nlattr *nest;

	nest = nla_nest_start(skb, TCA_NETEM_LOSS);
	if (nest == NULL)
		goto nla_put_failure;

	switch (q->loss_model) {
	case CLG_RANDOM:
		/* legacy loss model */
		nla_nest_cancel(skb, nest);
		return 0;	/* no data */

	case CLG_4_STATES: {
		struct tc_netem_gimodel gi = {
			.p13 = q->clg.a1,
			.p31 = q->clg.a2,
			.p32 = q->clg.a3,
			.p14 = q->clg.a4,
			.p23 = q->clg.a5,
		};

		if (nla_put(skb, NETEM_LOSS_GI, sizeof(gi), &gi))
			goto nla_put_failure;
		break;
	}
	case CLG_GILB_ELL: {
		struct tc_netem_gemodel ge = {
			.p = q->clg.a1,
			.r = q->clg.a2,
			.h = q->clg.a3,
			.k1 = q->clg.a4,
		};

		if (nla_put(skb, NETEM_LOSS_GE, sizeof(ge), &ge))
			goto nla_put_failure;
		break;
	}
	}

	nla_nest_end(skb, nest);
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	const struct netem_sched_data *q = qdisc_priv(sch);
	struct nlattr *nla = (struct nlattr *) skb_tail_pointer(skb);
	struct tc_netem_qopt qopt;
	struct tc_netem_corr cor;
	struct tc_netem_reorder reorder;
	struct tc_netem_corrupt corrupt;
	struct tc_netem_rate rate;
	struct tc_netem_slot slot;

	qopt.latency = min_t(psched_tdiff_t, PSCHED_NS2TICKS(q->latency),
			     UINT_MAX);
	qopt.jitter = min_t(psched_tdiff_t, PSCHED_NS2TICKS(q->jitter),
			    UINT_MAX);
	qopt.limit = q->limit;
	qopt.loss = q->loss;
	qopt.gap = q->gap;
	qopt.duplicate = q->duplicate;
	if (nla_put(skb, TCA_OPTIONS, sizeof(qopt), &qopt))
		goto nla_put_failure;

	if (nla_put(skb, TCA_NETEM_LATENCY64, sizeof(q->latency), &q->latency))
		goto nla_put_failure;

	if (nla_put(skb, TCA_NETEM_JITTER64, sizeof(q->jitter), &q->jitter))
		goto nla_put_failure;

	cor.delay_corr = q->delay_cor.rho;
	cor.loss_corr = q->loss_cor.rho;
	cor.dup_corr = q->dup_cor.rho;
	if (nla_put(skb, TCA_NETEM_CORR, sizeof(cor), &cor))
		goto nla_put_failure;

	reorder.probability = q->reorder;
	reorder.correlation = q->reorder_cor.rho;
	if (nla_put(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder))
		goto nla_put_failure;

	corrupt.probability = q->corrupt;
	corrupt.correlation = q->corrupt_cor.rho;
	if (nla_put(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt))
		goto nla_put_failure;

	if (q->rate >= (1ULL << 32)) {
		if (nla_put_u64_64bit(skb, TCA_NETEM_RATE64, q->rate,
				      TCA_NETEM_PAD))
			goto nla_put_failure;
		rate.rate = ~0U;
	} else {
		rate.rate = q->rate;
	}
	rate.packet_overhead = q->packet_overhead;
	rate.cell_size = q->cell_size;
	rate.cell_overhead = q->cell_overhead;
	if (nla_put(skb, TCA_NETEM_RATE, sizeof(rate), &rate))
		goto nla_put_failure;

	if (q->ecn && nla_put_u32(skb, TCA_NETEM_ECN, q->ecn))
		goto nla_put_failure;

	if (dump_loss_model(q, skb) != 0)
		goto nla_put_failure;

	if (q->slot_config.min_delay | q->slot_config.max_delay |
	    q->slot_config.dist_jitter) {
		slot = q->slot_config;
		if (slot.max_packets == INT_MAX)
			slot.max_packets = 0;
		if (slot.max_bytes == INT_MAX)
			slot.max_bytes = 0;
		if (nla_put(skb, TCA_NETEM_SLOT, sizeof(slot), &slot))
			goto nla_put_failure;
	}

	return nla_nest_end(skb, nla);

nla_put_failure:
	nlmsg_trim(skb, nla);
	return -1;
}

static int netem_dump_class(struct Qdisc *sch, unsigned long cl,
			    struct sk_buff *skb, struct tcmsg *tcm)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	if (cl != 1 || !q->qdisc)	/* only one class */
		return -ENOENT;

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;

	return 0;
}

static int netem_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		       struct Qdisc **old, struct netlink_ext_ack *extack)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	*old = qdisc_replace(sch, new, &q->qdisc);
	return 0;
}

static struct Qdisc *netem_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	return q->qdisc;
}

static unsigned long netem_find(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void netem_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}

static const struct Qdisc_class_ops netem_class_ops = {
	.graft		=	netem_graft,
	.leaf		=	netem_leaf,
	.find		=	netem_find,
	.walk		=	netem_walk,
	.dump		=	netem_dump_class,
};
static struct Qdisc_ops netem_qdisc_ops __read_mostly = {
	.id		=	"netem",
	.cl_ops		=	&netem_class_ops,
	.priv_size	=	sizeof(struct netem_sched_data),
	.enqueue	=	netem_enqueue,
	.dequeue	=	netem_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.init		=	netem_init,
	.reset		=	netem_reset,
	.destroy	=	netem_destroy,
	.change		=	netem_change,
	.dump		=	netem_dump,
	.owner		=	THIS_MODULE,
};


static int __init netem_module_init(void)
{
	pr_info("netem: version " VERSION "\n");
	return register_qdisc(&netem_qdisc_ops);
}
static void __exit netem_module_exit(void)
{
	unregister_qdisc(&netem_qdisc_ops);
}
module_init(netem_module_init)
module_exit(netem_module_exit)
MODULE_LICENSE("GPL");