/*
 * net/sched/sch_netem.c	Network emulator
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License.
 *
 *		Many of the algorithms and ideas for this came from
 *		NIST Net which is not copyrighted.
 *
 * Authors:	Stephen Hemminger <shemminger@osdl.org>
 *		Catalin(ux aka Dino) BOIE <catab at umbrella dot ro>
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/rtnetlink.h>
#include <linux/reciprocal_div.h>
#include <linux/rbtree.h>

#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/inet_ecn.h>

#define VERSION "1.3"

/*	Network Emulation Queuing algorithm.
	====================================

	Sources: [1] Mark Carson, Darrin Santay, "NIST Net - A Linux-based
		 Network Emulation Tool"
		 [2] Luigi Rizzo, DummyNet for FreeBSD

	----------------------------------------------------------------

	This started out as a simple way to delay outgoing packets to
	test TCP but has grown to include most of the functionality
	of a full blown network emulator like NISTnet.  It can delay
	packets and add random jitter (and correlation).  The random
	distribution can also be loaded from a table to provide
	normal, Pareto, or experimental curves.  Packet loss,
	duplication, and reordering can also be emulated.

	This qdisc does not do classification; that can be handled by
	layering other disciplines.  It does not need to do bandwidth
	control either, since that can be handled by using token
	bucket or other rate control.

	Correlated Loss Generator models

	Added generation of correlated loss according to the
	"Gilbert-Elliot" model, a 4-state Markov model.

	References:
	[1] NetemCLG Home http://netgroup.uniroma2.it/NetemCLG
	[2] S. Salsano, F. Ludovici, A. Ordine, "Definition of a general
	and intuitive loss model for packet networks and its implementation
	in the Netem module in the Linux kernel", available in [1]

	Authors: Stefano Salsano <stefano.salsano at uniroma2.it>
		 Fabio Ludovici <fabio.ludovici at yahoo.it>
*/
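
/* For orientation (not part of the original sources): typical tc(8)
 * invocations that exercise the features described above.  The interface
 * name and parameter values are illustrative only.
 *
 *	# 100ms delay with 10ms jitter, 25% correlated with the last delay
 *	tc qdisc add dev eth0 root netem delay 100ms 10ms 25%
 *
 *	# 0.3% random loss plus 1% duplication
 *	tc qdisc change dev eth0 root netem loss 0.3% duplicate 1%
 *
 *	# 25% of packets (correlation 50%) are sent immediately,
 *	# the rest are delayed by 10ms, producing reordering
 *	tc qdisc change dev eth0 root netem delay 10ms reorder 25% 50%
 */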
Ordine, "Definition of a general 64 and intuitive loss model for packet networks and its implementation 65 in the Netem module in the Linux kernel", available in [1] 66 67 Authors: Stefano Salsano <stefano.salsano at uniroma2.it 68 Fabio Ludovici <fabio.ludovici at yahoo.it> 69 */ 70 71 struct netem_sched_data { 72 /* internal t(ime)fifo qdisc uses t_root and sch->limit */ 73 struct rb_root t_root; 74 75 /* optional qdisc for classful handling (NULL at netem init) */ 76 struct Qdisc *qdisc; 77 78 struct qdisc_watchdog watchdog; 79 80 psched_tdiff_t latency; 81 psched_tdiff_t jitter; 82 83 u32 loss; 84 u32 ecn; 85 u32 limit; 86 u32 counter; 87 u32 gap; 88 u32 duplicate; 89 u32 reorder; 90 u32 corrupt; 91 u64 rate; 92 s32 packet_overhead; 93 u32 cell_size; 94 struct reciprocal_value cell_size_reciprocal; 95 s32 cell_overhead; 96 97 struct crndstate { 98 u32 last; 99 u32 rho; 100 } delay_cor, loss_cor, dup_cor, reorder_cor, corrupt_cor; 101 102 struct disttable { 103 u32 size; 104 s16 table[0]; 105 } *delay_dist; 106 107 enum { 108 CLG_RANDOM, 109 CLG_4_STATES, 110 CLG_GILB_ELL, 111 } loss_model; 112 113 enum { 114 TX_IN_GAP_PERIOD = 1, 115 TX_IN_BURST_PERIOD, 116 LOST_IN_GAP_PERIOD, 117 LOST_IN_BURST_PERIOD, 118 } _4_state_model; 119 120 enum { 121 GOOD_STATE = 1, 122 BAD_STATE, 123 } GE_state_model; 124 125 /* Correlated Loss Generation models */ 126 struct clgstate { 127 /* state of the Markov chain */ 128 u8 state; 129 130 /* 4-states and Gilbert-Elliot models */ 131 u32 a1; /* p13 for 4-states or p for GE */ 132 u32 a2; /* p31 for 4-states or r for GE */ 133 u32 a3; /* p32 for 4-states or h for GE */ 134 u32 a4; /* p14 for 4-states or 1-k for GE */ 135 u32 a5; /* p23 used only in 4-states */ 136 } clg; 137 138 }; 139 140 /* Time stamp put into socket buffer control block 141 * Only valid when skbs are in our internal t(ime)fifo queue. 142 * 143 * As skb->rbnode uses same storage than skb->next, skb->prev and skb->tstamp, 144 * and skb->next & skb->prev are scratch space for a qdisc, 145 * we save skb->tstamp value in skb->cb[] before destroying it. 146 */ 147 struct netem_skb_cb { 148 psched_time_t time_to_send; 149 ktime_t tstamp_save; 150 }; 151 152 153 static struct sk_buff *netem_rb_to_skb(struct rb_node *rb) 154 { 155 return container_of(rb, struct sk_buff, rbnode); 156 } 157 158 static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb) 159 { 160 /* we assume we can use skb next/prev/tstamp as storage for rb_node */ 161 qdisc_cb_private_validate(skb, sizeof(struct netem_skb_cb)); 162 return (struct netem_skb_cb *)qdisc_skb_cb(skb)->data; 163 } 164 165 /* init_crandom - initialize correlated random number generator 166 * Use entropy source for initial seed. 167 */ 168 static void init_crandom(struct crndstate *state, unsigned long rho) 169 { 170 state->rho = rho; 171 state->last = prandom_u32(); 172 } 173 174 /* get_crandom - correlated random number generator 175 * Next number depends on last value. 176 * rho is scaled to avoid floating point. 
/* loss_4state - 4-state model loss generator
 * Generates losses according to the 4-state Markov chain adopted in
 * the GI (General and Intuitive) loss model.
 */
static bool loss_4state(struct netem_sched_data *q)
{
	struct clgstate *clg = &q->clg;
	u32 rnd = prandom_u32();

	/*
	 * Makes a comparison between rnd and the transition
	 * probabilities outgoing from the current state, then decides the
	 * next state and if the next packet has to be transmitted or lost.
	 * The four states correspond to:
	 *   TX_IN_GAP_PERIOD => successfully transmitted packets within a gap period
	 *   LOST_IN_BURST_PERIOD => isolated losses within a gap period
	 *   LOST_IN_GAP_PERIOD => lost packets within a burst period
	 *   TX_IN_BURST_PERIOD => successfully transmitted packets within a burst period
	 */
	switch (clg->state) {
	case TX_IN_GAP_PERIOD:
		if (rnd < clg->a4) {
			clg->state = LOST_IN_BURST_PERIOD;
			return true;
		} else if (clg->a4 < rnd && rnd < clg->a1 + clg->a4) {
			clg->state = LOST_IN_GAP_PERIOD;
			return true;
		} else if (clg->a1 + clg->a4 < rnd) {
			clg->state = TX_IN_GAP_PERIOD;
		}

		break;
	case TX_IN_BURST_PERIOD:
		if (rnd < clg->a5) {
			clg->state = LOST_IN_GAP_PERIOD;
			return true;
		} else {
			clg->state = TX_IN_BURST_PERIOD;
		}

		break;
	case LOST_IN_GAP_PERIOD:
		if (rnd < clg->a3)
			clg->state = TX_IN_BURST_PERIOD;
		else if (clg->a3 < rnd && rnd < clg->a2 + clg->a3) {
			clg->state = TX_IN_GAP_PERIOD;
		} else if (clg->a2 + clg->a3 < rnd) {
			clg->state = LOST_IN_GAP_PERIOD;
			return true;
		}
		break;
	case LOST_IN_BURST_PERIOD:
		clg->state = TX_IN_GAP_PERIOD;
		break;
	}

	return false;
}
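
/* Illustration (not from the original sources): in TX_IN_GAP_PERIOD the
 * u32 range [0, 2^32) is partitioned by the scaled probabilities
 *
 *	[0, a4)         -> LOST_IN_BURST_PERIOD  (isolated loss, p14)
 *	[a4, a4 + a1)   -> LOST_IN_GAP_PERIOD    (a burst begins, p13)
 *	[a4 + a1, 2^32) -> stay in TX_IN_GAP_PERIOD
 *
 * so a single uniform draw selects the transition.  The other states are
 * partitioned analogously using a5 (p23) and a2/a3 (p31/p32).
 */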
/* loss_gilb_ell - Gilbert-Elliot model loss generator
 * Generates losses according to the Gilbert-Elliot loss model or
 * its special cases (Gilbert or Simple Gilbert)
 *
 * Makes a comparison between a random number and the transition
 * probabilities outgoing from the current state, then decides the
 * next state. A second random number is extracted and the comparison
 * with the loss probability of the current state decides if the next
 * packet will be transmitted or lost.
 */
static bool loss_gilb_ell(struct netem_sched_data *q)
{
	struct clgstate *clg = &q->clg;

	switch (clg->state) {
	case GOOD_STATE:
		if (prandom_u32() < clg->a1)
			clg->state = BAD_STATE;
		if (prandom_u32() < clg->a4)
			return true;
		break;
	case BAD_STATE:
		if (prandom_u32() < clg->a2)
			clg->state = GOOD_STATE;
		if (prandom_u32() > clg->a3)
			return true;
	}

	return false;
}
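
/* Parameter roles, restating the clgstate comments above: a1 = p
 * (good->bad transition), a2 = r (bad->good), a3 = h (transmission
 * probability in the bad state), a4 = 1-k (loss probability in the good
 * state).  Per the loss-model paper cited in the header, the special
 * cases follow by fixing parameters: k = 1 gives the Gilbert model,
 * k = 1 and h = 0 the Simple Gilbert model, and k = 1, h = 0, r = 1-p
 * collapses the chain to independent (Bernoulli) loss.
 */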
static bool loss_event(struct netem_sched_data *q)
{
	switch (q->loss_model) {
	case CLG_RANDOM:
		/* Random packet drop 0 => none, ~0 => all */
		return q->loss && q->loss >= get_crandom(&q->loss_cor);

	case CLG_4_STATES:
		/* 4-state loss model algorithm (used also for GI model)
		 * Extracts a value from the 4-state Markov loss generator;
		 * a true return value means this packet is lost.
		 */
		return loss_4state(q);

	case CLG_GILB_ELL:
		/* Gilbert-Elliot loss model algorithm
		 * Extracts a value from the Gilbert-Elliot loss generator;
		 * a true return value means this packet is lost.
		 */
		return loss_gilb_ell(q);
	}

	return false;	/* not reached */
}


/* tabledist - return a pseudo-randomly distributed value with mean mu and
 * std deviation sigma.  Uses table lookup to approximate the desired
 * distribution, and a uniformly-distributed pseudo-random source.
 */
static psched_tdiff_t tabledist(psched_tdiff_t mu, psched_tdiff_t sigma,
				struct crndstate *state,
				const struct disttable *dist)
{
	psched_tdiff_t x;
	long t;
	u32 rnd;

	if (sigma == 0)
		return mu;

	rnd = get_crandom(state);

	/* default uniform distribution */
	if (dist == NULL)
		return (rnd % (2*sigma)) - sigma + mu;

	t = dist->table[rnd % dist->size];
	x = (sigma % NETEM_DIST_SCALE) * t;
	if (x >= 0)
		x += NETEM_DIST_SCALE/2;
	else
		x -= NETEM_DIST_SCALE/2;

	return  x / NETEM_DIST_SCALE  + (sigma / NETEM_DIST_SCALE) * t + mu;
}
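
/* Worked equation (illustrative): the table stores samples of a unit
 * distribution scaled by NETEM_DIST_SCALE, so the returned value is
 *
 *	mu + t * sigma / NETEM_DIST_SCALE		(rounded)
 *
 * computed in two parts, (sigma % SCALE) * t / SCALE plus
 * (sigma / SCALE) * t, to avoid overflowing the intermediate product
 * when sigma is large.  E.g. mu = 100ms, sigma = 10ms and a table entry
 * t = -8192 with NETEM_DIST_SCALE = 8192 yields 100ms - 10ms = 90ms.
 */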
static psched_time_t packet_len_2_sched_time(unsigned int len, struct netem_sched_data *q)
{
	u64 ticks;

	len += q->packet_overhead;

	if (q->cell_size) {
		u32 cells = reciprocal_divide(len, q->cell_size_reciprocal);

		if (len > cells * q->cell_size)	/* extra cell needed for remainder */
			cells++;
		len = cells * (q->cell_size + q->cell_overhead);
	}

	ticks = (u64)len * NSEC_PER_SEC;

	do_div(ticks, q->rate);
	return PSCHED_NS2TICKS(ticks);
}
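
/* Worked example (illustrative, assuming q->rate is in bytes per second
 * as the division above implies): rate = 125000 (1 Mbit/s), len = 1500,
 * no overhead and no cell rounding:
 *
 *	ticks = 1500 * NSEC_PER_SEC / 125000 = 12,000,000 ns = 12 ms
 *
 * i.e. the serialization delay of one packet at the configured rate.
 * With cell_size set (e.g. 53 for ATM-like framing), len is first
 * rounded up to a whole number of cells before the division.
 */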
static void tfifo_reset(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct rb_node *p;

	while ((p = rb_first(&q->t_root))) {
		struct sk_buff *skb = netem_rb_to_skb(p);

		rb_erase(p, &q->t_root);
		rtnl_kfree_skbs(skb, skb);
	}
}

static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	psched_time_t tnext = netem_skb_cb(nskb)->time_to_send;
	struct rb_node **p = &q->t_root.rb_node, *parent = NULL;

	while (*p) {
		struct sk_buff *skb;

		parent = *p;
		skb = netem_rb_to_skb(parent);
		if (tnext >= netem_skb_cb(skb)->time_to_send)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&nskb->rbnode, parent, p);
	rb_insert_color(&nskb->rbnode, &q->t_root);
	sch->q.qlen++;
}
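
/* Design note (not from the original sources): the tfifo is an rbtree
 * keyed by time_to_send rather than a plain list, so an skb assigned a
 * smaller delay than already-queued packets (as happens under jitter)
 * is sorted into place in O(log n).  Descending right on ">=" means
 * ties go to the right subtree, preserving FIFO order among packets
 * that share the same send time.
 */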
/* netem can't properly corrupt a megapacket (like we get from GSO), so when
 * we statistically choose to corrupt one, we instead segment it, returning
 * the first packet to be corrupted, and re-enqueue the remaining frames
 */
static struct sk_buff *netem_segment(struct sk_buff *skb, struct Qdisc *sch,
				     struct sk_buff **to_free)
{
	struct sk_buff *segs;
	netdev_features_t features = netif_skb_features(skb);

	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);

	if (IS_ERR_OR_NULL(segs)) {
		qdisc_drop(skb, sch, to_free);
		return NULL;
	}
	consume_skb(skb);
	return segs;
}

/*
 * Insert one skb into qdisc.
 * Note: parent depends on return value to account for queue length.
 *	NET_XMIT_DROP: queue length didn't change.
 *	NET_XMIT_SUCCESS: one skb was queued.
 */
static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			 struct sk_buff **to_free)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	/* We don't fill cb now as skb_unshare() may invalidate it */
	struct netem_skb_cb *cb;
	struct sk_buff *skb2;
	struct sk_buff *segs = NULL;
	unsigned int len = 0, last_len, prev_len = qdisc_pkt_len(skb);
	int nb = 0;
	int count = 1;
	int rc = NET_XMIT_SUCCESS;

	/* Random duplication */
	if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
		++count;

	/* Drop packet? */
	if (loss_event(q)) {
		if (q->ecn && INET_ECN_set_ce(skb))
			qdisc_qstats_drop(sch); /* mark packet */
		else
			--count;
	}
	if (count == 0) {
		qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	}

	/* If a delay is expected, orphan the skb. (orphaning usually takes
	 * place at TX completion time, so _before_ the link transit delay)
	 */
	if (q->latency || q->jitter)
		skb_orphan_partial(skb);

	/*
	 * If we need to duplicate packet, then re-insert at top of the
	 * qdisc tree, since parent queuer expects that only one
	 * skb will be queued.
	 */
	if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
		struct Qdisc *rootq = qdisc_root(sch);
		u32 dupsave = q->duplicate; /* prevent duplicating a dup... */

		q->duplicate = 0;
		rootq->enqueue(skb2, rootq, to_free);
		q->duplicate = dupsave;
	}

	/*
	 * Randomized packet corruption.
	 * Make a copy if needed since we are modifying the packet.
	 * If the packet is going to be hardware checksummed, then
	 * do it now in software before we mangle it.
	 */
	if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
		if (skb_is_gso(skb)) {
			segs = netem_segment(skb, sch, to_free);
			if (!segs)
				return NET_XMIT_DROP;
		} else {
			segs = skb;
		}

		skb = segs;
		segs = segs->next;

		skb = skb_unshare(skb, GFP_ATOMIC);
		if (unlikely(!skb)) {
			qdisc_qstats_drop(sch);
			goto finish_segs;
		}
		if (skb->ip_summed == CHECKSUM_PARTIAL &&
		    skb_checksum_help(skb)) {
			qdisc_drop(skb, sch, to_free);
			goto finish_segs;
		}

		skb->data[prandom_u32() % skb_headlen(skb)] ^=
			1<<(prandom_u32() % 8);
	}

	if (unlikely(skb_queue_len(&sch->q) >= sch->limit))
		return qdisc_drop(skb, sch, to_free);

	qdisc_qstats_backlog_inc(sch, skb);

	cb = netem_skb_cb(skb);
	if (q->gap == 0 ||		/* not doing reordering */
	    q->counter < q->gap - 1 ||	/* inside last reordering gap */
	    q->reorder < get_crandom(&q->reorder_cor)) {
		psched_time_t now;
		psched_tdiff_t delay;

		delay = tabledist(q->latency, q->jitter,
				  &q->delay_cor, q->delay_dist);

		now = psched_get_time();

		if (q->rate) {
			struct sk_buff *last;

			if (!skb_queue_empty(&sch->q))
				last = skb_peek_tail(&sch->q);
			else
				last = netem_rb_to_skb(rb_last(&q->t_root));
			if (last) {
				/*
				 * Last packet in queue is reference point (now),
				 * calculate this time bonus and subtract
				 * from delay.
				 */
				delay -= netem_skb_cb(last)->time_to_send - now;
				delay = max_t(psched_tdiff_t, 0, delay);
				now = netem_skb_cb(last)->time_to_send;
			}

			delay += packet_len_2_sched_time(qdisc_pkt_len(skb), q);
		}

		cb->time_to_send = now + delay;
		cb->tstamp_save = skb->tstamp;
		++q->counter;
		tfifo_enqueue(skb, sch);
	} else {
		/*
		 * Do re-ordering by putting one out of N packets at the front
		 * of the queue.
		 */
		cb->time_to_send = psched_get_time();
		q->counter = 0;

		__skb_queue_head(&sch->q, skb);
		sch->qstats.requeues++;
	}

finish_segs:
	if (segs) {
		while (segs) {
			skb2 = segs->next;
			segs->next = NULL;
			qdisc_skb_cb(segs)->pkt_len = segs->len;
			last_len = segs->len;
			rc = qdisc_enqueue(segs, sch, to_free);
			if (rc != NET_XMIT_SUCCESS) {
				if (net_xmit_drop_count(rc))
					qdisc_qstats_drop(sch);
			} else {
				nb++;
				len += last_len;
			}
			segs = skb2;
		}
		sch->q.qlen += nb;
		if (nb > 1)
			qdisc_tree_reduce_backlog(sch, 1 - nb, prev_len - len);
	}
	return NET_XMIT_SUCCESS;
}
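
/* Reordering semantics, for reference (illustrative): with
 * "reorder 25% gap 5", q->counter counts packets taking the delayed
 * tfifo path; once gap - 1 (here 4) have gone through, each following
 * packet jumps to the head of the queue with probability ~25%
 * (correlated via reorder_cor), resetting the counter.  A packet sent
 * immediately this way overtakes earlier, still-delayed packets, which
 * is what produces the reordering.
 */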
static struct sk_buff *netem_dequeue(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	struct rb_node *p;

tfifo_dequeue:
	skb = __skb_dequeue(&sch->q);
	if (skb) {
		qdisc_qstats_backlog_dec(sch, skb);
deliver:
		qdisc_bstats_update(sch, skb);
		return skb;
	}
	p = rb_first(&q->t_root);
	if (p) {
		psched_time_t time_to_send;

		skb = netem_rb_to_skb(p);

		/* has the scheduled send time been reached? */
		time_to_send = netem_skb_cb(skb)->time_to_send;
		if (time_to_send <= psched_get_time()) {
			rb_erase(p, &q->t_root);

			sch->q.qlen--;
			qdisc_qstats_backlog_dec(sch, skb);
			skb->next = NULL;
			skb->prev = NULL;
			skb->tstamp = netem_skb_cb(skb)->tstamp_save;

#ifdef CONFIG_NET_CLS_ACT
			/*
			 * If it's at ingress let's pretend the delay is
			 * from the network (tstamp will be updated).
			 */
			if (G_TC_FROM(skb->tc_verd) & AT_INGRESS)
				skb->tstamp.tv64 = 0;
#endif

			if (q->qdisc) {
				unsigned int pkt_len = qdisc_pkt_len(skb);
				struct sk_buff *to_free = NULL;
				int err;

				err = qdisc_enqueue(skb, q->qdisc, &to_free);
				kfree_skb_list(to_free);
				if (err != NET_XMIT_SUCCESS &&
				    net_xmit_drop_count(err)) {
					qdisc_qstats_drop(sch);
					qdisc_tree_reduce_backlog(sch, 1,
								  pkt_len);
				}
				goto tfifo_dequeue;
			}
			goto deliver;
		}

		if (q->qdisc) {
			skb = q->qdisc->ops->dequeue(q->qdisc);
			if (skb)
				goto deliver;
		}
		qdisc_watchdog_schedule(&q->watchdog, time_to_send);
	}

	if (q->qdisc) {
		skb = q->qdisc->ops->dequeue(q->qdisc);
		if (skb)
			goto deliver;
	}
	return NULL;
}

static void netem_reset(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_reset_queue(sch);
	tfifo_reset(sch);
	if (q->qdisc)
		qdisc_reset(q->qdisc);
	qdisc_watchdog_cancel(&q->watchdog);
}

static void dist_free(struct disttable *d)
{
	kvfree(d);
}

/*
 * Distribution data is a variable size payload containing
 * signed 16 bit values.
 */
static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	size_t n = nla_len(attr)/sizeof(__s16);
	const __s16 *data = nla_data(attr);
	spinlock_t *root_lock;
	struct disttable *d;
	int i;
	size_t s;

	if (n > NETEM_DIST_MAX)
		return -EINVAL;

	s = sizeof(struct disttable) + n * sizeof(s16);
	d = kmalloc(s, GFP_KERNEL | __GFP_NOWARN);
	if (!d)
		d = vmalloc(s);
	if (!d)
		return -ENOMEM;

	d->size = n;
	for (i = 0; i < n; i++)
		d->table[i] = data[i];

	root_lock = qdisc_root_sleeping_lock(sch);

	spin_lock_bh(root_lock);
	swap(q->delay_dist, d);
	spin_unlock_bh(root_lock);

	dist_free(d);
	return 0;
}

static void get_correlation(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_corr *c = nla_data(attr);

	init_crandom(&q->delay_cor, c->delay_corr);
	init_crandom(&q->loss_cor, c->loss_corr);
	init_crandom(&q->dup_cor, c->dup_corr);
}

static void get_reorder(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_reorder *r = nla_data(attr);

	q->reorder = r->probability;
	init_crandom(&q->reorder_cor, r->correlation);
}

static void get_corrupt(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_corrupt *r = nla_data(attr);

	q->corrupt = r->probability;
	init_crandom(&q->corrupt_cor, r->correlation);
}

static void get_rate(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_rate *r = nla_data(attr);

	q->rate = r->rate;
	q->packet_overhead = r->packet_overhead;
	q->cell_size = r->cell_size;
	q->cell_overhead = r->cell_overhead;
	if (q->cell_size)
		q->cell_size_reciprocal = reciprocal_value(q->cell_size);
	else
		q->cell_size_reciprocal = (struct reciprocal_value) { 0 };
}

static int get_loss_clg(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct nlattr *la;
	int rem;

	nla_for_each_nested(la, attr, rem) {
		u16 type = nla_type(la);

		switch (type) {
		case NETEM_LOSS_GI: {
			const struct tc_netem_gimodel *gi = nla_data(la);

			if (nla_len(la) < sizeof(struct tc_netem_gimodel)) {
				pr_info("netem: incorrect gi model size\n");
				return -EINVAL;
			}

			q->loss_model = CLG_4_STATES;

			q->clg.state = TX_IN_GAP_PERIOD;
			q->clg.a1 = gi->p13;
			q->clg.a2 = gi->p31;
			q->clg.a3 = gi->p32;
			q->clg.a4 = gi->p14;
			q->clg.a5 = gi->p23;
			break;
		}

		case NETEM_LOSS_GE: {
			const struct tc_netem_gemodel *ge = nla_data(la);

			if (nla_len(la) < sizeof(struct tc_netem_gemodel)) {
				pr_info("netem: incorrect ge model size\n");
				return -EINVAL;
			}

			q->loss_model = CLG_GILB_ELL;
			q->clg.state = GOOD_STATE;
			q->clg.a1 = ge->p;
			q->clg.a2 = ge->r;
			q->clg.a3 = ge->h;
			q->clg.a4 = ge->k1;
			break;
		}

		default:
			pr_info("netem: unknown loss type %u\n", type);
			return -EINVAL;
		}
	}

	return 0;
}

static const struct nla_policy netem_policy[TCA_NETEM_MAX + 1] = {
	[TCA_NETEM_CORR]	= { .len = sizeof(struct tc_netem_corr) },
	[TCA_NETEM_REORDER]	= { .len = sizeof(struct tc_netem_reorder) },
	[TCA_NETEM_CORRUPT]	= { .len = sizeof(struct tc_netem_corrupt) },
	[TCA_NETEM_RATE]	= { .len = sizeof(struct tc_netem_rate) },
	[TCA_NETEM_LOSS]	= { .type = NLA_NESTED },
	[TCA_NETEM_ECN]		= { .type = NLA_U32 },
	[TCA_NETEM_RATE64]	= { .type = NLA_U64 },
};

static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
		      const struct nla_policy *policy, int len)
{
	int nested_len = nla_len(nla) - NLA_ALIGN(len);

	if (nested_len < 0) {
		pr_info("netem: invalid attributes len %d\n", nested_len);
		return -EINVAL;
	}

	if (nested_len >= nla_attr_size(0))
		return nla_parse(tb, maxtype, nla_data(nla) + NLA_ALIGN(len),
				 nested_len, policy);

	memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
	return 0;
}
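
/* Message layout, for reference (illustrative): netem options arrive as
 * a struct tc_netem_qopt followed, optionally, by nested netlink
 * attributes, which is why parse_attr() skips NLA_ALIGN(sizeof(*qopt))
 * bytes before running nla_parse() against netem_policy:
 *
 *	TCA_OPTIONS
 *	  +-- struct tc_netem_qopt  (latency, limit, loss, gap, ...)
 *	  +-- TCA_NETEM_CORR        (struct tc_netem_corr)
 *	  +-- TCA_NETEM_LOSS        (nested GI/GE model attributes)
 *	  +-- ...
 */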
/* Parse netlink message to set options */
static int netem_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_NETEM_MAX + 1];
	struct tc_netem_qopt *qopt;
	struct clgstate old_clg;
	int old_loss_model = CLG_RANDOM;
	int ret;

	if (opt == NULL)
		return -EINVAL;

	qopt = nla_data(opt);
	ret = parse_attr(tb, TCA_NETEM_MAX, opt, netem_policy, sizeof(*qopt));
	if (ret < 0)
		return ret;

	/* backup q->clg and q->loss_model */
	old_clg = q->clg;
	old_loss_model = q->loss_model;

	if (tb[TCA_NETEM_LOSS]) {
		ret = get_loss_clg(q, tb[TCA_NETEM_LOSS]);
		if (ret) {
			q->loss_model = old_loss_model;
			return ret;
		}
	} else {
		q->loss_model = CLG_RANDOM;
	}

	if (tb[TCA_NETEM_DELAY_DIST]) {
		ret = get_dist_table(sch, tb[TCA_NETEM_DELAY_DIST]);
		if (ret) {
			/* recover clg and loss_model, in case
			 * q->clg and q->loss_model were modified
			 * in get_loss_clg()
			 */
			q->clg = old_clg;
			q->loss_model = old_loss_model;
			return ret;
		}
	}

	sch->limit = qopt->limit;

	q->latency = qopt->latency;
	q->jitter = qopt->jitter;
	q->limit = qopt->limit;
	q->gap = qopt->gap;
	q->counter = 0;
	q->loss = qopt->loss;
	q->duplicate = qopt->duplicate;

	/* for compatibility with earlier versions.
	 * if gap is set, need to assume 100% probability
	 */
	if (q->gap)
		q->reorder = ~0;

	if (tb[TCA_NETEM_CORR])
		get_correlation(q, tb[TCA_NETEM_CORR]);

	if (tb[TCA_NETEM_REORDER])
		get_reorder(q, tb[TCA_NETEM_REORDER]);

	if (tb[TCA_NETEM_CORRUPT])
		get_corrupt(q, tb[TCA_NETEM_CORRUPT]);

	if (tb[TCA_NETEM_RATE])
		get_rate(q, tb[TCA_NETEM_RATE]);

	if (tb[TCA_NETEM_RATE64])
		q->rate = max_t(u64, q->rate,
				nla_get_u64(tb[TCA_NETEM_RATE64]));

	if (tb[TCA_NETEM_ECN])
		q->ecn = nla_get_u32(tb[TCA_NETEM_ECN]);

	return ret;
}

static int netem_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	int ret;

	if (!opt)
		return -EINVAL;

	qdisc_watchdog_init(&q->watchdog, sch);

	q->loss_model = CLG_RANDOM;
	ret = netem_change(sch, opt);
	if (ret)
		pr_info("netem: change failed\n");
	return ret;
}

static void netem_destroy(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_watchdog_cancel(&q->watchdog);
	if (q->qdisc)
		qdisc_destroy(q->qdisc);
	dist_free(q->delay_dist);
}

static int dump_loss_model(const struct netem_sched_data *q,
			   struct sk_buff *skb)
{
	struct nlattr *nest;

	nest = nla_nest_start(skb, TCA_NETEM_LOSS);
	if (nest == NULL)
		goto nla_put_failure;

	switch (q->loss_model) {
	case CLG_RANDOM:
		/* legacy loss model */
		nla_nest_cancel(skb, nest);
		return 0;	/* no data */

	case CLG_4_STATES: {
		struct tc_netem_gimodel gi = {
			.p13 = q->clg.a1,
			.p31 = q->clg.a2,
			.p32 = q->clg.a3,
			.p14 = q->clg.a4,
			.p23 = q->clg.a5,
		};

		if (nla_put(skb, NETEM_LOSS_GI, sizeof(gi), &gi))
			goto nla_put_failure;
		break;
	}
	case CLG_GILB_ELL: {
		struct tc_netem_gemodel ge = {
			.p = q->clg.a1,
			.r = q->clg.a2,
			.h = q->clg.a3,
			.k1 = q->clg.a4,
		};

		if (nla_put(skb, NETEM_LOSS_GE, sizeof(ge), &ge))
			goto nla_put_failure;
		break;
	}
	}

	nla_nest_end(skb, nest);
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	const struct netem_sched_data *q = qdisc_priv(sch);
	struct nlattr *nla = (struct nlattr *) skb_tail_pointer(skb);
	struct tc_netem_qopt qopt;
	struct tc_netem_corr cor;
	struct tc_netem_reorder reorder;
	struct tc_netem_corrupt corrupt;
	struct tc_netem_rate rate;

	qopt.latency = q->latency;
	qopt.jitter = q->jitter;
	qopt.limit = q->limit;
	qopt.loss = q->loss;
	qopt.gap = q->gap;
	qopt.duplicate = q->duplicate;
	if (nla_put(skb, TCA_OPTIONS, sizeof(qopt), &qopt))
		goto nla_put_failure;

	cor.delay_corr = q->delay_cor.rho;
	cor.loss_corr = q->loss_cor.rho;
	cor.dup_corr = q->dup_cor.rho;
	if (nla_put(skb, TCA_NETEM_CORR, sizeof(cor), &cor))
		goto nla_put_failure;

	reorder.probability = q->reorder;
	reorder.correlation = q->reorder_cor.rho;
	if (nla_put(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder))
		goto nla_put_failure;

	corrupt.probability = q->corrupt;
	corrupt.correlation = q->corrupt_cor.rho;
	if (nla_put(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt))
		goto nla_put_failure;

	if (q->rate >= (1ULL << 32)) {
		if (nla_put_u64_64bit(skb, TCA_NETEM_RATE64, q->rate,
				      TCA_NETEM_PAD))
			goto nla_put_failure;
		rate.rate = ~0U;
	} else {
		rate.rate = q->rate;
	}
	rate.packet_overhead = q->packet_overhead;
	rate.cell_size = q->cell_size;
	rate.cell_overhead = q->cell_overhead;
	if (nla_put(skb, TCA_NETEM_RATE, sizeof(rate), &rate))
		goto nla_put_failure;

	if (q->ecn && nla_put_u32(skb, TCA_NETEM_ECN, q->ecn))
		goto nla_put_failure;

	if (dump_loss_model(q, skb) != 0)
		goto nla_put_failure;

	return nla_nest_end(skb, nla);

nla_put_failure:
	nlmsg_trim(skb, nla);
	return -1;
}

static int netem_dump_class(struct Qdisc *sch, unsigned long cl,
			    struct sk_buff *skb, struct tcmsg *tcm)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	if (cl != 1 || !q->qdisc)	/* only one class */
		return -ENOENT;

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;

	return 0;
}

static int netem_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		       struct Qdisc **old)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	*old = qdisc_replace(sch, new, &q->qdisc);
	return 0;
}

static struct Qdisc *netem_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	return q->qdisc;
}

static unsigned long netem_get(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void netem_put(struct Qdisc *sch, unsigned long arg)
{
}

static void netem_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}

static const struct Qdisc_class_ops netem_class_ops = {
	.graft		=	netem_graft,
	.leaf		=	netem_leaf,
	.get		=	netem_get,
	.put		=	netem_put,
	.walk		=	netem_walk,
	.dump		=	netem_dump_class,
};

static struct Qdisc_ops netem_qdisc_ops __read_mostly = {
	.id		=	"netem",
	.cl_ops		=	&netem_class_ops,
	.priv_size	=	sizeof(struct netem_sched_data),
	.enqueue	=	netem_enqueue,
	.dequeue	=	netem_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.init		=	netem_init,
	.reset		=	netem_reset,
	.destroy	=	netem_destroy,
	.change		=	netem_change,
	.dump		=	netem_dump,
	.owner		=	THIS_MODULE,
};


static int __init netem_module_init(void)
{
	pr_info("netem: version " VERSION "\n");
	return register_qdisc(&netem_qdisc_ops);
}
static void __exit netem_module_exit(void)
{
	unregister_qdisc(&netem_qdisc_ops);
}
module_init(netem_module_init)
module_exit(netem_module_exit)
MODULE_LICENSE("GPL");