1 /* 2 * net/sched/sch_netem.c Network emulator 3 * 4 * This program is free software; you can redistribute it and/or 5 * modify it under the terms of the GNU General Public License 6 * as published by the Free Software Foundation; either version 7 * 2 of the License. 8 * 9 * Many of the algorithms and ideas for this came from 10 * NIST Net which is not copyrighted. 11 * 12 * Authors: Stephen Hemminger <shemminger@osdl.org> 13 * Catalin(ux aka Dino) BOIE <catab at umbrella dot ro> 14 */ 15 16 #include <linux/mm.h> 17 #include <linux/module.h> 18 #include <linux/slab.h> 19 #include <linux/types.h> 20 #include <linux/kernel.h> 21 #include <linux/errno.h> 22 #include <linux/skbuff.h> 23 #include <linux/vmalloc.h> 24 #include <linux/rtnetlink.h> 25 #include <linux/reciprocal_div.h> 26 #include <linux/rbtree.h> 27 28 #include <net/netlink.h> 29 #include <net/pkt_sched.h> 30 #include <net/inet_ecn.h> 31 32 #define VERSION "1.3" 33 34 /* Network Emulation Queuing algorithm. 35 ==================================== 36 37 Sources: [1] Mark Carson, Darrin Santay, "NIST Net - A Linux-based 38 Network Emulation Tool 39 [2] Luigi Rizzo, DummyNet for FreeBSD 40 41 ---------------------------------------------------------------- 42 43 This started out as a simple way to delay outgoing packets to 44 test TCP but has grown to include most of the functionality 45 of a full blown network emulator like NISTnet. It can delay 46 packets and add random jitter (and correlation). The random 47 distribution can be loaded from a table as well to provide 48 normal, Pareto, or experimental curves. Packet loss, 49 duplication, and reordering can also be emulated. 50 51 This qdisc does not do classification that can be handled in 52 layering other disciplines. It does not need to do bandwidth 53 control either since that can be handled by using token 54 bucket or other rate control. 55 56 Correlated Loss Generator models 57 58 Added generation of correlated loss according to the 59 "Gilbert-Elliot" model, a 4-state markov model. 60 61 References: 62 [1] NetemCLG Home http://netgroup.uniroma2.it/NetemCLG 63 [2] S. Salsano, F. Ludovici, A. Ordine, "Definition of a general 64 and intuitive loss model for packet networks and its implementation 65 in the Netem module in the Linux kernel", available in [1] 66 67 Authors: Stefano Salsano <stefano.salsano at uniroma2.it 68 Fabio Ludovici <fabio.ludovici at yahoo.it> 69 */ 70 71 struct netem_sched_data { 72 /* internal t(ime)fifo qdisc uses t_root and sch->limit */ 73 struct rb_root t_root; 74 75 /* optional qdisc for classful handling (NULL at netem init) */ 76 struct Qdisc *qdisc; 77 78 struct qdisc_watchdog watchdog; 79 80 psched_tdiff_t latency; 81 psched_tdiff_t jitter; 82 83 u32 loss; 84 u32 ecn; 85 u32 limit; 86 u32 counter; 87 u32 gap; 88 u32 duplicate; 89 u32 reorder; 90 u32 corrupt; 91 u64 rate; 92 s32 packet_overhead; 93 u32 cell_size; 94 struct reciprocal_value cell_size_reciprocal; 95 s32 cell_overhead; 96 97 struct crndstate { 98 u32 last; 99 u32 rho; 100 } delay_cor, loss_cor, dup_cor, reorder_cor, corrupt_cor; 101 102 struct disttable { 103 u32 size; 104 s16 table[0]; 105 } *delay_dist; 106 107 enum { 108 CLG_RANDOM, 109 CLG_4_STATES, 110 CLG_GILB_ELL, 111 } loss_model; 112 113 enum { 114 TX_IN_GAP_PERIOD = 1, 115 TX_IN_BURST_PERIOD, 116 LOST_IN_GAP_PERIOD, 117 LOST_IN_BURST_PERIOD, 118 } _4_state_model; 119 120 enum { 121 GOOD_STATE = 1, 122 BAD_STATE, 123 } GE_state_model; 124 125 /* Correlated Loss Generation models */ 126 struct clgstate { 127 /* state of the Markov chain */ 128 u8 state; 129 130 /* 4-states and Gilbert-Elliot models */ 131 u32 a1; /* p13 for 4-states or p for GE */ 132 u32 a2; /* p31 for 4-states or r for GE */ 133 u32 a3; /* p32 for 4-states or h for GE */ 134 u32 a4; /* p14 for 4-states or 1-k for GE */ 135 u32 a5; /* p23 used only in 4-states */ 136 } clg; 137 138 }; 139 140 /* Time stamp put into socket buffer control block 141 * Only valid when skbs are in our internal t(ime)fifo queue. 142 * 143 * As skb->rbnode uses same storage than skb->next, skb->prev and skb->tstamp, 144 * and skb->next & skb->prev are scratch space for a qdisc, 145 * we save skb->tstamp value in skb->cb[] before destroying it. 146 */ 147 struct netem_skb_cb { 148 psched_time_t time_to_send; 149 ktime_t tstamp_save; 150 }; 151 152 153 static struct sk_buff *netem_rb_to_skb(struct rb_node *rb) 154 { 155 return container_of(rb, struct sk_buff, rbnode); 156 } 157 158 static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb) 159 { 160 /* we assume we can use skb next/prev/tstamp as storage for rb_node */ 161 qdisc_cb_private_validate(skb, sizeof(struct netem_skb_cb)); 162 return (struct netem_skb_cb *)qdisc_skb_cb(skb)->data; 163 } 164 165 /* init_crandom - initialize correlated random number generator 166 * Use entropy source for initial seed. 167 */ 168 static void init_crandom(struct crndstate *state, unsigned long rho) 169 { 170 state->rho = rho; 171 state->last = prandom_u32(); 172 } 173 174 /* get_crandom - correlated random number generator 175 * Next number depends on last value. 176 * rho is scaled to avoid floating point. 177 */ 178 static u32 get_crandom(struct crndstate *state) 179 { 180 u64 value, rho; 181 unsigned long answer; 182 183 if (state->rho == 0) /* no correlation */ 184 return prandom_u32(); 185 186 value = prandom_u32(); 187 rho = (u64)state->rho + 1; 188 answer = (value * ((1ull<<32) - rho) + state->last * rho) >> 32; 189 state->last = answer; 190 return answer; 191 } 192 193 /* loss_4state - 4-state model loss generator 194 * Generates losses according to the 4-state Markov chain adopted in 195 * the GI (General and Intuitive) loss model. 196 */ 197 static bool loss_4state(struct netem_sched_data *q) 198 { 199 struct clgstate *clg = &q->clg; 200 u32 rnd = prandom_u32(); 201 202 /* 203 * Makes a comparison between rnd and the transition 204 * probabilities outgoing from the current state, then decides the 205 * next state and if the next packet has to be transmitted or lost. 206 * The four states correspond to: 207 * TX_IN_GAP_PERIOD => successfully transmitted packets within a gap period 208 * LOST_IN_BURST_PERIOD => isolated losses within a gap period 209 * LOST_IN_GAP_PERIOD => lost packets within a burst period 210 * TX_IN_GAP_PERIOD => successfully transmitted packets within a burst period 211 */ 212 switch (clg->state) { 213 case TX_IN_GAP_PERIOD: 214 if (rnd < clg->a4) { 215 clg->state = LOST_IN_BURST_PERIOD; 216 return true; 217 } else if (clg->a4 < rnd && rnd < clg->a1 + clg->a4) { 218 clg->state = LOST_IN_GAP_PERIOD; 219 return true; 220 } else if (clg->a1 + clg->a4 < rnd) { 221 clg->state = TX_IN_GAP_PERIOD; 222 } 223 224 break; 225 case TX_IN_BURST_PERIOD: 226 if (rnd < clg->a5) { 227 clg->state = LOST_IN_GAP_PERIOD; 228 return true; 229 } else { 230 clg->state = TX_IN_BURST_PERIOD; 231 } 232 233 break; 234 case LOST_IN_GAP_PERIOD: 235 if (rnd < clg->a3) 236 clg->state = TX_IN_BURST_PERIOD; 237 else if (clg->a3 < rnd && rnd < clg->a2 + clg->a3) { 238 clg->state = TX_IN_GAP_PERIOD; 239 } else if (clg->a2 + clg->a3 < rnd) { 240 clg->state = LOST_IN_GAP_PERIOD; 241 return true; 242 } 243 break; 244 case LOST_IN_BURST_PERIOD: 245 clg->state = TX_IN_GAP_PERIOD; 246 break; 247 } 248 249 return false; 250 } 251 252 /* loss_gilb_ell - Gilbert-Elliot model loss generator 253 * Generates losses according to the Gilbert-Elliot loss model or 254 * its special cases (Gilbert or Simple Gilbert) 255 * 256 * Makes a comparison between random number and the transition 257 * probabilities outgoing from the current state, then decides the 258 * next state. A second random number is extracted and the comparison 259 * with the loss probability of the current state decides if the next 260 * packet will be transmitted or lost. 261 */ 262 static bool loss_gilb_ell(struct netem_sched_data *q) 263 { 264 struct clgstate *clg = &q->clg; 265 266 switch (clg->state) { 267 case GOOD_STATE: 268 if (prandom_u32() < clg->a1) 269 clg->state = BAD_STATE; 270 if (prandom_u32() < clg->a4) 271 return true; 272 break; 273 case BAD_STATE: 274 if (prandom_u32() < clg->a2) 275 clg->state = GOOD_STATE; 276 if (prandom_u32() > clg->a3) 277 return true; 278 } 279 280 return false; 281 } 282 283 static bool loss_event(struct netem_sched_data *q) 284 { 285 switch (q->loss_model) { 286 case CLG_RANDOM: 287 /* Random packet drop 0 => none, ~0 => all */ 288 return q->loss && q->loss >= get_crandom(&q->loss_cor); 289 290 case CLG_4_STATES: 291 /* 4state loss model algorithm (used also for GI model) 292 * Extracts a value from the markov 4 state loss generator, 293 * if it is 1 drops a packet and if needed writes the event in 294 * the kernel logs 295 */ 296 return loss_4state(q); 297 298 case CLG_GILB_ELL: 299 /* Gilbert-Elliot loss model algorithm 300 * Extracts a value from the Gilbert-Elliot loss generator, 301 * if it is 1 drops a packet and if needed writes the event in 302 * the kernel logs 303 */ 304 return loss_gilb_ell(q); 305 } 306 307 return false; /* not reached */ 308 } 309 310 311 /* tabledist - return a pseudo-randomly distributed value with mean mu and 312 * std deviation sigma. Uses table lookup to approximate the desired 313 * distribution, and a uniformly-distributed pseudo-random source. 314 */ 315 static psched_tdiff_t tabledist(psched_tdiff_t mu, psched_tdiff_t sigma, 316 struct crndstate *state, 317 const struct disttable *dist) 318 { 319 psched_tdiff_t x; 320 long t; 321 u32 rnd; 322 323 if (sigma == 0) 324 return mu; 325 326 rnd = get_crandom(state); 327 328 /* default uniform distribution */ 329 if (dist == NULL) 330 return (rnd % (2*sigma)) - sigma + mu; 331 332 t = dist->table[rnd % dist->size]; 333 x = (sigma % NETEM_DIST_SCALE) * t; 334 if (x >= 0) 335 x += NETEM_DIST_SCALE/2; 336 else 337 x -= NETEM_DIST_SCALE/2; 338 339 return x / NETEM_DIST_SCALE + (sigma / NETEM_DIST_SCALE) * t + mu; 340 } 341 342 static psched_time_t packet_len_2_sched_time(unsigned int len, struct netem_sched_data *q) 343 { 344 u64 ticks; 345 346 len += q->packet_overhead; 347 348 if (q->cell_size) { 349 u32 cells = reciprocal_divide(len, q->cell_size_reciprocal); 350 351 if (len > cells * q->cell_size) /* extra cell needed for remainder */ 352 cells++; 353 len = cells * (q->cell_size + q->cell_overhead); 354 } 355 356 ticks = (u64)len * NSEC_PER_SEC; 357 358 do_div(ticks, q->rate); 359 return PSCHED_NS2TICKS(ticks); 360 } 361 362 static void tfifo_reset(struct Qdisc *sch) 363 { 364 struct netem_sched_data *q = qdisc_priv(sch); 365 struct rb_node *p; 366 367 while ((p = rb_first(&q->t_root))) { 368 struct sk_buff *skb = netem_rb_to_skb(p); 369 370 rb_erase(p, &q->t_root); 371 rtnl_kfree_skbs(skb, skb); 372 } 373 } 374 375 static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch) 376 { 377 struct netem_sched_data *q = qdisc_priv(sch); 378 psched_time_t tnext = netem_skb_cb(nskb)->time_to_send; 379 struct rb_node **p = &q->t_root.rb_node, *parent = NULL; 380 381 while (*p) { 382 struct sk_buff *skb; 383 384 parent = *p; 385 skb = netem_rb_to_skb(parent); 386 if (tnext >= netem_skb_cb(skb)->time_to_send) 387 p = &parent->rb_right; 388 else 389 p = &parent->rb_left; 390 } 391 rb_link_node(&nskb->rbnode, parent, p); 392 rb_insert_color(&nskb->rbnode, &q->t_root); 393 sch->q.qlen++; 394 } 395 396 /* netem can't properly corrupt a megapacket (like we get from GSO), so instead 397 * when we statistically choose to corrupt one, we instead segment it, returning 398 * the first packet to be corrupted, and re-enqueue the remaining frames 399 */ 400 static struct sk_buff *netem_segment(struct sk_buff *skb, struct Qdisc *sch, 401 struct sk_buff **to_free) 402 { 403 struct sk_buff *segs; 404 netdev_features_t features = netif_skb_features(skb); 405 406 segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK); 407 408 if (IS_ERR_OR_NULL(segs)) { 409 qdisc_drop(skb, sch, to_free); 410 return NULL; 411 } 412 consume_skb(skb); 413 return segs; 414 } 415 416 /* 417 * Insert one skb into qdisc. 418 * Note: parent depends on return value to account for queue length. 419 * NET_XMIT_DROP: queue length didn't change. 420 * NET_XMIT_SUCCESS: one skb was queued. 421 */ 422 static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch, 423 struct sk_buff **to_free) 424 { 425 struct netem_sched_data *q = qdisc_priv(sch); 426 /* We don't fill cb now as skb_unshare() may invalidate it */ 427 struct netem_skb_cb *cb; 428 struct sk_buff *skb2; 429 struct sk_buff *segs = NULL; 430 unsigned int len = 0, last_len, prev_len = qdisc_pkt_len(skb); 431 int nb = 0; 432 int count = 1; 433 int rc = NET_XMIT_SUCCESS; 434 435 /* Random duplication */ 436 if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor)) 437 ++count; 438 439 /* Drop packet? */ 440 if (loss_event(q)) { 441 if (q->ecn && INET_ECN_set_ce(skb)) 442 qdisc_qstats_drop(sch); /* mark packet */ 443 else 444 --count; 445 } 446 if (count == 0) { 447 qdisc_qstats_drop(sch); 448 __qdisc_drop(skb, to_free); 449 return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS; 450 } 451 452 /* If a delay is expected, orphan the skb. (orphaning usually takes 453 * place at TX completion time, so _before_ the link transit delay) 454 */ 455 if (q->latency || q->jitter) 456 skb_orphan_partial(skb); 457 458 /* 459 * If we need to duplicate packet, then re-insert at top of the 460 * qdisc tree, since parent queuer expects that only one 461 * skb will be queued. 462 */ 463 if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) { 464 struct Qdisc *rootq = qdisc_root(sch); 465 u32 dupsave = q->duplicate; /* prevent duplicating a dup... */ 466 467 q->duplicate = 0; 468 rootq->enqueue(skb2, rootq, to_free); 469 q->duplicate = dupsave; 470 } 471 472 /* 473 * Randomized packet corruption. 474 * Make copy if needed since we are modifying 475 * If packet is going to be hardware checksummed, then 476 * do it now in software before we mangle it. 477 */ 478 if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) { 479 if (skb_is_gso(skb)) { 480 segs = netem_segment(skb, sch, to_free); 481 if (!segs) 482 return NET_XMIT_DROP; 483 } else { 484 segs = skb; 485 } 486 487 skb = segs; 488 segs = segs->next; 489 490 if (!(skb = skb_unshare(skb, GFP_ATOMIC)) || 491 (skb->ip_summed == CHECKSUM_PARTIAL && 492 skb_checksum_help(skb))) { 493 rc = qdisc_drop(skb, sch, to_free); 494 goto finish_segs; 495 } 496 497 skb->data[prandom_u32() % skb_headlen(skb)] ^= 498 1<<(prandom_u32() % 8); 499 } 500 501 if (unlikely(skb_queue_len(&sch->q) >= sch->limit)) 502 return qdisc_drop(skb, sch, to_free); 503 504 qdisc_qstats_backlog_inc(sch, skb); 505 506 cb = netem_skb_cb(skb); 507 if (q->gap == 0 || /* not doing reordering */ 508 q->counter < q->gap - 1 || /* inside last reordering gap */ 509 q->reorder < get_crandom(&q->reorder_cor)) { 510 psched_time_t now; 511 psched_tdiff_t delay; 512 513 delay = tabledist(q->latency, q->jitter, 514 &q->delay_cor, q->delay_dist); 515 516 now = psched_get_time(); 517 518 if (q->rate) { 519 struct sk_buff *last; 520 521 if (!skb_queue_empty(&sch->q)) 522 last = skb_peek_tail(&sch->q); 523 else 524 last = netem_rb_to_skb(rb_last(&q->t_root)); 525 if (last) { 526 /* 527 * Last packet in queue is reference point (now), 528 * calculate this time bonus and subtract 529 * from delay. 530 */ 531 delay -= netem_skb_cb(last)->time_to_send - now; 532 delay = max_t(psched_tdiff_t, 0, delay); 533 now = netem_skb_cb(last)->time_to_send; 534 } 535 536 delay += packet_len_2_sched_time(qdisc_pkt_len(skb), q); 537 } 538 539 cb->time_to_send = now + delay; 540 cb->tstamp_save = skb->tstamp; 541 ++q->counter; 542 tfifo_enqueue(skb, sch); 543 } else { 544 /* 545 * Do re-ordering by putting one out of N packets at the front 546 * of the queue. 547 */ 548 cb->time_to_send = psched_get_time(); 549 q->counter = 0; 550 551 __skb_queue_head(&sch->q, skb); 552 sch->qstats.requeues++; 553 } 554 555 finish_segs: 556 if (segs) { 557 while (segs) { 558 skb2 = segs->next; 559 segs->next = NULL; 560 qdisc_skb_cb(segs)->pkt_len = segs->len; 561 last_len = segs->len; 562 rc = qdisc_enqueue(segs, sch, to_free); 563 if (rc != NET_XMIT_SUCCESS) { 564 if (net_xmit_drop_count(rc)) 565 qdisc_qstats_drop(sch); 566 } else { 567 nb++; 568 len += last_len; 569 } 570 segs = skb2; 571 } 572 sch->q.qlen += nb; 573 if (nb > 1) 574 qdisc_tree_reduce_backlog(sch, 1 - nb, prev_len - len); 575 } 576 return NET_XMIT_SUCCESS; 577 } 578 579 static struct sk_buff *netem_dequeue(struct Qdisc *sch) 580 { 581 struct netem_sched_data *q = qdisc_priv(sch); 582 struct sk_buff *skb; 583 struct rb_node *p; 584 585 tfifo_dequeue: 586 skb = __skb_dequeue(&sch->q); 587 if (skb) { 588 qdisc_qstats_backlog_dec(sch, skb); 589 deliver: 590 qdisc_bstats_update(sch, skb); 591 return skb; 592 } 593 p = rb_first(&q->t_root); 594 if (p) { 595 psched_time_t time_to_send; 596 597 skb = netem_rb_to_skb(p); 598 599 /* if more time remaining? */ 600 time_to_send = netem_skb_cb(skb)->time_to_send; 601 if (time_to_send <= psched_get_time()) { 602 rb_erase(p, &q->t_root); 603 604 sch->q.qlen--; 605 qdisc_qstats_backlog_dec(sch, skb); 606 skb->next = NULL; 607 skb->prev = NULL; 608 skb->tstamp = netem_skb_cb(skb)->tstamp_save; 609 610 #ifdef CONFIG_NET_CLS_ACT 611 /* 612 * If it's at ingress let's pretend the delay is 613 * from the network (tstamp will be updated). 614 */ 615 if (G_TC_FROM(skb->tc_verd) & AT_INGRESS) 616 skb->tstamp.tv64 = 0; 617 #endif 618 619 if (q->qdisc) { 620 struct sk_buff *to_free = NULL; 621 int err; 622 623 err = qdisc_enqueue(skb, q->qdisc, &to_free); 624 kfree_skb_list(to_free); 625 if (unlikely(err != NET_XMIT_SUCCESS)) { 626 if (net_xmit_drop_count(err)) { 627 qdisc_qstats_drop(sch); 628 qdisc_tree_reduce_backlog(sch, 1, 629 qdisc_pkt_len(skb)); 630 } 631 } 632 goto tfifo_dequeue; 633 } 634 goto deliver; 635 } 636 637 if (q->qdisc) { 638 skb = q->qdisc->ops->dequeue(q->qdisc); 639 if (skb) 640 goto deliver; 641 } 642 qdisc_watchdog_schedule(&q->watchdog, time_to_send); 643 } 644 645 if (q->qdisc) { 646 skb = q->qdisc->ops->dequeue(q->qdisc); 647 if (skb) 648 goto deliver; 649 } 650 return NULL; 651 } 652 653 static void netem_reset(struct Qdisc *sch) 654 { 655 struct netem_sched_data *q = qdisc_priv(sch); 656 657 qdisc_reset_queue(sch); 658 tfifo_reset(sch); 659 if (q->qdisc) 660 qdisc_reset(q->qdisc); 661 qdisc_watchdog_cancel(&q->watchdog); 662 } 663 664 static void dist_free(struct disttable *d) 665 { 666 kvfree(d); 667 } 668 669 /* 670 * Distribution data is a variable size payload containing 671 * signed 16 bit values. 672 */ 673 static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr) 674 { 675 struct netem_sched_data *q = qdisc_priv(sch); 676 size_t n = nla_len(attr)/sizeof(__s16); 677 const __s16 *data = nla_data(attr); 678 spinlock_t *root_lock; 679 struct disttable *d; 680 int i; 681 size_t s; 682 683 if (n > NETEM_DIST_MAX) 684 return -EINVAL; 685 686 s = sizeof(struct disttable) + n * sizeof(s16); 687 d = kmalloc(s, GFP_KERNEL | __GFP_NOWARN); 688 if (!d) 689 d = vmalloc(s); 690 if (!d) 691 return -ENOMEM; 692 693 d->size = n; 694 for (i = 0; i < n; i++) 695 d->table[i] = data[i]; 696 697 root_lock = qdisc_root_sleeping_lock(sch); 698 699 spin_lock_bh(root_lock); 700 swap(q->delay_dist, d); 701 spin_unlock_bh(root_lock); 702 703 dist_free(d); 704 return 0; 705 } 706 707 static void get_correlation(struct netem_sched_data *q, const struct nlattr *attr) 708 { 709 const struct tc_netem_corr *c = nla_data(attr); 710 711 init_crandom(&q->delay_cor, c->delay_corr); 712 init_crandom(&q->loss_cor, c->loss_corr); 713 init_crandom(&q->dup_cor, c->dup_corr); 714 } 715 716 static void get_reorder(struct netem_sched_data *q, const struct nlattr *attr) 717 { 718 const struct tc_netem_reorder *r = nla_data(attr); 719 720 q->reorder = r->probability; 721 init_crandom(&q->reorder_cor, r->correlation); 722 } 723 724 static void get_corrupt(struct netem_sched_data *q, const struct nlattr *attr) 725 { 726 const struct tc_netem_corrupt *r = nla_data(attr); 727 728 q->corrupt = r->probability; 729 init_crandom(&q->corrupt_cor, r->correlation); 730 } 731 732 static void get_rate(struct netem_sched_data *q, const struct nlattr *attr) 733 { 734 const struct tc_netem_rate *r = nla_data(attr); 735 736 q->rate = r->rate; 737 q->packet_overhead = r->packet_overhead; 738 q->cell_size = r->cell_size; 739 q->cell_overhead = r->cell_overhead; 740 if (q->cell_size) 741 q->cell_size_reciprocal = reciprocal_value(q->cell_size); 742 else 743 q->cell_size_reciprocal = (struct reciprocal_value) { 0 }; 744 } 745 746 static int get_loss_clg(struct netem_sched_data *q, const struct nlattr *attr) 747 { 748 const struct nlattr *la; 749 int rem; 750 751 nla_for_each_nested(la, attr, rem) { 752 u16 type = nla_type(la); 753 754 switch (type) { 755 case NETEM_LOSS_GI: { 756 const struct tc_netem_gimodel *gi = nla_data(la); 757 758 if (nla_len(la) < sizeof(struct tc_netem_gimodel)) { 759 pr_info("netem: incorrect gi model size\n"); 760 return -EINVAL; 761 } 762 763 q->loss_model = CLG_4_STATES; 764 765 q->clg.state = TX_IN_GAP_PERIOD; 766 q->clg.a1 = gi->p13; 767 q->clg.a2 = gi->p31; 768 q->clg.a3 = gi->p32; 769 q->clg.a4 = gi->p14; 770 q->clg.a5 = gi->p23; 771 break; 772 } 773 774 case NETEM_LOSS_GE: { 775 const struct tc_netem_gemodel *ge = nla_data(la); 776 777 if (nla_len(la) < sizeof(struct tc_netem_gemodel)) { 778 pr_info("netem: incorrect ge model size\n"); 779 return -EINVAL; 780 } 781 782 q->loss_model = CLG_GILB_ELL; 783 q->clg.state = GOOD_STATE; 784 q->clg.a1 = ge->p; 785 q->clg.a2 = ge->r; 786 q->clg.a3 = ge->h; 787 q->clg.a4 = ge->k1; 788 break; 789 } 790 791 default: 792 pr_info("netem: unknown loss type %u\n", type); 793 return -EINVAL; 794 } 795 } 796 797 return 0; 798 } 799 800 static const struct nla_policy netem_policy[TCA_NETEM_MAX + 1] = { 801 [TCA_NETEM_CORR] = { .len = sizeof(struct tc_netem_corr) }, 802 [TCA_NETEM_REORDER] = { .len = sizeof(struct tc_netem_reorder) }, 803 [TCA_NETEM_CORRUPT] = { .len = sizeof(struct tc_netem_corrupt) }, 804 [TCA_NETEM_RATE] = { .len = sizeof(struct tc_netem_rate) }, 805 [TCA_NETEM_LOSS] = { .type = NLA_NESTED }, 806 [TCA_NETEM_ECN] = { .type = NLA_U32 }, 807 [TCA_NETEM_RATE64] = { .type = NLA_U64 }, 808 }; 809 810 static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla, 811 const struct nla_policy *policy, int len) 812 { 813 int nested_len = nla_len(nla) - NLA_ALIGN(len); 814 815 if (nested_len < 0) { 816 pr_info("netem: invalid attributes len %d\n", nested_len); 817 return -EINVAL; 818 } 819 820 if (nested_len >= nla_attr_size(0)) 821 return nla_parse(tb, maxtype, nla_data(nla) + NLA_ALIGN(len), 822 nested_len, policy); 823 824 memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1)); 825 return 0; 826 } 827 828 /* Parse netlink message to set options */ 829 static int netem_change(struct Qdisc *sch, struct nlattr *opt) 830 { 831 struct netem_sched_data *q = qdisc_priv(sch); 832 struct nlattr *tb[TCA_NETEM_MAX + 1]; 833 struct tc_netem_qopt *qopt; 834 struct clgstate old_clg; 835 int old_loss_model = CLG_RANDOM; 836 int ret; 837 838 if (opt == NULL) 839 return -EINVAL; 840 841 qopt = nla_data(opt); 842 ret = parse_attr(tb, TCA_NETEM_MAX, opt, netem_policy, sizeof(*qopt)); 843 if (ret < 0) 844 return ret; 845 846 /* backup q->clg and q->loss_model */ 847 old_clg = q->clg; 848 old_loss_model = q->loss_model; 849 850 if (tb[TCA_NETEM_LOSS]) { 851 ret = get_loss_clg(q, tb[TCA_NETEM_LOSS]); 852 if (ret) { 853 q->loss_model = old_loss_model; 854 return ret; 855 } 856 } else { 857 q->loss_model = CLG_RANDOM; 858 } 859 860 if (tb[TCA_NETEM_DELAY_DIST]) { 861 ret = get_dist_table(sch, tb[TCA_NETEM_DELAY_DIST]); 862 if (ret) { 863 /* recover clg and loss_model, in case of 864 * q->clg and q->loss_model were modified 865 * in get_loss_clg() 866 */ 867 q->clg = old_clg; 868 q->loss_model = old_loss_model; 869 return ret; 870 } 871 } 872 873 sch->limit = qopt->limit; 874 875 q->latency = qopt->latency; 876 q->jitter = qopt->jitter; 877 q->limit = qopt->limit; 878 q->gap = qopt->gap; 879 q->counter = 0; 880 q->loss = qopt->loss; 881 q->duplicate = qopt->duplicate; 882 883 /* for compatibility with earlier versions. 884 * if gap is set, need to assume 100% probability 885 */ 886 if (q->gap) 887 q->reorder = ~0; 888 889 if (tb[TCA_NETEM_CORR]) 890 get_correlation(q, tb[TCA_NETEM_CORR]); 891 892 if (tb[TCA_NETEM_REORDER]) 893 get_reorder(q, tb[TCA_NETEM_REORDER]); 894 895 if (tb[TCA_NETEM_CORRUPT]) 896 get_corrupt(q, tb[TCA_NETEM_CORRUPT]); 897 898 if (tb[TCA_NETEM_RATE]) 899 get_rate(q, tb[TCA_NETEM_RATE]); 900 901 if (tb[TCA_NETEM_RATE64]) 902 q->rate = max_t(u64, q->rate, 903 nla_get_u64(tb[TCA_NETEM_RATE64])); 904 905 if (tb[TCA_NETEM_ECN]) 906 q->ecn = nla_get_u32(tb[TCA_NETEM_ECN]); 907 908 return ret; 909 } 910 911 static int netem_init(struct Qdisc *sch, struct nlattr *opt) 912 { 913 struct netem_sched_data *q = qdisc_priv(sch); 914 int ret; 915 916 if (!opt) 917 return -EINVAL; 918 919 qdisc_watchdog_init(&q->watchdog, sch); 920 921 q->loss_model = CLG_RANDOM; 922 ret = netem_change(sch, opt); 923 if (ret) 924 pr_info("netem: change failed\n"); 925 return ret; 926 } 927 928 static void netem_destroy(struct Qdisc *sch) 929 { 930 struct netem_sched_data *q = qdisc_priv(sch); 931 932 qdisc_watchdog_cancel(&q->watchdog); 933 if (q->qdisc) 934 qdisc_destroy(q->qdisc); 935 dist_free(q->delay_dist); 936 } 937 938 static int dump_loss_model(const struct netem_sched_data *q, 939 struct sk_buff *skb) 940 { 941 struct nlattr *nest; 942 943 nest = nla_nest_start(skb, TCA_NETEM_LOSS); 944 if (nest == NULL) 945 goto nla_put_failure; 946 947 switch (q->loss_model) { 948 case CLG_RANDOM: 949 /* legacy loss model */ 950 nla_nest_cancel(skb, nest); 951 return 0; /* no data */ 952 953 case CLG_4_STATES: { 954 struct tc_netem_gimodel gi = { 955 .p13 = q->clg.a1, 956 .p31 = q->clg.a2, 957 .p32 = q->clg.a3, 958 .p14 = q->clg.a4, 959 .p23 = q->clg.a5, 960 }; 961 962 if (nla_put(skb, NETEM_LOSS_GI, sizeof(gi), &gi)) 963 goto nla_put_failure; 964 break; 965 } 966 case CLG_GILB_ELL: { 967 struct tc_netem_gemodel ge = { 968 .p = q->clg.a1, 969 .r = q->clg.a2, 970 .h = q->clg.a3, 971 .k1 = q->clg.a4, 972 }; 973 974 if (nla_put(skb, NETEM_LOSS_GE, sizeof(ge), &ge)) 975 goto nla_put_failure; 976 break; 977 } 978 } 979 980 nla_nest_end(skb, nest); 981 return 0; 982 983 nla_put_failure: 984 nla_nest_cancel(skb, nest); 985 return -1; 986 } 987 988 static int netem_dump(struct Qdisc *sch, struct sk_buff *skb) 989 { 990 const struct netem_sched_data *q = qdisc_priv(sch); 991 struct nlattr *nla = (struct nlattr *) skb_tail_pointer(skb); 992 struct tc_netem_qopt qopt; 993 struct tc_netem_corr cor; 994 struct tc_netem_reorder reorder; 995 struct tc_netem_corrupt corrupt; 996 struct tc_netem_rate rate; 997 998 qopt.latency = q->latency; 999 qopt.jitter = q->jitter; 1000 qopt.limit = q->limit; 1001 qopt.loss = q->loss; 1002 qopt.gap = q->gap; 1003 qopt.duplicate = q->duplicate; 1004 if (nla_put(skb, TCA_OPTIONS, sizeof(qopt), &qopt)) 1005 goto nla_put_failure; 1006 1007 cor.delay_corr = q->delay_cor.rho; 1008 cor.loss_corr = q->loss_cor.rho; 1009 cor.dup_corr = q->dup_cor.rho; 1010 if (nla_put(skb, TCA_NETEM_CORR, sizeof(cor), &cor)) 1011 goto nla_put_failure; 1012 1013 reorder.probability = q->reorder; 1014 reorder.correlation = q->reorder_cor.rho; 1015 if (nla_put(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder)) 1016 goto nla_put_failure; 1017 1018 corrupt.probability = q->corrupt; 1019 corrupt.correlation = q->corrupt_cor.rho; 1020 if (nla_put(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt)) 1021 goto nla_put_failure; 1022 1023 if (q->rate >= (1ULL << 32)) { 1024 if (nla_put_u64_64bit(skb, TCA_NETEM_RATE64, q->rate, 1025 TCA_NETEM_PAD)) 1026 goto nla_put_failure; 1027 rate.rate = ~0U; 1028 } else { 1029 rate.rate = q->rate; 1030 } 1031 rate.packet_overhead = q->packet_overhead; 1032 rate.cell_size = q->cell_size; 1033 rate.cell_overhead = q->cell_overhead; 1034 if (nla_put(skb, TCA_NETEM_RATE, sizeof(rate), &rate)) 1035 goto nla_put_failure; 1036 1037 if (q->ecn && nla_put_u32(skb, TCA_NETEM_ECN, q->ecn)) 1038 goto nla_put_failure; 1039 1040 if (dump_loss_model(q, skb) != 0) 1041 goto nla_put_failure; 1042 1043 return nla_nest_end(skb, nla); 1044 1045 nla_put_failure: 1046 nlmsg_trim(skb, nla); 1047 return -1; 1048 } 1049 1050 static int netem_dump_class(struct Qdisc *sch, unsigned long cl, 1051 struct sk_buff *skb, struct tcmsg *tcm) 1052 { 1053 struct netem_sched_data *q = qdisc_priv(sch); 1054 1055 if (cl != 1 || !q->qdisc) /* only one class */ 1056 return -ENOENT; 1057 1058 tcm->tcm_handle |= TC_H_MIN(1); 1059 tcm->tcm_info = q->qdisc->handle; 1060 1061 return 0; 1062 } 1063 1064 static int netem_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new, 1065 struct Qdisc **old) 1066 { 1067 struct netem_sched_data *q = qdisc_priv(sch); 1068 1069 *old = qdisc_replace(sch, new, &q->qdisc); 1070 return 0; 1071 } 1072 1073 static struct Qdisc *netem_leaf(struct Qdisc *sch, unsigned long arg) 1074 { 1075 struct netem_sched_data *q = qdisc_priv(sch); 1076 return q->qdisc; 1077 } 1078 1079 static unsigned long netem_get(struct Qdisc *sch, u32 classid) 1080 { 1081 return 1; 1082 } 1083 1084 static void netem_put(struct Qdisc *sch, unsigned long arg) 1085 { 1086 } 1087 1088 static void netem_walk(struct Qdisc *sch, struct qdisc_walker *walker) 1089 { 1090 if (!walker->stop) { 1091 if (walker->count >= walker->skip) 1092 if (walker->fn(sch, 1, walker) < 0) { 1093 walker->stop = 1; 1094 return; 1095 } 1096 walker->count++; 1097 } 1098 } 1099 1100 static const struct Qdisc_class_ops netem_class_ops = { 1101 .graft = netem_graft, 1102 .leaf = netem_leaf, 1103 .get = netem_get, 1104 .put = netem_put, 1105 .walk = netem_walk, 1106 .dump = netem_dump_class, 1107 }; 1108 1109 static struct Qdisc_ops netem_qdisc_ops __read_mostly = { 1110 .id = "netem", 1111 .cl_ops = &netem_class_ops, 1112 .priv_size = sizeof(struct netem_sched_data), 1113 .enqueue = netem_enqueue, 1114 .dequeue = netem_dequeue, 1115 .peek = qdisc_peek_dequeued, 1116 .init = netem_init, 1117 .reset = netem_reset, 1118 .destroy = netem_destroy, 1119 .change = netem_change, 1120 .dump = netem_dump, 1121 .owner = THIS_MODULE, 1122 }; 1123 1124 1125 static int __init netem_module_init(void) 1126 { 1127 pr_info("netem: version " VERSION "\n"); 1128 return register_qdisc(&netem_qdisc_ops); 1129 } 1130 static void __exit netem_module_exit(void) 1131 { 1132 unregister_qdisc(&netem_qdisc_ops); 1133 } 1134 module_init(netem_module_init) 1135 module_exit(netem_module_exit) 1136 MODULE_LICENSE("GPL"); 1137