/*
 * net/sched/sch_netem.c	Network emulator
 *
 * 		This program is free software; you can redistribute it and/or
 * 		modify it under the terms of the GNU General Public License
 * 		as published by the Free Software Foundation; either version
 * 		2 of the License.
 *
 *  		Many of the algorithms and ideas for this came from
 *		NIST Net which is not copyrighted.
 *
 * Authors:	Stephen Hemminger <shemminger@osdl.org>
 *		Catalin(ux aka Dino) BOIE <catab at umbrella dot ro>
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/rtnetlink.h>
#include <linux/reciprocal_div.h>
#include <linux/rbtree.h>

#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/inet_ecn.h>

#define VERSION "1.3"

/*	Network Emulation Queuing algorithm.
	====================================

	Sources: [1] Mark Carson, Darrin Santay, "NIST Net - A Linux-based
		 Network Emulation Tool
		 [2] Luigi Rizzo, DummyNet for FreeBSD

	 ----------------------------------------------------------------

	 This started out as a simple way to delay outgoing packets to
	 test TCP but has grown to include most of the functionality
	 of a full blown network emulator like NISTnet. It can delay
	 packets and add random jitter (and correlation). The random
	 distribution can be loaded from a table as well to provide
	 normal, Pareto, or experimental curves. Packet loss,
	 duplication, and reordering can also be emulated.

	 This qdisc does not do classification that can be handled in
	 layering other disciplines.  It does not need to do bandwidth
	 control either since that can be handled by using token
	 bucket or other rate control.

     Correlated Loss Generator models

	Added generation of correlated loss according to the
	"Gilbert-Elliot" model, a 4-state markov model.

	References:
	[1] NetemCLG Home http://netgroup.uniroma2.it/NetemCLG
	[2] S. Salsano, F. Ludovici, A. Ordine, "Definition of a general
	and intuitive loss model for packet networks and its implementation
	in the Netem module in the Linux kernel", available in [1]

	Authors: Stefano Salsano <stefano.salsano at uniroma2.it
		 Fabio Ludovici <fabio.ludovici at yahoo.it>
*/

struct netem_sched_data {
	/* internal t(ime)fifo qdisc uses t_root and sch->limit */
	struct rb_root t_root;

	/* optional qdisc for classful handling (NULL at netem init) */
	struct Qdisc	*qdisc;

	struct qdisc_watchdog watchdog;

	/* configured mean delay and jitter, in psched ticks */
	psched_tdiff_t latency;
	psched_tdiff_t jitter;

	u32 loss;		/* random-loss probability, scaled to 0..~0 */
	u32 ecn;		/* if set, CE-mark instead of dropping on loss */
	u32 limit;		/* max packets held in the tfifo (sch->limit) */
	u32 counter;		/* packets sent since last forced reorder */
	u32 gap;		/* force a reorder every 'gap' packets */
	u32 duplicate;		/* duplication probability, scaled to 0..~0 */
	u32 reorder;		/* reorder probability, scaled to 0..~0 */
	u32 corrupt;		/* corruption probability, scaled to 0..~0 */
	u64 rate;		/* shaping rate in bytes/sec; 0 = disabled */
	s32 packet_overhead;	/* extra bytes charged per packet for shaping */
	u32 cell_size;		/* link-layer cell size (e.g. ATM); 0 = none */
	struct reciprocal_value cell_size_reciprocal;
	s32 cell_overhead;	/* extra bytes charged per cell */

	/* one correlated-random state per randomized knob */
	struct crndstate {
		u32 last;
		u32 rho;
	} delay_cor, loss_cor, dup_cor, reorder_cor, corrupt_cor;

	/* optional user-loaded delay distribution table */
	struct disttable {
		u32  size;
		s16 table[0];
	} *delay_dist;

	enum  {
		CLG_RANDOM,
		CLG_4_STATES,
		CLG_GILB_ELL,
	} loss_model;

	enum {
		TX_IN_GAP_PERIOD = 1,
		TX_IN_BURST_PERIOD,
		LOST_IN_GAP_PERIOD,
		LOST_IN_BURST_PERIOD,
	} _4_state_model;

	enum {
		GOOD_STATE = 1,
		BAD_STATE,
	} GE_state_model;

	/* Correlated Loss Generation models */
	struct clgstate {
		/* state of the Markov chain */
		u8 state;

		/* 4-states and Gilbert-Elliot models */
		u32 a1;	/* p13 for 4-states or p for GE */
		u32 a2;	/* p31 for 4-states or r for GE */
		u32 a3;	/* p32 for 4-states or h for GE */
		u32 a4;	/* p14 for 4-states or 1-k for GE */
		u32 a5; /* p23 used only in 4-states */
	} clg;

};

/* Time stamp put into socket buffer control block
 * Only valid when skbs are in our internal t(ime)fifo queue.
 *
 * As skb->rbnode uses same storage than skb->next, skb->prev and skb->tstamp,
 * and skb->next & skb->prev are scratch space for a qdisc,
 * we save skb->tstamp value in skb->cb[] before destroying it.
 */
struct netem_skb_cb {
	psched_time_t	time_to_send;
};


/* map an rbnode embedded in an skb back to its containing skb */
static struct sk_buff *netem_rb_to_skb(struct rb_node *rb)
{
	return rb_entry(rb, struct sk_buff, rbnode);
}

static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb)
{
	/* we assume we can use skb next/prev/tstamp as storage for rb_node */
	qdisc_cb_private_validate(skb, sizeof(struct netem_skb_cb));
	return (struct netem_skb_cb *)qdisc_skb_cb(skb)->data;
}

/* init_crandom - initialize correlated random number generator
 * Use entropy source for initial seed.
 */
static void init_crandom(struct crndstate *state, unsigned long rho)
{
	state->rho = rho;
	state->last = prandom_u32();
}

/* get_crandom - correlated random number generator
 * Next number depends on last value.
 * rho is scaled to avoid floating point.
 */
static u32 get_crandom(struct crndstate *state)
{
	u64 value, rho;
	unsigned long answer;

	if (state->rho == 0)	/* no correlation */
		return prandom_u32();

	value = prandom_u32();
	rho = (u64)state->rho + 1;
	/* convex blend of fresh randomness and the previous draw:
	 * answer = (value * (2^32 - rho) + last * rho) / 2^32
	 */
	answer = (value * ((1ull<<32) - rho) + state->last * rho) >> 32;
	state->last = answer;
	return answer;
}

/* loss_4state - 4-state model loss generator
 * Generates losses according to the 4-state Markov chain adopted in
 * the GI (General and Intuitive) loss model.
 * Returns true when the current packet should be dropped.
 */
static bool loss_4state(struct netem_sched_data *q)
{
	struct clgstate *clg = &q->clg;
	u32 rnd = prandom_u32();

	/*
	 * Makes a comparison between rnd and the transition
	 * probabilities outgoing from the current state, then decides the
	 * next state and if the next packet has to be transmitted or lost.
	 * The four states correspond to:
	 *   TX_IN_GAP_PERIOD => successfully transmitted packets within a gap period
	 *   LOST_IN_BURST_PERIOD => isolated losses within a gap period
	 *   LOST_IN_GAP_PERIOD => lost packets within a burst period
	 *   TX_IN_BURST_PERIOD => successfully transmitted packets within a burst period
	 */
	switch (clg->state) {
	case TX_IN_GAP_PERIOD:
		if (rnd < clg->a4) {
			clg->state = LOST_IN_BURST_PERIOD;
			return true;
		} else if (clg->a4 < rnd && rnd < clg->a1 + clg->a4) {
			clg->state = LOST_IN_GAP_PERIOD;
			return true;
		} else if (clg->a1 + clg->a4 < rnd) {
			clg->state = TX_IN_GAP_PERIOD;
		}
		/* NOTE(review): rnd exactly equal to a threshold falls
		 * through and keeps the current state — harmless (2^-32
		 * probability per boundary) but worth confirming intent.
		 */

		break;
	case TX_IN_BURST_PERIOD:
		if (rnd < clg->a5) {
			clg->state = LOST_IN_GAP_PERIOD;
			return true;
		} else {
			clg->state = TX_IN_BURST_PERIOD;
		}

		break;
	case LOST_IN_GAP_PERIOD:
		if (rnd < clg->a3)
			clg->state = TX_IN_BURST_PERIOD;
		else if (clg->a3 < rnd && rnd < clg->a2 + clg->a3) {
			clg->state = TX_IN_GAP_PERIOD;
		} else if (clg->a2 + clg->a3 < rnd) {
			clg->state = LOST_IN_GAP_PERIOD;
			return true;
		}
		break;
	case LOST_IN_BURST_PERIOD:
		clg->state = TX_IN_GAP_PERIOD;
		break;
	}

	return false;
}

/* loss_gilb_ell - Gilbert-Elliot model loss generator
 * Generates losses according to the Gilbert-Elliot loss model or
 * its special cases (Gilbert or Simple Gilbert)
 *
 * Makes a comparison between random number and the transition
 * probabilities outgoing from the current state, then decides the
 * next state. A second random number is extracted and the comparison
 * with the loss probability of the current state decides if the next
 * packet will be transmitted or lost.
 */
static bool loss_gilb_ell(struct netem_sched_data *q)
{
	struct clgstate *clg = &q->clg;

	switch (clg->state) {
	case GOOD_STATE:
		if (prandom_u32() < clg->a1)
			clg->state = BAD_STATE;
		if (prandom_u32() < clg->a4)
			return true;
		break;
	case BAD_STATE:
		if (prandom_u32() < clg->a2)
			clg->state = GOOD_STATE;
		if (prandom_u32() > clg->a3)
			return true;
	}

	return false;
}

/* loss_event - dispatch to the configured loss model.
 * Returns true when the current packet should be counted as lost.
 */
static bool loss_event(struct netem_sched_data *q)
{
	switch (q->loss_model) {
	case CLG_RANDOM:
		/* Random packet drop 0 => none, ~0 => all */
		return q->loss && q->loss >= get_crandom(&q->loss_cor);

	case CLG_4_STATES:
		/* 4state loss model algorithm (used also for GI model)
		 * Extracts a value from the markov 4 state loss generator,
		 * if it is 1 drops a packet and if needed writes the event in
		 * the kernel logs
		 */
		return loss_4state(q);

	case CLG_GILB_ELL:
		/* Gilbert-Elliot loss model algorithm
		 * Extracts a value from the Gilbert-Elliot loss generator,
		 * if it is 1 drops a packet and if needed writes the event in
		 * the kernel logs
		 */
		return loss_gilb_ell(q);
	}

	return false;	/* not reached */
}


/* tabledist - return a pseudo-randomly distributed value with mean mu and
 * std deviation sigma.  Uses table lookup to approximate the desired
 * distribution, and a uniformly-distributed pseudo-random source.
 * With dist == NULL the value is drawn uniformly from [mu-sigma, mu+sigma).
 */
static psched_tdiff_t tabledist(psched_tdiff_t mu, psched_tdiff_t sigma,
				struct crndstate *state,
				const struct disttable *dist)
{
	psched_tdiff_t x;
	long t;
	u32 rnd;

	if (sigma == 0)
		return mu;

	rnd = get_crandom(state);

	/* default uniform distribution */
	if (dist == NULL)
		return (rnd % (2*sigma)) - sigma + mu;

	t = dist->table[rnd % dist->size];
	/* scale the table entry by sigma in two parts (remainder and
	 * quotient w.r.t. NETEM_DIST_SCALE) to limit intermediate overflow;
	 * round the remainder term to nearest.
	 */
	x = (sigma % NETEM_DIST_SCALE) * t;
	if (x >= 0)
		x += NETEM_DIST_SCALE/2;
	else
		x -= NETEM_DIST_SCALE/2;

	return  x / NETEM_DIST_SCALE + (sigma / NETEM_DIST_SCALE) * t + mu;
}

/* packet_len_2_sched_time - convert a packet length into the psched ticks
 * needed to transmit it at q->rate, charging the configured per-packet
 * overhead and, when cell_size is set, rounding up to whole cells plus
 * per-cell overhead.  Caller must guarantee q->rate != 0 (do_div divisor).
 */
static psched_time_t packet_len_2_sched_time(unsigned int len, struct netem_sched_data *q)
{
	u64 ticks;

	len += q->packet_overhead;

	if (q->cell_size) {
		u32 cells = reciprocal_divide(len, q->cell_size_reciprocal);

		if (len > cells * q->cell_size)	/* extra cell needed for remainder */
			cells++;
		len = cells * (q->cell_size + q->cell_overhead);
	}

	ticks = (u64)len * NSEC_PER_SEC;

	do_div(ticks, q->rate);
	return PSCHED_NS2TICKS(ticks);
}

/* tfifo_reset - remove and free every skb queued in the time-ordered rbtree */
static void tfifo_reset(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct rb_node *p = rb_first(&q->t_root);

	while (p) {
		struct sk_buff *skb = netem_rb_to_skb(p);

		p = rb_next(p);
		rb_erase(&skb->rbnode, &q->t_root);
		rtnl_kfree_skbs(skb, skb);
	}
}

/* tfifo_enqueue - insert nskb into the rbtree ordered by time_to_send.
 * Equal timestamps descend to the right subtree, preserving FIFO order
 * among packets scheduled for the same instant.
 */
static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	psched_time_t tnext = netem_skb_cb(nskb)->time_to_send;
	struct rb_node **p = &q->t_root.rb_node, *parent = NULL;

	while (*p) {
		struct sk_buff *skb;

		parent = *p;
		skb = netem_rb_to_skb(parent);
		if (tnext >= netem_skb_cb(skb)->time_to_send)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&nskb->rbnode, parent, p);
	rb_insert_color(&nskb->rbnode, &q->t_root);
	sch->q.qlen++;
}

/* netem can't properly corrupt a megapacket (like we get from GSO), so instead
 * when we statistically choose to corrupt one, we instead segment it, returning
 * the first packet to be corrupted, and re-enqueue the remaining frames
 */
static struct sk_buff *netem_segment(struct sk_buff *skb, struct Qdisc *sch,
				     struct sk_buff **to_free)
{
	struct sk_buff *segs;
	netdev_features_t features = netif_skb_features(skb);

	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);

	if (IS_ERR_OR_NULL(segs)) {
		qdisc_drop(skb, sch, to_free);
		return NULL;
	}
	consume_skb(skb);
	return segs;
}

/* push skb at the head of the qdisc's internal skb list (used for reorder) */
static void netem_enqueue_skb_head(struct qdisc_skb_head *qh, struct sk_buff *skb)
{
	skb->next = qh->head;

	if (!qh->head)
		qh->tail = skb;
	qh->head = skb;
	qh->qlen++;
}

/*
 * Insert one skb into qdisc.
 * Note: parent depends on return value to account for queue length.
 * 	NET_XMIT_DROP: queue length didn't change.
 *      NET_XMIT_SUCCESS: one skb was queued.
 */
static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			 struct sk_buff **to_free)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	/* We don't fill cb now as skb_unshare() may invalidate it */
	struct netem_skb_cb *cb;
	struct sk_buff *skb2;
	struct sk_buff *segs = NULL;
	unsigned int len = 0, last_len, prev_len = qdisc_pkt_len(skb);
	int nb = 0;
	int count = 1;
	int rc = NET_XMIT_SUCCESS;

	/* Random duplication */
	if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
		++count;

	/* Drop packet? */
	if (loss_event(q)) {
		if (q->ecn && INET_ECN_set_ce(skb))
			qdisc_qstats_drop(sch); /* mark packet */
		else
			--count;
	}
	if (count == 0) {
		qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	}

	/* If a delay is expected, orphan the skb. (orphaning usually takes
	 * place at TX completion time, so _before_ the link transit delay)
	 */
	if (q->latency || q->jitter || q->rate)
		skb_orphan_partial(skb);

	/*
	 * If we need to duplicate packet, then re-insert at top of the
	 * qdisc tree, since parent queuer expects that only one
	 * skb will be queued.
	 */
	if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
		struct Qdisc *rootq = qdisc_root(sch);
		u32 dupsave = q->duplicate; /* prevent duplicating a dup... */

		q->duplicate = 0;
		rootq->enqueue(skb2, rootq, to_free);
		q->duplicate = dupsave;
	}

	/*
	 * Randomized packet corruption.
	 * 	Make copy if needed since we are modifying
	 * 	If packet is going to be hardware checksummed, then
	 * 	do it now in software before we mangle it.
	 */
	if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
		if (skb_is_gso(skb)) {
			/* corrupt only the first segment; the rest are
			 * re-enqueued unmodified at finish_segs
			 */
			segs = netem_segment(skb, sch, to_free);
			if (!segs)
				return NET_XMIT_DROP;
		} else {
			segs = skb;
		}

		skb = segs;
		segs = segs->next;

		skb = skb_unshare(skb, GFP_ATOMIC);
		if (unlikely(!skb)) {
			qdisc_qstats_drop(sch);
			goto finish_segs;
		}
		if (skb->ip_summed == CHECKSUM_PARTIAL &&
		    skb_checksum_help(skb)) {
			qdisc_drop(skb, sch, to_free);
			goto finish_segs;
		}

		/* flip one random bit somewhere in the linear header area */
		skb->data[prandom_u32() % skb_headlen(skb)] ^=
			1<<(prandom_u32() % 8);
	}

	if (unlikely(sch->q.qlen >= sch->limit))
		return qdisc_drop(skb, sch, to_free);

	qdisc_qstats_backlog_inc(sch, skb);

	cb = netem_skb_cb(skb);
	if (q->gap == 0 ||		/* not doing reordering */
	    q->counter < q->gap - 1 ||	/* inside last reordering gap */
	    q->reorder < get_crandom(&q->reorder_cor)) {
		psched_time_t now;
		psched_tdiff_t delay;

		delay = tabledist(q->latency, q->jitter,
				  &q->delay_cor, q->delay_dist);

		now = psched_get_time();

		if (q->rate) {
			struct netem_skb_cb *last = NULL;

			/* the shaping reference is the most recently scheduled
			 * packet: either the tail of the fast list or the
			 * rightmost (latest) entry in the tfifo rbtree.
			 */
			if (sch->q.tail)
				last = netem_skb_cb(sch->q.tail);
			if (q->t_root.rb_node) {
				struct sk_buff *t_skb;
				struct netem_skb_cb *t_last;

				t_skb = netem_rb_to_skb(rb_last(&q->t_root));
				t_last = netem_skb_cb(t_skb);
				if (!last ||
				    t_last->time_to_send > last->time_to_send) {
					last = t_last;
				}
			}

			if (last) {
				/*
				 * Last packet in queue is reference point (now),
				 * calculate this time bonus and subtract
				 * from delay.
				 */
				delay -= last->time_to_send - now;
				delay = max_t(psched_tdiff_t, 0, delay);
				now = last->time_to_send;
			}

			delay += packet_len_2_sched_time(qdisc_pkt_len(skb), q);
		}

		cb->time_to_send = now + delay;
		++q->counter;
		tfifo_enqueue(skb, sch);
	} else {
		/*
		 * Do re-ordering by putting one out of N packets at the front
		 * of the queue.
		 */
		cb->time_to_send = psched_get_time();
		q->counter = 0;

		netem_enqueue_skb_head(&sch->q, skb);
		sch->qstats.requeues++;
	}

finish_segs:
	if (segs) {
		/* re-enqueue the remaining GSO segments one by one and fix up
		 * the parent's qlen/backlog accounting: the parent charged one
		 * packet of prev_len bytes, we actually queued nb of len bytes.
		 */
		while (segs) {
			skb2 = segs->next;
			segs->next = NULL;
			qdisc_skb_cb(segs)->pkt_len = segs->len;
			last_len = segs->len;
			rc = qdisc_enqueue(segs, sch, to_free);
			if (rc != NET_XMIT_SUCCESS) {
				if (net_xmit_drop_count(rc))
					qdisc_qstats_drop(sch);
			} else {
				nb++;
				len += last_len;
			}
			segs = skb2;
		}
		sch->q.qlen += nb;
		if (nb > 1)
			qdisc_tree_reduce_backlog(sch, 1 - nb, prev_len - len);
	}
	return NET_XMIT_SUCCESS;
}

/* netem_dequeue - deliver packets whose time_to_send has arrived.
 * Packets ready now come from the fast list first, then the tfifo rbtree;
 * if a child qdisc is configured, due packets are passed through it.
 * Otherwise arm the watchdog for the next due timestamp.
 */
static struct sk_buff *netem_dequeue(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	struct rb_node *p;

tfifo_dequeue:
	skb = __qdisc_dequeue_head(&sch->q);
	if (skb) {
		qdisc_qstats_backlog_dec(sch, skb);
deliver:
		qdisc_bstats_update(sch, skb);
		return skb;
	}
	p = rb_first(&q->t_root);
	if (p) {
		psched_time_t time_to_send;

		skb = netem_rb_to_skb(p);

		/* if more time remaining? */
		time_to_send = netem_skb_cb(skb)->time_to_send;
		if (time_to_send <= psched_get_time()) {
			rb_erase(p, &q->t_root);

			sch->q.qlen--;
			qdisc_qstats_backlog_dec(sch, skb);
			skb->next = NULL;
			skb->prev = NULL;
			/* skb->dev shares skb->rbnode area,
			 * we need to restore its value.
			 */
			skb->dev = qdisc_dev(sch);

#ifdef CONFIG_NET_CLS_ACT
			/*
			 * If it's at ingress let's pretend the delay is
			 * from the network (tstamp will be updated).
			 */
			if (skb->tc_redirected && skb->tc_from_ingress)
				skb->tstamp = 0;
#endif

			if (q->qdisc) {
				unsigned int pkt_len = qdisc_pkt_len(skb);
				struct sk_buff *to_free = NULL;
				int err;

				err = qdisc_enqueue(skb, q->qdisc, &to_free);
				kfree_skb_list(to_free);
				if (err != NET_XMIT_SUCCESS &&
				    net_xmit_drop_count(err)) {
					qdisc_qstats_drop(sch);
					qdisc_tree_reduce_backlog(sch, 1,
								  pkt_len);
				}
				goto tfifo_dequeue;
			}
			goto deliver;
		}

		if (q->qdisc) {
			skb = q->qdisc->ops->dequeue(q->qdisc);
			if (skb)
				goto deliver;
		}
		qdisc_watchdog_schedule(&q->watchdog, time_to_send);
	}

	if (q->qdisc) {
		skb = q->qdisc->ops->dequeue(q->qdisc);
		if (skb)
			goto deliver;
	}
	return NULL;
}

/* netem_reset - flush all queued packets and cancel the pending watchdog */
static void netem_reset(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_reset_queue(sch);
	tfifo_reset(sch);
	if
(q->qdisc) 6861da177e4SLinus Torvalds qdisc_reset(q->qdisc); 68759cb5c67SPatrick McHardy qdisc_watchdog_cancel(&q->watchdog); 6881da177e4SLinus Torvalds } 6891da177e4SLinus Torvalds 6906373a9a2Sstephen hemminger static void dist_free(struct disttable *d) 6916373a9a2Sstephen hemminger { 6924cb28970SWANG Cong kvfree(d); 6936373a9a2Sstephen hemminger } 6946373a9a2Sstephen hemminger 6951da177e4SLinus Torvalds /* 6961da177e4SLinus Torvalds * Distribution data is a variable size payload containing 6971da177e4SLinus Torvalds * signed 16 bit values. 6981da177e4SLinus Torvalds */ 6991e90474cSPatrick McHardy static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr) 7001da177e4SLinus Torvalds { 7011da177e4SLinus Torvalds struct netem_sched_data *q = qdisc_priv(sch); 7026373a9a2Sstephen hemminger size_t n = nla_len(attr)/sizeof(__s16); 7031e90474cSPatrick McHardy const __s16 *data = nla_data(attr); 7047698b4fcSDavid S. Miller spinlock_t *root_lock; 7051da177e4SLinus Torvalds struct disttable *d; 7061da177e4SLinus Torvalds int i; 7071da177e4SLinus Torvalds 708df173bdaSstephen hemminger if (n > NETEM_DIST_MAX) 7091da177e4SLinus Torvalds return -EINVAL; 7101da177e4SLinus Torvalds 711752ade68SMichal Hocko d = kvmalloc(sizeof(struct disttable) + n * sizeof(s16), GFP_KERNEL); 7121da177e4SLinus Torvalds if (!d) 7131da177e4SLinus Torvalds return -ENOMEM; 7141da177e4SLinus Torvalds 7151da177e4SLinus Torvalds d->size = n; 7161da177e4SLinus Torvalds for (i = 0; i < n; i++) 7171da177e4SLinus Torvalds d->table[i] = data[i]; 7181da177e4SLinus Torvalds 719102396aeSJarek Poplawski root_lock = qdisc_root_sleeping_lock(sch); 7207698b4fcSDavid S. Miller 7217698b4fcSDavid S. Miller spin_lock_bh(root_lock); 722bb52c7acSEric Dumazet swap(q->delay_dist, d); 7237698b4fcSDavid S. 
Miller spin_unlock_bh(root_lock); 724bb52c7acSEric Dumazet 725bb52c7acSEric Dumazet dist_free(d); 7261da177e4SLinus Torvalds return 0; 7271da177e4SLinus Torvalds } 7281da177e4SLinus Torvalds 72949545a77SYang Yingliang static void get_correlation(struct netem_sched_data *q, const struct nlattr *attr) 7301da177e4SLinus Torvalds { 7311e90474cSPatrick McHardy const struct tc_netem_corr *c = nla_data(attr); 7321da177e4SLinus Torvalds 7331da177e4SLinus Torvalds init_crandom(&q->delay_cor, c->delay_corr); 7341da177e4SLinus Torvalds init_crandom(&q->loss_cor, c->loss_corr); 7351da177e4SLinus Torvalds init_crandom(&q->dup_cor, c->dup_corr); 7361da177e4SLinus Torvalds } 7371da177e4SLinus Torvalds 73849545a77SYang Yingliang static void get_reorder(struct netem_sched_data *q, const struct nlattr *attr) 7390dca51d3SStephen Hemminger { 7401e90474cSPatrick McHardy const struct tc_netem_reorder *r = nla_data(attr); 7410dca51d3SStephen Hemminger 7420dca51d3SStephen Hemminger q->reorder = r->probability; 7430dca51d3SStephen Hemminger init_crandom(&q->reorder_cor, r->correlation); 7440dca51d3SStephen Hemminger } 7450dca51d3SStephen Hemminger 74649545a77SYang Yingliang static void get_corrupt(struct netem_sched_data *q, const struct nlattr *attr) 747c865e5d9SStephen Hemminger { 7481e90474cSPatrick McHardy const struct tc_netem_corrupt *r = nla_data(attr); 749c865e5d9SStephen Hemminger 750c865e5d9SStephen Hemminger q->corrupt = r->probability; 751c865e5d9SStephen Hemminger init_crandom(&q->corrupt_cor, r->correlation); 752c865e5d9SStephen Hemminger } 753c865e5d9SStephen Hemminger 75449545a77SYang Yingliang static void get_rate(struct netem_sched_data *q, const struct nlattr *attr) 7557bc0f28cSHagen Paul Pfeifer { 7567bc0f28cSHagen Paul Pfeifer const struct tc_netem_rate *r = nla_data(attr); 7577bc0f28cSHagen Paul Pfeifer 7587bc0f28cSHagen Paul Pfeifer q->rate = r->rate; 75990b41a1cSHagen Paul Pfeifer q->packet_overhead = r->packet_overhead; 76090b41a1cSHagen Paul Pfeifer q->cell_size = 
r->cell_size; 761809fa972SHannes Frederic Sowa q->cell_overhead = r->cell_overhead; 76290b41a1cSHagen Paul Pfeifer if (q->cell_size) 76390b41a1cSHagen Paul Pfeifer q->cell_size_reciprocal = reciprocal_value(q->cell_size); 764809fa972SHannes Frederic Sowa else 765809fa972SHannes Frederic Sowa q->cell_size_reciprocal = (struct reciprocal_value) { 0 }; 7667bc0f28cSHagen Paul Pfeifer } 7677bc0f28cSHagen Paul Pfeifer 76849545a77SYang Yingliang static int get_loss_clg(struct netem_sched_data *q, const struct nlattr *attr) 769661b7972Sstephen hemminger { 770661b7972Sstephen hemminger const struct nlattr *la; 771661b7972Sstephen hemminger int rem; 772661b7972Sstephen hemminger 773661b7972Sstephen hemminger nla_for_each_nested(la, attr, rem) { 774661b7972Sstephen hemminger u16 type = nla_type(la); 775661b7972Sstephen hemminger 776661b7972Sstephen hemminger switch (type) { 777661b7972Sstephen hemminger case NETEM_LOSS_GI: { 778661b7972Sstephen hemminger const struct tc_netem_gimodel *gi = nla_data(la); 779661b7972Sstephen hemminger 7802494654dSstephen hemminger if (nla_len(la) < sizeof(struct tc_netem_gimodel)) { 781661b7972Sstephen hemminger pr_info("netem: incorrect gi model size\n"); 782661b7972Sstephen hemminger return -EINVAL; 783661b7972Sstephen hemminger } 784661b7972Sstephen hemminger 785661b7972Sstephen hemminger q->loss_model = CLG_4_STATES; 786661b7972Sstephen hemminger 7873fbac2a8SYang Yingliang q->clg.state = TX_IN_GAP_PERIOD; 788661b7972Sstephen hemminger q->clg.a1 = gi->p13; 789661b7972Sstephen hemminger q->clg.a2 = gi->p31; 790661b7972Sstephen hemminger q->clg.a3 = gi->p32; 791661b7972Sstephen hemminger q->clg.a4 = gi->p14; 792661b7972Sstephen hemminger q->clg.a5 = gi->p23; 793661b7972Sstephen hemminger break; 794661b7972Sstephen hemminger } 795661b7972Sstephen hemminger 796661b7972Sstephen hemminger case NETEM_LOSS_GE: { 797661b7972Sstephen hemminger const struct tc_netem_gemodel *ge = nla_data(la); 798661b7972Sstephen hemminger 7992494654dSstephen hemminger 
if (nla_len(la) < sizeof(struct tc_netem_gemodel)) { 8002494654dSstephen hemminger pr_info("netem: incorrect ge model size\n"); 801661b7972Sstephen hemminger return -EINVAL; 802661b7972Sstephen hemminger } 803661b7972Sstephen hemminger 804661b7972Sstephen hemminger q->loss_model = CLG_GILB_ELL; 8053fbac2a8SYang Yingliang q->clg.state = GOOD_STATE; 806661b7972Sstephen hemminger q->clg.a1 = ge->p; 807661b7972Sstephen hemminger q->clg.a2 = ge->r; 808661b7972Sstephen hemminger q->clg.a3 = ge->h; 809661b7972Sstephen hemminger q->clg.a4 = ge->k1; 810661b7972Sstephen hemminger break; 811661b7972Sstephen hemminger } 812661b7972Sstephen hemminger 813661b7972Sstephen hemminger default: 814661b7972Sstephen hemminger pr_info("netem: unknown loss type %u\n", type); 815661b7972Sstephen hemminger return -EINVAL; 816661b7972Sstephen hemminger } 817661b7972Sstephen hemminger } 818661b7972Sstephen hemminger 819661b7972Sstephen hemminger return 0; 820661b7972Sstephen hemminger } 821661b7972Sstephen hemminger 82227a3421eSPatrick McHardy static const struct nla_policy netem_policy[TCA_NETEM_MAX + 1] = { 82327a3421eSPatrick McHardy [TCA_NETEM_CORR] = { .len = sizeof(struct tc_netem_corr) }, 82427a3421eSPatrick McHardy [TCA_NETEM_REORDER] = { .len = sizeof(struct tc_netem_reorder) }, 82527a3421eSPatrick McHardy [TCA_NETEM_CORRUPT] = { .len = sizeof(struct tc_netem_corrupt) }, 8267bc0f28cSHagen Paul Pfeifer [TCA_NETEM_RATE] = { .len = sizeof(struct tc_netem_rate) }, 827661b7972Sstephen hemminger [TCA_NETEM_LOSS] = { .type = NLA_NESTED }, 828e4ae004bSEric Dumazet [TCA_NETEM_ECN] = { .type = NLA_U32 }, 8296a031f67SYang Yingliang [TCA_NETEM_RATE64] = { .type = NLA_U64 }, 83027a3421eSPatrick McHardy }; 83127a3421eSPatrick McHardy 8322c10b32bSThomas Graf static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla, 8332c10b32bSThomas Graf const struct nla_policy *policy, int len) 8342c10b32bSThomas Graf { 8352c10b32bSThomas Graf int nested_len = nla_len(nla) - NLA_ALIGN(len); 
8362c10b32bSThomas Graf 837661b7972Sstephen hemminger if (nested_len < 0) { 838661b7972Sstephen hemminger pr_info("netem: invalid attributes len %d\n", nested_len); 8392c10b32bSThomas Graf return -EINVAL; 840661b7972Sstephen hemminger } 841661b7972Sstephen hemminger 8422c10b32bSThomas Graf if (nested_len >= nla_attr_size(0)) 8432c10b32bSThomas Graf return nla_parse(tb, maxtype, nla_data(nla) + NLA_ALIGN(len), 844fceb6435SJohannes Berg nested_len, policy, NULL); 845661b7972Sstephen hemminger 8462c10b32bSThomas Graf memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1)); 8472c10b32bSThomas Graf return 0; 8482c10b32bSThomas Graf } 8492c10b32bSThomas Graf 850c865e5d9SStephen Hemminger /* Parse netlink message to set options */ 8511e90474cSPatrick McHardy static int netem_change(struct Qdisc *sch, struct nlattr *opt) 8521da177e4SLinus Torvalds { 8531da177e4SLinus Torvalds struct netem_sched_data *q = qdisc_priv(sch); 854b03f4672SPatrick McHardy struct nlattr *tb[TCA_NETEM_MAX + 1]; 8551da177e4SLinus Torvalds struct tc_netem_qopt *qopt; 85654a4b05cSYang Yingliang struct clgstate old_clg; 85754a4b05cSYang Yingliang int old_loss_model = CLG_RANDOM; 8581da177e4SLinus Torvalds int ret; 8591da177e4SLinus Torvalds 860b03f4672SPatrick McHardy if (opt == NULL) 8611da177e4SLinus Torvalds return -EINVAL; 8621da177e4SLinus Torvalds 8632c10b32bSThomas Graf qopt = nla_data(opt); 8642c10b32bSThomas Graf ret = parse_attr(tb, TCA_NETEM_MAX, opt, netem_policy, sizeof(*qopt)); 865b03f4672SPatrick McHardy if (ret < 0) 866b03f4672SPatrick McHardy return ret; 867b03f4672SPatrick McHardy 86854a4b05cSYang Yingliang /* backup q->clg and q->loss_model */ 86954a4b05cSYang Yingliang old_clg = q->clg; 87054a4b05cSYang Yingliang old_loss_model = q->loss_model; 87154a4b05cSYang Yingliang 87254a4b05cSYang Yingliang if (tb[TCA_NETEM_LOSS]) { 87349545a77SYang Yingliang ret = get_loss_clg(q, tb[TCA_NETEM_LOSS]); 87454a4b05cSYang Yingliang if (ret) { 87554a4b05cSYang Yingliang q->loss_model = 
old_loss_model; 87654a4b05cSYang Yingliang return ret; 87754a4b05cSYang Yingliang } 87854a4b05cSYang Yingliang } else { 87954a4b05cSYang Yingliang q->loss_model = CLG_RANDOM; 88054a4b05cSYang Yingliang } 88154a4b05cSYang Yingliang 88254a4b05cSYang Yingliang if (tb[TCA_NETEM_DELAY_DIST]) { 88354a4b05cSYang Yingliang ret = get_dist_table(sch, tb[TCA_NETEM_DELAY_DIST]); 88454a4b05cSYang Yingliang if (ret) { 88554a4b05cSYang Yingliang /* recover clg and loss_model, in case of 88654a4b05cSYang Yingliang * q->clg and q->loss_model were modified 88754a4b05cSYang Yingliang * in get_loss_clg() 88854a4b05cSYang Yingliang */ 88954a4b05cSYang Yingliang q->clg = old_clg; 89054a4b05cSYang Yingliang q->loss_model = old_loss_model; 89154a4b05cSYang Yingliang return ret; 89254a4b05cSYang Yingliang } 89354a4b05cSYang Yingliang } 89454a4b05cSYang Yingliang 89550612537SEric Dumazet sch->limit = qopt->limit; 8961da177e4SLinus Torvalds 8971da177e4SLinus Torvalds q->latency = qopt->latency; 8981da177e4SLinus Torvalds q->jitter = qopt->jitter; 8991da177e4SLinus Torvalds q->limit = qopt->limit; 9001da177e4SLinus Torvalds q->gap = qopt->gap; 9010dca51d3SStephen Hemminger q->counter = 0; 9021da177e4SLinus Torvalds q->loss = qopt->loss; 9031da177e4SLinus Torvalds q->duplicate = qopt->duplicate; 9041da177e4SLinus Torvalds 905bb2f8cc0SStephen Hemminger /* for compatibility with earlier versions. 
906bb2f8cc0SStephen Hemminger * if gap is set, need to assume 100% probability 9070dca51d3SStephen Hemminger */ 908a362e0a7SStephen Hemminger if (q->gap) 9090dca51d3SStephen Hemminger q->reorder = ~0; 9100dca51d3SStephen Hemminger 911265eb67fSStephen Hemminger if (tb[TCA_NETEM_CORR]) 91249545a77SYang Yingliang get_correlation(q, tb[TCA_NETEM_CORR]); 9131da177e4SLinus Torvalds 914265eb67fSStephen Hemminger if (tb[TCA_NETEM_REORDER]) 91549545a77SYang Yingliang get_reorder(q, tb[TCA_NETEM_REORDER]); 9161da177e4SLinus Torvalds 917265eb67fSStephen Hemminger if (tb[TCA_NETEM_CORRUPT]) 91849545a77SYang Yingliang get_corrupt(q, tb[TCA_NETEM_CORRUPT]); 9191da177e4SLinus Torvalds 9207bc0f28cSHagen Paul Pfeifer if (tb[TCA_NETEM_RATE]) 92149545a77SYang Yingliang get_rate(q, tb[TCA_NETEM_RATE]); 9227bc0f28cSHagen Paul Pfeifer 9236a031f67SYang Yingliang if (tb[TCA_NETEM_RATE64]) 9246a031f67SYang Yingliang q->rate = max_t(u64, q->rate, 9256a031f67SYang Yingliang nla_get_u64(tb[TCA_NETEM_RATE64])); 9266a031f67SYang Yingliang 927e4ae004bSEric Dumazet if (tb[TCA_NETEM_ECN]) 928e4ae004bSEric Dumazet q->ecn = nla_get_u32(tb[TCA_NETEM_ECN]); 929e4ae004bSEric Dumazet 930661b7972Sstephen hemminger return ret; 9311da177e4SLinus Torvalds } 9321da177e4SLinus Torvalds 9331e90474cSPatrick McHardy static int netem_init(struct Qdisc *sch, struct nlattr *opt) 9341da177e4SLinus Torvalds { 9351da177e4SLinus Torvalds struct netem_sched_data *q = qdisc_priv(sch); 9361da177e4SLinus Torvalds int ret; 9371da177e4SLinus Torvalds 938634576a1SNikolay Aleksandrov qdisc_watchdog_init(&q->watchdog, sch); 939634576a1SNikolay Aleksandrov 9401da177e4SLinus Torvalds if (!opt) 9411da177e4SLinus Torvalds return -EINVAL; 9421da177e4SLinus Torvalds 943661b7972Sstephen hemminger q->loss_model = CLG_RANDOM; 9441da177e4SLinus Torvalds ret = netem_change(sch, opt); 94550612537SEric Dumazet if (ret) 946250a65f7Sstephen hemminger pr_info("netem: change failed\n"); 9471da177e4SLinus Torvalds return ret; 9481da177e4SLinus 
Torvalds } 9491da177e4SLinus Torvalds 9501da177e4SLinus Torvalds static void netem_destroy(struct Qdisc *sch) 9511da177e4SLinus Torvalds { 9521da177e4SLinus Torvalds struct netem_sched_data *q = qdisc_priv(sch); 9531da177e4SLinus Torvalds 95459cb5c67SPatrick McHardy qdisc_watchdog_cancel(&q->watchdog); 95550612537SEric Dumazet if (q->qdisc) 9561da177e4SLinus Torvalds qdisc_destroy(q->qdisc); 9576373a9a2Sstephen hemminger dist_free(q->delay_dist); 9581da177e4SLinus Torvalds } 9591da177e4SLinus Torvalds 960661b7972Sstephen hemminger static int dump_loss_model(const struct netem_sched_data *q, 961661b7972Sstephen hemminger struct sk_buff *skb) 962661b7972Sstephen hemminger { 963661b7972Sstephen hemminger struct nlattr *nest; 964661b7972Sstephen hemminger 965661b7972Sstephen hemminger nest = nla_nest_start(skb, TCA_NETEM_LOSS); 966661b7972Sstephen hemminger if (nest == NULL) 967661b7972Sstephen hemminger goto nla_put_failure; 968661b7972Sstephen hemminger 969661b7972Sstephen hemminger switch (q->loss_model) { 970661b7972Sstephen hemminger case CLG_RANDOM: 971661b7972Sstephen hemminger /* legacy loss model */ 972661b7972Sstephen hemminger nla_nest_cancel(skb, nest); 973661b7972Sstephen hemminger return 0; /* no data */ 974661b7972Sstephen hemminger 975661b7972Sstephen hemminger case CLG_4_STATES: { 976661b7972Sstephen hemminger struct tc_netem_gimodel gi = { 977661b7972Sstephen hemminger .p13 = q->clg.a1, 978661b7972Sstephen hemminger .p31 = q->clg.a2, 979661b7972Sstephen hemminger .p32 = q->clg.a3, 980661b7972Sstephen hemminger .p14 = q->clg.a4, 981661b7972Sstephen hemminger .p23 = q->clg.a5, 982661b7972Sstephen hemminger }; 983661b7972Sstephen hemminger 9841b34ec43SDavid S. Miller if (nla_put(skb, NETEM_LOSS_GI, sizeof(gi), &gi)) 9851b34ec43SDavid S. 
Miller goto nla_put_failure; 986661b7972Sstephen hemminger break; 987661b7972Sstephen hemminger } 988661b7972Sstephen hemminger case CLG_GILB_ELL: { 989661b7972Sstephen hemminger struct tc_netem_gemodel ge = { 990661b7972Sstephen hemminger .p = q->clg.a1, 991661b7972Sstephen hemminger .r = q->clg.a2, 992661b7972Sstephen hemminger .h = q->clg.a3, 993661b7972Sstephen hemminger .k1 = q->clg.a4, 994661b7972Sstephen hemminger }; 995661b7972Sstephen hemminger 9961b34ec43SDavid S. Miller if (nla_put(skb, NETEM_LOSS_GE, sizeof(ge), &ge)) 9971b34ec43SDavid S. Miller goto nla_put_failure; 998661b7972Sstephen hemminger break; 999661b7972Sstephen hemminger } 1000661b7972Sstephen hemminger } 1001661b7972Sstephen hemminger 1002661b7972Sstephen hemminger nla_nest_end(skb, nest); 1003661b7972Sstephen hemminger return 0; 1004661b7972Sstephen hemminger 1005661b7972Sstephen hemminger nla_put_failure: 1006661b7972Sstephen hemminger nla_nest_cancel(skb, nest); 1007661b7972Sstephen hemminger return -1; 1008661b7972Sstephen hemminger } 1009661b7972Sstephen hemminger 10101da177e4SLinus Torvalds static int netem_dump(struct Qdisc *sch, struct sk_buff *skb) 10111da177e4SLinus Torvalds { 10121da177e4SLinus Torvalds const struct netem_sched_data *q = qdisc_priv(sch); 1013861d7f74Sstephen hemminger struct nlattr *nla = (struct nlattr *) skb_tail_pointer(skb); 10141da177e4SLinus Torvalds struct tc_netem_qopt qopt; 10151da177e4SLinus Torvalds struct tc_netem_corr cor; 10160dca51d3SStephen Hemminger struct tc_netem_reorder reorder; 1017c865e5d9SStephen Hemminger struct tc_netem_corrupt corrupt; 10187bc0f28cSHagen Paul Pfeifer struct tc_netem_rate rate; 10191da177e4SLinus Torvalds 10201da177e4SLinus Torvalds qopt.latency = q->latency; 10211da177e4SLinus Torvalds qopt.jitter = q->jitter; 10221da177e4SLinus Torvalds qopt.limit = q->limit; 10231da177e4SLinus Torvalds qopt.loss = q->loss; 10241da177e4SLinus Torvalds qopt.gap = q->gap; 10251da177e4SLinus Torvalds qopt.duplicate = q->duplicate; 
10261b34ec43SDavid S. Miller if (nla_put(skb, TCA_OPTIONS, sizeof(qopt), &qopt)) 10271b34ec43SDavid S. Miller goto nla_put_failure; 10281da177e4SLinus Torvalds 10291da177e4SLinus Torvalds cor.delay_corr = q->delay_cor.rho; 10301da177e4SLinus Torvalds cor.loss_corr = q->loss_cor.rho; 10311da177e4SLinus Torvalds cor.dup_corr = q->dup_cor.rho; 10321b34ec43SDavid S. Miller if (nla_put(skb, TCA_NETEM_CORR, sizeof(cor), &cor)) 10331b34ec43SDavid S. Miller goto nla_put_failure; 10340dca51d3SStephen Hemminger 10350dca51d3SStephen Hemminger reorder.probability = q->reorder; 10360dca51d3SStephen Hemminger reorder.correlation = q->reorder_cor.rho; 10371b34ec43SDavid S. Miller if (nla_put(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder)) 10381b34ec43SDavid S. Miller goto nla_put_failure; 10390dca51d3SStephen Hemminger 1040c865e5d9SStephen Hemminger corrupt.probability = q->corrupt; 1041c865e5d9SStephen Hemminger corrupt.correlation = q->corrupt_cor.rho; 10421b34ec43SDavid S. Miller if (nla_put(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt)) 10431b34ec43SDavid S. Miller goto nla_put_failure; 1044c865e5d9SStephen Hemminger 10456a031f67SYang Yingliang if (q->rate >= (1ULL << 32)) { 10462a51c1e8SNicolas Dichtel if (nla_put_u64_64bit(skb, TCA_NETEM_RATE64, q->rate, 10472a51c1e8SNicolas Dichtel TCA_NETEM_PAD)) 10486a031f67SYang Yingliang goto nla_put_failure; 10496a031f67SYang Yingliang rate.rate = ~0U; 10506a031f67SYang Yingliang } else { 10517bc0f28cSHagen Paul Pfeifer rate.rate = q->rate; 10526a031f67SYang Yingliang } 105390b41a1cSHagen Paul Pfeifer rate.packet_overhead = q->packet_overhead; 105490b41a1cSHagen Paul Pfeifer rate.cell_size = q->cell_size; 105590b41a1cSHagen Paul Pfeifer rate.cell_overhead = q->cell_overhead; 10561b34ec43SDavid S. Miller if (nla_put(skb, TCA_NETEM_RATE, sizeof(rate), &rate)) 10571b34ec43SDavid S. 
Miller goto nla_put_failure; 10587bc0f28cSHagen Paul Pfeifer 1059e4ae004bSEric Dumazet if (q->ecn && nla_put_u32(skb, TCA_NETEM_ECN, q->ecn)) 1060e4ae004bSEric Dumazet goto nla_put_failure; 1061e4ae004bSEric Dumazet 1062661b7972Sstephen hemminger if (dump_loss_model(q, skb) != 0) 1063661b7972Sstephen hemminger goto nla_put_failure; 1064661b7972Sstephen hemminger 1065861d7f74Sstephen hemminger return nla_nest_end(skb, nla); 10661da177e4SLinus Torvalds 10671e90474cSPatrick McHardy nla_put_failure: 1068861d7f74Sstephen hemminger nlmsg_trim(skb, nla); 10691da177e4SLinus Torvalds return -1; 10701da177e4SLinus Torvalds } 10711da177e4SLinus Torvalds 107210f6dfcfSstephen hemminger static int netem_dump_class(struct Qdisc *sch, unsigned long cl, 107310f6dfcfSstephen hemminger struct sk_buff *skb, struct tcmsg *tcm) 107410f6dfcfSstephen hemminger { 107510f6dfcfSstephen hemminger struct netem_sched_data *q = qdisc_priv(sch); 107610f6dfcfSstephen hemminger 107750612537SEric Dumazet if (cl != 1 || !q->qdisc) /* only one class */ 107810f6dfcfSstephen hemminger return -ENOENT; 107910f6dfcfSstephen hemminger 108010f6dfcfSstephen hemminger tcm->tcm_handle |= TC_H_MIN(1); 108110f6dfcfSstephen hemminger tcm->tcm_info = q->qdisc->handle; 108210f6dfcfSstephen hemminger 108310f6dfcfSstephen hemminger return 0; 108410f6dfcfSstephen hemminger } 108510f6dfcfSstephen hemminger 108610f6dfcfSstephen hemminger static int netem_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new, 108710f6dfcfSstephen hemminger struct Qdisc **old) 108810f6dfcfSstephen hemminger { 108910f6dfcfSstephen hemminger struct netem_sched_data *q = qdisc_priv(sch); 109010f6dfcfSstephen hemminger 109186a7996cSWANG Cong *old = qdisc_replace(sch, new, &q->qdisc); 109210f6dfcfSstephen hemminger return 0; 109310f6dfcfSstephen hemminger } 109410f6dfcfSstephen hemminger 109510f6dfcfSstephen hemminger static struct Qdisc *netem_leaf(struct Qdisc *sch, unsigned long arg) 109610f6dfcfSstephen hemminger { 
109710f6dfcfSstephen hemminger struct netem_sched_data *q = qdisc_priv(sch); 109810f6dfcfSstephen hemminger return q->qdisc; 109910f6dfcfSstephen hemminger } 110010f6dfcfSstephen hemminger 1101143976ceSWANG Cong static unsigned long netem_find(struct Qdisc *sch, u32 classid) 110210f6dfcfSstephen hemminger { 110310f6dfcfSstephen hemminger return 1; 110410f6dfcfSstephen hemminger } 110510f6dfcfSstephen hemminger 110610f6dfcfSstephen hemminger static void netem_walk(struct Qdisc *sch, struct qdisc_walker *walker) 110710f6dfcfSstephen hemminger { 110810f6dfcfSstephen hemminger if (!walker->stop) { 110910f6dfcfSstephen hemminger if (walker->count >= walker->skip) 111010f6dfcfSstephen hemminger if (walker->fn(sch, 1, walker) < 0) { 111110f6dfcfSstephen hemminger walker->stop = 1; 111210f6dfcfSstephen hemminger return; 111310f6dfcfSstephen hemminger } 111410f6dfcfSstephen hemminger walker->count++; 111510f6dfcfSstephen hemminger } 111610f6dfcfSstephen hemminger } 111710f6dfcfSstephen hemminger 111810f6dfcfSstephen hemminger static const struct Qdisc_class_ops netem_class_ops = { 111910f6dfcfSstephen hemminger .graft = netem_graft, 112010f6dfcfSstephen hemminger .leaf = netem_leaf, 1121143976ceSWANG Cong .find = netem_find, 112210f6dfcfSstephen hemminger .walk = netem_walk, 112310f6dfcfSstephen hemminger .dump = netem_dump_class, 112410f6dfcfSstephen hemminger }; 112510f6dfcfSstephen hemminger 112620fea08bSEric Dumazet static struct Qdisc_ops netem_qdisc_ops __read_mostly = { 11271da177e4SLinus Torvalds .id = "netem", 112810f6dfcfSstephen hemminger .cl_ops = &netem_class_ops, 11291da177e4SLinus Torvalds .priv_size = sizeof(struct netem_sched_data), 11301da177e4SLinus Torvalds .enqueue = netem_enqueue, 11311da177e4SLinus Torvalds .dequeue = netem_dequeue, 113277be155cSJarek Poplawski .peek = qdisc_peek_dequeued, 11331da177e4SLinus Torvalds .init = netem_init, 11341da177e4SLinus Torvalds .reset = netem_reset, 11351da177e4SLinus Torvalds .destroy = netem_destroy, 
11361da177e4SLinus Torvalds .change = netem_change, 11371da177e4SLinus Torvalds .dump = netem_dump, 11381da177e4SLinus Torvalds .owner = THIS_MODULE, 11391da177e4SLinus Torvalds }; 11401da177e4SLinus Torvalds 11411da177e4SLinus Torvalds 11421da177e4SLinus Torvalds static int __init netem_module_init(void) 11431da177e4SLinus Torvalds { 1144eb229c4cSStephen Hemminger pr_info("netem: version " VERSION "\n"); 11451da177e4SLinus Torvalds return register_qdisc(&netem_qdisc_ops); 11461da177e4SLinus Torvalds } 11471da177e4SLinus Torvalds static void __exit netem_module_exit(void) 11481da177e4SLinus Torvalds { 11491da177e4SLinus Torvalds unregister_qdisc(&netem_qdisc_ops); 11501da177e4SLinus Torvalds } 11511da177e4SLinus Torvalds module_init(netem_module_init) 11521da177e4SLinus Torvalds module_exit(netem_module_exit) 11531da177e4SLinus Torvalds MODULE_LICENSE("GPL"); 1154