/*
 * net/sched/sch_netem.c	Network emulator
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License.
 *
 *  		Many of the algorithms and ideas for this came from
 *		NIST Net which is not copyrighted.
 *
 * Authors:	Stephen Hemminger <shemminger@osdl.org>
 *		Catalin(ux aka Dino) BOIE <catab at umbrella dot ro>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>

#include <net/netlink.h>
#include <net/pkt_sched.h>

#define VERSION "1.2"

/* Network Emulation Queuing algorithm.
	====================================

	Sources: [1] Mark Carson, Darrin Santay, "NIST Net - A Linux-based
		 Network Emulation Tool
		 [2] Luigi Rizzo, DummyNet for FreeBSD

	 ----------------------------------------------------------------

	 This started out as a simple way to delay outgoing packets to
	 test TCP but has grown to include most of the functionality
	 of a full blown network emulator like NISTnet. It can delay
	 packets and add random jitter (and correlation). The random
	 distribution can be loaded from a table as well to provide
	 normal, Pareto, or experimental curves. Packet loss,
	 duplication, and reordering can also be emulated.

	 This qdisc does not do classification that can be handled in
	 layering other disciplines.  It does not need to do bandwidth
	 control either since that can be handled by using token
	 bucket or other rate control.

	 The simulator is limited by the Linux timer resolution
	 and will create packet bursts on the HZ boundary (1ms).
521da177e4SLinus Torvalds */ 531da177e4SLinus Torvalds 541da177e4SLinus Torvalds struct netem_sched_data { 551da177e4SLinus Torvalds struct Qdisc *qdisc; 5659cb5c67SPatrick McHardy struct qdisc_watchdog watchdog; 571da177e4SLinus Torvalds 58b407621cSStephen Hemminger psched_tdiff_t latency; 59b407621cSStephen Hemminger psched_tdiff_t jitter; 60b407621cSStephen Hemminger 611da177e4SLinus Torvalds u32 loss; 621da177e4SLinus Torvalds u32 limit; 631da177e4SLinus Torvalds u32 counter; 641da177e4SLinus Torvalds u32 gap; 651da177e4SLinus Torvalds u32 duplicate; 660dca51d3SStephen Hemminger u32 reorder; 67c865e5d9SStephen Hemminger u32 corrupt; 681da177e4SLinus Torvalds 691da177e4SLinus Torvalds struct crndstate { 70b407621cSStephen Hemminger u32 last; 71b407621cSStephen Hemminger u32 rho; 72c865e5d9SStephen Hemminger } delay_cor, loss_cor, dup_cor, reorder_cor, corrupt_cor; 731da177e4SLinus Torvalds 741da177e4SLinus Torvalds struct disttable { 751da177e4SLinus Torvalds u32 size; 761da177e4SLinus Torvalds s16 table[0]; 771da177e4SLinus Torvalds } *delay_dist; 781da177e4SLinus Torvalds }; 791da177e4SLinus Torvalds 801da177e4SLinus Torvalds /* Time stamp put into socket buffer control block */ 811da177e4SLinus Torvalds struct netem_skb_cb { 821da177e4SLinus Torvalds psched_time_t time_to_send; 831da177e4SLinus Torvalds }; 841da177e4SLinus Torvalds 855f86173bSJussi Kivilinna static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb) 865f86173bSJussi Kivilinna { 87175f9c1bSJussi Kivilinna BUILD_BUG_ON(sizeof(skb->cb) < 88175f9c1bSJussi Kivilinna sizeof(struct qdisc_skb_cb) + sizeof(struct netem_skb_cb)); 89175f9c1bSJussi Kivilinna return (struct netem_skb_cb *)qdisc_skb_cb(skb)->data; 905f86173bSJussi Kivilinna } 915f86173bSJussi Kivilinna 921da177e4SLinus Torvalds /* init_crandom - initialize correlated random number generator 931da177e4SLinus Torvalds * Use entropy source for initial seed. 
941da177e4SLinus Torvalds */ 951da177e4SLinus Torvalds static void init_crandom(struct crndstate *state, unsigned long rho) 961da177e4SLinus Torvalds { 971da177e4SLinus Torvalds state->rho = rho; 981da177e4SLinus Torvalds state->last = net_random(); 991da177e4SLinus Torvalds } 1001da177e4SLinus Torvalds 1011da177e4SLinus Torvalds /* get_crandom - correlated random number generator 1021da177e4SLinus Torvalds * Next number depends on last value. 1031da177e4SLinus Torvalds * rho is scaled to avoid floating point. 1041da177e4SLinus Torvalds */ 105b407621cSStephen Hemminger static u32 get_crandom(struct crndstate *state) 1061da177e4SLinus Torvalds { 1071da177e4SLinus Torvalds u64 value, rho; 1081da177e4SLinus Torvalds unsigned long answer; 1091da177e4SLinus Torvalds 110bb2f8cc0SStephen Hemminger if (state->rho == 0) /* no correlation */ 1111da177e4SLinus Torvalds return net_random(); 1121da177e4SLinus Torvalds 1131da177e4SLinus Torvalds value = net_random(); 1141da177e4SLinus Torvalds rho = (u64)state->rho + 1; 1151da177e4SLinus Torvalds answer = (value * ((1ull<<32) - rho) + state->last * rho) >> 32; 1161da177e4SLinus Torvalds state->last = answer; 1171da177e4SLinus Torvalds return answer; 1181da177e4SLinus Torvalds } 1191da177e4SLinus Torvalds 1201da177e4SLinus Torvalds /* tabledist - return a pseudo-randomly distributed value with mean mu and 1211da177e4SLinus Torvalds * std deviation sigma. Uses table lookup to approximate the desired 1221da177e4SLinus Torvalds * distribution, and a uniformly-distributed pseudo-random source. 
1231da177e4SLinus Torvalds */ 124b407621cSStephen Hemminger static psched_tdiff_t tabledist(psched_tdiff_t mu, psched_tdiff_t sigma, 125b407621cSStephen Hemminger struct crndstate *state, 126b407621cSStephen Hemminger const struct disttable *dist) 1271da177e4SLinus Torvalds { 128b407621cSStephen Hemminger psched_tdiff_t x; 129b407621cSStephen Hemminger long t; 130b407621cSStephen Hemminger u32 rnd; 1311da177e4SLinus Torvalds 1321da177e4SLinus Torvalds if (sigma == 0) 1331da177e4SLinus Torvalds return mu; 1341da177e4SLinus Torvalds 1351da177e4SLinus Torvalds rnd = get_crandom(state); 1361da177e4SLinus Torvalds 1371da177e4SLinus Torvalds /* default uniform distribution */ 1381da177e4SLinus Torvalds if (dist == NULL) 1391da177e4SLinus Torvalds return (rnd % (2*sigma)) - sigma + mu; 1401da177e4SLinus Torvalds 1411da177e4SLinus Torvalds t = dist->table[rnd % dist->size]; 1421da177e4SLinus Torvalds x = (sigma % NETEM_DIST_SCALE) * t; 1431da177e4SLinus Torvalds if (x >= 0) 1441da177e4SLinus Torvalds x += NETEM_DIST_SCALE/2; 1451da177e4SLinus Torvalds else 1461da177e4SLinus Torvalds x -= NETEM_DIST_SCALE/2; 1471da177e4SLinus Torvalds 1481da177e4SLinus Torvalds return x / NETEM_DIST_SCALE + (sigma / NETEM_DIST_SCALE) * t + mu; 1491da177e4SLinus Torvalds } 1501da177e4SLinus Torvalds 1510afb51e7SStephen Hemminger /* 1520afb51e7SStephen Hemminger * Insert one skb into qdisc. 1530afb51e7SStephen Hemminger * Note: parent depends on return value to account for queue length. 1540afb51e7SStephen Hemminger * NET_XMIT_DROP: queue length didn't change. 1550afb51e7SStephen Hemminger * NET_XMIT_SUCCESS: one skb was queued. 
1560afb51e7SStephen Hemminger */ 1571da177e4SLinus Torvalds static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch) 1581da177e4SLinus Torvalds { 1591da177e4SLinus Torvalds struct netem_sched_data *q = qdisc_priv(sch); 16089e1df74SGuillaume Chazarain /* We don't fill cb now as skb_unshare() may invalidate it */ 16189e1df74SGuillaume Chazarain struct netem_skb_cb *cb; 1620afb51e7SStephen Hemminger struct sk_buff *skb2; 1631da177e4SLinus Torvalds int ret; 1640afb51e7SStephen Hemminger int count = 1; 1651da177e4SLinus Torvalds 166771018e7SStephen Hemminger pr_debug("netem_enqueue skb=%p\n", skb); 1671da177e4SLinus Torvalds 1680afb51e7SStephen Hemminger /* Random duplication */ 1690afb51e7SStephen Hemminger if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor)) 1700afb51e7SStephen Hemminger ++count; 1710afb51e7SStephen Hemminger 1721da177e4SLinus Torvalds /* Random packet drop 0 => none, ~0 => all */ 1730afb51e7SStephen Hemminger if (q->loss && q->loss >= get_crandom(&q->loss_cor)) 1740afb51e7SStephen Hemminger --count; 1750afb51e7SStephen Hemminger 1760afb51e7SStephen Hemminger if (count == 0) { 1771da177e4SLinus Torvalds sch->qstats.drops++; 1781da177e4SLinus Torvalds kfree_skb(skb); 179c27f339aSJarek Poplawski return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS; 1801da177e4SLinus Torvalds } 1811da177e4SLinus Torvalds 1824e8a5201SDavid S. Miller skb_orphan(skb); 1834e8a5201SDavid S. Miller 1840afb51e7SStephen Hemminger /* 1850afb51e7SStephen Hemminger * If we need to duplicate packet, then re-insert at top of the 1860afb51e7SStephen Hemminger * qdisc tree, since parent queuer expects that only one 1870afb51e7SStephen Hemminger * skb will be queued. 188d5d75cd6SStephen Hemminger */ 1890afb51e7SStephen Hemminger if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) { 1907698b4fcSDavid S. Miller struct Qdisc *rootq = qdisc_root(sch); 1910afb51e7SStephen Hemminger u32 dupsave = q->duplicate; /* prevent duplicating a dup... 
*/ 1920afb51e7SStephen Hemminger q->duplicate = 0; 193d5d75cd6SStephen Hemminger 1945f86173bSJussi Kivilinna qdisc_enqueue_root(skb2, rootq); 1950afb51e7SStephen Hemminger q->duplicate = dupsave; 1961da177e4SLinus Torvalds } 1971da177e4SLinus Torvalds 198c865e5d9SStephen Hemminger /* 199c865e5d9SStephen Hemminger * Randomized packet corruption. 200c865e5d9SStephen Hemminger * Make copy if needed since we are modifying 201c865e5d9SStephen Hemminger * If packet is going to be hardware checksummed, then 202c865e5d9SStephen Hemminger * do it now in software before we mangle it. 203c865e5d9SStephen Hemminger */ 204c865e5d9SStephen Hemminger if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) { 205c865e5d9SStephen Hemminger if (!(skb = skb_unshare(skb, GFP_ATOMIC)) 20684fa7933SPatrick McHardy || (skb->ip_summed == CHECKSUM_PARTIAL 20784fa7933SPatrick McHardy && skb_checksum_help(skb))) { 208c865e5d9SStephen Hemminger sch->qstats.drops++; 209c865e5d9SStephen Hemminger return NET_XMIT_DROP; 210c865e5d9SStephen Hemminger } 211c865e5d9SStephen Hemminger 212c865e5d9SStephen Hemminger skb->data[net_random() % skb_headlen(skb)] ^= 1<<(net_random() % 8); 213c865e5d9SStephen Hemminger } 214c865e5d9SStephen Hemminger 2155f86173bSJussi Kivilinna cb = netem_skb_cb(skb); 2160dca51d3SStephen Hemminger if (q->gap == 0 /* not doing reordering */ 2170dca51d3SStephen Hemminger || q->counter < q->gap /* inside last reordering gap */ 2180dca51d3SStephen Hemminger || q->reorder < get_crandom(&q->reorder_cor)) { 2190f9f32acSStephen Hemminger psched_time_t now; 22007aaa115SStephen Hemminger psched_tdiff_t delay; 22107aaa115SStephen Hemminger 22207aaa115SStephen Hemminger delay = tabledist(q->latency, q->jitter, 22307aaa115SStephen Hemminger &q->delay_cor, q->delay_dist); 22407aaa115SStephen Hemminger 2253bebcda2SPatrick McHardy now = psched_get_time(); 2267c59e25fSPatrick McHardy cb->time_to_send = now + delay; 2271da177e4SLinus Torvalds ++q->counter; 2285f86173bSJussi Kivilinna ret 
= qdisc_enqueue(skb, q->qdisc); 2291da177e4SLinus Torvalds } else { 2300dca51d3SStephen Hemminger /* 2310dca51d3SStephen Hemminger * Do re-ordering by putting one out of N packets at the front 2320dca51d3SStephen Hemminger * of the queue. 2330dca51d3SStephen Hemminger */ 2343bebcda2SPatrick McHardy cb->time_to_send = psched_get_time(); 2350dca51d3SStephen Hemminger q->counter = 0; 2368ba25dadSJarek Poplawski 2378ba25dadSJarek Poplawski __skb_queue_head(&q->qdisc->q, skb); 2388ba25dadSJarek Poplawski q->qdisc->qstats.backlog += qdisc_pkt_len(skb); 2398ba25dadSJarek Poplawski q->qdisc->qstats.requeues++; 2408ba25dadSJarek Poplawski ret = NET_XMIT_SUCCESS; 2411da177e4SLinus Torvalds } 2421da177e4SLinus Torvalds 2431da177e4SLinus Torvalds if (likely(ret == NET_XMIT_SUCCESS)) { 2441da177e4SLinus Torvalds sch->q.qlen++; 2450abf77e5SJussi Kivilinna sch->bstats.bytes += qdisc_pkt_len(skb); 2461da177e4SLinus Torvalds sch->bstats.packets++; 247378a2f09SJarek Poplawski } else if (net_xmit_drop_count(ret)) { 2481da177e4SLinus Torvalds sch->qstats.drops++; 249378a2f09SJarek Poplawski } 2501da177e4SLinus Torvalds 251d5d75cd6SStephen Hemminger pr_debug("netem: enqueue ret %d\n", ret); 2521da177e4SLinus Torvalds return ret; 2531da177e4SLinus Torvalds } 2541da177e4SLinus Torvalds 2551da177e4SLinus Torvalds static unsigned int netem_drop(struct Qdisc* sch) 2561da177e4SLinus Torvalds { 2571da177e4SLinus Torvalds struct netem_sched_data *q = qdisc_priv(sch); 2586d037a26SPatrick McHardy unsigned int len = 0; 2591da177e4SLinus Torvalds 2606d037a26SPatrick McHardy if (q->qdisc->ops->drop && (len = q->qdisc->ops->drop(q->qdisc)) != 0) { 2611da177e4SLinus Torvalds sch->q.qlen--; 2621da177e4SLinus Torvalds sch->qstats.drops++; 2631da177e4SLinus Torvalds } 2641da177e4SLinus Torvalds return len; 2651da177e4SLinus Torvalds } 2661da177e4SLinus Torvalds 2671da177e4SLinus Torvalds static struct sk_buff *netem_dequeue(struct Qdisc *sch) 2681da177e4SLinus Torvalds { 2691da177e4SLinus Torvalds 
struct netem_sched_data *q = qdisc_priv(sch); 2701da177e4SLinus Torvalds struct sk_buff *skb; 2711da177e4SLinus Torvalds 27211274e5aSStephen Hemminger smp_mb(); 27311274e5aSStephen Hemminger if (sch->flags & TCQ_F_THROTTLED) 27411274e5aSStephen Hemminger return NULL; 27511274e5aSStephen Hemminger 27603c05f0dSJarek Poplawski skb = q->qdisc->ops->peek(q->qdisc); 277771018e7SStephen Hemminger if (skb) { 2785f86173bSJussi Kivilinna const struct netem_skb_cb *cb = netem_skb_cb(skb); 2793bebcda2SPatrick McHardy psched_time_t now = psched_get_time(); 2800f9f32acSStephen Hemminger 2810f9f32acSStephen Hemminger /* if more time remaining? */ 282104e0878SPatrick McHardy if (cb->time_to_send <= now) { 28377be155cSJarek Poplawski skb = qdisc_dequeue_peeked(q->qdisc); 28477be155cSJarek Poplawski if (unlikely(!skb)) 28503c05f0dSJarek Poplawski return NULL; 28603c05f0dSJarek Poplawski 287771018e7SStephen Hemminger pr_debug("netem_dequeue: return skb=%p\n", skb); 2881da177e4SLinus Torvalds sch->q.qlen--; 2890f9f32acSStephen Hemminger return skb; 29011274e5aSStephen Hemminger } 29107aaa115SStephen Hemminger 29211274e5aSStephen Hemminger qdisc_watchdog_schedule(&q->watchdog, cb->time_to_send); 2930f9f32acSStephen Hemminger } 2940f9f32acSStephen Hemminger 2950f9f32acSStephen Hemminger return NULL; 2961da177e4SLinus Torvalds } 2971da177e4SLinus Torvalds 2981da177e4SLinus Torvalds static void netem_reset(struct Qdisc *sch) 2991da177e4SLinus Torvalds { 3001da177e4SLinus Torvalds struct netem_sched_data *q = qdisc_priv(sch); 3011da177e4SLinus Torvalds 3021da177e4SLinus Torvalds qdisc_reset(q->qdisc); 3031da177e4SLinus Torvalds sch->q.qlen = 0; 30459cb5c67SPatrick McHardy qdisc_watchdog_cancel(&q->watchdog); 3051da177e4SLinus Torvalds } 3061da177e4SLinus Torvalds 3071da177e4SLinus Torvalds /* 3081da177e4SLinus Torvalds * Distribution data is a variable size payload containing 3091da177e4SLinus Torvalds * signed 16 bit values. 
3101da177e4SLinus Torvalds */ 3111e90474cSPatrick McHardy static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr) 3121da177e4SLinus Torvalds { 3131da177e4SLinus Torvalds struct netem_sched_data *q = qdisc_priv(sch); 3141e90474cSPatrick McHardy unsigned long n = nla_len(attr)/sizeof(__s16); 3151e90474cSPatrick McHardy const __s16 *data = nla_data(attr); 3167698b4fcSDavid S. Miller spinlock_t *root_lock; 3171da177e4SLinus Torvalds struct disttable *d; 3181da177e4SLinus Torvalds int i; 3191da177e4SLinus Torvalds 3201da177e4SLinus Torvalds if (n > 65536) 3211da177e4SLinus Torvalds return -EINVAL; 3221da177e4SLinus Torvalds 3231da177e4SLinus Torvalds d = kmalloc(sizeof(*d) + n*sizeof(d->table[0]), GFP_KERNEL); 3241da177e4SLinus Torvalds if (!d) 3251da177e4SLinus Torvalds return -ENOMEM; 3261da177e4SLinus Torvalds 3271da177e4SLinus Torvalds d->size = n; 3281da177e4SLinus Torvalds for (i = 0; i < n; i++) 3291da177e4SLinus Torvalds d->table[i] = data[i]; 3301da177e4SLinus Torvalds 331102396aeSJarek Poplawski root_lock = qdisc_root_sleeping_lock(sch); 3327698b4fcSDavid S. Miller 3337698b4fcSDavid S. Miller spin_lock_bh(root_lock); 334b94c8afcSPatrick McHardy kfree(q->delay_dist); 335b94c8afcSPatrick McHardy q->delay_dist = d; 3367698b4fcSDavid S. 
Miller spin_unlock_bh(root_lock); 3371da177e4SLinus Torvalds return 0; 3381da177e4SLinus Torvalds } 3391da177e4SLinus Torvalds 340265eb67fSStephen Hemminger static void get_correlation(struct Qdisc *sch, const struct nlattr *attr) 3411da177e4SLinus Torvalds { 3421da177e4SLinus Torvalds struct netem_sched_data *q = qdisc_priv(sch); 3431e90474cSPatrick McHardy const struct tc_netem_corr *c = nla_data(attr); 3441da177e4SLinus Torvalds 3451da177e4SLinus Torvalds init_crandom(&q->delay_cor, c->delay_corr); 3461da177e4SLinus Torvalds init_crandom(&q->loss_cor, c->loss_corr); 3471da177e4SLinus Torvalds init_crandom(&q->dup_cor, c->dup_corr); 3481da177e4SLinus Torvalds } 3491da177e4SLinus Torvalds 350265eb67fSStephen Hemminger static void get_reorder(struct Qdisc *sch, const struct nlattr *attr) 3510dca51d3SStephen Hemminger { 3520dca51d3SStephen Hemminger struct netem_sched_data *q = qdisc_priv(sch); 3531e90474cSPatrick McHardy const struct tc_netem_reorder *r = nla_data(attr); 3540dca51d3SStephen Hemminger 3550dca51d3SStephen Hemminger q->reorder = r->probability; 3560dca51d3SStephen Hemminger init_crandom(&q->reorder_cor, r->correlation); 3570dca51d3SStephen Hemminger } 3580dca51d3SStephen Hemminger 359265eb67fSStephen Hemminger static void get_corrupt(struct Qdisc *sch, const struct nlattr *attr) 360c865e5d9SStephen Hemminger { 361c865e5d9SStephen Hemminger struct netem_sched_data *q = qdisc_priv(sch); 3621e90474cSPatrick McHardy const struct tc_netem_corrupt *r = nla_data(attr); 363c865e5d9SStephen Hemminger 364c865e5d9SStephen Hemminger q->corrupt = r->probability; 365c865e5d9SStephen Hemminger init_crandom(&q->corrupt_cor, r->correlation); 366c865e5d9SStephen Hemminger } 367c865e5d9SStephen Hemminger 36827a3421eSPatrick McHardy static const struct nla_policy netem_policy[TCA_NETEM_MAX + 1] = { 36927a3421eSPatrick McHardy [TCA_NETEM_CORR] = { .len = sizeof(struct tc_netem_corr) }, 37027a3421eSPatrick McHardy [TCA_NETEM_REORDER] = { .len = sizeof(struct 
tc_netem_reorder) }, 37127a3421eSPatrick McHardy [TCA_NETEM_CORRUPT] = { .len = sizeof(struct tc_netem_corrupt) }, 37227a3421eSPatrick McHardy }; 37327a3421eSPatrick McHardy 3742c10b32bSThomas Graf static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla, 3752c10b32bSThomas Graf const struct nla_policy *policy, int len) 3762c10b32bSThomas Graf { 3772c10b32bSThomas Graf int nested_len = nla_len(nla) - NLA_ALIGN(len); 3782c10b32bSThomas Graf 3792c10b32bSThomas Graf if (nested_len < 0) 3802c10b32bSThomas Graf return -EINVAL; 3812c10b32bSThomas Graf if (nested_len >= nla_attr_size(0)) 3822c10b32bSThomas Graf return nla_parse(tb, maxtype, nla_data(nla) + NLA_ALIGN(len), 3832c10b32bSThomas Graf nested_len, policy); 3842c10b32bSThomas Graf memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1)); 3852c10b32bSThomas Graf return 0; 3862c10b32bSThomas Graf } 3872c10b32bSThomas Graf 388c865e5d9SStephen Hemminger /* Parse netlink message to set options */ 3891e90474cSPatrick McHardy static int netem_change(struct Qdisc *sch, struct nlattr *opt) 3901da177e4SLinus Torvalds { 3911da177e4SLinus Torvalds struct netem_sched_data *q = qdisc_priv(sch); 392b03f4672SPatrick McHardy struct nlattr *tb[TCA_NETEM_MAX + 1]; 3931da177e4SLinus Torvalds struct tc_netem_qopt *qopt; 3941da177e4SLinus Torvalds int ret; 3951da177e4SLinus Torvalds 396b03f4672SPatrick McHardy if (opt == NULL) 3971da177e4SLinus Torvalds return -EINVAL; 3981da177e4SLinus Torvalds 3992c10b32bSThomas Graf qopt = nla_data(opt); 4002c10b32bSThomas Graf ret = parse_attr(tb, TCA_NETEM_MAX, opt, netem_policy, sizeof(*qopt)); 401b03f4672SPatrick McHardy if (ret < 0) 402b03f4672SPatrick McHardy return ret; 403b03f4672SPatrick McHardy 404fb0305ceSPatrick McHardy ret = fifo_set_limit(q->qdisc, qopt->limit); 4051da177e4SLinus Torvalds if (ret) { 4061da177e4SLinus Torvalds pr_debug("netem: can't set fifo limit\n"); 4071da177e4SLinus Torvalds return ret; 4081da177e4SLinus Torvalds } 4091da177e4SLinus Torvalds 
4101da177e4SLinus Torvalds q->latency = qopt->latency; 4111da177e4SLinus Torvalds q->jitter = qopt->jitter; 4121da177e4SLinus Torvalds q->limit = qopt->limit; 4131da177e4SLinus Torvalds q->gap = qopt->gap; 4140dca51d3SStephen Hemminger q->counter = 0; 4151da177e4SLinus Torvalds q->loss = qopt->loss; 4161da177e4SLinus Torvalds q->duplicate = qopt->duplicate; 4171da177e4SLinus Torvalds 418bb2f8cc0SStephen Hemminger /* for compatibility with earlier versions. 419bb2f8cc0SStephen Hemminger * if gap is set, need to assume 100% probability 4200dca51d3SStephen Hemminger */ 421a362e0a7SStephen Hemminger if (q->gap) 4220dca51d3SStephen Hemminger q->reorder = ~0; 4230dca51d3SStephen Hemminger 424265eb67fSStephen Hemminger if (tb[TCA_NETEM_CORR]) 425265eb67fSStephen Hemminger get_correlation(sch, tb[TCA_NETEM_CORR]); 4261da177e4SLinus Torvalds 4271e90474cSPatrick McHardy if (tb[TCA_NETEM_DELAY_DIST]) { 4281e90474cSPatrick McHardy ret = get_dist_table(sch, tb[TCA_NETEM_DELAY_DIST]); 4291da177e4SLinus Torvalds if (ret) 4301da177e4SLinus Torvalds return ret; 4311da177e4SLinus Torvalds } 432c865e5d9SStephen Hemminger 433265eb67fSStephen Hemminger if (tb[TCA_NETEM_REORDER]) 434265eb67fSStephen Hemminger get_reorder(sch, tb[TCA_NETEM_REORDER]); 4351da177e4SLinus Torvalds 436265eb67fSStephen Hemminger if (tb[TCA_NETEM_CORRUPT]) 437265eb67fSStephen Hemminger get_corrupt(sch, tb[TCA_NETEM_CORRUPT]); 4381da177e4SLinus Torvalds 4391da177e4SLinus Torvalds return 0; 4401da177e4SLinus Torvalds } 4411da177e4SLinus Torvalds 442300ce174SStephen Hemminger /* 443300ce174SStephen Hemminger * Special case version of FIFO queue for use by netem. 
444300ce174SStephen Hemminger * It queues in order based on timestamps in skb's 445300ce174SStephen Hemminger */ 446300ce174SStephen Hemminger struct fifo_sched_data { 447300ce174SStephen Hemminger u32 limit; 448075aa573SStephen Hemminger psched_time_t oldest; 449300ce174SStephen Hemminger }; 450300ce174SStephen Hemminger 451300ce174SStephen Hemminger static int tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch) 452300ce174SStephen Hemminger { 453300ce174SStephen Hemminger struct fifo_sched_data *q = qdisc_priv(sch); 454300ce174SStephen Hemminger struct sk_buff_head *list = &sch->q; 4555f86173bSJussi Kivilinna psched_time_t tnext = netem_skb_cb(nskb)->time_to_send; 456300ce174SStephen Hemminger struct sk_buff *skb; 457300ce174SStephen Hemminger 458300ce174SStephen Hemminger if (likely(skb_queue_len(list) < q->limit)) { 459075aa573SStephen Hemminger /* Optimize for add at tail */ 460104e0878SPatrick McHardy if (likely(skb_queue_empty(list) || tnext >= q->oldest)) { 461075aa573SStephen Hemminger q->oldest = tnext; 462075aa573SStephen Hemminger return qdisc_enqueue_tail(nskb, sch); 463075aa573SStephen Hemminger } 464075aa573SStephen Hemminger 465300ce174SStephen Hemminger skb_queue_reverse_walk(list, skb) { 4665f86173bSJussi Kivilinna const struct netem_skb_cb *cb = netem_skb_cb(skb); 467300ce174SStephen Hemminger 468104e0878SPatrick McHardy if (tnext >= cb->time_to_send) 469300ce174SStephen Hemminger break; 470300ce174SStephen Hemminger } 471300ce174SStephen Hemminger 472300ce174SStephen Hemminger __skb_queue_after(list, skb, nskb); 473300ce174SStephen Hemminger 4740abf77e5SJussi Kivilinna sch->qstats.backlog += qdisc_pkt_len(nskb); 4750abf77e5SJussi Kivilinna sch->bstats.bytes += qdisc_pkt_len(nskb); 476300ce174SStephen Hemminger sch->bstats.packets++; 477300ce174SStephen Hemminger 478300ce174SStephen Hemminger return NET_XMIT_SUCCESS; 479300ce174SStephen Hemminger } 480300ce174SStephen Hemminger 481075aa573SStephen Hemminger return qdisc_reshape_fail(nskb, 
sch); 482300ce174SStephen Hemminger } 483300ce174SStephen Hemminger 4841e90474cSPatrick McHardy static int tfifo_init(struct Qdisc *sch, struct nlattr *opt) 485300ce174SStephen Hemminger { 486300ce174SStephen Hemminger struct fifo_sched_data *q = qdisc_priv(sch); 487300ce174SStephen Hemminger 488300ce174SStephen Hemminger if (opt) { 4891e90474cSPatrick McHardy struct tc_fifo_qopt *ctl = nla_data(opt); 4901e90474cSPatrick McHardy if (nla_len(opt) < sizeof(*ctl)) 491300ce174SStephen Hemminger return -EINVAL; 492300ce174SStephen Hemminger 493300ce174SStephen Hemminger q->limit = ctl->limit; 494300ce174SStephen Hemminger } else 4955ce2d488SDavid S. Miller q->limit = max_t(u32, qdisc_dev(sch)->tx_queue_len, 1); 496300ce174SStephen Hemminger 497a084980dSPatrick McHardy q->oldest = PSCHED_PASTPERFECT; 498300ce174SStephen Hemminger return 0; 499300ce174SStephen Hemminger } 500300ce174SStephen Hemminger 501300ce174SStephen Hemminger static int tfifo_dump(struct Qdisc *sch, struct sk_buff *skb) 502300ce174SStephen Hemminger { 503300ce174SStephen Hemminger struct fifo_sched_data *q = qdisc_priv(sch); 504300ce174SStephen Hemminger struct tc_fifo_qopt opt = { .limit = q->limit }; 505300ce174SStephen Hemminger 5061e90474cSPatrick McHardy NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt); 507300ce174SStephen Hemminger return skb->len; 508300ce174SStephen Hemminger 5091e90474cSPatrick McHardy nla_put_failure: 510300ce174SStephen Hemminger return -1; 511300ce174SStephen Hemminger } 512300ce174SStephen Hemminger 51320fea08bSEric Dumazet static struct Qdisc_ops tfifo_qdisc_ops __read_mostly = { 514300ce174SStephen Hemminger .id = "tfifo", 515300ce174SStephen Hemminger .priv_size = sizeof(struct fifo_sched_data), 516300ce174SStephen Hemminger .enqueue = tfifo_enqueue, 517300ce174SStephen Hemminger .dequeue = qdisc_dequeue_head, 5188e3af978SJarek Poplawski .peek = qdisc_peek_head, 519300ce174SStephen Hemminger .drop = qdisc_queue_drop, 520300ce174SStephen Hemminger .init = tfifo_init, 
521300ce174SStephen Hemminger .reset = qdisc_reset_queue, 522300ce174SStephen Hemminger .change = tfifo_init, 523300ce174SStephen Hemminger .dump = tfifo_dump, 524300ce174SStephen Hemminger }; 525300ce174SStephen Hemminger 5261e90474cSPatrick McHardy static int netem_init(struct Qdisc *sch, struct nlattr *opt) 5271da177e4SLinus Torvalds { 5281da177e4SLinus Torvalds struct netem_sched_data *q = qdisc_priv(sch); 5291da177e4SLinus Torvalds int ret; 5301da177e4SLinus Torvalds 5311da177e4SLinus Torvalds if (!opt) 5321da177e4SLinus Torvalds return -EINVAL; 5331da177e4SLinus Torvalds 53459cb5c67SPatrick McHardy qdisc_watchdog_init(&q->watchdog, sch); 5351da177e4SLinus Torvalds 5365ce2d488SDavid S. Miller q->qdisc = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue, 537bb949fbdSDavid S. Miller &tfifo_qdisc_ops, 5389f9afec4SPatrick McHardy TC_H_MAKE(sch->handle, 1)); 5391da177e4SLinus Torvalds if (!q->qdisc) { 5401da177e4SLinus Torvalds pr_debug("netem: qdisc create failed\n"); 5411da177e4SLinus Torvalds return -ENOMEM; 5421da177e4SLinus Torvalds } 5431da177e4SLinus Torvalds 5441da177e4SLinus Torvalds ret = netem_change(sch, opt); 5451da177e4SLinus Torvalds if (ret) { 5461da177e4SLinus Torvalds pr_debug("netem: change failed\n"); 5471da177e4SLinus Torvalds qdisc_destroy(q->qdisc); 5481da177e4SLinus Torvalds } 5491da177e4SLinus Torvalds return ret; 5501da177e4SLinus Torvalds } 5511da177e4SLinus Torvalds 5521da177e4SLinus Torvalds static void netem_destroy(struct Qdisc *sch) 5531da177e4SLinus Torvalds { 5541da177e4SLinus Torvalds struct netem_sched_data *q = qdisc_priv(sch); 5551da177e4SLinus Torvalds 55659cb5c67SPatrick McHardy qdisc_watchdog_cancel(&q->watchdog); 5571da177e4SLinus Torvalds qdisc_destroy(q->qdisc); 5581da177e4SLinus Torvalds kfree(q->delay_dist); 5591da177e4SLinus Torvalds } 5601da177e4SLinus Torvalds 5611da177e4SLinus Torvalds static int netem_dump(struct Qdisc *sch, struct sk_buff *skb) 5621da177e4SLinus Torvalds { 5631da177e4SLinus Torvalds const struct 
netem_sched_data *q = qdisc_priv(sch); 56427a884dcSArnaldo Carvalho de Melo unsigned char *b = skb_tail_pointer(skb); 5651e90474cSPatrick McHardy struct nlattr *nla = (struct nlattr *) b; 5661da177e4SLinus Torvalds struct tc_netem_qopt qopt; 5671da177e4SLinus Torvalds struct tc_netem_corr cor; 5680dca51d3SStephen Hemminger struct tc_netem_reorder reorder; 569c865e5d9SStephen Hemminger struct tc_netem_corrupt corrupt; 5701da177e4SLinus Torvalds 5711da177e4SLinus Torvalds qopt.latency = q->latency; 5721da177e4SLinus Torvalds qopt.jitter = q->jitter; 5731da177e4SLinus Torvalds qopt.limit = q->limit; 5741da177e4SLinus Torvalds qopt.loss = q->loss; 5751da177e4SLinus Torvalds qopt.gap = q->gap; 5761da177e4SLinus Torvalds qopt.duplicate = q->duplicate; 5771e90474cSPatrick McHardy NLA_PUT(skb, TCA_OPTIONS, sizeof(qopt), &qopt); 5781da177e4SLinus Torvalds 5791da177e4SLinus Torvalds cor.delay_corr = q->delay_cor.rho; 5801da177e4SLinus Torvalds cor.loss_corr = q->loss_cor.rho; 5811da177e4SLinus Torvalds cor.dup_corr = q->dup_cor.rho; 5821e90474cSPatrick McHardy NLA_PUT(skb, TCA_NETEM_CORR, sizeof(cor), &cor); 5830dca51d3SStephen Hemminger 5840dca51d3SStephen Hemminger reorder.probability = q->reorder; 5850dca51d3SStephen Hemminger reorder.correlation = q->reorder_cor.rho; 5861e90474cSPatrick McHardy NLA_PUT(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder); 5870dca51d3SStephen Hemminger 588c865e5d9SStephen Hemminger corrupt.probability = q->corrupt; 589c865e5d9SStephen Hemminger corrupt.correlation = q->corrupt_cor.rho; 5901e90474cSPatrick McHardy NLA_PUT(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt); 591c865e5d9SStephen Hemminger 5921e90474cSPatrick McHardy nla->nla_len = skb_tail_pointer(skb) - b; 5931da177e4SLinus Torvalds 5941da177e4SLinus Torvalds return skb->len; 5951da177e4SLinus Torvalds 5961e90474cSPatrick McHardy nla_put_failure: 597dc5fc579SArnaldo Carvalho de Melo nlmsg_trim(skb, b); 5981da177e4SLinus Torvalds return -1; 5991da177e4SLinus Torvalds } 
6001da177e4SLinus Torvalds 60120fea08bSEric Dumazet static struct Qdisc_ops netem_qdisc_ops __read_mostly = { 6021da177e4SLinus Torvalds .id = "netem", 6031da177e4SLinus Torvalds .priv_size = sizeof(struct netem_sched_data), 6041da177e4SLinus Torvalds .enqueue = netem_enqueue, 6051da177e4SLinus Torvalds .dequeue = netem_dequeue, 60677be155cSJarek Poplawski .peek = qdisc_peek_dequeued, 6071da177e4SLinus Torvalds .drop = netem_drop, 6081da177e4SLinus Torvalds .init = netem_init, 6091da177e4SLinus Torvalds .reset = netem_reset, 6101da177e4SLinus Torvalds .destroy = netem_destroy, 6111da177e4SLinus Torvalds .change = netem_change, 6121da177e4SLinus Torvalds .dump = netem_dump, 6131da177e4SLinus Torvalds .owner = THIS_MODULE, 6141da177e4SLinus Torvalds }; 6151da177e4SLinus Torvalds 6161da177e4SLinus Torvalds 6171da177e4SLinus Torvalds static int __init netem_module_init(void) 6181da177e4SLinus Torvalds { 619eb229c4cSStephen Hemminger pr_info("netem: version " VERSION "\n"); 6201da177e4SLinus Torvalds return register_qdisc(&netem_qdisc_ops); 6211da177e4SLinus Torvalds } 6221da177e4SLinus Torvalds static void __exit netem_module_exit(void) 6231da177e4SLinus Torvalds { 6241da177e4SLinus Torvalds unregister_qdisc(&netem_qdisc_ops); 6251da177e4SLinus Torvalds } 6261da177e4SLinus Torvalds module_init(netem_module_init) 6271da177e4SLinus Torvalds module_exit(netem_module_exit) 6281da177e4SLinus Torvalds MODULE_LICENSE("GPL"); 629