net/sched/sch_teql.c, compared between commit 14f0290ba44de6ed435fea24bba26e7868421c66 (old) and commit cc7ec456f82da7f89a5b376e613b3ac4311b3e9a (new). Lines common to both commits appear once, unprefixed; lines prefixed with "-" exist only in the old commit and lines prefixed with "+" only in the new one.

/* net/sched/sch_teql.c "True" (or "trivial") link equalizer.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>

--- 39 unchanged lines hidden ---

   eqalized link unusable, because of huge packet reordering.
   I estimate an upper useful difference as ~10 times.
   3. If the slave requires address resolution, only protocols using
   neighbour cache (IPv4/IPv6) will work over the equalized link.
   Other protocols are still allowed to use the slave device directly,
   which will not break load balancing, though native slave
   traffic will have the highest priority. */
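The reordering caveat above follows directly from how teql spreads traffic: the master device hands successive packets to successive slave links in a ring, so a large speed mismatch between slaves reorders packets badly. Below is a minimal user-space sketch of that round-robin dispatch idea, assuming a hypothetical send_on() helper and made-up device names; the real kernel path is the master transmit routine further down in this file.

```c
#include <stdio.h>

#define NSLAVES 3
/* Hypothetical slave link names, standing in for real net devices. */
static const char *slave_name[NSLAVES] = { "eth1", "eth2", "eth3" };

/* Stand-in for transmitting one packet on a given slave link. */
static void send_on(int slave, int pktid)
{
        printf("packet %d -> %s\n", pktid, slave_name[slave]);
}

int main(void)
{
        int next = 0;

        /* Round-robin "equalization": consecutive packets take consecutive
         * links, so aggregate throughput adds up, but per-flow ordering
         * suffers when link speeds differ a lot (the ~10x caveat above). */
        for (int pkt = 0; pkt < 9; pkt++) {
                send_on(next, pkt);
                next = (next + 1) % NSLAVES;
        }
        return 0;
}
```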
-struct teql_master
-{
+struct teql_master {
        struct Qdisc_ops qops;
        struct net_device *dev;
        struct Qdisc *slaves;
        struct list_head master_list;
        unsigned long tx_bytes;
        unsigned long tx_packets;
        unsigned long tx_errors;
        unsigned long tx_dropped;
};

-struct teql_sched_data
-{
+struct teql_sched_data {
        struct Qdisc *next;
        struct teql_master *m;
        struct neighbour *ncache;
        struct sk_buff_head q;
};

-#define NEXT_SLAVE(q) (((struct teql_sched_data*)qdisc_priv(q))->next)
+#define NEXT_SLAVE(q) (((struct teql_sched_data *)qdisc_priv(q))->next)

-#define FMASK (IFF_BROADCAST|IFF_POINTOPOINT)
+#define FMASK (IFF_BROADCAST | IFF_POINTOPOINT)
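NEXT_SLAVE() is what strings the slave qdiscs into a single circular list: each slave's private data holds a pointer to the next slave, and master->slaves points at the current head. The teql_destroy() path further down walks this ring to unlink one slave. The following is a rough user-space illustration of such a one-pointer ring, with hypothetical names; it is not the kernel API, only the shape of the data structure.

```c
#include <stdio.h>

struct slave {
        int id;
        struct slave *next;     /* plays the role of NEXT_SLAVE() */
};

/* Insert a node into the ring; with no head it points at itself. */
static struct slave *ring_insert(struct slave *head, struct slave *n)
{
        if (!head) {
                n->next = n;
                return n;
        }
        n->next = head->next;
        head->next = n;
        return head;
}

/* Unlink one node, returning the new head (NULL when the ring empties). */
static struct slave *ring_remove(struct slave *head, struct slave *n)
{
        struct slave *prev = head;

        do {
                if (prev->next == n) {
                        prev->next = n->next;
                        if (head == n)
                                head = (n->next == n) ? NULL : n->next;
                        return head;
                }
                prev = prev->next;
        } while (prev != head);
        return head;
}

int main(void)
{
        struct slave a = { .id = 1 }, b = { .id = 2 }, c = { .id = 3 };
        struct slave *head = NULL;
        struct slave *p;

        head = ring_insert(head, &a);
        head = ring_insert(head, &b);
        head = ring_insert(head, &c);
        head = ring_remove(head, &b);

        /* Walk the ring once from the head. */
        p = head;
        do {
                printf("slave %d\n", p->id);
                p = p->next;
        } while (p != head);
        return 0;
}
```

A single forward pointer keeps the structure minimal; removal just needs a full walk to find the predecessor, which is exactly what teql_destroy() does below.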
/* "teql*" qdisc routines */

static int
-teql_enqueue(struct sk_buff *skb, struct Qdisc* sch)
+teql_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
        struct net_device *dev = qdisc_dev(sch);
        struct teql_sched_data *q = qdisc_priv(sch);

        if (q->q.qlen < dev->tx_queue_len) {
                __skb_queue_tail(&q->q, skb);
                qdisc_bstats_update(sch, skb);
                return NET_XMIT_SUCCESS;
        }

        kfree_skb(skb);
        sch->qstats.drops++;
        return NET_XMIT_DROP;
}

static struct sk_buff *
-teql_dequeue(struct Qdisc* sch)
+teql_dequeue(struct Qdisc *sch)
{
        struct teql_sched_data *dat = qdisc_priv(sch);
        struct netdev_queue *dat_queue;
        struct sk_buff *skb;

        skb = __skb_dequeue(&dat->q);
        dat_queue = netdev_get_tx_queue(dat->m->dev, 0);
        if (skb == NULL) {
                struct net_device *m = qdisc_dev(dat_queue->qdisc);
                if (m) {
                        dat->m->slaves = sch;
                        netif_wake_queue(m);
                }
        }
        sch->q.qlen = dat->q.qlen + dat_queue->qdisc->q.qlen;
        return skb;
}

static struct sk_buff *
-teql_peek(struct Qdisc* sch)
+teql_peek(struct Qdisc *sch)
{
        /* teql is meant to be used as root qdisc */
        return NULL;
}
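teql_enqueue() above is plain drop-tail: a packet is queued only while the backlog is below the slave device's tx_queue_len; otherwise it is freed and counted in qstats.drops. The same accept-or-drop policy in a self-contained user-space sketch (a hypothetical fixed-size FIFO, not the kernel sk_buff_head API):

```c
#include <stdio.h>
#include <stdbool.h>

#define TX_QUEUE_LEN 4  /* plays the role of dev->tx_queue_len */

struct fifo {
        int items[TX_QUEUE_LEN];
        int head, len;
        unsigned long drops;    /* mirrors sch->qstats.drops */
};

/* Drop-tail enqueue: accept while below the limit, otherwise drop. */
static bool enqueue(struct fifo *q, int pkt)
{
        if (q->len < TX_QUEUE_LEN) {
                q->items[(q->head + q->len++) % TX_QUEUE_LEN] = pkt;
                return true;    /* NET_XMIT_SUCCESS in the kernel */
        }
        q->drops++;
        return false;           /* NET_XMIT_DROP in the kernel */
}

int main(void)
{
        struct fifo q = { 0 };

        for (int pkt = 0; pkt < 6; pkt++)
                printf("packet %d: %s\n", pkt,
                       enqueue(&q, pkt) ? "queued" : "dropped");
        printf("drops: %lu\n", q.drops);
        return 0;
}
```

The kernel version additionally delegates byte and packet accounting to qdisc_bstats_update(); the sketch only mirrors the accept-or-drop decision.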
-static __inline__ void
+static inline void
teql_neigh_release(struct neighbour *n)
{
        if (n)
                neigh_release(n);
}

static void
-teql_reset(struct Qdisc* sch)
+teql_reset(struct Qdisc *sch)
{
        struct teql_sched_data *dat = qdisc_priv(sch);

        skb_queue_purge(&dat->q);
        sch->q.qlen = 0;
        teql_neigh_release(xchg(&dat->ncache, NULL));
}

static void
-teql_destroy(struct Qdisc* sch)
+teql_destroy(struct Qdisc *sch)
{
        struct Qdisc *q, *prev;
        struct teql_sched_data *dat = qdisc_priv(sch);
        struct teql_master *master = dat->m;

-       if ((prev = master->slaves) != NULL) {
+       prev = master->slaves;
+       if (prev) {
                do {
                        q = NEXT_SLAVE(prev);
                        if (q == sch) {
                                NEXT_SLAVE(prev) = NEXT_SLAVE(q);
                                if (q == master->slaves) {
                                        master->slaves = NEXT_SLAVE(q);
                                        if (q == master->slaves) {
                                                struct netdev_queue *txq;

--- 15 unchanged lines hidden ---

                } while ((prev = q) != master->slaves);
        }
}

static int teql_qdisc_init(struct Qdisc *sch, struct nlattr *opt)
{
        struct net_device *dev = qdisc_dev(sch);
-       struct teql_master *m = (struct teql_master*)sch->ops;
+       struct teql_master *m = (struct teql_master *)sch->ops;
        struct teql_sched_data *q = qdisc_priv(sch);

        if (dev->hard_header_len > m->dev->hard_header_len)
                return -EINVAL;

        if (m->dev == dev)
                return -ELOOP;

--- 94 unchanged lines hidden ---

        struct sk_buff *skb_res = NULL;

        start = master->slaves;

restart:
        nores = 0;
        busy = 0;

-       if ((q = start) == NULL)
+       q = start;
+       if (!q)
                goto drop;

        do {
                struct net_device *slave = qdisc_dev(q);
                struct netdev_queue *slave_txq = netdev_get_tx_queue(slave, 0);
                const struct net_device_ops *slave_ops = slave->netdev_ops;

                if (slave_txq->qdisc_sleeping != q)

--- 48 unchanged lines hidden ---

drop:
        master->tx_dropped++;
        dev_kfree_skb(skb);
        return NETDEV_TX_OK;
}

static int teql_master_open(struct net_device *dev)
{
-       struct Qdisc * q;
+       struct Qdisc *q;
        struct teql_master *m = netdev_priv(dev);
        int mtu = 0xFFFE;
-       unsigned flags = IFF_NOARP|IFF_MULTICAST;
+       unsigned int flags = IFF_NOARP | IFF_MULTICAST;

        if (m->slaves == NULL)
                return -EUNATCH;

        flags = FMASK;

        q = m->slaves;
        do {

--- 51 unchanged lines hidden ---

        if (new_mtu < 68)
                return -EINVAL;

        q = m->slaves;
        if (q) {
                do {
                        if (new_mtu > qdisc_dev(q)->mtu)
                                return -EINVAL;
-               } while ((q=NEXT_SLAVE(q)) != m->slaves);
+               } while ((q = NEXT_SLAVE(q)) != m->slaves);
        }

        dev->mtu = new_mtu;
        return 0;
}

static const struct net_device_ops teql_netdev_ops = {
        .ndo_open       = teql_master_open,

--- 91 unchanged lines hidden ---
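The loop just above (the master device's MTU-change handler) walks the NEXT_SLAVE() ring once and rejects any new MTU below 68 or larger than some slave's MTU, so the master can never advertise an MTU that one of its slaves cannot carry. A sketch of that validation in plain user-space C, using a hypothetical array of slave MTUs in place of the kernel's circular list:

```c
#include <stdio.h>

/* Hypothetical name for the 68-byte lower bound checked in the code above. */
#define MIN_MTU 68

/* Hypothetical slave MTUs; the kernel reads qdisc_dev(q)->mtu while
 * walking the NEXT_SLAVE() ring. */
static const int slave_mtu[] = { 1500, 1492, 9000 };

/* Reject an MTU below MIN_MTU or above any slave's MTU; -1 stands in
 * for the kernel's -EINVAL. */
static int master_change_mtu(int *master_mtu, int new_mtu)
{
        if (new_mtu < MIN_MTU)
                return -1;

        for (unsigned i = 0; i < sizeof(slave_mtu) / sizeof(slave_mtu[0]); i++)
                if (new_mtu > slave_mtu[i])
                        return -1;

        *master_mtu = new_mtu;
        return 0;
}

int main(void)
{
        int mtu = 1500;

        printf("set 1400: %s\n", master_change_mtu(&mtu, 1400) ? "rejected" : "ok");
        printf("set 1500: %s\n", master_change_mtu(&mtu, 1500) ? "rejected" : "ok");
        printf("set 1600: %s\n", master_change_mtu(&mtu, 1600) ? "rejected" : "ok");
        printf("master mtu is now %d\n", mtu);
        return 0;
}
```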