/* net/sched/sch_teql.c	"True" (or "trivial") link equalizer.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/if_arp.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/moduleparam.h>
#include <net/dst.h>
#include <net/neighbour.h>
#include <net/pkt_sched.h>

/*
   How to set it up.
   -----------------

   After loading this module you will find a new device teqlN
   and a new qdisc with the same name. To join a slave to the equalizer
   you should just attach this qdisc to a device, e.g.:

   # tc qdisc add dev eth0 root teql0
   # tc qdisc add dev eth1 root teql0

   That's all. Full PnP 8)
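
   The master is then configured like any ordinary device, e.g. with
   the usual iproute2 tools (the address below is only an example):

   # ip link set dev teql0 up
   # ip addr add 10.0.0.1/24 dev teql0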

   Applicability.
   --------------

   1. Slave devices MUST be active devices, i.e., they must raise the tbusy
      signal and generate EOI events. If you want to equalize virtual devices
      like tunnels, use a normal eql device.
   2. This device puts no limitations on physical slave characteristics,
      e.g. it will equalize a 9600 baud line and 100Mb ethernet perfectly :-)
      Certainly, a large difference in link speeds will make the resulting
      equalized link unusable, because of massive packet reordering.
      I estimate the upper useful difference as ~10 times.
   3. If the slave requires address resolution, only protocols using
      the neighbour cache (IPv4/IPv6) will work over the equalized link.
      Other protocols are still allowed to use the slave device directly,
      which will not break load balancing, though native slave
      traffic will have the highest priority.  */

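/* One master device per equalizer; the per-slave qdiscs are linked into
 * a circular list rooted at 'slaves' (see struct teql_sched_data below).
 */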
struct teql_master {
	struct Qdisc_ops qops;
	struct net_device *dev;
	struct Qdisc *slaves;
	struct list_head master_list;
	unsigned long	tx_bytes;
	unsigned long	tx_packets;
	unsigned long	tx_errors;
	unsigned long	tx_dropped;
};

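/* Private data of each slave qdisc; 'next' closes the circular slave
 * list, 'q' holds the packets queued on this slave, and 'ncache' caches
 * the most recently resolved neighbour for the slave device.
 */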
struct teql_sched_data {
	struct Qdisc *next;
	struct teql_master *m;
	struct neighbour *ncache;
	struct sk_buff_head q;
};

#define NEXT_SLAVE(q) (((struct teql_sched_data *)qdisc_priv(q))->next)

#define FMASK (IFF_BROADCAST | IFF_POINTOPOINT)

/* "teql*" qdisc routines */

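/* Tail-queue the packet on this slave, bounded by the slave device's
 * tx_queue_len; drop and count the packet when the queue is full.
 */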
static int
teql_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct net_device *dev = qdisc_dev(sch);
	struct teql_sched_data *q = qdisc_priv(sch);

	if (q->q.qlen < dev->tx_queue_len) {
		__skb_queue_tail(&q->q, skb);
		qdisc_bstats_update(sch, skb);
		return NET_XMIT_SUCCESS;
	}

	kfree_skb(skb);
	sch->qstats.drops++;
	return NET_XMIT_DROP;
}

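/* Pop the next packet from this slave's queue. When the queue is empty,
 * make this qdisc the master's current slave and wake the master queue
 * so that transmission can continue from here.
 */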
static struct sk_buff *
teql_dequeue(struct Qdisc *sch)
{
	struct teql_sched_data *dat = qdisc_priv(sch);
	struct netdev_queue *dat_queue;
	struct sk_buff *skb;

	skb = __skb_dequeue(&dat->q);
	dat_queue = netdev_get_tx_queue(dat->m->dev, 0);
	if (skb == NULL) {
		struct net_device *m = qdisc_dev(dat_queue->qdisc);

		if (m) {
			dat->m->slaves = sch;
			netif_wake_queue(m);
		}
	}
	sch->q.qlen = dat->q.qlen + dat_queue->qdisc->q.qlen;
	return skb;
}

static struct sk_buff *
teql_peek(struct Qdisc *sch)
{
	/* teql is meant to be used as root qdisc */
	return NULL;
}

static inline void
teql_neigh_release(struct neighbour *n)
{
	if (n)
		neigh_release(n);
}

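/* Flush all queued packets and drop the cached neighbour reference. */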
static void
teql_reset(struct Qdisc *sch)
{
	struct teql_sched_data *dat = qdisc_priv(sch);

	skb_queue_purge(&dat->q);
	sch->q.qlen = 0;
	teql_neigh_release(xchg(&dat->ncache, NULL));
}

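/* Unlink this qdisc from the master's circular slave list. If it was
 * the last slave, also reset the master's own qdisc under the root lock.
 */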
static void
teql_destroy(struct Qdisc *sch)
{
	struct Qdisc *q, *prev;
	struct teql_sched_data *dat = qdisc_priv(sch);
	struct teql_master *master = dat->m;

	prev = master->slaves;
	if (prev) {
		do {
			q = NEXT_SLAVE(prev);
			if (q == sch) {
				NEXT_SLAVE(prev) = NEXT_SLAVE(q);
				if (q == master->slaves) {
					master->slaves = NEXT_SLAVE(q);
					if (q == master->slaves) {
						struct netdev_queue *txq;
						spinlock_t *root_lock;

						txq = netdev_get_tx_queue(master->dev, 0);
						master->slaves = NULL;

						root_lock = qdisc_root_sleeping_lock(txq->qdisc);
						spin_lock_bh(root_lock);
						qdisc_reset(txq->qdisc);
						spin_unlock_bh(root_lock);
					}
				}
				skb_queue_purge(&dat->q);
				teql_neigh_release(xchg(&dat->ncache, NULL));
				break;
			}

		} while ((prev = q) != master->slaves);
	}
}

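/* Attach a new slave: check that the slave has enough header room and
 * compatible flags/MTU, then splice its qdisc into the circular list.
 * The first slave simply seeds the master's MTU and flags.
 */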
static int teql_qdisc_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct net_device *dev = qdisc_dev(sch);
	struct teql_master *m = (struct teql_master *)sch->ops;
	struct teql_sched_data *q = qdisc_priv(sch);

	if (dev->hard_header_len > m->dev->hard_header_len)
		return -EINVAL;

	if (m->dev == dev)
		return -ELOOP;

	q->m = m;

	skb_queue_head_init(&q->q);

	if (m->slaves) {
		if (m->dev->flags & IFF_UP) {
			if ((m->dev->flags & IFF_POINTOPOINT &&
			     !(dev->flags & IFF_POINTOPOINT)) ||
			    (m->dev->flags & IFF_BROADCAST &&
			     !(dev->flags & IFF_BROADCAST)) ||
			    (m->dev->flags & IFF_MULTICAST &&
			     !(dev->flags & IFF_MULTICAST)) ||
			    dev->mtu < m->dev->mtu)
				return -EINVAL;
		} else {
			if (!(dev->flags & IFF_POINTOPOINT))
				m->dev->flags &= ~IFF_POINTOPOINT;
			if (!(dev->flags & IFF_BROADCAST))
				m->dev->flags &= ~IFF_BROADCAST;
			if (!(dev->flags & IFF_MULTICAST))
				m->dev->flags &= ~IFF_MULTICAST;
			if (dev->mtu < m->dev->mtu)
				m->dev->mtu = dev->mtu;
		}
		q->next = NEXT_SLAVE(m->slaves);
		NEXT_SLAVE(m->slaves) = sch;
	} else {
		q->next = sch;
		m->slaves = sch;
		m->dev->mtu = dev->mtu;
		m->dev->flags = (m->dev->flags & ~FMASK) | (dev->flags & FMASK);
	}
	return 0;
}

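/* Resolve the link-layer destination for 'skb' on slave 'dev': reuse the
 * cached neighbour when it matches the route's neighbour, otherwise look
 * one up on the slave, then build the hard header. Returns 0 on success,
 * -EAGAIN when resolution is still pending and no fallback skb was given,
 * and 1 when the fallback 'skb_res' was passed to the pending neighbour.
 */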
static int
__teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res, struct net_device *dev)
{
	struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, 0);
	struct teql_sched_data *q = qdisc_priv(dev_queue->qdisc);
	struct neighbour *mn = skb_dst(skb)->neighbour;
	struct neighbour *n = q->ncache;

	if (mn->tbl == NULL)
		return -EINVAL;
	if (n && n->tbl == mn->tbl &&
	    memcmp(n->primary_key, mn->primary_key, mn->tbl->key_len) == 0) {
		atomic_inc(&n->refcnt);
	} else {
		n = __neigh_lookup_errno(mn->tbl, mn->primary_key, dev);
		if (IS_ERR(n))
			return PTR_ERR(n);
	}
	if (neigh_event_send(n, skb_res) == 0) {
		int err;
		char haddr[MAX_ADDR_LEN];

		neigh_ha_snapshot(haddr, n, dev);
		err = dev_hard_header(skb, dev, ntohs(skb->protocol), haddr,
				      NULL, skb->len);

		if (err < 0) {
			neigh_release(n);
			return -EINVAL;
		}
		teql_neigh_release(xchg(&q->ncache, n));
		return 0;
	}
	neigh_release(n);
	return (skb_res == NULL) ? -EAGAIN : 1;
}

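/* Fast-path wrapper: nothing to resolve when the slave has no header_ops
 * or the skb carries no neighbour; fail with -ENODEV when the slave's
 * qdisc is the noop qdisc (i.e. the slave has been deactivated).
 */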
static inline int teql_resolve(struct sk_buff *skb,
			       struct sk_buff *skb_res, struct net_device *dev)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);

	if (txq->qdisc == &noop_qdisc)
		return -ENODEV;

	if (dev->header_ops == NULL ||
	    skb_dst(skb) == NULL ||
	    skb_dst(skb)->neighbour == NULL)
		return 0;
	return __teql_resolve(skb, skb_res, dev);
}

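/* Master transmit: walk the circular slave list starting at the current
 * slave, skipping slaves that are stopped or not running, and transmit
 * on the first slave that resolves, rotating the list for round-robin.
 * If some slave had resolution pending, retry once offering the skb
 * itself as the fallback; if every usable slave was busy, stop the
 * master queue and return NETDEV_TX_BUSY so the packet is requeued.
 */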
static netdev_tx_t teql_master_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct teql_master *master = netdev_priv(dev);
	struct Qdisc *start, *q;
	int busy;
	int nores;
	int subq = skb_get_queue_mapping(skb);
	struct sk_buff *skb_res = NULL;

	start = master->slaves;

restart:
	nores = 0;
	busy = 0;

	q = start;
	if (!q)
		goto drop;

	do {
		struct net_device *slave = qdisc_dev(q);
		struct netdev_queue *slave_txq = netdev_get_tx_queue(slave, 0);
		const struct net_device_ops *slave_ops = slave->netdev_ops;

		if (slave_txq->qdisc_sleeping != q)
			continue;
		if (__netif_subqueue_stopped(slave, subq) ||
		    !netif_running(slave)) {
			busy = 1;
			continue;
		}

		switch (teql_resolve(skb, skb_res, slave)) {
		case 0:
			if (__netif_tx_trylock(slave_txq)) {
				unsigned int length = qdisc_pkt_len(skb);

				if (!netif_tx_queue_frozen_or_stopped(slave_txq) &&
				    slave_ops->ndo_start_xmit(skb, slave) == NETDEV_TX_OK) {
					txq_trans_update(slave_txq);
					__netif_tx_unlock(slave_txq);
					master->slaves = NEXT_SLAVE(q);
					netif_wake_queue(dev);
					master->tx_packets++;
					master->tx_bytes += length;
					return NETDEV_TX_OK;
				}
				__netif_tx_unlock(slave_txq);
			}
			if (netif_queue_stopped(dev))
				busy = 1;
			break;
		case 1:
			master->slaves = NEXT_SLAVE(q);
			return NETDEV_TX_OK;
		default:
			nores = 1;
			break;
		}
		__skb_pull(skb, skb_network_offset(skb));
	} while ((q = NEXT_SLAVE(q)) != start);

	if (nores && skb_res == NULL) {
		skb_res = skb;
		goto restart;
	}

	if (busy) {
		netif_stop_queue(dev);
		return NETDEV_TX_BUSY;
	}
	master->tx_errors++;

drop:
	master->tx_dropped++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

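/* Bring the master up: refuse with -EUNATCH unless at least one slave is
 * attached, then adopt the smallest slave MTU and the intersection of the
 * slaves' BROADCAST/POINTOPOINT flags.
 */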
static int teql_master_open(struct net_device *dev)
{
	struct Qdisc *q;
	struct teql_master *m = netdev_priv(dev);
	int mtu = 0xFFFE;
	unsigned int flags = IFF_NOARP | IFF_MULTICAST;

	if (m->slaves == NULL)
		return -EUNATCH;

	flags = FMASK;

	q = m->slaves;
	do {
		struct net_device *slave = qdisc_dev(q);

		if (slave == NULL)
			return -EUNATCH;

		if (slave->mtu < mtu)
			mtu = slave->mtu;
		if (slave->hard_header_len > LL_MAX_HEADER)
			return -EINVAL;

		/* If all the slaves are BROADCAST, master is BROADCAST
		 * If all the slaves are PtP, master is PtP
		 * Otherwise, master is NBMA.
		 */
		if (!(slave->flags & IFF_POINTOPOINT))
			flags &= ~IFF_POINTOPOINT;
		if (!(slave->flags & IFF_BROADCAST))
			flags &= ~IFF_BROADCAST;
		if (!(slave->flags & IFF_MULTICAST))
			flags &= ~IFF_MULTICAST;
	} while ((q = NEXT_SLAVE(q)) != m->slaves);

	m->dev->mtu = mtu;
	m->dev->flags = (m->dev->flags & ~FMASK) | flags;
	netif_start_queue(m->dev);
	return 0;
}

static int teql_master_close(struct net_device *dev)
{
	netif_stop_queue(dev);
	return 0;
}

static struct rtnl_link_stats64 *teql_master_stats64(struct net_device *dev,
						     struct rtnl_link_stats64 *stats)
{
	struct teql_master *m = netdev_priv(dev);

	stats->tx_packets	= m->tx_packets;
	stats->tx_bytes		= m->tx_bytes;
	stats->tx_errors	= m->tx_errors;
	stats->tx_dropped	= m->tx_dropped;
	return stats;
}

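/* The master MTU must stay within [68, min(slave MTUs)]; 68 is the
 * minimum IPv4 MTU.
 */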
static int teql_master_mtu(struct net_device *dev, int new_mtu)
{
	struct teql_master *m = netdev_priv(dev);
	struct Qdisc *q;

	if (new_mtu < 68)
		return -EINVAL;

	q = m->slaves;
	if (q) {
		do {
			if (new_mtu > qdisc_dev(q)->mtu)
				return -EINVAL;
		} while ((q = NEXT_SLAVE(q)) != m->slaves);
	}

	dev->mtu = new_mtu;
	return 0;
}

static const struct net_device_ops teql_netdev_ops = {
	.ndo_open	= teql_master_open,
	.ndo_stop	= teql_master_close,
	.ndo_start_xmit	= teql_master_xmit,
	.ndo_get_stats64 = teql_master_stats64,
	.ndo_change_mtu	= teql_master_mtu,
};

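/* Initialize one master device together with its embedded Qdisc_ops;
 * the qdisc id is copied from the device name later, in teql_init().
 */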
static __init void teql_master_setup(struct net_device *dev)
{
	struct teql_master *master = netdev_priv(dev);
	struct Qdisc_ops *ops = &master->qops;

	master->dev	= dev;
	ops->priv_size	= sizeof(struct teql_sched_data);

	ops->enqueue	=	teql_enqueue;
	ops->dequeue	=	teql_dequeue;
	ops->peek	=	teql_peek;
	ops->init	=	teql_qdisc_init;
	ops->reset	=	teql_reset;
	ops->destroy	=	teql_destroy;
	ops->owner	=	THIS_MODULE;

	dev->netdev_ops		= &teql_netdev_ops;
	dev->type		= ARPHRD_VOID;
	dev->mtu		= 1500;
	dev->tx_queue_len	= 100;
	dev->flags		= IFF_NOARP;
	dev->hard_header_len	= LL_MAX_HEADER;
	dev->priv_flags		&= ~IFF_XMIT_DST_RELEASE;
}

static LIST_HEAD(master_dev_list);
static int max_equalizers = 1;
module_param(max_equalizers, int, 0);
MODULE_PARM_DESC(max_equalizers, "Max number of link equalizers");

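/* Create max_equalizers master devices ("teql0", "teql1", ...) and
 * register one qdisc per device. Succeeds if at least one equalizer
 * was fully set up; otherwise returns the last error.
 */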
static int __init teql_init(void)
{
	int i;
	int err = -ENODEV;

	for (i = 0; i < max_equalizers; i++) {
		struct net_device *dev;
		struct teql_master *master;

		dev = alloc_netdev(sizeof(struct teql_master),
				   "teql%d", teql_master_setup);
		if (!dev) {
			err = -ENOMEM;
			break;
		}

		err = register_netdev(dev);
		if (err) {
			free_netdev(dev);
			break;
		}

		master = netdev_priv(dev);

		strlcpy(master->qops.id, dev->name, IFNAMSIZ);
		err = register_qdisc(&master->qops);

		if (err) {
			unregister_netdev(dev);
			free_netdev(dev);
			break;
		}

		list_add_tail(&master->master_list, &master_dev_list);
	}
	return i ? 0 : err;
}

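/* Unregister each qdisc before its master device, then free the device. */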
static void __exit teql_exit(void)
{
	struct teql_master *master, *nxt;

	list_for_each_entry_safe(master, nxt, &master_dev_list, master_list) {
		list_del(&master->master_list);

		unregister_qdisc(&master->qops);
		unregister_netdev(master->dev);
		free_netdev(master->dev);
	}
}

module_init(teql_init);
module_exit(teql_exit);

MODULE_LICENSE("GPL");