/*
 * Equalizer Load-balancer for serial network interfaces.
 *
 * (c) Copyright 1995 Simon "Guru Aleph-Null" Janes
 * NCM: Network and Communications Management, Inc.
 *
 * (c) Copyright 2002 David S. Miller (davem@redhat.com)
 *
 *	This software may be used and distributed according to the terms
 *	of the GNU General Public License, incorporated herein by reference.
 *
 * The author may be reached as simon@ncm.com, or C/O
 *    NCM
 *    Attn: Simon Janes
 *    6803 Whittier Ave
 *    McLean VA 22101
 *    Phone: 1-703-847-0040 ext 103
 */

/*
 * Sources:
 *   skeleton.c by Donald Becker.
 * Inspirations:
 *   The Harried and Overworked Alan Cox
 * Conspiracies:
 *   The Alan Cox and Mike McLagan plot to get someone else to do the code,
 *   which turned out to be me.
 */

/*
 * $Log: eql.c,v $
 * Revision 1.2  1996/04/11 17:51:52  guru
 * Added one-line eql_remove_slave patch.
 *
 * Revision 1.1  1996/04/11 17:44:17  guru
 * Initial revision
 *
 * Revision 3.13  1996/01/21  15:17:18  alan
 * tx_queue_len changes.
 * reformatted.
 *
 * Revision 3.12  1995/03/22  21:07:51  anarchy
 * Added capable() checks on configuration.
 * Moved header file.
 *
 * Revision 3.11  1995/01/19  23:14:31  guru
 * 		      slave_load = (ULONG_MAX - (ULONG_MAX / 2)) -
 * 			(priority_Bps) + bytes_queued * 8;
 *
 * Revision 3.10  1995/01/19  23:07:53  guru
 * back to
 * 		      slave_load = (ULONG_MAX - (ULONG_MAX / 2)) -
 * 			(priority_Bps) + bytes_queued;
 *
 * Revision 3.9  1995/01/19  22:38:20  guru
 * 		      slave_load = (ULONG_MAX - (ULONG_MAX / 2)) -
 * 			(priority_Bps) + bytes_queued * 4;
 *
 * Revision 3.8  1995/01/19  22:30:55  guru
 *       slave_load = (ULONG_MAX - (ULONG_MAX / 2)) -
 * 			(priority_Bps) + bytes_queued * 2;
 *
 * Revision 3.7  1995/01/19  21:52:35  guru
 * printk's trimmed out.
 *
 * Revision 3.6  1995/01/19  21:49:56  guru
 * This is working pretty well. I gained 1 K/s in speed.. now it's just
 * robustness and printk's to be diked out.
 *
 * Revision 3.5  1995/01/18  22:29:59  guru
 * still crashes the kernel when the lock_wait thing is woken up.
 *
 * Revision 3.4  1995/01/18  21:59:47  guru
 * Broken set-bit locking snapshot
 *
 * Revision 3.3  1995/01/17  22:09:18  guru
 * infinite sleep in a lock somewhere..
 *
 * Revision 3.2  1995/01/15  16:46:06  guru
 * Log trimmed of non-pertinent 1.x branch messages
 *
 * Revision 3.1  1995/01/15  14:41:45  guru
 * New Scheduler and timer stuff...
 *
 * Revision 1.15  1995/01/15  14:29:02  guru
 * Will make 1.14 (now 1.15) the 3.0 branch, and the 1.12 the 2.0 branch, the one
 * with the dumber scheduler
 *
 * Revision 1.14  1995/01/15  02:37:08  guru
 * shock.. the kept-new-versions could have zonked working
 * stuff.. shudder
 *
 * Revision 1.13  1995/01/15  02:36:31  guru
 * big changes
 *
 * 	scheduler was torn out and replaced with something smarter
 *
 * 	global names not prefixed with eql_ were renamed to protect
 * 	against namespace collisions
 *
 * 	a few more abstract interfaces were added to facilitate any
 * 	potential change of datastructure.  the driver is still using
 * 	a linked list of slaves.  going to a heap would be a bit of
 * 	an overkill.
 *
 * 	this compiles fine with no warnings.
 *
 * 	the locking mechanism and timer stuff must be written however,
 * 	this version will not work otherwise
 *
 * Sorry, I had to rewrite most of this for 2.5.x -DaveM
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/netdevice.h>
#include <net/net_namespace.h>

#include <linux/if.h>
#include <linux/if_arp.h>
#include <linux/if_eql.h>
#include <linux/pkt_sched.h>

#include <asm/uaccess.h>

static int eql_open(struct net_device *dev);
static int eql_close(struct net_device *dev);
static int eql_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
static netdev_tx_t eql_slave_xmit(struct sk_buff *skb, struct net_device *dev);

#define eql_is_slave(dev)	((dev->flags & IFF_SLAVE) == IFF_SLAVE)
#define eql_is_master(dev)	((dev->flags & IFF_MASTER) == IFF_MASTER)

static void eql_kill_one_slave(slave_queue_t *queue, slave_t *slave);

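/*
 * Housekeeping timer: once every EQL_DEFAULT_RESCHED_IVAL jiffies the
 * per-slave backlog estimate (bytes_queued) is drained by the slave's
 * configured byte budget (priority_Bps), never dropping below zero, and
 * slaves whose underlying device has gone down are unlinked from the queue.
 */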
static void eql_timer(unsigned long param)
{
	equalizer_t *eql = (equalizer_t *) param;
	struct list_head *this, *tmp, *head;

	spin_lock(&eql->queue.lock);
	head = &eql->queue.all_slaves;
	list_for_each_safe(this, tmp, head) {
		slave_t *slave = list_entry(this, slave_t, list);

		if ((slave->dev->flags & IFF_UP) == IFF_UP) {
			slave->bytes_queued -= slave->priority_Bps;
			if (slave->bytes_queued < 0)
				slave->bytes_queued = 0;
		} else {
			eql_kill_one_slave(&eql->queue, slave);
		}

	}
	spin_unlock(&eql->queue.lock);

	eql->timer.expires = jiffies + EQL_DEFAULT_RESCHED_IVAL;
	add_timer(&eql->timer);
}

static const char version[] __initconst =
	"Equalizer2002: Simon Janes (simon@ncm.com) and David S. Miller (davem@redhat.com)";

static const struct net_device_ops eql_netdev_ops = {
	.ndo_open	= eql_open,
	.ndo_stop	= eql_close,
	.ndo_do_ioctl	= eql_ioctl,
	.ndo_start_xmit	= eql_slave_xmit,
};

static void __init eql_setup(struct net_device *dev)
{
	equalizer_t *eql = netdev_priv(dev);

	init_timer(&eql->timer);
	eql->timer.data		= (unsigned long) eql;
	eql->timer.expires	= jiffies + EQL_DEFAULT_RESCHED_IVAL;
	eql->timer.function	= eql_timer;

	spin_lock_init(&eql->queue.lock);
	INIT_LIST_HEAD(&eql->queue.all_slaves);
	eql->queue.master_dev	= dev;

	dev->netdev_ops		= &eql_netdev_ops;

	/*
	 *	Now we undo some of the things that eth_setup does
	 *	that we don't like
	 */

	dev->mtu		= EQL_DEFAULT_MTU;	/* set to 576 in if_eql.h */
	dev->flags		= IFF_MASTER;

	dev->type		= ARPHRD_SLIP;
	dev->tx_queue_len	= 5;		/* Hands them off fast */
	dev->priv_flags	       &= ~IFF_XMIT_DST_RELEASE;
}

static int eql_open(struct net_device *dev)
{
	equalizer_t *eql = netdev_priv(dev);

	/* XXX We should force this off automatically for the user. */
	netdev_info(dev,
		    "remember to turn off Van-Jacobson compression on your slave devices\n");

	BUG_ON(!list_empty(&eql->queue.all_slaves));

	eql->min_slaves = 1;
	eql->max_slaves = EQL_DEFAULT_MAX_SLAVES; /* 4 usually... */

	add_timer(&eql->timer);

	return 0;
}

static void eql_kill_one_slave(slave_queue_t *queue, slave_t *slave)
{
	list_del(&slave->list);
	queue->num_slaves--;
	slave->dev->flags &= ~IFF_SLAVE;
	dev_put(slave->dev);
	kfree(slave);
}

static void eql_kill_slave_queue(slave_queue_t *queue)
{
	struct list_head *head, *tmp, *this;

	spin_lock_bh(&queue->lock);

	head = &queue->all_slaves;
	list_for_each_safe(this, tmp, head) {
		slave_t *s = list_entry(this, slave_t, list);

		eql_kill_one_slave(queue, s);
	}

	spin_unlock_bh(&queue->lock);
}

static int eql_close(struct net_device *dev)
{
	equalizer_t *eql = netdev_priv(dev);

	/*
	 *	The timer has to be stopped first before we start hacking away
	 *	at the data structure it scans every so often...
	 */

	del_timer_sync(&eql->timer);

	eql_kill_slave_queue(&eql->queue);

	return 0;
}

static int eql_enslave(struct net_device *dev, slaving_request_t __user *srq);
static int eql_emancipate(struct net_device *dev, slaving_request_t __user *srq);

static int eql_g_slave_cfg(struct net_device *dev, slave_config_t __user *sc);
static int eql_s_slave_cfg(struct net_device *dev, slave_config_t __user *sc);

static int eql_g_master_cfg(struct net_device *dev, master_config_t __user *mc);
static int eql_s_master_cfg(struct net_device *dev, master_config_t __user *mc);

static int eql_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	if (cmd != EQL_GETMASTRCFG && cmd != EQL_GETSLAVECFG &&
	    !capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
		case EQL_ENSLAVE:
			return eql_enslave(dev, ifr->ifr_data);
		case EQL_EMANCIPATE:
			return eql_emancipate(dev, ifr->ifr_data);
		case EQL_GETSLAVECFG:
			return eql_g_slave_cfg(dev, ifr->ifr_data);
		case EQL_SETSLAVECFG:
			return eql_s_slave_cfg(dev, ifr->ifr_data);
		case EQL_GETMASTRCFG:
			return eql_g_master_cfg(dev, ifr->ifr_data);
		case EQL_SETMASTRCFG:
			return eql_s_master_cfg(dev, ifr->ifr_data);
		default:
			return -EOPNOTSUPP;
	}
}

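/*
 * Illustrative sketch (not part of the driver): how a userspace helper
 * might drive the EQL_ENSLAVE ioctl handled above.  The structure layout
 * and ioctl numbers come from <linux/if_eql.h>; the socket setup, the
 * helper name and the lack of error handling are assumptions for the
 * example only.
 *
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/socket.h>
 *	#include <net/if.h>
 *	#include <linux/if_eql.h>
 *
 *	static int eql_enslave_example(int sock, const char *slave_name,
 *				       long bits_per_sec)
 *	{
 *		struct ifreq ifr;
 *		slaving_request_t srq;
 *
 *		memset(&ifr, 0, sizeof(ifr));
 *		memset(&srq, 0, sizeof(srq));
 *		strncpy(ifr.ifr_name, "eql", IFNAMSIZ - 1);
 *		strncpy(srq.slave_name, slave_name, IFNAMSIZ - 1);
 *		srq.priority = bits_per_sec;
 *		ifr.ifr_data = (char *) &srq;
 *
 *		return ioctl(sock, EQL_ENSLAVE, &ifr);
 *	}
 *
 * sock can be any AF_INET datagram socket; EQL_ENSLAVE requires
 * CAP_NET_ADMIN, as enforced in eql_ioctl() above.
 */
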
/* queue->lock must be held */
static slave_t *__eql_schedule_slaves(slave_queue_t *queue)
{
	unsigned long best_load = ~0UL;
	struct list_head *this, *tmp, *head;
	slave_t *best_slave;

	best_slave = NULL;

	/* Make a pass to set the best slave. */
	head = &queue->all_slaves;
	list_for_each_safe(this, tmp, head) {
		slave_t *slave = list_entry(this, slave_t, list);
		unsigned long slave_load, bytes_queued, priority_Bps;

		/* Go through the slave list once, updating best_slave
		 * whenever a new best_load is found.
		 */
		bytes_queued = slave->bytes_queued;
		priority_Bps = slave->priority_Bps;
		if ((slave->dev->flags & IFF_UP) == IFF_UP) {
			slave_load = (~0UL - (~0UL / 2)) -
				(priority_Bps) + bytes_queued * 8;

			if (slave_load < best_load) {
				best_load = slave_load;
				best_slave = slave;
			}
		} else {
			/* We found a dead slave, kill it. */
			eql_kill_one_slave(queue, slave);
		}
	}
	return best_slave;
}
333 static netdev_tx_t eql_slave_xmit(struct sk_buff *skb, struct net_device *dev)
334 {
335 	equalizer_t *eql = netdev_priv(dev);
336 	slave_t *slave;
337 
338 	spin_lock(&eql->queue.lock);
339 
340 	slave = __eql_schedule_slaves(&eql->queue);
341 	if (slave) {
342 		struct net_device *slave_dev = slave->dev;
343 
344 		skb->dev = slave_dev;
345 		skb->priority = TC_PRIO_FILLER;
346 		slave->bytes_queued += skb->len;
347 		dev_queue_xmit(skb);
348 		dev->stats.tx_packets++;
349 	} else {
350 		dev->stats.tx_dropped++;
351 		dev_kfree_skb(skb);
352 	}
353 
354 	spin_unlock(&eql->queue.lock);
355 
356 	return NETDEV_TX_OK;
357 }
358 
359 /*
360  *	Private ioctl functions
361  */
362 
/* queue->lock must be held */
static slave_t *__eql_find_slave_dev(slave_queue_t *queue, struct net_device *dev)
{
	struct list_head *this, *head;

	head = &queue->all_slaves;
	list_for_each(this, head) {
		slave_t *slave = list_entry(this, slave_t, list);

		if (slave->dev == dev)
			return slave;
	}

	return NULL;
}

static inline int eql_is_full(slave_queue_t *queue)
{
	equalizer_t *eql = netdev_priv(queue->master_dev);

	if (queue->num_slaves >= eql->max_slaves)
		return 1;
	return 0;
}

/* queue->lock must be held */
static int __eql_insert_slave(slave_queue_t *queue, slave_t *slave)
{
	if (!eql_is_full(queue)) {
		slave_t *duplicate_slave = NULL;

		duplicate_slave = __eql_find_slave_dev(queue, slave->dev);
		if (duplicate_slave)
			eql_kill_one_slave(queue, duplicate_slave);

		list_add(&slave->list, &queue->all_slaves);
		queue->num_slaves++;
		slave->dev->flags |= IFF_SLAVE;

		return 0;
	}

	return -ENOSPC;
}

static int eql_enslave(struct net_device *master_dev, slaving_request_t __user *srqp)
{
	struct net_device *slave_dev;
	slaving_request_t srq;

	if (copy_from_user(&srq, srqp, sizeof (slaving_request_t)))
		return -EFAULT;

	slave_dev = dev_get_by_name(&init_net, srq.slave_name);
	if (slave_dev) {
		if ((master_dev->flags & IFF_UP) == IFF_UP) {
			/* slave is not a master & not already a slave: */
			if (!eql_is_master(slave_dev) &&
			    !eql_is_slave(slave_dev)) {
				slave_t *s = kzalloc(sizeof(*s), GFP_KERNEL);
				equalizer_t *eql = netdev_priv(master_dev);
				int ret;

				if (!s) {
					dev_put(slave_dev);
					return -ENOMEM;
				}

				/* srq.priority is the slave's link speed in
				 * bits per second; keep both the bits/s and
				 * bytes/s forms.
				 */
				s->dev = slave_dev;
				s->priority = srq.priority;
				s->priority_bps = srq.priority;
				s->priority_Bps = srq.priority / 8;

				spin_lock_bh(&eql->queue.lock);
				ret = __eql_insert_slave(&eql->queue, s);
				if (ret) {
					dev_put(slave_dev);
					kfree(s);
				}
				spin_unlock_bh(&eql->queue.lock);

				return ret;
			}
		}
		dev_put(slave_dev);
	}

	return -EINVAL;
}

static int eql_emancipate(struct net_device *master_dev, slaving_request_t __user *srqp)
{
	equalizer_t *eql = netdev_priv(master_dev);
	struct net_device *slave_dev;
	slaving_request_t srq;
	int ret;

	if (copy_from_user(&srq, srqp, sizeof (slaving_request_t)))
		return -EFAULT;

	slave_dev = dev_get_by_name(&init_net, srq.slave_name);
	ret = -EINVAL;
	if (slave_dev) {
		spin_lock_bh(&eql->queue.lock);

		if (eql_is_slave(slave_dev)) {
			slave_t *slave = __eql_find_slave_dev(&eql->queue,
							      slave_dev);

			if (slave) {
				eql_kill_one_slave(&eql->queue, slave);
				ret = 0;
			}
		}
		dev_put(slave_dev);

		spin_unlock_bh(&eql->queue.lock);
	}

	return ret;
}

static int eql_g_slave_cfg(struct net_device *dev, slave_config_t __user *scp)
{
	equalizer_t *eql = netdev_priv(dev);
	slave_t *slave;
	struct net_device *slave_dev;
	slave_config_t sc;
	int ret;

	if (copy_from_user(&sc, scp, sizeof (slave_config_t)))
		return -EFAULT;

	slave_dev = dev_get_by_name(&init_net, sc.slave_name);
	if (!slave_dev)
		return -ENODEV;

	ret = -EINVAL;

	spin_lock_bh(&eql->queue.lock);
	if (eql_is_slave(slave_dev)) {
		slave = __eql_find_slave_dev(&eql->queue, slave_dev);
		if (slave) {
			sc.priority = slave->priority;
			ret = 0;
		}
	}
	spin_unlock_bh(&eql->queue.lock);

	dev_put(slave_dev);

	if (!ret && copy_to_user(scp, &sc, sizeof (slave_config_t)))
		ret = -EFAULT;

	return ret;
}

static int eql_s_slave_cfg(struct net_device *dev, slave_config_t __user *scp)
{
	slave_t *slave;
	equalizer_t *eql;
	struct net_device *slave_dev;
	slave_config_t sc;
	int ret;

	if (copy_from_user(&sc, scp, sizeof (slave_config_t)))
		return -EFAULT;

	slave_dev = dev_get_by_name(&init_net, sc.slave_name);
	if (!slave_dev)
		return -ENODEV;

	ret = -EINVAL;

	eql = netdev_priv(dev);
	spin_lock_bh(&eql->queue.lock);
	if (eql_is_slave(slave_dev)) {
		slave = __eql_find_slave_dev(&eql->queue, slave_dev);
		if (slave) {
			slave->priority = sc.priority;
			slave->priority_bps = sc.priority;
			slave->priority_Bps = sc.priority / 8;
			ret = 0;
		}
	}
	spin_unlock_bh(&eql->queue.lock);

	dev_put(slave_dev);

	return ret;
}

static int eql_g_master_cfg(struct net_device *dev, master_config_t __user *mcp)
{
	equalizer_t *eql;
	master_config_t mc;

	memset(&mc, 0, sizeof(master_config_t));

	if (eql_is_master(dev)) {
		eql = netdev_priv(dev);
		mc.max_slaves = eql->max_slaves;
		mc.min_slaves = eql->min_slaves;
		if (copy_to_user(mcp, &mc, sizeof (master_config_t)))
			return -EFAULT;
		return 0;
	}
	return -EINVAL;
}

static int eql_s_master_cfg(struct net_device *dev, master_config_t __user *mcp)
{
	equalizer_t *eql;
	master_config_t mc;

	if (copy_from_user(&mc, mcp, sizeof (master_config_t)))
		return -EFAULT;

	if (eql_is_master(dev)) {
		eql = netdev_priv(dev);
		eql->max_slaves = mc.max_slaves;
		eql->min_slaves = mc.min_slaves;
		return 0;
	}
	return -EINVAL;
}

static struct net_device *dev_eql;

static int __init eql_init_module(void)
{
	int err;

	pr_info("%s\n", version);

	dev_eql = alloc_netdev(sizeof(equalizer_t), "eql", eql_setup);
	if (!dev_eql)
		return -ENOMEM;

	err = register_netdev(dev_eql);
	if (err)
		free_netdev(dev_eql);
	return err;
}

static void __exit eql_cleanup_module(void)
{
	unregister_netdev(dev_eql);
	free_netdev(dev_eql);
}

module_init(eql_init_module);
module_exit(eql_cleanup_module);
MODULE_LICENSE("GPL");