1 /* 2 * Equalizer Load-balancer for serial network interfaces. 3 * 4 * (c) Copyright 1995 Simon "Guru Aleph-Null" Janes 5 * NCM: Network and Communications Management, Inc. 6 * 7 * (c) Copyright 2002 David S. Miller (davem@redhat.com) 8 * 9 * This software may be used and distributed according to the terms 10 * of the GNU General Public License, incorporated herein by reference. 11 * 12 * The author may be reached as simon@ncm.com, or C/O 13 * NCM 14 * Attn: Simon Janes 15 * 6803 Whittier Ave 16 * McLean VA 22101 17 * Phone: 1-703-847-0040 ext 103 18 */ 19 20 /* 21 * Sources: 22 * skeleton.c by Donald Becker. 23 * Inspirations: 24 * The Harried and Overworked Alan Cox 25 * Conspiracies: 26 * The Alan Cox and Mike McLagan plot to get someone else to do the code, 27 * which turned out to be me. 28 */ 29 30 /* 31 * $Log: eql.c,v $ 32 * Revision 1.2 1996/04/11 17:51:52 guru 33 * Added one-line eql_remove_slave patch. 34 * 35 * Revision 1.1 1996/04/11 17:44:17 guru 36 * Initial revision 37 * 38 * Revision 3.13 1996/01/21 15:17:18 alan 39 * tx_queue_len changes. 40 * reformatted. 41 * 42 * Revision 3.12 1995/03/22 21:07:51 anarchy 43 * Added capable() checks on configuration. 44 * Moved header file. 45 * 46 * Revision 3.11 1995/01/19 23:14:31 guru 47 * slave_load = (ULONG_MAX - (ULONG_MAX / 2)) - 48 * (priority_Bps) + bytes_queued * 8; 49 * 50 * Revision 3.10 1995/01/19 23:07:53 guru 51 * back to 52 * slave_load = (ULONG_MAX - (ULONG_MAX / 2)) - 53 * (priority_Bps) + bytes_queued; 54 * 55 * Revision 3.9 1995/01/19 22:38:20 guru 56 * slave_load = (ULONG_MAX - (ULONG_MAX / 2)) - 57 * (priority_Bps) + bytes_queued * 4; 58 * 59 * Revision 3.8 1995/01/19 22:30:55 guru 60 * slave_load = (ULONG_MAX - (ULONG_MAX / 2)) - 61 * (priority_Bps) + bytes_queued * 2; 62 * 63 * Revision 3.7 1995/01/19 21:52:35 guru 64 * printk's trimmed out. 65 * 66 * Revision 3.6 1995/01/19 21:49:56 guru 67 * This is working pretty well. I gained 1 K/s in speed.. now it's just 68 * robustness and printk's to be diked out. 69 * 70 * Revision 3.5 1995/01/18 22:29:59 guru 71 * still crashes the kernel when the lock_wait thing is woken up. 72 * 73 * Revision 3.4 1995/01/18 21:59:47 guru 74 * Broken set-bit locking snapshot 75 * 76 * Revision 3.3 1995/01/17 22:09:18 guru 77 * infinite sleep in a lock somewhere.. 78 * 79 * Revision 3.2 1995/01/15 16:46:06 guru 80 * Log trimmed of non-pertinent 1.x branch messages 81 * 82 * Revision 3.1 1995/01/15 14:41:45 guru 83 * New Scheduler and timer stuff... 84 * 85 * Revision 1.15 1995/01/15 14:29:02 guru 86 * Will make 1.14 (now 1.15) the 3.0 branch, and the 1.12 the 2.0 branch, the one 87 * with the dumber scheduler 88 * 89 * Revision 1.14 1995/01/15 02:37:08 guru 90 * shock.. the kept-new-versions could have zonked working 91 * stuff.. shudder 92 * 93 * Revision 1.13 1995/01/15 02:36:31 guru 94 * big changes 95 * 96 * scheduler was torn out and replaced with something smarter 97 * 98 * global names not prefixed with eql_ were renamed to protect 99 * against namespace collisions 100 * 101 * a few more abstract interfaces were added to facilitate any 102 * potential change of datastructure. the driver is still using 103 * a linked list of slaves. going to a heap would be a bit of 104 * an overkill. 105 * 106 * this compiles fine with no warnings. 107 * 108 * the locking mechanism and timer stuff must be written however, 109 * this version will not work otherwise 110 * 111 * Sorry, I had to rewrite most of this for 2.5.x -DaveM 112 */ 113 114 #include <linux/module.h> 115 #include <linux/kernel.h> 116 #include <linux/init.h> 117 #include <linux/timer.h> 118 #include <linux/netdevice.h> 119 120 #include <linux/if.h> 121 #include <linux/if_arp.h> 122 #include <linux/if_eql.h> 123 124 #include <asm/uaccess.h> 125 126 static int eql_open(struct net_device *dev); 127 static int eql_close(struct net_device *dev); 128 static int eql_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd); 129 static int eql_slave_xmit(struct sk_buff *skb, struct net_device *dev); 130 static struct net_device_stats *eql_get_stats(struct net_device *dev); 131 132 #define eql_is_slave(dev) ((dev->flags & IFF_SLAVE) == IFF_SLAVE) 133 #define eql_is_master(dev) ((dev->flags & IFF_MASTER) == IFF_MASTER) 134 135 static void eql_kill_one_slave(slave_queue_t *queue, slave_t *slave); 136 137 static void eql_timer(unsigned long param) 138 { 139 equalizer_t *eql = (equalizer_t *) param; 140 struct list_head *this, *tmp, *head; 141 142 spin_lock_bh(&eql->queue.lock); 143 head = &eql->queue.all_slaves; 144 list_for_each_safe(this, tmp, head) { 145 slave_t *slave = list_entry(this, slave_t, list); 146 147 if ((slave->dev->flags & IFF_UP) == IFF_UP) { 148 slave->bytes_queued -= slave->priority_Bps; 149 if (slave->bytes_queued < 0) 150 slave->bytes_queued = 0; 151 } else { 152 eql_kill_one_slave(&eql->queue, slave); 153 } 154 155 } 156 spin_unlock_bh(&eql->queue.lock); 157 158 eql->timer.expires = jiffies + EQL_DEFAULT_RESCHED_IVAL; 159 add_timer(&eql->timer); 160 } 161 162 static char version[] __initdata = 163 "Equalizer2002: Simon Janes (simon@ncm.com) and David S. Miller (davem@redhat.com)\n"; 164 165 static void __init eql_setup(struct net_device *dev) 166 { 167 equalizer_t *eql = netdev_priv(dev); 168 169 SET_MODULE_OWNER(dev); 170 171 init_timer(&eql->timer); 172 eql->timer.data = (unsigned long) eql; 173 eql->timer.expires = jiffies + EQL_DEFAULT_RESCHED_IVAL; 174 eql->timer.function = eql_timer; 175 176 spin_lock_init(&eql->queue.lock); 177 INIT_LIST_HEAD(&eql->queue.all_slaves); 178 eql->queue.master_dev = dev; 179 180 dev->open = eql_open; 181 dev->stop = eql_close; 182 dev->do_ioctl = eql_ioctl; 183 dev->hard_start_xmit = eql_slave_xmit; 184 dev->get_stats = eql_get_stats; 185 186 /* 187 * Now we undo some of the things that eth_setup does 188 * that we don't like 189 */ 190 191 dev->mtu = EQL_DEFAULT_MTU; /* set to 576 in if_eql.h */ 192 dev->flags = IFF_MASTER; 193 194 dev->type = ARPHRD_SLIP; 195 dev->tx_queue_len = 5; /* Hands them off fast */ 196 } 197 198 static int eql_open(struct net_device *dev) 199 { 200 equalizer_t *eql = netdev_priv(dev); 201 202 /* XXX We should force this off automatically for the user. */ 203 printk(KERN_INFO "%s: remember to turn off Van-Jacobson compression on " 204 "your slave devices.\n", dev->name); 205 206 BUG_ON(!list_empty(&eql->queue.all_slaves)); 207 208 eql->min_slaves = 1; 209 eql->max_slaves = EQL_DEFAULT_MAX_SLAVES; /* 4 usually... */ 210 211 add_timer(&eql->timer); 212 213 return 0; 214 } 215 216 static void eql_kill_one_slave(slave_queue_t *queue, slave_t *slave) 217 { 218 list_del(&slave->list); 219 queue->num_slaves--; 220 slave->dev->flags &= ~IFF_SLAVE; 221 dev_put(slave->dev); 222 kfree(slave); 223 } 224 225 static void eql_kill_slave_queue(slave_queue_t *queue) 226 { 227 struct list_head *head, *tmp, *this; 228 229 spin_lock_bh(&queue->lock); 230 231 head = &queue->all_slaves; 232 list_for_each_safe(this, tmp, head) { 233 slave_t *s = list_entry(this, slave_t, list); 234 235 eql_kill_one_slave(queue, s); 236 } 237 238 spin_unlock_bh(&queue->lock); 239 } 240 241 static int eql_close(struct net_device *dev) 242 { 243 equalizer_t *eql = netdev_priv(dev); 244 245 /* 246 * The timer has to be stopped first before we start hacking away 247 * at the data structure it scans every so often... 248 */ 249 250 del_timer_sync(&eql->timer); 251 252 eql_kill_slave_queue(&eql->queue); 253 254 return 0; 255 } 256 257 static int eql_enslave(struct net_device *dev, slaving_request_t __user *srq); 258 static int eql_emancipate(struct net_device *dev, slaving_request_t __user *srq); 259 260 static int eql_g_slave_cfg(struct net_device *dev, slave_config_t __user *sc); 261 static int eql_s_slave_cfg(struct net_device *dev, slave_config_t __user *sc); 262 263 static int eql_g_master_cfg(struct net_device *dev, master_config_t __user *mc); 264 static int eql_s_master_cfg(struct net_device *dev, master_config_t __user *mc); 265 266 static int eql_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 267 { 268 if (cmd != EQL_GETMASTRCFG && cmd != EQL_GETSLAVECFG && 269 !capable(CAP_NET_ADMIN)) 270 return -EPERM; 271 272 switch (cmd) { 273 case EQL_ENSLAVE: 274 return eql_enslave(dev, ifr->ifr_data); 275 case EQL_EMANCIPATE: 276 return eql_emancipate(dev, ifr->ifr_data); 277 case EQL_GETSLAVECFG: 278 return eql_g_slave_cfg(dev, ifr->ifr_data); 279 case EQL_SETSLAVECFG: 280 return eql_s_slave_cfg(dev, ifr->ifr_data); 281 case EQL_GETMASTRCFG: 282 return eql_g_master_cfg(dev, ifr->ifr_data); 283 case EQL_SETMASTRCFG: 284 return eql_s_master_cfg(dev, ifr->ifr_data); 285 default: 286 return -EOPNOTSUPP; 287 }; 288 } 289 290 /* queue->lock must be held */ 291 static slave_t *__eql_schedule_slaves(slave_queue_t *queue) 292 { 293 unsigned long best_load = ~0UL; 294 struct list_head *this, *tmp, *head; 295 slave_t *best_slave; 296 297 best_slave = NULL; 298 299 /* Make a pass to set the best slave. */ 300 head = &queue->all_slaves; 301 list_for_each_safe(this, tmp, head) { 302 slave_t *slave = list_entry(this, slave_t, list); 303 unsigned long slave_load, bytes_queued, priority_Bps; 304 305 /* Go through the slave list once, updating best_slave 306 * whenever a new best_load is found. 307 */ 308 bytes_queued = slave->bytes_queued; 309 priority_Bps = slave->priority_Bps; 310 if ((slave->dev->flags & IFF_UP) == IFF_UP) { 311 slave_load = (~0UL - (~0UL / 2)) - 312 (priority_Bps) + bytes_queued * 8; 313 314 if (slave_load < best_load) { 315 best_load = slave_load; 316 best_slave = slave; 317 } 318 } else { 319 /* We found a dead slave, kill it. */ 320 eql_kill_one_slave(queue, slave); 321 } 322 } 323 return best_slave; 324 } 325 326 static int eql_slave_xmit(struct sk_buff *skb, struct net_device *dev) 327 { 328 equalizer_t *eql = netdev_priv(dev); 329 slave_t *slave; 330 331 spin_lock(&eql->queue.lock); 332 333 slave = __eql_schedule_slaves(&eql->queue); 334 if (slave) { 335 struct net_device *slave_dev = slave->dev; 336 337 skb->dev = slave_dev; 338 skb->priority = 1; 339 slave->bytes_queued += skb->len; 340 dev_queue_xmit(skb); 341 eql->stats.tx_packets++; 342 } else { 343 eql->stats.tx_dropped++; 344 dev_kfree_skb(skb); 345 } 346 347 spin_unlock(&eql->queue.lock); 348 349 return 0; 350 } 351 352 static struct net_device_stats * eql_get_stats(struct net_device *dev) 353 { 354 equalizer_t *eql = netdev_priv(dev); 355 return &eql->stats; 356 } 357 358 /* 359 * Private ioctl functions 360 */ 361 362 /* queue->lock must be held */ 363 static slave_t *__eql_find_slave_dev(slave_queue_t *queue, struct net_device *dev) 364 { 365 struct list_head *this, *head; 366 367 head = &queue->all_slaves; 368 list_for_each(this, head) { 369 slave_t *slave = list_entry(this, slave_t, list); 370 371 if (slave->dev == dev) 372 return slave; 373 } 374 375 return NULL; 376 } 377 378 static inline int eql_is_full(slave_queue_t *queue) 379 { 380 equalizer_t *eql = netdev_priv(queue->master_dev); 381 382 if (queue->num_slaves >= eql->max_slaves) 383 return 1; 384 return 0; 385 } 386 387 /* queue->lock must be held */ 388 static int __eql_insert_slave(slave_queue_t *queue, slave_t *slave) 389 { 390 if (!eql_is_full(queue)) { 391 slave_t *duplicate_slave = NULL; 392 393 duplicate_slave = __eql_find_slave_dev(queue, slave->dev); 394 if (duplicate_slave != 0) 395 eql_kill_one_slave(queue, duplicate_slave); 396 397 list_add(&slave->list, &queue->all_slaves); 398 queue->num_slaves++; 399 slave->dev->flags |= IFF_SLAVE; 400 401 return 0; 402 } 403 404 return -ENOSPC; 405 } 406 407 static int eql_enslave(struct net_device *master_dev, slaving_request_t __user *srqp) 408 { 409 struct net_device *slave_dev; 410 slaving_request_t srq; 411 412 if (copy_from_user(&srq, srqp, sizeof (slaving_request_t))) 413 return -EFAULT; 414 415 slave_dev = dev_get_by_name(srq.slave_name); 416 if (slave_dev) { 417 if ((master_dev->flags & IFF_UP) == IFF_UP) { 418 /* slave is not a master & not already a slave: */ 419 if (!eql_is_master(slave_dev) && 420 !eql_is_slave(slave_dev)) { 421 slave_t *s = kmalloc(sizeof(*s), GFP_KERNEL); 422 equalizer_t *eql = netdev_priv(master_dev); 423 int ret; 424 425 if (!s) { 426 dev_put(slave_dev); 427 return -ENOMEM; 428 } 429 430 memset(s, 0, sizeof(*s)); 431 s->dev = slave_dev; 432 s->priority = srq.priority; 433 s->priority_bps = srq.priority; 434 s->priority_Bps = srq.priority / 8; 435 436 spin_lock_bh(&eql->queue.lock); 437 ret = __eql_insert_slave(&eql->queue, s); 438 if (ret) { 439 dev_put(slave_dev); 440 kfree(s); 441 } 442 spin_unlock_bh(&eql->queue.lock); 443 444 return ret; 445 } 446 } 447 dev_put(slave_dev); 448 } 449 450 return -EINVAL; 451 } 452 453 static int eql_emancipate(struct net_device *master_dev, slaving_request_t __user *srqp) 454 { 455 equalizer_t *eql = netdev_priv(master_dev); 456 struct net_device *slave_dev; 457 slaving_request_t srq; 458 int ret; 459 460 if (copy_from_user(&srq, srqp, sizeof (slaving_request_t))) 461 return -EFAULT; 462 463 slave_dev = dev_get_by_name(srq.slave_name); 464 ret = -EINVAL; 465 if (slave_dev) { 466 spin_lock_bh(&eql->queue.lock); 467 468 if (eql_is_slave(slave_dev)) { 469 slave_t *slave = __eql_find_slave_dev(&eql->queue, 470 slave_dev); 471 472 if (slave) { 473 eql_kill_one_slave(&eql->queue, slave); 474 ret = 0; 475 } 476 } 477 dev_put(slave_dev); 478 479 spin_unlock_bh(&eql->queue.lock); 480 } 481 482 return ret; 483 } 484 485 static int eql_g_slave_cfg(struct net_device *dev, slave_config_t __user *scp) 486 { 487 equalizer_t *eql = netdev_priv(dev); 488 slave_t *slave; 489 struct net_device *slave_dev; 490 slave_config_t sc; 491 int ret; 492 493 if (copy_from_user(&sc, scp, sizeof (slave_config_t))) 494 return -EFAULT; 495 496 slave_dev = dev_get_by_name(sc.slave_name); 497 if (!slave_dev) 498 return -ENODEV; 499 500 ret = -EINVAL; 501 502 spin_lock_bh(&eql->queue.lock); 503 if (eql_is_slave(slave_dev)) { 504 slave = __eql_find_slave_dev(&eql->queue, slave_dev); 505 if (slave) { 506 sc.priority = slave->priority; 507 ret = 0; 508 } 509 } 510 spin_unlock_bh(&eql->queue.lock); 511 512 dev_put(slave_dev); 513 514 if (!ret && copy_to_user(scp, &sc, sizeof (slave_config_t))) 515 ret = -EFAULT; 516 517 return ret; 518 } 519 520 static int eql_s_slave_cfg(struct net_device *dev, slave_config_t __user *scp) 521 { 522 slave_t *slave; 523 equalizer_t *eql; 524 struct net_device *slave_dev; 525 slave_config_t sc; 526 int ret; 527 528 if (copy_from_user(&sc, scp, sizeof (slave_config_t))) 529 return -EFAULT; 530 531 slave_dev = dev_get_by_name(sc.slave_name); 532 if (!slave_dev) 533 return -ENODEV; 534 535 ret = -EINVAL; 536 537 eql = netdev_priv(dev); 538 spin_lock_bh(&eql->queue.lock); 539 if (eql_is_slave(slave_dev)) { 540 slave = __eql_find_slave_dev(&eql->queue, slave_dev); 541 if (slave) { 542 slave->priority = sc.priority; 543 slave->priority_bps = sc.priority; 544 slave->priority_Bps = sc.priority / 8; 545 ret = 0; 546 } 547 } 548 spin_unlock_bh(&eql->queue.lock); 549 550 return ret; 551 } 552 553 static int eql_g_master_cfg(struct net_device *dev, master_config_t __user *mcp) 554 { 555 equalizer_t *eql; 556 master_config_t mc; 557 558 if (eql_is_master(dev)) { 559 eql = netdev_priv(dev); 560 mc.max_slaves = eql->max_slaves; 561 mc.min_slaves = eql->min_slaves; 562 if (copy_to_user(mcp, &mc, sizeof (master_config_t))) 563 return -EFAULT; 564 return 0; 565 } 566 return -EINVAL; 567 } 568 569 static int eql_s_master_cfg(struct net_device *dev, master_config_t __user *mcp) 570 { 571 equalizer_t *eql; 572 master_config_t mc; 573 574 if (copy_from_user(&mc, mcp, sizeof (master_config_t))) 575 return -EFAULT; 576 577 if (eql_is_master(dev)) { 578 eql = netdev_priv(dev); 579 eql->max_slaves = mc.max_slaves; 580 eql->min_slaves = mc.min_slaves; 581 return 0; 582 } 583 return -EINVAL; 584 } 585 586 static struct net_device *dev_eql; 587 588 static int __init eql_init_module(void) 589 { 590 int err; 591 592 printk(version); 593 594 dev_eql = alloc_netdev(sizeof(equalizer_t), "eql", eql_setup); 595 if (!dev_eql) 596 return -ENOMEM; 597 598 err = register_netdev(dev_eql); 599 if (err) 600 free_netdev(dev_eql); 601 return err; 602 } 603 604 static void __exit eql_cleanup_module(void) 605 { 606 unregister_netdev(dev_eql); 607 free_netdev(dev_eql); 608 } 609 610 module_init(eql_init_module); 611 module_exit(eql_cleanup_module); 612 MODULE_LICENSE("GPL"); 613