/*
 * Equalizer Load-balancer for serial network interfaces.
 *
 * (c) Copyright 1995 Simon "Guru Aleph-Null" Janes
 * NCM: Network and Communications Management, Inc.
 * Phone: 1-703-847-0040 ext 103
 *
 * This software may be used and distributed according to the terms
 * of the GNU General Public License, incorporated herein by reference.
 *
 * Revision history (excerpt):
 *  - Added one-line eql_remove_slave patch.
 *  - Broken set-bit locking snapshot.
 *  - Log trimmed of non-pertinent 1.x branch messages.
 *  - shock.. the kept-new-versions could have zonked working code.
 *  - Sorry, I had to rewrite most of this for 2.5.x -DaveM
 *
 * Historic note on the load metric used by the scheduler:
 *
 *   slave_load = (ULONG_MAX - (ULONG_MAX / 2)) -
 *                (priority_Bps) + bytes_queued * 8;
 */
133 static int eql_open(struct net_device *dev);
134 static int eql_close(struct net_device *dev);
135 static int eql_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
137 static netdev_tx_t eql_slave_xmit(struct sk_buff *skb, struct net_device *dev);
139 #define eql_is_slave(dev) ((dev->flags & IFF_SLAVE) == IFF_SLAVE) argument
140 #define eql_is_master(dev) ((dev->flags & IFF_MASTER) == IFF_MASTER) argument
142 static void eql_kill_one_slave(slave_queue_t *queue, slave_t *slave);
149 spin_lock(&eql->queue.lock); in eql_timer()
150 head = &eql->queue.all_slaves; in eql_timer()
152 slave_t *slave = list_entry(this, slave_t, list); in eql_timer() local
154 if ((slave->dev->flags & IFF_UP) == IFF_UP) { in eql_timer()
155 slave->bytes_queued -= slave->priority_Bps; in eql_timer()
156 if (slave->bytes_queued < 0) in eql_timer()
157 slave->bytes_queued = 0; in eql_timer()
159 eql_kill_one_slave(&eql->queue, slave); in eql_timer()
163 spin_unlock(&eql->queue.lock); in eql_timer()
165 eql->timer.expires = jiffies + EQL_DEFAULT_RESCHED_IVAL; in eql_timer()
166 add_timer(&eql->timer); in eql_timer()
179 static void __init eql_setup(struct net_device *dev) in eql_setup() argument
181 equalizer_t *eql = netdev_priv(dev); in eql_setup()
183 timer_setup(&eql->timer, eql_timer, 0); in eql_setup()
184 eql->timer.expires = jiffies + EQL_DEFAULT_RESCHED_IVAL; in eql_setup()
186 spin_lock_init(&eql->queue.lock); in eql_setup()
187 INIT_LIST_HEAD(&eql->queue.all_slaves); in eql_setup()
188 eql->queue.master_dev = dev; in eql_setup()
190 dev->netdev_ops = &eql_netdev_ops; in eql_setup()
197 dev->mtu = EQL_DEFAULT_MTU; /* set to 576 in if_eql.h */ in eql_setup()
198 dev->flags = IFF_MASTER; in eql_setup()
200 dev->type = ARPHRD_SLIP; in eql_setup()
201 dev->tx_queue_len = 5; /* Hands them off fast */ in eql_setup()
202 netif_keep_dst(dev); in eql_setup()
205 static int eql_open(struct net_device *dev) in eql_open() argument
207 equalizer_t *eql = netdev_priv(dev); in eql_open()
210 netdev_info(dev, in eql_open()
211 "remember to turn off Van-Jacobson compression on your slave devices\n"); in eql_open()
213 BUG_ON(!list_empty(&eql->queue.all_slaves)); in eql_open()
215 eql->min_slaves = 1; in eql_open()
216 eql->max_slaves = EQL_DEFAULT_MAX_SLAVES; /* 4 usually... */ in eql_open()
218 add_timer(&eql->timer); in eql_open()
223 static void eql_kill_one_slave(slave_queue_t *queue, slave_t *slave) in eql_kill_one_slave() argument
225 list_del(&slave->list); in eql_kill_one_slave()
226 queue->num_slaves--; in eql_kill_one_slave()
227 slave->dev->flags &= ~IFF_SLAVE; in eql_kill_one_slave()
228 netdev_put(slave->dev, &slave->dev_tracker); in eql_kill_one_slave()
229 kfree(slave); in eql_kill_one_slave()
236 spin_lock_bh(&queue->lock); in eql_kill_slave_queue()
238 head = &queue->all_slaves; in eql_kill_slave_queue()
245 spin_unlock_bh(&queue->lock); in eql_kill_slave_queue()
248 static int eql_close(struct net_device *dev) in eql_close() argument
250 equalizer_t *eql = netdev_priv(dev); in eql_close()
257 del_timer_sync(&eql->timer); in eql_close()
259 eql_kill_slave_queue(&eql->queue); in eql_close()
264 static int eql_enslave(struct net_device *dev, slaving_request_t __user *srq);
265 static int eql_emancipate(struct net_device *dev, slaving_request_t __user *srq);
267 static int eql_g_slave_cfg(struct net_device *dev, slave_config_t __user *sc);
268 static int eql_s_slave_cfg(struct net_device *dev, slave_config_t __user *sc);
270 static int eql_g_master_cfg(struct net_device *dev, master_config_t __user *mc);
271 static int eql_s_master_cfg(struct net_device *dev, master_config_t __user *mc);
273 static int eql_siocdevprivate(struct net_device *dev, struct ifreq *ifr, in eql_siocdevprivate() argument
278 return -EPERM; in eql_siocdevprivate()
281 return -EOPNOTSUPP; in eql_siocdevprivate()
285 return eql_enslave(dev, data); in eql_siocdevprivate()
287 return eql_emancipate(dev, data); in eql_siocdevprivate()
289 return eql_g_slave_cfg(dev, data); in eql_siocdevprivate()
291 return eql_s_slave_cfg(dev, data); in eql_siocdevprivate()
293 return eql_g_master_cfg(dev, data); in eql_siocdevprivate()
295 return eql_s_master_cfg(dev, data); in eql_siocdevprivate()
297 return -EOPNOTSUPP; in eql_siocdevprivate()
301 /* queue->lock must be held */
310 /* Make a pass to set the best slave. */ in __eql_schedule_slaves()
311 head = &queue->all_slaves; in __eql_schedule_slaves()
313 slave_t *slave = list_entry(this, slave_t, list); in __eql_schedule_slaves() local
316 /* Go through the slave list once, updating best_slave in __eql_schedule_slaves()
319 bytes_queued = slave->bytes_queued; in __eql_schedule_slaves()
320 priority_Bps = slave->priority_Bps; in __eql_schedule_slaves()
321 if ((slave->dev->flags & IFF_UP) == IFF_UP) { in __eql_schedule_slaves()
322 slave_load = (~0UL - (~0UL / 2)) - in __eql_schedule_slaves()
327 best_slave = slave; in __eql_schedule_slaves()
330 /* We found a dead slave, kill it. */ in __eql_schedule_slaves()
331 eql_kill_one_slave(queue, slave); in __eql_schedule_slaves()
337 static netdev_tx_t eql_slave_xmit(struct sk_buff *skb, struct net_device *dev) in eql_slave_xmit() argument
339 equalizer_t *eql = netdev_priv(dev); in eql_slave_xmit()
340 slave_t *slave; in eql_slave_xmit() local
342 spin_lock(&eql->queue.lock); in eql_slave_xmit()
344 slave = __eql_schedule_slaves(&eql->queue); in eql_slave_xmit()
345 if (slave) { in eql_slave_xmit()
346 struct net_device *slave_dev = slave->dev; in eql_slave_xmit()
348 skb->dev = slave_dev; in eql_slave_xmit()
349 skb->priority = TC_PRIO_FILLER; in eql_slave_xmit()
350 slave->bytes_queued += skb->len; in eql_slave_xmit()
352 dev->stats.tx_packets++; in eql_slave_xmit()
354 dev->stats.tx_dropped++; in eql_slave_xmit()
358 spin_unlock(&eql->queue.lock); in eql_slave_xmit()
367 /* queue->lock must be held */
368 static slave_t *__eql_find_slave_dev(slave_queue_t *queue, struct net_device *dev) in __eql_find_slave_dev() argument
372 head = &queue->all_slaves; in __eql_find_slave_dev()
374 slave_t *slave = list_entry(this, slave_t, list); in __eql_find_slave_dev() local
376 if (slave->dev == dev) in __eql_find_slave_dev()
377 return slave; in __eql_find_slave_dev()
385 equalizer_t *eql = netdev_priv(queue->master_dev); in eql_is_full()
387 if (queue->num_slaves >= eql->max_slaves) in eql_is_full()
392 /* queue->lock must be held */
393 static int __eql_insert_slave(slave_queue_t *queue, slave_t *slave) in __eql_insert_slave() argument
398 duplicate_slave = __eql_find_slave_dev(queue, slave->dev); in __eql_insert_slave()
402 netdev_hold(slave->dev, &slave->dev_tracker, GFP_ATOMIC); in __eql_insert_slave()
403 list_add(&slave->list, &queue->all_slaves); in __eql_insert_slave()
404 queue->num_slaves++; in __eql_insert_slave()
405 slave->dev->flags |= IFF_SLAVE; in __eql_insert_slave()
410 return -ENOSPC; in __eql_insert_slave()
419 return -EFAULT; in eql_enslave()
423 return -ENODEV; in eql_enslave()
425 if ((master_dev->flags & IFF_UP) == IFF_UP) { in eql_enslave()
426 /* slave is not a master & not already a slave: */ in eql_enslave()
433 return -ENOMEM; in eql_enslave()
435 s->dev = slave_dev; in eql_enslave()
436 s->priority = srq.priority; in eql_enslave()
437 s->priority_bps = srq.priority; in eql_enslave()
438 s->priority_Bps = srq.priority / 8; in eql_enslave()
440 spin_lock_bh(&eql->queue.lock); in eql_enslave()
441 ret = __eql_insert_slave(&eql->queue, s); in eql_enslave()
445 spin_unlock_bh(&eql->queue.lock); in eql_enslave()
451 return -EINVAL; in eql_enslave()
462 return -EFAULT; in eql_emancipate()
466 return -ENODEV; in eql_emancipate()
468 ret = -EINVAL; in eql_emancipate()
469 spin_lock_bh(&eql->queue.lock); in eql_emancipate()
471 slave_t *slave = __eql_find_slave_dev(&eql->queue, slave_dev); in eql_emancipate() local
472 if (slave) { in eql_emancipate()
473 eql_kill_one_slave(&eql->queue, slave); in eql_emancipate()
477 spin_unlock_bh(&eql->queue.lock); in eql_emancipate()
482 static int eql_g_slave_cfg(struct net_device *dev, slave_config_t __user *scp) in eql_g_slave_cfg() argument
484 equalizer_t *eql = netdev_priv(dev); in eql_g_slave_cfg()
485 slave_t *slave; in eql_g_slave_cfg() local
491 return -EFAULT; in eql_g_slave_cfg()
495 return -ENODEV; in eql_g_slave_cfg()
497 ret = -EINVAL; in eql_g_slave_cfg()
499 spin_lock_bh(&eql->queue.lock); in eql_g_slave_cfg()
501 slave = __eql_find_slave_dev(&eql->queue, slave_dev); in eql_g_slave_cfg()
502 if (slave) { in eql_g_slave_cfg()
503 sc.priority = slave->priority; in eql_g_slave_cfg()
507 spin_unlock_bh(&eql->queue.lock); in eql_g_slave_cfg()
510 ret = -EFAULT; in eql_g_slave_cfg()
515 static int eql_s_slave_cfg(struct net_device *dev, slave_config_t __user *scp) in eql_s_slave_cfg() argument
517 slave_t *slave; in eql_s_slave_cfg() local
524 return -EFAULT; in eql_s_slave_cfg()
528 return -ENODEV; in eql_s_slave_cfg()
530 ret = -EINVAL; in eql_s_slave_cfg()
532 eql = netdev_priv(dev); in eql_s_slave_cfg()
533 spin_lock_bh(&eql->queue.lock); in eql_s_slave_cfg()
535 slave = __eql_find_slave_dev(&eql->queue, slave_dev); in eql_s_slave_cfg()
536 if (slave) { in eql_s_slave_cfg()
537 slave->priority = sc.priority; in eql_s_slave_cfg()
538 slave->priority_bps = sc.priority; in eql_s_slave_cfg()
539 slave->priority_Bps = sc.priority / 8; in eql_s_slave_cfg()
543 spin_unlock_bh(&eql->queue.lock); in eql_s_slave_cfg()
548 static int eql_g_master_cfg(struct net_device *dev, master_config_t __user *mcp) in eql_g_master_cfg() argument
555 if (eql_is_master(dev)) { in eql_g_master_cfg()
556 eql = netdev_priv(dev); in eql_g_master_cfg()
557 mc.max_slaves = eql->max_slaves; in eql_g_master_cfg()
558 mc.min_slaves = eql->min_slaves; in eql_g_master_cfg()
560 return -EFAULT; in eql_g_master_cfg()
563 return -EINVAL; in eql_g_master_cfg()
566 static int eql_s_master_cfg(struct net_device *dev, master_config_t __user *mcp) in eql_s_master_cfg() argument
572 return -EFAULT; in eql_s_master_cfg()
574 if (eql_is_master(dev)) { in eql_s_master_cfg()
575 eql = netdev_priv(dev); in eql_s_master_cfg()
576 eql->max_slaves = mc.max_slaves; in eql_s_master_cfg()
577 eql->min_slaves = mc.min_slaves; in eql_s_master_cfg()
580 return -EINVAL; in eql_s_master_cfg()
594 return -ENOMEM; in eql_init_module()