/*
 * Equalizer Load-balancer for serial network interfaces.
 *
 * (c) Copyright 1995 Simon "Guru Aleph-Null" Janes
 * NCM: Network and Communications Management, Inc.
 *
 * (c) Copyright 2002 David S. Miller (davem@redhat.com)
 *
 *	This software may be used and distributed according to the terms
 *	of the GNU General Public License, incorporated herein by reference.
 *
 * The author may be reached as simon@ncm.com, or C/O
 *    NCM: Network and Communications Management, Inc.
 *    Phone: 1-703-847-0040 ext 103
 *
 * Sources:
 *   skeleton.c by Donald Becker.
 * Inspirations:
 *   The Harried and Overworked Alan Cox
 * Conspiracies:
 *   The Alan Cox and Mike McLagan plot to get someone else to do the code,
 *   which turned out to be me.
 */

/*
 * $Log: eql.c,v $
 * Revision 1.2  1996/04/11 17:51:52  guru
 * Added one-line eql_remove_slave patch.
 *
 * Revision 1.1  1996/04/11 17:44:17  guru
 * Initial revision
 *
 * Revision 3.13  1996/01/21 15:17:18  alan
 * tx_queue_len changes.
 *
 * Revision 3.12  1995/03/22 21:07:51  anarchy
 * Added capable() checks on configuration.
 *
 * Revision 3.11  1995/01/19 23:14:31  guru
 *		      slave_load = (ULONG_MAX - (ULONG_MAX / 2)) -
 *			(priority_Bps) + bytes_queued * 8;
 *
 * Revision 3.10  1995/01/19 23:07:53  guru
 *		      slave_load = (ULONG_MAX - (ULONG_MAX / 2)) -
 *			(priority_Bps) + bytes_queued;
 *
 * Revision 3.9  1995/01/19 22:38:20  guru
 *		      slave_load = (ULONG_MAX - (ULONG_MAX / 2)) -
 *			(priority_Bps) + bytes_queued * 4;
 *
 * Revision 3.8  1995/01/19 22:30:55  guru
 *		      slave_load = (ULONG_MAX - (ULONG_MAX / 2)) -
 *			(priority_Bps) + bytes_queued * 2;
 *
 * Revision 3.7  1995/01/19 21:52:35  guru
 * printk's trimmed out.
 *
 * Revision 3.6  1995/01/19 21:49:56  guru
 * This is working pretty well.  I gained 1 K/s in speed..  now it's just
 * robustness and printk's to be diked out.
 *
 * Revision 3.5  1995/01/18 22:29:59  guru
 * still crashes the kernel when the lock_wait thing is woken up.
 *
 * Revision 3.4  1995/01/18 21:59:47  guru
 * Broken set-bit locking snapshot
 *
 * Revision 3.3  1995/01/17 22:09:18  guru
 * infinite sleep in a lock somewhere..
 *
 * Revision 3.2  1995/01/15 16:46:06  guru
 * Log trimmed of non-pertinent 1.x branch messages
 *
 * Revision 3.1  1995/01/15 14:41:45  guru
 * New Scheduler and timer stuff...
 *
 * Revision 1.15  1995/01/15 14:29:02  guru
 * Will make 1.14 (now 1.15) the 3.0 branch, and the 1.12 the 2.0 branch, the one
 * with the dumber scheduler
 *
 * Revision 1.14  1995/01/15 02:37:08  guru
 * shock.. the kept-new-versions could have zonked working
 * stuff.. shudder
 *
 * Revision 1.13  1995/01/15 02:36:31  guru
 *	scheduler was torn out and replaced with something smarter
 *
 *	global names not prefixed with eql_ were renamed to protect
 *	against namespace collisions
 *
 *	a few more abstract interfaces were added to facilitate any
 *	potential change of datastructure.  the driver is still using
 *	a linked list of slaves.  going to a heap would be a bit of
 *	an overkill.
 *
 *	this compiles fine with no warnings.
 *
 *	the locking mechanism and timer stuff must be written however,
 *	this version will not work otherwise
 *
 * Sorry, I had to rewrite most of this for 2.5.x -DaveM
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/netdevice.h>
#include <net/net_namespace.h>

#include <linux/if.h>
#include <linux/if_arp.h>
#include <linux/if_eql.h>

#include <asm/uaccess.h>

static int eql_open(struct net_device *dev);
static int eql_close(struct net_device *dev);
static int eql_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
static netdev_tx_t eql_slave_xmit(struct sk_buff *skb, struct net_device *dev);

#define eql_is_slave(dev)	((dev->flags & IFF_SLAVE) == IFF_SLAVE)
#define eql_is_master(dev)	((dev->flags & IFF_MASTER) == IFF_MASTER)

static void eql_kill_one_slave(slave_queue_t *queue, slave_t *slave);

/*
 * Periodic housekeeping: drain each live slave's queued-byte estimate by
 * its configured priority_Bps, remove slaves whose devices are no longer
 * up, then re-arm the timer.
 */
static void eql_timer(unsigned long param)
{
	equalizer_t *eql = (equalizer_t *) param;
	struct list_head *this, *tmp, *head;

	spin_lock_bh(&eql->queue.lock);
	head = &eql->queue.all_slaves;
	list_for_each_safe(this, tmp, head) {
		slave_t *slave = list_entry(this, slave_t, list);

		if ((slave->dev->flags & IFF_UP) == IFF_UP) {
			slave->bytes_queued -= slave->priority_Bps;
			if (slave->bytes_queued < 0)
				slave->bytes_queued = 0;
		} else {
			eql_kill_one_slave(&eql->queue, slave);
		}
	}
	spin_unlock_bh(&eql->queue.lock);

	eql->timer.expires = jiffies + EQL_DEFAULT_RESCHED_IVAL;
	add_timer(&eql->timer);
}

static const char version[] __initconst =
	"Equalizer2002: Simon Janes (simon@ncm.com) and David S. Miller (davem@redhat.com)";

static const struct net_device_ops eql_netdev_ops = {
	.ndo_open	= eql_open,
	.ndo_stop	= eql_close,
	.ndo_do_ioctl	= eql_ioctl,
	.ndo_start_xmit	= eql_slave_xmit,
};

static void __init eql_setup(struct net_device *dev)
{
	equalizer_t *eql = netdev_priv(dev);

	init_timer(&eql->timer);
	eql->timer.data		= (unsigned long) eql;
	eql->timer.expires	= jiffies + EQL_DEFAULT_RESCHED_IVAL;
	eql->timer.function	= eql_timer;

	spin_lock_init(&eql->queue.lock);
	INIT_LIST_HEAD(&eql->queue.all_slaves);
	eql->queue.master_dev	= dev;

	dev->netdev_ops		= &eql_netdev_ops;

	/*
	 * Now we undo some of the things that eth_setup does.
	 */

	dev->mtu		= EQL_DEFAULT_MTU;	/* set to 576 in if_eql.h */
	dev->flags		= IFF_MASTER;

	dev->type		= ARPHRD_SLIP;
	dev->tx_queue_len	= 5;			/* Hands them off fast */
	dev->priv_flags	       &= ~IFF_XMIT_DST_RELEASE;
}

static int eql_open(struct net_device *dev)
{
	equalizer_t *eql = netdev_priv(dev);

	/* XXX We should force this off automatically for the user. */
	netdev_info(dev,
		    "remember to turn off Van-Jacobson compression on your slave devices\n");

	BUG_ON(!list_empty(&eql->queue.all_slaves));

	eql->min_slaves = 1;
	eql->max_slaves = EQL_DEFAULT_MAX_SLAVES; /* 4 usually... */

	add_timer(&eql->timer);

	return 0;
}

static void eql_kill_one_slave(slave_queue_t *queue, slave_t *slave)
{
	list_del(&slave->list);
	queue->num_slaves--;
	slave->dev->flags &= ~IFF_SLAVE;
	dev_put(slave->dev);
	kfree(slave);
}

static void eql_kill_slave_queue(slave_queue_t *queue)
{
	struct list_head *head, *tmp, *this;

	spin_lock_bh(&queue->lock);

	head = &queue->all_slaves;
	list_for_each_safe(this, tmp, head) {
		slave_t *s = list_entry(this, slave_t, list);

		eql_kill_one_slave(queue, s);
	}

	spin_unlock_bh(&queue->lock);
}

static int eql_close(struct net_device *dev)
{
	equalizer_t *eql = netdev_priv(dev);

	/*
	 * The timer has to be stopped first before we start hacking away
	 * at the data structure it scans every so often...
	 */

	del_timer_sync(&eql->timer);

	eql_kill_slave_queue(&eql->queue);

	return 0;
}

static int eql_enslave(struct net_device *dev, slaving_request_t __user *srq);
static int eql_emancipate(struct net_device *dev, slaving_request_t __user *srq);

static int eql_g_slave_cfg(struct net_device *dev, slave_config_t __user *sc);
static int eql_s_slave_cfg(struct net_device *dev, slave_config_t __user *sc);

static int eql_g_master_cfg(struct net_device *dev, master_config_t __user *mc);
static int eql_s_master_cfg(struct net_device *dev, master_config_t __user *mc);

static int eql_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	if (cmd != EQL_GETMASTRCFG && cmd != EQL_GETSLAVECFG &&
	    !capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case EQL_ENSLAVE:
		return eql_enslave(dev, ifr->ifr_data);
	case EQL_EMANCIPATE:
		return eql_emancipate(dev, ifr->ifr_data);
	case EQL_GETSLAVECFG:
		return eql_g_slave_cfg(dev, ifr->ifr_data);
	case EQL_SETSLAVECFG:
		return eql_s_slave_cfg(dev, ifr->ifr_data);
	case EQL_GETMASTRCFG:
		return eql_g_master_cfg(dev, ifr->ifr_data);
	case EQL_SETMASTRCFG:
		return eql_s_master_cfg(dev, ifr->ifr_data);
	default:
		return -EOPNOTSUPP;
	}
}

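/*
 * Illustrative note (not part of the original sources): the EQL_* ioctls
 * above are issued from user space against the master device through an
 * ordinary socket and a struct ifreq, with ifr_data pointing at the
 * request structure from <linux/if_eql.h>.  A minimal sketch of enslaving
 * a link, assuming a master named "eql" and a slave "ppp0" with a
 * 28800 bit/s priority (eql_enslave() stores priority / 8 as a
 * bytes-per-second rate):
 *
 *	struct ifreq ifr;
 *	slaving_request_t srq;
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	strncpy(ifr.ifr_name, "eql", IFNAMSIZ);
 *	strncpy(srq.slave_name, "ppp0", IFNAMSIZ);
 *	srq.priority = 28800;
 *	ifr.ifr_data = (char *) &srq;
 *	ioctl(fd, EQL_ENSLAVE, &ifr);
 */
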
/* queue->lock must be held */
static slave_t *__eql_schedule_slaves(slave_queue_t *queue)
{
	unsigned long best_load = ~0UL;
	struct list_head *this, *tmp, *head;
	slave_t *best_slave = NULL;

	/* Make a pass to set the best slave. */
	head = &queue->all_slaves;
	list_for_each_safe(this, tmp, head) {
		slave_t *slave = list_entry(this, slave_t, list);
		unsigned long slave_load, bytes_queued, priority_Bps;

		/* Go through the slave list once, updating best_slave
		 * whenever a new best_load is found.
		 */
		bytes_queued = slave->bytes_queued;
		priority_Bps = slave->priority_Bps;
		if ((slave->dev->flags & IFF_UP) == IFF_UP) {
			slave_load = (~0UL - (~0UL / 2)) -
				(priority_Bps) + bytes_queued * 8;

			if (slave_load < best_load) {
				best_load = slave_load;
				best_slave = slave;
			}
		} else {
			/* We found a dead slave, kill it. */
			eql_kill_one_slave(queue, slave);
		}
	}

	return best_slave;
}

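/*
 * Added commentary on the load metric above (not from the original
 * author): (~0UL - ~0UL / 2) is just a large constant offset, so the
 * comparison effectively picks the slave minimising
 * bytes_queued * 8 - priority_Bps; a slave looks better the higher its
 * configured rate and the smaller its current backlog.  For example,
 * with one live slave at priority_Bps = 3600 carrying 900 queued bytes
 * and another at priority_Bps = 1200 with nothing queued, the second
 * one wins because 0 * 8 - 1200 < 900 * 8 - 3600.
 */
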
static netdev_tx_t eql_slave_xmit(struct sk_buff *skb, struct net_device *dev)
{
	equalizer_t *eql = netdev_priv(dev);
	slave_t *slave;

	spin_lock(&eql->queue.lock);

	slave = __eql_schedule_slaves(&eql->queue);
	if (slave) {
		struct net_device *slave_dev = slave->dev;

		skb->dev = slave_dev;
		slave->bytes_queued += skb->len;
		dev_queue_xmit(skb);
		dev->stats.tx_packets++;
	} else {
		dev->stats.tx_dropped++;
		dev_kfree_skb(skb);
	}

	spin_unlock(&eql->queue.lock);

	return NETDEV_TX_OK;
}

/*
 *	Private ioctl functions
 */

/* queue->lock must be held */
static slave_t *__eql_find_slave_dev(slave_queue_t *queue, struct net_device *dev)
{
	struct list_head *this, *head;

	head = &queue->all_slaves;
	list_for_each(this, head) {
		slave_t *slave = list_entry(this, slave_t, list);

		if (slave->dev == dev)
			return slave;
	}

	return NULL;
}

static inline int eql_is_full(slave_queue_t *queue)
{
	equalizer_t *eql = netdev_priv(queue->master_dev);

	if (queue->num_slaves >= eql->max_slaves)
		return 1;
	return 0;
}

/* queue->lock must be held */
static int __eql_insert_slave(slave_queue_t *queue, slave_t *slave)
{
	if (!eql_is_full(queue)) {
		slave_t *duplicate_slave = NULL;

		duplicate_slave = __eql_find_slave_dev(queue, slave->dev);
		if (duplicate_slave)
			eql_kill_one_slave(queue, duplicate_slave);

		list_add(&slave->list, &queue->all_slaves);
		queue->num_slaves++;
		slave->dev->flags |= IFF_SLAVE;

		return 0;
	}

	return -ENOSPC;
}

static int eql_enslave(struct net_device *master_dev, slaving_request_t __user *srqp)
{
	struct net_device *slave_dev;
	slaving_request_t srq;

	if (copy_from_user(&srq, srqp, sizeof (slaving_request_t)))
		return -EFAULT;

	slave_dev = dev_get_by_name(&init_net, srq.slave_name);
	if (slave_dev) {
		if ((master_dev->flags & IFF_UP) == IFF_UP) {
			/* slave is not a master & not already a slave: */
			if (!eql_is_master(slave_dev) &&
			    !eql_is_slave(slave_dev)) {
				slave_t *s = kmalloc(sizeof(*s), GFP_KERNEL);
				equalizer_t *eql = netdev_priv(master_dev);
				int ret;

				if (!s) {
					dev_put(slave_dev);
					return -ENOMEM;
				}

				memset(s, 0, sizeof(*s));
				s->dev = slave_dev;
				s->priority = srq.priority;
				s->priority_bps = srq.priority;
				s->priority_Bps = srq.priority / 8;

				spin_lock_bh(&eql->queue.lock);
				ret = __eql_insert_slave(&eql->queue, s);
				if (ret) {
					dev_put(slave_dev);
					kfree(s);
				}
				spin_unlock_bh(&eql->queue.lock);

				return ret;
			}
		}
		dev_put(slave_dev);
	}

	return -EINVAL;
}

static int eql_emancipate(struct net_device *master_dev, slaving_request_t __user *srqp)
{
	equalizer_t *eql = netdev_priv(master_dev);
	struct net_device *slave_dev;
	slaving_request_t srq;
	int ret;

	if (copy_from_user(&srq, srqp, sizeof (slaving_request_t)))
		return -EFAULT;

	slave_dev = dev_get_by_name(&init_net, srq.slave_name);
	ret = -EINVAL;
	if (slave_dev) {
		spin_lock_bh(&eql->queue.lock);

		if (eql_is_slave(slave_dev)) {
			slave_t *slave = __eql_find_slave_dev(&eql->queue,
							      slave_dev);
			if (slave) {
				eql_kill_one_slave(&eql->queue, slave);
				ret = 0;
			}
		}
		dev_put(slave_dev);

		spin_unlock_bh(&eql->queue.lock);
	}

	return ret;
}

static int eql_g_slave_cfg(struct net_device *dev, slave_config_t __user *scp)
{
	equalizer_t *eql = netdev_priv(dev);
	slave_t *slave;
	struct net_device *slave_dev;
	slave_config_t sc;
	int ret;

	if (copy_from_user(&sc, scp, sizeof (slave_config_t)))
		return -EFAULT;

	slave_dev = dev_get_by_name(&init_net, sc.slave_name);
	if (!slave_dev)
		return -ENODEV;

	ret = -EINVAL;

	spin_lock_bh(&eql->queue.lock);
	if (eql_is_slave(slave_dev)) {
		slave = __eql_find_slave_dev(&eql->queue, slave_dev);
		if (slave) {
			sc.priority = slave->priority;
			ret = 0;
		}
	}
	spin_unlock_bh(&eql->queue.lock);

	dev_put(slave_dev);

	if (!ret && copy_to_user(scp, &sc, sizeof (slave_config_t)))
		ret = -EFAULT;

	return ret;
}

static int eql_s_slave_cfg(struct net_device *dev, slave_config_t __user *scp)
{
	slave_t *slave;
	equalizer_t *eql;
	struct net_device *slave_dev;
	slave_config_t sc;
	int ret;

	if (copy_from_user(&sc, scp, sizeof (slave_config_t)))
		return -EFAULT;

	slave_dev = dev_get_by_name(&init_net, sc.slave_name);
	if (!slave_dev)
		return -ENODEV;

	ret = -EINVAL;

	eql = netdev_priv(dev);
	spin_lock_bh(&eql->queue.lock);
	if (eql_is_slave(slave_dev)) {
		slave = __eql_find_slave_dev(&eql->queue, slave_dev);
		if (slave) {
			slave->priority = sc.priority;
			slave->priority_bps = sc.priority;
			slave->priority_Bps = sc.priority / 8;
			ret = 0;
		}
	}
	spin_unlock_bh(&eql->queue.lock);

	dev_put(slave_dev);

	return ret;
}

static int eql_g_master_cfg(struct net_device *dev, master_config_t __user *mcp)
{
	equalizer_t *eql;
	master_config_t mc;

	memset(&mc, 0, sizeof(master_config_t));

	if (eql_is_master(dev)) {
		eql = netdev_priv(dev);
		mc.max_slaves = eql->max_slaves;
		mc.min_slaves = eql->min_slaves;
		if (copy_to_user(mcp, &mc, sizeof (master_config_t)))
			return -EFAULT;
		return 0;
	}
	return -EINVAL;
}

static int eql_s_master_cfg(struct net_device *dev, master_config_t __user *mcp)
{
	equalizer_t *eql;
	master_config_t mc;

	if (copy_from_user(&mc, mcp, sizeof (master_config_t)))
		return -EFAULT;

	if (eql_is_master(dev)) {
		eql = netdev_priv(dev);
		eql->max_slaves = mc.max_slaves;
		eql->min_slaves = mc.min_slaves;
		return 0;
	}
	return -EINVAL;
}

static struct net_device *dev_eql;

static int __init eql_init_module(void)
{
	int err;

	pr_info("%s\n", version);

	dev_eql = alloc_netdev(sizeof(equalizer_t), "eql", eql_setup);
	if (!dev_eql)
		return -ENOMEM;

	err = register_netdev(dev_eql);
	if (err)
		free_netdev(dev_eql);
	return err;
}

static void __exit eql_cleanup_module(void)
{
	unregister_netdev(dev_eql);
	free_netdev(dev_eql);
}

module_init(eql_init_module);
module_exit(eql_cleanup_module);
MODULE_LICENSE("GPL");