/* lance.c: An AMD LANCE/PCnet ethernet driver for Linux. */
/*
	Written/copyright 1993-1998 by Donald Becker.

	Copyright 1993 United States Government as represented by the
	Director, National Security Agency.
	This software may be used and distributed according to the terms
	of the GNU General Public License, incorporated herein by reference.

	This driver is for the Allied Telesis AT1500 and HP J2405A, and should work
	with most other LANCE-based bus-master (NE2100/NE2500) ethercards.

	The author may be reached as becker@scyld.com, or C/O
	Scyld Computing Corporation
	410 Severn Ave., Suite 210
	Annapolis MD 21403

	Andrey V. Savochkin:
	- alignment problem with 1.3.* kernel and some minor changes.
	Thomas Bogendoerfer (tsbogend@bigbug.franken.de):
	- added support for Linux/Alpha, but removed most of it, because
	  it worked only for the PCI chip.
	- added hook for the 32bit lance driver
	- added PCnetPCI II (79C970A) to chip table
	Paul Gortmaker (gpg109@rsphy1.anu.edu.au):
	- hopefully fix above so Linux/Alpha can use ISA cards too.
	8/20/96 Fixed 7990 autoIRQ failure and reversed unneeded alignment -djb
	v1.12 10/27/97 Module support -djb
	v1.14  2/3/98 Module support modified, made PCI support optional -djb
	v1.15 5/27/99 Fixed bug in the cleanup_module(). dev->priv was freed
		      before unregister_netdev() which caused NULL pointer
		      reference later in the chain (in rtnetlink_fill_ifinfo())
		      -- Mika Kuoppala <miku@iki.fi>

	Forward ported v1.14 to 2.1.129, merged the PCI and misc changes from
	the 2.1 version of the old driver - Alan Cox

	Get rid of check_region, check kmalloc return in lance_probe1
	Arnaldo Carvalho de Melo <acme@conectiva.com.br> - 11/01/2001

	Reworked detection, added support for Racal InterLan EtherBlaster cards
	Vesselin Kostadinov <vesok at yahoo dot com> - 22/4/2004
*/

static const char version[] = "lance.c:v1.16 2006/11/09 dplatt@3do.com, becker@cesdis.gsfc.nasa.gov\n";

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/mm.h>
#include <linux/bitops.h>

#include <asm/io.h>
#include <asm/dma.h>

static unsigned int lance_portlist[] __initdata = { 0x300, 0x320, 0x340, 0x360, 0};
static int lance_probe1(struct net_device *dev, int ioaddr, int irq, int options);
static int __init do_lance_probe(struct net_device *dev);


static struct card {
	char id_offset14;
	char id_offset15;
} cards[] = {
	{	//"normal"
		.id_offset14 = 0x57,
		.id_offset15 = 0x57,
	},
	{	//NI6510EB
		.id_offset14 = 0x52,
		.id_offset15 = 0x44,
	},
	{	//Racal InterLan EtherBlaster
		.id_offset14 = 0x52,
		.id_offset15 = 0x49,
	},
};
#define NUM_CARDS 3

#ifdef LANCE_DEBUG
static int lance_debug = LANCE_DEBUG;
#else
static int lance_debug = 1;
#endif

/*
				Theory of Operation

I. Board Compatibility

This device driver is designed for the AMD 79C960, the "PCnet-ISA
single-chip ethernet controller for ISA".  This chip is used in a wide
variety of boards from vendors such as Allied Telesis, HP, Kingston,
and Boca.  This driver is also intended to work with older AMD 7990
designs, such as the NE1500 and NE2100, and newer 79C961.  For convenience,
I use the name LANCE to refer to all of the AMD chips, even though it properly
refers only to the original 7990.

II. Board-specific settings

The driver is designed to work with boards that use the faster
bus-master mode, rather than the shared memory mode.  (Only older designs
have on-board buffer memory needed to support the slower shared memory mode.)

Most ISA boards have jumpered settings for the I/O base, IRQ line, and DMA
channel.  This driver probes the likely base addresses:
{0x300, 0x320, 0x340, 0x360}.
After the board is found it generates a DMA-timeout interrupt and uses
autoIRQ to find the IRQ line.  The DMA channel can be set with the low bits
of the otherwise-unused dev->mem_start value (aka PARAM1).  If unset it is
probed for by enabling each free DMA channel in turn and checking if
initialization succeeds.

The HP-J2405A board is an exception: with this board it is easy to read the
EEPROM-set values for the base, IRQ, and DMA.  (Of course you must already
_know_ the base address -- that field is for writing the EEPROM.)

III. Driver operation

IIIa. Ring buffers
The LANCE uses ring buffers of Tx and Rx descriptors.  Each entry describes
the base and length of the data buffer, along with status bits.  The length
of these rings is set by LANCE_LOG_{RX,TX}_BUFFERS, which is log_2() of
the buffer count (rather than being directly the buffer count) for
implementation ease.  The current values are 4 (Tx) and 4 (Rx), which leads
to ring sizes of 16 (Tx) and 16 (Rx).  Increasing the number of ring entries
needlessly uses extra space and reduces the chance that an upper layer will
be able to reorder queued Tx packets based on priority.  Decreasing the number
of entries makes it more difficult to achieve back-to-back packet transmission
and increases the chance that the Rx ring will overflow.  (Consider the worst
case of receiving back-to-back minimum-sized packets.)

The LANCE has the capability to "chain" both Rx and Tx buffers, but this driver
statically allocates full-sized (slightly oversized -- PKT_BUF_SZ) buffers to
avoid the administrative overhead.  For the Rx side this avoids dynamically
allocating full-sized buffers "just in case", at the expense of a
memory-to-memory data copy for each packet received.  For most systems this
is a good tradeoff: the Rx buffer will always be in low memory, the copy
is inexpensive, and it primes the cache for later packet processing.  For Tx
the buffers are only used when needed as low-memory bounce buffers.

IIIB. 16M memory limitations.
For the ISA bus master mode all structures used directly by the LANCE,
the initialization block, Rx and Tx rings, and data buffers, must be
accessible from the ISA bus, i.e. in the lower 16M of real memory.
This is a problem for current Linux kernels on >16M machines. The network
devices are initialized after memory initialization, and the kernel doles out
memory from the top of memory downward.  The current solution is to have a
special network initialization routine that's called before memory
initialization; this will eventually be generalized for all network devices.
As mentioned before, low-memory "bounce-buffers" are used when needed; the
test for when a buffer must be bounced is sketched just after this comment.

IIIC. Synchronization
The driver runs as two independent, single-threaded flows of control.  One
is the send-packet routine, which enforces single-threaded use by the
dev->tbusy flag.  The other thread is the interrupt handler, which is
single-threaded by the hardware and other software.

The send packet thread has partial control over the Tx ring and the
'dev->tbusy' flag.  It sets the tbusy flag whenever it's queuing a Tx packet.
If the next queue slot is empty, it clears the tbusy flag when finished;
otherwise it sets the 'lp->tx_full' flag.

The interrupt handler has exclusive control over the Rx ring and records stats
from the Tx ring.  (The Tx-done interrupt can't be selectively turned off, so
we can't avoid the interrupt overhead by having the Tx routine reap the Tx
stats.)  After reaping the stats, it marks the queue entry as empty by setting
the 'base' to zero.  If the 'lp->tx_full' flag is set, it clears both the
tx_full and tbusy flags.

*/
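
/* Editor's sketch (illustration only, not part of the original driver):
   the IIIB bounce-buffer test.  A buffer must be bounced when any byte
   of it lies at or above the 16M ISA bus-master limit; the same
   expression appears in lance_start_xmit() below. */
static inline int lance_buf_needs_bounce(void *buf, unsigned int len)
{
	/* isa_virt_to_bus() yields the address the LANCE would use. */
	return (u32)isa_virt_to_bus(buf) + len > 0x01000000;
}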

/* Set the number of Tx and Rx buffers, using Log_2(# buffers).
   Reasonable default values are 16 Tx buffers, and 16 Rx buffers.
   That translates to 4 and 4 (16 == 2^4).
   This is a compile-time option for efficiency.
   */
#ifndef LANCE_LOG_TX_BUFFERS
#define LANCE_LOG_TX_BUFFERS 4
#define LANCE_LOG_RX_BUFFERS 4
#endif

#define TX_RING_SIZE			(1 << (LANCE_LOG_TX_BUFFERS))
#define TX_RING_MOD_MASK		(TX_RING_SIZE - 1)
#define TX_RING_LEN_BITS		((LANCE_LOG_TX_BUFFERS) << 29)

#define RX_RING_SIZE			(1 << (LANCE_LOG_RX_BUFFERS))
#define RX_RING_MOD_MASK		(RX_RING_SIZE - 1)
#define RX_RING_LEN_BITS		((LANCE_LOG_RX_BUFFERS) << 29)
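
/* Editor's note: with both log2 values at 4 (the defaults above),
   TX_RING_SIZE == RX_RING_SIZE == 16 and the MOD_MASKs are 0x0f, so
   "cur_tx & TX_RING_MOD_MASK" wraps a ring index cheaply.  The LEN_BITS
   values place the log2 count in bits 31:29 of the init-block ring
   words below: 4 << 29 == 0x80000000. */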

#define PKT_BUF_SZ		1544

/* Offsets from base I/O address. */
#define LANCE_DATA 0x10
#define LANCE_ADDR 0x12
#define LANCE_RESET 0x14
#define LANCE_BUS_IF 0x16
#define LANCE_TOTAL_SIZE 0x18

#define TX_TIMEOUT	(HZ/5)

/* The LANCE Rx and Tx ring descriptors. */
struct lance_rx_head {
	s32 base;
	s16 buf_length;			/* This length is 2s complement (negative)! */
	s16 msg_length;			/* This length is "normal". */
};

struct lance_tx_head {
	s32 base;
	s16 length;			/* Length is 2s complement (negative)! */
	s16 misc;
};
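
/* Editor's note: the chip reads the length fields above as two's
   complement negatives, so a full PKT_BUF_SZ (1544 byte) Rx buffer is
   programmed as -1544, i.e. 0xf9f8 in the 16-bit field -- see
   lance_init_ring() below. */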

/* The LANCE initialization block, described in the databook. */
struct lance_init_block {
	u16 mode;		/* Pre-set mode (reg. 15) */
	u8  phys_addr[6];	/* Physical ethernet address */
	u32 filter[2];		/* Multicast filter (unused). */
	/* Receive and transmit ring base, along with the length bits. */
	u32 rx_ring;		/* Rx ring base pointer + length bits */
	u32 tx_ring;		/* Tx ring base pointer + length bits */
};
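
/* Editor's sketch (hypothetical helper, not used by the driver): how the
   two ring words above are composed, mirroring lance_probe1() and
   lance_init_ring() -- the low 24 bits are the ISA bus address of the
   ring, the top three bits are the log2 ring length. */
static inline u32 lance_make_ring_word(void *ring, u32 len_bits)
{
	return ((u32)isa_virt_to_bus(ring) & 0xffffff) | len_bits;
}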

struct lance_private {
	/* The Tx and Rx ring entries must be aligned on 8-byte boundaries. */
	struct lance_rx_head rx_ring[RX_RING_SIZE];
	struct lance_tx_head tx_ring[TX_RING_SIZE];
	struct lance_init_block	init_block;
	const char *name;
	/* The saved address of a sent-in-place packet/buffer, for later freeing. */
	struct sk_buff* tx_skbuff[TX_RING_SIZE];
	/* The addresses of receive-in-place skbuffs. */
	struct sk_buff* rx_skbuff[RX_RING_SIZE];
	unsigned long rx_buffs;		/* Address of the Rx buffer area. */
	/* Tx low-memory "bounce buffer" address. */
	char (*tx_bounce_buffs)[PKT_BUF_SZ];
	int cur_rx, cur_tx;		/* The next free ring entry */
	int dirty_rx, dirty_tx;		/* The ring entries to be free()ed. */
	int dma;
	unsigned char chip_version;	/* See lance_chip_type. */
	spinlock_t devlock;
};

#define LANCE_MUST_PAD          0x00000001
#define LANCE_ENABLE_AUTOSELECT 0x00000002
#define LANCE_MUST_REINIT_RING  0x00000004
#define LANCE_MUST_UNRESET      0x00000008
#define LANCE_HAS_MISSED_FRAME  0x00000010

/* A mapping from the chip ID number to the part number and features.
   These are from the datasheets -- in real life the '970 version
   reportedly has the same ID as the '965. */
static struct lance_chip_type {
	int id_number;
	const char *name;
	int flags;
} chip_table[] = {
	{0x0000, "LANCE 7990",			/* Ancient lance chip.  */
		LANCE_MUST_PAD + LANCE_MUST_UNRESET},
	{0x0003, "PCnet/ISA 79C960",		/* 79C960 PCnet/ISA.  */
		LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
			LANCE_HAS_MISSED_FRAME},
	{0x2260, "PCnet/ISA+ 79C961",		/* 79C961 PCnet/ISA+, Plug-n-Play.  */
		LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
			LANCE_HAS_MISSED_FRAME},
	{0x2420, "PCnet/PCI 79C970",		/* 79C970 or 79C974 PCnet-SCSI, PCI. */
		LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
			LANCE_HAS_MISSED_FRAME},
	/* Bug: the PCnet/PCI actually uses the PCnet/VLB ID number, so just call
		it the PCnet32. */
	{0x2430, "PCnet32",			/* 79C965 PCnet for VL bus. */
		LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
			LANCE_HAS_MISSED_FRAME},
	{0x2621, "PCnet/PCI-II 79C970A",	/* 79C970A PCnet/PCI II. */
		LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
			LANCE_HAS_MISSED_FRAME},
	{0x0,	 "PCnet (unknown)",
		LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
			LANCE_HAS_MISSED_FRAME},
};

enum {OLD_LANCE = 0, PCNET_ISA = 1, PCNET_ISAP = 2, PCNET_PCI = 3, PCNET_VLB = 4, PCNET_PCI_II = 5, LANCE_UNKNOWN = 6};


/* Non-zero if lance_probe1() needs to allocate low-memory bounce buffers.
   Assume yes until we know the memory size. */
static unsigned char lance_need_isa_bounce_buffers = 1;

static int lance_open(struct net_device *dev);
static void lance_init_ring(struct net_device *dev, gfp_t mode);
static netdev_tx_t lance_start_xmit(struct sk_buff *skb,
				    struct net_device *dev);
static int lance_rx(struct net_device *dev);
static irqreturn_t lance_interrupt(int irq, void *dev_id);
static int lance_close(struct net_device *dev);
static struct net_device_stats *lance_get_stats(struct net_device *dev);
static void set_multicast_list(struct net_device *dev);
static void lance_tx_timeout (struct net_device *dev, unsigned int txqueue);



#ifdef MODULE
#define MAX_CARDS		8	/* Max number of interfaces (cards) per module */

static struct net_device *dev_lance[MAX_CARDS];
static int io[MAX_CARDS];
static int dma[MAX_CARDS];
static int irq[MAX_CARDS];

module_param_hw_array(io, int, ioport, NULL, 0);
module_param_hw_array(dma, int, dma, NULL, 0);
module_param_hw_array(irq, int, irq, NULL, 0);
module_param(lance_debug, int, 0);
MODULE_PARM_DESC(io, "LANCE/PCnet I/O base address(es), required");
MODULE_PARM_DESC(dma, "LANCE/PCnet ISA DMA channel (ignored for some devices)");
MODULE_PARM_DESC(irq, "LANCE/PCnet IRQ number (ignored for some devices)");
MODULE_PARM_DESC(lance_debug, "LANCE/PCnet debug level (0-7)");
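
/* Editor's note: a typical manual load with the options above might look
   like (parameter values are examples only):
	modprobe lance io=0x300 irq=5 dma=5
   io= is mandatory -- lance_init_module() below refuses to autoprobe;
   irq= and dma= may be omitted, in which case the probe code attempts
   auto-detection. */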

static int __init lance_init_module(void)
{
	struct net_device *dev;
	int this_dev, found = 0;

	for (this_dev = 0; this_dev < MAX_CARDS; this_dev++) {
		if (io[this_dev] == 0) {
			if (this_dev != 0) /* only complain once */
				break;
			printk(KERN_NOTICE "lance.c: Module autoprobing not allowed. Append \"io=0xNNN\" value(s).\n");
			return -EPERM;
		}
		dev = alloc_etherdev(0);
		if (!dev)
			break;
		dev->irq = irq[this_dev];
		dev->base_addr = io[this_dev];
		dev->dma = dma[this_dev];
		if (do_lance_probe(dev) == 0) {
			dev_lance[found++] = dev;
			continue;
		}
		free_netdev(dev);
		break;
	}
	if (found != 0)
		return 0;
	return -ENXIO;
}
module_init(lance_init_module);

static void cleanup_card(struct net_device *dev)
{
	struct lance_private *lp = dev->ml_priv;
	if (dev->dma != 4)
		free_dma(dev->dma);
	release_region(dev->base_addr, LANCE_TOTAL_SIZE);
	kfree(lp->tx_bounce_buffs);
	kfree((void*)lp->rx_buffs);
	kfree(lp);
}

static void __exit lance_cleanup_module(void)
{
	int this_dev;

	for (this_dev = 0; this_dev < MAX_CARDS; this_dev++) {
		struct net_device *dev = dev_lance[this_dev];
		if (dev) {
			unregister_netdev(dev);
			cleanup_card(dev);
			free_netdev(dev);
		}
	}
}
module_exit(lance_cleanup_module);
#endif /* MODULE */
MODULE_LICENSE("GPL");


/* Starting in v2.1.*, the LANCE/PCnet probe is similar to the other
   board probes, now that kmalloc() can allocate ISA DMA-able regions.
   This also allows the LANCE driver to be used as a module.
   */
static int __init do_lance_probe(struct net_device *dev)
{
	unsigned int *port;
	int result;

	if (high_memory <= phys_to_virt(16*1024*1024))
		lance_need_isa_bounce_buffers = 0;

	for (port = lance_portlist; *port; port++) {
		int ioaddr = *port;
		struct resource *r = request_region(ioaddr, LANCE_TOTAL_SIZE,
							"lance-probe");

		if (r) {
			/* Detect the card with minimal I/O reads */
			char offset14 = inb(ioaddr + 14);
			int card;
			for (card = 0; card < NUM_CARDS; ++card)
				if (cards[card].id_offset14 == offset14)
					break;
			if (card < NUM_CARDS) {	/* yes, the first byte matches */
				char offset15 = inb(ioaddr + 15);
				for (card = 0; card < NUM_CARDS; ++card)
					if ((cards[card].id_offset14 == offset14) &&
					    (cards[card].id_offset15 == offset15))
						break;
			}
			if (card < NUM_CARDS) {	/* Signature OK */
				result = lance_probe1(dev, ioaddr, 0, 0);
				if (!result) {
					struct lance_private *lp = dev->ml_priv;
					int ver = lp->chip_version;

					r->name = chip_table[ver].name;
					return 0;
				}
			}
			release_region(ioaddr, LANCE_TOTAL_SIZE);
		}
	}
	return -ENODEV;
}

#ifndef MODULE
struct net_device * __init lance_probe(int unit)
{
	struct net_device *dev = alloc_etherdev(0);
	int err;

	if (!dev)
		return ERR_PTR(-ENODEV);

	sprintf(dev->name, "eth%d", unit);
	netdev_boot_setup_check(dev);

	err = do_lance_probe(dev);
	if (err)
		goto out;
	return dev;
out:
	free_netdev(dev);
	return ERR_PTR(err);
}
#endif

static const struct net_device_ops lance_netdev_ops = {
	.ndo_open		= lance_open,
	.ndo_start_xmit		= lance_start_xmit,
	.ndo_stop		= lance_close,
	.ndo_get_stats		= lance_get_stats,
	.ndo_set_rx_mode	= set_multicast_list,
	.ndo_tx_timeout		= lance_tx_timeout,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};

static int __init lance_probe1(struct net_device *dev, int ioaddr, int irq, int options)
{
	struct lance_private *lp;
	unsigned long dma_channels;	/* Mark spuriously-busy DMA channels */
	int i, reset_val, lance_version;
	const char *chipname;
	/* Flags for specific chips or boards. */
	unsigned char hpJ2405A = 0;	/* HP ISA adaptor */
	int hp_builtin = 0;		/* HP on-board ethernet. */
	static int did_version;		/* Already printed version info. */
	unsigned long flags;
	int err = -ENOMEM;
	void __iomem *bios;
	u8 addr[ETH_ALEN];

	/* First we look for special cases.
	   Check for HP's on-board ethernet by looking for 'HP' in the BIOS.
	   There are two HP versions, check the BIOS for the configuration port.
	   This method provided by L. Julliard, Laurent_Julliard@grenoble.hp.com.
	   */
	bios = ioremap(0xf00f0, 0x14);
	if (!bios)
		return -ENOMEM;
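	/* Editor's note: 0x5048 is the ASCII string "HP" read as a
	   little-endian 16-bit word. */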
	if (readw(bios + 0x12) == 0x5048) {
		static const short ioaddr_table[] = { 0x300, 0x320, 0x340, 0x360};
		int hp_port = (readl(bios + 1) & 1) ? 0x499 : 0x99;
		/* We can have boards other than the built-in!  Verify this is on-board. */
		if ((inb(hp_port) & 0xc0) == 0x80 &&
		    ioaddr_table[inb(hp_port) & 3] == ioaddr)
			hp_builtin = hp_port;
	}
	iounmap(bios);
	/* We also recognize the HP Vectra on-board here, but check below. */
	hpJ2405A = (inb(ioaddr) == 0x08 && inb(ioaddr+1) == 0x00 &&
		    inb(ioaddr+2) == 0x09);

	/* Reset the LANCE. */
	reset_val = inw(ioaddr+LANCE_RESET);

	/* The un-reset is only needed for the real NE2100, and will
	   confuse the HP board. */
	if (!hpJ2405A)
		outw(reset_val, ioaddr+LANCE_RESET);

	outw(0x0000, ioaddr+LANCE_ADDR); /* Select CSR0 */
	if (inw(ioaddr+LANCE_DATA) != 0x0004)
		return -ENODEV;

	/* Get the version of the chip. */
	outw(88, ioaddr+LANCE_ADDR);
	if (inw(ioaddr+LANCE_ADDR) != 88) {
		lance_version = 0;
	} else {			/* Good, it's a newer chip. */
		int chip_version = inw(ioaddr+LANCE_DATA);
		outw(89, ioaddr+LANCE_ADDR);
		chip_version |= inw(ioaddr+LANCE_DATA) << 16;
		if (lance_debug > 2)
			printk("  LANCE chip version is %#x.\n", chip_version);
		if ((chip_version & 0xfff) != 0x003)
			return -ENODEV;
		chip_version = (chip_version >> 12) & 0xffff;
		for (lance_version = 1; chip_table[lance_version].id_number; lance_version++) {
			if (chip_table[lance_version].id_number == chip_version)
				break;
		}
	}

	/* We can't allocate private data from alloc_etherdev() because it must
	   be in an ISA DMA-able region. */
	chipname = chip_table[lance_version].name;
	printk("%s: %s at %#3x, ", dev->name, chipname, ioaddr);

	/* There is a 16 byte station address PROM at the base address.
	   The first six bytes are the station address. */
	for (i = 0; i < 6; i++)
		addr[i] = inb(ioaddr + i);
	eth_hw_addr_set(dev, addr);
	printk("%pM", dev->dev_addr);

	dev->base_addr = ioaddr;
	/* Make certain the data structures used by the LANCE are aligned and DMAble. */

	lp = kzalloc(sizeof(*lp), GFP_DMA | GFP_KERNEL);
	if (!lp)
		return -ENOMEM;
	if (lance_debug > 6)
		printk(" (#0x%05lx)", (unsigned long)lp);
	dev->ml_priv = lp;
	lp->name = chipname;
	lp->rx_buffs = (unsigned long)kmalloc_array(RX_RING_SIZE, PKT_BUF_SZ,
						    GFP_DMA | GFP_KERNEL);
	if (!lp->rx_buffs)
		goto out_lp;
	if (lance_need_isa_bounce_buffers) {
		lp->tx_bounce_buffs = kmalloc_array(TX_RING_SIZE, PKT_BUF_SZ,
						    GFP_DMA | GFP_KERNEL);
		if (!lp->tx_bounce_buffs)
			goto out_rx;
	} else
		lp->tx_bounce_buffs = NULL;

	lp->chip_version = lance_version;
	spin_lock_init(&lp->devlock);

	lp->init_block.mode = 0x0003;		/* Disable Rx and Tx. */
	for (i = 0; i < 6; i++)
		lp->init_block.phys_addr[i] = dev->dev_addr[i];
	lp->init_block.filter[0] = 0x00000000;
	lp->init_block.filter[1] = 0x00000000;
	lp->init_block.rx_ring = ((u32)isa_virt_to_bus(lp->rx_ring) & 0xffffff) | RX_RING_LEN_BITS;
	lp->init_block.tx_ring = ((u32)isa_virt_to_bus(lp->tx_ring) & 0xffffff) | TX_RING_LEN_BITS;

	outw(0x0001, ioaddr+LANCE_ADDR);
	inw(ioaddr+LANCE_ADDR);
	outw((short) (u32) isa_virt_to_bus(&lp->init_block), ioaddr+LANCE_DATA);
	outw(0x0002, ioaddr+LANCE_ADDR);
	inw(ioaddr+LANCE_ADDR);
	outw(((u32)isa_virt_to_bus(&lp->init_block)) >> 16, ioaddr+LANCE_DATA);
	outw(0x0000, ioaddr+LANCE_ADDR);
	inw(ioaddr+LANCE_ADDR);

	if (irq) {			/* Set iff PCI card. */
		dev->dma = 4;		/* Native bus-master, no DMA channel needed. */
		dev->irq = irq;
	} else if (hp_builtin) {
		static const char dma_tbl[4] = {3, 5, 6, 0};
		static const char irq_tbl[4] = {3, 4, 5, 9};
		unsigned char port_val = inb(hp_builtin);
		dev->dma = dma_tbl[(port_val >> 4) & 3];
		dev->irq = irq_tbl[(port_val >> 2) & 3];
		printk(" HP Vectra IRQ %d DMA %d.\n", dev->irq, dev->dma);
	} else if (hpJ2405A) {
		static const char dma_tbl[4] = {3, 5, 6, 7};
		static const char irq_tbl[8] = {3, 4, 5, 9, 10, 11, 12, 15};
		short reset_val = inw(ioaddr+LANCE_RESET);
		dev->dma = dma_tbl[(reset_val >> 2) & 3];
		dev->irq = irq_tbl[(reset_val >> 4) & 7];
		printk(" HP J2405A IRQ %d DMA %d.\n", dev->irq, dev->dma);
	} else if (lance_version == PCNET_ISAP) {	/* The plug-n-play version. */
		short bus_info;
		outw(8, ioaddr+LANCE_ADDR);
		bus_info = inw(ioaddr+LANCE_BUS_IF);
		dev->dma = bus_info & 0x07;
		dev->irq = (bus_info >> 4) & 0x0F;
	} else {
		/* The DMA channel may be passed in PARAM1. */
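		/* Editor's note: PARAM1 is dev->mem_start, historically set
		   from the "ether=" boot option (an assumption from the old
		   boot-time documentation; e.g. ether=0,0x300,5,0,eth0 would
		   request DMA channel 5 here). */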
		if (dev->mem_start & 0x07)
			dev->dma = dev->mem_start & 0x07;
	}

	if (dev->dma == 0) {
		/* Read the DMA channel status register, so that we can avoid
		   stuck DMA channels in the DMA detection below. */
		dma_channels = ((inb(DMA1_STAT_REG) >> 4) & 0x0f) |
			(inb(DMA2_STAT_REG) & 0xf0);
	}
	err = -ENODEV;
	if (dev->irq >= 2)
		printk(" assigned IRQ %d", dev->irq);
	else if (lance_version != 0) {	/* 7990 boards need DMA detection first. */
		unsigned long irq_mask;

		/* To auto-IRQ we enable the initialization-done and DMA error
		   interrupts. For ISA boards we get a DMA error, but VLB and PCI
		   boards will work. */
		irq_mask = probe_irq_on();

		/* Trigger an initialization just for the interrupt. */
		outw(0x0041, ioaddr+LANCE_DATA);

		mdelay(20);
		dev->irq = probe_irq_off(irq_mask);
		if (dev->irq)
			printk(", probed IRQ %d", dev->irq);
		else {
			printk(", failed to detect IRQ line.\n");
			goto out_tx;
		}

		/* Check for the initialization done bit, 0x0100, which means
		   that we don't need a DMA channel. */
		if (inw(ioaddr+LANCE_DATA) & 0x0100)
			dev->dma = 4;
	}

	if (dev->dma == 4) {
		printk(", no DMA needed.\n");
	} else if (dev->dma) {
		if (request_dma(dev->dma, chipname)) {
			printk("DMA %d allocation failed.\n", dev->dma);
			goto out_tx;
		} else
			printk(", assigned DMA %d.\n", dev->dma);
	} else {			/* OK, we have to auto-DMA. */
		for (i = 0; i < 4; i++) {
			static const char dmas[] = { 5, 6, 7, 3 };
			int dma = dmas[i];
			int boguscnt;

			/* Don't enable a permanently busy DMA channel, or the machine
			   will hang. */
			if (test_bit(dma, &dma_channels))
				continue;
			outw(0x7f04, ioaddr+LANCE_DATA); /* Clear the memory error bits. */
			if (request_dma(dma, chipname))
				continue;

			flags = claim_dma_lock();
			set_dma_mode(dma, DMA_MODE_CASCADE);
			enable_dma(dma);
			release_dma_lock(flags);

			/* Trigger an initialization. */
			outw(0x0001, ioaddr+LANCE_DATA);
			for (boguscnt = 100; boguscnt > 0; --boguscnt)
				if (inw(ioaddr+LANCE_DATA) & 0x0900)
					break;
			if (inw(ioaddr+LANCE_DATA) & 0x0100) {
				dev->dma = dma;
				printk(", DMA %d.\n", dev->dma);
				break;
			} else {
				flags = claim_dma_lock();
				disable_dma(dma);
				release_dma_lock(flags);
				free_dma(dma);
			}
		}
		if (i == 4) {			/* Failure: bail. */
			printk("DMA detection failed.\n");
			goto out_tx;
		}
	}

	if (lance_version == 0 && dev->irq == 0) {
		/* We may auto-IRQ now that we have a DMA channel. */
		/* Trigger an initialization just for the interrupt. */
		unsigned long irq_mask;

		irq_mask = probe_irq_on();
		outw(0x0041, ioaddr+LANCE_DATA);

		mdelay(40);
		dev->irq = probe_irq_off(irq_mask);
		if (dev->irq == 0) {
			printk("  Failed to detect the 7990 IRQ line.\n");
			goto out_dma;
		}
		printk("  Auto-IRQ detected IRQ%d.\n", dev->irq);
	}

	if (chip_table[lp->chip_version].flags & LANCE_ENABLE_AUTOSELECT) {
		/* Turn on auto-select of media (10baseT or BNC) so that the user
		   can watch the LEDs even if the board isn't opened. */
		outw(0x0002, ioaddr+LANCE_ADDR);
		/* Don't touch the 10base2 power bit. */
		outw(inw(ioaddr+LANCE_BUS_IF) | 0x0002, ioaddr+LANCE_BUS_IF);
	}

	if (lance_debug > 0 && did_version++ == 0)
		printk(version);

	/* The LANCE-specific entries in the device structure. */
	dev->netdev_ops = &lance_netdev_ops;
	dev->watchdog_timeo = TX_TIMEOUT;

	err = register_netdev(dev);
	if (err)
		goto out_dma;
	return 0;
out_dma:
	if (dev->dma != 4)
		free_dma(dev->dma);
out_tx:
	kfree(lp->tx_bounce_buffs);
out_rx:
	kfree((void*)lp->rx_buffs);
out_lp:
	kfree(lp);
	return err;
}


static int
lance_open(struct net_device *dev)
{
	struct lance_private *lp = dev->ml_priv;
	int ioaddr = dev->base_addr;
	int i;

	if (dev->irq == 0 ||
	    request_irq(dev->irq, lance_interrupt, 0, dev->name, dev)) {
		return -EAGAIN;
	}

	/* We used to allocate DMA here, but that was silly.
	   DMA lines can't be shared!  We now permanently allocate them. */

	/* Reset the LANCE */
	inw(ioaddr+LANCE_RESET);

	/* The DMA controller is used as a no-operation slave, "cascade mode". */
	if (dev->dma != 4) {
		unsigned long flags = claim_dma_lock();
		enable_dma(dev->dma);
		set_dma_mode(dev->dma, DMA_MODE_CASCADE);
		release_dma_lock(flags);
	}

	/* Un-reset the LANCE, needed only for the NE2100. */
	if (chip_table[lp->chip_version].flags & LANCE_MUST_UNRESET)
		outw(0, ioaddr+LANCE_RESET);

	if (chip_table[lp->chip_version].flags & LANCE_ENABLE_AUTOSELECT) {
		/* This is 79C960-specific: Turn on auto-select of media (AUI, BNC). */
		outw(0x0002, ioaddr+LANCE_ADDR);
		/* Only touch the autoselect bit. */
		outw(inw(ioaddr+LANCE_BUS_IF) | 0x0002, ioaddr+LANCE_BUS_IF);
	}

	if (lance_debug > 1)
		printk("%s: lance_open() irq %d dma %d tx/rx rings %#x/%#x init %#x.\n",
		       dev->name, dev->irq, dev->dma,
		       (u32) isa_virt_to_bus(lp->tx_ring),
		       (u32) isa_virt_to_bus(lp->rx_ring),
		       (u32) isa_virt_to_bus(&lp->init_block));

	lance_init_ring(dev, GFP_KERNEL);
	/* Re-initialize the LANCE, and start it when done. */
	outw(0x0001, ioaddr+LANCE_ADDR);
	outw((short) (u32) isa_virt_to_bus(&lp->init_block), ioaddr+LANCE_DATA);
	outw(0x0002, ioaddr+LANCE_ADDR);
	outw(((u32)isa_virt_to_bus(&lp->init_block)) >> 16, ioaddr+LANCE_DATA);

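	/* Editor's note: the next pair of writes programs CSR4 (feature
	   bits on the newer PCnet parts; the 0x0915 value is taken on
	   trust from the original driver), then CSR0 is given INIT
	   (0x0001) to start reading the init block.  Completion sets
	   IDON (0x0100), which is polled for below. */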
	outw(0x0004, ioaddr+LANCE_ADDR);
	outw(0x0915, ioaddr+LANCE_DATA);

	outw(0x0000, ioaddr+LANCE_ADDR);
	outw(0x0001, ioaddr+LANCE_DATA);

	netif_start_queue (dev);

	i = 0;
	while (i++ < 100)
		if (inw(ioaddr+LANCE_DATA) & 0x0100)
			break;
	/*
	 * We used to clear the InitDone bit, 0x0100, here but Mark Stockton
	 * reports that doing so triggers a bug in the '974.
	 */
	outw(0x0042, ioaddr+LANCE_DATA);

	if (lance_debug > 2)
		printk("%s: LANCE open after %d ticks, init block %#x csr0 %4.4x.\n",
		       dev->name, i, (u32) isa_virt_to_bus(&lp->init_block), inw(ioaddr+LANCE_DATA));

	return 0;			/* Always succeed */
}

/* The LANCE has been halted for one reason or another (busmaster memory
   arbitration error, Tx FIFO underflow, driver stopped it to reconfigure,
   etc.).  Modern LANCE variants always reload their ring-buffer
   configuration when restarted, so we must reinitialize our ring
   context before restarting.  As part of this reinitialization,
   find all packets still on the Tx ring and pretend that they had been
   sent (in effect, drop the packets on the floor) - the higher-level
   protocols will time out and retransmit.  It'd be better to shuffle
   these skbs to a temp list and then actually re-Tx them after
   restarting the chip, but I'm too lazy to do so right now.  dplatt@3do.com
*/

static void
lance_purge_ring(struct net_device *dev)
{
	struct lance_private *lp = dev->ml_priv;
	int i;

	/* Free all the skbuffs in the Rx and Tx queues. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb = lp->rx_skbuff[i];
		lp->rx_skbuff[i] = NULL;
		lp->rx_ring[i].base = 0;	/* Not owned by LANCE chip. */
		if (skb)
			dev_kfree_skb_any(skb);
	}
	for (i = 0; i < TX_RING_SIZE; i++) {
		if (lp->tx_skbuff[i]) {
			dev_kfree_skb_any(lp->tx_skbuff[i]);
			lp->tx_skbuff[i] = NULL;
		}
	}
}


/* Initialize the LANCE Rx and Tx rings. */
static void
lance_init_ring(struct net_device *dev, gfp_t gfp)
{
	struct lance_private *lp = dev->ml_priv;
	int i;

	lp->cur_rx = lp->cur_tx = 0;
	lp->dirty_rx = lp->dirty_tx = 0;

	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb;
		void *rx_buff;

		skb = alloc_skb(PKT_BUF_SZ, GFP_DMA | gfp);
		lp->rx_skbuff[i] = skb;
		if (skb)
			rx_buff = skb->data;
		else
			rx_buff = kmalloc(PKT_BUF_SZ, GFP_DMA | gfp);
		if (rx_buff == NULL)
			lp->rx_ring[i].base = 0;
		else
			lp->rx_ring[i].base = (u32)isa_virt_to_bus(rx_buff) | 0x80000000;
		lp->rx_ring[i].buf_length = -PKT_BUF_SZ;
	}
	/* The Tx buffer address is filled in as needed, but we do need to clear
	   the upper ownership bit. */
	for (i = 0; i < TX_RING_SIZE; i++) {
		lp->tx_skbuff[i] = NULL;
		lp->tx_ring[i].base = 0;
	}

	lp->init_block.mode = 0x0000;
	for (i = 0; i < 6; i++)
		lp->init_block.phys_addr[i] = dev->dev_addr[i];
	lp->init_block.filter[0] = 0x00000000;
	lp->init_block.filter[1] = 0x00000000;
	lp->init_block.rx_ring = ((u32)isa_virt_to_bus(lp->rx_ring) & 0xffffff) | RX_RING_LEN_BITS;
	lp->init_block.tx_ring = ((u32)isa_virt_to_bus(lp->tx_ring) & 0xffffff) | TX_RING_LEN_BITS;
}

static void
lance_restart(struct net_device *dev, unsigned int csr0_bits, int must_reinit)
{
	struct lance_private *lp = dev->ml_priv;

	if (must_reinit ||
	    (chip_table[lp->chip_version].flags & LANCE_MUST_REINIT_RING)) {
		lance_purge_ring(dev);
		lance_init_ring(dev, GFP_ATOMIC);
	}
	outw(0x0000,    dev->base_addr + LANCE_ADDR);
	outw(csr0_bits, dev->base_addr + LANCE_DATA);
}


static void lance_tx_timeout (struct net_device *dev, unsigned int txqueue)
{
	struct lance_private *lp = (struct lance_private *) dev->ml_priv;
	int ioaddr = dev->base_addr;

	outw (0, ioaddr + LANCE_ADDR);
	printk ("%s: transmit timed out, status %4.4x, resetting.\n",
		dev->name, inw (ioaddr + LANCE_DATA));
	outw (0x0004, ioaddr + LANCE_DATA);
	dev->stats.tx_errors++;
#ifndef final_version
	if (lance_debug > 3) {
		int i;
		printk (" Ring data dump: dirty_tx %d cur_tx %d%s cur_rx %d.",
			lp->dirty_tx, lp->cur_tx, netif_queue_stopped(dev) ? " (full)" : "",
			lp->cur_rx);
		for (i = 0; i < RX_RING_SIZE; i++)
			printk ("%s %08x %04x %04x", i & 0x3 ? "" : "\n ",
				lp->rx_ring[i].base, -lp->rx_ring[i].buf_length,
				lp->rx_ring[i].msg_length);
		for (i = 0; i < TX_RING_SIZE; i++)
			printk ("%s %08x %04x %04x", i & 0x3 ? "" : "\n ",
				lp->tx_ring[i].base, -lp->tx_ring[i].length,
				lp->tx_ring[i].misc);
		printk ("\n");
	}
#endif
	lance_restart (dev, 0x0043, 1);

	netif_trans_update(dev); /* prevent tx timeout */
	netif_wake_queue (dev);
}


static netdev_tx_t lance_start_xmit(struct sk_buff *skb,
				    struct net_device *dev)
{
	struct lance_private *lp = dev->ml_priv;
	int ioaddr = dev->base_addr;
	int entry;
	unsigned long flags;

	spin_lock_irqsave(&lp->devlock, flags);

	if (lance_debug > 3) {
		outw(0x0000, ioaddr+LANCE_ADDR);
		printk("%s: lance_start_xmit() called, csr0 %4.4x.\n", dev->name,
		       inw(ioaddr+LANCE_DATA));
		outw(0x0000, ioaddr+LANCE_DATA);
	}

	/* Fill in a Tx ring entry */

	/* Mask to ring buffer boundary. */
	entry = lp->cur_tx & TX_RING_MOD_MASK;

	/* Caution: the write order is important here, set the base address
	   with the "ownership" bits last. */

	/* The old LANCE chips don't automatically pad buffers to the minimum size. */
	if (chip_table[lp->chip_version].flags & LANCE_MUST_PAD) {
		if (skb->len < ETH_ZLEN) {
			if (skb_padto(skb, ETH_ZLEN))
				goto out;
			lp->tx_ring[entry].length = -ETH_ZLEN;
		}
		else
			lp->tx_ring[entry].length = -skb->len;
	} else
		lp->tx_ring[entry].length = -skb->len;

	lp->tx_ring[entry].misc = 0x0000;

	dev->stats.tx_bytes += skb->len;

	/* If any part of this buffer is >16M we must copy it to a low-memory
	   buffer. */
	if ((u32)isa_virt_to_bus(skb->data) + skb->len > 0x01000000) {
		if (lance_debug > 5)
			printk("%s: bouncing a high-memory packet (%#x).\n",
			       dev->name, (u32)isa_virt_to_bus(skb->data));
		skb_copy_from_linear_data(skb, &lp->tx_bounce_buffs[entry], skb->len);
		lp->tx_ring[entry].base =
			((u32)isa_virt_to_bus((lp->tx_bounce_buffs + entry)) & 0xffffff) | 0x83000000;
		dev_kfree_skb(skb);
	} else {
		lp->tx_skbuff[entry] = skb;
		lp->tx_ring[entry].base = ((u32)isa_virt_to_bus(skb->data) & 0xffffff) | 0x83000000;
	}
	lp->cur_tx++;

	/* Trigger an immediate send poll. */
	outw(0x0000, ioaddr+LANCE_ADDR);
	outw(0x0048, ioaddr+LANCE_DATA);

	if ((lp->cur_tx - lp->dirty_tx) >= TX_RING_SIZE)
		netif_stop_queue(dev);

out:
	spin_unlock_irqrestore(&lp->devlock, flags);
	return NETDEV_TX_OK;
}

/* The LANCE interrupt handler. */
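/* Editor's note, from the AMD LANCE/PCnet datasheets (an assumption, not
   stated in this file): the CSR0 bits used below are
	ERR  0x8000   BABL 0x4000   MISS 0x1000   MERR 0x0800
	RINT 0x0400   TINT 0x0200   IDON 0x0100   INEA 0x0040
   so the 0x8600 loop mask is ERR|RINT|TINT, and acknowledging with
   "csr0 & ~0x004f" writes the cause bits back to clear them while masking
   off INEA and the low control bits (TDMD/STOP/STRT/INIT). */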
static irqreturn_t lance_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct lance_private *lp;
	int csr0, ioaddr, boguscnt = 10;
	int must_restart;

	ioaddr = dev->base_addr;
	lp = dev->ml_priv;

	spin_lock (&lp->devlock);

	outw(0x00, dev->base_addr + LANCE_ADDR);
	while ((csr0 = inw(dev->base_addr + LANCE_DATA)) & 0x8600 &&
	       --boguscnt >= 0) {
		/* Acknowledge all of the current interrupt sources ASAP. */
		outw(csr0 & ~0x004f, dev->base_addr + LANCE_DATA);

		must_restart = 0;

		if (lance_debug > 5)
			printk("%s: interrupt  csr0=%#2.2x new csr=%#2.2x.\n",
			       dev->name, csr0, inw(dev->base_addr + LANCE_DATA));

		if (csr0 & 0x0400)		/* Rx interrupt */
			lance_rx(dev);

		if (csr0 & 0x0200) {		/* Tx-done interrupt */
			int dirty_tx = lp->dirty_tx;

			while (dirty_tx < lp->cur_tx) {
				int entry = dirty_tx & TX_RING_MOD_MASK;
				int status = lp->tx_ring[entry].base;

				if (status < 0)
					break;		/* It still hasn't been Txed */

				lp->tx_ring[entry].base = 0;

				if (status & 0x40000000) {
					/* There was a major error, log it. */
					int err_status = lp->tx_ring[entry].misc;
					dev->stats.tx_errors++;
					if (err_status & 0x0400)
						dev->stats.tx_aborted_errors++;
					if (err_status & 0x0800)
						dev->stats.tx_carrier_errors++;
					if (err_status & 0x1000)
						dev->stats.tx_window_errors++;
					if (err_status & 0x4000) {
						/* Ackk!  On FIFO errors the Tx unit is turned off! */
						dev->stats.tx_fifo_errors++;
						/* Remove this verbosity later! */
						printk("%s: Tx FIFO error! Status %4.4x.\n",
						       dev->name, csr0);
						/* Restart the chip. */
						must_restart = 1;
					}
				} else {
					if (status & 0x18000000)
						dev->stats.collisions++;
					dev->stats.tx_packets++;
				}

				/* We must free the original skb if it's not a data-only copy
				   in the bounce buffer. */
				if (lp->tx_skbuff[entry]) {
					dev_consume_skb_irq(lp->tx_skbuff[entry]);
					lp->tx_skbuff[entry] = NULL;
				}
				dirty_tx++;
			}

#ifndef final_version
			if (lp->cur_tx - dirty_tx >= TX_RING_SIZE) {
				printk("out-of-sync dirty pointer, %d vs. %d, full=%s.\n",
				       dirty_tx, lp->cur_tx,
				       netif_queue_stopped(dev) ? "yes" : "no");
				dirty_tx += TX_RING_SIZE;
			}
#endif

			/* if the ring is no longer full, accept more packets */
			if (netif_queue_stopped(dev) &&
			    dirty_tx > lp->cur_tx - TX_RING_SIZE + 2)
				netif_wake_queue (dev);

			lp->dirty_tx = dirty_tx;
		}

		/* Log misc errors. */
		if (csr0 & 0x4000)
			dev->stats.tx_errors++; /* Tx babble. */
		if (csr0 & 0x1000)
			dev->stats.rx_errors++; /* Missed a Rx frame. */
		if (csr0 & 0x0800) {
			printk("%s: Bus master arbitration failure, status %4.4x.\n",
			       dev->name, csr0);
			/* Restart the chip. */
			must_restart = 1;
		}

		if (must_restart) {
			/* stop the chip to clear the error condition, then restart */
			outw(0x0000, dev->base_addr + LANCE_ADDR);
			outw(0x0004, dev->base_addr + LANCE_DATA);
			lance_restart(dev, 0x0002, 0);
		}
	}

	/* Clear any other interrupt, and set interrupt enable. */
	outw(0x0000, dev->base_addr + LANCE_ADDR);
	outw(0x7940, dev->base_addr + LANCE_DATA);

	if (lance_debug > 4)
		printk("%s: exiting interrupt, csr%d=%#4.4x.\n",
		       dev->name, inw(ioaddr + LANCE_ADDR),
		       inw(dev->base_addr + LANCE_DATA));

	spin_unlock (&lp->devlock);
	return IRQ_HANDLED;
}

static int
lance_rx(struct net_device *dev)
{
	struct lance_private *lp = dev->ml_priv;
	int entry = lp->cur_rx & RX_RING_MOD_MASK;
	int i;

	/* If we own the next entry, it's a new packet. Send it up. */
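	/* Editor's note: bit 31 of 'base' is the chip's OWN bit, so testing
	   ">= 0" (sign bit clear) means the descriptor has been handed back
	   to the host; the "|= 0x80000000" below returns it to the chip. */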
	while (lp->rx_ring[entry].base >= 0) {
		int status = lp->rx_ring[entry].base >> 24;

		if (status != 0x03) {		/* There was an error. */
			/* There is a tricky error noted by John Murphy,
			   <murf@perftech.com> to Russ Nelson: Even with full-sized
			   buffers it's possible for a jabber packet to use two
			   buffers, with only the last correctly noting the error. */
			if (status & 0x01)	/* Only count a general error at the */
				dev->stats.rx_errors++; /* end of a packet. */
			if (status & 0x20)
				dev->stats.rx_frame_errors++;
			if (status & 0x10)
				dev->stats.rx_over_errors++;
			if (status & 0x08)
				dev->stats.rx_crc_errors++;
			if (status & 0x04)
				dev->stats.rx_fifo_errors++;
			lp->rx_ring[entry].base &= 0x03ffffff;
		}
		else
		{
			/* Allocate a new buffer, compatible with net3. */
			short pkt_len = (lp->rx_ring[entry].msg_length & 0xfff) - 4;
			struct sk_buff *skb;

			if (pkt_len < 60)
			{
				printk("%s: Runt packet!\n", dev->name);
				dev->stats.rx_errors++;
			}
			else
			{
				skb = dev_alloc_skb(pkt_len + 2);
				if (skb == NULL)
				{
					printk("%s: Memory squeeze, deferring packet.\n", dev->name);
					for (i = 0; i < RX_RING_SIZE; i++)
						if (lp->rx_ring[(entry+i) & RX_RING_MOD_MASK].base < 0)
							break;

					if (i > RX_RING_SIZE - 2)
					{
						dev->stats.rx_dropped++;
						lp->rx_ring[entry].base |= 0x80000000;
						lp->cur_rx++;
					}
					break;
				}
				skb_reserve(skb, 2);	/* 16 byte align */
				skb_put(skb, pkt_len);	/* Make room */
				skb_copy_to_linear_data(skb,
					(unsigned char *)isa_bus_to_virt((lp->rx_ring[entry].base & 0x00ffffff)),
					pkt_len);
				skb->protocol = eth_type_trans(skb, dev);
				netif_rx(skb);
				dev->stats.rx_packets++;
				dev->stats.rx_bytes += pkt_len;
			}
		}
		/* The docs say that the buffer length isn't touched, but Andrew Boyd
		   of QNX reports that some revs of the 79C965 clear it. */
		lp->rx_ring[entry].buf_length = -PKT_BUF_SZ;
		lp->rx_ring[entry].base |= 0x80000000;
		entry = (++lp->cur_rx) & RX_RING_MOD_MASK;
	}

	/* We should check that at least two ring entries are free.  If not,
	   we should free one and mark stats->rx_dropped++. */

	return 0;
}

static int
lance_close(struct net_device *dev)
{
	int ioaddr = dev->base_addr;
	struct lance_private *lp = dev->ml_priv;

	netif_stop_queue (dev);

	if (chip_table[lp->chip_version].flags & LANCE_HAS_MISSED_FRAME) {
		outw(112, ioaddr+LANCE_ADDR);
		dev->stats.rx_missed_errors = inw(ioaddr+LANCE_DATA);
	}
	outw(0, ioaddr+LANCE_ADDR);

	if (lance_debug > 1)
		printk("%s: Shutting down ethercard, status was %2.2x.\n",
		       dev->name, inw(ioaddr+LANCE_DATA));

	/* We stop the LANCE here -- it occasionally polls
	   memory if we don't. */
	outw(0x0004, ioaddr+LANCE_DATA);

	if (dev->dma != 4)
	{
		unsigned long flags = claim_dma_lock();
		disable_dma(dev->dma);
		release_dma_lock(flags);
	}
	free_irq(dev->irq, dev);

	lance_purge_ring(dev);

	return 0;
}

static struct net_device_stats *lance_get_stats(struct net_device *dev)
{
	struct lance_private *lp = dev->ml_priv;

	if (chip_table[lp->chip_version].flags & LANCE_HAS_MISSED_FRAME) {
		short ioaddr = dev->base_addr;
		short saved_addr;
		unsigned long flags;

		spin_lock_irqsave(&lp->devlock, flags);
		saved_addr = inw(ioaddr+LANCE_ADDR);
		outw(112, ioaddr+LANCE_ADDR);
		dev->stats.rx_missed_errors = inw(ioaddr+LANCE_DATA);
		outw(saved_addr, ioaddr+LANCE_ADDR);
		spin_unlock_irqrestore(&lp->devlock, flags);
	}

	return &dev->stats;
}

/* Set or clear the multicast filter for this adaptor.
 */

static void set_multicast_list(struct net_device *dev)
{
	short ioaddr = dev->base_addr;

	outw(0, ioaddr+LANCE_ADDR);
	outw(0x0004, ioaddr+LANCE_DATA); /* Temporarily stop the lance. */

	if (dev->flags & IFF_PROMISC) {
		outw(15, ioaddr+LANCE_ADDR);
		outw(0x8000, ioaddr+LANCE_DATA); /* Set promiscuous mode */
	} else {
		short multicast_table[4];
		int i;
		int num_addrs = netdev_mc_count(dev);
		if (dev->flags & IFF_ALLMULTI)
			num_addrs = 1;
		/* FIXIT: We don't use the multicast table, but rely on upper-layer filtering. */
		memset(multicast_table, (num_addrs == 0) ? 0 : -1, sizeof(multicast_table));
		for (i = 0; i < 4; i++) {
			outw(8 + i, ioaddr+LANCE_ADDR);
			outw(multicast_table[i], ioaddr+LANCE_DATA);
		}
		outw(15, ioaddr+LANCE_ADDR);
		outw(0x0000, ioaddr+LANCE_DATA); /* Unset promiscuous mode */
	}

	lance_restart(dev, 0x0142, 0); /* Resume normal operation */
}