/* lance.c: An AMD LANCE/PCnet ethernet driver for Linux. */
/*
	Written/copyright 1993-1998 by Donald Becker.

	Copyright 1993 United States Government as represented by the
	Director, National Security Agency.
	This software may be used and distributed according to the terms
	of the GNU General Public License, incorporated herein by reference.

	This driver is for the Allied Telesis AT1500 and HP J2405A, and should work
	with most other LANCE-based bus-master (NE2100/NE2500) ethercards.

	The author may be reached as becker@scyld.com, or C/O
	Scyld Computing Corporation
	410 Severn Ave., Suite 210
	Annapolis MD 21403

	Andrey V. Savochkin:
	- alignment problem with 1.3.* kernel and some minor changes.
	Thomas Bogendoerfer (tsbogend@bigbug.franken.de):
	- added support for Linux/Alpha, but removed most of it, because
	  it worked only for the PCI chip.
	- added hook for the 32bit lance driver
	- added PCnetPCI II (79C970A) to chip table
	Paul Gortmaker (gpg109@rsphy1.anu.edu.au):
	- hopefully fix above so Linux/Alpha can use ISA cards too.
    8/20/96 Fixed 7990 autoIRQ failure and reversed unneeded alignment -djb
    v1.12 10/27/97 Module support -djb
    v1.14  2/3/98 Module support modified, made PCI support optional -djb
    v1.15 5/27/99 Fixed bug in the cleanup_module(). dev->priv was freed
                  before unregister_netdev() which caused NULL pointer
                  reference later in the chain (in rtnetlink_fill_ifinfo())
                  -- Mika Kuoppala <miku@iki.fi>

    Forward ported v1.14 to 2.1.129, merged the PCI and misc changes from
    the 2.1 version of the old driver - Alan Cox

    Get rid of check_region, check kmalloc return in lance_probe1
    Arnaldo Carvalho de Melo <acme@conectiva.com.br> - 11/01/2001

	Reworked detection, added support for Racal InterLan EtherBlaster cards
	Vesselin Kostadinov <vesok at yahoo dot com > - 22/4/2004
*/

static const char version[] = "lance.c:v1.16 2006/11/09 dplatt@3do.com, becker@cesdis.gsfc.nasa.gov\n";

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/mm.h>
#include <linux/bitops.h>

#include <asm/io.h>
#include <asm/dma.h>

static unsigned int lance_portlist[] __initdata = { 0x300, 0x320, 0x340, 0x360, 0};
static int lance_probe1(struct net_device *dev, int ioaddr, int irq, int options);
static int __init do_lance_probe(struct net_device *dev);


static struct card {
	char id_offset14;
	char id_offset15;
} cards[] = {
	{	//"normal"
		.id_offset14 = 0x57,
		.id_offset15 = 0x57,
	},
	{	//NI6510EB
		.id_offset14 = 0x52,
		.id_offset15 = 0x44,
	},
	{	//Racal InterLan EtherBlaster
		.id_offset14 = 0x52,
		.id_offset15 = 0x49,
	},
};
#define NUM_CARDS 3
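
/* Illustrative note, not derived from the code itself: the two signature
 * bytes above are read from address-PROM offsets 14 and 15 by
 * do_lance_probe() below.  0x57/0x57 ('W','W') is the traditional
 * NE2100-style PROM signature, while 0x52/0x44 ('R','D') and 0x52/0x49
 * ('R','I') presumably identify the NI6510EB and the Racal InterLan
 * EtherBlaster boards. */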

#ifdef LANCE_DEBUG
static int lance_debug = LANCE_DEBUG;
#else
static int lance_debug = 1;
#endif

/*
				Theory of Operation

I. Board Compatibility

This device driver is designed for the AMD 79C960, the "PCnet-ISA
single-chip ethernet controller for ISA".  This chip is used in a wide
variety of boards from vendors such as Allied Telesis, HP, Kingston,
and Boca.  This driver is also intended to work with older AMD 7990
designs, such as the NE1500 and NE2100, and newer 79C961.  For convenience,
I use the name LANCE to refer to all of the AMD chips, even though it properly
refers only to the original 7990.

II. Board-specific settings

The driver is designed to work with boards that use the faster
bus-master mode, rather than in shared memory mode.  (Only older designs
have the on-board buffer memory needed to support the slower shared memory mode.)

Most ISA boards have jumpered settings for the I/O base, IRQ line, and DMA
channel.  This driver probes the likely base addresses:
{0x300, 0x320, 0x340, 0x360}.
After the board is found it generates a DMA-timeout interrupt and uses
autoIRQ to find the IRQ line.  The DMA channel can be set with the low bits
of the otherwise-unused dev->mem_start value (aka PARAM1).  If unset it is
probed for by enabling each free DMA channel in turn and checking if
initialization succeeds.

The HP-J2405A board is an exception: with this board it is easy to read the
EEPROM-set values for the base, IRQ, and DMA.  (Of course you must already
_know_ the base address -- that field is for writing the EEPROM.)

III. Driver operation

IIIa. Ring buffers
The LANCE uses ring buffers of Tx and Rx descriptors.  Each entry describes
the base and length of the data buffer, along with status bits.  The length
of these rings is set by LANCE_LOG_{RX,TX}_BUFFERS, which is log_2() of
the ring length (rather than the length itself) for implementation ease.
The default values are 4 (Tx) and 4 (Rx), which leads to ring sizes of
16 (Tx) and 16 (Rx).  Increasing the number of ring entries needlessly uses
extra space and reduces the chance that an upper layer will be able to
reorder queued Tx packets based on priority.  Decreasing the number of
entries makes it more difficult to achieve back-to-back packet transmission
and increases the chance that the Rx ring will overflow.  (Consider the worst
case of receiving back-to-back minimum-sized packets, worked through below.)
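
As a rough illustration (assuming 10 Mb/s Ethernet, which is what these
chips run at): a minimum-sized frame plus preamble and inter-frame gap is
about 672 bit times, i.e. roughly 67 us, so a 16-entry Rx ring gives the
host on the order of a millisecond to start draining the ring before
back-to-back minimum-sized packets are dropped.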

The LANCE has the capability to "chain" both Rx and Tx buffers, but this driver
statically allocates full-sized (slightly oversized -- PKT_BUF_SZ) buffers to
avoid the administrative overhead.  For the Rx side this avoids dynamically
allocating full-sized buffers "just in case", at the expense of a
memory-to-memory data copy for each packet received.  For most systems this
is a good tradeoff: the Rx buffer will always be in low memory, the copy
is inexpensive, and it primes the cache for later packet processing.  For Tx
the buffers are only used when needed as low-memory bounce buffers.

IIIB. 16M memory limitations.
For the ISA bus master mode all structures used directly by the LANCE,
the initialization block, Rx and Tx rings, and data buffers, must be
accessible from the ISA bus, i.e. in the lower 16M of real memory.
This is a problem for current Linux kernels on >16M machines. The network
devices are initialized after memory initialization, and the kernel doles out
memory from the top of memory downward.  The current solution is to have a
special network initialization routine that's called before memory
initialization; this will eventually be generalized for all network devices.
As mentioned before, low-memory "bounce-buffers" are used when needed.
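
(Concretely -- and this is just restating what the code below does -- every
address handed to the chip goes through isa_virt_to_bus() and is masked to
24 bits, e.g. the rx_ring/tx_ring pointers in the init block are stored as
"isa_virt_to_bus(...) & 0xffffff", so anything above 16M simply cannot be
addressed by the chip and must be bounced.)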

IIIC. Synchronization
The driver runs as two independent, single-threaded flows of control.  One
is the send-packet routine, which enforces single-threaded use by the
dev->tbusy flag.  The other thread is the interrupt handler, which is
single-threaded by the hardware and other software.

The send packet thread has partial control over the Tx ring and the 'dev->tbusy'
flag.  It sets the tbusy flag whenever it's queuing a Tx packet.  If the next
queue slot is empty, it clears the tbusy flag when finished; otherwise it sets
the 'lp->tx_full' flag.

The interrupt handler has exclusive control over the Rx ring and records stats
from the Tx ring.  (The Tx-done interrupt can't be selectively turned off, so
we can't avoid the interrupt overhead by having the Tx routine reap the Tx
stats.)  After reaping the stats, it marks the queue entry as empty by setting
the 'base' to zero.  Iff the 'lp->tx_full' flag is set, it clears both the
tx_full and tbusy flags.

*/

/* Set the number of Tx and Rx buffers, using Log_2(# buffers).
   Reasonable default values are 16 Tx buffers and 16 Rx buffers.
   That translates to 4 and 4 (16 == 2^4).
   This is a compile-time option for efficiency.
   */
#ifndef LANCE_LOG_TX_BUFFERS
#define LANCE_LOG_TX_BUFFERS 4
#define LANCE_LOG_RX_BUFFERS 4
#endif

#define TX_RING_SIZE			(1 << (LANCE_LOG_TX_BUFFERS))
#define TX_RING_MOD_MASK		(TX_RING_SIZE - 1)
#define TX_RING_LEN_BITS		((LANCE_LOG_TX_BUFFERS) << 29)

#define RX_RING_SIZE			(1 << (LANCE_LOG_RX_BUFFERS))
#define RX_RING_MOD_MASK		(RX_RING_SIZE - 1)
#define RX_RING_LEN_BITS		((LANCE_LOG_RX_BUFFERS) << 29)
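
/* Worked example of the macros above (illustrative only; nothing here is
 * used by the driver): with the default LANCE_LOG_TX_BUFFERS of 4,
 *
 *	TX_RING_SIZE     == 1 << 4  == 16
 *	TX_RING_MOD_MASK == 16 - 1  == 0x0f   (used to wrap cur_tx/dirty_tx)
 *	TX_RING_LEN_BITS == 4 << 29 == 0x80000000
 *
 * The *_RING_LEN_BITS value ends up in the top bits of the 32-bit ring
 * pointers written into the initialization block below, which is how the
 * chip learns the ring length. */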

#define PKT_BUF_SZ		1544

/* Offsets from base I/O address. */
#define LANCE_DATA 0x10
#define LANCE_ADDR 0x12
#define LANCE_RESET 0x14
#define LANCE_BUS_IF 0x16
#define LANCE_TOTAL_SIZE 0x18

#define TX_TIMEOUT	(HZ/5)
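
/* Register access idiom used throughout this file (a summary, not new
 * behaviour): the chip exposes a register-address port (LANCE_ADDR, the
 * "RAP") and a data port (LANCE_DATA, the "RDP").  A CSR is selected by
 * writing its number to LANCE_ADDR and then read or written via LANCE_DATA,
 * e.g. reading CSR88 looks like:
 *
 *	outw(88, ioaddr + LANCE_ADDR);
 *	val = inw(ioaddr + LANCE_DATA);
 *
 * The CSR0 bit values that appear as magic numbers below are, per the
 * Am7990/PCnet datasheets: 0x0001 INIT, 0x0002 STRT, 0x0004 STOP,
 * 0x0008 TDMD, 0x0040 INEA, 0x0100 IDON, 0x0200 TINT, 0x0400 RINT,
 * 0x0800 MERR and 0x8000 ERR.  TX_TIMEOUT of HZ/5 is 200 ms regardless
 * of the configured HZ. */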

/* The LANCE Rx and Tx ring descriptors. */
struct lance_rx_head {
	s32 base;
	s16 buf_length;			/* This length is 2s complement (negative)! */
	s16 msg_length;			/* This length is "normal". */
};

struct lance_tx_head {
	s32 base;
	s16 length;				/* Length is 2s complement (negative)! */
	s16 misc;
};
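
/* For illustration (values taken from how the code below fills these in):
 * buf_length/length hold the negated buffer size, so an Rx descriptor for a
 * PKT_BUF_SZ buffer carries buf_length == -1544 == 0xf9f8.  The 'base' field
 * packs the 24-bit ISA bus address in its low three bytes and the descriptor
 * flags in the top byte: 0x80000000 is the OWN bit (descriptor owned by the
 * chip), and the Tx path uses 0x83000000, which additionally sets the
 * start-of-packet and end-of-packet flags since each frame occupies exactly
 * one descriptor here. */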

/* The LANCE initialization block, described in databook. */
struct lance_init_block {
	u16 mode;		/* Pre-set mode (reg. 15) */
	u8  phys_addr[6]; /* Physical ethernet address */
	u32 filter[2];			/* Multicast filter (unused). */
	/* Receive and transmit ring base, along with extra bits. */
	u32  rx_ring;			/* Tx and Rx ring base pointers */
	u32  tx_ring;
};
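
/* How rx_ring/tx_ring are encoded (a restatement of what lance_probe1() and
 * lance_init_ring() do below, not a new format): the low 24 bits are the ISA
 * bus address of the ring and the upper bits carry the ring-length code, e.g.
 *
 *	lp->init_block.rx_ring =
 *		((u32)isa_virt_to_bus(lp->rx_ring) & 0xffffff) | RX_RING_LEN_BITS;
 */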

struct lance_private {
	/* The Tx and Rx ring entries must be aligned on 8-byte boundaries. */
	struct lance_rx_head rx_ring[RX_RING_SIZE];
	struct lance_tx_head tx_ring[TX_RING_SIZE];
	struct lance_init_block	init_block;
	const char *name;
	/* The saved address of a sent-in-place packet/buffer, for skfree(). */
	struct sk_buff* tx_skbuff[TX_RING_SIZE];
	/* The addresses of receive-in-place skbuffs. */
	struct sk_buff* rx_skbuff[RX_RING_SIZE];
	unsigned long rx_buffs;		/* Address of Rx and Tx buffers. */
	/* Tx low-memory "bounce buffer" address. */
	char (*tx_bounce_buffs)[PKT_BUF_SZ];
	int cur_rx, cur_tx;			/* The next free ring entry */
	int dirty_rx, dirty_tx;		/* The ring entries to be free()ed. */
	int dma;
	unsigned char chip_version;	/* See lance_chip_type. */
	spinlock_t devlock;
};

#define LANCE_MUST_PAD          0x00000001
#define LANCE_ENABLE_AUTOSELECT 0x00000002
#define LANCE_MUST_REINIT_RING  0x00000004
#define LANCE_MUST_UNRESET      0x00000008
#define LANCE_HAS_MISSED_FRAME  0x00000010

/* A mapping from the chip ID number to the part number and features.
   These are from the datasheets -- in real life the '970 version
   reportedly has the same ID as the '965. */
static struct lance_chip_type {
	int id_number;
	const char *name;
	int flags;
} chip_table[] = {
	{0x0000, "LANCE 7990",				/* Ancient lance chip.  */
		LANCE_MUST_PAD + LANCE_MUST_UNRESET},
	{0x0003, "PCnet/ISA 79C960",		/* 79C960 PCnet/ISA.  */
		LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
			LANCE_HAS_MISSED_FRAME},
	{0x2260, "PCnet/ISA+ 79C961",		/* 79C961 PCnet/ISA+, Plug-n-Play.  */
		LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
			LANCE_HAS_MISSED_FRAME},
	{0x2420, "PCnet/PCI 79C970",		/* 79C970 or 79C974 PCnet-SCSI, PCI. */
		LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
			LANCE_HAS_MISSED_FRAME},
	/* Bug: the PCnet/PCI actually uses the PCnet/VLB ID number, so just call
		it the PCnet32. */
	{0x2430, "PCnet32",					/* 79C965 PCnet for VL bus. */
		LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
			LANCE_HAS_MISSED_FRAME},
	{0x2621, "PCnet/PCI-II 79C970A",	/* 79C970A PCnet/PCI II. */
		LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
			LANCE_HAS_MISSED_FRAME},
	{0x0, 	 "PCnet (unknown)",
		LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
			LANCE_HAS_MISSED_FRAME},
};

enum {OLD_LANCE = 0, PCNET_ISA=1, PCNET_ISAP=2, PCNET_PCI=3, PCNET_VLB=4, PCNET_PCI_II=5, LANCE_UNKNOWN=6};
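
/* How the table is consulted (this mirrors the decode in lance_probe1()
 * below): CSR88/CSR89 together return a 32-bit chip ID whose low 12 bits
 * must read 0x003; bits 27:12 form the part number matched against
 * id_number above.  For example, a 79C970A would report something like
 * 0x02621003, giving part number 0x2621 and selecting the PCNET_PCI_II
 * entry. */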


/* Non-zero if lance_probe1() needs to allocate low-memory bounce buffers.
   Assume yes until we know the memory size. */
static unsigned char lance_need_isa_bounce_buffers = 1;

static int lance_open(struct net_device *dev);
static void lance_init_ring(struct net_device *dev, gfp_t mode);
static netdev_tx_t lance_start_xmit(struct sk_buff *skb,
				    struct net_device *dev);
static int lance_rx(struct net_device *dev);
static irqreturn_t lance_interrupt(int irq, void *dev_id);
static int lance_close(struct net_device *dev);
static struct net_device_stats *lance_get_stats(struct net_device *dev);
static void set_multicast_list(struct net_device *dev);
static void lance_tx_timeout (struct net_device *dev);



#ifdef MODULE
#define MAX_CARDS		8	/* Max number of interfaces (cards) per module */

static struct net_device *dev_lance[MAX_CARDS];
static int io[MAX_CARDS];
static int dma[MAX_CARDS];
static int irq[MAX_CARDS];

module_param_hw_array(io, int, ioport, NULL, 0);
module_param_hw_array(dma, int, dma, NULL, 0);
module_param_hw_array(irq, int, irq, NULL, 0);
module_param(lance_debug, int, 0);
MODULE_PARM_DESC(io, "LANCE/PCnet I/O base address(es), required");
MODULE_PARM_DESC(dma, "LANCE/PCnet ISA DMA channel (ignored for some devices)");
MODULE_PARM_DESC(irq, "LANCE/PCnet IRQ number (ignored for some devices)");
MODULE_PARM_DESC(lance_debug, "LANCE/PCnet debug level (0-7)");
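
/* Typical module usage (an illustration, not taken from this file): the I/O
 * base must always be given, e.g. for two cards
 *
 *	modprobe lance io=0x300,0x340 irq=5,9 dma=5,6
 *
 * Autoprobing of the I/O base is deliberately refused in init_module()
 * below. */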

int __init init_module(void)
{
	struct net_device *dev;
	int this_dev, found = 0;

	for (this_dev = 0; this_dev < MAX_CARDS; this_dev++) {
		if (io[this_dev] == 0)  {
			if (this_dev != 0) /* only complain once */
				break;
			printk(KERN_NOTICE "lance.c: Module autoprobing not allowed. Append \"io=0xNNN\" value(s).\n");
			return -EPERM;
		}
		dev = alloc_etherdev(0);
		if (!dev)
			break;
		dev->irq = irq[this_dev];
		dev->base_addr = io[this_dev];
		dev->dma = dma[this_dev];
		if (do_lance_probe(dev) == 0) {
			dev_lance[found++] = dev;
			continue;
		}
		free_netdev(dev);
		break;
	}
	if (found != 0)
		return 0;
	return -ENXIO;
}

static void cleanup_card(struct net_device *dev)
{
	struct lance_private *lp = dev->ml_priv;
	if (dev->dma != 4)
		free_dma(dev->dma);
	release_region(dev->base_addr, LANCE_TOTAL_SIZE);
	kfree(lp->tx_bounce_buffs);
	kfree((void*)lp->rx_buffs);
	kfree(lp);
}

void __exit cleanup_module(void)
{
	int this_dev;

	for (this_dev = 0; this_dev < MAX_CARDS; this_dev++) {
		struct net_device *dev = dev_lance[this_dev];
		if (dev) {
			unregister_netdev(dev);
			cleanup_card(dev);
			free_netdev(dev);
		}
	}
}
#endif /* MODULE */
MODULE_LICENSE("GPL");


/* Starting in v2.1.*, the LANCE/PCnet probe is now similar to the other
   board probes now that kmalloc() can allocate ISA DMA-able regions.
   This also allows the LANCE driver to be used as a module.
   */
static int __init do_lance_probe(struct net_device *dev)
{
	unsigned int *port;
	int result;

	if (high_memory <= phys_to_virt(16*1024*1024))
		lance_need_isa_bounce_buffers = 0;

	for (port = lance_portlist; *port; port++) {
		int ioaddr = *port;
		struct resource *r = request_region(ioaddr, LANCE_TOTAL_SIZE,
							"lance-probe");

		if (r) {
			/* Detect the card with minimal I/O reads */
			char offset14 = inb(ioaddr + 14);
			int card;
			for (card = 0; card < NUM_CARDS; ++card)
				if (cards[card].id_offset14 == offset14)
					break;
			if (card < NUM_CARDS) {/*yes, the first byte matches*/
				char offset15 = inb(ioaddr + 15);
				for (card = 0; card < NUM_CARDS; ++card)
					if ((cards[card].id_offset14 == offset14) &&
						(cards[card].id_offset15 == offset15))
						break;
			}
			if (card < NUM_CARDS) { /*Signature OK*/
				result = lance_probe1(dev, ioaddr, 0, 0);
				if (!result) {
					struct lance_private *lp = dev->ml_priv;
					int ver = lp->chip_version;

					r->name = chip_table[ver].name;
					return 0;
				}
			}
			release_region(ioaddr, LANCE_TOTAL_SIZE);
		}
	}
	return -ENODEV;
}

#ifndef MODULE
struct net_device * __init lance_probe(int unit)
{
	struct net_device *dev = alloc_etherdev(0);
	int err;

	if (!dev)
		return ERR_PTR(-ENODEV);

	sprintf(dev->name, "eth%d", unit);
	netdev_boot_setup_check(dev);

	err = do_lance_probe(dev);
	if (err)
		goto out;
	return dev;
out:
	free_netdev(dev);
	return ERR_PTR(err);
}
#endif

static const struct net_device_ops lance_netdev_ops = {
	.ndo_open		= lance_open,
	.ndo_start_xmit		= lance_start_xmit,
	.ndo_stop		= lance_close,
	.ndo_get_stats		= lance_get_stats,
	.ndo_set_rx_mode	= set_multicast_list,
	.ndo_tx_timeout		= lance_tx_timeout,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};

static int __init lance_probe1(struct net_device *dev, int ioaddr, int irq, int options)
{
	struct lance_private *lp;
	unsigned long dma_channels;	/* Mark spuriously-busy DMA channels */
	int i, reset_val, lance_version;
	const char *chipname;
	/* Flags for specific chips or boards. */
	unsigned char hpJ2405A = 0;	/* HP ISA adaptor */
	int hp_builtin = 0;		/* HP on-board ethernet. */
	static int did_version;		/* Already printed version info. */
	unsigned long flags;
	int err = -ENOMEM;
	void __iomem *bios;

	/* First we look for special cases.
	   Check for HP's on-board ethernet by looking for 'HP' in the BIOS.
	   There are two HP versions, check the BIOS for the configuration port.
	   This method provided by L. Julliard, Laurent_Julliard@grenoble.hp.com.
	   */
	bios = ioremap(0xf00f0, 0x14);
	if (!bios)
		return -ENOMEM;
	if (readw(bios + 0x12) == 0x5048)  {
		static const short ioaddr_table[] = { 0x300, 0x320, 0x340, 0x360};
		int hp_port = (readl(bios + 1) & 1)  ? 0x499 : 0x99;
		/* We can have boards other than the built-in!  Verify this is on-board. */
		if ((inb(hp_port) & 0xc0) == 0x80 &&
		    ioaddr_table[inb(hp_port) & 3] == ioaddr)
			hp_builtin = hp_port;
	}
	iounmap(bios);
	/* We also recognize the HP Vectra on-board here, but check below. */
	hpJ2405A = (inb(ioaddr) == 0x08 && inb(ioaddr+1) == 0x00 &&
		    inb(ioaddr+2) == 0x09);

	/* Reset the LANCE.	 */
	reset_val = inw(ioaddr+LANCE_RESET); /* Reset the LANCE */

	/* The Un-Reset is only needed for the real NE2100, and will
	   confuse the HP board. */
	if (!hpJ2405A)
		outw(reset_val, ioaddr+LANCE_RESET);

	outw(0x0000, ioaddr+LANCE_ADDR); /* Switch to window 0 */
	if (inw(ioaddr+LANCE_DATA) != 0x0004)
		return -ENODEV;

	/* Get the version of the chip. */
	outw(88, ioaddr+LANCE_ADDR);
	if (inw(ioaddr+LANCE_ADDR) != 88) {
		lance_version = 0;
	} else {			/* Good, it's a newer chip. */
		int chip_version = inw(ioaddr+LANCE_DATA);
		outw(89, ioaddr+LANCE_ADDR);
		chip_version |= inw(ioaddr+LANCE_DATA) << 16;
		if (lance_debug > 2)
			printk("  LANCE chip version is %#x.\n", chip_version);
		if ((chip_version & 0xfff) != 0x003)
			return -ENODEV;
		chip_version = (chip_version >> 12) & 0xffff;
		for (lance_version = 1; chip_table[lance_version].id_number; lance_version++) {
			if (chip_table[lance_version].id_number == chip_version)
				break;
		}
	}

	/* We can't allocate private data from alloc_etherdev() because it must
	   be in an ISA DMA-able region. */
	chipname = chip_table[lance_version].name;
	printk("%s: %s at %#3x, ", dev->name, chipname, ioaddr);

	/* There is a 16 byte station address PROM at the base address.
	   The first six bytes are the station address. */
	for (i = 0; i < 6; i++)
		dev->dev_addr[i] = inb(ioaddr + i);
	printk("%pM", dev->dev_addr);

	dev->base_addr = ioaddr;
	/* Make certain the data structures used by the LANCE are aligned and DMAble. */

	lp = kzalloc(sizeof(*lp), GFP_DMA | GFP_KERNEL);
	if (!lp)
		return -ENOMEM;
	if (lance_debug > 6) printk(" (#0x%05lx)", (unsigned long)lp);
	dev->ml_priv = lp;
	lp->name = chipname;
	lp->rx_buffs = (unsigned long)kmalloc_array(RX_RING_SIZE, PKT_BUF_SZ,
						    GFP_DMA | GFP_KERNEL);
	if (!lp->rx_buffs)
		goto out_lp;
	if (lance_need_isa_bounce_buffers) {
		lp->tx_bounce_buffs = kmalloc_array(TX_RING_SIZE, PKT_BUF_SZ,
						    GFP_DMA | GFP_KERNEL);
		if (!lp->tx_bounce_buffs)
			goto out_rx;
	} else
		lp->tx_bounce_buffs = NULL;

	lp->chip_version = lance_version;
	spin_lock_init(&lp->devlock);

	lp->init_block.mode = 0x0003;		/* Disable Rx and Tx. */
	for (i = 0; i < 6; i++)
		lp->init_block.phys_addr[i] = dev->dev_addr[i];
	lp->init_block.filter[0] = 0x00000000;
	lp->init_block.filter[1] = 0x00000000;
	lp->init_block.rx_ring = ((u32)isa_virt_to_bus(lp->rx_ring) & 0xffffff) | RX_RING_LEN_BITS;
	lp->init_block.tx_ring = ((u32)isa_virt_to_bus(lp->tx_ring) & 0xffffff) | TX_RING_LEN_BITS;

	outw(0x0001, ioaddr+LANCE_ADDR);
	inw(ioaddr+LANCE_ADDR);
	outw((short) (u32) isa_virt_to_bus(&lp->init_block), ioaddr+LANCE_DATA);
	outw(0x0002, ioaddr+LANCE_ADDR);
	inw(ioaddr+LANCE_ADDR);
	outw(((u32)isa_virt_to_bus(&lp->init_block)) >> 16, ioaddr+LANCE_DATA);
	outw(0x0000, ioaddr+LANCE_ADDR);
	inw(ioaddr+LANCE_ADDR);

	if (irq) {					/* Set iff PCI card. */
		dev->dma = 4;			/* Native bus-master, no DMA channel needed. */
		dev->irq = irq;
	} else if (hp_builtin) {
		static const char dma_tbl[4] = {3, 5, 6, 0};
		static const char irq_tbl[4] = {3, 4, 5, 9};
		unsigned char port_val = inb(hp_builtin);
		dev->dma = dma_tbl[(port_val >> 4) & 3];
		dev->irq = irq_tbl[(port_val >> 2) & 3];
		printk(" HP Vectra IRQ %d DMA %d.\n", dev->irq, dev->dma);
	} else if (hpJ2405A) {
		static const char dma_tbl[4] = {3, 5, 6, 7};
		static const char irq_tbl[8] = {3, 4, 5, 9, 10, 11, 12, 15};
		short reset_val = inw(ioaddr+LANCE_RESET);
		dev->dma = dma_tbl[(reset_val >> 2) & 3];
		dev->irq = irq_tbl[(reset_val >> 4) & 7];
		printk(" HP J2405A IRQ %d DMA %d.\n", dev->irq, dev->dma);
	} else if (lance_version == PCNET_ISAP) {		/* The plug-n-play version. */
		short bus_info;
		outw(8, ioaddr+LANCE_ADDR);
		bus_info = inw(ioaddr+LANCE_BUS_IF);
		dev->dma = bus_info & 0x07;
		dev->irq = (bus_info >> 4) & 0x0F;
	} else {
		/* The DMA channel may be passed in PARAM1. */
		if (dev->mem_start & 0x07)
			dev->dma = dev->mem_start & 0x07;
	}

	if (dev->dma == 0) {
		/* Read the DMA channel status register, so that we can avoid
		   stuck DMA channels in the DMA detection below. */
		dma_channels = ((inb(DMA1_STAT_REG) >> 4) & 0x0f) |
			(inb(DMA2_STAT_REG) & 0xf0);
	}
	err = -ENODEV;
	if (dev->irq >= 2)
		printk(" assigned IRQ %d", dev->irq);
	else if (lance_version != 0)  {	/* 7990 boards need DMA detection first. */
		unsigned long irq_mask;

		/* To auto-IRQ we enable the initialization-done and DMA error
		   interrupts. For ISA boards we get a DMA error, but VLB and PCI
		   boards will work. */
		irq_mask = probe_irq_on();

		/* Trigger an initialization just for the interrupt. */
		outw(0x0041, ioaddr+LANCE_DATA);

		mdelay(20);
		dev->irq = probe_irq_off(irq_mask);
		if (dev->irq)
			printk(", probed IRQ %d", dev->irq);
		else {
			printk(", failed to detect IRQ line.\n");
			goto out_tx;
		}

		/* Check for the initialization done bit, 0x0100, which means
		   that we don't need a DMA channel. */
		if (inw(ioaddr+LANCE_DATA) & 0x0100)
			dev->dma = 4;
	}

	if (dev->dma == 4) {
		printk(", no DMA needed.\n");
	} else if (dev->dma) {
		if (request_dma(dev->dma, chipname)) {
			printk("DMA %d allocation failed.\n", dev->dma);
			goto out_tx;
		} else
			printk(", assigned DMA %d.\n", dev->dma);
	} else {			/* OK, we have to auto-DMA. */
		for (i = 0; i < 4; i++) {
			static const char dmas[] = { 5, 6, 7, 3 };
			int dma = dmas[i];
			int boguscnt;

			/* Don't enable a permanently busy DMA channel, or the machine
			   will hang. */
			if (test_bit(dma, &dma_channels))
				continue;
			outw(0x7f04, ioaddr+LANCE_DATA); /* Clear the memory error bits. */
			if (request_dma(dma, chipname))
				continue;

			flags=claim_dma_lock();
			set_dma_mode(dma, DMA_MODE_CASCADE);
			enable_dma(dma);
			release_dma_lock(flags);

			/* Trigger an initialization. */
			outw(0x0001, ioaddr+LANCE_DATA);
			for (boguscnt = 100; boguscnt > 0; --boguscnt)
				if (inw(ioaddr+LANCE_DATA) & 0x0900)
					break;
			if (inw(ioaddr+LANCE_DATA) & 0x0100) {
				dev->dma = dma;
				printk(", DMA %d.\n", dev->dma);
				break;
			} else {
				flags=claim_dma_lock();
				disable_dma(dma);
				release_dma_lock(flags);
				free_dma(dma);
			}
		}
		if (i == 4) {			/* Failure: bail. */
			printk("DMA detection failed.\n");
			goto out_tx;
		}
	}

	if (lance_version == 0 && dev->irq == 0) {
		/* We may auto-IRQ now that we have a DMA channel. */
		/* Trigger an initialization just for the interrupt. */
		unsigned long irq_mask;

		irq_mask = probe_irq_on();
		outw(0x0041, ioaddr+LANCE_DATA);

		mdelay(40);
		dev->irq = probe_irq_off(irq_mask);
		if (dev->irq == 0) {
			printk("  Failed to detect the 7990 IRQ line.\n");
			goto out_dma;
		}
		printk("  Auto-IRQ detected IRQ%d.\n", dev->irq);
	}

	if (chip_table[lp->chip_version].flags & LANCE_ENABLE_AUTOSELECT) {
		/* Turn on auto-select of media (10baseT or BNC) so that the user
		   can watch the LEDs even if the board isn't opened. */
		outw(0x0002, ioaddr+LANCE_ADDR);
		/* Don't touch 10base2 power bit. */
		outw(inw(ioaddr+LANCE_BUS_IF) | 0x0002, ioaddr+LANCE_BUS_IF);
	}

	if (lance_debug > 0  &&  did_version++ == 0)
		printk(version);

	/* The LANCE-specific entries in the device structure. */
	dev->netdev_ops = &lance_netdev_ops;
	dev->watchdog_timeo = TX_TIMEOUT;

	err = register_netdev(dev);
	if (err)
		goto out_dma;
	return 0;
out_dma:
	if (dev->dma != 4)
		free_dma(dev->dma);
out_tx:
	kfree(lp->tx_bounce_buffs);
out_rx:
	kfree((void*)lp->rx_buffs);
out_lp:
	kfree(lp);
	return err;
}


static int
lance_open(struct net_device *dev)
{
	struct lance_private *lp = dev->ml_priv;
	int ioaddr = dev->base_addr;
	int i;

	if (dev->irq == 0 ||
		request_irq(dev->irq, lance_interrupt, 0, dev->name, dev)) {
		return -EAGAIN;
	}

	/* We used to allocate DMA here, but that was silly.
	   DMA lines can't be shared!  We now permanently allocate them. */

	/* Reset the LANCE */
	inw(ioaddr+LANCE_RESET);

	/* The DMA controller is used as a no-operation slave, "cascade mode". */
	if (dev->dma != 4) {
		unsigned long flags=claim_dma_lock();
		enable_dma(dev->dma);
		set_dma_mode(dev->dma, DMA_MODE_CASCADE);
		release_dma_lock(flags);
	}

	/* Un-Reset the LANCE, needed only for the NE2100. */
	if (chip_table[lp->chip_version].flags & LANCE_MUST_UNRESET)
		outw(0, ioaddr+LANCE_RESET);

	if (chip_table[lp->chip_version].flags & LANCE_ENABLE_AUTOSELECT) {
		/* This is 79C960-specific: Turn on auto-select of media (AUI, BNC). */
		outw(0x0002, ioaddr+LANCE_ADDR);
		/* Only touch autoselect bit. */
		outw(inw(ioaddr+LANCE_BUS_IF) | 0x0002, ioaddr+LANCE_BUS_IF);
	}

	if (lance_debug > 1)
		printk("%s: lance_open() irq %d dma %d tx/rx rings %#x/%#x init %#x.\n",
			   dev->name, dev->irq, dev->dma,
		           (u32) isa_virt_to_bus(lp->tx_ring),
		           (u32) isa_virt_to_bus(lp->rx_ring),
			   (u32) isa_virt_to_bus(&lp->init_block));

	lance_init_ring(dev, GFP_KERNEL);
	/* Re-initialize the LANCE, and start it when done. */
	outw(0x0001, ioaddr+LANCE_ADDR);
	outw((short) (u32) isa_virt_to_bus(&lp->init_block), ioaddr+LANCE_DATA);
	outw(0x0002, ioaddr+LANCE_ADDR);
	outw(((u32)isa_virt_to_bus(&lp->init_block)) >> 16, ioaddr+LANCE_DATA);

	outw(0x0004, ioaddr+LANCE_ADDR);
	outw(0x0915, ioaddr+LANCE_DATA);

	outw(0x0000, ioaddr+LANCE_ADDR);
	outw(0x0001, ioaddr+LANCE_DATA);

	netif_start_queue (dev);

	i = 0;
	while (i++ < 100)
		if (inw(ioaddr+LANCE_DATA) & 0x0100)
			break;
	/*
	 * We used to clear the InitDone bit, 0x0100, here but Mark Stockton
	 * reports that doing so triggers a bug in the '974.
	 */
	outw(0x0042, ioaddr+LANCE_DATA);

	if (lance_debug > 2)
		printk("%s: LANCE open after %d ticks, init block %#x csr0 %4.4x.\n",
			   dev->name, i, (u32) isa_virt_to_bus(&lp->init_block), inw(ioaddr+LANCE_DATA));

	return 0;					/* Always succeed */
}

/* The LANCE has been halted for one reason or another (busmaster memory
   arbitration error, Tx FIFO underflow, driver stopped it to reconfigure,
   etc.).  Modern LANCE variants always reload their ring-buffer
   configuration when restarted, so we must reinitialize our ring
   context before restarting.  As part of this reinitialization,
   find all packets still on the Tx ring and pretend that they had been
   sent (in effect, drop the packets on the floor) - the higher-level
   protocols will time out and retransmit.  It'd be better to shuffle
   these skbs to a temp list and then actually re-Tx them after
   restarting the chip, but I'm too lazy to do so right now.  dplatt@3do.com
*/

static void
lance_purge_ring(struct net_device *dev)
{
	struct lance_private *lp = dev->ml_priv;
	int i;

	/* Free all the skbuffs in the Rx and Tx queues. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb = lp->rx_skbuff[i];
		lp->rx_skbuff[i] = NULL;
		lp->rx_ring[i].base = 0;		/* Not owned by LANCE chip. */
		if (skb)
			dev_kfree_skb_any(skb);
	}
	for (i = 0; i < TX_RING_SIZE; i++) {
		if (lp->tx_skbuff[i]) {
			dev_kfree_skb_any(lp->tx_skbuff[i]);
			lp->tx_skbuff[i] = NULL;
		}
	}
}


/* Initialize the LANCE Rx and Tx rings. */
static void
lance_init_ring(struct net_device *dev, gfp_t gfp)
{
	struct lance_private *lp = dev->ml_priv;
	int i;

	lp->cur_rx = lp->cur_tx = 0;
	lp->dirty_rx = lp->dirty_tx = 0;

	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb;
		void *rx_buff;

		skb = alloc_skb(PKT_BUF_SZ, GFP_DMA | gfp);
		lp->rx_skbuff[i] = skb;
		if (skb)
			rx_buff = skb->data;
		else
			rx_buff = kmalloc(PKT_BUF_SZ, GFP_DMA | gfp);
		if (rx_buff == NULL)
			lp->rx_ring[i].base = 0;
		else
			lp->rx_ring[i].base = (u32)isa_virt_to_bus(rx_buff) | 0x80000000;
		lp->rx_ring[i].buf_length = -PKT_BUF_SZ;
	}
	/* The Tx buffer address is filled in as needed, but we do need to clear
	   the upper ownership bit. */
	for (i = 0; i < TX_RING_SIZE; i++) {
		lp->tx_skbuff[i] = NULL;
		lp->tx_ring[i].base = 0;
	}

	lp->init_block.mode = 0x0000;
	for (i = 0; i < 6; i++)
		lp->init_block.phys_addr[i] = dev->dev_addr[i];
	lp->init_block.filter[0] = 0x00000000;
	lp->init_block.filter[1] = 0x00000000;
	lp->init_block.rx_ring = ((u32)isa_virt_to_bus(lp->rx_ring) & 0xffffff) | RX_RING_LEN_BITS;
	lp->init_block.tx_ring = ((u32)isa_virt_to_bus(lp->tx_ring) & 0xffffff) | TX_RING_LEN_BITS;
}

static void
lance_restart(struct net_device *dev, unsigned int csr0_bits, int must_reinit)
{
	struct lance_private *lp = dev->ml_priv;

	if (must_reinit ||
		(chip_table[lp->chip_version].flags & LANCE_MUST_REINIT_RING)) {
		lance_purge_ring(dev);
		lance_init_ring(dev, GFP_ATOMIC);
	}
	outw(0x0000,    dev->base_addr + LANCE_ADDR);
	outw(csr0_bits, dev->base_addr + LANCE_DATA);
}
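
/* Callers pass the CSR0 bits to write for the restart; for example the
 * Tx-timeout handler below uses lance_restart(dev, 0x0043, 1).  Assuming
 * the CSR0 bit layout noted near the register offsets above, 0x0043 is
 * INIT | STRT | INEA, i.e. reinitialize, start, and re-enable interrupts
 * in one write. */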
914b955f6caSJeff Kirsher 
915b955f6caSJeff Kirsher 
916b955f6caSJeff Kirsher static void lance_tx_timeout (struct net_device *dev)
917b955f6caSJeff Kirsher {
918b955f6caSJeff Kirsher 	struct lance_private *lp = (struct lance_private *) dev->ml_priv;
919b955f6caSJeff Kirsher 	int ioaddr = dev->base_addr;
920b955f6caSJeff Kirsher 
921b955f6caSJeff Kirsher 	outw (0, ioaddr + LANCE_ADDR);
922b955f6caSJeff Kirsher 	printk ("%s: transmit timed out, status %4.4x, resetting.\n",
923b955f6caSJeff Kirsher 		dev->name, inw (ioaddr + LANCE_DATA));
924b955f6caSJeff Kirsher 	outw (0x0004, ioaddr + LANCE_DATA);
925b955f6caSJeff Kirsher 	dev->stats.tx_errors++;
926b955f6caSJeff Kirsher #ifndef final_version
927b955f6caSJeff Kirsher 	if (lance_debug > 3) {
928b955f6caSJeff Kirsher 		int i;
929b955f6caSJeff Kirsher 		printk (" Ring data dump: dirty_tx %d cur_tx %d%s cur_rx %d.",
930b955f6caSJeff Kirsher 		  lp->dirty_tx, lp->cur_tx, netif_queue_stopped(dev) ? " (full)" : "",
931b955f6caSJeff Kirsher 			lp->cur_rx);
932b955f6caSJeff Kirsher 		for (i = 0; i < RX_RING_SIZE; i++)
933b955f6caSJeff Kirsher 			printk ("%s %08x %04x %04x", i & 0x3 ? "" : "\n ",
934b955f6caSJeff Kirsher 			 lp->rx_ring[i].base, -lp->rx_ring[i].buf_length,
935b955f6caSJeff Kirsher 				lp->rx_ring[i].msg_length);
936b955f6caSJeff Kirsher 		for (i = 0; i < TX_RING_SIZE; i++)
937b955f6caSJeff Kirsher 			printk ("%s %08x %04x %04x", i & 0x3 ? "" : "\n ",
938b955f6caSJeff Kirsher 			     lp->tx_ring[i].base, -lp->tx_ring[i].length,
939b955f6caSJeff Kirsher 				lp->tx_ring[i].misc);
940b955f6caSJeff Kirsher 		printk ("\n");
941b955f6caSJeff Kirsher 	}
942b955f6caSJeff Kirsher #endif
943b955f6caSJeff Kirsher 	lance_restart (dev, 0x0043, 1);
944b955f6caSJeff Kirsher 
945860e9538SFlorian Westphal 	netif_trans_update(dev); /* prevent tx timeout */
946b955f6caSJeff Kirsher 	netif_wake_queue (dev);
947b955f6caSJeff Kirsher }
948b955f6caSJeff Kirsher 
949b955f6caSJeff Kirsher 
950b955f6caSJeff Kirsher static netdev_tx_t lance_start_xmit(struct sk_buff *skb,
951b955f6caSJeff Kirsher 				    struct net_device *dev)
952b955f6caSJeff Kirsher {
953b955f6caSJeff Kirsher 	struct lance_private *lp = dev->ml_priv;
954b955f6caSJeff Kirsher 	int ioaddr = dev->base_addr;
955b955f6caSJeff Kirsher 	int entry;
956b955f6caSJeff Kirsher 	unsigned long flags;
957b955f6caSJeff Kirsher 
958b955f6caSJeff Kirsher 	spin_lock_irqsave(&lp->devlock, flags);
959b955f6caSJeff Kirsher 
960b955f6caSJeff Kirsher 	if (lance_debug > 3) {
961b955f6caSJeff Kirsher 		outw(0x0000, ioaddr+LANCE_ADDR);
962b955f6caSJeff Kirsher 		printk("%s: lance_start_xmit() called, csr0 %4.4x.\n", dev->name,
963b955f6caSJeff Kirsher 			   inw(ioaddr+LANCE_DATA));
964b955f6caSJeff Kirsher 		outw(0x0000, ioaddr+LANCE_DATA);
965b955f6caSJeff Kirsher 	}
966b955f6caSJeff Kirsher 
967b955f6caSJeff Kirsher 	/* Fill in a Tx ring entry */
968b955f6caSJeff Kirsher 
969b955f6caSJeff Kirsher 	/* Mask to ring buffer boundary. */
970b955f6caSJeff Kirsher 	entry = lp->cur_tx & TX_RING_MOD_MASK;
971b955f6caSJeff Kirsher 
972b955f6caSJeff Kirsher 	/* Caution: the write order is important here, set the base address
973b955f6caSJeff Kirsher 	   with the "ownership" bits last. */
974b955f6caSJeff Kirsher 
975b955f6caSJeff Kirsher 	/* The old LANCE chips doesn't automatically pad buffers to min. size. */
976b955f6caSJeff Kirsher 	if (chip_table[lp->chip_version].flags & LANCE_MUST_PAD) {
977b955f6caSJeff Kirsher 		if (skb->len < ETH_ZLEN) {
978b955f6caSJeff Kirsher 			if (skb_padto(skb, ETH_ZLEN))
979b955f6caSJeff Kirsher 				goto out;
980b955f6caSJeff Kirsher 			lp->tx_ring[entry].length = -ETH_ZLEN;
981b955f6caSJeff Kirsher 		}
982b955f6caSJeff Kirsher 		else
983b955f6caSJeff Kirsher 			lp->tx_ring[entry].length = -skb->len;
984b955f6caSJeff Kirsher 	} else
985b955f6caSJeff Kirsher 		lp->tx_ring[entry].length = -skb->len;
986b955f6caSJeff Kirsher 
987b955f6caSJeff Kirsher 	lp->tx_ring[entry].misc = 0x0000;
988b955f6caSJeff Kirsher 
989b955f6caSJeff Kirsher 	dev->stats.tx_bytes += skb->len;
990b955f6caSJeff Kirsher 
991b955f6caSJeff Kirsher 	/* If any part of this buffer is >16M we must copy it to a low-memory
992b955f6caSJeff Kirsher 	   buffer. */
993b955f6caSJeff Kirsher 	if ((u32)isa_virt_to_bus(skb->data) + skb->len > 0x01000000) {
994b955f6caSJeff Kirsher 		if (lance_debug > 5)
995b955f6caSJeff Kirsher 			printk("%s: bouncing a high-memory packet (%#x).\n",
996b955f6caSJeff Kirsher 				   dev->name, (u32)isa_virt_to_bus(skb->data));
997b955f6caSJeff Kirsher 		skb_copy_from_linear_data(skb, &lp->tx_bounce_buffs[entry], skb->len);
998b955f6caSJeff Kirsher 		lp->tx_ring[entry].base =
999b955f6caSJeff Kirsher 			((u32)isa_virt_to_bus((lp->tx_bounce_buffs + entry)) & 0xffffff) | 0x83000000;
1000b955f6caSJeff Kirsher 		dev_kfree_skb(skb);
1001b955f6caSJeff Kirsher 	} else {
1002b955f6caSJeff Kirsher 		lp->tx_skbuff[entry] = skb;
1003b955f6caSJeff Kirsher 		lp->tx_ring[entry].base = ((u32)isa_virt_to_bus(skb->data) & 0xffffff) | 0x83000000;
1004b955f6caSJeff Kirsher 	}
1005b955f6caSJeff Kirsher 	lp->cur_tx++;
1006b955f6caSJeff Kirsher 
1007b955f6caSJeff Kirsher 	/* Trigger an immediate send poll. */
1008b955f6caSJeff Kirsher 	outw(0x0000, ioaddr+LANCE_ADDR);
1009b955f6caSJeff Kirsher 	outw(0x0048, ioaddr+LANCE_DATA);
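	/* 0x0048 = INEA | TDMD in CSR0: keep interrupts enabled and set
	   "transmit demand" so the chip polls the Tx ring immediately rather
	   than waiting for its periodic poll. */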
1010b955f6caSJeff Kirsher 
1011b955f6caSJeff Kirsher 	if ((lp->cur_tx - lp->dirty_tx) >= TX_RING_SIZE)
1012b955f6caSJeff Kirsher 		netif_stop_queue(dev);
1013b955f6caSJeff Kirsher 
1014b955f6caSJeff Kirsher out:
1015b955f6caSJeff Kirsher 	spin_unlock_irqrestore(&lp->devlock, flags);
1016b955f6caSJeff Kirsher 	return NETDEV_TX_OK;
1017b955f6caSJeff Kirsher }
1018b955f6caSJeff Kirsher 
1019b955f6caSJeff Kirsher /* The LANCE interrupt handler. */
1020b955f6caSJeff Kirsher static irqreturn_t lance_interrupt(int irq, void *dev_id)
1021b955f6caSJeff Kirsher {
1022b955f6caSJeff Kirsher 	struct net_device *dev = dev_id;
1023b955f6caSJeff Kirsher 	struct lance_private *lp;
1024b955f6caSJeff Kirsher 	int csr0, ioaddr, boguscnt=10;
1025b955f6caSJeff Kirsher 	int must_restart;
1026b955f6caSJeff Kirsher 
1027b955f6caSJeff Kirsher 	ioaddr = dev->base_addr;
1028b955f6caSJeff Kirsher 	lp = dev->ml_priv;
1029b955f6caSJeff Kirsher 
1030b955f6caSJeff Kirsher 	spin_lock (&lp->devlock);
1031b955f6caSJeff Kirsher 
1032b955f6caSJeff Kirsher 	outw(0x00, dev->base_addr + LANCE_ADDR);
1033b955f6caSJeff Kirsher 	while ((csr0 = inw(dev->base_addr + LANCE_DATA)) & 0x8600 &&
1034b955f6caSJeff Kirsher 	       --boguscnt >= 0) {
1035b955f6caSJeff Kirsher 		/* Acknowledge all of the current interrupt sources ASAP. */
1036b955f6caSJeff Kirsher 		outw(csr0 & ~0x004f, dev->base_addr + LANCE_DATA);
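		/* The CSR0 interrupt flags (BABL, MISS, MERR, RINT, TINT, IDON)
		   are cleared by writing them back as ones.  Masking with ~0x004f
		   keeps INIT, STRT, STOP, TDMD and INEA low so this acknowledge
		   cannot change the chip's run state; INEA is set again by the
		   0x7940 write when the handler exits.  (The 0x8600 test above is
		   ERR | RINT | TINT.) */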
1037b955f6caSJeff Kirsher 
1038b955f6caSJeff Kirsher 		must_restart = 0;
1039b955f6caSJeff Kirsher 
1040b955f6caSJeff Kirsher 		if (lance_debug > 5)
1041b955f6caSJeff Kirsher 			printk("%s: interrupt  csr0=%#2.2x new csr=%#2.2x.\n",
1042b955f6caSJeff Kirsher 				   dev->name, csr0, inw(dev->base_addr + LANCE_DATA));
1043b955f6caSJeff Kirsher 
1044b955f6caSJeff Kirsher 		if (csr0 & 0x0400)			/* Rx interrupt */
1045b955f6caSJeff Kirsher 			lance_rx(dev);
1046b955f6caSJeff Kirsher 
1047b955f6caSJeff Kirsher 		if (csr0 & 0x0200) {		/* Tx-done interrupt */
1048b955f6caSJeff Kirsher 			int dirty_tx = lp->dirty_tx;
1049b955f6caSJeff Kirsher 
1050b955f6caSJeff Kirsher 			while (dirty_tx < lp->cur_tx) {
1051b955f6caSJeff Kirsher 				int entry = dirty_tx & TX_RING_MOD_MASK;
1052b955f6caSJeff Kirsher 				int status = lp->tx_ring[entry].base;
1053b955f6caSJeff Kirsher 
1054b955f6caSJeff Kirsher 				if (status < 0)
1055b955f6caSJeff Kirsher 					break;			/* It still hasn't been Txed */
1056b955f6caSJeff Kirsher 
1057b955f6caSJeff Kirsher 				lp->tx_ring[entry].base = 0;
1058b955f6caSJeff Kirsher 
1059b955f6caSJeff Kirsher 				if (status & 0x40000000) {
1060b955f6caSJeff Kirsher 					/* There was a major error, log it. */
1061b955f6caSJeff Kirsher 					int err_status = lp->tx_ring[entry].misc;
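					/* 'misc' is TMD3; its error flags are 0x0400 RTRY
					   (excessive collisions), 0x0800 LCAR (lost
					   carrier), 0x1000 LCOL (late collision) and
					   0x4000 UFLO (FIFO underflow). */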
1062b955f6caSJeff Kirsher 					dev->stats.tx_errors++;
1063b955f6caSJeff Kirsher 					if (err_status & 0x0400)
1064b955f6caSJeff Kirsher 						dev->stats.tx_aborted_errors++;
1065b955f6caSJeff Kirsher 					if (err_status & 0x0800)
1066b955f6caSJeff Kirsher 						dev->stats.tx_carrier_errors++;
1067b955f6caSJeff Kirsher 					if (err_status & 0x1000)
1068b955f6caSJeff Kirsher 						dev->stats.tx_window_errors++;
1069b955f6caSJeff Kirsher 					if (err_status & 0x4000) {
1070b955f6caSJeff Kirsher 						/* Ackk!  On FIFO errors the Tx unit is turned off! */
1071b955f6caSJeff Kirsher 						dev->stats.tx_fifo_errors++;
1072b955f6caSJeff Kirsher 						/* Remove this verbosity later! */
1073b955f6caSJeff Kirsher 						printk("%s: Tx FIFO error! Status %4.4x.\n",
1074b955f6caSJeff Kirsher 							   dev->name, csr0);
1075b955f6caSJeff Kirsher 						/* Restart the chip. */
1076b955f6caSJeff Kirsher 						must_restart = 1;
1077b955f6caSJeff Kirsher 					}
1078b955f6caSJeff Kirsher 				} else {
1079b955f6caSJeff Kirsher 					if (status & 0x18000000)
1080b955f6caSJeff Kirsher 						dev->stats.collisions++;
1081b955f6caSJeff Kirsher 					dev->stats.tx_packets++;
1082b955f6caSJeff Kirsher 				}
1083b955f6caSJeff Kirsher 
1084b955f6caSJeff Kirsher 				/* We must free the original skb if it's not a data-only copy
1085b955f6caSJeff Kirsher 				   in the bounce buffer. */
1086b955f6caSJeff Kirsher 				if (lp->tx_skbuff[entry]) {
1087b955f6caSJeff Kirsher 					dev_kfree_skb_irq(lp->tx_skbuff[entry]);
1088b955f6caSJeff Kirsher 					lp->tx_skbuff[entry] = NULL;
1089b955f6caSJeff Kirsher 				}
1090b955f6caSJeff Kirsher 				dirty_tx++;
1091b955f6caSJeff Kirsher 			}
1092b955f6caSJeff Kirsher 
1093b955f6caSJeff Kirsher #ifndef final_version
1094b955f6caSJeff Kirsher 			if (lp->cur_tx - dirty_tx >= TX_RING_SIZE) {
1095b955f6caSJeff Kirsher 				printk("out-of-sync dirty pointer, %d vs. %d, full=%s.\n",
1096b955f6caSJeff Kirsher 					   dirty_tx, lp->cur_tx,
1097b955f6caSJeff Kirsher 					   netif_queue_stopped(dev) ? "yes" : "no");
1098b955f6caSJeff Kirsher 				dirty_tx += TX_RING_SIZE;
1099b955f6caSJeff Kirsher 			}
1100b955f6caSJeff Kirsher #endif
1101b955f6caSJeff Kirsher 
1102b955f6caSJeff Kirsher 			/* if the ring is no longer full, accept more packets */
1103b955f6caSJeff Kirsher 			if (netif_queue_stopped(dev) &&
1104b955f6caSJeff Kirsher 			    dirty_tx > lp->cur_tx - TX_RING_SIZE + 2)
1105b955f6caSJeff Kirsher 				netif_wake_queue (dev);
1106b955f6caSJeff Kirsher 
1107b955f6caSJeff Kirsher 			lp->dirty_tx = dirty_tx;
1108b955f6caSJeff Kirsher 		}
1109b955f6caSJeff Kirsher 
1110b955f6caSJeff Kirsher 		/* Log misc errors. */
1111b955f6caSJeff Kirsher 		if (csr0 & 0x4000)
1112b955f6caSJeff Kirsher 			dev->stats.tx_errors++; /* Tx babble. */
1113b955f6caSJeff Kirsher 		if (csr0 & 0x1000)
1114b955f6caSJeff Kirsher 			dev->stats.rx_errors++; /* Missed a Rx frame. */
1115b955f6caSJeff Kirsher 		if (csr0 & 0x0800) {
1116b955f6caSJeff Kirsher 			printk("%s: Bus master arbitration failure, status %4.4x.\n",
1117b955f6caSJeff Kirsher 				   dev->name, csr0);
1118b955f6caSJeff Kirsher 			/* Restart the chip. */
1119b955f6caSJeff Kirsher 			must_restart = 1;
1120b955f6caSJeff Kirsher 		}
1121b955f6caSJeff Kirsher 
1122b955f6caSJeff Kirsher 		if (must_restart) {
1123b955f6caSJeff Kirsher 			/* stop the chip to clear the error condition, then restart */
1124b955f6caSJeff Kirsher 			outw(0x0000, dev->base_addr + LANCE_ADDR);
1125b955f6caSJeff Kirsher 			outw(0x0004, dev->base_addr + LANCE_DATA);
1126b955f6caSJeff Kirsher 			lance_restart(dev, 0x0002, 0);
1127b955f6caSJeff Kirsher 		}
1128b955f6caSJeff Kirsher 	}
1129b955f6caSJeff Kirsher 
1130b955f6caSJeff Kirsher 	/* Clear any other interrupt, and set interrupt enable. */
1131b955f6caSJeff Kirsher 	outw(0x0000, dev->base_addr + LANCE_ADDR);
1132b955f6caSJeff Kirsher 	outw(0x7940, dev->base_addr + LANCE_DATA);
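	/* 0x7940 writes ones to BABL, CERR, MISS, MERR and IDON, clearing any
	   that are still pending, and sets INEA to re-enable interrupts. */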
1133b955f6caSJeff Kirsher 
1134b955f6caSJeff Kirsher 	if (lance_debug > 4)
1135b955f6caSJeff Kirsher 		printk("%s: exiting interrupt, csr%d=%#4.4x.\n",
1136b955f6caSJeff Kirsher 			   dev->name, inw(ioaddr + LANCE_ADDR),
1137b955f6caSJeff Kirsher 			   inw(dev->base_addr + LANCE_DATA));
1138b955f6caSJeff Kirsher 
1139b955f6caSJeff Kirsher 	spin_unlock (&lp->devlock);
1140b955f6caSJeff Kirsher 	return IRQ_HANDLED;
1141b955f6caSJeff Kirsher }
1142b955f6caSJeff Kirsher 
1143b955f6caSJeff Kirsher static int
1144b955f6caSJeff Kirsher lance_rx(struct net_device *dev)
1145b955f6caSJeff Kirsher {
1146b955f6caSJeff Kirsher 	struct lance_private *lp = dev->ml_priv;
1147b955f6caSJeff Kirsher 	int entry = lp->cur_rx & RX_RING_MOD_MASK;
1148b955f6caSJeff Kirsher 	int i;
1149b955f6caSJeff Kirsher 
1150b955f6caSJeff Kirsher 	/* If we own the next entry, it's a new packet. Send it up. */
1151b955f6caSJeff Kirsher 	while (lp->rx_ring[entry].base >= 0) {
1152b955f6caSJeff Kirsher 		int status = lp->rx_ring[entry].base >> 24;
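		/* 'status' is the RMD1 flag byte: 0x80 OWN, 0x40 ERR, 0x20 FRAM,
		   0x10 OFLO, 0x08 CRC, 0x04 BUFF, 0x02 STP, 0x01 ENP.  Exactly
		   0x03 (STP | ENP) means one complete, error-free frame. */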
1153b955f6caSJeff Kirsher 
1154b955f6caSJeff Kirsher 		if (status != 0x03) {			/* There was an error. */
1155b955f6caSJeff Kirsher 			/* There is a tricky error noted by John Murphy,
1156b955f6caSJeff Kirsher 			   <murf@perftech.com> to Russ Nelson: Even with full-sized
1157b955f6caSJeff Kirsher 			   buffers it's possible for a jabber packet to use two
1158b955f6caSJeff Kirsher 			   buffers, with only the last correctly noting the error. */
1159b955f6caSJeff Kirsher 			if (status & 0x01)	/* Only count a general error at the */
1160b955f6caSJeff Kirsher 				dev->stats.rx_errors++; /* end of a packet.*/
1161b955f6caSJeff Kirsher 			if (status & 0x20)
1162b955f6caSJeff Kirsher 				dev->stats.rx_frame_errors++;
1163b955f6caSJeff Kirsher 			if (status & 0x10)
1164b955f6caSJeff Kirsher 				dev->stats.rx_over_errors++;
1165b955f6caSJeff Kirsher 			if (status & 0x08)
1166b955f6caSJeff Kirsher 				dev->stats.rx_crc_errors++;
1167b955f6caSJeff Kirsher 			if (status & 0x04)
1168b955f6caSJeff Kirsher 				dev->stats.rx_fifo_errors++;
1169b955f6caSJeff Kirsher 			lp->rx_ring[entry].base &= 0x03ffffff;
1170b955f6caSJeff Kirsher 		}
1171b955f6caSJeff Kirsher 		else
1172b955f6caSJeff Kirsher 		{
1173b955f6caSJeff Kirsher 			/* Malloc up new buffer, compatible with net3. */
1174b955f6caSJeff Kirsher 			short pkt_len = (lp->rx_ring[entry].msg_length & 0xfff)-4;
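			/* The chip's message byte count includes the trailing FCS,
			   so drop 4 bytes to get the length passed up the stack. */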
1175b955f6caSJeff Kirsher 			struct sk_buff *skb;
1176b955f6caSJeff Kirsher 
1177b955f6caSJeff Kirsher 			if(pkt_len<60)
1178b955f6caSJeff Kirsher 			{
1179b955f6caSJeff Kirsher 				printk("%s: Runt packet!\n",dev->name);
1180b955f6caSJeff Kirsher 				dev->stats.rx_errors++;
1181b955f6caSJeff Kirsher 			}
1182b955f6caSJeff Kirsher 			else
1183b955f6caSJeff Kirsher 			{
1184b955f6caSJeff Kirsher 				skb = dev_alloc_skb(pkt_len+2);
1185b955f6caSJeff Kirsher 				if (skb == NULL)
1186b955f6caSJeff Kirsher 				{
1187b955f6caSJeff Kirsher 					printk("%s: Memory squeeze, deferring packet.\n", dev->name);
1188b955f6caSJeff Kirsher 					for (i=0; i < RX_RING_SIZE; i++)
1189b955f6caSJeff Kirsher 						if (lp->rx_ring[(entry+i) & RX_RING_MOD_MASK].base < 0)
1190b955f6caSJeff Kirsher 							break;
1191b955f6caSJeff Kirsher 
1192b955f6caSJeff Kirsher 					if (i > RX_RING_SIZE -2)
1193b955f6caSJeff Kirsher 					{
1194b955f6caSJeff Kirsher 						dev->stats.rx_dropped++;
1195b955f6caSJeff Kirsher 						lp->rx_ring[entry].base |= 0x80000000;
1196b955f6caSJeff Kirsher 						lp->cur_rx++;
1197b955f6caSJeff Kirsher 					}
1198b955f6caSJeff Kirsher 					break;
1199b955f6caSJeff Kirsher 				}
1200b955f6caSJeff Kirsher 				skb_reserve(skb,2);	/* 16 byte align */
1201b955f6caSJeff Kirsher 				skb_put(skb,pkt_len);	/* Make room */
1202b955f6caSJeff Kirsher 				skb_copy_to_linear_data(skb,
1203b955f6caSJeff Kirsher 					(unsigned char *)isa_bus_to_virt((lp->rx_ring[entry].base & 0x00ffffff)),
1204b955f6caSJeff Kirsher 					pkt_len);
1205b955f6caSJeff Kirsher 				skb->protocol=eth_type_trans(skb,dev);
1206b955f6caSJeff Kirsher 				netif_rx(skb);
1207b955f6caSJeff Kirsher 				dev->stats.rx_packets++;
1208b955f6caSJeff Kirsher 				dev->stats.rx_bytes += pkt_len;
1209b955f6caSJeff Kirsher 			}
1210b955f6caSJeff Kirsher 		}
1211b955f6caSJeff Kirsher 		/* The docs say that the buffer length isn't touched, but Andrew Boyd
1212b955f6caSJeff Kirsher 		   of QNX reports that some revs of the 79C965 clear it. */
1213b955f6caSJeff Kirsher 		lp->rx_ring[entry].buf_length = -PKT_BUF_SZ;
1214b955f6caSJeff Kirsher 		lp->rx_ring[entry].base |= 0x80000000;
1215b955f6caSJeff Kirsher 		entry = (++lp->cur_rx) & RX_RING_MOD_MASK;
1216b955f6caSJeff Kirsher 	}
1217b955f6caSJeff Kirsher 
1218b955f6caSJeff Kirsher 	/* We should check that at least two ring entries are free.	 If not,
1219b955f6caSJeff Kirsher 	   we should free one and mark stats->rx_dropped++. */
1220b955f6caSJeff Kirsher 
1221b955f6caSJeff Kirsher 	return 0;
1222b955f6caSJeff Kirsher }
1223b955f6caSJeff Kirsher 
1224b955f6caSJeff Kirsher static int
1225b955f6caSJeff Kirsher lance_close(struct net_device *dev)
1226b955f6caSJeff Kirsher {
1227b955f6caSJeff Kirsher 	int ioaddr = dev->base_addr;
1228b955f6caSJeff Kirsher 	struct lance_private *lp = dev->ml_priv;
1229b955f6caSJeff Kirsher 
1230b955f6caSJeff Kirsher 	netif_stop_queue (dev);
1231b955f6caSJeff Kirsher 
1232b955f6caSJeff Kirsher 	if (chip_table[lp->chip_version].flags & LANCE_HAS_MISSED_FRAME) {
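		/* On chips flagged LANCE_HAS_MISSED_FRAME (the newer PCnet parts),
		   CSR112 counts frames missed for lack of a receive descriptor. */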
1233b955f6caSJeff Kirsher 		outw(112, ioaddr+LANCE_ADDR);
1234b955f6caSJeff Kirsher 		dev->stats.rx_missed_errors = inw(ioaddr+LANCE_DATA);
1235b955f6caSJeff Kirsher 	}
1236b955f6caSJeff Kirsher 	outw(0, ioaddr+LANCE_ADDR);
1237b955f6caSJeff Kirsher 
1238b955f6caSJeff Kirsher 	if (lance_debug > 1)
1239b955f6caSJeff Kirsher 		printk("%s: Shutting down ethercard, status was %2.2x.\n",
1240b955f6caSJeff Kirsher 			   dev->name, inw(ioaddr+LANCE_DATA));
1241b955f6caSJeff Kirsher 
1242b955f6caSJeff Kirsher 	/* We stop the LANCE here -- it occasionally polls
1243b955f6caSJeff Kirsher 	   memory if we don't. */
1244b955f6caSJeff Kirsher 	outw(0x0004, ioaddr+LANCE_DATA);
1245b955f6caSJeff Kirsher 
1246b955f6caSJeff Kirsher 	if (dev->dma != 4)
1247b955f6caSJeff Kirsher 	{
1248b955f6caSJeff Kirsher 		unsigned long flags=claim_dma_lock();
1249b955f6caSJeff Kirsher 		disable_dma(dev->dma);
1250b955f6caSJeff Kirsher 		release_dma_lock(flags);
1251b955f6caSJeff Kirsher 	}
1252b955f6caSJeff Kirsher 	free_irq(dev->irq, dev);
1253b955f6caSJeff Kirsher 
1254b955f6caSJeff Kirsher 	lance_purge_ring(dev);
1255b955f6caSJeff Kirsher 
1256b955f6caSJeff Kirsher 	return 0;
1257b955f6caSJeff Kirsher }
1258b955f6caSJeff Kirsher 
1259b955f6caSJeff Kirsher static struct net_device_stats *lance_get_stats(struct net_device *dev)
1260b955f6caSJeff Kirsher {
1261b955f6caSJeff Kirsher 	struct lance_private *lp = dev->ml_priv;
1262b955f6caSJeff Kirsher 
1263b955f6caSJeff Kirsher 	if (chip_table[lp->chip_version].flags & LANCE_HAS_MISSED_FRAME) {
1264b955f6caSJeff Kirsher 		short ioaddr = dev->base_addr;
1265b955f6caSJeff Kirsher 		short saved_addr;
1266b955f6caSJeff Kirsher 		unsigned long flags;
1267b955f6caSJeff Kirsher 
1268b955f6caSJeff Kirsher 		spin_lock_irqsave(&lp->devlock, flags);
1269b955f6caSJeff Kirsher 		saved_addr = inw(ioaddr+LANCE_ADDR);
1270b955f6caSJeff Kirsher 		outw(112, ioaddr+LANCE_ADDR);
1271b955f6caSJeff Kirsher 		dev->stats.rx_missed_errors = inw(ioaddr+LANCE_DATA);
1272b955f6caSJeff Kirsher 		outw(saved_addr, ioaddr+LANCE_ADDR);
1273b955f6caSJeff Kirsher 		spin_unlock_irqrestore(&lp->devlock, flags);
1274b955f6caSJeff Kirsher 	}
1275b955f6caSJeff Kirsher 
1276b955f6caSJeff Kirsher 	return &dev->stats;
1277b955f6caSJeff Kirsher }
1278b955f6caSJeff Kirsher 
1279b955f6caSJeff Kirsher /* Set or clear the multicast filter for this adaptor.
1280b955f6caSJeff Kirsher  */
1281b955f6caSJeff Kirsher 
1282b955f6caSJeff Kirsher static void set_multicast_list(struct net_device *dev)
1283b955f6caSJeff Kirsher {
1284b955f6caSJeff Kirsher 	short ioaddr = dev->base_addr;
1285b955f6caSJeff Kirsher 
1286b955f6caSJeff Kirsher 	outw(0, ioaddr+LANCE_ADDR);
1287b955f6caSJeff Kirsher 	outw(0x0004, ioaddr+LANCE_DATA); /* Temporarily stop the lance.	 */
1288b955f6caSJeff Kirsher 
1289b955f6caSJeff Kirsher 	if (dev->flags&IFF_PROMISC) {
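		/* CSR15 is the mode register; bit 15 (PROM) enables promiscuous
		   reception. */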
1290b955f6caSJeff Kirsher 		outw(15, ioaddr+LANCE_ADDR);
1291b955f6caSJeff Kirsher 		outw(0x8000, ioaddr+LANCE_DATA); /* Set promiscuous mode */
1292b955f6caSJeff Kirsher 	} else {
1293b955f6caSJeff Kirsher 		short multicast_table[4];
1294b955f6caSJeff Kirsher 		int i;
1295b955f6caSJeff Kirsher 		int num_addrs=netdev_mc_count(dev);
1296b955f6caSJeff Kirsher 		if(dev->flags&IFF_ALLMULTI)
1297b955f6caSJeff Kirsher 			num_addrs=1;
1298b955f6caSJeff Kirsher 		/* FIXIT: We don't use the multicast table, but rely on upper-layer filtering. */
1299b955f6caSJeff Kirsher 		memset(multicast_table, (num_addrs == 0) ? 0 : -1, sizeof(multicast_table));
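		/* CSR8-CSR11 hold the 64-bit logical address filter: all-ones
		   accepts every multicast frame, all-zeroes rejects them all. */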
1300b955f6caSJeff Kirsher 		for (i = 0; i < 4; i++) {
1301b955f6caSJeff Kirsher 			outw(8 + i, ioaddr+LANCE_ADDR);
1302b955f6caSJeff Kirsher 			outw(multicast_table[i], ioaddr+LANCE_DATA);
1303b955f6caSJeff Kirsher 		}
1304b955f6caSJeff Kirsher 		outw(15, ioaddr+LANCE_ADDR);
1305b955f6caSJeff Kirsher 		outw(0x0000, ioaddr+LANCE_DATA); /* Unset promiscuous mode */
1306b955f6caSJeff Kirsher 	}
1307b955f6caSJeff Kirsher 
1308b955f6caSJeff Kirsher 	lance_restart(dev, 0x0142, 0); /*  Resume normal operation */
1309b955f6caSJeff Kirsher 
1310b955f6caSJeff Kirsher }
1311b955f6caSJeff Kirsher 