xref: /openbmc/linux/drivers/net/ethernet/amd/lance.c (revision b0b815a3)
1b955f6caSJeff Kirsher /* lance.c: An AMD LANCE/PCnet ethernet driver for Linux. */
2b955f6caSJeff Kirsher /*
3b955f6caSJeff Kirsher 	Written/copyright 1993-1998 by Donald Becker.
4b955f6caSJeff Kirsher 
5b955f6caSJeff Kirsher 	Copyright 1993 United States Government as represented by the
6b955f6caSJeff Kirsher 	Director, National Security Agency.
7b955f6caSJeff Kirsher 	This software may be used and distributed according to the terms
8b955f6caSJeff Kirsher 	of the GNU General Public License, incorporated herein by reference.
9b955f6caSJeff Kirsher 
10b955f6caSJeff Kirsher 	This driver is for the Allied Telesis AT1500 and HP J2405A, and should work
11b955f6caSJeff Kirsher 	with most other LANCE-based bus-master (NE2100/NE2500) ethercards.
12b955f6caSJeff Kirsher 
13b955f6caSJeff Kirsher 	The author may be reached as becker@scyld.com, or C/O
14b955f6caSJeff Kirsher 	Scyld Computing Corporation
15b955f6caSJeff Kirsher 	410 Severn Ave., Suite 210
16b955f6caSJeff Kirsher 	Annapolis MD 21403
17b955f6caSJeff Kirsher 
18b955f6caSJeff Kirsher 	Andrey V. Savochkin:
19b955f6caSJeff Kirsher 	- alignment problem with 1.3.* kernel and some minor changes.
20b955f6caSJeff Kirsher 	Thomas Bogendoerfer (tsbogend@bigbug.franken.de):
21b955f6caSJeff Kirsher 	- added support for Linux/Alpha, but removed most of it, because
22b955f6caSJeff Kirsher         it worked only for the PCI chip.
23b955f6caSJeff Kirsher       - added hook for the 32bit lance driver
24b955f6caSJeff Kirsher       - added PCnetPCI II (79C970A) to chip table
25b955f6caSJeff Kirsher 	Paul Gortmaker (gpg109@rsphy1.anu.edu.au):
26b955f6caSJeff Kirsher 	- hopefully fix above so Linux/Alpha can use ISA cards too.
27b955f6caSJeff Kirsher     8/20/96 Fixed 7990 autoIRQ failure and reversed unneeded alignment -djb
28b955f6caSJeff Kirsher     v1.12 10/27/97 Module support -djb
29b955f6caSJeff Kirsher     v1.14  2/3/98 Module support modified, made PCI support optional -djb
30b955f6caSJeff Kirsher     v1.15 5/27/99 Fixed bug in the cleanup_module(). dev->priv was freed
31b955f6caSJeff Kirsher                   before unregister_netdev() which caused NULL pointer
32b955f6caSJeff Kirsher                   reference later in the chain (in rtnetlink_fill_ifinfo())
33b955f6caSJeff Kirsher                   -- Mika Kuoppala <miku@iki.fi>
34b955f6caSJeff Kirsher 
35b955f6caSJeff Kirsher     Forward ported v1.14 to 2.1.129, merged the PCI and misc changes from
36b955f6caSJeff Kirsher     the 2.1 version of the old driver - Alan Cox
37b955f6caSJeff Kirsher 
38b955f6caSJeff Kirsher     Get rid of check_region, check kmalloc return in lance_probe1
39b955f6caSJeff Kirsher     Arnaldo Carvalho de Melo <acme@conectiva.com.br> - 11/01/2001
40b955f6caSJeff Kirsher 
41b955f6caSJeff Kirsher 	Reworked detection, added support for Racal InterLan EtherBlaster cards
42b955f6caSJeff Kirsher 	Vesselin Kostadinov <vesok at yahoo dot com > - 22/4/2004
43b955f6caSJeff Kirsher */
44b955f6caSJeff Kirsher 
45b955f6caSJeff Kirsher static const char version[] = "lance.c:v1.16 2006/11/09 dplatt@3do.com, becker@cesdis.gsfc.nasa.gov\n";
46b955f6caSJeff Kirsher 
47b955f6caSJeff Kirsher #include <linux/module.h>
48b955f6caSJeff Kirsher #include <linux/kernel.h>
49b955f6caSJeff Kirsher #include <linux/string.h>
50b955f6caSJeff Kirsher #include <linux/delay.h>
51b955f6caSJeff Kirsher #include <linux/errno.h>
52b955f6caSJeff Kirsher #include <linux/ioport.h>
53b955f6caSJeff Kirsher #include <linux/slab.h>
54b955f6caSJeff Kirsher #include <linux/interrupt.h>
55b955f6caSJeff Kirsher #include <linux/pci.h>
56b955f6caSJeff Kirsher #include <linux/init.h>
57b955f6caSJeff Kirsher #include <linux/netdevice.h>
58b955f6caSJeff Kirsher #include <linux/etherdevice.h>
59b955f6caSJeff Kirsher #include <linux/skbuff.h>
60b955f6caSJeff Kirsher #include <linux/mm.h>
61b955f6caSJeff Kirsher #include <linux/bitops.h>
62b955f6caSJeff Kirsher 
63b955f6caSJeff Kirsher #include <asm/io.h>
64b955f6caSJeff Kirsher #include <asm/dma.h>
65b955f6caSJeff Kirsher 
66b955f6caSJeff Kirsher static unsigned int lance_portlist[] __initdata = { 0x300, 0x320, 0x340, 0x360, 0};
67b955f6caSJeff Kirsher static int lance_probe1(struct net_device *dev, int ioaddr, int irq, int options);
68b955f6caSJeff Kirsher static int __init do_lance_probe(struct net_device *dev);
69b955f6caSJeff Kirsher 
70b955f6caSJeff Kirsher 
71b955f6caSJeff Kirsher static struct card {
72b955f6caSJeff Kirsher 	char id_offset14;
73b955f6caSJeff Kirsher 	char id_offset15;
74b955f6caSJeff Kirsher } cards[] = {
75b955f6caSJeff Kirsher 	{	//"normal"
76b955f6caSJeff Kirsher 		.id_offset14 = 0x57,
77b955f6caSJeff Kirsher 		.id_offset15 = 0x57,
78b955f6caSJeff Kirsher 	},
79b955f6caSJeff Kirsher 	{	//NI6510EB
80b955f6caSJeff Kirsher 		.id_offset14 = 0x52,
81b955f6caSJeff Kirsher 		.id_offset15 = 0x44,
82b955f6caSJeff Kirsher 	},
83b955f6caSJeff Kirsher 	{	//Racal InterLan EtherBlaster
84b955f6caSJeff Kirsher 		.id_offset14 = 0x52,
85b955f6caSJeff Kirsher 		.id_offset15 = 0x49,
86b955f6caSJeff Kirsher 	},
87b955f6caSJeff Kirsher };
88b955f6caSJeff Kirsher #define NUM_CARDS 3
89b955f6caSJeff Kirsher 
90b955f6caSJeff Kirsher #ifdef LANCE_DEBUG
91b955f6caSJeff Kirsher static int lance_debug = LANCE_DEBUG;
92b955f6caSJeff Kirsher #else
93b955f6caSJeff Kirsher static int lance_debug = 1;
94b955f6caSJeff Kirsher #endif
95b955f6caSJeff Kirsher 
96b955f6caSJeff Kirsher /*
97b955f6caSJeff Kirsher 				Theory of Operation
98b955f6caSJeff Kirsher 
99b955f6caSJeff Kirsher I. Board Compatibility
100b955f6caSJeff Kirsher 
101b955f6caSJeff Kirsher This device driver is designed for the AMD 79C960, the "PCnet-ISA
102b955f6caSJeff Kirsher single-chip ethernet controller for ISA".  This chip is used in a wide
103b955f6caSJeff Kirsher variety of boards from vendors such as Allied Telesis, HP, Kingston,
104b955f6caSJeff Kirsher and Boca.  This driver is also intended to work with older AMD 7990
105b955f6caSJeff Kirsher designs, such as the NE1500 and NE2100, and newer 79C961.  For convenience,
106b955f6caSJeff Kirsher I use the name LANCE to refer to all of the AMD chips, even though it properly
107b955f6caSJeff Kirsher refers only to the original 7990.
108b955f6caSJeff Kirsher 
109b955f6caSJeff Kirsher II. Board-specific settings
110b955f6caSJeff Kirsher 
111b955f6caSJeff Kirsher The driver is designed to work with boards that use the faster
112b955f6caSJeff Kirsher bus-master mode, rather than the shared memory mode.  (Only older designs
113b955f6caSJeff Kirsher have on-board buffer memory needed to support the slower shared memory mode.)
114b955f6caSJeff Kirsher 
115b955f6caSJeff Kirsher Most ISA boards have jumpered settings for the I/O base, IRQ line, and DMA
116b955f6caSJeff Kirsher channel.  This driver probes the likely base addresses:
117b955f6caSJeff Kirsher {0x300, 0x320, 0x340, 0x360}.
118b955f6caSJeff Kirsher After the board is found it generates a DMA-timeout interrupt and uses
119b955f6caSJeff Kirsher autoIRQ to find the IRQ line.  The DMA channel can be set with the low bits
120b955f6caSJeff Kirsher of the otherwise-unused dev->mem_start value (aka PARAM1).  If unset it is
121b955f6caSJeff Kirsher probed for by enabling each free DMA channel in turn and checking if
122b955f6caSJeff Kirsher initialization succeeds.
123b955f6caSJeff Kirsher 
124b955f6caSJeff Kirsher The HP-J2405A board is an exception: with this board it is easy to read the
125b955f6caSJeff Kirsher EEPROM-set values for the base, IRQ, and DMA.  (Of course you must already
126b955f6caSJeff Kirsher _know_ the base address -- that field is for writing the EEPROM.)
127b955f6caSJeff Kirsher 
128b955f6caSJeff Kirsher III. Driver operation
129b955f6caSJeff Kirsher 
130b955f6caSJeff Kirsher IIIa. Ring buffers
131b955f6caSJeff Kirsher The LANCE uses ring buffers of Tx and Rx descriptors.  Each entry describes
132b955f6caSJeff Kirsher the base and length of the data buffer, along with status bits.	 The length
133b955f6caSJeff Kirsher of these buffers is set by LANCE_LOG_{RX,TX}_BUFFERS, which is log_2() of
134b955f6caSJeff Kirsher the buffer length (rather than being directly the buffer length) for
135b955f6caSJeff Kirsher implementation ease.  The current default values are 4 (Tx) and 4 (Rx), which
136b955f6caSJeff Kirsher leads to ring sizes of 16 (Tx) and 16 (Rx).  Increasing the number of ring entries
137b955f6caSJeff Kirsher needlessly uses extra space and reduces the chance that an upper layer will
138b955f6caSJeff Kirsher be able to reorder queued Tx packets based on priority.	 Decreasing the number
139b955f6caSJeff Kirsher of entries makes it more difficult to achieve back-to-back packet transmission
140b955f6caSJeff Kirsher and increases the chance that Rx ring will overflow.  (Consider the worst case
141b955f6caSJeff Kirsher of receiving back-to-back minimum-sized packets.)
142b955f6caSJeff Kirsher 
143b955f6caSJeff Kirsher The LANCE has the capability to "chain" both Rx and Tx buffers, but this driver
144b955f6caSJeff Kirsher statically allocates full-sized (slightly oversized -- PKT_BUF_SZ) buffers to
145b955f6caSJeff Kirsher avoid the administrative overhead. For the Rx side this avoids dynamically
146b955f6caSJeff Kirsher allocating full-sized buffers "just in case", at the expense of a
147b955f6caSJeff Kirsher memory-to-memory data copy for each packet received.  For most systems this
148b955f6caSJeff Kirsher is a good tradeoff: the Rx buffer will always be in low memory, the copy
149b955f6caSJeff Kirsher is inexpensive, and it primes the cache for later packet processing.  For Tx
150b955f6caSJeff Kirsher the buffers are only used when needed as low-memory bounce buffers.
151b955f6caSJeff Kirsher 
152b955f6caSJeff Kirsher IIIb. 16M memory limitations
153b955f6caSJeff Kirsher For the ISA bus master mode all structures used directly by the LANCE,
154b955f6caSJeff Kirsher the initialization block, Rx and Tx rings, and data buffers, must be
155b955f6caSJeff Kirsher accessible from the ISA bus, i.e. in the lower 16M of real memory.
156b955f6caSJeff Kirsher This is a problem for current Linux kernels on >16M machines. The network
157b955f6caSJeff Kirsher devices are initialized after memory initialization, and the kernel doles out
158b955f6caSJeff Kirsher memory from the top of memory downward.	 The current solution is to have a
159b955f6caSJeff Kirsher special network initialization routine that's called before memory
160b955f6caSJeff Kirsher initialization; this will eventually be generalized for all network devices.
161b955f6caSJeff Kirsher As mentioned before, low-memory "bounce-buffers" are used when needed.
162b955f6caSJeff Kirsher 
163b955f6caSJeff Kirsher IIIc. Synchronization
164b955f6caSJeff Kirsher The driver runs as two independent, single-threaded flows of control.  One
165b955f6caSJeff Kirsher is the send-packet routine, which enforces single-threaded use by the
166b955f6caSJeff Kirsher dev->tbusy flag.  The other thread is the interrupt handler, which is single
167b955f6caSJeff Kirsher threaded by the hardware and other software.
168b955f6caSJeff Kirsher 
169b955f6caSJeff Kirsher The send packet thread has partial control over the Tx ring and 'dev->tbusy'
170b955f6caSJeff Kirsher flag.  It sets the tbusy flag whenever it queues a Tx packet.  If the next
171b955f6caSJeff Kirsher queue slot is empty, it clears the tbusy flag when finished; otherwise it sets
172b955f6caSJeff Kirsher the 'lp->tx_full' flag.
173b955f6caSJeff Kirsher 
174b955f6caSJeff Kirsher The interrupt handler has exclusive control over the Rx ring and records stats
175b955f6caSJeff Kirsher from the Tx ring. (The Tx-done interrupt can't be selectively turned off, so
176b955f6caSJeff Kirsher we can't avoid the interrupt overhead by having the Tx routine reap the Tx
177b955f6caSJeff Kirsher stats.)	 After reaping the stats, it marks the queue entry as empty by setting
178b955f6caSJeff Kirsher the 'base' to zero. Iff the 'lp->tx_full' flag is set, it clears both the
179b955f6caSJeff Kirsher tx_full and tbusy flags.
180b955f6caSJeff Kirsher 
181b955f6caSJeff Kirsher */
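
/* Illustrative example for the PARAM1/DMA note above (hypothetical values --
   use whatever the board is actually jumpered for): on a kernel with the
   driver built in, the DMA channel can be supplied through PARAM1 of the
   classic "ether=" boot option, which the boot code copies into
   dev->mem_start, e.g.

       ether=0,0x300,5,eth0      (IRQ 0 = autoprobe, I/O 0x300, PARAM1 = DMA 5)
*/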
182b955f6caSJeff Kirsher 
183b955f6caSJeff Kirsher /* Set the number of Tx and Rx buffers, using Log_2(# buffers).
184b955f6caSJeff Kirsher    Reasonable default values are 16 Tx buffers and 16 Rx buffers.
185b955f6caSJeff Kirsher    That translates to 4 and 4 (16 == 2^4).
186b955f6caSJeff Kirsher    This is a compile-time option for efficiency.
187b955f6caSJeff Kirsher    */
188b955f6caSJeff Kirsher #ifndef LANCE_LOG_TX_BUFFERS
189b955f6caSJeff Kirsher #define LANCE_LOG_TX_BUFFERS 4
190b955f6caSJeff Kirsher #define LANCE_LOG_RX_BUFFERS 4
191b955f6caSJeff Kirsher #endif
192b955f6caSJeff Kirsher 
193b955f6caSJeff Kirsher #define TX_RING_SIZE			(1 << (LANCE_LOG_TX_BUFFERS))
194b955f6caSJeff Kirsher #define TX_RING_MOD_MASK		(TX_RING_SIZE - 1)
195b955f6caSJeff Kirsher #define TX_RING_LEN_BITS		((LANCE_LOG_TX_BUFFERS) << 29)
196b955f6caSJeff Kirsher 
197b955f6caSJeff Kirsher #define RX_RING_SIZE			(1 << (LANCE_LOG_RX_BUFFERS))
198b955f6caSJeff Kirsher #define RX_RING_MOD_MASK		(RX_RING_SIZE - 1)
199b955f6caSJeff Kirsher #define RX_RING_LEN_BITS		((LANCE_LOG_RX_BUFFERS) << 29)
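
/* Worked example with the defaults above (LANCE_LOG_TX_BUFFERS ==
   LANCE_LOG_RX_BUFFERS == 4):

       TX_RING_SIZE     == 1 << 4  == 16 entries
       TX_RING_MOD_MASK == 16 - 1  == 0x0f
       TX_RING_LEN_BITS == 4 << 29 == 0x80000000

   The *_RING_LEN_BITS value occupies the top three bits of the 32-bit
   rx_ring/tx_ring words written into the initialization block below, which
   is how the chip is told log2 of each ring length. */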
200b955f6caSJeff Kirsher 
201b955f6caSJeff Kirsher #define PKT_BUF_SZ		1544
202b955f6caSJeff Kirsher 
203b955f6caSJeff Kirsher /* Offsets from base I/O address. */
204b955f6caSJeff Kirsher #define LANCE_DATA 0x10
205b955f6caSJeff Kirsher #define LANCE_ADDR 0x12
206b955f6caSJeff Kirsher #define LANCE_RESET 0x14
207b955f6caSJeff Kirsher #define LANCE_BUS_IF 0x16
208b955f6caSJeff Kirsher #define LANCE_TOTAL_SIZE 0x18
209b955f6caSJeff Kirsher 
210b955f6caSJeff Kirsher #define TX_TIMEOUT	(HZ/5)
211b955f6caSJeff Kirsher 
212b955f6caSJeff Kirsher /* The LANCE Rx and Tx ring descriptors. */
213b955f6caSJeff Kirsher struct lance_rx_head {
214b955f6caSJeff Kirsher 	s32 base;
215b955f6caSJeff Kirsher 	s16 buf_length;			/* This length is 2s complement (negative)! */
216b955f6caSJeff Kirsher 	s16 msg_length;			/* This length is "normal". */
217b955f6caSJeff Kirsher };
218b955f6caSJeff Kirsher 
219b955f6caSJeff Kirsher struct lance_tx_head {
220b955f6caSJeff Kirsher 	s32 base;
221b955f6caSJeff Kirsher 	s16 length;				/* Length is 2s complement (negative)! */
222b955f6caSJeff Kirsher 	s16 misc;
223b955f6caSJeff Kirsher };
224b955f6caSJeff Kirsher 
225b955f6caSJeff Kirsher /* The LANCE initialization block, described in databook. */
226b955f6caSJeff Kirsher struct lance_init_block {
227b955f6caSJeff Kirsher 	u16 mode;		/* Pre-set mode (reg. 15) */
228b955f6caSJeff Kirsher 	u8  phys_addr[6]; /* Physical ethernet address */
229b955f6caSJeff Kirsher 	u32 filter[2];			/* Multicast filter (unused). */
230b955f6caSJeff Kirsher 	/* Receive and transmit ring base, along with extra bits. */
231b955f6caSJeff Kirsher 	u32  rx_ring;			/* Tx and Rx ring base pointers */
232b955f6caSJeff Kirsher 	u32  tx_ring;
233b955f6caSJeff Kirsher };
234b955f6caSJeff Kirsher 
235b955f6caSJeff Kirsher struct lance_private {
236b955f6caSJeff Kirsher 	/* The Tx and Rx ring entries must be aligned on 8-byte boundaries. */
237b955f6caSJeff Kirsher 	struct lance_rx_head rx_ring[RX_RING_SIZE];
238b955f6caSJeff Kirsher 	struct lance_tx_head tx_ring[TX_RING_SIZE];
239b955f6caSJeff Kirsher 	struct lance_init_block	init_block;
240b955f6caSJeff Kirsher 	const char *name;
241b955f6caSJeff Kirsher 	/* The saved address of a sent-in-place packet/buffer, for skfree(). */
242b955f6caSJeff Kirsher 	struct sk_buff* tx_skbuff[TX_RING_SIZE];
243b955f6caSJeff Kirsher 	/* The addresses of receive-in-place skbuffs. */
244b955f6caSJeff Kirsher 	struct sk_buff* rx_skbuff[RX_RING_SIZE];
245b955f6caSJeff Kirsher 	unsigned long rx_buffs;		/* Address of Rx and Tx buffers. */
246b955f6caSJeff Kirsher 	/* Tx low-memory "bounce buffer" address. */
247b955f6caSJeff Kirsher 	char (*tx_bounce_buffs)[PKT_BUF_SZ];
248b955f6caSJeff Kirsher 	int cur_rx, cur_tx;			/* The next free ring entry */
249b955f6caSJeff Kirsher 	int dirty_rx, dirty_tx;		/* The ring entries to be free()ed. */
250b955f6caSJeff Kirsher 	int dma;
251b955f6caSJeff Kirsher 	unsigned char chip_version;	/* See lance_chip_type. */
252b955f6caSJeff Kirsher 	spinlock_t devlock;
253b955f6caSJeff Kirsher };
254b955f6caSJeff Kirsher 
255b955f6caSJeff Kirsher #define LANCE_MUST_PAD          0x00000001
256b955f6caSJeff Kirsher #define LANCE_ENABLE_AUTOSELECT 0x00000002
257b955f6caSJeff Kirsher #define LANCE_MUST_REINIT_RING  0x00000004
258b955f6caSJeff Kirsher #define LANCE_MUST_UNRESET      0x00000008
259b955f6caSJeff Kirsher #define LANCE_HAS_MISSED_FRAME  0x00000010
260b955f6caSJeff Kirsher 
261b955f6caSJeff Kirsher /* A mapping from the chip ID number to the part number and features.
262b955f6caSJeff Kirsher    These are from the datasheets -- in real life the '970 version
263b955f6caSJeff Kirsher    reportedly has the same ID as the '965. */
264b955f6caSJeff Kirsher static struct lance_chip_type {
265b955f6caSJeff Kirsher 	int id_number;
266b955f6caSJeff Kirsher 	const char *name;
267b955f6caSJeff Kirsher 	int flags;
268b955f6caSJeff Kirsher } chip_table[] = {
269b955f6caSJeff Kirsher 	{0x0000, "LANCE 7990",				/* Ancient lance chip.  */
270b955f6caSJeff Kirsher 		LANCE_MUST_PAD + LANCE_MUST_UNRESET},
271b955f6caSJeff Kirsher 	{0x0003, "PCnet/ISA 79C960",		/* 79C960 PCnet/ISA.  */
272b955f6caSJeff Kirsher 		LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
273b955f6caSJeff Kirsher 			LANCE_HAS_MISSED_FRAME},
274b955f6caSJeff Kirsher 	{0x2260, "PCnet/ISA+ 79C961",		/* 79C961 PCnet/ISA+, Plug-n-Play.  */
275b955f6caSJeff Kirsher 		LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
276b955f6caSJeff Kirsher 			LANCE_HAS_MISSED_FRAME},
277b955f6caSJeff Kirsher 	{0x2420, "PCnet/PCI 79C970",		/* 79C970 or 79C974 PCnet-SCSI, PCI. */
278b955f6caSJeff Kirsher 		LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
279b955f6caSJeff Kirsher 			LANCE_HAS_MISSED_FRAME},
280b955f6caSJeff Kirsher 	/* Bug: the PCnet/PCI actually uses the PCnet/VLB ID number, so just call
281b955f6caSJeff Kirsher 		it the PCnet32. */
282b955f6caSJeff Kirsher 	{0x2430, "PCnet32",					/* 79C965 PCnet for VL bus. */
283b955f6caSJeff Kirsher 		LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
284b955f6caSJeff Kirsher 			LANCE_HAS_MISSED_FRAME},
285b955f6caSJeff Kirsher         {0x2621, "PCnet/PCI-II 79C970A",        /* 79C970A PCnet/PCI II. */
286b955f6caSJeff Kirsher                 LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
287b955f6caSJeff Kirsher                         LANCE_HAS_MISSED_FRAME},
288b955f6caSJeff Kirsher 	{0x0, 	 "PCnet (unknown)",
289b955f6caSJeff Kirsher 		LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
290b955f6caSJeff Kirsher 			LANCE_HAS_MISSED_FRAME},
291b955f6caSJeff Kirsher };
292b955f6caSJeff Kirsher 
293b955f6caSJeff Kirsher enum {OLD_LANCE = 0, PCNET_ISA=1, PCNET_ISAP=2, PCNET_PCI=3, PCNET_VLB=4, PCNET_PCI_II=5, LANCE_UNKNOWN=6};
294b955f6caSJeff Kirsher 
295b955f6caSJeff Kirsher 
296b955f6caSJeff Kirsher /* Non-zero if lance_probe1() needs to allocate low-memory bounce buffers.
297b955f6caSJeff Kirsher    Assume yes until we know the memory size. */
298b955f6caSJeff Kirsher static unsigned char lance_need_isa_bounce_buffers = 1;
299b955f6caSJeff Kirsher 
300b955f6caSJeff Kirsher static int lance_open(struct net_device *dev);
301b955f6caSJeff Kirsher static void lance_init_ring(struct net_device *dev, gfp_t mode);
302b955f6caSJeff Kirsher static netdev_tx_t lance_start_xmit(struct sk_buff *skb,
303b955f6caSJeff Kirsher 				    struct net_device *dev);
304b955f6caSJeff Kirsher static int lance_rx(struct net_device *dev);
305b955f6caSJeff Kirsher static irqreturn_t lance_interrupt(int irq, void *dev_id);
306b955f6caSJeff Kirsher static int lance_close(struct net_device *dev);
307b955f6caSJeff Kirsher static struct net_device_stats *lance_get_stats(struct net_device *dev);
308b955f6caSJeff Kirsher static void set_multicast_list(struct net_device *dev);
3090290bd29SMichael S. Tsirkin static void lance_tx_timeout (struct net_device *dev, unsigned int txqueue);
310b955f6caSJeff Kirsher 
311b955f6caSJeff Kirsher 
312b955f6caSJeff Kirsher 
313b955f6caSJeff Kirsher #ifdef MODULE
314b955f6caSJeff Kirsher #define MAX_CARDS		8	/* Max number of interfaces (cards) per module */
315b955f6caSJeff Kirsher 
316b955f6caSJeff Kirsher static struct net_device *dev_lance[MAX_CARDS];
317b955f6caSJeff Kirsher static int io[MAX_CARDS];
318b955f6caSJeff Kirsher static int dma[MAX_CARDS];
319b955f6caSJeff Kirsher static int irq[MAX_CARDS];
320b955f6caSJeff Kirsher 
321df298408SDavid Howells module_param_hw_array(io, int, ioport, NULL, 0);
322df298408SDavid Howells module_param_hw_array(dma, int, dma, NULL, 0);
323df298408SDavid Howells module_param_hw_array(irq, int, irq, NULL, 0);
324b955f6caSJeff Kirsher module_param(lance_debug, int, 0);
325b955f6caSJeff Kirsher MODULE_PARM_DESC(io, "LANCE/PCnet I/O base address(es), required");
326b955f6caSJeff Kirsher MODULE_PARM_DESC(dma, "LANCE/PCnet ISA DMA channel (ignored for some devices)");
327b955f6caSJeff Kirsher MODULE_PARM_DESC(irq, "LANCE/PCnet IRQ number (ignored for some devices)");
328b955f6caSJeff Kirsher MODULE_PARM_DESC(lance_debug, "LANCE/PCnet debug level (0-7)");
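
/* Illustrative module usage (the values are placeholders, not defaults):

       modprobe lance io=0x300,0x320 irq=5,9 dma=5,6

   "io" is required; "irq" and "dma" may be omitted for boards whose settings
   the driver can probe or read from the chip/EEPROM. */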
329b955f6caSJeff Kirsher 
330a07d8ecfSArnd Bergmann static int __init lance_init_module(void)
331b955f6caSJeff Kirsher {
332b955f6caSJeff Kirsher 	struct net_device *dev;
333b955f6caSJeff Kirsher 	int this_dev, found = 0;
334b955f6caSJeff Kirsher 
335b955f6caSJeff Kirsher 	for (this_dev = 0; this_dev < MAX_CARDS; this_dev++) {
336b955f6caSJeff Kirsher 		if (io[this_dev] == 0)  {
337b955f6caSJeff Kirsher 			if (this_dev != 0) /* only complain once */
338b955f6caSJeff Kirsher 				break;
339b955f6caSJeff Kirsher 			printk(KERN_NOTICE "lance.c: Module autoprobing not allowed. Append \"io=0xNNN\" value(s).\n");
340b955f6caSJeff Kirsher 			return -EPERM;
341b955f6caSJeff Kirsher 		}
342b955f6caSJeff Kirsher 		dev = alloc_etherdev(0);
343b955f6caSJeff Kirsher 		if (!dev)
344b955f6caSJeff Kirsher 			break;
345b955f6caSJeff Kirsher 		dev->irq = irq[this_dev];
346b955f6caSJeff Kirsher 		dev->base_addr = io[this_dev];
347b955f6caSJeff Kirsher 		dev->dma = dma[this_dev];
348b955f6caSJeff Kirsher 		if (do_lance_probe(dev) == 0) {
349b955f6caSJeff Kirsher 			dev_lance[found++] = dev;
350b955f6caSJeff Kirsher 			continue;
351b955f6caSJeff Kirsher 		}
352b955f6caSJeff Kirsher 		free_netdev(dev);
353b955f6caSJeff Kirsher 		break;
354b955f6caSJeff Kirsher 	}
355b955f6caSJeff Kirsher 	if (found != 0)
356b955f6caSJeff Kirsher 		return 0;
357b955f6caSJeff Kirsher 	return -ENXIO;
358b955f6caSJeff Kirsher }
359a07d8ecfSArnd Bergmann module_init(lance_init_module);
360b955f6caSJeff Kirsher 
361b955f6caSJeff Kirsher static void cleanup_card(struct net_device *dev)
362b955f6caSJeff Kirsher {
363b955f6caSJeff Kirsher 	struct lance_private *lp = dev->ml_priv;
364b955f6caSJeff Kirsher 	if (dev->dma != 4)
365b955f6caSJeff Kirsher 		free_dma(dev->dma);
366b955f6caSJeff Kirsher 	release_region(dev->base_addr, LANCE_TOTAL_SIZE);
367b955f6caSJeff Kirsher 	kfree(lp->tx_bounce_buffs);
368b955f6caSJeff Kirsher 	kfree((void*)lp->rx_buffs);
369b955f6caSJeff Kirsher 	kfree(lp);
370b955f6caSJeff Kirsher }
371b955f6caSJeff Kirsher 
372a07d8ecfSArnd Bergmann static void __exit lance_cleanup_module(void)
373b955f6caSJeff Kirsher {
374b955f6caSJeff Kirsher 	int this_dev;
375b955f6caSJeff Kirsher 
376b955f6caSJeff Kirsher 	for (this_dev = 0; this_dev < MAX_CARDS; this_dev++) {
377b955f6caSJeff Kirsher 		struct net_device *dev = dev_lance[this_dev];
378b955f6caSJeff Kirsher 		if (dev) {
379b955f6caSJeff Kirsher 			unregister_netdev(dev);
380b955f6caSJeff Kirsher 			cleanup_card(dev);
381b955f6caSJeff Kirsher 			free_netdev(dev);
382b955f6caSJeff Kirsher 		}
383b955f6caSJeff Kirsher 	}
384b955f6caSJeff Kirsher }
385a07d8ecfSArnd Bergmann module_exit(lance_cleanup_module);
386b955f6caSJeff Kirsher #endif /* MODULE */
387b955f6caSJeff Kirsher MODULE_LICENSE("GPL");
388b955f6caSJeff Kirsher 
389b955f6caSJeff Kirsher 
390b955f6caSJeff Kirsher /* Starting in v2.1.*, the LANCE/PCnet probe is similar to the other
391b955f6caSJeff Kirsher    board probes, now that kmalloc() can allocate ISA DMA-able regions.
392b955f6caSJeff Kirsher    This also allows the LANCE driver to be used as a module.
393b955f6caSJeff Kirsher    */
394b955f6caSJeff Kirsher static int __init do_lance_probe(struct net_device *dev)
395b955f6caSJeff Kirsher {
396b955f6caSJeff Kirsher 	unsigned int *port;
397b955f6caSJeff Kirsher 	int result;
398b955f6caSJeff Kirsher 
399b955f6caSJeff Kirsher 	if (high_memory <= phys_to_virt(16*1024*1024))
400b955f6caSJeff Kirsher 		lance_need_isa_bounce_buffers = 0;
401b955f6caSJeff Kirsher 
402b955f6caSJeff Kirsher 	for (port = lance_portlist; *port; port++) {
403b955f6caSJeff Kirsher 		int ioaddr = *port;
404b955f6caSJeff Kirsher 		struct resource *r = request_region(ioaddr, LANCE_TOTAL_SIZE,
405b955f6caSJeff Kirsher 							"lance-probe");
406b955f6caSJeff Kirsher 
407b955f6caSJeff Kirsher 		if (r) {
408b955f6caSJeff Kirsher 			/* Detect the card with minimal I/O reads */
409b955f6caSJeff Kirsher 			char offset14 = inb(ioaddr + 14);
410b955f6caSJeff Kirsher 			int card;
411b955f6caSJeff Kirsher 			for (card = 0; card < NUM_CARDS; ++card)
412b955f6caSJeff Kirsher 				if (cards[card].id_offset14 == offset14)
413b955f6caSJeff Kirsher 					break;
414b955f6caSJeff Kirsher 			if (card < NUM_CARDS) {/*yes, the first byte matches*/
415b955f6caSJeff Kirsher 				char offset15 = inb(ioaddr + 15);
416b955f6caSJeff Kirsher 				for (card = 0; card < NUM_CARDS; ++card)
417b955f6caSJeff Kirsher 					if ((cards[card].id_offset14 == offset14) &&
418b955f6caSJeff Kirsher 						(cards[card].id_offset15 == offset15))
419b955f6caSJeff Kirsher 						break;
420b955f6caSJeff Kirsher 			}
421b955f6caSJeff Kirsher 			if (card < NUM_CARDS) { /*Signature OK*/
422b955f6caSJeff Kirsher 				result = lance_probe1(dev, ioaddr, 0, 0);
423b955f6caSJeff Kirsher 				if (!result) {
424b955f6caSJeff Kirsher 					struct lance_private *lp = dev->ml_priv;
425b955f6caSJeff Kirsher 					int ver = lp->chip_version;
426b955f6caSJeff Kirsher 
427b955f6caSJeff Kirsher 					r->name = chip_table[ver].name;
428b955f6caSJeff Kirsher 					return 0;
429b955f6caSJeff Kirsher 				}
430b955f6caSJeff Kirsher 			}
431b955f6caSJeff Kirsher 			release_region(ioaddr, LANCE_TOTAL_SIZE);
432b955f6caSJeff Kirsher 		}
433b955f6caSJeff Kirsher 	}
434b955f6caSJeff Kirsher 	return -ENODEV;
435b955f6caSJeff Kirsher }
436b955f6caSJeff Kirsher 
437b955f6caSJeff Kirsher #ifndef MODULE
438b955f6caSJeff Kirsher struct net_device * __init lance_probe(int unit)
439b955f6caSJeff Kirsher {
440b955f6caSJeff Kirsher 	struct net_device *dev = alloc_etherdev(0);
441b955f6caSJeff Kirsher 	int err;
442b955f6caSJeff Kirsher 
443b955f6caSJeff Kirsher 	if (!dev)
444b955f6caSJeff Kirsher 		return ERR_PTR(-ENODEV);
445b955f6caSJeff Kirsher 
446b955f6caSJeff Kirsher 	sprintf(dev->name, "eth%d", unit);
447b955f6caSJeff Kirsher 	netdev_boot_setup_check(dev);
448b955f6caSJeff Kirsher 
449b955f6caSJeff Kirsher 	err = do_lance_probe(dev);
450b955f6caSJeff Kirsher 	if (err)
451b955f6caSJeff Kirsher 		goto out;
452b955f6caSJeff Kirsher 	return dev;
453b955f6caSJeff Kirsher out:
454b955f6caSJeff Kirsher 	free_netdev(dev);
455b955f6caSJeff Kirsher 	return ERR_PTR(err);
456b955f6caSJeff Kirsher }
457b955f6caSJeff Kirsher #endif
458b955f6caSJeff Kirsher 
459b955f6caSJeff Kirsher static const struct net_device_ops lance_netdev_ops = {
460b955f6caSJeff Kirsher 	.ndo_open 		= lance_open,
461b955f6caSJeff Kirsher 	.ndo_start_xmit		= lance_start_xmit,
462b955f6caSJeff Kirsher 	.ndo_stop		= lance_close,
463b955f6caSJeff Kirsher 	.ndo_get_stats		= lance_get_stats,
464afc4b13dSJiri Pirko 	.ndo_set_rx_mode	= set_multicast_list,
465b955f6caSJeff Kirsher 	.ndo_tx_timeout		= lance_tx_timeout,
466b955f6caSJeff Kirsher 	.ndo_set_mac_address 	= eth_mac_addr,
467b955f6caSJeff Kirsher 	.ndo_validate_addr	= eth_validate_addr,
468b955f6caSJeff Kirsher };
469b955f6caSJeff Kirsher 
470b955f6caSJeff Kirsher static int __init lance_probe1(struct net_device *dev, int ioaddr, int irq, int options)
471b955f6caSJeff Kirsher {
472b955f6caSJeff Kirsher 	struct lance_private *lp;
473b955f6caSJeff Kirsher 	unsigned long dma_channels;	/* Mark spuriously-busy DMA channels */
474b955f6caSJeff Kirsher 	int i, reset_val, lance_version;
475b955f6caSJeff Kirsher 	const char *chipname;
476b955f6caSJeff Kirsher 	/* Flags for specific chips or boards. */
477b955f6caSJeff Kirsher 	unsigned char hpJ2405A = 0;	/* HP ISA adaptor */
478b955f6caSJeff Kirsher 	int hp_builtin = 0;		/* HP on-board ethernet. */
479b955f6caSJeff Kirsher 	static int did_version;		/* Already printed version info. */
480b955f6caSJeff Kirsher 	unsigned long flags;
481b955f6caSJeff Kirsher 	int err = -ENOMEM;
482b955f6caSJeff Kirsher 	void __iomem *bios;
4830222ee53SJakub Kicinski 	u8 addr[ETH_ALEN];
484b955f6caSJeff Kirsher 
485b955f6caSJeff Kirsher 	/* First we look for special cases.
486b955f6caSJeff Kirsher 	   Check for HP's on-board ethernet by looking for 'HP' in the BIOS.
487b955f6caSJeff Kirsher 	   There are two HP versions; check the BIOS for the configuration port.
488b955f6caSJeff Kirsher 	   This method provided by L. Julliard, Laurent_Julliard@grenoble.hp.com.
489b955f6caSJeff Kirsher 	   */
490b955f6caSJeff Kirsher 	bios = ioremap(0xf00f0, 0x14);
491b955f6caSJeff Kirsher 	if (!bios)
492b955f6caSJeff Kirsher 		return -ENOMEM;
493b955f6caSJeff Kirsher 	if (readw(bios + 0x12) == 0x5048)  {
494b955f6caSJeff Kirsher 		static const short ioaddr_table[] = { 0x300, 0x320, 0x340, 0x360};
495b955f6caSJeff Kirsher 		int hp_port = (readl(bios + 1) & 1)  ? 0x499 : 0x99;
496b955f6caSJeff Kirsher 		/* We can have boards other than the built-in!  Verify this is on-board. */
497b955f6caSJeff Kirsher 		if ((inb(hp_port) & 0xc0) == 0x80 &&
498b955f6caSJeff Kirsher 		    ioaddr_table[inb(hp_port) & 3] == ioaddr)
499b955f6caSJeff Kirsher 			hp_builtin = hp_port;
500b955f6caSJeff Kirsher 	}
501b955f6caSJeff Kirsher 	iounmap(bios);
502b955f6caSJeff Kirsher 	/* We also recognize the HP Vectra on-board here, but check below. */
503b955f6caSJeff Kirsher 	hpJ2405A = (inb(ioaddr) == 0x08 && inb(ioaddr+1) == 0x00 &&
504b955f6caSJeff Kirsher 		    inb(ioaddr+2) == 0x09);
505b955f6caSJeff Kirsher 
506b955f6caSJeff Kirsher 	/* Reset the LANCE.	 */
507b955f6caSJeff Kirsher 	reset_val = inw(ioaddr+LANCE_RESET); /* Reset the LANCE */
508b955f6caSJeff Kirsher 
509b955f6caSJeff Kirsher 	/* The Un-Reset is only needed for the real NE2100, and will
510b955f6caSJeff Kirsher 	   confuse the HP board. */
511b955f6caSJeff Kirsher 	if (!hpJ2405A)
512b955f6caSJeff Kirsher 		outw(reset_val, ioaddr+LANCE_RESET);
513b955f6caSJeff Kirsher 
514b955f6caSJeff Kirsher 	outw(0x0000, ioaddr+LANCE_ADDR); /* Switch to window 0 */
515b955f6caSJeff Kirsher 	if (inw(ioaddr+LANCE_DATA) != 0x0004)
516b955f6caSJeff Kirsher 		return -ENODEV;
517b955f6caSJeff Kirsher 
518b955f6caSJeff Kirsher 	/* Get the version of the chip. */
519b955f6caSJeff Kirsher 	outw(88, ioaddr+LANCE_ADDR);
520b955f6caSJeff Kirsher 	if (inw(ioaddr+LANCE_ADDR) != 88) {
521b955f6caSJeff Kirsher 		lance_version = 0;
522b955f6caSJeff Kirsher 	} else {			/* Good, it's a newer chip. */
523b955f6caSJeff Kirsher 		int chip_version = inw(ioaddr+LANCE_DATA);
524b955f6caSJeff Kirsher 		outw(89, ioaddr+LANCE_ADDR);
525b955f6caSJeff Kirsher 		chip_version |= inw(ioaddr+LANCE_DATA) << 16;
526b955f6caSJeff Kirsher 		if (lance_debug > 2)
527b955f6caSJeff Kirsher 			printk("  LANCE chip version is %#x.\n", chip_version);
528b955f6caSJeff Kirsher 		if ((chip_version & 0xfff) != 0x003)
529b955f6caSJeff Kirsher 			return -ENODEV;
530b955f6caSJeff Kirsher 		chip_version = (chip_version >> 12) & 0xffff;
531b955f6caSJeff Kirsher 		for (lance_version = 1; chip_table[lance_version].id_number; lance_version++) {
532b955f6caSJeff Kirsher 			if (chip_table[lance_version].id_number == chip_version)
533b955f6caSJeff Kirsher 				break;
534b955f6caSJeff Kirsher 		}
535b955f6caSJeff Kirsher 	}
536b955f6caSJeff Kirsher 
537b955f6caSJeff Kirsher 	/* We can't allocate private data from alloc_etherdev() because it must
538b955f6caSJeff Kirsher 	   be in an ISA DMA-able region. */
539b955f6caSJeff Kirsher 	chipname = chip_table[lance_version].name;
540b955f6caSJeff Kirsher 	printk("%s: %s at %#3x, ", dev->name, chipname, ioaddr);
541b955f6caSJeff Kirsher 
542b955f6caSJeff Kirsher 	/* There is a 16 byte station address PROM at the base address.
543b955f6caSJeff Kirsher 	   The first six bytes are the station address. */
544b955f6caSJeff Kirsher 	for (i = 0; i < 6; i++)
5450222ee53SJakub Kicinski 		addr[i] = inb(ioaddr + i);
5460222ee53SJakub Kicinski 	eth_hw_addr_set(dev, addr);
547b955f6caSJeff Kirsher 	printk("%pM", dev->dev_addr);
548b955f6caSJeff Kirsher 
549b955f6caSJeff Kirsher 	dev->base_addr = ioaddr;
550b955f6caSJeff Kirsher 	/* Make certain the data structures used by the LANCE are aligned and DMAble. */
551b955f6caSJeff Kirsher 
552b955f6caSJeff Kirsher 	lp = kzalloc(sizeof(*lp), GFP_DMA | GFP_KERNEL);
553a09f4af1SAmitoj Kaur Chawla 	if (!lp)
554a09f4af1SAmitoj Kaur Chawla 		return -ENOMEM;
555b955f6caSJeff Kirsher 	if (lance_debug > 6) printk(" (#0x%05lx)", (unsigned long)lp);
556b955f6caSJeff Kirsher 	dev->ml_priv = lp;
557b955f6caSJeff Kirsher 	lp->name = chipname;
5586da2ec56SKees Cook 	lp->rx_buffs = (unsigned long)kmalloc_array(RX_RING_SIZE, PKT_BUF_SZ,
559b955f6caSJeff Kirsher 						    GFP_DMA | GFP_KERNEL);
560b955f6caSJeff Kirsher 	if (!lp->rx_buffs)
561b955f6caSJeff Kirsher 		goto out_lp;
562b955f6caSJeff Kirsher 	if (lance_need_isa_bounce_buffers) {
5636da2ec56SKees Cook 		lp->tx_bounce_buffs = kmalloc_array(TX_RING_SIZE, PKT_BUF_SZ,
564b955f6caSJeff Kirsher 						    GFP_DMA | GFP_KERNEL);
565b955f6caSJeff Kirsher 		if (!lp->tx_bounce_buffs)
566b955f6caSJeff Kirsher 			goto out_rx;
567b955f6caSJeff Kirsher 	} else
568b955f6caSJeff Kirsher 		lp->tx_bounce_buffs = NULL;
569b955f6caSJeff Kirsher 
570b955f6caSJeff Kirsher 	lp->chip_version = lance_version;
571b955f6caSJeff Kirsher 	spin_lock_init(&lp->devlock);
572b955f6caSJeff Kirsher 
573b955f6caSJeff Kirsher 	lp->init_block.mode = 0x0003;		/* Disable Rx and Tx. */
574b955f6caSJeff Kirsher 	for (i = 0; i < 6; i++)
575b955f6caSJeff Kirsher 		lp->init_block.phys_addr[i] = dev->dev_addr[i];
576b955f6caSJeff Kirsher 	lp->init_block.filter[0] = 0x00000000;
577b955f6caSJeff Kirsher 	lp->init_block.filter[1] = 0x00000000;
578b955f6caSJeff Kirsher 	lp->init_block.rx_ring = ((u32)isa_virt_to_bus(lp->rx_ring) & 0xffffff) | RX_RING_LEN_BITS;
579b955f6caSJeff Kirsher 	lp->init_block.tx_ring = ((u32)isa_virt_to_bus(lp->tx_ring) & 0xffffff) | TX_RING_LEN_BITS;
580b955f6caSJeff Kirsher 
581b955f6caSJeff Kirsher 	outw(0x0001, ioaddr+LANCE_ADDR);
582b955f6caSJeff Kirsher 	inw(ioaddr+LANCE_ADDR);
583b955f6caSJeff Kirsher 	outw((short) (u32) isa_virt_to_bus(&lp->init_block), ioaddr+LANCE_DATA);
584b955f6caSJeff Kirsher 	outw(0x0002, ioaddr+LANCE_ADDR);
585b955f6caSJeff Kirsher 	inw(ioaddr+LANCE_ADDR);
586b955f6caSJeff Kirsher 	outw(((u32)isa_virt_to_bus(&lp->init_block)) >> 16, ioaddr+LANCE_DATA);
587b955f6caSJeff Kirsher 	outw(0x0000, ioaddr+LANCE_ADDR);
588b955f6caSJeff Kirsher 	inw(ioaddr+LANCE_ADDR);
589b955f6caSJeff Kirsher 
590b955f6caSJeff Kirsher 	if (irq) {					/* Set iff PCI card. */
591b955f6caSJeff Kirsher 		dev->dma = 4;			/* Native bus-master, no DMA channel needed. */
592b955f6caSJeff Kirsher 		dev->irq = irq;
593b955f6caSJeff Kirsher 	} else if (hp_builtin) {
594b955f6caSJeff Kirsher 		static const char dma_tbl[4] = {3, 5, 6, 0};
595b955f6caSJeff Kirsher 		static const char irq_tbl[4] = {3, 4, 5, 9};
596b955f6caSJeff Kirsher 		unsigned char port_val = inb(hp_builtin);
597b955f6caSJeff Kirsher 		dev->dma = dma_tbl[(port_val >> 4) & 3];
598b955f6caSJeff Kirsher 		dev->irq = irq_tbl[(port_val >> 2) & 3];
599b955f6caSJeff Kirsher 		printk(" HP Vectra IRQ %d DMA %d.\n", dev->irq, dev->dma);
600b955f6caSJeff Kirsher 	} else if (hpJ2405A) {
601b955f6caSJeff Kirsher 		static const char dma_tbl[4] = {3, 5, 6, 7};
602b955f6caSJeff Kirsher 		static const char irq_tbl[8] = {3, 4, 5, 9, 10, 11, 12, 15};
603b955f6caSJeff Kirsher 		short reset_val = inw(ioaddr+LANCE_RESET);
604b955f6caSJeff Kirsher 		dev->dma = dma_tbl[(reset_val >> 2) & 3];
605b955f6caSJeff Kirsher 		dev->irq = irq_tbl[(reset_val >> 4) & 7];
606b955f6caSJeff Kirsher 		printk(" HP J2405A IRQ %d DMA %d.\n", dev->irq, dev->dma);
607b955f6caSJeff Kirsher 	} else if (lance_version == PCNET_ISAP) {		/* The plug-n-play version. */
608b955f6caSJeff Kirsher 		short bus_info;
609b955f6caSJeff Kirsher 		outw(8, ioaddr+LANCE_ADDR);
610b955f6caSJeff Kirsher 		bus_info = inw(ioaddr+LANCE_BUS_IF);
611b955f6caSJeff Kirsher 		dev->dma = bus_info & 0x07;
612b955f6caSJeff Kirsher 		dev->irq = (bus_info >> 4) & 0x0F;
613b955f6caSJeff Kirsher 	} else {
614b955f6caSJeff Kirsher 		/* The DMA channel may be passed in PARAM1. */
615b955f6caSJeff Kirsher 		if (dev->mem_start & 0x07)
616b955f6caSJeff Kirsher 			dev->dma = dev->mem_start & 0x07;
617b955f6caSJeff Kirsher 	}
618b955f6caSJeff Kirsher 
619b955f6caSJeff Kirsher 	if (dev->dma == 0) {
620b955f6caSJeff Kirsher 		/* Read the DMA channel status register, so that we can avoid
621b955f6caSJeff Kirsher 		   stuck DMA channels in the DMA detection below. */
622b955f6caSJeff Kirsher 		dma_channels = ((inb(DMA1_STAT_REG) >> 4) & 0x0f) |
623b955f6caSJeff Kirsher 			(inb(DMA2_STAT_REG) & 0xf0);
624b955f6caSJeff Kirsher 	}
625b955f6caSJeff Kirsher 	err = -ENODEV;
626b955f6caSJeff Kirsher 	if (dev->irq >= 2)
627b955f6caSJeff Kirsher 		printk(" assigned IRQ %d", dev->irq);
628b955f6caSJeff Kirsher 	else if (lance_version != 0)  {	/* 7990 boards need DMA detection first. */
629b955f6caSJeff Kirsher 		unsigned long irq_mask;
630b955f6caSJeff Kirsher 
631b955f6caSJeff Kirsher 		/* To auto-IRQ we enable the initialization-done and DMA error
632b955f6caSJeff Kirsher 		   interrupts. For ISA boards we get a DMA error, but VLB and PCI
633b955f6caSJeff Kirsher 		   boards will work. */
634b955f6caSJeff Kirsher 		irq_mask = probe_irq_on();
635b955f6caSJeff Kirsher 
636b955f6caSJeff Kirsher 		/* Trigger an initialization just for the interrupt. */
637b955f6caSJeff Kirsher 		outw(0x0041, ioaddr+LANCE_DATA);
638b955f6caSJeff Kirsher 
639b955f6caSJeff Kirsher 		mdelay(20);
640b955f6caSJeff Kirsher 		dev->irq = probe_irq_off(irq_mask);
641b955f6caSJeff Kirsher 		if (dev->irq)
642b955f6caSJeff Kirsher 			printk(", probed IRQ %d", dev->irq);
643b955f6caSJeff Kirsher 		else {
644b955f6caSJeff Kirsher 			printk(", failed to detect IRQ line.\n");
645b955f6caSJeff Kirsher 			goto out_tx;
646b955f6caSJeff Kirsher 		}
647b955f6caSJeff Kirsher 
648b955f6caSJeff Kirsher 		/* Check for the initialization done bit, 0x0100, which means
649b955f6caSJeff Kirsher 		   that we don't need a DMA channel. */
650b955f6caSJeff Kirsher 		if (inw(ioaddr+LANCE_DATA) & 0x0100)
651b955f6caSJeff Kirsher 			dev->dma = 4;
652b955f6caSJeff Kirsher 	}
653b955f6caSJeff Kirsher 
654b955f6caSJeff Kirsher 	if (dev->dma == 4) {
655b955f6caSJeff Kirsher 		printk(", no DMA needed.\n");
656b955f6caSJeff Kirsher 	} else if (dev->dma) {
657b955f6caSJeff Kirsher 		if (request_dma(dev->dma, chipname)) {
658b955f6caSJeff Kirsher 			printk("DMA %d allocation failed.\n", dev->dma);
659b955f6caSJeff Kirsher 			goto out_tx;
660b955f6caSJeff Kirsher 		} else
661b955f6caSJeff Kirsher 			printk(", assigned DMA %d.\n", dev->dma);
662b955f6caSJeff Kirsher 	} else {			/* OK, we have to auto-DMA. */
663b955f6caSJeff Kirsher 		for (i = 0; i < 4; i++) {
664b955f6caSJeff Kirsher 			static const char dmas[] = { 5, 6, 7, 3 };
665b955f6caSJeff Kirsher 			int dma = dmas[i];
666b955f6caSJeff Kirsher 			int boguscnt;
667b955f6caSJeff Kirsher 
668b955f6caSJeff Kirsher 			/* Don't enable a permanently busy DMA channel, or the machine
669b955f6caSJeff Kirsher 			   will hang. */
670b955f6caSJeff Kirsher 			if (test_bit(dma, &dma_channels))
671b955f6caSJeff Kirsher 				continue;
672b955f6caSJeff Kirsher 			outw(0x7f04, ioaddr+LANCE_DATA); /* Clear the memory error bits. */
673b955f6caSJeff Kirsher 			if (request_dma(dma, chipname))
674b955f6caSJeff Kirsher 				continue;
675b955f6caSJeff Kirsher 
676b955f6caSJeff Kirsher 			flags=claim_dma_lock();
677b955f6caSJeff Kirsher 			set_dma_mode(dma, DMA_MODE_CASCADE);
678b955f6caSJeff Kirsher 			enable_dma(dma);
679b955f6caSJeff Kirsher 			release_dma_lock(flags);
680b955f6caSJeff Kirsher 
681b955f6caSJeff Kirsher 			/* Trigger an initialization. */
682b955f6caSJeff Kirsher 			outw(0x0001, ioaddr+LANCE_DATA);
683b955f6caSJeff Kirsher 			for (boguscnt = 100; boguscnt > 0; --boguscnt)
684b955f6caSJeff Kirsher 				if (inw(ioaddr+LANCE_DATA) & 0x0900)
685b955f6caSJeff Kirsher 					break;
686b955f6caSJeff Kirsher 			if (inw(ioaddr+LANCE_DATA) & 0x0100) {
687b955f6caSJeff Kirsher 				dev->dma = dma;
688b955f6caSJeff Kirsher 				printk(", DMA %d.\n", dev->dma);
689b955f6caSJeff Kirsher 				break;
690b955f6caSJeff Kirsher 			} else {
691b955f6caSJeff Kirsher 				flags=claim_dma_lock();
692b955f6caSJeff Kirsher 				disable_dma(dma);
693b955f6caSJeff Kirsher 				release_dma_lock(flags);
694b955f6caSJeff Kirsher 				free_dma(dma);
695b955f6caSJeff Kirsher 			}
696b955f6caSJeff Kirsher 		}
697b955f6caSJeff Kirsher 		if (i == 4) {			/* Failure: bail. */
698b955f6caSJeff Kirsher 			printk("DMA detection failed.\n");
699b955f6caSJeff Kirsher 			goto out_tx;
700b955f6caSJeff Kirsher 		}
701b955f6caSJeff Kirsher 	}
702b955f6caSJeff Kirsher 
703b955f6caSJeff Kirsher 	if (lance_version == 0 && dev->irq == 0) {
704b955f6caSJeff Kirsher 		/* We may auto-IRQ now that we have a DMA channel. */
705b955f6caSJeff Kirsher 		/* Trigger an initialization just for the interrupt. */
706b955f6caSJeff Kirsher 		unsigned long irq_mask;
707b955f6caSJeff Kirsher 
708b955f6caSJeff Kirsher 		irq_mask = probe_irq_on();
709b955f6caSJeff Kirsher 		outw(0x0041, ioaddr+LANCE_DATA);
710b955f6caSJeff Kirsher 
711b955f6caSJeff Kirsher 		mdelay(40);
712b955f6caSJeff Kirsher 		dev->irq = probe_irq_off(irq_mask);
713b955f6caSJeff Kirsher 		if (dev->irq == 0) {
714b955f6caSJeff Kirsher 			printk("  Failed to detect the 7990 IRQ line.\n");
715b955f6caSJeff Kirsher 			goto out_dma;
716b955f6caSJeff Kirsher 		}
717b955f6caSJeff Kirsher 		printk("  Auto-IRQ detected IRQ%d.\n", dev->irq);
718b955f6caSJeff Kirsher 	}
719b955f6caSJeff Kirsher 
720b955f6caSJeff Kirsher 	if (chip_table[lp->chip_version].flags & LANCE_ENABLE_AUTOSELECT) {
721b955f6caSJeff Kirsher 		/* Turn on auto-select of media (10baseT or BNC) so that the user
722b955f6caSJeff Kirsher 		   can watch the LEDs even if the board isn't opened. */
723b955f6caSJeff Kirsher 		outw(0x0002, ioaddr+LANCE_ADDR);
724b955f6caSJeff Kirsher 		/* Don't touch 10base2 power bit. */
725b955f6caSJeff Kirsher 		outw(inw(ioaddr+LANCE_BUS_IF) | 0x0002, ioaddr+LANCE_BUS_IF);
726b955f6caSJeff Kirsher 	}
727b955f6caSJeff Kirsher 
728b955f6caSJeff Kirsher 	if (lance_debug > 0  &&  did_version++ == 0)
729b955f6caSJeff Kirsher 		printk(version);
730b955f6caSJeff Kirsher 
731b955f6caSJeff Kirsher 	/* The LANCE-specific entries in the device structure. */
732b955f6caSJeff Kirsher 	dev->netdev_ops = &lance_netdev_ops;
733b955f6caSJeff Kirsher 	dev->watchdog_timeo = TX_TIMEOUT;
734b955f6caSJeff Kirsher 
735b955f6caSJeff Kirsher 	err = register_netdev(dev);
736b955f6caSJeff Kirsher 	if (err)
737b955f6caSJeff Kirsher 		goto out_dma;
738b955f6caSJeff Kirsher 	return 0;
739b955f6caSJeff Kirsher out_dma:
740b955f6caSJeff Kirsher 	if (dev->dma != 4)
741b955f6caSJeff Kirsher 		free_dma(dev->dma);
742b955f6caSJeff Kirsher out_tx:
743b955f6caSJeff Kirsher 	kfree(lp->tx_bounce_buffs);
744b955f6caSJeff Kirsher out_rx:
745b955f6caSJeff Kirsher 	kfree((void*)lp->rx_buffs);
746b955f6caSJeff Kirsher out_lp:
747b955f6caSJeff Kirsher 	kfree(lp);
748b955f6caSJeff Kirsher 	return err;
749b955f6caSJeff Kirsher }
750b955f6caSJeff Kirsher 
751b955f6caSJeff Kirsher 
752b955f6caSJeff Kirsher static int
753b955f6caSJeff Kirsher lance_open(struct net_device *dev)
754b955f6caSJeff Kirsher {
755b955f6caSJeff Kirsher 	struct lance_private *lp = dev->ml_priv;
756b955f6caSJeff Kirsher 	int ioaddr = dev->base_addr;
757b955f6caSJeff Kirsher 	int i;
758b955f6caSJeff Kirsher 
759b955f6caSJeff Kirsher 	if (dev->irq == 0 ||
760f0e28d48SNate Levesque 		request_irq(dev->irq, lance_interrupt, 0, dev->name, dev)) {
761b955f6caSJeff Kirsher 		return -EAGAIN;
762b955f6caSJeff Kirsher 	}
763b955f6caSJeff Kirsher 
764b955f6caSJeff Kirsher 	/* We used to allocate DMA here, but that was silly.
765b955f6caSJeff Kirsher 	   DMA lines can't be shared!  We now permanently allocate them. */
766b955f6caSJeff Kirsher 
767b955f6caSJeff Kirsher 	/* Reset the LANCE */
768b955f6caSJeff Kirsher 	inw(ioaddr+LANCE_RESET);
769b955f6caSJeff Kirsher 
770b955f6caSJeff Kirsher 	/* The DMA controller is used as a no-operation slave, "cascade mode". */
771b955f6caSJeff Kirsher 	if (dev->dma != 4) {
772b955f6caSJeff Kirsher 		unsigned long flags=claim_dma_lock();
773b955f6caSJeff Kirsher 		enable_dma(dev->dma);
774b955f6caSJeff Kirsher 		set_dma_mode(dev->dma, DMA_MODE_CASCADE);
775b955f6caSJeff Kirsher 		release_dma_lock(flags);
776b955f6caSJeff Kirsher 	}
777b955f6caSJeff Kirsher 
778b955f6caSJeff Kirsher 	/* Un-Reset the LANCE, needed only for the NE2100. */
779b955f6caSJeff Kirsher 	if (chip_table[lp->chip_version].flags & LANCE_MUST_UNRESET)
780b955f6caSJeff Kirsher 		outw(0, ioaddr+LANCE_RESET);
781b955f6caSJeff Kirsher 
782b955f6caSJeff Kirsher 	if (chip_table[lp->chip_version].flags & LANCE_ENABLE_AUTOSELECT) {
783b955f6caSJeff Kirsher 		/* This is 79C960-specific: Turn on auto-select of media (AUI, BNC). */
784b955f6caSJeff Kirsher 		outw(0x0002, ioaddr+LANCE_ADDR);
785b955f6caSJeff Kirsher 		/* Only touch autoselect bit. */
786b955f6caSJeff Kirsher 		outw(inw(ioaddr+LANCE_BUS_IF) | 0x0002, ioaddr+LANCE_BUS_IF);
787b955f6caSJeff Kirsher 	}
788b955f6caSJeff Kirsher 
789b955f6caSJeff Kirsher 	if (lance_debug > 1)
790b955f6caSJeff Kirsher 		printk("%s: lance_open() irq %d dma %d tx/rx rings %#x/%#x init %#x.\n",
791b955f6caSJeff Kirsher 			   dev->name, dev->irq, dev->dma,
792b955f6caSJeff Kirsher 		           (u32) isa_virt_to_bus(lp->tx_ring),
793b955f6caSJeff Kirsher 		           (u32) isa_virt_to_bus(lp->rx_ring),
794b955f6caSJeff Kirsher 			   (u32) isa_virt_to_bus(&lp->init_block));
795b955f6caSJeff Kirsher 
796b955f6caSJeff Kirsher 	lance_init_ring(dev, GFP_KERNEL);
797b955f6caSJeff Kirsher 	/* Re-initialize the LANCE, and start it when done. */
798b955f6caSJeff Kirsher 	outw(0x0001, ioaddr+LANCE_ADDR);
799b955f6caSJeff Kirsher 	outw((short) (u32) isa_virt_to_bus(&lp->init_block), ioaddr+LANCE_DATA);
800b955f6caSJeff Kirsher 	outw(0x0002, ioaddr+LANCE_ADDR);
801b955f6caSJeff Kirsher 	outw(((u32)isa_virt_to_bus(&lp->init_block)) >> 16, ioaddr+LANCE_DATA);
802b955f6caSJeff Kirsher 
803b955f6caSJeff Kirsher 	outw(0x0004, ioaddr+LANCE_ADDR);
804b955f6caSJeff Kirsher 	outw(0x0915, ioaddr+LANCE_DATA);
805b955f6caSJeff Kirsher 
806b955f6caSJeff Kirsher 	outw(0x0000, ioaddr+LANCE_ADDR);
807b955f6caSJeff Kirsher 	outw(0x0001, ioaddr+LANCE_DATA);
808b955f6caSJeff Kirsher 
809b955f6caSJeff Kirsher 	netif_start_queue (dev);
810b955f6caSJeff Kirsher 
811b955f6caSJeff Kirsher 	i = 0;
812b955f6caSJeff Kirsher 	while (i++ < 100)
813b955f6caSJeff Kirsher 		if (inw(ioaddr+LANCE_DATA) & 0x0100)
814b955f6caSJeff Kirsher 			break;
815b955f6caSJeff Kirsher 	/*
816b955f6caSJeff Kirsher 	 * We used to clear the InitDone bit, 0x0100, here but Mark Stockton
817b955f6caSJeff Kirsher 	 * reports that doing so triggers a bug in the '974.
818b955f6caSJeff Kirsher 	 */
819b955f6caSJeff Kirsher 	outw(0x0042, ioaddr+LANCE_DATA);
820b955f6caSJeff Kirsher 
821b955f6caSJeff Kirsher 	if (lance_debug > 2)
822b955f6caSJeff Kirsher 		printk("%s: LANCE open after %d ticks, init block %#x csr0 %4.4x.\n",
823b955f6caSJeff Kirsher 			   dev->name, i, (u32) isa_virt_to_bus(&lp->init_block), inw(ioaddr+LANCE_DATA));
824b955f6caSJeff Kirsher 
825b955f6caSJeff Kirsher 	return 0;					/* Always succeed */
826b955f6caSJeff Kirsher }
827b955f6caSJeff Kirsher 
828b955f6caSJeff Kirsher /* The LANCE has been halted for one reason or another (busmaster memory
829b955f6caSJeff Kirsher    arbitration error, Tx FIFO underflow, driver stopped it to reconfigure,
830b955f6caSJeff Kirsher    etc.).  Modern LANCE variants always reload their ring-buffer
831b955f6caSJeff Kirsher    configuration when restarted, so we must reinitialize our ring
832b955f6caSJeff Kirsher    context before restarting.  As part of this reinitialization,
833b955f6caSJeff Kirsher    find all packets still on the Tx ring and pretend that they had been
834b955f6caSJeff Kirsher    sent (in effect, drop the packets on the floor) - the higher-level
835b955f6caSJeff Kirsher    protocols will time out and retransmit.  It'd be better to shuffle
836b955f6caSJeff Kirsher    these skbs to a temp list and then actually re-Tx them after
837b955f6caSJeff Kirsher    restarting the chip, but I'm too lazy to do so right now.  dplatt@3do.com
838b955f6caSJeff Kirsher */
839b955f6caSJeff Kirsher 
840b955f6caSJeff Kirsher static void
841b955f6caSJeff Kirsher lance_purge_ring(struct net_device *dev)
842b955f6caSJeff Kirsher {
843b955f6caSJeff Kirsher 	struct lance_private *lp = dev->ml_priv;
844b955f6caSJeff Kirsher 	int i;
845b955f6caSJeff Kirsher 
846b955f6caSJeff Kirsher 	/* Free all the skbuffs in the Rx and Tx queues. */
847b955f6caSJeff Kirsher 	for (i = 0; i < RX_RING_SIZE; i++) {
848b955f6caSJeff Kirsher 		struct sk_buff *skb = lp->rx_skbuff[i];
849b955f6caSJeff Kirsher 		lp->rx_skbuff[i] = NULL;
850b955f6caSJeff Kirsher 		lp->rx_ring[i].base = 0;		/* Not owned by LANCE chip. */
851b955f6caSJeff Kirsher 		if (skb)
852b955f6caSJeff Kirsher 			dev_kfree_skb_any(skb);
853b955f6caSJeff Kirsher 	}
854b955f6caSJeff Kirsher 	for (i = 0; i < TX_RING_SIZE; i++) {
855b955f6caSJeff Kirsher 		if (lp->tx_skbuff[i]) {
856b955f6caSJeff Kirsher 			dev_kfree_skb_any(lp->tx_skbuff[i]);
857b955f6caSJeff Kirsher 			lp->tx_skbuff[i] = NULL;
858b955f6caSJeff Kirsher 		}
859b955f6caSJeff Kirsher 	}
860b955f6caSJeff Kirsher }
861b955f6caSJeff Kirsher 
862b955f6caSJeff Kirsher 
863b955f6caSJeff Kirsher /* Initialize the LANCE Rx and Tx rings. */
864b955f6caSJeff Kirsher static void
865b955f6caSJeff Kirsher lance_init_ring(struct net_device *dev, gfp_t gfp)
866b955f6caSJeff Kirsher {
867b955f6caSJeff Kirsher 	struct lance_private *lp = dev->ml_priv;
868b955f6caSJeff Kirsher 	int i;
869b955f6caSJeff Kirsher 
870b955f6caSJeff Kirsher 	lp->cur_rx = lp->cur_tx = 0;
871b955f6caSJeff Kirsher 	lp->dirty_rx = lp->dirty_tx = 0;
872b955f6caSJeff Kirsher 
873b955f6caSJeff Kirsher 	for (i = 0; i < RX_RING_SIZE; i++) {
874b955f6caSJeff Kirsher 		struct sk_buff *skb;
875b955f6caSJeff Kirsher 		void *rx_buff;
876b955f6caSJeff Kirsher 
877b955f6caSJeff Kirsher 		skb = alloc_skb(PKT_BUF_SZ, GFP_DMA | gfp);
878b955f6caSJeff Kirsher 		lp->rx_skbuff[i] = skb;
8795c8b73caSJon Mason 		if (skb)
880b955f6caSJeff Kirsher 			rx_buff = skb->data;
8815c8b73caSJon Mason 		else
882b955f6caSJeff Kirsher 			rx_buff = kmalloc(PKT_BUF_SZ, GFP_DMA | gfp);
883*b0b815a3SGuofeng Yue 		if (!rx_buff)
884b955f6caSJeff Kirsher 			lp->rx_ring[i].base = 0;
885b955f6caSJeff Kirsher 		else
886b955f6caSJeff Kirsher 			lp->rx_ring[i].base = (u32)isa_virt_to_bus(rx_buff) | 0x80000000;
887b955f6caSJeff Kirsher 		lp->rx_ring[i].buf_length = -PKT_BUF_SZ;
888b955f6caSJeff Kirsher 	}
889b955f6caSJeff Kirsher 	/* The Tx buffer address is filled in as needed, but we do need to clear
890b955f6caSJeff Kirsher 	   the upper ownership bit. */
891b955f6caSJeff Kirsher 	for (i = 0; i < TX_RING_SIZE; i++) {
892b955f6caSJeff Kirsher 		lp->tx_skbuff[i] = NULL;
893b955f6caSJeff Kirsher 		lp->tx_ring[i].base = 0;
894b955f6caSJeff Kirsher 	}
895b955f6caSJeff Kirsher 
896b955f6caSJeff Kirsher 	lp->init_block.mode = 0x0000;
897b955f6caSJeff Kirsher 	for (i = 0; i < 6; i++)
898b955f6caSJeff Kirsher 		lp->init_block.phys_addr[i] = dev->dev_addr[i];
899b955f6caSJeff Kirsher 	lp->init_block.filter[0] = 0x00000000;
900b955f6caSJeff Kirsher 	lp->init_block.filter[1] = 0x00000000;
901b955f6caSJeff Kirsher 	lp->init_block.rx_ring = ((u32)isa_virt_to_bus(lp->rx_ring) & 0xffffff) | RX_RING_LEN_BITS;
902b955f6caSJeff Kirsher 	lp->init_block.tx_ring = ((u32)isa_virt_to_bus(lp->tx_ring) & 0xffffff) | TX_RING_LEN_BITS;
903b955f6caSJeff Kirsher }
904b955f6caSJeff Kirsher 
905b955f6caSJeff Kirsher static void
906b955f6caSJeff Kirsher lance_restart(struct net_device *dev, unsigned int csr0_bits, int must_reinit)
907b955f6caSJeff Kirsher {
908b955f6caSJeff Kirsher 	struct lance_private *lp = dev->ml_priv;
909b955f6caSJeff Kirsher 
910b955f6caSJeff Kirsher 	if (must_reinit ||
911b955f6caSJeff Kirsher 		(chip_table[lp->chip_version].flags & LANCE_MUST_REINIT_RING)) {
912b955f6caSJeff Kirsher 		lance_purge_ring(dev);
913b955f6caSJeff Kirsher 		lance_init_ring(dev, GFP_ATOMIC);
914b955f6caSJeff Kirsher 	}
915b955f6caSJeff Kirsher 	outw(0x0000,    dev->base_addr + LANCE_ADDR);
916b955f6caSJeff Kirsher 	outw(csr0_bits, dev->base_addr + LANCE_DATA);
917b955f6caSJeff Kirsher }
918b955f6caSJeff Kirsher 
919b955f6caSJeff Kirsher 
9200290bd29SMichael S. Tsirkin static void lance_tx_timeout (struct net_device *dev, unsigned int txqueue)
921b955f6caSJeff Kirsher {
922b955f6caSJeff Kirsher 	struct lance_private *lp = (struct lance_private *) dev->ml_priv;
923b955f6caSJeff Kirsher 	int ioaddr = dev->base_addr;
924b955f6caSJeff Kirsher 
925b955f6caSJeff Kirsher 	outw (0, ioaddr + LANCE_ADDR);
926b955f6caSJeff Kirsher 	printk ("%s: transmit timed out, status %4.4x, resetting.\n",
927b955f6caSJeff Kirsher 		dev->name, inw (ioaddr + LANCE_DATA));
928b955f6caSJeff Kirsher 	outw (0x0004, ioaddr + LANCE_DATA);
929b955f6caSJeff Kirsher 	dev->stats.tx_errors++;
930b955f6caSJeff Kirsher #ifndef final_version
931b955f6caSJeff Kirsher 	if (lance_debug > 3) {
932b955f6caSJeff Kirsher 		int i;
933b955f6caSJeff Kirsher 		printk (" Ring data dump: dirty_tx %d cur_tx %d%s cur_rx %d.",
934b955f6caSJeff Kirsher 		  lp->dirty_tx, lp->cur_tx, netif_queue_stopped(dev) ? " (full)" : "",
935b955f6caSJeff Kirsher 			lp->cur_rx);
936b955f6caSJeff Kirsher 		for (i = 0; i < RX_RING_SIZE; i++)
937b955f6caSJeff Kirsher 			printk ("%s %08x %04x %04x", i & 0x3 ? "" : "\n ",
938b955f6caSJeff Kirsher 			 lp->rx_ring[i].base, -lp->rx_ring[i].buf_length,
939b955f6caSJeff Kirsher 				lp->rx_ring[i].msg_length);
940b955f6caSJeff Kirsher 		for (i = 0; i < TX_RING_SIZE; i++)
941b955f6caSJeff Kirsher 			printk ("%s %08x %04x %04x", i & 0x3 ? "" : "\n ",
942b955f6caSJeff Kirsher 			     lp->tx_ring[i].base, -lp->tx_ring[i].length,
943b955f6caSJeff Kirsher 				lp->tx_ring[i].misc);
944b955f6caSJeff Kirsher 		printk ("\n");
945b955f6caSJeff Kirsher 	}
946b955f6caSJeff Kirsher #endif
947b955f6caSJeff Kirsher 	lance_restart (dev, 0x0043, 1);
948b955f6caSJeff Kirsher 
949860e9538SFlorian Westphal 	netif_trans_update(dev); /* prevent tx timeout */
950b955f6caSJeff Kirsher 	netif_wake_queue (dev);
951b955f6caSJeff Kirsher }
952b955f6caSJeff Kirsher 
953b955f6caSJeff Kirsher 
954b955f6caSJeff Kirsher static netdev_tx_t lance_start_xmit(struct sk_buff *skb,
955b955f6caSJeff Kirsher 				    struct net_device *dev)
956b955f6caSJeff Kirsher {
957b955f6caSJeff Kirsher 	struct lance_private *lp = dev->ml_priv;
958b955f6caSJeff Kirsher 	int ioaddr = dev->base_addr;
959b955f6caSJeff Kirsher 	int entry;
960b955f6caSJeff Kirsher 	unsigned long flags;
961b955f6caSJeff Kirsher 
962b955f6caSJeff Kirsher 	spin_lock_irqsave(&lp->devlock, flags);
963b955f6caSJeff Kirsher 
964b955f6caSJeff Kirsher 	if (lance_debug > 3) {
965b955f6caSJeff Kirsher 		outw(0x0000, ioaddr+LANCE_ADDR);
966b955f6caSJeff Kirsher 		printk("%s: lance_start_xmit() called, csr0 %4.4x.\n", dev->name,
967b955f6caSJeff Kirsher 			   inw(ioaddr+LANCE_DATA));
968b955f6caSJeff Kirsher 		outw(0x0000, ioaddr+LANCE_DATA);
969b955f6caSJeff Kirsher 	}
970b955f6caSJeff Kirsher 
971b955f6caSJeff Kirsher 	/* Fill in a Tx ring entry */
972b955f6caSJeff Kirsher 
973b955f6caSJeff Kirsher 	/* Mask to ring buffer boundary. */
974b955f6caSJeff Kirsher 	entry = lp->cur_tx & TX_RING_MOD_MASK;
975b955f6caSJeff Kirsher 
976b955f6caSJeff Kirsher 	/* Caution: the write order is important here; set the base address
977b955f6caSJeff Kirsher 	   with the "ownership" bits last. */
978b955f6caSJeff Kirsher 
979b955f6caSJeff Kirsher 	/* The old LANCE chips don't automatically pad buffers to min. size. */
980b955f6caSJeff Kirsher 	if (chip_table[lp->chip_version].flags & LANCE_MUST_PAD) {
981b955f6caSJeff Kirsher 		if (skb->len < ETH_ZLEN) {
982b955f6caSJeff Kirsher 			if (skb_padto(skb, ETH_ZLEN))
983b955f6caSJeff Kirsher 				goto out;
984b955f6caSJeff Kirsher 			lp->tx_ring[entry].length = -ETH_ZLEN;
985b955f6caSJeff Kirsher 		}
986b955f6caSJeff Kirsher 		else
987b955f6caSJeff Kirsher 			lp->tx_ring[entry].length = -skb->len;
988b955f6caSJeff Kirsher 	} else
989b955f6caSJeff Kirsher 		lp->tx_ring[entry].length = -skb->len;
990b955f6caSJeff Kirsher 
991b955f6caSJeff Kirsher 	lp->tx_ring[entry].misc = 0x0000;
992b955f6caSJeff Kirsher 
993b955f6caSJeff Kirsher 	dev->stats.tx_bytes += skb->len;
994b955f6caSJeff Kirsher 
995b955f6caSJeff Kirsher 	/* If any part of this buffer is >16M we must copy it to a low-memory
996b955f6caSJeff Kirsher 	   buffer. */
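	/*
	 * Either way the top byte of the descriptor base ends up as 0x83,
	 * i.e. OWN | STP | ENP under the usual LANCE TMD layout: a complete,
	 * single-buffer frame handed over to the chip.
	 */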
997b955f6caSJeff Kirsher 	if ((u32)isa_virt_to_bus(skb->data) + skb->len > 0x01000000) {
998b955f6caSJeff Kirsher 		if (lance_debug > 5)
999b955f6caSJeff Kirsher 			printk("%s: bouncing a high-memory packet (%#x).\n",
1000b955f6caSJeff Kirsher 				   dev->name, (u32)isa_virt_to_bus(skb->data));
1001b955f6caSJeff Kirsher 		skb_copy_from_linear_data(skb, &lp->tx_bounce_buffs[entry], skb->len);
1002b955f6caSJeff Kirsher 		lp->tx_ring[entry].base =
1003b955f6caSJeff Kirsher 			((u32)isa_virt_to_bus((lp->tx_bounce_buffs + entry)) & 0xffffff) | 0x83000000;
1004b955f6caSJeff Kirsher 		dev_kfree_skb(skb);
1005b955f6caSJeff Kirsher 	} else {
1006b955f6caSJeff Kirsher 		lp->tx_skbuff[entry] = skb;
1007b955f6caSJeff Kirsher 		lp->tx_ring[entry].base = ((u32)isa_virt_to_bus(skb->data) & 0xffffff) | 0x83000000;
1008b955f6caSJeff Kirsher 	}
1009b955f6caSJeff Kirsher 	lp->cur_tx++;
1010b955f6caSJeff Kirsher 
1011b955f6caSJeff Kirsher 	/* Trigger an immediate send poll. */
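	/* CSR0 <- 0x0048: TDMD (transmit demand) with INEA kept set, assuming
	   the standard CSR0 bits. */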
1012b955f6caSJeff Kirsher 	outw(0x0000, ioaddr+LANCE_ADDR);
1013b955f6caSJeff Kirsher 	outw(0x0048, ioaddr+LANCE_DATA);
1014b955f6caSJeff Kirsher 
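	/* Stop the queue once every descriptor is in flight; the Tx-done
	   interrupt handler wakes it again. */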
1015b955f6caSJeff Kirsher 	if ((lp->cur_tx - lp->dirty_tx) >= TX_RING_SIZE)
1016b955f6caSJeff Kirsher 		netif_stop_queue(dev);
1017b955f6caSJeff Kirsher 
1018b955f6caSJeff Kirsher out:
1019b955f6caSJeff Kirsher 	spin_unlock_irqrestore(&lp->devlock, flags);
1020b955f6caSJeff Kirsher 	return NETDEV_TX_OK;
1021b955f6caSJeff Kirsher }
1022b955f6caSJeff Kirsher 
1023b955f6caSJeff Kirsher /* The LANCE interrupt handler. */
1024b955f6caSJeff Kirsher static irqreturn_t lance_interrupt(int irq, void *dev_id)
1025b955f6caSJeff Kirsher {
1026b955f6caSJeff Kirsher 	struct net_device *dev = dev_id;
1027b955f6caSJeff Kirsher 	struct lance_private *lp;
1028b955f6caSJeff Kirsher 	int csr0, ioaddr, boguscnt=10;
1029b955f6caSJeff Kirsher 	int must_restart;
1030b955f6caSJeff Kirsher 
1031b955f6caSJeff Kirsher 	ioaddr = dev->base_addr;
1032b955f6caSJeff Kirsher 	lp = dev->ml_priv;
1033b955f6caSJeff Kirsher 
1034b955f6caSJeff Kirsher 	spin_lock (&lp->devlock);
1035b955f6caSJeff Kirsher 
1036b955f6caSJeff Kirsher 	outw(0x00, dev->base_addr + LANCE_ADDR);
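	/*
	 * 0x8600 = ERR | RINT | TINT: loop while any interrupt source is
	 * pending, with boguscnt bounding the number of passes.
	 */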
1037b955f6caSJeff Kirsher 	while ((csr0 = inw(dev->base_addr + LANCE_DATA)) & 0x8600 &&
1038b955f6caSJeff Kirsher 	       --boguscnt >= 0) {
1039b955f6caSJeff Kirsher 		/* Acknowledge all of the current interrupt sources ASAP. */
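		/* CSR0 status bits are cleared by writing them back as 1s;
		   0x004f masks off the control bits (INEA/TDMD/STOP/STRT/INIT)
		   so they are not set by accident. */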
1040b955f6caSJeff Kirsher 		outw(csr0 & ~0x004f, dev->base_addr + LANCE_DATA);
1041b955f6caSJeff Kirsher 
1042b955f6caSJeff Kirsher 		must_restart = 0;
1043b955f6caSJeff Kirsher 
1044b955f6caSJeff Kirsher 		if (lance_debug > 5)
1045b955f6caSJeff Kirsher 			printk("%s: interrupt  csr0=%#2.2x new csr=%#2.2x.\n",
1046b955f6caSJeff Kirsher 				   dev->name, csr0, inw(dev->base_addr + LANCE_DATA));
1047b955f6caSJeff Kirsher 
1048b955f6caSJeff Kirsher 		if (csr0 & 0x0400)			/* Rx interrupt */
1049b955f6caSJeff Kirsher 			lance_rx(dev);
1050b955f6caSJeff Kirsher 
1051b955f6caSJeff Kirsher 		if (csr0 & 0x0200) {		/* Tx-done interrupt */
1052b955f6caSJeff Kirsher 			int dirty_tx = lp->dirty_tx;
1053b955f6caSJeff Kirsher 
1054b955f6caSJeff Kirsher 			while (dirty_tx < lp->cur_tx) {
1055b955f6caSJeff Kirsher 				int entry = dirty_tx & TX_RING_MOD_MASK;
1056b955f6caSJeff Kirsher 				int status = lp->tx_ring[entry].base;
1057b955f6caSJeff Kirsher 
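				/* The sign bit of the base word is the OWN bit:
				   negative means the LANCE still owns this descriptor. */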
1058b955f6caSJeff Kirsher 				if (status < 0)
1059b955f6caSJeff Kirsher 					break;			/* It still hasn't been Txed */
1060b955f6caSJeff Kirsher 
1061b955f6caSJeff Kirsher 				lp->tx_ring[entry].base = 0;
1062b955f6caSJeff Kirsher 
1063b955f6caSJeff Kirsher 				if (status & 0x40000000) {
1064b955f6caSJeff Kirsher 					/* There was a major error, log it. */
1065b955f6caSJeff Kirsher 					int err_status = lp->tx_ring[entry].misc;
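					/* Error bits below follow the usual LANCE TMD3 layout:
					   RTRY 0x0400, LCAR 0x0800, LCOL 0x1000, UFLO 0x4000. */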
1066b955f6caSJeff Kirsher 					dev->stats.tx_errors++;
1067b955f6caSJeff Kirsher 					if (err_status & 0x0400)
1068b955f6caSJeff Kirsher 						dev->stats.tx_aborted_errors++;
1069b955f6caSJeff Kirsher 					if (err_status & 0x0800)
1070b955f6caSJeff Kirsher 						dev->stats.tx_carrier_errors++;
1071b955f6caSJeff Kirsher 					if (err_status & 0x1000)
1072b955f6caSJeff Kirsher 						dev->stats.tx_window_errors++;
1073b955f6caSJeff Kirsher 					if (err_status & 0x4000) {
1074b955f6caSJeff Kirsher 						/* Ackk!  On FIFO errors the Tx unit is turned off! */
1075b955f6caSJeff Kirsher 						dev->stats.tx_fifo_errors++;
1076b955f6caSJeff Kirsher 						/* Remove this verbosity later! */
1077b955f6caSJeff Kirsher 						printk("%s: Tx FIFO error! Status %4.4x.\n",
1078b955f6caSJeff Kirsher 							   dev->name, csr0);
1079b955f6caSJeff Kirsher 						/* Restart the chip. */
1080b955f6caSJeff Kirsher 						must_restart = 1;
1081b955f6caSJeff Kirsher 					}
1082b955f6caSJeff Kirsher 				} else {
1083b955f6caSJeff Kirsher 					if (status & 0x18000000)
1084b955f6caSJeff Kirsher 						dev->stats.collisions++;
1085b955f6caSJeff Kirsher 					dev->stats.tx_packets++;
1086b955f6caSJeff Kirsher 				}
1087b955f6caSJeff Kirsher 
1088b955f6caSJeff Kirsher 				/* We must free the original skb if it's not a data-only copy
1089b955f6caSJeff Kirsher 				   in the bounce buffer. */
1090b955f6caSJeff Kirsher 				if (lp->tx_skbuff[entry]) {
1091fc67ade1SYang Wei 					dev_consume_skb_irq(lp->tx_skbuff[entry]);
1092b955f6caSJeff Kirsher 					lp->tx_skbuff[entry] = NULL;
1093b955f6caSJeff Kirsher 				}
1094b955f6caSJeff Kirsher 				dirty_tx++;
1095b955f6caSJeff Kirsher 			}
1096b955f6caSJeff Kirsher 
1097b955f6caSJeff Kirsher #ifndef final_version
1098b955f6caSJeff Kirsher 			if (lp->cur_tx - dirty_tx >= TX_RING_SIZE) {
1099b955f6caSJeff Kirsher 				printk("out-of-sync dirty pointer, %d vs. %d, full=%s.\n",
1100b955f6caSJeff Kirsher 					   dirty_tx, lp->cur_tx,
1101b955f6caSJeff Kirsher 					   netif_queue_stopped(dev) ? "yes" : "no");
1102b955f6caSJeff Kirsher 				dirty_tx += TX_RING_SIZE;
1103b955f6caSJeff Kirsher 			}
1104b955f6caSJeff Kirsher #endif
1105b955f6caSJeff Kirsher 
1106b955f6caSJeff Kirsher 			/* if the ring is no longer full, accept more packets */
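			/* The +2 keeps a little headroom so the queue is not
			   restarted for a single free slot. */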
1107b955f6caSJeff Kirsher 			if (netif_queue_stopped(dev) &&
1108b955f6caSJeff Kirsher 			    dirty_tx > lp->cur_tx - TX_RING_SIZE + 2)
1109b955f6caSJeff Kirsher 				netif_wake_queue (dev);
1110b955f6caSJeff Kirsher 
1111b955f6caSJeff Kirsher 			lp->dirty_tx = dirty_tx;
1112b955f6caSJeff Kirsher 		}
1113b955f6caSJeff Kirsher 
1114b955f6caSJeff Kirsher 		/* Log misc errors. */
1115b955f6caSJeff Kirsher 		if (csr0 & 0x4000)
1116b955f6caSJeff Kirsher 			dev->stats.tx_errors++; /* Tx babble. */
1117b955f6caSJeff Kirsher 		if (csr0 & 0x1000)
1118b955f6caSJeff Kirsher 			dev->stats.rx_errors++; /* Missed a Rx frame. */
1119b955f6caSJeff Kirsher 		if (csr0 & 0x0800) {
1120b955f6caSJeff Kirsher 			printk("%s: Bus master arbitration failure, status %4.4x.\n",
1121b955f6caSJeff Kirsher 				   dev->name, csr0);
1122b955f6caSJeff Kirsher 			/* Restart the chip. */
1123b955f6caSJeff Kirsher 			must_restart = 1;
1124b955f6caSJeff Kirsher 		}
1125b955f6caSJeff Kirsher 
1126b955f6caSJeff Kirsher 		if (must_restart) {
1127b955f6caSJeff Kirsher 			/* stop the chip to clear the error condition, then restart */
1128b955f6caSJeff Kirsher 			outw(0x0000, dev->base_addr + LANCE_ADDR);
1129b955f6caSJeff Kirsher 			outw(0x0004, dev->base_addr + LANCE_DATA);
1130b955f6caSJeff Kirsher 			lance_restart(dev, 0x0002, 0);
1131b955f6caSJeff Kirsher 		}
1132b955f6caSJeff Kirsher 	}
1133b955f6caSJeff Kirsher 
1134b955f6caSJeff Kirsher 	/* Clear any other interrupt, and set interrupt enable. */
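	/* 0x7940 acks BABL, CERR, MISS, MERR and IDON and sets INEA, assuming
	   the standard CSR0 bits. */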
1135b955f6caSJeff Kirsher 	outw(0x0000, dev->base_addr + LANCE_ADDR);
1136b955f6caSJeff Kirsher 	outw(0x7940, dev->base_addr + LANCE_DATA);
1137b955f6caSJeff Kirsher 
1138b955f6caSJeff Kirsher 	if (lance_debug > 4)
1139b955f6caSJeff Kirsher 		printk("%s: exiting interrupt, csr%d=%#4.4x.\n",
1140b955f6caSJeff Kirsher 			   dev->name, inw(ioaddr + LANCE_ADDR),
1141b955f6caSJeff Kirsher 			   inw(dev->base_addr + LANCE_DATA));
1142b955f6caSJeff Kirsher 
1143b955f6caSJeff Kirsher 	spin_unlock (&lp->devlock);
1144b955f6caSJeff Kirsher 	return IRQ_HANDLED;
1145b955f6caSJeff Kirsher }
1146b955f6caSJeff Kirsher 
1147b955f6caSJeff Kirsher static int
1148b955f6caSJeff Kirsher lance_rx(struct net_device *dev)
1149b955f6caSJeff Kirsher {
1150b955f6caSJeff Kirsher 	struct lance_private *lp = dev->ml_priv;
1151b955f6caSJeff Kirsher 	int entry = lp->cur_rx & RX_RING_MOD_MASK;
1152b955f6caSJeff Kirsher 	int i;
1153b955f6caSJeff Kirsher 
1154b955f6caSJeff Kirsher 	/* If we own the next entry, it's a new packet. Send it up. */
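	/* base >= 0 means the OWN (sign) bit is clear, i.e. the host owns
	   this receive descriptor. */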
1155b955f6caSJeff Kirsher 	while (lp->rx_ring[entry].base >= 0) {
1156b955f6caSJeff Kirsher 		int status = lp->rx_ring[entry].base >> 24;
1157b955f6caSJeff Kirsher 
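		/* status 0x03 = STP | ENP: a complete packet contained in this
		   single buffer. */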
1158b955f6caSJeff Kirsher 		if (status != 0x03) {			/* There was an error. */
1159b955f6caSJeff Kirsher 			/* There is a tricky error noted by John Murphy,
1160b955f6caSJeff Kirsher 			   <murf@perftech.com> to Russ Nelson: Even with full-sized
1161b955f6caSJeff Kirsher 			   buffers it's possible for a jabber packet to use two
1162b955f6caSJeff Kirsher 			   buffers, with only the last correctly noting the error. */
1163b955f6caSJeff Kirsher 			if (status & 0x01)	/* Only count a general error at the */
1164b955f6caSJeff Kirsher 				dev->stats.rx_errors++; /* end of a packet.*/
1165b955f6caSJeff Kirsher 			if (status & 0x20)
1166b955f6caSJeff Kirsher 				dev->stats.rx_frame_errors++;
1167b955f6caSJeff Kirsher 			if (status & 0x10)
1168b955f6caSJeff Kirsher 				dev->stats.rx_over_errors++;
1169b955f6caSJeff Kirsher 			if (status & 0x08)
1170b955f6caSJeff Kirsher 				dev->stats.rx_crc_errors++;
1171b955f6caSJeff Kirsher 			if (status & 0x04)
1172b955f6caSJeff Kirsher 				dev->stats.rx_fifo_errors++;
1173b955f6caSJeff Kirsher 			lp->rx_ring[entry].base &= 0x03ffffff;
1174b955f6caSJeff Kirsher 		}
1175b955f6caSJeff Kirsher 		else
1176b955f6caSJeff Kirsher 		{
1177b955f6caSJeff Kirsher 			/* Malloc up new buffer, compatible with net3. */
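			/* msg_length appears to include the 4-byte FCS, hence the
			   subtraction below (assuming the usual LANCE behaviour). */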
1178b955f6caSJeff Kirsher 			short pkt_len = (lp->rx_ring[entry].msg_length & 0xfff)-4;
1179b955f6caSJeff Kirsher 			struct sk_buff *skb;
1180b955f6caSJeff Kirsher 
1181b955f6caSJeff Kirsher 			if(pkt_len<60)
1182b955f6caSJeff Kirsher 			{
1183b955f6caSJeff Kirsher 				printk("%s: Runt packet!\n",dev->name);
1184b955f6caSJeff Kirsher 				dev->stats.rx_errors++;
1185b955f6caSJeff Kirsher 			}
1186b955f6caSJeff Kirsher 			else
1187b955f6caSJeff Kirsher 			{
1188b955f6caSJeff Kirsher 				skb = dev_alloc_skb(pkt_len+2);
1189*b0b815a3SGuofeng Yue 				if (!skb)
1190b955f6caSJeff Kirsher 				{
1191b955f6caSJeff Kirsher 					printk("%s: Memory squeeze, deferring packet.\n", dev->name);
1192b955f6caSJeff Kirsher 					for (i=0; i < RX_RING_SIZE; i++)
1193b955f6caSJeff Kirsher 						if (lp->rx_ring[(entry+i) & RX_RING_MOD_MASK].base < 0)
1194b955f6caSJeff Kirsher 							break;
1195b955f6caSJeff Kirsher 
1196b955f6caSJeff Kirsher 					if (i > RX_RING_SIZE -2)
1197b955f6caSJeff Kirsher 					{
1198b955f6caSJeff Kirsher 						dev->stats.rx_dropped++;
1199b955f6caSJeff Kirsher 						lp->rx_ring[entry].base |= 0x80000000;
1200b955f6caSJeff Kirsher 						lp->cur_rx++;
1201b955f6caSJeff Kirsher 					}
1202b955f6caSJeff Kirsher 					break;
1203b955f6caSJeff Kirsher 				}
1204b955f6caSJeff Kirsher 				skb_reserve(skb,2);	/* 16 byte align */
1205b955f6caSJeff Kirsher 				skb_put(skb,pkt_len);	/* Make room */
1206b955f6caSJeff Kirsher 				skb_copy_to_linear_data(skb,
1207b955f6caSJeff Kirsher 					(unsigned char *)isa_bus_to_virt((lp->rx_ring[entry].base & 0x00ffffff)),
1208b955f6caSJeff Kirsher 					pkt_len);
1209b955f6caSJeff Kirsher 				skb->protocol=eth_type_trans(skb,dev);
1210b955f6caSJeff Kirsher 				netif_rx(skb);
1211b955f6caSJeff Kirsher 				dev->stats.rx_packets++;
1212b955f6caSJeff Kirsher 				dev->stats.rx_bytes += pkt_len;
1213b955f6caSJeff Kirsher 			}
1214b955f6caSJeff Kirsher 		}
1215b955f6caSJeff Kirsher 		/* The docs say that the buffer length isn't touched, but Andrew Boyd
1216b955f6caSJeff Kirsher 		   of QNX reports that some revs of the 79C965 clear it. */
1217b955f6caSJeff Kirsher 		lp->rx_ring[entry].buf_length = -PKT_BUF_SZ;
1218b955f6caSJeff Kirsher 		lp->rx_ring[entry].base |= 0x80000000;
1219b955f6caSJeff Kirsher 		entry = (++lp->cur_rx) & RX_RING_MOD_MASK;
1220b955f6caSJeff Kirsher 	}
1221b955f6caSJeff Kirsher 
1222b955f6caSJeff Kirsher 	/* We should check that at least two ring entries are free. If not,
1223b955f6caSJeff Kirsher 	   we should free one and mark stats->rx_dropped++. */
1224b955f6caSJeff Kirsher 
1225b955f6caSJeff Kirsher 	return 0;
1226b955f6caSJeff Kirsher }
1227b955f6caSJeff Kirsher 
1228b955f6caSJeff Kirsher static int
1229b955f6caSJeff Kirsher lance_close(struct net_device *dev)
1230b955f6caSJeff Kirsher {
1231b955f6caSJeff Kirsher 	int ioaddr = dev->base_addr;
1232b955f6caSJeff Kirsher 	struct lance_private *lp = dev->ml_priv;
1233b955f6caSJeff Kirsher 
1234b955f6caSJeff Kirsher 	netif_stop_queue (dev);
1235b955f6caSJeff Kirsher 
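	/* CSR112 holds the missed-frame count on chips that report it; fold it
	   into the stats before stopping the chip. */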
1236b955f6caSJeff Kirsher 	if (chip_table[lp->chip_version].flags & LANCE_HAS_MISSED_FRAME) {
1237b955f6caSJeff Kirsher 		outw(112, ioaddr+LANCE_ADDR);
1238b955f6caSJeff Kirsher 		dev->stats.rx_missed_errors = inw(ioaddr+LANCE_DATA);
1239b955f6caSJeff Kirsher 	}
1240b955f6caSJeff Kirsher 	outw(0, ioaddr+LANCE_ADDR);
1241b955f6caSJeff Kirsher 
1242b955f6caSJeff Kirsher 	if (lance_debug > 1)
1243b955f6caSJeff Kirsher 		printk("%s: Shutting down ethercard, status was %2.2x.\n",
1244b955f6caSJeff Kirsher 			   dev->name, inw(ioaddr+LANCE_DATA));
1245b955f6caSJeff Kirsher 
1246b955f6caSJeff Kirsher 	/* We stop the LANCE here -- it occasionally polls
1247b955f6caSJeff Kirsher 	   memory if we don't. */
1248b955f6caSJeff Kirsher 	outw(0x0004, ioaddr+LANCE_DATA);
1249b955f6caSJeff Kirsher 
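	/* DMA channel 4 is the ISA cascade channel and appears to be used here
	   as a "no ISA DMA channel to release" marker (an inference from context). */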
1250b955f6caSJeff Kirsher 	if (dev->dma != 4)
1251b955f6caSJeff Kirsher 	{
1252b955f6caSJeff Kirsher 		unsigned long flags=claim_dma_lock();
1253b955f6caSJeff Kirsher 		disable_dma(dev->dma);
1254b955f6caSJeff Kirsher 		release_dma_lock(flags);
1255b955f6caSJeff Kirsher 	}
1256b955f6caSJeff Kirsher 	free_irq(dev->irq, dev);
1257b955f6caSJeff Kirsher 
1258b955f6caSJeff Kirsher 	lance_purge_ring(dev);
1259b955f6caSJeff Kirsher 
1260b955f6caSJeff Kirsher 	return 0;
1261b955f6caSJeff Kirsher }
1262b955f6caSJeff Kirsher 
1263b955f6caSJeff Kirsher static struct net_device_stats *lance_get_stats(struct net_device *dev)
1264b955f6caSJeff Kirsher {
1265b955f6caSJeff Kirsher 	struct lance_private *lp = dev->ml_priv;
1266b955f6caSJeff Kirsher 
1267b955f6caSJeff Kirsher 	if (chip_table[lp->chip_version].flags & LANCE_HAS_MISSED_FRAME) {
1268b955f6caSJeff Kirsher 		short ioaddr = dev->base_addr;
1269b955f6caSJeff Kirsher 		short saved_addr;
1270b955f6caSJeff Kirsher 		unsigned long flags;
1271b955f6caSJeff Kirsher 
1272b955f6caSJeff Kirsher 		spin_lock_irqsave(&lp->devlock, flags);
1273b955f6caSJeff Kirsher 		saved_addr = inw(ioaddr+LANCE_ADDR);
1274b955f6caSJeff Kirsher 		outw(112, ioaddr+LANCE_ADDR);
1275b955f6caSJeff Kirsher 		dev->stats.rx_missed_errors = inw(ioaddr+LANCE_DATA);
1276b955f6caSJeff Kirsher 		outw(saved_addr, ioaddr+LANCE_ADDR);
1277b955f6caSJeff Kirsher 		spin_unlock_irqrestore(&lp->devlock, flags);
1278b955f6caSJeff Kirsher 	}
1279b955f6caSJeff Kirsher 
1280b955f6caSJeff Kirsher 	return &dev->stats;
1281b955f6caSJeff Kirsher }
1282b955f6caSJeff Kirsher 
1283b955f6caSJeff Kirsher /* Set or clear the multicast filter for this adaptor.
1284b955f6caSJeff Kirsher  */
1285b955f6caSJeff Kirsher 
1286b955f6caSJeff Kirsher static void set_multicast_list(struct net_device *dev)
1287b955f6caSJeff Kirsher {
1288b955f6caSJeff Kirsher 	short ioaddr = dev->base_addr;
1289b955f6caSJeff Kirsher 
1290b955f6caSJeff Kirsher 	outw(0, ioaddr+LANCE_ADDR);
1291b955f6caSJeff Kirsher 	outw(0x0004, ioaddr+LANCE_DATA); /* Temporarily stop the lance.	 */
1292b955f6caSJeff Kirsher 
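	/*
	 * CSR15 is the mode register (bit 0x8000 = PROM, promiscuous mode);
	 * CSR8-11 hold the 64-bit logical-address (multicast) filter.
	 */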
1293b955f6caSJeff Kirsher 	if (dev->flags&IFF_PROMISC) {
1294b955f6caSJeff Kirsher 		outw(15, ioaddr+LANCE_ADDR);
1295b955f6caSJeff Kirsher 		outw(0x8000, ioaddr+LANCE_DATA); /* Set promiscuous mode */
1296b955f6caSJeff Kirsher 	} else {
1297b955f6caSJeff Kirsher 		short multicast_table[4];
1298b955f6caSJeff Kirsher 		int i;
1299b955f6caSJeff Kirsher 		int num_addrs=netdev_mc_count(dev);
1300b955f6caSJeff Kirsher 		if(dev->flags&IFF_ALLMULTI)
1301b955f6caSJeff Kirsher 			num_addrs=1;
1302b955f6caSJeff Kirsher 		/* FIXIT: We don't use the multicast table, but rely on upper-layer filtering. */
1303b955f6caSJeff Kirsher 		memset(multicast_table, (num_addrs == 0) ? 0 : -1, sizeof(multicast_table));
1304b955f6caSJeff Kirsher 		for (i = 0; i < 4; i++) {
1305b955f6caSJeff Kirsher 			outw(8 + i, ioaddr+LANCE_ADDR);
1306b955f6caSJeff Kirsher 			outw(multicast_table[i], ioaddr+LANCE_DATA);
1307b955f6caSJeff Kirsher 		}
1308b955f6caSJeff Kirsher 		outw(15, ioaddr+LANCE_ADDR);
1309b955f6caSJeff Kirsher 		outw(0x0000, ioaddr+LANCE_DATA); /* Unset promiscuous mode */
1310b955f6caSJeff Kirsher 	}
1311b955f6caSJeff Kirsher 
1312b955f6caSJeff Kirsher 	lance_restart(dev, 0x0142, 0); /*  Resume normal operation */
1313b955f6caSJeff Kirsher 
1314b955f6caSJeff Kirsher }
1315b955f6caSJeff Kirsher 
1316