1b955f6caSJeff Kirsher /* lance.c: An AMD LANCE/PCnet ethernet driver for Linux. */
2b955f6caSJeff Kirsher /*
3b955f6caSJeff Kirsher Written/copyright 1993-1998 by Donald Becker.
4b955f6caSJeff Kirsher
5b955f6caSJeff Kirsher Copyright 1993 United States Government as represented by the
6b955f6caSJeff Kirsher Director, National Security Agency.
7b955f6caSJeff Kirsher This software may be used and distributed according to the terms
8b955f6caSJeff Kirsher of the GNU General Public License, incorporated herein by reference.
9b955f6caSJeff Kirsher
10b955f6caSJeff Kirsher This driver is for the Allied Telesis AT1500 and HP J2405A, and should work
11b955f6caSJeff Kirsher with most other LANCE-based bus-master (NE2100/NE2500) ethercards.
12b955f6caSJeff Kirsher
13b955f6caSJeff Kirsher The author may be reached as becker@scyld.com, or C/O
14b955f6caSJeff Kirsher Scyld Computing Corporation
15b955f6caSJeff Kirsher 410 Severn Ave., Suite 210
16b955f6caSJeff Kirsher Annapolis MD 21403
17b955f6caSJeff Kirsher
18b955f6caSJeff Kirsher Andrey V. Savochkin:
19b955f6caSJeff Kirsher - alignment problem with 1.3.* kernel and some minor changes.
20b955f6caSJeff Kirsher Thomas Bogendoerfer (tsbogend@bigbug.franken.de):
21b955f6caSJeff Kirsher - added support for Linux/Alpha, but removed most of it, because
22b955f6caSJeff Kirsher it worked only for the PCI chip.
23b955f6caSJeff Kirsher - added hook for the 32bit lance driver
24b955f6caSJeff Kirsher - added PCnetPCI II (79C970A) to chip table
25b955f6caSJeff Kirsher Paul Gortmaker (gpg109@rsphy1.anu.edu.au):
26b955f6caSJeff Kirsher - hopefully fix above so Linux/Alpha can use ISA cards too.
27b955f6caSJeff Kirsher 8/20/96 Fixed 7990 autoIRQ failure and reversed unneeded alignment -djb
28b955f6caSJeff Kirsher v1.12 10/27/97 Module support -djb
29b955f6caSJeff Kirsher v1.14 2/3/98 Module support modified, made PCI support optional -djb
30b955f6caSJeff Kirsher v1.15 5/27/99 Fixed bug in the cleanup_module(). dev->priv was freed
31b955f6caSJeff Kirsher before unregister_netdev() which caused NULL pointer
32b955f6caSJeff Kirsher reference later in the chain (in rtnetlink_fill_ifinfo())
33b955f6caSJeff Kirsher -- Mika Kuoppala <miku@iki.fi>
34b955f6caSJeff Kirsher
35b955f6caSJeff Kirsher Forward ported v1.14 to 2.1.129, merged the PCI and misc changes from
36b955f6caSJeff Kirsher the 2.1 version of the old driver - Alan Cox
37b955f6caSJeff Kirsher
38b955f6caSJeff Kirsher Get rid of check_region, check kmalloc return in lance_probe1
39b955f6caSJeff Kirsher Arnaldo Carvalho de Melo <acme@conectiva.com.br> - 11/01/2001
40b955f6caSJeff Kirsher
41b955f6caSJeff Kirsher Reworked detection, added support for Racal InterLan EtherBlaster cards
42b955f6caSJeff Kirsher Vesselin Kostadinov <vesok at yahoo dot com > - 22/4/2004
43b955f6caSJeff Kirsher */
44b955f6caSJeff Kirsher
45b955f6caSJeff Kirsher static const char version[] = "lance.c:v1.16 2006/11/09 dplatt@3do.com, becker@cesdis.gsfc.nasa.gov\n";
46b955f6caSJeff Kirsher
47b955f6caSJeff Kirsher #include <linux/module.h>
48b955f6caSJeff Kirsher #include <linux/kernel.h>
49b955f6caSJeff Kirsher #include <linux/string.h>
50b955f6caSJeff Kirsher #include <linux/delay.h>
51b955f6caSJeff Kirsher #include <linux/errno.h>
52b955f6caSJeff Kirsher #include <linux/ioport.h>
53b955f6caSJeff Kirsher #include <linux/slab.h>
54b955f6caSJeff Kirsher #include <linux/interrupt.h>
55b955f6caSJeff Kirsher #include <linux/pci.h>
56b955f6caSJeff Kirsher #include <linux/init.h>
57b955f6caSJeff Kirsher #include <linux/netdevice.h>
58b955f6caSJeff Kirsher #include <linux/etherdevice.h>
59b955f6caSJeff Kirsher #include <linux/skbuff.h>
60b955f6caSJeff Kirsher #include <linux/mm.h>
61b955f6caSJeff Kirsher #include <linux/bitops.h>
62*067dee65SArnd Bergmann #include <net/Space.h>
63b955f6caSJeff Kirsher
64b955f6caSJeff Kirsher #include <asm/io.h>
65b955f6caSJeff Kirsher #include <asm/dma.h>
66b955f6caSJeff Kirsher
67b955f6caSJeff Kirsher static unsigned int lance_portlist[] __initdata = { 0x300, 0x320, 0x340, 0x360, 0};
68b955f6caSJeff Kirsher static int lance_probe1(struct net_device *dev, int ioaddr, int irq, int options);
69b955f6caSJeff Kirsher static int __init do_lance_probe(struct net_device *dev);
70b955f6caSJeff Kirsher
71b955f6caSJeff Kirsher
/*
 * PROM signature bytes (I/O offsets 14 and 15) used to recognise a LANCE
 * board cheaply before running the full probe in lance_probe1().  Each
 * supported board family stamps a distinctive byte pair there.
 */
static struct card {
	char id_offset14;	/* expected byte at ioaddr + 14 */
	char id_offset15;	/* expected byte at ioaddr + 15 */
} cards[] = {
	{	//"normal"
		.id_offset14 = 0x57,
		.id_offset15 = 0x57,
	},
	{	//NI6510EB
		.id_offset14 = 0x52,
		.id_offset15 = 0x44,
	},
	{	//Racal InterLan EtherBlaster
		.id_offset14 = 0x52,
		.id_offset15 = 0x49,
	},
};
/* Derive the count from the table itself so it can never drift out of
   sync when a new signature is added (previously a hard-coded 3). */
#define NUM_CARDS ((int)(sizeof(cards) / sizeof(cards[0])))
90b955f6caSJeff Kirsher
/* Message verbosity: a compile-time LANCE_DEBUG overrides the default of 1
   (probe/error messages only; larger values enable extra printk tracing,
   e.g. chip version at >2 and allocation addresses at >6 below). */
#ifdef LANCE_DEBUG
static int lance_debug = LANCE_DEBUG;
#else
static int lance_debug = 1;
#endif
96b955f6caSJeff Kirsher
97b955f6caSJeff Kirsher /*
98b955f6caSJeff Kirsher Theory of Operation
99b955f6caSJeff Kirsher
100b955f6caSJeff Kirsher I. Board Compatibility
101b955f6caSJeff Kirsher
102b955f6caSJeff Kirsher This device driver is designed for the AMD 79C960, the "PCnet-ISA
103b955f6caSJeff Kirsher single-chip ethernet controller for ISA". This chip is used in a wide
104b955f6caSJeff Kirsher variety of boards from vendors such as Allied Telesis, HP, Kingston,
105b955f6caSJeff Kirsher and Boca. This driver is also intended to work with older AMD 7990
106b955f6caSJeff Kirsher designs, such as the NE1500 and NE2100, and newer 79C961. For convenience,
107b955f6caSJeff Kirsher I use the name LANCE to refer to all of the AMD chips, even though it properly
108b955f6caSJeff Kirsher refers only to the original 7990.
109b955f6caSJeff Kirsher
110b955f6caSJeff Kirsher II. Board-specific settings
111b955f6caSJeff Kirsher
112b955f6caSJeff Kirsher The driver is designed to work the boards that use the faster
113b955f6caSJeff Kirsher bus-master mode, rather than in shared memory mode. (Only older designs
114b955f6caSJeff Kirsher have on-board buffer memory needed to support the slower shared memory mode.)
115b955f6caSJeff Kirsher
116b955f6caSJeff Kirsher Most ISA boards have jumpered settings for the I/O base, IRQ line, and DMA
117b955f6caSJeff Kirsher channel. This driver probes the likely base addresses:
118b955f6caSJeff Kirsher {0x300, 0x320, 0x340, 0x360}.
119b955f6caSJeff Kirsher After the board is found it generates a DMA-timeout interrupt and uses
120b955f6caSJeff Kirsher autoIRQ to find the IRQ line. The DMA channel can be set with the low bits
121b955f6caSJeff Kirsher of the otherwise-unused dev->mem_start value (aka PARAM1). If unset it is
122b955f6caSJeff Kirsher probed for by enabling each free DMA channel in turn and checking if
123b955f6caSJeff Kirsher initialization succeeds.
124b955f6caSJeff Kirsher
125b955f6caSJeff Kirsher The HP-J2405A board is an exception: with this board it is easy to read the
126b955f6caSJeff Kirsher EEPROM-set values for the base, IRQ, and DMA. (Of course you must already
127b955f6caSJeff Kirsher _know_ the base address -- that field is for writing the EEPROM.)
128b955f6caSJeff Kirsher
129b955f6caSJeff Kirsher III. Driver operation
130b955f6caSJeff Kirsher
131b955f6caSJeff Kirsher IIIa. Ring buffers
132b955f6caSJeff Kirsher The LANCE uses ring buffers of Tx and Rx descriptors. Each entry describes
133b955f6caSJeff Kirsher the base and length of the data buffer, along with status bits. The length
134b955f6caSJeff Kirsher of these buffers is set by LANCE_LOG_{RX,TX}_BUFFERS, which is log_2() of
135b955f6caSJeff Kirsher the buffer length (rather than being directly the buffer length) for
136b955f6caSJeff Kirsher implementation ease. The current values are 2 (Tx) and 4 (Rx), which leads to
137b955f6caSJeff Kirsher ring sizes of 4 (Tx) and 16 (Rx). Increasing the number of ring entries
138b955f6caSJeff Kirsher needlessly uses extra space and reduces the chance that an upper layer will
139b955f6caSJeff Kirsher be able to reorder queued Tx packets based on priority. Decreasing the number
140b955f6caSJeff Kirsher of entries makes it more difficult to achieve back-to-back packet transmission
141b955f6caSJeff Kirsher and increases the chance that Rx ring will overflow. (Consider the worst case
142b955f6caSJeff Kirsher of receiving back-to-back minimum-sized packets.)
143b955f6caSJeff Kirsher
144b955f6caSJeff Kirsher The LANCE has the capability to "chain" both Rx and Tx buffers, but this driver
145b955f6caSJeff Kirsher statically allocates full-sized (slightly oversized -- PKT_BUF_SZ) buffers to
146b955f6caSJeff Kirsher avoid the administrative overhead. For the Rx side this avoids dynamically
147b955f6caSJeff Kirsher allocating full-sized buffers "just in case", at the expense of a
148b955f6caSJeff Kirsher memory-to-memory data copy for each packet received. For most systems this
149b955f6caSJeff Kirsher is a good tradeoff: the Rx buffer will always be in low memory, the copy
150b955f6caSJeff Kirsher is inexpensive, and it primes the cache for later packet processing. For Tx
151b955f6caSJeff Kirsher the buffers are only used when needed as low-memory bounce buffers.
152b955f6caSJeff Kirsher
153b955f6caSJeff Kirsher IIIB. 16M memory limitations.
154b955f6caSJeff Kirsher For the ISA bus master mode all structures used directly by the LANCE,
155b955f6caSJeff Kirsher the initialization block, Rx and Tx rings, and data buffers, must be
156b955f6caSJeff Kirsher accessible from the ISA bus, i.e. in the lower 16M of real memory.
157b955f6caSJeff Kirsher This is a problem for current Linux kernels on >16M machines. The network
158b955f6caSJeff Kirsher devices are initialized after memory initialization, and the kernel doles out
159b955f6caSJeff Kirsher memory from the top of memory downward. The current solution is to have a
160b955f6caSJeff Kirsher special network initialization routine that's called before memory
161b955f6caSJeff Kirsher initialization; this will eventually be generalized for all network devices.
162b955f6caSJeff Kirsher As mentioned before, low-memory "bounce-buffers" are used when needed.
163b955f6caSJeff Kirsher
164b955f6caSJeff Kirsher IIIC. Synchronization
165b955f6caSJeff Kirsher The driver runs as two independent, single-threaded flows of control. One
166b955f6caSJeff Kirsher is the send-packet routine, which enforces single-threaded use by the
167b955f6caSJeff Kirsher dev->tbusy flag. The other thread is the interrupt handler, which is single
168b955f6caSJeff Kirsher threaded by the hardware and other software.
169b955f6caSJeff Kirsher
170b955f6caSJeff Kirsher The send packet thread has partial control over the Tx ring and 'dev->tbusy'
171b955f6caSJeff Kirsher flag. It sets the tbusy flag whenever it's queuing a Tx packet. If the next
172b955f6caSJeff Kirsher queue slot is empty, it clears the tbusy flag when finished otherwise it sets
173b955f6caSJeff Kirsher the 'lp->tx_full' flag.
174b955f6caSJeff Kirsher
175b955f6caSJeff Kirsher The interrupt handler has exclusive control over the Rx ring and records stats
176b955f6caSJeff Kirsher from the Tx ring. (The Tx-done interrupt can't be selectively turned off, so
177b955f6caSJeff Kirsher we can't avoid the interrupt overhead by having the Tx routine reap the Tx
178b955f6caSJeff Kirsher stats.) After reaping the stats, it marks the queue entry as empty by setting
179b955f6caSJeff Kirsher the 'base' to zero. Iff the 'lp->tx_full' flag is set, it clears both the
180b955f6caSJeff Kirsher tx_full and tbusy flags.
181b955f6caSJeff Kirsher
182b955f6caSJeff Kirsher */
183b955f6caSJeff Kirsher
184b955f6caSJeff Kirsher /* Set the number of Tx and Rx buffers, using Log_2(# buffers).
185b955f6caSJeff Kirsher Reasonable default values are 16 Tx buffers, and 16 Rx buffers.
186b955f6caSJeff Kirsher That translates to 4 and 4 (16 == 2^^4).
187b955f6caSJeff Kirsher This is a compile-time option for efficiency.
188b955f6caSJeff Kirsher */
#ifndef LANCE_LOG_TX_BUFFERS
#define LANCE_LOG_TX_BUFFERS 4
#define LANCE_LOG_RX_BUFFERS 4
#endif

/* Ring sizes are powers of two so indices can wrap with a cheap AND
   against the MOD_MASK.  The LEN_BITS values shift log2(ring size) into
   the upper bits of the 32-bit ring-base words of the init block -- see
   the rx_ring/tx_ring setup in lance_probe1() below. */
#define TX_RING_SIZE		(1 << (LANCE_LOG_TX_BUFFERS))
#define TX_RING_MOD_MASK	(TX_RING_SIZE - 1)
#define TX_RING_LEN_BITS	((LANCE_LOG_TX_BUFFERS) << 29)

#define RX_RING_SIZE		(1 << (LANCE_LOG_RX_BUFFERS))
#define RX_RING_MOD_MASK	(RX_RING_SIZE - 1)
#define RX_RING_LEN_BITS	((LANCE_LOG_RX_BUFFERS) << 29)

/* Per-packet buffer size; "slightly oversized" relative to a max-length
   ethernet frame (see the theory-of-operation text above). */
#define PKT_BUF_SZ		1544

/* Offsets from base I/O address. */
#define LANCE_DATA 0x10
#define LANCE_ADDR 0x12
#define LANCE_RESET 0x14
#define LANCE_BUS_IF 0x16
#define LANCE_TOTAL_SIZE 0x18

/* Jiffies before the netdev watchdog declares a Tx hang (200 ms). */
#define TX_TIMEOUT	(HZ/5)
212b955f6caSJeff Kirsher
/* The LANCE Rx and Tx ring descriptors.  These are read and written by
   the chip itself over ISA bus mastering, so their layout is fixed by
   the hardware and they must live in ISA-reachable (<16M) memory. */
struct lance_rx_head {
	s32 base;		/* buffer address as seen by the chip */
	s16 buf_length;		/* This length is 2s complement (negative)! */
	s16 msg_length;		/* This length is "normal". */
};

struct lance_tx_head {
	s32 base;		/* buffer address as seen by the chip */
	s16 length;		/* Length is 2s complement (negative)! */
	s16 misc;		/* NOTE(review): status/error bits -- confirm against databook */
};

/* The LANCE initialization block, described in databook. */
struct lance_init_block {
	u16 mode;		/* Pre-set mode (reg. 15) */
	u8  phys_addr[6];	/* Physical ethernet address */
	u32 filter[2];		/* Multicast filter (unused). */
	/* Receive and transmit ring base, along with extra bits. */
	u32 rx_ring;		/* Tx and Rx ring base pointers; low 24 bits are the
				   bus address, top bits carry *_RING_LEN_BITS. */
	u32 tx_ring;
};
235b955f6caSJeff Kirsher
/* Per-device driver state.  Allocated with GFP_DMA (see lance_probe1())
   because the leading members are accessed directly by the chip and must
   therefore sit in ISA-reachable low memory. */
struct lance_private {
	/* The Tx and Rx ring entries must be aligned on 8-byte boundaries. */
	struct lance_rx_head rx_ring[RX_RING_SIZE];
	struct lance_tx_head tx_ring[TX_RING_SIZE];
	struct lance_init_block init_block;	/* handed to the chip by bus address */
	const char *name;			/* chip name from chip_table[] */
	/* The saved address of a sent-in-place packet/buffer, for skfree(). */
	struct sk_buff* tx_skbuff[TX_RING_SIZE];
	/* The addresses of receive-in-place skbuffs. */
	struct sk_buff* rx_skbuff[RX_RING_SIZE];
	unsigned long rx_buffs;		/* Address of Rx and Tx buffers. */
	/* Tx low-memory "bounce buffer" address.  NULL when all RAM is below
	   16M and no bouncing is needed (see lance_need_isa_bounce_buffers). */
	char (*tx_bounce_buffs)[PKT_BUF_SZ];
	int cur_rx, cur_tx;		/* The next free ring entry */
	int dirty_rx, dirty_tx;		/* The ring entries to be free()ed. */
	int dma;			/* ISA DMA channel */
	unsigned char chip_version;	/* See lance_chip_type. */
	spinlock_t devlock;		/* serializes xmit path against the irq
					   handler -- see "IIIC. Synchronization" */
};
255b955f6caSJeff Kirsher
/* Capability/quirk flags, OR-ed together in chip_table[].flags below. */
#define LANCE_MUST_PAD          0x00000001
#define LANCE_ENABLE_AUTOSELECT 0x00000002
#define LANCE_MUST_REINIT_RING  0x00000004
#define LANCE_MUST_UNRESET      0x00000008
#define LANCE_HAS_MISSED_FRAME  0x00000010

/* A mapping from the chip ID number to the part number and features.
   These are from the datasheets -- in real life the '970 version
   reportedly has the same ID as the '965. */
static struct lance_chip_type {
	int id_number;		/* value read from CSR88/89; 0 terminates search */
	const char *name;
	int flags;		/* combination of the LANCE_* flags above */
} chip_table[] = {
	{0x0000, "LANCE 7990",			/* Ancient lance chip. */
		LANCE_MUST_PAD + LANCE_MUST_UNRESET},
	{0x0003, "PCnet/ISA 79C960",		/* 79C960 PCnet/ISA. */
		LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
			LANCE_HAS_MISSED_FRAME},
	{0x2260, "PCnet/ISA+ 79C961",		/* 79C961 PCnet/ISA+, Plug-n-Play. */
		LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
			LANCE_HAS_MISSED_FRAME},
	{0x2420, "PCnet/PCI 79C970",		/* 79C970 or 79C974 PCnet-SCSI, PCI. */
		LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
			LANCE_HAS_MISSED_FRAME},
	/* Bug: the PCnet/PCI actually uses the PCnet/VLB ID number, so just call
		it the PCnet32. */
	{0x2430, "PCnet32",			/* 79C965 PCnet for VL bus. */
		LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
			LANCE_HAS_MISSED_FRAME},
	{0x2621, "PCnet/PCI-II 79C970A",	/* 79C970A PCInetPCI II. */
		LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
			LANCE_HAS_MISSED_FRAME},
	{0x0, 	 "PCnet (unknown)",		/* catch-all terminator entry */
		LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
			LANCE_HAS_MISSED_FRAME},
};

/* Symbolic indices into chip_table[], in table order. */
enum {OLD_LANCE = 0, PCNET_ISA=1, PCNET_ISAP=2, PCNET_PCI=3, PCNET_VLB=4, PCNET_PCI_II=5, LANCE_UNKNOWN=6};
295b955f6caSJeff Kirsher
296b955f6caSJeff Kirsher
/* Non-zero if lance_probe1() needs to allocate low-memory bounce buffers.
   Assume yes until we know the memory size (cleared by do_lance_probe()
   when all RAM is within ISA DMA reach). */
static unsigned char lance_need_isa_bounce_buffers = 1;

/* Driver entry points; definitions follow later in the file. */
static int lance_open(struct net_device *dev);
static void lance_init_ring(struct net_device *dev, gfp_t mode);
static netdev_tx_t lance_start_xmit(struct sk_buff *skb,
				    struct net_device *dev);
static int lance_rx(struct net_device *dev);
static irqreturn_t lance_interrupt(int irq, void *dev_id);
static int lance_close(struct net_device *dev);
static struct net_device_stats *lance_get_stats(struct net_device *dev);
static void set_multicast_list(struct net_device *dev);
static void lance_tx_timeout (struct net_device *dev, unsigned int txqueue);
311b955f6caSJeff Kirsher
312b955f6caSJeff Kirsher
313b955f6caSJeff Kirsher
314b955f6caSJeff Kirsher #ifdef MODULE
#define MAX_CARDS		8	/* Max number of interfaces (cards) per module */

/* Per-card state and module parameters; io= is mandatory in module mode
   because autoprobing is refused there (see lance_init_module()). */
static struct net_device *dev_lance[MAX_CARDS];
static int io[MAX_CARDS];
static int dma[MAX_CARDS];
static int irq[MAX_CARDS];

module_param_hw_array(io, int, ioport, NULL, 0);
module_param_hw_array(dma, int, dma, NULL, 0);
module_param_hw_array(irq, int, irq, NULL, 0);
module_param(lance_debug, int, 0);
MODULE_PARM_DESC(io, "LANCE/PCnet I/O base address(es),required");
MODULE_PARM_DESC(dma, "LANCE/PCnet ISA DMA channel (ignored for some devices)");
MODULE_PARM_DESC(irq, "LANCE/PCnet IRQ number (ignored for some devices)");
MODULE_PARM_DESC(lance_debug, "LANCE/PCnet debug level (0-7)");
330b955f6caSJeff Kirsher
lance_init_module(void)331a07d8ecfSArnd Bergmann static int __init lance_init_module(void)
332b955f6caSJeff Kirsher {
333b955f6caSJeff Kirsher struct net_device *dev;
334b955f6caSJeff Kirsher int this_dev, found = 0;
335b955f6caSJeff Kirsher
336b955f6caSJeff Kirsher for (this_dev = 0; this_dev < MAX_CARDS; this_dev++) {
337b955f6caSJeff Kirsher if (io[this_dev] == 0) {
338b955f6caSJeff Kirsher if (this_dev != 0) /* only complain once */
339b955f6caSJeff Kirsher break;
340b955f6caSJeff Kirsher printk(KERN_NOTICE "lance.c: Module autoprobing not allowed. Append \"io=0xNNN\" value(s).\n");
341b955f6caSJeff Kirsher return -EPERM;
342b955f6caSJeff Kirsher }
343b955f6caSJeff Kirsher dev = alloc_etherdev(0);
344b955f6caSJeff Kirsher if (!dev)
345b955f6caSJeff Kirsher break;
346b955f6caSJeff Kirsher dev->irq = irq[this_dev];
347b955f6caSJeff Kirsher dev->base_addr = io[this_dev];
348b955f6caSJeff Kirsher dev->dma = dma[this_dev];
349b955f6caSJeff Kirsher if (do_lance_probe(dev) == 0) {
350b955f6caSJeff Kirsher dev_lance[found++] = dev;
351b955f6caSJeff Kirsher continue;
352b955f6caSJeff Kirsher }
353b955f6caSJeff Kirsher free_netdev(dev);
354b955f6caSJeff Kirsher break;
355b955f6caSJeff Kirsher }
356b955f6caSJeff Kirsher if (found != 0)
357b955f6caSJeff Kirsher return 0;
358b955f6caSJeff Kirsher return -ENXIO;
359b955f6caSJeff Kirsher }
360a07d8ecfSArnd Bergmann module_init(lance_init_module);
361b955f6caSJeff Kirsher
/* Release the resources lance_probe1() acquired for one card: the ISA DMA
 * channel, the I/O region and the DMA-able private buffers.  Must only be
 * called after unregister_netdev() -- freeing ml_priv first caused a NULL
 * dereference historically (see the v1.15 note in the file header).
 */
static void cleanup_card(struct net_device *dev)
{
	struct lance_private *lp = dev->ml_priv;
	if (dev->dma != 4)	/* NOTE(review): channel 4 appears to mean "no ISA DMA
				   channel allocated" -- confirm against lance_probe1() */
		free_dma(dev->dma);
	release_region(dev->base_addr, LANCE_TOTAL_SIZE);
	kfree(lp->tx_bounce_buffs);	/* NULL (kfree no-op) when no bouncing was needed */
	kfree((void*)lp->rx_buffs);
	kfree(lp);
}
372b955f6caSJeff Kirsher
lance_cleanup_module(void)373a07d8ecfSArnd Bergmann static void __exit lance_cleanup_module(void)
374b955f6caSJeff Kirsher {
375b955f6caSJeff Kirsher int this_dev;
376b955f6caSJeff Kirsher
377b955f6caSJeff Kirsher for (this_dev = 0; this_dev < MAX_CARDS; this_dev++) {
378b955f6caSJeff Kirsher struct net_device *dev = dev_lance[this_dev];
379b955f6caSJeff Kirsher if (dev) {
380b955f6caSJeff Kirsher unregister_netdev(dev);
381b955f6caSJeff Kirsher cleanup_card(dev);
382b955f6caSJeff Kirsher free_netdev(dev);
383b955f6caSJeff Kirsher }
384b955f6caSJeff Kirsher }
385b955f6caSJeff Kirsher }
386a07d8ecfSArnd Bergmann module_exit(lance_cleanup_module);
387b955f6caSJeff Kirsher #endif /* MODULE */
388b955f6caSJeff Kirsher MODULE_LICENSE("GPL");
389b955f6caSJeff Kirsher
390b955f6caSJeff Kirsher
391b955f6caSJeff Kirsher /* Starting in v2.1.*, the LANCE/PCnet probe is now similar to the other
392b955f6caSJeff Kirsher board probes now that kmalloc() can allocate ISA DMA-able regions.
393b955f6caSJeff Kirsher This also allows the LANCE driver to be used as a module.
394b955f6caSJeff Kirsher */
/* Scan lance_portlist[] for a board.  For each candidate base address the
 * I/O region is reserved first, then the two PROM signature bytes are
 * checked against cards[] -- the second byte is read only when the first
 * already matched some entry, keeping probe-time I/O minimal.  On a full
 * signature match lance_probe1() does the real detection; on success the
 * reserved region is kept (renamed after the detected chip) and 0 is
 * returned, otherwise the region is released and the next port tried.
 * Returns -ENODEV when no board is found.
 */
static int __init do_lance_probe(struct net_device *dev)
{
	unsigned int *port;
	int result;

	/* If every page of RAM is ISA-reachable (<16M), bounce buffers
	   are unnecessary. */
	if (high_memory <= phys_to_virt(16*1024*1024))
		lance_need_isa_bounce_buffers = 0;

	for (port = lance_portlist; *port; port++) {
		int ioaddr = *port;
		struct resource *r = request_region(ioaddr, LANCE_TOTAL_SIZE,
							"lance-probe");

		if (r) {
			/* Detect the card with minimal I/O reads */
			char offset14 = inb(ioaddr + 14);
			int card;
			for (card = 0; card < NUM_CARDS; ++card)
				if (cards[card].id_offset14 == offset14)
					break;
			if (card < NUM_CARDS) {/*yes, the first byte matches*/
				/* Second pass: require both bytes to match
				   the same table entry. */
				char offset15 = inb(ioaddr + 15);
				for (card = 0; card < NUM_CARDS; ++card)
					if ((cards[card].id_offset14 == offset14) &&
						(cards[card].id_offset15 == offset15))
						break;
			}
			if (card < NUM_CARDS) { /*Signature OK*/
				result = lance_probe1(dev, ioaddr, 0, 0);
				if (!result) {
					struct lance_private *lp = dev->ml_priv;
					int ver = lp->chip_version;

					/* Show the chip name in /proc/ioports
					   instead of "lance-probe". */
					r->name = chip_table[ver].name;
					return 0;
				}
			}
			release_region(ioaddr, LANCE_TOTAL_SIZE);
		}
	}
	return -ENODEV;
}
437b955f6caSJeff Kirsher
438b955f6caSJeff Kirsher #ifndef MODULE
lance_probe(int unit)439b955f6caSJeff Kirsher struct net_device * __init lance_probe(int unit)
440b955f6caSJeff Kirsher {
441b955f6caSJeff Kirsher struct net_device *dev = alloc_etherdev(0);
442b955f6caSJeff Kirsher int err;
443b955f6caSJeff Kirsher
444b955f6caSJeff Kirsher if (!dev)
445b955f6caSJeff Kirsher return ERR_PTR(-ENODEV);
446b955f6caSJeff Kirsher
447b955f6caSJeff Kirsher sprintf(dev->name, "eth%d", unit);
448b955f6caSJeff Kirsher netdev_boot_setup_check(dev);
449b955f6caSJeff Kirsher
450b955f6caSJeff Kirsher err = do_lance_probe(dev);
451b955f6caSJeff Kirsher if (err)
452b955f6caSJeff Kirsher goto out;
453b955f6caSJeff Kirsher return dev;
454b955f6caSJeff Kirsher out:
455b955f6caSJeff Kirsher free_netdev(dev);
456b955f6caSJeff Kirsher return ERR_PTR(err);
457b955f6caSJeff Kirsher }
458b955f6caSJeff Kirsher #endif
459b955f6caSJeff Kirsher
/* net_device callback table; MAC-address set/validate use the generic
   ethernet helpers rather than driver-specific code. */
static const struct net_device_ops lance_netdev_ops = {
	.ndo_open 		= lance_open,
	.ndo_start_xmit		= lance_start_xmit,
	.ndo_stop		= lance_close,
	.ndo_get_stats		= lance_get_stats,
	.ndo_set_rx_mode	= set_multicast_list,
	.ndo_tx_timeout		= lance_tx_timeout,
	.ndo_set_mac_address 	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};
470b955f6caSJeff Kirsher
lance_probe1(struct net_device * dev,int ioaddr,int irq,int options)471b955f6caSJeff Kirsher static int __init lance_probe1(struct net_device *dev, int ioaddr, int irq, int options)
472b955f6caSJeff Kirsher {
473b955f6caSJeff Kirsher struct lance_private *lp;
474b955f6caSJeff Kirsher unsigned long dma_channels; /* Mark spuriously-busy DMA channels */
475b955f6caSJeff Kirsher int i, reset_val, lance_version;
476b955f6caSJeff Kirsher const char *chipname;
477b955f6caSJeff Kirsher /* Flags for specific chips or boards. */
478b955f6caSJeff Kirsher unsigned char hpJ2405A = 0; /* HP ISA adaptor */
479b955f6caSJeff Kirsher int hp_builtin = 0; /* HP on-board ethernet. */
480b955f6caSJeff Kirsher static int did_version; /* Already printed version info. */
481b955f6caSJeff Kirsher unsigned long flags;
482b955f6caSJeff Kirsher int err = -ENOMEM;
483b955f6caSJeff Kirsher void __iomem *bios;
4840222ee53SJakub Kicinski u8 addr[ETH_ALEN];
485b955f6caSJeff Kirsher
486b955f6caSJeff Kirsher /* First we look for special cases.
487b955f6caSJeff Kirsher Check for HP's on-board ethernet by looking for 'HP' in the BIOS.
488b955f6caSJeff Kirsher There are two HP versions, check the BIOS for the configuration port.
489b955f6caSJeff Kirsher This method provided by L. Julliard, Laurent_Julliard@grenoble.hp.com.
490b955f6caSJeff Kirsher */
491b955f6caSJeff Kirsher bios = ioremap(0xf00f0, 0x14);
492b955f6caSJeff Kirsher if (!bios)
493b955f6caSJeff Kirsher return -ENOMEM;
494b955f6caSJeff Kirsher if (readw(bios + 0x12) == 0x5048) {
495b955f6caSJeff Kirsher static const short ioaddr_table[] = { 0x300, 0x320, 0x340, 0x360};
496b955f6caSJeff Kirsher int hp_port = (readl(bios + 1) & 1) ? 0x499 : 0x99;
497b955f6caSJeff Kirsher /* We can have boards other than the built-in! Verify this is on-board. */
498b955f6caSJeff Kirsher if ((inb(hp_port) & 0xc0) == 0x80 &&
499b955f6caSJeff Kirsher ioaddr_table[inb(hp_port) & 3] == ioaddr)
500b955f6caSJeff Kirsher hp_builtin = hp_port;
501b955f6caSJeff Kirsher }
502b955f6caSJeff Kirsher iounmap(bios);
503b955f6caSJeff Kirsher /* We also recognize the HP Vectra on-board here, but check below. */
504b955f6caSJeff Kirsher hpJ2405A = (inb(ioaddr) == 0x08 && inb(ioaddr+1) == 0x00 &&
505b955f6caSJeff Kirsher inb(ioaddr+2) == 0x09);
506b955f6caSJeff Kirsher
507b955f6caSJeff Kirsher /* Reset the LANCE. */
508b955f6caSJeff Kirsher reset_val = inw(ioaddr+LANCE_RESET); /* Reset the LANCE */
509b955f6caSJeff Kirsher
510b955f6caSJeff Kirsher /* The Un-Reset needed is only needed for the real NE2100, and will
511b955f6caSJeff Kirsher confuse the HP board. */
512b955f6caSJeff Kirsher if (!hpJ2405A)
513b955f6caSJeff Kirsher outw(reset_val, ioaddr+LANCE_RESET);
514b955f6caSJeff Kirsher
515b955f6caSJeff Kirsher outw(0x0000, ioaddr+LANCE_ADDR); /* Switch to window 0 */
516b955f6caSJeff Kirsher if (inw(ioaddr+LANCE_DATA) != 0x0004)
517b955f6caSJeff Kirsher return -ENODEV;
518b955f6caSJeff Kirsher
519b955f6caSJeff Kirsher /* Get the version of the chip. */
520b955f6caSJeff Kirsher outw(88, ioaddr+LANCE_ADDR);
521b955f6caSJeff Kirsher if (inw(ioaddr+LANCE_ADDR) != 88) {
522b955f6caSJeff Kirsher lance_version = 0;
523b955f6caSJeff Kirsher } else { /* Good, it's a newer chip. */
524b955f6caSJeff Kirsher int chip_version = inw(ioaddr+LANCE_DATA);
525b955f6caSJeff Kirsher outw(89, ioaddr+LANCE_ADDR);
526b955f6caSJeff Kirsher chip_version |= inw(ioaddr+LANCE_DATA) << 16;
527b955f6caSJeff Kirsher if (lance_debug > 2)
528b955f6caSJeff Kirsher printk(" LANCE chip version is %#x.\n", chip_version);
529b955f6caSJeff Kirsher if ((chip_version & 0xfff) != 0x003)
530b955f6caSJeff Kirsher return -ENODEV;
531b955f6caSJeff Kirsher chip_version = (chip_version >> 12) & 0xffff;
532b955f6caSJeff Kirsher for (lance_version = 1; chip_table[lance_version].id_number; lance_version++) {
533b955f6caSJeff Kirsher if (chip_table[lance_version].id_number == chip_version)
534b955f6caSJeff Kirsher break;
535b955f6caSJeff Kirsher }
536b955f6caSJeff Kirsher }
537b955f6caSJeff Kirsher
538b955f6caSJeff Kirsher /* We can't allocate private data from alloc_etherdev() because it must
539b955f6caSJeff Kirsher a ISA DMA-able region. */
540b955f6caSJeff Kirsher chipname = chip_table[lance_version].name;
541b955f6caSJeff Kirsher printk("%s: %s at %#3x, ", dev->name, chipname, ioaddr);
542b955f6caSJeff Kirsher
543b955f6caSJeff Kirsher /* There is a 16 byte station address PROM at the base address.
544b955f6caSJeff Kirsher The first six bytes are the station address. */
545b955f6caSJeff Kirsher for (i = 0; i < 6; i++)
5460222ee53SJakub Kicinski addr[i] = inb(ioaddr + i);
5470222ee53SJakub Kicinski eth_hw_addr_set(dev, addr);
548b955f6caSJeff Kirsher printk("%pM", dev->dev_addr);
549b955f6caSJeff Kirsher
550b955f6caSJeff Kirsher dev->base_addr = ioaddr;
551b955f6caSJeff Kirsher /* Make certain the data structures used by the LANCE are aligned and DMAble. */
552b955f6caSJeff Kirsher
553b955f6caSJeff Kirsher lp = kzalloc(sizeof(*lp), GFP_DMA | GFP_KERNEL);
554a09f4af1SAmitoj Kaur Chawla if (!lp)
555a09f4af1SAmitoj Kaur Chawla return -ENOMEM;
556b955f6caSJeff Kirsher if (lance_debug > 6) printk(" (#0x%05lx)", (unsigned long)lp);
557b955f6caSJeff Kirsher dev->ml_priv = lp;
558b955f6caSJeff Kirsher lp->name = chipname;
5596da2ec56SKees Cook lp->rx_buffs = (unsigned long)kmalloc_array(RX_RING_SIZE, PKT_BUF_SZ,
560b955f6caSJeff Kirsher GFP_DMA | GFP_KERNEL);
561b955f6caSJeff Kirsher if (!lp->rx_buffs)
562b955f6caSJeff Kirsher goto out_lp;
563b955f6caSJeff Kirsher if (lance_need_isa_bounce_buffers) {
5646da2ec56SKees Cook lp->tx_bounce_buffs = kmalloc_array(TX_RING_SIZE, PKT_BUF_SZ,
565b955f6caSJeff Kirsher GFP_DMA | GFP_KERNEL);
566b955f6caSJeff Kirsher if (!lp->tx_bounce_buffs)
567b955f6caSJeff Kirsher goto out_rx;
568b955f6caSJeff Kirsher } else
569b955f6caSJeff Kirsher lp->tx_bounce_buffs = NULL;
570b955f6caSJeff Kirsher
571b955f6caSJeff Kirsher lp->chip_version = lance_version;
572b955f6caSJeff Kirsher spin_lock_init(&lp->devlock);
573b955f6caSJeff Kirsher
574b955f6caSJeff Kirsher lp->init_block.mode = 0x0003; /* Disable Rx and Tx. */
575b955f6caSJeff Kirsher for (i = 0; i < 6; i++)
576b955f6caSJeff Kirsher lp->init_block.phys_addr[i] = dev->dev_addr[i];
577b955f6caSJeff Kirsher lp->init_block.filter[0] = 0x00000000;
578b955f6caSJeff Kirsher lp->init_block.filter[1] = 0x00000000;
579b955f6caSJeff Kirsher lp->init_block.rx_ring = ((u32)isa_virt_to_bus(lp->rx_ring) & 0xffffff) | RX_RING_LEN_BITS;
580b955f6caSJeff Kirsher lp->init_block.tx_ring = ((u32)isa_virt_to_bus(lp->tx_ring) & 0xffffff) | TX_RING_LEN_BITS;
581b955f6caSJeff Kirsher
582b955f6caSJeff Kirsher outw(0x0001, ioaddr+LANCE_ADDR);
583b955f6caSJeff Kirsher inw(ioaddr+LANCE_ADDR);
584b955f6caSJeff Kirsher outw((short) (u32) isa_virt_to_bus(&lp->init_block), ioaddr+LANCE_DATA);
585b955f6caSJeff Kirsher outw(0x0002, ioaddr+LANCE_ADDR);
586b955f6caSJeff Kirsher inw(ioaddr+LANCE_ADDR);
587b955f6caSJeff Kirsher outw(((u32)isa_virt_to_bus(&lp->init_block)) >> 16, ioaddr+LANCE_DATA);
588b955f6caSJeff Kirsher outw(0x0000, ioaddr+LANCE_ADDR);
589b955f6caSJeff Kirsher inw(ioaddr+LANCE_ADDR);
590b955f6caSJeff Kirsher
591b955f6caSJeff Kirsher if (irq) { /* Set iff PCI card. */
592b955f6caSJeff Kirsher dev->dma = 4; /* Native bus-master, no DMA channel needed. */
593b955f6caSJeff Kirsher dev->irq = irq;
594b955f6caSJeff Kirsher } else if (hp_builtin) {
595b955f6caSJeff Kirsher static const char dma_tbl[4] = {3, 5, 6, 0};
596b955f6caSJeff Kirsher static const char irq_tbl[4] = {3, 4, 5, 9};
597b955f6caSJeff Kirsher unsigned char port_val = inb(hp_builtin);
598b955f6caSJeff Kirsher dev->dma = dma_tbl[(port_val >> 4) & 3];
599b955f6caSJeff Kirsher dev->irq = irq_tbl[(port_val >> 2) & 3];
600b955f6caSJeff Kirsher printk(" HP Vectra IRQ %d DMA %d.\n", dev->irq, dev->dma);
601b955f6caSJeff Kirsher } else if (hpJ2405A) {
602b955f6caSJeff Kirsher static const char dma_tbl[4] = {3, 5, 6, 7};
603b955f6caSJeff Kirsher static const char irq_tbl[8] = {3, 4, 5, 9, 10, 11, 12, 15};
604b955f6caSJeff Kirsher short reset_val = inw(ioaddr+LANCE_RESET);
605b955f6caSJeff Kirsher dev->dma = dma_tbl[(reset_val >> 2) & 3];
606b955f6caSJeff Kirsher dev->irq = irq_tbl[(reset_val >> 4) & 7];
607b955f6caSJeff Kirsher printk(" HP J2405A IRQ %d DMA %d.\n", dev->irq, dev->dma);
608b955f6caSJeff Kirsher } else if (lance_version == PCNET_ISAP) { /* The plug-n-play version. */
609b955f6caSJeff Kirsher short bus_info;
610b955f6caSJeff Kirsher outw(8, ioaddr+LANCE_ADDR);
611b955f6caSJeff Kirsher bus_info = inw(ioaddr+LANCE_BUS_IF);
612b955f6caSJeff Kirsher dev->dma = bus_info & 0x07;
613b955f6caSJeff Kirsher dev->irq = (bus_info >> 4) & 0x0F;
614b955f6caSJeff Kirsher } else {
615b955f6caSJeff Kirsher /* The DMA channel may be passed in PARAM1. */
616b955f6caSJeff Kirsher if (dev->mem_start & 0x07)
617b955f6caSJeff Kirsher dev->dma = dev->mem_start & 0x07;
618b955f6caSJeff Kirsher }
619b955f6caSJeff Kirsher
620b955f6caSJeff Kirsher if (dev->dma == 0) {
621b955f6caSJeff Kirsher /* Read the DMA channel status register, so that we can avoid
622b955f6caSJeff Kirsher stuck DMA channels in the DMA detection below. */
623b955f6caSJeff Kirsher dma_channels = ((inb(DMA1_STAT_REG) >> 4) & 0x0f) |
624b955f6caSJeff Kirsher (inb(DMA2_STAT_REG) & 0xf0);
625b955f6caSJeff Kirsher }
626b955f6caSJeff Kirsher err = -ENODEV;
627b955f6caSJeff Kirsher if (dev->irq >= 2)
628b955f6caSJeff Kirsher printk(" assigned IRQ %d", dev->irq);
629b955f6caSJeff Kirsher else if (lance_version != 0) { /* 7990 boards need DMA detection first. */
630b955f6caSJeff Kirsher unsigned long irq_mask;
631b955f6caSJeff Kirsher
632b955f6caSJeff Kirsher /* To auto-IRQ we enable the initialization-done and DMA error
633b955f6caSJeff Kirsher interrupts. For ISA boards we get a DMA error, but VLB and PCI
634b955f6caSJeff Kirsher boards will work. */
635b955f6caSJeff Kirsher irq_mask = probe_irq_on();
636b955f6caSJeff Kirsher
637b955f6caSJeff Kirsher /* Trigger an initialization just for the interrupt. */
638b955f6caSJeff Kirsher outw(0x0041, ioaddr+LANCE_DATA);
639b955f6caSJeff Kirsher
640b955f6caSJeff Kirsher mdelay(20);
641b955f6caSJeff Kirsher dev->irq = probe_irq_off(irq_mask);
642b955f6caSJeff Kirsher if (dev->irq)
643b955f6caSJeff Kirsher printk(", probed IRQ %d", dev->irq);
644b955f6caSJeff Kirsher else {
645b955f6caSJeff Kirsher printk(", failed to detect IRQ line.\n");
646b955f6caSJeff Kirsher goto out_tx;
647b955f6caSJeff Kirsher }
648b955f6caSJeff Kirsher
649b955f6caSJeff Kirsher /* Check for the initialization done bit, 0x0100, which means
650b955f6caSJeff Kirsher that we don't need a DMA channel. */
651b955f6caSJeff Kirsher if (inw(ioaddr+LANCE_DATA) & 0x0100)
652b955f6caSJeff Kirsher dev->dma = 4;
653b955f6caSJeff Kirsher }
654b955f6caSJeff Kirsher
655b955f6caSJeff Kirsher if (dev->dma == 4) {
656b955f6caSJeff Kirsher printk(", no DMA needed.\n");
657b955f6caSJeff Kirsher } else if (dev->dma) {
658b955f6caSJeff Kirsher if (request_dma(dev->dma, chipname)) {
659b955f6caSJeff Kirsher printk("DMA %d allocation failed.\n", dev->dma);
660b955f6caSJeff Kirsher goto out_tx;
661b955f6caSJeff Kirsher } else
662b955f6caSJeff Kirsher printk(", assigned DMA %d.\n", dev->dma);
663b955f6caSJeff Kirsher } else { /* OK, we have to auto-DMA. */
664b955f6caSJeff Kirsher for (i = 0; i < 4; i++) {
665b955f6caSJeff Kirsher static const char dmas[] = { 5, 6, 7, 3 };
666b955f6caSJeff Kirsher int dma = dmas[i];
667b955f6caSJeff Kirsher int boguscnt;
668b955f6caSJeff Kirsher
669b955f6caSJeff Kirsher /* Don't enable a permanently busy DMA channel, or the machine
670b955f6caSJeff Kirsher will hang. */
671b955f6caSJeff Kirsher if (test_bit(dma, &dma_channels))
672b955f6caSJeff Kirsher continue;
673b955f6caSJeff Kirsher outw(0x7f04, ioaddr+LANCE_DATA); /* Clear the memory error bits. */
674b955f6caSJeff Kirsher if (request_dma(dma, chipname))
675b955f6caSJeff Kirsher continue;
676b955f6caSJeff Kirsher
677b955f6caSJeff Kirsher flags=claim_dma_lock();
678b955f6caSJeff Kirsher set_dma_mode(dma, DMA_MODE_CASCADE);
679b955f6caSJeff Kirsher enable_dma(dma);
680b955f6caSJeff Kirsher release_dma_lock(flags);
681b955f6caSJeff Kirsher
682b955f6caSJeff Kirsher /* Trigger an initialization. */
683b955f6caSJeff Kirsher outw(0x0001, ioaddr+LANCE_DATA);
684b955f6caSJeff Kirsher for (boguscnt = 100; boguscnt > 0; --boguscnt)
685b955f6caSJeff Kirsher if (inw(ioaddr+LANCE_DATA) & 0x0900)
686b955f6caSJeff Kirsher break;
687b955f6caSJeff Kirsher if (inw(ioaddr+LANCE_DATA) & 0x0100) {
688b955f6caSJeff Kirsher dev->dma = dma;
689b955f6caSJeff Kirsher printk(", DMA %d.\n", dev->dma);
690b955f6caSJeff Kirsher break;
691b955f6caSJeff Kirsher } else {
692b955f6caSJeff Kirsher flags=claim_dma_lock();
693b955f6caSJeff Kirsher disable_dma(dma);
694b955f6caSJeff Kirsher release_dma_lock(flags);
695b955f6caSJeff Kirsher free_dma(dma);
696b955f6caSJeff Kirsher }
697b955f6caSJeff Kirsher }
698b955f6caSJeff Kirsher if (i == 4) { /* Failure: bail. */
699b955f6caSJeff Kirsher printk("DMA detection failed.\n");
700b955f6caSJeff Kirsher goto out_tx;
701b955f6caSJeff Kirsher }
702b955f6caSJeff Kirsher }
703b955f6caSJeff Kirsher
704b955f6caSJeff Kirsher if (lance_version == 0 && dev->irq == 0) {
705b955f6caSJeff Kirsher /* We may auto-IRQ now that we have a DMA channel. */
706b955f6caSJeff Kirsher /* Trigger an initialization just for the interrupt. */
707b955f6caSJeff Kirsher unsigned long irq_mask;
708b955f6caSJeff Kirsher
709b955f6caSJeff Kirsher irq_mask = probe_irq_on();
710b955f6caSJeff Kirsher outw(0x0041, ioaddr+LANCE_DATA);
711b955f6caSJeff Kirsher
712b955f6caSJeff Kirsher mdelay(40);
713b955f6caSJeff Kirsher dev->irq = probe_irq_off(irq_mask);
714b955f6caSJeff Kirsher if (dev->irq == 0) {
715b955f6caSJeff Kirsher printk(" Failed to detect the 7990 IRQ line.\n");
716b955f6caSJeff Kirsher goto out_dma;
717b955f6caSJeff Kirsher }
718b955f6caSJeff Kirsher printk(" Auto-IRQ detected IRQ%d.\n", dev->irq);
719b955f6caSJeff Kirsher }
720b955f6caSJeff Kirsher
721b955f6caSJeff Kirsher if (chip_table[lp->chip_version].flags & LANCE_ENABLE_AUTOSELECT) {
722b955f6caSJeff Kirsher /* Turn on auto-select of media (10baseT or BNC) so that the user
723b955f6caSJeff Kirsher can watch the LEDs even if the board isn't opened. */
724b955f6caSJeff Kirsher outw(0x0002, ioaddr+LANCE_ADDR);
725b955f6caSJeff Kirsher /* Don't touch 10base2 power bit. */
726b955f6caSJeff Kirsher outw(inw(ioaddr+LANCE_BUS_IF) | 0x0002, ioaddr+LANCE_BUS_IF);
727b955f6caSJeff Kirsher }
728b955f6caSJeff Kirsher
729b955f6caSJeff Kirsher if (lance_debug > 0 && did_version++ == 0)
730b955f6caSJeff Kirsher printk(version);
731b955f6caSJeff Kirsher
732b955f6caSJeff Kirsher /* The LANCE-specific entries in the device structure. */
733b955f6caSJeff Kirsher dev->netdev_ops = &lance_netdev_ops;
734b955f6caSJeff Kirsher dev->watchdog_timeo = TX_TIMEOUT;
735b955f6caSJeff Kirsher
736b955f6caSJeff Kirsher err = register_netdev(dev);
737b955f6caSJeff Kirsher if (err)
738b955f6caSJeff Kirsher goto out_dma;
739b955f6caSJeff Kirsher return 0;
740b955f6caSJeff Kirsher out_dma:
741b955f6caSJeff Kirsher if (dev->dma != 4)
742b955f6caSJeff Kirsher free_dma(dev->dma);
743b955f6caSJeff Kirsher out_tx:
744b955f6caSJeff Kirsher kfree(lp->tx_bounce_buffs);
745b955f6caSJeff Kirsher out_rx:
746b955f6caSJeff Kirsher kfree((void*)lp->rx_buffs);
747b955f6caSJeff Kirsher out_lp:
748b955f6caSJeff Kirsher kfree(lp);
749b955f6caSJeff Kirsher return err;
750b955f6caSJeff Kirsher }
751b955f6caSJeff Kirsher
752b955f6caSJeff Kirsher
/* Open the device: claim the IRQ, put the ISA DMA channel in cascade
 * (pass-through) mode, point the chip at the init block via CSR1/CSR2,
 * and start it.  Returns 0 on success, -EAGAIN if no IRQ is available
 * or the requested line is already taken. */
static int
lance_open(struct net_device *dev)
{
	struct lance_private *lp = dev->ml_priv;
	int ioaddr = dev->base_addr;
	int i;

	/* irq == 0 means probing never found a usable line; without one
	   the card cannot signal completion, so fail early. */
	if (dev->irq == 0 ||
		request_irq(dev->irq, lance_interrupt, 0, dev->name, dev)) {
		return -EAGAIN;
	}

	/* We used to allocate DMA here, but that was silly.
	   DMA lines can't be shared! We now permanently allocate them. */

	/* Reset the LANCE */
	inw(ioaddr+LANCE_RESET);

	/* The DMA controller is used as a no-operation slave, "cascade mode".
	   (dma == 4 marks a bus-master board that needs no DMA channel.) */
	if (dev->dma != 4) {
		unsigned long flags=claim_dma_lock();
		enable_dma(dev->dma);
		set_dma_mode(dev->dma, DMA_MODE_CASCADE);
		release_dma_lock(flags);
	}

	/* Un-Reset the LANCE, needed only for the NE2100. */
	if (chip_table[lp->chip_version].flags & LANCE_MUST_UNRESET)
		outw(0, ioaddr+LANCE_RESET);

	if (chip_table[lp->chip_version].flags & LANCE_ENABLE_AUTOSELECT) {
		/* This is 79C960-specific: Turn on auto-select of media (AUI, BNC). */
		outw(0x0002, ioaddr+LANCE_ADDR);
		/* Only touch autoselect bit. */
		outw(inw(ioaddr+LANCE_BUS_IF) | 0x0002, ioaddr+LANCE_BUS_IF);
	}

	if (lance_debug > 1)
		printk("%s: lance_open() irq %d dma %d tx/rx rings %#x/%#x init %#x.\n",
			   dev->name, dev->irq, dev->dma,
			   (u32) isa_virt_to_bus(lp->tx_ring),
			   (u32) isa_virt_to_bus(lp->rx_ring),
			   (u32) isa_virt_to_bus(&lp->init_block));

	lance_init_ring(dev, GFP_KERNEL);
	/* Re-initialize the LANCE, and start it when done. */
	/* CSR1 <- low 16 bits of the init block's ISA bus address. */
	outw(0x0001, ioaddr+LANCE_ADDR);
	outw((short) (u32) isa_virt_to_bus(&lp->init_block), ioaddr+LANCE_DATA);
	/* CSR2 <- high bits of the init block's ISA bus address. */
	outw(0x0002, ioaddr+LANCE_ADDR);
	outw(((u32)isa_virt_to_bus(&lp->init_block)) >> 16, ioaddr+LANCE_DATA);

	/* CSR4 <- 0x0915.  NOTE(review): presumably masks/enables the
	   chip's auxiliary features — confirm against the AMD datasheet. */
	outw(0x0004, ioaddr+LANCE_ADDR);
	outw(0x0915, ioaddr+LANCE_DATA);

	/* CSR0 <- INIT: make the chip fetch and process the init block. */
	outw(0x0000, ioaddr+LANCE_ADDR);
	outw(0x0001, ioaddr+LANCE_DATA);

	netif_start_queue (dev);

	/* Bounded busy-wait for the init-done bit (0x0100) in CSR0. */
	i = 0;
	while (i++ < 100)
		if (inw(ioaddr+LANCE_DATA) & 0x0100)
			break;
	/*
	 * We used to clear the InitDone bit, 0x0100, here but Mark Stockton
	 * reports that doing so triggers a bug in the '974.
	 */
	/* CSR0 <- 0x0042: start the chip with interrupts enabled
	   (0x0040 is the same interrupt-enable bit the ISR rewrites). */
	outw(0x0042, ioaddr+LANCE_DATA);

	if (lance_debug > 2)
		printk("%s: LANCE open after %d ticks, init block %#x csr0 %4.4x.\n",
			   dev->name, i, (u32) isa_virt_to_bus(&lp->init_block), inw(ioaddr+LANCE_DATA));

	return 0;					/* Always succeed */
}
828b955f6caSJeff Kirsher
829b955f6caSJeff Kirsher /* The LANCE has been halted for one reason or another (busmaster memory
830b955f6caSJeff Kirsher arbitration error, Tx FIFO underflow, driver stopped it to reconfigure,
831b955f6caSJeff Kirsher etc.). Modern LANCE variants always reload their ring-buffer
832b955f6caSJeff Kirsher configuration when restarted, so we must reinitialize our ring
833b955f6caSJeff Kirsher context before restarting. As part of this reinitialization,
834b955f6caSJeff Kirsher find all packets still on the Tx ring and pretend that they had been
835b955f6caSJeff Kirsher sent (in effect, drop the packets on the floor) - the higher-level
836b955f6caSJeff Kirsher protocols will time out and retransmit. It'd be better to shuffle
837b955f6caSJeff Kirsher these skbs to a temp list and then actually re-Tx them after
838b955f6caSJeff Kirsher restarting the chip, but I'm too lazy to do so right now. dplatt@3do.com
839b955f6caSJeff Kirsher */
840b955f6caSJeff Kirsher
841b955f6caSJeff Kirsher static void
lance_purge_ring(struct net_device * dev)842b955f6caSJeff Kirsher lance_purge_ring(struct net_device *dev)
843b955f6caSJeff Kirsher {
844b955f6caSJeff Kirsher struct lance_private *lp = dev->ml_priv;
845b955f6caSJeff Kirsher int i;
846b955f6caSJeff Kirsher
847b955f6caSJeff Kirsher /* Free all the skbuffs in the Rx and Tx queues. */
848b955f6caSJeff Kirsher for (i = 0; i < RX_RING_SIZE; i++) {
849b955f6caSJeff Kirsher struct sk_buff *skb = lp->rx_skbuff[i];
850b955f6caSJeff Kirsher lp->rx_skbuff[i] = NULL;
851b955f6caSJeff Kirsher lp->rx_ring[i].base = 0; /* Not owned by LANCE chip. */
852b955f6caSJeff Kirsher if (skb)
853b955f6caSJeff Kirsher dev_kfree_skb_any(skb);
854b955f6caSJeff Kirsher }
855b955f6caSJeff Kirsher for (i = 0; i < TX_RING_SIZE; i++) {
856b955f6caSJeff Kirsher if (lp->tx_skbuff[i]) {
857b955f6caSJeff Kirsher dev_kfree_skb_any(lp->tx_skbuff[i]);
858b955f6caSJeff Kirsher lp->tx_skbuff[i] = NULL;
859b955f6caSJeff Kirsher }
860b955f6caSJeff Kirsher }
861b955f6caSJeff Kirsher }
862b955f6caSJeff Kirsher
863b955f6caSJeff Kirsher
864b955f6caSJeff Kirsher /* Initialize the LANCE Rx and Tx rings. */
865b955f6caSJeff Kirsher static void
lance_init_ring(struct net_device * dev,gfp_t gfp)866b955f6caSJeff Kirsher lance_init_ring(struct net_device *dev, gfp_t gfp)
867b955f6caSJeff Kirsher {
868b955f6caSJeff Kirsher struct lance_private *lp = dev->ml_priv;
869b955f6caSJeff Kirsher int i;
870b955f6caSJeff Kirsher
871b955f6caSJeff Kirsher lp->cur_rx = lp->cur_tx = 0;
872b955f6caSJeff Kirsher lp->dirty_rx = lp->dirty_tx = 0;
873b955f6caSJeff Kirsher
874b955f6caSJeff Kirsher for (i = 0; i < RX_RING_SIZE; i++) {
875b955f6caSJeff Kirsher struct sk_buff *skb;
876b955f6caSJeff Kirsher void *rx_buff;
877b955f6caSJeff Kirsher
878b955f6caSJeff Kirsher skb = alloc_skb(PKT_BUF_SZ, GFP_DMA | gfp);
879b955f6caSJeff Kirsher lp->rx_skbuff[i] = skb;
8805c8b73caSJon Mason if (skb)
881b955f6caSJeff Kirsher rx_buff = skb->data;
8825c8b73caSJon Mason else
883b955f6caSJeff Kirsher rx_buff = kmalloc(PKT_BUF_SZ, GFP_DMA | gfp);
884b0b815a3SGuofeng Yue if (!rx_buff)
885b955f6caSJeff Kirsher lp->rx_ring[i].base = 0;
886b955f6caSJeff Kirsher else
887b955f6caSJeff Kirsher lp->rx_ring[i].base = (u32)isa_virt_to_bus(rx_buff) | 0x80000000;
888b955f6caSJeff Kirsher lp->rx_ring[i].buf_length = -PKT_BUF_SZ;
889b955f6caSJeff Kirsher }
890b955f6caSJeff Kirsher /* The Tx buffer address is filled in as needed, but we do need to clear
891b955f6caSJeff Kirsher the upper ownership bit. */
892b955f6caSJeff Kirsher for (i = 0; i < TX_RING_SIZE; i++) {
893b955f6caSJeff Kirsher lp->tx_skbuff[i] = NULL;
894b955f6caSJeff Kirsher lp->tx_ring[i].base = 0;
895b955f6caSJeff Kirsher }
896b955f6caSJeff Kirsher
897b955f6caSJeff Kirsher lp->init_block.mode = 0x0000;
898b955f6caSJeff Kirsher for (i = 0; i < 6; i++)
899b955f6caSJeff Kirsher lp->init_block.phys_addr[i] = dev->dev_addr[i];
900b955f6caSJeff Kirsher lp->init_block.filter[0] = 0x00000000;
901b955f6caSJeff Kirsher lp->init_block.filter[1] = 0x00000000;
902b955f6caSJeff Kirsher lp->init_block.rx_ring = ((u32)isa_virt_to_bus(lp->rx_ring) & 0xffffff) | RX_RING_LEN_BITS;
903b955f6caSJeff Kirsher lp->init_block.tx_ring = ((u32)isa_virt_to_bus(lp->tx_ring) & 0xffffff) | TX_RING_LEN_BITS;
904b955f6caSJeff Kirsher }
905b955f6caSJeff Kirsher
906b955f6caSJeff Kirsher static void
lance_restart(struct net_device * dev,unsigned int csr0_bits,int must_reinit)907b955f6caSJeff Kirsher lance_restart(struct net_device *dev, unsigned int csr0_bits, int must_reinit)
908b955f6caSJeff Kirsher {
909b955f6caSJeff Kirsher struct lance_private *lp = dev->ml_priv;
910b955f6caSJeff Kirsher
911b955f6caSJeff Kirsher if (must_reinit ||
912b955f6caSJeff Kirsher (chip_table[lp->chip_version].flags & LANCE_MUST_REINIT_RING)) {
913b955f6caSJeff Kirsher lance_purge_ring(dev);
914b955f6caSJeff Kirsher lance_init_ring(dev, GFP_ATOMIC);
915b955f6caSJeff Kirsher }
916b955f6caSJeff Kirsher outw(0x0000, dev->base_addr + LANCE_ADDR);
917b955f6caSJeff Kirsher outw(csr0_bits, dev->base_addr + LANCE_DATA);
918b955f6caSJeff Kirsher }
919b955f6caSJeff Kirsher
920b955f6caSJeff Kirsher
/* Transmit watchdog callback: the Tx queue stalled past the watchdog
 * timeout.  Log CSR0 status, optionally dump both rings, stop the
 * chip, and restart it with a full ring re-init (queued packets are
 * dropped; upper layers will retransmit). */
static void lance_tx_timeout (struct net_device *dev, unsigned int txqueue)
{
	struct lance_private *lp = (struct lance_private *) dev->ml_priv;
	int ioaddr = dev->base_addr;

	outw (0, ioaddr + LANCE_ADDR);	/* Select CSR0 for the status read. */
	printk ("%s: transmit timed out, status %4.4x, resetting.\n",
		dev->name, inw (ioaddr + LANCE_DATA));
	outw (0x0004, ioaddr + LANCE_DATA);	/* Stop the chip. */
	dev->stats.tx_errors++;
#ifndef final_version
	if (lance_debug > 3) {
		int i;
		printk (" Ring data dump: dirty_tx %d cur_tx %d%s cur_rx %d.",
			lp->dirty_tx, lp->cur_tx, netif_queue_stopped(dev) ? " (full)" : "",
			lp->cur_rx);
		/* Lengths are stored negated in the rings; negate back for
		   readable output. */
		for (i = 0; i < RX_RING_SIZE; i++)
			printk ("%s %08x %04x %04x", i & 0x3 ? "" : "\n ",
				lp->rx_ring[i].base, -lp->rx_ring[i].buf_length,
				lp->rx_ring[i].msg_length);
		for (i = 0; i < TX_RING_SIZE; i++)
			printk ("%s %08x %04x %04x", i & 0x3 ? "" : "\n ",
				lp->tx_ring[i].base, -lp->tx_ring[i].length,
				lp->tx_ring[i].misc);
		printk ("\n");
	}
#endif
	/* must_reinit=1 forces a ring rebuild regardless of chip flags. */
	lance_restart (dev, 0x0043, 1);

	netif_trans_update(dev); /* prevent tx timeout */
	netif_wake_queue (dev);
}
953b955f6caSJeff Kirsher
954b955f6caSJeff Kirsher
/* Queue one skb for transmission.
 * Runs under lp->devlock so ring updates cannot race the interrupt
 * handler.  Buffers that extend past the 16MB ISA DMA limit are copied
 * into a per-slot bounce buffer.  Always returns NETDEV_TX_OK; when
 * the ring fills, the queue is stopped instead of rejecting the skb. */
static netdev_tx_t lance_start_xmit(struct sk_buff *skb,
				    struct net_device *dev)
{
	struct lance_private *lp = dev->ml_priv;
	int ioaddr = dev->base_addr;
	int entry;
	unsigned long flags;

	spin_lock_irqsave(&lp->devlock, flags);

	if (lance_debug > 3) {
		outw(0x0000, ioaddr+LANCE_ADDR);
		printk("%s: lance_start_xmit() called, csr0 %4.4x.\n", dev->name,
			   inw(ioaddr+LANCE_DATA));
		outw(0x0000, ioaddr+LANCE_DATA);
	}

	/* Fill in a Tx ring entry */

	/* Mask to ring buffer boundary. */
	entry = lp->cur_tx & TX_RING_MOD_MASK;

	/* Caution: the write order is important here, set the base address
	   with the "ownership" bits last. */

	/* The old LANCE chips doesn't automatically pad buffers to min. size.
	   Ring lengths are stored negated (two's complement). */
	if (chip_table[lp->chip_version].flags & LANCE_MUST_PAD) {
		if (skb->len < ETH_ZLEN) {
			if (skb_padto(skb, ETH_ZLEN))
				goto out;	/* Padding failed; skb was freed. */
			lp->tx_ring[entry].length = -ETH_ZLEN;
		}
		else
			lp->tx_ring[entry].length = -skb->len;
	} else
		lp->tx_ring[entry].length = -skb->len;

	lp->tx_ring[entry].misc = 0x0000;

	dev->stats.tx_bytes += skb->len;

	/* If any part of this buffer is >16M we must copy it to a low-memory
	   buffer. */
	if ((u32)isa_virt_to_bus(skb->data) + skb->len > 0x01000000) {
		if (lance_debug > 5)
			printk("%s: bouncing a high-memory packet (%#x).\n",
				   dev->name, (u32)isa_virt_to_bus(skb->data));
		skb_copy_from_linear_data(skb, &lp->tx_bounce_buffs[entry], skb->len);
		/* NOTE(review): 0x83000000 appears to be ownership plus
		   start/end-of-packet bits — confirm against the datasheet. */
		lp->tx_ring[entry].base =
			((u32)isa_virt_to_bus((lp->tx_bounce_buffs + entry)) & 0xffffff) | 0x83000000;
		/* Data already copied to the bounce buffer; drop the skb now. */
		dev_consume_skb_irq(skb);
	} else {
		/* The skb is freed by the Tx-done path in lance_interrupt(). */
		lp->tx_skbuff[entry] = skb;
		lp->tx_ring[entry].base = ((u32)isa_virt_to_bus(skb->data) & 0xffffff) | 0x83000000;
	}
	lp->cur_tx++;

	/* Trigger an immediate send poll. */
	outw(0x0000, ioaddr+LANCE_ADDR);
	outw(0x0048, ioaddr+LANCE_DATA);

	/* Ring full: stop the queue; the Tx-done interrupt wakes it. */
	if ((lp->cur_tx - lp->dirty_tx) >= TX_RING_SIZE)
		netif_stop_queue(dev);

out:
	spin_unlock_irqrestore(&lp->devlock, flags);
	return NETDEV_TX_OK;
}
1023b955f6caSJeff Kirsher
1024b955f6caSJeff Kirsher /* The LANCE interrupt handler. */
/* The LANCE interrupt handler.
 * Services Rx and Tx-done work while interrupt-source bits remain set
 * in CSR0, bounded by boguscnt iterations.  Serialized against the
 * transmit path via lp->devlock.  On FIFO or bus-master errors the
 * chip is stopped and restarted. */
static irqreturn_t lance_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct lance_private *lp;
	int csr0, ioaddr, boguscnt=10;	/* boguscnt bounds the service loop. */
	int must_restart;

	ioaddr = dev->base_addr;
	lp = dev->ml_priv;

	spin_lock (&lp->devlock);

	outw(0x00, dev->base_addr + LANCE_ADDR);	/* Select CSR0. */
	while ((csr0 = inw(dev->base_addr + LANCE_DATA)) & 0x8600 &&
	       --boguscnt >= 0) {
		/* Acknowledge all of the current interrupt sources ASAP. */
		outw(csr0 & ~0x004f, dev->base_addr + LANCE_DATA);

		must_restart = 0;

		if (lance_debug > 5)
			printk("%s: interrupt csr0=%#2.2x new csr=%#2.2x.\n",
				   dev->name, csr0, inw(dev->base_addr + LANCE_DATA));

		if (csr0 & 0x0400)			/* Rx interrupt */
			lance_rx(dev);

		if (csr0 & 0x0200) {		/* Tx-done interrupt */
			int dirty_tx = lp->dirty_tx;

			/* Reap every completed entry between dirty_tx and cur_tx. */
			while (dirty_tx < lp->cur_tx) {
				int entry = dirty_tx & TX_RING_MOD_MASK;
				int status = lp->tx_ring[entry].base;

				/* Negative means the 0x80000000 ownership bit is
				   still set: the chip hasn't sent this one yet. */
				if (status < 0)
					break;			/* It still hasn't been Txed */

				lp->tx_ring[entry].base = 0;

				if (status & 0x40000000) {
					/* There was an major error, log it. */
					int err_status = lp->tx_ring[entry].misc;
					dev->stats.tx_errors++;
					if (err_status & 0x0400)
						dev->stats.tx_aborted_errors++;
					if (err_status & 0x0800)
						dev->stats.tx_carrier_errors++;
					if (err_status & 0x1000)
						dev->stats.tx_window_errors++;
					if (err_status & 0x4000) {
						/* Ackk!  On FIFO errors the Tx unit is turned off! */
						dev->stats.tx_fifo_errors++;
						/* Remove this verbosity later! */
						printk("%s: Tx FIFO error! Status %4.4x.\n",
							   dev->name, csr0);
						/* Restart the chip. */
						must_restart = 1;
					}
				} else {
					if (status & 0x18000000)
						dev->stats.collisions++;
					dev->stats.tx_packets++;
				}

				/* We must free the original skb if it's not a data-only copy
				   in the bounce buffer. */
				if (lp->tx_skbuff[entry]) {
					dev_consume_skb_irq(lp->tx_skbuff[entry]);
					lp->tx_skbuff[entry] = NULL;
				}
				dirty_tx++;
			}

#ifndef final_version
			if (lp->cur_tx - dirty_tx >= TX_RING_SIZE) {
				printk("out-of-sync dirty pointer, %d vs. %d, full=%s.\n",
					   dirty_tx, lp->cur_tx,
					   netif_queue_stopped(dev) ? "yes" : "no");
				dirty_tx += TX_RING_SIZE;
			}
#endif

			/* if the ring is no longer full, accept more packets */
			if (netif_queue_stopped(dev) &&
			    dirty_tx > lp->cur_tx - TX_RING_SIZE + 2)
				netif_wake_queue (dev);

			lp->dirty_tx = dirty_tx;
		}

		/* Log misc errors. */
		if (csr0 & 0x4000)
			dev->stats.tx_errors++; /* Tx babble. */
		if (csr0 & 0x1000)
			dev->stats.rx_errors++; /* Missed a Rx frame. */
		if (csr0 & 0x0800) {
			printk("%s: Bus master arbitration failure, status %4.4x.\n",
				   dev->name, csr0);
			/* Restart the chip. */
			must_restart = 1;
		}

		if (must_restart) {
			/* stop the chip to clear the error condition, then restart */
			outw(0x0000, dev->base_addr + LANCE_ADDR);
			outw(0x0004, dev->base_addr + LANCE_DATA);
			lance_restart(dev, 0x0002, 0);
		}
	}

	/* Clear any other interrupt, and set interrupt enable. */
	outw(0x0000, dev->base_addr + LANCE_ADDR);
	outw(0x7940, dev->base_addr + LANCE_DATA);

	if (lance_debug > 4)
		printk("%s: exiting interrupt, csr%d=%#4.4x.\n",
			   dev->name, inw(ioaddr + LANCE_ADDR),
			   inw(dev->base_addr + LANCE_DATA));

	spin_unlock (&lp->devlock);
	return IRQ_HANDLED;
}
1147b955f6caSJeff Kirsher
/* Drain newly received frames from the Rx descriptor ring.
 *
 * Called from the interrupt handler.  Each ring entry whose sign bit
 * (the chip's OWN bit, bit 31 of .base) is clear belongs to the host:
 * we either account its error bits, or copy the frame out of the ISA
 * DMA buffer into a fresh skb and pass it up the stack, then hand the
 * entry back to the chip by setting the OWN bit again.
 * Always returns 0.
 */
static int
lance_rx(struct net_device *dev)
{
	struct lance_private *lp = dev->ml_priv;
	int entry = lp->cur_rx & RX_RING_MOD_MASK;
	int i;

	/* If we own the next entry, it's a new packet. Send it up. */
	while (lp->rx_ring[entry].base >= 0) {
		/* The status byte lives in the top 8 bits of the descriptor;
		   0x03 (STP|ENP, presumably -- matches LANCE RMD1 layout) means
		   a complete, error-free single-buffer frame. */
		int status = lp->rx_ring[entry].base >> 24;

		if (status != 0x03) {			/* There was an error. */
			/* There is a tricky error noted by John Murphy,
			   <murf@perftech.com> to Russ Nelson: Even with full-sized
			   buffers it's possible for a jabber packet to use two
			   buffers, with only the last correctly noting the error. */
			if (status & 0x01)	/* Only count a general error at the */
				dev->stats.rx_errors++; /* end of a packet.*/
			if (status & 0x20)
				dev->stats.rx_frame_errors++;
			if (status & 0x10)
				dev->stats.rx_over_errors++;
			if (status & 0x08)
				dev->stats.rx_crc_errors++;
			if (status & 0x04)
				dev->stats.rx_fifo_errors++;
			/* Clear the status byte, keeping the 24-bit buffer address. */
			lp->rx_ring[entry].base &= 0x03ffffff;
		}
		else
		{
			/* Malloc up new buffer, compatible with net3. */
			/* Strip the trailing 4 bytes (FCS, presumably) from the
			   reported message length. */
			short pkt_len = (lp->rx_ring[entry].msg_length & 0xfff)-4;
			struct sk_buff *skb;

			if(pkt_len<60)
			{
				printk("%s: Runt packet!\n",dev->name);
				dev->stats.rx_errors++;
			}
			else
			{
				skb = dev_alloc_skb(pkt_len+2);
				if (!skb)
				{
					printk("%s: Memory squeeze, deferring packet.\n", dev->name);
					/* Look ahead for an entry still owned by the chip
					   (base < 0).  If nearly the whole ring is already
					   host-owned we would wedge, so drop this frame and
					   recycle its entry to keep the ring moving. */
					for (i=0; i < RX_RING_SIZE; i++)
						if (lp->rx_ring[(entry+i) & RX_RING_MOD_MASK].base < 0)
							break;

					if (i > RX_RING_SIZE -2)
					{
						dev->stats.rx_dropped++;
						/* Set OWN: give the entry back to the chip. */
						lp->rx_ring[entry].base |= 0x80000000;
						lp->cur_rx++;
					}
					break;
				}
				skb_reserve(skb,2);	/* 16 byte align */
				skb_put(skb,pkt_len);	/* Make room */
				/* Copy out of the ISA DMA buffer; the low 24 bits of
				   .base hold the ISA bus address of the packet data. */
				skb_copy_to_linear_data(skb,
					(unsigned char *)isa_bus_to_virt((lp->rx_ring[entry].base & 0x00ffffff)),
					pkt_len);
				skb->protocol=eth_type_trans(skb,dev);
				netif_rx(skb);
				dev->stats.rx_packets++;
				dev->stats.rx_bytes += pkt_len;
			}
		}
		/* The docs say that the buffer length isn't touched, but Andrew Boyd
		   of QNX reports that some revs of the 79C965 clear it. */
		lp->rx_ring[entry].buf_length = -PKT_BUF_SZ;
		/* Return ownership of this entry to the chip and advance. */
		lp->rx_ring[entry].base |= 0x80000000;
		entry = (++lp->cur_rx) & RX_RING_MOD_MASK;
	}

	/* We should check that at least two ring entries are free. If not,
	   we should free one and mark stats->rx_dropped++. */

	return 0;
}
1228b955f6caSJeff Kirsher
/* Shut the interface down (ifconfig down / dev->stop).
 *
 * Stops the Tx queue, snapshots the missed-frame counter on chips that
 * have one, issues CSR0 STOP so the chip quits touching host memory,
 * releases the ISA DMA channel and IRQ, and frees any skbs still held
 * by the rings.  Always returns 0.
 */
static int
lance_close(struct net_device *dev)
{
	int ioaddr = dev->base_addr;
	struct lance_private *lp = dev->ml_priv;

	netif_stop_queue (dev);

	/* CSR112 is the missed-frame counter on chips flagged with
	   LANCE_HAS_MISSED_FRAME (same register lance_get_stats reads). */
	if (chip_table[lp->chip_version].flags & LANCE_HAS_MISSED_FRAME) {
		outw(112, ioaddr+LANCE_ADDR);
		dev->stats.rx_missed_errors = inw(ioaddr+LANCE_DATA);
	}
	/* Select CSR0 for the status read and STOP write below. */
	outw(0, ioaddr+LANCE_ADDR);

	if (lance_debug > 1)
		printk("%s: Shutting down ethercard, status was %2.2x.\n",
			   dev->name, inw(ioaddr+LANCE_DATA));

	/* We stop the LANCE here -- it occasionally polls
	   memory if we don't. (CSR0 STOP bit.) */
	outw(0x0004, ioaddr+LANCE_DATA);

	/* dma == 4 appears to encode "no ISA DMA channel in use"
	   (e.g. bus-master PCI variants) -- TODO confirm against probe code. */
	if (dev->dma != 4)
	{
		unsigned long flags=claim_dma_lock();
		disable_dma(dev->dma);
		release_dma_lock(flags);
	}
	free_irq(dev->irq, dev);

	lance_purge_ring(dev);

	return 0;
}
1263b955f6caSJeff Kirsher
lance_get_stats(struct net_device * dev)1264b955f6caSJeff Kirsher static struct net_device_stats *lance_get_stats(struct net_device *dev)
1265b955f6caSJeff Kirsher {
1266b955f6caSJeff Kirsher struct lance_private *lp = dev->ml_priv;
1267b955f6caSJeff Kirsher
1268b955f6caSJeff Kirsher if (chip_table[lp->chip_version].flags & LANCE_HAS_MISSED_FRAME) {
1269b955f6caSJeff Kirsher short ioaddr = dev->base_addr;
1270b955f6caSJeff Kirsher short saved_addr;
1271b955f6caSJeff Kirsher unsigned long flags;
1272b955f6caSJeff Kirsher
1273b955f6caSJeff Kirsher spin_lock_irqsave(&lp->devlock, flags);
1274b955f6caSJeff Kirsher saved_addr = inw(ioaddr+LANCE_ADDR);
1275b955f6caSJeff Kirsher outw(112, ioaddr+LANCE_ADDR);
1276b955f6caSJeff Kirsher dev->stats.rx_missed_errors = inw(ioaddr+LANCE_DATA);
1277b955f6caSJeff Kirsher outw(saved_addr, ioaddr+LANCE_ADDR);
1278b955f6caSJeff Kirsher spin_unlock_irqrestore(&lp->devlock, flags);
1279b955f6caSJeff Kirsher }
1280b955f6caSJeff Kirsher
1281b955f6caSJeff Kirsher return &dev->stats;
1282b955f6caSJeff Kirsher }
1283b955f6caSJeff Kirsher
1284b955f6caSJeff Kirsher /* Set or clear the multicast filter for this adaptor.
1285b955f6caSJeff Kirsher */
1286b955f6caSJeff Kirsher
set_multicast_list(struct net_device * dev)1287b955f6caSJeff Kirsher static void set_multicast_list(struct net_device *dev)
1288b955f6caSJeff Kirsher {
1289b955f6caSJeff Kirsher short ioaddr = dev->base_addr;
1290b955f6caSJeff Kirsher
1291b955f6caSJeff Kirsher outw(0, ioaddr+LANCE_ADDR);
1292b955f6caSJeff Kirsher outw(0x0004, ioaddr+LANCE_DATA); /* Temporarily stop the lance. */
1293b955f6caSJeff Kirsher
1294b955f6caSJeff Kirsher if (dev->flags&IFF_PROMISC) {
1295b955f6caSJeff Kirsher outw(15, ioaddr+LANCE_ADDR);
1296b955f6caSJeff Kirsher outw(0x8000, ioaddr+LANCE_DATA); /* Set promiscuous mode */
1297b955f6caSJeff Kirsher } else {
1298b955f6caSJeff Kirsher short multicast_table[4];
1299b955f6caSJeff Kirsher int i;
1300b955f6caSJeff Kirsher int num_addrs=netdev_mc_count(dev);
1301b955f6caSJeff Kirsher if(dev->flags&IFF_ALLMULTI)
1302b955f6caSJeff Kirsher num_addrs=1;
1303b955f6caSJeff Kirsher /* FIXIT: We don't use the multicast table, but rely on upper-layer filtering. */
1304b955f6caSJeff Kirsher memset(multicast_table, (num_addrs == 0) ? 0 : -1, sizeof(multicast_table));
1305b955f6caSJeff Kirsher for (i = 0; i < 4; i++) {
1306b955f6caSJeff Kirsher outw(8 + i, ioaddr+LANCE_ADDR);
1307b955f6caSJeff Kirsher outw(multicast_table[i], ioaddr+LANCE_DATA);
1308b955f6caSJeff Kirsher }
1309b955f6caSJeff Kirsher outw(15, ioaddr+LANCE_ADDR);
1310b955f6caSJeff Kirsher outw(0x0000, ioaddr+LANCE_DATA); /* Unset promiscuous mode */
1311b955f6caSJeff Kirsher }
1312b955f6caSJeff Kirsher
1313b955f6caSJeff Kirsher lance_restart(dev, 0x0142, 0); /* Resume normal operation */
1314b955f6caSJeff Kirsher
1315b955f6caSJeff Kirsher }
1316b955f6caSJeff Kirsher
1317