xref: /openbmc/linux/drivers/net/ethernet/3com/3c59x.c (revision 60772e48)
1 /* EtherLinkXL.c: A 3Com EtherLink PCI III/XL ethernet driver for linux. */
2 /*
3 	Written 1996-1999 by Donald Becker.
4 
5 	This software may be used and distributed according to the terms
6 	of the GNU General Public License, incorporated herein by reference.
7 
8 	This driver is for the 3Com "Vortex" and "Boomerang" series ethercards.
9 	Members of the series include Fast EtherLink 3c590/3c592/3c595/3c597
10 	and the EtherLink XL 3c900 and 3c905 cards.
11 
12 	Problem reports and questions should be directed to
13 	vortex@scyld.com
14 
15 	The author may be reached as becker@scyld.com, or C/O
16 	Scyld Computing Corporation
17 	410 Severn Ave., Suite 210
18 	Annapolis MD 21403
19 
20 */
21 
22 /*
23  * FIXME: This driver _could_ support MTU changing, but doesn't.  See Don's hamachi.c implementation
24  * as well as other drivers
25  *
26  * NOTE: If you make 'vortex_debug' a constant (#define vortex_debug 0) the driver shrinks by 2k
27  * due to dead code elimination.  There will be some performance benefits from this due to
28  * elimination of all the tests and reduced cache footprint.
29  */
30 
31 
32 #define DRV_NAME	"3c59x"
33 
34 
35 
36 /* A few values that may be tweaked. */
37 /* Keep the ring sizes a power of two for efficiency. */
38 #define TX_RING_SIZE	16
39 #define RX_RING_SIZE	32
40 #define PKT_BUF_SZ		1536			/* Size of each temporary Rx buffer.*/
41 
42 /* "Knobs" that adjust features and parameters. */
43 /* Set the copy breakpoint for the copy-only-tiny-frames scheme.
44    Setting to > 1512 effectively disables this feature. */
45 #ifndef __arm__
46 static int rx_copybreak = 200;
47 #else
48 /* ARM systems perform better by disregarding the bus-master
49    transfer capability of these cards. -- rmk */
50 static int rx_copybreak = 1513;
51 #endif
52 /* Allow setting MTU to a larger size, bypassing the normal ethernet setup. */
53 static const int mtu = 1500;
54 /* Maximum events (Rx packets, etc.) to handle at each interrupt. */
55 static int max_interrupt_work = 32;
56 /* Tx timeout interval (millisecs) */
57 static int watchdog = 5000;
58 
59 /* Allow aggregation of Tx interrupts.  Saves CPU load at the cost
60  * of possible Tx stalls if the system is blocking interrupts
61  * somewhere else.  Undefine this to disable.
62  */
63 #define tx_interrupt_mitigation 1
64 
65 /* Put out somewhat more debugging messages. (0: no msg, 1 minimal .. 6). */
66 #define vortex_debug debug
67 #ifdef VORTEX_DEBUG
68 static int vortex_debug = VORTEX_DEBUG;
69 #else
70 static int vortex_debug = 1;
71 #endif
72 
73 #include <linux/module.h>
74 #include <linux/kernel.h>
75 #include <linux/string.h>
76 #include <linux/timer.h>
77 #include <linux/errno.h>
78 #include <linux/in.h>
79 #include <linux/ioport.h>
80 #include <linux/interrupt.h>
81 #include <linux/pci.h>
82 #include <linux/mii.h>
83 #include <linux/init.h>
84 #include <linux/netdevice.h>
85 #include <linux/etherdevice.h>
86 #include <linux/skbuff.h>
87 #include <linux/ethtool.h>
88 #include <linux/highmem.h>
89 #include <linux/eisa.h>
90 #include <linux/bitops.h>
91 #include <linux/jiffies.h>
92 #include <linux/gfp.h>
93 #include <asm/irq.h>			/* For nr_irqs only. */
94 #include <asm/io.h>
95 #include <linux/uaccess.h>
96 
97 /* Kernel compatibility defines, some common to David Hinds' PCMCIA package.
98    This is only in the support-all-kernels source code. */
99 
100 #define RUN_AT(x) (jiffies + (x))
101 
102 #include <linux/delay.h>
103 
104 
105 static const char version[] =
106 	DRV_NAME ": Donald Becker and others.\n";
107 
108 MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
109 MODULE_DESCRIPTION("3Com 3c59x/3c9xx ethernet driver ");
110 MODULE_LICENSE("GPL");
111 
112 
113 /* Operational parameters that usually are not changed. */
114 
115 /* The Vortex size is twice that of the original EtherLinkIII series: the
116    runtime register window, window 1, is now always mapped in.
117    The Boomerang size is twice as large as the Vortex -- it has additional
118    bus master control registers. */
119 #define VORTEX_TOTAL_SIZE 0x20
120 #define BOOMERANG_TOTAL_SIZE 0x40
121 
122 /* Set iff a MII transceiver on any interface requires mdio preamble.
123    This is only set with the original DP83840 on older 3c905 boards, so the extra
124    code size of a per-interface flag is not worthwhile. */
125 static char mii_preamble_required;
126 
127 #define PFX DRV_NAME ": "
128 
129 
130 
131 /*
132 				Theory of Operation
133 
134 I. Board Compatibility
135 
136 This device driver is designed for the 3Com FastEtherLink and FastEtherLink
137 XL, 3Com's PCI to 10/100baseT adapters.  It also works with the 10Mbs
138 versions of the FastEtherLink cards.  The supported product IDs are
139   3c590, 3c592, 3c595, 3c597, 3c900, 3c905
140 
141 The related ISA 3c515 is supported with a separate driver, 3c515.c, included
142 with the kernel source or available from
143     cesdis.gsfc.nasa.gov:/pub/linux/drivers/3c515.html
144 
145 II. Board-specific settings
146 
147 PCI bus devices are configured by the system at boot time, so no jumpers
148 need to be set on the board.  The system BIOS should be set to assign the
149 PCI INTA signal to an otherwise unused system IRQ line.
150 
151 The EEPROM settings for media type and forced-full-duplex are observed.
152 The EEPROM media type should be left at the default "autoselect" unless using
153 10base2 or AUI connections which cannot be reliably detected.
154 
155 III. Driver operation
156 
157 The 3c59x series use an interface that's very similar to the previous 3c5x9
158 series.  The primary interface is two programmed-I/O FIFOs, with an
159 alternate single-contiguous-region bus-master transfer (see next).
160 
161 The 3c900 "Boomerang" series uses a full-bus-master interface with separate
162 lists of transmit and receive descriptors, similar to the AMD LANCE/PCnet,
163 DEC Tulip and Intel Speedo3.  The first chip version retains a compatible
164 programmed-I/O interface that has been removed in 'B' and subsequent board
165 revisions.
166 
167 One extension that is advertised in a very large font is that the adapters
168 are capable of being bus masters.  On the Vortex chip this capability was
169 only for a single contiguous region making it far less useful than the full
170 bus master capability.  There is a significant performance impact of taking
171 an extra interrupt or polling for the completion of each transfer, as well
172 as difficulty sharing the single transfer engine between the transmit and
173 receive threads.  Using DMA transfers is a win only with large blocks or
174 with the flawed versions of the Intel Orion motherboard PCI controller.
175 
176 The Boomerang chip's full-bus-master interface is useful, and has the
177 currently-unused advantages over other similar chips that queued transmit
178 packets may be reordered and receive buffer groups are associated with a
179 single frame.
180 
181 With full-bus-master support, this driver uses a "RX_COPYBREAK" scheme.
182 Rather than a fixed intermediate receive buffer, this scheme allocates
183 full-sized skbuffs as receive buffers.  The value RX_COPYBREAK is used as
184 the copying breakpoint: it is chosen to trade-off the memory wasted by
185 passing the full-sized skbuff to the queue layer for all frames vs. the
186 copying cost of copying a frame to a correctly-sized skbuff.
187 
188 IIIC. Synchronization
189 The driver runs as two independent, single-threaded flows of control.  One
190 is the send-packet routine, which enforces single-threaded use by the
191 dev->tbusy flag.  The other thread is the interrupt handler, which is single
192 threaded by the hardware and other software.
193 
194 IV. Notes
195 
196 Thanks to Cameron Spitzer and Terry Murphy of 3Com for providing development
197 3c590, 3c595, and 3c900 boards.
198 The name "Vortex" is the internal 3Com project name for the PCI ASIC, and
199 the EISA version is called "Demon".  According to Terry these names come
200 from rides at the local amusement park.
201 
202 The new chips support both ethernet (1.5K) and FDDI (4.5K) packet sizes!
203 This driver only supports ethernet packets because of the skbuff allocation
204 limit of 4K.
205 */
206 
207 /* This table drives the PCI probe routines.  It's mostly boilerplate in all
208    of the drivers, and will likely be provided by some future kernel.
209 */
210 enum pci_flags_bit {
211 	PCI_USES_MASTER=4,
212 };
213 
214 enum {	IS_VORTEX=1, IS_BOOMERANG=2, IS_CYCLONE=4, IS_TORNADO=8,
215 	EEPROM_8BIT=0x10,	/* AKPM: Uses 0x230 as the base bitmaps for EEPROM reads */
216 	HAS_PWR_CTRL=0x20, HAS_MII=0x40, HAS_NWAY=0x80, HAS_CB_FNS=0x100,
217 	INVERT_MII_PWR=0x200, INVERT_LED_PWR=0x400, MAX_COLLISION_RESET=0x800,
218 	EEPROM_OFFSET=0x1000, HAS_HWCKSM=0x2000, WNO_XCVR_PWR=0x4000,
219 	EXTRA_PREAMBLE=0x8000, EEPROM_RESET=0x10000, };
220 
221 enum vortex_chips {
222 	CH_3C590 = 0,
223 	CH_3C592,
224 	CH_3C597,
225 	CH_3C595_1,
226 	CH_3C595_2,
227 
228 	CH_3C595_3,
229 	CH_3C900_1,
230 	CH_3C900_2,
231 	CH_3C900_3,
232 	CH_3C900_4,
233 
234 	CH_3C900_5,
235 	CH_3C900B_FL,
236 	CH_3C905_1,
237 	CH_3C905_2,
238 	CH_3C905B_TX,
239 	CH_3C905B_1,
240 
241 	CH_3C905B_2,
242 	CH_3C905B_FX,
243 	CH_3C905C,
244 	CH_3C9202,
245 	CH_3C980,
246 	CH_3C9805,
247 
248 	CH_3CSOHO100_TX,
249 	CH_3C555,
250 	CH_3C556,
251 	CH_3C556B,
252 	CH_3C575,
253 
254 	CH_3C575_1,
255 	CH_3CCFE575,
256 	CH_3CCFE575CT,
257 	CH_3CCFE656,
258 	CH_3CCFEM656,
259 
260 	CH_3CCFEM656_1,
261 	CH_3C450,
262 	CH_3C920,
263 	CH_3C982A,
264 	CH_3C982B,
265 
266 	CH_905BT4,
267 	CH_920B_EMB_WNM,
268 };
269 
270 
271 /* note: this array directly indexed by above enums, and MUST
272  * be kept in sync with both the enums above, and the PCI device
273  * table below
274  */
275 static struct vortex_chip_info {
276 	const char *name;
277 	int flags;
278 	int drv_flags;
279 	int io_size;
280 } vortex_info_tbl[] = {
281 	{"3c590 Vortex 10Mbps",
282 	 PCI_USES_MASTER, IS_VORTEX, 32, },
283 	{"3c592 EISA 10Mbps Demon/Vortex",					/* AKPM: from Don's 3c59x_cb.c 0.49H */
284 	 PCI_USES_MASTER, IS_VORTEX, 32, },
285 	{"3c597 EISA Fast Demon/Vortex",					/* AKPM: from Don's 3c59x_cb.c 0.49H */
286 	 PCI_USES_MASTER, IS_VORTEX, 32, },
287 	{"3c595 Vortex 100baseTx",
288 	 PCI_USES_MASTER, IS_VORTEX, 32, },
289 	{"3c595 Vortex 100baseT4",
290 	 PCI_USES_MASTER, IS_VORTEX, 32, },
291 
292 	{"3c595 Vortex 100base-MII",
293 	 PCI_USES_MASTER, IS_VORTEX, 32, },
294 	{"3c900 Boomerang 10baseT",
295 	 PCI_USES_MASTER, IS_BOOMERANG|EEPROM_RESET, 64, },
296 	{"3c900 Boomerang 10Mbps Combo",
297 	 PCI_USES_MASTER, IS_BOOMERANG|EEPROM_RESET, 64, },
298 	{"3c900 Cyclone 10Mbps TPO",						/* AKPM: from Don's 0.99M */
299 	 PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, },
300 	{"3c900 Cyclone 10Mbps Combo",
301 	 PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, },
302 
303 	{"3c900 Cyclone 10Mbps TPC",						/* AKPM: from Don's 0.99M */
304 	 PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, },
305 	{"3c900B-FL Cyclone 10base-FL",
306 	 PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, },
307 	{"3c905 Boomerang 100baseTx",
308 	 PCI_USES_MASTER, IS_BOOMERANG|HAS_MII|EEPROM_RESET, 64, },
309 	{"3c905 Boomerang 100baseT4",
310 	 PCI_USES_MASTER, IS_BOOMERANG|HAS_MII|EEPROM_RESET, 64, },
311 	{"3C905B-TX Fast Etherlink XL PCI",
312 	 PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM|EXTRA_PREAMBLE, 128, },
313 	{"3c905B Cyclone 100baseTx",
314 	 PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM|EXTRA_PREAMBLE, 128, },
315 
316 	{"3c905B Cyclone 10/100/BNC",
317 	 PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM, 128, },
318 	{"3c905B-FX Cyclone 100baseFx",
319 	 PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, },
320 	{"3c905C Tornado",
321 	PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_HWCKSM|EXTRA_PREAMBLE, 128, },
322 	{"3c920B-EMB-WNM (ATI Radeon 9100 IGP)",
323 	 PCI_USES_MASTER, IS_TORNADO|HAS_MII|HAS_HWCKSM, 128, },
324 	{"3c980 Cyclone",
325 	 PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM|EXTRA_PREAMBLE, 128, },
326 
327 	{"3c980C Python-T",
328 	 PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM, 128, },
329 	{"3cSOHO100-TX Hurricane",
330 	 PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM|EXTRA_PREAMBLE, 128, },
331 	{"3c555 Laptop Hurricane",
332 	 PCI_USES_MASTER, IS_CYCLONE|EEPROM_8BIT|HAS_HWCKSM, 128, },
333 	{"3c556 Laptop Tornado",
334 	 PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|EEPROM_8BIT|HAS_CB_FNS|INVERT_MII_PWR|
335 									HAS_HWCKSM, 128, },
336 	{"3c556B Laptop Hurricane",
337 	 PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|EEPROM_OFFSET|HAS_CB_FNS|INVERT_MII_PWR|
338 	                                WNO_XCVR_PWR|HAS_HWCKSM, 128, },
339 
340 	{"3c575 [Megahertz] 10/100 LAN 	CardBus",
341 	PCI_USES_MASTER, IS_BOOMERANG|HAS_MII|EEPROM_8BIT, 128, },
342 	{"3c575 Boomerang CardBus",
343 	 PCI_USES_MASTER, IS_BOOMERANG|HAS_MII|EEPROM_8BIT, 128, },
344 	{"3CCFE575BT Cyclone CardBus",
345 	 PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT|
346 									INVERT_LED_PWR|HAS_HWCKSM, 128, },
347 	{"3CCFE575CT Tornado CardBus",
348 	 PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT|INVERT_MII_PWR|
349 									MAX_COLLISION_RESET|HAS_HWCKSM, 128, },
350 	{"3CCFE656 Cyclone CardBus",
351 	 PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT|INVERT_MII_PWR|
352 									INVERT_LED_PWR|HAS_HWCKSM, 128, },
353 
354 	{"3CCFEM656B Cyclone+Winmodem CardBus",
355 	 PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT|INVERT_MII_PWR|
356 									INVERT_LED_PWR|HAS_HWCKSM, 128, },
357 	{"3CXFEM656C Tornado+Winmodem CardBus",			/* From pcmcia-cs-3.1.5 */
358 	 PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT|INVERT_MII_PWR|
359 									MAX_COLLISION_RESET|HAS_HWCKSM, 128, },
360 	{"3c450 HomePNA Tornado",						/* AKPM: from Don's 0.99Q */
361 	 PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_HWCKSM, 128, },
362 	{"3c920 Tornado",
363 	 PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_HWCKSM, 128, },
364 	{"3c982 Hydra Dual Port A",
365 	 PCI_USES_MASTER, IS_TORNADO|HAS_HWCKSM|HAS_NWAY, 128, },
366 
367 	{"3c982 Hydra Dual Port B",
368 	 PCI_USES_MASTER, IS_TORNADO|HAS_HWCKSM|HAS_NWAY, 128, },
369 	{"3c905B-T4",
370 	 PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM|EXTRA_PREAMBLE, 128, },
371 	{"3c920B-EMB-WNM Tornado",
372 	 PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_HWCKSM, 128, },
373 
374 	{NULL,}, /* NULL terminated list. */
375 };
376 
377 
378 static const struct pci_device_id vortex_pci_tbl[] = {
379 	{ 0x10B7, 0x5900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C590 },
380 	{ 0x10B7, 0x5920, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C592 },
381 	{ 0x10B7, 0x5970, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C597 },
382 	{ 0x10B7, 0x5950, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C595_1 },
383 	{ 0x10B7, 0x5951, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C595_2 },
384 
385 	{ 0x10B7, 0x5952, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C595_3 },
386 	{ 0x10B7, 0x9000, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C900_1 },
387 	{ 0x10B7, 0x9001, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C900_2 },
388 	{ 0x10B7, 0x9004, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C900_3 },
389 	{ 0x10B7, 0x9005, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C900_4 },
390 
391 	{ 0x10B7, 0x9006, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C900_5 },
392 	{ 0x10B7, 0x900A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C900B_FL },
393 	{ 0x10B7, 0x9050, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905_1 },
394 	{ 0x10B7, 0x9051, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905_2 },
395 	{ 0x10B7, 0x9054, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905B_TX },
396 	{ 0x10B7, 0x9055, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905B_1 },
397 
398 	{ 0x10B7, 0x9058, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905B_2 },
399 	{ 0x10B7, 0x905A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905B_FX },
400 	{ 0x10B7, 0x9200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905C },
401 	{ 0x10B7, 0x9202, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C9202 },
402 	{ 0x10B7, 0x9800, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C980 },
403 	{ 0x10B7, 0x9805, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C9805 },
404 
405 	{ 0x10B7, 0x7646, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3CSOHO100_TX },
406 	{ 0x10B7, 0x5055, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C555 },
407 	{ 0x10B7, 0x6055, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C556 },
408 	{ 0x10B7, 0x6056, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C556B },
409 	{ 0x10B7, 0x5b57, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C575 },
410 
411 	{ 0x10B7, 0x5057, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C575_1 },
412 	{ 0x10B7, 0x5157, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3CCFE575 },
413 	{ 0x10B7, 0x5257, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3CCFE575CT },
414 	{ 0x10B7, 0x6560, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3CCFE656 },
415 	{ 0x10B7, 0x6562, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3CCFEM656 },
416 
417 	{ 0x10B7, 0x6564, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3CCFEM656_1 },
418 	{ 0x10B7, 0x4500, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C450 },
419 	{ 0x10B7, 0x9201, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C920 },
420 	{ 0x10B7, 0x1201, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C982A },
421 	{ 0x10B7, 0x1202, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C982B },
422 
423 	{ 0x10B7, 0x9056, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_905BT4 },
424 	{ 0x10B7, 0x9210, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_920B_EMB_WNM },
425 
426 	{0,}						/* 0 terminated list. */
427 };
428 MODULE_DEVICE_TABLE(pci, vortex_pci_tbl);
429 
430 
431 /* Operational definitions.
432    These are not used by other compilation units and thus are not
433    exported in a ".h" file.
434 
435    First the windows.  There are eight register windows, with the command
436    and status registers available in each.
437    */
438 #define EL3_CMD 0x0e
439 #define EL3_STATUS 0x0e
440 
441 /* The top five bits written to EL3_CMD are a command, the lower
442    11 bits are the parameter, if applicable.
443    Note that 11 parameter bits were fine for ethernet, but the new chip
444    can handle FDDI length frames (~4500 octets) and now parameters count
445    32-bit 'Dwords' rather than octets. */
446 
447 enum vortex_cmd {
448 	TotalReset = 0<<11, SelectWindow = 1<<11, StartCoax = 2<<11,
449 	RxDisable = 3<<11, RxEnable = 4<<11, RxReset = 5<<11,
450 	UpStall = 6<<11, UpUnstall = (6<<11)+1,
451 	DownStall = (6<<11)+2, DownUnstall = (6<<11)+3,
452 	RxDiscard = 8<<11, TxEnable = 9<<11, TxDisable = 10<<11, TxReset = 11<<11,
453 	FakeIntr = 12<<11, AckIntr = 13<<11, SetIntrEnb = 14<<11,
454 	SetStatusEnb = 15<<11, SetRxFilter = 16<<11, SetRxThreshold = 17<<11,
455 	SetTxThreshold = 18<<11, SetTxStart = 19<<11,
456 	StartDMAUp = 20<<11, StartDMADown = (20<<11)+1, StatsEnable = 21<<11,
457 	StatsDisable = 22<<11, StopCoax = 23<<11, SetFilterBit = 25<<11,};
458 
459 /* The SetRxFilter command accepts the following classes: */
460 enum RxFilter {
461 	RxStation = 1, RxMulticast = 2, RxBroadcast = 4, RxProm = 8 };
462 
463 /* Bits in the general status register. */
464 enum vortex_status {
465 	IntLatch = 0x0001, HostError = 0x0002, TxComplete = 0x0004,
466 	TxAvailable = 0x0008, RxComplete = 0x0010, RxEarly = 0x0020,
467 	IntReq = 0x0040, StatsFull = 0x0080,
468 	DMADone = 1<<8, DownComplete = 1<<9, UpComplete = 1<<10,
469 	DMAInProgress = 1<<11,			/* DMA controller is still busy.*/
470 	CmdInProgress = 1<<12,			/* EL3_CMD is still busy.*/
471 };
472 
473 /* Register window 1 offsets, the window used in normal operation.
474    On the Vortex this window is always mapped at offsets 0x10-0x1f. */
475 enum Window1 {
476 	TX_FIFO = 0x10,  RX_FIFO = 0x10,  RxErrors = 0x14,
477 	RxStatus = 0x18,  Timer=0x1A, TxStatus = 0x1B,
478 	TxFree = 0x1C, /* Remaining free bytes in Tx buffer. */
479 };
480 enum Window0 {
481 	Wn0EepromCmd = 10,		/* Window 0: EEPROM command register. */
482 	Wn0EepromData = 12,		/* Window 0: EEPROM results register. */
483 	IntrStatus=0x0E,		/* Valid in all windows. */
484 };
485 enum Win0_EEPROM_bits {
486 	EEPROM_Read = 0x80, EEPROM_WRITE = 0x40, EEPROM_ERASE = 0xC0,
487 	EEPROM_EWENB = 0x30,		/* Enable erasing/writing for 10 msec. */
488 	EEPROM_EWDIS = 0x00,		/* Disable EWENB before 10 msec timeout. */
489 };
490 /* EEPROM locations. */
491 enum eeprom_offset {
492 	PhysAddr01=0, PhysAddr23=1, PhysAddr45=2, ModelID=3,
493 	EtherLink3ID=7, IFXcvrIO=8, IRQLine=9,
494 	NodeAddr01=10, NodeAddr23=11, NodeAddr45=12,
495 	DriverTune=13, Checksum=15};
496 
497 enum Window2 {			/* Window 2. */
498 	Wn2_ResetOptions=12,
499 };
500 enum Window3 {			/* Window 3: MAC/config bits. */
501 	Wn3_Config=0, Wn3_MaxPktSize=4, Wn3_MAC_Ctrl=6, Wn3_Options=8,
502 };
503 
504 #define BFEXT(value, offset, bitcount)  \
505     ((((unsigned long)(value)) >> (offset)) & ((1 << (bitcount)) - 1))
506 
507 #define BFINS(lhs, rhs, offset, bitcount)					\
508 	(((lhs) & ~((((1 << (bitcount)) - 1)) << (offset))) |	\
509 	(((rhs) & ((1 << (bitcount)) - 1)) << (offset)))
510 
511 #define RAM_SIZE(v)		BFEXT(v, 0, 3)
512 #define RAM_WIDTH(v)	BFEXT(v, 3, 1)
513 #define RAM_SPEED(v)	BFEXT(v, 4, 2)
514 #define ROM_SIZE(v)		BFEXT(v, 6, 2)
515 #define RAM_SPLIT(v)	BFEXT(v, 16, 2)
516 #define XCVR(v)			BFEXT(v, 20, 4)
517 #define AUTOSELECT(v)	BFEXT(v, 24, 1)
518 
519 enum Window4 {		/* Window 4: Xcvr/media bits. */
520 	Wn4_FIFODiag = 4, Wn4_NetDiag = 6, Wn4_PhysicalMgmt=8, Wn4_Media = 10,
521 };
522 enum Win4_Media_bits {
523 	Media_SQE = 0x0008,		/* Enable SQE error counting for AUI. */
524 	Media_10TP = 0x00C0,	/* Enable link beat and jabber for 10baseT. */
525 	Media_Lnk = 0x0080,		/* Enable just link beat for 100TX/100FX. */
526 	Media_LnkBeat = 0x0800,
527 };
528 enum Window7 {					/* Window 7: Bus Master control. */
529 	Wn7_MasterAddr = 0, Wn7_VlanEtherType=4, Wn7_MasterLen = 6,
530 	Wn7_MasterStatus = 12,
531 };
532 /* Boomerang bus master control registers. */
533 enum MasterCtrl {
534 	PktStatus = 0x20, DownListPtr = 0x24, FragAddr = 0x28, FragLen = 0x2c,
535 	TxFreeThreshold = 0x2f, UpPktStatus = 0x30, UpListPtr = 0x38,
536 };
537 
538 /* The Rx and Tx descriptor lists.
539    Caution Alpha hackers: these types are 32 bits!  Note also the 8 byte
540    alignment constraint on tx_ring[] and rx_ring[]. */
541 #define LAST_FRAG 	0x80000000			/* Last Addr/Len pair in descriptor. */
542 #define DN_COMPLETE	0x00010000			/* This packet has been downloaded */
543 struct boom_rx_desc {
544 	__le32 next;					/* Last entry points to 0.   */
545 	__le32 status;
546 	__le32 addr;					/* Up to 63 addr/len pairs possible. */
547 	__le32 length;					/* Set LAST_FRAG to indicate last pair. */
548 };
549 /* Values for the Rx status entry. */
550 enum rx_desc_status {
551 	RxDComplete=0x00008000, RxDError=0x4000,
552 	/* See boomerang_rx() for actual error bits */
553 	IPChksumErr=1<<25, TCPChksumErr=1<<26, UDPChksumErr=1<<27,
554 	IPChksumValid=1<<29, TCPChksumValid=1<<30, UDPChksumValid=1<<31,
555 };
556 
/*
 * Tx descriptor layout.  When the kernel exposes MAX_SKB_FRAGS we build
 * scatter/gather ("zerocopy") descriptors carrying one addr/len pair per
 * possible skb fragment plus one for the linear head; otherwise a single
 * addr/len pair is used.  The last pair in use is tagged with LAST_FRAG
 * in its length word (see LAST_FRAG above).
 */
557 #ifdef MAX_SKB_FRAGS
558 #define DO_ZEROCOPY 1
559 #else
560 #define DO_ZEROCOPY 0
561 #endif
562 
563 struct boom_tx_desc {
564 	__le32 next;					/* Last entry points to 0.   */
565 	__le32 status;					/* bits 0:12 length, others see below.  */
566 #if DO_ZEROCOPY
567 	struct {
568 		__le32 addr;
569 		__le32 length;
570 	} frag[1+MAX_SKB_FRAGS];
571 #else
572 		__le32 addr;
573 		__le32 length;
574 #endif
575 };
576 
577 /* Values for the Tx status entry. */
578 enum tx_desc_status {
579 	CRCDisable=0x2000, TxDComplete=0x8000,
580 	AddIPChksum=0x02000000, AddTCPChksum=0x04000000, AddUDPChksum=0x08000000,
581 	TxIntrUploaded=0x80000000,		/* IRQ when in FIFO, but maybe not sent. */
582 };
583 
584 /* Chip features we care about in vp->capabilities, read from the EEPROM. */
585 enum ChipCaps { CapBusMaster=0x20, CapPwrMgmt=0x2000 };
586 
587 struct vortex_extra_stats {
588 	unsigned long tx_deferred;
589 	unsigned long tx_max_collisions;
590 	unsigned long tx_multiple_collisions;
591 	unsigned long tx_single_collisions;
592 	unsigned long rx_bad_ssd;
593 };
594 
/*
 * Per-adapter driver state, reached via netdev_priv().  Holds the Rx/Tx
 * descriptor rings and their DMA handles, the in-flight skb tables, media
 * selection state, cached interrupt-enable masks, and the three spinlocks
 * that serialise hardware access (see comments on 'lock' below for the
 * lock hierarchy).
 */
595 struct vortex_private {
596 	/* The Rx and Tx rings should be quad-word-aligned. */
597 	struct boom_rx_desc* rx_ring;
598 	struct boom_tx_desc* tx_ring;
599 	dma_addr_t rx_ring_dma;
600 	dma_addr_t tx_ring_dma;
601 	/* The addresses of transmit- and receive-in-place skbuffs. */
602 	struct sk_buff* rx_skbuff[RX_RING_SIZE];
603 	struct sk_buff* tx_skbuff[TX_RING_SIZE];
604 	unsigned int cur_rx, cur_tx;		/* The next free ring entry */
605 	unsigned int dirty_tx;	/* The ring entries to be free()ed. */
606 	struct vortex_extra_stats xstats;	/* NIC-specific extra stats */
607 	struct sk_buff *tx_skb;				/* Packet being eaten by bus master ctrl.  */
608 	dma_addr_t tx_skb_dma;				/* Allocated DMA address for bus master ctrl DMA.   */
609 
610 	/* PCI configuration space information. */
611 	struct device *gendev;
612 	void __iomem *ioaddr;			/* IO address space */
613 	void __iomem *cb_fn_base;		/* CardBus function status addr space. */
614 
615 	/* Some values here only for performance evaluation and path-coverage */
616 	int rx_nocopy, rx_copy, queued_packet, rx_csumhits;
617 	int card_idx;
618 
619 	/* The remainder are related to chip state, mostly media selection. */
620 	struct timer_list timer;			/* Media selection timer. */
621 	int options;						/* User-settable misc. driver options. */
622 	unsigned int media_override:4, 		/* Passed-in media type. */
623 		default_media:4,				/* Read from the EEPROM/Wn3_Config. */
624 		full_duplex:1, autoselect:1,
625 		bus_master:1,					/* Vortex can only do a fragment bus-m. */
626 		full_bus_master_tx:1, full_bus_master_rx:2, /* Boomerang  */
627 		flow_ctrl:1,					/* Use 802.3x flow control (PAUSE only) */
628 		partner_flow_ctrl:1,			/* Partner supports flow control */
629 		has_nway:1,
630 		enable_wol:1,					/* Wake-on-LAN is enabled */
631 		pm_state_valid:1,				/* pci_dev->saved_config_space has sane contents */
632 		open:1,
633 		medialock:1,
634 		large_frames:1,			/* accept large frames */
635 		handling_irq:1;			/* private in_irq indicator */
636 	/* {get|set}_wol operations are already serialized by rtnl.
637 	 * no additional locking is required for the enable_wol and acpi_set_WOL()
638 	 */
639 	int drv_flags;
640 	u16 status_enable;
641 	u16 intr_enable;
642 	u16 available_media;				/* From Wn3_Options. */
643 	u16 capabilities, info1, info2;		/* Various, from EEPROM. */
644 	u16 advertising;					/* NWay media advertisement */
645 	unsigned char phys[2];				/* MII device addresses. */
646 	u16 deferred;						/* Resend these interrupts when we
647 										 * bale from the ISR */
648 	u16 io_size;						/* Size of PCI region (for release_region) */
649 
650 	/* Serialises access to hardware other than MII and variables below.
651 	 * The lock hierarchy is rtnl_lock > {lock, mii_lock} > window_lock. */
652 	spinlock_t lock;
653 
654 	spinlock_t mii_lock;		/* Serialises access to MII */
655 	struct mii_if_info mii;		/* MII lib hooks/info */
656 	spinlock_t window_lock;		/* Serialises access to windowed regs */
657 	int window;			/* Register window */
658 };
659 
660 static void window_set(struct vortex_private *vp, int window)
661 {
662 	if (window != vp->window) {
663 		iowrite16(SelectWindow + window, vp->ioaddr + EL3_CMD);
664 		vp->window = window;
665 	}
666 }
667 
/*
 * Generate window_read{8,16,32}() and window_write{8,16,32}() accessors.
 * Each helper takes vp->window_lock with interrupts disabled, selects the
 * requested register window via window_set(), performs the I/O at the
 * given register offset, and releases the lock — so windowed registers
 * can be accessed safely regardless of which window was last selected.
 */
668 #define DEFINE_WINDOW_IO(size)						\
669 static u ## size							\
670 window_read ## size(struct vortex_private *vp, int window, int addr)	\
671 {									\
672 	unsigned long flags;						\
673 	u ## size ret;							\
674 	spin_lock_irqsave(&vp->window_lock, flags);			\
675 	window_set(vp, window);						\
676 	ret = ioread ## size(vp->ioaddr + addr);			\
677 	spin_unlock_irqrestore(&vp->window_lock, flags);		\
678 	return ret;							\
679 }									\
680 static void								\
681 window_write ## size(struct vortex_private *vp, u ## size value,	\
682 		     int window, int addr)				\
683 {									\
684 	unsigned long flags;						\
685 	spin_lock_irqsave(&vp->window_lock, flags);			\
686 	window_set(vp, window);						\
687 	iowrite ## size(value, vp->ioaddr + addr);			\
688 	spin_unlock_irqrestore(&vp->window_lock, flags);		\
689 }
/* Instantiate the 8-, 16- and 32-bit variants. */
690 DEFINE_WINDOW_IO(8)
691 DEFINE_WINDOW_IO(16)
692 DEFINE_WINDOW_IO(32)
693 
694 #ifdef CONFIG_PCI
695 #define DEVICE_PCI(dev) ((dev_is_pci(dev)) ? to_pci_dev((dev)) : NULL)
696 #else
697 #define DEVICE_PCI(dev) NULL
698 #endif
699 
700 #define VORTEX_PCI(vp)							\
701 	((struct pci_dev *) (((vp)->gendev) ? DEVICE_PCI((vp)->gendev) : NULL))
702 
703 #ifdef CONFIG_EISA
704 #define DEVICE_EISA(dev) (((dev)->bus == &eisa_bus_type) ? to_eisa_device((dev)) : NULL)
705 #else
706 #define DEVICE_EISA(dev) NULL
707 #endif
708 
709 #define VORTEX_EISA(vp)							\
710 	((struct eisa_device *) (((vp)->gendev) ? DEVICE_EISA((vp)->gendev) : NULL))
711 
712 /* The action to take with a media selection timer tick.
713    Note that we deviate from the 3Com order by checking 10base2 before AUI.
714  */
715 enum xcvr_types {
716 	XCVR_10baseT=0, XCVR_AUI, XCVR_10baseTOnly, XCVR_10base2, XCVR_100baseTx,
717 	XCVR_100baseFx, XCVR_MII=6, XCVR_NWAY=8, XCVR_ExtMII=9, XCVR_Default=10,
718 };
719 
/*
 * Media-selection table, indexed by enum xcvr_types.  'next' chains the
 * media types so the autoselect timer can step through them in turn;
 * 'mask' is the transceiver-present bit checked in Wn3_Config; 'wait'
 * is the delay (in jiffies) before the link status is checked.
 */
720 static const struct media_table {
721 	char *name;
722 	unsigned int media_bits:16,		/* Bits to set in Wn4_Media register. */
723 		mask:8,						/* The transceiver-present bit in Wn3_Config.*/
724 		next:8;						/* The media type to try next. */
725 	int wait;						/* Time before we check media status. */
726 } media_tbl[] = {
727   {	"10baseT",   Media_10TP,0x08, XCVR_10base2, (14*HZ)/10},
728   { "10Mbs AUI", Media_SQE, 0x20, XCVR_Default, (1*HZ)/10},
729   { "undefined", 0,			0x80, XCVR_10baseT, 10000},
730   { "10base2",   0,			0x10, XCVR_AUI,		(1*HZ)/10},
731   { "100baseTX", Media_Lnk, 0x02, XCVR_100baseFx, (14*HZ)/10},
732   { "100baseFX", Media_Lnk, 0x04, XCVR_MII,		(14*HZ)/10},
733   { "MII",		 0,			0x41, XCVR_10baseT, 3*HZ },
734   { "undefined", 0,			0x01, XCVR_10baseT, 10000},
735   { "Autonegotiate", 0,		0x41, XCVR_10baseT, 3*HZ},
736   { "MII-External",	 0,		0x41, XCVR_10baseT, 3*HZ },
737   { "Default",	 0,			0xFF, XCVR_10baseT, 10000},
738 };
739 
740 static struct {
741 	const char str[ETH_GSTRING_LEN];
742 } ethtool_stats_keys[] = {
743 	{ "tx_deferred" },
744 	{ "tx_max_collisions" },
745 	{ "tx_multiple_collisions" },
746 	{ "tx_single_collisions" },
747 	{ "rx_bad_ssd" },
748 };
749 
750 /* number of ETHTOOL_GSTATS u64's */
751 #define VORTEX_NUM_STATS    5
752 
753 static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq,
754 				   int chip_idx, int card_idx);
755 static int vortex_up(struct net_device *dev);
756 static void vortex_down(struct net_device *dev, int final);
757 static int vortex_open(struct net_device *dev);
758 static void mdio_sync(struct vortex_private *vp, int bits);
759 static int mdio_read(struct net_device *dev, int phy_id, int location);
760 static void mdio_write(struct net_device *vp, int phy_id, int location, int value);
761 static void vortex_timer(struct timer_list *t);
762 static netdev_tx_t vortex_start_xmit(struct sk_buff *skb,
763 				     struct net_device *dev);
764 static netdev_tx_t boomerang_start_xmit(struct sk_buff *skb,
765 					struct net_device *dev);
766 static int vortex_rx(struct net_device *dev);
767 static int boomerang_rx(struct net_device *dev);
768 static irqreturn_t vortex_interrupt(int irq, void *dev_id);
769 static irqreturn_t boomerang_interrupt(int irq, void *dev_id);
770 static int vortex_close(struct net_device *dev);
771 static void dump_tx_ring(struct net_device *dev);
772 static void update_stats(void __iomem *ioaddr, struct net_device *dev);
773 static struct net_device_stats *vortex_get_stats(struct net_device *dev);
774 static void set_rx_mode(struct net_device *dev);
775 #ifdef CONFIG_PCI
776 static int vortex_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
777 #endif
778 static void vortex_tx_timeout(struct net_device *dev);
779 static void acpi_set_WOL(struct net_device *dev);
780 static const struct ethtool_ops vortex_ethtool_ops;
781 static void set_8021q_mode(struct net_device *dev, int enable);
782 
/* This driver uses 'options' to pass the media type, full-duplex flag, etc. */
/* Option count limit only -- unlimited interfaces are supported. */
#define MAX_UNITS 8
/* Per-NIC tuning arrays, indexed by probe order; -1 means "not set, use the
 * driver default".  The matching global_* fallbacks below are consulted when
 * the per-NIC entry is unset. */
static int options[MAX_UNITS] = { [0 ... MAX_UNITS-1] = -1 };
static int full_duplex[MAX_UNITS] = {[0 ... MAX_UNITS-1] = -1 };
static int hw_checksums[MAX_UNITS] = {[0 ... MAX_UNITS-1] = -1 };
static int flow_ctrl[MAX_UNITS] = {[0 ... MAX_UNITS-1] = -1 };
static int enable_wol[MAX_UNITS] = {[0 ... MAX_UNITS-1] = -1 };
static int use_mmio[MAX_UNITS] = {[0 ... MAX_UNITS-1] = -1 };
static int global_options = -1;
static int global_full_duplex = -1;
static int global_enable_wol = -1;
static int global_use_mmio = -1;

/* Variables to work-around the Compaq PCI BIOS32 problem. */
static int compaq_ioaddr, compaq_irq, compaq_device_id = 0x5900;
static struct net_device *compaq_net_device;

/* Running count of NICs claimed so far (PCI, EISA and Compaq combined). */
static int vortex_cards_found;
802 
803 module_param(debug, int, 0);
804 module_param(global_options, int, 0);
805 module_param_array(options, int, NULL, 0);
806 module_param(global_full_duplex, int, 0);
807 module_param_array(full_duplex, int, NULL, 0);
808 module_param_array(hw_checksums, int, NULL, 0);
809 module_param_array(flow_ctrl, int, NULL, 0);
810 module_param(global_enable_wol, int, 0);
811 module_param_array(enable_wol, int, NULL, 0);
812 module_param(rx_copybreak, int, 0);
813 module_param(max_interrupt_work, int, 0);
814 module_param_hw(compaq_ioaddr, int, ioport, 0);
815 module_param_hw(compaq_irq, int, irq, 0);
816 module_param(compaq_device_id, int, 0);
817 module_param(watchdog, int, 0);
818 module_param(global_use_mmio, int, 0);
819 module_param_array(use_mmio, int, NULL, 0);
820 MODULE_PARM_DESC(debug, "3c59x debug level (0-6)");
821 MODULE_PARM_DESC(options, "3c59x: Bits 0-3: media type, bit 4: bus mastering, bit 9: full duplex");
822 MODULE_PARM_DESC(global_options, "3c59x: same as options, but applies to all NICs if options is unset");
823 MODULE_PARM_DESC(full_duplex, "3c59x full duplex setting(s) (1)");
824 MODULE_PARM_DESC(global_full_duplex, "3c59x: same as full_duplex, but applies to all NICs if full_duplex is unset");
825 MODULE_PARM_DESC(hw_checksums, "3c59x Hardware checksum checking by adapter(s) (0-1)");
826 MODULE_PARM_DESC(flow_ctrl, "3c59x 802.3x flow control usage (PAUSE only) (0-1)");
827 MODULE_PARM_DESC(enable_wol, "3c59x: Turn on Wake-on-LAN for adapter(s) (0-1)");
828 MODULE_PARM_DESC(global_enable_wol, "3c59x: same as enable_wol, but applies to all NICs if enable_wol is unset");
829 MODULE_PARM_DESC(rx_copybreak, "3c59x copy breakpoint for copy-only-tiny-frames");
830 MODULE_PARM_DESC(max_interrupt_work, "3c59x maximum events handled per interrupt");
831 MODULE_PARM_DESC(compaq_ioaddr, "3c59x PCI I/O base address (Compaq BIOS problem workaround)");
832 MODULE_PARM_DESC(compaq_irq, "3c59x PCI IRQ number (Compaq BIOS problem workaround)");
833 MODULE_PARM_DESC(compaq_device_id, "3c59x PCI device ID (Compaq BIOS problem workaround)");
834 MODULE_PARM_DESC(watchdog, "3c59x transmit timeout in milliseconds");
835 MODULE_PARM_DESC(global_use_mmio, "3c59x: same as use_mmio, but applies to all NICs if options is unset");
836 MODULE_PARM_DESC(use_mmio, "3c59x: use memory-mapped PCI I/O resource (0-1)");
837 
838 #ifdef CONFIG_NET_POLL_CONTROLLER
839 static void poll_vortex(struct net_device *dev)
840 {
841 	struct vortex_private *vp = netdev_priv(dev);
842 	unsigned long flags;
843 	local_irq_save(flags);
844 	(vp->full_bus_master_rx ? boomerang_interrupt:vortex_interrupt)(dev->irq,dev);
845 	local_irq_restore(flags);
846 }
847 #endif
848 
849 #ifdef CONFIG_PM
850 
/* System-sleep callback: quiesce the interface if it is up.  Always
 * succeeds; an absent or stopped netdev needs no work. */
static int vortex_suspend(struct device *dev)
{
	struct net_device *ndev = pci_get_drvdata(to_pci_dev(dev));

	if (ndev && netif_running(ndev)) {
		netif_device_detach(ndev);
		vortex_down(ndev, 1);
	}

	return 0;
}
864 
/* System-resume callback: re-run the bring-up path and reattach the
 * netdev.  Returns the vortex_up() error if re-initialization fails. */
static int vortex_resume(struct device *dev)
{
	struct net_device *ndev = pci_get_drvdata(to_pci_dev(dev));
	int err = 0;

	if (ndev && netif_running(ndev)) {
		err = vortex_up(ndev);
		if (!err)
			netif_device_attach(ndev);
	}

	return err;
}
882 
/* All sleep transitions (suspend/hibernate) share the same quiesce and
 * bring-up pair. */
static const struct dev_pm_ops vortex_pm_ops = {
	.suspend = vortex_suspend,
	.resume = vortex_resume,
	.freeze = vortex_suspend,
	.thaw = vortex_resume,
	.poweroff = vortex_suspend,
	.restore = vortex_resume,
};
891 
892 #define VORTEX_PM_OPS (&vortex_pm_ops)
893 
894 #else /* !CONFIG_PM */
895 
896 #define VORTEX_PM_OPS NULL
897 
898 #endif /* !CONFIG_PM */
899 
900 #ifdef CONFIG_EISA
/* EISA ID strings for the 3c592 and 3c597 boards; the empty entry
 * terminates the table. */
static const struct eisa_device_id vortex_eisa_ids[] = {
	{ "TCM5920", CH_3C592 },
	{ "TCM5970", CH_3C597 },
	{ "" }
};
MODULE_DEVICE_TABLE(eisa, vortex_eisa_ids);
907 
908 static int vortex_eisa_probe(struct device *device)
909 {
910 	void __iomem *ioaddr;
911 	struct eisa_device *edev;
912 
913 	edev = to_eisa_device(device);
914 
915 	if (!request_region(edev->base_addr, VORTEX_TOTAL_SIZE, DRV_NAME))
916 		return -EBUSY;
917 
918 	ioaddr = ioport_map(edev->base_addr, VORTEX_TOTAL_SIZE);
919 
920 	if (vortex_probe1(device, ioaddr, ioread16(ioaddr + 0xC88) >> 12,
921 					  edev->id.driver_data, vortex_cards_found)) {
922 		release_region(edev->base_addr, VORTEX_TOTAL_SIZE);
923 		return -ENODEV;
924 	}
925 
926 	vortex_cards_found++;
927 
928 	return 0;
929 }
930 
931 static int vortex_eisa_remove(struct device *device)
932 {
933 	struct eisa_device *edev;
934 	struct net_device *dev;
935 	struct vortex_private *vp;
936 	void __iomem *ioaddr;
937 
938 	edev = to_eisa_device(device);
939 	dev = eisa_get_drvdata(edev);
940 
941 	if (!dev) {
942 		pr_err("vortex_eisa_remove called for Compaq device!\n");
943 		BUG();
944 	}
945 
946 	vp = netdev_priv(dev);
947 	ioaddr = vp->ioaddr;
948 
949 	unregister_netdev(dev);
950 	iowrite16(TotalReset|0x14, ioaddr + EL3_CMD);
951 	release_region(edev->base_addr, VORTEX_TOTAL_SIZE);
952 
953 	free_netdev(dev);
954 	return 0;
955 }
956 
/* Glue binding the EISA ID table to the probe/remove callbacks above. */
static struct eisa_driver vortex_eisa_driver = {
	.id_table = vortex_eisa_ids,
	.driver   = {
		.name    = "3c59x",
		.probe   = vortex_eisa_probe,
		.remove  = vortex_eisa_remove
	}
};
965 
966 #endif /* CONFIG_EISA */
967 
968 /* returns count found (>= 0), or negative on error */
static int __init vortex_eisa_init(void)
{
	int eisa_found = 0;
	int orig_cards_found = vortex_cards_found;

#ifdef CONFIG_EISA
	int err;

	err = eisa_driver_register (&vortex_eisa_driver);
	if (!err) {
		/*
		 * Because of the way EISA bus is probed, we cannot assume
		 * any device have been found when we exit from
		 * eisa_driver_register (the bus root driver may not be
		 * initialized yet). So we blindly assume something was
		 * found, and let the sysfs magic happened...
		 */
		eisa_found = 1;
	}
#endif

	/* Special code to work-around the Compaq PCI BIOS32 problem. */
	if (compaq_ioaddr) {
		vortex_probe1(NULL, ioport_map(compaq_ioaddr, VORTEX_TOTAL_SIZE),
			      compaq_irq, compaq_device_id, vortex_cards_found++);
	}

	/* Cards probed synchronously here, plus 1 if EISA registration
	 * succeeded (EISA devices may still appear asynchronously). */
	return vortex_cards_found - orig_cards_found + eisa_found;
}
998 
/* returns 0 on success, or a negative errno on error */
static int vortex_init_one(struct pci_dev *pdev,
			   const struct pci_device_id *ent)
{
	int rc, unit, pci_bar;
	struct vortex_chip_info *vci;
	void __iomem *ioaddr;

	/* wake up and enable device */
	rc = pci_enable_device(pdev);
	if (rc < 0)
		goto out;

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc < 0)
		goto out_disable;

	unit = vortex_cards_found;

	/* BAR selection: per-NIC use_mmio[] wins, then global_use_mmio,
	 * then a chip-based default (Cyclone/Tornado prefer MMIO, BAR 1). */
	if (global_use_mmio < 0 && (unit >= MAX_UNITS || use_mmio[unit] < 0)) {
		/* Determine the default if the user didn't override us */
		vci = &vortex_info_tbl[ent->driver_data];
		pci_bar = vci->drv_flags & (IS_CYCLONE | IS_TORNADO) ? 1 : 0;
	} else if (unit < MAX_UNITS && use_mmio[unit] >= 0)
		pci_bar = use_mmio[unit] ? 1 : 0;
	else
		pci_bar = global_use_mmio ? 1 : 0;

	ioaddr = pci_iomap(pdev, pci_bar, 0);
	if (!ioaddr) /* If mapping fails, fall-back to BAR 0... */
		ioaddr = pci_iomap(pdev, 0, 0);
	if (!ioaddr) {
		rc = -ENOMEM;
		goto out_release;
	}

	rc = vortex_probe1(&pdev->dev, ioaddr, pdev->irq,
			   ent->driver_data, unit);
	if (rc < 0)
		goto out_iounmap;

	vortex_cards_found++;
	goto out;

	/* Unwind in reverse order of acquisition. */
out_iounmap:
	pci_iounmap(pdev, ioaddr);
out_release:
	pci_release_regions(pdev);
out_disable:
	pci_disable_device(pdev);
out:
	return rc;
}
1052 
/* netdev ops for bus-master-tx ("Boomerang" and later) parts; differs
 * from vortex_netdev_ops only in the xmit routine.  (The "boomrang"
 * spelling is historical and referenced elsewhere in this file.) */
static const struct net_device_ops boomrang_netdev_ops = {
	.ndo_open		= vortex_open,
	.ndo_stop		= vortex_close,
	.ndo_start_xmit		= boomerang_start_xmit,
	.ndo_tx_timeout		= vortex_tx_timeout,
	.ndo_get_stats		= vortex_get_stats,
#ifdef CONFIG_PCI
	.ndo_do_ioctl 		= vortex_ioctl,
#endif
	.ndo_set_rx_mode	= set_rx_mode,
	.ndo_set_mac_address 	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= poll_vortex,
#endif
};
1069 
/* netdev ops for PIO-only Vortex-generation parts (vortex_start_xmit). */
static const struct net_device_ops vortex_netdev_ops = {
	.ndo_open		= vortex_open,
	.ndo_stop		= vortex_close,
	.ndo_start_xmit		= vortex_start_xmit,
	.ndo_tx_timeout		= vortex_tx_timeout,
	.ndo_get_stats		= vortex_get_stats,
#ifdef CONFIG_PCI
	.ndo_do_ioctl 		= vortex_ioctl,
#endif
	.ndo_set_rx_mode	= set_rx_mode,
	.ndo_set_mac_address 	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= poll_vortex,
#endif
};
1086 
1087 /*
1088  * Start up the PCI/EISA device which is described by *gendev.
1089  * Return 0 on success.
1090  *
1091  * NOTE: pdev can be NULL, for the case of a Compaq device
1092  */
static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq,
			 int chip_idx, int card_idx)
{
	struct vortex_private *vp;
	int option;
	unsigned int eeprom[0x40], checksum = 0;		/* EEPROM contents */
	int i, step;
	struct net_device *dev;
	static int printed_version;
	int retval, print_info;
	struct vortex_chip_info * const vci = &vortex_info_tbl[chip_idx];
	const char *print_name = "3c59x";
	struct pci_dev *pdev = NULL;
	struct eisa_device *edev = NULL;

	if (!printed_version) {
		pr_info("%s", version);
		printed_version = 1;
	}

	/* Recover the bus-specific device (if any) just for a nicer name. */
	if (gendev) {
		if ((pdev = DEVICE_PCI(gendev))) {
			print_name = pci_name(pdev);
		}

		if ((edev = DEVICE_EISA(gendev))) {
			print_name = dev_name(&edev->dev);
		}
	}

	dev = alloc_etherdev(sizeof(*vp));
	retval = -ENOMEM;
	if (!dev)
		goto out;

	SET_NETDEV_DEV(dev, gendev);
	vp = netdev_priv(dev);

	option = global_options;

	/* The lower four bits are the media type. */
	if (dev->mem_start) {
		/*
		 * The 'options' param is passed in as the third arg to the
		 * LILO 'ether=' argument for non-modular use
		 */
		option = dev->mem_start;
	}
	else if (card_idx < MAX_UNITS) {
		if (options[card_idx] >= 0)
			option = options[card_idx];
	}

	/* Option bit 15: debug level 7; bit 14: debug level 2; bit 10: WOL. */
	if (option > 0) {
		if (option & 0x8000)
			vortex_debug = 7;
		if (option & 0x4000)
			vortex_debug = 2;
		if (option & 0x0400)
			vp->enable_wol = 1;
	}

	print_info = (vortex_debug > 1);
	if (print_info)
		pr_info("See Documentation/networking/vortex.txt\n");

	pr_info("%s: 3Com %s %s at %p.\n",
	       print_name,
	       pdev ? "PCI" : "EISA",
	       vci->name,
	       ioaddr);

	dev->base_addr = (unsigned long)ioaddr;
	dev->irq = irq;
	dev->mtu = mtu;
	vp->ioaddr = ioaddr;
	vp->large_frames = mtu > 1500;
	vp->drv_flags = vci->drv_flags;
	vp->has_nway = (vci->drv_flags & HAS_NWAY) ? 1 : 0;
	vp->io_size = vci->io_size;
	vp->card_idx = card_idx;
	vp->window = -1;	/* no register window selected yet */

	/* module list only for Compaq device */
	if (gendev == NULL) {
		compaq_net_device = dev;
	}

	/* PCI-only startup logic */
	if (pdev) {
		/* enable bus-mastering if necessary */
		if (vci->flags & PCI_USES_MASTER)
			pci_set_master(pdev);

		if (vci->drv_flags & IS_VORTEX) {
			u8 pci_latency;
			u8 new_latency = 248;

			/* Check the PCI latency value.  On the 3c590 series the latency timer
			   must be set to the maximum value to avoid data corruption that occurs
			   when the timer expires during a transfer.  This bug exists the Vortex
			   chip only. */
			pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &pci_latency);
			if (pci_latency < new_latency) {
				pr_info("%s: Overriding PCI latency timer (CFLT) setting of %d, new value is %d.\n",
					print_name, pci_latency, new_latency);
				pci_write_config_byte(pdev, PCI_LATENCY_TIMER, new_latency);
			}
		}
	}

	/* Locks: command/ISR path, MII access, and register-window select. */
	spin_lock_init(&vp->lock);
	spin_lock_init(&vp->mii_lock);
	spin_lock_init(&vp->window_lock);
	vp->gendev = gendev;
	vp->mii.dev = dev;
	vp->mii.mdio_read = mdio_read;
	vp->mii.mdio_write = mdio_write;
	vp->mii.phy_id_mask = 0x1f;
	vp->mii.reg_num_mask = 0x1f;

	/* Makes sure rings are at least 16 byte aligned. */
	vp->rx_ring = pci_alloc_consistent(pdev, sizeof(struct boom_rx_desc) * RX_RING_SIZE
					   + sizeof(struct boom_tx_desc) * TX_RING_SIZE,
					   &vp->rx_ring_dma);
	retval = -ENOMEM;
	if (!vp->rx_ring)
		goto free_device;

	/* The tx ring lives in the same coherent allocation, after rx. */
	vp->tx_ring = (struct boom_tx_desc *)(vp->rx_ring + RX_RING_SIZE);
	vp->tx_ring_dma = vp->rx_ring_dma + sizeof(struct boom_rx_desc) * RX_RING_SIZE;

	/* if we are a PCI driver, we store info in pdev->driver_data
	 * instead of a module list */
	if (pdev)
		pci_set_drvdata(pdev, dev);
	if (edev)
		eisa_set_drvdata(edev, dev);

	/* media_override == 7 means "no override". */
	vp->media_override = 7;
	if (option >= 0) {
		vp->media_override = ((option & 7) == 2)  ?  0  :  option & 15;
		if (vp->media_override != 7)
			vp->medialock = 1;
		vp->full_duplex = (option & 0x200) ? 1 : 0;
		vp->bus_master = (option & 16) ? 1 : 0;
	}

	if (global_full_duplex > 0)
		vp->full_duplex = 1;
	if (global_enable_wol > 0)
		vp->enable_wol = 1;

	if (card_idx < MAX_UNITS) {
		if (full_duplex[card_idx] > 0)
			vp->full_duplex = 1;
		if (flow_ctrl[card_idx] > 0)
			vp->flow_ctrl = 1;
		if (enable_wol[card_idx] > 0)
			vp->enable_wol = 1;
	}

	vp->mii.force_media = vp->full_duplex;
	vp->options = option;
	/* Read the station address from the EEPROM. */
	{
		int base;

		if (vci->drv_flags & EEPROM_8BIT)
			base = 0x230;
		else if (vci->drv_flags & EEPROM_OFFSET)
			base = EEPROM_Read + 0x30;
		else
			base = EEPROM_Read;

		for (i = 0; i < 0x40; i++) {
			int timer;
			window_write16(vp, base + i, 0, Wn0EepromCmd);
			/* Pause for at least 162 us. for the read to take place. */
			for (timer = 10; timer >= 0; timer--) {
				udelay(162);
				/* Bit 15 of Wn0EepromCmd clears when done. */
				if ((window_read16(vp, 0, Wn0EepromCmd) &
				     0x8000) == 0)
					break;
			}
			eeprom[i] = window_read16(vp, 0, Wn0EepromData);
		}
	}
	for (i = 0; i < 0x18; i++)
		checksum ^= eeprom[i];
	checksum = (checksum ^ (checksum >> 8)) & 0xff;
	/* Newer parts checksum a longer EEPROM region (through word 0x20). */
	if (checksum != 0x00) {		/* Grrr, needless incompatible change 3Com. */
		while (i < 0x21)
			checksum ^= eeprom[i++];
		checksum = (checksum ^ (checksum >> 8)) & 0xff;
	}
	if ((checksum != 0x00) && !(vci->drv_flags & IS_TORNADO))
		pr_cont(" ***INVALID CHECKSUM %4.4x*** ", checksum);
	/* MAC address is stored big-endian in EEPROM words 10-12. */
	for (i = 0; i < 3; i++)
		((__be16 *)dev->dev_addr)[i] = htons(eeprom[i + 10]);
	if (print_info)
		pr_cont(" %pM", dev->dev_addr);
	/* Unfortunately an all zero eeprom passes the checksum and this
	   gets found in the wild in failure cases. Crypto is hard 8) */
	if (!is_valid_ether_addr(dev->dev_addr)) {
		retval = -EINVAL;
		pr_err("*** EEPROM MAC address is invalid.\n");
		goto free_ring;	/* With every pack */
	}
	/* Load the station address into Window 2 registers 0-5. */
	for (i = 0; i < 6; i++)
		window_write8(vp, dev->dev_addr[i], 2, i);

	if (print_info)
		pr_cont(", IRQ %d\n", dev->irq);
	/* Tell them about an invalid IRQ. */
	if (dev->irq <= 0 || dev->irq >= nr_irqs)
		pr_warn(" *** Warning: IRQ %d is unlikely to work! ***\n",
			dev->irq);

	step = (window_read8(vp, 4, Wn4_NetDiag) & 0x1e) >> 1;
	if (print_info) {
		pr_info("  product code %02x%02x rev %02x.%d date %02d-%02d-%02d\n",
			eeprom[6]&0xff, eeprom[6]>>8, eeprom[0x14],
			step, (eeprom[4]>>5) & 15, eeprom[4] & 31, eeprom[4]>>9);
	}


	/* CardBus parts expose function registers in BAR 2. */
	if (pdev && vci->drv_flags & HAS_CB_FNS) {
		unsigned short n;

		vp->cb_fn_base = pci_iomap(pdev, 2, 0);
		if (!vp->cb_fn_base) {
			retval = -ENOMEM;
			goto free_ring;
		}

		if (print_info) {
			pr_info("%s: CardBus functions mapped %16.16llx->%p\n",
				print_name,
				(unsigned long long)pci_resource_start(pdev, 2),
				vp->cb_fn_base);
		}

		n = window_read16(vp, 2, Wn2_ResetOptions) & ~0x4010;
		if (vp->drv_flags & INVERT_LED_PWR)
			n |= 0x10;
		if (vp->drv_flags & INVERT_MII_PWR)
			n |= 0x4000;
		window_write16(vp, n, 2, Wn2_ResetOptions);
		if (vp->drv_flags & WNO_XCVR_PWR) {
			/* NOTE(review): presumably cuts transceiver power via
			 * Window 0 -- confirm against 3Com documentation. */
			window_write16(vp, 0x0800, 0, 0);
		}
	}

	/* Extract our information from the EEPROM data. */
	vp->info1 = eeprom[13];
	vp->info2 = eeprom[15];
	vp->capabilities = eeprom[16];

	if (vp->info1 & 0x8000) {
		vp->full_duplex = 1;
		if (print_info)
			pr_info("Full duplex capable\n");
	}

	{
		static const char * const ram_split[] = {"5:3", "3:1", "1:1", "3:5"};
		unsigned int config;
		vp->available_media = window_read16(vp, 3, Wn3_Options);
		if ((vp->available_media & 0xff) == 0)		/* Broken 3c916 */
			vp->available_media = 0x40;
		config = window_read32(vp, 3, Wn3_Config);
		if (print_info) {
			pr_debug("  Internal config register is %4.4x, transceivers %#x.\n",
				config, window_read16(vp, 3, Wn3_Options));
			pr_info("  %dK %s-wide RAM %s Rx:Tx split, %s%s interface.\n",
				   8 << RAM_SIZE(config),
				   RAM_WIDTH(config) ? "word" : "byte",
				   ram_split[RAM_SPLIT(config)],
				   AUTOSELECT(config) ? "autoselect/" : "",
				   XCVR(config) > XCVR_ExtMII ? "<invalid transceiver>" :
				   media_tbl[XCVR(config)].name);
		}
		vp->default_media = XCVR(config);
		if (vp->default_media == XCVR_NWAY)
			vp->has_nway = 1;
		vp->autoselect = AUTOSELECT(config);
	}

	if (vp->media_override != 7) {
		pr_info("%s:  Media override to transceiver type %d (%s).\n",
				print_name, vp->media_override,
				media_tbl[vp->media_override].name);
		dev->if_port = vp->media_override;
	} else
		dev->if_port = vp->default_media;

	/* Probe the MII bus when an MII/NWAY transceiver may be in use. */
	if ((vp->available_media & 0x40) || (vci->drv_flags & HAS_NWAY) ||
		dev->if_port == XCVR_MII || dev->if_port == XCVR_NWAY) {
		int phy, phy_idx = 0;
		mii_preamble_required++;
		if (vp->drv_flags & EXTRA_PREAMBLE)
			mii_preamble_required++;
		mdio_sync(vp, 32);
		mdio_read(dev, 24, MII_BMSR);
		for (phy = 0; phy < 32 && phy_idx < 1; phy++) {
			int mii_status, phyx;

			/*
			 * For the 3c905CX we look at index 24 first, because it bogusly
			 * reports an external PHY at all indices
			 */
			if (phy == 0)
				phyx = 24;
			else if (phy <= 24)
				phyx = phy - 1;
			else
				phyx = phy;
			mii_status = mdio_read(dev, phyx, MII_BMSR);
			if (mii_status  &&  mii_status != 0xffff) {
				vp->phys[phy_idx++] = phyx;
				if (print_info) {
					pr_info("  MII transceiver found at address %d, status %4x.\n",
						phyx, mii_status);
				}
				if ((mii_status & 0x0040) == 0)
					mii_preamble_required++;
			}
		}
		mii_preamble_required--;
		if (phy_idx == 0) {
			pr_warn("  ***WARNING*** No MII transceivers found!\n");
			vp->phys[0] = 24;
		} else {
			vp->advertising = mdio_read(dev, vp->phys[0], MII_ADVERTISE);
			if (vp->full_duplex) {
				/* Only advertise the FD media types. */
				vp->advertising &= ~0x02A0;
				mdio_write(dev, vp->phys[0], 4, vp->advertising);
			}
		}
		vp->mii.phy_id = vp->phys[0];
	}

	/* Bus-master capable parts get the Boomerang DMA paths. */
	if (vp->capabilities & CapBusMaster) {
		vp->full_bus_master_tx = 1;
		if (print_info) {
			pr_info("  Enabling bus-master transmits and %s receives.\n",
			(vp->info2 & 1) ? "early" : "whole-frame" );
		}
		vp->full_bus_master_rx = (vp->info2 & 1) ? 1 : 2;
		vp->bus_master = 0;		/* AKPM: vortex only */
	}

	/* The 3c59x-specific entries in the device structure. */
	if (vp->full_bus_master_tx) {
		dev->netdev_ops = &boomrang_netdev_ops;
		/* Actually, it still should work with iommu. */
		if (card_idx < MAX_UNITS &&
		    ((hw_checksums[card_idx] == -1 && (vp->drv_flags & HAS_HWCKSM)) ||
				hw_checksums[card_idx] == 1)) {
			dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
		}
	} else
		dev->netdev_ops =  &vortex_netdev_ops;

	if (print_info) {
		pr_info("%s: scatter/gather %sabled. h/w checksums %sabled\n",
				print_name,
				(dev->features & NETIF_F_SG) ? "en":"dis",
				(dev->features & NETIF_F_IP_CSUM) ? "en":"dis");
	}

	dev->ethtool_ops = &vortex_ethtool_ops;
	dev->watchdog_timeo = (watchdog * HZ) / 1000;

	if (pdev) {
		vp->pm_state_valid = 1;
		pci_save_state(pdev);
 		acpi_set_WOL(dev);
	}
	retval = register_netdev(dev);
	if (retval == 0)
		return 0;

free_ring:
	pci_free_consistent(pdev,
						sizeof(struct boom_rx_desc) * RX_RING_SIZE
							+ sizeof(struct boom_tx_desc) * TX_RING_SIZE,
						vp->rx_ring,
						vp->rx_ring_dma);
free_device:
	free_netdev(dev);
	pr_err(PFX "vortex_probe1 fails.  Returns %d\n", retval);
out:
	return retval;
}
1490 
1491 static void
1492 issue_and_wait(struct net_device *dev, int cmd)
1493 {
1494 	struct vortex_private *vp = netdev_priv(dev);
1495 	void __iomem *ioaddr = vp->ioaddr;
1496 	int i;
1497 
1498 	iowrite16(cmd, ioaddr + EL3_CMD);
1499 	for (i = 0; i < 2000; i++) {
1500 		if (!(ioread16(ioaddr + EL3_STATUS) & CmdInProgress))
1501 			return;
1502 	}
1503 
1504 	/* OK, that didn't work.  Do it the slow way.  One second */
1505 	for (i = 0; i < 100000; i++) {
1506 		if (!(ioread16(ioaddr + EL3_STATUS) & CmdInProgress)) {
1507 			if (vortex_debug > 1)
1508 				pr_info("%s: command 0x%04x took %d usecs\n",
1509 					   dev->name, cmd, i * 10);
1510 			return;
1511 		}
1512 		udelay(10);
1513 	}
1514 	pr_err("%s: command 0x%04x did not complete! Status=0x%x\n",
1515 			   dev->name, cmd, ioread16(ioaddr + EL3_STATUS));
1516 }
1517 
1518 static void
1519 vortex_set_duplex(struct net_device *dev)
1520 {
1521 	struct vortex_private *vp = netdev_priv(dev);
1522 
1523 	pr_info("%s:  setting %s-duplex.\n",
1524 		dev->name, (vp->full_duplex) ? "full" : "half");
1525 
1526 	/* Set the full-duplex bit. */
1527 	window_write16(vp,
1528 		       ((vp->info1 & 0x8000) || vp->full_duplex ? 0x20 : 0) |
1529 		       (vp->large_frames ? 0x40 : 0) |
1530 		       ((vp->full_duplex && vp->flow_ctrl && vp->partner_flow_ctrl) ?
1531 			0x100 : 0),
1532 		       3, Wn3_MAC_Ctrl);
1533 }
1534 
1535 static void vortex_check_media(struct net_device *dev, unsigned int init)
1536 {
1537 	struct vortex_private *vp = netdev_priv(dev);
1538 	unsigned int ok_to_print = 0;
1539 
1540 	if (vortex_debug > 3)
1541 		ok_to_print = 1;
1542 
1543 	if (mii_check_media(&vp->mii, ok_to_print, init)) {
1544 		vp->full_duplex = vp->mii.full_duplex;
1545 		vortex_set_duplex(dev);
1546 	} else if (init) {
1547 		vortex_set_duplex(dev);
1548 	}
1549 }
1550 
static int
vortex_up(struct net_device *dev)
{
	struct vortex_private *vp = netdev_priv(dev);
	void __iomem *ioaddr = vp->ioaddr;
	unsigned int config;
	int i, mii_reg1, mii_reg5, err = 0;

	/* For PCI parts, power up and re-enable the device first. */
	if (VORTEX_PCI(vp)) {
		pci_set_power_state(VORTEX_PCI(vp), PCI_D0);	/* Go active */
		if (vp->pm_state_valid)
			pci_restore_state(VORTEX_PCI(vp));
		err = pci_enable_device(VORTEX_PCI(vp));
		if (err) {
			pr_warn("%s: Could not enable device\n", dev->name);
			goto err_out;
		}
	}

	/* Before initializing select the active media port. */
	config = window_read32(vp, 3, Wn3_Config);

	if (vp->media_override != 7) {
		pr_info("%s: Media override to transceiver %d (%s).\n",
			   dev->name, vp->media_override,
			   media_tbl[vp->media_override].name);
		dev->if_port = vp->media_override;
	} else if (vp->autoselect) {
		if (vp->has_nway) {
			if (vortex_debug > 1)
				pr_info("%s: using NWAY device table, not %d\n",
								dev->name, dev->if_port);
			dev->if_port = XCVR_NWAY;
		} else {
			/* Find first available media type, starting with 100baseTx. */
			dev->if_port = XCVR_100baseTx;
			while (! (vp->available_media & media_tbl[dev->if_port].mask))
				dev->if_port = media_tbl[dev->if_port].next;
			if (vortex_debug > 1)
				pr_info("%s: first available media type: %s\n",
					dev->name, media_tbl[dev->if_port].name);
		}
	} else {
		dev->if_port = vp->default_media;
		if (vortex_debug > 1)
			pr_info("%s: using default media %s\n",
				dev->name, media_tbl[dev->if_port].name);
	}

	/* (Re)arm the media-watch timer for the chosen port. */
	timer_setup(&vp->timer, vortex_timer, 0);
	mod_timer(&vp->timer, RUN_AT(media_tbl[dev->if_port].wait));

	if (vortex_debug > 1)
		pr_debug("%s: Initial media type %s.\n",
			   dev->name, media_tbl[dev->if_port].name);

	vp->full_duplex = vp->mii.force_media;
	/* Program the selected transceiver into InternalConfig (4 bits at 20). */
	config = BFINS(config, dev->if_port, 20, 4);
	if (vortex_debug > 6)
		pr_debug("vortex_up(): writing 0x%x to InternalConfig\n", config);
	window_write32(vp, config, 3, Wn3_Config);

	if (dev->if_port == XCVR_MII || dev->if_port == XCVR_NWAY) {
		mii_reg1 = mdio_read(dev, vp->phys[0], MII_BMSR);
		mii_reg5 = mdio_read(dev, vp->phys[0], MII_LPA);
		/* LPA bit 10: link partner advertises PAUSE. */
		vp->partner_flow_ctrl = ((mii_reg5 & 0x0400) != 0);
		vp->mii.full_duplex = vp->full_duplex;

		vortex_check_media(dev, 1);
	}
	else
		vortex_set_duplex(dev);

	issue_and_wait(dev, TxReset);
	/*
	 * Don't reset the PHY - that upsets autonegotiation during DHCP operations.
	 */
	issue_and_wait(dev, RxReset|0x04);


	iowrite16(SetStatusEnb | 0x00, ioaddr + EL3_CMD);

	if (vortex_debug > 1) {
		pr_debug("%s: vortex_up() irq %d media status %4.4x.\n",
			   dev->name, dev->irq, window_read16(vp, 4, Wn4_Media));
	}

	/* Set the station address and mask in window 2 each time opened. */
	for (i = 0; i < 6; i++)
		window_write8(vp, dev->dev_addr[i], 2, i);
	for (; i < 12; i+=2)
		window_write16(vp, 0, 2, i);

	/* Re-apply CardBus LED/MII power polarity if applicable. */
	if (vp->cb_fn_base) {
		unsigned short n = window_read16(vp, 2, Wn2_ResetOptions) & ~0x4010;
		if (vp->drv_flags & INVERT_LED_PWR)
			n |= 0x10;
		if (vp->drv_flags & INVERT_MII_PWR)
			n |= 0x4000;
		window_write16(vp, n, 2, Wn2_ResetOptions);
	}

	if (dev->if_port == XCVR_10base2)
		/* Start the thinnet transceiver. We should really wait 50ms...*/
		iowrite16(StartCoax, ioaddr + EL3_CMD);
	if (dev->if_port != XCVR_NWAY) {
		window_write16(vp,
			       (window_read16(vp, 4, Wn4_Media) &
				~(Media_10TP|Media_SQE)) |
			       media_tbl[dev->if_port].media_bits,
			       4, Wn4_Media);
	}

	/* Switch to the stats window, and clear all stats by reading. */
	iowrite16(StatsDisable, ioaddr + EL3_CMD);
	for (i = 0; i < 10; i++)
		window_read8(vp, 6, i);
	window_read16(vp, 6, 10);
	window_read16(vp, 6, 12);
	/* New: On the Vortex we must also clear the BadSSD counter. */
	window_read8(vp, 4, 12);
	/* ..and on the Boomerang we enable the extra statistics bits. */
	window_write16(vp, 0x0040, 4, Wn4_NetDiag);

	if (vp->full_bus_master_rx) { /* Boomerang bus master. */
		vp->cur_rx = 0;
		/* Initialize the RxEarly register as recommended. */
		iowrite16(SetRxThreshold + (1536>>2), ioaddr + EL3_CMD);
		iowrite32(0x0020, ioaddr + PktStatus);
		iowrite32(vp->rx_ring_dma, ioaddr + UpListPtr);
	}
	if (vp->full_bus_master_tx) { 		/* Boomerang bus master Tx. */
		vp->cur_tx = vp->dirty_tx = 0;
		if (vp->drv_flags & IS_BOOMERANG)
			iowrite8(PKT_BUF_SZ>>8, ioaddr + TxFreeThreshold); /* Room for a packet. */
		/* Clear the Rx, Tx rings. */
		for (i = 0; i < RX_RING_SIZE; i++)	/* AKPM: this is done in vortex_open, too */
			vp->rx_ring[i].status = 0;
		for (i = 0; i < TX_RING_SIZE; i++)
			vp->tx_skbuff[i] = NULL;
		iowrite32(0, ioaddr + DownListPtr);
	}
	/* Set receiver mode: presumably accept b-case and phys addr only. */
	set_rx_mode(dev);
	/* enable 802.1q tagged frames */
	set_8021q_mode(dev, 1);
	iowrite16(StatsEnable, ioaddr + EL3_CMD); /* Turn on statistics. */

	iowrite16(RxEnable, ioaddr + EL3_CMD); /* Enable the receiver. */
	iowrite16(TxEnable, ioaddr + EL3_CMD); /* Enable transmitter. */
	/* Allow status bits to be seen. */
	vp->status_enable = SetStatusEnb | HostError|IntReq|StatsFull|TxComplete|
		(vp->full_bus_master_tx ? DownComplete : TxAvailable) |
		(vp->full_bus_master_rx ? UpComplete : RxComplete) |
		(vp->bus_master ? DMADone : 0);
	vp->intr_enable = SetIntrEnb | IntLatch | TxAvailable |
		(vp->full_bus_master_rx ? 0 : RxComplete) |
		StatsFull | HostError | TxComplete | IntReq
		| (vp->bus_master ? DMADone : 0) | UpComplete | DownComplete;
	iowrite16(vp->status_enable, ioaddr + EL3_CMD);
	/* Ack all pending events, and set active indicator mask. */
	iowrite16(AckIntr | IntLatch | TxAvailable | RxEarly | IntReq,
		 ioaddr + EL3_CMD);
	iowrite16(vp->intr_enable, ioaddr + EL3_CMD);
	if (vp->cb_fn_base)			/* The PCMCIA people are idiots.  */
		iowrite32(0x8000, vp->cb_fn_base + 4);
	netif_start_queue (dev);
	netdev_reset_queue(dev);
	/* Success falls through here with err == 0. */
err_out:
	return err;
}
1722 
1723 static int
1724 vortex_open(struct net_device *dev)
1725 {
1726 	struct vortex_private *vp = netdev_priv(dev);
1727 	int i;
1728 	int retval;
1729 	dma_addr_t dma;
1730 
1731 	/* Use the now-standard shared IRQ implementation. */
1732 	if ((retval = request_irq(dev->irq, vp->full_bus_master_rx ?
1733 				boomerang_interrupt : vortex_interrupt, IRQF_SHARED, dev->name, dev))) {
1734 		pr_err("%s: Could not reserve IRQ %d\n", dev->name, dev->irq);
1735 		goto err;
1736 	}
1737 
1738 	if (vp->full_bus_master_rx) { /* Boomerang bus master. */
1739 		if (vortex_debug > 2)
1740 			pr_debug("%s:  Filling in the Rx ring.\n", dev->name);
1741 		for (i = 0; i < RX_RING_SIZE; i++) {
1742 			struct sk_buff *skb;
1743 			vp->rx_ring[i].next = cpu_to_le32(vp->rx_ring_dma + sizeof(struct boom_rx_desc) * (i+1));
1744 			vp->rx_ring[i].status = 0;	/* Clear complete bit. */
1745 			vp->rx_ring[i].length = cpu_to_le32(PKT_BUF_SZ | LAST_FRAG);
1746 
1747 			skb = __netdev_alloc_skb(dev, PKT_BUF_SZ + NET_IP_ALIGN,
1748 						 GFP_KERNEL);
1749 			vp->rx_skbuff[i] = skb;
1750 			if (skb == NULL)
1751 				break;			/* Bad news!  */
1752 
1753 			skb_reserve(skb, NET_IP_ALIGN);	/* Align IP on 16 byte boundaries */
1754 			dma = pci_map_single(VORTEX_PCI(vp), skb->data,
1755 					     PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
1756 			if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma))
1757 				break;
1758 			vp->rx_ring[i].addr = cpu_to_le32(dma);
1759 		}
1760 		if (i != RX_RING_SIZE) {
1761 			pr_emerg("%s: no memory for rx ring\n", dev->name);
1762 			retval = -ENOMEM;
1763 			goto err_free_skb;
1764 		}
1765 		/* Wrap the ring. */
1766 		vp->rx_ring[i-1].next = cpu_to_le32(vp->rx_ring_dma);
1767 	}
1768 
1769 	retval = vortex_up(dev);
1770 	if (!retval)
1771 		goto out;
1772 
1773 err_free_skb:
1774 	for (i = 0; i < RX_RING_SIZE; i++) {
1775 		if (vp->rx_skbuff[i]) {
1776 			dev_kfree_skb(vp->rx_skbuff[i]);
1777 			vp->rx_skbuff[i] = NULL;
1778 		}
1779 	}
1780 	free_irq(dev->irq, dev);
1781 err:
1782 	if (vortex_debug > 1)
1783 		pr_err("%s: vortex_open() fails: returning %d\n", dev->name, retval);
1784 out:
1785 	return retval;
1786 }
1787 
/*
 * vortex_timer() - periodic media-selection / link watchdog.
 *
 * For the fixed 10baseT/100baseTx/100baseFx transceivers this polls the
 * Wn4_Media link-beat bit and updates the carrier state; MII/NWAY ports
 * are delegated to vortex_check_media().  If the current port shows no
 * usable indication and the media is not user-locked (vp->medialock),
 * the next available transceiver from media_tbl[] is selected and
 * programmed into Wn4_Media and Wn3_Config.  The timer re-arms itself:
 * every 60 s normally, every 5 s while the carrier is down or the
 * device is a bonding slave.
 */
static void
vortex_timer(struct timer_list *t)
{
	struct vortex_private *vp = from_timer(vp, t, timer);
	struct net_device *dev = vp->mii.dev;
	void __iomem *ioaddr = vp->ioaddr;
	int next_tick = 60*HZ;
	int ok = 0;		/* set once the current media is considered good */
	int media_status;

	if (vortex_debug > 2) {
		pr_debug("%s: Media selection timer tick happened, %s.\n",
			   dev->name, media_tbl[dev->if_port].name);
		pr_debug("dev->watchdog_timeo=%d\n", dev->watchdog_timeo);
	}

	media_status = window_read16(vp, 4, Wn4_Media);
	switch (dev->if_port) {
	case XCVR_10baseT:  case XCVR_100baseTx:  case XCVR_100baseFx:
		if (media_status & Media_LnkBeat) {
			netif_carrier_on(dev);
			ok = 1;
			if (vortex_debug > 1)
				pr_debug("%s: Media %s has link beat, %x.\n",
					   dev->name, media_tbl[dev->if_port].name, media_status);
		} else {
			netif_carrier_off(dev);
			if (vortex_debug > 1) {
				pr_debug("%s: Media %s has no link beat, %x.\n",
					   dev->name, media_tbl[dev->if_port].name, media_status);
			}
		}
		break;
	case XCVR_MII: case XCVR_NWAY:
		{
			ok = 1;
			vortex_check_media(dev, 0);
		}
		break;
	  default:					/* Other media types handled by Tx timeouts. */
		if (vortex_debug > 1)
		  pr_debug("%s: Media %s has no indication, %x.\n",
				 dev->name, media_tbl[dev->if_port].name, media_status);
		ok = 1;
	}

	/* Poll faster while the link is down or we are a bonding slave. */
	if (dev->flags & IFF_SLAVE || !netif_carrier_ok(dev))
		next_tick = 5*HZ;

	if (vp->medialock)
		goto leave_media_alone;

	if (!ok) {
		unsigned int config;

		spin_lock_irq(&vp->lock);

		/* Step to the next transceiver this NIC actually has. */
		do {
			dev->if_port = media_tbl[dev->if_port].next;
		} while ( ! (vp->available_media & media_tbl[dev->if_port].mask));
		if (dev->if_port == XCVR_Default) { /* Go back to default. */
		  dev->if_port = vp->default_media;
		  if (vortex_debug > 1)
			pr_debug("%s: Media selection failing, using default %s port.\n",
				   dev->name, media_tbl[dev->if_port].name);
		} else {
			if (vortex_debug > 1)
				pr_debug("%s: Media selection failed, now trying %s port.\n",
					   dev->name, media_tbl[dev->if_port].name);
			next_tick = media_tbl[dev->if_port].wait;
		}
		window_write16(vp,
			       (media_status & ~(Media_10TP|Media_SQE)) |
			       media_tbl[dev->if_port].media_bits,
			       4, Wn4_Media);

		/* Record the selected port in bits 20-23 of Wn3_Config. */
		config = window_read32(vp, 3, Wn3_Config);
		config = BFINS(config, dev->if_port, 20, 4);
		window_write32(vp, config, 3, Wn3_Config);

		iowrite16(dev->if_port == XCVR_10base2 ? StartCoax : StopCoax,
			 ioaddr + EL3_CMD);
		if (vortex_debug > 1)
			pr_debug("wrote 0x%08x to Wn3_Config\n", config);
		/* AKPM: FIXME: Should reset Rx & Tx here.  P60 of 3c90xc.pdf */

		spin_unlock_irq(&vp->lock);
	}

leave_media_alone:
	if (vortex_debug > 2)
	  pr_debug("%s: Media selection timer finished, %s.\n",
			 dev->name, media_tbl[dev->if_port].name);

	mod_timer(&vp->timer, RUN_AT(next_tick));
	/* Events deferred by the ISR's too-much-work throttle are picked
	   up by posting a fake interrupt. */
	if (vp->deferred)
		iowrite16(FakeIntr, ioaddr + EL3_CMD);
}
1886 
/*
 * vortex_tx_timeout() - netdev Tx watchdog hook.
 *
 * Called when the stack has seen no Tx completion within
 * dev->watchdog_timeo.  Logs diagnostic registers, runs the interrupt
 * handler by hand if an interrupt is latched but apparently never
 * delivered (blocked IRQ line), resets the transmitter, restarts the
 * Tx ring on bus-master NICs (or just wakes the queue on PIO NICs),
 * and re-enables Tx.
 */
static void vortex_tx_timeout(struct net_device *dev)
{
	struct vortex_private *vp = netdev_priv(dev);
	void __iomem *ioaddr = vp->ioaddr;

	pr_err("%s: transmit timed out, tx_status %2.2x status %4.4x.\n",
		   dev->name, ioread8(ioaddr + TxStatus),
		   ioread16(ioaddr + EL3_STATUS));
	pr_err("  diagnostics: net %04x media %04x dma %08x fifo %04x\n",
			window_read16(vp, 4, Wn4_NetDiag),
			window_read16(vp, 4, Wn4_Media),
			ioread32(ioaddr + PktStatus),
			window_read16(vp, 4, Wn4_FIFODiag));
	/* Slight code bloat to be user friendly. */
	if ((ioread8(ioaddr + TxStatus) & 0x88) == 0x88)
		pr_err("%s: Transmitter encountered 16 collisions --"
			   " network cable problem?\n", dev->name);
	if (ioread16(ioaddr + EL3_STATUS) & IntLatch) {
		pr_err("%s: Interrupt posted but not delivered --"
			   " IRQ blocked by another device?\n", dev->name);
		/* Bad idea here.. but we might as well handle a few events. */
		{
			/*
			 * Block interrupts because vortex_interrupt does a bare spin_lock()
			 */
			unsigned long flags;
			local_irq_save(flags);
			if (vp->full_bus_master_tx)
				boomerang_interrupt(dev->irq, dev);
			else
				vortex_interrupt(dev->irq, dev);
			local_irq_restore(flags);
		}
	}

	if (vortex_debug > 0)
		dump_tx_ring(dev);

	issue_and_wait(dev, TxReset);

	dev->stats.tx_errors++;
	if (vp->full_bus_master_tx) {
		pr_debug("%s: Resetting the Tx ring pointer.\n", dev->name);
		/* Repoint the NIC at the first unprocessed descriptor if
		   it lost its download list. */
		if (vp->cur_tx - vp->dirty_tx > 0  &&  ioread32(ioaddr + DownListPtr) == 0)
			iowrite32(vp->tx_ring_dma + (vp->dirty_tx % TX_RING_SIZE) * sizeof(struct boom_tx_desc),
				 ioaddr + DownListPtr);
		if (vp->cur_tx - vp->dirty_tx < TX_RING_SIZE) {
			netif_wake_queue (dev);
			netdev_reset_queue (dev);
		}
		if (vp->drv_flags & IS_BOOMERANG)
			iowrite8(PKT_BUF_SZ>>8, ioaddr + TxFreeThreshold);
		iowrite16(DownUnstall, ioaddr + EL3_CMD);
	} else {
		dev->stats.tx_dropped++;
		netif_wake_queue(dev);
		netdev_reset_queue(dev);
	}
	/* Issue Tx Enable */
	iowrite16(TxEnable, ioaddr + EL3_CMD);
	netif_trans_update(dev); /* prevent tx timeout */
}
1949 
1950 /*
1951  * Handle uncommon interrupt sources.  This is a separate routine to minimize
1952  * the cache impact.
1953  */
1954 static void
1955 vortex_error(struct net_device *dev, int status)
1956 {
1957 	struct vortex_private *vp = netdev_priv(dev);
1958 	void __iomem *ioaddr = vp->ioaddr;
1959 	int do_tx_reset = 0, reset_mask = 0;
1960 	unsigned char tx_status = 0;
1961 
1962 	if (vortex_debug > 2) {
1963 		pr_err("%s: vortex_error(), status=0x%x\n", dev->name, status);
1964 	}
1965 
1966 	if (status & TxComplete) {			/* Really "TxError" for us. */
1967 		tx_status = ioread8(ioaddr + TxStatus);
1968 		/* Presumably a tx-timeout. We must merely re-enable. */
1969 		if (vortex_debug > 2 ||
1970 		    (tx_status != 0x88 && vortex_debug > 0)) {
1971 			pr_err("%s: Transmit error, Tx status register %2.2x.\n",
1972 				   dev->name, tx_status);
1973 			if (tx_status == 0x82) {
1974 				pr_err("Probably a duplex mismatch.  See "
1975 						"Documentation/networking/vortex.txt\n");
1976 			}
1977 			dump_tx_ring(dev);
1978 		}
1979 		if (tx_status & 0x14)  dev->stats.tx_fifo_errors++;
1980 		if (tx_status & 0x38)  dev->stats.tx_aborted_errors++;
1981 		if (tx_status & 0x08)  vp->xstats.tx_max_collisions++;
1982 		iowrite8(0, ioaddr + TxStatus);
1983 		if (tx_status & 0x30) {			/* txJabber or txUnderrun */
1984 			do_tx_reset = 1;
1985 		} else if ((tx_status & 0x08) && (vp->drv_flags & MAX_COLLISION_RESET))  {	/* maxCollisions */
1986 			do_tx_reset = 1;
1987 			reset_mask = 0x0108;		/* Reset interface logic, but not download logic */
1988 		} else {				/* Merely re-enable the transmitter. */
1989 			iowrite16(TxEnable, ioaddr + EL3_CMD);
1990 		}
1991 	}
1992 
1993 	if (status & RxEarly)				/* Rx early is unused. */
1994 		iowrite16(AckIntr | RxEarly, ioaddr + EL3_CMD);
1995 
1996 	if (status & StatsFull) {			/* Empty statistics. */
1997 		static int DoneDidThat;
1998 		if (vortex_debug > 4)
1999 			pr_debug("%s: Updating stats.\n", dev->name);
2000 		update_stats(ioaddr, dev);
2001 		/* HACK: Disable statistics as an interrupt source. */
2002 		/* This occurs when we have the wrong media type! */
2003 		if (DoneDidThat == 0  &&
2004 			ioread16(ioaddr + EL3_STATUS) & StatsFull) {
2005 			pr_warn("%s: Updating statistics failed, disabling stats as an interrupt source\n",
2006 				dev->name);
2007 			iowrite16(SetIntrEnb |
2008 				  (window_read16(vp, 5, 10) & ~StatsFull),
2009 				  ioaddr + EL3_CMD);
2010 			vp->intr_enable &= ~StatsFull;
2011 			DoneDidThat++;
2012 		}
2013 	}
2014 	if (status & IntReq) {		/* Restore all interrupt sources.  */
2015 		iowrite16(vp->status_enable, ioaddr + EL3_CMD);
2016 		iowrite16(vp->intr_enable, ioaddr + EL3_CMD);
2017 	}
2018 	if (status & HostError) {
2019 		u16 fifo_diag;
2020 		fifo_diag = window_read16(vp, 4, Wn4_FIFODiag);
2021 		pr_err("%s: Host error, FIFO diagnostic register %4.4x.\n",
2022 			   dev->name, fifo_diag);
2023 		/* Adapter failure requires Tx/Rx reset and reinit. */
2024 		if (vp->full_bus_master_tx) {
2025 			int bus_status = ioread32(ioaddr + PktStatus);
2026 			/* 0x80000000 PCI master abort. */
2027 			/* 0x40000000 PCI target abort. */
2028 			if (vortex_debug)
2029 				pr_err("%s: PCI bus error, bus status %8.8x\n", dev->name, bus_status);
2030 
2031 			/* In this case, blow the card away */
2032 			/* Must not enter D3 or we can't legally issue the reset! */
2033 			vortex_down(dev, 0);
2034 			issue_and_wait(dev, TotalReset | 0xff);
2035 			vortex_up(dev);		/* AKPM: bug.  vortex_up() assumes that the rx ring is full. It may not be. */
2036 		} else if (fifo_diag & 0x0400)
2037 			do_tx_reset = 1;
2038 		if (fifo_diag & 0x3000) {
2039 			/* Reset Rx fifo and upload logic */
2040 			issue_and_wait(dev, RxReset|0x07);
2041 			/* Set the Rx filter to the current state. */
2042 			set_rx_mode(dev);
2043 			/* enable 802.1q VLAN tagged frames */
2044 			set_8021q_mode(dev, 1);
2045 			iowrite16(RxEnable, ioaddr + EL3_CMD); /* Re-enable the receiver. */
2046 			iowrite16(AckIntr | HostError, ioaddr + EL3_CMD);
2047 		}
2048 	}
2049 
2050 	if (do_tx_reset) {
2051 		issue_and_wait(dev, TxReset|reset_mask);
2052 		iowrite16(TxEnable, ioaddr + EL3_CMD);
2053 		if (!vp->full_bus_master_tx)
2054 			netif_wake_queue(dev);
2055 	}
2056 }
2057 
/*
 * vortex_start_xmit() - ndo_start_xmit for non-full-bus-master NICs.
 *
 * Writes a doubleword length header to the Tx FIFO, then pushes the
 * frame either by single-packet bus-master DMA (vp->bus_master set) or
 * by programmed I/O into the FIFO.  In the DMA case the skb is freed
 * and the queue woken from the DMADone interrupt; in the PIO case the
 * skb is consumed immediately and the queue is stopped only if the
 * FIFO lacks room for another max-sized (1536-byte) packet.  Finally
 * the Tx status stack is drained and any Tx-disabling errors handled.
 */
static netdev_tx_t
vortex_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct vortex_private *vp = netdev_priv(dev);
	void __iomem *ioaddr = vp->ioaddr;
	int skblen = skb->len;

	/* Put out the doubleword header... */
	iowrite32(skb->len, ioaddr + TX_FIFO);
	if (vp->bus_master) {
		/* Set the bus-master controller to transfer the packet. */
		int len = (skb->len + 3) & ~3;		/* round up to a doubleword */
		vp->tx_skb_dma = pci_map_single(VORTEX_PCI(vp), skb->data, len,
						PCI_DMA_TODEVICE);
		if (dma_mapping_error(&VORTEX_PCI(vp)->dev, vp->tx_skb_dma)) {
			dev_kfree_skb_any(skb);
			dev->stats.tx_dropped++;
			return NETDEV_TX_OK;
		}

		spin_lock_irq(&vp->window_lock);
		window_set(vp, 7);
		iowrite32(vp->tx_skb_dma, ioaddr + Wn7_MasterAddr);
		iowrite16(len, ioaddr + Wn7_MasterLen);
		spin_unlock_irq(&vp->window_lock);
		vp->tx_skb = skb;
		skb_tx_timestamp(skb);
		iowrite16(StartDMADown, ioaddr + EL3_CMD);
		/* netif_wake_queue() will be called at the DMADone interrupt. */
	} else {
		/* ... and the packet rounded to a doubleword. */
		skb_tx_timestamp(skb);
		iowrite32_rep(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2);
		dev_consume_skb_any (skb);
		if (ioread16(ioaddr + TxFree) > 1536) {
			netif_start_queue (dev);	/* AKPM: redundant? */
		} else {
			/* Interrupt us when the FIFO has room for max-sized packet. */
			netif_stop_queue(dev);
			iowrite16(SetTxThreshold + (1536>>2), ioaddr + EL3_CMD);
		}
	}

	netdev_sent_queue(dev, skblen);

	/* Clear the Tx status stack. */
	{
		int tx_status;
		int i = 32;		/* bounded drain of the status stack */

		while (--i > 0	&&	(tx_status = ioread8(ioaddr + TxStatus)) > 0) {
			if (tx_status & 0x3C) {		/* A Tx-disabling error occurred.  */
				if (vortex_debug > 2)
				  pr_debug("%s: Tx error, status %2.2x.\n",
						 dev->name, tx_status);
				if (tx_status & 0x04) dev->stats.tx_fifo_errors++;
				if (tx_status & 0x38) dev->stats.tx_aborted_errors++;
				if (tx_status & 0x30) {
					issue_and_wait(dev, TxReset);
				}
				iowrite16(TxEnable, ioaddr + EL3_CMD);
			}
			iowrite8(0x00, ioaddr + TxStatus); /* Pop the status stack. */
		}
	}
	return NETDEV_TX_OK;
}
2125 
/*
 * boomerang_start_xmit() - ndo_start_xmit for full-bus-master NICs.
 *
 * Builds a Tx descriptor for the skb (scatter-gather with optional
 * hardware checksum offload when DO_ZEROCOPY, single-fragment
 * otherwise), links it onto the download list under DownStall, and
 * unstalls the downloader.  Returns NETDEV_TX_BUSY to make the stack
 * retry if called re-entrantly from our own ISR or if the ring is
 * full; on DMA mapping failure the mappings made so far are undone and
 * the packet is silently dropped (NETDEV_TX_OK).
 */
static netdev_tx_t
boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct vortex_private *vp = netdev_priv(dev);
	void __iomem *ioaddr = vp->ioaddr;
	/* Calculate the next Tx descriptor entry. */
	int entry = vp->cur_tx % TX_RING_SIZE;
	int skblen = skb->len;
	struct boom_tx_desc *prev_entry = &vp->tx_ring[(vp->cur_tx-1) % TX_RING_SIZE];
	unsigned long flags;
	dma_addr_t dma_addr;

	if (vortex_debug > 6) {
		pr_debug("boomerang_start_xmit()\n");
		pr_debug("%s: Trying to send a packet, Tx index %d.\n",
			   dev->name, vp->cur_tx);
	}

	/*
	 * We can't allow a recursion from our interrupt handler back into the
	 * tx routine, as they take the same spin lock, and that causes
	 * deadlock.  Just return NETDEV_TX_BUSY and let the stack try again in
	 * a bit
	 */
	if (vp->handling_irq)
		return NETDEV_TX_BUSY;

	if (vp->cur_tx - vp->dirty_tx >= TX_RING_SIZE) {
		if (vortex_debug > 0)
			pr_warn("%s: BUG! Tx Ring full, refusing to send buffer\n",
				dev->name);
		netif_stop_queue(dev);
		return NETDEV_TX_BUSY;
	}

	vp->tx_skbuff[entry] = skb;

	vp->tx_ring[entry].next = 0;
#if DO_ZEROCOPY
	/* Ask the NIC for TCP/UDP checksum insertion when the stack
	   requested offload (CHECKSUM_PARTIAL). */
	if (skb->ip_summed != CHECKSUM_PARTIAL)
			vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded);
	else
			vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded | AddTCPChksum | AddUDPChksum);

	if (!skb_shinfo(skb)->nr_frags) {
		dma_addr = pci_map_single(VORTEX_PCI(vp), skb->data, skb->len,
					  PCI_DMA_TODEVICE);
		if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr))
			goto out_dma_err;

		vp->tx_ring[entry].frag[0].addr = cpu_to_le32(dma_addr);
		vp->tx_ring[entry].frag[0].length = cpu_to_le32(skb->len | LAST_FRAG);
	} else {
		int i;

		/* Fragment 0 is the linear header portion of the skb. */
		dma_addr = pci_map_single(VORTEX_PCI(vp), skb->data,
					  skb_headlen(skb), PCI_DMA_TODEVICE);
		if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr))
			goto out_dma_err;

		vp->tx_ring[entry].frag[0].addr = cpu_to_le32(dma_addr);
		vp->tx_ring[entry].frag[0].length = cpu_to_le32(skb_headlen(skb));

		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			dma_addr = skb_frag_dma_map(&VORTEX_PCI(vp)->dev, frag,
						    0,
						    frag->size,
						    DMA_TO_DEVICE);
			if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr)) {
				/* Unwind: unmap fragments mapped so far,
				   then the linear part, then drop. */
				for(i = i-1; i >= 0; i--)
					dma_unmap_page(&VORTEX_PCI(vp)->dev,
						       le32_to_cpu(vp->tx_ring[entry].frag[i+1].addr),
						       le32_to_cpu(vp->tx_ring[entry].frag[i+1].length),
						       DMA_TO_DEVICE);

				pci_unmap_single(VORTEX_PCI(vp),
						 le32_to_cpu(vp->tx_ring[entry].frag[0].addr),
						 le32_to_cpu(vp->tx_ring[entry].frag[0].length),
						 PCI_DMA_TODEVICE);

				goto out_dma_err;
			}

			vp->tx_ring[entry].frag[i+1].addr =
						cpu_to_le32(dma_addr);

			/* LAST_FRAG marks the end of the fragment list. */
			if (i == skb_shinfo(skb)->nr_frags-1)
					vp->tx_ring[entry].frag[i+1].length = cpu_to_le32(skb_frag_size(frag)|LAST_FRAG);
			else
					vp->tx_ring[entry].frag[i+1].length = cpu_to_le32(skb_frag_size(frag));
		}
	}
#else
	dma_addr = pci_map_single(VORTEX_PCI(vp), skb->data, skb->len, PCI_DMA_TODEVICE);
	if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr))
		goto out_dma_err;
	vp->tx_ring[entry].addr = cpu_to_le32(dma_addr);
	vp->tx_ring[entry].length = cpu_to_le32(skb->len | LAST_FRAG);
	vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded);
#endif

	spin_lock_irqsave(&vp->lock, flags);
	/* Wait for the stall to complete. */
	issue_and_wait(dev, DownStall);
	prev_entry->next = cpu_to_le32(vp->tx_ring_dma + entry * sizeof(struct boom_tx_desc));
	if (ioread32(ioaddr + DownListPtr) == 0) {
		/* Downloader went idle; point it at this descriptor. */
		iowrite32(vp->tx_ring_dma + entry * sizeof(struct boom_tx_desc), ioaddr + DownListPtr);
		vp->queued_packet++;
	}

	vp->cur_tx++;
	netdev_sent_queue(dev, skblen);

	if (vp->cur_tx - vp->dirty_tx > TX_RING_SIZE - 1) {
		netif_stop_queue (dev);
	} else {					/* Clear previous interrupt enable. */
#if defined(tx_interrupt_mitigation)
		/* Dubious. If in boomerang_interrupt "faster" cyclone ifdef
		 * were selected, this would corrupt DN_COMPLETE. No?
		 */
		prev_entry->status &= cpu_to_le32(~TxIntrUploaded);
#endif
	}
	skb_tx_timestamp(skb);
	iowrite16(DownUnstall, ioaddr + EL3_CMD);
	spin_unlock_irqrestore(&vp->lock, flags);
out:
	return NETDEV_TX_OK;
out_dma_err:
	dev_err(&VORTEX_PCI(vp)->dev, "Error mapping dma buffer\n");
	goto out;
}
2260 
2261 /* The interrupt handler does all of the Rx thread work and cleans up
2262    after the Tx thread. */
2263 
2264 /*
2265  * This is the ISR for the vortex series chips.
2266  * full_bus_master_tx == 0 && full_bus_master_rx == 0
2267  */
2268 
/*
 * vortex_interrupt() - ISR for non-bus-master (Vortex) chips.
 *
 * Loops while IntLatch (and RxComplete) is set, handling RxComplete by
 * PIO receive, TxAvailable by waking the queue, and DMADone by
 * unmapping and freeing the single in-flight bus-master Tx buffer.
 * Uncommon sources are punted to vortex_error().  After
 * max_interrupt_work iterations all pending sources are masked and
 * remembered in vp->deferred; the media timer re-enables them later.
 * Note: EL3_CMD and EL3_STATUS appear to share a register offset —
 * the ioread16(ioaddr + EL3_CMD) in the throttle loop presumably
 * returns status (verify against the register map).
 */
static irqreturn_t
vortex_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct vortex_private *vp = netdev_priv(dev);
	void __iomem *ioaddr;
	int status;
	int work_done = max_interrupt_work;
	int handled = 0;
	unsigned int bytes_compl = 0, pkts_compl = 0;

	ioaddr = vp->ioaddr;
	spin_lock(&vp->lock);

	status = ioread16(ioaddr + EL3_STATUS);

	if (vortex_debug > 6)
		pr_debug("vortex_interrupt(). status=0x%4x\n", status);

	if ((status & IntLatch) == 0)
		goto handler_exit;		/* No interrupt: shared IRQs cause this */
	handled = 1;

	if (status & IntReq) {
		/* Fold in events masked off by an earlier throttle. */
		status |= vp->deferred;
		vp->deferred = 0;
	}

	if (status == 0xffff)		/* h/w no longer present (hotplug)? */
		goto handler_exit;

	if (vortex_debug > 4)
		pr_debug("%s: interrupt, status %4.4x, latency %d ticks.\n",
			   dev->name, status, ioread8(ioaddr + Timer));

	spin_lock(&vp->window_lock);
	window_set(vp, 7);

	do {
		if (vortex_debug > 5)
				pr_debug("%s: In interrupt loop, status %4.4x.\n",
					   dev->name, status);
		if (status & RxComplete)
			vortex_rx(dev);

		if (status & TxAvailable) {
			if (vortex_debug > 5)
				pr_debug("	TX room bit was handled.\n");
			/* There's room in the FIFO for a full-sized packet. */
			iowrite16(AckIntr | TxAvailable, ioaddr + EL3_CMD);
			netif_wake_queue (dev);
		}

		if (status & DMADone) {
			if (ioread16(ioaddr + Wn7_MasterStatus) & 0x1000) {
				iowrite16(0x1000, ioaddr + Wn7_MasterStatus); /* Ack the event. */
				pci_unmap_single(VORTEX_PCI(vp), vp->tx_skb_dma, (vp->tx_skb->len + 3) & ~3, PCI_DMA_TODEVICE);
				pkts_compl++;
				bytes_compl += vp->tx_skb->len;
				dev_kfree_skb_irq(vp->tx_skb); /* Release the transferred buffer */
				if (ioread16(ioaddr + TxFree) > 1536) {
					/*
					 * AKPM: FIXME: I don't think we need this.  If the queue was stopped due to
					 * insufficient FIFO room, the TxAvailable test will succeed and call
					 * netif_wake_queue()
					 */
					netif_wake_queue(dev);
				} else { /* Interrupt when FIFO has room for max-sized packet. */
					iowrite16(SetTxThreshold + (1536>>2), ioaddr + EL3_CMD);
					netif_stop_queue(dev);
				}
			}
		}
		/* Check for all uncommon interrupts at once. */
		if (status & (HostError | RxEarly | StatsFull | TxComplete | IntReq)) {
			if (status == 0xffff)
				break;
			if (status & RxEarly)
				vortex_rx(dev);
			/* vortex_error() switches register windows; drop
			   window_lock around it and restore window 7. */
			spin_unlock(&vp->window_lock);
			vortex_error(dev, status);
			spin_lock(&vp->window_lock);
			window_set(vp, 7);
		}

		if (--work_done < 0) {
			pr_warn("%s: Too much work in interrupt, status %4.4x\n",
				dev->name, status);
			/* Disable all pending interrupts. */
			do {
				vp->deferred |= status;
				iowrite16(SetStatusEnb | (~vp->deferred & vp->status_enable),
					 ioaddr + EL3_CMD);
				iowrite16(AckIntr | (vp->deferred & 0x7ff), ioaddr + EL3_CMD);
			} while ((status = ioread16(ioaddr + EL3_CMD)) & IntLatch);
			/* The timer will reenable interrupts. */
			mod_timer(&vp->timer, jiffies + 1*HZ);
			break;
		}
		/* Acknowledge the IRQ. */
		iowrite16(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD);
	} while ((status = ioread16(ioaddr + EL3_STATUS)) & (IntLatch | RxComplete));

	netdev_completed_queue(dev, pkts_compl, bytes_compl);
	spin_unlock(&vp->window_lock);

	if (vortex_debug > 4)
		pr_debug("%s: exiting interrupt, status %4.4x.\n",
			   dev->name, status);
handler_exit:
	spin_unlock(&vp->lock);
	return IRQ_RETVAL(handled);
}
2382 
2383 /*
2384  * This is the ISR for the boomerang series chips.
2385  * full_bus_master_tx == 1 && full_bus_master_rx == 1
2386  */
2387 
/*
 * boomerang_interrupt() - ISR for full-bus-master (Boomerang) chips.
 *
 * Handles UpComplete by running boomerang_rx(), and DownComplete by
 * reaping finished Tx descriptors: unmapping their DMA buffers,
 * freeing the skbs, advancing dirty_tx, and waking the queue when ring
 * space frees up.  Uncommon sources go to vortex_error().  Like
 * vortex_interrupt(), it throttles after max_interrupt_work loops by
 * masking sources into vp->deferred for the timer to restore.
 * vp->handling_irq is set for the duration so boomerang_start_xmit()
 * can refuse re-entry (same spinlock — would deadlock).
 */
static irqreturn_t
boomerang_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct vortex_private *vp = netdev_priv(dev);
	void __iomem *ioaddr;
	int status;
	int work_done = max_interrupt_work;
	int handled = 0;
	unsigned int bytes_compl = 0, pkts_compl = 0;

	ioaddr = vp->ioaddr;


	/*
	 * It seems dopey to put the spinlock this early, but we could race against vortex_tx_timeout
	 * and boomerang_start_xmit
	 */
	spin_lock(&vp->lock);
	vp->handling_irq = 1;

	status = ioread16(ioaddr + EL3_STATUS);

	if (vortex_debug > 6)
		pr_debug("boomerang_interrupt. status=0x%4x\n", status);

	if ((status & IntLatch) == 0)
		goto handler_exit;		/* No interrupt: shared IRQs can cause this */
	handled = 1;

	if (status == 0xffff) {		/* h/w no longer present (hotplug)? */
		if (vortex_debug > 1)
			pr_debug("boomerang_interrupt(1): status = 0xffff\n");
		goto handler_exit;
	}

	if (status & IntReq) {
		/* Fold in events masked off by an earlier throttle. */
		status |= vp->deferred;
		vp->deferred = 0;
	}

	if (vortex_debug > 4)
		pr_debug("%s: interrupt, status %4.4x, latency %d ticks.\n",
			   dev->name, status, ioread8(ioaddr + Timer));
	do {
		if (vortex_debug > 5)
				pr_debug("%s: In interrupt loop, status %4.4x.\n",
					   dev->name, status);
		if (status & UpComplete) {
			iowrite16(AckIntr | UpComplete, ioaddr + EL3_CMD);
			if (vortex_debug > 5)
				pr_debug("boomerang_interrupt->boomerang_rx\n");
			boomerang_rx(dev);
		}

		if (status & DownComplete) {
			unsigned int dirty_tx = vp->dirty_tx;

			iowrite16(AckIntr | DownComplete, ioaddr + EL3_CMD);
			/* Reap every descriptor the NIC has finished with. */
			while (vp->cur_tx - dirty_tx > 0) {
				int entry = dirty_tx % TX_RING_SIZE;
#if 1	/* AKPM: the latter is faster, but cyclone-only */
				if (ioread32(ioaddr + DownListPtr) ==
					vp->tx_ring_dma + entry * sizeof(struct boom_tx_desc))
					break;			/* It still hasn't been processed. */
#else
				if ((vp->tx_ring[entry].status & DN_COMPLETE) == 0)
					break;			/* It still hasn't been processed. */
#endif

				if (vp->tx_skbuff[entry]) {
					struct sk_buff *skb = vp->tx_skbuff[entry];
#if DO_ZEROCOPY
					int i;
					/* Unmap the linear part, then each page fragment. */
					pci_unmap_single(VORTEX_PCI(vp),
							le32_to_cpu(vp->tx_ring[entry].frag[0].addr),
							le32_to_cpu(vp->tx_ring[entry].frag[0].length)&0xFFF,
							PCI_DMA_TODEVICE);

					for (i=1; i<=skb_shinfo(skb)->nr_frags; i++)
							pci_unmap_page(VORTEX_PCI(vp),
											 le32_to_cpu(vp->tx_ring[entry].frag[i].addr),
											 le32_to_cpu(vp->tx_ring[entry].frag[i].length)&0xFFF,
											 PCI_DMA_TODEVICE);
#else
					pci_unmap_single(VORTEX_PCI(vp),
						le32_to_cpu(vp->tx_ring[entry].addr), skb->len, PCI_DMA_TODEVICE);
#endif
					pkts_compl++;
					bytes_compl += skb->len;
					dev_kfree_skb_irq(skb);
					vp->tx_skbuff[entry] = NULL;
				} else {
					pr_debug("boomerang_interrupt: no skb!\n");
				}
				/* dev->stats.tx_packets++;  Counted below. */
				dirty_tx++;
			}
			vp->dirty_tx = dirty_tx;
			if (vp->cur_tx - dirty_tx <= TX_RING_SIZE - 1) {
				if (vortex_debug > 6)
					pr_debug("boomerang_interrupt: wake queue\n");
				netif_wake_queue (dev);
			}
		}

		/* Check for all uncommon interrupts at once. */
		if (status & (HostError | RxEarly | StatsFull | TxComplete | IntReq))
			vortex_error(dev, status);

		if (--work_done < 0) {
			pr_warn("%s: Too much work in interrupt, status %4.4x\n",
				dev->name, status);
			/* Disable all pending interrupts. */
			do {
				vp->deferred |= status;
				iowrite16(SetStatusEnb | (~vp->deferred & vp->status_enable),
					 ioaddr + EL3_CMD);
				iowrite16(AckIntr | (vp->deferred & 0x7ff), ioaddr + EL3_CMD);
			} while ((status = ioread16(ioaddr + EL3_CMD)) & IntLatch);
			/* The timer will reenable interrupts. */
			mod_timer(&vp->timer, jiffies + 1*HZ);
			break;
		}
		/* Acknowledge the IRQ. */
		iowrite16(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD);
		if (vp->cb_fn_base)			/* The PCMCIA people are idiots.  */
			iowrite32(0x8000, vp->cb_fn_base + 4);

	} while ((status = ioread16(ioaddr + EL3_STATUS)) & IntLatch);
	netdev_completed_queue(dev, pkts_compl, bytes_compl);

	if (vortex_debug > 4)
		pr_debug("%s: exiting interrupt, status %4.4x.\n",
			   dev->name, status);
handler_exit:
	vp->handling_irq = 0;
	spin_unlock(&vp->lock);
	return IRQ_RETVAL(handled);
}
2528 
/*
 * vortex_rx() - receive path for non-bus-master (Vortex) chips.
 *
 * Drains the Rx FIFO while RxStatus reports a packet: error frames are
 * classified into stats counters and discarded; good frames are copied
 * into a fresh skb either by single-packet bus-master DMA (when
 * vp->bus_master and the engine is idle) or by PIO reads from the
 * FIFO, then handed to netif_rx().  Called from the ISR with vp->lock
 * held.  NOTE(review): both the post-DMA MasterStatus poll and the
 * 200-iteration CmdInProgress wait are busy-waits with no timeout
 * handling beyond the loop bound — presumably safe on real hardware,
 * but worth confirming.
 */
static int vortex_rx(struct net_device *dev)
{
	struct vortex_private *vp = netdev_priv(dev);
	void __iomem *ioaddr = vp->ioaddr;
	int i;
	short rx_status;

	if (vortex_debug > 5)
		pr_debug("vortex_rx(): status %4.4x, rx_status %4.4x.\n",
			   ioread16(ioaddr+EL3_STATUS), ioread16(ioaddr+RxStatus));
	while ((rx_status = ioread16(ioaddr + RxStatus)) > 0) {
		if (rx_status & 0x4000) { /* Error, update stats. */
			unsigned char rx_error = ioread8(ioaddr + RxErrors);
			if (vortex_debug > 2)
				pr_debug(" Rx error: status %2.2x.\n", rx_error);
			dev->stats.rx_errors++;
			if (rx_error & 0x01)  dev->stats.rx_over_errors++;
			if (rx_error & 0x02)  dev->stats.rx_length_errors++;
			if (rx_error & 0x04)  dev->stats.rx_frame_errors++;
			if (rx_error & 0x08)  dev->stats.rx_crc_errors++;
			if (rx_error & 0x10)  dev->stats.rx_length_errors++;
		} else {
			/* The packet length: up to 4.5K!. */
			int pkt_len = rx_status & 0x1fff;
			struct sk_buff *skb;

			skb = netdev_alloc_skb(dev, pkt_len + 5);
			if (vortex_debug > 4)
				pr_debug("Receiving packet size %d status %4.4x.\n",
					   pkt_len, rx_status);
			if (skb != NULL) {
				skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
				/* 'skb_put()' points to the start of sk_buff data area. */
				if (vp->bus_master &&
					! (ioread16(ioaddr + Wn7_MasterStatus) & 0x8000)) {
					/* DMA the frame straight into the skb. */
					dma_addr_t dma = pci_map_single(VORTEX_PCI(vp), skb_put(skb, pkt_len),
									   pkt_len, PCI_DMA_FROMDEVICE);
					iowrite32(dma, ioaddr + Wn7_MasterAddr);
					iowrite16((skb->len + 3) & ~3, ioaddr + Wn7_MasterLen);
					iowrite16(StartDMAUp, ioaddr + EL3_CMD);
					while (ioread16(ioaddr + Wn7_MasterStatus) & 0x8000)
						;
					pci_unmap_single(VORTEX_PCI(vp), dma, pkt_len, PCI_DMA_FROMDEVICE);
				} else {
					/* PIO the frame out of the Rx FIFO. */
					ioread32_rep(ioaddr + RX_FIFO,
					             skb_put(skb, pkt_len),
						     (pkt_len + 3) >> 2);
				}
				iowrite16(RxDiscard, ioaddr + EL3_CMD); /* Pop top Rx packet. */
				skb->protocol = eth_type_trans(skb, dev);
				netif_rx(skb);
				dev->stats.rx_packets++;
				/* Wait a limited time to go to next packet. */
				for (i = 200; i >= 0; i--)
					if ( ! (ioread16(ioaddr + EL3_STATUS) & CmdInProgress))
						break;
				continue;
			} else if (vortex_debug > 0)
				pr_notice("%s: No memory to allocate a sk_buff of size %d.\n",
					dev->name, pkt_len);
			dev->stats.rx_dropped++;
		}
		issue_and_wait(dev, RxDiscard);
	}

	return 0;
}
2596 
/*
 * boomerang_rx() - receive path for the bus-master (Boomerang and later)
 * chips.
 *
 * Walks the Rx descriptor ring starting at vp->cur_rx, handing completed
 * packets to the stack; processes at most RX_RING_SIZE entries per call.
 * Packets shorter than rx_copybreak are copied into a freshly allocated
 * skb so the large ring buffer stays in place; longer packets are passed
 * up directly and the ring slot is refilled with a new DMA-mapped buffer.
 * Always returns 0.
 */
static int
boomerang_rx(struct net_device *dev)
{
	struct vortex_private *vp = netdev_priv(dev);
	int entry = vp->cur_rx % RX_RING_SIZE;
	void __iomem *ioaddr = vp->ioaddr;
	int rx_status;
	int rx_work_limit = RX_RING_SIZE;	/* bound the work done per call */

	if (vortex_debug > 5)
		pr_debug("boomerang_rx(): status %4.4x\n", ioread16(ioaddr+EL3_STATUS));

	while ((rx_status = le32_to_cpu(vp->rx_ring[entry].status)) & RxDComplete){
		if (--rx_work_limit < 0)
			break;
		if (rx_status & RxDError) { /* Error, update stats. */
			unsigned char rx_error = rx_status >> 16;
			if (vortex_debug > 2)
				pr_debug(" Rx error: status %2.2x.\n", rx_error);
			dev->stats.rx_errors++;
			if (rx_error & 0x01)  dev->stats.rx_over_errors++;
			if (rx_error & 0x02)  dev->stats.rx_length_errors++;
			if (rx_error & 0x04)  dev->stats.rx_frame_errors++;
			if (rx_error & 0x08)  dev->stats.rx_crc_errors++;
			if (rx_error & 0x10)  dev->stats.rx_length_errors++;
		} else {
			/* The packet length: up to 4.5K!. */
			int pkt_len = rx_status & 0x1fff;
			struct sk_buff *skb, *newskb;
			dma_addr_t newdma;
			dma_addr_t dma = le32_to_cpu(vp->rx_ring[entry].addr);

			if (vortex_debug > 4)
				pr_debug("Receiving packet size %d status %4.4x.\n",
					   pkt_len, rx_status);

			/* Check if the packet is long enough to just accept without
			   copying to a properly sized skbuff. */
			if (pkt_len < rx_copybreak &&
			    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
				skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
				/* Sync the ring buffer for CPU access, copy the frame
				   out, then hand the buffer back to the device. */
				pci_dma_sync_single_for_cpu(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
				/* 'skb_put()' points to the start of sk_buff data area. */
				skb_put_data(skb, vp->rx_skbuff[entry]->data,
					     pkt_len);
				pci_dma_sync_single_for_device(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
				vp->rx_copy++;
			} else {
				/* Pre-allocate the replacement skb.  If it or its
				 * mapping fails then recycle the buffer thats already
				 * in place
				 */
				newskb = netdev_alloc_skb_ip_align(dev, PKT_BUF_SZ);
				if (!newskb) {
					dev->stats.rx_dropped++;
					goto clear_complete;
				}
				newdma = pci_map_single(VORTEX_PCI(vp), newskb->data,
							PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
				if (dma_mapping_error(&VORTEX_PCI(vp)->dev, newdma)) {
					dev->stats.rx_dropped++;
					consume_skb(newskb);
					goto clear_complete;
				}

				/* Pass up the skbuff already on the Rx ring. */
				skb = vp->rx_skbuff[entry];
				vp->rx_skbuff[entry] = newskb;
				vp->rx_ring[entry].addr = cpu_to_le32(newdma);
				skb_put(skb, pkt_len);
				pci_unmap_single(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
				vp->rx_nocopy++;
			}
			skb->protocol = eth_type_trans(skb, dev);
			{					/* Use hardware checksum info. */
				int csum_bits = rx_status & 0xee000000;
				if (csum_bits &&
					(csum_bits == (IPChksumValid | TCPChksumValid) ||
					 csum_bits == (IPChksumValid | UDPChksumValid))) {
					skb->ip_summed = CHECKSUM_UNNECESSARY;
					vp->rx_csumhits++;
				}
			}
			netif_rx(skb);
			dev->stats.rx_packets++;
		}

clear_complete:
		/* Return the descriptor to the NIC and unstall the upload
		   engine in case it stopped on a full ring. */
		vp->rx_ring[entry].status = 0;	/* Clear complete bit. */
		iowrite16(UpUnstall, ioaddr + EL3_CMD);
		entry = (++vp->cur_rx) % RX_RING_SIZE;
	}
	return 0;
}
2691 
/*
 * vortex_down() - quiesce the hardware for interface down or suspend.
 *
 * Stops the Tx queue and the housekeeping timer, disables statistics
 * collection, the receiver and the transmitter, masks all interrupt
 * sources, and pulls a final copy of the hardware counters into
 * dev->stats.  When @final_down is set on a PCI device, PCI config
 * space is saved and Wake-on-LAN is armed via acpi_set_WOL().
 */
static void
vortex_down(struct net_device *dev, int final_down)
{
	struct vortex_private *vp = netdev_priv(dev);
	void __iomem *ioaddr = vp->ioaddr;

	netdev_reset_queue(dev);
	netif_stop_queue(dev);

	del_timer_sync(&vp->timer);

	/* Turn off statistics ASAP.  We update dev->stats below. */
	iowrite16(StatsDisable, ioaddr + EL3_CMD);

	/* Disable the receiver and transmitter. */
	iowrite16(RxDisable, ioaddr + EL3_CMD);
	iowrite16(TxDisable, ioaddr + EL3_CMD);

	/* Disable receiving 802.1q tagged frames */
	set_8021q_mode(dev, 0);

	if (dev->if_port == XCVR_10base2)
		/* Turn off thinnet power.  Green! */
		iowrite16(StopCoax, ioaddr + EL3_CMD);

	/* Mask all interrupt sources. */
	iowrite16(SetIntrEnb | 0x0000, ioaddr + EL3_CMD);

	update_stats(ioaddr, dev);
	/* Detach the bus-master DMA rings from the hardware. */
	if (vp->full_bus_master_rx)
		iowrite32(0, ioaddr + UpListPtr);
	if (vp->full_bus_master_tx)
		iowrite32(0, ioaddr + DownListPtr);

	if (final_down && VORTEX_PCI(vp)) {
		vp->pm_state_valid = 1;
		pci_save_state(VORTEX_PCI(vp));
		acpi_set_WOL(dev);
	}
}
2731 
/*
 * vortex_close() - ndo_stop handler: shut the interface down.
 *
 * Brings the hardware down (if the device is still present), releases
 * the IRQ, and frees every skb and DMA mapping still held by the
 * bus-master Rx and Tx rings.  Always returns 0.
 */
static int
vortex_close(struct net_device *dev)
{
	struct vortex_private *vp = netdev_priv(dev);
	void __iomem *ioaddr = vp->ioaddr;
	int i;

	/* The device may already have been marked absent elsewhere. */
	if (netif_device_present(dev))
		vortex_down(dev, 1);

	if (vortex_debug > 1) {
		pr_debug("%s: vortex_close() status %4.4x, Tx status %2.2x.\n",
			   dev->name, ioread16(ioaddr + EL3_STATUS), ioread8(ioaddr + TxStatus));
		pr_debug("%s: vortex close stats: rx_nocopy %d rx_copy %d"
			   " tx_queued %d Rx pre-checksummed %d.\n",
			   dev->name, vp->rx_nocopy, vp->rx_copy, vp->queued_packet, vp->rx_csumhits);
	}

#if DO_ZEROCOPY
	if (vp->rx_csumhits &&
	    (vp->drv_flags & HAS_HWCKSM) == 0 &&
	    (vp->card_idx >= MAX_UNITS || hw_checksums[vp->card_idx] == -1)) {
		pr_warn("%s supports hardware checksums, and we're not using them!\n",
			dev->name);
	}
#endif

	free_irq(dev->irq, dev);

	if (vp->full_bus_master_rx) { /* Free Boomerang bus master Rx buffers. */
		for (i = 0; i < RX_RING_SIZE; i++)
			if (vp->rx_skbuff[i]) {
				pci_unmap_single(	VORTEX_PCI(vp), le32_to_cpu(vp->rx_ring[i].addr),
									PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
				dev_kfree_skb(vp->rx_skbuff[i]);
				vp->rx_skbuff[i] = NULL;
			}
	}
	if (vp->full_bus_master_tx) { /* Free Boomerang bus master Tx buffers. */
		for (i = 0; i < TX_RING_SIZE; i++) {
			if (vp->tx_skbuff[i]) {
				struct sk_buff *skb = vp->tx_skbuff[i];
#if DO_ZEROCOPY
				int k;

				/* Unmap the head fragment plus nr_frags page
				   fragments -- hence the <= bound. */
				for (k=0; k<=skb_shinfo(skb)->nr_frags; k++)
						pci_unmap_single(VORTEX_PCI(vp),
										 le32_to_cpu(vp->tx_ring[i].frag[k].addr),
										 le32_to_cpu(vp->tx_ring[i].frag[k].length)&0xFFF,
										 PCI_DMA_TODEVICE);
#else
				pci_unmap_single(VORTEX_PCI(vp), le32_to_cpu(vp->tx_ring[i].addr), skb->len, PCI_DMA_TODEVICE);
#endif
				dev_kfree_skb(skb);
				vp->tx_skbuff[i] = NULL;
			}
		}
	}

	return 0;
}
2793 
2794 static void
2795 dump_tx_ring(struct net_device *dev)
2796 {
2797 	if (vortex_debug > 0) {
2798 	struct vortex_private *vp = netdev_priv(dev);
2799 		void __iomem *ioaddr = vp->ioaddr;
2800 
2801 		if (vp->full_bus_master_tx) {
2802 			int i;
2803 			int stalled = ioread32(ioaddr + PktStatus) & 0x04;	/* Possible racy. But it's only debug stuff */
2804 
2805 			pr_err("  Flags; bus-master %d, dirty %d(%d) current %d(%d)\n",
2806 					vp->full_bus_master_tx,
2807 					vp->dirty_tx, vp->dirty_tx % TX_RING_SIZE,
2808 					vp->cur_tx, vp->cur_tx % TX_RING_SIZE);
2809 			pr_err("  Transmit list %8.8x vs. %p.\n",
2810 				   ioread32(ioaddr + DownListPtr),
2811 				   &vp->tx_ring[vp->dirty_tx % TX_RING_SIZE]);
2812 			issue_and_wait(dev, DownStall);
2813 			for (i = 0; i < TX_RING_SIZE; i++) {
2814 				unsigned int length;
2815 
2816 #if DO_ZEROCOPY
2817 				length = le32_to_cpu(vp->tx_ring[i].frag[0].length);
2818 #else
2819 				length = le32_to_cpu(vp->tx_ring[i].length);
2820 #endif
2821 				pr_err("  %d: @%p  length %8.8x status %8.8x\n",
2822 					   i, &vp->tx_ring[i], length,
2823 					   le32_to_cpu(vp->tx_ring[i].status));
2824 			}
2825 			if (!stalled)
2826 				iowrite16(DownUnstall, ioaddr + EL3_CMD);
2827 		}
2828 	}
2829 }
2830 
2831 static struct net_device_stats *vortex_get_stats(struct net_device *dev)
2832 {
2833 	struct vortex_private *vp = netdev_priv(dev);
2834 	void __iomem *ioaddr = vp->ioaddr;
2835 	unsigned long flags;
2836 
2837 	if (netif_device_present(dev)) {	/* AKPM: Used to be netif_running */
2838 		spin_lock_irqsave (&vp->lock, flags);
2839 		update_stats(ioaddr, dev);
2840 		spin_unlock_irqrestore (&vp->lock, flags);
2841 	}
2842 	return &dev->stats;
2843 }
2844 
/*  Update statistics.
	Unlike with the EL3 we need not worry about interrupts changing
	the window setting from underneath us, but we must still guard
	against a race condition with a StatsUpdate interrupt updating the
	table.  This is done by checking that the ASM (!) code generated uses
	atomic updates with '+='.

	Callers normally hold vp->lock (see vortex_get_stats()).
	NOTE(review): the hardware counters appear to be clear-on-read
	(see the "Must read to clear" comment below), so every register
	touched here must be accumulated or explicitly discarded.
	*/
static void update_stats(void __iomem *ioaddr, struct net_device *dev)
{
	struct vortex_private *vp = netdev_priv(dev);

	/* Unlike the 3c5x9 we need not turn off stats updates while reading. */
	/* Switch to the stats window, and read everything. */
	dev->stats.tx_carrier_errors		+= window_read8(vp, 6, 0);
	dev->stats.tx_heartbeat_errors		+= window_read8(vp, 6, 1);
	dev->stats.tx_window_errors		+= window_read8(vp, 6, 4);
	dev->stats.rx_fifo_errors		+= window_read8(vp, 6, 5);
	dev->stats.tx_packets			+= window_read8(vp, 6, 6);
	/* Bits 5:4 of register 9 extend the Tx packet counter. */
	dev->stats.tx_packets			+= (window_read8(vp, 6, 9) &
						    0x30) << 4;
	/* Rx packets	*/			window_read8(vp, 6, 7);   /* Must read to clear */
	/* Don't bother with register 9, an extension of registers 6&7.
	   If we do use the 6&7 values the atomic update assumption above
	   is invalid. */
	dev->stats.rx_bytes 			+= window_read16(vp, 6, 10);
	dev->stats.tx_bytes 			+= window_read16(vp, 6, 12);
	/* Extra stats for get_ethtool_stats() */
	vp->xstats.tx_multiple_collisions	+= window_read8(vp, 6, 2);
	vp->xstats.tx_single_collisions         += window_read8(vp, 6, 3);
	vp->xstats.tx_deferred			+= window_read8(vp, 6, 8);
	vp->xstats.rx_bad_ssd			+= window_read8(vp, 4, 12);

	dev->stats.collisions = vp->xstats.tx_multiple_collisions
		+ vp->xstats.tx_single_collisions
		+ vp->xstats.tx_max_collisions;

	{
		/* Window 4, register 13 carries the upper nibbles of the
		   Rx (low nibble) and Tx (high nibble) byte counters. */
		u8 up = window_read8(vp, 4, 13);
		dev->stats.rx_bytes += (up & 0x0f) << 16;
		dev->stats.tx_bytes += (up & 0xf0) << 12;
	}
}
2887 
2888 static int vortex_nway_reset(struct net_device *dev)
2889 {
2890 	struct vortex_private *vp = netdev_priv(dev);
2891 
2892 	return mii_nway_restart(&vp->mii);
2893 }
2894 
2895 static int vortex_get_link_ksettings(struct net_device *dev,
2896 				     struct ethtool_link_ksettings *cmd)
2897 {
2898 	struct vortex_private *vp = netdev_priv(dev);
2899 
2900 	mii_ethtool_get_link_ksettings(&vp->mii, cmd);
2901 
2902 	return 0;
2903 }
2904 
2905 static int vortex_set_link_ksettings(struct net_device *dev,
2906 				     const struct ethtool_link_ksettings *cmd)
2907 {
2908 	struct vortex_private *vp = netdev_priv(dev);
2909 
2910 	return mii_ethtool_set_link_ksettings(&vp->mii, cmd);
2911 }
2912 
2913 static u32 vortex_get_msglevel(struct net_device *dev)
2914 {
2915 	return vortex_debug;
2916 }
2917 
2918 static void vortex_set_msglevel(struct net_device *dev, u32 dbg)
2919 {
2920 	vortex_debug = dbg;
2921 }
2922 
2923 static int vortex_get_sset_count(struct net_device *dev, int sset)
2924 {
2925 	switch (sset) {
2926 	case ETH_SS_STATS:
2927 		return VORTEX_NUM_STATS;
2928 	default:
2929 		return -EOPNOTSUPP;
2930 	}
2931 }
2932 
2933 static void vortex_get_ethtool_stats(struct net_device *dev,
2934 	struct ethtool_stats *stats, u64 *data)
2935 {
2936 	struct vortex_private *vp = netdev_priv(dev);
2937 	void __iomem *ioaddr = vp->ioaddr;
2938 	unsigned long flags;
2939 
2940 	spin_lock_irqsave(&vp->lock, flags);
2941 	update_stats(ioaddr, dev);
2942 	spin_unlock_irqrestore(&vp->lock, flags);
2943 
2944 	data[0] = vp->xstats.tx_deferred;
2945 	data[1] = vp->xstats.tx_max_collisions;
2946 	data[2] = vp->xstats.tx_multiple_collisions;
2947 	data[3] = vp->xstats.tx_single_collisions;
2948 	data[4] = vp->xstats.rx_bad_ssd;
2949 }
2950 
2951 
2952 static void vortex_get_strings(struct net_device *dev, u32 stringset, u8 *data)
2953 {
2954 	switch (stringset) {
2955 	case ETH_SS_STATS:
2956 		memcpy(data, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
2957 		break;
2958 	default:
2959 		WARN_ON(1);
2960 		break;
2961 	}
2962 }
2963 
2964 static void vortex_get_drvinfo(struct net_device *dev,
2965 					struct ethtool_drvinfo *info)
2966 {
2967 	struct vortex_private *vp = netdev_priv(dev);
2968 
2969 	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
2970 	if (VORTEX_PCI(vp)) {
2971 		strlcpy(info->bus_info, pci_name(VORTEX_PCI(vp)),
2972 			sizeof(info->bus_info));
2973 	} else {
2974 		if (VORTEX_EISA(vp))
2975 			strlcpy(info->bus_info, dev_name(vp->gendev),
2976 				sizeof(info->bus_info));
2977 		else
2978 			snprintf(info->bus_info, sizeof(info->bus_info),
2979 				"EISA 0x%lx %d", dev->base_addr, dev->irq);
2980 	}
2981 }
2982 
2983 static void vortex_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2984 {
2985 	struct vortex_private *vp = netdev_priv(dev);
2986 
2987 	if (!VORTEX_PCI(vp))
2988 		return;
2989 
2990 	wol->supported = WAKE_MAGIC;
2991 
2992 	wol->wolopts = 0;
2993 	if (vp->enable_wol)
2994 		wol->wolopts |= WAKE_MAGIC;
2995 }
2996 
2997 static int vortex_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2998 {
2999 	struct vortex_private *vp = netdev_priv(dev);
3000 
3001 	if (!VORTEX_PCI(vp))
3002 		return -EOPNOTSUPP;
3003 
3004 	if (wol->wolopts & ~WAKE_MAGIC)
3005 		return -EINVAL;
3006 
3007 	if (wol->wolopts & WAKE_MAGIC)
3008 		vp->enable_wol = 1;
3009 	else
3010 		vp->enable_wol = 0;
3011 	acpi_set_WOL(dev);
3012 
3013 	return 0;
3014 }
3015 
/* ethtool entry points; link management goes through the generic MII
   layer via vp->mii. */
static const struct ethtool_ops vortex_ethtool_ops = {
	.get_drvinfo		= vortex_get_drvinfo,
	.get_strings            = vortex_get_strings,
	.get_msglevel           = vortex_get_msglevel,
	.set_msglevel           = vortex_set_msglevel,
	.get_ethtool_stats      = vortex_get_ethtool_stats,
	.get_sset_count		= vortex_get_sset_count,
	.get_link               = ethtool_op_get_link,
	.nway_reset             = vortex_nway_reset,
	.get_wol                = vortex_get_wol,
	.set_wol                = vortex_set_wol,
	.get_ts_info		= ethtool_op_get_ts_info,
	.get_link_ksettings     = vortex_get_link_ksettings,
	.set_link_ksettings     = vortex_set_link_ksettings,
};
3031 
3032 #ifdef CONFIG_PCI
3033 /*
3034  *	Must power the device up to do MDIO operations
3035  */
3036 static int vortex_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
3037 {
3038 	int err;
3039 	struct vortex_private *vp = netdev_priv(dev);
3040 	pci_power_t state = 0;
3041 
3042 	if(VORTEX_PCI(vp))
3043 		state = VORTEX_PCI(vp)->current_state;
3044 
3045 	/* The kernel core really should have pci_get_power_state() */
3046 
3047 	if(state != 0)
3048 		pci_set_power_state(VORTEX_PCI(vp), PCI_D0);
3049 	err = generic_mii_ioctl(&vp->mii, if_mii(rq), cmd, NULL);
3050 	if(state != 0)
3051 		pci_set_power_state(VORTEX_PCI(vp), state);
3052 
3053 	return err;
3054 }
3055 #endif
3056 
3057 
3058 /* Pre-Cyclone chips have no documented multicast filter, so the only
3059    multicast setting is to receive all multicast frames.  At least
3060    the chip has a very clean way to set the mode, unlike many others. */
3061 static void set_rx_mode(struct net_device *dev)
3062 {
3063 	struct vortex_private *vp = netdev_priv(dev);
3064 	void __iomem *ioaddr = vp->ioaddr;
3065 	int new_mode;
3066 
3067 	if (dev->flags & IFF_PROMISC) {
3068 		if (vortex_debug > 3)
3069 			pr_notice("%s: Setting promiscuous mode.\n", dev->name);
3070 		new_mode = SetRxFilter|RxStation|RxMulticast|RxBroadcast|RxProm;
3071 	} else	if (!netdev_mc_empty(dev) || dev->flags & IFF_ALLMULTI) {
3072 		new_mode = SetRxFilter|RxStation|RxMulticast|RxBroadcast;
3073 	} else
3074 		new_mode = SetRxFilter | RxStation | RxBroadcast;
3075 
3076 	iowrite16(new_mode, ioaddr + EL3_CMD);
3077 }
3078 
3079 #if IS_ENABLED(CONFIG_VLAN_8021Q)
3080 /* Setup the card so that it can receive frames with an 802.1q VLAN tag.
3081    Note that this must be done after each RxReset due to some backwards
3082    compatibility logic in the Cyclone and Tornado ASICs */
3083 
3084 /* The Ethernet Type used for 802.1q tagged frames */
3085 #define VLAN_ETHER_TYPE 0x8100
3086 
/* Configure the NIC to accept (or stop accepting) 802.1q VLAN-tagged
   frames.  Cyclone/Tornado get the max packet size and VLAN EtherType
   registers programmed; older chips only have a large-frames bit. */
static void set_8021q_mode(struct net_device *dev, int enable)
{
	struct vortex_private *vp = netdev_priv(dev);
	int mac_ctrl;

	if ((vp->drv_flags&IS_CYCLONE) || (vp->drv_flags&IS_TORNADO)) {
		/* cyclone and tornado chipsets can recognize 802.1q
		 * tagged frames and treat them correctly */

		int max_pkt_size = dev->mtu+14;	/* MTU+Ethernet header */
		if (enable)
			max_pkt_size += 4;	/* 802.1Q VLAN tag */

		window_write16(vp, max_pkt_size, 3, Wn3_MaxPktSize);

		/* set VlanEtherType to let the hardware checksumming
		   treat tagged frames correctly */
		window_write16(vp, VLAN_ETHER_TYPE, 7, Wn7_VlanEtherType);
	} else {
		/* on older cards we have to enable large frames */

		vp->large_frames = dev->mtu > 1500 || enable;

		/* Set or clear the large-frames control bit (0x40) in the
		   MAC control register according to large_frames. */
		mac_ctrl = window_read16(vp, 3, Wn3_MAC_Ctrl);
		if (vp->large_frames)
			mac_ctrl |= 0x40;
		else
			mac_ctrl &= ~0x40;
		window_write16(vp, mac_ctrl, 3, Wn3_MAC_Ctrl);
	}
}
3118 #else
3119 
/* 802.1q support compiled out: nothing to configure. */
static void set_8021q_mode(struct net_device *dev, int enable)
{
}
3123 
3124 
3125 #endif
3126 
3127 /* MII transceiver control section.
3128    Read and write the MII registers using software-generated serial
3129    MDIO protocol.  See the MII specifications or DP83840A data sheet
3130    for details. */
3131 
/* The maximum data clock rate is 2.5 Mhz.  The minimum timing is usually
   met by back-to-back PCI I/O cycles, but we insert a delay to avoid
   "overclocking" issues. */
static void mdio_delay(struct vortex_private *vp)
{
	/* Dummy read; the bus access itself provides the delay. */
	window_read32(vp, 4, Wn4_PhysicalMgmt);
}
3139 
3140 #define MDIO_SHIFT_CLK	0x01
3141 #define MDIO_DIR_WRITE	0x04
3142 #define MDIO_DATA_WRITE0 (0x00 | MDIO_DIR_WRITE)
3143 #define MDIO_DATA_WRITE1 (0x02 | MDIO_DIR_WRITE)
3144 #define MDIO_DATA_READ	0x02
3145 #define MDIO_ENB_IN		0x00
3146 
3147 /* Generate the preamble required for initial synchronization and
3148    a few older transceivers. */
3149 static void mdio_sync(struct vortex_private *vp, int bits)
3150 {
3151 	/* Establish sync by sending at least 32 logic ones. */
3152 	while (-- bits >= 0) {
3153 		window_write16(vp, MDIO_DATA_WRITE1, 4, Wn4_PhysicalMgmt);
3154 		mdio_delay(vp);
3155 		window_write16(vp, MDIO_DATA_WRITE1 | MDIO_SHIFT_CLK,
3156 			       4, Wn4_PhysicalMgmt);
3157 		mdio_delay(vp);
3158 	}
3159 }
3160 
/* Read the 16-bit MII register @location of PHY @phy_id by bit-banging
   the management interface.  Returns 0xffff when the turnaround bit
   stays high, i.e. no PHY drove the bus. */
static int mdio_read(struct net_device *dev, int phy_id, int location)
{
	int i;
	struct vortex_private *vp = netdev_priv(dev);
	int read_cmd = (0xf6 << 10) | (phy_id << 5) | location;
	unsigned int retval = 0;

	spin_lock_bh(&vp->mii_lock);

	if (mii_preamble_required)
		mdio_sync(vp, 32);

	/* Shift the read command bits out. */
	for (i = 14; i >= 0; i--) {
		int dataval = (read_cmd&(1<<i)) ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0;
		window_write16(vp, dataval, 4, Wn4_PhysicalMgmt);
		mdio_delay(vp);
		window_write16(vp, dataval | MDIO_SHIFT_CLK,
			       4, Wn4_PhysicalMgmt);
		mdio_delay(vp);
	}
	/* Read the two transition, 16 data, and wire-idle bits. */
	for (i = 19; i > 0; i--) {
		window_write16(vp, MDIO_ENB_IN, 4, Wn4_PhysicalMgmt);
		mdio_delay(vp);
		retval = (retval << 1) |
			((window_read16(vp, 4, Wn4_PhysicalMgmt) &
			  MDIO_DATA_READ) ? 1 : 0);
		window_write16(vp, MDIO_ENB_IN | MDIO_SHIFT_CLK,
			       4, Wn4_PhysicalMgmt);
		mdio_delay(vp);
	}

	spin_unlock_bh(&vp->mii_lock);

	/* 0x20000 is the turnaround bit; drop it and the idle bit,
	   keeping the 16 data bits. */
	return retval & 0x20000 ? 0xffff : retval>>1 & 0xffff;
}
3198 
/* Write @value to the 16-bit MII register @location of PHY @phy_id by
   bit-banging the management interface (32-bit write frame, then two
   idle clocks to release the bus). */
static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
{
	struct vortex_private *vp = netdev_priv(dev);
	int write_cmd = 0x50020000 | (phy_id << 23) | (location << 18) | value;
	int i;

	spin_lock_bh(&vp->mii_lock);

	if (mii_preamble_required)
		mdio_sync(vp, 32);

	/* Shift the command bits out. */
	for (i = 31; i >= 0; i--) {
		int dataval = (write_cmd&(1<<i)) ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0;
		window_write16(vp, dataval, 4, Wn4_PhysicalMgmt);
		mdio_delay(vp);
		window_write16(vp, dataval | MDIO_SHIFT_CLK,
			       4, Wn4_PhysicalMgmt);
		mdio_delay(vp);
	}
	/* Leave the interface idle. */
	for (i = 1; i >= 0; i--) {
		window_write16(vp, MDIO_ENB_IN, 4, Wn4_PhysicalMgmt);
		mdio_delay(vp);
		window_write16(vp, MDIO_ENB_IN | MDIO_SHIFT_CLK,
			       4, Wn4_PhysicalMgmt);
		mdio_delay(vp);
	}

	spin_unlock_bh(&vp->mii_lock);
}
3230 
/* ACPI: Advanced Configuration and Power Interface. */
/* Set Wake-On-LAN mode and put the board into D3 (power-down) state. */
static void acpi_set_WOL(struct net_device *dev)
{
	struct vortex_private *vp = netdev_priv(dev);
	void __iomem *ioaddr = vp->ioaddr;

	device_set_wakeup_enable(vp->gendev, vp->enable_wol);

	if (vp->enable_wol) {
		/* Power up on: 1==Downloaded Filter, 2==Magic Packets, 4==Link Status. */
		window_write16(vp, 2, 7, 0x0c);
		/* The RxFilter must accept the WOL frames. */
		iowrite16(SetRxFilter|RxStation|RxMulticast|RxBroadcast, ioaddr + EL3_CMD);
		iowrite16(RxEnable, ioaddr + EL3_CMD);

		if (pci_enable_wake(VORTEX_PCI(vp), PCI_D3hot, 1)) {
			pr_info("%s: WOL not supported.\n", pci_name(VORTEX_PCI(vp)));

			/* Remember the failure so we don't retry every time. */
			vp->enable_wol = 0;
			return;
		}

		/* NOTE(review): current_state appears to hold the target
		   sleep state chosen by the PM core; only power down when
		   that target is D3hot or deeper -- confirm against the
		   suspend path. */
		if (VORTEX_PCI(vp)->current_state < PCI_D3hot)
			return;

		/* Change the power state to D3; RxEnable doesn't take effect. */
		pci_set_power_state(VORTEX_PCI(vp), PCI_D3hot);
	}
}
3261 
3262 
/* PCI driver remove callback: unregister the netdev, reset the chip,
   and release all mapped windows, DMA memory and PCI resources. */
static void vortex_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct vortex_private *vp;

	/* Compaq built-ins are probed outside the PCI driver and must
	   never reach this path. */
	if (!dev) {
		pr_err("vortex_remove_one called for Compaq device!\n");
		BUG();
	}

	vp = netdev_priv(dev);

	/* Unmap the CardBus function window, if one was mapped. */
	if (vp->cb_fn_base)
		pci_iounmap(pdev, vp->cb_fn_base);

	unregister_netdev(dev);

	pci_set_power_state(pdev, PCI_D0);	/* Go active */
	if (vp->pm_state_valid)
		pci_restore_state(pdev);
	pci_disable_device(pdev);

	/* Should really use issue_and_wait() here */
	iowrite16(TotalReset | ((vp->drv_flags & EEPROM_RESET) ? 0x04 : 0x14),
	     vp->ioaddr + EL3_CMD);

	pci_iounmap(pdev, vp->ioaddr);

	/* Rx and Tx rings were allocated as one consistent block. */
	pci_free_consistent(pdev,
						sizeof(struct boom_rx_desc) * RX_RING_SIZE
							+ sizeof(struct boom_tx_desc) * TX_RING_SIZE,
						vp->rx_ring,
						vp->rx_ring_dma);

	pci_release_regions(pdev);

	free_netdev(dev);
}
3301 
3302 
/* PCI driver glue; probe and PM ops are defined earlier in this file. */
static struct pci_driver vortex_driver = {
	.name		= "3c59x",
	.probe		= vortex_init_one,
	.remove		= vortex_remove_one,
	.id_table	= vortex_pci_tbl,
	.driver.pm	= VORTEX_PM_OPS,
};
3310 
3311 
static int vortex_have_pci;	/* set by vortex_init() when PCI registration succeeded */
static int vortex_have_eisa;	/* set by vortex_init() when EISA probing found devices */
3314 
3315 
3316 static int __init vortex_init(void)
3317 {
3318 	int pci_rc, eisa_rc;
3319 
3320 	pci_rc = pci_register_driver(&vortex_driver);
3321 	eisa_rc = vortex_eisa_init();
3322 
3323 	if (pci_rc == 0)
3324 		vortex_have_pci = 1;
3325 	if (eisa_rc > 0)
3326 		vortex_have_eisa = 1;
3327 
3328 	return (vortex_have_pci + vortex_have_eisa) ? 0 : -ENODEV;
3329 }
3330 
3331 
/* Undo the EISA/Compaq probing: unregister the EISA driver and tear
   down the directly-probed Compaq device, resetting its hardware and
   releasing its I/O region. */
static void __exit vortex_eisa_cleanup(void)
{
	void __iomem *ioaddr;

#ifdef CONFIG_EISA
	/* Take care of the EISA devices */
	eisa_driver_unregister(&vortex_eisa_driver);
#endif

	if (compaq_net_device) {
		/* Re-map the registers long enough to issue a reset. */
		ioaddr = ioport_map(compaq_net_device->base_addr,
		                    VORTEX_TOTAL_SIZE);

		unregister_netdev(compaq_net_device);
		iowrite16(TotalReset, ioaddr + EL3_CMD);
		release_region(compaq_net_device->base_addr,
		               VORTEX_TOTAL_SIZE);

		free_netdev(compaq_net_device);
	}
}
3353 
3354 
3355 static void __exit vortex_cleanup(void)
3356 {
3357 	if (vortex_have_pci)
3358 		pci_unregister_driver(&vortex_driver);
3359 	if (vortex_have_eisa)
3360 		vortex_eisa_cleanup();
3361 }
3362 
3363 
3364 module_init(vortex_init);
3365 module_exit(vortex_cleanup);
3366