1 /*	tulip_core.c: A DEC 21x4x-family ethernet driver for Linux.
2 
3 	Copyright 2000,2001  The Linux Kernel Team
4 	Written/copyright 1994-2001 by Donald Becker.
5 
6 	This software may be used and distributed according to the terms
7 	of the GNU General Public License, incorporated herein by reference.
8 
9 	Please submit bugs to http://bugzilla.kernel.org/ .
10 */
11 
12 #define pr_fmt(fmt) "tulip: " fmt
13 
14 #define DRV_NAME	"tulip"
15 #ifdef CONFIG_TULIP_NAPI
16 #define DRV_VERSION    "1.1.15-NAPI" /* Keep at least for test */
17 #else
18 #define DRV_VERSION	"1.1.15"
19 #endif
20 #define DRV_RELDATE	"Feb 27, 2007"
21 
22 
23 #include <linux/module.h>
24 #include <linux/pci.h>
25 #include <linux/slab.h>
26 #include "tulip.h"
27 #include <linux/init.h>
28 #include <linux/interrupt.h>
29 #include <linux/etherdevice.h>
30 #include <linux/delay.h>
31 #include <linux/mii.h>
32 #include <linux/crc32.h>
33 #include <asm/unaligned.h>
34 #include <linux/uaccess.h>
35 
36 #ifdef CONFIG_SPARC
37 #include <asm/prom.h>
38 #endif
39 
40 static char version[] =
41 	"Linux Tulip driver version " DRV_VERSION " (" DRV_RELDATE ")\n";
42 
43 /* A few user-configurable values. */
44 
45 /* Maximum events (Rx packets, etc.) to handle at each interrupt. */
46 static unsigned int max_interrupt_work = 25;
47 
48 #define MAX_UNITS 8
49 /* Used to pass the full-duplex flag, etc. */
50 static int full_duplex[MAX_UNITS];
51 static int options[MAX_UNITS];
52 static int mtu[MAX_UNITS];			/* Jumbo MTU for interfaces. */
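
/* Example: "modprobe tulip options=11 full_duplex=1" forces the first board
   to MII (media index 11 in medianame[] below) and full duplex; the values
   are applied per board in tulip_init_one(). */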
53 
54 /*  The possible media types that can be set in options[] are: */
55 const char * const medianame[32] = {
56 	"10baseT", "10base2", "AUI", "100baseTx",
57 	"10baseT-FDX", "100baseTx-FDX", "100baseT4", "100baseFx",
58 	"100baseFx-FDX", "MII 10baseT", "MII 10baseT-FDX", "MII",
59 	"10baseT(forced)", "MII 100baseTx", "MII 100baseTx-FDX", "MII 100baseT4",
60 	"MII 100baseFx-HDX", "MII 100baseFx-FDX", "Home-PNA 1Mbps", "Invalid-19",
61 	"","","","", "","","","",  "","","","Transceiver reset",
62 };
63 
64 /* Set the copy breakpoint for the copy-only-tiny-buffer Rx structure. */
65 #if defined(__alpha__) || defined(__arm__) || defined(__hppa__) || \
66 	defined(CONFIG_SPARC) || defined(__ia64__) || \
67 	defined(__sh__) || defined(__mips__)
68 static int rx_copybreak = 1518;
69 #else
70 static int rx_copybreak = 100;
71 #endif
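
/* Packets shorter than rx_copybreak bytes are copied into a freshly
   allocated skb and the original Rx buffer is reused in place; longer
   packets are passed up directly (see the Rx path in interrupt.c). */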
72 
/*
  Set the bus performance register (CSR0).
	Typical: Set 16 longword cache alignment, no burst limit.

	Cache alignment (bits 15:14)	Burst length (bits 13:8)
		0000  no alignment	0000  unlimited
		4000  8  longwords	0100  1 longword
		8000  16 longwords	0200  2 longwords
		C000  32 longwords	0400  4 longwords
					0800  8 longwords
					1000  16 longwords
					2000  32 longwords

	Warning: many older 486 systems are broken and require setting 0x00A04800
	   8 longword cache alignment, 8 longword burst.
	ToDo: Non-Intel setting could be better.
*/
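
/* Example: the x86 default below, 0x01A00000 | 0x8000, sets MWI | MRL | MRM
   (bits 24/23/21, the same busconfig bits used by tulip_mwi_config() below)
   plus 16-longword cache alignment and an unlimited burst length. */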
85 
86 #if defined(__alpha__) || defined(__ia64__)
87 static int csr0 = 0x01A00000 | 0xE000;
88 #elif defined(__i386__) || defined(__powerpc__) || defined(__x86_64__)
89 static int csr0 = 0x01A00000 | 0x8000;
90 #elif defined(CONFIG_SPARC) || defined(__hppa__)
91 /* The UltraSparc PCI controllers will disconnect at every 64-byte
92  * crossing anyways so it makes no sense to tell Tulip to burst
93  * any more than that.
94  */
95 static int csr0 = 0x01A00000 | 0x9000;
96 #elif defined(__arm__) || defined(__sh__)
97 static int csr0 = 0x01A00000 | 0x4800;
98 #elif defined(__mips__)
99 static int csr0 = 0x00200000 | 0x4000;
100 #else
101 static int csr0;
102 #endif
103 
104 /* Operational parameters that usually are not changed. */
105 /* Time in jiffies before concluding the transmitter is hung. */
106 #define TX_TIMEOUT  (4*HZ)
107 
108 
109 MODULE_AUTHOR("The Linux Kernel Team");
110 MODULE_DESCRIPTION("Digital 21*4* Tulip ethernet driver");
111 MODULE_LICENSE("GPL");
112 MODULE_VERSION(DRV_VERSION);
113 module_param(tulip_debug, int, 0);
114 module_param(max_interrupt_work, int, 0);
115 module_param(rx_copybreak, int, 0);
116 module_param(csr0, int, 0);
117 module_param_array(options, int, NULL, 0);
118 module_param_array(full_duplex, int, NULL, 0);
119 
120 #ifdef TULIP_DEBUG
121 int tulip_debug = TULIP_DEBUG;
122 #else
123 int tulip_debug = 1;
124 #endif
125 
126 static void tulip_timer(struct timer_list *t)
127 {
128 	struct tulip_private *tp = from_timer(tp, t, timer);
129 	struct net_device *dev = tp->dev;
130 
131 	if (netif_running(dev))
132 		schedule_work(&tp->media_work);
133 }
134 
/*
 * This table is used during operation for capabilities and the media timer.
 *
 * It is indexed via the values in 'enum chips'.
 */
140 
141 const struct tulip_chip_table tulip_tbl[] = {
142   { }, /* placeholder for array, slot unused currently */
143   { }, /* placeholder for array, slot unused currently */
144 
145   /* DC21140 */
146   { "Digital DS21140 Tulip", 128, 0x0001ebef,
147 	HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | HAS_PCI_MWI, tulip_timer,
148 	tulip_media_task },
149 
150   /* DC21142, DC21143 */
151   { "Digital DS21142/43 Tulip", 128, 0x0801fbff,
152 	HAS_MII | HAS_MEDIA_TABLE | ALWAYS_CHECK_MII | HAS_ACPI | HAS_NWAY
153 	| HAS_INTR_MITIGATION | HAS_PCI_MWI, tulip_timer, t21142_media_task },
154 
155   /* LC82C168 */
156   { "Lite-On 82c168 PNIC", 256, 0x0001fbef,
157 	HAS_MII | HAS_PNICNWAY, pnic_timer, },
158 
159   /* MX98713 */
160   { "Macronix 98713 PMAC", 128, 0x0001ebef,
161 	HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM, mxic_timer, },
162 
163   /* MX98715 */
164   { "Macronix 98715 PMAC", 256, 0x0001ebef,
165 	HAS_MEDIA_TABLE, mxic_timer, },
166 
167   /* MX98725 */
168   { "Macronix 98725 PMAC", 256, 0x0001ebef,
169 	HAS_MEDIA_TABLE, mxic_timer, },
170 
171   /* AX88140 */
172   { "ASIX AX88140", 128, 0x0001fbff,
173 	HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | MC_HASH_ONLY
174 	| IS_ASIX, tulip_timer, tulip_media_task },
175 
176   /* PNIC2 */
177   { "Lite-On PNIC-II", 256, 0x0801fbff,
178 	HAS_MII | HAS_NWAY | HAS_8023X | HAS_PCI_MWI, pnic2_timer, },
179 
180   /* COMET */
181   { "ADMtek Comet", 256, 0x0001abef,
182 	HAS_MII | MC_HASH_ONLY | COMET_MAC_ADDR, comet_timer, },
183 
184   /* COMPEX9881 */
185   { "Compex 9881 PMAC", 128, 0x0001ebef,
186 	HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM, mxic_timer, },
187 
188   /* I21145 */
189   { "Intel DS21145 Tulip", 128, 0x0801fbff,
190 	HAS_MII | HAS_MEDIA_TABLE | ALWAYS_CHECK_MII | HAS_ACPI
191 	| HAS_NWAY | HAS_PCI_MWI, tulip_timer, tulip_media_task },
192 
193   /* DM910X */
194 #ifdef CONFIG_TULIP_DM910X
195   { "Davicom DM9102/DM9102A", 128, 0x0001ebef,
196 	HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | HAS_ACPI,
197 	tulip_timer, tulip_media_task },
198 #else
199   { NULL },
200 #endif
201 
202   /* RS7112 */
203   { "Conexant LANfinity", 256, 0x0001ebef,
204 	HAS_MII | HAS_ACPI, tulip_timer, tulip_media_task },
205 
206 };
207 
208 
209 static const struct pci_device_id tulip_pci_tbl[] = {
210 	{ 0x1011, 0x0009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DC21140 },
211 	{ 0x1011, 0x0019, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DC21143 },
212 	{ 0x11AD, 0x0002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, LC82C168 },
213 	{ 0x10d9, 0x0512, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98713 },
214 	{ 0x10d9, 0x0531, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98715 },
215 /*	{ 0x10d9, 0x0531, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98725 },*/
216 	{ 0x125B, 0x1400, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AX88140 },
217 	{ 0x11AD, 0xc115, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PNIC2 },
218 	{ 0x1317, 0x0981, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
219 	{ 0x1317, 0x0985, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
220 	{ 0x1317, 0x1985, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
221 	{ 0x1317, 0x9511, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
222 	{ 0x13D1, 0xAB02, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
223 	{ 0x13D1, 0xAB03, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
224 	{ 0x13D1, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
225 	{ 0x104A, 0x0981, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
226 	{ 0x104A, 0x2774, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
227 	{ 0x1259, 0xa120, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
228 	{ 0x11F6, 0x9881, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMPEX9881 },
229 	{ 0x8086, 0x0039, PCI_ANY_ID, PCI_ANY_ID, 0, 0, I21145 },
230 #ifdef CONFIG_TULIP_DM910X
231 	{ 0x1282, 0x9100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DM910X },
232 	{ 0x1282, 0x9102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DM910X },
233 #endif
234 	{ 0x1113, 0x1216, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
235 	{ 0x1113, 0x1217, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98715 },
236 	{ 0x1113, 0x9511, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
237 	{ 0x1186, 0x1541, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
238 	{ 0x1186, 0x1561, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
239 	{ 0x1186, 0x1591, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
240 	{ 0x14f1, 0x1803, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CONEXANT },
241 	{ 0x1626, 0x8410, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
242 	{ 0x1737, 0xAB09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
243 	{ 0x1737, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
244 	{ 0x17B3, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
245 	{ 0x10b7, 0x9300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* 3Com 3CSOHO100B-TX */
246 	{ 0x14ea, 0xab08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* Planex FNW-3602-TX */
247 	{ 0x1414, 0x0001, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* Microsoft MN-120 */
248 	{ 0x1414, 0x0002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
249 	{ } /* terminate list */
250 };
251 MODULE_DEVICE_TABLE(pci, tulip_pci_tbl);
252 
253 
254 /* A full-duplex map for media types. */
255 const char tulip_media_cap[32] =
256 {0,0,0,16,  3,19,16,24,  27,4,7,5, 0,20,23,20,  28,31,0,0, };
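
/* Each entry holds the MediaIs* capability flags (see tulip.h) for the
   matching medianame[] index; e.g. entry 5, "100baseTx-FDX", is 19 =
   MediaIs100 | MediaAlwaysFD | MediaIsFD, and entries with the MediaIsMII
   bit set are the MII-based media types. */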
257 
258 static void tulip_tx_timeout(struct net_device *dev);
259 static void tulip_init_ring(struct net_device *dev);
260 static void tulip_free_ring(struct net_device *dev);
261 static netdev_tx_t tulip_start_xmit(struct sk_buff *skb,
262 					  struct net_device *dev);
263 static int tulip_open(struct net_device *dev);
264 static int tulip_close(struct net_device *dev);
265 static void tulip_up(struct net_device *dev);
266 static void tulip_down(struct net_device *dev);
267 static struct net_device_stats *tulip_get_stats(struct net_device *dev);
268 static int private_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
269 static void set_rx_mode(struct net_device *dev);
270 static void tulip_set_wolopts(struct pci_dev *pdev, u32 wolopts);
271 #ifdef CONFIG_NET_POLL_CONTROLLER
272 static void poll_tulip(struct net_device *dev);
273 #endif
274 
275 static void tulip_set_power_state (struct tulip_private *tp,
276 				   int sleep, int snooze)
277 {
278 	if (tp->flags & HAS_ACPI) {
279 		u32 tmp, newtmp;
280 		pci_read_config_dword (tp->pdev, CFDD, &tmp);
281 		newtmp = tmp & ~(CFDD_Sleep | CFDD_Snooze);
282 		if (sleep)
283 			newtmp |= CFDD_Sleep;
284 		else if (snooze)
285 			newtmp |= CFDD_Snooze;
286 		if (tmp != newtmp)
287 			pci_write_config_dword (tp->pdev, CFDD, newtmp);
288 	}
289 
290 }
291 
292 
293 static void tulip_up(struct net_device *dev)
294 {
295 	struct tulip_private *tp = netdev_priv(dev);
296 	void __iomem *ioaddr = tp->base_addr;
297 	int next_tick = 3*HZ;
298 	u32 reg;
299 	int i;
300 
301 #ifdef CONFIG_TULIP_NAPI
302 	napi_enable(&tp->napi);
303 #endif
304 
305 	/* Wake the chip from sleep/snooze mode. */
306 	tulip_set_power_state (tp, 0, 0);
307 
308 	/* Disable all WOL events */
309 	pci_enable_wake(tp->pdev, PCI_D3hot, 0);
310 	pci_enable_wake(tp->pdev, PCI_D3cold, 0);
311 	tulip_set_wolopts(tp->pdev, 0);
312 
313 	/* On some chip revs we must set the MII/SYM port before the reset!? */
314 	if (tp->mii_cnt  ||  (tp->mtable  &&  tp->mtable->has_mii))
315 		iowrite32(0x00040000, ioaddr + CSR6);
316 
317 	/* Reset the chip, holding bit 0 set at least 50 PCI cycles. */
318 	iowrite32(0x00000001, ioaddr + CSR0);
319 	pci_read_config_dword(tp->pdev, PCI_COMMAND, &reg);  /* flush write */
320 	udelay(100);
321 
322 	/* Deassert reset.
323 	   Wait the specified 50 PCI cycles after a reset by initializing
324 	   Tx and Rx queues and the address filter list. */
325 	iowrite32(tp->csr0, ioaddr + CSR0);
326 	pci_read_config_dword(tp->pdev, PCI_COMMAND, &reg);  /* flush write */
327 	udelay(100);
328 
329 	if (tulip_debug > 1)
330 		netdev_dbg(dev, "tulip_up(), irq==%d\n", tp->pdev->irq);
331 
332 	iowrite32(tp->rx_ring_dma, ioaddr + CSR3);
333 	iowrite32(tp->tx_ring_dma, ioaddr + CSR4);
334 	tp->cur_rx = tp->cur_tx = 0;
335 	tp->dirty_rx = tp->dirty_tx = 0;
336 
337 	if (tp->flags & MC_HASH_ONLY) {
338 		u32 addr_low = get_unaligned_le32(dev->dev_addr);
339 		u32 addr_high = get_unaligned_le16(dev->dev_addr + 4);
340 		if (tp->chip_id == AX88140) {
341 			iowrite32(0, ioaddr + CSR13);
342 			iowrite32(addr_low,  ioaddr + CSR14);
343 			iowrite32(1, ioaddr + CSR13);
344 			iowrite32(addr_high, ioaddr + CSR14);
345 		} else if (tp->flags & COMET_MAC_ADDR) {
346 			iowrite32(addr_low,  ioaddr + 0xA4);
347 			iowrite32(addr_high, ioaddr + 0xA8);
348 			iowrite32(0, ioaddr + CSR27);
349 			iowrite32(0, ioaddr + CSR28);
350 		}
351 	} else {
352 		/* This is set_rx_mode(), but without starting the transmitter. */
353 		u16 *eaddrs = (u16 *)dev->dev_addr;
354 		u16 *setup_frm = &tp->setup_frame[15*6];
355 		dma_addr_t mapping;
356 
357 		/* 21140 bug: you must add the broadcast address. */
358 		memset(tp->setup_frame, 0xff, sizeof(tp->setup_frame));
359 		/* Fill the final entry of the table with our physical address. */
360 		*setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
361 		*setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
362 		*setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
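
		/* The 192-byte setup frame holds 16 perfect-filter entries of
		   12 bytes each; only the low 16 bits of each longword are
		   significant, which is why each address word is written
		   twice.  The 0xff fill above leaves the other 15 entries as
		   the broadcast address. */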
363 
364 		mapping = pci_map_single(tp->pdev, tp->setup_frame,
365 					 sizeof(tp->setup_frame),
366 					 PCI_DMA_TODEVICE);
367 		tp->tx_buffers[tp->cur_tx].skb = NULL;
368 		tp->tx_buffers[tp->cur_tx].mapping = mapping;
369 
370 		/* Put the setup frame on the Tx list. */
371 		tp->tx_ring[tp->cur_tx].length = cpu_to_le32(0x08000000 | 192);
372 		tp->tx_ring[tp->cur_tx].buffer1 = cpu_to_le32(mapping);
373 		tp->tx_ring[tp->cur_tx].status = cpu_to_le32(DescOwned);
374 
375 		tp->cur_tx++;
376 	}
377 
378 	tp->saved_if_port = dev->if_port;
379 	if (dev->if_port == 0)
380 		dev->if_port = tp->default_port;
381 
382 	/* Allow selecting a default media. */
383 	i = 0;
384 	if (tp->mtable == NULL)
385 		goto media_picked;
386 	if (dev->if_port) {
387 		int looking_for = tulip_media_cap[dev->if_port] & MediaIsMII ? 11 :
388 			(dev->if_port == 12 ? 0 : dev->if_port);
389 		for (i = 0; i < tp->mtable->leafcount; i++)
390 			if (tp->mtable->mleaf[i].media == looking_for) {
391 				dev_info(&dev->dev,
392 					 "Using user-specified media %s\n",
393 					 medianame[dev->if_port]);
394 				goto media_picked;
395 			}
396 	}
397 	if ((tp->mtable->defaultmedia & 0x0800) == 0) {
398 		int looking_for = tp->mtable->defaultmedia & MEDIA_MASK;
399 		for (i = 0; i < tp->mtable->leafcount; i++)
400 			if (tp->mtable->mleaf[i].media == looking_for) {
401 				dev_info(&dev->dev,
402 					 "Using EEPROM-set media %s\n",
403 					 medianame[looking_for]);
404 				goto media_picked;
405 			}
406 	}
407 	/* Start sensing first non-full-duplex media. */
408 	for (i = tp->mtable->leafcount - 1;
409 		 (tulip_media_cap[tp->mtable->mleaf[i].media] & MediaAlwaysFD) && i > 0; i--)
410 		;
411 media_picked:
412 
413 	tp->csr6 = 0;
414 	tp->cur_index = i;
415 	tp->nwayset = 0;
416 
417 	if (dev->if_port) {
418 		if (tp->chip_id == DC21143  &&
419 		    (tulip_media_cap[dev->if_port] & MediaIsMII)) {
420 			/* We must reset the media CSRs when we force-select MII mode. */
421 			iowrite32(0x0000, ioaddr + CSR13);
422 			iowrite32(0x0000, ioaddr + CSR14);
423 			iowrite32(0x0008, ioaddr + CSR15);
424 		}
425 		tulip_select_media(dev, 1);
426 	} else if (tp->chip_id == DC21142) {
427 		if (tp->mii_cnt) {
428 			tulip_select_media(dev, 1);
429 			if (tulip_debug > 1)
430 				dev_info(&dev->dev,
431 					 "Using MII transceiver %d, status %04x\n",
432 					 tp->phys[0],
433 					 tulip_mdio_read(dev, tp->phys[0], 1));
434 			iowrite32(csr6_mask_defstate, ioaddr + CSR6);
435 			tp->csr6 = csr6_mask_hdcap;
436 			dev->if_port = 11;
437 			iowrite32(0x0000, ioaddr + CSR13);
438 			iowrite32(0x0000, ioaddr + CSR14);
439 		} else
440 			t21142_start_nway(dev);
441 	} else if (tp->chip_id == PNIC2) {
		/* for initial startup advertise 10/100 Full and Half */
		tp->sym_advertise = 0x01E0;
		/* enable autonegotiate end interrupt */
		iowrite32(ioread32(ioaddr + CSR5) | 0x00008010, ioaddr + CSR5);
		iowrite32(ioread32(ioaddr + CSR7) | 0x00008010, ioaddr + CSR7);
447 		pnic2_start_nway(dev);
448 	} else if (tp->chip_id == LC82C168  &&  ! tp->medialock) {
449 		if (tp->mii_cnt) {
450 			dev->if_port = 11;
451 			tp->csr6 = 0x814C0000 | (tp->full_duplex ? 0x0200 : 0);
452 			iowrite32(0x0001, ioaddr + CSR15);
453 		} else if (ioread32(ioaddr + CSR5) & TPLnkPass)
454 			pnic_do_nway(dev);
455 		else {
456 			/* Start with 10mbps to do autonegotiation. */
457 			iowrite32(0x32, ioaddr + CSR12);
458 			tp->csr6 = 0x00420000;
459 			iowrite32(0x0001B078, ioaddr + 0xB8);
460 			iowrite32(0x0201B078, ioaddr + 0xB8);
461 			next_tick = 1*HZ;
462 		}
463 	} else if ((tp->chip_id == MX98713 || tp->chip_id == COMPEX9881) &&
464 		   ! tp->medialock) {
465 		dev->if_port = 0;
466 		tp->csr6 = 0x01880000 | (tp->full_duplex ? 0x0200 : 0);
467 		iowrite32(0x0f370000 | ioread16(ioaddr + 0x80), ioaddr + 0x80);
468 	} else if (tp->chip_id == MX98715 || tp->chip_id == MX98725) {
469 		/* Provided by BOLO, Macronix - 12/10/1998. */
470 		dev->if_port = 0;
471 		tp->csr6 = 0x01a80200;
472 		iowrite32(0x0f370000 | ioread16(ioaddr + 0x80), ioaddr + 0x80);
473 		iowrite32(0x11000 | ioread16(ioaddr + 0xa0), ioaddr + 0xa0);
474 	} else if (tp->chip_id == COMET || tp->chip_id == CONEXANT) {
475 		/* Enable automatic Tx underrun recovery. */
476 		iowrite32(ioread32(ioaddr + 0x88) | 1, ioaddr + 0x88);
477 		dev->if_port = tp->mii_cnt ? 11 : 0;
478 		tp->csr6 = 0x00040000;
479 	} else if (tp->chip_id == AX88140) {
480 		tp->csr6 = tp->mii_cnt ? 0x00040100 : 0x00000100;
481 	} else
482 		tulip_select_media(dev, 1);
483 
484 	/* Start the chip's Tx to process setup frame. */
485 	tulip_stop_rxtx(tp);
486 	barrier();
487 	udelay(5);
488 	iowrite32(tp->csr6 | TxOn, ioaddr + CSR6);
489 
490 	/* Enable interrupts by setting the interrupt mask. */
491 	iowrite32(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR5);
492 	iowrite32(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7);
493 	tulip_start_rxtx(tp);
494 	iowrite32(0, ioaddr + CSR2);		/* Rx poll demand */
495 
496 	if (tulip_debug > 2) {
497 		netdev_dbg(dev, "Done tulip_up(), CSR0 %08x, CSR5 %08x CSR6 %08x\n",
498 			   ioread32(ioaddr + CSR0),
499 			   ioread32(ioaddr + CSR5),
500 			   ioread32(ioaddr + CSR6));
501 	}
502 
	/* Set the timer to check for link beat and perhaps switch
	   to an alternate media type. */
505 	tp->timer.expires = RUN_AT(next_tick);
506 	add_timer(&tp->timer);
507 #ifdef CONFIG_TULIP_NAPI
508 	timer_setup(&tp->oom_timer, oom_timer, 0);
509 #endif
510 }
511 
512 static int
513 tulip_open(struct net_device *dev)
514 {
515 	struct tulip_private *tp = netdev_priv(dev);
516 	int retval;
517 
518 	tulip_init_ring (dev);
519 
520 	retval = request_irq(tp->pdev->irq, tulip_interrupt, IRQF_SHARED,
521 			     dev->name, dev);
522 	if (retval)
523 		goto free_ring;
524 
525 	tulip_up (dev);
526 
527 	netif_start_queue (dev);
528 
529 	return 0;
530 
531 free_ring:
532 	tulip_free_ring (dev);
533 	return retval;
534 }
535 
536 
537 static void tulip_tx_timeout(struct net_device *dev)
538 {
539 	struct tulip_private *tp = netdev_priv(dev);
540 	void __iomem *ioaddr = tp->base_addr;
541 	unsigned long flags;
542 
543 	spin_lock_irqsave (&tp->lock, flags);
544 
545 	if (tulip_media_cap[dev->if_port] & MediaIsMII) {
546 		/* Do nothing -- the media monitor should handle this. */
547 		if (tulip_debug > 1)
548 			dev_warn(&dev->dev,
549 				 "Transmit timeout using MII device\n");
550 	} else if (tp->chip_id == DC21140 || tp->chip_id == DC21142 ||
551 		   tp->chip_id == MX98713 || tp->chip_id == COMPEX9881 ||
552 		   tp->chip_id == DM910X) {
553 		dev_warn(&dev->dev,
554 			 "21140 transmit timed out, status %08x, SIA %08x %08x %08x %08x, resetting...\n",
555 			 ioread32(ioaddr + CSR5), ioread32(ioaddr + CSR12),
556 			 ioread32(ioaddr + CSR13), ioread32(ioaddr + CSR14),
557 			 ioread32(ioaddr + CSR15));
558 		tp->timeout_recovery = 1;
559 		schedule_work(&tp->media_work);
560 		goto out_unlock;
561 	} else if (tp->chip_id == PNIC2) {
562 		dev_warn(&dev->dev,
563 			 "PNIC2 transmit timed out, status %08x, CSR6/7 %08x / %08x CSR12 %08x, resetting...\n",
564 			 (int)ioread32(ioaddr + CSR5),
565 			 (int)ioread32(ioaddr + CSR6),
566 			 (int)ioread32(ioaddr + CSR7),
567 			 (int)ioread32(ioaddr + CSR12));
568 	} else {
569 		dev_warn(&dev->dev,
570 			 "Transmit timed out, status %08x, CSR12 %08x, resetting...\n",
571 			 ioread32(ioaddr + CSR5), ioread32(ioaddr + CSR12));
572 		dev->if_port = 0;
573 	}
574 
575 #if defined(way_too_many_messages)
576 	if (tulip_debug > 3) {
577 		int i;
578 		for (i = 0; i < RX_RING_SIZE; i++) {
579 			u8 *buf = (u8 *)(tp->rx_ring[i].buffer1);
580 			int j;
581 			printk(KERN_DEBUG
582 			       "%2d: %08x %08x %08x %08x  %02x %02x %02x\n",
583 			       i,
584 			       (unsigned int)tp->rx_ring[i].status,
585 			       (unsigned int)tp->rx_ring[i].length,
586 			       (unsigned int)tp->rx_ring[i].buffer1,
587 			       (unsigned int)tp->rx_ring[i].buffer2,
588 			       buf[0], buf[1], buf[2]);
589 			for (j = 0; ((j < 1600) && buf[j] != 0xee); j++)
590 				if (j < 100)
591 					pr_cont(" %02x", buf[j]);
592 			pr_cont(" j=%d\n", j);
593 		}
594 		printk(KERN_DEBUG "  Rx ring %p: ", tp->rx_ring);
595 		for (i = 0; i < RX_RING_SIZE; i++)
596 			pr_cont(" %08x", (unsigned int)tp->rx_ring[i].status);
597 		printk(KERN_DEBUG "  Tx ring %p: ", tp->tx_ring);
598 		for (i = 0; i < TX_RING_SIZE; i++)
599 			pr_cont(" %08x", (unsigned int)tp->tx_ring[i].status);
600 		pr_cont("\n");
601 	}
602 #endif
603 
604 	tulip_tx_timeout_complete(tp, ioaddr);
605 
606 out_unlock:
607 	spin_unlock_irqrestore (&tp->lock, flags);
608 	netif_trans_update(dev); /* prevent tx timeout */
609 	netif_wake_queue (dev);
610 }
611 
612 
613 /* Initialize the Rx and Tx rings, along with various 'dev' bits. */
614 static void tulip_init_ring(struct net_device *dev)
615 {
616 	struct tulip_private *tp = netdev_priv(dev);
617 	int i;
618 
619 	tp->susp_rx = 0;
620 	tp->ttimer = 0;
621 	tp->nir = 0;
622 
623 	for (i = 0; i < RX_RING_SIZE; i++) {
624 		tp->rx_ring[i].status = 0x00000000;
625 		tp->rx_ring[i].length = cpu_to_le32(PKT_BUF_SZ);
626 		tp->rx_ring[i].buffer2 = cpu_to_le32(tp->rx_ring_dma + sizeof(struct tulip_rx_desc) * (i + 1));
627 		tp->rx_buffers[i].skb = NULL;
628 		tp->rx_buffers[i].mapping = 0;
629 	}
630 	/* Mark the last entry as wrapping the ring. */
631 	tp->rx_ring[i-1].length = cpu_to_le32(PKT_BUF_SZ | DESC_RING_WRAP);
632 	tp->rx_ring[i-1].buffer2 = cpu_to_le32(tp->rx_ring_dma);
633 
634 	for (i = 0; i < RX_RING_SIZE; i++) {
635 		dma_addr_t mapping;
636 
637 		/* Note the receive buffer must be longword aligned.
638 		   netdev_alloc_skb() provides 16 byte alignment.  But do *not*
639 		   use skb_reserve() to align the IP header! */
640 		struct sk_buff *skb = netdev_alloc_skb(dev, PKT_BUF_SZ);
641 		tp->rx_buffers[i].skb = skb;
642 		if (skb == NULL)
643 			break;
644 		mapping = pci_map_single(tp->pdev, skb->data,
645 					 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
646 		tp->rx_buffers[i].mapping = mapping;
647 		tp->rx_ring[i].status = cpu_to_le32(DescOwned);	/* Owned by Tulip chip */
648 		tp->rx_ring[i].buffer1 = cpu_to_le32(mapping);
649 	}
650 	tp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
651 
652 	/* The Tx buffer descriptor is filled in as needed, but we
653 	   do need to clear the ownership bit. */
654 	for (i = 0; i < TX_RING_SIZE; i++) {
655 		tp->tx_buffers[i].skb = NULL;
656 		tp->tx_buffers[i].mapping = 0;
657 		tp->tx_ring[i].status = 0x00000000;
658 		tp->tx_ring[i].buffer2 = cpu_to_le32(tp->tx_ring_dma + sizeof(struct tulip_tx_desc) * (i + 1));
659 	}
660 	tp->tx_ring[i-1].buffer2 = cpu_to_le32(tp->tx_ring_dma);
661 }
662 
663 static netdev_tx_t
664 tulip_start_xmit(struct sk_buff *skb, struct net_device *dev)
665 {
666 	struct tulip_private *tp = netdev_priv(dev);
667 	int entry;
668 	u32 flag;
669 	dma_addr_t mapping;
670 	unsigned long flags;
671 
672 	spin_lock_irqsave(&tp->lock, flags);
673 
674 	/* Calculate the next Tx descriptor entry. */
675 	entry = tp->cur_tx % TX_RING_SIZE;
676 
677 	tp->tx_buffers[entry].skb = skb;
678 	mapping = pci_map_single(tp->pdev, skb->data,
679 				 skb->len, PCI_DMA_TODEVICE);
680 	tp->tx_buffers[entry].mapping = mapping;
681 	tp->tx_ring[entry].buffer1 = cpu_to_le32(mapping);
682 
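	/* Descriptor flag bits: 0x60000000 marks a single-buffer packet
	 * (first + last segment) and 0x80000000 requests a Tx-done interrupt.
	 * Interrupts are requested on only a few descriptors (ring half-full
	 * and nearly-full) to keep the interrupt rate down while still
	 * guaranteeing timely Tx cleanup.
	 */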
683 	if (tp->cur_tx - tp->dirty_tx < TX_RING_SIZE/2) {/* Typical path */
684 		flag = 0x60000000; /* No interrupt */
685 	} else if (tp->cur_tx - tp->dirty_tx == TX_RING_SIZE/2) {
686 		flag = 0xe0000000; /* Tx-done intr. */
687 	} else if (tp->cur_tx - tp->dirty_tx < TX_RING_SIZE - 2) {
688 		flag = 0x60000000; /* No Tx-done intr. */
689 	} else {		/* Leave room for set_rx_mode() to fill entries. */
690 		flag = 0xe0000000; /* Tx-done intr. */
691 		netif_stop_queue(dev);
692 	}
693 	if (entry == TX_RING_SIZE-1)
694 		flag = 0xe0000000 | DESC_RING_WRAP;
695 
696 	tp->tx_ring[entry].length = cpu_to_le32(skb->len | flag);
697 	/* if we were using Transmit Automatic Polling, we would need a
698 	 * wmb() here. */
699 	tp->tx_ring[entry].status = cpu_to_le32(DescOwned);
700 	wmb();
701 
702 	tp->cur_tx++;
703 
704 	/* Trigger an immediate transmit demand. */
705 	iowrite32(0, tp->base_addr + CSR1);
706 
707 	spin_unlock_irqrestore(&tp->lock, flags);
708 
709 	return NETDEV_TX_OK;
710 }
711 
712 static void tulip_clean_tx_ring(struct tulip_private *tp)
713 {
714 	unsigned int dirty_tx;
715 
716 	for (dirty_tx = tp->dirty_tx ; tp->cur_tx - dirty_tx > 0;
717 		dirty_tx++) {
718 		int entry = dirty_tx % TX_RING_SIZE;
719 		int status = le32_to_cpu(tp->tx_ring[entry].status);
720 
721 		if (status < 0) {
722 			tp->dev->stats.tx_errors++;	/* It wasn't Txed */
723 			tp->tx_ring[entry].status = 0;
724 		}
725 
726 		/* Check for Tx filter setup frames. */
727 		if (tp->tx_buffers[entry].skb == NULL) {
728 			/* test because dummy frames not mapped */
729 			if (tp->tx_buffers[entry].mapping)
730 				pci_unmap_single(tp->pdev,
731 					tp->tx_buffers[entry].mapping,
732 					sizeof(tp->setup_frame),
733 					PCI_DMA_TODEVICE);
734 			continue;
735 		}
736 
737 		pci_unmap_single(tp->pdev, tp->tx_buffers[entry].mapping,
738 				tp->tx_buffers[entry].skb->len,
739 				PCI_DMA_TODEVICE);
740 
741 		/* Free the original skb. */
742 		dev_kfree_skb_irq(tp->tx_buffers[entry].skb);
743 		tp->tx_buffers[entry].skb = NULL;
744 		tp->tx_buffers[entry].mapping = 0;
745 	}
746 }
747 
748 static void tulip_down (struct net_device *dev)
749 {
750 	struct tulip_private *tp = netdev_priv(dev);
751 	void __iomem *ioaddr = tp->base_addr;
752 	unsigned long flags;
753 
754 	cancel_work_sync(&tp->media_work);
755 
756 #ifdef CONFIG_TULIP_NAPI
757 	napi_disable(&tp->napi);
758 #endif
759 
760 	del_timer_sync (&tp->timer);
761 #ifdef CONFIG_TULIP_NAPI
762 	del_timer_sync (&tp->oom_timer);
763 #endif
764 	spin_lock_irqsave (&tp->lock, flags);
765 
766 	/* Disable interrupts by clearing the interrupt mask. */
767 	iowrite32 (0x00000000, ioaddr + CSR7);
768 
769 	/* Stop the Tx and Rx processes. */
770 	tulip_stop_rxtx(tp);
771 
772 	/* prepare receive buffers */
773 	tulip_refill_rx(dev);
774 
775 	/* release any unconsumed transmit buffers */
776 	tulip_clean_tx_ring(tp);
777 
778 	if (ioread32(ioaddr + CSR6) != 0xffffffff)
779 		dev->stats.rx_missed_errors += ioread32(ioaddr + CSR8) & 0xffff;
780 
781 	spin_unlock_irqrestore (&tp->lock, flags);
782 
783 	timer_setup(&tp->timer, tulip_tbl[tp->chip_id].media_timer, 0);
784 
785 	dev->if_port = tp->saved_if_port;
786 
787 	/* Leave the driver in snooze, not sleep, mode. */
788 	tulip_set_power_state (tp, 0, 1);
789 }
790 
791 static void tulip_free_ring (struct net_device *dev)
792 {
793 	struct tulip_private *tp = netdev_priv(dev);
794 	int i;
795 
796 	/* Free all the skbuffs in the Rx queue. */
797 	for (i = 0; i < RX_RING_SIZE; i++) {
798 		struct sk_buff *skb = tp->rx_buffers[i].skb;
799 		dma_addr_t mapping = tp->rx_buffers[i].mapping;
800 
801 		tp->rx_buffers[i].skb = NULL;
802 		tp->rx_buffers[i].mapping = 0;
803 
804 		tp->rx_ring[i].status = 0;	/* Not owned by Tulip chip. */
805 		tp->rx_ring[i].length = 0;
806 		/* An invalid address. */
807 		tp->rx_ring[i].buffer1 = cpu_to_le32(0xBADF00D0);
808 		if (skb) {
809 			pci_unmap_single(tp->pdev, mapping, PKT_BUF_SZ,
810 					 PCI_DMA_FROMDEVICE);
811 			dev_kfree_skb (skb);
812 		}
813 	}
814 
815 	for (i = 0; i < TX_RING_SIZE; i++) {
816 		struct sk_buff *skb = tp->tx_buffers[i].skb;
817 
818 		if (skb != NULL) {
819 			pci_unmap_single(tp->pdev, tp->tx_buffers[i].mapping,
820 					 skb->len, PCI_DMA_TODEVICE);
821 			dev_kfree_skb (skb);
822 		}
823 		tp->tx_buffers[i].skb = NULL;
824 		tp->tx_buffers[i].mapping = 0;
825 	}
826 }
827 
828 static int tulip_close (struct net_device *dev)
829 {
830 	struct tulip_private *tp = netdev_priv(dev);
831 	void __iomem *ioaddr = tp->base_addr;
832 
833 	netif_stop_queue (dev);
834 
835 	tulip_down (dev);
836 
837 	if (tulip_debug > 1)
		netdev_dbg(dev, "Shutting down ethercard, status was %08x\n",
839 			   ioread32 (ioaddr + CSR5));
840 
841 	free_irq (tp->pdev->irq, dev);
842 
843 	tulip_free_ring (dev);
844 
845 	return 0;
846 }
847 
848 static struct net_device_stats *tulip_get_stats(struct net_device *dev)
849 {
850 	struct tulip_private *tp = netdev_priv(dev);
851 	void __iomem *ioaddr = tp->base_addr;
852 
853 	if (netif_running(dev)) {
854 		unsigned long flags;
855 
856 		spin_lock_irqsave (&tp->lock, flags);
857 
858 		dev->stats.rx_missed_errors += ioread32(ioaddr + CSR8) & 0xffff;
859 
860 		spin_unlock_irqrestore(&tp->lock, flags);
861 	}
862 
863 	return &dev->stats;
864 }
865 
866 
867 static void tulip_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
868 {
869 	struct tulip_private *np = netdev_priv(dev);
870 	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
871 	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
872 	strlcpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info));
873 }
874 
875 
876 static int tulip_ethtool_set_wol(struct net_device *dev,
877 				 struct ethtool_wolinfo *wolinfo)
878 {
879 	struct tulip_private *tp = netdev_priv(dev);
880 
	if (wolinfo->wolopts & (~tp->wolinfo.supported))
		return -EOPNOTSUPP;
883 
884 	tp->wolinfo.wolopts = wolinfo->wolopts;
885 	device_set_wakeup_enable(&tp->pdev->dev, tp->wolinfo.wolopts);
886 	return 0;
887 }
888 
889 static void tulip_ethtool_get_wol(struct net_device *dev,
890 				  struct ethtool_wolinfo *wolinfo)
891 {
892 	struct tulip_private *tp = netdev_priv(dev);
893 
894 	wolinfo->supported = tp->wolinfo.supported;
895 	wolinfo->wolopts = tp->wolinfo.wolopts;
896 	return;
897 }
898 
899 
900 static const struct ethtool_ops ops = {
901 	.get_drvinfo = tulip_get_drvinfo,
902 	.set_wol     = tulip_ethtool_set_wol,
903 	.get_wol     = tulip_ethtool_get_wol,
904 };
905 
906 /* Provide ioctl() calls to examine the MII xcvr state. */
907 static int private_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
908 {
909 	struct tulip_private *tp = netdev_priv(dev);
910 	void __iomem *ioaddr = tp->base_addr;
911 	struct mii_ioctl_data *data = if_mii(rq);
912 	const unsigned int phy_idx = 0;
913 	int phy = tp->phys[phy_idx] & 0x1f;
914 	unsigned int regnum = data->reg_num;
915 
916 	switch (cmd) {
917 	case SIOCGMIIPHY:		/* Get address of MII PHY in use. */
918 		if (tp->mii_cnt)
919 			data->phy_id = phy;
920 		else if (tp->flags & HAS_NWAY)
921 			data->phy_id = 32;
922 		else if (tp->chip_id == COMET)
923 			data->phy_id = 1;
924 		else
925 			return -ENODEV;
926 
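		/* fall through */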
927 	case SIOCGMIIREG:		/* Read MII PHY register. */
928 		if (data->phy_id == 32 && (tp->flags & HAS_NWAY)) {
			int csr12 = ioread32(ioaddr + CSR12);
			int csr14 = ioread32(ioaddr + CSR14);
			switch (regnum) {
			case 0:
				if (((csr14 << 5) & 0x1000) ||
				    (dev->if_port == 5 && tp->nwayset))
					data->val_out = 0x1000;
				else
					data->val_out = (tulip_media_cap[dev->if_port] & MediaIs100 ? 0x2000 : 0)
						| (tulip_media_cap[dev->if_port] & MediaIsFD ? 0x0100 : 0);
				break;
			case 1:
				data->val_out =
					0x1848 +
					((csr12 & 0x7000) == 0x5000 ? 0x20 : 0) +
					((csr12 & 0x06) == 6 ? 0 : 4);
				data->val_out |= 0x6048;
				break;
			case 4:
				/* Advertised value, bogus 10baseTx-FD value from CSR6. */
				data->val_out =
					((ioread32(ioaddr + CSR6) >> 3) & 0x0040) +
					((csr14 >> 1) & 0x20) + 1;
				data->val_out |= ((csr14 >> 9) & 0x03C0);
				break;
			case 5: data->val_out = tp->lpar; break;
			default: data->val_out = 0; break;
			}
957 		} else {
958 			data->val_out = tulip_mdio_read (dev, data->phy_id & 0x1f, regnum);
959 		}
960 		return 0;
961 
962 	case SIOCSMIIREG:		/* Write MII PHY register. */
963 		if (regnum & ~0x1f)
964 			return -EINVAL;
965 		if (data->phy_id == phy) {
966 			u16 value = data->val_in;
967 			switch (regnum) {
968 			case 0:	/* Check for autonegotiation on or reset. */
969 				tp->full_duplex_lock = (value & 0x9000) ? 0 : 1;
970 				if (tp->full_duplex_lock)
971 					tp->full_duplex = (value & 0x0100) ? 1 : 0;
972 				break;
973 			case 4:
974 				tp->advertising[phy_idx] =
975 				tp->mii_advertise = data->val_in;
976 				break;
977 			}
978 		}
979 		if (data->phy_id == 32 && (tp->flags & HAS_NWAY)) {
980 			u16 value = data->val_in;
			if (regnum == 0) {
				if ((value & 0x1200) == 0x1200) {
					if (tp->chip_id == PNIC2)
						pnic2_start_nway(dev);
					else
						t21142_start_nway(dev);
				}
989 			} else if (regnum == 4)
990 				tp->sym_advertise = value;
991 		} else {
992 			tulip_mdio_write (dev, data->phy_id & 0x1f, regnum, data->val_in);
993 		}
994 		return 0;
995 	default:
996 		return -EOPNOTSUPP;
997 	}
998 
999 	return -EOPNOTSUPP;
1000 }
1001 
1002 
1003 /* Set or clear the multicast filter for this adaptor.
1004    Note that we only use exclusion around actually queueing the
1005    new frame, not around filling tp->setup_frame.  This is non-deterministic
1006    when re-entered but still correct. */
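
/* For the imperfect (hash) filter built below, the setup frame carries a
   512-bit multicast hash table in its first 32 longwords (16 significant
   bits per longword, hence each word written twice) and a single perfect
   entry with our station address at &setup_frame[13*6]. */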
1007 
1008 static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev)
1009 {
1010 	struct tulip_private *tp = netdev_priv(dev);
1011 	u16 hash_table[32];
1012 	struct netdev_hw_addr *ha;
1013 	int i;
1014 	u16 *eaddrs;
1015 
1016 	memset(hash_table, 0, sizeof(hash_table));
1017 	__set_bit_le(255, hash_table);			/* Broadcast entry */
1018 	/* This should work on big-endian machines as well. */
1019 	netdev_for_each_mc_addr(ha, dev) {
1020 		int index = ether_crc_le(ETH_ALEN, ha->addr) & 0x1ff;
1021 
1022 		__set_bit_le(index, hash_table);
1023 	}
1024 	for (i = 0; i < 32; i++) {
1025 		*setup_frm++ = hash_table[i];
1026 		*setup_frm++ = hash_table[i];
1027 	}
1028 	setup_frm = &tp->setup_frame[13*6];
1029 
1030 	/* Fill the final entry with our physical address. */
1031 	eaddrs = (u16 *)dev->dev_addr;
1032 	*setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
1033 	*setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
1034 	*setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
1035 }
1036 
1037 static void build_setup_frame_perfect(u16 *setup_frm, struct net_device *dev)
1038 {
1039 	struct tulip_private *tp = netdev_priv(dev);
1040 	struct netdev_hw_addr *ha;
1041 	u16 *eaddrs;
1042 
1043 	/* We have <= 14 addresses so we can use the wonderful
1044 	   16 address perfect filtering of the Tulip. */
1045 	netdev_for_each_mc_addr(ha, dev) {
1046 		eaddrs = (u16 *) ha->addr;
1047 		*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
1048 		*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
1049 		*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
1050 	}
1051 	/* Fill the unused entries with the broadcast address. */
1052 	memset(setup_frm, 0xff, (15 - netdev_mc_count(dev)) * 12);
1053 	setup_frm = &tp->setup_frame[15*6];
1054 
1055 	/* Fill the final entry with our physical address. */
1056 	eaddrs = (u16 *)dev->dev_addr;
1057 	*setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
1058 	*setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
1059 	*setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
1060 }
1061 
1062 
1063 static void set_rx_mode(struct net_device *dev)
1064 {
1065 	struct tulip_private *tp = netdev_priv(dev);
1066 	void __iomem *ioaddr = tp->base_addr;
1067 	int csr6;
1068 
1069 	csr6 = ioread32(ioaddr + CSR6) & ~0x00D5;
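	/* 0x00D5 masks out the CSR6 receive-filtering mode bits
	 * (hash/perfect, hash-only, inverse filtering, promiscuous and
	 * pass-all-multicast); they are recomputed below. */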
1070 
1071 	tp->csr6 &= ~0x00D5;
1072 	if (dev->flags & IFF_PROMISC) {			/* Set promiscuous. */
1073 		tp->csr6 |= AcceptAllMulticast | AcceptAllPhys;
1074 		csr6 |= AcceptAllMulticast | AcceptAllPhys;
1075 	} else if ((netdev_mc_count(dev) > 1000) ||
1076 		   (dev->flags & IFF_ALLMULTI)) {
1077 		/* Too many to filter well -- accept all multicasts. */
1078 		tp->csr6 |= AcceptAllMulticast;
1079 		csr6 |= AcceptAllMulticast;
1080 	} else	if (tp->flags & MC_HASH_ONLY) {
1081 		/* Some work-alikes have only a 64-entry hash filter table. */
1082 		/* Should verify correctness on big-endian/__powerpc__ */
1083 		struct netdev_hw_addr *ha;
1084 		if (netdev_mc_count(dev) > 64) {
1085 			/* Arbitrary non-effective limit. */
1086 			tp->csr6 |= AcceptAllMulticast;
1087 			csr6 |= AcceptAllMulticast;
1088 		} else {
1089 			u32 mc_filter[2] = {0, 0};		 /* Multicast hash filter */
1090 			int filterbit;
1091 			netdev_for_each_mc_addr(ha, dev) {
1092 				if (tp->flags & COMET_MAC_ADDR)
1093 					filterbit = ether_crc_le(ETH_ALEN,
1094 								 ha->addr);
1095 				else
1096 					filterbit = ether_crc(ETH_ALEN,
1097 							      ha->addr) >> 26;
1098 				filterbit &= 0x3f;
1099 				mc_filter[filterbit >> 5] |= 1 << (filterbit & 31);
1100 				if (tulip_debug > 2)
1101 					dev_info(&dev->dev,
1102 						 "Added filter for %pM  %08x bit %d\n",
1103 						 ha->addr,
1104 						 ether_crc(ETH_ALEN, ha->addr),
1105 						 filterbit);
1106 			}
1107 			if (mc_filter[0] == tp->mc_filter[0]  &&
1108 				mc_filter[1] == tp->mc_filter[1])
1109 				;				/* No change. */
1110 			else if (tp->flags & IS_ASIX) {
1111 				iowrite32(2, ioaddr + CSR13);
1112 				iowrite32(mc_filter[0], ioaddr + CSR14);
1113 				iowrite32(3, ioaddr + CSR13);
1114 				iowrite32(mc_filter[1], ioaddr + CSR14);
1115 			} else if (tp->flags & COMET_MAC_ADDR) {
1116 				iowrite32(mc_filter[0], ioaddr + CSR27);
1117 				iowrite32(mc_filter[1], ioaddr + CSR28);
1118 			}
1119 			tp->mc_filter[0] = mc_filter[0];
1120 			tp->mc_filter[1] = mc_filter[1];
1121 		}
1122 	} else {
1123 		unsigned long flags;
1124 		u32 tx_flags = 0x08000000 | 192;
1125 
1126 		/* Note that only the low-address shortword of setup_frame is valid!
1127 		   The values are doubled for big-endian architectures. */
1128 		if (netdev_mc_count(dev) > 14) {
1129 			/* Must use a multicast hash table. */
1130 			build_setup_frame_hash(tp->setup_frame, dev);
1131 			tx_flags = 0x08400000 | 192;
1132 		} else {
1133 			build_setup_frame_perfect(tp->setup_frame, dev);
1134 		}
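
		/* In the setup-frame descriptor, 0x08000000 flags a setup
		   frame, 192 is its length in bytes, and the extra 0x00400000
		   bit selects hash filtering rather than perfect filtering. */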
1135 
1136 		spin_lock_irqsave(&tp->lock, flags);
1137 
1138 		if (tp->cur_tx - tp->dirty_tx > TX_RING_SIZE - 2) {
1139 			/* Same setup recently queued, we need not add it. */
1140 		} else {
1141 			unsigned int entry;
1142 			int dummy = -1;
1143 
1144 			/* Now add this frame to the Tx list. */
1145 
1146 			entry = tp->cur_tx++ % TX_RING_SIZE;
1147 
1148 			if (entry != 0) {
				/* Avoid a chip erratum by prefixing a dummy entry. */
1150 				tp->tx_buffers[entry].skb = NULL;
1151 				tp->tx_buffers[entry].mapping = 0;
1152 				tp->tx_ring[entry].length =
1153 					(entry == TX_RING_SIZE-1) ? cpu_to_le32(DESC_RING_WRAP) : 0;
1154 				tp->tx_ring[entry].buffer1 = 0;
1155 				/* Must set DescOwned later to avoid race with chip */
1156 				dummy = entry;
1157 				entry = tp->cur_tx++ % TX_RING_SIZE;
1158 
1159 			}
1160 
1161 			tp->tx_buffers[entry].skb = NULL;
1162 			tp->tx_buffers[entry].mapping =
1163 				pci_map_single(tp->pdev, tp->setup_frame,
1164 					       sizeof(tp->setup_frame),
1165 					       PCI_DMA_TODEVICE);
1166 			/* Put the setup frame on the Tx list. */
1167 			if (entry == TX_RING_SIZE-1)
1168 				tx_flags |= DESC_RING_WRAP;		/* Wrap ring. */
1169 			tp->tx_ring[entry].length = cpu_to_le32(tx_flags);
1170 			tp->tx_ring[entry].buffer1 =
1171 				cpu_to_le32(tp->tx_buffers[entry].mapping);
1172 			tp->tx_ring[entry].status = cpu_to_le32(DescOwned);
1173 			if (dummy >= 0)
1174 				tp->tx_ring[dummy].status = cpu_to_le32(DescOwned);
1175 			if (tp->cur_tx - tp->dirty_tx >= TX_RING_SIZE - 2)
1176 				netif_stop_queue(dev);
1177 
1178 			/* Trigger an immediate transmit demand. */
1179 			iowrite32(0, ioaddr + CSR1);
1180 		}
1181 
1182 		spin_unlock_irqrestore(&tp->lock, flags);
1183 	}
1184 
1185 	iowrite32(csr6, ioaddr + CSR6);
1186 }
1187 
1188 #ifdef CONFIG_TULIP_MWI
1189 static void tulip_mwi_config(struct pci_dev *pdev, struct net_device *dev)
1190 {
1191 	struct tulip_private *tp = netdev_priv(dev);
1192 	u8 cache;
1193 	u16 pci_command;
1194 	u32 csr0;
1195 
1196 	if (tulip_debug > 3)
1197 		netdev_dbg(dev, "tulip_mwi_config()\n");
1198 
1199 	tp->csr0 = csr0 = 0;
1200 
1201 	/* if we have any cache line size at all, we can do MRM and MWI */
1202 	csr0 |= MRM | MWI;
1203 
1204 	/* Enable MWI in the standard PCI command bit.
1205 	 * Check for the case where MWI is desired but not available
1206 	 */
1207 	pci_try_set_mwi(pdev);
1208 
1209 	/* read result from hardware (in case bit refused to enable) */
1210 	pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
1211 	if ((csr0 & MWI) && (!(pci_command & PCI_COMMAND_INVALIDATE)))
1212 		csr0 &= ~MWI;
1213 
1214 	/* if cache line size hardwired to zero, no MWI */
1215 	pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &cache);
1216 	if ((csr0 & MWI) && (cache == 0)) {
1217 		csr0 &= ~MWI;
1218 		pci_clear_mwi(pdev);
1219 	}
1220 
1221 	/* assign per-cacheline-size cache alignment and
1222 	 * burst length values
1223 	 */
1224 	switch (cache) {
1225 	case 8:
1226 		csr0 |= MRL | (1 << CALShift) | (16 << BurstLenShift);
1227 		break;
1228 	case 16:
1229 		csr0 |= MRL | (2 << CALShift) | (16 << BurstLenShift);
1230 		break;
1231 	case 32:
1232 		csr0 |= MRL | (3 << CALShift) | (32 << BurstLenShift);
1233 		break;
1234 	default:
1235 		cache = 0;
1236 		break;
1237 	}
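
	/* For example, a 64-byte cache line (cache == 16) ends up with
	 * csr0 = MWI | MRM | MRL | (2 << CALShift) | (16 << BurstLenShift).
	 */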
1238 
1239 	/* if we have a good cache line size, we by now have a good
1240 	 * csr0, so save it and exit
1241 	 */
1242 	if (cache)
1243 		goto out;
1244 
1245 	/* we don't have a good csr0 or cache line size, disable MWI */
1246 	if (csr0 & MWI) {
1247 		pci_clear_mwi(pdev);
1248 		csr0 &= ~MWI;
1249 	}
1250 
1251 	/* sane defaults for burst length and cache alignment
1252 	 * originally from de4x5 driver
1253 	 */
1254 	csr0 |= (8 << BurstLenShift) | (1 << CALShift);
1255 
1256 out:
1257 	tp->csr0 = csr0;
1258 	if (tulip_debug > 2)
1259 		netdev_dbg(dev, "MWI config cacheline=%d, csr0=%08x\n",
1260 			   cache, csr0);
1261 }
1262 #endif
1263 
1264 /*
 *	Chips that have the MRM/reserved bit quirk and the burst quirk. That
 *	is the DM910X and the on-chip ULi devices.
1267  */
1268 
1269 static int tulip_uli_dm_quirk(struct pci_dev *pdev)
1270 {
1271 	if (pdev->vendor == 0x1282 && pdev->device == 0x9102)
1272 		return 1;
1273 	return 0;
1274 }
1275 
1276 static const struct net_device_ops tulip_netdev_ops = {
1277 	.ndo_open		= tulip_open,
1278 	.ndo_start_xmit		= tulip_start_xmit,
1279 	.ndo_tx_timeout		= tulip_tx_timeout,
1280 	.ndo_stop		= tulip_close,
1281 	.ndo_get_stats		= tulip_get_stats,
1282 	.ndo_do_ioctl 		= private_ioctl,
1283 	.ndo_set_rx_mode	= set_rx_mode,
1284 	.ndo_set_mac_address	= eth_mac_addr,
1285 	.ndo_validate_addr	= eth_validate_addr,
1286 #ifdef CONFIG_NET_POLL_CONTROLLER
1287 	.ndo_poll_controller	 = poll_tulip,
1288 #endif
1289 };
1290 
static const struct pci_device_id early_486_chipsets[] = {
1292 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82424) },
1293 	{ PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_496) },
1294 	{ },
1295 };
1296 
1297 static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1298 {
1299 	struct tulip_private *tp;
1300 	/* See note below on the multiport cards. */
1301 	static unsigned char last_phys_addr[ETH_ALEN] = {
1302 		0x00, 'L', 'i', 'n', 'u', 'x'
1303 	};
1304 	static int last_irq;
1305 	int i, irq;
1306 	unsigned short sum;
1307 	unsigned char *ee_data;
1308 	struct net_device *dev;
1309 	void __iomem *ioaddr;
1310 	static int board_idx = -1;
1311 	int chip_idx = ent->driver_data;
1312 	const char *chip_name = tulip_tbl[chip_idx].chip_name;
1313 	unsigned int eeprom_missing = 0;
1314 	unsigned int force_csr0 = 0;
1315 
1316 #ifndef MODULE
1317 	if (tulip_debug > 0)
1318 		printk_once(KERN_INFO "%s", version);
1319 #endif
1320 
1321 	board_idx++;
1322 
1323 	/*
	 *	LanMedia boards wire a tulip chip to a WAN interface; they need
	 *	a very different driver (the lmc driver).
1326 	 */
1327 
	if (pdev->subsystem_vendor == PCI_VENDOR_ID_LMC) {
1329 		pr_err("skipping LMC card\n");
1330 		return -ENODEV;
1331 	} else if (pdev->subsystem_vendor == PCI_VENDOR_ID_SBE &&
1332 		   (pdev->subsystem_device == PCI_SUBDEVICE_ID_SBE_T3E3 ||
1333 		    pdev->subsystem_device == PCI_SUBDEVICE_ID_SBE_2T3E3_P0 ||
1334 		    pdev->subsystem_device == PCI_SUBDEVICE_ID_SBE_2T3E3_P1)) {
1335 		pr_err("skipping SBE T3E3 port\n");
1336 		return -ENODEV;
1337 	}
1338 
1339 	/*
1340 	 *	DM910x chips should be handled by the dmfe driver, except
1341 	 *	on-board chips on SPARC systems.  Also, early DM9100s need
1342 	 *	software CRC which only the dmfe driver supports.
1343 	 */
1344 
1345 #ifdef CONFIG_TULIP_DM910X
1346 	if (chip_idx == DM910X) {
1347 		struct device_node *dp;
1348 
1349 		if (pdev->vendor == 0x1282 && pdev->device == 0x9100 &&
1350 		    pdev->revision < 0x30) {
1351 			pr_info("skipping early DM9100 with Crc bug (use dmfe)\n");
1352 			return -ENODEV;
1353 		}
1354 
1355 		dp = pci_device_to_OF_node(pdev);
1356 		if (!(dp && of_get_property(dp, "local-mac-address", NULL))) {
1357 			pr_info("skipping DM910x expansion card (use dmfe)\n");
1358 			return -ENODEV;
1359 		}
1360 	}
1361 #endif
1362 
1363 	/*
1364 	 *	Looks for early PCI chipsets where people report hangs
1365 	 *	without the workarounds being on.
1366 	 */
1367 
1368 	/* 1. Intel Saturn. Switch to 8 long words burst, 8 long word cache
1369 	      aligned.  Aries might need this too. The Saturn errata are not
1370 	      pretty reading but thankfully it's an old 486 chipset.
1371 
1372 	   2. The dreaded SiS496 486 chipset. Same workaround as Intel
1373 	      Saturn.
1374 	*/
1375 
1376 	if (pci_dev_present(early_486_chipsets)) {
1377 		csr0 = MRL | MRM | (8 << BurstLenShift) | (1 << CALShift);
1378 		force_csr0 = 1;
1379 	}
1380 
1381 	/* bugfix: the ASIX must have a burst limit or horrible things happen. */
1382 	if (chip_idx == AX88140) {
1383 		if ((csr0 & 0x3f00) == 0)
1384 			csr0 |= 0x2000;
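			/* 32-longword burst limit, per the CSR0 table above */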
1385 	}
1386 
1387 	/* PNIC doesn't have MWI/MRL/MRM... */
1388 	if (chip_idx == LC82C168)
1389 		csr0 &= ~0xfff10000; /* zero reserved bits 31:20, 16 */
1390 
1391 	/* DM9102A has troubles with MRM & clear reserved bits 24:22, 20, 16, 7:1 */
1392 	if (tulip_uli_dm_quirk(pdev)) {
1393 		csr0 &= ~0x01f100ff;
1394 #if defined(CONFIG_SPARC)
1395                 csr0 = (csr0 & ~0xff00) | 0xe000;
1396 #endif
1397 	}
1398 	/*
1399 	 *	And back to business
1400 	 */
1401 
1402 	i = pci_enable_device(pdev);
1403 	if (i) {
1404 		pr_err("Cannot enable tulip board #%d, aborting\n", board_idx);
1405 		return i;
1406 	}
1407 
1408 	irq = pdev->irq;
1409 
1410 	/* alloc_etherdev ensures aligned and zeroed private structures */
1411 	dev = alloc_etherdev (sizeof (*tp));
1412 	if (!dev)
1413 		return -ENOMEM;
1414 
1415 	SET_NETDEV_DEV(dev, &pdev->dev);
1416 	if (pci_resource_len (pdev, 0) < tulip_tbl[chip_idx].io_size) {
1417 		pr_err("%s: I/O region (0x%llx@0x%llx) too small, aborting\n",
1418 		       pci_name(pdev),
1419 		       (unsigned long long)pci_resource_len (pdev, 0),
1420 		       (unsigned long long)pci_resource_start (pdev, 0));
1421 		goto err_out_free_netdev;
1422 	}
1423 
1424 	/* grab all resources from both PIO and MMIO regions, as we
1425 	 * don't want anyone else messing around with our hardware */
1426 	if (pci_request_regions (pdev, DRV_NAME))
1427 		goto err_out_free_netdev;
1428 
1429 	ioaddr =  pci_iomap(pdev, TULIP_BAR, tulip_tbl[chip_idx].io_size);
1430 
1431 	if (!ioaddr)
1432 		goto err_out_free_res;
1433 
1434 	/*
1435 	 * initialize private data structure 'tp'
1436 	 * it is zeroed and aligned in alloc_etherdev
1437 	 */
1438 	tp = netdev_priv(dev);
1439 	tp->dev = dev;
1440 
1441 	tp->rx_ring = pci_alloc_consistent(pdev,
1442 					   sizeof(struct tulip_rx_desc) * RX_RING_SIZE +
1443 					   sizeof(struct tulip_tx_desc) * TX_RING_SIZE,
1444 					   &tp->rx_ring_dma);
1445 	if (!tp->rx_ring)
1446 		goto err_out_mtable;
1447 	tp->tx_ring = (struct tulip_tx_desc *)(tp->rx_ring + RX_RING_SIZE);
1448 	tp->tx_ring_dma = tp->rx_ring_dma + sizeof(struct tulip_rx_desc) * RX_RING_SIZE;
1449 
1450 	tp->chip_id = chip_idx;
1451 	tp->flags = tulip_tbl[chip_idx].flags;
1452 
1453 	tp->wolinfo.supported = 0;
1454 	tp->wolinfo.wolopts = 0;
1455 	/* COMET: Enable power management only for AN983B */
1456 	if (chip_idx == COMET ) {
1457 		u32 sig;
1458 		pci_read_config_dword (pdev, 0x80, &sig);
1459 		if (sig == 0x09811317) {
1460 			tp->flags |= COMET_PM;
1461 			tp->wolinfo.supported = WAKE_PHY | WAKE_MAGIC;
1462 			pr_info("%s: Enabled WOL support for AN983B\n",
1463 				__func__);
1464 		}
1465 	}
1466 	tp->pdev = pdev;
1467 	tp->base_addr = ioaddr;
1468 	tp->revision = pdev->revision;
1469 	tp->csr0 = csr0;
1470 	spin_lock_init(&tp->lock);
1471 	spin_lock_init(&tp->mii_lock);
1472 	timer_setup(&tp->timer, tulip_tbl[tp->chip_id].media_timer, 0);
1473 
1474 	INIT_WORK(&tp->media_work, tulip_tbl[tp->chip_id].media_task);
1475 
1476 #ifdef CONFIG_TULIP_MWI
1477 	if (!force_csr0 && (tp->flags & HAS_PCI_MWI))
1478 		tulip_mwi_config (pdev, dev);
1479 #endif
1480 
1481 	/* Stop the chip's Tx and Rx processes. */
1482 	tulip_stop_rxtx(tp);
1483 
1484 	pci_set_master(pdev);
1485 
1486 #ifdef CONFIG_GSC
1487 	if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP) {
1488 		switch (pdev->subsystem_device) {
1489 		default:
1490 			break;
1491 		case 0x1061:
1492 		case 0x1062:
1493 		case 0x1063:
1494 		case 0x1098:
1495 		case 0x1099:
1496 		case 0x10EE:
1497 			tp->flags |= HAS_SWAPPED_SEEPROM | NEEDS_FAKE_MEDIA_TABLE;
1498 			chip_name = "GSC DS21140 Tulip";
1499 		}
1500 	}
1501 #endif
1502 
1503 	/* Clear the missed-packet counter. */
1504 	ioread32(ioaddr + CSR8);
1505 
1506 	/* The station address ROM is read byte serially.  The register must
1507 	   be polled, waiting for the value to be read bit serially from the
1508 	   EEPROM.
1509 	   */
1510 	ee_data = tp->eeprom;
1511 	memset(ee_data, 0, sizeof(tp->eeprom));
1512 	sum = 0;
1513 	if (chip_idx == LC82C168) {
1514 		for (i = 0; i < 3; i++) {
1515 			int value, boguscnt = 100000;
1516 			iowrite32(0x600 | i, ioaddr + 0x98);
1517 			do {
1518 				value = ioread32(ioaddr + CSR9);
1519 			} while (value < 0  && --boguscnt > 0);
1520 			put_unaligned_le16(value, ((__le16 *)dev->dev_addr) + i);
1521 			sum += value & 0xffff;
1522 		}
1523 	} else if (chip_idx == COMET) {
1524 		/* No need to read the EEPROM. */
1525 		put_unaligned_le32(ioread32(ioaddr + 0xA4), dev->dev_addr);
1526 		put_unaligned_le16(ioread32(ioaddr + 0xA8), dev->dev_addr + 4);
1527 		for (i = 0; i < 6; i ++)
1528 			sum += dev->dev_addr[i];
1529 	} else {
1530 		/* A serial EEPROM interface, we read now and sort it out later. */
1531 		int sa_offset = 0;
1532 		int ee_addr_size = tulip_read_eeprom(dev, 0xff, 8) & 0x40000 ? 8 : 6;
1533 		int ee_max_addr = ((1 << ee_addr_size) - 1) * sizeof(u16);
1534 
1535 		if (ee_max_addr > sizeof(tp->eeprom))
1536 			ee_max_addr = sizeof(tp->eeprom);
1537 
1538 		for (i = 0; i < ee_max_addr ; i += sizeof(u16)) {
1539 			u16 data = tulip_read_eeprom(dev, i/2, ee_addr_size);
1540 			ee_data[i] = data & 0xff;
1541 			ee_data[i + 1] = data >> 8;
1542 		}
1543 
1544 		/* DEC now has a specification (see Notes) but early board makers
1545 		   just put the address in the first EEPROM locations. */
1546 		/* This does  memcmp(ee_data, ee_data+16, 8) */
1547 		for (i = 0; i < 8; i ++)
1548 			if (ee_data[i] != ee_data[16+i])
1549 				sa_offset = 20;
1550 		if (chip_idx == CONEXANT) {
1551 			/* Check that the tuple type and length is correct. */
1552 			if (ee_data[0x198] == 0x04  &&  ee_data[0x199] == 6)
1553 				sa_offset = 0x19A;
1554 		} else if (ee_data[0] == 0xff  &&  ee_data[1] == 0xff &&
1555 				   ee_data[2] == 0) {
1556 			sa_offset = 2;		/* Grrr, damn Matrox boards. */
1557 		}
1558 #ifdef CONFIG_MIPS_COBALT
		if ((pdev->bus->number == 0) &&
		    ((PCI_SLOT(pdev->devfn) == 7) ||
		     (PCI_SLOT(pdev->devfn) == 12))) {
			/* Cobalt MAC address in first EEPROM locations. */
			sa_offset = 0;
			/* Ensure our media table fixup gets applied */
			memcpy(ee_data + 16, ee_data, 8);
		}
1567 #endif
1568 #ifdef CONFIG_GSC
1569 		/* Check to see if we have a broken srom */
1570 		if (ee_data[0] == 0x61 && ee_data[1] == 0x10) {
1571 			/* pci_vendor_id and subsystem_id are swapped */
1572 			ee_data[0] = ee_data[2];
1573 			ee_data[1] = ee_data[3];
1574 			ee_data[2] = 0x61;
1575 			ee_data[3] = 0x10;
1576 
			/* HSC-PCI boards need to be byte-swapped and shifted
1578 			 * up 1 word.  This shift needs to happen at the end
1579 			 * of the MAC first because of the 2 byte overlap.
1580 			 */
1581 			for (i = 4; i >= 0; i -= 2) {
1582 				ee_data[17 + i + 3] = ee_data[17 + i];
1583 				ee_data[16 + i + 5] = ee_data[16 + i];
1584 			}
1585 		}
1586 #endif
1587 
1588 		for (i = 0; i < 6; i ++) {
1589 			dev->dev_addr[i] = ee_data[i + sa_offset];
1590 			sum += ee_data[i + sa_offset];
1591 		}
1592 	}
1593 	/* Lite-On boards have the address byte-swapped. */
1594 	if ((dev->dev_addr[0] == 0xA0 ||
1595 	     dev->dev_addr[0] == 0xC0 ||
1596 	     dev->dev_addr[0] == 0x02) &&
1597 	    dev->dev_addr[1] == 0x00)
1598 		for (i = 0; i < 6; i+=2) {
1599 			char tmp = dev->dev_addr[i];
1600 			dev->dev_addr[i] = dev->dev_addr[i+1];
1601 			dev->dev_addr[i+1] = tmp;
1602 		}
1603 	/* On the Zynx 315 Etherarray and other multiport boards only the
1604 	   first Tulip has an EEPROM.
1605 	   On Sparc systems the mac address is held in the OBP property
1606 	   "local-mac-address".
1607 	   The addresses of the subsequent ports are derived from the first.
1608 	   Many PCI BIOSes also incorrectly report the IRQ line, so we correct
1609 	   that here as well. */
1610 	if (sum == 0  || sum == 6*0xff) {
1611 #if defined(CONFIG_SPARC)
1612 		struct device_node *dp = pci_device_to_OF_node(pdev);
1613 		const unsigned char *addr;
1614 		int len;
1615 #endif
1616 		eeprom_missing = 1;
1617 		for (i = 0; i < 5; i++)
1618 			dev->dev_addr[i] = last_phys_addr[i];
1619 		dev->dev_addr[i] = last_phys_addr[i] + 1;
1620 #if defined(CONFIG_SPARC)
1621 		addr = of_get_property(dp, "local-mac-address", &len);
1622 		if (addr && len == ETH_ALEN)
1623 			memcpy(dev->dev_addr, addr, ETH_ALEN);
1624 #endif
1625 #if defined(__i386__) || defined(__x86_64__)	/* Patch up x86 BIOS bug. */
1626 		if (last_irq)
1627 			irq = last_irq;
1628 #endif
1629 	}
1630 
1631 	for (i = 0; i < 6; i++)
1632 		last_phys_addr[i] = dev->dev_addr[i];
1633 	last_irq = irq;
1634 
1635 	/* The lower four bits are the media type. */
1636 	if (board_idx >= 0  &&  board_idx < MAX_UNITS) {
1637 		if (options[board_idx] & MEDIA_MASK)
1638 			tp->default_port = options[board_idx] & MEDIA_MASK;
1639 		if ((options[board_idx] & FullDuplex) || full_duplex[board_idx] > 0)
1640 			tp->full_duplex = 1;
1641 		if (mtu[board_idx] > 0)
1642 			dev->mtu = mtu[board_idx];
1643 	}
1644 	if (dev->mem_start & MEDIA_MASK)
1645 		tp->default_port = dev->mem_start & MEDIA_MASK;
1646 	if (tp->default_port) {
1647 		pr_info(DRV_NAME "%d: Transceiver selection forced to %s\n",
1648 			board_idx, medianame[tp->default_port & MEDIA_MASK]);
1649 		tp->medialock = 1;
1650 		if (tulip_media_cap[tp->default_port] & MediaAlwaysFD)
1651 			tp->full_duplex = 1;
1652 	}
1653 	if (tp->full_duplex)
1654 		tp->full_duplex_lock = 1;
1655 
1656 	if (tulip_media_cap[tp->default_port] & MediaIsMII) {
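		/* Indexed by (default_port - 9); each entry holds the MII
		   advertisement register bits for that medianame[] entry. */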
1657 		static const u16 media2advert[] = {
1658 			0x20, 0x40, 0x03e0, 0x60, 0x80, 0x100, 0x200
1659 		};
1660 		tp->mii_advertise = media2advert[tp->default_port - 9];
1661 		tp->mii_advertise |= (tp->flags & HAS_8023X); /* Matching bits! */
1662 	}
1663 
1664 	if (tp->flags & HAS_MEDIA_TABLE) {
1665 		sprintf(dev->name, DRV_NAME "%d", board_idx);	/* hack */
1666 		tulip_parse_eeprom(dev);
1667 		strcpy(dev->name, "eth%d");			/* un-hack */
1668 	}
1669 
1670 	if ((tp->flags & ALWAYS_CHECK_MII) ||
1671 		(tp->mtable  &&  tp->mtable->has_mii) ||
1672 		( ! tp->mtable  &&  (tp->flags & HAS_MII))) {
1673 		if (tp->mtable  &&  tp->mtable->has_mii) {
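			/* Pre-select the plain "MII" leaf (media type 11) so the
			   transceiver is reachable for the probe below. */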
1674 			for (i = 0; i < tp->mtable->leafcount; i++)
1675 				if (tp->mtable->mleaf[i].media == 11) {
1676 					tp->cur_index = i;
1677 					tp->saved_if_port = dev->if_port;
1678 					tulip_select_media(dev, 2);
1679 					dev->if_port = tp->saved_if_port;
1680 					break;
1681 				}
1682 		}
1683 
1684 		/* Find the connected MII xcvrs.
1685 		   Doing this in open() would allow detecting external xcvrs
		   later, but it would take too much time. */
1687 		tulip_find_mii (dev, board_idx);
1688 	}
1689 
1690 	/* The Tulip-specific entries in the device structure. */
1691 	dev->netdev_ops = &tulip_netdev_ops;
1692 	dev->watchdog_timeo = TX_TIMEOUT;
1693 #ifdef CONFIG_TULIP_NAPI
1694 	netif_napi_add(dev, &tp->napi, tulip_poll, 16);
1695 #endif
1696 	dev->ethtool_ops = &ops;
1697 
1698 	if (register_netdev(dev))
1699 		goto err_out_free_ring;
1700 
1701 	pci_set_drvdata(pdev, dev);
1702 
1703 	dev_info(&dev->dev,
1704 #ifdef CONFIG_TULIP_MMIO
1705 		 "%s rev %d at MMIO %#llx,%s %pM, IRQ %d\n",
1706 #else
1707 		 "%s rev %d at Port %#llx,%s %pM, IRQ %d\n",
1708 #endif
1709 		 chip_name, pdev->revision,
1710 		 (unsigned long long)pci_resource_start(pdev, TULIP_BAR),
1711 		 eeprom_missing ? " EEPROM not present," : "",
1712 		 dev->dev_addr, irq);
1713 
	if (tp->chip_id == PNIC2)
1715 		tp->link_change = pnic2_lnk_change;
1716 	else if (tp->flags & HAS_NWAY)
1717 		tp->link_change = t21142_lnk_change;
1718 	else if (tp->flags & HAS_PNICNWAY)
1719 		tp->link_change = pnic_lnk_change;
1720 
1721 	/* Reset the xcvr interface and turn on heartbeat. */
1722 	switch (chip_idx) {
1723 	case DC21140:
1724 	case DM910X:
1725 	default:
1726 		if (tp->mtable)
1727 			iowrite32(tp->mtable->csr12dir | 0x100, ioaddr + CSR12);
1728 		break;
1729 	case DC21142:
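		/* With an MII PHY present, quiesce the internal SIA (CSR13/14)
		   and let the PHY drive the link; otherwise start on-chip NWay. */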
1730 		if (tp->mii_cnt  ||  tulip_media_cap[dev->if_port] & MediaIsMII) {
1731 			iowrite32(csr6_mask_defstate, ioaddr + CSR6);
1732 			iowrite32(0x0000, ioaddr + CSR13);
1733 			iowrite32(0x0000, ioaddr + CSR14);
1734 			iowrite32(csr6_mask_hdcap, ioaddr + CSR6);
1735 		} else
1736 			t21142_start_nway(dev);
1737 		break;
1738 	case PNIC2:
		/* Just do a reset for sanity's sake. */
1740 		iowrite32(0x0000, ioaddr + CSR13);
1741 		iowrite32(0x0000, ioaddr + CSR14);
1742 		break;
1743 	case LC82C168:
1744 		if ( ! tp->mii_cnt) {
1745 			tp->nway = 1;
1746 			tp->nwayset = 0;
1747 			iowrite32(csr6_ttm | csr6_ca, ioaddr + CSR6);
1748 			iowrite32(0x30, ioaddr + CSR12);
1749 			iowrite32(0x0001F078, ioaddr + CSR6);
1750 			iowrite32(0x0201F078, ioaddr + CSR6); /* Turn on autonegotiation. */
1751 		}
1752 		break;
1753 	case MX98713:
1754 	case COMPEX9881:
1755 		iowrite32(0x00000000, ioaddr + CSR6);
1756 		iowrite32(0x000711C0, ioaddr + CSR14); /* Turn on NWay. */
1757 		iowrite32(0x00000001, ioaddr + CSR13);
1758 		break;
1759 	case MX98715:
1760 	case MX98725:
1761 		iowrite32(0x01a80000, ioaddr + CSR6);
1762 		iowrite32(0xFFFFFFFF, ioaddr + CSR14);
1763 		iowrite32(0x00001000, ioaddr + CSR12);
1764 		break;
1765 	case COMET:
1766 		/* No initialization necessary. */
1767 		break;
1768 	}
1769 
1770 	/* put the chip in snooze mode until opened */
1771 	tulip_set_power_state (tp, 0, 1);
1772 
1773 	return 0;
1774 
1775 err_out_free_ring:
1776 	pci_free_consistent (pdev,
1777 			     sizeof (struct tulip_rx_desc) * RX_RING_SIZE +
1778 			     sizeof (struct tulip_tx_desc) * TX_RING_SIZE,
1779 			     tp->rx_ring, tp->rx_ring_dma);
1780 
1781 err_out_mtable:
1782 	kfree (tp->mtable);
1783 	pci_iounmap(pdev, ioaddr);
1784 
1785 err_out_free_res:
1786 	pci_release_regions (pdev);
1787 
1788 err_out_free_netdev:
1789 	free_netdev (dev);
1790 	return -ENODEV;
1791 }
1792 
1793 
1794 /* set the registers according to the given wolopts */
1795 static void tulip_set_wolopts (struct pci_dev *pdev, u32 wolopts)
1796 {
1797 	struct net_device *dev = pci_get_drvdata(pdev);
1798 	struct tulip_private *tp = netdev_priv(dev);
1799 	void __iomem *ioaddr = tp->base_addr;
1800 
1801 	if (tp->flags & COMET_PM) {
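		/* Only Comet-family parts (COMET_PM) implement the wake-up
		   registers programmed below; other chips are left untouched. */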
1802 
1803 		unsigned int tmp;
1804 
1805 		tmp = ioread32(ioaddr + CSR18);
1806 		tmp &= ~(comet_csr18_pmes_sticky | comet_csr18_apm_mode | comet_csr18_d3a);
1807 		tmp |= comet_csr18_pm_mode;
1808 		iowrite32(tmp, ioaddr + CSR18);
1809 
		/* Set the Wake-up Control/Status Register to the given WOL options */
1811 		tmp = ioread32(ioaddr + CSR13);
1812 		tmp &= ~(comet_csr13_linkoffe | comet_csr13_linkone | comet_csr13_wfre | comet_csr13_lsce | comet_csr13_mpre);
1813 		if (wolopts & WAKE_MAGIC)
1814 			tmp |= comet_csr13_mpre;
1815 		if (wolopts & WAKE_PHY)
1816 			tmp |= comet_csr13_linkoffe | comet_csr13_linkone | comet_csr13_lsce;
1817 		/* Clear the event flags */
1818 		tmp |= comet_csr13_wfr | comet_csr13_mpr | comet_csr13_lsc;
1819 		iowrite32(tmp, ioaddr + CSR13);
1820 	}
1821 }
1822 
1823 #ifdef CONFIG_PM
1824 
1825 
1826 static int tulip_suspend (struct pci_dev *pdev, pm_message_t state)
1827 {
1828 	pci_power_t pstate;
1829 	struct net_device *dev = pci_get_drvdata(pdev);
1830 	struct tulip_private *tp = netdev_priv(dev);
1831 
1832 	if (!dev)
1833 		return -EINVAL;
1834 
1835 	if (!netif_running(dev))
1836 		goto save_state;
1837 
1838 	tulip_down(dev);
1839 
1840 	netif_device_detach(dev);
1841 	/* FIXME: it needlessly adds an error path. */
1842 	free_irq(tp->pdev->irq, dev);
1843 
1844 save_state:
1845 	pci_save_state(pdev);
1846 	pci_disable_device(pdev);
1847 	pstate = pci_choose_state(pdev, state);
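	/* Arm wake-on-LAN only for a genuine suspend into a low-power state. */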
1848 	if (state.event == PM_EVENT_SUSPEND && pstate != PCI_D0) {
1849 		int rc;
1850 
1851 		tulip_set_wolopts(pdev, tp->wolinfo.wolopts);
1852 		rc = pci_enable_wake(pdev, pstate, tp->wolinfo.wolopts);
1853 		if (rc)
1854 			pr_err("pci_enable_wake failed (%d)\n", rc);
1855 	}
1856 	pci_set_power_state(pdev, pstate);
1857 
1858 	return 0;
1859 }
1860 
1861 
1862 static int tulip_resume(struct pci_dev *pdev)
1863 {
1864 	struct net_device *dev = pci_get_drvdata(pdev);
1865 	struct tulip_private *tp = netdev_priv(dev);
1866 	void __iomem *ioaddr = tp->base_addr;
1867 	int retval;
1868 	unsigned int tmp;
1869 
1870 	if (!dev)
1871 		return -EINVAL;
1872 
1873 	pci_set_power_state(pdev, PCI_D0);
1874 	pci_restore_state(pdev);
1875 
1876 	if (!netif_running(dev))
1877 		return 0;
1878 
1879 	if ((retval = pci_enable_device(pdev))) {
1880 		pr_err("pci_enable_device failed in resume\n");
1881 		return retval;
1882 	}
1883 
1884 	retval = request_irq(pdev->irq, tulip_interrupt, IRQF_SHARED,
1885 			     dev->name, dev);
1886 	if (retval) {
1887 		pr_err("request_irq failed in resume\n");
1888 		return retval;
1889 	}
1890 
1891 	if (tp->flags & COMET_PM) {
1892 		pci_enable_wake(pdev, PCI_D3hot, 0);
1893 		pci_enable_wake(pdev, PCI_D3cold, 0);
1894 
1895 		/* Clear the PMES flag */
1896 		tmp = ioread32(ioaddr + CSR20);
1897 		tmp |= comet_csr20_pmes;
1898 		iowrite32(tmp, ioaddr + CSR20);
1899 
1900 		/* Disable all wake-up events */
1901 		tulip_set_wolopts(pdev, 0);
1902 	}
1903 	netif_device_attach(dev);
1904 
1905 	if (netif_running(dev))
1906 		tulip_up(dev);
1907 
1908 	return 0;
1909 }
1910 
1911 #endif /* CONFIG_PM */
1912 
1913 
1914 static void tulip_remove_one(struct pci_dev *pdev)
1915 {
1916 	struct net_device *dev = pci_get_drvdata (pdev);
1917 	struct tulip_private *tp;
1918 
1919 	if (!dev)
1920 		return;
1921 
1922 	tp = netdev_priv(dev);
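	/* Undo tulip_init_one(): unregister, free the descriptor rings and
	   media table, unmap the registers, release the regions, then
	   disable the PCI device. */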
1923 	unregister_netdev(dev);
1924 	pci_free_consistent (pdev,
1925 			     sizeof (struct tulip_rx_desc) * RX_RING_SIZE +
1926 			     sizeof (struct tulip_tx_desc) * TX_RING_SIZE,
1927 			     tp->rx_ring, tp->rx_ring_dma);
1928 	kfree (tp->mtable);
1929 	pci_iounmap(pdev, tp->base_addr);
1930 	free_netdev (dev);
1931 	pci_release_regions (pdev);
1932 	pci_disable_device(pdev);
1933 
1934 	/* pci_power_off (pdev, -1); */
1935 }
1936 
1937 #ifdef CONFIG_NET_POLL_CONTROLLER
1938 /*
1939  * Polling 'interrupt' - used by things like netconsole to send skbs
1940  * without having to re-enable interrupts. It's not called while
1941  * the interrupt routine is executing.
1942  */
1943 
1944 static void poll_tulip (struct net_device *dev)
1945 {
1946 	struct tulip_private *tp = netdev_priv(dev);
1947 	const int irq = tp->pdev->irq;
1948 
1949 	/* disable_irq here is not very nice, but with the lockless
1950 	   interrupt handler we have no other choice. */
1951 	disable_irq(irq);
1952 	tulip_interrupt (irq, dev);
1953 	enable_irq(irq);
1954 }
1955 #endif
1956 
1957 static struct pci_driver tulip_driver = {
1958 	.name		= DRV_NAME,
1959 	.id_table	= tulip_pci_tbl,
1960 	.probe		= tulip_init_one,
1961 	.remove		= tulip_remove_one,
1962 #ifdef CONFIG_PM
1963 	.suspend	= tulip_suspend,
1964 	.resume		= tulip_resume,
1965 #endif /* CONFIG_PM */
1966 };
1967 
1968 
1969 static int __init tulip_init (void)
1970 {
1971 #ifdef MODULE
1972 	pr_info("%s", version);
1973 #endif
1974 
1975 	if (!csr0) {
		pr_warn("unknown CPU architecture, using default csr0\n");
1977 		/* default to 8 longword cache line alignment */
1978 		csr0 = 0x00A00000 | 0x4800;
1979 	}
1980 
1981 	/* copy module parms into globals */
1982 	tulip_rx_copybreak = rx_copybreak;
1983 	tulip_max_interrupt_work = max_interrupt_work;
1984 
1985 	/* probe for and init boards */
1986 	return pci_register_driver(&tulip_driver);
1987 }
1988 
1989 
1990 static void __exit tulip_cleanup (void)
1991 {
1992 	pci_unregister_driver (&tulip_driver);
1993 }
1994 
1995 
1996 module_init(tulip_init);
1997 module_exit(tulip_cleanup);
1998