1 /* sundance.c: A Linux device driver for the Sundance ST201 "Alta". */
2 /*
3 	Written 1999-2000 by Donald Becker.
4 
5 	This software may be used and distributed according to the terms of
6 	the GNU General Public License (GPL), incorporated herein by reference.
7 	Drivers based on or derived from this code fall under the GPL and must
8 	retain the authorship, copyright and license notice.  This file is not
9 	a complete program and may only be used when the entire operating
10 	system is licensed under the GPL.
11 
12 	The author may be reached as becker@scyld.com, or C/O
13 	Scyld Computing Corporation
14 	410 Severn Ave., Suite 210
15 	Annapolis MD 21403
16 
17 	Support and updates available at
18 	http://www.scyld.com/network/sundance.html
19 	[link no longer provides useful info -jgarzik]
20 	Archives of the mailing list are still available at
21 	http://www.beowulf.org/pipermail/netdrivers/
22 
23 */
24 
25 #define DRV_NAME	"sundance"
26 #define DRV_VERSION	"1.2"
27 #define DRV_RELDATE	"11-Sep-2006"
28 
29 
30 /* The user-configurable values.
31    These may be modified when a driver module is loaded.*/
32 static int debug = 1;			/* 1 normal messages, 0 quiet .. 7 verbose. */
33 /* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
34    Typical is a 64 element hash table based on the Ethernet CRC.  */
35 static const int multicast_filter_limit = 32;
36 
37 /* Set the copy breakpoint for the copy-only-tiny-frames scheme.
38    Setting to > 1518 effectively disables this feature.
39    This chip can receive into offset buffers, so the Alpha does not
40    need a copy-align. */
41 static int rx_copybreak;
42 static int flowctrl=1;
43 
44 /* media[] specifies the media type the NIC operates at.
45 		 autosense	Autosensing active media.
46 		 10mbps_hd 	10Mbps half duplex.
47 		 10mbps_fd 	10Mbps full duplex.
48 		 100mbps_hd 	100Mbps half duplex.
49 		 100mbps_fd 	100Mbps full duplex.
50 		 0		Autosensing active media.
51 		 1	 	10Mbps half duplex.
52 		 2	 	10Mbps full duplex.
53 		 3	 	100Mbps half duplex.
54 		 4	 	100Mbps full duplex.
55 */
56 #define MAX_UNITS 8
57 static char *media[MAX_UNITS];
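/* Example (illustrative only, assuming the module name "sundance"): loading
   the driver with a per-card media override and flow control enabled:

	modprobe sundance media=100mbps_fd,autosense flowctrl=1

   Cards without an entry in media[] default to autosensing. */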
58 
59 
60 /* Operational parameters that are set at compile time. */
61 
62 /* Keep the ring sizes a power of two for compile efficiency.
63    The compiler will convert <unsigned>'%'<2^N> into a bit mask.
64    Making the Tx ring too large decreases the effectiveness of channel
65    bonding and packet priority, and more than 128 requires modifying the
66    Tx error recovery.
67    Large receive rings merely waste memory. */
68 #define TX_RING_SIZE	32
69 #define TX_QUEUE_LEN	(TX_RING_SIZE - 1) /* Limit ring entries actually used.  */
70 #define RX_RING_SIZE	64
71 #define RX_BUDGET	32
72 #define TX_TOTAL_SIZE	TX_RING_SIZE*sizeof(struct netdev_desc)
73 #define RX_TOTAL_SIZE	RX_RING_SIZE*sizeof(struct netdev_desc)
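/* For reference: with the 16-byte struct netdev_desc defined below, this
   works out to 32 * 16 = 512 bytes of Tx and 64 * 16 = 1024 bytes of Rx
   descriptor space in coherent DMA memory per device. */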
74 
75 /* Operational parameters that usually are not changed. */
76 /* Time in jiffies before concluding the transmitter is hung. */
77 #define TX_TIMEOUT  (4*HZ)
78 #define PKT_BUF_SZ		1536	/* Size of each temporary Rx buffer.*/
79 
80 /* Include files, designed to support most kernel versions 2.0.0 and later. */
81 #include <linux/module.h>
82 #include <linux/kernel.h>
83 #include <linux/string.h>
84 #include <linux/timer.h>
85 #include <linux/errno.h>
86 #include <linux/ioport.h>
87 #include <linux/interrupt.h>
88 #include <linux/pci.h>
89 #include <linux/netdevice.h>
90 #include <linux/etherdevice.h>
91 #include <linux/skbuff.h>
92 #include <linux/init.h>
93 #include <linux/bitops.h>
94 #include <asm/uaccess.h>
95 #include <asm/processor.h>		/* Processor type for cache alignment. */
96 #include <asm/io.h>
97 #include <linux/delay.h>
98 #include <linux/spinlock.h>
99 #include <linux/dma-mapping.h>
100 #include <linux/crc32.h>
101 #include <linux/ethtool.h>
102 #include <linux/mii.h>
103 
104 /* These identify the driver base version and may not be removed. */
105 static const char version[] __devinitconst =
106 	KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE
107 	" Written by Donald Becker\n";
108 
109 MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
110 MODULE_DESCRIPTION("Sundance Alta Ethernet driver");
111 MODULE_LICENSE("GPL");
112 
113 module_param(debug, int, 0);
114 module_param(rx_copybreak, int, 0);
115 module_param_array(media, charp, NULL, 0);
116 module_param(flowctrl, int, 0);
117 MODULE_PARM_DESC(debug, "Sundance Alta debug level (0-5)");
118 MODULE_PARM_DESC(rx_copybreak, "Sundance Alta copy breakpoint for copy-only-tiny-frames");
119 MODULE_PARM_DESC(flowctrl, "Sundance Alta flow control [0|1]");
120 
121 /*
122 				Theory of Operation
123 
124 I. Board Compatibility
125 
126 This driver is designed for the Sundance Technologies "Alta" ST201 chip.
127 
128 II. Board-specific settings
129 
130 III. Driver operation
131 
132 IIIa. Ring buffers
133 
134 This driver uses two statically allocated fixed-size descriptor lists
135 formed into rings by a branch from the final descriptor to the beginning of
136 the list.  The ring sizes are set at compile time by RX/TX_RING_SIZE.
137 Some chips explicitly use only 2^N sized rings, while others use a
138 'next descriptor' pointer that the driver forms into rings.
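
As an illustration (this is not additional driver code), a free-running
index is mapped onto a ring slot with a modulo operation, e.g. as in
start_tx() below:

	entry = np->cur_tx % TX_RING_SIZE;
	txdesc = &np->tx_ring[entry];

Since the ring sizes are powers of two, the '%' compiles to a bit mask.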
139 
140 IIIb/c. Transmit/Receive Structure
141 
142 This driver uses a zero-copy receive and transmit scheme.
143 The driver allocates full frame size skbuffs for the Rx ring buffers at
144 open() time and passes the skb->data field to the chip as receive data
145 buffers.  When an incoming frame is less than RX_COPYBREAK bytes long,
146 a fresh skbuff is allocated and the frame is copied to the new skbuff.
147 When the incoming frame is larger, the skbuff is passed directly up the
148 protocol stack.  Buffers consumed this way are replaced by newly allocated
149 skbuffs in a later phase of receives.
150 
151 The RX_COPYBREAK value is chosen to trade off the memory wasted by
152 using a full-sized skbuff for small frames vs. the copying costs of larger
153 frames.  New boards are typically used in generously configured machines
154 and the underfilled buffers have negligible impact compared to the benefit of
155 a single allocation size, so the default value of zero results in never
156 copying packets.  When copying is done, the cost is usually mitigated by using
157 a combined copy/checksum routine.  Copying also preloads the cache, which is
158 most useful with small frames.
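
The receive path in rx_poll() below therefore amounts to a sketch like:

	if (pkt_len < rx_copybreak &&
	    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL)
		copy the frame into the new skb and leave the ring buffer in place;
	else
		pass the original ring skb up and refill that slot later;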
159 
160 A subtle aspect of the operation is that the IP header at offset 14 in an
161 ethernet frame isn't longword aligned for further processing.
162 Unaligned buffers are permitted by the Sundance hardware, so
163 frames are received into the skbuff at an offset of "+2", 16-byte aligning
164 the IP header.
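
Concretely, the Rx buffers are set up this way in init_ring() and
refill_rx() below:

	skb = netdev_alloc_skb(dev, np->rx_buf_sz + 2);
	skb_reserve(skb, 2);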
165 
166 IIId. Synchronization
167 
168 The driver runs as two independent, single-threaded flows of control.  One
169 is the send-packet routine, which enforces single-threaded use by the
170 dev->tbusy flag.  The other thread is the interrupt handler, which is single
171 threaded by the hardware and interrupt handling software.
172 
173 The send packet thread has partial control over the Tx ring and 'dev->tbusy'
174 flag.  It sets the tbusy flag whenever it's queuing a Tx packet. If the next
175 queue slot is empty, it clears the tbusy flag when finished; otherwise it
176 sets the 'lp->tx_full' flag.
177 
178 The interrupt handler has exclusive control over the Rx ring and records stats
179 from the Tx ring.  After reaping the stats, it marks the Tx queue entry as
180 empty by incrementing the dirty_tx mark. Iff the 'lp->tx_full' flag is set, it
181 clears both the tx_full and tbusy flags.
182 
183 IV. Notes
184 
185 IVb. References
186 
187 The Sundance ST201 datasheet, preliminary version.
188 The Kendin KS8723 datasheet, preliminary version.
189 The ICplus IP100 datasheet, preliminary version.
190 http://www.scyld.com/expert/100mbps.html
191 http://www.scyld.com/expert/NWay.html
192 
193 IVc. Errata
194 
195 */
196 
197 /* Work-around for Kendin chip bugs. */
198 #ifndef CONFIG_SUNDANCE_MMIO
199 #define USE_IO_OPS 1
200 #endif
201 
202 static DEFINE_PCI_DEVICE_TABLE(sundance_pci_tbl) = {
203 	{ 0x1186, 0x1002, 0x1186, 0x1002, 0, 0, 0 },
204 	{ 0x1186, 0x1002, 0x1186, 0x1003, 0, 0, 1 },
205 	{ 0x1186, 0x1002, 0x1186, 0x1012, 0, 0, 2 },
206 	{ 0x1186, 0x1002, 0x1186, 0x1040, 0, 0, 3 },
207 	{ 0x1186, 0x1002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
208 	{ 0x13F0, 0x0201, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5 },
209 	{ 0x13F0, 0x0200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 6 },
210 	{ }
211 };
212 MODULE_DEVICE_TABLE(pci, sundance_pci_tbl);
213 
214 enum {
215 	netdev_io_size = 128
216 };
217 
218 struct pci_id_info {
219         const char *name;
220 };
221 static const struct pci_id_info pci_id_tbl[] __devinitdata = {
222 	{"D-Link DFE-550TX FAST Ethernet Adapter"},
223 	{"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
224 	{"D-Link DFE-580TX 4 port Server Adapter"},
225 	{"D-Link DFE-530TXS FAST Ethernet Adapter"},
226 	{"D-Link DL10050-based FAST Ethernet Adapter"},
227 	{"Sundance Technology Alta"},
228 	{"IC Plus Corporation IP100A FAST Ethernet Adapter"},
229 	{ }	/* terminate list. */
230 };
231 
232 /* This driver was written to use PCI memory space, however x86-oriented
233    hardware often uses I/O space accesses. */
234 
235 /* Offsets to the device registers.
236    Unlike software-only systems, device drivers interact with complex hardware.
237    It's not useful to define symbolic names for every register bit in the
238    device.  Such names can only partially document the semantics, and they
239    make the driver longer and more difficult to read.
240    In general, only the important configuration values or bits changed
241    multiple times should be defined symbolically.
242 */
243 enum alta_offsets {
244 	DMACtrl = 0x00,
245 	TxListPtr = 0x04,
246 	TxDMABurstThresh = 0x08,
247 	TxDMAUrgentThresh = 0x09,
248 	TxDMAPollPeriod = 0x0a,
249 	RxDMAStatus = 0x0c,
250 	RxListPtr = 0x10,
251 	DebugCtrl0 = 0x1a,
252 	DebugCtrl1 = 0x1c,
253 	RxDMABurstThresh = 0x14,
254 	RxDMAUrgentThresh = 0x15,
255 	RxDMAPollPeriod = 0x16,
256 	LEDCtrl = 0x1a,
257 	ASICCtrl = 0x30,
258 	EEData = 0x34,
259 	EECtrl = 0x36,
260 	FlashAddr = 0x40,
261 	FlashData = 0x44,
262 	TxStatus = 0x46,
263 	TxFrameId = 0x47,
264 	DownCounter = 0x18,
265 	IntrClear = 0x4a,
266 	IntrEnable = 0x4c,
267 	IntrStatus = 0x4e,
268 	MACCtrl0 = 0x50,
269 	MACCtrl1 = 0x52,
270 	StationAddr = 0x54,
271 	MaxFrameSize = 0x5A,
272 	RxMode = 0x5c,
273 	MIICtrl = 0x5e,
274 	MulticastFilter0 = 0x60,
275 	MulticastFilter1 = 0x64,
276 	RxOctetsLow = 0x68,
277 	RxOctetsHigh = 0x6a,
278 	TxOctetsLow = 0x6c,
279 	TxOctetsHigh = 0x6e,
280 	TxFramesOK = 0x70,
281 	RxFramesOK = 0x72,
282 	StatsCarrierError = 0x74,
283 	StatsLateColl = 0x75,
284 	StatsMultiColl = 0x76,
285 	StatsOneColl = 0x77,
286 	StatsTxDefer = 0x78,
287 	RxMissed = 0x79,
288 	StatsTxXSDefer = 0x7a,
289 	StatsTxAbort = 0x7b,
290 	StatsBcastTx = 0x7c,
291 	StatsBcastRx = 0x7d,
292 	StatsMcastTx = 0x7e,
293 	StatsMcastRx = 0x7f,
294 	/* Aliased and bogus values! */
295 	RxStatus = 0x0c,
296 };
297 
298 #define ASIC_HI_WORD(x)	((x) + 2)
299 
300 enum ASICCtrl_HiWord_bit {
301 	GlobalReset = 0x0001,
302 	RxReset = 0x0002,
303 	TxReset = 0x0004,
304 	DMAReset = 0x0008,
305 	FIFOReset = 0x0010,
306 	NetworkReset = 0x0020,
307 	HostReset = 0x0040,
308 	ResetBusy = 0x0400,
309 };
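/* These bits sit in the high half of the 32-bit ASICCtrl register, so a reset
   request is formed by shifting them up by 16 bits before passing it to
   sundance_reset(), e.g. as reset_tx() below does:

	sundance_reset(dev, (NetworkReset | FIFOReset | DMAReset | TxReset) << 16);
*/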
310 
311 /* Bits in the interrupt status/mask registers. */
312 enum intr_status_bits {
313 	IntrSummary=0x0001, IntrPCIErr=0x0002, IntrMACCtrl=0x0008,
314 	IntrTxDone=0x0004, IntrRxDone=0x0010, IntrRxStart=0x0020,
315 	IntrDrvRqst=0x0040,
316 	StatsMax=0x0080, LinkChange=0x0100,
317 	IntrTxDMADone=0x0200, IntrRxDMADone=0x0400,
318 };
319 
320 /* Bits in the RxMode register. */
321 enum rx_mode_bits {
322 	AcceptAllIPMulti=0x20, AcceptMultiHash=0x10, AcceptAll=0x08,
323 	AcceptBroadcast=0x04, AcceptMulticast=0x02, AcceptMyPhys=0x01,
324 };
325 /* Bits in MACCtrl. */
326 enum mac_ctrl0_bits {
327 	EnbFullDuplex=0x20, EnbRcvLargeFrame=0x40,
328 	EnbFlowCtrl=0x100, EnbPassRxCRC=0x200,
329 };
330 enum mac_ctrl1_bits {
331 	StatsEnable=0x0020,	StatsDisable=0x0040, StatsEnabled=0x0080,
332 	TxEnable=0x0100, TxDisable=0x0200, TxEnabled=0x0400,
333 	RxEnable=0x0800, RxDisable=0x1000, RxEnabled=0x2000,
334 };
335 
336 /* The Rx and Tx buffer descriptors. */
337 /* Note that using only 32 bit fields simplifies conversion to big-endian
338    architectures. */
339 struct netdev_desc {
340 	__le32 next_desc;
341 	__le32 status;
342 	struct desc_frag { __le32 addr, length; } frag[1];
343 };
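/* As an illustration, start_tx() below fills a single-fragment Tx descriptor
   roughly as follows (error handling omitted):

	txdesc->next_desc = 0;
	txdesc->status = cpu_to_le32((entry << 2) | DisableAlign);
	txdesc->frag[0].addr = cpu_to_le32(dma_map_single(&np->pci_dev->dev,
				skb->data, skb->len, DMA_TO_DEVICE));
	txdesc->frag[0].length = cpu_to_le32(skb->len | LastFrag);
*/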
344 
345 /* Bits in netdev_desc.status */
346 enum desc_status_bits {
347 	DescOwn=0x8000,
348 	DescEndPacket=0x4000,
349 	DescEndRing=0x2000,
350 	LastFrag=0x80000000,
351 	DescIntrOnTx=0x8000,
352 	DescIntrOnDMADone=0x80000000,
353 	DisableAlign = 0x00000001,
354 };
355 
356 #define PRIV_ALIGN	15 	/* Required alignment mask */
357 /* Use  __attribute__((aligned (L1_CACHE_BYTES)))  to maintain alignment
358    within the structure. */
359 #define MII_CNT		4
360 struct netdev_private {
361 	/* Descriptor rings first for alignment. */
362 	struct netdev_desc *rx_ring;
363 	struct netdev_desc *tx_ring;
364 	struct sk_buff* rx_skbuff[RX_RING_SIZE];
365 	struct sk_buff* tx_skbuff[TX_RING_SIZE];
366         dma_addr_t tx_ring_dma;
367         dma_addr_t rx_ring_dma;
368 	struct timer_list timer;		/* Media monitoring timer. */
369 	/* ethtool extra stats */
370 	struct {
371 		u64 tx_multiple_collisions;
372 		u64 tx_single_collisions;
373 		u64 tx_late_collisions;
374 		u64 tx_deferred;
375 		u64 tx_deferred_excessive;
376 		u64 tx_aborted;
377 		u64 tx_bcasts;
378 		u64 rx_bcasts;
379 		u64 tx_mcasts;
380 		u64 rx_mcasts;
381 	} xstats;
382 	/* Frequently used values: keep some adjacent for cache effect. */
383 	spinlock_t lock;
384 	int msg_enable;
385 	int chip_id;
386 	unsigned int cur_rx, dirty_rx;		/* Producer/consumer ring indices */
387 	unsigned int rx_buf_sz;			/* Based on MTU+slack. */
388 	struct netdev_desc *last_tx;		/* Last Tx descriptor used. */
389 	unsigned int cur_tx, dirty_tx;
390 	/* These values keep track of the transceiver/media in use. */
391 	unsigned int flowctrl:1;
392 	unsigned int default_port:4;		/* Last dev->if_port value. */
393 	unsigned int an_enable:1;
394 	unsigned int speed;
395 	struct tasklet_struct rx_tasklet;
396 	struct tasklet_struct tx_tasklet;
397 	int budget;
398 	int cur_task;
399 	/* Multicast and receive mode. */
400 	spinlock_t mcastlock;			/* SMP lock multicast updates. */
401 	u16 mcast_filter[4];
402 	/* MII transceiver section. */
403 	struct mii_if_info mii_if;
404 	int mii_preamble_required;
405 	unsigned char phys[MII_CNT];		/* MII device addresses, only first one used. */
406 	struct pci_dev *pci_dev;
407 	void __iomem *base;
408 	spinlock_t statlock;
409 };
410 
411 /* The station address location in the EEPROM. */
412 #define EEPROM_SA_OFFSET	0x10
413 #define DEFAULT_INTR (IntrRxDMADone | IntrPCIErr | \
414 			IntrDrvRqst | IntrTxDone | StatsMax | \
415 			LinkChange)
416 
417 static int  change_mtu(struct net_device *dev, int new_mtu);
418 static int  eeprom_read(void __iomem *ioaddr, int location);
419 static int  mdio_read(struct net_device *dev, int phy_id, int location);
420 static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
421 static int  mdio_wait_link(struct net_device *dev, int wait);
422 static int  netdev_open(struct net_device *dev);
423 static void check_duplex(struct net_device *dev);
424 static void netdev_timer(unsigned long data);
425 static void tx_timeout(struct net_device *dev);
426 static void init_ring(struct net_device *dev);
427 static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev);
428 static int reset_tx (struct net_device *dev);
429 static irqreturn_t intr_handler(int irq, void *dev_instance);
430 static void rx_poll(unsigned long data);
431 static void tx_poll(unsigned long data);
432 static void refill_rx (struct net_device *dev);
433 static void netdev_error(struct net_device *dev, int intr_status);
435 static void set_rx_mode(struct net_device *dev);
436 static int __set_mac_addr(struct net_device *dev);
437 static int sundance_set_mac_addr(struct net_device *dev, void *data);
438 static struct net_device_stats *get_stats(struct net_device *dev);
439 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
440 static int  netdev_close(struct net_device *dev);
441 static const struct ethtool_ops ethtool_ops;
442 
443 static void sundance_reset(struct net_device *dev, unsigned long reset_cmd)
444 {
445 	struct netdev_private *np = netdev_priv(dev);
446 	void __iomem *ioaddr = np->base + ASICCtrl;
447 	int countdown;
448 
449 	/* ST201 documentation states ASICCtrl is a 32-bit register */
450 	iowrite32 (reset_cmd | ioread32 (ioaddr), ioaddr);
451 	/* ST201 documentation states reset can take up to 1 ms */
452 	countdown = 10 + 1;
453 	while (ioread32 (ioaddr) & (ResetBusy << 16)) {
454 		if (--countdown == 0) {
455 			printk(KERN_WARNING "%s : reset not completed !!\n", dev->name);
456 			break;
457 		}
458 		udelay(100);
459 	}
460 }
461 
462 static const struct net_device_ops netdev_ops = {
463 	.ndo_open		= netdev_open,
464 	.ndo_stop		= netdev_close,
465 	.ndo_start_xmit		= start_tx,
466 	.ndo_get_stats 		= get_stats,
467 	.ndo_set_rx_mode	= set_rx_mode,
468 	.ndo_do_ioctl 		= netdev_ioctl,
469 	.ndo_tx_timeout		= tx_timeout,
470 	.ndo_change_mtu		= change_mtu,
471 	.ndo_set_mac_address 	= sundance_set_mac_addr,
472 	.ndo_validate_addr	= eth_validate_addr,
473 };
474 
475 static int __devinit sundance_probe1 (struct pci_dev *pdev,
476 				      const struct pci_device_id *ent)
477 {
478 	struct net_device *dev;
479 	struct netdev_private *np;
480 	static int card_idx;
481 	int chip_idx = ent->driver_data;
482 	int irq;
483 	int i;
484 	void __iomem *ioaddr;
485 	u16 mii_ctl;
486 	void *ring_space;
487 	dma_addr_t ring_dma;
488 #ifdef USE_IO_OPS
489 	int bar = 0;
490 #else
491 	int bar = 1;
492 #endif
493 	int phy, phy_end, phy_idx = 0;
494 
495 /* when built into the kernel, we only print version if device is found */
496 #ifndef MODULE
497 	static int printed_version;
498 	if (!printed_version++)
499 		printk(version);
500 #endif
501 
502 	if (pci_enable_device(pdev))
503 		return -EIO;
504 	pci_set_master(pdev);
505 
506 	irq = pdev->irq;
507 
508 	dev = alloc_etherdev(sizeof(*np));
509 	if (!dev)
510 		return -ENOMEM;
511 	SET_NETDEV_DEV(dev, &pdev->dev);
512 
513 	if (pci_request_regions(pdev, DRV_NAME))
514 		goto err_out_netdev;
515 
516 	ioaddr = pci_iomap(pdev, bar, netdev_io_size);
517 	if (!ioaddr)
518 		goto err_out_res;
519 
520 	for (i = 0; i < 3; i++)
521 		((__le16 *)dev->dev_addr)[i] =
522 			cpu_to_le16(eeprom_read(ioaddr, i + EEPROM_SA_OFFSET));
523 	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
524 
525 	dev->base_addr = (unsigned long)ioaddr;
526 	dev->irq = irq;
527 
528 	np = netdev_priv(dev);
529 	np->base = ioaddr;
530 	np->pci_dev = pdev;
531 	np->chip_id = chip_idx;
532 	np->msg_enable = (1 << debug) - 1;
533 	spin_lock_init(&np->lock);
534 	spin_lock_init(&np->statlock);
535 	tasklet_init(&np->rx_tasklet, rx_poll, (unsigned long)dev);
536 	tasklet_init(&np->tx_tasklet, tx_poll, (unsigned long)dev);
537 
538 	ring_space = dma_alloc_coherent(&pdev->dev, TX_TOTAL_SIZE,
539 			&ring_dma, GFP_KERNEL);
540 	if (!ring_space)
541 		goto err_out_cleardev;
542 	np->tx_ring = (struct netdev_desc *)ring_space;
543 	np->tx_ring_dma = ring_dma;
544 
545 	ring_space = dma_alloc_coherent(&pdev->dev, RX_TOTAL_SIZE,
546 			&ring_dma, GFP_KERNEL);
547 	if (!ring_space)
548 		goto err_out_unmap_tx;
549 	np->rx_ring = (struct netdev_desc *)ring_space;
550 	np->rx_ring_dma = ring_dma;
551 
552 	np->mii_if.dev = dev;
553 	np->mii_if.mdio_read = mdio_read;
554 	np->mii_if.mdio_write = mdio_write;
555 	np->mii_if.phy_id_mask = 0x1f;
556 	np->mii_if.reg_num_mask = 0x1f;
557 
558 	/* The chip-specific entries in the device structure. */
559 	dev->netdev_ops = &netdev_ops;
560 	SET_ETHTOOL_OPS(dev, &ethtool_ops);
561 	dev->watchdog_timeo = TX_TIMEOUT;
562 
563 	pci_set_drvdata(pdev, dev);
564 
565 	i = register_netdev(dev);
566 	if (i)
567 		goto err_out_unmap_rx;
568 
569 	printk(KERN_INFO "%s: %s at %p, %pM, IRQ %d.\n",
570 	       dev->name, pci_id_tbl[chip_idx].name, ioaddr,
571 	       dev->dev_addr, irq);
572 
573 	np->phys[0] = 1;		/* Default setting */
574 	np->mii_preamble_required++;
575 
576 	/*
577 	 * It seems some PHYs don't deal well with address 0 being accessed
578 	 * first.
579 	 */
580 	if (sundance_pci_tbl[np->chip_id].device == 0x0200) {
581 		phy = 0;
582 		phy_end = 31;
583 	} else {
584 		phy = 1;
585 		phy_end = 32;	/* wraps to zero, due to 'phy & 0x1f' */
586 	}
587 	for (; phy <= phy_end && phy_idx < MII_CNT; phy++) {
588 		int phyx = phy & 0x1f;
589 		int mii_status = mdio_read(dev, phyx, MII_BMSR);
590 		if (mii_status != 0xffff  &&  mii_status != 0x0000) {
591 			np->phys[phy_idx++] = phyx;
592 			np->mii_if.advertising = mdio_read(dev, phyx, MII_ADVERTISE);
593 			if ((mii_status & 0x0040) == 0)
594 				np->mii_preamble_required++;
595 			printk(KERN_INFO "%s: MII PHY found at address %d, status "
596 				   "0x%4.4x advertising %4.4x.\n",
597 				   dev->name, phyx, mii_status, np->mii_if.advertising);
598 		}
599 	}
600 	np->mii_preamble_required--;
601 
602 	if (phy_idx == 0) {
603 		printk(KERN_INFO "%s: No MII transceiver found, aborting.  ASIC status %x\n",
604 			   dev->name, ioread32(ioaddr + ASICCtrl));
605 		goto err_out_unregister;
606 	}
607 
608 	np->mii_if.phy_id = np->phys[0];
609 
610 	/* Parse override configuration */
611 	np->an_enable = 1;
612 	if (card_idx < MAX_UNITS) {
613 		if (media[card_idx] != NULL) {
614 			np->an_enable = 0;
615 			if (strcmp (media[card_idx], "100mbps_fd") == 0 ||
616 			    strcmp (media[card_idx], "4") == 0) {
617 				np->speed = 100;
618 				np->mii_if.full_duplex = 1;
619 			} else if (strcmp (media[card_idx], "100mbps_hd") == 0 ||
620 				   strcmp (media[card_idx], "3") == 0) {
621 				np->speed = 100;
622 				np->mii_if.full_duplex = 0;
623 			} else if (strcmp (media[card_idx], "10mbps_fd") == 0 ||
624 				   strcmp (media[card_idx], "2") == 0) {
625 				np->speed = 10;
626 				np->mii_if.full_duplex = 1;
627 			} else if (strcmp (media[card_idx], "10mbps_hd") == 0 ||
628 				   strcmp (media[card_idx], "1") == 0) {
629 				np->speed = 10;
630 				np->mii_if.full_duplex = 0;
631 			} else {
632 				np->an_enable = 1;
633 			}
634 		}
635 		if (flowctrl == 1)
636 			np->flowctrl = 1;
637 	}
638 
639 	/* Fibre PHY? */
640 	if (ioread32 (ioaddr + ASICCtrl) & 0x80) {
641 		/* Default 100Mbps Full */
642 		if (np->an_enable) {
643 			np->speed = 100;
644 			np->mii_if.full_duplex = 1;
645 			np->an_enable = 0;
646 		}
647 	}
648 	/* Reset PHY */
649 	mdio_write (dev, np->phys[0], MII_BMCR, BMCR_RESET);
650 	mdelay (300);
651 	/* If flow control enabled, we need to advertise it.*/
652 	if (np->flowctrl)
653 		mdio_write (dev, np->phys[0], MII_ADVERTISE, np->mii_if.advertising | 0x0400);
654 	mdio_write (dev, np->phys[0], MII_BMCR, BMCR_ANENABLE|BMCR_ANRESTART);
655 	/* Force media type */
656 	if (!np->an_enable) {
657 		mii_ctl = 0;
658 		mii_ctl |= (np->speed == 100) ? BMCR_SPEED100 : 0;
659 		mii_ctl |= (np->mii_if.full_duplex) ? BMCR_FULLDPLX : 0;
660 		mdio_write (dev, np->phys[0], MII_BMCR, mii_ctl);
661 		printk (KERN_INFO "Override speed=%d, %s duplex\n",
662 			np->speed, np->mii_if.full_duplex ? "Full" : "Half");
663 
664 	}
665 
666 	/* Perhaps move the reset here? */
667 	/* Reset the chip to erase previous misconfiguration. */
668 	if (netif_msg_hw(np))
669 		printk("ASIC Control is %x.\n", ioread32(ioaddr + ASICCtrl));
670 	sundance_reset(dev, 0x00ff << 16);
671 	if (netif_msg_hw(np))
672 		printk("ASIC Control is now %x.\n", ioread32(ioaddr + ASICCtrl));
673 
674 	card_idx++;
675 	return 0;
676 
677 err_out_unregister:
678 	unregister_netdev(dev);
679 err_out_unmap_rx:
680 	dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE,
681 		np->rx_ring, np->rx_ring_dma);
682 err_out_unmap_tx:
683 	dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE,
684 		np->tx_ring, np->tx_ring_dma);
685 err_out_cleardev:
686 	pci_set_drvdata(pdev, NULL);
687 	pci_iounmap(pdev, ioaddr);
688 err_out_res:
689 	pci_release_regions(pdev);
690 err_out_netdev:
691 	free_netdev (dev);
692 	return -ENODEV;
693 }
694 
695 static int change_mtu(struct net_device *dev, int new_mtu)
696 {
697 	if ((new_mtu < 68) || (new_mtu > 8191)) /* Set by RxDMAFrameLen */
698 		return -EINVAL;
699 	if (netif_running(dev))
700 		return -EBUSY;
701 	dev->mtu = new_mtu;
702 	return 0;
703 }
704 
705 #define eeprom_delay(ee_addr)	ioread32(ee_addr)
706 /* Read the EEPROM and MII Management Data I/O (MDIO) interfaces. */
707 static int __devinit eeprom_read(void __iomem *ioaddr, int location)
708 {
709 	int boguscnt = 10000;		/* Typical 1900 ticks. */
710 	iowrite16(0x0200 | (location & 0xff), ioaddr + EECtrl);
711 	do {
712 		eeprom_delay(ioaddr + EECtrl);
713 		if (! (ioread16(ioaddr + EECtrl) & 0x8000)) {
714 			return ioread16(ioaddr + EEData);
715 		}
716 	} while (--boguscnt > 0);
717 	return 0;
718 }
719 
720 /*  MII transceiver control section.
721 	Read and write the MII registers using software-generated serial
722 	MDIO protocol.  See the MII specifications or DP83840A data sheet
723 	for details.
724 
725 	The maximum data clock rate is 2.5 MHz.  The minimum timing is usually
726 	met by back-to-back 33 MHz PCI cycles. */
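/* For reference, the bit-banged frames below follow IEEE 802.3 clause 22 MDIO
   framing.  A read command is shifted out as 16 bits:

	mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;

   i.e. two leading 1 bits, start (01), read opcode (10), a 5-bit PHY address
   and a 5-bit register address; a write is shifted out as 32 bits with the
   write opcode (01), a 10 turnaround and then the 16 data bits, as encoded in
   mdio_read() and mdio_write() below. */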
727 #define mdio_delay() ioread8(mdio_addr)
728 
729 enum mii_reg_bits {
730 	MDIO_ShiftClk=0x0001, MDIO_Data=0x0002, MDIO_EnbOutput=0x0004,
731 };
732 #define MDIO_EnbIn  (0)
733 #define MDIO_WRITE0 (MDIO_EnbOutput)
734 #define MDIO_WRITE1 (MDIO_Data | MDIO_EnbOutput)
735 
736 /* Generate the preamble required for initial synchronization and
737    by a few older transceivers. */
738 static void mdio_sync(void __iomem *mdio_addr)
739 {
740 	int bits = 32;
741 
742 	/* Establish sync by sending at least 32 logic ones. */
743 	while (--bits >= 0) {
744 		iowrite8(MDIO_WRITE1, mdio_addr);
745 		mdio_delay();
746 		iowrite8(MDIO_WRITE1 | MDIO_ShiftClk, mdio_addr);
747 		mdio_delay();
748 	}
749 }
750 
751 static int mdio_read(struct net_device *dev, int phy_id, int location)
752 {
753 	struct netdev_private *np = netdev_priv(dev);
754 	void __iomem *mdio_addr = np->base + MIICtrl;
755 	int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
756 	int i, retval = 0;
757 
758 	if (np->mii_preamble_required)
759 		mdio_sync(mdio_addr);
760 
761 	/* Shift the read command bits out. */
762 	for (i = 15; i >= 0; i--) {
763 		int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
764 
765 		iowrite8(dataval, mdio_addr);
766 		mdio_delay();
767 		iowrite8(dataval | MDIO_ShiftClk, mdio_addr);
768 		mdio_delay();
769 	}
770 	/* Read the two transition bits, 16 data bits, and one wire-idle bit. */
771 	for (i = 19; i > 0; i--) {
772 		iowrite8(MDIO_EnbIn, mdio_addr);
773 		mdio_delay();
774 		retval = (retval << 1) | ((ioread8(mdio_addr) & MDIO_Data) ? 1 : 0);
775 		iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
776 		mdio_delay();
777 	}
778 	return (retval>>1) & 0xffff;
779 }
780 
781 static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
782 {
783 	struct netdev_private *np = netdev_priv(dev);
784 	void __iomem *mdio_addr = np->base + MIICtrl;
785 	int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location<<18) | value;
786 	int i;
787 
788 	if (np->mii_preamble_required)
789 		mdio_sync(mdio_addr);
790 
791 	/* Shift the command bits out. */
792 	for (i = 31; i >= 0; i--) {
793 		int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
794 
795 		iowrite8(dataval, mdio_addr);
796 		mdio_delay();
797 		iowrite8(dataval | MDIO_ShiftClk, mdio_addr);
798 		mdio_delay();
799 	}
800 	/* Clear out extra bits. */
801 	for (i = 2; i > 0; i--) {
802 		iowrite8(MDIO_EnbIn, mdio_addr);
803 		mdio_delay();
804 		iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
805 		mdio_delay();
806 	}
807 }
808 
809 static int mdio_wait_link(struct net_device *dev, int wait)
810 {
811 	int bmsr;
812 	int phy_id;
813 	struct netdev_private *np;
814 
815 	np = netdev_priv(dev);
816 	phy_id = np->phys[0];
817 
818 	do {
819 		bmsr = mdio_read(dev, phy_id, MII_BMSR);
820 		if (bmsr & 0x0004)
821 			return 0;
822 		mdelay(1);
823 	} while (--wait > 0);
824 	return -1;
825 }
826 
827 static int netdev_open(struct net_device *dev)
828 {
829 	struct netdev_private *np = netdev_priv(dev);
830 	void __iomem *ioaddr = np->base;
831 	unsigned long flags;
832 	int i;
833 
834 	/* Do we need to reset the chip??? */
835 
836 	i = request_irq(dev->irq, intr_handler, IRQF_SHARED, dev->name, dev);
837 	if (i)
838 		return i;
839 
840 	if (netif_msg_ifup(np))
841 		printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
842 			   dev->name, dev->irq);
843 	init_ring(dev);
844 
845 	iowrite32(np->rx_ring_dma, ioaddr + RxListPtr);
846 	/* The Tx list pointer is written as packets are queued. */
847 
848 	/* Initialize other registers. */
849 	__set_mac_addr(dev);
850 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
851 	iowrite16(dev->mtu + 18, ioaddr + MaxFrameSize);
852 #else
853 	iowrite16(dev->mtu + 14, ioaddr + MaxFrameSize);
854 #endif
855 	if (dev->mtu > 2047)
856 		iowrite32(ioread32(ioaddr + ASICCtrl) | 0x0C, ioaddr + ASICCtrl);
857 
858 	/* Configure the PCI bus bursts and FIFO thresholds. */
859 
860 	if (dev->if_port == 0)
861 		dev->if_port = np->default_port;
862 
863 	spin_lock_init(&np->mcastlock);
864 
865 	set_rx_mode(dev);
866 	iowrite16(0, ioaddr + IntrEnable);
867 	iowrite16(0, ioaddr + DownCounter);
868 	/* Set the chip to poll every N*320nsec. */
869 	iowrite8(100, ioaddr + RxDMAPollPeriod);
870 	iowrite8(127, ioaddr + TxDMAPollPeriod);
871 	/* Fix DFE-580TX packet drop issue */
872 	if (np->pci_dev->revision >= 0x14)
873 		iowrite8(0x01, ioaddr + DebugCtrl1);
874 	netif_start_queue(dev);
875 
876 	spin_lock_irqsave(&np->lock, flags);
877 	reset_tx(dev);
878 	spin_unlock_irqrestore(&np->lock, flags);
879 
880 	iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);
881 
882 	if (netif_msg_ifup(np))
883 		printk(KERN_DEBUG "%s: Done netdev_open(), status: Rx %x Tx %x "
884 			   "MAC Control %x, %4.4x %4.4x.\n",
885 			   dev->name, ioread32(ioaddr + RxStatus), ioread8(ioaddr + TxStatus),
886 			   ioread32(ioaddr + MACCtrl0),
887 			   ioread16(ioaddr + MACCtrl1), ioread16(ioaddr + MACCtrl0));
888 
889 	/* Set the timer to check for link beat. */
890 	init_timer(&np->timer);
891 	np->timer.expires = jiffies + 3*HZ;
892 	np->timer.data = (unsigned long)dev;
893 	np->timer.function = netdev_timer;				/* timer handler */
894 	add_timer(&np->timer);
895 
896 	/* Enable interrupts by setting the interrupt mask. */
897 	iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
898 
899 	return 0;
900 }
901 
902 static void check_duplex(struct net_device *dev)
903 {
904 	struct netdev_private *np = netdev_priv(dev);
905 	void __iomem *ioaddr = np->base;
906 	int mii_lpa = mdio_read(dev, np->phys[0], MII_LPA);
907 	int negotiated = mii_lpa & np->mii_if.advertising;
908 	int duplex;
909 
910 	/* Force media */
911 	if (!np->an_enable || mii_lpa == 0xffff) {
912 		if (np->mii_if.full_duplex)
913 			iowrite16 (ioread16 (ioaddr + MACCtrl0) | EnbFullDuplex,
914 				ioaddr + MACCtrl0);
915 		return;
916 	}
917 
918 	/* Autonegotiation */
919 	duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
920 	if (np->mii_if.full_duplex != duplex) {
921 		np->mii_if.full_duplex = duplex;
922 		if (netif_msg_link(np))
923 			printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d "
924 				   "negotiated capability %4.4x.\n", dev->name,
925 				   duplex ? "full" : "half", np->phys[0], negotiated);
926 		iowrite16(ioread16(ioaddr + MACCtrl0) | (duplex ? 0x20 : 0), ioaddr + MACCtrl0);
927 	}
928 }
929 
930 static void netdev_timer(unsigned long data)
931 {
932 	struct net_device *dev = (struct net_device *)data;
933 	struct netdev_private *np = netdev_priv(dev);
934 	void __iomem *ioaddr = np->base;
935 	int next_tick = 10*HZ;
936 
937 	if (netif_msg_timer(np)) {
938 		printk(KERN_DEBUG "%s: Media selection timer tick, intr status %4.4x, "
939 			   "Tx %x Rx %x.\n",
940 			   dev->name, ioread16(ioaddr + IntrEnable),
941 			   ioread8(ioaddr + TxStatus), ioread32(ioaddr + RxStatus));
942 	}
943 	check_duplex(dev);
944 	np->timer.expires = jiffies + next_tick;
945 	add_timer(&np->timer);
946 }
947 
948 static void tx_timeout(struct net_device *dev)
949 {
950 	struct netdev_private *np = netdev_priv(dev);
951 	void __iomem *ioaddr = np->base;
952 	unsigned long flag;
953 
954 	netif_stop_queue(dev);
955 	tasklet_disable(&np->tx_tasklet);
956 	iowrite16(0, ioaddr + IntrEnable);
957 	printk(KERN_WARNING "%s: Transmit timed out, TxStatus %2.2x "
958 		   "TxFrameId %2.2x,"
959 		   " resetting...\n", dev->name, ioread8(ioaddr + TxStatus),
960 		   ioread8(ioaddr + TxFrameId));
961 
962 	{
963 		int i;
964 		for (i=0; i<TX_RING_SIZE; i++) {
965 			printk(KERN_DEBUG "%02x %08llx %08x %08x(%02x) %08x %08x\n", i,
966 				(unsigned long long)(np->tx_ring_dma + i*sizeof(*np->tx_ring)),
967 				le32_to_cpu(np->tx_ring[i].next_desc),
968 				le32_to_cpu(np->tx_ring[i].status),
969 				(le32_to_cpu(np->tx_ring[i].status) >> 2) & 0xff,
970 				le32_to_cpu(np->tx_ring[i].frag[0].addr),
971 				le32_to_cpu(np->tx_ring[i].frag[0].length));
972 		}
973 		printk(KERN_DEBUG "TxListPtr=%08x netif_queue_stopped=%d\n",
974 			ioread32(np->base + TxListPtr),
975 			netif_queue_stopped(dev));
976 		printk(KERN_DEBUG "cur_tx=%d(%02x) dirty_tx=%d(%02x)\n",
977 			np->cur_tx, np->cur_tx % TX_RING_SIZE,
978 			np->dirty_tx, np->dirty_tx % TX_RING_SIZE);
979 		printk(KERN_DEBUG "cur_rx=%d dirty_rx=%d\n", np->cur_rx, np->dirty_rx);
980 		printk(KERN_DEBUG "cur_task=%d\n", np->cur_task);
981 	}
982 	spin_lock_irqsave(&np->lock, flag);
983 
984 	/* Stop and restart the chip's Tx processes. */
985 	reset_tx(dev);
986 	spin_unlock_irqrestore(&np->lock, flag);
987 
988 	dev->if_port = 0;
989 
990 	dev->trans_start = jiffies; /* prevent tx timeout */
991 	dev->stats.tx_errors++;
992 	if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
993 		netif_wake_queue(dev);
994 	}
995 	iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
996 	tasklet_enable(&np->tx_tasklet);
997 }
998 
999 
1000 /* Initialize the Rx and Tx rings, along with various 'dev' bits. */
1001 static void init_ring(struct net_device *dev)
1002 {
1003 	struct netdev_private *np = netdev_priv(dev);
1004 	int i;
1005 
1006 	np->cur_rx = np->cur_tx = 0;
1007 	np->dirty_rx = np->dirty_tx = 0;
1008 	np->cur_task = 0;
1009 
1010 	np->rx_buf_sz = (dev->mtu <= 1520 ? PKT_BUF_SZ : dev->mtu + 16);
1011 
1012 	/* Initialize all Rx descriptors. */
1013 	for (i = 0; i < RX_RING_SIZE; i++) {
1014 		np->rx_ring[i].next_desc = cpu_to_le32(np->rx_ring_dma +
1015 			((i+1)%RX_RING_SIZE)*sizeof(*np->rx_ring));
1016 		np->rx_ring[i].status = 0;
1017 		np->rx_ring[i].frag[0].length = 0;
1018 		np->rx_skbuff[i] = NULL;
1019 	}
1020 
1021 	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
1022 	for (i = 0; i < RX_RING_SIZE; i++) {
1023 		struct sk_buff *skb =
1024 			netdev_alloc_skb(dev, np->rx_buf_sz + 2);
1025 		np->rx_skbuff[i] = skb;
1026 		if (skb == NULL)
1027 			break;
1028 		skb_reserve(skb, 2);	/* 16 byte align the IP header. */
1029 		np->rx_ring[i].frag[0].addr = cpu_to_le32(
1030 			dma_map_single(&np->pci_dev->dev, skb->data,
1031 				np->rx_buf_sz, DMA_FROM_DEVICE));
1032 		if (dma_mapping_error(&np->pci_dev->dev,
1033 					np->rx_ring[i].frag[0].addr)) {
1034 			dev_kfree_skb(skb);
1035 			np->rx_skbuff[i] = NULL;
1036 			break;
1037 		}
1038 		np->rx_ring[i].frag[0].length = cpu_to_le32(np->rx_buf_sz | LastFrag);
1039 	}
1040 	np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
1041 
1042 	for (i = 0; i < TX_RING_SIZE; i++) {
1043 		np->tx_skbuff[i] = NULL;
1044 		np->tx_ring[i].status = 0;
1045 	}
1046 }
1047 
1048 static void tx_poll (unsigned long data)
1049 {
1050 	struct net_device *dev = (struct net_device *)data;
1051 	struct netdev_private *np = netdev_priv(dev);
1052 	unsigned head = np->cur_task % TX_RING_SIZE;
1053 	struct netdev_desc *txdesc =
1054 		&np->tx_ring[(np->cur_tx - 1) % TX_RING_SIZE];
1055 
1056 	/* Chain the next pointer */
1057 	for (; np->cur_tx - np->cur_task > 0; np->cur_task++) {
1058 		int entry = np->cur_task % TX_RING_SIZE;
1059 		txdesc = &np->tx_ring[entry];
1060 		if (np->last_tx) {
1061 			np->last_tx->next_desc = cpu_to_le32(np->tx_ring_dma +
1062 				entry*sizeof(struct netdev_desc));
1063 		}
1064 		np->last_tx = txdesc;
1065 	}
1066 	/* Indicate the latest descriptor in the Tx ring */
1067 	txdesc->status |= cpu_to_le32(DescIntrOnTx);
1068 
1069 	if (ioread32 (np->base + TxListPtr) == 0)
1070 		iowrite32 (np->tx_ring_dma + head * sizeof(struct netdev_desc),
1071 			np->base + TxListPtr);
1072 }
1073 
1074 static netdev_tx_t
1075 start_tx (struct sk_buff *skb, struct net_device *dev)
1076 {
1077 	struct netdev_private *np = netdev_priv(dev);
1078 	struct netdev_desc *txdesc;
1079 	unsigned entry;
1080 
1081 	/* Calculate the next Tx descriptor entry. */
1082 	entry = np->cur_tx % TX_RING_SIZE;
1083 	np->tx_skbuff[entry] = skb;
1084 	txdesc = &np->tx_ring[entry];
1085 
1086 	txdesc->next_desc = 0;
1087 	txdesc->status = cpu_to_le32 ((entry << 2) | DisableAlign);
1088 	txdesc->frag[0].addr = cpu_to_le32(dma_map_single(&np->pci_dev->dev,
1089 				skb->data, skb->len, DMA_TO_DEVICE));
1090 	if (dma_mapping_error(&np->pci_dev->dev,
1091 				txdesc->frag[0].addr))
1092 			goto drop_frame;
1093 	txdesc->frag[0].length = cpu_to_le32 (skb->len | LastFrag);
1094 
1095 	/* Increment cur_tx before tasklet_schedule() */
1096 	np->cur_tx++;
1097 	mb();
1098 	/* Schedule a tx_poll() task */
1099 	tasklet_schedule(&np->tx_tasklet);
1100 
1101 	/* On some architectures: explicitly flush cache lines here. */
1102 	if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 1 &&
1103 	    !netif_queue_stopped(dev)) {
1104 		/* do nothing */
1105 	} else {
1106 		netif_stop_queue (dev);
1107 	}
1108 	if (netif_msg_tx_queued(np)) {
1109 		printk (KERN_DEBUG
1110 			"%s: Transmit frame #%d queued in slot %d.\n",
1111 			dev->name, np->cur_tx, entry);
1112 	}
1113 	return NETDEV_TX_OK;
1114 
1115 drop_frame:
1116 	dev_kfree_skb(skb);
1117 	np->tx_skbuff[entry] = NULL;
1118 	dev->stats.tx_dropped++;
1119 	return NETDEV_TX_OK;
1120 }
1121 
1122 /* Reset the hardware Tx path and free all Tx buffers */
1123 static int
1124 reset_tx (struct net_device *dev)
1125 {
1126 	struct netdev_private *np = netdev_priv(dev);
1127 	void __iomem *ioaddr = np->base;
1128 	struct sk_buff *skb;
1129 	int i;
1130 
1131 	/* Reset Tx logic; TxListPtr will be cleared */
1132 	iowrite16 (TxDisable, ioaddr + MACCtrl1);
1133 	sundance_reset(dev, (NetworkReset|FIFOReset|DMAReset|TxReset) << 16);
1134 
1135 	/* free all tx skbuff */
1136 	for (i = 0; i < TX_RING_SIZE; i++) {
1137 		np->tx_ring[i].next_desc = 0;
1138 
1139 		skb = np->tx_skbuff[i];
1140 		if (skb) {
1141 			dma_unmap_single(&np->pci_dev->dev,
1142 				le32_to_cpu(np->tx_ring[i].frag[0].addr),
1143 				skb->len, DMA_TO_DEVICE);
1144 			dev_kfree_skb_any(skb);
1145 			np->tx_skbuff[i] = NULL;
1146 			dev->stats.tx_dropped++;
1147 		}
1148 	}
1149 	np->cur_tx = np->dirty_tx = 0;
1150 	np->cur_task = 0;
1151 
1152 	np->last_tx = NULL;
1153 	iowrite8(127, ioaddr + TxDMAPollPeriod);
1154 
1155 	iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);
1156 	return 0;
1157 }
1158 
1159 /* The interrupt handler cleans up after the Tx thread
1160    and schedules Rx work. */
1161 static irqreturn_t intr_handler(int irq, void *dev_instance)
1162 {
1163 	struct net_device *dev = (struct net_device *)dev_instance;
1164 	struct netdev_private *np = netdev_priv(dev);
1165 	void __iomem *ioaddr = np->base;
1166 	int hw_frame_id;
1167 	int tx_cnt;
1168 	int tx_status;
1169 	int handled = 0;
1170 	int i;
1171 
1172 
1173 	do {
1174 		int intr_status = ioread16(ioaddr + IntrStatus);
1175 		iowrite16(intr_status, ioaddr + IntrStatus);
1176 
1177 		if (netif_msg_intr(np))
1178 			printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n",
1179 				   dev->name, intr_status);
1180 
1181 		if (!(intr_status & DEFAULT_INTR))
1182 			break;
1183 
1184 		handled = 1;
1185 
1186 		if (intr_status & (IntrRxDMADone)) {
1187 			iowrite16(DEFAULT_INTR & ~(IntrRxDone|IntrRxDMADone),
1188 					ioaddr + IntrEnable);
1189 			if (np->budget < 0)
1190 				np->budget = RX_BUDGET;
1191 			tasklet_schedule(&np->rx_tasklet);
1192 		}
1193 		if (intr_status & (IntrTxDone | IntrDrvRqst)) {
1194 			tx_status = ioread16 (ioaddr + TxStatus);
1195 			for (tx_cnt=32; tx_status & 0x80; --tx_cnt) {
1196 				if (netif_msg_tx_done(np))
1197 					printk
1198 					    ("%s: Transmit status is %2.2x.\n",
1199 				     	dev->name, tx_status);
1200 				if (tx_status & 0x1e) {
1201 					if (netif_msg_tx_err(np))
1202 						printk("%s: Transmit error status %4.4x.\n",
1203 							   dev->name, tx_status);
1204 					dev->stats.tx_errors++;
1205 					if (tx_status & 0x10)
1206 						dev->stats.tx_fifo_errors++;
1207 					if (tx_status & 0x08)
1208 						dev->stats.collisions++;
1209 					if (tx_status & 0x04)
1210 						dev->stats.tx_fifo_errors++;
1211 					if (tx_status & 0x02)
1212 						dev->stats.tx_window_errors++;
1213 
1214 					/*
1215 					** This reset has been verified on
1216 					** DFE-580TX boards ! phdm@macqel.be.
1217 					*/
1218 					if (tx_status & 0x10) {	/* TxUnderrun */
1219 						/* Restart Tx FIFO and transmitter */
1220 						sundance_reset(dev, (NetworkReset|FIFOReset|TxReset) << 16);
1221 						/* No need to reset the Tx pointer here */
1222 					}
1223 					/* Restart the Tx. Need to make sure tx enabled */
1224 					i = 10;
1225 					do {
1226 						iowrite16(ioread16(ioaddr + MACCtrl1) | TxEnable, ioaddr + MACCtrl1);
1227 						if (ioread16(ioaddr + MACCtrl1) & TxEnabled)
1228 							break;
1229 						mdelay(1);
1230 					} while (--i);
1231 				}
1232 				/* Yup, this is a documentation bug.  It cost me *hours*. */
1233 				iowrite16 (0, ioaddr + TxStatus);
1234 				if (tx_cnt < 0) {
1235 					iowrite32(5000, ioaddr + DownCounter);
1236 					break;
1237 				}
1238 				tx_status = ioread16 (ioaddr + TxStatus);
1239 			}
1240 			hw_frame_id = (tx_status >> 8) & 0xff;
1241 		} else 	{
1242 			hw_frame_id = ioread8(ioaddr + TxFrameId);
1243 		}
1244 
1245 		if (np->pci_dev->revision >= 0x14) {
1246 			spin_lock(&np->lock);
1247 			for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
1248 				int entry = np->dirty_tx % TX_RING_SIZE;
1249 				struct sk_buff *skb;
1250 				int sw_frame_id;
1251 				sw_frame_id = (le32_to_cpu(
1252 					np->tx_ring[entry].status) >> 2) & 0xff;
1253 				if (sw_frame_id == hw_frame_id &&
1254 					!(le32_to_cpu(np->tx_ring[entry].status)
1255 					& 0x00010000))
1256 						break;
1257 				if (sw_frame_id == (hw_frame_id + 1) %
1258 					TX_RING_SIZE)
1259 						break;
1260 				skb = np->tx_skbuff[entry];
1261 				/* Free the original skb. */
1262 				dma_unmap_single(&np->pci_dev->dev,
1263 					le32_to_cpu(np->tx_ring[entry].frag[0].addr),
1264 					skb->len, DMA_TO_DEVICE);
1265 				dev_kfree_skb_irq (np->tx_skbuff[entry]);
1266 				np->tx_skbuff[entry] = NULL;
1267 				np->tx_ring[entry].frag[0].addr = 0;
1268 				np->tx_ring[entry].frag[0].length = 0;
1269 			}
1270 			spin_unlock(&np->lock);
1271 		} else {
1272 			spin_lock(&np->lock);
1273 			for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
1274 				int entry = np->dirty_tx % TX_RING_SIZE;
1275 				struct sk_buff *skb;
1276 				if (!(le32_to_cpu(np->tx_ring[entry].status)
1277 							& 0x00010000))
1278 					break;
1279 				skb = np->tx_skbuff[entry];
1280 				/* Free the original skb. */
1281 				dma_unmap_single(&np->pci_dev->dev,
1282 					le32_to_cpu(np->tx_ring[entry].frag[0].addr),
1283 					skb->len, DMA_TO_DEVICE);
1284 				dev_kfree_skb_irq (np->tx_skbuff[entry]);
1285 				np->tx_skbuff[entry] = NULL;
1286 				np->tx_ring[entry].frag[0].addr = 0;
1287 				np->tx_ring[entry].frag[0].length = 0;
1288 			}
1289 			spin_unlock(&np->lock);
1290 		}
1291 
1292 		if (netif_queue_stopped(dev) &&
1293 			np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
1294 			/* The ring is no longer full, clear busy flag. */
1295 			netif_wake_queue (dev);
1296 		}
1297 		/* Abnormal error summary/uncommon events handlers. */
1298 		if (intr_status & (IntrPCIErr | LinkChange | StatsMax))
1299 			netdev_error(dev, intr_status);
1300 	} while (0);
1301 	if (netif_msg_intr(np))
1302 		printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
1303 			   dev->name, ioread16(ioaddr + IntrStatus));
1304 	return IRQ_RETVAL(handled);
1305 }
1306 
1307 static void rx_poll(unsigned long data)
1308 {
1309 	struct net_device *dev = (struct net_device *)data;
1310 	struct netdev_private *np = netdev_priv(dev);
1311 	int entry = np->cur_rx % RX_RING_SIZE;
1312 	int boguscnt = np->budget;
1313 	void __iomem *ioaddr = np->base;
1314 	int received = 0;
1315 
1316 	/* Process descriptors the chip has completed (DescOwn set) and send the frames up. */
1317 	while (1) {
1318 		struct netdev_desc *desc = &(np->rx_ring[entry]);
1319 		u32 frame_status = le32_to_cpu(desc->status);
1320 		int pkt_len;
1321 
1322 		if (--boguscnt < 0) {
1323 			goto not_done;
1324 		}
1325 		if (!(frame_status & DescOwn))
1326 			break;
1327 		pkt_len = frame_status & 0x1fff;	/* Chip omits the CRC. */
1328 		if (netif_msg_rx_status(np))
1329 			printk(KERN_DEBUG "  netdev_rx() status was %8.8x.\n",
1330 				   frame_status);
1331 		if (frame_status & 0x001f4000) {
1332 			/* There was an error. */
1333 			if (netif_msg_rx_err(np))
1334 				printk(KERN_DEBUG "  netdev_rx() Rx error was %8.8x.\n",
1335 					   frame_status);
1336 			dev->stats.rx_errors++;
1337 			if (frame_status & 0x00100000)
1338 				dev->stats.rx_length_errors++;
1339 			if (frame_status & 0x00010000)
1340 				dev->stats.rx_fifo_errors++;
1341 			if (frame_status & 0x00060000)
1342 				dev->stats.rx_frame_errors++;
1343 			if (frame_status & 0x00080000)
1344 				dev->stats.rx_crc_errors++;
1345 			if (frame_status & 0x00100000) {
1346 				printk(KERN_WARNING "%s: Oversized Ethernet frame,"
1347 					   " status %8.8x.\n",
1348 					   dev->name, frame_status);
1349 			}
1350 		} else {
1351 			struct sk_buff *skb;
1352 #ifndef final_version
1353 			if (netif_msg_rx_status(np))
1354 				printk(KERN_DEBUG "  netdev_rx() normal Rx pkt length %d"
1355 					   ", bogus_cnt %d.\n",
1356 					   pkt_len, boguscnt);
1357 #endif
1358 			/* Check if the packet is long enough to accept without copying
1359 			   to a minimally-sized skbuff. */
1360 			if (pkt_len < rx_copybreak &&
1361 			    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
1362 				skb_reserve(skb, 2);	/* 16 byte align the IP header */
1363 				dma_sync_single_for_cpu(&np->pci_dev->dev,
1364 						le32_to_cpu(desc->frag[0].addr),
1365 						np->rx_buf_sz, DMA_FROM_DEVICE);
1366 				skb_copy_to_linear_data(skb, np->rx_skbuff[entry]->data, pkt_len);
1367 				dma_sync_single_for_device(&np->pci_dev->dev,
1368 						le32_to_cpu(desc->frag[0].addr),
1369 						np->rx_buf_sz, DMA_FROM_DEVICE);
1370 				skb_put(skb, pkt_len);
1371 			} else {
1372 				dma_unmap_single(&np->pci_dev->dev,
1373 					le32_to_cpu(desc->frag[0].addr),
1374 					np->rx_buf_sz, DMA_FROM_DEVICE);
1375 				skb_put(skb = np->rx_skbuff[entry], pkt_len);
1376 				np->rx_skbuff[entry] = NULL;
1377 			}
1378 			skb->protocol = eth_type_trans(skb, dev);
1379 			/* Note: checksum -> skb->ip_summed = CHECKSUM_UNNECESSARY; */
1380 			netif_rx(skb);
1381 		}
1382 		entry = (entry + 1) % RX_RING_SIZE;
1383 		received++;
1384 	}
1385 	np->cur_rx = entry;
1386 	refill_rx (dev);
1387 	np->budget -= received;
1388 	iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
1389 	return;
1390 
1391 not_done:
1392 	np->cur_rx = entry;
1393 	refill_rx (dev);
1394 	if (!received)
1395 		received = 1;
1396 	np->budget -= received;
1397 	if (np->budget <= 0)
1398 		np->budget = RX_BUDGET;
1399 	tasklet_schedule(&np->rx_tasklet);
1400 }
1401 
1402 static void refill_rx (struct net_device *dev)
1403 {
1404 	struct netdev_private *np = netdev_priv(dev);
1405 	int entry;
1406 	int cnt = 0;
1407 
1408 	/* Refill the Rx ring buffers. */
1409 	for (;(np->cur_rx - np->dirty_rx + RX_RING_SIZE) % RX_RING_SIZE > 0;
1410 		np->dirty_rx = (np->dirty_rx + 1) % RX_RING_SIZE) {
1411 		struct sk_buff *skb;
1412 		entry = np->dirty_rx % RX_RING_SIZE;
1413 		if (np->rx_skbuff[entry] == NULL) {
1414 			skb = netdev_alloc_skb(dev, np->rx_buf_sz + 2);
1415 			np->rx_skbuff[entry] = skb;
1416 			if (skb == NULL)
1417 				break;		/* Better luck next round. */
1418 			skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
1419 			np->rx_ring[entry].frag[0].addr = cpu_to_le32(
1420 				dma_map_single(&np->pci_dev->dev, skb->data,
1421 					np->rx_buf_sz, DMA_FROM_DEVICE));
1422 			if (dma_mapping_error(&np->pci_dev->dev,
1423 				    np->rx_ring[entry].frag[0].addr)) {
1424 			    dev_kfree_skb_irq(skb);
1425 			    np->rx_skbuff[entry] = NULL;
1426 			    break;
1427 			}
1428 		}
1429 		/* Perhaps we need not reset this field. */
1430 		np->rx_ring[entry].frag[0].length =
1431 			cpu_to_le32(np->rx_buf_sz | LastFrag);
1432 		np->rx_ring[entry].status = 0;
1433 		cnt++;
1434 	}
1435 }
1436 static void netdev_error(struct net_device *dev, int intr_status)
1437 {
1438 	struct netdev_private *np = netdev_priv(dev);
1439 	void __iomem *ioaddr = np->base;
1440 	u16 mii_ctl, mii_advertise, mii_lpa;
1441 	int speed;
1442 
1443 	if (intr_status & LinkChange) {
1444 		if (mdio_wait_link(dev, 10) == 0) {
1445 			printk(KERN_INFO "%s: Link up\n", dev->name);
1446 			if (np->an_enable) {
1447 				mii_advertise = mdio_read(dev, np->phys[0],
1448 							   MII_ADVERTISE);
1449 				mii_lpa = mdio_read(dev, np->phys[0], MII_LPA);
1450 				mii_advertise &= mii_lpa;
1451 				printk(KERN_INFO "%s: Link changed: ",
1452 					dev->name);
1453 				if (mii_advertise & ADVERTISE_100FULL) {
1454 					np->speed = 100;
1455 					printk("100Mbps, full duplex\n");
1456 				} else if (mii_advertise & ADVERTISE_100HALF) {
1457 					np->speed = 100;
1458 					printk("100Mbps, half duplex\n");
1459 				} else if (mii_advertise & ADVERTISE_10FULL) {
1460 					np->speed = 10;
1461 					printk("10Mbps, full duplex\n");
1462 				} else if (mii_advertise & ADVERTISE_10HALF) {
1463 					np->speed = 10;
1464 					printk("10Mbps, half duplex\n");
1465 				} else
1466 					printk("\n");
1467 
1468 			} else {
1469 				mii_ctl = mdio_read(dev, np->phys[0], MII_BMCR);
1470 				speed = (mii_ctl & BMCR_SPEED100) ? 100 : 10;
1471 				np->speed = speed;
1472 				printk(KERN_INFO "%s: Link changed: %dMbps, ",
1473 					dev->name, speed);
1474 				printk("%s duplex.\n",
1475 					(mii_ctl & BMCR_FULLDPLX) ?
1476 						"full" : "half");
1477 			}
1478 			check_duplex(dev);
1479 			if (np->flowctrl && np->mii_if.full_duplex) {
1480 				iowrite16(ioread16(ioaddr + MulticastFilter1+2) | 0x0200,
1481 					ioaddr + MulticastFilter1+2);
1482 				iowrite16(ioread16(ioaddr + MACCtrl0) | EnbFlowCtrl,
1483 					ioaddr + MACCtrl0);
1484 			}
1485 			netif_carrier_on(dev);
1486 		} else {
1487 			printk(KERN_INFO "%s: Link down\n", dev->name);
1488 			netif_carrier_off(dev);
1489 		}
1490 	}
1491 	if (intr_status & StatsMax) {
1492 		get_stats(dev);
1493 	}
1494 	if (intr_status & IntrPCIErr) {
1495 		printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n",
1496 			   dev->name, intr_status);
1497 		/* We must do a global reset of DMA to continue. */
1498 	}
1499 }
1500 
1501 static struct net_device_stats *get_stats(struct net_device *dev)
1502 {
1503 	struct netdev_private *np = netdev_priv(dev);
1504 	void __iomem *ioaddr = np->base;
1505 	unsigned long flags;
1506 	u8 late_coll, single_coll, mult_coll;
1507 
1508 	spin_lock_irqsave(&np->statlock, flags);
1509 	/* The chip need only report frames it silently dropped. */
1510 	dev->stats.rx_missed_errors	+= ioread8(ioaddr + RxMissed);
1511 	dev->stats.tx_packets += ioread16(ioaddr + TxFramesOK);
1512 	dev->stats.rx_packets += ioread16(ioaddr + RxFramesOK);
1513 	dev->stats.tx_carrier_errors += ioread8(ioaddr + StatsCarrierError);
1514 
1515 	mult_coll = ioread8(ioaddr + StatsMultiColl);
1516 	np->xstats.tx_multiple_collisions += mult_coll;
1517 	single_coll = ioread8(ioaddr + StatsOneColl);
1518 	np->xstats.tx_single_collisions += single_coll;
1519 	late_coll = ioread8(ioaddr + StatsLateColl);
1520 	np->xstats.tx_late_collisions += late_coll;
1521 	dev->stats.collisions += mult_coll
1522 		+ single_coll
1523 		+ late_coll;
1524 
1525 	np->xstats.tx_deferred += ioread8(ioaddr + StatsTxDefer);
1526 	np->xstats.tx_deferred_excessive += ioread8(ioaddr + StatsTxXSDefer);
1527 	np->xstats.tx_aborted += ioread8(ioaddr + StatsTxAbort);
1528 	np->xstats.tx_bcasts += ioread8(ioaddr + StatsBcastTx);
1529 	np->xstats.rx_bcasts += ioread8(ioaddr + StatsBcastRx);
1530 	np->xstats.tx_mcasts += ioread8(ioaddr + StatsMcastTx);
1531 	np->xstats.rx_mcasts += ioread8(ioaddr + StatsMcastRx);
1532 
1533 	dev->stats.tx_bytes += ioread16(ioaddr + TxOctetsLow);
1534 	dev->stats.tx_bytes += ioread16(ioaddr + TxOctetsHigh) << 16;
1535 	dev->stats.rx_bytes += ioread16(ioaddr + RxOctetsLow);
1536 	dev->stats.rx_bytes += ioread16(ioaddr + RxOctetsHigh) << 16;
1537 
1538 	spin_unlock_irqrestore(&np->statlock, flags);
1539 
1540 	return &dev->stats;
1541 }
1542 
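/* set_rx_mode() below programs a 64-bin multicast hash filter: six bits
   derived from the little-endian Ethernet CRC of each address select a bin,
   and the corresponding bit is set in one of the four 16-bit MulticastFilter
   registers.  Sketch of the per-address hashing:

	crc = ether_crc_le(ETH_ALEN, ha->addr);
	for (index = 0, bit = 0; bit < 6; bit++, crc <<= 1)
		if (crc & 0x80000000)
			index |= 1 << bit;
	mc_filter[index / 16] |= 1 << (index % 16);
*/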
1543 static void set_rx_mode(struct net_device *dev)
1544 {
1545 	struct netdev_private *np = netdev_priv(dev);
1546 	void __iomem *ioaddr = np->base;
1547 	u16 mc_filter[4];			/* Multicast hash filter */
1548 	u32 rx_mode;
1549 	int i;
1550 
1551 	if (dev->flags & IFF_PROMISC) {			/* Set promiscuous. */
1552 		memset(mc_filter, 0xff, sizeof(mc_filter));
1553 		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptAll | AcceptMyPhys;
1554 	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
1555 		   (dev->flags & IFF_ALLMULTI)) {
1556 		/* Too many to match, or accept all multicasts. */
1557 		memset(mc_filter, 0xff, sizeof(mc_filter));
1558 		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
1559 	} else if (!netdev_mc_empty(dev)) {
1560 		struct netdev_hw_addr *ha;
1561 		int bit;
1562 		int index;
1563 		int crc;
1564 		memset (mc_filter, 0, sizeof (mc_filter));
1565 		netdev_for_each_mc_addr(ha, dev) {
1566 			crc = ether_crc_le(ETH_ALEN, ha->addr);
1567 			for (index=0, bit=0; bit < 6; bit++, crc <<= 1)
1568 				if (crc & 0x80000000) index |= 1 << bit;
1569 			mc_filter[index/16] |= (1 << (index % 16));
1570 		}
1571 		rx_mode = AcceptBroadcast | AcceptMultiHash | AcceptMyPhys;
1572 	} else {
1573 		iowrite8(AcceptBroadcast | AcceptMyPhys, ioaddr + RxMode);
1574 		return;
1575 	}
1576 	if (np->mii_if.full_duplex && np->flowctrl)
1577 		mc_filter[3] |= 0x0200;
1578 
1579 	for (i = 0; i < 4; i++)
1580 		iowrite16(mc_filter[i], ioaddr + MulticastFilter0 + i*2);
1581 	iowrite8(rx_mode, ioaddr + RxMode);
1582 }
1583 
1584 static int __set_mac_addr(struct net_device *dev)
1585 {
1586 	struct netdev_private *np = netdev_priv(dev);
1587 	u16 addr16;
1588 
1589 	addr16 = (dev->dev_addr[0] | (dev->dev_addr[1] << 8));
1590 	iowrite16(addr16, np->base + StationAddr);
1591 	addr16 = (dev->dev_addr[2] | (dev->dev_addr[3] << 8));
1592 	iowrite16(addr16, np->base + StationAddr+2);
1593 	addr16 = (dev->dev_addr[4] | (dev->dev_addr[5] << 8));
1594 	iowrite16(addr16, np->base + StationAddr+4);
1595 	return 0;
1596 }
1597 
1598 /* Invoked with rtnl_lock held */
1599 static int sundance_set_mac_addr(struct net_device *dev, void *data)
1600 {
1601 	const struct sockaddr *addr = data;
1602 
1603 	if (!is_valid_ether_addr(addr->sa_data))
1604 		return -EADDRNOTAVAIL;
1605 	memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
1606 	__set_mac_addr(dev);
1607 
1608 	return 0;
1609 }
1610 
1611 static const struct {
1612 	const char name[ETH_GSTRING_LEN];
1613 } sundance_stats[] = {
1614 	{ "tx_multiple_collisions" },
1615 	{ "tx_single_collisions" },
1616 	{ "tx_late_collisions" },
1617 	{ "tx_deferred" },
1618 	{ "tx_deferred_excessive" },
1619 	{ "tx_aborted" },
1620 	{ "tx_bcasts" },
1621 	{ "rx_bcasts" },
1622 	{ "tx_mcasts" },
1623 	{ "rx_mcasts" },
1624 };
1625 
1626 static int check_if_running(struct net_device *dev)
1627 {
1628 	if (!netif_running(dev))
1629 		return -EINVAL;
1630 	return 0;
1631 }
1632 
1633 static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1634 {
1635 	struct netdev_private *np = netdev_priv(dev);
1636 	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
1637 	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
1638 	strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
1639 }
1640 
1641 static int get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1642 {
1643 	struct netdev_private *np = netdev_priv(dev);
1644 	spin_lock_irq(&np->lock);
1645 	mii_ethtool_gset(&np->mii_if, ecmd);
1646 	spin_unlock_irq(&np->lock);
1647 	return 0;
1648 }
1649 
1650 static int set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1651 {
1652 	struct netdev_private *np = netdev_priv(dev);
1653 	int res;
1654 	spin_lock_irq(&np->lock);
1655 	res = mii_ethtool_sset(&np->mii_if, ecmd);
1656 	spin_unlock_irq(&np->lock);
1657 	return res;
1658 }
1659 
1660 static int nway_reset(struct net_device *dev)
1661 {
1662 	struct netdev_private *np = netdev_priv(dev);
1663 	return mii_nway_restart(&np->mii_if);
1664 }
1665 
1666 static u32 get_link(struct net_device *dev)
1667 {
1668 	struct netdev_private *np = netdev_priv(dev);
1669 	return mii_link_ok(&np->mii_if);
1670 }
1671 
1672 static u32 get_msglevel(struct net_device *dev)
1673 {
1674 	struct netdev_private *np = netdev_priv(dev);
1675 	return np->msg_enable;
1676 }
1677 
1678 static void set_msglevel(struct net_device *dev, u32 val)
1679 {
1680 	struct netdev_private *np = netdev_priv(dev);
1681 	np->msg_enable = val;
1682 }
1683 
1684 static void get_strings(struct net_device *dev, u32 stringset,
1685 		u8 *data)
1686 {
1687 	if (stringset == ETH_SS_STATS)
1688 		memcpy(data, sundance_stats, sizeof(sundance_stats));
1689 }
1690 
1691 static int get_sset_count(struct net_device *dev, int sset)
1692 {
1693 	switch (sset) {
1694 	case ETH_SS_STATS:
1695 		return ARRAY_SIZE(sundance_stats);
1696 	default:
1697 		return -EOPNOTSUPP;
1698 	}
1699 }
1700 
1701 static void get_ethtool_stats(struct net_device *dev,
1702 		struct ethtool_stats *stats, u64 *data)
1703 {
1704 	struct netdev_private *np = netdev_priv(dev);
1705 	int i = 0;
1706 
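	/*
	 * get_stats() refreshes np->xstats from the hardware counters; the
	 * order of the values below must match sundance_stats[] above.
	 */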
1707 	get_stats(dev);
1708 	data[i++] = np->xstats.tx_multiple_collisions;
1709 	data[i++] = np->xstats.tx_single_collisions;
1710 	data[i++] = np->xstats.tx_late_collisions;
1711 	data[i++] = np->xstats.tx_deferred;
1712 	data[i++] = np->xstats.tx_deferred_excessive;
1713 	data[i++] = np->xstats.tx_aborted;
1714 	data[i++] = np->xstats.tx_bcasts;
1715 	data[i++] = np->xstats.rx_bcasts;
1716 	data[i++] = np->xstats.tx_mcasts;
1717 	data[i++] = np->xstats.rx_mcasts;
1718 }
1719 
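/*
 * These hooks back the userspace ethtool utility for this era of the
 * kernel: "ethtool -S" walks get_sset_count()/get_strings()/
 * get_ethtool_stats(), "ethtool -s" reaches set_settings() through the
 * generic MII library, and "ethtool -r" restarts autonegotiation via
 * nway_reset().  The .begin hook makes every operation fail with -EINVAL
 * unless the interface is up.
 */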
1720 static const struct ethtool_ops ethtool_ops = {
1721 	.begin = check_if_running,
1722 	.get_drvinfo = get_drvinfo,
1723 	.get_settings = get_settings,
1724 	.set_settings = set_settings,
1725 	.nway_reset = nway_reset,
1726 	.get_link = get_link,
1727 	.get_msglevel = get_msglevel,
1728 	.set_msglevel = set_msglevel,
1729 	.get_strings = get_strings,
1730 	.get_sset_count = get_sset_count,
1731 	.get_ethtool_stats = get_ethtool_stats,
1732 };
1733 
1734 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1735 {
1736 	struct netdev_private *np = netdev_priv(dev);
1737 	int rc;
1738 
1739 	if (!netif_running(dev))
1740 		return -EINVAL;
1741 
1742 	spin_lock_irq(&np->lock);
1743 	rc = generic_mii_ioctl(&np->mii_if, if_mii(rq), cmd, NULL);
1744 	spin_unlock_irq(&np->lock);
1745 
1746 	return rc;
1747 }
1748 
1749 static int netdev_close(struct net_device *dev)
1750 {
1751 	struct netdev_private *np = netdev_priv(dev);
1752 	void __iomem *ioaddr = np->base;
1753 	struct sk_buff *skb;
1754 	int i;
1755 
	/* Wait for and kill the Rx and Tx tasklets */
1757 	tasklet_kill(&np->rx_tasklet);
1758 	tasklet_kill(&np->tx_tasklet);
1759 	np->cur_tx = 0;
1760 	np->dirty_tx = 0;
1761 	np->cur_task = 0;
1762 	np->last_tx = NULL;
1763 
1764 	netif_stop_queue(dev);
1765 
1766 	if (netif_msg_ifdown(np)) {
1767 		printk(KERN_DEBUG "%s: Shutting down ethercard, status was Tx %2.2x "
1768 			   "Rx %4.4x Int %2.2x.\n",
1769 			   dev->name, ioread8(ioaddr + TxStatus),
1770 			   ioread32(ioaddr + RxStatus), ioread16(ioaddr + IntrStatus));
1771 		printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d,  Rx %d / %d.\n",
1772 			   dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx);
1773 	}
1774 
1775 	/* Disable interrupts by clearing the interrupt mask. */
1776 	iowrite16(0x0000, ioaddr + IntrEnable);
1777 
	/* Halt Rx and Tx DMA so the resources can be released safely */
1779 	iowrite32(0x500, ioaddr + DMACtrl);
1780 
1781 	/* Stop the chip's Tx and Rx processes. */
1782 	iowrite16(TxDisable | RxDisable | StatsDisable, ioaddr + MACCtrl1);
1783 
	/* Wait up to two seconds for the DMA engines to go idle. */
	for (i = 2000; i > 0; i--) {
		if ((ioread32(ioaddr + DMACtrl) & 0xc000) == 0)
			break;
		mdelay(1);
	}

	/* Issue a global reset and wait for the ResetBusy bit to clear. */
	iowrite16(GlobalReset | DMAReset | FIFOReset | NetworkReset,
			ioaddr + ASIC_HI_WORD(ASICCtrl));

	for (i = 2000; i > 0; i--) {
		if ((ioread16(ioaddr + ASIC_HI_WORD(ASICCtrl)) & ResetBusy) == 0)
			break;
		mdelay(1);
	}
1798 
1799 #ifdef __i386__
1800 	if (netif_msg_hw(np)) {
1801 		printk(KERN_DEBUG "  Tx ring at %8.8x:\n",
1802 			   (int)(np->tx_ring_dma));
1803 		for (i = 0; i < TX_RING_SIZE; i++)
1804 			printk(KERN_DEBUG " #%d desc. %4.4x %8.8x %8.8x.\n",
1805 				   i, np->tx_ring[i].status, np->tx_ring[i].frag[0].addr,
1806 				   np->tx_ring[i].frag[0].length);
1807 		printk(KERN_DEBUG "  Rx ring %8.8x:\n",
1808 			   (int)(np->rx_ring_dma));
		for (i = 0; i < /*RX_RING_SIZE*/ 4; i++) {
1810 			printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x\n",
1811 				   i, np->rx_ring[i].status, np->rx_ring[i].frag[0].addr,
1812 				   np->rx_ring[i].frag[0].length);
1813 		}
1814 	}
1815 #endif /* __i386__ debugging only */
1816 
1817 	free_irq(dev->irq, dev);
1818 
1819 	del_timer_sync(&np->timer);
1820 
1821 	/* Free all the skbuffs in the Rx queue. */
1822 	for (i = 0; i < RX_RING_SIZE; i++) {
1823 		np->rx_ring[i].status = 0;
1824 		skb = np->rx_skbuff[i];
1825 		if (skb) {
1826 			dma_unmap_single(&np->pci_dev->dev,
1827 				le32_to_cpu(np->rx_ring[i].frag[0].addr),
1828 				np->rx_buf_sz, DMA_FROM_DEVICE);
1829 			dev_kfree_skb(skb);
1830 			np->rx_skbuff[i] = NULL;
1831 		}
1832 		np->rx_ring[i].frag[0].addr = cpu_to_le32(0xBADF00D0); /* poison */
1833 	}
1834 	for (i = 0; i < TX_RING_SIZE; i++) {
1835 		np->tx_ring[i].next_desc = 0;
1836 		skb = np->tx_skbuff[i];
1837 		if (skb) {
1838 			dma_unmap_single(&np->pci_dev->dev,
1839 				le32_to_cpu(np->tx_ring[i].frag[0].addr),
1840 				skb->len, DMA_TO_DEVICE);
1841 			dev_kfree_skb(skb);
1842 			np->tx_skbuff[i] = NULL;
1843 		}
1844 	}
1845 
1846 	return 0;
1847 }
1848 
1849 static void __devexit sundance_remove1 (struct pci_dev *pdev)
1850 {
1851 	struct net_device *dev = pci_get_drvdata(pdev);
1852 
1853 	if (dev) {
		struct netdev_private *np = netdev_priv(dev);

		unregister_netdev(dev);
		dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE,
			np->rx_ring, np->rx_ring_dma);
		dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE,
			np->tx_ring, np->tx_ring_dma);
		pci_iounmap(pdev, np->base);
		pci_release_regions(pdev);
		free_netdev(dev);
		pci_set_drvdata(pdev, NULL);
1864 	}
1865 }
1866 
1867 #ifdef CONFIG_PM
1868 
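/*
 * Suspend simply runs the normal netdev_close() path and resume re-runs
 * netdev_open(), so the descriptor rings and receive filter are rebuilt
 * from scratch after a power transition.
 */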
1869 static int sundance_suspend(struct pci_dev *pci_dev, pm_message_t state)
1870 {
1871 	struct net_device *dev = pci_get_drvdata(pci_dev);
1872 
1873 	if (!netif_running(dev))
1874 		return 0;
1875 
1876 	netdev_close(dev);
1877 	netif_device_detach(dev);
1878 
1879 	pci_save_state(pci_dev);
1880 	pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state));
1881 
1882 	return 0;
1883 }
1884 
1885 static int sundance_resume(struct pci_dev *pci_dev)
1886 {
1887 	struct net_device *dev = pci_get_drvdata(pci_dev);
1888 	int err = 0;
1889 
1890 	if (!netif_running(dev))
1891 		return 0;
1892 
1893 	pci_set_power_state(pci_dev, PCI_D0);
1894 	pci_restore_state(pci_dev);
1895 
1896 	err = netdev_open(dev);
1897 	if (err) {
1898 		printk(KERN_ERR "%s: Can't resume interface!\n",
1899 				dev->name);
1900 		goto out;
1901 	}
1902 
1903 	netif_device_attach(dev);
1904 
1905 out:
1906 	return err;
1907 }
1908 
1909 #endif /* CONFIG_PM */
1910 
1911 static struct pci_driver sundance_driver = {
1912 	.name		= DRV_NAME,
1913 	.id_table	= sundance_pci_tbl,
1914 	.probe		= sundance_probe1,
1915 	.remove		= __devexit_p(sundance_remove1),
1916 #ifdef CONFIG_PM
1917 	.suspend	= sundance_suspend,
1918 	.resume		= sundance_resume,
1919 #endif /* CONFIG_PM */
1920 };
1921 
1922 static int __init sundance_init(void)
1923 {
/* When built as a module, the version banner is printed whether or not
 * any devices are found during probe. */
1925 #ifdef MODULE
1926 	printk(version);
1927 #endif
1928 	return pci_register_driver(&sundance_driver);
1929 }
1930 
1931 static void __exit sundance_exit(void)
1932 {
1933 	pci_unregister_driver(&sundance_driver);
1934 }
1935 
1936 module_init(sundance_init);
1937 module_exit(sundance_exit);
1938 
1939 
1940