1 /* sundance.c: A Linux device driver for the Sundance ST201 "Alta". */
2 /*
3 	Written 1999-2000 by Donald Becker.
4 
5 	This software may be used and distributed according to the terms of
6 	the GNU General Public License (GPL), incorporated herein by reference.
7 	Drivers based on or derived from this code fall under the GPL and must
8 	retain the authorship, copyright and license notice.  This file is not
9 	a complete program and may only be used when the entire operating
10 	system is licensed under the GPL.
11 
12 	The author may be reached as becker@scyld.com, or C/O
13 	Scyld Computing Corporation
14 	410 Severn Ave., Suite 210
15 	Annapolis MD 21403
16 
17 	Support and updates available at
18 	http://www.scyld.com/network/sundance.html
19 	[link no longer provides useful info -jgarzik]
20 	Archives of the mailing list are still available at
21 	http://www.beowulf.org/pipermail/netdrivers/
22 
23 */
24 
25 #define DRV_NAME	"sundance"
26 #define DRV_VERSION	"1.2"
27 #define DRV_RELDATE	"11-Sep-2006"
28 
29 
30 /* The user-configurable values.
31    These may be modified when a driver module is loaded.*/
32 static int debug = 1;			/* 1 normal messages, 0 quiet .. 7 verbose. */
33 /* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
34    Typical is a 64 element hash table based on the Ethernet CRC.  */
35 static const int multicast_filter_limit = 32;
36 
37 /* Set the copy breakpoint for the copy-only-tiny-frames scheme.
38    Setting to > 1518 effectively disables this feature.
39    This chip can receive into offset buffers, so the Alpha does not
40    need a copy-align. */
41 static int rx_copybreak;
static int flowctrl = 1;
43 
44 /* media[] specifies the media type the NIC operates at.
45 		 autosense	Autosensing active media.
46 		 10mbps_hd 	10Mbps half duplex.
47 		 10mbps_fd 	10Mbps full duplex.
48 		 100mbps_hd 	100Mbps half duplex.
49 		 100mbps_fd 	100Mbps full duplex.
50 		 0		Autosensing active media.
51 		 1	 	10Mbps half duplex.
52 		 2	 	10Mbps full duplex.
53 		 3	 	100Mbps half duplex.
54 		 4	 	100Mbps full duplex.
55 */
56 #define MAX_UNITS 8
57 static char *media[MAX_UNITS];
58 
59 
60 /* Operational parameters that are set at compile time. */
61 
62 /* Keep the ring sizes a power of two for compile efficiency.
63    The compiler will convert <unsigned>'%'<2^N> into a bit mask.
64    Making the Tx ring too large decreases the effectiveness of channel
65    bonding and packet priority, and more than 128 requires modifying the
66    Tx error recovery.
67    Large receive rings merely waste memory. */
68 #define TX_RING_SIZE	32
69 #define TX_QUEUE_LEN	(TX_RING_SIZE - 1) /* Limit ring entries actually used.  */
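/* start_tx() stops the queue once cur_tx - dirty_tx reaches TX_QUEUE_LEN - 1;
   the interrupt handler and tx_timeout() wake it again below TX_QUEUE_LEN - 4. */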
70 #define RX_RING_SIZE	64
71 #define RX_BUDGET	32
#define TX_TOTAL_SIZE	(TX_RING_SIZE * sizeof(struct netdev_desc))
#define RX_TOTAL_SIZE	(RX_RING_SIZE * sizeof(struct netdev_desc))
74 
75 /* Operational parameters that usually are not changed. */
76 /* Time in jiffies before concluding the transmitter is hung. */
77 #define TX_TIMEOUT  (4*HZ)
78 #define PKT_BUF_SZ		1536	/* Size of each temporary Rx buffer.*/
79 
80 /* Include files, designed to support most kernel versions 2.0.0 and later. */
81 #include <linux/module.h>
82 #include <linux/kernel.h>
83 #include <linux/string.h>
84 #include <linux/timer.h>
85 #include <linux/errno.h>
86 #include <linux/ioport.h>
87 #include <linux/interrupt.h>
88 #include <linux/pci.h>
89 #include <linux/netdevice.h>
90 #include <linux/etherdevice.h>
91 #include <linux/skbuff.h>
92 #include <linux/init.h>
93 #include <linux/bitops.h>
94 #include <asm/uaccess.h>
95 #include <asm/processor.h>		/* Processor type for cache alignment. */
96 #include <asm/io.h>
97 #include <linux/delay.h>
98 #include <linux/spinlock.h>
99 #include <linux/dma-mapping.h>
100 #include <linux/crc32.h>
101 #include <linux/ethtool.h>
102 #include <linux/mii.h>
103 
104 /* These identify the driver base version and may not be removed. */
105 static const char version[] =
106 	KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE
107 	" Written by Donald Becker\n";
108 
109 MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
110 MODULE_DESCRIPTION("Sundance Alta Ethernet driver");
111 MODULE_LICENSE("GPL");
112 
113 module_param(debug, int, 0);
114 module_param(rx_copybreak, int, 0);
115 module_param_array(media, charp, NULL, 0);
116 module_param(flowctrl, int, 0);
117 MODULE_PARM_DESC(debug, "Sundance Alta debug level (0-5)");
118 MODULE_PARM_DESC(rx_copybreak, "Sundance Alta copy breakpoint for copy-only-tiny-frames");
119 MODULE_PARM_DESC(flowctrl, "Sundance Alta flow control [0|1]");
120 
121 /*
122 				Theory of Operation
123 
124 I. Board Compatibility
125 
126 This driver is designed for the Sundance Technologies "Alta" ST201 chip.
127 
128 II. Board-specific settings
129 
130 III. Driver operation
131 
132 IIIa. Ring buffers
133 
134 This driver uses two statically allocated fixed-size descriptor lists
135 formed into rings by a branch from the final descriptor to the beginning of
136 the list.  The ring sizes are set at compile time by RX/TX_RING_SIZE.
137 Some chips explicitly use only 2^N sized rings, while others use a
138 'next descriptor' pointer that the driver forms into rings.
139 
140 IIIb/c. Transmit/Receive Structure
141 
142 This driver uses a zero-copy receive and transmit scheme.
143 The driver allocates full frame size skbuffs for the Rx ring buffers at
144 open() time and passes the skb->data field to the chip as receive data
145 buffers.  When an incoming frame is less than RX_COPYBREAK bytes long,
146 a fresh skbuff is allocated and the frame is copied to the new skbuff.
147 When the incoming frame is larger, the skbuff is passed directly up the
148 protocol stack.  Buffers consumed this way are replaced by newly allocated
149 skbuffs in a later phase of receives.
150 
151 The RX_COPYBREAK value is chosen to trade-off the memory wasted by
152 using a full-sized skbuff for small frames vs. the copying costs of larger
153 frames.  New boards are typically used in generously configured machines
154 and the underfilled buffers have negligible impact compared to the benefit of
155 a single allocation size, so the default value of zero results in never
156 copying packets.  When copying is done, the cost is usually mitigated by using
157 a combined copy/checksum routine.  Copying also preloads the cache, which is
158 most useful with small frames.
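
In outline, rx_poll() below applies the copybreak test roughly as

	if (pkt_len < rx_copybreak &&
	    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL)
		copy the frame into the fresh skbuff and keep the ring buffer;
	else
		pass the full-sized ring skbuff up and let refill_rx() replace it.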
159 
160 A subtle aspect of the operation is that the IP header at offset 14 in an
161 ethernet frame isn't longword aligned for further processing.
162 Unaligned buffers are permitted by the Sundance hardware, so
163 frames are received into the skbuff at an offset of "+2", 16-byte aligning
164 the IP header.
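
Concretely, each receive buffer is prepared in init_ring() and refill_rx() as

	skb = netdev_alloc_skb(dev, np->rx_buf_sz + 2);
	skb_reserve(skb, 2);	/* 16 byte align the IP header */

so the 14-byte Ethernet header starts at offset 2 and the IP header lands at
offset 16, longword aligned.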
165 
166 IIId. Synchronization
167 
168 The driver runs as two independent, single-threaded flows of control.  One
169 is the send-packet routine, which enforces single-threaded use by the
170 dev->tbusy flag.  The other thread is the interrupt handler, which is single
171 threaded by the hardware and interrupt handling software.
172 
173 The send packet thread has partial control over the Tx ring and 'dev->tbusy'
174 flag.  It sets the tbusy flag whenever it's queuing a Tx packet. If the next
175 queue slot is empty, it clears the tbusy flag when finished otherwise it sets
176 the 'lp->tx_full' flag.
177 
178 The interrupt handler has exclusive control over the Rx ring and records stats
179 from the Tx ring.  After reaping the stats, it marks the Tx queue entry as
180 empty by incrementing the dirty_tx mark. Iff the 'lp->tx_full' flag is set, it
181 clears both the tx_full and tbusy flags.
182 
183 IV. Notes
184 
185 IVb. References
186 
187 The Sundance ST201 datasheet, preliminary version.
188 The Kendin KS8723 datasheet, preliminary version.
189 The ICplus IP100 datasheet, preliminary version.
190 http://www.scyld.com/expert/100mbps.html
191 http://www.scyld.com/expert/NWay.html
192 
193 IVc. Errata
194 
195 */
196 
197 /* Work-around for Kendin chip bugs. */
198 #ifndef CONFIG_SUNDANCE_MMIO
199 #define USE_IO_OPS 1
200 #endif
201 
202 static DEFINE_PCI_DEVICE_TABLE(sundance_pci_tbl) = {
203 	{ 0x1186, 0x1002, 0x1186, 0x1002, 0, 0, 0 },
204 	{ 0x1186, 0x1002, 0x1186, 0x1003, 0, 0, 1 },
205 	{ 0x1186, 0x1002, 0x1186, 0x1012, 0, 0, 2 },
206 	{ 0x1186, 0x1002, 0x1186, 0x1040, 0, 0, 3 },
207 	{ 0x1186, 0x1002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
208 	{ 0x13F0, 0x0201, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5 },
209 	{ 0x13F0, 0x0200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 6 },
210 	{ }
211 };
212 MODULE_DEVICE_TABLE(pci, sundance_pci_tbl);
213 
214 enum {
215 	netdev_io_size = 128
216 };
217 
218 struct pci_id_info {
	const char *name;
220 };
221 static const struct pci_id_info pci_id_tbl[] = {
222 	{"D-Link DFE-550TX FAST Ethernet Adapter"},
223 	{"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
224 	{"D-Link DFE-580TX 4 port Server Adapter"},
225 	{"D-Link DFE-530TXS FAST Ethernet Adapter"},
226 	{"D-Link DL10050-based FAST Ethernet Adapter"},
227 	{"Sundance Technology Alta"},
228 	{"IC Plus Corporation IP100A FAST Ethernet Adapter"},
229 	{ }	/* terminate list. */
230 };
231 
/* This driver was written to use PCI memory space; however, x86-oriented
   hardware often uses I/O space accesses. */
234 
235 /* Offsets to the device registers.
236    Unlike software-only systems, device drivers interact with complex hardware.
237    It's not useful to define symbolic names for every register bit in the
238    device.  The name can only partially document the semantics and make
239    the driver longer and more difficult to read.
240    In general, only the important configuration values or bits changed
241    multiple times should be defined symbolically.
242 */
243 enum alta_offsets {
244 	DMACtrl = 0x00,
245 	TxListPtr = 0x04,
246 	TxDMABurstThresh = 0x08,
247 	TxDMAUrgentThresh = 0x09,
248 	TxDMAPollPeriod = 0x0a,
249 	RxDMAStatus = 0x0c,
250 	RxListPtr = 0x10,
251 	DebugCtrl0 = 0x1a,
252 	DebugCtrl1 = 0x1c,
253 	RxDMABurstThresh = 0x14,
254 	RxDMAUrgentThresh = 0x15,
255 	RxDMAPollPeriod = 0x16,
256 	LEDCtrl = 0x1a,
257 	ASICCtrl = 0x30,
258 	EEData = 0x34,
259 	EECtrl = 0x36,
260 	FlashAddr = 0x40,
261 	FlashData = 0x44,
262 	WakeEvent = 0x45,
263 	TxStatus = 0x46,
264 	TxFrameId = 0x47,
265 	DownCounter = 0x18,
266 	IntrClear = 0x4a,
267 	IntrEnable = 0x4c,
268 	IntrStatus = 0x4e,
269 	MACCtrl0 = 0x50,
270 	MACCtrl1 = 0x52,
271 	StationAddr = 0x54,
272 	MaxFrameSize = 0x5A,
273 	RxMode = 0x5c,
274 	MIICtrl = 0x5e,
275 	MulticastFilter0 = 0x60,
276 	MulticastFilter1 = 0x64,
277 	RxOctetsLow = 0x68,
278 	RxOctetsHigh = 0x6a,
279 	TxOctetsLow = 0x6c,
280 	TxOctetsHigh = 0x6e,
281 	TxFramesOK = 0x70,
282 	RxFramesOK = 0x72,
283 	StatsCarrierError = 0x74,
284 	StatsLateColl = 0x75,
285 	StatsMultiColl = 0x76,
286 	StatsOneColl = 0x77,
287 	StatsTxDefer = 0x78,
288 	RxMissed = 0x79,
289 	StatsTxXSDefer = 0x7a,
290 	StatsTxAbort = 0x7b,
291 	StatsBcastTx = 0x7c,
292 	StatsBcastRx = 0x7d,
293 	StatsMcastTx = 0x7e,
294 	StatsMcastRx = 0x7f,
295 	/* Aliased and bogus values! */
296 	RxStatus = 0x0c,
297 };
298 
299 #define ASIC_HI_WORD(x)	((x) + 2)
300 
301 enum ASICCtrl_HiWord_bit {
302 	GlobalReset = 0x0001,
303 	RxReset = 0x0002,
304 	TxReset = 0x0004,
305 	DMAReset = 0x0008,
306 	FIFOReset = 0x0010,
307 	NetworkReset = 0x0020,
308 	HostReset = 0x0040,
309 	ResetBusy = 0x0400,
310 };
311 
312 /* Bits in the interrupt status/mask registers. */
313 enum intr_status_bits {
314 	IntrSummary=0x0001, IntrPCIErr=0x0002, IntrMACCtrl=0x0008,
315 	IntrTxDone=0x0004, IntrRxDone=0x0010, IntrRxStart=0x0020,
316 	IntrDrvRqst=0x0040,
317 	StatsMax=0x0080, LinkChange=0x0100,
318 	IntrTxDMADone=0x0200, IntrRxDMADone=0x0400,
319 };
320 
321 /* Bits in the RxMode register. */
322 enum rx_mode_bits {
323 	AcceptAllIPMulti=0x20, AcceptMultiHash=0x10, AcceptAll=0x08,
324 	AcceptBroadcast=0x04, AcceptMulticast=0x02, AcceptMyPhys=0x01,
325 };
326 /* Bits in MACCtrl. */
327 enum mac_ctrl0_bits {
328 	EnbFullDuplex=0x20, EnbRcvLargeFrame=0x40,
329 	EnbFlowCtrl=0x100, EnbPassRxCRC=0x200,
330 };
331 enum mac_ctrl1_bits {
332 	StatsEnable=0x0020,	StatsDisable=0x0040, StatsEnabled=0x0080,
333 	TxEnable=0x0100, TxDisable=0x0200, TxEnabled=0x0400,
334 	RxEnable=0x0800, RxDisable=0x1000, RxEnabled=0x2000,
335 };
336 
337 /* Bits in WakeEvent register. */
338 enum wake_event_bits {
339 	WakePktEnable = 0x01,
340 	MagicPktEnable = 0x02,
341 	LinkEventEnable = 0x04,
342 	WolEnable = 0x80,
343 };
344 
345 /* The Rx and Tx buffer descriptors. */
346 /* Note that using only 32 bit fields simplifies conversion to big-endian
347    architectures. */
348 struct netdev_desc {
349 	__le32 next_desc;
350 	__le32 status;
351 	struct desc_frag { __le32 addr, length; } frag[1];
352 };
353 
354 /* Bits in netdev_desc.status */
355 enum desc_status_bits {
356 	DescOwn=0x8000,
357 	DescEndPacket=0x4000,
358 	DescEndRing=0x2000,
359 	LastFrag=0x80000000,
360 	DescIntrOnTx=0x8000,
361 	DescIntrOnDMADone=0x80000000,
362 	DisableAlign = 0x00000001,
363 };
364 
365 #define PRIV_ALIGN	15 	/* Required alignment mask */
366 /* Use  __attribute__((aligned (L1_CACHE_BYTES)))  to maintain alignment
367    within the structure. */
368 #define MII_CNT		4
369 struct netdev_private {
370 	/* Descriptor rings first for alignment. */
371 	struct netdev_desc *rx_ring;
372 	struct netdev_desc *tx_ring;
373 	struct sk_buff* rx_skbuff[RX_RING_SIZE];
374 	struct sk_buff* tx_skbuff[TX_RING_SIZE];
	dma_addr_t tx_ring_dma;
	dma_addr_t rx_ring_dma;
377 	struct timer_list timer;		/* Media monitoring timer. */
378 	/* ethtool extra stats */
379 	struct {
380 		u64 tx_multiple_collisions;
381 		u64 tx_single_collisions;
382 		u64 tx_late_collisions;
383 		u64 tx_deferred;
384 		u64 tx_deferred_excessive;
385 		u64 tx_aborted;
386 		u64 tx_bcasts;
387 		u64 rx_bcasts;
388 		u64 tx_mcasts;
389 		u64 rx_mcasts;
390 	} xstats;
391 	/* Frequently used values: keep some adjacent for cache effect. */
392 	spinlock_t lock;
393 	int msg_enable;
394 	int chip_id;
395 	unsigned int cur_rx, dirty_rx;		/* Producer/consumer ring indices */
396 	unsigned int rx_buf_sz;			/* Based on MTU+slack. */
397 	struct netdev_desc *last_tx;		/* Last Tx descriptor used. */
398 	unsigned int cur_tx, dirty_tx;
	/* These values keep track of the transceiver/media in use. */
400 	unsigned int flowctrl:1;
401 	unsigned int default_port:4;		/* Last dev->if_port value. */
402 	unsigned int an_enable:1;
403 	unsigned int speed;
404 	unsigned int wol_enabled:1;			/* Wake on LAN enabled */
405 	struct tasklet_struct rx_tasklet;
406 	struct tasklet_struct tx_tasklet;
407 	int budget;
408 	int cur_task;
409 	/* Multicast and receive mode. */
410 	spinlock_t mcastlock;			/* SMP lock multicast updates. */
411 	u16 mcast_filter[4];
412 	/* MII transceiver section. */
413 	struct mii_if_info mii_if;
414 	int mii_preamble_required;
415 	unsigned char phys[MII_CNT];		/* MII device addresses, only first one used. */
416 	struct pci_dev *pci_dev;
417 	void __iomem *base;
418 	spinlock_t statlock;
419 };
420 
421 /* The station address location in the EEPROM. */
422 #define EEPROM_SA_OFFSET	0x10
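/* sundance_probe1() reads three 16-bit words starting at this offset to
   recover the station (MAC) address; see eeprom_read() below. */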
423 #define DEFAULT_INTR (IntrRxDMADone | IntrPCIErr | \
424 			IntrDrvRqst | IntrTxDone | StatsMax | \
425 			LinkChange)
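/* intr_handler() masks IntrRxDone/IntrRxDMADone out of this set while the
   rx_poll() tasklet is scheduled; rx_poll() restores the full mask once the
   ring has been drained. */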
426 
427 static int  change_mtu(struct net_device *dev, int new_mtu);
428 static int  eeprom_read(void __iomem *ioaddr, int location);
429 static int  mdio_read(struct net_device *dev, int phy_id, int location);
430 static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
431 static int  mdio_wait_link(struct net_device *dev, int wait);
432 static int  netdev_open(struct net_device *dev);
433 static void check_duplex(struct net_device *dev);
434 static void netdev_timer(unsigned long data);
435 static void tx_timeout(struct net_device *dev);
436 static void init_ring(struct net_device *dev);
437 static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev);
438 static int reset_tx (struct net_device *dev);
439 static irqreturn_t intr_handler(int irq, void *dev_instance);
440 static void rx_poll(unsigned long data);
441 static void tx_poll(unsigned long data);
442 static void refill_rx (struct net_device *dev);
443 static void netdev_error(struct net_device *dev, int intr_status);
445 static void set_rx_mode(struct net_device *dev);
446 static int __set_mac_addr(struct net_device *dev);
447 static int sundance_set_mac_addr(struct net_device *dev, void *data);
448 static struct net_device_stats *get_stats(struct net_device *dev);
449 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
450 static int  netdev_close(struct net_device *dev);
451 static const struct ethtool_ops ethtool_ops;
452 
453 static void sundance_reset(struct net_device *dev, unsigned long reset_cmd)
454 {
455 	struct netdev_private *np = netdev_priv(dev);
456 	void __iomem *ioaddr = np->base + ASICCtrl;
457 	int countdown;
458 
	/* ST201 documentation states ASICCtrl is a 32-bit register */
460 	iowrite32 (reset_cmd | ioread32 (ioaddr), ioaddr);
461 	/* ST201 documentation states reset can take up to 1 ms */
462 	countdown = 10 + 1;
463 	while (ioread32 (ioaddr) & (ResetBusy << 16)) {
464 		if (--countdown == 0) {
465 			printk(KERN_WARNING "%s : reset not completed !!\n", dev->name);
466 			break;
467 		}
468 		udelay(100);
469 	}
470 }
471 
472 #ifdef CONFIG_NET_POLL_CONTROLLER
473 static void sundance_poll_controller(struct net_device *dev)
474 {
475 	struct netdev_private *np = netdev_priv(dev);
476 
477 	disable_irq(np->pci_dev->irq);
478 	intr_handler(np->pci_dev->irq, dev);
479 	enable_irq(np->pci_dev->irq);
480 }
481 #endif
482 
483 static const struct net_device_ops netdev_ops = {
484 	.ndo_open		= netdev_open,
485 	.ndo_stop		= netdev_close,
486 	.ndo_start_xmit		= start_tx,
487 	.ndo_get_stats 		= get_stats,
488 	.ndo_set_rx_mode	= set_rx_mode,
489 	.ndo_do_ioctl 		= netdev_ioctl,
490 	.ndo_tx_timeout		= tx_timeout,
491 	.ndo_change_mtu		= change_mtu,
492 	.ndo_set_mac_address 	= sundance_set_mac_addr,
493 	.ndo_validate_addr	= eth_validate_addr,
494 #ifdef CONFIG_NET_POLL_CONTROLLER
495 	.ndo_poll_controller 	= sundance_poll_controller,
496 #endif
497 };
498 
499 static int sundance_probe1(struct pci_dev *pdev,
500 			   const struct pci_device_id *ent)
501 {
502 	struct net_device *dev;
503 	struct netdev_private *np;
504 	static int card_idx;
505 	int chip_idx = ent->driver_data;
506 	int irq;
507 	int i;
508 	void __iomem *ioaddr;
509 	u16 mii_ctl;
510 	void *ring_space;
511 	dma_addr_t ring_dma;
512 #ifdef USE_IO_OPS
513 	int bar = 0;
514 #else
515 	int bar = 1;
516 #endif
517 	int phy, phy_end, phy_idx = 0;
518 
519 /* when built into the kernel, we only print version if device is found */
520 #ifndef MODULE
521 	static int printed_version;
522 	if (!printed_version++)
523 		printk(version);
524 #endif
525 
526 	if (pci_enable_device(pdev))
527 		return -EIO;
528 	pci_set_master(pdev);
529 
530 	irq = pdev->irq;
531 
532 	dev = alloc_etherdev(sizeof(*np));
533 	if (!dev)
534 		return -ENOMEM;
535 	SET_NETDEV_DEV(dev, &pdev->dev);
536 
537 	if (pci_request_regions(pdev, DRV_NAME))
538 		goto err_out_netdev;
539 
540 	ioaddr = pci_iomap(pdev, bar, netdev_io_size);
541 	if (!ioaddr)
542 		goto err_out_res;
543 
544 	for (i = 0; i < 3; i++)
545 		((__le16 *)dev->dev_addr)[i] =
546 			cpu_to_le16(eeprom_read(ioaddr, i + EEPROM_SA_OFFSET));
547 
548 	np = netdev_priv(dev);
549 	np->base = ioaddr;
550 	np->pci_dev = pdev;
551 	np->chip_id = chip_idx;
552 	np->msg_enable = (1 << debug) - 1;
553 	spin_lock_init(&np->lock);
554 	spin_lock_init(&np->statlock);
555 	tasklet_init(&np->rx_tasklet, rx_poll, (unsigned long)dev);
556 	tasklet_init(&np->tx_tasklet, tx_poll, (unsigned long)dev);
557 
558 	ring_space = dma_alloc_coherent(&pdev->dev, TX_TOTAL_SIZE,
559 			&ring_dma, GFP_KERNEL);
560 	if (!ring_space)
561 		goto err_out_cleardev;
562 	np->tx_ring = (struct netdev_desc *)ring_space;
563 	np->tx_ring_dma = ring_dma;
564 
565 	ring_space = dma_alloc_coherent(&pdev->dev, RX_TOTAL_SIZE,
566 			&ring_dma, GFP_KERNEL);
567 	if (!ring_space)
568 		goto err_out_unmap_tx;
569 	np->rx_ring = (struct netdev_desc *)ring_space;
570 	np->rx_ring_dma = ring_dma;
571 
572 	np->mii_if.dev = dev;
573 	np->mii_if.mdio_read = mdio_read;
574 	np->mii_if.mdio_write = mdio_write;
575 	np->mii_if.phy_id_mask = 0x1f;
576 	np->mii_if.reg_num_mask = 0x1f;
577 
578 	/* The chip-specific entries in the device structure. */
579 	dev->netdev_ops = &netdev_ops;
580 	SET_ETHTOOL_OPS(dev, &ethtool_ops);
581 	dev->watchdog_timeo = TX_TIMEOUT;
582 
583 	pci_set_drvdata(pdev, dev);
584 
585 	i = register_netdev(dev);
586 	if (i)
587 		goto err_out_unmap_rx;
588 
589 	printk(KERN_INFO "%s: %s at %p, %pM, IRQ %d.\n",
590 	       dev->name, pci_id_tbl[chip_idx].name, ioaddr,
591 	       dev->dev_addr, irq);
592 
593 	np->phys[0] = 1;		/* Default setting */
594 	np->mii_preamble_required++;
595 
596 	/*
597 	 * It seems some phys doesn't deal well with address 0 being accessed
598 	 * first
599 	 */
600 	if (sundance_pci_tbl[np->chip_id].device == 0x0200) {
601 		phy = 0;
602 		phy_end = 31;
603 	} else {
604 		phy = 1;
605 		phy_end = 32;	/* wraps to zero, due to 'phy & 0x1f' */
606 	}
607 	for (; phy <= phy_end && phy_idx < MII_CNT; phy++) {
608 		int phyx = phy & 0x1f;
609 		int mii_status = mdio_read(dev, phyx, MII_BMSR);
610 		if (mii_status != 0xffff  &&  mii_status != 0x0000) {
611 			np->phys[phy_idx++] = phyx;
612 			np->mii_if.advertising = mdio_read(dev, phyx, MII_ADVERTISE);
613 			if ((mii_status & 0x0040) == 0)
614 				np->mii_preamble_required++;
615 			printk(KERN_INFO "%s: MII PHY found at address %d, status "
616 				   "0x%4.4x advertising %4.4x.\n",
617 				   dev->name, phyx, mii_status, np->mii_if.advertising);
618 		}
619 	}
620 	np->mii_preamble_required--;
621 
622 	if (phy_idx == 0) {
623 		printk(KERN_INFO "%s: No MII transceiver found, aborting.  ASIC status %x\n",
624 			   dev->name, ioread32(ioaddr + ASICCtrl));
625 		goto err_out_unregister;
626 	}
627 
628 	np->mii_if.phy_id = np->phys[0];
629 
630 	/* Parse override configuration */
631 	np->an_enable = 1;
632 	if (card_idx < MAX_UNITS) {
633 		if (media[card_idx] != NULL) {
634 			np->an_enable = 0;
635 			if (strcmp (media[card_idx], "100mbps_fd") == 0 ||
636 			    strcmp (media[card_idx], "4") == 0) {
637 				np->speed = 100;
638 				np->mii_if.full_duplex = 1;
639 			} else if (strcmp (media[card_idx], "100mbps_hd") == 0 ||
640 				   strcmp (media[card_idx], "3") == 0) {
641 				np->speed = 100;
642 				np->mii_if.full_duplex = 0;
643 			} else if (strcmp (media[card_idx], "10mbps_fd") == 0 ||
644 				   strcmp (media[card_idx], "2") == 0) {
645 				np->speed = 10;
646 				np->mii_if.full_duplex = 1;
647 			} else if (strcmp (media[card_idx], "10mbps_hd") == 0 ||
648 				   strcmp (media[card_idx], "1") == 0) {
649 				np->speed = 10;
650 				np->mii_if.full_duplex = 0;
651 			} else {
652 				np->an_enable = 1;
653 			}
654 		}
655 		if (flowctrl == 1)
656 			np->flowctrl = 1;
657 	}
658 
659 	/* Fibre PHY? */
660 	if (ioread32 (ioaddr + ASICCtrl) & 0x80) {
661 		/* Default 100Mbps Full */
662 		if (np->an_enable) {
663 			np->speed = 100;
664 			np->mii_if.full_duplex = 1;
665 			np->an_enable = 0;
666 		}
667 	}
668 	/* Reset PHY */
669 	mdio_write (dev, np->phys[0], MII_BMCR, BMCR_RESET);
670 	mdelay (300);
	/* If flow control is enabled, we need to advertise it. */
672 	if (np->flowctrl)
673 		mdio_write (dev, np->phys[0], MII_ADVERTISE, np->mii_if.advertising | 0x0400);
674 	mdio_write (dev, np->phys[0], MII_BMCR, BMCR_ANENABLE|BMCR_ANRESTART);
675 	/* Force media type */
676 	if (!np->an_enable) {
677 		mii_ctl = 0;
678 		mii_ctl |= (np->speed == 100) ? BMCR_SPEED100 : 0;
679 		mii_ctl |= (np->mii_if.full_duplex) ? BMCR_FULLDPLX : 0;
680 		mdio_write (dev, np->phys[0], MII_BMCR, mii_ctl);
681 		printk (KERN_INFO "Override speed=%d, %s duplex\n",
682 			np->speed, np->mii_if.full_duplex ? "Full" : "Half");
683 
684 	}
685 
686 	/* Perhaps move the reset here? */
687 	/* Reset the chip to erase previous misconfiguration. */
688 	if (netif_msg_hw(np))
689 		printk("ASIC Control is %x.\n", ioread32(ioaddr + ASICCtrl));
690 	sundance_reset(dev, 0x00ff << 16);
691 	if (netif_msg_hw(np))
692 		printk("ASIC Control is now %x.\n", ioread32(ioaddr + ASICCtrl));
693 
694 	card_idx++;
695 	return 0;
696 
697 err_out_unregister:
698 	unregister_netdev(dev);
699 err_out_unmap_rx:
700 	dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE,
701 		np->rx_ring, np->rx_ring_dma);
702 err_out_unmap_tx:
703 	dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE,
704 		np->tx_ring, np->tx_ring_dma);
705 err_out_cleardev:
706 	pci_set_drvdata(pdev, NULL);
707 	pci_iounmap(pdev, ioaddr);
708 err_out_res:
709 	pci_release_regions(pdev);
710 err_out_netdev:
711 	free_netdev (dev);
712 	return -ENODEV;
713 }
714 
715 static int change_mtu(struct net_device *dev, int new_mtu)
716 {
717 	if ((new_mtu < 68) || (new_mtu > 8191)) /* Set by RxDMAFrameLen */
718 		return -EINVAL;
719 	if (netif_running(dev))
720 		return -EBUSY;
721 	dev->mtu = new_mtu;
722 	return 0;
723 }
724 
725 #define eeprom_delay(ee_addr)	ioread32(ee_addr)
726 /* Read the EEPROM and MII Management Data I/O (MDIO) interfaces. */
727 static int eeprom_read(void __iomem *ioaddr, int location)
728 {
729 	int boguscnt = 10000;		/* Typical 1900 ticks. */
730 	iowrite16(0x0200 | (location & 0xff), ioaddr + EECtrl);
731 	do {
732 		eeprom_delay(ioaddr + EECtrl);
733 		if (! (ioread16(ioaddr + EECtrl) & 0x8000)) {
734 			return ioread16(ioaddr + EEData);
735 		}
736 	} while (--boguscnt > 0);
737 	return 0;
738 }
739 
740 /*  MII transceiver control section.
741 	Read and write the MII registers using software-generated serial
742 	MDIO protocol.  See the MII specifications or DP83840A data sheet
743 	for details.
744 
	The maximum data clock rate is 2.5 MHz.  The minimum timing is usually
	met by back-to-back 33 MHz PCI cycles. */
747 #define mdio_delay() ioread8(mdio_addr)
748 
749 enum mii_reg_bits {
750 	MDIO_ShiftClk=0x0001, MDIO_Data=0x0002, MDIO_EnbOutput=0x0004,
751 };
752 #define MDIO_EnbIn  (0)
753 #define MDIO_WRITE0 (MDIO_EnbOutput)
754 #define MDIO_WRITE1 (MDIO_Data | MDIO_EnbOutput)
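/* The command words shifted out by mdio_read() and mdio_write() follow the
   standard clause 22 management frame, MSB first: start (01), opcode
   (10 = read, 01 = write), 5-bit PHY address, 5-bit register address,
   turnaround, then 16 data bits.  The constant 0xf6 in mdio_read() carries
   two extra preamble ones plus the start and read opcode; 0x5002 in
   mdio_write() carries the start, write opcode and turnaround bits. */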
755 
756 /* Generate the preamble required for initial synchronization and
757    a few older transceivers. */
758 static void mdio_sync(void __iomem *mdio_addr)
759 {
760 	int bits = 32;
761 
762 	/* Establish sync by sending at least 32 logic ones. */
763 	while (--bits >= 0) {
764 		iowrite8(MDIO_WRITE1, mdio_addr);
765 		mdio_delay();
766 		iowrite8(MDIO_WRITE1 | MDIO_ShiftClk, mdio_addr);
767 		mdio_delay();
768 	}
769 }
770 
771 static int mdio_read(struct net_device *dev, int phy_id, int location)
772 {
773 	struct netdev_private *np = netdev_priv(dev);
774 	void __iomem *mdio_addr = np->base + MIICtrl;
775 	int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
776 	int i, retval = 0;
777 
778 	if (np->mii_preamble_required)
779 		mdio_sync(mdio_addr);
780 
781 	/* Shift the read command bits out. */
782 	for (i = 15; i >= 0; i--) {
783 		int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
784 
785 		iowrite8(dataval, mdio_addr);
786 		mdio_delay();
787 		iowrite8(dataval | MDIO_ShiftClk, mdio_addr);
788 		mdio_delay();
789 	}
	/* Read the two transition bits, 16 data bits, and one wire-idle bit. */
791 	for (i = 19; i > 0; i--) {
792 		iowrite8(MDIO_EnbIn, mdio_addr);
793 		mdio_delay();
794 		retval = (retval << 1) | ((ioread8(mdio_addr) & MDIO_Data) ? 1 : 0);
795 		iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
796 		mdio_delay();
797 	}
798 	return (retval>>1) & 0xffff;
799 }
800 
801 static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
802 {
803 	struct netdev_private *np = netdev_priv(dev);
804 	void __iomem *mdio_addr = np->base + MIICtrl;
805 	int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location<<18) | value;
806 	int i;
807 
808 	if (np->mii_preamble_required)
809 		mdio_sync(mdio_addr);
810 
811 	/* Shift the command bits out. */
812 	for (i = 31; i >= 0; i--) {
813 		int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
814 
815 		iowrite8(dataval, mdio_addr);
816 		mdio_delay();
817 		iowrite8(dataval | MDIO_ShiftClk, mdio_addr);
818 		mdio_delay();
819 	}
820 	/* Clear out extra bits. */
821 	for (i = 2; i > 0; i--) {
822 		iowrite8(MDIO_EnbIn, mdio_addr);
823 		mdio_delay();
824 		iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
825 		mdio_delay();
826 	}
827 }
828 
829 static int mdio_wait_link(struct net_device *dev, int wait)
830 {
831 	int bmsr;
832 	int phy_id;
833 	struct netdev_private *np;
834 
835 	np = netdev_priv(dev);
836 	phy_id = np->phys[0];
837 
838 	do {
839 		bmsr = mdio_read(dev, phy_id, MII_BMSR);
		if (bmsr & 0x0004)	/* BMSR_LSTATUS: link is up */
841 			return 0;
842 		mdelay(1);
843 	} while (--wait > 0);
844 	return -1;
845 }
846 
847 static int netdev_open(struct net_device *dev)
848 {
849 	struct netdev_private *np = netdev_priv(dev);
850 	void __iomem *ioaddr = np->base;
851 	const int irq = np->pci_dev->irq;
852 	unsigned long flags;
853 	int i;
854 
855 	sundance_reset(dev, 0x00ff << 16);
856 
857 	i = request_irq(irq, intr_handler, IRQF_SHARED, dev->name, dev);
858 	if (i)
859 		return i;
860 
861 	if (netif_msg_ifup(np))
862 		printk(KERN_DEBUG "%s: netdev_open() irq %d\n", dev->name, irq);
863 
864 	init_ring(dev);
865 
866 	iowrite32(np->rx_ring_dma, ioaddr + RxListPtr);
867 	/* The Tx list pointer is written as packets are queued. */
868 
869 	/* Initialize other registers. */
870 	__set_mac_addr(dev);
871 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
872 	iowrite16(dev->mtu + 18, ioaddr + MaxFrameSize);
873 #else
874 	iowrite16(dev->mtu + 14, ioaddr + MaxFrameSize);
875 #endif
876 	if (dev->mtu > 2047)
877 		iowrite32(ioread32(ioaddr + ASICCtrl) | 0x0C, ioaddr + ASICCtrl);
878 
879 	/* Configure the PCI bus bursts and FIFO thresholds. */
880 
881 	if (dev->if_port == 0)
882 		dev->if_port = np->default_port;
883 
884 	spin_lock_init(&np->mcastlock);
885 
886 	set_rx_mode(dev);
887 	iowrite16(0, ioaddr + IntrEnable);
888 	iowrite16(0, ioaddr + DownCounter);
889 	/* Set the chip to poll every N*320nsec. */
890 	iowrite8(100, ioaddr + RxDMAPollPeriod);
891 	iowrite8(127, ioaddr + TxDMAPollPeriod);
892 	/* Fix DFE-580TX packet drop issue */
893 	if (np->pci_dev->revision >= 0x14)
894 		iowrite8(0x01, ioaddr + DebugCtrl1);
895 	netif_start_queue(dev);
896 
897 	spin_lock_irqsave(&np->lock, flags);
898 	reset_tx(dev);
899 	spin_unlock_irqrestore(&np->lock, flags);
900 
901 	iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);
902 
903 	/* Disable Wol */
904 	iowrite8(ioread8(ioaddr + WakeEvent) | 0x00, ioaddr + WakeEvent);
905 	np->wol_enabled = 0;
906 
907 	if (netif_msg_ifup(np))
908 		printk(KERN_DEBUG "%s: Done netdev_open(), status: Rx %x Tx %x "
909 			   "MAC Control %x, %4.4x %4.4x.\n",
910 			   dev->name, ioread32(ioaddr + RxStatus), ioread8(ioaddr + TxStatus),
911 			   ioread32(ioaddr + MACCtrl0),
912 			   ioread16(ioaddr + MACCtrl1), ioread16(ioaddr + MACCtrl0));
913 
914 	/* Set the timer to check for link beat. */
915 	init_timer(&np->timer);
916 	np->timer.expires = jiffies + 3*HZ;
917 	np->timer.data = (unsigned long)dev;
918 	np->timer.function = netdev_timer;				/* timer handler */
919 	add_timer(&np->timer);
920 
921 	/* Enable interrupts by setting the interrupt mask. */
922 	iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
923 
924 	return 0;
925 }
926 
927 static void check_duplex(struct net_device *dev)
928 {
929 	struct netdev_private *np = netdev_priv(dev);
930 	void __iomem *ioaddr = np->base;
931 	int mii_lpa = mdio_read(dev, np->phys[0], MII_LPA);
932 	int negotiated = mii_lpa & np->mii_if.advertising;
933 	int duplex;
934 
935 	/* Force media */
936 	if (!np->an_enable || mii_lpa == 0xffff) {
937 		if (np->mii_if.full_duplex)
938 			iowrite16 (ioread16 (ioaddr + MACCtrl0) | EnbFullDuplex,
939 				ioaddr + MACCtrl0);
940 		return;
941 	}
942 
943 	/* Autonegotiation */
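	/* Full duplex if the common capability set includes 100BASE-TX full
	   duplex (0x0100), or 10BASE-T full duplex (0x0040) with no 100 Mbps
	   mode negotiated. */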
944 	duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
945 	if (np->mii_if.full_duplex != duplex) {
946 		np->mii_if.full_duplex = duplex;
947 		if (netif_msg_link(np))
948 			printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d "
949 				   "negotiated capability %4.4x.\n", dev->name,
950 				   duplex ? "full" : "half", np->phys[0], negotiated);
951 		iowrite16(ioread16(ioaddr + MACCtrl0) | (duplex ? 0x20 : 0), ioaddr + MACCtrl0);
952 	}
953 }
954 
955 static void netdev_timer(unsigned long data)
956 {
957 	struct net_device *dev = (struct net_device *)data;
958 	struct netdev_private *np = netdev_priv(dev);
959 	void __iomem *ioaddr = np->base;
960 	int next_tick = 10*HZ;
961 
962 	if (netif_msg_timer(np)) {
963 		printk(KERN_DEBUG "%s: Media selection timer tick, intr status %4.4x, "
964 			   "Tx %x Rx %x.\n",
965 			   dev->name, ioread16(ioaddr + IntrEnable),
966 			   ioread8(ioaddr + TxStatus), ioread32(ioaddr + RxStatus));
967 	}
968 	check_duplex(dev);
969 	np->timer.expires = jiffies + next_tick;
970 	add_timer(&np->timer);
971 }
972 
973 static void tx_timeout(struct net_device *dev)
974 {
975 	struct netdev_private *np = netdev_priv(dev);
976 	void __iomem *ioaddr = np->base;
977 	unsigned long flag;
978 
979 	netif_stop_queue(dev);
980 	tasklet_disable(&np->tx_tasklet);
981 	iowrite16(0, ioaddr + IntrEnable);
982 	printk(KERN_WARNING "%s: Transmit timed out, TxStatus %2.2x "
983 		   "TxFrameId %2.2x,"
984 		   " resetting...\n", dev->name, ioread8(ioaddr + TxStatus),
985 		   ioread8(ioaddr + TxFrameId));
986 
987 	{
988 		int i;
989 		for (i=0; i<TX_RING_SIZE; i++) {
990 			printk(KERN_DEBUG "%02x %08llx %08x %08x(%02x) %08x %08x\n", i,
991 				(unsigned long long)(np->tx_ring_dma + i*sizeof(*np->tx_ring)),
992 				le32_to_cpu(np->tx_ring[i].next_desc),
993 				le32_to_cpu(np->tx_ring[i].status),
994 				(le32_to_cpu(np->tx_ring[i].status) >> 2) & 0xff,
995 				le32_to_cpu(np->tx_ring[i].frag[0].addr),
996 				le32_to_cpu(np->tx_ring[i].frag[0].length));
997 		}
998 		printk(KERN_DEBUG "TxListPtr=%08x netif_queue_stopped=%d\n",
999 			ioread32(np->base + TxListPtr),
1000 			netif_queue_stopped(dev));
1001 		printk(KERN_DEBUG "cur_tx=%d(%02x) dirty_tx=%d(%02x)\n",
1002 			np->cur_tx, np->cur_tx % TX_RING_SIZE,
1003 			np->dirty_tx, np->dirty_tx % TX_RING_SIZE);
1004 		printk(KERN_DEBUG "cur_rx=%d dirty_rx=%d\n", np->cur_rx, np->dirty_rx);
1005 		printk(KERN_DEBUG "cur_task=%d\n", np->cur_task);
1006 	}
1007 	spin_lock_irqsave(&np->lock, flag);
1008 
	/* Stop and restart the chip's Tx processes. */
1010 	reset_tx(dev);
1011 	spin_unlock_irqrestore(&np->lock, flag);
1012 
1013 	dev->if_port = 0;
1014 
1015 	dev->trans_start = jiffies; /* prevent tx timeout */
1016 	dev->stats.tx_errors++;
1017 	if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
1018 		netif_wake_queue(dev);
1019 	}
1020 	iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
1021 	tasklet_enable(&np->tx_tasklet);
1022 }
1023 
1024 
1025 /* Initialize the Rx and Tx rings, along with various 'dev' bits. */
1026 static void init_ring(struct net_device *dev)
1027 {
1028 	struct netdev_private *np = netdev_priv(dev);
1029 	int i;
1030 
1031 	np->cur_rx = np->cur_tx = 0;
1032 	np->dirty_rx = np->dirty_tx = 0;
1033 	np->cur_task = 0;
1034 
1035 	np->rx_buf_sz = (dev->mtu <= 1520 ? PKT_BUF_SZ : dev->mtu + 16);
1036 
1037 	/* Initialize all Rx descriptors. */
1038 	for (i = 0; i < RX_RING_SIZE; i++) {
1039 		np->rx_ring[i].next_desc = cpu_to_le32(np->rx_ring_dma +
1040 			((i+1)%RX_RING_SIZE)*sizeof(*np->rx_ring));
1041 		np->rx_ring[i].status = 0;
1042 		np->rx_ring[i].frag[0].length = 0;
1043 		np->rx_skbuff[i] = NULL;
1044 	}
1045 
1046 	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
1047 	for (i = 0; i < RX_RING_SIZE; i++) {
1048 		struct sk_buff *skb =
1049 			netdev_alloc_skb(dev, np->rx_buf_sz + 2);
1050 		np->rx_skbuff[i] = skb;
1051 		if (skb == NULL)
1052 			break;
1053 		skb_reserve(skb, 2);	/* 16 byte align the IP header. */
1054 		np->rx_ring[i].frag[0].addr = cpu_to_le32(
1055 			dma_map_single(&np->pci_dev->dev, skb->data,
1056 				np->rx_buf_sz, DMA_FROM_DEVICE));
1057 		if (dma_mapping_error(&np->pci_dev->dev,
1058 					np->rx_ring[i].frag[0].addr)) {
1059 			dev_kfree_skb(skb);
1060 			np->rx_skbuff[i] = NULL;
1061 			break;
1062 		}
1063 		np->rx_ring[i].frag[0].length = cpu_to_le32(np->rx_buf_sz | LastFrag);
1064 	}
1065 	np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
1066 
1067 	for (i = 0; i < TX_RING_SIZE; i++) {
1068 		np->tx_skbuff[i] = NULL;
1069 		np->tx_ring[i].status = 0;
1070 	}
1071 }
1072 
1073 static void tx_poll (unsigned long data)
1074 {
1075 	struct net_device *dev = (struct net_device *)data;
1076 	struct netdev_private *np = netdev_priv(dev);
1077 	unsigned head = np->cur_task % TX_RING_SIZE;
1078 	struct netdev_desc *txdesc =
1079 		&np->tx_ring[(np->cur_tx - 1) % TX_RING_SIZE];
1080 
1081 	/* Chain the next pointer */
1082 	for (; np->cur_tx - np->cur_task > 0; np->cur_task++) {
1083 		int entry = np->cur_task % TX_RING_SIZE;
1084 		txdesc = &np->tx_ring[entry];
1085 		if (np->last_tx) {
1086 			np->last_tx->next_desc = cpu_to_le32(np->tx_ring_dma +
1087 				entry*sizeof(struct netdev_desc));
1088 		}
1089 		np->last_tx = txdesc;
1090 	}
1091 	/* Indicate the latest descriptor of tx ring */
1092 	txdesc->status |= cpu_to_le32(DescIntrOnTx);
1093 
1094 	if (ioread32 (np->base + TxListPtr) == 0)
1095 		iowrite32 (np->tx_ring_dma + head * sizeof(struct netdev_desc),
1096 			np->base + TxListPtr);
1097 }
1098 
1099 static netdev_tx_t
1100 start_tx (struct sk_buff *skb, struct net_device *dev)
1101 {
1102 	struct netdev_private *np = netdev_priv(dev);
1103 	struct netdev_desc *txdesc;
1104 	unsigned entry;
1105 
1106 	/* Calculate the next Tx descriptor entry. */
1107 	entry = np->cur_tx % TX_RING_SIZE;
1108 	np->tx_skbuff[entry] = skb;
1109 	txdesc = &np->tx_ring[entry];
1110 
1111 	txdesc->next_desc = 0;
1112 	txdesc->status = cpu_to_le32 ((entry << 2) | DisableAlign);
1113 	txdesc->frag[0].addr = cpu_to_le32(dma_map_single(&np->pci_dev->dev,
1114 				skb->data, skb->len, DMA_TO_DEVICE));
1115 	if (dma_mapping_error(&np->pci_dev->dev,
1116 				txdesc->frag[0].addr))
1117 			goto drop_frame;
1118 	txdesc->frag[0].length = cpu_to_le32 (skb->len | LastFrag);
1119 
1120 	/* Increment cur_tx before tasklet_schedule() */
1121 	np->cur_tx++;
1122 	mb();
1123 	/* Schedule a tx_poll() task */
1124 	tasklet_schedule(&np->tx_tasklet);
1125 
1126 	/* On some architectures: explicitly flush cache lines here. */
1127 	if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 1 &&
1128 	    !netif_queue_stopped(dev)) {
1129 		/* do nothing */
1130 	} else {
1131 		netif_stop_queue (dev);
1132 	}
1133 	if (netif_msg_tx_queued(np)) {
1134 		printk (KERN_DEBUG
1135 			"%s: Transmit frame #%d queued in slot %d.\n",
1136 			dev->name, np->cur_tx, entry);
1137 	}
1138 	return NETDEV_TX_OK;
1139 
1140 drop_frame:
1141 	dev_kfree_skb(skb);
1142 	np->tx_skbuff[entry] = NULL;
1143 	dev->stats.tx_dropped++;
1144 	return NETDEV_TX_OK;
1145 }
1146 
/* Reset the hardware Tx path and free all Tx buffers. */
1148 static int
1149 reset_tx (struct net_device *dev)
1150 {
1151 	struct netdev_private *np = netdev_priv(dev);
1152 	void __iomem *ioaddr = np->base;
1153 	struct sk_buff *skb;
1154 	int i;
1155 
	/* Reset the Tx logic; TxListPtr will be cleared by the reset. */
1157 	iowrite16 (TxDisable, ioaddr + MACCtrl1);
1158 	sundance_reset(dev, (NetworkReset|FIFOReset|DMAReset|TxReset) << 16);
1159 
	/* Free all Tx skbuffs. */
1161 	for (i = 0; i < TX_RING_SIZE; i++) {
1162 		np->tx_ring[i].next_desc = 0;
1163 
1164 		skb = np->tx_skbuff[i];
1165 		if (skb) {
1166 			dma_unmap_single(&np->pci_dev->dev,
1167 				le32_to_cpu(np->tx_ring[i].frag[0].addr),
1168 				skb->len, DMA_TO_DEVICE);
1169 			dev_kfree_skb_any(skb);
1170 			np->tx_skbuff[i] = NULL;
1171 			dev->stats.tx_dropped++;
1172 		}
1173 	}
1174 	np->cur_tx = np->dirty_tx = 0;
1175 	np->cur_task = 0;
1176 
1177 	np->last_tx = NULL;
1178 	iowrite8(127, ioaddr + TxDMAPollPeriod);
1179 
1180 	iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);
1181 	return 0;
1182 }
1183 
/* The interrupt handler cleans up after the Tx thread
   and schedules the Rx tasklet to do the receive work. */
1186 static irqreturn_t intr_handler(int irq, void *dev_instance)
1187 {
1188 	struct net_device *dev = (struct net_device *)dev_instance;
1189 	struct netdev_private *np = netdev_priv(dev);
1190 	void __iomem *ioaddr = np->base;
1191 	int hw_frame_id;
1192 	int tx_cnt;
1193 	int tx_status;
1194 	int handled = 0;
1195 	int i;
1196 
1197 
1198 	do {
1199 		int intr_status = ioread16(ioaddr + IntrStatus);
1200 		iowrite16(intr_status, ioaddr + IntrStatus);
1201 
1202 		if (netif_msg_intr(np))
1203 			printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n",
1204 				   dev->name, intr_status);
1205 
1206 		if (!(intr_status & DEFAULT_INTR))
1207 			break;
1208 
1209 		handled = 1;
1210 
1211 		if (intr_status & (IntrRxDMADone)) {
1212 			iowrite16(DEFAULT_INTR & ~(IntrRxDone|IntrRxDMADone),
1213 					ioaddr + IntrEnable);
1214 			if (np->budget < 0)
1215 				np->budget = RX_BUDGET;
1216 			tasklet_schedule(&np->rx_tasklet);
1217 		}
1218 		if (intr_status & (IntrTxDone | IntrDrvRqst)) {
1219 			tx_status = ioread16 (ioaddr + TxStatus);
1220 			for (tx_cnt=32; tx_status & 0x80; --tx_cnt) {
1221 				if (netif_msg_tx_done(np))
					printk("%s: Transmit status is %2.2x.\n",
					       dev->name, tx_status);
1225 				if (tx_status & 0x1e) {
1226 					if (netif_msg_tx_err(np))
1227 						printk("%s: Transmit error status %4.4x.\n",
1228 							   dev->name, tx_status);
1229 					dev->stats.tx_errors++;
1230 					if (tx_status & 0x10)
1231 						dev->stats.tx_fifo_errors++;
1232 					if (tx_status & 0x08)
1233 						dev->stats.collisions++;
1234 					if (tx_status & 0x04)
1235 						dev->stats.tx_fifo_errors++;
1236 					if (tx_status & 0x02)
1237 						dev->stats.tx_window_errors++;
1238 
1239 					/*
1240 					** This reset has been verified on
1241 					** DFE-580TX boards ! phdm@macqel.be.
1242 					*/
1243 					if (tx_status & 0x10) {	/* TxUnderrun */
1244 						/* Restart Tx FIFO and transmitter */
1245 						sundance_reset(dev, (NetworkReset|FIFOReset|TxReset) << 16);
1246 						/* No need to reset the Tx pointer here */
1247 					}
1248 					/* Restart the Tx. Need to make sure tx enabled */
1249 					i = 10;
1250 					do {
1251 						iowrite16(ioread16(ioaddr + MACCtrl1) | TxEnable, ioaddr + MACCtrl1);
1252 						if (ioread16(ioaddr + MACCtrl1) & TxEnabled)
1253 							break;
1254 						mdelay(1);
1255 					} while (--i);
1256 				}
1257 				/* Yup, this is a documentation bug.  It cost me *hours*. */
1258 				iowrite16 (0, ioaddr + TxStatus);
1259 				if (tx_cnt < 0) {
1260 					iowrite32(5000, ioaddr + DownCounter);
1261 					break;
1262 				}
1263 				tx_status = ioread16 (ioaddr + TxStatus);
1264 			}
1265 			hw_frame_id = (tx_status >> 8) & 0xff;
1266 		} else 	{
1267 			hw_frame_id = ioread8(ioaddr + TxFrameId);
1268 		}
1269 
1270 		if (np->pci_dev->revision >= 0x14) {
1271 			spin_lock(&np->lock);
1272 			for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
1273 				int entry = np->dirty_tx % TX_RING_SIZE;
1274 				struct sk_buff *skb;
1275 				int sw_frame_id;
1276 				sw_frame_id = (le32_to_cpu(
1277 					np->tx_ring[entry].status) >> 2) & 0xff;
1278 				if (sw_frame_id == hw_frame_id &&
1279 					!(le32_to_cpu(np->tx_ring[entry].status)
1280 					& 0x00010000))
1281 						break;
1282 				if (sw_frame_id == (hw_frame_id + 1) %
1283 					TX_RING_SIZE)
1284 						break;
1285 				skb = np->tx_skbuff[entry];
1286 				/* Free the original skb. */
1287 				dma_unmap_single(&np->pci_dev->dev,
1288 					le32_to_cpu(np->tx_ring[entry].frag[0].addr),
1289 					skb->len, DMA_TO_DEVICE);
1290 				dev_kfree_skb_irq (np->tx_skbuff[entry]);
1291 				np->tx_skbuff[entry] = NULL;
1292 				np->tx_ring[entry].frag[0].addr = 0;
1293 				np->tx_ring[entry].frag[0].length = 0;
1294 			}
1295 			spin_unlock(&np->lock);
1296 		} else {
1297 			spin_lock(&np->lock);
1298 			for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
1299 				int entry = np->dirty_tx % TX_RING_SIZE;
1300 				struct sk_buff *skb;
1301 				if (!(le32_to_cpu(np->tx_ring[entry].status)
1302 							& 0x00010000))
1303 					break;
1304 				skb = np->tx_skbuff[entry];
1305 				/* Free the original skb. */
1306 				dma_unmap_single(&np->pci_dev->dev,
1307 					le32_to_cpu(np->tx_ring[entry].frag[0].addr),
1308 					skb->len, DMA_TO_DEVICE);
1309 				dev_kfree_skb_irq (np->tx_skbuff[entry]);
1310 				np->tx_skbuff[entry] = NULL;
1311 				np->tx_ring[entry].frag[0].addr = 0;
1312 				np->tx_ring[entry].frag[0].length = 0;
1313 			}
1314 			spin_unlock(&np->lock);
1315 		}
1316 
1317 		if (netif_queue_stopped(dev) &&
1318 			np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
1319 			/* The ring is no longer full, clear busy flag. */
1320 			netif_wake_queue (dev);
1321 		}
1322 		/* Abnormal error summary/uncommon events handlers. */
1323 		if (intr_status & (IntrPCIErr | LinkChange | StatsMax))
1324 			netdev_error(dev, intr_status);
1325 	} while (0);
1326 	if (netif_msg_intr(np))
1327 		printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
1328 			   dev->name, ioread16(ioaddr + IntrStatus));
1329 	return IRQ_RETVAL(handled);
1330 }
1331 
1332 static void rx_poll(unsigned long data)
1333 {
1334 	struct net_device *dev = (struct net_device *)data;
1335 	struct netdev_private *np = netdev_priv(dev);
1336 	int entry = np->cur_rx % RX_RING_SIZE;
1337 	int boguscnt = np->budget;
1338 	void __iomem *ioaddr = np->base;
1339 	int received = 0;
1340 
	/* While the chip has marked descriptors complete (DescOwn set in the
	   status word), pass the finished frames up the stack. */
1342 	while (1) {
1343 		struct netdev_desc *desc = &(np->rx_ring[entry]);
1344 		u32 frame_status = le32_to_cpu(desc->status);
1345 		int pkt_len;
1346 
1347 		if (--boguscnt < 0) {
1348 			goto not_done;
1349 		}
1350 		if (!(frame_status & DescOwn))
1351 			break;
1352 		pkt_len = frame_status & 0x1fff;	/* Chip omits the CRC. */
1353 		if (netif_msg_rx_status(np))
1354 			printk(KERN_DEBUG "  netdev_rx() status was %8.8x.\n",
1355 				   frame_status);
1356 		if (frame_status & 0x001f4000) {
			/* There was an error. */
1358 			if (netif_msg_rx_err(np))
1359 				printk(KERN_DEBUG "  netdev_rx() Rx error was %8.8x.\n",
1360 					   frame_status);
1361 			dev->stats.rx_errors++;
1362 			if (frame_status & 0x00100000)
1363 				dev->stats.rx_length_errors++;
1364 			if (frame_status & 0x00010000)
1365 				dev->stats.rx_fifo_errors++;
1366 			if (frame_status & 0x00060000)
1367 				dev->stats.rx_frame_errors++;
1368 			if (frame_status & 0x00080000)
1369 				dev->stats.rx_crc_errors++;
1370 			if (frame_status & 0x00100000) {
1371 				printk(KERN_WARNING "%s: Oversized Ethernet frame,"
1372 					   " status %8.8x.\n",
1373 					   dev->name, frame_status);
1374 			}
1375 		} else {
1376 			struct sk_buff *skb;
1377 #ifndef final_version
1378 			if (netif_msg_rx_status(np))
1379 				printk(KERN_DEBUG "  netdev_rx() normal Rx pkt length %d"
1380 					   ", bogus_cnt %d.\n",
1381 					   pkt_len, boguscnt);
1382 #endif
1383 			/* Check if the packet is long enough to accept without copying
1384 			   to a minimally-sized skbuff. */
1385 			if (pkt_len < rx_copybreak &&
1386 			    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
1387 				skb_reserve(skb, 2);	/* 16 byte align the IP header */
1388 				dma_sync_single_for_cpu(&np->pci_dev->dev,
1389 						le32_to_cpu(desc->frag[0].addr),
1390 						np->rx_buf_sz, DMA_FROM_DEVICE);
1391 				skb_copy_to_linear_data(skb, np->rx_skbuff[entry]->data, pkt_len);
1392 				dma_sync_single_for_device(&np->pci_dev->dev,
1393 						le32_to_cpu(desc->frag[0].addr),
1394 						np->rx_buf_sz, DMA_FROM_DEVICE);
1395 				skb_put(skb, pkt_len);
1396 			} else {
1397 				dma_unmap_single(&np->pci_dev->dev,
1398 					le32_to_cpu(desc->frag[0].addr),
1399 					np->rx_buf_sz, DMA_FROM_DEVICE);
1400 				skb_put(skb = np->rx_skbuff[entry], pkt_len);
1401 				np->rx_skbuff[entry] = NULL;
1402 			}
1403 			skb->protocol = eth_type_trans(skb, dev);
1404 			/* Note: checksum -> skb->ip_summed = CHECKSUM_UNNECESSARY; */
1405 			netif_rx(skb);
1406 		}
1407 		entry = (entry + 1) % RX_RING_SIZE;
1408 		received++;
1409 	}
1410 	np->cur_rx = entry;
1411 	refill_rx (dev);
1412 	np->budget -= received;
1413 	iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
1414 	return;
1415 
1416 not_done:
1417 	np->cur_rx = entry;
1418 	refill_rx (dev);
1419 	if (!received)
1420 		received = 1;
1421 	np->budget -= received;
1422 	if (np->budget <= 0)
1423 		np->budget = RX_BUDGET;
1424 	tasklet_schedule(&np->rx_tasklet);
1425 }
1426 
1427 static void refill_rx (struct net_device *dev)
1428 {
1429 	struct netdev_private *np = netdev_priv(dev);
1430 	int entry;
1431 	int cnt = 0;
1432 
1433 	/* Refill the Rx ring buffers. */
1434 	for (;(np->cur_rx - np->dirty_rx + RX_RING_SIZE) % RX_RING_SIZE > 0;
1435 		np->dirty_rx = (np->dirty_rx + 1) % RX_RING_SIZE) {
1436 		struct sk_buff *skb;
1437 		entry = np->dirty_rx % RX_RING_SIZE;
1438 		if (np->rx_skbuff[entry] == NULL) {
1439 			skb = netdev_alloc_skb(dev, np->rx_buf_sz + 2);
1440 			np->rx_skbuff[entry] = skb;
1441 			if (skb == NULL)
1442 				break;		/* Better luck next round. */
1443 			skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
1444 			np->rx_ring[entry].frag[0].addr = cpu_to_le32(
1445 				dma_map_single(&np->pci_dev->dev, skb->data,
1446 					np->rx_buf_sz, DMA_FROM_DEVICE));
1447 			if (dma_mapping_error(&np->pci_dev->dev,
1448 				    np->rx_ring[entry].frag[0].addr)) {
1449 			    dev_kfree_skb_irq(skb);
1450 			    np->rx_skbuff[entry] = NULL;
1451 			    break;
1452 			}
1453 		}
1454 		/* Perhaps we need not reset this field. */
1455 		np->rx_ring[entry].frag[0].length =
1456 			cpu_to_le32(np->rx_buf_sz | LastFrag);
1457 		np->rx_ring[entry].status = 0;
1458 		cnt++;
1459 	}
1460 }
1461 static void netdev_error(struct net_device *dev, int intr_status)
1462 {
1463 	struct netdev_private *np = netdev_priv(dev);
1464 	void __iomem *ioaddr = np->base;
1465 	u16 mii_ctl, mii_advertise, mii_lpa;
1466 	int speed;
1467 
1468 	if (intr_status & LinkChange) {
1469 		if (mdio_wait_link(dev, 10) == 0) {
1470 			printk(KERN_INFO "%s: Link up\n", dev->name);
1471 			if (np->an_enable) {
1472 				mii_advertise = mdio_read(dev, np->phys[0],
1473 							   MII_ADVERTISE);
1474 				mii_lpa = mdio_read(dev, np->phys[0], MII_LPA);
1475 				mii_advertise &= mii_lpa;
1476 				printk(KERN_INFO "%s: Link changed: ",
1477 					dev->name);
1478 				if (mii_advertise & ADVERTISE_100FULL) {
1479 					np->speed = 100;
1480 					printk("100Mbps, full duplex\n");
1481 				} else if (mii_advertise & ADVERTISE_100HALF) {
1482 					np->speed = 100;
1483 					printk("100Mbps, half duplex\n");
1484 				} else if (mii_advertise & ADVERTISE_10FULL) {
1485 					np->speed = 10;
1486 					printk("10Mbps, full duplex\n");
1487 				} else if (mii_advertise & ADVERTISE_10HALF) {
1488 					np->speed = 10;
1489 					printk("10Mbps, half duplex\n");
1490 				} else
1491 					printk("\n");
1492 
1493 			} else {
1494 				mii_ctl = mdio_read(dev, np->phys[0], MII_BMCR);
1495 				speed = (mii_ctl & BMCR_SPEED100) ? 100 : 10;
1496 				np->speed = speed;
1497 				printk(KERN_INFO "%s: Link changed: %dMbps ,",
1498 					dev->name, speed);
1499 				printk("%s duplex.\n",
1500 					(mii_ctl & BMCR_FULLDPLX) ?
1501 						"full" : "half");
1502 			}
1503 			check_duplex(dev);
1504 			if (np->flowctrl && np->mii_if.full_duplex) {
1505 				iowrite16(ioread16(ioaddr + MulticastFilter1+2) | 0x0200,
1506 					ioaddr + MulticastFilter1+2);
1507 				iowrite16(ioread16(ioaddr + MACCtrl0) | EnbFlowCtrl,
1508 					ioaddr + MACCtrl0);
1509 			}
1510 			netif_carrier_on(dev);
1511 		} else {
1512 			printk(KERN_INFO "%s: Link down\n", dev->name);
1513 			netif_carrier_off(dev);
1514 		}
1515 	}
1516 	if (intr_status & StatsMax) {
1517 		get_stats(dev);
1518 	}
1519 	if (intr_status & IntrPCIErr) {
1520 		printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n",
1521 			   dev->name, intr_status);
1522 		/* We must do a global reset of DMA to continue. */
1523 	}
1524 }
1525 
1526 static struct net_device_stats *get_stats(struct net_device *dev)
1527 {
1528 	struct netdev_private *np = netdev_priv(dev);
1529 	void __iomem *ioaddr = np->base;
1530 	unsigned long flags;
1531 	u8 late_coll, single_coll, mult_coll;
1532 
1533 	spin_lock_irqsave(&np->statlock, flags);
	/* The chip only needs to report frames it silently dropped. */
1535 	dev->stats.rx_missed_errors	+= ioread8(ioaddr + RxMissed);
1536 	dev->stats.tx_packets += ioread16(ioaddr + TxFramesOK);
1537 	dev->stats.rx_packets += ioread16(ioaddr + RxFramesOK);
1538 	dev->stats.tx_carrier_errors += ioread8(ioaddr + StatsCarrierError);
1539 
1540 	mult_coll = ioread8(ioaddr + StatsMultiColl);
1541 	np->xstats.tx_multiple_collisions += mult_coll;
1542 	single_coll = ioread8(ioaddr + StatsOneColl);
1543 	np->xstats.tx_single_collisions += single_coll;
1544 	late_coll = ioread8(ioaddr + StatsLateColl);
1545 	np->xstats.tx_late_collisions += late_coll;
1546 	dev->stats.collisions += mult_coll
1547 		+ single_coll
1548 		+ late_coll;
1549 
1550 	np->xstats.tx_deferred += ioread8(ioaddr + StatsTxDefer);
1551 	np->xstats.tx_deferred_excessive += ioread8(ioaddr + StatsTxXSDefer);
1552 	np->xstats.tx_aborted += ioread8(ioaddr + StatsTxAbort);
1553 	np->xstats.tx_bcasts += ioread8(ioaddr + StatsBcastTx);
1554 	np->xstats.rx_bcasts += ioread8(ioaddr + StatsBcastRx);
1555 	np->xstats.tx_mcasts += ioread8(ioaddr + StatsMcastTx);
1556 	np->xstats.rx_mcasts += ioread8(ioaddr + StatsMcastRx);
1557 
1558 	dev->stats.tx_bytes += ioread16(ioaddr + TxOctetsLow);
1559 	dev->stats.tx_bytes += ioread16(ioaddr + TxOctetsHigh) << 16;
1560 	dev->stats.rx_bytes += ioread16(ioaddr + RxOctetsLow);
1561 	dev->stats.rx_bytes += ioread16(ioaddr + RxOctetsHigh) << 16;
1562 
1563 	spin_unlock_irqrestore(&np->statlock, flags);
1564 
1565 	return &dev->stats;
1566 }
1567 
1568 static void set_rx_mode(struct net_device *dev)
1569 {
1570 	struct netdev_private *np = netdev_priv(dev);
1571 	void __iomem *ioaddr = np->base;
1572 	u16 mc_filter[4];			/* Multicast hash filter */
1573 	u32 rx_mode;
1574 	int i;
1575 
1576 	if (dev->flags & IFF_PROMISC) {			/* Set promiscuous. */
1577 		memset(mc_filter, 0xff, sizeof(mc_filter));
1578 		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptAll | AcceptMyPhys;
1579 	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
1580 		   (dev->flags & IFF_ALLMULTI)) {
1581 		/* Too many to match, or accept all multicasts. */
1582 		memset(mc_filter, 0xff, sizeof(mc_filter));
1583 		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
1584 	} else if (!netdev_mc_empty(dev)) {
1585 		struct netdev_hw_addr *ha;
1586 		int bit;
1587 		int index;
1588 		int crc;
1589 		memset (mc_filter, 0, sizeof (mc_filter));
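		/* Hash each address with the little-endian Ethernet CRC and use
		   the top six CRC bits (bit-reversed) to index the 64-bit filter
		   held in MulticastFilter0/1. */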
1590 		netdev_for_each_mc_addr(ha, dev) {
1591 			crc = ether_crc_le(ETH_ALEN, ha->addr);
1592 			for (index=0, bit=0; bit < 6; bit++, crc <<= 1)
1593 				if (crc & 0x80000000) index |= 1 << bit;
1594 			mc_filter[index/16] |= (1 << (index % 16));
1595 		}
1596 		rx_mode = AcceptBroadcast | AcceptMultiHash | AcceptMyPhys;
1597 	} else {
1598 		iowrite8(AcceptBroadcast | AcceptMyPhys, ioaddr + RxMode);
1599 		return;
1600 	}
1601 	if (np->mii_if.full_duplex && np->flowctrl)
1602 		mc_filter[3] |= 0x0200;
1603 
1604 	for (i = 0; i < 4; i++)
1605 		iowrite16(mc_filter[i], ioaddr + MulticastFilter0 + i*2);
1606 	iowrite8(rx_mode, ioaddr + RxMode);
1607 }
1608 
1609 static int __set_mac_addr(struct net_device *dev)
1610 {
1611 	struct netdev_private *np = netdev_priv(dev);
1612 	u16 addr16;
1613 
1614 	addr16 = (dev->dev_addr[0] | (dev->dev_addr[1] << 8));
1615 	iowrite16(addr16, np->base + StationAddr);
1616 	addr16 = (dev->dev_addr[2] | (dev->dev_addr[3] << 8));
1617 	iowrite16(addr16, np->base + StationAddr+2);
1618 	addr16 = (dev->dev_addr[4] | (dev->dev_addr[5] << 8));
1619 	iowrite16(addr16, np->base + StationAddr+4);
1620 	return 0;
1621 }
1622 
1623 /* Invoked with rtnl_lock held */
1624 static int sundance_set_mac_addr(struct net_device *dev, void *data)
1625 {
1626 	const struct sockaddr *addr = data;
1627 
1628 	if (!is_valid_ether_addr(addr->sa_data))
1629 		return -EADDRNOTAVAIL;
1630 	memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
1631 	__set_mac_addr(dev);
1632 
1633 	return 0;
1634 }
1635 
1636 static const struct {
1637 	const char name[ETH_GSTRING_LEN];
1638 } sundance_stats[] = {
1639 	{ "tx_multiple_collisions" },
1640 	{ "tx_single_collisions" },
1641 	{ "tx_late_collisions" },
1642 	{ "tx_deferred" },
1643 	{ "tx_deferred_excessive" },
1644 	{ "tx_aborted" },
1645 	{ "tx_bcasts" },
1646 	{ "rx_bcasts" },
1647 	{ "tx_mcasts" },
1648 	{ "rx_mcasts" },
1649 };
1650 
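/* ethtool .begin hook: ethtool requests are rejected while the interface is down. */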
1651 static int check_if_running(struct net_device *dev)
1652 {
1653 	if (!netif_running(dev))
1654 		return -EINVAL;
1655 	return 0;
1656 }
1657 
1658 static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1659 {
1660 	struct netdev_private *np = netdev_priv(dev);
1661 	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
1662 	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
1663 	strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
1664 }
1665 
1666 static int get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1667 {
1668 	struct netdev_private *np = netdev_priv(dev);
1669 	spin_lock_irq(&np->lock);
1670 	mii_ethtool_gset(&np->mii_if, ecmd);
1671 	spin_unlock_irq(&np->lock);
1672 	return 0;
1673 }
1674 
1675 static int set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1676 {
1677 	struct netdev_private *np = netdev_priv(dev);
1678 	int res;
1679 	spin_lock_irq(&np->lock);
1680 	res = mii_ethtool_sset(&np->mii_if, ecmd);
1681 	spin_unlock_irq(&np->lock);
1682 	return res;
1683 }
1684 
1685 static int nway_reset(struct net_device *dev)
1686 {
1687 	struct netdev_private *np = netdev_priv(dev);
1688 	return mii_nway_restart(&np->mii_if);
1689 }
1690 
1691 static u32 get_link(struct net_device *dev)
1692 {
1693 	struct netdev_private *np = netdev_priv(dev);
1694 	return mii_link_ok(&np->mii_if);
1695 }
1696 
1697 static u32 get_msglevel(struct net_device *dev)
1698 {
1699 	struct netdev_private *np = netdev_priv(dev);
1700 	return np->msg_enable;
1701 }
1702 
1703 static void set_msglevel(struct net_device *dev, u32 val)
1704 {
1705 	struct netdev_private *np = netdev_priv(dev);
1706 	np->msg_enable = val;
1707 }
1708 
1709 static void get_strings(struct net_device *dev, u32 stringset,
1710 		u8 *data)
1711 {
1712 	if (stringset == ETH_SS_STATS)
1713 		memcpy(data, sundance_stats, sizeof(sundance_stats));
1714 }
1715 
1716 static int get_sset_count(struct net_device *dev, int sset)
1717 {
1718 	switch (sset) {
1719 	case ETH_SS_STATS:
1720 		return ARRAY_SIZE(sundance_stats);
1721 	default:
1722 		return -EOPNOTSUPP;
1723 	}
1724 }
1725 
1726 static void get_ethtool_stats(struct net_device *dev,
1727 		struct ethtool_stats *stats, u64 *data)
1728 {
1729 	struct netdev_private *np = netdev_priv(dev);
1730 	int i = 0;
1731 
1732 	get_stats(dev);
1733 	data[i++] = np->xstats.tx_multiple_collisions;
1734 	data[i++] = np->xstats.tx_single_collisions;
1735 	data[i++] = np->xstats.tx_late_collisions;
1736 	data[i++] = np->xstats.tx_deferred;
1737 	data[i++] = np->xstats.tx_deferred_excessive;
1738 	data[i++] = np->xstats.tx_aborted;
1739 	data[i++] = np->xstats.tx_bcasts;
1740 	data[i++] = np->xstats.rx_bcasts;
1741 	data[i++] = np->xstats.tx_mcasts;
1742 	data[i++] = np->xstats.rx_mcasts;
1743 }
1744 
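/* Wake-on-LAN is only offered when power management is configured; without
   CONFIG_PM the ethtool WoL hooks are stubbed out below. */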
1745 #ifdef CONFIG_PM
1746 
1747 static void sundance_get_wol(struct net_device *dev,
1748 		struct ethtool_wolinfo *wol)
1749 {
1750 	struct netdev_private *np = netdev_priv(dev);
1751 	void __iomem *ioaddr = np->base;
1752 	u8 wol_bits;
1753 
1754 	wol->wolopts = 0;
1755 
1756 	wol->supported = (WAKE_PHY | WAKE_MAGIC);
1757 	if (!np->wol_enabled)
1758 		return;
1759 
1760 	wol_bits = ioread8(ioaddr + WakeEvent);
1761 	if (wol_bits & MagicPktEnable)
1762 		wol->wolopts |= WAKE_MAGIC;
1763 	if (wol_bits & LinkEventEnable)
1764 		wol->wolopts |= WAKE_PHY;
1765 }
1766 
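/* Translate the requested ethtool WoL options into WakeEvent bits:
   WAKE_MAGIC maps to MagicPktEnable and WAKE_PHY to LinkEventEnable, each
   combined with WolEnable. */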
1767 static int sundance_set_wol(struct net_device *dev,
1768 	struct ethtool_wolinfo *wol)
1769 {
1770 	struct netdev_private *np = netdev_priv(dev);
1771 	void __iomem *ioaddr = np->base;
1772 	u8 wol_bits;
1773 
1774 	if (!device_can_wakeup(&np->pci_dev->dev))
1775 		return -EOPNOTSUPP;
1776 
1777 	np->wol_enabled = !!(wol->wolopts);
1778 	wol_bits = ioread8(ioaddr + WakeEvent);
1779 	wol_bits &= ~(WakePktEnable | MagicPktEnable |
1780 			LinkEventEnable | WolEnable);
1781 
1782 	if (np->wol_enabled) {
1783 		if (wol->wolopts & WAKE_MAGIC)
1784 			wol_bits |= (MagicPktEnable | WolEnable);
1785 		if (wol->wolopts & WAKE_PHY)
1786 			wol_bits |= (LinkEventEnable | WolEnable);
1787 	}
1788 	iowrite8(wol_bits, ioaddr + WakeEvent);
1789 
1790 	device_set_wakeup_enable(&np->pci_dev->dev, np->wol_enabled);
1791 
1792 	return 0;
1793 }
1794 #else
1795 #define sundance_get_wol NULL
1796 #define sundance_set_wol NULL
1797 #endif /* CONFIG_PM */
1798 
1799 static const struct ethtool_ops ethtool_ops = {
1800 	.begin = check_if_running,
1801 	.get_drvinfo = get_drvinfo,
1802 	.get_settings = get_settings,
1803 	.set_settings = set_settings,
1804 	.nway_reset = nway_reset,
1805 	.get_link = get_link,
1806 	.get_wol = sundance_get_wol,
1807 	.set_wol = sundance_set_wol,
1808 	.get_msglevel = get_msglevel,
1809 	.set_msglevel = set_msglevel,
1810 	.get_strings = get_strings,
1811 	.get_sset_count = get_sset_count,
1812 	.get_ethtool_stats = get_ethtool_stats,
1813 };
1814 
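/* MII ioctls (SIOCGMIIPHY, SIOCGMIIREG, SIOCSMIIREG) are forwarded to the
   generic MII layer while holding the driver lock. */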
1815 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1816 {
1817 	struct netdev_private *np = netdev_priv(dev);
1818 	int rc;
1819 
1820 	if (!netif_running(dev))
1821 		return -EINVAL;
1822 
1823 	spin_lock_irq(&np->lock);
1824 	rc = generic_mii_ioctl(&np->mii_if, if_mii(rq), cmd, NULL);
1825 	spin_unlock_irq(&np->lock);
1826 
1827 	return rc;
1828 }
1829 
1830 static int netdev_close(struct net_device *dev)
1831 {
1832 	struct netdev_private *np = netdev_priv(dev);
1833 	void __iomem *ioaddr = np->base;
1834 	struct sk_buff *skb;
1835 	int i;
1836 
	/* Wait for the Rx and Tx tasklets to finish, then kill them. */
1838 	tasklet_kill(&np->rx_tasklet);
1839 	tasklet_kill(&np->tx_tasklet);
1840 	np->cur_tx = 0;
1841 	np->dirty_tx = 0;
1842 	np->cur_task = 0;
1843 	np->last_tx = NULL;
1844 
1845 	netif_stop_queue(dev);
1846 
1847 	if (netif_msg_ifdown(np)) {
1848 		printk(KERN_DEBUG "%s: Shutting down ethercard, status was Tx %2.2x "
1849 			   "Rx %4.4x Int %2.2x.\n",
1850 			   dev->name, ioread8(ioaddr + TxStatus),
1851 			   ioread32(ioaddr + RxStatus), ioread16(ioaddr + IntrStatus));
1852 		printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d,  Rx %d / %d.\n",
1853 			   dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx);
1854 	}
1855 
1856 	/* Disable interrupts by clearing the interrupt mask. */
1857 	iowrite16(0x0000, ioaddr + IntrEnable);
1858 
	/* Disable Rx and Tx DMA so the resources can be released safely. */
1860 	iowrite32(0x500, ioaddr + DMACtrl);
1861 
1862 	/* Stop the chip's Tx and Rx processes. */
1863 	iowrite16(TxDisable | RxDisable | StatsDisable, ioaddr + MACCtrl1);
1864 
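	/* Poll DMACtrl for up to 2000 ms, waiting for any in-flight DMA to
	   stop before resetting the chip. */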
	for (i = 2000; i > 0; i--) {
		if ((ioread32(ioaddr + DMACtrl) & 0xc000) == 0)
			break;
		mdelay(1);
	}
1870 
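	/* Issue a full reset (MAC, FIFOs, DMA and network logic) via the high
	   word of ASICCtrl, then wait up to another 2000 ms for ResetBusy to clear. */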
	iowrite16(GlobalReset | DMAReset | FIFOReset | NetworkReset,
			ioaddr + ASIC_HI_WORD(ASICCtrl));

	for (i = 2000; i > 0; i--) {
		if ((ioread16(ioaddr + ASIC_HI_WORD(ASICCtrl)) & ResetBusy) == 0)
			break;
		mdelay(1);
	}
1879 
1880 #ifdef __i386__
1881 	if (netif_msg_hw(np)) {
1882 		printk(KERN_DEBUG "  Tx ring at %8.8x:\n",
1883 			   (int)(np->tx_ring_dma));
1884 		for (i = 0; i < TX_RING_SIZE; i++)
1885 			printk(KERN_DEBUG " #%d desc. %4.4x %8.8x %8.8x.\n",
1886 				   i, np->tx_ring[i].status, np->tx_ring[i].frag[0].addr,
1887 				   np->tx_ring[i].frag[0].length);
1888 		printk(KERN_DEBUG "  Rx ring %8.8x:\n",
1889 			   (int)(np->rx_ring_dma));
1890 		for (i = 0; i < /*RX_RING_SIZE*/4 ; i++) {
1891 			printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x\n",
1892 				   i, np->rx_ring[i].status, np->rx_ring[i].frag[0].addr,
1893 				   np->rx_ring[i].frag[0].length);
1894 		}
1895 	}
1896 #endif /* __i386__ debugging only */
1897 
1898 	free_irq(np->pci_dev->irq, dev);
1899 
1900 	del_timer_sync(&np->timer);
1901 
1902 	/* Free all the skbuffs in the Rx queue. */
1903 	for (i = 0; i < RX_RING_SIZE; i++) {
1904 		np->rx_ring[i].status = 0;
1905 		skb = np->rx_skbuff[i];
1906 		if (skb) {
1907 			dma_unmap_single(&np->pci_dev->dev,
1908 				le32_to_cpu(np->rx_ring[i].frag[0].addr),
1909 				np->rx_buf_sz, DMA_FROM_DEVICE);
1910 			dev_kfree_skb(skb);
1911 			np->rx_skbuff[i] = NULL;
1912 		}
1913 		np->rx_ring[i].frag[0].addr = cpu_to_le32(0xBADF00D0); /* poison */
1914 	}
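	/* Free any packets still outstanding in the Tx ring and unmap their buffers. */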
1915 	for (i = 0; i < TX_RING_SIZE; i++) {
1916 		np->tx_ring[i].next_desc = 0;
1917 		skb = np->tx_skbuff[i];
1918 		if (skb) {
1919 			dma_unmap_single(&np->pci_dev->dev,
1920 				le32_to_cpu(np->tx_ring[i].frag[0].addr),
1921 				skb->len, DMA_TO_DEVICE);
1922 			dev_kfree_skb(skb);
1923 			np->tx_skbuff[i] = NULL;
1924 		}
1925 	}
1926 
1927 	return 0;
1928 }
1929 
1930 static void sundance_remove1(struct pci_dev *pdev)
1931 {
1932 	struct net_device *dev = pci_get_drvdata(pdev);
1933 
1934 	if (dev) {
		struct netdev_private *np = netdev_priv(dev);
		unregister_netdev(dev);
		dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE,
				  np->rx_ring, np->rx_ring_dma);
		dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE,
				  np->tx_ring, np->tx_ring_dma);
		pci_iounmap(pdev, np->base);
		pci_release_regions(pdev);
		free_netdev(dev);
		pci_set_drvdata(pdev, NULL);
1945 	}
1946 }
1947 
1948 #ifdef CONFIG_PM
1949 
1950 static int sundance_suspend(struct pci_dev *pci_dev, pm_message_t state)
1951 {
1952 	struct net_device *dev = pci_get_drvdata(pci_dev);
1953 	struct netdev_private *np = netdev_priv(dev);
1954 	void __iomem *ioaddr = np->base;
1955 
1956 	if (!netif_running(dev))
1957 		return 0;
1958 
1959 	netdev_close(dev);
1960 	netif_device_detach(dev);
1961 
1962 	pci_save_state(pci_dev);
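	/* If Wake-on-LAN is armed, leave the receiver enabled for broadcast and
	   station-address traffic so wake events can still be detected while suspended. */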
1963 	if (np->wol_enabled) {
1964 		iowrite8(AcceptBroadcast | AcceptMyPhys, ioaddr + RxMode);
1965 		iowrite16(RxEnable, ioaddr + MACCtrl1);
1966 	}
1967 	pci_enable_wake(pci_dev, pci_choose_state(pci_dev, state),
1968 			np->wol_enabled);
1969 	pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state));
1970 
1971 	return 0;
1972 }
1973 
1974 static int sundance_resume(struct pci_dev *pci_dev)
1975 {
1976 	struct net_device *dev = pci_get_drvdata(pci_dev);
1977 	int err = 0;
1978 
1979 	if (!netif_running(dev))
1980 		return 0;
1981 
1982 	pci_set_power_state(pci_dev, PCI_D0);
1983 	pci_restore_state(pci_dev);
1984 	pci_enable_wake(pci_dev, PCI_D0, 0);
1985 
1986 	err = netdev_open(dev);
1987 	if (err) {
1988 		printk(KERN_ERR "%s: Can't resume interface!\n",
1989 				dev->name);
1990 		goto out;
1991 	}
1992 
1993 	netif_device_attach(dev);
1994 
1995 out:
1996 	return err;
1997 }
1998 
1999 #endif /* CONFIG_PM */
2000 
2001 static struct pci_driver sundance_driver = {
2002 	.name		= DRV_NAME,
2003 	.id_table	= sundance_pci_tbl,
2004 	.probe		= sundance_probe1,
2005 	.remove		= sundance_remove1,
2006 #ifdef CONFIG_PM
2007 	.suspend	= sundance_suspend,
2008 	.resume		= sundance_resume,
2009 #endif /* CONFIG_PM */
2010 };
2011 
2012 static int __init sundance_init(void)
2013 {
/* When built as a module, the version banner is printed whether or not any devices are found during probe. */
2015 #ifdef MODULE
2016 	printk(version);
2017 #endif
2018 	return pci_register_driver(&sundance_driver);
2019 }
2020 
2021 static void __exit sundance_exit(void)
2022 {
2023 	pci_unregister_driver(&sundance_driver);
2024 }
2025 
2026 module_init(sundance_init);
2027 module_exit(sundance_exit);