1 /* sundance.c: A Linux device driver for the Sundance ST201 "Alta". */
2 /*
3 	Written 1999-2000 by Donald Becker.
4 
5 	This software may be used and distributed according to the terms of
6 	the GNU General Public License (GPL), incorporated herein by reference.
7 	Drivers based on or derived from this code fall under the GPL and must
8 	retain the authorship, copyright and license notice.  This file is not
9 	a complete program and may only be used when the entire operating
10 	system is licensed under the GPL.
11 
12 	The author may be reached as becker@scyld.com, or C/O
13 	Scyld Computing Corporation
14 	410 Severn Ave., Suite 210
15 	Annapolis MD 21403
16 
17 	Support and updates available at
18 	http://www.scyld.com/network/sundance.html
19 	[link no longer provides useful info -jgarzik]
20 	Archives of the mailing list are still available at
21 	http://www.beowulf.org/pipermail/netdrivers/
22 
23 */
24 
25 #define DRV_NAME	"sundance"
26 #define DRV_VERSION	"1.2"
27 #define DRV_RELDATE	"11-Sep-2006"
28 
29 
30 /* The user-configurable values.
31    These may be modified when a driver module is loaded.*/
static int debug = 1;			/* Debug level: 0 quiet, 1 normal messages, ..., 7 verbose. */
33 /* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
34    Typical is a 64 element hash table based on the Ethernet CRC.  */
35 static const int multicast_filter_limit = 32;
36 
37 /* Set the copy breakpoint for the copy-only-tiny-frames scheme.
38    Setting to > 1518 effectively disables this feature.
39    This chip can receive into offset buffers, so the Alpha does not
40    need a copy-align. */
41 static int rx_copybreak;
static int flowctrl = 1;
43 
44 /* media[] specifies the media type the NIC operates at.
45 		 autosense	Autosensing active media.
46 		 10mbps_hd 	10Mbps half duplex.
47 		 10mbps_fd 	10Mbps full duplex.
48 		 100mbps_hd 	100Mbps half duplex.
49 		 100mbps_fd 	100Mbps full duplex.
50 		 0		Autosensing active media.
51 		 1	 	10Mbps half duplex.
52 		 2	 	10Mbps full duplex.
53 		 3	 	100Mbps half duplex.
54 		 4	 	100Mbps full duplex.
55 */
56 #define MAX_UNITS 8
57 static char *media[MAX_UNITS];
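
/* Example (hypothetical invocation): force the first NIC to 100 Mbps full
   duplex and let a second one autosense, with flow control left enabled:

	modprobe sundance media=100mbps_fd,autosense flowctrl=1

   Up to MAX_UNITS comma-separated values are honored. */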
58 
59 
60 /* Operational parameters that are set at compile time. */
61 
62 /* Keep the ring sizes a power of two for compile efficiency.
63    The compiler will convert <unsigned>'%'<2^N> into a bit mask.
64    Making the Tx ring too large decreases the effectiveness of channel
65    bonding and packet priority, and more than 128 requires modifying the
66    Tx error recovery.
67    Large receive rings merely waste memory. */
68 #define TX_RING_SIZE	32
69 #define TX_QUEUE_LEN	(TX_RING_SIZE - 1) /* Limit ring entries actually used.  */
70 #define RX_RING_SIZE	64
71 #define RX_BUDGET	32
#define TX_TOTAL_SIZE	(TX_RING_SIZE * sizeof(struct netdev_desc))
#define RX_TOTAL_SIZE	(RX_RING_SIZE * sizeof(struct netdev_desc))
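
/* Because the ring sizes are powers of two, the index arithmetic used
   throughout the driver, e.g. "np->cur_tx % TX_RING_SIZE", reduces to the
   mask "np->cur_tx & (TX_RING_SIZE - 1)" as noted above. */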
74 
75 /* Operational parameters that usually are not changed. */
76 /* Time in jiffies before concluding the transmitter is hung. */
77 #define TX_TIMEOUT  (4*HZ)
78 #define PKT_BUF_SZ		1536	/* Size of each temporary Rx buffer.*/
79 
80 /* Include files, designed to support most kernel versions 2.0.0 and later. */
81 #include <linux/module.h>
82 #include <linux/kernel.h>
83 #include <linux/string.h>
84 #include <linux/timer.h>
85 #include <linux/errno.h>
86 #include <linux/ioport.h>
87 #include <linux/interrupt.h>
88 #include <linux/pci.h>
89 #include <linux/netdevice.h>
90 #include <linux/etherdevice.h>
91 #include <linux/skbuff.h>
92 #include <linux/init.h>
93 #include <linux/bitops.h>
94 #include <asm/uaccess.h>
95 #include <asm/processor.h>		/* Processor type for cache alignment. */
96 #include <asm/io.h>
97 #include <linux/delay.h>
98 #include <linux/spinlock.h>
99 #include <linux/dma-mapping.h>
100 #include <linux/crc32.h>
101 #include <linux/ethtool.h>
102 #include <linux/mii.h>
103 
104 /* These identify the driver base version and may not be removed. */
105 static const char version[] =
106 	KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE
107 	" Written by Donald Becker\n";
108 
109 MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
110 MODULE_DESCRIPTION("Sundance Alta Ethernet driver");
111 MODULE_LICENSE("GPL");
112 
113 module_param(debug, int, 0);
114 module_param(rx_copybreak, int, 0);
115 module_param_array(media, charp, NULL, 0);
116 module_param(flowctrl, int, 0);
MODULE_PARM_DESC(debug, "Sundance Alta debug level (0-7)");
118 MODULE_PARM_DESC(rx_copybreak, "Sundance Alta copy breakpoint for copy-only-tiny-frames");
119 MODULE_PARM_DESC(flowctrl, "Sundance Alta flow control [0|1]");
120 
121 /*
122 				Theory of Operation
123 
124 I. Board Compatibility
125 
126 This driver is designed for the Sundance Technologies "Alta" ST201 chip.
127 
128 II. Board-specific settings
129 
130 III. Driver operation
131 
132 IIIa. Ring buffers
133 
134 This driver uses two statically allocated fixed-size descriptor lists
135 formed into rings by a branch from the final descriptor to the beginning of
136 the list.  The ring sizes are set at compile time by RX/TX_RING_SIZE.
137 Some chips explicitly use only 2^N sized rings, while others use a
138 'next descriptor' pointer that the driver forms into rings.
139 
140 IIIb/c. Transmit/Receive Structure
141 
142 This driver uses a zero-copy receive and transmit scheme.
143 The driver allocates full frame size skbuffs for the Rx ring buffers at
144 open() time and passes the skb->data field to the chip as receive data
145 buffers.  When an incoming frame is less than RX_COPYBREAK bytes long,
146 a fresh skbuff is allocated and the frame is copied to the new skbuff.
147 When the incoming frame is larger, the skbuff is passed directly up the
148 protocol stack.  Buffers consumed this way are replaced by newly allocated
149 skbuffs in a later phase of receives.
150 
151 The RX_COPYBREAK value is chosen to trade-off the memory wasted by
152 using a full-sized skbuff for small frames vs. the copying costs of larger
153 frames.  New boards are typically used in generously configured machines
154 and the underfilled buffers have negligible impact compared to the benefit of
155 a single allocation size, so the default value of zero results in never
156 copying packets.  When copying is done, the cost is usually mitigated by using
157 a combined copy/checksum routine.  Copying also preloads the cache, which is
158 most useful with small frames.
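
As a sketch, the copybreak decision in rx_poll() below looks like this
(names are those used in this driver):

	if (pkt_len < rx_copybreak &&
	    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
		skb_reserve(skb, 2);	   longword-align the IP header
		... copy the frame into the small skb; the ring buffer stays in place ...
	} else {
		... unmap the full-sized ring skb and pass it up the stack ...
	}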
159 
A subtle aspect of the operation is that the IP header at offset 14 in an
ethernet frame isn't longword aligned for further processing.
Unaligned buffers are permitted by the Sundance hardware, so
frames are received into the skbuff at an offset of "+2", which longword
aligns the IP header (placing it at offset 16 within the buffer).
165 
166 IIId. Synchronization
167 
The driver runs as two independent, single-threaded flows of control.  One
is the send-packet routine, which enforces single-threaded use through the
netdev transmit queue (netif_stop_queue/netif_wake_queue).  The other
thread is the interrupt handler, which is single threaded by the hardware
and interrupt handling software.

The send packet thread has partial control over the Tx ring: it advances
cur_tx as packets are queued and stops the queue when the ring is nearly
full.  Chaining the descriptors and handing the list to the chip are
deferred to the tx_poll() tasklet.

The interrupt handler reaps completed Tx entries, advancing the dirty_tx
mark and freeing the associated skbuffs, and wakes the queue once enough
entries have been freed.  Rx processing is deferred to the rx_poll()
tasklet, which owns the Rx ring together with refill_rx().
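
As a concrete example, the wake-up test used by intr_handler() below is:

	if (netif_queue_stopped(dev) &&
	    np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4)
		netif_wake_queue(dev);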
182 
183 IV. Notes
184 
185 IVb. References
186 
187 The Sundance ST201 datasheet, preliminary version.
188 The Kendin KS8723 datasheet, preliminary version.
189 The ICplus IP100 datasheet, preliminary version.
190 http://www.scyld.com/expert/100mbps.html
191 http://www.scyld.com/expert/NWay.html
192 
193 IVc. Errata
194 
195 */
196 
197 /* Work-around for Kendin chip bugs. */
198 #ifndef CONFIG_SUNDANCE_MMIO
199 #define USE_IO_OPS 1
200 #endif
201 
202 static DEFINE_PCI_DEVICE_TABLE(sundance_pci_tbl) = {
203 	{ 0x1186, 0x1002, 0x1186, 0x1002, 0, 0, 0 },
204 	{ 0x1186, 0x1002, 0x1186, 0x1003, 0, 0, 1 },
205 	{ 0x1186, 0x1002, 0x1186, 0x1012, 0, 0, 2 },
206 	{ 0x1186, 0x1002, 0x1186, 0x1040, 0, 0, 3 },
207 	{ 0x1186, 0x1002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
208 	{ 0x13F0, 0x0201, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5 },
209 	{ 0x13F0, 0x0200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 6 },
210 	{ }
211 };
212 MODULE_DEVICE_TABLE(pci, sundance_pci_tbl);
213 
214 enum {
215 	netdev_io_size = 128
216 };
217 
218 struct pci_id_info {
	const char *name;
220 };
221 static const struct pci_id_info pci_id_tbl[] = {
222 	{"D-Link DFE-550TX FAST Ethernet Adapter"},
223 	{"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
224 	{"D-Link DFE-580TX 4 port Server Adapter"},
225 	{"D-Link DFE-530TXS FAST Ethernet Adapter"},
226 	{"D-Link DL10050-based FAST Ethernet Adapter"},
227 	{"Sundance Technology Alta"},
228 	{"IC Plus Corporation IP100A FAST Ethernet Adapter"},
229 	{ }	/* terminate list. */
230 };
231 
232 /* This driver was written to use PCI memory space, however x86-oriented
233    hardware often uses I/O space accesses. */
234 
235 /* Offsets to the device registers.
236    Unlike software-only systems, device drivers interact with complex hardware.
237    It's not useful to define symbolic names for every register bit in the
   device.  Such names can only partially document the semantics while
   making the driver longer and more difficult to read.
240    In general, only the important configuration values or bits changed
241    multiple times should be defined symbolically.
242 */
243 enum alta_offsets {
244 	DMACtrl = 0x00,
245 	TxListPtr = 0x04,
246 	TxDMABurstThresh = 0x08,
247 	TxDMAUrgentThresh = 0x09,
248 	TxDMAPollPeriod = 0x0a,
249 	RxDMAStatus = 0x0c,
250 	RxListPtr = 0x10,
251 	DebugCtrl0 = 0x1a,
252 	DebugCtrl1 = 0x1c,
253 	RxDMABurstThresh = 0x14,
254 	RxDMAUrgentThresh = 0x15,
255 	RxDMAPollPeriod = 0x16,
256 	LEDCtrl = 0x1a,
257 	ASICCtrl = 0x30,
258 	EEData = 0x34,
259 	EECtrl = 0x36,
260 	FlashAddr = 0x40,
261 	FlashData = 0x44,
262 	WakeEvent = 0x45,
263 	TxStatus = 0x46,
264 	TxFrameId = 0x47,
265 	DownCounter = 0x18,
266 	IntrClear = 0x4a,
267 	IntrEnable = 0x4c,
268 	IntrStatus = 0x4e,
269 	MACCtrl0 = 0x50,
270 	MACCtrl1 = 0x52,
271 	StationAddr = 0x54,
272 	MaxFrameSize = 0x5A,
273 	RxMode = 0x5c,
274 	MIICtrl = 0x5e,
275 	MulticastFilter0 = 0x60,
276 	MulticastFilter1 = 0x64,
277 	RxOctetsLow = 0x68,
278 	RxOctetsHigh = 0x6a,
279 	TxOctetsLow = 0x6c,
280 	TxOctetsHigh = 0x6e,
281 	TxFramesOK = 0x70,
282 	RxFramesOK = 0x72,
283 	StatsCarrierError = 0x74,
284 	StatsLateColl = 0x75,
285 	StatsMultiColl = 0x76,
286 	StatsOneColl = 0x77,
287 	StatsTxDefer = 0x78,
288 	RxMissed = 0x79,
289 	StatsTxXSDefer = 0x7a,
290 	StatsTxAbort = 0x7b,
291 	StatsBcastTx = 0x7c,
292 	StatsBcastRx = 0x7d,
293 	StatsMcastTx = 0x7e,
294 	StatsMcastRx = 0x7f,
295 	/* Aliased and bogus values! */
296 	RxStatus = 0x0c,
297 };
298 
299 #define ASIC_HI_WORD(x)	((x) + 2)
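
/* ASIC_HI_WORD() gives 16-bit access to the high half of a 32-bit register;
   the reset bits below, for instance, sit in the upper word of ASICCtrl and
   can be reached as ioaddr + ASIC_HI_WORD(ASICCtrl). */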
300 
301 enum ASICCtrl_HiWord_bit {
302 	GlobalReset = 0x0001,
303 	RxReset = 0x0002,
304 	TxReset = 0x0004,
305 	DMAReset = 0x0008,
306 	FIFOReset = 0x0010,
307 	NetworkReset = 0x0020,
308 	HostReset = 0x0040,
309 	ResetBusy = 0x0400,
310 };
311 
312 /* Bits in the interrupt status/mask registers. */
313 enum intr_status_bits {
314 	IntrSummary=0x0001, IntrPCIErr=0x0002, IntrMACCtrl=0x0008,
315 	IntrTxDone=0x0004, IntrRxDone=0x0010, IntrRxStart=0x0020,
316 	IntrDrvRqst=0x0040,
317 	StatsMax=0x0080, LinkChange=0x0100,
318 	IntrTxDMADone=0x0200, IntrRxDMADone=0x0400,
319 };
320 
321 /* Bits in the RxMode register. */
322 enum rx_mode_bits {
323 	AcceptAllIPMulti=0x20, AcceptMultiHash=0x10, AcceptAll=0x08,
324 	AcceptBroadcast=0x04, AcceptMulticast=0x02, AcceptMyPhys=0x01,
325 };
326 /* Bits in MACCtrl. */
327 enum mac_ctrl0_bits {
328 	EnbFullDuplex=0x20, EnbRcvLargeFrame=0x40,
329 	EnbFlowCtrl=0x100, EnbPassRxCRC=0x200,
330 };
331 enum mac_ctrl1_bits {
332 	StatsEnable=0x0020,	StatsDisable=0x0040, StatsEnabled=0x0080,
333 	TxEnable=0x0100, TxDisable=0x0200, TxEnabled=0x0400,
334 	RxEnable=0x0800, RxDisable=0x1000, RxEnabled=0x2000,
335 };
336 
337 /* Bits in WakeEvent register. */
338 enum wake_event_bits {
339 	WakePktEnable = 0x01,
340 	MagicPktEnable = 0x02,
341 	LinkEventEnable = 0x04,
342 	WolEnable = 0x80,
343 };
344 
345 /* The Rx and Tx buffer descriptors. */
346 /* Note that using only 32 bit fields simplifies conversion to big-endian
347    architectures. */
348 struct netdev_desc {
349 	__le32 next_desc;
350 	__le32 status;
351 	struct desc_frag { __le32 addr, length; } frag[1];
352 };
353 
354 /* Bits in netdev_desc.status */
355 enum desc_status_bits {
356 	DescOwn=0x8000,
357 	DescEndPacket=0x4000,
358 	DescEndRing=0x2000,
359 	LastFrag=0x80000000,
360 	DescIntrOnTx=0x8000,
361 	DescIntrOnDMADone=0x80000000,
362 	DisableAlign = 0x00000001,
363 };
364 
365 #define PRIV_ALIGN	15 	/* Required alignment mask */
366 /* Use  __attribute__((aligned (L1_CACHE_BYTES)))  to maintain alignment
367    within the structure. */
368 #define MII_CNT		4
369 struct netdev_private {
370 	/* Descriptor rings first for alignment. */
371 	struct netdev_desc *rx_ring;
372 	struct netdev_desc *tx_ring;
373 	struct sk_buff* rx_skbuff[RX_RING_SIZE];
374 	struct sk_buff* tx_skbuff[TX_RING_SIZE];
	dma_addr_t tx_ring_dma;
	dma_addr_t rx_ring_dma;
377 	struct timer_list timer;		/* Media monitoring timer. */
378 	/* ethtool extra stats */
379 	struct {
380 		u64 tx_multiple_collisions;
381 		u64 tx_single_collisions;
382 		u64 tx_late_collisions;
383 		u64 tx_deferred;
384 		u64 tx_deferred_excessive;
385 		u64 tx_aborted;
386 		u64 tx_bcasts;
387 		u64 rx_bcasts;
388 		u64 tx_mcasts;
389 		u64 rx_mcasts;
390 	} xstats;
391 	/* Frequently used values: keep some adjacent for cache effect. */
392 	spinlock_t lock;
393 	int msg_enable;
394 	int chip_id;
395 	unsigned int cur_rx, dirty_rx;		/* Producer/consumer ring indices */
396 	unsigned int rx_buf_sz;			/* Based on MTU+slack. */
397 	struct netdev_desc *last_tx;		/* Last Tx descriptor used. */
398 	unsigned int cur_tx, dirty_tx;
	/* These values keep track of the transceiver/media in use. */
400 	unsigned int flowctrl:1;
401 	unsigned int default_port:4;		/* Last dev->if_port value. */
402 	unsigned int an_enable:1;
403 	unsigned int speed;
404 	unsigned int wol_enabled:1;			/* Wake on LAN enabled */
405 	struct tasklet_struct rx_tasklet;
406 	struct tasklet_struct tx_tasklet;
407 	int budget;
408 	int cur_task;
409 	/* Multicast and receive mode. */
410 	spinlock_t mcastlock;			/* SMP lock multicast updates. */
411 	u16 mcast_filter[4];
412 	/* MII transceiver section. */
413 	struct mii_if_info mii_if;
414 	int mii_preamble_required;
415 	unsigned char phys[MII_CNT];		/* MII device addresses, only first one used. */
416 	struct pci_dev *pci_dev;
417 	void __iomem *base;
418 	spinlock_t statlock;
419 };
420 
421 /* The station address location in the EEPROM. */
422 #define EEPROM_SA_OFFSET	0x10
423 #define DEFAULT_INTR (IntrRxDMADone | IntrPCIErr | \
424 			IntrDrvRqst | IntrTxDone | StatsMax | \
425 			LinkChange)
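
/* DEFAULT_INTR is the normal run-time interrupt mask: Rx DMA completion
   schedules the rx_tasklet, while Tx completion and the link/statistics/PCI
   error sources are handled directly in intr_handler(). */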
426 
427 static int  change_mtu(struct net_device *dev, int new_mtu);
428 static int  eeprom_read(void __iomem *ioaddr, int location);
429 static int  mdio_read(struct net_device *dev, int phy_id, int location);
430 static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
431 static int  mdio_wait_link(struct net_device *dev, int wait);
432 static int  netdev_open(struct net_device *dev);
433 static void check_duplex(struct net_device *dev);
434 static void netdev_timer(unsigned long data);
435 static void tx_timeout(struct net_device *dev);
436 static void init_ring(struct net_device *dev);
437 static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev);
438 static int reset_tx (struct net_device *dev);
439 static irqreturn_t intr_handler(int irq, void *dev_instance);
440 static void rx_poll(unsigned long data);
441 static void tx_poll(unsigned long data);
442 static void refill_rx (struct net_device *dev);
static void netdev_error(struct net_device *dev, int intr_status);
445 static void set_rx_mode(struct net_device *dev);
446 static int __set_mac_addr(struct net_device *dev);
447 static int sundance_set_mac_addr(struct net_device *dev, void *data);
448 static struct net_device_stats *get_stats(struct net_device *dev);
449 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
450 static int  netdev_close(struct net_device *dev);
451 static const struct ethtool_ops ethtool_ops;
452 
453 static void sundance_reset(struct net_device *dev, unsigned long reset_cmd)
454 {
455 	struct netdev_private *np = netdev_priv(dev);
456 	void __iomem *ioaddr = np->base + ASICCtrl;
457 	int countdown;
458 
459 	/* ST201 documentation states ASICCtrl is a 32bit register */
460 	iowrite32 (reset_cmd | ioread32 (ioaddr), ioaddr);
461 	/* ST201 documentation states reset can take up to 1 ms */
462 	countdown = 10 + 1;
463 	while (ioread32 (ioaddr) & (ResetBusy << 16)) {
464 		if (--countdown == 0) {
			printk(KERN_WARNING "%s: reset not completed!\n", dev->name);
466 			break;
467 		}
468 		udelay(100);
469 	}
470 }
471 
472 static const struct net_device_ops netdev_ops = {
473 	.ndo_open		= netdev_open,
474 	.ndo_stop		= netdev_close,
475 	.ndo_start_xmit		= start_tx,
476 	.ndo_get_stats 		= get_stats,
477 	.ndo_set_rx_mode	= set_rx_mode,
478 	.ndo_do_ioctl 		= netdev_ioctl,
479 	.ndo_tx_timeout		= tx_timeout,
480 	.ndo_change_mtu		= change_mtu,
481 	.ndo_set_mac_address 	= sundance_set_mac_addr,
482 	.ndo_validate_addr	= eth_validate_addr,
483 };
484 
485 static int sundance_probe1(struct pci_dev *pdev,
486 			   const struct pci_device_id *ent)
487 {
488 	struct net_device *dev;
489 	struct netdev_private *np;
490 	static int card_idx;
491 	int chip_idx = ent->driver_data;
492 	int irq;
493 	int i;
494 	void __iomem *ioaddr;
495 	u16 mii_ctl;
496 	void *ring_space;
497 	dma_addr_t ring_dma;
498 #ifdef USE_IO_OPS
499 	int bar = 0;
500 #else
501 	int bar = 1;
502 #endif
503 	int phy, phy_end, phy_idx = 0;
504 
505 /* when built into the kernel, we only print version if device is found */
506 #ifndef MODULE
507 	static int printed_version;
508 	if (!printed_version++)
509 		printk(version);
510 #endif
511 
512 	if (pci_enable_device(pdev))
513 		return -EIO;
514 	pci_set_master(pdev);
515 
516 	irq = pdev->irq;
517 
518 	dev = alloc_etherdev(sizeof(*np));
519 	if (!dev)
520 		return -ENOMEM;
521 	SET_NETDEV_DEV(dev, &pdev->dev);
522 
523 	if (pci_request_regions(pdev, DRV_NAME))
524 		goto err_out_netdev;
525 
526 	ioaddr = pci_iomap(pdev, bar, netdev_io_size);
527 	if (!ioaddr)
528 		goto err_out_res;
529 
530 	for (i = 0; i < 3; i++)
531 		((__le16 *)dev->dev_addr)[i] =
532 			cpu_to_le16(eeprom_read(ioaddr, i + EEPROM_SA_OFFSET));
533 	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
534 
535 	np = netdev_priv(dev);
536 	np->base = ioaddr;
537 	np->pci_dev = pdev;
538 	np->chip_id = chip_idx;
539 	np->msg_enable = (1 << debug) - 1;
540 	spin_lock_init(&np->lock);
541 	spin_lock_init(&np->statlock);
542 	tasklet_init(&np->rx_tasklet, rx_poll, (unsigned long)dev);
543 	tasklet_init(&np->tx_tasklet, tx_poll, (unsigned long)dev);
544 
545 	ring_space = dma_alloc_coherent(&pdev->dev, TX_TOTAL_SIZE,
546 			&ring_dma, GFP_KERNEL);
547 	if (!ring_space)
548 		goto err_out_cleardev;
549 	np->tx_ring = (struct netdev_desc *)ring_space;
550 	np->tx_ring_dma = ring_dma;
551 
552 	ring_space = dma_alloc_coherent(&pdev->dev, RX_TOTAL_SIZE,
553 			&ring_dma, GFP_KERNEL);
554 	if (!ring_space)
555 		goto err_out_unmap_tx;
556 	np->rx_ring = (struct netdev_desc *)ring_space;
557 	np->rx_ring_dma = ring_dma;
558 
559 	np->mii_if.dev = dev;
560 	np->mii_if.mdio_read = mdio_read;
561 	np->mii_if.mdio_write = mdio_write;
562 	np->mii_if.phy_id_mask = 0x1f;
563 	np->mii_if.reg_num_mask = 0x1f;
564 
565 	/* The chip-specific entries in the device structure. */
566 	dev->netdev_ops = &netdev_ops;
567 	SET_ETHTOOL_OPS(dev, &ethtool_ops);
568 	dev->watchdog_timeo = TX_TIMEOUT;
569 
570 	pci_set_drvdata(pdev, dev);
571 
572 	i = register_netdev(dev);
573 	if (i)
574 		goto err_out_unmap_rx;
575 
576 	printk(KERN_INFO "%s: %s at %p, %pM, IRQ %d.\n",
577 	       dev->name, pci_id_tbl[chip_idx].name, ioaddr,
578 	       dev->dev_addr, irq);
579 
580 	np->phys[0] = 1;		/* Default setting */
581 	np->mii_preamble_required++;
582 
583 	/*
	 * It seems some PHYs don't deal well with address 0 being accessed
	 * first.
586 	 */
587 	if (sundance_pci_tbl[np->chip_id].device == 0x0200) {
588 		phy = 0;
589 		phy_end = 31;
590 	} else {
591 		phy = 1;
592 		phy_end = 32;	/* wraps to zero, due to 'phy & 0x1f' */
593 	}
594 	for (; phy <= phy_end && phy_idx < MII_CNT; phy++) {
595 		int phyx = phy & 0x1f;
596 		int mii_status = mdio_read(dev, phyx, MII_BMSR);
597 		if (mii_status != 0xffff  &&  mii_status != 0x0000) {
598 			np->phys[phy_idx++] = phyx;
599 			np->mii_if.advertising = mdio_read(dev, phyx, MII_ADVERTISE);
600 			if ((mii_status & 0x0040) == 0)
601 				np->mii_preamble_required++;
602 			printk(KERN_INFO "%s: MII PHY found at address %d, status "
603 				   "0x%4.4x advertising %4.4x.\n",
604 				   dev->name, phyx, mii_status, np->mii_if.advertising);
605 		}
606 	}
607 	np->mii_preamble_required--;
608 
609 	if (phy_idx == 0) {
610 		printk(KERN_INFO "%s: No MII transceiver found, aborting.  ASIC status %x\n",
611 			   dev->name, ioread32(ioaddr + ASICCtrl));
612 		goto err_out_unregister;
613 	}
614 
615 	np->mii_if.phy_id = np->phys[0];
616 
617 	/* Parse override configuration */
618 	np->an_enable = 1;
619 	if (card_idx < MAX_UNITS) {
620 		if (media[card_idx] != NULL) {
621 			np->an_enable = 0;
622 			if (strcmp (media[card_idx], "100mbps_fd") == 0 ||
623 			    strcmp (media[card_idx], "4") == 0) {
624 				np->speed = 100;
625 				np->mii_if.full_duplex = 1;
626 			} else if (strcmp (media[card_idx], "100mbps_hd") == 0 ||
627 				   strcmp (media[card_idx], "3") == 0) {
628 				np->speed = 100;
629 				np->mii_if.full_duplex = 0;
630 			} else if (strcmp (media[card_idx], "10mbps_fd") == 0 ||
631 				   strcmp (media[card_idx], "2") == 0) {
632 				np->speed = 10;
633 				np->mii_if.full_duplex = 1;
634 			} else if (strcmp (media[card_idx], "10mbps_hd") == 0 ||
635 				   strcmp (media[card_idx], "1") == 0) {
636 				np->speed = 10;
637 				np->mii_if.full_duplex = 0;
638 			} else {
639 				np->an_enable = 1;
640 			}
641 		}
642 		if (flowctrl == 1)
643 			np->flowctrl = 1;
644 	}
645 
646 	/* Fibre PHY? */
647 	if (ioread32 (ioaddr + ASICCtrl) & 0x80) {
648 		/* Default 100Mbps Full */
649 		if (np->an_enable) {
650 			np->speed = 100;
651 			np->mii_if.full_duplex = 1;
652 			np->an_enable = 0;
653 		}
654 	}
655 	/* Reset PHY */
656 	mdio_write (dev, np->phys[0], MII_BMCR, BMCR_RESET);
657 	mdelay (300);
658 	/* If flow control enabled, we need to advertise it.*/
659 	if (np->flowctrl)
660 		mdio_write (dev, np->phys[0], MII_ADVERTISE, np->mii_if.advertising | 0x0400);
661 	mdio_write (dev, np->phys[0], MII_BMCR, BMCR_ANENABLE|BMCR_ANRESTART);
662 	/* Force media type */
663 	if (!np->an_enable) {
664 		mii_ctl = 0;
665 		mii_ctl |= (np->speed == 100) ? BMCR_SPEED100 : 0;
666 		mii_ctl |= (np->mii_if.full_duplex) ? BMCR_FULLDPLX : 0;
667 		mdio_write (dev, np->phys[0], MII_BMCR, mii_ctl);
668 		printk (KERN_INFO "Override speed=%d, %s duplex\n",
669 			np->speed, np->mii_if.full_duplex ? "Full" : "Half");
670 
671 	}
672 
673 	/* Perhaps move the reset here? */
674 	/* Reset the chip to erase previous misconfiguration. */
675 	if (netif_msg_hw(np))
676 		printk("ASIC Control is %x.\n", ioread32(ioaddr + ASICCtrl));
677 	sundance_reset(dev, 0x00ff << 16);
678 	if (netif_msg_hw(np))
679 		printk("ASIC Control is now %x.\n", ioread32(ioaddr + ASICCtrl));
680 
681 	card_idx++;
682 	return 0;
683 
684 err_out_unregister:
685 	unregister_netdev(dev);
686 err_out_unmap_rx:
687 	dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE,
688 		np->rx_ring, np->rx_ring_dma);
689 err_out_unmap_tx:
690 	dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE,
691 		np->tx_ring, np->tx_ring_dma);
692 err_out_cleardev:
693 	pci_set_drvdata(pdev, NULL);
694 	pci_iounmap(pdev, ioaddr);
695 err_out_res:
696 	pci_release_regions(pdev);
697 err_out_netdev:
698 	free_netdev (dev);
699 	return -ENODEV;
700 }
701 
702 static int change_mtu(struct net_device *dev, int new_mtu)
703 {
704 	if ((new_mtu < 68) || (new_mtu > 8191)) /* Set by RxDMAFrameLen */
705 		return -EINVAL;
706 	if (netif_running(dev))
707 		return -EBUSY;
708 	dev->mtu = new_mtu;
709 	return 0;
710 }
711 
712 #define eeprom_delay(ee_addr)	ioread32(ee_addr)
713 /* Read the EEPROM and MII Management Data I/O (MDIO) interfaces. */
714 static int eeprom_read(void __iomem *ioaddr, int location)
715 {
716 	int boguscnt = 10000;		/* Typical 1900 ticks. */
717 	iowrite16(0x0200 | (location & 0xff), ioaddr + EECtrl);
718 	do {
719 		eeprom_delay(ioaddr + EECtrl);
720 		if (! (ioread16(ioaddr + EECtrl) & 0x8000)) {
721 			return ioread16(ioaddr + EEData);
722 		}
723 	} while (--boguscnt > 0);
724 	return 0;
725 }
726 
727 /*  MII transceiver control section.
728 	Read and write the MII registers using software-generated serial
729 	MDIO protocol.  See the MII specifications or DP83840A data sheet
730 	for details.
731 
	The maximum data clock rate is 2.5 MHz.  The minimum timing is usually
	met by back-to-back 33 MHz PCI cycles. */
734 #define mdio_delay() ioread8(mdio_addr)
735 
736 enum mii_reg_bits {
737 	MDIO_ShiftClk=0x0001, MDIO_Data=0x0002, MDIO_EnbOutput=0x0004,
738 };
739 #define MDIO_EnbIn  (0)
740 #define MDIO_WRITE0 (MDIO_EnbOutput)
741 #define MDIO_WRITE1 (MDIO_Data | MDIO_EnbOutput)
742 
743 /* Generate the preamble required for initial synchronization and
744    a few older transceivers. */
745 static void mdio_sync(void __iomem *mdio_addr)
746 {
747 	int bits = 32;
748 
749 	/* Establish sync by sending at least 32 logic ones. */
750 	while (--bits >= 0) {
751 		iowrite8(MDIO_WRITE1, mdio_addr);
752 		mdio_delay();
753 		iowrite8(MDIO_WRITE1 | MDIO_ShiftClk, mdio_addr);
754 		mdio_delay();
755 	}
756 }
757 
758 static int mdio_read(struct net_device *dev, int phy_id, int location)
759 {
760 	struct netdev_private *np = netdev_priv(dev);
761 	void __iomem *mdio_addr = np->base + MIICtrl;
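	/* The read frame shifted out below is, MSB first: two sync ones,
	   start (01), read opcode (10), then the 5-bit PHY and register
	   addresses; 0xf6 << 10, truncated to 16 bits, supplies the leading
	   six bits of that pattern. */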
762 	int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
763 	int i, retval = 0;
764 
765 	if (np->mii_preamble_required)
766 		mdio_sync(mdio_addr);
767 
768 	/* Shift the read command bits out. */
769 	for (i = 15; i >= 0; i--) {
770 		int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
771 
772 		iowrite8(dataval, mdio_addr);
773 		mdio_delay();
774 		iowrite8(dataval | MDIO_ShiftClk, mdio_addr);
775 		mdio_delay();
776 	}
777 	/* Read the two transition, 16 data, and wire-idle bits. */
778 	for (i = 19; i > 0; i--) {
779 		iowrite8(MDIO_EnbIn, mdio_addr);
780 		mdio_delay();
781 		retval = (retval << 1) | ((ioread8(mdio_addr) & MDIO_Data) ? 1 : 0);
782 		iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
783 		mdio_delay();
784 	}
785 	return (retval>>1) & 0xffff;
786 }
787 
788 static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
789 {
790 	struct netdev_private *np = netdev_priv(dev);
791 	void __iomem *mdio_addr = np->base + MIICtrl;
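	/* The 32-bit write frame shifted out below is, MSB first: start (01),
	   write opcode (01), 5-bit PHY address, 5-bit register address,
	   turnaround (10), then the 16 data bits; 0x5002 << 16 supplies the
	   start/opcode and turnaround bits. */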
792 	int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location<<18) | value;
793 	int i;
794 
795 	if (np->mii_preamble_required)
796 		mdio_sync(mdio_addr);
797 
798 	/* Shift the command bits out. */
799 	for (i = 31; i >= 0; i--) {
800 		int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
801 
802 		iowrite8(dataval, mdio_addr);
803 		mdio_delay();
804 		iowrite8(dataval | MDIO_ShiftClk, mdio_addr);
805 		mdio_delay();
806 	}
807 	/* Clear out extra bits. */
808 	for (i = 2; i > 0; i--) {
809 		iowrite8(MDIO_EnbIn, mdio_addr);
810 		mdio_delay();
811 		iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
812 		mdio_delay();
813 	}
814 }
815 
816 static int mdio_wait_link(struct net_device *dev, int wait)
817 {
818 	int bmsr;
819 	int phy_id;
820 	struct netdev_private *np;
821 
822 	np = netdev_priv(dev);
823 	phy_id = np->phys[0];
824 
825 	do {
826 		bmsr = mdio_read(dev, phy_id, MII_BMSR);
827 		if (bmsr & 0x0004)
828 			return 0;
829 		mdelay(1);
830 	} while (--wait > 0);
831 	return -1;
832 }
833 
834 static int netdev_open(struct net_device *dev)
835 {
836 	struct netdev_private *np = netdev_priv(dev);
837 	void __iomem *ioaddr = np->base;
838 	const int irq = np->pci_dev->irq;
839 	unsigned long flags;
840 	int i;
841 
842 	sundance_reset(dev, 0x00ff << 16);
843 
844 	i = request_irq(irq, intr_handler, IRQF_SHARED, dev->name, dev);
845 	if (i)
846 		return i;
847 
848 	if (netif_msg_ifup(np))
849 		printk(KERN_DEBUG "%s: netdev_open() irq %d\n", dev->name, irq);
850 
851 	init_ring(dev);
852 
853 	iowrite32(np->rx_ring_dma, ioaddr + RxListPtr);
854 	/* The Tx list pointer is written as packets are queued. */
855 
856 	/* Initialize other registers. */
857 	__set_mac_addr(dev);
858 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
859 	iowrite16(dev->mtu + 18, ioaddr + MaxFrameSize);
860 #else
861 	iowrite16(dev->mtu + 14, ioaddr + MaxFrameSize);
862 #endif
863 	if (dev->mtu > 2047)
864 		iowrite32(ioread32(ioaddr + ASICCtrl) | 0x0C, ioaddr + ASICCtrl);
865 
866 	/* Configure the PCI bus bursts and FIFO thresholds. */
867 
868 	if (dev->if_port == 0)
869 		dev->if_port = np->default_port;
870 
871 	spin_lock_init(&np->mcastlock);
872 
873 	set_rx_mode(dev);
874 	iowrite16(0, ioaddr + IntrEnable);
875 	iowrite16(0, ioaddr + DownCounter);
876 	/* Set the chip to poll every N*320nsec. */
877 	iowrite8(100, ioaddr + RxDMAPollPeriod);
878 	iowrite8(127, ioaddr + TxDMAPollPeriod);
879 	/* Fix DFE-580TX packet drop issue */
880 	if (np->pci_dev->revision >= 0x14)
881 		iowrite8(0x01, ioaddr + DebugCtrl1);
882 	netif_start_queue(dev);
883 
884 	spin_lock_irqsave(&np->lock, flags);
885 	reset_tx(dev);
886 	spin_unlock_irqrestore(&np->lock, flags);
887 
888 	iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);
889 
	/* Disable WoL */
	iowrite8(ioread8(ioaddr + WakeEvent) &
		 ~(WakePktEnable | MagicPktEnable | LinkEventEnable | WolEnable),
		 ioaddr + WakeEvent);
892 	np->wol_enabled = 0;
893 
894 	if (netif_msg_ifup(np))
895 		printk(KERN_DEBUG "%s: Done netdev_open(), status: Rx %x Tx %x "
896 			   "MAC Control %x, %4.4x %4.4x.\n",
897 			   dev->name, ioread32(ioaddr + RxStatus), ioread8(ioaddr + TxStatus),
898 			   ioread32(ioaddr + MACCtrl0),
899 			   ioread16(ioaddr + MACCtrl1), ioread16(ioaddr + MACCtrl0));
900 
901 	/* Set the timer to check for link beat. */
902 	init_timer(&np->timer);
903 	np->timer.expires = jiffies + 3*HZ;
904 	np->timer.data = (unsigned long)dev;
905 	np->timer.function = netdev_timer;				/* timer handler */
906 	add_timer(&np->timer);
907 
908 	/* Enable interrupts by setting the interrupt mask. */
909 	iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
910 
911 	return 0;
912 }
913 
914 static void check_duplex(struct net_device *dev)
915 {
916 	struct netdev_private *np = netdev_priv(dev);
917 	void __iomem *ioaddr = np->base;
918 	int mii_lpa = mdio_read(dev, np->phys[0], MII_LPA);
919 	int negotiated = mii_lpa & np->mii_if.advertising;
920 	int duplex;
921 
922 	/* Force media */
923 	if (!np->an_enable || mii_lpa == 0xffff) {
924 		if (np->mii_if.full_duplex)
925 			iowrite16 (ioread16 (ioaddr + MACCtrl0) | EnbFullDuplex,
926 				ioaddr + MACCtrl0);
927 		return;
928 	}
929 
930 	/* Autonegotiation */
931 	duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
932 	if (np->mii_if.full_duplex != duplex) {
933 		np->mii_if.full_duplex = duplex;
934 		if (netif_msg_link(np))
935 			printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d "
936 				   "negotiated capability %4.4x.\n", dev->name,
937 				   duplex ? "full" : "half", np->phys[0], negotiated);
938 		iowrite16(ioread16(ioaddr + MACCtrl0) | (duplex ? 0x20 : 0), ioaddr + MACCtrl0);
939 	}
940 }
941 
942 static void netdev_timer(unsigned long data)
943 {
944 	struct net_device *dev = (struct net_device *)data;
945 	struct netdev_private *np = netdev_priv(dev);
946 	void __iomem *ioaddr = np->base;
947 	int next_tick = 10*HZ;
948 
949 	if (netif_msg_timer(np)) {
950 		printk(KERN_DEBUG "%s: Media selection timer tick, intr status %4.4x, "
951 			   "Tx %x Rx %x.\n",
952 			   dev->name, ioread16(ioaddr + IntrEnable),
953 			   ioread8(ioaddr + TxStatus), ioread32(ioaddr + RxStatus));
954 	}
955 	check_duplex(dev);
956 	np->timer.expires = jiffies + next_tick;
957 	add_timer(&np->timer);
958 }
959 
960 static void tx_timeout(struct net_device *dev)
961 {
962 	struct netdev_private *np = netdev_priv(dev);
963 	void __iomem *ioaddr = np->base;
964 	unsigned long flag;
965 
966 	netif_stop_queue(dev);
967 	tasklet_disable(&np->tx_tasklet);
968 	iowrite16(0, ioaddr + IntrEnable);
969 	printk(KERN_WARNING "%s: Transmit timed out, TxStatus %2.2x "
970 		   "TxFrameId %2.2x,"
971 		   " resetting...\n", dev->name, ioread8(ioaddr + TxStatus),
972 		   ioread8(ioaddr + TxFrameId));
973 
974 	{
975 		int i;
976 		for (i=0; i<TX_RING_SIZE; i++) {
977 			printk(KERN_DEBUG "%02x %08llx %08x %08x(%02x) %08x %08x\n", i,
978 				(unsigned long long)(np->tx_ring_dma + i*sizeof(*np->tx_ring)),
979 				le32_to_cpu(np->tx_ring[i].next_desc),
980 				le32_to_cpu(np->tx_ring[i].status),
981 				(le32_to_cpu(np->tx_ring[i].status) >> 2) & 0xff,
982 				le32_to_cpu(np->tx_ring[i].frag[0].addr),
983 				le32_to_cpu(np->tx_ring[i].frag[0].length));
984 		}
985 		printk(KERN_DEBUG "TxListPtr=%08x netif_queue_stopped=%d\n",
986 			ioread32(np->base + TxListPtr),
987 			netif_queue_stopped(dev));
988 		printk(KERN_DEBUG "cur_tx=%d(%02x) dirty_tx=%d(%02x)\n",
989 			np->cur_tx, np->cur_tx % TX_RING_SIZE,
990 			np->dirty_tx, np->dirty_tx % TX_RING_SIZE);
991 		printk(KERN_DEBUG "cur_rx=%d dirty_rx=%d\n", np->cur_rx, np->dirty_rx);
992 		printk(KERN_DEBUG "cur_task=%d\n", np->cur_task);
993 	}
994 	spin_lock_irqsave(&np->lock, flag);
995 
	/* Stop and restart the chip's Tx processes. */
997 	reset_tx(dev);
998 	spin_unlock_irqrestore(&np->lock, flag);
999 
1000 	dev->if_port = 0;
1001 
1002 	dev->trans_start = jiffies; /* prevent tx timeout */
1003 	dev->stats.tx_errors++;
1004 	if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
1005 		netif_wake_queue(dev);
1006 	}
1007 	iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
1008 	tasklet_enable(&np->tx_tasklet);
1009 }
1010 
1011 
1012 /* Initialize the Rx and Tx rings, along with various 'dev' bits. */
1013 static void init_ring(struct net_device *dev)
1014 {
1015 	struct netdev_private *np = netdev_priv(dev);
1016 	int i;
1017 
1018 	np->cur_rx = np->cur_tx = 0;
1019 	np->dirty_rx = np->dirty_tx = 0;
1020 	np->cur_task = 0;
1021 
1022 	np->rx_buf_sz = (dev->mtu <= 1520 ? PKT_BUF_SZ : dev->mtu + 16);
1023 
1024 	/* Initialize all Rx descriptors. */
1025 	for (i = 0; i < RX_RING_SIZE; i++) {
1026 		np->rx_ring[i].next_desc = cpu_to_le32(np->rx_ring_dma +
1027 			((i+1)%RX_RING_SIZE)*sizeof(*np->rx_ring));
1028 		np->rx_ring[i].status = 0;
1029 		np->rx_ring[i].frag[0].length = 0;
1030 		np->rx_skbuff[i] = NULL;
1031 	}
1032 
1033 	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
1034 	for (i = 0; i < RX_RING_SIZE; i++) {
1035 		struct sk_buff *skb =
1036 			netdev_alloc_skb(dev, np->rx_buf_sz + 2);
1037 		np->rx_skbuff[i] = skb;
1038 		if (skb == NULL)
1039 			break;
1040 		skb_reserve(skb, 2);	/* 16 byte align the IP header. */
1041 		np->rx_ring[i].frag[0].addr = cpu_to_le32(
1042 			dma_map_single(&np->pci_dev->dev, skb->data,
1043 				np->rx_buf_sz, DMA_FROM_DEVICE));
1044 		if (dma_mapping_error(&np->pci_dev->dev,
1045 					np->rx_ring[i].frag[0].addr)) {
1046 			dev_kfree_skb(skb);
1047 			np->rx_skbuff[i] = NULL;
1048 			break;
1049 		}
1050 		np->rx_ring[i].frag[0].length = cpu_to_le32(np->rx_buf_sz | LastFrag);
1051 	}
1052 	np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
1053 
1054 	for (i = 0; i < TX_RING_SIZE; i++) {
1055 		np->tx_skbuff[i] = NULL;
1056 		np->tx_ring[i].status = 0;
1057 	}
1058 }
1059 
1060 static void tx_poll (unsigned long data)
1061 {
1062 	struct net_device *dev = (struct net_device *)data;
1063 	struct netdev_private *np = netdev_priv(dev);
1064 	unsigned head = np->cur_task % TX_RING_SIZE;
1065 	struct netdev_desc *txdesc =
1066 		&np->tx_ring[(np->cur_tx - 1) % TX_RING_SIZE];
1067 
1068 	/* Chain the next pointer */
1069 	for (; np->cur_tx - np->cur_task > 0; np->cur_task++) {
1070 		int entry = np->cur_task % TX_RING_SIZE;
1071 		txdesc = &np->tx_ring[entry];
1072 		if (np->last_tx) {
1073 			np->last_tx->next_desc = cpu_to_le32(np->tx_ring_dma +
1074 				entry*sizeof(struct netdev_desc));
1075 		}
1076 		np->last_tx = txdesc;
1077 	}
1078 	/* Indicate the latest descriptor of tx ring */
1079 	txdesc->status |= cpu_to_le32(DescIntrOnTx);
1080 
1081 	if (ioread32 (np->base + TxListPtr) == 0)
1082 		iowrite32 (np->tx_ring_dma + head * sizeof(struct netdev_desc),
1083 			np->base + TxListPtr);
1084 }
1085 
1086 static netdev_tx_t
1087 start_tx (struct sk_buff *skb, struct net_device *dev)
1088 {
1089 	struct netdev_private *np = netdev_priv(dev);
1090 	struct netdev_desc *txdesc;
1091 	unsigned entry;
1092 
1093 	/* Calculate the next Tx descriptor entry. */
1094 	entry = np->cur_tx % TX_RING_SIZE;
1095 	np->tx_skbuff[entry] = skb;
1096 	txdesc = &np->tx_ring[entry];
1097 
1098 	txdesc->next_desc = 0;
1099 	txdesc->status = cpu_to_le32 ((entry << 2) | DisableAlign);
1100 	txdesc->frag[0].addr = cpu_to_le32(dma_map_single(&np->pci_dev->dev,
1101 				skb->data, skb->len, DMA_TO_DEVICE));
1102 	if (dma_mapping_error(&np->pci_dev->dev,
1103 				txdesc->frag[0].addr))
1104 			goto drop_frame;
1105 	txdesc->frag[0].length = cpu_to_le32 (skb->len | LastFrag);
1106 
1107 	/* Increment cur_tx before tasklet_schedule() */
1108 	np->cur_tx++;
1109 	mb();
1110 	/* Schedule a tx_poll() task */
1111 	tasklet_schedule(&np->tx_tasklet);
1112 
1113 	/* On some architectures: explicitly flush cache lines here. */
1114 	if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 1 &&
1115 	    !netif_queue_stopped(dev)) {
1116 		/* do nothing */
1117 	} else {
1118 		netif_stop_queue (dev);
1119 	}
1120 	if (netif_msg_tx_queued(np)) {
1121 		printk (KERN_DEBUG
1122 			"%s: Transmit frame #%d queued in slot %d.\n",
1123 			dev->name, np->cur_tx, entry);
1124 	}
1125 	return NETDEV_TX_OK;
1126 
1127 drop_frame:
1128 	dev_kfree_skb(skb);
1129 	np->tx_skbuff[entry] = NULL;
1130 	dev->stats.tx_dropped++;
1131 	return NETDEV_TX_OK;
1132 }
1133 
1134 /* Reset hardware tx and free all of tx buffers */
1135 static int
1136 reset_tx (struct net_device *dev)
1137 {
1138 	struct netdev_private *np = netdev_priv(dev);
1139 	void __iomem *ioaddr = np->base;
1140 	struct sk_buff *skb;
1141 	int i;
1142 
1143 	/* Reset tx logic, TxListPtr will be cleaned */
1144 	iowrite16 (TxDisable, ioaddr + MACCtrl1);
1145 	sundance_reset(dev, (NetworkReset|FIFOReset|DMAReset|TxReset) << 16);
1146 
1147 	/* free all tx skbuff */
1148 	for (i = 0; i < TX_RING_SIZE; i++) {
1149 		np->tx_ring[i].next_desc = 0;
1150 
1151 		skb = np->tx_skbuff[i];
1152 		if (skb) {
1153 			dma_unmap_single(&np->pci_dev->dev,
1154 				le32_to_cpu(np->tx_ring[i].frag[0].addr),
1155 				skb->len, DMA_TO_DEVICE);
1156 			dev_kfree_skb_any(skb);
1157 			np->tx_skbuff[i] = NULL;
1158 			dev->stats.tx_dropped++;
1159 		}
1160 	}
1161 	np->cur_tx = np->dirty_tx = 0;
1162 	np->cur_task = 0;
1163 
1164 	np->last_tx = NULL;
1165 	iowrite8(127, ioaddr + TxDMAPollPeriod);
1166 
1167 	iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);
1168 	return 0;
1169 }
1170 
/* The interrupt handler cleans up after the Tx thread
   and schedules the Rx tasklet's work */
1173 static irqreturn_t intr_handler(int irq, void *dev_instance)
1174 {
1175 	struct net_device *dev = (struct net_device *)dev_instance;
1176 	struct netdev_private *np = netdev_priv(dev);
1177 	void __iomem *ioaddr = np->base;
1178 	int hw_frame_id;
1179 	int tx_cnt;
1180 	int tx_status;
1181 	int handled = 0;
1182 	int i;
1183 
1184 
1185 	do {
1186 		int intr_status = ioread16(ioaddr + IntrStatus);
1187 		iowrite16(intr_status, ioaddr + IntrStatus);
1188 
1189 		if (netif_msg_intr(np))
1190 			printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n",
1191 				   dev->name, intr_status);
1192 
1193 		if (!(intr_status & DEFAULT_INTR))
1194 			break;
1195 
1196 		handled = 1;
1197 
1198 		if (intr_status & (IntrRxDMADone)) {
1199 			iowrite16(DEFAULT_INTR & ~(IntrRxDone|IntrRxDMADone),
1200 					ioaddr + IntrEnable);
1201 			if (np->budget < 0)
1202 				np->budget = RX_BUDGET;
1203 			tasklet_schedule(&np->rx_tasklet);
1204 		}
1205 		if (intr_status & (IntrTxDone | IntrDrvRqst)) {
1206 			tx_status = ioread16 (ioaddr + TxStatus);
1207 			for (tx_cnt=32; tx_status & 0x80; --tx_cnt) {
				if (netif_msg_tx_done(np))
					printk("%s: Transmit status is %2.2x.\n",
					       dev->name, tx_status);
1212 				if (tx_status & 0x1e) {
1213 					if (netif_msg_tx_err(np))
1214 						printk("%s: Transmit error status %4.4x.\n",
1215 							   dev->name, tx_status);
1216 					dev->stats.tx_errors++;
1217 					if (tx_status & 0x10)
1218 						dev->stats.tx_fifo_errors++;
1219 					if (tx_status & 0x08)
1220 						dev->stats.collisions++;
1221 					if (tx_status & 0x04)
1222 						dev->stats.tx_fifo_errors++;
1223 					if (tx_status & 0x02)
1224 						dev->stats.tx_window_errors++;
1225 
1226 					/*
1227 					** This reset has been verified on
1228 					** DFE-580TX boards ! phdm@macqel.be.
1229 					*/
1230 					if (tx_status & 0x10) {	/* TxUnderrun */
1231 						/* Restart Tx FIFO and transmitter */
1232 						sundance_reset(dev, (NetworkReset|FIFOReset|TxReset) << 16);
1233 						/* No need to reset the Tx pointer here */
1234 					}
1235 					/* Restart the Tx. Need to make sure tx enabled */
1236 					i = 10;
1237 					do {
1238 						iowrite16(ioread16(ioaddr + MACCtrl1) | TxEnable, ioaddr + MACCtrl1);
1239 						if (ioread16(ioaddr + MACCtrl1) & TxEnabled)
1240 							break;
1241 						mdelay(1);
1242 					} while (--i);
1243 				}
1244 				/* Yup, this is a documentation bug.  It cost me *hours*. */
1245 				iowrite16 (0, ioaddr + TxStatus);
1246 				if (tx_cnt < 0) {
1247 					iowrite32(5000, ioaddr + DownCounter);
1248 					break;
1249 				}
1250 				tx_status = ioread16 (ioaddr + TxStatus);
1251 			}
1252 			hw_frame_id = (tx_status >> 8) & 0xff;
		} else {
1254 			hw_frame_id = ioread8(ioaddr + TxFrameId);
1255 		}
1256 
1257 		if (np->pci_dev->revision >= 0x14) {
1258 			spin_lock(&np->lock);
1259 			for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
1260 				int entry = np->dirty_tx % TX_RING_SIZE;
1261 				struct sk_buff *skb;
1262 				int sw_frame_id;
1263 				sw_frame_id = (le32_to_cpu(
1264 					np->tx_ring[entry].status) >> 2) & 0xff;
1265 				if (sw_frame_id == hw_frame_id &&
1266 					!(le32_to_cpu(np->tx_ring[entry].status)
1267 					& 0x00010000))
1268 						break;
1269 				if (sw_frame_id == (hw_frame_id + 1) %
1270 					TX_RING_SIZE)
1271 						break;
1272 				skb = np->tx_skbuff[entry];
1273 				/* Free the original skb. */
1274 				dma_unmap_single(&np->pci_dev->dev,
1275 					le32_to_cpu(np->tx_ring[entry].frag[0].addr),
1276 					skb->len, DMA_TO_DEVICE);
1277 				dev_kfree_skb_irq (np->tx_skbuff[entry]);
1278 				np->tx_skbuff[entry] = NULL;
1279 				np->tx_ring[entry].frag[0].addr = 0;
1280 				np->tx_ring[entry].frag[0].length = 0;
1281 			}
1282 			spin_unlock(&np->lock);
1283 		} else {
1284 			spin_lock(&np->lock);
1285 			for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
1286 				int entry = np->dirty_tx % TX_RING_SIZE;
1287 				struct sk_buff *skb;
1288 				if (!(le32_to_cpu(np->tx_ring[entry].status)
1289 							& 0x00010000))
1290 					break;
1291 				skb = np->tx_skbuff[entry];
1292 				/* Free the original skb. */
1293 				dma_unmap_single(&np->pci_dev->dev,
1294 					le32_to_cpu(np->tx_ring[entry].frag[0].addr),
1295 					skb->len, DMA_TO_DEVICE);
1296 				dev_kfree_skb_irq (np->tx_skbuff[entry]);
1297 				np->tx_skbuff[entry] = NULL;
1298 				np->tx_ring[entry].frag[0].addr = 0;
1299 				np->tx_ring[entry].frag[0].length = 0;
1300 			}
1301 			spin_unlock(&np->lock);
1302 		}
1303 
1304 		if (netif_queue_stopped(dev) &&
1305 			np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
1306 			/* The ring is no longer full, clear busy flag. */
1307 			netif_wake_queue (dev);
1308 		}
1309 		/* Abnormal error summary/uncommon events handlers. */
1310 		if (intr_status & (IntrPCIErr | LinkChange | StatsMax))
1311 			netdev_error(dev, intr_status);
1312 	} while (0);
1313 	if (netif_msg_intr(np))
1314 		printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
1315 			   dev->name, ioread16(ioaddr + IntrStatus));
1316 	return IRQ_RETVAL(handled);
1317 }
1318 
1319 static void rx_poll(unsigned long data)
1320 {
1321 	struct net_device *dev = (struct net_device *)data;
1322 	struct netdev_private *np = netdev_priv(dev);
1323 	int entry = np->cur_rx % RX_RING_SIZE;
1324 	int boguscnt = np->budget;
1325 	void __iomem *ioaddr = np->base;
1326 	int received = 0;
1327 
1328 	/* If EOP is set on the next entry, it's a new packet. Send it up. */
1329 	while (1) {
1330 		struct netdev_desc *desc = &(np->rx_ring[entry]);
1331 		u32 frame_status = le32_to_cpu(desc->status);
1332 		int pkt_len;
1333 
1334 		if (--boguscnt < 0) {
1335 			goto not_done;
1336 		}
1337 		if (!(frame_status & DescOwn))
1338 			break;
1339 		pkt_len = frame_status & 0x1fff;	/* Chip omits the CRC. */
1340 		if (netif_msg_rx_status(np))
1341 			printk(KERN_DEBUG "  netdev_rx() status was %8.8x.\n",
1342 				   frame_status);
1343 		if (frame_status & 0x001f4000) {
			/* There was an error. */
1345 			if (netif_msg_rx_err(np))
1346 				printk(KERN_DEBUG "  netdev_rx() Rx error was %8.8x.\n",
1347 					   frame_status);
1348 			dev->stats.rx_errors++;
1349 			if (frame_status & 0x00100000)
1350 				dev->stats.rx_length_errors++;
1351 			if (frame_status & 0x00010000)
1352 				dev->stats.rx_fifo_errors++;
1353 			if (frame_status & 0x00060000)
1354 				dev->stats.rx_frame_errors++;
1355 			if (frame_status & 0x00080000)
1356 				dev->stats.rx_crc_errors++;
1357 			if (frame_status & 0x00100000) {
1358 				printk(KERN_WARNING "%s: Oversized Ethernet frame,"
1359 					   " status %8.8x.\n",
1360 					   dev->name, frame_status);
1361 			}
1362 		} else {
1363 			struct sk_buff *skb;
1364 #ifndef final_version
1365 			if (netif_msg_rx_status(np))
1366 				printk(KERN_DEBUG "  netdev_rx() normal Rx pkt length %d"
1367 					   ", bogus_cnt %d.\n",
1368 					   pkt_len, boguscnt);
1369 #endif
1370 			/* Check if the packet is long enough to accept without copying
1371 			   to a minimally-sized skbuff. */
1372 			if (pkt_len < rx_copybreak &&
1373 			    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
1374 				skb_reserve(skb, 2);	/* 16 byte align the IP header */
1375 				dma_sync_single_for_cpu(&np->pci_dev->dev,
1376 						le32_to_cpu(desc->frag[0].addr),
1377 						np->rx_buf_sz, DMA_FROM_DEVICE);
1378 				skb_copy_to_linear_data(skb, np->rx_skbuff[entry]->data, pkt_len);
1379 				dma_sync_single_for_device(&np->pci_dev->dev,
1380 						le32_to_cpu(desc->frag[0].addr),
1381 						np->rx_buf_sz, DMA_FROM_DEVICE);
1382 				skb_put(skb, pkt_len);
1383 			} else {
1384 				dma_unmap_single(&np->pci_dev->dev,
1385 					le32_to_cpu(desc->frag[0].addr),
1386 					np->rx_buf_sz, DMA_FROM_DEVICE);
1387 				skb_put(skb = np->rx_skbuff[entry], pkt_len);
1388 				np->rx_skbuff[entry] = NULL;
1389 			}
1390 			skb->protocol = eth_type_trans(skb, dev);
1391 			/* Note: checksum -> skb->ip_summed = CHECKSUM_UNNECESSARY; */
1392 			netif_rx(skb);
1393 		}
1394 		entry = (entry + 1) % RX_RING_SIZE;
1395 		received++;
1396 	}
1397 	np->cur_rx = entry;
1398 	refill_rx (dev);
1399 	np->budget -= received;
1400 	iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
1401 	return;
1402 
1403 not_done:
1404 	np->cur_rx = entry;
1405 	refill_rx (dev);
1406 	if (!received)
1407 		received = 1;
1408 	np->budget -= received;
1409 	if (np->budget <= 0)
1410 		np->budget = RX_BUDGET;
1411 	tasklet_schedule(&np->rx_tasklet);
1412 }
1413 
1414 static void refill_rx (struct net_device *dev)
1415 {
1416 	struct netdev_private *np = netdev_priv(dev);
1417 	int entry;
1418 	int cnt = 0;
1419 
1420 	/* Refill the Rx ring buffers. */
1421 	for (;(np->cur_rx - np->dirty_rx + RX_RING_SIZE) % RX_RING_SIZE > 0;
1422 		np->dirty_rx = (np->dirty_rx + 1) % RX_RING_SIZE) {
1423 		struct sk_buff *skb;
1424 		entry = np->dirty_rx % RX_RING_SIZE;
1425 		if (np->rx_skbuff[entry] == NULL) {
1426 			skb = netdev_alloc_skb(dev, np->rx_buf_sz + 2);
1427 			np->rx_skbuff[entry] = skb;
1428 			if (skb == NULL)
1429 				break;		/* Better luck next round. */
1430 			skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
1431 			np->rx_ring[entry].frag[0].addr = cpu_to_le32(
1432 				dma_map_single(&np->pci_dev->dev, skb->data,
1433 					np->rx_buf_sz, DMA_FROM_DEVICE));
1434 			if (dma_mapping_error(&np->pci_dev->dev,
1435 				    np->rx_ring[entry].frag[0].addr)) {
1436 			    dev_kfree_skb_irq(skb);
1437 			    np->rx_skbuff[entry] = NULL;
1438 			    break;
1439 			}
1440 		}
1441 		/* Perhaps we need not reset this field. */
1442 		np->rx_ring[entry].frag[0].length =
1443 			cpu_to_le32(np->rx_buf_sz | LastFrag);
1444 		np->rx_ring[entry].status = 0;
1445 		cnt++;
1446 	}
1447 }

static void netdev_error(struct net_device *dev, int intr_status)
1449 {
1450 	struct netdev_private *np = netdev_priv(dev);
1451 	void __iomem *ioaddr = np->base;
1452 	u16 mii_ctl, mii_advertise, mii_lpa;
1453 	int speed;
1454 
1455 	if (intr_status & LinkChange) {
1456 		if (mdio_wait_link(dev, 10) == 0) {
1457 			printk(KERN_INFO "%s: Link up\n", dev->name);
1458 			if (np->an_enable) {
1459 				mii_advertise = mdio_read(dev, np->phys[0],
1460 							   MII_ADVERTISE);
1461 				mii_lpa = mdio_read(dev, np->phys[0], MII_LPA);
1462 				mii_advertise &= mii_lpa;
1463 				printk(KERN_INFO "%s: Link changed: ",
1464 					dev->name);
1465 				if (mii_advertise & ADVERTISE_100FULL) {
1466 					np->speed = 100;
1467 					printk("100Mbps, full duplex\n");
1468 				} else if (mii_advertise & ADVERTISE_100HALF) {
1469 					np->speed = 100;
1470 					printk("100Mbps, half duplex\n");
1471 				} else if (mii_advertise & ADVERTISE_10FULL) {
1472 					np->speed = 10;
1473 					printk("10Mbps, full duplex\n");
1474 				} else if (mii_advertise & ADVERTISE_10HALF) {
1475 					np->speed = 10;
1476 					printk("10Mbps, half duplex\n");
1477 				} else
1478 					printk("\n");
1479 
1480 			} else {
1481 				mii_ctl = mdio_read(dev, np->phys[0], MII_BMCR);
1482 				speed = (mii_ctl & BMCR_SPEED100) ? 100 : 10;
1483 				np->speed = speed;
				printk(KERN_INFO "%s: Link changed: %dMbps, ",
1485 					dev->name, speed);
1486 				printk("%s duplex.\n",
1487 					(mii_ctl & BMCR_FULLDPLX) ?
1488 						"full" : "half");
1489 			}
1490 			check_duplex(dev);
1491 			if (np->flowctrl && np->mii_if.full_duplex) {
1492 				iowrite16(ioread16(ioaddr + MulticastFilter1+2) | 0x0200,
1493 					ioaddr + MulticastFilter1+2);
1494 				iowrite16(ioread16(ioaddr + MACCtrl0) | EnbFlowCtrl,
1495 					ioaddr + MACCtrl0);
1496 			}
1497 			netif_carrier_on(dev);
1498 		} else {
1499 			printk(KERN_INFO "%s: Link down\n", dev->name);
1500 			netif_carrier_off(dev);
1501 		}
1502 	}
1503 	if (intr_status & StatsMax) {
1504 		get_stats(dev);
1505 	}
1506 	if (intr_status & IntrPCIErr) {
1507 		printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n",
1508 			   dev->name, intr_status);
1509 		/* We must do a global reset of DMA to continue. */
1510 	}
1511 }
1512 
1513 static struct net_device_stats *get_stats(struct net_device *dev)
1514 {
1515 	struct netdev_private *np = netdev_priv(dev);
1516 	void __iomem *ioaddr = np->base;
1517 	unsigned long flags;
1518 	u8 late_coll, single_coll, mult_coll;
1519 
1520 	spin_lock_irqsave(&np->statlock, flags);
	/* The chip only needs to report frames it silently dropped. */
1522 	dev->stats.rx_missed_errors	+= ioread8(ioaddr + RxMissed);
1523 	dev->stats.tx_packets += ioread16(ioaddr + TxFramesOK);
1524 	dev->stats.rx_packets += ioread16(ioaddr + RxFramesOK);
1525 	dev->stats.tx_carrier_errors += ioread8(ioaddr + StatsCarrierError);
1526 
1527 	mult_coll = ioread8(ioaddr + StatsMultiColl);
1528 	np->xstats.tx_multiple_collisions += mult_coll;
1529 	single_coll = ioread8(ioaddr + StatsOneColl);
1530 	np->xstats.tx_single_collisions += single_coll;
1531 	late_coll = ioread8(ioaddr + StatsLateColl);
1532 	np->xstats.tx_late_collisions += late_coll;
1533 	dev->stats.collisions += mult_coll
1534 		+ single_coll
1535 		+ late_coll;
1536 
1537 	np->xstats.tx_deferred += ioread8(ioaddr + StatsTxDefer);
1538 	np->xstats.tx_deferred_excessive += ioread8(ioaddr + StatsTxXSDefer);
1539 	np->xstats.tx_aborted += ioread8(ioaddr + StatsTxAbort);
1540 	np->xstats.tx_bcasts += ioread8(ioaddr + StatsBcastTx);
1541 	np->xstats.rx_bcasts += ioread8(ioaddr + StatsBcastRx);
1542 	np->xstats.tx_mcasts += ioread8(ioaddr + StatsMcastTx);
1543 	np->xstats.rx_mcasts += ioread8(ioaddr + StatsMcastRx);
1544 
1545 	dev->stats.tx_bytes += ioread16(ioaddr + TxOctetsLow);
1546 	dev->stats.tx_bytes += ioread16(ioaddr + TxOctetsHigh) << 16;
1547 	dev->stats.rx_bytes += ioread16(ioaddr + RxOctetsLow);
1548 	dev->stats.rx_bytes += ioread16(ioaddr + RxOctetsHigh) << 16;
1549 
1550 	spin_unlock_irqrestore(&np->statlock, flags);
1551 
1552 	return &dev->stats;
1553 }
1554 
1555 static void set_rx_mode(struct net_device *dev)
1556 {
1557 	struct netdev_private *np = netdev_priv(dev);
1558 	void __iomem *ioaddr = np->base;
1559 	u16 mc_filter[4];			/* Multicast hash filter */
1560 	u32 rx_mode;
1561 	int i;
1562 
1563 	if (dev->flags & IFF_PROMISC) {			/* Set promiscuous. */
1564 		memset(mc_filter, 0xff, sizeof(mc_filter));
1565 		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptAll | AcceptMyPhys;
1566 	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
1567 		   (dev->flags & IFF_ALLMULTI)) {
1568 		/* Too many to match, or accept all multicasts. */
1569 		memset(mc_filter, 0xff, sizeof(mc_filter));
1570 		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
1571 	} else if (!netdev_mc_empty(dev)) {
1572 		struct netdev_hw_addr *ha;
1573 		int bit;
1574 		int index;
1575 		int crc;
1576 		memset (mc_filter, 0, sizeof (mc_filter));
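		/* Hash each address with the little-endian Ethernet CRC and
		   use six bits of it (collected MSB-first by the loop below)
		   to select one of the 64 filter bits. */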
1577 		netdev_for_each_mc_addr(ha, dev) {
1578 			crc = ether_crc_le(ETH_ALEN, ha->addr);
1579 			for (index=0, bit=0; bit < 6; bit++, crc <<= 1)
1580 				if (crc & 0x80000000) index |= 1 << bit;
1581 			mc_filter[index/16] |= (1 << (index % 16));
1582 		}
1583 		rx_mode = AcceptBroadcast | AcceptMultiHash | AcceptMyPhys;
1584 	} else {
1585 		iowrite8(AcceptBroadcast | AcceptMyPhys, ioaddr + RxMode);
1586 		return;
1587 	}
1588 	if (np->mii_if.full_duplex && np->flowctrl)
1589 		mc_filter[3] |= 0x0200;
1590 
1591 	for (i = 0; i < 4; i++)
1592 		iowrite16(mc_filter[i], ioaddr + MulticastFilter0 + i*2);
1593 	iowrite8(rx_mode, ioaddr + RxMode);
1594 }
1595 
1596 static int __set_mac_addr(struct net_device *dev)
1597 {
1598 	struct netdev_private *np = netdev_priv(dev);
1599 	u16 addr16;
1600 
1601 	addr16 = (dev->dev_addr[0] | (dev->dev_addr[1] << 8));
1602 	iowrite16(addr16, np->base + StationAddr);
1603 	addr16 = (dev->dev_addr[2] | (dev->dev_addr[3] << 8));
1604 	iowrite16(addr16, np->base + StationAddr+2);
1605 	addr16 = (dev->dev_addr[4] | (dev->dev_addr[5] << 8));
1606 	iowrite16(addr16, np->base + StationAddr+4);
1607 	return 0;
1608 }
1609 
1610 /* Invoked with rtnl_lock held */
1611 static int sundance_set_mac_addr(struct net_device *dev, void *data)
1612 {
1613 	const struct sockaddr *addr = data;
1614 
1615 	if (!is_valid_ether_addr(addr->sa_data))
1616 		return -EADDRNOTAVAIL;
1617 	memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
1618 	__set_mac_addr(dev);
1619 
1620 	return 0;
1621 }
1622 
1623 static const struct {
1624 	const char name[ETH_GSTRING_LEN];
1625 } sundance_stats[] = {
1626 	{ "tx_multiple_collisions" },
1627 	{ "tx_single_collisions" },
1628 	{ "tx_late_collisions" },
1629 	{ "tx_deferred" },
1630 	{ "tx_deferred_excessive" },
1631 	{ "tx_aborted" },
1632 	{ "tx_bcasts" },
1633 	{ "rx_bcasts" },
1634 	{ "tx_mcasts" },
1635 	{ "rx_mcasts" },
1636 };
1637 
1638 static int check_if_running(struct net_device *dev)
1639 {
1640 	if (!netif_running(dev))
1641 		return -EINVAL;
1642 	return 0;
1643 }
1644 
1645 static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1646 {
1647 	struct netdev_private *np = netdev_priv(dev);
1648 	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
1649 	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
1650 	strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
1651 }
1652 
1653 static int get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1654 {
1655 	struct netdev_private *np = netdev_priv(dev);
1656 	spin_lock_irq(&np->lock);
1657 	mii_ethtool_gset(&np->mii_if, ecmd);
1658 	spin_unlock_irq(&np->lock);
1659 	return 0;
1660 }
1661 
1662 static int set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1663 {
1664 	struct netdev_private *np = netdev_priv(dev);
1665 	int res;
1666 	spin_lock_irq(&np->lock);
1667 	res = mii_ethtool_sset(&np->mii_if, ecmd);
1668 	spin_unlock_irq(&np->lock);
1669 	return res;
1670 }
1671 
1672 static int nway_reset(struct net_device *dev)
1673 {
1674 	struct netdev_private *np = netdev_priv(dev);
1675 	return mii_nway_restart(&np->mii_if);
1676 }
1677 
1678 static u32 get_link(struct net_device *dev)
1679 {
1680 	struct netdev_private *np = netdev_priv(dev);
1681 	return mii_link_ok(&np->mii_if);
1682 }
1683 
1684 static u32 get_msglevel(struct net_device *dev)
1685 {
1686 	struct netdev_private *np = netdev_priv(dev);
1687 	return np->msg_enable;
1688 }
1689 
1690 static void set_msglevel(struct net_device *dev, u32 val)
1691 {
1692 	struct netdev_private *np = netdev_priv(dev);
1693 	np->msg_enable = val;
1694 }
1695 
1696 static void get_strings(struct net_device *dev, u32 stringset,
1697 		u8 *data)
1698 {
1699 	if (stringset == ETH_SS_STATS)
1700 		memcpy(data, sundance_stats, sizeof(sundance_stats));
1701 }
1702 
1703 static int get_sset_count(struct net_device *dev, int sset)
1704 {
1705 	switch (sset) {
1706 	case ETH_SS_STATS:
1707 		return ARRAY_SIZE(sundance_stats);
1708 	default:
1709 		return -EOPNOTSUPP;
1710 	}
1711 }
1712 
1713 static void get_ethtool_stats(struct net_device *dev,
1714 		struct ethtool_stats *stats, u64 *data)
1715 {
1716 	struct netdev_private *np = netdev_priv(dev);
1717 	int i = 0;
1718 
1719 	get_stats(dev);
1720 	data[i++] = np->xstats.tx_multiple_collisions;
1721 	data[i++] = np->xstats.tx_single_collisions;
1722 	data[i++] = np->xstats.tx_late_collisions;
1723 	data[i++] = np->xstats.tx_deferred;
1724 	data[i++] = np->xstats.tx_deferred_excessive;
1725 	data[i++] = np->xstats.tx_aborted;
1726 	data[i++] = np->xstats.tx_bcasts;
1727 	data[i++] = np->xstats.rx_bcasts;
1728 	data[i++] = np->xstats.tx_mcasts;
1729 	data[i++] = np->xstats.rx_mcasts;
1730 }
1731 
1732 #ifdef CONFIG_PM
1733 
1734 static void sundance_get_wol(struct net_device *dev,
1735 		struct ethtool_wolinfo *wol)
1736 {
1737 	struct netdev_private *np = netdev_priv(dev);
1738 	void __iomem *ioaddr = np->base;
1739 	u8 wol_bits;
1740 
1741 	wol->wolopts = 0;
1742 
1743 	wol->supported = (WAKE_PHY | WAKE_MAGIC);
1744 	if (!np->wol_enabled)
1745 		return;
1746 
1747 	wol_bits = ioread8(ioaddr + WakeEvent);
1748 	if (wol_bits & MagicPktEnable)
1749 		wol->wolopts |= WAKE_MAGIC;
1750 	if (wol_bits & LinkEventEnable)
1751 		wol->wolopts |= WAKE_PHY;
1752 }
1753 
1754 static int sundance_set_wol(struct net_device *dev,
1755 	struct ethtool_wolinfo *wol)
1756 {
1757 	struct netdev_private *np = netdev_priv(dev);
1758 	void __iomem *ioaddr = np->base;
1759 	u8 wol_bits;
1760 
1761 	if (!device_can_wakeup(&np->pci_dev->dev))
1762 		return -EOPNOTSUPP;
1763 
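	/* Read-modify-write the WakeEvent register: clear every wake source,
	   then re-enable only the requested ones together with the global
	   WolEnable bit. */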
1764 	np->wol_enabled = !!(wol->wolopts);
1765 	wol_bits = ioread8(ioaddr + WakeEvent);
1766 	wol_bits &= ~(WakePktEnable | MagicPktEnable |
1767 			LinkEventEnable | WolEnable);
1768 
1769 	if (np->wol_enabled) {
1770 		if (wol->wolopts & WAKE_MAGIC)
1771 			wol_bits |= (MagicPktEnable | WolEnable);
1772 		if (wol->wolopts & WAKE_PHY)
1773 			wol_bits |= (LinkEventEnable | WolEnable);
1774 	}
1775 	iowrite8(wol_bits, ioaddr + WakeEvent);
1776 
1777 	device_set_wakeup_enable(&np->pci_dev->dev, np->wol_enabled);
1778 
1779 	return 0;
1780 }
1781 #else
1782 #define sundance_get_wol NULL
1783 #define sundance_set_wol NULL
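/* Without CONFIG_PM the WoL hooks stay NULL, so the ethtool core reports
   Wake-on-LAN as unsupported. */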
1784 #endif /* CONFIG_PM */
1785 
1786 static const struct ethtool_ops ethtool_ops = {
1787 	.begin = check_if_running,
1788 	.get_drvinfo = get_drvinfo,
1789 	.get_settings = get_settings,
1790 	.set_settings = set_settings,
1791 	.nway_reset = nway_reset,
1792 	.get_link = get_link,
1793 	.get_wol = sundance_get_wol,
1794 	.set_wol = sundance_set_wol,
1795 	.get_msglevel = get_msglevel,
1796 	.set_msglevel = set_msglevel,
1797 	.get_strings = get_strings,
1798 	.get_sset_count = get_sset_count,
1799 	.get_ethtool_stats = get_ethtool_stats,
1800 };
1801 
1802 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1803 {
1804 	struct netdev_private *np = netdev_priv(dev);
1805 	int rc;
1806 
1807 	if (!netif_running(dev))
1808 		return -EINVAL;
1809 
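	/* Delegate the MII ioctls (SIOCGMIIPHY, SIOCGMIIREG, SIOCSMIIREG) to
	   the generic MII layer under the driver lock. */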
1810 	spin_lock_irq(&np->lock);
1811 	rc = generic_mii_ioctl(&np->mii_if, if_mii(rq), cmd, NULL);
1812 	spin_unlock_irq(&np->lock);
1813 
1814 	return rc;
1815 }
1816 
1817 static int netdev_close(struct net_device *dev)
1818 {
1819 	struct netdev_private *np = netdev_priv(dev);
1820 	void __iomem *ioaddr = np->base;
1821 	struct sk_buff *skb;
1822 	int i;
1823 
1824 	/* Wait and kill tasklet */
1825 	tasklet_kill(&np->rx_tasklet);
1826 	tasklet_kill(&np->tx_tasklet);
1827 	np->cur_tx = 0;
1828 	np->dirty_tx = 0;
1829 	np->cur_task = 0;
1830 	np->last_tx = NULL;
1831 
1832 	netif_stop_queue(dev);
1833 
1834 	if (netif_msg_ifdown(np)) {
1835 		printk(KERN_DEBUG "%s: Shutting down ethercard, status was Tx %2.2x "
1836 			   "Rx %4.4x Int %2.2x.\n",
1837 			   dev->name, ioread8(ioaddr + TxStatus),
1838 			   ioread32(ioaddr + RxStatus), ioread16(ioaddr + IntrStatus));
1839 		printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d,  Rx %d / %d.\n",
1840 			   dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx);
1841 	}
1842 
1843 	/* Disable interrupts by clearing the interrupt mask. */
1844 	iowrite16(0x0000, ioaddr + IntrEnable);
1845 
	/* Disable Rx and Tx DMA so the ring resources can be released safely. */
1847 	iowrite32(0x500, ioaddr + DMACtrl);
1848 
1849 	/* Stop the chip's Tx and Rx processes. */
1850 	iowrite16(TxDisable | RxDisable | StatsDisable, ioaddr + MACCtrl1);
1851 
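	/* Wait (up to 2000 ms) for the DMA status bits (0xc000) in DMACtrl to
	   clear, i.e. for the DMA engines to go idle, before resetting. */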
	for (i = 2000; i > 0; i--) {
		if ((ioread32(ioaddr + DMACtrl) & 0xc000) == 0)
			break;
		mdelay(1);
	}
1857 
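	/* Issue a full global reset and wait for the ResetBusy bit to clear. */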
	iowrite16(GlobalReset | DMAReset | FIFOReset | NetworkReset,
			ioaddr + ASIC_HI_WORD(ASICCtrl));
1860 
	for (i = 2000; i > 0; i--) {
		if ((ioread16(ioaddr + ASIC_HI_WORD(ASICCtrl)) & ResetBusy) == 0)
			break;
		mdelay(1);
	}
1866 
1867 #ifdef __i386__
1868 	if (netif_msg_hw(np)) {
1869 		printk(KERN_DEBUG "  Tx ring at %8.8x:\n",
1870 			   (int)(np->tx_ring_dma));
1871 		for (i = 0; i < TX_RING_SIZE; i++)
1872 			printk(KERN_DEBUG " #%d desc. %4.4x %8.8x %8.8x.\n",
1873 				   i, np->tx_ring[i].status, np->tx_ring[i].frag[0].addr,
1874 				   np->tx_ring[i].frag[0].length);
1875 		printk(KERN_DEBUG "  Rx ring %8.8x:\n",
1876 			   (int)(np->rx_ring_dma));
1877 		for (i = 0; i < /*RX_RING_SIZE*/4 ; i++) {
1878 			printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x\n",
1879 				   i, np->rx_ring[i].status, np->rx_ring[i].frag[0].addr,
1880 				   np->rx_ring[i].frag[0].length);
1881 		}
1882 	}
1883 #endif /* __i386__ debugging only */
1884 
1885 	free_irq(np->pci_dev->irq, dev);
1886 
1887 	del_timer_sync(&np->timer);
1888 
1889 	/* Free all the skbuffs in the Rx queue. */
1890 	for (i = 0; i < RX_RING_SIZE; i++) {
1891 		np->rx_ring[i].status = 0;
1892 		skb = np->rx_skbuff[i];
1893 		if (skb) {
1894 			dma_unmap_single(&np->pci_dev->dev,
1895 				le32_to_cpu(np->rx_ring[i].frag[0].addr),
1896 				np->rx_buf_sz, DMA_FROM_DEVICE);
1897 			dev_kfree_skb(skb);
1898 			np->rx_skbuff[i] = NULL;
1899 		}
1900 		np->rx_ring[i].frag[0].addr = cpu_to_le32(0xBADF00D0); /* poison */
1901 	}
1902 	for (i = 0; i < TX_RING_SIZE; i++) {
1903 		np->tx_ring[i].next_desc = 0;
1904 		skb = np->tx_skbuff[i];
1905 		if (skb) {
1906 			dma_unmap_single(&np->pci_dev->dev,
1907 				le32_to_cpu(np->tx_ring[i].frag[0].addr),
1908 				skb->len, DMA_TO_DEVICE);
1909 			dev_kfree_skb(skb);
1910 			np->tx_skbuff[i] = NULL;
1911 		}
1912 	}
1913 
1914 	return 0;
1915 }
1916 
1917 static void sundance_remove1(struct pci_dev *pdev)
1918 {
1919 	struct net_device *dev = pci_get_drvdata(pdev);
1920 
	if (dev) {
		struct netdev_private *np = netdev_priv(dev);

		unregister_netdev(dev);
		dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE,
				np->rx_ring, np->rx_ring_dma);
		dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE,
				np->tx_ring, np->tx_ring_dma);
		pci_iounmap(pdev, np->base);
		pci_release_regions(pdev);
		free_netdev(dev);
		pci_set_drvdata(pdev, NULL);
	}
1933 }
1934 
1935 #ifdef CONFIG_PM
1936 
1937 static int sundance_suspend(struct pci_dev *pci_dev, pm_message_t state)
1938 {
1939 	struct net_device *dev = pci_get_drvdata(pci_dev);
1940 	struct netdev_private *np = netdev_priv(dev);
1941 	void __iomem *ioaddr = np->base;
1942 
1943 	if (!netif_running(dev))
1944 		return 0;
1945 
1946 	netdev_close(dev);
1947 	netif_device_detach(dev);
1948 
1949 	pci_save_state(pci_dev);
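	/* If Wake-on-LAN is armed, leave the receiver enabled but restricted
	   to broadcast and station-address matches, so wake events can still
	   be seen while suspended. */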
1950 	if (np->wol_enabled) {
1951 		iowrite8(AcceptBroadcast | AcceptMyPhys, ioaddr + RxMode);
1952 		iowrite16(RxEnable, ioaddr + MACCtrl1);
1953 	}
1954 	pci_enable_wake(pci_dev, pci_choose_state(pci_dev, state),
1955 			np->wol_enabled);
1956 	pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state));
1957 
1958 	return 0;
1959 }
1960 
1961 static int sundance_resume(struct pci_dev *pci_dev)
1962 {
1963 	struct net_device *dev = pci_get_drvdata(pci_dev);
1964 	int err = 0;
1965 
1966 	if (!netif_running(dev))
1967 		return 0;
1968 
1969 	pci_set_power_state(pci_dev, PCI_D0);
1970 	pci_restore_state(pci_dev);
1971 	pci_enable_wake(pci_dev, PCI_D0, 0);
1972 
1973 	err = netdev_open(dev);
1974 	if (err) {
1975 		printk(KERN_ERR "%s: Can't resume interface!\n",
1976 				dev->name);
1977 		goto out;
1978 	}
1979 
1980 	netif_device_attach(dev);
1981 
1982 out:
1983 	return err;
1984 }
1985 
1986 #endif /* CONFIG_PM */
1987 
1988 static struct pci_driver sundance_driver = {
1989 	.name		= DRV_NAME,
1990 	.id_table	= sundance_pci_tbl,
1991 	.probe		= sundance_probe1,
1992 	.remove		= sundance_remove1,
1993 #ifdef CONFIG_PM
1994 	.suspend	= sundance_suspend,
1995 	.resume		= sundance_resume,
1996 #endif /* CONFIG_PM */
1997 };
1998 
1999 static int __init sundance_init(void)
2000 {
2001 /* when a module, this is printed whether or not devices are found in probe */
2002 #ifdef MODULE
2003 	printk(version);
2004 #endif
2005 	return pci_register_driver(&sundance_driver);
2006 }
2007 
2008 static void __exit sundance_exit(void)
2009 {
2010 	pci_unregister_driver(&sundance_driver);
2011 }
2012 
2013 module_init(sundance_init);
2014 module_exit(sundance_exit);
2015 