1 /* sundance.c: A Linux device driver for the Sundance ST201 "Alta". */
2 /*
3 	Written 1999-2000 by Donald Becker.
4 
5 	This software may be used and distributed according to the terms of
6 	the GNU General Public License (GPL), incorporated herein by reference.
7 	Drivers based on or derived from this code fall under the GPL and must
8 	retain the authorship, copyright and license notice.  This file is not
9 	a complete program and may only be used when the entire operating
10 	system is licensed under the GPL.
11 
12 	The author may be reached as becker@scyld.com, or C/O
13 	Scyld Computing Corporation
14 	410 Severn Ave., Suite 210
15 	Annapolis MD 21403
16 
17 	Support and updates available at
18 	http://www.scyld.com/network/sundance.html
19 	[link no longer provides useful info -jgarzik]
20 	Archives of the mailing list are still available at
21 	http://www.beowulf.org/pipermail/netdrivers/
22 
23 */
24 
25 #define DRV_NAME	"sundance"
26 #define DRV_VERSION	"1.2"
27 #define DRV_RELDATE	"11-Sep-2006"
28 
29 
30 /* The user-configurable values.
31    These may be modified when a driver module is loaded.*/
static int debug = 1;			/* Debug level: 0 quiet, 1 normal messages, .. 7 verbose. */
33 /* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
34    Typical is a 64 element hash table based on the Ethernet CRC.  */
35 static const int multicast_filter_limit = 32;
36 
37 /* Set the copy breakpoint for the copy-only-tiny-frames scheme.
38    Setting to > 1518 effectively disables this feature.
39    This chip can receive into offset buffers, so the Alpha does not
40    need a copy-align. */
41 static int rx_copybreak;
static int flowctrl = 1;
43 
44 /* media[] specifies the media type the NIC operates at.
45 		 autosense	Autosensing active media.
46 		 10mbps_hd 	10Mbps half duplex.
47 		 10mbps_fd 	10Mbps full duplex.
48 		 100mbps_hd 	100Mbps half duplex.
49 		 100mbps_fd 	100Mbps full duplex.
50 		 0		Autosensing active media.
51 		 1	 	10Mbps half duplex.
52 		 2	 	10Mbps full duplex.
53 		 3	 	100Mbps half duplex.
54 		 4	 	100Mbps full duplex.
55 */
56 #define MAX_UNITS 8
57 static char *media[MAX_UNITS];
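/* Example (illustrative) per-card overrides when loading the module:
 *	modprobe sundance media=100mbps_fd,autosense flowctrl=1
 * forces card 0 to 100Mbps full duplex and leaves card 1 autosensing. */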
58 
59 
60 /* Operational parameters that are set at compile time. */
61 
62 /* Keep the ring sizes a power of two for compile efficiency.
63    The compiler will convert <unsigned>'%'<2^N> into a bit mask.
64    Making the Tx ring too large decreases the effectiveness of channel
65    bonding and packet priority, and more than 128 requires modifying the
66    Tx error recovery.
67    Large receive rings merely waste memory. */
68 #define TX_RING_SIZE	32
69 #define TX_QUEUE_LEN	(TX_RING_SIZE - 1) /* Limit ring entries actually used.  */
70 #define RX_RING_SIZE	64
71 #define RX_BUDGET	32
#define TX_TOTAL_SIZE	(TX_RING_SIZE * sizeof(struct netdev_desc))
#define RX_TOTAL_SIZE	(RX_RING_SIZE * sizeof(struct netdev_desc))
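/* Each ring lives in a single dma_alloc_coherent() block of the matching
 * *_TOTAL_SIZE; see sundance_probe1(). */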
74 
75 /* Operational parameters that usually are not changed. */
76 /* Time in jiffies before concluding the transmitter is hung. */
77 #define TX_TIMEOUT  (4*HZ)
78 #define PKT_BUF_SZ		1536	/* Size of each temporary Rx buffer.*/
79 
80 /* Include files, designed to support most kernel versions 2.0.0 and later. */
81 #include <linux/module.h>
82 #include <linux/kernel.h>
83 #include <linux/string.h>
84 #include <linux/timer.h>
85 #include <linux/errno.h>
86 #include <linux/ioport.h>
87 #include <linux/interrupt.h>
88 #include <linux/pci.h>
89 #include <linux/netdevice.h>
90 #include <linux/etherdevice.h>
91 #include <linux/skbuff.h>
92 #include <linux/init.h>
93 #include <linux/bitops.h>
94 #include <linux/uaccess.h>
95 #include <asm/processor.h>		/* Processor type for cache alignment. */
96 #include <asm/io.h>
97 #include <linux/delay.h>
98 #include <linux/spinlock.h>
99 #include <linux/dma-mapping.h>
100 #include <linux/crc32.h>
101 #include <linux/ethtool.h>
102 #include <linux/mii.h>
103 
104 /* These identify the driver base version and may not be removed. */
105 static const char version[] =
106 	KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE
107 	" Written by Donald Becker\n";
108 
109 MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
110 MODULE_DESCRIPTION("Sundance Alta Ethernet driver");
111 MODULE_LICENSE("GPL");
112 
113 module_param(debug, int, 0);
114 module_param(rx_copybreak, int, 0);
115 module_param_array(media, charp, NULL, 0);
116 module_param(flowctrl, int, 0);
117 MODULE_PARM_DESC(debug, "Sundance Alta debug level (0-5)");
118 MODULE_PARM_DESC(rx_copybreak, "Sundance Alta copy breakpoint for copy-only-tiny-frames");
119 MODULE_PARM_DESC(flowctrl, "Sundance Alta flow control [0|1]");
120 
121 /*
122 				Theory of Operation
123 
124 I. Board Compatibility
125 
126 This driver is designed for the Sundance Technologies "Alta" ST201 chip.
127 
128 II. Board-specific settings
129 
130 III. Driver operation
131 
132 IIIa. Ring buffers
133 
134 This driver uses two statically allocated fixed-size descriptor lists
135 formed into rings by a branch from the final descriptor to the beginning of
136 the list.  The ring sizes are set at compile time by RX/TX_RING_SIZE.
137 Some chips explicitly use only 2^N sized rings, while others use a
138 'next descriptor' pointer that the driver forms into rings.
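
The producer/consumer indices (cur_tx/dirty_tx, cur_rx/dirty_rx) are
free-running counters, reduced modulo the ring size only when a slot is
addressed.  A sketch (names as used in start_tx() below):

	entry = np->cur_tx % TX_RING_SIZE;		/* slot to fill next */
	if (np->cur_tx - np->dirty_tx >= TX_QUEUE_LEN - 1)
		netif_stop_queue(dev);			/* ring nearly full */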
139 
140 IIIb/c. Transmit/Receive Structure
141 
142 This driver uses a zero-copy receive and transmit scheme.
143 The driver allocates full frame size skbuffs for the Rx ring buffers at
144 open() time and passes the skb->data field to the chip as receive data
145 buffers.  When an incoming frame is less than RX_COPYBREAK bytes long,
146 a fresh skbuff is allocated and the frame is copied to the new skbuff.
147 When the incoming frame is larger, the skbuff is passed directly up the
148 protocol stack.  Buffers consumed this way are replaced by newly allocated
149 skbuffs in a later phase of receives.
150 
151 The RX_COPYBREAK value is chosen to trade-off the memory wasted by
152 using a full-sized skbuff for small frames vs. the copying costs of larger
153 frames.  New boards are typically used in generously configured machines
154 and the underfilled buffers have negligible impact compared to the benefit of
155 a single allocation size, so the default value of zero results in never
156 copying packets.  When copying is done, the cost is usually mitigated by using
157 a combined copy/checksum routine.  Copying also preloads the cache, which is
158 most useful with small frames.
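
As a sketch of the decision made in rx_poll() below (names follow the driver):

	if (pkt_len < rx_copybreak &&
	    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
		... copy the frame into the fresh, minimally sized skb ...
	} else {
		... pass the original ring skb up and refill it later ...
	}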
159 
160 A subtle aspect of the operation is that the IP header at offset 14 in an
161 ethernet frame isn't longword aligned for further processing.
162 Unaligned buffers are permitted by the Sundance hardware, so
163 frames are received into the skbuff at an offset of "+2", 16-byte aligning
164 the IP header.
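In this driver the offset is applied with skb_reserve(skb, 2) when Rx
buffers are allocated; see init_ring() and refill_rx() below.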
165 
166 IIId. Synchronization
167 
168 The driver runs as two independent, single-threaded flows of control.  One
169 is the send-packet routine, which enforces single-threaded use by the
170 dev->tbusy flag.  The other thread is the interrupt handler, which is single
171 threaded by the hardware and interrupt handling software.
172 
173 The send packet thread has partial control over the Tx ring and 'dev->tbusy'
174 flag.  It sets the tbusy flag whenever it's queuing a Tx packet. If the next
175 queue slot is empty, it clears the tbusy flag when finished otherwise it sets
176 the 'lp->tx_full' flag.
177 
178 The interrupt handler has exclusive control over the Rx ring and records stats
179 from the Tx ring.  After reaping the stats, it marks the Tx queue entry as
180 empty by incrementing the dirty_tx mark. Iff the 'lp->tx_full' flag is set, it
181 clears both the tx_full and tbusy flags.
182 
183 IV. Notes
184 
185 IVb. References
186 
187 The Sundance ST201 datasheet, preliminary version.
188 The Kendin KS8723 datasheet, preliminary version.
189 The ICplus IP100 datasheet, preliminary version.
190 http://www.scyld.com/expert/100mbps.html
191 http://www.scyld.com/expert/NWay.html
192 
193 IVc. Errata
194 
195 */
196 
197 /* Work-around for Kendin chip bugs. */
198 #ifndef CONFIG_SUNDANCE_MMIO
199 #define USE_IO_OPS 1
200 #endif
201 
202 static const struct pci_device_id sundance_pci_tbl[] = {
203 	{ 0x1186, 0x1002, 0x1186, 0x1002, 0, 0, 0 },
204 	{ 0x1186, 0x1002, 0x1186, 0x1003, 0, 0, 1 },
205 	{ 0x1186, 0x1002, 0x1186, 0x1012, 0, 0, 2 },
206 	{ 0x1186, 0x1002, 0x1186, 0x1040, 0, 0, 3 },
207 	{ 0x1186, 0x1002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
208 	{ 0x13F0, 0x0201, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5 },
209 	{ 0x13F0, 0x0200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 6 },
210 	{ }
211 };
212 MODULE_DEVICE_TABLE(pci, sundance_pci_tbl);
213 
214 enum {
215 	netdev_io_size = 128
216 };
217 
218 struct pci_id_info {
	const char *name;
220 };
221 static const struct pci_id_info pci_id_tbl[] = {
222 	{"D-Link DFE-550TX FAST Ethernet Adapter"},
223 	{"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
224 	{"D-Link DFE-580TX 4 port Server Adapter"},
225 	{"D-Link DFE-530TXS FAST Ethernet Adapter"},
226 	{"D-Link DL10050-based FAST Ethernet Adapter"},
227 	{"Sundance Technology Alta"},
228 	{"IC Plus Corporation IP100A FAST Ethernet Adapter"},
229 	{ }	/* terminate list. */
230 };
231 
232 /* This driver was written to use PCI memory space, however x86-oriented
233    hardware often uses I/O space accesses. */
234 
235 /* Offsets to the device registers.
236    Unlike software-only systems, device drivers interact with complex hardware.
237    It's not useful to define symbolic names for every register bit in the
238    device.  The name can only partially document the semantics and make
239    the driver longer and more difficult to read.
240    In general, only the important configuration values or bits changed
241    multiple times should be defined symbolically.
242 */
243 enum alta_offsets {
244 	DMACtrl = 0x00,
245 	TxListPtr = 0x04,
246 	TxDMABurstThresh = 0x08,
247 	TxDMAUrgentThresh = 0x09,
248 	TxDMAPollPeriod = 0x0a,
249 	RxDMAStatus = 0x0c,
250 	RxListPtr = 0x10,
251 	DebugCtrl0 = 0x1a,
252 	DebugCtrl1 = 0x1c,
253 	RxDMABurstThresh = 0x14,
254 	RxDMAUrgentThresh = 0x15,
255 	RxDMAPollPeriod = 0x16,
256 	LEDCtrl = 0x1a,
257 	ASICCtrl = 0x30,
258 	EEData = 0x34,
259 	EECtrl = 0x36,
260 	FlashAddr = 0x40,
261 	FlashData = 0x44,
262 	WakeEvent = 0x45,
263 	TxStatus = 0x46,
264 	TxFrameId = 0x47,
265 	DownCounter = 0x18,
266 	IntrClear = 0x4a,
267 	IntrEnable = 0x4c,
268 	IntrStatus = 0x4e,
269 	MACCtrl0 = 0x50,
270 	MACCtrl1 = 0x52,
271 	StationAddr = 0x54,
272 	MaxFrameSize = 0x5A,
273 	RxMode = 0x5c,
274 	MIICtrl = 0x5e,
275 	MulticastFilter0 = 0x60,
276 	MulticastFilter1 = 0x64,
277 	RxOctetsLow = 0x68,
278 	RxOctetsHigh = 0x6a,
279 	TxOctetsLow = 0x6c,
280 	TxOctetsHigh = 0x6e,
281 	TxFramesOK = 0x70,
282 	RxFramesOK = 0x72,
283 	StatsCarrierError = 0x74,
284 	StatsLateColl = 0x75,
285 	StatsMultiColl = 0x76,
286 	StatsOneColl = 0x77,
287 	StatsTxDefer = 0x78,
288 	RxMissed = 0x79,
289 	StatsTxXSDefer = 0x7a,
290 	StatsTxAbort = 0x7b,
291 	StatsBcastTx = 0x7c,
292 	StatsBcastRx = 0x7d,
293 	StatsMcastTx = 0x7e,
294 	StatsMcastRx = 0x7f,
295 	/* Aliased and bogus values! */
296 	RxStatus = 0x0c,
297 };
298 
299 #define ASIC_HI_WORD(x)	((x) + 2)
300 
301 enum ASICCtrl_HiWord_bit {
302 	GlobalReset = 0x0001,
303 	RxReset = 0x0002,
304 	TxReset = 0x0004,
305 	DMAReset = 0x0008,
306 	FIFOReset = 0x0010,
307 	NetworkReset = 0x0020,
308 	HostReset = 0x0040,
309 	ResetBusy = 0x0400,
310 };
311 
312 /* Bits in the interrupt status/mask registers. */
313 enum intr_status_bits {
314 	IntrSummary=0x0001, IntrPCIErr=0x0002, IntrMACCtrl=0x0008,
315 	IntrTxDone=0x0004, IntrRxDone=0x0010, IntrRxStart=0x0020,
316 	IntrDrvRqst=0x0040,
317 	StatsMax=0x0080, LinkChange=0x0100,
318 	IntrTxDMADone=0x0200, IntrRxDMADone=0x0400,
319 };
320 
321 /* Bits in the RxMode register. */
322 enum rx_mode_bits {
323 	AcceptAllIPMulti=0x20, AcceptMultiHash=0x10, AcceptAll=0x08,
324 	AcceptBroadcast=0x04, AcceptMulticast=0x02, AcceptMyPhys=0x01,
325 };
326 /* Bits in MACCtrl. */
327 enum mac_ctrl0_bits {
328 	EnbFullDuplex=0x20, EnbRcvLargeFrame=0x40,
329 	EnbFlowCtrl=0x100, EnbPassRxCRC=0x200,
330 };
331 enum mac_ctrl1_bits {
332 	StatsEnable=0x0020,	StatsDisable=0x0040, StatsEnabled=0x0080,
333 	TxEnable=0x0100, TxDisable=0x0200, TxEnabled=0x0400,
334 	RxEnable=0x0800, RxDisable=0x1000, RxEnabled=0x2000,
335 };
336 
337 /* Bits in WakeEvent register. */
338 enum wake_event_bits {
339 	WakePktEnable = 0x01,
340 	MagicPktEnable = 0x02,
341 	LinkEventEnable = 0x04,
342 	WolEnable = 0x80,
343 };
344 
345 /* The Rx and Tx buffer descriptors. */
346 /* Note that using only 32 bit fields simplifies conversion to big-endian
347    architectures. */
348 struct netdev_desc {
349 	__le32 next_desc;
350 	__le32 status;
351 	struct desc_frag { __le32 addr, length; } frag[1];
352 };
353 
354 /* Bits in netdev_desc.status */
355 enum desc_status_bits {
356 	DescOwn=0x8000,
357 	DescEndPacket=0x4000,
358 	DescEndRing=0x2000,
359 	LastFrag=0x80000000,
360 	DescIntrOnTx=0x8000,
361 	DescIntrOnDMADone=0x80000000,
362 	DisableAlign = 0x00000001,
363 };
364 
365 #define PRIV_ALIGN	15 	/* Required alignment mask */
366 /* Use  __attribute__((aligned (L1_CACHE_BYTES)))  to maintain alignment
367    within the structure. */
368 #define MII_CNT		4
369 struct netdev_private {
370 	/* Descriptor rings first for alignment. */
371 	struct netdev_desc *rx_ring;
372 	struct netdev_desc *tx_ring;
373 	struct sk_buff* rx_skbuff[RX_RING_SIZE];
374 	struct sk_buff* tx_skbuff[TX_RING_SIZE];
	dma_addr_t tx_ring_dma;
	dma_addr_t rx_ring_dma;
377 	struct timer_list timer;		/* Media monitoring timer. */
378 	/* ethtool extra stats */
379 	struct {
380 		u64 tx_multiple_collisions;
381 		u64 tx_single_collisions;
382 		u64 tx_late_collisions;
383 		u64 tx_deferred;
384 		u64 tx_deferred_excessive;
385 		u64 tx_aborted;
386 		u64 tx_bcasts;
387 		u64 rx_bcasts;
388 		u64 tx_mcasts;
389 		u64 rx_mcasts;
390 	} xstats;
391 	/* Frequently used values: keep some adjacent for cache effect. */
392 	spinlock_t lock;
393 	int msg_enable;
394 	int chip_id;
395 	unsigned int cur_rx, dirty_rx;		/* Producer/consumer ring indices */
396 	unsigned int rx_buf_sz;			/* Based on MTU+slack. */
397 	struct netdev_desc *last_tx;		/* Last Tx descriptor used. */
398 	unsigned int cur_tx, dirty_tx;
	/* These values keep track of the transceiver/media in use. */
400 	unsigned int flowctrl:1;
401 	unsigned int default_port:4;		/* Last dev->if_port value. */
402 	unsigned int an_enable:1;
403 	unsigned int speed;
404 	unsigned int wol_enabled:1;			/* Wake on LAN enabled */
405 	struct tasklet_struct rx_tasklet;
406 	struct tasklet_struct tx_tasklet;
407 	int budget;
408 	int cur_task;
409 	/* Multicast and receive mode. */
410 	spinlock_t mcastlock;			/* SMP lock multicast updates. */
411 	u16 mcast_filter[4];
412 	/* MII transceiver section. */
413 	struct mii_if_info mii_if;
414 	int mii_preamble_required;
415 	unsigned char phys[MII_CNT];		/* MII device addresses, only first one used. */
416 	struct pci_dev *pci_dev;
417 	void __iomem *base;
418 	spinlock_t statlock;
419 };
420 
421 /* The station address location in the EEPROM. */
422 #define EEPROM_SA_OFFSET	0x10
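/* sundance_probe1() reads three little-endian 16-bit words starting here
 * to form the station MAC address. */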
423 #define DEFAULT_INTR (IntrRxDMADone | IntrPCIErr | \
424 			IntrDrvRqst | IntrTxDone | StatsMax | \
425 			LinkChange)
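/* The normal-operation interrupt mask: intr_handler() drops the Rx bits
 * from it while an Rx poll is scheduled, and rx_poll() restores the full
 * mask when the poll completes. */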
426 
427 static int  change_mtu(struct net_device *dev, int new_mtu);
428 static int  eeprom_read(void __iomem *ioaddr, int location);
429 static int  mdio_read(struct net_device *dev, int phy_id, int location);
430 static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
431 static int  mdio_wait_link(struct net_device *dev, int wait);
432 static int  netdev_open(struct net_device *dev);
433 static void check_duplex(struct net_device *dev);
434 static void netdev_timer(unsigned long data);
435 static void tx_timeout(struct net_device *dev);
436 static void init_ring(struct net_device *dev);
437 static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev);
438 static int reset_tx (struct net_device *dev);
439 static irqreturn_t intr_handler(int irq, void *dev_instance);
440 static void rx_poll(unsigned long data);
441 static void tx_poll(unsigned long data);
442 static void refill_rx (struct net_device *dev);
static void netdev_error(struct net_device *dev, int intr_status);
445 static void set_rx_mode(struct net_device *dev);
446 static int __set_mac_addr(struct net_device *dev);
447 static int sundance_set_mac_addr(struct net_device *dev, void *data);
448 static struct net_device_stats *get_stats(struct net_device *dev);
449 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
450 static int  netdev_close(struct net_device *dev);
451 static const struct ethtool_ops ethtool_ops;
452 
453 static void sundance_reset(struct net_device *dev, unsigned long reset_cmd)
454 {
455 	struct netdev_private *np = netdev_priv(dev);
456 	void __iomem *ioaddr = np->base + ASICCtrl;
457 	int countdown;
458 
	/* ST201 documentation states ASICCtrl is a 32-bit register */
460 	iowrite32 (reset_cmd | ioread32 (ioaddr), ioaddr);
461 	/* ST201 documentation states reset can take up to 1 ms */
462 	countdown = 10 + 1;
463 	while (ioread32 (ioaddr) & (ResetBusy << 16)) {
464 		if (--countdown == 0) {
			printk(KERN_WARNING "%s: reset not completed!\n", dev->name);
466 			break;
467 		}
468 		udelay(100);
469 	}
470 }
471 
472 #ifdef CONFIG_NET_POLL_CONTROLLER
473 static void sundance_poll_controller(struct net_device *dev)
474 {
475 	struct netdev_private *np = netdev_priv(dev);
476 
477 	disable_irq(np->pci_dev->irq);
478 	intr_handler(np->pci_dev->irq, dev);
479 	enable_irq(np->pci_dev->irq);
480 }
481 #endif
482 
483 static const struct net_device_ops netdev_ops = {
484 	.ndo_open		= netdev_open,
485 	.ndo_stop		= netdev_close,
486 	.ndo_start_xmit		= start_tx,
487 	.ndo_get_stats 		= get_stats,
488 	.ndo_set_rx_mode	= set_rx_mode,
489 	.ndo_do_ioctl 		= netdev_ioctl,
490 	.ndo_tx_timeout		= tx_timeout,
491 	.ndo_change_mtu		= change_mtu,
492 	.ndo_set_mac_address 	= sundance_set_mac_addr,
493 	.ndo_validate_addr	= eth_validate_addr,
494 #ifdef CONFIG_NET_POLL_CONTROLLER
495 	.ndo_poll_controller 	= sundance_poll_controller,
496 #endif
497 };
498 
499 static int sundance_probe1(struct pci_dev *pdev,
500 			   const struct pci_device_id *ent)
501 {
502 	struct net_device *dev;
503 	struct netdev_private *np;
504 	static int card_idx;
505 	int chip_idx = ent->driver_data;
506 	int irq;
507 	int i;
508 	void __iomem *ioaddr;
509 	u16 mii_ctl;
510 	void *ring_space;
511 	dma_addr_t ring_dma;
512 #ifdef USE_IO_OPS
513 	int bar = 0;
514 #else
515 	int bar = 1;
516 #endif
517 	int phy, phy_end, phy_idx = 0;
518 
519 /* when built into the kernel, we only print version if device is found */
520 #ifndef MODULE
521 	static int printed_version;
522 	if (!printed_version++)
523 		printk(version);
524 #endif
525 
526 	if (pci_enable_device(pdev))
527 		return -EIO;
528 	pci_set_master(pdev);
529 
530 	irq = pdev->irq;
531 
532 	dev = alloc_etherdev(sizeof(*np));
533 	if (!dev)
534 		return -ENOMEM;
535 	SET_NETDEV_DEV(dev, &pdev->dev);
536 
537 	if (pci_request_regions(pdev, DRV_NAME))
538 		goto err_out_netdev;
539 
540 	ioaddr = pci_iomap(pdev, bar, netdev_io_size);
541 	if (!ioaddr)
542 		goto err_out_res;
543 
544 	for (i = 0; i < 3; i++)
545 		((__le16 *)dev->dev_addr)[i] =
546 			cpu_to_le16(eeprom_read(ioaddr, i + EEPROM_SA_OFFSET));
547 
548 	np = netdev_priv(dev);
549 	np->base = ioaddr;
550 	np->pci_dev = pdev;
551 	np->chip_id = chip_idx;
552 	np->msg_enable = (1 << debug) - 1;
553 	spin_lock_init(&np->lock);
554 	spin_lock_init(&np->statlock);
555 	tasklet_init(&np->rx_tasklet, rx_poll, (unsigned long)dev);
556 	tasklet_init(&np->tx_tasklet, tx_poll, (unsigned long)dev);
557 
558 	ring_space = dma_alloc_coherent(&pdev->dev, TX_TOTAL_SIZE,
559 			&ring_dma, GFP_KERNEL);
560 	if (!ring_space)
561 		goto err_out_cleardev;
562 	np->tx_ring = (struct netdev_desc *)ring_space;
563 	np->tx_ring_dma = ring_dma;
564 
565 	ring_space = dma_alloc_coherent(&pdev->dev, RX_TOTAL_SIZE,
566 			&ring_dma, GFP_KERNEL);
567 	if (!ring_space)
568 		goto err_out_unmap_tx;
569 	np->rx_ring = (struct netdev_desc *)ring_space;
570 	np->rx_ring_dma = ring_dma;
571 
572 	np->mii_if.dev = dev;
573 	np->mii_if.mdio_read = mdio_read;
574 	np->mii_if.mdio_write = mdio_write;
575 	np->mii_if.phy_id_mask = 0x1f;
576 	np->mii_if.reg_num_mask = 0x1f;
577 
578 	/* The chip-specific entries in the device structure. */
579 	dev->netdev_ops = &netdev_ops;
580 	dev->ethtool_ops = &ethtool_ops;
581 	dev->watchdog_timeo = TX_TIMEOUT;
582 
583 	/* MTU range: 68 - 8191 */
584 	dev->min_mtu = ETH_MIN_MTU;
585 	dev->max_mtu = 8191;
586 
587 	pci_set_drvdata(pdev, dev);
588 
589 	i = register_netdev(dev);
590 	if (i)
591 		goto err_out_unmap_rx;
592 
593 	printk(KERN_INFO "%s: %s at %p, %pM, IRQ %d.\n",
594 	       dev->name, pci_id_tbl[chip_idx].name, ioaddr,
595 	       dev->dev_addr, irq);
596 
597 	np->phys[0] = 1;		/* Default setting */
598 	np->mii_preamble_required++;
599 
600 	/*
601 	 * It seems some phys doesn't deal well with address 0 being accessed
602 	 * first
603 	 */
604 	if (sundance_pci_tbl[np->chip_id].device == 0x0200) {
605 		phy = 0;
606 		phy_end = 31;
607 	} else {
608 		phy = 1;
609 		phy_end = 32;	/* wraps to zero, due to 'phy & 0x1f' */
610 	}
611 	for (; phy <= phy_end && phy_idx < MII_CNT; phy++) {
612 		int phyx = phy & 0x1f;
613 		int mii_status = mdio_read(dev, phyx, MII_BMSR);
614 		if (mii_status != 0xffff  &&  mii_status != 0x0000) {
615 			np->phys[phy_idx++] = phyx;
616 			np->mii_if.advertising = mdio_read(dev, phyx, MII_ADVERTISE);
617 			if ((mii_status & 0x0040) == 0)
618 				np->mii_preamble_required++;
619 			printk(KERN_INFO "%s: MII PHY found at address %d, status "
620 				   "0x%4.4x advertising %4.4x.\n",
621 				   dev->name, phyx, mii_status, np->mii_if.advertising);
622 		}
623 	}
624 	np->mii_preamble_required--;
625 
626 	if (phy_idx == 0) {
627 		printk(KERN_INFO "%s: No MII transceiver found, aborting.  ASIC status %x\n",
628 			   dev->name, ioread32(ioaddr + ASICCtrl));
629 		goto err_out_unregister;
630 	}
631 
632 	np->mii_if.phy_id = np->phys[0];
633 
634 	/* Parse override configuration */
635 	np->an_enable = 1;
636 	if (card_idx < MAX_UNITS) {
637 		if (media[card_idx] != NULL) {
638 			np->an_enable = 0;
639 			if (strcmp (media[card_idx], "100mbps_fd") == 0 ||
640 			    strcmp (media[card_idx], "4") == 0) {
641 				np->speed = 100;
642 				np->mii_if.full_duplex = 1;
643 			} else if (strcmp (media[card_idx], "100mbps_hd") == 0 ||
644 				   strcmp (media[card_idx], "3") == 0) {
645 				np->speed = 100;
646 				np->mii_if.full_duplex = 0;
647 			} else if (strcmp (media[card_idx], "10mbps_fd") == 0 ||
648 				   strcmp (media[card_idx], "2") == 0) {
649 				np->speed = 10;
650 				np->mii_if.full_duplex = 1;
651 			} else if (strcmp (media[card_idx], "10mbps_hd") == 0 ||
652 				   strcmp (media[card_idx], "1") == 0) {
653 				np->speed = 10;
654 				np->mii_if.full_duplex = 0;
655 			} else {
656 				np->an_enable = 1;
657 			}
658 		}
659 		if (flowctrl == 1)
660 			np->flowctrl = 1;
661 	}
662 
663 	/* Fibre PHY? */
664 	if (ioread32 (ioaddr + ASICCtrl) & 0x80) {
665 		/* Default 100Mbps Full */
666 		if (np->an_enable) {
667 			np->speed = 100;
668 			np->mii_if.full_duplex = 1;
669 			np->an_enable = 0;
670 		}
671 	}
672 	/* Reset PHY */
673 	mdio_write (dev, np->phys[0], MII_BMCR, BMCR_RESET);
674 	mdelay (300);
675 	/* If flow control enabled, we need to advertise it.*/
676 	if (np->flowctrl)
677 		mdio_write (dev, np->phys[0], MII_ADVERTISE, np->mii_if.advertising | 0x0400);
678 	mdio_write (dev, np->phys[0], MII_BMCR, BMCR_ANENABLE|BMCR_ANRESTART);
679 	/* Force media type */
680 	if (!np->an_enable) {
681 		mii_ctl = 0;
682 		mii_ctl |= (np->speed == 100) ? BMCR_SPEED100 : 0;
683 		mii_ctl |= (np->mii_if.full_duplex) ? BMCR_FULLDPLX : 0;
684 		mdio_write (dev, np->phys[0], MII_BMCR, mii_ctl);
685 		printk (KERN_INFO "Override speed=%d, %s duplex\n",
686 			np->speed, np->mii_if.full_duplex ? "Full" : "Half");
687 
688 	}
689 
690 	/* Perhaps move the reset here? */
691 	/* Reset the chip to erase previous misconfiguration. */
692 	if (netif_msg_hw(np))
693 		printk("ASIC Control is %x.\n", ioread32(ioaddr + ASICCtrl));
694 	sundance_reset(dev, 0x00ff << 16);
695 	if (netif_msg_hw(np))
696 		printk("ASIC Control is now %x.\n", ioread32(ioaddr + ASICCtrl));
697 
698 	card_idx++;
699 	return 0;
700 
701 err_out_unregister:
702 	unregister_netdev(dev);
703 err_out_unmap_rx:
704 	dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE,
705 		np->rx_ring, np->rx_ring_dma);
706 err_out_unmap_tx:
707 	dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE,
708 		np->tx_ring, np->tx_ring_dma);
709 err_out_cleardev:
710 	pci_iounmap(pdev, ioaddr);
711 err_out_res:
712 	pci_release_regions(pdev);
713 err_out_netdev:
714 	free_netdev (dev);
715 	return -ENODEV;
716 }
717 
718 static int change_mtu(struct net_device *dev, int new_mtu)
719 {
720 	if (netif_running(dev))
721 		return -EBUSY;
722 	dev->mtu = new_mtu;
723 	return 0;
724 }
725 
726 #define eeprom_delay(ee_addr)	ioread32(ee_addr)
727 /* Read the EEPROM and MII Management Data I/O (MDIO) interfaces. */
728 static int eeprom_read(void __iomem *ioaddr, int location)
729 {
730 	int boguscnt = 10000;		/* Typical 1900 ticks. */
731 	iowrite16(0x0200 | (location & 0xff), ioaddr + EECtrl);
732 	do {
733 		eeprom_delay(ioaddr + EECtrl);
734 		if (! (ioread16(ioaddr + EECtrl) & 0x8000)) {
735 			return ioread16(ioaddr + EEData);
736 		}
737 	} while (--boguscnt > 0);
738 	return 0;
739 }
740 
741 /*  MII transceiver control section.
742 	Read and write the MII registers using software-generated serial
743 	MDIO protocol.  See the MII specifications or DP83840A data sheet
744 	for details.
745 
	The maximum data clock rate is 2.5 MHz.  The minimum timing is usually
	met by back-to-back 33 MHz PCI cycles. */
748 #define mdio_delay() ioread8(mdio_addr)
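/* mdio_delay() is just a dummy read of the MII control register; on PCI it
 * flushes the preceding posted write and spaces the bit-bang clock edges. */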
749 
750 enum mii_reg_bits {
751 	MDIO_ShiftClk=0x0001, MDIO_Data=0x0002, MDIO_EnbOutput=0x0004,
752 };
753 #define MDIO_EnbIn  (0)
754 #define MDIO_WRITE0 (MDIO_EnbOutput)
755 #define MDIO_WRITE1 (MDIO_Data | MDIO_EnbOutput)
756 
757 /* Generate the preamble required for initial synchronization and
758    a few older transceivers. */
759 static void mdio_sync(void __iomem *mdio_addr)
760 {
761 	int bits = 32;
762 
763 	/* Establish sync by sending at least 32 logic ones. */
764 	while (--bits >= 0) {
765 		iowrite8(MDIO_WRITE1, mdio_addr);
766 		mdio_delay();
767 		iowrite8(MDIO_WRITE1 | MDIO_ShiftClk, mdio_addr);
768 		mdio_delay();
769 	}
770 }
771 
772 static int mdio_read(struct net_device *dev, int phy_id, int location)
773 {
774 	struct netdev_private *np = netdev_priv(dev);
775 	void __iomem *mdio_addr = np->base + MIICtrl;
776 	int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
777 	int i, retval = 0;
778 
779 	if (np->mii_preamble_required)
780 		mdio_sync(mdio_addr);
781 
782 	/* Shift the read command bits out. */
783 	for (i = 15; i >= 0; i--) {
784 		int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
785 
786 		iowrite8(dataval, mdio_addr);
787 		mdio_delay();
788 		iowrite8(dataval | MDIO_ShiftClk, mdio_addr);
789 		mdio_delay();
790 	}
791 	/* Read the two transition, 16 data, and wire-idle bits. */
792 	for (i = 19; i > 0; i--) {
793 		iowrite8(MDIO_EnbIn, mdio_addr);
794 		mdio_delay();
795 		retval = (retval << 1) | ((ioread8(mdio_addr) & MDIO_Data) ? 1 : 0);
796 		iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
797 		mdio_delay();
798 	}
799 	return (retval>>1) & 0xffff;
800 }
801 
802 static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
803 {
804 	struct netdev_private *np = netdev_priv(dev);
805 	void __iomem *mdio_addr = np->base + MIICtrl;
806 	int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location<<18) | value;
807 	int i;
808 
809 	if (np->mii_preamble_required)
810 		mdio_sync(mdio_addr);
811 
812 	/* Shift the command bits out. */
813 	for (i = 31; i >= 0; i--) {
814 		int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
815 
816 		iowrite8(dataval, mdio_addr);
817 		mdio_delay();
818 		iowrite8(dataval | MDIO_ShiftClk, mdio_addr);
819 		mdio_delay();
820 	}
821 	/* Clear out extra bits. */
822 	for (i = 2; i > 0; i--) {
823 		iowrite8(MDIO_EnbIn, mdio_addr);
824 		mdio_delay();
825 		iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
826 		mdio_delay();
827 	}
828 }
829 
830 static int mdio_wait_link(struct net_device *dev, int wait)
831 {
832 	int bmsr;
833 	int phy_id;
834 	struct netdev_private *np;
835 
836 	np = netdev_priv(dev);
837 	phy_id = np->phys[0];
838 
839 	do {
840 		bmsr = mdio_read(dev, phy_id, MII_BMSR);
841 		if (bmsr & 0x0004)
842 			return 0;
843 		mdelay(1);
844 	} while (--wait > 0);
845 	return -1;
846 }
847 
848 static int netdev_open(struct net_device *dev)
849 {
850 	struct netdev_private *np = netdev_priv(dev);
851 	void __iomem *ioaddr = np->base;
852 	const int irq = np->pci_dev->irq;
853 	unsigned long flags;
854 	int i;
855 
856 	sundance_reset(dev, 0x00ff << 16);
857 
858 	i = request_irq(irq, intr_handler, IRQF_SHARED, dev->name, dev);
859 	if (i)
860 		return i;
861 
862 	if (netif_msg_ifup(np))
863 		printk(KERN_DEBUG "%s: netdev_open() irq %d\n", dev->name, irq);
864 
865 	init_ring(dev);
866 
867 	iowrite32(np->rx_ring_dma, ioaddr + RxListPtr);
868 	/* The Tx list pointer is written as packets are queued. */
869 
870 	/* Initialize other registers. */
871 	__set_mac_addr(dev);
872 #if IS_ENABLED(CONFIG_VLAN_8021Q)
873 	iowrite16(dev->mtu + 18, ioaddr + MaxFrameSize);
874 #else
875 	iowrite16(dev->mtu + 14, ioaddr + MaxFrameSize);
876 #endif
877 	if (dev->mtu > 2047)
878 		iowrite32(ioread32(ioaddr + ASICCtrl) | 0x0C, ioaddr + ASICCtrl);
879 
880 	/* Configure the PCI bus bursts and FIFO thresholds. */
881 
882 	if (dev->if_port == 0)
883 		dev->if_port = np->default_port;
884 
885 	spin_lock_init(&np->mcastlock);
886 
887 	set_rx_mode(dev);
888 	iowrite16(0, ioaddr + IntrEnable);
889 	iowrite16(0, ioaddr + DownCounter);
890 	/* Set the chip to poll every N*320nsec. */
891 	iowrite8(100, ioaddr + RxDMAPollPeriod);
892 	iowrite8(127, ioaddr + TxDMAPollPeriod);
893 	/* Fix DFE-580TX packet drop issue */
894 	if (np->pci_dev->revision >= 0x14)
895 		iowrite8(0x01, ioaddr + DebugCtrl1);
896 	netif_start_queue(dev);
897 
898 	spin_lock_irqsave(&np->lock, flags);
899 	reset_tx(dev);
900 	spin_unlock_irqrestore(&np->lock, flags);
901 
902 	iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);
903 
	/* Disable WoL */
	iowrite8(ioread8(ioaddr + WakeEvent) &
		 ~(WakePktEnable | MagicPktEnable | LinkEventEnable | WolEnable),
		 ioaddr + WakeEvent);
906 	np->wol_enabled = 0;
907 
908 	if (netif_msg_ifup(np))
909 		printk(KERN_DEBUG "%s: Done netdev_open(), status: Rx %x Tx %x "
910 			   "MAC Control %x, %4.4x %4.4x.\n",
911 			   dev->name, ioread32(ioaddr + RxStatus), ioread8(ioaddr + TxStatus),
912 			   ioread32(ioaddr + MACCtrl0),
913 			   ioread16(ioaddr + MACCtrl1), ioread16(ioaddr + MACCtrl0));
914 
915 	/* Set the timer to check for link beat. */
916 	init_timer(&np->timer);
917 	np->timer.expires = jiffies + 3*HZ;
918 	np->timer.data = (unsigned long)dev;
919 	np->timer.function = netdev_timer;				/* timer handler */
920 	add_timer(&np->timer);
921 
922 	/* Enable interrupts by setting the interrupt mask. */
923 	iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
924 
925 	return 0;
926 }
927 
928 static void check_duplex(struct net_device *dev)
929 {
930 	struct netdev_private *np = netdev_priv(dev);
931 	void __iomem *ioaddr = np->base;
932 	int mii_lpa = mdio_read(dev, np->phys[0], MII_LPA);
933 	int negotiated = mii_lpa & np->mii_if.advertising;
934 	int duplex;
935 
936 	/* Force media */
937 	if (!np->an_enable || mii_lpa == 0xffff) {
938 		if (np->mii_if.full_duplex)
939 			iowrite16 (ioread16 (ioaddr + MACCtrl0) | EnbFullDuplex,
940 				ioaddr + MACCtrl0);
941 		return;
942 	}
943 
944 	/* Autonegotiation */
945 	duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
946 	if (np->mii_if.full_duplex != duplex) {
947 		np->mii_if.full_duplex = duplex;
948 		if (netif_msg_link(np))
949 			printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d "
950 				   "negotiated capability %4.4x.\n", dev->name,
951 				   duplex ? "full" : "half", np->phys[0], negotiated);
952 		iowrite16(ioread16(ioaddr + MACCtrl0) | (duplex ? 0x20 : 0), ioaddr + MACCtrl0);
953 	}
954 }
955 
956 static void netdev_timer(unsigned long data)
957 {
958 	struct net_device *dev = (struct net_device *)data;
959 	struct netdev_private *np = netdev_priv(dev);
960 	void __iomem *ioaddr = np->base;
961 	int next_tick = 10*HZ;
962 
963 	if (netif_msg_timer(np)) {
964 		printk(KERN_DEBUG "%s: Media selection timer tick, intr status %4.4x, "
965 			   "Tx %x Rx %x.\n",
966 			   dev->name, ioread16(ioaddr + IntrEnable),
967 			   ioread8(ioaddr + TxStatus), ioread32(ioaddr + RxStatus));
968 	}
969 	check_duplex(dev);
970 	np->timer.expires = jiffies + next_tick;
971 	add_timer(&np->timer);
972 }
973 
974 static void tx_timeout(struct net_device *dev)
975 {
976 	struct netdev_private *np = netdev_priv(dev);
977 	void __iomem *ioaddr = np->base;
978 	unsigned long flag;
979 
980 	netif_stop_queue(dev);
981 	tasklet_disable(&np->tx_tasklet);
982 	iowrite16(0, ioaddr + IntrEnable);
983 	printk(KERN_WARNING "%s: Transmit timed out, TxStatus %2.2x "
984 		   "TxFrameId %2.2x,"
985 		   " resetting...\n", dev->name, ioread8(ioaddr + TxStatus),
986 		   ioread8(ioaddr + TxFrameId));
987 
988 	{
989 		int i;
990 		for (i=0; i<TX_RING_SIZE; i++) {
991 			printk(KERN_DEBUG "%02x %08llx %08x %08x(%02x) %08x %08x\n", i,
992 				(unsigned long long)(np->tx_ring_dma + i*sizeof(*np->tx_ring)),
993 				le32_to_cpu(np->tx_ring[i].next_desc),
994 				le32_to_cpu(np->tx_ring[i].status),
995 				(le32_to_cpu(np->tx_ring[i].status) >> 2) & 0xff,
996 				le32_to_cpu(np->tx_ring[i].frag[0].addr),
997 				le32_to_cpu(np->tx_ring[i].frag[0].length));
998 		}
999 		printk(KERN_DEBUG "TxListPtr=%08x netif_queue_stopped=%d\n",
1000 			ioread32(np->base + TxListPtr),
1001 			netif_queue_stopped(dev));
1002 		printk(KERN_DEBUG "cur_tx=%d(%02x) dirty_tx=%d(%02x)\n",
1003 			np->cur_tx, np->cur_tx % TX_RING_SIZE,
1004 			np->dirty_tx, np->dirty_tx % TX_RING_SIZE);
1005 		printk(KERN_DEBUG "cur_rx=%d dirty_rx=%d\n", np->cur_rx, np->dirty_rx);
1006 		printk(KERN_DEBUG "cur_task=%d\n", np->cur_task);
1007 	}
1008 	spin_lock_irqsave(&np->lock, flag);
1009 
	/* Stop and restart the chip's Tx processes. */
1011 	reset_tx(dev);
1012 	spin_unlock_irqrestore(&np->lock, flag);
1013 
1014 	dev->if_port = 0;
1015 
1016 	netif_trans_update(dev); /* prevent tx timeout */
1017 	dev->stats.tx_errors++;
1018 	if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
1019 		netif_wake_queue(dev);
1020 	}
1021 	iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
1022 	tasklet_enable(&np->tx_tasklet);
1023 }
1024 
1025 
1026 /* Initialize the Rx and Tx rings, along with various 'dev' bits. */
1027 static void init_ring(struct net_device *dev)
1028 {
1029 	struct netdev_private *np = netdev_priv(dev);
1030 	int i;
1031 
1032 	np->cur_rx = np->cur_tx = 0;
1033 	np->dirty_rx = np->dirty_tx = 0;
1034 	np->cur_task = 0;
1035 
1036 	np->rx_buf_sz = (dev->mtu <= 1520 ? PKT_BUF_SZ : dev->mtu + 16);
1037 
1038 	/* Initialize all Rx descriptors. */
1039 	for (i = 0; i < RX_RING_SIZE; i++) {
1040 		np->rx_ring[i].next_desc = cpu_to_le32(np->rx_ring_dma +
1041 			((i+1)%RX_RING_SIZE)*sizeof(*np->rx_ring));
1042 		np->rx_ring[i].status = 0;
1043 		np->rx_ring[i].frag[0].length = 0;
1044 		np->rx_skbuff[i] = NULL;
1045 	}
1046 
1047 	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
1048 	for (i = 0; i < RX_RING_SIZE; i++) {
1049 		struct sk_buff *skb =
1050 			netdev_alloc_skb(dev, np->rx_buf_sz + 2);
1051 		np->rx_skbuff[i] = skb;
1052 		if (skb == NULL)
1053 			break;
1054 		skb_reserve(skb, 2);	/* 16 byte align the IP header. */
1055 		np->rx_ring[i].frag[0].addr = cpu_to_le32(
1056 			dma_map_single(&np->pci_dev->dev, skb->data,
1057 				np->rx_buf_sz, DMA_FROM_DEVICE));
1058 		if (dma_mapping_error(&np->pci_dev->dev,
1059 					np->rx_ring[i].frag[0].addr)) {
1060 			dev_kfree_skb(skb);
1061 			np->rx_skbuff[i] = NULL;
1062 			break;
1063 		}
1064 		np->rx_ring[i].frag[0].length = cpu_to_le32(np->rx_buf_sz | LastFrag);
1065 	}
1066 	np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
1067 
1068 	for (i = 0; i < TX_RING_SIZE; i++) {
1069 		np->tx_skbuff[i] = NULL;
1070 		np->tx_ring[i].status = 0;
1071 	}
1072 }
1073 
1074 static void tx_poll (unsigned long data)
1075 {
1076 	struct net_device *dev = (struct net_device *)data;
1077 	struct netdev_private *np = netdev_priv(dev);
1078 	unsigned head = np->cur_task % TX_RING_SIZE;
1079 	struct netdev_desc *txdesc =
1080 		&np->tx_ring[(np->cur_tx - 1) % TX_RING_SIZE];
1081 
1082 	/* Chain the next pointer */
1083 	for (; np->cur_tx - np->cur_task > 0; np->cur_task++) {
1084 		int entry = np->cur_task % TX_RING_SIZE;
1085 		txdesc = &np->tx_ring[entry];
1086 		if (np->last_tx) {
1087 			np->last_tx->next_desc = cpu_to_le32(np->tx_ring_dma +
1088 				entry*sizeof(struct netdev_desc));
1089 		}
1090 		np->last_tx = txdesc;
1091 	}
1092 	/* Indicate the latest descriptor of tx ring */
1093 	txdesc->status |= cpu_to_le32(DescIntrOnTx);
1094 
1095 	if (ioread32 (np->base + TxListPtr) == 0)
1096 		iowrite32 (np->tx_ring_dma + head * sizeof(struct netdev_desc),
1097 			np->base + TxListPtr);
1098 }
1099 
1100 static netdev_tx_t
1101 start_tx (struct sk_buff *skb, struct net_device *dev)
1102 {
1103 	struct netdev_private *np = netdev_priv(dev);
1104 	struct netdev_desc *txdesc;
1105 	unsigned entry;
1106 
1107 	/* Calculate the next Tx descriptor entry. */
1108 	entry = np->cur_tx % TX_RING_SIZE;
1109 	np->tx_skbuff[entry] = skb;
1110 	txdesc = &np->tx_ring[entry];
1111 
1112 	txdesc->next_desc = 0;
1113 	txdesc->status = cpu_to_le32 ((entry << 2) | DisableAlign);
1114 	txdesc->frag[0].addr = cpu_to_le32(dma_map_single(&np->pci_dev->dev,
1115 				skb->data, skb->len, DMA_TO_DEVICE));
1116 	if (dma_mapping_error(&np->pci_dev->dev,
1117 				txdesc->frag[0].addr))
1118 			goto drop_frame;
1119 	txdesc->frag[0].length = cpu_to_le32 (skb->len | LastFrag);
1120 
1121 	/* Increment cur_tx before tasklet_schedule() */
1122 	np->cur_tx++;
1123 	mb();
1124 	/* Schedule a tx_poll() task */
1125 	tasklet_schedule(&np->tx_tasklet);
1126 
1127 	/* On some architectures: explicitly flush cache lines here. */
1128 	if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 1 &&
1129 	    !netif_queue_stopped(dev)) {
1130 		/* do nothing */
1131 	} else {
1132 		netif_stop_queue (dev);
1133 	}
1134 	if (netif_msg_tx_queued(np)) {
1135 		printk (KERN_DEBUG
1136 			"%s: Transmit frame #%d queued in slot %d.\n",
1137 			dev->name, np->cur_tx, entry);
1138 	}
1139 	return NETDEV_TX_OK;
1140 
1141 drop_frame:
1142 	dev_kfree_skb_any(skb);
1143 	np->tx_skbuff[entry] = NULL;
1144 	dev->stats.tx_dropped++;
1145 	return NETDEV_TX_OK;
1146 }
1147 
/* Reset the hardware Tx logic and free all Tx buffers */
1149 static int
1150 reset_tx (struct net_device *dev)
1151 {
1152 	struct netdev_private *np = netdev_priv(dev);
1153 	void __iomem *ioaddr = np->base;
1154 	struct sk_buff *skb;
1155 	int i;
1156 
1157 	/* Reset tx logic, TxListPtr will be cleaned */
1158 	iowrite16 (TxDisable, ioaddr + MACCtrl1);
1159 	sundance_reset(dev, (NetworkReset|FIFOReset|DMAReset|TxReset) << 16);
1160 
1161 	/* free all tx skbuff */
1162 	for (i = 0; i < TX_RING_SIZE; i++) {
1163 		np->tx_ring[i].next_desc = 0;
1164 
1165 		skb = np->tx_skbuff[i];
1166 		if (skb) {
1167 			dma_unmap_single(&np->pci_dev->dev,
1168 				le32_to_cpu(np->tx_ring[i].frag[0].addr),
1169 				skb->len, DMA_TO_DEVICE);
1170 			dev_kfree_skb_any(skb);
1171 			np->tx_skbuff[i] = NULL;
1172 			dev->stats.tx_dropped++;
1173 		}
1174 	}
1175 	np->cur_tx = np->dirty_tx = 0;
1176 	np->cur_task = 0;
1177 
1178 	np->last_tx = NULL;
1179 	iowrite8(127, ioaddr + TxDMAPollPeriod);
1180 
1181 	iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);
1182 	return 0;
1183 }
1184 
/* The interrupt handler cleans up after the Tx thread and
   schedules the Rx work (rx_poll tasklet). */
1187 static irqreturn_t intr_handler(int irq, void *dev_instance)
1188 {
1189 	struct net_device *dev = (struct net_device *)dev_instance;
1190 	struct netdev_private *np = netdev_priv(dev);
1191 	void __iomem *ioaddr = np->base;
1192 	int hw_frame_id;
1193 	int tx_cnt;
1194 	int tx_status;
1195 	int handled = 0;
1196 	int i;
1197 
1198 
1199 	do {
1200 		int intr_status = ioread16(ioaddr + IntrStatus);
1201 		iowrite16(intr_status, ioaddr + IntrStatus);
1202 
1203 		if (netif_msg_intr(np))
1204 			printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n",
1205 				   dev->name, intr_status);
1206 
1207 		if (!(intr_status & DEFAULT_INTR))
1208 			break;
1209 
1210 		handled = 1;
1211 
1212 		if (intr_status & (IntrRxDMADone)) {
1213 			iowrite16(DEFAULT_INTR & ~(IntrRxDone|IntrRxDMADone),
1214 					ioaddr + IntrEnable);
1215 			if (np->budget < 0)
1216 				np->budget = RX_BUDGET;
1217 			tasklet_schedule(&np->rx_tasklet);
1218 		}
1219 		if (intr_status & (IntrTxDone | IntrDrvRqst)) {
1220 			tx_status = ioread16 (ioaddr + TxStatus);
1221 			for (tx_cnt=32; tx_status & 0x80; --tx_cnt) {
				if (netif_msg_tx_done(np))
					printk("%s: Transmit status is %2.2x.\n",
					       dev->name, tx_status);
1226 				if (tx_status & 0x1e) {
1227 					if (netif_msg_tx_err(np))
1228 						printk("%s: Transmit error status %4.4x.\n",
1229 							   dev->name, tx_status);
1230 					dev->stats.tx_errors++;
1231 					if (tx_status & 0x10)
1232 						dev->stats.tx_fifo_errors++;
1233 					if (tx_status & 0x08)
1234 						dev->stats.collisions++;
1235 					if (tx_status & 0x04)
1236 						dev->stats.tx_fifo_errors++;
1237 					if (tx_status & 0x02)
1238 						dev->stats.tx_window_errors++;
1239 
1240 					/*
1241 					** This reset has been verified on
1242 					** DFE-580TX boards ! phdm@macqel.be.
1243 					*/
1244 					if (tx_status & 0x10) {	/* TxUnderrun */
1245 						/* Restart Tx FIFO and transmitter */
1246 						sundance_reset(dev, (NetworkReset|FIFOReset|TxReset) << 16);
1247 						/* No need to reset the Tx pointer here */
1248 					}
1249 					/* Restart the Tx. Need to make sure tx enabled */
1250 					i = 10;
1251 					do {
1252 						iowrite16(ioread16(ioaddr + MACCtrl1) | TxEnable, ioaddr + MACCtrl1);
1253 						if (ioread16(ioaddr + MACCtrl1) & TxEnabled)
1254 							break;
1255 						mdelay(1);
1256 					} while (--i);
1257 				}
1258 				/* Yup, this is a documentation bug.  It cost me *hours*. */
1259 				iowrite16 (0, ioaddr + TxStatus);
1260 				if (tx_cnt < 0) {
1261 					iowrite32(5000, ioaddr + DownCounter);
1262 					break;
1263 				}
1264 				tx_status = ioread16 (ioaddr + TxStatus);
1265 			}
1266 			hw_frame_id = (tx_status >> 8) & 0xff;
1267 		} else 	{
1268 			hw_frame_id = ioread8(ioaddr + TxFrameId);
1269 		}
1270 
1271 		if (np->pci_dev->revision >= 0x14) {
1272 			spin_lock(&np->lock);
1273 			for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
1274 				int entry = np->dirty_tx % TX_RING_SIZE;
1275 				struct sk_buff *skb;
1276 				int sw_frame_id;
1277 				sw_frame_id = (le32_to_cpu(
1278 					np->tx_ring[entry].status) >> 2) & 0xff;
1279 				if (sw_frame_id == hw_frame_id &&
1280 					!(le32_to_cpu(np->tx_ring[entry].status)
1281 					& 0x00010000))
1282 						break;
1283 				if (sw_frame_id == (hw_frame_id + 1) %
1284 					TX_RING_SIZE)
1285 						break;
1286 				skb = np->tx_skbuff[entry];
1287 				/* Free the original skb. */
1288 				dma_unmap_single(&np->pci_dev->dev,
1289 					le32_to_cpu(np->tx_ring[entry].frag[0].addr),
1290 					skb->len, DMA_TO_DEVICE);
1291 				dev_kfree_skb_irq (np->tx_skbuff[entry]);
1292 				np->tx_skbuff[entry] = NULL;
1293 				np->tx_ring[entry].frag[0].addr = 0;
1294 				np->tx_ring[entry].frag[0].length = 0;
1295 			}
1296 			spin_unlock(&np->lock);
1297 		} else {
1298 			spin_lock(&np->lock);
1299 			for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
1300 				int entry = np->dirty_tx % TX_RING_SIZE;
1301 				struct sk_buff *skb;
1302 				if (!(le32_to_cpu(np->tx_ring[entry].status)
1303 							& 0x00010000))
1304 					break;
1305 				skb = np->tx_skbuff[entry];
1306 				/* Free the original skb. */
1307 				dma_unmap_single(&np->pci_dev->dev,
1308 					le32_to_cpu(np->tx_ring[entry].frag[0].addr),
1309 					skb->len, DMA_TO_DEVICE);
1310 				dev_kfree_skb_irq (np->tx_skbuff[entry]);
1311 				np->tx_skbuff[entry] = NULL;
1312 				np->tx_ring[entry].frag[0].addr = 0;
1313 				np->tx_ring[entry].frag[0].length = 0;
1314 			}
1315 			spin_unlock(&np->lock);
1316 		}
1317 
1318 		if (netif_queue_stopped(dev) &&
1319 			np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
1320 			/* The ring is no longer full, clear busy flag. */
1321 			netif_wake_queue (dev);
1322 		}
1323 		/* Abnormal error summary/uncommon events handlers. */
1324 		if (intr_status & (IntrPCIErr | LinkChange | StatsMax))
1325 			netdev_error(dev, intr_status);
1326 	} while (0);
1327 	if (netif_msg_intr(np))
1328 		printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
1329 			   dev->name, ioread16(ioaddr + IntrStatus));
1330 	return IRQ_RETVAL(handled);
1331 }
1332 
1333 static void rx_poll(unsigned long data)
1334 {
1335 	struct net_device *dev = (struct net_device *)data;
1336 	struct netdev_private *np = netdev_priv(dev);
1337 	int entry = np->cur_rx % RX_RING_SIZE;
1338 	int boguscnt = np->budget;
1339 	void __iomem *ioaddr = np->base;
1340 	int received = 0;
1341 
1342 	/* If EOP is set on the next entry, it's a new packet. Send it up. */
1343 	while (1) {
1344 		struct netdev_desc *desc = &(np->rx_ring[entry]);
1345 		u32 frame_status = le32_to_cpu(desc->status);
1346 		int pkt_len;
1347 
1348 		if (--boguscnt < 0) {
1349 			goto not_done;
1350 		}
1351 		if (!(frame_status & DescOwn))
1352 			break;
1353 		pkt_len = frame_status & 0x1fff;	/* Chip omits the CRC. */
1354 		if (netif_msg_rx_status(np))
1355 			printk(KERN_DEBUG "  netdev_rx() status was %8.8x.\n",
1356 				   frame_status);
1357 		if (frame_status & 0x001f4000) {
			/* There was an error. */
1359 			if (netif_msg_rx_err(np))
1360 				printk(KERN_DEBUG "  netdev_rx() Rx error was %8.8x.\n",
1361 					   frame_status);
1362 			dev->stats.rx_errors++;
1363 			if (frame_status & 0x00100000)
1364 				dev->stats.rx_length_errors++;
1365 			if (frame_status & 0x00010000)
1366 				dev->stats.rx_fifo_errors++;
1367 			if (frame_status & 0x00060000)
1368 				dev->stats.rx_frame_errors++;
1369 			if (frame_status & 0x00080000)
1370 				dev->stats.rx_crc_errors++;
1371 			if (frame_status & 0x00100000) {
1372 				printk(KERN_WARNING "%s: Oversized Ethernet frame,"
1373 					   " status %8.8x.\n",
1374 					   dev->name, frame_status);
1375 			}
1376 		} else {
1377 			struct sk_buff *skb;
1378 #ifndef final_version
1379 			if (netif_msg_rx_status(np))
1380 				printk(KERN_DEBUG "  netdev_rx() normal Rx pkt length %d"
1381 					   ", bogus_cnt %d.\n",
1382 					   pkt_len, boguscnt);
1383 #endif
1384 			/* Check if the packet is long enough to accept without copying
1385 			   to a minimally-sized skbuff. */
1386 			if (pkt_len < rx_copybreak &&
1387 			    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
1388 				skb_reserve(skb, 2);	/* 16 byte align the IP header */
1389 				dma_sync_single_for_cpu(&np->pci_dev->dev,
1390 						le32_to_cpu(desc->frag[0].addr),
1391 						np->rx_buf_sz, DMA_FROM_DEVICE);
1392 				skb_copy_to_linear_data(skb, np->rx_skbuff[entry]->data, pkt_len);
1393 				dma_sync_single_for_device(&np->pci_dev->dev,
1394 						le32_to_cpu(desc->frag[0].addr),
1395 						np->rx_buf_sz, DMA_FROM_DEVICE);
1396 				skb_put(skb, pkt_len);
1397 			} else {
1398 				dma_unmap_single(&np->pci_dev->dev,
1399 					le32_to_cpu(desc->frag[0].addr),
1400 					np->rx_buf_sz, DMA_FROM_DEVICE);
1401 				skb_put(skb = np->rx_skbuff[entry], pkt_len);
1402 				np->rx_skbuff[entry] = NULL;
1403 			}
1404 			skb->protocol = eth_type_trans(skb, dev);
1405 			/* Note: checksum -> skb->ip_summed = CHECKSUM_UNNECESSARY; */
1406 			netif_rx(skb);
1407 		}
1408 		entry = (entry + 1) % RX_RING_SIZE;
1409 		received++;
1410 	}
1411 	np->cur_rx = entry;
1412 	refill_rx (dev);
1413 	np->budget -= received;
1414 	iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
1415 	return;
1416 
1417 not_done:
1418 	np->cur_rx = entry;
1419 	refill_rx (dev);
1420 	if (!received)
1421 		received = 1;
1422 	np->budget -= received;
1423 	if (np->budget <= 0)
1424 		np->budget = RX_BUDGET;
1425 	tasklet_schedule(&np->rx_tasklet);
1426 }
1427 
1428 static void refill_rx (struct net_device *dev)
1429 {
1430 	struct netdev_private *np = netdev_priv(dev);
1431 	int entry;
1432 	int cnt = 0;
1433 
1434 	/* Refill the Rx ring buffers. */
1435 	for (;(np->cur_rx - np->dirty_rx + RX_RING_SIZE) % RX_RING_SIZE > 0;
1436 		np->dirty_rx = (np->dirty_rx + 1) % RX_RING_SIZE) {
1437 		struct sk_buff *skb;
1438 		entry = np->dirty_rx % RX_RING_SIZE;
1439 		if (np->rx_skbuff[entry] == NULL) {
1440 			skb = netdev_alloc_skb(dev, np->rx_buf_sz + 2);
1441 			np->rx_skbuff[entry] = skb;
1442 			if (skb == NULL)
1443 				break;		/* Better luck next round. */
1444 			skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
1445 			np->rx_ring[entry].frag[0].addr = cpu_to_le32(
1446 				dma_map_single(&np->pci_dev->dev, skb->data,
1447 					np->rx_buf_sz, DMA_FROM_DEVICE));
1448 			if (dma_mapping_error(&np->pci_dev->dev,
1449 				    np->rx_ring[entry].frag[0].addr)) {
1450 			    dev_kfree_skb_irq(skb);
1451 			    np->rx_skbuff[entry] = NULL;
1452 			    break;
1453 			}
1454 		}
1455 		/* Perhaps we need not reset this field. */
1456 		np->rx_ring[entry].frag[0].length =
1457 			cpu_to_le32(np->rx_buf_sz | LastFrag);
1458 		np->rx_ring[entry].status = 0;
1459 		cnt++;
1460 	}
1461 }
1462 static void netdev_error(struct net_device *dev, int intr_status)
1463 {
1464 	struct netdev_private *np = netdev_priv(dev);
1465 	void __iomem *ioaddr = np->base;
1466 	u16 mii_ctl, mii_advertise, mii_lpa;
1467 	int speed;
1468 
1469 	if (intr_status & LinkChange) {
1470 		if (mdio_wait_link(dev, 10) == 0) {
1471 			printk(KERN_INFO "%s: Link up\n", dev->name);
1472 			if (np->an_enable) {
1473 				mii_advertise = mdio_read(dev, np->phys[0],
1474 							   MII_ADVERTISE);
1475 				mii_lpa = mdio_read(dev, np->phys[0], MII_LPA);
1476 				mii_advertise &= mii_lpa;
1477 				printk(KERN_INFO "%s: Link changed: ",
1478 					dev->name);
1479 				if (mii_advertise & ADVERTISE_100FULL) {
1480 					np->speed = 100;
1481 					printk("100Mbps, full duplex\n");
1482 				} else if (mii_advertise & ADVERTISE_100HALF) {
1483 					np->speed = 100;
1484 					printk("100Mbps, half duplex\n");
1485 				} else if (mii_advertise & ADVERTISE_10FULL) {
1486 					np->speed = 10;
1487 					printk("10Mbps, full duplex\n");
1488 				} else if (mii_advertise & ADVERTISE_10HALF) {
1489 					np->speed = 10;
1490 					printk("10Mbps, half duplex\n");
1491 				} else
1492 					printk("\n");
1493 
1494 			} else {
1495 				mii_ctl = mdio_read(dev, np->phys[0], MII_BMCR);
1496 				speed = (mii_ctl & BMCR_SPEED100) ? 100 : 10;
1497 				np->speed = speed;
				printk(KERN_INFO "%s: Link changed: %dMbps, %s duplex.\n",
					dev->name, speed,
					(mii_ctl & BMCR_FULLDPLX) ?
						"full" : "half");
1503 			}
1504 			check_duplex(dev);
1505 			if (np->flowctrl && np->mii_if.full_duplex) {
1506 				iowrite16(ioread16(ioaddr + MulticastFilter1+2) | 0x0200,
1507 					ioaddr + MulticastFilter1+2);
1508 				iowrite16(ioread16(ioaddr + MACCtrl0) | EnbFlowCtrl,
1509 					ioaddr + MACCtrl0);
1510 			}
1511 			netif_carrier_on(dev);
1512 		} else {
1513 			printk(KERN_INFO "%s: Link down\n", dev->name);
1514 			netif_carrier_off(dev);
1515 		}
1516 	}
1517 	if (intr_status & StatsMax) {
1518 		get_stats(dev);
1519 	}
1520 	if (intr_status & IntrPCIErr) {
1521 		printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n",
1522 			   dev->name, intr_status);
1523 		/* We must do a global reset of DMA to continue. */
1524 	}
1525 }
1526 
1527 static struct net_device_stats *get_stats(struct net_device *dev)
1528 {
1529 	struct netdev_private *np = netdev_priv(dev);
1530 	void __iomem *ioaddr = np->base;
1531 	unsigned long flags;
1532 	u8 late_coll, single_coll, mult_coll;
1533 
1534 	spin_lock_irqsave(&np->statlock, flags);
	/* The chip only needs to report frames silently dropped. */
1536 	dev->stats.rx_missed_errors	+= ioread8(ioaddr + RxMissed);
1537 	dev->stats.tx_packets += ioread16(ioaddr + TxFramesOK);
1538 	dev->stats.rx_packets += ioread16(ioaddr + RxFramesOK);
1539 	dev->stats.tx_carrier_errors += ioread8(ioaddr + StatsCarrierError);
1540 
1541 	mult_coll = ioread8(ioaddr + StatsMultiColl);
1542 	np->xstats.tx_multiple_collisions += mult_coll;
1543 	single_coll = ioread8(ioaddr + StatsOneColl);
1544 	np->xstats.tx_single_collisions += single_coll;
1545 	late_coll = ioread8(ioaddr + StatsLateColl);
1546 	np->xstats.tx_late_collisions += late_coll;
1547 	dev->stats.collisions += mult_coll
1548 		+ single_coll
1549 		+ late_coll;
1550 
1551 	np->xstats.tx_deferred += ioread8(ioaddr + StatsTxDefer);
1552 	np->xstats.tx_deferred_excessive += ioread8(ioaddr + StatsTxXSDefer);
1553 	np->xstats.tx_aborted += ioread8(ioaddr + StatsTxAbort);
1554 	np->xstats.tx_bcasts += ioread8(ioaddr + StatsBcastTx);
1555 	np->xstats.rx_bcasts += ioread8(ioaddr + StatsBcastRx);
1556 	np->xstats.tx_mcasts += ioread8(ioaddr + StatsMcastTx);
1557 	np->xstats.rx_mcasts += ioread8(ioaddr + StatsMcastRx);
1558 
1559 	dev->stats.tx_bytes += ioread16(ioaddr + TxOctetsLow);
1560 	dev->stats.tx_bytes += ioread16(ioaddr + TxOctetsHigh) << 16;
1561 	dev->stats.rx_bytes += ioread16(ioaddr + RxOctetsLow);
1562 	dev->stats.rx_bytes += ioread16(ioaddr + RxOctetsHigh) << 16;
1563 
1564 	spin_unlock_irqrestore(&np->statlock, flags);
1565 
1566 	return &dev->stats;
1567 }
1568 
1569 static void set_rx_mode(struct net_device *dev)
1570 {
1571 	struct netdev_private *np = netdev_priv(dev);
1572 	void __iomem *ioaddr = np->base;
1573 	u16 mc_filter[4];			/* Multicast hash filter */
1574 	u32 rx_mode;
1575 	int i;
1576 
1577 	if (dev->flags & IFF_PROMISC) {			/* Set promiscuous. */
1578 		memset(mc_filter, 0xff, sizeof(mc_filter));
1579 		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptAll | AcceptMyPhys;
1580 	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
1581 		   (dev->flags & IFF_ALLMULTI)) {
1582 		/* Too many to match, or accept all multicasts. */
1583 		memset(mc_filter, 0xff, sizeof(mc_filter));
1584 		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
1585 	} else if (!netdev_mc_empty(dev)) {
1586 		struct netdev_hw_addr *ha;
1587 		int bit;
1588 		int index;
1589 		int crc;
1590 		memset (mc_filter, 0, sizeof (mc_filter));
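		/* Hash each address with the little-endian Ethernet CRC and
		 * use its 6 most-significant bits (bit-reversed below) as an
		 * index into the 64-bit MulticastFilter0/1 table. */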
1591 		netdev_for_each_mc_addr(ha, dev) {
1592 			crc = ether_crc_le(ETH_ALEN, ha->addr);
1593 			for (index=0, bit=0; bit < 6; bit++, crc <<= 1)
1594 				if (crc & 0x80000000) index |= 1 << bit;
1595 			mc_filter[index/16] |= (1 << (index % 16));
1596 		}
1597 		rx_mode = AcceptBroadcast | AcceptMultiHash | AcceptMyPhys;
1598 	} else {
1599 		iowrite8(AcceptBroadcast | AcceptMyPhys, ioaddr + RxMode);
1600 		return;
1601 	}
1602 	if (np->mii_if.full_duplex && np->flowctrl)
1603 		mc_filter[3] |= 0x0200;
1604 
1605 	for (i = 0; i < 4; i++)
1606 		iowrite16(mc_filter[i], ioaddr + MulticastFilter0 + i*2);
1607 	iowrite8(rx_mode, ioaddr + RxMode);
1608 }
1609 
1610 static int __set_mac_addr(struct net_device *dev)
1611 {
1612 	struct netdev_private *np = netdev_priv(dev);
1613 	u16 addr16;
1614 
1615 	addr16 = (dev->dev_addr[0] | (dev->dev_addr[1] << 8));
1616 	iowrite16(addr16, np->base + StationAddr);
1617 	addr16 = (dev->dev_addr[2] | (dev->dev_addr[3] << 8));
1618 	iowrite16(addr16, np->base + StationAddr+2);
1619 	addr16 = (dev->dev_addr[4] | (dev->dev_addr[5] << 8));
1620 	iowrite16(addr16, np->base + StationAddr+4);
1621 	return 0;
1622 }
1623 
1624 /* Invoked with rtnl_lock held */
1625 static int sundance_set_mac_addr(struct net_device *dev, void *data)
1626 {
1627 	const struct sockaddr *addr = data;
1628 
1629 	if (!is_valid_ether_addr(addr->sa_data))
1630 		return -EADDRNOTAVAIL;
1631 	memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
1632 	__set_mac_addr(dev);
1633 
1634 	return 0;
1635 }
1636 
1637 static const struct {
1638 	const char name[ETH_GSTRING_LEN];
1639 } sundance_stats[] = {
1640 	{ "tx_multiple_collisions" },
1641 	{ "tx_single_collisions" },
1642 	{ "tx_late_collisions" },
1643 	{ "tx_deferred" },
1644 	{ "tx_deferred_excessive" },
1645 	{ "tx_aborted" },
1646 	{ "tx_bcasts" },
1647 	{ "rx_bcasts" },
1648 	{ "tx_mcasts" },
1649 	{ "rx_mcasts" },
1650 };
1651 
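/* ethtool_ops.begin hook: reject ethtool requests while the interface
   is down. */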
1652 static int check_if_running(struct net_device *dev)
1653 {
1654 	if (!netif_running(dev))
1655 		return -EINVAL;
1656 	return 0;
1657 }
1658 
1659 static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1660 {
1661 	struct netdev_private *np = netdev_priv(dev);
1662 	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
1663 	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
1664 	strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
1665 }
1666 
1667 static int get_link_ksettings(struct net_device *dev,
1668 			      struct ethtool_link_ksettings *cmd)
1669 {
1670 	struct netdev_private *np = netdev_priv(dev);
1671 	spin_lock_irq(&np->lock);
1672 	mii_ethtool_get_link_ksettings(&np->mii_if, cmd);
1673 	spin_unlock_irq(&np->lock);
1674 	return 0;
1675 }
1676 
1677 static int set_link_ksettings(struct net_device *dev,
1678 			      const struct ethtool_link_ksettings *cmd)
1679 {
1680 	struct netdev_private *np = netdev_priv(dev);
1681 	int res;
1682 	spin_lock_irq(&np->lock);
1683 	res = mii_ethtool_set_link_ksettings(&np->mii_if, cmd);
1684 	spin_unlock_irq(&np->lock);
1685 	return res;
1686 }
1687 
1688 static int nway_reset(struct net_device *dev)
1689 {
1690 	struct netdev_private *np = netdev_priv(dev);
1691 	return mii_nway_restart(&np->mii_if);
1692 }
1693 
1694 static u32 get_link(struct net_device *dev)
1695 {
1696 	struct netdev_private *np = netdev_priv(dev);
1697 	return mii_link_ok(&np->mii_if);
1698 }
1699 
1700 static u32 get_msglevel(struct net_device *dev)
1701 {
1702 	struct netdev_private *np = netdev_priv(dev);
1703 	return np->msg_enable;
1704 }
1705 
1706 static void set_msglevel(struct net_device *dev, u32 val)
1707 {
1708 	struct netdev_private *np = netdev_priv(dev);
1709 	np->msg_enable = val;
1710 }
1711 
1712 static void get_strings(struct net_device *dev, u32 stringset,
1713 		u8 *data)
1714 {
1715 	if (stringset == ETH_SS_STATS)
1716 		memcpy(data, sundance_stats, sizeof(sundance_stats));
1717 }
1718 
1719 static int get_sset_count(struct net_device *dev, int sset)
1720 {
1721 	switch (sset) {
1722 	case ETH_SS_STATS:
1723 		return ARRAY_SIZE(sundance_stats);
1724 	default:
1725 		return -EOPNOTSUPP;
1726 	}
1727 }
1728 
1729 static void get_ethtool_stats(struct net_device *dev,
1730 		struct ethtool_stats *stats, u64 *data)
1731 {
1732 	struct netdev_private *np = netdev_priv(dev);
1733 	int i = 0;
1734 
1735 	get_stats(dev);
1736 	data[i++] = np->xstats.tx_multiple_collisions;
1737 	data[i++] = np->xstats.tx_single_collisions;
1738 	data[i++] = np->xstats.tx_late_collisions;
1739 	data[i++] = np->xstats.tx_deferred;
1740 	data[i++] = np->xstats.tx_deferred_excessive;
1741 	data[i++] = np->xstats.tx_aborted;
1742 	data[i++] = np->xstats.tx_bcasts;
1743 	data[i++] = np->xstats.rx_bcasts;
1744 	data[i++] = np->xstats.tx_mcasts;
1745 	data[i++] = np->xstats.rx_mcasts;
1746 }
1747 
1748 #ifdef CONFIG_PM
1749 
1750 static void sundance_get_wol(struct net_device *dev,
1751 		struct ethtool_wolinfo *wol)
1752 {
1753 	struct netdev_private *np = netdev_priv(dev);
1754 	void __iomem *ioaddr = np->base;
1755 	u8 wol_bits;
1756 
1757 	wol->wolopts = 0;
1758 
1759 	wol->supported = (WAKE_PHY | WAKE_MAGIC);
1760 	if (!np->wol_enabled)
1761 		return;
1762 
1763 	wol_bits = ioread8(ioaddr + WakeEvent);
1764 	if (wol_bits & MagicPktEnable)
1765 		wol->wolopts |= WAKE_MAGIC;
1766 	if (wol_bits & LinkEventEnable)
1767 		wol->wolopts |= WAKE_PHY;
1768 }
1769 
1770 static int sundance_set_wol(struct net_device *dev,
1771 	struct ethtool_wolinfo *wol)
1772 {
1773 	struct netdev_private *np = netdev_priv(dev);
1774 	void __iomem *ioaddr = np->base;
1775 	u8 wol_bits;
1776 
1777 	if (!device_can_wakeup(&np->pci_dev->dev))
1778 		return -EOPNOTSUPP;
1779 
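	/* Clear all wake-event enable bits, then re-arm only those requested. */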
1780 	np->wol_enabled = !!(wol->wolopts);
1781 	wol_bits = ioread8(ioaddr + WakeEvent);
1782 	wol_bits &= ~(WakePktEnable | MagicPktEnable |
1783 			LinkEventEnable | WolEnable);
1784 
1785 	if (np->wol_enabled) {
1786 		if (wol->wolopts & WAKE_MAGIC)
1787 			wol_bits |= (MagicPktEnable | WolEnable);
1788 		if (wol->wolopts & WAKE_PHY)
1789 			wol_bits |= (LinkEventEnable | WolEnable);
1790 	}
1791 	iowrite8(wol_bits, ioaddr + WakeEvent);
1792 
1793 	device_set_wakeup_enable(&np->pci_dev->dev, np->wol_enabled);
1794 
1795 	return 0;
1796 }
1797 #else
1798 #define sundance_get_wol NULL
1799 #define sundance_set_wol NULL
1800 #endif /* CONFIG_PM */
1801 
1802 static const struct ethtool_ops ethtool_ops = {
1803 	.begin = check_if_running,
1804 	.get_drvinfo = get_drvinfo,
1805 	.nway_reset = nway_reset,
1806 	.get_link = get_link,
1807 	.get_wol = sundance_get_wol,
1808 	.set_wol = sundance_set_wol,
1809 	.get_msglevel = get_msglevel,
1810 	.set_msglevel = set_msglevel,
1811 	.get_strings = get_strings,
1812 	.get_sset_count = get_sset_count,
1813 	.get_ethtool_stats = get_ethtool_stats,
1814 	.get_link_ksettings = get_link_ksettings,
1815 	.set_link_ksettings = set_link_ksettings,
1816 };
1817 
1818 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1819 {
1820 	struct netdev_private *np = netdev_priv(dev);
1821 	int rc;
1822 
1823 	if (!netif_running(dev))
1824 		return -EINVAL;
1825 
1826 	spin_lock_irq(&np->lock);
1827 	rc = generic_mii_ioctl(&np->mii_if, if_mii(rq), cmd, NULL);
1828 	spin_unlock_irq(&np->lock);
1829 
1830 	return rc;
1831 }
1832 
1833 static int netdev_close(struct net_device *dev)
1834 {
1835 	struct netdev_private *np = netdev_priv(dev);
1836 	void __iomem *ioaddr = np->base;
1837 	struct sk_buff *skb;
1838 	int i;
1839 
	/* Wait for and kill the Rx and Tx tasklets. */
1841 	tasklet_kill(&np->rx_tasklet);
1842 	tasklet_kill(&np->tx_tasklet);
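	/* With the tasklets stopped, reset the software queue state. */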
1843 	np->cur_tx = 0;
1844 	np->dirty_tx = 0;
1845 	np->cur_task = 0;
1846 	np->last_tx = NULL;
1847 
1848 	netif_stop_queue(dev);
1849 
1850 	if (netif_msg_ifdown(np)) {
1851 		printk(KERN_DEBUG "%s: Shutting down ethercard, status was Tx %2.2x "
1852 			   "Rx %4.4x Int %2.2x.\n",
1853 			   dev->name, ioread8(ioaddr + TxStatus),
1854 			   ioread32(ioaddr + RxStatus), ioread16(ioaddr + IntrStatus));
1855 		printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d,  Rx %d / %d.\n",
1856 			   dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx);
1857 	}
1858 
1859 	/* Disable interrupts by clearing the interrupt mask. */
1860 	iowrite16(0x0000, ioaddr + IntrEnable);
1861 
	/* Disable Rx and Tx DMA so that resources can be released safely. */
1863 	iowrite32(0x500, ioaddr + DMACtrl);
1864 
1865 	/* Stop the chip's Tx and Rx processes. */
1866 	iowrite16(TxDisable | RxDisable | StatsDisable, ioaddr + MACCtrl1);
1867 
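	/* Wait up to ~2 seconds for the DMA engines to go idle. */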
	for (i = 2000; i > 0; i--) {
		if ((ioread32(ioaddr + DMACtrl) & 0xc000) == 0)
			break;
		mdelay(1);
	}
1873 
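	/* Issue a full chip reset and wait for the reset-busy bit to clear. */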
	iowrite16(GlobalReset | DMAReset | FIFOReset | NetworkReset,
			ioaddr + ASIC_HI_WORD(ASICCtrl));

	for (i = 2000; i > 0; i--) {
		if ((ioread16(ioaddr + ASIC_HI_WORD(ASICCtrl)) & ResetBusy) == 0)
			break;
		mdelay(1);
	}
1882 
1883 #ifdef __i386__
1884 	if (netif_msg_hw(np)) {
1885 		printk(KERN_DEBUG "  Tx ring at %8.8x:\n",
1886 			   (int)(np->tx_ring_dma));
1887 		for (i = 0; i < TX_RING_SIZE; i++)
1888 			printk(KERN_DEBUG " #%d desc. %4.4x %8.8x %8.8x.\n",
1889 				   i, np->tx_ring[i].status, np->tx_ring[i].frag[0].addr,
1890 				   np->tx_ring[i].frag[0].length);
1891 		printk(KERN_DEBUG "  Rx ring %8.8x:\n",
1892 			   (int)(np->rx_ring_dma));
		for (i = 0; i < /*RX_RING_SIZE*/ 4; i++) {
1894 			printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x\n",
1895 				   i, np->rx_ring[i].status, np->rx_ring[i].frag[0].addr,
1896 				   np->rx_ring[i].frag[0].length);
1897 		}
1898 	}
1899 #endif /* __i386__ debugging only */
1900 
1901 	free_irq(np->pci_dev->irq, dev);
1902 
1903 	del_timer_sync(&np->timer);
1904 
1905 	/* Free all the skbuffs in the Rx queue. */
1906 	for (i = 0; i < RX_RING_SIZE; i++) {
1907 		np->rx_ring[i].status = 0;
1908 		skb = np->rx_skbuff[i];
1909 		if (skb) {
1910 			dma_unmap_single(&np->pci_dev->dev,
1911 				le32_to_cpu(np->rx_ring[i].frag[0].addr),
1912 				np->rx_buf_sz, DMA_FROM_DEVICE);
1913 			dev_kfree_skb(skb);
1914 			np->rx_skbuff[i] = NULL;
1915 		}
1916 		np->rx_ring[i].frag[0].addr = cpu_to_le32(0xBADF00D0); /* poison */
1917 	}
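	/* Free any packets still pending in the Tx ring. */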
1918 	for (i = 0; i < TX_RING_SIZE; i++) {
1919 		np->tx_ring[i].next_desc = 0;
1920 		skb = np->tx_skbuff[i];
1921 		if (skb) {
1922 			dma_unmap_single(&np->pci_dev->dev,
1923 				le32_to_cpu(np->tx_ring[i].frag[0].addr),
1924 				skb->len, DMA_TO_DEVICE);
1925 			dev_kfree_skb(skb);
1926 			np->tx_skbuff[i] = NULL;
1927 		}
1928 	}
1929 
1930 	return 0;
1931 }
1932 
1933 static void sundance_remove1(struct pci_dev *pdev)
1934 {
1935 	struct net_device *dev = pci_get_drvdata(pdev);
1936 
1937 	if (dev) {
		struct netdev_private *np = netdev_priv(dev);

		unregister_netdev(dev);
		dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE,
			np->rx_ring, np->rx_ring_dma);
		dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE,
			np->tx_ring, np->tx_ring_dma);
		pci_iounmap(pdev, np->base);
		pci_release_regions(pdev);
		free_netdev(dev);
1947 	}
1948 }
1949 
1950 #ifdef CONFIG_PM
1951 
1952 static int sundance_suspend(struct pci_dev *pci_dev, pm_message_t state)
1953 {
1954 	struct net_device *dev = pci_get_drvdata(pci_dev);
1955 	struct netdev_private *np = netdev_priv(dev);
1956 	void __iomem *ioaddr = np->base;
1957 
1958 	if (!netif_running(dev))
1959 		return 0;
1960 
1961 	netdev_close(dev);
1962 	netif_device_detach(dev);
1963 
1964 	pci_save_state(pci_dev);
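	/* If Wake-on-LAN is armed, leave the receiver enabled so the chip
	   can still see magic-packet and link-change events while suspended. */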
1965 	if (np->wol_enabled) {
1966 		iowrite8(AcceptBroadcast | AcceptMyPhys, ioaddr + RxMode);
1967 		iowrite16(RxEnable, ioaddr + MACCtrl1);
1968 	}
1969 	pci_enable_wake(pci_dev, pci_choose_state(pci_dev, state),
1970 			np->wol_enabled);
1971 	pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state));
1972 
1973 	return 0;
1974 }
1975 
1976 static int sundance_resume(struct pci_dev *pci_dev)
1977 {
1978 	struct net_device *dev = pci_get_drvdata(pci_dev);
1979 	int err = 0;
1980 
1981 	if (!netif_running(dev))
1982 		return 0;
1983 
1984 	pci_set_power_state(pci_dev, PCI_D0);
1985 	pci_restore_state(pci_dev);
1986 	pci_enable_wake(pci_dev, PCI_D0, 0);
1987 
1988 	err = netdev_open(dev);
1989 	if (err) {
1990 		printk(KERN_ERR "%s: Can't resume interface!\n",
1991 				dev->name);
1992 		goto out;
1993 	}
1994 
1995 	netif_device_attach(dev);
1996 
1997 out:
1998 	return err;
1999 }
2000 
2001 #endif /* CONFIG_PM */
2002 
2003 static struct pci_driver sundance_driver = {
2004 	.name		= DRV_NAME,
2005 	.id_table	= sundance_pci_tbl,
2006 	.probe		= sundance_probe1,
2007 	.remove		= sundance_remove1,
2008 #ifdef CONFIG_PM
2009 	.suspend	= sundance_suspend,
2010 	.resume		= sundance_resume,
2011 #endif /* CONFIG_PM */
2012 };
2013 
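/*
 * Illustrative module usage (a sketch; assumes "debug" and "media" are
 * exposed as module parameters, as the header comment describes; the
 * values below are examples only):
 *
 *	# modprobe sundance debug=2 media=100mbps_fd
 */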
2014 static int __init sundance_init(void)
2015 {
/* When built as a module, this is printed whether or not any devices are found during probe. */
2017 #ifdef MODULE
2018 	printk(version);
2019 #endif
2020 	return pci_register_driver(&sundance_driver);
2021 }
2022 
2023 static void __exit sundance_exit(void)
2024 {
2025 	pci_unregister_driver(&sundance_driver);
2026 }
2027 
2028 module_init(sundance_init);
2029 module_exit(sundance_exit);
2030 