1 /* sundance.c: A Linux device driver for the Sundance ST201 "Alta". */
2 /*
3 	Written 1999-2000 by Donald Becker.
4 
5 	This software may be used and distributed according to the terms of
6 	the GNU General Public License (GPL), incorporated herein by reference.
7 	Drivers based on or derived from this code fall under the GPL and must
8 	retain the authorship, copyright and license notice.  This file is not
9 	a complete program and may only be used when the entire operating
10 	system is licensed under the GPL.
11 
12 	The author may be reached as becker@scyld.com, or C/O
13 	Scyld Computing Corporation
14 	410 Severn Ave., Suite 210
15 	Annapolis MD 21403
16 
17 	Support and updates available at
18 	http://www.scyld.com/network/sundance.html
19 	[link no longer provides useful info -jgarzik]
20 	Archives of the mailing list are still available at
21 	http://www.beowulf.org/pipermail/netdrivers/
22 
23 */
24 
25 #define DRV_NAME	"sundance"
26 
27 /* The user-configurable values.
28    These may be modified when a driver module is loaded.*/
29 static int debug = 1;			/* 1 normal messages, 0 quiet .. 7 verbose. */
30 /* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
31    Typical is a 64 element hash table based on the Ethernet CRC.  */
32 static const int multicast_filter_limit = 32;
33 
34 /* Set the copy breakpoint for the copy-only-tiny-frames scheme.
35    Setting to > 1518 effectively disables this feature.
36    This chip can receive into offset buffers, so the Alpha does not
37    need a copy-align. */
38 static int rx_copybreak;
39 static int flowctrl=1;
40 
41 /* media[] specifies the media type the NIC operates at.
42 		 autosense	Autosensing active media.
43 		 10mbps_hd 	10Mbps half duplex.
44 		 10mbps_fd 	10Mbps full duplex.
45 		 100mbps_hd 	100Mbps half duplex.
46 		 100mbps_fd 	100Mbps full duplex.
47 		 0		Autosensing active media.
48 		 1	 	10Mbps half duplex.
49 		 2	 	10Mbps full duplex.
50 		 3	 	100Mbps half duplex.
51 		 4	 	100Mbps full duplex.
52 */
53 #define MAX_UNITS 8
54 static char *media[MAX_UNITS];
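
/*
 * Example module load (an illustrative invocation, not from the original
 * documentation):
 *
 *	modprobe sundance media=100mbps_fd,autosense debug=3 flowctrl=1
 *
 * This would force the first card to 100 Mbps full duplex, let the second
 * autosense, and raise the message level; unlisted cards keep the defaults.
 */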
55 
56 
57 /* Operational parameters that are set at compile time. */
58 
59 /* Keep the ring sizes a power of two for compile efficiency.
60    The compiler will convert <unsigned>'%'<2^N> into a bit mask.
61    Making the Tx ring too large decreases the effectiveness of channel
62    bonding and packet priority, and more than 128 requires modifying the
63    Tx error recovery.
64    Large receive rings merely waste memory. */
65 #define TX_RING_SIZE	32
66 #define TX_QUEUE_LEN	(TX_RING_SIZE - 1) /* Limit ring entries actually used.  */
67 #define RX_RING_SIZE	64
68 #define RX_BUDGET	32
#define TX_TOTAL_SIZE	(TX_RING_SIZE * sizeof(struct netdev_desc))
#define RX_TOTAL_SIZE	(RX_RING_SIZE * sizeof(struct netdev_desc))
71 
72 /* Operational parameters that usually are not changed. */
73 /* Time in jiffies before concluding the transmitter is hung. */
74 #define TX_TIMEOUT  (4*HZ)
75 #define PKT_BUF_SZ		1536	/* Size of each temporary Rx buffer.*/
76 
77 /* Include files, designed to support most kernel versions 2.0.0 and later. */
78 #include <linux/module.h>
79 #include <linux/kernel.h>
80 #include <linux/string.h>
81 #include <linux/timer.h>
82 #include <linux/errno.h>
83 #include <linux/ioport.h>
84 #include <linux/interrupt.h>
85 #include <linux/pci.h>
86 #include <linux/netdevice.h>
87 #include <linux/etherdevice.h>
88 #include <linux/skbuff.h>
89 #include <linux/init.h>
90 #include <linux/bitops.h>
91 #include <linux/uaccess.h>
92 #include <asm/processor.h>		/* Processor type for cache alignment. */
93 #include <asm/io.h>
94 #include <linux/delay.h>
95 #include <linux/spinlock.h>
96 #include <linux/dma-mapping.h>
97 #include <linux/crc32.h>
98 #include <linux/ethtool.h>
99 #include <linux/mii.h>
100 
101 MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
102 MODULE_DESCRIPTION("Sundance Alta Ethernet driver");
103 MODULE_LICENSE("GPL");
104 
105 module_param(debug, int, 0);
106 module_param(rx_copybreak, int, 0);
107 module_param_array(media, charp, NULL, 0);
108 module_param(flowctrl, int, 0);
109 MODULE_PARM_DESC(debug, "Sundance Alta debug level (0-5)");
110 MODULE_PARM_DESC(rx_copybreak, "Sundance Alta copy breakpoint for copy-only-tiny-frames");
111 MODULE_PARM_DESC(flowctrl, "Sundance Alta flow control [0|1]");
112 
113 /*
114 				Theory of Operation
115 
116 I. Board Compatibility
117 
118 This driver is designed for the Sundance Technologies "Alta" ST201 chip.
119 
120 II. Board-specific settings
121 
122 III. Driver operation
123 
124 IIIa. Ring buffers
125 
126 This driver uses two statically allocated fixed-size descriptor lists
127 formed into rings by a branch from the final descriptor to the beginning of
128 the list.  The ring sizes are set at compile time by RX/TX_RING_SIZE.
129 Some chips explicitly use only 2^N sized rings, while others use a
130 'next descriptor' pointer that the driver forms into rings.
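
The rings are walked with plain modular indices, which the compiler reduces
to a bit mask because the sizes are powers of two.  A condensed view of the
idiom used throughout this driver (with RX_RING_SIZE of 64, '%' becomes
'& 63'):

	entry = np->cur_rx % RX_RING_SIZE;
	desc = &np->rx_ring[entry];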
131 
132 IIIb/c. Transmit/Receive Structure
133 
134 This driver uses a zero-copy receive and transmit scheme.
135 The driver allocates full frame size skbuffs for the Rx ring buffers at
136 open() time and passes the skb->data field to the chip as receive data
137 buffers.  When an incoming frame is less than RX_COPYBREAK bytes long,
138 a fresh skbuff is allocated and the frame is copied to the new skbuff.
139 When the incoming frame is larger, the skbuff is passed directly up the
140 protocol stack.  Buffers consumed this way are replaced by newly allocated
141 skbuffs in a later phase of receives.
142 
The RX_COPYBREAK value is chosen to trade off the memory wasted by
144 using a full-sized skbuff for small frames vs. the copying costs of larger
145 frames.  New boards are typically used in generously configured machines
146 and the underfilled buffers have negligible impact compared to the benefit of
147 a single allocation size, so the default value of zero results in never
148 copying packets.  When copying is done, the cost is usually mitigated by using
149 a combined copy/checksum routine.  Copying also preloads the cache, which is
150 most useful with small frames.
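
A condensed view of the copybreak test as it appears in rx_poll() below
(the DMA sync/unmap calls are elided here):

	if (pkt_len < rx_copybreak &&
	    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
		skb_reserve(skb, 2);
		skb_copy_to_linear_data(skb, np->rx_skbuff[entry]->data, pkt_len);
		skb_put(skb, pkt_len);
	} else {
		skb_put(skb = np->rx_skbuff[entry], pkt_len);
		np->rx_skbuff[entry] = NULL;
	}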
151 
152 A subtle aspect of the operation is that the IP header at offset 14 in an
153 ethernet frame isn't longword aligned for further processing.
154 Unaligned buffers are permitted by the Sundance hardware, so
155 frames are received into the skbuff at an offset of "+2", 16-byte aligning
156 the IP header.
157 
158 IIId. Synchronization
159 
160 The driver runs as two independent, single-threaded flows of control.  One
161 is the send-packet routine, which enforces single-threaded use by the
162 dev->tbusy flag.  The other thread is the interrupt handler, which is single
163 threaded by the hardware and interrupt handling software.
164 
165 The send packet thread has partial control over the Tx ring and 'dev->tbusy'
166 flag.  It sets the tbusy flag whenever it's queuing a Tx packet. If the next
queue slot is empty, it clears the tbusy flag when finished; otherwise it sets
168 the 'lp->tx_full' flag.
169 
170 The interrupt handler has exclusive control over the Rx ring and records stats
171 from the Tx ring.  After reaping the stats, it marks the Tx queue entry as
172 empty by incrementing the dirty_tx mark. Iff the 'lp->tx_full' flag is set, it
173 clears both the tx_full and tbusy flags.
174 
175 IV. Notes
176 
177 IVb. References
178 
179 The Sundance ST201 datasheet, preliminary version.
180 The Kendin KS8723 datasheet, preliminary version.
181 The ICplus IP100 datasheet, preliminary version.
182 http://www.scyld.com/expert/100mbps.html
183 http://www.scyld.com/expert/NWay.html
184 
185 IVc. Errata
186 
187 */
188 
189 /* Work-around for Kendin chip bugs. */
190 #ifndef CONFIG_SUNDANCE_MMIO
191 #define USE_IO_OPS 1
192 #endif
193 
194 static const struct pci_device_id sundance_pci_tbl[] = {
195 	{ 0x1186, 0x1002, 0x1186, 0x1002, 0, 0, 0 },
196 	{ 0x1186, 0x1002, 0x1186, 0x1003, 0, 0, 1 },
197 	{ 0x1186, 0x1002, 0x1186, 0x1012, 0, 0, 2 },
198 	{ 0x1186, 0x1002, 0x1186, 0x1040, 0, 0, 3 },
199 	{ 0x1186, 0x1002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
200 	{ 0x13F0, 0x0201, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5 },
201 	{ 0x13F0, 0x0200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 6 },
202 	{ }
203 };
204 MODULE_DEVICE_TABLE(pci, sundance_pci_tbl);
205 
206 enum {
207 	netdev_io_size = 128
208 };
209 
210 struct pci_id_info {
	const char *name;
212 };
213 static const struct pci_id_info pci_id_tbl[] = {
214 	{"D-Link DFE-550TX FAST Ethernet Adapter"},
215 	{"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
216 	{"D-Link DFE-580TX 4 port Server Adapter"},
217 	{"D-Link DFE-530TXS FAST Ethernet Adapter"},
218 	{"D-Link DL10050-based FAST Ethernet Adapter"},
219 	{"Sundance Technology Alta"},
220 	{"IC Plus Corporation IP100A FAST Ethernet Adapter"},
221 	{ }	/* terminate list. */
222 };
223 
224 /* This driver was written to use PCI memory space, however x86-oriented
225    hardware often uses I/O space accesses. */
226 
227 /* Offsets to the device registers.
228    Unlike software-only systems, device drivers interact with complex hardware.
229    It's not useful to define symbolic names for every register bit in the
230    device.  The name can only partially document the semantics and make
231    the driver longer and more difficult to read.
232    In general, only the important configuration values or bits changed
233    multiple times should be defined symbolically.
234 */
235 enum alta_offsets {
236 	DMACtrl = 0x00,
237 	TxListPtr = 0x04,
238 	TxDMABurstThresh = 0x08,
239 	TxDMAUrgentThresh = 0x09,
240 	TxDMAPollPeriod = 0x0a,
241 	RxDMAStatus = 0x0c,
242 	RxListPtr = 0x10,
243 	DebugCtrl0 = 0x1a,
244 	DebugCtrl1 = 0x1c,
245 	RxDMABurstThresh = 0x14,
246 	RxDMAUrgentThresh = 0x15,
247 	RxDMAPollPeriod = 0x16,
248 	LEDCtrl = 0x1a,
249 	ASICCtrl = 0x30,
250 	EEData = 0x34,
251 	EECtrl = 0x36,
252 	FlashAddr = 0x40,
253 	FlashData = 0x44,
254 	WakeEvent = 0x45,
255 	TxStatus = 0x46,
256 	TxFrameId = 0x47,
257 	DownCounter = 0x18,
258 	IntrClear = 0x4a,
259 	IntrEnable = 0x4c,
260 	IntrStatus = 0x4e,
261 	MACCtrl0 = 0x50,
262 	MACCtrl1 = 0x52,
263 	StationAddr = 0x54,
264 	MaxFrameSize = 0x5A,
265 	RxMode = 0x5c,
266 	MIICtrl = 0x5e,
267 	MulticastFilter0 = 0x60,
268 	MulticastFilter1 = 0x64,
269 	RxOctetsLow = 0x68,
270 	RxOctetsHigh = 0x6a,
271 	TxOctetsLow = 0x6c,
272 	TxOctetsHigh = 0x6e,
273 	TxFramesOK = 0x70,
274 	RxFramesOK = 0x72,
275 	StatsCarrierError = 0x74,
276 	StatsLateColl = 0x75,
277 	StatsMultiColl = 0x76,
278 	StatsOneColl = 0x77,
279 	StatsTxDefer = 0x78,
280 	RxMissed = 0x79,
281 	StatsTxXSDefer = 0x7a,
282 	StatsTxAbort = 0x7b,
283 	StatsBcastTx = 0x7c,
284 	StatsBcastRx = 0x7d,
285 	StatsMcastTx = 0x7e,
286 	StatsMcastRx = 0x7f,
287 	/* Aliased and bogus values! */
288 	RxStatus = 0x0c,
289 };
290 
291 #define ASIC_HI_WORD(x)	((x) + 2)
292 
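/*
 * These reset bits live in the upper 16 bits of the 32-bit ASICCtrl
 * register (hence ASIC_HI_WORD above), which is why sundance_reset()
 * tests ResetBusy << 16 and its callers pass the reset command shifted
 * left by 16.
 */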
293 enum ASICCtrl_HiWord_bit {
294 	GlobalReset = 0x0001,
295 	RxReset = 0x0002,
296 	TxReset = 0x0004,
297 	DMAReset = 0x0008,
298 	FIFOReset = 0x0010,
299 	NetworkReset = 0x0020,
300 	HostReset = 0x0040,
301 	ResetBusy = 0x0400,
302 };
303 
304 /* Bits in the interrupt status/mask registers. */
305 enum intr_status_bits {
306 	IntrSummary=0x0001, IntrPCIErr=0x0002, IntrMACCtrl=0x0008,
307 	IntrTxDone=0x0004, IntrRxDone=0x0010, IntrRxStart=0x0020,
308 	IntrDrvRqst=0x0040,
309 	StatsMax=0x0080, LinkChange=0x0100,
310 	IntrTxDMADone=0x0200, IntrRxDMADone=0x0400,
311 };
312 
313 /* Bits in the RxMode register. */
314 enum rx_mode_bits {
315 	AcceptAllIPMulti=0x20, AcceptMultiHash=0x10, AcceptAll=0x08,
316 	AcceptBroadcast=0x04, AcceptMulticast=0x02, AcceptMyPhys=0x01,
317 };
318 /* Bits in MACCtrl. */
319 enum mac_ctrl0_bits {
320 	EnbFullDuplex=0x20, EnbRcvLargeFrame=0x40,
321 	EnbFlowCtrl=0x100, EnbPassRxCRC=0x200,
322 };
323 enum mac_ctrl1_bits {
324 	StatsEnable=0x0020,	StatsDisable=0x0040, StatsEnabled=0x0080,
325 	TxEnable=0x0100, TxDisable=0x0200, TxEnabled=0x0400,
326 	RxEnable=0x0800, RxDisable=0x1000, RxEnabled=0x2000,
327 };
328 
329 /* Bits in WakeEvent register. */
330 enum wake_event_bits {
331 	WakePktEnable = 0x01,
332 	MagicPktEnable = 0x02,
333 	LinkEventEnable = 0x04,
334 	WolEnable = 0x80,
335 };
336 
337 /* The Rx and Tx buffer descriptors. */
338 /* Note that using only 32 bit fields simplifies conversion to big-endian
339    architectures. */
340 struct netdev_desc {
341 	__le32 next_desc;
342 	__le32 status;
343 	struct desc_frag { __le32 addr, length; } frag[1];
344 };
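
/* This driver only ever uses frag[0]: each buffer is described by a single
 * fragment whose length field is or'ed with LastFrag. */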
345 
346 /* Bits in netdev_desc.status */
347 enum desc_status_bits {
348 	DescOwn=0x8000,
349 	DescEndPacket=0x4000,
350 	DescEndRing=0x2000,
351 	LastFrag=0x80000000,
352 	DescIntrOnTx=0x8000,
353 	DescIntrOnDMADone=0x80000000,
354 	DisableAlign = 0x00000001,
355 };
356 
357 #define PRIV_ALIGN	15 	/* Required alignment mask */
358 /* Use  __attribute__((aligned (L1_CACHE_BYTES)))  to maintain alignment
359    within the structure. */
360 #define MII_CNT		4
361 struct netdev_private {
362 	/* Descriptor rings first for alignment. */
363 	struct netdev_desc *rx_ring;
364 	struct netdev_desc *tx_ring;
365 	struct sk_buff* rx_skbuff[RX_RING_SIZE];
366 	struct sk_buff* tx_skbuff[TX_RING_SIZE];
	dma_addr_t tx_ring_dma;
	dma_addr_t rx_ring_dma;
369 	struct timer_list timer;		/* Media monitoring timer. */
370 	/* ethtool extra stats */
371 	struct {
372 		u64 tx_multiple_collisions;
373 		u64 tx_single_collisions;
374 		u64 tx_late_collisions;
375 		u64 tx_deferred;
376 		u64 tx_deferred_excessive;
377 		u64 tx_aborted;
378 		u64 tx_bcasts;
379 		u64 rx_bcasts;
380 		u64 tx_mcasts;
381 		u64 rx_mcasts;
382 	} xstats;
383 	/* Frequently used values: keep some adjacent for cache effect. */
384 	spinlock_t lock;
385 	int msg_enable;
386 	int chip_id;
387 	unsigned int cur_rx, dirty_rx;		/* Producer/consumer ring indices */
388 	unsigned int rx_buf_sz;			/* Based on MTU+slack. */
389 	struct netdev_desc *last_tx;		/* Last Tx descriptor used. */
390 	unsigned int cur_tx, dirty_tx;
	/* These values keep track of the transceiver/media in use. */
392 	unsigned int flowctrl:1;
393 	unsigned int default_port:4;		/* Last dev->if_port value. */
394 	unsigned int an_enable:1;
395 	unsigned int speed;
396 	unsigned int wol_enabled:1;			/* Wake on LAN enabled */
397 	struct tasklet_struct rx_tasklet;
398 	struct tasklet_struct tx_tasklet;
399 	int budget;
400 	int cur_task;
401 	/* Multicast and receive mode. */
402 	spinlock_t mcastlock;			/* SMP lock multicast updates. */
403 	u16 mcast_filter[4];
404 	/* MII transceiver section. */
405 	struct mii_if_info mii_if;
406 	int mii_preamble_required;
407 	unsigned char phys[MII_CNT];		/* MII device addresses, only first one used. */
408 	struct pci_dev *pci_dev;
409 	void __iomem *base;
410 	spinlock_t statlock;
411 };
412 
413 /* The station address location in the EEPROM. */
414 #define EEPROM_SA_OFFSET	0x10
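/* The station address is stored at this EEPROM word offset and is read as
 * three little-endian 16-bit words (see the eeprom_read() loop in
 * sundance_probe1()). */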
415 #define DEFAULT_INTR (IntrRxDMADone | IntrPCIErr | \
416 			IntrDrvRqst | IntrTxDone | StatsMax | \
417 			LinkChange)
418 
419 static int  change_mtu(struct net_device *dev, int new_mtu);
420 static int  eeprom_read(void __iomem *ioaddr, int location);
421 static int  mdio_read(struct net_device *dev, int phy_id, int location);
422 static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
423 static int  mdio_wait_link(struct net_device *dev, int wait);
424 static int  netdev_open(struct net_device *dev);
425 static void check_duplex(struct net_device *dev);
426 static void netdev_timer(struct timer_list *t);
427 static void tx_timeout(struct net_device *dev, unsigned int txqueue);
428 static void init_ring(struct net_device *dev);
429 static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev);
430 static int reset_tx (struct net_device *dev);
431 static irqreturn_t intr_handler(int irq, void *dev_instance);
432 static void rx_poll(unsigned long data);
433 static void tx_poll(unsigned long data);
434 static void refill_rx (struct net_device *dev);
static void netdev_error(struct net_device *dev, int intr_status);
437 static void set_rx_mode(struct net_device *dev);
438 static int __set_mac_addr(struct net_device *dev);
439 static int sundance_set_mac_addr(struct net_device *dev, void *data);
440 static struct net_device_stats *get_stats(struct net_device *dev);
441 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
442 static int  netdev_close(struct net_device *dev);
443 static const struct ethtool_ops ethtool_ops;
444 
445 static void sundance_reset(struct net_device *dev, unsigned long reset_cmd)
446 {
447 	struct netdev_private *np = netdev_priv(dev);
448 	void __iomem *ioaddr = np->base + ASICCtrl;
449 	int countdown;
450 
	/* ST201 documentation states ASICCtrl is a 32-bit register */
452 	iowrite32 (reset_cmd | ioread32 (ioaddr), ioaddr);
453 	/* ST201 documentation states reset can take up to 1 ms */
454 	countdown = 10 + 1;
455 	while (ioread32 (ioaddr) & (ResetBusy << 16)) {
456 		if (--countdown == 0) {
457 			printk(KERN_WARNING "%s : reset not completed !!\n", dev->name);
458 			break;
459 		}
460 		udelay(100);
461 	}
462 }
463 
464 #ifdef CONFIG_NET_POLL_CONTROLLER
465 static void sundance_poll_controller(struct net_device *dev)
466 {
467 	struct netdev_private *np = netdev_priv(dev);
468 
469 	disable_irq(np->pci_dev->irq);
470 	intr_handler(np->pci_dev->irq, dev);
471 	enable_irq(np->pci_dev->irq);
472 }
473 #endif
474 
475 static const struct net_device_ops netdev_ops = {
476 	.ndo_open		= netdev_open,
477 	.ndo_stop		= netdev_close,
478 	.ndo_start_xmit		= start_tx,
479 	.ndo_get_stats 		= get_stats,
480 	.ndo_set_rx_mode	= set_rx_mode,
481 	.ndo_do_ioctl 		= netdev_ioctl,
482 	.ndo_tx_timeout		= tx_timeout,
483 	.ndo_change_mtu		= change_mtu,
484 	.ndo_set_mac_address 	= sundance_set_mac_addr,
485 	.ndo_validate_addr	= eth_validate_addr,
486 #ifdef CONFIG_NET_POLL_CONTROLLER
487 	.ndo_poll_controller 	= sundance_poll_controller,
488 #endif
489 };
490 
491 static int sundance_probe1(struct pci_dev *pdev,
492 			   const struct pci_device_id *ent)
493 {
494 	struct net_device *dev;
495 	struct netdev_private *np;
496 	static int card_idx;
497 	int chip_idx = ent->driver_data;
498 	int irq;
499 	int i;
500 	void __iomem *ioaddr;
501 	u16 mii_ctl;
502 	void *ring_space;
503 	dma_addr_t ring_dma;
504 #ifdef USE_IO_OPS
505 	int bar = 0;
506 #else
507 	int bar = 1;
508 #endif
509 	int phy, phy_end, phy_idx = 0;
510 
511 	if (pci_enable_device(pdev))
512 		return -EIO;
513 	pci_set_master(pdev);
514 
515 	irq = pdev->irq;
516 
517 	dev = alloc_etherdev(sizeof(*np));
518 	if (!dev)
519 		return -ENOMEM;
520 	SET_NETDEV_DEV(dev, &pdev->dev);
521 
522 	if (pci_request_regions(pdev, DRV_NAME))
523 		goto err_out_netdev;
524 
525 	ioaddr = pci_iomap(pdev, bar, netdev_io_size);
526 	if (!ioaddr)
527 		goto err_out_res;
528 
529 	for (i = 0; i < 3; i++)
530 		((__le16 *)dev->dev_addr)[i] =
531 			cpu_to_le16(eeprom_read(ioaddr, i + EEPROM_SA_OFFSET));
532 
533 	np = netdev_priv(dev);
534 	np->base = ioaddr;
535 	np->pci_dev = pdev;
536 	np->chip_id = chip_idx;
537 	np->msg_enable = (1 << debug) - 1;
538 	spin_lock_init(&np->lock);
539 	spin_lock_init(&np->statlock);
540 	tasklet_init(&np->rx_tasklet, rx_poll, (unsigned long)dev);
541 	tasklet_init(&np->tx_tasklet, tx_poll, (unsigned long)dev);
542 
543 	ring_space = dma_alloc_coherent(&pdev->dev, TX_TOTAL_SIZE,
544 			&ring_dma, GFP_KERNEL);
545 	if (!ring_space)
546 		goto err_out_cleardev;
547 	np->tx_ring = (struct netdev_desc *)ring_space;
548 	np->tx_ring_dma = ring_dma;
549 
550 	ring_space = dma_alloc_coherent(&pdev->dev, RX_TOTAL_SIZE,
551 			&ring_dma, GFP_KERNEL);
552 	if (!ring_space)
553 		goto err_out_unmap_tx;
554 	np->rx_ring = (struct netdev_desc *)ring_space;
555 	np->rx_ring_dma = ring_dma;
556 
557 	np->mii_if.dev = dev;
558 	np->mii_if.mdio_read = mdio_read;
559 	np->mii_if.mdio_write = mdio_write;
560 	np->mii_if.phy_id_mask = 0x1f;
561 	np->mii_if.reg_num_mask = 0x1f;
562 
563 	/* The chip-specific entries in the device structure. */
564 	dev->netdev_ops = &netdev_ops;
565 	dev->ethtool_ops = &ethtool_ops;
566 	dev->watchdog_timeo = TX_TIMEOUT;
567 
568 	/* MTU range: 68 - 8191 */
569 	dev->min_mtu = ETH_MIN_MTU;
570 	dev->max_mtu = 8191;
571 
572 	pci_set_drvdata(pdev, dev);
573 
574 	i = register_netdev(dev);
575 	if (i)
576 		goto err_out_unmap_rx;
577 
578 	printk(KERN_INFO "%s: %s at %p, %pM, IRQ %d.\n",
579 	       dev->name, pci_id_tbl[chip_idx].name, ioaddr,
580 	       dev->dev_addr, irq);
581 
582 	np->phys[0] = 1;		/* Default setting */
583 	np->mii_preamble_required++;
584 
585 	/*
	 * It seems some PHYs don't deal well with address 0 being accessed
	 * first.
588 	 */
589 	if (sundance_pci_tbl[np->chip_id].device == 0x0200) {
590 		phy = 0;
591 		phy_end = 31;
592 	} else {
593 		phy = 1;
594 		phy_end = 32;	/* wraps to zero, due to 'phy & 0x1f' */
595 	}
596 	for (; phy <= phy_end && phy_idx < MII_CNT; phy++) {
597 		int phyx = phy & 0x1f;
598 		int mii_status = mdio_read(dev, phyx, MII_BMSR);
599 		if (mii_status != 0xffff  &&  mii_status != 0x0000) {
600 			np->phys[phy_idx++] = phyx;
601 			np->mii_if.advertising = mdio_read(dev, phyx, MII_ADVERTISE);
602 			if ((mii_status & 0x0040) == 0)
603 				np->mii_preamble_required++;
604 			printk(KERN_INFO "%s: MII PHY found at address %d, status "
605 				   "0x%4.4x advertising %4.4x.\n",
606 				   dev->name, phyx, mii_status, np->mii_if.advertising);
607 		}
608 	}
609 	np->mii_preamble_required--;
610 
611 	if (phy_idx == 0) {
612 		printk(KERN_INFO "%s: No MII transceiver found, aborting.  ASIC status %x\n",
613 			   dev->name, ioread32(ioaddr + ASICCtrl));
614 		goto err_out_unregister;
615 	}
616 
617 	np->mii_if.phy_id = np->phys[0];
618 
619 	/* Parse override configuration */
620 	np->an_enable = 1;
621 	if (card_idx < MAX_UNITS) {
622 		if (media[card_idx] != NULL) {
623 			np->an_enable = 0;
624 			if (strcmp (media[card_idx], "100mbps_fd") == 0 ||
625 			    strcmp (media[card_idx], "4") == 0) {
626 				np->speed = 100;
627 				np->mii_if.full_duplex = 1;
628 			} else if (strcmp (media[card_idx], "100mbps_hd") == 0 ||
629 				   strcmp (media[card_idx], "3") == 0) {
630 				np->speed = 100;
631 				np->mii_if.full_duplex = 0;
632 			} else if (strcmp (media[card_idx], "10mbps_fd") == 0 ||
633 				   strcmp (media[card_idx], "2") == 0) {
634 				np->speed = 10;
635 				np->mii_if.full_duplex = 1;
636 			} else if (strcmp (media[card_idx], "10mbps_hd") == 0 ||
637 				   strcmp (media[card_idx], "1") == 0) {
638 				np->speed = 10;
639 				np->mii_if.full_duplex = 0;
640 			} else {
641 				np->an_enable = 1;
642 			}
643 		}
644 		if (flowctrl == 1)
645 			np->flowctrl = 1;
646 	}
647 
648 	/* Fibre PHY? */
649 	if (ioread32 (ioaddr + ASICCtrl) & 0x80) {
650 		/* Default 100Mbps Full */
651 		if (np->an_enable) {
652 			np->speed = 100;
653 			np->mii_if.full_duplex = 1;
654 			np->an_enable = 0;
655 		}
656 	}
657 	/* Reset PHY */
658 	mdio_write (dev, np->phys[0], MII_BMCR, BMCR_RESET);
659 	mdelay (300);
660 	/* If flow control enabled, we need to advertise it.*/
661 	if (np->flowctrl)
662 		mdio_write (dev, np->phys[0], MII_ADVERTISE, np->mii_if.advertising | 0x0400);
663 	mdio_write (dev, np->phys[0], MII_BMCR, BMCR_ANENABLE|BMCR_ANRESTART);
664 	/* Force media type */
665 	if (!np->an_enable) {
666 		mii_ctl = 0;
667 		mii_ctl |= (np->speed == 100) ? BMCR_SPEED100 : 0;
668 		mii_ctl |= (np->mii_if.full_duplex) ? BMCR_FULLDPLX : 0;
669 		mdio_write (dev, np->phys[0], MII_BMCR, mii_ctl);
670 		printk (KERN_INFO "Override speed=%d, %s duplex\n",
671 			np->speed, np->mii_if.full_duplex ? "Full" : "Half");
672 
673 	}
674 
675 	/* Perhaps move the reset here? */
676 	/* Reset the chip to erase previous misconfiguration. */
677 	if (netif_msg_hw(np))
678 		printk("ASIC Control is %x.\n", ioread32(ioaddr + ASICCtrl));
679 	sundance_reset(dev, 0x00ff << 16);
680 	if (netif_msg_hw(np))
681 		printk("ASIC Control is now %x.\n", ioread32(ioaddr + ASICCtrl));
682 
683 	card_idx++;
684 	return 0;
685 
686 err_out_unregister:
687 	unregister_netdev(dev);
688 err_out_unmap_rx:
689 	dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE,
690 		np->rx_ring, np->rx_ring_dma);
691 err_out_unmap_tx:
692 	dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE,
693 		np->tx_ring, np->tx_ring_dma);
694 err_out_cleardev:
695 	pci_iounmap(pdev, ioaddr);
696 err_out_res:
697 	pci_release_regions(pdev);
698 err_out_netdev:
699 	free_netdev (dev);
700 	return -ENODEV;
701 }
702 
703 static int change_mtu(struct net_device *dev, int new_mtu)
704 {
705 	if (netif_running(dev))
706 		return -EBUSY;
707 	dev->mtu = new_mtu;
708 	return 0;
709 }
710 
711 #define eeprom_delay(ee_addr)	ioread32(ee_addr)
712 /* Read the EEPROM and MII Management Data I/O (MDIO) interfaces. */
713 static int eeprom_read(void __iomem *ioaddr, int location)
714 {
715 	int boguscnt = 10000;		/* Typical 1900 ticks. */
716 	iowrite16(0x0200 | (location & 0xff), ioaddr + EECtrl);
717 	do {
718 		eeprom_delay(ioaddr + EECtrl);
719 		if (! (ioread16(ioaddr + EECtrl) & 0x8000)) {
720 			return ioread16(ioaddr + EEData);
721 		}
722 	} while (--boguscnt > 0);
723 	return 0;
724 }
725 
726 /*  MII transceiver control section.
727 	Read and write the MII registers using software-generated serial
728 	MDIO protocol.  See the MII specifications or DP83840A data sheet
729 	for details.
730 
	The maximum data clock rate is 2.5 MHz.  The minimum timing is usually
	met by back-to-back 33 MHz PCI cycles. */
733 #define mdio_delay() ioread8(mdio_addr)
734 
735 enum mii_reg_bits {
736 	MDIO_ShiftClk=0x0001, MDIO_Data=0x0002, MDIO_EnbOutput=0x0004,
737 };
738 #define MDIO_EnbIn  (0)
739 #define MDIO_WRITE0 (MDIO_EnbOutput)
740 #define MDIO_WRITE1 (MDIO_Data | MDIO_EnbOutput)
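
/*
 * Editorial note: the bit-banged frames below follow the usual IEEE 802.3
 * clause-22 MDIO layout: a preamble of 32 ones (mdio_sync), start bits 01,
 * a 2-bit opcode (10 = read, 01 = write), the 5-bit PHY address, the 5-bit
 * register address, a 2-bit turnaround, then 16 data bits.  The constants
 * 0xf6 and 0x5002 in mdio_read()/mdio_write() appear to pack the leading
 * idle/start/opcode (and, for writes, turnaround) bits into the command
 * word; consult the DP83840A data sheet referenced above for the details.
 */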
741 
742 /* Generate the preamble required for initial synchronization and
743    a few older transceivers. */
744 static void mdio_sync(void __iomem *mdio_addr)
745 {
746 	int bits = 32;
747 
748 	/* Establish sync by sending at least 32 logic ones. */
749 	while (--bits >= 0) {
750 		iowrite8(MDIO_WRITE1, mdio_addr);
751 		mdio_delay();
752 		iowrite8(MDIO_WRITE1 | MDIO_ShiftClk, mdio_addr);
753 		mdio_delay();
754 	}
755 }
756 
757 static int mdio_read(struct net_device *dev, int phy_id, int location)
758 {
759 	struct netdev_private *np = netdev_priv(dev);
760 	void __iomem *mdio_addr = np->base + MIICtrl;
761 	int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
762 	int i, retval = 0;
763 
764 	if (np->mii_preamble_required)
765 		mdio_sync(mdio_addr);
766 
767 	/* Shift the read command bits out. */
768 	for (i = 15; i >= 0; i--) {
769 		int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
770 
771 		iowrite8(dataval, mdio_addr);
772 		mdio_delay();
773 		iowrite8(dataval | MDIO_ShiftClk, mdio_addr);
774 		mdio_delay();
775 	}
776 	/* Read the two transition, 16 data, and wire-idle bits. */
777 	for (i = 19; i > 0; i--) {
778 		iowrite8(MDIO_EnbIn, mdio_addr);
779 		mdio_delay();
780 		retval = (retval << 1) | ((ioread8(mdio_addr) & MDIO_Data) ? 1 : 0);
781 		iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
782 		mdio_delay();
783 	}
784 	return (retval>>1) & 0xffff;
785 }
786 
787 static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
788 {
789 	struct netdev_private *np = netdev_priv(dev);
790 	void __iomem *mdio_addr = np->base + MIICtrl;
791 	int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location<<18) | value;
792 	int i;
793 
794 	if (np->mii_preamble_required)
795 		mdio_sync(mdio_addr);
796 
797 	/* Shift the command bits out. */
798 	for (i = 31; i >= 0; i--) {
799 		int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
800 
801 		iowrite8(dataval, mdio_addr);
802 		mdio_delay();
803 		iowrite8(dataval | MDIO_ShiftClk, mdio_addr);
804 		mdio_delay();
805 	}
806 	/* Clear out extra bits. */
807 	for (i = 2; i > 0; i--) {
808 		iowrite8(MDIO_EnbIn, mdio_addr);
809 		mdio_delay();
810 		iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
811 		mdio_delay();
812 	}
813 }
814 
815 static int mdio_wait_link(struct net_device *dev, int wait)
816 {
817 	int bmsr;
818 	int phy_id;
819 	struct netdev_private *np;
820 
821 	np = netdev_priv(dev);
822 	phy_id = np->phys[0];
823 
824 	do {
825 		bmsr = mdio_read(dev, phy_id, MII_BMSR);
826 		if (bmsr & 0x0004)
827 			return 0;
828 		mdelay(1);
829 	} while (--wait > 0);
830 	return -1;
831 }
832 
833 static int netdev_open(struct net_device *dev)
834 {
835 	struct netdev_private *np = netdev_priv(dev);
836 	void __iomem *ioaddr = np->base;
837 	const int irq = np->pci_dev->irq;
838 	unsigned long flags;
839 	int i;
840 
841 	sundance_reset(dev, 0x00ff << 16);
842 
843 	i = request_irq(irq, intr_handler, IRQF_SHARED, dev->name, dev);
844 	if (i)
845 		return i;
846 
847 	if (netif_msg_ifup(np))
848 		printk(KERN_DEBUG "%s: netdev_open() irq %d\n", dev->name, irq);
849 
850 	init_ring(dev);
851 
852 	iowrite32(np->rx_ring_dma, ioaddr + RxListPtr);
853 	/* The Tx list pointer is written as packets are queued. */
854 
855 	/* Initialize other registers. */
856 	__set_mac_addr(dev);
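	/* MaxFrameSize is presumably the MTU plus the 14-byte Ethernet
	 * header, plus 4 more for an 802.1Q tag when VLAN support is
	 * compiled in. */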
857 #if IS_ENABLED(CONFIG_VLAN_8021Q)
858 	iowrite16(dev->mtu + 18, ioaddr + MaxFrameSize);
859 #else
860 	iowrite16(dev->mtu + 14, ioaddr + MaxFrameSize);
861 #endif
862 	if (dev->mtu > 2047)
863 		iowrite32(ioread32(ioaddr + ASICCtrl) | 0x0C, ioaddr + ASICCtrl);
864 
865 	/* Configure the PCI bus bursts and FIFO thresholds. */
866 
867 	if (dev->if_port == 0)
868 		dev->if_port = np->default_port;
869 
870 	spin_lock_init(&np->mcastlock);
871 
872 	set_rx_mode(dev);
873 	iowrite16(0, ioaddr + IntrEnable);
874 	iowrite16(0, ioaddr + DownCounter);
875 	/* Set the chip to poll every N*320nsec. */
876 	iowrite8(100, ioaddr + RxDMAPollPeriod);
877 	iowrite8(127, ioaddr + TxDMAPollPeriod);
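	/* With the 320 ns unit noted above, 100 gives a 32 us Rx poll
	 * period and 127 gives roughly a 40.6 us Tx poll period. */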
878 	/* Fix DFE-580TX packet drop issue */
879 	if (np->pci_dev->revision >= 0x14)
880 		iowrite8(0x01, ioaddr + DebugCtrl1);
881 	netif_start_queue(dev);
882 
883 	spin_lock_irqsave(&np->lock, flags);
884 	reset_tx(dev);
885 	spin_unlock_irqrestore(&np->lock, flags);
886 
887 	iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);
888 
889 	/* Disable Wol */
890 	iowrite8(ioread8(ioaddr + WakeEvent) | 0x00, ioaddr + WakeEvent);
891 	np->wol_enabled = 0;
892 
893 	if (netif_msg_ifup(np))
894 		printk(KERN_DEBUG "%s: Done netdev_open(), status: Rx %x Tx %x "
895 			   "MAC Control %x, %4.4x %4.4x.\n",
896 			   dev->name, ioread32(ioaddr + RxStatus), ioread8(ioaddr + TxStatus),
897 			   ioread32(ioaddr + MACCtrl0),
898 			   ioread16(ioaddr + MACCtrl1), ioread16(ioaddr + MACCtrl0));
899 
900 	/* Set the timer to check for link beat. */
901 	timer_setup(&np->timer, netdev_timer, 0);
902 	np->timer.expires = jiffies + 3*HZ;
903 	add_timer(&np->timer);
904 
905 	/* Enable interrupts by setting the interrupt mask. */
906 	iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
907 
908 	return 0;
909 }
910 
911 static void check_duplex(struct net_device *dev)
912 {
913 	struct netdev_private *np = netdev_priv(dev);
914 	void __iomem *ioaddr = np->base;
915 	int mii_lpa = mdio_read(dev, np->phys[0], MII_LPA);
916 	int negotiated = mii_lpa & np->mii_if.advertising;
917 	int duplex;
918 
919 	/* Force media */
920 	if (!np->an_enable || mii_lpa == 0xffff) {
921 		if (np->mii_if.full_duplex)
922 			iowrite16 (ioread16 (ioaddr + MACCtrl0) | EnbFullDuplex,
923 				ioaddr + MACCtrl0);
924 		return;
925 	}
926 
927 	/* Autonegotiation */
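	/* Full duplex if the partner offers 100FULL (0x0100), or if 10FULL
	 * (0x0040) is the only mode in common among 10FULL/100HALF/100FULL
	 * (mask 0x01C0). */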
928 	duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
929 	if (np->mii_if.full_duplex != duplex) {
930 		np->mii_if.full_duplex = duplex;
931 		if (netif_msg_link(np))
932 			printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d "
933 				   "negotiated capability %4.4x.\n", dev->name,
934 				   duplex ? "full" : "half", np->phys[0], negotiated);
935 		iowrite16(ioread16(ioaddr + MACCtrl0) | (duplex ? 0x20 : 0), ioaddr + MACCtrl0);
936 	}
937 }
938 
939 static void netdev_timer(struct timer_list *t)
940 {
941 	struct netdev_private *np = from_timer(np, t, timer);
942 	struct net_device *dev = np->mii_if.dev;
943 	void __iomem *ioaddr = np->base;
944 	int next_tick = 10*HZ;
945 
946 	if (netif_msg_timer(np)) {
947 		printk(KERN_DEBUG "%s: Media selection timer tick, intr status %4.4x, "
948 			   "Tx %x Rx %x.\n",
949 			   dev->name, ioread16(ioaddr + IntrEnable),
950 			   ioread8(ioaddr + TxStatus), ioread32(ioaddr + RxStatus));
951 	}
952 	check_duplex(dev);
953 	np->timer.expires = jiffies + next_tick;
954 	add_timer(&np->timer);
955 }
956 
957 static void tx_timeout(struct net_device *dev, unsigned int txqueue)
958 {
959 	struct netdev_private *np = netdev_priv(dev);
960 	void __iomem *ioaddr = np->base;
961 	unsigned long flag;
962 
963 	netif_stop_queue(dev);
964 	tasklet_disable(&np->tx_tasklet);
965 	iowrite16(0, ioaddr + IntrEnable);
966 	printk(KERN_WARNING "%s: Transmit timed out, TxStatus %2.2x "
967 		   "TxFrameId %2.2x,"
968 		   " resetting...\n", dev->name, ioread8(ioaddr + TxStatus),
969 		   ioread8(ioaddr + TxFrameId));
970 
971 	{
972 		int i;
973 		for (i=0; i<TX_RING_SIZE; i++) {
974 			printk(KERN_DEBUG "%02x %08llx %08x %08x(%02x) %08x %08x\n", i,
975 				(unsigned long long)(np->tx_ring_dma + i*sizeof(*np->tx_ring)),
976 				le32_to_cpu(np->tx_ring[i].next_desc),
977 				le32_to_cpu(np->tx_ring[i].status),
978 				(le32_to_cpu(np->tx_ring[i].status) >> 2) & 0xff,
979 				le32_to_cpu(np->tx_ring[i].frag[0].addr),
980 				le32_to_cpu(np->tx_ring[i].frag[0].length));
981 		}
982 		printk(KERN_DEBUG "TxListPtr=%08x netif_queue_stopped=%d\n",
983 			ioread32(np->base + TxListPtr),
984 			netif_queue_stopped(dev));
985 		printk(KERN_DEBUG "cur_tx=%d(%02x) dirty_tx=%d(%02x)\n",
986 			np->cur_tx, np->cur_tx % TX_RING_SIZE,
987 			np->dirty_tx, np->dirty_tx % TX_RING_SIZE);
988 		printk(KERN_DEBUG "cur_rx=%d dirty_rx=%d\n", np->cur_rx, np->dirty_rx);
989 		printk(KERN_DEBUG "cur_task=%d\n", np->cur_task);
990 	}
991 	spin_lock_irqsave(&np->lock, flag);
992 
993 	/* Stop and restart the chip's Tx processes . */
994 	reset_tx(dev);
995 	spin_unlock_irqrestore(&np->lock, flag);
996 
997 	dev->if_port = 0;
998 
999 	netif_trans_update(dev); /* prevent tx timeout */
1000 	dev->stats.tx_errors++;
1001 	if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
1002 		netif_wake_queue(dev);
1003 	}
1004 	iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
1005 	tasklet_enable(&np->tx_tasklet);
1006 }
1007 
1008 
1009 /* Initialize the Rx and Tx rings, along with various 'dev' bits. */
1010 static void init_ring(struct net_device *dev)
1011 {
1012 	struct netdev_private *np = netdev_priv(dev);
1013 	int i;
1014 
1015 	np->cur_rx = np->cur_tx = 0;
1016 	np->dirty_rx = np->dirty_tx = 0;
1017 	np->cur_task = 0;
1018 
1019 	np->rx_buf_sz = (dev->mtu <= 1520 ? PKT_BUF_SZ : dev->mtu + 16);
1020 
1021 	/* Initialize all Rx descriptors. */
1022 	for (i = 0; i < RX_RING_SIZE; i++) {
1023 		np->rx_ring[i].next_desc = cpu_to_le32(np->rx_ring_dma +
1024 			((i+1)%RX_RING_SIZE)*sizeof(*np->rx_ring));
1025 		np->rx_ring[i].status = 0;
1026 		np->rx_ring[i].frag[0].length = 0;
1027 		np->rx_skbuff[i] = NULL;
1028 	}
1029 
1030 	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
1031 	for (i = 0; i < RX_RING_SIZE; i++) {
1032 		struct sk_buff *skb =
1033 			netdev_alloc_skb(dev, np->rx_buf_sz + 2);
1034 		np->rx_skbuff[i] = skb;
1035 		if (skb == NULL)
1036 			break;
1037 		skb_reserve(skb, 2);	/* 16 byte align the IP header. */
1038 		np->rx_ring[i].frag[0].addr = cpu_to_le32(
1039 			dma_map_single(&np->pci_dev->dev, skb->data,
1040 				np->rx_buf_sz, DMA_FROM_DEVICE));
1041 		if (dma_mapping_error(&np->pci_dev->dev,
1042 					np->rx_ring[i].frag[0].addr)) {
1043 			dev_kfree_skb(skb);
1044 			np->rx_skbuff[i] = NULL;
1045 			break;
1046 		}
1047 		np->rx_ring[i].frag[0].length = cpu_to_le32(np->rx_buf_sz | LastFrag);
1048 	}
1049 	np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
1050 
1051 	for (i = 0; i < TX_RING_SIZE; i++) {
1052 		np->tx_skbuff[i] = NULL;
1053 		np->tx_ring[i].status = 0;
1054 	}
1055 }
1056 
1057 static void tx_poll (unsigned long data)
1058 {
1059 	struct net_device *dev = (struct net_device *)data;
1060 	struct netdev_private *np = netdev_priv(dev);
1061 	unsigned head = np->cur_task % TX_RING_SIZE;
1062 	struct netdev_desc *txdesc =
1063 		&np->tx_ring[(np->cur_tx - 1) % TX_RING_SIZE];
1064 
1065 	/* Chain the next pointer */
1066 	for (; np->cur_tx - np->cur_task > 0; np->cur_task++) {
1067 		int entry = np->cur_task % TX_RING_SIZE;
1068 		txdesc = &np->tx_ring[entry];
1069 		if (np->last_tx) {
1070 			np->last_tx->next_desc = cpu_to_le32(np->tx_ring_dma +
1071 				entry*sizeof(struct netdev_desc));
1072 		}
1073 		np->last_tx = txdesc;
1074 	}
1075 	/* Indicate the latest descriptor of tx ring */
1076 	txdesc->status |= cpu_to_le32(DescIntrOnTx);
1077 
1078 	if (ioread32 (np->base + TxListPtr) == 0)
1079 		iowrite32 (np->tx_ring_dma + head * sizeof(struct netdev_desc),
1080 			np->base + TxListPtr);
1081 }
1082 
1083 static netdev_tx_t
1084 start_tx (struct sk_buff *skb, struct net_device *dev)
1085 {
1086 	struct netdev_private *np = netdev_priv(dev);
1087 	struct netdev_desc *txdesc;
1088 	unsigned entry;
1089 
1090 	/* Calculate the next Tx descriptor entry. */
1091 	entry = np->cur_tx % TX_RING_SIZE;
1092 	np->tx_skbuff[entry] = skb;
1093 	txdesc = &np->tx_ring[entry];
1094 
1095 	txdesc->next_desc = 0;
1096 	txdesc->status = cpu_to_le32 ((entry << 2) | DisableAlign);
1097 	txdesc->frag[0].addr = cpu_to_le32(dma_map_single(&np->pci_dev->dev,
1098 				skb->data, skb->len, DMA_TO_DEVICE));
1099 	if (dma_mapping_error(&np->pci_dev->dev,
1100 				txdesc->frag[0].addr))
1101 			goto drop_frame;
1102 	txdesc->frag[0].length = cpu_to_le32 (skb->len | LastFrag);
1103 
1104 	/* Increment cur_tx before tasklet_schedule() */
1105 	np->cur_tx++;
1106 	mb();
1107 	/* Schedule a tx_poll() task */
1108 	tasklet_schedule(&np->tx_tasklet);
1109 
1110 	/* On some architectures: explicitly flush cache lines here. */
1111 	if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 1 &&
1112 	    !netif_queue_stopped(dev)) {
1113 		/* do nothing */
1114 	} else {
1115 		netif_stop_queue (dev);
1116 	}
1117 	if (netif_msg_tx_queued(np)) {
1118 		printk (KERN_DEBUG
1119 			"%s: Transmit frame #%d queued in slot %d.\n",
1120 			dev->name, np->cur_tx, entry);
1121 	}
1122 	return NETDEV_TX_OK;
1123 
1124 drop_frame:
1125 	dev_kfree_skb_any(skb);
1126 	np->tx_skbuff[entry] = NULL;
1127 	dev->stats.tx_dropped++;
1128 	return NETDEV_TX_OK;
1129 }
1130 
/* Reset the hardware Tx logic and free all Tx buffers */
1132 static int
1133 reset_tx (struct net_device *dev)
1134 {
1135 	struct netdev_private *np = netdev_priv(dev);
1136 	void __iomem *ioaddr = np->base;
1137 	struct sk_buff *skb;
1138 	int i;
1139 
1140 	/* Reset tx logic, TxListPtr will be cleaned */
1141 	iowrite16 (TxDisable, ioaddr + MACCtrl1);
1142 	sundance_reset(dev, (NetworkReset|FIFOReset|DMAReset|TxReset) << 16);
1143 
1144 	/* free all tx skbuff */
1145 	for (i = 0; i < TX_RING_SIZE; i++) {
1146 		np->tx_ring[i].next_desc = 0;
1147 
1148 		skb = np->tx_skbuff[i];
1149 		if (skb) {
1150 			dma_unmap_single(&np->pci_dev->dev,
1151 				le32_to_cpu(np->tx_ring[i].frag[0].addr),
1152 				skb->len, DMA_TO_DEVICE);
1153 			dev_kfree_skb_any(skb);
1154 			np->tx_skbuff[i] = NULL;
1155 			dev->stats.tx_dropped++;
1156 		}
1157 	}
1158 	np->cur_tx = np->dirty_tx = 0;
1159 	np->cur_task = 0;
1160 
1161 	np->last_tx = NULL;
1162 	iowrite8(127, ioaddr + TxDMAPollPeriod);
1163 
1164 	iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);
1165 	return 0;
1166 }
1167 
/* The interrupt handler cleans up after the Tx thread,
   and schedules Rx work. */
1170 static irqreturn_t intr_handler(int irq, void *dev_instance)
1171 {
1172 	struct net_device *dev = (struct net_device *)dev_instance;
1173 	struct netdev_private *np = netdev_priv(dev);
1174 	void __iomem *ioaddr = np->base;
1175 	int hw_frame_id;
1176 	int tx_cnt;
1177 	int tx_status;
1178 	int handled = 0;
1179 	int i;
1180 
1181 	do {
1182 		int intr_status = ioread16(ioaddr + IntrStatus);
1183 		iowrite16(intr_status, ioaddr + IntrStatus);
1184 
1185 		if (netif_msg_intr(np))
1186 			printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n",
1187 				   dev->name, intr_status);
1188 
1189 		if (!(intr_status & DEFAULT_INTR))
1190 			break;
1191 
1192 		handled = 1;
1193 
1194 		if (intr_status & (IntrRxDMADone)) {
1195 			iowrite16(DEFAULT_INTR & ~(IntrRxDone|IntrRxDMADone),
1196 					ioaddr + IntrEnable);
1197 			if (np->budget < 0)
1198 				np->budget = RX_BUDGET;
1199 			tasklet_schedule(&np->rx_tasklet);
1200 		}
1201 		if (intr_status & (IntrTxDone | IntrDrvRqst)) {
1202 			tx_status = ioread16 (ioaddr + TxStatus);
1203 			for (tx_cnt=32; tx_status & 0x80; --tx_cnt) {
1204 				if (netif_msg_tx_done(np))
					printk("%s: Transmit status is %2.2x.\n",
					       dev->name, tx_status);
1208 				if (tx_status & 0x1e) {
1209 					if (netif_msg_tx_err(np))
1210 						printk("%s: Transmit error status %4.4x.\n",
1211 							   dev->name, tx_status);
1212 					dev->stats.tx_errors++;
1213 					if (tx_status & 0x10)
1214 						dev->stats.tx_fifo_errors++;
1215 					if (tx_status & 0x08)
1216 						dev->stats.collisions++;
1217 					if (tx_status & 0x04)
1218 						dev->stats.tx_fifo_errors++;
1219 					if (tx_status & 0x02)
1220 						dev->stats.tx_window_errors++;
1221 
1222 					/*
1223 					** This reset has been verified on
1224 					** DFE-580TX boards ! phdm@macqel.be.
1225 					*/
1226 					if (tx_status & 0x10) {	/* TxUnderrun */
1227 						/* Restart Tx FIFO and transmitter */
1228 						sundance_reset(dev, (NetworkReset|FIFOReset|TxReset) << 16);
1229 						/* No need to reset the Tx pointer here */
1230 					}
1231 					/* Restart the Tx. Need to make sure tx enabled */
1232 					i = 10;
1233 					do {
1234 						iowrite16(ioread16(ioaddr + MACCtrl1) | TxEnable, ioaddr + MACCtrl1);
1235 						if (ioread16(ioaddr + MACCtrl1) & TxEnabled)
1236 							break;
1237 						mdelay(1);
1238 					} while (--i);
1239 				}
1240 				/* Yup, this is a documentation bug.  It cost me *hours*. */
1241 				iowrite16 (0, ioaddr + TxStatus);
1242 				if (tx_cnt < 0) {
1243 					iowrite32(5000, ioaddr + DownCounter);
1244 					break;
1245 				}
1246 				tx_status = ioread16 (ioaddr + TxStatus);
1247 			}
1248 			hw_frame_id = (tx_status >> 8) & 0xff;
		} else {
1250 			hw_frame_id = ioread8(ioaddr + TxFrameId);
1251 		}
1252 
1253 		if (np->pci_dev->revision >= 0x14) {
1254 			spin_lock(&np->lock);
1255 			for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
1256 				int entry = np->dirty_tx % TX_RING_SIZE;
1257 				struct sk_buff *skb;
1258 				int sw_frame_id;
1259 				sw_frame_id = (le32_to_cpu(
1260 					np->tx_ring[entry].status) >> 2) & 0xff;
1261 				if (sw_frame_id == hw_frame_id &&
1262 					!(le32_to_cpu(np->tx_ring[entry].status)
1263 					& 0x00010000))
1264 						break;
1265 				if (sw_frame_id == (hw_frame_id + 1) %
1266 					TX_RING_SIZE)
1267 						break;
1268 				skb = np->tx_skbuff[entry];
1269 				/* Free the original skb. */
1270 				dma_unmap_single(&np->pci_dev->dev,
1271 					le32_to_cpu(np->tx_ring[entry].frag[0].addr),
1272 					skb->len, DMA_TO_DEVICE);
1273 				dev_consume_skb_irq(np->tx_skbuff[entry]);
1274 				np->tx_skbuff[entry] = NULL;
1275 				np->tx_ring[entry].frag[0].addr = 0;
1276 				np->tx_ring[entry].frag[0].length = 0;
1277 			}
1278 			spin_unlock(&np->lock);
1279 		} else {
1280 			spin_lock(&np->lock);
1281 			for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
1282 				int entry = np->dirty_tx % TX_RING_SIZE;
1283 				struct sk_buff *skb;
1284 				if (!(le32_to_cpu(np->tx_ring[entry].status)
1285 							& 0x00010000))
1286 					break;
1287 				skb = np->tx_skbuff[entry];
1288 				/* Free the original skb. */
1289 				dma_unmap_single(&np->pci_dev->dev,
1290 					le32_to_cpu(np->tx_ring[entry].frag[0].addr),
1291 					skb->len, DMA_TO_DEVICE);
1292 				dev_consume_skb_irq(np->tx_skbuff[entry]);
1293 				np->tx_skbuff[entry] = NULL;
1294 				np->tx_ring[entry].frag[0].addr = 0;
1295 				np->tx_ring[entry].frag[0].length = 0;
1296 			}
1297 			spin_unlock(&np->lock);
1298 		}
1299 
1300 		if (netif_queue_stopped(dev) &&
1301 			np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
1302 			/* The ring is no longer full, clear busy flag. */
1303 			netif_wake_queue (dev);
1304 		}
1305 		/* Abnormal error summary/uncommon events handlers. */
1306 		if (intr_status & (IntrPCIErr | LinkChange | StatsMax))
1307 			netdev_error(dev, intr_status);
1308 	} while (0);
1309 	if (netif_msg_intr(np))
1310 		printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
1311 			   dev->name, ioread16(ioaddr + IntrStatus));
1312 	return IRQ_RETVAL(handled);
1313 }
1314 
1315 static void rx_poll(unsigned long data)
1316 {
1317 	struct net_device *dev = (struct net_device *)data;
1318 	struct netdev_private *np = netdev_priv(dev);
1319 	int entry = np->cur_rx % RX_RING_SIZE;
1320 	int boguscnt = np->budget;
1321 	void __iomem *ioaddr = np->base;
1322 	int received = 0;
1323 
1324 	/* If EOP is set on the next entry, it's a new packet. Send it up. */
1325 	while (1) {
1326 		struct netdev_desc *desc = &(np->rx_ring[entry]);
1327 		u32 frame_status = le32_to_cpu(desc->status);
1328 		int pkt_len;
1329 
1330 		if (--boguscnt < 0) {
1331 			goto not_done;
1332 		}
1333 		if (!(frame_status & DescOwn))
1334 			break;
1335 		pkt_len = frame_status & 0x1fff;	/* Chip omits the CRC. */
1336 		if (netif_msg_rx_status(np))
1337 			printk(KERN_DEBUG "  netdev_rx() status was %8.8x.\n",
1338 				   frame_status);
1339 		if (frame_status & 0x001f4000) {
			/* There was an error. */
1341 			if (netif_msg_rx_err(np))
1342 				printk(KERN_DEBUG "  netdev_rx() Rx error was %8.8x.\n",
1343 					   frame_status);
1344 			dev->stats.rx_errors++;
1345 			if (frame_status & 0x00100000)
1346 				dev->stats.rx_length_errors++;
1347 			if (frame_status & 0x00010000)
1348 				dev->stats.rx_fifo_errors++;
1349 			if (frame_status & 0x00060000)
1350 				dev->stats.rx_frame_errors++;
1351 			if (frame_status & 0x00080000)
1352 				dev->stats.rx_crc_errors++;
1353 			if (frame_status & 0x00100000) {
1354 				printk(KERN_WARNING "%s: Oversized Ethernet frame,"
1355 					   " status %8.8x.\n",
1356 					   dev->name, frame_status);
1357 			}
1358 		} else {
1359 			struct sk_buff *skb;
1360 #ifndef final_version
1361 			if (netif_msg_rx_status(np))
1362 				printk(KERN_DEBUG "  netdev_rx() normal Rx pkt length %d"
1363 					   ", bogus_cnt %d.\n",
1364 					   pkt_len, boguscnt);
1365 #endif
1366 			/* Check if the packet is long enough to accept without copying
1367 			   to a minimally-sized skbuff. */
1368 			if (pkt_len < rx_copybreak &&
1369 			    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
1370 				skb_reserve(skb, 2);	/* 16 byte align the IP header */
1371 				dma_sync_single_for_cpu(&np->pci_dev->dev,
1372 						le32_to_cpu(desc->frag[0].addr),
1373 						np->rx_buf_sz, DMA_FROM_DEVICE);
1374 				skb_copy_to_linear_data(skb, np->rx_skbuff[entry]->data, pkt_len);
1375 				dma_sync_single_for_device(&np->pci_dev->dev,
1376 						le32_to_cpu(desc->frag[0].addr),
1377 						np->rx_buf_sz, DMA_FROM_DEVICE);
1378 				skb_put(skb, pkt_len);
1379 			} else {
1380 				dma_unmap_single(&np->pci_dev->dev,
1381 					le32_to_cpu(desc->frag[0].addr),
1382 					np->rx_buf_sz, DMA_FROM_DEVICE);
1383 				skb_put(skb = np->rx_skbuff[entry], pkt_len);
1384 				np->rx_skbuff[entry] = NULL;
1385 			}
1386 			skb->protocol = eth_type_trans(skb, dev);
1387 			/* Note: checksum -> skb->ip_summed = CHECKSUM_UNNECESSARY; */
1388 			netif_rx(skb);
1389 		}
1390 		entry = (entry + 1) % RX_RING_SIZE;
1391 		received++;
1392 	}
1393 	np->cur_rx = entry;
1394 	refill_rx (dev);
1395 	np->budget -= received;
1396 	iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
1397 	return;
1398 
1399 not_done:
1400 	np->cur_rx = entry;
1401 	refill_rx (dev);
1402 	if (!received)
1403 		received = 1;
1404 	np->budget -= received;
1405 	if (np->budget <= 0)
1406 		np->budget = RX_BUDGET;
1407 	tasklet_schedule(&np->rx_tasklet);
1408 }
1409 
1410 static void refill_rx (struct net_device *dev)
1411 {
1412 	struct netdev_private *np = netdev_priv(dev);
1413 	int entry;
1414 	int cnt = 0;
1415 
1416 	/* Refill the Rx ring buffers. */
1417 	for (;(np->cur_rx - np->dirty_rx + RX_RING_SIZE) % RX_RING_SIZE > 0;
1418 		np->dirty_rx = (np->dirty_rx + 1) % RX_RING_SIZE) {
1419 		struct sk_buff *skb;
1420 		entry = np->dirty_rx % RX_RING_SIZE;
1421 		if (np->rx_skbuff[entry] == NULL) {
1422 			skb = netdev_alloc_skb(dev, np->rx_buf_sz + 2);
1423 			np->rx_skbuff[entry] = skb;
1424 			if (skb == NULL)
1425 				break;		/* Better luck next round. */
1426 			skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
1427 			np->rx_ring[entry].frag[0].addr = cpu_to_le32(
1428 				dma_map_single(&np->pci_dev->dev, skb->data,
1429 					np->rx_buf_sz, DMA_FROM_DEVICE));
1430 			if (dma_mapping_error(&np->pci_dev->dev,
1431 				    np->rx_ring[entry].frag[0].addr)) {
1432 			    dev_kfree_skb_irq(skb);
1433 			    np->rx_skbuff[entry] = NULL;
1434 			    break;
1435 			}
1436 		}
1437 		/* Perhaps we need not reset this field. */
1438 		np->rx_ring[entry].frag[0].length =
1439 			cpu_to_le32(np->rx_buf_sz | LastFrag);
1440 		np->rx_ring[entry].status = 0;
1441 		cnt++;
1442 	}
}

static void netdev_error(struct net_device *dev, int intr_status)
1445 {
1446 	struct netdev_private *np = netdev_priv(dev);
1447 	void __iomem *ioaddr = np->base;
1448 	u16 mii_ctl, mii_advertise, mii_lpa;
1449 	int speed;
1450 
1451 	if (intr_status & LinkChange) {
1452 		if (mdio_wait_link(dev, 10) == 0) {
1453 			printk(KERN_INFO "%s: Link up\n", dev->name);
1454 			if (np->an_enable) {
1455 				mii_advertise = mdio_read(dev, np->phys[0],
1456 							   MII_ADVERTISE);
1457 				mii_lpa = mdio_read(dev, np->phys[0], MII_LPA);
1458 				mii_advertise &= mii_lpa;
1459 				printk(KERN_INFO "%s: Link changed: ",
1460 					dev->name);
1461 				if (mii_advertise & ADVERTISE_100FULL) {
1462 					np->speed = 100;
1463 					printk("100Mbps, full duplex\n");
1464 				} else if (mii_advertise & ADVERTISE_100HALF) {
1465 					np->speed = 100;
1466 					printk("100Mbps, half duplex\n");
1467 				} else if (mii_advertise & ADVERTISE_10FULL) {
1468 					np->speed = 10;
1469 					printk("10Mbps, full duplex\n");
1470 				} else if (mii_advertise & ADVERTISE_10HALF) {
1471 					np->speed = 10;
1472 					printk("10Mbps, half duplex\n");
1473 				} else
1474 					printk("\n");
1475 
1476 			} else {
1477 				mii_ctl = mdio_read(dev, np->phys[0], MII_BMCR);
1478 				speed = (mii_ctl & BMCR_SPEED100) ? 100 : 10;
1479 				np->speed = speed;
				printk(KERN_INFO "%s: Link changed: %dMbps, ",
1481 					dev->name, speed);
1482 				printk("%s duplex.\n",
1483 					(mii_ctl & BMCR_FULLDPLX) ?
1484 						"full" : "half");
1485 			}
1486 			check_duplex(dev);
1487 			if (np->flowctrl && np->mii_if.full_duplex) {
1488 				iowrite16(ioread16(ioaddr + MulticastFilter1+2) | 0x0200,
1489 					ioaddr + MulticastFilter1+2);
1490 				iowrite16(ioread16(ioaddr + MACCtrl0) | EnbFlowCtrl,
1491 					ioaddr + MACCtrl0);
1492 			}
1493 			netif_carrier_on(dev);
1494 		} else {
1495 			printk(KERN_INFO "%s: Link down\n", dev->name);
1496 			netif_carrier_off(dev);
1497 		}
1498 	}
1499 	if (intr_status & StatsMax) {
1500 		get_stats(dev);
1501 	}
1502 	if (intr_status & IntrPCIErr) {
1503 		printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n",
1504 			   dev->name, intr_status);
1505 		/* We must do a global reset of DMA to continue. */
1506 	}
1507 }
1508 
1509 static struct net_device_stats *get_stats(struct net_device *dev)
1510 {
1511 	struct netdev_private *np = netdev_priv(dev);
1512 	void __iomem *ioaddr = np->base;
1513 	unsigned long flags;
1514 	u8 late_coll, single_coll, mult_coll;
1515 
1516 	spin_lock_irqsave(&np->statlock, flags);
	/* The chip only needs to report frames silently dropped. */
1518 	dev->stats.rx_missed_errors	+= ioread8(ioaddr + RxMissed);
1519 	dev->stats.tx_packets += ioread16(ioaddr + TxFramesOK);
1520 	dev->stats.rx_packets += ioread16(ioaddr + RxFramesOK);
1521 	dev->stats.tx_carrier_errors += ioread8(ioaddr + StatsCarrierError);
1522 
1523 	mult_coll = ioread8(ioaddr + StatsMultiColl);
1524 	np->xstats.tx_multiple_collisions += mult_coll;
1525 	single_coll = ioread8(ioaddr + StatsOneColl);
1526 	np->xstats.tx_single_collisions += single_coll;
1527 	late_coll = ioread8(ioaddr + StatsLateColl);
1528 	np->xstats.tx_late_collisions += late_coll;
1529 	dev->stats.collisions += mult_coll
1530 		+ single_coll
1531 		+ late_coll;
1532 
1533 	np->xstats.tx_deferred += ioread8(ioaddr + StatsTxDefer);
1534 	np->xstats.tx_deferred_excessive += ioread8(ioaddr + StatsTxXSDefer);
1535 	np->xstats.tx_aborted += ioread8(ioaddr + StatsTxAbort);
1536 	np->xstats.tx_bcasts += ioread8(ioaddr + StatsBcastTx);
1537 	np->xstats.rx_bcasts += ioread8(ioaddr + StatsBcastRx);
1538 	np->xstats.tx_mcasts += ioread8(ioaddr + StatsMcastTx);
1539 	np->xstats.rx_mcasts += ioread8(ioaddr + StatsMcastRx);
1540 
1541 	dev->stats.tx_bytes += ioread16(ioaddr + TxOctetsLow);
1542 	dev->stats.tx_bytes += ioread16(ioaddr + TxOctetsHigh) << 16;
1543 	dev->stats.rx_bytes += ioread16(ioaddr + RxOctetsLow);
1544 	dev->stats.rx_bytes += ioread16(ioaddr + RxOctetsHigh) << 16;
1545 
1546 	spin_unlock_irqrestore(&np->statlock, flags);
1547 
1548 	return &dev->stats;
1549 }
1550 
1551 static void set_rx_mode(struct net_device *dev)
1552 {
1553 	struct netdev_private *np = netdev_priv(dev);
1554 	void __iomem *ioaddr = np->base;
1555 	u16 mc_filter[4];			/* Multicast hash filter */
1556 	u32 rx_mode;
1557 	int i;
1558 
1559 	if (dev->flags & IFF_PROMISC) {			/* Set promiscuous. */
1560 		memset(mc_filter, 0xff, sizeof(mc_filter));
1561 		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptAll | AcceptMyPhys;
1562 	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
1563 		   (dev->flags & IFF_ALLMULTI)) {
1564 		/* Too many to match, or accept all multicasts. */
1565 		memset(mc_filter, 0xff, sizeof(mc_filter));
1566 		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
1567 	} else if (!netdev_mc_empty(dev)) {
1568 		struct netdev_hw_addr *ha;
1569 		int bit;
1570 		int index;
1571 		int crc;
1572 		memset (mc_filter, 0, sizeof (mc_filter));
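		/* Hash each address with the little-endian Ethernet CRC and
		 * use its six most significant bits (reversed into 'index')
		 * to pick one of the 64 filter bits, spread across the four
		 * 16-bit MulticastFilter registers. */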
1573 		netdev_for_each_mc_addr(ha, dev) {
1574 			crc = ether_crc_le(ETH_ALEN, ha->addr);
1575 			for (index=0, bit=0; bit < 6; bit++, crc <<= 1)
1576 				if (crc & 0x80000000) index |= 1 << bit;
1577 			mc_filter[index/16] |= (1 << (index % 16));
1578 		}
1579 		rx_mode = AcceptBroadcast | AcceptMultiHash | AcceptMyPhys;
1580 	} else {
1581 		iowrite8(AcceptBroadcast | AcceptMyPhys, ioaddr + RxMode);
1582 		return;
1583 	}
1584 	if (np->mii_if.full_duplex && np->flowctrl)
1585 		mc_filter[3] |= 0x0200;
1586 
1587 	for (i = 0; i < 4; i++)
1588 		iowrite16(mc_filter[i], ioaddr + MulticastFilter0 + i*2);
1589 	iowrite8(rx_mode, ioaddr + RxMode);
1590 }
1591 
1592 static int __set_mac_addr(struct net_device *dev)
1593 {
1594 	struct netdev_private *np = netdev_priv(dev);
1595 	u16 addr16;
1596 
1597 	addr16 = (dev->dev_addr[0] | (dev->dev_addr[1] << 8));
1598 	iowrite16(addr16, np->base + StationAddr);
1599 	addr16 = (dev->dev_addr[2] | (dev->dev_addr[3] << 8));
1600 	iowrite16(addr16, np->base + StationAddr+2);
1601 	addr16 = (dev->dev_addr[4] | (dev->dev_addr[5] << 8));
1602 	iowrite16(addr16, np->base + StationAddr+4);
1603 	return 0;
1604 }
1605 
1606 /* Invoked with rtnl_lock held */
1607 static int sundance_set_mac_addr(struct net_device *dev, void *data)
1608 {
1609 	const struct sockaddr *addr = data;
1610 
1611 	if (!is_valid_ether_addr(addr->sa_data))
1612 		return -EADDRNOTAVAIL;
1613 	memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
1614 	__set_mac_addr(dev);
1615 
1616 	return 0;
1617 }
1618 
1619 static const struct {
1620 	const char name[ETH_GSTRING_LEN];
1621 } sundance_stats[] = {
1622 	{ "tx_multiple_collisions" },
1623 	{ "tx_single_collisions" },
1624 	{ "tx_late_collisions" },
1625 	{ "tx_deferred" },
1626 	{ "tx_deferred_excessive" },
1627 	{ "tx_aborted" },
1628 	{ "tx_bcasts" },
1629 	{ "rx_bcasts" },
1630 	{ "tx_mcasts" },
1631 	{ "rx_mcasts" },
1632 };
1633 
1634 static int check_if_running(struct net_device *dev)
1635 {
1636 	if (!netif_running(dev))
1637 		return -EINVAL;
1638 	return 0;
1639 }
1640 
1641 static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1642 {
1643 	struct netdev_private *np = netdev_priv(dev);
1644 	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
1645 	strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
1646 }
1647 
1648 static int get_link_ksettings(struct net_device *dev,
1649 			      struct ethtool_link_ksettings *cmd)
1650 {
1651 	struct netdev_private *np = netdev_priv(dev);
1652 	spin_lock_irq(&np->lock);
1653 	mii_ethtool_get_link_ksettings(&np->mii_if, cmd);
1654 	spin_unlock_irq(&np->lock);
1655 	return 0;
1656 }
1657 
1658 static int set_link_ksettings(struct net_device *dev,
1659 			      const struct ethtool_link_ksettings *cmd)
1660 {
1661 	struct netdev_private *np = netdev_priv(dev);
1662 	int res;
1663 	spin_lock_irq(&np->lock);
1664 	res = mii_ethtool_set_link_ksettings(&np->mii_if, cmd);
1665 	spin_unlock_irq(&np->lock);
1666 	return res;
1667 }
1668 
1669 static int nway_reset(struct net_device *dev)
1670 {
1671 	struct netdev_private *np = netdev_priv(dev);
1672 	return mii_nway_restart(&np->mii_if);
1673 }
1674 
1675 static u32 get_link(struct net_device *dev)
1676 {
1677 	struct netdev_private *np = netdev_priv(dev);
1678 	return mii_link_ok(&np->mii_if);
1679 }
1680 
1681 static u32 get_msglevel(struct net_device *dev)
1682 {
1683 	struct netdev_private *np = netdev_priv(dev);
1684 	return np->msg_enable;
1685 }
1686 
1687 static void set_msglevel(struct net_device *dev, u32 val)
1688 {
1689 	struct netdev_private *np = netdev_priv(dev);
1690 	np->msg_enable = val;
1691 }
1692 
1693 static void get_strings(struct net_device *dev, u32 stringset,
1694 		u8 *data)
1695 {
1696 	if (stringset == ETH_SS_STATS)
1697 		memcpy(data, sundance_stats, sizeof(sundance_stats));
1698 }
1699 
1700 static int get_sset_count(struct net_device *dev, int sset)
1701 {
1702 	switch (sset) {
1703 	case ETH_SS_STATS:
1704 		return ARRAY_SIZE(sundance_stats);
1705 	default:
1706 		return -EOPNOTSUPP;
1707 	}
1708 }
1709 
1710 static void get_ethtool_stats(struct net_device *dev,
1711 		struct ethtool_stats *stats, u64 *data)
1712 {
1713 	struct netdev_private *np = netdev_priv(dev);
1714 	int i = 0;
1715 
1716 	get_stats(dev);
1717 	data[i++] = np->xstats.tx_multiple_collisions;
1718 	data[i++] = np->xstats.tx_single_collisions;
1719 	data[i++] = np->xstats.tx_late_collisions;
1720 	data[i++] = np->xstats.tx_deferred;
1721 	data[i++] = np->xstats.tx_deferred_excessive;
1722 	data[i++] = np->xstats.tx_aborted;
1723 	data[i++] = np->xstats.tx_bcasts;
1724 	data[i++] = np->xstats.rx_bcasts;
1725 	data[i++] = np->xstats.tx_mcasts;
1726 	data[i++] = np->xstats.rx_mcasts;
1727 }
1728 
1729 #ifdef CONFIG_PM
1730 
1731 static void sundance_get_wol(struct net_device *dev,
1732 		struct ethtool_wolinfo *wol)
1733 {
1734 	struct netdev_private *np = netdev_priv(dev);
1735 	void __iomem *ioaddr = np->base;
1736 	u8 wol_bits;
1737 
1738 	wol->wolopts = 0;
1739 
1740 	wol->supported = (WAKE_PHY | WAKE_MAGIC);
1741 	if (!np->wol_enabled)
1742 		return;
1743 
1744 	wol_bits = ioread8(ioaddr + WakeEvent);
1745 	if (wol_bits & MagicPktEnable)
1746 		wol->wolopts |= WAKE_MAGIC;
1747 	if (wol_bits & LinkEventEnable)
1748 		wol->wolopts |= WAKE_PHY;
1749 }
1750 
1751 static int sundance_set_wol(struct net_device *dev,
1752 	struct ethtool_wolinfo *wol)
1753 {
1754 	struct netdev_private *np = netdev_priv(dev);
1755 	void __iomem *ioaddr = np->base;
1756 	u8 wol_bits;
1757 
1758 	if (!device_can_wakeup(&np->pci_dev->dev))
1759 		return -EOPNOTSUPP;
1760 
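	/*
	 * Read-modify-write the WakeEvent register: clear all wake sources
	 * first, then re-enable only those requested by the caller.
	 */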
1761 	np->wol_enabled = !!(wol->wolopts);
1762 	wol_bits = ioread8(ioaddr + WakeEvent);
1763 	wol_bits &= ~(WakePktEnable | MagicPktEnable |
1764 			LinkEventEnable | WolEnable);
1765 
1766 	if (np->wol_enabled) {
1767 		if (wol->wolopts & WAKE_MAGIC)
1768 			wol_bits |= (MagicPktEnable | WolEnable);
1769 		if (wol->wolopts & WAKE_PHY)
1770 			wol_bits |= (LinkEventEnable | WolEnable);
1771 	}
1772 	iowrite8(wol_bits, ioaddr + WakeEvent);
1773 
1774 	device_set_wakeup_enable(&np->pci_dev->dev, np->wol_enabled);
1775 
1776 	return 0;
1777 }
1778 #else
1779 #define sundance_get_wol NULL
1780 #define sundance_set_wol NULL
1781 #endif /* CONFIG_PM */
1782 
1783 static const struct ethtool_ops ethtool_ops = {
1784 	.begin = check_if_running,
1785 	.get_drvinfo = get_drvinfo,
1786 	.nway_reset = nway_reset,
1787 	.get_link = get_link,
1788 	.get_wol = sundance_get_wol,
1789 	.set_wol = sundance_set_wol,
1790 	.get_msglevel = get_msglevel,
1791 	.set_msglevel = set_msglevel,
1792 	.get_strings = get_strings,
1793 	.get_sset_count = get_sset_count,
1794 	.get_ethtool_stats = get_ethtool_stats,
1795 	.get_link_ksettings = get_link_ksettings,
1796 	.set_link_ksettings = set_link_ksettings,
1797 };
1798 
1799 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1800 {
1801 	struct netdev_private *np = netdev_priv(dev);
1802 	int rc;
1803 
1804 	if (!netif_running(dev))
1805 		return -EINVAL;
1806 
1807 	spin_lock_irq(&np->lock);
1808 	rc = generic_mii_ioctl(&np->mii_if, if_mii(rq), cmd, NULL);
1809 	spin_unlock_irq(&np->lock);
1810 
1811 	return rc;
1812 }
1813 
1814 static int netdev_close(struct net_device *dev)
1815 {
1816 	struct netdev_private *np = netdev_priv(dev);
1817 	void __iomem *ioaddr = np->base;
1818 	struct sk_buff *skb;
1819 	int i;
1820 
	/* Wait for any running Rx/Tx tasklets to finish, then kill them. */
1822 	tasklet_kill(&np->rx_tasklet);
1823 	tasklet_kill(&np->tx_tasklet);
1824 	np->cur_tx = 0;
1825 	np->dirty_tx = 0;
1826 	np->cur_task = 0;
1827 	np->last_tx = NULL;
1828 
1829 	netif_stop_queue(dev);
1830 
1831 	if (netif_msg_ifdown(np)) {
		printk(KERN_DEBUG "%s: Shutting down ethercard, status was Tx %2.2x "
			   "Rx %8.8x Int %4.4x.\n",
			   dev->name, ioread8(ioaddr + TxStatus),
			   ioread32(ioaddr + RxStatus), ioread16(ioaddr + IntrStatus));
1836 		printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d,  Rx %d / %d.\n",
1837 			   dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx);
1838 	}
1839 
1840 	/* Disable interrupts by clearing the interrupt mask. */
1841 	iowrite16(0x0000, ioaddr + IntrEnable);
1842 
	/* Disable Rx and Tx DMA so the ring resources can be released safely. */
1844 	iowrite32(0x500, ioaddr + DMACtrl);
1845 
1846 	/* Stop the chip's Tx and Rx processes. */
1847 	iowrite16(TxDisable | RxDisable | StatsDisable, ioaddr + MACCtrl1);
1848 
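	/*
	 * Give the Rx/Tx DMA up to ~2 seconds to come to a halt (bits 0xc000
	 * in DMACtrl clear) before resetting the chip.
	 */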
	for (i = 2000; i > 0; i--) {
		if ((ioread32(ioaddr + DMACtrl) & 0xc000) == 0)
			break;
		mdelay(1);
	}
1854 
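	/*
	 * Issue a full reset through the high word of ASICCtrl and poll
	 * ResetBusy (again up to ~2 seconds) until the reset has completed.
	 */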
	iowrite16(GlobalReset | DMAReset | FIFOReset | NetworkReset,
			ioaddr + ASIC_HI_WORD(ASICCtrl));

	for (i = 2000; i > 0; i--) {
		if ((ioread16(ioaddr + ASIC_HI_WORD(ASICCtrl)) & ResetBusy) == 0)
			break;
		mdelay(1);
	}
1863 
1864 #ifdef __i386__
1865 	if (netif_msg_hw(np)) {
1866 		printk(KERN_DEBUG "  Tx ring at %8.8x:\n",
1867 			   (int)(np->tx_ring_dma));
1868 		for (i = 0; i < TX_RING_SIZE; i++)
1869 			printk(KERN_DEBUG " #%d desc. %4.4x %8.8x %8.8x.\n",
1870 				   i, np->tx_ring[i].status, np->tx_ring[i].frag[0].addr,
1871 				   np->tx_ring[i].frag[0].length);
1872 		printk(KERN_DEBUG "  Rx ring %8.8x:\n",
1873 			   (int)(np->rx_ring_dma));
1874 		for (i = 0; i < /*RX_RING_SIZE*/4 ; i++) {
1875 			printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x\n",
1876 				   i, np->rx_ring[i].status, np->rx_ring[i].frag[0].addr,
1877 				   np->rx_ring[i].frag[0].length);
1878 		}
1879 	}
1880 #endif /* __i386__ debugging only */
1881 
1882 	free_irq(np->pci_dev->irq, dev);
1883 
1884 	del_timer_sync(&np->timer);
1885 
1886 	/* Free all the skbuffs in the Rx queue. */
1887 	for (i = 0; i < RX_RING_SIZE; i++) {
1888 		np->rx_ring[i].status = 0;
1889 		skb = np->rx_skbuff[i];
1890 		if (skb) {
1891 			dma_unmap_single(&np->pci_dev->dev,
1892 				le32_to_cpu(np->rx_ring[i].frag[0].addr),
1893 				np->rx_buf_sz, DMA_FROM_DEVICE);
1894 			dev_kfree_skb(skb);
1895 			np->rx_skbuff[i] = NULL;
1896 		}
1897 		np->rx_ring[i].frag[0].addr = cpu_to_le32(0xBADF00D0); /* poison */
1898 	}
1899 	for (i = 0; i < TX_RING_SIZE; i++) {
1900 		np->tx_ring[i].next_desc = 0;
1901 		skb = np->tx_skbuff[i];
1902 		if (skb) {
1903 			dma_unmap_single(&np->pci_dev->dev,
1904 				le32_to_cpu(np->tx_ring[i].frag[0].addr),
1905 				skb->len, DMA_TO_DEVICE);
1906 			dev_kfree_skb(skb);
1907 			np->tx_skbuff[i] = NULL;
1908 		}
1909 	}
1910 
1911 	return 0;
1912 }
1913 
1914 static void sundance_remove1(struct pci_dev *pdev)
1915 {
1916 	struct net_device *dev = pci_get_drvdata(pdev);
1917 
1918 	if (dev) {
		struct netdev_private *np = netdev_priv(dev);

		unregister_netdev(dev);
		dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE,
				  np->rx_ring, np->rx_ring_dma);
		dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE,
				  np->tx_ring, np->tx_ring_dma);
		pci_iounmap(pdev, np->base);
		pci_release_regions(pdev);
		free_netdev(dev);
1928 	}
1929 }
1930 
1931 #ifdef CONFIG_PM
1932 
1933 static int sundance_suspend(struct pci_dev *pci_dev, pm_message_t state)
1934 {
1935 	struct net_device *dev = pci_get_drvdata(pci_dev);
1936 	struct netdev_private *np = netdev_priv(dev);
1937 	void __iomem *ioaddr = np->base;
1938 
1939 	if (!netif_running(dev))
1940 		return 0;
1941 
1942 	netdev_close(dev);
1943 	netif_device_detach(dev);
1944 
1945 	pci_save_state(pci_dev);
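	/*
	 * If Wake-on-LAN is armed, leave the receiver running with a minimal
	 * filter (broadcast plus station address) so wake-up frames can still
	 * be detected while the device is suspended.
	 */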
1946 	if (np->wol_enabled) {
1947 		iowrite8(AcceptBroadcast | AcceptMyPhys, ioaddr + RxMode);
1948 		iowrite16(RxEnable, ioaddr + MACCtrl1);
1949 	}
1950 	pci_enable_wake(pci_dev, pci_choose_state(pci_dev, state),
1951 			np->wol_enabled);
1952 	pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state));
1953 
1954 	return 0;
1955 }
1956 
1957 static int sundance_resume(struct pci_dev *pci_dev)
1958 {
1959 	struct net_device *dev = pci_get_drvdata(pci_dev);
1960 	int err = 0;
1961 
1962 	if (!netif_running(dev))
1963 		return 0;
1964 
1965 	pci_set_power_state(pci_dev, PCI_D0);
1966 	pci_restore_state(pci_dev);
1967 	pci_enable_wake(pci_dev, PCI_D0, 0);
1968 
1969 	err = netdev_open(dev);
1970 	if (err) {
1971 		printk(KERN_ERR "%s: Can't resume interface!\n",
1972 				dev->name);
1973 		goto out;
1974 	}
1975 
1976 	netif_device_attach(dev);
1977 
1978 out:
1979 	return err;
1980 }
1981 
1982 #endif /* CONFIG_PM */
1983 
1984 static struct pci_driver sundance_driver = {
1985 	.name		= DRV_NAME,
1986 	.id_table	= sundance_pci_tbl,
1987 	.probe		= sundance_probe1,
1988 	.remove		= sundance_remove1,
1989 #ifdef CONFIG_PM
1990 	.suspend	= sundance_suspend,
1991 	.resume		= sundance_resume,
1992 #endif /* CONFIG_PM */
1993 };
1994 
1995 static int __init sundance_init(void)
1996 {
1997 	return pci_register_driver(&sundance_driver);
1998 }
1999 
2000 static void __exit sundance_exit(void)
2001 {
2002 	pci_unregister_driver(&sundance_driver);
2003 }
2004 
2005 module_init(sundance_init);
2006 module_exit(sundance_exit);
2007 