1 /* epic100.c: A SMC 83c170 EPIC/100 Fast Ethernet driver for Linux. */
2 /*
3 	Written/copyright 1997-2001 by Donald Becker.
4 
5 	This software may be used and distributed according to the terms of
6 	the GNU General Public License (GPL), incorporated herein by reference.
7 	Drivers based on or derived from this code fall under the GPL and must
8 	retain the authorship, copyright and license notice.  This file is not
9 	a complete program and may only be used when the entire operating
10 	system is licensed under the GPL.
11 
12 	This driver is for the SMC83c170/175 "EPIC" series, as used on the
13 	SMC EtherPower II 9432 PCI adapter, and several CardBus cards.
14 
15 	The author may be reached as becker@scyld.com, or C/O
16 	Scyld Computing Corporation
17 	410 Severn Ave., Suite 210
18 	Annapolis MD 21403
19 
20 	Information and updates available at
21 	http://www.scyld.com/network/epic100.html
22 	[this link no longer provides anything useful -jgarzik]
23 
24 	---------------------------------------------------------------------
25 
26 */
27 
28 #define DRV_NAME        "epic100"
29 #define DRV_VERSION     "2.1"
30 #define DRV_RELDATE     "Sept 11, 2006"
31 
32 /* The user-configurable values.
33    These may be modified when a driver module is loaded.*/
34 
35 static int debug = 1;			/* 1 normal messages, 0 quiet .. 7 verbose. */
36 
37 /* Used to pass the full-duplex flag, etc. */
38 #define MAX_UNITS 8		/* More are supported, limit only on options */
39 static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
40 static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
41 
42 /* Set the copy breakpoint for the copy-only-tiny-frames scheme.
43    Setting to > 1518 effectively disables this feature. */
44 static int rx_copybreak;
45 
46 /* Operational parameters that are set at compile time. */
47 
48 /* Keep the ring sizes a power of two for operational efficiency.
49    The compiler will convert <unsigned>'%'<2^N> into a bit mask.
50    Making the Tx ring too large decreases the effectiveness of channel
51    bonding and packet priority.
52    There are no ill effects from too-large receive rings. */
53 #define TX_RING_SIZE	256
54 #define TX_QUEUE_LEN	240		/* Limit ring entries actually used.  */
55 #define RX_RING_SIZE	256
56 #define TX_TOTAL_SIZE	TX_RING_SIZE*sizeof(struct epic_tx_desc)
57 #define RX_TOTAL_SIZE	RX_RING_SIZE*sizeof(struct epic_rx_desc)
58 
59 /* Operational parameters that usually are not changed. */
60 /* Time in jiffies before concluding the transmitter is hung. */
61 #define TX_TIMEOUT  (2*HZ)
62 
63 #define PKT_BUF_SZ		1536			/* Size of each temporary Rx buffer.*/
64 
65 /* Bytes transferred to chip before transmission starts. */
66 /* Initial threshold, increased on underflow, rounded down to 4 byte units. */
67 #define TX_FIFO_THRESH 256
68 #define RX_FIFO_THRESH 1		/* 0-3, 0==32, 64,96, or 3==128 bytes  */
69 
70 #include <linux/module.h>
71 #include <linux/kernel.h>
72 #include <linux/string.h>
73 #include <linux/timer.h>
74 #include <linux/errno.h>
75 #include <linux/ioport.h>
76 #include <linux/interrupt.h>
77 #include <linux/pci.h>
78 #include <linux/delay.h>
79 #include <linux/netdevice.h>
80 #include <linux/etherdevice.h>
81 #include <linux/skbuff.h>
82 #include <linux/init.h>
83 #include <linux/spinlock.h>
84 #include <linux/ethtool.h>
85 #include <linux/mii.h>
86 #include <linux/crc32.h>
87 #include <linux/bitops.h>
88 #include <asm/io.h>
89 #include <asm/uaccess.h>
90 #include <asm/byteorder.h>
91 
92 /* These identify the driver base version and may not be removed. */
93 static char version[] =
94 DRV_NAME ".c:v1.11 1/7/2001 Written by Donald Becker <becker@scyld.com>\n";
95 static char version2[] =
96 "  (unofficial 2.4.x kernel port, version " DRV_VERSION ", " DRV_RELDATE ")\n";
97 
98 MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
99 MODULE_DESCRIPTION("SMC 83c170 EPIC series Ethernet driver");
100 MODULE_LICENSE("GPL");
101 
102 module_param(debug, int, 0);
103 module_param(rx_copybreak, int, 0);
104 module_param_array(options, int, NULL, 0);
105 module_param_array(full_duplex, int, NULL, 0);
106 MODULE_PARM_DESC(debug, "EPIC/100 debug level (0-5)");
107 MODULE_PARM_DESC(options, "EPIC/100: Bits 0-3: media type, bit 4: full duplex");
108 MODULE_PARM_DESC(rx_copybreak, "EPIC/100 copy breakpoint for copy-only-tiny-frames");
109 MODULE_PARM_DESC(full_duplex, "EPIC/100 full duplex setting(s) (1)");
110 
111 /*
112 				Theory of Operation
113 
114 I. Board Compatibility
115 
116 This device driver is designed for the SMC "EPIC/100", the SMC
117 single-chip Ethernet controllers for PCI.  This chip is used on
118 the SMC EtherPower II boards.
119 
120 II. Board-specific settings
121 
122 PCI bus devices are configured by the system at boot time, so no jumpers
123 need to be set on the board.  The system BIOS will assign the
124 PCI INTA signal to a (preferably otherwise unused) system IRQ line.
125 Note: Kernel versions earlier than 1.3.73 do not support shared PCI
126 interrupt lines.
127 
128 III. Driver operation
129 
130 IIIa. Ring buffers
131 
132 IVb. References
133 
134 http://www.smsc.com/media/Downloads_Public/discontinued/83c171.pdf
135 http://www.smsc.com/media/Downloads_Public/discontinued/83c175.pdf
136 http://scyld.com/expert/NWay.html
137 http://www.national.com/pf/DP/DP83840A.html
138 
139 IVc. Errata
140 
141 */
142 
143 
144 enum chip_capability_flags { MII_PWRDWN=1, TYPE2_INTR=2, NO_MII=4 };
145 
146 #define EPIC_TOTAL_SIZE 0x100
147 #define USE_IO_OPS 1
148 
149 #ifdef USE_IO_OPS
150 #define EPIC_BAR	0
151 #else
152 #define EPIC_BAR	1
153 #endif
154 
155 typedef enum {
156 	SMSC_83C170_0,
157 	SMSC_83C170,
158 	SMSC_83C175,
159 } chip_t;
160 
161 
162 struct epic_chip_info {
163 	const char *name;
164         int drv_flags;                          /* Driver use, intended as capability flags. */
165 };
166 
167 
168 /* indexed by chip_t */
169 static const struct epic_chip_info pci_id_tbl[] = {
170 	{ "SMSC EPIC/100 83c170",	TYPE2_INTR | NO_MII | MII_PWRDWN },
171 	{ "SMSC EPIC/100 83c170",	TYPE2_INTR },
172 	{ "SMSC EPIC/C 83c175",		TYPE2_INTR | MII_PWRDWN },
173 };
174 
175 
176 static DEFINE_PCI_DEVICE_TABLE(epic_pci_tbl) = {
177 	{ 0x10B8, 0x0005, 0x1092, 0x0AB4, 0, 0, SMSC_83C170_0 },
178 	{ 0x10B8, 0x0005, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SMSC_83C170 },
179 	{ 0x10B8, 0x0006, PCI_ANY_ID, PCI_ANY_ID,
180 	  PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, SMSC_83C175 },
181 	{ 0,}
182 };
183 MODULE_DEVICE_TABLE (pci, epic_pci_tbl);
184 
/* Register accessors.  Each expects a local 'void __iomem *ioaddr' to be in
   scope at the call site (every function below sets one up from ep->ioaddr). */
#define ew16(reg, val)	iowrite16(val, ioaddr + (reg))
#define ew32(reg, val)	iowrite32(val, ioaddr + (reg))
#define er8(reg)	ioread8(ioaddr + (reg))
#define er16(reg)	ioread16(ioaddr + (reg))
#define er32(reg)	ioread32(ioaddr + (reg))

/* Offsets to registers, using the (ugh) SMC names. */
enum epic_registers {
  COMMAND=0, INTSTAT=4, INTMASK=8, GENCTL=0x0C, NVCTL=0x10, EECTL=0x14,
  PCIBurstCnt=0x18,
  TEST1=0x1C, CRCCNT=0x20, ALICNT=0x24, MPCNT=0x28,	/* Rx error counters. */
  MIICtrl=0x30, MIIData=0x34, MIICfg=0x38,
  LAN0=64,						/* MAC address. */
  MC0=80,						/* Multicast filter table. */
  RxCtrl=96, TxCtrl=112, TxSTAT=0x74,
  PRxCDAR=0x84, RxSTAT=0xA4, EarlyRx=0xB0, PTxCDAR=0xC4, TxThresh=0xDC,
};

/* Interrupt register bits, using my own meaningful names. */
enum IntrStatus {
	TxIdle=0x40000, RxIdle=0x20000, IntrSummary=0x010000,
	PCIBusErr170=0x7000, PCIBusErr175=0x1000, PhyEvent175=0x8000,
	RxStarted=0x0800, RxEarlyWarn=0x0400, CntFull=0x0200, TxUnderrun=0x0100,
	TxEmpty=0x0080, TxDone=0x0020, RxError=0x0010,
	RxOverflow=0x0008, RxFull=0x0004, RxHeader=0x0002, RxDone=0x0001,
};
/* Bits written to the COMMAND register to kick/stop the DMA engines. */
enum CommandBits {
	StopRx=1, StartRx=2, TxQueued=4, RxQueued=8,
	StopTxDMA=0x20, StopRxDMA=0x40, RestartTx=0x80,
};

#define EpicRemoved	0xffffffff	/* Chip failed or removed (CardBus) */

/* Interrupt sources serviced from the NAPI poll loop rather than directly
   in the hard interrupt handler; EpicNormalEvent is everything else. */
#define EpicNapiEvent	(TxEmpty | TxDone | \
			 RxDone | RxStarted | RxEarlyWarn | RxOverflow | RxFull)
#define EpicNormalEvent	(0x0000ffff & ~EpicNapiEvent)

/* MII BMCR values indexed by the low 4 bits of dev->if_port; 0 leaves the
   PHY autonegotiating.  (0x2000/0x0100 look like the BMCR speed-100 and
   full-duplex bits; 0x0C00 presumably isolates/powers down the PHY for
   non-MII media -- TODO confirm against the BMCR bit definitions.) */
static const u16 media2miictl[16] = {
	0, 0x0C00, 0x0C00, 0x2000,  0x0100, 0x2100, 0, 0,
	0, 0, 0, 0,  0, 0, 0, 0 };
225 
226 /*
227  * The EPIC100 Rx and Tx buffer descriptors.  Note that these
228  * really ARE host-endian; it's not a misannotation.  We tell
229  * the card to byteswap them internally on big-endian hosts -
230  * look for #ifdef __BIG_ENDIAN in epic_open().
231  */
232 
/* Hardware Tx descriptor: status/ownership word, DMA buffer address,
   control+length word, and DMA address of the next descriptor. */
struct epic_tx_desc {
	u32 txstatus;
	u32 bufaddr;
	u32 buflength;
	u32 next;
};

/* Hardware Rx descriptor; same layout as the Tx descriptor. */
struct epic_rx_desc {
	u32 rxstatus;
	u32 bufaddr;
	u32 buflength;
	u32 next;
};

enum desc_status_bits {
	DescOwn=0x8000,		/* Descriptor owned by the chip, not the driver. */
};
250 
#define PRIV_ALIGN	15 	/* Required alignment mask */
/* Per-device driver state, kept in netdev_priv(dev). */
struct epic_private {
	struct epic_rx_desc *rx_ring;		/* DMA-coherent descriptor rings... */
	struct epic_tx_desc *tx_ring;
	/* The saved address of a sent-in-place packet/buffer, for skfree(). */
	struct sk_buff* tx_skbuff[TX_RING_SIZE];
	/* The addresses of receive-in-place skbuffs. */
	struct sk_buff* rx_skbuff[RX_RING_SIZE];

	dma_addr_t tx_ring_dma;			/* ...and their bus addresses. */
	dma_addr_t rx_ring_dma;

	/* Ring pointers. */
	spinlock_t lock;				/* Group with Tx control cache line. */
	spinlock_t napi_lock;
	struct napi_struct napi;
	unsigned int reschedule_in_poll;
	unsigned int cur_tx, dirty_tx;		/* Producer/consumer ring indices. */

	unsigned int cur_rx, dirty_rx;
	u32 irq_mask;				/* Interrupt sources we unmask. */
	unsigned int rx_buf_sz;				/* Based on MTU+slack. */

	void __iomem *ioaddr;			/* Mapped chip registers. */
	struct pci_dev *pci_dev;			/* PCI bus location. */
	int chip_id, chip_flags;

	struct timer_list timer;			/* Media selection timer. */
	int tx_threshold;
	unsigned char mc_filter[8];
	signed char phys[4];				/* MII device addresses. */
	u16 advertising;					/* NWay media advertisement */
	int mii_phy_cnt;
	struct mii_if_info mii;
	unsigned int tx_full:1;				/* The Tx queue is full. */
	unsigned int default_port:4;		/* Last dev->if_port value. */
};
288 
289 static int epic_open(struct net_device *dev);
290 static int read_eeprom(struct epic_private *, int);
291 static int mdio_read(struct net_device *dev, int phy_id, int location);
292 static void mdio_write(struct net_device *dev, int phy_id, int loc, int val);
293 static void epic_restart(struct net_device *dev);
294 static void epic_timer(unsigned long data);
295 static void epic_tx_timeout(struct net_device *dev);
296 static void epic_init_ring(struct net_device *dev);
297 static netdev_tx_t epic_start_xmit(struct sk_buff *skb,
298 				   struct net_device *dev);
299 static int epic_rx(struct net_device *dev, int budget);
300 static int epic_poll(struct napi_struct *napi, int budget);
301 static irqreturn_t epic_interrupt(int irq, void *dev_instance);
302 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
303 static const struct ethtool_ops netdev_ethtool_ops;
304 static int epic_close(struct net_device *dev);
305 static struct net_device_stats *epic_get_stats(struct net_device *dev);
306 static void set_rx_mode(struct net_device *dev);
307 
/* net_device method table; generic eth_* helpers handle MTU/MAC changes. */
static const struct net_device_ops epic_netdev_ops = {
	.ndo_open		= epic_open,
	.ndo_stop		= epic_close,
	.ndo_start_xmit		= epic_start_xmit,
	.ndo_tx_timeout 	= epic_tx_timeout,
	.ndo_get_stats		= epic_get_stats,
	.ndo_set_rx_mode	= set_rx_mode,
	.ndo_do_ioctl 		= netdev_ioctl,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_set_mac_address 	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};
320 
/*
 * epic_init_one - PCI probe callback.
 *
 * Enables the device, maps BAR EPIC_BAR, allocates the net_device and the
 * DMA-coherent Rx/Tx descriptor rings, wakes the chip out of low-power
 * mode, reads the station address from the LAN0 registers, scans the MII
 * bus for transceivers, and registers the netdev.  Failures unwind in
 * reverse order through the goto ladder at the bottom.
 *
 * Returns 0 on success or a negative errno.
 */
static int epic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int card_idx = -1;
	void __iomem *ioaddr;
	int chip_idx = (int) ent->driver_data;
	int irq;
	struct net_device *dev;
	struct epic_private *ep;
	int i, ret, option = 0, duplex = 0;
	void *ring_space;
	dma_addr_t ring_dma;

/* when built into the kernel, we only print version if device is found */
#ifndef MODULE
	static int printed_version;
	if (!printed_version++)
		printk(KERN_INFO "%s%s", version, version2);
#endif

	/* Driver-wide unit number, used to index options[]/full_duplex[]. */
	card_idx++;

	ret = pci_enable_device(pdev);
	if (ret)
		goto out;
	irq = pdev->irq;

	if (pci_resource_len(pdev, 0) < EPIC_TOTAL_SIZE) {
		dev_err(&pdev->dev, "no PCI region space\n");
		ret = -ENODEV;
		goto err_out_disable;
	}

	pci_set_master(pdev);

	ret = pci_request_regions(pdev, DRV_NAME);
	if (ret < 0)
		goto err_out_disable;

	/* Default error code for the allocation failures below. */
	ret = -ENOMEM;

	dev = alloc_etherdev(sizeof (*ep));
	if (!dev)
		goto err_out_free_res;

	SET_NETDEV_DEV(dev, &pdev->dev);

	ioaddr = pci_iomap(pdev, EPIC_BAR, 0);
	if (!ioaddr) {
		dev_err(&pdev->dev, "ioremap failed\n");
		goto err_out_free_netdev;
	}

	pci_set_drvdata(pdev, dev);
	ep = netdev_priv(dev);
	ep->ioaddr = ioaddr;
	ep->mii.dev = dev;
	ep->mii.mdio_read = mdio_read;
	ep->mii.mdio_write = mdio_write;
	ep->mii.phy_id_mask = 0x1f;
	ep->mii.reg_num_mask = 0x1f;

	ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
	if (!ring_space)
		goto err_out_iounmap;
	ep->tx_ring = ring_space;
	ep->tx_ring_dma = ring_dma;

	ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
	if (!ring_space)
		goto err_out_unmap_tx;
	ep->rx_ring = ring_space;
	ep->rx_ring_dma = ring_dma;

	/* Per-device media overrides: dev->mem_start (kernel command line)
	   takes precedence over the module parameters. */
	if (dev->mem_start) {
		option = dev->mem_start;
		duplex = (dev->mem_start & 16) ? 1 : 0;
	} else if (card_idx >= 0  &&  card_idx < MAX_UNITS) {
		if (options[card_idx] >= 0)
			option = options[card_idx];
		if (full_duplex[card_idx] >= 0)
			duplex = full_duplex[card_idx];
	}

	spin_lock_init(&ep->lock);
	spin_lock_init(&ep->napi_lock);
	ep->reschedule_in_poll = 0;

	/* Bring the chip out of low-power mode. */
	ew32(GENCTL, 0x4200);
	/* Magic?!  If we don't set this bit the MII interface won't work. */
	/* This magic is documented in SMSC app note 7.15 */
	for (i = 16; i > 0; i--)
		ew32(TEST1, 0x0008);

	/* Turn on the MII transceiver. */
	ew32(MIICfg, 0x12);
	if (chip_idx == 1)
		ew32(NVCTL, (er32(NVCTL) & ~0x003c) | 0x4800);
	ew32(GENCTL, 0x0200);

	/* Note: the '175 does not have a serial EEPROM. */
	for (i = 0; i < 3; i++)
		((__le16 *)dev->dev_addr)[i] = cpu_to_le16(er16(LAN0 + i*4));

	if (debug > 2) {
		dev_printk(KERN_DEBUG, &pdev->dev, "EEPROM contents:\n");
		for (i = 0; i < 64; i++)
			printk(" %4.4x%s", read_eeprom(ep, i),
				   i % 16 == 15 ? "\n" : "");
	}

	ep->pci_dev = pdev;
	ep->chip_id = chip_idx;
	ep->chip_flags = pci_id_tbl[chip_idx].drv_flags;
	ep->irq_mask =
		(ep->chip_flags & TYPE2_INTR ?  PCIBusErr175 : PCIBusErr170)
		 | CntFull | TxUnderrun | EpicNapiEvent;

	/* Find the connected MII xcvrs.
	   Doing this in open() would allow detecting external xcvrs later, but
	   takes much time and no cards have external MII. */
	{
		int phy, phy_idx = 0;
		/* At most sizeof(ep->phys) (== 4) PHY addresses are recorded. */
		for (phy = 1; phy < 32 && phy_idx < sizeof(ep->phys); phy++) {
			int mii_status = mdio_read(dev, phy, MII_BMSR);
			/* All-ones/all-zeros means no transceiver at this address. */
			if (mii_status != 0xffff  &&  mii_status != 0x0000) {
				ep->phys[phy_idx++] = phy;
				dev_info(&pdev->dev,
					"MII transceiver #%d control "
					"%4.4x status %4.4x.\n",
					phy, mdio_read(dev, phy, 0), mii_status);
			}
		}
		ep->mii_phy_cnt = phy_idx;
		if (phy_idx != 0) {
			phy = ep->phys[0];
			ep->mii.advertising = mdio_read(dev, phy, MII_ADVERTISE);
			dev_info(&pdev->dev,
				"Autonegotiation advertising %4.4x link "
				   "partner %4.4x.\n",
				   ep->mii.advertising, mdio_read(dev, phy, 5));
		} else if ( ! (ep->chip_flags & NO_MII)) {
			dev_warn(&pdev->dev,
				"***WARNING***: No MII transceiver found!\n");
			/* Use the known PHY address of the EPII. */
			ep->phys[0] = 3;
		}
		ep->mii.phy_id = ep->phys[0];
	}

	/* Turn off the MII xcvr (175 only!), leave the chip in low-power mode. */
	if (ep->chip_flags & MII_PWRDWN)
		ew32(NVCTL, er32(NVCTL) & ~0x483c);
	ew32(GENCTL, 0x0008);

	/* The lower four bits are the media type. */
	if (duplex) {
		ep->mii.force_media = ep->mii.full_duplex = 1;
		dev_info(&pdev->dev, "Forced full duplex requested.\n");
	}
	dev->if_port = ep->default_port = option;

	/* The Epic-specific entries in the device structure. */
	dev->netdev_ops = &epic_netdev_ops;
	dev->ethtool_ops = &netdev_ethtool_ops;
	dev->watchdog_timeo = TX_TIMEOUT;
	netif_napi_add(dev, &ep->napi, epic_poll, 64);

	ret = register_netdev(dev);
	if (ret < 0)
		goto err_out_unmap_rx;

	printk(KERN_INFO "%s: %s at %lx, IRQ %d, %pM\n",
	       dev->name, pci_id_tbl[chip_idx].name,
	       (long)pci_resource_start(pdev, EPIC_BAR), pdev->irq,
	       dev->dev_addr);

out:
	return ret;

err_out_unmap_rx:
	pci_free_consistent(pdev, RX_TOTAL_SIZE, ep->rx_ring, ep->rx_ring_dma);
err_out_unmap_tx:
	pci_free_consistent(pdev, TX_TOTAL_SIZE, ep->tx_ring, ep->tx_ring_dma);
err_out_iounmap:
	pci_iounmap(pdev, ioaddr);
err_out_free_netdev:
	free_netdev(dev);
err_out_free_res:
	pci_release_regions(pdev);
err_out_disable:
	pci_disable_device(pdev);
	goto out;
}
515 
516 /* Serial EEPROM section. */
517 
518 /*  EEPROM_Ctrl bits. */
519 #define EE_SHIFT_CLK	0x04	/* EEPROM shift clock. */
520 #define EE_CS			0x02	/* EEPROM chip select. */
521 #define EE_DATA_WRITE	0x08	/* EEPROM chip data in. */
522 #define EE_WRITE_0		0x01
523 #define EE_WRITE_1		0x09
524 #define EE_DATA_READ	0x10	/* EEPROM chip data out. */
525 #define EE_ENB			(0x0001 | EE_CS)
526 
527 /* Delay between EEPROM clock transitions.
528    This serves to flush the operation to the PCI bus.
529  */
530 
531 #define eeprom_delay()	er32(EECTL)
532 
/* The EEPROM commands include the always-set leading bit. */
534 #define EE_WRITE_CMD	(5 << 6)
535 #define EE_READ64_CMD	(6 << 6)
536 #define EE_READ256_CMD	(6 << 8)
537 #define EE_ERASE_CMD	(7 << 6)
538 
539 static void epic_disable_int(struct net_device *dev, struct epic_private *ep)
540 {
541 	void __iomem *ioaddr = ep->ioaddr;
542 
543 	ew32(INTMASK, 0x00000000);
544 }
545 
/* Flush a posted MMIO write by reading INTMASK back.  Compiled away when
   using port I/O (USE_IO_OPS), where writes are not posted. */
static inline void __epic_pci_commit(void __iomem *ioaddr)
{
#ifndef USE_IO_OPS
	er32(INTMASK);
#endif
}
552 
/* Mask the NAPI-handled interrupt sources (called before polling); the
   read-back commit guarantees the mask takes effect before we return. */
static inline void epic_napi_irq_off(struct net_device *dev,
				     struct epic_private *ep)
{
	void __iomem *ioaddr = ep->ioaddr;

	ew32(INTMASK, ep->irq_mask & ~EpicNapiEvent);
	__epic_pci_commit(ioaddr);
}
561 
/* Re-enable the NAPI-handled interrupt sources after polling completes. */
static inline void epic_napi_irq_on(struct net_device *dev,
				    struct epic_private *ep)
{
	void __iomem *ioaddr = ep->ioaddr;

	/* No need to commit possible posted write */
	ew32(INTMASK, ep->irq_mask | EpicNapiEvent);
}
570 
/*
 * read_eeprom - bit-bang one 16-bit word out of the serial EEPROM.
 * @ep: device state (supplies the register base)
 * @location: word index to read
 *
 * EECTL bit 0x40 apparently distinguishes the small (64-word) from the
 * large (256-word) EEPROM, which need different read opcodes -- TODO
 * confirm against the 83c171 datasheet.  Command and data are shifted
 * MSB first; each eeprom_delay() is a flushing read of EECTL.
 */
static int read_eeprom(struct epic_private *ep, int location)
{
	void __iomem *ioaddr = ep->ioaddr;
	int i;
	int retval = 0;
	int read_cmd = location |
		(er32(EECTL) & 0x40 ? EE_READ64_CMD : EE_READ256_CMD);

	/* Pulse chip select to start a new transaction. */
	ew32(EECTL, EE_ENB & ~EE_CS);
	ew32(EECTL, EE_ENB);

	/* Shift the read command bits out. */
	for (i = 12; i >= 0; i--) {
		short dataval = (read_cmd & (1 << i)) ? EE_WRITE_1 : EE_WRITE_0;
		ew32(EECTL, EE_ENB | dataval);
		eeprom_delay();
		ew32(EECTL, EE_ENB | dataval | EE_SHIFT_CLK);
		eeprom_delay();
	}
	ew32(EECTL, EE_ENB);

	/* Clock the 16 data bits in, MSB first. */
	for (i = 16; i > 0; i--) {
		ew32(EECTL, EE_ENB | EE_SHIFT_CLK);
		eeprom_delay();
		retval = (retval << 1) | ((er32(EECTL) & EE_DATA_READ) ? 1 : 0);
		ew32(EECTL, EE_ENB);
		eeprom_delay();
	}

	/* Terminate the EEPROM access. */
	ew32(EECTL, EE_ENB & ~EE_CS);
	return retval;
}
604 
605 #define MII_READOP		1
606 #define MII_WRITEOP		2
/*
 * mdio_read - read an MII management register via the chip's MII engine.
 *
 * Writes the read command to MIICtrl and polls until the chip clears the
 * MII_READOP bit, then returns the 16-bit result from MIIData.  Returns
 * 0xffff if the operation never completes.
 */
static int mdio_read(struct net_device *dev, int phy_id, int location)
{
	struct epic_private *ep = netdev_priv(dev);
	void __iomem *ioaddr = ep->ioaddr;
	int read_cmd = (phy_id << 9) | (location << 4) | MII_READOP;
	int i;

	ew32(MIICtrl, read_cmd);
	/* Typical operation takes 25 loops. */
	for (i = 400; i > 0; i--) {
		barrier();
		if ((er32(MIICtrl) & MII_READOP) == 0) {
			/* Work around read failure bug. */
			if (phy_id == 1 && location < 6 &&
			    er16(MIIData) == 0xffff) {
				/* Retry the whole transaction. */
				ew32(MIICtrl, read_cmd);
				continue;
			}
			return er16(MIIData);
		}
	}
	return 0xffff;
}
630 
/*
 * mdio_write - write an MII management register via the chip's MII engine.
 *
 * Loads MIIData, issues the write command, and polls until the chip
 * clears MII_WRITEOP.  Times out silently if the chip never completes.
 */
static void mdio_write(struct net_device *dev, int phy_id, int loc, int value)
{
	struct epic_private *ep = netdev_priv(dev);
	void __iomem *ioaddr = ep->ioaddr;
	int i;

	ew16(MIIData, value);
	ew32(MIICtrl, (phy_id << 9) | (loc << 4) | MII_WRITEOP);
	for (i = 10000; i > 0; i--) {
		barrier();
		if ((er32(MIICtrl) & MII_WRITEOP) == 0)
			break;
	}
}
645 
646 
/*
 * epic_open - ndo_open: bring the interface up.
 *
 * Resets the chip, requests the (shared) IRQ, initializes the descriptor
 * rings, programs the station address and media/duplex settings, starts
 * the Rx engine and unmasks interrupts, then arms the media-check timer.
 * The register write order below is mandated by the hardware (see the
 * inline comments).  Returns 0 or the request_irq() error.
 */
static int epic_open(struct net_device *dev)
{
	struct epic_private *ep = netdev_priv(dev);
	void __iomem *ioaddr = ep->ioaddr;
	const int irq = ep->pci_dev->irq;
	int rc, i;

	/* Soft reset the chip. */
	ew32(GENCTL, 0x4001);

	napi_enable(&ep->napi);
	rc = request_irq(irq, epic_interrupt, IRQF_SHARED, dev->name, dev);
	if (rc) {
		napi_disable(&ep->napi);
		return rc;
	}

	epic_init_ring(dev);

	ew32(GENCTL, 0x4000);
	/* This magic is documented in SMSC app note 7.15 */
	for (i = 16; i > 0; i--)
		ew32(TEST1, 0x0008);

	/* Pull the chip out of low-power mode, enable interrupts, and set for
	   PCI read multiple.  The MIIcfg setting and strange write order are
	   required by the details of which bits are reset and the transceiver
	   wiring on the Ositech CardBus card.
	*/
#if 0
	ew32(MIICfg, dev->if_port == 1 ? 0x13 : 0x12);
#endif
	if (ep->chip_flags & MII_PWRDWN)
		ew32(NVCTL, (er32(NVCTL) & ~0x003c) | 0x4800);

	/* Tell the chip to byteswap descriptors on big-endian hosts */
#ifdef __BIG_ENDIAN
	ew32(GENCTL, 0x4432 | (RX_FIFO_THRESH << 8));
	er32(GENCTL);
	ew32(GENCTL, 0x0432 | (RX_FIFO_THRESH << 8));
#else
	ew32(GENCTL, 0x4412 | (RX_FIFO_THRESH << 8));
	er32(GENCTL);
	ew32(GENCTL, 0x0412 | (RX_FIFO_THRESH << 8));
#endif

	udelay(20); /* Looks like EPII needs that if you want reliable RX init. FIXME: pci posting bug? */

	/* Program the station address, 16 bits per LAN0 register. */
	for (i = 0; i < 3; i++)
		ew32(LAN0 + i*4, le16_to_cpu(((__le16*)dev->dev_addr)[i]));

	ep->tx_threshold = TX_FIFO_THRESH;
	ew32(TxThresh, ep->tx_threshold);

	/* Non-zero table entry: force the PHY to the selected media mode. */
	if (media2miictl[dev->if_port & 15]) {
		if (ep->mii_phy_cnt)
			mdio_write(dev, ep->phys[0], MII_BMCR, media2miictl[dev->if_port&15]);
		if (dev->if_port == 1) {
			if (debug > 1)
				printk(KERN_INFO "%s: Using the 10base2 transceiver, MII "
					   "status %4.4x.\n",
					   dev->name, mdio_read(dev, ep->phys[0], MII_BMSR));
		}
	} else {
		/* Autonegotiate: derive duplex from the link partner ability. */
		int mii_lpa = mdio_read(dev, ep->phys[0], MII_LPA);
		if (mii_lpa != 0xffff) {
			if ((mii_lpa & LPA_100FULL) || (mii_lpa & 0x01C0) == LPA_10FULL)
				ep->mii.full_duplex = 1;
			else if (! (mii_lpa & LPA_LPACK))
				mdio_write(dev, ep->phys[0], MII_BMCR, BMCR_ANENABLE|BMCR_ANRESTART);
			if (debug > 1)
				printk(KERN_INFO "%s: Setting %s-duplex based on MII xcvr %d"
					   " register read of %4.4x.\n", dev->name,
					   ep->mii.full_duplex ? "full" : "half",
					   ep->phys[0], mii_lpa);
		}
	}

	ew32(TxCtrl, ep->mii.full_duplex ? 0x7f : 0x79);
	ew32(PRxCDAR, ep->rx_ring_dma);
	ew32(PTxCDAR, ep->tx_ring_dma);

	/* Start the chip's Rx process. */
	set_rx_mode(dev);
	ew32(COMMAND, StartRx | RxQueued);

	netif_start_queue(dev);

	/* Enable interrupts by setting the interrupt mask. */
	ew32(INTMASK, RxError | RxHeader | EpicNapiEvent | CntFull |
	     ((ep->chip_flags & TYPE2_INTR) ? PCIBusErr175 : PCIBusErr170) |
	     TxUnderrun);

	if (debug > 1) {
		printk(KERN_DEBUG "%s: epic_open() ioaddr %p IRQ %d "
		       "status %4.4x %s-duplex.\n",
		       dev->name, ioaddr, irq, er32(GENCTL),
		       ep->mii.full_duplex ? "full" : "half");
	}

	/* Set the timer to switch to check for link beat and perhaps switch
	   to an alternate media type. */
	init_timer(&ep->timer);
	ep->timer.expires = jiffies + 3*HZ;
	ep->timer.data = (unsigned long)dev;
	ep->timer.function = epic_timer;				/* timer handler */
	add_timer(&ep->timer);

	return rc;
}
757 
758 /* Reset the chip to recover from a PCI transaction error.
759    This may occur at interrupt time. */
/* Reset the chip to recover from a PCI transaction error.
   This may occur at interrupt time. */
/*
 * epic_pause - quiesce the chip: mask interrupts, stop the DMA engines,
 * harvest the hardware error counters, and drain the Rx ring.  The
 * 16-bit COMMAND write is sufficient: all CommandBits values fit in the
 * low byte.  An all-ones COMMAND read-back means the chip is gone
 * (EpicRemoved), so the counters are skipped in that case.
 */
static void epic_pause(struct net_device *dev)
{
	struct net_device_stats *stats = &dev->stats;
	struct epic_private *ep = netdev_priv(dev);
	void __iomem *ioaddr = ep->ioaddr;

	netif_stop_queue (dev);

	/* Disable interrupts by clearing the interrupt mask. */
	ew32(INTMASK, 0x00000000);
	/* Stop the chip's Tx and Rx DMA processes. */
	ew16(COMMAND, StopRx | StopTxDMA | StopRxDMA);

	/* Update the error counts. */
	if (er16(COMMAND) != 0xffff) {
		stats->rx_missed_errors	+= er8(MPCNT);
		stats->rx_frame_errors	+= er8(ALICNT);
		stats->rx_crc_errors	+= er8(CRCCNT);
	}

	/* Remove the packets on the Rx queue. */
	epic_rx(dev, RX_RING_SIZE);
}
783 
/*
 * epic_restart - soft-reset and fully reprogram the chip without touching
 * the descriptor rings' contents.  The ring base registers are pointed at
 * the current ring positions (cur_rx/dirty_tx) so in-flight work resumes
 * where it left off.  Used from the error paths (e.g. Tx timeout, PCI
 * bus error).
 */
static void epic_restart(struct net_device *dev)
{
	struct epic_private *ep = netdev_priv(dev);
	void __iomem *ioaddr = ep->ioaddr;
	int i;

	/* Soft reset the chip. */
	ew32(GENCTL, 0x4001);

	printk(KERN_DEBUG "%s: Restarting the EPIC chip, Rx %d/%d Tx %d/%d.\n",
		   dev->name, ep->cur_rx, ep->dirty_rx, ep->dirty_tx, ep->cur_tx);
	udelay(1);

	/* This magic is documented in SMSC app note 7.15 */
	for (i = 16; i > 0; i--)
		ew32(TEST1, 0x0008);

	/* Same GENCTL setup as epic_open(), including big-endian byteswap. */
#ifdef __BIG_ENDIAN
	ew32(GENCTL, 0x0432 | (RX_FIFO_THRESH << 8));
#else
	ew32(GENCTL, 0x0412 | (RX_FIFO_THRESH << 8));
#endif
	ew32(MIICfg, dev->if_port == 1 ? 0x13 : 0x12);
	if (ep->chip_flags & MII_PWRDWN)
		ew32(NVCTL, (er32(NVCTL) & ~0x003c) | 0x4800);

	/* Reprogram the station address. */
	for (i = 0; i < 3; i++)
		ew32(LAN0 + i*4, le16_to_cpu(((__le16*)dev->dev_addr)[i]));

	ep->tx_threshold = TX_FIFO_THRESH;
	ew32(TxThresh, ep->tx_threshold);
	ew32(TxCtrl, ep->mii.full_duplex ? 0x7f : 0x79);
	/* Resume the rings at their current positions, not at index 0. */
	ew32(PRxCDAR, ep->rx_ring_dma +
	     (ep->cur_rx % RX_RING_SIZE) * sizeof(struct epic_rx_desc));
	ew32(PTxCDAR, ep->tx_ring_dma +
	     (ep->dirty_tx % TX_RING_SIZE) * sizeof(struct epic_tx_desc));

	/* Start the chip's Rx process. */
	set_rx_mode(dev);
	ew32(COMMAND, StartRx | RxQueued);

	/* Enable interrupts by setting the interrupt mask. */
	ew32(INTMASK, RxError | RxHeader | EpicNapiEvent | CntFull |
	     ((ep->chip_flags & TYPE2_INTR) ? PCIBusErr175 : PCIBusErr170) |
	     TxUnderrun);

	printk(KERN_DEBUG "%s: epic_restart() done, cmd status %4.4x, ctl %4.4x"
		   " interrupt %4.4x.\n",
		   dev->name, er32(COMMAND), er32(GENCTL), er32(INTSTAT));
}
834 
835 static void check_media(struct net_device *dev)
836 {
837 	struct epic_private *ep = netdev_priv(dev);
838 	void __iomem *ioaddr = ep->ioaddr;
839 	int mii_lpa = ep->mii_phy_cnt ? mdio_read(dev, ep->phys[0], MII_LPA) : 0;
840 	int negotiated = mii_lpa & ep->mii.advertising;
841 	int duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
842 
843 	if (ep->mii.force_media)
844 		return;
845 	if (mii_lpa == 0xffff)		/* Bogus read */
846 		return;
847 	if (ep->mii.full_duplex != duplex) {
848 		ep->mii.full_duplex = duplex;
849 		printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d link"
850 			   " partner capability of %4.4x.\n", dev->name,
851 			   ep->mii.full_duplex ? "full" : "half", ep->phys[0], mii_lpa);
852 		ew32(TxCtrl, ep->mii.full_duplex ? 0x7F : 0x79);
853 	}
854 }
855 
/*
 * epic_timer - media-monitor timer callback (armed in epic_open()).
 * @data: the struct net_device pointer, cast to unsigned long.
 *
 * Optionally dumps chip status at high debug levels, re-checks the
 * negotiated duplex via check_media(), and re-arms itself every 5 seconds.
 */
static void epic_timer(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct epic_private *ep = netdev_priv(dev);
	void __iomem *ioaddr = ep->ioaddr;
	int next_tick = 5*HZ;

	if (debug > 3) {
		printk(KERN_DEBUG "%s: Media monitor tick, Tx status %8.8x.\n",
		       dev->name, er32(TxSTAT));
		printk(KERN_DEBUG "%s: Other registers are IntMask %4.4x "
		       "IntStatus %4.4x RxStatus %4.4x.\n", dev->name,
		       er32(INTMASK), er32(INTSTAT), er32(RxSTAT));
	}

	check_media(dev);

	/* Re-arm for the next poll. */
	ep->timer.expires = jiffies + next_tick;
	add_timer(&ep->timer);
}
876 
/*
 * epic_tx_timeout - ndo_tx_timeout: the watchdog declared the transmitter
 * hung.  A Tx FIFO underflow (TxSTAT bit 0x10) only needs a RestartTx
 * kick; anything else gets a full chip restart followed by re-queueing
 * the pending Tx work.
 */
static void epic_tx_timeout(struct net_device *dev)
{
	struct epic_private *ep = netdev_priv(dev);
	void __iomem *ioaddr = ep->ioaddr;

	if (debug > 0) {
		printk(KERN_WARNING "%s: Transmit timeout using MII device, "
		       "Tx status %4.4x.\n", dev->name, er16(TxSTAT));
		if (debug > 1) {
			printk(KERN_DEBUG "%s: Tx indices: dirty_tx %d, cur_tx %d.\n",
				   dev->name, ep->dirty_tx, ep->cur_tx);
		}
	}
	if (er16(TxSTAT) & 0x10) {		/* Tx FIFO underflow. */
		dev->stats.tx_fifo_errors++;
		ew32(COMMAND, RestartTx);
	} else {
		epic_restart(dev);
		ew32(COMMAND, TxQueued);
	}

	dev->trans_start = jiffies; /* prevent tx timeout */
	dev->stats.tx_errors++;
	if (!ep->tx_full)
		netif_wake_queue(dev);
}
903 
904 /* Initialize the Rx and Tx rings, along with various 'dev' bits. */
/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
/*
 * Both rings are built as circular singly-linked lists via each
 * descriptor's 'next' bus address, with the last entry pointing back at
 * the ring base.  Rx buffers are allocated and handed to the chip
 * (DescOwn); Tx descriptors are left empty for epic_start_xmit().
 */
static void epic_init_ring(struct net_device *dev)
{
	struct epic_private *ep = netdev_priv(dev);
	int i;

	ep->tx_full = 0;
	ep->dirty_tx = ep->cur_tx = 0;
	ep->cur_rx = ep->dirty_rx = 0;
	ep->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);

	/* Initialize all Rx descriptors. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		ep->rx_ring[i].rxstatus = 0;
		ep->rx_ring[i].buflength = ep->rx_buf_sz;
		ep->rx_ring[i].next = ep->rx_ring_dma +
				      (i+1)*sizeof(struct epic_rx_desc);
		ep->rx_skbuff[i] = NULL;
	}
	/* Mark the last entry as wrapping the ring. */
	ep->rx_ring[i-1].next = ep->rx_ring_dma;

	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb = netdev_alloc_skb(dev, ep->rx_buf_sz + 2);
		ep->rx_skbuff[i] = skb;
		if (skb == NULL)
			break;
		skb_reserve(skb, 2);	/* 16 byte align the IP header. */
		ep->rx_ring[i].bufaddr = pci_map_single(ep->pci_dev,
			skb->data, ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
		ep->rx_ring[i].rxstatus = DescOwn;
	}
	/* Records how far allocation fell short; wraps negative (mod 2^32)
	   when fewer than RX_RING_SIZE buffers were obtained. */
	ep->dirty_rx = (unsigned int)(i - RX_RING_SIZE);

	/* The Tx buffer descriptor is filled in as needed, but we
	   do need to clear the ownership bit. */
	for (i = 0; i < TX_RING_SIZE; i++) {
		ep->tx_skbuff[i] = NULL;
		ep->tx_ring[i].txstatus = 0x0000;
		ep->tx_ring[i].next = ep->tx_ring_dma +
			(i+1)*sizeof(struct epic_tx_desc);
	}
	ep->tx_ring[i-1].next = ep->tx_ring_dma;
}
949 
/*
 * epic_start_xmit - ndo_start_xmit: queue one skb on the Tx ring.
 *
 * Pads short frames to ETH_ZLEN, maps the data for DMA, fills in the
 * descriptor (ownership bit written last), and kicks the chip with
 * TxQueued.  Tx-done interrupts are only requested on some descriptors
 * to reduce interrupt load.  Always returns NETDEV_TX_OK.
 *
 * NOTE(review): the pci_map_single() return value is not checked for a
 * mapping error before being handed to the chip -- verify against the
 * DMA API requirements.
 */
static netdev_tx_t epic_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct epic_private *ep = netdev_priv(dev);
	void __iomem *ioaddr = ep->ioaddr;
	int entry, free_count;
	u32 ctrl_word;
	unsigned long flags;

	if (skb_padto(skb, ETH_ZLEN))
		return NETDEV_TX_OK;

	/* Caution: the write order is important here, set the field with the
	   "ownership" bit last. */

	/* Calculate the next Tx descriptor entry. */
	spin_lock_irqsave(&ep->lock, flags);
	free_count = ep->cur_tx - ep->dirty_tx;
	entry = ep->cur_tx % TX_RING_SIZE;

	ep->tx_skbuff[entry] = skb;
	ep->tx_ring[entry].bufaddr = pci_map_single(ep->pci_dev, skb->data,
		 			            skb->len, PCI_DMA_TODEVICE);
	/* Request a Tx-done interrupt only at the queue midpoint and when
	   the ring is nearly full; otherwise transmit silently. */
	if (free_count < TX_QUEUE_LEN/2) {/* Typical path */
		ctrl_word = 0x100000; /* No interrupt */
	} else if (free_count == TX_QUEUE_LEN/2) {
		ctrl_word = 0x140000; /* Tx-done intr. */
	} else if (free_count < TX_QUEUE_LEN - 1) {
		ctrl_word = 0x100000; /* No Tx-done intr. */
	} else {
		/* Leave room for an additional entry. */
		ctrl_word = 0x140000; /* Tx-done intr. */
		ep->tx_full = 1;
	}
	ep->tx_ring[entry].buflength = ctrl_word | skb->len;
	/* Ownership transfer to the chip: must be the final descriptor write. */
	ep->tx_ring[entry].txstatus =
		((skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN) << 16)
			    | DescOwn;

	ep->cur_tx++;
	if (ep->tx_full)
		netif_stop_queue(dev);

	spin_unlock_irqrestore(&ep->lock, flags);
	/* Trigger an immediate transmit demand. */
	ew32(COMMAND, TxQueued);

	if (debug > 4)
		printk(KERN_DEBUG "%s: Queued Tx packet size %d to slot %d, "
		       "flag %2.2x Tx status %8.8x.\n", dev->name, skb->len,
		       entry, ctrl_word, er32(TxSTAT));

	return NETDEV_TX_OK;
}
1003 
1004 static void epic_tx_error(struct net_device *dev, struct epic_private *ep,
1005 			  int status)
1006 {
1007 	struct net_device_stats *stats = &dev->stats;
1008 
1009 #ifndef final_version
1010 	/* There was an major error, log it. */
1011 	if (debug > 1)
1012 		printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
1013 		       dev->name, status);
1014 #endif
1015 	stats->tx_errors++;
1016 	if (status & 0x1050)
1017 		stats->tx_aborted_errors++;
1018 	if (status & 0x0008)
1019 		stats->tx_carrier_errors++;
1020 	if (status & 0x0040)
1021 		stats->tx_window_errors++;
1022 	if (status & 0x0010)
1023 		stats->tx_fifo_errors++;
1024 }
1025 
/* Reclaim Tx descriptors the chip has finished with: update the
   statistics, unmap the DMA buffer and free the skb.  Called from
   the NAPI poll handler. */
static void epic_tx(struct net_device *dev, struct epic_private *ep)
{
	unsigned int dirty_tx, cur_tx;

	/*
	 * Note: if this lock becomes a problem we can narrow the locked
	 * region at the cost of occasionally grabbing the lock more times.
	 */
	cur_tx = ep->cur_tx;
	for (dirty_tx = ep->dirty_tx; cur_tx - dirty_tx > 0; dirty_tx++) {
		struct sk_buff *skb;
		int entry = dirty_tx % TX_RING_SIZE;
		int txstatus = ep->tx_ring[entry].txstatus;

		if (txstatus & DescOwn)
			break;	/* It still hasn't been Txed */

		/* Bit 0 set means the frame went out successfully. */
		if (likely(txstatus & 0x0001)) {
			dev->stats.collisions += (txstatus >> 8) & 15;
			dev->stats.tx_packets++;
			dev->stats.tx_bytes += ep->tx_skbuff[entry]->len;
		} else
			epic_tx_error(dev, ep, txstatus);

		/* Free the original skb. */
		skb = ep->tx_skbuff[entry];
		pci_unmap_single(ep->pci_dev, ep->tx_ring[entry].bufaddr,
				 skb->len, PCI_DMA_TODEVICE);
		dev_kfree_skb_irq(skb);
		ep->tx_skbuff[entry] = NULL;
	}

#ifndef final_version
	/* Sanity check: dirty can never trail cur by more than a ring. */
	if (cur_tx - dirty_tx > TX_RING_SIZE) {
		printk(KERN_WARNING
		       "%s: Out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
		       dev->name, dirty_tx, cur_tx, ep->tx_full);
		dirty_tx += TX_RING_SIZE;
	}
#endif
	ep->dirty_tx = dirty_tx;
	/* With at least 4 slots free again, let the stack queue more. */
	if (ep->tx_full && cur_tx - dirty_tx < TX_QUEUE_LEN - 4) {
		/* The ring is no longer full, allow new TX entries. */
		ep->tx_full = 0;
		netif_wake_queue(dev);
	}
}
1073 
/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
static irqreturn_t epic_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct epic_private *ep = netdev_priv(dev);
	void __iomem *ioaddr = ep->ioaddr;
	unsigned int handled = 0;
	int status;

	status = er32(INTSTAT);
	/* Acknowledge all of the current interrupt sources ASAP. */
	ew32(INTSTAT, status & EpicNormalEvent);

	if (debug > 4) {
		printk(KERN_DEBUG "%s: Interrupt, status=%#8.8x new "
		       "intstat=%#8.8x.\n", dev->name, status, er32(INTSTAT));
	}

	/* Not our interrupt (shared IRQ line). */
	if ((status & IntrSummary) == 0)
		goto out;

	handled = 1;

	/* Hand Rx/Tx work off to NAPI.  If a poll is already in flight
	   (prep fails), record the event so epic_poll() loops one more
	   time instead of dropping it. */
	if ((status & EpicNapiEvent) && !ep->reschedule_in_poll) {
		spin_lock(&ep->napi_lock);
		if (napi_schedule_prep(&ep->napi)) {
			epic_napi_irq_off(dev, ep);
			__napi_schedule(&ep->napi);
		} else
			ep->reschedule_in_poll++;
		spin_unlock(&ep->napi_lock);
	}
	status &= ~EpicNapiEvent;

	/* Check uncommon events all at once. */
	if (status & (CntFull | TxUnderrun | PCIBusErr170 | PCIBusErr175)) {
		struct net_device_stats *stats = &dev->stats;

		/* All-ones status: the card has been removed. */
		if (status == EpicRemoved)
			goto out;

		/* Always update the error counts to avoid overhead later. */
		stats->rx_missed_errors	+= er8(MPCNT);
		stats->rx_frame_errors	+= er8(ALICNT);
		stats->rx_crc_errors	+= er8(CRCCNT);

		if (status & TxUnderrun) { /* Tx FIFO underflow. */
			stats->tx_fifo_errors++;
			/* Raise the Tx start threshold to avoid a repeat. */
			ew32(TxThresh, ep->tx_threshold += 128);
			/* Restart the transmit process. */
			ew32(COMMAND, RestartTx);
		}
		if (status & PCIBusErr170) {
			printk(KERN_ERR "%s: PCI Bus Error! status %4.4x.\n",
					 dev->name, status);
			epic_pause(dev);
			epic_restart(dev);
		}
		/* Clear all error sources. */
		ew32(INTSTAT, status & 0x7f18);
	}

out:
	if (debug > 3) {
		printk(KERN_DEBUG "%s: exit interrupt, intr_status=%#4.4x.\n",
				   dev->name, status);
	}

	return IRQ_RETVAL(handled);
}
1145 
/* Receive up to 'budget' frames from the Rx ring.  Frames shorter
   than rx_copybreak are copied into a freshly allocated skb so the
   large ring buffer can stay mapped and be reused; larger frames are
   passed up directly and their slot is refilled in the second loop.
   Returns the amount of work done (frames received + slots refilled). */
static int epic_rx(struct net_device *dev, int budget)
{
	struct epic_private *ep = netdev_priv(dev);
	int entry = ep->cur_rx % RX_RING_SIZE;
	int rx_work_limit = ep->dirty_rx + RX_RING_SIZE - ep->cur_rx;
	int work_done = 0;

	if (debug > 4)
		printk(KERN_DEBUG " In epic_rx(), entry %d %8.8x.\n", entry,
			   ep->rx_ring[entry].rxstatus);

	if (rx_work_limit > budget)
		rx_work_limit = budget;

	/* If we own the next entry, it's a new packet. Send it up. */
	while ((ep->rx_ring[entry].rxstatus & DescOwn) == 0) {
		int status = ep->rx_ring[entry].rxstatus;

		if (debug > 4)
			printk(KERN_DEBUG "  epic_rx() status was %8.8x.\n", status);
		if (--rx_work_limit < 0)
			break;
		/* 0x2000: frame spanned buffers; 0x0006: receive errors. */
		if (status & 0x2006) {
			if (debug > 2)
				printk(KERN_DEBUG "%s: epic_rx() error status was %8.8x.\n",
					   dev->name, status);
			if (status & 0x2000) {
				printk(KERN_WARNING "%s: Oversized Ethernet frame spanned "
					   "multiple buffers, status %4.4x!\n", dev->name, status);
				dev->stats.rx_length_errors++;
			} else if (status & 0x0006)
				/* Rx Frame errors are counted in hardware. */
				dev->stats.rx_errors++;
		} else {
			/* Malloc up new buffer, compatible with net-2e. */
			/* Omit the four octet CRC from the length. */
			short pkt_len = (status >> 16) - 4;
			struct sk_buff *skb;

			if (pkt_len > PKT_BUF_SZ - 4) {
				printk(KERN_ERR "%s: Oversized Ethernet frame, status %x "
					   "%d bytes.\n",
					   dev->name, status, pkt_len);
				pkt_len = 1514;
			}
			/* Check if the packet is long enough to accept without copying
			   to a minimally-sized skbuff. */
			if (pkt_len < rx_copybreak &&
			    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
				skb_reserve(skb, 2);	/* 16 byte align the IP header */
				/* Sync the buffer for CPU access, copy the
				   frame out, then hand it back to the device. */
				pci_dma_sync_single_for_cpu(ep->pci_dev,
							    ep->rx_ring[entry].bufaddr,
							    ep->rx_buf_sz,
							    PCI_DMA_FROMDEVICE);
				skb_copy_to_linear_data(skb, ep->rx_skbuff[entry]->data, pkt_len);
				skb_put(skb, pkt_len);
				pci_dma_sync_single_for_device(ep->pci_dev,
							       ep->rx_ring[entry].bufaddr,
							       ep->rx_buf_sz,
							       PCI_DMA_FROMDEVICE);
			} else {
				/* Pass the ring buffer itself up the stack;
				   the slot is refilled below. */
				pci_unmap_single(ep->pci_dev,
					ep->rx_ring[entry].bufaddr,
					ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
				skb_put(skb = ep->rx_skbuff[entry], pkt_len);
				ep->rx_skbuff[entry] = NULL;
			}
			skb->protocol = eth_type_trans(skb, dev);
			netif_receive_skb(skb);
			dev->stats.rx_packets++;
			dev->stats.rx_bytes += pkt_len;
		}
		work_done++;
		entry = (++ep->cur_rx) % RX_RING_SIZE;
	}

	/* Refill the Rx ring buffers. */
	for (; ep->cur_rx - ep->dirty_rx > 0; ep->dirty_rx++) {
		entry = ep->dirty_rx % RX_RING_SIZE;
		if (ep->rx_skbuff[entry] == NULL) {
			struct sk_buff *skb;
			skb = ep->rx_skbuff[entry] = netdev_alloc_skb(dev, ep->rx_buf_sz + 2);
			if (skb == NULL)
				break;
			skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
			ep->rx_ring[entry].bufaddr = pci_map_single(ep->pci_dev,
				skb->data, ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
			work_done++;
		}
		/* AV: shouldn't we add a barrier here? */
		ep->rx_ring[entry].rxstatus = DescOwn;
	}
	return work_done;
}
1240 
1241 static void epic_rx_err(struct net_device *dev, struct epic_private *ep)
1242 {
1243 	void __iomem *ioaddr = ep->ioaddr;
1244 	int status;
1245 
1246 	status = er32(INTSTAT);
1247 
1248 	if (status == EpicRemoved)
1249 		return;
1250 	if (status & RxOverflow) 	/* Missed a Rx frame. */
1251 		dev->stats.rx_errors++;
1252 	if (status & (RxOverflow | RxFull))
1253 		ew16(COMMAND, RxQueued);
1254 }
1255 
/* NAPI poll: reap finished Tx, receive up to 'budget' frames and
   handle Rx error conditions.  If the interrupt handler flagged more
   work while we were running (reschedule_in_poll), loop instead of
   completing; otherwise complete and re-enable the NAPI interrupts. */
static int epic_poll(struct napi_struct *napi, int budget)
{
	struct epic_private *ep = container_of(napi, struct epic_private, napi);
	struct net_device *dev = ep->mii.dev;
	int work_done = 0;
	void __iomem *ioaddr = ep->ioaddr;

rx_action:

	epic_tx(dev, ep);

	work_done += epic_rx(dev, budget);

	epic_rx_err(dev, ep);

	if (work_done < budget) {
		unsigned long flags;
		int more;

		/* A bit baroque but it avoids a (space hungry) spin_unlock */

		spin_lock_irqsave(&ep->napi_lock, flags);

		more = ep->reschedule_in_poll;
		if (!more) {
			/* Done: ack the NAPI events and unmask them. */
			__napi_complete(napi);
			ew32(INTSTAT, EpicNapiEvent);
			epic_napi_irq_on(dev, ep);
		} else
			ep->reschedule_in_poll--;

		spin_unlock_irqrestore(&ep->napi_lock, flags);

		if (more)
			goto rx_action;
	}

	return work_done;
}
1295 
1296 static int epic_close(struct net_device *dev)
1297 {
1298 	struct epic_private *ep = netdev_priv(dev);
1299 	struct pci_dev *pdev = ep->pci_dev;
1300 	void __iomem *ioaddr = ep->ioaddr;
1301 	struct sk_buff *skb;
1302 	int i;
1303 
1304 	netif_stop_queue(dev);
1305 	napi_disable(&ep->napi);
1306 
1307 	if (debug > 1)
1308 		printk(KERN_DEBUG "%s: Shutting down ethercard, status was %2.2x.\n",
1309 		       dev->name, er32(INTSTAT));
1310 
1311 	del_timer_sync(&ep->timer);
1312 
1313 	epic_disable_int(dev, ep);
1314 
1315 	free_irq(pdev->irq, dev);
1316 
1317 	epic_pause(dev);
1318 
1319 	/* Free all the skbuffs in the Rx queue. */
1320 	for (i = 0; i < RX_RING_SIZE; i++) {
1321 		skb = ep->rx_skbuff[i];
1322 		ep->rx_skbuff[i] = NULL;
1323 		ep->rx_ring[i].rxstatus = 0;		/* Not owned by Epic chip. */
1324 		ep->rx_ring[i].buflength = 0;
1325 		if (skb) {
1326 			pci_unmap_single(pdev, ep->rx_ring[i].bufaddr,
1327 				 	 ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
1328 			dev_kfree_skb(skb);
1329 		}
1330 		ep->rx_ring[i].bufaddr = 0xBADF00D0; /* An invalid address. */
1331 	}
1332 	for (i = 0; i < TX_RING_SIZE; i++) {
1333 		skb = ep->tx_skbuff[i];
1334 		ep->tx_skbuff[i] = NULL;
1335 		if (!skb)
1336 			continue;
1337 		pci_unmap_single(pdev, ep->tx_ring[i].bufaddr, skb->len,
1338 				 PCI_DMA_TODEVICE);
1339 		dev_kfree_skb(skb);
1340 	}
1341 
1342 	/* Green! Leave the chip in low-power mode. */
1343 	ew32(GENCTL, 0x0008);
1344 
1345 	return 0;
1346 }
1347 
1348 static struct net_device_stats *epic_get_stats(struct net_device *dev)
1349 {
1350 	struct epic_private *ep = netdev_priv(dev);
1351 	void __iomem *ioaddr = ep->ioaddr;
1352 
1353 	if (netif_running(dev)) {
1354 		struct net_device_stats *stats = &dev->stats;
1355 
1356 		stats->rx_missed_errors	+= er8(MPCNT);
1357 		stats->rx_frame_errors	+= er8(ALICNT);
1358 		stats->rx_crc_errors	+= er8(CRCCNT);
1359 	}
1360 
1361 	return &dev->stats;
1362 }
1363 
1364 /* Set or clear the multicast filter for this adaptor.
1365    Note that we only use exclusion around actually queueing the
1366    new frame, not around filling ep->setup_frame.  This is non-deterministic
1367    when re-entered but still correct. */
1368 
1369 static void set_rx_mode(struct net_device *dev)
1370 {
1371 	struct epic_private *ep = netdev_priv(dev);
1372 	void __iomem *ioaddr = ep->ioaddr;
1373 	unsigned char mc_filter[8];		 /* Multicast hash filter */
1374 	int i;
1375 
1376 	if (dev->flags & IFF_PROMISC) {			/* Set promiscuous. */
1377 		ew32(RxCtrl, 0x002c);
1378 		/* Unconditionally log net taps. */
1379 		memset(mc_filter, 0xff, sizeof(mc_filter));
1380 	} else if ((!netdev_mc_empty(dev)) || (dev->flags & IFF_ALLMULTI)) {
1381 		/* There is apparently a chip bug, so the multicast filter
1382 		   is never enabled. */
1383 		/* Too many to filter perfectly -- accept all multicasts. */
1384 		memset(mc_filter, 0xff, sizeof(mc_filter));
1385 		ew32(RxCtrl, 0x000c);
1386 	} else if (netdev_mc_empty(dev)) {
1387 		ew32(RxCtrl, 0x0004);
1388 		return;
1389 	} else {					/* Never executed, for now. */
1390 		struct netdev_hw_addr *ha;
1391 
1392 		memset(mc_filter, 0, sizeof(mc_filter));
1393 		netdev_for_each_mc_addr(ha, dev) {
1394 			unsigned int bit_nr =
1395 				ether_crc_le(ETH_ALEN, ha->addr) & 0x3f;
1396 			mc_filter[bit_nr >> 3] |= (1 << bit_nr);
1397 		}
1398 	}
1399 	/* ToDo: perhaps we need to stop the Tx and Rx process here? */
1400 	if (memcmp(mc_filter, ep->mc_filter, sizeof(mc_filter))) {
1401 		for (i = 0; i < 4; i++)
1402 			ew16(MC0 + i*4, ((u16 *)mc_filter)[i]);
1403 		memcpy(ep->mc_filter, mc_filter, sizeof(mc_filter));
1404 	}
1405 }
1406 
1407 static void netdev_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
1408 {
1409 	struct epic_private *np = netdev_priv(dev);
1410 
1411 	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
1412 	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
1413 	strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
1414 }
1415 
1416 static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1417 {
1418 	struct epic_private *np = netdev_priv(dev);
1419 	int rc;
1420 
1421 	spin_lock_irq(&np->lock);
1422 	rc = mii_ethtool_gset(&np->mii, cmd);
1423 	spin_unlock_irq(&np->lock);
1424 
1425 	return rc;
1426 }
1427 
1428 static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1429 {
1430 	struct epic_private *np = netdev_priv(dev);
1431 	int rc;
1432 
1433 	spin_lock_irq(&np->lock);
1434 	rc = mii_ethtool_sset(&np->mii, cmd);
1435 	spin_unlock_irq(&np->lock);
1436 
1437 	return rc;
1438 }
1439 
1440 static int netdev_nway_reset(struct net_device *dev)
1441 {
1442 	struct epic_private *np = netdev_priv(dev);
1443 	return mii_nway_restart(&np->mii);
1444 }
1445 
1446 static u32 netdev_get_link(struct net_device *dev)
1447 {
1448 	struct epic_private *np = netdev_priv(dev);
1449 	return mii_link_ok(&np->mii);
1450 }
1451 
/* ethtool: the module-wide 'debug' level doubles as the message level. */
static u32 netdev_get_msglevel(struct net_device *dev)
{
	return debug;
}
1456 
/* ethtool: set the module-wide 'debug' level (affects all devices). */
static void netdev_set_msglevel(struct net_device *dev, u32 value)
{
	debug = value;
}
1461 
1462 static int ethtool_begin(struct net_device *dev)
1463 {
1464 	struct epic_private *ep = netdev_priv(dev);
1465 	void __iomem *ioaddr = ep->ioaddr;
1466 
1467 	/* power-up, if interface is down */
1468 	if (!netif_running(dev)) {
1469 		ew32(GENCTL, 0x0200);
1470 		ew32(NVCTL, (er32(NVCTL) & ~0x003c) | 0x4800);
1471 	}
1472 	return 0;
1473 }
1474 
1475 static void ethtool_complete(struct net_device *dev)
1476 {
1477 	struct epic_private *ep = netdev_priv(dev);
1478 	void __iomem *ioaddr = ep->ioaddr;
1479 
1480 	/* power-down, if interface is down */
1481 	if (!netif_running(dev)) {
1482 		ew32(GENCTL, 0x0008);
1483 		ew32(NVCTL, (er32(NVCTL) & ~0x483c) | 0x0000);
1484 	}
1485 }
1486 
/* ethtool operations table; .begin/.complete bracket every operation
   so the chip is powered up even while the interface is closed. */
static const struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo		= netdev_get_drvinfo,
	.get_settings		= netdev_get_settings,
	.set_settings		= netdev_set_settings,
	.nway_reset		= netdev_nway_reset,
	.get_link		= netdev_get_link,
	.get_msglevel		= netdev_get_msglevel,
	.set_msglevel		= netdev_set_msglevel,
	.begin			= ethtool_begin,
	.complete		= ethtool_complete
};
1498 
1499 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1500 {
1501 	struct epic_private *np = netdev_priv(dev);
1502 	void __iomem *ioaddr = np->ioaddr;
1503 	struct mii_ioctl_data *data = if_mii(rq);
1504 	int rc;
1505 
1506 	/* power-up, if interface is down */
1507 	if (! netif_running(dev)) {
1508 		ew32(GENCTL, 0x0200);
1509 		ew32(NVCTL, (er32(NVCTL) & ~0x003c) | 0x4800);
1510 	}
1511 
1512 	/* all non-ethtool ioctls (the SIOC[GS]MIIxxx ioctls) */
1513 	spin_lock_irq(&np->lock);
1514 	rc = generic_mii_ioctl(&np->mii, data, cmd, NULL);
1515 	spin_unlock_irq(&np->lock);
1516 
1517 	/* power-down, if interface is down */
1518 	if (! netif_running(dev)) {
1519 		ew32(GENCTL, 0x0008);
1520 		ew32(NVCTL, (er32(NVCTL) & ~0x483c) | 0x0000);
1521 	}
1522 	return rc;
1523 }
1524 
1525 
1526 static void epic_remove_one(struct pci_dev *pdev)
1527 {
1528 	struct net_device *dev = pci_get_drvdata(pdev);
1529 	struct epic_private *ep = netdev_priv(dev);
1530 
1531 	pci_free_consistent(pdev, TX_TOTAL_SIZE, ep->tx_ring, ep->tx_ring_dma);
1532 	pci_free_consistent(pdev, RX_TOTAL_SIZE, ep->rx_ring, ep->rx_ring_dma);
1533 	unregister_netdev(dev);
1534 	pci_iounmap(pdev, ep->ioaddr);
1535 	pci_release_regions(pdev);
1536 	free_netdev(dev);
1537 	pci_disable_device(pdev);
1538 	pci_set_drvdata(pdev, NULL);
1539 	/* pci_power_off(pdev, -1); */
1540 }
1541 
1542 
1543 #ifdef CONFIG_PM
1544 
1545 static int epic_suspend (struct pci_dev *pdev, pm_message_t state)
1546 {
1547 	struct net_device *dev = pci_get_drvdata(pdev);
1548 	struct epic_private *ep = netdev_priv(dev);
1549 	void __iomem *ioaddr = ep->ioaddr;
1550 
1551 	if (!netif_running(dev))
1552 		return 0;
1553 	epic_pause(dev);
1554 	/* Put the chip into low-power mode. */
1555 	ew32(GENCTL, 0x0008);
1556 	/* pci_power_off(pdev, -1); */
1557 	return 0;
1558 }
1559 
1560 
/* PM resume: restart the chip if the interface was running. */
static int epic_resume (struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (!netif_running(dev))
		return 0;

	epic_restart(dev);
	return 0;
}
1571 
1572 #endif /* CONFIG_PM */
1573 
1574 
/* PCI driver glue binding the probe/remove/PM entry points. */
static struct pci_driver epic_driver = {
	.name		= DRV_NAME,
	.id_table	= epic_pci_tbl,
	.probe		= epic_init_one,
	.remove		= epic_remove_one,
#ifdef CONFIG_PM
	.suspend	= epic_suspend,
	.resume		= epic_resume,
#endif /* CONFIG_PM */
};
1585 
1586 
1587 static int __init epic_init (void)
1588 {
1589 /* when a module, this is printed whether or not devices are found in probe */
1590 #ifdef MODULE
1591 	printk (KERN_INFO "%s%s",
1592 		version, version2);
1593 #endif
1594 
1595 	return pci_register_driver(&epic_driver);
1596 }
1597 
1598 
1599 static void __exit epic_cleanup (void)
1600 {
1601 	pci_unregister_driver (&epic_driver);
1602 }
1603 
1604 
/* Module entry and exit points. */
module_init(epic_init);
module_exit(epic_cleanup);
1607