1 /* epic100.c: A SMC 83c170 EPIC/100 Fast Ethernet driver for Linux. */
2 /*
3 	Written/copyright 1997-2001 by Donald Becker.
4 
5 	This software may be used and distributed according to the terms of
6 	the GNU General Public License (GPL), incorporated herein by reference.
7 	Drivers based on or derived from this code fall under the GPL and must
8 	retain the authorship, copyright and license notice.  This file is not
9 	a complete program and may only be used when the entire operating
10 	system is licensed under the GPL.
11 
12 	This driver is for the SMC83c170/175 "EPIC" series, as used on the
13 	SMC EtherPower II 9432 PCI adapter, and several CardBus cards.
14 
15 	The author may be reached as becker@scyld.com, or C/O
16 	Scyld Computing Corporation
17 	410 Severn Ave., Suite 210
18 	Annapolis MD 21403
19 
20 	Information and updates available at
21 	http://www.scyld.com/network/epic100.html
22 	[this link no longer provides anything useful -jgarzik]
23 
24 	---------------------------------------------------------------------
25 
26 */
27 
28 #define DRV_NAME        "epic100"
29 #define DRV_VERSION     "2.1"
30 #define DRV_RELDATE     "Sept 11, 2006"
31 
32 /* The user-configurable values.
33    These may be modified when a driver module is loaded.*/
34 
35 static int debug = 1;			/* 1 normal messages, 0 quiet .. 7 verbose. */
36 
37 /* Used to pass the full-duplex flag, etc. */
38 #define MAX_UNITS 8		/* More are supported, limit only on options */
39 static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
40 static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
41 
42 /* Set the copy breakpoint for the copy-only-tiny-frames scheme.
43    Setting to > 1518 effectively disables this feature. */
44 static int rx_copybreak;
45 
46 /* Operational parameters that are set at compile time. */
47 
48 /* Keep the ring sizes a power of two for operational efficiency.
49    The compiler will convert <unsigned>'%'<2^N> into a bit mask.
50    Making the Tx ring too large decreases the effectiveness of channel
51    bonding and packet priority.
52    There are no ill effects from too-large receive rings. */
#define TX_RING_SIZE	256
#define TX_QUEUE_LEN	240		/* Limit ring entries actually used.  */
#define RX_RING_SIZE	256
/* Parenthesize the full expansions so the macros stay safe inside any
   surrounding expression (current callers pass them as bare function
   arguments; the values are unchanged). */
#define TX_TOTAL_SIZE	(TX_RING_SIZE * sizeof(struct epic_tx_desc))
#define RX_TOTAL_SIZE	(RX_RING_SIZE * sizeof(struct epic_rx_desc))
58 
59 /* Operational parameters that usually are not changed. */
60 /* Time in jiffies before concluding the transmitter is hung. */
61 #define TX_TIMEOUT  (2*HZ)
62 
63 #define PKT_BUF_SZ		1536			/* Size of each temporary Rx buffer.*/
64 
65 /* Bytes transferred to chip before transmission starts. */
66 /* Initial threshold, increased on underflow, rounded down to 4 byte units. */
67 #define TX_FIFO_THRESH 256
68 #define RX_FIFO_THRESH 1		/* 0-3, 0==32, 64,96, or 3==128 bytes  */
69 
70 #include <linux/module.h>
71 #include <linux/kernel.h>
72 #include <linux/string.h>
73 #include <linux/timer.h>
74 #include <linux/errno.h>
75 #include <linux/ioport.h>
76 #include <linux/interrupt.h>
77 #include <linux/pci.h>
78 #include <linux/delay.h>
79 #include <linux/netdevice.h>
80 #include <linux/etherdevice.h>
81 #include <linux/skbuff.h>
82 #include <linux/init.h>
83 #include <linux/spinlock.h>
84 #include <linux/ethtool.h>
85 #include <linux/mii.h>
86 #include <linux/crc32.h>
87 #include <linux/bitops.h>
88 #include <asm/io.h>
89 #include <linux/uaccess.h>
90 #include <asm/byteorder.h>
91 
92 /* These identify the driver base version and may not be removed. */
93 static char version[] =
94 DRV_NAME ".c:v1.11 1/7/2001 Written by Donald Becker <becker@scyld.com>";
95 static char version2[] =
96 "  (unofficial 2.4.x kernel port, version " DRV_VERSION ", " DRV_RELDATE ")";
97 
98 MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
99 MODULE_DESCRIPTION("SMC 83c170 EPIC series Ethernet driver");
100 MODULE_LICENSE("GPL");
101 
102 module_param(debug, int, 0);
103 module_param(rx_copybreak, int, 0);
104 module_param_array(options, int, NULL, 0);
105 module_param_array(full_duplex, int, NULL, 0);
106 MODULE_PARM_DESC(debug, "EPIC/100 debug level (0-5)");
107 MODULE_PARM_DESC(options, "EPIC/100: Bits 0-3: media type, bit 4: full duplex");
108 MODULE_PARM_DESC(rx_copybreak, "EPIC/100 copy breakpoint for copy-only-tiny-frames");
109 MODULE_PARM_DESC(full_duplex, "EPIC/100 full duplex setting(s) (1)");
110 
111 /*
112 				Theory of Operation
113 
114 I. Board Compatibility
115 
116 This device driver is designed for the SMC "EPIC/100", the SMC
117 single-chip Ethernet controllers for PCI.  This chip is used on
118 the SMC EtherPower II boards.
119 
120 II. Board-specific settings
121 
122 PCI bus devices are configured by the system at boot time, so no jumpers
123 need to be set on the board.  The system BIOS will assign the
124 PCI INTA signal to a (preferably otherwise unused) system IRQ line.
125 Note: Kernel versions earlier than 1.3.73 do not support shared PCI
126 interrupt lines.
127 
128 III. Driver operation
129 
130 IIIa. Ring buffers
131 
132 IVb. References
133 
134 http://www.smsc.com/media/Downloads_Public/discontinued/83c171.pdf
135 http://www.smsc.com/media/Downloads_Public/discontinued/83c175.pdf
136 http://scyld.com/expert/NWay.html
137 http://www.national.com/pf/DP/DP83840A.html
138 
139 IVc. Errata
140 
141 */
142 
143 
144 enum chip_capability_flags { MII_PWRDWN=1, TYPE2_INTR=2, NO_MII=4 };
145 
146 #define EPIC_TOTAL_SIZE 0x100
147 #define USE_IO_OPS 1
148 
149 #ifdef USE_IO_OPS
150 #define EPIC_BAR	0
151 #else
152 #define EPIC_BAR	1
153 #endif
154 
155 typedef enum {
156 	SMSC_83C170_0,
157 	SMSC_83C170,
158 	SMSC_83C175,
159 } chip_t;
160 
161 
162 struct epic_chip_info {
163 	const char *name;
164         int drv_flags;                          /* Driver use, intended as capability flags. */
165 };
166 
167 
168 /* indexed by chip_t */
169 static const struct epic_chip_info pci_id_tbl[] = {
170 	{ "SMSC EPIC/100 83c170",	TYPE2_INTR | NO_MII | MII_PWRDWN },
171 	{ "SMSC EPIC/100 83c170",	TYPE2_INTR },
172 	{ "SMSC EPIC/C 83c175",		TYPE2_INTR | MII_PWRDWN },
173 };
174 
175 
176 static const struct pci_device_id epic_pci_tbl[] = {
177 	{ 0x10B8, 0x0005, 0x1092, 0x0AB4, 0, 0, SMSC_83C170_0 },
178 	{ 0x10B8, 0x0005, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SMSC_83C170 },
179 	{ 0x10B8, 0x0006, PCI_ANY_ID, PCI_ANY_ID,
180 	  PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, SMSC_83C175 },
181 	{ 0,}
182 };
183 MODULE_DEVICE_TABLE (pci, epic_pci_tbl);
184 
185 #define ew16(reg, val)	iowrite16(val, ioaddr + (reg))
186 #define ew32(reg, val)	iowrite32(val, ioaddr + (reg))
187 #define er8(reg)	ioread8(ioaddr + (reg))
188 #define er16(reg)	ioread16(ioaddr + (reg))
189 #define er32(reg)	ioread32(ioaddr + (reg))
190 
191 /* Offsets to registers, using the (ugh) SMC names. */
192 enum epic_registers {
193   COMMAND=0, INTSTAT=4, INTMASK=8, GENCTL=0x0C, NVCTL=0x10, EECTL=0x14,
194   PCIBurstCnt=0x18,
195   TEST1=0x1C, CRCCNT=0x20, ALICNT=0x24, MPCNT=0x28,	/* Rx error counters. */
196   MIICtrl=0x30, MIIData=0x34, MIICfg=0x38,
197   LAN0=64,						/* MAC address. */
198   MC0=80,						/* Multicast filter table. */
199   RxCtrl=96, TxCtrl=112, TxSTAT=0x74,
200   PRxCDAR=0x84, RxSTAT=0xA4, EarlyRx=0xB0, PTxCDAR=0xC4, TxThresh=0xDC,
201 };
202 
203 /* Interrupt register bits, using my own meaningful names. */
204 enum IntrStatus {
205 	TxIdle=0x40000, RxIdle=0x20000, IntrSummary=0x010000,
206 	PCIBusErr170=0x7000, PCIBusErr175=0x1000, PhyEvent175=0x8000,
207 	RxStarted=0x0800, RxEarlyWarn=0x0400, CntFull=0x0200, TxUnderrun=0x0100,
208 	TxEmpty=0x0080, TxDone=0x0020, RxError=0x0010,
209 	RxOverflow=0x0008, RxFull=0x0004, RxHeader=0x0002, RxDone=0x0001,
210 };
211 enum CommandBits {
212 	StopRx=1, StartRx=2, TxQueued=4, RxQueued=8,
213 	StopTxDMA=0x20, StopRxDMA=0x40, RestartTx=0x80,
214 };
215 
216 #define EpicRemoved	0xffffffff	/* Chip failed or removed (CardBus) */
217 
218 #define EpicNapiEvent	(TxEmpty | TxDone | \
219 			 RxDone | RxStarted | RxEarlyWarn | RxOverflow | RxFull)
220 #define EpicNormalEvent	(0x0000ffff & ~EpicNapiEvent)
221 
222 static const u16 media2miictl[16] = {
223 	0, 0x0C00, 0x0C00, 0x2000,  0x0100, 0x2100, 0, 0,
224 	0, 0, 0, 0,  0, 0, 0, 0 };
225 
226 /*
227  * The EPIC100 Rx and Tx buffer descriptors.  Note that these
228  * really ARE host-endian; it's not a misannotation.  We tell
229  * the card to byteswap them internally on big-endian hosts -
230  * look for #ifdef __BIG_ENDIAN in epic_open().
231  */
232 
233 struct epic_tx_desc {
234 	u32 txstatus;
235 	u32 bufaddr;
236 	u32 buflength;
237 	u32 next;
238 };
239 
240 struct epic_rx_desc {
241 	u32 rxstatus;
242 	u32 bufaddr;
243 	u32 buflength;
244 	u32 next;
245 };
246 
247 enum desc_status_bits {
248 	DescOwn=0x8000,
249 };
250 
251 #define PRIV_ALIGN	15 	/* Required alignment mask */
252 struct epic_private {
253 	struct epic_rx_desc *rx_ring;
254 	struct epic_tx_desc *tx_ring;
255 	/* The saved address of a sent-in-place packet/buffer, for skfree(). */
256 	struct sk_buff* tx_skbuff[TX_RING_SIZE];
257 	/* The addresses of receive-in-place skbuffs. */
258 	struct sk_buff* rx_skbuff[RX_RING_SIZE];
259 
260 	dma_addr_t tx_ring_dma;
261 	dma_addr_t rx_ring_dma;
262 
263 	/* Ring pointers. */
264 	spinlock_t lock;				/* Group with Tx control cache line. */
265 	spinlock_t napi_lock;
266 	struct napi_struct napi;
267 	unsigned int cur_tx, dirty_tx;
268 
269 	unsigned int cur_rx, dirty_rx;
270 	u32 irq_mask;
271 	unsigned int rx_buf_sz;				/* Based on MTU+slack. */
272 
273 	void __iomem *ioaddr;
274 	struct pci_dev *pci_dev;			/* PCI bus location. */
275 	int chip_id, chip_flags;
276 
277 	struct timer_list timer;			/* Media selection timer. */
278 	int tx_threshold;
279 	unsigned char mc_filter[8];
280 	signed char phys[4];				/* MII device addresses. */
281 	u16 advertising;					/* NWay media advertisement */
282 	int mii_phy_cnt;
283 	struct mii_if_info mii;
284 	unsigned int tx_full:1;				/* The Tx queue is full. */
285 	unsigned int default_port:4;		/* Last dev->if_port value. */
286 };
287 
288 static int epic_open(struct net_device *dev);
289 static int read_eeprom(struct epic_private *, int);
290 static int mdio_read(struct net_device *dev, int phy_id, int location);
291 static void mdio_write(struct net_device *dev, int phy_id, int loc, int val);
292 static void epic_restart(struct net_device *dev);
293 static void epic_timer(unsigned long data);
294 static void epic_tx_timeout(struct net_device *dev);
295 static void epic_init_ring(struct net_device *dev);
296 static netdev_tx_t epic_start_xmit(struct sk_buff *skb,
297 				   struct net_device *dev);
298 static int epic_rx(struct net_device *dev, int budget);
299 static int epic_poll(struct napi_struct *napi, int budget);
300 static irqreturn_t epic_interrupt(int irq, void *dev_instance);
301 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
302 static const struct ethtool_ops netdev_ethtool_ops;
303 static int epic_close(struct net_device *dev);
304 static struct net_device_stats *epic_get_stats(struct net_device *dev);
305 static void set_rx_mode(struct net_device *dev);
306 
/* net_device callbacks; installed on the device in epic_init_one(). */
static const struct net_device_ops epic_netdev_ops = {
	.ndo_open		= epic_open,
	.ndo_stop		= epic_close,
	.ndo_start_xmit		= epic_start_xmit,
	.ndo_tx_timeout 	= epic_tx_timeout,
	.ndo_get_stats		= epic_get_stats,
	.ndo_set_rx_mode	= set_rx_mode,
	.ndo_do_ioctl 		= netdev_ioctl,
	.ndo_set_mac_address 	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};
318 
/* PCI probe: bring up one EPIC board, read its station address, locate
   the MII transceiver(s), allocate the DMA descriptor rings and
   register the net device.  Returns 0 on success, negative errno on
   failure (all resources released via the goto chain at the end). */
static int epic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int card_idx = -1;	/* counts boards for options[]/full_duplex[] */
	void __iomem *ioaddr;
	int chip_idx = (int) ent->driver_data;
	int irq;
	struct net_device *dev;
	struct epic_private *ep;
	int i, ret, option = 0, duplex = 0;
	void *ring_space;
	dma_addr_t ring_dma;

/* when built into the kernel, we only print version if device is found */
#ifndef MODULE
	pr_info_once("%s%s\n", version, version2);
#endif

	card_idx++;

	ret = pci_enable_device(pdev);
	if (ret)
		goto out;
	irq = pdev->irq;

	if (pci_resource_len(pdev, 0) < EPIC_TOTAL_SIZE) {
		dev_err(&pdev->dev, "no PCI region space\n");
		ret = -ENODEV;
		goto err_out_disable;
	}

	pci_set_master(pdev);

	ret = pci_request_regions(pdev, DRV_NAME);
	if (ret < 0)
		goto err_out_disable;

	ret = -ENOMEM;

	dev = alloc_etherdev(sizeof (*ep));
	if (!dev)
		goto err_out_free_res;

	SET_NETDEV_DEV(dev, &pdev->dev);

	ioaddr = pci_iomap(pdev, EPIC_BAR, 0);
	if (!ioaddr) {
		dev_err(&pdev->dev, "ioremap failed\n");
		goto err_out_free_netdev;
	}

	pci_set_drvdata(pdev, dev);
	ep = netdev_priv(dev);
	ep->ioaddr = ioaddr;
	ep->mii.dev = dev;
	ep->mii.mdio_read = mdio_read;
	ep->mii.mdio_write = mdio_write;
	ep->mii.phy_id_mask = 0x1f;
	ep->mii.reg_num_mask = 0x1f;

	/* DMA-coherent descriptor rings; freed in reverse order on error. */
	ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
	if (!ring_space)
		goto err_out_iounmap;
	ep->tx_ring = ring_space;
	ep->tx_ring_dma = ring_dma;

	ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
	if (!ring_space)
		goto err_out_unmap_tx;
	ep->rx_ring = ring_space;
	ep->rx_ring_dma = ring_dma;

	/* Media/duplex overrides: a nonzero dev->mem_start wins over the
	   per-board module parameters. */
	if (dev->mem_start) {
		option = dev->mem_start;
		duplex = (dev->mem_start & 16) ? 1 : 0;
	} else if (card_idx >= 0  &&  card_idx < MAX_UNITS) {
		if (options[card_idx] >= 0)
			option = options[card_idx];
		if (full_duplex[card_idx] >= 0)
			duplex = full_duplex[card_idx];
	}

	spin_lock_init(&ep->lock);
	spin_lock_init(&ep->napi_lock);

	/* Bring the chip out of low-power mode. */
	ew32(GENCTL, 0x4200);
	/* Magic?!  If we don't set this bit the MII interface won't work. */
	/* This magic is documented in SMSC app note 7.15 */
	for (i = 16; i > 0; i--)
		ew32(TEST1, 0x0008);

	/* Turn on the MII transceiver. */
	ew32(MIICfg, 0x12);
	if (chip_idx == 1)
		ew32(NVCTL, (er32(NVCTL) & ~0x003c) | 0x4800);
	ew32(GENCTL, 0x0200);

	/* Note: the '175 does not have a serial EEPROM. */
	/* The MAC address comes from the LAN0 registers; the EEPROM dump
	   below is debug-only. */
	for (i = 0; i < 3; i++)
		((__le16 *)dev->dev_addr)[i] = cpu_to_le16(er16(LAN0 + i*4));

	if (debug > 2) {
		dev_dbg(&pdev->dev, "EEPROM contents:\n");
		for (i = 0; i < 64; i++)
			pr_cont(" %4.4x%s", read_eeprom(ep, i),
				   i % 16 == 15 ? "\n" : "");
	}

	ep->pci_dev = pdev;
	ep->chip_id = chip_idx;
	ep->chip_flags = pci_id_tbl[chip_idx].drv_flags;
	ep->irq_mask =
		(ep->chip_flags & TYPE2_INTR ?  PCIBusErr175 : PCIBusErr170)
		 | CntFull | TxUnderrun | EpicNapiEvent;

	/* Find the connected MII xcvrs.
	   Doing this in open() would allow detecting external xcvrs later, but
	   takes much time and no cards have external MII. */
	{
		int phy, phy_idx = 0;
		for (phy = 1; phy < 32 && phy_idx < sizeof(ep->phys); phy++) {
			int mii_status = mdio_read(dev, phy, MII_BMSR);
			/* 0x0000 and 0xffff both mean "no PHY at this address". */
			if (mii_status != 0xffff  &&  mii_status != 0x0000) {
				ep->phys[phy_idx++] = phy;
				dev_info(&pdev->dev,
					"MII transceiver #%d control "
					"%4.4x status %4.4x.\n",
					phy, mdio_read(dev, phy, 0), mii_status);
			}
		}
		ep->mii_phy_cnt = phy_idx;
		if (phy_idx != 0) {
			phy = ep->phys[0];
			ep->mii.advertising = mdio_read(dev, phy, MII_ADVERTISE);
			dev_info(&pdev->dev,
				"Autonegotiation advertising %4.4x link "
				   "partner %4.4x.\n",
				   ep->mii.advertising, mdio_read(dev, phy, 5));
		} else if ( ! (ep->chip_flags & NO_MII)) {
			dev_warn(&pdev->dev,
				"***WARNING***: No MII transceiver found!\n");
			/* Use the known PHY address of the EPII. */
			ep->phys[0] = 3;
		}
		ep->mii.phy_id = ep->phys[0];
	}

	/* Turn off the MII xcvr (175 only!), leave the chip in low-power mode. */
	if (ep->chip_flags & MII_PWRDWN)
		ew32(NVCTL, er32(NVCTL) & ~0x483c);
	ew32(GENCTL, 0x0008);

	/* The lower four bits are the media type. */
	if (duplex) {
		ep->mii.force_media = ep->mii.full_duplex = 1;
		dev_info(&pdev->dev, "Forced full duplex requested.\n");
	}
	dev->if_port = ep->default_port = option;

	/* The Epic-specific entries in the device structure. */
	dev->netdev_ops = &epic_netdev_ops;
	dev->ethtool_ops = &netdev_ethtool_ops;
	dev->watchdog_timeo = TX_TIMEOUT;
	netif_napi_add(dev, &ep->napi, epic_poll, 64);

	ret = register_netdev(dev);
	if (ret < 0)
		goto err_out_unmap_rx;

	netdev_info(dev, "%s at %lx, IRQ %d, %pM\n",
		    pci_id_tbl[chip_idx].name,
		    (long)pci_resource_start(pdev, EPIC_BAR), pdev->irq,
		    dev->dev_addr);

out:
	return ret;

err_out_unmap_rx:
	pci_free_consistent(pdev, RX_TOTAL_SIZE, ep->rx_ring, ep->rx_ring_dma);
err_out_unmap_tx:
	pci_free_consistent(pdev, TX_TOTAL_SIZE, ep->tx_ring, ep->tx_ring_dma);
err_out_iounmap:
	pci_iounmap(pdev, ioaddr);
err_out_free_netdev:
	free_netdev(dev);
err_out_free_res:
	pci_release_regions(pdev);
err_out_disable:
	pci_disable_device(pdev);
	goto out;
}
510 
511 /* Serial EEPROM section. */
512 
513 /*  EEPROM_Ctrl bits. */
514 #define EE_SHIFT_CLK	0x04	/* EEPROM shift clock. */
515 #define EE_CS			0x02	/* EEPROM chip select. */
516 #define EE_DATA_WRITE	0x08	/* EEPROM chip data in. */
517 #define EE_WRITE_0		0x01
518 #define EE_WRITE_1		0x09
519 #define EE_DATA_READ	0x10	/* EEPROM chip data out. */
520 #define EE_ENB			(0x0001 | EE_CS)
521 
522 /* Delay between EEPROM clock transitions.
523    This serves to flush the operation to the PCI bus.
524  */
525 
526 #define eeprom_delay()	er32(EECTL)
527 
/* The EEPROM commands include the always-set leading bit. */
529 #define EE_WRITE_CMD	(5 << 6)
530 #define EE_READ64_CMD	(6 << 6)
531 #define EE_READ256_CMD	(6 << 8)
532 #define EE_ERASE_CMD	(7 << 6)
533 
534 static void epic_disable_int(struct net_device *dev, struct epic_private *ep)
535 {
536 	void __iomem *ioaddr = ep->ioaddr;
537 
538 	ew32(INTMASK, 0x00000000);
539 }
540 
/* Flush a posted MMIO write by reading back a register.  Compiled out
   when USE_IO_OPS selects I/O-port access (port writes are not posted). */
static inline void __epic_pci_commit(void __iomem *ioaddr)
{
#ifndef USE_IO_OPS
	er32(INTMASK);
#endif
}
547 
548 static inline void epic_napi_irq_off(struct net_device *dev,
549 				     struct epic_private *ep)
550 {
551 	void __iomem *ioaddr = ep->ioaddr;
552 
553 	ew32(INTMASK, ep->irq_mask & ~EpicNapiEvent);
554 	__epic_pci_commit(ioaddr);
555 }
556 
557 static inline void epic_napi_irq_on(struct net_device *dev,
558 				    struct epic_private *ep)
559 {
560 	void __iomem *ioaddr = ep->ioaddr;
561 
562 	/* No need to commit possible posted write */
563 	ew32(INTMASK, ep->irq_mask | EpicNapiEvent);
564 }
565 
/* Read one 16-bit word from the serial EEPROM at @location by
   bit-banging the three-wire interface through EECTL: shift out the
   read command and address, then clock in 16 data bits MSB-first.
   Returns the word read. */
static int read_eeprom(struct epic_private *ep, int location)
{
	void __iomem *ioaddr = ep->ioaddr;
	int i;
	int retval = 0;
	/* EECTL bit 0x40 selects between the 64-word and 256-word parts,
	   which take different command widths. */
	int read_cmd = location |
		(er32(EECTL) & 0x40 ? EE_READ64_CMD : EE_READ256_CMD);

	ew32(EECTL, EE_ENB & ~EE_CS);
	ew32(EECTL, EE_ENB);

	/* Shift the read command bits out. */
	for (i = 12; i >= 0; i--) {
		short dataval = (read_cmd & (1 << i)) ? EE_WRITE_1 : EE_WRITE_0;
		ew32(EECTL, EE_ENB | dataval);
		eeprom_delay();		/* flush to the bus and pace the clock */
		ew32(EECTL, EE_ENB | dataval | EE_SHIFT_CLK);
		eeprom_delay();
	}
	ew32(EECTL, EE_ENB);

	/* Clock in the 16 result bits, most significant bit first. */
	for (i = 16; i > 0; i--) {
		ew32(EECTL, EE_ENB | EE_SHIFT_CLK);
		eeprom_delay();
		retval = (retval << 1) | ((er32(EECTL) & EE_DATA_READ) ? 1 : 0);
		ew32(EECTL, EE_ENB);
		eeprom_delay();
	}

	/* Terminate the EEPROM access. */
	ew32(EECTL, EE_ENB & ~EE_CS);
	return retval;
}
599 
600 #define MII_READOP		1
601 #define MII_WRITEOP		2
/* Read a PHY register over the MII management interface.  The command
   is written to MIICtrl and polled; the MII_READOP bit clears when the
   transaction completes.  Returns the 16-bit register value, or 0xffff
   if the controller never finishes. */
static int mdio_read(struct net_device *dev, int phy_id, int location)
{
	struct epic_private *ep = netdev_priv(dev);
	void __iomem *ioaddr = ep->ioaddr;
	int read_cmd = (phy_id << 9) | (location << 4) | MII_READOP;
	int i;

	ew32(MIICtrl, read_cmd);
	/* Typical operation takes 25 loops. */
	for (i = 400; i > 0; i--) {
		barrier();
		if ((er32(MIICtrl) & MII_READOP) == 0) {
			/* Work around read failure bug: an all-ones result
			   from PHY 1, registers 0-5 is retried rather than
			   returned. */
			if (phy_id == 1 && location < 6 &&
			    er16(MIIData) == 0xffff) {
				ew32(MIICtrl, read_cmd);
				continue;
			}
			return er16(MIIData);
		}
	}
	return 0xffff;
}
625 
626 static void mdio_write(struct net_device *dev, int phy_id, int loc, int value)
627 {
628 	struct epic_private *ep = netdev_priv(dev);
629 	void __iomem *ioaddr = ep->ioaddr;
630 	int i;
631 
632 	ew16(MIIData, value);
633 	ew32(MIICtrl, (phy_id << 9) | (loc << 4) | MII_WRITEOP);
634 	for (i = 10000; i > 0; i--) {
635 		barrier();
636 		if ((er32(MIICtrl) & MII_WRITEOP) == 0)
637 			break;
638 	}
639 }
640 
641 
/* Bring the interface up: soft-reset the chip, hook the (shared) IRQ,
   build the Rx/Tx rings, restore the station address, select media and
   duplex, start the receiver, unmask interrupts and arm the media
   timer.  Returns 0 on success or the request_irq() error.
   NOTE(review): the register write order below is hardware-mandated;
   do not reorder. */
static int epic_open(struct net_device *dev)
{
	struct epic_private *ep = netdev_priv(dev);
	void __iomem *ioaddr = ep->ioaddr;
	const int irq = ep->pci_dev->irq;
	int rc, i;

	/* Soft reset the chip. */
	ew32(GENCTL, 0x4001);

	napi_enable(&ep->napi);
	rc = request_irq(irq, epic_interrupt, IRQF_SHARED, dev->name, dev);
	if (rc) {
		napi_disable(&ep->napi);
		return rc;
	}

	epic_init_ring(dev);

	ew32(GENCTL, 0x4000);
	/* This magic is documented in SMSC app note 7.15 */
	for (i = 16; i > 0; i--)
		ew32(TEST1, 0x0008);

	/* Pull the chip out of low-power mode, enable interrupts, and set for
	   PCI read multiple.  The MIIcfg setting and strange write order are
	   required by the details of which bits are reset and the transceiver
	   wiring on the Ositech CardBus card.
	*/
#if 0
	ew32(MIICfg, dev->if_port == 1 ? 0x13 : 0x12);
#endif
	if (ep->chip_flags & MII_PWRDWN)
		ew32(NVCTL, (er32(NVCTL) & ~0x003c) | 0x4800);

	/* Tell the chip to byteswap descriptors on big-endian hosts */
#ifdef __BIG_ENDIAN
	ew32(GENCTL, 0x4432 | (RX_FIFO_THRESH << 8));
	er32(GENCTL);
	ew32(GENCTL, 0x0432 | (RX_FIFO_THRESH << 8));
#else
	ew32(GENCTL, 0x4412 | (RX_FIFO_THRESH << 8));
	er32(GENCTL);
	ew32(GENCTL, 0x0412 | (RX_FIFO_THRESH << 8));
#endif

	udelay(20); /* Looks like EPII needs that if you want reliable RX init. FIXME: pci posting bug? */

	/* Reload the station address into the LAN0 registers. */
	for (i = 0; i < 3; i++)
		ew32(LAN0 + i*4, le16_to_cpu(((__le16*)dev->dev_addr)[i]));

	ep->tx_threshold = TX_FIFO_THRESH;
	ew32(TxThresh, ep->tx_threshold);

	/* A nonzero if_port forces the BMCR from media2miictl[]; otherwise
	   duplex follows what the link partner advertises. */
	if (media2miictl[dev->if_port & 15]) {
		if (ep->mii_phy_cnt)
			mdio_write(dev, ep->phys[0], MII_BMCR, media2miictl[dev->if_port&15]);
		if (dev->if_port == 1) {
			if (debug > 1)
				netdev_info(dev, "Using the 10base2 transceiver, MII status %4.4x.\n",
					    mdio_read(dev, ep->phys[0], MII_BMSR));
		}
	} else {
		int mii_lpa = mdio_read(dev, ep->phys[0], MII_LPA);
		if (mii_lpa != 0xffff) {
			if ((mii_lpa & LPA_100FULL) || (mii_lpa & 0x01C0) == LPA_10FULL)
				ep->mii.full_duplex = 1;
			else if (! (mii_lpa & LPA_LPACK))
				/* Partner is not autonegotiating: restart ANeg. */
				mdio_write(dev, ep->phys[0], MII_BMCR, BMCR_ANENABLE|BMCR_ANRESTART);
			if (debug > 1)
				netdev_info(dev, "Setting %s-duplex based on MII xcvr %d register read of %4.4x.\n",
					    ep->mii.full_duplex ? "full"
								: "half",
					    ep->phys[0], mii_lpa);
		}
	}

	ew32(TxCtrl, ep->mii.full_duplex ? 0x7f : 0x79);
	ew32(PRxCDAR, ep->rx_ring_dma);
	ew32(PTxCDAR, ep->tx_ring_dma);

	/* Start the chip's Rx process. */
	set_rx_mode(dev);
	ew32(COMMAND, StartRx | RxQueued);

	netif_start_queue(dev);

	/* Enable interrupts by setting the interrupt mask. */
	ew32(INTMASK, RxError | RxHeader | EpicNapiEvent | CntFull |
	     ((ep->chip_flags & TYPE2_INTR) ? PCIBusErr175 : PCIBusErr170) |
	     TxUnderrun);

	if (debug > 1) {
		netdev_dbg(dev, "epic_open() ioaddr %p IRQ %d status %4.4x %s-duplex.\n",
			   ioaddr, irq, er32(GENCTL),
			   ep->mii.full_duplex ? "full" : "half");
	}

	/* Set the timer to switch to check for link beat and perhaps switch
	   to an alternate media type. */
	init_timer(&ep->timer);
	ep->timer.expires = jiffies + 3*HZ;
	ep->timer.data = (unsigned long)dev;
	ep->timer.function = epic_timer;				/* timer handler */
	add_timer(&ep->timer);

	return rc;
}
750 
751 /* Reset the chip to recover from a PCI transaction error.
752    This may occur at interrupt time. */
/* Quiesce the chip: stop the queue, mask all interrupts, halt the Rx
   unit and both DMA engines, fold the hardware error counters into
   dev->stats and drain packets already on the Rx ring.
   This may occur at interrupt time. */
static void epic_pause(struct net_device *dev)
{
	struct net_device_stats *stats = &dev->stats;
	struct epic_private *ep = netdev_priv(dev);
	void __iomem *ioaddr = ep->ioaddr;

	netif_stop_queue (dev);

	/* Disable interrupts by clearing the interrupt mask. */
	ew32(INTMASK, 0x00000000);
	/* Stop the chip's Tx and Rx DMA processes. */
	ew16(COMMAND, StopRx | StopTxDMA | StopRxDMA);

	/* Update the error counts.  An all-ones readback means the chip
	   is gone (see EpicRemoved), so the counters would be garbage. */
	if (er16(COMMAND) != 0xffff) {
		stats->rx_missed_errors	+= er8(MPCNT);
		stats->rx_frame_errors	+= er8(ALICNT);
		stats->rx_crc_errors	+= er8(CRCCNT);
	}

	/* Remove the packets on the Rx queue. */
	epic_rx(dev, RX_RING_SIZE);
}
776 
/* Re-initialize the chip after a fatal error or Tx timeout without
   rebuilding the rings: soft reset, then repeat the register setup of
   epic_open() and point the chip at the current ring positions. */
static void epic_restart(struct net_device *dev)
{
	struct epic_private *ep = netdev_priv(dev);
	void __iomem *ioaddr = ep->ioaddr;
	int i;

	/* Soft reset the chip. */
	ew32(GENCTL, 0x4001);

	netdev_dbg(dev, "Restarting the EPIC chip, Rx %d/%d Tx %d/%d.\n",
		   ep->cur_rx, ep->dirty_rx, ep->dirty_tx, ep->cur_tx);
	udelay(1);

	/* This magic is documented in SMSC app note 7.15 */
	for (i = 16; i > 0; i--)
		ew32(TEST1, 0x0008);

	/* Match descriptor byte order to the host (see epic_open()). */
#ifdef __BIG_ENDIAN
	ew32(GENCTL, 0x0432 | (RX_FIFO_THRESH << 8));
#else
	ew32(GENCTL, 0x0412 | (RX_FIFO_THRESH << 8));
#endif
	ew32(MIICfg, dev->if_port == 1 ? 0x13 : 0x12);
	if (ep->chip_flags & MII_PWRDWN)
		ew32(NVCTL, (er32(NVCTL) & ~0x003c) | 0x4800);

	/* Reload the station address. */
	for (i = 0; i < 3; i++)
		ew32(LAN0 + i*4, le16_to_cpu(((__le16*)dev->dev_addr)[i]));

	ep->tx_threshold = TX_FIFO_THRESH;
	ew32(TxThresh, ep->tx_threshold);
	ew32(TxCtrl, ep->mii.full_duplex ? 0x7f : 0x79);
	/* Resume the rings where they left off rather than at entry 0. */
	ew32(PRxCDAR, ep->rx_ring_dma +
	     (ep->cur_rx % RX_RING_SIZE) * sizeof(struct epic_rx_desc));
	ew32(PTxCDAR, ep->tx_ring_dma +
	     (ep->dirty_tx % TX_RING_SIZE) * sizeof(struct epic_tx_desc));

	/* Start the chip's Rx process. */
	set_rx_mode(dev);
	ew32(COMMAND, StartRx | RxQueued);

	/* Enable interrupts by setting the interrupt mask. */
	ew32(INTMASK, RxError | RxHeader | EpicNapiEvent | CntFull |
	     ((ep->chip_flags & TYPE2_INTR) ? PCIBusErr175 : PCIBusErr170) |
	     TxUnderrun);

	netdev_dbg(dev, "epic_restart() done, cmd status %4.4x, ctl %4.4x interrupt %4.4x.\n",
		   er32(COMMAND), er32(GENCTL), er32(INTSTAT));
}
826 
827 static void check_media(struct net_device *dev)
828 {
829 	struct epic_private *ep = netdev_priv(dev);
830 	void __iomem *ioaddr = ep->ioaddr;
831 	int mii_lpa = ep->mii_phy_cnt ? mdio_read(dev, ep->phys[0], MII_LPA) : 0;
832 	int negotiated = mii_lpa & ep->mii.advertising;
833 	int duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
834 
835 	if (ep->mii.force_media)
836 		return;
837 	if (mii_lpa == 0xffff)		/* Bogus read */
838 		return;
839 	if (ep->mii.full_duplex != duplex) {
840 		ep->mii.full_duplex = duplex;
841 		netdev_info(dev, "Setting %s-duplex based on MII #%d link partner capability of %4.4x.\n",
842 			    ep->mii.full_duplex ? "full" : "half",
843 			    ep->phys[0], mii_lpa);
844 		ew32(TxCtrl, ep->mii.full_duplex ? 0x7F : 0x79);
845 	}
846 }
847 
848 static void epic_timer(unsigned long data)
849 {
850 	struct net_device *dev = (struct net_device *)data;
851 	struct epic_private *ep = netdev_priv(dev);
852 	void __iomem *ioaddr = ep->ioaddr;
853 	int next_tick = 5*HZ;
854 
855 	if (debug > 3) {
856 		netdev_dbg(dev, "Media monitor tick, Tx status %8.8x.\n",
857 			   er32(TxSTAT));
858 		netdev_dbg(dev, "Other registers are IntMask %4.4x IntStatus %4.4x RxStatus %4.4x.\n",
859 			   er32(INTMASK), er32(INTSTAT), er32(RxSTAT));
860 	}
861 
862 	check_media(dev);
863 
864 	ep->timer.expires = jiffies + next_tick;
865 	add_timer(&ep->timer);
866 }
867 
868 static void epic_tx_timeout(struct net_device *dev)
869 {
870 	struct epic_private *ep = netdev_priv(dev);
871 	void __iomem *ioaddr = ep->ioaddr;
872 
873 	if (debug > 0) {
874 		netdev_warn(dev, "Transmit timeout using MII device, Tx status %4.4x.\n",
875 			    er16(TxSTAT));
876 		if (debug > 1) {
877 			netdev_dbg(dev, "Tx indices: dirty_tx %d, cur_tx %d.\n",
878 				   ep->dirty_tx, ep->cur_tx);
879 		}
880 	}
881 	if (er16(TxSTAT) & 0x10) {		/* Tx FIFO underflow. */
882 		dev->stats.tx_fifo_errors++;
883 		ew32(COMMAND, RestartTx);
884 	} else {
885 		epic_restart(dev);
886 		ew32(COMMAND, TxQueued);
887 	}
888 
889 	netif_trans_update(dev); /* prevent tx timeout */
890 	dev->stats.tx_errors++;
891 	if (!ep->tx_full)
892 		netif_wake_queue(dev);
893 }
894 
895 /* Initialize the Rx and Tx rings, along with various 'dev' bits. */
896 static void epic_init_ring(struct net_device *dev)
897 {
898 	struct epic_private *ep = netdev_priv(dev);
899 	int i;
900 
901 	ep->tx_full = 0;
902 	ep->dirty_tx = ep->cur_tx = 0;
903 	ep->cur_rx = ep->dirty_rx = 0;
904 	ep->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
905 
906 	/* Initialize all Rx descriptors. */
907 	for (i = 0; i < RX_RING_SIZE; i++) {
908 		ep->rx_ring[i].rxstatus = 0;
909 		ep->rx_ring[i].buflength = ep->rx_buf_sz;
910 		ep->rx_ring[i].next = ep->rx_ring_dma +
911 				      (i+1)*sizeof(struct epic_rx_desc);
912 		ep->rx_skbuff[i] = NULL;
913 	}
914 	/* Mark the last entry as wrapping the ring. */
915 	ep->rx_ring[i-1].next = ep->rx_ring_dma;
916 
917 	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
918 	for (i = 0; i < RX_RING_SIZE; i++) {
919 		struct sk_buff *skb = netdev_alloc_skb(dev, ep->rx_buf_sz + 2);
920 		ep->rx_skbuff[i] = skb;
921 		if (skb == NULL)
922 			break;
923 		skb_reserve(skb, 2);	/* 16 byte align the IP header. */
924 		ep->rx_ring[i].bufaddr = pci_map_single(ep->pci_dev,
925 			skb->data, ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
926 		ep->rx_ring[i].rxstatus = DescOwn;
927 	}
928 	ep->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
929 
930 	/* The Tx buffer descriptor is filled in as needed, but we
931 	   do need to clear the ownership bit. */
932 	for (i = 0; i < TX_RING_SIZE; i++) {
933 		ep->tx_skbuff[i] = NULL;
934 		ep->tx_ring[i].txstatus = 0x0000;
935 		ep->tx_ring[i].next = ep->tx_ring_dma +
936 			(i+1)*sizeof(struct epic_tx_desc);
937 	}
938 	ep->tx_ring[i-1].next = ep->tx_ring_dma;
939 }
940 
/* Queue one packet for transmission.  Short frames are padded to
   ETH_ZLEN first.  The descriptor is built with the DescOwn bit written
   last so the chip never sees a half-built entry, and Tx-done
   interrupts are requested only at the half-queue mark and when the
   ring is nearly full, to limit interrupt load.
   Always returns NETDEV_TX_OK. */
static netdev_tx_t epic_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct epic_private *ep = netdev_priv(dev);
	void __iomem *ioaddr = ep->ioaddr;
	int entry, free_count;
	u32 ctrl_word;
	unsigned long flags;

	if (skb_padto(skb, ETH_ZLEN))
		return NETDEV_TX_OK;

	/* Caution: the write order is important here, set the field with the
	   "ownership" bit last. */

	/* Calculate the next Tx descriptor entry. */
	spin_lock_irqsave(&ep->lock, flags);
	free_count = ep->cur_tx - ep->dirty_tx;		/* entries in flight */
	entry = ep->cur_tx % TX_RING_SIZE;

	ep->tx_skbuff[entry] = skb;
	ep->tx_ring[entry].bufaddr = pci_map_single(ep->pci_dev, skb->data,
		 			            skb->len, PCI_DMA_TODEVICE);
	if (free_count < TX_QUEUE_LEN/2) {/* Typical path */
		ctrl_word = 0x100000; /* No interrupt */
	} else if (free_count == TX_QUEUE_LEN/2) {
		ctrl_word = 0x140000; /* Tx-done intr. */
	} else if (free_count < TX_QUEUE_LEN - 1) {
		ctrl_word = 0x100000; /* No Tx-done intr. */
	} else {
		/* Leave room for an additional entry. */
		ctrl_word = 0x140000; /* Tx-done intr. */
		ep->tx_full = 1;
	}
	ep->tx_ring[entry].buflength = ctrl_word | skb->len;
	/* This write hands the descriptor to the chip. */
	ep->tx_ring[entry].txstatus =
		((skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN) << 16)
			    | DescOwn;

	ep->cur_tx++;
	if (ep->tx_full)
		netif_stop_queue(dev);

	spin_unlock_irqrestore(&ep->lock, flags);
	/* Trigger an immediate transmit demand. */
	ew32(COMMAND, TxQueued);

	if (debug > 4)
		netdev_dbg(dev, "Queued Tx packet size %d to slot %d, flag %2.2x Tx status %8.8x.\n",
			   skb->len, entry, ctrl_word, er32(TxSTAT));

	return NETDEV_TX_OK;
}
993 
994 static void epic_tx_error(struct net_device *dev, struct epic_private *ep,
995 			  int status)
996 {
997 	struct net_device_stats *stats = &dev->stats;
998 
999 #ifndef final_version
1000 	/* There was an major error, log it. */
1001 	if (debug > 1)
1002 		netdev_dbg(dev, "Transmit error, Tx status %8.8x.\n",
1003 			   status);
1004 #endif
1005 	stats->tx_errors++;
1006 	if (status & 0x1050)
1007 		stats->tx_aborted_errors++;
1008 	if (status & 0x0008)
1009 		stats->tx_carrier_errors++;
1010 	if (status & 0x0040)
1011 		stats->tx_window_errors++;
1012 	if (status & 0x0010)
1013 		stats->tx_fifo_errors++;
1014 }
1015 
/* Reclaim completed Tx descriptors: free their skbs, update statistics,
   and re-wake the transmit queue once enough ring space is recovered.
   The cur/dirty indices only ever grow, so 'cur - dirty' is the number
   of in-flight descriptors even across unsigned wraparound. */
static void epic_tx(struct net_device *dev, struct epic_private *ep)
{
	unsigned int dirty_tx, cur_tx;

	/*
	 * Note: if this lock becomes a problem we can narrow the locked
	 * region at the cost of occasionally grabbing the lock more times.
	 */
	cur_tx = ep->cur_tx;
	for (dirty_tx = ep->dirty_tx; cur_tx - dirty_tx > 0; dirty_tx++) {
		struct sk_buff *skb;
		int entry = dirty_tx % TX_RING_SIZE;
		int txstatus = ep->tx_ring[entry].txstatus;

		if (txstatus & DescOwn)
			break;	/* It still hasn't been Txed */

		/* The driver treats status bit 0 as "transmitted OK";
		   bits 8-11 carry the collision count for this frame. */
		if (likely(txstatus & 0x0001)) {
			dev->stats.collisions += (txstatus >> 8) & 15;
			dev->stats.tx_packets++;
			dev->stats.tx_bytes += ep->tx_skbuff[entry]->len;
		} else
			epic_tx_error(dev, ep, txstatus);

		/* Free the original skb. */
		skb = ep->tx_skbuff[entry];
		pci_unmap_single(ep->pci_dev, ep->tx_ring[entry].bufaddr,
				 skb->len, PCI_DMA_TODEVICE);
		dev_kfree_skb_irq(skb);
		ep->tx_skbuff[entry] = NULL;
	}

#ifndef final_version
	/* Sanity check: more than a full ring outstanding means the
	   pointers are out of sync; force them back together. */
	if (cur_tx - dirty_tx > TX_RING_SIZE) {
		netdev_warn(dev, "Out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
			    dirty_tx, cur_tx, ep->tx_full);
		dirty_tx += TX_RING_SIZE;
	}
#endif
	ep->dirty_tx = dirty_tx;
	/* Hysteresis: only restart the queue once at least 4 extra slots
	   are free, so we don't bounce stop/wake on every completion. */
	if (ep->tx_full && cur_tx - dirty_tx < TX_QUEUE_LEN - 4) {
		/* The ring is no longer full, allow new TX entries. */
		ep->tx_full = 0;
		netif_wake_queue(dev);
	}
}
1062 
/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread.  Rx/Tx processing itself is deferred to NAPI;
   this handler only acknowledges events, schedules the poll routine,
   and deals with rare error conditions inline. */
static irqreturn_t epic_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct epic_private *ep = netdev_priv(dev);
	void __iomem *ioaddr = ep->ioaddr;
	unsigned int handled = 0;
	int status;

	status = er32(INTSTAT);
	/* Acknowledge all of the current interrupt sources ASAP. */
	ew32(INTSTAT, status & EpicNormalEvent);

	if (debug > 4) {
		netdev_dbg(dev, "Interrupt, status=%#8.8x new intstat=%#8.8x.\n",
			   status, er32(INTSTAT));
	}

	/* Not our interrupt (the line may be shared). */
	if ((status & IntrSummary) == 0)
		goto out;

	handled = 1;

	/* Rx/Tx work pending: mask the NAPI interrupt sources and hand
	   off to epic_poll(); napi_lock serializes against the poll
	   routine's re-enable path. */
	if (status & EpicNapiEvent) {
		spin_lock(&ep->napi_lock);
		if (napi_schedule_prep(&ep->napi)) {
			epic_napi_irq_off(dev, ep);
			__napi_schedule(&ep->napi);
		}
		spin_unlock(&ep->napi_lock);
	}
	status &= ~EpicNapiEvent;

	/* Check uncommon events all at once. */
	if (status & (CntFull | TxUnderrun | PCIBusErr170 | PCIBusErr175)) {
		struct net_device_stats *stats = &dev->stats;

		/* EpicRemoved status: the hardware is gone -- presumably a
		   CardBus eject; nothing more can be done.  TODO confirm. */
		if (status == EpicRemoved)
			goto out;

		/* Always update the error counts to avoid overhead later. */
		stats->rx_missed_errors	+= er8(MPCNT);
		stats->rx_frame_errors	+= er8(ALICNT);
		stats->rx_crc_errors	+= er8(CRCCNT);

		if (status & TxUnderrun) { /* Tx FIFO underflow. */
			stats->tx_fifo_errors++;
			/* Raise the fill threshold before restarting so the
			   underrun is less likely to recur. */
			ew32(TxThresh, ep->tx_threshold += 128);
			/* Restart the transmit process. */
			ew32(COMMAND, RestartTx);
		}
		if (status & PCIBusErr170) {
			netdev_err(dev, "PCI Bus Error! status %4.4x.\n",
				   status);
			epic_pause(dev);
			epic_restart(dev);
		}
		/* Clear all error sources. */
		ew32(INTSTAT, status & 0x7f18);
	}

out:
	if (debug > 3) {
		netdev_dbg(dev, "exit interrupt, intr_status=%#4.4x.\n",
			   status);
	}

	return IRQ_RETVAL(handled);
}
1133 
/* Receive up to 'budget' frames from the Rx ring and pass them to the
   stack, then refill the ring with fresh buffers.  Runs in NAPI poll
   context.  Returns the amount of work done (frames plus refills). */
static int epic_rx(struct net_device *dev, int budget)
{
	struct epic_private *ep = netdev_priv(dev);
	int entry = ep->cur_rx % RX_RING_SIZE;
	/* Never look at more slots than currently hold a buffer. */
	int rx_work_limit = ep->dirty_rx + RX_RING_SIZE - ep->cur_rx;
	int work_done = 0;

	if (debug > 4)
		netdev_dbg(dev, " In epic_rx(), entry %d %8.8x.\n", entry,
			   ep->rx_ring[entry].rxstatus);

	if (rx_work_limit > budget)
		rx_work_limit = budget;

	/* If we own the next entry, it's a new packet. Send it up. */
	while ((ep->rx_ring[entry].rxstatus & DescOwn) == 0) {
		int status = ep->rx_ring[entry].rxstatus;

		if (debug > 4)
			netdev_dbg(dev, "  epic_rx() status was %8.8x.\n",
				   status);
		if (--rx_work_limit < 0)
			break;
		/* 0x2000: frame spanned multiple buffers; 0x0006: receive
		   error bits (per the branches below). */
		if (status & 0x2006) {
			if (debug > 2)
				netdev_dbg(dev, "epic_rx() error status was %8.8x.\n",
					   status);
			if (status & 0x2000) {
				netdev_warn(dev, "Oversized Ethernet frame spanned multiple buffers, status %4.4x!\n",
					    status);
				dev->stats.rx_length_errors++;
			} else if (status & 0x0006)
				/* Rx Frame errors are counted in hardware. */
				dev->stats.rx_errors++;
		} else {
			/* Malloc up new buffer, compatible with net-2e. */
			/* Omit the four octet CRC from the length. */
			short pkt_len = (status >> 16) - 4;
			struct sk_buff *skb;

			if (pkt_len > PKT_BUF_SZ - 4) {
				netdev_err(dev, "Oversized Ethernet frame, status %x %d bytes.\n",
					   status, pkt_len);
				pkt_len = 1514;	/* clamp to a maximal Ethernet frame */
			}
			/* Check if the packet is long enough to accept without copying
			   to a minimally-sized skbuff. */
			if (pkt_len < rx_copybreak &&
			    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
				/* Small frame: copy into a fresh small skb and
				   keep the (synced) ring buffer mapped -- cheaper
				   than unmapping/remapping for tiny packets. */
				skb_reserve(skb, 2);	/* 16 byte align the IP header */
				pci_dma_sync_single_for_cpu(ep->pci_dev,
							    ep->rx_ring[entry].bufaddr,
							    ep->rx_buf_sz,
							    PCI_DMA_FROMDEVICE);
				skb_copy_to_linear_data(skb, ep->rx_skbuff[entry]->data, pkt_len);
				skb_put(skb, pkt_len);
				pci_dma_sync_single_for_device(ep->pci_dev,
							       ep->rx_ring[entry].bufaddr,
							       ep->rx_buf_sz,
							       PCI_DMA_FROMDEVICE);
			} else {
				/* Large frame (or small-skb allocation failed):
				   hand the ring buffer itself to the stack; the
				   refill loop below replaces it. */
				pci_unmap_single(ep->pci_dev,
					ep->rx_ring[entry].bufaddr,
					ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
				skb_put(skb = ep->rx_skbuff[entry], pkt_len);
				ep->rx_skbuff[entry] = NULL;
			}
			skb->protocol = eth_type_trans(skb, dev);
			netif_receive_skb(skb);
			dev->stats.rx_packets++;
			dev->stats.rx_bytes += pkt_len;
		}
		work_done++;
		entry = (++ep->cur_rx) % RX_RING_SIZE;
	}

	/* Refill the Rx ring buffers. */
	for (; ep->cur_rx - ep->dirty_rx > 0; ep->dirty_rx++) {
		entry = ep->dirty_rx % RX_RING_SIZE;
		if (ep->rx_skbuff[entry] == NULL) {
			struct sk_buff *skb;
			skb = ep->rx_skbuff[entry] = netdev_alloc_skb(dev, ep->rx_buf_sz + 2);
			if (skb == NULL)
				break;
			skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
			ep->rx_ring[entry].bufaddr = pci_map_single(ep->pci_dev,
				skb->data, ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
			work_done++;
		}
		/* AV: shouldn't we add a barrier here? */
		/* NOTE(review): bufaddr is written above with no write barrier
		   before DescOwn hands the slot back to the chip -- looks racy
		   on weakly-ordered CPUs; confirm against the DMA API docs. */
		ep->rx_ring[entry].rxstatus = DescOwn;
	}
	return work_done;
}
1228 
1229 static void epic_rx_err(struct net_device *dev, struct epic_private *ep)
1230 {
1231 	void __iomem *ioaddr = ep->ioaddr;
1232 	int status;
1233 
1234 	status = er32(INTSTAT);
1235 
1236 	if (status == EpicRemoved)
1237 		return;
1238 	if (status & RxOverflow) 	/* Missed a Rx frame. */
1239 		dev->stats.rx_errors++;
1240 	if (status & (RxOverflow | RxFull))
1241 		ew16(COMMAND, RxQueued);
1242 }
1243 
/* NAPI poll routine: reclaim finished Tx slots, receive up to 'budget'
   frames, handle Rx error conditions, then re-enable the NAPI interrupt
   sources once all pending work is done. */
static int epic_poll(struct napi_struct *napi, int budget)
{
	struct epic_private *ep = container_of(napi, struct epic_private, napi);
	struct net_device *dev = ep->mii.dev;
	void __iomem *ioaddr = ep->ioaddr;
	int work_done;

	epic_tx(dev, ep);

	work_done = epic_rx(dev, budget);

	epic_rx_err(dev, ep);

	/* Budget not exhausted: leave polled mode.  Ack any stale NAPI
	   events and unmask their interrupt sources under napi_lock so
	   this cannot race with epic_interrupt(). */
	if (work_done < budget && napi_complete_done(napi, work_done)) {
		unsigned long flags;

		spin_lock_irqsave(&ep->napi_lock, flags);

		ew32(INTSTAT, EpicNapiEvent);
		epic_napi_irq_on(dev, ep);
		spin_unlock_irqrestore(&ep->napi_lock, flags);
	}

	return work_done;
}
1269 
/* ndo_stop: quiesce the device and release all Rx/Tx resources.
   Teardown order matters: stop the stack queue and NAPI first, then the
   timer and interrupts, and only then unmap and free the ring buffers. */
static int epic_close(struct net_device *dev)
{
	struct epic_private *ep = netdev_priv(dev);
	struct pci_dev *pdev = ep->pci_dev;
	void __iomem *ioaddr = ep->ioaddr;
	struct sk_buff *skb;
	int i;

	netif_stop_queue(dev);
	napi_disable(&ep->napi);

	if (debug > 1)
		netdev_dbg(dev, "Shutting down ethercard, status was %2.2x.\n",
			   er32(INTSTAT));

	del_timer_sync(&ep->timer);

	epic_disable_int(dev, ep);

	free_irq(pdev->irq, dev);

	epic_pause(dev);

	/* Free all the skbuffs in the Rx queue. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		skb = ep->rx_skbuff[i];
		ep->rx_skbuff[i] = NULL;
		ep->rx_ring[i].rxstatus = 0;		/* Not owned by Epic chip. */
		ep->rx_ring[i].buflength = 0;
		if (skb) {
			pci_unmap_single(pdev, ep->rx_ring[i].bufaddr,
					 ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
			dev_kfree_skb(skb);
		}
		ep->rx_ring[i].bufaddr = 0xBADF00D0; /* An invalid address. */
	}
	/* Release any skbs still queued for transmit. */
	for (i = 0; i < TX_RING_SIZE; i++) {
		skb = ep->tx_skbuff[i];
		ep->tx_skbuff[i] = NULL;
		if (!skb)
			continue;
		pci_unmap_single(pdev, ep->tx_ring[i].bufaddr, skb->len,
				 PCI_DMA_TODEVICE);
		dev_kfree_skb(skb);
	}

	/* Green! Leave the chip in low-power mode. */
	ew32(GENCTL, 0x0008);

	return 0;
}
1321 
1322 static struct net_device_stats *epic_get_stats(struct net_device *dev)
1323 {
1324 	struct epic_private *ep = netdev_priv(dev);
1325 	void __iomem *ioaddr = ep->ioaddr;
1326 
1327 	if (netif_running(dev)) {
1328 		struct net_device_stats *stats = &dev->stats;
1329 
1330 		stats->rx_missed_errors	+= er8(MPCNT);
1331 		stats->rx_frame_errors	+= er8(ALICNT);
1332 		stats->rx_crc_errors	+= er8(CRCCNT);
1333 	}
1334 
1335 	return &dev->stats;
1336 }
1337 
1338 /* Set or clear the multicast filter for this adaptor.
1339    Note that we only use exclusion around actually queueing the
1340    new frame, not around filling ep->setup_frame.  This is non-deterministic
1341    when re-entered but still correct. */
1342 
1343 static void set_rx_mode(struct net_device *dev)
1344 {
1345 	struct epic_private *ep = netdev_priv(dev);
1346 	void __iomem *ioaddr = ep->ioaddr;
1347 	unsigned char mc_filter[8];		 /* Multicast hash filter */
1348 	int i;
1349 
1350 	if (dev->flags & IFF_PROMISC) {			/* Set promiscuous. */
1351 		ew32(RxCtrl, 0x002c);
1352 		/* Unconditionally log net taps. */
1353 		memset(mc_filter, 0xff, sizeof(mc_filter));
1354 	} else if ((!netdev_mc_empty(dev)) || (dev->flags & IFF_ALLMULTI)) {
1355 		/* There is apparently a chip bug, so the multicast filter
1356 		   is never enabled. */
1357 		/* Too many to filter perfectly -- accept all multicasts. */
1358 		memset(mc_filter, 0xff, sizeof(mc_filter));
1359 		ew32(RxCtrl, 0x000c);
1360 	} else if (netdev_mc_empty(dev)) {
1361 		ew32(RxCtrl, 0x0004);
1362 		return;
1363 	} else {					/* Never executed, for now. */
1364 		struct netdev_hw_addr *ha;
1365 
1366 		memset(mc_filter, 0, sizeof(mc_filter));
1367 		netdev_for_each_mc_addr(ha, dev) {
1368 			unsigned int bit_nr =
1369 				ether_crc_le(ETH_ALEN, ha->addr) & 0x3f;
1370 			mc_filter[bit_nr >> 3] |= (1 << bit_nr);
1371 		}
1372 	}
1373 	/* ToDo: perhaps we need to stop the Tx and Rx process here? */
1374 	if (memcmp(mc_filter, ep->mc_filter, sizeof(mc_filter))) {
1375 		for (i = 0; i < 4; i++)
1376 			ew16(MC0 + i*4, ((u16 *)mc_filter)[i]);
1377 		memcpy(ep->mc_filter, mc_filter, sizeof(mc_filter));
1378 	}
1379 }
1380 
1381 static void netdev_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
1382 {
1383 	struct epic_private *np = netdev_priv(dev);
1384 
1385 	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
1386 	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
1387 	strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
1388 }
1389 
1390 static int netdev_get_link_ksettings(struct net_device *dev,
1391 				     struct ethtool_link_ksettings *cmd)
1392 {
1393 	struct epic_private *np = netdev_priv(dev);
1394 
1395 	spin_lock_irq(&np->lock);
1396 	mii_ethtool_get_link_ksettings(&np->mii, cmd);
1397 	spin_unlock_irq(&np->lock);
1398 
1399 	return 0;
1400 }
1401 
1402 static int netdev_set_link_ksettings(struct net_device *dev,
1403 				     const struct ethtool_link_ksettings *cmd)
1404 {
1405 	struct epic_private *np = netdev_priv(dev);
1406 	int rc;
1407 
1408 	spin_lock_irq(&np->lock);
1409 	rc = mii_ethtool_set_link_ksettings(&np->mii, cmd);
1410 	spin_unlock_irq(&np->lock);
1411 
1412 	return rc;
1413 }
1414 
1415 static int netdev_nway_reset(struct net_device *dev)
1416 {
1417 	struct epic_private *np = netdev_priv(dev);
1418 	return mii_nway_restart(&np->mii);
1419 }
1420 
1421 static u32 netdev_get_link(struct net_device *dev)
1422 {
1423 	struct epic_private *np = netdev_priv(dev);
1424 	return mii_link_ok(&np->mii);
1425 }
1426 
/* ethtool msglevel maps directly onto the module-wide 'debug' knob. */
static u32 netdev_get_msglevel(struct net_device *dev)
{
	return debug;
}
1431 
/* ethtool msglevel setter; affects all devices driven by this module. */
static void netdev_set_msglevel(struct net_device *dev, u32 value)
{
	debug = value;
}
1436 
1437 static int ethtool_begin(struct net_device *dev)
1438 {
1439 	struct epic_private *ep = netdev_priv(dev);
1440 	void __iomem *ioaddr = ep->ioaddr;
1441 
1442 	/* power-up, if interface is down */
1443 	if (!netif_running(dev)) {
1444 		ew32(GENCTL, 0x0200);
1445 		ew32(NVCTL, (er32(NVCTL) & ~0x003c) | 0x4800);
1446 	}
1447 	return 0;
1448 }
1449 
1450 static void ethtool_complete(struct net_device *dev)
1451 {
1452 	struct epic_private *ep = netdev_priv(dev);
1453 	void __iomem *ioaddr = ep->ioaddr;
1454 
1455 	/* power-down, if interface is down */
1456 	if (!netif_running(dev)) {
1457 		ew32(GENCTL, 0x0008);
1458 		ew32(NVCTL, (er32(NVCTL) & ~0x483c) | 0x0000);
1459 	}
1460 }
1461 
/* ethtool operations exported by this driver.  begin/complete handle
   powering the chip up and down around accesses while the interface is
   closed; link-settings accessors serialize on ep->lock internally. */
static const struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo		= netdev_get_drvinfo,
	.nway_reset		= netdev_nway_reset,
	.get_link		= netdev_get_link,
	.get_msglevel		= netdev_get_msglevel,
	.set_msglevel		= netdev_set_msglevel,
	.begin			= ethtool_begin,
	.complete		= ethtool_complete,
	.get_link_ksettings	= netdev_get_link_ksettings,
	.set_link_ksettings	= netdev_set_link_ksettings,
};
1473 
1474 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1475 {
1476 	struct epic_private *np = netdev_priv(dev);
1477 	void __iomem *ioaddr = np->ioaddr;
1478 	struct mii_ioctl_data *data = if_mii(rq);
1479 	int rc;
1480 
1481 	/* power-up, if interface is down */
1482 	if (! netif_running(dev)) {
1483 		ew32(GENCTL, 0x0200);
1484 		ew32(NVCTL, (er32(NVCTL) & ~0x003c) | 0x4800);
1485 	}
1486 
1487 	/* all non-ethtool ioctls (the SIOC[GS]MIIxxx ioctls) */
1488 	spin_lock_irq(&np->lock);
1489 	rc = generic_mii_ioctl(&np->mii, data, cmd, NULL);
1490 	spin_unlock_irq(&np->lock);
1491 
1492 	/* power-down, if interface is down */
1493 	if (! netif_running(dev)) {
1494 		ew32(GENCTL, 0x0008);
1495 		ew32(NVCTL, (er32(NVCTL) & ~0x483c) | 0x0000);
1496 	}
1497 	return rc;
1498 }
1499 
1500 
1501 static void epic_remove_one(struct pci_dev *pdev)
1502 {
1503 	struct net_device *dev = pci_get_drvdata(pdev);
1504 	struct epic_private *ep = netdev_priv(dev);
1505 
1506 	pci_free_consistent(pdev, TX_TOTAL_SIZE, ep->tx_ring, ep->tx_ring_dma);
1507 	pci_free_consistent(pdev, RX_TOTAL_SIZE, ep->rx_ring, ep->rx_ring_dma);
1508 	unregister_netdev(dev);
1509 	pci_iounmap(pdev, ep->ioaddr);
1510 	pci_release_regions(pdev);
1511 	free_netdev(dev);
1512 	pci_disable_device(pdev);
1513 	/* pci_power_off(pdev, -1); */
1514 }
1515 
1516 
1517 #ifdef CONFIG_PM
1518 
1519 static int epic_suspend (struct pci_dev *pdev, pm_message_t state)
1520 {
1521 	struct net_device *dev = pci_get_drvdata(pdev);
1522 	struct epic_private *ep = netdev_priv(dev);
1523 	void __iomem *ioaddr = ep->ioaddr;
1524 
1525 	if (!netif_running(dev))
1526 		return 0;
1527 	epic_pause(dev);
1528 	/* Put the chip into low-power mode. */
1529 	ew32(GENCTL, 0x0008);
1530 	/* pci_power_off(pdev, -1); */
1531 	return 0;
1532 }
1533 
1534 
/* PM resume: bring a previously-running interface back up by
 * reprogramming the chip from driver state. */
static int epic_resume (struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (!netif_running(dev))
		return 0;

	epic_restart(dev);
	/* pci_power_on(pdev); */
	return 0;
}
1545 
1546 #endif /* CONFIG_PM */
1547 
1548 
/* PCI driver glue: probe/remove plus optional legacy suspend/resume. */
static struct pci_driver epic_driver = {
	.name		= DRV_NAME,
	.id_table	= epic_pci_tbl,
	.probe		= epic_init_one,
	.remove		= epic_remove_one,
#ifdef CONFIG_PM
	.suspend	= epic_suspend,
	.resume		= epic_resume,
#endif /* CONFIG_PM */
};
1559 
1560 
/* Module entry point: print the version banner (module builds only)
   and register the PCI driver. */
static int __init epic_init (void)
{
/* when a module, this is printed whether or not devices are found in probe */
#ifdef MODULE
	pr_info("%s%s\n", version, version2);
#endif

	return pci_register_driver(&epic_driver);
}
1570 
1571 
/* Module exit point: unregistering the PCI driver removes all devices. */
static void __exit epic_cleanup (void)
{
	pci_unregister_driver (&epic_driver);
}
1576 
1577 
1578 module_init(epic_init);
1579 module_exit(epic_cleanup);
1580