1 /* epic100.c: A SMC 83c170 EPIC/100 Fast Ethernet driver for Linux. */
2 /*
3 	Written/copyright 1997-2001 by Donald Becker.
4 
5 	This software may be used and distributed according to the terms of
6 	the GNU General Public License (GPL), incorporated herein by reference.
7 	Drivers based on or derived from this code fall under the GPL and must
8 	retain the authorship, copyright and license notice.  This file is not
9 	a complete program and may only be used when the entire operating
10 	system is licensed under the GPL.
11 
12 	This driver is for the SMC83c170/175 "EPIC" series, as used on the
13 	SMC EtherPower II 9432 PCI adapter, and several CardBus cards.
14 
15 	The author may be reached as becker@scyld.com, or C/O
16 	Scyld Computing Corporation
17 	410 Severn Ave., Suite 210
18 	Annapolis MD 21403
19 
20 	Information and updates available at
21 	http://www.scyld.com/network/epic100.html
22 	[this link no longer provides anything useful -jgarzik]
23 
24 	---------------------------------------------------------------------
25 
26 */
27 
28 #define DRV_NAME        "epic100"
29 #define DRV_VERSION     "2.1"
30 #define DRV_RELDATE     "Sept 11, 2006"
31 
32 /* The user-configurable values.
33    These may be modified when a driver module is loaded.*/
34 
35 static int debug = 1;			/* 1 normal messages, 0 quiet .. 7 verbose. */
36 
37 /* Used to pass the full-duplex flag, etc. */
38 #define MAX_UNITS 8		/* More are supported, limit only on options */
39 static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
40 static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
41 
42 /* Set the copy breakpoint for the copy-only-tiny-frames scheme.
43    Setting to > 1518 effectively disables this feature. */
44 static int rx_copybreak;
45 
46 /* Operational parameters that are set at compile time. */
47 
48 /* Keep the ring sizes a power of two for operational efficiency.
49    The compiler will convert <unsigned>'%'<2^N> into a bit mask.
50    Making the Tx ring too large decreases the effectiveness of channel
51    bonding and packet priority.
52    There are no ill effects from too-large receive rings. */
53 #define TX_RING_SIZE	256
54 #define TX_QUEUE_LEN	240		/* Limit ring entries actually used.  */
55 #define RX_RING_SIZE	256
56 #define TX_TOTAL_SIZE	TX_RING_SIZE*sizeof(struct epic_tx_desc)
57 #define RX_TOTAL_SIZE	RX_RING_SIZE*sizeof(struct epic_rx_desc)
58 
59 /* Operational parameters that usually are not changed. */
60 /* Time in jiffies before concluding the transmitter is hung. */
61 #define TX_TIMEOUT  (2*HZ)
62 
63 #define PKT_BUF_SZ		1536			/* Size of each temporary Rx buffer.*/
64 
65 /* Bytes transferred to chip before transmission starts. */
66 /* Initial threshold, increased on underflow, rounded down to 4 byte units. */
67 #define TX_FIFO_THRESH 256
68 #define RX_FIFO_THRESH 1		/* 0-3, 0==32, 64,96, or 3==128 bytes  */
69 
70 #include <linux/module.h>
71 #include <linux/kernel.h>
72 #include <linux/string.h>
73 #include <linux/timer.h>
74 #include <linux/errno.h>
75 #include <linux/ioport.h>
76 #include <linux/interrupt.h>
77 #include <linux/pci.h>
78 #include <linux/delay.h>
79 #include <linux/netdevice.h>
80 #include <linux/etherdevice.h>
81 #include <linux/skbuff.h>
82 #include <linux/init.h>
83 #include <linux/spinlock.h>
84 #include <linux/ethtool.h>
85 #include <linux/mii.h>
86 #include <linux/crc32.h>
87 #include <linux/bitops.h>
88 #include <asm/io.h>
89 #include <linux/uaccess.h>
90 #include <asm/byteorder.h>
91 
92 /* These identify the driver base version and may not be removed. */
93 static char version[] =
94 DRV_NAME ".c:v1.11 1/7/2001 Written by Donald Becker <becker@scyld.com>";
95 static char version2[] =
96 "  (unofficial 2.4.x kernel port, version " DRV_VERSION ", " DRV_RELDATE ")";
97 
98 MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
99 MODULE_DESCRIPTION("SMC 83c170 EPIC series Ethernet driver");
100 MODULE_LICENSE("GPL");
101 
102 module_param(debug, int, 0);
103 module_param(rx_copybreak, int, 0);
104 module_param_array(options, int, NULL, 0);
105 module_param_array(full_duplex, int, NULL, 0);
106 MODULE_PARM_DESC(debug, "EPIC/100 debug level (0-5)");
107 MODULE_PARM_DESC(options, "EPIC/100: Bits 0-3: media type, bit 4: full duplex");
108 MODULE_PARM_DESC(rx_copybreak, "EPIC/100 copy breakpoint for copy-only-tiny-frames");
109 MODULE_PARM_DESC(full_duplex, "EPIC/100 full duplex setting(s) (1)");
110 
111 /*
112 				Theory of Operation
113 
114 I. Board Compatibility
115 
116 This device driver is designed for the SMC "EPIC/100", the SMC
117 single-chip Ethernet controllers for PCI.  This chip is used on
118 the SMC EtherPower II boards.
119 
120 II. Board-specific settings
121 
122 PCI bus devices are configured by the system at boot time, so no jumpers
123 need to be set on the board.  The system BIOS will assign the
124 PCI INTA signal to a (preferably otherwise unused) system IRQ line.
125 Note: Kernel versions earlier than 1.3.73 do not support shared PCI
126 interrupt lines.
127 
128 III. Driver operation
129 
130 IIIa. Ring buffers
131 
132 IVb. References
133 
134 http://www.smsc.com/media/Downloads_Public/discontinued/83c171.pdf
135 http://www.smsc.com/media/Downloads_Public/discontinued/83c175.pdf
136 http://scyld.com/expert/NWay.html
137 http://www.national.com/pf/DP/DP83840A.html
138 
139 IVc. Errata
140 
141 */
142 
143 
144 enum chip_capability_flags { MII_PWRDWN=1, TYPE2_INTR=2, NO_MII=4 };
145 
146 #define EPIC_TOTAL_SIZE 0x100
147 #define USE_IO_OPS 1
148 
149 #ifdef USE_IO_OPS
150 #define EPIC_BAR	0
151 #else
152 #define EPIC_BAR	1
153 #endif
154 
155 typedef enum {
156 	SMSC_83C170_0,
157 	SMSC_83C170,
158 	SMSC_83C175,
159 } chip_t;
160 
161 
/* Per-chip-variant description, indexed by chip_t. */
struct epic_chip_info {
	const char *name;	/* Human-readable board/chip name. */
	int drv_flags;		/* Driver use, intended as capability flags. */
};
166 
167 
168 /* indexed by chip_t */
169 static const struct epic_chip_info pci_id_tbl[] = {
170 	{ "SMSC EPIC/100 83c170",	TYPE2_INTR | NO_MII | MII_PWRDWN },
171 	{ "SMSC EPIC/100 83c170",	TYPE2_INTR },
172 	{ "SMSC EPIC/C 83c175",		TYPE2_INTR | MII_PWRDWN },
173 };
174 
175 
/* PCI IDs this driver binds to; driver_data is the chip_t index into
 * pci_id_tbl.  The first entry matches one specific subsystem ID so that
 * board can get distinct capability flags (SMSC_83C170_0). */
static const struct pci_device_id epic_pci_tbl[] = {
	{ 0x10B8, 0x0005, 0x1092, 0x0AB4, 0, 0, SMSC_83C170_0 },
	{ 0x10B8, 0x0005, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SMSC_83C170 },
	{ 0x10B8, 0x0006, PCI_ANY_ID, PCI_ANY_ID,
	  PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, SMSC_83C175 },
	{ 0,}
};
MODULE_DEVICE_TABLE (pci, epic_pci_tbl);
184 
185 #define ew16(reg, val)	iowrite16(val, ioaddr + (reg))
186 #define ew32(reg, val)	iowrite32(val, ioaddr + (reg))
187 #define er8(reg)	ioread8(ioaddr + (reg))
188 #define er16(reg)	ioread16(ioaddr + (reg))
189 #define er32(reg)	ioread32(ioaddr + (reg))
190 
191 /* Offsets to registers, using the (ugh) SMC names. */
192 enum epic_registers {
193   COMMAND=0, INTSTAT=4, INTMASK=8, GENCTL=0x0C, NVCTL=0x10, EECTL=0x14,
194   PCIBurstCnt=0x18,
195   TEST1=0x1C, CRCCNT=0x20, ALICNT=0x24, MPCNT=0x28,	/* Rx error counters. */
196   MIICtrl=0x30, MIIData=0x34, MIICfg=0x38,
197   LAN0=64,						/* MAC address. */
198   MC0=80,						/* Multicast filter table. */
199   RxCtrl=96, TxCtrl=112, TxSTAT=0x74,
200   PRxCDAR=0x84, RxSTAT=0xA4, EarlyRx=0xB0, PTxCDAR=0xC4, TxThresh=0xDC,
201 };
202 
203 /* Interrupt register bits, using my own meaningful names. */
204 enum IntrStatus {
205 	TxIdle=0x40000, RxIdle=0x20000, IntrSummary=0x010000,
206 	PCIBusErr170=0x7000, PCIBusErr175=0x1000, PhyEvent175=0x8000,
207 	RxStarted=0x0800, RxEarlyWarn=0x0400, CntFull=0x0200, TxUnderrun=0x0100,
208 	TxEmpty=0x0080, TxDone=0x0020, RxError=0x0010,
209 	RxOverflow=0x0008, RxFull=0x0004, RxHeader=0x0002, RxDone=0x0001,
210 };
211 enum CommandBits {
212 	StopRx=1, StartRx=2, TxQueued=4, RxQueued=8,
213 	StopTxDMA=0x20, StopRxDMA=0x40, RestartTx=0x80,
214 };
215 
216 #define EpicRemoved	0xffffffff	/* Chip failed or removed (CardBus) */
217 
218 #define EpicNapiEvent	(TxEmpty | TxDone | \
219 			 RxDone | RxStarted | RxEarlyWarn | RxOverflow | RxFull)
220 #define EpicNormalEvent	(0x0000ffff & ~EpicNapiEvent)
221 
222 static const u16 media2miictl[16] = {
223 	0, 0x0C00, 0x0C00, 0x2000,  0x0100, 0x2100, 0, 0,
224 	0, 0, 0, 0,  0, 0, 0, 0 };
225 
226 /*
227  * The EPIC100 Rx and Tx buffer descriptors.  Note that these
228  * really ARE host-endian; it's not a misannotation.  We tell
229  * the card to byteswap them internally on big-endian hosts -
230  * look for #ifdef __BIG_ENDIAN in epic_open().
231  */
232 
233 struct epic_tx_desc {
234 	u32 txstatus;
235 	u32 bufaddr;
236 	u32 buflength;
237 	u32 next;
238 };
239 
240 struct epic_rx_desc {
241 	u32 rxstatus;
242 	u32 bufaddr;
243 	u32 buflength;
244 	u32 next;
245 };
246 
247 enum desc_status_bits {
248 	DescOwn=0x8000,
249 };
250 
251 #define PRIV_ALIGN	15 	/* Required alignment mask */
/*
 * Per-interface driver state, stored as netdev_priv() of the net_device.
 * The Tx/Rx ring indices (cur_*/dirty_*) only ever increase and are
 * reduced modulo the ring size at the point of use.
 */
struct epic_private {
	struct epic_rx_desc *rx_ring;		/* Coherent Rx descriptor ring. */
	struct epic_tx_desc *tx_ring;		/* Coherent Tx descriptor ring. */
	/* The saved address of a sent-in-place packet/buffer, for skfree(). */
	struct sk_buff* tx_skbuff[TX_RING_SIZE];
	/* The addresses of receive-in-place skbuffs. */
	struct sk_buff* rx_skbuff[RX_RING_SIZE];

	dma_addr_t tx_ring_dma;			/* Bus address of tx_ring. */
	dma_addr_t rx_ring_dma;			/* Bus address of rx_ring. */

	/* Ring pointers. */
	spinlock_t lock;				/* Group with Tx control cache line. */
	spinlock_t napi_lock;
	struct napi_struct napi;
	unsigned int cur_tx, dirty_tx;		/* Tx producer / reclaim indices. */

	unsigned int cur_rx, dirty_rx;		/* Rx consumer / refill indices. */
	u32 irq_mask;				/* Baseline INTMASK value (see epic_napi_irq_*). */
	unsigned int rx_buf_sz;				/* Based on MTU+slack. */

	void __iomem *ioaddr;			/* Mapped chip registers (BAR EPIC_BAR). */
	struct pci_dev *pci_dev;			/* PCI bus location. */
	int chip_id, chip_flags;		/* chip_t index and its drv_flags. */

	struct timer_list timer;			/* Media selection timer. */
	int tx_threshold;			/* Tx FIFO start threshold, in bytes. */
	unsigned char mc_filter[8];		/* Multicast filter setting. */
	signed char phys[4];				/* MII device addresses. */
	u16 advertising;					/* NWay media advertisement */
	int mii_phy_cnt;			/* Number of PHYs found in phys[]. */
	struct mii_if_info mii;
	unsigned int tx_full:1;				/* The Tx queue is full. */
	unsigned int default_port:4;		/* Last dev->if_port value. */
};
287 
288 static int epic_open(struct net_device *dev);
289 static int read_eeprom(struct epic_private *, int);
290 static int mdio_read(struct net_device *dev, int phy_id, int location);
291 static void mdio_write(struct net_device *dev, int phy_id, int loc, int val);
292 static void epic_restart(struct net_device *dev);
293 static void epic_timer(struct timer_list *t);
294 static void epic_tx_timeout(struct net_device *dev);
295 static void epic_init_ring(struct net_device *dev);
296 static netdev_tx_t epic_start_xmit(struct sk_buff *skb,
297 				   struct net_device *dev);
298 static int epic_rx(struct net_device *dev, int budget);
299 static int epic_poll(struct napi_struct *napi, int budget);
300 static irqreturn_t epic_interrupt(int irq, void *dev_instance);
301 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
302 static const struct ethtool_ops netdev_ethtool_ops;
303 static int epic_close(struct net_device *dev);
304 static struct net_device_stats *epic_get_stats(struct net_device *dev);
305 static void set_rx_mode(struct net_device *dev);
306 
/* Standard net_device callbacks; MAC address and address validation use
 * the generic ethernet helpers. */
static const struct net_device_ops epic_netdev_ops = {
	.ndo_open		= epic_open,
	.ndo_stop		= epic_close,
	.ndo_start_xmit		= epic_start_xmit,
	.ndo_tx_timeout 	= epic_tx_timeout,
	.ndo_get_stats		= epic_get_stats,
	.ndo_set_rx_mode	= set_rx_mode,
	.ndo_do_ioctl 		= netdev_ioctl,
	.ndo_set_mac_address 	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};
318 
/*
 * Probe one EPIC/100 PCI device: enable and map it, allocate the Tx/Rx
 * descriptor rings, bring the chip out of low-power mode, read the MAC
 * address from the LAN0 registers, scan the MII bus for transceivers,
 * then register the net_device.  Returns 0 on success or -errno; all
 * resources are unwound on the goto error paths at the bottom.
 */
static int epic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int card_idx = -1;	/* counts probed boards, indexes options[]/full_duplex[] */
	void __iomem *ioaddr;
	int chip_idx = (int) ent->driver_data;
	struct net_device *dev;
	struct epic_private *ep;
	int i, ret, option = 0, duplex = 0;
	void *ring_space;
	dma_addr_t ring_dma;

/* when built into the kernel, we only print version if device is found */
#ifndef MODULE
	pr_info_once("%s%s\n", version, version2);
#endif

	card_idx++;

	ret = pci_enable_device(pdev);
	if (ret)
		goto out;

	if (pci_resource_len(pdev, 0) < EPIC_TOTAL_SIZE) {
		dev_err(&pdev->dev, "no PCI region space\n");
		ret = -ENODEV;
		goto err_out_disable;
	}

	pci_set_master(pdev);

	ret = pci_request_regions(pdev, DRV_NAME);
	if (ret < 0)
		goto err_out_disable;

	ret = -ENOMEM;

	dev = alloc_etherdev(sizeof (*ep));
	if (!dev)
		goto err_out_free_res;

	SET_NETDEV_DEV(dev, &pdev->dev);

	ioaddr = pci_iomap(pdev, EPIC_BAR, 0);
	if (!ioaddr) {
		dev_err(&pdev->dev, "ioremap failed\n");
		goto err_out_free_netdev;
	}

	pci_set_drvdata(pdev, dev);
	ep = netdev_priv(dev);
	ep->ioaddr = ioaddr;
	ep->mii.dev = dev;
	ep->mii.mdio_read = mdio_read;
	ep->mii.mdio_write = mdio_write;
	ep->mii.phy_id_mask = 0x1f;
	ep->mii.reg_num_mask = 0x1f;

	/* Coherent DMA memory for the Tx and then the Rx descriptor ring. */
	ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
	if (!ring_space)
		goto err_out_iounmap;
	ep->tx_ring = ring_space;
	ep->tx_ring_dma = ring_dma;

	ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
	if (!ring_space)
		goto err_out_unmap_tx;
	ep->rx_ring = ring_space;
	ep->rx_ring_dma = ring_dma;

	/* Media/duplex overrides: dev->mem_start takes precedence over the
	   per-card module parameters. */
	if (dev->mem_start) {
		option = dev->mem_start;
		duplex = (dev->mem_start & 16) ? 1 : 0;
	} else if (card_idx >= 0  &&  card_idx < MAX_UNITS) {
		if (options[card_idx] >= 0)
			option = options[card_idx];
		if (full_duplex[card_idx] >= 0)
			duplex = full_duplex[card_idx];
	}

	spin_lock_init(&ep->lock);
	spin_lock_init(&ep->napi_lock);

	/* Bring the chip out of low-power mode. */
	ew32(GENCTL, 0x4200);
	/* Magic?!  If we don't set this bit the MII interface won't work. */
	/* This magic is documented in SMSC app note 7.15 */
	for (i = 16; i > 0; i--)
		ew32(TEST1, 0x0008);

	/* Turn on the MII transceiver. */
	ew32(MIICfg, 0x12);
	if (chip_idx == 1)
		ew32(NVCTL, (er32(NVCTL) & ~0x003c) | 0x4800);
	ew32(GENCTL, 0x0200);

	/* Read the station address, 16 bits at a time, from the LAN0 regs.
	   Note: the '175 does not have a serial EEPROM. */
	for (i = 0; i < 3; i++)
		((__le16 *)dev->dev_addr)[i] = cpu_to_le16(er16(LAN0 + i*4));

	if (debug > 2) {
		dev_dbg(&pdev->dev, "EEPROM contents:\n");
		for (i = 0; i < 64; i++)
			pr_cont(" %4.4x%s", read_eeprom(ep, i),
				   i % 16 == 15 ? "\n" : "");
	}

	ep->pci_dev = pdev;
	ep->chip_id = chip_idx;
	ep->chip_flags = pci_id_tbl[chip_idx].drv_flags;
	ep->irq_mask =
		(ep->chip_flags & TYPE2_INTR ?  PCIBusErr175 : PCIBusErr170)
		 | CntFull | TxUnderrun | EpicNapiEvent;

	/* Find the connected MII xcvrs.
	   Doing this in open() would allow detecting external xcvrs later, but
	   takes much time and no cards have external MII. */
	{
		int phy, phy_idx = 0;
		for (phy = 1; phy < 32 && phy_idx < sizeof(ep->phys); phy++) {
			int mii_status = mdio_read(dev, phy, MII_BMSR);
			/* 0x0000 and 0xffff both mean "no PHY at this address". */
			if (mii_status != 0xffff  &&  mii_status != 0x0000) {
				ep->phys[phy_idx++] = phy;
				dev_info(&pdev->dev,
					"MII transceiver #%d control "
					"%4.4x status %4.4x.\n",
					phy, mdio_read(dev, phy, 0), mii_status);
			}
		}
		ep->mii_phy_cnt = phy_idx;
		if (phy_idx != 0) {
			phy = ep->phys[0];
			ep->mii.advertising = mdio_read(dev, phy, MII_ADVERTISE);
			dev_info(&pdev->dev,
				"Autonegotiation advertising %4.4x link "
				   "partner %4.4x.\n",
				   ep->mii.advertising, mdio_read(dev, phy, 5));
		} else if ( ! (ep->chip_flags & NO_MII)) {
			dev_warn(&pdev->dev,
				"***WARNING***: No MII transceiver found!\n");
			/* Use the known PHY address of the EPII. */
			ep->phys[0] = 3;
		}
		ep->mii.phy_id = ep->phys[0];
	}

	/* Turn off the MII xcvr (175 only!), leave the chip in low-power mode. */
	if (ep->chip_flags & MII_PWRDWN)
		ew32(NVCTL, er32(NVCTL) & ~0x483c);
	ew32(GENCTL, 0x0008);

	/* The lower four bits are the media type. */
	if (duplex) {
		ep->mii.force_media = ep->mii.full_duplex = 1;
		dev_info(&pdev->dev, "Forced full duplex requested.\n");
	}
	dev->if_port = ep->default_port = option;

	/* The Epic-specific entries in the device structure. */
	dev->netdev_ops = &epic_netdev_ops;
	dev->ethtool_ops = &netdev_ethtool_ops;
	dev->watchdog_timeo = TX_TIMEOUT;
	netif_napi_add(dev, &ep->napi, epic_poll, 64);

	ret = register_netdev(dev);
	if (ret < 0)
		goto err_out_unmap_rx;

	netdev_info(dev, "%s at %lx, IRQ %d, %pM\n",
		    pci_id_tbl[chip_idx].name,
		    (long)pci_resource_start(pdev, EPIC_BAR), pdev->irq,
		    dev->dev_addr);

out:
	return ret;

err_out_unmap_rx:
	pci_free_consistent(pdev, RX_TOTAL_SIZE, ep->rx_ring, ep->rx_ring_dma);
err_out_unmap_tx:
	pci_free_consistent(pdev, TX_TOTAL_SIZE, ep->tx_ring, ep->tx_ring_dma);
err_out_iounmap:
	pci_iounmap(pdev, ioaddr);
err_out_free_netdev:
	free_netdev(dev);
err_out_free_res:
	pci_release_regions(pdev);
err_out_disable:
	pci_disable_device(pdev);
	goto out;
}
508 
509 /* Serial EEPROM section. */
510 
511 /*  EEPROM_Ctrl bits. */
512 #define EE_SHIFT_CLK	0x04	/* EEPROM shift clock. */
513 #define EE_CS			0x02	/* EEPROM chip select. */
514 #define EE_DATA_WRITE	0x08	/* EEPROM chip data in. */
515 #define EE_WRITE_0		0x01
516 #define EE_WRITE_1		0x09
517 #define EE_DATA_READ	0x10	/* EEPROM chip data out. */
518 #define EE_ENB			(0x0001 | EE_CS)
519 
520 /* Delay between EEPROM clock transitions.
521    This serves to flush the operation to the PCI bus.
522  */
523 
524 #define eeprom_delay()	er32(EECTL)
525 
526 /* The EEPROM commands include the alway-set leading bit. */
527 #define EE_WRITE_CMD	(5 << 6)
528 #define EE_READ64_CMD	(6 << 6)
529 #define EE_READ256_CMD	(6 << 8)
530 #define EE_ERASE_CMD	(7 << 6)
531 
532 static void epic_disable_int(struct net_device *dev, struct epic_private *ep)
533 {
534 	void __iomem *ioaddr = ep->ioaddr;
535 
536 	ew32(INTMASK, 0x00000000);
537 }
538 
/* Flush a posted MMIO write by reading back INTMASK.  When USE_IO_OPS
 * is defined the driver uses port I/O, which is not posted, so this
 * compiles to nothing. */
static inline void __epic_pci_commit(void __iomem *ioaddr)
{
#ifndef USE_IO_OPS
	er32(INTMASK);
#endif
}
545 
/* Mask only the NAPI-handled interrupt sources (EpicNapiEvent), leaving
 * the rest of irq_mask enabled, and flush the posted write so the new
 * mask is in effect before we return. */
static inline void epic_napi_irq_off(struct net_device *dev,
				     struct epic_private *ep)
{
	void __iomem *ioaddr = ep->ioaddr;

	ew32(INTMASK, ep->irq_mask & ~EpicNapiEvent);
	__epic_pci_commit(ioaddr);
}
554 
/* Re-enable the NAPI interrupt sources on top of the baseline mask. */
static inline void epic_napi_irq_on(struct net_device *dev,
				    struct epic_private *ep)
{
	void __iomem *ioaddr = ep->ioaddr;

	/* No need to commit possible posted write */
	ew32(INTMASK, ep->irq_mask | EpicNapiEvent);
}
563 
/* Bit-bang one 16-bit word out of the serial EEPROM at @location.
 * EECTL bit 0x40 selects the 64x16 command format, otherwise 256x16.
 * (The '175 has no EEPROM, so this is only meaningful on '170 boards.) */
static int read_eeprom(struct epic_private *ep, int location)
{
	void __iomem *ioaddr = ep->ioaddr;
	int i;
	int retval = 0;
	int read_cmd = location |
		(er32(EECTL) & 0x40 ? EE_READ64_CMD : EE_READ256_CMD);

	/* Select the chip: pulse CS low, then high with the enable bit set. */
	ew32(EECTL, EE_ENB & ~EE_CS);
	ew32(EECTL, EE_ENB);

	/* Shift the read command bits out. */
	for (i = 12; i >= 0; i--) {
		short dataval = (read_cmd & (1 << i)) ? EE_WRITE_1 : EE_WRITE_0;
		ew32(EECTL, EE_ENB | dataval);
		eeprom_delay();
		ew32(EECTL, EE_ENB | dataval | EE_SHIFT_CLK);
		eeprom_delay();
	}
	ew32(EECTL, EE_ENB);

	/* Clock the 16 data bits back in, MSB first. */
	for (i = 16; i > 0; i--) {
		ew32(EECTL, EE_ENB | EE_SHIFT_CLK);
		eeprom_delay();
		retval = (retval << 1) | ((er32(EECTL) & EE_DATA_READ) ? 1 : 0);
		ew32(EECTL, EE_ENB);
		eeprom_delay();
	}

	/* Terminate the EEPROM access. */
	ew32(EECTL, EE_ENB & ~EE_CS);
	return retval;
}
597 
598 #define MII_READOP		1
599 #define MII_WRITEOP		2
/* Read a 16-bit MII management register via the chip's MII controller.
 * Issues the read command and polls until the chip clears MII_READOP;
 * returns 0xffff if the operation never completes. */
static int mdio_read(struct net_device *dev, int phy_id, int location)
{
	struct epic_private *ep = netdev_priv(dev);
	void __iomem *ioaddr = ep->ioaddr;
	int read_cmd = (phy_id << 9) | (location << 4) | MII_READOP;
	int i;

	ew32(MIICtrl, read_cmd);
	/* Typical operation takes 25 loops. */
	for (i = 400; i > 0; i--) {
		barrier();
		if ((er32(MIICtrl) & MII_READOP) == 0) {
			/* Work around read failure bug: a 0xffff result for
			   low registers of PHY 1 is retried as spurious. */
			if (phy_id == 1 && location < 6 &&
			    er16(MIIData) == 0xffff) {
				ew32(MIICtrl, read_cmd);
				continue;
			}
			return er16(MIIData);
		}
	}
	return 0xffff;
}
623 
/* Write a 16-bit MII management register: load the data, issue the
 * write command, then poll (bounded) until MII_WRITEOP clears.
 * Completion is not reported to the caller. */
static void mdio_write(struct net_device *dev, int phy_id, int loc, int value)
{
	struct epic_private *ep = netdev_priv(dev);
	void __iomem *ioaddr = ep->ioaddr;
	int i;

	ew16(MIIData, value);
	ew32(MIICtrl, (phy_id << 9) | (loc << 4) | MII_WRITEOP);
	for (i = 10000; i > 0; i--) {
		barrier();
		if ((er32(MIICtrl) & MII_WRITEOP) == 0)
			break;
	}
}
638 
639 
/*
 * Bring the interface up: soft-reset the chip, hook the (shared) IRQ,
 * build the descriptor rings, program MAC address/thresholds/duplex,
 * start the receiver, enable interrupts and arm the media-check timer.
 * The hardware write order below is deliberate — see inline comments.
 */
static int epic_open(struct net_device *dev)
{
	struct epic_private *ep = netdev_priv(dev);
	void __iomem *ioaddr = ep->ioaddr;
	const int irq = ep->pci_dev->irq;
	int rc, i;

	/* Soft reset the chip. */
	ew32(GENCTL, 0x4001);

	napi_enable(&ep->napi);
	rc = request_irq(irq, epic_interrupt, IRQF_SHARED, dev->name, dev);
	if (rc) {
		napi_disable(&ep->napi);
		return rc;
	}

	epic_init_ring(dev);

	ew32(GENCTL, 0x4000);
	/* This magic is documented in SMSC app note 7.15 */
	for (i = 16; i > 0; i--)
		ew32(TEST1, 0x0008);

	/* Pull the chip out of low-power mode, enable interrupts, and set for
	   PCI read multiple.  The MIIcfg setting and strange write order are
	   required by the details of which bits are reset and the transceiver
	   wiring on the Ositech CardBus card.
	*/
#if 0
	ew32(MIICfg, dev->if_port == 1 ? 0x13 : 0x12);
#endif
	if (ep->chip_flags & MII_PWRDWN)
		ew32(NVCTL, (er32(NVCTL) & ~0x003c) | 0x4800);

	/* Tell the chip to byteswap descriptors on big-endian hosts */
#ifdef __BIG_ENDIAN
	ew32(GENCTL, 0x4432 | (RX_FIFO_THRESH << 8));
	er32(GENCTL);
	ew32(GENCTL, 0x0432 | (RX_FIFO_THRESH << 8));
#else
	ew32(GENCTL, 0x4412 | (RX_FIFO_THRESH << 8));
	er32(GENCTL);
	ew32(GENCTL, 0x0412 | (RX_FIFO_THRESH << 8));
#endif

	udelay(20); /* Looks like EPII needs that if you want reliable RX init. FIXME: pci posting bug? */

	/* Load the station address into the LAN0 registers, 16 bits at a time. */
	for (i = 0; i < 3; i++)
		ew32(LAN0 + i*4, le16_to_cpu(((__le16*)dev->dev_addr)[i]));

	ep->tx_threshold = TX_FIFO_THRESH;
	ew32(TxThresh, ep->tx_threshold);

	/* Either force the PHY per the requested media type, or derive the
	   duplex setting from the MII link-partner ability register. */
	if (media2miictl[dev->if_port & 15]) {
		if (ep->mii_phy_cnt)
			mdio_write(dev, ep->phys[0], MII_BMCR, media2miictl[dev->if_port&15]);
		if (dev->if_port == 1) {
			if (debug > 1)
				netdev_info(dev, "Using the 10base2 transceiver, MII status %4.4x.\n",
					    mdio_read(dev, ep->phys[0], MII_BMSR));
		}
	} else {
		int mii_lpa = mdio_read(dev, ep->phys[0], MII_LPA);
		if (mii_lpa != 0xffff) {
			if ((mii_lpa & LPA_100FULL) || (mii_lpa & 0x01C0) == LPA_10FULL)
				ep->mii.full_duplex = 1;
			else if (! (mii_lpa & LPA_LPACK))
				/* No autonegotiation result yet: (re)start it. */
				mdio_write(dev, ep->phys[0], MII_BMCR, BMCR_ANENABLE|BMCR_ANRESTART);
			if (debug > 1)
				netdev_info(dev, "Setting %s-duplex based on MII xcvr %d register read of %4.4x.\n",
					    ep->mii.full_duplex ? "full"
								: "half",
					    ep->phys[0], mii_lpa);
		}
	}

	ew32(TxCtrl, ep->mii.full_duplex ? 0x7f : 0x79);
	ew32(PRxCDAR, ep->rx_ring_dma);
	ew32(PTxCDAR, ep->tx_ring_dma);

	/* Start the chip's Rx process. */
	set_rx_mode(dev);
	ew32(COMMAND, StartRx | RxQueued);

	netif_start_queue(dev);

	/* Enable interrupts by setting the interrupt mask. */
	ew32(INTMASK, RxError | RxHeader | EpicNapiEvent | CntFull |
	     ((ep->chip_flags & TYPE2_INTR) ? PCIBusErr175 : PCIBusErr170) |
	     TxUnderrun);

	if (debug > 1) {
		netdev_dbg(dev, "epic_open() ioaddr %p IRQ %d status %4.4x %s-duplex.\n",
			   ioaddr, irq, er32(GENCTL),
			   ep->mii.full_duplex ? "full" : "half");
	}

	/* Set the timer to switch to check for link beat and perhaps switch
	   to an alternate media type. */
	timer_setup(&ep->timer, epic_timer, 0);
	ep->timer.expires = jiffies + 3*HZ;
	add_timer(&ep->timer);

	return rc;
}
746 
/* Quiesce the chip: stop the queue, mask all interrupts, halt the Rx/Tx
   DMA engines, fold the hardware error counters into dev->stats, and
   drain any completed Rx packets.  This may occur at interrupt time. */
static void epic_pause(struct net_device *dev)
{
	struct net_device_stats *stats = &dev->stats;
	struct epic_private *ep = netdev_priv(dev);
	void __iomem *ioaddr = ep->ioaddr;

	netif_stop_queue (dev);

	/* Disable interrupts by clearing the interrupt mask. */
	ew32(INTMASK, 0x00000000);
	/* Stop the chip's Tx and Rx DMA processes. */
	ew16(COMMAND, StopRx | StopTxDMA | StopRxDMA);

	/* Update the error counts; an all-ones read means the chip has been
	   removed (CardBus) — cf. EpicRemoved. */
	if (er16(COMMAND) != 0xffff) {
		stats->rx_missed_errors	+= er8(MPCNT);
		stats->rx_frame_errors	+= er8(ALICNT);
		stats->rx_crc_errors	+= er8(CRCCNT);
	}

	/* Remove the packets on the Rx queue. */
	epic_rx(dev, RX_RING_SIZE);
}
772 
/* Soft-reset and fully reprogram the chip, resuming the Rx and Tx rings
   at their current positions.  Used to recover from errors, e.g. from
   the Tx timeout handler. */
static void epic_restart(struct net_device *dev)
{
	struct epic_private *ep = netdev_priv(dev);
	void __iomem *ioaddr = ep->ioaddr;
	int i;

	/* Soft reset the chip. */
	ew32(GENCTL, 0x4001);

	netdev_dbg(dev, "Restarting the EPIC chip, Rx %d/%d Tx %d/%d.\n",
		   ep->cur_rx, ep->dirty_rx, ep->dirty_tx, ep->cur_tx);
	udelay(1);

	/* This magic is documented in SMSC app note 7.15 */
	for (i = 16; i > 0; i--)
		ew32(TEST1, 0x0008);

	/* Descriptor byteswap setting must match epic_open(). */
#ifdef __BIG_ENDIAN
	ew32(GENCTL, 0x0432 | (RX_FIFO_THRESH << 8));
#else
	ew32(GENCTL, 0x0412 | (RX_FIFO_THRESH << 8));
#endif
	ew32(MIICfg, dev->if_port == 1 ? 0x13 : 0x12);
	if (ep->chip_flags & MII_PWRDWN)
		ew32(NVCTL, (er32(NVCTL) & ~0x003c) | 0x4800);

	/* Reload the station address into the LAN0 registers. */
	for (i = 0; i < 3; i++)
		ew32(LAN0 + i*4, le16_to_cpu(((__le16*)dev->dev_addr)[i]));

	ep->tx_threshold = TX_FIFO_THRESH;
	ew32(TxThresh, ep->tx_threshold);
	ew32(TxCtrl, ep->mii.full_duplex ? 0x7f : 0x79);
	/* Point the chip at the descriptors for the current ring positions. */
	ew32(PRxCDAR, ep->rx_ring_dma +
	     (ep->cur_rx % RX_RING_SIZE) * sizeof(struct epic_rx_desc));
	ew32(PTxCDAR, ep->tx_ring_dma +
	     (ep->dirty_tx % TX_RING_SIZE) * sizeof(struct epic_tx_desc));

	/* Start the chip's Rx process. */
	set_rx_mode(dev);
	ew32(COMMAND, StartRx | RxQueued);

	/* Enable interrupts by setting the interrupt mask. */
	ew32(INTMASK, RxError | RxHeader | EpicNapiEvent | CntFull |
	     ((ep->chip_flags & TYPE2_INTR) ? PCIBusErr175 : PCIBusErr170) |
	     TxUnderrun);

	netdev_dbg(dev, "epic_restart() done, cmd status %4.4x, ctl %4.4x interrupt %4.4x.\n",
		   er32(COMMAND), er32(GENCTL), er32(INTSTAT));
}
822 
823 static void check_media(struct net_device *dev)
824 {
825 	struct epic_private *ep = netdev_priv(dev);
826 	void __iomem *ioaddr = ep->ioaddr;
827 	int mii_lpa = ep->mii_phy_cnt ? mdio_read(dev, ep->phys[0], MII_LPA) : 0;
828 	int negotiated = mii_lpa & ep->mii.advertising;
829 	int duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
830 
831 	if (ep->mii.force_media)
832 		return;
833 	if (mii_lpa == 0xffff)		/* Bogus read */
834 		return;
835 	if (ep->mii.full_duplex != duplex) {
836 		ep->mii.full_duplex = duplex;
837 		netdev_info(dev, "Setting %s-duplex based on MII #%d link partner capability of %4.4x.\n",
838 			    ep->mii.full_duplex ? "full" : "half",
839 			    ep->phys[0], mii_lpa);
840 		ew32(TxCtrl, ep->mii.full_duplex ? 0x7F : 0x79);
841 	}
842 }
843 
844 static void epic_timer(struct timer_list *t)
845 {
846 	struct epic_private *ep = from_timer(ep, t, timer);
847 	struct net_device *dev = ep->mii.dev;
848 	void __iomem *ioaddr = ep->ioaddr;
849 	int next_tick = 5*HZ;
850 
851 	if (debug > 3) {
852 		netdev_dbg(dev, "Media monitor tick, Tx status %8.8x.\n",
853 			   er32(TxSTAT));
854 		netdev_dbg(dev, "Other registers are IntMask %4.4x IntStatus %4.4x RxStatus %4.4x.\n",
855 			   er32(INTMASK), er32(INTSTAT), er32(RxSTAT));
856 	}
857 
858 	check_media(dev);
859 
860 	ep->timer.expires = jiffies + next_tick;
861 	add_timer(&ep->timer);
862 }
863 
/* Watchdog handler: the transmitter has been idle longer than
   dev->watchdog_timeo.  On a plain Tx FIFO underflow just restart the
   transmitter; otherwise reset and reprogram the whole chip. */
static void epic_tx_timeout(struct net_device *dev)
{
	struct epic_private *ep = netdev_priv(dev);
	void __iomem *ioaddr = ep->ioaddr;

	if (debug > 0) {
		netdev_warn(dev, "Transmit timeout using MII device, Tx status %4.4x.\n",
			    er16(TxSTAT));
		if (debug > 1) {
			netdev_dbg(dev, "Tx indices: dirty_tx %d, cur_tx %d.\n",
				   ep->dirty_tx, ep->cur_tx);
		}
	}
	if (er16(TxSTAT) & 0x10) {		/* Tx FIFO underflow. */
		dev->stats.tx_fifo_errors++;
		ew32(COMMAND, RestartTx);
	} else {
		epic_restart(dev);
		/* Kick the transmitter to resume the queued descriptors. */
		ew32(COMMAND, TxQueued);
	}

	netif_trans_update(dev); /* prevent tx timeout */
	dev->stats.tx_errors++;
	if (!ep->tx_full)
		netif_wake_queue(dev);
}
890 
/* Initialize the Rx and Tx rings, along with various 'dev' bits.
   Rx descriptors are linked into a circular list, populated with fresh
   skbuffs and handed to the chip; Tx descriptors are linked but left
   owned by the host until epic_start_xmit() fills them. */
static void epic_init_ring(struct net_device *dev)
{
	struct epic_private *ep = netdev_priv(dev);
	int i;

	ep->tx_full = 0;
	ep->dirty_tx = ep->cur_tx = 0;
	ep->cur_rx = ep->dirty_rx = 0;
	/* Default 1536-byte buffers for a standard MTU, else MTU plus slack. */
	ep->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);

	/* Initialize all Rx descriptors. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		ep->rx_ring[i].rxstatus = 0;
		ep->rx_ring[i].buflength = ep->rx_buf_sz;
		ep->rx_ring[i].next = ep->rx_ring_dma +
				      (i+1)*sizeof(struct epic_rx_desc);
		ep->rx_skbuff[i] = NULL;
	}
	/* Mark the last entry as wrapping the ring. */
	ep->rx_ring[i-1].next = ep->rx_ring_dma;

	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb = netdev_alloc_skb(dev, ep->rx_buf_sz + 2);
		ep->rx_skbuff[i] = skb;
		if (skb == NULL)
			break;
		skb_reserve(skb, 2);	/* 16 byte align the IP header. */
		/* NOTE(review): the pci_map_single() result is not checked
		   with a mapping-error helper — a mapping failure would hand
		   the chip a bogus bus address.  Confirm/fix separately. */
		ep->rx_ring[i].bufaddr = pci_map_single(ep->pci_dev,
			skb->data, ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
		ep->rx_ring[i].rxstatus = DescOwn;	/* Hand it to the chip. */
	}
	/* If allocation stopped short, record the deficit (wraps as unsigned)
	   so the refill path knows how far dirty_rx lags cur_rx. */
	ep->dirty_rx = (unsigned int)(i - RX_RING_SIZE);

	/* The Tx buffer descriptor is filled in as needed, but we
	   do need to clear the ownership bit. */
	for (i = 0; i < TX_RING_SIZE; i++) {
		ep->tx_skbuff[i] = NULL;
		ep->tx_ring[i].txstatus = 0x0000;
		ep->tx_ring[i].next = ep->tx_ring_dma +
			(i+1)*sizeof(struct epic_tx_desc);
	}
	ep->tx_ring[i-1].next = ep->tx_ring_dma;
}
936 
/* Queue one packet for transmission.  Always returns NETDEV_TX_OK: the
   skb is either queued on the Tx ring or (on padding failure) already
   freed by skb_padto().  Stops the queue when the ring fills. */
static netdev_tx_t epic_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct epic_private *ep = netdev_priv(dev);
	void __iomem *ioaddr = ep->ioaddr;
	int entry, free_count;
	u32 ctrl_word;
	unsigned long flags;

	/* Pad short frames to the minimum Ethernet length. */
	if (skb_padto(skb, ETH_ZLEN))
		return NETDEV_TX_OK;

	/* Caution: the write order is important here, set the field with the
	   "ownership" bit last. */

	/* Calculate the next Tx descriptor entry. */
	spin_lock_irqsave(&ep->lock, flags);
	/* Despite its name, free_count is the number of in-flight entries
	   (cur_tx - dirty_tx); it drives the interrupt mitigation below. */
	free_count = ep->cur_tx - ep->dirty_tx;
	entry = ep->cur_tx % TX_RING_SIZE;

	ep->tx_skbuff[entry] = skb;
	ep->tx_ring[entry].bufaddr = pci_map_single(ep->pci_dev, skb->data,
		 			            skb->len, PCI_DMA_TODEVICE);
	/* Interrupt mitigation: only request a Tx-done interrupt at the
	   queue's half-way mark and when the ring is about to fill. */
	if (free_count < TX_QUEUE_LEN/2) {/* Typical path */
		ctrl_word = 0x100000; /* No interrupt */
	} else if (free_count == TX_QUEUE_LEN/2) {
		ctrl_word = 0x140000; /* Tx-done intr. */
	} else if (free_count < TX_QUEUE_LEN - 1) {
		ctrl_word = 0x100000; /* No Tx-done intr. */
	} else {
		/* Leave room for an additional entry. */
		ctrl_word = 0x140000; /* Tx-done intr. */
		ep->tx_full = 1;
	}
	ep->tx_ring[entry].buflength = ctrl_word | skb->len;
	/* Setting DescOwn hands the descriptor to the chip — last write. */
	ep->tx_ring[entry].txstatus =
		((skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN) << 16)
			    | DescOwn;

	ep->cur_tx++;
	if (ep->tx_full)
		netif_stop_queue(dev);

	spin_unlock_irqrestore(&ep->lock, flags);
	/* Trigger an immediate transmit demand. */
	ew32(COMMAND, TxQueued);

	if (debug > 4)
		netdev_dbg(dev, "Queued Tx packet size %d to slot %d, flag %2.2x Tx status %8.8x.\n",
			   skb->len, entry, ctrl_word, er32(TxSTAT));

	return NETDEV_TX_OK;
}
989 
/* Decode a failed Tx descriptor status word into netdev error counters.
 * The bit masks are EPIC Tx status bits; the per-bit meanings below are
 * inferred from the counters they feed -- TODO confirm against the
 * SMC 83c170 datasheet.
 */
static void epic_tx_error(struct net_device *dev, struct epic_private *ep,
			  int status)
{
	struct net_device_stats *stats = &dev->stats;

#ifndef final_version
	/* There was an major error, log it. */
	if (debug > 1)
		netdev_dbg(dev, "Transmit error, Tx status %8.8x.\n",
			   status);
#endif
	stats->tx_errors++;
	if (status & 0x1050)		/* presumably abort conditions */
		stats->tx_aborted_errors++;
	if (status & 0x0008)		/* presumably carrier lost */
		stats->tx_carrier_errors++;
	if (status & 0x0040)		/* presumably late collision */
		stats->tx_window_errors++;
	if (status & 0x0010)		/* presumably FIFO underrun */
		stats->tx_fifo_errors++;
}
1011 
/* Reclaim completed Tx descriptors.  Walks from dirty_tx toward cur_tx,
 * stopping at the first descriptor the chip still owns; for each
 * released entry it updates statistics, unmaps the DMA buffer, frees
 * the skb, and finally wakes the queue once enough ring space is free.
 * Called from epic_poll() (NAPI context).
 */
static void epic_tx(struct net_device *dev, struct epic_private *ep)
{
	unsigned int dirty_tx, cur_tx;

	/*
	 * Note: if this lock becomes a problem we can narrow the locked
	 * region at the cost of occasionally grabbing the lock more times.
	 */
	cur_tx = ep->cur_tx;
	for (dirty_tx = ep->dirty_tx; cur_tx - dirty_tx > 0; dirty_tx++) {
		struct sk_buff *skb;
		int entry = dirty_tx % TX_RING_SIZE;
		int txstatus = ep->tx_ring[entry].txstatus;

		if (txstatus & DescOwn)
			break;	/* It still hasn't been Txed */

		/* Bit 0 presumably means "transmitted OK" -- TODO confirm
		   against the 83c170 datasheet. */
		if (likely(txstatus & 0x0001)) {
			dev->stats.collisions += (txstatus >> 8) & 15;
			dev->stats.tx_packets++;
			dev->stats.tx_bytes += ep->tx_skbuff[entry]->len;
		} else
			epic_tx_error(dev, ep, txstatus);

		/* Free the original skb. */
		skb = ep->tx_skbuff[entry];
		pci_unmap_single(ep->pci_dev, ep->tx_ring[entry].bufaddr,
				 skb->len, PCI_DMA_TODEVICE);
		dev_consume_skb_irq(skb);
		ep->tx_skbuff[entry] = NULL;
	}

#ifndef final_version
	/* Sanity check: dirty_tx should never lag cur_tx by more than a
	   full ring. */
	if (cur_tx - dirty_tx > TX_RING_SIZE) {
		netdev_warn(dev, "Out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
			    dirty_tx, cur_tx, ep->tx_full);
		dirty_tx += TX_RING_SIZE;
	}
#endif
	ep->dirty_tx = dirty_tx;
	if (ep->tx_full && cur_tx - dirty_tx < TX_QUEUE_LEN - 4) {
		/* The ring is no longer full, allow new TX entries. */
		ep->tx_full = 0;
		netif_wake_queue(dev);
	}
}
1058 
/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
/* Top half: acknowledge interrupt sources, defer Rx/Tx work to NAPI,
 * and handle the uncommon error events (counter overflow, Tx underrun,
 * PCI bus errors) inline. */
static irqreturn_t epic_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct epic_private *ep = netdev_priv(dev);
	void __iomem *ioaddr = ep->ioaddr;
	unsigned int handled = 0;
	int status;

	status = er32(INTSTAT);
	/* Acknowledge all of the current interrupt sources ASAP. */
	ew32(INTSTAT, status & EpicNormalEvent);

	if (debug > 4) {
		netdev_dbg(dev, "Interrupt, status=%#8.8x new intstat=%#8.8x.\n",
			   status, er32(INTSTAT));
	}

	if ((status & IntrSummary) == 0)
		goto out;	/* Not our interrupt (shared line). */

	handled = 1;

	/* Rx/Tx work: disable the NAPI-handled sources and schedule the
	   poll routine; epic_poll() re-enables them when done. */
	if (status & EpicNapiEvent) {
		spin_lock(&ep->napi_lock);
		if (napi_schedule_prep(&ep->napi)) {
			epic_napi_irq_off(dev, ep);
			__napi_schedule(&ep->napi);
		}
		spin_unlock(&ep->napi_lock);
	}
	status &= ~EpicNapiEvent;

	/* Check uncommon events all at once. */
	if (status & (CntFull | TxUnderrun | PCIBusErr170 | PCIBusErr175)) {
		struct net_device_stats *stats = &dev->stats;

		/* presumably all-ones status means the card was pulled --
		   TODO confirm EpicRemoved semantics */
		if (status == EpicRemoved)
			goto out;

		/* Always update the error counts to avoid overhead later. */
		stats->rx_missed_errors	+= er8(MPCNT);
		stats->rx_frame_errors	+= er8(ALICNT);
		stats->rx_crc_errors	+= er8(CRCCNT);

		if (status & TxUnderrun) { /* Tx FIFO underflow. */
			stats->tx_fifo_errors++;
			/* Raise the FIFO threshold to avoid repeats. */
			ew32(TxThresh, ep->tx_threshold += 128);
			/* Restart the transmit process. */
			ew32(COMMAND, RestartTx);
		}
		if (status & PCIBusErr170) {
			netdev_err(dev, "PCI Bus Error! status %4.4x.\n",
				   status);
			epic_pause(dev);
			epic_restart(dev);
		}
		/* Clear all error sources. */
		ew32(INTSTAT, status & 0x7f18);
	}

out:
	if (debug > 3) {
		netdev_dbg(dev, "exit interrupt, intr_status=%#4.4x.\n",
			   status);
	}

	return IRQ_RETVAL(handled);
}
1129 
/* Receive up to @budget packets from the Rx ring (NAPI context).
 * Small frames (< rx_copybreak) are copied into a fresh skb so the
 * original DMA buffer can be reused in place; larger frames hand the
 * mapped buffer up directly and the slot is refilled afterwards.
 * Returns the amount of work done (received packets plus refills).
 */
static int epic_rx(struct net_device *dev, int budget)
{
	struct epic_private *ep = netdev_priv(dev);
	int entry = ep->cur_rx % RX_RING_SIZE;
	/* Never process more slots than are currently filled. */
	int rx_work_limit = ep->dirty_rx + RX_RING_SIZE - ep->cur_rx;
	int work_done = 0;

	if (debug > 4)
		netdev_dbg(dev, " In epic_rx(), entry %d %8.8x.\n", entry,
			   ep->rx_ring[entry].rxstatus);

	if (rx_work_limit > budget)
		rx_work_limit = budget;

	/* If we own the next entry, it's a new packet. Send it up. */
	while ((ep->rx_ring[entry].rxstatus & DescOwn) == 0) {
		int status = ep->rx_ring[entry].rxstatus;

		if (debug > 4)
			netdev_dbg(dev, "  epic_rx() status was %8.8x.\n",
				   status);
		if (--rx_work_limit < 0)
			break;
		/* Error bits: 0x2000 = frame spanned buffers (oversized),
		   0x0006 presumably CRC/alignment -- TODO confirm. */
		if (status & 0x2006) {
			if (debug > 2)
				netdev_dbg(dev, "epic_rx() error status was %8.8x.\n",
					   status);
			if (status & 0x2000) {
				netdev_warn(dev, "Oversized Ethernet frame spanned multiple buffers, status %4.4x!\n",
					    status);
				dev->stats.rx_length_errors++;
			} else if (status & 0x0006)
				/* Rx Frame errors are counted in hardware. */
				dev->stats.rx_errors++;
		} else {
			/* Malloc up new buffer, compatible with net-2e. */
			/* Omit the four octet CRC from the length. */
			short pkt_len = (status >> 16) - 4;
			struct sk_buff *skb;

			if (pkt_len > PKT_BUF_SZ - 4) {
				netdev_err(dev, "Oversized Ethernet frame, status %x %d bytes.\n",
					   status, pkt_len);
				pkt_len = 1514;
			}
			/* Check if the packet is long enough to accept without copying
			   to a minimally-sized skbuff. */
			if (pkt_len < rx_copybreak &&
			    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
				skb_reserve(skb, 2);	/* 16 byte align the IP header */
				/* Sync for CPU before copying out of the
				   DMA buffer, and back to the device after
				   so the slot can be reused as-is. */
				pci_dma_sync_single_for_cpu(ep->pci_dev,
							    ep->rx_ring[entry].bufaddr,
							    ep->rx_buf_sz,
							    PCI_DMA_FROMDEVICE);
				skb_copy_to_linear_data(skb, ep->rx_skbuff[entry]->data, pkt_len);
				skb_put(skb, pkt_len);
				pci_dma_sync_single_for_device(ep->pci_dev,
							       ep->rx_ring[entry].bufaddr,
							       ep->rx_buf_sz,
							       PCI_DMA_FROMDEVICE);
			} else {
				/* Hand the mapped buffer straight up; the
				   refill loop below replaces it. */
				pci_unmap_single(ep->pci_dev,
					ep->rx_ring[entry].bufaddr,
					ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
				skb_put(skb = ep->rx_skbuff[entry], pkt_len);
				ep->rx_skbuff[entry] = NULL;
			}
			skb->protocol = eth_type_trans(skb, dev);
			netif_receive_skb(skb);
			dev->stats.rx_packets++;
			dev->stats.rx_bytes += pkt_len;
		}
		work_done++;
		entry = (++ep->cur_rx) % RX_RING_SIZE;
	}

	/* Refill the Rx ring buffers. */
	for (; ep->cur_rx - ep->dirty_rx > 0; ep->dirty_rx++) {
		entry = ep->dirty_rx % RX_RING_SIZE;
		if (ep->rx_skbuff[entry] == NULL) {
			struct sk_buff *skb;
			skb = ep->rx_skbuff[entry] = netdev_alloc_skb(dev, ep->rx_buf_sz + 2);
			if (skb == NULL)
				break;	/* Retry on the next poll. */
			skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
			ep->rx_ring[entry].bufaddr = pci_map_single(ep->pci_dev,
				skb->data, ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
			work_done++;
		}
		/* AV: shouldn't we add a barrier here? */
		ep->rx_ring[entry].rxstatus = DescOwn;
	}
	return work_done;
}
1224 
/* Handle Rx error conditions after a poll pass: count missed frames on
 * overflow and restart the receiver if it stalled (overflow or ring
 * full).  Bails out if the status reads as EpicRemoved (card gone).
 */
static void epic_rx_err(struct net_device *dev, struct epic_private *ep)
{
	void __iomem *ioaddr = ep->ioaddr;
	int status;

	status = er32(INTSTAT);

	if (status == EpicRemoved)
		return;
	if (status & RxOverflow) 	/* Missed a Rx frame. */
		dev->stats.rx_errors++;
	if (status & (RxOverflow | RxFull))
		/* NOTE(review): 16-bit write where other COMMAND writes in
		   this file use ew32 -- verify this is intentional. */
		ew16(COMMAND, RxQueued);
}
1239 
/* NAPI poll routine: reclaim finished Tx descriptors, receive up to
 * @budget packets, then handle Rx error conditions.  If the budget was
 * not exhausted, complete NAPI and re-enable the deferred interrupt
 * sources (acking them first so a pending event re-fires cleanly).
 * Returns the number of Rx packets processed.
 */
static int epic_poll(struct napi_struct *napi, int budget)
{
	struct epic_private *ep = container_of(napi, struct epic_private, napi);
	struct net_device *dev = ep->mii.dev;
	void __iomem *ioaddr = ep->ioaddr;
	int work_done;

	epic_tx(dev, ep);

	work_done = epic_rx(dev, budget);

	epic_rx_err(dev, ep);

	if (work_done < budget && napi_complete_done(napi, work_done)) {
		unsigned long flags;

		/* napi_lock serialises against the interrupt handler's
		   schedule path. */
		spin_lock_irqsave(&ep->napi_lock, flags);

		ew32(INTSTAT, EpicNapiEvent);
		epic_napi_irq_on(dev, ep);
		spin_unlock_irqrestore(&ep->napi_lock, flags);
	}

	return work_done;
}
1265 
/* Shut the interface down: stop the queue and NAPI, kill the timer,
 * mask and release the interrupt, quiesce the chip, free every ring
 * buffer, and leave the chip in low-power mode.  Always returns 0.
 */
static int epic_close(struct net_device *dev)
{
	struct epic_private *ep = netdev_priv(dev);
	struct pci_dev *pdev = ep->pci_dev;
	void __iomem *ioaddr = ep->ioaddr;
	struct sk_buff *skb;
	int i;

	netif_stop_queue(dev);
	napi_disable(&ep->napi);

	if (debug > 1)
		netdev_dbg(dev, "Shutting down ethercard, status was %2.2x.\n",
			   er32(INTSTAT));

	del_timer_sync(&ep->timer);

	/* Mask interrupts before releasing the IRQ so no handler runs
	   against a half-torn-down device. */
	epic_disable_int(dev, ep);

	free_irq(pdev->irq, dev);

	epic_pause(dev);

	/* Free all the skbuffs in the Rx queue. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		skb = ep->rx_skbuff[i];
		ep->rx_skbuff[i] = NULL;
		ep->rx_ring[i].rxstatus = 0;		/* Not owned by Epic chip. */
		ep->rx_ring[i].buflength = 0;
		if (skb) {
			/* Unmap before freeing: the buffer is still DMA-mapped. */
			pci_unmap_single(pdev, ep->rx_ring[i].bufaddr,
					 ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
			dev_kfree_skb(skb);
		}
		ep->rx_ring[i].bufaddr = 0xBADF00D0; /* An invalid address. */
	}
	/* Release any skbs still pending in the Tx ring. */
	for (i = 0; i < TX_RING_SIZE; i++) {
		skb = ep->tx_skbuff[i];
		ep->tx_skbuff[i] = NULL;
		if (!skb)
			continue;
		pci_unmap_single(pdev, ep->tx_ring[i].bufaddr, skb->len,
				 PCI_DMA_TODEVICE);
		dev_kfree_skb(skb);
	}

	/* Green! Leave the chip in low-power mode. */
	ew32(GENCTL, 0x0008);

	return 0;
}
1317 
1318 static struct net_device_stats *epic_get_stats(struct net_device *dev)
1319 {
1320 	struct epic_private *ep = netdev_priv(dev);
1321 	void __iomem *ioaddr = ep->ioaddr;
1322 
1323 	if (netif_running(dev)) {
1324 		struct net_device_stats *stats = &dev->stats;
1325 
1326 		stats->rx_missed_errors	+= er8(MPCNT);
1327 		stats->rx_frame_errors	+= er8(ALICNT);
1328 		stats->rx_crc_errors	+= er8(CRCCNT);
1329 	}
1330 
1331 	return &dev->stats;
1332 }
1333 
1334 /* Set or clear the multicast filter for this adaptor.
1335    Note that we only use exclusion around actually queueing the
1336    new frame, not around filling ep->setup_frame.  This is non-deterministic
1337    when re-entered but still correct. */
1338 
1339 static void set_rx_mode(struct net_device *dev)
1340 {
1341 	struct epic_private *ep = netdev_priv(dev);
1342 	void __iomem *ioaddr = ep->ioaddr;
1343 	unsigned char mc_filter[8];		 /* Multicast hash filter */
1344 	int i;
1345 
1346 	if (dev->flags & IFF_PROMISC) {			/* Set promiscuous. */
1347 		ew32(RxCtrl, 0x002c);
1348 		/* Unconditionally log net taps. */
1349 		memset(mc_filter, 0xff, sizeof(mc_filter));
1350 	} else if ((!netdev_mc_empty(dev)) || (dev->flags & IFF_ALLMULTI)) {
1351 		/* There is apparently a chip bug, so the multicast filter
1352 		   is never enabled. */
1353 		/* Too many to filter perfectly -- accept all multicasts. */
1354 		memset(mc_filter, 0xff, sizeof(mc_filter));
1355 		ew32(RxCtrl, 0x000c);
1356 	} else if (netdev_mc_empty(dev)) {
1357 		ew32(RxCtrl, 0x0004);
1358 		return;
1359 	} else {					/* Never executed, for now. */
1360 		struct netdev_hw_addr *ha;
1361 
1362 		memset(mc_filter, 0, sizeof(mc_filter));
1363 		netdev_for_each_mc_addr(ha, dev) {
1364 			unsigned int bit_nr =
1365 				ether_crc_le(ETH_ALEN, ha->addr) & 0x3f;
1366 			mc_filter[bit_nr >> 3] |= (1 << bit_nr);
1367 		}
1368 	}
1369 	/* ToDo: perhaps we need to stop the Tx and Rx process here? */
1370 	if (memcmp(mc_filter, ep->mc_filter, sizeof(mc_filter))) {
1371 		for (i = 0; i < 4; i++)
1372 			ew16(MC0 + i*4, ((u16 *)mc_filter)[i]);
1373 		memcpy(ep->mc_filter, mc_filter, sizeof(mc_filter));
1374 	}
1375 }
1376 
1377 static void netdev_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
1378 {
1379 	struct epic_private *np = netdev_priv(dev);
1380 
1381 	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
1382 	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
1383 	strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
1384 }
1385 
1386 static int netdev_get_link_ksettings(struct net_device *dev,
1387 				     struct ethtool_link_ksettings *cmd)
1388 {
1389 	struct epic_private *np = netdev_priv(dev);
1390 
1391 	spin_lock_irq(&np->lock);
1392 	mii_ethtool_get_link_ksettings(&np->mii, cmd);
1393 	spin_unlock_irq(&np->lock);
1394 
1395 	return 0;
1396 }
1397 
1398 static int netdev_set_link_ksettings(struct net_device *dev,
1399 				     const struct ethtool_link_ksettings *cmd)
1400 {
1401 	struct epic_private *np = netdev_priv(dev);
1402 	int rc;
1403 
1404 	spin_lock_irq(&np->lock);
1405 	rc = mii_ethtool_set_link_ksettings(&np->mii, cmd);
1406 	spin_unlock_irq(&np->lock);
1407 
1408 	return rc;
1409 }
1410 
1411 static int netdev_nway_reset(struct net_device *dev)
1412 {
1413 	struct epic_private *np = netdev_priv(dev);
1414 	return mii_nway_restart(&np->mii);
1415 }
1416 
1417 static u32 netdev_get_link(struct net_device *dev)
1418 {
1419 	struct epic_private *np = netdev_priv(dev);
1420 	return mii_link_ok(&np->mii);
1421 }
1422 
1423 static u32 netdev_get_msglevel(struct net_device *dev)
1424 {
1425 	return debug;
1426 }
1427 
1428 static void netdev_set_msglevel(struct net_device *dev, u32 value)
1429 {
1430 	debug = value;
1431 }
1432 
1433 static int ethtool_begin(struct net_device *dev)
1434 {
1435 	struct epic_private *ep = netdev_priv(dev);
1436 	void __iomem *ioaddr = ep->ioaddr;
1437 
1438 	/* power-up, if interface is down */
1439 	if (!netif_running(dev)) {
1440 		ew32(GENCTL, 0x0200);
1441 		ew32(NVCTL, (er32(NVCTL) & ~0x003c) | 0x4800);
1442 	}
1443 	return 0;
1444 }
1445 
1446 static void ethtool_complete(struct net_device *dev)
1447 {
1448 	struct epic_private *ep = netdev_priv(dev);
1449 	void __iomem *ioaddr = ep->ioaddr;
1450 
1451 	/* power-down, if interface is down */
1452 	if (!netif_running(dev)) {
1453 		ew32(GENCTL, 0x0008);
1454 		ew32(NVCTL, (er32(NVCTL) & ~0x483c) | 0x0000);
1455 	}
1456 }
1457 
/* ethtool operations table.  Link settings go through the generic MII
 * helpers under ep->lock; begin/complete bracket operations with
 * power-up/power-down when the interface is down. */
static const struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo		= netdev_get_drvinfo,
	.nway_reset		= netdev_nway_reset,
	.get_link		= netdev_get_link,
	.get_msglevel		= netdev_get_msglevel,
	.set_msglevel		= netdev_set_msglevel,
	.begin			= ethtool_begin,
	.complete		= ethtool_complete,
	.get_link_ksettings	= netdev_get_link_ksettings,
	.set_link_ksettings	= netdev_set_link_ksettings,
};
1469 
1470 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1471 {
1472 	struct epic_private *np = netdev_priv(dev);
1473 	void __iomem *ioaddr = np->ioaddr;
1474 	struct mii_ioctl_data *data = if_mii(rq);
1475 	int rc;
1476 
1477 	/* power-up, if interface is down */
1478 	if (! netif_running(dev)) {
1479 		ew32(GENCTL, 0x0200);
1480 		ew32(NVCTL, (er32(NVCTL) & ~0x003c) | 0x4800);
1481 	}
1482 
1483 	/* all non-ethtool ioctls (the SIOC[GS]MIIxxx ioctls) */
1484 	spin_lock_irq(&np->lock);
1485 	rc = generic_mii_ioctl(&np->mii, data, cmd, NULL);
1486 	spin_unlock_irq(&np->lock);
1487 
1488 	/* power-down, if interface is down */
1489 	if (! netif_running(dev)) {
1490 		ew32(GENCTL, 0x0008);
1491 		ew32(NVCTL, (er32(NVCTL) & ~0x483c) | 0x0000);
1492 	}
1493 	return rc;
1494 }
1495 
1496 
1497 static void epic_remove_one(struct pci_dev *pdev)
1498 {
1499 	struct net_device *dev = pci_get_drvdata(pdev);
1500 	struct epic_private *ep = netdev_priv(dev);
1501 
1502 	pci_free_consistent(pdev, TX_TOTAL_SIZE, ep->tx_ring, ep->tx_ring_dma);
1503 	pci_free_consistent(pdev, RX_TOTAL_SIZE, ep->rx_ring, ep->rx_ring_dma);
1504 	unregister_netdev(dev);
1505 	pci_iounmap(pdev, ep->ioaddr);
1506 	pci_release_regions(pdev);
1507 	free_netdev(dev);
1508 	pci_disable_device(pdev);
1509 	/* pci_power_off(pdev, -1); */
1510 }
1511 
1512 
1513 #ifdef CONFIG_PM
1514 
1515 static int epic_suspend (struct pci_dev *pdev, pm_message_t state)
1516 {
1517 	struct net_device *dev = pci_get_drvdata(pdev);
1518 	struct epic_private *ep = netdev_priv(dev);
1519 	void __iomem *ioaddr = ep->ioaddr;
1520 
1521 	if (!netif_running(dev))
1522 		return 0;
1523 	epic_pause(dev);
1524 	/* Put the chip into low-power mode. */
1525 	ew32(GENCTL, 0x0008);
1526 	/* pci_power_off(pdev, -1); */
1527 	return 0;
1528 }
1529 
1530 
/* PCI resume hook: restart the chip if the interface was running. */
static int epic_resume (struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (!netif_running(dev))
		return 0;

	epic_restart(dev);
	/* pci_power_on(pdev); */
	return 0;
}
1541 
1542 #endif /* CONFIG_PM */
1543 
1544 
/* PCI driver glue: probe/remove plus optional legacy suspend/resume. */
static struct pci_driver epic_driver = {
	.name		= DRV_NAME,
	.id_table	= epic_pci_tbl,
	.probe		= epic_init_one,
	.remove		= epic_remove_one,
#ifdef CONFIG_PM
	.suspend	= epic_suspend,
	.resume		= epic_resume,
#endif /* CONFIG_PM */
};
1555 
1556 
1557 static int __init epic_init (void)
1558 {
1559 /* when a module, this is printed whether or not devices are found in probe */
1560 #ifdef MODULE
1561 	pr_info("%s%s\n", version, version2);
1562 #endif
1563 
1564 	return pci_register_driver(&epic_driver);
1565 }
1566 
1567 
1568 static void __exit epic_cleanup (void)
1569 {
1570 	pci_unregister_driver (&epic_driver);
1571 }
1572 
1573 
/* Register the module entry/exit points. */
module_init(epic_init);
module_exit(epic_cleanup);
1576