/* epic100.c: A SMC 83c170 EPIC/100 Fast Ethernet driver for Linux. */
/*
	Written/copyright 1997-2001 by Donald Becker.

	This software may be used and distributed according to the terms of
	the GNU General Public License (GPL), incorporated herein by reference.
	Drivers based on or derived from this code fall under the GPL and must
	retain the authorship, copyright and license notice.  This file is not
	a complete program and may only be used when the entire operating
	system is licensed under the GPL.

	This driver is for the SMC83c170/175 "EPIC" series, as used on the
	SMC EtherPower II 9432 PCI adapter, and several CardBus cards.

	The author may be reached as becker@scyld.com, or C/O
	Scyld Computing Corporation
	410 Severn Ave., Suite 210
	Annapolis MD 21403

	Information and updates available at
	http://www.scyld.com/network/epic100.html
	[this link no longer provides anything useful -jgarzik]

	---------------------------------------------------------------------

*/

#define DRV_NAME        "epic100"
#define DRV_VERSION     "2.1"
#define DRV_RELDATE     "Sept 11, 2006"

/* The user-configurable values.
   These may be modified when a driver module is loaded.*/

static int debug = 1;			/* 1 normal messages, 0 quiet .. 7 verbose. */

/* Used to pass the full-duplex flag, etc. */
#define MAX_UNITS 8		/* More are supported, limit only on options */
static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};

/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
   Setting to > 1518 effectively disables this feature. */
static int rx_copybreak;

/* Operational parameters that are set at compile time. */

/* Keep the ring sizes a power of two for operational efficiency.
   The compiler will convert <unsigned>'%'<2^N> into a bit mask.
   Making the Tx ring too large decreases the effectiveness of channel
   bonding and packet priority.
   There are no ill effects from too-large receive rings. */
#define TX_RING_SIZE	256
#define TX_QUEUE_LEN	240		/* Limit ring entries actually used.  */
#define RX_RING_SIZE	256
#define TX_TOTAL_SIZE	(TX_RING_SIZE * sizeof(struct epic_tx_desc))
#define RX_TOTAL_SIZE	(RX_RING_SIZE * sizeof(struct epic_rx_desc))
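
/*
 * Editorial sketch, not part of the original driver: the modulo-to-bitmask
 * conversion described above only happens when the ring sizes are powers of
 * two, so a compile-time guard can document that assumption.  The C11
 * _Static_assert keyword is used since no kernel header is included yet.
 */
_Static_assert((TX_RING_SIZE & (TX_RING_SIZE - 1)) == 0,
	       "TX_RING_SIZE must be a power of two");
_Static_assert((RX_RING_SIZE & (RX_RING_SIZE - 1)) == 0,
	       "RX_RING_SIZE must be a power of two");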

/* Operational parameters that usually are not changed. */
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (2*HZ)

#define PKT_BUF_SZ		1536			/* Size of each temporary Rx buffer.*/

/* Bytes transferred to chip before transmission starts. */
/* Initial threshold, increased on underflow, rounded down to 4 byte units. */
#define TX_FIFO_THRESH 256
#define RX_FIFO_THRESH 1		/* 0-3: 32, 64, 96, or 128 bytes. */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/crc32.h>
#include <linux/bitops.h>
#include <asm/io.h>
#include <linux/uaccess.h>
#include <asm/byteorder.h>

/* These identify the driver base version and may not be removed. */
static char version[] =
DRV_NAME ".c:v1.11 1/7/2001 Written by Donald Becker <becker@scyld.com>";
static char version2[] =
"  (unofficial 2.4.x kernel port, version " DRV_VERSION ", " DRV_RELDATE ")";

MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("SMC 83c170 EPIC series Ethernet driver");
MODULE_LICENSE("GPL");

module_param(debug, int, 0);
module_param(rx_copybreak, int, 0);
module_param_array(options, int, NULL, 0);
module_param_array(full_duplex, int, NULL, 0);
MODULE_PARM_DESC(debug, "EPIC/100 debug level (0-5)");
MODULE_PARM_DESC(options, "EPIC/100: Bits 0-3: media type, bit 4: full duplex");
MODULE_PARM_DESC(rx_copybreak, "EPIC/100 copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(full_duplex, "EPIC/100 full duplex setting(s) (1)");

/*
				Theory of Operation

I. Board Compatibility

This device driver is designed for the SMC "EPIC/100", the SMC
single-chip Ethernet controllers for PCI.  This chip is used on
the SMC EtherPower II boards.

II. Board-specific settings

PCI bus devices are configured by the system at boot time, so no jumpers
need to be set on the board.  The system BIOS will assign the
PCI INTA signal to a (preferably otherwise unused) system IRQ line.
Note: Kernel versions earlier than 1.3.73 do not support shared PCI
interrupt lines.

III. Driver operation

IIIa. Ring buffers

IVb. References

http://www.smsc.com/media/Downloads_Public/discontinued/83c171.pdf
http://www.smsc.com/media/Downloads_Public/discontinued/83c175.pdf
http://scyld.com/expert/NWay.html
http://www.national.com/pf/DP/DP83840A.html

IVc. Errata

*/


enum chip_capability_flags { MII_PWRDWN=1, TYPE2_INTR=2, NO_MII=4 };

#define EPIC_TOTAL_SIZE 0x100
#define USE_IO_OPS 1

#ifdef USE_IO_OPS
#define EPIC_BAR	0
#else
#define EPIC_BAR	1
#endif

typedef enum {
	SMSC_83C170_0,
	SMSC_83C170,
	SMSC_83C175,
} chip_t;


struct epic_chip_info {
	const char *name;
	int drv_flags;				/* Driver use, intended as capability flags. */
};


/* indexed by chip_t */
static const struct epic_chip_info pci_id_tbl[] = {
	{ "SMSC EPIC/100 83c170",	TYPE2_INTR | NO_MII | MII_PWRDWN },
	{ "SMSC EPIC/100 83c170",	TYPE2_INTR },
	{ "SMSC EPIC/C 83c175",		TYPE2_INTR | MII_PWRDWN },
};


static const struct pci_device_id epic_pci_tbl[] = {
	{ 0x10B8, 0x0005, 0x1092, 0x0AB4, 0, 0, SMSC_83C170_0 },
	{ 0x10B8, 0x0005, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SMSC_83C170 },
	{ 0x10B8, 0x0006, PCI_ANY_ID, PCI_ANY_ID,
	  PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, SMSC_83C175 },
	{ 0,}
};
MODULE_DEVICE_TABLE (pci, epic_pci_tbl);

#define ew16(reg, val)	iowrite16(val, ioaddr + (reg))
#define ew32(reg, val)	iowrite32(val, ioaddr + (reg))
#define er8(reg)	ioread8(ioaddr + (reg))
#define er16(reg)	ioread16(ioaddr + (reg))
#define er32(reg)	ioread32(ioaddr + (reg))
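
/*
 * Editorial example, illustrative only: the accessors above expect a local
 * 'void __iomem *ioaddr' to be in scope, which every function below
 * provides.  A hypothetical helper (not built, hence the #if 0) would read:
 */
#if 0
static u32 example_read_intstat(struct epic_private *ep)
{
	void __iomem *ioaddr = ep->ioaddr;

	return er32(INTSTAT);	/* expands to ioread32(ioaddr + INTSTAT) */
}
#endif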

/* Offsets to registers, using the (ugh) SMC names. */
enum epic_registers {
  COMMAND=0, INTSTAT=4, INTMASK=8, GENCTL=0x0C, NVCTL=0x10, EECTL=0x14,
  PCIBurstCnt=0x18,
  TEST1=0x1C, CRCCNT=0x20, ALICNT=0x24, MPCNT=0x28,	/* Rx error counters. */
  MIICtrl=0x30, MIIData=0x34, MIICfg=0x38,
  LAN0=64,						/* MAC address. */
  MC0=80,						/* Multicast filter table. */
  RxCtrl=96, TxCtrl=112, TxSTAT=0x74,
  PRxCDAR=0x84, RxSTAT=0xA4, EarlyRx=0xB0, PTxCDAR=0xC4, TxThresh=0xDC,
};

/* Interrupt register bits, using my own meaningful names. */
enum IntrStatus {
	TxIdle=0x40000, RxIdle=0x20000, IntrSummary=0x010000,
	PCIBusErr170=0x7000, PCIBusErr175=0x1000, PhyEvent175=0x8000,
	RxStarted=0x0800, RxEarlyWarn=0x0400, CntFull=0x0200, TxUnderrun=0x0100,
	TxEmpty=0x0080, TxDone=0x0020, RxError=0x0010,
	RxOverflow=0x0008, RxFull=0x0004, RxHeader=0x0002, RxDone=0x0001,
};
enum CommandBits {
	StopRx=1, StartRx=2, TxQueued=4, RxQueued=8,
	StopTxDMA=0x20, StopRxDMA=0x40, RestartTx=0x80,
};

#define EpicRemoved	0xffffffff	/* Chip failed or removed (CardBus) */

#define EpicNapiEvent	(TxEmpty | TxDone | \
			 RxDone | RxStarted | RxEarlyWarn | RxOverflow | RxFull)
#define EpicNormalEvent	(0x0000ffff & ~EpicNapiEvent)

static const u16 media2miictl[16] = {
	0, 0x0C00, 0x0C00, 0x2000,  0x0100, 0x2100, 0, 0,
	0, 0, 0, 0,  0, 0, 0, 0 };
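
/*
 * Editorial note: the non-zero entries above appear to be MII BMCR values
 * for forced media, following the old Becker if_port convention; e.g. entry
 * 3 is 0x2000 (BMCR_SPEED100) and entry 5 is 0x2100 (BMCR_SPEED100 |
 * BMCR_FULLDPLX), while 0x0C00 (BMCR_PDOWN | BMCR_ISOLATE) parks the MII
 * for the non-MII media.
 */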

/*
 * The EPIC100 Rx and Tx buffer descriptors.  Note that these
 * really ARE host-endian; it's not a misannotation.  We tell
 * the card to byteswap them internally on big-endian hosts -
 * look for #ifdef __BIG_ENDIAN in epic_open().
 */

struct epic_tx_desc {
	u32 txstatus;
	u32 bufaddr;
	u32 buflength;
	u32 next;
};

struct epic_rx_desc {
	u32 rxstatus;
	u32 bufaddr;
	u32 buflength;
	u32 next;
};

enum desc_status_bits {
	DescOwn=0x8000,
};
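
/*
 * Editorial sketch, not used by the driver: descriptor ownership follows
 * the usual handshake.  The driver sets DescOwn to hand a descriptor to
 * the chip and the chip clears it on completion, which is what epic_rx()
 * and epic_tx() below poll for.  Kept under #if 0 as exposition only.
 */
#if 0
static inline bool epic_host_owns(u32 status)
{
	return !(status & DescOwn);
}
#endif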

#define PRIV_ALIGN	15 	/* Required alignment mask */
struct epic_private {
	struct epic_rx_desc *rx_ring;
	struct epic_tx_desc *tx_ring;
	/* The saved address of a sent-in-place packet/buffer, for skfree(). */
	struct sk_buff* tx_skbuff[TX_RING_SIZE];
	/* The addresses of receive-in-place skbuffs. */
	struct sk_buff* rx_skbuff[RX_RING_SIZE];

	dma_addr_t tx_ring_dma;
	dma_addr_t rx_ring_dma;

	/* Ring pointers. */
	spinlock_t lock;				/* Group with Tx control cache line. */
	spinlock_t napi_lock;
	struct napi_struct napi;
	unsigned int cur_tx, dirty_tx;

	unsigned int cur_rx, dirty_rx;
	u32 irq_mask;
	unsigned int rx_buf_sz;				/* Based on MTU+slack. */

	void __iomem *ioaddr;
	struct pci_dev *pci_dev;			/* PCI bus location. */
	int chip_id, chip_flags;

	struct timer_list timer;			/* Media selection timer. */
	int tx_threshold;
	unsigned char mc_filter[8];
	signed char phys[4];				/* MII device addresses. */
	u16 advertising;					/* NWay media advertisement */
	int mii_phy_cnt;
	u32 ethtool_ops_nesting;
	struct mii_if_info mii;
	unsigned int tx_full:1;				/* The Tx queue is full. */
	unsigned int default_port:4;		/* Last dev->if_port value. */
};

static int epic_open(struct net_device *dev);
static int read_eeprom(struct epic_private *, int);
static int mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int loc, int val);
static void epic_restart(struct net_device *dev);
static void epic_timer(struct timer_list *t);
static void epic_tx_timeout(struct net_device *dev, unsigned int txqueue);
static void epic_init_ring(struct net_device *dev);
static netdev_tx_t epic_start_xmit(struct sk_buff *skb,
				   struct net_device *dev);
static int epic_rx(struct net_device *dev, int budget);
static int epic_poll(struct napi_struct *napi, int budget);
static irqreturn_t epic_interrupt(int irq, void *dev_instance);
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static const struct ethtool_ops netdev_ethtool_ops;
static int epic_close(struct net_device *dev);
static struct net_device_stats *epic_get_stats(struct net_device *dev);
static void set_rx_mode(struct net_device *dev);

static const struct net_device_ops epic_netdev_ops = {
	.ndo_open		= epic_open,
	.ndo_stop		= epic_close,
	.ndo_start_xmit		= epic_start_xmit,
	.ndo_tx_timeout 	= epic_tx_timeout,
	.ndo_get_stats		= epic_get_stats,
	.ndo_set_rx_mode	= set_rx_mode,
	.ndo_eth_ioctl		= netdev_ioctl,
	.ndo_set_mac_address 	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};

static int epic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int card_idx = -1;
	void __iomem *ioaddr;
	int chip_idx = (int) ent->driver_data;
	struct net_device *dev;
	struct epic_private *ep;
	int i, ret, option = 0, duplex = 0;
	__le16 addr[ETH_ALEN / 2];
	void *ring_space;
	dma_addr_t ring_dma;

/* when built into the kernel, we only print version if device is found */
#ifndef MODULE
	pr_info_once("%s%s\n", version, version2);
#endif

	card_idx++;

	ret = pci_enable_device(pdev);
	if (ret)
		goto out;

	if (pci_resource_len(pdev, 0) < EPIC_TOTAL_SIZE) {
		dev_err(&pdev->dev, "no PCI region space\n");
		ret = -ENODEV;
		goto err_out_disable;
	}

	pci_set_master(pdev);

	ret = pci_request_regions(pdev, DRV_NAME);
	if (ret < 0)
		goto err_out_disable;

	ret = -ENOMEM;

	dev = alloc_etherdev(sizeof (*ep));
	if (!dev)
		goto err_out_free_res;

	SET_NETDEV_DEV(dev, &pdev->dev);

	ioaddr = pci_iomap(pdev, EPIC_BAR, 0);
	if (!ioaddr) {
		dev_err(&pdev->dev, "ioremap failed\n");
		goto err_out_free_netdev;
	}

	pci_set_drvdata(pdev, dev);
	ep = netdev_priv(dev);
	ep->ioaddr = ioaddr;
	ep->mii.dev = dev;
	ep->mii.mdio_read = mdio_read;
	ep->mii.mdio_write = mdio_write;
	ep->mii.phy_id_mask = 0x1f;
	ep->mii.reg_num_mask = 0x1f;

	ring_space = dma_alloc_coherent(&pdev->dev, TX_TOTAL_SIZE, &ring_dma,
					GFP_KERNEL);
	if (!ring_space)
		goto err_out_iounmap;
	ep->tx_ring = ring_space;
	ep->tx_ring_dma = ring_dma;

	ring_space = dma_alloc_coherent(&pdev->dev, RX_TOTAL_SIZE, &ring_dma,
					GFP_KERNEL);
	if (!ring_space)
		goto err_out_unmap_tx;
	ep->rx_ring = ring_space;
	ep->rx_ring_dma = ring_dma;

	if (dev->mem_start) {
		option = dev->mem_start;
		duplex = (dev->mem_start & 16) ? 1 : 0;
	} else if (card_idx >= 0  &&  card_idx < MAX_UNITS) {
		if (options[card_idx] >= 0)
			option = options[card_idx];
		if (full_duplex[card_idx] >= 0)
			duplex = full_duplex[card_idx];
	}

	spin_lock_init(&ep->lock);
	spin_lock_init(&ep->napi_lock);

	/* Bring the chip out of low-power mode. */
	ew32(GENCTL, 0x4200);
	/* Magic?!  If we don't set this bit the MII interface won't work. */
	/* This magic is documented in SMSC app note 7.15 */
	for (i = 16; i > 0; i--)
		ew32(TEST1, 0x0008);

	/* Turn on the MII transceiver. */
	ew32(MIICfg, 0x12);
	if (chip_idx == 1)
		ew32(NVCTL, (er32(NVCTL) & ~0x003c) | 0x4800);
	ew32(GENCTL, 0x0200);

	/* Note: the '175 does not have a serial EEPROM. */
	for (i = 0; i < 3; i++)
		addr[i] = cpu_to_le16(er16(LAN0 + i*4));
	eth_hw_addr_set(dev, (u8 *)addr);

	if (debug > 2) {
		dev_dbg(&pdev->dev, "EEPROM contents:\n");
		for (i = 0; i < 64; i++)
			pr_cont(" %4.4x%s", read_eeprom(ep, i),
				   i % 16 == 15 ? "\n" : "");
	}

	ep->pci_dev = pdev;
	ep->chip_id = chip_idx;
	ep->chip_flags = pci_id_tbl[chip_idx].drv_flags;
	ep->irq_mask =
		(ep->chip_flags & TYPE2_INTR ?  PCIBusErr175 : PCIBusErr170)
		 | CntFull | TxUnderrun | EpicNapiEvent;

	/* Find the connected MII xcvrs.
	   Doing this in open() would allow detecting external xcvrs later, but
	   takes much time and no cards have external MII. */
	{
		int phy, phy_idx = 0;
		for (phy = 1; phy < 32 && phy_idx < sizeof(ep->phys); phy++) {
			int mii_status = mdio_read(dev, phy, MII_BMSR);
			if (mii_status != 0xffff  &&  mii_status != 0x0000) {
				ep->phys[phy_idx++] = phy;
				dev_info(&pdev->dev,
					"MII transceiver #%d control "
					"%4.4x status %4.4x.\n",
					phy, mdio_read(dev, phy, 0), mii_status);
			}
		}
		ep->mii_phy_cnt = phy_idx;
		if (phy_idx != 0) {
			phy = ep->phys[0];
			ep->mii.advertising = mdio_read(dev, phy, MII_ADVERTISE);
			dev_info(&pdev->dev,
				"Autonegotiation advertising %4.4x link "
				   "partner %4.4x.\n",
				   ep->mii.advertising, mdio_read(dev, phy, 5));
		} else if ( ! (ep->chip_flags & NO_MII)) {
			dev_warn(&pdev->dev,
				"***WARNING***: No MII transceiver found!\n");
			/* Use the known PHY address of the EPII. */
			ep->phys[0] = 3;
		}
		ep->mii.phy_id = ep->phys[0];
	}

	/* Turn off the MII xcvr (175 only!), leave the chip in low-power mode. */
	if (ep->chip_flags & MII_PWRDWN)
		ew32(NVCTL, er32(NVCTL) & ~0x483c);
	ew32(GENCTL, 0x0008);

	/* The lower four bits are the media type. */
	if (duplex) {
		ep->mii.force_media = ep->mii.full_duplex = 1;
		dev_info(&pdev->dev, "Forced full duplex requested.\n");
	}
	dev->if_port = ep->default_port = option;

	/* The Epic-specific entries in the device structure. */
	dev->netdev_ops = &epic_netdev_ops;
	dev->ethtool_ops = &netdev_ethtool_ops;
	dev->watchdog_timeo = TX_TIMEOUT;
	netif_napi_add(dev, &ep->napi, epic_poll);

	ret = register_netdev(dev);
	if (ret < 0)
		goto err_out_unmap_rx;

	netdev_info(dev, "%s at %lx, IRQ %d, %pM\n",
		    pci_id_tbl[chip_idx].name,
		    (long)pci_resource_start(pdev, EPIC_BAR), pdev->irq,
		    dev->dev_addr);

out:
	return ret;

err_out_unmap_rx:
	dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, ep->rx_ring,
			  ep->rx_ring_dma);
err_out_unmap_tx:
	dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, ep->tx_ring,
			  ep->tx_ring_dma);
err_out_iounmap:
	pci_iounmap(pdev, ioaddr);
err_out_free_netdev:
	free_netdev(dev);
err_out_free_res:
	pci_release_regions(pdev);
err_out_disable:
	pci_disable_device(pdev);
	goto out;
}

/* Serial EEPROM section. */

/*  EEPROM_Ctrl bits. */
#define EE_SHIFT_CLK	0x04	/* EEPROM shift clock. */
#define EE_CS			0x02	/* EEPROM chip select. */
#define EE_DATA_WRITE	0x08	/* EEPROM chip data in. */
#define EE_WRITE_0		0x01
#define EE_WRITE_1		0x09
#define EE_DATA_READ	0x10	/* EEPROM chip data out. */
#define EE_ENB			(0x0001 | EE_CS)

/* Delay between EEPROM clock transitions.
   This serves to flush the operation to the PCI bus.
 */

#define eeprom_delay()	er32(EECTL)

/* The EEPROM commands include the always-set leading bit. */
#define EE_WRITE_CMD	(5 << 6)
#define EE_READ64_CMD	(6 << 6)
#define EE_READ256_CMD	(6 << 8)
#define EE_ERASE_CMD	(7 << 6)
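
/*
 * Editorial example: a read of word 3 from a 64-word part would be framed
 * as EE_READ64_CMD | 3 == 0x183, i.e. the start bit and the '10' read
 * opcode followed by the 6-bit address, clocked out MSB first by
 * read_eeprom() below.
 */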

static void epic_disable_int(struct net_device *dev, struct epic_private *ep)
{
	void __iomem *ioaddr = ep->ioaddr;

	ew32(INTMASK, 0x00000000);
}

static inline void __epic_pci_commit(void __iomem *ioaddr)
{
#ifndef USE_IO_OPS
	er32(INTMASK);
#endif
}

static inline void epic_napi_irq_off(struct net_device *dev,
				     struct epic_private *ep)
{
	void __iomem *ioaddr = ep->ioaddr;

	ew32(INTMASK, ep->irq_mask & ~EpicNapiEvent);
	__epic_pci_commit(ioaddr);
}

static inline void epic_napi_irq_on(struct net_device *dev,
				    struct epic_private *ep)
{
	void __iomem *ioaddr = ep->ioaddr;

	/* No need to commit possible posted write */
	ew32(INTMASK, ep->irq_mask | EpicNapiEvent);
}

static int read_eeprom(struct epic_private *ep, int location)
{
	void __iomem *ioaddr = ep->ioaddr;
	int i;
	int retval = 0;
	int read_cmd = location |
		(er32(EECTL) & 0x40 ? EE_READ64_CMD : EE_READ256_CMD);

	ew32(EECTL, EE_ENB & ~EE_CS);
	ew32(EECTL, EE_ENB);

	/* Shift the read command bits out. */
	for (i = 12; i >= 0; i--) {
		short dataval = (read_cmd & (1 << i)) ? EE_WRITE_1 : EE_WRITE_0;
		ew32(EECTL, EE_ENB | dataval);
		eeprom_delay();
		ew32(EECTL, EE_ENB | dataval | EE_SHIFT_CLK);
		eeprom_delay();
	}
	ew32(EECTL, EE_ENB);

	for (i = 16; i > 0; i--) {
		ew32(EECTL, EE_ENB | EE_SHIFT_CLK);
		eeprom_delay();
		retval = (retval << 1) | ((er32(EECTL) & EE_DATA_READ) ? 1 : 0);
		ew32(EECTL, EE_ENB);
		eeprom_delay();
	}

	/* Terminate the EEPROM access. */
	ew32(EECTL, EE_ENB & ~EE_CS);
	return retval;
}

#define MII_READOP		1
#define MII_WRITEOP		2
static int mdio_read(struct net_device *dev, int phy_id, int location)
{
	struct epic_private *ep = netdev_priv(dev);
	void __iomem *ioaddr = ep->ioaddr;
	int read_cmd = (phy_id << 9) | (location << 4) | MII_READOP;
	int i;

	ew32(MIICtrl, read_cmd);
	/* Typical operation takes 25 loops. */
	for (i = 400; i > 0; i--) {
		barrier();
		if ((er32(MIICtrl) & MII_READOP) == 0) {
			/* Work around read failure bug. */
			if (phy_id == 1 && location < 6 &&
			    er16(MIIData) == 0xffff) {
				ew32(MIICtrl, read_cmd);
				continue;
			}
			return er16(MIIData);
		}
	}
	return 0xffff;
}

static void mdio_write(struct net_device *dev, int phy_id, int loc, int value)
{
	struct epic_private *ep = netdev_priv(dev);
	void __iomem *ioaddr = ep->ioaddr;
	int i;

	ew16(MIIData, value);
	ew32(MIICtrl, (phy_id << 9) | (loc << 4) | MII_WRITEOP);
	for (i = 10000; i > 0; i--) {
		barrier();
		if ((er32(MIICtrl) & MII_WRITEOP) == 0)
			break;
	}
}


static int epic_open(struct net_device *dev)
{
	struct epic_private *ep = netdev_priv(dev);
	void __iomem *ioaddr = ep->ioaddr;
	const int irq = ep->pci_dev->irq;
	int rc, i;

	/* Soft reset the chip. */
	ew32(GENCTL, 0x4001);

	napi_enable(&ep->napi);
	rc = request_irq(irq, epic_interrupt, IRQF_SHARED, dev->name, dev);
	if (rc) {
		napi_disable(&ep->napi);
		return rc;
	}

	epic_init_ring(dev);

	ew32(GENCTL, 0x4000);
	/* This magic is documented in SMSC app note 7.15 */
	for (i = 16; i > 0; i--)
		ew32(TEST1, 0x0008);

	/* Pull the chip out of low-power mode, enable interrupts, and set for
	   PCI read multiple.  The MIIcfg setting and strange write order are
	   required by the details of which bits are reset and the transceiver
	   wiring on the Ositech CardBus card.
	*/
#if 0
	ew32(MIICfg, dev->if_port == 1 ? 0x13 : 0x12);
#endif
	if (ep->chip_flags & MII_PWRDWN)
		ew32(NVCTL, (er32(NVCTL) & ~0x003c) | 0x4800);

	/* Tell the chip to byteswap descriptors on big-endian hosts */
#ifdef __BIG_ENDIAN
	ew32(GENCTL, 0x4432 | (RX_FIFO_THRESH << 8));
	er32(GENCTL);
	ew32(GENCTL, 0x0432 | (RX_FIFO_THRESH << 8));
#else
	ew32(GENCTL, 0x4412 | (RX_FIFO_THRESH << 8));
	er32(GENCTL);
	ew32(GENCTL, 0x0412 | (RX_FIFO_THRESH << 8));
#endif

	udelay(20); /* Looks like EPII needs that if you want reliable RX init. FIXME: pci posting bug? */

	for (i = 0; i < 3; i++)
		ew32(LAN0 + i*4, le16_to_cpu(((__le16*)dev->dev_addr)[i]));

	ep->tx_threshold = TX_FIFO_THRESH;
	ew32(TxThresh, ep->tx_threshold);

	if (media2miictl[dev->if_port & 15]) {
		if (ep->mii_phy_cnt)
			mdio_write(dev, ep->phys[0], MII_BMCR, media2miictl[dev->if_port&15]);
		if (dev->if_port == 1) {
			if (debug > 1)
				netdev_info(dev, "Using the 10base2 transceiver, MII status %4.4x.\n",
					    mdio_read(dev, ep->phys[0], MII_BMSR));
		}
	} else {
		int mii_lpa = mdio_read(dev, ep->phys[0], MII_LPA);
		if (mii_lpa != 0xffff) {
			if ((mii_lpa & LPA_100FULL) || (mii_lpa & 0x01C0) == LPA_10FULL)
				ep->mii.full_duplex = 1;
			else if (! (mii_lpa & LPA_LPACK))
				mdio_write(dev, ep->phys[0], MII_BMCR, BMCR_ANENABLE|BMCR_ANRESTART);
			if (debug > 1)
				netdev_info(dev, "Setting %s-duplex based on MII xcvr %d register read of %4.4x.\n",
					    ep->mii.full_duplex ? "full"
								: "half",
					    ep->phys[0], mii_lpa);
		}
	}

	ew32(TxCtrl, ep->mii.full_duplex ? 0x7f : 0x79);
	ew32(PRxCDAR, ep->rx_ring_dma);
	ew32(PTxCDAR, ep->tx_ring_dma);

	/* Start the chip's Rx process. */
	set_rx_mode(dev);
	ew32(COMMAND, StartRx | RxQueued);

	netif_start_queue(dev);

	/* Enable interrupts by setting the interrupt mask. */
	ew32(INTMASK, RxError | RxHeader | EpicNapiEvent | CntFull |
	     ((ep->chip_flags & TYPE2_INTR) ? PCIBusErr175 : PCIBusErr170) |
	     TxUnderrun);

	if (debug > 1) {
		netdev_dbg(dev, "epic_open() ioaddr %p IRQ %d status %4.4x %s-duplex.\n",
			   ioaddr, irq, er32(GENCTL),
			   ep->mii.full_duplex ? "full" : "half");
	}

	/* Set the timer to switch to check for link beat and perhaps switch
	   to an alternate media type. */
	timer_setup(&ep->timer, epic_timer, 0);
	ep->timer.expires = jiffies + 3*HZ;
	add_timer(&ep->timer);

	return rc;
}

/* Reset the chip to recover from a PCI transaction error.
   This may occur at interrupt time. */
static void epic_pause(struct net_device *dev)
{
	struct net_device_stats *stats = &dev->stats;
	struct epic_private *ep = netdev_priv(dev);
	void __iomem *ioaddr = ep->ioaddr;

	netif_stop_queue (dev);

	/* Disable interrupts by clearing the interrupt mask. */
	ew32(INTMASK, 0x00000000);
	/* Stop the chip's Tx and Rx DMA processes. */
	ew16(COMMAND, StopRx | StopTxDMA | StopRxDMA);

	/* Update the error counts. */
	if (er16(COMMAND) != 0xffff) {
		stats->rx_missed_errors	+= er8(MPCNT);
		stats->rx_frame_errors	+= er8(ALICNT);
		stats->rx_crc_errors	+= er8(CRCCNT);
	}

	/* Remove the packets on the Rx queue. */
	epic_rx(dev, RX_RING_SIZE);
}

static void epic_restart(struct net_device *dev)
{
	struct epic_private *ep = netdev_priv(dev);
	void __iomem *ioaddr = ep->ioaddr;
	int i;

	/* Soft reset the chip. */
	ew32(GENCTL, 0x4001);

	netdev_dbg(dev, "Restarting the EPIC chip, Rx %d/%d Tx %d/%d.\n",
		   ep->cur_rx, ep->dirty_rx, ep->dirty_tx, ep->cur_tx);
	udelay(1);

	/* This magic is documented in SMSC app note 7.15 */
	for (i = 16; i > 0; i--)
		ew32(TEST1, 0x0008);

#ifdef __BIG_ENDIAN
	ew32(GENCTL, 0x0432 | (RX_FIFO_THRESH << 8));
#else
	ew32(GENCTL, 0x0412 | (RX_FIFO_THRESH << 8));
#endif
	ew32(MIICfg, dev->if_port == 1 ? 0x13 : 0x12);
	if (ep->chip_flags & MII_PWRDWN)
		ew32(NVCTL, (er32(NVCTL) & ~0x003c) | 0x4800);

	for (i = 0; i < 3; i++)
		ew32(LAN0 + i*4, le16_to_cpu(((__le16*)dev->dev_addr)[i]));

	ep->tx_threshold = TX_FIFO_THRESH;
	ew32(TxThresh, ep->tx_threshold);
	ew32(TxCtrl, ep->mii.full_duplex ? 0x7f : 0x79);
	ew32(PRxCDAR, ep->rx_ring_dma +
	     (ep->cur_rx % RX_RING_SIZE) * sizeof(struct epic_rx_desc));
	ew32(PTxCDAR, ep->tx_ring_dma +
	     (ep->dirty_tx % TX_RING_SIZE) * sizeof(struct epic_tx_desc));

	/* Start the chip's Rx process. */
	set_rx_mode(dev);
	ew32(COMMAND, StartRx | RxQueued);

	/* Enable interrupts by setting the interrupt mask. */
	ew32(INTMASK, RxError | RxHeader | EpicNapiEvent | CntFull |
	     ((ep->chip_flags & TYPE2_INTR) ? PCIBusErr175 : PCIBusErr170) |
	     TxUnderrun);

	netdev_dbg(dev, "epic_restart() done, cmd status %4.4x, ctl %4.4x interrupt %4.4x.\n",
		   er32(COMMAND), er32(GENCTL), er32(INTSTAT));
}

static void check_media(struct net_device *dev)
{
	struct epic_private *ep = netdev_priv(dev);
	void __iomem *ioaddr = ep->ioaddr;
	int mii_lpa = ep->mii_phy_cnt ? mdio_read(dev, ep->phys[0], MII_LPA) : 0;
	int negotiated = mii_lpa & ep->mii.advertising;
	int duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;

	if (ep->mii.force_media)
		return;
	if (mii_lpa == 0xffff)		/* Bogus read */
		return;
	if (ep->mii.full_duplex != duplex) {
		ep->mii.full_duplex = duplex;
		netdev_info(dev, "Setting %s-duplex based on MII #%d link partner capability of %4.4x.\n",
			    ep->mii.full_duplex ? "full" : "half",
			    ep->phys[0], mii_lpa);
		ew32(TxCtrl, ep->mii.full_duplex ? 0x7F : 0x79);
	}
}

static void epic_timer(struct timer_list *t)
{
	struct epic_private *ep = from_timer(ep, t, timer);
	struct net_device *dev = ep->mii.dev;
	void __iomem *ioaddr = ep->ioaddr;
	int next_tick = 5*HZ;

	if (debug > 3) {
		netdev_dbg(dev, "Media monitor tick, Tx status %8.8x.\n",
			   er32(TxSTAT));
		netdev_dbg(dev, "Other registers are IntMask %4.4x IntStatus %4.4x RxStatus %4.4x.\n",
			   er32(INTMASK), er32(INTSTAT), er32(RxSTAT));
	}

	check_media(dev);

	ep->timer.expires = jiffies + next_tick;
	add_timer(&ep->timer);
}

static void epic_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct epic_private *ep = netdev_priv(dev);
	void __iomem *ioaddr = ep->ioaddr;

	if (debug > 0) {
		netdev_warn(dev, "Transmit timeout using MII device, Tx status %4.4x.\n",
			    er16(TxSTAT));
		if (debug > 1) {
			netdev_dbg(dev, "Tx indices: dirty_tx %d, cur_tx %d.\n",
				   ep->dirty_tx, ep->cur_tx);
		}
	}
	if (er16(TxSTAT) & 0x10) {		/* Tx FIFO underflow. */
		dev->stats.tx_fifo_errors++;
		ew32(COMMAND, RestartTx);
	} else {
		epic_restart(dev);
		ew32(COMMAND, TxQueued);
	}

	netif_trans_update(dev); /* prevent tx timeout */
	dev->stats.tx_errors++;
	if (!ep->tx_full)
		netif_wake_queue(dev);
}

/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static void epic_init_ring(struct net_device *dev)
{
	struct epic_private *ep = netdev_priv(dev);
	int i;

	ep->tx_full = 0;
	ep->dirty_tx = ep->cur_tx = 0;
	ep->cur_rx = ep->dirty_rx = 0;
	ep->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);

	/* Initialize all Rx descriptors. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		ep->rx_ring[i].rxstatus = 0;
		ep->rx_ring[i].buflength = ep->rx_buf_sz;
		ep->rx_ring[i].next = ep->rx_ring_dma +
				      (i+1)*sizeof(struct epic_rx_desc);
		ep->rx_skbuff[i] = NULL;
	}
	/* Mark the last entry as wrapping the ring. */
	ep->rx_ring[i-1].next = ep->rx_ring_dma;

	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb = netdev_alloc_skb(dev, ep->rx_buf_sz + 2);
		ep->rx_skbuff[i] = skb;
		if (skb == NULL)
			break;
		skb_reserve(skb, 2);	/* 16 byte align the IP header. */
		ep->rx_ring[i].bufaddr = dma_map_single(&ep->pci_dev->dev,
							skb->data,
							ep->rx_buf_sz,
							DMA_FROM_DEVICE);
		ep->rx_ring[i].rxstatus = DescOwn;
	}
	ep->dirty_rx = (unsigned int)(i - RX_RING_SIZE);

	/* The Tx buffer descriptor is filled in as needed, but we
	   do need to clear the ownership bit. */
	for (i = 0; i < TX_RING_SIZE; i++) {
		ep->tx_skbuff[i] = NULL;
		ep->tx_ring[i].txstatus = 0x0000;
		ep->tx_ring[i].next = ep->tx_ring_dma +
			(i+1)*sizeof(struct epic_tx_desc);
	}
	ep->tx_ring[i-1].next = ep->tx_ring_dma;
}

static netdev_tx_t epic_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct epic_private *ep = netdev_priv(dev);
	void __iomem *ioaddr = ep->ioaddr;
	int entry, free_count;
	u32 ctrl_word;
	unsigned long flags;

	if (skb_padto(skb, ETH_ZLEN))
		return NETDEV_TX_OK;

	/* Caution: the write order is important here, set the field with the
	   "ownership" bit last. */

	/* Calculate the next Tx descriptor entry. */
	spin_lock_irqsave(&ep->lock, flags);
	free_count = ep->cur_tx - ep->dirty_tx;
	entry = ep->cur_tx % TX_RING_SIZE;

	ep->tx_skbuff[entry] = skb;
	ep->tx_ring[entry].bufaddr = dma_map_single(&ep->pci_dev->dev,
						    skb->data, skb->len,
						    DMA_TO_DEVICE);
	if (free_count < TX_QUEUE_LEN/2) {/* Typical path */
		ctrl_word = 0x100000; /* No interrupt */
	} else if (free_count == TX_QUEUE_LEN/2) {
		ctrl_word = 0x140000; /* Tx-done intr. */
	} else if (free_count < TX_QUEUE_LEN - 1) {
		ctrl_word = 0x100000; /* No Tx-done intr. */
	} else {
		/* Leave room for an additional entry. */
		ctrl_word = 0x140000; /* Tx-done intr. */
		ep->tx_full = 1;
	}
	ep->tx_ring[entry].buflength = ctrl_word | skb->len;
	ep->tx_ring[entry].txstatus =
		((skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN) << 16)
			    | DescOwn;

	ep->cur_tx++;
	if (ep->tx_full)
		netif_stop_queue(dev);

	spin_unlock_irqrestore(&ep->lock, flags);
	/* Trigger an immediate transmit demand. */
	ew32(COMMAND, TxQueued);

	if (debug > 4)
		netdev_dbg(dev, "Queued Tx packet size %d to slot %d, flag %2.2x Tx status %8.8x.\n",
			   skb->len, entry, ctrl_word, er32(TxSTAT));

	return NETDEV_TX_OK;
}

static void epic_tx_error(struct net_device *dev, struct epic_private *ep,
			  int status)
{
	struct net_device_stats *stats = &dev->stats;

#ifndef final_version
	/* There was a major error, log it. */
	if (debug > 1)
		netdev_dbg(dev, "Transmit error, Tx status %8.8x.\n",
			   status);
#endif
	stats->tx_errors++;
	if (status & 0x1050)
		stats->tx_aborted_errors++;
	if (status & 0x0008)
		stats->tx_carrier_errors++;
	if (status & 0x0040)
		stats->tx_window_errors++;
	if (status & 0x0010)
		stats->tx_fifo_errors++;
}

static void epic_tx(struct net_device *dev, struct epic_private *ep)
{
	unsigned int dirty_tx, cur_tx;

	/*
	 * Note: if this lock becomes a problem we can narrow the locked
	 * region at the cost of occasionally grabbing the lock more times.
	 */
	cur_tx = ep->cur_tx;
	for (dirty_tx = ep->dirty_tx; cur_tx - dirty_tx > 0; dirty_tx++) {
		struct sk_buff *skb;
		int entry = dirty_tx % TX_RING_SIZE;
		int txstatus = ep->tx_ring[entry].txstatus;

		if (txstatus & DescOwn)
			break;	/* It still hasn't been Txed */

		if (likely(txstatus & 0x0001)) {
			dev->stats.collisions += (txstatus >> 8) & 15;
			dev->stats.tx_packets++;
			dev->stats.tx_bytes += ep->tx_skbuff[entry]->len;
		} else
			epic_tx_error(dev, ep, txstatus);

		/* Free the original skb. */
		skb = ep->tx_skbuff[entry];
		dma_unmap_single(&ep->pci_dev->dev,
				 ep->tx_ring[entry].bufaddr, skb->len,
				 DMA_TO_DEVICE);
		dev_consume_skb_irq(skb);
		ep->tx_skbuff[entry] = NULL;
	}

#ifndef final_version
	if (cur_tx - dirty_tx > TX_RING_SIZE) {
		netdev_warn(dev, "Out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
			    dirty_tx, cur_tx, ep->tx_full);
		dirty_tx += TX_RING_SIZE;
	}
#endif
	ep->dirty_tx = dirty_tx;
	if (ep->tx_full && cur_tx - dirty_tx < TX_QUEUE_LEN - 4) {
		/* The ring is no longer full, allow new TX entries. */
		ep->tx_full = 0;
		netif_wake_queue(dev);
	}
}
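
/*
 * Editorial note: cur_tx and dirty_tx are free-running unsigned counters,
 * so the difference cur_tx - dirty_tx used above is the in-flight count
 * even across 32-bit wraparound, e.g. cur_tx == 2 with dirty_tx ==
 * 0xfffffffe still yields 4.
 */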

/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
static irqreturn_t epic_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct epic_private *ep = netdev_priv(dev);
	void __iomem *ioaddr = ep->ioaddr;
	unsigned int handled = 0;
	int status;

	status = er32(INTSTAT);
	/* Acknowledge all of the current interrupt sources ASAP. */
	ew32(INTSTAT, status & EpicNormalEvent);

	if (debug > 4) {
		netdev_dbg(dev, "Interrupt, status=%#8.8x new intstat=%#8.8x.\n",
			   status, er32(INTSTAT));
	}

	if ((status & IntrSummary) == 0)
		goto out;

	handled = 1;

	if (status & EpicNapiEvent) {
		spin_lock(&ep->napi_lock);
		if (napi_schedule_prep(&ep->napi)) {
			epic_napi_irq_off(dev, ep);
			__napi_schedule(&ep->napi);
		}
		spin_unlock(&ep->napi_lock);
	}
	status &= ~EpicNapiEvent;

	/* Check uncommon events all at once. */
	if (status & (CntFull | TxUnderrun | PCIBusErr170 | PCIBusErr175)) {
		struct net_device_stats *stats = &dev->stats;

		if (status == EpicRemoved)
			goto out;

		/* Always update the error counts to avoid overhead later. */
		stats->rx_missed_errors	+= er8(MPCNT);
		stats->rx_frame_errors	+= er8(ALICNT);
		stats->rx_crc_errors	+= er8(CRCCNT);

		if (status & TxUnderrun) { /* Tx FIFO underflow. */
			stats->tx_fifo_errors++;
			ew32(TxThresh, ep->tx_threshold += 128);
			/* Restart the transmit process. */
			ew32(COMMAND, RestartTx);
		}
		if (status & PCIBusErr170) {
			netdev_err(dev, "PCI Bus Error! status %4.4x.\n",
				   status);
			epic_pause(dev);
			epic_restart(dev);
		}
		/* Clear all error sources. */
		ew32(INTSTAT, status & 0x7f18);
	}

out:
	if (debug > 3) {
		netdev_dbg(dev, "exit interrupt, intr_status=%#4.4x.\n",
			   status);
	}

	return IRQ_RETVAL(handled);
}

static int epic_rx(struct net_device *dev, int budget)
{
	struct epic_private *ep = netdev_priv(dev);
	int entry = ep->cur_rx % RX_RING_SIZE;
	int rx_work_limit = ep->dirty_rx + RX_RING_SIZE - ep->cur_rx;
	int work_done = 0;

	if (debug > 4)
		netdev_dbg(dev, " In epic_rx(), entry %d %8.8x.\n", entry,
			   ep->rx_ring[entry].rxstatus);

	if (rx_work_limit > budget)
		rx_work_limit = budget;

	/* If we own the next entry, it's a new packet. Send it up. */
	while ((ep->rx_ring[entry].rxstatus & DescOwn) == 0) {
		int status = ep->rx_ring[entry].rxstatus;

		if (debug > 4)
			netdev_dbg(dev, "  epic_rx() status was %8.8x.\n",
				   status);
		if (--rx_work_limit < 0)
			break;
		if (status & 0x2006) {
			if (debug > 2)
				netdev_dbg(dev, "epic_rx() error status was %8.8x.\n",
					   status);
			if (status & 0x2000) {
				netdev_warn(dev, "Oversized Ethernet frame spanned multiple buffers, status %4.4x!\n",
					    status);
				dev->stats.rx_length_errors++;
			} else if (status & 0x0006)
				/* Rx Frame errors are counted in hardware. */
				dev->stats.rx_errors++;
		} else {
			/* Malloc up new buffer, compatible with net-2e. */
			/* Omit the four octet CRC from the length. */
			short pkt_len = (status >> 16) - 4;
			struct sk_buff *skb;

			if (pkt_len > PKT_BUF_SZ - 4) {
				netdev_err(dev, "Oversized Ethernet frame, status %x %d bytes.\n",
					   status, pkt_len);
				pkt_len = 1514;
			}
			/* Check if the packet is long enough to accept without copying
			   to a minimally-sized skbuff. */
			if (pkt_len < rx_copybreak &&
			    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
				skb_reserve(skb, 2);	/* 16 byte align the IP header */
				dma_sync_single_for_cpu(&ep->pci_dev->dev,
							ep->rx_ring[entry].bufaddr,
							ep->rx_buf_sz,
							DMA_FROM_DEVICE);
				skb_copy_to_linear_data(skb, ep->rx_skbuff[entry]->data, pkt_len);
				skb_put(skb, pkt_len);
				dma_sync_single_for_device(&ep->pci_dev->dev,
							   ep->rx_ring[entry].bufaddr,
							   ep->rx_buf_sz,
							   DMA_FROM_DEVICE);
			} else {
				dma_unmap_single(&ep->pci_dev->dev,
						 ep->rx_ring[entry].bufaddr,
						 ep->rx_buf_sz,
						 DMA_FROM_DEVICE);
				skb_put(skb = ep->rx_skbuff[entry], pkt_len);
				ep->rx_skbuff[entry] = NULL;
			}
			skb->protocol = eth_type_trans(skb, dev);
			netif_receive_skb(skb);
			dev->stats.rx_packets++;
			dev->stats.rx_bytes += pkt_len;
		}
		work_done++;
		entry = (++ep->cur_rx) % RX_RING_SIZE;
	}

	/* Refill the Rx ring buffers. */
	for (; ep->cur_rx - ep->dirty_rx > 0; ep->dirty_rx++) {
		entry = ep->dirty_rx % RX_RING_SIZE;
		if (ep->rx_skbuff[entry] == NULL) {
			struct sk_buff *skb;
			skb = ep->rx_skbuff[entry] = netdev_alloc_skb(dev, ep->rx_buf_sz + 2);
			if (skb == NULL)
				break;
			skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
			ep->rx_ring[entry].bufaddr = dma_map_single(&ep->pci_dev->dev,
								    skb->data,
								    ep->rx_buf_sz,
								    DMA_FROM_DEVICE);
			work_done++;
		}
		/* AV: shouldn't we add a barrier here? */
		ep->rx_ring[entry].rxstatus = DescOwn;
	}
	return work_done;
}

static void epic_rx_err(struct net_device *dev, struct epic_private *ep)
{
	void __iomem *ioaddr = ep->ioaddr;
	int status;

	status = er32(INTSTAT);

	if (status == EpicRemoved)
		return;
	if (status & RxOverflow) 	/* Missed a Rx frame. */
		dev->stats.rx_errors++;
	if (status & (RxOverflow | RxFull))
		ew16(COMMAND, RxQueued);
}

static int epic_poll(struct napi_struct *napi, int budget)
{
	struct epic_private *ep = container_of(napi, struct epic_private, napi);
	struct net_device *dev = ep->mii.dev;
	void __iomem *ioaddr = ep->ioaddr;
	int work_done;

	epic_tx(dev, ep);

	work_done = epic_rx(dev, budget);

	epic_rx_err(dev, ep);

	if (work_done < budget && napi_complete_done(napi, work_done)) {
		unsigned long flags;

		spin_lock_irqsave(&ep->napi_lock, flags);

		ew32(INTSTAT, EpicNapiEvent);
		epic_napi_irq_on(dev, ep);
		spin_unlock_irqrestore(&ep->napi_lock, flags);
	}

	return work_done;
}

static int epic_close(struct net_device *dev)
{
	struct epic_private *ep = netdev_priv(dev);
	struct pci_dev *pdev = ep->pci_dev;
	void __iomem *ioaddr = ep->ioaddr;
	struct sk_buff *skb;
	int i;

	netif_stop_queue(dev);
	napi_disable(&ep->napi);

	if (debug > 1)
		netdev_dbg(dev, "Shutting down ethercard, status was %2.2x.\n",
			   er32(INTSTAT));

	del_timer_sync(&ep->timer);

	epic_disable_int(dev, ep);

	free_irq(pdev->irq, dev);

	epic_pause(dev);

	/* Free all the skbuffs in the Rx queue. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		skb = ep->rx_skbuff[i];
		ep->rx_skbuff[i] = NULL;
		ep->rx_ring[i].rxstatus = 0;		/* Not owned by Epic chip. */
		ep->rx_ring[i].buflength = 0;
		if (skb) {
			dma_unmap_single(&pdev->dev, ep->rx_ring[i].bufaddr,
					 ep->rx_buf_sz, DMA_FROM_DEVICE);
			dev_kfree_skb(skb);
		}
		ep->rx_ring[i].bufaddr = 0xBADF00D0; /* An invalid address. */
	}
	for (i = 0; i < TX_RING_SIZE; i++) {
		skb = ep->tx_skbuff[i];
		ep->tx_skbuff[i] = NULL;
		if (!skb)
			continue;
		dma_unmap_single(&pdev->dev, ep->tx_ring[i].bufaddr, skb->len,
				 DMA_TO_DEVICE);
		dev_kfree_skb(skb);
	}

	/* Green! Leave the chip in low-power mode. */
	ew32(GENCTL, 0x0008);

	return 0;
}

static struct net_device_stats *epic_get_stats(struct net_device *dev)
{
	struct epic_private *ep = netdev_priv(dev);
	void __iomem *ioaddr = ep->ioaddr;

	if (netif_running(dev)) {
		struct net_device_stats *stats = &dev->stats;

		stats->rx_missed_errors	+= er8(MPCNT);
		stats->rx_frame_errors	+= er8(ALICNT);
		stats->rx_crc_errors	+= er8(CRCCNT);
	}

	return &dev->stats;
}

/* Set or clear the multicast filter for this adaptor.
   Note that we only use exclusion around actually queueing the
   new frame, not around filling ep->setup_frame.  This is non-deterministic
   when re-entered but still correct. */

static void set_rx_mode(struct net_device *dev)
{
	struct epic_private *ep = netdev_priv(dev);
	void __iomem *ioaddr = ep->ioaddr;
	unsigned char mc_filter[8];		 /* Multicast hash filter */
	int i;

	if (dev->flags & IFF_PROMISC) {			/* Set promiscuous. */
		ew32(RxCtrl, 0x002c);
		/* Unconditionally log net taps. */
		memset(mc_filter, 0xff, sizeof(mc_filter));
	} else if ((!netdev_mc_empty(dev)) || (dev->flags & IFF_ALLMULTI)) {
		/* There is apparently a chip bug, so the multicast filter
		   is never enabled. */
		/* Too many to filter perfectly -- accept all multicasts. */
		memset(mc_filter, 0xff, sizeof(mc_filter));
		ew32(RxCtrl, 0x000c);
	} else if (netdev_mc_empty(dev)) {
		ew32(RxCtrl, 0x0004);
		return;
	} else {					/* Never executed, for now. */
		struct netdev_hw_addr *ha;

		memset(mc_filter, 0, sizeof(mc_filter));
		netdev_for_each_mc_addr(ha, dev) {
			unsigned int bit_nr =
				ether_crc_le(ETH_ALEN, ha->addr) & 0x3f;
			mc_filter[bit_nr >> 3] |= 1 << (bit_nr & 7);
		}
	}
	/* ToDo: perhaps we need to stop the Tx and Rx process here? */
	if (memcmp(mc_filter, ep->mc_filter, sizeof(mc_filter))) {
		for (i = 0; i < 4; i++)
			ew16(MC0 + i*4, ((u16 *)mc_filter)[i]);
		memcpy(ep->mc_filter, mc_filter, sizeof(mc_filter));
	}
}
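
/*
 * Editorial sketch, illustrative only: how one multicast address would be
 * hashed into the 64-bit logical filter written to MC0 above, mirroring
 * the (currently unreachable) loop in set_rx_mode().  Not built.
 */
#if 0
static void example_hash_mc_addr(unsigned char mc_filter[8], const u8 *addr)
{
	unsigned int bit_nr = ether_crc_le(ETH_ALEN, addr) & 0x3f;

	mc_filter[bit_nr >> 3] |= 1 << (bit_nr & 7);
}
#endif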

static void netdev_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct epic_private *np = netdev_priv(dev);

	strscpy(info->driver, DRV_NAME, sizeof(info->driver));
	strscpy(info->version, DRV_VERSION, sizeof(info->version));
	strscpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}

static int netdev_get_link_ksettings(struct net_device *dev,
				     struct ethtool_link_ksettings *cmd)
{
	struct epic_private *np = netdev_priv(dev);

	spin_lock_irq(&np->lock);
	mii_ethtool_get_link_ksettings(&np->mii, cmd);
	spin_unlock_irq(&np->lock);

	return 0;
}

static int netdev_set_link_ksettings(struct net_device *dev,
				     const struct ethtool_link_ksettings *cmd)
{
	struct epic_private *np = netdev_priv(dev);
	int rc;

	spin_lock_irq(&np->lock);
	rc = mii_ethtool_set_link_ksettings(&np->mii, cmd);
	spin_unlock_irq(&np->lock);

	return rc;
}

static int netdev_nway_reset(struct net_device *dev)
{
	struct epic_private *np = netdev_priv(dev);
	return mii_nway_restart(&np->mii);
}

static u32 netdev_get_link(struct net_device *dev)
{
	struct epic_private *np = netdev_priv(dev);
	return mii_link_ok(&np->mii);
}

static u32 netdev_get_msglevel(struct net_device *dev)
{
	return debug;
}

static void netdev_set_msglevel(struct net_device *dev, u32 value)
{
	debug = value;
}

static int ethtool_begin(struct net_device *dev)
{
	struct epic_private *ep = netdev_priv(dev);
	void __iomem *ioaddr = ep->ioaddr;

	if (ep->ethtool_ops_nesting == U32_MAX)
		return -EBUSY;
	/* power-up, if interface is down */
	if (!ep->ethtool_ops_nesting++ && !netif_running(dev)) {
		ew32(GENCTL, 0x0200);
		ew32(NVCTL, (er32(NVCTL) & ~0x003c) | 0x4800);
	}
	return 0;
}

static void ethtool_complete(struct net_device *dev)
{
	struct epic_private *ep = netdev_priv(dev);
	void __iomem *ioaddr = ep->ioaddr;

	/* power-down, if interface is down */
	if (!--ep->ethtool_ops_nesting && !netif_running(dev)) {
		ew32(GENCTL, 0x0008);
		ew32(NVCTL, (er32(NVCTL) & ~0x483c) | 0x0000);
	}
}

static const struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo		= netdev_get_drvinfo,
	.nway_reset		= netdev_nway_reset,
	.get_link		= netdev_get_link,
	.get_msglevel		= netdev_get_msglevel,
	.set_msglevel		= netdev_set_msglevel,
	.begin			= ethtool_begin,
	.complete		= ethtool_complete,
	.get_link_ksettings	= netdev_get_link_ksettings,
	.set_link_ksettings	= netdev_set_link_ksettings,
};

static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct epic_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->ioaddr;
	struct mii_ioctl_data *data = if_mii(rq);
	int rc;

	/* power-up, if interface is down */
	if (! netif_running(dev)) {
		ew32(GENCTL, 0x0200);
		ew32(NVCTL, (er32(NVCTL) & ~0x003c) | 0x4800);
	}

	/* all non-ethtool ioctls (the SIOC[GS]MIIxxx ioctls) */
	spin_lock_irq(&np->lock);
	rc = generic_mii_ioctl(&np->mii, data, cmd, NULL);
	spin_unlock_irq(&np->lock);

	/* power-down, if interface is down */
	if (! netif_running(dev)) {
		ew32(GENCTL, 0x0008);
		ew32(NVCTL, (er32(NVCTL) & ~0x483c) | 0x0000);
	}
	return rc;
}


static void epic_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct epic_private *ep = netdev_priv(dev);

	unregister_netdev(dev);
	dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, ep->tx_ring,
			  ep->tx_ring_dma);
	dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, ep->rx_ring,
			  ep->rx_ring_dma);
	pci_iounmap(pdev, ep->ioaddr);
	free_netdev(dev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	/* pci_power_off(pdev, -1); */
}

static int __maybe_unused epic_suspend(struct device *dev_d)
{
	struct net_device *dev = dev_get_drvdata(dev_d);
	struct epic_private *ep = netdev_priv(dev);
	void __iomem *ioaddr = ep->ioaddr;

	if (!netif_running(dev))
		return 0;
	epic_pause(dev);
	/* Put the chip into low-power mode. */
	ew32(GENCTL, 0x0008);
	/* pci_power_off(pdev, -1); */
	return 0;
}


static int __maybe_unused epic_resume(struct device *dev_d)
{
	struct net_device *dev = dev_get_drvdata(dev_d);

	if (!netif_running(dev))
		return 0;
	epic_restart(dev);
	/* pci_power_on(pdev); */
	return 0;
}

static SIMPLE_DEV_PM_OPS(epic_pm_ops, epic_suspend, epic_resume);

static struct pci_driver epic_driver = {
	.name		= DRV_NAME,
	.id_table	= epic_pci_tbl,
	.probe		= epic_init_one,
	.remove		= epic_remove_one,
	.driver.pm	= &epic_pm_ops,
};


static int __init epic_init (void)
{
/* when a module, this is printed whether or not devices are found in probe */
#ifdef MODULE
	pr_info("%s%s\n", version, version2);
#endif

	return pci_register_driver(&epic_driver);
}


static void __exit epic_cleanup (void)
{
	pci_unregister_driver (&epic_driver);
}


module_init(epic_init);
module_exit(epic_cleanup);