/* winbond-840.c: A Linux PCI network adapter device driver. */
/*
	Written 1998-2001 by Donald Becker.

	This software may be used and distributed according to the terms of
	the GNU General Public License (GPL), incorporated herein by reference.
	Drivers based on or derived from this code fall under the GPL and must
	retain the authorship, copyright and license notice.  This file is not
	a complete program and may only be used when the entire operating
	system is licensed under the GPL.

	The author may be reached as becker@scyld.com, or C/O
	Scyld Computing Corporation
	410 Severn Ave., Suite 210
	Annapolis MD 21403

	Support and updates available at
	http://www.scyld.com/network/drivers.html

	Do not remove the copyright information.
	Do not change the version information unless an improvement has been made.
	Merely removing my name, as Compex has done in the past, does not count
	as an improvement.

	Changelog:
	* ported to 2.4
		???
	* spin lock update, memory barriers, new style dma mappings
		limit each tx buffer to < 1024 bytes
		remove DescIntr from Rx descriptors (that's a Tx flag)
		remove next pointer from Tx descriptors
		synchronize tx_q_bytes
		software reset in tx_timeout
			Copyright (C) 2000 Manfred Spraul
	* further cleanups
		power management.
		support for big endian descriptors
			Copyright (C) 2001 Manfred Spraul
	* ethtool support (jgarzik)
	* Replace some MII-related magic numbers with constants (jgarzik)

	TODO:
	* enable pci_power_off
	* Wake-On-LAN
*/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#define DRV_NAME	"winbond-840"

/* Automatically extracted configuration info:
probe-func: winbond840_probe
config-in: tristate 'Winbond W89c840 Ethernet support' CONFIG_WINBOND_840

c-help-name: Winbond W89c840 PCI Ethernet support
c-help-symbol: CONFIG_WINBOND_840
c-help: This driver is for the Winbond W89c840 chip.  It also works with
c-help: the TX9882 chip on the Compex RL100-ATX board.
c-help: More specific information and updates are available from
c-help: http://www.scyld.com/network/drivers.html
*/

/* The user-configurable values.
   These may be modified when a driver module is loaded.*/

static int debug = 1;			/* 1 normal messages, 0 quiet .. 7 verbose. */
static int max_interrupt_work = 20;
/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
   The '840 uses a 64 element hash table based on the Ethernet CRC; an
   illustrative hash sketch appears below, after the rx_mode_bits enum. */
static int multicast_filter_limit = 32;

/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
   Setting to > 1518 effectively disables this feature. */
static int rx_copybreak;
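
/* Illustrative note (an assumption about netdev_rx(), which follows the
   usual Becker-driver pattern): frames shorter than rx_copybreak are
   copied into a freshly allocated small skb so the full-sized ring
   buffer can stay mapped and be reused, roughly:
	if (pkt_len < rx_copybreak)
		copy the frame into a new small skb;
	else
		pass the ring skb up and map a fresh full-sized one;
 */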

/* Used to pass the media type, etc.
   Both 'options[]' and 'full_duplex[]' should exist for driver
   interoperability.
   The media type is usually passed in 'options[]'.
*/
#define MAX_UNITS 8		/* More are supported, limit only on options */
static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};

/* Operational parameters that are set at compile time. */

/* Keep the ring sizes a power of two for compile efficiency.
   The compiler will convert <unsigned>'%'<2^N> into a bit mask.
   Making the Tx ring too large decreases the effectiveness of channel
   bonding and packet priority.
   There are no ill effects from too-large receive rings. */
#define TX_QUEUE_LEN	10		/* Limit ring entries actually used.  */
#define TX_QUEUE_LEN_RESTART	5
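
/* Illustrative note (an assumption; the restart half lives in
   netdev_tx_done(), beyond this listing): the two values above form a
   hysteresis pair -- the queue is stopped once TX_QUEUE_LEN descriptors
   are in flight and woken again only after completions drain it below
   TX_QUEUE_LEN_RESTART. */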

#define TX_BUFLIMIT	(1024-128)

/* The presumed FIFO size for working around the Tx-FIFO-overflow bug.
   To avoid overflowing we don't queue again until we have room for a
   full-size packet.
 */
#define TX_FIFO_SIZE (2048)
#define TX_BUG_FIFO_LIMIT (TX_FIFO_SIZE-1514-16)
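
/* Worked example: with TX_FIFO_SIZE = 2048 the limit evaluates to
   2048 - 1514 - 16 = 518 bytes.  Once tx_q_bytes exceeds 518, start_tx()
   stops the queue, so a maximum-sized 1514-byte frame (plus 16 bytes of
   slack) can never overflow the FIFO. */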


/* Operational parameters that usually are not changed. */
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (2*HZ)

/* Include files, designed to support most kernel versions 2.0.0 and later. */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/rtnetlink.h>
#include <linux/crc32.h>
#include <linux/bitops.h>
#include <linux/uaccess.h>
#include <asm/processor.h>		/* Processor type for cache alignment. */
#include <asm/io.h>
#include <asm/irq.h>

#include "tulip.h"

#undef PKT_BUF_SZ			/* tulip.h also defines this */
#define PKT_BUF_SZ		1536	/* Size of each temporary Rx buffer.*/

MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("Winbond W89c840 Ethernet driver");
MODULE_LICENSE("GPL");

module_param(max_interrupt_work, int, 0);
module_param(debug, int, 0);
module_param(rx_copybreak, int, 0);
module_param(multicast_filter_limit, int, 0);
module_param_array(options, int, NULL, 0);
module_param_array(full_duplex, int, NULL, 0);
MODULE_PARM_DESC(max_interrupt_work, "winbond-840 maximum events handled per interrupt");
MODULE_PARM_DESC(debug, "winbond-840 debug level (0-7)");
MODULE_PARM_DESC(rx_copybreak, "winbond-840 copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(multicast_filter_limit, "winbond-840 maximum number of filtered multicast addresses");
MODULE_PARM_DESC(options, "winbond-840: Bits 0-3: media type, bit 17: full duplex");
MODULE_PARM_DESC(full_duplex, "winbond-840 full duplex setting(s) (1)");

/*
				Theory of Operation

I. Board Compatibility

This driver is for the Winbond w89c840 chip.

II. Board-specific settings

None.

III. Driver operation

This chip is very similar to the Digital 21*4* "Tulip" family.  The first
twelve registers and the descriptor format are nearly identical.  Read a
Tulip manual for operational details.

A significant difference is that the multicast filter and station address are
stored in registers rather than loaded through a pseudo-transmit packet.

Unlike the Tulip, transmit buffers are limited to 1KB.  To transmit a
full-sized packet we must use both data buffers in a descriptor.  Thus the
driver uses ring mode where descriptors are implicitly sequential in memory,
rather than using the second descriptor address as a chain pointer to
subsequent descriptors.

IV. Notes

If you are going to almost clone a Tulip, why not go all the way and avoid
the need for a new driver?

IVb. References

http://www.scyld.com/expert/100mbps.html
http://www.scyld.com/expert/NWay.html
http://www.winbond.com.tw/

IVc. Errata

A horrible bug exists in the transmit FIFO.  Apparently the chip doesn't
correctly detect a full FIFO, and queuing more than 2048 bytes may result in
silent data corruption.

Test with 'ping -s 10000' on a fast computer.

*/



/*
  PCI probe table.
*/
enum chip_capability_flags {
	CanHaveMII=1, HasBrokenTx=2, AlwaysFDX=4, FDXOnNoMII=8,
};

static const struct pci_device_id w840_pci_tbl[] = {
	{ 0x1050, 0x0840, PCI_ANY_ID, 0x8153,     0, 0, 0 },
	{ 0x1050, 0x0840, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
	{ 0x11f6, 0x2011, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 },
	{ }
};
MODULE_DEVICE_TABLE(pci, w840_pci_tbl);

enum {
	netdev_res_size		= 128,	/* size of PCI BAR resource */
};

struct pci_id_info {
        const char *name;
        int drv_flags;		/* Driver use, intended as capability flags. */
};

static const struct pci_id_info pci_id_tbl[] = {
	{ 				/* Sometimes a Level-One switch card. */
	  "Winbond W89c840",	CanHaveMII | HasBrokenTx | FDXOnNoMII},
	{ "Winbond W89c840",	CanHaveMII | HasBrokenTx},
	{ "Compex RL100-ATX",	CanHaveMII | HasBrokenTx},
	{ }	/* terminate list. */
};

/* This driver was written to use PCI memory space, however some x86 systems
   work only with I/O space accesses. See CONFIG_TULIP_MMIO in .config
*/

/* Offsets to the Command and Status Registers, "CSRs".
   While similar to the Tulip, these registers are longword aligned.
   Note: It's not useful to define symbolic names for every register bit in
   the device.  The name can only partially document the semantics and make
   the driver longer and more difficult to read.
*/
enum w840_offsets {
	PCIBusCfg=0x00, TxStartDemand=0x04, RxStartDemand=0x08,
	RxRingPtr=0x0C, TxRingPtr=0x10,
	IntrStatus=0x14, NetworkConfig=0x18, IntrEnable=0x1C,
	RxMissed=0x20, EECtrl=0x24, MIICtrl=0x24, BootRom=0x28, GPTimer=0x2C,
	CurRxDescAddr=0x30, CurRxBufAddr=0x34,			/* Debug use */
	MulticastFilter0=0x38, MulticastFilter1=0x3C, StationAddr=0x40,
	CurTxDescAddr=0x4C, CurTxBufAddr=0x50,
};

/* Bits in the NetworkConfig register. */
enum rx_mode_bits {
	AcceptErr=0x80,
	RxAcceptBroadcast=0x20, AcceptMulticast=0x10,
	RxAcceptAllPhys=0x08, AcceptMyPhys=0x02,
};
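
/* Illustrative sketch (not part of the original driver): how the 64-bit
   multicast hash mentioned near multicast_filter_limit above is typically
   computed in tulip-class drivers.  The exact bit selection used by
   __set_rx_mode() may differ; this only shows the general scheme. */
static inline void w840_mc_hash_example(u32 mc_filter[2], const u8 *addr)
{
	int bit = ether_crc(ETH_ALEN, addr) >> 26;	/* top 6 CRC bits: 0..63 */

	mc_filter[bit >> 5] |= 1 << (bit & 31);		/* MulticastFilter0/1 */
}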

enum mii_reg_bits {
	MDIO_ShiftClk=0x10000, MDIO_DataIn=0x80000, MDIO_DataOut=0x20000,
	MDIO_EnbOutput=0x40000, MDIO_EnbIn = 0x00000,
};

/* The Tulip Rx and Tx buffer descriptors. */
struct w840_rx_desc {
	s32 status;
	s32 length;
	u32 buffer1;
	u32 buffer2;
};

struct w840_tx_desc {
	s32 status;
	s32 length;
	u32 buffer1, buffer2;
};
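
/* Minimal illustrative sketch (an assumption, mirroring what start_tx()
   below actually does): per section III of the Theory of Operation, a
   frame longer than TX_BUFLIMIT must be split across both buffers of one
   descriptor.  For a 1514-byte frame with TX_BUFLIMIT = 896, buffer1
   carries 896 bytes and buffer2 the remaining 618, encoded as
   (618 << 11) | 896 in the length field. */
static inline void w840_fill_tx_desc_example(struct w840_tx_desc *desc,
					     dma_addr_t addr, unsigned int len)
{
	desc->buffer1 = addr;
	if (len < TX_BUFLIMIT) {
		desc->length = DescWholePkt | len;
	} else {
		desc->buffer2 = addr + TX_BUFLIMIT;	/* rest of the frame */
		desc->length = DescWholePkt | ((len - TX_BUFLIMIT) << 11) |
			       TX_BUFLIMIT;
	}
}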

#define MII_CNT		1 /* winbond only supports one MII */
struct netdev_private {
	struct w840_rx_desc *rx_ring;
	dma_addr_t	rx_addr[RX_RING_SIZE];
	struct w840_tx_desc *tx_ring;
	dma_addr_t	tx_addr[TX_RING_SIZE];
	dma_addr_t ring_dma_addr;
	/* The addresses of receive-in-place skbuffs. */
	struct sk_buff* rx_skbuff[RX_RING_SIZE];
	/* The saved address of a sent-in-place packet/buffer, for later free(). */
	struct sk_buff* tx_skbuff[TX_RING_SIZE];
	struct net_device_stats stats;
	struct timer_list timer;	/* Media monitoring timer. */
	/* Frequently used values: keep some adjacent for cache effect. */
	spinlock_t lock;
	int chip_id, drv_flags;
	struct pci_dev *pci_dev;
	int csr6;
	struct w840_rx_desc *rx_head_desc;
	unsigned int cur_rx, dirty_rx;		/* Producer/consumer ring indices */
	unsigned int rx_buf_sz;				/* Based on MTU+slack. */
	unsigned int cur_tx, dirty_tx;
	unsigned int tx_q_bytes;
	unsigned int tx_full;				/* The Tx queue is full. */
	/* MII transceiver section. */
	int mii_cnt;						/* MII device addresses. */
	unsigned char phys[MII_CNT];		/* MII device addresses, but only the first is used */
	u32 mii;
	struct mii_if_info mii_if;
	void __iomem *base_addr;
};

static int  eeprom_read(void __iomem *ioaddr, int location);
static int  mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
static int  netdev_open(struct net_device *dev);
static int  update_link(struct net_device *dev);
static void netdev_timer(struct timer_list *t);
static void init_rxtx_rings(struct net_device *dev);
static void free_rxtx_rings(struct netdev_private *np);
static void init_registers(struct net_device *dev);
static void tx_timeout(struct net_device *dev, unsigned int txqueue);
static int alloc_ringdesc(struct net_device *dev);
static void free_ringdesc(struct netdev_private *np);
static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev);
static irqreturn_t intr_handler(int irq, void *dev_instance);
static void netdev_error(struct net_device *dev, int intr_status);
static int  netdev_rx(struct net_device *dev);
static u32 __set_rx_mode(struct net_device *dev);
static void set_rx_mode(struct net_device *dev);
static struct net_device_stats *get_stats(struct net_device *dev);
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static const struct ethtool_ops netdev_ethtool_ops;
static int  netdev_close(struct net_device *dev);

static const struct net_device_ops netdev_ops = {
	.ndo_open		= netdev_open,
	.ndo_stop		= netdev_close,
	.ndo_start_xmit		= start_tx,
	.ndo_get_stats		= get_stats,
	.ndo_set_rx_mode	= set_rx_mode,
	.ndo_eth_ioctl		= netdev_ioctl,
	.ndo_tx_timeout		= tx_timeout,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};

static int w840_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct netdev_private *np;
	static int find_cnt;
	int chip_idx = ent->driver_data;
	int irq;
	int i, option = find_cnt < MAX_UNITS ? options[find_cnt] : 0;
	__le16 addr[ETH_ALEN / 2];
	void __iomem *ioaddr;

	i = pcim_enable_device(pdev);
	if (i) return i;

	pci_set_master(pdev);

	irq = pdev->irq;

	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
		pr_warn("Device %s disabled due to DMA limitations\n",
			pci_name(pdev));
		return -EIO;
	}
	dev = alloc_etherdev(sizeof(*np));
	if (!dev)
		return -ENOMEM;
	SET_NETDEV_DEV(dev, &pdev->dev);

	if (pci_request_regions(pdev, DRV_NAME))
		goto err_out_netdev;

	ioaddr = pci_iomap(pdev, TULIP_BAR, netdev_res_size);
	if (!ioaddr)
		goto err_out_netdev;

	for (i = 0; i < 3; i++)
		addr[i] = cpu_to_le16(eeprom_read(ioaddr, i));
	eth_hw_addr_set(dev, (u8 *)addr);

	/* Reset the chip to erase previous misconfiguration.
	   No hold time required! */
	iowrite32(0x00000001, ioaddr + PCIBusCfg);

	np = netdev_priv(dev);
	np->pci_dev = pdev;
	np->chip_id = chip_idx;
	np->drv_flags = pci_id_tbl[chip_idx].drv_flags;
	spin_lock_init(&np->lock);
	np->mii_if.dev = dev;
	np->mii_if.mdio_read = mdio_read;
	np->mii_if.mdio_write = mdio_write;
	np->base_addr = ioaddr;

	pci_set_drvdata(pdev, dev);

	if (dev->mem_start)
		option = dev->mem_start;

	/* The lower four bits are the media type. */
	if (option > 0) {
		if (option & 0x200)
			np->mii_if.full_duplex = 1;
		if (option & 15)
			dev_info(&dev->dev,
				 "ignoring user supplied media type %d",
				 option & 15);
	}
	if (find_cnt < MAX_UNITS  &&  full_duplex[find_cnt] > 0)
		np->mii_if.full_duplex = 1;

	if (np->mii_if.full_duplex)
		np->mii_if.force_media = 1;

	/* The chip-specific entries in the device structure. */
	dev->netdev_ops = &netdev_ops;
	dev->ethtool_ops = &netdev_ethtool_ops;
	dev->watchdog_timeo = TX_TIMEOUT;

	i = register_netdev(dev);
	if (i)
		goto err_out_cleardev;

	dev_info(&dev->dev, "%s at %p, %pM, IRQ %d\n",
		 pci_id_tbl[chip_idx].name, ioaddr, dev->dev_addr, irq);

	if (np->drv_flags & CanHaveMII) {
		int phy, phy_idx = 0;
		for (phy = 1; phy < 32 && phy_idx < MII_CNT; phy++) {
			int mii_status = mdio_read(dev, phy, MII_BMSR);
			if (mii_status != 0xffff  &&  mii_status != 0x0000) {
				np->phys[phy_idx++] = phy;
				np->mii_if.advertising = mdio_read(dev, phy, MII_ADVERTISE);
				np->mii = (mdio_read(dev, phy, MII_PHYSID1) << 16)+
						mdio_read(dev, phy, MII_PHYSID2);
				dev_info(&dev->dev,
					 "MII PHY %08xh found at address %d, status 0x%04x advertising %04x\n",
					 np->mii, phy, mii_status,
					 np->mii_if.advertising);
			}
		}
		np->mii_cnt = phy_idx;
		np->mii_if.phy_id = np->phys[0];
		if (phy_idx == 0) {
			dev_warn(&dev->dev,
				 "MII PHY not found -- this device may not operate correctly\n");
		}
	}

	find_cnt++;
	return 0;

err_out_cleardev:
	pci_iounmap(pdev, ioaddr);
err_out_netdev:
	free_netdev(dev);
	return -ENODEV;
}


/* Read the EEPROM and MII Management Data I/O (MDIO) interfaces.  These are
   often serial bit streams generated by the host processor.
   The example below is for the common 93c46 EEPROM, 64 16 bit words. */

/* Delay between EEPROM clock transitions.
   No extra delay is needed with 33MHz PCI, but future 66MHz access may need
   a delay.  Note that pre-2.0.34 kernels had a cache-alignment bug that
   made udelay() unreliable.
*/
#define eeprom_delay(ee_addr)	ioread32(ee_addr)

enum EEPROM_Ctrl_Bits {
	EE_ShiftClk=0x02, EE_Write0=0x801, EE_Write1=0x805,
	EE_ChipSelect=0x801, EE_DataIn=0x08,
};

/* The EEPROM commands include the always-set leading bit. */
enum EEPROM_Cmds {
	EE_WriteCmd=(5 << 6), EE_ReadCmd=(6 << 6), EE_EraseCmd=(7 << 6),
};
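
/* Worked example (illustrative): to read word 2, eeprom_read() below
   shifts out read_cmd = 0x180 | 2 over eleven clocks; the significant
   low nine bits, 0b110000010, are the always-set start bit, the "10"
   read opcode, and the six address bits.  16 data bits are then clocked
   back in.  The first three words hold the station MAC address, as
   w840_probe1() above shows. */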

static int eeprom_read(void __iomem *addr, int location)
{
	int i;
	int retval = 0;
	void __iomem *ee_addr = addr + EECtrl;
	int read_cmd = location | EE_ReadCmd;
	iowrite32(EE_ChipSelect, ee_addr);

	/* Shift the read command bits out. */
	for (i = 10; i >= 0; i--) {
		short dataval = (read_cmd & (1 << i)) ? EE_Write1 : EE_Write0;
		iowrite32(dataval, ee_addr);
		eeprom_delay(ee_addr);
		iowrite32(dataval | EE_ShiftClk, ee_addr);
		eeprom_delay(ee_addr);
	}
	iowrite32(EE_ChipSelect, ee_addr);
	eeprom_delay(ee_addr);

	for (i = 16; i > 0; i--) {
		iowrite32(EE_ChipSelect | EE_ShiftClk, ee_addr);
		eeprom_delay(ee_addr);
		retval = (retval << 1) | ((ioread32(ee_addr) & EE_DataIn) ? 1 : 0);
		iowrite32(EE_ChipSelect, ee_addr);
		eeprom_delay(ee_addr);
	}

	/* Terminate the EEPROM access. */
	iowrite32(0, ee_addr);
	return retval;
}

/*  MII transceiver control section.
	Read and write the MII registers using software-generated serial
	MDIO protocol.  See the MII specifications or DP83840A data sheet
	for details.

	The maximum data clock rate is 2.5 MHz.  The minimum timing is usually
	met by back-to-back 33MHz PCI cycles. */
#define mdio_delay(mdio_addr) ioread32(mdio_addr)

/* Set iff a MII transceiver on any interface requires mdio preamble.
   This is only set with older transceivers, so the extra
   code size of a per-interface flag is not worthwhile. */
static char mii_preamble_required = 1;

#define MDIO_WRITE0 (MDIO_EnbOutput)
#define MDIO_WRITE1 (MDIO_DataOut | MDIO_EnbOutput)
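
/* Worked example (illustrative): for PHY 1, register 1 (MII_BMSR), the 16
   bits mdio_read() below shifts out are ((0xf6 << 10) | (1 << 5) | 1)
   & 0xffff = 0xd821 = 11 01 10 00001 00001 -- two idle bits, the "01"
   start and "10" read opcode, the 5-bit PHY address, and the 5-bit
   register address. */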

/* Generate the preamble required for initial synchronization and
   a few older transceivers. */
static void mdio_sync(void __iomem *mdio_addr)
{
	int bits = 32;

	/* Establish sync by sending at least 32 logic ones. */
	while (--bits >= 0) {
		iowrite32(MDIO_WRITE1, mdio_addr);
		mdio_delay(mdio_addr);
		iowrite32(MDIO_WRITE1 | MDIO_ShiftClk, mdio_addr);
		mdio_delay(mdio_addr);
	}
}

static int mdio_read(struct net_device *dev, int phy_id, int location)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *mdio_addr = np->base_addr + MIICtrl;
	int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
	int i, retval = 0;

	if (mii_preamble_required)
		mdio_sync(mdio_addr);

	/* Shift the read command bits out. */
	for (i = 15; i >= 0; i--) {
		int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;

		iowrite32(dataval, mdio_addr);
		mdio_delay(mdio_addr);
		iowrite32(dataval | MDIO_ShiftClk, mdio_addr);
		mdio_delay(mdio_addr);
	}
	/* Read the two transition, 16 data, and wire-idle bits. */
	for (i = 20; i > 0; i--) {
		iowrite32(MDIO_EnbIn, mdio_addr);
		mdio_delay(mdio_addr);
		retval = (retval << 1) | ((ioread32(mdio_addr) & MDIO_DataIn) ? 1 : 0);
		iowrite32(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
		mdio_delay(mdio_addr);
	}
	return (retval>>1) & 0xffff;
}

static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *mdio_addr = np->base_addr + MIICtrl;
	int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location<<18) | value;
	int i;

	if (location == 4  &&  phy_id == np->phys[0])	/* MII_ADVERTISE */
		np->mii_if.advertising = value;

	if (mii_preamble_required)
		mdio_sync(mdio_addr);

	/* Shift the command bits out. */
	for (i = 31; i >= 0; i--) {
		int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;

		iowrite32(dataval, mdio_addr);
		mdio_delay(mdio_addr);
		iowrite32(dataval | MDIO_ShiftClk, mdio_addr);
		mdio_delay(mdio_addr);
	}
	/* Clear out extra bits. */
	for (i = 2; i > 0; i--) {
		iowrite32(MDIO_EnbIn, mdio_addr);
		mdio_delay(mdio_addr);
		iowrite32(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
		mdio_delay(mdio_addr);
	}
}


static int netdev_open(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;
	const int irq = np->pci_dev->irq;
	int i;

	iowrite32(0x00000001, ioaddr + PCIBusCfg);		/* Reset */

	netif_device_detach(dev);
	i = request_irq(irq, intr_handler, IRQF_SHARED, dev->name, dev);
	if (i)
		goto out_err;

	if (debug > 1)
		netdev_dbg(dev, "%s() irq %d\n", __func__, irq);

	i = alloc_ringdesc(dev);
	if (i)
		goto out_err;

	spin_lock_irq(&np->lock);
	netif_device_attach(dev);
	init_registers(dev);
	spin_unlock_irq(&np->lock);

	netif_start_queue(dev);
	if (debug > 2)
		netdev_dbg(dev, "Done %s()\n", __func__);

	/* Set the timer to check for link beat. */
	timer_setup(&np->timer, netdev_timer, 0);
	np->timer.expires = jiffies + 1*HZ;
	add_timer(&np->timer);
	return 0;
out_err:
	netif_device_attach(dev);
	return i;
}

#define MII_DAVICOM_DM9101	0x0181b800

static int update_link(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int duplex, fasteth, result, mii_reg;

	/* BMSR */
	mii_reg = mdio_read(dev, np->phys[0], MII_BMSR);

	if (mii_reg == 0xffff)
		return np->csr6;
	/* reread: the link status bit is sticky */
	mii_reg = mdio_read(dev, np->phys[0], MII_BMSR);
	if (!(mii_reg & 0x4)) {			/* BMSR_LSTATUS clear: no link */
		if (netif_carrier_ok(dev)) {
			if (debug)
				dev_info(&dev->dev,
					 "MII #%d reports no link. Disabling watchdog\n",
					 np->phys[0]);
			netif_carrier_off(dev);
		}
		return np->csr6;
	}
	if (!netif_carrier_ok(dev)) {
		if (debug)
			dev_info(&dev->dev,
				 "MII #%d link is back. Enabling watchdog\n",
				 np->phys[0]);
		netif_carrier_on(dev);
	}

	if ((np->mii & ~0xf) == MII_DAVICOM_DM9101) {
		/* If the link partner doesn't support autonegotiation,
		 * the MII detects its abilities with "parallel detection".
		 * Some MIIs update the LPA register to the result of the
		 * parallel detection, some don't.
		 * The Davicom PHY [at least 0181b800] doesn't.
		 * Instead, the duplex (BMCR_FULLDPLX) and speed
		 * (BMCR_SPEED100) bits of the BMCR are updated to the
		 * result of the negotiation.
		 */
		mii_reg = mdio_read(dev, np->phys[0], MII_BMCR);
		duplex = mii_reg & BMCR_FULLDPLX;
		fasteth = mii_reg & BMCR_SPEED100;
	} else {
		int negotiated;
		mii_reg	= mdio_read(dev, np->phys[0], MII_LPA);
		negotiated = mii_reg & np->mii_if.advertising;

		/* 0x02C0 = LPA_100BASE4 | LPA_100HALF | LPA_10FULL */
		duplex = (negotiated & LPA_100FULL) || ((negotiated & 0x02C0) == LPA_10FULL);
		/* 0x380 = LPA_100BASE4 | LPA_100FULL | LPA_100HALF */
		fasteth = negotiated & 0x380;
	}
	duplex |= np->mii_if.force_media;
	/* remove fastether and fullduplex */
	result = np->csr6 & ~0x20000200;
	if (duplex)
		result |= 0x200;
	if (fasteth)
		result |= 0x20000000;
	if (result != np->csr6 && debug)
		dev_info(&dev->dev,
			 "Setting %dMBit-%s-duplex based on MII#%d\n",
			 fasteth ? 100 : 10, duplex ? "full" : "half",
			 np->phys[0]);
	return result;
}

#define RXTX_TIMEOUT	2000
static inline void update_csr6(struct net_device *dev, int new)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;
	int limit = RXTX_TIMEOUT;

	if (!netif_device_present(dev))
		new = 0;
	if (new==np->csr6)
		return;
	/* stop both Tx and Rx processes */
	iowrite32(np->csr6 & ~0x2002, ioaddr + NetworkConfig);
	/* wait until they have really stopped */
	for (;;) {
		int csr5 = ioread32(ioaddr + IntrStatus);
		int t;

		t = (csr5 >> 17) & 0x07;
		if (t==0||t==1) {
			/* rx stopped */
			t = (csr5 >> 20) & 0x07;
			if (t==0||t==1)
				break;
		}

		limit--;
		if(!limit) {
			dev_info(&dev->dev,
				 "couldn't stop rxtx, IntrStatus %xh\n", csr5);
			break;
		}
		udelay(1);
	}
	np->csr6 = new;
	/* and restart them with the new configuration */
	iowrite32(np->csr6, ioaddr + NetworkConfig);
	if (new & 0x200)
		np->mii_if.full_duplex = 1;
}

static void netdev_timer(struct timer_list *t)
{
	struct netdev_private *np = from_timer(np, t, timer);
	struct net_device *dev = pci_get_drvdata(np->pci_dev);
	void __iomem *ioaddr = np->base_addr;

	if (debug > 2)
		netdev_dbg(dev, "Media selection timer tick, status %08x config %08x\n",
			   ioread32(ioaddr + IntrStatus),
			   ioread32(ioaddr + NetworkConfig));
	spin_lock_irq(&np->lock);
	update_csr6(dev, update_link(dev));
	spin_unlock_irq(&np->lock);
	np->timer.expires = jiffies + 10*HZ;
	add_timer(&np->timer);
}

static void init_rxtx_rings(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int i;

	np->rx_head_desc = &np->rx_ring[0];
	np->tx_ring = (struct w840_tx_desc*)&np->rx_ring[RX_RING_SIZE];

	/* Initialize all Rx descriptors. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].length = np->rx_buf_sz;
		np->rx_ring[i].status = 0;
		np->rx_skbuff[i] = NULL;
	}
	/* Mark the last entry as wrapping the ring. */
	np->rx_ring[i-1].length |= DescEndRing;

	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz);
		np->rx_skbuff[i] = skb;
		if (skb == NULL)
			break;
		np->rx_addr[i] = dma_map_single(&np->pci_dev->dev, skb->data,
						np->rx_buf_sz,
						DMA_FROM_DEVICE);

		np->rx_ring[i].buffer1 = np->rx_addr[i];
		np->rx_ring[i].status = DescOwned;
	}

	np->cur_rx = 0;
	np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);

	/* Initialize the Tx descriptors */
	for (i = 0; i < TX_RING_SIZE; i++) {
		np->tx_skbuff[i] = NULL;
		np->tx_ring[i].status = 0;
	}
	np->tx_full = 0;
	np->tx_q_bytes = np->dirty_tx = np->cur_tx = 0;

	iowrite32(np->ring_dma_addr, np->base_addr + RxRingPtr);
	iowrite32(np->ring_dma_addr+sizeof(struct w840_rx_desc)*RX_RING_SIZE,
		np->base_addr + TxRingPtr);

}

static void free_rxtx_rings(struct netdev_private* np)
{
	int i;
	/* Free all the skbuffs in the Rx queue. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].status = 0;
		if (np->rx_skbuff[i]) {
			dma_unmap_single(&np->pci_dev->dev, np->rx_addr[i],
					 np->rx_skbuff[i]->len,
					 DMA_FROM_DEVICE);
			dev_kfree_skb(np->rx_skbuff[i]);
		}
		np->rx_skbuff[i] = NULL;
	}
	for (i = 0; i < TX_RING_SIZE; i++) {
		if (np->tx_skbuff[i]) {
			dma_unmap_single(&np->pci_dev->dev, np->tx_addr[i],
					 np->tx_skbuff[i]->len, DMA_TO_DEVICE);
			dev_kfree_skb(np->tx_skbuff[i]);
		}
		np->tx_skbuff[i] = NULL;
	}
}

static void init_registers(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;
	int i;

	for (i = 0; i < 6; i++)
		iowrite8(dev->dev_addr[i], ioaddr + StationAddr + i);

	/* Initialize other registers. */
#ifdef __BIG_ENDIAN
	i = (1<<20);	/* Big-endian descriptors */
#else
	i = 0;
#endif
	i |= (0x04<<2);		/* skip length 4 u32 */
	i |= 0x02;		/* give Rx priority */

	/* Configure the PCI bus bursts and FIFO thresholds.
	   486: Set 8 longword cache alignment, 8 longword burst.
	   586: Set 16 longword cache alignment, no burst limit.
	   Cache alignment bits 15:14	     Burst length 13:8
		0000	<not allowed> 		0000 align to cache	0800 8 longwords
		4000	8  longwords		0100 1 longword		1000 16 longwords
		8000	16 longwords		0200 2 longwords	2000 32 longwords
		C000	32  longwords		0400 4 longwords */
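
	/* Worked decode of the values used below, per the table above:
	   0x4800 = 0x4000 | 0x0800 -> 8-longword alignment,  8-longword bursts;
	   0xE000 = 0xC000 | 0x2000 -> 32-longword alignment, 32-longword bursts. */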

#if defined (__i386__) && !defined(MODULE) && !defined(CONFIG_UML)
	/* When not a module we can work around broken '486 PCI boards. */
	if (boot_cpu_data.x86 <= 4) {
		i |= 0x4800;
		dev_info(&dev->dev,
			 "This is a 386/486 PCI system, setting cache alignment to 8 longwords\n");
	} else {
		i |= 0xE000;
	}
#elif defined(__powerpc__) || defined(__i386__) || defined(__alpha__) || defined(__ia64__) || defined(__x86_64__)
	i |= 0xE000;
#elif defined(CONFIG_SPARC) || defined (CONFIG_PARISC) || defined(CONFIG_ARM)
	i |= 0x4800;
#else
	dev_warn(&dev->dev, "unknown CPU architecture, using default csr0 setting\n");
	i |= 0x4800;
#endif
	iowrite32(i, ioaddr + PCIBusCfg);

	np->csr6 = 0;
	/* 128 byte Tx threshold;
		Transmit on; Receive on; */
	update_csr6(dev, 0x00022002 | update_link(dev) | __set_rx_mode(dev));

	/* Clear and Enable interrupts by setting the interrupt mask. */
	iowrite32(0x1A0F5, ioaddr + IntrStatus);
	iowrite32(0x1A0F5, ioaddr + IntrEnable);

	iowrite32(0, ioaddr + RxStartDemand);
}

static void tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;
	const int irq = np->pci_dev->irq;

	dev_warn(&dev->dev, "Transmit timed out, status %08x, resetting...\n",
		 ioread32(ioaddr + IntrStatus));

	{
		int i;
		printk(KERN_DEBUG "  Rx ring %p: ", np->rx_ring);
		for (i = 0; i < RX_RING_SIZE; i++)
			printk(KERN_CONT " %08x", (unsigned int)np->rx_ring[i].status);
		printk(KERN_CONT "\n");
		printk(KERN_DEBUG "  Tx ring %p: ", np->tx_ring);
		for (i = 0; i < TX_RING_SIZE; i++)
			printk(KERN_CONT " %08x", np->tx_ring[i].status);
		printk(KERN_CONT "\n");
	}
	printk(KERN_DEBUG "Tx cur %d Tx dirty %d Tx Full %d, q bytes %d\n",
	       np->cur_tx, np->dirty_tx, np->tx_full, np->tx_q_bytes);
	printk(KERN_DEBUG "Tx Descriptor addr %xh\n",
	       ioread32(ioaddr + CurTxDescAddr));

	disable_irq(irq);
	spin_lock_irq(&np->lock);
	/*
	 * Under high load dirty_tx and the internal tx descriptor pointer
	 * come out of sync, thus perform a software reset and reinitialize
	 * everything.
	 */

	iowrite32(1, np->base_addr+PCIBusCfg);
	udelay(1);

	free_rxtx_rings(np);
	init_rxtx_rings(dev);
	init_registers(dev);
	spin_unlock_irq(&np->lock);
	enable_irq(irq);

	netif_wake_queue(dev);
	netif_trans_update(dev); /* prevent tx timeout */
	np->stats.tx_errors++;
}

/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static int alloc_ringdesc(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);

	np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);

	np->rx_ring = dma_alloc_coherent(&np->pci_dev->dev,
					 sizeof(struct w840_rx_desc) * RX_RING_SIZE +
					 sizeof(struct w840_tx_desc) * TX_RING_SIZE,
					 &np->ring_dma_addr, GFP_KERNEL);
	if(!np->rx_ring)
		return -ENOMEM;
	init_rxtx_rings(dev);
	return 0;
}

static void free_ringdesc(struct netdev_private *np)
{
	dma_free_coherent(&np->pci_dev->dev,
			  sizeof(struct w840_rx_desc) * RX_RING_SIZE +
			  sizeof(struct w840_tx_desc) * TX_RING_SIZE,
			  np->rx_ring, np->ring_dma_addr);

}

static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	unsigned entry;

	/* Caution: the write order is important here, set the field
	   with the "ownership" bits last. */

	/* Calculate the next Tx descriptor entry. */
	entry = np->cur_tx % TX_RING_SIZE;

	np->tx_addr[entry] = dma_map_single(&np->pci_dev->dev, skb->data,
					    skb->len, DMA_TO_DEVICE);
	np->tx_skbuff[entry] = skb;

	np->tx_ring[entry].buffer1 = np->tx_addr[entry];
	if (skb->len < TX_BUFLIMIT) {
		np->tx_ring[entry].length = DescWholePkt | skb->len;
	} else {
		int len = skb->len - TX_BUFLIMIT;

		np->tx_ring[entry].buffer2 = np->tx_addr[entry]+TX_BUFLIMIT;
		np->tx_ring[entry].length = DescWholePkt | (len << 11) | TX_BUFLIMIT;
	}
	if(entry == TX_RING_SIZE-1)
		np->tx_ring[entry].length |= DescEndRing;

	/* Now acquire the irq spinlock.
	 * The difficult race is the ordering between
	 * increasing np->cur_tx and setting DescOwned:
	 * - if np->cur_tx is increased first the interrupt
	 *   handler could consider the packet as transmitted
	 *   since DescOwned is cleared.
	 * - If DescOwned is set first the NIC could report the
	 *   packet as sent, but the interrupt handler would ignore it
	 *   since the np->cur_tx was not yet increased.
	 */
	spin_lock_irq(&np->lock);
	np->cur_tx++;

	wmb(); /* flush length, buffer1, buffer2 */
	np->tx_ring[entry].status = DescOwned;
	wmb(); /* flush status and kick the hardware */
	iowrite32(0, np->base_addr + TxStartDemand);
	np->tx_q_bytes += skb->len;
	/* Work around horrible bug in the chip by marking the queue as full
	   when we do not have FIFO room for a maximum sized packet. */
	if (np->cur_tx - np->dirty_tx > TX_QUEUE_LEN ||
		((np->drv_flags & HasBrokenTx) && np->tx_q_bytes > TX_BUG_FIFO_LIMIT)) {
		netif_stop_queue(dev);
		wmb();
		np->tx_full = 1;
	}
	spin_unlock_irq(&np->lock);

	if (debug > 4) {
		netdev_dbg(dev, "Transmit frame #%d queued in slot %d\n",
			   np->cur_tx, entry);
	}
	return NETDEV_TX_OK;
}

static void netdev_tx_done(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);

	for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
		int entry = np->dirty_tx % TX_RING_SIZE;
		int tx_status = np->tx_ring[entry].status;

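		/* A negative status means bit 31 (DescOwned) is still set:
		   the NIC has not finished with this descriptor.  Bit 15 is
		   the error summary for a completed frame. */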
		if (tx_status < 0)
			break;
		if (tx_status & 0x8000) {	/* There was an error, log it. */
#ifndef final_version
			if (debug > 1)
				netdev_dbg(dev, "Transmit error, Tx status %08x\n",
					   tx_status);
#endif
			np->stats.tx_errors++;
			if (tx_status & 0x0104) np->stats.tx_aborted_errors++;
			if (tx_status & 0x0C80) np->stats.tx_carrier_errors++;
			if (tx_status & 0x0200) np->stats.tx_window_errors++;
			if (tx_status & 0x0002) np->stats.tx_fifo_errors++;
			if ((tx_status & 0x0080) && np->mii_if.full_duplex == 0)
				np->stats.tx_heartbeat_errors++;
		} else {
#ifndef final_version
			if (debug > 3)
				netdev_dbg(dev, "Transmit slot %d ok, Tx status %08x\n",
					   entry, tx_status);
#endif
			np->stats.tx_bytes += np->tx_skbuff[entry]->len;
			np->stats.collisions += (tx_status >> 3) & 15;
			np->stats.tx_packets++;
		}
		/* Free the original skb. */
		dma_unmap_single(&np->pci_dev->dev, np->tx_addr[entry],
				 np->tx_skbuff[entry]->len, DMA_TO_DEVICE);
		np->tx_q_bytes -= np->tx_skbuff[entry]->len;
		dev_kfree_skb_irq(np->tx_skbuff[entry]);
		np->tx_skbuff[entry] = NULL;
	}
	if (np->tx_full &&
	    np->cur_tx - np->dirty_tx < TX_QUEUE_LEN_RESTART &&
	    np->tx_q_bytes < TX_BUG_FIFO_LIMIT) {
		/* The ring is no longer full, clear tbusy. */
		np->tx_full = 0;
		wmb();
		netif_wake_queue(dev);
	}
}

/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
static irqreturn_t intr_handler(int irq, void *dev_instance)
{
	struct net_device *dev = (struct net_device *)dev_instance;
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;
	int work_limit = max_interrupt_work;
	int handled = 0;

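	/* The IRQ line may be shared: never touch the hardware once the
	   device has been detached for suspend (see the synchronization
	   notes above w840_suspend). */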
	if (!netif_device_present(dev))
		return IRQ_NONE;
	do {
		u32 intr_status = ioread32(ioaddr + IntrStatus);

		/* Acknowledge all of the current interrupt sources ASAP. */
		iowrite32(intr_status & 0x001ffff, ioaddr + IntrStatus);

		if (debug > 4)
			netdev_dbg(dev, "Interrupt, status %04x\n", intr_status);

		if ((intr_status & (NormalIntr | AbnormalIntr)) == 0)
			break;

		handled = 1;

		if (intr_status & (RxIntr | RxNoBuf))
			netdev_rx(dev);
		if (intr_status & RxNoBuf)
			iowrite32(0, ioaddr + RxStartDemand);

		if (intr_status & (TxNoBuf | TxIntr) &&
		    np->cur_tx != np->dirty_tx) {
			spin_lock(&np->lock);
			netdev_tx_done(dev);
			spin_unlock(&np->lock);
		}

		/* Abnormal error summary/uncommon events handlers. */
		if (intr_status & (AbnormalIntr | TxFIFOUnderflow | SystemError |
				   TimerInt | TxDied))
			netdev_error(dev, intr_status);

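		/* Interrupt mitigation: when a single invocation delivers
		   too much work, mask everything except AbnormalIntr and
		   TimerInt and let the general-purpose timer re-enable the
		   rest (see the TimerInt handling in netdev_error). */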
		if (--work_limit < 0) {
			dev_warn(&dev->dev,
				 "Too much work at interrupt, status=0x%04x\n",
				 intr_status);
			/* Set the timer to re-enable the other interrupts after
			   10*82usec ticks. */
			spin_lock(&np->lock);
			if (netif_device_present(dev)) {
				iowrite32(AbnormalIntr | TimerInt, ioaddr + IntrEnable);
				iowrite32(10, ioaddr + GPTimer);
			}
			spin_unlock(&np->lock);
			break;
		}
	} while (1);

	if (debug > 3)
		netdev_dbg(dev, "exiting interrupt, status=%#4.4x\n",
			   ioread32(ioaddr + IntrStatus));
	return IRQ_RETVAL(handled);
}

/* This routine is logically part of the interrupt handler, but separated
   for clarity and better register allocation. */
static int netdev_rx(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int entry = np->cur_rx % RX_RING_SIZE;
	int work_limit = np->dirty_rx + RX_RING_SIZE - np->cur_rx;

	if (debug > 4) {
		netdev_dbg(dev, " In netdev_rx(), entry %d status %04x\n",
			   entry, np->rx_ring[entry].status);
	}

	/* If EOP is set on the next entry, it's a new packet. Send it up. */
	while (--work_limit >= 0) {
		struct w840_rx_desc *desc = np->rx_head_desc;
		s32 status = desc->status;

		if (debug > 4)
			netdev_dbg(dev, "  netdev_rx() status was %08x\n",
				   status);
		if (status < 0)
			break;
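		/* The Rx status word is Tulip-like: bits 8 and 9 mark the
		   last and first descriptor of a frame, bit 15 is the error
		   summary, and bits 16-26 hold the frame length, so a good
		   frame that fits one buffer reads 0x0300 here. */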
		if ((status & 0x38008300) != 0x0300) {
			if ((status & 0x38000300) != 0x0300) {
				/* Ignore earlier buffers. */
				if ((status & 0xffff) != 0x7fff) {
					dev_warn(&dev->dev,
						 "Oversized Ethernet frame spanned multiple buffers, entry %#x status %04x!\n",
						 np->cur_rx, status);
					np->stats.rx_length_errors++;
				}
			} else if (status & 0x8000) {
				/* There was a fatal error. */
				if (debug > 2)
					netdev_dbg(dev, "Receive error, Rx status %08x\n",
						   status);
				np->stats.rx_errors++; /* end of a packet. */
				if (status & 0x0890) np->stats.rx_length_errors++;
				if (status & 0x004C) np->stats.rx_frame_errors++;
				if (status & 0x0002) np->stats.rx_crc_errors++;
			}
		} else {
			struct sk_buff *skb;
			/* Omit the four octet CRC from the length. */
			int pkt_len = ((status >> 16) & 0x7ff) - 4;

#ifndef final_version
			if (debug > 4)
				netdev_dbg(dev, "  netdev_rx() normal Rx pkt length %d status %x\n",
					   pkt_len, status);
#endif
			/* Check if the packet is long enough to accept without copying
			   to a minimally-sized skbuff. */
			if (pkt_len < rx_copybreak &&
			    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
				skb_reserve(skb, 2);	/* 16 byte align the IP header */
				dma_sync_single_for_cpu(&np->pci_dev->dev,
							np->rx_addr[entry],
							np->rx_skbuff[entry]->len,
							DMA_FROM_DEVICE);
				skb_copy_to_linear_data(skb, np->rx_skbuff[entry]->data, pkt_len);
				skb_put(skb, pkt_len);
				dma_sync_single_for_device(&np->pci_dev->dev,
							   np->rx_addr[entry],
							   np->rx_skbuff[entry]->len,
							   DMA_FROM_DEVICE);
			} else {
				dma_unmap_single(&np->pci_dev->dev,
						 np->rx_addr[entry],
						 np->rx_skbuff[entry]->len,
						 DMA_FROM_DEVICE);
				skb_put(skb = np->rx_skbuff[entry], pkt_len);
				np->rx_skbuff[entry] = NULL;
			}
#ifndef final_version				/* Remove after testing. */
			/* You will want this info for the initial debug. */
			if (debug > 5)
				netdev_dbg(dev, "  Rx data %pM %pM %02x%02x %pI4\n",
					   &skb->data[0], &skb->data[6],
					   skb->data[12], skb->data[13],
					   &skb->data[14]);
#endif
			skb->protocol = eth_type_trans(skb, dev);
			netif_rx(skb);
			np->stats.rx_packets++;
			np->stats.rx_bytes += pkt_len;
		}
		entry = (++np->cur_rx) % RX_RING_SIZE;
		np->rx_head_desc = &np->rx_ring[entry];
	}

	/* Refill the Rx ring buffers. */
	for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
		struct sk_buff *skb;

		entry = np->dirty_rx % RX_RING_SIZE;
		if (np->rx_skbuff[entry] == NULL) {
			skb = netdev_alloc_skb(dev, np->rx_buf_sz);
			np->rx_skbuff[entry] = skb;
			if (skb == NULL)
				break;			/* Better luck next round. */
			np->rx_addr[entry] = dma_map_single(&np->pci_dev->dev,
							    skb->data,
							    np->rx_buf_sz,
							    DMA_FROM_DEVICE);
			np->rx_ring[entry].buffer1 = np->rx_addr[entry];
		}
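		/* Publish buffer1 before handing the descriptor back to
		   the NIC by setting DescOwned. */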
		wmb();
		np->rx_ring[entry].status = DescOwned;
	}

	return 0;
}

static void netdev_error(struct net_device *dev, int intr_status)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;

	if (debug > 2)
		netdev_dbg(dev, "Abnormal event, %08x\n", intr_status);
	if (intr_status == 0xffffffff)
		return;
	spin_lock(&np->lock);
	if (intr_status & TxFIFOUnderflow) {
		int new;

		/* Bump up the Tx threshold */
#if 0
		/* This causes lots of dropped packets,
		 * and under high load even tx_timeouts
		 */
		new = np->csr6 + 0x4000;
#else
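		/* The Tx threshold occupies CSR6 bits 14-20.  Double it on
		   each underflow, saturating at 127, which tells the chip to
		   buffer the entire packet before starting transmission. */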
		new = (np->csr6 >> 14) & 0x7f;
		if (new < 64)
			new *= 2;
		else
			new = 127; /* load full packet before starting */
		new = (np->csr6 & ~(0x7F << 14)) | (new << 14);
#endif
		netdev_dbg(dev, "Tx underflow, new csr6 %08x\n", new);
		update_csr6(dev, new);
	}
	if (intr_status & RxDied) {		/* Missed a Rx frame. */
		np->stats.rx_errors++;
	}
	if (intr_status & TimerInt) {
		/* Re-enable other interrupts. */
		if (netif_device_present(dev))
			iowrite32(0x1A0F5, ioaddr + IntrEnable);
	}
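	/* RxMissed counts frames the chip dropped for lack of Rx buffers;
	   fold its low 16 bits into the stats and kick the Rx engine via
	   RxStartDemand. */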
	np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;
	iowrite32(0, ioaddr + RxStartDemand);
	spin_unlock(&np->lock);
}

static struct net_device_stats *get_stats(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;

	/* The chip only needs to report frames it silently dropped. */
	spin_lock_irq(&np->lock);
	if (netif_running(dev) && netif_device_present(dev))
		np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;
	spin_unlock_irq(&np->lock);

	return &np->stats;
}


static u32 __set_rx_mode(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;
	u32 mc_filter[2];			/* Multicast hash filter */
	u32 rx_mode;

	if (dev->flags & IFF_PROMISC) {			/* Set promiscuous. */
		memset(mc_filter, 0xff, sizeof(mc_filter));
		rx_mode = RxAcceptBroadcast | AcceptMulticast | RxAcceptAllPhys
			| AcceptMyPhys;
	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
		   (dev->flags & IFF_ALLMULTI)) {
		/* Too many to match, or accept all multicasts. */
		memset(mc_filter, 0xff, sizeof(mc_filter));
		rx_mode = RxAcceptBroadcast | AcceptMulticast | AcceptMyPhys;
	} else {
		struct netdev_hw_addr *ha;

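		/* Hash each address into the 64-bit filter: the top six
		   bits of the Ethernet CRC, inverted, select one of the 64
		   bits spread across the two MulticastFilter registers. */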
		memset(mc_filter, 0, sizeof(mc_filter));
		netdev_for_each_mc_addr(ha, dev) {
			int filbit;

			filbit = (ether_crc(ETH_ALEN, ha->addr) >> 26) ^ 0x3F;
			filbit &= 0x3f;
			mc_filter[filbit >> 5] |= 1 << (filbit & 31);
		}
		rx_mode = RxAcceptBroadcast | AcceptMulticast | AcceptMyPhys;
	}
	iowrite32(mc_filter[0], ioaddr + MulticastFilter0);
	iowrite32(mc_filter[1], ioaddr + MulticastFilter1);
	return rx_mode;
}

static void set_rx_mode(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	u32 rx_mode = __set_rx_mode(dev);

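	/* The Rx filter bits live in CSR6[7:3], hence the ~0x00F8 mask;
	   swap the new mode in under the lock. */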
	spin_lock_irq(&np->lock);
	update_csr6(dev, (np->csr6 & ~0x00F8) | rx_mode);
	spin_unlock_irq(&np->lock);
}

static void netdev_get_drvinfo(struct net_device *dev,
			       struct ethtool_drvinfo *info)
{
	struct netdev_private *np = netdev_priv(dev);

	strscpy(info->driver, DRV_NAME, sizeof(info->driver));
	strscpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}

static int netdev_get_link_ksettings(struct net_device *dev,
				     struct ethtool_link_ksettings *cmd)
{
	struct netdev_private *np = netdev_priv(dev);

	spin_lock_irq(&np->lock);
	mii_ethtool_get_link_ksettings(&np->mii_if, cmd);
	spin_unlock_irq(&np->lock);

	return 0;
}

static int netdev_set_link_ksettings(struct net_device *dev,
				     const struct ethtool_link_ksettings *cmd)
{
	struct netdev_private *np = netdev_priv(dev);
	int rc;

	spin_lock_irq(&np->lock);
	rc = mii_ethtool_set_link_ksettings(&np->mii_if, cmd);
	spin_unlock_irq(&np->lock);

	return rc;
}

static int netdev_nway_reset(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);

	return mii_nway_restart(&np->mii_if);
}

static u32 netdev_get_link(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);

	return mii_link_ok(&np->mii_if);
}

static u32 netdev_get_msglevel(struct net_device *dev)
{
	return debug;
}

static void netdev_set_msglevel(struct net_device *dev, u32 value)
{
	debug = value;
}

static const struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo		= netdev_get_drvinfo,
	.nway_reset		= netdev_nway_reset,
	.get_link		= netdev_get_link,
	.get_msglevel		= netdev_get_msglevel,
	.set_msglevel		= netdev_set_msglevel,
	.get_link_ksettings	= netdev_get_link_ksettings,
	.set_link_ksettings	= netdev_set_link_ksettings,
};

static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct mii_ioctl_data *data = if_mii(rq);
	struct netdev_private *np = netdev_priv(dev);

	switch (cmd) {
	case SIOCGMIIPHY:		/* Get address of MII PHY in use. */
		data->phy_id = np->phys[0] & 0x1f;
		fallthrough;

	case SIOCGMIIREG:		/* Read MII PHY register. */
		spin_lock_irq(&np->lock);
		data->val_out = mdio_read(dev, data->phy_id & 0x1f, data->reg_num & 0x1f);
		spin_unlock_irq(&np->lock);
		return 0;

	case SIOCSMIIREG:		/* Write MII PHY register. */
		spin_lock_irq(&np->lock);
		mdio_write(dev, data->phy_id & 0x1f, data->reg_num & 0x1f, data->val_in);
		spin_unlock_irq(&np->lock);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static int netdev_close(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;

	netif_stop_queue(dev);

	if (debug > 1) {
		netdev_dbg(dev, "Shutting down ethercard, status was %08x Config %08x\n",
			   ioread32(ioaddr + IntrStatus),
			   ioread32(ioaddr + NetworkConfig));
		netdev_dbg(dev, "Queue pointers were Tx %d / %d,  Rx %d / %d\n",
			   np->cur_tx, np->dirty_tx,
			   np->cur_rx, np->dirty_rx);
	}

	/* Stop the chip's Tx and Rx processes. */
	spin_lock_irq(&np->lock);
	netif_device_detach(dev);
	update_csr6(dev, 0);
	iowrite32(0x0000, ioaddr + IntrEnable);
	spin_unlock_irq(&np->lock);

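	/* The device stays detached while the irq is freed so a late
	   shared interrupt cannot touch the stopped hardware; re-attach
	   once the handler can no longer run. */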
	free_irq(np->pci_dev->irq, dev);
	wmb();
	netif_device_attach(dev);

	if (ioread32(ioaddr + NetworkConfig) != 0xffffffff)
		np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;

#ifdef __i386__
	if (debug > 2) {
		int i;

		printk(KERN_DEBUG "  Tx ring at %p:\n", np->tx_ring);
		for (i = 0; i < TX_RING_SIZE; i++)
			printk(KERN_DEBUG " #%d desc. %04x %04x %08x\n",
			       i, np->tx_ring[i].length,
			       np->tx_ring[i].status, np->tx_ring[i].buffer1);
		printk(KERN_DEBUG "  Rx ring %p:\n", np->rx_ring);
		for (i = 0; i < RX_RING_SIZE; i++) {
			printk(KERN_DEBUG " #%d desc. %04x %04x %08x\n",
			       i, np->rx_ring[i].length,
			       np->rx_ring[i].status, np->rx_ring[i].buffer1);
		}
	}
#endif /* __i386__ debugging only */

	del_timer_sync(&np->timer);

	free_rxtx_rings(np);
	free_ringdesc(np);

	return 0;
}

static void w840_remove1(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct netdev_private *np = netdev_priv(dev);

		unregister_netdev(dev);
		pci_iounmap(pdev, np->base_addr);
		free_netdev(dev);
	}
}

/*
 * suspend/resume synchronization:
 * - open, close, do_ioctl:
 *	rtnl_lock, & netif_device_detach after the rtnl_unlock.
 * - get_stats:
 *	spin_lock_irq(np->lock), doesn't touch hw if not present
 * - start_xmit:
 *	synchronize_irq + netif_tx_disable;
 * - tx_timeout:
 *	netif_device_detach + netif_tx_disable;
 * - set_multicast_list:
 *	netif_device_detach + netif_tx_disable;
 * - interrupt handler:
 *	doesn't touch hw if not present, synchronize_irq waits for
 *	running instances of the interrupt handler.
 *
 * Disabling hw requires clearing csr6 & IntrEnable.
 * update_csr6 & all functions that write IntrEnable check
 * netif_device_present before setting any bits.
 *
 * Detach must occur while holding the lock (before spin_unlock_irq());
 * interrupts from a detached device would cause an irq storm.
 */
static int __maybe_unused w840_suspend(struct device *dev_d)
{
	struct net_device *dev = dev_get_drvdata(dev_d);
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;

	rtnl_lock();
	if (netif_running(dev)) {
		del_timer_sync(&np->timer);

		spin_lock_irq(&np->lock);
		netif_device_detach(dev);
		update_csr6(dev, 0);
		iowrite32(0, ioaddr + IntrEnable);
		spin_unlock_irq(&np->lock);

		synchronize_irq(np->pci_dev->irq);
		netif_tx_disable(dev);

		np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;

		/* no more hardware accesses behind this line. */

		BUG_ON(np->csr6 || ioread32(ioaddr + IntrEnable));

		/* pci_power_off(pdev, -1); */

		free_rxtx_rings(np);
	} else {
		netif_device_detach(dev);
	}
	rtnl_unlock();
	return 0;
}

static int __maybe_unused w840_resume(struct device *dev_d)
{
	struct net_device *dev = dev_get_drvdata(dev_d);
	struct netdev_private *np = netdev_priv(dev);

	rtnl_lock();
	if (netif_device_present(dev))
		goto out; /* device not suspended */
	if (netif_running(dev)) {
		spin_lock_irq(&np->lock);
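		/* Writing bit 0 of PCIBusCfg apparently requests a software
		   reset; the read-back flushes the posted write before the
		   short settling delay. */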
		iowrite32(1, np->base_addr + PCIBusCfg);
		ioread32(np->base_addr + PCIBusCfg);
		udelay(1);
		netif_device_attach(dev);
		init_rxtx_rings(dev);
		init_registers(dev);
		spin_unlock_irq(&np->lock);

		netif_wake_queue(dev);

		mod_timer(&np->timer, jiffies + 1*HZ);
	} else {
		netif_device_attach(dev);
	}
out:
	rtnl_unlock();
	return 0;
}

static SIMPLE_DEV_PM_OPS(w840_pm_ops, w840_suspend, w840_resume);

static struct pci_driver w840_driver = {
	.name		= DRV_NAME,
	.id_table	= w840_pci_tbl,
	.probe		= w840_probe1,
	.remove		= w840_remove1,
	.driver.pm	= &w840_pm_ops,
};

module_pci_driver(w840_driver);