1d9fb9f38SJeff Kirsher /* natsemi.c: A Linux PCI Ethernet driver for the NatSemi DP8381x series. */
2d9fb9f38SJeff Kirsher /*
3d9fb9f38SJeff Kirsher 	Written/copyright 1999-2001 by Donald Becker.
4d9fb9f38SJeff Kirsher 	Portions copyright (c) 2001,2002 Sun Microsystems (thockin@sun.com)
5d9fb9f38SJeff Kirsher 	Portions copyright 2001,2002 Manfred Spraul (manfred@colorfullife.com)
6d9fb9f38SJeff Kirsher 	Portions copyright 2004 Harald Welte <laforge@gnumonks.org>
7d9fb9f38SJeff Kirsher 
8d9fb9f38SJeff Kirsher 	This software may be used and distributed according to the terms of
9d9fb9f38SJeff Kirsher 	the GNU General Public License (GPL), incorporated herein by reference.
10d9fb9f38SJeff Kirsher 	Drivers based on or derived from this code fall under the GPL and must
11d9fb9f38SJeff Kirsher 	retain the authorship, copyright and license notice.  This file is not
12d9fb9f38SJeff Kirsher 	a complete program and may only be used when the entire operating
13d9fb9f38SJeff Kirsher 	system is licensed under the GPL.  License for under other terms may be
14d9fb9f38SJeff Kirsher 	available.  Contact the original author for details.
15d9fb9f38SJeff Kirsher 
16d9fb9f38SJeff Kirsher 	The original author may be reached as becker@scyld.com, or at
17d9fb9f38SJeff Kirsher 	Scyld Computing Corporation
18d9fb9f38SJeff Kirsher 	410 Severn Ave., Suite 210
19d9fb9f38SJeff Kirsher 	Annapolis MD 21403
20d9fb9f38SJeff Kirsher 
21d9fb9f38SJeff Kirsher 	Support information and updates available at
22d9fb9f38SJeff Kirsher 	http://www.scyld.com/network/netsemi.html
23d9fb9f38SJeff Kirsher 	[link no longer provides useful info -jgarzik]
24d9fb9f38SJeff Kirsher 
25d9fb9f38SJeff Kirsher 
26d9fb9f38SJeff Kirsher 	TODO:
27d9fb9f38SJeff Kirsher 	* big endian support with CFG:BEM instead of cpu_to_le32
28d9fb9f38SJeff Kirsher */
29d9fb9f38SJeff Kirsher 
30d9fb9f38SJeff Kirsher #include <linux/module.h>
31d9fb9f38SJeff Kirsher #include <linux/kernel.h>
32d9fb9f38SJeff Kirsher #include <linux/string.h>
33d9fb9f38SJeff Kirsher #include <linux/timer.h>
34d9fb9f38SJeff Kirsher #include <linux/errno.h>
35d9fb9f38SJeff Kirsher #include <linux/ioport.h>
36d9fb9f38SJeff Kirsher #include <linux/slab.h>
37d9fb9f38SJeff Kirsher #include <linux/interrupt.h>
38d9fb9f38SJeff Kirsher #include <linux/pci.h>
39d9fb9f38SJeff Kirsher #include <linux/netdevice.h>
40d9fb9f38SJeff Kirsher #include <linux/etherdevice.h>
41d9fb9f38SJeff Kirsher #include <linux/skbuff.h>
42d9fb9f38SJeff Kirsher #include <linux/init.h>
43d9fb9f38SJeff Kirsher #include <linux/spinlock.h>
44d9fb9f38SJeff Kirsher #include <linux/ethtool.h>
45d9fb9f38SJeff Kirsher #include <linux/delay.h>
46d9fb9f38SJeff Kirsher #include <linux/rtnetlink.h>
47d9fb9f38SJeff Kirsher #include <linux/mii.h>
48d9fb9f38SJeff Kirsher #include <linux/crc32.h>
49d9fb9f38SJeff Kirsher #include <linux/bitops.h>
50d9fb9f38SJeff Kirsher #include <linux/prefetch.h>
51d9fb9f38SJeff Kirsher #include <asm/processor.h>	/* Processor type for cache alignment. */
52d9fb9f38SJeff Kirsher #include <asm/io.h>
53d9fb9f38SJeff Kirsher #include <asm/irq.h>
547c0f6ba6SLinus Torvalds #include <linux/uaccess.h>
55d9fb9f38SJeff Kirsher 
56d9fb9f38SJeff Kirsher #define DRV_NAME	"natsemi"
57d9fb9f38SJeff Kirsher #define DRV_VERSION	"2.1"
58d9fb9f38SJeff Kirsher #define DRV_RELDATE	"Sept 11, 2006"
59d9fb9f38SJeff Kirsher 
60d9fb9f38SJeff Kirsher #define RX_OFFSET	2
61d9fb9f38SJeff Kirsher 
62d9fb9f38SJeff Kirsher /* Updated to recommendations in pci-skeleton v2.03. */
63d9fb9f38SJeff Kirsher 
64d9fb9f38SJeff Kirsher /* The user-configurable values.
65d9fb9f38SJeff Kirsher    These may be modified when a driver module is loaded.*/
66d9fb9f38SJeff Kirsher 
67d9fb9f38SJeff Kirsher #define NATSEMI_DEF_MSG		(NETIF_MSG_DRV		| \
68d9fb9f38SJeff Kirsher 				 NETIF_MSG_LINK		| \
69d9fb9f38SJeff Kirsher 				 NETIF_MSG_WOL		| \
70d9fb9f38SJeff Kirsher 				 NETIF_MSG_RX_ERR	| \
71d9fb9f38SJeff Kirsher 				 NETIF_MSG_TX_ERR)
72d9fb9f38SJeff Kirsher static int debug = -1;
73d9fb9f38SJeff Kirsher 
74d9fb9f38SJeff Kirsher static int mtu;
75d9fb9f38SJeff Kirsher 
76d9fb9f38SJeff Kirsher /* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
77d9fb9f38SJeff Kirsher    This chip uses a 512 element hash table based on the Ethernet CRC.  */
78d9fb9f38SJeff Kirsher static const int multicast_filter_limit = 100;
79d9fb9f38SJeff Kirsher 
80d9fb9f38SJeff Kirsher /* Set the copy breakpoint for the copy-only-tiny-frames scheme.
81d9fb9f38SJeff Kirsher    Setting to > 1518 effectively disables this feature. */
82d9fb9f38SJeff Kirsher static int rx_copybreak;
83d9fb9f38SJeff Kirsher 
84d9fb9f38SJeff Kirsher static int dspcfg_workaround = 1;
85d9fb9f38SJeff Kirsher 
86d9fb9f38SJeff Kirsher /* Used to pass the media type, etc.
87d9fb9f38SJeff Kirsher    Both 'options[]' and 'full_duplex[]' should exist for driver
88d9fb9f38SJeff Kirsher    interoperability.
89d9fb9f38SJeff Kirsher    The media type is usually passed in 'options[]'.
90d9fb9f38SJeff Kirsher */
91d9fb9f38SJeff Kirsher #define MAX_UNITS 8		/* More are supported, limit only on options */
92d9fb9f38SJeff Kirsher static int options[MAX_UNITS];
93d9fb9f38SJeff Kirsher static int full_duplex[MAX_UNITS];
94d9fb9f38SJeff Kirsher 
95d9fb9f38SJeff Kirsher /* Operational parameters that are set at compile time. */
96d9fb9f38SJeff Kirsher 
97d9fb9f38SJeff Kirsher /* Keep the ring sizes a power of two for compile efficiency.
98d9fb9f38SJeff Kirsher    The compiler will convert <unsigned>'%'<2^N> into a bit mask.
99d9fb9f38SJeff Kirsher    Making the Tx ring too large decreases the effectiveness of channel
100d9fb9f38SJeff Kirsher    bonding and packet priority.
101d9fb9f38SJeff Kirsher    There are no ill effects from too-large receive rings. */
102d9fb9f38SJeff Kirsher #define TX_RING_SIZE	16
103d9fb9f38SJeff Kirsher #define TX_QUEUE_LEN	10 /* Limit ring entries actually used, min 4. */
104d9fb9f38SJeff Kirsher #define RX_RING_SIZE	32
105d9fb9f38SJeff Kirsher 
106d9fb9f38SJeff Kirsher /* Operational parameters that usually are not changed. */
107d9fb9f38SJeff Kirsher /* Time in jiffies before concluding the transmitter is hung. */
108d9fb9f38SJeff Kirsher #define TX_TIMEOUT  (2*HZ)
109d9fb9f38SJeff Kirsher 
110d9fb9f38SJeff Kirsher #define NATSEMI_HW_TIMEOUT	400
111d9fb9f38SJeff Kirsher #define NATSEMI_TIMER_FREQ	5*HZ
112d9fb9f38SJeff Kirsher #define NATSEMI_PG0_NREGS	64
113d9fb9f38SJeff Kirsher #define NATSEMI_RFDR_NREGS	8
114d9fb9f38SJeff Kirsher #define NATSEMI_PG1_NREGS	4
115d9fb9f38SJeff Kirsher #define NATSEMI_NREGS		(NATSEMI_PG0_NREGS + NATSEMI_RFDR_NREGS + \
116d9fb9f38SJeff Kirsher 				 NATSEMI_PG1_NREGS)
117d9fb9f38SJeff Kirsher #define NATSEMI_REGS_VER	1 /* v1 added RFDR registers */
118d9fb9f38SJeff Kirsher #define NATSEMI_REGS_SIZE	(NATSEMI_NREGS * sizeof(u32))
119d9fb9f38SJeff Kirsher 
120d9fb9f38SJeff Kirsher /* Buffer sizes:
121d9fb9f38SJeff Kirsher  * The nic writes 32-bit values, even if the upper bytes of
122d9fb9f38SJeff Kirsher  * a 32-bit value are beyond the end of the buffer.
123d9fb9f38SJeff Kirsher  */
124d9fb9f38SJeff Kirsher #define NATSEMI_HEADERS		22	/* 2*mac,type,vlan,crc */
125d9fb9f38SJeff Kirsher #define NATSEMI_PADDING		16	/* 2 bytes should be sufficient */
126d9fb9f38SJeff Kirsher #define NATSEMI_LONGPKT		1518	/* limit for normal packets */
127d9fb9f38SJeff Kirsher #define NATSEMI_RX_LIMIT	2046	/* maximum supported by hardware */
128d9fb9f38SJeff Kirsher 
129d9fb9f38SJeff Kirsher /* These identify the driver base version and may not be removed. */
1306980cbe4SBill Pemberton static const char version[] =
131d9fb9f38SJeff Kirsher   KERN_INFO DRV_NAME " dp8381x driver, version "
132d9fb9f38SJeff Kirsher       DRV_VERSION ", " DRV_RELDATE "\n"
133d9fb9f38SJeff Kirsher   "  originally by Donald Becker <becker@scyld.com>\n"
134d9fb9f38SJeff Kirsher   "  2.4.x kernel port by Jeff Garzik, Tjeerd Mulder\n";
135d9fb9f38SJeff Kirsher 
136d9fb9f38SJeff Kirsher MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
137d9fb9f38SJeff Kirsher MODULE_DESCRIPTION("National Semiconductor DP8381x series PCI Ethernet driver");
138d9fb9f38SJeff Kirsher MODULE_LICENSE("GPL");
139d9fb9f38SJeff Kirsher 
140d9fb9f38SJeff Kirsher module_param(mtu, int, 0);
141d9fb9f38SJeff Kirsher module_param(debug, int, 0);
142d9fb9f38SJeff Kirsher module_param(rx_copybreak, int, 0);
143d9fb9f38SJeff Kirsher module_param(dspcfg_workaround, int, 0);
144d9fb9f38SJeff Kirsher module_param_array(options, int, NULL, 0);
145d9fb9f38SJeff Kirsher module_param_array(full_duplex, int, NULL, 0);
146d9fb9f38SJeff Kirsher MODULE_PARM_DESC(mtu, "DP8381x MTU (all boards)");
147d9fb9f38SJeff Kirsher MODULE_PARM_DESC(debug, "DP8381x default debug level");
148d9fb9f38SJeff Kirsher MODULE_PARM_DESC(rx_copybreak,
149d9fb9f38SJeff Kirsher 	"DP8381x copy breakpoint for copy-only-tiny-frames");
150d9fb9f38SJeff Kirsher MODULE_PARM_DESC(dspcfg_workaround, "DP8381x: control DspCfg workaround");
151d9fb9f38SJeff Kirsher MODULE_PARM_DESC(options,
152d9fb9f38SJeff Kirsher 	"DP8381x: Bits 0-3: media type, bit 17: full duplex");
153d9fb9f38SJeff Kirsher MODULE_PARM_DESC(full_duplex, "DP8381x full duplex setting(s) (1)");
154d9fb9f38SJeff Kirsher 
155d9fb9f38SJeff Kirsher /*
156d9fb9f38SJeff Kirsher 				Theory of Operation
157d9fb9f38SJeff Kirsher 
158d9fb9f38SJeff Kirsher I. Board Compatibility
159d9fb9f38SJeff Kirsher 
160d9fb9f38SJeff Kirsher This driver is designed for National Semiconductor DP83815 PCI Ethernet NIC.
161d9fb9f38SJeff Kirsher It also works with other chips in in the DP83810 series.
162d9fb9f38SJeff Kirsher 
163d9fb9f38SJeff Kirsher II. Board-specific settings
164d9fb9f38SJeff Kirsher 
165d9fb9f38SJeff Kirsher This driver requires the PCI interrupt line to be valid.
166d9fb9f38SJeff Kirsher It honors the EEPROM-set values.
167d9fb9f38SJeff Kirsher 
168d9fb9f38SJeff Kirsher III. Driver operation
169d9fb9f38SJeff Kirsher 
170d9fb9f38SJeff Kirsher IIIa. Ring buffers
171d9fb9f38SJeff Kirsher 
172d9fb9f38SJeff Kirsher This driver uses two statically allocated fixed-size descriptor lists
173d9fb9f38SJeff Kirsher formed into rings by a branch from the final descriptor to the beginning of
174d9fb9f38SJeff Kirsher the list.  The ring sizes are set at compile time by RX/TX_RING_SIZE.
175d9fb9f38SJeff Kirsher The NatSemi design uses a 'next descriptor' pointer that the driver forms
176d9fb9f38SJeff Kirsher into a list.
177d9fb9f38SJeff Kirsher 
178d9fb9f38SJeff Kirsher IIIb/c. Transmit/Receive Structure
179d9fb9f38SJeff Kirsher 
180d9fb9f38SJeff Kirsher This driver uses a zero-copy receive and transmit scheme.
181d9fb9f38SJeff Kirsher The driver allocates full frame size skbuffs for the Rx ring buffers at
182d9fb9f38SJeff Kirsher open() time and passes the skb->data field to the chip as receive data
183d9fb9f38SJeff Kirsher buffers.  When an incoming frame is less than RX_COPYBREAK bytes long,
184d9fb9f38SJeff Kirsher a fresh skbuff is allocated and the frame is copied to the new skbuff.
185d9fb9f38SJeff Kirsher When the incoming frame is larger, the skbuff is passed directly up the
186d9fb9f38SJeff Kirsher protocol stack.  Buffers consumed this way are replaced by newly allocated
187d9fb9f38SJeff Kirsher skbuffs in a later phase of receives.
188d9fb9f38SJeff Kirsher 
189d9fb9f38SJeff Kirsher The RX_COPYBREAK value is chosen to trade-off the memory wasted by
190d9fb9f38SJeff Kirsher using a full-sized skbuff for small frames vs. the copying costs of larger
191d9fb9f38SJeff Kirsher frames.  New boards are typically used in generously configured machines
192d9fb9f38SJeff Kirsher and the underfilled buffers have negligible impact compared to the benefit of
193d9fb9f38SJeff Kirsher a single allocation size, so the default value of zero results in never
194d9fb9f38SJeff Kirsher copying packets.  When copying is done, the cost is usually mitigated by using
195d9fb9f38SJeff Kirsher a combined copy/checksum routine.  Copying also preloads the cache, which is
196d9fb9f38SJeff Kirsher most useful with small frames.
197d9fb9f38SJeff Kirsher 
198d9fb9f38SJeff Kirsher A subtle aspect of the operation is that unaligned buffers are not permitted
199d9fb9f38SJeff Kirsher by the hardware.  Thus the IP header at offset 14 in an ethernet frame isn't
200d9fb9f38SJeff Kirsher longword aligned for further processing.  On copies frames are put into the
201d9fb9f38SJeff Kirsher skbuff at an offset of "+2", 16-byte aligning the IP header.
202d9fb9f38SJeff Kirsher 
203d9fb9f38SJeff Kirsher IIId. Synchronization
204d9fb9f38SJeff Kirsher 
205d9fb9f38SJeff Kirsher Most operations are synchronized on the np->lock irq spinlock, except the
206d9fb9f38SJeff Kirsher receive and transmit paths which are synchronised using a combination of
207d9fb9f38SJeff Kirsher hardware descriptor ownership, disabling interrupts and NAPI poll scheduling.
208d9fb9f38SJeff Kirsher 
209d9fb9f38SJeff Kirsher IVb. References
210d9fb9f38SJeff Kirsher 
211d9fb9f38SJeff Kirsher http://www.scyld.com/expert/100mbps.html
212d9fb9f38SJeff Kirsher http://www.scyld.com/expert/NWay.html
213d9fb9f38SJeff Kirsher Datasheet is available from:
214d9fb9f38SJeff Kirsher http://www.national.com/pf/DP/DP83815.html
215d9fb9f38SJeff Kirsher 
216d9fb9f38SJeff Kirsher IVc. Errata
217d9fb9f38SJeff Kirsher 
218d9fb9f38SJeff Kirsher None characterised.
219d9fb9f38SJeff Kirsher */
220d9fb9f38SJeff Kirsher 
221d9fb9f38SJeff Kirsher 
222d9fb9f38SJeff Kirsher 
223d9fb9f38SJeff Kirsher /*
224d9fb9f38SJeff Kirsher  * Support for fibre connections on Am79C874:
225d9fb9f38SJeff Kirsher  * This phy needs a special setup when connected to a fibre cable.
226d9fb9f38SJeff Kirsher  * http://www.amd.com/files/connectivitysolutions/networking/archivednetworking/22235.pdf
227d9fb9f38SJeff Kirsher  */
228d9fb9f38SJeff Kirsher #define PHYID_AM79C874	0x0022561b
229d9fb9f38SJeff Kirsher 
230d9fb9f38SJeff Kirsher enum {
231d9fb9f38SJeff Kirsher 	MII_MCTRL	= 0x15,		/* mode control register */
232d9fb9f38SJeff Kirsher 	MII_FX_SEL	= 0x0001,	/* 100BASE-FX (fiber) */
233d9fb9f38SJeff Kirsher 	MII_EN_SCRM	= 0x0004,	/* enable scrambler (tp) */
234d9fb9f38SJeff Kirsher };
235d9fb9f38SJeff Kirsher 
236d9fb9f38SJeff Kirsher enum {
237d9fb9f38SJeff Kirsher 	NATSEMI_FLAG_IGNORE_PHY		= 0x1,
238d9fb9f38SJeff Kirsher };
239d9fb9f38SJeff Kirsher 
240d9fb9f38SJeff Kirsher /* array of board data directly indexed by pci_tbl[x].driver_data */
241d9fb9f38SJeff Kirsher static struct {
242d9fb9f38SJeff Kirsher 	const char *name;
243d9fb9f38SJeff Kirsher 	unsigned long flags;
244d9fb9f38SJeff Kirsher 	unsigned int eeprom_size;
2456980cbe4SBill Pemberton } natsemi_pci_info[] = {
246d9fb9f38SJeff Kirsher 	{ "Aculab E1/T1 PMXc cPCI carrier card", NATSEMI_FLAG_IGNORE_PHY, 128 },
247d9fb9f38SJeff Kirsher 	{ "NatSemi DP8381[56]", 0, 24 },
248d9fb9f38SJeff Kirsher };
249d9fb9f38SJeff Kirsher 
2509baa3c34SBenoit Taine static const struct pci_device_id natsemi_pci_tbl[] = {
251d9fb9f38SJeff Kirsher 	{ PCI_VENDOR_ID_NS, 0x0020, 0x12d9,     0x000c,     0, 0, 0 },
252d9fb9f38SJeff Kirsher 	{ PCI_VENDOR_ID_NS, 0x0020, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
253d9fb9f38SJeff Kirsher 	{ }	/* terminate list */
254d9fb9f38SJeff Kirsher };
255d9fb9f38SJeff Kirsher MODULE_DEVICE_TABLE(pci, natsemi_pci_tbl);
256d9fb9f38SJeff Kirsher 
257d9fb9f38SJeff Kirsher /* Offsets to the device registers.
258d9fb9f38SJeff Kirsher    Unlike software-only systems, device drivers interact with complex hardware.
259d9fb9f38SJeff Kirsher    It's not useful to define symbolic names for every register bit in the
260d9fb9f38SJeff Kirsher    device.
261d9fb9f38SJeff Kirsher */
262d9fb9f38SJeff Kirsher enum register_offsets {
263d9fb9f38SJeff Kirsher 	ChipCmd			= 0x00,
264d9fb9f38SJeff Kirsher 	ChipConfig		= 0x04,
265d9fb9f38SJeff Kirsher 	EECtrl			= 0x08,
266d9fb9f38SJeff Kirsher 	PCIBusCfg		= 0x0C,
267d9fb9f38SJeff Kirsher 	IntrStatus		= 0x10,
268d9fb9f38SJeff Kirsher 	IntrMask		= 0x14,
269d9fb9f38SJeff Kirsher 	IntrEnable		= 0x18,
270d9fb9f38SJeff Kirsher 	IntrHoldoff		= 0x1C, /* DP83816 only */
271d9fb9f38SJeff Kirsher 	TxRingPtr		= 0x20,
272d9fb9f38SJeff Kirsher 	TxConfig		= 0x24,
273d9fb9f38SJeff Kirsher 	RxRingPtr		= 0x30,
274d9fb9f38SJeff Kirsher 	RxConfig		= 0x34,
275d9fb9f38SJeff Kirsher 	ClkRun			= 0x3C,
276d9fb9f38SJeff Kirsher 	WOLCmd			= 0x40,
277d9fb9f38SJeff Kirsher 	PauseCmd		= 0x44,
278d9fb9f38SJeff Kirsher 	RxFilterAddr		= 0x48,
279d9fb9f38SJeff Kirsher 	RxFilterData		= 0x4C,
280d9fb9f38SJeff Kirsher 	BootRomAddr		= 0x50,
281d9fb9f38SJeff Kirsher 	BootRomData		= 0x54,
282d9fb9f38SJeff Kirsher 	SiliconRev		= 0x58,
283d9fb9f38SJeff Kirsher 	StatsCtrl		= 0x5C,
284d9fb9f38SJeff Kirsher 	StatsData		= 0x60,
285d9fb9f38SJeff Kirsher 	RxPktErrs		= 0x60,
286d9fb9f38SJeff Kirsher 	RxMissed		= 0x68,
287d9fb9f38SJeff Kirsher 	RxCRCErrs		= 0x64,
288d9fb9f38SJeff Kirsher 	BasicControl		= 0x80,
289d9fb9f38SJeff Kirsher 	BasicStatus		= 0x84,
290d9fb9f38SJeff Kirsher 	AnegAdv			= 0x90,
291d9fb9f38SJeff Kirsher 	AnegPeer		= 0x94,
292d9fb9f38SJeff Kirsher 	PhyStatus		= 0xC0,
293d9fb9f38SJeff Kirsher 	MIntrCtrl		= 0xC4,
294d9fb9f38SJeff Kirsher 	MIntrStatus		= 0xC8,
295d9fb9f38SJeff Kirsher 	PhyCtrl			= 0xE4,
296d9fb9f38SJeff Kirsher 
297d9fb9f38SJeff Kirsher 	/* These are from the spec, around page 78... on a separate table.
298d9fb9f38SJeff Kirsher 	 * The meaning of these registers depend on the value of PGSEL. */
299d9fb9f38SJeff Kirsher 	PGSEL			= 0xCC,
300d9fb9f38SJeff Kirsher 	PMDCSR			= 0xE4,
301d9fb9f38SJeff Kirsher 	TSTDAT			= 0xFC,
302d9fb9f38SJeff Kirsher 	DSPCFG			= 0xF4,
303d9fb9f38SJeff Kirsher 	SDCFG			= 0xF8
304d9fb9f38SJeff Kirsher };
305d9fb9f38SJeff Kirsher /* the values for the 'magic' registers above (PGSEL=1) */
306d9fb9f38SJeff Kirsher #define PMDCSR_VAL	0x189c	/* enable preferred adaptation circuitry */
307d9fb9f38SJeff Kirsher #define TSTDAT_VAL	0x0
308d9fb9f38SJeff Kirsher #define DSPCFG_VAL	0x5040
309d9fb9f38SJeff Kirsher #define SDCFG_VAL	0x008c	/* set voltage thresholds for Signal Detect */
310d9fb9f38SJeff Kirsher #define DSPCFG_LOCK	0x20	/* coefficient lock bit in DSPCFG */
311d9fb9f38SJeff Kirsher #define DSPCFG_COEF	0x1000	/* see coefficient (in TSTDAT) bit in DSPCFG */
312d9fb9f38SJeff Kirsher #define TSTDAT_FIXED	0xe8	/* magic number for bad coefficients */
313d9fb9f38SJeff Kirsher 
314d9fb9f38SJeff Kirsher /* misc PCI space registers */
315d9fb9f38SJeff Kirsher enum pci_register_offsets {
316d9fb9f38SJeff Kirsher 	PCIPM			= 0x44,
317d9fb9f38SJeff Kirsher };
318d9fb9f38SJeff Kirsher 
319d9fb9f38SJeff Kirsher enum ChipCmd_bits {
320d9fb9f38SJeff Kirsher 	ChipReset		= 0x100,
321d9fb9f38SJeff Kirsher 	RxReset			= 0x20,
322d9fb9f38SJeff Kirsher 	TxReset			= 0x10,
323d9fb9f38SJeff Kirsher 	RxOff			= 0x08,
324d9fb9f38SJeff Kirsher 	RxOn			= 0x04,
325d9fb9f38SJeff Kirsher 	TxOff			= 0x02,
326d9fb9f38SJeff Kirsher 	TxOn			= 0x01,
327d9fb9f38SJeff Kirsher };
328d9fb9f38SJeff Kirsher 
329d9fb9f38SJeff Kirsher enum ChipConfig_bits {
330d9fb9f38SJeff Kirsher 	CfgPhyDis		= 0x200,
331d9fb9f38SJeff Kirsher 	CfgPhyRst		= 0x400,
332d9fb9f38SJeff Kirsher 	CfgExtPhy		= 0x1000,
333d9fb9f38SJeff Kirsher 	CfgAnegEnable		= 0x2000,
334d9fb9f38SJeff Kirsher 	CfgAneg100		= 0x4000,
335d9fb9f38SJeff Kirsher 	CfgAnegFull		= 0x8000,
336d9fb9f38SJeff Kirsher 	CfgAnegDone		= 0x8000000,
337d9fb9f38SJeff Kirsher 	CfgFullDuplex		= 0x20000000,
338d9fb9f38SJeff Kirsher 	CfgSpeed100		= 0x40000000,
339d9fb9f38SJeff Kirsher 	CfgLink			= 0x80000000,
340d9fb9f38SJeff Kirsher };
341d9fb9f38SJeff Kirsher 
342d9fb9f38SJeff Kirsher enum EECtrl_bits {
343d9fb9f38SJeff Kirsher 	EE_ShiftClk		= 0x04,
344d9fb9f38SJeff Kirsher 	EE_DataIn		= 0x01,
345d9fb9f38SJeff Kirsher 	EE_ChipSelect		= 0x08,
346d9fb9f38SJeff Kirsher 	EE_DataOut		= 0x02,
347d9fb9f38SJeff Kirsher 	MII_Data 		= 0x10,
348d9fb9f38SJeff Kirsher 	MII_Write		= 0x20,
349d9fb9f38SJeff Kirsher 	MII_ShiftClk		= 0x40,
350d9fb9f38SJeff Kirsher };
351d9fb9f38SJeff Kirsher 
352d9fb9f38SJeff Kirsher enum PCIBusCfg_bits {
353d9fb9f38SJeff Kirsher 	EepromReload		= 0x4,
354d9fb9f38SJeff Kirsher };
355d9fb9f38SJeff Kirsher 
356d9fb9f38SJeff Kirsher /* Bits in the interrupt status/mask registers. */
357d9fb9f38SJeff Kirsher enum IntrStatus_bits {
358d9fb9f38SJeff Kirsher 	IntrRxDone		= 0x0001,
359d9fb9f38SJeff Kirsher 	IntrRxIntr		= 0x0002,
360d9fb9f38SJeff Kirsher 	IntrRxErr		= 0x0004,
361d9fb9f38SJeff Kirsher 	IntrRxEarly		= 0x0008,
362d9fb9f38SJeff Kirsher 	IntrRxIdle		= 0x0010,
363d9fb9f38SJeff Kirsher 	IntrRxOverrun		= 0x0020,
364d9fb9f38SJeff Kirsher 	IntrTxDone		= 0x0040,
365d9fb9f38SJeff Kirsher 	IntrTxIntr		= 0x0080,
366d9fb9f38SJeff Kirsher 	IntrTxErr		= 0x0100,
367d9fb9f38SJeff Kirsher 	IntrTxIdle		= 0x0200,
368d9fb9f38SJeff Kirsher 	IntrTxUnderrun		= 0x0400,
369d9fb9f38SJeff Kirsher 	StatsMax		= 0x0800,
370d9fb9f38SJeff Kirsher 	SWInt			= 0x1000,
371d9fb9f38SJeff Kirsher 	WOLPkt			= 0x2000,
372d9fb9f38SJeff Kirsher 	LinkChange		= 0x4000,
373d9fb9f38SJeff Kirsher 	IntrHighBits		= 0x8000,
374d9fb9f38SJeff Kirsher 	RxStatusFIFOOver	= 0x10000,
375d9fb9f38SJeff Kirsher 	IntrPCIErr		= 0xf00000,
376d9fb9f38SJeff Kirsher 	RxResetDone		= 0x1000000,
377d9fb9f38SJeff Kirsher 	TxResetDone		= 0x2000000,
378d9fb9f38SJeff Kirsher 	IntrAbnormalSummary	= 0xCD20,
379d9fb9f38SJeff Kirsher };
380d9fb9f38SJeff Kirsher 
381d9fb9f38SJeff Kirsher /*
382d9fb9f38SJeff Kirsher  * Default Interrupts:
383d9fb9f38SJeff Kirsher  * Rx OK, Rx Packet Error, Rx Overrun,
384d9fb9f38SJeff Kirsher  * Tx OK, Tx Packet Error, Tx Underrun,
385d9fb9f38SJeff Kirsher  * MIB Service, Phy Interrupt, High Bits,
386d9fb9f38SJeff Kirsher  * Rx Status FIFO overrun,
387d9fb9f38SJeff Kirsher  * Received Target Abort, Received Master Abort,
388d9fb9f38SJeff Kirsher  * Signalled System Error, Received Parity Error
389d9fb9f38SJeff Kirsher  */
390d9fb9f38SJeff Kirsher #define DEFAULT_INTR 0x00f1cd65
391d9fb9f38SJeff Kirsher 
392d9fb9f38SJeff Kirsher enum TxConfig_bits {
393d9fb9f38SJeff Kirsher 	TxDrthMask		= 0x3f,
394d9fb9f38SJeff Kirsher 	TxFlthMask		= 0x3f00,
395d9fb9f38SJeff Kirsher 	TxMxdmaMask		= 0x700000,
396d9fb9f38SJeff Kirsher 	TxMxdma_512		= 0x0,
397d9fb9f38SJeff Kirsher 	TxMxdma_4		= 0x100000,
398d9fb9f38SJeff Kirsher 	TxMxdma_8		= 0x200000,
399d9fb9f38SJeff Kirsher 	TxMxdma_16		= 0x300000,
400d9fb9f38SJeff Kirsher 	TxMxdma_32		= 0x400000,
401d9fb9f38SJeff Kirsher 	TxMxdma_64		= 0x500000,
402d9fb9f38SJeff Kirsher 	TxMxdma_128		= 0x600000,
403d9fb9f38SJeff Kirsher 	TxMxdma_256		= 0x700000,
404d9fb9f38SJeff Kirsher 	TxCollRetry		= 0x800000,
405d9fb9f38SJeff Kirsher 	TxAutoPad		= 0x10000000,
406d9fb9f38SJeff Kirsher 	TxMacLoop		= 0x20000000,
407d9fb9f38SJeff Kirsher 	TxHeartIgn		= 0x40000000,
408d9fb9f38SJeff Kirsher 	TxCarrierIgn		= 0x80000000
409d9fb9f38SJeff Kirsher };
410d9fb9f38SJeff Kirsher 
411d9fb9f38SJeff Kirsher /*
412d9fb9f38SJeff Kirsher  * Tx Configuration:
413d9fb9f38SJeff Kirsher  * - 256 byte DMA burst length
414d9fb9f38SJeff Kirsher  * - fill threshold 512 bytes (i.e. restart DMA when 512 bytes are free)
415d9fb9f38SJeff Kirsher  * - 64 bytes initial drain threshold (i.e. begin actual transmission
416d9fb9f38SJeff Kirsher  *   when 64 byte are in the fifo)
417d9fb9f38SJeff Kirsher  * - on tx underruns, increase drain threshold by 64.
418d9fb9f38SJeff Kirsher  * - at most use a drain threshold of 1472 bytes: The sum of the fill
419d9fb9f38SJeff Kirsher  *   threshold and the drain threshold must be less than 2016 bytes.
420d9fb9f38SJeff Kirsher  *
421d9fb9f38SJeff Kirsher  */
422d9fb9f38SJeff Kirsher #define TX_FLTH_VAL		((512/32) << 8)
423d9fb9f38SJeff Kirsher #define TX_DRTH_VAL_START	(64/32)
424d9fb9f38SJeff Kirsher #define TX_DRTH_VAL_INC		2
425d9fb9f38SJeff Kirsher #define TX_DRTH_VAL_LIMIT	(1472/32)
426d9fb9f38SJeff Kirsher 
427d9fb9f38SJeff Kirsher enum RxConfig_bits {
428d9fb9f38SJeff Kirsher 	RxDrthMask		= 0x3e,
429d9fb9f38SJeff Kirsher 	RxMxdmaMask		= 0x700000,
430d9fb9f38SJeff Kirsher 	RxMxdma_512		= 0x0,
431d9fb9f38SJeff Kirsher 	RxMxdma_4		= 0x100000,
432d9fb9f38SJeff Kirsher 	RxMxdma_8		= 0x200000,
433d9fb9f38SJeff Kirsher 	RxMxdma_16		= 0x300000,
434d9fb9f38SJeff Kirsher 	RxMxdma_32		= 0x400000,
435d9fb9f38SJeff Kirsher 	RxMxdma_64		= 0x500000,
436d9fb9f38SJeff Kirsher 	RxMxdma_128		= 0x600000,
437d9fb9f38SJeff Kirsher 	RxMxdma_256		= 0x700000,
438d9fb9f38SJeff Kirsher 	RxAcceptLong		= 0x8000000,
439d9fb9f38SJeff Kirsher 	RxAcceptTx		= 0x10000000,
440d9fb9f38SJeff Kirsher 	RxAcceptRunt		= 0x40000000,
441d9fb9f38SJeff Kirsher 	RxAcceptErr		= 0x80000000
442d9fb9f38SJeff Kirsher };
443d9fb9f38SJeff Kirsher #define RX_DRTH_VAL		(128/8)
444d9fb9f38SJeff Kirsher 
445d9fb9f38SJeff Kirsher enum ClkRun_bits {
446d9fb9f38SJeff Kirsher 	PMEEnable		= 0x100,
447d9fb9f38SJeff Kirsher 	PMEStatus		= 0x8000,
448d9fb9f38SJeff Kirsher };
449d9fb9f38SJeff Kirsher 
450d9fb9f38SJeff Kirsher enum WolCmd_bits {
451d9fb9f38SJeff Kirsher 	WakePhy			= 0x1,
452d9fb9f38SJeff Kirsher 	WakeUnicast		= 0x2,
453d9fb9f38SJeff Kirsher 	WakeMulticast		= 0x4,
454d9fb9f38SJeff Kirsher 	WakeBroadcast		= 0x8,
455d9fb9f38SJeff Kirsher 	WakeArp			= 0x10,
456d9fb9f38SJeff Kirsher 	WakePMatch0		= 0x20,
457d9fb9f38SJeff Kirsher 	WakePMatch1		= 0x40,
458d9fb9f38SJeff Kirsher 	WakePMatch2		= 0x80,
459d9fb9f38SJeff Kirsher 	WakePMatch3		= 0x100,
460d9fb9f38SJeff Kirsher 	WakeMagic		= 0x200,
461d9fb9f38SJeff Kirsher 	WakeMagicSecure		= 0x400,
462d9fb9f38SJeff Kirsher 	SecureHack		= 0x100000,
463d9fb9f38SJeff Kirsher 	WokePhy			= 0x400000,
464d9fb9f38SJeff Kirsher 	WokeUnicast		= 0x800000,
465d9fb9f38SJeff Kirsher 	WokeMulticast		= 0x1000000,
466d9fb9f38SJeff Kirsher 	WokeBroadcast		= 0x2000000,
467d9fb9f38SJeff Kirsher 	WokeArp			= 0x4000000,
468d9fb9f38SJeff Kirsher 	WokePMatch0		= 0x8000000,
469d9fb9f38SJeff Kirsher 	WokePMatch1		= 0x10000000,
470d9fb9f38SJeff Kirsher 	WokePMatch2		= 0x20000000,
471d9fb9f38SJeff Kirsher 	WokePMatch3		= 0x40000000,
472d9fb9f38SJeff Kirsher 	WokeMagic		= 0x80000000,
473d9fb9f38SJeff Kirsher 	WakeOptsSummary		= 0x7ff
474d9fb9f38SJeff Kirsher };
475d9fb9f38SJeff Kirsher 
476d9fb9f38SJeff Kirsher enum RxFilterAddr_bits {
477d9fb9f38SJeff Kirsher 	RFCRAddressMask		= 0x3ff,
478d9fb9f38SJeff Kirsher 	AcceptMulticast		= 0x00200000,
479d9fb9f38SJeff Kirsher 	AcceptMyPhys		= 0x08000000,
480d9fb9f38SJeff Kirsher 	AcceptAllPhys		= 0x10000000,
481d9fb9f38SJeff Kirsher 	AcceptAllMulticast	= 0x20000000,
482d9fb9f38SJeff Kirsher 	AcceptBroadcast		= 0x40000000,
483d9fb9f38SJeff Kirsher 	RxFilterEnable		= 0x80000000
484d9fb9f38SJeff Kirsher };
485d9fb9f38SJeff Kirsher 
486d9fb9f38SJeff Kirsher enum StatsCtrl_bits {
487d9fb9f38SJeff Kirsher 	StatsWarn		= 0x1,
488d9fb9f38SJeff Kirsher 	StatsFreeze		= 0x2,
489d9fb9f38SJeff Kirsher 	StatsClear		= 0x4,
490d9fb9f38SJeff Kirsher 	StatsStrobe		= 0x8,
491d9fb9f38SJeff Kirsher };
492d9fb9f38SJeff Kirsher 
493d9fb9f38SJeff Kirsher enum MIntrCtrl_bits {
494d9fb9f38SJeff Kirsher 	MICRIntEn		= 0x2,
495d9fb9f38SJeff Kirsher };
496d9fb9f38SJeff Kirsher 
497d9fb9f38SJeff Kirsher enum PhyCtrl_bits {
498d9fb9f38SJeff Kirsher 	PhyAddrMask		= 0x1f,
499d9fb9f38SJeff Kirsher };
500d9fb9f38SJeff Kirsher 
501d9fb9f38SJeff Kirsher #define PHY_ADDR_NONE		32
502d9fb9f38SJeff Kirsher #define PHY_ADDR_INTERNAL	1
503d9fb9f38SJeff Kirsher 
504d9fb9f38SJeff Kirsher /* values we might find in the silicon revision register */
505d9fb9f38SJeff Kirsher #define SRR_DP83815_C	0x0302
506d9fb9f38SJeff Kirsher #define SRR_DP83815_D	0x0403
507d9fb9f38SJeff Kirsher #define SRR_DP83816_A4	0x0504
508d9fb9f38SJeff Kirsher #define SRR_DP83816_A5	0x0505
509d9fb9f38SJeff Kirsher 
510d9fb9f38SJeff Kirsher /* The Rx and Tx buffer descriptors. */
511d9fb9f38SJeff Kirsher /* Note that using only 32 bit fields simplifies conversion to big-endian
512d9fb9f38SJeff Kirsher    architectures. */
513d9fb9f38SJeff Kirsher struct netdev_desc {
514d9fb9f38SJeff Kirsher 	__le32 next_desc;
515d9fb9f38SJeff Kirsher 	__le32 cmd_status;
516d9fb9f38SJeff Kirsher 	__le32 addr;
517d9fb9f38SJeff Kirsher 	__le32 software_use;
518d9fb9f38SJeff Kirsher };
519d9fb9f38SJeff Kirsher 
520d9fb9f38SJeff Kirsher /* Bits in network_desc.status */
521d9fb9f38SJeff Kirsher enum desc_status_bits {
522d9fb9f38SJeff Kirsher 	DescOwn=0x80000000, DescMore=0x40000000, DescIntr=0x20000000,
523d9fb9f38SJeff Kirsher 	DescNoCRC=0x10000000, DescPktOK=0x08000000,
524d9fb9f38SJeff Kirsher 	DescSizeMask=0xfff,
525d9fb9f38SJeff Kirsher 
526d9fb9f38SJeff Kirsher 	DescTxAbort=0x04000000, DescTxFIFO=0x02000000,
527d9fb9f38SJeff Kirsher 	DescTxCarrier=0x01000000, DescTxDefer=0x00800000,
528d9fb9f38SJeff Kirsher 	DescTxExcDefer=0x00400000, DescTxOOWCol=0x00200000,
529d9fb9f38SJeff Kirsher 	DescTxExcColl=0x00100000, DescTxCollCount=0x000f0000,
530d9fb9f38SJeff Kirsher 
531d9fb9f38SJeff Kirsher 	DescRxAbort=0x04000000, DescRxOver=0x02000000,
532d9fb9f38SJeff Kirsher 	DescRxDest=0x01800000, DescRxLong=0x00400000,
533d9fb9f38SJeff Kirsher 	DescRxRunt=0x00200000, DescRxInvalid=0x00100000,
534d9fb9f38SJeff Kirsher 	DescRxCRC=0x00080000, DescRxAlign=0x00040000,
535d9fb9f38SJeff Kirsher 	DescRxLoop=0x00020000, DesRxColl=0x00010000,
536d9fb9f38SJeff Kirsher };
537d9fb9f38SJeff Kirsher 
538d9fb9f38SJeff Kirsher struct netdev_private {
539d9fb9f38SJeff Kirsher 	/* Descriptor rings first for alignment */
540d9fb9f38SJeff Kirsher 	dma_addr_t ring_dma;
541d9fb9f38SJeff Kirsher 	struct netdev_desc *rx_ring;
542d9fb9f38SJeff Kirsher 	struct netdev_desc *tx_ring;
543d9fb9f38SJeff Kirsher 	/* The addresses of receive-in-place skbuffs */
544d9fb9f38SJeff Kirsher 	struct sk_buff *rx_skbuff[RX_RING_SIZE];
545d9fb9f38SJeff Kirsher 	dma_addr_t rx_dma[RX_RING_SIZE];
546d9fb9f38SJeff Kirsher 	/* address of a sent-in-place packet/buffer, for later free() */
547d9fb9f38SJeff Kirsher 	struct sk_buff *tx_skbuff[TX_RING_SIZE];
548d9fb9f38SJeff Kirsher 	dma_addr_t tx_dma[TX_RING_SIZE];
549d9fb9f38SJeff Kirsher 	struct net_device *dev;
550d710ce13SFrancois Romieu 	void __iomem *ioaddr;
551d9fb9f38SJeff Kirsher 	struct napi_struct napi;
552d9fb9f38SJeff Kirsher 	/* Media monitoring timer */
553d9fb9f38SJeff Kirsher 	struct timer_list timer;
554d9fb9f38SJeff Kirsher 	/* Frequently used values: keep some adjacent for cache effect */
555d9fb9f38SJeff Kirsher 	struct pci_dev *pci_dev;
556d9fb9f38SJeff Kirsher 	struct netdev_desc *rx_head_desc;
557d9fb9f38SJeff Kirsher 	/* Producer/consumer ring indices */
558d9fb9f38SJeff Kirsher 	unsigned int cur_rx, dirty_rx;
559d9fb9f38SJeff Kirsher 	unsigned int cur_tx, dirty_tx;
560d9fb9f38SJeff Kirsher 	/* Based on MTU+slack. */
561d9fb9f38SJeff Kirsher 	unsigned int rx_buf_sz;
562d9fb9f38SJeff Kirsher 	int oom;
563d9fb9f38SJeff Kirsher 	/* Interrupt status */
564d9fb9f38SJeff Kirsher 	u32 intr_status;
565d9fb9f38SJeff Kirsher 	/* Do not touch the nic registers */
566d9fb9f38SJeff Kirsher 	int hands_off;
567d9fb9f38SJeff Kirsher 	/* Don't pay attention to the reported link state. */
568d9fb9f38SJeff Kirsher 	int ignore_phy;
569d9fb9f38SJeff Kirsher 	/* external phy that is used: only valid if dev->if_port != PORT_TP */
570d9fb9f38SJeff Kirsher 	int mii;
571d9fb9f38SJeff Kirsher 	int phy_addr_external;
572d9fb9f38SJeff Kirsher 	unsigned int full_duplex;
573d9fb9f38SJeff Kirsher 	/* Rx filter */
574d9fb9f38SJeff Kirsher 	u32 cur_rx_mode;
575d9fb9f38SJeff Kirsher 	u32 rx_filter[16];
576d9fb9f38SJeff Kirsher 	/* FIFO and PCI burst thresholds */
577d9fb9f38SJeff Kirsher 	u32 tx_config, rx_config;
578d9fb9f38SJeff Kirsher 	/* original contents of ClkRun register */
579d9fb9f38SJeff Kirsher 	u32 SavedClkRun;
580d9fb9f38SJeff Kirsher 	/* silicon revision */
581d9fb9f38SJeff Kirsher 	u32 srr;
582d9fb9f38SJeff Kirsher 	/* expected DSPCFG value */
583d9fb9f38SJeff Kirsher 	u16 dspcfg;
584d9fb9f38SJeff Kirsher 	int dspcfg_workaround;
585d9fb9f38SJeff Kirsher 	/* parms saved in ethtool format */
586d9fb9f38SJeff Kirsher 	u16	speed;		/* The forced speed, 10Mb, 100Mb, gigabit */
587d9fb9f38SJeff Kirsher 	u8	duplex;		/* Duplex, half or full */
588d9fb9f38SJeff Kirsher 	u8	autoneg;	/* Autonegotiation enabled */
589d9fb9f38SJeff Kirsher 	/* MII transceiver section */
590d9fb9f38SJeff Kirsher 	u16 advertising;
591d9fb9f38SJeff Kirsher 	unsigned int iosize;
592d9fb9f38SJeff Kirsher 	spinlock_t lock;
593d9fb9f38SJeff Kirsher 	u32 msg_enable;
594d9fb9f38SJeff Kirsher 	/* EEPROM data */
595d9fb9f38SJeff Kirsher 	int eeprom_size;
596d9fb9f38SJeff Kirsher };
597d9fb9f38SJeff Kirsher 
598d9fb9f38SJeff Kirsher static void move_int_phy(struct net_device *dev, int addr);
599d9fb9f38SJeff Kirsher static int eeprom_read(void __iomem *ioaddr, int location);
600d9fb9f38SJeff Kirsher static int mdio_read(struct net_device *dev, int reg);
601d9fb9f38SJeff Kirsher static void mdio_write(struct net_device *dev, int reg, u16 data);
602d9fb9f38SJeff Kirsher static void init_phy_fixup(struct net_device *dev);
603d9fb9f38SJeff Kirsher static int miiport_read(struct net_device *dev, int phy_id, int reg);
604d9fb9f38SJeff Kirsher static void miiport_write(struct net_device *dev, int phy_id, int reg, u16 data);
605d9fb9f38SJeff Kirsher static int find_mii(struct net_device *dev);
606d9fb9f38SJeff Kirsher static void natsemi_reset(struct net_device *dev);
607d9fb9f38SJeff Kirsher static void natsemi_reload_eeprom(struct net_device *dev);
608d9fb9f38SJeff Kirsher static void natsemi_stop_rxtx(struct net_device *dev);
609d9fb9f38SJeff Kirsher static int netdev_open(struct net_device *dev);
610d9fb9f38SJeff Kirsher static void do_cable_magic(struct net_device *dev);
611d9fb9f38SJeff Kirsher static void undo_cable_magic(struct net_device *dev);
612d9fb9f38SJeff Kirsher static void check_link(struct net_device *dev);
61315735c9dSKees Cook static void netdev_timer(struct timer_list *t);
614d9fb9f38SJeff Kirsher static void dump_ring(struct net_device *dev);
6150290bd29SMichael S. Tsirkin static void ns_tx_timeout(struct net_device *dev, unsigned int txqueue);
616d9fb9f38SJeff Kirsher static int alloc_ring(struct net_device *dev);
617d9fb9f38SJeff Kirsher static void refill_rx(struct net_device *dev);
618d9fb9f38SJeff Kirsher static void init_ring(struct net_device *dev);
619d9fb9f38SJeff Kirsher static void drain_tx(struct net_device *dev);
620d9fb9f38SJeff Kirsher static void drain_ring(struct net_device *dev);
621d9fb9f38SJeff Kirsher static void free_ring(struct net_device *dev);
622d9fb9f38SJeff Kirsher static void reinit_ring(struct net_device *dev);
623d9fb9f38SJeff Kirsher static void init_registers(struct net_device *dev);
624d9fb9f38SJeff Kirsher static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev);
625d9fb9f38SJeff Kirsher static irqreturn_t intr_handler(int irq, void *dev_instance);
626d9fb9f38SJeff Kirsher static void netdev_error(struct net_device *dev, int intr_status);
627d9fb9f38SJeff Kirsher static int natsemi_poll(struct napi_struct *napi, int budget);
628d9fb9f38SJeff Kirsher static void netdev_rx(struct net_device *dev, int *work_done, int work_to_do);
629d9fb9f38SJeff Kirsher static void netdev_tx_done(struct net_device *dev);
630d9fb9f38SJeff Kirsher static int natsemi_change_mtu(struct net_device *dev, int new_mtu);
631d9fb9f38SJeff Kirsher #ifdef CONFIG_NET_POLL_CONTROLLER
632d9fb9f38SJeff Kirsher static void natsemi_poll_controller(struct net_device *dev);
633d9fb9f38SJeff Kirsher #endif
634d9fb9f38SJeff Kirsher static void __set_rx_mode(struct net_device *dev);
635d9fb9f38SJeff Kirsher static void set_rx_mode(struct net_device *dev);
636d9fb9f38SJeff Kirsher static void __get_stats(struct net_device *dev);
637d9fb9f38SJeff Kirsher static struct net_device_stats *get_stats(struct net_device *dev);
638d9fb9f38SJeff Kirsher static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
639d9fb9f38SJeff Kirsher static int netdev_set_wol(struct net_device *dev, u32 newval);
640d9fb9f38SJeff Kirsher static int netdev_get_wol(struct net_device *dev, u32 *supported, u32 *cur);
641d9fb9f38SJeff Kirsher static int netdev_set_sopass(struct net_device *dev, u8 *newval);
642d9fb9f38SJeff Kirsher static int netdev_get_sopass(struct net_device *dev, u8 *data);
643586b6e27SPhilippe Reynes static int netdev_get_ecmd(struct net_device *dev,
644586b6e27SPhilippe Reynes 			   struct ethtool_link_ksettings *ecmd);
645586b6e27SPhilippe Reynes static int netdev_set_ecmd(struct net_device *dev,
646586b6e27SPhilippe Reynes 			   const struct ethtool_link_ksettings *ecmd);
647d9fb9f38SJeff Kirsher static void enable_wol_mode(struct net_device *dev, int enable_intr);
648d9fb9f38SJeff Kirsher static int netdev_close(struct net_device *dev);
649d9fb9f38SJeff Kirsher static int netdev_get_regs(struct net_device *dev, u8 *buf);
650d9fb9f38SJeff Kirsher static int netdev_get_eeprom(struct net_device *dev, u8 *buf);
651d9fb9f38SJeff Kirsher static const struct ethtool_ops ethtool_ops;
652d9fb9f38SJeff Kirsher 
653d9fb9f38SJeff Kirsher #define NATSEMI_ATTR(_name) \
654d9fb9f38SJeff Kirsher static ssize_t natsemi_show_##_name(struct device *dev, \
655d9fb9f38SJeff Kirsher          struct device_attribute *attr, char *buf); \
656d9fb9f38SJeff Kirsher 	 static ssize_t natsemi_set_##_name(struct device *dev, \
657d9fb9f38SJeff Kirsher 		struct device_attribute *attr, \
658d9fb9f38SJeff Kirsher 	        const char *buf, size_t count); \
659d9fb9f38SJeff Kirsher 	 static DEVICE_ATTR(_name, 0644, natsemi_show_##_name, natsemi_set_##_name)
660d9fb9f38SJeff Kirsher 
661d9fb9f38SJeff Kirsher #define NATSEMI_CREATE_FILE(_dev, _name) \
662d9fb9f38SJeff Kirsher          device_create_file(&_dev->dev, &dev_attr_##_name)
663d9fb9f38SJeff Kirsher #define NATSEMI_REMOVE_FILE(_dev, _name) \
664d9fb9f38SJeff Kirsher          device_remove_file(&_dev->dev, &dev_attr_##_name)
665d9fb9f38SJeff Kirsher 
666d9fb9f38SJeff Kirsher NATSEMI_ATTR(dspcfg_workaround);
667d9fb9f38SJeff Kirsher 
668d9fb9f38SJeff Kirsher static ssize_t natsemi_show_dspcfg_workaround(struct device *dev,
669d9fb9f38SJeff Kirsher 				  	      struct device_attribute *attr,
670d9fb9f38SJeff Kirsher 					      char *buf)
671d9fb9f38SJeff Kirsher {
672d9fb9f38SJeff Kirsher 	struct netdev_private *np = netdev_priv(to_net_dev(dev));
673d9fb9f38SJeff Kirsher 
674d9fb9f38SJeff Kirsher 	return sprintf(buf, "%s\n", np->dspcfg_workaround ? "on" : "off");
675d9fb9f38SJeff Kirsher }
676d9fb9f38SJeff Kirsher 
677d9fb9f38SJeff Kirsher static ssize_t natsemi_set_dspcfg_workaround(struct device *dev,
678d9fb9f38SJeff Kirsher 					     struct device_attribute *attr,
679d9fb9f38SJeff Kirsher 					     const char *buf, size_t count)
680d9fb9f38SJeff Kirsher {
681d9fb9f38SJeff Kirsher 	struct netdev_private *np = netdev_priv(to_net_dev(dev));
682d9fb9f38SJeff Kirsher 	int new_setting;
683d9fb9f38SJeff Kirsher 	unsigned long flags;
684d9fb9f38SJeff Kirsher 
685d9fb9f38SJeff Kirsher         /* Find out the new setting */
686d9fb9f38SJeff Kirsher         if (!strncmp("on", buf, count - 1) || !strncmp("1", buf, count - 1))
687d9fb9f38SJeff Kirsher                 new_setting = 1;
688d9fb9f38SJeff Kirsher         else if (!strncmp("off", buf, count - 1) ||
689d9fb9f38SJeff Kirsher                  !strncmp("0", buf, count - 1))
690d9fb9f38SJeff Kirsher 		new_setting = 0;
691d9fb9f38SJeff Kirsher 	else
692d9fb9f38SJeff Kirsher                  return count;
693d9fb9f38SJeff Kirsher 
694d9fb9f38SJeff Kirsher 	spin_lock_irqsave(&np->lock, flags);
695d9fb9f38SJeff Kirsher 
696d9fb9f38SJeff Kirsher 	np->dspcfg_workaround = new_setting;
697d9fb9f38SJeff Kirsher 
698d9fb9f38SJeff Kirsher 	spin_unlock_irqrestore(&np->lock, flags);
699d9fb9f38SJeff Kirsher 
700d9fb9f38SJeff Kirsher 	return count;
701d9fb9f38SJeff Kirsher }
702d9fb9f38SJeff Kirsher 
703d9fb9f38SJeff Kirsher static inline void __iomem *ns_ioaddr(struct net_device *dev)
704d9fb9f38SJeff Kirsher {
705d710ce13SFrancois Romieu 	struct netdev_private *np = netdev_priv(dev);
706d710ce13SFrancois Romieu 
707d710ce13SFrancois Romieu 	return np->ioaddr;
708d9fb9f38SJeff Kirsher }
709d9fb9f38SJeff Kirsher 
710d9fb9f38SJeff Kirsher static inline void natsemi_irq_enable(struct net_device *dev)
711d9fb9f38SJeff Kirsher {
712d9fb9f38SJeff Kirsher 	writel(1, ns_ioaddr(dev) + IntrEnable);
713d9fb9f38SJeff Kirsher 	readl(ns_ioaddr(dev) + IntrEnable);
714d9fb9f38SJeff Kirsher }
715d9fb9f38SJeff Kirsher 
716d9fb9f38SJeff Kirsher static inline void natsemi_irq_disable(struct net_device *dev)
717d9fb9f38SJeff Kirsher {
718d9fb9f38SJeff Kirsher 	writel(0, ns_ioaddr(dev) + IntrEnable);
719d9fb9f38SJeff Kirsher 	readl(ns_ioaddr(dev) + IntrEnable);
720d9fb9f38SJeff Kirsher }
721d9fb9f38SJeff Kirsher 
722d9fb9f38SJeff Kirsher static void move_int_phy(struct net_device *dev, int addr)
723d9fb9f38SJeff Kirsher {
724d9fb9f38SJeff Kirsher 	struct netdev_private *np = netdev_priv(dev);
725d9fb9f38SJeff Kirsher 	void __iomem *ioaddr = ns_ioaddr(dev);
726d9fb9f38SJeff Kirsher 	int target = 31;
727d9fb9f38SJeff Kirsher 
728d9fb9f38SJeff Kirsher 	/*
729d9fb9f38SJeff Kirsher 	 * The internal phy is visible on the external mii bus. Therefore we must
730d9fb9f38SJeff Kirsher 	 * move it away before we can send commands to an external phy.
731d9fb9f38SJeff Kirsher 	 * There are two addresses we must avoid:
732d9fb9f38SJeff Kirsher 	 * - the address on the external phy that is used for transmission.
733d9fb9f38SJeff Kirsher 	 * - the address that we want to access. User space can access phys
734d9fb9f38SJeff Kirsher 	 *   on the mii bus with SIOCGMIIREG/SIOCSMIIREG, independent from the
735d9fb9f38SJeff Kirsher 	 *   phy that is used for transmission.
736d9fb9f38SJeff Kirsher 	 */
737d9fb9f38SJeff Kirsher 
738d9fb9f38SJeff Kirsher 	if (target == addr)
739d9fb9f38SJeff Kirsher 		target--;
740d9fb9f38SJeff Kirsher 	if (target == np->phy_addr_external)
741d9fb9f38SJeff Kirsher 		target--;
742d9fb9f38SJeff Kirsher 	writew(target, ioaddr + PhyCtrl);
743d9fb9f38SJeff Kirsher 	readw(ioaddr + PhyCtrl);
744d9fb9f38SJeff Kirsher 	udelay(1);
745d9fb9f38SJeff Kirsher }
746d9fb9f38SJeff Kirsher 
7476980cbe4SBill Pemberton static void natsemi_init_media(struct net_device *dev)
748d9fb9f38SJeff Kirsher {
749d9fb9f38SJeff Kirsher 	struct netdev_private *np = netdev_priv(dev);
750d9fb9f38SJeff Kirsher 	u32 tmp;
751d9fb9f38SJeff Kirsher 
752d9fb9f38SJeff Kirsher 	if (np->ignore_phy)
753d9fb9f38SJeff Kirsher 		netif_carrier_on(dev);
754d9fb9f38SJeff Kirsher 	else
755d9fb9f38SJeff Kirsher 		netif_carrier_off(dev);
756d9fb9f38SJeff Kirsher 
757d9fb9f38SJeff Kirsher 	/* get the initial settings from hardware */
758d9fb9f38SJeff Kirsher 	tmp            = mdio_read(dev, MII_BMCR);
759d9fb9f38SJeff Kirsher 	np->speed      = (tmp & BMCR_SPEED100)? SPEED_100     : SPEED_10;
760d9fb9f38SJeff Kirsher 	np->duplex     = (tmp & BMCR_FULLDPLX)? DUPLEX_FULL   : DUPLEX_HALF;
761d9fb9f38SJeff Kirsher 	np->autoneg    = (tmp & BMCR_ANENABLE)? AUTONEG_ENABLE: AUTONEG_DISABLE;
762d9fb9f38SJeff Kirsher 	np->advertising= mdio_read(dev, MII_ADVERTISE);
763d9fb9f38SJeff Kirsher 
764d9fb9f38SJeff Kirsher 	if ((np->advertising & ADVERTISE_ALL) != ADVERTISE_ALL &&
765d9fb9f38SJeff Kirsher 	    netif_msg_probe(np)) {
766d9fb9f38SJeff Kirsher 		printk(KERN_INFO "natsemi %s: Transceiver default autonegotiation %s "
767d9fb9f38SJeff Kirsher 			"10%s %s duplex.\n",
768d9fb9f38SJeff Kirsher 			pci_name(np->pci_dev),
769d9fb9f38SJeff Kirsher 			(mdio_read(dev, MII_BMCR) & BMCR_ANENABLE)?
770d9fb9f38SJeff Kirsher 			  "enabled, advertise" : "disabled, force",
771d9fb9f38SJeff Kirsher 			(np->advertising &
772d9fb9f38SJeff Kirsher 			  (ADVERTISE_100FULL|ADVERTISE_100HALF))?
773d9fb9f38SJeff Kirsher 			    "0" : "",
774d9fb9f38SJeff Kirsher 			(np->advertising &
775d9fb9f38SJeff Kirsher 			  (ADVERTISE_100FULL|ADVERTISE_10FULL))?
776d9fb9f38SJeff Kirsher 			    "full" : "half");
777d9fb9f38SJeff Kirsher 	}
778d9fb9f38SJeff Kirsher 	if (netif_msg_probe(np))
779d9fb9f38SJeff Kirsher 		printk(KERN_INFO
780d9fb9f38SJeff Kirsher 			"natsemi %s: Transceiver status %#04x advertising %#04x.\n",
781d9fb9f38SJeff Kirsher 			pci_name(np->pci_dev), mdio_read(dev, MII_BMSR),
782d9fb9f38SJeff Kirsher 			np->advertising);
783d9fb9f38SJeff Kirsher 
784d9fb9f38SJeff Kirsher }
785d9fb9f38SJeff Kirsher 
786d9fb9f38SJeff Kirsher static const struct net_device_ops natsemi_netdev_ops = {
787d9fb9f38SJeff Kirsher 	.ndo_open		= netdev_open,
788d9fb9f38SJeff Kirsher 	.ndo_stop		= netdev_close,
789d9fb9f38SJeff Kirsher 	.ndo_start_xmit		= start_tx,
790d9fb9f38SJeff Kirsher 	.ndo_get_stats		= get_stats,
791afc4b13dSJiri Pirko 	.ndo_set_rx_mode	= set_rx_mode,
792d9fb9f38SJeff Kirsher 	.ndo_change_mtu		= natsemi_change_mtu,
793d9fb9f38SJeff Kirsher 	.ndo_do_ioctl		= netdev_ioctl,
794d9fb9f38SJeff Kirsher 	.ndo_tx_timeout 	= ns_tx_timeout,
795d9fb9f38SJeff Kirsher 	.ndo_set_mac_address 	= eth_mac_addr,
796d9fb9f38SJeff Kirsher 	.ndo_validate_addr	= eth_validate_addr,
797d9fb9f38SJeff Kirsher #ifdef CONFIG_NET_POLL_CONTROLLER
798d9fb9f38SJeff Kirsher 	.ndo_poll_controller	= natsemi_poll_controller,
799d9fb9f38SJeff Kirsher #endif
800d9fb9f38SJeff Kirsher };
801d9fb9f38SJeff Kirsher 
8021dd06ae8SGreg Kroah-Hartman static int natsemi_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
803d9fb9f38SJeff Kirsher {
804d9fb9f38SJeff Kirsher 	struct net_device *dev;
805d9fb9f38SJeff Kirsher 	struct netdev_private *np;
806d9fb9f38SJeff Kirsher 	int i, option, irq, chip_idx = ent->driver_data;
807d9fb9f38SJeff Kirsher 	static int find_cnt = -1;
808d9fb9f38SJeff Kirsher 	resource_size_t iostart;
809d9fb9f38SJeff Kirsher 	unsigned long iosize;
810d9fb9f38SJeff Kirsher 	void __iomem *ioaddr;
811d9fb9f38SJeff Kirsher 	const int pcibar = 1; /* PCI base address register */
812d9fb9f38SJeff Kirsher 	int prev_eedata;
813d9fb9f38SJeff Kirsher 	u32 tmp;
814d9fb9f38SJeff Kirsher 
815d9fb9f38SJeff Kirsher /* when built into the kernel, we only print version if device is found */
816d9fb9f38SJeff Kirsher #ifndef MODULE
817d9fb9f38SJeff Kirsher 	static int printed_version;
818d9fb9f38SJeff Kirsher 	if (!printed_version++)
819d9fb9f38SJeff Kirsher 		printk(version);
820d9fb9f38SJeff Kirsher #endif
821d9fb9f38SJeff Kirsher 
822d9fb9f38SJeff Kirsher 	i = pci_enable_device(pdev);
823d9fb9f38SJeff Kirsher 	if (i) return i;
824d9fb9f38SJeff Kirsher 
825d9fb9f38SJeff Kirsher 	/* natsemi has a non-standard PM control register
826d9fb9f38SJeff Kirsher 	 * in PCI config space.  Some boards apparently need
827d9fb9f38SJeff Kirsher 	 * to be brought to D0 in this manner.
828d9fb9f38SJeff Kirsher 	 */
829d9fb9f38SJeff Kirsher 	pci_read_config_dword(pdev, PCIPM, &tmp);
830d9fb9f38SJeff Kirsher 	if (tmp & PCI_PM_CTRL_STATE_MASK) {
831d9fb9f38SJeff Kirsher 		/* D0 state, disable PME assertion */
832d9fb9f38SJeff Kirsher 		u32 newtmp = tmp & ~PCI_PM_CTRL_STATE_MASK;
833d9fb9f38SJeff Kirsher 		pci_write_config_dword(pdev, PCIPM, newtmp);
834d9fb9f38SJeff Kirsher 	}
835d9fb9f38SJeff Kirsher 
836d9fb9f38SJeff Kirsher 	find_cnt++;
837d9fb9f38SJeff Kirsher 	iostart = pci_resource_start(pdev, pcibar);
838d9fb9f38SJeff Kirsher 	iosize = pci_resource_len(pdev, pcibar);
839d9fb9f38SJeff Kirsher 	irq = pdev->irq;
840d9fb9f38SJeff Kirsher 
841d9fb9f38SJeff Kirsher 	pci_set_master(pdev);
842d9fb9f38SJeff Kirsher 
843d9fb9f38SJeff Kirsher 	dev = alloc_etherdev(sizeof (struct netdev_private));
844d9fb9f38SJeff Kirsher 	if (!dev)
845d9fb9f38SJeff Kirsher 		return -ENOMEM;
846d9fb9f38SJeff Kirsher 	SET_NETDEV_DEV(dev, &pdev->dev);
847d9fb9f38SJeff Kirsher 
848d9fb9f38SJeff Kirsher 	i = pci_request_regions(pdev, DRV_NAME);
849d9fb9f38SJeff Kirsher 	if (i)
850d9fb9f38SJeff Kirsher 		goto err_pci_request_regions;
851d9fb9f38SJeff Kirsher 
852d9fb9f38SJeff Kirsher 	ioaddr = ioremap(iostart, iosize);
853d9fb9f38SJeff Kirsher 	if (!ioaddr) {
854d9fb9f38SJeff Kirsher 		i = -ENOMEM;
855d9fb9f38SJeff Kirsher 		goto err_ioremap;
856d9fb9f38SJeff Kirsher 	}
857d9fb9f38SJeff Kirsher 
858d9fb9f38SJeff Kirsher 	/* Work around the dropped serial bit. */
859d9fb9f38SJeff Kirsher 	prev_eedata = eeprom_read(ioaddr, 6);
860d9fb9f38SJeff Kirsher 	for (i = 0; i < 3; i++) {
861d9fb9f38SJeff Kirsher 		int eedata = eeprom_read(ioaddr, i + 7);
862d9fb9f38SJeff Kirsher 		dev->dev_addr[i*2] = (eedata << 1) + (prev_eedata >> 15);
863d9fb9f38SJeff Kirsher 		dev->dev_addr[i*2+1] = eedata >> 7;
864d9fb9f38SJeff Kirsher 		prev_eedata = eedata;
865d9fb9f38SJeff Kirsher 	}
866d9fb9f38SJeff Kirsher 
867d9fb9f38SJeff Kirsher 	np = netdev_priv(dev);
868d710ce13SFrancois Romieu 	np->ioaddr = ioaddr;
869d710ce13SFrancois Romieu 
870d9fb9f38SJeff Kirsher 	netif_napi_add(dev, &np->napi, natsemi_poll, 64);
871d9fb9f38SJeff Kirsher 	np->dev = dev;
872d9fb9f38SJeff Kirsher 
873d9fb9f38SJeff Kirsher 	np->pci_dev = pdev;
874d9fb9f38SJeff Kirsher 	pci_set_drvdata(pdev, dev);
875d9fb9f38SJeff Kirsher 	np->iosize = iosize;
876d9fb9f38SJeff Kirsher 	spin_lock_init(&np->lock);
877d9fb9f38SJeff Kirsher 	np->msg_enable = (debug >= 0) ? (1<<debug)-1 : NATSEMI_DEF_MSG;
878d9fb9f38SJeff Kirsher 	np->hands_off = 0;
879d9fb9f38SJeff Kirsher 	np->intr_status = 0;
880d9fb9f38SJeff Kirsher 	np->eeprom_size = natsemi_pci_info[chip_idx].eeprom_size;
881d9fb9f38SJeff Kirsher 	if (natsemi_pci_info[chip_idx].flags & NATSEMI_FLAG_IGNORE_PHY)
882d9fb9f38SJeff Kirsher 		np->ignore_phy = 1;
883d9fb9f38SJeff Kirsher 	else
884d9fb9f38SJeff Kirsher 		np->ignore_phy = 0;
885d9fb9f38SJeff Kirsher 	np->dspcfg_workaround = dspcfg_workaround;
886d9fb9f38SJeff Kirsher 
887d9fb9f38SJeff Kirsher 	/* Initial port:
888d9fb9f38SJeff Kirsher 	 * - If configured to ignore the PHY set up for external.
889d9fb9f38SJeff Kirsher 	 * - If the nic was configured to use an external phy and if find_mii
890d9fb9f38SJeff Kirsher 	 *   finds a phy: use external port, first phy that replies.
891d9fb9f38SJeff Kirsher 	 * - Otherwise: internal port.
892d9fb9f38SJeff Kirsher 	 * Note that the phy address for the internal phy doesn't matter:
893d9fb9f38SJeff Kirsher 	 * The address would be used to access a phy over the mii bus, but
894d9fb9f38SJeff Kirsher 	 * the internal phy is accessed through mapped registers.
895d9fb9f38SJeff Kirsher 	 */
896d9fb9f38SJeff Kirsher 	if (np->ignore_phy || readl(ioaddr + ChipConfig) & CfgExtPhy)
897d9fb9f38SJeff Kirsher 		dev->if_port = PORT_MII;
898d9fb9f38SJeff Kirsher 	else
899d9fb9f38SJeff Kirsher 		dev->if_port = PORT_TP;
900d9fb9f38SJeff Kirsher 	/* Reset the chip to erase previous misconfiguration. */
901d9fb9f38SJeff Kirsher 	natsemi_reload_eeprom(dev);
902d9fb9f38SJeff Kirsher 	natsemi_reset(dev);
903d9fb9f38SJeff Kirsher 
904d9fb9f38SJeff Kirsher 	if (dev->if_port != PORT_TP) {
905d9fb9f38SJeff Kirsher 		np->phy_addr_external = find_mii(dev);
906d9fb9f38SJeff Kirsher 		/* If we're ignoring the PHY it doesn't matter if we can't
907d9fb9f38SJeff Kirsher 		 * find one. */
908d9fb9f38SJeff Kirsher 		if (!np->ignore_phy && np->phy_addr_external == PHY_ADDR_NONE) {
909d9fb9f38SJeff Kirsher 			dev->if_port = PORT_TP;
910d9fb9f38SJeff Kirsher 			np->phy_addr_external = PHY_ADDR_INTERNAL;
911d9fb9f38SJeff Kirsher 		}
912d9fb9f38SJeff Kirsher 	} else {
913d9fb9f38SJeff Kirsher 		np->phy_addr_external = PHY_ADDR_INTERNAL;
914d9fb9f38SJeff Kirsher 	}
915d9fb9f38SJeff Kirsher 
916d9fb9f38SJeff Kirsher 	option = find_cnt < MAX_UNITS ? options[find_cnt] : 0;
917d9fb9f38SJeff Kirsher 	/* The lower four bits are the media type. */
918d9fb9f38SJeff Kirsher 	if (option) {
919d9fb9f38SJeff Kirsher 		if (option & 0x200)
920d9fb9f38SJeff Kirsher 			np->full_duplex = 1;
921d9fb9f38SJeff Kirsher 		if (option & 15)
922d9fb9f38SJeff Kirsher 			printk(KERN_INFO
923d9fb9f38SJeff Kirsher 				"natsemi %s: ignoring user supplied media type %d",
924d9fb9f38SJeff Kirsher 				pci_name(np->pci_dev), option & 15);
925d9fb9f38SJeff Kirsher 	}
926d9fb9f38SJeff Kirsher 	if (find_cnt < MAX_UNITS  &&  full_duplex[find_cnt])
927d9fb9f38SJeff Kirsher 		np->full_duplex = 1;
928d9fb9f38SJeff Kirsher 
929d9fb9f38SJeff Kirsher 	dev->netdev_ops = &natsemi_netdev_ops;
930d9fb9f38SJeff Kirsher 	dev->watchdog_timeo = TX_TIMEOUT;
931d9fb9f38SJeff Kirsher 
9327ad24ea4SWilfried Klaebe 	dev->ethtool_ops = &ethtool_ops;
933d9fb9f38SJeff Kirsher 
93444770e11SJarod Wilson 	/* MTU range: 64 - 2024 */
93544770e11SJarod Wilson 	dev->min_mtu = ETH_ZLEN + ETH_FCS_LEN;
93644770e11SJarod Wilson 	dev->max_mtu = NATSEMI_RX_LIMIT - NATSEMI_HEADERS;
93744770e11SJarod Wilson 
938d9fb9f38SJeff Kirsher 	if (mtu)
939d9fb9f38SJeff Kirsher 		dev->mtu = mtu;
940d9fb9f38SJeff Kirsher 
941d9fb9f38SJeff Kirsher 	natsemi_init_media(dev);
942d9fb9f38SJeff Kirsher 
943d9fb9f38SJeff Kirsher 	/* save the silicon revision for later querying */
944d9fb9f38SJeff Kirsher 	np->srr = readl(ioaddr + SiliconRev);
945d9fb9f38SJeff Kirsher 	if (netif_msg_hw(np))
946d9fb9f38SJeff Kirsher 		printk(KERN_INFO "natsemi %s: silicon revision %#04x.\n",
947d9fb9f38SJeff Kirsher 				pci_name(np->pci_dev), np->srr);
948d9fb9f38SJeff Kirsher 
949d9fb9f38SJeff Kirsher 	i = register_netdev(dev);
950d9fb9f38SJeff Kirsher 	if (i)
951d9fb9f38SJeff Kirsher 		goto err_register_netdev;
95252428d91SPeter Senna Tschudin 	i = NATSEMI_CREATE_FILE(pdev, dspcfg_workaround);
95352428d91SPeter Senna Tschudin 	if (i)
954d9fb9f38SJeff Kirsher 		goto err_create_file;
955d9fb9f38SJeff Kirsher 
956d9fb9f38SJeff Kirsher 	if (netif_msg_drv(np)) {
957d9fb9f38SJeff Kirsher 		printk(KERN_INFO "natsemi %s: %s at %#08llx "
958d9fb9f38SJeff Kirsher 		       "(%s), %pM, IRQ %d",
959d9fb9f38SJeff Kirsher 		       dev->name, natsemi_pci_info[chip_idx].name,
960d9fb9f38SJeff Kirsher 		       (unsigned long long)iostart, pci_name(np->pci_dev),
961d9fb9f38SJeff Kirsher 		       dev->dev_addr, irq);
962d9fb9f38SJeff Kirsher 		if (dev->if_port == PORT_TP)
963d9fb9f38SJeff Kirsher 			printk(", port TP.\n");
964d9fb9f38SJeff Kirsher 		else if (np->ignore_phy)
965d9fb9f38SJeff Kirsher 			printk(", port MII, ignoring PHY\n");
966d9fb9f38SJeff Kirsher 		else
967d9fb9f38SJeff Kirsher 			printk(", port MII, phy ad %d.\n", np->phy_addr_external);
968d9fb9f38SJeff Kirsher 	}
969d9fb9f38SJeff Kirsher 	return 0;
970d9fb9f38SJeff Kirsher 
971d9fb9f38SJeff Kirsher  err_create_file:
972d9fb9f38SJeff Kirsher  	unregister_netdev(dev);
973d9fb9f38SJeff Kirsher 
974d9fb9f38SJeff Kirsher  err_register_netdev:
975d9fb9f38SJeff Kirsher 	iounmap(ioaddr);
976d9fb9f38SJeff Kirsher 
977d9fb9f38SJeff Kirsher  err_ioremap:
978d9fb9f38SJeff Kirsher 	pci_release_regions(pdev);
979d9fb9f38SJeff Kirsher 
980d9fb9f38SJeff Kirsher  err_pci_request_regions:
981d9fb9f38SJeff Kirsher 	free_netdev(dev);
982d9fb9f38SJeff Kirsher 	return i;
983d9fb9f38SJeff Kirsher }
984d9fb9f38SJeff Kirsher 
985d9fb9f38SJeff Kirsher 
986d9fb9f38SJeff Kirsher /* Read the EEPROM and MII Management Data I/O (MDIO) interfaces.
987d9fb9f38SJeff Kirsher    The EEPROM code is for the common 93c06/46 EEPROMs with 6 bit addresses. */
988d9fb9f38SJeff Kirsher 
989d9fb9f38SJeff Kirsher /* Delay between EEPROM clock transitions.
990d9fb9f38SJeff Kirsher    No extra delay is needed with 33Mhz PCI, but future 66Mhz access may need
991d9fb9f38SJeff Kirsher    a delay.  Note that pre-2.0.34 kernels had a cache-alignment bug that
992d9fb9f38SJeff Kirsher    made udelay() unreliable.
993d9fb9f38SJeff Kirsher    The old method of using an ISA access as a delay, __SLOW_DOWN_IO__, is
994d9fb9f38SJeff Kirsher    deprecated.
995d9fb9f38SJeff Kirsher */
996d9fb9f38SJeff Kirsher #define eeprom_delay(ee_addr)	readl(ee_addr)
997d9fb9f38SJeff Kirsher 
998d9fb9f38SJeff Kirsher #define EE_Write0 (EE_ChipSelect)
999d9fb9f38SJeff Kirsher #define EE_Write1 (EE_ChipSelect | EE_DataIn)
1000d9fb9f38SJeff Kirsher 
1001d9fb9f38SJeff Kirsher /* The EEPROM commands include the alway-set leading bit. */
1002d9fb9f38SJeff Kirsher enum EEPROM_Cmds {
1003d9fb9f38SJeff Kirsher 	EE_WriteCmd=(5 << 6), EE_ReadCmd=(6 << 6), EE_EraseCmd=(7 << 6),
1004d9fb9f38SJeff Kirsher };
1005d9fb9f38SJeff Kirsher 
1006d9fb9f38SJeff Kirsher static int eeprom_read(void __iomem *addr, int location)
1007d9fb9f38SJeff Kirsher {
1008d9fb9f38SJeff Kirsher 	int i;
1009d9fb9f38SJeff Kirsher 	int retval = 0;
1010d9fb9f38SJeff Kirsher 	void __iomem *ee_addr = addr + EECtrl;
1011d9fb9f38SJeff Kirsher 	int read_cmd = location | EE_ReadCmd;
1012d9fb9f38SJeff Kirsher 
1013d9fb9f38SJeff Kirsher 	writel(EE_Write0, ee_addr);
1014d9fb9f38SJeff Kirsher 
1015d9fb9f38SJeff Kirsher 	/* Shift the read command bits out. */
1016d9fb9f38SJeff Kirsher 	for (i = 10; i >= 0; i--) {
1017d9fb9f38SJeff Kirsher 		short dataval = (read_cmd & (1 << i)) ? EE_Write1 : EE_Write0;
1018d9fb9f38SJeff Kirsher 		writel(dataval, ee_addr);
1019d9fb9f38SJeff Kirsher 		eeprom_delay(ee_addr);
1020d9fb9f38SJeff Kirsher 		writel(dataval | EE_ShiftClk, ee_addr);
1021d9fb9f38SJeff Kirsher 		eeprom_delay(ee_addr);
1022d9fb9f38SJeff Kirsher 	}
1023d9fb9f38SJeff Kirsher 	writel(EE_ChipSelect, ee_addr);
1024d9fb9f38SJeff Kirsher 	eeprom_delay(ee_addr);
1025d9fb9f38SJeff Kirsher 
1026d9fb9f38SJeff Kirsher 	for (i = 0; i < 16; i++) {
1027d9fb9f38SJeff Kirsher 		writel(EE_ChipSelect | EE_ShiftClk, ee_addr);
1028d9fb9f38SJeff Kirsher 		eeprom_delay(ee_addr);
1029d9fb9f38SJeff Kirsher 		retval |= (readl(ee_addr) & EE_DataOut) ? 1 << i : 0;
1030d9fb9f38SJeff Kirsher 		writel(EE_ChipSelect, ee_addr);
1031d9fb9f38SJeff Kirsher 		eeprom_delay(ee_addr);
1032d9fb9f38SJeff Kirsher 	}
1033d9fb9f38SJeff Kirsher 
1034d9fb9f38SJeff Kirsher 	/* Terminate the EEPROM access. */
1035d9fb9f38SJeff Kirsher 	writel(EE_Write0, ee_addr);
1036d9fb9f38SJeff Kirsher 	writel(0, ee_addr);
1037d9fb9f38SJeff Kirsher 	return retval;
1038d9fb9f38SJeff Kirsher }
1039d9fb9f38SJeff Kirsher 
1040d9fb9f38SJeff Kirsher /* MII transceiver control section.
1041d9fb9f38SJeff Kirsher  * The 83815 series has an internal transceiver, and we present the
1042d9fb9f38SJeff Kirsher  * internal management registers as if they were MII connected.
1043d9fb9f38SJeff Kirsher  * External Phy registers are referenced through the MII interface.
1044d9fb9f38SJeff Kirsher  */
1045d9fb9f38SJeff Kirsher 
1046d9fb9f38SJeff Kirsher /* clock transitions >= 20ns (25MHz)
1047d9fb9f38SJeff Kirsher  * One readl should be good to PCI @ 100MHz
1048d9fb9f38SJeff Kirsher  */
1049d9fb9f38SJeff Kirsher #define mii_delay(ioaddr)  readl(ioaddr + EECtrl)
1050d9fb9f38SJeff Kirsher 
1051d9fb9f38SJeff Kirsher static int mii_getbit (struct net_device *dev)
1052d9fb9f38SJeff Kirsher {
1053d9fb9f38SJeff Kirsher 	int data;
1054d9fb9f38SJeff Kirsher 	void __iomem *ioaddr = ns_ioaddr(dev);
1055d9fb9f38SJeff Kirsher 
1056d9fb9f38SJeff Kirsher 	writel(MII_ShiftClk, ioaddr + EECtrl);
1057d9fb9f38SJeff Kirsher 	data = readl(ioaddr + EECtrl);
1058d9fb9f38SJeff Kirsher 	writel(0, ioaddr + EECtrl);
1059d9fb9f38SJeff Kirsher 	mii_delay(ioaddr);
1060d9fb9f38SJeff Kirsher 	return (data & MII_Data)? 1 : 0;
1061d9fb9f38SJeff Kirsher }
1062d9fb9f38SJeff Kirsher 
1063d9fb9f38SJeff Kirsher static void mii_send_bits (struct net_device *dev, u32 data, int len)
1064d9fb9f38SJeff Kirsher {
1065d9fb9f38SJeff Kirsher 	u32 i;
1066d9fb9f38SJeff Kirsher 	void __iomem *ioaddr = ns_ioaddr(dev);
1067d9fb9f38SJeff Kirsher 
1068d9fb9f38SJeff Kirsher 	for (i = (1 << (len-1)); i; i >>= 1)
1069d9fb9f38SJeff Kirsher 	{
1070d9fb9f38SJeff Kirsher 		u32 mdio_val = MII_Write | ((data & i)? MII_Data : 0);
1071d9fb9f38SJeff Kirsher 		writel(mdio_val, ioaddr + EECtrl);
1072d9fb9f38SJeff Kirsher 		mii_delay(ioaddr);
1073d9fb9f38SJeff Kirsher 		writel(mdio_val | MII_ShiftClk, ioaddr + EECtrl);
1074d9fb9f38SJeff Kirsher 		mii_delay(ioaddr);
1075d9fb9f38SJeff Kirsher 	}
1076d9fb9f38SJeff Kirsher 	writel(0, ioaddr + EECtrl);
1077d9fb9f38SJeff Kirsher 	mii_delay(ioaddr);
1078d9fb9f38SJeff Kirsher }
1079d9fb9f38SJeff Kirsher 
1080d9fb9f38SJeff Kirsher static int miiport_read(struct net_device *dev, int phy_id, int reg)
1081d9fb9f38SJeff Kirsher {
1082d9fb9f38SJeff Kirsher 	u32 cmd;
1083d9fb9f38SJeff Kirsher 	int i;
1084d9fb9f38SJeff Kirsher 	u32 retval = 0;
1085d9fb9f38SJeff Kirsher 
1086d9fb9f38SJeff Kirsher 	/* Ensure sync */
1087d9fb9f38SJeff Kirsher 	mii_send_bits (dev, 0xffffffff, 32);
1088d9fb9f38SJeff Kirsher 	/* ST(2), OP(2), ADDR(5), REG#(5), TA(2), Data(16) total 32 bits */
1089d9fb9f38SJeff Kirsher 	/* ST,OP = 0110'b for read operation */
1090d9fb9f38SJeff Kirsher 	cmd = (0x06 << 10) | (phy_id << 5) | reg;
1091d9fb9f38SJeff Kirsher 	mii_send_bits (dev, cmd, 14);
1092d9fb9f38SJeff Kirsher 	/* Turnaround */
1093d9fb9f38SJeff Kirsher 	if (mii_getbit (dev))
1094d9fb9f38SJeff Kirsher 		return 0;
1095d9fb9f38SJeff Kirsher 	/* Read data */
1096d9fb9f38SJeff Kirsher 	for (i = 0; i < 16; i++) {
1097d9fb9f38SJeff Kirsher 		retval <<= 1;
1098d9fb9f38SJeff Kirsher 		retval |= mii_getbit (dev);
1099d9fb9f38SJeff Kirsher 	}
1100d9fb9f38SJeff Kirsher 	/* End cycle */
1101d9fb9f38SJeff Kirsher 	mii_getbit (dev);
1102d9fb9f38SJeff Kirsher 	return retval;
1103d9fb9f38SJeff Kirsher }
1104d9fb9f38SJeff Kirsher 
1105d9fb9f38SJeff Kirsher static void miiport_write(struct net_device *dev, int phy_id, int reg, u16 data)
1106d9fb9f38SJeff Kirsher {
1107d9fb9f38SJeff Kirsher 	u32 cmd;
1108d9fb9f38SJeff Kirsher 
1109d9fb9f38SJeff Kirsher 	/* Ensure sync */
1110d9fb9f38SJeff Kirsher 	mii_send_bits (dev, 0xffffffff, 32);
1111d9fb9f38SJeff Kirsher 	/* ST(2), OP(2), ADDR(5), REG#(5), TA(2), Data(16) total 32 bits */
1112d9fb9f38SJeff Kirsher 	/* ST,OP,AAAAA,RRRRR,TA = 0101xxxxxxxxxx10'b = 0x5002 for write */
1113d9fb9f38SJeff Kirsher 	cmd = (0x5002 << 16) | (phy_id << 23) | (reg << 18) | data;
1114d9fb9f38SJeff Kirsher 	mii_send_bits (dev, cmd, 32);
1115d9fb9f38SJeff Kirsher 	/* End cycle */
1116d9fb9f38SJeff Kirsher 	mii_getbit (dev);
1117d9fb9f38SJeff Kirsher }
1118d9fb9f38SJeff Kirsher 
1119d9fb9f38SJeff Kirsher static int mdio_read(struct net_device *dev, int reg)
1120d9fb9f38SJeff Kirsher {
1121d9fb9f38SJeff Kirsher 	struct netdev_private *np = netdev_priv(dev);
1122d9fb9f38SJeff Kirsher 	void __iomem *ioaddr = ns_ioaddr(dev);
1123d9fb9f38SJeff Kirsher 
1124d9fb9f38SJeff Kirsher 	/* The 83815 series has two ports:
1125d9fb9f38SJeff Kirsher 	 * - an internal transceiver
1126d9fb9f38SJeff Kirsher 	 * - an external mii bus
1127d9fb9f38SJeff Kirsher 	 */
1128d9fb9f38SJeff Kirsher 	if (dev->if_port == PORT_TP)
1129d9fb9f38SJeff Kirsher 		return readw(ioaddr+BasicControl+(reg<<2));
1130d9fb9f38SJeff Kirsher 	else
1131d9fb9f38SJeff Kirsher 		return miiport_read(dev, np->phy_addr_external, reg);
1132d9fb9f38SJeff Kirsher }
1133d9fb9f38SJeff Kirsher 
1134d9fb9f38SJeff Kirsher static void mdio_write(struct net_device *dev, int reg, u16 data)
1135d9fb9f38SJeff Kirsher {
1136d9fb9f38SJeff Kirsher 	struct netdev_private *np = netdev_priv(dev);
1137d9fb9f38SJeff Kirsher 	void __iomem *ioaddr = ns_ioaddr(dev);
1138d9fb9f38SJeff Kirsher 
1139d9fb9f38SJeff Kirsher 	/* The 83815 series has an internal transceiver; handle separately */
1140d9fb9f38SJeff Kirsher 	if (dev->if_port == PORT_TP)
1141d9fb9f38SJeff Kirsher 		writew(data, ioaddr+BasicControl+(reg<<2));
1142d9fb9f38SJeff Kirsher 	else
1143d9fb9f38SJeff Kirsher 		miiport_write(dev, np->phy_addr_external, reg, data);
1144d9fb9f38SJeff Kirsher }
1145d9fb9f38SJeff Kirsher 
/*
 * init_phy_fixup - reprogram PHY settings that were lost while the chip
 * was powered down or reset.
 *
 * Restores autonegotiation (or the forced speed/duplex) from the values
 * cached in netdev_private, applies per-model quirks for external
 * transceivers, and - for the internal transceiver only - writes the
 * DSP settings recommended by National, retrying until they stick.
 */
static void init_phy_fixup(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = ns_ioaddr(dev);
	int i;
	u32 cfg;
	u16 tmp;

	/* restore stuff lost when power was out */
	tmp = mdio_read(dev, MII_BMCR);
	if (np->autoneg == AUTONEG_ENABLE) {
		/* renegotiate if something changed */
		if ((tmp & BMCR_ANENABLE) == 0 ||
		    np->advertising != mdio_read(dev, MII_ADVERTISE))
		{
			/* turn on autonegotiation and force negotiation */
			tmp |= (BMCR_ANENABLE | BMCR_ANRESTART);
			mdio_write(dev, MII_ADVERTISE, np->advertising);
		}
	} else {
		/* turn off auto negotiation, set speed and duplexity */
		tmp &= ~(BMCR_ANENABLE | BMCR_SPEED100 | BMCR_FULLDPLX);
		if (np->speed == SPEED_100)
			tmp |= BMCR_SPEED100;
		if (np->duplex == DUPLEX_FULL)
			tmp |= BMCR_FULLDPLX;
		/*
		 * Note: there is no good way to inform the link partner
		 * that our capabilities changed. The user has to unplug
		 * and replug the network cable after some changes, e.g.
		 * after switching from 10HD, autoneg off to 100 HD,
		 * autoneg off.
		 */
	}
	mdio_write(dev, MII_BMCR, tmp);
	/* flush the posted write and give the PHY a moment to react */
	readl(ioaddr + ChipConfig);
	udelay(1);

	/* find out what phy this is */
	np->mii = (mdio_read(dev, MII_PHYSID1) << 16)
				+ mdio_read(dev, MII_PHYSID2);

	/* handle external phys here */
	switch (np->mii) {
	case PHYID_AM79C874:
		/* phy specific configuration for fibre/tp operation */
		tmp = mdio_read(dev, MII_MCTRL);
		tmp &= ~(MII_FX_SEL | MII_EN_SCRM);
		if (dev->if_port == PORT_FIBRE)
			tmp |= MII_FX_SEL;
		else
			tmp |= MII_EN_SCRM;
		mdio_write(dev, MII_MCTRL, tmp);
		break;
	default:
		break;
	}
	/* everything below applies to the internal transceiver only */
	cfg = readl(ioaddr + ChipConfig);
	if (cfg & CfgExtPhy)
		return;

	/* On page 78 of the spec, they recommend some settings for "optimum
	   performance" to be done in sequence.  These settings optimize some
	   of the 100Mbit autodetection circuitry.  They say we only want to
	   do this for rev C of the chip, but engineers at NSC (Bradley
	   Kennedy) recommends always setting them.  If you don't, you get
	   errors on some autonegotiations that make the device unusable.

	   It seems that the DSP needs a few usec to reinitialize after
	   the start of the phy. Just retry writing these values until they
	   stick.
	*/
	for (i=0;i<NATSEMI_HW_TIMEOUT;i++) {

		int dspcfg;
		/* PGSEL=1 selects the register page holding the DSP regs */
		writew(1, ioaddr + PGSEL);
		writew(PMDCSR_VAL, ioaddr + PMDCSR);
		writew(TSTDAT_VAL, ioaddr + TSTDAT);
		/* rev C and older get the full DSPCFG_VAL; newer revisions
		 * only need the coefficient-visibility bit added */
		np->dspcfg = (np->srr <= SRR_DP83815_C)?
			DSPCFG_VAL : (DSPCFG_COEF | readw(ioaddr + DSPCFG));
		writew(np->dspcfg, ioaddr + DSPCFG);
		writew(SDCFG_VAL, ioaddr + SDCFG);
		writew(0, ioaddr + PGSEL);
		readl(ioaddr + ChipConfig);
		udelay(10);

		/* read back: the value only "sticks" once the DSP has
		 * finished reinitializing */
		writew(1, ioaddr + PGSEL);
		dspcfg = readw(ioaddr + DSPCFG);
		writew(0, ioaddr + PGSEL);
		if (np->dspcfg == dspcfg)
			break;
	}

	if (netif_msg_link(np)) {
		if (i==NATSEMI_HW_TIMEOUT) {
			printk(KERN_INFO
				"%s: DSPCFG mismatch after retrying for %d usec.\n",
				dev->name, i*10);
		} else {
			printk(KERN_INFO
				"%s: DSPCFG accepted after %d usec.\n",
				dev->name, i*10);
		}
	}
	/*
	 * Enable PHY Specific event based interrupts.  Link state change
	 * and Auto-Negotiation Completion are among the affected.
	 * Read the intr status to clear it (needed for wake events).
	 */
	readw(ioaddr + MIntrStatus);
	writew(MICRIntEn, ioaddr + MIntrCtrl);
}
1258d9fb9f38SJeff Kirsher 
/*
 * switch_port_external - select the external transceiver.
 * Returns 0 if the external phy was already active, 1 if a switch
 * (including a phy fixup reinit) was performed.
 */
static int switch_port_external(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = ns_ioaddr(dev);
	u32 cfg;

	cfg = readl(ioaddr + ChipConfig);
	if (cfg & CfgExtPhy)
		return 0;	/* external phy already selected */

	if (netif_msg_link(np)) {
		printk(KERN_INFO "%s: switching to external transceiver.\n",
				dev->name);
	}

	/* 1) switch back to external phy */
	writel(cfg | (CfgExtPhy | CfgPhyDis), ioaddr + ChipConfig);
	readl(ioaddr + ChipConfig);	/* flush the posted write */
	udelay(1);

	/* 2) reset the external phy: */
	/* resetting the external PHY has been known to cause a hub supplying
	 * power over Ethernet to kill the power.  We don't want to kill
	 * power to this computer, so we avoid resetting the phy.
	 */

	/* 3) reinit the phy fixup, it got lost during power down. */
	/* move the internal phy out of the way of the external phy's
	 * address first (see the find_mii comments on address clashes) */
	move_int_phy(dev, np->phy_addr_external);
	init_phy_fixup(dev);

	return 1;
}
1291d9fb9f38SJeff Kirsher 
/*
 * switch_port_internal - select the internal transceiver.
 * Returns 0 if the internal phy was already active, 1 if a switch
 * (including a phy reset and fixup reinit) was performed.
 */
static int switch_port_internal(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = ns_ioaddr(dev);
	int i;
	u32 cfg;
	u16 bmcr;

	cfg = readl(ioaddr + ChipConfig);
	if (!(cfg &CfgExtPhy))
		return 0;	/* internal phy already selected */

	if (netif_msg_link(np)) {
		printk(KERN_INFO "%s: switching to internal transceiver.\n",
				dev->name);
	}
	/* 1) switch back to internal phy: */
	cfg = cfg & ~(CfgExtPhy | CfgPhyDis);
	writel(cfg, ioaddr + ChipConfig);
	readl(ioaddr + ChipConfig);	/* flush the posted write */
	udelay(1);

	/* 2) reset the internal phy: */
	/* the internal phy's MII registers are memory mapped starting at
	 * BasicControl, 4 bytes apart */
	bmcr = readw(ioaddr+BasicControl+(MII_BMCR<<2));
	writel(bmcr | BMCR_RESET, ioaddr+BasicControl+(MII_BMCR<<2));
	readl(ioaddr + ChipConfig);
	udelay(10);
	/* BMCR_RESET is self-clearing; poll until the phy comes back */
	for (i=0;i<NATSEMI_HW_TIMEOUT;i++) {
		bmcr = readw(ioaddr+BasicControl+(MII_BMCR<<2));
		if (!(bmcr & BMCR_RESET))
			break;
		udelay(10);
	}
	if (i==NATSEMI_HW_TIMEOUT && netif_msg_link(np)) {
		printk(KERN_INFO
			"%s: phy reset did not complete in %d usec.\n",
			dev->name, i*10);
	}
	/* 3) reinit the phy fixup, it got lost during power down. */
	init_phy_fixup(dev);

	return 1;
}
1335d9fb9f38SJeff Kirsher 
1336d9fb9f38SJeff Kirsher /* Scan for a PHY on the external mii bus.
1337d9fb9f38SJeff Kirsher  * There are two tricky points:
1338d9fb9f38SJeff Kirsher  * - Do not scan while the internal phy is enabled. The internal phy will
1339d9fb9f38SJeff Kirsher  *   crash: e.g. reads from the DSPCFG register will return odd values and
1340d9fb9f38SJeff Kirsher  *   the nasty random phy reset code will reset the nic every few seconds.
1341d9fb9f38SJeff Kirsher  * - The internal phy must be moved around, an external phy could
1342d9fb9f38SJeff Kirsher  *   have the same address as the internal phy.
1343d9fb9f38SJeff Kirsher  */
/*
 * find_mii - scan the external MII bus for a PHY.
 * Returns the address (1..31) of the first PHY that answers, or 32 if
 * no PHY responded.  The detected PHY id is cached in np->mii.
 */
static int find_mii(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int tmp;
	int i;
	int did_switch;

	/* Switch to external phy */
	did_switch = switch_port_external(dev);

	/* Scan the possible phy addresses:
	 *
	 * PHY address 0 means that the phy is in isolate mode. Not yet
	 * supported due to lack of test hardware. User space should
	 * handle it through ethtool.
	 */
	for (i = 1; i <= 31; i++) {
		/* keep the internal phy away from the address under test */
		move_int_phy(dev, i);
		tmp = miiport_read(dev, i, MII_BMSR);
		/* all-ones or all-zeroes BMSR means nothing answered */
		if (tmp != 0xffff && tmp != 0x0000) {
			/* found something! */
			np->mii = (mdio_read(dev, MII_PHYSID1) << 16)
					+ mdio_read(dev, MII_PHYSID2);
	 		if (netif_msg_probe(np)) {
				printk(KERN_INFO "natsemi %s: found external phy %08x at address %d.\n",
						pci_name(np->pci_dev), np->mii, i);
			}
			break;
		}
	}
	/* And switch back to internal phy: */
	if (did_switch)
		switch_port_internal(dev);
	return i;
}
1379d9fb9f38SJeff Kirsher 
/* Register bits that natsemi_reset() preserves across a soft reset;
 * all other bits revert to their EEPROM/power-up defaults. */
/* CFG bits [13:16] [18:23] */
#define CFG_RESET_SAVE 0xfde000
/* WCSR bits [0:4] [9:10] */
#define WCSR_RESET_SAVE 0x61f
/* RFCR bits [20] [22] [27:31] */
#define RFCR_RESET_SAVE 0xf8500000
1386d9fb9f38SJeff Kirsher 
/*
 * natsemi_reset - soft reset the chip while preserving the register
 * state that would otherwise require an EEPROM reload to restore.
 */
static void natsemi_reset(struct net_device *dev)
{
	int i;
	u32 cfg;
	u32 wcsr;
	u32 rfcr;
	u16 pmatch[3];	/* perfect-match (MAC address) filter words */
	u16 sopass[3];	/* SecureOn password words (wake-on-LAN) */
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = ns_ioaddr(dev);

	/*
	 * Resetting the chip causes some registers to be lost.
	 * Natsemi suggests NOT reloading the EEPROM while live, so instead
	 * we save the state that would have been loaded from EEPROM
	 * on a normal power-up (see the spec EEPROM map).  This assumes
	 * whoever calls this will follow up with init_registers() eventually.
	 */

	/* CFG */
	cfg = readl(ioaddr + ChipConfig) & CFG_RESET_SAVE;
	/* WCSR */
	wcsr = readl(ioaddr + WOLCmd) & WCSR_RESET_SAVE;
	/* RFCR */
	rfcr = readl(ioaddr + RxFilterAddr) & RFCR_RESET_SAVE;
	/* PMATCH */
	/* filter RAM is accessed indirectly: write the word offset to
	 * RxFilterAddr, then read/write RxFilterData */
	for (i = 0; i < 3; i++) {
		writel(i*2, ioaddr + RxFilterAddr);
		pmatch[i] = readw(ioaddr + RxFilterData);
	}
	/* SOPAS */
	for (i = 0; i < 3; i++) {
		writel(0xa+(i*2), ioaddr + RxFilterAddr);
		sopass[i] = readw(ioaddr + RxFilterData);
	}

	/* now whack the chip */
	/* ChipReset is self-clearing; poll until the reset completes */
	writel(ChipReset, ioaddr + ChipCmd);
	for (i=0;i<NATSEMI_HW_TIMEOUT;i++) {
		if (!(readl(ioaddr + ChipCmd) & ChipReset))
			break;
		udelay(5);
	}
	if (i==NATSEMI_HW_TIMEOUT) {
		printk(KERN_WARNING "%s: reset did not complete in %d usec.\n",
			dev->name, i*5);
	} else if (netif_msg_hw(np)) {
		printk(KERN_DEBUG "%s: reset completed in %d usec.\n",
			dev->name, i*5);
	}

	/* restore CFG */
	cfg |= readl(ioaddr + ChipConfig) & ~CFG_RESET_SAVE;
	/* turn on external phy if it was selected */
	if (dev->if_port == PORT_TP)
		cfg &= ~(CfgExtPhy | CfgPhyDis);
	else
		cfg |= (CfgExtPhy | CfgPhyDis);
	writel(cfg, ioaddr + ChipConfig);
	/* restore WCSR */
	wcsr |= readl(ioaddr + WOLCmd) & ~WCSR_RESET_SAVE;
	writel(wcsr, ioaddr + WOLCmd);
	/* read RFCR */
	rfcr |= readl(ioaddr + RxFilterAddr) & ~RFCR_RESET_SAVE;
	/* restore PMATCH */
	for (i = 0; i < 3; i++) {
		writel(i*2, ioaddr + RxFilterAddr);
		writew(pmatch[i], ioaddr + RxFilterData);
	}
	/* restore SOPAS */
	for (i = 0; i < 3; i++) {
		writel(0xa+(i*2), ioaddr + RxFilterAddr);
		writew(sopass[i], ioaddr + RxFilterData);
	}
	/* restore RFCR */
	/* written last: RxFilterAddr was clobbered by the loops above */
	writel(rfcr, ioaddr + RxFilterAddr);
}
1463d9fb9f38SJeff Kirsher 
1464d9fb9f38SJeff Kirsher static void reset_rx(struct net_device *dev)
1465d9fb9f38SJeff Kirsher {
1466d9fb9f38SJeff Kirsher 	int i;
1467d9fb9f38SJeff Kirsher 	struct netdev_private *np = netdev_priv(dev);
1468d9fb9f38SJeff Kirsher 	void __iomem *ioaddr = ns_ioaddr(dev);
1469d9fb9f38SJeff Kirsher 
1470d9fb9f38SJeff Kirsher 	np->intr_status &= ~RxResetDone;
1471d9fb9f38SJeff Kirsher 
1472d9fb9f38SJeff Kirsher 	writel(RxReset, ioaddr + ChipCmd);
1473d9fb9f38SJeff Kirsher 
1474d9fb9f38SJeff Kirsher 	for (i=0;i<NATSEMI_HW_TIMEOUT;i++) {
1475d9fb9f38SJeff Kirsher 		np->intr_status |= readl(ioaddr + IntrStatus);
1476d9fb9f38SJeff Kirsher 		if (np->intr_status & RxResetDone)
1477d9fb9f38SJeff Kirsher 			break;
1478d9fb9f38SJeff Kirsher 		udelay(15);
1479d9fb9f38SJeff Kirsher 	}
1480d9fb9f38SJeff Kirsher 	if (i==NATSEMI_HW_TIMEOUT) {
1481d9fb9f38SJeff Kirsher 		printk(KERN_WARNING "%s: RX reset did not complete in %d usec.\n",
1482d9fb9f38SJeff Kirsher 		       dev->name, i*15);
1483d9fb9f38SJeff Kirsher 	} else if (netif_msg_hw(np)) {
1484d9fb9f38SJeff Kirsher 		printk(KERN_WARNING "%s: RX reset took %d usec.\n",
1485d9fb9f38SJeff Kirsher 		       dev->name, i*15);
1486d9fb9f38SJeff Kirsher 	}
1487d9fb9f38SJeff Kirsher }
1488d9fb9f38SJeff Kirsher 
/*
 * natsemi_reload_eeprom - trigger a reload of the chip configuration
 * from the EEPROM and wait for it to finish.
 */
static void natsemi_reload_eeprom(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = ns_ioaddr(dev);
	int i;

	writel(EepromReload, ioaddr + PCIBusCfg);
	for (i=0;i<NATSEMI_HW_TIMEOUT;i++) {
		udelay(50);
		/* EepromReload clears once the reload has completed */
		if (!(readl(ioaddr + PCIBusCfg) & EepromReload))
			break;
	}
	if (i==NATSEMI_HW_TIMEOUT) {
		printk(KERN_WARNING "natsemi %s: EEPROM did not reload in %d usec.\n",
			pci_name(np->pci_dev), i*50);
	} else if (netif_msg_hw(np)) {
		printk(KERN_DEBUG "natsemi %s: EEPROM reloaded in %d usec.\n",
			pci_name(np->pci_dev), i*50);
	}
}
1509d9fb9f38SJeff Kirsher 
/*
 * natsemi_stop_rxtx - command the chip to stop both the transmit and
 * receive engines and busy-wait until they report idle.
 */
static void natsemi_stop_rxtx(struct net_device *dev)
{
	void __iomem * ioaddr = ns_ioaddr(dev);
	struct netdev_private *np = netdev_priv(dev);
	int i;

	writel(RxOff | TxOff, ioaddr + ChipCmd);
	/* TxOn/RxOn clear once the respective engine has actually stopped */
	for(i=0;i< NATSEMI_HW_TIMEOUT;i++) {
		if ((readl(ioaddr + ChipCmd) & (TxOn|RxOn)) == 0)
			break;
		udelay(5);
	}
	if (i==NATSEMI_HW_TIMEOUT) {
		printk(KERN_WARNING "%s: Tx/Rx process did not stop in %d usec.\n",
			dev->name, i*5);
	} else if (netif_msg_hw(np)) {
		printk(KERN_DEBUG "%s: Tx/Rx process stopped in %d usec.\n",
			dev->name, i*5);
	}
}
1530d9fb9f38SJeff Kirsher 
/*
 * netdev_open - bring the interface up (ndo_open).
 * Resets the chip, requests the shared PCI irq, allocates and fills the
 * descriptor rings, programs the chip registers and MAC address filter,
 * starts the transmit queue and arms the link-check timer.
 * Returns 0 on success or a negative errno.
 */
static int netdev_open(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem * ioaddr = ns_ioaddr(dev);
	const int irq = np->pci_dev->irq;
	int i;

	/* Reset the chip, just in case. */
	natsemi_reset(dev);

	i = request_irq(irq, intr_handler, IRQF_SHARED, dev->name, dev);
	if (i) return i;

	if (netif_msg_ifup(np))
		printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
			dev->name, irq);
	i = alloc_ring(dev);
	if (i < 0) {
		free_irq(irq, dev);	/* undo request_irq on failure */
		return i;
	}
	napi_enable(&np->napi);

	init_ring(dev);
	/* serialize register setup with the driver lock */
	spin_lock_irq(&np->lock);
	init_registers(dev);
	/* now set the MAC address according to dev->dev_addr */
	for (i = 0; i < 3; i++) {
		/* filter RAM holds the MAC as three little-endian words */
		u16 mac = (dev->dev_addr[2*i+1]<<8) + dev->dev_addr[2*i];

		writel(i*2, ioaddr + RxFilterAddr);
		writew(mac, ioaddr + RxFilterData);
	}
	writel(np->cur_rx_mode, ioaddr + RxFilterAddr);
	spin_unlock_irq(&np->lock);

	netif_start_queue(dev);

	if (netif_msg_ifup(np))
		printk(KERN_DEBUG "%s: Done netdev_open(), status: %#08x.\n",
			dev->name, (int)readl(ioaddr + ChipCmd));

	/* Set the timer to check for link beat. */
	timer_setup(&np->timer, netdev_timer, 0);
	np->timer.expires = round_jiffies(jiffies + NATSEMI_TIMER_FREQ);
	add_timer(&np->timer);

	return 0;
}
1580d9fb9f38SJeff Kirsher 
1581d9fb9f38SJeff Kirsher static void do_cable_magic(struct net_device *dev)
1582d9fb9f38SJeff Kirsher {
1583d9fb9f38SJeff Kirsher 	struct netdev_private *np = netdev_priv(dev);
1584d9fb9f38SJeff Kirsher 	void __iomem *ioaddr = ns_ioaddr(dev);
1585d9fb9f38SJeff Kirsher 
1586d9fb9f38SJeff Kirsher 	if (dev->if_port != PORT_TP)
1587d9fb9f38SJeff Kirsher 		return;
1588d9fb9f38SJeff Kirsher 
1589d9fb9f38SJeff Kirsher 	if (np->srr >= SRR_DP83816_A5)
1590d9fb9f38SJeff Kirsher 		return;
1591d9fb9f38SJeff Kirsher 
1592d9fb9f38SJeff Kirsher 	/*
1593d9fb9f38SJeff Kirsher 	 * 100 MBit links with short cables can trip an issue with the chip.
1594d9fb9f38SJeff Kirsher 	 * The problem manifests as lots of CRC errors and/or flickering
1595d9fb9f38SJeff Kirsher 	 * activity LED while idle.  This process is based on instructions
1596d9fb9f38SJeff Kirsher 	 * from engineers at National.
1597d9fb9f38SJeff Kirsher 	 */
1598d9fb9f38SJeff Kirsher 	if (readl(ioaddr + ChipConfig) & CfgSpeed100) {
1599d9fb9f38SJeff Kirsher 		u16 data;
1600d9fb9f38SJeff Kirsher 
1601d9fb9f38SJeff Kirsher 		writew(1, ioaddr + PGSEL);
1602d9fb9f38SJeff Kirsher 		/*
1603d9fb9f38SJeff Kirsher 		 * coefficient visibility should already be enabled via
1604d9fb9f38SJeff Kirsher 		 * DSPCFG | 0x1000
1605d9fb9f38SJeff Kirsher 		 */
1606d9fb9f38SJeff Kirsher 		data = readw(ioaddr + TSTDAT) & 0xff;
1607d9fb9f38SJeff Kirsher 		/*
1608d9fb9f38SJeff Kirsher 		 * the value must be negative, and within certain values
1609d9fb9f38SJeff Kirsher 		 * (these values all come from National)
1610d9fb9f38SJeff Kirsher 		 */
1611d9fb9f38SJeff Kirsher 		if (!(data & 0x80) || ((data >= 0xd8) && (data <= 0xff))) {
1612d9fb9f38SJeff Kirsher 			np = netdev_priv(dev);
1613d9fb9f38SJeff Kirsher 
1614d9fb9f38SJeff Kirsher 			/* the bug has been triggered - fix the coefficient */
1615d9fb9f38SJeff Kirsher 			writew(TSTDAT_FIXED, ioaddr + TSTDAT);
1616d9fb9f38SJeff Kirsher 			/* lock the value */
1617d9fb9f38SJeff Kirsher 			data = readw(ioaddr + DSPCFG);
1618d9fb9f38SJeff Kirsher 			np->dspcfg = data | DSPCFG_LOCK;
1619d9fb9f38SJeff Kirsher 			writew(np->dspcfg, ioaddr + DSPCFG);
1620d9fb9f38SJeff Kirsher 		}
1621d9fb9f38SJeff Kirsher 		writew(0, ioaddr + PGSEL);
1622d9fb9f38SJeff Kirsher 	}
1623d9fb9f38SJeff Kirsher }
1624d9fb9f38SJeff Kirsher 
1625d9fb9f38SJeff Kirsher static void undo_cable_magic(struct net_device *dev)
1626d9fb9f38SJeff Kirsher {
1627d9fb9f38SJeff Kirsher 	u16 data;
1628d9fb9f38SJeff Kirsher 	struct netdev_private *np = netdev_priv(dev);
1629d9fb9f38SJeff Kirsher 	void __iomem * ioaddr = ns_ioaddr(dev);
1630d9fb9f38SJeff Kirsher 
1631d9fb9f38SJeff Kirsher 	if (dev->if_port != PORT_TP)
1632d9fb9f38SJeff Kirsher 		return;
1633d9fb9f38SJeff Kirsher 
1634d9fb9f38SJeff Kirsher 	if (np->srr >= SRR_DP83816_A5)
1635d9fb9f38SJeff Kirsher 		return;
1636d9fb9f38SJeff Kirsher 
1637d9fb9f38SJeff Kirsher 	writew(1, ioaddr + PGSEL);
1638d9fb9f38SJeff Kirsher 	/* make sure the lock bit is clear */
1639d9fb9f38SJeff Kirsher 	data = readw(ioaddr + DSPCFG);
1640d9fb9f38SJeff Kirsher 	np->dspcfg = data & ~DSPCFG_LOCK;
1641d9fb9f38SJeff Kirsher 	writew(np->dspcfg, ioaddr + DSPCFG);
1642d9fb9f38SJeff Kirsher 	writew(0, ioaddr + PGSEL);
1643d9fb9f38SJeff Kirsher }
1644d9fb9f38SJeff Kirsher 
/*
 * check_link - update carrier state from the PHY and, when the duplex
 * setting changed, propagate it into TxConfig/RxConfig.
 */
static void check_link(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem * ioaddr = ns_ioaddr(dev);
	int duplex = np->duplex;
	u16 bmsr;

	/* If we are ignoring the PHY then don't try reading it. */
	if (np->ignore_phy)
		goto propagate_state;

	/* The link status field is latched: it remains low after a temporary
	 * link failure until it's read. We need the current link status,
	 * thus read twice.
	 */
	mdio_read(dev, MII_BMSR);
	bmsr = mdio_read(dev, MII_BMSR);

	/* link down: drop carrier and undo the short-cable workaround */
	if (!(bmsr & BMSR_LSTATUS)) {
		if (netif_carrier_ok(dev)) {
			if (netif_msg_link(np))
				printk(KERN_NOTICE "%s: link down.\n",
				       dev->name);
			netif_carrier_off(dev);
			undo_cable_magic(dev);
		}
		return;
	}
	/* link (newly) up: announce carrier and apply the workaround */
	if (!netif_carrier_ok(dev)) {
		if (netif_msg_link(np))
			printk(KERN_NOTICE "%s: link up.\n", dev->name);
		netif_carrier_on(dev);
		do_cable_magic(dev);
	}

	duplex = np->full_duplex;
	if (!duplex) {
		/* derive duplex from the autoneg result, or from BMCR if
		 * autonegotiation has not completed */
		if (bmsr & BMSR_ANEGCOMPLETE) {
			int tmp = mii_nway_result(
				np->advertising & mdio_read(dev, MII_LPA));
			if (tmp == LPA_100FULL || tmp == LPA_10FULL)
				duplex = 1;
		} else if (mdio_read(dev, MII_BMCR) & BMCR_FULLDPLX)
			duplex = 1;
	}

propagate_state:
	/* if duplex is set then bit 28 must be set, too */
	if (duplex ^ !!(np->rx_config & RxAcceptTx)) {
		if (netif_msg_link(np))
			printk(KERN_INFO
				"%s: Setting %s-duplex based on negotiated "
				"link capability.\n", dev->name,
				duplex ? "full" : "half");
		if (duplex) {
			np->rx_config |= RxAcceptTx;
			np->tx_config |= TxCarrierIgn | TxHeartIgn;
		} else {
			np->rx_config &= ~RxAcceptTx;
			np->tx_config &= ~(TxCarrierIgn | TxHeartIgn);
		}
		writel(np->tx_config, ioaddr + TxConfig);
		writel(np->rx_config, ioaddr + RxConfig);
	}
}
1710d9fb9f38SJeff Kirsher 
/*
 * init_registers - program the chip for operation after a reset:
 * PHY fixup, ring pointers, tx/rx DMA configuration, PME disable,
 * link check, rx filter mode, interrupt enable and finally rx/tx on.
 * netdev_open() calls this under np->lock.
 */
static void init_registers(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem * ioaddr = ns_ioaddr(dev);

	init_phy_fixup(dev);

	/* clear any interrupts that are pending, such as wake events */
	readl(ioaddr + IntrStatus);

	/* tx ring is located directly after the rx ring in one DMA area */
	writel(np->ring_dma, ioaddr + RxRingPtr);
	writel(np->ring_dma + RX_RING_SIZE * sizeof(struct netdev_desc),
		ioaddr + TxRingPtr);

	/* Initialize other registers.
	 * Configure the PCI bus bursts and FIFO thresholds.
	 * Configure for standard, in-spec Ethernet.
	 * Start with half-duplex. check_link will update
	 * to the correct settings.
	 */

	/* DRTH: 2: start tx if 64 bytes are in the fifo
	 * FLTH: 0x10: refill with next packet if 512 bytes are free
	 * MXDMA: 0: up to 256 byte bursts.
	 * 	MXDMA must be <= FLTH
	 * ECRETRY=1
	 * ATP=1
	 */
	np->tx_config = TxAutoPad | TxCollRetry | TxMxdma_256 |
				TX_FLTH_VAL | TX_DRTH_VAL_START;
	writel(np->tx_config, ioaddr + TxConfig);

	/* DRTH 0x10: start copying to memory if 128 bytes are in the fifo
	 * MXDMA 0: up to 256 byte bursts
	 */
	np->rx_config = RxMxdma_256 | RX_DRTH_VAL;
	/* if receive ring now has bigger buffers than normal, enable jumbo */
	if (np->rx_buf_sz > NATSEMI_LONGPKT)
		np->rx_config |= RxAcceptLong;

	writel(np->rx_config, ioaddr + RxConfig);

	/* Disable PME:
	 * The PME bit is initialized from the EEPROM contents.
	 * PCI cards probably have PME disabled, but motherboard
	 * implementations may have PME set to enable WakeOnLan.
	 * With PME set the chip will scan incoming packets but
	 * nothing will be written to memory. */
	np->SavedClkRun = readl(ioaddr + ClkRun);
	writel(np->SavedClkRun & ~PMEEnable, ioaddr + ClkRun);
	if (np->SavedClkRun & PMEStatus && netif_msg_wol(np)) {
		printk(KERN_NOTICE "%s: Wake-up event %#08x\n",
			dev->name, readl(ioaddr + WOLCmd));
	}

	check_link(dev);
	__set_rx_mode(dev);

	/* Enable interrupts by setting the interrupt mask. */
	writel(DEFAULT_INTR, ioaddr + IntrMask);
	natsemi_irq_enable(dev);

	/* start the receiver and transmitter */
	writel(RxOn | TxOn, ioaddr + ChipCmd);
	writel(StatsClear, ioaddr + StatsCtrl); /* Clear Stats */
}
1776d9fb9f38SJeff Kirsher 
1777d9fb9f38SJeff Kirsher /*
1778d9fb9f38SJeff Kirsher  * netdev_timer:
1779d9fb9f38SJeff Kirsher  * Purpose:
1780d9fb9f38SJeff Kirsher  * 1) check for link changes. Usually they are handled by the MII interrupt
1781d9fb9f38SJeff Kirsher  *    but it doesn't hurt to check twice.
1782d9fb9f38SJeff Kirsher  * 2) check for sudden death of the NIC:
1783d9fb9f38SJeff Kirsher  *    It seems that a reference set for this chip went out with incorrect info,
1784d9fb9f38SJeff Kirsher  *    and there exist boards that aren't quite right.  An unexpected voltage
1785d9fb9f38SJeff Kirsher  *    drop can cause the PHY to get itself in a weird state (basically reset).
1786d9fb9f38SJeff Kirsher  *    NOTE: this only seems to affect revC chips.  The user can disable
1787d9fb9f38SJeff Kirsher  *    this check via dspcfg_workaround sysfs option.
1788d9fb9f38SJeff Kirsher  * 3) check of death of the RX path due to OOM
1789d9fb9f38SJeff Kirsher  */
static void netdev_timer(struct timer_list *t)
{
	struct netdev_private *np = from_timer(np, t, timer);
	struct net_device *dev = np->dev;
	void __iomem * ioaddr = ns_ioaddr(dev);
	int next_tick = NATSEMI_TIMER_FREQ;
	const int irq = np->pci_dev->irq;

	if (netif_msg_timer(np)) {
		/* DO NOT read the IntrStatus register,
		 * a read clears any pending interrupts.
		 */
		printk(KERN_DEBUG "%s: Media selection timer tick.\n",
			dev->name);
	}

	if (dev->if_port == PORT_TP) {
		u16 dspcfg;

		spin_lock_irq(&np->lock);
		/* check for a nasty random phy-reset - use dspcfg as a flag */
		writew(1, ioaddr+PGSEL);
		dspcfg = readw(ioaddr+DSPCFG);
		writew(0, ioaddr+PGSEL);
		if (np->dspcfg_workaround && dspcfg != np->dspcfg) {
			if (!netif_queue_stopped(dev)) {
				/* Drop the lock before disable_irq(): the
				 * handler takes np->lock too, so masking the
				 * line while holding it could deadlock. */
				spin_unlock_irq(&np->lock);
				if (netif_msg_drv(np))
					printk(KERN_NOTICE "%s: possible phy reset: "
						"re-initializing\n", dev->name);
				disable_irq(irq);
				spin_lock_irq(&np->lock);
				/* Full recovery: quiesce, rebuild rings,
				 * re-program all chip registers. */
				natsemi_stop_rxtx(dev);
				dump_ring(dev);
				reinit_ring(dev);
				init_registers(dev);
				spin_unlock_irq(&np->lock);
				enable_irq(irq);
			} else {
				/* hurry back */
				next_tick = HZ;
				spin_unlock_irq(&np->lock);
			}
		} else {
			/* init_registers() calls check_link() for the above case */
			check_link(dev);
			spin_unlock_irq(&np->lock);
		}
	} else {
		spin_lock_irq(&np->lock);
		check_link(dev);
		spin_unlock_irq(&np->lock);
	}
	if (np->oom) {
		/* RX ran out of buffers earlier; retry the allocation with
		 * the device IRQ masked so refill_rx() cannot race the
		 * interrupt/poll path. */
		disable_irq(irq);
		np->oom = 0;
		refill_rx(dev);
		enable_irq(irq);
		if (!np->oom) {
			/* Buffers available again: restart the receiver. */
			writel(RxOn, ioaddr + ChipCmd);
		} else {
			/* Still out of memory: poll again on the next jiffy. */
			next_tick = 1;
		}
	}

	/* round_jiffies() batches wakeups; skip it for the urgent 1-jiffy
	 * OOM retry. */
	if (next_tick > 1)
		mod_timer(&np->timer, round_jiffies(jiffies + next_tick));
	else
		mod_timer(&np->timer, jiffies + next_tick);
}
1860d9fb9f38SJeff Kirsher 
1861d9fb9f38SJeff Kirsher static void dump_ring(struct net_device *dev)
1862d9fb9f38SJeff Kirsher {
1863d9fb9f38SJeff Kirsher 	struct netdev_private *np = netdev_priv(dev);
1864d9fb9f38SJeff Kirsher 
1865d9fb9f38SJeff Kirsher 	if (netif_msg_pktdata(np)) {
1866d9fb9f38SJeff Kirsher 		int i;
1867d9fb9f38SJeff Kirsher 		printk(KERN_DEBUG "  Tx ring at %p:\n", np->tx_ring);
1868d9fb9f38SJeff Kirsher 		for (i = 0; i < TX_RING_SIZE; i++) {
1869d9fb9f38SJeff Kirsher 			printk(KERN_DEBUG " #%d desc. %#08x %#08x %#08x.\n",
1870d9fb9f38SJeff Kirsher 				i, np->tx_ring[i].next_desc,
1871d9fb9f38SJeff Kirsher 				np->tx_ring[i].cmd_status,
1872d9fb9f38SJeff Kirsher 				np->tx_ring[i].addr);
1873d9fb9f38SJeff Kirsher 		}
1874d9fb9f38SJeff Kirsher 		printk(KERN_DEBUG "  Rx ring %p:\n", np->rx_ring);
1875d9fb9f38SJeff Kirsher 		for (i = 0; i < RX_RING_SIZE; i++) {
1876d9fb9f38SJeff Kirsher 			printk(KERN_DEBUG " #%d desc. %#08x %#08x %#08x.\n",
1877d9fb9f38SJeff Kirsher 				i, np->rx_ring[i].next_desc,
1878d9fb9f38SJeff Kirsher 				np->rx_ring[i].cmd_status,
1879d9fb9f38SJeff Kirsher 				np->rx_ring[i].addr);
1880d9fb9f38SJeff Kirsher 		}
1881d9fb9f38SJeff Kirsher 	}
1882d9fb9f38SJeff Kirsher }
1883d9fb9f38SJeff Kirsher 
static void ns_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem * ioaddr = ns_ioaddr(dev);
	const int irq = np->pci_dev->irq;

	/* Keep the interrupt handler away while the chip is reset: mask
	 * the IRQ line first, then take the driver lock. */
	disable_irq(irq);
	spin_lock_irq(&np->lock);
	if (!np->hands_off) {
		if (netif_msg_tx_err(np))
			printk(KERN_WARNING
				"%s: Transmit timed out, status %#08x,"
				" resetting...\n",
				dev->name, readl(ioaddr + IntrStatus));
		dump_ring(dev);

		/* Full recovery: hardware reset, fresh rings, re-program
		 * every register. */
		natsemi_reset(dev);
		reinit_ring(dev);
		init_registers(dev);
	} else {
		/* hands_off set: some other path currently owns the
		 * hardware, so a timeout here is unexpected.
		 * NOTE(review): confirm hands_off semantics against the
		 * code that sets it (not visible in this chunk). */
		printk(KERN_WARNING
			"%s: tx_timeout while in hands_off state?\n",
			dev->name);
	}
	spin_unlock_irq(&np->lock);
	enable_irq(irq);

	netif_trans_update(dev); /* prevent tx timeout */
	dev->stats.tx_errors++;
	netif_wake_queue(dev);
}
1915d9fb9f38SJeff Kirsher 
1916d9fb9f38SJeff Kirsher static int alloc_ring(struct net_device *dev)
1917d9fb9f38SJeff Kirsher {
1918d9fb9f38SJeff Kirsher 	struct netdev_private *np = netdev_priv(dev);
1919d9fb9f38SJeff Kirsher 	np->rx_ring = pci_alloc_consistent(np->pci_dev,
1920d9fb9f38SJeff Kirsher 		sizeof(struct netdev_desc) * (RX_RING_SIZE+TX_RING_SIZE),
1921d9fb9f38SJeff Kirsher 		&np->ring_dma);
1922d9fb9f38SJeff Kirsher 	if (!np->rx_ring)
1923d9fb9f38SJeff Kirsher 		return -ENOMEM;
1924d9fb9f38SJeff Kirsher 	np->tx_ring = &np->rx_ring[RX_RING_SIZE];
1925d9fb9f38SJeff Kirsher 	return 0;
1926d9fb9f38SJeff Kirsher }
1927d9fb9f38SJeff Kirsher 
1928d9fb9f38SJeff Kirsher static void refill_rx(struct net_device *dev)
1929d9fb9f38SJeff Kirsher {
1930d9fb9f38SJeff Kirsher 	struct netdev_private *np = netdev_priv(dev);
1931d9fb9f38SJeff Kirsher 
1932d9fb9f38SJeff Kirsher 	/* Refill the Rx ring buffers. */
1933d9fb9f38SJeff Kirsher 	for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
1934d9fb9f38SJeff Kirsher 		struct sk_buff *skb;
1935d9fb9f38SJeff Kirsher 		int entry = np->dirty_rx % RX_RING_SIZE;
1936d9fb9f38SJeff Kirsher 		if (np->rx_skbuff[entry] == NULL) {
1937d9fb9f38SJeff Kirsher 			unsigned int buflen = np->rx_buf_sz+NATSEMI_PADDING;
1938c056b734SPradeep A Dalvi 			skb = netdev_alloc_skb(dev, buflen);
1939d9fb9f38SJeff Kirsher 			np->rx_skbuff[entry] = skb;
1940d9fb9f38SJeff Kirsher 			if (skb == NULL)
1941d9fb9f38SJeff Kirsher 				break; /* Better luck next round. */
1942d9fb9f38SJeff Kirsher 			np->rx_dma[entry] = pci_map_single(np->pci_dev,
1943d9fb9f38SJeff Kirsher 				skb->data, buflen, PCI_DMA_FROMDEVICE);
194445af5500SAlexey Khoroshilov 			if (pci_dma_mapping_error(np->pci_dev,
194545af5500SAlexey Khoroshilov 						  np->rx_dma[entry])) {
194645af5500SAlexey Khoroshilov 				dev_kfree_skb_any(skb);
194745af5500SAlexey Khoroshilov 				np->rx_skbuff[entry] = NULL;
194845af5500SAlexey Khoroshilov 				break; /* Better luck next round. */
194945af5500SAlexey Khoroshilov 			}
1950d9fb9f38SJeff Kirsher 			np->rx_ring[entry].addr = cpu_to_le32(np->rx_dma[entry]);
1951d9fb9f38SJeff Kirsher 		}
1952d9fb9f38SJeff Kirsher 		np->rx_ring[entry].cmd_status = cpu_to_le32(np->rx_buf_sz);
1953d9fb9f38SJeff Kirsher 	}
1954d9fb9f38SJeff Kirsher 	if (np->cur_rx - np->dirty_rx == RX_RING_SIZE) {
1955d9fb9f38SJeff Kirsher 		if (netif_msg_rx_err(np))
1956d9fb9f38SJeff Kirsher 			printk(KERN_WARNING "%s: going OOM.\n", dev->name);
1957d9fb9f38SJeff Kirsher 		np->oom = 1;
1958d9fb9f38SJeff Kirsher 	}
1959d9fb9f38SJeff Kirsher }
1960d9fb9f38SJeff Kirsher 
1961d9fb9f38SJeff Kirsher static void set_bufsize(struct net_device *dev)
1962d9fb9f38SJeff Kirsher {
1963d9fb9f38SJeff Kirsher 	struct netdev_private *np = netdev_priv(dev);
1964d9fb9f38SJeff Kirsher 	if (dev->mtu <= ETH_DATA_LEN)
1965d9fb9f38SJeff Kirsher 		np->rx_buf_sz = ETH_DATA_LEN + NATSEMI_HEADERS;
1966d9fb9f38SJeff Kirsher 	else
1967d9fb9f38SJeff Kirsher 		np->rx_buf_sz = dev->mtu + NATSEMI_HEADERS;
1968d9fb9f38SJeff Kirsher }
1969d9fb9f38SJeff Kirsher 
/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static void init_ring(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int i;

	/* 1) TX ring */
	np->dirty_tx = np->cur_tx = 0;
	for (i = 0; i < TX_RING_SIZE; i++) {
		np->tx_skbuff[i] = NULL;
		/* Chain each descriptor to the next; TX descriptors sit
		 * RX_RING_SIZE entries into the shared DMA allocation
		 * (see alloc_ring). */
		np->tx_ring[i].next_desc = cpu_to_le32(np->ring_dma
			+sizeof(struct netdev_desc)
			*((i+1)%TX_RING_SIZE+RX_RING_SIZE));
		np->tx_ring[i].cmd_status = 0;
	}

	/* 2) RX ring */
	np->dirty_rx = 0;
	/* cur_rx starts a full ring ahead of dirty_rx so that refill_rx()
	 * below populates every slot. */
	np->cur_rx = RX_RING_SIZE;
	np->oom = 0;
	set_bufsize(dev);

	np->rx_head_desc = &np->rx_ring[0];

	/* Please be careful before changing this loop - at least gcc-2.95.1
	 * miscompiles it otherwise.
	 */
	/* Initialize all Rx descriptors. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].next_desc = cpu_to_le32(np->ring_dma
				+sizeof(struct netdev_desc)
				*((i+1)%RX_RING_SIZE));
		/* All descriptors start owned by the NIC. */
		np->rx_ring[i].cmd_status = cpu_to_le32(DescOwn);
		np->rx_skbuff[i] = NULL;
	}
	/* Attach actual receive buffers to the freshly linked ring. */
	refill_rx(dev);
	dump_ring(dev);
}
2008d9fb9f38SJeff Kirsher 
2009d9fb9f38SJeff Kirsher static void drain_tx(struct net_device *dev)
2010d9fb9f38SJeff Kirsher {
2011d9fb9f38SJeff Kirsher 	struct netdev_private *np = netdev_priv(dev);
2012d9fb9f38SJeff Kirsher 	int i;
2013d9fb9f38SJeff Kirsher 
2014d9fb9f38SJeff Kirsher 	for (i = 0; i < TX_RING_SIZE; i++) {
2015d9fb9f38SJeff Kirsher 		if (np->tx_skbuff[i]) {
2016d9fb9f38SJeff Kirsher 			pci_unmap_single(np->pci_dev,
2017d9fb9f38SJeff Kirsher 				np->tx_dma[i], np->tx_skbuff[i]->len,
2018d9fb9f38SJeff Kirsher 				PCI_DMA_TODEVICE);
2019d9fb9f38SJeff Kirsher 			dev_kfree_skb(np->tx_skbuff[i]);
2020d9fb9f38SJeff Kirsher 			dev->stats.tx_dropped++;
2021d9fb9f38SJeff Kirsher 		}
2022d9fb9f38SJeff Kirsher 		np->tx_skbuff[i] = NULL;
2023d9fb9f38SJeff Kirsher 	}
2024d9fb9f38SJeff Kirsher }
2025d9fb9f38SJeff Kirsher 
2026d9fb9f38SJeff Kirsher static void drain_rx(struct net_device *dev)
2027d9fb9f38SJeff Kirsher {
2028d9fb9f38SJeff Kirsher 	struct netdev_private *np = netdev_priv(dev);
2029d9fb9f38SJeff Kirsher 	unsigned int buflen = np->rx_buf_sz;
2030d9fb9f38SJeff Kirsher 	int i;
2031d9fb9f38SJeff Kirsher 
2032d9fb9f38SJeff Kirsher 	/* Free all the skbuffs in the Rx queue. */
2033d9fb9f38SJeff Kirsher 	for (i = 0; i < RX_RING_SIZE; i++) {
2034d9fb9f38SJeff Kirsher 		np->rx_ring[i].cmd_status = 0;
2035d9fb9f38SJeff Kirsher 		np->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
2036d9fb9f38SJeff Kirsher 		if (np->rx_skbuff[i]) {
2037d9fb9f38SJeff Kirsher 			pci_unmap_single(np->pci_dev, np->rx_dma[i],
2038d9fb9f38SJeff Kirsher 				buflen + NATSEMI_PADDING,
2039d9fb9f38SJeff Kirsher 				PCI_DMA_FROMDEVICE);
2040d9fb9f38SJeff Kirsher 			dev_kfree_skb(np->rx_skbuff[i]);
2041d9fb9f38SJeff Kirsher 		}
2042d9fb9f38SJeff Kirsher 		np->rx_skbuff[i] = NULL;
2043d9fb9f38SJeff Kirsher 	}
2044d9fb9f38SJeff Kirsher }
2045d9fb9f38SJeff Kirsher 
/* Tear down both directions of the descriptor rings (RX first). */
static void drain_ring(struct net_device *dev)
{
	drain_rx(dev);
	drain_tx(dev);
}
2051d9fb9f38SJeff Kirsher 
2052d9fb9f38SJeff Kirsher static void free_ring(struct net_device *dev)
2053d9fb9f38SJeff Kirsher {
2054d9fb9f38SJeff Kirsher 	struct netdev_private *np = netdev_priv(dev);
2055d9fb9f38SJeff Kirsher 	pci_free_consistent(np->pci_dev,
2056d9fb9f38SJeff Kirsher 		sizeof(struct netdev_desc) * (RX_RING_SIZE+TX_RING_SIZE),
2057d9fb9f38SJeff Kirsher 		np->rx_ring, np->ring_dma);
2058d9fb9f38SJeff Kirsher }
2059d9fb9f38SJeff Kirsher 
2060d9fb9f38SJeff Kirsher static void reinit_rx(struct net_device *dev)
2061d9fb9f38SJeff Kirsher {
2062d9fb9f38SJeff Kirsher 	struct netdev_private *np = netdev_priv(dev);
2063d9fb9f38SJeff Kirsher 	int i;
2064d9fb9f38SJeff Kirsher 
2065d9fb9f38SJeff Kirsher 	/* RX Ring */
2066d9fb9f38SJeff Kirsher 	np->dirty_rx = 0;
2067d9fb9f38SJeff Kirsher 	np->cur_rx = RX_RING_SIZE;
2068d9fb9f38SJeff Kirsher 	np->rx_head_desc = &np->rx_ring[0];
2069d9fb9f38SJeff Kirsher 	/* Initialize all Rx descriptors. */
2070d9fb9f38SJeff Kirsher 	for (i = 0; i < RX_RING_SIZE; i++)
2071d9fb9f38SJeff Kirsher 		np->rx_ring[i].cmd_status = cpu_to_le32(DescOwn);
2072d9fb9f38SJeff Kirsher 
2073d9fb9f38SJeff Kirsher 	refill_rx(dev);
2074d9fb9f38SJeff Kirsher }
2075d9fb9f38SJeff Kirsher 
2076d9fb9f38SJeff Kirsher static void reinit_ring(struct net_device *dev)
2077d9fb9f38SJeff Kirsher {
2078d9fb9f38SJeff Kirsher 	struct netdev_private *np = netdev_priv(dev);
2079d9fb9f38SJeff Kirsher 	int i;
2080d9fb9f38SJeff Kirsher 
2081d9fb9f38SJeff Kirsher 	/* drain TX ring */
2082d9fb9f38SJeff Kirsher 	drain_tx(dev);
2083d9fb9f38SJeff Kirsher 	np->dirty_tx = np->cur_tx = 0;
2084d9fb9f38SJeff Kirsher 	for (i=0;i<TX_RING_SIZE;i++)
2085d9fb9f38SJeff Kirsher 		np->tx_ring[i].cmd_status = 0;
2086d9fb9f38SJeff Kirsher 
2087d9fb9f38SJeff Kirsher 	reinit_rx(dev);
2088d9fb9f38SJeff Kirsher }
2089d9fb9f38SJeff Kirsher 
static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem * ioaddr = ns_ioaddr(dev);
	unsigned entry;
	unsigned long flags;

	/* Note: Ordering is important here, set the field with the
	   "ownership" bit last, and only then increment cur_tx. */

	/* Calculate the next Tx descriptor entry. */
	entry = np->cur_tx % TX_RING_SIZE;

	np->tx_skbuff[entry] = skb;
	np->tx_dma[entry] = pci_map_single(np->pci_dev,
				skb->data,skb->len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(np->pci_dev, np->tx_dma[entry])) {
		/* Mapping failed: drop the frame but return TX_OK so the
		 * stack does not requeue a packet we can never DMA. */
		np->tx_skbuff[entry] = NULL;
		dev_kfree_skb_irq(skb);
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	np->tx_ring[entry].addr = cpu_to_le32(np->tx_dma[entry]);

	spin_lock_irqsave(&np->lock, flags);

	if (!np->hands_off) {
		/* Ownership bit set last, after addr above is visible. */
		np->tx_ring[entry].cmd_status = cpu_to_le32(DescOwn | skb->len);
		/* StrongARM: Explicitly cache flush np->tx_ring and
		 * skb->data,skb->len. */
		wmb();
		np->cur_tx++;
		if (np->cur_tx - np->dirty_tx >= TX_QUEUE_LEN - 1) {
			/* Ring looks full: reap completed slots first, and
			 * only stop the queue if that freed nothing. */
			netdev_tx_done(dev);
			if (np->cur_tx - np->dirty_tx >= TX_QUEUE_LEN - 1)
				netif_stop_queue(dev);
		}
		/* Wake the potentially-idle transmit channel. */
		writel(TxOn, ioaddr + ChipCmd);
	} else {
		/* hands_off set: the hardware is owned elsewhere; drop. */
		dev_kfree_skb_irq(skb);
		dev->stats.tx_dropped++;
	}
	spin_unlock_irqrestore(&np->lock, flags);

	if (netif_msg_tx_queued(np)) {
		printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n",
			dev->name, np->cur_tx, entry);
	}
	return NETDEV_TX_OK;
}
2142d9fb9f38SJeff Kirsher 
/* Reclaim completed TX descriptors, update stats and wake the queue.
 * Callers in this file hold np->lock (see start_tx / natsemi_poll). */
static void netdev_tx_done(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);

	/* Walk forward from the oldest outstanding descriptor. */
	for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
		int entry = np->dirty_tx % TX_RING_SIZE;
		/* Chip still owns this one: everything after is pending too. */
		if (np->tx_ring[entry].cmd_status & cpu_to_le32(DescOwn))
			break;
		if (netif_msg_tx_done(np))
			printk(KERN_DEBUG
				"%s: tx frame #%d finished, status %#08x.\n",
					dev->name, np->dirty_tx,
					le32_to_cpu(np->tx_ring[entry].cmd_status));
		if (np->tx_ring[entry].cmd_status & cpu_to_le32(DescPktOK)) {
			dev->stats.tx_packets++;
			dev->stats.tx_bytes += np->tx_skbuff[entry]->len;
		} else { /* Various Tx errors */
			int tx_status =
				le32_to_cpu(np->tx_ring[entry].cmd_status);
			/* A frame may carry several error bits at once;
			 * count each category, plus the overall error. */
			if (tx_status & (DescTxAbort|DescTxExcColl))
				dev->stats.tx_aborted_errors++;
			if (tx_status & DescTxFIFO)
				dev->stats.tx_fifo_errors++;
			if (tx_status & DescTxCarrier)
				dev->stats.tx_carrier_errors++;
			if (tx_status & DescTxOOWCol)
				dev->stats.tx_window_errors++;
			dev->stats.tx_errors++;
		}
		pci_unmap_single(np->pci_dev,np->tx_dma[entry],
					np->tx_skbuff[entry]->len,
					PCI_DMA_TODEVICE);
		/* Free the original skb. */
		dev_consume_skb_irq(np->tx_skbuff[entry]);
		np->tx_skbuff[entry] = NULL;
	}
	/* Hysteresis: wake only when clearly below the stop threshold. */
	if (netif_queue_stopped(dev) &&
	    np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
		/* The ring is no longer full, wake queue. */
		netif_wake_queue(dev);
	}
}
2185d9fb9f38SJeff Kirsher 
/* The interrupt handler doesn't actually handle interrupts itself, it
 * schedules a NAPI poll if there is anything to do. */
static irqreturn_t intr_handler(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct netdev_private *np = netdev_priv(dev);
	void __iomem * ioaddr = ns_ioaddr(dev);

	/* Reading IntrStatus automatically acknowledges so don't do
	 * that while interrupts are disabled, (for example, while a
	 * poll is scheduled).  */
	if (np->hands_off || !readl(ioaddr + IntrEnable))
		return IRQ_NONE;

	/* This read also acknowledges the pending interrupt bits. */
	np->intr_status = readl(ioaddr + IntrStatus);

	/* No bits set: the (possibly shared) line fired for someone else. */
	if (!np->intr_status)
		return IRQ_NONE;

	if (netif_msg_intr(np))
		printk(KERN_DEBUG
		       "%s: Interrupt, status %#08x, mask %#08x.\n",
		       dev->name, np->intr_status,
		       readl(ioaddr + IntrMask));

	/* Warm the cache line the poll routine will touch first. */
	prefetch(&np->rx_skbuff[np->cur_rx % RX_RING_SIZE]);

	if (napi_schedule_prep(&np->napi)) {
		/* Disable interrupts and register for poll */
		natsemi_irq_disable(dev);
		__napi_schedule(&np->napi);
	} else
		/* A poll is already scheduled; unexpected because the chip
		 * IRQ is masked while polling. */
		printk(KERN_WARNING
	       	       "%s: Ignoring interrupt, status %#08x, mask %#08x.\n",
		       dev->name, np->intr_status,
		       readl(ioaddr + IntrMask));

	return IRQ_HANDLED;
}
2225d9fb9f38SJeff Kirsher 
/* This is the NAPI poll routine.  As well as the standard RX handling
 * it also handles all other interrupts that the chip might raise.
 */
static int natsemi_poll(struct napi_struct *napi, int budget)
{
	struct netdev_private *np = container_of(napi, struct netdev_private, napi);
	struct net_device *dev = np->dev;
	void __iomem * ioaddr = ns_ioaddr(dev);
	int work_done = 0;

	/* np->intr_status was captured by intr_handler() before it masked
	 * the chip; later iterations re-read it at the bottom. */
	do {
		if (netif_msg_intr(np))
			printk(KERN_DEBUG
			       "%s: Poll, status %#08x, mask %#08x.\n",
			       dev->name, np->intr_status,
			       readl(ioaddr + IntrMask));

		/* netdev_rx() may read IntrStatus again if the RX state
		 * machine falls over so do it first. */
		if (np->intr_status &
		    (IntrRxDone | IntrRxIntr | RxStatusFIFOOver |
		     IntrRxErr | IntrRxOverrun)) {
			netdev_rx(dev, &work_done, budget);
		}

		if (np->intr_status &
		    (IntrTxDone | IntrTxIntr | IntrTxIdle | IntrTxErr)) {
			/* TX reclaim needs np->lock. */
			spin_lock(&np->lock);
			netdev_tx_done(dev);
			spin_unlock(&np->lock);
		}

		/* Abnormal error summary/uncommon events handlers. */
		if (np->intr_status & IntrAbnormalSummary)
			netdev_error(dev, np->intr_status);

		/* Budget used up: stay scheduled, chip IRQ stays masked. */
		if (work_done >= budget)
			return work_done;

		/* Pick up (and acknowledge) events that arrived meanwhile. */
		np->intr_status = readl(ioaddr + IntrStatus);
	} while (np->intr_status);

	napi_complete_done(napi, work_done);

	/* Reenable interrupts providing nothing is trying to shut
	 * the chip down. */
	spin_lock(&np->lock);
	if (!np->hands_off)
		natsemi_irq_enable(dev);
	spin_unlock(&np->lock);

	return work_done;
}
2279d9fb9f38SJeff Kirsher 
2280d9fb9f38SJeff Kirsher /* This routine is logically part of the interrupt handler, but separated
2281d9fb9f38SJeff Kirsher    for clarity and better register allocation. */
2282d9fb9f38SJeff Kirsher static void netdev_rx(struct net_device *dev, int *work_done, int work_to_do)
2283d9fb9f38SJeff Kirsher {
2284d9fb9f38SJeff Kirsher 	struct netdev_private *np = netdev_priv(dev);
2285d9fb9f38SJeff Kirsher 	int entry = np->cur_rx % RX_RING_SIZE;
2286d9fb9f38SJeff Kirsher 	int boguscnt = np->dirty_rx + RX_RING_SIZE - np->cur_rx;
2287d9fb9f38SJeff Kirsher 	s32 desc_status = le32_to_cpu(np->rx_head_desc->cmd_status);
2288d9fb9f38SJeff Kirsher 	unsigned int buflen = np->rx_buf_sz;
2289d9fb9f38SJeff Kirsher 	void __iomem * ioaddr = ns_ioaddr(dev);
2290d9fb9f38SJeff Kirsher 
2291d9fb9f38SJeff Kirsher 	/* If the driver owns the next entry it's a new packet. Send it up. */
2292d9fb9f38SJeff Kirsher 	while (desc_status < 0) { /* e.g. & DescOwn */
2293d9fb9f38SJeff Kirsher 		int pkt_len;
2294d9fb9f38SJeff Kirsher 		if (netif_msg_rx_status(np))
2295d9fb9f38SJeff Kirsher 			printk(KERN_DEBUG
2296d9fb9f38SJeff Kirsher 				"  netdev_rx() entry %d status was %#08x.\n",
2297d9fb9f38SJeff Kirsher 				entry, desc_status);
2298d9fb9f38SJeff Kirsher 		if (--boguscnt < 0)
2299d9fb9f38SJeff Kirsher 			break;
2300d9fb9f38SJeff Kirsher 
2301d9fb9f38SJeff Kirsher 		if (*work_done >= work_to_do)
2302d9fb9f38SJeff Kirsher 			break;
2303d9fb9f38SJeff Kirsher 
2304d9fb9f38SJeff Kirsher 		(*work_done)++;
2305d9fb9f38SJeff Kirsher 
2306d9fb9f38SJeff Kirsher 		pkt_len = (desc_status & DescSizeMask) - 4;
2307d9fb9f38SJeff Kirsher 		if ((desc_status&(DescMore|DescPktOK|DescRxLong)) != DescPktOK){
2308d9fb9f38SJeff Kirsher 			if (desc_status & DescMore) {
2309d9fb9f38SJeff Kirsher 				unsigned long flags;
2310d9fb9f38SJeff Kirsher 
2311d9fb9f38SJeff Kirsher 				if (netif_msg_rx_err(np))
2312d9fb9f38SJeff Kirsher 					printk(KERN_WARNING
2313d9fb9f38SJeff Kirsher 						"%s: Oversized(?) Ethernet "
2314d9fb9f38SJeff Kirsher 						"frame spanned multiple "
2315d9fb9f38SJeff Kirsher 						"buffers, entry %#08x "
2316d9fb9f38SJeff Kirsher 						"status %#08x.\n", dev->name,
2317d9fb9f38SJeff Kirsher 						np->cur_rx, desc_status);
2318d9fb9f38SJeff Kirsher 				dev->stats.rx_length_errors++;
2319d9fb9f38SJeff Kirsher 
2320d9fb9f38SJeff Kirsher 				/* The RX state machine has probably
2321d9fb9f38SJeff Kirsher 				 * locked up beneath us.  Follow the
2322d9fb9f38SJeff Kirsher 				 * reset procedure documented in
2323d9fb9f38SJeff Kirsher 				 * AN-1287. */
2324d9fb9f38SJeff Kirsher 
2325d9fb9f38SJeff Kirsher 				spin_lock_irqsave(&np->lock, flags);
2326d9fb9f38SJeff Kirsher 				reset_rx(dev);
2327d9fb9f38SJeff Kirsher 				reinit_rx(dev);
2328d9fb9f38SJeff Kirsher 				writel(np->ring_dma, ioaddr + RxRingPtr);
2329d9fb9f38SJeff Kirsher 				check_link(dev);
2330d9fb9f38SJeff Kirsher 				spin_unlock_irqrestore(&np->lock, flags);
2331d9fb9f38SJeff Kirsher 
2332d9fb9f38SJeff Kirsher 				/* We'll enable RX on exit from this
2333d9fb9f38SJeff Kirsher 				 * function. */
2334d9fb9f38SJeff Kirsher 				break;
2335d9fb9f38SJeff Kirsher 
2336d9fb9f38SJeff Kirsher 			} else {
2337d9fb9f38SJeff Kirsher 				/* There was an error. */
2338d9fb9f38SJeff Kirsher 				dev->stats.rx_errors++;
2339d9fb9f38SJeff Kirsher 				if (desc_status & (DescRxAbort|DescRxOver))
2340d9fb9f38SJeff Kirsher 					dev->stats.rx_over_errors++;
2341d9fb9f38SJeff Kirsher 				if (desc_status & (DescRxLong|DescRxRunt))
2342d9fb9f38SJeff Kirsher 					dev->stats.rx_length_errors++;
2343d9fb9f38SJeff Kirsher 				if (desc_status & (DescRxInvalid|DescRxAlign))
2344d9fb9f38SJeff Kirsher 					dev->stats.rx_frame_errors++;
2345d9fb9f38SJeff Kirsher 				if (desc_status & DescRxCRC)
2346d9fb9f38SJeff Kirsher 					dev->stats.rx_crc_errors++;
2347d9fb9f38SJeff Kirsher 			}
2348d9fb9f38SJeff Kirsher 		} else if (pkt_len > np->rx_buf_sz) {
2349d9fb9f38SJeff Kirsher 			/* if this is the tail of a double buffer
2350d9fb9f38SJeff Kirsher 			 * packet, we've already counted the error
2351d9fb9f38SJeff Kirsher 			 * on the first part.  Ignore the second half.
2352d9fb9f38SJeff Kirsher 			 */
2353d9fb9f38SJeff Kirsher 		} else {
2354d9fb9f38SJeff Kirsher 			struct sk_buff *skb;
2355d9fb9f38SJeff Kirsher 			/* Omit CRC size. */
2356d9fb9f38SJeff Kirsher 			/* Check if the packet is long enough to accept
2357d9fb9f38SJeff Kirsher 			 * without copying to a minimally-sized skbuff. */
2358d9fb9f38SJeff Kirsher 			if (pkt_len < rx_copybreak &&
2359c056b734SPradeep A Dalvi 			    (skb = netdev_alloc_skb(dev, pkt_len + RX_OFFSET)) != NULL) {
2360d9fb9f38SJeff Kirsher 				/* 16 byte align the IP header */
2361d9fb9f38SJeff Kirsher 				skb_reserve(skb, RX_OFFSET);
2362d9fb9f38SJeff Kirsher 				pci_dma_sync_single_for_cpu(np->pci_dev,
2363d9fb9f38SJeff Kirsher 					np->rx_dma[entry],
2364d9fb9f38SJeff Kirsher 					buflen,
2365d9fb9f38SJeff Kirsher 					PCI_DMA_FROMDEVICE);
2366d9fb9f38SJeff Kirsher 				skb_copy_to_linear_data(skb,
2367d9fb9f38SJeff Kirsher 					np->rx_skbuff[entry]->data, pkt_len);
2368d9fb9f38SJeff Kirsher 				skb_put(skb, pkt_len);
2369d9fb9f38SJeff Kirsher 				pci_dma_sync_single_for_device(np->pci_dev,
2370d9fb9f38SJeff Kirsher 					np->rx_dma[entry],
2371d9fb9f38SJeff Kirsher 					buflen,
2372d9fb9f38SJeff Kirsher 					PCI_DMA_FROMDEVICE);
2373d9fb9f38SJeff Kirsher 			} else {
2374d9fb9f38SJeff Kirsher 				pci_unmap_single(np->pci_dev, np->rx_dma[entry],
2375d9fb9f38SJeff Kirsher 						 buflen + NATSEMI_PADDING,
2376d9fb9f38SJeff Kirsher 						 PCI_DMA_FROMDEVICE);
2377d9fb9f38SJeff Kirsher 				skb_put(skb = np->rx_skbuff[entry], pkt_len);
2378d9fb9f38SJeff Kirsher 				np->rx_skbuff[entry] = NULL;
2379d9fb9f38SJeff Kirsher 			}
2380d9fb9f38SJeff Kirsher 			skb->protocol = eth_type_trans(skb, dev);
2381d9fb9f38SJeff Kirsher 			netif_receive_skb(skb);
2382d9fb9f38SJeff Kirsher 			dev->stats.rx_packets++;
2383d9fb9f38SJeff Kirsher 			dev->stats.rx_bytes += pkt_len;
2384d9fb9f38SJeff Kirsher 		}
2385d9fb9f38SJeff Kirsher 		entry = (++np->cur_rx) % RX_RING_SIZE;
2386d9fb9f38SJeff Kirsher 		np->rx_head_desc = &np->rx_ring[entry];
2387d9fb9f38SJeff Kirsher 		desc_status = le32_to_cpu(np->rx_head_desc->cmd_status);
2388d9fb9f38SJeff Kirsher 	}
2389d9fb9f38SJeff Kirsher 	refill_rx(dev);
2390d9fb9f38SJeff Kirsher 
2391d9fb9f38SJeff Kirsher 	/* Restart Rx engine if stopped. */
2392d9fb9f38SJeff Kirsher 	if (np->oom)
2393d9fb9f38SJeff Kirsher 		mod_timer(&np->timer, jiffies + 1);
2394d9fb9f38SJeff Kirsher 	else
2395d9fb9f38SJeff Kirsher 		writel(RxOn, ioaddr + ChipCmd);
2396d9fb9f38SJeff Kirsher }
2397d9fb9f38SJeff Kirsher 
2398d9fb9f38SJeff Kirsher static void netdev_error(struct net_device *dev, int intr_status)
2399d9fb9f38SJeff Kirsher {
2400d9fb9f38SJeff Kirsher 	struct netdev_private *np = netdev_priv(dev);
2401d9fb9f38SJeff Kirsher 	void __iomem * ioaddr = ns_ioaddr(dev);
2402d9fb9f38SJeff Kirsher 
2403d9fb9f38SJeff Kirsher 	spin_lock(&np->lock);
2404d9fb9f38SJeff Kirsher 	if (intr_status & LinkChange) {
2405d9fb9f38SJeff Kirsher 		u16 lpa = mdio_read(dev, MII_LPA);
2406d9fb9f38SJeff Kirsher 		if (mdio_read(dev, MII_BMCR) & BMCR_ANENABLE &&
2407d9fb9f38SJeff Kirsher 		    netif_msg_link(np)) {
2408d9fb9f38SJeff Kirsher 			printk(KERN_INFO
2409d9fb9f38SJeff Kirsher 				"%s: Autonegotiation advertising"
2410d9fb9f38SJeff Kirsher 				" %#04x  partner %#04x.\n", dev->name,
2411d9fb9f38SJeff Kirsher 				np->advertising, lpa);
2412d9fb9f38SJeff Kirsher 		}
2413d9fb9f38SJeff Kirsher 
2414d9fb9f38SJeff Kirsher 		/* read MII int status to clear the flag */
2415d9fb9f38SJeff Kirsher 		readw(ioaddr + MIntrStatus);
2416d9fb9f38SJeff Kirsher 		check_link(dev);
2417d9fb9f38SJeff Kirsher 	}
2418d9fb9f38SJeff Kirsher 	if (intr_status & StatsMax) {
2419d9fb9f38SJeff Kirsher 		__get_stats(dev);
2420d9fb9f38SJeff Kirsher 	}
2421d9fb9f38SJeff Kirsher 	if (intr_status & IntrTxUnderrun) {
2422d9fb9f38SJeff Kirsher 		if ((np->tx_config & TxDrthMask) < TX_DRTH_VAL_LIMIT) {
2423d9fb9f38SJeff Kirsher 			np->tx_config += TX_DRTH_VAL_INC;
2424d9fb9f38SJeff Kirsher 			if (netif_msg_tx_err(np))
2425d9fb9f38SJeff Kirsher 				printk(KERN_NOTICE
2426d9fb9f38SJeff Kirsher 					"%s: increased tx threshold, txcfg %#08x.\n",
2427d9fb9f38SJeff Kirsher 					dev->name, np->tx_config);
2428d9fb9f38SJeff Kirsher 		} else {
2429d9fb9f38SJeff Kirsher 			if (netif_msg_tx_err(np))
2430d9fb9f38SJeff Kirsher 				printk(KERN_NOTICE
2431d9fb9f38SJeff Kirsher 					"%s: tx underrun with maximum tx threshold, txcfg %#08x.\n",
2432d9fb9f38SJeff Kirsher 					dev->name, np->tx_config);
2433d9fb9f38SJeff Kirsher 		}
2434d9fb9f38SJeff Kirsher 		writel(np->tx_config, ioaddr + TxConfig);
2435d9fb9f38SJeff Kirsher 	}
2436d9fb9f38SJeff Kirsher 	if (intr_status & WOLPkt && netif_msg_wol(np)) {
2437d9fb9f38SJeff Kirsher 		int wol_status = readl(ioaddr + WOLCmd);
2438d9fb9f38SJeff Kirsher 		printk(KERN_NOTICE "%s: Link wake-up event %#08x\n",
2439d9fb9f38SJeff Kirsher 			dev->name, wol_status);
2440d9fb9f38SJeff Kirsher 	}
2441d9fb9f38SJeff Kirsher 	if (intr_status & RxStatusFIFOOver) {
2442d9fb9f38SJeff Kirsher 		if (netif_msg_rx_err(np) && netif_msg_intr(np)) {
2443d9fb9f38SJeff Kirsher 			printk(KERN_NOTICE "%s: Rx status FIFO overrun\n",
2444d9fb9f38SJeff Kirsher 				dev->name);
2445d9fb9f38SJeff Kirsher 		}
2446d9fb9f38SJeff Kirsher 		dev->stats.rx_fifo_errors++;
2447d9fb9f38SJeff Kirsher 		dev->stats.rx_errors++;
2448d9fb9f38SJeff Kirsher 	}
2449d9fb9f38SJeff Kirsher 	/* Hmmmmm, it's not clear how to recover from PCI faults. */
2450d9fb9f38SJeff Kirsher 	if (intr_status & IntrPCIErr) {
2451d9fb9f38SJeff Kirsher 		printk(KERN_NOTICE "%s: PCI error %#08x\n", dev->name,
2452d9fb9f38SJeff Kirsher 			intr_status & IntrPCIErr);
2453d9fb9f38SJeff Kirsher 		dev->stats.tx_fifo_errors++;
2454d9fb9f38SJeff Kirsher 		dev->stats.tx_errors++;
2455d9fb9f38SJeff Kirsher 		dev->stats.rx_fifo_errors++;
2456d9fb9f38SJeff Kirsher 		dev->stats.rx_errors++;
2457d9fb9f38SJeff Kirsher 	}
2458d9fb9f38SJeff Kirsher 	spin_unlock(&np->lock);
2459d9fb9f38SJeff Kirsher }
2460d9fb9f38SJeff Kirsher 
2461d9fb9f38SJeff Kirsher static void __get_stats(struct net_device *dev)
2462d9fb9f38SJeff Kirsher {
2463d9fb9f38SJeff Kirsher 	void __iomem * ioaddr = ns_ioaddr(dev);
2464d9fb9f38SJeff Kirsher 
2465d9fb9f38SJeff Kirsher 	/* The chip only need report frame silently dropped. */
2466d9fb9f38SJeff Kirsher 	dev->stats.rx_crc_errors += readl(ioaddr + RxCRCErrs);
2467d9fb9f38SJeff Kirsher 	dev->stats.rx_missed_errors += readl(ioaddr + RxMissed);
2468d9fb9f38SJeff Kirsher }
2469d9fb9f38SJeff Kirsher 
2470d9fb9f38SJeff Kirsher static struct net_device_stats *get_stats(struct net_device *dev)
2471d9fb9f38SJeff Kirsher {
2472d9fb9f38SJeff Kirsher 	struct netdev_private *np = netdev_priv(dev);
2473d9fb9f38SJeff Kirsher 
2474d9fb9f38SJeff Kirsher 	/* The chip only need report frame silently dropped. */
2475d9fb9f38SJeff Kirsher 	spin_lock_irq(&np->lock);
2476d9fb9f38SJeff Kirsher 	if (netif_running(dev) && !np->hands_off)
2477d9fb9f38SJeff Kirsher 		__get_stats(dev);
2478d9fb9f38SJeff Kirsher 	spin_unlock_irq(&np->lock);
2479d9fb9f38SJeff Kirsher 
2480d9fb9f38SJeff Kirsher 	return &dev->stats;
2481d9fb9f38SJeff Kirsher }
2482d9fb9f38SJeff Kirsher 
2483d9fb9f38SJeff Kirsher #ifdef CONFIG_NET_POLL_CONTROLLER
2484d9fb9f38SJeff Kirsher static void natsemi_poll_controller(struct net_device *dev)
2485d9fb9f38SJeff Kirsher {
2486d710ce13SFrancois Romieu 	struct netdev_private *np = netdev_priv(dev);
2487d710ce13SFrancois Romieu 	const int irq = np->pci_dev->irq;
2488d710ce13SFrancois Romieu 
2489d710ce13SFrancois Romieu 	disable_irq(irq);
2490d710ce13SFrancois Romieu 	intr_handler(irq, dev);
2491d710ce13SFrancois Romieu 	enable_irq(irq);
2492d9fb9f38SJeff Kirsher }
2493d9fb9f38SJeff Kirsher #endif
2494d9fb9f38SJeff Kirsher 
2495d9fb9f38SJeff Kirsher #define HASH_TABLE	0x200
2496d9fb9f38SJeff Kirsher static void __set_rx_mode(struct net_device *dev)
2497d9fb9f38SJeff Kirsher {
2498d9fb9f38SJeff Kirsher 	void __iomem * ioaddr = ns_ioaddr(dev);
2499d9fb9f38SJeff Kirsher 	struct netdev_private *np = netdev_priv(dev);
2500d9fb9f38SJeff Kirsher 	u8 mc_filter[64]; /* Multicast hash filter */
2501d9fb9f38SJeff Kirsher 	u32 rx_mode;
2502d9fb9f38SJeff Kirsher 
2503d9fb9f38SJeff Kirsher 	if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
2504d9fb9f38SJeff Kirsher 		rx_mode = RxFilterEnable | AcceptBroadcast
2505d9fb9f38SJeff Kirsher 			| AcceptAllMulticast | AcceptAllPhys | AcceptMyPhys;
2506d9fb9f38SJeff Kirsher 	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
2507d9fb9f38SJeff Kirsher 		   (dev->flags & IFF_ALLMULTI)) {
2508d9fb9f38SJeff Kirsher 		rx_mode = RxFilterEnable | AcceptBroadcast
2509d9fb9f38SJeff Kirsher 			| AcceptAllMulticast | AcceptMyPhys;
2510d9fb9f38SJeff Kirsher 	} else {
2511d9fb9f38SJeff Kirsher 		struct netdev_hw_addr *ha;
2512d9fb9f38SJeff Kirsher 		int i;
2513d9fb9f38SJeff Kirsher 
2514d9fb9f38SJeff Kirsher 		memset(mc_filter, 0, sizeof(mc_filter));
2515d9fb9f38SJeff Kirsher 		netdev_for_each_mc_addr(ha, dev) {
2516d9fb9f38SJeff Kirsher 			int b = (ether_crc(ETH_ALEN, ha->addr) >> 23) & 0x1ff;
2517d9fb9f38SJeff Kirsher 			mc_filter[b/8] |= (1 << (b & 0x07));
2518d9fb9f38SJeff Kirsher 		}
2519d9fb9f38SJeff Kirsher 		rx_mode = RxFilterEnable | AcceptBroadcast
2520d9fb9f38SJeff Kirsher 			| AcceptMulticast | AcceptMyPhys;
2521d9fb9f38SJeff Kirsher 		for (i = 0; i < 64; i += 2) {
2522d9fb9f38SJeff Kirsher 			writel(HASH_TABLE + i, ioaddr + RxFilterAddr);
2523d9fb9f38SJeff Kirsher 			writel((mc_filter[i + 1] << 8) + mc_filter[i],
2524d9fb9f38SJeff Kirsher 			       ioaddr + RxFilterData);
2525d9fb9f38SJeff Kirsher 		}
2526d9fb9f38SJeff Kirsher 	}
2527d9fb9f38SJeff Kirsher 	writel(rx_mode, ioaddr + RxFilterAddr);
2528d9fb9f38SJeff Kirsher 	np->cur_rx_mode = rx_mode;
2529d9fb9f38SJeff Kirsher }
2530d9fb9f38SJeff Kirsher 
2531d9fb9f38SJeff Kirsher static int natsemi_change_mtu(struct net_device *dev, int new_mtu)
2532d9fb9f38SJeff Kirsher {
2533d9fb9f38SJeff Kirsher 	dev->mtu = new_mtu;
2534d9fb9f38SJeff Kirsher 
2535d9fb9f38SJeff Kirsher 	/* synchronized against open : rtnl_lock() held by caller */
2536d9fb9f38SJeff Kirsher 	if (netif_running(dev)) {
2537d9fb9f38SJeff Kirsher 		struct netdev_private *np = netdev_priv(dev);
2538d9fb9f38SJeff Kirsher 		void __iomem * ioaddr = ns_ioaddr(dev);
2539d710ce13SFrancois Romieu 		const int irq = np->pci_dev->irq;
2540d9fb9f38SJeff Kirsher 
2541d710ce13SFrancois Romieu 		disable_irq(irq);
2542d9fb9f38SJeff Kirsher 		spin_lock(&np->lock);
2543d9fb9f38SJeff Kirsher 		/* stop engines */
2544d9fb9f38SJeff Kirsher 		natsemi_stop_rxtx(dev);
2545d9fb9f38SJeff Kirsher 		/* drain rx queue */
2546d9fb9f38SJeff Kirsher 		drain_rx(dev);
2547d9fb9f38SJeff Kirsher 		/* change buffers */
2548d9fb9f38SJeff Kirsher 		set_bufsize(dev);
2549d9fb9f38SJeff Kirsher 		reinit_rx(dev);
2550d9fb9f38SJeff Kirsher 		writel(np->ring_dma, ioaddr + RxRingPtr);
2551d9fb9f38SJeff Kirsher 		/* restart engines */
2552d9fb9f38SJeff Kirsher 		writel(RxOn | TxOn, ioaddr + ChipCmd);
2553d9fb9f38SJeff Kirsher 		spin_unlock(&np->lock);
2554d710ce13SFrancois Romieu 		enable_irq(irq);
2555d9fb9f38SJeff Kirsher 	}
2556d9fb9f38SJeff Kirsher 	return 0;
2557d9fb9f38SJeff Kirsher }
2558d9fb9f38SJeff Kirsher 
2559d9fb9f38SJeff Kirsher static void set_rx_mode(struct net_device *dev)
2560d9fb9f38SJeff Kirsher {
2561d9fb9f38SJeff Kirsher 	struct netdev_private *np = netdev_priv(dev);
2562d9fb9f38SJeff Kirsher 	spin_lock_irq(&np->lock);
2563d9fb9f38SJeff Kirsher 	if (!np->hands_off)
2564d9fb9f38SJeff Kirsher 		__set_rx_mode(dev);
2565d9fb9f38SJeff Kirsher 	spin_unlock_irq(&np->lock);
2566d9fb9f38SJeff Kirsher }
2567d9fb9f38SJeff Kirsher 
2568d9fb9f38SJeff Kirsher static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
2569d9fb9f38SJeff Kirsher {
2570d9fb9f38SJeff Kirsher 	struct netdev_private *np = netdev_priv(dev);
257168aad78cSRick Jones 	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
257268aad78cSRick Jones 	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
257368aad78cSRick Jones 	strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
2574d9fb9f38SJeff Kirsher }
2575d9fb9f38SJeff Kirsher 
2576d9fb9f38SJeff Kirsher static int get_regs_len(struct net_device *dev)
2577d9fb9f38SJeff Kirsher {
2578d9fb9f38SJeff Kirsher 	return NATSEMI_REGS_SIZE;
2579d9fb9f38SJeff Kirsher }
2580d9fb9f38SJeff Kirsher 
2581d9fb9f38SJeff Kirsher static int get_eeprom_len(struct net_device *dev)
2582d9fb9f38SJeff Kirsher {
2583d9fb9f38SJeff Kirsher 	struct netdev_private *np = netdev_priv(dev);
2584d9fb9f38SJeff Kirsher 	return np->eeprom_size;
2585d9fb9f38SJeff Kirsher }
2586d9fb9f38SJeff Kirsher 
2587586b6e27SPhilippe Reynes static int get_link_ksettings(struct net_device *dev,
2588586b6e27SPhilippe Reynes 			      struct ethtool_link_ksettings *ecmd)
2589d9fb9f38SJeff Kirsher {
2590d9fb9f38SJeff Kirsher 	struct netdev_private *np = netdev_priv(dev);
2591d9fb9f38SJeff Kirsher 	spin_lock_irq(&np->lock);
2592d9fb9f38SJeff Kirsher 	netdev_get_ecmd(dev, ecmd);
2593d9fb9f38SJeff Kirsher 	spin_unlock_irq(&np->lock);
2594d9fb9f38SJeff Kirsher 	return 0;
2595d9fb9f38SJeff Kirsher }
2596d9fb9f38SJeff Kirsher 
2597586b6e27SPhilippe Reynes static int set_link_ksettings(struct net_device *dev,
2598586b6e27SPhilippe Reynes 			      const struct ethtool_link_ksettings *ecmd)
2599d9fb9f38SJeff Kirsher {
2600d9fb9f38SJeff Kirsher 	struct netdev_private *np = netdev_priv(dev);
2601d9fb9f38SJeff Kirsher 	int res;
2602d9fb9f38SJeff Kirsher 	spin_lock_irq(&np->lock);
2603d9fb9f38SJeff Kirsher 	res = netdev_set_ecmd(dev, ecmd);
2604d9fb9f38SJeff Kirsher 	spin_unlock_irq(&np->lock);
2605d9fb9f38SJeff Kirsher 	return res;
2606d9fb9f38SJeff Kirsher }
2607d9fb9f38SJeff Kirsher 
2608d9fb9f38SJeff Kirsher static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2609d9fb9f38SJeff Kirsher {
2610d9fb9f38SJeff Kirsher 	struct netdev_private *np = netdev_priv(dev);
2611d9fb9f38SJeff Kirsher 	spin_lock_irq(&np->lock);
2612d9fb9f38SJeff Kirsher 	netdev_get_wol(dev, &wol->supported, &wol->wolopts);
2613d9fb9f38SJeff Kirsher 	netdev_get_sopass(dev, wol->sopass);
2614d9fb9f38SJeff Kirsher 	spin_unlock_irq(&np->lock);
2615d9fb9f38SJeff Kirsher }
2616d9fb9f38SJeff Kirsher 
2617d9fb9f38SJeff Kirsher static int set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2618d9fb9f38SJeff Kirsher {
2619d9fb9f38SJeff Kirsher 	struct netdev_private *np = netdev_priv(dev);
2620d9fb9f38SJeff Kirsher 	int res;
2621d9fb9f38SJeff Kirsher 	spin_lock_irq(&np->lock);
2622d9fb9f38SJeff Kirsher 	netdev_set_wol(dev, wol->wolopts);
2623d9fb9f38SJeff Kirsher 	res = netdev_set_sopass(dev, wol->sopass);
2624d9fb9f38SJeff Kirsher 	spin_unlock_irq(&np->lock);
2625d9fb9f38SJeff Kirsher 	return res;
2626d9fb9f38SJeff Kirsher }
2627d9fb9f38SJeff Kirsher 
2628d9fb9f38SJeff Kirsher static void get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf)
2629d9fb9f38SJeff Kirsher {
2630d9fb9f38SJeff Kirsher 	struct netdev_private *np = netdev_priv(dev);
2631d9fb9f38SJeff Kirsher 	regs->version = NATSEMI_REGS_VER;
2632d9fb9f38SJeff Kirsher 	spin_lock_irq(&np->lock);
2633d9fb9f38SJeff Kirsher 	netdev_get_regs(dev, buf);
2634d9fb9f38SJeff Kirsher 	spin_unlock_irq(&np->lock);
2635d9fb9f38SJeff Kirsher }
2636d9fb9f38SJeff Kirsher 
2637d9fb9f38SJeff Kirsher static u32 get_msglevel(struct net_device *dev)
2638d9fb9f38SJeff Kirsher {
2639d9fb9f38SJeff Kirsher 	struct netdev_private *np = netdev_priv(dev);
2640d9fb9f38SJeff Kirsher 	return np->msg_enable;
2641d9fb9f38SJeff Kirsher }
2642d9fb9f38SJeff Kirsher 
2643d9fb9f38SJeff Kirsher static void set_msglevel(struct net_device *dev, u32 val)
2644d9fb9f38SJeff Kirsher {
2645d9fb9f38SJeff Kirsher 	struct netdev_private *np = netdev_priv(dev);
2646d9fb9f38SJeff Kirsher 	np->msg_enable = val;
2647d9fb9f38SJeff Kirsher }
2648d9fb9f38SJeff Kirsher 
2649d9fb9f38SJeff Kirsher static int nway_reset(struct net_device *dev)
2650d9fb9f38SJeff Kirsher {
2651d9fb9f38SJeff Kirsher 	int tmp;
2652d9fb9f38SJeff Kirsher 	int r = -EINVAL;
2653d9fb9f38SJeff Kirsher 	/* if autoneg is off, it's an error */
2654d9fb9f38SJeff Kirsher 	tmp = mdio_read(dev, MII_BMCR);
2655d9fb9f38SJeff Kirsher 	if (tmp & BMCR_ANENABLE) {
2656d9fb9f38SJeff Kirsher 		tmp |= (BMCR_ANRESTART);
2657d9fb9f38SJeff Kirsher 		mdio_write(dev, MII_BMCR, tmp);
2658d9fb9f38SJeff Kirsher 		r = 0;
2659d9fb9f38SJeff Kirsher 	}
2660d9fb9f38SJeff Kirsher 	return r;
2661d9fb9f38SJeff Kirsher }
2662d9fb9f38SJeff Kirsher 
2663d9fb9f38SJeff Kirsher static u32 get_link(struct net_device *dev)
2664d9fb9f38SJeff Kirsher {
2665d9fb9f38SJeff Kirsher 	/* LSTATUS is latched low until a read - so read twice */
2666d9fb9f38SJeff Kirsher 	mdio_read(dev, MII_BMSR);
2667d9fb9f38SJeff Kirsher 	return (mdio_read(dev, MII_BMSR)&BMSR_LSTATUS) ? 1:0;
2668d9fb9f38SJeff Kirsher }
2669d9fb9f38SJeff Kirsher 
2670d9fb9f38SJeff Kirsher static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
2671d9fb9f38SJeff Kirsher {
2672d9fb9f38SJeff Kirsher 	struct netdev_private *np = netdev_priv(dev);
2673d9fb9f38SJeff Kirsher 	u8 *eebuf;
2674d9fb9f38SJeff Kirsher 	int res;
2675d9fb9f38SJeff Kirsher 
2676d9fb9f38SJeff Kirsher 	eebuf = kmalloc(np->eeprom_size, GFP_KERNEL);
2677d9fb9f38SJeff Kirsher 	if (!eebuf)
2678d9fb9f38SJeff Kirsher 		return -ENOMEM;
2679d9fb9f38SJeff Kirsher 
2680d9fb9f38SJeff Kirsher 	eeprom->magic = PCI_VENDOR_ID_NS | (PCI_DEVICE_ID_NS_83815<<16);
2681d9fb9f38SJeff Kirsher 	spin_lock_irq(&np->lock);
2682d9fb9f38SJeff Kirsher 	res = netdev_get_eeprom(dev, eebuf);
2683d9fb9f38SJeff Kirsher 	spin_unlock_irq(&np->lock);
2684d9fb9f38SJeff Kirsher 	if (!res)
2685d9fb9f38SJeff Kirsher 		memcpy(data, eebuf+eeprom->offset, eeprom->len);
2686d9fb9f38SJeff Kirsher 	kfree(eebuf);
2687d9fb9f38SJeff Kirsher 	return res;
2688d9fb9f38SJeff Kirsher }
2689d9fb9f38SJeff Kirsher 
2690d9fb9f38SJeff Kirsher static const struct ethtool_ops ethtool_ops = {
2691d9fb9f38SJeff Kirsher 	.get_drvinfo = get_drvinfo,
2692d9fb9f38SJeff Kirsher 	.get_regs_len = get_regs_len,
2693d9fb9f38SJeff Kirsher 	.get_eeprom_len = get_eeprom_len,
2694d9fb9f38SJeff Kirsher 	.get_wol = get_wol,
2695d9fb9f38SJeff Kirsher 	.set_wol = set_wol,
2696d9fb9f38SJeff Kirsher 	.get_regs = get_regs,
2697d9fb9f38SJeff Kirsher 	.get_msglevel = get_msglevel,
2698d9fb9f38SJeff Kirsher 	.set_msglevel = set_msglevel,
2699d9fb9f38SJeff Kirsher 	.nway_reset = nway_reset,
2700d9fb9f38SJeff Kirsher 	.get_link = get_link,
2701d9fb9f38SJeff Kirsher 	.get_eeprom = get_eeprom,
2702586b6e27SPhilippe Reynes 	.get_link_ksettings = get_link_ksettings,
2703586b6e27SPhilippe Reynes 	.set_link_ksettings = set_link_ksettings,
2704d9fb9f38SJeff Kirsher };
2705d9fb9f38SJeff Kirsher 
2706d9fb9f38SJeff Kirsher static int netdev_set_wol(struct net_device *dev, u32 newval)
2707d9fb9f38SJeff Kirsher {
2708d9fb9f38SJeff Kirsher 	struct netdev_private *np = netdev_priv(dev);
2709d9fb9f38SJeff Kirsher 	void __iomem * ioaddr = ns_ioaddr(dev);
2710d9fb9f38SJeff Kirsher 	u32 data = readl(ioaddr + WOLCmd) & ~WakeOptsSummary;
2711d9fb9f38SJeff Kirsher 
2712d9fb9f38SJeff Kirsher 	/* translate to bitmasks this chip understands */
2713d9fb9f38SJeff Kirsher 	if (newval & WAKE_PHY)
2714d9fb9f38SJeff Kirsher 		data |= WakePhy;
2715d9fb9f38SJeff Kirsher 	if (newval & WAKE_UCAST)
2716d9fb9f38SJeff Kirsher 		data |= WakeUnicast;
2717d9fb9f38SJeff Kirsher 	if (newval & WAKE_MCAST)
2718d9fb9f38SJeff Kirsher 		data |= WakeMulticast;
2719d9fb9f38SJeff Kirsher 	if (newval & WAKE_BCAST)
2720d9fb9f38SJeff Kirsher 		data |= WakeBroadcast;
2721d9fb9f38SJeff Kirsher 	if (newval & WAKE_ARP)
2722d9fb9f38SJeff Kirsher 		data |= WakeArp;
2723d9fb9f38SJeff Kirsher 	if (newval & WAKE_MAGIC)
2724d9fb9f38SJeff Kirsher 		data |= WakeMagic;
2725d9fb9f38SJeff Kirsher 	if (np->srr >= SRR_DP83815_D) {
2726d9fb9f38SJeff Kirsher 		if (newval & WAKE_MAGICSECURE) {
2727d9fb9f38SJeff Kirsher 			data |= WakeMagicSecure;
2728d9fb9f38SJeff Kirsher 		}
2729d9fb9f38SJeff Kirsher 	}
2730d9fb9f38SJeff Kirsher 
2731d9fb9f38SJeff Kirsher 	writel(data, ioaddr + WOLCmd);
2732d9fb9f38SJeff Kirsher 
2733d9fb9f38SJeff Kirsher 	return 0;
2734d9fb9f38SJeff Kirsher }
2735d9fb9f38SJeff Kirsher 
2736d9fb9f38SJeff Kirsher static int netdev_get_wol(struct net_device *dev, u32 *supported, u32 *cur)
2737d9fb9f38SJeff Kirsher {
2738d9fb9f38SJeff Kirsher 	struct netdev_private *np = netdev_priv(dev);
2739d9fb9f38SJeff Kirsher 	void __iomem * ioaddr = ns_ioaddr(dev);
2740d9fb9f38SJeff Kirsher 	u32 regval = readl(ioaddr + WOLCmd);
2741d9fb9f38SJeff Kirsher 
2742d9fb9f38SJeff Kirsher 	*supported = (WAKE_PHY | WAKE_UCAST | WAKE_MCAST | WAKE_BCAST
2743d9fb9f38SJeff Kirsher 			| WAKE_ARP | WAKE_MAGIC);
2744d9fb9f38SJeff Kirsher 
2745d9fb9f38SJeff Kirsher 	if (np->srr >= SRR_DP83815_D) {
2746d9fb9f38SJeff Kirsher 		/* SOPASS works on revD and higher */
2747d9fb9f38SJeff Kirsher 		*supported |= WAKE_MAGICSECURE;
2748d9fb9f38SJeff Kirsher 	}
2749d9fb9f38SJeff Kirsher 	*cur = 0;
2750d9fb9f38SJeff Kirsher 
2751d9fb9f38SJeff Kirsher 	/* translate from chip bitmasks */
2752d9fb9f38SJeff Kirsher 	if (regval & WakePhy)
2753d9fb9f38SJeff Kirsher 		*cur |= WAKE_PHY;
2754d9fb9f38SJeff Kirsher 	if (regval & WakeUnicast)
2755d9fb9f38SJeff Kirsher 		*cur |= WAKE_UCAST;
2756d9fb9f38SJeff Kirsher 	if (regval & WakeMulticast)
2757d9fb9f38SJeff Kirsher 		*cur |= WAKE_MCAST;
2758d9fb9f38SJeff Kirsher 	if (regval & WakeBroadcast)
2759d9fb9f38SJeff Kirsher 		*cur |= WAKE_BCAST;
2760d9fb9f38SJeff Kirsher 	if (regval & WakeArp)
2761d9fb9f38SJeff Kirsher 		*cur |= WAKE_ARP;
2762d9fb9f38SJeff Kirsher 	if (regval & WakeMagic)
2763d9fb9f38SJeff Kirsher 		*cur |= WAKE_MAGIC;
2764d9fb9f38SJeff Kirsher 	if (regval & WakeMagicSecure) {
2765d9fb9f38SJeff Kirsher 		/* this can be on in revC, but it's broken */
2766d9fb9f38SJeff Kirsher 		*cur |= WAKE_MAGICSECURE;
2767d9fb9f38SJeff Kirsher 	}
2768d9fb9f38SJeff Kirsher 
2769d9fb9f38SJeff Kirsher 	return 0;
2770d9fb9f38SJeff Kirsher }
2771d9fb9f38SJeff Kirsher 
2772d9fb9f38SJeff Kirsher static int netdev_set_sopass(struct net_device *dev, u8 *newval)
2773d9fb9f38SJeff Kirsher {
2774d9fb9f38SJeff Kirsher 	struct netdev_private *np = netdev_priv(dev);
2775d9fb9f38SJeff Kirsher 	void __iomem * ioaddr = ns_ioaddr(dev);
2776d9fb9f38SJeff Kirsher 	u16 *sval = (u16 *)newval;
2777d9fb9f38SJeff Kirsher 	u32 addr;
2778d9fb9f38SJeff Kirsher 
2779d9fb9f38SJeff Kirsher 	if (np->srr < SRR_DP83815_D) {
2780d9fb9f38SJeff Kirsher 		return 0;
2781d9fb9f38SJeff Kirsher 	}
2782d9fb9f38SJeff Kirsher 
2783d9fb9f38SJeff Kirsher 	/* enable writing to these registers by disabling the RX filter */
2784d9fb9f38SJeff Kirsher 	addr = readl(ioaddr + RxFilterAddr) & ~RFCRAddressMask;
2785d9fb9f38SJeff Kirsher 	addr &= ~RxFilterEnable;
2786d9fb9f38SJeff Kirsher 	writel(addr, ioaddr + RxFilterAddr);
2787d9fb9f38SJeff Kirsher 
2788d9fb9f38SJeff Kirsher 	/* write the three words to (undocumented) RFCR vals 0xa, 0xc, 0xe */
2789d9fb9f38SJeff Kirsher 	writel(addr | 0xa, ioaddr + RxFilterAddr);
2790d9fb9f38SJeff Kirsher 	writew(sval[0], ioaddr + RxFilterData);
2791d9fb9f38SJeff Kirsher 
2792d9fb9f38SJeff Kirsher 	writel(addr | 0xc, ioaddr + RxFilterAddr);
2793d9fb9f38SJeff Kirsher 	writew(sval[1], ioaddr + RxFilterData);
2794d9fb9f38SJeff Kirsher 
2795d9fb9f38SJeff Kirsher 	writel(addr | 0xe, ioaddr + RxFilterAddr);
2796d9fb9f38SJeff Kirsher 	writew(sval[2], ioaddr + RxFilterData);
2797d9fb9f38SJeff Kirsher 
2798d9fb9f38SJeff Kirsher 	/* re-enable the RX filter */
2799d9fb9f38SJeff Kirsher 	writel(addr | RxFilterEnable, ioaddr + RxFilterAddr);
2800d9fb9f38SJeff Kirsher 
2801d9fb9f38SJeff Kirsher 	return 0;
2802d9fb9f38SJeff Kirsher }
2803d9fb9f38SJeff Kirsher 
2804d9fb9f38SJeff Kirsher static int netdev_get_sopass(struct net_device *dev, u8 *data)
2805d9fb9f38SJeff Kirsher {
2806d9fb9f38SJeff Kirsher 	struct netdev_private *np = netdev_priv(dev);
2807d9fb9f38SJeff Kirsher 	void __iomem * ioaddr = ns_ioaddr(dev);
2808d9fb9f38SJeff Kirsher 	u16 *sval = (u16 *)data;
2809d9fb9f38SJeff Kirsher 	u32 addr;
2810d9fb9f38SJeff Kirsher 
2811d9fb9f38SJeff Kirsher 	if (np->srr < SRR_DP83815_D) {
2812d9fb9f38SJeff Kirsher 		sval[0] = sval[1] = sval[2] = 0;
2813d9fb9f38SJeff Kirsher 		return 0;
2814d9fb9f38SJeff Kirsher 	}
2815d9fb9f38SJeff Kirsher 
2816d9fb9f38SJeff Kirsher 	/* read the three words from (undocumented) RFCR vals 0xa, 0xc, 0xe */
2817d9fb9f38SJeff Kirsher 	addr = readl(ioaddr + RxFilterAddr) & ~RFCRAddressMask;
2818d9fb9f38SJeff Kirsher 
2819d9fb9f38SJeff Kirsher 	writel(addr | 0xa, ioaddr + RxFilterAddr);
2820d9fb9f38SJeff Kirsher 	sval[0] = readw(ioaddr + RxFilterData);
2821d9fb9f38SJeff Kirsher 
2822d9fb9f38SJeff Kirsher 	writel(addr | 0xc, ioaddr + RxFilterAddr);
2823d9fb9f38SJeff Kirsher 	sval[1] = readw(ioaddr + RxFilterData);
2824d9fb9f38SJeff Kirsher 
2825d9fb9f38SJeff Kirsher 	writel(addr | 0xe, ioaddr + RxFilterAddr);
2826d9fb9f38SJeff Kirsher 	sval[2] = readw(ioaddr + RxFilterData);
2827d9fb9f38SJeff Kirsher 
2828d9fb9f38SJeff Kirsher 	writel(addr, ioaddr + RxFilterAddr);
2829d9fb9f38SJeff Kirsher 
2830d9fb9f38SJeff Kirsher 	return 0;
2831d9fb9f38SJeff Kirsher }
2832d9fb9f38SJeff Kirsher 
2833586b6e27SPhilippe Reynes static int netdev_get_ecmd(struct net_device *dev,
2834586b6e27SPhilippe Reynes 			   struct ethtool_link_ksettings *ecmd)
2835d9fb9f38SJeff Kirsher {
2836d9fb9f38SJeff Kirsher 	struct netdev_private *np = netdev_priv(dev);
2837586b6e27SPhilippe Reynes 	u32 supported, advertising;
2838d9fb9f38SJeff Kirsher 	u32 tmp;
2839d9fb9f38SJeff Kirsher 
2840586b6e27SPhilippe Reynes 	ecmd->base.port   = dev->if_port;
2841586b6e27SPhilippe Reynes 	ecmd->base.speed  = np->speed;
2842586b6e27SPhilippe Reynes 	ecmd->base.duplex = np->duplex;
2843586b6e27SPhilippe Reynes 	ecmd->base.autoneg = np->autoneg;
2844586b6e27SPhilippe Reynes 	advertising = 0;
2845586b6e27SPhilippe Reynes 
2846d9fb9f38SJeff Kirsher 	if (np->advertising & ADVERTISE_10HALF)
2847586b6e27SPhilippe Reynes 		advertising |= ADVERTISED_10baseT_Half;
2848d9fb9f38SJeff Kirsher 	if (np->advertising & ADVERTISE_10FULL)
2849586b6e27SPhilippe Reynes 		advertising |= ADVERTISED_10baseT_Full;
2850d9fb9f38SJeff Kirsher 	if (np->advertising & ADVERTISE_100HALF)
2851586b6e27SPhilippe Reynes 		advertising |= ADVERTISED_100baseT_Half;
2852d9fb9f38SJeff Kirsher 	if (np->advertising & ADVERTISE_100FULL)
2853586b6e27SPhilippe Reynes 		advertising |= ADVERTISED_100baseT_Full;
2854586b6e27SPhilippe Reynes 	supported   = (SUPPORTED_Autoneg |
2855d9fb9f38SJeff Kirsher 		SUPPORTED_10baseT_Half  | SUPPORTED_10baseT_Full  |
2856d9fb9f38SJeff Kirsher 		SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
2857d9fb9f38SJeff Kirsher 		SUPPORTED_TP | SUPPORTED_MII | SUPPORTED_FIBRE);
2858586b6e27SPhilippe Reynes 	ecmd->base.phy_address = np->phy_addr_external;
2859d9fb9f38SJeff Kirsher 	/*
2860d9fb9f38SJeff Kirsher 	 * We intentionally report the phy address of the external
2861d9fb9f38SJeff Kirsher 	 * phy, even if the internal phy is used. This is necessary
2862d9fb9f38SJeff Kirsher 	 * to work around a deficiency of the ethtool interface:
2863d9fb9f38SJeff Kirsher 	 * It's only possible to query the settings of the active
2864d9fb9f38SJeff Kirsher 	 * port. Therefore
2865d9fb9f38SJeff Kirsher 	 * # ethtool -s ethX port mii
2866d9fb9f38SJeff Kirsher 	 * actually sends an ioctl to switch to port mii with the
2867d9fb9f38SJeff Kirsher 	 * settings that are used for the current active port.
2868d9fb9f38SJeff Kirsher 	 * If we would report a different phy address in this
2869d9fb9f38SJeff Kirsher 	 * command, then
2870d9fb9f38SJeff Kirsher 	 * # ethtool -s ethX port tp;ethtool -s ethX port mii
2871d9fb9f38SJeff Kirsher 	 * would unintentionally change the phy address.
2872d9fb9f38SJeff Kirsher 	 *
2873d9fb9f38SJeff Kirsher 	 * Fortunately the phy address doesn't matter with the
2874d9fb9f38SJeff Kirsher 	 * internal phy...
2875d9fb9f38SJeff Kirsher 	 */
2876d9fb9f38SJeff Kirsher 
2877d9fb9f38SJeff Kirsher 	/* set information based on active port type */
2878586b6e27SPhilippe Reynes 	switch (ecmd->base.port) {
2879d9fb9f38SJeff Kirsher 	default:
2880d9fb9f38SJeff Kirsher 	case PORT_TP:
2881586b6e27SPhilippe Reynes 		advertising |= ADVERTISED_TP;
2882d9fb9f38SJeff Kirsher 		break;
2883d9fb9f38SJeff Kirsher 	case PORT_MII:
2884586b6e27SPhilippe Reynes 		advertising |= ADVERTISED_MII;
2885d9fb9f38SJeff Kirsher 		break;
2886d9fb9f38SJeff Kirsher 	case PORT_FIBRE:
2887586b6e27SPhilippe Reynes 		advertising |= ADVERTISED_FIBRE;
2888d9fb9f38SJeff Kirsher 		break;
2889d9fb9f38SJeff Kirsher 	}
2890d9fb9f38SJeff Kirsher 
2891d9fb9f38SJeff Kirsher 	/* if autonegotiation is on, try to return the active speed/duplex */
2892586b6e27SPhilippe Reynes 	if (ecmd->base.autoneg == AUTONEG_ENABLE) {
2893586b6e27SPhilippe Reynes 		advertising |= ADVERTISED_Autoneg;
2894d9fb9f38SJeff Kirsher 		tmp = mii_nway_result(
2895d9fb9f38SJeff Kirsher 			np->advertising & mdio_read(dev, MII_LPA));
2896d9fb9f38SJeff Kirsher 		if (tmp == LPA_100FULL || tmp == LPA_100HALF)
2897586b6e27SPhilippe Reynes 			ecmd->base.speed = SPEED_100;
2898d9fb9f38SJeff Kirsher 		else
2899586b6e27SPhilippe Reynes 			ecmd->base.speed = SPEED_10;
2900d9fb9f38SJeff Kirsher 		if (tmp == LPA_100FULL || tmp == LPA_10FULL)
2901586b6e27SPhilippe Reynes 			ecmd->base.duplex = DUPLEX_FULL;
2902d9fb9f38SJeff Kirsher 		else
2903586b6e27SPhilippe Reynes 			ecmd->base.duplex = DUPLEX_HALF;
2904d9fb9f38SJeff Kirsher 	}
2905d9fb9f38SJeff Kirsher 
2906d9fb9f38SJeff Kirsher 	/* ignore maxtxpkt, maxrxpkt for now */
2907d9fb9f38SJeff Kirsher 
2908586b6e27SPhilippe Reynes 	ethtool_convert_legacy_u32_to_link_mode(ecmd->link_modes.supported,
2909586b6e27SPhilippe Reynes 						supported);
2910586b6e27SPhilippe Reynes 	ethtool_convert_legacy_u32_to_link_mode(ecmd->link_modes.advertising,
2911586b6e27SPhilippe Reynes 						advertising);
2912586b6e27SPhilippe Reynes 
2913d9fb9f38SJeff Kirsher 	return 0;
2914d9fb9f38SJeff Kirsher }
2915d9fb9f38SJeff Kirsher 
2916586b6e27SPhilippe Reynes static int netdev_set_ecmd(struct net_device *dev,
2917586b6e27SPhilippe Reynes 			   const struct ethtool_link_ksettings *ecmd)
2918d9fb9f38SJeff Kirsher {
2919d9fb9f38SJeff Kirsher 	struct netdev_private *np = netdev_priv(dev);
2920586b6e27SPhilippe Reynes 	u32 advertising;
2921d9fb9f38SJeff Kirsher 
2922586b6e27SPhilippe Reynes 	ethtool_convert_link_mode_to_legacy_u32(&advertising,
2923586b6e27SPhilippe Reynes 						ecmd->link_modes.advertising);
2924586b6e27SPhilippe Reynes 
2925586b6e27SPhilippe Reynes 	if (ecmd->base.port != PORT_TP &&
2926586b6e27SPhilippe Reynes 	    ecmd->base.port != PORT_MII &&
2927586b6e27SPhilippe Reynes 	    ecmd->base.port != PORT_FIBRE)
2928d9fb9f38SJeff Kirsher 		return -EINVAL;
2929586b6e27SPhilippe Reynes 	if (ecmd->base.autoneg == AUTONEG_ENABLE) {
2930586b6e27SPhilippe Reynes 		if ((advertising & (ADVERTISED_10baseT_Half |
2931d9fb9f38SJeff Kirsher 					  ADVERTISED_10baseT_Full |
2932d9fb9f38SJeff Kirsher 					  ADVERTISED_100baseT_Half |
2933d9fb9f38SJeff Kirsher 					  ADVERTISED_100baseT_Full)) == 0) {
2934d9fb9f38SJeff Kirsher 			return -EINVAL;
2935d9fb9f38SJeff Kirsher 		}
2936586b6e27SPhilippe Reynes 	} else if (ecmd->base.autoneg == AUTONEG_DISABLE) {
2937586b6e27SPhilippe Reynes 		u32 speed = ecmd->base.speed;
2938d9fb9f38SJeff Kirsher 		if (speed != SPEED_10 && speed != SPEED_100)
2939d9fb9f38SJeff Kirsher 			return -EINVAL;
2940586b6e27SPhilippe Reynes 		if (ecmd->base.duplex != DUPLEX_HALF &&
2941586b6e27SPhilippe Reynes 		    ecmd->base.duplex != DUPLEX_FULL)
2942d9fb9f38SJeff Kirsher 			return -EINVAL;
2943d9fb9f38SJeff Kirsher 	} else {
2944d9fb9f38SJeff Kirsher 		return -EINVAL;
2945d9fb9f38SJeff Kirsher 	}
2946d9fb9f38SJeff Kirsher 
2947d9fb9f38SJeff Kirsher 	/*
2948d9fb9f38SJeff Kirsher 	 * If we're ignoring the PHY then autoneg and the internal
2949d9fb9f38SJeff Kirsher 	 * transceiver are really not going to work so don't let the
2950d9fb9f38SJeff Kirsher 	 * user select them.
2951d9fb9f38SJeff Kirsher 	 */
2952586b6e27SPhilippe Reynes 	if (np->ignore_phy && (ecmd->base.autoneg == AUTONEG_ENABLE ||
2953586b6e27SPhilippe Reynes 			       ecmd->base.port == PORT_TP))
2954d9fb9f38SJeff Kirsher 		return -EINVAL;
2955d9fb9f38SJeff Kirsher 
2956d9fb9f38SJeff Kirsher 	/*
2957d9fb9f38SJeff Kirsher 	 * maxtxpkt, maxrxpkt: ignored for now.
2958d9fb9f38SJeff Kirsher 	 *
2959d9fb9f38SJeff Kirsher 	 * transceiver:
2960d9fb9f38SJeff Kirsher 	 * PORT_TP is always XCVR_INTERNAL, PORT_MII and PORT_FIBRE are always
2961d9fb9f38SJeff Kirsher 	 * XCVR_EXTERNAL. The implementation thus ignores ecmd->transceiver and
2962d9fb9f38SJeff Kirsher 	 * selects based on ecmd->port.
2963d9fb9f38SJeff Kirsher 	 *
2964d9fb9f38SJeff Kirsher 	 * Actually PORT_FIBRE is nearly identical to PORT_MII: it's for fibre
2965d9fb9f38SJeff Kirsher 	 * phys that are connected to the mii bus. It's used to apply fibre
2966d9fb9f38SJeff Kirsher 	 * specific updates.
2967d9fb9f38SJeff Kirsher 	 */
2968d9fb9f38SJeff Kirsher 
2969d9fb9f38SJeff Kirsher 	/* WHEW! now lets bang some bits */
2970d9fb9f38SJeff Kirsher 
2971d9fb9f38SJeff Kirsher 	/* save the parms */
2972586b6e27SPhilippe Reynes 	dev->if_port          = ecmd->base.port;
2973586b6e27SPhilippe Reynes 	np->autoneg           = ecmd->base.autoneg;
2974586b6e27SPhilippe Reynes 	np->phy_addr_external = ecmd->base.phy_address & PhyAddrMask;
2975d9fb9f38SJeff Kirsher 	if (np->autoneg == AUTONEG_ENABLE) {
2976d9fb9f38SJeff Kirsher 		/* advertise only what has been requested */
2977d9fb9f38SJeff Kirsher 		np->advertising &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
2978586b6e27SPhilippe Reynes 		if (advertising & ADVERTISED_10baseT_Half)
2979d9fb9f38SJeff Kirsher 			np->advertising |= ADVERTISE_10HALF;
2980586b6e27SPhilippe Reynes 		if (advertising & ADVERTISED_10baseT_Full)
2981d9fb9f38SJeff Kirsher 			np->advertising |= ADVERTISE_10FULL;
2982586b6e27SPhilippe Reynes 		if (advertising & ADVERTISED_100baseT_Half)
2983d9fb9f38SJeff Kirsher 			np->advertising |= ADVERTISE_100HALF;
2984586b6e27SPhilippe Reynes 		if (advertising & ADVERTISED_100baseT_Full)
2985d9fb9f38SJeff Kirsher 			np->advertising |= ADVERTISE_100FULL;
2986d9fb9f38SJeff Kirsher 	} else {
2987586b6e27SPhilippe Reynes 		np->speed  = ecmd->base.speed;
2988586b6e27SPhilippe Reynes 		np->duplex = ecmd->base.duplex;
2989d9fb9f38SJeff Kirsher 		/* user overriding the initial full duplex parm? */
2990d9fb9f38SJeff Kirsher 		if (np->duplex == DUPLEX_HALF)
2991d9fb9f38SJeff Kirsher 			np->full_duplex = 0;
2992d9fb9f38SJeff Kirsher 	}
2993d9fb9f38SJeff Kirsher 
2994d9fb9f38SJeff Kirsher 	/* get the right phy enabled */
2995586b6e27SPhilippe Reynes 	if (ecmd->base.port == PORT_TP)
2996d9fb9f38SJeff Kirsher 		switch_port_internal(dev);
2997d9fb9f38SJeff Kirsher 	else
2998d9fb9f38SJeff Kirsher 		switch_port_external(dev);
2999d9fb9f38SJeff Kirsher 
3000d9fb9f38SJeff Kirsher 	/* set parms and see how this affected our link status */
3001d9fb9f38SJeff Kirsher 	init_phy_fixup(dev);
3002d9fb9f38SJeff Kirsher 	check_link(dev);
3003d9fb9f38SJeff Kirsher 	return 0;
3004d9fb9f38SJeff Kirsher }
3005d9fb9f38SJeff Kirsher 
3006d9fb9f38SJeff Kirsher static int netdev_get_regs(struct net_device *dev, u8 *buf)
3007d9fb9f38SJeff Kirsher {
3008d9fb9f38SJeff Kirsher 	int i;
3009d9fb9f38SJeff Kirsher 	int j;
3010d9fb9f38SJeff Kirsher 	u32 rfcr;
3011d9fb9f38SJeff Kirsher 	u32 *rbuf = (u32 *)buf;
3012d9fb9f38SJeff Kirsher 	void __iomem * ioaddr = ns_ioaddr(dev);
3013d9fb9f38SJeff Kirsher 
3014d9fb9f38SJeff Kirsher 	/* read non-mii page 0 of registers */
3015d9fb9f38SJeff Kirsher 	for (i = 0; i < NATSEMI_PG0_NREGS/2; i++) {
3016d9fb9f38SJeff Kirsher 		rbuf[i] = readl(ioaddr + i*4);
3017d9fb9f38SJeff Kirsher 	}
3018d9fb9f38SJeff Kirsher 
3019d9fb9f38SJeff Kirsher 	/* read current mii registers */
3020d9fb9f38SJeff Kirsher 	for (i = NATSEMI_PG0_NREGS/2; i < NATSEMI_PG0_NREGS; i++)
3021d9fb9f38SJeff Kirsher 		rbuf[i] = mdio_read(dev, i & 0x1f);
3022d9fb9f38SJeff Kirsher 
3023d9fb9f38SJeff Kirsher 	/* read only the 'magic' registers from page 1 */
3024d9fb9f38SJeff Kirsher 	writew(1, ioaddr + PGSEL);
3025d9fb9f38SJeff Kirsher 	rbuf[i++] = readw(ioaddr + PMDCSR);
3026d9fb9f38SJeff Kirsher 	rbuf[i++] = readw(ioaddr + TSTDAT);
3027d9fb9f38SJeff Kirsher 	rbuf[i++] = readw(ioaddr + DSPCFG);
3028d9fb9f38SJeff Kirsher 	rbuf[i++] = readw(ioaddr + SDCFG);
3029d9fb9f38SJeff Kirsher 	writew(0, ioaddr + PGSEL);
3030d9fb9f38SJeff Kirsher 
3031d9fb9f38SJeff Kirsher 	/* read RFCR indexed registers */
3032d9fb9f38SJeff Kirsher 	rfcr = readl(ioaddr + RxFilterAddr);
3033d9fb9f38SJeff Kirsher 	for (j = 0; j < NATSEMI_RFDR_NREGS; j++) {
3034d9fb9f38SJeff Kirsher 		writel(j*2, ioaddr + RxFilterAddr);
3035d9fb9f38SJeff Kirsher 		rbuf[i++] = readw(ioaddr + RxFilterData);
3036d9fb9f38SJeff Kirsher 	}
3037d9fb9f38SJeff Kirsher 	writel(rfcr, ioaddr + RxFilterAddr);
3038d9fb9f38SJeff Kirsher 
3039d9fb9f38SJeff Kirsher 	/* the interrupt status is clear-on-read - see if we missed any */
3040d9fb9f38SJeff Kirsher 	if (rbuf[4] & rbuf[5]) {
3041d9fb9f38SJeff Kirsher 		printk(KERN_WARNING
3042d9fb9f38SJeff Kirsher 			"%s: shoot, we dropped an interrupt (%#08x)\n",
3043d9fb9f38SJeff Kirsher 			dev->name, rbuf[4] & rbuf[5]);
3044d9fb9f38SJeff Kirsher 	}
3045d9fb9f38SJeff Kirsher 
3046d9fb9f38SJeff Kirsher 	return 0;
3047d9fb9f38SJeff Kirsher }
3048d9fb9f38SJeff Kirsher 
3049d9fb9f38SJeff Kirsher #define SWAP_BITS(x)	( (((x) & 0x0001) << 15) | (((x) & 0x0002) << 13) \
3050d9fb9f38SJeff Kirsher 			| (((x) & 0x0004) << 11) | (((x) & 0x0008) << 9)  \
3051d9fb9f38SJeff Kirsher 			| (((x) & 0x0010) << 7)  | (((x) & 0x0020) << 5)  \
3052d9fb9f38SJeff Kirsher 			| (((x) & 0x0040) << 3)  | (((x) & 0x0080) << 1)  \
3053d9fb9f38SJeff Kirsher 			| (((x) & 0x0100) >> 1)  | (((x) & 0x0200) >> 3)  \
3054d9fb9f38SJeff Kirsher 			| (((x) & 0x0400) >> 5)  | (((x) & 0x0800) >> 7)  \
3055d9fb9f38SJeff Kirsher 			| (((x) & 0x1000) >> 9)  | (((x) & 0x2000) >> 11) \
3056d9fb9f38SJeff Kirsher 			| (((x) & 0x4000) >> 13) | (((x) & 0x8000) >> 15) )
3057d9fb9f38SJeff Kirsher 
3058d9fb9f38SJeff Kirsher static int netdev_get_eeprom(struct net_device *dev, u8 *buf)
3059d9fb9f38SJeff Kirsher {
3060d9fb9f38SJeff Kirsher 	int i;
3061d9fb9f38SJeff Kirsher 	u16 *ebuf = (u16 *)buf;
3062d9fb9f38SJeff Kirsher 	void __iomem * ioaddr = ns_ioaddr(dev);
3063d9fb9f38SJeff Kirsher 	struct netdev_private *np = netdev_priv(dev);
3064d9fb9f38SJeff Kirsher 
3065d9fb9f38SJeff Kirsher 	/* eeprom_read reads 16 bits, and indexes by 16 bits */
3066d9fb9f38SJeff Kirsher 	for (i = 0; i < np->eeprom_size/2; i++) {
3067d9fb9f38SJeff Kirsher 		ebuf[i] = eeprom_read(ioaddr, i);
3068d9fb9f38SJeff Kirsher 		/* The EEPROM itself stores data bit-swapped, but eeprom_read
3069d9fb9f38SJeff Kirsher 		 * reads it back "sanely". So we swap it back here in order to
3070d9fb9f38SJeff Kirsher 		 * present it to userland as it is stored. */
3071d9fb9f38SJeff Kirsher 		ebuf[i] = SWAP_BITS(ebuf[i]);
3072d9fb9f38SJeff Kirsher 	}
3073d9fb9f38SJeff Kirsher 	return 0;
3074d9fb9f38SJeff Kirsher }
3075d9fb9f38SJeff Kirsher 
3076d9fb9f38SJeff Kirsher static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
3077d9fb9f38SJeff Kirsher {
3078d9fb9f38SJeff Kirsher 	struct mii_ioctl_data *data = if_mii(rq);
3079d9fb9f38SJeff Kirsher 	struct netdev_private *np = netdev_priv(dev);
3080d9fb9f38SJeff Kirsher 
3081d9fb9f38SJeff Kirsher 	switch(cmd) {
3082d9fb9f38SJeff Kirsher 	case SIOCGMIIPHY:		/* Get address of MII PHY in use. */
3083d9fb9f38SJeff Kirsher 		data->phy_id = np->phy_addr_external;
3084df561f66SGustavo A. R. Silva 		fallthrough;
3085d9fb9f38SJeff Kirsher 
3086d9fb9f38SJeff Kirsher 	case SIOCGMIIREG:		/* Read MII PHY register. */
3087d9fb9f38SJeff Kirsher 		/* The phy_id is not enough to uniquely identify
3088d9fb9f38SJeff Kirsher 		 * the intended target. Therefore the command is sent to
3089d9fb9f38SJeff Kirsher 		 * the given mii on the current port.
3090d9fb9f38SJeff Kirsher 		 */
3091d9fb9f38SJeff Kirsher 		if (dev->if_port == PORT_TP) {
3092d9fb9f38SJeff Kirsher 			if ((data->phy_id & 0x1f) == np->phy_addr_external)
3093d9fb9f38SJeff Kirsher 				data->val_out = mdio_read(dev,
3094d9fb9f38SJeff Kirsher 							data->reg_num & 0x1f);
3095d9fb9f38SJeff Kirsher 			else
3096d9fb9f38SJeff Kirsher 				data->val_out = 0;
3097d9fb9f38SJeff Kirsher 		} else {
3098d9fb9f38SJeff Kirsher 			move_int_phy(dev, data->phy_id & 0x1f);
3099d9fb9f38SJeff Kirsher 			data->val_out = miiport_read(dev, data->phy_id & 0x1f,
3100d9fb9f38SJeff Kirsher 							data->reg_num & 0x1f);
3101d9fb9f38SJeff Kirsher 		}
3102d9fb9f38SJeff Kirsher 		return 0;
3103d9fb9f38SJeff Kirsher 
3104d9fb9f38SJeff Kirsher 	case SIOCSMIIREG:		/* Write MII PHY register. */
3105d9fb9f38SJeff Kirsher 		if (dev->if_port == PORT_TP) {
3106d9fb9f38SJeff Kirsher 			if ((data->phy_id & 0x1f) == np->phy_addr_external) {
3107d9fb9f38SJeff Kirsher  				if ((data->reg_num & 0x1f) == MII_ADVERTISE)
3108d9fb9f38SJeff Kirsher 					np->advertising = data->val_in;
3109d9fb9f38SJeff Kirsher 				mdio_write(dev, data->reg_num & 0x1f,
3110d9fb9f38SJeff Kirsher 							data->val_in);
3111d9fb9f38SJeff Kirsher 			}
3112d9fb9f38SJeff Kirsher 		} else {
3113d9fb9f38SJeff Kirsher 			if ((data->phy_id & 0x1f) == np->phy_addr_external) {
3114d9fb9f38SJeff Kirsher  				if ((data->reg_num & 0x1f) == MII_ADVERTISE)
3115d9fb9f38SJeff Kirsher 					np->advertising = data->val_in;
3116d9fb9f38SJeff Kirsher 			}
3117d9fb9f38SJeff Kirsher 			move_int_phy(dev, data->phy_id & 0x1f);
3118d9fb9f38SJeff Kirsher 			miiport_write(dev, data->phy_id & 0x1f,
3119d9fb9f38SJeff Kirsher 						data->reg_num & 0x1f,
3120d9fb9f38SJeff Kirsher 						data->val_in);
3121d9fb9f38SJeff Kirsher 		}
3122d9fb9f38SJeff Kirsher 		return 0;
3123d9fb9f38SJeff Kirsher 	default:
3124d9fb9f38SJeff Kirsher 		return -EOPNOTSUPP;
3125d9fb9f38SJeff Kirsher 	}
3126d9fb9f38SJeff Kirsher }
3127d9fb9f38SJeff Kirsher 
3128d9fb9f38SJeff Kirsher static void enable_wol_mode(struct net_device *dev, int enable_intr)
3129d9fb9f38SJeff Kirsher {
3130d9fb9f38SJeff Kirsher 	void __iomem * ioaddr = ns_ioaddr(dev);
3131d9fb9f38SJeff Kirsher 	struct netdev_private *np = netdev_priv(dev);
3132d9fb9f38SJeff Kirsher 
3133d9fb9f38SJeff Kirsher 	if (netif_msg_wol(np))
3134d9fb9f38SJeff Kirsher 		printk(KERN_INFO "%s: remaining active for wake-on-lan\n",
3135d9fb9f38SJeff Kirsher 			dev->name);
3136d9fb9f38SJeff Kirsher 
3137d9fb9f38SJeff Kirsher 	/* For WOL we must restart the rx process in silent mode.
3138d9fb9f38SJeff Kirsher 	 * Write NULL to the RxRingPtr. Only possible if
3139d9fb9f38SJeff Kirsher 	 * rx process is stopped
3140d9fb9f38SJeff Kirsher 	 */
3141d9fb9f38SJeff Kirsher 	writel(0, ioaddr + RxRingPtr);
3142d9fb9f38SJeff Kirsher 
3143d9fb9f38SJeff Kirsher 	/* read WoL status to clear */
3144d9fb9f38SJeff Kirsher 	readl(ioaddr + WOLCmd);
3145d9fb9f38SJeff Kirsher 
3146d9fb9f38SJeff Kirsher 	/* PME on, clear status */
3147d9fb9f38SJeff Kirsher 	writel(np->SavedClkRun | PMEEnable | PMEStatus, ioaddr + ClkRun);
3148d9fb9f38SJeff Kirsher 
3149d9fb9f38SJeff Kirsher 	/* and restart the rx process */
3150d9fb9f38SJeff Kirsher 	writel(RxOn, ioaddr + ChipCmd);
3151d9fb9f38SJeff Kirsher 
3152d9fb9f38SJeff Kirsher 	if (enable_intr) {
3153d9fb9f38SJeff Kirsher 		/* enable the WOL interrupt.
3154d9fb9f38SJeff Kirsher 		 * Could be used to send a netlink message.
3155d9fb9f38SJeff Kirsher 		 */
3156d9fb9f38SJeff Kirsher 		writel(WOLPkt | LinkChange, ioaddr + IntrMask);
3157d9fb9f38SJeff Kirsher 		natsemi_irq_enable(dev);
3158d9fb9f38SJeff Kirsher 	}
3159d9fb9f38SJeff Kirsher }
3160d9fb9f38SJeff Kirsher 
3161d9fb9f38SJeff Kirsher static int netdev_close(struct net_device *dev)
3162d9fb9f38SJeff Kirsher {
3163d9fb9f38SJeff Kirsher 	void __iomem * ioaddr = ns_ioaddr(dev);
3164d9fb9f38SJeff Kirsher 	struct netdev_private *np = netdev_priv(dev);
3165d710ce13SFrancois Romieu 	const int irq = np->pci_dev->irq;
3166d9fb9f38SJeff Kirsher 
3167d9fb9f38SJeff Kirsher 	if (netif_msg_ifdown(np))
3168d9fb9f38SJeff Kirsher 		printk(KERN_DEBUG
3169d9fb9f38SJeff Kirsher 			"%s: Shutting down ethercard, status was %#04x.\n",
3170d9fb9f38SJeff Kirsher 			dev->name, (int)readl(ioaddr + ChipCmd));
3171d9fb9f38SJeff Kirsher 	if (netif_msg_pktdata(np))
3172d9fb9f38SJeff Kirsher 		printk(KERN_DEBUG
3173d9fb9f38SJeff Kirsher 			"%s: Queue pointers were Tx %d / %d,  Rx %d / %d.\n",
3174d9fb9f38SJeff Kirsher 			dev->name, np->cur_tx, np->dirty_tx,
3175d9fb9f38SJeff Kirsher 			np->cur_rx, np->dirty_rx);
3176d9fb9f38SJeff Kirsher 
3177d9fb9f38SJeff Kirsher 	napi_disable(&np->napi);
3178d9fb9f38SJeff Kirsher 
3179d9fb9f38SJeff Kirsher 	/*
3180d9fb9f38SJeff Kirsher 	 * FIXME: what if someone tries to close a device
3181d9fb9f38SJeff Kirsher 	 * that is suspended?
3182d9fb9f38SJeff Kirsher 	 * Should we reenable the nic to switch to
3183d9fb9f38SJeff Kirsher 	 * the final WOL settings?
3184d9fb9f38SJeff Kirsher 	 */
3185d9fb9f38SJeff Kirsher 
3186d9fb9f38SJeff Kirsher 	del_timer_sync(&np->timer);
3187d710ce13SFrancois Romieu 	disable_irq(irq);
3188d9fb9f38SJeff Kirsher 	spin_lock_irq(&np->lock);
3189d9fb9f38SJeff Kirsher 	natsemi_irq_disable(dev);
3190d9fb9f38SJeff Kirsher 	np->hands_off = 1;
3191d9fb9f38SJeff Kirsher 	spin_unlock_irq(&np->lock);
3192d710ce13SFrancois Romieu 	enable_irq(irq);
3193d9fb9f38SJeff Kirsher 
3194d710ce13SFrancois Romieu 	free_irq(irq, dev);
3195d9fb9f38SJeff Kirsher 
3196d9fb9f38SJeff Kirsher 	/* Interrupt disabled, interrupt handler released,
3197d9fb9f38SJeff Kirsher 	 * queue stopped, timer deleted, rtnl_lock held
3198d9fb9f38SJeff Kirsher 	 * All async codepaths that access the driver are disabled.
3199d9fb9f38SJeff Kirsher 	 */
3200d9fb9f38SJeff Kirsher 	spin_lock_irq(&np->lock);
3201d9fb9f38SJeff Kirsher 	np->hands_off = 0;
3202d9fb9f38SJeff Kirsher 	readl(ioaddr + IntrMask);
3203d9fb9f38SJeff Kirsher 	readw(ioaddr + MIntrStatus);
3204d9fb9f38SJeff Kirsher 
3205d9fb9f38SJeff Kirsher 	/* Freeze Stats */
3206d9fb9f38SJeff Kirsher 	writel(StatsFreeze, ioaddr + StatsCtrl);
3207d9fb9f38SJeff Kirsher 
3208d9fb9f38SJeff Kirsher 	/* Stop the chip's Tx and Rx processes. */
3209d9fb9f38SJeff Kirsher 	natsemi_stop_rxtx(dev);
3210d9fb9f38SJeff Kirsher 
3211d9fb9f38SJeff Kirsher 	__get_stats(dev);
3212d9fb9f38SJeff Kirsher 	spin_unlock_irq(&np->lock);
3213d9fb9f38SJeff Kirsher 
3214d9fb9f38SJeff Kirsher 	/* clear the carrier last - an interrupt could reenable it otherwise */
3215d9fb9f38SJeff Kirsher 	netif_carrier_off(dev);
3216d9fb9f38SJeff Kirsher 	netif_stop_queue(dev);
3217d9fb9f38SJeff Kirsher 
3218d9fb9f38SJeff Kirsher 	dump_ring(dev);
3219d9fb9f38SJeff Kirsher 	drain_ring(dev);
3220d9fb9f38SJeff Kirsher 	free_ring(dev);
3221d9fb9f38SJeff Kirsher 
3222d9fb9f38SJeff Kirsher 	{
3223d9fb9f38SJeff Kirsher 		u32 wol = readl(ioaddr + WOLCmd) & WakeOptsSummary;
3224d9fb9f38SJeff Kirsher 		if (wol) {
3225d9fb9f38SJeff Kirsher 			/* restart the NIC in WOL mode.
3226d9fb9f38SJeff Kirsher 			 * The nic must be stopped for this.
3227d9fb9f38SJeff Kirsher 			 */
3228d9fb9f38SJeff Kirsher 			enable_wol_mode(dev, 0);
3229d9fb9f38SJeff Kirsher 		} else {
3230d9fb9f38SJeff Kirsher 			/* Restore PME enable bit unmolested */
3231d9fb9f38SJeff Kirsher 			writel(np->SavedClkRun, ioaddr + ClkRun);
3232d9fb9f38SJeff Kirsher 		}
3233d9fb9f38SJeff Kirsher 	}
3234d9fb9f38SJeff Kirsher 	return 0;
3235d9fb9f38SJeff Kirsher }
3236d9fb9f38SJeff Kirsher 
3237d9fb9f38SJeff Kirsher 
32386980cbe4SBill Pemberton static void natsemi_remove1(struct pci_dev *pdev)
3239d9fb9f38SJeff Kirsher {
3240d9fb9f38SJeff Kirsher 	struct net_device *dev = pci_get_drvdata(pdev);
3241d9fb9f38SJeff Kirsher 	void __iomem * ioaddr = ns_ioaddr(dev);
3242d9fb9f38SJeff Kirsher 
3243d9fb9f38SJeff Kirsher 	NATSEMI_REMOVE_FILE(pdev, dspcfg_workaround);
3244d9fb9f38SJeff Kirsher 	unregister_netdev (dev);
3245d9fb9f38SJeff Kirsher 	pci_release_regions (pdev);
3246d9fb9f38SJeff Kirsher 	iounmap(ioaddr);
3247d9fb9f38SJeff Kirsher 	free_netdev (dev);
3248d9fb9f38SJeff Kirsher }
3249d9fb9f38SJeff Kirsher 
3250d9fb9f38SJeff Kirsher /*
3251d9fb9f38SJeff Kirsher  * The ns83815 chip doesn't have explicit RxStop bits.
3252d9fb9f38SJeff Kirsher  * Kicking the Rx or Tx process for a new packet reenables the Rx process
3253d9fb9f38SJeff Kirsher  * of the nic, thus this function must be very careful:
3254d9fb9f38SJeff Kirsher  *
3255d9fb9f38SJeff Kirsher  * suspend/resume synchronization:
3256d9fb9f38SJeff Kirsher  * entry points:
3257d9fb9f38SJeff Kirsher  *   netdev_open, netdev_close, netdev_ioctl, set_rx_mode, intr_handler,
3258d9fb9f38SJeff Kirsher  *   start_tx, ns_tx_timeout
3259d9fb9f38SJeff Kirsher  *
3260d9fb9f38SJeff Kirsher  * No function accesses the hardware without checking np->hands_off.
3261d9fb9f38SJeff Kirsher  *	the check occurs under spin_lock_irq(&np->lock);
3262d9fb9f38SJeff Kirsher  * exceptions:
3263d9fb9f38SJeff Kirsher  *	* netdev_ioctl: noncritical access.
3264d9fb9f38SJeff Kirsher  *	* netdev_open: cannot happen due to the device_detach
3265d9fb9f38SJeff Kirsher  *	* netdev_close: doesn't hurt.
3266d9fb9f38SJeff Kirsher  *	* netdev_timer: timer stopped by natsemi_suspend.
3267d9fb9f38SJeff Kirsher  *	* intr_handler: doesn't acquire the spinlock. suspend calls
3268d9fb9f38SJeff Kirsher  *		disable_irq() to enforce synchronization.
3269d9fb9f38SJeff Kirsher  *      * natsemi_poll: checks before reenabling interrupts.  suspend
3270d9fb9f38SJeff Kirsher  *              sets hands_off, disables interrupts and then waits with
3271d9fb9f38SJeff Kirsher  *              napi_disable().
3272d9fb9f38SJeff Kirsher  *
3273d9fb9f38SJeff Kirsher  * Interrupts must be disabled, otherwise hands_off can cause irq storms.
3274d9fb9f38SJeff Kirsher  */
3275d9fb9f38SJeff Kirsher 
327640c1b1eeSVaibhav Gupta static int __maybe_unused natsemi_suspend(struct device *dev_d)
3277d9fb9f38SJeff Kirsher {
327840c1b1eeSVaibhav Gupta 	struct net_device *dev = dev_get_drvdata(dev_d);
3279d9fb9f38SJeff Kirsher 	struct netdev_private *np = netdev_priv(dev);
3280d9fb9f38SJeff Kirsher 	void __iomem * ioaddr = ns_ioaddr(dev);
3281d9fb9f38SJeff Kirsher 
3282d9fb9f38SJeff Kirsher 	rtnl_lock();
3283d9fb9f38SJeff Kirsher 	if (netif_running (dev)) {
3284d710ce13SFrancois Romieu 		const int irq = np->pci_dev->irq;
3285d710ce13SFrancois Romieu 
3286d9fb9f38SJeff Kirsher 		del_timer_sync(&np->timer);
3287d9fb9f38SJeff Kirsher 
3288d710ce13SFrancois Romieu 		disable_irq(irq);
3289d9fb9f38SJeff Kirsher 		spin_lock_irq(&np->lock);
3290d9fb9f38SJeff Kirsher 
3291d9fb9f38SJeff Kirsher 		natsemi_irq_disable(dev);
3292d9fb9f38SJeff Kirsher 		np->hands_off = 1;
3293d9fb9f38SJeff Kirsher 		natsemi_stop_rxtx(dev);
3294d9fb9f38SJeff Kirsher 		netif_stop_queue(dev);
3295d9fb9f38SJeff Kirsher 
3296d9fb9f38SJeff Kirsher 		spin_unlock_irq(&np->lock);
3297d710ce13SFrancois Romieu 		enable_irq(irq);
3298d9fb9f38SJeff Kirsher 
3299d9fb9f38SJeff Kirsher 		napi_disable(&np->napi);
3300d9fb9f38SJeff Kirsher 
3301d9fb9f38SJeff Kirsher 		/* Update the error counts. */
3302d9fb9f38SJeff Kirsher 		__get_stats(dev);
3303d9fb9f38SJeff Kirsher 
3304d9fb9f38SJeff Kirsher 		/* pci_power_off(pdev, -1); */
3305d9fb9f38SJeff Kirsher 		drain_ring(dev);
3306d9fb9f38SJeff Kirsher 		{
3307d9fb9f38SJeff Kirsher 			u32 wol = readl(ioaddr + WOLCmd) & WakeOptsSummary;
3308d9fb9f38SJeff Kirsher 			/* Restore PME enable bit */
3309d9fb9f38SJeff Kirsher 			if (wol) {
3310d9fb9f38SJeff Kirsher 				/* restart the NIC in WOL mode.
3311d9fb9f38SJeff Kirsher 				 * The nic must be stopped for this.
3312d9fb9f38SJeff Kirsher 				 * FIXME: use the WOL interrupt
3313d9fb9f38SJeff Kirsher 				 */
3314d9fb9f38SJeff Kirsher 				enable_wol_mode(dev, 0);
3315d9fb9f38SJeff Kirsher 			} else {
3316d9fb9f38SJeff Kirsher 				/* Restore PME enable bit unmolested */
3317d9fb9f38SJeff Kirsher 				writel(np->SavedClkRun, ioaddr + ClkRun);
3318d9fb9f38SJeff Kirsher 			}
3319d9fb9f38SJeff Kirsher 		}
3320d9fb9f38SJeff Kirsher 	}
3321d9fb9f38SJeff Kirsher 	netif_device_detach(dev);
3322d9fb9f38SJeff Kirsher 	rtnl_unlock();
3323d9fb9f38SJeff Kirsher 	return 0;
3324d9fb9f38SJeff Kirsher }
3325d9fb9f38SJeff Kirsher 
3326d9fb9f38SJeff Kirsher 
332740c1b1eeSVaibhav Gupta static int __maybe_unused natsemi_resume(struct device *dev_d)
3328d9fb9f38SJeff Kirsher {
332940c1b1eeSVaibhav Gupta 	struct net_device *dev = dev_get_drvdata(dev_d);
3330d9fb9f38SJeff Kirsher 	struct netdev_private *np = netdev_priv(dev);
3331d9fb9f38SJeff Kirsher 
3332d9fb9f38SJeff Kirsher 	rtnl_lock();
3333d9fb9f38SJeff Kirsher 	if (netif_device_present(dev))
3334d9fb9f38SJeff Kirsher 		goto out;
3335d9fb9f38SJeff Kirsher 	if (netif_running(dev)) {
3336d710ce13SFrancois Romieu 		const int irq = np->pci_dev->irq;
3337d710ce13SFrancois Romieu 
3338d9fb9f38SJeff Kirsher 		BUG_ON(!np->hands_off);
3339d9fb9f38SJeff Kirsher 	/*	pci_power_on(pdev); */
3340d9fb9f38SJeff Kirsher 
3341d9fb9f38SJeff Kirsher 		napi_enable(&np->napi);
3342d9fb9f38SJeff Kirsher 
3343d9fb9f38SJeff Kirsher 		natsemi_reset(dev);
3344d9fb9f38SJeff Kirsher 		init_ring(dev);
3345d710ce13SFrancois Romieu 		disable_irq(irq);
3346d9fb9f38SJeff Kirsher 		spin_lock_irq(&np->lock);
3347d9fb9f38SJeff Kirsher 		np->hands_off = 0;
3348d9fb9f38SJeff Kirsher 		init_registers(dev);
3349d9fb9f38SJeff Kirsher 		netif_device_attach(dev);
3350d9fb9f38SJeff Kirsher 		spin_unlock_irq(&np->lock);
3351d710ce13SFrancois Romieu 		enable_irq(irq);
3352d9fb9f38SJeff Kirsher 
3353d9fb9f38SJeff Kirsher 		mod_timer(&np->timer, round_jiffies(jiffies + 1*HZ));
3354d9fb9f38SJeff Kirsher 	}
3355d9fb9f38SJeff Kirsher 	netif_device_attach(dev);
3356d9fb9f38SJeff Kirsher out:
3357d9fb9f38SJeff Kirsher 	rtnl_unlock();
335840c1b1eeSVaibhav Gupta 	return 0;
3359d9fb9f38SJeff Kirsher }
3360d9fb9f38SJeff Kirsher 
336140c1b1eeSVaibhav Gupta static SIMPLE_DEV_PM_OPS(natsemi_pm_ops, natsemi_suspend, natsemi_resume);
3362d9fb9f38SJeff Kirsher 
3363d9fb9f38SJeff Kirsher static struct pci_driver natsemi_driver = {
3364d9fb9f38SJeff Kirsher 	.name		= DRV_NAME,
3365d9fb9f38SJeff Kirsher 	.id_table	= natsemi_pci_tbl,
3366d9fb9f38SJeff Kirsher 	.probe		= natsemi_probe1,
33676980cbe4SBill Pemberton 	.remove		= natsemi_remove1,
336840c1b1eeSVaibhav Gupta 	.driver.pm	= &natsemi_pm_ops,
3369d9fb9f38SJeff Kirsher };
3370d9fb9f38SJeff Kirsher 
3371d9fb9f38SJeff Kirsher static int __init natsemi_init_mod (void)
3372d9fb9f38SJeff Kirsher {
3373d9fb9f38SJeff Kirsher /* when a module, this is printed whether or not devices are found in probe */
3374d9fb9f38SJeff Kirsher #ifdef MODULE
3375d9fb9f38SJeff Kirsher 	printk(version);
3376d9fb9f38SJeff Kirsher #endif
3377d9fb9f38SJeff Kirsher 
3378d9fb9f38SJeff Kirsher 	return pci_register_driver(&natsemi_driver);
3379d9fb9f38SJeff Kirsher }
3380d9fb9f38SJeff Kirsher 
3381d9fb9f38SJeff Kirsher static void __exit natsemi_exit_mod (void)
3382d9fb9f38SJeff Kirsher {
3383d9fb9f38SJeff Kirsher 	pci_unregister_driver (&natsemi_driver);
3384d9fb9f38SJeff Kirsher }
3385d9fb9f38SJeff Kirsher 
3386d9fb9f38SJeff Kirsher module_init(natsemi_init_mod);
3387d9fb9f38SJeff Kirsher module_exit(natsemi_exit_mod);
3388d9fb9f38SJeff Kirsher 
3389