1d9fb9f38SJeff Kirsher /* natsemi.c: A Linux PCI Ethernet driver for the NatSemi DP8381x series. */
2d9fb9f38SJeff Kirsher /*
3d9fb9f38SJeff Kirsher 	Written/copyright 1999-2001 by Donald Becker.
4d9fb9f38SJeff Kirsher 	Portions copyright (c) 2001,2002 Sun Microsystems (thockin@sun.com)
5d9fb9f38SJeff Kirsher 	Portions copyright 2001,2002 Manfred Spraul (manfred@colorfullife.com)
6d9fb9f38SJeff Kirsher 	Portions copyright 2004 Harald Welte <laforge@gnumonks.org>
7d9fb9f38SJeff Kirsher 
8d9fb9f38SJeff Kirsher 	This software may be used and distributed according to the terms of
9d9fb9f38SJeff Kirsher 	the GNU General Public License (GPL), incorporated herein by reference.
10d9fb9f38SJeff Kirsher 	Drivers based on or derived from this code fall under the GPL and must
11d9fb9f38SJeff Kirsher 	retain the authorship, copyright and license notice.  This file is not
12d9fb9f38SJeff Kirsher 	a complete program and may only be used when the entire operating
13d9fb9f38SJeff Kirsher 	system is licensed under the GPL.  License for under other terms may be
14d9fb9f38SJeff Kirsher 	available.  Contact the original author for details.
15d9fb9f38SJeff Kirsher 
16d9fb9f38SJeff Kirsher 	The original author may be reached as becker@scyld.com, or at
17d9fb9f38SJeff Kirsher 	Scyld Computing Corporation
18d9fb9f38SJeff Kirsher 	410 Severn Ave., Suite 210
19d9fb9f38SJeff Kirsher 	Annapolis MD 21403
20d9fb9f38SJeff Kirsher 
21d9fb9f38SJeff Kirsher 	Support information and updates available at
22d9fb9f38SJeff Kirsher 	http://www.scyld.com/network/netsemi.html
23d9fb9f38SJeff Kirsher 	[link no longer provides useful info -jgarzik]
24d9fb9f38SJeff Kirsher 
25d9fb9f38SJeff Kirsher 
26d9fb9f38SJeff Kirsher 	TODO:
27d9fb9f38SJeff Kirsher 	* big endian support with CFG:BEM instead of cpu_to_le32
28d9fb9f38SJeff Kirsher */
29d9fb9f38SJeff Kirsher 
30d9fb9f38SJeff Kirsher #include <linux/module.h>
31d9fb9f38SJeff Kirsher #include <linux/kernel.h>
32d9fb9f38SJeff Kirsher #include <linux/string.h>
33d9fb9f38SJeff Kirsher #include <linux/timer.h>
34d9fb9f38SJeff Kirsher #include <linux/errno.h>
35d9fb9f38SJeff Kirsher #include <linux/ioport.h>
36d9fb9f38SJeff Kirsher #include <linux/slab.h>
37d9fb9f38SJeff Kirsher #include <linux/interrupt.h>
38d9fb9f38SJeff Kirsher #include <linux/pci.h>
39d9fb9f38SJeff Kirsher #include <linux/netdevice.h>
40d9fb9f38SJeff Kirsher #include <linux/etherdevice.h>
41d9fb9f38SJeff Kirsher #include <linux/skbuff.h>
42d9fb9f38SJeff Kirsher #include <linux/init.h>
43d9fb9f38SJeff Kirsher #include <linux/spinlock.h>
44d9fb9f38SJeff Kirsher #include <linux/ethtool.h>
45d9fb9f38SJeff Kirsher #include <linux/delay.h>
46d9fb9f38SJeff Kirsher #include <linux/rtnetlink.h>
47d9fb9f38SJeff Kirsher #include <linux/mii.h>
48d9fb9f38SJeff Kirsher #include <linux/crc32.h>
49d9fb9f38SJeff Kirsher #include <linux/bitops.h>
50d9fb9f38SJeff Kirsher #include <linux/prefetch.h>
51d9fb9f38SJeff Kirsher #include <asm/processor.h>	/* Processor type for cache alignment. */
52d9fb9f38SJeff Kirsher #include <asm/io.h>
53d9fb9f38SJeff Kirsher #include <asm/irq.h>
547c0f6ba6SLinus Torvalds #include <linux/uaccess.h>
55d9fb9f38SJeff Kirsher 
56d9fb9f38SJeff Kirsher #define DRV_NAME	"natsemi"
57d9fb9f38SJeff Kirsher #define DRV_VERSION	"2.1"
58d9fb9f38SJeff Kirsher #define DRV_RELDATE	"Sept 11, 2006"
59d9fb9f38SJeff Kirsher 
60d9fb9f38SJeff Kirsher #define RX_OFFSET	2
61d9fb9f38SJeff Kirsher 
62d9fb9f38SJeff Kirsher /* Updated to recommendations in pci-skeleton v2.03. */
63d9fb9f38SJeff Kirsher 
64d9fb9f38SJeff Kirsher /* The user-configurable values.
65d9fb9f38SJeff Kirsher    These may be modified when a driver module is loaded.*/
66d9fb9f38SJeff Kirsher 
67d9fb9f38SJeff Kirsher #define NATSEMI_DEF_MSG		(NETIF_MSG_DRV		| \
68d9fb9f38SJeff Kirsher 				 NETIF_MSG_LINK		| \
69d9fb9f38SJeff Kirsher 				 NETIF_MSG_WOL		| \
70d9fb9f38SJeff Kirsher 				 NETIF_MSG_RX_ERR	| \
71d9fb9f38SJeff Kirsher 				 NETIF_MSG_TX_ERR)
72d9fb9f38SJeff Kirsher static int debug = -1;
73d9fb9f38SJeff Kirsher 
74d9fb9f38SJeff Kirsher static int mtu;
75d9fb9f38SJeff Kirsher 
76d9fb9f38SJeff Kirsher /* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
77d9fb9f38SJeff Kirsher    This chip uses a 512 element hash table based on the Ethernet CRC.  */
78d9fb9f38SJeff Kirsher static const int multicast_filter_limit = 100;
79d9fb9f38SJeff Kirsher 
80d9fb9f38SJeff Kirsher /* Set the copy breakpoint for the copy-only-tiny-frames scheme.
81d9fb9f38SJeff Kirsher    Setting to > 1518 effectively disables this feature. */
82d9fb9f38SJeff Kirsher static int rx_copybreak;
83d9fb9f38SJeff Kirsher 
84d9fb9f38SJeff Kirsher static int dspcfg_workaround = 1;
85d9fb9f38SJeff Kirsher 
86d9fb9f38SJeff Kirsher /* Used to pass the media type, etc.
87d9fb9f38SJeff Kirsher    Both 'options[]' and 'full_duplex[]' should exist for driver
88d9fb9f38SJeff Kirsher    interoperability.
89d9fb9f38SJeff Kirsher    The media type is usually passed in 'options[]'.
90d9fb9f38SJeff Kirsher */
91d9fb9f38SJeff Kirsher #define MAX_UNITS 8		/* More are supported, limit only on options */
92d9fb9f38SJeff Kirsher static int options[MAX_UNITS];
93d9fb9f38SJeff Kirsher static int full_duplex[MAX_UNITS];
94d9fb9f38SJeff Kirsher 
95d9fb9f38SJeff Kirsher /* Operational parameters that are set at compile time. */
96d9fb9f38SJeff Kirsher 
97d9fb9f38SJeff Kirsher /* Keep the ring sizes a power of two for compile efficiency.
98d9fb9f38SJeff Kirsher    The compiler will convert <unsigned>'%'<2^N> into a bit mask.
99d9fb9f38SJeff Kirsher    Making the Tx ring too large decreases the effectiveness of channel
100d9fb9f38SJeff Kirsher    bonding and packet priority.
101d9fb9f38SJeff Kirsher    There are no ill effects from too-large receive rings. */
102d9fb9f38SJeff Kirsher #define TX_RING_SIZE	16
103d9fb9f38SJeff Kirsher #define TX_QUEUE_LEN	10 /* Limit ring entries actually used, min 4. */
104d9fb9f38SJeff Kirsher #define RX_RING_SIZE	32
105d9fb9f38SJeff Kirsher 
106d9fb9f38SJeff Kirsher /* Operational parameters that usually are not changed. */
107d9fb9f38SJeff Kirsher /* Time in jiffies before concluding the transmitter is hung. */
108d9fb9f38SJeff Kirsher #define TX_TIMEOUT  (2*HZ)
109d9fb9f38SJeff Kirsher 
110d9fb9f38SJeff Kirsher #define NATSEMI_HW_TIMEOUT	400
111d9fb9f38SJeff Kirsher #define NATSEMI_TIMER_FREQ	5*HZ
112d9fb9f38SJeff Kirsher #define NATSEMI_PG0_NREGS	64
113d9fb9f38SJeff Kirsher #define NATSEMI_RFDR_NREGS	8
114d9fb9f38SJeff Kirsher #define NATSEMI_PG1_NREGS	4
115d9fb9f38SJeff Kirsher #define NATSEMI_NREGS		(NATSEMI_PG0_NREGS + NATSEMI_RFDR_NREGS + \
116d9fb9f38SJeff Kirsher 				 NATSEMI_PG1_NREGS)
117d9fb9f38SJeff Kirsher #define NATSEMI_REGS_VER	1 /* v1 added RFDR registers */
118d9fb9f38SJeff Kirsher #define NATSEMI_REGS_SIZE	(NATSEMI_NREGS * sizeof(u32))
119d9fb9f38SJeff Kirsher 
120d9fb9f38SJeff Kirsher /* Buffer sizes:
121d9fb9f38SJeff Kirsher  * The nic writes 32-bit values, even if the upper bytes of
122d9fb9f38SJeff Kirsher  * a 32-bit value are beyond the end of the buffer.
123d9fb9f38SJeff Kirsher  */
124d9fb9f38SJeff Kirsher #define NATSEMI_HEADERS		22	/* 2*mac,type,vlan,crc */
125d9fb9f38SJeff Kirsher #define NATSEMI_PADDING		16	/* 2 bytes should be sufficient */
126d9fb9f38SJeff Kirsher #define NATSEMI_LONGPKT		1518	/* limit for normal packets */
127d9fb9f38SJeff Kirsher #define NATSEMI_RX_LIMIT	2046	/* maximum supported by hardware */
128d9fb9f38SJeff Kirsher 
129d9fb9f38SJeff Kirsher /* These identify the driver base version and may not be removed. */
1306980cbe4SBill Pemberton static const char version[] =
131d9fb9f38SJeff Kirsher   KERN_INFO DRV_NAME " dp8381x driver, version "
132d9fb9f38SJeff Kirsher       DRV_VERSION ", " DRV_RELDATE "\n"
133d9fb9f38SJeff Kirsher   "  originally by Donald Becker <becker@scyld.com>\n"
134d9fb9f38SJeff Kirsher   "  2.4.x kernel port by Jeff Garzik, Tjeerd Mulder\n";
135d9fb9f38SJeff Kirsher 
136d9fb9f38SJeff Kirsher MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
137d9fb9f38SJeff Kirsher MODULE_DESCRIPTION("National Semiconductor DP8381x series PCI Ethernet driver");
138d9fb9f38SJeff Kirsher MODULE_LICENSE("GPL");
139d9fb9f38SJeff Kirsher 
140d9fb9f38SJeff Kirsher module_param(mtu, int, 0);
141d9fb9f38SJeff Kirsher module_param(debug, int, 0);
142d9fb9f38SJeff Kirsher module_param(rx_copybreak, int, 0);
143d9fb9f38SJeff Kirsher module_param(dspcfg_workaround, int, 0);
144d9fb9f38SJeff Kirsher module_param_array(options, int, NULL, 0);
145d9fb9f38SJeff Kirsher module_param_array(full_duplex, int, NULL, 0);
146d9fb9f38SJeff Kirsher MODULE_PARM_DESC(mtu, "DP8381x MTU (all boards)");
147d9fb9f38SJeff Kirsher MODULE_PARM_DESC(debug, "DP8381x default debug level");
148d9fb9f38SJeff Kirsher MODULE_PARM_DESC(rx_copybreak,
149d9fb9f38SJeff Kirsher 	"DP8381x copy breakpoint for copy-only-tiny-frames");
150d9fb9f38SJeff Kirsher MODULE_PARM_DESC(dspcfg_workaround, "DP8381x: control DspCfg workaround");
151d9fb9f38SJeff Kirsher MODULE_PARM_DESC(options,
152d9fb9f38SJeff Kirsher 	"DP8381x: Bits 0-3: media type, bit 17: full duplex");
153d9fb9f38SJeff Kirsher MODULE_PARM_DESC(full_duplex, "DP8381x full duplex setting(s) (1)");
154d9fb9f38SJeff Kirsher 
155d9fb9f38SJeff Kirsher /*
156d9fb9f38SJeff Kirsher 				Theory of Operation
157d9fb9f38SJeff Kirsher 
158d9fb9f38SJeff Kirsher I. Board Compatibility
159d9fb9f38SJeff Kirsher 
160d9fb9f38SJeff Kirsher This driver is designed for National Semiconductor DP83815 PCI Ethernet NIC.
161951c6270SJilin Yuan It also works with other chips in the DP83810 series.
162d9fb9f38SJeff Kirsher 
163d9fb9f38SJeff Kirsher II. Board-specific settings
164d9fb9f38SJeff Kirsher 
165d9fb9f38SJeff Kirsher This driver requires the PCI interrupt line to be valid.
166d9fb9f38SJeff Kirsher It honors the EEPROM-set values.
167d9fb9f38SJeff Kirsher 
168d9fb9f38SJeff Kirsher III. Driver operation
169d9fb9f38SJeff Kirsher 
170d9fb9f38SJeff Kirsher IIIa. Ring buffers
171d9fb9f38SJeff Kirsher 
172d9fb9f38SJeff Kirsher This driver uses two statically allocated fixed-size descriptor lists
173d9fb9f38SJeff Kirsher formed into rings by a branch from the final descriptor to the beginning of
174d9fb9f38SJeff Kirsher the list.  The ring sizes are set at compile time by RX/TX_RING_SIZE.
175d9fb9f38SJeff Kirsher The NatSemi design uses a 'next descriptor' pointer that the driver forms
176d9fb9f38SJeff Kirsher into a list.
177d9fb9f38SJeff Kirsher 
178d9fb9f38SJeff Kirsher IIIb/c. Transmit/Receive Structure
179d9fb9f38SJeff Kirsher 
180d9fb9f38SJeff Kirsher This driver uses a zero-copy receive and transmit scheme.
181d9fb9f38SJeff Kirsher The driver allocates full frame size skbuffs for the Rx ring buffers at
182d9fb9f38SJeff Kirsher open() time and passes the skb->data field to the chip as receive data
183d9fb9f38SJeff Kirsher buffers.  When an incoming frame is less than RX_COPYBREAK bytes long,
184d9fb9f38SJeff Kirsher a fresh skbuff is allocated and the frame is copied to the new skbuff.
185d9fb9f38SJeff Kirsher When the incoming frame is larger, the skbuff is passed directly up the
186d9fb9f38SJeff Kirsher protocol stack.  Buffers consumed this way are replaced by newly allocated
187d9fb9f38SJeff Kirsher skbuffs in a later phase of receives.
188d9fb9f38SJeff Kirsher 
189d9fb9f38SJeff Kirsher The RX_COPYBREAK value is chosen to trade-off the memory wasted by
190d9fb9f38SJeff Kirsher using a full-sized skbuff for small frames vs. the copying costs of larger
191d9fb9f38SJeff Kirsher frames.  New boards are typically used in generously configured machines
192d9fb9f38SJeff Kirsher and the underfilled buffers have negligible impact compared to the benefit of
193d9fb9f38SJeff Kirsher a single allocation size, so the default value of zero results in never
194d9fb9f38SJeff Kirsher copying packets.  When copying is done, the cost is usually mitigated by using
195d9fb9f38SJeff Kirsher a combined copy/checksum routine.  Copying also preloads the cache, which is
196d9fb9f38SJeff Kirsher most useful with small frames.
197d9fb9f38SJeff Kirsher 
198d9fb9f38SJeff Kirsher A subtle aspect of the operation is that unaligned buffers are not permitted
199d9fb9f38SJeff Kirsher by the hardware.  Thus the IP header at offset 14 in an ethernet frame isn't
200d9fb9f38SJeff Kirsher longword aligned for further processing.  On copies frames are put into the
201d9fb9f38SJeff Kirsher skbuff at an offset of "+2", 16-byte aligning the IP header.
202d9fb9f38SJeff Kirsher 
203d9fb9f38SJeff Kirsher IIId. Synchronization
204d9fb9f38SJeff Kirsher 
205d9fb9f38SJeff Kirsher Most operations are synchronized on the np->lock irq spinlock, except the
206d9fb9f38SJeff Kirsher receive and transmit paths which are synchronised using a combination of
207d9fb9f38SJeff Kirsher hardware descriptor ownership, disabling interrupts and NAPI poll scheduling.
208d9fb9f38SJeff Kirsher 
209d9fb9f38SJeff Kirsher IVb. References
210d9fb9f38SJeff Kirsher 
211d9fb9f38SJeff Kirsher http://www.scyld.com/expert/100mbps.html
212d9fb9f38SJeff Kirsher http://www.scyld.com/expert/NWay.html
213d9fb9f38SJeff Kirsher Datasheet is available from:
214d9fb9f38SJeff Kirsher http://www.national.com/pf/DP/DP83815.html
215d9fb9f38SJeff Kirsher 
216d9fb9f38SJeff Kirsher IVc. Errata
217d9fb9f38SJeff Kirsher 
218d9fb9f38SJeff Kirsher None characterised.
219d9fb9f38SJeff Kirsher */
220d9fb9f38SJeff Kirsher 
221d9fb9f38SJeff Kirsher 
222d9fb9f38SJeff Kirsher 
223d9fb9f38SJeff Kirsher /*
224d9fb9f38SJeff Kirsher  * Support for fibre connections on Am79C874:
225d9fb9f38SJeff Kirsher  * This phy needs a special setup when connected to a fibre cable.
226d9fb9f38SJeff Kirsher  * http://www.amd.com/files/connectivitysolutions/networking/archivednetworking/22235.pdf
227d9fb9f38SJeff Kirsher  */
228d9fb9f38SJeff Kirsher #define PHYID_AM79C874	0x0022561b
229d9fb9f38SJeff Kirsher 
230d9fb9f38SJeff Kirsher enum {
231d9fb9f38SJeff Kirsher 	MII_MCTRL	= 0x15,		/* mode control register */
232d9fb9f38SJeff Kirsher 	MII_FX_SEL	= 0x0001,	/* 100BASE-FX (fiber) */
233d9fb9f38SJeff Kirsher 	MII_EN_SCRM	= 0x0004,	/* enable scrambler (tp) */
234d9fb9f38SJeff Kirsher };
235d9fb9f38SJeff Kirsher 
236d9fb9f38SJeff Kirsher enum {
237d9fb9f38SJeff Kirsher 	NATSEMI_FLAG_IGNORE_PHY		= 0x1,
238d9fb9f38SJeff Kirsher };
239d9fb9f38SJeff Kirsher 
240d9fb9f38SJeff Kirsher /* array of board data directly indexed by pci_tbl[x].driver_data */
241d9fb9f38SJeff Kirsher static struct {
242d9fb9f38SJeff Kirsher 	const char *name;
243d9fb9f38SJeff Kirsher 	unsigned long flags;
244d9fb9f38SJeff Kirsher 	unsigned int eeprom_size;
2456980cbe4SBill Pemberton } natsemi_pci_info[] = {
246d9fb9f38SJeff Kirsher 	{ "Aculab E1/T1 PMXc cPCI carrier card", NATSEMI_FLAG_IGNORE_PHY, 128 },
247d9fb9f38SJeff Kirsher 	{ "NatSemi DP8381[56]", 0, 24 },
248d9fb9f38SJeff Kirsher };
249d9fb9f38SJeff Kirsher 
2509baa3c34SBenoit Taine static const struct pci_device_id natsemi_pci_tbl[] = {
251d9fb9f38SJeff Kirsher 	{ PCI_VENDOR_ID_NS, 0x0020, 0x12d9,     0x000c,     0, 0, 0 },
252d9fb9f38SJeff Kirsher 	{ PCI_VENDOR_ID_NS, 0x0020, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
253d9fb9f38SJeff Kirsher 	{ }	/* terminate list */
254d9fb9f38SJeff Kirsher };
255d9fb9f38SJeff Kirsher MODULE_DEVICE_TABLE(pci, natsemi_pci_tbl);
256d9fb9f38SJeff Kirsher 
257d9fb9f38SJeff Kirsher /* Offsets to the device registers.
258d9fb9f38SJeff Kirsher    Unlike software-only systems, device drivers interact with complex hardware.
259d9fb9f38SJeff Kirsher    It's not useful to define symbolic names for every register bit in the
260d9fb9f38SJeff Kirsher    device.
261d9fb9f38SJeff Kirsher */
262d9fb9f38SJeff Kirsher enum register_offsets {
263d9fb9f38SJeff Kirsher 	ChipCmd			= 0x00,
264d9fb9f38SJeff Kirsher 	ChipConfig		= 0x04,
265d9fb9f38SJeff Kirsher 	EECtrl			= 0x08,
266d9fb9f38SJeff Kirsher 	PCIBusCfg		= 0x0C,
267d9fb9f38SJeff Kirsher 	IntrStatus		= 0x10,
268d9fb9f38SJeff Kirsher 	IntrMask		= 0x14,
269d9fb9f38SJeff Kirsher 	IntrEnable		= 0x18,
270d9fb9f38SJeff Kirsher 	IntrHoldoff		= 0x1C, /* DP83816 only */
271d9fb9f38SJeff Kirsher 	TxRingPtr		= 0x20,
272d9fb9f38SJeff Kirsher 	TxConfig		= 0x24,
273d9fb9f38SJeff Kirsher 	RxRingPtr		= 0x30,
274d9fb9f38SJeff Kirsher 	RxConfig		= 0x34,
275d9fb9f38SJeff Kirsher 	ClkRun			= 0x3C,
276d9fb9f38SJeff Kirsher 	WOLCmd			= 0x40,
277d9fb9f38SJeff Kirsher 	PauseCmd		= 0x44,
278d9fb9f38SJeff Kirsher 	RxFilterAddr		= 0x48,
279d9fb9f38SJeff Kirsher 	RxFilterData		= 0x4C,
280d9fb9f38SJeff Kirsher 	BootRomAddr		= 0x50,
281d9fb9f38SJeff Kirsher 	BootRomData		= 0x54,
282d9fb9f38SJeff Kirsher 	SiliconRev		= 0x58,
283d9fb9f38SJeff Kirsher 	StatsCtrl		= 0x5C,
284d9fb9f38SJeff Kirsher 	StatsData		= 0x60,
285d9fb9f38SJeff Kirsher 	RxPktErrs		= 0x60,
286d9fb9f38SJeff Kirsher 	RxMissed		= 0x68,
287d9fb9f38SJeff Kirsher 	RxCRCErrs		= 0x64,
288d9fb9f38SJeff Kirsher 	BasicControl		= 0x80,
289d9fb9f38SJeff Kirsher 	BasicStatus		= 0x84,
290d9fb9f38SJeff Kirsher 	AnegAdv			= 0x90,
291d9fb9f38SJeff Kirsher 	AnegPeer		= 0x94,
292d9fb9f38SJeff Kirsher 	PhyStatus		= 0xC0,
293d9fb9f38SJeff Kirsher 	MIntrCtrl		= 0xC4,
294d9fb9f38SJeff Kirsher 	MIntrStatus		= 0xC8,
295d9fb9f38SJeff Kirsher 	PhyCtrl			= 0xE4,
296d9fb9f38SJeff Kirsher 
297d9fb9f38SJeff Kirsher 	/* These are from the spec, around page 78... on a separate table.
298d9fb9f38SJeff Kirsher 	 * The meaning of these registers depend on the value of PGSEL. */
299d9fb9f38SJeff Kirsher 	PGSEL			= 0xCC,
300d9fb9f38SJeff Kirsher 	PMDCSR			= 0xE4,
301d9fb9f38SJeff Kirsher 	TSTDAT			= 0xFC,
302d9fb9f38SJeff Kirsher 	DSPCFG			= 0xF4,
303d9fb9f38SJeff Kirsher 	SDCFG			= 0xF8
304d9fb9f38SJeff Kirsher };
305d9fb9f38SJeff Kirsher /* the values for the 'magic' registers above (PGSEL=1) */
306d9fb9f38SJeff Kirsher #define PMDCSR_VAL	0x189c	/* enable preferred adaptation circuitry */
307d9fb9f38SJeff Kirsher #define TSTDAT_VAL	0x0
308d9fb9f38SJeff Kirsher #define DSPCFG_VAL	0x5040
309d9fb9f38SJeff Kirsher #define SDCFG_VAL	0x008c	/* set voltage thresholds for Signal Detect */
310d9fb9f38SJeff Kirsher #define DSPCFG_LOCK	0x20	/* coefficient lock bit in DSPCFG */
311d9fb9f38SJeff Kirsher #define DSPCFG_COEF	0x1000	/* see coefficient (in TSTDAT) bit in DSPCFG */
312d9fb9f38SJeff Kirsher #define TSTDAT_FIXED	0xe8	/* magic number for bad coefficients */
313d9fb9f38SJeff Kirsher 
314d9fb9f38SJeff Kirsher /* misc PCI space registers */
315d9fb9f38SJeff Kirsher enum pci_register_offsets {
316d9fb9f38SJeff Kirsher 	PCIPM			= 0x44,
317d9fb9f38SJeff Kirsher };
318d9fb9f38SJeff Kirsher 
319d9fb9f38SJeff Kirsher enum ChipCmd_bits {
320d9fb9f38SJeff Kirsher 	ChipReset		= 0x100,
321d9fb9f38SJeff Kirsher 	RxReset			= 0x20,
322d9fb9f38SJeff Kirsher 	TxReset			= 0x10,
323d9fb9f38SJeff Kirsher 	RxOff			= 0x08,
324d9fb9f38SJeff Kirsher 	RxOn			= 0x04,
325d9fb9f38SJeff Kirsher 	TxOff			= 0x02,
326d9fb9f38SJeff Kirsher 	TxOn			= 0x01,
327d9fb9f38SJeff Kirsher };
328d9fb9f38SJeff Kirsher 
329d9fb9f38SJeff Kirsher enum ChipConfig_bits {
330d9fb9f38SJeff Kirsher 	CfgPhyDis		= 0x200,
331d9fb9f38SJeff Kirsher 	CfgPhyRst		= 0x400,
332d9fb9f38SJeff Kirsher 	CfgExtPhy		= 0x1000,
333d9fb9f38SJeff Kirsher 	CfgAnegEnable		= 0x2000,
334d9fb9f38SJeff Kirsher 	CfgAneg100		= 0x4000,
335d9fb9f38SJeff Kirsher 	CfgAnegFull		= 0x8000,
336d9fb9f38SJeff Kirsher 	CfgAnegDone		= 0x8000000,
337d9fb9f38SJeff Kirsher 	CfgFullDuplex		= 0x20000000,
338d9fb9f38SJeff Kirsher 	CfgSpeed100		= 0x40000000,
339d9fb9f38SJeff Kirsher 	CfgLink			= 0x80000000,
340d9fb9f38SJeff Kirsher };
341d9fb9f38SJeff Kirsher 
342d9fb9f38SJeff Kirsher enum EECtrl_bits {
343d9fb9f38SJeff Kirsher 	EE_ShiftClk		= 0x04,
344d9fb9f38SJeff Kirsher 	EE_DataIn		= 0x01,
345d9fb9f38SJeff Kirsher 	EE_ChipSelect		= 0x08,
346d9fb9f38SJeff Kirsher 	EE_DataOut		= 0x02,
347d9fb9f38SJeff Kirsher 	MII_Data 		= 0x10,
348d9fb9f38SJeff Kirsher 	MII_Write		= 0x20,
349d9fb9f38SJeff Kirsher 	MII_ShiftClk		= 0x40,
350d9fb9f38SJeff Kirsher };
351d9fb9f38SJeff Kirsher 
352d9fb9f38SJeff Kirsher enum PCIBusCfg_bits {
353d9fb9f38SJeff Kirsher 	EepromReload		= 0x4,
354d9fb9f38SJeff Kirsher };
355d9fb9f38SJeff Kirsher 
356d9fb9f38SJeff Kirsher /* Bits in the interrupt status/mask registers. */
357d9fb9f38SJeff Kirsher enum IntrStatus_bits {
358d9fb9f38SJeff Kirsher 	IntrRxDone		= 0x0001,
359d9fb9f38SJeff Kirsher 	IntrRxIntr		= 0x0002,
360d9fb9f38SJeff Kirsher 	IntrRxErr		= 0x0004,
361d9fb9f38SJeff Kirsher 	IntrRxEarly		= 0x0008,
362d9fb9f38SJeff Kirsher 	IntrRxIdle		= 0x0010,
363d9fb9f38SJeff Kirsher 	IntrRxOverrun		= 0x0020,
364d9fb9f38SJeff Kirsher 	IntrTxDone		= 0x0040,
365d9fb9f38SJeff Kirsher 	IntrTxIntr		= 0x0080,
366d9fb9f38SJeff Kirsher 	IntrTxErr		= 0x0100,
367d9fb9f38SJeff Kirsher 	IntrTxIdle		= 0x0200,
368d9fb9f38SJeff Kirsher 	IntrTxUnderrun		= 0x0400,
369d9fb9f38SJeff Kirsher 	StatsMax		= 0x0800,
370d9fb9f38SJeff Kirsher 	SWInt			= 0x1000,
371d9fb9f38SJeff Kirsher 	WOLPkt			= 0x2000,
372d9fb9f38SJeff Kirsher 	LinkChange		= 0x4000,
373d9fb9f38SJeff Kirsher 	IntrHighBits		= 0x8000,
374d9fb9f38SJeff Kirsher 	RxStatusFIFOOver	= 0x10000,
375d9fb9f38SJeff Kirsher 	IntrPCIErr		= 0xf00000,
376d9fb9f38SJeff Kirsher 	RxResetDone		= 0x1000000,
377d9fb9f38SJeff Kirsher 	TxResetDone		= 0x2000000,
378d9fb9f38SJeff Kirsher 	IntrAbnormalSummary	= 0xCD20,
379d9fb9f38SJeff Kirsher };
380d9fb9f38SJeff Kirsher 
381d9fb9f38SJeff Kirsher /*
382d9fb9f38SJeff Kirsher  * Default Interrupts:
383d9fb9f38SJeff Kirsher  * Rx OK, Rx Packet Error, Rx Overrun,
384d9fb9f38SJeff Kirsher  * Tx OK, Tx Packet Error, Tx Underrun,
385d9fb9f38SJeff Kirsher  * MIB Service, Phy Interrupt, High Bits,
386d9fb9f38SJeff Kirsher  * Rx Status FIFO overrun,
387d9fb9f38SJeff Kirsher  * Received Target Abort, Received Master Abort,
388d9fb9f38SJeff Kirsher  * Signalled System Error, Received Parity Error
389d9fb9f38SJeff Kirsher  */
390d9fb9f38SJeff Kirsher #define DEFAULT_INTR 0x00f1cd65
391d9fb9f38SJeff Kirsher 
392d9fb9f38SJeff Kirsher enum TxConfig_bits {
393d9fb9f38SJeff Kirsher 	TxDrthMask		= 0x3f,
394d9fb9f38SJeff Kirsher 	TxFlthMask		= 0x3f00,
395d9fb9f38SJeff Kirsher 	TxMxdmaMask		= 0x700000,
396d9fb9f38SJeff Kirsher 	TxMxdma_512		= 0x0,
397d9fb9f38SJeff Kirsher 	TxMxdma_4		= 0x100000,
398d9fb9f38SJeff Kirsher 	TxMxdma_8		= 0x200000,
399d9fb9f38SJeff Kirsher 	TxMxdma_16		= 0x300000,
400d9fb9f38SJeff Kirsher 	TxMxdma_32		= 0x400000,
401d9fb9f38SJeff Kirsher 	TxMxdma_64		= 0x500000,
402d9fb9f38SJeff Kirsher 	TxMxdma_128		= 0x600000,
403d9fb9f38SJeff Kirsher 	TxMxdma_256		= 0x700000,
404d9fb9f38SJeff Kirsher 	TxCollRetry		= 0x800000,
405d9fb9f38SJeff Kirsher 	TxAutoPad		= 0x10000000,
406d9fb9f38SJeff Kirsher 	TxMacLoop		= 0x20000000,
407d9fb9f38SJeff Kirsher 	TxHeartIgn		= 0x40000000,
408d9fb9f38SJeff Kirsher 	TxCarrierIgn		= 0x80000000
409d9fb9f38SJeff Kirsher };
410d9fb9f38SJeff Kirsher 
411d9fb9f38SJeff Kirsher /*
412d9fb9f38SJeff Kirsher  * Tx Configuration:
413d9fb9f38SJeff Kirsher  * - 256 byte DMA burst length
414d9fb9f38SJeff Kirsher  * - fill threshold 512 bytes (i.e. restart DMA when 512 bytes are free)
415d9fb9f38SJeff Kirsher  * - 64 bytes initial drain threshold (i.e. begin actual transmission
416d9fb9f38SJeff Kirsher  *   when 64 byte are in the fifo)
417d9fb9f38SJeff Kirsher  * - on tx underruns, increase drain threshold by 64.
418d9fb9f38SJeff Kirsher  * - at most use a drain threshold of 1472 bytes: The sum of the fill
419d9fb9f38SJeff Kirsher  *   threshold and the drain threshold must be less than 2016 bytes.
420d9fb9f38SJeff Kirsher  *
421d9fb9f38SJeff Kirsher  */
422d9fb9f38SJeff Kirsher #define TX_FLTH_VAL		((512/32) << 8)
423d9fb9f38SJeff Kirsher #define TX_DRTH_VAL_START	(64/32)
424d9fb9f38SJeff Kirsher #define TX_DRTH_VAL_INC		2
425d9fb9f38SJeff Kirsher #define TX_DRTH_VAL_LIMIT	(1472/32)
426d9fb9f38SJeff Kirsher 
427d9fb9f38SJeff Kirsher enum RxConfig_bits {
428d9fb9f38SJeff Kirsher 	RxDrthMask		= 0x3e,
429d9fb9f38SJeff Kirsher 	RxMxdmaMask		= 0x700000,
430d9fb9f38SJeff Kirsher 	RxMxdma_512		= 0x0,
431d9fb9f38SJeff Kirsher 	RxMxdma_4		= 0x100000,
432d9fb9f38SJeff Kirsher 	RxMxdma_8		= 0x200000,
433d9fb9f38SJeff Kirsher 	RxMxdma_16		= 0x300000,
434d9fb9f38SJeff Kirsher 	RxMxdma_32		= 0x400000,
435d9fb9f38SJeff Kirsher 	RxMxdma_64		= 0x500000,
436d9fb9f38SJeff Kirsher 	RxMxdma_128		= 0x600000,
437d9fb9f38SJeff Kirsher 	RxMxdma_256		= 0x700000,
438d9fb9f38SJeff Kirsher 	RxAcceptLong		= 0x8000000,
439d9fb9f38SJeff Kirsher 	RxAcceptTx		= 0x10000000,
440d9fb9f38SJeff Kirsher 	RxAcceptRunt		= 0x40000000,
441d9fb9f38SJeff Kirsher 	RxAcceptErr		= 0x80000000
442d9fb9f38SJeff Kirsher };
443d9fb9f38SJeff Kirsher #define RX_DRTH_VAL		(128/8)
444d9fb9f38SJeff Kirsher 
445d9fb9f38SJeff Kirsher enum ClkRun_bits {
446d9fb9f38SJeff Kirsher 	PMEEnable		= 0x100,
447d9fb9f38SJeff Kirsher 	PMEStatus		= 0x8000,
448d9fb9f38SJeff Kirsher };
449d9fb9f38SJeff Kirsher 
450d9fb9f38SJeff Kirsher enum WolCmd_bits {
451d9fb9f38SJeff Kirsher 	WakePhy			= 0x1,
452d9fb9f38SJeff Kirsher 	WakeUnicast		= 0x2,
453d9fb9f38SJeff Kirsher 	WakeMulticast		= 0x4,
454d9fb9f38SJeff Kirsher 	WakeBroadcast		= 0x8,
455d9fb9f38SJeff Kirsher 	WakeArp			= 0x10,
456d9fb9f38SJeff Kirsher 	WakePMatch0		= 0x20,
457d9fb9f38SJeff Kirsher 	WakePMatch1		= 0x40,
458d9fb9f38SJeff Kirsher 	WakePMatch2		= 0x80,
459d9fb9f38SJeff Kirsher 	WakePMatch3		= 0x100,
460d9fb9f38SJeff Kirsher 	WakeMagic		= 0x200,
461d9fb9f38SJeff Kirsher 	WakeMagicSecure		= 0x400,
462d9fb9f38SJeff Kirsher 	SecureHack		= 0x100000,
463d9fb9f38SJeff Kirsher 	WokePhy			= 0x400000,
464d9fb9f38SJeff Kirsher 	WokeUnicast		= 0x800000,
465d9fb9f38SJeff Kirsher 	WokeMulticast		= 0x1000000,
466d9fb9f38SJeff Kirsher 	WokeBroadcast		= 0x2000000,
467d9fb9f38SJeff Kirsher 	WokeArp			= 0x4000000,
468d9fb9f38SJeff Kirsher 	WokePMatch0		= 0x8000000,
469d9fb9f38SJeff Kirsher 	WokePMatch1		= 0x10000000,
470d9fb9f38SJeff Kirsher 	WokePMatch2		= 0x20000000,
471d9fb9f38SJeff Kirsher 	WokePMatch3		= 0x40000000,
472d9fb9f38SJeff Kirsher 	WokeMagic		= 0x80000000,
473d9fb9f38SJeff Kirsher 	WakeOptsSummary		= 0x7ff
474d9fb9f38SJeff Kirsher };
475d9fb9f38SJeff Kirsher 
476d9fb9f38SJeff Kirsher enum RxFilterAddr_bits {
477d9fb9f38SJeff Kirsher 	RFCRAddressMask		= 0x3ff,
478d9fb9f38SJeff Kirsher 	AcceptMulticast		= 0x00200000,
479d9fb9f38SJeff Kirsher 	AcceptMyPhys		= 0x08000000,
480d9fb9f38SJeff Kirsher 	AcceptAllPhys		= 0x10000000,
481d9fb9f38SJeff Kirsher 	AcceptAllMulticast	= 0x20000000,
482d9fb9f38SJeff Kirsher 	AcceptBroadcast		= 0x40000000,
483d9fb9f38SJeff Kirsher 	RxFilterEnable		= 0x80000000
484d9fb9f38SJeff Kirsher };
485d9fb9f38SJeff Kirsher 
486d9fb9f38SJeff Kirsher enum StatsCtrl_bits {
487d9fb9f38SJeff Kirsher 	StatsWarn		= 0x1,
488d9fb9f38SJeff Kirsher 	StatsFreeze		= 0x2,
489d9fb9f38SJeff Kirsher 	StatsClear		= 0x4,
490d9fb9f38SJeff Kirsher 	StatsStrobe		= 0x8,
491d9fb9f38SJeff Kirsher };
492d9fb9f38SJeff Kirsher 
493d9fb9f38SJeff Kirsher enum MIntrCtrl_bits {
494d9fb9f38SJeff Kirsher 	MICRIntEn		= 0x2,
495d9fb9f38SJeff Kirsher };
496d9fb9f38SJeff Kirsher 
497d9fb9f38SJeff Kirsher enum PhyCtrl_bits {
498d9fb9f38SJeff Kirsher 	PhyAddrMask		= 0x1f,
499d9fb9f38SJeff Kirsher };
500d9fb9f38SJeff Kirsher 
501d9fb9f38SJeff Kirsher #define PHY_ADDR_NONE		32
502d9fb9f38SJeff Kirsher #define PHY_ADDR_INTERNAL	1
503d9fb9f38SJeff Kirsher 
504d9fb9f38SJeff Kirsher /* values we might find in the silicon revision register */
505d9fb9f38SJeff Kirsher #define SRR_DP83815_C	0x0302
506d9fb9f38SJeff Kirsher #define SRR_DP83815_D	0x0403
507d9fb9f38SJeff Kirsher #define SRR_DP83816_A4	0x0504
508d9fb9f38SJeff Kirsher #define SRR_DP83816_A5	0x0505
509d9fb9f38SJeff Kirsher 
510d9fb9f38SJeff Kirsher /* The Rx and Tx buffer descriptors. */
511d9fb9f38SJeff Kirsher /* Note that using only 32 bit fields simplifies conversion to big-endian
512d9fb9f38SJeff Kirsher    architectures. */
513d9fb9f38SJeff Kirsher struct netdev_desc {
514d9fb9f38SJeff Kirsher 	__le32 next_desc;
515d9fb9f38SJeff Kirsher 	__le32 cmd_status;
516d9fb9f38SJeff Kirsher 	__le32 addr;
517d9fb9f38SJeff Kirsher 	__le32 software_use;
518d9fb9f38SJeff Kirsher };
519d9fb9f38SJeff Kirsher 
520d9fb9f38SJeff Kirsher /* Bits in network_desc.status */
521d9fb9f38SJeff Kirsher enum desc_status_bits {
522d9fb9f38SJeff Kirsher 	DescOwn=0x80000000, DescMore=0x40000000, DescIntr=0x20000000,
523d9fb9f38SJeff Kirsher 	DescNoCRC=0x10000000, DescPktOK=0x08000000,
524d9fb9f38SJeff Kirsher 	DescSizeMask=0xfff,
525d9fb9f38SJeff Kirsher 
526d9fb9f38SJeff Kirsher 	DescTxAbort=0x04000000, DescTxFIFO=0x02000000,
527d9fb9f38SJeff Kirsher 	DescTxCarrier=0x01000000, DescTxDefer=0x00800000,
528d9fb9f38SJeff Kirsher 	DescTxExcDefer=0x00400000, DescTxOOWCol=0x00200000,
529d9fb9f38SJeff Kirsher 	DescTxExcColl=0x00100000, DescTxCollCount=0x000f0000,
530d9fb9f38SJeff Kirsher 
531d9fb9f38SJeff Kirsher 	DescRxAbort=0x04000000, DescRxOver=0x02000000,
532d9fb9f38SJeff Kirsher 	DescRxDest=0x01800000, DescRxLong=0x00400000,
533d9fb9f38SJeff Kirsher 	DescRxRunt=0x00200000, DescRxInvalid=0x00100000,
534d9fb9f38SJeff Kirsher 	DescRxCRC=0x00080000, DescRxAlign=0x00040000,
535d9fb9f38SJeff Kirsher 	DescRxLoop=0x00020000, DesRxColl=0x00010000,
536d9fb9f38SJeff Kirsher };
537d9fb9f38SJeff Kirsher 
538d9fb9f38SJeff Kirsher struct netdev_private {
539d9fb9f38SJeff Kirsher 	/* Descriptor rings first for alignment */
540d9fb9f38SJeff Kirsher 	dma_addr_t ring_dma;
541d9fb9f38SJeff Kirsher 	struct netdev_desc *rx_ring;
542d9fb9f38SJeff Kirsher 	struct netdev_desc *tx_ring;
543d9fb9f38SJeff Kirsher 	/* The addresses of receive-in-place skbuffs */
544d9fb9f38SJeff Kirsher 	struct sk_buff *rx_skbuff[RX_RING_SIZE];
545d9fb9f38SJeff Kirsher 	dma_addr_t rx_dma[RX_RING_SIZE];
546d9fb9f38SJeff Kirsher 	/* address of a sent-in-place packet/buffer, for later free() */
547d9fb9f38SJeff Kirsher 	struct sk_buff *tx_skbuff[TX_RING_SIZE];
548d9fb9f38SJeff Kirsher 	dma_addr_t tx_dma[TX_RING_SIZE];
549d9fb9f38SJeff Kirsher 	struct net_device *dev;
550d710ce13SFrancois Romieu 	void __iomem *ioaddr;
551d9fb9f38SJeff Kirsher 	struct napi_struct napi;
552d9fb9f38SJeff Kirsher 	/* Media monitoring timer */
553d9fb9f38SJeff Kirsher 	struct timer_list timer;
554d9fb9f38SJeff Kirsher 	/* Frequently used values: keep some adjacent for cache effect */
555d9fb9f38SJeff Kirsher 	struct pci_dev *pci_dev;
556d9fb9f38SJeff Kirsher 	struct netdev_desc *rx_head_desc;
557d9fb9f38SJeff Kirsher 	/* Producer/consumer ring indices */
558d9fb9f38SJeff Kirsher 	unsigned int cur_rx, dirty_rx;
559d9fb9f38SJeff Kirsher 	unsigned int cur_tx, dirty_tx;
560d9fb9f38SJeff Kirsher 	/* Based on MTU+slack. */
561d9fb9f38SJeff Kirsher 	unsigned int rx_buf_sz;
562d9fb9f38SJeff Kirsher 	int oom;
563d9fb9f38SJeff Kirsher 	/* Interrupt status */
564d9fb9f38SJeff Kirsher 	u32 intr_status;
565d9fb9f38SJeff Kirsher 	/* Do not touch the nic registers */
566d9fb9f38SJeff Kirsher 	int hands_off;
567d9fb9f38SJeff Kirsher 	/* Don't pay attention to the reported link state. */
568d9fb9f38SJeff Kirsher 	int ignore_phy;
569d9fb9f38SJeff Kirsher 	/* external phy that is used: only valid if dev->if_port != PORT_TP */
570d9fb9f38SJeff Kirsher 	int mii;
571d9fb9f38SJeff Kirsher 	int phy_addr_external;
572d9fb9f38SJeff Kirsher 	unsigned int full_duplex;
573d9fb9f38SJeff Kirsher 	/* Rx filter */
574d9fb9f38SJeff Kirsher 	u32 cur_rx_mode;
575d9fb9f38SJeff Kirsher 	u32 rx_filter[16];
576d9fb9f38SJeff Kirsher 	/* FIFO and PCI burst thresholds */
577d9fb9f38SJeff Kirsher 	u32 tx_config, rx_config;
578d9fb9f38SJeff Kirsher 	/* original contents of ClkRun register */
579d9fb9f38SJeff Kirsher 	u32 SavedClkRun;
580d9fb9f38SJeff Kirsher 	/* silicon revision */
581d9fb9f38SJeff Kirsher 	u32 srr;
582d9fb9f38SJeff Kirsher 	/* expected DSPCFG value */
583d9fb9f38SJeff Kirsher 	u16 dspcfg;
584d9fb9f38SJeff Kirsher 	int dspcfg_workaround;
585d9fb9f38SJeff Kirsher 	/* parms saved in ethtool format */
586d9fb9f38SJeff Kirsher 	u16	speed;		/* The forced speed, 10Mb, 100Mb, gigabit */
587d9fb9f38SJeff Kirsher 	u8	duplex;		/* Duplex, half or full */
588d9fb9f38SJeff Kirsher 	u8	autoneg;	/* Autonegotiation enabled */
589d9fb9f38SJeff Kirsher 	/* MII transceiver section */
590d9fb9f38SJeff Kirsher 	u16 advertising;
591d9fb9f38SJeff Kirsher 	unsigned int iosize;
592d9fb9f38SJeff Kirsher 	spinlock_t lock;
593d9fb9f38SJeff Kirsher 	u32 msg_enable;
594d9fb9f38SJeff Kirsher 	/* EEPROM data */
595d9fb9f38SJeff Kirsher 	int eeprom_size;
596d9fb9f38SJeff Kirsher };
597d9fb9f38SJeff Kirsher 
598d9fb9f38SJeff Kirsher static void move_int_phy(struct net_device *dev, int addr);
599d9fb9f38SJeff Kirsher static int eeprom_read(void __iomem *ioaddr, int location);
600d9fb9f38SJeff Kirsher static int mdio_read(struct net_device *dev, int reg);
601d9fb9f38SJeff Kirsher static void mdio_write(struct net_device *dev, int reg, u16 data);
602d9fb9f38SJeff Kirsher static void init_phy_fixup(struct net_device *dev);
603d9fb9f38SJeff Kirsher static int miiport_read(struct net_device *dev, int phy_id, int reg);
604d9fb9f38SJeff Kirsher static void miiport_write(struct net_device *dev, int phy_id, int reg, u16 data);
605d9fb9f38SJeff Kirsher static int find_mii(struct net_device *dev);
606d9fb9f38SJeff Kirsher static void natsemi_reset(struct net_device *dev);
607d9fb9f38SJeff Kirsher static void natsemi_reload_eeprom(struct net_device *dev);
608d9fb9f38SJeff Kirsher static void natsemi_stop_rxtx(struct net_device *dev);
609d9fb9f38SJeff Kirsher static int netdev_open(struct net_device *dev);
610d9fb9f38SJeff Kirsher static void do_cable_magic(struct net_device *dev);
611d9fb9f38SJeff Kirsher static void undo_cable_magic(struct net_device *dev);
612d9fb9f38SJeff Kirsher static void check_link(struct net_device *dev);
61315735c9dSKees Cook static void netdev_timer(struct timer_list *t);
614d9fb9f38SJeff Kirsher static void dump_ring(struct net_device *dev);
6150290bd29SMichael S. Tsirkin static void ns_tx_timeout(struct net_device *dev, unsigned int txqueue);
616d9fb9f38SJeff Kirsher static int alloc_ring(struct net_device *dev);
617d9fb9f38SJeff Kirsher static void refill_rx(struct net_device *dev);
618d9fb9f38SJeff Kirsher static void init_ring(struct net_device *dev);
619d9fb9f38SJeff Kirsher static void drain_tx(struct net_device *dev);
620d9fb9f38SJeff Kirsher static void drain_ring(struct net_device *dev);
621d9fb9f38SJeff Kirsher static void free_ring(struct net_device *dev);
622d9fb9f38SJeff Kirsher static void reinit_ring(struct net_device *dev);
623d9fb9f38SJeff Kirsher static void init_registers(struct net_device *dev);
624d9fb9f38SJeff Kirsher static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev);
625d9fb9f38SJeff Kirsher static irqreturn_t intr_handler(int irq, void *dev_instance);
626d9fb9f38SJeff Kirsher static void netdev_error(struct net_device *dev, int intr_status);
627d9fb9f38SJeff Kirsher static int natsemi_poll(struct napi_struct *napi, int budget);
628d9fb9f38SJeff Kirsher static void netdev_rx(struct net_device *dev, int *work_done, int work_to_do);
629d9fb9f38SJeff Kirsher static void netdev_tx_done(struct net_device *dev);
630d9fb9f38SJeff Kirsher static int natsemi_change_mtu(struct net_device *dev, int new_mtu);
631d9fb9f38SJeff Kirsher #ifdef CONFIG_NET_POLL_CONTROLLER
632d9fb9f38SJeff Kirsher static void natsemi_poll_controller(struct net_device *dev);
633d9fb9f38SJeff Kirsher #endif
634d9fb9f38SJeff Kirsher static void __set_rx_mode(struct net_device *dev);
635d9fb9f38SJeff Kirsher static void set_rx_mode(struct net_device *dev);
636d9fb9f38SJeff Kirsher static void __get_stats(struct net_device *dev);
637d9fb9f38SJeff Kirsher static struct net_device_stats *get_stats(struct net_device *dev);
638d9fb9f38SJeff Kirsher static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
639d9fb9f38SJeff Kirsher static int netdev_set_wol(struct net_device *dev, u32 newval);
640d9fb9f38SJeff Kirsher static int netdev_get_wol(struct net_device *dev, u32 *supported, u32 *cur);
641d9fb9f38SJeff Kirsher static int netdev_set_sopass(struct net_device *dev, u8 *newval);
642d9fb9f38SJeff Kirsher static int netdev_get_sopass(struct net_device *dev, u8 *data);
643586b6e27SPhilippe Reynes static int netdev_get_ecmd(struct net_device *dev,
644586b6e27SPhilippe Reynes 			   struct ethtool_link_ksettings *ecmd);
645586b6e27SPhilippe Reynes static int netdev_set_ecmd(struct net_device *dev,
646586b6e27SPhilippe Reynes 			   const struct ethtool_link_ksettings *ecmd);
647d9fb9f38SJeff Kirsher static void enable_wol_mode(struct net_device *dev, int enable_intr);
648d9fb9f38SJeff Kirsher static int netdev_close(struct net_device *dev);
649d9fb9f38SJeff Kirsher static int netdev_get_regs(struct net_device *dev, u8 *buf);
650d9fb9f38SJeff Kirsher static int netdev_get_eeprom(struct net_device *dev, u8 *buf);
651d9fb9f38SJeff Kirsher static const struct ethtool_ops ethtool_ops;
652d9fb9f38SJeff Kirsher 
653d9fb9f38SJeff Kirsher #define NATSEMI_ATTR(_name) \
654d9fb9f38SJeff Kirsher static ssize_t natsemi_show_##_name(struct device *dev, \
655d9fb9f38SJeff Kirsher          struct device_attribute *attr, char *buf); \
656d9fb9f38SJeff Kirsher 	 static ssize_t natsemi_set_##_name(struct device *dev, \
657d9fb9f38SJeff Kirsher 		struct device_attribute *attr, \
658d9fb9f38SJeff Kirsher 	        const char *buf, size_t count); \
659d9fb9f38SJeff Kirsher 	 static DEVICE_ATTR(_name, 0644, natsemi_show_##_name, natsemi_set_##_name)
660d9fb9f38SJeff Kirsher 
661d9fb9f38SJeff Kirsher #define NATSEMI_CREATE_FILE(_dev, _name) \
662d9fb9f38SJeff Kirsher          device_create_file(&_dev->dev, &dev_attr_##_name)
663d9fb9f38SJeff Kirsher #define NATSEMI_REMOVE_FILE(_dev, _name) \
664d9fb9f38SJeff Kirsher          device_remove_file(&_dev->dev, &dev_attr_##_name)
665d9fb9f38SJeff Kirsher 
666d9fb9f38SJeff Kirsher NATSEMI_ATTR(dspcfg_workaround);
667d9fb9f38SJeff Kirsher 
natsemi_show_dspcfg_workaround(struct device * dev,struct device_attribute * attr,char * buf)668d9fb9f38SJeff Kirsher static ssize_t natsemi_show_dspcfg_workaround(struct device *dev,
669d9fb9f38SJeff Kirsher 				  	      struct device_attribute *attr,
670d9fb9f38SJeff Kirsher 					      char *buf)
671d9fb9f38SJeff Kirsher {
672d9fb9f38SJeff Kirsher 	struct netdev_private *np = netdev_priv(to_net_dev(dev));
673d9fb9f38SJeff Kirsher 
674d9fb9f38SJeff Kirsher 	return sprintf(buf, "%s\n", np->dspcfg_workaround ? "on" : "off");
675d9fb9f38SJeff Kirsher }
676d9fb9f38SJeff Kirsher 
natsemi_set_dspcfg_workaround(struct device * dev,struct device_attribute * attr,const char * buf,size_t count)677d9fb9f38SJeff Kirsher static ssize_t natsemi_set_dspcfg_workaround(struct device *dev,
678d9fb9f38SJeff Kirsher 					     struct device_attribute *attr,
679d9fb9f38SJeff Kirsher 					     const char *buf, size_t count)
680d9fb9f38SJeff Kirsher {
681d9fb9f38SJeff Kirsher 	struct netdev_private *np = netdev_priv(to_net_dev(dev));
682d9fb9f38SJeff Kirsher 	int new_setting;
683d9fb9f38SJeff Kirsher 	unsigned long flags;
684d9fb9f38SJeff Kirsher 
685d9fb9f38SJeff Kirsher         /* Find out the new setting */
686d9fb9f38SJeff Kirsher         if (!strncmp("on", buf, count - 1) || !strncmp("1", buf, count - 1))
687d9fb9f38SJeff Kirsher                 new_setting = 1;
688d9fb9f38SJeff Kirsher         else if (!strncmp("off", buf, count - 1) ||
689d9fb9f38SJeff Kirsher                  !strncmp("0", buf, count - 1))
690d9fb9f38SJeff Kirsher 		new_setting = 0;
691d9fb9f38SJeff Kirsher 	else
692d9fb9f38SJeff Kirsher                  return count;
693d9fb9f38SJeff Kirsher 
694d9fb9f38SJeff Kirsher 	spin_lock_irqsave(&np->lock, flags);
695d9fb9f38SJeff Kirsher 
696d9fb9f38SJeff Kirsher 	np->dspcfg_workaround = new_setting;
697d9fb9f38SJeff Kirsher 
698d9fb9f38SJeff Kirsher 	spin_unlock_irqrestore(&np->lock, flags);
699d9fb9f38SJeff Kirsher 
700d9fb9f38SJeff Kirsher 	return count;
701d9fb9f38SJeff Kirsher }
702d9fb9f38SJeff Kirsher 
ns_ioaddr(struct net_device * dev)703d9fb9f38SJeff Kirsher static inline void __iomem *ns_ioaddr(struct net_device *dev)
704d9fb9f38SJeff Kirsher {
705d710ce13SFrancois Romieu 	struct netdev_private *np = netdev_priv(dev);
706d710ce13SFrancois Romieu 
707d710ce13SFrancois Romieu 	return np->ioaddr;
708d9fb9f38SJeff Kirsher }
709d9fb9f38SJeff Kirsher 
natsemi_irq_enable(struct net_device * dev)710d9fb9f38SJeff Kirsher static inline void natsemi_irq_enable(struct net_device *dev)
711d9fb9f38SJeff Kirsher {
712d9fb9f38SJeff Kirsher 	writel(1, ns_ioaddr(dev) + IntrEnable);
713d9fb9f38SJeff Kirsher 	readl(ns_ioaddr(dev) + IntrEnable);
714d9fb9f38SJeff Kirsher }
715d9fb9f38SJeff Kirsher 
natsemi_irq_disable(struct net_device * dev)716d9fb9f38SJeff Kirsher static inline void natsemi_irq_disable(struct net_device *dev)
717d9fb9f38SJeff Kirsher {
718d9fb9f38SJeff Kirsher 	writel(0, ns_ioaddr(dev) + IntrEnable);
719d9fb9f38SJeff Kirsher 	readl(ns_ioaddr(dev) + IntrEnable);
720d9fb9f38SJeff Kirsher }
721d9fb9f38SJeff Kirsher 
move_int_phy(struct net_device * dev,int addr)722d9fb9f38SJeff Kirsher static void move_int_phy(struct net_device *dev, int addr)
723d9fb9f38SJeff Kirsher {
724d9fb9f38SJeff Kirsher 	struct netdev_private *np = netdev_priv(dev);
725d9fb9f38SJeff Kirsher 	void __iomem *ioaddr = ns_ioaddr(dev);
726d9fb9f38SJeff Kirsher 	int target = 31;
727d9fb9f38SJeff Kirsher 
728d9fb9f38SJeff Kirsher 	/*
729d9fb9f38SJeff Kirsher 	 * The internal phy is visible on the external mii bus. Therefore we must
730d9fb9f38SJeff Kirsher 	 * move it away before we can send commands to an external phy.
731d9fb9f38SJeff Kirsher 	 * There are two addresses we must avoid:
732d9fb9f38SJeff Kirsher 	 * - the address on the external phy that is used for transmission.
733d9fb9f38SJeff Kirsher 	 * - the address that we want to access. User space can access phys
734d9fb9f38SJeff Kirsher 	 *   on the mii bus with SIOCGMIIREG/SIOCSMIIREG, independent from the
735d9fb9f38SJeff Kirsher 	 *   phy that is used for transmission.
736d9fb9f38SJeff Kirsher 	 */
737d9fb9f38SJeff Kirsher 
738d9fb9f38SJeff Kirsher 	if (target == addr)
739d9fb9f38SJeff Kirsher 		target--;
740d9fb9f38SJeff Kirsher 	if (target == np->phy_addr_external)
741d9fb9f38SJeff Kirsher 		target--;
742d9fb9f38SJeff Kirsher 	writew(target, ioaddr + PhyCtrl);
743d9fb9f38SJeff Kirsher 	readw(ioaddr + PhyCtrl);
744d9fb9f38SJeff Kirsher 	udelay(1);
745d9fb9f38SJeff Kirsher }
746d9fb9f38SJeff Kirsher 
/* Read the transceiver's current speed/duplex/autoneg settings into the
 * driver's private state and report them at probe time. */
static void natsemi_init_media(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	u32 tmp;

	/* When the PHY is ignored the link is assumed permanently up;
	 * otherwise start with carrier off until the link check runs. */
	if (np->ignore_phy)
		netif_carrier_on(dev);
	else
		netif_carrier_off(dev);

	/* get the initial settings from hardware */
	tmp            = mdio_read(dev, MII_BMCR);
	np->speed      = (tmp & BMCR_SPEED100)? SPEED_100     : SPEED_10;
	np->duplex     = (tmp & BMCR_FULLDPLX)? DUPLEX_FULL   : DUPLEX_HALF;
	np->autoneg    = (tmp & BMCR_ANENABLE)? AUTONEG_ENABLE: AUTONEG_DISABLE;
	np->advertising= mdio_read(dev, MII_ADVERTISE);

	/* Warn when the transceiver does not advertise the full 10/100 set. */
	if ((np->advertising & ADVERTISE_ALL) != ADVERTISE_ALL &&
	    netif_msg_probe(np)) {
		printk(KERN_INFO "natsemi %s: Transceiver default autonegotiation %s "
			"10%s %s duplex.\n",
			pci_name(np->pci_dev),
			(mdio_read(dev, MII_BMCR) & BMCR_ANENABLE)?
			  "enabled, advertise" : "disabled, force",
			(np->advertising &
			  (ADVERTISE_100FULL|ADVERTISE_100HALF))?
			    "0" : "",
			(np->advertising &
			  (ADVERTISE_100FULL|ADVERTISE_10FULL))?
			    "full" : "half");
	}
	if (netif_msg_probe(np))
		printk(KERN_INFO
			"natsemi %s: Transceiver status %#04x advertising %#04x.\n",
			pci_name(np->pci_dev), mdio_read(dev, MII_BMSR),
			np->advertising);

}
785d9fb9f38SJeff Kirsher 
786d9fb9f38SJeff Kirsher static const struct net_device_ops natsemi_netdev_ops = {
787d9fb9f38SJeff Kirsher 	.ndo_open		= netdev_open,
788d9fb9f38SJeff Kirsher 	.ndo_stop		= netdev_close,
789d9fb9f38SJeff Kirsher 	.ndo_start_xmit		= start_tx,
790d9fb9f38SJeff Kirsher 	.ndo_get_stats		= get_stats,
791afc4b13dSJiri Pirko 	.ndo_set_rx_mode	= set_rx_mode,
792d9fb9f38SJeff Kirsher 	.ndo_change_mtu		= natsemi_change_mtu,
793a7605370SArnd Bergmann 	.ndo_eth_ioctl		= netdev_ioctl,
794d9fb9f38SJeff Kirsher 	.ndo_tx_timeout 	= ns_tx_timeout,
795d9fb9f38SJeff Kirsher 	.ndo_set_mac_address 	= eth_mac_addr,
796d9fb9f38SJeff Kirsher 	.ndo_validate_addr	= eth_validate_addr,
797d9fb9f38SJeff Kirsher #ifdef CONFIG_NET_POLL_CONTROLLER
798d9fb9f38SJeff Kirsher 	.ndo_poll_controller	= natsemi_poll_controller,
799d9fb9f38SJeff Kirsher #endif
800d9fb9f38SJeff Kirsher };
801d9fb9f38SJeff Kirsher 
/* PCI probe: enable the device, map its registers, read the MAC address
 * from the EEPROM, pick the media port, and register the net device.
 * Returns 0 on success or a negative errno; on failure all acquired
 * resources are unwound via the goto chain at the bottom. */
static int natsemi_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct netdev_private *np;
	int i, option, irq, chip_idx = ent->driver_data;
	static int find_cnt = -1;
	resource_size_t iostart;
	unsigned long iosize;
	void __iomem *ioaddr;
	const int pcibar = 1; /* PCI base address register */
	u8 addr[ETH_ALEN];
	int prev_eedata;
	u32 tmp;

/* when built into the kernel, we only print version if device is found */
#ifndef MODULE
	static int printed_version;
	if (!printed_version++)
		printk(version);
#endif

	/* Managed enable: the PCI core releases the device automatically,
	 * which is why the error paths below never call pci_disable_device. */
	i = pcim_enable_device(pdev);
	if (i) return i;

	/* natsemi has a non-standard PM control register
	 * in PCI config space.  Some boards apparently need
	 * to be brought to D0 in this manner.
	 */
	pci_read_config_dword(pdev, PCIPM, &tmp);
	if (tmp & PCI_PM_CTRL_STATE_MASK) {
		/* D0 state, disable PME assertion */
		u32 newtmp = tmp & ~PCI_PM_CTRL_STATE_MASK;
		pci_write_config_dword(pdev, PCIPM, newtmp);
	}

	find_cnt++;
	iostart = pci_resource_start(pdev, pcibar);
	iosize = pci_resource_len(pdev, pcibar);
	irq = pdev->irq;

	pci_set_master(pdev);

	dev = alloc_etherdev(sizeof (struct netdev_private));
	if (!dev)
		return -ENOMEM;
	SET_NETDEV_DEV(dev, &pdev->dev);

	i = pci_request_regions(pdev, DRV_NAME);
	if (i)
		goto err_pci_request_regions;

	ioaddr = ioremap(iostart, iosize);
	if (!ioaddr) {
		i = -ENOMEM;
		goto err_pci_request_regions;
	}

	/* Work around the dropped serial bit. */
	/* Each 16-bit EEPROM word contributes its top bit to the previous
	 * word's reconstruction, hence the rolling prev_eedata. */
	prev_eedata = eeprom_read(ioaddr, 6);
	for (i = 0; i < 3; i++) {
		int eedata = eeprom_read(ioaddr, i + 7);
		addr[i*2] = (eedata << 1) + (prev_eedata >> 15);
		addr[i*2+1] = eedata >> 7;
		prev_eedata = eedata;
	}
	eth_hw_addr_set(dev, addr);

	np = netdev_priv(dev);
	np->ioaddr = ioaddr;

	netif_napi_add(dev, &np->napi, natsemi_poll);
	np->dev = dev;

	np->pci_dev = pdev;
	pci_set_drvdata(pdev, dev);
	np->iosize = iosize;
	spin_lock_init(&np->lock);
	np->msg_enable = (debug >= 0) ? (1<<debug)-1 : NATSEMI_DEF_MSG;
	np->hands_off = 0;
	np->intr_status = 0;
	np->eeprom_size = natsemi_pci_info[chip_idx].eeprom_size;
	if (natsemi_pci_info[chip_idx].flags & NATSEMI_FLAG_IGNORE_PHY)
		np->ignore_phy = 1;
	else
		np->ignore_phy = 0;
	np->dspcfg_workaround = dspcfg_workaround;

	/* Initial port:
	 * - If configured to ignore the PHY set up for external.
	 * - If the nic was configured to use an external phy and if find_mii
	 *   finds a phy: use external port, first phy that replies.
	 * - Otherwise: internal port.
	 * Note that the phy address for the internal phy doesn't matter:
	 * The address would be used to access a phy over the mii bus, but
	 * the internal phy is accessed through mapped registers.
	 */
	if (np->ignore_phy || readl(ioaddr + ChipConfig) & CfgExtPhy)
		dev->if_port = PORT_MII;
	else
		dev->if_port = PORT_TP;
	/* Reset the chip to erase previous misconfiguration. */
	natsemi_reload_eeprom(dev);
	natsemi_reset(dev);

	if (dev->if_port != PORT_TP) {
		np->phy_addr_external = find_mii(dev);
		/* If we're ignoring the PHY it doesn't matter if we can't
		 * find one. */
		if (!np->ignore_phy && np->phy_addr_external == PHY_ADDR_NONE) {
			dev->if_port = PORT_TP;
			np->phy_addr_external = PHY_ADDR_INTERNAL;
		}
	} else {
		np->phy_addr_external = PHY_ADDR_INTERNAL;
	}

	option = find_cnt < MAX_UNITS ? options[find_cnt] : 0;
	/* The lower four bits are the media type. */
	if (option) {
		if (option & 0x200)
			np->full_duplex = 1;
		if (option & 15)
			printk(KERN_INFO
				"natsemi %s: ignoring user supplied media type %d",
				pci_name(np->pci_dev), option & 15);
	}
	if (find_cnt < MAX_UNITS  &&  full_duplex[find_cnt])
		np->full_duplex = 1;

	dev->netdev_ops = &natsemi_netdev_ops;
	dev->watchdog_timeo = TX_TIMEOUT;

	dev->ethtool_ops = &ethtool_ops;

	/* MTU range: 64 - 2024 */
	dev->min_mtu = ETH_ZLEN + ETH_FCS_LEN;
	dev->max_mtu = NATSEMI_RX_LIMIT - NATSEMI_HEADERS;

	if (mtu)
		dev->mtu = mtu;

	natsemi_init_media(dev);

	/* save the silicon revision for later querying */
	np->srr = readl(ioaddr + SiliconRev);
	if (netif_msg_hw(np))
		printk(KERN_INFO "natsemi %s: silicon revision %#04x.\n",
				pci_name(np->pci_dev), np->srr);

	i = register_netdev(dev);
	if (i)
		goto err_register_netdev;
	i = NATSEMI_CREATE_FILE(pdev, dspcfg_workaround);
	if (i)
		goto err_create_file;

	if (netif_msg_drv(np)) {
		printk(KERN_INFO "natsemi %s: %s at %#08llx "
		       "(%s), %pM, IRQ %d",
		       dev->name, natsemi_pci_info[chip_idx].name,
		       (unsigned long long)iostart, pci_name(np->pci_dev),
		       dev->dev_addr, irq);
		if (dev->if_port == PORT_TP)
			printk(", port TP.\n");
		else if (np->ignore_phy)
			printk(", port MII, ignoring PHY\n");
		else
			printk(", port MII, phy ad %d.\n", np->phy_addr_external);
	}
	return 0;

 err_create_file:
	unregister_netdev(dev);

 err_register_netdev:
	iounmap(ioaddr);

 err_pci_request_regions:
	free_netdev(dev);
	return i;
}
983d9fb9f38SJeff Kirsher 
984d9fb9f38SJeff Kirsher 
985d9fb9f38SJeff Kirsher /* Read the EEPROM and MII Management Data I/O (MDIO) interfaces.
986d9fb9f38SJeff Kirsher    The EEPROM code is for the common 93c06/46 EEPROMs with 6 bit addresses. */
987d9fb9f38SJeff Kirsher 
988d9fb9f38SJeff Kirsher /* Delay between EEPROM clock transitions.
989d9fb9f38SJeff Kirsher    No extra delay is needed with 33Mhz PCI, but future 66Mhz access may need
990d9fb9f38SJeff Kirsher    a delay.  Note that pre-2.0.34 kernels had a cache-alignment bug that
991d9fb9f38SJeff Kirsher    made udelay() unreliable.
992d9fb9f38SJeff Kirsher */
993d9fb9f38SJeff Kirsher #define eeprom_delay(ee_addr)	readl(ee_addr)
994d9fb9f38SJeff Kirsher 
995d9fb9f38SJeff Kirsher #define EE_Write0 (EE_ChipSelect)
996d9fb9f38SJeff Kirsher #define EE_Write1 (EE_ChipSelect | EE_DataIn)
997d9fb9f38SJeff Kirsher 
998d9fb9f38SJeff Kirsher /* The EEPROM commands include the alway-set leading bit. */
999d9fb9f38SJeff Kirsher enum EEPROM_Cmds {
1000d9fb9f38SJeff Kirsher 	EE_WriteCmd=(5 << 6), EE_ReadCmd=(6 << 6), EE_EraseCmd=(7 << 6),
1001d9fb9f38SJeff Kirsher };
1002d9fb9f38SJeff Kirsher 
/* Read one 16-bit word from the serial EEPROM by bit-banging the EECtrl
 * register.  'location' is the 6-bit word address. */
static int eeprom_read(void __iomem *addr, int location)
{
	int i;
	int retval = 0;
	void __iomem *ee_addr = addr + EECtrl;
	/* read opcode combined with the word address */
	int read_cmd = location | EE_ReadCmd;

	/* Assert chip select with the data line low. */
	writel(EE_Write0, ee_addr);

	/* Shift the read command bits out. */
	for (i = 10; i >= 0; i--) {
		short dataval = (read_cmd & (1 << i)) ? EE_Write1 : EE_Write0;
		writel(dataval, ee_addr);
		eeprom_delay(ee_addr);
		writel(dataval | EE_ShiftClk, ee_addr);
		eeprom_delay(ee_addr);
	}
	writel(EE_ChipSelect, ee_addr);
	eeprom_delay(ee_addr);

	/* Clock in the 16 data bits; the i-th bit sampled lands in bit i
	 * of the result. */
	for (i = 0; i < 16; i++) {
		writel(EE_ChipSelect | EE_ShiftClk, ee_addr);
		eeprom_delay(ee_addr);
		retval |= (readl(ee_addr) & EE_DataOut) ? 1 << i : 0;
		writel(EE_ChipSelect, ee_addr);
		eeprom_delay(ee_addr);
	}

	/* Terminate the EEPROM access. */
	writel(EE_Write0, ee_addr);
	writel(0, ee_addr);
	return retval;
}
1036d9fb9f38SJeff Kirsher 
1037d9fb9f38SJeff Kirsher /* MII transceiver control section.
1038d9fb9f38SJeff Kirsher  * The 83815 series has an internal transceiver, and we present the
1039d9fb9f38SJeff Kirsher  * internal management registers as if they were MII connected.
1040d9fb9f38SJeff Kirsher  * External Phy registers are referenced through the MII interface.
1041d9fb9f38SJeff Kirsher  */
1042d9fb9f38SJeff Kirsher 
1043d9fb9f38SJeff Kirsher /* clock transitions >= 20ns (25MHz)
1044d9fb9f38SJeff Kirsher  * One readl should be good to PCI @ 100MHz
1045d9fb9f38SJeff Kirsher  */
1046d9fb9f38SJeff Kirsher #define mii_delay(ioaddr)  readl(ioaddr + EECtrl)
1047d9fb9f38SJeff Kirsher 
/* Sample one bit from the bit-banged MII bus: raise the clock, read the
 * data line, drop the clock, then wait out the minimum clock period. */
static int mii_getbit (struct net_device *dev)
{
	int data;
	void __iomem *ioaddr = ns_ioaddr(dev);

	writel(MII_ShiftClk, ioaddr + EECtrl);
	data = readl(ioaddr + EECtrl);
	writel(0, ioaddr + EECtrl);
	mii_delay(ioaddr);
	return (data & MII_Data)? 1 : 0;
}
1059d9fb9f38SJeff Kirsher 
/* Clock out the low 'len' bits of 'data', most significant bit first,
 * onto the bit-banged MII bus via the EECtrl register. */
static void mii_send_bits (struct net_device *dev, u32 data, int len)
{
	u32 i;
	void __iomem *ioaddr = ns_ioaddr(dev);

	for (i = (1 << (len-1)); i; i >>= 1)
	{
		/* set the data line, then pulse the clock high */
		u32 mdio_val = MII_Write | ((data & i)? MII_Data : 0);
		writel(mdio_val, ioaddr + EECtrl);
		mii_delay(ioaddr);
		writel(mdio_val | MII_ShiftClk, ioaddr + EECtrl);
		mii_delay(ioaddr);
	}
	/* release the bus */
	writel(0, ioaddr + EECtrl);
	mii_delay(ioaddr);
}
1076d9fb9f38SJeff Kirsher 
/* Read one 16-bit register from a phy on the external MII bus.
 * Returns the register value, or 0 when the turnaround bit stays high
 * (presumably no phy answered at that address — see find_mii). */
static int miiport_read(struct net_device *dev, int phy_id, int reg)
{
	u32 cmd;
	int i;
	u32 retval = 0;

	/* Ensure sync */
	mii_send_bits (dev, 0xffffffff, 32);
	/* ST(2), OP(2), ADDR(5), REG#(5), TA(2), Data(16) total 32 bits */
	/* ST,OP = 0110'b for read operation */
	cmd = (0x06 << 10) | (phy_id << 5) | reg;
	mii_send_bits (dev, cmd, 14);
	/* Turnaround */
	if (mii_getbit (dev))
		return 0;
	/* Read data */
	for (i = 0; i < 16; i++) {
		retval <<= 1;
		retval |= mii_getbit (dev);
	}
	/* End cycle */
	mii_getbit (dev);
	return retval;
}
1101d9fb9f38SJeff Kirsher 
/* Write one 16-bit register on a phy sitting on the external MII bus. */
static void miiport_write(struct net_device *dev, int phy_id, int reg, u16 data)
{
	u32 cmd;

	/* Ensure sync */
	mii_send_bits (dev, 0xffffffff, 32);
	/* ST(2), OP(2), ADDR(5), REG#(5), TA(2), Data(16) total 32 bits */
	/* ST,OP,AAAAA,RRRRR,TA = 0101xxxxxxxxxx10'b = 0x5002 for write */
	cmd = (0x5002 << 16) | (phy_id << 23) | (reg << 18) | data;
	mii_send_bits (dev, cmd, 32);
	/* End cycle */
	mii_getbit (dev);
}
1115d9fb9f38SJeff Kirsher 
mdio_read(struct net_device * dev,int reg)1116d9fb9f38SJeff Kirsher static int mdio_read(struct net_device *dev, int reg)
1117d9fb9f38SJeff Kirsher {
1118d9fb9f38SJeff Kirsher 	struct netdev_private *np = netdev_priv(dev);
1119d9fb9f38SJeff Kirsher 	void __iomem *ioaddr = ns_ioaddr(dev);
1120d9fb9f38SJeff Kirsher 
1121d9fb9f38SJeff Kirsher 	/* The 83815 series has two ports:
1122d9fb9f38SJeff Kirsher 	 * - an internal transceiver
1123d9fb9f38SJeff Kirsher 	 * - an external mii bus
1124d9fb9f38SJeff Kirsher 	 */
1125d9fb9f38SJeff Kirsher 	if (dev->if_port == PORT_TP)
1126d9fb9f38SJeff Kirsher 		return readw(ioaddr+BasicControl+(reg<<2));
1127d9fb9f38SJeff Kirsher 	else
1128d9fb9f38SJeff Kirsher 		return miiport_read(dev, np->phy_addr_external, reg);
1129d9fb9f38SJeff Kirsher }
1130d9fb9f38SJeff Kirsher 
/* Write an MII register for the currently selected port; the internal
 * transceiver is memory-mapped, the external one is bit-banged. */
static void mdio_write(struct net_device *dev, int reg, u16 data)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = ns_ioaddr(dev);

	if (dev->if_port == PORT_TP) {
		writew(data, ioaddr + BasicControl + (reg << 2));
		return;
	}

	miiport_write(dev, np->phy_addr_external, reg, data);
}
1142d9fb9f38SJeff Kirsher 
/* Reprogram the PHY after a power loss or chip reset: restore the
 * autonegotiation/forced-mode settings from np, apply PHY-model
 * specific fixups, and (internal transceiver only) rewrite the DSP
 * tuning registers that NSC recommends, retrying until they stick.
 * Finally re-enable PHY event interrupts.
 */
static void init_phy_fixup(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = ns_ioaddr(dev);
	int i;
	u32 cfg;
	u16 tmp;

	/* restore stuff lost when power was out */
	tmp = mdio_read(dev, MII_BMCR);
	if (np->autoneg == AUTONEG_ENABLE) {
		/* renegotiate if something changed */
		if ((tmp & BMCR_ANENABLE) == 0 ||
		    np->advertising != mdio_read(dev, MII_ADVERTISE))
		{
			/* turn on autonegotiation and force negotiation */
			tmp |= (BMCR_ANENABLE | BMCR_ANRESTART);
			mdio_write(dev, MII_ADVERTISE, np->advertising);
		}
	} else {
		/* turn off auto negotiation, set speed and duplexity */
		tmp &= ~(BMCR_ANENABLE | BMCR_SPEED100 | BMCR_FULLDPLX);
		if (np->speed == SPEED_100)
			tmp |= BMCR_SPEED100;
		if (np->duplex == DUPLEX_FULL)
			tmp |= BMCR_FULLDPLX;
		/*
		 * Note: there is no good way to inform the link partner
		 * that our capabilities changed. The user has to unplug
		 * and replug the network cable after some changes, e.g.
		 * after switching from 10HD, autoneg off to 100 HD,
		 * autoneg off.
		 */
	}
	mdio_write(dev, MII_BMCR, tmp);
	readl(ioaddr + ChipConfig);	/* flush posted write before the delay */
	udelay(1);

	/* find out what phy this is */
	np->mii = (mdio_read(dev, MII_PHYSID1) << 16)
				+ mdio_read(dev, MII_PHYSID2);

	/* handle external phys here */
	switch (np->mii) {
	case PHYID_AM79C874:
		/* phy specific configuration for fibre/tp operation */
		tmp = mdio_read(dev, MII_MCTRL);
		tmp &= ~(MII_FX_SEL | MII_EN_SCRM);
		if (dev->if_port == PORT_FIBRE)
			tmp |= MII_FX_SEL;
		else
			tmp |= MII_EN_SCRM;
		mdio_write(dev, MII_MCTRL, tmp);
		break;
	default:
		break;
	}
	cfg = readl(ioaddr + ChipConfig);
	if (cfg & CfgExtPhy)
		return;		/* the DSP fixup below is internal-phy only */

	/* On page 78 of the spec, they recommend some settings for "optimum
	   performance" to be done in sequence.  These settings optimize some
	   of the 100Mbit autodetection circuitry.  They say we only want to
	   do this for rev C of the chip, but engineers at NSC (Bradley
	   Kennedy) recommends always setting them.  If you don't, you get
	   errors on some autonegotiations that make the device unusable.

	   It seems that the DSP needs a few usec to reinitialize after
	   the start of the phy. Just retry writing these values until they
	   stick.
	*/
	for (i=0;i<NATSEMI_HW_TIMEOUT;i++) {

		int dspcfg;
		/* PGSEL=1 selects the second (DSP) register page */
		writew(1, ioaddr + PGSEL);
		writew(PMDCSR_VAL, ioaddr + PMDCSR);
		writew(TSTDAT_VAL, ioaddr + TSTDAT);
		/* DP83815 rev C and earlier take a fixed DSPCFG value;
		 * later silicon keeps its coefficients, only adding the
		 * coefficient-visibility bit */
		np->dspcfg = (np->srr <= SRR_DP83815_C)?
			DSPCFG_VAL : (DSPCFG_COEF | readw(ioaddr + DSPCFG));
		writew(np->dspcfg, ioaddr + DSPCFG);
		writew(SDCFG_VAL, ioaddr + SDCFG);
		writew(0, ioaddr + PGSEL);
		readl(ioaddr + ChipConfig);	/* flush, then let the DSP settle */
		udelay(10);

		/* read DSPCFG back; retry until the written value sticks */
		writew(1, ioaddr + PGSEL);
		dspcfg = readw(ioaddr + DSPCFG);
		writew(0, ioaddr + PGSEL);
		if (np->dspcfg == dspcfg)
			break;
	}

	if (netif_msg_link(np)) {
		if (i==NATSEMI_HW_TIMEOUT) {
			printk(KERN_INFO
				"%s: DSPCFG mismatch after retrying for %d usec.\n",
				dev->name, i*10);
		} else {
			printk(KERN_INFO
				"%s: DSPCFG accepted after %d usec.\n",
				dev->name, i*10);
		}
	}
	/*
	 * Enable PHY Specific event based interrupts.  Link state change
	 * and Auto-Negotiation Completion are among the affected.
	 * Read the intr status to clear it (needed for wake events).
	 */
	readw(ioaddr + MIntrStatus);
	writew(MICRIntEn, ioaddr + MIntrCtrl);
}
1255d9fb9f38SJeff Kirsher 
/* Select the external transceiver (CFG:EXT_PHY) and reinitialize it.
 * Returns 1 if a switch was performed, 0 if the external phy was
 * already selected (so the caller knows whether to switch back).
 */
static int switch_port_external(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = ns_ioaddr(dev);
	u32 cfg;

	cfg = readl(ioaddr + ChipConfig);
	if (cfg & CfgExtPhy)
		return 0;	/* already on the external transceiver */

	if (netif_msg_link(np)) {
		printk(KERN_INFO "%s: switching to external transceiver.\n",
				dev->name);
	}

	/* 1) switch back to external phy */
	writel(cfg | (CfgExtPhy | CfgPhyDis), ioaddr + ChipConfig);
	readl(ioaddr + ChipConfig);	/* flush posted write */
	udelay(1);

	/* 2) reset the external phy: */
	/* resetting the external PHY has been known to cause a hub supplying
	 * power over Ethernet to kill the power.  We don't want to kill
	 * power to this computer, so we avoid resetting the phy.
	 */

	/* 3) reinit the phy fixup, it got lost during power down. */
	move_int_phy(dev, np->phy_addr_external);
	init_phy_fixup(dev);

	return 1;
}
1288d9fb9f38SJeff Kirsher 
/* Select the internal transceiver, reset it, and reinitialize it.
 * Returns 1 if a switch was performed, 0 if the internal phy was
 * already selected.
 */
static int switch_port_internal(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = ns_ioaddr(dev);
	int i;
	u32 cfg;
	u16 bmcr;

	cfg = readl(ioaddr + ChipConfig);
	if (!(cfg &CfgExtPhy))
		return 0;	/* already on the internal transceiver */

	if (netif_msg_link(np)) {
		printk(KERN_INFO "%s: switching to internal transceiver.\n",
				dev->name);
	}
	/* 1) switch back to internal phy: */
	cfg = cfg & ~(CfgExtPhy | CfgPhyDis);
	writel(cfg, ioaddr + ChipConfig);
	readl(ioaddr + ChipConfig);	/* flush posted write */
	udelay(1);

	/* 2) reset the internal phy: */
	bmcr = readw(ioaddr+BasicControl+(MII_BMCR<<2));
	writel(bmcr | BMCR_RESET, ioaddr+BasicControl+(MII_BMCR<<2));
	readl(ioaddr + ChipConfig);
	udelay(10);
	/* poll until the phy clears BMCR_RESET (self-clearing bit) */
	for (i=0;i<NATSEMI_HW_TIMEOUT;i++) {
		bmcr = readw(ioaddr+BasicControl+(MII_BMCR<<2));
		if (!(bmcr & BMCR_RESET))
			break;
		udelay(10);
	}
	if (i==NATSEMI_HW_TIMEOUT && netif_msg_link(np)) {
		printk(KERN_INFO
			"%s: phy reset did not complete in %d usec.\n",
			dev->name, i*10);
	}
	/* 3) reinit the phy fixup, it got lost during power down. */
	init_phy_fixup(dev);

	return 1;
}
1332d9fb9f38SJeff Kirsher 
1333d9fb9f38SJeff Kirsher /* Scan for a PHY on the external mii bus.
1334d9fb9f38SJeff Kirsher  * There are two tricky points:
1335d9fb9f38SJeff Kirsher  * - Do not scan while the internal phy is enabled. The internal phy will
1336d9fb9f38SJeff Kirsher  *   crash: e.g. reads from the DSPCFG register will return odd values and
1337d9fb9f38SJeff Kirsher  *   the nasty random phy reset code will reset the nic every few seconds.
1338d9fb9f38SJeff Kirsher  * - The internal phy must be moved around, an external phy could
1339d9fb9f38SJeff Kirsher  *   have the same address as the internal phy.
1340d9fb9f38SJeff Kirsher  */
/* Scan the external mii bus for a PHY (see the comment block above for
 * the two tricky points). Returns the address (1..31) of the first
 * responding PHY, or 32 if none answered; np->mii is filled with the
 * PHY id on success. Restores the previously selected port on exit.
 */
static int find_mii(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int tmp;
	int i;
	int did_switch;

	/* Switch to external phy */
	did_switch = switch_port_external(dev);

	/* Scan the possible phy addresses:
	 *
	 * PHY address 0 means that the phy is in isolate mode. Not yet
	 * supported due to lack of test hardware. User space should
	 * handle it through ethtool.
	 */
	for (i = 1; i <= 31; i++) {
		/* park the internal phy elsewhere so it cannot shadow
		 * an external phy at the same address */
		move_int_phy(dev, i);
		tmp = miiport_read(dev, i, MII_BMSR);
		/* all-ones/all-zeroes BMSR means nothing answered */
		if (tmp != 0xffff && tmp != 0x0000) {
			/* found something! */
			np->mii = (mdio_read(dev, MII_PHYSID1) << 16)
					+ mdio_read(dev, MII_PHYSID2);
			if (netif_msg_probe(np)) {
				printk(KERN_INFO "natsemi %s: found external phy %08x at address %d.\n",
						pci_name(np->pci_dev), np->mii, i);
			}
			break;
		}
	}
	/* And switch back to internal phy: */
	if (did_switch)
		switch_port_internal(dev);
	return i;
}
1376d9fb9f38SJeff Kirsher 
1377d9fb9f38SJeff Kirsher /* CFG bits [13:16] [18:23] */
1378d9fb9f38SJeff Kirsher #define CFG_RESET_SAVE 0xfde000
1379d9fb9f38SJeff Kirsher /* WCSR bits [0:4] [9:10] */
1380d9fb9f38SJeff Kirsher #define WCSR_RESET_SAVE 0x61f
1381d9fb9f38SJeff Kirsher /* RFCR bits [20] [22] [27:31] */
1382d9fb9f38SJeff Kirsher #define RFCR_RESET_SAVE 0xf8500000
1383d9fb9f38SJeff Kirsher 
/* Soft-reset the chip while preserving the EEPROM-derived state.
 * Saves CFG/WCSR/RFCR and the PMATCH/SOPAS filter words, issues
 * ChipReset, waits for completion, then restores everything.
 * Caller is expected to follow up with init_registers().
 */
static void natsemi_reset(struct net_device *dev)
{
	int i;
	u32 cfg;
	u32 wcsr;
	u32 rfcr;
	u16 pmatch[3];	/* perfect-match MAC filter words */
	u16 sopass[3];	/* SecureOn password words */
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = ns_ioaddr(dev);

	/*
	 * Resetting the chip causes some registers to be lost.
	 * Natsemi suggests NOT reloading the EEPROM while live, so instead
	 * we save the state that would have been loaded from EEPROM
	 * on a normal power-up (see the spec EEPROM map).  This assumes
	 * whoever calls this will follow up with init_registers() eventually.
	 */

	/* CFG */
	cfg = readl(ioaddr + ChipConfig) & CFG_RESET_SAVE;
	/* WCSR */
	wcsr = readl(ioaddr + WOLCmd) & WCSR_RESET_SAVE;
	/* RFCR */
	rfcr = readl(ioaddr + RxFilterAddr) & RFCR_RESET_SAVE;
	/* PMATCH */
	for (i = 0; i < 3; i++) {
		writel(i*2, ioaddr + RxFilterAddr);
		pmatch[i] = readw(ioaddr + RxFilterData);
	}
	/* SOPAS */
	for (i = 0; i < 3; i++) {
		writel(0xa+(i*2), ioaddr + RxFilterAddr);
		sopass[i] = readw(ioaddr + RxFilterData);
	}

	/* now whack the chip */
	writel(ChipReset, ioaddr + ChipCmd);
	for (i=0;i<NATSEMI_HW_TIMEOUT;i++) {
		/* ChipReset self-clears when the reset is done */
		if (!(readl(ioaddr + ChipCmd) & ChipReset))
			break;
		udelay(5);
	}
	if (i==NATSEMI_HW_TIMEOUT) {
		printk(KERN_WARNING "%s: reset did not complete in %d usec.\n",
			dev->name, i*5);
	} else if (netif_msg_hw(np)) {
		printk(KERN_DEBUG "%s: reset completed in %d usec.\n",
			dev->name, i*5);
	}

	/* restore CFG */
	cfg |= readl(ioaddr + ChipConfig) & ~CFG_RESET_SAVE;
	/* turn on external phy if it was selected */
	if (dev->if_port == PORT_TP)
		cfg &= ~(CfgExtPhy | CfgPhyDis);
	else
		cfg |= (CfgExtPhy | CfgPhyDis);
	writel(cfg, ioaddr + ChipConfig);
	/* restore WCSR */
	wcsr |= readl(ioaddr + WOLCmd) & ~WCSR_RESET_SAVE;
	writel(wcsr, ioaddr + WOLCmd);
	/* read RFCR */
	rfcr |= readl(ioaddr + RxFilterAddr) & ~RFCR_RESET_SAVE;
	/* restore PMATCH */
	for (i = 0; i < 3; i++) {
		writel(i*2, ioaddr + RxFilterAddr);
		writew(pmatch[i], ioaddr + RxFilterData);
	}
	for (i = 0; i < 3; i++) {
		writel(0xa+(i*2), ioaddr + RxFilterAddr);
		writew(sopass[i], ioaddr + RxFilterData);
	}
	/* restore RFCR last: writing it selects the filter address, so it
	 * must come after the PMATCH/SOPAS data writes above */
	writel(rfcr, ioaddr + RxFilterAddr);
}
1460d9fb9f38SJeff Kirsher 
reset_rx(struct net_device * dev)1461d9fb9f38SJeff Kirsher static void reset_rx(struct net_device *dev)
1462d9fb9f38SJeff Kirsher {
1463d9fb9f38SJeff Kirsher 	int i;
1464d9fb9f38SJeff Kirsher 	struct netdev_private *np = netdev_priv(dev);
1465d9fb9f38SJeff Kirsher 	void __iomem *ioaddr = ns_ioaddr(dev);
1466d9fb9f38SJeff Kirsher 
1467d9fb9f38SJeff Kirsher 	np->intr_status &= ~RxResetDone;
1468d9fb9f38SJeff Kirsher 
1469d9fb9f38SJeff Kirsher 	writel(RxReset, ioaddr + ChipCmd);
1470d9fb9f38SJeff Kirsher 
1471d9fb9f38SJeff Kirsher 	for (i=0;i<NATSEMI_HW_TIMEOUT;i++) {
1472d9fb9f38SJeff Kirsher 		np->intr_status |= readl(ioaddr + IntrStatus);
1473d9fb9f38SJeff Kirsher 		if (np->intr_status & RxResetDone)
1474d9fb9f38SJeff Kirsher 			break;
1475d9fb9f38SJeff Kirsher 		udelay(15);
1476d9fb9f38SJeff Kirsher 	}
1477d9fb9f38SJeff Kirsher 	if (i==NATSEMI_HW_TIMEOUT) {
1478d9fb9f38SJeff Kirsher 		printk(KERN_WARNING "%s: RX reset did not complete in %d usec.\n",
1479d9fb9f38SJeff Kirsher 		       dev->name, i*15);
1480d9fb9f38SJeff Kirsher 	} else if (netif_msg_hw(np)) {
1481d9fb9f38SJeff Kirsher 		printk(KERN_WARNING "%s: RX reset took %d usec.\n",
1482d9fb9f38SJeff Kirsher 		       dev->name, i*15);
1483d9fb9f38SJeff Kirsher 	}
1484d9fb9f38SJeff Kirsher }
1485d9fb9f38SJeff Kirsher 
/* Trigger a reload of the chip's configuration from the EEPROM and
 * poll until the chip clears the self-clearing EepromReload bit
 * (up to NATSEMI_HW_TIMEOUT * 50 usec).
 */
static void natsemi_reload_eeprom(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = ns_ioaddr(dev);
	int i;

	writel(EepromReload, ioaddr + PCIBusCfg);
	for (i=0;i<NATSEMI_HW_TIMEOUT;i++) {
		udelay(50);
		if (!(readl(ioaddr + PCIBusCfg) & EepromReload))
			break;
	}
	if (i==NATSEMI_HW_TIMEOUT) {
		printk(KERN_WARNING "natsemi %s: EEPROM did not reload in %d usec.\n",
			pci_name(np->pci_dev), i*50);
	} else if (netif_msg_hw(np)) {
		printk(KERN_DEBUG "natsemi %s: EEPROM reloaded in %d usec.\n",
			pci_name(np->pci_dev), i*50);
	}
}
1506d9fb9f38SJeff Kirsher 
/* Order the receiver and transmitter off and poll ChipCmd until both
 * the TxOn and RxOn status bits have cleared (i.e. the DMA engines
 * have actually stopped), warning if the chip does not comply.
 */
static void natsemi_stop_rxtx(struct net_device *dev)
{
	void __iomem * ioaddr = ns_ioaddr(dev);
	struct netdev_private *np = netdev_priv(dev);
	int i;

	writel(RxOff | TxOff, ioaddr + ChipCmd);
	for(i=0;i< NATSEMI_HW_TIMEOUT;i++) {
		if ((readl(ioaddr + ChipCmd) & (TxOn|RxOn)) == 0)
			break;
		udelay(5);
	}
	if (i==NATSEMI_HW_TIMEOUT) {
		printk(KERN_WARNING "%s: Tx/Rx process did not stop in %d usec.\n",
			dev->name, i*5);
	} else if (netif_msg_hw(np)) {
		printk(KERN_DEBUG "%s: Tx/Rx process stopped in %d usec.\n",
			dev->name, i*5);
	}
}
1527d9fb9f38SJeff Kirsher 
/* ndo_open: reset the chip, request the (shared) IRQ, allocate and
 * initialize the descriptor rings, program the station address and
 * rx filter under np->lock, start the tx queue and arm the periodic
 * link-check timer. Returns 0 or a negative errno on failure.
 */
static int netdev_open(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem * ioaddr = ns_ioaddr(dev);
	const int irq = np->pci_dev->irq;
	int i;

	/* Reset the chip, just in case. */
	natsemi_reset(dev);

	i = request_irq(irq, intr_handler, IRQF_SHARED, dev->name, dev);
	if (i) return i;

	if (netif_msg_ifup(np))
		printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
			dev->name, irq);
	i = alloc_ring(dev);
	if (i < 0) {
		free_irq(irq, dev);	/* undo request_irq() on failure */
		return i;
	}
	napi_enable(&np->napi);

	init_ring(dev);
	spin_lock_irq(&np->lock);
	init_registers(dev);
	/* now set the MAC address according to dev->dev_addr */
	for (i = 0; i < 3; i++) {
		/* filter RAM takes the address as three little-endian
		 * 16-bit words */
		u16 mac = (dev->dev_addr[2*i+1]<<8) + dev->dev_addr[2*i];

		writel(i*2, ioaddr + RxFilterAddr);
		writew(mac, ioaddr + RxFilterData);
	}
	writel(np->cur_rx_mode, ioaddr + RxFilterAddr);
	spin_unlock_irq(&np->lock);

	netif_start_queue(dev);

	if (netif_msg_ifup(np))
		printk(KERN_DEBUG "%s: Done netdev_open(), status: %#08x.\n",
			dev->name, (int)readl(ioaddr + ChipCmd));

	/* Set the timer to check for link beat. */
	timer_setup(&np->timer, netdev_timer, 0);
	np->timer.expires = round_jiffies(jiffies + NATSEMI_TIMER_FREQ);
	add_timer(&np->timer);

	return 0;
}
1577d9fb9f38SJeff Kirsher 
do_cable_magic(struct net_device * dev)1578d9fb9f38SJeff Kirsher static void do_cable_magic(struct net_device *dev)
1579d9fb9f38SJeff Kirsher {
1580d9fb9f38SJeff Kirsher 	struct netdev_private *np = netdev_priv(dev);
1581d9fb9f38SJeff Kirsher 	void __iomem *ioaddr = ns_ioaddr(dev);
1582d9fb9f38SJeff Kirsher 
1583d9fb9f38SJeff Kirsher 	if (dev->if_port != PORT_TP)
1584d9fb9f38SJeff Kirsher 		return;
1585d9fb9f38SJeff Kirsher 
1586d9fb9f38SJeff Kirsher 	if (np->srr >= SRR_DP83816_A5)
1587d9fb9f38SJeff Kirsher 		return;
1588d9fb9f38SJeff Kirsher 
1589d9fb9f38SJeff Kirsher 	/*
1590d9fb9f38SJeff Kirsher 	 * 100 MBit links with short cables can trip an issue with the chip.
1591d9fb9f38SJeff Kirsher 	 * The problem manifests as lots of CRC errors and/or flickering
1592d9fb9f38SJeff Kirsher 	 * activity LED while idle.  This process is based on instructions
1593d9fb9f38SJeff Kirsher 	 * from engineers at National.
1594d9fb9f38SJeff Kirsher 	 */
1595d9fb9f38SJeff Kirsher 	if (readl(ioaddr + ChipConfig) & CfgSpeed100) {
1596d9fb9f38SJeff Kirsher 		u16 data;
1597d9fb9f38SJeff Kirsher 
1598d9fb9f38SJeff Kirsher 		writew(1, ioaddr + PGSEL);
1599d9fb9f38SJeff Kirsher 		/*
1600d9fb9f38SJeff Kirsher 		 * coefficient visibility should already be enabled via
1601d9fb9f38SJeff Kirsher 		 * DSPCFG | 0x1000
1602d9fb9f38SJeff Kirsher 		 */
1603d9fb9f38SJeff Kirsher 		data = readw(ioaddr + TSTDAT) & 0xff;
1604d9fb9f38SJeff Kirsher 		/*
1605d9fb9f38SJeff Kirsher 		 * the value must be negative, and within certain values
1606d9fb9f38SJeff Kirsher 		 * (these values all come from National)
1607d9fb9f38SJeff Kirsher 		 */
1608d9fb9f38SJeff Kirsher 		if (!(data & 0x80) || ((data >= 0xd8) && (data <= 0xff))) {
1609d9fb9f38SJeff Kirsher 			np = netdev_priv(dev);
1610d9fb9f38SJeff Kirsher 
1611d9fb9f38SJeff Kirsher 			/* the bug has been triggered - fix the coefficient */
1612d9fb9f38SJeff Kirsher 			writew(TSTDAT_FIXED, ioaddr + TSTDAT);
1613d9fb9f38SJeff Kirsher 			/* lock the value */
1614d9fb9f38SJeff Kirsher 			data = readw(ioaddr + DSPCFG);
1615d9fb9f38SJeff Kirsher 			np->dspcfg = data | DSPCFG_LOCK;
1616d9fb9f38SJeff Kirsher 			writew(np->dspcfg, ioaddr + DSPCFG);
1617d9fb9f38SJeff Kirsher 		}
1618d9fb9f38SJeff Kirsher 		writew(0, ioaddr + PGSEL);
1619d9fb9f38SJeff Kirsher 	}
1620d9fb9f38SJeff Kirsher }
1621d9fb9f38SJeff Kirsher 
undo_cable_magic(struct net_device * dev)1622d9fb9f38SJeff Kirsher static void undo_cable_magic(struct net_device *dev)
1623d9fb9f38SJeff Kirsher {
1624d9fb9f38SJeff Kirsher 	u16 data;
1625d9fb9f38SJeff Kirsher 	struct netdev_private *np = netdev_priv(dev);
1626d9fb9f38SJeff Kirsher 	void __iomem * ioaddr = ns_ioaddr(dev);
1627d9fb9f38SJeff Kirsher 
1628d9fb9f38SJeff Kirsher 	if (dev->if_port != PORT_TP)
1629d9fb9f38SJeff Kirsher 		return;
1630d9fb9f38SJeff Kirsher 
1631d9fb9f38SJeff Kirsher 	if (np->srr >= SRR_DP83816_A5)
1632d9fb9f38SJeff Kirsher 		return;
1633d9fb9f38SJeff Kirsher 
1634d9fb9f38SJeff Kirsher 	writew(1, ioaddr + PGSEL);
1635d9fb9f38SJeff Kirsher 	/* make sure the lock bit is clear */
1636d9fb9f38SJeff Kirsher 	data = readw(ioaddr + DSPCFG);
1637d9fb9f38SJeff Kirsher 	np->dspcfg = data & ~DSPCFG_LOCK;
1638d9fb9f38SJeff Kirsher 	writew(np->dspcfg, ioaddr + DSPCFG);
1639d9fb9f38SJeff Kirsher 	writew(0, ioaddr + PGSEL);
1640d9fb9f38SJeff Kirsher }
1641d9fb9f38SJeff Kirsher 
/* Re-read the PHY link status, update the carrier state, and
 * propagate the (re)negotiated duplex setting into the MAC's
 * TxConfig/RxConfig registers when it changed.
 */
static void check_link(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem * ioaddr = ns_ioaddr(dev);
	int duplex = np->duplex;
	u16 bmsr;

	/* If we are ignoring the PHY then don't try reading it. */
	if (np->ignore_phy)
		goto propagate_state;

	/* The link status field is latched: it remains low after a temporary
	 * link failure until it's read. We need the current link status,
	 * thus read twice.
	 */
	mdio_read(dev, MII_BMSR);
	bmsr = mdio_read(dev, MII_BMSR);

	if (!(bmsr & BMSR_LSTATUS)) {
		if (netif_carrier_ok(dev)) {
			if (netif_msg_link(np))
				printk(KERN_NOTICE "%s: link down.\n",
				       dev->name);
			netif_carrier_off(dev);
			undo_cable_magic(dev);
		}
		return;
	}
	if (!netif_carrier_ok(dev)) {
		if (netif_msg_link(np))
			printk(KERN_NOTICE "%s: link up.\n", dev->name);
		netif_carrier_on(dev);
		do_cable_magic(dev);
	}

	/* start from the forced-duplex setting; autonegotiation or the
	 * partner's BMCR may upgrade it to full duplex below */
	duplex = np->full_duplex;
	if (!duplex) {
		if (bmsr & BMSR_ANEGCOMPLETE) {
			int tmp = mii_nway_result(
				np->advertising & mdio_read(dev, MII_LPA));
			if (tmp == LPA_100FULL || tmp == LPA_10FULL)
				duplex = 1;
		} else if (mdio_read(dev, MII_BMCR) & BMCR_FULLDPLX)
			duplex = 1;
	}

propagate_state:
	/* if duplex is set then bit 28 must be set, too */
	if (duplex ^ !!(np->rx_config & RxAcceptTx)) {
		if (netif_msg_link(np))
			printk(KERN_INFO
				"%s: Setting %s-duplex based on negotiated "
				"link capability.\n", dev->name,
				duplex ? "full" : "half");
		if (duplex) {
			np->rx_config |= RxAcceptTx;
			np->tx_config |= TxCarrierIgn | TxHeartIgn;
		} else {
			np->rx_config &= ~RxAcceptTx;
			np->tx_config &= ~(TxCarrierIgn | TxHeartIgn);
		}
		writel(np->tx_config, ioaddr + TxConfig);
		writel(np->rx_config, ioaddr + RxConfig);
	}
}
1707d9fb9f38SJeff Kirsher 
/* Program the chip from scratch (after reset or resume): fix up the
 * PHY, install the ring pointers, set the DMA/FIFO thresholds,
 * disable PME, establish the link/duplex and rx mode, unmask
 * interrupts and start the Tx/Rx engines.
 */
static void init_registers(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem * ioaddr = ns_ioaddr(dev);

	init_phy_fixup(dev);

	/* clear any interrupts that are pending, such as wake events */
	readl(ioaddr + IntrStatus);

	writel(np->ring_dma, ioaddr + RxRingPtr);
	/* tx ring is laid out immediately after the rx ring */
	writel(np->ring_dma + RX_RING_SIZE * sizeof(struct netdev_desc),
		ioaddr + TxRingPtr);

	/* Initialize other registers.
	 * Configure the PCI bus bursts and FIFO thresholds.
	 * Configure for standard, in-spec Ethernet.
	 * Start with half-duplex. check_link will update
	 * to the correct settings.
	 */

	/* DRTH: 2: start tx if 64 bytes are in the fifo
	 * FLTH: 0x10: refill with next packet if 512 bytes are free
	 * MXDMA: 0: up to 256 byte bursts.
	 * 	MXDMA must be <= FLTH
	 * ECRETRY=1
	 * ATP=1
	 */
	np->tx_config = TxAutoPad | TxCollRetry | TxMxdma_256 |
				TX_FLTH_VAL | TX_DRTH_VAL_START;
	writel(np->tx_config, ioaddr + TxConfig);

	/* DRTH 0x10: start copying to memory if 128 bytes are in the fifo
	 * MXDMA 0: up to 256 byte bursts
	 */
	np->rx_config = RxMxdma_256 | RX_DRTH_VAL;
	/* if receive ring now has bigger buffers than normal, enable jumbo */
	if (np->rx_buf_sz > NATSEMI_LONGPKT)
		np->rx_config |= RxAcceptLong;

	writel(np->rx_config, ioaddr + RxConfig);

	/* Disable PME:
	 * The PME bit is initialized from the EEPROM contents.
	 * PCI cards probably have PME disabled, but motherboard
	 * implementations may have PME set to enable WakeOnLan.
	 * With PME set the chip will scan incoming packets but
	 * nothing will be written to memory. */
	np->SavedClkRun = readl(ioaddr + ClkRun);
	writel(np->SavedClkRun & ~PMEEnable, ioaddr + ClkRun);
	if (np->SavedClkRun & PMEStatus && netif_msg_wol(np)) {
		printk(KERN_NOTICE "%s: Wake-up event %#08x\n",
			dev->name, readl(ioaddr + WOLCmd));
	}

	check_link(dev);
	__set_rx_mode(dev);

	/* Enable interrupts by setting the interrupt mask. */
	writel(DEFAULT_INTR, ioaddr + IntrMask);
	natsemi_irq_enable(dev);

	writel(RxOn | TxOn, ioaddr + ChipCmd);
	writel(StatsClear, ioaddr + StatsCtrl); /* Clear Stats */
}
1773d9fb9f38SJeff Kirsher 
1774d9fb9f38SJeff Kirsher /*
1775d9fb9f38SJeff Kirsher  * netdev_timer:
1776d9fb9f38SJeff Kirsher  * Purpose:
1777d9fb9f38SJeff Kirsher  * 1) check for link changes. Usually they are handled by the MII interrupt
1778d9fb9f38SJeff Kirsher  *    but it doesn't hurt to check twice.
1779d9fb9f38SJeff Kirsher  * 2) check for sudden death of the NIC:
1780d9fb9f38SJeff Kirsher  *    It seems that a reference set for this chip went out with incorrect info,
1781d9fb9f38SJeff Kirsher  *    and there exist boards that aren't quite right.  An unexpected voltage
1782d9fb9f38SJeff Kirsher  *    drop can cause the PHY to get itself in a weird state (basically reset).
1783d9fb9f38SJeff Kirsher  *    NOTE: this only seems to affect revC chips.  The user can disable
1784d9fb9f38SJeff Kirsher  *    this check via dspcfg_workaround sysfs option.
1785d9fb9f38SJeff Kirsher  * 3) check of death of the RX path due to OOM
1786d9fb9f38SJeff Kirsher  */
static void netdev_timer(struct timer_list *t)
{
	struct netdev_private *np = from_timer(np, t, timer);
	struct net_device *dev = np->dev;
	void __iomem * ioaddr = ns_ioaddr(dev);
	int next_tick = NATSEMI_TIMER_FREQ;
	const int irq = np->pci_dev->irq;

	if (netif_msg_timer(np)) {
		/* DO NOT read the IntrStatus register,
		 * a read clears any pending interrupts.
		 */
		printk(KERN_DEBUG "%s: Media selection timer tick.\n",
			dev->name);
	}

	if (dev->if_port == PORT_TP) {
		u16 dspcfg;

		spin_lock_irq(&np->lock);
		/* check for a nasty random phy-reset - use dspcfg as a flag */
		writew(1, ioaddr+PGSEL);
		dspcfg = readw(ioaddr+DSPCFG);
		writew(0, ioaddr+PGSEL);
		if (np->dspcfg_workaround && dspcfg != np->dspcfg) {
			if (!netif_queue_stopped(dev)) {
				spin_unlock_irq(&np->lock);
				if (netif_msg_drv(np))
					printk(KERN_NOTICE "%s: possible phy reset: "
						"re-initializing\n", dev->name);
				/* Full recovery: quiesce the chip with its
				 * irq masked, then rebuild the rings and
				 * reprogram every register. */
				disable_irq(irq);
				spin_lock_irq(&np->lock);
				natsemi_stop_rxtx(dev);
				dump_ring(dev);
				reinit_ring(dev);
				init_registers(dev);
				spin_unlock_irq(&np->lock);
				enable_irq(irq);
			} else {
				/* hurry back */
				next_tick = HZ;
				spin_unlock_irq(&np->lock);
			}
		} else {
			/* init_registers() calls check_link() for the above case */
			check_link(dev);
			spin_unlock_irq(&np->lock);
		}
	} else {
		spin_lock_irq(&np->lock);
		check_link(dev);
		spin_unlock_irq(&np->lock);
	}
	if (np->oom) {
		/* RX ring ran dry earlier (flag set by refill_rx);
		 * retry the refill with the device irq masked. */
		disable_irq(irq);
		np->oom = 0;
		refill_rx(dev);
		enable_irq(irq);
		if (!np->oom) {
			writel(RxOn, ioaddr + ChipCmd);
		} else {
			/* still OOM - poll again on the very next jiffy */
			next_tick = 1;
		}
	}

	/* round_jiffies() aligns the expiry with other timers to batch
	 * wakeups; skip it when we need to come back immediately. */
	if (next_tick > 1)
		mod_timer(&np->timer, round_jiffies(jiffies + next_tick));
	else
		mod_timer(&np->timer, jiffies + next_tick);
}
1857d9fb9f38SJeff Kirsher 
dump_ring(struct net_device * dev)1858d9fb9f38SJeff Kirsher static void dump_ring(struct net_device *dev)
1859d9fb9f38SJeff Kirsher {
1860d9fb9f38SJeff Kirsher 	struct netdev_private *np = netdev_priv(dev);
1861d9fb9f38SJeff Kirsher 
1862d9fb9f38SJeff Kirsher 	if (netif_msg_pktdata(np)) {
1863d9fb9f38SJeff Kirsher 		int i;
1864d9fb9f38SJeff Kirsher 		printk(KERN_DEBUG "  Tx ring at %p:\n", np->tx_ring);
1865d9fb9f38SJeff Kirsher 		for (i = 0; i < TX_RING_SIZE; i++) {
1866d9fb9f38SJeff Kirsher 			printk(KERN_DEBUG " #%d desc. %#08x %#08x %#08x.\n",
1867d9fb9f38SJeff Kirsher 				i, np->tx_ring[i].next_desc,
1868d9fb9f38SJeff Kirsher 				np->tx_ring[i].cmd_status,
1869d9fb9f38SJeff Kirsher 				np->tx_ring[i].addr);
1870d9fb9f38SJeff Kirsher 		}
1871d9fb9f38SJeff Kirsher 		printk(KERN_DEBUG "  Rx ring %p:\n", np->rx_ring);
1872d9fb9f38SJeff Kirsher 		for (i = 0; i < RX_RING_SIZE; i++) {
1873d9fb9f38SJeff Kirsher 			printk(KERN_DEBUG " #%d desc. %#08x %#08x %#08x.\n",
1874d9fb9f38SJeff Kirsher 				i, np->rx_ring[i].next_desc,
1875d9fb9f38SJeff Kirsher 				np->rx_ring[i].cmd_status,
1876d9fb9f38SJeff Kirsher 				np->rx_ring[i].addr);
1877d9fb9f38SJeff Kirsher 		}
1878d9fb9f38SJeff Kirsher 	}
1879d9fb9f38SJeff Kirsher }
1880d9fb9f38SJeff Kirsher 
/*
 * ndo_tx_timeout hook: the stack saw no TX completion in time.  Unless
 * the driver is in the hands_off state, reset the chip, rebuild the
 * descriptor rings and reprogram the registers, then count the error
 * and restart the TX queue.
 */
static void ns_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem * ioaddr = ns_ioaddr(dev);
	const int irq = np->pci_dev->irq;

	/* Mask the device irq before taking the lock so the handler
	 * cannot run concurrently with the reset sequence. */
	disable_irq(irq);
	spin_lock_irq(&np->lock);
	if (!np->hands_off) {
		if (netif_msg_tx_err(np))
			printk(KERN_WARNING
				"%s: Transmit timed out, status %#08x,"
				" resetting...\n",
				dev->name, readl(ioaddr + IntrStatus));
		dump_ring(dev);

		natsemi_reset(dev);
		reinit_ring(dev);
		init_registers(dev);
	} else {
		printk(KERN_WARNING
			"%s: tx_timeout while in hands_off state?\n",
			dev->name);
	}
	spin_unlock_irq(&np->lock);
	enable_irq(irq);

	netif_trans_update(dev); /* prevent tx timeout */
	dev->stats.tx_errors++;
	netif_wake_queue(dev);
}
1912d9fb9f38SJeff Kirsher 
alloc_ring(struct net_device * dev)1913d9fb9f38SJeff Kirsher static int alloc_ring(struct net_device *dev)
1914d9fb9f38SJeff Kirsher {
1915d9fb9f38SJeff Kirsher 	struct netdev_private *np = netdev_priv(dev);
1916fd9e4d6fSChristophe JAILLET 	np->rx_ring = dma_alloc_coherent(&np->pci_dev->dev,
1917d9fb9f38SJeff Kirsher 					 sizeof(struct netdev_desc) * (RX_RING_SIZE + TX_RING_SIZE),
1918fd9e4d6fSChristophe JAILLET 					 &np->ring_dma, GFP_KERNEL);
1919d9fb9f38SJeff Kirsher 	if (!np->rx_ring)
1920d9fb9f38SJeff Kirsher 		return -ENOMEM;
1921d9fb9f38SJeff Kirsher 	np->tx_ring = &np->rx_ring[RX_RING_SIZE];
1922d9fb9f38SJeff Kirsher 	return 0;
1923d9fb9f38SJeff Kirsher }
1924d9fb9f38SJeff Kirsher 
/*
 * Post fresh receive buffers for every consumed RX slot.  For each empty
 * slot: allocate an skb, DMA-map it, record the bus address in the
 * descriptor, then write rx_buf_sz into cmd_status to hand the
 * descriptor back to the chip.  On allocation or mapping failure the
 * loop stops early; if the entire ring is still unfilled, np->oom is
 * set so netdev_timer retries later.
 */
static void refill_rx(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);

	/* Refill the Rx ring buffers. */
	for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
		struct sk_buff *skb;
		int entry = np->dirty_rx % RX_RING_SIZE;
		if (np->rx_skbuff[entry] == NULL) {
			/* extra slack for alignment/padding, see NATSEMI_PADDING */
			unsigned int buflen = np->rx_buf_sz+NATSEMI_PADDING;
			skb = netdev_alloc_skb(dev, buflen);
			np->rx_skbuff[entry] = skb;
			if (skb == NULL)
				break; /* Better luck next round. */
			np->rx_dma[entry] = dma_map_single(&np->pci_dev->dev,
							   skb->data, buflen,
							   DMA_FROM_DEVICE);
			if (dma_mapping_error(&np->pci_dev->dev, np->rx_dma[entry])) {
				dev_kfree_skb_any(skb);
				np->rx_skbuff[entry] = NULL;
				break; /* Better luck next round. */
			}
			np->rx_ring[entry].addr = cpu_to_le32(np->rx_dma[entry]);
		}
		/* give the descriptor back to the chip */
		np->rx_ring[entry].cmd_status = cpu_to_le32(np->rx_buf_sz);
	}
	if (np->cur_rx - np->dirty_rx == RX_RING_SIZE) {
		if (netif_msg_rx_err(np))
			printk(KERN_WARNING "%s: going OOM.\n", dev->name);
		np->oom = 1;
	}
}
1957d9fb9f38SJeff Kirsher 
set_bufsize(struct net_device * dev)1958d9fb9f38SJeff Kirsher static void set_bufsize(struct net_device *dev)
1959d9fb9f38SJeff Kirsher {
1960d9fb9f38SJeff Kirsher 	struct netdev_private *np = netdev_priv(dev);
1961d9fb9f38SJeff Kirsher 	if (dev->mtu <= ETH_DATA_LEN)
1962d9fb9f38SJeff Kirsher 		np->rx_buf_sz = ETH_DATA_LEN + NATSEMI_HEADERS;
1963d9fb9f38SJeff Kirsher 	else
1964d9fb9f38SJeff Kirsher 		np->rx_buf_sz = dev->mtu + NATSEMI_HEADERS;
1965d9fb9f38SJeff Kirsher }
1966d9fb9f38SJeff Kirsher 
1967d9fb9f38SJeff Kirsher /* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static void init_ring(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int i;

	/* 1) TX ring */
	np->dirty_tx = np->cur_tx = 0;
	for (i = 0; i < TX_RING_SIZE; i++) {
		np->tx_skbuff[i] = NULL;
		/* chain each descriptor to the next, wrapping at the end;
		 * TX descriptors start RX_RING_SIZE entries into ring_dma */
		np->tx_ring[i].next_desc = cpu_to_le32(np->ring_dma
			+sizeof(struct netdev_desc)
			*((i+1)%TX_RING_SIZE+RX_RING_SIZE));
		np->tx_ring[i].cmd_status = 0;
	}

	/* 2) RX ring */
	np->dirty_rx = 0;
	/* cur_rx - dirty_rx == RX_RING_SIZE marks every slot as needing
	 * a buffer, so refill_rx below populates the whole ring */
	np->cur_rx = RX_RING_SIZE;
	np->oom = 0;
	set_bufsize(dev);

	np->rx_head_desc = &np->rx_ring[0];

	/* Please be careful before changing this loop - at least gcc-2.95.1
	 * miscompiles it otherwise.
	 */
	/* Initialize all Rx descriptors. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].next_desc = cpu_to_le32(np->ring_dma
				+sizeof(struct netdev_desc)
				*((i+1)%RX_RING_SIZE));
		np->rx_ring[i].cmd_status = cpu_to_le32(DescOwn);
		np->rx_skbuff[i] = NULL;
	}
	refill_rx(dev);
	dump_ring(dev);
}
2005d9fb9f38SJeff Kirsher 
drain_tx(struct net_device * dev)2006d9fb9f38SJeff Kirsher static void drain_tx(struct net_device *dev)
2007d9fb9f38SJeff Kirsher {
2008d9fb9f38SJeff Kirsher 	struct netdev_private *np = netdev_priv(dev);
2009d9fb9f38SJeff Kirsher 	int i;
2010d9fb9f38SJeff Kirsher 
2011d9fb9f38SJeff Kirsher 	for (i = 0; i < TX_RING_SIZE; i++) {
2012d9fb9f38SJeff Kirsher 		if (np->tx_skbuff[i]) {
2013fd9e4d6fSChristophe JAILLET 			dma_unmap_single(&np->pci_dev->dev, np->tx_dma[i],
2014fd9e4d6fSChristophe JAILLET 					 np->tx_skbuff[i]->len, DMA_TO_DEVICE);
2015d9fb9f38SJeff Kirsher 			dev_kfree_skb(np->tx_skbuff[i]);
2016d9fb9f38SJeff Kirsher 			dev->stats.tx_dropped++;
2017d9fb9f38SJeff Kirsher 		}
2018d9fb9f38SJeff Kirsher 		np->tx_skbuff[i] = NULL;
2019d9fb9f38SJeff Kirsher 	}
2020d9fb9f38SJeff Kirsher }
2021d9fb9f38SJeff Kirsher 
drain_rx(struct net_device * dev)2022d9fb9f38SJeff Kirsher static void drain_rx(struct net_device *dev)
2023d9fb9f38SJeff Kirsher {
2024d9fb9f38SJeff Kirsher 	struct netdev_private *np = netdev_priv(dev);
2025d9fb9f38SJeff Kirsher 	unsigned int buflen = np->rx_buf_sz;
2026d9fb9f38SJeff Kirsher 	int i;
2027d9fb9f38SJeff Kirsher 
2028d9fb9f38SJeff Kirsher 	/* Free all the skbuffs in the Rx queue. */
2029d9fb9f38SJeff Kirsher 	for (i = 0; i < RX_RING_SIZE; i++) {
2030d9fb9f38SJeff Kirsher 		np->rx_ring[i].cmd_status = 0;
2031d9fb9f38SJeff Kirsher 		np->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
2032d9fb9f38SJeff Kirsher 		if (np->rx_skbuff[i]) {
2033fd9e4d6fSChristophe JAILLET 			dma_unmap_single(&np->pci_dev->dev, np->rx_dma[i],
2034d9fb9f38SJeff Kirsher 					 buflen + NATSEMI_PADDING,
2035fd9e4d6fSChristophe JAILLET 					 DMA_FROM_DEVICE);
2036d9fb9f38SJeff Kirsher 			dev_kfree_skb(np->rx_skbuff[i]);
2037d9fb9f38SJeff Kirsher 		}
2038d9fb9f38SJeff Kirsher 		np->rx_skbuff[i] = NULL;
2039d9fb9f38SJeff Kirsher 	}
2040d9fb9f38SJeff Kirsher }
2041d9fb9f38SJeff Kirsher 
/* Release every skb still held by the RX and TX rings. */
static void drain_ring(struct net_device *dev)
{
	drain_rx(dev);
	drain_tx(dev);
}
2047d9fb9f38SJeff Kirsher 
free_ring(struct net_device * dev)2048d9fb9f38SJeff Kirsher static void free_ring(struct net_device *dev)
2049d9fb9f38SJeff Kirsher {
2050d9fb9f38SJeff Kirsher 	struct netdev_private *np = netdev_priv(dev);
2051fd9e4d6fSChristophe JAILLET 	dma_free_coherent(&np->pci_dev->dev,
2052d9fb9f38SJeff Kirsher 			  sizeof(struct netdev_desc) * (RX_RING_SIZE + TX_RING_SIZE),
2053d9fb9f38SJeff Kirsher 			  np->rx_ring, np->ring_dma);
2054d9fb9f38SJeff Kirsher }
2055d9fb9f38SJeff Kirsher 
reinit_rx(struct net_device * dev)2056d9fb9f38SJeff Kirsher static void reinit_rx(struct net_device *dev)
2057d9fb9f38SJeff Kirsher {
2058d9fb9f38SJeff Kirsher 	struct netdev_private *np = netdev_priv(dev);
2059d9fb9f38SJeff Kirsher 	int i;
2060d9fb9f38SJeff Kirsher 
2061d9fb9f38SJeff Kirsher 	/* RX Ring */
2062d9fb9f38SJeff Kirsher 	np->dirty_rx = 0;
2063d9fb9f38SJeff Kirsher 	np->cur_rx = RX_RING_SIZE;
2064d9fb9f38SJeff Kirsher 	np->rx_head_desc = &np->rx_ring[0];
2065d9fb9f38SJeff Kirsher 	/* Initialize all Rx descriptors. */
2066d9fb9f38SJeff Kirsher 	for (i = 0; i < RX_RING_SIZE; i++)
2067d9fb9f38SJeff Kirsher 		np->rx_ring[i].cmd_status = cpu_to_le32(DescOwn);
2068d9fb9f38SJeff Kirsher 
2069d9fb9f38SJeff Kirsher 	refill_rx(dev);
2070d9fb9f38SJeff Kirsher }
2071d9fb9f38SJeff Kirsher 
reinit_ring(struct net_device * dev)2072d9fb9f38SJeff Kirsher static void reinit_ring(struct net_device *dev)
2073d9fb9f38SJeff Kirsher {
2074d9fb9f38SJeff Kirsher 	struct netdev_private *np = netdev_priv(dev);
2075d9fb9f38SJeff Kirsher 	int i;
2076d9fb9f38SJeff Kirsher 
2077d9fb9f38SJeff Kirsher 	/* drain TX ring */
2078d9fb9f38SJeff Kirsher 	drain_tx(dev);
2079d9fb9f38SJeff Kirsher 	np->dirty_tx = np->cur_tx = 0;
2080d9fb9f38SJeff Kirsher 	for (i=0;i<TX_RING_SIZE;i++)
2081d9fb9f38SJeff Kirsher 		np->tx_ring[i].cmd_status = 0;
2082d9fb9f38SJeff Kirsher 
2083d9fb9f38SJeff Kirsher 	reinit_rx(dev);
2084d9fb9f38SJeff Kirsher }
2085d9fb9f38SJeff Kirsher 
/*
 * ndo_start_xmit hook.  DMA-map the skb, fill the next TX descriptor,
 * and hand it to the chip by setting DescOwn.  Stops the queue when the
 * ring is nearly full; drops the packet when mapping fails or the
 * driver is in the hands_off state.  Always returns NETDEV_TX_OK.
 */
static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem * ioaddr = ns_ioaddr(dev);
	unsigned entry;
	unsigned long flags;

	/* Note: Ordering is important here, set the field with the
	   "ownership" bit last, and only then increment cur_tx. */

	/* Calculate the next Tx descriptor entry. */
	entry = np->cur_tx % TX_RING_SIZE;

	np->tx_skbuff[entry] = skb;
	np->tx_dma[entry] = dma_map_single(&np->pci_dev->dev, skb->data,
					   skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(&np->pci_dev->dev, np->tx_dma[entry])) {
		/* cannot DMA this skb - drop it, the stack won't retry */
		np->tx_skbuff[entry] = NULL;
		dev_kfree_skb_irq(skb);
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	np->tx_ring[entry].addr = cpu_to_le32(np->tx_dma[entry]);

	spin_lock_irqsave(&np->lock, flags);

	if (!np->hands_off) {
		np->tx_ring[entry].cmd_status = cpu_to_le32(DescOwn | skb->len);
		/* StrongARM: Explicitly cache flush np->tx_ring and
		 * skb->data,skb->len. */
		wmb();
		np->cur_tx++;
		if (np->cur_tx - np->dirty_tx >= TX_QUEUE_LEN - 1) {
			/* ring nearly full: reclaim finished slots first,
			 * and only stop the queue if that didn't help */
			netdev_tx_done(dev);
			if (np->cur_tx - np->dirty_tx >= TX_QUEUE_LEN - 1)
				netif_stop_queue(dev);
		}
		/* Wake the potentially-idle transmit channel. */
		writel(TxOn, ioaddr + ChipCmd);
	} else {
		dev_kfree_skb_irq(skb);
		dev->stats.tx_dropped++;
	}
	spin_unlock_irqrestore(&np->lock, flags);

	if (netif_msg_tx_queued(np)) {
		printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n",
			dev->name, np->cur_tx, entry);
	}
	return NETDEV_TX_OK;
}
2138d9fb9f38SJeff Kirsher 
/*
 * Reclaim completed TX descriptors: update statistics, unmap the DMA
 * buffer, free the skb, and wake the queue once enough slots are free.
 * Callers in this file hold np->lock (natsemi_poll, start_tx).
 */
static void netdev_tx_done(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);

	for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
		int entry = np->dirty_tx % TX_RING_SIZE;
		/* DescOwn still set means the chip hasn't finished it yet */
		if (np->tx_ring[entry].cmd_status & cpu_to_le32(DescOwn))
			break;
		if (netif_msg_tx_done(np))
			printk(KERN_DEBUG
				"%s: tx frame #%d finished, status %#08x.\n",
					dev->name, np->dirty_tx,
					le32_to_cpu(np->tx_ring[entry].cmd_status));
		if (np->tx_ring[entry].cmd_status & cpu_to_le32(DescPktOK)) {
			dev->stats.tx_packets++;
			dev->stats.tx_bytes += np->tx_skbuff[entry]->len;
		} else { /* Various Tx errors */
			int tx_status =
				le32_to_cpu(np->tx_ring[entry].cmd_status);
			if (tx_status & (DescTxAbort|DescTxExcColl))
				dev->stats.tx_aborted_errors++;
			if (tx_status & DescTxFIFO)
				dev->stats.tx_fifo_errors++;
			if (tx_status & DescTxCarrier)
				dev->stats.tx_carrier_errors++;
			if (tx_status & DescTxOOWCol)
				dev->stats.tx_window_errors++;
			dev->stats.tx_errors++;
		}
		dma_unmap_single(&np->pci_dev->dev, np->tx_dma[entry],
				 np->tx_skbuff[entry]->len, DMA_TO_DEVICE);
		/* Free the original skb. */
		dev_consume_skb_irq(np->tx_skbuff[entry]);
		np->tx_skbuff[entry] = NULL;
	}
	if (netif_queue_stopped(dev) &&
	    np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
		/* The ring is no longer full, wake queue. */
		netif_wake_queue(dev);
	}
}
2180d9fb9f38SJeff Kirsher 
2181d9fb9f38SJeff Kirsher /* The interrupt handler doesn't actually handle interrupts itself, it
2182d9fb9f38SJeff Kirsher  * schedules a NAPI poll if there is anything to do. */
static irqreturn_t intr_handler(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct netdev_private *np = netdev_priv(dev);
	void __iomem * ioaddr = ns_ioaddr(dev);

	/* Reading IntrStatus automatically acknowledges so don't do
	 * that while interrupts are disabled, (for example, while a
	 * poll is scheduled).  */
	if (np->hands_off || !readl(ioaddr + IntrEnable))
		return IRQ_NONE;

	/* This read also clears the pending interrupt causes; the value
	 * is stashed for natsemi_poll to consume. */
	np->intr_status = readl(ioaddr + IntrStatus);

	if (!np->intr_status)
		return IRQ_NONE;

	if (netif_msg_intr(np))
		printk(KERN_DEBUG
		       "%s: Interrupt, status %#08x, mask %#08x.\n",
		       dev->name, np->intr_status,
		       readl(ioaddr + IntrMask));

	prefetch(&np->rx_skbuff[np->cur_rx % RX_RING_SIZE]);

	if (napi_schedule_prep(&np->napi)) {
		/* Disable interrupts and register for poll */
		natsemi_irq_disable(dev);
		__napi_schedule(&np->napi);
	} else
		printk(KERN_WARNING
	       	       "%s: Ignoring interrupt, status %#08x, mask %#08x.\n",
		       dev->name, np->intr_status,
		       readl(ioaddr + IntrMask));

	return IRQ_HANDLED;
}
2220d9fb9f38SJeff Kirsher 
2221d9fb9f38SJeff Kirsher /* This is the NAPI poll routine.  As well as the standard RX handling
2222d9fb9f38SJeff Kirsher  * it also handles all other interrupts that the chip might raise.
2223d9fb9f38SJeff Kirsher  */
static int natsemi_poll(struct napi_struct *napi, int budget)
{
	struct netdev_private *np = container_of(napi, struct netdev_private, napi);
	struct net_device *dev = np->dev;
	void __iomem * ioaddr = ns_ioaddr(dev);
	int work_done = 0;

	/* np->intr_status was captured by intr_handler; loop until the
	 * chip reports no further pending causes or budget runs out. */
	do {
		if (netif_msg_intr(np))
			printk(KERN_DEBUG
			       "%s: Poll, status %#08x, mask %#08x.\n",
			       dev->name, np->intr_status,
			       readl(ioaddr + IntrMask));

		/* netdev_rx() may read IntrStatus again if the RX state
		 * machine falls over so do it first. */
		if (np->intr_status &
		    (IntrRxDone | IntrRxIntr | RxStatusFIFOOver |
		     IntrRxErr | IntrRxOverrun)) {
			netdev_rx(dev, &work_done, budget);
		}

		if (np->intr_status &
		    (IntrTxDone | IntrTxIntr | IntrTxIdle | IntrTxErr)) {
			spin_lock(&np->lock);
			netdev_tx_done(dev);
			spin_unlock(&np->lock);
		}

		/* Abnormal error summary/uncommon events handlers. */
		if (np->intr_status & IntrAbnormalSummary)
			netdev_error(dev, np->intr_status);

		/* budget exhausted: stay scheduled, NAPI will poll again */
		if (work_done >= budget)
			return work_done;

		np->intr_status = readl(ioaddr + IntrStatus);
	} while (np->intr_status);

	napi_complete_done(napi, work_done);

	/* Reenable interrupts providing nothing is trying to shut
	 * the chip down. */
	spin_lock(&np->lock);
	if (!np->hands_off)
		natsemi_irq_enable(dev);
	spin_unlock(&np->lock);

	return work_done;
}
2274d9fb9f38SJeff Kirsher 
2275d9fb9f38SJeff Kirsher /* This routine is logically part of the interrupt handler, but separated
2276d9fb9f38SJeff Kirsher    for clarity and better register allocation. */
netdev_rx(struct net_device * dev,int * work_done,int work_to_do)2277d9fb9f38SJeff Kirsher static void netdev_rx(struct net_device *dev, int *work_done, int work_to_do)
2278d9fb9f38SJeff Kirsher {
2279d9fb9f38SJeff Kirsher 	struct netdev_private *np = netdev_priv(dev);
2280d9fb9f38SJeff Kirsher 	int entry = np->cur_rx % RX_RING_SIZE;
2281d9fb9f38SJeff Kirsher 	int boguscnt = np->dirty_rx + RX_RING_SIZE - np->cur_rx;
2282d9fb9f38SJeff Kirsher 	s32 desc_status = le32_to_cpu(np->rx_head_desc->cmd_status);
2283d9fb9f38SJeff Kirsher 	unsigned int buflen = np->rx_buf_sz;
2284d9fb9f38SJeff Kirsher 	void __iomem * ioaddr = ns_ioaddr(dev);
2285d9fb9f38SJeff Kirsher 
2286d9fb9f38SJeff Kirsher 	/* If the driver owns the next entry it's a new packet. Send it up. */
2287d9fb9f38SJeff Kirsher 	while (desc_status < 0) { /* e.g. & DescOwn */
2288d9fb9f38SJeff Kirsher 		int pkt_len;
2289d9fb9f38SJeff Kirsher 		if (netif_msg_rx_status(np))
2290d9fb9f38SJeff Kirsher 			printk(KERN_DEBUG
2291d9fb9f38SJeff Kirsher 				"  netdev_rx() entry %d status was %#08x.\n",
2292d9fb9f38SJeff Kirsher 				entry, desc_status);
2293d9fb9f38SJeff Kirsher 		if (--boguscnt < 0)
2294d9fb9f38SJeff Kirsher 			break;
2295d9fb9f38SJeff Kirsher 
2296d9fb9f38SJeff Kirsher 		if (*work_done >= work_to_do)
2297d9fb9f38SJeff Kirsher 			break;
2298d9fb9f38SJeff Kirsher 
2299d9fb9f38SJeff Kirsher 		(*work_done)++;
2300d9fb9f38SJeff Kirsher 
2301d9fb9f38SJeff Kirsher 		pkt_len = (desc_status & DescSizeMask) - 4;
2302d9fb9f38SJeff Kirsher 		if ((desc_status&(DescMore|DescPktOK|DescRxLong)) != DescPktOK){
2303d9fb9f38SJeff Kirsher 			if (desc_status & DescMore) {
2304d9fb9f38SJeff Kirsher 				unsigned long flags;
2305d9fb9f38SJeff Kirsher 
2306d9fb9f38SJeff Kirsher 				if (netif_msg_rx_err(np))
2307d9fb9f38SJeff Kirsher 					printk(KERN_WARNING
2308d9fb9f38SJeff Kirsher 						"%s: Oversized(?) Ethernet "
2309d9fb9f38SJeff Kirsher 						"frame spanned multiple "
2310d9fb9f38SJeff Kirsher 						"buffers, entry %#08x "
2311d9fb9f38SJeff Kirsher 						"status %#08x.\n", dev->name,
2312d9fb9f38SJeff Kirsher 						np->cur_rx, desc_status);
2313d9fb9f38SJeff Kirsher 				dev->stats.rx_length_errors++;
2314d9fb9f38SJeff Kirsher 
2315d9fb9f38SJeff Kirsher 				/* The RX state machine has probably
2316d9fb9f38SJeff Kirsher 				 * locked up beneath us.  Follow the
2317d9fb9f38SJeff Kirsher 				 * reset procedure documented in
2318d9fb9f38SJeff Kirsher 				 * AN-1287. */
2319d9fb9f38SJeff Kirsher 
2320d9fb9f38SJeff Kirsher 				spin_lock_irqsave(&np->lock, flags);
2321d9fb9f38SJeff Kirsher 				reset_rx(dev);
2322d9fb9f38SJeff Kirsher 				reinit_rx(dev);
2323d9fb9f38SJeff Kirsher 				writel(np->ring_dma, ioaddr + RxRingPtr);
2324d9fb9f38SJeff Kirsher 				check_link(dev);
2325d9fb9f38SJeff Kirsher 				spin_unlock_irqrestore(&np->lock, flags);
2326d9fb9f38SJeff Kirsher 
2327d9fb9f38SJeff Kirsher 				/* We'll enable RX on exit from this
2328d9fb9f38SJeff Kirsher 				 * function. */
2329d9fb9f38SJeff Kirsher 				break;
2330d9fb9f38SJeff Kirsher 
2331d9fb9f38SJeff Kirsher 			} else {
2332d9fb9f38SJeff Kirsher 				/* There was an error. */
2333d9fb9f38SJeff Kirsher 				dev->stats.rx_errors++;
2334d9fb9f38SJeff Kirsher 				if (desc_status & (DescRxAbort|DescRxOver))
2335d9fb9f38SJeff Kirsher 					dev->stats.rx_over_errors++;
2336d9fb9f38SJeff Kirsher 				if (desc_status & (DescRxLong|DescRxRunt))
2337d9fb9f38SJeff Kirsher 					dev->stats.rx_length_errors++;
2338d9fb9f38SJeff Kirsher 				if (desc_status & (DescRxInvalid|DescRxAlign))
2339d9fb9f38SJeff Kirsher 					dev->stats.rx_frame_errors++;
2340d9fb9f38SJeff Kirsher 				if (desc_status & DescRxCRC)
2341d9fb9f38SJeff Kirsher 					dev->stats.rx_crc_errors++;
2342d9fb9f38SJeff Kirsher 			}
2343d9fb9f38SJeff Kirsher 		} else if (pkt_len > np->rx_buf_sz) {
2344d9fb9f38SJeff Kirsher 			/* if this is the tail of a double buffer
2345d9fb9f38SJeff Kirsher 			 * packet, we've already counted the error
2346d9fb9f38SJeff Kirsher 			 * on the first part.  Ignore the second half.
2347d9fb9f38SJeff Kirsher 			 */
2348d9fb9f38SJeff Kirsher 		} else {
2349d9fb9f38SJeff Kirsher 			struct sk_buff *skb;
2350d9fb9f38SJeff Kirsher 			/* Omit CRC size. */
2351d9fb9f38SJeff Kirsher 			/* Check if the packet is long enough to accept
2352d9fb9f38SJeff Kirsher 			 * without copying to a minimally-sized skbuff. */
2353d9fb9f38SJeff Kirsher 			if (pkt_len < rx_copybreak &&
2354c056b734SPradeep A Dalvi 			    (skb = netdev_alloc_skb(dev, pkt_len + RX_OFFSET)) != NULL) {
2355d9fb9f38SJeff Kirsher 				/* 16 byte align the IP header */
2356d9fb9f38SJeff Kirsher 				skb_reserve(skb, RX_OFFSET);
2357fd9e4d6fSChristophe JAILLET 				dma_sync_single_for_cpu(&np->pci_dev->dev,
2358d9fb9f38SJeff Kirsher 							np->rx_dma[entry],
2359d9fb9f38SJeff Kirsher 							buflen,
2360fd9e4d6fSChristophe JAILLET 							DMA_FROM_DEVICE);
2361d9fb9f38SJeff Kirsher 				skb_copy_to_linear_data(skb,
2362d9fb9f38SJeff Kirsher 					np->rx_skbuff[entry]->data, pkt_len);
2363d9fb9f38SJeff Kirsher 				skb_put(skb, pkt_len);
2364fd9e4d6fSChristophe JAILLET 				dma_sync_single_for_device(&np->pci_dev->dev,
2365d9fb9f38SJeff Kirsher 							   np->rx_dma[entry],
2366d9fb9f38SJeff Kirsher 							   buflen,
2367fd9e4d6fSChristophe JAILLET 							   DMA_FROM_DEVICE);
2368d9fb9f38SJeff Kirsher 			} else {
2369fd9e4d6fSChristophe JAILLET 				dma_unmap_single(&np->pci_dev->dev,
2370fd9e4d6fSChristophe JAILLET 						 np->rx_dma[entry],
2371d9fb9f38SJeff Kirsher 						 buflen + NATSEMI_PADDING,
2372fd9e4d6fSChristophe JAILLET 						 DMA_FROM_DEVICE);
2373d9fb9f38SJeff Kirsher 				skb_put(skb = np->rx_skbuff[entry], pkt_len);
2374d9fb9f38SJeff Kirsher 				np->rx_skbuff[entry] = NULL;
2375d9fb9f38SJeff Kirsher 			}
2376d9fb9f38SJeff Kirsher 			skb->protocol = eth_type_trans(skb, dev);
2377d9fb9f38SJeff Kirsher 			netif_receive_skb(skb);
2378d9fb9f38SJeff Kirsher 			dev->stats.rx_packets++;
2379d9fb9f38SJeff Kirsher 			dev->stats.rx_bytes += pkt_len;
2380d9fb9f38SJeff Kirsher 		}
2381d9fb9f38SJeff Kirsher 		entry = (++np->cur_rx) % RX_RING_SIZE;
2382d9fb9f38SJeff Kirsher 		np->rx_head_desc = &np->rx_ring[entry];
2383d9fb9f38SJeff Kirsher 		desc_status = le32_to_cpu(np->rx_head_desc->cmd_status);
2384d9fb9f38SJeff Kirsher 	}
2385d9fb9f38SJeff Kirsher 	refill_rx(dev);
2386d9fb9f38SJeff Kirsher 
2387d9fb9f38SJeff Kirsher 	/* Restart Rx engine if stopped. */
2388d9fb9f38SJeff Kirsher 	if (np->oom)
2389d9fb9f38SJeff Kirsher 		mod_timer(&np->timer, jiffies + 1);
2390d9fb9f38SJeff Kirsher 	else
2391d9fb9f38SJeff Kirsher 		writel(RxOn, ioaddr + ChipCmd);
2392d9fb9f38SJeff Kirsher }
2393d9fb9f38SJeff Kirsher 
/*
 * Handle "abnormal" interrupt causes reported in intr_status:
 * link changes, statistics counter overflow, tx FIFO underruns,
 * wake-on-lan events, rx status FIFO overruns and PCI bus errors.
 * Runs in interrupt context; serializes against other paths with
 * np->lock.
 */
static void netdev_error(struct net_device *dev, int intr_status)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem * ioaddr = ns_ioaddr(dev);

	spin_lock(&np->lock);
	if (intr_status & LinkChange) {
		u16 lpa = mdio_read(dev, MII_LPA);
		if (mdio_read(dev, MII_BMCR) & BMCR_ANENABLE &&
		    netif_msg_link(np)) {
			printk(KERN_INFO
				"%s: Autonegotiation advertising"
				" %#04x  partner %#04x.\n", dev->name,
				np->advertising, lpa);
		}

		/* read MII int status to clear the flag */
		readw(ioaddr + MIntrStatus);
		check_link(dev);
	}
	if (intr_status & StatsMax) {
		/* Fold the chip's counters into dev->stats before they
		 * can overflow. */
		__get_stats(dev);
	}
	if (intr_status & IntrTxUnderrun) {
		/* Increase the tx drain threshold one step at a time,
		 * up to TX_DRTH_VAL_LIMIT, to make underruns rarer. */
		if ((np->tx_config & TxDrthMask) < TX_DRTH_VAL_LIMIT) {
			np->tx_config += TX_DRTH_VAL_INC;
			if (netif_msg_tx_err(np))
				printk(KERN_NOTICE
					"%s: increased tx threshold, txcfg %#08x.\n",
					dev->name, np->tx_config);
		} else {
			if (netif_msg_tx_err(np))
				printk(KERN_NOTICE
					"%s: tx underrun with maximum tx threshold, txcfg %#08x.\n",
					dev->name, np->tx_config);
		}
		writel(np->tx_config, ioaddr + TxConfig);
	}
	if (intr_status & WOLPkt && netif_msg_wol(np)) {
		int wol_status = readl(ioaddr + WOLCmd);
		printk(KERN_NOTICE "%s: Link wake-up event %#08x\n",
			dev->name, wol_status);
	}
	if (intr_status & RxStatusFIFOOver) {
		if (netif_msg_rx_err(np) && netif_msg_intr(np)) {
			printk(KERN_NOTICE "%s: Rx status FIFO overrun\n",
				dev->name);
		}
		dev->stats.rx_fifo_errors++;
		dev->stats.rx_errors++;
	}
	/* Hmmmmm, it's not clear how to recover from PCI faults. */
	if (intr_status & IntrPCIErr) {
		printk(KERN_NOTICE "%s: PCI error %#08x\n", dev->name,
			intr_status & IntrPCIErr);
		dev->stats.tx_fifo_errors++;
		dev->stats.tx_errors++;
		dev->stats.rx_fifo_errors++;
		dev->stats.rx_errors++;
	}
	spin_unlock(&np->lock);
}
2456d9fb9f38SJeff Kirsher 
__get_stats(struct net_device * dev)2457d9fb9f38SJeff Kirsher static void __get_stats(struct net_device *dev)
2458d9fb9f38SJeff Kirsher {
2459d9fb9f38SJeff Kirsher 	void __iomem * ioaddr = ns_ioaddr(dev);
2460d9fb9f38SJeff Kirsher 
2461d9fb9f38SJeff Kirsher 	/* The chip only need report frame silently dropped. */
2462d9fb9f38SJeff Kirsher 	dev->stats.rx_crc_errors += readl(ioaddr + RxCRCErrs);
2463d9fb9f38SJeff Kirsher 	dev->stats.rx_missed_errors += readl(ioaddr + RxMissed);
2464d9fb9f38SJeff Kirsher }
2465d9fb9f38SJeff Kirsher 
get_stats(struct net_device * dev)2466d9fb9f38SJeff Kirsher static struct net_device_stats *get_stats(struct net_device *dev)
2467d9fb9f38SJeff Kirsher {
2468d9fb9f38SJeff Kirsher 	struct netdev_private *np = netdev_priv(dev);
2469d9fb9f38SJeff Kirsher 
2470d9fb9f38SJeff Kirsher 	/* The chip only need report frame silently dropped. */
2471d9fb9f38SJeff Kirsher 	spin_lock_irq(&np->lock);
2472d9fb9f38SJeff Kirsher 	if (netif_running(dev) && !np->hands_off)
2473d9fb9f38SJeff Kirsher 		__get_stats(dev);
2474d9fb9f38SJeff Kirsher 	spin_unlock_irq(&np->lock);
2475d9fb9f38SJeff Kirsher 
2476d9fb9f38SJeff Kirsher 	return &dev->stats;
2477d9fb9f38SJeff Kirsher }
2478d9fb9f38SJeff Kirsher 
2479d9fb9f38SJeff Kirsher #ifdef CONFIG_NET_POLL_CONTROLLER
natsemi_poll_controller(struct net_device * dev)2480d9fb9f38SJeff Kirsher static void natsemi_poll_controller(struct net_device *dev)
2481d9fb9f38SJeff Kirsher {
2482d710ce13SFrancois Romieu 	struct netdev_private *np = netdev_priv(dev);
2483d710ce13SFrancois Romieu 	const int irq = np->pci_dev->irq;
2484d710ce13SFrancois Romieu 
2485d710ce13SFrancois Romieu 	disable_irq(irq);
2486d710ce13SFrancois Romieu 	intr_handler(irq, dev);
2487d710ce13SFrancois Romieu 	enable_irq(irq);
2488d9fb9f38SJeff Kirsher }
2489d9fb9f38SJeff Kirsher #endif
2490d9fb9f38SJeff Kirsher 
2491d9fb9f38SJeff Kirsher #define HASH_TABLE	0x200
__set_rx_mode(struct net_device * dev)2492d9fb9f38SJeff Kirsher static void __set_rx_mode(struct net_device *dev)
2493d9fb9f38SJeff Kirsher {
2494d9fb9f38SJeff Kirsher 	void __iomem * ioaddr = ns_ioaddr(dev);
2495d9fb9f38SJeff Kirsher 	struct netdev_private *np = netdev_priv(dev);
2496d9fb9f38SJeff Kirsher 	u8 mc_filter[64]; /* Multicast hash filter */
2497d9fb9f38SJeff Kirsher 	u32 rx_mode;
2498d9fb9f38SJeff Kirsher 
2499d9fb9f38SJeff Kirsher 	if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
2500d9fb9f38SJeff Kirsher 		rx_mode = RxFilterEnable | AcceptBroadcast
2501d9fb9f38SJeff Kirsher 			| AcceptAllMulticast | AcceptAllPhys | AcceptMyPhys;
2502d9fb9f38SJeff Kirsher 	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
2503d9fb9f38SJeff Kirsher 		   (dev->flags & IFF_ALLMULTI)) {
2504d9fb9f38SJeff Kirsher 		rx_mode = RxFilterEnable | AcceptBroadcast
2505d9fb9f38SJeff Kirsher 			| AcceptAllMulticast | AcceptMyPhys;
2506d9fb9f38SJeff Kirsher 	} else {
2507d9fb9f38SJeff Kirsher 		struct netdev_hw_addr *ha;
2508d9fb9f38SJeff Kirsher 		int i;
2509d9fb9f38SJeff Kirsher 
2510d9fb9f38SJeff Kirsher 		memset(mc_filter, 0, sizeof(mc_filter));
2511d9fb9f38SJeff Kirsher 		netdev_for_each_mc_addr(ha, dev) {
2512d9fb9f38SJeff Kirsher 			int b = (ether_crc(ETH_ALEN, ha->addr) >> 23) & 0x1ff;
2513d9fb9f38SJeff Kirsher 			mc_filter[b/8] |= (1 << (b & 0x07));
2514d9fb9f38SJeff Kirsher 		}
2515d9fb9f38SJeff Kirsher 		rx_mode = RxFilterEnable | AcceptBroadcast
2516d9fb9f38SJeff Kirsher 			| AcceptMulticast | AcceptMyPhys;
2517d9fb9f38SJeff Kirsher 		for (i = 0; i < 64; i += 2) {
2518d9fb9f38SJeff Kirsher 			writel(HASH_TABLE + i, ioaddr + RxFilterAddr);
2519d9fb9f38SJeff Kirsher 			writel((mc_filter[i + 1] << 8) + mc_filter[i],
2520d9fb9f38SJeff Kirsher 			       ioaddr + RxFilterData);
2521d9fb9f38SJeff Kirsher 		}
2522d9fb9f38SJeff Kirsher 	}
2523d9fb9f38SJeff Kirsher 	writel(rx_mode, ioaddr + RxFilterAddr);
2524d9fb9f38SJeff Kirsher 	np->cur_rx_mode = rx_mode;
2525d9fb9f38SJeff Kirsher }
2526d9fb9f38SJeff Kirsher 
/*
 * Change the interface MTU.  If the interface is running, the rx ring
 * must be rebuilt with buffers sized for the new MTU, so the engines
 * are stopped, the ring drained and refilled, and the engines
 * restarted — all with the device IRQ masked and np->lock held.
 * Always returns 0.
 */
static int natsemi_change_mtu(struct net_device *dev, int new_mtu)
{
	dev->mtu = new_mtu;

	/* synchronized against open : rtnl_lock() held by caller */
	if (netif_running(dev)) {
		struct netdev_private *np = netdev_priv(dev);
		void __iomem * ioaddr = ns_ioaddr(dev);
		const int irq = np->pci_dev->irq;

		disable_irq(irq);
		spin_lock(&np->lock);
		/* stop engines */
		natsemi_stop_rxtx(dev);
		/* drain rx queue */
		drain_rx(dev);
		/* change buffers */
		set_bufsize(dev);
		reinit_rx(dev);
		/* point the chip at the (re-initialized) descriptor ring */
		writel(np->ring_dma, ioaddr + RxRingPtr);
		/* restart engines */
		writel(RxOn | TxOn, ioaddr + ChipCmd);
		spin_unlock(&np->lock);
		enable_irq(irq);
	}
	return 0;
}
2554d9fb9f38SJeff Kirsher 
set_rx_mode(struct net_device * dev)2555d9fb9f38SJeff Kirsher static void set_rx_mode(struct net_device *dev)
2556d9fb9f38SJeff Kirsher {
2557d9fb9f38SJeff Kirsher 	struct netdev_private *np = netdev_priv(dev);
2558d9fb9f38SJeff Kirsher 	spin_lock_irq(&np->lock);
2559d9fb9f38SJeff Kirsher 	if (!np->hands_off)
2560d9fb9f38SJeff Kirsher 		__set_rx_mode(dev);
2561d9fb9f38SJeff Kirsher 	spin_unlock_irq(&np->lock);
2562d9fb9f38SJeff Kirsher }
2563d9fb9f38SJeff Kirsher 
get_drvinfo(struct net_device * dev,struct ethtool_drvinfo * info)2564d9fb9f38SJeff Kirsher static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
2565d9fb9f38SJeff Kirsher {
2566d9fb9f38SJeff Kirsher 	struct netdev_private *np = netdev_priv(dev);
2567f029c781SWolfram Sang 	strscpy(info->driver, DRV_NAME, sizeof(info->driver));
2568f029c781SWolfram Sang 	strscpy(info->version, DRV_VERSION, sizeof(info->version));
2569f029c781SWolfram Sang 	strscpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
2570d9fb9f38SJeff Kirsher }
2571d9fb9f38SJeff Kirsher 
/* ethtool: length in bytes of the buffer filled by get_regs(). */
static int get_regs_len(struct net_device *dev)
{
	return NATSEMI_REGS_SIZE;
}
2576d9fb9f38SJeff Kirsher 
get_eeprom_len(struct net_device * dev)2577d9fb9f38SJeff Kirsher static int get_eeprom_len(struct net_device *dev)
2578d9fb9f38SJeff Kirsher {
2579d9fb9f38SJeff Kirsher 	struct netdev_private *np = netdev_priv(dev);
2580d9fb9f38SJeff Kirsher 	return np->eeprom_size;
2581d9fb9f38SJeff Kirsher }
2582d9fb9f38SJeff Kirsher 
get_link_ksettings(struct net_device * dev,struct ethtool_link_ksettings * ecmd)2583586b6e27SPhilippe Reynes static int get_link_ksettings(struct net_device *dev,
2584586b6e27SPhilippe Reynes 			      struct ethtool_link_ksettings *ecmd)
2585d9fb9f38SJeff Kirsher {
2586d9fb9f38SJeff Kirsher 	struct netdev_private *np = netdev_priv(dev);
2587d9fb9f38SJeff Kirsher 	spin_lock_irq(&np->lock);
2588d9fb9f38SJeff Kirsher 	netdev_get_ecmd(dev, ecmd);
2589d9fb9f38SJeff Kirsher 	spin_unlock_irq(&np->lock);
2590d9fb9f38SJeff Kirsher 	return 0;
2591d9fb9f38SJeff Kirsher }
2592d9fb9f38SJeff Kirsher 
set_link_ksettings(struct net_device * dev,const struct ethtool_link_ksettings * ecmd)2593586b6e27SPhilippe Reynes static int set_link_ksettings(struct net_device *dev,
2594586b6e27SPhilippe Reynes 			      const struct ethtool_link_ksettings *ecmd)
2595d9fb9f38SJeff Kirsher {
2596d9fb9f38SJeff Kirsher 	struct netdev_private *np = netdev_priv(dev);
2597d9fb9f38SJeff Kirsher 	int res;
2598d9fb9f38SJeff Kirsher 	spin_lock_irq(&np->lock);
2599d9fb9f38SJeff Kirsher 	res = netdev_set_ecmd(dev, ecmd);
2600d9fb9f38SJeff Kirsher 	spin_unlock_irq(&np->lock);
2601d9fb9f38SJeff Kirsher 	return res;
2602d9fb9f38SJeff Kirsher }
2603d9fb9f38SJeff Kirsher 
get_wol(struct net_device * dev,struct ethtool_wolinfo * wol)2604d9fb9f38SJeff Kirsher static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2605d9fb9f38SJeff Kirsher {
2606d9fb9f38SJeff Kirsher 	struct netdev_private *np = netdev_priv(dev);
2607d9fb9f38SJeff Kirsher 	spin_lock_irq(&np->lock);
2608d9fb9f38SJeff Kirsher 	netdev_get_wol(dev, &wol->supported, &wol->wolopts);
2609d9fb9f38SJeff Kirsher 	netdev_get_sopass(dev, wol->sopass);
2610d9fb9f38SJeff Kirsher 	spin_unlock_irq(&np->lock);
2611d9fb9f38SJeff Kirsher }
2612d9fb9f38SJeff Kirsher 
set_wol(struct net_device * dev,struct ethtool_wolinfo * wol)2613d9fb9f38SJeff Kirsher static int set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2614d9fb9f38SJeff Kirsher {
2615d9fb9f38SJeff Kirsher 	struct netdev_private *np = netdev_priv(dev);
2616d9fb9f38SJeff Kirsher 	int res;
2617d9fb9f38SJeff Kirsher 	spin_lock_irq(&np->lock);
2618d9fb9f38SJeff Kirsher 	netdev_set_wol(dev, wol->wolopts);
2619d9fb9f38SJeff Kirsher 	res = netdev_set_sopass(dev, wol->sopass);
2620d9fb9f38SJeff Kirsher 	spin_unlock_irq(&np->lock);
2621d9fb9f38SJeff Kirsher 	return res;
2622d9fb9f38SJeff Kirsher }
2623d9fb9f38SJeff Kirsher 
get_regs(struct net_device * dev,struct ethtool_regs * regs,void * buf)2624d9fb9f38SJeff Kirsher static void get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf)
2625d9fb9f38SJeff Kirsher {
2626d9fb9f38SJeff Kirsher 	struct netdev_private *np = netdev_priv(dev);
2627d9fb9f38SJeff Kirsher 	regs->version = NATSEMI_REGS_VER;
2628d9fb9f38SJeff Kirsher 	spin_lock_irq(&np->lock);
2629d9fb9f38SJeff Kirsher 	netdev_get_regs(dev, buf);
2630d9fb9f38SJeff Kirsher 	spin_unlock_irq(&np->lock);
2631d9fb9f38SJeff Kirsher }
2632d9fb9f38SJeff Kirsher 
get_msglevel(struct net_device * dev)2633d9fb9f38SJeff Kirsher static u32 get_msglevel(struct net_device *dev)
2634d9fb9f38SJeff Kirsher {
2635d9fb9f38SJeff Kirsher 	struct netdev_private *np = netdev_priv(dev);
2636d9fb9f38SJeff Kirsher 	return np->msg_enable;
2637d9fb9f38SJeff Kirsher }
2638d9fb9f38SJeff Kirsher 
/* ethtool: set the netif_msg_* verbosity bitmap. */
static void set_msglevel(struct net_device *dev, u32 val)
{
	struct netdev_private *priv = netdev_priv(dev);

	priv->msg_enable = val;
}
2644d9fb9f38SJeff Kirsher 
nway_reset(struct net_device * dev)2645d9fb9f38SJeff Kirsher static int nway_reset(struct net_device *dev)
2646d9fb9f38SJeff Kirsher {
2647d9fb9f38SJeff Kirsher 	int tmp;
2648d9fb9f38SJeff Kirsher 	int r = -EINVAL;
2649d9fb9f38SJeff Kirsher 	/* if autoneg is off, it's an error */
2650d9fb9f38SJeff Kirsher 	tmp = mdio_read(dev, MII_BMCR);
2651d9fb9f38SJeff Kirsher 	if (tmp & BMCR_ANENABLE) {
2652d9fb9f38SJeff Kirsher 		tmp |= (BMCR_ANRESTART);
2653d9fb9f38SJeff Kirsher 		mdio_write(dev, MII_BMCR, tmp);
2654d9fb9f38SJeff Kirsher 		r = 0;
2655d9fb9f38SJeff Kirsher 	}
2656d9fb9f38SJeff Kirsher 	return r;
2657d9fb9f38SJeff Kirsher }
2658d9fb9f38SJeff Kirsher 
get_link(struct net_device * dev)2659d9fb9f38SJeff Kirsher static u32 get_link(struct net_device *dev)
2660d9fb9f38SJeff Kirsher {
2661d9fb9f38SJeff Kirsher 	/* LSTATUS is latched low until a read - so read twice */
2662d9fb9f38SJeff Kirsher 	mdio_read(dev, MII_BMSR);
2663d9fb9f38SJeff Kirsher 	return (mdio_read(dev, MII_BMSR)&BMSR_LSTATUS) ? 1:0;
2664d9fb9f38SJeff Kirsher }
2665d9fb9f38SJeff Kirsher 
/* ethtool: read the requested EEPROM window into data.  The whole
 * EEPROM is read into a temporary buffer under the private lock, then
 * the [offset, offset+len) slice is copied out.  Returns 0 or a
 * negative errno. */
static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct netdev_private *priv = netdev_priv(dev);
	int ret;
	u8 *buf;

	buf = kmalloc(priv->eeprom_size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	eeprom->magic = PCI_VENDOR_ID_NS | (PCI_DEVICE_ID_NS_83815<<16);
	spin_lock_irq(&priv->lock);
	ret = netdev_get_eeprom(dev, buf);
	spin_unlock_irq(&priv->lock);
	if (!ret)
		memcpy(data, buf + eeprom->offset, eeprom->len);
	kfree(buf);
	return ret;
}
2685d9fb9f38SJeff Kirsher 
/* ethtool entry points for this driver; link settings use the
 * ksettings API. */
static const struct ethtool_ops ethtool_ops = {
	.get_drvinfo = get_drvinfo,
	.get_regs_len = get_regs_len,
	.get_eeprom_len = get_eeprom_len,
	.get_wol = get_wol,
	.set_wol = set_wol,
	.get_regs = get_regs,
	.get_msglevel = get_msglevel,
	.set_msglevel = set_msglevel,
	.nway_reset = nway_reset,
	.get_link = get_link,
	.get_eeprom = get_eeprom,
	.get_link_ksettings = get_link_ksettings,
	.set_link_ksettings = set_link_ksettings,
};
2701d9fb9f38SJeff Kirsher 
/* Translate ethtool WAKE_* flags in newval into chip WOLCmd bits and
 * program them.  Caller holds np->lock.  Always returns 0. */
static int netdev_set_wol(struct net_device *dev, u32 newval)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = ns_ioaddr(dev);
	u32 wolcmd = readl(ioaddr + WOLCmd) & ~WakeOptsSummary;

	if (newval & WAKE_PHY)
		wolcmd |= WakePhy;
	if (newval & WAKE_UCAST)
		wolcmd |= WakeUnicast;
	if (newval & WAKE_MCAST)
		wolcmd |= WakeMulticast;
	if (newval & WAKE_BCAST)
		wolcmd |= WakeBroadcast;
	if (newval & WAKE_ARP)
		wolcmd |= WakeArp;
	if (newval & WAKE_MAGIC)
		wolcmd |= WakeMagic;
	/* SecureOn is only honoured on silicon revision D and later. */
	if (np->srr >= SRR_DP83815_D && (newval & WAKE_MAGICSECURE))
		wolcmd |= WakeMagicSecure;

	writel(wolcmd, ioaddr + WOLCmd);

	return 0;
}
2731d9fb9f38SJeff Kirsher 
/* Report the supported wake options and translate the chip's current
 * WOLCmd bits back into ethtool WAKE_* flags.  Caller holds np->lock.
 * Always returns 0. */
static int netdev_get_wol(struct net_device *dev, u32 *supported, u32 *cur)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = ns_ioaddr(dev);
	u32 wolcmd = readl(ioaddr + WOLCmd);
	u32 opts = 0;

	*supported = WAKE_PHY | WAKE_UCAST | WAKE_MCAST | WAKE_BCAST |
		     WAKE_ARP | WAKE_MAGIC;
	/* SOPASS works on revD and higher */
	if (np->srr >= SRR_DP83815_D)
		*supported |= WAKE_MAGICSECURE;

	if (wolcmd & WakePhy)
		opts |= WAKE_PHY;
	if (wolcmd & WakeUnicast)
		opts |= WAKE_UCAST;
	if (wolcmd & WakeMulticast)
		opts |= WAKE_MCAST;
	if (wolcmd & WakeBroadcast)
		opts |= WAKE_BCAST;
	if (wolcmd & WakeArp)
		opts |= WAKE_ARP;
	if (wolcmd & WakeMagic)
		opts |= WAKE_MAGIC;
	if (wolcmd & WakeMagicSecure) {
		/* this can be on in revC, but it's broken */
		opts |= WAKE_MAGICSECURE;
	}
	*cur = opts;

	return 0;
}
2767d9fb9f38SJeff Kirsher 
/* Program the 6-byte SecureOn password (three 16-bit words) into the
 * chip.  A no-op on silicon before revision D, which lacks the
 * registers.  Caller holds np->lock.  Always returns 0. */
static int netdev_set_sopass(struct net_device *dev, u8 *newval)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = ns_ioaddr(dev);
	u16 *sval = (u16 *)newval;
	u32 addr;
	int i;

	if (np->srr < SRR_DP83815_D)
		return 0;

	/* enable writing to these registers by disabling the RX filter */
	addr = readl(ioaddr + RxFilterAddr) & ~RFCRAddressMask;
	addr &= ~RxFilterEnable;
	writel(addr, ioaddr + RxFilterAddr);

	/* write the three words to (undocumented) RFCR vals 0xa, 0xc, 0xe */
	for (i = 0; i < 3; i++) {
		writel(addr | (0xa + 2 * i), ioaddr + RxFilterAddr);
		writew(sval[i], ioaddr + RxFilterData);
	}

	/* re-enable the RX filter */
	writel(addr | RxFilterEnable, ioaddr + RxFilterAddr);

	return 0;
}
2799d9fb9f38SJeff Kirsher 
/* Read the 6-byte SecureOn password (three 16-bit words) back from
 * the chip into data; silicon before revision D has no such registers
 * and reports zeroes.  Caller holds np->lock.  Always returns 0. */
static int netdev_get_sopass(struct net_device *dev, u8 *data)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = ns_ioaddr(dev);
	u16 *sval = (u16 *)data;
	u32 addr;
	int i;

	if (np->srr < SRR_DP83815_D) {
		sval[0] = sval[1] = sval[2] = 0;
		return 0;
	}

	/* read the three words from (undocumented) RFCR vals 0xa, 0xc, 0xe */
	addr = readl(ioaddr + RxFilterAddr) & ~RFCRAddressMask;

	for (i = 0; i < 3; i++) {
		writel(addr | (0xa + 2 * i), ioaddr + RxFilterAddr);
		sval[i] = readw(ioaddr + RxFilterData);
	}

	/* restore the previously selected RFCR address */
	writel(addr, ioaddr + RxFilterAddr);

	return 0;
}
2828d9fb9f38SJeff Kirsher 
/*
 * Fill *ecmd with the current link settings: port, speed, duplex,
 * autoneg state, and the supported/advertised mode masks, translated
 * from the MII advertising bits cached in np.  Caller holds np->lock.
 * Always returns 0.
 */
static int netdev_get_ecmd(struct net_device *dev,
			   struct ethtool_link_ksettings *ecmd)
{
	struct netdev_private *np = netdev_priv(dev);
	u32 supported, advertising;
	u32 tmp;

	ecmd->base.port   = dev->if_port;
	ecmd->base.speed  = np->speed;
	ecmd->base.duplex = np->duplex;
	ecmd->base.autoneg = np->autoneg;
	advertising = 0;

	/* translate cached MII ADVERTISE_* bits to ADVERTISED_* flags */
	if (np->advertising & ADVERTISE_10HALF)
		advertising |= ADVERTISED_10baseT_Half;
	if (np->advertising & ADVERTISE_10FULL)
		advertising |= ADVERTISED_10baseT_Full;
	if (np->advertising & ADVERTISE_100HALF)
		advertising |= ADVERTISED_100baseT_Half;
	if (np->advertising & ADVERTISE_100FULL)
		advertising |= ADVERTISED_100baseT_Full;
	supported   = (SUPPORTED_Autoneg |
		SUPPORTED_10baseT_Half  | SUPPORTED_10baseT_Full  |
		SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
		SUPPORTED_TP | SUPPORTED_MII | SUPPORTED_FIBRE);
	ecmd->base.phy_address = np->phy_addr_external;
	/*
	 * We intentionally report the phy address of the external
	 * phy, even if the internal phy is used. This is necessary
	 * to work around a deficiency of the ethtool interface:
	 * It's only possible to query the settings of the active
	 * port. Therefore
	 * # ethtool -s ethX port mii
	 * actually sends an ioctl to switch to port mii with the
	 * settings that are used for the current active port.
	 * If we would report a different phy address in this
	 * command, then
	 * # ethtool -s ethX port tp;ethtool -s ethX port mii
	 * would unintentionally change the phy address.
	 *
	 * Fortunately the phy address doesn't matter with the
	 * internal phy...
	 */

	/* set information based on active port type */
	switch (ecmd->base.port) {
	default:
	case PORT_TP:
		advertising |= ADVERTISED_TP;
		break;
	case PORT_MII:
		advertising |= ADVERTISED_MII;
		break;
	case PORT_FIBRE:
		advertising |= ADVERTISED_FIBRE;
		break;
	}

	/* if autonegotiation is on, try to return the active speed/duplex */
	if (ecmd->base.autoneg == AUTONEG_ENABLE) {
		advertising |= ADVERTISED_Autoneg;
		/* intersect our advertisement with the link partner's */
		tmp = mii_nway_result(
			np->advertising & mdio_read(dev, MII_LPA));
		if (tmp == LPA_100FULL || tmp == LPA_100HALF)
			ecmd->base.speed = SPEED_100;
		else
			ecmd->base.speed = SPEED_10;
		if (tmp == LPA_100FULL || tmp == LPA_10FULL)
			ecmd->base.duplex = DUPLEX_FULL;
		else
			ecmd->base.duplex = DUPLEX_HALF;
	}

	/* ignore maxtxpkt, maxrxpkt for now */

	ethtool_convert_legacy_u32_to_link_mode(ecmd->link_modes.supported,
						supported);
	ethtool_convert_legacy_u32_to_link_mode(ecmd->link_modes.advertising,
						advertising);

	return 0;
}
2911d9fb9f38SJeff Kirsher 
/*
 * netdev_set_ecmd - apply link settings from an ethtool SET request.
 * @dev: network device
 * @ecmd: requested settings (port, autoneg, speed/duplex, phy address,
 *	advertised link modes)
 *
 * Validates the request, saves the new parameters into the private
 * state, switches between the internal (PORT_TP) and external
 * (PORT_MII/PORT_FIBRE) phy as needed and then reruns the phy setup
 * and link check.  Returns 0 on success or -EINVAL for an unsupported
 * combination.
 */
static int netdev_set_ecmd(struct net_device *dev,
			   const struct ethtool_link_ksettings *ecmd)
{
	struct netdev_private *np = netdev_priv(dev);
	u32 advertising;

	/* collapse the link-mode bitmap into the legacy u32 format used
	 * throughout the rest of this function */
	ethtool_convert_link_mode_to_legacy_u32(&advertising,
						ecmd->link_modes.advertising);

	/* only twisted pair, mii and fibre ports exist on this hardware */
	if (ecmd->base.port != PORT_TP &&
	    ecmd->base.port != PORT_MII &&
	    ecmd->base.port != PORT_FIBRE)
		return -EINVAL;
	if (ecmd->base.autoneg == AUTONEG_ENABLE) {
		/* autoneg needs at least one 10/100 mode to advertise */
		if ((advertising & (ADVERTISED_10baseT_Half |
					  ADVERTISED_10baseT_Full |
					  ADVERTISED_100baseT_Half |
					  ADVERTISED_100baseT_Full)) == 0) {
			return -EINVAL;
		}
	} else if (ecmd->base.autoneg == AUTONEG_DISABLE) {
		/* forced mode: only 10/100 at half/full duplex exist */
		u32 speed = ecmd->base.speed;
		if (speed != SPEED_10 && speed != SPEED_100)
			return -EINVAL;
		if (ecmd->base.duplex != DUPLEX_HALF &&
		    ecmd->base.duplex != DUPLEX_FULL)
			return -EINVAL;
	} else {
		return -EINVAL;
	}

	/*
	 * If we're ignoring the PHY then autoneg and the internal
	 * transceiver are really not going to work so don't let the
	 * user select them.
	 */
	if (np->ignore_phy && (ecmd->base.autoneg == AUTONEG_ENABLE ||
			       ecmd->base.port == PORT_TP))
		return -EINVAL;

	/*
	 * maxtxpkt, maxrxpkt: ignored for now.
	 *
	 * transceiver:
	 * PORT_TP is always XCVR_INTERNAL, PORT_MII and PORT_FIBRE are always
	 * XCVR_EXTERNAL. The implementation thus ignores ecmd->transceiver and
	 * selects based on ecmd->port.
	 *
	 * Actually PORT_FIBRE is nearly identical to PORT_MII: it's for fibre
	 * phys that are connected to the mii bus. It's used to apply fibre
	 * specific updates.
	 */

	/* WHEW! now lets bang some bits */

	/* save the parms */
	dev->if_port          = ecmd->base.port;
	np->autoneg           = ecmd->base.autoneg;
	np->phy_addr_external = ecmd->base.phy_address & PhyAddrMask;
	if (np->autoneg == AUTONEG_ENABLE) {
		/* advertise only what has been requested */
		np->advertising &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
		if (advertising & ADVERTISED_10baseT_Half)
			np->advertising |= ADVERTISE_10HALF;
		if (advertising & ADVERTISED_10baseT_Full)
			np->advertising |= ADVERTISE_10FULL;
		if (advertising & ADVERTISED_100baseT_Half)
			np->advertising |= ADVERTISE_100HALF;
		if (advertising & ADVERTISED_100baseT_Full)
			np->advertising |= ADVERTISE_100FULL;
	} else {
		np->speed  = ecmd->base.speed;
		np->duplex = ecmd->base.duplex;
		/* user overriding the initial full duplex parm? */
		if (np->duplex == DUPLEX_HALF)
			np->full_duplex = 0;
	}

	/* get the right phy enabled */
	if (ecmd->base.port == PORT_TP)
		switch_port_internal(dev);
	else
		switch_port_external(dev);

	/* set parms and see how this affected our link status */
	init_phy_fixup(dev);
	check_link(dev);
	return 0;
}
3001d9fb9f38SJeff Kirsher 
/*
 * netdev_get_regs - dump the chip registers for ethtool -d.
 * @dev: network device
 * @buf: destination buffer, filled as an array of u32 values
 *
 * Collects, in order: the non-mii page-0 chip registers, the mii
 * registers of the currently active phy, the "magic" page-1 registers
 * and the RFCR-indexed rx filter registers.  Always returns 0.
 */
static int netdev_get_regs(struct net_device *dev, u8 *buf)
{
	int i;
	int j;
	u32 rfcr;
	u32 *rbuf = (u32 *)buf;
	void __iomem * ioaddr = ns_ioaddr(dev);

	/* read non-mii page 0 of registers */
	for (i = 0; i < NATSEMI_PG0_NREGS/2; i++) {
		rbuf[i] = readl(ioaddr + i*4);
	}

	/* read current mii registers */
	for (i = NATSEMI_PG0_NREGS/2; i < NATSEMI_PG0_NREGS; i++)
		rbuf[i] = mdio_read(dev, i & 0x1f);

	/* read only the 'magic' registers from page 1 */
	writew(1, ioaddr + PGSEL);
	rbuf[i++] = readw(ioaddr + PMDCSR);
	rbuf[i++] = readw(ioaddr + TSTDAT);
	rbuf[i++] = readw(ioaddr + DSPCFG);
	rbuf[i++] = readw(ioaddr + SDCFG);
	/* switch back to page 0 before touching anything else */
	writew(0, ioaddr + PGSEL);

	/* read RFCR indexed registers */
	rfcr = readl(ioaddr + RxFilterAddr);
	for (j = 0; j < NATSEMI_RFDR_NREGS; j++) {
		writel(j*2, ioaddr + RxFilterAddr);
		rbuf[i++] = readw(ioaddr + RxFilterData);
	}
	/* restore the rx filter address the driver had programmed */
	writel(rfcr, ioaddr + RxFilterAddr);

	/* the interrupt status is clear-on-read - see if we missed any */
	if (rbuf[4] & rbuf[5]) {
		/* NOTE(review): rbuf[4]/rbuf[5] come from the page-0 dump
		 * above - presumably the interrupt status and mask
		 * registers; bits set in both were pending interrupts
		 * that this dump just cleared.  Confirm against the
		 * register map. */
		printk(KERN_WARNING
			"%s: shoot, we dropped an interrupt (%#08x)\n",
			dev->name, rbuf[4] & rbuf[5]);
	}

	return 0;
}
3044d9fb9f38SJeff Kirsher 
/*
 * Reverse the order of the 16 bits in @x (bit 0 <-> bit 15, bit 1 <->
 * bit 14, ...).  Used below because the EEPROM stores its data
 * bit-swapped relative to what eeprom_read() returns.  Implemented as
 * a divide-and-conquer exchange: adjacent bits, then bit pairs, then
 * nibbles, then bytes.  As with any function-like macro, @x is
 * evaluated more than once - do not pass expressions with side effects.
 */
#define SWAP_BITS_1(x)	((((x) & 0xaaaa) >> 1) | (((x) & 0x5555) << 1))
#define SWAP_BITS_2(x)	((((x) & 0xcccc) >> 2) | (((x) & 0x3333) << 2))
#define SWAP_BITS_4(x)	((((x) & 0xf0f0) >> 4) | (((x) & 0x0f0f) << 4))
#define SWAP_BITS_8(x)	((((x) & 0xff00) >> 8) | (((x) & 0x00ff) << 8))
#define SWAP_BITS(x)	SWAP_BITS_8(SWAP_BITS_4(SWAP_BITS_2(SWAP_BITS_1(x))))
3053d9fb9f38SJeff Kirsher 
/*
 * netdev_get_eeprom - copy the raw EEPROM image for ethtool -e.
 * @dev: network device
 * @buf: destination buffer, filled as an array of 16-bit words
 *
 * Always returns 0.
 */
static int netdev_get_eeprom(struct net_device *dev, u8 *buf)
{
	void __iomem * ioaddr = ns_ioaddr(dev);
	struct netdev_private *np = netdev_priv(dev);
	u16 *out = (u16 *)buf;
	int word;

	/* The EEPROM is addressed in 16-bit words. */
	for (word = 0; word < np->eeprom_size/2; word++) {
		u16 val = eeprom_read(ioaddr, word);
		/* eeprom_read() undoes the bit-swapped layout the EEPROM
		 * uses internally; swap it back so userland sees the
		 * image exactly as stored. */
		out[word] = SWAP_BITS(val);
	}
	return 0;
}
3071d9fb9f38SJeff Kirsher 
/*
 * netdev_ioctl - private SIOC[GS]MII* ioctl handler.
 * @dev: network device
 * @rq: ifreq carrying a struct mii_ioctl_data
 * @cmd: SIOCGMIIPHY, SIOCGMIIREG or SIOCSMIIREG
 *
 * A phy_id alone cannot distinguish the internal phy from an external
 * phy at the same address, so accesses are routed based on the
 * currently active port (dev->if_port).  Returns 0 or -EOPNOTSUPP.
 */
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct mii_ioctl_data *data = if_mii(rq);
	struct netdev_private *np = netdev_priv(dev);

	switch(cmd) {
	case SIOCGMIIPHY:		/* Get address of MII PHY in use. */
		data->phy_id = np->phy_addr_external;
		fallthrough;

	case SIOCGMIIREG:		/* Read MII PHY register. */
		/* The phy_id is not enough to uniquely identify
		 * the intended target. Therefore the command is sent to
		 * the given mii on the current port.
		 */
		if (dev->if_port == PORT_TP) {
			/* internal phy: only its own address answers,
			 * anything else reads as 0 */
			if ((data->phy_id & 0x1f) == np->phy_addr_external)
				data->val_out = mdio_read(dev,
							data->reg_num & 0x1f);
			else
				data->val_out = 0;
		} else {
			/* external port: move the internal phy aside,
			 * then read directly over the mii bus */
			move_int_phy(dev, data->phy_id & 0x1f);
			data->val_out = miiport_read(dev, data->phy_id & 0x1f,
							data->reg_num & 0x1f);
		}
		return 0;

	case SIOCSMIIREG:		/* Write MII PHY register. */
		if (dev->if_port == PORT_TP) {
			if ((data->phy_id & 0x1f) == np->phy_addr_external) {
				/* keep the driver's cached advertising
				 * value in sync with the register */
				if ((data->reg_num & 0x1f) == MII_ADVERTISE)
					np->advertising = data->val_in;
				mdio_write(dev, data->reg_num & 0x1f,
							data->val_in);
			}
		} else {
			if ((data->phy_id & 0x1f) == np->phy_addr_external) {
				if ((data->reg_num & 0x1f) == MII_ADVERTISE)
					np->advertising = data->val_in;
			}
			move_int_phy(dev, data->phy_id & 0x1f);
			miiport_write(dev, data->phy_id & 0x1f,
						data->reg_num & 0x1f,
						data->val_in);
		}
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
3123d9fb9f38SJeff Kirsher 
/*
 * enable_wol_mode - re-arm the chip for wake-on-lan.
 * @dev: network device
 * @enable_intr: if nonzero, also unmask the WOL/link-change interrupts
 *	so wake events are reported while the driver stays active
 *
 * Caller must have stopped the rx/tx processes first: the rx process
 * is restarted in silent mode by pointing RxRingPtr at NULL, which is
 * only possible while rx is stopped.
 */
static void enable_wol_mode(struct net_device *dev, int enable_intr)
{
	void __iomem * ioaddr = ns_ioaddr(dev);
	struct netdev_private *np = netdev_priv(dev);

	if (netif_msg_wol(np))
		printk(KERN_INFO "%s: remaining active for wake-on-lan\n",
			dev->name);

	/* For WOL we must restart the rx process in silent mode.
	 * Write NULL to the RxRingPtr. Only possible if
	 * rx process is stopped
	 */
	writel(0, ioaddr + RxRingPtr);

	/* read WoL status to clear */
	readl(ioaddr + WOLCmd);

	/* PME on, clear status */
	writel(np->SavedClkRun | PMEEnable | PMEStatus, ioaddr + ClkRun);

	/* and restart the rx process */
	writel(RxOn, ioaddr + ChipCmd);

	if (enable_intr) {
		/* enable the WOL interrupt.
		 * Could be used to send a netlink message.
		 */
		writel(WOLPkt | LinkChange, ioaddr + IntrMask);
		natsemi_irq_enable(dev);
	}
}
3156d9fb9f38SJeff Kirsher 
/*
 * netdev_close - bring the interface down (ndo_stop).
 * @dev: network device
 *
 * Runs under rtnl_lock.  Tears down every async codepath before
 * touching the hardware: napi, the link timer, then the irq handler
 * (via hands_off under disable_irq), and finally stops rx/tx, saves
 * the statistics and drains/frees the descriptor rings.  If any
 * wake-on-lan option is armed, the nic is restarted in silent WOL
 * mode instead of being left fully stopped.  Always returns 0.
 */
static int netdev_close(struct net_device *dev)
{
	void __iomem * ioaddr = ns_ioaddr(dev);
	struct netdev_private *np = netdev_priv(dev);
	const int irq = np->pci_dev->irq;

	if (netif_msg_ifdown(np))
		printk(KERN_DEBUG
			"%s: Shutting down ethercard, status was %#04x.\n",
			dev->name, (int)readl(ioaddr + ChipCmd));
	if (netif_msg_pktdata(np))
		printk(KERN_DEBUG
			"%s: Queue pointers were Tx %d / %d,  Rx %d / %d.\n",
			dev->name, np->cur_tx, np->dirty_tx,
			np->cur_rx, np->dirty_rx);

	napi_disable(&np->napi);

	/*
	 * FIXME: what if someone tries to close a device
	 * that is suspended?
	 * Should we reenable the nic to switch to
	 * the final WOL settings?
	 */

	del_timer_sync(&np->timer);
	/* disable_irq() synchronizes with the interrupt handler so it
	 * cannot run while hands_off is being set */
	disable_irq(irq);
	spin_lock_irq(&np->lock);
	natsemi_irq_disable(dev);
	np->hands_off = 1;
	spin_unlock_irq(&np->lock);
	enable_irq(irq);

	free_irq(irq, dev);

	/* Interrupt disabled, interrupt handler released,
	 * queue stopped, timer deleted, rtnl_lock held
	 * All async codepaths that access the driver are disabled.
	 */
	spin_lock_irq(&np->lock);
	np->hands_off = 0;
	/* dummy reads clear any latched interrupt state */
	readl(ioaddr + IntrMask);
	readw(ioaddr + MIntrStatus);

	/* Freeze Stats */
	writel(StatsFreeze, ioaddr + StatsCtrl);

	/* Stop the chip's Tx and Rx processes. */
	natsemi_stop_rxtx(dev);

	__get_stats(dev);
	spin_unlock_irq(&np->lock);

	/* clear the carrier last - an interrupt could reenable it otherwise */
	netif_carrier_off(dev);
	netif_stop_queue(dev);

	dump_ring(dev);
	drain_ring(dev);
	free_ring(dev);

	{
		u32 wol = readl(ioaddr + WOLCmd) & WakeOptsSummary;
		if (wol) {
			/* restart the NIC in WOL mode.
			 * The nic must be stopped for this.
			 */
			enable_wol_mode(dev, 0);
		} else {
			/* Restore PME enable bit unmolested */
			writel(np->SavedClkRun, ioaddr + ClkRun);
		}
	}
	return 0;
}
3232d9fb9f38SJeff Kirsher 
3233d9fb9f38SJeff Kirsher 
/*
 * natsemi_remove1 - PCI device removal callback.
 * @pdev: the PCI device being removed
 *
 * Tears down in reverse order of probe: sysfs attribute, netdev
 * unregistration (which closes a running interface), register
 * unmapping, and finally the net_device itself.
 */
static void natsemi_remove1(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	void __iomem * ioaddr = ns_ioaddr(dev);

	NATSEMI_REMOVE_FILE(pdev, dspcfg_workaround);
	unregister_netdev (dev);
	iounmap(ioaddr);
	free_netdev (dev);
}
3244d9fb9f38SJeff Kirsher 
3245d9fb9f38SJeff Kirsher /*
3246d9fb9f38SJeff Kirsher  * The ns83815 chip doesn't have explicit RxStop bits.
3247d9fb9f38SJeff Kirsher  * Kicking the Rx or Tx process for a new packet reenables the Rx process
3248d9fb9f38SJeff Kirsher  * of the nic, thus this function must be very careful:
3249d9fb9f38SJeff Kirsher  *
3250d9fb9f38SJeff Kirsher  * suspend/resume synchronization:
3251d9fb9f38SJeff Kirsher  * entry points:
3252d9fb9f38SJeff Kirsher  *   netdev_open, netdev_close, netdev_ioctl, set_rx_mode, intr_handler,
3253d9fb9f38SJeff Kirsher  *   start_tx, ns_tx_timeout
3254d9fb9f38SJeff Kirsher  *
3255d9fb9f38SJeff Kirsher  * No function accesses the hardware without checking np->hands_off.
3256d9fb9f38SJeff Kirsher  *	the check occurs under spin_lock_irq(&np->lock);
3257d9fb9f38SJeff Kirsher  * exceptions:
3258d9fb9f38SJeff Kirsher  *	* netdev_ioctl: noncritical access.
3259d9fb9f38SJeff Kirsher  *	* netdev_open: cannot happen due to the device_detach
3260d9fb9f38SJeff Kirsher  *	* netdev_close: doesn't hurt.
3261d9fb9f38SJeff Kirsher  *	* netdev_timer: timer stopped by natsemi_suspend.
3262d9fb9f38SJeff Kirsher  *	* intr_handler: doesn't acquire the spinlock. suspend calls
3263d9fb9f38SJeff Kirsher  *		disable_irq() to enforce synchronization.
3264d9fb9f38SJeff Kirsher  *      * natsemi_poll: checks before reenabling interrupts.  suspend
3265d9fb9f38SJeff Kirsher  *              sets hands_off, disables interrupts and then waits with
3266d9fb9f38SJeff Kirsher  *              napi_disable().
3267d9fb9f38SJeff Kirsher  *
3268d9fb9f38SJeff Kirsher  * Interrupts must be disabled, otherwise hands_off can cause irq storms.
3269d9fb9f38SJeff Kirsher  */
3270d9fb9f38SJeff Kirsher 
/*
 * natsemi_suspend - device PM suspend callback.
 * @dev_d: generic device, drvdata is our net_device
 *
 * Under rtnl_lock: stops the timer, quiesces the irq handler by
 * setting np->hands_off under disable_irq, stops rx/tx and napi,
 * saves the statistics and drains the rings.  If wake-on-lan is
 * armed the nic is restarted in silent WOL mode, otherwise only the
 * saved PME enable bit is restored.  See the synchronization notes
 * above this function.  Always returns 0.
 */
static int __maybe_unused natsemi_suspend(struct device *dev_d)
{
	struct net_device *dev = dev_get_drvdata(dev_d);
	struct netdev_private *np = netdev_priv(dev);
	void __iomem * ioaddr = ns_ioaddr(dev);

	rtnl_lock();
	if (netif_running (dev)) {
		const int irq = np->pci_dev->irq;

		del_timer_sync(&np->timer);

		disable_irq(irq);
		spin_lock_irq(&np->lock);

		natsemi_irq_disable(dev);
		/* hands_off keeps every other entry point away from the
		 * hardware until resume clears it */
		np->hands_off = 1;
		natsemi_stop_rxtx(dev);
		netif_stop_queue(dev);

		spin_unlock_irq(&np->lock);
		enable_irq(irq);

		napi_disable(&np->napi);

		/* Update the error counts. */
		__get_stats(dev);

		/* pci_power_off(pdev, -1); */
		drain_ring(dev);
		{
			u32 wol = readl(ioaddr + WOLCmd) & WakeOptsSummary;
			/* Restore PME enable bit */
			if (wol) {
				/* restart the NIC in WOL mode.
				 * The nic must be stopped for this.
				 * FIXME: use the WOL interrupt
				 */
				enable_wol_mode(dev, 0);
			} else {
				/* Restore PME enable bit unmolested */
				writel(np->SavedClkRun, ioaddr + ClkRun);
			}
		}
	}
	netif_device_detach(dev);
	rtnl_unlock();
	return 0;
}
3320d9fb9f38SJeff Kirsher 
3321d9fb9f38SJeff Kirsher 
/*
 * natsemi_resume - device PM resume callback.
 * @dev_d: generic device, drvdata is our net_device
 *
 * Under rtnl_lock: if the device is already attached (resume without a
 * prior suspend) nothing is done.  Otherwise a running interface is
 * fully re-initialized: reset, ring setup, register programming with
 * hands_off cleared under disable_irq, then the link timer restarts.
 * Always returns 0.
 */
static int __maybe_unused natsemi_resume(struct device *dev_d)
{
	struct net_device *dev = dev_get_drvdata(dev_d);
	struct netdev_private *np = netdev_priv(dev);

	rtnl_lock();
	if (netif_device_present(dev))
		goto out;
	if (netif_running(dev)) {
		const int irq = np->pci_dev->irq;

		/* suspend must have set hands_off; see natsemi_suspend() */
		BUG_ON(!np->hands_off);
	/*	pci_power_on(pdev); */

		napi_enable(&np->napi);

		natsemi_reset(dev);
		init_ring(dev);
		disable_irq(irq);
		spin_lock_irq(&np->lock);
		np->hands_off = 0;
		init_registers(dev);
		/* NOTE(review): attach happens here under the lock and
		 * again below; the second call is presumably a harmless
		 * no-op once the device is already attached - confirm */
		netif_device_attach(dev);
		spin_unlock_irq(&np->lock);
		enable_irq(irq);

		mod_timer(&np->timer, round_jiffies(jiffies + 1*HZ));
	}
	netif_device_attach(dev);
out:
	rtnl_unlock();
	return 0;
}
3355d9fb9f38SJeff Kirsher 
/* Power-management callbacks; no-ops unless CONFIG_PM is enabled. */
static SIMPLE_DEV_PM_OPS(natsemi_pm_ops, natsemi_suspend, natsemi_resume);

/* PCI driver glue: probe/remove plus PM ops for the DP8381x IDs. */
static struct pci_driver natsemi_driver = {
	.name		= DRV_NAME,
	.id_table	= natsemi_pci_tbl,
	.probe		= natsemi_probe1,
	.remove		= natsemi_remove1,
	.driver.pm	= &natsemi_pm_ops,
};
3365d9fb9f38SJeff Kirsher 
/*
 * natsemi_init_mod - module entry point; registers the PCI driver.
 * Returns 0 on success or the negative error from pci_register_driver().
 */
static int __init natsemi_init_mod (void)
{
/* when a module, this is printed whether or not devices are found in probe */
#ifdef MODULE
	printk(version);
#endif

	return pci_register_driver(&natsemi_driver);
}
3375d9fb9f38SJeff Kirsher 
/*
 * natsemi_exit_mod - module exit point; unregisters the PCI driver,
 * which triggers natsemi_remove1() for every bound device.
 */
static void __exit natsemi_exit_mod (void)
{
	pci_unregister_driver (&natsemi_driver);
}

/* Module entry/exit registration. */
module_init(natsemi_init_mod);
module_exit(natsemi_exit_mod);
3383d9fb9f38SJeff Kirsher 
3384