1d9fb9f38SJeff Kirsher /* natsemi.c: A Linux PCI Ethernet driver for the NatSemi DP8381x series. */
2d9fb9f38SJeff Kirsher /*
3d9fb9f38SJeff Kirsher 	Written/copyright 1999-2001 by Donald Becker.
4d9fb9f38SJeff Kirsher 	Portions copyright (c) 2001,2002 Sun Microsystems (thockin@sun.com)
5d9fb9f38SJeff Kirsher 	Portions copyright 2001,2002 Manfred Spraul (manfred@colorfullife.com)
6d9fb9f38SJeff Kirsher 	Portions copyright 2004 Harald Welte <laforge@gnumonks.org>
7d9fb9f38SJeff Kirsher 
8d9fb9f38SJeff Kirsher 	This software may be used and distributed according to the terms of
9d9fb9f38SJeff Kirsher 	the GNU General Public License (GPL), incorporated herein by reference.
10d9fb9f38SJeff Kirsher 	Drivers based on or derived from this code fall under the GPL and must
11d9fb9f38SJeff Kirsher 	retain the authorship, copyright and license notice.  This file is not
12d9fb9f38SJeff Kirsher 	a complete program and may only be used when the entire operating
13d9fb9f38SJeff Kirsher 	system is licensed under the GPL.  License for under other terms may be
14d9fb9f38SJeff Kirsher 	available.  Contact the original author for details.
15d9fb9f38SJeff Kirsher 
16d9fb9f38SJeff Kirsher 	The original author may be reached as becker@scyld.com, or at
17d9fb9f38SJeff Kirsher 	Scyld Computing Corporation
18d9fb9f38SJeff Kirsher 	410 Severn Ave., Suite 210
19d9fb9f38SJeff Kirsher 	Annapolis MD 21403
20d9fb9f38SJeff Kirsher 
21d9fb9f38SJeff Kirsher 	Support information and updates available at
22d9fb9f38SJeff Kirsher 	http://www.scyld.com/network/netsemi.html
23d9fb9f38SJeff Kirsher 	[link no longer provides useful info -jgarzik]
24d9fb9f38SJeff Kirsher 
25d9fb9f38SJeff Kirsher 
26d9fb9f38SJeff Kirsher 	TODO:
27d9fb9f38SJeff Kirsher 	* big endian support with CFG:BEM instead of cpu_to_le32
28d9fb9f38SJeff Kirsher */
29d9fb9f38SJeff Kirsher 
30d9fb9f38SJeff Kirsher #include <linux/module.h>
31d9fb9f38SJeff Kirsher #include <linux/kernel.h>
32d9fb9f38SJeff Kirsher #include <linux/string.h>
33d9fb9f38SJeff Kirsher #include <linux/timer.h>
34d9fb9f38SJeff Kirsher #include <linux/errno.h>
35d9fb9f38SJeff Kirsher #include <linux/ioport.h>
36d9fb9f38SJeff Kirsher #include <linux/slab.h>
37d9fb9f38SJeff Kirsher #include <linux/interrupt.h>
38d9fb9f38SJeff Kirsher #include <linux/pci.h>
39d9fb9f38SJeff Kirsher #include <linux/netdevice.h>
40d9fb9f38SJeff Kirsher #include <linux/etherdevice.h>
41d9fb9f38SJeff Kirsher #include <linux/skbuff.h>
42d9fb9f38SJeff Kirsher #include <linux/init.h>
43d9fb9f38SJeff Kirsher #include <linux/spinlock.h>
44d9fb9f38SJeff Kirsher #include <linux/ethtool.h>
45d9fb9f38SJeff Kirsher #include <linux/delay.h>
46d9fb9f38SJeff Kirsher #include <linux/rtnetlink.h>
47d9fb9f38SJeff Kirsher #include <linux/mii.h>
48d9fb9f38SJeff Kirsher #include <linux/crc32.h>
49d9fb9f38SJeff Kirsher #include <linux/bitops.h>
50d9fb9f38SJeff Kirsher #include <linux/prefetch.h>
51d9fb9f38SJeff Kirsher #include <asm/processor.h>	/* Processor type for cache alignment. */
52d9fb9f38SJeff Kirsher #include <asm/io.h>
53d9fb9f38SJeff Kirsher #include <asm/irq.h>
54d9fb9f38SJeff Kirsher #include <asm/uaccess.h>
55d9fb9f38SJeff Kirsher 
56d9fb9f38SJeff Kirsher #define DRV_NAME	"natsemi"
57d9fb9f38SJeff Kirsher #define DRV_VERSION	"2.1"
58d9fb9f38SJeff Kirsher #define DRV_RELDATE	"Sept 11, 2006"
59d9fb9f38SJeff Kirsher 
60d9fb9f38SJeff Kirsher #define RX_OFFSET	2
61d9fb9f38SJeff Kirsher 
62d9fb9f38SJeff Kirsher /* Updated to recommendations in pci-skeleton v2.03. */
63d9fb9f38SJeff Kirsher 
64d9fb9f38SJeff Kirsher /* The user-configurable values.
65d9fb9f38SJeff Kirsher    These may be modified when a driver module is loaded.*/
66d9fb9f38SJeff Kirsher 
67d9fb9f38SJeff Kirsher #define NATSEMI_DEF_MSG		(NETIF_MSG_DRV		| \
68d9fb9f38SJeff Kirsher 				 NETIF_MSG_LINK		| \
69d9fb9f38SJeff Kirsher 				 NETIF_MSG_WOL		| \
70d9fb9f38SJeff Kirsher 				 NETIF_MSG_RX_ERR	| \
71d9fb9f38SJeff Kirsher 				 NETIF_MSG_TX_ERR)
72d9fb9f38SJeff Kirsher static int debug = -1;
73d9fb9f38SJeff Kirsher 
74d9fb9f38SJeff Kirsher static int mtu;
75d9fb9f38SJeff Kirsher 
76d9fb9f38SJeff Kirsher /* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
77d9fb9f38SJeff Kirsher    This chip uses a 512 element hash table based on the Ethernet CRC.  */
78d9fb9f38SJeff Kirsher static const int multicast_filter_limit = 100;
79d9fb9f38SJeff Kirsher 
80d9fb9f38SJeff Kirsher /* Set the copy breakpoint for the copy-only-tiny-frames scheme.
81d9fb9f38SJeff Kirsher    Setting to > 1518 effectively disables this feature. */
82d9fb9f38SJeff Kirsher static int rx_copybreak;
83d9fb9f38SJeff Kirsher 
84d9fb9f38SJeff Kirsher static int dspcfg_workaround = 1;
85d9fb9f38SJeff Kirsher 
86d9fb9f38SJeff Kirsher /* Used to pass the media type, etc.
87d9fb9f38SJeff Kirsher    Both 'options[]' and 'full_duplex[]' should exist for driver
88d9fb9f38SJeff Kirsher    interoperability.
89d9fb9f38SJeff Kirsher    The media type is usually passed in 'options[]'.
90d9fb9f38SJeff Kirsher */
91d9fb9f38SJeff Kirsher #define MAX_UNITS 8		/* More are supported, limit only on options */
92d9fb9f38SJeff Kirsher static int options[MAX_UNITS];
93d9fb9f38SJeff Kirsher static int full_duplex[MAX_UNITS];
94d9fb9f38SJeff Kirsher 
95d9fb9f38SJeff Kirsher /* Operational parameters that are set at compile time. */
96d9fb9f38SJeff Kirsher 
97d9fb9f38SJeff Kirsher /* Keep the ring sizes a power of two for compile efficiency.
98d9fb9f38SJeff Kirsher    The compiler will convert <unsigned>'%'<2^N> into a bit mask.
99d9fb9f38SJeff Kirsher    Making the Tx ring too large decreases the effectiveness of channel
100d9fb9f38SJeff Kirsher    bonding and packet priority.
101d9fb9f38SJeff Kirsher    There are no ill effects from too-large receive rings. */
102d9fb9f38SJeff Kirsher #define TX_RING_SIZE	16
103d9fb9f38SJeff Kirsher #define TX_QUEUE_LEN	10 /* Limit ring entries actually used, min 4. */
104d9fb9f38SJeff Kirsher #define RX_RING_SIZE	32
105d9fb9f38SJeff Kirsher 
106d9fb9f38SJeff Kirsher /* Operational parameters that usually are not changed. */
107d9fb9f38SJeff Kirsher /* Time in jiffies before concluding the transmitter is hung. */
108d9fb9f38SJeff Kirsher #define TX_TIMEOUT  (2*HZ)
109d9fb9f38SJeff Kirsher 
110d9fb9f38SJeff Kirsher #define NATSEMI_HW_TIMEOUT	400
111d9fb9f38SJeff Kirsher #define NATSEMI_TIMER_FREQ	5*HZ
112d9fb9f38SJeff Kirsher #define NATSEMI_PG0_NREGS	64
113d9fb9f38SJeff Kirsher #define NATSEMI_RFDR_NREGS	8
114d9fb9f38SJeff Kirsher #define NATSEMI_PG1_NREGS	4
115d9fb9f38SJeff Kirsher #define NATSEMI_NREGS		(NATSEMI_PG0_NREGS + NATSEMI_RFDR_NREGS + \
116d9fb9f38SJeff Kirsher 				 NATSEMI_PG1_NREGS)
117d9fb9f38SJeff Kirsher #define NATSEMI_REGS_VER	1 /* v1 added RFDR registers */
118d9fb9f38SJeff Kirsher #define NATSEMI_REGS_SIZE	(NATSEMI_NREGS * sizeof(u32))
119d9fb9f38SJeff Kirsher 
120d9fb9f38SJeff Kirsher /* Buffer sizes:
121d9fb9f38SJeff Kirsher  * The nic writes 32-bit values, even if the upper bytes of
122d9fb9f38SJeff Kirsher  * a 32-bit value are beyond the end of the buffer.
123d9fb9f38SJeff Kirsher  */
124d9fb9f38SJeff Kirsher #define NATSEMI_HEADERS		22	/* 2*mac,type,vlan,crc */
125d9fb9f38SJeff Kirsher #define NATSEMI_PADDING		16	/* 2 bytes should be sufficient */
126d9fb9f38SJeff Kirsher #define NATSEMI_LONGPKT		1518	/* limit for normal packets */
127d9fb9f38SJeff Kirsher #define NATSEMI_RX_LIMIT	2046	/* maximum supported by hardware */
128d9fb9f38SJeff Kirsher 
129d9fb9f38SJeff Kirsher /* These identify the driver base version and may not be removed. */
1306980cbe4SBill Pemberton static const char version[] =
131d9fb9f38SJeff Kirsher   KERN_INFO DRV_NAME " dp8381x driver, version "
132d9fb9f38SJeff Kirsher       DRV_VERSION ", " DRV_RELDATE "\n"
133d9fb9f38SJeff Kirsher   "  originally by Donald Becker <becker@scyld.com>\n"
134d9fb9f38SJeff Kirsher   "  2.4.x kernel port by Jeff Garzik, Tjeerd Mulder\n";
135d9fb9f38SJeff Kirsher 
136d9fb9f38SJeff Kirsher MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
137d9fb9f38SJeff Kirsher MODULE_DESCRIPTION("National Semiconductor DP8381x series PCI Ethernet driver");
138d9fb9f38SJeff Kirsher MODULE_LICENSE("GPL");
139d9fb9f38SJeff Kirsher 
140d9fb9f38SJeff Kirsher module_param(mtu, int, 0);
141d9fb9f38SJeff Kirsher module_param(debug, int, 0);
142d9fb9f38SJeff Kirsher module_param(rx_copybreak, int, 0);
143d9fb9f38SJeff Kirsher module_param(dspcfg_workaround, int, 0);
144d9fb9f38SJeff Kirsher module_param_array(options, int, NULL, 0);
145d9fb9f38SJeff Kirsher module_param_array(full_duplex, int, NULL, 0);
146d9fb9f38SJeff Kirsher MODULE_PARM_DESC(mtu, "DP8381x MTU (all boards)");
147d9fb9f38SJeff Kirsher MODULE_PARM_DESC(debug, "DP8381x default debug level");
148d9fb9f38SJeff Kirsher MODULE_PARM_DESC(rx_copybreak,
149d9fb9f38SJeff Kirsher 	"DP8381x copy breakpoint for copy-only-tiny-frames");
150d9fb9f38SJeff Kirsher MODULE_PARM_DESC(dspcfg_workaround, "DP8381x: control DspCfg workaround");
151d9fb9f38SJeff Kirsher MODULE_PARM_DESC(options,
152d9fb9f38SJeff Kirsher 	"DP8381x: Bits 0-3: media type, bit 17: full duplex");
153d9fb9f38SJeff Kirsher MODULE_PARM_DESC(full_duplex, "DP8381x full duplex setting(s) (1)");
154d9fb9f38SJeff Kirsher 
155d9fb9f38SJeff Kirsher /*
156d9fb9f38SJeff Kirsher 				Theory of Operation
157d9fb9f38SJeff Kirsher 
158d9fb9f38SJeff Kirsher I. Board Compatibility
159d9fb9f38SJeff Kirsher 
160d9fb9f38SJeff Kirsher This driver is designed for National Semiconductor DP83815 PCI Ethernet NIC.
161d9fb9f38SJeff Kirsher It also works with other chips in the DP83810 series.
162d9fb9f38SJeff Kirsher 
163d9fb9f38SJeff Kirsher II. Board-specific settings
164d9fb9f38SJeff Kirsher 
165d9fb9f38SJeff Kirsher This driver requires the PCI interrupt line to be valid.
166d9fb9f38SJeff Kirsher It honors the EEPROM-set values.
167d9fb9f38SJeff Kirsher 
168d9fb9f38SJeff Kirsher III. Driver operation
169d9fb9f38SJeff Kirsher 
170d9fb9f38SJeff Kirsher IIIa. Ring buffers
171d9fb9f38SJeff Kirsher 
172d9fb9f38SJeff Kirsher This driver uses two statically allocated fixed-size descriptor lists
173d9fb9f38SJeff Kirsher formed into rings by a branch from the final descriptor to the beginning of
174d9fb9f38SJeff Kirsher the list.  The ring sizes are set at compile time by RX/TX_RING_SIZE.
175d9fb9f38SJeff Kirsher The NatSemi design uses a 'next descriptor' pointer that the driver forms
176d9fb9f38SJeff Kirsher into a list.
177d9fb9f38SJeff Kirsher 
178d9fb9f38SJeff Kirsher IIIb/c. Transmit/Receive Structure
179d9fb9f38SJeff Kirsher 
180d9fb9f38SJeff Kirsher This driver uses a zero-copy receive and transmit scheme.
181d9fb9f38SJeff Kirsher The driver allocates full frame size skbuffs for the Rx ring buffers at
182d9fb9f38SJeff Kirsher open() time and passes the skb->data field to the chip as receive data
183d9fb9f38SJeff Kirsher buffers.  When an incoming frame is less than RX_COPYBREAK bytes long,
184d9fb9f38SJeff Kirsher a fresh skbuff is allocated and the frame is copied to the new skbuff.
185d9fb9f38SJeff Kirsher When the incoming frame is larger, the skbuff is passed directly up the
186d9fb9f38SJeff Kirsher protocol stack.  Buffers consumed this way are replaced by newly allocated
187d9fb9f38SJeff Kirsher skbuffs in a later phase of receives.
188d9fb9f38SJeff Kirsher 
189d9fb9f38SJeff Kirsher The RX_COPYBREAK value is chosen to trade-off the memory wasted by
190d9fb9f38SJeff Kirsher using a full-sized skbuff for small frames vs. the copying costs of larger
191d9fb9f38SJeff Kirsher frames.  New boards are typically used in generously configured machines
192d9fb9f38SJeff Kirsher and the underfilled buffers have negligible impact compared to the benefit of
193d9fb9f38SJeff Kirsher a single allocation size, so the default value of zero results in never
194d9fb9f38SJeff Kirsher copying packets.  When copying is done, the cost is usually mitigated by using
195d9fb9f38SJeff Kirsher a combined copy/checksum routine.  Copying also preloads the cache, which is
196d9fb9f38SJeff Kirsher most useful with small frames.
197d9fb9f38SJeff Kirsher 
198d9fb9f38SJeff Kirsher A subtle aspect of the operation is that unaligned buffers are not permitted
199d9fb9f38SJeff Kirsher by the hardware.  Thus the IP header at offset 14 in an ethernet frame isn't
200d9fb9f38SJeff Kirsher longword aligned for further processing.  When copying, frames are put into the
201d9fb9f38SJeff Kirsher skbuff at an offset of "+2", 16-byte aligning the IP header.
202d9fb9f38SJeff Kirsher 
203d9fb9f38SJeff Kirsher IIId. Synchronization
204d9fb9f38SJeff Kirsher 
205d9fb9f38SJeff Kirsher Most operations are synchronized on the np->lock irq spinlock, except the
206d9fb9f38SJeff Kirsher receive and transmit paths which are synchronised using a combination of
207d9fb9f38SJeff Kirsher hardware descriptor ownership, disabling interrupts and NAPI poll scheduling.
208d9fb9f38SJeff Kirsher 
209d9fb9f38SJeff Kirsher IVb. References
210d9fb9f38SJeff Kirsher 
211d9fb9f38SJeff Kirsher http://www.scyld.com/expert/100mbps.html
212d9fb9f38SJeff Kirsher http://www.scyld.com/expert/NWay.html
213d9fb9f38SJeff Kirsher Datasheet is available from:
214d9fb9f38SJeff Kirsher http://www.national.com/pf/DP/DP83815.html
215d9fb9f38SJeff Kirsher 
216d9fb9f38SJeff Kirsher IVc. Errata
217d9fb9f38SJeff Kirsher 
218d9fb9f38SJeff Kirsher None characterised.
219d9fb9f38SJeff Kirsher */
220d9fb9f38SJeff Kirsher 
221d9fb9f38SJeff Kirsher 
222d9fb9f38SJeff Kirsher 
223d9fb9f38SJeff Kirsher /*
224d9fb9f38SJeff Kirsher  * Support for fibre connections on Am79C874:
225d9fb9f38SJeff Kirsher  * This phy needs a special setup when connected to a fibre cable.
226d9fb9f38SJeff Kirsher  * http://www.amd.com/files/connectivitysolutions/networking/archivednetworking/22235.pdf
227d9fb9f38SJeff Kirsher  */
228d9fb9f38SJeff Kirsher #define PHYID_AM79C874	0x0022561b
229d9fb9f38SJeff Kirsher 
230d9fb9f38SJeff Kirsher enum {
231d9fb9f38SJeff Kirsher 	MII_MCTRL	= 0x15,		/* mode control register */
232d9fb9f38SJeff Kirsher 	MII_FX_SEL	= 0x0001,	/* 100BASE-FX (fiber) */
233d9fb9f38SJeff Kirsher 	MII_EN_SCRM	= 0x0004,	/* enable scrambler (tp) */
234d9fb9f38SJeff Kirsher };
235d9fb9f38SJeff Kirsher 
236d9fb9f38SJeff Kirsher enum {
237d9fb9f38SJeff Kirsher 	NATSEMI_FLAG_IGNORE_PHY		= 0x1,
238d9fb9f38SJeff Kirsher };
239d9fb9f38SJeff Kirsher 
240d9fb9f38SJeff Kirsher /* array of board data directly indexed by pci_tbl[x].driver_data */
241d9fb9f38SJeff Kirsher static struct {
242d9fb9f38SJeff Kirsher 	const char *name;
243d9fb9f38SJeff Kirsher 	unsigned long flags;
244d9fb9f38SJeff Kirsher 	unsigned int eeprom_size;
2456980cbe4SBill Pemberton } natsemi_pci_info[] = {
246d9fb9f38SJeff Kirsher 	{ "Aculab E1/T1 PMXc cPCI carrier card", NATSEMI_FLAG_IGNORE_PHY, 128 },
247d9fb9f38SJeff Kirsher 	{ "NatSemi DP8381[56]", 0, 24 },
248d9fb9f38SJeff Kirsher };
249d9fb9f38SJeff Kirsher 
2509baa3c34SBenoit Taine static const struct pci_device_id natsemi_pci_tbl[] = {
251d9fb9f38SJeff Kirsher 	{ PCI_VENDOR_ID_NS, 0x0020, 0x12d9,     0x000c,     0, 0, 0 },
252d9fb9f38SJeff Kirsher 	{ PCI_VENDOR_ID_NS, 0x0020, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
253d9fb9f38SJeff Kirsher 	{ }	/* terminate list */
254d9fb9f38SJeff Kirsher };
255d9fb9f38SJeff Kirsher MODULE_DEVICE_TABLE(pci, natsemi_pci_tbl);
256d9fb9f38SJeff Kirsher 
257d9fb9f38SJeff Kirsher /* Offsets to the device registers.
258d9fb9f38SJeff Kirsher    Unlike software-only systems, device drivers interact with complex hardware.
259d9fb9f38SJeff Kirsher    It's not useful to define symbolic names for every register bit in the
260d9fb9f38SJeff Kirsher    device.
261d9fb9f38SJeff Kirsher */
262d9fb9f38SJeff Kirsher enum register_offsets {
263d9fb9f38SJeff Kirsher 	ChipCmd			= 0x00,
264d9fb9f38SJeff Kirsher 	ChipConfig		= 0x04,
265d9fb9f38SJeff Kirsher 	EECtrl			= 0x08,
266d9fb9f38SJeff Kirsher 	PCIBusCfg		= 0x0C,
267d9fb9f38SJeff Kirsher 	IntrStatus		= 0x10,
268d9fb9f38SJeff Kirsher 	IntrMask		= 0x14,
269d9fb9f38SJeff Kirsher 	IntrEnable		= 0x18,
270d9fb9f38SJeff Kirsher 	IntrHoldoff		= 0x1C, /* DP83816 only */
271d9fb9f38SJeff Kirsher 	TxRingPtr		= 0x20,
272d9fb9f38SJeff Kirsher 	TxConfig		= 0x24,
273d9fb9f38SJeff Kirsher 	RxRingPtr		= 0x30,
274d9fb9f38SJeff Kirsher 	RxConfig		= 0x34,
275d9fb9f38SJeff Kirsher 	ClkRun			= 0x3C,
276d9fb9f38SJeff Kirsher 	WOLCmd			= 0x40,
277d9fb9f38SJeff Kirsher 	PauseCmd		= 0x44,
278d9fb9f38SJeff Kirsher 	RxFilterAddr		= 0x48,
279d9fb9f38SJeff Kirsher 	RxFilterData		= 0x4C,
280d9fb9f38SJeff Kirsher 	BootRomAddr		= 0x50,
281d9fb9f38SJeff Kirsher 	BootRomData		= 0x54,
282d9fb9f38SJeff Kirsher 	SiliconRev		= 0x58,
283d9fb9f38SJeff Kirsher 	StatsCtrl		= 0x5C,
284d9fb9f38SJeff Kirsher 	StatsData		= 0x60,
285d9fb9f38SJeff Kirsher 	RxPktErrs		= 0x60,
286d9fb9f38SJeff Kirsher 	RxMissed		= 0x68,
287d9fb9f38SJeff Kirsher 	RxCRCErrs		= 0x64,
288d9fb9f38SJeff Kirsher 	BasicControl		= 0x80,
289d9fb9f38SJeff Kirsher 	BasicStatus		= 0x84,
290d9fb9f38SJeff Kirsher 	AnegAdv			= 0x90,
291d9fb9f38SJeff Kirsher 	AnegPeer		= 0x94,
292d9fb9f38SJeff Kirsher 	PhyStatus		= 0xC0,
293d9fb9f38SJeff Kirsher 	MIntrCtrl		= 0xC4,
294d9fb9f38SJeff Kirsher 	MIntrStatus		= 0xC8,
295d9fb9f38SJeff Kirsher 	PhyCtrl			= 0xE4,
296d9fb9f38SJeff Kirsher 
297d9fb9f38SJeff Kirsher 	/* These are from the spec, around page 78... on a separate table.
298d9fb9f38SJeff Kirsher 	 * The meaning of these registers depend on the value of PGSEL. */
299d9fb9f38SJeff Kirsher 	PGSEL			= 0xCC,
300d9fb9f38SJeff Kirsher 	PMDCSR			= 0xE4,
301d9fb9f38SJeff Kirsher 	TSTDAT			= 0xFC,
302d9fb9f38SJeff Kirsher 	DSPCFG			= 0xF4,
303d9fb9f38SJeff Kirsher 	SDCFG			= 0xF8
304d9fb9f38SJeff Kirsher };
305d9fb9f38SJeff Kirsher /* the values for the 'magic' registers above (PGSEL=1) */
306d9fb9f38SJeff Kirsher #define PMDCSR_VAL	0x189c	/* enable preferred adaptation circuitry */
307d9fb9f38SJeff Kirsher #define TSTDAT_VAL	0x0
308d9fb9f38SJeff Kirsher #define DSPCFG_VAL	0x5040
309d9fb9f38SJeff Kirsher #define SDCFG_VAL	0x008c	/* set voltage thresholds for Signal Detect */
310d9fb9f38SJeff Kirsher #define DSPCFG_LOCK	0x20	/* coefficient lock bit in DSPCFG */
311d9fb9f38SJeff Kirsher #define DSPCFG_COEF	0x1000	/* see coefficient (in TSTDAT) bit in DSPCFG */
312d9fb9f38SJeff Kirsher #define TSTDAT_FIXED	0xe8	/* magic number for bad coefficients */
313d9fb9f38SJeff Kirsher 
314d9fb9f38SJeff Kirsher /* misc PCI space registers */
315d9fb9f38SJeff Kirsher enum pci_register_offsets {
316d9fb9f38SJeff Kirsher 	PCIPM			= 0x44,
317d9fb9f38SJeff Kirsher };
318d9fb9f38SJeff Kirsher 
319d9fb9f38SJeff Kirsher enum ChipCmd_bits {
320d9fb9f38SJeff Kirsher 	ChipReset		= 0x100,
321d9fb9f38SJeff Kirsher 	RxReset			= 0x20,
322d9fb9f38SJeff Kirsher 	TxReset			= 0x10,
323d9fb9f38SJeff Kirsher 	RxOff			= 0x08,
324d9fb9f38SJeff Kirsher 	RxOn			= 0x04,
325d9fb9f38SJeff Kirsher 	TxOff			= 0x02,
326d9fb9f38SJeff Kirsher 	TxOn			= 0x01,
327d9fb9f38SJeff Kirsher };
328d9fb9f38SJeff Kirsher 
329d9fb9f38SJeff Kirsher enum ChipConfig_bits {
330d9fb9f38SJeff Kirsher 	CfgPhyDis		= 0x200,
331d9fb9f38SJeff Kirsher 	CfgPhyRst		= 0x400,
332d9fb9f38SJeff Kirsher 	CfgExtPhy		= 0x1000,
333d9fb9f38SJeff Kirsher 	CfgAnegEnable		= 0x2000,
334d9fb9f38SJeff Kirsher 	CfgAneg100		= 0x4000,
335d9fb9f38SJeff Kirsher 	CfgAnegFull		= 0x8000,
336d9fb9f38SJeff Kirsher 	CfgAnegDone		= 0x8000000,
337d9fb9f38SJeff Kirsher 	CfgFullDuplex		= 0x20000000,
338d9fb9f38SJeff Kirsher 	CfgSpeed100		= 0x40000000,
339d9fb9f38SJeff Kirsher 	CfgLink			= 0x80000000,
340d9fb9f38SJeff Kirsher };
341d9fb9f38SJeff Kirsher 
342d9fb9f38SJeff Kirsher enum EECtrl_bits {
343d9fb9f38SJeff Kirsher 	EE_ShiftClk		= 0x04,
344d9fb9f38SJeff Kirsher 	EE_DataIn		= 0x01,
345d9fb9f38SJeff Kirsher 	EE_ChipSelect		= 0x08,
346d9fb9f38SJeff Kirsher 	EE_DataOut		= 0x02,
347d9fb9f38SJeff Kirsher 	MII_Data 		= 0x10,
348d9fb9f38SJeff Kirsher 	MII_Write		= 0x20,
349d9fb9f38SJeff Kirsher 	MII_ShiftClk		= 0x40,
350d9fb9f38SJeff Kirsher };
351d9fb9f38SJeff Kirsher 
352d9fb9f38SJeff Kirsher enum PCIBusCfg_bits {
353d9fb9f38SJeff Kirsher 	EepromReload		= 0x4,
354d9fb9f38SJeff Kirsher };
355d9fb9f38SJeff Kirsher 
356d9fb9f38SJeff Kirsher /* Bits in the interrupt status/mask registers. */
357d9fb9f38SJeff Kirsher enum IntrStatus_bits {
358d9fb9f38SJeff Kirsher 	IntrRxDone		= 0x0001,
359d9fb9f38SJeff Kirsher 	IntrRxIntr		= 0x0002,
360d9fb9f38SJeff Kirsher 	IntrRxErr		= 0x0004,
361d9fb9f38SJeff Kirsher 	IntrRxEarly		= 0x0008,
362d9fb9f38SJeff Kirsher 	IntrRxIdle		= 0x0010,
363d9fb9f38SJeff Kirsher 	IntrRxOverrun		= 0x0020,
364d9fb9f38SJeff Kirsher 	IntrTxDone		= 0x0040,
365d9fb9f38SJeff Kirsher 	IntrTxIntr		= 0x0080,
366d9fb9f38SJeff Kirsher 	IntrTxErr		= 0x0100,
367d9fb9f38SJeff Kirsher 	IntrTxIdle		= 0x0200,
368d9fb9f38SJeff Kirsher 	IntrTxUnderrun		= 0x0400,
369d9fb9f38SJeff Kirsher 	StatsMax		= 0x0800,
370d9fb9f38SJeff Kirsher 	SWInt			= 0x1000,
371d9fb9f38SJeff Kirsher 	WOLPkt			= 0x2000,
372d9fb9f38SJeff Kirsher 	LinkChange		= 0x4000,
373d9fb9f38SJeff Kirsher 	IntrHighBits		= 0x8000,
374d9fb9f38SJeff Kirsher 	RxStatusFIFOOver	= 0x10000,
375d9fb9f38SJeff Kirsher 	IntrPCIErr		= 0xf00000,
376d9fb9f38SJeff Kirsher 	RxResetDone		= 0x1000000,
377d9fb9f38SJeff Kirsher 	TxResetDone		= 0x2000000,
378d9fb9f38SJeff Kirsher 	IntrAbnormalSummary	= 0xCD20,
379d9fb9f38SJeff Kirsher };
380d9fb9f38SJeff Kirsher 
381d9fb9f38SJeff Kirsher /*
382d9fb9f38SJeff Kirsher  * Default Interrupts:
383d9fb9f38SJeff Kirsher  * Rx OK, Rx Packet Error, Rx Overrun,
384d9fb9f38SJeff Kirsher  * Tx OK, Tx Packet Error, Tx Underrun,
385d9fb9f38SJeff Kirsher  * MIB Service, Phy Interrupt, High Bits,
386d9fb9f38SJeff Kirsher  * Rx Status FIFO overrun,
387d9fb9f38SJeff Kirsher  * Received Target Abort, Received Master Abort,
388d9fb9f38SJeff Kirsher  * Signalled System Error, Received Parity Error
389d9fb9f38SJeff Kirsher  */
390d9fb9f38SJeff Kirsher #define DEFAULT_INTR 0x00f1cd65
391d9fb9f38SJeff Kirsher 
392d9fb9f38SJeff Kirsher enum TxConfig_bits {
393d9fb9f38SJeff Kirsher 	TxDrthMask		= 0x3f,
394d9fb9f38SJeff Kirsher 	TxFlthMask		= 0x3f00,
395d9fb9f38SJeff Kirsher 	TxMxdmaMask		= 0x700000,
396d9fb9f38SJeff Kirsher 	TxMxdma_512		= 0x0,
397d9fb9f38SJeff Kirsher 	TxMxdma_4		= 0x100000,
398d9fb9f38SJeff Kirsher 	TxMxdma_8		= 0x200000,
399d9fb9f38SJeff Kirsher 	TxMxdma_16		= 0x300000,
400d9fb9f38SJeff Kirsher 	TxMxdma_32		= 0x400000,
401d9fb9f38SJeff Kirsher 	TxMxdma_64		= 0x500000,
402d9fb9f38SJeff Kirsher 	TxMxdma_128		= 0x600000,
403d9fb9f38SJeff Kirsher 	TxMxdma_256		= 0x700000,
404d9fb9f38SJeff Kirsher 	TxCollRetry		= 0x800000,
405d9fb9f38SJeff Kirsher 	TxAutoPad		= 0x10000000,
406d9fb9f38SJeff Kirsher 	TxMacLoop		= 0x20000000,
407d9fb9f38SJeff Kirsher 	TxHeartIgn		= 0x40000000,
408d9fb9f38SJeff Kirsher 	TxCarrierIgn		= 0x80000000
409d9fb9f38SJeff Kirsher };
410d9fb9f38SJeff Kirsher 
411d9fb9f38SJeff Kirsher /*
412d9fb9f38SJeff Kirsher  * Tx Configuration:
413d9fb9f38SJeff Kirsher  * - 256 byte DMA burst length
414d9fb9f38SJeff Kirsher  * - fill threshold 512 bytes (i.e. restart DMA when 512 bytes are free)
415d9fb9f38SJeff Kirsher  * - 64 bytes initial drain threshold (i.e. begin actual transmission
416d9fb9f38SJeff Kirsher  *   when 64 byte are in the fifo)
417d9fb9f38SJeff Kirsher  * - on tx underruns, increase drain threshold by 64.
418d9fb9f38SJeff Kirsher  * - at most use a drain threshold of 1472 bytes: The sum of the fill
419d9fb9f38SJeff Kirsher  *   threshold and the drain threshold must be less than 2016 bytes.
420d9fb9f38SJeff Kirsher  *
421d9fb9f38SJeff Kirsher  */
422d9fb9f38SJeff Kirsher #define TX_FLTH_VAL		((512/32) << 8)
423d9fb9f38SJeff Kirsher #define TX_DRTH_VAL_START	(64/32)
424d9fb9f38SJeff Kirsher #define TX_DRTH_VAL_INC		2
425d9fb9f38SJeff Kirsher #define TX_DRTH_VAL_LIMIT	(1472/32)
426d9fb9f38SJeff Kirsher 
427d9fb9f38SJeff Kirsher enum RxConfig_bits {
428d9fb9f38SJeff Kirsher 	RxDrthMask		= 0x3e,
429d9fb9f38SJeff Kirsher 	RxMxdmaMask		= 0x700000,
430d9fb9f38SJeff Kirsher 	RxMxdma_512		= 0x0,
431d9fb9f38SJeff Kirsher 	RxMxdma_4		= 0x100000,
432d9fb9f38SJeff Kirsher 	RxMxdma_8		= 0x200000,
433d9fb9f38SJeff Kirsher 	RxMxdma_16		= 0x300000,
434d9fb9f38SJeff Kirsher 	RxMxdma_32		= 0x400000,
435d9fb9f38SJeff Kirsher 	RxMxdma_64		= 0x500000,
436d9fb9f38SJeff Kirsher 	RxMxdma_128		= 0x600000,
437d9fb9f38SJeff Kirsher 	RxMxdma_256		= 0x700000,
438d9fb9f38SJeff Kirsher 	RxAcceptLong		= 0x8000000,
439d9fb9f38SJeff Kirsher 	RxAcceptTx		= 0x10000000,
440d9fb9f38SJeff Kirsher 	RxAcceptRunt		= 0x40000000,
441d9fb9f38SJeff Kirsher 	RxAcceptErr		= 0x80000000
442d9fb9f38SJeff Kirsher };
443d9fb9f38SJeff Kirsher #define RX_DRTH_VAL		(128/8)
444d9fb9f38SJeff Kirsher 
445d9fb9f38SJeff Kirsher enum ClkRun_bits {
446d9fb9f38SJeff Kirsher 	PMEEnable		= 0x100,
447d9fb9f38SJeff Kirsher 	PMEStatus		= 0x8000,
448d9fb9f38SJeff Kirsher };
449d9fb9f38SJeff Kirsher 
450d9fb9f38SJeff Kirsher enum WolCmd_bits {
451d9fb9f38SJeff Kirsher 	WakePhy			= 0x1,
452d9fb9f38SJeff Kirsher 	WakeUnicast		= 0x2,
453d9fb9f38SJeff Kirsher 	WakeMulticast		= 0x4,
454d9fb9f38SJeff Kirsher 	WakeBroadcast		= 0x8,
455d9fb9f38SJeff Kirsher 	WakeArp			= 0x10,
456d9fb9f38SJeff Kirsher 	WakePMatch0		= 0x20,
457d9fb9f38SJeff Kirsher 	WakePMatch1		= 0x40,
458d9fb9f38SJeff Kirsher 	WakePMatch2		= 0x80,
459d9fb9f38SJeff Kirsher 	WakePMatch3		= 0x100,
460d9fb9f38SJeff Kirsher 	WakeMagic		= 0x200,
461d9fb9f38SJeff Kirsher 	WakeMagicSecure		= 0x400,
462d9fb9f38SJeff Kirsher 	SecureHack		= 0x100000,
463d9fb9f38SJeff Kirsher 	WokePhy			= 0x400000,
464d9fb9f38SJeff Kirsher 	WokeUnicast		= 0x800000,
465d9fb9f38SJeff Kirsher 	WokeMulticast		= 0x1000000,
466d9fb9f38SJeff Kirsher 	WokeBroadcast		= 0x2000000,
467d9fb9f38SJeff Kirsher 	WokeArp			= 0x4000000,
468d9fb9f38SJeff Kirsher 	WokePMatch0		= 0x8000000,
469d9fb9f38SJeff Kirsher 	WokePMatch1		= 0x10000000,
470d9fb9f38SJeff Kirsher 	WokePMatch2		= 0x20000000,
471d9fb9f38SJeff Kirsher 	WokePMatch3		= 0x40000000,
472d9fb9f38SJeff Kirsher 	WokeMagic		= 0x80000000,
473d9fb9f38SJeff Kirsher 	WakeOptsSummary		= 0x7ff
474d9fb9f38SJeff Kirsher };
475d9fb9f38SJeff Kirsher 
476d9fb9f38SJeff Kirsher enum RxFilterAddr_bits {
477d9fb9f38SJeff Kirsher 	RFCRAddressMask		= 0x3ff,
478d9fb9f38SJeff Kirsher 	AcceptMulticast		= 0x00200000,
479d9fb9f38SJeff Kirsher 	AcceptMyPhys		= 0x08000000,
480d9fb9f38SJeff Kirsher 	AcceptAllPhys		= 0x10000000,
481d9fb9f38SJeff Kirsher 	AcceptAllMulticast	= 0x20000000,
482d9fb9f38SJeff Kirsher 	AcceptBroadcast		= 0x40000000,
483d9fb9f38SJeff Kirsher 	RxFilterEnable		= 0x80000000
484d9fb9f38SJeff Kirsher };
485d9fb9f38SJeff Kirsher 
486d9fb9f38SJeff Kirsher enum StatsCtrl_bits {
487d9fb9f38SJeff Kirsher 	StatsWarn		= 0x1,
488d9fb9f38SJeff Kirsher 	StatsFreeze		= 0x2,
489d9fb9f38SJeff Kirsher 	StatsClear		= 0x4,
490d9fb9f38SJeff Kirsher 	StatsStrobe		= 0x8,
491d9fb9f38SJeff Kirsher };
492d9fb9f38SJeff Kirsher 
493d9fb9f38SJeff Kirsher enum MIntrCtrl_bits {
494d9fb9f38SJeff Kirsher 	MICRIntEn		= 0x2,
495d9fb9f38SJeff Kirsher };
496d9fb9f38SJeff Kirsher 
497d9fb9f38SJeff Kirsher enum PhyCtrl_bits {
498d9fb9f38SJeff Kirsher 	PhyAddrMask		= 0x1f,
499d9fb9f38SJeff Kirsher };
500d9fb9f38SJeff Kirsher 
501d9fb9f38SJeff Kirsher #define PHY_ADDR_NONE		32
502d9fb9f38SJeff Kirsher #define PHY_ADDR_INTERNAL	1
503d9fb9f38SJeff Kirsher 
504d9fb9f38SJeff Kirsher /* values we might find in the silicon revision register */
505d9fb9f38SJeff Kirsher #define SRR_DP83815_C	0x0302
506d9fb9f38SJeff Kirsher #define SRR_DP83815_D	0x0403
507d9fb9f38SJeff Kirsher #define SRR_DP83816_A4	0x0504
508d9fb9f38SJeff Kirsher #define SRR_DP83816_A5	0x0505
509d9fb9f38SJeff Kirsher 
510d9fb9f38SJeff Kirsher /* The Rx and Tx buffer descriptors. */
511d9fb9f38SJeff Kirsher /* Note that using only 32 bit fields simplifies conversion to big-endian
512d9fb9f38SJeff Kirsher    architectures. */
513d9fb9f38SJeff Kirsher struct netdev_desc {
514d9fb9f38SJeff Kirsher 	__le32 next_desc;
515d9fb9f38SJeff Kirsher 	__le32 cmd_status;
516d9fb9f38SJeff Kirsher 	__le32 addr;
517d9fb9f38SJeff Kirsher 	__le32 software_use;
518d9fb9f38SJeff Kirsher };
519d9fb9f38SJeff Kirsher 
520d9fb9f38SJeff Kirsher /* Bits in network_desc.status */
521d9fb9f38SJeff Kirsher enum desc_status_bits {
522d9fb9f38SJeff Kirsher 	DescOwn=0x80000000, DescMore=0x40000000, DescIntr=0x20000000,
523d9fb9f38SJeff Kirsher 	DescNoCRC=0x10000000, DescPktOK=0x08000000,
524d9fb9f38SJeff Kirsher 	DescSizeMask=0xfff,
525d9fb9f38SJeff Kirsher 
526d9fb9f38SJeff Kirsher 	DescTxAbort=0x04000000, DescTxFIFO=0x02000000,
527d9fb9f38SJeff Kirsher 	DescTxCarrier=0x01000000, DescTxDefer=0x00800000,
528d9fb9f38SJeff Kirsher 	DescTxExcDefer=0x00400000, DescTxOOWCol=0x00200000,
529d9fb9f38SJeff Kirsher 	DescTxExcColl=0x00100000, DescTxCollCount=0x000f0000,
530d9fb9f38SJeff Kirsher 
531d9fb9f38SJeff Kirsher 	DescRxAbort=0x04000000, DescRxOver=0x02000000,
532d9fb9f38SJeff Kirsher 	DescRxDest=0x01800000, DescRxLong=0x00400000,
533d9fb9f38SJeff Kirsher 	DescRxRunt=0x00200000, DescRxInvalid=0x00100000,
534d9fb9f38SJeff Kirsher 	DescRxCRC=0x00080000, DescRxAlign=0x00040000,
535d9fb9f38SJeff Kirsher 	DescRxLoop=0x00020000, DesRxColl=0x00010000,
536d9fb9f38SJeff Kirsher };
537d9fb9f38SJeff Kirsher 
538d9fb9f38SJeff Kirsher struct netdev_private {
539d9fb9f38SJeff Kirsher 	/* Descriptor rings first for alignment */
540d9fb9f38SJeff Kirsher 	dma_addr_t ring_dma;
541d9fb9f38SJeff Kirsher 	struct netdev_desc *rx_ring;
542d9fb9f38SJeff Kirsher 	struct netdev_desc *tx_ring;
543d9fb9f38SJeff Kirsher 	/* The addresses of receive-in-place skbuffs */
544d9fb9f38SJeff Kirsher 	struct sk_buff *rx_skbuff[RX_RING_SIZE];
545d9fb9f38SJeff Kirsher 	dma_addr_t rx_dma[RX_RING_SIZE];
546d9fb9f38SJeff Kirsher 	/* address of a sent-in-place packet/buffer, for later free() */
547d9fb9f38SJeff Kirsher 	struct sk_buff *tx_skbuff[TX_RING_SIZE];
548d9fb9f38SJeff Kirsher 	dma_addr_t tx_dma[TX_RING_SIZE];
549d9fb9f38SJeff Kirsher 	struct net_device *dev;
550d710ce13SFrancois Romieu 	void __iomem *ioaddr;
551d9fb9f38SJeff Kirsher 	struct napi_struct napi;
552d9fb9f38SJeff Kirsher 	/* Media monitoring timer */
553d9fb9f38SJeff Kirsher 	struct timer_list timer;
554d9fb9f38SJeff Kirsher 	/* Frequently used values: keep some adjacent for cache effect */
555d9fb9f38SJeff Kirsher 	struct pci_dev *pci_dev;
556d9fb9f38SJeff Kirsher 	struct netdev_desc *rx_head_desc;
557d9fb9f38SJeff Kirsher 	/* Producer/consumer ring indices */
558d9fb9f38SJeff Kirsher 	unsigned int cur_rx, dirty_rx;
559d9fb9f38SJeff Kirsher 	unsigned int cur_tx, dirty_tx;
560d9fb9f38SJeff Kirsher 	/* Based on MTU+slack. */
561d9fb9f38SJeff Kirsher 	unsigned int rx_buf_sz;
562d9fb9f38SJeff Kirsher 	int oom;
563d9fb9f38SJeff Kirsher 	/* Interrupt status */
564d9fb9f38SJeff Kirsher 	u32 intr_status;
565d9fb9f38SJeff Kirsher 	/* Do not touch the nic registers */
566d9fb9f38SJeff Kirsher 	int hands_off;
567d9fb9f38SJeff Kirsher 	/* Don't pay attention to the reported link state. */
568d9fb9f38SJeff Kirsher 	int ignore_phy;
569d9fb9f38SJeff Kirsher 	/* external phy that is used: only valid if dev->if_port != PORT_TP */
570d9fb9f38SJeff Kirsher 	int mii;
571d9fb9f38SJeff Kirsher 	int phy_addr_external;
572d9fb9f38SJeff Kirsher 	unsigned int full_duplex;
573d9fb9f38SJeff Kirsher 	/* Rx filter */
574d9fb9f38SJeff Kirsher 	u32 cur_rx_mode;
575d9fb9f38SJeff Kirsher 	u32 rx_filter[16];
576d9fb9f38SJeff Kirsher 	/* FIFO and PCI burst thresholds */
577d9fb9f38SJeff Kirsher 	u32 tx_config, rx_config;
578d9fb9f38SJeff Kirsher 	/* original contents of ClkRun register */
579d9fb9f38SJeff Kirsher 	u32 SavedClkRun;
580d9fb9f38SJeff Kirsher 	/* silicon revision */
581d9fb9f38SJeff Kirsher 	u32 srr;
582d9fb9f38SJeff Kirsher 	/* expected DSPCFG value */
583d9fb9f38SJeff Kirsher 	u16 dspcfg;
584d9fb9f38SJeff Kirsher 	int dspcfg_workaround;
585d9fb9f38SJeff Kirsher 	/* parms saved in ethtool format */
586d9fb9f38SJeff Kirsher 	u16	speed;		/* The forced speed, 10Mb, 100Mb, gigabit */
587d9fb9f38SJeff Kirsher 	u8	duplex;		/* Duplex, half or full */
588d9fb9f38SJeff Kirsher 	u8	autoneg;	/* Autonegotiation enabled */
589d9fb9f38SJeff Kirsher 	/* MII transceiver section */
590d9fb9f38SJeff Kirsher 	u16 advertising;
591d9fb9f38SJeff Kirsher 	unsigned int iosize;
592d9fb9f38SJeff Kirsher 	spinlock_t lock;
593d9fb9f38SJeff Kirsher 	u32 msg_enable;
594d9fb9f38SJeff Kirsher 	/* EEPROM data */
595d9fb9f38SJeff Kirsher 	int eeprom_size;
596d9fb9f38SJeff Kirsher };
597d9fb9f38SJeff Kirsher 
598d9fb9f38SJeff Kirsher static void move_int_phy(struct net_device *dev, int addr);
599d9fb9f38SJeff Kirsher static int eeprom_read(void __iomem *ioaddr, int location);
600d9fb9f38SJeff Kirsher static int mdio_read(struct net_device *dev, int reg);
601d9fb9f38SJeff Kirsher static void mdio_write(struct net_device *dev, int reg, u16 data);
602d9fb9f38SJeff Kirsher static void init_phy_fixup(struct net_device *dev);
603d9fb9f38SJeff Kirsher static int miiport_read(struct net_device *dev, int phy_id, int reg);
604d9fb9f38SJeff Kirsher static void miiport_write(struct net_device *dev, int phy_id, int reg, u16 data);
605d9fb9f38SJeff Kirsher static int find_mii(struct net_device *dev);
606d9fb9f38SJeff Kirsher static void natsemi_reset(struct net_device *dev);
607d9fb9f38SJeff Kirsher static void natsemi_reload_eeprom(struct net_device *dev);
608d9fb9f38SJeff Kirsher static void natsemi_stop_rxtx(struct net_device *dev);
609d9fb9f38SJeff Kirsher static int netdev_open(struct net_device *dev);
610d9fb9f38SJeff Kirsher static void do_cable_magic(struct net_device *dev);
611d9fb9f38SJeff Kirsher static void undo_cable_magic(struct net_device *dev);
612d9fb9f38SJeff Kirsher static void check_link(struct net_device *dev);
613d9fb9f38SJeff Kirsher static void netdev_timer(unsigned long data);
614d9fb9f38SJeff Kirsher static void dump_ring(struct net_device *dev);
615d9fb9f38SJeff Kirsher static void ns_tx_timeout(struct net_device *dev);
616d9fb9f38SJeff Kirsher static int alloc_ring(struct net_device *dev);
617d9fb9f38SJeff Kirsher static void refill_rx(struct net_device *dev);
618d9fb9f38SJeff Kirsher static void init_ring(struct net_device *dev);
619d9fb9f38SJeff Kirsher static void drain_tx(struct net_device *dev);
620d9fb9f38SJeff Kirsher static void drain_ring(struct net_device *dev);
621d9fb9f38SJeff Kirsher static void free_ring(struct net_device *dev);
622d9fb9f38SJeff Kirsher static void reinit_ring(struct net_device *dev);
623d9fb9f38SJeff Kirsher static void init_registers(struct net_device *dev);
624d9fb9f38SJeff Kirsher static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev);
625d9fb9f38SJeff Kirsher static irqreturn_t intr_handler(int irq, void *dev_instance);
626d9fb9f38SJeff Kirsher static void netdev_error(struct net_device *dev, int intr_status);
627d9fb9f38SJeff Kirsher static int natsemi_poll(struct napi_struct *napi, int budget);
628d9fb9f38SJeff Kirsher static void netdev_rx(struct net_device *dev, int *work_done, int work_to_do);
629d9fb9f38SJeff Kirsher static void netdev_tx_done(struct net_device *dev);
630d9fb9f38SJeff Kirsher static int natsemi_change_mtu(struct net_device *dev, int new_mtu);
631d9fb9f38SJeff Kirsher #ifdef CONFIG_NET_POLL_CONTROLLER
632d9fb9f38SJeff Kirsher static void natsemi_poll_controller(struct net_device *dev);
633d9fb9f38SJeff Kirsher #endif
634d9fb9f38SJeff Kirsher static void __set_rx_mode(struct net_device *dev);
635d9fb9f38SJeff Kirsher static void set_rx_mode(struct net_device *dev);
636d9fb9f38SJeff Kirsher static void __get_stats(struct net_device *dev);
637d9fb9f38SJeff Kirsher static struct net_device_stats *get_stats(struct net_device *dev);
638d9fb9f38SJeff Kirsher static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
639d9fb9f38SJeff Kirsher static int netdev_set_wol(struct net_device *dev, u32 newval);
640d9fb9f38SJeff Kirsher static int netdev_get_wol(struct net_device *dev, u32 *supported, u32 *cur);
641d9fb9f38SJeff Kirsher static int netdev_set_sopass(struct net_device *dev, u8 *newval);
642d9fb9f38SJeff Kirsher static int netdev_get_sopass(struct net_device *dev, u8 *data);
643d9fb9f38SJeff Kirsher static int netdev_get_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd);
644d9fb9f38SJeff Kirsher static int netdev_set_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd);
645d9fb9f38SJeff Kirsher static void enable_wol_mode(struct net_device *dev, int enable_intr);
646d9fb9f38SJeff Kirsher static int netdev_close(struct net_device *dev);
647d9fb9f38SJeff Kirsher static int netdev_get_regs(struct net_device *dev, u8 *buf);
648d9fb9f38SJeff Kirsher static int netdev_get_eeprom(struct net_device *dev, u8 *buf);
649d9fb9f38SJeff Kirsher static const struct ethtool_ops ethtool_ops;
650d9fb9f38SJeff Kirsher 
651d9fb9f38SJeff Kirsher #define NATSEMI_ATTR(_name) \
652d9fb9f38SJeff Kirsher static ssize_t natsemi_show_##_name(struct device *dev, \
653d9fb9f38SJeff Kirsher          struct device_attribute *attr, char *buf); \
654d9fb9f38SJeff Kirsher 	 static ssize_t natsemi_set_##_name(struct device *dev, \
655d9fb9f38SJeff Kirsher 		struct device_attribute *attr, \
656d9fb9f38SJeff Kirsher 	        const char *buf, size_t count); \
657d9fb9f38SJeff Kirsher 	 static DEVICE_ATTR(_name, 0644, natsemi_show_##_name, natsemi_set_##_name)
658d9fb9f38SJeff Kirsher 
659d9fb9f38SJeff Kirsher #define NATSEMI_CREATE_FILE(_dev, _name) \
660d9fb9f38SJeff Kirsher          device_create_file(&_dev->dev, &dev_attr_##_name)
661d9fb9f38SJeff Kirsher #define NATSEMI_REMOVE_FILE(_dev, _name) \
662d9fb9f38SJeff Kirsher          device_remove_file(&_dev->dev, &dev_attr_##_name)
663d9fb9f38SJeff Kirsher 
664d9fb9f38SJeff Kirsher NATSEMI_ATTR(dspcfg_workaround);
665d9fb9f38SJeff Kirsher 
666d9fb9f38SJeff Kirsher static ssize_t natsemi_show_dspcfg_workaround(struct device *dev,
667d9fb9f38SJeff Kirsher 				  	      struct device_attribute *attr,
668d9fb9f38SJeff Kirsher 					      char *buf)
669d9fb9f38SJeff Kirsher {
670d9fb9f38SJeff Kirsher 	struct netdev_private *np = netdev_priv(to_net_dev(dev));
671d9fb9f38SJeff Kirsher 
672d9fb9f38SJeff Kirsher 	return sprintf(buf, "%s\n", np->dspcfg_workaround ? "on" : "off");
673d9fb9f38SJeff Kirsher }
674d9fb9f38SJeff Kirsher 
675d9fb9f38SJeff Kirsher static ssize_t natsemi_set_dspcfg_workaround(struct device *dev,
676d9fb9f38SJeff Kirsher 					     struct device_attribute *attr,
677d9fb9f38SJeff Kirsher 					     const char *buf, size_t count)
678d9fb9f38SJeff Kirsher {
679d9fb9f38SJeff Kirsher 	struct netdev_private *np = netdev_priv(to_net_dev(dev));
680d9fb9f38SJeff Kirsher 	int new_setting;
681d9fb9f38SJeff Kirsher 	unsigned long flags;
682d9fb9f38SJeff Kirsher 
683d9fb9f38SJeff Kirsher         /* Find out the new setting */
684d9fb9f38SJeff Kirsher         if (!strncmp("on", buf, count - 1) || !strncmp("1", buf, count - 1))
685d9fb9f38SJeff Kirsher                 new_setting = 1;
686d9fb9f38SJeff Kirsher         else if (!strncmp("off", buf, count - 1) ||
687d9fb9f38SJeff Kirsher                  !strncmp("0", buf, count - 1))
688d9fb9f38SJeff Kirsher 		new_setting = 0;
689d9fb9f38SJeff Kirsher 	else
690d9fb9f38SJeff Kirsher                  return count;
691d9fb9f38SJeff Kirsher 
692d9fb9f38SJeff Kirsher 	spin_lock_irqsave(&np->lock, flags);
693d9fb9f38SJeff Kirsher 
694d9fb9f38SJeff Kirsher 	np->dspcfg_workaround = new_setting;
695d9fb9f38SJeff Kirsher 
696d9fb9f38SJeff Kirsher 	spin_unlock_irqrestore(&np->lock, flags);
697d9fb9f38SJeff Kirsher 
698d9fb9f38SJeff Kirsher 	return count;
699d9fb9f38SJeff Kirsher }
700d9fb9f38SJeff Kirsher 
701d9fb9f38SJeff Kirsher static inline void __iomem *ns_ioaddr(struct net_device *dev)
702d9fb9f38SJeff Kirsher {
703d710ce13SFrancois Romieu 	struct netdev_private *np = netdev_priv(dev);
704d710ce13SFrancois Romieu 
705d710ce13SFrancois Romieu 	return np->ioaddr;
706d9fb9f38SJeff Kirsher }
707d9fb9f38SJeff Kirsher 
708d9fb9f38SJeff Kirsher static inline void natsemi_irq_enable(struct net_device *dev)
709d9fb9f38SJeff Kirsher {
710d9fb9f38SJeff Kirsher 	writel(1, ns_ioaddr(dev) + IntrEnable);
711d9fb9f38SJeff Kirsher 	readl(ns_ioaddr(dev) + IntrEnable);
712d9fb9f38SJeff Kirsher }
713d9fb9f38SJeff Kirsher 
714d9fb9f38SJeff Kirsher static inline void natsemi_irq_disable(struct net_device *dev)
715d9fb9f38SJeff Kirsher {
716d9fb9f38SJeff Kirsher 	writel(0, ns_ioaddr(dev) + IntrEnable);
717d9fb9f38SJeff Kirsher 	readl(ns_ioaddr(dev) + IntrEnable);
718d9fb9f38SJeff Kirsher }
719d9fb9f38SJeff Kirsher 
720d9fb9f38SJeff Kirsher static void move_int_phy(struct net_device *dev, int addr)
721d9fb9f38SJeff Kirsher {
722d9fb9f38SJeff Kirsher 	struct netdev_private *np = netdev_priv(dev);
723d9fb9f38SJeff Kirsher 	void __iomem *ioaddr = ns_ioaddr(dev);
724d9fb9f38SJeff Kirsher 	int target = 31;
725d9fb9f38SJeff Kirsher 
726d9fb9f38SJeff Kirsher 	/*
727d9fb9f38SJeff Kirsher 	 * The internal phy is visible on the external mii bus. Therefore we must
728d9fb9f38SJeff Kirsher 	 * move it away before we can send commands to an external phy.
729d9fb9f38SJeff Kirsher 	 * There are two addresses we must avoid:
730d9fb9f38SJeff Kirsher 	 * - the address on the external phy that is used for transmission.
731d9fb9f38SJeff Kirsher 	 * - the address that we want to access. User space can access phys
732d9fb9f38SJeff Kirsher 	 *   on the mii bus with SIOCGMIIREG/SIOCSMIIREG, independent from the
733d9fb9f38SJeff Kirsher 	 *   phy that is used for transmission.
734d9fb9f38SJeff Kirsher 	 */
735d9fb9f38SJeff Kirsher 
736d9fb9f38SJeff Kirsher 	if (target == addr)
737d9fb9f38SJeff Kirsher 		target--;
738d9fb9f38SJeff Kirsher 	if (target == np->phy_addr_external)
739d9fb9f38SJeff Kirsher 		target--;
740d9fb9f38SJeff Kirsher 	writew(target, ioaddr + PhyCtrl);
741d9fb9f38SJeff Kirsher 	readw(ioaddr + PhyCtrl);
742d9fb9f38SJeff Kirsher 	udelay(1);
743d9fb9f38SJeff Kirsher }
744d9fb9f38SJeff Kirsher 
7456980cbe4SBill Pemberton static void natsemi_init_media(struct net_device *dev)
746d9fb9f38SJeff Kirsher {
747d9fb9f38SJeff Kirsher 	struct netdev_private *np = netdev_priv(dev);
748d9fb9f38SJeff Kirsher 	u32 tmp;
749d9fb9f38SJeff Kirsher 
750d9fb9f38SJeff Kirsher 	if (np->ignore_phy)
751d9fb9f38SJeff Kirsher 		netif_carrier_on(dev);
752d9fb9f38SJeff Kirsher 	else
753d9fb9f38SJeff Kirsher 		netif_carrier_off(dev);
754d9fb9f38SJeff Kirsher 
755d9fb9f38SJeff Kirsher 	/* get the initial settings from hardware */
756d9fb9f38SJeff Kirsher 	tmp            = mdio_read(dev, MII_BMCR);
757d9fb9f38SJeff Kirsher 	np->speed      = (tmp & BMCR_SPEED100)? SPEED_100     : SPEED_10;
758d9fb9f38SJeff Kirsher 	np->duplex     = (tmp & BMCR_FULLDPLX)? DUPLEX_FULL   : DUPLEX_HALF;
759d9fb9f38SJeff Kirsher 	np->autoneg    = (tmp & BMCR_ANENABLE)? AUTONEG_ENABLE: AUTONEG_DISABLE;
760d9fb9f38SJeff Kirsher 	np->advertising= mdio_read(dev, MII_ADVERTISE);
761d9fb9f38SJeff Kirsher 
762d9fb9f38SJeff Kirsher 	if ((np->advertising & ADVERTISE_ALL) != ADVERTISE_ALL &&
763d9fb9f38SJeff Kirsher 	    netif_msg_probe(np)) {
764d9fb9f38SJeff Kirsher 		printk(KERN_INFO "natsemi %s: Transceiver default autonegotiation %s "
765d9fb9f38SJeff Kirsher 			"10%s %s duplex.\n",
766d9fb9f38SJeff Kirsher 			pci_name(np->pci_dev),
767d9fb9f38SJeff Kirsher 			(mdio_read(dev, MII_BMCR) & BMCR_ANENABLE)?
768d9fb9f38SJeff Kirsher 			  "enabled, advertise" : "disabled, force",
769d9fb9f38SJeff Kirsher 			(np->advertising &
770d9fb9f38SJeff Kirsher 			  (ADVERTISE_100FULL|ADVERTISE_100HALF))?
771d9fb9f38SJeff Kirsher 			    "0" : "",
772d9fb9f38SJeff Kirsher 			(np->advertising &
773d9fb9f38SJeff Kirsher 			  (ADVERTISE_100FULL|ADVERTISE_10FULL))?
774d9fb9f38SJeff Kirsher 			    "full" : "half");
775d9fb9f38SJeff Kirsher 	}
776d9fb9f38SJeff Kirsher 	if (netif_msg_probe(np))
777d9fb9f38SJeff Kirsher 		printk(KERN_INFO
778d9fb9f38SJeff Kirsher 			"natsemi %s: Transceiver status %#04x advertising %#04x.\n",
779d9fb9f38SJeff Kirsher 			pci_name(np->pci_dev), mdio_read(dev, MII_BMSR),
780d9fb9f38SJeff Kirsher 			np->advertising);
781d9fb9f38SJeff Kirsher 
782d9fb9f38SJeff Kirsher }
783d9fb9f38SJeff Kirsher 
784d9fb9f38SJeff Kirsher static const struct net_device_ops natsemi_netdev_ops = {
785d9fb9f38SJeff Kirsher 	.ndo_open		= netdev_open,
786d9fb9f38SJeff Kirsher 	.ndo_stop		= netdev_close,
787d9fb9f38SJeff Kirsher 	.ndo_start_xmit		= start_tx,
788d9fb9f38SJeff Kirsher 	.ndo_get_stats		= get_stats,
789afc4b13dSJiri Pirko 	.ndo_set_rx_mode	= set_rx_mode,
790d9fb9f38SJeff Kirsher 	.ndo_change_mtu		= natsemi_change_mtu,
791d9fb9f38SJeff Kirsher 	.ndo_do_ioctl		= netdev_ioctl,
792d9fb9f38SJeff Kirsher 	.ndo_tx_timeout 	= ns_tx_timeout,
793d9fb9f38SJeff Kirsher 	.ndo_set_mac_address 	= eth_mac_addr,
794d9fb9f38SJeff Kirsher 	.ndo_validate_addr	= eth_validate_addr,
795d9fb9f38SJeff Kirsher #ifdef CONFIG_NET_POLL_CONTROLLER
796d9fb9f38SJeff Kirsher 	.ndo_poll_controller	= natsemi_poll_controller,
797d9fb9f38SJeff Kirsher #endif
798d9fb9f38SJeff Kirsher };
799d9fb9f38SJeff Kirsher 
8001dd06ae8SGreg Kroah-Hartman static int natsemi_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
801d9fb9f38SJeff Kirsher {
802d9fb9f38SJeff Kirsher 	struct net_device *dev;
803d9fb9f38SJeff Kirsher 	struct netdev_private *np;
804d9fb9f38SJeff Kirsher 	int i, option, irq, chip_idx = ent->driver_data;
805d9fb9f38SJeff Kirsher 	static int find_cnt = -1;
806d9fb9f38SJeff Kirsher 	resource_size_t iostart;
807d9fb9f38SJeff Kirsher 	unsigned long iosize;
808d9fb9f38SJeff Kirsher 	void __iomem *ioaddr;
809d9fb9f38SJeff Kirsher 	const int pcibar = 1; /* PCI base address register */
810d9fb9f38SJeff Kirsher 	int prev_eedata;
811d9fb9f38SJeff Kirsher 	u32 tmp;
812d9fb9f38SJeff Kirsher 
813d9fb9f38SJeff Kirsher /* when built into the kernel, we only print version if device is found */
814d9fb9f38SJeff Kirsher #ifndef MODULE
815d9fb9f38SJeff Kirsher 	static int printed_version;
816d9fb9f38SJeff Kirsher 	if (!printed_version++)
817d9fb9f38SJeff Kirsher 		printk(version);
818d9fb9f38SJeff Kirsher #endif
819d9fb9f38SJeff Kirsher 
820d9fb9f38SJeff Kirsher 	i = pci_enable_device(pdev);
821d9fb9f38SJeff Kirsher 	if (i) return i;
822d9fb9f38SJeff Kirsher 
823d9fb9f38SJeff Kirsher 	/* natsemi has a non-standard PM control register
824d9fb9f38SJeff Kirsher 	 * in PCI config space.  Some boards apparently need
825d9fb9f38SJeff Kirsher 	 * to be brought to D0 in this manner.
826d9fb9f38SJeff Kirsher 	 */
827d9fb9f38SJeff Kirsher 	pci_read_config_dword(pdev, PCIPM, &tmp);
828d9fb9f38SJeff Kirsher 	if (tmp & PCI_PM_CTRL_STATE_MASK) {
829d9fb9f38SJeff Kirsher 		/* D0 state, disable PME assertion */
830d9fb9f38SJeff Kirsher 		u32 newtmp = tmp & ~PCI_PM_CTRL_STATE_MASK;
831d9fb9f38SJeff Kirsher 		pci_write_config_dword(pdev, PCIPM, newtmp);
832d9fb9f38SJeff Kirsher 	}
833d9fb9f38SJeff Kirsher 
834d9fb9f38SJeff Kirsher 	find_cnt++;
835d9fb9f38SJeff Kirsher 	iostart = pci_resource_start(pdev, pcibar);
836d9fb9f38SJeff Kirsher 	iosize = pci_resource_len(pdev, pcibar);
837d9fb9f38SJeff Kirsher 	irq = pdev->irq;
838d9fb9f38SJeff Kirsher 
839d9fb9f38SJeff Kirsher 	pci_set_master(pdev);
840d9fb9f38SJeff Kirsher 
841d9fb9f38SJeff Kirsher 	dev = alloc_etherdev(sizeof (struct netdev_private));
842d9fb9f38SJeff Kirsher 	if (!dev)
843d9fb9f38SJeff Kirsher 		return -ENOMEM;
844d9fb9f38SJeff Kirsher 	SET_NETDEV_DEV(dev, &pdev->dev);
845d9fb9f38SJeff Kirsher 
846d9fb9f38SJeff Kirsher 	i = pci_request_regions(pdev, DRV_NAME);
847d9fb9f38SJeff Kirsher 	if (i)
848d9fb9f38SJeff Kirsher 		goto err_pci_request_regions;
849d9fb9f38SJeff Kirsher 
850d9fb9f38SJeff Kirsher 	ioaddr = ioremap(iostart, iosize);
851d9fb9f38SJeff Kirsher 	if (!ioaddr) {
852d9fb9f38SJeff Kirsher 		i = -ENOMEM;
853d9fb9f38SJeff Kirsher 		goto err_ioremap;
854d9fb9f38SJeff Kirsher 	}
855d9fb9f38SJeff Kirsher 
856d9fb9f38SJeff Kirsher 	/* Work around the dropped serial bit. */
857d9fb9f38SJeff Kirsher 	prev_eedata = eeprom_read(ioaddr, 6);
858d9fb9f38SJeff Kirsher 	for (i = 0; i < 3; i++) {
859d9fb9f38SJeff Kirsher 		int eedata = eeprom_read(ioaddr, i + 7);
860d9fb9f38SJeff Kirsher 		dev->dev_addr[i*2] = (eedata << 1) + (prev_eedata >> 15);
861d9fb9f38SJeff Kirsher 		dev->dev_addr[i*2+1] = eedata >> 7;
862d9fb9f38SJeff Kirsher 		prev_eedata = eedata;
863d9fb9f38SJeff Kirsher 	}
864d9fb9f38SJeff Kirsher 
865d9fb9f38SJeff Kirsher 	np = netdev_priv(dev);
866d710ce13SFrancois Romieu 	np->ioaddr = ioaddr;
867d710ce13SFrancois Romieu 
868d9fb9f38SJeff Kirsher 	netif_napi_add(dev, &np->napi, natsemi_poll, 64);
869d9fb9f38SJeff Kirsher 	np->dev = dev;
870d9fb9f38SJeff Kirsher 
871d9fb9f38SJeff Kirsher 	np->pci_dev = pdev;
872d9fb9f38SJeff Kirsher 	pci_set_drvdata(pdev, dev);
873d9fb9f38SJeff Kirsher 	np->iosize = iosize;
874d9fb9f38SJeff Kirsher 	spin_lock_init(&np->lock);
875d9fb9f38SJeff Kirsher 	np->msg_enable = (debug >= 0) ? (1<<debug)-1 : NATSEMI_DEF_MSG;
876d9fb9f38SJeff Kirsher 	np->hands_off = 0;
877d9fb9f38SJeff Kirsher 	np->intr_status = 0;
878d9fb9f38SJeff Kirsher 	np->eeprom_size = natsemi_pci_info[chip_idx].eeprom_size;
879d9fb9f38SJeff Kirsher 	if (natsemi_pci_info[chip_idx].flags & NATSEMI_FLAG_IGNORE_PHY)
880d9fb9f38SJeff Kirsher 		np->ignore_phy = 1;
881d9fb9f38SJeff Kirsher 	else
882d9fb9f38SJeff Kirsher 		np->ignore_phy = 0;
883d9fb9f38SJeff Kirsher 	np->dspcfg_workaround = dspcfg_workaround;
884d9fb9f38SJeff Kirsher 
885d9fb9f38SJeff Kirsher 	/* Initial port:
886d9fb9f38SJeff Kirsher 	 * - If configured to ignore the PHY set up for external.
887d9fb9f38SJeff Kirsher 	 * - If the nic was configured to use an external phy and if find_mii
888d9fb9f38SJeff Kirsher 	 *   finds a phy: use external port, first phy that replies.
889d9fb9f38SJeff Kirsher 	 * - Otherwise: internal port.
890d9fb9f38SJeff Kirsher 	 * Note that the phy address for the internal phy doesn't matter:
891d9fb9f38SJeff Kirsher 	 * The address would be used to access a phy over the mii bus, but
892d9fb9f38SJeff Kirsher 	 * the internal phy is accessed through mapped registers.
893d9fb9f38SJeff Kirsher 	 */
894d9fb9f38SJeff Kirsher 	if (np->ignore_phy || readl(ioaddr + ChipConfig) & CfgExtPhy)
895d9fb9f38SJeff Kirsher 		dev->if_port = PORT_MII;
896d9fb9f38SJeff Kirsher 	else
897d9fb9f38SJeff Kirsher 		dev->if_port = PORT_TP;
898d9fb9f38SJeff Kirsher 	/* Reset the chip to erase previous misconfiguration. */
899d9fb9f38SJeff Kirsher 	natsemi_reload_eeprom(dev);
900d9fb9f38SJeff Kirsher 	natsemi_reset(dev);
901d9fb9f38SJeff Kirsher 
902d9fb9f38SJeff Kirsher 	if (dev->if_port != PORT_TP) {
903d9fb9f38SJeff Kirsher 		np->phy_addr_external = find_mii(dev);
904d9fb9f38SJeff Kirsher 		/* If we're ignoring the PHY it doesn't matter if we can't
905d9fb9f38SJeff Kirsher 		 * find one. */
906d9fb9f38SJeff Kirsher 		if (!np->ignore_phy && np->phy_addr_external == PHY_ADDR_NONE) {
907d9fb9f38SJeff Kirsher 			dev->if_port = PORT_TP;
908d9fb9f38SJeff Kirsher 			np->phy_addr_external = PHY_ADDR_INTERNAL;
909d9fb9f38SJeff Kirsher 		}
910d9fb9f38SJeff Kirsher 	} else {
911d9fb9f38SJeff Kirsher 		np->phy_addr_external = PHY_ADDR_INTERNAL;
912d9fb9f38SJeff Kirsher 	}
913d9fb9f38SJeff Kirsher 
914d9fb9f38SJeff Kirsher 	option = find_cnt < MAX_UNITS ? options[find_cnt] : 0;
915d9fb9f38SJeff Kirsher 	/* The lower four bits are the media type. */
916d9fb9f38SJeff Kirsher 	if (option) {
917d9fb9f38SJeff Kirsher 		if (option & 0x200)
918d9fb9f38SJeff Kirsher 			np->full_duplex = 1;
919d9fb9f38SJeff Kirsher 		if (option & 15)
920d9fb9f38SJeff Kirsher 			printk(KERN_INFO
921d9fb9f38SJeff Kirsher 				"natsemi %s: ignoring user supplied media type %d",
922d9fb9f38SJeff Kirsher 				pci_name(np->pci_dev), option & 15);
923d9fb9f38SJeff Kirsher 	}
924d9fb9f38SJeff Kirsher 	if (find_cnt < MAX_UNITS  &&  full_duplex[find_cnt])
925d9fb9f38SJeff Kirsher 		np->full_duplex = 1;
926d9fb9f38SJeff Kirsher 
927d9fb9f38SJeff Kirsher 	dev->netdev_ops = &natsemi_netdev_ops;
928d9fb9f38SJeff Kirsher 	dev->watchdog_timeo = TX_TIMEOUT;
929d9fb9f38SJeff Kirsher 
9307ad24ea4SWilfried Klaebe 	dev->ethtool_ops = &ethtool_ops;
931d9fb9f38SJeff Kirsher 
932d9fb9f38SJeff Kirsher 	if (mtu)
933d9fb9f38SJeff Kirsher 		dev->mtu = mtu;
934d9fb9f38SJeff Kirsher 
935d9fb9f38SJeff Kirsher 	natsemi_init_media(dev);
936d9fb9f38SJeff Kirsher 
937d9fb9f38SJeff Kirsher 	/* save the silicon revision for later querying */
938d9fb9f38SJeff Kirsher 	np->srr = readl(ioaddr + SiliconRev);
939d9fb9f38SJeff Kirsher 	if (netif_msg_hw(np))
940d9fb9f38SJeff Kirsher 		printk(KERN_INFO "natsemi %s: silicon revision %#04x.\n",
941d9fb9f38SJeff Kirsher 				pci_name(np->pci_dev), np->srr);
942d9fb9f38SJeff Kirsher 
943d9fb9f38SJeff Kirsher 	i = register_netdev(dev);
944d9fb9f38SJeff Kirsher 	if (i)
945d9fb9f38SJeff Kirsher 		goto err_register_netdev;
94652428d91SPeter Senna Tschudin 	i = NATSEMI_CREATE_FILE(pdev, dspcfg_workaround);
94752428d91SPeter Senna Tschudin 	if (i)
948d9fb9f38SJeff Kirsher 		goto err_create_file;
949d9fb9f38SJeff Kirsher 
950d9fb9f38SJeff Kirsher 	if (netif_msg_drv(np)) {
951d9fb9f38SJeff Kirsher 		printk(KERN_INFO "natsemi %s: %s at %#08llx "
952d9fb9f38SJeff Kirsher 		       "(%s), %pM, IRQ %d",
953d9fb9f38SJeff Kirsher 		       dev->name, natsemi_pci_info[chip_idx].name,
954d9fb9f38SJeff Kirsher 		       (unsigned long long)iostart, pci_name(np->pci_dev),
955d9fb9f38SJeff Kirsher 		       dev->dev_addr, irq);
956d9fb9f38SJeff Kirsher 		if (dev->if_port == PORT_TP)
957d9fb9f38SJeff Kirsher 			printk(", port TP.\n");
958d9fb9f38SJeff Kirsher 		else if (np->ignore_phy)
959d9fb9f38SJeff Kirsher 			printk(", port MII, ignoring PHY\n");
960d9fb9f38SJeff Kirsher 		else
961d9fb9f38SJeff Kirsher 			printk(", port MII, phy ad %d.\n", np->phy_addr_external);
962d9fb9f38SJeff Kirsher 	}
963d9fb9f38SJeff Kirsher 	return 0;
964d9fb9f38SJeff Kirsher 
965d9fb9f38SJeff Kirsher  err_create_file:
966d9fb9f38SJeff Kirsher  	unregister_netdev(dev);
967d9fb9f38SJeff Kirsher 
968d9fb9f38SJeff Kirsher  err_register_netdev:
969d9fb9f38SJeff Kirsher 	iounmap(ioaddr);
970d9fb9f38SJeff Kirsher 
971d9fb9f38SJeff Kirsher  err_ioremap:
972d9fb9f38SJeff Kirsher 	pci_release_regions(pdev);
973d9fb9f38SJeff Kirsher 
974d9fb9f38SJeff Kirsher  err_pci_request_regions:
975d9fb9f38SJeff Kirsher 	free_netdev(dev);
976d9fb9f38SJeff Kirsher 	return i;
977d9fb9f38SJeff Kirsher }
978d9fb9f38SJeff Kirsher 
979d9fb9f38SJeff Kirsher 
980d9fb9f38SJeff Kirsher /* Read the EEPROM and MII Management Data I/O (MDIO) interfaces.
981d9fb9f38SJeff Kirsher    The EEPROM code is for the common 93c06/46 EEPROMs with 6 bit addresses. */
982d9fb9f38SJeff Kirsher 
983d9fb9f38SJeff Kirsher /* Delay between EEPROM clock transitions.
984d9fb9f38SJeff Kirsher    No extra delay is needed with 33Mhz PCI, but future 66Mhz access may need
985d9fb9f38SJeff Kirsher    a delay.  Note that pre-2.0.34 kernels had a cache-alignment bug that
986d9fb9f38SJeff Kirsher    made udelay() unreliable.
987d9fb9f38SJeff Kirsher    The old method of using an ISA access as a delay, __SLOW_DOWN_IO__, is
988d9fb9f38SJeff Kirsher    deprecated.
989d9fb9f38SJeff Kirsher */
990d9fb9f38SJeff Kirsher #define eeprom_delay(ee_addr)	readl(ee_addr)
991d9fb9f38SJeff Kirsher 
992d9fb9f38SJeff Kirsher #define EE_Write0 (EE_ChipSelect)
993d9fb9f38SJeff Kirsher #define EE_Write1 (EE_ChipSelect | EE_DataIn)
994d9fb9f38SJeff Kirsher 
995d9fb9f38SJeff Kirsher /* The EEPROM commands include the alway-set leading bit. */
996d9fb9f38SJeff Kirsher enum EEPROM_Cmds {
997d9fb9f38SJeff Kirsher 	EE_WriteCmd=(5 << 6), EE_ReadCmd=(6 << 6), EE_EraseCmd=(7 << 6),
998d9fb9f38SJeff Kirsher };
999d9fb9f38SJeff Kirsher 
1000d9fb9f38SJeff Kirsher static int eeprom_read(void __iomem *addr, int location)
1001d9fb9f38SJeff Kirsher {
1002d9fb9f38SJeff Kirsher 	int i;
1003d9fb9f38SJeff Kirsher 	int retval = 0;
1004d9fb9f38SJeff Kirsher 	void __iomem *ee_addr = addr + EECtrl;
1005d9fb9f38SJeff Kirsher 	int read_cmd = location | EE_ReadCmd;
1006d9fb9f38SJeff Kirsher 
1007d9fb9f38SJeff Kirsher 	writel(EE_Write0, ee_addr);
1008d9fb9f38SJeff Kirsher 
1009d9fb9f38SJeff Kirsher 	/* Shift the read command bits out. */
1010d9fb9f38SJeff Kirsher 	for (i = 10; i >= 0; i--) {
1011d9fb9f38SJeff Kirsher 		short dataval = (read_cmd & (1 << i)) ? EE_Write1 : EE_Write0;
1012d9fb9f38SJeff Kirsher 		writel(dataval, ee_addr);
1013d9fb9f38SJeff Kirsher 		eeprom_delay(ee_addr);
1014d9fb9f38SJeff Kirsher 		writel(dataval | EE_ShiftClk, ee_addr);
1015d9fb9f38SJeff Kirsher 		eeprom_delay(ee_addr);
1016d9fb9f38SJeff Kirsher 	}
1017d9fb9f38SJeff Kirsher 	writel(EE_ChipSelect, ee_addr);
1018d9fb9f38SJeff Kirsher 	eeprom_delay(ee_addr);
1019d9fb9f38SJeff Kirsher 
1020d9fb9f38SJeff Kirsher 	for (i = 0; i < 16; i++) {
1021d9fb9f38SJeff Kirsher 		writel(EE_ChipSelect | EE_ShiftClk, ee_addr);
1022d9fb9f38SJeff Kirsher 		eeprom_delay(ee_addr);
1023d9fb9f38SJeff Kirsher 		retval |= (readl(ee_addr) & EE_DataOut) ? 1 << i : 0;
1024d9fb9f38SJeff Kirsher 		writel(EE_ChipSelect, ee_addr);
1025d9fb9f38SJeff Kirsher 		eeprom_delay(ee_addr);
1026d9fb9f38SJeff Kirsher 	}
1027d9fb9f38SJeff Kirsher 
1028d9fb9f38SJeff Kirsher 	/* Terminate the EEPROM access. */
1029d9fb9f38SJeff Kirsher 	writel(EE_Write0, ee_addr);
1030d9fb9f38SJeff Kirsher 	writel(0, ee_addr);
1031d9fb9f38SJeff Kirsher 	return retval;
1032d9fb9f38SJeff Kirsher }
1033d9fb9f38SJeff Kirsher 
1034d9fb9f38SJeff Kirsher /* MII transceiver control section.
1035d9fb9f38SJeff Kirsher  * The 83815 series has an internal transceiver, and we present the
1036d9fb9f38SJeff Kirsher  * internal management registers as if they were MII connected.
1037d9fb9f38SJeff Kirsher  * External Phy registers are referenced through the MII interface.
1038d9fb9f38SJeff Kirsher  */
1039d9fb9f38SJeff Kirsher 
1040d9fb9f38SJeff Kirsher /* clock transitions >= 20ns (25MHz)
1041d9fb9f38SJeff Kirsher  * One readl should be good to PCI @ 100MHz
1042d9fb9f38SJeff Kirsher  */
1043d9fb9f38SJeff Kirsher #define mii_delay(ioaddr)  readl(ioaddr + EECtrl)
1044d9fb9f38SJeff Kirsher 
1045d9fb9f38SJeff Kirsher static int mii_getbit (struct net_device *dev)
1046d9fb9f38SJeff Kirsher {
1047d9fb9f38SJeff Kirsher 	int data;
1048d9fb9f38SJeff Kirsher 	void __iomem *ioaddr = ns_ioaddr(dev);
1049d9fb9f38SJeff Kirsher 
1050d9fb9f38SJeff Kirsher 	writel(MII_ShiftClk, ioaddr + EECtrl);
1051d9fb9f38SJeff Kirsher 	data = readl(ioaddr + EECtrl);
1052d9fb9f38SJeff Kirsher 	writel(0, ioaddr + EECtrl);
1053d9fb9f38SJeff Kirsher 	mii_delay(ioaddr);
1054d9fb9f38SJeff Kirsher 	return (data & MII_Data)? 1 : 0;
1055d9fb9f38SJeff Kirsher }
1056d9fb9f38SJeff Kirsher 
1057d9fb9f38SJeff Kirsher static void mii_send_bits (struct net_device *dev, u32 data, int len)
1058d9fb9f38SJeff Kirsher {
1059d9fb9f38SJeff Kirsher 	u32 i;
1060d9fb9f38SJeff Kirsher 	void __iomem *ioaddr = ns_ioaddr(dev);
1061d9fb9f38SJeff Kirsher 
1062d9fb9f38SJeff Kirsher 	for (i = (1 << (len-1)); i; i >>= 1)
1063d9fb9f38SJeff Kirsher 	{
1064d9fb9f38SJeff Kirsher 		u32 mdio_val = MII_Write | ((data & i)? MII_Data : 0);
1065d9fb9f38SJeff Kirsher 		writel(mdio_val, ioaddr + EECtrl);
1066d9fb9f38SJeff Kirsher 		mii_delay(ioaddr);
1067d9fb9f38SJeff Kirsher 		writel(mdio_val | MII_ShiftClk, ioaddr + EECtrl);
1068d9fb9f38SJeff Kirsher 		mii_delay(ioaddr);
1069d9fb9f38SJeff Kirsher 	}
1070d9fb9f38SJeff Kirsher 	writel(0, ioaddr + EECtrl);
1071d9fb9f38SJeff Kirsher 	mii_delay(ioaddr);
1072d9fb9f38SJeff Kirsher }
1073d9fb9f38SJeff Kirsher 
1074d9fb9f38SJeff Kirsher static int miiport_read(struct net_device *dev, int phy_id, int reg)
1075d9fb9f38SJeff Kirsher {
1076d9fb9f38SJeff Kirsher 	u32 cmd;
1077d9fb9f38SJeff Kirsher 	int i;
1078d9fb9f38SJeff Kirsher 	u32 retval = 0;
1079d9fb9f38SJeff Kirsher 
1080d9fb9f38SJeff Kirsher 	/* Ensure sync */
1081d9fb9f38SJeff Kirsher 	mii_send_bits (dev, 0xffffffff, 32);
1082d9fb9f38SJeff Kirsher 	/* ST(2), OP(2), ADDR(5), REG#(5), TA(2), Data(16) total 32 bits */
1083d9fb9f38SJeff Kirsher 	/* ST,OP = 0110'b for read operation */
1084d9fb9f38SJeff Kirsher 	cmd = (0x06 << 10) | (phy_id << 5) | reg;
1085d9fb9f38SJeff Kirsher 	mii_send_bits (dev, cmd, 14);
1086d9fb9f38SJeff Kirsher 	/* Turnaround */
1087d9fb9f38SJeff Kirsher 	if (mii_getbit (dev))
1088d9fb9f38SJeff Kirsher 		return 0;
1089d9fb9f38SJeff Kirsher 	/* Read data */
1090d9fb9f38SJeff Kirsher 	for (i = 0; i < 16; i++) {
1091d9fb9f38SJeff Kirsher 		retval <<= 1;
1092d9fb9f38SJeff Kirsher 		retval |= mii_getbit (dev);
1093d9fb9f38SJeff Kirsher 	}
1094d9fb9f38SJeff Kirsher 	/* End cycle */
1095d9fb9f38SJeff Kirsher 	mii_getbit (dev);
1096d9fb9f38SJeff Kirsher 	return retval;
1097d9fb9f38SJeff Kirsher }
1098d9fb9f38SJeff Kirsher 
1099d9fb9f38SJeff Kirsher static void miiport_write(struct net_device *dev, int phy_id, int reg, u16 data)
1100d9fb9f38SJeff Kirsher {
1101d9fb9f38SJeff Kirsher 	u32 cmd;
1102d9fb9f38SJeff Kirsher 
1103d9fb9f38SJeff Kirsher 	/* Ensure sync */
1104d9fb9f38SJeff Kirsher 	mii_send_bits (dev, 0xffffffff, 32);
1105d9fb9f38SJeff Kirsher 	/* ST(2), OP(2), ADDR(5), REG#(5), TA(2), Data(16) total 32 bits */
1106d9fb9f38SJeff Kirsher 	/* ST,OP,AAAAA,RRRRR,TA = 0101xxxxxxxxxx10'b = 0x5002 for write */
1107d9fb9f38SJeff Kirsher 	cmd = (0x5002 << 16) | (phy_id << 23) | (reg << 18) | data;
1108d9fb9f38SJeff Kirsher 	mii_send_bits (dev, cmd, 32);
1109d9fb9f38SJeff Kirsher 	/* End cycle */
1110d9fb9f38SJeff Kirsher 	mii_getbit (dev);
1111d9fb9f38SJeff Kirsher }
1112d9fb9f38SJeff Kirsher 
1113d9fb9f38SJeff Kirsher static int mdio_read(struct net_device *dev, int reg)
1114d9fb9f38SJeff Kirsher {
1115d9fb9f38SJeff Kirsher 	struct netdev_private *np = netdev_priv(dev);
1116d9fb9f38SJeff Kirsher 	void __iomem *ioaddr = ns_ioaddr(dev);
1117d9fb9f38SJeff Kirsher 
1118d9fb9f38SJeff Kirsher 	/* The 83815 series has two ports:
1119d9fb9f38SJeff Kirsher 	 * - an internal transceiver
1120d9fb9f38SJeff Kirsher 	 * - an external mii bus
1121d9fb9f38SJeff Kirsher 	 */
1122d9fb9f38SJeff Kirsher 	if (dev->if_port == PORT_TP)
1123d9fb9f38SJeff Kirsher 		return readw(ioaddr+BasicControl+(reg<<2));
1124d9fb9f38SJeff Kirsher 	else
1125d9fb9f38SJeff Kirsher 		return miiport_read(dev, np->phy_addr_external, reg);
1126d9fb9f38SJeff Kirsher }
1127d9fb9f38SJeff Kirsher 
1128d9fb9f38SJeff Kirsher static void mdio_write(struct net_device *dev, int reg, u16 data)
1129d9fb9f38SJeff Kirsher {
1130d9fb9f38SJeff Kirsher 	struct netdev_private *np = netdev_priv(dev);
1131d9fb9f38SJeff Kirsher 	void __iomem *ioaddr = ns_ioaddr(dev);
1132d9fb9f38SJeff Kirsher 
1133d9fb9f38SJeff Kirsher 	/* The 83815 series has an internal transceiver; handle separately */
1134d9fb9f38SJeff Kirsher 	if (dev->if_port == PORT_TP)
1135d9fb9f38SJeff Kirsher 		writew(data, ioaddr+BasicControl+(reg<<2));
1136d9fb9f38SJeff Kirsher 	else
1137d9fb9f38SJeff Kirsher 		miiport_write(dev, np->phy_addr_external, reg, data);
1138d9fb9f38SJeff Kirsher }
1139d9fb9f38SJeff Kirsher 
/*
 * init_phy_fixup - restore PHY state that is lost across power-down/reset.
 *
 * Reprograms BMCR from the values cached in netdev_private (either
 * re-enabling autonegotiation or forcing speed/duplex), re-reads the PHY
 * id into np->mii, applies transceiver-specific quirks for known external
 * PHYs, and - for the internal PHY only - applies the DSP configuration
 * sequence recommended by National Semiconductor, retrying until the
 * DSPCFG value sticks.
 */
static void init_phy_fixup(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = ns_ioaddr(dev);
	int i;
	u32 cfg;
	u16 tmp;

	/* restore stuff lost when power was out */
	tmp = mdio_read(dev, MII_BMCR);
	if (np->autoneg == AUTONEG_ENABLE) {
		/* renegotiate if something changed */
		if ((tmp & BMCR_ANENABLE) == 0 ||
		    np->advertising != mdio_read(dev, MII_ADVERTISE))
		{
			/* turn on autonegotiation and force negotiation */
			tmp |= (BMCR_ANENABLE | BMCR_ANRESTART);
			mdio_write(dev, MII_ADVERTISE, np->advertising);
		}
	} else {
		/* turn off auto negotiation, set speed and duplexity */
		tmp &= ~(BMCR_ANENABLE | BMCR_SPEED100 | BMCR_FULLDPLX);
		if (np->speed == SPEED_100)
			tmp |= BMCR_SPEED100;
		if (np->duplex == DUPLEX_FULL)
			tmp |= BMCR_FULLDPLX;
		/*
		 * Note: there is no good way to inform the link partner
		 * that our capabilities changed. The user has to unplug
		 * and replug the network cable after some changes, e.g.
		 * after switching from 10HD, autoneg off to 100 HD,
		 * autoneg off.
		 */
	}
	/* write the modified BMCR back; read ChipConfig to flush the
	 * posted write before delaying */
	mdio_write(dev, MII_BMCR, tmp);
	readl(ioaddr + ChipConfig);
	udelay(1);

	/* find out what phy this is */
	np->mii = (mdio_read(dev, MII_PHYSID1) << 16)
				+ mdio_read(dev, MII_PHYSID2);

	/* handle external phys here */
	switch (np->mii) {
	case PHYID_AM79C874:
		/* phy specific configuration for fibre/tp operation */
		tmp = mdio_read(dev, MII_MCTRL);
		tmp &= ~(MII_FX_SEL | MII_EN_SCRM);
		if (dev->if_port == PORT_FIBRE)
			tmp |= MII_FX_SEL;
		else
			tmp |= MII_EN_SCRM;
		mdio_write(dev, MII_MCTRL, tmp);
		break;
	default:
		break;
	}
	/* the remainder of the fixup applies only to the internal phy */
	cfg = readl(ioaddr + ChipConfig);
	if (cfg & CfgExtPhy)
		return;

	/* On page 78 of the spec, they recommend some settings for "optimum
	   performance" to be done in sequence.  These settings optimize some
	   of the 100Mbit autodetection circuitry.  They say we only want to
	   do this for rev C of the chip, but engineers at NSC (Bradley
	   Kennedy) recommends always setting them.  If you don't, you get
	   errors on some autonegotiations that make the device unusable.

	   It seems that the DSP needs a few usec to reinitialize after
	   the start of the phy. Just retry writing these values until they
	   stick.
	*/
	for (i=0;i<NATSEMI_HW_TIMEOUT;i++) {

		int dspcfg;
		/* PGSEL=1 exposes the page with PMDCSR/TSTDAT/DSPCFG/SDCFG */
		writew(1, ioaddr + PGSEL);
		writew(PMDCSR_VAL, ioaddr + PMDCSR);
		writew(TSTDAT_VAL, ioaddr + TSTDAT);
		/* older (<= rev C DP83815) silicon takes a fixed DSPCFG;
		 * later chips keep their value, only OR-ing in DSPCFG_COEF */
		np->dspcfg = (np->srr <= SRR_DP83815_C)?
			DSPCFG_VAL : (DSPCFG_COEF | readw(ioaddr + DSPCFG));
		writew(np->dspcfg, ioaddr + DSPCFG);
		writew(SDCFG_VAL, ioaddr + SDCFG);
		writew(0, ioaddr + PGSEL);
		readl(ioaddr + ChipConfig);
		udelay(10);

		/* read DSPCFG back; retry the whole sequence until it took */
		writew(1, ioaddr + PGSEL);
		dspcfg = readw(ioaddr + DSPCFG);
		writew(0, ioaddr + PGSEL);
		if (np->dspcfg == dspcfg)
			break;
	}

	if (netif_msg_link(np)) {
		if (i==NATSEMI_HW_TIMEOUT) {
			printk(KERN_INFO
				"%s: DSPCFG mismatch after retrying for %d usec.\n",
				dev->name, i*10);
		} else {
			printk(KERN_INFO
				"%s: DSPCFG accepted after %d usec.\n",
				dev->name, i*10);
		}
	}
	/*
	 * Enable PHY Specific event based interrupts.  Link state change
	 * and Auto-Negotiation Completion are among the affected.
	 * Read the intr status to clear it (needed for wake events).
	 */
	readw(ioaddr + MIntrStatus);
	writew(MICRIntEn, ioaddr + MIntrCtrl);
}
1252d9fb9f38SJeff Kirsher 
/*
 * switch_port_external - select the external MII transceiver.
 *
 * Returns 0 if the chip is already configured for the external PHY
 * (nothing done), 1 if a switch was performed.  The external PHY is
 * deliberately not reset (see comment below); instead the internal phy
 * is moved out of the way and the phy fixup is re-applied.
 */
static int switch_port_external(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = ns_ioaddr(dev);
	u32 cfg;

	cfg = readl(ioaddr + ChipConfig);
	if (cfg & CfgExtPhy)
		return 0;

	if (netif_msg_link(np)) {
		printk(KERN_INFO "%s: switching to external transceiver.\n",
				dev->name);
	}

	/* 1) switch back to external phy */
	writel(cfg | (CfgExtPhy | CfgPhyDis), ioaddr + ChipConfig);
	/* read back to flush the posted write before the settle delay */
	readl(ioaddr + ChipConfig);
	udelay(1);

	/* 2) reset the external phy: */
	/* resetting the external PHY has been known to cause a hub supplying
	 * power over Ethernet to kill the power.  We don't want to kill
	 * power to this computer, so we avoid resetting the phy.
	 */

	/* 3) reinit the phy fixup, it got lost during power down. */
	move_int_phy(dev, np->phy_addr_external);
	init_phy_fixup(dev);

	return 1;
}
1285d9fb9f38SJeff Kirsher 
/*
 * switch_port_internal - select the internal transceiver.
 *
 * Returns 0 if the chip is already using the internal PHY (nothing
 * done), 1 if a switch was performed.  Unlike the external path, the
 * internal phy IS reset here, and we poll BMCR until the self-clearing
 * reset bit drops before re-applying the phy fixup.
 */
static int switch_port_internal(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = ns_ioaddr(dev);
	int i;
	u32 cfg;
	u16 bmcr;

	cfg = readl(ioaddr + ChipConfig);
	if (!(cfg &CfgExtPhy))
		return 0;

	if (netif_msg_link(np)) {
		printk(KERN_INFO "%s: switching to internal transceiver.\n",
				dev->name);
	}
	/* 1) switch back to internal phy: */
	cfg = cfg & ~(CfgExtPhy | CfgPhyDis);
	writel(cfg, ioaddr + ChipConfig);
	/* read back to flush the posted write before the settle delay */
	readl(ioaddr + ChipConfig);
	udelay(1);

	/* 2) reset the internal phy: */
	/* the internal phy registers are memory-mapped at
	 * BasicControl + (MII register << 2) */
	bmcr = readw(ioaddr+BasicControl+(MII_BMCR<<2));
	writel(bmcr | BMCR_RESET, ioaddr+BasicControl+(MII_BMCR<<2));
	readl(ioaddr + ChipConfig);
	udelay(10);
	/* BMCR_RESET is self-clearing; poll for completion */
	for (i=0;i<NATSEMI_HW_TIMEOUT;i++) {
		bmcr = readw(ioaddr+BasicControl+(MII_BMCR<<2));
		if (!(bmcr & BMCR_RESET))
			break;
		udelay(10);
	}
	if (i==NATSEMI_HW_TIMEOUT && netif_msg_link(np)) {
		printk(KERN_INFO
			"%s: phy reset did not complete in %d usec.\n",
			dev->name, i*10);
	}
	/* 3) reinit the phy fixup, it got lost during power down. */
	init_phy_fixup(dev);

	return 1;
}
1329d9fb9f38SJeff Kirsher 
1330d9fb9f38SJeff Kirsher /* Scan for a PHY on the external mii bus.
1331d9fb9f38SJeff Kirsher  * There are two tricky points:
1332d9fb9f38SJeff Kirsher  * - Do not scan while the internal phy is enabled. The internal phy will
1333d9fb9f38SJeff Kirsher  *   crash: e.g. reads from the DSPCFG register will return odd values and
1334d9fb9f38SJeff Kirsher  *   the nasty random phy reset code will reset the nic every few seconds.
1335d9fb9f38SJeff Kirsher  * - The internal phy must be moved around, an external phy could
1336d9fb9f38SJeff Kirsher  *   have the same address as the internal phy.
1337d9fb9f38SJeff Kirsher  */
/*
 * find_mii - probe the external MII bus for a PHY.
 *
 * Scans addresses 1..31 (address 0 means isolate mode and is not
 * supported here), moving the internal phy out of the way before each
 * probe so an external phy at the same address remains reachable.
 * On success np->mii is filled with the PHY id.  Returns the address of
 * the first responding phy, or 32 if none was found.  If the port was
 * switched to external for the scan, it is switched back afterwards.
 */
static int find_mii(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int tmp;
	int i;
	int did_switch;

	/* Switch to external phy */
	did_switch = switch_port_external(dev);

	/* Scan the possible phy addresses:
	 *
	 * PHY address 0 means that the phy is in isolate mode. Not yet
	 * supported due to lack of test hardware. User space should
	 * handle it through ethtool.
	 */
	for (i = 1; i <= 31; i++) {
		move_int_phy(dev, i);
		tmp = miiport_read(dev, i, MII_BMSR);
		/* all-ones/all-zeroes BMSR means nothing answered */
		if (tmp != 0xffff && tmp != 0x0000) {
			/* found something! */
			np->mii = (mdio_read(dev, MII_PHYSID1) << 16)
					+ mdio_read(dev, MII_PHYSID2);
	 		if (netif_msg_probe(np)) {
				printk(KERN_INFO "natsemi %s: found external phy %08x at address %d.\n",
						pci_name(np->pci_dev), np->mii, i);
			}
			break;
		}
	}
	/* And switch back to internal phy: */
	if (did_switch)
		switch_port_internal(dev);
	return i;
}
1373d9fb9f38SJeff Kirsher 
1374d9fb9f38SJeff Kirsher /* CFG bits [13:16] [18:23] */
1375d9fb9f38SJeff Kirsher #define CFG_RESET_SAVE 0xfde000
1376d9fb9f38SJeff Kirsher /* WCSR bits [0:4] [9:10] */
1377d9fb9f38SJeff Kirsher #define WCSR_RESET_SAVE 0x61f
1378d9fb9f38SJeff Kirsher /* RFCR bits [20] [22] [27:31] */
1379d9fb9f38SJeff Kirsher #define RFCR_RESET_SAVE 0xf8500000
1380d9fb9f38SJeff Kirsher 
/*
 * natsemi_reset - soft-reset the chip while preserving EEPROM-derived state.
 *
 * A chip reset clears registers that are normally loaded from the EEPROM,
 * and reloading the EEPROM while live is discouraged by NatSemi.  So we
 * save the relevant CFG/WCSR/RFCR bits plus the PMATCH/SOPASS rx-filter
 * words, issue ChipReset, poll for completion, and write everything back.
 * Callers must follow up with init_registers() to finish reconfiguration.
 */
static void natsemi_reset(struct net_device *dev)
{
	int i;
	u32 cfg;
	u32 wcsr;
	u32 rfcr;
	u16 pmatch[3];
	u16 sopass[3];
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = ns_ioaddr(dev);

	/*
	 * Resetting the chip causes some registers to be lost.
	 * Natsemi suggests NOT reloading the EEPROM while live, so instead
	 * we save the state that would have been loaded from EEPROM
	 * on a normal power-up (see the spec EEPROM map).  This assumes
	 * whoever calls this will follow up with init_registers() eventually.
	 */

	/* CFG */
	cfg = readl(ioaddr + ChipConfig) & CFG_RESET_SAVE;
	/* WCSR */
	wcsr = readl(ioaddr + WOLCmd) & WCSR_RESET_SAVE;
	/* RFCR */
	rfcr = readl(ioaddr + RxFilterAddr) & RFCR_RESET_SAVE;
	/* PMATCH: three 16-bit words read through the indexed
	 * RxFilterAddr/RxFilterData window */
	for (i = 0; i < 3; i++) {
		writel(i*2, ioaddr + RxFilterAddr);
		pmatch[i] = readw(ioaddr + RxFilterData);
	}
	/* SOPAS: SecureOn password words, offsets 0xa/0xc/0xe */
	for (i = 0; i < 3; i++) {
		writel(0xa+(i*2), ioaddr + RxFilterAddr);
		sopass[i] = readw(ioaddr + RxFilterData);
	}

	/* now whack the chip: ChipReset is self-clearing, poll for it */
	writel(ChipReset, ioaddr + ChipCmd);
	for (i=0;i<NATSEMI_HW_TIMEOUT;i++) {
		if (!(readl(ioaddr + ChipCmd) & ChipReset))
			break;
		udelay(5);
	}
	if (i==NATSEMI_HW_TIMEOUT) {
		printk(KERN_WARNING "%s: reset did not complete in %d usec.\n",
			dev->name, i*5);
	} else if (netif_msg_hw(np)) {
		printk(KERN_DEBUG "%s: reset completed in %d usec.\n",
			dev->name, i*5);
	}

	/* restore CFG */
	cfg |= readl(ioaddr + ChipConfig) & ~CFG_RESET_SAVE;
	/* turn on external phy if it was selected */
	if (dev->if_port == PORT_TP)
		cfg &= ~(CfgExtPhy | CfgPhyDis);
	else
		cfg |= (CfgExtPhy | CfgPhyDis);
	writel(cfg, ioaddr + ChipConfig);
	/* restore WCSR */
	wcsr |= readl(ioaddr + WOLCmd) & ~WCSR_RESET_SAVE;
	writel(wcsr, ioaddr + WOLCmd);
	/* read RFCR */
	rfcr |= readl(ioaddr + RxFilterAddr) & ~RFCR_RESET_SAVE;
	/* restore PMATCH */
	for (i = 0; i < 3; i++) {
		writel(i*2, ioaddr + RxFilterAddr);
		writew(pmatch[i], ioaddr + RxFilterData);
	}
	for (i = 0; i < 3; i++) {
		writel(0xa+(i*2), ioaddr + RxFilterAddr);
		writew(sopass[i], ioaddr + RxFilterData);
	}
	/* restore RFCR last: writing it also leaves the filter address set */
	writel(rfcr, ioaddr + RxFilterAddr);
}
1457d9fb9f38SJeff Kirsher 
1458d9fb9f38SJeff Kirsher static void reset_rx(struct net_device *dev)
1459d9fb9f38SJeff Kirsher {
1460d9fb9f38SJeff Kirsher 	int i;
1461d9fb9f38SJeff Kirsher 	struct netdev_private *np = netdev_priv(dev);
1462d9fb9f38SJeff Kirsher 	void __iomem *ioaddr = ns_ioaddr(dev);
1463d9fb9f38SJeff Kirsher 
1464d9fb9f38SJeff Kirsher 	np->intr_status &= ~RxResetDone;
1465d9fb9f38SJeff Kirsher 
1466d9fb9f38SJeff Kirsher 	writel(RxReset, ioaddr + ChipCmd);
1467d9fb9f38SJeff Kirsher 
1468d9fb9f38SJeff Kirsher 	for (i=0;i<NATSEMI_HW_TIMEOUT;i++) {
1469d9fb9f38SJeff Kirsher 		np->intr_status |= readl(ioaddr + IntrStatus);
1470d9fb9f38SJeff Kirsher 		if (np->intr_status & RxResetDone)
1471d9fb9f38SJeff Kirsher 			break;
1472d9fb9f38SJeff Kirsher 		udelay(15);
1473d9fb9f38SJeff Kirsher 	}
1474d9fb9f38SJeff Kirsher 	if (i==NATSEMI_HW_TIMEOUT) {
1475d9fb9f38SJeff Kirsher 		printk(KERN_WARNING "%s: RX reset did not complete in %d usec.\n",
1476d9fb9f38SJeff Kirsher 		       dev->name, i*15);
1477d9fb9f38SJeff Kirsher 	} else if (netif_msg_hw(np)) {
1478d9fb9f38SJeff Kirsher 		printk(KERN_WARNING "%s: RX reset took %d usec.\n",
1479d9fb9f38SJeff Kirsher 		       dev->name, i*15);
1480d9fb9f38SJeff Kirsher 	}
1481d9fb9f38SJeff Kirsher }
1482d9fb9f38SJeff Kirsher 
/*
 * natsemi_reload_eeprom - trigger a reload of the configuration EEPROM.
 *
 * Sets the self-clearing EepromReload bit in PCIBusCfg and polls in
 * 50us steps until it drops or NATSEMI_HW_TIMEOUT iterations elapse,
 * logging a warning on timeout.
 */
static void natsemi_reload_eeprom(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = ns_ioaddr(dev);
	int i;

	writel(EepromReload, ioaddr + PCIBusCfg);
	for (i=0;i<NATSEMI_HW_TIMEOUT;i++) {
		udelay(50);
		if (!(readl(ioaddr + PCIBusCfg) & EepromReload))
			break;
	}
	if (i==NATSEMI_HW_TIMEOUT) {
		printk(KERN_WARNING "natsemi %s: EEPROM did not reload in %d usec.\n",
			pci_name(np->pci_dev), i*50);
	} else if (netif_msg_hw(np)) {
		printk(KERN_DEBUG "natsemi %s: EEPROM reloaded in %d usec.\n",
			pci_name(np->pci_dev), i*50);
	}
}
1503d9fb9f38SJeff Kirsher 
/*
 * natsemi_stop_rxtx - stop both the transmit and receive engines.
 *
 * Writes RxOff|TxOff to ChipCmd and polls until the TxOn/RxOn status
 * bits clear, warning if the processes fail to stop within
 * NATSEMI_HW_TIMEOUT polls (5us each).
 */
static void natsemi_stop_rxtx(struct net_device *dev)
{
	void __iomem * ioaddr = ns_ioaddr(dev);
	struct netdev_private *np = netdev_priv(dev);
	int i;

	writel(RxOff | TxOff, ioaddr + ChipCmd);
	for(i=0;i< NATSEMI_HW_TIMEOUT;i++) {
		if ((readl(ioaddr + ChipCmd) & (TxOn|RxOn)) == 0)
			break;
		udelay(5);
	}
	if (i==NATSEMI_HW_TIMEOUT) {
		printk(KERN_WARNING "%s: Tx/Rx process did not stop in %d usec.\n",
			dev->name, i*5);
	} else if (netif_msg_hw(np)) {
		printk(KERN_DEBUG "%s: Tx/Rx process stopped in %d usec.\n",
			dev->name, i*5);
	}
}
1524d9fb9f38SJeff Kirsher 
/*
 * netdev_open - bring the interface up (.ndo_open).
 *
 * Resets the chip, requests the shared PCI interrupt, allocates the
 * descriptor rings, programs the chip registers and the station MAC
 * address, starts the transmit queue and arms the periodic link-check
 * timer.  Returns 0 on success or a negative errno (from request_irq
 * or alloc_ring) with everything acquired so far released.
 */
static int netdev_open(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem * ioaddr = ns_ioaddr(dev);
	const int irq = np->pci_dev->irq;
	int i;

	/* Reset the chip, just in case. */
	natsemi_reset(dev);

	i = request_irq(irq, intr_handler, IRQF_SHARED, dev->name, dev);
	if (i) return i;

	if (netif_msg_ifup(np))
		printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
			dev->name, irq);
	i = alloc_ring(dev);
	if (i < 0) {
		/* ring allocation failed: release the irq we just took */
		free_irq(irq, dev);
		return i;
	}
	napi_enable(&np->napi);

	init_ring(dev);
	/* program registers and MAC address atomically w.r.t. the irq */
	spin_lock_irq(&np->lock);
	init_registers(dev);
	/* now set the MAC address according to dev->dev_addr */
	for (i = 0; i < 3; i++) {
		/* two address bytes per 16-bit filter word, little-endian */
		u16 mac = (dev->dev_addr[2*i+1]<<8) + dev->dev_addr[2*i];

		writel(i*2, ioaddr + RxFilterAddr);
		writew(mac, ioaddr + RxFilterData);
	}
	writel(np->cur_rx_mode, ioaddr + RxFilterAddr);
	spin_unlock_irq(&np->lock);

	netif_start_queue(dev);

	if (netif_msg_ifup(np))
		printk(KERN_DEBUG "%s: Done netdev_open(), status: %#08x.\n",
			dev->name, (int)readl(ioaddr + ChipCmd));

	/* Set the timer to check for link beat. */
	init_timer(&np->timer);
	np->timer.expires = round_jiffies(jiffies + NATSEMI_TIMER_FREQ);
	np->timer.data = (unsigned long)dev;
	np->timer.function = netdev_timer; /* timer handler */
	add_timer(&np->timer);

	return 0;
}
1576d9fb9f38SJeff Kirsher 
1577d9fb9f38SJeff Kirsher static void do_cable_magic(struct net_device *dev)
1578d9fb9f38SJeff Kirsher {
1579d9fb9f38SJeff Kirsher 	struct netdev_private *np = netdev_priv(dev);
1580d9fb9f38SJeff Kirsher 	void __iomem *ioaddr = ns_ioaddr(dev);
1581d9fb9f38SJeff Kirsher 
1582d9fb9f38SJeff Kirsher 	if (dev->if_port != PORT_TP)
1583d9fb9f38SJeff Kirsher 		return;
1584d9fb9f38SJeff Kirsher 
1585d9fb9f38SJeff Kirsher 	if (np->srr >= SRR_DP83816_A5)
1586d9fb9f38SJeff Kirsher 		return;
1587d9fb9f38SJeff Kirsher 
1588d9fb9f38SJeff Kirsher 	/*
1589d9fb9f38SJeff Kirsher 	 * 100 MBit links with short cables can trip an issue with the chip.
1590d9fb9f38SJeff Kirsher 	 * The problem manifests as lots of CRC errors and/or flickering
1591d9fb9f38SJeff Kirsher 	 * activity LED while idle.  This process is based on instructions
1592d9fb9f38SJeff Kirsher 	 * from engineers at National.
1593d9fb9f38SJeff Kirsher 	 */
1594d9fb9f38SJeff Kirsher 	if (readl(ioaddr + ChipConfig) & CfgSpeed100) {
1595d9fb9f38SJeff Kirsher 		u16 data;
1596d9fb9f38SJeff Kirsher 
1597d9fb9f38SJeff Kirsher 		writew(1, ioaddr + PGSEL);
1598d9fb9f38SJeff Kirsher 		/*
1599d9fb9f38SJeff Kirsher 		 * coefficient visibility should already be enabled via
1600d9fb9f38SJeff Kirsher 		 * DSPCFG | 0x1000
1601d9fb9f38SJeff Kirsher 		 */
1602d9fb9f38SJeff Kirsher 		data = readw(ioaddr + TSTDAT) & 0xff;
1603d9fb9f38SJeff Kirsher 		/*
1604d9fb9f38SJeff Kirsher 		 * the value must be negative, and within certain values
1605d9fb9f38SJeff Kirsher 		 * (these values all come from National)
1606d9fb9f38SJeff Kirsher 		 */
1607d9fb9f38SJeff Kirsher 		if (!(data & 0x80) || ((data >= 0xd8) && (data <= 0xff))) {
1608d9fb9f38SJeff Kirsher 			np = netdev_priv(dev);
1609d9fb9f38SJeff Kirsher 
1610d9fb9f38SJeff Kirsher 			/* the bug has been triggered - fix the coefficient */
1611d9fb9f38SJeff Kirsher 			writew(TSTDAT_FIXED, ioaddr + TSTDAT);
1612d9fb9f38SJeff Kirsher 			/* lock the value */
1613d9fb9f38SJeff Kirsher 			data = readw(ioaddr + DSPCFG);
1614d9fb9f38SJeff Kirsher 			np->dspcfg = data | DSPCFG_LOCK;
1615d9fb9f38SJeff Kirsher 			writew(np->dspcfg, ioaddr + DSPCFG);
1616d9fb9f38SJeff Kirsher 		}
1617d9fb9f38SJeff Kirsher 		writew(0, ioaddr + PGSEL);
1618d9fb9f38SJeff Kirsher 	}
1619d9fb9f38SJeff Kirsher }
1620d9fb9f38SJeff Kirsher 
/*
 * undo_cable_magic - revert the short-cable workaround on link loss.
 *
 * Clears the DSPCFG_LOCK bit set by do_cable_magic() so the DSP can
 * adapt again.  Like do_cable_magic(), this only applies to the
 * internal transceiver on pre-DP83816-A5 silicon.
 */
static void undo_cable_magic(struct net_device *dev)
{
	u16 data;
	struct netdev_private *np = netdev_priv(dev);
	void __iomem * ioaddr = ns_ioaddr(dev);

	if (dev->if_port != PORT_TP)
		return;

	if (np->srr >= SRR_DP83816_A5)
		return;

	writew(1, ioaddr + PGSEL);
	/* make sure the lock bit is clear */
	data = readw(ioaddr + DSPCFG);
	np->dspcfg = data & ~DSPCFG_LOCK;
	writew(np->dspcfg, ioaddr + DSPCFG);
	writew(0, ioaddr + PGSEL);
}
1640d9fb9f38SJeff Kirsher 
/*
 * check_link - poll PHY link status and propagate duplex to the chip.
 *
 * Reads BMSR twice (the link-failure bit is latched low until read),
 * updates the carrier state, applies/undoes the short-cable workaround,
 * derives the duplex from the negotiation result (or forced BMCR bits),
 * and rewrites TxConfig/RxConfig if the duplex-dependent bits disagree
 * with the current state.  With np->ignore_phy set, the PHY is not
 * touched and only the config propagation runs.
 */
static void check_link(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem * ioaddr = ns_ioaddr(dev);
	int duplex = np->duplex;
	u16 bmsr;

	/* If we are ignoring the PHY then don't try reading it. */
	if (np->ignore_phy)
		goto propagate_state;

	/* The link status field is latched: it remains low after a temporary
	 * link failure until it's read. We need the current link status,
	 * thus read twice.
	 */
	mdio_read(dev, MII_BMSR);
	bmsr = mdio_read(dev, MII_BMSR);

	if (!(bmsr & BMSR_LSTATUS)) {
		if (netif_carrier_ok(dev)) {
			if (netif_msg_link(np))
				printk(KERN_NOTICE "%s: link down.\n",
				       dev->name);
			netif_carrier_off(dev);
			undo_cable_magic(dev);
		}
		return;
	}
	if (!netif_carrier_ok(dev)) {
		if (netif_msg_link(np))
			printk(KERN_NOTICE "%s: link up.\n", dev->name);
		netif_carrier_on(dev);
		do_cable_magic(dev);
	}

	duplex = np->full_duplex;
	if (!duplex) {
		/* not forced full-duplex: derive it from the negotiation
		 * result, or from BMCR if autoneg hasn't completed */
		if (bmsr & BMSR_ANEGCOMPLETE) {
			int tmp = mii_nway_result(
				np->advertising & mdio_read(dev, MII_LPA));
			if (tmp == LPA_100FULL || tmp == LPA_10FULL)
				duplex = 1;
		} else if (mdio_read(dev, MII_BMCR) & BMCR_FULLDPLX)
			duplex = 1;
	}

propagate_state:
	/* if duplex is set then bit 28 must be set, too */
	if (duplex ^ !!(np->rx_config & RxAcceptTx)) {
		if (netif_msg_link(np))
			printk(KERN_INFO
				"%s: Setting %s-duplex based on negotiated "
				"link capability.\n", dev->name,
				duplex ? "full" : "half");
		if (duplex) {
			/* full duplex: accept own tx, ignore carrier/heartbeat */
			np->rx_config |= RxAcceptTx;
			np->tx_config |= TxCarrierIgn | TxHeartIgn;
		} else {
			np->rx_config &= ~RxAcceptTx;
			np->tx_config &= ~(TxCarrierIgn | TxHeartIgn);
		}
		writel(np->tx_config, ioaddr + TxConfig);
		writel(np->rx_config, ioaddr + RxConfig);
	}
}
1706d9fb9f38SJeff Kirsher 
/*
 * init_registers - program the chip after a reset.
 *
 * Re-applies the phy fixup, clears pending interrupts, loads the rx/tx
 * descriptor ring base addresses, configures DMA/FIFO thresholds,
 * disables PME, refreshes link/duplex state and the rx filter, enables
 * interrupts and finally starts the rx/tx engines and clears the
 * hardware statistics counters.
 */
static void init_registers(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem * ioaddr = ns_ioaddr(dev);

	init_phy_fixup(dev);

	/* clear any interrupts that are pending, such as wake events */
	readl(ioaddr + IntrStatus);

	/* rx and tx rings share one DMA allocation; tx follows rx */
	writel(np->ring_dma, ioaddr + RxRingPtr);
	writel(np->ring_dma + RX_RING_SIZE * sizeof(struct netdev_desc),
		ioaddr + TxRingPtr);

	/* Initialize other registers.
	 * Configure the PCI bus bursts and FIFO thresholds.
	 * Configure for standard, in-spec Ethernet.
	 * Start with half-duplex. check_link will update
	 * to the correct settings.
	 */

	/* DRTH: 2: start tx if 64 bytes are in the fifo
	 * FLTH: 0x10: refill with next packet if 512 bytes are free
	 * MXDMA: 0: up to 256 byte bursts.
	 * 	MXDMA must be <= FLTH
	 * ECRETRY=1
	 * ATP=1
	 */
	np->tx_config = TxAutoPad | TxCollRetry | TxMxdma_256 |
				TX_FLTH_VAL | TX_DRTH_VAL_START;
	writel(np->tx_config, ioaddr + TxConfig);

	/* DRTH 0x10: start copying to memory if 128 bytes are in the fifo
	 * MXDMA 0: up to 256 byte bursts
	 */
	np->rx_config = RxMxdma_256 | RX_DRTH_VAL;
	/* if receive ring now has bigger buffers than normal, enable jumbo */
	if (np->rx_buf_sz > NATSEMI_LONGPKT)
		np->rx_config |= RxAcceptLong;

	writel(np->rx_config, ioaddr + RxConfig);

	/* Disable PME:
	 * The PME bit is initialized from the EEPROM contents.
	 * PCI cards probably have PME disabled, but motherboard
	 * implementations may have PME set to enable WakeOnLan.
	 * With PME set the chip will scan incoming packets but
	 * nothing will be written to memory. */
	np->SavedClkRun = readl(ioaddr + ClkRun);
	writel(np->SavedClkRun & ~PMEEnable, ioaddr + ClkRun);
	if (np->SavedClkRun & PMEStatus && netif_msg_wol(np)) {
		printk(KERN_NOTICE "%s: Wake-up event %#08x\n",
			dev->name, readl(ioaddr + WOLCmd));
	}

	check_link(dev);
	__set_rx_mode(dev);

	/* Enable interrupts by setting the interrupt mask. */
	writel(DEFAULT_INTR, ioaddr + IntrMask);
	natsemi_irq_enable(dev);

	writel(RxOn | TxOn, ioaddr + ChipCmd);
	writel(StatsClear, ioaddr + StatsCtrl); /* Clear Stats */
}
1772d9fb9f38SJeff Kirsher 
1773d9fb9f38SJeff Kirsher /*
1774d9fb9f38SJeff Kirsher  * netdev_timer:
1775d9fb9f38SJeff Kirsher  * Purpose:
1776d9fb9f38SJeff Kirsher  * 1) check for link changes. Usually they are handled by the MII interrupt
1777d9fb9f38SJeff Kirsher  *    but it doesn't hurt to check twice.
1778d9fb9f38SJeff Kirsher  * 2) check for sudden death of the NIC:
1779d9fb9f38SJeff Kirsher  *    It seems that a reference set for this chip went out with incorrect info,
1780d9fb9f38SJeff Kirsher  *    and there exist boards that aren't quite right.  An unexpected voltage
1781d9fb9f38SJeff Kirsher  *    drop can cause the PHY to get itself in a weird state (basically reset).
1782d9fb9f38SJeff Kirsher  *    NOTE: this only seems to affect revC chips.  The user can disable
1783d9fb9f38SJeff Kirsher  *    this check via dspcfg_workaround sysfs option.
 * 3) check for death of the RX path due to OOM
1785d9fb9f38SJeff Kirsher  */
static void netdev_timer(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct netdev_private *np = netdev_priv(dev);
	void __iomem * ioaddr = ns_ioaddr(dev);
	int next_tick = NATSEMI_TIMER_FREQ;	/* normal polling interval */
	const int irq = np->pci_dev->irq;

	if (netif_msg_timer(np)) {
		/* DO NOT read the IntrStatus register,
		 * a read clears any pending interrupts.
		 */
		printk(KERN_DEBUG "%s: Media selection timer tick.\n",
			dev->name);
	}

	if (dev->if_port == PORT_TP) {
		u16 dspcfg;

		spin_lock_irq(&np->lock);
		/* check for a nasty random phy-reset - use dspcfg as a flag:
		 * if the PHY reset itself, DSPCFG no longer matches the value
		 * we programmed (np->dspcfg). */
		writew(1, ioaddr+PGSEL);
		dspcfg = readw(ioaddr+DSPCFG);
		writew(0, ioaddr+PGSEL);
		if (np->dspcfg_workaround && dspcfg != np->dspcfg) {
			if (!netif_queue_stopped(dev)) {
				spin_unlock_irq(&np->lock);
				if (netif_msg_drv(np))
					printk(KERN_NOTICE "%s: possible phy reset: "
						"re-initializing\n", dev->name);
				/* Keep the ISR out while the chip and rings
				 * are torn down and rebuilt. */
				disable_irq(irq);
				spin_lock_irq(&np->lock);
				natsemi_stop_rxtx(dev);
				dump_ring(dev);
				reinit_ring(dev);
				init_registers(dev);
				spin_unlock_irq(&np->lock);
				enable_irq(irq);
			} else {
				/* hurry back */
				next_tick = HZ;
				spin_unlock_irq(&np->lock);
			}
		} else {
			/* init_registers() calls check_link() for the above case */
			check_link(dev);
			spin_unlock_irq(&np->lock);
		}
	} else {
		spin_lock_irq(&np->lock);
		check_link(dev);
		spin_unlock_irq(&np->lock);
	}
	/* RX previously ran out of buffers: retry the refill with the
	 * interrupt handler excluded, then restart the receiver. */
	if (np->oom) {
		disable_irq(irq);
		np->oom = 0;
		refill_rx(dev);
		enable_irq(irq);
		if (!np->oom) {
			writel(RxOn, ioaddr + ChipCmd);
		} else {
			next_tick = 1;	/* still OOM - retry ASAP */
		}
	}

	/* Round ordinary ticks to batch timer wakeups; an urgent (1 jiffy)
	 * retry is scheduled exactly. */
	if (next_tick > 1)
		mod_timer(&np->timer, round_jiffies(jiffies + next_tick));
	else
		mod_timer(&np->timer, jiffies + next_tick);
}
1856d9fb9f38SJeff Kirsher 
1857d9fb9f38SJeff Kirsher static void dump_ring(struct net_device *dev)
1858d9fb9f38SJeff Kirsher {
1859d9fb9f38SJeff Kirsher 	struct netdev_private *np = netdev_priv(dev);
1860d9fb9f38SJeff Kirsher 
1861d9fb9f38SJeff Kirsher 	if (netif_msg_pktdata(np)) {
1862d9fb9f38SJeff Kirsher 		int i;
1863d9fb9f38SJeff Kirsher 		printk(KERN_DEBUG "  Tx ring at %p:\n", np->tx_ring);
1864d9fb9f38SJeff Kirsher 		for (i = 0; i < TX_RING_SIZE; i++) {
1865d9fb9f38SJeff Kirsher 			printk(KERN_DEBUG " #%d desc. %#08x %#08x %#08x.\n",
1866d9fb9f38SJeff Kirsher 				i, np->tx_ring[i].next_desc,
1867d9fb9f38SJeff Kirsher 				np->tx_ring[i].cmd_status,
1868d9fb9f38SJeff Kirsher 				np->tx_ring[i].addr);
1869d9fb9f38SJeff Kirsher 		}
1870d9fb9f38SJeff Kirsher 		printk(KERN_DEBUG "  Rx ring %p:\n", np->rx_ring);
1871d9fb9f38SJeff Kirsher 		for (i = 0; i < RX_RING_SIZE; i++) {
1872d9fb9f38SJeff Kirsher 			printk(KERN_DEBUG " #%d desc. %#08x %#08x %#08x.\n",
1873d9fb9f38SJeff Kirsher 				i, np->rx_ring[i].next_desc,
1874d9fb9f38SJeff Kirsher 				np->rx_ring[i].cmd_status,
1875d9fb9f38SJeff Kirsher 				np->rx_ring[i].addr);
1876d9fb9f38SJeff Kirsher 		}
1877d9fb9f38SJeff Kirsher 	}
1878d9fb9f38SJeff Kirsher }
1879d9fb9f38SJeff Kirsher 
/* Watchdog callback: the networking core detected a transmit stall.
 * Reset the chip and rings and restart the queue, unless ->hands_off
 * indicates the chip is deliberately not to be touched. */
static void ns_tx_timeout(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem * ioaddr = ns_ioaddr(dev);
	const int irq = np->pci_dev->irq;

	/* Exclude the interrupt handler for the duration of the reset. */
	disable_irq(irq);
	spin_lock_irq(&np->lock);
	if (!np->hands_off) {
		if (netif_msg_tx_err(np))
			printk(KERN_WARNING
				"%s: Transmit timed out, status %#08x,"
				" resetting...\n",
				dev->name, readl(ioaddr + IntrStatus));
		dump_ring(dev);

		natsemi_reset(dev);
		reinit_ring(dev);
		init_registers(dev);
	} else {
		/* Should not happen: with hands_off set nobody should be
		 * transmitting, so no timeout is expected either. */
		printk(KERN_WARNING
			"%s: tx_timeout while in hands_off state?\n",
			dev->name);
	}
	spin_unlock_irq(&np->lock);
	enable_irq(irq);

	dev->trans_start = jiffies; /* prevent tx timeout */
	dev->stats.tx_errors++;
	netif_wake_queue(dev);
}
1911d9fb9f38SJeff Kirsher 
1912d9fb9f38SJeff Kirsher static int alloc_ring(struct net_device *dev)
1913d9fb9f38SJeff Kirsher {
1914d9fb9f38SJeff Kirsher 	struct netdev_private *np = netdev_priv(dev);
1915d9fb9f38SJeff Kirsher 	np->rx_ring = pci_alloc_consistent(np->pci_dev,
1916d9fb9f38SJeff Kirsher 		sizeof(struct netdev_desc) * (RX_RING_SIZE+TX_RING_SIZE),
1917d9fb9f38SJeff Kirsher 		&np->ring_dma);
1918d9fb9f38SJeff Kirsher 	if (!np->rx_ring)
1919d9fb9f38SJeff Kirsher 		return -ENOMEM;
1920d9fb9f38SJeff Kirsher 	np->tx_ring = &np->rx_ring[RX_RING_SIZE];
1921d9fb9f38SJeff Kirsher 	return 0;
1922d9fb9f38SJeff Kirsher }
1923d9fb9f38SJeff Kirsher 
/* Attach fresh receive buffers to every empty slot between dirty_rx and
 * cur_rx.  Sets np->oom when the whole ring is starved of buffers so the
 * timer can retry later. */
static void refill_rx(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);

	/* Refill the Rx ring buffers. */
	for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
		struct sk_buff *skb;
		int entry = np->dirty_rx % RX_RING_SIZE;
		if (np->rx_skbuff[entry] == NULL) {
			/* buffers are mapped with NATSEMI_PADDING extra bytes;
			 * drain_rx()/netdev_rx() unmap the same length */
			unsigned int buflen = np->rx_buf_sz+NATSEMI_PADDING;
			skb = netdev_alloc_skb(dev, buflen);
			np->rx_skbuff[entry] = skb;
			if (skb == NULL)
				break; /* Better luck next round. */
			np->rx_dma[entry] = pci_map_single(np->pci_dev,
				skb->data, buflen, PCI_DMA_FROMDEVICE);
			np->rx_ring[entry].addr = cpu_to_le32(np->rx_dma[entry]);
		}
		/* Writing the plain buffer size (DescOwn clear) appears to
		 * return the slot to the NIC - consistent with netdev_rx()
		 * treating a negative cmd_status as driver-owned. */
		np->rx_ring[entry].cmd_status = cpu_to_le32(np->rx_buf_sz);
	}
	if (np->cur_rx - np->dirty_rx == RX_RING_SIZE) {
		/* not a single buffer could be (re)attached */
		if (netif_msg_rx_err(np))
			printk(KERN_WARNING "%s: going OOM.\n", dev->name);
		np->oom = 1;
	}
}
1950d9fb9f38SJeff Kirsher 
1951d9fb9f38SJeff Kirsher static void set_bufsize(struct net_device *dev)
1952d9fb9f38SJeff Kirsher {
1953d9fb9f38SJeff Kirsher 	struct netdev_private *np = netdev_priv(dev);
1954d9fb9f38SJeff Kirsher 	if (dev->mtu <= ETH_DATA_LEN)
1955d9fb9f38SJeff Kirsher 		np->rx_buf_sz = ETH_DATA_LEN + NATSEMI_HEADERS;
1956d9fb9f38SJeff Kirsher 	else
1957d9fb9f38SJeff Kirsher 		np->rx_buf_sz = dev->mtu + NATSEMI_HEADERS;
1958d9fb9f38SJeff Kirsher }
1959d9fb9f38SJeff Kirsher 
1960d9fb9f38SJeff Kirsher /* Initialize the Rx and Tx rings, along with various 'dev' bits. */
/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static void init_ring(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int i;

	/* 1) TX ring: empty, with each descriptor's next_desc pointer
	 * chaining to the following slot (the TX ring lives directly
	 * after the RX ring in the single coherent allocation). */
	np->dirty_tx = np->cur_tx = 0;
	for (i = 0; i < TX_RING_SIZE; i++) {
		np->tx_skbuff[i] = NULL;
		np->tx_ring[i].next_desc = cpu_to_le32(np->ring_dma
			+sizeof(struct netdev_desc)
			*((i+1)%TX_RING_SIZE+RX_RING_SIZE));
		np->tx_ring[i].cmd_status = 0;
	}

	/* 2) RX ring: cur_rx ahead of dirty_rx by a full ring so that
	 * refill_rx() below populates every slot. */
	np->dirty_rx = 0;
	np->cur_rx = RX_RING_SIZE;
	np->oom = 0;
	set_bufsize(dev);

	np->rx_head_desc = &np->rx_ring[0];

	/* Please be careful before changing this loop - at least gcc-2.95.1
	 * miscompiles it otherwise.
	 */
	/* Initialize all Rx descriptors. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].next_desc = cpu_to_le32(np->ring_dma
				+sizeof(struct netdev_desc)
				*((i+1)%RX_RING_SIZE));
		np->rx_ring[i].cmd_status = cpu_to_le32(DescOwn);
		np->rx_skbuff[i] = NULL;
	}
	refill_rx(dev);
	dump_ring(dev);
}
1998d9fb9f38SJeff Kirsher 
1999d9fb9f38SJeff Kirsher static void drain_tx(struct net_device *dev)
2000d9fb9f38SJeff Kirsher {
2001d9fb9f38SJeff Kirsher 	struct netdev_private *np = netdev_priv(dev);
2002d9fb9f38SJeff Kirsher 	int i;
2003d9fb9f38SJeff Kirsher 
2004d9fb9f38SJeff Kirsher 	for (i = 0; i < TX_RING_SIZE; i++) {
2005d9fb9f38SJeff Kirsher 		if (np->tx_skbuff[i]) {
2006d9fb9f38SJeff Kirsher 			pci_unmap_single(np->pci_dev,
2007d9fb9f38SJeff Kirsher 				np->tx_dma[i], np->tx_skbuff[i]->len,
2008d9fb9f38SJeff Kirsher 				PCI_DMA_TODEVICE);
2009d9fb9f38SJeff Kirsher 			dev_kfree_skb(np->tx_skbuff[i]);
2010d9fb9f38SJeff Kirsher 			dev->stats.tx_dropped++;
2011d9fb9f38SJeff Kirsher 		}
2012d9fb9f38SJeff Kirsher 		np->tx_skbuff[i] = NULL;
2013d9fb9f38SJeff Kirsher 	}
2014d9fb9f38SJeff Kirsher }
2015d9fb9f38SJeff Kirsher 
2016d9fb9f38SJeff Kirsher static void drain_rx(struct net_device *dev)
2017d9fb9f38SJeff Kirsher {
2018d9fb9f38SJeff Kirsher 	struct netdev_private *np = netdev_priv(dev);
2019d9fb9f38SJeff Kirsher 	unsigned int buflen = np->rx_buf_sz;
2020d9fb9f38SJeff Kirsher 	int i;
2021d9fb9f38SJeff Kirsher 
2022d9fb9f38SJeff Kirsher 	/* Free all the skbuffs in the Rx queue. */
2023d9fb9f38SJeff Kirsher 	for (i = 0; i < RX_RING_SIZE; i++) {
2024d9fb9f38SJeff Kirsher 		np->rx_ring[i].cmd_status = 0;
2025d9fb9f38SJeff Kirsher 		np->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
2026d9fb9f38SJeff Kirsher 		if (np->rx_skbuff[i]) {
2027d9fb9f38SJeff Kirsher 			pci_unmap_single(np->pci_dev, np->rx_dma[i],
2028d9fb9f38SJeff Kirsher 				buflen + NATSEMI_PADDING,
2029d9fb9f38SJeff Kirsher 				PCI_DMA_FROMDEVICE);
2030d9fb9f38SJeff Kirsher 			dev_kfree_skb(np->rx_skbuff[i]);
2031d9fb9f38SJeff Kirsher 		}
2032d9fb9f38SJeff Kirsher 		np->rx_skbuff[i] = NULL;
2033d9fb9f38SJeff Kirsher 	}
2034d9fb9f38SJeff Kirsher }
2035d9fb9f38SJeff Kirsher 
/* Free all RX and TX buffers; the descriptor rings themselves stay
 * allocated (released separately by free_ring()). */
static void drain_ring(struct net_device *dev)
{
	drain_rx(dev);
	drain_tx(dev);
}
2041d9fb9f38SJeff Kirsher 
/* Release the single coherent allocation that holds both descriptor
 * rings (RX first, TX directly after - the mirror of alloc_ring()). */
static void free_ring(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	pci_free_consistent(np->pci_dev,
		sizeof(struct netdev_desc) * (RX_RING_SIZE+TX_RING_SIZE),
		np->rx_ring, np->ring_dma);
}
2049d9fb9f38SJeff Kirsher 
2050d9fb9f38SJeff Kirsher static void reinit_rx(struct net_device *dev)
2051d9fb9f38SJeff Kirsher {
2052d9fb9f38SJeff Kirsher 	struct netdev_private *np = netdev_priv(dev);
2053d9fb9f38SJeff Kirsher 	int i;
2054d9fb9f38SJeff Kirsher 
2055d9fb9f38SJeff Kirsher 	/* RX Ring */
2056d9fb9f38SJeff Kirsher 	np->dirty_rx = 0;
2057d9fb9f38SJeff Kirsher 	np->cur_rx = RX_RING_SIZE;
2058d9fb9f38SJeff Kirsher 	np->rx_head_desc = &np->rx_ring[0];
2059d9fb9f38SJeff Kirsher 	/* Initialize all Rx descriptors. */
2060d9fb9f38SJeff Kirsher 	for (i = 0; i < RX_RING_SIZE; i++)
2061d9fb9f38SJeff Kirsher 		np->rx_ring[i].cmd_status = cpu_to_le32(DescOwn);
2062d9fb9f38SJeff Kirsher 
2063d9fb9f38SJeff Kirsher 	refill_rx(dev);
2064d9fb9f38SJeff Kirsher }
2065d9fb9f38SJeff Kirsher 
2066d9fb9f38SJeff Kirsher static void reinit_ring(struct net_device *dev)
2067d9fb9f38SJeff Kirsher {
2068d9fb9f38SJeff Kirsher 	struct netdev_private *np = netdev_priv(dev);
2069d9fb9f38SJeff Kirsher 	int i;
2070d9fb9f38SJeff Kirsher 
2071d9fb9f38SJeff Kirsher 	/* drain TX ring */
2072d9fb9f38SJeff Kirsher 	drain_tx(dev);
2073d9fb9f38SJeff Kirsher 	np->dirty_tx = np->cur_tx = 0;
2074d9fb9f38SJeff Kirsher 	for (i=0;i<TX_RING_SIZE;i++)
2075d9fb9f38SJeff Kirsher 		np->tx_ring[i].cmd_status = 0;
2076d9fb9f38SJeff Kirsher 
2077d9fb9f38SJeff Kirsher 	reinit_rx(dev);
2078d9fb9f38SJeff Kirsher }
2079d9fb9f38SJeff Kirsher 
/* Queue one skb for transmission.  Serializes against netdev_tx_done()
 * and reconfiguration paths with np->lock; always consumes the skb and
 * returns NETDEV_TX_OK. */
static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem * ioaddr = ns_ioaddr(dev);
	unsigned entry;
	unsigned long flags;

	/* Note: Ordering is important here, set the field with the
	   "ownership" bit last, and only then increment cur_tx. */

	/* Calculate the next Tx descriptor entry. */
	entry = np->cur_tx % TX_RING_SIZE;

	np->tx_skbuff[entry] = skb;
	/* NOTE(review): the DMA mapping result is not checked for errors -
	 * confirm pci_map_single() cannot fail on supported platforms. */
	np->tx_dma[entry] = pci_map_single(np->pci_dev,
				skb->data,skb->len, PCI_DMA_TODEVICE);

	np->tx_ring[entry].addr = cpu_to_le32(np->tx_dma[entry]);

	spin_lock_irqsave(&np->lock, flags);

	if (!np->hands_off) {
		np->tx_ring[entry].cmd_status = cpu_to_le32(DescOwn | skb->len);
		/* StrongARM: Explicitly cache flush np->tx_ring and
		 * skb->data,skb->len. */
		wmb();
		np->cur_tx++;
		/* Near-full ring: try to reclaim finished slots first and
		 * only stop the queue if that did not make room. */
		if (np->cur_tx - np->dirty_tx >= TX_QUEUE_LEN - 1) {
			netdev_tx_done(dev);
			if (np->cur_tx - np->dirty_tx >= TX_QUEUE_LEN - 1)
				netif_stop_queue(dev);
		}
		/* Wake the potentially-idle transmit channel. */
		writel(TxOn, ioaddr + ChipCmd);
	} else {
		/* chip is hands-off: silently drop the packet */
		dev_kfree_skb_irq(skb);
		dev->stats.tx_dropped++;
	}
	spin_unlock_irqrestore(&np->lock, flags);

	if (netif_msg_tx_queued(np)) {
		printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n",
			dev->name, np->cur_tx, entry);
	}
	return NETDEV_TX_OK;
}
2126d9fb9f38SJeff Kirsher 
/* Reclaim completed TX descriptors, update statistics and wake the
 * queue once enough slots are free.  Called with np->lock held (see
 * natsemi_poll() and start_tx()). */
static void netdev_tx_done(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);

	for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
		int entry = np->dirty_tx % TX_RING_SIZE;
		/* DescOwn still set: the NIC hasn't finished this one. */
		if (np->tx_ring[entry].cmd_status & cpu_to_le32(DescOwn))
			break;
		if (netif_msg_tx_done(np))
			printk(KERN_DEBUG
				"%s: tx frame #%d finished, status %#08x.\n",
					dev->name, np->dirty_tx,
					le32_to_cpu(np->tx_ring[entry].cmd_status));
		if (np->tx_ring[entry].cmd_status & cpu_to_le32(DescPktOK)) {
			dev->stats.tx_packets++;
			dev->stats.tx_bytes += np->tx_skbuff[entry]->len;
		} else { /* Various Tx errors */
			int tx_status =
				le32_to_cpu(np->tx_ring[entry].cmd_status);
			if (tx_status & (DescTxAbort|DescTxExcColl))
				dev->stats.tx_aborted_errors++;
			if (tx_status & DescTxFIFO)
				dev->stats.tx_fifo_errors++;
			if (tx_status & DescTxCarrier)
				dev->stats.tx_carrier_errors++;
			if (tx_status & DescTxOOWCol)
				dev->stats.tx_window_errors++;
			dev->stats.tx_errors++;
		}
		pci_unmap_single(np->pci_dev,np->tx_dma[entry],
					np->tx_skbuff[entry]->len,
					PCI_DMA_TODEVICE);
		/* Free the original skb. */
		dev_kfree_skb_irq(np->tx_skbuff[entry]);
		np->tx_skbuff[entry] = NULL;
	}
	if (netif_queue_stopped(dev) &&
	    np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
		/* The ring is no longer full, wake queue. */
		netif_wake_queue(dev);
	}
}
2169d9fb9f38SJeff Kirsher 
2170d9fb9f38SJeff Kirsher /* The interrupt handler doesn't actually handle interrupts itself, it
2171d9fb9f38SJeff Kirsher  * schedules a NAPI poll if there is anything to do. */
/* The interrupt handler doesn't actually handle interrupts itself, it
 * schedules a NAPI poll if there is anything to do. */
static irqreturn_t intr_handler(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct netdev_private *np = netdev_priv(dev);
	void __iomem * ioaddr = ns_ioaddr(dev);

	/* Reading IntrStatus automatically acknowledges so don't do
	 * that while interrupts are disabled, (for example, while a
	 * poll is scheduled).  */
	if (np->hands_off || !readl(ioaddr + IntrEnable))
		return IRQ_NONE;

	/* The read both fetches and acks; the value is stashed in
	 * np->intr_status for natsemi_poll() to consume. */
	np->intr_status = readl(ioaddr + IntrStatus);

	if (!np->intr_status)
		return IRQ_NONE;	/* shared-IRQ: not ours */

	if (netif_msg_intr(np))
		printk(KERN_DEBUG
		       "%s: Interrupt, status %#08x, mask %#08x.\n",
		       dev->name, np->intr_status,
		       readl(ioaddr + IntrMask));

	/* Warm the cache line the poll routine touches first. */
	prefetch(&np->rx_skbuff[np->cur_rx % RX_RING_SIZE]);

	if (napi_schedule_prep(&np->napi)) {
		/* Disable interrupts and register for poll */
		natsemi_irq_disable(dev);
		__napi_schedule(&np->napi);
	} else
		/* A poll is already scheduled; should not normally happen
		 * since IntrEnable was just seen set above. */
		printk(KERN_WARNING
	       	       "%s: Ignoring interrupt, status %#08x, mask %#08x.\n",
		       dev->name, np->intr_status,
		       readl(ioaddr + IntrMask));

	return IRQ_HANDLED;
}
2209d9fb9f38SJeff Kirsher 
2210d9fb9f38SJeff Kirsher /* This is the NAPI poll routine.  As well as the standard RX handling
2211d9fb9f38SJeff Kirsher  * it also handles all other interrupts that the chip might raise.
2212d9fb9f38SJeff Kirsher  */
/* This is the NAPI poll routine.  As well as the standard RX handling
 * it also handles all other interrupts that the chip might raise.
 */
static int natsemi_poll(struct napi_struct *napi, int budget)
{
	struct netdev_private *np = container_of(napi, struct netdev_private, napi);
	struct net_device *dev = np->dev;
	void __iomem * ioaddr = ns_ioaddr(dev);
	int work_done = 0;

	/* Loop until no further interrupt causes are pending or the
	 * budget is exhausted; np->intr_status was primed by
	 * intr_handler() and is re-read (and thereby acked) below. */
	do {
		if (netif_msg_intr(np))
			printk(KERN_DEBUG
			       "%s: Poll, status %#08x, mask %#08x.\n",
			       dev->name, np->intr_status,
			       readl(ioaddr + IntrMask));

		/* netdev_rx() may read IntrStatus again if the RX state
		 * machine falls over so do it first. */
		if (np->intr_status &
		    (IntrRxDone | IntrRxIntr | RxStatusFIFOOver |
		     IntrRxErr | IntrRxOverrun)) {
			netdev_rx(dev, &work_done, budget);
		}

		if (np->intr_status &
		    (IntrTxDone | IntrTxIntr | IntrTxIdle | IntrTxErr)) {
			/* netdev_tx_done() expects np->lock held */
			spin_lock(&np->lock);
			netdev_tx_done(dev);
			spin_unlock(&np->lock);
		}

		/* Abnormal error summary/uncommon events handlers. */
		if (np->intr_status & IntrAbnormalSummary)
			netdev_error(dev, np->intr_status);

		/* Out of budget: stay scheduled, do NOT re-enable IRQs. */
		if (work_done >= budget)
			return work_done;

		np->intr_status = readl(ioaddr + IntrStatus);
	} while (np->intr_status);

	napi_complete(napi);

	/* Reenable interrupts providing nothing is trying to shut
	 * the chip down. */
	spin_lock(&np->lock);
	if (!np->hands_off)
		natsemi_irq_enable(dev);
	spin_unlock(&np->lock);

	return work_done;
}
2263d9fb9f38SJeff Kirsher 
2264d9fb9f38SJeff Kirsher /* This routine is logically part of the interrupt handler, but separated
2265d9fb9f38SJeff Kirsher    for clarity and better register allocation. */
/* Receive path: walk the RX ring passing completed packets up the
 * stack, bounded by the NAPI budget.  Logically part of the interrupt
 * handler, but separated for clarity and better register allocation. */
static void netdev_rx(struct net_device *dev, int *work_done, int work_to_do)
{
	struct netdev_private *np = netdev_priv(dev);
	int entry = np->cur_rx % RX_RING_SIZE;
	/* boguscnt caps the walk at the number of filled slots */
	int boguscnt = np->dirty_rx + RX_RING_SIZE - np->cur_rx;
	s32 desc_status = le32_to_cpu(np->rx_head_desc->cmd_status);
	unsigned int buflen = np->rx_buf_sz;
	void __iomem * ioaddr = ns_ioaddr(dev);

	/* If the driver owns the next entry it's a new packet. Send it up.
	 * (DescOwn is the sign bit, hence the < 0 test.) */
	while (desc_status < 0) { /* e.g. & DescOwn */
		int pkt_len;
		if (netif_msg_rx_status(np))
			printk(KERN_DEBUG
				"  netdev_rx() entry %d status was %#08x.\n",
				entry, desc_status);
		if (--boguscnt < 0)
			break;

		if (*work_done >= work_to_do)
			break;

		(*work_done)++;

		/* Omit the 4-byte CRC from the length. */
		pkt_len = (desc_status & DescSizeMask) - 4;
		if ((desc_status&(DescMore|DescPktOK|DescRxLong)) != DescPktOK){
			if (desc_status & DescMore) {
				unsigned long flags;

				if (netif_msg_rx_err(np))
					printk(KERN_WARNING
						"%s: Oversized(?) Ethernet "
						"frame spanned multiple "
						"buffers, entry %#08x "
						"status %#08x.\n", dev->name,
						np->cur_rx, desc_status);
				dev->stats.rx_length_errors++;

				/* The RX state machine has probably
				 * locked up beneath us.  Follow the
				 * reset procedure documented in
				 * AN-1287. */

				spin_lock_irqsave(&np->lock, flags);
				reset_rx(dev);
				reinit_rx(dev);
				writel(np->ring_dma, ioaddr + RxRingPtr);
				check_link(dev);
				spin_unlock_irqrestore(&np->lock, flags);

				/* We'll enable RX on exit from this
				 * function. */
				break;

			} else {
				/* There was an error. */
				dev->stats.rx_errors++;
				if (desc_status & (DescRxAbort|DescRxOver))
					dev->stats.rx_over_errors++;
				if (desc_status & (DescRxLong|DescRxRunt))
					dev->stats.rx_length_errors++;
				if (desc_status & (DescRxInvalid|DescRxAlign))
					dev->stats.rx_frame_errors++;
				if (desc_status & DescRxCRC)
					dev->stats.rx_crc_errors++;
			}
		} else if (pkt_len > np->rx_buf_sz) {
			/* if this is the tail of a double buffer
			 * packet, we've already counted the error
			 * on the first part.  Ignore the second half.
			 */
		} else {
			struct sk_buff *skb;
			/* Omit CRC size. */
			/* Check if the packet is long enough to accept
			 * without copying to a minimally-sized skbuff. */
			if (pkt_len < rx_copybreak &&
			    (skb = netdev_alloc_skb(dev, pkt_len + RX_OFFSET)) != NULL) {
				/* 16 byte align the IP header */
				skb_reserve(skb, RX_OFFSET);
				/* copy-path: sync, copy out, then hand the
				 * original buffer back to the device */
				pci_dma_sync_single_for_cpu(np->pci_dev,
					np->rx_dma[entry],
					buflen,
					PCI_DMA_FROMDEVICE);
				skb_copy_to_linear_data(skb,
					np->rx_skbuff[entry]->data, pkt_len);
				skb_put(skb, pkt_len);
				pci_dma_sync_single_for_device(np->pci_dev,
					np->rx_dma[entry],
					buflen,
					PCI_DMA_FROMDEVICE);
			} else {
				/* big packet: pass the ring skb upstream;
				 * refill_rx() will allocate a replacement */
				pci_unmap_single(np->pci_dev, np->rx_dma[entry],
						 buflen + NATSEMI_PADDING,
						 PCI_DMA_FROMDEVICE);
				skb_put(skb = np->rx_skbuff[entry], pkt_len);
				np->rx_skbuff[entry] = NULL;
			}
			skb->protocol = eth_type_trans(skb, dev);
			netif_receive_skb(skb);
			dev->stats.rx_packets++;
			dev->stats.rx_bytes += pkt_len;
		}
		entry = (++np->cur_rx) % RX_RING_SIZE;
		np->rx_head_desc = &np->rx_ring[entry];
		desc_status = le32_to_cpu(np->rx_head_desc->cmd_status);
	}
	refill_rx(dev);

	/* Restart Rx engine if stopped. */
	if (np->oom)
		mod_timer(&np->timer, jiffies + 1);
	else
		writel(RxOn, ioaddr + ChipCmd);
}
2381d9fb9f38SJeff Kirsher 
/*
 * Handle the "abnormal" interrupt sources flagged in intr_status:
 * link changes, stats counter overflow, tx underrun, wake-up packets
 * and PCI errors.  Runs in interrupt context; takes np->lock to
 * serialize against the timer and ethtool paths.
 */
static void netdev_error(struct net_device *dev, int intr_status)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem * ioaddr = ns_ioaddr(dev);

	spin_lock(&np->lock);
	if (intr_status & LinkChange) {
		u16 lpa = mdio_read(dev, MII_LPA);
		if (mdio_read(dev, MII_BMCR) & BMCR_ANENABLE &&
		    netif_msg_link(np)) {
			printk(KERN_INFO
				"%s: Autonegotiation advertising"
				" %#04x  partner %#04x.\n", dev->name,
				np->advertising, lpa);
		}

		/* read MII int status to clear the flag */
		readw(ioaddr + MIntrStatus);
		check_link(dev);
	}
	if (intr_status & StatsMax) {
		/* a hardware stats counter is near overflow: fold the
		 * counters into dev->stats now */
		__get_stats(dev);
	}
	if (intr_status & IntrTxUnderrun) {
		/* raise the tx drain threshold a notch per underrun,
		 * up to the limit, and write the new config back */
		if ((np->tx_config & TxDrthMask) < TX_DRTH_VAL_LIMIT) {
			np->tx_config += TX_DRTH_VAL_INC;
			if (netif_msg_tx_err(np))
				printk(KERN_NOTICE
					"%s: increased tx threshold, txcfg %#08x.\n",
					dev->name, np->tx_config);
		} else {
			if (netif_msg_tx_err(np))
				printk(KERN_NOTICE
					"%s: tx underrun with maximum tx threshold, txcfg %#08x.\n",
					dev->name, np->tx_config);
		}
		writel(np->tx_config, ioaddr + TxConfig);
	}
	if (intr_status & WOLPkt && netif_msg_wol(np)) {
		/* wake-up packet seen: log the WOL status, nothing to do */
		int wol_status = readl(ioaddr + WOLCmd);
		printk(KERN_NOTICE "%s: Link wake-up event %#08x\n",
			dev->name, wol_status);
	}
	if (intr_status & RxStatusFIFOOver) {
		if (netif_msg_rx_err(np) && netif_msg_intr(np)) {
			printk(KERN_NOTICE "%s: Rx status FIFO overrun\n",
				dev->name);
		}
		dev->stats.rx_fifo_errors++;
		dev->stats.rx_errors++;
	}
	/* Hmmmmm, it's not clear how to recover from PCI faults. */
	if (intr_status & IntrPCIErr) {
		/* count the fault against both directions since we cannot
		 * tell which side was affected */
		printk(KERN_NOTICE "%s: PCI error %#08x\n", dev->name,
			intr_status & IntrPCIErr);
		dev->stats.tx_fifo_errors++;
		dev->stats.tx_errors++;
		dev->stats.rx_fifo_errors++;
		dev->stats.rx_errors++;
	}
	spin_unlock(&np->lock);
}
2444d9fb9f38SJeff Kirsher 
/*
 * Fold the chip's hardware drop counters into dev->stats.
 * Caller must hold np->lock (or otherwise exclude concurrent access).
 * The += accumulation suggests the counters are clear-on-read —
 * NOTE(review): verify against the DP83815 datasheet.
 */
static void __get_stats(struct net_device *dev)
{
	void __iomem * ioaddr = ns_ioaddr(dev);

	/* The chip only need report frame silently dropped. */
	dev->stats.rx_crc_errors += readl(ioaddr + RxCRCErrs);
	dev->stats.rx_missed_errors += readl(ioaddr + RxMissed);
}
2453d9fb9f38SJeff Kirsher 
/*
 * net_device_ops stats hook.  Refresh dev->stats from the hardware
 * counters, but only while the interface is up and the driver has not
 * relinquished the hardware (np->hands_off).
 */
static struct net_device_stats *get_stats(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);

	/* The chip only need report frame silently dropped. */
	spin_lock_irq(&np->lock);
	if (netif_running(dev) && !np->hands_off)
		__get_stats(dev);
	spin_unlock_irq(&np->lock);

	return &dev->stats;
}
2466d9fb9f38SJeff Kirsher 
2467d9fb9f38SJeff Kirsher #ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Netpoll hook: run the interrupt handler by hand with the device's
 * irq line masked, so netconsole et al. can make progress without
 * interrupts enabled.
 */
static void natsemi_poll_controller(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	const int irq = np->pci_dev->irq;

	disable_irq(irq);
	intr_handler(irq, dev);
	enable_irq(irq);
}
2477d9fb9f38SJeff Kirsher #endif
2478d9fb9f38SJeff Kirsher 
2479d9fb9f38SJeff Kirsher #define HASH_TABLE	0x200
/*
 * Program the chip's RX filter from dev->flags and the multicast list.
 * Caller must hold np->lock (see set_rx_mode()).
 */
static void __set_rx_mode(struct net_device *dev)
{
	void __iomem * ioaddr = ns_ioaddr(dev);
	struct netdev_private *np = netdev_priv(dev);
	u8 mc_filter[64]; /* Multicast hash filter */
	u32 rx_mode;

	if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
		rx_mode = RxFilterEnable | AcceptBroadcast
			| AcceptAllMulticast | AcceptAllPhys | AcceptMyPhys;
	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
		   (dev->flags & IFF_ALLMULTI)) {
		/* too many addresses for the hash table, or allmulti
		 * requested: accept all multicast frames */
		rx_mode = RxFilterEnable | AcceptBroadcast
			| AcceptAllMulticast | AcceptMyPhys;
	} else {
		struct netdev_hw_addr *ha;
		int i;

		/* build a 512-bit hash table indexed by 9 bits of the
		 * ethernet CRC of each multicast address */
		memset(mc_filter, 0, sizeof(mc_filter));
		netdev_for_each_mc_addr(ha, dev) {
			int b = (ether_crc(ETH_ALEN, ha->addr) >> 23) & 0x1ff;
			mc_filter[b/8] |= (1 << (b & 0x07));
		}
		rx_mode = RxFilterEnable | AcceptBroadcast
			| AcceptMulticast | AcceptMyPhys;
		/* upload the table into the chip, 16 filter bits per
		 * even offset */
		for (i = 0; i < 64; i += 2) {
			writel(HASH_TABLE + i, ioaddr + RxFilterAddr);
			writel((mc_filter[i + 1] << 8) + mc_filter[i],
			       ioaddr + RxFilterData);
		}
	}
	writel(rx_mode, ioaddr + RxFilterAddr);
	np->cur_rx_mode = rx_mode;
}
2514d9fb9f38SJeff Kirsher 
/*
 * net_device_ops MTU hook.  Validates the new size, then — if the
 * interface is running — quiesces the chip, rebuilds the rx ring with
 * buffers of the new size, and restarts the engines.
 * Returns 0 on success, -EINVAL if new_mtu is out of range.
 */
static int natsemi_change_mtu(struct net_device *dev, int new_mtu)
{
	/* upper bound: chip rx limit minus link-layer header overhead */
	if (new_mtu < 64 || new_mtu > NATSEMI_RX_LIMIT-NATSEMI_HEADERS)
		return -EINVAL;

	dev->mtu = new_mtu;

	/* synchronized against open : rtnl_lock() held by caller */
	if (netif_running(dev)) {
		struct netdev_private *np = netdev_priv(dev);
		void __iomem * ioaddr = ns_ioaddr(dev);
		const int irq = np->pci_dev->irq;

		/* keep the irq handler away while the ring is rebuilt */
		disable_irq(irq);
		spin_lock(&np->lock);
		/* stop engines */
		natsemi_stop_rxtx(dev);
		/* drain rx queue */
		drain_rx(dev);
		/* change buffers */
		set_bufsize(dev);
		reinit_rx(dev);
		writel(np->ring_dma, ioaddr + RxRingPtr);
		/* restart engines */
		writel(RxOn | TxOn, ioaddr + ChipCmd);
		spin_unlock(&np->lock);
		enable_irq(irq);
	}
	return 0;
}
2545d9fb9f38SJeff Kirsher 
/*
 * net_device_ops rx-mode hook: locked wrapper around __set_rx_mode().
 * Skipped when the driver has relinquished the hardware (hands_off).
 */
static void set_rx_mode(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	spin_lock_irq(&np->lock);
	if (!np->hands_off)
		__set_rx_mode(dev);
	spin_unlock_irq(&np->lock);
}
2554d9fb9f38SJeff Kirsher 
/* ethtool -i: report driver name, version and PCI bus location */
static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct netdev_private *np = netdev_priv(dev);
	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}
2562d9fb9f38SJeff Kirsher 
/* ethtool: size in bytes of the register dump returned by get_regs() */
static int get_regs_len(struct net_device *dev)
{
	return NATSEMI_REGS_SIZE;
}
2567d9fb9f38SJeff Kirsher 
/* ethtool: size in bytes of the on-board EEPROM (chip dependent) */
static int get_eeprom_len(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	return np->eeprom_size;
}
2573d9fb9f38SJeff Kirsher 
/* ethtool: read link settings; np->lock keeps the MII access atomic */
static int get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
	struct netdev_private *np = netdev_priv(dev);
	spin_lock_irq(&np->lock);
	netdev_get_ecmd(dev, ecmd);
	spin_unlock_irq(&np->lock);
	return 0;
}
2582d9fb9f38SJeff Kirsher 
/* ethtool: apply link settings under np->lock; see netdev_set_ecmd() */
static int set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
	struct netdev_private *np = netdev_priv(dev);
	int res;
	spin_lock_irq(&np->lock);
	res = netdev_set_ecmd(dev, ecmd);
	spin_unlock_irq(&np->lock);
	return res;
}
2592d9fb9f38SJeff Kirsher 
/* ethtool: report wake-on-LAN capabilities, settings and SecureOn pass */
static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct netdev_private *np = netdev_priv(dev);
	spin_lock_irq(&np->lock);
	netdev_get_wol(dev, &wol->supported, &wol->wolopts);
	netdev_get_sopass(dev, wol->sopass);
	spin_unlock_irq(&np->lock);
}
2601d9fb9f38SJeff Kirsher 
/* ethtool: program wake-on-LAN options and SecureOn password */
static int set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct netdev_private *np = netdev_priv(dev);
	int res;
	spin_lock_irq(&np->lock);
	netdev_set_wol(dev, wol->wolopts);
	res = netdev_set_sopass(dev, wol->sopass);
	spin_unlock_irq(&np->lock);
	return res;
}
2612d9fb9f38SJeff Kirsher 
/* ethtool: dump chip registers into buf (NATSEMI_REGS_SIZE bytes) */
static void get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf)
{
	struct netdev_private *np = netdev_priv(dev);
	regs->version = NATSEMI_REGS_VER;
	spin_lock_irq(&np->lock);
	netdev_get_regs(dev, buf);
	spin_unlock_irq(&np->lock);
}
2621d9fb9f38SJeff Kirsher 
/* ethtool: current NETIF_MSG_* verbosity bitmap */
static u32 get_msglevel(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	return np->msg_enable;
}
2627d9fb9f38SJeff Kirsher 
/* ethtool: set NETIF_MSG_* verbosity bitmap */
static void set_msglevel(struct net_device *dev, u32 val)
{
	struct netdev_private *np = netdev_priv(dev);
	np->msg_enable = val;
}
2633d9fb9f38SJeff Kirsher 
2634d9fb9f38SJeff Kirsher static int nway_reset(struct net_device *dev)
2635d9fb9f38SJeff Kirsher {
2636d9fb9f38SJeff Kirsher 	int tmp;
2637d9fb9f38SJeff Kirsher 	int r = -EINVAL;
2638d9fb9f38SJeff Kirsher 	/* if autoneg is off, it's an error */
2639d9fb9f38SJeff Kirsher 	tmp = mdio_read(dev, MII_BMCR);
2640d9fb9f38SJeff Kirsher 	if (tmp & BMCR_ANENABLE) {
2641d9fb9f38SJeff Kirsher 		tmp |= (BMCR_ANRESTART);
2642d9fb9f38SJeff Kirsher 		mdio_write(dev, MII_BMCR, tmp);
2643d9fb9f38SJeff Kirsher 		r = 0;
2644d9fb9f38SJeff Kirsher 	}
2645d9fb9f38SJeff Kirsher 	return r;
2646d9fb9f38SJeff Kirsher }
2647d9fb9f38SJeff Kirsher 
2648d9fb9f38SJeff Kirsher static u32 get_link(struct net_device *dev)
2649d9fb9f38SJeff Kirsher {
2650d9fb9f38SJeff Kirsher 	/* LSTATUS is latched low until a read - so read twice */
2651d9fb9f38SJeff Kirsher 	mdio_read(dev, MII_BMSR);
2652d9fb9f38SJeff Kirsher 	return (mdio_read(dev, MII_BMSR)&BMSR_LSTATUS) ? 1:0;
2653d9fb9f38SJeff Kirsher }
2654d9fb9f38SJeff Kirsher 
2655d9fb9f38SJeff Kirsher static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
2656d9fb9f38SJeff Kirsher {
2657d9fb9f38SJeff Kirsher 	struct netdev_private *np = netdev_priv(dev);
2658d9fb9f38SJeff Kirsher 	u8 *eebuf;
2659d9fb9f38SJeff Kirsher 	int res;
2660d9fb9f38SJeff Kirsher 
2661d9fb9f38SJeff Kirsher 	eebuf = kmalloc(np->eeprom_size, GFP_KERNEL);
2662d9fb9f38SJeff Kirsher 	if (!eebuf)
2663d9fb9f38SJeff Kirsher 		return -ENOMEM;
2664d9fb9f38SJeff Kirsher 
2665d9fb9f38SJeff Kirsher 	eeprom->magic = PCI_VENDOR_ID_NS | (PCI_DEVICE_ID_NS_83815<<16);
2666d9fb9f38SJeff Kirsher 	spin_lock_irq(&np->lock);
2667d9fb9f38SJeff Kirsher 	res = netdev_get_eeprom(dev, eebuf);
2668d9fb9f38SJeff Kirsher 	spin_unlock_irq(&np->lock);
2669d9fb9f38SJeff Kirsher 	if (!res)
2670d9fb9f38SJeff Kirsher 		memcpy(data, eebuf+eeprom->offset, eeprom->len);
2671d9fb9f38SJeff Kirsher 	kfree(eebuf);
2672d9fb9f38SJeff Kirsher 	return res;
2673d9fb9f38SJeff Kirsher }
2674d9fb9f38SJeff Kirsher 
/* ethtool entry points; contracts defined by <linux/ethtool.h> */
static const struct ethtool_ops ethtool_ops = {
	.get_drvinfo = get_drvinfo,
	.get_regs_len = get_regs_len,
	.get_eeprom_len = get_eeprom_len,
	.get_settings = get_settings,
	.set_settings = set_settings,
	.get_wol = get_wol,
	.set_wol = set_wol,
	.get_regs = get_regs,
	.get_msglevel = get_msglevel,
	.set_msglevel = set_msglevel,
	.nway_reset = nway_reset,
	.get_link = get_link,
	.get_eeprom = get_eeprom,
};
2690d9fb9f38SJeff Kirsher 
2691d9fb9f38SJeff Kirsher static int netdev_set_wol(struct net_device *dev, u32 newval)
2692d9fb9f38SJeff Kirsher {
2693d9fb9f38SJeff Kirsher 	struct netdev_private *np = netdev_priv(dev);
2694d9fb9f38SJeff Kirsher 	void __iomem * ioaddr = ns_ioaddr(dev);
2695d9fb9f38SJeff Kirsher 	u32 data = readl(ioaddr + WOLCmd) & ~WakeOptsSummary;
2696d9fb9f38SJeff Kirsher 
2697d9fb9f38SJeff Kirsher 	/* translate to bitmasks this chip understands */
2698d9fb9f38SJeff Kirsher 	if (newval & WAKE_PHY)
2699d9fb9f38SJeff Kirsher 		data |= WakePhy;
2700d9fb9f38SJeff Kirsher 	if (newval & WAKE_UCAST)
2701d9fb9f38SJeff Kirsher 		data |= WakeUnicast;
2702d9fb9f38SJeff Kirsher 	if (newval & WAKE_MCAST)
2703d9fb9f38SJeff Kirsher 		data |= WakeMulticast;
2704d9fb9f38SJeff Kirsher 	if (newval & WAKE_BCAST)
2705d9fb9f38SJeff Kirsher 		data |= WakeBroadcast;
2706d9fb9f38SJeff Kirsher 	if (newval & WAKE_ARP)
2707d9fb9f38SJeff Kirsher 		data |= WakeArp;
2708d9fb9f38SJeff Kirsher 	if (newval & WAKE_MAGIC)
2709d9fb9f38SJeff Kirsher 		data |= WakeMagic;
2710d9fb9f38SJeff Kirsher 	if (np->srr >= SRR_DP83815_D) {
2711d9fb9f38SJeff Kirsher 		if (newval & WAKE_MAGICSECURE) {
2712d9fb9f38SJeff Kirsher 			data |= WakeMagicSecure;
2713d9fb9f38SJeff Kirsher 		}
2714d9fb9f38SJeff Kirsher 	}
2715d9fb9f38SJeff Kirsher 
2716d9fb9f38SJeff Kirsher 	writel(data, ioaddr + WOLCmd);
2717d9fb9f38SJeff Kirsher 
2718d9fb9f38SJeff Kirsher 	return 0;
2719d9fb9f38SJeff Kirsher }
2720d9fb9f38SJeff Kirsher 
2721d9fb9f38SJeff Kirsher static int netdev_get_wol(struct net_device *dev, u32 *supported, u32 *cur)
2722d9fb9f38SJeff Kirsher {
2723d9fb9f38SJeff Kirsher 	struct netdev_private *np = netdev_priv(dev);
2724d9fb9f38SJeff Kirsher 	void __iomem * ioaddr = ns_ioaddr(dev);
2725d9fb9f38SJeff Kirsher 	u32 regval = readl(ioaddr + WOLCmd);
2726d9fb9f38SJeff Kirsher 
2727d9fb9f38SJeff Kirsher 	*supported = (WAKE_PHY | WAKE_UCAST | WAKE_MCAST | WAKE_BCAST
2728d9fb9f38SJeff Kirsher 			| WAKE_ARP | WAKE_MAGIC);
2729d9fb9f38SJeff Kirsher 
2730d9fb9f38SJeff Kirsher 	if (np->srr >= SRR_DP83815_D) {
2731d9fb9f38SJeff Kirsher 		/* SOPASS works on revD and higher */
2732d9fb9f38SJeff Kirsher 		*supported |= WAKE_MAGICSECURE;
2733d9fb9f38SJeff Kirsher 	}
2734d9fb9f38SJeff Kirsher 	*cur = 0;
2735d9fb9f38SJeff Kirsher 
2736d9fb9f38SJeff Kirsher 	/* translate from chip bitmasks */
2737d9fb9f38SJeff Kirsher 	if (regval & WakePhy)
2738d9fb9f38SJeff Kirsher 		*cur |= WAKE_PHY;
2739d9fb9f38SJeff Kirsher 	if (regval & WakeUnicast)
2740d9fb9f38SJeff Kirsher 		*cur |= WAKE_UCAST;
2741d9fb9f38SJeff Kirsher 	if (regval & WakeMulticast)
2742d9fb9f38SJeff Kirsher 		*cur |= WAKE_MCAST;
2743d9fb9f38SJeff Kirsher 	if (regval & WakeBroadcast)
2744d9fb9f38SJeff Kirsher 		*cur |= WAKE_BCAST;
2745d9fb9f38SJeff Kirsher 	if (regval & WakeArp)
2746d9fb9f38SJeff Kirsher 		*cur |= WAKE_ARP;
2747d9fb9f38SJeff Kirsher 	if (regval & WakeMagic)
2748d9fb9f38SJeff Kirsher 		*cur |= WAKE_MAGIC;
2749d9fb9f38SJeff Kirsher 	if (regval & WakeMagicSecure) {
2750d9fb9f38SJeff Kirsher 		/* this can be on in revC, but it's broken */
2751d9fb9f38SJeff Kirsher 		*cur |= WAKE_MAGICSECURE;
2752d9fb9f38SJeff Kirsher 	}
2753d9fb9f38SJeff Kirsher 
2754d9fb9f38SJeff Kirsher 	return 0;
2755d9fb9f38SJeff Kirsher }
2756d9fb9f38SJeff Kirsher 
2757d9fb9f38SJeff Kirsher static int netdev_set_sopass(struct net_device *dev, u8 *newval)
2758d9fb9f38SJeff Kirsher {
2759d9fb9f38SJeff Kirsher 	struct netdev_private *np = netdev_priv(dev);
2760d9fb9f38SJeff Kirsher 	void __iomem * ioaddr = ns_ioaddr(dev);
2761d9fb9f38SJeff Kirsher 	u16 *sval = (u16 *)newval;
2762d9fb9f38SJeff Kirsher 	u32 addr;
2763d9fb9f38SJeff Kirsher 
2764d9fb9f38SJeff Kirsher 	if (np->srr < SRR_DP83815_D) {
2765d9fb9f38SJeff Kirsher 		return 0;
2766d9fb9f38SJeff Kirsher 	}
2767d9fb9f38SJeff Kirsher 
2768d9fb9f38SJeff Kirsher 	/* enable writing to these registers by disabling the RX filter */
2769d9fb9f38SJeff Kirsher 	addr = readl(ioaddr + RxFilterAddr) & ~RFCRAddressMask;
2770d9fb9f38SJeff Kirsher 	addr &= ~RxFilterEnable;
2771d9fb9f38SJeff Kirsher 	writel(addr, ioaddr + RxFilterAddr);
2772d9fb9f38SJeff Kirsher 
2773d9fb9f38SJeff Kirsher 	/* write the three words to (undocumented) RFCR vals 0xa, 0xc, 0xe */
2774d9fb9f38SJeff Kirsher 	writel(addr | 0xa, ioaddr + RxFilterAddr);
2775d9fb9f38SJeff Kirsher 	writew(sval[0], ioaddr + RxFilterData);
2776d9fb9f38SJeff Kirsher 
2777d9fb9f38SJeff Kirsher 	writel(addr | 0xc, ioaddr + RxFilterAddr);
2778d9fb9f38SJeff Kirsher 	writew(sval[1], ioaddr + RxFilterData);
2779d9fb9f38SJeff Kirsher 
2780d9fb9f38SJeff Kirsher 	writel(addr | 0xe, ioaddr + RxFilterAddr);
2781d9fb9f38SJeff Kirsher 	writew(sval[2], ioaddr + RxFilterData);
2782d9fb9f38SJeff Kirsher 
2783d9fb9f38SJeff Kirsher 	/* re-enable the RX filter */
2784d9fb9f38SJeff Kirsher 	writel(addr | RxFilterEnable, ioaddr + RxFilterAddr);
2785d9fb9f38SJeff Kirsher 
2786d9fb9f38SJeff Kirsher 	return 0;
2787d9fb9f38SJeff Kirsher }
2788d9fb9f38SJeff Kirsher 
2789d9fb9f38SJeff Kirsher static int netdev_get_sopass(struct net_device *dev, u8 *data)
2790d9fb9f38SJeff Kirsher {
2791d9fb9f38SJeff Kirsher 	struct netdev_private *np = netdev_priv(dev);
2792d9fb9f38SJeff Kirsher 	void __iomem * ioaddr = ns_ioaddr(dev);
2793d9fb9f38SJeff Kirsher 	u16 *sval = (u16 *)data;
2794d9fb9f38SJeff Kirsher 	u32 addr;
2795d9fb9f38SJeff Kirsher 
2796d9fb9f38SJeff Kirsher 	if (np->srr < SRR_DP83815_D) {
2797d9fb9f38SJeff Kirsher 		sval[0] = sval[1] = sval[2] = 0;
2798d9fb9f38SJeff Kirsher 		return 0;
2799d9fb9f38SJeff Kirsher 	}
2800d9fb9f38SJeff Kirsher 
2801d9fb9f38SJeff Kirsher 	/* read the three words from (undocumented) RFCR vals 0xa, 0xc, 0xe */
2802d9fb9f38SJeff Kirsher 	addr = readl(ioaddr + RxFilterAddr) & ~RFCRAddressMask;
2803d9fb9f38SJeff Kirsher 
2804d9fb9f38SJeff Kirsher 	writel(addr | 0xa, ioaddr + RxFilterAddr);
2805d9fb9f38SJeff Kirsher 	sval[0] = readw(ioaddr + RxFilterData);
2806d9fb9f38SJeff Kirsher 
2807d9fb9f38SJeff Kirsher 	writel(addr | 0xc, ioaddr + RxFilterAddr);
2808d9fb9f38SJeff Kirsher 	sval[1] = readw(ioaddr + RxFilterData);
2809d9fb9f38SJeff Kirsher 
2810d9fb9f38SJeff Kirsher 	writel(addr | 0xe, ioaddr + RxFilterAddr);
2811d9fb9f38SJeff Kirsher 	sval[2] = readw(ioaddr + RxFilterData);
2812d9fb9f38SJeff Kirsher 
2813d9fb9f38SJeff Kirsher 	writel(addr, ioaddr + RxFilterAddr);
2814d9fb9f38SJeff Kirsher 
2815d9fb9f38SJeff Kirsher 	return 0;
2816d9fb9f38SJeff Kirsher }
2817d9fb9f38SJeff Kirsher 
/*
 * Fill in an ethtool_cmd from the driver's link state: port, speed,
 * duplex, autoneg and the advertised/supported mode masks.
 * Caller holds np->lock.  Always returns 0.
 */
static int netdev_get_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd)
{
	struct netdev_private *np = netdev_priv(dev);
	u32 tmp;

	ecmd->port        = dev->if_port;
	ethtool_cmd_speed_set(ecmd, np->speed);
	ecmd->duplex      = np->duplex;
	ecmd->autoneg     = np->autoneg;
	ecmd->advertising = 0;
	/* translate MII ADVERTISE_* bits into ethtool ADVERTISED_* bits */
	if (np->advertising & ADVERTISE_10HALF)
		ecmd->advertising |= ADVERTISED_10baseT_Half;
	if (np->advertising & ADVERTISE_10FULL)
		ecmd->advertising |= ADVERTISED_10baseT_Full;
	if (np->advertising & ADVERTISE_100HALF)
		ecmd->advertising |= ADVERTISED_100baseT_Half;
	if (np->advertising & ADVERTISE_100FULL)
		ecmd->advertising |= ADVERTISED_100baseT_Full;
	ecmd->supported   = (SUPPORTED_Autoneg |
		SUPPORTED_10baseT_Half  | SUPPORTED_10baseT_Full  |
		SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
		SUPPORTED_TP | SUPPORTED_MII | SUPPORTED_FIBRE);
	ecmd->phy_address = np->phy_addr_external;
	/*
	 * We intentionally report the phy address of the external
	 * phy, even if the internal phy is used. This is necessary
	 * to work around a deficiency of the ethtool interface:
	 * It's only possible to query the settings of the active
	 * port. Therefore
	 * # ethtool -s ethX port mii
	 * actually sends an ioctl to switch to port mii with the
	 * settings that are used for the current active port.
	 * If we would report a different phy address in this
	 * command, then
	 * # ethtool -s ethX port tp;ethtool -s ethX port mii
	 * would unintentionally change the phy address.
	 *
	 * Fortunately the phy address doesn't matter with the
	 * internal phy...
	 */

	/* set information based on active port type */
	switch (ecmd->port) {
	default:
	case PORT_TP:
		ecmd->advertising |= ADVERTISED_TP;
		ecmd->transceiver = XCVR_INTERNAL;
		break;
	case PORT_MII:
		ecmd->advertising |= ADVERTISED_MII;
		ecmd->transceiver = XCVR_EXTERNAL;
		break;
	case PORT_FIBRE:
		ecmd->advertising |= ADVERTISED_FIBRE;
		ecmd->transceiver = XCVR_EXTERNAL;
		break;
	}

	/* if autonegotiation is on, try to return the active speed/duplex */
	if (ecmd->autoneg == AUTONEG_ENABLE) {
		ecmd->advertising |= ADVERTISED_Autoneg;
		/* resolve the negotiated mode from our advertisement
		 * and the link partner's ability (LPA) register */
		tmp = mii_nway_result(
			np->advertising & mdio_read(dev, MII_LPA));
		if (tmp == LPA_100FULL || tmp == LPA_100HALF)
			ethtool_cmd_speed_set(ecmd, SPEED_100);
		else
			ethtool_cmd_speed_set(ecmd, SPEED_10);
		if (tmp == LPA_100FULL || tmp == LPA_10FULL)
			ecmd->duplex = DUPLEX_FULL;
		else
			ecmd->duplex = DUPLEX_HALF;
	}

	/* ignore maxtxpkt, maxrxpkt for now */

	return 0;
}
2895d9fb9f38SJeff Kirsher 
/*
 * Apply new link settings from an ethtool_cmd: validate the request,
 * record the new port/autoneg/speed/duplex parameters, switch between
 * the internal and external phy as needed, and re-run the phy setup.
 * Caller holds np->lock.  Returns 0 on success, -EINVAL on a request
 * the hardware or driver cannot honor.
 */
static int netdev_set_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd)
{
	struct netdev_private *np = netdev_priv(dev);

	/* validate everything before touching any state */
	if (ecmd->port != PORT_TP && ecmd->port != PORT_MII && ecmd->port != PORT_FIBRE)
		return -EINVAL;
	if (ecmd->transceiver != XCVR_INTERNAL && ecmd->transceiver != XCVR_EXTERNAL)
		return -EINVAL;
	if (ecmd->autoneg == AUTONEG_ENABLE) {
		/* autoneg requires at least one mode to advertise */
		if ((ecmd->advertising & (ADVERTISED_10baseT_Half |
					  ADVERTISED_10baseT_Full |
					  ADVERTISED_100baseT_Half |
					  ADVERTISED_100baseT_Full)) == 0) {
			return -EINVAL;
		}
	} else if (ecmd->autoneg == AUTONEG_DISABLE) {
		u32 speed = ethtool_cmd_speed(ecmd);
		if (speed != SPEED_10 && speed != SPEED_100)
			return -EINVAL;
		if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL)
			return -EINVAL;
	} else {
		return -EINVAL;
	}

	/*
	 * If we're ignoring the PHY then autoneg and the internal
	 * transceiver are really not going to work so don't let the
	 * user select them.
	 */
	if (np->ignore_phy && (ecmd->autoneg == AUTONEG_ENABLE ||
			       ecmd->port == PORT_TP))
		return -EINVAL;

	/*
	 * maxtxpkt, maxrxpkt: ignored for now.
	 *
	 * transceiver:
	 * PORT_TP is always XCVR_INTERNAL, PORT_MII and PORT_FIBRE are always
	 * XCVR_EXTERNAL. The implementation thus ignores ecmd->transceiver and
	 * selects based on ecmd->port.
	 *
	 * Actually PORT_FIBRE is nearly identical to PORT_MII: it's for fibre
	 * phys that are connected to the mii bus. It's used to apply fibre
	 * specific updates.
	 */

	/* WHEW! now lets bang some bits */

	/* save the parms */
	dev->if_port          = ecmd->port;
	np->autoneg           = ecmd->autoneg;
	np->phy_addr_external = ecmd->phy_address & PhyAddrMask;
	if (np->autoneg == AUTONEG_ENABLE) {
		/* advertise only what has been requested */
		np->advertising &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
		if (ecmd->advertising & ADVERTISED_10baseT_Half)
			np->advertising |= ADVERTISE_10HALF;
		if (ecmd->advertising & ADVERTISED_10baseT_Full)
			np->advertising |= ADVERTISE_10FULL;
		if (ecmd->advertising & ADVERTISED_100baseT_Half)
			np->advertising |= ADVERTISE_100HALF;
		if (ecmd->advertising & ADVERTISED_100baseT_Full)
			np->advertising |= ADVERTISE_100FULL;
	} else {
		/* forced mode: record speed/duplex directly */
		np->speed  = ethtool_cmd_speed(ecmd);
		np->duplex = ecmd->duplex;
		/* user overriding the initial full duplex parm? */
		if (np->duplex == DUPLEX_HALF)
			np->full_duplex = 0;
	}

	/* get the right phy enabled */
	if (ecmd->port == PORT_TP)
		switch_port_internal(dev);
	else
		switch_port_external(dev);

	/* set parms and see how this affected our link status */
	init_phy_fixup(dev);
	check_link(dev);
	return 0;
}
2979d9fb9f38SJeff Kirsher 
/*
 * Snapshot the chip's register state into @buf (for ethtool -d).
 * Output layout, in 32-bit slots: the operational page-0 registers,
 * the current MII registers, four "magic" page-1 registers, then the
 * RFCR-indexed receive filter data.  Always returns 0.
 */
static int netdev_get_regs(struct net_device *dev, u8 *buf)
{
	int i;
	int j;
	u32 rfcr;
	u32 *rbuf = (u32 *)buf;
	void __iomem * ioaddr = ns_ioaddr(dev);

	/* read non-mii page 0 of registers */
	for (i = 0; i < NATSEMI_PG0_NREGS/2; i++) {
		rbuf[i] = readl(ioaddr + i*4);
	}

	/* read current mii registers */
	for (i = NATSEMI_PG0_NREGS/2; i < NATSEMI_PG0_NREGS; i++)
		rbuf[i] = mdio_read(dev, i & 0x1f);

	/* read only the 'magic' registers from page 1 */
	writew(1, ioaddr + PGSEL);
	rbuf[i++] = readw(ioaddr + PMDCSR);
	rbuf[i++] = readw(ioaddr + TSTDAT);
	rbuf[i++] = readw(ioaddr + DSPCFG);
	rbuf[i++] = readw(ioaddr + SDCFG);
	/* switch back to page 0 so normal register accesses work again */
	writew(0, ioaddr + PGSEL);

	/* read RFCR indexed registers */
	rfcr = readl(ioaddr + RxFilterAddr);
	for (j = 0; j < NATSEMI_RFDR_NREGS; j++) {
		writel(j*2, ioaddr + RxFilterAddr);
		rbuf[i++] = readw(ioaddr + RxFilterData);
	}
	/* restore the filter address clobbered by the loop above */
	writel(rfcr, ioaddr + RxFilterAddr);

	/* the interrupt status is clear-on-read - see if we missed any */
	/* NOTE(review): assumes rbuf[4]/rbuf[5] hold IntrStatus/IntrMask -
	 * confirm against the page-0 register map. */
	if (rbuf[4] & rbuf[5]) {
		printk(KERN_WARNING
			"%s: shoot, we dropped an interrupt (%#08x)\n",
			dev->name, rbuf[4] & rbuf[5]);
	}

	return 0;
}
3022d9fb9f38SJeff Kirsher 
/* Reverse the order of the 16 bits in x (bit 0 <-> bit 15, bit 1 <-> bit 14,
 * and so on).  Self-inverse.  Used to convert between the EEPROM's
 * bit-swapped storage format and the ordering eeprom_read() returns. */
#define SWAP_BITS(x)	( (((x) & 0x8000) >> 15) | (((x) & 0x4000) >> 13) \
			| (((x) & 0x2000) >> 11) | (((x) & 0x1000) >> 9)  \
			| (((x) & 0x0800) >> 7)  | (((x) & 0x0400) >> 5)  \
			| (((x) & 0x0200) >> 3)  | (((x) & 0x0100) >> 1)  \
			| (((x) & 0x0080) << 1)  | (((x) & 0x0040) << 3)  \
			| (((x) & 0x0020) << 5)  | (((x) & 0x0010) << 7)  \
			| (((x) & 0x0008) << 9)  | (((x) & 0x0004) << 11) \
			| (((x) & 0x0002) << 13) | (((x) & 0x0001) << 15) )
3031d9fb9f38SJeff Kirsher 
3032d9fb9f38SJeff Kirsher static int netdev_get_eeprom(struct net_device *dev, u8 *buf)
3033d9fb9f38SJeff Kirsher {
3034d9fb9f38SJeff Kirsher 	int i;
3035d9fb9f38SJeff Kirsher 	u16 *ebuf = (u16 *)buf;
3036d9fb9f38SJeff Kirsher 	void __iomem * ioaddr = ns_ioaddr(dev);
3037d9fb9f38SJeff Kirsher 	struct netdev_private *np = netdev_priv(dev);
3038d9fb9f38SJeff Kirsher 
3039d9fb9f38SJeff Kirsher 	/* eeprom_read reads 16 bits, and indexes by 16 bits */
3040d9fb9f38SJeff Kirsher 	for (i = 0; i < np->eeprom_size/2; i++) {
3041d9fb9f38SJeff Kirsher 		ebuf[i] = eeprom_read(ioaddr, i);
3042d9fb9f38SJeff Kirsher 		/* The EEPROM itself stores data bit-swapped, but eeprom_read
3043d9fb9f38SJeff Kirsher 		 * reads it back "sanely". So we swap it back here in order to
3044d9fb9f38SJeff Kirsher 		 * present it to userland as it is stored. */
3045d9fb9f38SJeff Kirsher 		ebuf[i] = SWAP_BITS(ebuf[i]);
3046d9fb9f38SJeff Kirsher 	}
3047d9fb9f38SJeff Kirsher 	return 0;
3048d9fb9f38SJeff Kirsher }
3049d9fb9f38SJeff Kirsher 
/*
 * Handler for the legacy MII ioctls (SIOCGMIIPHY / SIOCGMIIREG /
 * SIOCSMIIREG).  Register accesses are routed according to the
 * currently selected port (dev->if_port), not just the phy_id.
 */
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct mii_ioctl_data *data = if_mii(rq);
	struct netdev_private *np = netdev_priv(dev);

	switch(cmd) {
	case SIOCGMIIPHY:		/* Get address of MII PHY in use. */
		data->phy_id = np->phy_addr_external;
		/* Fall Through - GMIIPHY also returns the register value */

	case SIOCGMIIREG:		/* Read MII PHY register. */
		/* The phy_id is not enough to uniquely identify
		 * the intended target. Therefore the command is sent to
		 * the given mii on the current port.
		 */
		if (dev->if_port == PORT_TP) {
			if ((data->phy_id & 0x1f) == np->phy_addr_external)
				data->val_out = mdio_read(dev,
							data->reg_num & 0x1f);
			else
				/* not the active phy: report 0 instead */
				data->val_out = 0;
		} else {
			move_int_phy(dev, data->phy_id & 0x1f);
			data->val_out = miiport_read(dev, data->phy_id & 0x1f,
							data->reg_num & 0x1f);
		}
		return 0;

	case SIOCSMIIREG:		/* Write MII PHY register. */
		if (dev->if_port == PORT_TP) {
			if ((data->phy_id & 0x1f) == np->phy_addr_external) {
				/* mirror MII_ADVERTISE writes so later link
				 * setup uses the new advertising value */
 				if ((data->reg_num & 0x1f) == MII_ADVERTISE)
					np->advertising = data->val_in;
				mdio_write(dev, data->reg_num & 0x1f,
							data->val_in);
			}
		} else {
			if ((data->phy_id & 0x1f) == np->phy_addr_external) {
 				if ((data->reg_num & 0x1f) == MII_ADVERTISE)
					np->advertising = data->val_in;
			}
			move_int_phy(dev, data->phy_id & 0x1f);
			miiport_write(dev, data->phy_id & 0x1f,
						data->reg_num & 0x1f,
						data->val_in);
		}
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
3101d9fb9f38SJeff Kirsher 
/*
 * Arm the chip for wake-on-lan: restart the rx process in silent mode
 * (NULL ring pointer), enable PME, and - if @enable_intr - unmask the
 * WOL and link-change interrupts.  The caller must have stopped the rx
 * process before calling this.
 */
static void enable_wol_mode(struct net_device *dev, int enable_intr)
{
	void __iomem * ioaddr = ns_ioaddr(dev);
	struct netdev_private *np = netdev_priv(dev);

	if (netif_msg_wol(np))
		printk(KERN_INFO "%s: remaining active for wake-on-lan\n",
			dev->name);

	/* For WOL we must restart the rx process in silent mode.
	 * Write NULL to the RxRingPtr. Only possible if
	 * rx process is stopped
	 */
	writel(0, ioaddr + RxRingPtr);

	/* read WoL status to clear */
	readl(ioaddr + WOLCmd);

	/* PME on, clear status */
	writel(np->SavedClkRun | PMEEnable | PMEStatus, ioaddr + ClkRun);

	/* and restart the rx process */
	writel(RxOn, ioaddr + ChipCmd);

	if (enable_intr) {
		/* enable the WOL interrupt.
		 * Could be used to send a netlink message.
		 */
		writel(WOLPkt | LinkChange, ioaddr + IntrMask);
		natsemi_irq_enable(dev);
	}
}
3134d9fb9f38SJeff Kirsher 
/*
 * .ndo_stop handler: quiesce NAPI, the watchdog timer and the irq,
 * freeze and harvest the hardware statistics, stop rx/tx, free the
 * descriptor rings, and finally either re-arm the chip for
 * wake-on-lan or restore the saved PME enable state.
 */
static int netdev_close(struct net_device *dev)
{
	void __iomem * ioaddr = ns_ioaddr(dev);
	struct netdev_private *np = netdev_priv(dev);
	const int irq = np->pci_dev->irq;

	if (netif_msg_ifdown(np))
		printk(KERN_DEBUG
			"%s: Shutting down ethercard, status was %#04x.\n",
			dev->name, (int)readl(ioaddr + ChipCmd));
	if (netif_msg_pktdata(np))
		printk(KERN_DEBUG
			"%s: Queue pointers were Tx %d / %d,  Rx %d / %d.\n",
			dev->name, np->cur_tx, np->dirty_tx,
			np->cur_rx, np->dirty_rx);

	napi_disable(&np->napi);

	/*
	 * FIXME: what if someone tries to close a device
	 * that is suspended?
	 * Should we reenable the nic to switch to
	 * the final WOL settings?
	 */

	del_timer_sync(&np->timer);
	/* disable_irq() ensures no handler is running while we flip
	 * hands_off under the lock */
	disable_irq(irq);
	spin_lock_irq(&np->lock);
	natsemi_irq_disable(dev);
	np->hands_off = 1;
	spin_unlock_irq(&np->lock);
	enable_irq(irq);

	free_irq(irq, dev);

	/* Interrupt disabled, interrupt handler released,
	 * queue stopped, timer deleted, rtnl_lock held
	 * All async codepaths that access the driver are disabled.
	 */
	spin_lock_irq(&np->lock);
	np->hands_off = 0;
	readl(ioaddr + IntrMask);
	readw(ioaddr + MIntrStatus);

	/* Freeze Stats */
	writel(StatsFreeze, ioaddr + StatsCtrl);

	/* Stop the chip's Tx and Rx processes. */
	natsemi_stop_rxtx(dev);

	__get_stats(dev);
	spin_unlock_irq(&np->lock);

	/* clear the carrier last - an interrupt could reenable it otherwise */
	netif_carrier_off(dev);
	netif_stop_queue(dev);

	dump_ring(dev);
	drain_ring(dev);
	free_ring(dev);

	{
		u32 wol = readl(ioaddr + WOLCmd) & WakeOptsSummary;
		if (wol) {
			/* restart the NIC in WOL mode.
			 * The nic must be stopped for this.
			 */
			enable_wol_mode(dev, 0);
		} else {
			/* Restore PME enable bit unmolested */
			writel(np->SavedClkRun, ioaddr + ClkRun);
		}
	}
	return 0;
}
3210d9fb9f38SJeff Kirsher 
3211d9fb9f38SJeff Kirsher 
32126980cbe4SBill Pemberton static void natsemi_remove1(struct pci_dev *pdev)
3213d9fb9f38SJeff Kirsher {
3214d9fb9f38SJeff Kirsher 	struct net_device *dev = pci_get_drvdata(pdev);
3215d9fb9f38SJeff Kirsher 	void __iomem * ioaddr = ns_ioaddr(dev);
3216d9fb9f38SJeff Kirsher 
3217d9fb9f38SJeff Kirsher 	NATSEMI_REMOVE_FILE(pdev, dspcfg_workaround);
3218d9fb9f38SJeff Kirsher 	unregister_netdev (dev);
3219d9fb9f38SJeff Kirsher 	pci_release_regions (pdev);
3220d9fb9f38SJeff Kirsher 	iounmap(ioaddr);
3221d9fb9f38SJeff Kirsher 	free_netdev (dev);
3222d9fb9f38SJeff Kirsher }
3223d9fb9f38SJeff Kirsher 
3224d9fb9f38SJeff Kirsher #ifdef CONFIG_PM
3225d9fb9f38SJeff Kirsher 
3226d9fb9f38SJeff Kirsher /*
3227d9fb9f38SJeff Kirsher  * The ns83815 chip doesn't have explicit RxStop bits.
3228d9fb9f38SJeff Kirsher  * Kicking the Rx or Tx process for a new packet reenables the Rx process
3229d9fb9f38SJeff Kirsher  * of the nic, thus this function must be very careful:
3230d9fb9f38SJeff Kirsher  *
3231d9fb9f38SJeff Kirsher  * suspend/resume synchronization:
3232d9fb9f38SJeff Kirsher  * entry points:
3233d9fb9f38SJeff Kirsher  *   netdev_open, netdev_close, netdev_ioctl, set_rx_mode, intr_handler,
3234d9fb9f38SJeff Kirsher  *   start_tx, ns_tx_timeout
3235d9fb9f38SJeff Kirsher  *
3236d9fb9f38SJeff Kirsher  * No function accesses the hardware without checking np->hands_off.
3237d9fb9f38SJeff Kirsher  *	the check occurs under spin_lock_irq(&np->lock);
3238d9fb9f38SJeff Kirsher  * exceptions:
3239d9fb9f38SJeff Kirsher  *	* netdev_ioctl: noncritical access.
3240d9fb9f38SJeff Kirsher  *	* netdev_open: cannot happen due to the device_detach
3241d9fb9f38SJeff Kirsher  *	* netdev_close: doesn't hurt.
3242d9fb9f38SJeff Kirsher  *	* netdev_timer: timer stopped by natsemi_suspend.
3243d9fb9f38SJeff Kirsher  *	* intr_handler: doesn't acquire the spinlock. suspend calls
3244d9fb9f38SJeff Kirsher  *		disable_irq() to enforce synchronization.
3245d9fb9f38SJeff Kirsher  *      * natsemi_poll: checks before reenabling interrupts.  suspend
3246d9fb9f38SJeff Kirsher  *              sets hands_off, disables interrupts and then waits with
3247d9fb9f38SJeff Kirsher  *              napi_disable().
3248d9fb9f38SJeff Kirsher  *
3249d9fb9f38SJeff Kirsher  * Interrupts must be disabled, otherwise hands_off can cause irq storms.
3250d9fb9f38SJeff Kirsher  */
3251d9fb9f38SJeff Kirsher 
/*
 * PM suspend hook.  If the interface is running: stop the timer, mask
 * irqs and set hands_off (see the synchronization notes above), halt
 * rx/tx, harvest the stats, drain the rings, and leave the chip either
 * armed for wake-on-lan or with its original PME setting.  Finally
 * detach the netdev so netdev_open cannot race with us.
 */
static int natsemi_suspend (struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata (pdev);
	struct netdev_private *np = netdev_priv(dev);
	void __iomem * ioaddr = ns_ioaddr(dev);

	rtnl_lock();
	if (netif_running (dev)) {
		const int irq = np->pci_dev->irq;

		del_timer_sync(&np->timer);

		disable_irq(irq);
		spin_lock_irq(&np->lock);

		natsemi_irq_disable(dev);
		np->hands_off = 1;
		natsemi_stop_rxtx(dev);
		netif_stop_queue(dev);

		spin_unlock_irq(&np->lock);
		enable_irq(irq);

		napi_disable(&np->napi);

		/* Update the error counts. */
		__get_stats(dev);

		/* pci_power_off(pdev, -1); */
		drain_ring(dev);
		{
			u32 wol = readl(ioaddr + WOLCmd) & WakeOptsSummary;
			/* Restore PME enable bit */
			if (wol) {
				/* restart the NIC in WOL mode.
				 * The nic must be stopped for this.
				 * FIXME: use the WOL interrupt
				 */
				enable_wol_mode(dev, 0);
			} else {
				/* Restore PME enable bit unmolested */
				writel(np->SavedClkRun, ioaddr + ClkRun);
			}
		}
	}
	netif_device_detach(dev);
	rtnl_unlock();
	return 0;
}
3301d9fb9f38SJeff Kirsher 
3302d9fb9f38SJeff Kirsher 
/*
 * PM resume hook: re-enable the PCI device, rebuild the rings, reset
 * and reprogram the chip, clear hands_off under the lock, restart the
 * watchdog timer, then reattach the netdev.  A no-op if the device was
 * never detached (netif_device_present short-circuits).
 */
static int natsemi_resume (struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata (pdev);
	struct netdev_private *np = netdev_priv(dev);
	int ret = 0;

	rtnl_lock();
	if (netif_device_present(dev))
		goto out;
	if (netif_running(dev)) {
		const int irq = np->pci_dev->irq;

		/* suspend must have set hands_off - anything else is a bug */
		BUG_ON(!np->hands_off);
		ret = pci_enable_device(pdev);
		if (ret < 0) {
			dev_err(&pdev->dev,
				"pci_enable_device() failed: %d\n", ret);
			goto out;
		}
	/*	pci_power_on(pdev); */

		napi_enable(&np->napi);

		natsemi_reset(dev);
		init_ring(dev);
		disable_irq(irq);
		spin_lock_irq(&np->lock);
		np->hands_off = 0;
		init_registers(dev);
		netif_device_attach(dev);
		spin_unlock_irq(&np->lock);
		enable_irq(irq);

		mod_timer(&np->timer, round_jiffies(jiffies + 1*HZ));
	}
	netif_device_attach(dev);
out:
	rtnl_unlock();
	return ret;
}
3343d9fb9f38SJeff Kirsher 
3344d9fb9f38SJeff Kirsher #endif /* CONFIG_PM */
3345d9fb9f38SJeff Kirsher 
/* PCI driver glue: probe/remove hooks, plus legacy suspend/resume
 * callbacks when power management is configured in. */
static struct pci_driver natsemi_driver = {
	.name		= DRV_NAME,
	.id_table	= natsemi_pci_tbl,
	.probe		= natsemi_probe1,
	.remove		= natsemi_remove1,
#ifdef CONFIG_PM
	.suspend	= natsemi_suspend,
	.resume		= natsemi_resume,
#endif
};
3356d9fb9f38SJeff Kirsher 
3357d9fb9f38SJeff Kirsher static int __init natsemi_init_mod (void)
3358d9fb9f38SJeff Kirsher {
3359d9fb9f38SJeff Kirsher /* when a module, this is printed whether or not devices are found in probe */
3360d9fb9f38SJeff Kirsher #ifdef MODULE
3361d9fb9f38SJeff Kirsher 	printk(version);
3362d9fb9f38SJeff Kirsher #endif
3363d9fb9f38SJeff Kirsher 
3364d9fb9f38SJeff Kirsher 	return pci_register_driver(&natsemi_driver);
3365d9fb9f38SJeff Kirsher }
3366d9fb9f38SJeff Kirsher 
3367d9fb9f38SJeff Kirsher static void __exit natsemi_exit_mod (void)
3368d9fb9f38SJeff Kirsher {
3369d9fb9f38SJeff Kirsher 	pci_unregister_driver (&natsemi_driver);
3370d9fb9f38SJeff Kirsher }
3371d9fb9f38SJeff Kirsher 
3372d9fb9f38SJeff Kirsher module_init(natsemi_init_mod);
3373d9fb9f38SJeff Kirsher module_exit(natsemi_exit_mod);
3374d9fb9f38SJeff Kirsher 
3375