xref: /openbmc/linux/drivers/net/ethernet/via/via-rhine.c (revision 269f3114b53a3ce93eb5977852ac2624a380f600)
/* via-rhine.c: A Linux Ethernet device driver for VIA Rhine family chips. */
/*
	Written 1998-2001 by Donald Becker.

	Current Maintainer: Roger Luethi <rl@hellgate.ch>

	This software may be used and distributed according to the terms of
	the GNU General Public License (GPL), incorporated herein by reference.
	Drivers based on or derived from this code fall under the GPL and must
	retain the authorship, copyright and license notice.  This file is not
	a complete program and may only be used when the entire operating
	system is licensed under the GPL.

	This driver is designed for the VIA VT86C100A Rhine-I.
	It also works with the Rhine-II (6102) and Rhine-III (6105/6105L/6105LOM
	and management NIC 6105M).

	The author may be reached as becker@scyld.com, or C/O
	Scyld Computing Corporation
	410 Severn Ave., Suite 210
	Annapolis MD 21403


	This driver contains some changes from the original Donald Becker
	version. He may or may not be interested in bug reports on this
	code. You can find his versions at:
	http://www.scyld.com/network/via-rhine.html
	[link no longer provides useful info -jgarzik]

*/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#define DRV_NAME	"via-rhine"
#define DRV_VERSION	"1.5.0"
#define DRV_RELDATE	"2010-10-09"

#include <linux/types.h>

/* A few user-configurable values.
   These may be modified when a driver module is loaded. */

#define DEBUG
static int debug = 1;	/* 1 normal messages, 0 quiet .. 7 verbose. */
static int max_interrupt_work = 20;

/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
   Setting to > 1518 effectively disables this feature. */
#if defined(__alpha__) || defined(__arm__) || defined(__hppa__) || \
	defined(CONFIG_SPARC) || defined(__ia64__) ||		   \
	defined(__sh__) || defined(__mips__)
static int rx_copybreak = 1518;
#else
static int rx_copybreak;
#endif
/* Work-around for broken BIOSes: they are unable to get the chip back out of
   power state D3, so PXE booting fails. bootparam(7): via-rhine.avoid_D3=1 */
static bool avoid_D3;

/*
 * In case you are looking for 'options[]' or 'full_duplex[]', they
 * are gone. Use ethtool(8) instead.
 */

/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
   The Rhine has a 64 element 8390-like hash table. */
static const int multicast_filter_limit = 32;
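
/*
 * Illustration only -- not part of the driver. The 8390-like filter maps
 * each multicast address to one of 64 hash-table bits via the top six
 * bits of the Ethernet CRC; this is essentially what rhine_set_rx_mode()
 * computes when the address count is below multicast_filter_limit. The
 * helper name is hypothetical, and the block is guarded out because
 * ether_crc() and ETH_ALEN come from headers included further down.
 */
#if 0
static inline void example_hash_mc_addr(const u8 *addr, u32 mc_filter[2])
{
	int bit_nr = ether_crc(ETH_ALEN, addr) >> 26;	/* 6-bit index, 0..63 */

	mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);	/* two 32-bit filter words */
}
#endif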


/* Operational parameters that are set at compile time. */

/* Keep the ring sizes a power of two for compile efficiency.
   The compiler will convert <unsigned>'%'<2^N> into a bit mask.
   Making the Tx ring too large decreases the effectiveness of channel
   bonding and packet priority.
   There are no ill effects from too-large receive rings. */
#define TX_RING_SIZE	16
#define TX_QUEUE_LEN	10	/* Limit ring entries actually used. */
#define RX_RING_SIZE	64
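
/*
 * Illustration only -- not part of the driver. Because the ring sizes
 * above are powers of two, the indexing pattern used throughout the
 * driver (counter % RING_SIZE) compiles down to a bit mask; the helper
 * name below is hypothetical.
 */
static inline unsigned int example_tx_ring_slot(unsigned int cur_tx)
{
	/* With TX_RING_SIZE == 16 this is cur_tx & (TX_RING_SIZE - 1). */
	return cur_tx % TX_RING_SIZE;
}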

/* Operational parameters that usually are not changed. */

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT	(2*HZ)

#define PKT_BUF_SZ	1536	/* Size of each temporary Rx buffer. */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>
#include <linux/if_vlan.h>
#include <linux/bitops.h>
#include <linux/workqueue.h>
#include <asm/processor.h>	/* Processor type for cache alignment. */
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <linux/dmi.h>

/* These identify the driver base version and may not be removed. */
static const char version[] __devinitconst =
	"v1.10-LK" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker";

/* This driver was written to use PCI memory space. Some early versions
   of the Rhine may only work correctly with I/O space accesses. */
#ifdef CONFIG_VIA_RHINE_MMIO
#define USE_MMIO
#endif

MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("VIA Rhine PCI Fast Ethernet driver");
MODULE_LICENSE("GPL");

module_param(max_interrupt_work, int, 0);
module_param(debug, int, 0);
module_param(rx_copybreak, int, 0);
module_param(avoid_D3, bool, 0);
MODULE_PARM_DESC(max_interrupt_work, "VIA Rhine maximum events handled per interrupt");
MODULE_PARM_DESC(debug, "VIA Rhine debug level (0-7)");
MODULE_PARM_DESC(rx_copybreak, "VIA Rhine copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(avoid_D3, "Avoid power state D3 (work-around for broken BIOSes)");
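
/*
 * Example invocation (assuming the driver is built as a module):
 *
 *	modprobe via-rhine debug=3 rx_copybreak=1518 avoid_D3=1
 *
 * When built in, the same parameters can be passed on the kernel
 * command line, e.g. via-rhine.avoid_D3=1 (see bootparam(7) above).
 */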

#define MCAM_SIZE	32
#define VCAM_SIZE	32

/*
		Theory of Operation

I. Board Compatibility

This driver is designed for the VIA VT86C100A Rhine-I PCI Fast Ethernet
controller.

II. Board-specific settings

Boards with this chip are functional only in a bus-master PCI slot.

Many operational settings are loaded from the EEPROM to the Config word at
offset 0x78. For most of these settings, this driver assumes that they are
correct.
If this driver is compiled to use PCI memory space operations the EEPROM
must be configured to enable memory ops.

III. Driver operation

IIIa. Ring buffers

This driver uses two statically allocated fixed-size descriptor lists
formed into rings by a branch from the final descriptor to the beginning of
the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.

IIIb/c. Transmit/Receive Structure

This driver attempts to use a zero-copy receive and transmit scheme.

Alas, all data buffers are required to start on a 32 bit boundary, so
the driver must often copy transmit packets into bounce buffers.

The driver allocates full frame size skbuffs for the Rx ring buffers at
open() time and passes the skb->data field to the chip as receive data
buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
a fresh skbuff is allocated and the frame is copied to the new skbuff.
When the incoming frame is larger, the skbuff is passed directly up the
protocol stack. Buffers consumed this way are replaced by newly allocated
skbuffs in the last phase of rhine_rx().

The RX_COPYBREAK value is chosen to trade off the memory wasted by
using a full-sized skbuff for small frames vs. the copying costs of larger
frames. New boards are typically used in generously configured machines
and the underfilled buffers have negligible impact compared to the benefit of
a single allocation size, so the default value of zero results in never
copying packets. When copying is done, the cost is usually mitigated by using
a combined copy/checksum routine. Copying also preloads the cache, which is
most useful with small frames. (A sketch of this receive-path decision
follows this comment block.)

Since the VIA chips are only able to transfer data to buffers on 32 bit
boundaries, the IP header at offset 14 in an ethernet frame isn't
longword aligned for further processing. Copying these unaligned buffers
has the beneficial effect of 16-byte aligning the IP header.

IIId. Synchronization

The driver runs as two independent, single-threaded flows of control. One
is the send-packet routine, which enforces single-threaded use by the
netdev_priv(dev)->lock spinlock. The other thread is the interrupt handler,
which is single threaded by the hardware and interrupt handling software.

The send packet thread has partial control over the Tx ring. It locks the
netdev_priv(dev)->lock whenever it's queuing a Tx packet. If the next slot in
the ring is not available it stops the transmit queue by
calling netif_stop_queue.

The interrupt handler has exclusive control over the Rx ring and records stats
from the Tx ring. After reaping the stats, it marks the Tx queue entry as
empty by incrementing the dirty_tx mark. If at least half of the entries in
the Tx ring are available the transmit queue is woken up if it was stopped.

IV. Notes

IVb. References

Preliminary VT86C100A manual from http://www.via.com.tw/
http://www.scyld.com/expert/100mbps.html
http://www.scyld.com/expert/NWay.html
ftp://ftp.via.com.tw/public/lan/Products/NIC/VT86C100A/Datasheet/VT86C100A03.pdf
ftp://ftp.via.com.tw/public/lan/Products/NIC/VT6102/Datasheet/VT6102_021.PDF


IVc. Errata

The VT86C100A manual is not a reliable source of information.
The 3043 chip does not handle unaligned transmit or receive buffers, resulting
in significant performance degradation for bounce buffer copies on transmit
and unaligned IP headers on receive.
The chip does not pad to minimum transmit length.

*/
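
/*
 * Illustration only -- not part of the driver. A minimal sketch of the
 * copy-only-tiny-frames decision described in IIIb/c; the real logic
 * lives in rhine_rx(). The helper name and its copybreak parameter are
 * hypothetical; skb_reserve(skb, NET_IP_ALIGN) is what lands the copied
 * frame's IP header on an aligned boundary.
 */
static inline struct sk_buff *example_rx_copybreak(struct net_device *dev,
						   struct sk_buff *ring_skb,
						   int pkt_len, int copybreak)
{
	struct sk_buff *skb;

	if (pkt_len >= copybreak)
		return ring_skb;	/* hand the ring skb to the stack */

	skb = netdev_alloc_skb(dev, pkt_len + NET_IP_ALIGN);
	if (!skb)
		return ring_skb;	/* allocation failed: skip the copy */

	skb_reserve(skb, NET_IP_ALIGN);	/* align the IP header */
	skb_copy_to_linear_data(skb, ring_skb->data, pkt_len);
	skb_put(skb, pkt_len);
	return skb;			/* ring_skb stays in the Rx ring */
}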


/* This table drives the PCI probe routines. It's mostly boilerplate in all
   of the drivers, and will likely be provided by some future kernel.
   Note the matching code -- the first table entry matches all 56** cards but
   the second only the 1234 card.
*/

enum rhine_revs {
	VT86C100A	= 0x00,
	VTunknown0	= 0x20,
	VT6102		= 0x40,
	VT8231		= 0x50,	/* Integrated MAC */
	VT8233		= 0x60,	/* Integrated MAC */
	VT8235		= 0x74,	/* Integrated MAC */
	VT8237		= 0x78,	/* Integrated MAC */
	VTunknown1	= 0x7C,
	VT6105		= 0x80,
	VT6105_B0	= 0x83,
	VT6105L		= 0x8A,
	VT6107		= 0x8C,
	VTunknown2	= 0x8E,
	VT6105M		= 0x90,	/* Management adapter */
};

enum rhine_quirks {
	rqWOL		= 0x0001,	/* Wake-On-LAN support */
	rqForceReset	= 0x0002,
	rq6patterns	= 0x0040,	/* 6 instead of 4 patterns for WOL */
	rqStatusWBRace	= 0x0080,	/* Tx Status Writeback Error possible */
	rqRhineI	= 0x0100,	/* See comment below */
};
/*
 * rqRhineI: VT86C100A (aka Rhine-I) uses different bits to enable
 * MMIO as well as for the collision counter and the Tx FIFO underflow
 * indicator. In addition, Tx and Rx buffers need to be 4-byte aligned.
 */

/* Beware of PCI posted writes: a dummy read flushes them to the chip */
#define IOSYNC	do { ioread8(ioaddr + StationAddr); } while (0)

static DEFINE_PCI_DEVICE_TABLE(rhine_pci_tbl) = {
	{ 0x1106, 0x3043, PCI_ANY_ID, PCI_ANY_ID, },	/* VT86C100A */
	{ 0x1106, 0x3065, PCI_ANY_ID, PCI_ANY_ID, },	/* VT6102 */
	{ 0x1106, 0x3106, PCI_ANY_ID, PCI_ANY_ID, },	/* 6105{,L,LOM} */
	{ 0x1106, 0x3053, PCI_ANY_ID, PCI_ANY_ID, },	/* VT6105M */
	{ }	/* terminate list */
};
MODULE_DEVICE_TABLE(pci, rhine_pci_tbl);


/* Offsets to the device registers. */
enum register_offsets {
	StationAddr=0x00, RxConfig=0x06, TxConfig=0x07, ChipCmd=0x08,
	ChipCmd1=0x09, TQWake=0x0A,
	IntrStatus=0x0C, IntrEnable=0x0E,
	MulticastFilter0=0x10, MulticastFilter1=0x14,
	RxRingPtr=0x18, TxRingPtr=0x1C, GFIFOTest=0x54,
	MIIPhyAddr=0x6C, MIIStatus=0x6D, PCIBusConfig=0x6E, PCIBusConfig1=0x6F,
	MIICmd=0x70, MIIRegAddr=0x71, MIIData=0x72, MACRegEEcsr=0x74,
	ConfigA=0x78, ConfigB=0x79, ConfigC=0x7A, ConfigD=0x7B,
	RxMissed=0x7C, RxCRCErrs=0x7E, MiscCmd=0x81,
	StickyHW=0x83, IntrStatus2=0x84,
	CamMask=0x88, CamCon=0x92, CamAddr=0x93,
	WOLcrSet=0xA0, PwcfgSet=0xA1, WOLcgSet=0xA3, WOLcrClr=0xA4,
	WOLcrClr1=0xA6, WOLcgClr=0xA7,
	PwrcsrSet=0xA8, PwrcsrSet1=0xA9, PwrcsrClr=0xAC, PwrcsrClr1=0xAD,
};

/* Bits in ConfigD */
enum backoff_bits {
	BackOptional=0x01, BackModify=0x02,
	BackCaptureEffect=0x04, BackRandom=0x08
};

/* Bits in the TxConfig (TCR) register */
enum tcr_bits {
	TCR_PQEN=0x01,
	TCR_LB0=0x02,		/* loopback[0] */
	TCR_LB1=0x04,		/* loopback[1] */
	TCR_OFSET=0x08,
	TCR_RTGOPT=0x10,
	TCR_RTFT0=0x20,
	TCR_RTFT1=0x40,
	TCR_RTSF=0x80,
};

/* Bits in the CamCon (CAMC) register */
enum camcon_bits {
	CAMC_CAMEN=0x01,
	CAMC_VCAMSL=0x02,
	CAMC_CAMWR=0x04,
	CAMC_CAMRD=0x08,
};

/* Bits in the PCIBusConfig1 (BCR1) register */
enum bcr1_bits {
	BCR1_POT0=0x01,
	BCR1_POT1=0x02,
	BCR1_POT2=0x04,
	BCR1_CTFT0=0x08,
	BCR1_CTFT1=0x10,
	BCR1_CTSF=0x20,
	BCR1_TXQNOBK=0x40,	/* for VT6105 */
	BCR1_VIDFR=0x80,	/* for VT6105 */
	BCR1_MED0=0x40,		/* for VT6102 */
	BCR1_MED1=0x80,		/* for VT6102 */
};

#ifdef USE_MMIO
/* Registers we check to verify that MMIO and PIO accesses read back the same. */
static const int mmio_verify_registers[] = {
	RxConfig, TxConfig, IntrEnable, ConfigA, ConfigB, ConfigC, ConfigD,
	0
};
#endif

/* Bits in the interrupt status/mask registers. */
enum intr_status_bits {
	IntrRxDone=0x0001, IntrRxErr=0x0004, IntrRxEmpty=0x0020,
	IntrTxDone=0x0002, IntrTxError=0x0008, IntrTxUnderrun=0x0210,
	IntrPCIErr=0x0040,
	IntrStatsMax=0x0080, IntrRxEarly=0x0100,
	IntrRxOverflow=0x0400, IntrRxDropped=0x0800, IntrRxNoBuf=0x1000,
	IntrTxAborted=0x2000, IntrLinkChange=0x4000,
	IntrRxWakeUp=0x8000,
	IntrNormalSummary=0x0003, IntrAbnormalSummary=0xC260,
	IntrTxDescRace=0x080000,	/* mapped from IntrStatus2 */
	IntrTxErrSummary=0x082218,
};

/* Bits in WOLcrSet/WOLcrClr and PwrcsrSet/PwrcsrClr */
enum wol_bits {
	WOLucast	= 0x10,
	WOLmagic	= 0x20,
	WOLbmcast	= 0x30,
	WOLlnkon	= 0x40,
	WOLlnkoff	= 0x80,
};

/* The Rx and Tx buffer descriptors. */
struct rx_desc {
	__le32 rx_status;
	__le32 desc_length; /* Chain flag, Buffer/frame length */
	__le32 addr;
	__le32 next_desc;
};
struct tx_desc {
	__le32 tx_status;
	__le32 desc_length; /* Chain flag, Tx Config, Frame length */
	__le32 addr;
	__le32 next_desc;
};

/* Initial value for tx_desc.desc_length, Buffer size goes to bits 0-10 */
#define TXDESC		0x00e08000

enum rx_status_bits {
	RxOK=0x8000, RxWholePkt=0x0300, RxErr=0x008F
};

/* Bits in *_desc.*_status */
enum desc_status_bits {
	DescOwn=0x80000000
};

/* Bits in *_desc.*_length */
enum desc_length_bits {
	DescTag=0x00010000
};

/* Bits in ChipCmd. */
enum chip_cmd_bits {
	CmdInit=0x01, CmdStart=0x02, CmdStop=0x04, CmdRxOn=0x08,
	CmdTxOn=0x10, Cmd1TxDemand=0x20, CmdRxDemand=0x40,
	Cmd1EarlyRx=0x01, Cmd1EarlyTx=0x02, Cmd1FDuplex=0x04,
	Cmd1NoTxPoll=0x08, Cmd1Reset=0x80,
};

struct rhine_private {
	/* Bit mask for configured VLAN ids */
	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];

	/* Descriptor rings */
	struct rx_desc *rx_ring;
	struct tx_desc *tx_ring;
	dma_addr_t rx_ring_dma;
	dma_addr_t tx_ring_dma;

	/* The addresses of receive-in-place skbuffs. */
	struct sk_buff *rx_skbuff[RX_RING_SIZE];
	dma_addr_t rx_skbuff_dma[RX_RING_SIZE];

	/* The saved address of a sent-in-place packet/buffer, for later free(). */
	struct sk_buff *tx_skbuff[TX_RING_SIZE];
	dma_addr_t tx_skbuff_dma[TX_RING_SIZE];

	/* Tx bounce buffers (Rhine-I only) */
	unsigned char *tx_buf[TX_RING_SIZE];
	unsigned char *tx_bufs;
	dma_addr_t tx_bufs_dma;

	struct pci_dev *pdev;
	long pioaddr;
	struct net_device *dev;
	struct napi_struct napi;
	spinlock_t lock;
	struct work_struct reset_task;

	/* Frequently used values: keep some adjacent for cache effect. */
	u32 quirks;
	struct rx_desc *rx_head_desc;
	unsigned int cur_rx, dirty_rx;	/* Producer/consumer ring indices */
	unsigned int cur_tx, dirty_tx;
	unsigned int rx_buf_sz;		/* Based on MTU+slack. */
	u8 wolopts;

	u8 tx_thresh, rx_thresh;

	struct mii_if_info mii_if;
	void __iomem *base;
};

#define BYTE_REG_BITS_ON(x, p)      do { iowrite8((ioread8((p))|(x)), (p)); } while (0)
#define WORD_REG_BITS_ON(x, p)      do { iowrite16((ioread16((p))|(x)), (p)); } while (0)
#define DWORD_REG_BITS_ON(x, p)     do { iowrite32((ioread32((p))|(x)), (p)); } while (0)

#define BYTE_REG_BITS_IS_ON(x, p)   (ioread8((p)) & (x))
#define WORD_REG_BITS_IS_ON(x, p)   (ioread16((p)) & (x))
#define DWORD_REG_BITS_IS_ON(x, p)  (ioread32((p)) & (x))

#define BYTE_REG_BITS_OFF(x, p)     do { iowrite8(ioread8((p)) & (~(x)), (p)); } while (0)
#define WORD_REG_BITS_OFF(x, p)     do { iowrite16(ioread16((p)) & (~(x)), (p)); } while (0)
#define DWORD_REG_BITS_OFF(x, p)    do { iowrite32(ioread32((p)) & (~(x)), (p)); } while (0)

#define BYTE_REG_BITS_SET(x, m, p)   do { iowrite8((ioread8((p)) & (~(m)))|(x), (p)); } while (0)
#define WORD_REG_BITS_SET(x, m, p)   do { iowrite16((ioread16((p)) & (~(m)))|(x), (p)); } while (0)
#define DWORD_REG_BITS_SET(x, m, p)  do { iowrite32((ioread32((p)) & (~(m)))|(x), (p)); } while (0)


static int  mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
static int  rhine_open(struct net_device *dev);
static void rhine_reset_task(struct work_struct *work);
static void rhine_tx_timeout(struct net_device *dev);
static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
				  struct net_device *dev);
static irqreturn_t rhine_interrupt(int irq, void *dev_instance);
static void rhine_tx(struct net_device *dev);
static int rhine_rx(struct net_device *dev, int limit);
static void rhine_error(struct net_device *dev, int intr_status);
static void rhine_set_rx_mode(struct net_device *dev);
static struct net_device_stats *rhine_get_stats(struct net_device *dev);
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static const struct ethtool_ops netdev_ethtool_ops;
static int  rhine_close(struct net_device *dev);
static void rhine_shutdown(struct pci_dev *pdev);
static int rhine_vlan_rx_add_vid(struct net_device *dev, unsigned short vid);
static int rhine_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid);
static void rhine_set_cam(void __iomem *ioaddr, int idx, u8 *addr);
static void rhine_set_vlan_cam(void __iomem *ioaddr, int idx, u8 *addr);
static void rhine_set_cam_mask(void __iomem *ioaddr, u32 mask);
static void rhine_set_vlan_cam_mask(void __iomem *ioaddr, u32 mask);
static void rhine_init_cam_filter(struct net_device *dev);
static void rhine_update_vcam(struct net_device *dev);

#define RHINE_WAIT_FOR(condition)				\
do {								\
	int i = 1024;						\
	while (!(condition) && --i)				\
		;						\
	if (debug > 1 && i < 512)				\
		pr_info("%4d cycles used @ %s:%d\n",		\
			1024 - i, __func__, __LINE__);		\
} while (0)

static inline u32 get_intr_status(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	u32 intr_status;

	intr_status = ioread16(ioaddr + IntrStatus);
	/* On Rhine-II, Bit 3 indicates Tx descriptor write-back race. */
	if (rp->quirks & rqStatusWBRace)
		intr_status |= ioread8(ioaddr + IntrStatus2) << 16;
	return intr_status;
}

/*
 * Get power related registers into sane state.
 * Notify user about past WOL event.
 */
static void rhine_power_init(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	u16 wolstat;

	if (rp->quirks & rqWOL) {
		/* Make sure chip is in power state D0 */
		iowrite8(ioread8(ioaddr + StickyHW) & 0xFC, ioaddr + StickyHW);

		/* Disable "force PME-enable" */
		iowrite8(0x80, ioaddr + WOLcgClr);

		/* Clear power-event config bits (WOL) */
		iowrite8(0xFF, ioaddr + WOLcrClr);
		/* More recent cards can manage two additional patterns */
		if (rp->quirks & rq6patterns)
			iowrite8(0x03, ioaddr + WOLcrClr1);

		/* Save power-event status bits */
		wolstat = ioread8(ioaddr + PwrcsrSet);
		if (rp->quirks & rq6patterns)
			wolstat |= (ioread8(ioaddr + PwrcsrSet1) & 0x03) << 8;

		/* Clear power-event status bits */
		iowrite8(0xFF, ioaddr + PwrcsrClr);
		if (rp->quirks & rq6patterns)
			iowrite8(0x03, ioaddr + PwrcsrClr1);

		if (wolstat) {
			char *reason;
			switch (wolstat) {
			case WOLmagic:
				reason = "Magic packet";
				break;
			case WOLlnkon:
				reason = "Link went up";
				break;
			case WOLlnkoff:
				reason = "Link went down";
				break;
			case WOLucast:
				reason = "Unicast packet";
				break;
			case WOLbmcast:
				reason = "Multicast/broadcast packet";
				break;
			default:
				reason = "Unknown";
			}
			netdev_info(dev, "Woke system up. Reason: %s\n",
				    reason);
		}
	}
}

static void rhine_chip_reset(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	iowrite8(Cmd1Reset, ioaddr + ChipCmd1);
	IOSYNC;

	if (ioread8(ioaddr + ChipCmd1) & Cmd1Reset) {
		netdev_info(dev, "Reset not complete yet. Trying harder.\n");

		/* Force reset */
		if (rp->quirks & rqForceReset)
			iowrite8(0x40, ioaddr + MiscCmd);

		/* Reset can take somewhat longer (rare) */
		RHINE_WAIT_FOR(!(ioread8(ioaddr + ChipCmd1) & Cmd1Reset));
	}

	if (debug > 1)
		netdev_info(dev, "Reset %s\n",
			    (ioread8(ioaddr + ChipCmd1) & Cmd1Reset) ?
			    "failed" : "succeeded");
}

#ifdef USE_MMIO
static void enable_mmio(long pioaddr, u32 quirks)
{
	int n;
	if (quirks & rqRhineI) {
		/* More recent docs say that this bit is reserved ... */
		n = inb(pioaddr + ConfigA) | 0x20;
		outb(n, pioaddr + ConfigA);
	} else {
		n = inb(pioaddr + ConfigD) | 0x80;
		outb(n, pioaddr + ConfigD);
	}
}
#endif

/*
 * Loads bytes 0x00-0x05, 0x6E-0x6F, 0x78-0x7B from EEPROM
 * (plus 0x6C for Rhine-I/II)
 */
static void __devinit rhine_reload_eeprom(long pioaddr, struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	outb(0x20, pioaddr + MACRegEEcsr);
	RHINE_WAIT_FOR(!(inb(pioaddr + MACRegEEcsr) & 0x20));

#ifdef USE_MMIO
	/*
	 * Reloading from EEPROM overwrites ConfigA-D, so we must re-enable
	 * MMIO. If reloading EEPROM was done first this could be avoided, but
	 * it is not known if that still works with the "win98-reboot" problem.
	 */
	enable_mmio(pioaddr, rp->quirks);
#endif

	/* Turn off EEPROM-controlled wake-up (magic packet) */
	if (rp->quirks & rqWOL)
		iowrite8(ioread8(ioaddr + ConfigA) & 0xFC, ioaddr + ConfigA);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void rhine_poll(struct net_device *dev)
{
	disable_irq(dev->irq);
	rhine_interrupt(dev->irq, (void *)dev);
	enable_irq(dev->irq);
}
#endif

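/*
 * Raise the Tx FIFO threshold in TxConfig one 0x20 step at a time, up
 * to 0xe0, so that the chip buffers more of a frame before it starts
 * transmitting; typically invoked after a transmit underrun.
 */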
static void rhine_kick_tx_threshold(struct rhine_private *rp)
{
	if (rp->tx_thresh < 0xe0) {
		void __iomem *ioaddr = rp->base;

		rp->tx_thresh += 0x20;
		BYTE_REG_BITS_SET(rp->tx_thresh, 0x80, ioaddr + TxConfig);
	}
}

static int rhine_napipoll(struct napi_struct *napi, int budget)
{
	struct rhine_private *rp = container_of(napi, struct rhine_private, napi);
	struct net_device *dev = rp->dev;
	void __iomem *ioaddr = rp->base;
	int work_done;

	work_done = rhine_rx(dev, budget);

	if (work_done < budget) {
		napi_complete(napi);

		iowrite16(IntrRxDone | IntrRxErr | IntrRxEmpty | IntrRxOverflow |
			  IntrRxDropped | IntrRxNoBuf | IntrTxAborted |
			  IntrTxDone | IntrTxError | IntrTxUnderrun |
			  IntrPCIErr | IntrStatsMax | IntrLinkChange,
			  ioaddr + IntrEnable);
	}
	return work_done;
}

static void __devinit rhine_hw_init(struct net_device *dev, long pioaddr)
{
	struct rhine_private *rp = netdev_priv(dev);

	/* Reset the chip to erase previous misconfiguration. */
	rhine_chip_reset(dev);

	/* Rhine-I needs extra time to recuperate before EEPROM reload */
	if (rp->quirks & rqRhineI)
		msleep(5);

	/* Reload EEPROM controlled bytes cleared by soft reset */
	rhine_reload_eeprom(pioaddr, dev);
}

static const struct net_device_ops rhine_netdev_ops = {
	.ndo_open		 = rhine_open,
	.ndo_stop		 = rhine_close,
	.ndo_start_xmit		 = rhine_start_tx,
	.ndo_get_stats		 = rhine_get_stats,
	.ndo_set_rx_mode	 = rhine_set_rx_mode,
	.ndo_change_mtu		 = eth_change_mtu,
	.ndo_validate_addr	 = eth_validate_addr,
	.ndo_set_mac_address	 = eth_mac_addr,
	.ndo_do_ioctl		 = netdev_ioctl,
	.ndo_tx_timeout		 = rhine_tx_timeout,
	.ndo_vlan_rx_add_vid	 = rhine_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	 = rhine_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	 = rhine_poll,
#endif
};

static int __devinit rhine_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct rhine_private *rp;
	int i, rc;
	u32 quirks;
	long pioaddr;
	long memaddr;
	void __iomem *ioaddr;
	int io_size, phy_id;
	const char *name;
#ifdef USE_MMIO
	int bar = 1;
#else
	int bar = 0;
#endif

/* when built into the kernel, we only print version if device is found */
#ifndef MODULE
	pr_info_once("%s\n", version);
#endif

	io_size = 256;
	phy_id = 0;
	quirks = 0;
	name = "Rhine";
	if (pdev->revision < VTunknown0) {
		quirks = rqRhineI;
		io_size = 128;
	} else if (pdev->revision >= VT6102) {
		quirks = rqWOL | rqForceReset;
		if (pdev->revision < VT6105) {
			name = "Rhine II";
			quirks |= rqStatusWBRace;	/* Rhine-II exclusive */
		} else {
			phy_id = 1;	/* Integrated PHY, phy_id fixed to 1 */
			if (pdev->revision >= VT6105_B0)
				quirks |= rq6patterns;
			if (pdev->revision < VT6105M)
				name = "Rhine III";
			else
				name = "Rhine III (Management Adapter)";
		}
	}

	rc = pci_enable_device(pdev);
	if (rc)
		goto err_out;

	/* this should always be supported */
	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (rc) {
		dev_err(&pdev->dev,
			"32-bit PCI DMA addresses not supported by the card!?\n");
		goto err_out;
	}

	/* sanity check */
	if ((pci_resource_len(pdev, 0) < io_size) ||
	    (pci_resource_len(pdev, 1) < io_size)) {
		rc = -EIO;
		dev_err(&pdev->dev, "Insufficient PCI resources, aborting\n");
		goto err_out;
	}

	pioaddr = pci_resource_start(pdev, 0);
	memaddr = pci_resource_start(pdev, 1);

	pci_set_master(pdev);

	dev = alloc_etherdev(sizeof(struct rhine_private));
	if (!dev) {
		rc = -ENOMEM;
		dev_err(&pdev->dev, "alloc_etherdev failed\n");
		goto err_out;
	}
	SET_NETDEV_DEV(dev, &pdev->dev);

	rp = netdev_priv(dev);
	rp->dev = dev;
	rp->quirks = quirks;
	rp->pioaddr = pioaddr;
	rp->pdev = pdev;

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc)
		goto err_out_free_netdev;

	ioaddr = pci_iomap(pdev, bar, io_size);
	if (!ioaddr) {
		rc = -EIO;
		dev_err(&pdev->dev,
			"ioremap failed for device %s, region 0x%X @ 0x%lX\n",
			pci_name(pdev), io_size, memaddr);
		goto err_out_free_res;
	}

#ifdef USE_MMIO
	enable_mmio(pioaddr, quirks);

	/* Check that selected MMIO registers match the PIO ones */
	i = 0;
	while (mmio_verify_registers[i]) {
		int reg = mmio_verify_registers[i++];
		unsigned char a = inb(pioaddr+reg);
		unsigned char b = readb(ioaddr+reg);
		if (a != b) {
			rc = -EIO;
			dev_err(&pdev->dev,
				"MMIO do not match PIO [%02x] (%02x != %02x)\n",
				reg, a, b);
			goto err_out_unmap;
		}
	}
#endif /* USE_MMIO */

	dev->base_addr = (unsigned long)ioaddr;
	rp->base = ioaddr;

	/* Get chip registers into a sane state */
	rhine_power_init(dev);
	rhine_hw_init(dev, pioaddr);

	for (i = 0; i < 6; i++)
		dev->dev_addr[i] = ioread8(ioaddr + StationAddr + i);

	if (!is_valid_ether_addr(dev->dev_addr)) {
		/* Report it and use a random ethernet address instead */
		netdev_err(dev, "Invalid MAC address: %pM\n", dev->dev_addr);
		random_ether_addr(dev->dev_addr);
		netdev_info(dev, "Using random MAC address: %pM\n",
			    dev->dev_addr);
	}
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);

	/* For Rhine-I/II, phy_id is loaded from EEPROM */
	if (!phy_id)
		phy_id = ioread8(ioaddr + 0x6C);

	dev->irq = pdev->irq;

	spin_lock_init(&rp->lock);
	INIT_WORK(&rp->reset_task, rhine_reset_task);

	rp->mii_if.dev = dev;
	rp->mii_if.mdio_read = mdio_read;
	rp->mii_if.mdio_write = mdio_write;
	rp->mii_if.phy_id_mask = 0x1f;
	rp->mii_if.reg_num_mask = 0x1f;

	/* The chip-specific entries in the device structure. */
	dev->netdev_ops = &rhine_netdev_ops;
	dev->ethtool_ops = &netdev_ethtool_ops;
880f2148a47SJeff Kirsher 	dev->watchdog_timeo = TX_TIMEOUT;
881f2148a47SJeff Kirsher 
882f2148a47SJeff Kirsher 	netif_napi_add(dev, &rp->napi, rhine_napipoll, 64);
883f2148a47SJeff Kirsher 
884f2148a47SJeff Kirsher 	if (rp->quirks & rqRhineI)
885f2148a47SJeff Kirsher 		dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM;
886f2148a47SJeff Kirsher 
887f2148a47SJeff Kirsher 	if (pdev->revision >= VT6105M)
888f2148a47SJeff Kirsher 		dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
889f2148a47SJeff Kirsher 		NETIF_F_HW_VLAN_FILTER;
890f2148a47SJeff Kirsher 
891f2148a47SJeff Kirsher 	/* dev->name not defined before register_netdev()! */
892f2148a47SJeff Kirsher 	rc = register_netdev(dev);
893f2148a47SJeff Kirsher 	if (rc)
894f2148a47SJeff Kirsher 		goto err_out_unmap;
895f2148a47SJeff Kirsher 
896f2148a47SJeff Kirsher 	netdev_info(dev, "VIA %s at 0x%lx, %pM, IRQ %d\n",
897f2148a47SJeff Kirsher 		    name,
898f2148a47SJeff Kirsher #ifdef USE_MMIO
899f2148a47SJeff Kirsher 		    memaddr,
900f2148a47SJeff Kirsher #else
901f2148a47SJeff Kirsher 		    (long)ioaddr,
902f2148a47SJeff Kirsher #endif
903f2148a47SJeff Kirsher 		    dev->dev_addr, pdev->irq);
904f2148a47SJeff Kirsher 
905f2148a47SJeff Kirsher 	pci_set_drvdata(pdev, dev);
906f2148a47SJeff Kirsher 
907f2148a47SJeff Kirsher 	{
908f2148a47SJeff Kirsher 		u16 mii_cmd;
909f2148a47SJeff Kirsher 		int mii_status = mdio_read(dev, phy_id, 1);
910f2148a47SJeff Kirsher 		mii_cmd = mdio_read(dev, phy_id, MII_BMCR) & ~BMCR_ISOLATE;
911f2148a47SJeff Kirsher 		mdio_write(dev, phy_id, MII_BMCR, mii_cmd);
912f2148a47SJeff Kirsher 		if (mii_status != 0xffff && mii_status != 0x0000) {
913f2148a47SJeff Kirsher 			rp->mii_if.advertising = mdio_read(dev, phy_id, 4);
914f2148a47SJeff Kirsher 			netdev_info(dev,
915f2148a47SJeff Kirsher 				    "MII PHY found at address %d, status 0x%04x advertising %04x Link %04x\n",
916f2148a47SJeff Kirsher 				    phy_id,
917f2148a47SJeff Kirsher 				    mii_status, rp->mii_if.advertising,
918f2148a47SJeff Kirsher 				    mdio_read(dev, phy_id, 5));
919f2148a47SJeff Kirsher 
920f2148a47SJeff Kirsher 			/* set IFF_RUNNING */
921f2148a47SJeff Kirsher 			if (mii_status & BMSR_LSTATUS)
922f2148a47SJeff Kirsher 				netif_carrier_on(dev);
923f2148a47SJeff Kirsher 			else
924f2148a47SJeff Kirsher 				netif_carrier_off(dev);
925f2148a47SJeff Kirsher 
926f2148a47SJeff Kirsher 		}
927f2148a47SJeff Kirsher 	}
928f2148a47SJeff Kirsher 	rp->mii_if.phy_id = phy_id;
929f2148a47SJeff Kirsher 	if (debug > 1 && avoid_D3)
930f2148a47SJeff Kirsher 		netdev_info(dev, "No D3 power state at shutdown\n");
931f2148a47SJeff Kirsher 
932f2148a47SJeff Kirsher 	return 0;
933f2148a47SJeff Kirsher 
934f2148a47SJeff Kirsher err_out_unmap:
935f2148a47SJeff Kirsher 	pci_iounmap(pdev, ioaddr);
936f2148a47SJeff Kirsher err_out_free_res:
937f2148a47SJeff Kirsher 	pci_release_regions(pdev);
938f2148a47SJeff Kirsher err_out_free_netdev:
939f2148a47SJeff Kirsher 	free_netdev(dev);
940f2148a47SJeff Kirsher err_out:
941f2148a47SJeff Kirsher 	return rc;
942f2148a47SJeff Kirsher }
943f2148a47SJeff Kirsher 
944f2148a47SJeff Kirsher static int alloc_ring(struct net_device* dev)
945f2148a47SJeff Kirsher {
946f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
947f2148a47SJeff Kirsher 	void *ring;
948f2148a47SJeff Kirsher 	dma_addr_t ring_dma;
949f2148a47SJeff Kirsher 
950f2148a47SJeff Kirsher 	ring = pci_alloc_consistent(rp->pdev,
951f2148a47SJeff Kirsher 				    RX_RING_SIZE * sizeof(struct rx_desc) +
952f2148a47SJeff Kirsher 				    TX_RING_SIZE * sizeof(struct tx_desc),
953f2148a47SJeff Kirsher 				    &ring_dma);
954f2148a47SJeff Kirsher 	if (!ring) {
955f2148a47SJeff Kirsher 		netdev_err(dev, "Could not allocate DMA memory\n");
956f2148a47SJeff Kirsher 		return -ENOMEM;
957f2148a47SJeff Kirsher 	}
958f2148a47SJeff Kirsher 	if (rp->quirks & rqRhineI) {
959f2148a47SJeff Kirsher 		rp->tx_bufs = pci_alloc_consistent(rp->pdev,
960f2148a47SJeff Kirsher 						   PKT_BUF_SZ * TX_RING_SIZE,
961f2148a47SJeff Kirsher 						   &rp->tx_bufs_dma);
962f2148a47SJeff Kirsher 		if (rp->tx_bufs == NULL) {
963f2148a47SJeff Kirsher 			pci_free_consistent(rp->pdev,
964f2148a47SJeff Kirsher 				    RX_RING_SIZE * sizeof(struct rx_desc) +
965f2148a47SJeff Kirsher 				    TX_RING_SIZE * sizeof(struct tx_desc),
966f2148a47SJeff Kirsher 				    ring, ring_dma);
967f2148a47SJeff Kirsher 			return -ENOMEM;
968f2148a47SJeff Kirsher 		}
969f2148a47SJeff Kirsher 	}
970f2148a47SJeff Kirsher 
971f2148a47SJeff Kirsher 	rp->rx_ring = ring;
972f2148a47SJeff Kirsher 	rp->tx_ring = ring + RX_RING_SIZE * sizeof(struct rx_desc);
973f2148a47SJeff Kirsher 	rp->rx_ring_dma = ring_dma;
974f2148a47SJeff Kirsher 	rp->tx_ring_dma = ring_dma + RX_RING_SIZE * sizeof(struct rx_desc);
975f2148a47SJeff Kirsher 
976f2148a47SJeff Kirsher 	return 0;
977f2148a47SJeff Kirsher }
978f2148a47SJeff Kirsher 
979f2148a47SJeff Kirsher static void free_ring(struct net_device* dev)
980f2148a47SJeff Kirsher {
981f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
982f2148a47SJeff Kirsher 
983f2148a47SJeff Kirsher 	pci_free_consistent(rp->pdev,
984f2148a47SJeff Kirsher 			    RX_RING_SIZE * sizeof(struct rx_desc) +
985f2148a47SJeff Kirsher 			    TX_RING_SIZE * sizeof(struct tx_desc),
986f2148a47SJeff Kirsher 			    rp->rx_ring, rp->rx_ring_dma);
987f2148a47SJeff Kirsher 	rp->tx_ring = NULL;
988f2148a47SJeff Kirsher 
989f2148a47SJeff Kirsher 	if (rp->tx_bufs)
990f2148a47SJeff Kirsher 		pci_free_consistent(rp->pdev, PKT_BUF_SZ * TX_RING_SIZE,
991f2148a47SJeff Kirsher 				    rp->tx_bufs, rp->tx_bufs_dma);
992f2148a47SJeff Kirsher 
993f2148a47SJeff Kirsher 	rp->tx_bufs = NULL;
994f2148a47SJeff Kirsher 
995f2148a47SJeff Kirsher }
996f2148a47SJeff Kirsher 
997f2148a47SJeff Kirsher static void alloc_rbufs(struct net_device *dev)
998f2148a47SJeff Kirsher {
999f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
1000f2148a47SJeff Kirsher 	dma_addr_t next;
1001f2148a47SJeff Kirsher 	int i;
1002f2148a47SJeff Kirsher 
1003f2148a47SJeff Kirsher 	rp->dirty_rx = rp->cur_rx = 0;
1004f2148a47SJeff Kirsher 
1005f2148a47SJeff Kirsher 	rp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
1006f2148a47SJeff Kirsher 	rp->rx_head_desc = &rp->rx_ring[0];
1007f2148a47SJeff Kirsher 	next = rp->rx_ring_dma;
1008f2148a47SJeff Kirsher 
1009f2148a47SJeff Kirsher 	/* Init the ring entries */
1010f2148a47SJeff Kirsher 	for (i = 0; i < RX_RING_SIZE; i++) {
1011f2148a47SJeff Kirsher 		rp->rx_ring[i].rx_status = 0;
1012f2148a47SJeff Kirsher 		rp->rx_ring[i].desc_length = cpu_to_le32(rp->rx_buf_sz);
1013f2148a47SJeff Kirsher 		next += sizeof(struct rx_desc);
1014f2148a47SJeff Kirsher 		rp->rx_ring[i].next_desc = cpu_to_le32(next);
1015f2148a47SJeff Kirsher 		rp->rx_skbuff[i] = NULL;
1016f2148a47SJeff Kirsher 	}
1017f2148a47SJeff Kirsher 	/* Mark the last entry as wrapping the ring. */
1018f2148a47SJeff Kirsher 	rp->rx_ring[i-1].next_desc = cpu_to_le32(rp->rx_ring_dma);
1019f2148a47SJeff Kirsher 
1020f2148a47SJeff Kirsher 	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
1021f2148a47SJeff Kirsher 	for (i = 0; i < RX_RING_SIZE; i++) {
1022f2148a47SJeff Kirsher 		struct sk_buff *skb = netdev_alloc_skb(dev, rp->rx_buf_sz);
1023f2148a47SJeff Kirsher 		rp->rx_skbuff[i] = skb;
1024f2148a47SJeff Kirsher 		if (skb == NULL)
1025f2148a47SJeff Kirsher 			break;
1026f2148a47SJeff Kirsher 		skb->dev = dev;                 /* Mark as being used by this device. */
1027f2148a47SJeff Kirsher 
1028f2148a47SJeff Kirsher 		rp->rx_skbuff_dma[i] =
1029f2148a47SJeff Kirsher 			pci_map_single(rp->pdev, skb->data, rp->rx_buf_sz,
1030f2148a47SJeff Kirsher 				       PCI_DMA_FROMDEVICE);
1031f2148a47SJeff Kirsher 
1032f2148a47SJeff Kirsher 		rp->rx_ring[i].addr = cpu_to_le32(rp->rx_skbuff_dma[i]);
1033f2148a47SJeff Kirsher 		rp->rx_ring[i].rx_status = cpu_to_le32(DescOwn);
1034f2148a47SJeff Kirsher 	}
1035f2148a47SJeff Kirsher 	rp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
1036f2148a47SJeff Kirsher }
1037f2148a47SJeff Kirsher 
1038f2148a47SJeff Kirsher static void free_rbufs(struct net_device* dev)
1039f2148a47SJeff Kirsher {
1040f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
1041f2148a47SJeff Kirsher 	int i;
1042f2148a47SJeff Kirsher 
1043f2148a47SJeff Kirsher 	/* Free all the skbuffs in the Rx queue. */
1044f2148a47SJeff Kirsher 	for (i = 0; i < RX_RING_SIZE; i++) {
1045f2148a47SJeff Kirsher 		rp->rx_ring[i].rx_status = 0;
1046f2148a47SJeff Kirsher 		rp->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
1047f2148a47SJeff Kirsher 		if (rp->rx_skbuff[i]) {
1048f2148a47SJeff Kirsher 			pci_unmap_single(rp->pdev,
1049f2148a47SJeff Kirsher 					 rp->rx_skbuff_dma[i],
1050f2148a47SJeff Kirsher 					 rp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1051f2148a47SJeff Kirsher 			dev_kfree_skb(rp->rx_skbuff[i]);
1052f2148a47SJeff Kirsher 		}
1053f2148a47SJeff Kirsher 		rp->rx_skbuff[i] = NULL;
1054f2148a47SJeff Kirsher 	}
1055f2148a47SJeff Kirsher }
1056f2148a47SJeff Kirsher 
1057f2148a47SJeff Kirsher static void alloc_tbufs(struct net_device* dev)
1058f2148a47SJeff Kirsher {
1059f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
1060f2148a47SJeff Kirsher 	dma_addr_t next;
1061f2148a47SJeff Kirsher 	int i;
1062f2148a47SJeff Kirsher 
1063f2148a47SJeff Kirsher 	rp->dirty_tx = rp->cur_tx = 0;
1064f2148a47SJeff Kirsher 	next = rp->tx_ring_dma;
1065f2148a47SJeff Kirsher 	for (i = 0; i < TX_RING_SIZE; i++) {
1066f2148a47SJeff Kirsher 		rp->tx_skbuff[i] = NULL;
1067f2148a47SJeff Kirsher 		rp->tx_ring[i].tx_status = 0;
1068f2148a47SJeff Kirsher 		rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
1069f2148a47SJeff Kirsher 		next += sizeof(struct tx_desc);
1070f2148a47SJeff Kirsher 		rp->tx_ring[i].next_desc = cpu_to_le32(next);
1071f2148a47SJeff Kirsher 		if (rp->quirks & rqRhineI)
1072f2148a47SJeff Kirsher 			rp->tx_buf[i] = &rp->tx_bufs[i * PKT_BUF_SZ];
1073f2148a47SJeff Kirsher 	}
1074f2148a47SJeff Kirsher 	rp->tx_ring[i-1].next_desc = cpu_to_le32(rp->tx_ring_dma);
1076f2148a47SJeff Kirsher }
1077f2148a47SJeff Kirsher 
1078f2148a47SJeff Kirsher static void free_tbufs(struct net_device* dev)
1079f2148a47SJeff Kirsher {
1080f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
1081f2148a47SJeff Kirsher 	int i;
1082f2148a47SJeff Kirsher 
1083f2148a47SJeff Kirsher 	for (i = 0; i < TX_RING_SIZE; i++) {
1084f2148a47SJeff Kirsher 		rp->tx_ring[i].tx_status = 0;
1085f2148a47SJeff Kirsher 		rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
1086f2148a47SJeff Kirsher 		rp->tx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
1087f2148a47SJeff Kirsher 		if (rp->tx_skbuff[i]) {
1088f2148a47SJeff Kirsher 			if (rp->tx_skbuff_dma[i]) {
1089f2148a47SJeff Kirsher 				pci_unmap_single(rp->pdev,
1090f2148a47SJeff Kirsher 						 rp->tx_skbuff_dma[i],
1091f2148a47SJeff Kirsher 						 rp->tx_skbuff[i]->len,
1092f2148a47SJeff Kirsher 						 PCI_DMA_TODEVICE);
1093f2148a47SJeff Kirsher 			}
1094f2148a47SJeff Kirsher 			dev_kfree_skb(rp->tx_skbuff[i]);
1095f2148a47SJeff Kirsher 		}
1096f2148a47SJeff Kirsher 		rp->tx_skbuff[i] = NULL;
1097f2148a47SJeff Kirsher 		rp->tx_buf[i] = NULL;
1098f2148a47SJeff Kirsher 	}
1099f2148a47SJeff Kirsher }
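
/*
 * Explanatory note (not in the original source): a zero
 * tx_skbuff_dma[i] marks a packet that was bounced through the Rhine-I
 * alignment buffer in rhine_start_tx(), so there is no per-skb DMA
 * mapping to undo for it here.
 */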
1100f2148a47SJeff Kirsher 
1101f2148a47SJeff Kirsher static void rhine_check_media(struct net_device *dev, unsigned int init_media)
1102f2148a47SJeff Kirsher {
1103f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
1104f2148a47SJeff Kirsher 	void __iomem *ioaddr = rp->base;
1105f2148a47SJeff Kirsher 
1106f2148a47SJeff Kirsher 	mii_check_media(&rp->mii_if, debug, init_media);
1107f2148a47SJeff Kirsher 
1108f2148a47SJeff Kirsher 	if (rp->mii_if.full_duplex)
1109f2148a47SJeff Kirsher 	    iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1FDuplex,
1110f2148a47SJeff Kirsher 		   ioaddr + ChipCmd1);
1111f2148a47SJeff Kirsher 	else
1112f2148a47SJeff Kirsher 	    iowrite8(ioread8(ioaddr + ChipCmd1) & ~Cmd1FDuplex,
1113f2148a47SJeff Kirsher 		   ioaddr + ChipCmd1);
1114f2148a47SJeff Kirsher 	if (debug > 1)
1115f2148a47SJeff Kirsher 		netdev_info(dev, "force_media %d, carrier %d\n",
1116f2148a47SJeff Kirsher 			    rp->mii_if.force_media, netif_carrier_ok(dev));
1117f2148a47SJeff Kirsher }
1118f2148a47SJeff Kirsher 
1119f2148a47SJeff Kirsher /* Called after status of force_media possibly changed */
1120f2148a47SJeff Kirsher static void rhine_set_carrier(struct mii_if_info *mii)
1121f2148a47SJeff Kirsher {
1122f2148a47SJeff Kirsher 	if (mii->force_media) {
1123f2148a47SJeff Kirsher 		/* autoneg is off: Link is always assumed to be up */
1124f2148a47SJeff Kirsher 		if (!netif_carrier_ok(mii->dev))
1125f2148a47SJeff Kirsher 			netif_carrier_on(mii->dev);
1126f2148a47SJeff Kirsher 	} else	/* Let MII library update carrier status */
1128f2148a47SJeff Kirsher 		rhine_check_media(mii->dev, 0);
1129f2148a47SJeff Kirsher 	if (debug > 1)
1130f2148a47SJeff Kirsher 		netdev_info(mii->dev, "force_media %d, carrier %d\n",
1131f2148a47SJeff Kirsher 			    mii->force_media, netif_carrier_ok(mii->dev));
1132f2148a47SJeff Kirsher }
1133f2148a47SJeff Kirsher 
1134f2148a47SJeff Kirsher /**
1135f2148a47SJeff Kirsher  * rhine_set_cam - set CAM multicast filters
1136f2148a47SJeff Kirsher  * @ioaddr: register block of this Rhine
1137f2148a47SJeff Kirsher  * @idx: multicast CAM index [0..MCAM_SIZE-1]
1138f2148a47SJeff Kirsher  * @addr: multicast address (6 bytes)
1139f2148a47SJeff Kirsher  *
1140f2148a47SJeff Kirsher  * Load addresses into multicast filters.
1141f2148a47SJeff Kirsher  */
1142f2148a47SJeff Kirsher static void rhine_set_cam(void __iomem *ioaddr, int idx, u8 *addr)
1143f2148a47SJeff Kirsher {
1144f2148a47SJeff Kirsher 	int i;
1145f2148a47SJeff Kirsher 
1146f2148a47SJeff Kirsher 	iowrite8(CAMC_CAMEN, ioaddr + CamCon);
1147f2148a47SJeff Kirsher 	wmb();
1148f2148a47SJeff Kirsher 
1149f2148a47SJeff Kirsher 	/* Paranoid -- idx out of range should never happen */
1150f2148a47SJeff Kirsher 	idx &= (MCAM_SIZE - 1);
1151f2148a47SJeff Kirsher 
1152f2148a47SJeff Kirsher 	iowrite8((u8) idx, ioaddr + CamAddr);
1153f2148a47SJeff Kirsher 
1154f2148a47SJeff Kirsher 	for (i = 0; i < 6; i++, addr++)
1155f2148a47SJeff Kirsher 		iowrite8(*addr, ioaddr + MulticastFilter0 + i);
1156f2148a47SJeff Kirsher 	udelay(10);
1157f2148a47SJeff Kirsher 	wmb();
1158f2148a47SJeff Kirsher 
1159f2148a47SJeff Kirsher 	iowrite8(CAMC_CAMWR | CAMC_CAMEN, ioaddr + CamCon);
1160f2148a47SJeff Kirsher 	udelay(10);
1161f2148a47SJeff Kirsher 
1162f2148a47SJeff Kirsher 	iowrite8(0, ioaddr + CamCon);
1163f2148a47SJeff Kirsher }
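
/*
 * Hypothetical usage sketch (illustration only, mirroring what
 * rhine_set_rx_mode() does for real multicast lists): program CAM slot
 * 0 with the all-hosts group address and activate just that slot:
 *
 *	u8 mc_addr[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
 *
 *	rhine_set_cam(ioaddr, 0, mc_addr);
 *	rhine_set_cam_mask(ioaddr, 1 << 0);
 */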
1164f2148a47SJeff Kirsher 
1165f2148a47SJeff Kirsher /**
1166f2148a47SJeff Kirsher  * rhine_set_vlan_cam - set CAM VLAN filters
1167f2148a47SJeff Kirsher  * @ioaddr: register block of this Rhine
1168f2148a47SJeff Kirsher  * @idx: VLAN CAM index [0..VCAM_SIZE-1]
1169f2148a47SJeff Kirsher  * @addr: VLAN ID (2 bytes)
1170f2148a47SJeff Kirsher  *
1171f2148a47SJeff Kirsher  * Load addresses into VLAN filters.
1172f2148a47SJeff Kirsher  */
1173f2148a47SJeff Kirsher static void rhine_set_vlan_cam(void __iomem *ioaddr, int idx, u8 *addr)
1174f2148a47SJeff Kirsher {
1175f2148a47SJeff Kirsher 	iowrite8(CAMC_CAMEN | CAMC_VCAMSL, ioaddr + CamCon);
1176f2148a47SJeff Kirsher 	wmb();
1177f2148a47SJeff Kirsher 
1178f2148a47SJeff Kirsher 	/* Paranoid -- idx out of range should never happen */
1179f2148a47SJeff Kirsher 	idx &= (VCAM_SIZE - 1);
1180f2148a47SJeff Kirsher 
1181f2148a47SJeff Kirsher 	iowrite8((u8) idx, ioaddr + CamAddr);
1182f2148a47SJeff Kirsher 
1183f2148a47SJeff Kirsher 	iowrite16(*((u16 *) addr), ioaddr + MulticastFilter0 + 6);
1184f2148a47SJeff Kirsher 	udelay(10);
1185f2148a47SJeff Kirsher 	wmb();
1186f2148a47SJeff Kirsher 
1187f2148a47SJeff Kirsher 	iowrite8(CAMC_CAMWR | CAMC_CAMEN, ioaddr + CamCon);
1188f2148a47SJeff Kirsher 	udelay(10);
1189f2148a47SJeff Kirsher 
1190f2148a47SJeff Kirsher 	iowrite8(0, ioaddr + CamCon);
1191f2148a47SJeff Kirsher }
1192f2148a47SJeff Kirsher 
1193f2148a47SJeff Kirsher /**
1194f2148a47SJeff Kirsher  * rhine_set_cam_mask - set multicast CAM mask
1195f2148a47SJeff Kirsher  * @ioaddr: register block of this Rhine
1196f2148a47SJeff Kirsher  * @mask: multicast CAM mask
1197f2148a47SJeff Kirsher  *
1198f2148a47SJeff Kirsher  * Mask sets multicast filters active/inactive.
1199f2148a47SJeff Kirsher  */
1200f2148a47SJeff Kirsher static void rhine_set_cam_mask(void __iomem *ioaddr, u32 mask)
1201f2148a47SJeff Kirsher {
1202f2148a47SJeff Kirsher 	iowrite8(CAMC_CAMEN, ioaddr + CamCon);
1203f2148a47SJeff Kirsher 	wmb();
1204f2148a47SJeff Kirsher 
1205f2148a47SJeff Kirsher 	/* write mask */
1206f2148a47SJeff Kirsher 	iowrite32(mask, ioaddr + CamMask);
1207f2148a47SJeff Kirsher 
1208f2148a47SJeff Kirsher 	/* disable CAMEN */
1209f2148a47SJeff Kirsher 	iowrite8(0, ioaddr + CamCon);
1210f2148a47SJeff Kirsher }
1211f2148a47SJeff Kirsher 
1212f2148a47SJeff Kirsher /**
1213f2148a47SJeff Kirsher  * rhine_set_vlan_cam_mask - set VLAN CAM mask
1214f2148a47SJeff Kirsher  * @ioaddr: register block of this Rhine
1215f2148a47SJeff Kirsher  * @mask: VLAN CAM mask
1216f2148a47SJeff Kirsher  *
1217f2148a47SJeff Kirsher  * Mask sets VLAN filters active/inactive.
1218f2148a47SJeff Kirsher  */
1219f2148a47SJeff Kirsher static void rhine_set_vlan_cam_mask(void __iomem *ioaddr, u32 mask)
1220f2148a47SJeff Kirsher {
1221f2148a47SJeff Kirsher 	iowrite8(CAMC_CAMEN | CAMC_VCAMSL, ioaddr + CamCon);
1222f2148a47SJeff Kirsher 	wmb();
1223f2148a47SJeff Kirsher 
1224f2148a47SJeff Kirsher 	/* write mask */
1225f2148a47SJeff Kirsher 	iowrite32(mask, ioaddr + CamMask);
1226f2148a47SJeff Kirsher 
1227f2148a47SJeff Kirsher 	/* disable CAMEN */
1228f2148a47SJeff Kirsher 	iowrite8(0, ioaddr + CamCon);
1229f2148a47SJeff Kirsher }
1230f2148a47SJeff Kirsher 
1231f2148a47SJeff Kirsher /**
1232f2148a47SJeff Kirsher  * rhine_init_cam_filter - initialize CAM filters
1233f2148a47SJeff Kirsher  * @dev: network device
1234f2148a47SJeff Kirsher  *
1235f2148a47SJeff Kirsher  * Initialize (disable) hardware VLAN and multicast support on this
1236f2148a47SJeff Kirsher  * Rhine.
1237f2148a47SJeff Kirsher  */
1238f2148a47SJeff Kirsher static void rhine_init_cam_filter(struct net_device *dev)
1239f2148a47SJeff Kirsher {
1240f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
1241f2148a47SJeff Kirsher 	void __iomem *ioaddr = rp->base;
1242f2148a47SJeff Kirsher 
1243f2148a47SJeff Kirsher 	/* Disable all CAMs */
1244f2148a47SJeff Kirsher 	rhine_set_vlan_cam_mask(ioaddr, 0);
1245f2148a47SJeff Kirsher 	rhine_set_cam_mask(ioaddr, 0);
1246f2148a47SJeff Kirsher 
1247f2148a47SJeff Kirsher 	/* disable hardware VLAN support */
1248f2148a47SJeff Kirsher 	BYTE_REG_BITS_ON(TCR_PQEN, ioaddr + TxConfig);
1249f2148a47SJeff Kirsher 	BYTE_REG_BITS_OFF(BCR1_VIDFR, ioaddr + PCIBusConfig1);
1250f2148a47SJeff Kirsher }
1251f2148a47SJeff Kirsher 
1252f2148a47SJeff Kirsher /**
1253f2148a47SJeff Kirsher  * rhine_update_vcam - update VLAN CAM filters
1254f2148a47SJeff Kirsher  * @dev: network device
1255f2148a47SJeff Kirsher  *
1256f2148a47SJeff Kirsher  * Update VLAN CAM filters to match configuration change.
1257f2148a47SJeff Kirsher  */
1258f2148a47SJeff Kirsher static void rhine_update_vcam(struct net_device *dev)
1259f2148a47SJeff Kirsher {
1260f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
1261f2148a47SJeff Kirsher 	void __iomem *ioaddr = rp->base;
1262f2148a47SJeff Kirsher 	u16 vid;
1263f2148a47SJeff Kirsher 	u32 vCAMmask = 0;	/* 32 vCAMs (6105M and better) */
1264f2148a47SJeff Kirsher 	unsigned int i = 0;
1265f2148a47SJeff Kirsher 
1266f2148a47SJeff Kirsher 	for_each_set_bit(vid, rp->active_vlans, VLAN_N_VID) {
1267f2148a47SJeff Kirsher 		rhine_set_vlan_cam(ioaddr, i, (u8 *)&vid);
1268f2148a47SJeff Kirsher 		vCAMmask |= 1 << i;
1269f2148a47SJeff Kirsher 		if (++i >= VCAM_SIZE)
1270f2148a47SJeff Kirsher 			break;
1271f2148a47SJeff Kirsher 	}
1272f2148a47SJeff Kirsher 	rhine_set_vlan_cam_mask(ioaddr, vCAMmask);
1273f2148a47SJeff Kirsher }
1274f2148a47SJeff Kirsher 
12758e586137SJiri Pirko static int rhine_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
1276f2148a47SJeff Kirsher {
1277f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
1278f2148a47SJeff Kirsher 
1279f2148a47SJeff Kirsher 	spin_lock_irq(&rp->lock);
1280f2148a47SJeff Kirsher 	set_bit(vid, rp->active_vlans);
1281f2148a47SJeff Kirsher 	rhine_update_vcam(dev);
1282f2148a47SJeff Kirsher 	spin_unlock_irq(&rp->lock);
12838e586137SJiri Pirko 	return 0;
1284f2148a47SJeff Kirsher }
1285f2148a47SJeff Kirsher 
12868e586137SJiri Pirko static int rhine_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
1287f2148a47SJeff Kirsher {
1288f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
1289f2148a47SJeff Kirsher 
1290f2148a47SJeff Kirsher 	spin_lock_irq(&rp->lock);
1291f2148a47SJeff Kirsher 	clear_bit(vid, rp->active_vlans);
1292f2148a47SJeff Kirsher 	rhine_update_vcam(dev);
1293f2148a47SJeff Kirsher 	spin_unlock_irq(&rp->lock);
12948e586137SJiri Pirko 	return 0;
1295f2148a47SJeff Kirsher }
1296f2148a47SJeff Kirsher 
1297f2148a47SJeff Kirsher static void init_registers(struct net_device *dev)
1298f2148a47SJeff Kirsher {
1299f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
1300f2148a47SJeff Kirsher 	void __iomem *ioaddr = rp->base;
1301f2148a47SJeff Kirsher 	int i;
1302f2148a47SJeff Kirsher 
1303f2148a47SJeff Kirsher 	for (i = 0; i < 6; i++)
1304f2148a47SJeff Kirsher 		iowrite8(dev->dev_addr[i], ioaddr + StationAddr + i);
1305f2148a47SJeff Kirsher 
1306f2148a47SJeff Kirsher 	/* Initialize other registers. */
1307f2148a47SJeff Kirsher 	iowrite16(0x0006, ioaddr + PCIBusConfig);	/* Tune configuration??? */
1308f2148a47SJeff Kirsher 	/* Configure initial FIFO thresholds. */
1309f2148a47SJeff Kirsher 	iowrite8(0x20, ioaddr + TxConfig);
1310f2148a47SJeff Kirsher 	rp->tx_thresh = 0x20;
1311f2148a47SJeff Kirsher 	rp->rx_thresh = 0x60;		/* Written in rhine_set_rx_mode(). */
1312f2148a47SJeff Kirsher 
1313f2148a47SJeff Kirsher 	iowrite32(rp->rx_ring_dma, ioaddr + RxRingPtr);
1314f2148a47SJeff Kirsher 	iowrite32(rp->tx_ring_dma, ioaddr + TxRingPtr);
1315f2148a47SJeff Kirsher 
1316f2148a47SJeff Kirsher 	rhine_set_rx_mode(dev);
1317f2148a47SJeff Kirsher 
1318f2148a47SJeff Kirsher 	if (rp->pdev->revision >= VT6105M)
1319f2148a47SJeff Kirsher 		rhine_init_cam_filter(dev);
1320f2148a47SJeff Kirsher 
1321f2148a47SJeff Kirsher 	napi_enable(&rp->napi);
1322f2148a47SJeff Kirsher 
1323f2148a47SJeff Kirsher 	/* Enable interrupts by setting the interrupt mask. */
1324f2148a47SJeff Kirsher 	iowrite16(IntrRxDone | IntrRxErr | IntrRxEmpty| IntrRxOverflow |
1325f2148a47SJeff Kirsher 	       IntrRxDropped | IntrRxNoBuf | IntrTxAborted |
1326f2148a47SJeff Kirsher 	       IntrTxDone | IntrTxError | IntrTxUnderrun |
1327f2148a47SJeff Kirsher 	       IntrPCIErr | IntrStatsMax | IntrLinkChange,
1328f2148a47SJeff Kirsher 	       ioaddr + IntrEnable);
1329f2148a47SJeff Kirsher 
1330f2148a47SJeff Kirsher 	iowrite16(CmdStart | CmdTxOn | CmdRxOn | (Cmd1NoTxPoll << 8),
1331f2148a47SJeff Kirsher 	       ioaddr + ChipCmd);
1332f2148a47SJeff Kirsher 	rhine_check_media(dev, 1);
1333f2148a47SJeff Kirsher }
1334f2148a47SJeff Kirsher 
1335f2148a47SJeff Kirsher /* Enable MII link status auto-polling (required for IntrLinkChange) */
1336f2148a47SJeff Kirsher static void rhine_enable_linkmon(void __iomem *ioaddr)
1337f2148a47SJeff Kirsher {
1338f2148a47SJeff Kirsher 	iowrite8(0, ioaddr + MIICmd);
1339f2148a47SJeff Kirsher 	iowrite8(MII_BMSR, ioaddr + MIIRegAddr);
1340f2148a47SJeff Kirsher 	iowrite8(0x80, ioaddr + MIICmd);
1341f2148a47SJeff Kirsher 
1342f2148a47SJeff Kirsher 	RHINE_WAIT_FOR((ioread8(ioaddr + MIIRegAddr) & 0x20));
1343f2148a47SJeff Kirsher 
1344f2148a47SJeff Kirsher 	iowrite8(MII_BMSR | 0x40, ioaddr + MIIRegAddr);
1345f2148a47SJeff Kirsher }
1346f2148a47SJeff Kirsher 
1347f2148a47SJeff Kirsher /* Disable MII link status auto-polling (required for MDIO access) */
1348f2148a47SJeff Kirsher static void rhine_disable_linkmon(void __iomem *ioaddr, u32 quirks)
1349f2148a47SJeff Kirsher {
1350f2148a47SJeff Kirsher 	iowrite8(0, ioaddr + MIICmd);
1351f2148a47SJeff Kirsher 
1352f2148a47SJeff Kirsher 	if (quirks & rqRhineI) {
1353f2148a47SJeff Kirsher 		iowrite8(0x01, ioaddr + MIIRegAddr);	// MII_BMSR
1354f2148a47SJeff Kirsher 
1355f2148a47SJeff Kirsher 		/* Can be called from ISR. Evil. */
1356f2148a47SJeff Kirsher 		mdelay(1);
1357f2148a47SJeff Kirsher 
1358f2148a47SJeff Kirsher 		/* 0x80 must be set immediately before turning it off */
1359f2148a47SJeff Kirsher 		iowrite8(0x80, ioaddr + MIICmd);
1360f2148a47SJeff Kirsher 
1361f2148a47SJeff Kirsher 		RHINE_WAIT_FOR(ioread8(ioaddr + MIIRegAddr) & 0x20);
1362f2148a47SJeff Kirsher 
1363f2148a47SJeff Kirsher 		/* Heh. Now clear 0x80 again. */
1364f2148a47SJeff Kirsher 		iowrite8(0, ioaddr + MIICmd);
1365f2148a47SJeff Kirsher 	} else
1367f2148a47SJeff Kirsher 		RHINE_WAIT_FOR(ioread8(ioaddr + MIIRegAddr) & 0x80);
1368f2148a47SJeff Kirsher }
1369f2148a47SJeff Kirsher 
1370f2148a47SJeff Kirsher /* Read and write over the MII Management Data I/O (MDIO) interface. */
1371f2148a47SJeff Kirsher 
1372f2148a47SJeff Kirsher static int mdio_read(struct net_device *dev, int phy_id, int regnum)
1373f2148a47SJeff Kirsher {
1374f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
1375f2148a47SJeff Kirsher 	void __iomem *ioaddr = rp->base;
1376f2148a47SJeff Kirsher 	int result;
1377f2148a47SJeff Kirsher 
1378f2148a47SJeff Kirsher 	rhine_disable_linkmon(ioaddr, rp->quirks);
1379f2148a47SJeff Kirsher 
1380f2148a47SJeff Kirsher 	/* rhine_disable_linkmon already cleared MIICmd */
1381f2148a47SJeff Kirsher 	iowrite8(phy_id, ioaddr + MIIPhyAddr);
1382f2148a47SJeff Kirsher 	iowrite8(regnum, ioaddr + MIIRegAddr);
1383f2148a47SJeff Kirsher 	iowrite8(0x40, ioaddr + MIICmd);		/* Trigger read */
1384f2148a47SJeff Kirsher 	RHINE_WAIT_FOR(!(ioread8(ioaddr + MIICmd) & 0x40));
1385f2148a47SJeff Kirsher 	result = ioread16(ioaddr + MIIData);
1386f2148a47SJeff Kirsher 
1387f2148a47SJeff Kirsher 	rhine_enable_linkmon(ioaddr);
1388f2148a47SJeff Kirsher 	return result;
1389f2148a47SJeff Kirsher }
1390f2148a47SJeff Kirsher 
1391f2148a47SJeff Kirsher static void mdio_write(struct net_device *dev, int phy_id, int regnum, int value)
1392f2148a47SJeff Kirsher {
1393f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
1394f2148a47SJeff Kirsher 	void __iomem *ioaddr = rp->base;
1395f2148a47SJeff Kirsher 
1396f2148a47SJeff Kirsher 	rhine_disable_linkmon(ioaddr, rp->quirks);
1397f2148a47SJeff Kirsher 
1398f2148a47SJeff Kirsher 	/* rhine_disable_linkmon already cleared MIICmd */
1399f2148a47SJeff Kirsher 	iowrite8(phy_id, ioaddr + MIIPhyAddr);
1400f2148a47SJeff Kirsher 	iowrite8(regnum, ioaddr + MIIRegAddr);
1401f2148a47SJeff Kirsher 	iowrite16(value, ioaddr + MIIData);
1402f2148a47SJeff Kirsher 	iowrite8(0x20, ioaddr + MIICmd);		/* Trigger write */
1403f2148a47SJeff Kirsher 	RHINE_WAIT_FOR(!(ioread8(ioaddr + MIICmd) & 0x20));
1404f2148a47SJeff Kirsher 
1405f2148a47SJeff Kirsher 	rhine_enable_linkmon(ioaddr);
1406f2148a47SJeff Kirsher }
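
/*
 * Illustrative sketch (not from the original source): restarting
 * autonegotiation through this MDIO pair would look like
 *
 *	int bmcr = mdio_read(dev, rp->mii_if.phy_id, MII_BMCR);
 *
 *	mdio_write(dev, rp->mii_if.phy_id, MII_BMCR,
 *		   bmcr | BMCR_ANENABLE | BMCR_ANRESTART);
 *
 * which is essentially what mii_nway_restart() does through the hooks
 * registered in rp->mii_if.
 */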
1407f2148a47SJeff Kirsher 
1408f2148a47SJeff Kirsher static int rhine_open(struct net_device *dev)
1409f2148a47SJeff Kirsher {
1410f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
1411f2148a47SJeff Kirsher 	void __iomem *ioaddr = rp->base;
1412f2148a47SJeff Kirsher 	int rc;
1413f2148a47SJeff Kirsher 
1414f2148a47SJeff Kirsher 	rc = request_irq(rp->pdev->irq, rhine_interrupt, IRQF_SHARED, dev->name,
1415f2148a47SJeff Kirsher 			dev);
1416f2148a47SJeff Kirsher 	if (rc)
1417f2148a47SJeff Kirsher 		return rc;
1418f2148a47SJeff Kirsher 
1419f2148a47SJeff Kirsher 	if (debug > 1)
1420f2148a47SJeff Kirsher 		netdev_dbg(dev, "%s() irq %d\n", __func__, rp->pdev->irq);
1421f2148a47SJeff Kirsher 
1422f2148a47SJeff Kirsher 	rc = alloc_ring(dev);
1423f2148a47SJeff Kirsher 	if (rc) {
1424f2148a47SJeff Kirsher 		free_irq(rp->pdev->irq, dev);
1425f2148a47SJeff Kirsher 		return rc;
1426f2148a47SJeff Kirsher 	}
1427f2148a47SJeff Kirsher 	alloc_rbufs(dev);
1428f2148a47SJeff Kirsher 	alloc_tbufs(dev);
1429f2148a47SJeff Kirsher 	rhine_chip_reset(dev);
1430f2148a47SJeff Kirsher 	init_registers(dev);
1431f2148a47SJeff Kirsher 	if (debug > 2)
1432f2148a47SJeff Kirsher 		netdev_dbg(dev, "%s() Done - status %04x MII status: %04x\n",
1433f2148a47SJeff Kirsher 			   __func__, ioread16(ioaddr + ChipCmd),
1434f2148a47SJeff Kirsher 			   mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
1435f2148a47SJeff Kirsher 
1436f2148a47SJeff Kirsher 	netif_start_queue(dev);
1437f2148a47SJeff Kirsher 
1438f2148a47SJeff Kirsher 	return 0;
1439f2148a47SJeff Kirsher }
1440f2148a47SJeff Kirsher 
1441f2148a47SJeff Kirsher static void rhine_reset_task(struct work_struct *work)
1442f2148a47SJeff Kirsher {
1443f2148a47SJeff Kirsher 	struct rhine_private *rp = container_of(work, struct rhine_private,
1444f2148a47SJeff Kirsher 						reset_task);
1445f2148a47SJeff Kirsher 	struct net_device *dev = rp->dev;
1446f2148a47SJeff Kirsher 
1447f2148a47SJeff Kirsher 	/* protect against concurrent rx interrupts */
1448f2148a47SJeff Kirsher 	disable_irq(rp->pdev->irq);
1449f2148a47SJeff Kirsher 
1450f2148a47SJeff Kirsher 	napi_disable(&rp->napi);
1451f2148a47SJeff Kirsher 
1452f2148a47SJeff Kirsher 	spin_lock_bh(&rp->lock);
1453f2148a47SJeff Kirsher 
1454f2148a47SJeff Kirsher 	/* clear all descriptors */
1455f2148a47SJeff Kirsher 	free_tbufs(dev);
1456f2148a47SJeff Kirsher 	free_rbufs(dev);
1457f2148a47SJeff Kirsher 	alloc_tbufs(dev);
1458f2148a47SJeff Kirsher 	alloc_rbufs(dev);
1459f2148a47SJeff Kirsher 
1460f2148a47SJeff Kirsher 	/* Reinitialize the hardware. */
1461f2148a47SJeff Kirsher 	rhine_chip_reset(dev);
1462f2148a47SJeff Kirsher 	init_registers(dev);
1463f2148a47SJeff Kirsher 
1464f2148a47SJeff Kirsher 	spin_unlock_bh(&rp->lock);
1465f2148a47SJeff Kirsher 	enable_irq(rp->pdev->irq);
1466f2148a47SJeff Kirsher 
1467f2148a47SJeff Kirsher 	dev->trans_start = jiffies; /* prevent tx timeout */
1468f2148a47SJeff Kirsher 	dev->stats.tx_errors++;
1469f2148a47SJeff Kirsher 	netif_wake_queue(dev);
1470f2148a47SJeff Kirsher }
1471f2148a47SJeff Kirsher 
1472f2148a47SJeff Kirsher static void rhine_tx_timeout(struct net_device *dev)
1473f2148a47SJeff Kirsher {
1474f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
1475f2148a47SJeff Kirsher 	void __iomem *ioaddr = rp->base;
1476f2148a47SJeff Kirsher 
1477f2148a47SJeff Kirsher 	netdev_warn(dev, "Transmit timed out, status %04x, PHY status %04x, resetting...\n",
1478f2148a47SJeff Kirsher 		    ioread16(ioaddr + IntrStatus),
1479f2148a47SJeff Kirsher 		    mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
1480f2148a47SJeff Kirsher 
1481f2148a47SJeff Kirsher 	schedule_work(&rp->reset_task);
1482f2148a47SJeff Kirsher }
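
/*
 * Explanatory note (not in the original source): the timeout handler
 * above only logs and schedules reset_task because it is called in
 * atomic context; the actual recovery -- ring teardown, chip reset,
 * register init -- runs later in process context in rhine_reset_task().
 */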
1483f2148a47SJeff Kirsher 
1484f2148a47SJeff Kirsher static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
1485f2148a47SJeff Kirsher 				  struct net_device *dev)
1486f2148a47SJeff Kirsher {
1487f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
1488f2148a47SJeff Kirsher 	void __iomem *ioaddr = rp->base;
1489f2148a47SJeff Kirsher 	unsigned entry;
1490f2148a47SJeff Kirsher 	unsigned long flags;
1491f2148a47SJeff Kirsher 
1492f2148a47SJeff Kirsher 	/* Caution: the write order is important here, set the field
1493f2148a47SJeff Kirsher 	   with the "ownership" bits last. */
1494f2148a47SJeff Kirsher 
1495f2148a47SJeff Kirsher 	/* Calculate the next Tx descriptor entry. */
1496f2148a47SJeff Kirsher 	entry = rp->cur_tx % TX_RING_SIZE;
1497f2148a47SJeff Kirsher 
1498f2148a47SJeff Kirsher 	if (skb_padto(skb, ETH_ZLEN))
1499f2148a47SJeff Kirsher 		return NETDEV_TX_OK;
1500f2148a47SJeff Kirsher 
1501f2148a47SJeff Kirsher 	rp->tx_skbuff[entry] = skb;
1502f2148a47SJeff Kirsher 
1503f2148a47SJeff Kirsher 	if ((rp->quirks & rqRhineI) &&
1504f2148a47SJeff Kirsher 	    (((unsigned long)skb->data & 3) || skb_shinfo(skb)->nr_frags != 0 || skb->ip_summed == CHECKSUM_PARTIAL)) {
1505f2148a47SJeff Kirsher 		/* Must use alignment buffer. */
1506f2148a47SJeff Kirsher 		if (skb->len > PKT_BUF_SZ) {
1507f2148a47SJeff Kirsher 			/* packet too long, drop it */
1508f2148a47SJeff Kirsher 			dev_kfree_skb(skb);
1509f2148a47SJeff Kirsher 			rp->tx_skbuff[entry] = NULL;
1510f2148a47SJeff Kirsher 			dev->stats.tx_dropped++;
1511f2148a47SJeff Kirsher 			return NETDEV_TX_OK;
1512f2148a47SJeff Kirsher 		}
1513f2148a47SJeff Kirsher 
1514f2148a47SJeff Kirsher 		/* Padding is not copied and so must be redone. */
1515f2148a47SJeff Kirsher 		skb_copy_and_csum_dev(skb, rp->tx_buf[entry]);
1516f2148a47SJeff Kirsher 		if (skb->len < ETH_ZLEN)
1517f2148a47SJeff Kirsher 			memset(rp->tx_buf[entry] + skb->len, 0,
1518f2148a47SJeff Kirsher 			       ETH_ZLEN - skb->len);
1519f2148a47SJeff Kirsher 		rp->tx_skbuff_dma[entry] = 0;
1520f2148a47SJeff Kirsher 		rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_bufs_dma +
1521f2148a47SJeff Kirsher 						      (rp->tx_buf[entry] -
1522f2148a47SJeff Kirsher 						       rp->tx_bufs));
1523f2148a47SJeff Kirsher 	} else {
1524f2148a47SJeff Kirsher 		rp->tx_skbuff_dma[entry] =
1525f2148a47SJeff Kirsher 			pci_map_single(rp->pdev, skb->data, skb->len,
1526f2148a47SJeff Kirsher 				       PCI_DMA_TODEVICE);
1527f2148a47SJeff Kirsher 		rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_skbuff_dma[entry]);
1528f2148a47SJeff Kirsher 	}
1529f2148a47SJeff Kirsher 
1530f2148a47SJeff Kirsher 	rp->tx_ring[entry].desc_length =
1531f2148a47SJeff Kirsher 		cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));
1532f2148a47SJeff Kirsher 
1533f2148a47SJeff Kirsher 	if (unlikely(vlan_tx_tag_present(skb))) {
1534f2148a47SJeff Kirsher 		rp->tx_ring[entry].tx_status = cpu_to_le32((vlan_tx_tag_get(skb)) << 16);
1535f2148a47SJeff Kirsher 		/* request tagging */
1536f2148a47SJeff Kirsher 		rp->tx_ring[entry].desc_length |= cpu_to_le32(0x020000);
1537f2148a47SJeff Kirsher 	} else
1539f2148a47SJeff Kirsher 		rp->tx_ring[entry].tx_status = 0;
1540f2148a47SJeff Kirsher 
1541f2148a47SJeff Kirsher 	/* lock eth irq */
1542f2148a47SJeff Kirsher 	spin_lock_irqsave(&rp->lock, flags);
1543f2148a47SJeff Kirsher 	wmb();
1544f2148a47SJeff Kirsher 	rp->tx_ring[entry].tx_status |= cpu_to_le32(DescOwn);
1545f2148a47SJeff Kirsher 	wmb();
1546f2148a47SJeff Kirsher 
1547f2148a47SJeff Kirsher 	rp->cur_tx++;
1548f2148a47SJeff Kirsher 
1549f2148a47SJeff Kirsher 	/* Non-x86 Todo: explicitly flush cache lines here. */
1550f2148a47SJeff Kirsher 
1551f2148a47SJeff Kirsher 	if (vlan_tx_tag_present(skb))
1552f2148a47SJeff Kirsher 		/* Tx queues are bits 7-0 (first Tx queue: bit 7) */
1553f2148a47SJeff Kirsher 		BYTE_REG_BITS_ON(1 << 7, ioaddr + TQWake);
1554f2148a47SJeff Kirsher 
1555f2148a47SJeff Kirsher 	/* Wake the potentially-idle transmit channel */
1556f2148a47SJeff Kirsher 	iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
1557f2148a47SJeff Kirsher 	       ioaddr + ChipCmd1);
1558f2148a47SJeff Kirsher 	IOSYNC;
1559f2148a47SJeff Kirsher 
1560f2148a47SJeff Kirsher 	if (rp->cur_tx == rp->dirty_tx + TX_QUEUE_LEN)
1561f2148a47SJeff Kirsher 		netif_stop_queue(dev);
1562f2148a47SJeff Kirsher 
1563f2148a47SJeff Kirsher 	spin_unlock_irqrestore(&rp->lock, flags);
1564f2148a47SJeff Kirsher 
1565f2148a47SJeff Kirsher 	if (debug > 4) {
1566f2148a47SJeff Kirsher 		netdev_dbg(dev, "Transmit frame #%d queued in slot %d\n",
1567f2148a47SJeff Kirsher 			   rp->cur_tx-1, entry);
1568f2148a47SJeff Kirsher 	}
1569f2148a47SJeff Kirsher 	return NETDEV_TX_OK;
1570f2148a47SJeff Kirsher }
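
/*
 * Explanatory note (not in the original source): the two wmb() calls in
 * rhine_start_tx() enforce the ordering rule stated at its top -- every
 * descriptor field must be globally visible before DescOwn hands the
 * descriptor to the chip, and DescOwn itself must be visible before the
 * Cmd1TxDemand poke can make the chip fetch it.
 */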
1571f2148a47SJeff Kirsher 
1572f2148a47SJeff Kirsher /* The interrupt handler does all of the Rx thread work and cleans up
1573f2148a47SJeff Kirsher    after the Tx thread. */
1574f2148a47SJeff Kirsher static irqreturn_t rhine_interrupt(int irq, void *dev_instance)
1575f2148a47SJeff Kirsher {
1576f2148a47SJeff Kirsher 	struct net_device *dev = dev_instance;
1577f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
1578f2148a47SJeff Kirsher 	void __iomem *ioaddr = rp->base;
1579f2148a47SJeff Kirsher 	u32 intr_status;
1580f2148a47SJeff Kirsher 	int boguscnt = max_interrupt_work;
1581f2148a47SJeff Kirsher 	int handled = 0;
1582f2148a47SJeff Kirsher 
1583f2148a47SJeff Kirsher 	while ((intr_status = get_intr_status(dev))) {
1584f2148a47SJeff Kirsher 		handled = 1;
1585f2148a47SJeff Kirsher 
1586f2148a47SJeff Kirsher 		/* Acknowledge all of the current interrupt sources ASAP. */
1587f2148a47SJeff Kirsher 		if (intr_status & IntrTxDescRace)
1588f2148a47SJeff Kirsher 			iowrite8(0x08, ioaddr + IntrStatus2);
1589f2148a47SJeff Kirsher 		iowrite16(intr_status & 0xffff, ioaddr + IntrStatus);
1590f2148a47SJeff Kirsher 		IOSYNC;
1591f2148a47SJeff Kirsher 
1592f2148a47SJeff Kirsher 		if (debug > 4)
1593f2148a47SJeff Kirsher 			netdev_dbg(dev, "Interrupt, status %08x\n",
1594f2148a47SJeff Kirsher 				   intr_status);
1595f2148a47SJeff Kirsher 
1596f2148a47SJeff Kirsher 		if (intr_status & (IntrRxDone | IntrRxErr | IntrRxDropped |
1597f2148a47SJeff Kirsher 				   IntrRxWakeUp | IntrRxEmpty | IntrRxNoBuf)) {
1598f2148a47SJeff Kirsher 			iowrite16(IntrTxAborted |
1599f2148a47SJeff Kirsher 				  IntrTxDone | IntrTxError | IntrTxUnderrun |
1600f2148a47SJeff Kirsher 				  IntrPCIErr | IntrStatsMax | IntrLinkChange,
1601f2148a47SJeff Kirsher 				  ioaddr + IntrEnable);
1602f2148a47SJeff Kirsher 
1603f2148a47SJeff Kirsher 			napi_schedule(&rp->napi);
1604f2148a47SJeff Kirsher 		}
1605f2148a47SJeff Kirsher 
1606f2148a47SJeff Kirsher 		if (intr_status & (IntrTxErrSummary | IntrTxDone)) {
1607f2148a47SJeff Kirsher 			if (intr_status & IntrTxErrSummary) {
1608f2148a47SJeff Kirsher 				/* Avoid scavenging before Tx engine turned off */
1609f2148a47SJeff Kirsher 				RHINE_WAIT_FOR(!(ioread8(ioaddr+ChipCmd) & CmdTxOn));
1610f2148a47SJeff Kirsher 				if (debug > 2 &&
1611f2148a47SJeff Kirsher 				    ioread8(ioaddr+ChipCmd) & CmdTxOn)
1612f2148a47SJeff Kirsher 					netdev_warn(dev,
1613f2148a47SJeff Kirsher 						    "%s: Tx engine still on\n",
1614f2148a47SJeff Kirsher 						    __func__);
1615f2148a47SJeff Kirsher 			}
1616f2148a47SJeff Kirsher 			rhine_tx(dev);
1617f2148a47SJeff Kirsher 		}
1618f2148a47SJeff Kirsher 
1619f2148a47SJeff Kirsher 		/* Abnormal error summary/uncommon events handlers. */
1620f2148a47SJeff Kirsher 		if (intr_status & (IntrPCIErr | IntrLinkChange |
1621f2148a47SJeff Kirsher 				   IntrStatsMax | IntrTxError | IntrTxAborted |
1622f2148a47SJeff Kirsher 				   IntrTxUnderrun | IntrTxDescRace))
1623f2148a47SJeff Kirsher 			rhine_error(dev, intr_status);
1624f2148a47SJeff Kirsher 
1625f2148a47SJeff Kirsher 		if (--boguscnt < 0) {
1626f2148a47SJeff Kirsher 			netdev_warn(dev, "Too much work at interrupt, status=%#08x\n",
1627f2148a47SJeff Kirsher 				    intr_status);
1628f2148a47SJeff Kirsher 			break;
1629f2148a47SJeff Kirsher 		}
1630f2148a47SJeff Kirsher 	}
1631f2148a47SJeff Kirsher 
1632f2148a47SJeff Kirsher 	if (debug > 3)
1633f2148a47SJeff Kirsher 		netdev_dbg(dev, "exiting interrupt, status=%08x\n",
1634f2148a47SJeff Kirsher 			   ioread16(ioaddr + IntrStatus));
1635f2148a47SJeff Kirsher 	return IRQ_RETVAL(handled);
1636f2148a47SJeff Kirsher }
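
/*
 * Explanatory note (not in the original source): on Rx events the
 * handler above rewrites IntrEnable with only the Tx and error bits,
 * masking further Rx interrupts, and defers Rx work to NAPI via
 * napi_schedule(); the poll routine is then expected to restore the
 * full interrupt mask once the ring has been drained.
 */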
1637f2148a47SJeff Kirsher 
1638f2148a47SJeff Kirsher /* This routine is logically part of the interrupt handler, but isolated
1639f2148a47SJeff Kirsher    for clarity. */
1640f2148a47SJeff Kirsher static void rhine_tx(struct net_device *dev)
1641f2148a47SJeff Kirsher {
1642f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
1643f2148a47SJeff Kirsher 	int txstatus = 0, entry = rp->dirty_tx % TX_RING_SIZE;
1644f2148a47SJeff Kirsher 
1645f2148a47SJeff Kirsher 	spin_lock(&rp->lock);
1646f2148a47SJeff Kirsher 
1647f2148a47SJeff Kirsher 	/* find and cleanup dirty tx descriptors */
1648f2148a47SJeff Kirsher 	while (rp->dirty_tx != rp->cur_tx) {
1649f2148a47SJeff Kirsher 		txstatus = le32_to_cpu(rp->tx_ring[entry].tx_status);
1650f2148a47SJeff Kirsher 		if (debug > 6)
1651f2148a47SJeff Kirsher 			netdev_dbg(dev, "Tx scavenge %d status %08x\n",
1652f2148a47SJeff Kirsher 				   entry, txstatus);
1653f2148a47SJeff Kirsher 		if (txstatus & DescOwn)
1654f2148a47SJeff Kirsher 			break;
1655f2148a47SJeff Kirsher 		if (txstatus & 0x8000) {
1656f2148a47SJeff Kirsher 			if (debug > 1)
1657f2148a47SJeff Kirsher 				netdev_dbg(dev, "Transmit error, Tx status %08x\n",
1658f2148a47SJeff Kirsher 					   txstatus);
1659f2148a47SJeff Kirsher 			dev->stats.tx_errors++;
1660f2148a47SJeff Kirsher 			if (txstatus & 0x0400)
1661f2148a47SJeff Kirsher 				dev->stats.tx_carrier_errors++;
1662f2148a47SJeff Kirsher 			if (txstatus & 0x0200)
1663f2148a47SJeff Kirsher 				dev->stats.tx_window_errors++;
1664f2148a47SJeff Kirsher 			if (txstatus & 0x0100)
1665f2148a47SJeff Kirsher 				dev->stats.tx_aborted_errors++;
1666f2148a47SJeff Kirsher 			if (txstatus & 0x0080)
1667f2148a47SJeff Kirsher 				dev->stats.tx_heartbeat_errors++;
1668f2148a47SJeff Kirsher 			if (((rp->quirks & rqRhineI) && txstatus & 0x0002) ||
1669f2148a47SJeff Kirsher 			    (txstatus & 0x0800) || (txstatus & 0x1000)) {
1670f2148a47SJeff Kirsher 				dev->stats.tx_fifo_errors++;
1671f2148a47SJeff Kirsher 				rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
1672f2148a47SJeff Kirsher 				break; /* Keep the skb - we try again */
1673f2148a47SJeff Kirsher 			}
1674f2148a47SJeff Kirsher 			/* Transmitter restarted in 'abnormal' handler. */
1675f2148a47SJeff Kirsher 		} else {
1676f2148a47SJeff Kirsher 			if (rp->quirks & rqRhineI)
1677f2148a47SJeff Kirsher 				dev->stats.collisions += (txstatus >> 3) & 0x0F;
1678f2148a47SJeff Kirsher 			else
1679f2148a47SJeff Kirsher 				dev->stats.collisions += txstatus & 0x0F;
1680f2148a47SJeff Kirsher 			if (debug > 6)
1681f2148a47SJeff Kirsher 				netdev_dbg(dev, "collisions: %1.1x:%1.1x\n",
1682f2148a47SJeff Kirsher 					   (txstatus >> 3) & 0xF,
1683f2148a47SJeff Kirsher 					   txstatus & 0xF);
1684f2148a47SJeff Kirsher 			dev->stats.tx_bytes += rp->tx_skbuff[entry]->len;
1685f2148a47SJeff Kirsher 			dev->stats.tx_packets++;
1686f2148a47SJeff Kirsher 		}
1687f2148a47SJeff Kirsher 		/* Free the original skb. */
1688f2148a47SJeff Kirsher 		if (rp->tx_skbuff_dma[entry]) {
1689f2148a47SJeff Kirsher 			pci_unmap_single(rp->pdev,
1690f2148a47SJeff Kirsher 					 rp->tx_skbuff_dma[entry],
1691f2148a47SJeff Kirsher 					 rp->tx_skbuff[entry]->len,
1692f2148a47SJeff Kirsher 					 PCI_DMA_TODEVICE);
1693f2148a47SJeff Kirsher 		}
1694f2148a47SJeff Kirsher 		dev_kfree_skb_irq(rp->tx_skbuff[entry]);
1695f2148a47SJeff Kirsher 		rp->tx_skbuff[entry] = NULL;
1696f2148a47SJeff Kirsher 		entry = (++rp->dirty_tx) % TX_RING_SIZE;
1697f2148a47SJeff Kirsher 	}
1698f2148a47SJeff Kirsher 	if ((rp->cur_tx - rp->dirty_tx) < TX_QUEUE_LEN - 4)
1699f2148a47SJeff Kirsher 		netif_wake_queue(dev);
1700f2148a47SJeff Kirsher 
1701f2148a47SJeff Kirsher 	spin_unlock(&rp->lock);
1702f2148a47SJeff Kirsher }
1703f2148a47SJeff Kirsher 
1704f2148a47SJeff Kirsher /**
1705f2148a47SJeff Kirsher  * rhine_get_vlan_tci - extract TCI from Rx data buffer
1706f2148a47SJeff Kirsher  * @skb: pointer to sk_buff
1707f2148a47SJeff Kirsher  * @data_size: used data area of the buffer including CRC
1708f2148a47SJeff Kirsher  *
1709f2148a47SJeff Kirsher  * If hardware VLAN tag extraction is enabled and the chip indicates a 802.1Q
1710f2148a47SJeff Kirsher  * packet, the extracted 802.1Q header (2 bytes TPID + 2 bytes TCI) is 4-byte
1711f2148a47SJeff Kirsher  * aligned following the CRC.
1712f2148a47SJeff Kirsher  */
1713f2148a47SJeff Kirsher static inline u16 rhine_get_vlan_tci(struct sk_buff *skb, int data_size)
1714f2148a47SJeff Kirsher {
1715f2148a47SJeff Kirsher 	u8 *trailer = (u8 *)skb->data + ((data_size + 3) & ~3) + 2;
1716f2148a47SJeff Kirsher 	return be16_to_cpup((__be16 *)trailer);
1717f2148a47SJeff Kirsher }
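
/*
 * Worked example (explanatory, not in the original source): for a
 * tagged frame with data_size = 65 (payload plus 4-byte CRC), the
 * 802.1Q header sits at the next 4-byte boundary, (65 + 3) & ~3 = 68;
 * skipping the 2-byte TPID leaves the big-endian TCI at
 * skb->data + 70.
 */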
1718f2148a47SJeff Kirsher 
1719f2148a47SJeff Kirsher /* Process up to limit frames from receive ring */
1720f2148a47SJeff Kirsher static int rhine_rx(struct net_device *dev, int limit)
1721f2148a47SJeff Kirsher {
1722f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
1723f2148a47SJeff Kirsher 	int count;
1724f2148a47SJeff Kirsher 	int entry = rp->cur_rx % RX_RING_SIZE;
1725f2148a47SJeff Kirsher 
1726f2148a47SJeff Kirsher 	if (debug > 4) {
1727f2148a47SJeff Kirsher 		netdev_dbg(dev, "%s(), entry %d status %08x\n",
1728f2148a47SJeff Kirsher 			   __func__, entry,
1729f2148a47SJeff Kirsher 			   le32_to_cpu(rp->rx_head_desc->rx_status));
1730f2148a47SJeff Kirsher 	}
1731f2148a47SJeff Kirsher 
1732f2148a47SJeff Kirsher 	/* If EOP is set on the next entry, it's a new packet. Send it up. */
1733f2148a47SJeff Kirsher 	for (count = 0; count < limit; ++count) {
1734f2148a47SJeff Kirsher 		struct rx_desc *desc = rp->rx_head_desc;
1735f2148a47SJeff Kirsher 		u32 desc_status = le32_to_cpu(desc->rx_status);
1736f2148a47SJeff Kirsher 		u32 desc_length = le32_to_cpu(desc->desc_length);
1737f2148a47SJeff Kirsher 		int data_size = desc_status >> 16;
1738f2148a47SJeff Kirsher 
1739f2148a47SJeff Kirsher 		if (desc_status & DescOwn)
1740f2148a47SJeff Kirsher 			break;
1741f2148a47SJeff Kirsher 
1742f2148a47SJeff Kirsher 		if (debug > 4)
1743f2148a47SJeff Kirsher 			netdev_dbg(dev, "%s() status is %08x\n",
1744f2148a47SJeff Kirsher 				   __func__, desc_status);
1745f2148a47SJeff Kirsher 
1746f2148a47SJeff Kirsher 		if ((desc_status & (RxWholePkt | RxErr)) != RxWholePkt) {
1747f2148a47SJeff Kirsher 			if ((desc_status & RxWholePkt) != RxWholePkt) {
1748f2148a47SJeff Kirsher 				netdev_warn(dev,
1749f2148a47SJeff Kirsher 					    "Oversized Ethernet frame spanned multiple buffers, entry %#x length %d status %08x!\n",
1750f2148a47SJeff Kirsher 					    entry, data_size, desc_status);
1753f2148a47SJeff Kirsher 				netdev_warn(dev,
1754f2148a47SJeff Kirsher 					    "Oversized Ethernet frame %p vs %p\n",
1755f2148a47SJeff Kirsher 					    rp->rx_head_desc,
1756f2148a47SJeff Kirsher 					    &rp->rx_ring[entry]);
1757f2148a47SJeff Kirsher 				dev->stats.rx_length_errors++;
1758f2148a47SJeff Kirsher 			} else if (desc_status & RxErr) {
1759f2148a47SJeff Kirsher 				/* There was an error. */
1760f2148a47SJeff Kirsher 				if (debug > 2)
1761f2148a47SJeff Kirsher 					netdev_dbg(dev, "%s() Rx error was %08x\n",
1762f2148a47SJeff Kirsher 						   __func__, desc_status);
1763f2148a47SJeff Kirsher 				dev->stats.rx_errors++;
1764f2148a47SJeff Kirsher 				if (desc_status & 0x0030)
1765f2148a47SJeff Kirsher 					dev->stats.rx_length_errors++;
1766f2148a47SJeff Kirsher 				if (desc_status & 0x0048)
1767f2148a47SJeff Kirsher 					dev->stats.rx_fifo_errors++;
1768f2148a47SJeff Kirsher 				if (desc_status & 0x0004)
1769f2148a47SJeff Kirsher 					dev->stats.rx_frame_errors++;
1770f2148a47SJeff Kirsher 				if (desc_status & 0x0002) {
1771f2148a47SJeff Kirsher 					/* this can also be updated outside the interrupt handler */
1772f2148a47SJeff Kirsher 					spin_lock(&rp->lock);
1773f2148a47SJeff Kirsher 					dev->stats.rx_crc_errors++;
1774f2148a47SJeff Kirsher 					spin_unlock(&rp->lock);
1775f2148a47SJeff Kirsher 				}
1776f2148a47SJeff Kirsher 			}
1777f2148a47SJeff Kirsher 		} else {
1778f2148a47SJeff Kirsher 			struct sk_buff *skb = NULL;
1779f2148a47SJeff Kirsher 			/* Length should omit the CRC */
1780f2148a47SJeff Kirsher 			int pkt_len = data_size - 4;
1781f2148a47SJeff Kirsher 			u16 vlan_tci = 0;
1782f2148a47SJeff Kirsher 
1783f2148a47SJeff Kirsher 			/* Check if the packet is long enough to accept without
1784f2148a47SJeff Kirsher 			   copying to a minimally-sized skbuff. */
1785f2148a47SJeff Kirsher 			if (pkt_len < rx_copybreak)
1786f2148a47SJeff Kirsher 				skb = netdev_alloc_skb_ip_align(dev, pkt_len);
1787f2148a47SJeff Kirsher 			if (skb) {
1788f2148a47SJeff Kirsher 				pci_dma_sync_single_for_cpu(rp->pdev,
1789f2148a47SJeff Kirsher 							    rp->rx_skbuff_dma[entry],
1790f2148a47SJeff Kirsher 							    rp->rx_buf_sz,
1791f2148a47SJeff Kirsher 							    PCI_DMA_FROMDEVICE);
1792f2148a47SJeff Kirsher 
1793f2148a47SJeff Kirsher 				skb_copy_to_linear_data(skb,
1794f2148a47SJeff Kirsher 						 rp->rx_skbuff[entry]->data,
1795f2148a47SJeff Kirsher 						 pkt_len);
1796f2148a47SJeff Kirsher 				skb_put(skb, pkt_len);
1797f2148a47SJeff Kirsher 				pci_dma_sync_single_for_device(rp->pdev,
1798f2148a47SJeff Kirsher 							       rp->rx_skbuff_dma[entry],
1799f2148a47SJeff Kirsher 							       rp->rx_buf_sz,
1800f2148a47SJeff Kirsher 							       PCI_DMA_FROMDEVICE);
1801f2148a47SJeff Kirsher 			} else {
1802f2148a47SJeff Kirsher 				skb = rp->rx_skbuff[entry];
1803f2148a47SJeff Kirsher 				if (skb == NULL) {
1804f2148a47SJeff Kirsher 					netdev_err(dev, "Inconsistent Rx descriptor chain\n");
1805f2148a47SJeff Kirsher 					break;
1806f2148a47SJeff Kirsher 				}
1807f2148a47SJeff Kirsher 				rp->rx_skbuff[entry] = NULL;
1808f2148a47SJeff Kirsher 				skb_put(skb, pkt_len);
1809f2148a47SJeff Kirsher 				pci_unmap_single(rp->pdev,
1810f2148a47SJeff Kirsher 						 rp->rx_skbuff_dma[entry],
1811f2148a47SJeff Kirsher 						 rp->rx_buf_sz,
1812f2148a47SJeff Kirsher 						 PCI_DMA_FROMDEVICE);
1813f2148a47SJeff Kirsher 			}
1814f2148a47SJeff Kirsher 
1815f2148a47SJeff Kirsher 			if (unlikely(desc_length & DescTag))
1816f2148a47SJeff Kirsher 				vlan_tci = rhine_get_vlan_tci(skb, data_size);
1817f2148a47SJeff Kirsher 
1818f2148a47SJeff Kirsher 			skb->protocol = eth_type_trans(skb, dev);
1819f2148a47SJeff Kirsher 
1820f2148a47SJeff Kirsher 			if (unlikely(desc_length & DescTag))
1821f2148a47SJeff Kirsher 				__vlan_hwaccel_put_tag(skb, vlan_tci);
1822f2148a47SJeff Kirsher 			netif_receive_skb(skb);
1823f2148a47SJeff Kirsher 			dev->stats.rx_bytes += pkt_len;
1824f2148a47SJeff Kirsher 			dev->stats.rx_packets++;
1825f2148a47SJeff Kirsher 		}
1826f2148a47SJeff Kirsher 		entry = (++rp->cur_rx) % RX_RING_SIZE;
1827f2148a47SJeff Kirsher 		rp->rx_head_desc = &rp->rx_ring[entry];
1828f2148a47SJeff Kirsher 	}
1829f2148a47SJeff Kirsher 
1830f2148a47SJeff Kirsher 	/* Refill the Rx ring buffers. */
1831f2148a47SJeff Kirsher 	for (; rp->cur_rx - rp->dirty_rx > 0; rp->dirty_rx++) {
1832f2148a47SJeff Kirsher 		struct sk_buff *skb;
1833f2148a47SJeff Kirsher 		entry = rp->dirty_rx % RX_RING_SIZE;
1834f2148a47SJeff Kirsher 		if (rp->rx_skbuff[entry] == NULL) {
1835f2148a47SJeff Kirsher 			skb = netdev_alloc_skb(dev, rp->rx_buf_sz);
1836f2148a47SJeff Kirsher 			rp->rx_skbuff[entry] = skb;
1837f2148a47SJeff Kirsher 			if (skb == NULL)
1838f2148a47SJeff Kirsher 				break;	/* Better luck next round. */
1839f2148a47SJeff Kirsher 			skb->dev = dev;	/* Mark as being used by this device. */
1840f2148a47SJeff Kirsher 			rp->rx_skbuff_dma[entry] =
1841f2148a47SJeff Kirsher 				pci_map_single(rp->pdev, skb->data,
1842f2148a47SJeff Kirsher 					       rp->rx_buf_sz,
1843f2148a47SJeff Kirsher 					       PCI_DMA_FROMDEVICE);
1844f2148a47SJeff Kirsher 			rp->rx_ring[entry].addr = cpu_to_le32(rp->rx_skbuff_dma[entry]);
1845f2148a47SJeff Kirsher 		}
1846f2148a47SJeff Kirsher 		rp->rx_ring[entry].rx_status = cpu_to_le32(DescOwn);
1847f2148a47SJeff Kirsher 	}
1848f2148a47SJeff Kirsher 
1849f2148a47SJeff Kirsher 	return count;
1850f2148a47SJeff Kirsher }
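
/*
 * Explanatory note (not in the original source): rhine_rx() uses the
 * classic copybreak scheme -- frames shorter than rx_copybreak are
 * copied into a small fresh skb so the mapped DMA buffer stays in the
 * ring, while larger frames are unmapped and passed up whole, leaving a
 * hole for the refill loop to plug with a new buffer.
 */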
1851f2148a47SJeff Kirsher 
1852f2148a47SJeff Kirsher /*
1853f2148a47SJeff Kirsher  * Clears the "tally counters" for CRC errors and missed frames(?).
1854f2148a47SJeff Kirsher  * It has been reported that some chips need a write of 0 to clear
1855f2148a47SJeff Kirsher  * these, for others the counters are set to 1 when written to and
1856f2148a47SJeff Kirsher  * instead cleared when read. So we clear them both ways ...
1857f2148a47SJeff Kirsher  */
1858f2148a47SJeff Kirsher static inline void clear_tally_counters(void __iomem *ioaddr)
1859f2148a47SJeff Kirsher {
1860f2148a47SJeff Kirsher 	iowrite32(0, ioaddr + RxMissed);
1861f2148a47SJeff Kirsher 	ioread16(ioaddr + RxCRCErrs);
1862f2148a47SJeff Kirsher 	ioread16(ioaddr + RxMissed);
1863f2148a47SJeff Kirsher }
1864f2148a47SJeff Kirsher 
1865f2148a47SJeff Kirsher static void rhine_restart_tx(struct net_device *dev)
1865f2148a47SJeff Kirsher {
1866f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
1867f2148a47SJeff Kirsher 	void __iomem *ioaddr = rp->base;
1868f2148a47SJeff Kirsher 	int entry = rp->dirty_tx % TX_RING_SIZE;
1869f2148a47SJeff Kirsher 	u32 intr_status;
1870f2148a47SJeff Kirsher 
1871f2148a47SJeff Kirsher 	/*
1872f2148a47SJeff Kirsher 	 * If new errors occurred, we need to sort them out before doing Tx.
1873f2148a47SJeff Kirsher 	 * In that case the ISR will be back here soon anyway.
1874f2148a47SJeff Kirsher 	 */
1875f2148a47SJeff Kirsher 	intr_status = get_intr_status(dev);
1876f2148a47SJeff Kirsher 
1877f2148a47SJeff Kirsher 	if ((intr_status & IntrTxErrSummary) == 0) {
1878f2148a47SJeff Kirsher 
1879f2148a47SJeff Kirsher 		/* We know better than the chip where it should continue. */
1880f2148a47SJeff Kirsher 		iowrite32(rp->tx_ring_dma + entry * sizeof(struct tx_desc),
1881f2148a47SJeff Kirsher 		       ioaddr + TxRingPtr);
1882f2148a47SJeff Kirsher 
1883f2148a47SJeff Kirsher 		iowrite8(ioread8(ioaddr + ChipCmd) | CmdTxOn,
1884f2148a47SJeff Kirsher 		       ioaddr + ChipCmd);
1885f2148a47SJeff Kirsher 
1886f2148a47SJeff Kirsher 		if (rp->tx_ring[entry].desc_length & cpu_to_le32(0x020000))
1887f2148a47SJeff Kirsher 			/* Tx queues are bits 7-0 (first Tx queue: bit 7) */
1888f2148a47SJeff Kirsher 			BYTE_REG_BITS_ON(1 << 7, ioaddr + TQWake);
1889f2148a47SJeff Kirsher 
1890f2148a47SJeff Kirsher 		iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
1891f2148a47SJeff Kirsher 		       ioaddr + ChipCmd1);
1892f2148a47SJeff Kirsher 		IOSYNC;
1893f2148a47SJeff Kirsher 	} else {
1895f2148a47SJeff Kirsher 		/* This should never happen */
1896f2148a47SJeff Kirsher 		if (debug > 1)
1897f2148a47SJeff Kirsher 			netdev_warn(dev, "%s() Another error occurred %08x\n",
1898f2148a47SJeff Kirsher 				   __func__, intr_status);
1899f2148a47SJeff Kirsher 	}
1901f2148a47SJeff Kirsher }
1902f2148a47SJeff Kirsher 
1903f2148a47SJeff Kirsher static void rhine_error(struct net_device *dev, int intr_status)
1904f2148a47SJeff Kirsher {
1905f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
1906f2148a47SJeff Kirsher 	void __iomem *ioaddr = rp->base;
1907f2148a47SJeff Kirsher 
1908f2148a47SJeff Kirsher 	spin_lock(&rp->lock);
1909f2148a47SJeff Kirsher 
1910f2148a47SJeff Kirsher 	if (intr_status & IntrLinkChange)
1911f2148a47SJeff Kirsher 		rhine_check_media(dev, 0);
1912f2148a47SJeff Kirsher 	if (intr_status & IntrStatsMax) {
1913f2148a47SJeff Kirsher 		dev->stats.rx_crc_errors += ioread16(ioaddr + RxCRCErrs);
1914f2148a47SJeff Kirsher 		dev->stats.rx_missed_errors += ioread16(ioaddr + RxMissed);
1915f2148a47SJeff Kirsher 		clear_tally_counters(ioaddr);
1916f2148a47SJeff Kirsher 	}
1917f2148a47SJeff Kirsher 	if (intr_status & IntrTxAborted) {
1918f2148a47SJeff Kirsher 		if (debug > 1)
1919f2148a47SJeff Kirsher 			netdev_info(dev, "Abort %08x, frame dropped\n",
1920f2148a47SJeff Kirsher 				    intr_status);
1921f2148a47SJeff Kirsher 	}
1922f2148a47SJeff Kirsher 	if (intr_status & IntrTxUnderrun) {
1923*269f3114SFrancois Romieu 		rhine_kick_tx_threshold(rp);
1924f2148a47SJeff Kirsher 		if (debug > 1)
1925f2148a47SJeff Kirsher 			netdev_info(dev, "Transmitter underrun, Tx threshold now %02x\n",
1926f2148a47SJeff Kirsher 				    rp->tx_thresh);
1927f2148a47SJeff Kirsher 	}
1928f2148a47SJeff Kirsher 	if (intr_status & IntrTxDescRace) {
1929f2148a47SJeff Kirsher 		if (debug > 2)
1930f2148a47SJeff Kirsher 			netdev_info(dev, "Tx descriptor write-back race\n");
1931f2148a47SJeff Kirsher 	}
1932f2148a47SJeff Kirsher 	if ((intr_status & IntrTxError) &&
1933f2148a47SJeff Kirsher 	    (intr_status & (IntrTxAborted |
1934f2148a47SJeff Kirsher 	     IntrTxUnderrun | IntrTxDescRace)) == 0) {
1935*269f3114SFrancois Romieu 		rhine_kick_tx_threshold(rp);
1936f2148a47SJeff Kirsher 		if (debug > 1)
1937f2148a47SJeff Kirsher 			netdev_info(dev, "Unspecified error. Tx threshold now %02x\n",
1938f2148a47SJeff Kirsher 				    rp->tx_thresh);
1939f2148a47SJeff Kirsher 	}
1940f2148a47SJeff Kirsher 	if (intr_status & (IntrTxAborted | IntrTxUnderrun | IntrTxDescRace |
1941f2148a47SJeff Kirsher 			   IntrTxError))
1942f2148a47SJeff Kirsher 		rhine_restart_tx(dev);
1943f2148a47SJeff Kirsher 
1944f2148a47SJeff Kirsher 	if (intr_status & ~(IntrLinkChange | IntrStatsMax | IntrTxUnderrun |
1945f2148a47SJeff Kirsher 			    IntrTxError | IntrTxAborted | IntrNormalSummary |
1946f2148a47SJeff Kirsher 			    IntrTxDescRace)) {
1947f2148a47SJeff Kirsher 		if (debug > 1)
1948f2148a47SJeff Kirsher 			netdev_err(dev, "Something Wicked happened! %08x\n",
1949f2148a47SJeff Kirsher 				   intr_status);
1950f2148a47SJeff Kirsher 	}
1951f2148a47SJeff Kirsher 
1952f2148a47SJeff Kirsher 	spin_unlock(&rp->lock);
1953f2148a47SJeff Kirsher }
1954f2148a47SJeff Kirsher 
1955f2148a47SJeff Kirsher static struct net_device_stats *rhine_get_stats(struct net_device *dev)
1956f2148a47SJeff Kirsher {
1957f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
1958f2148a47SJeff Kirsher 	void __iomem *ioaddr = rp->base;
1959f2148a47SJeff Kirsher 	unsigned long flags;
1960f2148a47SJeff Kirsher 
1961f2148a47SJeff Kirsher 	spin_lock_irqsave(&rp->lock, flags);
1962f2148a47SJeff Kirsher 	dev->stats.rx_crc_errors += ioread16(ioaddr + RxCRCErrs);
1963f2148a47SJeff Kirsher 	dev->stats.rx_missed_errors += ioread16(ioaddr + RxMissed);
1964f2148a47SJeff Kirsher 	clear_tally_counters(ioaddr);
1965f2148a47SJeff Kirsher 	spin_unlock_irqrestore(&rp->lock, flags);
1966f2148a47SJeff Kirsher 
1967f2148a47SJeff Kirsher 	return &dev->stats;
1968f2148a47SJeff Kirsher }
1969f2148a47SJeff Kirsher 
1970f2148a47SJeff Kirsher static void rhine_set_rx_mode(struct net_device *dev)
1971f2148a47SJeff Kirsher {
1972f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
1973f2148a47SJeff Kirsher 	void __iomem *ioaddr = rp->base;
1974f2148a47SJeff Kirsher 	u32 mc_filter[2];	/* Multicast hash filter */
1975f2148a47SJeff Kirsher 	u8 rx_mode = 0x0C;	/* Note: 0x02=accept runt, 0x01=accept errs */
1976f2148a47SJeff Kirsher 	struct netdev_hw_addr *ha;
1977f2148a47SJeff Kirsher 
1978f2148a47SJeff Kirsher 	if (dev->flags & IFF_PROMISC) {		/* Set promiscuous. */
1979f2148a47SJeff Kirsher 		rx_mode = 0x1C;
1980f2148a47SJeff Kirsher 		iowrite32(0xffffffff, ioaddr + MulticastFilter0);
1981f2148a47SJeff Kirsher 		iowrite32(0xffffffff, ioaddr + MulticastFilter1);
1982f2148a47SJeff Kirsher 	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
1983f2148a47SJeff Kirsher 		   (dev->flags & IFF_ALLMULTI)) {
1984f2148a47SJeff Kirsher 		/* Too many to match, or accept all multicasts. */
1985f2148a47SJeff Kirsher 		iowrite32(0xffffffff, ioaddr + MulticastFilter0);
1986f2148a47SJeff Kirsher 		iowrite32(0xffffffff, ioaddr + MulticastFilter1);
1987f2148a47SJeff Kirsher 	} else if (rp->pdev->revision >= VT6105M) {
1988f2148a47SJeff Kirsher 		int i = 0;
1989f2148a47SJeff Kirsher 		u32 mCAMmask = 0;	/* 32 mCAMs (6105M and better) */
1990f2148a47SJeff Kirsher 		netdev_for_each_mc_addr(ha, dev) {
1991f2148a47SJeff Kirsher 			if (i == MCAM_SIZE)
1992f2148a47SJeff Kirsher 				break;
1993f2148a47SJeff Kirsher 			rhine_set_cam(ioaddr, i, ha->addr);
1994f2148a47SJeff Kirsher 			mCAMmask |= 1 << i;
1995f2148a47SJeff Kirsher 			i++;
1996f2148a47SJeff Kirsher 		}
1997f2148a47SJeff Kirsher 		rhine_set_cam_mask(ioaddr, mCAMmask);
1998f2148a47SJeff Kirsher 	} else {
1999f2148a47SJeff Kirsher 		memset(mc_filter, 0, sizeof(mc_filter));
2000f2148a47SJeff Kirsher 		netdev_for_each_mc_addr(ha, dev) {
2001f2148a47SJeff Kirsher 			int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
2002f2148a47SJeff Kirsher 
2003f2148a47SJeff Kirsher 			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
2004f2148a47SJeff Kirsher 		}
2005f2148a47SJeff Kirsher 		iowrite32(mc_filter[0], ioaddr + MulticastFilter0);
2006f2148a47SJeff Kirsher 		iowrite32(mc_filter[1], ioaddr + MulticastFilter1);
2007f2148a47SJeff Kirsher 	}
2008f2148a47SJeff Kirsher 	/* enable/disable VLAN receive filtering */
2009f2148a47SJeff Kirsher 	if (rp->pdev->revision >= VT6105M) {
2010f2148a47SJeff Kirsher 		if (dev->flags & IFF_PROMISC)
2011f2148a47SJeff Kirsher 			BYTE_REG_BITS_OFF(BCR1_VIDFR, ioaddr + PCIBusConfig1);
2012f2148a47SJeff Kirsher 		else
2013f2148a47SJeff Kirsher 			BYTE_REG_BITS_ON(BCR1_VIDFR, ioaddr + PCIBusConfig1);
2014f2148a47SJeff Kirsher 	}
2015f2148a47SJeff Kirsher 	BYTE_REG_BITS_ON(rx_mode, ioaddr + RxConfig);
2016f2148a47SJeff Kirsher }
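
/*
 * Explanatory note (assumed from the usual VIA RxConfig bit layout, not
 * stated in the original source): 0x0C sets the accept-broadcast (0x08)
 * and accept-multicast (0x04) bits, and promiscuous mode (0x1C) adds
 * the accept-all-physical bit (0x10) on top.
 */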
2017f2148a47SJeff Kirsher 
2018f2148a47SJeff Kirsher static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
2019f2148a47SJeff Kirsher {
2020f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
2021f2148a47SJeff Kirsher 
202223020ab3SRick Jones 	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
202323020ab3SRick Jones 	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
202423020ab3SRick Jones 	strlcpy(info->bus_info, pci_name(rp->pdev), sizeof(info->bus_info));
2025f2148a47SJeff Kirsher }
2026f2148a47SJeff Kirsher 
2027f2148a47SJeff Kirsher static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2028f2148a47SJeff Kirsher {
2029f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
2030f2148a47SJeff Kirsher 	int rc;
2031f2148a47SJeff Kirsher 
2032f2148a47SJeff Kirsher 	spin_lock_irq(&rp->lock);
2033f2148a47SJeff Kirsher 	rc = mii_ethtool_gset(&rp->mii_if, cmd);
2034f2148a47SJeff Kirsher 	spin_unlock_irq(&rp->lock);
2035f2148a47SJeff Kirsher 
2036f2148a47SJeff Kirsher 	return rc;
2037f2148a47SJeff Kirsher }
2038f2148a47SJeff Kirsher 
2039f2148a47SJeff Kirsher static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2040f2148a47SJeff Kirsher {
2041f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
2042f2148a47SJeff Kirsher 	int rc;
2043f2148a47SJeff Kirsher 
2044f2148a47SJeff Kirsher 	spin_lock_irq(&rp->lock);
2045f2148a47SJeff Kirsher 	rc = mii_ethtool_sset(&rp->mii_if, cmd);
2046f2148a47SJeff Kirsher 	spin_unlock_irq(&rp->lock);
2047f2148a47SJeff Kirsher 	rhine_set_carrier(&rp->mii_if);
2048f2148a47SJeff Kirsher 
2049f2148a47SJeff Kirsher 	return rc;
2050f2148a47SJeff Kirsher }
2051f2148a47SJeff Kirsher 
2052f2148a47SJeff Kirsher static int netdev_nway_reset(struct net_device *dev)
2053f2148a47SJeff Kirsher {
2054f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
2055f2148a47SJeff Kirsher 
2056f2148a47SJeff Kirsher 	return mii_nway_restart(&rp->mii_if);
2057f2148a47SJeff Kirsher }
2058f2148a47SJeff Kirsher 
2059f2148a47SJeff Kirsher static u32 netdev_get_link(struct net_device *dev)
2060f2148a47SJeff Kirsher {
2061f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
2062f2148a47SJeff Kirsher 
2063f2148a47SJeff Kirsher 	return mii_link_ok(&rp->mii_if);
2064f2148a47SJeff Kirsher }
2065f2148a47SJeff Kirsher 
2066f2148a47SJeff Kirsher static u32 netdev_get_msglevel(struct net_device *dev)
2067f2148a47SJeff Kirsher {
2068f2148a47SJeff Kirsher 	return debug;
2069f2148a47SJeff Kirsher }
2070f2148a47SJeff Kirsher 
2071f2148a47SJeff Kirsher static void netdev_set_msglevel(struct net_device *dev, u32 value)
2072f2148a47SJeff Kirsher {
2073f2148a47SJeff Kirsher 	debug = value;
2074f2148a47SJeff Kirsher }
2075f2148a47SJeff Kirsher 
2076f2148a47SJeff Kirsher static void rhine_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2077f2148a47SJeff Kirsher {
2078f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
2079f2148a47SJeff Kirsher 
2080f2148a47SJeff Kirsher 	if (!(rp->quirks & rqWOL))
2081f2148a47SJeff Kirsher 		return;
2082f2148a47SJeff Kirsher 
2083f2148a47SJeff Kirsher 	spin_lock_irq(&rp->lock);
2084f2148a47SJeff Kirsher 	wol->supported = WAKE_PHY | WAKE_MAGIC |
2085f2148a47SJeff Kirsher 			 WAKE_UCAST | WAKE_MCAST | WAKE_BCAST;	/* Untested */
2086f2148a47SJeff Kirsher 	wol->wolopts = rp->wolopts;
2087f2148a47SJeff Kirsher 	spin_unlock_irq(&rp->lock);
2088f2148a47SJeff Kirsher }
2089f2148a47SJeff Kirsher 
2090f2148a47SJeff Kirsher static int rhine_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2091f2148a47SJeff Kirsher {
2092f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
2093f2148a47SJeff Kirsher 	u32 support = WAKE_PHY | WAKE_MAGIC |
2094f2148a47SJeff Kirsher 		      WAKE_UCAST | WAKE_MCAST | WAKE_BCAST;	/* Untested */
2095f2148a47SJeff Kirsher 
2096f2148a47SJeff Kirsher 	if (!(rp->quirks & rqWOL))
2097f2148a47SJeff Kirsher 		return -EINVAL;
2098f2148a47SJeff Kirsher 
2099f2148a47SJeff Kirsher 	if (wol->wolopts & ~support)
2100f2148a47SJeff Kirsher 		return -EINVAL;
2101f2148a47SJeff Kirsher 
2102f2148a47SJeff Kirsher 	spin_lock_irq(&rp->lock);
2103f2148a47SJeff Kirsher 	rp->wolopts = wol->wolopts;
2104f2148a47SJeff Kirsher 	spin_unlock_irq(&rp->lock);
2105f2148a47SJeff Kirsher 
2106f2148a47SJeff Kirsher 	return 0;
2107f2148a47SJeff Kirsher }
2108f2148a47SJeff Kirsher 
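/* ethtool entry points; most are thin wrappers around the MII library. */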
2109f2148a47SJeff Kirsher static const struct ethtool_ops netdev_ethtool_ops = {
2110f2148a47SJeff Kirsher 	.get_drvinfo		= netdev_get_drvinfo,
2111f2148a47SJeff Kirsher 	.get_settings		= netdev_get_settings,
2112f2148a47SJeff Kirsher 	.set_settings		= netdev_set_settings,
2113f2148a47SJeff Kirsher 	.nway_reset		= netdev_nway_reset,
2114f2148a47SJeff Kirsher 	.get_link		= netdev_get_link,
2115f2148a47SJeff Kirsher 	.get_msglevel		= netdev_get_msglevel,
2116f2148a47SJeff Kirsher 	.set_msglevel		= netdev_set_msglevel,
2117f2148a47SJeff Kirsher 	.get_wol		= rhine_get_wol,
2118f2148a47SJeff Kirsher 	.set_wol		= rhine_set_wol,
2119f2148a47SJeff Kirsher };
2120f2148a47SJeff Kirsher 
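/* Handle MII ioctls (SIOCGMIIPHY and friends); the interface must be up. */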
2121f2148a47SJeff Kirsher static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2122f2148a47SJeff Kirsher {
2123f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
2124f2148a47SJeff Kirsher 	int rc;
2125f2148a47SJeff Kirsher 
2126f2148a47SJeff Kirsher 	if (!netif_running(dev))
2127f2148a47SJeff Kirsher 		return -EINVAL;
2128f2148a47SJeff Kirsher 
2129f2148a47SJeff Kirsher 	spin_lock_irq(&rp->lock);
2130f2148a47SJeff Kirsher 	rc = generic_mii_ioctl(&rp->mii_if, if_mii(rq), cmd, NULL);
2131f2148a47SJeff Kirsher 	spin_unlock_irq(&rp->lock);
2132f2148a47SJeff Kirsher 	rhine_set_carrier(&rp->mii_if);
2133f2148a47SJeff Kirsher 
2134f2148a47SJeff Kirsher 	return rc;
2135f2148a47SJeff Kirsher }
2136f2148a47SJeff Kirsher 
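/*
 * Orderly shutdown of one interface: quiesce NAPI and the reset task,
 * stop the transmit queue, put the chip into loopback, mask interrupts,
 * halt the Tx/Rx engines, then release the IRQ, buffers and rings.
 */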
2137f2148a47SJeff Kirsher static int rhine_close(struct net_device *dev)
2138f2148a47SJeff Kirsher {
2139f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
2140f2148a47SJeff Kirsher 	void __iomem *ioaddr = rp->base;
2141f2148a47SJeff Kirsher 
2142f2148a47SJeff Kirsher 	napi_disable(&rp->napi);
2143f2148a47SJeff Kirsher 	cancel_work_sync(&rp->reset_task);
2144f2148a47SJeff Kirsher 	netif_stop_queue(dev);
2145f2148a47SJeff Kirsher 
2146f2148a47SJeff Kirsher 	spin_lock_irq(&rp->lock);
2147f2148a47SJeff Kirsher 
2148f2148a47SJeff Kirsher 	if (debug > 1)
2149f2148a47SJeff Kirsher 		netdev_dbg(dev, "Shutting down ethercard, status was %04x\n",
2150f2148a47SJeff Kirsher 			   ioread16(ioaddr + ChipCmd));
2151f2148a47SJeff Kirsher 
2152f2148a47SJeff Kirsher 	/* Switch to loopback mode to avoid hardware races. */
2153f2148a47SJeff Kirsher 	iowrite8(rp->tx_thresh | 0x02, ioaddr + TxConfig);
2154f2148a47SJeff Kirsher 
2155f2148a47SJeff Kirsher 	/* Disable interrupts by clearing the interrupt mask. */
2156f2148a47SJeff Kirsher 	iowrite16(0x0000, ioaddr + IntrEnable);
2157f2148a47SJeff Kirsher 
2158f2148a47SJeff Kirsher 	/* Stop the chip's Tx and Rx processes. */
2159f2148a47SJeff Kirsher 	iowrite16(CmdStop, ioaddr + ChipCmd);
2160f2148a47SJeff Kirsher 
2161f2148a47SJeff Kirsher 	spin_unlock_irq(&rp->lock);
2162f2148a47SJeff Kirsher 
2163f2148a47SJeff Kirsher 	free_irq(rp->pdev->irq, dev);
2164f2148a47SJeff Kirsher 	free_rbufs(dev);
2165f2148a47SJeff Kirsher 	free_tbufs(dev);
2166f2148a47SJeff Kirsher 	free_ring(dev);
2167f2148a47SJeff Kirsher 
2168f2148a47SJeff Kirsher 	return 0;
2169f2148a47SJeff Kirsher }
2170f2148a47SJeff Kirsher 
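/* PCI removal: unregister the netdev and release all PCI resources. */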
2172f2148a47SJeff Kirsher static void __devexit rhine_remove_one(struct pci_dev *pdev)
2173f2148a47SJeff Kirsher {
2174f2148a47SJeff Kirsher 	struct net_device *dev = pci_get_drvdata(pdev);
2175f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
2176f2148a47SJeff Kirsher 
2177f2148a47SJeff Kirsher 	unregister_netdev(dev);
2178f2148a47SJeff Kirsher 
2179f2148a47SJeff Kirsher 	pci_iounmap(pdev, rp->base);
2180f2148a47SJeff Kirsher 	pci_release_regions(pdev);
2181f2148a47SJeff Kirsher 
2182f2148a47SJeff Kirsher 	free_netdev(dev);
2183f2148a47SJeff Kirsher 	pci_disable_device(pdev);
2184f2148a47SJeff Kirsher 	pci_set_drvdata(pdev, NULL);
2185f2148a47SJeff Kirsher }
2186f2148a47SJeff Kirsher 
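/*
 * Prepare the chip for power-off: arm the requested Wake-on-LAN events
 * and, unless avoid_D3 is set, put the chip into power state D3.
 */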
2187f2148a47SJeff Kirsher static void rhine_shutdown(struct pci_dev *pdev)
2188f2148a47SJeff Kirsher {
2189f2148a47SJeff Kirsher 	struct net_device *dev = pci_get_drvdata(pdev);
2190f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
2191f2148a47SJeff Kirsher 	void __iomem *ioaddr = rp->base;
2192f2148a47SJeff Kirsher 
2193f2148a47SJeff Kirsher 	if (!(rp->quirks & rqWOL))
2194f2148a47SJeff Kirsher 		return; /* Nothing to do for non-WOL adapters */
2195f2148a47SJeff Kirsher 
2196f2148a47SJeff Kirsher 	rhine_power_init(dev);
2197f2148a47SJeff Kirsher 
2198f2148a47SJeff Kirsher 	/* Make sure we use pattern 0, 1 and not 4, 5 */
2199f2148a47SJeff Kirsher 	if (rp->quirks & rq6patterns)
2200f2148a47SJeff Kirsher 		iowrite8(0x04, ioaddr + WOLcgClr);
2201f2148a47SJeff Kirsher 
2202f2148a47SJeff Kirsher 	if (rp->wolopts & WAKE_MAGIC) {
2203f2148a47SJeff Kirsher 		iowrite8(WOLmagic, ioaddr + WOLcrSet);
2204f2148a47SJeff Kirsher 		/*
2205f2148a47SJeff Kirsher 		 * Turn EEPROM-controlled wake-up back on -- some hardware may
2206f2148a47SJeff Kirsher 		 * not cooperate otherwise.
2207f2148a47SJeff Kirsher 		 */
2208f2148a47SJeff Kirsher 		iowrite8(ioread8(ioaddr + ConfigA) | 0x03, ioaddr + ConfigA);
2209f2148a47SJeff Kirsher 	}
2210f2148a47SJeff Kirsher 
2211f2148a47SJeff Kirsher 	if (rp->wolopts & (WAKE_BCAST|WAKE_MCAST))
2212f2148a47SJeff Kirsher 		iowrite8(WOLbmcast, ioaddr + WOLcgSet);
2213f2148a47SJeff Kirsher 
2214f2148a47SJeff Kirsher 	if (rp->wolopts & WAKE_PHY)
2215f2148a47SJeff Kirsher 		iowrite8(WOLlnkon | WOLlnkoff, ioaddr + WOLcrSet);
2216f2148a47SJeff Kirsher 
2217f2148a47SJeff Kirsher 	if (rp->wolopts & WAKE_UCAST)
2218f2148a47SJeff Kirsher 		iowrite8(WOLucast, ioaddr + WOLcrSet);
2219f2148a47SJeff Kirsher 
2220f2148a47SJeff Kirsher 	if (rp->wolopts) {
2221f2148a47SJeff Kirsher 		/* Enable legacy WOL (for old motherboards) */
2222f2148a47SJeff Kirsher 		iowrite8(0x01, ioaddr + PwcfgSet);
2223f2148a47SJeff Kirsher 		iowrite8(ioread8(ioaddr + StickyHW) | 0x04, ioaddr + StickyHW);
2224f2148a47SJeff Kirsher 	}
2225f2148a47SJeff Kirsher 
2226f2148a47SJeff Kirsher 	/* Hit power state D3 (sleep) */
2227f2148a47SJeff Kirsher 	if (!avoid_D3)
2228f2148a47SJeff Kirsher 		iowrite8(ioread8(ioaddr + StickyHW) | 0x03, ioaddr + StickyHW);
2229f2148a47SJeff Kirsher 
2230f2148a47SJeff Kirsher 	/* TODO: Check use of pci_enable_wake() */
2232f2148a47SJeff Kirsher }
2233f2148a47SJeff Kirsher 
2234f2148a47SJeff Kirsher #ifdef CONFIG_PM
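/*
 * Suspend: detach the interface, arm WOL via rhine_shutdown() and give
 * up the IRQ. Nothing to do if the interface was not running.
 */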
2235f2148a47SJeff Kirsher static int rhine_suspend(struct pci_dev *pdev, pm_message_t state)
2236f2148a47SJeff Kirsher {
2237f2148a47SJeff Kirsher 	struct net_device *dev = pci_get_drvdata(pdev);
2238f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
2239f2148a47SJeff Kirsher 	unsigned long flags;
2240f2148a47SJeff Kirsher 
2241f2148a47SJeff Kirsher 	if (!netif_running(dev))
2242f2148a47SJeff Kirsher 		return 0;
2243f2148a47SJeff Kirsher 
2244f2148a47SJeff Kirsher 	napi_disable(&rp->napi);
2245f2148a47SJeff Kirsher 
2246f2148a47SJeff Kirsher 	netif_device_detach(dev);
2247f2148a47SJeff Kirsher 	pci_save_state(pdev);
2248f2148a47SJeff Kirsher 
2249f2148a47SJeff Kirsher 	spin_lock_irqsave(&rp->lock, flags);
2250f2148a47SJeff Kirsher 	rhine_shutdown(pdev);
2251f2148a47SJeff Kirsher 	spin_unlock_irqrestore(&rp->lock, flags);
2252f2148a47SJeff Kirsher 
2253f2148a47SJeff Kirsher 	free_irq(dev->irq, dev);
2254f2148a47SJeff Kirsher 	return 0;
2255f2148a47SJeff Kirsher }
2256f2148a47SJeff Kirsher 
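/*
 * Resume: reclaim the IRQ (a failure is only logged), bring the chip
 * back to D0, rebuild the descriptor rings and reinitialize the
 * registers before reattaching the interface.
 */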
2257f2148a47SJeff Kirsher static int rhine_resume(struct pci_dev *pdev)
2258f2148a47SJeff Kirsher {
2259f2148a47SJeff Kirsher 	struct net_device *dev = pci_get_drvdata(pdev);
2260f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
2261f2148a47SJeff Kirsher 	unsigned long flags;
2262f2148a47SJeff Kirsher 	int ret;
2263f2148a47SJeff Kirsher 
2264f2148a47SJeff Kirsher 	if (!netif_running(dev))
2265f2148a47SJeff Kirsher 		return 0;
2266f2148a47SJeff Kirsher 
2267f2148a47SJeff Kirsher 	if (request_irq(dev->irq, rhine_interrupt, IRQF_SHARED, dev->name, dev))
2268f2148a47SJeff Kirsher 		netdev_err(dev, "request_irq failed\n");
2269f2148a47SJeff Kirsher 
2270f2148a47SJeff Kirsher 	ret = pci_set_power_state(pdev, PCI_D0);
2271f2148a47SJeff Kirsher 	if (debug > 1)
2272f2148a47SJeff Kirsher 		netdev_info(dev, "Entering power state D0 %s (%d)\n",
2273f2148a47SJeff Kirsher 			    ret ? "failed" : "succeeded", ret);
2274f2148a47SJeff Kirsher 
2275f2148a47SJeff Kirsher 	pci_restore_state(pdev);
2276f2148a47SJeff Kirsher 
2277f2148a47SJeff Kirsher 	spin_lock_irqsave(&rp->lock, flags);
2278f2148a47SJeff Kirsher #ifdef USE_MMIO
2279f2148a47SJeff Kirsher 	enable_mmio(rp->pioaddr, rp->quirks);
2280f2148a47SJeff Kirsher #endif
2281f2148a47SJeff Kirsher 	rhine_power_init(dev);
2282f2148a47SJeff Kirsher 	free_tbufs(dev);
2283f2148a47SJeff Kirsher 	free_rbufs(dev);
2284f2148a47SJeff Kirsher 	alloc_tbufs(dev);
2285f2148a47SJeff Kirsher 	alloc_rbufs(dev);
2286f2148a47SJeff Kirsher 	init_registers(dev);
2287f2148a47SJeff Kirsher 	spin_unlock_irqrestore(&rp->lock, flags);
2288f2148a47SJeff Kirsher 
2289f2148a47SJeff Kirsher 	netif_device_attach(dev);
2290f2148a47SJeff Kirsher 
2291f2148a47SJeff Kirsher 	return 0;
2292f2148a47SJeff Kirsher }
2293f2148a47SJeff Kirsher #endif /* CONFIG_PM */
2294f2148a47SJeff Kirsher 
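/* PCI driver glue: probe/remove plus the optional power management hooks. */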
2295f2148a47SJeff Kirsher static struct pci_driver rhine_driver = {
2296f2148a47SJeff Kirsher 	.name		= DRV_NAME,
2297f2148a47SJeff Kirsher 	.id_table	= rhine_pci_tbl,
2298f2148a47SJeff Kirsher 	.probe		= rhine_init_one,
2299f2148a47SJeff Kirsher 	.remove		= __devexit_p(rhine_remove_one),
2300f2148a47SJeff Kirsher #ifdef CONFIG_PM
2301f2148a47SJeff Kirsher 	.suspend	= rhine_suspend,
2302f2148a47SJeff Kirsher 	.resume		= rhine_resume,
2303f2148a47SJeff Kirsher #endif /* CONFIG_PM */
2304f2148a47SJeff Kirsher 	.shutdown	= rhine_shutdown,
2305f2148a47SJeff Kirsher };
2306f2148a47SJeff Kirsher 
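/*
 * Boards whose BIOS cannot bring the chip back out of D3; avoid_D3 is
 * forced on these so that PXE booting keeps working.
 */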
2307f2148a47SJeff Kirsher static const struct dmi_system_id __initconst rhine_dmi_table[] = {
2308f2148a47SJeff Kirsher 	{
2309f2148a47SJeff Kirsher 		.ident = "EPIA-M",
2310f2148a47SJeff Kirsher 		.matches = {
2311f2148a47SJeff Kirsher 			DMI_MATCH(DMI_BIOS_VENDOR, "Award Software International, Inc."),
2312f2148a47SJeff Kirsher 			DMI_MATCH(DMI_BIOS_VERSION, "6.00 PG"),
2313f2148a47SJeff Kirsher 		},
2314f2148a47SJeff Kirsher 	},
2315f2148a47SJeff Kirsher 	{
2316f2148a47SJeff Kirsher 		.ident = "KV7",
2317f2148a47SJeff Kirsher 		.matches = {
2318f2148a47SJeff Kirsher 			DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies, LTD"),
2319f2148a47SJeff Kirsher 			DMI_MATCH(DMI_BIOS_VERSION, "6.00 PG"),
2320f2148a47SJeff Kirsher 		},
2321f2148a47SJeff Kirsher 	},
2322f2148a47SJeff Kirsher 	{ NULL }
2323f2148a47SJeff Kirsher };
2324f2148a47SJeff Kirsher 
2325f2148a47SJeff Kirsher static int __init rhine_init(void)
2326f2148a47SJeff Kirsher {
2327f2148a47SJeff Kirsher /* When built as a module, print the version whether or not any devices are found in probe */
2328f2148a47SJeff Kirsher #ifdef MODULE
2329f2148a47SJeff Kirsher 	pr_info("%s\n", version);
2330f2148a47SJeff Kirsher #endif
2331f2148a47SJeff Kirsher 	if (dmi_check_system(rhine_dmi_table)) {
2332f2148a47SJeff Kirsher 		/* these BIOSes fail at PXE boot if chip is in D3 */
2333eb939922SRusty Russell 		avoid_D3 = true;
2334f2148a47SJeff Kirsher 		pr_warn("Broken BIOS detected, avoid_D3 enabled\n");
2335f2148a47SJeff Kirsher 	} else if (avoid_D3)
2337f2148a47SJeff Kirsher 		pr_info("avoid_D3 set\n");
2338f2148a47SJeff Kirsher 
2339f2148a47SJeff Kirsher 	return pci_register_driver(&rhine_driver);
2340f2148a47SJeff Kirsher }
2341f2148a47SJeff Kirsher 
2343f2148a47SJeff Kirsher static void __exit rhine_cleanup(void)
2344f2148a47SJeff Kirsher {
2345f2148a47SJeff Kirsher 	pci_unregister_driver(&rhine_driver);
2346f2148a47SJeff Kirsher }
2347f2148a47SJeff Kirsher 
2349f2148a47SJeff Kirsher module_init(rhine_init);
2350f2148a47SJeff Kirsher module_exit(rhine_cleanup);