xref: /openbmc/linux/drivers/net/ethernet/via/via-rhine.c (revision f7630d189c6ade2c83e20fa37169114def4271e8)
1f2148a47SJeff Kirsher /* via-rhine.c: A Linux Ethernet device driver for VIA Rhine family chips. */
2f2148a47SJeff Kirsher /*
3f2148a47SJeff Kirsher 	Written 1998-2001 by Donald Becker.
4f2148a47SJeff Kirsher 
5f2148a47SJeff Kirsher 	Current Maintainer: Roger Luethi <rl@hellgate.ch>
6f2148a47SJeff Kirsher 
7f2148a47SJeff Kirsher 	This software may be used and distributed according to the terms of
8f2148a47SJeff Kirsher 	the GNU General Public License (GPL), incorporated herein by reference.
9f2148a47SJeff Kirsher 	Drivers based on or derived from this code fall under the GPL and must
10f2148a47SJeff Kirsher 	retain the authorship, copyright and license notice.  This file is not
11f2148a47SJeff Kirsher 	a complete program and may only be used when the entire operating
12f2148a47SJeff Kirsher 	system is licensed under the GPL.
13f2148a47SJeff Kirsher 
14f2148a47SJeff Kirsher 	This driver is designed for the VIA VT86C100A Rhine-I.
15f2148a47SJeff Kirsher 	It also works with the Rhine-II (6102) and Rhine-III (6105/6105L/6105LOM
16f2148a47SJeff Kirsher 	and management NIC 6105M).
17f2148a47SJeff Kirsher 
18f2148a47SJeff Kirsher 	The author may be reached as becker@scyld.com, or C/O
19f2148a47SJeff Kirsher 	Scyld Computing Corporation
20f2148a47SJeff Kirsher 	410 Severn Ave., Suite 210
21f2148a47SJeff Kirsher 	Annapolis MD 21403
22f2148a47SJeff Kirsher 
23f2148a47SJeff Kirsher 
24f2148a47SJeff Kirsher 	This driver contains some changes from the original Donald Becker
25f2148a47SJeff Kirsher 	version. He may or may not be interested in bug reports on this
26f2148a47SJeff Kirsher 	code. You can find his versions at:
27f2148a47SJeff Kirsher 	http://www.scyld.com/network/via-rhine.html
28f2148a47SJeff Kirsher 	[link no longer provides useful info -jgarzik]
29f2148a47SJeff Kirsher 
30f2148a47SJeff Kirsher */
31f2148a47SJeff Kirsher 
32f2148a47SJeff Kirsher #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
33f2148a47SJeff Kirsher 
34f2148a47SJeff Kirsher #define DRV_NAME	"via-rhine"
35207070f5SRoger Luethi #define DRV_VERSION	"1.5.1"
36f2148a47SJeff Kirsher #define DRV_RELDATE	"2010-10-09"
37f2148a47SJeff Kirsher 
38eb939922SRusty Russell #include <linux/types.h>
39f2148a47SJeff Kirsher 
40f2148a47SJeff Kirsher /* A few user-configurable values.
41f2148a47SJeff Kirsher    These may be modified when a driver module is loaded. */
42fc3e0f8aSFrancois Romieu static int debug = 0;
43fc3e0f8aSFrancois Romieu #define RHINE_MSG_DEFAULT \
44fc3e0f8aSFrancois Romieu         (0x0000)
45f2148a47SJeff Kirsher 
46f2148a47SJeff Kirsher /* Set the copy breakpoint for the copy-only-tiny-frames scheme.
47f2148a47SJeff Kirsher    Setting to > 1518 effectively disables this feature. */
48f2148a47SJeff Kirsher #if defined(__alpha__) || defined(__arm__) || defined(__hppa__) || \
49f2148a47SJeff Kirsher 	defined(CONFIG_SPARC) || defined(__ia64__) ||		   \
50f2148a47SJeff Kirsher 	defined(__sh__) || defined(__mips__)
51f2148a47SJeff Kirsher static int rx_copybreak = 1518;
52f2148a47SJeff Kirsher #else
53f2148a47SJeff Kirsher static int rx_copybreak;
54f2148a47SJeff Kirsher #endif
55f2148a47SJeff Kirsher 
56f2148a47SJeff Kirsher /* Work-around for broken BIOSes: they are unable to get the chip back out of
57f2148a47SJeff Kirsher    power state D3 so PXE booting fails. bootparam(7): via-rhine.avoid_D3=1 */
58eb939922SRusty Russell static bool avoid_D3;
59f2148a47SJeff Kirsher 
60f2148a47SJeff Kirsher /*
61f2148a47SJeff Kirsher  * In case you are looking for 'options[]' or 'full_duplex[]', they
62f2148a47SJeff Kirsher  * are gone. Use ethtool(8) instead.
63f2148a47SJeff Kirsher  */
64f2148a47SJeff Kirsher 
65f2148a47SJeff Kirsher /* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
66f2148a47SJeff Kirsher    The Rhine has a 64 element 8390-like hash table. */
67f2148a47SJeff Kirsher static const int multicast_filter_limit = 32;
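
/*
 * Illustrative sketch of how such a 64-bit hash filter is typically
 * programmed (the authoritative version lives in rhine_set_rx_mode()):
 * each multicast address is hashed with ether_crc() and the top six
 * bits of the CRC select one of the 64 filter bits:
 *
 *	int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
 *
 *	mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
 */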
68f2148a47SJeff Kirsher 
69f2148a47SJeff Kirsher 
70f2148a47SJeff Kirsher /* Operational parameters that are set at compile time. */
71f2148a47SJeff Kirsher 
72f2148a47SJeff Kirsher /* Keep the ring sizes a power of two for compile efficiency.
73f2148a47SJeff Kirsher    The compiler will convert <unsigned>'%'<2^N> into a bit mask.
74f2148a47SJeff Kirsher    Making the Tx ring too large decreases the effectiveness of channel
75f2148a47SJeff Kirsher    bonding and packet priority.
76f2148a47SJeff Kirsher    There are no ill effects from too-large receive rings. */
77f2148a47SJeff Kirsher #define TX_RING_SIZE	16
78f2148a47SJeff Kirsher #define TX_QUEUE_LEN	10	/* Limit ring entries actually used. */
79f2148a47SJeff Kirsher #define RX_RING_SIZE	64
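
/*
 * Example of the power-of-two point made above: with TX_RING_SIZE == 16,
 * a ring-index wrap such as
 *
 *	unsigned int entry = rp->cur_tx % TX_RING_SIZE;
 *
 * compiles down to "rp->cur_tx & (TX_RING_SIZE - 1)" -- a bit mask
 * instead of a division.
 */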
80f2148a47SJeff Kirsher 
81f2148a47SJeff Kirsher /* Operational parameters that usually are not changed. */
82f2148a47SJeff Kirsher 
83f2148a47SJeff Kirsher /* Time in jiffies before concluding the transmitter is hung. */
84f2148a47SJeff Kirsher #define TX_TIMEOUT	(2*HZ)
85f2148a47SJeff Kirsher 
86f2148a47SJeff Kirsher #define PKT_BUF_SZ	1536	/* Size of each temporary Rx buffer.*/
87f2148a47SJeff Kirsher 
88f2148a47SJeff Kirsher #include <linux/module.h>
89f2148a47SJeff Kirsher #include <linux/moduleparam.h>
90f2148a47SJeff Kirsher #include <linux/kernel.h>
91f2148a47SJeff Kirsher #include <linux/string.h>
92f2148a47SJeff Kirsher #include <linux/timer.h>
93f2148a47SJeff Kirsher #include <linux/errno.h>
94f2148a47SJeff Kirsher #include <linux/ioport.h>
95f2148a47SJeff Kirsher #include <linux/interrupt.h>
96f2148a47SJeff Kirsher #include <linux/pci.h>
97f2148a47SJeff Kirsher #include <linux/dma-mapping.h>
98f2148a47SJeff Kirsher #include <linux/netdevice.h>
99f2148a47SJeff Kirsher #include <linux/etherdevice.h>
100f2148a47SJeff Kirsher #include <linux/skbuff.h>
101f2148a47SJeff Kirsher #include <linux/init.h>
102f2148a47SJeff Kirsher #include <linux/delay.h>
103f2148a47SJeff Kirsher #include <linux/mii.h>
104f2148a47SJeff Kirsher #include <linux/ethtool.h>
105f2148a47SJeff Kirsher #include <linux/crc32.h>
106f2148a47SJeff Kirsher #include <linux/if_vlan.h>
107f2148a47SJeff Kirsher #include <linux/bitops.h>
108f2148a47SJeff Kirsher #include <linux/workqueue.h>
109f2148a47SJeff Kirsher #include <asm/processor.h>	/* Processor type for cache alignment. */
110f2148a47SJeff Kirsher #include <asm/io.h>
111f2148a47SJeff Kirsher #include <asm/irq.h>
112f2148a47SJeff Kirsher #include <asm/uaccess.h>
113f2148a47SJeff Kirsher #include <linux/dmi.h>
114f2148a47SJeff Kirsher 
115f2148a47SJeff Kirsher /* These identify the driver base version and may not be removed. */
11676e239e1SBill Pemberton static const char version[] =
117f2148a47SJeff Kirsher 	"v1.10-LK" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker";
118f2148a47SJeff Kirsher 
119f2148a47SJeff Kirsher /* This driver was written to use PCI memory space. Some early versions
120f2148a47SJeff Kirsher    of the Rhine may only work correctly with I/O space accesses. */
121f2148a47SJeff Kirsher #ifdef CONFIG_VIA_RHINE_MMIO
122f2148a47SJeff Kirsher #define USE_MMIO
124f2148a47SJeff Kirsher #endif
125f2148a47SJeff Kirsher 
126f2148a47SJeff Kirsher MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
127f2148a47SJeff Kirsher MODULE_DESCRIPTION("VIA Rhine PCI Fast Ethernet driver");
128f2148a47SJeff Kirsher MODULE_LICENSE("GPL");
129f2148a47SJeff Kirsher 
130f2148a47SJeff Kirsher module_param(debug, int, 0);
131f2148a47SJeff Kirsher module_param(rx_copybreak, int, 0);
132f2148a47SJeff Kirsher module_param(avoid_D3, bool, 0);
133fc3e0f8aSFrancois Romieu MODULE_PARM_DESC(debug, "VIA Rhine debug message flags");
134f2148a47SJeff Kirsher MODULE_PARM_DESC(rx_copybreak, "VIA Rhine copy breakpoint for copy-only-tiny-frames");
135f2148a47SJeff Kirsher MODULE_PARM_DESC(avoid_D3, "Avoid power state D3 (work-around for broken BIOSes)");
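
/*
 * Example usage (values are illustrative only):
 *
 *	modprobe via-rhine debug=0x2000 avoid_D3=1
 *
 * or, for a built-in driver, on the kernel command line:
 *
 *	via-rhine.avoid_D3=1
 */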
136f2148a47SJeff Kirsher 
137f2148a47SJeff Kirsher #define MCAM_SIZE	32
138f2148a47SJeff Kirsher #define VCAM_SIZE	32
139f2148a47SJeff Kirsher 
140f2148a47SJeff Kirsher /*
141f2148a47SJeff Kirsher 		Theory of Operation
142f2148a47SJeff Kirsher 
143f2148a47SJeff Kirsher I. Board Compatibility
144f2148a47SJeff Kirsher 
145f2148a47SJeff Kirsher This driver is designed for the VIA VT86C100A Rhine-I PCI Fast Ethernet
146f2148a47SJeff Kirsher controller.
147f2148a47SJeff Kirsher 
148f2148a47SJeff Kirsher II. Board-specific settings
149f2148a47SJeff Kirsher 
150f2148a47SJeff Kirsher Boards with this chip are functional only in a bus-master PCI slot.
151f2148a47SJeff Kirsher 
152f2148a47SJeff Kirsher Many operational settings are loaded from the EEPROM to the Config word at
153f2148a47SJeff Kirsher offset 0x78. For most of these settings, this driver assumes that they are
154f2148a47SJeff Kirsher correct.
155f2148a47SJeff Kirsher If this driver is compiled to use PCI memory space operations, the EEPROM
156f2148a47SJeff Kirsher must be configured to enable memory ops.
157f2148a47SJeff Kirsher 
158f2148a47SJeff Kirsher III. Driver operation
159f2148a47SJeff Kirsher 
160f2148a47SJeff Kirsher IIIa. Ring buffers
161f2148a47SJeff Kirsher 
162f2148a47SJeff Kirsher This driver uses two statically allocated fixed-size descriptor lists
163f2148a47SJeff Kirsher formed into rings by a branch from the final descriptor to the beginning of
164f2148a47SJeff Kirsher the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
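
A minimal sketch of that branch-back chaining, using the rx_desc layout
defined further below (the real initialization lives in the ring setup
code):

	rp->rx_ring[i].next_desc =
		cpu_to_le32(rp->rx_ring_dma + (i + 1) * sizeof(struct rx_desc));
	...
	rp->rx_ring[RX_RING_SIZE - 1].next_desc = cpu_to_le32(rp->rx_ring_dma);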
165f2148a47SJeff Kirsher 
166f2148a47SJeff Kirsher IIIb/c. Transmit/Receive Structure
167f2148a47SJeff Kirsher 
168f2148a47SJeff Kirsher This driver attempts to use a zero-copy receive and transmit scheme.
169f2148a47SJeff Kirsher 
170f2148a47SJeff Kirsher Alas, all data buffers are required to start on a 32 bit boundary, so
171f2148a47SJeff Kirsher the driver must often copy transmit packets into bounce buffers.
172f2148a47SJeff Kirsher 
173f2148a47SJeff Kirsher The driver allocates full frame size skbuffs for the Rx ring buffers at
174f2148a47SJeff Kirsher open() time and passes the skb->data field to the chip as receive data
175f2148a47SJeff Kirsher buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
176f2148a47SJeff Kirsher a fresh skbuff is allocated and the frame is copied to the new skbuff.
177f2148a47SJeff Kirsher When the incoming frame is larger, the skbuff is passed directly up the
178f2148a47SJeff Kirsher protocol stack. Buffers consumed this way are replaced by newly allocated
179f2148a47SJeff Kirsher skbuffs in the last phase of rhine_rx().
180f2148a47SJeff Kirsher 
181f2148a47SJeff Kirsher The RX_COPYBREAK value is chosen to trade-off the memory wasted by
182f2148a47SJeff Kirsher using a full-sized skbuff for small frames vs. the copying costs of larger
183f2148a47SJeff Kirsher frames. New boards are typically used in generously configured machines
184f2148a47SJeff Kirsher and the underfilled buffers have negligible impact compared to the benefit of
185f2148a47SJeff Kirsher a single allocation size, so the default value of zero results in never
186f2148a47SJeff Kirsher copying packets. When copying is done, the cost is usually mitigated by using
187f2148a47SJeff Kirsher a combined copy/checksum routine. Copying also preloads the cache, which is
188f2148a47SJeff Kirsher most useful with small frames.
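
Sketch of the copybreak decision described above (illustrative; the
authoritative code is in rhine_rx()):

	if (pkt_len < rx_copybreak) {
		struct sk_buff *skb = netdev_alloc_skb_ip_align(dev, pkt_len);
		if (skb)
			... copy the frame into the fresh skb ...
	} else {
		... hand the original ring skb to the stack, refill later ...
	}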
189f2148a47SJeff Kirsher 
190f2148a47SJeff Kirsher Since the VIA chips are only able to transfer data to buffers on 32 bit
191f2148a47SJeff Kirsher boundaries, the IP header at offset 14 in an ethernet frame isn't
192f2148a47SJeff Kirsher longword aligned for further processing. Copying these unaligned buffers
193f2148a47SJeff Kirsher has the beneficial effect of 16-byte aligning the IP header.
194f2148a47SJeff Kirsher 
195f2148a47SJeff Kirsher IIId. Synchronization
196f2148a47SJeff Kirsher 
197f2148a47SJeff Kirsher The driver runs as two independent, single-threaded flows of control. One
198f2148a47SJeff Kirsher is the send-packet routine, which enforces single-threaded use by the
199f2148a47SJeff Kirsher netdev_priv(dev)->lock spinlock. The other thread is the interrupt handler,
200f2148a47SJeff Kirsher which is single threaded by the hardware and interrupt handling software.
201f2148a47SJeff Kirsher 
202f2148a47SJeff Kirsher The send packet thread has partial control over the Tx ring. It locks the
203f2148a47SJeff Kirsher netdev_priv(dev)->lock whenever it's queuing a Tx packet. If the next slot in
204f2148a47SJeff Kirsher the ring is not available it stops the transmit queue by
205f2148a47SJeff Kirsher calling netif_stop_queue.
206f2148a47SJeff Kirsher 
207f2148a47SJeff Kirsher The interrupt handler has exclusive control over the Rx ring and records stats
208f2148a47SJeff Kirsher from the Tx ring. After reaping the stats, it marks the Tx queue entry as
209f2148a47SJeff Kirsher empty by incrementing the dirty_tx mark. If at least half of the entries in
210f2148a47SJeff Kirsher the Tx ring are available the transmit queue is woken up if it was stopped.
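
In code, the send-path half of this scheme looks roughly like (sketch):

	spin_lock_irqsave(&rp->lock, flags);
	... fill the descriptor and hand it to the chip ...
	if (... no free slot left for another frame ...)
		netif_stop_queue(dev);
	spin_unlock_irqrestore(&rp->lock, flags);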
211f2148a47SJeff Kirsher 
212f2148a47SJeff Kirsher IV. Notes
213f2148a47SJeff Kirsher 
214f2148a47SJeff Kirsher IVb. References
215f2148a47SJeff Kirsher 
216f2148a47SJeff Kirsher Preliminary VT86C100A manual from http://www.via.com.tw/
217f2148a47SJeff Kirsher http://www.scyld.com/expert/100mbps.html
218f2148a47SJeff Kirsher http://www.scyld.com/expert/NWay.html
219f2148a47SJeff Kirsher ftp://ftp.via.com.tw/public/lan/Products/NIC/VT86C100A/Datasheet/VT86C100A03.pdf
220f2148a47SJeff Kirsher ftp://ftp.via.com.tw/public/lan/Products/NIC/VT6102/Datasheet/VT6102_021.PDF
221f2148a47SJeff Kirsher 
222f2148a47SJeff Kirsher 
223f2148a47SJeff Kirsher IVc. Errata
224f2148a47SJeff Kirsher 
225f2148a47SJeff Kirsher The VT86C100A manual is not a reliable source of information.
226f2148a47SJeff Kirsher The 3043 chip does not handle unaligned transmit or receive buffers, resulting
227f2148a47SJeff Kirsher in significant performance degradation for bounce buffer copies on transmit
228f2148a47SJeff Kirsher and unaligned IP headers on receive.
229f2148a47SJeff Kirsher The chip does not pad to minimum transmit length.
230f2148a47SJeff Kirsher 
231f2148a47SJeff Kirsher */
232f2148a47SJeff Kirsher 
233f2148a47SJeff Kirsher 
234f2148a47SJeff Kirsher /* This table drives the PCI probe routines. It's mostly boilerplate in all
235f2148a47SJeff Kirsher    of the drivers, and will likely be provided by some future kernel.
236f2148a47SJeff Kirsher    Note the matching code -- the first table entry matches all 56** cards but
237f2148a47SJeff Kirsher    the second matches only the 1234 card.
238f2148a47SJeff Kirsher */
239f2148a47SJeff Kirsher 
240f2148a47SJeff Kirsher enum rhine_revs {
241f2148a47SJeff Kirsher 	VT86C100A	= 0x00,
242f2148a47SJeff Kirsher 	VTunknown0	= 0x20,
243f2148a47SJeff Kirsher 	VT6102		= 0x40,
244f2148a47SJeff Kirsher 	VT8231		= 0x50,	/* Integrated MAC */
245f2148a47SJeff Kirsher 	VT8233		= 0x60,	/* Integrated MAC */
246f2148a47SJeff Kirsher 	VT8235		= 0x74,	/* Integrated MAC */
247f2148a47SJeff Kirsher 	VT8237		= 0x78,	/* Integrated MAC */
248f2148a47SJeff Kirsher 	VTunknown1	= 0x7C,
249f2148a47SJeff Kirsher 	VT6105		= 0x80,
250f2148a47SJeff Kirsher 	VT6105_B0	= 0x83,
251f2148a47SJeff Kirsher 	VT6105L		= 0x8A,
252f2148a47SJeff Kirsher 	VT6107		= 0x8C,
253f2148a47SJeff Kirsher 	VTunknown2	= 0x8E,
254f2148a47SJeff Kirsher 	VT6105M		= 0x90,	/* Management adapter */
255f2148a47SJeff Kirsher };
256f2148a47SJeff Kirsher 
257f2148a47SJeff Kirsher enum rhine_quirks {
258f2148a47SJeff Kirsher 	rqWOL		= 0x0001,	/* Wake-On-LAN support */
259f2148a47SJeff Kirsher 	rqForceReset	= 0x0002,
260f2148a47SJeff Kirsher 	rq6patterns	= 0x0040,	/* 6 instead of 4 patterns for WOL */
261f2148a47SJeff Kirsher 	rqStatusWBRace	= 0x0080,	/* Tx Status Writeback Error possible */
262f2148a47SJeff Kirsher 	rqRhineI	= 0x0100,	/* See comment below */
263f2148a47SJeff Kirsher };
264f2148a47SJeff Kirsher /*
265f2148a47SJeff Kirsher  * rqRhineI: VT86C100A (aka Rhine-I) uses different bits to enable
266f2148a47SJeff Kirsher  * MMIO as well as for the collision counter and the Tx FIFO underflow
267f2148a47SJeff Kirsher  * indicator. In addition, Tx and Rx buffers need to be 4-byte aligned.
268f2148a47SJeff Kirsher  */
269f2148a47SJeff Kirsher 
270f2148a47SJeff Kirsher /* Beware of PCI posted writes */
271f2148a47SJeff Kirsher #define IOSYNC	do { ioread8(ioaddr + StationAddr); } while (0)
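
/*
 * Example: a write that must reach the chip before execution continues
 * is followed by a flushing read, as in rhine_chip_reset() below:
 *
 *	iowrite8(Cmd1Reset, ioaddr + ChipCmd1);
 *	IOSYNC;
 */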
272f2148a47SJeff Kirsher 
273f2148a47SJeff Kirsher static DEFINE_PCI_DEVICE_TABLE(rhine_pci_tbl) = {
274f2148a47SJeff Kirsher 	{ 0x1106, 0x3043, PCI_ANY_ID, PCI_ANY_ID, },	/* VT86C100A */
275f2148a47SJeff Kirsher 	{ 0x1106, 0x3065, PCI_ANY_ID, PCI_ANY_ID, },	/* VT6102 */
276f2148a47SJeff Kirsher 	{ 0x1106, 0x3106, PCI_ANY_ID, PCI_ANY_ID, },	/* 6105{,L,LOM} */
277f2148a47SJeff Kirsher 	{ 0x1106, 0x3053, PCI_ANY_ID, PCI_ANY_ID, },	/* VT6105M */
278f2148a47SJeff Kirsher 	{ }	/* terminate list */
279f2148a47SJeff Kirsher };
280f2148a47SJeff Kirsher MODULE_DEVICE_TABLE(pci, rhine_pci_tbl);
281f2148a47SJeff Kirsher 
282f2148a47SJeff Kirsher 
283f2148a47SJeff Kirsher /* Offsets to the device registers. */
284f2148a47SJeff Kirsher enum register_offsets {
285f2148a47SJeff Kirsher 	StationAddr=0x00, RxConfig=0x06, TxConfig=0x07, ChipCmd=0x08,
286f2148a47SJeff Kirsher 	ChipCmd1=0x09, TQWake=0x0A,
287f2148a47SJeff Kirsher 	IntrStatus=0x0C, IntrEnable=0x0E,
288f2148a47SJeff Kirsher 	MulticastFilter0=0x10, MulticastFilter1=0x14,
289f2148a47SJeff Kirsher 	RxRingPtr=0x18, TxRingPtr=0x1C, GFIFOTest=0x54,
290f2148a47SJeff Kirsher 	MIIPhyAddr=0x6C, MIIStatus=0x6D, PCIBusConfig=0x6E, PCIBusConfig1=0x6F,
291f2148a47SJeff Kirsher 	MIICmd=0x70, MIIRegAddr=0x71, MIIData=0x72, MACRegEEcsr=0x74,
292f2148a47SJeff Kirsher 	ConfigA=0x78, ConfigB=0x79, ConfigC=0x7A, ConfigD=0x7B,
293f2148a47SJeff Kirsher 	RxMissed=0x7C, RxCRCErrs=0x7E, MiscCmd=0x81,
294f2148a47SJeff Kirsher 	StickyHW=0x83, IntrStatus2=0x84,
295f2148a47SJeff Kirsher 	CamMask=0x88, CamCon=0x92, CamAddr=0x93,
296f2148a47SJeff Kirsher 	WOLcrSet=0xA0, PwcfgSet=0xA1, WOLcgSet=0xA3, WOLcrClr=0xA4,
297f2148a47SJeff Kirsher 	WOLcrClr1=0xA6, WOLcgClr=0xA7,
298f2148a47SJeff Kirsher 	PwrcsrSet=0xA8, PwrcsrSet1=0xA9, PwrcsrClr=0xAC, PwrcsrClr1=0xAD,
299f2148a47SJeff Kirsher };
300f2148a47SJeff Kirsher 
301f2148a47SJeff Kirsher /* Bits in ConfigD */
302f2148a47SJeff Kirsher enum backoff_bits {
303f2148a47SJeff Kirsher 	BackOptional=0x01, BackModify=0x02,
304f2148a47SJeff Kirsher 	BackCaptureEffect=0x04, BackRandom=0x08
305f2148a47SJeff Kirsher };
306f2148a47SJeff Kirsher 
307f2148a47SJeff Kirsher /* Bits in the TxConfig (TCR) register */
308f2148a47SJeff Kirsher enum tcr_bits {
309f2148a47SJeff Kirsher 	TCR_PQEN=0x01,
310f2148a47SJeff Kirsher 	TCR_LB0=0x02,		/* loopback[0] */
311f2148a47SJeff Kirsher 	TCR_LB1=0x04,		/* loopback[1] */
312f2148a47SJeff Kirsher 	TCR_OFSET=0x08,
313f2148a47SJeff Kirsher 	TCR_RTGOPT=0x10,
314f2148a47SJeff Kirsher 	TCR_RTFT0=0x20,
315f2148a47SJeff Kirsher 	TCR_RTFT1=0x40,
316f2148a47SJeff Kirsher 	TCR_RTSF=0x80,
317f2148a47SJeff Kirsher };
318f2148a47SJeff Kirsher 
319f2148a47SJeff Kirsher /* Bits in the CamCon (CAMC) register */
320f2148a47SJeff Kirsher enum camcon_bits {
321f2148a47SJeff Kirsher 	CAMC_CAMEN=0x01,
322f2148a47SJeff Kirsher 	CAMC_VCAMSL=0x02,
323f2148a47SJeff Kirsher 	CAMC_CAMWR=0x04,
324f2148a47SJeff Kirsher 	CAMC_CAMRD=0x08,
325f2148a47SJeff Kirsher };
326f2148a47SJeff Kirsher 
327f2148a47SJeff Kirsher /* Bits in the PCIBusConfig1 (BCR1) register */
328f2148a47SJeff Kirsher enum bcr1_bits {
329f2148a47SJeff Kirsher 	BCR1_POT0=0x01,
330f2148a47SJeff Kirsher 	BCR1_POT1=0x02,
331f2148a47SJeff Kirsher 	BCR1_POT2=0x04,
332f2148a47SJeff Kirsher 	BCR1_CTFT0=0x08,
333f2148a47SJeff Kirsher 	BCR1_CTFT1=0x10,
334f2148a47SJeff Kirsher 	BCR1_CTSF=0x20,
335f2148a47SJeff Kirsher 	BCR1_TXQNOBK=0x40,	/* for VT6105 */
336f2148a47SJeff Kirsher 	BCR1_VIDFR=0x80,	/* for VT6105 */
337f2148a47SJeff Kirsher 	BCR1_MED0=0x40,		/* for VT6102 */
338f2148a47SJeff Kirsher 	BCR1_MED1=0x80,		/* for VT6102 */
339f2148a47SJeff Kirsher };
340f2148a47SJeff Kirsher 
341f2148a47SJeff Kirsher #ifdef USE_MMIO
342f2148a47SJeff Kirsher /* Registers for which we verify that MMIO and PIO reads return the same value. */
343f2148a47SJeff Kirsher static const int mmio_verify_registers[] = {
344f2148a47SJeff Kirsher 	RxConfig, TxConfig, IntrEnable, ConfigA, ConfigB, ConfigC, ConfigD,
345f2148a47SJeff Kirsher 	0
346f2148a47SJeff Kirsher };
347f2148a47SJeff Kirsher #endif
348f2148a47SJeff Kirsher 
349f2148a47SJeff Kirsher /* Bits in the interrupt status/mask registers. */
350f2148a47SJeff Kirsher enum intr_status_bits {
3517ab87ff4SFrancois Romieu 	IntrRxDone	= 0x0001,
3527ab87ff4SFrancois Romieu 	IntrTxDone	= 0x0002,
3537ab87ff4SFrancois Romieu 	IntrRxErr	= 0x0004,
3547ab87ff4SFrancois Romieu 	IntrTxError	= 0x0008,
3557ab87ff4SFrancois Romieu 	IntrRxEmpty	= 0x0020,
356f2148a47SJeff Kirsher 	IntrPCIErr	= 0x0040,
3577ab87ff4SFrancois Romieu 	IntrStatsMax	= 0x0080,
3587ab87ff4SFrancois Romieu 	IntrRxEarly	= 0x0100,
3597ab87ff4SFrancois Romieu 	IntrTxUnderrun	= 0x0210,
3607ab87ff4SFrancois Romieu 	IntrRxOverflow	= 0x0400,
3617ab87ff4SFrancois Romieu 	IntrRxDropped	= 0x0800,
3627ab87ff4SFrancois Romieu 	IntrRxNoBuf	= 0x1000,
3637ab87ff4SFrancois Romieu 	IntrTxAborted	= 0x2000,
3647ab87ff4SFrancois Romieu 	IntrLinkChange	= 0x4000,
365f2148a47SJeff Kirsher 	IntrRxWakeUp	= 0x8000,
366f2148a47SJeff Kirsher 	IntrTxDescRace		= 0x080000,	/* mapped from IntrStatus2 */
3677ab87ff4SFrancois Romieu 	IntrNormalSummary	= IntrRxDone | IntrTxDone,
3687ab87ff4SFrancois Romieu 	IntrTxErrSummary	= IntrTxDescRace | IntrTxAborted | IntrTxError |
3697ab87ff4SFrancois Romieu 				  IntrTxUnderrun,
370f2148a47SJeff Kirsher };
371f2148a47SJeff Kirsher 
372f2148a47SJeff Kirsher /* Bits in WOLcrSet/WOLcrClr and PwrcsrSet/PwrcsrClr */
373f2148a47SJeff Kirsher enum wol_bits {
374f2148a47SJeff Kirsher 	WOLucast	= 0x10,
375f2148a47SJeff Kirsher 	WOLmagic	= 0x20,
376f2148a47SJeff Kirsher 	WOLbmcast	= 0x30,
377f2148a47SJeff Kirsher 	WOLlnkon	= 0x40,
378f2148a47SJeff Kirsher 	WOLlnkoff	= 0x80,
379f2148a47SJeff Kirsher };
380f2148a47SJeff Kirsher 
381f2148a47SJeff Kirsher /* The Rx and Tx buffer descriptors. */
382f2148a47SJeff Kirsher struct rx_desc {
383f2148a47SJeff Kirsher 	__le32 rx_status;
384f2148a47SJeff Kirsher 	__le32 desc_length; /* Chain flag, Buffer/frame length */
385f2148a47SJeff Kirsher 	__le32 addr;
386f2148a47SJeff Kirsher 	__le32 next_desc;
387f2148a47SJeff Kirsher };
388f2148a47SJeff Kirsher struct tx_desc {
389f2148a47SJeff Kirsher 	__le32 tx_status;
390f2148a47SJeff Kirsher 	__le32 desc_length; /* Chain flag, Tx Config, Frame length */
391f2148a47SJeff Kirsher 	__le32 addr;
392f2148a47SJeff Kirsher 	__le32 next_desc;
393f2148a47SJeff Kirsher };
394f2148a47SJeff Kirsher 
395f2148a47SJeff Kirsher /* Initial value for tx_desc.desc_length, Buffer size goes to bits 0-10 */
396f2148a47SJeff Kirsher #define TXDESC		0x00e08000
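
/*
 * Presumed bit breakdown (not checked against the datasheet here):
 * 0x00e08000 sets the interrupt-on-completion, start-of-frame and
 * end-of-frame control bits plus the chain-buffer bit, leaving bits
 * 0-10 clear for the frame length, e.g.:
 *
 *	desc->desc_length = cpu_to_le32(TXDESC | skb->len);
 */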
397f2148a47SJeff Kirsher 
398f2148a47SJeff Kirsher enum rx_status_bits {
399f2148a47SJeff Kirsher 	RxOK=0x8000, RxWholePkt=0x0300, RxErr=0x008F
400f2148a47SJeff Kirsher };
401f2148a47SJeff Kirsher 
402f2148a47SJeff Kirsher /* Bits in *_desc.*_status */
403f2148a47SJeff Kirsher enum desc_status_bits {
404f2148a47SJeff Kirsher 	DescOwn=0x80000000
405f2148a47SJeff Kirsher };
406f2148a47SJeff Kirsher 
407f2148a47SJeff Kirsher /* Bits in *_desc.*_length */
408f2148a47SJeff Kirsher enum desc_length_bits {
409f2148a47SJeff Kirsher 	DescTag=0x00010000
410f2148a47SJeff Kirsher };
411f2148a47SJeff Kirsher 
412f2148a47SJeff Kirsher /* Bits in ChipCmd. */
413f2148a47SJeff Kirsher enum chip_cmd_bits {
414f2148a47SJeff Kirsher 	CmdInit=0x01, CmdStart=0x02, CmdStop=0x04, CmdRxOn=0x08,
415f2148a47SJeff Kirsher 	CmdTxOn=0x10, Cmd1TxDemand=0x20, CmdRxDemand=0x40,
416f2148a47SJeff Kirsher 	Cmd1EarlyRx=0x01, Cmd1EarlyTx=0x02, Cmd1FDuplex=0x04,
417f2148a47SJeff Kirsher 	Cmd1NoTxPoll=0x08, Cmd1Reset=0x80,
418f2148a47SJeff Kirsher };
419f2148a47SJeff Kirsher 
420f7b5d1b9SJamie Gloudon struct rhine_stats {
421f7b5d1b9SJamie Gloudon 	u64		packets;
422f7b5d1b9SJamie Gloudon 	u64		bytes;
423f7b5d1b9SJamie Gloudon 	struct u64_stats_sync syncp;
424f7b5d1b9SJamie Gloudon };
425f7b5d1b9SJamie Gloudon 
426f2148a47SJeff Kirsher struct rhine_private {
427f2148a47SJeff Kirsher 	/* Bit mask for configured VLAN ids */
428f2148a47SJeff Kirsher 	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
429f2148a47SJeff Kirsher 
430f2148a47SJeff Kirsher 	/* Descriptor rings */
431f2148a47SJeff Kirsher 	struct rx_desc *rx_ring;
432f2148a47SJeff Kirsher 	struct tx_desc *tx_ring;
433f2148a47SJeff Kirsher 	dma_addr_t rx_ring_dma;
434f2148a47SJeff Kirsher 	dma_addr_t tx_ring_dma;
435f2148a47SJeff Kirsher 
436f2148a47SJeff Kirsher 	/* The addresses of receive-in-place skbuffs. */
437f2148a47SJeff Kirsher 	struct sk_buff *rx_skbuff[RX_RING_SIZE];
438f2148a47SJeff Kirsher 	dma_addr_t rx_skbuff_dma[RX_RING_SIZE];
439f2148a47SJeff Kirsher 
440f2148a47SJeff Kirsher 	/* The saved address of a sent-in-place packet/buffer, for later free(). */
441f2148a47SJeff Kirsher 	struct sk_buff *tx_skbuff[TX_RING_SIZE];
442f2148a47SJeff Kirsher 	dma_addr_t tx_skbuff_dma[TX_RING_SIZE];
443f2148a47SJeff Kirsher 
444f2148a47SJeff Kirsher 	/* Tx bounce buffers (Rhine-I only) */
445f2148a47SJeff Kirsher 	unsigned char *tx_buf[TX_RING_SIZE];
446f2148a47SJeff Kirsher 	unsigned char *tx_bufs;
447f2148a47SJeff Kirsher 	dma_addr_t tx_bufs_dma;
448f2148a47SJeff Kirsher 
449*f7630d18SAlexey Charkov 	int revision;
450*f7630d18SAlexey Charkov 	int irq;
451f2148a47SJeff Kirsher 	long pioaddr;
452f2148a47SJeff Kirsher 	struct net_device *dev;
453f2148a47SJeff Kirsher 	struct napi_struct napi;
454f2148a47SJeff Kirsher 	spinlock_t lock;
4557ab87ff4SFrancois Romieu 	struct mutex task_lock;
4567ab87ff4SFrancois Romieu 	bool task_enable;
4577ab87ff4SFrancois Romieu 	struct work_struct slow_event_task;
458f2148a47SJeff Kirsher 	struct work_struct reset_task;
459f2148a47SJeff Kirsher 
460fc3e0f8aSFrancois Romieu 	u32 msg_enable;
461fc3e0f8aSFrancois Romieu 
462f2148a47SJeff Kirsher 	/* Frequently used values: keep some adjacent for cache effect. */
463f2148a47SJeff Kirsher 	u32 quirks;
464f2148a47SJeff Kirsher 	struct rx_desc *rx_head_desc;
465f2148a47SJeff Kirsher 	unsigned int cur_rx, dirty_rx;	/* Producer/consumer ring indices */
466f2148a47SJeff Kirsher 	unsigned int cur_tx, dirty_tx;
467f2148a47SJeff Kirsher 	unsigned int rx_buf_sz;		/* Based on MTU+slack. */
468f7b5d1b9SJamie Gloudon 	struct rhine_stats rx_stats;
469f7b5d1b9SJamie Gloudon 	struct rhine_stats tx_stats;
470f2148a47SJeff Kirsher 	u8 wolopts;
471f2148a47SJeff Kirsher 
472f2148a47SJeff Kirsher 	u8 tx_thresh, rx_thresh;
473f2148a47SJeff Kirsher 
474f2148a47SJeff Kirsher 	struct mii_if_info mii_if;
475f2148a47SJeff Kirsher 	void __iomem *base;
476f2148a47SJeff Kirsher };
477f2148a47SJeff Kirsher 
478f2148a47SJeff Kirsher #define BYTE_REG_BITS_ON(x, p)      do { iowrite8((ioread8((p))|(x)), (p)); } while (0)
479f2148a47SJeff Kirsher #define WORD_REG_BITS_ON(x, p)      do { iowrite16((ioread16((p))|(x)), (p)); } while (0)
480f2148a47SJeff Kirsher #define DWORD_REG_BITS_ON(x, p)     do { iowrite32((ioread32((p))|(x)), (p)); } while (0)
481f2148a47SJeff Kirsher 
482f2148a47SJeff Kirsher #define BYTE_REG_BITS_IS_ON(x, p)   (ioread8((p)) & (x))
483f2148a47SJeff Kirsher #define WORD_REG_BITS_IS_ON(x, p)   (ioread16((p)) & (x))
484f2148a47SJeff Kirsher #define DWORD_REG_BITS_IS_ON(x, p)  (ioread32((p)) & (x))
485f2148a47SJeff Kirsher 
486f2148a47SJeff Kirsher #define BYTE_REG_BITS_OFF(x, p)     do { iowrite8(ioread8((p)) & (~(x)), (p)); } while (0)
487f2148a47SJeff Kirsher #define WORD_REG_BITS_OFF(x, p)     do { iowrite16(ioread16((p)) & (~(x)), (p)); } while (0)
488f2148a47SJeff Kirsher #define DWORD_REG_BITS_OFF(x, p)    do { iowrite32(ioread32((p)) & (~(x)), (p)); } while (0)
489f2148a47SJeff Kirsher 
490f2148a47SJeff Kirsher #define BYTE_REG_BITS_SET(x, m, p)   do { iowrite8((ioread8((p)) & (~(m)))|(x), (p)); } while (0)
491f2148a47SJeff Kirsher #define WORD_REG_BITS_SET(x, m, p)   do { iowrite16((ioread16((p)) & (~(m)))|(x), (p)); } while (0)
492f2148a47SJeff Kirsher #define DWORD_REG_BITS_SET(x, m, p)  do { iowrite32((ioread32((p)) & (~(m)))|(x), (p)); } while (0)
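
/*
 * Usage example, from rhine_kick_tx_threshold() below: clear the bits
 * in the mask, then merge in the new value, as one read-modify-write:
 *
 *	BYTE_REG_BITS_SET(rp->tx_thresh, 0x80, ioaddr + TxConfig);
 */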
493f2148a47SJeff Kirsher 
494f2148a47SJeff Kirsher 
495f2148a47SJeff Kirsher static int  mdio_read(struct net_device *dev, int phy_id, int location);
496f2148a47SJeff Kirsher static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
497f2148a47SJeff Kirsher static int  rhine_open(struct net_device *dev);
498f2148a47SJeff Kirsher static void rhine_reset_task(struct work_struct *work);
4997ab87ff4SFrancois Romieu static void rhine_slow_event_task(struct work_struct *work);
500f2148a47SJeff Kirsher static void rhine_tx_timeout(struct net_device *dev);
501f2148a47SJeff Kirsher static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
502f2148a47SJeff Kirsher 				  struct net_device *dev);
503f2148a47SJeff Kirsher static irqreturn_t rhine_interrupt(int irq, void *dev_instance);
504f2148a47SJeff Kirsher static void rhine_tx(struct net_device *dev);
505f2148a47SJeff Kirsher static int rhine_rx(struct net_device *dev, int limit);
506f2148a47SJeff Kirsher static void rhine_set_rx_mode(struct net_device *dev);
507f7b5d1b9SJamie Gloudon static struct rtnl_link_stats64 *rhine_get_stats64(struct net_device *dev,
508f7b5d1b9SJamie Gloudon 	       struct rtnl_link_stats64 *stats);
509f2148a47SJeff Kirsher static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
510f2148a47SJeff Kirsher static const struct ethtool_ops netdev_ethtool_ops;
511f2148a47SJeff Kirsher static int  rhine_close(struct net_device *dev);
51280d5c368SPatrick McHardy static int rhine_vlan_rx_add_vid(struct net_device *dev,
51380d5c368SPatrick McHardy 				 __be16 proto, u16 vid);
51480d5c368SPatrick McHardy static int rhine_vlan_rx_kill_vid(struct net_device *dev,
51580d5c368SPatrick McHardy 				  __be16 proto, u16 vid);
5167ab87ff4SFrancois Romieu static void rhine_restart_tx(struct net_device *dev);
517f2148a47SJeff Kirsher 
5183f8c91a7SAndreas Mohr static void rhine_wait_bit(struct rhine_private *rp, u8 reg, u8 mask, bool low)
519a384a33bSFrancois Romieu {
520a384a33bSFrancois Romieu 	void __iomem *ioaddr = rp->base;
521a384a33bSFrancois Romieu 	int i;
522a384a33bSFrancois Romieu 
523a384a33bSFrancois Romieu 	for (i = 0; i < 1024; i++) {
5243f8c91a7SAndreas Mohr 		bool has_mask_bits = !!(ioread8(ioaddr + reg) & mask);
5253f8c91a7SAndreas Mohr 
5263f8c91a7SAndreas Mohr 		if (low ^ has_mask_bits)
527a384a33bSFrancois Romieu 			break;
528a384a33bSFrancois Romieu 		udelay(10);
529a384a33bSFrancois Romieu 	}
530a384a33bSFrancois Romieu 	if (i > 64) {
531fc3e0f8aSFrancois Romieu 		netif_dbg(rp, hw, rp->dev, "%s bit wait (%02x/%02x) cycle "
5323f8c91a7SAndreas Mohr 			  "count: %04d\n", low ? "low" : "high", reg, mask, i);
533a384a33bSFrancois Romieu 	}
534a384a33bSFrancois Romieu }
535a384a33bSFrancois Romieu 
536a384a33bSFrancois Romieu static void rhine_wait_bit_high(struct rhine_private *rp, u8 reg, u8 mask)
537a384a33bSFrancois Romieu {
5383f8c91a7SAndreas Mohr 	rhine_wait_bit(rp, reg, mask, false);
539a384a33bSFrancois Romieu }
540a384a33bSFrancois Romieu 
541a384a33bSFrancois Romieu static void rhine_wait_bit_low(struct rhine_private *rp, u8 reg, u8 mask)
542a384a33bSFrancois Romieu {
5433f8c91a7SAndreas Mohr 	rhine_wait_bit(rp, reg, mask, true);
544a384a33bSFrancois Romieu }
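
/*
 * Typical use of these helpers: poll until the chip clears a
 * self-clearing command bit, e.g. waiting for a soft reset to finish
 * (see rhine_chip_reset() below):
 *
 *	rhine_wait_bit_low(rp, ChipCmd1, Cmd1Reset);
 */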
545f2148a47SJeff Kirsher 
546a20a28bcSFrancois Romieu static u32 rhine_get_events(struct rhine_private *rp)
547f2148a47SJeff Kirsher {
548f2148a47SJeff Kirsher 	void __iomem *ioaddr = rp->base;
549f2148a47SJeff Kirsher 	u32 intr_status;
550f2148a47SJeff Kirsher 
551f2148a47SJeff Kirsher 	intr_status = ioread16(ioaddr + IntrStatus);
552f2148a47SJeff Kirsher 	/* On Rhine-II, Bit 3 indicates Tx descriptor write-back race. */
553f2148a47SJeff Kirsher 	if (rp->quirks & rqStatusWBRace)
554f2148a47SJeff Kirsher 		intr_status |= ioread8(ioaddr + IntrStatus2) << 16;
555f2148a47SJeff Kirsher 	return intr_status;
556f2148a47SJeff Kirsher }
557f2148a47SJeff Kirsher 
558a20a28bcSFrancois Romieu static void rhine_ack_events(struct rhine_private *rp, u32 mask)
559a20a28bcSFrancois Romieu {
560a20a28bcSFrancois Romieu 	void __iomem *ioaddr = rp->base;
561a20a28bcSFrancois Romieu 
562a20a28bcSFrancois Romieu 	if (rp->quirks & rqStatusWBRace)
563a20a28bcSFrancois Romieu 		iowrite8(mask >> 16, ioaddr + IntrStatus2);
564a20a28bcSFrancois Romieu 	iowrite16(mask, ioaddr + IntrStatus);
5657ab87ff4SFrancois Romieu 	mmiowb();
566a20a28bcSFrancois Romieu }
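
/*
 * These two helpers are used as a get/ack pair; rhine_napipoll() below,
 * for example, acknowledges everything except the slow events:
 *
 *	u32 status = rhine_get_events(rp);
 *	rhine_ack_events(rp, status & ~RHINE_EVENT_SLOW);
 */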
567a20a28bcSFrancois Romieu 
568f2148a47SJeff Kirsher /*
569f2148a47SJeff Kirsher  * Get power related registers into sane state.
570f2148a47SJeff Kirsher  * Notify user about past WOL event.
571f2148a47SJeff Kirsher  */
572f2148a47SJeff Kirsher static void rhine_power_init(struct net_device *dev)
573f2148a47SJeff Kirsher {
574f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
575f2148a47SJeff Kirsher 	void __iomem *ioaddr = rp->base;
576f2148a47SJeff Kirsher 	u16 wolstat;
577f2148a47SJeff Kirsher 
578f2148a47SJeff Kirsher 	if (rp->quirks & rqWOL) {
579f2148a47SJeff Kirsher 		/* Make sure chip is in power state D0 */
580f2148a47SJeff Kirsher 		iowrite8(ioread8(ioaddr + StickyHW) & 0xFC, ioaddr + StickyHW);
581f2148a47SJeff Kirsher 
582f2148a47SJeff Kirsher 		/* Disable "force PME-enable" */
583f2148a47SJeff Kirsher 		iowrite8(0x80, ioaddr + WOLcgClr);
584f2148a47SJeff Kirsher 
585f2148a47SJeff Kirsher 		/* Clear power-event config bits (WOL) */
586f2148a47SJeff Kirsher 		iowrite8(0xFF, ioaddr + WOLcrClr);
587f2148a47SJeff Kirsher 		/* More recent cards can manage two additional patterns */
588f2148a47SJeff Kirsher 		if (rp->quirks & rq6patterns)
589f2148a47SJeff Kirsher 			iowrite8(0x03, ioaddr + WOLcrClr1);
590f2148a47SJeff Kirsher 
591f2148a47SJeff Kirsher 		/* Save power-event status bits */
592f2148a47SJeff Kirsher 		wolstat = ioread8(ioaddr + PwrcsrSet);
593f2148a47SJeff Kirsher 		if (rp->quirks & rq6patterns)
594f2148a47SJeff Kirsher 			wolstat |= (ioread8(ioaddr + PwrcsrSet1) & 0x03) << 8;
595f2148a47SJeff Kirsher 
596f2148a47SJeff Kirsher 		/* Clear power-event status bits */
597f2148a47SJeff Kirsher 		iowrite8(0xFF, ioaddr + PwrcsrClr);
598f2148a47SJeff Kirsher 		if (rp->quirks & rq6patterns)
599f2148a47SJeff Kirsher 			iowrite8(0x03, ioaddr + PwrcsrClr1);
600f2148a47SJeff Kirsher 
601f2148a47SJeff Kirsher 		if (wolstat) {
602f2148a47SJeff Kirsher 			char *reason;
603f2148a47SJeff Kirsher 			switch (wolstat) {
604f2148a47SJeff Kirsher 			case WOLmagic:
605f2148a47SJeff Kirsher 				reason = "Magic packet";
606f2148a47SJeff Kirsher 				break;
607f2148a47SJeff Kirsher 			case WOLlnkon:
608f2148a47SJeff Kirsher 				reason = "Link went up";
609f2148a47SJeff Kirsher 				break;
610f2148a47SJeff Kirsher 			case WOLlnkoff:
611f2148a47SJeff Kirsher 				reason = "Link went down";
612f2148a47SJeff Kirsher 				break;
613f2148a47SJeff Kirsher 			case WOLucast:
614f2148a47SJeff Kirsher 				reason = "Unicast packet";
615f2148a47SJeff Kirsher 				break;
616f2148a47SJeff Kirsher 			case WOLbmcast:
617f2148a47SJeff Kirsher 				reason = "Multicast/broadcast packet";
618f2148a47SJeff Kirsher 				break;
619f2148a47SJeff Kirsher 			default:
620f2148a47SJeff Kirsher 				reason = "Unknown";
621f2148a47SJeff Kirsher 			}
622f2148a47SJeff Kirsher 			netdev_info(dev, "Woke system up. Reason: %s\n",
623f2148a47SJeff Kirsher 				    reason);
624f2148a47SJeff Kirsher 		}
625f2148a47SJeff Kirsher 	}
626f2148a47SJeff Kirsher }
627f2148a47SJeff Kirsher 
628f2148a47SJeff Kirsher static void rhine_chip_reset(struct net_device *dev)
629f2148a47SJeff Kirsher {
630f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
631f2148a47SJeff Kirsher 	void __iomem *ioaddr = rp->base;
632fc3e0f8aSFrancois Romieu 	u8 cmd1;
633f2148a47SJeff Kirsher 
634f2148a47SJeff Kirsher 	iowrite8(Cmd1Reset, ioaddr + ChipCmd1);
635f2148a47SJeff Kirsher 	IOSYNC;
636f2148a47SJeff Kirsher 
637f2148a47SJeff Kirsher 	if (ioread8(ioaddr + ChipCmd1) & Cmd1Reset) {
638f2148a47SJeff Kirsher 		netdev_info(dev, "Reset not complete yet. Trying harder.\n");
639f2148a47SJeff Kirsher 
640f2148a47SJeff Kirsher 		/* Force reset */
641f2148a47SJeff Kirsher 		if (rp->quirks & rqForceReset)
642f2148a47SJeff Kirsher 			iowrite8(0x40, ioaddr + MiscCmd);
643f2148a47SJeff Kirsher 
644f2148a47SJeff Kirsher 		/* Reset can take somewhat longer (rare) */
645a384a33bSFrancois Romieu 		rhine_wait_bit_low(rp, ChipCmd1, Cmd1Reset);
646f2148a47SJeff Kirsher 	}
647f2148a47SJeff Kirsher 
648fc3e0f8aSFrancois Romieu 	cmd1 = ioread8(ioaddr + ChipCmd1);
649fc3e0f8aSFrancois Romieu 	netif_info(rp, hw, dev, "Reset %s\n", (cmd1 & Cmd1Reset) ?
650f2148a47SJeff Kirsher 		   "failed" : "succeeded");
651f2148a47SJeff Kirsher }
652f2148a47SJeff Kirsher 
653f2148a47SJeff Kirsher #ifdef USE_MMIO
654f2148a47SJeff Kirsher static void enable_mmio(long pioaddr, u32 quirks)
655f2148a47SJeff Kirsher {
656f2148a47SJeff Kirsher 	int n;
657f2148a47SJeff Kirsher 	if (quirks & rqRhineI) {
658f2148a47SJeff Kirsher 		/* More recent docs say that this bit is reserved ... */
659f2148a47SJeff Kirsher 		n = inb(pioaddr + ConfigA) | 0x20;
660f2148a47SJeff Kirsher 		outb(n, pioaddr + ConfigA);
661f2148a47SJeff Kirsher 	} else {
662f2148a47SJeff Kirsher 		n = inb(pioaddr + ConfigD) | 0x80;
663f2148a47SJeff Kirsher 		outb(n, pioaddr + ConfigD);
664f2148a47SJeff Kirsher 	}
665f2148a47SJeff Kirsher }
666f2148a47SJeff Kirsher #endif
667f2148a47SJeff Kirsher 
668f2148a47SJeff Kirsher /*
669f2148a47SJeff Kirsher  * Loads bytes 0x00-0x05, 0x6E-0x6F, 0x78-0x7B from EEPROM
670f2148a47SJeff Kirsher  * (plus 0x6C for Rhine-I/II)
671f2148a47SJeff Kirsher  */
67276e239e1SBill Pemberton static void rhine_reload_eeprom(long pioaddr, struct net_device *dev)
673f2148a47SJeff Kirsher {
674f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
675f2148a47SJeff Kirsher 	void __iomem *ioaddr = rp->base;
676a384a33bSFrancois Romieu 	int i;
677f2148a47SJeff Kirsher 
678f2148a47SJeff Kirsher 	outb(0x20, pioaddr + MACRegEEcsr);
679a384a33bSFrancois Romieu 	for (i = 0; i < 1024; i++) {
680a384a33bSFrancois Romieu 		if (!(inb(pioaddr + MACRegEEcsr) & 0x20))
681a384a33bSFrancois Romieu 			break;
682a384a33bSFrancois Romieu 	}
683a384a33bSFrancois Romieu 	if (i > 512)
684a384a33bSFrancois Romieu 		pr_info("%4d cycles used @ %s:%d\n", i, __func__, __LINE__);
685f2148a47SJeff Kirsher 
686f2148a47SJeff Kirsher #ifdef USE_MMIO
687f2148a47SJeff Kirsher 	/*
688f2148a47SJeff Kirsher 	 * Reloading from EEPROM overwrites ConfigA-D, so we must re-enable
689f2148a47SJeff Kirsher 	 * MMIO. If reloading EEPROM was done first this could be avoided, but
690f2148a47SJeff Kirsher 	 * it is not known if that still works with the "win98-reboot" problem.
691f2148a47SJeff Kirsher 	 */
692f2148a47SJeff Kirsher 	enable_mmio(pioaddr, rp->quirks);
693f2148a47SJeff Kirsher #endif
694f2148a47SJeff Kirsher 
695f2148a47SJeff Kirsher 	/* Turn off EEPROM-controlled wake-up (magic packet) */
696f2148a47SJeff Kirsher 	if (rp->quirks & rqWOL)
697f2148a47SJeff Kirsher 		iowrite8(ioread8(ioaddr + ConfigA) & 0xFC, ioaddr + ConfigA);
698f2148a47SJeff Kirsher 
699f2148a47SJeff Kirsher }
700f2148a47SJeff Kirsher 
701f2148a47SJeff Kirsher #ifdef CONFIG_NET_POLL_CONTROLLER
702f2148a47SJeff Kirsher static void rhine_poll(struct net_device *dev)
703f2148a47SJeff Kirsher {
70405d334ecSFrancois Romieu 	struct rhine_private *rp = netdev_priv(dev);
705*f7630d18SAlexey Charkov 	const int irq = rp->irq;
70605d334ecSFrancois Romieu 
70705d334ecSFrancois Romieu 	disable_irq(irq);
70805d334ecSFrancois Romieu 	rhine_interrupt(irq, dev);
70905d334ecSFrancois Romieu 	enable_irq(irq);
710f2148a47SJeff Kirsher }
711f2148a47SJeff Kirsher #endif
712f2148a47SJeff Kirsher 
713269f3114SFrancois Romieu static void rhine_kick_tx_threshold(struct rhine_private *rp)
714269f3114SFrancois Romieu {
715269f3114SFrancois Romieu 	if (rp->tx_thresh < 0xe0) {
716269f3114SFrancois Romieu 		void __iomem *ioaddr = rp->base;
717269f3114SFrancois Romieu 
718269f3114SFrancois Romieu 		rp->tx_thresh += 0x20;
719269f3114SFrancois Romieu 		BYTE_REG_BITS_SET(rp->tx_thresh, 0x80, ioaddr + TxConfig);
720269f3114SFrancois Romieu 	}
721269f3114SFrancois Romieu }
722269f3114SFrancois Romieu 
7237ab87ff4SFrancois Romieu static void rhine_tx_err(struct rhine_private *rp, u32 status)
7247ab87ff4SFrancois Romieu {
7257ab87ff4SFrancois Romieu 	struct net_device *dev = rp->dev;
7267ab87ff4SFrancois Romieu 
7277ab87ff4SFrancois Romieu 	if (status & IntrTxAborted) {
728fc3e0f8aSFrancois Romieu 		netif_info(rp, tx_err, dev,
729fc3e0f8aSFrancois Romieu 			   "Abort %08x, frame dropped\n", status);
7307ab87ff4SFrancois Romieu 	}
7317ab87ff4SFrancois Romieu 
7327ab87ff4SFrancois Romieu 	if (status & IntrTxUnderrun) {
7337ab87ff4SFrancois Romieu 		rhine_kick_tx_threshold(rp);
734fc3e0f8aSFrancois Romieu 		netif_info(rp, tx_err, dev, "Transmitter underrun, "
735fc3e0f8aSFrancois Romieu 			   "Tx threshold now %02x\n", rp->tx_thresh);
7367ab87ff4SFrancois Romieu 	}
7377ab87ff4SFrancois Romieu 
738fc3e0f8aSFrancois Romieu 	if (status & IntrTxDescRace)
739fc3e0f8aSFrancois Romieu 		netif_info(rp, tx_err, dev, "Tx descriptor write-back race\n");
7407ab87ff4SFrancois Romieu 
7417ab87ff4SFrancois Romieu 	if ((status & IntrTxError) &&
7427ab87ff4SFrancois Romieu 	    (status & (IntrTxAborted | IntrTxUnderrun | IntrTxDescRace)) == 0) {
7437ab87ff4SFrancois Romieu 		rhine_kick_tx_threshold(rp);
744fc3e0f8aSFrancois Romieu 		netif_info(rp, tx_err, dev, "Unspecified error. "
745fc3e0f8aSFrancois Romieu 			   "Tx threshold now %02x\n", rp->tx_thresh);
7467ab87ff4SFrancois Romieu 	}
7477ab87ff4SFrancois Romieu 
7487ab87ff4SFrancois Romieu 	rhine_restart_tx(dev);
7497ab87ff4SFrancois Romieu }
7507ab87ff4SFrancois Romieu 
7517ab87ff4SFrancois Romieu static void rhine_update_rx_crc_and_missed_errord(struct rhine_private *rp)
7527ab87ff4SFrancois Romieu {
7537ab87ff4SFrancois Romieu 	void __iomem *ioaddr = rp->base;
7547ab87ff4SFrancois Romieu 	struct net_device_stats *stats = &rp->dev->stats;
7557ab87ff4SFrancois Romieu 
7567ab87ff4SFrancois Romieu 	stats->rx_crc_errors    += ioread16(ioaddr + RxCRCErrs);
7577ab87ff4SFrancois Romieu 	stats->rx_missed_errors += ioread16(ioaddr + RxMissed);
7587ab87ff4SFrancois Romieu 
7597ab87ff4SFrancois Romieu 	/*
7607ab87ff4SFrancois Romieu 	 * Clears the "tally counters" for CRC errors and missed frames(?).
7617ab87ff4SFrancois Romieu 	 * It has been reported that some chips need a write of 0 to clear
7627ab87ff4SFrancois Romieu 	 * these, for others the counters are set to 1 when written to and
7637ab87ff4SFrancois Romieu 	 * instead cleared when read. So we clear them both ways ...
7647ab87ff4SFrancois Romieu 	 */
7657ab87ff4SFrancois Romieu 	iowrite32(0, ioaddr + RxMissed);
7667ab87ff4SFrancois Romieu 	ioread16(ioaddr + RxCRCErrs);
7677ab87ff4SFrancois Romieu 	ioread16(ioaddr + RxMissed);
7687ab87ff4SFrancois Romieu }
7697ab87ff4SFrancois Romieu 
7707ab87ff4SFrancois Romieu #define RHINE_EVENT_NAPI_RX	(IntrRxDone | \
7717ab87ff4SFrancois Romieu 				 IntrRxErr | \
7727ab87ff4SFrancois Romieu 				 IntrRxEmpty | \
7737ab87ff4SFrancois Romieu 				 IntrRxOverflow	| \
7747ab87ff4SFrancois Romieu 				 IntrRxDropped | \
7757ab87ff4SFrancois Romieu 				 IntrRxNoBuf | \
7767ab87ff4SFrancois Romieu 				 IntrRxWakeUp)
7777ab87ff4SFrancois Romieu 
7787ab87ff4SFrancois Romieu #define RHINE_EVENT_NAPI_TX_ERR	(IntrTxError | \
7797ab87ff4SFrancois Romieu 				 IntrTxAborted | \
7807ab87ff4SFrancois Romieu 				 IntrTxUnderrun | \
7817ab87ff4SFrancois Romieu 				 IntrTxDescRace)
7827ab87ff4SFrancois Romieu #define RHINE_EVENT_NAPI_TX	(IntrTxDone | RHINE_EVENT_NAPI_TX_ERR)
7837ab87ff4SFrancois Romieu 
7847ab87ff4SFrancois Romieu #define RHINE_EVENT_NAPI	(RHINE_EVENT_NAPI_RX | \
7857ab87ff4SFrancois Romieu 				 RHINE_EVENT_NAPI_TX | \
7867ab87ff4SFrancois Romieu 				 IntrStatsMax)
7877ab87ff4SFrancois Romieu #define RHINE_EVENT_SLOW	(IntrPCIErr | IntrLinkChange)
7887ab87ff4SFrancois Romieu #define RHINE_EVENT		(RHINE_EVENT_NAPI | RHINE_EVENT_SLOW)
7897ab87ff4SFrancois Romieu 
790f2148a47SJeff Kirsher static int rhine_napipoll(struct napi_struct *napi, int budget)
791f2148a47SJeff Kirsher {
792f2148a47SJeff Kirsher 	struct rhine_private *rp = container_of(napi, struct rhine_private, napi);
793f2148a47SJeff Kirsher 	struct net_device *dev = rp->dev;
794f2148a47SJeff Kirsher 	void __iomem *ioaddr = rp->base;
7957ab87ff4SFrancois Romieu 	u16 enable_mask = RHINE_EVENT & 0xffff;
7967ab87ff4SFrancois Romieu 	int work_done = 0;
7977ab87ff4SFrancois Romieu 	u32 status;
798f2148a47SJeff Kirsher 
7997ab87ff4SFrancois Romieu 	status = rhine_get_events(rp);
8007ab87ff4SFrancois Romieu 	rhine_ack_events(rp, status & ~RHINE_EVENT_SLOW);
8017ab87ff4SFrancois Romieu 
8027ab87ff4SFrancois Romieu 	if (status & RHINE_EVENT_NAPI_RX)
8037ab87ff4SFrancois Romieu 		work_done += rhine_rx(dev, budget);
8047ab87ff4SFrancois Romieu 
8057ab87ff4SFrancois Romieu 	if (status & RHINE_EVENT_NAPI_TX) {
8067ab87ff4SFrancois Romieu 		if (status & RHINE_EVENT_NAPI_TX_ERR) {
8077ab87ff4SFrancois Romieu 			/* Avoid scavenging before Tx engine turned off */
808a384a33bSFrancois Romieu 			rhine_wait_bit_low(rp, ChipCmd, CmdTxOn);
809fc3e0f8aSFrancois Romieu 			if (ioread8(ioaddr + ChipCmd) & CmdTxOn)
810fc3e0f8aSFrancois Romieu 				netif_warn(rp, tx_err, dev, "Tx still on\n");
8117ab87ff4SFrancois Romieu 		}
812fc3e0f8aSFrancois Romieu 
8137ab87ff4SFrancois Romieu 		rhine_tx(dev);
8147ab87ff4SFrancois Romieu 
8157ab87ff4SFrancois Romieu 		if (status & RHINE_EVENT_NAPI_TX_ERR)
8167ab87ff4SFrancois Romieu 			rhine_tx_err(rp, status);
8177ab87ff4SFrancois Romieu 	}
8187ab87ff4SFrancois Romieu 
8197ab87ff4SFrancois Romieu 	if (status & IntrStatsMax) {
8207ab87ff4SFrancois Romieu 		spin_lock(&rp->lock);
8217ab87ff4SFrancois Romieu 		rhine_update_rx_crc_and_missed_errord(rp);
8227ab87ff4SFrancois Romieu 		spin_unlock(&rp->lock);
8237ab87ff4SFrancois Romieu 	}
8247ab87ff4SFrancois Romieu 
8257ab87ff4SFrancois Romieu 	if (status & RHINE_EVENT_SLOW) {
8267ab87ff4SFrancois Romieu 		enable_mask &= ~RHINE_EVENT_SLOW;
8277ab87ff4SFrancois Romieu 		schedule_work(&rp->slow_event_task);
8287ab87ff4SFrancois Romieu 	}
829f2148a47SJeff Kirsher 
830f2148a47SJeff Kirsher 	if (work_done < budget) {
831f2148a47SJeff Kirsher 		napi_complete(napi);
8327ab87ff4SFrancois Romieu 		iowrite16(enable_mask, ioaddr + IntrEnable);
8337ab87ff4SFrancois Romieu 		mmiowb();
834f2148a47SJeff Kirsher 	}
835f2148a47SJeff Kirsher 	return work_done;
836f2148a47SJeff Kirsher }
837f2148a47SJeff Kirsher 
83876e239e1SBill Pemberton static void rhine_hw_init(struct net_device *dev, long pioaddr)
839f2148a47SJeff Kirsher {
840f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
841f2148a47SJeff Kirsher 
842f2148a47SJeff Kirsher 	/* Reset the chip to erase previous misconfiguration. */
843f2148a47SJeff Kirsher 	rhine_chip_reset(dev);
844f2148a47SJeff Kirsher 
845f2148a47SJeff Kirsher 	/* Rhine-I needs extra time to recuperate before EEPROM reload */
846f2148a47SJeff Kirsher 	if (rp->quirks & rqRhineI)
847f2148a47SJeff Kirsher 		msleep(5);
848f2148a47SJeff Kirsher 
849f2148a47SJeff Kirsher 	/* Reload EEPROM controlled bytes cleared by soft reset */
850f2148a47SJeff Kirsher 	rhine_reload_eeprom(pioaddr, dev);
851f2148a47SJeff Kirsher }
852f2148a47SJeff Kirsher 
853f2148a47SJeff Kirsher static const struct net_device_ops rhine_netdev_ops = {
854f2148a47SJeff Kirsher 	.ndo_open		 = rhine_open,
855f2148a47SJeff Kirsher 	.ndo_stop		 = rhine_close,
856f2148a47SJeff Kirsher 	.ndo_start_xmit		 = rhine_start_tx,
857f7b5d1b9SJamie Gloudon 	.ndo_get_stats64	 = rhine_get_stats64,
858afc4b13dSJiri Pirko 	.ndo_set_rx_mode	 = rhine_set_rx_mode,
859f2148a47SJeff Kirsher 	.ndo_change_mtu		 = eth_change_mtu,
860f2148a47SJeff Kirsher 	.ndo_validate_addr	 = eth_validate_addr,
861f2148a47SJeff Kirsher 	.ndo_set_mac_address 	 = eth_mac_addr,
862f2148a47SJeff Kirsher 	.ndo_do_ioctl		 = netdev_ioctl,
863f2148a47SJeff Kirsher 	.ndo_tx_timeout 	 = rhine_tx_timeout,
864f2148a47SJeff Kirsher 	.ndo_vlan_rx_add_vid	 = rhine_vlan_rx_add_vid,
865f2148a47SJeff Kirsher 	.ndo_vlan_rx_kill_vid	 = rhine_vlan_rx_kill_vid,
866f2148a47SJeff Kirsher #ifdef CONFIG_NET_POLL_CONTROLLER
867f2148a47SJeff Kirsher 	.ndo_poll_controller	 = rhine_poll,
868f2148a47SJeff Kirsher #endif
869f2148a47SJeff Kirsher };
870f2148a47SJeff Kirsher 
8711dd06ae8SGreg Kroah-Hartman static int rhine_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
872f2148a47SJeff Kirsher {
873f2148a47SJeff Kirsher 	struct net_device *dev;
874f2148a47SJeff Kirsher 	struct rhine_private *rp;
875*f7630d18SAlexey Charkov 	struct device *hwdev = &pdev->dev;
876*f7630d18SAlexey Charkov 	int revision = pdev->revision;
877f2148a47SJeff Kirsher 	int i, rc;
878f2148a47SJeff Kirsher 	u32 quirks;
879f2148a47SJeff Kirsher 	long pioaddr;
880f2148a47SJeff Kirsher 	long memaddr;
881f2148a47SJeff Kirsher 	void __iomem *ioaddr;
882f2148a47SJeff Kirsher 	int io_size, phy_id;
883f2148a47SJeff Kirsher 	const char *name;
884f2148a47SJeff Kirsher #ifdef USE_MMIO
885f2148a47SJeff Kirsher 	int bar = 1;
886f2148a47SJeff Kirsher #else
887f2148a47SJeff Kirsher 	int bar = 0;
888f2148a47SJeff Kirsher #endif
889f2148a47SJeff Kirsher 
890f2148a47SJeff Kirsher /* when built into the kernel, we only print version if device is found */
891f2148a47SJeff Kirsher #ifndef MODULE
892f2148a47SJeff Kirsher 	pr_info_once("%s\n", version);
893f2148a47SJeff Kirsher #endif
894f2148a47SJeff Kirsher 
895f2148a47SJeff Kirsher 	io_size = 256;
896f2148a47SJeff Kirsher 	phy_id = 0;
897f2148a47SJeff Kirsher 	quirks = 0;
898f2148a47SJeff Kirsher 	name = "Rhine";
899*f7630d18SAlexey Charkov 	if (revision < VTunknown0) {
900f2148a47SJeff Kirsher 		quirks = rqRhineI;
901f2148a47SJeff Kirsher 		io_size = 128;
902*f7630d18SAlexey Charkov 	} else if (revision >= VT6102) {
903f2148a47SJeff Kirsher 		quirks = rqWOL | rqForceReset;
904*f7630d18SAlexey Charkov 		if (revision < VT6105) {
905f2148a47SJeff Kirsher 			name = "Rhine II";
906f2148a47SJeff Kirsher 			quirks |= rqStatusWBRace;	/* Rhine-II exclusive */
907*f7630d18SAlexey Charkov 		} else {
908f2148a47SJeff Kirsher 			phy_id = 1;	/* Integrated PHY, phy_id fixed to 1 */
909*f7630d18SAlexey Charkov 			if (revision >= VT6105_B0)
910f2148a47SJeff Kirsher 				quirks |= rq6patterns;
911*f7630d18SAlexey Charkov 			if (revision < VT6105M)
912f2148a47SJeff Kirsher 				name = "Rhine III";
913f2148a47SJeff Kirsher 			else
914f2148a47SJeff Kirsher 				name = "Rhine III (Management Adapter)";
915f2148a47SJeff Kirsher 		}
916f2148a47SJeff Kirsher 	}
917f2148a47SJeff Kirsher 
918f2148a47SJeff Kirsher 	rc = pci_enable_device(pdev);
919f2148a47SJeff Kirsher 	if (rc)
920f2148a47SJeff Kirsher 		goto err_out;
921f2148a47SJeff Kirsher 
922f2148a47SJeff Kirsher 	/* this should always be supported */
923*f7630d18SAlexey Charkov 	rc = dma_set_mask(hwdev, DMA_BIT_MASK(32));
924f2148a47SJeff Kirsher 	if (rc) {
925*f7630d18SAlexey Charkov 		dev_err(hwdev, "32-bit DMA addresses not supported by the card!?\n");
926ae996154SRoger Luethi 		goto err_out_pci_disable;
927f2148a47SJeff Kirsher 	}
928f2148a47SJeff Kirsher 
929f2148a47SJeff Kirsher 	/* sanity check */
930f2148a47SJeff Kirsher 	if ((pci_resource_len(pdev, 0) < io_size) ||
931f2148a47SJeff Kirsher 	    (pci_resource_len(pdev, 1) < io_size)) {
932f2148a47SJeff Kirsher 		rc = -EIO;
933*f7630d18SAlexey Charkov 		dev_err(hwdev, "Insufficient PCI resources, aborting\n");
934ae996154SRoger Luethi 		goto err_out_pci_disable;
935f2148a47SJeff Kirsher 	}
936f2148a47SJeff Kirsher 
937f2148a47SJeff Kirsher 	pioaddr = pci_resource_start(pdev, 0);
938f2148a47SJeff Kirsher 	memaddr = pci_resource_start(pdev, 1);
939f2148a47SJeff Kirsher 
940f2148a47SJeff Kirsher 	pci_set_master(pdev);
941f2148a47SJeff Kirsher 
942f2148a47SJeff Kirsher 	dev = alloc_etherdev(sizeof(struct rhine_private));
943f2148a47SJeff Kirsher 	if (!dev) {
944f2148a47SJeff Kirsher 		rc = -ENOMEM;
945ae996154SRoger Luethi 		goto err_out_pci_disable;
946f2148a47SJeff Kirsher 	}
947*f7630d18SAlexey Charkov 	SET_NETDEV_DEV(dev, hwdev);
948f2148a47SJeff Kirsher 
949f2148a47SJeff Kirsher 	rp = netdev_priv(dev);
950f2148a47SJeff Kirsher 	rp->dev = dev;
951*f7630d18SAlexey Charkov 	rp->revision = revision;
952f2148a47SJeff Kirsher 	rp->quirks = quirks;
953f2148a47SJeff Kirsher 	rp->pioaddr = pioaddr;
954fc3e0f8aSFrancois Romieu 	rp->msg_enable = netif_msg_init(debug, RHINE_MSG_DEFAULT);
955f2148a47SJeff Kirsher 
956f2148a47SJeff Kirsher 	rc = pci_request_regions(pdev, DRV_NAME);
957f2148a47SJeff Kirsher 	if (rc)
958f2148a47SJeff Kirsher 		goto err_out_free_netdev;
959f2148a47SJeff Kirsher 
960f2148a47SJeff Kirsher 	ioaddr = pci_iomap(pdev, bar, io_size);
961f2148a47SJeff Kirsher 	if (!ioaddr) {
962f2148a47SJeff Kirsher 		rc = -EIO;
963*f7630d18SAlexey Charkov 		dev_err(hwdev,
964f2148a47SJeff Kirsher 			"ioremap failed for device %s, region 0x%X @ 0x%lX\n",
965*f7630d18SAlexey Charkov 			dev_name(hwdev), io_size, memaddr);
966f2148a47SJeff Kirsher 		goto err_out_free_res;
967f2148a47SJeff Kirsher 	}
968f2148a47SJeff Kirsher 
969f2148a47SJeff Kirsher #ifdef USE_MMIO
970f2148a47SJeff Kirsher 	enable_mmio(pioaddr, quirks);
971f2148a47SJeff Kirsher 
972f2148a47SJeff Kirsher 	/* Check that selected MMIO registers match the PIO ones */
973f2148a47SJeff Kirsher 	i = 0;
974f2148a47SJeff Kirsher 	while (mmio_verify_registers[i]) {
975f2148a47SJeff Kirsher 		int reg = mmio_verify_registers[i++];
976f2148a47SJeff Kirsher 		unsigned char a = inb(pioaddr+reg);
977f2148a47SJeff Kirsher 		unsigned char b = readb(ioaddr+reg);
978f2148a47SJeff Kirsher 		if (a != b) {
979f2148a47SJeff Kirsher 			rc = -EIO;
980*f7630d18SAlexey Charkov 			dev_err(hwdev,
981f2148a47SJeff Kirsher 				"MMIO do not match PIO [%02x] (%02x != %02x)\n",
982f2148a47SJeff Kirsher 				reg, a, b);
983f2148a47SJeff Kirsher 			goto err_out_unmap;
984f2148a47SJeff Kirsher 		}
985f2148a47SJeff Kirsher 	}
986f2148a47SJeff Kirsher #endif /* USE_MMIO */
987f2148a47SJeff Kirsher 
988f2148a47SJeff Kirsher 	rp->base = ioaddr;
989*f7630d18SAlexey Charkov 	rp->irq = pdev->irq;
990f2148a47SJeff Kirsher 
991827da44cSJohn Stultz 	u64_stats_init(&rp->tx_stats.syncp);
992827da44cSJohn Stultz 	u64_stats_init(&rp->rx_stats.syncp);
993827da44cSJohn Stultz 
994f2148a47SJeff Kirsher 	/* Get chip registers into a sane state */
995f2148a47SJeff Kirsher 	rhine_power_init(dev);
996f2148a47SJeff Kirsher 	rhine_hw_init(dev, pioaddr);
997f2148a47SJeff Kirsher 
998f2148a47SJeff Kirsher 	for (i = 0; i < 6; i++)
999f2148a47SJeff Kirsher 		dev->dev_addr[i] = ioread8(ioaddr + StationAddr + i);
1000f2148a47SJeff Kirsher 
1001f2148a47SJeff Kirsher 	if (!is_valid_ether_addr(dev->dev_addr)) {
1002f2148a47SJeff Kirsher 		/* Report it and use a random ethernet address instead */
1003f2148a47SJeff Kirsher 		netdev_err(dev, "Invalid MAC address: %pM\n", dev->dev_addr);
1004f2cedb63SDanny Kukawka 		eth_hw_addr_random(dev);
1005f2148a47SJeff Kirsher 		netdev_info(dev, "Using random MAC address: %pM\n",
1006f2148a47SJeff Kirsher 			    dev->dev_addr);
1007f2148a47SJeff Kirsher 	}
1008f2148a47SJeff Kirsher 
1009f2148a47SJeff Kirsher 	/* For Rhine-I/II, phy_id is loaded from EEPROM */
1010f2148a47SJeff Kirsher 	if (!phy_id)
1011f2148a47SJeff Kirsher 		phy_id = ioread8(ioaddr + 0x6C);
1012f2148a47SJeff Kirsher 
1013f2148a47SJeff Kirsher 	spin_lock_init(&rp->lock);
10147ab87ff4SFrancois Romieu 	mutex_init(&rp->task_lock);
1015f2148a47SJeff Kirsher 	INIT_WORK(&rp->reset_task, rhine_reset_task);
10167ab87ff4SFrancois Romieu 	INIT_WORK(&rp->slow_event_task, rhine_slow_event_task);
1017f2148a47SJeff Kirsher 
1018f2148a47SJeff Kirsher 	rp->mii_if.dev = dev;
1019f2148a47SJeff Kirsher 	rp->mii_if.mdio_read = mdio_read;
1020f2148a47SJeff Kirsher 	rp->mii_if.mdio_write = mdio_write;
1021f2148a47SJeff Kirsher 	rp->mii_if.phy_id_mask = 0x1f;
1022f2148a47SJeff Kirsher 	rp->mii_if.reg_num_mask = 0x1f;
1023f2148a47SJeff Kirsher 
1024f2148a47SJeff Kirsher 	/* The chip-specific entries in the device structure. */
1025f2148a47SJeff Kirsher 	dev->netdev_ops = &rhine_netdev_ops;
1026e76070f2Swangweidong 	dev->ethtool_ops = &netdev_ethtool_ops;
1027f2148a47SJeff Kirsher 	dev->watchdog_timeo = TX_TIMEOUT;
1028f2148a47SJeff Kirsher 
1029f2148a47SJeff Kirsher 	netif_napi_add(dev, &rp->napi, rhine_napipoll, 64);
1030f2148a47SJeff Kirsher 
1031f2148a47SJeff Kirsher 	if (rp->quirks & rqRhineI)
1032f2148a47SJeff Kirsher 		dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM;
1033f2148a47SJeff Kirsher 
1034*f7630d18SAlexey Charkov 	if (rp->revision >= VT6105M)
1035f646968fSPatrick McHardy 		dev->features |= NETIF_F_HW_VLAN_CTAG_TX |
1036f646968fSPatrick McHardy 				 NETIF_F_HW_VLAN_CTAG_RX |
1037f646968fSPatrick McHardy 				 NETIF_F_HW_VLAN_CTAG_FILTER;
1038f2148a47SJeff Kirsher 
1039f2148a47SJeff Kirsher 	/* dev->name not defined before register_netdev()! */
1040f2148a47SJeff Kirsher 	rc = register_netdev(dev);
1041f2148a47SJeff Kirsher 	if (rc)
1042f2148a47SJeff Kirsher 		goto err_out_unmap;
1043f2148a47SJeff Kirsher 
1044f2148a47SJeff Kirsher 	netdev_info(dev, "VIA %s at 0x%lx, %pM, IRQ %d\n",
1045f2148a47SJeff Kirsher 		    name,
1046f2148a47SJeff Kirsher #ifdef USE_MMIO
1047f2148a47SJeff Kirsher 		    memaddr,
1048f2148a47SJeff Kirsher #else
1049f2148a47SJeff Kirsher 		    (long)ioaddr,
1050f2148a47SJeff Kirsher #endif
1051*f7630d18SAlexey Charkov 		    dev->dev_addr, rp->irq);
1052f2148a47SJeff Kirsher 
1053*f7630d18SAlexey Charkov 	dev_set_drvdata(hwdev, dev);
1054f2148a47SJeff Kirsher 
1055f2148a47SJeff Kirsher 	{
1056f2148a47SJeff Kirsher 		u16 mii_cmd;
1057f2148a47SJeff Kirsher 		int mii_status = mdio_read(dev, phy_id, 1);
1058f2148a47SJeff Kirsher 		mii_cmd = mdio_read(dev, phy_id, MII_BMCR) & ~BMCR_ISOLATE;
1059f2148a47SJeff Kirsher 		mdio_write(dev, phy_id, MII_BMCR, mii_cmd);
1060f2148a47SJeff Kirsher 		if (mii_status != 0xffff && mii_status != 0x0000) {
1061f2148a47SJeff Kirsher 			rp->mii_if.advertising = mdio_read(dev, phy_id, 4);
1062f2148a47SJeff Kirsher 			netdev_info(dev,
1063f2148a47SJeff Kirsher 				    "MII PHY found at address %d, status 0x%04x advertising %04x Link %04x\n",
1064f2148a47SJeff Kirsher 				    phy_id,
1065f2148a47SJeff Kirsher 				    mii_status, rp->mii_if.advertising,
1066f2148a47SJeff Kirsher 				    mdio_read(dev, phy_id, 5));
1067f2148a47SJeff Kirsher 
1068f2148a47SJeff Kirsher 			/* set IFF_RUNNING */
1069f2148a47SJeff Kirsher 			if (mii_status & BMSR_LSTATUS)
1070f2148a47SJeff Kirsher 				netif_carrier_on(dev);
1071f2148a47SJeff Kirsher 			else
1072f2148a47SJeff Kirsher 				netif_carrier_off(dev);
1073f2148a47SJeff Kirsher 
1074f2148a47SJeff Kirsher 		}
1075f2148a47SJeff Kirsher 	}
1076f2148a47SJeff Kirsher 	rp->mii_if.phy_id = phy_id;
1077fc3e0f8aSFrancois Romieu 	if (avoid_D3)
1078fc3e0f8aSFrancois Romieu 		netif_info(rp, probe, dev, "No D3 power state at shutdown\n");
1079f2148a47SJeff Kirsher 
1080f2148a47SJeff Kirsher 	return 0;
1081f2148a47SJeff Kirsher 
1082f2148a47SJeff Kirsher err_out_unmap:
1083f2148a47SJeff Kirsher 	pci_iounmap(pdev, ioaddr);
1084f2148a47SJeff Kirsher err_out_free_res:
1085f2148a47SJeff Kirsher 	pci_release_regions(pdev);
1086f2148a47SJeff Kirsher err_out_free_netdev:
1087f2148a47SJeff Kirsher 	free_netdev(dev);
1088ae996154SRoger Luethi err_out_pci_disable:
1089ae996154SRoger Luethi 	pci_disable_device(pdev);
1090f2148a47SJeff Kirsher err_out:
1091f2148a47SJeff Kirsher 	return rc;
1092f2148a47SJeff Kirsher }
1093f2148a47SJeff Kirsher 
1094f2148a47SJeff Kirsher static int alloc_ring(struct net_device *dev)
1095f2148a47SJeff Kirsher {
1096f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
1097*f7630d18SAlexey Charkov 	struct device *hwdev = dev->dev.parent;
1098f2148a47SJeff Kirsher 	void *ring;
1099f2148a47SJeff Kirsher 	dma_addr_t ring_dma;
1100f2148a47SJeff Kirsher 
1101*f7630d18SAlexey Charkov 	ring = dma_alloc_coherent(hwdev,
1102f2148a47SJeff Kirsher 				  RX_RING_SIZE * sizeof(struct rx_desc) +
1103f2148a47SJeff Kirsher 				  TX_RING_SIZE * sizeof(struct tx_desc),
11044087c4dcSAlexey Charkov 				  &ring_dma,
11054087c4dcSAlexey Charkov 				  GFP_ATOMIC);
1106f2148a47SJeff Kirsher 	if (!ring) {
1107f2148a47SJeff Kirsher 		netdev_err(dev, "Could not allocate DMA memory\n");
1108f2148a47SJeff Kirsher 		return -ENOMEM;
1109f2148a47SJeff Kirsher 	}
1110f2148a47SJeff Kirsher 	if (rp->quirks & rqRhineI) {
1111*f7630d18SAlexey Charkov 		rp->tx_bufs = dma_alloc_coherent(hwdev,
1112f2148a47SJeff Kirsher 						 PKT_BUF_SZ * TX_RING_SIZE,
11134087c4dcSAlexey Charkov 						 &rp->tx_bufs_dma,
11144087c4dcSAlexey Charkov 						 GFP_ATOMIC);
1115f2148a47SJeff Kirsher 		if (rp->tx_bufs == NULL) {
1116*f7630d18SAlexey Charkov 			dma_free_coherent(hwdev,
1117f2148a47SJeff Kirsher 					  RX_RING_SIZE * sizeof(struct rx_desc) +
1118f2148a47SJeff Kirsher 					  TX_RING_SIZE * sizeof(struct tx_desc),
1119f2148a47SJeff Kirsher 					  ring, ring_dma);
1120f2148a47SJeff Kirsher 			return -ENOMEM;
1121f2148a47SJeff Kirsher 		}
1122f2148a47SJeff Kirsher 	}
1123f2148a47SJeff Kirsher 
1124f2148a47SJeff Kirsher 	rp->rx_ring = ring;
1125f2148a47SJeff Kirsher 	rp->tx_ring = ring + RX_RING_SIZE * sizeof(struct rx_desc);
1126f2148a47SJeff Kirsher 	rp->rx_ring_dma = ring_dma;
1127f2148a47SJeff Kirsher 	rp->tx_ring_dma = ring_dma + RX_RING_SIZE * sizeof(struct rx_desc);
1128f2148a47SJeff Kirsher 
1129f2148a47SJeff Kirsher 	return 0;
1130f2148a47SJeff Kirsher }
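/*
 * Layout sketch of the single coherent block allocated above (an
 * illustration, not authoritative; actual sizes come from RX_RING_SIZE,
 * TX_RING_SIZE and the descriptor structs):
 *
 *   ring_dma
 *   |<- RX_RING_SIZE * sizeof(struct rx_desc) ->|<- TX_RING_SIZE * sizeof(struct tx_desc) ->|
 *   rx_ring / rx_ring_dma                        tx_ring / tx_ring_dma
 *
 * One dma_alloc_coherent() call covers both rings, so free_ring() needs
 * only one matching dma_free_coherent() for them.
 */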
1131f2148a47SJeff Kirsher 
1132f2148a47SJeff Kirsher static void free_ring(struct net_device *dev)
1133f2148a47SJeff Kirsher {
1134f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
1135*f7630d18SAlexey Charkov 	struct device *hwdev = dev->dev.parent;
1136f2148a47SJeff Kirsher 
1137*f7630d18SAlexey Charkov 	dma_free_coherent(hwdev,
1138f2148a47SJeff Kirsher 			  RX_RING_SIZE * sizeof(struct rx_desc) +
1139f2148a47SJeff Kirsher 			  TX_RING_SIZE * sizeof(struct tx_desc),
1140f2148a47SJeff Kirsher 			  rp->rx_ring, rp->rx_ring_dma);
1141f2148a47SJeff Kirsher 	rp->tx_ring = NULL;
1142f2148a47SJeff Kirsher 
1143f2148a47SJeff Kirsher 	if (rp->tx_bufs)
1144*f7630d18SAlexey Charkov 		dma_free_coherent(hwdev, PKT_BUF_SZ * TX_RING_SIZE,
1145f2148a47SJeff Kirsher 				  rp->tx_bufs, rp->tx_bufs_dma);
1146f2148a47SJeff Kirsher 
1147f2148a47SJeff Kirsher 	rp->tx_bufs = NULL;
1148f2148a47SJeff Kirsher 
1149f2148a47SJeff Kirsher }
1150f2148a47SJeff Kirsher 
1151f2148a47SJeff Kirsher static void alloc_rbufs(struct net_device *dev)
1152f2148a47SJeff Kirsher {
1153f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
1154*f7630d18SAlexey Charkov 	struct device *hwdev = dev->dev.parent;
1155f2148a47SJeff Kirsher 	dma_addr_t next;
1156f2148a47SJeff Kirsher 	int i;
1157f2148a47SJeff Kirsher 
1158f2148a47SJeff Kirsher 	rp->dirty_rx = rp->cur_rx = 0;
1159f2148a47SJeff Kirsher 
1160f2148a47SJeff Kirsher 	rp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
1161f2148a47SJeff Kirsher 	rp->rx_head_desc = &rp->rx_ring[0];
1162f2148a47SJeff Kirsher 	next = rp->rx_ring_dma;
1163f2148a47SJeff Kirsher 
1164f2148a47SJeff Kirsher 	/* Init the ring entries */
1165f2148a47SJeff Kirsher 	for (i = 0; i < RX_RING_SIZE; i++) {
1166f2148a47SJeff Kirsher 		rp->rx_ring[i].rx_status = 0;
1167f2148a47SJeff Kirsher 		rp->rx_ring[i].desc_length = cpu_to_le32(rp->rx_buf_sz);
1168f2148a47SJeff Kirsher 		next += sizeof(struct rx_desc);
1169f2148a47SJeff Kirsher 		rp->rx_ring[i].next_desc = cpu_to_le32(next);
1170f2148a47SJeff Kirsher 		rp->rx_skbuff[i] = NULL;
1171f2148a47SJeff Kirsher 	}
1172f2148a47SJeff Kirsher 	/* Mark the last entry as wrapping the ring. */
1173f2148a47SJeff Kirsher 	rp->rx_ring[i-1].next_desc = cpu_to_le32(rp->rx_ring_dma);
1174f2148a47SJeff Kirsher 
1175f2148a47SJeff Kirsher 	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
1176f2148a47SJeff Kirsher 	for (i = 0; i < RX_RING_SIZE; i++) {
1177f2148a47SJeff Kirsher 		struct sk_buff *skb = netdev_alloc_skb(dev, rp->rx_buf_sz);
1178f2148a47SJeff Kirsher 		rp->rx_skbuff[i] = skb;
1179f2148a47SJeff Kirsher 		if (skb == NULL)
1180f2148a47SJeff Kirsher 			break;
1181f2148a47SJeff Kirsher 
1182f2148a47SJeff Kirsher 		rp->rx_skbuff_dma[i] =
1183*f7630d18SAlexey Charkov 			dma_map_single(hwdev, skb->data, rp->rx_buf_sz,
11844087c4dcSAlexey Charkov 				       DMA_FROM_DEVICE);
1185*f7630d18SAlexey Charkov 		if (dma_mapping_error(hwdev, rp->rx_skbuff_dma[i])) {
11869b4fe5fbSNeil Horman 			rp->rx_skbuff_dma[i] = 0;
11879b4fe5fbSNeil Horman 			dev_kfree_skb(skb);
11889b4fe5fbSNeil Horman 			break;
11899b4fe5fbSNeil Horman 		}
1190f2148a47SJeff Kirsher 		rp->rx_ring[i].addr = cpu_to_le32(rp->rx_skbuff_dma[i]);
1191f2148a47SJeff Kirsher 		rp->rx_ring[i].rx_status = cpu_to_le32(DescOwn);
1192f2148a47SJeff Kirsher 	}
1193f2148a47SJeff Kirsher 	rp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
1194f2148a47SJeff Kirsher }
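/*
 * Worked example for the dirty_rx arithmetic above (illustrative
 * numbers): if every buffer was allocated, i == RX_RING_SIZE and
 * dirty_rx becomes 0. If netdev_alloc_skb() failed at, say,
 * i == RX_RING_SIZE - 3, the unsigned cast makes cur_rx - dirty_rx
 * evaluate to 3 in the refill loop of rhine_rx(), so exactly the three
 * missing buffers are retried there.
 */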
1195f2148a47SJeff Kirsher 
1196f2148a47SJeff Kirsher static void free_rbufs(struct net_device *dev)
1197f2148a47SJeff Kirsher {
1198f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
1199*f7630d18SAlexey Charkov 	struct device *hwdev = dev->dev.parent;
1200f2148a47SJeff Kirsher 	int i;
1201f2148a47SJeff Kirsher 
1202f2148a47SJeff Kirsher 	/* Free all the skbuffs in the Rx queue. */
1203f2148a47SJeff Kirsher 	for (i = 0; i < RX_RING_SIZE; i++) {
1204f2148a47SJeff Kirsher 		rp->rx_ring[i].rx_status = 0;
1205f2148a47SJeff Kirsher 		rp->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
1206f2148a47SJeff Kirsher 		if (rp->rx_skbuff[i]) {
1207*f7630d18SAlexey Charkov 			dma_unmap_single(hwdev,
1208f2148a47SJeff Kirsher 					 rp->rx_skbuff_dma[i],
12094087c4dcSAlexey Charkov 					 rp->rx_buf_sz, DMA_FROM_DEVICE);
1210f2148a47SJeff Kirsher 			dev_kfree_skb(rp->rx_skbuff[i]);
1211f2148a47SJeff Kirsher 		}
1212f2148a47SJeff Kirsher 		rp->rx_skbuff[i] = NULL;
1213f2148a47SJeff Kirsher 	}
1214f2148a47SJeff Kirsher }
1215f2148a47SJeff Kirsher 
1216f2148a47SJeff Kirsher static void alloc_tbufs(struct net_device *dev)
1217f2148a47SJeff Kirsher {
1218f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
1219f2148a47SJeff Kirsher 	dma_addr_t next;
1220f2148a47SJeff Kirsher 	int i;
1221f2148a47SJeff Kirsher 
1222f2148a47SJeff Kirsher 	rp->dirty_tx = rp->cur_tx = 0;
1223f2148a47SJeff Kirsher 	next = rp->tx_ring_dma;
1224f2148a47SJeff Kirsher 	for (i = 0; i < TX_RING_SIZE; i++) {
1225f2148a47SJeff Kirsher 		rp->tx_skbuff[i] = NULL;
1226f2148a47SJeff Kirsher 		rp->tx_ring[i].tx_status = 0;
1227f2148a47SJeff Kirsher 		rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
1228f2148a47SJeff Kirsher 		next += sizeof(struct tx_desc);
1229f2148a47SJeff Kirsher 		rp->tx_ring[i].next_desc = cpu_to_le32(next);
1230f2148a47SJeff Kirsher 		if (rp->quirks & rqRhineI)
1231f2148a47SJeff Kirsher 			rp->tx_buf[i] = &rp->tx_bufs[i * PKT_BUF_SZ];
1232f2148a47SJeff Kirsher 	}
1233f2148a47SJeff Kirsher 	rp->tx_ring[i-1].next_desc = cpu_to_le32(rp->tx_ring_dma);
1234f2148a47SJeff Kirsher 
1235f2148a47SJeff Kirsher }
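/*
 * Note: on Rhine-I each Tx slot also gets a bounce buffer, tx_buf[i],
 * carved out of the tx_bufs block at offset i * PKT_BUF_SZ. The probe
 * path advertises NETIF_F_SG/NETIF_F_HW_CSUM for Rhine-I, and
 * rhine_start_tx() satisfies both by copying and checksumming frames
 * into this aligned buffer (skb_copy_and_csum_dev) whenever the data
 * is misaligned, the skb has fragments, or the checksum is still
 * pending.
 */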
1236f2148a47SJeff Kirsher 
1237f2148a47SJeff Kirsher static void free_tbufs(struct net_device *dev)
1238f2148a47SJeff Kirsher {
1239f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
1240*f7630d18SAlexey Charkov 	struct device *hwdev = dev->dev.parent;
1241f2148a47SJeff Kirsher 	int i;
1242f2148a47SJeff Kirsher 
1243f2148a47SJeff Kirsher 	for (i = 0; i < TX_RING_SIZE; i++) {
1244f2148a47SJeff Kirsher 		rp->tx_ring[i].tx_status = 0;
1245f2148a47SJeff Kirsher 		rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
1246f2148a47SJeff Kirsher 		rp->tx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
1247f2148a47SJeff Kirsher 		if (rp->tx_skbuff[i]) {
1248f2148a47SJeff Kirsher 			if (rp->tx_skbuff_dma[i]) {
1249*f7630d18SAlexey Charkov 				dma_unmap_single(hwdev,
1250f2148a47SJeff Kirsher 						 rp->tx_skbuff_dma[i],
1251f2148a47SJeff Kirsher 						 rp->tx_skbuff[i]->len,
12524087c4dcSAlexey Charkov 						 DMA_TO_DEVICE);
1253f2148a47SJeff Kirsher 			}
1254f2148a47SJeff Kirsher 			dev_kfree_skb(rp->tx_skbuff[i]);
1255f2148a47SJeff Kirsher 		}
1256f2148a47SJeff Kirsher 		rp->tx_skbuff[i] = NULL;
1257f2148a47SJeff Kirsher 		rp->tx_buf[i] = NULL;
1258f2148a47SJeff Kirsher 	}
1259f2148a47SJeff Kirsher }
1260f2148a47SJeff Kirsher 
1261f2148a47SJeff Kirsher static void rhine_check_media(struct net_device *dev, unsigned int init_media)
1262f2148a47SJeff Kirsher {
1263f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
1264f2148a47SJeff Kirsher 	void __iomem *ioaddr = rp->base;
1265f2148a47SJeff Kirsher 
1266fc3e0f8aSFrancois Romieu 	mii_check_media(&rp->mii_if, netif_msg_link(rp), init_media);
1267f2148a47SJeff Kirsher 
1268f2148a47SJeff Kirsher 	if (rp->mii_if.full_duplex)
1269f2148a47SJeff Kirsher 	    iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1FDuplex,
1270f2148a47SJeff Kirsher 		   ioaddr + ChipCmd1);
1271f2148a47SJeff Kirsher 	else
1272f2148a47SJeff Kirsher 	    iowrite8(ioread8(ioaddr + ChipCmd1) & ~Cmd1FDuplex,
1273f2148a47SJeff Kirsher 		   ioaddr + ChipCmd1);
1274fc3e0f8aSFrancois Romieu 
1275fc3e0f8aSFrancois Romieu 	netif_info(rp, link, dev, "force_media %d, carrier %d\n",
1276f2148a47SJeff Kirsher 		   rp->mii_if.force_media, netif_carrier_ok(dev));
1277f2148a47SJeff Kirsher }
1278f2148a47SJeff Kirsher 
1279f2148a47SJeff Kirsher /* Called after status of force_media possibly changed */
1280f2148a47SJeff Kirsher static void rhine_set_carrier(struct mii_if_info *mii)
1281f2148a47SJeff Kirsher {
1282fc3e0f8aSFrancois Romieu 	struct net_device *dev = mii->dev;
1283fc3e0f8aSFrancois Romieu 	struct rhine_private *rp = netdev_priv(dev);
1284fc3e0f8aSFrancois Romieu 
1285f2148a47SJeff Kirsher 	if (mii->force_media) {
1286f2148a47SJeff Kirsher 		/* autoneg is off: Link is always assumed to be up */
1287fc3e0f8aSFrancois Romieu 		if (!netif_carrier_ok(dev))
1288fc3e0f8aSFrancois Romieu 			netif_carrier_on(dev);
1289fc3e0f8aSFrancois Romieu 	} else	/* Let the MII library update carrier status */
1290fc3e0f8aSFrancois Romieu 		rhine_check_media(dev, 0);
1291fc3e0f8aSFrancois Romieu 
1292fc3e0f8aSFrancois Romieu 	netif_info(rp, link, dev, "force_media %d, carrier %d\n",
1293fc3e0f8aSFrancois Romieu 		   mii->force_media, netif_carrier_ok(dev));
1294f2148a47SJeff Kirsher }
1295f2148a47SJeff Kirsher 
1296f2148a47SJeff Kirsher /**
1297f2148a47SJeff Kirsher  * rhine_set_cam - set CAM multicast filters
1298f2148a47SJeff Kirsher  * @ioaddr: register block of this Rhine
1299f2148a47SJeff Kirsher  * @idx: multicast CAM index [0..MCAM_SIZE-1]
1300f2148a47SJeff Kirsher  * @addr: multicast address (6 bytes)
1301f2148a47SJeff Kirsher  *
1302f2148a47SJeff Kirsher  * Load addresses into multicast filters.
1303f2148a47SJeff Kirsher  */
1304f2148a47SJeff Kirsher static void rhine_set_cam(void __iomem *ioaddr, int idx, u8 *addr)
1305f2148a47SJeff Kirsher {
1306f2148a47SJeff Kirsher 	int i;
1307f2148a47SJeff Kirsher 
1308f2148a47SJeff Kirsher 	iowrite8(CAMC_CAMEN, ioaddr + CamCon);
1309f2148a47SJeff Kirsher 	wmb();
1310f2148a47SJeff Kirsher 
1311f2148a47SJeff Kirsher 	/* Paranoid -- idx out of range should never happen */
1312f2148a47SJeff Kirsher 	idx &= (MCAM_SIZE - 1);
1313f2148a47SJeff Kirsher 
1314f2148a47SJeff Kirsher 	iowrite8((u8) idx, ioaddr + CamAddr);
1315f2148a47SJeff Kirsher 
1316f2148a47SJeff Kirsher 	for (i = 0; i < 6; i++, addr++)
1317f2148a47SJeff Kirsher 		iowrite8(*addr, ioaddr + MulticastFilter0 + i);
1318f2148a47SJeff Kirsher 	udelay(10);
1319f2148a47SJeff Kirsher 	wmb();
1320f2148a47SJeff Kirsher 
1321f2148a47SJeff Kirsher 	iowrite8(CAMC_CAMWR | CAMC_CAMEN, ioaddr + CamCon);
1322f2148a47SJeff Kirsher 	udelay(10);
1323f2148a47SJeff Kirsher 
1324f2148a47SJeff Kirsher 	iowrite8(0, ioaddr + CamCon);
1325f2148a47SJeff Kirsher }
1326f2148a47SJeff Kirsher 
1327f2148a47SJeff Kirsher /**
1328f2148a47SJeff Kirsher  * rhine_set_vlan_cam - set CAM VLAN filters
1329f2148a47SJeff Kirsher  * @ioaddr: register block of this Rhine
1330f2148a47SJeff Kirsher  * @idx: VLAN CAM index [0..VCAM_SIZE-1]
1331f2148a47SJeff Kirsher  * @addr: VLAN ID (2 bytes)
1332f2148a47SJeff Kirsher  *
1333f2148a47SJeff Kirsher  * Load addresses into VLAN filters.
1334f2148a47SJeff Kirsher  */
1335f2148a47SJeff Kirsher static void rhine_set_vlan_cam(void __iomem *ioaddr, int idx, u8 *addr)
1336f2148a47SJeff Kirsher {
1337f2148a47SJeff Kirsher 	iowrite8(CAMC_CAMEN | CAMC_VCAMSL, ioaddr + CamCon);
1338f2148a47SJeff Kirsher 	wmb();
1339f2148a47SJeff Kirsher 
1340f2148a47SJeff Kirsher 	/* Paranoid -- idx out of range should never happen */
1341f2148a47SJeff Kirsher 	idx &= (VCAM_SIZE - 1);
1342f2148a47SJeff Kirsher 
1343f2148a47SJeff Kirsher 	iowrite8((u8) idx, ioaddr + CamAddr);
1344f2148a47SJeff Kirsher 
1345f2148a47SJeff Kirsher 	iowrite16(*((u16 *) addr), ioaddr + MulticastFilter0 + 6);
1346f2148a47SJeff Kirsher 	udelay(10);
1347f2148a47SJeff Kirsher 	wmb();
1348f2148a47SJeff Kirsher 
1349f2148a47SJeff Kirsher 	iowrite8(CAMC_CAMWR | CAMC_CAMEN, ioaddr + CamCon);
1350f2148a47SJeff Kirsher 	udelay(10);
1351f2148a47SJeff Kirsher 
1352f2148a47SJeff Kirsher 	iowrite8(0, ioaddr + CamCon);
1353f2148a47SJeff Kirsher }
1354f2148a47SJeff Kirsher 
1355f2148a47SJeff Kirsher /**
1356f2148a47SJeff Kirsher  * rhine_set_cam_mask - set multicast CAM mask
1357f2148a47SJeff Kirsher  * @ioaddr: register block of this Rhine
1358f2148a47SJeff Kirsher  * @mask: multicast CAM mask
1359f2148a47SJeff Kirsher  *
1360f2148a47SJeff Kirsher  * Mask sets multicast filters active/inactive.
1361f2148a47SJeff Kirsher  */
1362f2148a47SJeff Kirsher static void rhine_set_cam_mask(void __iomem *ioaddr, u32 mask)
1363f2148a47SJeff Kirsher {
1364f2148a47SJeff Kirsher 	iowrite8(CAMC_CAMEN, ioaddr + CamCon);
1365f2148a47SJeff Kirsher 	wmb();
1366f2148a47SJeff Kirsher 
1367f2148a47SJeff Kirsher 	/* write mask */
1368f2148a47SJeff Kirsher 	iowrite32(mask, ioaddr + CamMask);
1369f2148a47SJeff Kirsher 
1370f2148a47SJeff Kirsher 	/* disable CAMEN */
1371f2148a47SJeff Kirsher 	iowrite8(0, ioaddr + CamCon);
1372f2148a47SJeff Kirsher }
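/*
 * Typical pairing of the two helpers above, as done later in
 * rhine_set_rx_mode() (a sketch, not additional driver logic):
 *
 *	rhine_set_cam(ioaddr, 0, ha->addr);	(program entry 0)
 *	rhine_set_cam_mask(ioaddr, 1 << 0);	(activate entry 0)
 *
 * Entries whose mask bit stays clear are ignored by the hardware.
 */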
1373f2148a47SJeff Kirsher 
1374f2148a47SJeff Kirsher /**
1375f2148a47SJeff Kirsher  * rhine_set_vlan_cam_mask - set VLAN CAM mask
1376f2148a47SJeff Kirsher  * @ioaddr: register block of this Rhine
1377f2148a47SJeff Kirsher  * @mask: VLAN CAM mask
1378f2148a47SJeff Kirsher  *
1379f2148a47SJeff Kirsher  * Mask sets VLAN filters active/inactive.
1380f2148a47SJeff Kirsher  */
1381f2148a47SJeff Kirsher static void rhine_set_vlan_cam_mask(void __iomem *ioaddr, u32 mask)
1382f2148a47SJeff Kirsher {
1383f2148a47SJeff Kirsher 	iowrite8(CAMC_CAMEN | CAMC_VCAMSL, ioaddr + CamCon);
1384f2148a47SJeff Kirsher 	wmb();
1385f2148a47SJeff Kirsher 
1386f2148a47SJeff Kirsher 	/* write mask */
1387f2148a47SJeff Kirsher 	iowrite32(mask, ioaddr + CamMask);
1388f2148a47SJeff Kirsher 
1389f2148a47SJeff Kirsher 	/* disable CAMEN */
1390f2148a47SJeff Kirsher 	iowrite8(0, ioaddr + CamCon);
1391f2148a47SJeff Kirsher }
1392f2148a47SJeff Kirsher 
1393f2148a47SJeff Kirsher /**
1394f2148a47SJeff Kirsher  * rhine_init_cam_filter - initialize CAM filters
1395f2148a47SJeff Kirsher  * @dev: network device
1396f2148a47SJeff Kirsher  *
1397f2148a47SJeff Kirsher  * Initialize (disable) hardware VLAN and multicast support on this
1398f2148a47SJeff Kirsher  * Rhine.
1399f2148a47SJeff Kirsher  */
1400f2148a47SJeff Kirsher static void rhine_init_cam_filter(struct net_device *dev)
1401f2148a47SJeff Kirsher {
1402f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
1403f2148a47SJeff Kirsher 	void __iomem *ioaddr = rp->base;
1404f2148a47SJeff Kirsher 
1405f2148a47SJeff Kirsher 	/* Disable all CAMs */
1406f2148a47SJeff Kirsher 	rhine_set_vlan_cam_mask(ioaddr, 0);
1407f2148a47SJeff Kirsher 	rhine_set_cam_mask(ioaddr, 0);
1408f2148a47SJeff Kirsher 
1409f2148a47SJeff Kirsher 	/* disable hardware VLAN support */
1410f2148a47SJeff Kirsher 	BYTE_REG_BITS_ON(TCR_PQEN, ioaddr + TxConfig);
1411f2148a47SJeff Kirsher 	BYTE_REG_BITS_OFF(BCR1_VIDFR, ioaddr + PCIBusConfig1);
1412f2148a47SJeff Kirsher }
1413f2148a47SJeff Kirsher 
1414f2148a47SJeff Kirsher /**
1415f2148a47SJeff Kirsher  * rhine_update_vcam - update VLAN CAM filters
1416f2148a47SJeff Kirsher  * @dev: network device
1417f2148a47SJeff Kirsher  *
1418f2148a47SJeff Kirsher  * Update VLAN CAM filters to match configuration change.
1419f2148a47SJeff Kirsher  */
1420f2148a47SJeff Kirsher static void rhine_update_vcam(struct net_device *dev)
1421f2148a47SJeff Kirsher {
1422f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
1423f2148a47SJeff Kirsher 	void __iomem *ioaddr = rp->base;
1424f2148a47SJeff Kirsher 	u16 vid;
1425f2148a47SJeff Kirsher 	u32 vCAMmask = 0;	/* 32 vCAMs (6105M and better) */
1426f2148a47SJeff Kirsher 	unsigned int i = 0;
1427f2148a47SJeff Kirsher 
1428f2148a47SJeff Kirsher 	for_each_set_bit(vid, rp->active_vlans, VLAN_N_VID) {
1429f2148a47SJeff Kirsher 		rhine_set_vlan_cam(ioaddr, i, (u8 *)&vid);
1430f2148a47SJeff Kirsher 		vCAMmask |= 1 << i;
1431f2148a47SJeff Kirsher 		if (++i >= VCAM_SIZE)
1432f2148a47SJeff Kirsher 			break;
1433f2148a47SJeff Kirsher 	}
1434f2148a47SJeff Kirsher 	rhine_set_vlan_cam_mask(ioaddr, vCAMmask);
1435f2148a47SJeff Kirsher }
1436f2148a47SJeff Kirsher 
143780d5c368SPatrick McHardy static int rhine_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
1438f2148a47SJeff Kirsher {
1439f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
1440f2148a47SJeff Kirsher 
14417ab87ff4SFrancois Romieu 	spin_lock_bh(&rp->lock);
1442f2148a47SJeff Kirsher 	set_bit(vid, rp->active_vlans);
1443f2148a47SJeff Kirsher 	rhine_update_vcam(dev);
14447ab87ff4SFrancois Romieu 	spin_unlock_bh(&rp->lock);
14458e586137SJiri Pirko 	return 0;
1446f2148a47SJeff Kirsher }
1447f2148a47SJeff Kirsher 
144880d5c368SPatrick McHardy static int rhine_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
1449f2148a47SJeff Kirsher {
1450f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
1451f2148a47SJeff Kirsher 
14527ab87ff4SFrancois Romieu 	spin_lock_bh(&rp->lock);
1453f2148a47SJeff Kirsher 	clear_bit(vid, rp->active_vlans);
1454f2148a47SJeff Kirsher 	rhine_update_vcam(dev);
14557ab87ff4SFrancois Romieu 	spin_unlock_bh(&rp->lock);
14568e586137SJiri Pirko 	return 0;
1457f2148a47SJeff Kirsher }
1458f2148a47SJeff Kirsher 
1459f2148a47SJeff Kirsher static void init_registers(struct net_device *dev)
1460f2148a47SJeff Kirsher {
1461f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
1462f2148a47SJeff Kirsher 	void __iomem *ioaddr = rp->base;
1463f2148a47SJeff Kirsher 	int i;
1464f2148a47SJeff Kirsher 
1465f2148a47SJeff Kirsher 	for (i = 0; i < 6; i++)
1466f2148a47SJeff Kirsher 		iowrite8(dev->dev_addr[i], ioaddr + StationAddr + i);
1467f2148a47SJeff Kirsher 
1468f2148a47SJeff Kirsher 	/* Initialize other registers. */
1469f2148a47SJeff Kirsher 	iowrite16(0x0006, ioaddr + PCIBusConfig);	/* Tune configuration??? */
1470f2148a47SJeff Kirsher 	/* Configure initial FIFO thresholds. */
1471f2148a47SJeff Kirsher 	iowrite8(0x20, ioaddr + TxConfig);
1472f2148a47SJeff Kirsher 	rp->tx_thresh = 0x20;
1473f2148a47SJeff Kirsher 	rp->rx_thresh = 0x60;		/* Written in rhine_set_rx_mode(). */
1474f2148a47SJeff Kirsher 
1475f2148a47SJeff Kirsher 	iowrite32(rp->rx_ring_dma, ioaddr + RxRingPtr);
1476f2148a47SJeff Kirsher 	iowrite32(rp->tx_ring_dma, ioaddr + TxRingPtr);
1477f2148a47SJeff Kirsher 
1478f2148a47SJeff Kirsher 	rhine_set_rx_mode(dev);
1479f2148a47SJeff Kirsher 
1480*f7630d18SAlexey Charkov 	if (rp->revision >= VT6105M)
1481f2148a47SJeff Kirsher 		rhine_init_cam_filter(dev);
1482f2148a47SJeff Kirsher 
1483f2148a47SJeff Kirsher 	napi_enable(&rp->napi);
1484f2148a47SJeff Kirsher 
14857ab87ff4SFrancois Romieu 	iowrite16(RHINE_EVENT & 0xffff, ioaddr + IntrEnable);
1486f2148a47SJeff Kirsher 
1487f2148a47SJeff Kirsher 	iowrite16(CmdStart | CmdTxOn | CmdRxOn | (Cmd1NoTxPoll << 8),
1488f2148a47SJeff Kirsher 	       ioaddr + ChipCmd);
1489f2148a47SJeff Kirsher 	rhine_check_media(dev, 1);
1490f2148a47SJeff Kirsher }
1491f2148a47SJeff Kirsher 
1492f2148a47SJeff Kirsher /* Enable MII link status auto-polling (required for IntrLinkChange) */
1493a384a33bSFrancois Romieu static void rhine_enable_linkmon(struct rhine_private *rp)
1494f2148a47SJeff Kirsher {
1495a384a33bSFrancois Romieu 	void __iomem *ioaddr = rp->base;
1496a384a33bSFrancois Romieu 
1497f2148a47SJeff Kirsher 	iowrite8(0, ioaddr + MIICmd);
1498f2148a47SJeff Kirsher 	iowrite8(MII_BMSR, ioaddr + MIIRegAddr);
1499f2148a47SJeff Kirsher 	iowrite8(0x80, ioaddr + MIICmd);
1500f2148a47SJeff Kirsher 
1501a384a33bSFrancois Romieu 	rhine_wait_bit_high(rp, MIIRegAddr, 0x20);
1502f2148a47SJeff Kirsher 
1503f2148a47SJeff Kirsher 	iowrite8(MII_BMSR | 0x40, ioaddr + MIIRegAddr);
1504f2148a47SJeff Kirsher }
1505f2148a47SJeff Kirsher 
1506f2148a47SJeff Kirsher /* Disable MII link status auto-polling (required for MDIO access) */
1507a384a33bSFrancois Romieu static void rhine_disable_linkmon(struct rhine_private *rp)
1508f2148a47SJeff Kirsher {
1509a384a33bSFrancois Romieu 	void __iomem *ioaddr = rp->base;
1510a384a33bSFrancois Romieu 
1511f2148a47SJeff Kirsher 	iowrite8(0, ioaddr + MIICmd);
1512f2148a47SJeff Kirsher 
1513a384a33bSFrancois Romieu 	if (rp->quirks & rqRhineI) {
1514f2148a47SJeff Kirsher 		iowrite8(0x01, ioaddr + MIIRegAddr);	// MII_BMSR
1515f2148a47SJeff Kirsher 
1516f2148a47SJeff Kirsher 		/* Can be called from ISR. Evil. */
1517f2148a47SJeff Kirsher 		mdelay(1);
1518f2148a47SJeff Kirsher 
1519f2148a47SJeff Kirsher 		/* 0x80 must be set immediately before turning it off */
1520f2148a47SJeff Kirsher 		iowrite8(0x80, ioaddr + MIICmd);
1521f2148a47SJeff Kirsher 
1522a384a33bSFrancois Romieu 		rhine_wait_bit_high(rp, MIIRegAddr, 0x20);
1523f2148a47SJeff Kirsher 
1524f2148a47SJeff Kirsher 		/* Heh. Now clear 0x80 again. */
1525f2148a47SJeff Kirsher 		iowrite8(0, ioaddr + MIICmd);
1526f2148a47SJeff Kirsher 	}
1527f2148a47SJeff Kirsher 	} else
1529f2148a47SJeff Kirsher }
1530f2148a47SJeff Kirsher 
1531f2148a47SJeff Kirsher /* Read and write over the MII Management Data I/O (MDIO) interface. */
1532f2148a47SJeff Kirsher 
1533f2148a47SJeff Kirsher static int mdio_read(struct net_device *dev, int phy_id, int regnum)
1534f2148a47SJeff Kirsher {
1535f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
1536f2148a47SJeff Kirsher 	void __iomem *ioaddr = rp->base;
1537f2148a47SJeff Kirsher 	int result;
1538f2148a47SJeff Kirsher 
1539a384a33bSFrancois Romieu 	rhine_disable_linkmon(rp);
1540f2148a47SJeff Kirsher 
1541f2148a47SJeff Kirsher 	/* rhine_disable_linkmon already cleared MIICmd */
1542f2148a47SJeff Kirsher 	iowrite8(phy_id, ioaddr + MIIPhyAddr);
1543f2148a47SJeff Kirsher 	iowrite8(regnum, ioaddr + MIIRegAddr);
1544f2148a47SJeff Kirsher 	iowrite8(0x40, ioaddr + MIICmd);		/* Trigger read */
1545a384a33bSFrancois Romieu 	rhine_wait_bit_low(rp, MIICmd, 0x40);
1546f2148a47SJeff Kirsher 	result = ioread16(ioaddr + MIIData);
1547f2148a47SJeff Kirsher 
1548a384a33bSFrancois Romieu 	rhine_enable_linkmon(rp);
1549f2148a47SJeff Kirsher 	return result;
1550f2148a47SJeff Kirsher }
1551f2148a47SJeff Kirsher 
1552f2148a47SJeff Kirsher static void mdio_write(struct net_device *dev, int phy_id, int regnum, int value)
1553f2148a47SJeff Kirsher {
1554f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
1555f2148a47SJeff Kirsher 	void __iomem *ioaddr = rp->base;
1556f2148a47SJeff Kirsher 
1557a384a33bSFrancois Romieu 	rhine_disable_linkmon(rp);
1558f2148a47SJeff Kirsher 
1559f2148a47SJeff Kirsher 	/* rhine_disable_linkmon already cleared MIICmd */
1560f2148a47SJeff Kirsher 	iowrite8(phy_id, ioaddr + MIIPhyAddr);
1561f2148a47SJeff Kirsher 	iowrite8(regnum, ioaddr + MIIRegAddr);
1562f2148a47SJeff Kirsher 	iowrite16(value, ioaddr + MIIData);
1563f2148a47SJeff Kirsher 	iowrite8(0x20, ioaddr + MIICmd);		/* Trigger write */
1564a384a33bSFrancois Romieu 	rhine_wait_bit_low(rp, MIICmd, 0x20);
1565f2148a47SJeff Kirsher 
1566a384a33bSFrancois Romieu 	rhine_enable_linkmon(rp);
1567f2148a47SJeff Kirsher }
1568f2148a47SJeff Kirsher 
15697ab87ff4SFrancois Romieu static void rhine_task_disable(struct rhine_private *rp)
15707ab87ff4SFrancois Romieu {
15717ab87ff4SFrancois Romieu 	mutex_lock(&rp->task_lock);
15727ab87ff4SFrancois Romieu 	rp->task_enable = false;
15737ab87ff4SFrancois Romieu 	mutex_unlock(&rp->task_lock);
15747ab87ff4SFrancois Romieu 
15757ab87ff4SFrancois Romieu 	cancel_work_sync(&rp->slow_event_task);
15767ab87ff4SFrancois Romieu 	cancel_work_sync(&rp->reset_task);
15777ab87ff4SFrancois Romieu }
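/*
 * Ordering note: task_enable is cleared under task_lock before the
 * cancel_work_sync() calls, so a task that is already running either
 * acquires the mutex and sees task_enable == false, or completes before
 * cancel_work_sync() returns; either way nothing runs afterwards.
 */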
15787ab87ff4SFrancois Romieu 
15797ab87ff4SFrancois Romieu static void rhine_task_enable(struct rhine_private *rp)
15807ab87ff4SFrancois Romieu {
15817ab87ff4SFrancois Romieu 	mutex_lock(&rp->task_lock);
15827ab87ff4SFrancois Romieu 	rp->task_enable = true;
15837ab87ff4SFrancois Romieu 	mutex_unlock(&rp->task_lock);
15847ab87ff4SFrancois Romieu }
15857ab87ff4SFrancois Romieu 
1586f2148a47SJeff Kirsher static int rhine_open(struct net_device *dev)
1587f2148a47SJeff Kirsher {
1588f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
1589f2148a47SJeff Kirsher 	void __iomem *ioaddr = rp->base;
1590f2148a47SJeff Kirsher 	int rc;
1591f2148a47SJeff Kirsher 
1592*f7630d18SAlexey Charkov 	rc = request_irq(rp->irq, rhine_interrupt, IRQF_SHARED, dev->name, dev);
1593f2148a47SJeff Kirsher 	if (rc)
1594f2148a47SJeff Kirsher 		return rc;
1595f2148a47SJeff Kirsher 
1596*f7630d18SAlexey Charkov 	netif_dbg(rp, ifup, dev, "%s() irq %d\n", __func__, rp->irq);
1597f2148a47SJeff Kirsher 
1598f2148a47SJeff Kirsher 	rc = alloc_ring(dev);
1599f2148a47SJeff Kirsher 	if (rc) {
1600*f7630d18SAlexey Charkov 		free_irq(rp->irq, dev);
1601f2148a47SJeff Kirsher 		return rc;
1602f2148a47SJeff Kirsher 	}
1603f2148a47SJeff Kirsher 	alloc_rbufs(dev);
1604f2148a47SJeff Kirsher 	alloc_tbufs(dev);
1605f2148a47SJeff Kirsher 	rhine_chip_reset(dev);
16067ab87ff4SFrancois Romieu 	rhine_task_enable(rp);
1607f2148a47SJeff Kirsher 	init_registers(dev);
1608fc3e0f8aSFrancois Romieu 
1609fc3e0f8aSFrancois Romieu 	netif_dbg(rp, ifup, dev, "%s() Done - status %04x MII status: %04x\n",
1610f2148a47SJeff Kirsher 		  __func__, ioread16(ioaddr + ChipCmd),
1611f2148a47SJeff Kirsher 		  mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
1612f2148a47SJeff Kirsher 
1613f2148a47SJeff Kirsher 	netif_start_queue(dev);
1614f2148a47SJeff Kirsher 
1615f2148a47SJeff Kirsher 	return 0;
1616f2148a47SJeff Kirsher }
1617f2148a47SJeff Kirsher 
1618f2148a47SJeff Kirsher static void rhine_reset_task(struct work_struct *work)
1619f2148a47SJeff Kirsher {
1620f2148a47SJeff Kirsher 	struct rhine_private *rp = container_of(work, struct rhine_private,
1621f2148a47SJeff Kirsher 						reset_task);
1622f2148a47SJeff Kirsher 	struct net_device *dev = rp->dev;
1623f2148a47SJeff Kirsher 
16247ab87ff4SFrancois Romieu 	mutex_lock(&rp->task_lock);
16257ab87ff4SFrancois Romieu 
16267ab87ff4SFrancois Romieu 	if (!rp->task_enable)
16277ab87ff4SFrancois Romieu 		goto out_unlock;
1628f2148a47SJeff Kirsher 
1629f2148a47SJeff Kirsher 	napi_disable(&rp->napi);
1630a926592fSRichard Weinberger 	netif_tx_disable(dev);
1631f2148a47SJeff Kirsher 	spin_lock_bh(&rp->lock);
1632f2148a47SJeff Kirsher 
1633f2148a47SJeff Kirsher 	/* clear all descriptors */
1634f2148a47SJeff Kirsher 	free_tbufs(dev);
1635f2148a47SJeff Kirsher 	free_rbufs(dev);
1636f2148a47SJeff Kirsher 	alloc_tbufs(dev);
1637f2148a47SJeff Kirsher 	alloc_rbufs(dev);
1638f2148a47SJeff Kirsher 
1639f2148a47SJeff Kirsher 	/* Reinitialize the hardware. */
1640f2148a47SJeff Kirsher 	rhine_chip_reset(dev);
1641f2148a47SJeff Kirsher 	init_registers(dev);
1642f2148a47SJeff Kirsher 
1643f2148a47SJeff Kirsher 	spin_unlock_bh(&rp->lock);
1644f2148a47SJeff Kirsher 
1645f2148a47SJeff Kirsher 	dev->trans_start = jiffies; /* prevent tx timeout */
1646f2148a47SJeff Kirsher 	dev->stats.tx_errors++;
1647f2148a47SJeff Kirsher 	netif_wake_queue(dev);
16487ab87ff4SFrancois Romieu 
16497ab87ff4SFrancois Romieu out_unlock:
16507ab87ff4SFrancois Romieu 	mutex_unlock(&rp->task_lock);
1651f2148a47SJeff Kirsher }
1652f2148a47SJeff Kirsher 
1653f2148a47SJeff Kirsher static void rhine_tx_timeout(struct net_device *dev)
1654f2148a47SJeff Kirsher {
1655f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
1656f2148a47SJeff Kirsher 	void __iomem *ioaddr = rp->base;
1657f2148a47SJeff Kirsher 
1658f2148a47SJeff Kirsher 	netdev_warn(dev, "Transmit timed out, status %04x, PHY status %04x, resetting...\n",
1659f2148a47SJeff Kirsher 		    ioread16(ioaddr + IntrStatus),
1660f2148a47SJeff Kirsher 		    mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
1661f2148a47SJeff Kirsher 
1662f2148a47SJeff Kirsher 	schedule_work(&rp->reset_task);
1663f2148a47SJeff Kirsher }
1664f2148a47SJeff Kirsher 
1665f2148a47SJeff Kirsher static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
1666f2148a47SJeff Kirsher 				  struct net_device *dev)
1667f2148a47SJeff Kirsher {
1668f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
1669*f7630d18SAlexey Charkov 	struct device *hwdev = dev->dev.parent;
1670f2148a47SJeff Kirsher 	void __iomem *ioaddr = rp->base;
1671f2148a47SJeff Kirsher 	unsigned entry;
1672f2148a47SJeff Kirsher 
1673f2148a47SJeff Kirsher 	/* Caution: the write order is important here, set the field
1674f2148a47SJeff Kirsher 	   with the "ownership" bits last. */
1675f2148a47SJeff Kirsher 
1676f2148a47SJeff Kirsher 	/* Calculate the next Tx descriptor entry. */
1677f2148a47SJeff Kirsher 	entry = rp->cur_tx % TX_RING_SIZE;
1678f2148a47SJeff Kirsher 
1679f2148a47SJeff Kirsher 	if (skb_padto(skb, ETH_ZLEN))
1680f2148a47SJeff Kirsher 		return NETDEV_TX_OK;
1681f2148a47SJeff Kirsher 
1682f2148a47SJeff Kirsher 	rp->tx_skbuff[entry] = skb;
1683f2148a47SJeff Kirsher 
1684f2148a47SJeff Kirsher 	if ((rp->quirks & rqRhineI) &&
1685f2148a47SJeff Kirsher 	    (((unsigned long)skb->data & 3) || skb_shinfo(skb)->nr_frags != 0 || skb->ip_summed == CHECKSUM_PARTIAL)) {
1686f2148a47SJeff Kirsher 		/* Must use alignment buffer. */
1687f2148a47SJeff Kirsher 		if (skb->len > PKT_BUF_SZ) {
1688f2148a47SJeff Kirsher 			/* packet too long, drop it */
16894b3afc6eSEric W. Biederman 			dev_kfree_skb_any(skb);
1690f2148a47SJeff Kirsher 			rp->tx_skbuff[entry] = NULL;
1691f2148a47SJeff Kirsher 			dev->stats.tx_dropped++;
1692f2148a47SJeff Kirsher 			return NETDEV_TX_OK;
1693f2148a47SJeff Kirsher 		}
1694f2148a47SJeff Kirsher 
1695f2148a47SJeff Kirsher 		/* Padding is not copied and so must be redone. */
1696f2148a47SJeff Kirsher 		skb_copy_and_csum_dev(skb, rp->tx_buf[entry]);
1697f2148a47SJeff Kirsher 		if (skb->len < ETH_ZLEN)
1698f2148a47SJeff Kirsher 			memset(rp->tx_buf[entry] + skb->len, 0,
1699f2148a47SJeff Kirsher 			       ETH_ZLEN - skb->len);
1700f2148a47SJeff Kirsher 		rp->tx_skbuff_dma[entry] = 0;
1701f2148a47SJeff Kirsher 		rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_bufs_dma +
1702f2148a47SJeff Kirsher 						      (rp->tx_buf[entry] -
1703f2148a47SJeff Kirsher 						       rp->tx_bufs));
1704f2148a47SJeff Kirsher 	} else {
1705f2148a47SJeff Kirsher 		rp->tx_skbuff_dma[entry] =
1706*f7630d18SAlexey Charkov 			dma_map_single(hwdev, skb->data, skb->len,
17074087c4dcSAlexey Charkov 				       DMA_TO_DEVICE);
1708*f7630d18SAlexey Charkov 		if (dma_mapping_error(hwdev, rp->tx_skbuff_dma[entry])) {
17094b3afc6eSEric W. Biederman 			dev_kfree_skb_any(skb);
17109b4fe5fbSNeil Horman 			rp->tx_skbuff_dma[entry] = 0;
17119b4fe5fbSNeil Horman 			dev->stats.tx_dropped++;
17129b4fe5fbSNeil Horman 			return NETDEV_TX_OK;
17139b4fe5fbSNeil Horman 		}
1714f2148a47SJeff Kirsher 		rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_skbuff_dma[entry]);
1715f2148a47SJeff Kirsher 	}
1716f2148a47SJeff Kirsher 
1717f2148a47SJeff Kirsher 	rp->tx_ring[entry].desc_length =
1718f2148a47SJeff Kirsher 		cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));
1719f2148a47SJeff Kirsher 
1720f2148a47SJeff Kirsher 	if (unlikely(vlan_tx_tag_present(skb))) {
1721207070f5SRoger Luethi 		u16 vid_pcp = vlan_tx_tag_get(skb);
1722207070f5SRoger Luethi 
1723207070f5SRoger Luethi 		/* drop CFI/DEI bit, register needs VID and PCP */
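		/* Worked example (VLAN_VID_MASK 0x0fff, VLAN_PRIO_MASK
		 * 0xe000): TCI 0xa005 = PCP 5, DEI 0, VID 5 becomes
		 * 0x0005 | (0xa000 >> 1) = 0x5005, i.e. PCP moves from
		 * bits 15:13 to 14:12 and the DEI bit is dropped.
		 */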
1724207070f5SRoger Luethi 		vid_pcp = (vid_pcp & VLAN_VID_MASK) |
1725207070f5SRoger Luethi 			  ((vid_pcp & VLAN_PRIO_MASK) >> 1);
1726207070f5SRoger Luethi 		rp->tx_ring[entry].tx_status = cpu_to_le32((vid_pcp) << 16);
1727f2148a47SJeff Kirsher 		/* request tagging */
1728f2148a47SJeff Kirsher 		rp->tx_ring[entry].desc_length |= cpu_to_le32(0x020000);
1729f2148a47SJeff Kirsher 	} else
1731f2148a47SJeff Kirsher 		rp->tx_ring[entry].tx_status = 0;
1732f2148a47SJeff Kirsher 
1733f2148a47SJeff Kirsher 	/* lock eth irq */
1734f2148a47SJeff Kirsher 	wmb();
1735f2148a47SJeff Kirsher 	rp->tx_ring[entry].tx_status |= cpu_to_le32(DescOwn);
1736f2148a47SJeff Kirsher 	wmb();
1737f2148a47SJeff Kirsher 
1738f2148a47SJeff Kirsher 	rp->cur_tx++;
1739f2148a47SJeff Kirsher 
1740f2148a47SJeff Kirsher 	/* Non-x86 Todo: explicitly flush cache lines here. */
1741f2148a47SJeff Kirsher 
1742f2148a47SJeff Kirsher 	if (vlan_tx_tag_present(skb))
1743f2148a47SJeff Kirsher 		/* Tx queues are bits 7-0 (first Tx queue: bit 7) */
1744f2148a47SJeff Kirsher 		BYTE_REG_BITS_ON(1 << 7, ioaddr + TQWake);
1745f2148a47SJeff Kirsher 
1746f2148a47SJeff Kirsher 	/* Wake the potentially-idle transmit channel */
1747f2148a47SJeff Kirsher 	iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
1748f2148a47SJeff Kirsher 	       ioaddr + ChipCmd1);
1749f2148a47SJeff Kirsher 	IOSYNC;
1750f2148a47SJeff Kirsher 
1751f2148a47SJeff Kirsher 	if (rp->cur_tx == rp->dirty_tx + TX_QUEUE_LEN)
1752f2148a47SJeff Kirsher 		netif_stop_queue(dev);
1753f2148a47SJeff Kirsher 
1754fc3e0f8aSFrancois Romieu 	netif_dbg(rp, tx_queued, dev, "Transmit frame #%d queued in slot %d\n",
1755f2148a47SJeff Kirsher 		  rp->cur_tx - 1, entry);
1756fc3e0f8aSFrancois Romieu 
1757f2148a47SJeff Kirsher 	return NETDEV_TX_OK;
1758f2148a47SJeff Kirsher }
1759f2148a47SJeff Kirsher 
17607ab87ff4SFrancois Romieu static void rhine_irq_disable(struct rhine_private *rp)
17617ab87ff4SFrancois Romieu {
17627ab87ff4SFrancois Romieu 	iowrite16(0x0000, rp->base + IntrEnable);
17637ab87ff4SFrancois Romieu 	mmiowb();
17647ab87ff4SFrancois Romieu }
17657ab87ff4SFrancois Romieu 
1766f2148a47SJeff Kirsher /* The interrupt handler does all of the Rx thread work and cleans up
1767f2148a47SJeff Kirsher    after the Tx thread. */
1768f2148a47SJeff Kirsher static irqreturn_t rhine_interrupt(int irq, void *dev_instance)
1769f2148a47SJeff Kirsher {
1770f2148a47SJeff Kirsher 	struct net_device *dev = dev_instance;
1771f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
17727ab87ff4SFrancois Romieu 	u32 status;
1773f2148a47SJeff Kirsher 	int handled = 0;
1774f2148a47SJeff Kirsher 
17757ab87ff4SFrancois Romieu 	status = rhine_get_events(rp);
1776f2148a47SJeff Kirsher 
1777fc3e0f8aSFrancois Romieu 	netif_dbg(rp, intr, dev, "Interrupt, status %08x\n", status);
1778f2148a47SJeff Kirsher 
17797ab87ff4SFrancois Romieu 	if (status & RHINE_EVENT) {
17807ab87ff4SFrancois Romieu 		handled = 1;
1781f2148a47SJeff Kirsher 
17827ab87ff4SFrancois Romieu 		rhine_irq_disable(rp);
1783f2148a47SJeff Kirsher 		napi_schedule(&rp->napi);
1784f2148a47SJeff Kirsher 	}
1785f2148a47SJeff Kirsher 
17867ab87ff4SFrancois Romieu 	if (status & ~(IntrLinkChange | IntrStatsMax | RHINE_EVENT_NAPI)) {
1787fc3e0f8aSFrancois Romieu 		netif_err(rp, intr, dev, "Something Wicked happened! %08x\n",
17887ab87ff4SFrancois Romieu 			  status);
1789f2148a47SJeff Kirsher 	}
1790f2148a47SJeff Kirsher 
1791f2148a47SJeff Kirsher 	return IRQ_RETVAL(handled);
1792f2148a47SJeff Kirsher }
1793f2148a47SJeff Kirsher 
1794f2148a47SJeff Kirsher /* This routine is logically part of the interrupt handler, but isolated
1795f2148a47SJeff Kirsher    for clarity. */
1796f2148a47SJeff Kirsher static void rhine_tx(struct net_device *dev)
1797f2148a47SJeff Kirsher {
1798f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
1799*f7630d18SAlexey Charkov 	struct device *hwdev = dev->dev.parent;
1800f2148a47SJeff Kirsher 	int txstatus = 0, entry = rp->dirty_tx % TX_RING_SIZE;
1801f2148a47SJeff Kirsher 
1802f2148a47SJeff Kirsher 	/* find and clean up dirty tx descriptors */
1803f2148a47SJeff Kirsher 	while (rp->dirty_tx != rp->cur_tx) {
1804f2148a47SJeff Kirsher 		txstatus = le32_to_cpu(rp->tx_ring[entry].tx_status);
1805fc3e0f8aSFrancois Romieu 		netif_dbg(rp, tx_done, dev, "Tx scavenge %d status %08x\n",
1806f2148a47SJeff Kirsher 			  entry, txstatus);
1807f2148a47SJeff Kirsher 		if (txstatus & DescOwn)
1808f2148a47SJeff Kirsher 			break;
1809f2148a47SJeff Kirsher 		if (txstatus & 0x8000) {
1810fc3e0f8aSFrancois Romieu 			netif_dbg(rp, tx_done, dev,
1811fc3e0f8aSFrancois Romieu 				  "Transmit error, Tx status %08x\n", txstatus);
1812f2148a47SJeff Kirsher 			dev->stats.tx_errors++;
1813f2148a47SJeff Kirsher 			if (txstatus & 0x0400)
1814f2148a47SJeff Kirsher 				dev->stats.tx_carrier_errors++;
1815f2148a47SJeff Kirsher 			if (txstatus & 0x0200)
1816f2148a47SJeff Kirsher 				dev->stats.tx_window_errors++;
1817f2148a47SJeff Kirsher 			if (txstatus & 0x0100)
1818f2148a47SJeff Kirsher 				dev->stats.tx_aborted_errors++;
1819f2148a47SJeff Kirsher 			if (txstatus & 0x0080)
1820f2148a47SJeff Kirsher 				dev->stats.tx_heartbeat_errors++;
1821f2148a47SJeff Kirsher 			if (((rp->quirks & rqRhineI) && txstatus & 0x0002) ||
1822f2148a47SJeff Kirsher 			    (txstatus & 0x0800) || (txstatus & 0x1000)) {
1823f2148a47SJeff Kirsher 				dev->stats.tx_fifo_errors++;
1824f2148a47SJeff Kirsher 				rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
1825f2148a47SJeff Kirsher 				break; /* Keep the skb - we try again */
1826f2148a47SJeff Kirsher 			}
1827f2148a47SJeff Kirsher 			/* Transmitter restarted in 'abnormal' handler. */
1828f2148a47SJeff Kirsher 		} else {
1829f2148a47SJeff Kirsher 			if (rp->quirks & rqRhineI)
1830f2148a47SJeff Kirsher 				dev->stats.collisions += (txstatus >> 3) & 0x0F;
1831f2148a47SJeff Kirsher 			else
1832f2148a47SJeff Kirsher 				dev->stats.collisions += txstatus & 0x0F;
1833fc3e0f8aSFrancois Romieu 			netif_dbg(rp, tx_done, dev, "collisions: %1.1x:%1.1x\n",
1834fc3e0f8aSFrancois Romieu 				  (txstatus >> 3) & 0xF, txstatus & 0xF);
1835f7b5d1b9SJamie Gloudon 
1836f7b5d1b9SJamie Gloudon 			u64_stats_update_begin(&rp->tx_stats.syncp);
1837f7b5d1b9SJamie Gloudon 			rp->tx_stats.bytes += rp->tx_skbuff[entry]->len;
1838f7b5d1b9SJamie Gloudon 			rp->tx_stats.packets++;
1839f7b5d1b9SJamie Gloudon 			u64_stats_update_end(&rp->tx_stats.syncp);
1840f2148a47SJeff Kirsher 		}
1841f2148a47SJeff Kirsher 		/* Free the original skb. */
1842f2148a47SJeff Kirsher 		if (rp->tx_skbuff_dma[entry]) {
1843*f7630d18SAlexey Charkov 			dma_unmap_single(hwdev,
1844f2148a47SJeff Kirsher 					 rp->tx_skbuff_dma[entry],
1845f2148a47SJeff Kirsher 					 rp->tx_skbuff[entry]->len,
18464087c4dcSAlexey Charkov 					 DMA_TO_DEVICE);
1847f2148a47SJeff Kirsher 		}
18484b3afc6eSEric W. Biederman 		dev_consume_skb_any(rp->tx_skbuff[entry]);
1849f2148a47SJeff Kirsher 		rp->tx_skbuff[entry] = NULL;
1850f2148a47SJeff Kirsher 		entry = (++rp->dirty_tx) % TX_RING_SIZE;
1851f2148a47SJeff Kirsher 	}
1852f2148a47SJeff Kirsher 	if ((rp->cur_tx - rp->dirty_tx) < TX_QUEUE_LEN - 4)
1853f2148a47SJeff Kirsher 		netif_wake_queue(dev);
1854f2148a47SJeff Kirsher }
1855f2148a47SJeff Kirsher 
1856f2148a47SJeff Kirsher /**
1857f2148a47SJeff Kirsher  * rhine_get_vlan_tci - extract TCI from Rx data buffer
1858f2148a47SJeff Kirsher  * @skb: pointer to sk_buff
1859f2148a47SJeff Kirsher  * @data_size: used data area of the buffer including CRC
1860f2148a47SJeff Kirsher  *
1861f2148a47SJeff Kirsher  * If hardware VLAN tag extraction is enabled and the chip indicates an 802.1Q
1862f2148a47SJeff Kirsher  * packet, the extracted 802.1Q header (2 bytes TPID + 2 bytes TCI) is 4-byte
1863f2148a47SJeff Kirsher  * aligned following the CRC.
1864f2148a47SJeff Kirsher  */
1865f2148a47SJeff Kirsher static inline u16 rhine_get_vlan_tci(struct sk_buff *skb, int data_size)
1866f2148a47SJeff Kirsher {
1867f2148a47SJeff Kirsher 	u8 *trailer = (u8 *)skb->data + ((data_size + 3) & ~3) + 2;
1868f2148a47SJeff Kirsher 	return be16_to_cpup((__be16 *)trailer);
1869f2148a47SJeff Kirsher }
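/*
 * Offset example for the helper above: with data_size = 61 (payload
 * plus CRC), ((61 + 3) & ~3) = 64, so the TPID occupies bytes 64-65 of
 * the buffer and the TCI returned here is read from bytes 66-67.
 */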
1870f2148a47SJeff Kirsher 
1871f2148a47SJeff Kirsher /* Process up to limit frames from receive ring */
1872f2148a47SJeff Kirsher static int rhine_rx(struct net_device *dev, int limit)
1873f2148a47SJeff Kirsher {
1874f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
1875*f7630d18SAlexey Charkov 	struct device *hwdev = dev->dev.parent;
1876f2148a47SJeff Kirsher 	int count;
1877f2148a47SJeff Kirsher 	int entry = rp->cur_rx % RX_RING_SIZE;
1878f2148a47SJeff Kirsher 
1879fc3e0f8aSFrancois Romieu 	netif_dbg(rp, rx_status, dev, "%s(), entry %d status %08x\n", __func__,
1880fc3e0f8aSFrancois Romieu 		  entry, le32_to_cpu(rp->rx_head_desc->rx_status));
1881f2148a47SJeff Kirsher 
1882f2148a47SJeff Kirsher 	/* If EOP is set on the next entry, it's a new packet. Send it up. */
1883f2148a47SJeff Kirsher 	for (count = 0; count < limit; ++count) {
1884f2148a47SJeff Kirsher 		struct rx_desc *desc = rp->rx_head_desc;
1885f2148a47SJeff Kirsher 		u32 desc_status = le32_to_cpu(desc->rx_status);
1886f2148a47SJeff Kirsher 		u32 desc_length = le32_to_cpu(desc->desc_length);
1887f2148a47SJeff Kirsher 		int data_size = desc_status >> 16;
1888f2148a47SJeff Kirsher 
1889f2148a47SJeff Kirsher 		if (desc_status & DescOwn)
1890f2148a47SJeff Kirsher 			break;
1891f2148a47SJeff Kirsher 
1892fc3e0f8aSFrancois Romieu 		netif_dbg(rp, rx_status, dev, "%s() status %08x\n", __func__,
1893fc3e0f8aSFrancois Romieu 			  desc_status);
1894f2148a47SJeff Kirsher 
1895f2148a47SJeff Kirsher 		if ((desc_status & (RxWholePkt | RxErr)) != RxWholePkt) {
1896f2148a47SJeff Kirsher 			if ((desc_status & RxWholePkt) != RxWholePkt) {
1897f2148a47SJeff Kirsher 				netdev_warn(dev,
1898f2148a47SJeff Kirsher 	"Oversized Ethernet frame spanned multiple buffers, "
1899f2148a47SJeff Kirsher 	"entry %#x length %d status %08x!\n",
1900f2148a47SJeff Kirsher 					    entry, data_size,
1901f2148a47SJeff Kirsher 					    desc_status);
1902f2148a47SJeff Kirsher 				netdev_warn(dev,
1903f2148a47SJeff Kirsher 					    "Oversized Ethernet frame %p vs %p\n",
1904f2148a47SJeff Kirsher 					    rp->rx_head_desc,
1905f2148a47SJeff Kirsher 					    &rp->rx_ring[entry]);
1906f2148a47SJeff Kirsher 				dev->stats.rx_length_errors++;
1907f2148a47SJeff Kirsher 			} else if (desc_status & RxErr) {
1908f2148a47SJeff Kirsher 				/* There was a error. */
1909fc3e0f8aSFrancois Romieu 				netif_dbg(rp, rx_err, dev,
1910fc3e0f8aSFrancois Romieu 					  "%s() Rx error %08x\n", __func__,
1911fc3e0f8aSFrancois Romieu 					  desc_status);
1912f2148a47SJeff Kirsher 				dev->stats.rx_errors++;
1913f2148a47SJeff Kirsher 				if (desc_status & 0x0030)
1914f2148a47SJeff Kirsher 					dev->stats.rx_length_errors++;
1915f2148a47SJeff Kirsher 				if (desc_status & 0x0048)
1916f2148a47SJeff Kirsher 					dev->stats.rx_fifo_errors++;
1917f2148a47SJeff Kirsher 				if (desc_status & 0x0004)
1918f2148a47SJeff Kirsher 					dev->stats.rx_frame_errors++;
1919f2148a47SJeff Kirsher 				if (desc_status & 0x0002) {
1920f2148a47SJeff Kirsher 					/* this can also be updated outside the interrupt handler */
1921f2148a47SJeff Kirsher 					spin_lock(&rp->lock);
1922f2148a47SJeff Kirsher 					dev->stats.rx_crc_errors++;
1923f2148a47SJeff Kirsher 					spin_unlock(&rp->lock);
1924f2148a47SJeff Kirsher 				}
1925f2148a47SJeff Kirsher 			}
1926f2148a47SJeff Kirsher 		} else {
1927f2148a47SJeff Kirsher 			struct sk_buff *skb = NULL;
1928f2148a47SJeff Kirsher 			/* Length should omit the CRC */
1929f2148a47SJeff Kirsher 			int pkt_len = data_size - 4;
1930f2148a47SJeff Kirsher 			u16 vlan_tci = 0;
1931f2148a47SJeff Kirsher 
1932f2148a47SJeff Kirsher 			/* Copy frames shorter than rx_copybreak into a new
1933f2148a47SJeff Kirsher 			   minimally-sized skbuff; hand longer ones up in place. */
1934f2148a47SJeff Kirsher 			if (pkt_len < rx_copybreak)
1935f2148a47SJeff Kirsher 				skb = netdev_alloc_skb_ip_align(dev, pkt_len);
1936f2148a47SJeff Kirsher 			if (skb) {
1937*f7630d18SAlexey Charkov 				dma_sync_single_for_cpu(hwdev,
1938f2148a47SJeff Kirsher 							rp->rx_skbuff_dma[entry],
1939f2148a47SJeff Kirsher 							rp->rx_buf_sz,
19404087c4dcSAlexey Charkov 							DMA_FROM_DEVICE);
1941f2148a47SJeff Kirsher 
1942f2148a47SJeff Kirsher 				skb_copy_to_linear_data(skb,
1943f2148a47SJeff Kirsher 						 rp->rx_skbuff[entry]->data,
1944f2148a47SJeff Kirsher 						 pkt_len);
1945f2148a47SJeff Kirsher 				skb_put(skb, pkt_len);
1946*f7630d18SAlexey Charkov 				dma_sync_single_for_device(hwdev,
1947f2148a47SJeff Kirsher 							   rp->rx_skbuff_dma[entry],
1948f2148a47SJeff Kirsher 							   rp->rx_buf_sz,
19494087c4dcSAlexey Charkov 							   DMA_FROM_DEVICE);
1950f2148a47SJeff Kirsher 			} else {
1951f2148a47SJeff Kirsher 				skb = rp->rx_skbuff[entry];
1952f2148a47SJeff Kirsher 				if (skb == NULL) {
1953f2148a47SJeff Kirsher 					netdev_err(dev, "Inconsistent Rx descriptor chain\n");
1954f2148a47SJeff Kirsher 					break;
1955f2148a47SJeff Kirsher 				}
1956f2148a47SJeff Kirsher 				rp->rx_skbuff[entry] = NULL;
1957f2148a47SJeff Kirsher 				skb_put(skb, pkt_len);
1958*f7630d18SAlexey Charkov 				dma_unmap_single(hwdev,
1959f2148a47SJeff Kirsher 						 rp->rx_skbuff_dma[entry],
1960f2148a47SJeff Kirsher 						 rp->rx_buf_sz,
19614087c4dcSAlexey Charkov 						 DMA_FROM_DEVICE);
1962f2148a47SJeff Kirsher 			}
1963f2148a47SJeff Kirsher 
1964f2148a47SJeff Kirsher 			if (unlikely(desc_length & DescTag))
1965f2148a47SJeff Kirsher 				vlan_tci = rhine_get_vlan_tci(skb, data_size);
1966f2148a47SJeff Kirsher 
1967f2148a47SJeff Kirsher 			skb->protocol = eth_type_trans(skb, dev);
1968f2148a47SJeff Kirsher 
1969f2148a47SJeff Kirsher 			if (unlikely(desc_length & DescTag))
197086a9bad3SPatrick McHardy 				__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
1971f2148a47SJeff Kirsher 			netif_receive_skb(skb);
1972f7b5d1b9SJamie Gloudon 
1973f7b5d1b9SJamie Gloudon 			u64_stats_update_begin(&rp->rx_stats.syncp);
1974f7b5d1b9SJamie Gloudon 			rp->rx_stats.bytes += pkt_len;
1975f7b5d1b9SJamie Gloudon 			rp->rx_stats.packets++;
1976f7b5d1b9SJamie Gloudon 			u64_stats_update_end(&rp->rx_stats.syncp);
1977f2148a47SJeff Kirsher 		}
1978f2148a47SJeff Kirsher 		entry = (++rp->cur_rx) % RX_RING_SIZE;
1979f2148a47SJeff Kirsher 		rp->rx_head_desc = &rp->rx_ring[entry];
1980f2148a47SJeff Kirsher 	}
1981f2148a47SJeff Kirsher 
1982f2148a47SJeff Kirsher 	/* Refill the Rx ring buffers. */
1983f2148a47SJeff Kirsher 	for (; rp->cur_rx - rp->dirty_rx > 0; rp->dirty_rx++) {
1984f2148a47SJeff Kirsher 		struct sk_buff *skb;
1985f2148a47SJeff Kirsher 		entry = rp->dirty_rx % RX_RING_SIZE;
1986f2148a47SJeff Kirsher 		if (rp->rx_skbuff[entry] == NULL) {
1987f2148a47SJeff Kirsher 			skb = netdev_alloc_skb(dev, rp->rx_buf_sz);
1988f2148a47SJeff Kirsher 			rp->rx_skbuff[entry] = skb;
1989f2148a47SJeff Kirsher 			if (skb == NULL)
1990f2148a47SJeff Kirsher 				break;	/* Better luck next round. */
1991f2148a47SJeff Kirsher 			rp->rx_skbuff_dma[entry] =
1992*f7630d18SAlexey Charkov 				dma_map_single(hwdev, skb->data,
1993f2148a47SJeff Kirsher 					       rp->rx_buf_sz,
19944087c4dcSAlexey Charkov 					       DMA_FROM_DEVICE);
1995*f7630d18SAlexey Charkov 			if (dma_mapping_error(hwdev,
1996*f7630d18SAlexey Charkov 					      rp->rx_skbuff_dma[entry])) {
19979b4fe5fbSNeil Horman 				dev_kfree_skb(skb);
19989b4fe5fbSNeil Horman 				rp->rx_skbuff_dma[entry] = 0;
19999b4fe5fbSNeil Horman 				break;
20009b4fe5fbSNeil Horman 			}
2001f2148a47SJeff Kirsher 			rp->rx_ring[entry].addr = cpu_to_le32(rp->rx_skbuff_dma[entry]);
2002f2148a47SJeff Kirsher 		}
2003f2148a47SJeff Kirsher 		rp->rx_ring[entry].rx_status = cpu_to_le32(DescOwn);
2004f2148a47SJeff Kirsher 	}
2005f2148a47SJeff Kirsher 
2006f2148a47SJeff Kirsher 	return count;
2007f2148a47SJeff Kirsher }
2008f2148a47SJeff Kirsher 
2009f2148a47SJeff Kirsher static void rhine_restart_tx(struct net_device *dev)
{
2010f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
2011f2148a47SJeff Kirsher 	void __iomem *ioaddr = rp->base;
2012f2148a47SJeff Kirsher 	int entry = rp->dirty_tx % TX_RING_SIZE;
2013f2148a47SJeff Kirsher 	u32 intr_status;
2014f2148a47SJeff Kirsher 
2015f2148a47SJeff Kirsher 	/*
2016f2148a47SJeff Kirsher 	 * If new errors occurred, we need to sort them out before doing Tx.
2017f2148a47SJeff Kirsher 	 * In that case the ISR will be back here RSN anyway.
2018f2148a47SJeff Kirsher 	 */
2019a20a28bcSFrancois Romieu 	intr_status = rhine_get_events(rp);
2020f2148a47SJeff Kirsher 
2021f2148a47SJeff Kirsher 	if ((intr_status & IntrTxErrSummary) == 0) {
2022f2148a47SJeff Kirsher 
2023f2148a47SJeff Kirsher 		/* We know better than the chip where it should continue. */
2024f2148a47SJeff Kirsher 		iowrite32(rp->tx_ring_dma + entry * sizeof(struct tx_desc),
2025f2148a47SJeff Kirsher 		       ioaddr + TxRingPtr);
2026f2148a47SJeff Kirsher 
2027f2148a47SJeff Kirsher 		iowrite8(ioread8(ioaddr + ChipCmd) | CmdTxOn,
2028f2148a47SJeff Kirsher 		       ioaddr + ChipCmd);
2029f2148a47SJeff Kirsher 
2030f2148a47SJeff Kirsher 		if (rp->tx_ring[entry].desc_length & cpu_to_le32(0x020000))
2031f2148a47SJeff Kirsher 			/* Tx queues are bits 7-0 (first Tx queue: bit 7) */
2032f2148a47SJeff Kirsher 			BYTE_REG_BITS_ON(1 << 7, ioaddr + TQWake);
2033f2148a47SJeff Kirsher 
2034f2148a47SJeff Kirsher 		iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
2035f2148a47SJeff Kirsher 		       ioaddr + ChipCmd1);
2036f2148a47SJeff Kirsher 		IOSYNC;
2037f2148a47SJeff Kirsher 	} else {
2039f2148a47SJeff Kirsher 		/* This should never happen */
2040fc3e0f8aSFrancois Romieu 		netif_warn(rp, tx_err, dev, "another error occurred %08x\n",
2041fc3e0f8aSFrancois Romieu 			   intr_status);
2042f2148a47SJeff Kirsher 	}
2043f2148a47SJeff Kirsher 
2044f2148a47SJeff Kirsher }
2045f2148a47SJeff Kirsher 
20467ab87ff4SFrancois Romieu static void rhine_slow_event_task(struct work_struct *work)
2047f2148a47SJeff Kirsher {
20487ab87ff4SFrancois Romieu 	struct rhine_private *rp =
20497ab87ff4SFrancois Romieu 		container_of(work, struct rhine_private, slow_event_task);
20507ab87ff4SFrancois Romieu 	struct net_device *dev = rp->dev;
20517ab87ff4SFrancois Romieu 	u32 intr_status;
2052f2148a47SJeff Kirsher 
20537ab87ff4SFrancois Romieu 	mutex_lock(&rp->task_lock);
20547ab87ff4SFrancois Romieu 
20557ab87ff4SFrancois Romieu 	if (!rp->task_enable)
20567ab87ff4SFrancois Romieu 		goto out_unlock;
20577ab87ff4SFrancois Romieu 
20587ab87ff4SFrancois Romieu 	intr_status = rhine_get_events(rp);
20597ab87ff4SFrancois Romieu 	rhine_ack_events(rp, intr_status & RHINE_EVENT_SLOW);
2060f2148a47SJeff Kirsher 
2061f2148a47SJeff Kirsher 	if (intr_status & IntrLinkChange)
2062f2148a47SJeff Kirsher 		rhine_check_media(dev, 0);
2063f2148a47SJeff Kirsher 
2064fc3e0f8aSFrancois Romieu 	if (intr_status & IntrPCIErr)
2065fc3e0f8aSFrancois Romieu 		netif_warn(rp, hw, dev, "PCI error\n");
2066fc3e0f8aSFrancois Romieu 
2067559bcac3SDavid S. Miller 	iowrite16(RHINE_EVENT & 0xffff, rp->base + IntrEnable);
2068f2148a47SJeff Kirsher 
20697ab87ff4SFrancois Romieu out_unlock:
20707ab87ff4SFrancois Romieu 	mutex_unlock(&rp->task_lock);
2071f2148a47SJeff Kirsher }
2072f2148a47SJeff Kirsher 
2073f7b5d1b9SJamie Gloudon static struct rtnl_link_stats64 *
2074f7b5d1b9SJamie Gloudon rhine_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
2075f2148a47SJeff Kirsher {
2076f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
2077f7b5d1b9SJamie Gloudon 	unsigned int start;
2078f2148a47SJeff Kirsher 
20797ab87ff4SFrancois Romieu 	spin_lock_bh(&rp->lock);
20807ab87ff4SFrancois Romieu 	rhine_update_rx_crc_and_missed_errord(rp);
20817ab87ff4SFrancois Romieu 	spin_unlock_bh(&rp->lock);
2082f2148a47SJeff Kirsher 
2083f7b5d1b9SJamie Gloudon 	netdev_stats_to_stats64(stats, &dev->stats);
2084f7b5d1b9SJamie Gloudon 
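	/*
	 * The begin/retry pairs below give a consistent 64-bit snapshot
	 * even on 32-bit machines, where the counters cannot be read
	 * atomically: if an update raced with the read, the loop retries.
	 */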
2085f7b5d1b9SJamie Gloudon 	do {
208657a7744eSEric W. Biederman 		start = u64_stats_fetch_begin_irq(&rp->rx_stats.syncp);
2087f7b5d1b9SJamie Gloudon 		stats->rx_packets = rp->rx_stats.packets;
2088f7b5d1b9SJamie Gloudon 		stats->rx_bytes = rp->rx_stats.bytes;
208957a7744eSEric W. Biederman 	} while (u64_stats_fetch_retry_irq(&rp->rx_stats.syncp, start));
2090f7b5d1b9SJamie Gloudon 
2091f7b5d1b9SJamie Gloudon 	do {
209257a7744eSEric W. Biederman 		start = u64_stats_fetch_begin_irq(&rp->tx_stats.syncp);
2093f7b5d1b9SJamie Gloudon 		stats->tx_packets = rp->tx_stats.packets;
2094f7b5d1b9SJamie Gloudon 		stats->tx_bytes = rp->tx_stats.bytes;
209557a7744eSEric W. Biederman 	} while (u64_stats_fetch_retry_irq(&rp->tx_stats.syncp, start));
2096f7b5d1b9SJamie Gloudon 
2097f7b5d1b9SJamie Gloudon 	return stats;
2098f2148a47SJeff Kirsher }
2099f2148a47SJeff Kirsher 
static void rhine_set_rx_mode(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	u32 mc_filter[2];	/* Multicast hash filter */
	u8 rx_mode = 0x0C;	/* Note: 0x02=accept runt, 0x01=accept errs */
	struct netdev_hw_addr *ha;

	if (dev->flags & IFF_PROMISC) {		/* Set promiscuous. */
		rx_mode = 0x1C;
		iowrite32(0xffffffff, ioaddr + MulticastFilter0);
		iowrite32(0xffffffff, ioaddr + MulticastFilter1);
	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
		   (dev->flags & IFF_ALLMULTI)) {
		/* Too many to match, or accept all multicasts. */
		iowrite32(0xffffffff, ioaddr + MulticastFilter0);
		iowrite32(0xffffffff, ioaddr + MulticastFilter1);
	} else if (rp->revision >= VT6105M) {
		int i = 0;
		u32 mCAMmask = 0;	/* 32 mCAMs (6105M and better) */
		netdev_for_each_mc_addr(ha, dev) {
			if (i == MCAM_SIZE)
				break;
			rhine_set_cam(ioaddr, i, ha->addr);
			mCAMmask |= 1 << i;
			i++;
		}
		rhine_set_cam_mask(ioaddr, mCAMmask);
	} else {
		memset(mc_filter, 0, sizeof(mc_filter));
		netdev_for_each_mc_addr(ha, dev) {
			int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;

			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
		}
		iowrite32(mc_filter[0], ioaddr + MulticastFilter0);
		iowrite32(mc_filter[1], ioaddr + MulticastFilter1);
	}
	/* enable/disable VLAN receive filtering */
	if (rp->revision >= VT6105M) {
		if (dev->flags & IFF_PROMISC)
			BYTE_REG_BITS_OFF(BCR1_VIDFR, ioaddr + PCIBusConfig1);
		else
			BYTE_REG_BITS_ON(BCR1_VIDFR, ioaddr + PCIBusConfig1);
	}
	BYTE_REG_BITS_ON(rx_mode, ioaddr + RxConfig);
}

static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct device *hwdev = dev->dev.parent;

	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, dev_name(hwdev), sizeof(info->bus_info));
}

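/*
 * MII accesses from ethtool are serialized against the deferred work
 * tasks (such as rhine_slow_event_task above) via the task_lock mutex
 * rather than the irq spinlock.
 */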
static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct rhine_private *rp = netdev_priv(dev);
	int rc;

	mutex_lock(&rp->task_lock);
	rc = mii_ethtool_gset(&rp->mii_if, cmd);
	mutex_unlock(&rp->task_lock);

	return rc;
}

static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct rhine_private *rp = netdev_priv(dev);
	int rc;

	mutex_lock(&rp->task_lock);
	rc = mii_ethtool_sset(&rp->mii_if, cmd);
	rhine_set_carrier(&rp->mii_if);
	mutex_unlock(&rp->task_lock);

	return rc;
}

static int netdev_nway_reset(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);

	return mii_nway_restart(&rp->mii_if);
}

static u32 netdev_get_link(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);

	return mii_link_ok(&rp->mii_if);
}

static u32 netdev_get_msglevel(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);

	return rp->msg_enable;
}

static void netdev_set_msglevel(struct net_device *dev, u32 value)
{
	struct rhine_private *rp = netdev_priv(dev);

	rp->msg_enable = value;
}

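/*
 * Wake-on-LAN: only chips with the rqWOL quirk expose any WOL
 * capability; the requested modes are validated against the supported
 * mask and latched in rp->wolopts for rhine_shutdown() to program.
 */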
static void rhine_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct rhine_private *rp = netdev_priv(dev);

	if (!(rp->quirks & rqWOL))
		return;

	spin_lock_irq(&rp->lock);
	wol->supported = WAKE_PHY | WAKE_MAGIC |
			 WAKE_UCAST | WAKE_MCAST | WAKE_BCAST;	/* Untested */
	wol->wolopts = rp->wolopts;
	spin_unlock_irq(&rp->lock);
}

static int rhine_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct rhine_private *rp = netdev_priv(dev);
	u32 support = WAKE_PHY | WAKE_MAGIC |
		      WAKE_UCAST | WAKE_MCAST | WAKE_BCAST;	/* Untested */

	if (!(rp->quirks & rqWOL))
		return -EINVAL;

	if (wol->wolopts & ~support)
		return -EINVAL;

	spin_lock_irq(&rp->lock);
	rp->wolopts = wol->wolopts;
	spin_unlock_irq(&rp->lock);

	return 0;
}

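/*
 * ethtool entry points.  This vintage still uses the legacy
 * get_settings/set_settings (struct ethtool_cmd) interface rather
 * than the later ksettings variants.
 */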
static const struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo		= netdev_get_drvinfo,
	.get_settings		= netdev_get_settings,
	.set_settings		= netdev_set_settings,
	.nway_reset		= netdev_nway_reset,
	.get_link		= netdev_get_link,
	.get_msglevel		= netdev_get_msglevel,
	.set_msglevel		= netdev_set_msglevel,
	.get_wol		= rhine_get_wol,
	.set_wol		= rhine_set_wol,
};

static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct rhine_private *rp = netdev_priv(dev);
	int rc;

	if (!netif_running(dev))
		return -EINVAL;

	mutex_lock(&rp->task_lock);
	rc = generic_mii_ioctl(&rp->mii_if, if_mii(rq), cmd, NULL);
	rhine_set_carrier(&rp->mii_if);
	mutex_unlock(&rp->task_lock);

	return rc;
}

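/*
 * Orderly teardown: stop the deferred tasks, NAPI and the Tx queue
 * first, put the MAC into loopback to quiesce the DMA engines, then
 * mask interrupts, stop the chip, and release the IRQ and the rings.
 */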
static int rhine_close(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	rhine_task_disable(rp);
	napi_disable(&rp->napi);
	netif_stop_queue(dev);

	netif_dbg(rp, ifdown, dev, "Shutting down ethercard, status was %04x\n",
		  ioread16(ioaddr + ChipCmd));

	/* Switch to loopback mode to avoid hardware races. */
	iowrite8(rp->tx_thresh | 0x02, ioaddr + TxConfig);

	rhine_irq_disable(rp);

	/* Stop the chip's Tx and Rx processes. */
	iowrite16(CmdStop, ioaddr + ChipCmd);

	free_irq(rp->irq, dev);
	free_rbufs(dev);
	free_tbufs(dev);
	free_ring(dev);

	return 0;
}


static void rhine_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rhine_private *rp = netdev_priv(dev);

	unregister_netdev(dev);

	pci_iounmap(pdev, rp->base);
	pci_release_regions(pdev);

	free_netdev(dev);
	pci_disable_device(pdev);
}

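/*
 * Arm the WOL engine at shutdown according to rp->wolopts and, unless
 * avoid_D3 compensates for a broken BIOS, drop the chip into D3hot on
 * power-off so it can wake the machine later.
 */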
static void rhine_shutdown(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	if (!(rp->quirks & rqWOL))
		return; /* Nothing to do for non-WOL adapters */

	rhine_power_init(dev);

	/* Make sure we use pattern 0, 1 and not 4, 5 */
	if (rp->quirks & rq6patterns)
		iowrite8(0x04, ioaddr + WOLcgClr);

	spin_lock(&rp->lock);

	if (rp->wolopts & WAKE_MAGIC) {
		iowrite8(WOLmagic, ioaddr + WOLcrSet);
		/*
		 * Turn EEPROM-controlled wake-up back on -- some hardware may
		 * not cooperate otherwise.
		 */
		iowrite8(ioread8(ioaddr + ConfigA) | 0x03, ioaddr + ConfigA);
	}

	if (rp->wolopts & (WAKE_BCAST|WAKE_MCAST))
		iowrite8(WOLbmcast, ioaddr + WOLcgSet);

	if (rp->wolopts & WAKE_PHY)
		iowrite8(WOLlnkon | WOLlnkoff, ioaddr + WOLcrSet);

	if (rp->wolopts & WAKE_UCAST)
		iowrite8(WOLucast, ioaddr + WOLcrSet);

	if (rp->wolopts) {
		/* Enable legacy WOL (for old motherboards) */
		iowrite8(0x01, ioaddr + PwcfgSet);
		iowrite8(ioread8(ioaddr + StickyHW) | 0x04, ioaddr + StickyHW);
	}

	spin_unlock(&rp->lock);

	if (system_state == SYSTEM_POWER_OFF && !avoid_D3) {
		iowrite8(ioread8(ioaddr + StickyHW) | 0x03, ioaddr + StickyHW);

		pci_wake_from_d3(pdev, true);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}

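/*
 * Generic dev_pm_ops callbacks.  The dev_is_pci() check below suggests
 * that non-PCI bus attachments were anticipated at this revision; only
 * PCI devices route through rhine_shutdown() to arm WOL on suspend.
 */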
#ifdef CONFIG_PM_SLEEP
static int rhine_suspend(struct device *device)
{
	struct net_device *dev = dev_get_drvdata(device);
	struct rhine_private *rp = netdev_priv(dev);

	if (!netif_running(dev))
		return 0;

	rhine_task_disable(rp);
	rhine_irq_disable(rp);
	napi_disable(&rp->napi);

	netif_device_detach(dev);

	if (dev_is_pci(device))
		rhine_shutdown(to_pci_dev(device));

	return 0;
}

static int rhine_resume(struct device *device)
{
	struct net_device *dev = dev_get_drvdata(device);
	struct rhine_private *rp = netdev_priv(dev);

	if (!netif_running(dev))
		return 0;

#ifdef USE_MMIO
	enable_mmio(rp->pioaddr, rp->quirks);
#endif
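	/*
	 * Chip state (and, presumably, descriptor ring state) is assumed
	 * lost across suspend, so rebuild both rings from scratch before
	 * re-initializing the registers.
	 */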
	rhine_power_init(dev);
	free_tbufs(dev);
	free_rbufs(dev);
	alloc_tbufs(dev);
	alloc_rbufs(dev);
	rhine_task_enable(rp);
	spin_lock_bh(&rp->lock);
	init_registers(dev);
	spin_unlock_bh(&rp->lock);

	netif_device_attach(dev);

	return 0;
}

static SIMPLE_DEV_PM_OPS(rhine_pm_ops, rhine_suspend, rhine_resume);
#define RHINE_PM_OPS	(&rhine_pm_ops)

#else

#define RHINE_PM_OPS	NULL

#endif /* !CONFIG_PM_SLEEP */

static struct pci_driver rhine_driver = {
	.name		= DRV_NAME,
	.id_table	= rhine_pci_tbl,
	.probe		= rhine_init_one,
	.remove		= rhine_remove_one,
	.shutdown	= rhine_shutdown,
	.driver.pm	= RHINE_PM_OPS,
};

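/*
 * Boards whose BIOS cannot bring the chip back out of D3: matching any
 * of these at module init forces avoid_D3 (see rhine_init() below).
 */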
static struct dmi_system_id rhine_dmi_table[] __initdata = {
	{
		.ident = "EPIA-M",
		.matches = {
			DMI_MATCH(DMI_BIOS_VENDOR, "Award Software International, Inc."),
			DMI_MATCH(DMI_BIOS_VERSION, "6.00 PG"),
		},
	},
	{
		.ident = "KV7",
		.matches = {
			DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies, LTD"),
			DMI_MATCH(DMI_BIOS_VERSION, "6.00 PG"),
		},
	},
	{ NULL }
};

static int __init rhine_init(void)
{
/* When built as a module, this is printed whether or not devices are found in probe. */
#ifdef MODULE
	pr_info("%s\n", version);
#endif
	if (dmi_check_system(rhine_dmi_table)) {
		/* these BIOSes fail at PXE boot if chip is in D3 */
		avoid_D3 = true;
		pr_warn("Broken BIOS detected, avoid_D3 enabled\n");
	} else if (avoid_D3) {
		pr_info("avoid_D3 set\n");
	}

	return pci_register_driver(&rhine_driver);
}


static void __exit rhine_cleanup(void)
{
	pci_unregister_driver(&rhine_driver);
}


module_init(rhine_init);
module_exit(rhine_cleanup);