/* via-rhine.c: A Linux Ethernet device driver for VIA Rhine family chips. */
/*
	Written 1998-2001 by Donald Becker.

	Current Maintainer: Roger Luethi <rl@hellgate.ch>

	This software may be used and distributed according to the terms of
	the GNU General Public License (GPL), incorporated herein by reference.
	Drivers based on or derived from this code fall under the GPL and must
	retain the authorship, copyright and license notice.  This file is not
	a complete program and may only be used when the entire operating
	system is licensed under the GPL.

	This driver is designed for the VIA VT86C100A Rhine-I.
	It also works with the Rhine-II (6102) and Rhine-III (6105/6105L/6105LOM
	and management NIC 6105M).

	The author may be reached as becker@scyld.com, or C/O
	Scyld Computing Corporation
	410 Severn Ave., Suite 210
	Annapolis MD 21403


	This driver contains some changes from the original Donald Becker
	version. He may or may not be interested in bug reports on this
	code. You can find his versions at:
	http://www.scyld.com/network/via-rhine.html
	[link no longer provides useful info -jgarzik]

*/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#define DRV_NAME	"via-rhine"
#define DRV_VERSION	"1.5.1"
#define DRV_RELDATE	"2010-10-09"

#include <linux/types.h>

/* A few user-configurable values.
   These may be modified when a driver module is loaded. */
static int debug = 0;
#define RHINE_MSG_DEFAULT \
        (0x0000)

/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
   Setting to > 1518 effectively disables this feature. */
#if defined(__alpha__) || defined(__arm__) || defined(__hppa__) || \
	defined(CONFIG_SPARC) || defined(__ia64__) ||		   \
	defined(__sh__) || defined(__mips__)
static int rx_copybreak = 1518;
#else
static int rx_copybreak;
#endif

/* Work-around for broken BIOSes: they are unable to get the chip back out of
   power state D3 so PXE booting fails. bootparam(7): via-rhine.avoid_D3=1 */
static bool avoid_D3;

/*
 * In case you are looking for 'options[]' or 'full_duplex[]', they
 * are gone. Use ethtool(8) instead.
 */

/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
   The Rhine has a 64 element 8390-like hash table. */
static const int multicast_filter_limit = 32;
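
/*
 * Illustrative sketch of the 8390-style hash (an assumption based on the
 * scheme's usual form, not a quote of rhine_set_rx_mode(), which is
 * outside this excerpt): each multicast address selects one of the 64
 * filter bits via the top six bits of its Ethernet CRC, e.g.:
 *
 *	int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
 *	mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
 */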


/* Operational parameters that are set at compile time. */

/* Keep the ring sizes a power of two for compile efficiency.
 * The compiler will convert <unsigned>'%'<2^N> into a bit mask.
 * Making the Tx ring too large decreases the effectiveness of channel
 * bonding and packet priority.
 * With BQL support, we can increase TX ring safely.
 * There are no ill effects from too-large receive rings.
 */
#define TX_RING_SIZE	64
#define TX_QUEUE_LEN	(TX_RING_SIZE - 6)	/* Limit ring entries actually used. */
#define RX_RING_SIZE	64
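
/*
 * Illustrative sketch (assumes a free-running index such as cur_tx):
 * because the sizes above are powers of two, the compiler turns
 *
 *	unsigned int entry = cur_tx % TX_RING_SIZE;
 *
 * into the cheap bit mask
 *
 *	unsigned int entry = cur_tx & (TX_RING_SIZE - 1);
 */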

/* Operational parameters that usually are not changed. */

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT	(2*HZ)

#define PKT_BUF_SZ	1536	/* Size of each temporary Rx buffer. */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>
#include <linux/if_vlan.h>
#include <linux/bitops.h>
#include <linux/workqueue.h>
#include <asm/processor.h>	/* Processor type for cache alignment. */
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <linux/dmi.h>

/* These identify the driver base version and may not be removed. */
static const char version[] =
	"v1.10-LK" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker";

MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("VIA Rhine PCI Fast Ethernet driver");
MODULE_LICENSE("GPL");

module_param(debug, int, 0);
module_param(rx_copybreak, int, 0);
module_param(avoid_D3, bool, 0);
MODULE_PARM_DESC(debug, "VIA Rhine debug message flags");
MODULE_PARM_DESC(rx_copybreak, "VIA Rhine copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(avoid_D3, "Avoid power state D3 (work-around for broken BIOSes)");
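
/*
 * Usage sketch (illustrative): the parameters above are set at module
 * load time, e.g.
 *
 *	modprobe via-rhine rx_copybreak=1518 avoid_D3=1
 *
 * or, for a built-in driver, on the kernel command line per bootparam(7),
 * e.g. via-rhine.avoid_D3=1. "debug" takes a bitmask of NETIF_MSG_*
 * flags (see <linux/netdevice.h>).
 */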

#define MCAM_SIZE	32
#define VCAM_SIZE	32

/*
		Theory of Operation

I. Board Compatibility

This driver is designed for the VIA VT86C100A Rhine-I PCI Fast Ethernet
controller.

II. Board-specific settings

Boards with this chip are functional only in a bus-master PCI slot.

Many operational settings are loaded from the EEPROM to the Config word at
offset 0x78. For most of these settings, this driver assumes that they are
correct.
If this driver is compiled to use PCI memory space operations the EEPROM
must be configured to enable memory ops.

III. Driver operation

IIIa. Ring buffers

This driver uses two statically allocated fixed-size descriptor lists
formed into rings by a branch from the final descriptor to the beginning of
the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.

IIIb/c. Transmit/Receive Structure

This driver attempts to use a zero-copy receive and transmit scheme.

Alas, all data buffers are required to start on a 32 bit boundary, so
the driver must often copy transmit packets into bounce buffers.

The driver allocates full frame size skbuffs for the Rx ring buffers at
open() time and passes the skb->data field to the chip as receive data
buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
a fresh skbuff is allocated and the frame is copied to the new skbuff.
When the incoming frame is larger, the skbuff is passed directly up the
protocol stack. Buffers consumed this way are replaced by newly allocated
skbuffs in the last phase of rhine_rx().

The RX_COPYBREAK value is chosen to trade-off the memory wasted by
using a full-sized skbuff for small frames vs. the copying costs of larger
frames. New boards are typically used in generously configured machines
and the underfilled buffers have negligible impact compared to the benefit of
a single allocation size, so the default value of zero results in never
copying packets. When copying is done, the cost is usually mitigated by using
a combined copy/checksum routine. Copying also preloads the cache, which is
most useful with small frames.

Since the VIA chips are only able to transfer data to buffers on 32 bit
boundaries, the IP header at offset 14 in an ethernet frame isn't
longword aligned for further processing. Copying these unaligned buffers
has the beneficial effect of 16-byte aligning the IP header.

IIId. Synchronization

The driver runs as two independent, single-threaded flows of control. One
is the send-packet routine, which enforces single-threaded use by the
netdev_priv(dev)->lock spinlock. The other thread is the interrupt handler,
which is single threaded by the hardware and interrupt handling software.

The send packet thread has partial control over the Tx ring. It locks the
netdev_priv(dev)->lock whenever it's queuing a Tx packet. If the next slot in
the ring is not available it stops the transmit queue by
calling netif_stop_queue.

The interrupt handler has exclusive control over the Rx ring and records stats
from the Tx ring. After reaping the stats, it marks the Tx queue entry as
empty by incrementing the dirty_tx mark. If at least half of the entries in
the Rx ring are available the transmit queue is woken up if it was stopped.

IV. Notes

IVb. References

Preliminary VT86C100A manual from http://www.via.com.tw/
http://www.scyld.com/expert/100mbps.html
http://www.scyld.com/expert/NWay.html
ftp://ftp.via.com.tw/public/lan/Products/NIC/VT86C100A/Datasheet/VT86C100A03.pdf
ftp://ftp.via.com.tw/public/lan/Products/NIC/VT6102/Datasheet/VT6102_021.PDF


IVc. Errata

The VT86C100A manual is not a reliable source of information.
The 3043 chip does not handle unaligned transmit or receive buffers, resulting
in significant performance degradation for bounce buffer copies on transmit
and unaligned IP headers on receive.
The chip does not pad to minimum transmit length.

*/
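
/* Minimal sketch of the copybreak decision from section IIIb/c above
 * (illustrative only; the authoritative logic lives in rhine_rx(),
 * which is outside this excerpt):
 *
 *	if (pkt_len < rx_copybreak) {
 *		// copy the frame into a fresh small skb,
 *		// leave the full-sized ring buffer in place
 *	} else {
 *		// hand the ring skb to the stack and allocate
 *		// a replacement ring buffer
 *	}
 */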


/* This table drives the PCI probe routines. It's mostly boilerplate in all
   of the drivers, and will likely be provided by some future kernel.
   Note the matching code -- the first table entry matches all 56** cards but
   the second matches only the 1234 card.
*/

enum rhine_revs {
	VT86C100A	= 0x00,
	VTunknown0	= 0x20,
	VT6102		= 0x40,
	VT8231		= 0x50,	/* Integrated MAC */
	VT8233		= 0x60,	/* Integrated MAC */
	VT8235		= 0x74,	/* Integrated MAC */
	VT8237		= 0x78,	/* Integrated MAC */
	VTunknown1	= 0x7C,
	VT6105		= 0x80,
	VT6105_B0	= 0x83,
	VT6105L		= 0x8A,
	VT6107		= 0x8C,
	VTunknown2	= 0x8E,
	VT6105M		= 0x90,	/* Management adapter */
};

enum rhine_quirks {
	rqWOL		= 0x0001,	/* Wake-On-LAN support */
	rqForceReset	= 0x0002,
	rq6patterns	= 0x0040,	/* 6 instead of 4 patterns for WOL */
	rqStatusWBRace	= 0x0080,	/* Tx Status Writeback Error possible */
	rqRhineI	= 0x0100,	/* See comment below */
	rqIntPHY	= 0x0200,	/* Integrated PHY */
	rqMgmt		= 0x0400,	/* Management adapter */
	rqNeedEnMMIO	= 0x0800,	/* Whether the core needs to be
					 * switched from PIO mode to MMIO
					 * (only applies to PCI)
					 */
};
/*
 * rqRhineI: VT86C100A (aka Rhine-I) uses different bits to enable
 * MMIO as well as for the collision counter and the Tx FIFO underflow
 * indicator. In addition, Tx and Rx buffers need to be 4-byte aligned.
 */

/* Beware of PCI posted writes */
#define IOSYNC	do { ioread8(ioaddr + StationAddr); } while (0)
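
/*
 * Illustrative use of IOSYNC: a dummy read flushes posted writes out to
 * the chip before execution continues, e.g. as rhine_chip_reset() does
 * below:
 *
 *	iowrite8(Cmd1Reset, ioaddr + ChipCmd1);
 *	IOSYNC;
 */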

static const struct pci_device_id rhine_pci_tbl[] = {
	{ 0x1106, 0x3043, PCI_ANY_ID, PCI_ANY_ID, },	/* VT86C100A */
	{ 0x1106, 0x3065, PCI_ANY_ID, PCI_ANY_ID, },	/* VT6102 */
	{ 0x1106, 0x3106, PCI_ANY_ID, PCI_ANY_ID, },	/* 6105{,L,LOM} */
	{ 0x1106, 0x3053, PCI_ANY_ID, PCI_ANY_ID, },	/* VT6105M */
	{ }	/* terminate list */
};
MODULE_DEVICE_TABLE(pci, rhine_pci_tbl);

/* OpenFirmware identifiers for platform-bus devices.
 * The .data field is currently only used to store quirks.
 */
static u32 vt8500_quirks = rqWOL | rqForceReset | rq6patterns;
static const struct of_device_id rhine_of_tbl[] = {
	{ .compatible = "via,vt8500-rhine", .data = &vt8500_quirks },
	{ }	/* terminate list */
};
MODULE_DEVICE_TABLE(of, rhine_of_tbl);

/* Offsets to the device registers. */
enum register_offsets {
	StationAddr=0x00, RxConfig=0x06, TxConfig=0x07, ChipCmd=0x08,
	ChipCmd1=0x09, TQWake=0x0A,
	IntrStatus=0x0C, IntrEnable=0x0E,
	MulticastFilter0=0x10, MulticastFilter1=0x14,
	RxRingPtr=0x18, TxRingPtr=0x1C, GFIFOTest=0x54,
	MIIPhyAddr=0x6C, MIIStatus=0x6D, PCIBusConfig=0x6E, PCIBusConfig1=0x6F,
	MIICmd=0x70, MIIRegAddr=0x71, MIIData=0x72, MACRegEEcsr=0x74,
	ConfigA=0x78, ConfigB=0x79, ConfigC=0x7A, ConfigD=0x7B,
	RxMissed=0x7C, RxCRCErrs=0x7E, MiscCmd=0x81,
	StickyHW=0x83, IntrStatus2=0x84,
	CamMask=0x88, CamCon=0x92, CamAddr=0x93,
	WOLcrSet=0xA0, PwcfgSet=0xA1, WOLcgSet=0xA3, WOLcrClr=0xA4,
	WOLcrClr1=0xA6, WOLcgClr=0xA7,
	PwrcsrSet=0xA8, PwrcsrSet1=0xA9, PwrcsrClr=0xAC, PwrcsrClr1=0xAD,
};

/* Bits in ConfigD */
enum backoff_bits {
	BackOptional=0x01, BackModify=0x02,
	BackCaptureEffect=0x04, BackRandom=0x08
};

/* Bits in the TxConfig (TCR) register */
enum tcr_bits {
	TCR_PQEN=0x01,
	TCR_LB0=0x02,		/* loopback[0] */
	TCR_LB1=0x04,		/* loopback[1] */
	TCR_OFSET=0x08,
	TCR_RTGOPT=0x10,
	TCR_RTFT0=0x20,
	TCR_RTFT1=0x40,
	TCR_RTSF=0x80,
};

/* Bits in the CamCon (CAMC) register */
enum camcon_bits {
	CAMC_CAMEN=0x01,
	CAMC_VCAMSL=0x02,
	CAMC_CAMWR=0x04,
	CAMC_CAMRD=0x08,
};

/* Bits in the PCIBusConfig1 (BCR1) register */
enum bcr1_bits {
	BCR1_POT0=0x01,
	BCR1_POT1=0x02,
	BCR1_POT2=0x04,
	BCR1_CTFT0=0x08,
	BCR1_CTFT1=0x10,
	BCR1_CTSF=0x20,
	BCR1_TXQNOBK=0x40,	/* for VT6105 */
	BCR1_VIDFR=0x80,	/* for VT6105 */
	BCR1_MED0=0x40,		/* for VT6102 */
	BCR1_MED1=0x80,		/* for VT6102 */
};

/* Registers we check to verify that MMIO and PIO accesses agree. */
static const int mmio_verify_registers[] = {
	RxConfig, TxConfig, IntrEnable, ConfigA, ConfigB, ConfigC, ConfigD,
	0
};

/* Bits in the interrupt status/mask registers. */
enum intr_status_bits {
	IntrRxDone	= 0x0001,
	IntrTxDone	= 0x0002,
	IntrRxErr	= 0x0004,
	IntrTxError	= 0x0008,
	IntrRxEmpty	= 0x0020,
	IntrPCIErr	= 0x0040,
	IntrStatsMax	= 0x0080,
	IntrRxEarly	= 0x0100,
	IntrTxUnderrun	= 0x0210,
	IntrRxOverflow	= 0x0400,
	IntrRxDropped	= 0x0800,
	IntrRxNoBuf	= 0x1000,
	IntrTxAborted	= 0x2000,
	IntrLinkChange	= 0x4000,
	IntrRxWakeUp	= 0x8000,
	IntrTxDescRace		= 0x080000,	/* mapped from IntrStatus2 */
	IntrNormalSummary	= IntrRxDone | IntrTxDone,
	IntrTxErrSummary	= IntrTxDescRace | IntrTxAborted | IntrTxError |
				  IntrTxUnderrun,
};

/* Bits in WOLcrSet/WOLcrClr and PwrcsrSet/PwrcsrClr */
enum wol_bits {
	WOLucast	= 0x10,
	WOLmagic	= 0x20,
	WOLbmcast	= 0x30,
	WOLlnkon	= 0x40,
	WOLlnkoff	= 0x80,
};

/* The Rx and Tx buffer descriptors. */
struct rx_desc {
	__le32 rx_status;
	__le32 desc_length; /* Chain flag, Buffer/frame length */
	__le32 addr;
	__le32 next_desc;
};
struct tx_desc {
	__le32 tx_status;
	__le32 desc_length; /* Chain flag, Tx Config, Frame length */
	__le32 addr;
	__le32 next_desc;
};

/* Initial value for tx_desc.desc_length, Buffer size goes to bits 0-10 */
#define TXDESC		0x00e08000
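
/*
 * Illustrative sketch of the ring linkage from section IIIa (an
 * assumption about the ring setup code, which is outside this excerpt):
 * each descriptor's next_desc holds the DMA address of its successor,
 * and the last one points back to the start, e.g. for the Tx ring:
 *
 *	next = tx_ring_dma + ((i + 1) % TX_RING_SIZE) * sizeof(struct tx_desc);
 *	tx_ring[i].next_desc = cpu_to_le32(next);
 */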

enum rx_status_bits {
	RxOK=0x8000, RxWholePkt=0x0300, RxErr=0x008F
};

/* Bits in *_desc.*_status */
enum desc_status_bits {
	DescOwn=0x80000000
};

/* Bits in *_desc.*_length */
enum desc_length_bits {
	DescTag=0x00010000
};

/* Bits in ChipCmd. */
enum chip_cmd_bits {
	CmdInit=0x01, CmdStart=0x02, CmdStop=0x04, CmdRxOn=0x08,
	CmdTxOn=0x10, Cmd1TxDemand=0x20, CmdRxDemand=0x40,
	Cmd1EarlyRx=0x01, Cmd1EarlyTx=0x02, Cmd1FDuplex=0x04,
	Cmd1NoTxPoll=0x08, Cmd1Reset=0x80,
};

struct rhine_stats {
	u64		packets;
	u64		bytes;
	struct u64_stats_sync syncp;
};

struct rhine_private {
	/* Bit mask for configured VLAN ids */
	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];

	/* Descriptor rings */
	struct rx_desc *rx_ring;
	struct tx_desc *tx_ring;
	dma_addr_t rx_ring_dma;
	dma_addr_t tx_ring_dma;

	/* The addresses of receive-in-place skbuffs. */
	struct sk_buff *rx_skbuff[RX_RING_SIZE];
	dma_addr_t rx_skbuff_dma[RX_RING_SIZE];

	/* The saved address of a sent-in-place packet/buffer, for later free(). */
	struct sk_buff *tx_skbuff[TX_RING_SIZE];
	dma_addr_t tx_skbuff_dma[TX_RING_SIZE];

	/* Tx bounce buffers (Rhine-I only) */
	unsigned char *tx_buf[TX_RING_SIZE];
	unsigned char *tx_bufs;
	dma_addr_t tx_bufs_dma;

	int irq;
	long pioaddr;
	struct net_device *dev;
	struct napi_struct napi;
	spinlock_t lock;
	struct mutex task_lock;
	bool task_enable;
	struct work_struct slow_event_task;
	struct work_struct reset_task;

	u32 msg_enable;

	/* Frequently used values: keep some adjacent for cache effect. */
	u32 quirks;
	struct rx_desc *rx_head_desc;
	unsigned int cur_rx, dirty_rx;	/* Producer/consumer ring indices */
	unsigned int cur_tx, dirty_tx;
	unsigned int rx_buf_sz;		/* Based on MTU+slack. */
	struct rhine_stats rx_stats;
	struct rhine_stats tx_stats;
	u8 wolopts;

	u8 tx_thresh, rx_thresh;

	struct mii_if_info mii_if;
	void __iomem *base;
};

#define BYTE_REG_BITS_ON(x, p)      do { iowrite8((ioread8((p))|(x)), (p)); } while (0)
#define WORD_REG_BITS_ON(x, p)      do { iowrite16((ioread16((p))|(x)), (p)); } while (0)
#define DWORD_REG_BITS_ON(x, p)     do { iowrite32((ioread32((p))|(x)), (p)); } while (0)

#define BYTE_REG_BITS_IS_ON(x, p)   (ioread8((p)) & (x))
#define WORD_REG_BITS_IS_ON(x, p)   (ioread16((p)) & (x))
#define DWORD_REG_BITS_IS_ON(x, p)  (ioread32((p)) & (x))

#define BYTE_REG_BITS_OFF(x, p)     do { iowrite8(ioread8((p)) & (~(x)), (p)); } while (0)
#define WORD_REG_BITS_OFF(x, p)     do { iowrite16(ioread16((p)) & (~(x)), (p)); } while (0)
#define DWORD_REG_BITS_OFF(x, p)    do { iowrite32(ioread32((p)) & (~(x)), (p)); } while (0)

#define BYTE_REG_BITS_SET(x, m, p)   do { iowrite8((ioread8((p)) & (~(m)))|(x), (p)); } while (0)
#define WORD_REG_BITS_SET(x, m, p)   do { iowrite16((ioread16((p)) & (~(m)))|(x), (p)); } while (0)
#define DWORD_REG_BITS_SET(x, m, p)  do { iowrite32((ioread32((p)) & (~(m)))|(x), (p)); } while (0)
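
/*
 * Usage sketch for the helpers above (illustrative): the *_SET variants
 * replace the bits selected by mask m with x while preserving the rest
 * of the register, e.g. as rhine_kick_tx_threshold() does below:
 *
 *	BYTE_REG_BITS_SET(rp->tx_thresh, 0x80, ioaddr + TxConfig);
 */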


static int  mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
static int  rhine_open(struct net_device *dev);
static void rhine_reset_task(struct work_struct *work);
static void rhine_slow_event_task(struct work_struct *work);
static void rhine_tx_timeout(struct net_device *dev);
static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
				  struct net_device *dev);
static irqreturn_t rhine_interrupt(int irq, void *dev_instance);
static void rhine_tx(struct net_device *dev);
static int rhine_rx(struct net_device *dev, int limit);
static void rhine_set_rx_mode(struct net_device *dev);
static struct rtnl_link_stats64 *rhine_get_stats64(struct net_device *dev,
	       struct rtnl_link_stats64 *stats);
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static const struct ethtool_ops netdev_ethtool_ops;
static int  rhine_close(struct net_device *dev);
static int rhine_vlan_rx_add_vid(struct net_device *dev,
				 __be16 proto, u16 vid);
static int rhine_vlan_rx_kill_vid(struct net_device *dev,
				  __be16 proto, u16 vid);
static void rhine_restart_tx(struct net_device *dev);

static void rhine_wait_bit(struct rhine_private *rp, u8 reg, u8 mask, bool low)
{
	void __iomem *ioaddr = rp->base;
	int i;

	for (i = 0; i < 1024; i++) {
		bool has_mask_bits = !!(ioread8(ioaddr + reg) & mask);

		if (low ^ has_mask_bits)
			break;
		udelay(10);
	}
	if (i > 64) {
		netif_dbg(rp, hw, rp->dev, "%s bit wait (%02x/%02x) cycle "
			  "count: %04d\n", low ? "low" : "high", reg, mask, i);
	}
}

static void rhine_wait_bit_high(struct rhine_private *rp, u8 reg, u8 mask)
{
	rhine_wait_bit(rp, reg, mask, false);
}

static void rhine_wait_bit_low(struct rhine_private *rp, u8 reg, u8 mask)
{
	rhine_wait_bit(rp, reg, mask, true);
}

static u32 rhine_get_events(struct rhine_private *rp)
{
	void __iomem *ioaddr = rp->base;
	u32 intr_status;

	intr_status = ioread16(ioaddr + IntrStatus);
	/* On Rhine-II, Bit 3 indicates Tx descriptor write-back race. */
	if (rp->quirks & rqStatusWBRace)
		intr_status |= ioread8(ioaddr + IntrStatus2) << 16;
	return intr_status;
}

static void rhine_ack_events(struct rhine_private *rp, u32 mask)
{
	void __iomem *ioaddr = rp->base;

	if (rp->quirks & rqStatusWBRace)
		iowrite8(mask >> 16, ioaddr + IntrStatus2);
	iowrite16(mask, ioaddr + IntrStatus);
	mmiowb();
}

/*
 * Get power related registers into sane state.
 * Notify user about past WOL event.
 */
static void rhine_power_init(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	u16 wolstat;

	if (rp->quirks & rqWOL) {
		/* Make sure chip is in power state D0 */
		iowrite8(ioread8(ioaddr + StickyHW) & 0xFC, ioaddr + StickyHW);

		/* Disable "force PME-enable" */
		iowrite8(0x80, ioaddr + WOLcgClr);

		/* Clear power-event config bits (WOL) */
		iowrite8(0xFF, ioaddr + WOLcrClr);
		/* More recent cards can manage two additional patterns */
		if (rp->quirks & rq6patterns)
			iowrite8(0x03, ioaddr + WOLcrClr1);

		/* Save power-event status bits */
		wolstat = ioread8(ioaddr + PwrcsrSet);
		if (rp->quirks & rq6patterns)
			wolstat |= (ioread8(ioaddr + PwrcsrSet1) & 0x03) << 8;

		/* Clear power-event status bits */
		iowrite8(0xFF, ioaddr + PwrcsrClr);
		if (rp->quirks & rq6patterns)
			iowrite8(0x03, ioaddr + PwrcsrClr1);

		if (wolstat) {
			char *reason;
			switch (wolstat) {
			case WOLmagic:
				reason = "Magic packet";
				break;
			case WOLlnkon:
				reason = "Link went up";
				break;
			case WOLlnkoff:
				reason = "Link went down";
				break;
			case WOLucast:
				reason = "Unicast packet";
				break;
			case WOLbmcast:
				reason = "Multicast/broadcast packet";
				break;
			default:
				reason = "Unknown";
			}
			netdev_info(dev, "Woke system up. Reason: %s\n",
				    reason);
		}
	}
}

static void rhine_chip_reset(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	u8 cmd1;

	iowrite8(Cmd1Reset, ioaddr + ChipCmd1);
	IOSYNC;

	if (ioread8(ioaddr + ChipCmd1) & Cmd1Reset) {
		netdev_info(dev, "Reset not complete yet. Trying harder.\n");

		/* Force reset */
		if (rp->quirks & rqForceReset)
			iowrite8(0x40, ioaddr + MiscCmd);

		/* Reset can take somewhat longer (rare) */
		rhine_wait_bit_low(rp, ChipCmd1, Cmd1Reset);
	}

	cmd1 = ioread8(ioaddr + ChipCmd1);
	netif_info(rp, hw, dev, "Reset %s\n", (cmd1 & Cmd1Reset) ?
		   "failed" : "succeeded");
}

static void enable_mmio(long pioaddr, u32 quirks)
{
	int n;

	if (quirks & rqNeedEnMMIO) {
		if (quirks & rqRhineI) {
			/* More recent docs say that this bit is reserved */
			n = inb(pioaddr + ConfigA) | 0x20;
			outb(n, pioaddr + ConfigA);
		} else {
			n = inb(pioaddr + ConfigD) | 0x80;
			outb(n, pioaddr + ConfigD);
		}
	}
}

static inline int verify_mmio(struct device *hwdev,
			      long pioaddr,
			      void __iomem *ioaddr,
			      u32 quirks)
{
	if (quirks & rqNeedEnMMIO) {
		int i = 0;

		/* Check that selected MMIO registers match the PIO ones */
		while (mmio_verify_registers[i]) {
			int reg = mmio_verify_registers[i++];
			unsigned char a = inb(pioaddr+reg);
			unsigned char b = readb(ioaddr+reg);

			if (a != b) {
				dev_err(hwdev,
					"MMIO do not match PIO [%02x] (%02x != %02x)\n",
					reg, a, b);
				return -EIO;
			}
		}
	}
	return 0;
}

/*
 * Loads bytes 0x00-0x05, 0x6E-0x6F, 0x78-0x7B from EEPROM
 * (plus 0x6C for Rhine-I/II)
 */
static void rhine_reload_eeprom(long pioaddr, struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	int i;

	outb(0x20, pioaddr + MACRegEEcsr);
	for (i = 0; i < 1024; i++) {
		if (!(inb(pioaddr + MACRegEEcsr) & 0x20))
			break;
	}
	if (i > 512)
		pr_info("%4d cycles used @ %s:%d\n", i, __func__, __LINE__);

	/*
	 * Reloading from EEPROM overwrites ConfigA-D, so we must re-enable
	 * MMIO. If reloading EEPROM was done first this could be avoided, but
	 * it is not known if that still works with the "win98-reboot" problem.
	 */
	enable_mmio(pioaddr, rp->quirks);

	/* Turn off EEPROM-controlled wake-up (magic packet) */
	if (rp->quirks & rqWOL)
		iowrite8(ioread8(ioaddr + ConfigA) & 0xFC, ioaddr + ConfigA);

}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void rhine_poll(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	const int irq = rp->irq;

	disable_irq(irq);
	rhine_interrupt(irq, dev);
	enable_irq(irq);
}
#endif

static void rhine_kick_tx_threshold(struct rhine_private *rp)
{
	if (rp->tx_thresh < 0xe0) {
		void __iomem *ioaddr = rp->base;

		rp->tx_thresh += 0x20;
		BYTE_REG_BITS_SET(rp->tx_thresh, 0x80, ioaddr + TxConfig);
	}
}

static void rhine_tx_err(struct rhine_private *rp, u32 status)
{
	struct net_device *dev = rp->dev;

	if (status & IntrTxAborted) {
		netif_info(rp, tx_err, dev,
			   "Abort %08x, frame dropped\n", status);
	}

	if (status & IntrTxUnderrun) {
		rhine_kick_tx_threshold(rp);
		netif_info(rp, tx_err, dev, "Transmitter underrun, "
			   "Tx threshold now %02x\n", rp->tx_thresh);
	}

	if (status & IntrTxDescRace)
		netif_info(rp, tx_err, dev, "Tx descriptor write-back race\n");

	if ((status & IntrTxError) &&
	    (status & (IntrTxAborted | IntrTxUnderrun | IntrTxDescRace)) == 0) {
		rhine_kick_tx_threshold(rp);
		netif_info(rp, tx_err, dev, "Unspecified error. "
			   "Tx threshold now %02x\n", rp->tx_thresh);
	}

	rhine_restart_tx(dev);
}

static void rhine_update_rx_crc_and_missed_errors(struct rhine_private *rp)
{
	void __iomem *ioaddr = rp->base;
	struct net_device_stats *stats = &rp->dev->stats;

	stats->rx_crc_errors    += ioread16(ioaddr + RxCRCErrs);
	stats->rx_missed_errors += ioread16(ioaddr + RxMissed);

	/*
	 * Clears the "tally counters" for CRC errors and missed frames(?).
	 * It has been reported that some chips need a write of 0 to clear
	 * these, for others the counters are set to 1 when written to and
	 * instead cleared when read. So we clear them both ways ...
	 */
	iowrite32(0, ioaddr + RxMissed);
	ioread16(ioaddr + RxCRCErrs);
	ioread16(ioaddr + RxMissed);
}

#define RHINE_EVENT_NAPI_RX	(IntrRxDone | \
				 IntrRxErr | \
				 IntrRxEmpty | \
				 IntrRxOverflow	| \
				 IntrRxDropped | \
				 IntrRxNoBuf | \
				 IntrRxWakeUp)

#define RHINE_EVENT_NAPI_TX_ERR	(IntrTxError | \
				 IntrTxAborted | \
				 IntrTxUnderrun | \
				 IntrTxDescRace)
#define RHINE_EVENT_NAPI_TX	(IntrTxDone | RHINE_EVENT_NAPI_TX_ERR)

#define RHINE_EVENT_NAPI	(RHINE_EVENT_NAPI_RX | \
				 RHINE_EVENT_NAPI_TX | \
				 IntrStatsMax)
#define RHINE_EVENT_SLOW	(IntrPCIErr | IntrLinkChange)
#define RHINE_EVENT		(RHINE_EVENT_NAPI | RHINE_EVENT_SLOW)

static int rhine_napipoll(struct napi_struct *napi, int budget)
{
	struct rhine_private *rp = container_of(napi, struct rhine_private, napi);
	struct net_device *dev = rp->dev;
	void __iomem *ioaddr = rp->base;
	u16 enable_mask = RHINE_EVENT & 0xffff;
	int work_done = 0;
	u32 status;

	status = rhine_get_events(rp);
	rhine_ack_events(rp, status & ~RHINE_EVENT_SLOW);

	if (status & RHINE_EVENT_NAPI_RX)
		work_done += rhine_rx(dev, budget);

	if (status & RHINE_EVENT_NAPI_TX) {
		if (status & RHINE_EVENT_NAPI_TX_ERR) {
			/* Avoid scavenging before Tx engine turned off */
			rhine_wait_bit_low(rp, ChipCmd, CmdTxOn);
			if (ioread8(ioaddr + ChipCmd) & CmdTxOn)
				netif_warn(rp, tx_err, dev, "Tx still on\n");
		}

		rhine_tx(dev);

		if (status & RHINE_EVENT_NAPI_TX_ERR)
			rhine_tx_err(rp, status);
	}

	if (status & IntrStatsMax) {
		spin_lock(&rp->lock);
		rhine_update_rx_crc_and_missed_errors(rp);
8577ab87ff4SFrancois Romieu 		spin_unlock(&rp->lock);
8587ab87ff4SFrancois Romieu 	}
8597ab87ff4SFrancois Romieu 
8607ab87ff4SFrancois Romieu 	if (status & RHINE_EVENT_SLOW) {
8617ab87ff4SFrancois Romieu 		enable_mask &= ~RHINE_EVENT_SLOW;
8627ab87ff4SFrancois Romieu 		schedule_work(&rp->slow_event_task);
8637ab87ff4SFrancois Romieu 	}
864f2148a47SJeff Kirsher 
865f2148a47SJeff Kirsher 	if (work_done < budget) {
866f2148a47SJeff Kirsher 		napi_complete(napi);
8677ab87ff4SFrancois Romieu 		iowrite16(enable_mask, ioaddr + IntrEnable);
8687ab87ff4SFrancois Romieu 		mmiowb();
869f2148a47SJeff Kirsher 	}
870f2148a47SJeff Kirsher 	return work_done;
871f2148a47SJeff Kirsher }
872f2148a47SJeff Kirsher 
87376e239e1SBill Pemberton static void rhine_hw_init(struct net_device *dev, long pioaddr)
874f2148a47SJeff Kirsher {
875f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
876f2148a47SJeff Kirsher 
877f2148a47SJeff Kirsher 	/* Reset the chip to erase previous misconfiguration. */
878f2148a47SJeff Kirsher 	rhine_chip_reset(dev);
879f2148a47SJeff Kirsher 
880f2148a47SJeff Kirsher 	/* Rhine-I needs extra time to recuperate before EEPROM reload */
881f2148a47SJeff Kirsher 	if (rp->quirks & rqRhineI)
882f2148a47SJeff Kirsher 		msleep(5);
883f2148a47SJeff Kirsher 
884f2148a47SJeff Kirsher 	/* Reload EEPROM controlled bytes cleared by soft reset */
8852d283862SAlexey Charkov 	if (dev_is_pci(dev->dev.parent))
886f2148a47SJeff Kirsher 		rhine_reload_eeprom(pioaddr, dev);
887f2148a47SJeff Kirsher }
888f2148a47SJeff Kirsher 
889f2148a47SJeff Kirsher static const struct net_device_ops rhine_netdev_ops = {
890f2148a47SJeff Kirsher 	.ndo_open		 = rhine_open,
891f2148a47SJeff Kirsher 	.ndo_stop		 = rhine_close,
892f2148a47SJeff Kirsher 	.ndo_start_xmit		 = rhine_start_tx,
893f7b5d1b9SJamie Gloudon 	.ndo_get_stats64	 = rhine_get_stats64,
894afc4b13dSJiri Pirko 	.ndo_set_rx_mode	 = rhine_set_rx_mode,
895f2148a47SJeff Kirsher 	.ndo_change_mtu		 = eth_change_mtu,
896f2148a47SJeff Kirsher 	.ndo_validate_addr	 = eth_validate_addr,
897f2148a47SJeff Kirsher 	.ndo_set_mac_address 	 = eth_mac_addr,
898f2148a47SJeff Kirsher 	.ndo_do_ioctl		 = netdev_ioctl,
899f2148a47SJeff Kirsher 	.ndo_tx_timeout 	 = rhine_tx_timeout,
900f2148a47SJeff Kirsher 	.ndo_vlan_rx_add_vid	 = rhine_vlan_rx_add_vid,
901f2148a47SJeff Kirsher 	.ndo_vlan_rx_kill_vid	 = rhine_vlan_rx_kill_vid,
902f2148a47SJeff Kirsher #ifdef CONFIG_NET_POLL_CONTROLLER
903f2148a47SJeff Kirsher 	.ndo_poll_controller	 = rhine_poll,
904f2148a47SJeff Kirsher #endif
905f2148a47SJeff Kirsher };
906f2148a47SJeff Kirsher 
907ca8b6e04SAlexey Charkov static int rhine_init_one_common(struct device *hwdev, u32 quirks,
9082d283862SAlexey Charkov 				 long pioaddr, void __iomem *ioaddr, int irq)
909f2148a47SJeff Kirsher {
910f2148a47SJeff Kirsher 	struct net_device *dev;
911f2148a47SJeff Kirsher 	struct rhine_private *rp;
9122d283862SAlexey Charkov 	int i, rc, phy_id;
913f2148a47SJeff Kirsher 	const char *name;
914f2148a47SJeff Kirsher 
915f2148a47SJeff Kirsher 	/* this should always be supported */
916f7630d18SAlexey Charkov 	rc = dma_set_mask(hwdev, DMA_BIT_MASK(32));
917f2148a47SJeff Kirsher 	if (rc) {
918f7630d18SAlexey Charkov 		dev_err(hwdev, "32-bit DMA addresses not supported by the card!?\n");
9192d283862SAlexey Charkov 		goto err_out;
920f2148a47SJeff Kirsher 	}
921f2148a47SJeff Kirsher 
922f2148a47SJeff Kirsher 	dev = alloc_etherdev(sizeof(struct rhine_private));
923f2148a47SJeff Kirsher 	if (!dev) {
924f2148a47SJeff Kirsher 		rc = -ENOMEM;
9252d283862SAlexey Charkov 		goto err_out;
926f2148a47SJeff Kirsher 	}
927f7630d18SAlexey Charkov 	SET_NETDEV_DEV(dev, hwdev);
928f2148a47SJeff Kirsher 
929f2148a47SJeff Kirsher 	rp = netdev_priv(dev);
930f2148a47SJeff Kirsher 	rp->dev = dev;
931ca8b6e04SAlexey Charkov 	rp->quirks = quirks;
932f2148a47SJeff Kirsher 	rp->pioaddr = pioaddr;
9332d283862SAlexey Charkov 	rp->base = ioaddr;
9342d283862SAlexey Charkov 	rp->irq = irq;
935fc3e0f8aSFrancois Romieu 	rp->msg_enable = netif_msg_init(debug, RHINE_MSG_DEFAULT);
936f2148a47SJeff Kirsher 
937ca8b6e04SAlexey Charkov 	phy_id = rp->quirks & rqIntPHY ? 1 : 0;
938f2148a47SJeff Kirsher 
939827da44cSJohn Stultz 	u64_stats_init(&rp->tx_stats.syncp);
940827da44cSJohn Stultz 	u64_stats_init(&rp->rx_stats.syncp);
941827da44cSJohn Stultz 
942f2148a47SJeff Kirsher 	/* Get chip registers into a sane state */
943f2148a47SJeff Kirsher 	rhine_power_init(dev);
944f2148a47SJeff Kirsher 	rhine_hw_init(dev, pioaddr);
945f2148a47SJeff Kirsher 
946f2148a47SJeff Kirsher 	for (i = 0; i < 6; i++)
947f2148a47SJeff Kirsher 		dev->dev_addr[i] = ioread8(ioaddr + StationAddr + i);
948f2148a47SJeff Kirsher 
949f2148a47SJeff Kirsher 	if (!is_valid_ether_addr(dev->dev_addr)) {
950f2148a47SJeff Kirsher 		/* Report it and use a random ethernet address instead */
951f2148a47SJeff Kirsher 		netdev_err(dev, "Invalid MAC address: %pM\n", dev->dev_addr);
952f2cedb63SDanny Kukawka 		eth_hw_addr_random(dev);
953f2148a47SJeff Kirsher 		netdev_info(dev, "Using random MAC address: %pM\n",
954f2148a47SJeff Kirsher 			    dev->dev_addr);
955f2148a47SJeff Kirsher 	}
956f2148a47SJeff Kirsher 
957f2148a47SJeff Kirsher 	/* For Rhine-I/II, phy_id is loaded from EEPROM */
958f2148a47SJeff Kirsher 	if (!phy_id)
959f2148a47SJeff Kirsher 		phy_id = ioread8(ioaddr + 0x6C);
960f2148a47SJeff Kirsher 
961f2148a47SJeff Kirsher 	spin_lock_init(&rp->lock);
9627ab87ff4SFrancois Romieu 	mutex_init(&rp->task_lock);
963f2148a47SJeff Kirsher 	INIT_WORK(&rp->reset_task, rhine_reset_task);
9647ab87ff4SFrancois Romieu 	INIT_WORK(&rp->slow_event_task, rhine_slow_event_task);
965f2148a47SJeff Kirsher 
966f2148a47SJeff Kirsher 	rp->mii_if.dev = dev;
967f2148a47SJeff Kirsher 	rp->mii_if.mdio_read = mdio_read;
968f2148a47SJeff Kirsher 	rp->mii_if.mdio_write = mdio_write;
969f2148a47SJeff Kirsher 	rp->mii_if.phy_id_mask = 0x1f;
970f2148a47SJeff Kirsher 	rp->mii_if.reg_num_mask = 0x1f;
971f2148a47SJeff Kirsher 
972f2148a47SJeff Kirsher 	/* The chip-specific entries in the device structure. */
973f2148a47SJeff Kirsher 	dev->netdev_ops = &rhine_netdev_ops;
974e76070f2Swangweidong 	dev->ethtool_ops = &netdev_ethtool_ops;
975f2148a47SJeff Kirsher 	dev->watchdog_timeo = TX_TIMEOUT;
976f2148a47SJeff Kirsher 
977f2148a47SJeff Kirsher 	netif_napi_add(dev, &rp->napi, rhine_napipoll, 64);
978f2148a47SJeff Kirsher 
979f2148a47SJeff Kirsher 	if (rp->quirks & rqRhineI)
980f2148a47SJeff Kirsher 		dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM;
981f2148a47SJeff Kirsher 
982ca8b6e04SAlexey Charkov 	if (rp->quirks & rqMgmt)
983f646968fSPatrick McHardy 		dev->features |= NETIF_F_HW_VLAN_CTAG_TX |
984f646968fSPatrick McHardy 				 NETIF_F_HW_VLAN_CTAG_RX |
985f646968fSPatrick McHardy 				 NETIF_F_HW_VLAN_CTAG_FILTER;
986f2148a47SJeff Kirsher 
987f2148a47SJeff Kirsher 	/* dev->name not defined before register_netdev()! */
988f2148a47SJeff Kirsher 	rc = register_netdev(dev);
989f2148a47SJeff Kirsher 	if (rc)
9902d283862SAlexey Charkov 		goto err_out_free_netdev;
991f2148a47SJeff Kirsher 
992ca8b6e04SAlexey Charkov 	if (rp->quirks & rqRhineI)
993ca8b6e04SAlexey Charkov 		name = "Rhine";
994ca8b6e04SAlexey Charkov 	else if (rp->quirks & rqStatusWBRace)
995ca8b6e04SAlexey Charkov 		name = "Rhine II";
996ca8b6e04SAlexey Charkov 	else if (rp->quirks & rqMgmt)
997ca8b6e04SAlexey Charkov 		name = "Rhine III (Management Adapter)";
998ca8b6e04SAlexey Charkov 	else
999ca8b6e04SAlexey Charkov 		name = "Rhine III";
1000ca8b6e04SAlexey Charkov 
1001f2148a47SJeff Kirsher 	netdev_info(dev, "VIA %s at 0x%lx, %pM, IRQ %d\n",
10022d283862SAlexey Charkov 		    name, (long)ioaddr, dev->dev_addr, rp->irq);
1003f2148a47SJeff Kirsher 
1004f7630d18SAlexey Charkov 	dev_set_drvdata(hwdev, dev);
1005f2148a47SJeff Kirsher 
1006f2148a47SJeff Kirsher 	{
1007f2148a47SJeff Kirsher 		u16 mii_cmd;
1008f2148a47SJeff Kirsher 		int mii_status = mdio_read(dev, phy_id, 1);
1009f2148a47SJeff Kirsher 		mii_cmd = mdio_read(dev, phy_id, MII_BMCR) & ~BMCR_ISOLATE;
1010f2148a47SJeff Kirsher 		mdio_write(dev, phy_id, MII_BMCR, mii_cmd);
1011f2148a47SJeff Kirsher 		if (mii_status != 0xffff && mii_status != 0x0000) {
1012f2148a47SJeff Kirsher 			rp->mii_if.advertising = mdio_read(dev, phy_id, 4);
1013f2148a47SJeff Kirsher 			netdev_info(dev,
1014f2148a47SJeff Kirsher 				    "MII PHY found at address %d, status 0x%04x advertising %04x Link %04x\n",
1015f2148a47SJeff Kirsher 				    phy_id,
1016f2148a47SJeff Kirsher 				    mii_status, rp->mii_if.advertising,
1017f2148a47SJeff Kirsher 				    mdio_read(dev, phy_id, 5));
1018f2148a47SJeff Kirsher 
1019f2148a47SJeff Kirsher 			/* set IFF_RUNNING */
1020f2148a47SJeff Kirsher 			if (mii_status & BMSR_LSTATUS)
1021f2148a47SJeff Kirsher 				netif_carrier_on(dev);
1022f2148a47SJeff Kirsher 			else
1023f2148a47SJeff Kirsher 				netif_carrier_off(dev);
1024f2148a47SJeff Kirsher 
1025f2148a47SJeff Kirsher 		}
1026f2148a47SJeff Kirsher 	}
1027f2148a47SJeff Kirsher 	rp->mii_if.phy_id = phy_id;
1028fc3e0f8aSFrancois Romieu 	if (avoid_D3)
1029fc3e0f8aSFrancois Romieu 		netif_info(rp, probe, dev, "No D3 power state at shutdown\n");
1030f2148a47SJeff Kirsher 
1031f2148a47SJeff Kirsher 	return 0;
1032f2148a47SJeff Kirsher 
10332d283862SAlexey Charkov err_out_free_netdev:
10342d283862SAlexey Charkov 	free_netdev(dev);
10352d283862SAlexey Charkov err_out:
10362d283862SAlexey Charkov 	return rc;
10372d283862SAlexey Charkov }
10382d283862SAlexey Charkov 
10392d283862SAlexey Charkov static int rhine_init_one_pci(struct pci_dev *pdev,
10402d283862SAlexey Charkov 			      const struct pci_device_id *ent)
10412d283862SAlexey Charkov {
10422d283862SAlexey Charkov 	struct device *hwdev = &pdev->dev;
10435b579e21SAlexey Charkov 	int rc;
10442d283862SAlexey Charkov 	long pioaddr, memaddr;
10452d283862SAlexey Charkov 	void __iomem *ioaddr;
10462d283862SAlexey Charkov 	int io_size = pdev->revision < VTunknown0 ? 128 : 256;
10475b579e21SAlexey Charkov 
10485b579e21SAlexey Charkov /* This driver was written to use PCI memory space. Some early versions
10495b579e21SAlexey Charkov  * of the Rhine may only work correctly with I/O space accesses.
10505b579e21SAlexey Charkov  * TODO: determine for which revisions this is true and set the flag
10515b579e21SAlexey Charkov  *	 in code rather than via this Kconfig option.
10525b579e21SAlexey Charkov  */
10535b579e21SAlexey Charkov #ifdef CONFIG_VIA_RHINE_MMIO
10545b579e21SAlexey Charkov 	u32 quirks = rqNeedEnMMIO;
10552d283862SAlexey Charkov #else
10565b579e21SAlexey Charkov 	u32 quirks = 0;
10572d283862SAlexey Charkov #endif
10582d283862SAlexey Charkov 
10592d283862SAlexey Charkov /* when built into the kernel, we only print version if device is found */
10602d283862SAlexey Charkov #ifndef MODULE
10612d283862SAlexey Charkov 	pr_info_once("%s\n", version);
10622d283862SAlexey Charkov #endif
10632d283862SAlexey Charkov 
10642d283862SAlexey Charkov 	rc = pci_enable_device(pdev);
10652d283862SAlexey Charkov 	if (rc)
10662d283862SAlexey Charkov 		goto err_out;
10672d283862SAlexey Charkov 
1068ca8b6e04SAlexey Charkov 	if (pdev->revision < VTunknown0) {
10695b579e21SAlexey Charkov 		quirks |= rqRhineI;
1070ca8b6e04SAlexey Charkov 	} else if (pdev->revision >= VT6102) {
10715b579e21SAlexey Charkov 		quirks |= rqWOL | rqForceReset;
1072ca8b6e04SAlexey Charkov 		if (pdev->revision < VT6105) {
1073ca8b6e04SAlexey Charkov 			quirks |= rqStatusWBRace;
1074ca8b6e04SAlexey Charkov 		} else {
1075ca8b6e04SAlexey Charkov 			quirks |= rqIntPHY;
1076ca8b6e04SAlexey Charkov 			if (pdev->revision >= VT6105_B0)
1077ca8b6e04SAlexey Charkov 				quirks |= rq6patterns;
1078ca8b6e04SAlexey Charkov 			if (pdev->revision >= VT6105M)
1079ca8b6e04SAlexey Charkov 				quirks |= rqMgmt;
1080ca8b6e04SAlexey Charkov 		}
1081ca8b6e04SAlexey Charkov 	}
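
	/*
	 * Summary of the mapping above, as read from the code: revisions
	 * below VTunknown0 are Rhine-I parts; VT6102 and later gain WOL
	 * support and need a forced reset; of those, pre-VT6105 chips
	 * carry the status write-back race (Rhine-II), while VT6105 and
	 * later have an internal PHY, VT6105_B0 adds 6-pattern WOL
	 * matching, and VT6105M adds the management features that later
	 * enable hardware VLAN support. Revisions between VTunknown0 and
	 * VT6102 get no extra quirks.
	 */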
1082ca8b6e04SAlexey Charkov 
10832d283862SAlexey Charkov 	/* sanity check */
10842d283862SAlexey Charkov 	if ((pci_resource_len(pdev, 0) < io_size) ||
10852d283862SAlexey Charkov 	    (pci_resource_len(pdev, 1) < io_size)) {
10862d283862SAlexey Charkov 		rc = -EIO;
10872d283862SAlexey Charkov 		dev_err(hwdev, "Insufficient PCI resources, aborting\n");
10882d283862SAlexey Charkov 		goto err_out_pci_disable;
10892d283862SAlexey Charkov 	}
10902d283862SAlexey Charkov 
10912d283862SAlexey Charkov 	pioaddr = pci_resource_start(pdev, 0);
10922d283862SAlexey Charkov 	memaddr = pci_resource_start(pdev, 1);
10932d283862SAlexey Charkov 
10942d283862SAlexey Charkov 	pci_set_master(pdev);
10952d283862SAlexey Charkov 
10962d283862SAlexey Charkov 	rc = pci_request_regions(pdev, DRV_NAME);
10972d283862SAlexey Charkov 	if (rc)
10982d283862SAlexey Charkov 		goto err_out_pci_disable;
10992d283862SAlexey Charkov 
11005b579e21SAlexey Charkov 	ioaddr = pci_iomap(pdev, (quirks & rqNeedEnMMIO ? 1 : 0), io_size);
11012d283862SAlexey Charkov 	if (!ioaddr) {
11022d283862SAlexey Charkov 		rc = -EIO;
11032d283862SAlexey Charkov 		dev_err(hwdev,
11042d283862SAlexey Charkov 			"ioremap failed for device %s, region 0x%X @ 0x%lX\n",
11052d283862SAlexey Charkov 			dev_name(hwdev), io_size, memaddr);
11062d283862SAlexey Charkov 		goto err_out_free_res;
11072d283862SAlexey Charkov 	}
11082d283862SAlexey Charkov 
11092d283862SAlexey Charkov 	enable_mmio(pioaddr, quirks);
11102d283862SAlexey Charkov 
11115b579e21SAlexey Charkov 	rc = verify_mmio(hwdev, pioaddr, ioaddr, quirks);
11125b579e21SAlexey Charkov 	if (rc)
11132d283862SAlexey Charkov 		goto err_out_unmap;
11142d283862SAlexey Charkov 
1115ca8b6e04SAlexey Charkov 	rc = rhine_init_one_common(&pdev->dev, quirks,
11162d283862SAlexey Charkov 				   pioaddr, ioaddr, pdev->irq);
11172d283862SAlexey Charkov 	if (!rc)
11182d283862SAlexey Charkov 		return 0;
11192d283862SAlexey Charkov 
1120f2148a47SJeff Kirsher err_out_unmap:
1121f2148a47SJeff Kirsher 	pci_iounmap(pdev, ioaddr);
1122f2148a47SJeff Kirsher err_out_free_res:
1123f2148a47SJeff Kirsher 	pci_release_regions(pdev);
1124ae996154SRoger Luethi err_out_pci_disable:
1125ae996154SRoger Luethi 	pci_disable_device(pdev);
1126f2148a47SJeff Kirsher err_out:
1127f2148a47SJeff Kirsher 	return rc;
1128f2148a47SJeff Kirsher }
1129f2148a47SJeff Kirsher 
11302d283862SAlexey Charkov static int rhine_init_one_platform(struct platform_device *pdev)
11312d283862SAlexey Charkov {
11322d283862SAlexey Charkov 	const struct of_device_id *match;
1133ca8b6e04SAlexey Charkov 	const u32 *quirks;
11342d283862SAlexey Charkov 	int irq;
11352d283862SAlexey Charkov 	struct resource *res;
11362d283862SAlexey Charkov 	void __iomem *ioaddr;
11372d283862SAlexey Charkov 
11382d283862SAlexey Charkov 	match = of_match_device(rhine_of_tbl, &pdev->dev);
11392d283862SAlexey Charkov 	if (!match)
11402d283862SAlexey Charkov 		return -EINVAL;
11412d283862SAlexey Charkov 
11422d283862SAlexey Charkov 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
11432d283862SAlexey Charkov 	ioaddr = devm_ioremap_resource(&pdev->dev, res);
11442d283862SAlexey Charkov 	if (IS_ERR(ioaddr))
11452d283862SAlexey Charkov 		return PTR_ERR(ioaddr);
11462d283862SAlexey Charkov 
11472d283862SAlexey Charkov 	irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
11482d283862SAlexey Charkov 	if (!irq)
11492d283862SAlexey Charkov 		return -EINVAL;
11502d283862SAlexey Charkov 
1151ca8b6e04SAlexey Charkov 	quirks = match->data;
1152ca8b6e04SAlexey Charkov 	if (!quirks)
11532d283862SAlexey Charkov 		return -EINVAL;
11542d283862SAlexey Charkov 
1155ca8b6e04SAlexey Charkov 	return rhine_init_one_common(&pdev->dev, *quirks,
11562d283862SAlexey Charkov 				     (long)ioaddr, ioaddr, irq);
11572d283862SAlexey Charkov }
11582d283862SAlexey Charkov 
1159f2148a47SJeff Kirsher static int alloc_ring(struct net_device *dev)
1160f2148a47SJeff Kirsher {
1161f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
1162f7630d18SAlexey Charkov 	struct device *hwdev = dev->dev.parent;
1163f2148a47SJeff Kirsher 	void *ring;
1164f2148a47SJeff Kirsher 	dma_addr_t ring_dma;
1165f2148a47SJeff Kirsher 
1166f7630d18SAlexey Charkov 	ring = dma_alloc_coherent(hwdev,
1167f2148a47SJeff Kirsher 				  RX_RING_SIZE * sizeof(struct rx_desc) +
1168f2148a47SJeff Kirsher 				  TX_RING_SIZE * sizeof(struct tx_desc),
11694087c4dcSAlexey Charkov 				  &ring_dma,
11704087c4dcSAlexey Charkov 				  GFP_ATOMIC);
1171f2148a47SJeff Kirsher 	if (!ring) {
1172f2148a47SJeff Kirsher 		netdev_err(dev, "Could not allocate DMA memory\n");
1173f2148a47SJeff Kirsher 		return -ENOMEM;
1174f2148a47SJeff Kirsher 	}
1175f2148a47SJeff Kirsher 	if (rp->quirks & rqRhineI) {
1176f7630d18SAlexey Charkov 		rp->tx_bufs = dma_alloc_coherent(hwdev,
1177f2148a47SJeff Kirsher 						 PKT_BUF_SZ * TX_RING_SIZE,
11784087c4dcSAlexey Charkov 						 &rp->tx_bufs_dma,
11794087c4dcSAlexey Charkov 						 GFP_ATOMIC);
1180f2148a47SJeff Kirsher 		if (rp->tx_bufs == NULL) {
1181f7630d18SAlexey Charkov 			dma_free_coherent(hwdev,
1182f2148a47SJeff Kirsher 					  RX_RING_SIZE * sizeof(struct rx_desc) +
1183f2148a47SJeff Kirsher 					  TX_RING_SIZE * sizeof(struct tx_desc),
1184f2148a47SJeff Kirsher 					  ring, ring_dma);
1185f2148a47SJeff Kirsher 			return -ENOMEM;
1186f2148a47SJeff Kirsher 		}
1187f2148a47SJeff Kirsher 	}
1188f2148a47SJeff Kirsher 
1189f2148a47SJeff Kirsher 	rp->rx_ring = ring;
1190f2148a47SJeff Kirsher 	rp->tx_ring = ring + RX_RING_SIZE * sizeof(struct rx_desc);
1191f2148a47SJeff Kirsher 	rp->rx_ring_dma = ring_dma;
1192f2148a47SJeff Kirsher 	rp->tx_ring_dma = ring_dma + RX_RING_SIZE * sizeof(struct rx_desc);
1193f2148a47SJeff Kirsher 
1194f2148a47SJeff Kirsher 	return 0;
1195f2148a47SJeff Kirsher }
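
/*
 * Layout sketch of the combined allocation above (illustrative: each
 * descriptor is four little-endian 32-bit words, i.e. 16 bytes; see
 * RX_RING_SIZE and TX_RING_SIZE earlier in this file for the actual
 * ring counts):
 *
 *	ring_dma + 0                               -> rx_ring[0..RX_RING_SIZE-1]
 *	ring_dma + RX_RING_SIZE * sizeof(rx_desc)  -> tx_ring[0..TX_RING_SIZE-1]
 *
 * Keeping both rings in a single coherent region makes tx_ring and
 * tx_ring_dma plain offsets from the Rx base, as computed above.
 */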
1196f2148a47SJeff Kirsher 
1197f2148a47SJeff Kirsher static void free_ring(struct net_device *dev)
1198f2148a47SJeff Kirsher {
1199f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
1200f7630d18SAlexey Charkov 	struct device *hwdev = dev->dev.parent;
1201f2148a47SJeff Kirsher 
1202f7630d18SAlexey Charkov 	dma_free_coherent(hwdev,
1203f2148a47SJeff Kirsher 			  RX_RING_SIZE * sizeof(struct rx_desc) +
1204f2148a47SJeff Kirsher 			  TX_RING_SIZE * sizeof(struct tx_desc),
1205f2148a47SJeff Kirsher 			  rp->rx_ring, rp->rx_ring_dma);
1206f2148a47SJeff Kirsher 	rp->tx_ring = NULL;
1207f2148a47SJeff Kirsher 
1208f2148a47SJeff Kirsher 	if (rp->tx_bufs)
1209f7630d18SAlexey Charkov 		dma_free_coherent(hwdev, PKT_BUF_SZ * TX_RING_SIZE,
1210f2148a47SJeff Kirsher 				  rp->tx_bufs, rp->tx_bufs_dma);
1211f2148a47SJeff Kirsher 
1212f2148a47SJeff Kirsher 	rp->tx_bufs = NULL;
1214f2148a47SJeff Kirsher }
1215f2148a47SJeff Kirsher 
1216*a21bb8baSfrançois romieu struct rhine_skb_dma {
1217*a21bb8baSfrançois romieu 	struct sk_buff *skb;
1218*a21bb8baSfrançois romieu 	dma_addr_t dma;
1219*a21bb8baSfrançois romieu };
1220*a21bb8baSfrançois romieu 
1221*a21bb8baSfrançois romieu static inline int rhine_skb_dma_init(struct net_device *dev,
1222*a21bb8baSfrançois romieu 				     struct rhine_skb_dma *sd)
1223f2148a47SJeff Kirsher {
1224f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
1225f7630d18SAlexey Charkov 	struct device *hwdev = dev->dev.parent;
1226*a21bb8baSfrançois romieu 	const int size = rp->rx_buf_sz;
1227*a21bb8baSfrançois romieu 
1228*a21bb8baSfrançois romieu 	sd->skb = netdev_alloc_skb(dev, size);
1229*a21bb8baSfrançois romieu 	if (!sd->skb)
1230*a21bb8baSfrançois romieu 		return -ENOMEM;
1231*a21bb8baSfrançois romieu 
1232*a21bb8baSfrançois romieu 	sd->dma = dma_map_single(hwdev, sd->skb->data, size, DMA_FROM_DEVICE);
1233*a21bb8baSfrançois romieu 	if (unlikely(dma_mapping_error(hwdev, sd->dma))) {
1234*a21bb8baSfrançois romieu 		netif_err(rp, drv, dev, "Rx DMA mapping failure\n");
1235*a21bb8baSfrançois romieu 		dev_kfree_skb_any(sd->skb);
1236*a21bb8baSfrançois romieu 		return -EIO;
1237*a21bb8baSfrançois romieu 	}
1238*a21bb8baSfrançois romieu 
1239*a21bb8baSfrançois romieu 	return 0;
1240*a21bb8baSfrançois romieu }
1241*a21bb8baSfrançois romieu 
1242*a21bb8baSfrançois romieu static inline void rhine_skb_dma_nic_store(struct rhine_private *rp,
1243*a21bb8baSfrançois romieu 					   struct rhine_skb_dma *sd, int entry)
1244*a21bb8baSfrançois romieu {
1245*a21bb8baSfrançois romieu 	rp->rx_skbuff_dma[entry] = sd->dma;
1246*a21bb8baSfrançois romieu 	rp->rx_skbuff[entry] = sd->skb;
1247*a21bb8baSfrançois romieu 
1248*a21bb8baSfrançois romieu 	rp->rx_ring[entry].addr = cpu_to_le32(sd->dma);
1249*a21bb8baSfrançois romieu 	dma_wmb();
1250*a21bb8baSfrançois romieu }
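
/*
 * The dma_wmb() above orders the descriptor address write against the
 * later rx_status write that grants DescOwn to the chip (done by the
 * callers, see alloc_rbufs() and the refill loop in rhine_rx()), so the
 * device can never claim a descriptor whose buffer address is not yet
 * visible.
 */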
1251*a21bb8baSfrançois romieu 
1252*a21bb8baSfrançois romieu static void alloc_rbufs(struct net_device *dev)
1253*a21bb8baSfrançois romieu {
1254*a21bb8baSfrançois romieu 	struct rhine_private *rp = netdev_priv(dev);
1255f2148a47SJeff Kirsher 	dma_addr_t next;
1256*a21bb8baSfrançois romieu 	int rc, i;
1257f2148a47SJeff Kirsher 
1258f2148a47SJeff Kirsher 	rp->dirty_rx = rp->cur_rx = 0;
1259f2148a47SJeff Kirsher 
1260f2148a47SJeff Kirsher 	rp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
1261f2148a47SJeff Kirsher 	rp->rx_head_desc = &rp->rx_ring[0];
1262f2148a47SJeff Kirsher 	next = rp->rx_ring_dma;
1263f2148a47SJeff Kirsher 
1264f2148a47SJeff Kirsher 	/* Init the ring entries */
1265f2148a47SJeff Kirsher 	for (i = 0; i < RX_RING_SIZE; i++) {
1266f2148a47SJeff Kirsher 		rp->rx_ring[i].rx_status = 0;
1267f2148a47SJeff Kirsher 		rp->rx_ring[i].desc_length = cpu_to_le32(rp->rx_buf_sz);
1268f2148a47SJeff Kirsher 		next += sizeof(struct rx_desc);
1269f2148a47SJeff Kirsher 		rp->rx_ring[i].next_desc = cpu_to_le32(next);
1270f2148a47SJeff Kirsher 		rp->rx_skbuff[i] = NULL;
1271f2148a47SJeff Kirsher 	}
1272f2148a47SJeff Kirsher 	/* Mark the last entry as wrapping the ring. */
1273f2148a47SJeff Kirsher 	rp->rx_ring[i-1].next_desc = cpu_to_le32(rp->rx_ring_dma);
1274f2148a47SJeff Kirsher 
1275f2148a47SJeff Kirsher 	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
1276f2148a47SJeff Kirsher 	for (i = 0; i < RX_RING_SIZE; i++) {
1277*a21bb8baSfrançois romieu 		struct rhine_skb_dma sd;
1278*a21bb8baSfrançois romieu 
1279*a21bb8baSfrançois romieu 		rc = rhine_skb_dma_init(dev, &sd);
1280*a21bb8baSfrançois romieu 		if (rc < 0)
1281f2148a47SJeff Kirsher 			break;
1282f2148a47SJeff Kirsher 
1283*a21bb8baSfrançois romieu 		rhine_skb_dma_nic_store(rp, &sd, i);
1284*a21bb8baSfrançois romieu 
1285f2148a47SJeff Kirsher 		rp->rx_ring[i].rx_status = cpu_to_le32(DescOwn);
1286f2148a47SJeff Kirsher 	}
1287f2148a47SJeff Kirsher 	rp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
1288f2148a47SJeff Kirsher }
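
/*
 * Note on the final assignment above: with unsigned arithmetic a fully
 * populated ring (i == RX_RING_SIZE) leaves dirty_rx at 0, while a
 * partial allocation leaves it wrapped ("negative"), so the refill loop
 * condition in rhine_rx() (cur_rx - dirty_rx > 0) immediately retries
 * the slots that could not be filled here.
 */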
1289f2148a47SJeff Kirsher 
1290f2148a47SJeff Kirsher static void free_rbufs(struct net_device *dev)
1291f2148a47SJeff Kirsher {
1292f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
1293f7630d18SAlexey Charkov 	struct device *hwdev = dev->dev.parent;
1294f2148a47SJeff Kirsher 	int i;
1295f2148a47SJeff Kirsher 
1296f2148a47SJeff Kirsher 	/* Free all the skbuffs in the Rx queue. */
1297f2148a47SJeff Kirsher 	for (i = 0; i < RX_RING_SIZE; i++) {
1298f2148a47SJeff Kirsher 		rp->rx_ring[i].rx_status = 0;
1299f2148a47SJeff Kirsher 		rp->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
1300f2148a47SJeff Kirsher 		if (rp->rx_skbuff[i]) {
1301f7630d18SAlexey Charkov 			dma_unmap_single(hwdev,
1302f2148a47SJeff Kirsher 					 rp->rx_skbuff_dma[i],
13034087c4dcSAlexey Charkov 					 rp->rx_buf_sz, DMA_FROM_DEVICE);
1304f2148a47SJeff Kirsher 			dev_kfree_skb(rp->rx_skbuff[i]);
1305f2148a47SJeff Kirsher 		}
1306f2148a47SJeff Kirsher 		rp->rx_skbuff[i] = NULL;
1307f2148a47SJeff Kirsher 	}
1308f2148a47SJeff Kirsher }
1309f2148a47SJeff Kirsher 
1310f2148a47SJeff Kirsher static void alloc_tbufs(struct net_device *dev)
1311f2148a47SJeff Kirsher {
1312f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
1313f2148a47SJeff Kirsher 	dma_addr_t next;
1314f2148a47SJeff Kirsher 	int i;
1315f2148a47SJeff Kirsher 
1316f2148a47SJeff Kirsher 	rp->dirty_tx = rp->cur_tx = 0;
1317f2148a47SJeff Kirsher 	next = rp->tx_ring_dma;
1318f2148a47SJeff Kirsher 	for (i = 0; i < TX_RING_SIZE; i++) {
1319f2148a47SJeff Kirsher 		rp->tx_skbuff[i] = NULL;
1320f2148a47SJeff Kirsher 		rp->tx_ring[i].tx_status = 0;
1321f2148a47SJeff Kirsher 		rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
1322f2148a47SJeff Kirsher 		next += sizeof(struct tx_desc);
1323f2148a47SJeff Kirsher 		rp->tx_ring[i].next_desc = cpu_to_le32(next);
1324f2148a47SJeff Kirsher 		if (rp->quirks & rqRhineI)
1325f2148a47SJeff Kirsher 			rp->tx_buf[i] = &rp->tx_bufs[i * PKT_BUF_SZ];
1326f2148a47SJeff Kirsher 	}
1327f2148a47SJeff Kirsher 	rp->tx_ring[i-1].next_desc = cpu_to_le32(rp->tx_ring_dma);
1328f2148a47SJeff Kirsher 
132992bf2008STino Reichardt 	netdev_reset_queue(dev);
1330f2148a47SJeff Kirsher }
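
/*
 * On Rhine-I each Tx slot is pre-assigned a PKT_BUF_SZ slice of the
 * coherent tx_bufs area. The old silicon needs 4-byte-aligned, linear
 * Tx buffers, so the SG/HW_CSUM features advertised at probe time are
 * satisfied in rhine_start_tx() by copying awkward frames into
 * tx_buf[entry] with skb_copy_and_csum_dev().
 */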
1331f2148a47SJeff Kirsher 
1332f2148a47SJeff Kirsher static void free_tbufs(struct net_device *dev)
1333f2148a47SJeff Kirsher {
1334f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
1335f7630d18SAlexey Charkov 	struct device *hwdev = dev->dev.parent;
1336f2148a47SJeff Kirsher 	int i;
1337f2148a47SJeff Kirsher 
1338f2148a47SJeff Kirsher 	for (i = 0; i < TX_RING_SIZE; i++) {
1339f2148a47SJeff Kirsher 		rp->tx_ring[i].tx_status = 0;
1340f2148a47SJeff Kirsher 		rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
1341f2148a47SJeff Kirsher 		rp->tx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
1342f2148a47SJeff Kirsher 		if (rp->tx_skbuff[i]) {
1343f2148a47SJeff Kirsher 			if (rp->tx_skbuff_dma[i]) {
1344f7630d18SAlexey Charkov 				dma_unmap_single(hwdev,
1345f2148a47SJeff Kirsher 						 rp->tx_skbuff_dma[i],
1346f2148a47SJeff Kirsher 						 rp->tx_skbuff[i]->len,
13474087c4dcSAlexey Charkov 						 DMA_TO_DEVICE);
1348f2148a47SJeff Kirsher 			}
1349f2148a47SJeff Kirsher 			dev_kfree_skb(rp->tx_skbuff[i]);
1350f2148a47SJeff Kirsher 		}
1351f2148a47SJeff Kirsher 		rp->tx_skbuff[i] = NULL;
1352f2148a47SJeff Kirsher 		rp->tx_buf[i] = NULL;
1353f2148a47SJeff Kirsher 	}
1354f2148a47SJeff Kirsher }
1355f2148a47SJeff Kirsher 
1356f2148a47SJeff Kirsher static void rhine_check_media(struct net_device *dev, unsigned int init_media)
1357f2148a47SJeff Kirsher {
1358f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
1359f2148a47SJeff Kirsher 	void __iomem *ioaddr = rp->base;
1360f2148a47SJeff Kirsher 
13615bdc7380SBen Hutchings 	if (!rp->mii_if.force_media)
1362fc3e0f8aSFrancois Romieu 		mii_check_media(&rp->mii_if, netif_msg_link(rp), init_media);
1363f2148a47SJeff Kirsher 
1364f2148a47SJeff Kirsher 	if (rp->mii_if.full_duplex)
1365f2148a47SJeff Kirsher 	    iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1FDuplex,
1366f2148a47SJeff Kirsher 		   ioaddr + ChipCmd1);
1367f2148a47SJeff Kirsher 	else
1368f2148a47SJeff Kirsher 	    iowrite8(ioread8(ioaddr + ChipCmd1) & ~Cmd1FDuplex,
1369f2148a47SJeff Kirsher 		   ioaddr + ChipCmd1);
1370fc3e0f8aSFrancois Romieu 
1371fc3e0f8aSFrancois Romieu 	netif_info(rp, link, dev, "force_media %d, carrier %d\n",
1372f2148a47SJeff Kirsher 		   rp->mii_if.force_media, netif_carrier_ok(dev));
1373f2148a47SJeff Kirsher }
1374f2148a47SJeff Kirsher 
1375f2148a47SJeff Kirsher /* Called after the status of force_media has possibly changed */
1376f2148a47SJeff Kirsher static void rhine_set_carrier(struct mii_if_info *mii)
1377f2148a47SJeff Kirsher {
1378fc3e0f8aSFrancois Romieu 	struct net_device *dev = mii->dev;
1379fc3e0f8aSFrancois Romieu 	struct rhine_private *rp = netdev_priv(dev);
1380fc3e0f8aSFrancois Romieu 
1381f2148a47SJeff Kirsher 	if (mii->force_media) {
1382f2148a47SJeff Kirsher 		/* autoneg is off: Link is always assumed to be up */
1383fc3e0f8aSFrancois Romieu 		if (!netif_carrier_ok(dev))
1384fc3e0f8aSFrancois Romieu 			netif_carrier_on(dev);
138517958438SFrançois Cachereul 	}
138617958438SFrançois Cachereul 
1387fc3e0f8aSFrancois Romieu 	rhine_check_media(dev, 0);
1388fc3e0f8aSFrancois Romieu 
1389fc3e0f8aSFrancois Romieu 	netif_info(rp, link, dev, "force_media %d, carrier %d\n",
1390fc3e0f8aSFrancois Romieu 		   mii->force_media, netif_carrier_ok(dev));
1391f2148a47SJeff Kirsher }
1392f2148a47SJeff Kirsher 
1393f2148a47SJeff Kirsher /**
1394f2148a47SJeff Kirsher  * rhine_set_cam - set CAM multicast filters
1395f2148a47SJeff Kirsher  * @ioaddr: register block of this Rhine
1396f2148a47SJeff Kirsher  * @idx: multicast CAM index [0..MCAM_SIZE-1]
1397f2148a47SJeff Kirsher  * @addr: multicast address (6 bytes)
1398f2148a47SJeff Kirsher  *
1399f2148a47SJeff Kirsher  * Load addresses into multicast filters.
1400f2148a47SJeff Kirsher  */
1401f2148a47SJeff Kirsher static void rhine_set_cam(void __iomem *ioaddr, int idx, u8 *addr)
1402f2148a47SJeff Kirsher {
1403f2148a47SJeff Kirsher 	int i;
1404f2148a47SJeff Kirsher 
1405f2148a47SJeff Kirsher 	iowrite8(CAMC_CAMEN, ioaddr + CamCon);
1406f2148a47SJeff Kirsher 	wmb();
1407f2148a47SJeff Kirsher 
1408f2148a47SJeff Kirsher 	/* Paranoid -- idx out of range should never happen */
1409f2148a47SJeff Kirsher 	idx &= (MCAM_SIZE - 1);
1410f2148a47SJeff Kirsher 
1411f2148a47SJeff Kirsher 	iowrite8((u8) idx, ioaddr + CamAddr);
1412f2148a47SJeff Kirsher 
1413f2148a47SJeff Kirsher 	for (i = 0; i < 6; i++, addr++)
1414f2148a47SJeff Kirsher 		iowrite8(*addr, ioaddr + MulticastFilter0 + i);
1415f2148a47SJeff Kirsher 	udelay(10);
1416f2148a47SJeff Kirsher 	wmb();
1417f2148a47SJeff Kirsher 
1418f2148a47SJeff Kirsher 	iowrite8(CAMC_CAMWR | CAMC_CAMEN, ioaddr + CamCon);
1419f2148a47SJeff Kirsher 	udelay(10);
1420f2148a47SJeff Kirsher 
1421f2148a47SJeff Kirsher 	iowrite8(0, ioaddr + CamCon);
1422f2148a47SJeff Kirsher }
1423f2148a47SJeff Kirsher 
1424f2148a47SJeff Kirsher /**
1425f2148a47SJeff Kirsher  * rhine_set_vlan_cam - set CAM VLAN filters
1426f2148a47SJeff Kirsher  * @ioaddr: register block of this Rhine
1427f2148a47SJeff Kirsher  * @idx: VLAN CAM index [0..VCAM_SIZE-1]
1428f2148a47SJeff Kirsher  * @addr: VLAN ID (2 bytes)
1429f2148a47SJeff Kirsher  *
1430f2148a47SJeff Kirsher  * Load addresses into VLAN filters.
1431f2148a47SJeff Kirsher  */
1432f2148a47SJeff Kirsher static void rhine_set_vlan_cam(void __iomem *ioaddr, int idx, u8 *addr)
1433f2148a47SJeff Kirsher {
1434f2148a47SJeff Kirsher 	iowrite8(CAMC_CAMEN | CAMC_VCAMSL, ioaddr + CamCon);
1435f2148a47SJeff Kirsher 	wmb();
1436f2148a47SJeff Kirsher 
1437f2148a47SJeff Kirsher 	/* Paranoid -- idx out of range should never happen */
1438f2148a47SJeff Kirsher 	idx &= (VCAM_SIZE - 1);
1439f2148a47SJeff Kirsher 
1440f2148a47SJeff Kirsher 	iowrite8((u8) idx, ioaddr + CamAddr);
1441f2148a47SJeff Kirsher 
1442f2148a47SJeff Kirsher 	iowrite16(*((u16 *) addr), ioaddr + MulticastFilter0 + 6);
1443f2148a47SJeff Kirsher 	udelay(10);
1444f2148a47SJeff Kirsher 	wmb();
1445f2148a47SJeff Kirsher 
1446f2148a47SJeff Kirsher 	iowrite8(CAMC_CAMWR | CAMC_CAMEN, ioaddr + CamCon);
1447f2148a47SJeff Kirsher 	udelay(10);
1448f2148a47SJeff Kirsher 
1449f2148a47SJeff Kirsher 	iowrite8(0, ioaddr + CamCon);
1450f2148a47SJeff Kirsher }
1451f2148a47SJeff Kirsher 
1452f2148a47SJeff Kirsher /**
1453f2148a47SJeff Kirsher  * rhine_set_cam_mask - set multicast CAM mask
1454f2148a47SJeff Kirsher  * @ioaddr: register block of this Rhine
1455f2148a47SJeff Kirsher  * @mask: multicast CAM mask
1456f2148a47SJeff Kirsher  *
1457f2148a47SJeff Kirsher  * Mask sets multicast filters active/inactive.
1458f2148a47SJeff Kirsher  */
1459f2148a47SJeff Kirsher static void rhine_set_cam_mask(void __iomem *ioaddr, u32 mask)
1460f2148a47SJeff Kirsher {
1461f2148a47SJeff Kirsher 	iowrite8(CAMC_CAMEN, ioaddr + CamCon);
1462f2148a47SJeff Kirsher 	wmb();
1463f2148a47SJeff Kirsher 
1464f2148a47SJeff Kirsher 	/* write mask */
1465f2148a47SJeff Kirsher 	iowrite32(mask, ioaddr + CamMask);
1466f2148a47SJeff Kirsher 
1467f2148a47SJeff Kirsher 	/* disable CAMEN */
1468f2148a47SJeff Kirsher 	iowrite8(0, ioaddr + CamCon);
1469f2148a47SJeff Kirsher }
1470f2148a47SJeff Kirsher 
1471f2148a47SJeff Kirsher /**
1472f2148a47SJeff Kirsher  * rhine_set_vlan_cam_mask - set VLAN CAM mask
1473f2148a47SJeff Kirsher  * @ioaddr: register block of this Rhine
1474f2148a47SJeff Kirsher  * @mask: VLAN CAM mask
1475f2148a47SJeff Kirsher  *
1476f2148a47SJeff Kirsher  * Mask sets VLAN filters active/inactive.
1477f2148a47SJeff Kirsher  */
1478f2148a47SJeff Kirsher static void rhine_set_vlan_cam_mask(void __iomem *ioaddr, u32 mask)
1479f2148a47SJeff Kirsher {
1480f2148a47SJeff Kirsher 	iowrite8(CAMC_CAMEN | CAMC_VCAMSL, ioaddr + CamCon);
1481f2148a47SJeff Kirsher 	wmb();
1482f2148a47SJeff Kirsher 
1483f2148a47SJeff Kirsher 	/* write mask */
1484f2148a47SJeff Kirsher 	iowrite32(mask, ioaddr + CamMask);
1485f2148a47SJeff Kirsher 
1486f2148a47SJeff Kirsher 	/* disable CAMEN */
1487f2148a47SJeff Kirsher 	iowrite8(0, ioaddr + CamCon);
1488f2148a47SJeff Kirsher }
1489f2148a47SJeff Kirsher 
1490f2148a47SJeff Kirsher /**
1491f2148a47SJeff Kirsher  * rhine_init_cam_filter - initialize CAM filters
1492f2148a47SJeff Kirsher  * @dev: network device
1493f2148a47SJeff Kirsher  *
1494f2148a47SJeff Kirsher  * Initialize (disable) hardware VLAN and multicast support on this
1495f2148a47SJeff Kirsher  * Rhine.
1496f2148a47SJeff Kirsher  */
1497f2148a47SJeff Kirsher static void rhine_init_cam_filter(struct net_device *dev)
1498f2148a47SJeff Kirsher {
1499f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
1500f2148a47SJeff Kirsher 	void __iomem *ioaddr = rp->base;
1501f2148a47SJeff Kirsher 
1502f2148a47SJeff Kirsher 	/* Disable all CAMs */
1503f2148a47SJeff Kirsher 	rhine_set_vlan_cam_mask(ioaddr, 0);
1504f2148a47SJeff Kirsher 	rhine_set_cam_mask(ioaddr, 0);
1505f2148a47SJeff Kirsher 
1506f2148a47SJeff Kirsher 	/* disable hardware VLAN support */
1507f2148a47SJeff Kirsher 	BYTE_REG_BITS_ON(TCR_PQEN, ioaddr + TxConfig);
1508f2148a47SJeff Kirsher 	BYTE_REG_BITS_OFF(BCR1_VIDFR, ioaddr + PCIBusConfig1);
1509f2148a47SJeff Kirsher }
1510f2148a47SJeff Kirsher 
1511f2148a47SJeff Kirsher /**
1512f2148a47SJeff Kirsher  * rhine_update_vcam - update VLAN CAM filters
1513f2148a47SJeff Kirsher  * @dev: network device
1514f2148a47SJeff Kirsher  *
1515f2148a47SJeff Kirsher  * Update VLAN CAM filters to match configuration change.
1516f2148a47SJeff Kirsher  */
1517f2148a47SJeff Kirsher static void rhine_update_vcam(struct net_device *dev)
1518f2148a47SJeff Kirsher {
1519f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
1520f2148a47SJeff Kirsher 	void __iomem *ioaddr = rp->base;
1521f2148a47SJeff Kirsher 	u16 vid;
1522f2148a47SJeff Kirsher 	u32 vCAMmask = 0;	/* 32 vCAMs (6105M and better) */
1523f2148a47SJeff Kirsher 	unsigned int i = 0;
1524f2148a47SJeff Kirsher 
1525f2148a47SJeff Kirsher 	for_each_set_bit(vid, rp->active_vlans, VLAN_N_VID) {
1526f2148a47SJeff Kirsher 		rhine_set_vlan_cam(ioaddr, i, (u8 *)&vid);
1527f2148a47SJeff Kirsher 		vCAMmask |= 1 << i;
1528f2148a47SJeff Kirsher 		if (++i >= VCAM_SIZE)
1529f2148a47SJeff Kirsher 			break;
1530f2148a47SJeff Kirsher 	}
1531f2148a47SJeff Kirsher 	rhine_set_vlan_cam_mask(ioaddr, vCAMmask);
1532f2148a47SJeff Kirsher }
1533f2148a47SJeff Kirsher 
153480d5c368SPatrick McHardy static int rhine_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
1535f2148a47SJeff Kirsher {
1536f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
1537f2148a47SJeff Kirsher 
15387ab87ff4SFrancois Romieu 	spin_lock_bh(&rp->lock);
1539f2148a47SJeff Kirsher 	set_bit(vid, rp->active_vlans);
1540f2148a47SJeff Kirsher 	rhine_update_vcam(dev);
15417ab87ff4SFrancois Romieu 	spin_unlock_bh(&rp->lock);
15428e586137SJiri Pirko 	return 0;
1543f2148a47SJeff Kirsher }
1544f2148a47SJeff Kirsher 
154580d5c368SPatrick McHardy static int rhine_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
1546f2148a47SJeff Kirsher {
1547f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
1548f2148a47SJeff Kirsher 
15497ab87ff4SFrancois Romieu 	spin_lock_bh(&rp->lock);
1550f2148a47SJeff Kirsher 	clear_bit(vid, rp->active_vlans);
1551f2148a47SJeff Kirsher 	rhine_update_vcam(dev);
15527ab87ff4SFrancois Romieu 	spin_unlock_bh(&rp->lock);
15538e586137SJiri Pirko 	return 0;
1554f2148a47SJeff Kirsher }
1555f2148a47SJeff Kirsher 
1556f2148a47SJeff Kirsher static void init_registers(struct net_device *dev)
1557f2148a47SJeff Kirsher {
1558f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
1559f2148a47SJeff Kirsher 	void __iomem *ioaddr = rp->base;
1560f2148a47SJeff Kirsher 	int i;
1561f2148a47SJeff Kirsher 
1562f2148a47SJeff Kirsher 	for (i = 0; i < 6; i++)
1563f2148a47SJeff Kirsher 		iowrite8(dev->dev_addr[i], ioaddr + StationAddr + i);
1564f2148a47SJeff Kirsher 
1565f2148a47SJeff Kirsher 	/* Initialize other registers. */
1566f2148a47SJeff Kirsher 	iowrite16(0x0006, ioaddr + PCIBusConfig);	/* Tune configuration??? */
1567f2148a47SJeff Kirsher 	/* Configure initial FIFO thresholds. */
1568f2148a47SJeff Kirsher 	iowrite8(0x20, ioaddr + TxConfig);
1569f2148a47SJeff Kirsher 	rp->tx_thresh = 0x20;
1570f2148a47SJeff Kirsher 	rp->rx_thresh = 0x60;		/* Written in rhine_set_rx_mode(). */
1571f2148a47SJeff Kirsher 
1572f2148a47SJeff Kirsher 	iowrite32(rp->rx_ring_dma, ioaddr + RxRingPtr);
1573f2148a47SJeff Kirsher 	iowrite32(rp->tx_ring_dma, ioaddr + TxRingPtr);
1574f2148a47SJeff Kirsher 
1575f2148a47SJeff Kirsher 	rhine_set_rx_mode(dev);
1576f2148a47SJeff Kirsher 
1577ca8b6e04SAlexey Charkov 	if (rp->quirks & rqMgmt)
1578f2148a47SJeff Kirsher 		rhine_init_cam_filter(dev);
1579f2148a47SJeff Kirsher 
1580f2148a47SJeff Kirsher 	napi_enable(&rp->napi);
1581f2148a47SJeff Kirsher 
15827ab87ff4SFrancois Romieu 	iowrite16(RHINE_EVENT & 0xffff, ioaddr + IntrEnable);
1583f2148a47SJeff Kirsher 
1584f2148a47SJeff Kirsher 	iowrite16(CmdStart | CmdTxOn | CmdRxOn | (Cmd1NoTxPoll << 8),
1585f2148a47SJeff Kirsher 	       ioaddr + ChipCmd);
1586f2148a47SJeff Kirsher 	rhine_check_media(dev, 1);
1587f2148a47SJeff Kirsher }
1588f2148a47SJeff Kirsher 
1589f2148a47SJeff Kirsher /* Enable MII link status auto-polling (required for IntrLinkChange) */
1590a384a33bSFrancois Romieu static void rhine_enable_linkmon(struct rhine_private *rp)
1591f2148a47SJeff Kirsher {
1592a384a33bSFrancois Romieu 	void __iomem *ioaddr = rp->base;
1593a384a33bSFrancois Romieu 
1594f2148a47SJeff Kirsher 	iowrite8(0, ioaddr + MIICmd);
1595f2148a47SJeff Kirsher 	iowrite8(MII_BMSR, ioaddr + MIIRegAddr);
1596f2148a47SJeff Kirsher 	iowrite8(0x80, ioaddr + MIICmd);
1597f2148a47SJeff Kirsher 
1598a384a33bSFrancois Romieu 	rhine_wait_bit_high(rp, MIIRegAddr, 0x20);
1599f2148a47SJeff Kirsher 
1600f2148a47SJeff Kirsher 	iowrite8(MII_BMSR | 0x40, ioaddr + MIIRegAddr);
1601f2148a47SJeff Kirsher }
1602f2148a47SJeff Kirsher 
1603f2148a47SJeff Kirsher /* Disable MII link status auto-polling (required for MDIO access) */
1604a384a33bSFrancois Romieu static void rhine_disable_linkmon(struct rhine_private *rp)
1605f2148a47SJeff Kirsher {
1606a384a33bSFrancois Romieu 	void __iomem *ioaddr = rp->base;
1607a384a33bSFrancois Romieu 
1608f2148a47SJeff Kirsher 	iowrite8(0, ioaddr + MIICmd);
1609f2148a47SJeff Kirsher 
1610a384a33bSFrancois Romieu 	if (rp->quirks & rqRhineI) {
1611f2148a47SJeff Kirsher 		iowrite8(MII_BMSR, ioaddr + MIIRegAddr);
1612f2148a47SJeff Kirsher 
1613f2148a47SJeff Kirsher 		/* Can be called from ISR. Evil. */
1614f2148a47SJeff Kirsher 		mdelay(1);
1615f2148a47SJeff Kirsher 
1616f2148a47SJeff Kirsher 		/* 0x80 must be set immediately before turning it off */
1617f2148a47SJeff Kirsher 		iowrite8(0x80, ioaddr + MIICmd);
1618f2148a47SJeff Kirsher 
1619a384a33bSFrancois Romieu 		rhine_wait_bit_high(rp, MIIRegAddr, 0x20);
1620f2148a47SJeff Kirsher 
1621f2148a47SJeff Kirsher 		/* Heh. Now clear 0x80 again. */
1622f2148a47SJeff Kirsher 		iowrite8(0, ioaddr + MIICmd);
1623f2148a47SJeff Kirsher 	} else
1625a384a33bSFrancois Romieu 		rhine_wait_bit_high(rp, MIIRegAddr, 0x80);
1626f2148a47SJeff Kirsher }
1627f2148a47SJeff Kirsher 
1628f2148a47SJeff Kirsher /* Read and write over the MII Management Data I/O (MDIO) interface. */
1629f2148a47SJeff Kirsher 
1630f2148a47SJeff Kirsher static int mdio_read(struct net_device *dev, int phy_id, int regnum)
1631f2148a47SJeff Kirsher {
1632f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
1633f2148a47SJeff Kirsher 	void __iomem *ioaddr = rp->base;
1634f2148a47SJeff Kirsher 	int result;
1635f2148a47SJeff Kirsher 
1636a384a33bSFrancois Romieu 	rhine_disable_linkmon(rp);
1637f2148a47SJeff Kirsher 
1638f2148a47SJeff Kirsher 	/* rhine_disable_linkmon already cleared MIICmd */
1639f2148a47SJeff Kirsher 	iowrite8(phy_id, ioaddr + MIIPhyAddr);
1640f2148a47SJeff Kirsher 	iowrite8(regnum, ioaddr + MIIRegAddr);
1641f2148a47SJeff Kirsher 	iowrite8(0x40, ioaddr + MIICmd);		/* Trigger read */
1642a384a33bSFrancois Romieu 	rhine_wait_bit_low(rp, MIICmd, 0x40);
1643f2148a47SJeff Kirsher 	result = ioread16(ioaddr + MIIData);
1644f2148a47SJeff Kirsher 
1645a384a33bSFrancois Romieu 	rhine_enable_linkmon(rp);
1646f2148a47SJeff Kirsher 	return result;
1647f2148a47SJeff Kirsher }
1648f2148a47SJeff Kirsher 
1649f2148a47SJeff Kirsher static void mdio_write(struct net_device *dev, int phy_id, int regnum, int value)
1650f2148a47SJeff Kirsher {
1651f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
1652f2148a47SJeff Kirsher 	void __iomem *ioaddr = rp->base;
1653f2148a47SJeff Kirsher 
1654a384a33bSFrancois Romieu 	rhine_disable_linkmon(rp);
1655f2148a47SJeff Kirsher 
1656f2148a47SJeff Kirsher 	/* rhine_disable_linkmon already cleared MIICmd */
1657f2148a47SJeff Kirsher 	iowrite8(phy_id, ioaddr + MIIPhyAddr);
1658f2148a47SJeff Kirsher 	iowrite8(regnum, ioaddr + MIIRegAddr);
1659f2148a47SJeff Kirsher 	iowrite16(value, ioaddr + MIIData);
1660f2148a47SJeff Kirsher 	iowrite8(0x20, ioaddr + MIICmd);		/* Trigger write */
1661a384a33bSFrancois Romieu 	rhine_wait_bit_low(rp, MIICmd, 0x20);
1662f2148a47SJeff Kirsher 
1663a384a33bSFrancois Romieu 	rhine_enable_linkmon(rp);
1664f2148a47SJeff Kirsher }
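
/*
 * A minimal usage sketch (this mirrors what rhine_open() does below;
 * phy_id comes from the MII state probed earlier):
 *
 *	int bmsr = mdio_read(dev, rp->mii_if.phy_id, MII_BMSR);
 *
 * Both accessors bracket the register access with
 * rhine_disable_linkmon()/rhine_enable_linkmon(), because the chip's
 * link auto-polling otherwise owns the MDIO engine.
 */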
1665f2148a47SJeff Kirsher 
16667ab87ff4SFrancois Romieu static void rhine_task_disable(struct rhine_private *rp)
16677ab87ff4SFrancois Romieu {
16687ab87ff4SFrancois Romieu 	mutex_lock(&rp->task_lock);
16697ab87ff4SFrancois Romieu 	rp->task_enable = false;
16707ab87ff4SFrancois Romieu 	mutex_unlock(&rp->task_lock);
16717ab87ff4SFrancois Romieu 
16727ab87ff4SFrancois Romieu 	cancel_work_sync(&rp->slow_event_task);
16737ab87ff4SFrancois Romieu 	cancel_work_sync(&rp->reset_task);
16747ab87ff4SFrancois Romieu }
16757ab87ff4SFrancois Romieu 
16767ab87ff4SFrancois Romieu static void rhine_task_enable(struct rhine_private *rp)
16777ab87ff4SFrancois Romieu {
16787ab87ff4SFrancois Romieu 	mutex_lock(&rp->task_lock);
16797ab87ff4SFrancois Romieu 	rp->task_enable = true;
16807ab87ff4SFrancois Romieu 	mutex_unlock(&rp->task_lock);
16817ab87ff4SFrancois Romieu }
16827ab87ff4SFrancois Romieu 
1683f2148a47SJeff Kirsher static int rhine_open(struct net_device *dev)
1684f2148a47SJeff Kirsher {
1685f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
1686f2148a47SJeff Kirsher 	void __iomem *ioaddr = rp->base;
1687f2148a47SJeff Kirsher 	int rc;
1688f2148a47SJeff Kirsher 
1689f7630d18SAlexey Charkov 	rc = request_irq(rp->irq, rhine_interrupt, IRQF_SHARED, dev->name, dev);
1690f2148a47SJeff Kirsher 	if (rc)
1691f2148a47SJeff Kirsher 		return rc;
1692f2148a47SJeff Kirsher 
1693f7630d18SAlexey Charkov 	netif_dbg(rp, ifup, dev, "%s() irq %d\n", __func__, rp->irq);
1694f2148a47SJeff Kirsher 
1695f2148a47SJeff Kirsher 	rc = alloc_ring(dev);
1696f2148a47SJeff Kirsher 	if (rc) {
1697f7630d18SAlexey Charkov 		free_irq(rp->irq, dev);
1698f2148a47SJeff Kirsher 		return rc;
1699f2148a47SJeff Kirsher 	}
1700f2148a47SJeff Kirsher 	alloc_rbufs(dev);
1701f2148a47SJeff Kirsher 	alloc_tbufs(dev);
1702f2148a47SJeff Kirsher 	rhine_chip_reset(dev);
17037ab87ff4SFrancois Romieu 	rhine_task_enable(rp);
1704f2148a47SJeff Kirsher 	init_registers(dev);
1705fc3e0f8aSFrancois Romieu 
1706fc3e0f8aSFrancois Romieu 	netif_dbg(rp, ifup, dev, "%s() Done - status %04x MII status: %04x\n",
1707f2148a47SJeff Kirsher 		  __func__, ioread16(ioaddr + ChipCmd),
1708f2148a47SJeff Kirsher 		  mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
1709f2148a47SJeff Kirsher 
1710f2148a47SJeff Kirsher 	netif_start_queue(dev);
1711f2148a47SJeff Kirsher 
1712f2148a47SJeff Kirsher 	return 0;
1713f2148a47SJeff Kirsher }
1714f2148a47SJeff Kirsher 
1715f2148a47SJeff Kirsher static void rhine_reset_task(struct work_struct *work)
1716f2148a47SJeff Kirsher {
1717f2148a47SJeff Kirsher 	struct rhine_private *rp = container_of(work, struct rhine_private,
1718f2148a47SJeff Kirsher 						reset_task);
1719f2148a47SJeff Kirsher 	struct net_device *dev = rp->dev;
1720f2148a47SJeff Kirsher 
17217ab87ff4SFrancois Romieu 	mutex_lock(&rp->task_lock);
17227ab87ff4SFrancois Romieu 
17237ab87ff4SFrancois Romieu 	if (!rp->task_enable)
17247ab87ff4SFrancois Romieu 		goto out_unlock;
1725f2148a47SJeff Kirsher 
1726f2148a47SJeff Kirsher 	napi_disable(&rp->napi);
1727a926592fSRichard Weinberger 	netif_tx_disable(dev);
1728f2148a47SJeff Kirsher 	spin_lock_bh(&rp->lock);
1729f2148a47SJeff Kirsher 
1730f2148a47SJeff Kirsher 	/* clear all descriptors */
1731f2148a47SJeff Kirsher 	free_tbufs(dev);
1732f2148a47SJeff Kirsher 	free_rbufs(dev);
1733f2148a47SJeff Kirsher 	alloc_tbufs(dev);
1734f2148a47SJeff Kirsher 	alloc_rbufs(dev);
1735f2148a47SJeff Kirsher 
1736f2148a47SJeff Kirsher 	/* Reinitialize the hardware. */
1737f2148a47SJeff Kirsher 	rhine_chip_reset(dev);
1738f2148a47SJeff Kirsher 	init_registers(dev);
1739f2148a47SJeff Kirsher 
1740f2148a47SJeff Kirsher 	spin_unlock_bh(&rp->lock);
1741f2148a47SJeff Kirsher 
1742f2148a47SJeff Kirsher 	dev->trans_start = jiffies; /* prevent tx timeout */
1743f2148a47SJeff Kirsher 	dev->stats.tx_errors++;
1744f2148a47SJeff Kirsher 	netif_wake_queue(dev);
17457ab87ff4SFrancois Romieu 
17467ab87ff4SFrancois Romieu out_unlock:
17477ab87ff4SFrancois Romieu 	mutex_unlock(&rp->task_lock);
1748f2148a47SJeff Kirsher }
1749f2148a47SJeff Kirsher 
1750f2148a47SJeff Kirsher static void rhine_tx_timeout(struct net_device *dev)
1751f2148a47SJeff Kirsher {
1752f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
1753f2148a47SJeff Kirsher 	void __iomem *ioaddr = rp->base;
1754f2148a47SJeff Kirsher 
1755f2148a47SJeff Kirsher 	netdev_warn(dev, "Transmit timed out, status %04x, PHY status %04x, resetting...\n",
1756f2148a47SJeff Kirsher 		    ioread16(ioaddr + IntrStatus),
1757f2148a47SJeff Kirsher 		    mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
1758f2148a47SJeff Kirsher 
1759f2148a47SJeff Kirsher 	schedule_work(&rp->reset_task);
1760f2148a47SJeff Kirsher }
1761f2148a47SJeff Kirsher 
1762f2148a47SJeff Kirsher static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
1763f2148a47SJeff Kirsher 				  struct net_device *dev)
1764f2148a47SJeff Kirsher {
1765f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
1766f7630d18SAlexey Charkov 	struct device *hwdev = dev->dev.parent;
1767f2148a47SJeff Kirsher 	void __iomem *ioaddr = rp->base;
1768f2148a47SJeff Kirsher 	unsigned entry;
1769f2148a47SJeff Kirsher 
1770f2148a47SJeff Kirsher 	/* Caution: the write order is important here, set the field
1771f2148a47SJeff Kirsher 	   with the "ownership" bits last. */
1772f2148a47SJeff Kirsher 
1773f2148a47SJeff Kirsher 	/* Calculate the next Tx descriptor entry. */
1774f2148a47SJeff Kirsher 	entry = rp->cur_tx % TX_RING_SIZE;
1775f2148a47SJeff Kirsher 
1776f2148a47SJeff Kirsher 	if (skb_padto(skb, ETH_ZLEN))
1777f2148a47SJeff Kirsher 		return NETDEV_TX_OK;
1778f2148a47SJeff Kirsher 
1779f2148a47SJeff Kirsher 	rp->tx_skbuff[entry] = skb;
1780f2148a47SJeff Kirsher 
1781f2148a47SJeff Kirsher 	if ((rp->quirks & rqRhineI) &&
1782f2148a47SJeff Kirsher 	    (((unsigned long)skb->data & 3) || skb_shinfo(skb)->nr_frags != 0 || skb->ip_summed == CHECKSUM_PARTIAL)) {
1783f2148a47SJeff Kirsher 		/* Must use alignment buffer. */
1784f2148a47SJeff Kirsher 		if (skb->len > PKT_BUF_SZ) {
1785f2148a47SJeff Kirsher 			/* packet too long, drop it */
17864b3afc6eSEric W. Biederman 			dev_kfree_skb_any(skb);
1787f2148a47SJeff Kirsher 			rp->tx_skbuff[entry] = NULL;
1788f2148a47SJeff Kirsher 			dev->stats.tx_dropped++;
1789f2148a47SJeff Kirsher 			return NETDEV_TX_OK;
1790f2148a47SJeff Kirsher 		}
1791f2148a47SJeff Kirsher 
1792f2148a47SJeff Kirsher 		/* Padding is not copied and so must be redone. */
1793f2148a47SJeff Kirsher 		skb_copy_and_csum_dev(skb, rp->tx_buf[entry]);
1794f2148a47SJeff Kirsher 		if (skb->len < ETH_ZLEN)
1795f2148a47SJeff Kirsher 			memset(rp->tx_buf[entry] + skb->len, 0,
1796f2148a47SJeff Kirsher 			       ETH_ZLEN - skb->len);
1797f2148a47SJeff Kirsher 		rp->tx_skbuff_dma[entry] = 0;
1798f2148a47SJeff Kirsher 		rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_bufs_dma +
1799f2148a47SJeff Kirsher 						      (rp->tx_buf[entry] -
1800f2148a47SJeff Kirsher 						       rp->tx_bufs));
1801f2148a47SJeff Kirsher 	} else {
1802f2148a47SJeff Kirsher 		rp->tx_skbuff_dma[entry] =
1803f7630d18SAlexey Charkov 			dma_map_single(hwdev, skb->data, skb->len,
18044087c4dcSAlexey Charkov 				       DMA_TO_DEVICE);
1805f7630d18SAlexey Charkov 		if (dma_mapping_error(hwdev, rp->tx_skbuff_dma[entry])) {
18064b3afc6eSEric W. Biederman 			dev_kfree_skb_any(skb);
18079b4fe5fbSNeil Horman 			rp->tx_skbuff_dma[entry] = 0;
18089b4fe5fbSNeil Horman 			dev->stats.tx_dropped++;
18099b4fe5fbSNeil Horman 			return NETDEV_TX_OK;
18109b4fe5fbSNeil Horman 		}
1811f2148a47SJeff Kirsher 		rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_skbuff_dma[entry]);
1812f2148a47SJeff Kirsher 	}
1813f2148a47SJeff Kirsher 
1814f2148a47SJeff Kirsher 	rp->tx_ring[entry].desc_length =
1815f2148a47SJeff Kirsher 		cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));
1816f2148a47SJeff Kirsher 
1817df8a39deSJiri Pirko 	if (unlikely(skb_vlan_tag_present(skb))) {
1818df8a39deSJiri Pirko 		u16 vid_pcp = skb_vlan_tag_get(skb);
1819207070f5SRoger Luethi 
1820207070f5SRoger Luethi 		/* drop CFI/DEI bit, register needs VID and PCP */
1821207070f5SRoger Luethi 		vid_pcp = (vid_pcp & VLAN_VID_MASK) |
1822207070f5SRoger Luethi 			  ((vid_pcp & VLAN_PRIO_MASK) >> 1);
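		/*
		 * Worked example with a hypothetical tag 0xb064
		 * (PCP = 5, DEI = 1, VID = 0x064): VLAN_VID_MASK keeps
		 * 0x064, VLAN_PRIO_MASK >> 1 moves PCP from bits 15-13
		 * down to bits 14-12, giving 0x5064 with the DEI bit
		 * dropped.
		 */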
1823207070f5SRoger Luethi 		rp->tx_ring[entry].tx_status = cpu_to_le32((vid_pcp) << 16);
1824f2148a47SJeff Kirsher 		/* request tagging */
1825f2148a47SJeff Kirsher 		rp->tx_ring[entry].desc_length |= cpu_to_le32(0x020000);
1826f2148a47SJeff Kirsher 	} else
1828f2148a47SJeff Kirsher 		rp->tx_ring[entry].tx_status = 0;
1829f2148a47SJeff Kirsher 
183092bf2008STino Reichardt 	netdev_sent_queue(dev, skb->len);
1831f2148a47SJeff Kirsher 	/* Make descriptor writes visible before granting chip ownership. */
1832f2148a47SJeff Kirsher 	wmb();
1833f2148a47SJeff Kirsher 	rp->tx_ring[entry].tx_status |= cpu_to_le32(DescOwn);
1834f2148a47SJeff Kirsher 	wmb();
1835f2148a47SJeff Kirsher 
1836f2148a47SJeff Kirsher 	rp->cur_tx++;
1837f2148a47SJeff Kirsher 
1838f2148a47SJeff Kirsher 	/* Non-x86 TODO: explicitly flush cache lines here. */
1839f2148a47SJeff Kirsher 
1840df8a39deSJiri Pirko 	if (skb_vlan_tag_present(skb))
1841f2148a47SJeff Kirsher 		/* Tx queues are bits 7-0 (first Tx queue: bit 7) */
1842f2148a47SJeff Kirsher 		BYTE_REG_BITS_ON(1 << 7, ioaddr + TQWake);
1843f2148a47SJeff Kirsher 
1844f2148a47SJeff Kirsher 	/* Wake the potentially-idle transmit channel */
1845f2148a47SJeff Kirsher 	iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
1846f2148a47SJeff Kirsher 	       ioaddr + ChipCmd1);
1847f2148a47SJeff Kirsher 	IOSYNC;
1848f2148a47SJeff Kirsher 
1849f2148a47SJeff Kirsher 	if (rp->cur_tx == rp->dirty_tx + TX_QUEUE_LEN)
1850f2148a47SJeff Kirsher 		netif_stop_queue(dev);
1851f2148a47SJeff Kirsher 
1852fc3e0f8aSFrancois Romieu 	netif_dbg(rp, tx_queued, dev, "Transmit frame #%d queued in slot %d\n",
1853f2148a47SJeff Kirsher 		  rp->cur_tx - 1, entry);
1854fc3e0f8aSFrancois Romieu 
1855f2148a47SJeff Kirsher 	return NETDEV_TX_OK;
1856f2148a47SJeff Kirsher }
1857f2148a47SJeff Kirsher 
18587ab87ff4SFrancois Romieu static void rhine_irq_disable(struct rhine_private *rp)
18597ab87ff4SFrancois Romieu {
18607ab87ff4SFrancois Romieu 	iowrite16(0x0000, rp->base + IntrEnable);
18617ab87ff4SFrancois Romieu 	mmiowb();
18627ab87ff4SFrancois Romieu }
18637ab87ff4SFrancois Romieu 
1864f2148a47SJeff Kirsher /* The interrupt handler does all of the Rx thread work and cleans up
1865f2148a47SJeff Kirsher    after the Tx thread. */
1866f2148a47SJeff Kirsher static irqreturn_t rhine_interrupt(int irq, void *dev_instance)
1867f2148a47SJeff Kirsher {
1868f2148a47SJeff Kirsher 	struct net_device *dev = dev_instance;
1869f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
18707ab87ff4SFrancois Romieu 	u32 status;
1871f2148a47SJeff Kirsher 	int handled = 0;
1872f2148a47SJeff Kirsher 
18737ab87ff4SFrancois Romieu 	status = rhine_get_events(rp);
1874f2148a47SJeff Kirsher 
1875fc3e0f8aSFrancois Romieu 	netif_dbg(rp, intr, dev, "Interrupt, status %08x\n", status);
1876f2148a47SJeff Kirsher 
18777ab87ff4SFrancois Romieu 	if (status & RHINE_EVENT) {
18787ab87ff4SFrancois Romieu 		handled = 1;
1879f2148a47SJeff Kirsher 
18807ab87ff4SFrancois Romieu 		rhine_irq_disable(rp);
1881f2148a47SJeff Kirsher 		napi_schedule(&rp->napi);
1882f2148a47SJeff Kirsher 	}
1883f2148a47SJeff Kirsher 
18847ab87ff4SFrancois Romieu 	if (status & ~(IntrLinkChange | IntrStatsMax | RHINE_EVENT_NAPI)) {
1885fc3e0f8aSFrancois Romieu 		netif_err(rp, intr, dev, "Something Wicked happened! %08x\n",
18867ab87ff4SFrancois Romieu 			  status);
1887f2148a47SJeff Kirsher 	}
1888f2148a47SJeff Kirsher 
1889f2148a47SJeff Kirsher 	return IRQ_RETVAL(handled);
1890f2148a47SJeff Kirsher }
1891f2148a47SJeff Kirsher 
1892f2148a47SJeff Kirsher /* This routine is logically part of the interrupt handler, but isolated
1893f2148a47SJeff Kirsher    for clarity. */
1894f2148a47SJeff Kirsher static void rhine_tx(struct net_device *dev)
1895f2148a47SJeff Kirsher {
1896f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
1897f7630d18SAlexey Charkov 	struct device *hwdev = dev->dev.parent;
1898f2148a47SJeff Kirsher 	int txstatus = 0, entry = rp->dirty_tx % TX_RING_SIZE;
189992bf2008STino Reichardt 	unsigned int pkts_compl = 0, bytes_compl = 0;
190092bf2008STino Reichardt 	struct sk_buff *skb;
1901f2148a47SJeff Kirsher 
1902f2148a47SJeff Kirsher 	/* find and cleanup dirty tx descriptors */
1903f2148a47SJeff Kirsher 	while (rp->dirty_tx != rp->cur_tx) {
1904f2148a47SJeff Kirsher 		txstatus = le32_to_cpu(rp->tx_ring[entry].tx_status);
1905fc3e0f8aSFrancois Romieu 		netif_dbg(rp, tx_done, dev, "Tx scavenge %d status %08x\n",
1906f2148a47SJeff Kirsher 			  entry, txstatus);
1907f2148a47SJeff Kirsher 		if (txstatus & DescOwn)
1908f2148a47SJeff Kirsher 			break;
190992bf2008STino Reichardt 		skb = rp->tx_skbuff[entry];
1910f2148a47SJeff Kirsher 		if (txstatus & 0x8000) {
1911fc3e0f8aSFrancois Romieu 			netif_dbg(rp, tx_done, dev,
1912fc3e0f8aSFrancois Romieu 				  "Transmit error, Tx status %08x\n", txstatus);
1913f2148a47SJeff Kirsher 			dev->stats.tx_errors++;
1914f2148a47SJeff Kirsher 			if (txstatus & 0x0400)
1915f2148a47SJeff Kirsher 				dev->stats.tx_carrier_errors++;
1916f2148a47SJeff Kirsher 			if (txstatus & 0x0200)
1917f2148a47SJeff Kirsher 				dev->stats.tx_window_errors++;
1918f2148a47SJeff Kirsher 			if (txstatus & 0x0100)
1919f2148a47SJeff Kirsher 				dev->stats.tx_aborted_errors++;
1920f2148a47SJeff Kirsher 			if (txstatus & 0x0080)
1921f2148a47SJeff Kirsher 				dev->stats.tx_heartbeat_errors++;
1922f2148a47SJeff Kirsher 			if (((rp->quirks & rqRhineI) && txstatus & 0x0002) ||
1923f2148a47SJeff Kirsher 			    (txstatus & 0x0800) || (txstatus & 0x1000)) {
1924f2148a47SJeff Kirsher 				dev->stats.tx_fifo_errors++;
1925f2148a47SJeff Kirsher 				rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
1926f2148a47SJeff Kirsher 				break; /* Keep the skb - we try again */
1927f2148a47SJeff Kirsher 			}
1928f2148a47SJeff Kirsher 			/* Transmitter restarted in 'abnormal' handler. */
1929f2148a47SJeff Kirsher 		} else {
1930f2148a47SJeff Kirsher 			if (rp->quirks & rqRhineI)
1931f2148a47SJeff Kirsher 				dev->stats.collisions += (txstatus >> 3) & 0x0F;
1932f2148a47SJeff Kirsher 			else
1933f2148a47SJeff Kirsher 				dev->stats.collisions += txstatus & 0x0F;
1934fc3e0f8aSFrancois Romieu 			netif_dbg(rp, tx_done, dev, "collisions: %1.1x:%1.1x\n",
1935fc3e0f8aSFrancois Romieu 				  (txstatus >> 3) & 0xF, txstatus & 0xF);
1936f7b5d1b9SJamie Gloudon 
1937f7b5d1b9SJamie Gloudon 			u64_stats_update_begin(&rp->tx_stats.syncp);
193892bf2008STino Reichardt 			rp->tx_stats.bytes += skb->len;
1939f7b5d1b9SJamie Gloudon 			rp->tx_stats.packets++;
1940f7b5d1b9SJamie Gloudon 			u64_stats_update_end(&rp->tx_stats.syncp);
1941f2148a47SJeff Kirsher 		}
1942f2148a47SJeff Kirsher 		/* Free the original skb. */
1943f2148a47SJeff Kirsher 		if (rp->tx_skbuff_dma[entry]) {
1944f7630d18SAlexey Charkov 			dma_unmap_single(hwdev,
1945f2148a47SJeff Kirsher 					 rp->tx_skbuff_dma[entry],
194692bf2008STino Reichardt 					 skb->len,
19474087c4dcSAlexey Charkov 					 DMA_TO_DEVICE);
1948f2148a47SJeff Kirsher 		}
194992bf2008STino Reichardt 		bytes_compl += skb->len;
195092bf2008STino Reichardt 		pkts_compl++;
195192bf2008STino Reichardt 		dev_consume_skb_any(skb);
1952f2148a47SJeff Kirsher 		rp->tx_skbuff[entry] = NULL;
1953f2148a47SJeff Kirsher 		entry = (++rp->dirty_tx) % TX_RING_SIZE;
1954f2148a47SJeff Kirsher 	}
195592bf2008STino Reichardt 
195692bf2008STino Reichardt 	netdev_completed_queue(dev, pkts_compl, bytes_compl);
1957f2148a47SJeff Kirsher 	if ((rp->cur_tx - rp->dirty_tx) < TX_QUEUE_LEN - 4)
1958f2148a47SJeff Kirsher 		netif_wake_queue(dev);
1959f2148a47SJeff Kirsher }
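
/*
 * netdev_completed_queue() above pairs with the netdev_sent_queue()
 * call in rhine_start_tx() and the netdev_reset_queue() in
 * alloc_tbufs(), so byte queue limits (BQL) account every byte exactly
 * once, even across ring resets.
 */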
1960f2148a47SJeff Kirsher 
1961f2148a47SJeff Kirsher /**
1962f2148a47SJeff Kirsher  * rhine_get_vlan_tci - extract TCI from Rx data buffer
1963f2148a47SJeff Kirsher  * @skb: pointer to sk_buff
1964f2148a47SJeff Kirsher  * @data_size: used data area of the buffer including CRC
1965f2148a47SJeff Kirsher  *
1966f2148a47SJeff Kirsher  * If hardware VLAN tag extraction is enabled and the chip indicates a 802.1Q
1967f2148a47SJeff Kirsher  * packet, the extracted 802.1Q header (2 bytes TPID + 2 bytes TCI) is 4-byte
1968f2148a47SJeff Kirsher  * aligned following the CRC.
1969f2148a47SJeff Kirsher  */
1970f2148a47SJeff Kirsher static inline u16 rhine_get_vlan_tci(struct sk_buff *skb, int data_size)
1971f2148a47SJeff Kirsher {
1972f2148a47SJeff Kirsher 	u8 *trailer = (u8 *)skb->data + ((data_size + 3) & ~3) + 2;
1973f2148a47SJeff Kirsher 	return be16_to_cpup((__be16 *)trailer);
1974f2148a47SJeff Kirsher }
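
/*
 * Worked example: with data_size = 66 (frame plus 4-byte CRC),
 * (66 + 3) & ~3 = 68 rounds up to the next 4-byte boundary and + 2
 * skips the 2-byte TPID, so trailer points at the big-endian TCI at
 * offset 70.
 */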
1975f2148a47SJeff Kirsher 
1976f2148a47SJeff Kirsher /* Process up to limit frames from receive ring */
1977f2148a47SJeff Kirsher static int rhine_rx(struct net_device *dev, int limit)
1978f2148a47SJeff Kirsher {
1979f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
1980f7630d18SAlexey Charkov 	struct device *hwdev = dev->dev.parent;
1981f2148a47SJeff Kirsher 	int count;
1982f2148a47SJeff Kirsher 	int entry = rp->cur_rx % RX_RING_SIZE;
1983f2148a47SJeff Kirsher 
1984fc3e0f8aSFrancois Romieu 	netif_dbg(rp, rx_status, dev, "%s(), entry %d status %08x\n", __func__,
1985fc3e0f8aSFrancois Romieu 		  entry, le32_to_cpu(rp->rx_head_desc->rx_status));
1986f2148a47SJeff Kirsher 
1987f2148a47SJeff Kirsher 	/* If EOP is set on the next entry, it's a new packet. Send it up. */
1988f2148a47SJeff Kirsher 	for (count = 0; count < limit; ++count) {
1989f2148a47SJeff Kirsher 		struct rx_desc *desc = rp->rx_head_desc;
1990f2148a47SJeff Kirsher 		u32 desc_status = le32_to_cpu(desc->rx_status);
1991f2148a47SJeff Kirsher 		u32 desc_length = le32_to_cpu(desc->desc_length);
1992f2148a47SJeff Kirsher 		int data_size = desc_status >> 16;
1993f2148a47SJeff Kirsher 
1994f2148a47SJeff Kirsher 		if (desc_status & DescOwn)
1995f2148a47SJeff Kirsher 			break;
1996f2148a47SJeff Kirsher 
1997fc3e0f8aSFrancois Romieu 		netif_dbg(rp, rx_status, dev, "%s() status %08x\n", __func__,
1998fc3e0f8aSFrancois Romieu 			  desc_status);
1999f2148a47SJeff Kirsher 
2000f2148a47SJeff Kirsher 		if ((desc_status & (RxWholePkt | RxErr)) != RxWholePkt) {
2001f2148a47SJeff Kirsher 			if ((desc_status & RxWholePkt) != RxWholePkt) {
2002f2148a47SJeff Kirsher 				netdev_warn(dev,
2003f2148a47SJeff Kirsher 	"Oversized Ethernet frame spanned multiple buffers, "
2004f2148a47SJeff Kirsher 	"entry %#x length %d status %08x!\n",
2005f2148a47SJeff Kirsher 					    entry, data_size,
2006f2148a47SJeff Kirsher 					    desc_status);
2007f2148a47SJeff Kirsher 				netdev_warn(dev,
2008f2148a47SJeff Kirsher 					    "Oversized Ethernet frame %p vs %p\n",
2009f2148a47SJeff Kirsher 					    rp->rx_head_desc,
2010f2148a47SJeff Kirsher 					    &rp->rx_ring[entry]);
2011f2148a47SJeff Kirsher 				dev->stats.rx_length_errors++;
2012f2148a47SJeff Kirsher 			} else if (desc_status & RxErr) {
2013f2148a47SJeff Kirsher 				/* There was an error. */
2014fc3e0f8aSFrancois Romieu 				netif_dbg(rp, rx_err, dev,
2015fc3e0f8aSFrancois Romieu 					  "%s() Rx error %08x\n", __func__,
2016fc3e0f8aSFrancois Romieu 					  desc_status);
2017f2148a47SJeff Kirsher 				dev->stats.rx_errors++;
2018f2148a47SJeff Kirsher 				if (desc_status & 0x0030)
2019f2148a47SJeff Kirsher 					dev->stats.rx_length_errors++;
2020f2148a47SJeff Kirsher 				if (desc_status & 0x0048)
2021f2148a47SJeff Kirsher 					dev->stats.rx_fifo_errors++;
2022f2148a47SJeff Kirsher 				if (desc_status & 0x0004)
2023f2148a47SJeff Kirsher 					dev->stats.rx_frame_errors++;
2024f2148a47SJeff Kirsher 				if (desc_status & 0x0002) {
2025f2148a47SJeff Kirsher 					/* this can also be updated outside the interrupt handler */
2026f2148a47SJeff Kirsher 					spin_lock(&rp->lock);
2027f2148a47SJeff Kirsher 					dev->stats.rx_crc_errors++;
2028f2148a47SJeff Kirsher 					spin_unlock(&rp->lock);
2029f2148a47SJeff Kirsher 				}
2030f2148a47SJeff Kirsher 			}
2031f2148a47SJeff Kirsher 		} else {
2032f2148a47SJeff Kirsher 			struct sk_buff *skb = NULL;
2033f2148a47SJeff Kirsher 			/* Length should omit the CRC */
2034f2148a47SJeff Kirsher 			int pkt_len = data_size - 4;
2035f2148a47SJeff Kirsher 			u16 vlan_tci = 0;
2036f2148a47SJeff Kirsher 
2037f2148a47SJeff Kirsher 			/* Check if the packet is long enough to accept without
2038f2148a47SJeff Kirsher 			   copying to a minimally-sized skbuff. */
2039f2148a47SJeff Kirsher 			if (pkt_len < rx_copybreak)
2040f2148a47SJeff Kirsher 				skb = netdev_alloc_skb_ip_align(dev, pkt_len);
2041f2148a47SJeff Kirsher 			if (skb) {
2042f7630d18SAlexey Charkov 				dma_sync_single_for_cpu(hwdev,
2043f2148a47SJeff Kirsher 							rp->rx_skbuff_dma[entry],
2044f2148a47SJeff Kirsher 							rp->rx_buf_sz,
20454087c4dcSAlexey Charkov 							DMA_FROM_DEVICE);
2046f2148a47SJeff Kirsher 
2047f2148a47SJeff Kirsher 				skb_copy_to_linear_data(skb,
2048f2148a47SJeff Kirsher 						 rp->rx_skbuff[entry]->data,
2049f2148a47SJeff Kirsher 						 pkt_len);
2050f2148a47SJeff Kirsher 				skb_put(skb, pkt_len);
2051f7630d18SAlexey Charkov 				dma_sync_single_for_device(hwdev,
2052f2148a47SJeff Kirsher 							   rp->rx_skbuff_dma[entry],
2053f2148a47SJeff Kirsher 							   rp->rx_buf_sz,
20544087c4dcSAlexey Charkov 							   DMA_FROM_DEVICE);
2055f2148a47SJeff Kirsher 			} else {
2056f2148a47SJeff Kirsher 				skb = rp->rx_skbuff[entry];
2057f2148a47SJeff Kirsher 				if (skb == NULL) {
2058f2148a47SJeff Kirsher 					netdev_err(dev, "Inconsistent Rx descriptor chain\n");
2059f2148a47SJeff Kirsher 					break;
2060f2148a47SJeff Kirsher 				}
2061f2148a47SJeff Kirsher 				rp->rx_skbuff[entry] = NULL;
2062f2148a47SJeff Kirsher 				skb_put(skb, pkt_len);
2063f7630d18SAlexey Charkov 				dma_unmap_single(hwdev,
2064f2148a47SJeff Kirsher 						 rp->rx_skbuff_dma[entry],
2065f2148a47SJeff Kirsher 						 rp->rx_buf_sz,
20664087c4dcSAlexey Charkov 						 DMA_FROM_DEVICE);
2067f2148a47SJeff Kirsher 			}
2068f2148a47SJeff Kirsher 
2069f2148a47SJeff Kirsher 			if (unlikely(desc_length & DescTag))
2070f2148a47SJeff Kirsher 				vlan_tci = rhine_get_vlan_tci(skb, data_size);
2071f2148a47SJeff Kirsher 
2072f2148a47SJeff Kirsher 			skb->protocol = eth_type_trans(skb, dev);
2073f2148a47SJeff Kirsher 
2074f2148a47SJeff Kirsher 			if (unlikely(desc_length & DescTag))
207586a9bad3SPatrick McHardy 				__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
2076f2148a47SJeff Kirsher 			netif_receive_skb(skb);
2077f7b5d1b9SJamie Gloudon 
2078f7b5d1b9SJamie Gloudon 			u64_stats_update_begin(&rp->rx_stats.syncp);
2079f7b5d1b9SJamie Gloudon 			rp->rx_stats.bytes += pkt_len;
2080f7b5d1b9SJamie Gloudon 			rp->rx_stats.packets++;
2081f7b5d1b9SJamie Gloudon 			u64_stats_update_end(&rp->rx_stats.syncp);
2082f2148a47SJeff Kirsher 		}
2083f2148a47SJeff Kirsher 		entry = (++rp->cur_rx) % RX_RING_SIZE;
2084f2148a47SJeff Kirsher 		rp->rx_head_desc = &rp->rx_ring[entry];
2085f2148a47SJeff Kirsher 	}
2086f2148a47SJeff Kirsher 
2087f2148a47SJeff Kirsher 	/* Refill the Rx ring buffers. */
2088f2148a47SJeff Kirsher 	for (; rp->cur_rx - rp->dirty_rx > 0; rp->dirty_rx++) {
2089f2148a47SJeff Kirsher 		struct sk_buff *skb;
2090f2148a47SJeff Kirsher 		entry = rp->dirty_rx % RX_RING_SIZE;
2091f2148a47SJeff Kirsher 		if (rp->rx_skbuff[entry] == NULL) {
2092f2148a47SJeff Kirsher 			skb = netdev_alloc_skb(dev, rp->rx_buf_sz);
2093f2148a47SJeff Kirsher 			rp->rx_skbuff[entry] = skb;
2094f2148a47SJeff Kirsher 			if (skb == NULL)
2095f2148a47SJeff Kirsher 				break;	/* Better luck next round. */
2096f2148a47SJeff Kirsher 			rp->rx_skbuff_dma[entry] =
2097f7630d18SAlexey Charkov 				dma_map_single(hwdev, skb->data,
2098f2148a47SJeff Kirsher 					       rp->rx_buf_sz,
20994087c4dcSAlexey Charkov 					       DMA_FROM_DEVICE);
2100f7630d18SAlexey Charkov 			if (dma_mapping_error(hwdev,
2101f7630d18SAlexey Charkov 					      rp->rx_skbuff_dma[entry])) {
21029b4fe5fbSNeil Horman 				dev_kfree_skb(skb);
21039b4fe5fbSNeil Horman 				rp->rx_skbuff_dma[entry] = 0;
21049b4fe5fbSNeil Horman 				break;
21059b4fe5fbSNeil Horman 			}
2106f2148a47SJeff Kirsher 			rp->rx_ring[entry].addr = cpu_to_le32(rp->rx_skbuff_dma[entry]);
2107e45af497Sfrançois romieu 			dma_wmb();
2108f2148a47SJeff Kirsher 		}
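		/*
		 * Hand the descriptor back to the NIC. The dma_wmb() above
		 * ensures the buffer address is visible to the device before
		 * DescOwn is set.
		 */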
2109f2148a47SJeff Kirsher 		rp->rx_ring[entry].rx_status = cpu_to_le32(DescOwn);
2110f2148a47SJeff Kirsher 	}
2111f2148a47SJeff Kirsher 
2112f2148a47SJeff Kirsher 	return count;
2113f2148a47SJeff Kirsher }
2114f2148a47SJeff Kirsher 
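/*
 * Restart transmission after a Tx error. Unless a new error is already
 * pending, point the chip at the first not-yet-sent descriptor and
 * demand-poll the transmitter.
 */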
2115f2148a47SJeff Kirsher static void rhine_restart_tx(struct net_device *dev)
2115f2148a47SJeff Kirsher {
2116f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
2117f2148a47SJeff Kirsher 	void __iomem *ioaddr = rp->base;
2118f2148a47SJeff Kirsher 	int entry = rp->dirty_tx % TX_RING_SIZE;
2119f2148a47SJeff Kirsher 	u32 intr_status;
2120f2148a47SJeff Kirsher 
2121f2148a47SJeff Kirsher 	/*
2122f2148a47SJeff Kirsher 	 * If new errors occurred, we need to sort them out before doing Tx.
2123f2148a47SJeff Kirsher 	 * In that case the ISR will be back here soon anyway.
2124f2148a47SJeff Kirsher 	 */
2125a20a28bcSFrancois Romieu 	intr_status = rhine_get_events(rp);
2126f2148a47SJeff Kirsher 
2127f2148a47SJeff Kirsher 	if ((intr_status & IntrTxErrSummary) == 0) {
2128f2148a47SJeff Kirsher 
2129f2148a47SJeff Kirsher 		/* We know better than the chip where it should continue. */
2130f2148a47SJeff Kirsher 		iowrite32(rp->tx_ring_dma + entry * sizeof(struct tx_desc),
2131f2148a47SJeff Kirsher 		       ioaddr + TxRingPtr);
2132f2148a47SJeff Kirsher 
2133f2148a47SJeff Kirsher 		iowrite8(ioread8(ioaddr + ChipCmd) | CmdTxOn,
2134f2148a47SJeff Kirsher 		       ioaddr + ChipCmd);
2135f2148a47SJeff Kirsher 
2136f2148a47SJeff Kirsher 		if (rp->tx_ring[entry].desc_length & cpu_to_le32(0x020000))
2137f2148a47SJeff Kirsher 			/* Tx queues are bits 7-0 (first Tx queue: bit 7) */
2138f2148a47SJeff Kirsher 			BYTE_REG_BITS_ON(1 << 7, ioaddr + TQWake);
2139f2148a47SJeff Kirsher 
2140f2148a47SJeff Kirsher 		iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
2141f2148a47SJeff Kirsher 		       ioaddr + ChipCmd1);
2142f2148a47SJeff Kirsher 		IOSYNC;
2143f2148a47SJeff Kirsher 	} else {
2145f2148a47SJeff Kirsher 		/* This should never happen */
2146fc3e0f8aSFrancois Romieu 		netif_warn(rp, tx_err, dev, "another error occurred %08x\n",
2147fc3e0f8aSFrancois Romieu 			   intr_status);
2148f2148a47SJeff Kirsher 	}
2150f2148a47SJeff Kirsher }
2151f2148a47SJeff Kirsher 
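/*
 * Deferred handler for "slow" events (link changes, PCI errors). It runs
 * from a workqueue, so it may sleep; task_lock serializes it against
 * device reconfiguration, and interrupts are re-enabled on the way out.
 */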
21527ab87ff4SFrancois Romieu static void rhine_slow_event_task(struct work_struct *work)
2153f2148a47SJeff Kirsher {
21547ab87ff4SFrancois Romieu 	struct rhine_private *rp =
21557ab87ff4SFrancois Romieu 		container_of(work, struct rhine_private, slow_event_task);
21567ab87ff4SFrancois Romieu 	struct net_device *dev = rp->dev;
21577ab87ff4SFrancois Romieu 	u32 intr_status;
2158f2148a47SJeff Kirsher 
21597ab87ff4SFrancois Romieu 	mutex_lock(&rp->task_lock);
21607ab87ff4SFrancois Romieu 
21617ab87ff4SFrancois Romieu 	if (!rp->task_enable)
21627ab87ff4SFrancois Romieu 		goto out_unlock;
21637ab87ff4SFrancois Romieu 
21647ab87ff4SFrancois Romieu 	intr_status = rhine_get_events(rp);
21657ab87ff4SFrancois Romieu 	rhine_ack_events(rp, intr_status & RHINE_EVENT_SLOW);
2166f2148a47SJeff Kirsher 
2167f2148a47SJeff Kirsher 	if (intr_status & IntrLinkChange)
2168f2148a47SJeff Kirsher 		rhine_check_media(dev, 0);
2169f2148a47SJeff Kirsher 
2170fc3e0f8aSFrancois Romieu 	if (intr_status & IntrPCIErr)
2171fc3e0f8aSFrancois Romieu 		netif_warn(rp, hw, dev, "PCI error\n");
2172fc3e0f8aSFrancois Romieu 
2173559bcac3SDavid S. Miller 	iowrite16(RHINE_EVENT & 0xffff, rp->base + IntrEnable);
2174f2148a47SJeff Kirsher 
21757ab87ff4SFrancois Romieu out_unlock:
21767ab87ff4SFrancois Romieu 	mutex_unlock(&rp->task_lock);
2177f2148a47SJeff Kirsher }
2178f2148a47SJeff Kirsher 
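/*
 * The u64_stats fetch/retry loops below re-read the counters if an update
 * raced with the read, so the 64-bit Rx/Tx totals stay consistent even on
 * 32-bit machines.
 */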
2179f7b5d1b9SJamie Gloudon static struct rtnl_link_stats64 *
2180f7b5d1b9SJamie Gloudon rhine_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
2181f2148a47SJeff Kirsher {
2182f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
2183f7b5d1b9SJamie Gloudon 	unsigned int start;
2184f2148a47SJeff Kirsher 
21857ab87ff4SFrancois Romieu 	spin_lock_bh(&rp->lock);
21867ab87ff4SFrancois Romieu 	rhine_update_rx_crc_and_missed_errord(rp);
21877ab87ff4SFrancois Romieu 	spin_unlock_bh(&rp->lock);
2188f2148a47SJeff Kirsher 
2189f7b5d1b9SJamie Gloudon 	netdev_stats_to_stats64(stats, &dev->stats);
2190f7b5d1b9SJamie Gloudon 
2191f7b5d1b9SJamie Gloudon 	do {
219257a7744eSEric W. Biederman 		start = u64_stats_fetch_begin_irq(&rp->rx_stats.syncp);
2193f7b5d1b9SJamie Gloudon 		stats->rx_packets = rp->rx_stats.packets;
2194f7b5d1b9SJamie Gloudon 		stats->rx_bytes = rp->rx_stats.bytes;
219557a7744eSEric W. Biederman 	} while (u64_stats_fetch_retry_irq(&rp->rx_stats.syncp, start));
2196f7b5d1b9SJamie Gloudon 
2197f7b5d1b9SJamie Gloudon 	do {
219857a7744eSEric W. Biederman 		start = u64_stats_fetch_begin_irq(&rp->tx_stats.syncp);
2199f7b5d1b9SJamie Gloudon 		stats->tx_packets = rp->tx_stats.packets;
2200f7b5d1b9SJamie Gloudon 		stats->tx_bytes = rp->tx_stats.bytes;
220157a7744eSEric W. Biederman 	} while (u64_stats_fetch_retry_irq(&rp->tx_stats.syncp, start));
2202f7b5d1b9SJamie Gloudon 
2203f7b5d1b9SJamie Gloudon 	return stats;
2204f2148a47SJeff Kirsher }
2205f2148a47SJeff Kirsher 
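/*
 * Program the Rx filter: accept everything in promiscuous mode, open the
 * multicast filter completely for allmulti (or when there are too many
 * addresses to match), use the multicast CAM entries on management chips
 * (rqMgmt), and fall back to the 64-bit hash filter otherwise.
 */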
2206f2148a47SJeff Kirsher static void rhine_set_rx_mode(struct net_device *dev)
2207f2148a47SJeff Kirsher {
2208f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
2209f2148a47SJeff Kirsher 	void __iomem *ioaddr = rp->base;
2210f2148a47SJeff Kirsher 	u32 mc_filter[2];	/* Multicast hash filter */
2211f2148a47SJeff Kirsher 	u8 rx_mode = 0x0C;	/* Note: 0x02=accept runt, 0x01=accept errs */
2212f2148a47SJeff Kirsher 	struct netdev_hw_addr *ha;
2213f2148a47SJeff Kirsher 
2214f2148a47SJeff Kirsher 	if (dev->flags & IFF_PROMISC) {		/* Set promiscuous. */
2215f2148a47SJeff Kirsher 		rx_mode = 0x1C;
2216f2148a47SJeff Kirsher 		iowrite32(0xffffffff, ioaddr + MulticastFilter0);
2217f2148a47SJeff Kirsher 		iowrite32(0xffffffff, ioaddr + MulticastFilter1);
2218f2148a47SJeff Kirsher 	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
2219f2148a47SJeff Kirsher 		   (dev->flags & IFF_ALLMULTI)) {
2220f2148a47SJeff Kirsher 		/* Too many to match, or accept all multicasts. */
2221f2148a47SJeff Kirsher 		iowrite32(0xffffffff, ioaddr + MulticastFilter0);
2222f2148a47SJeff Kirsher 		iowrite32(0xffffffff, ioaddr + MulticastFilter1);
2223ca8b6e04SAlexey Charkov 	} else if (rp->quirks & rqMgmt) {
2224f2148a47SJeff Kirsher 		int i = 0;
2225f2148a47SJeff Kirsher 		u32 mCAMmask = 0;	/* 32 mCAMs (6105M and better) */
2226f2148a47SJeff Kirsher 		netdev_for_each_mc_addr(ha, dev) {
2227f2148a47SJeff Kirsher 			if (i == MCAM_SIZE)
2228f2148a47SJeff Kirsher 				break;
2229f2148a47SJeff Kirsher 			rhine_set_cam(ioaddr, i, ha->addr);
2230f2148a47SJeff Kirsher 			mCAMmask |= 1 << i;
2231f2148a47SJeff Kirsher 			i++;
2232f2148a47SJeff Kirsher 		}
2233f2148a47SJeff Kirsher 		rhine_set_cam_mask(ioaddr, mCAMmask);
2234f2148a47SJeff Kirsher 	} else {
2235f2148a47SJeff Kirsher 		memset(mc_filter, 0, sizeof(mc_filter));
2236f2148a47SJeff Kirsher 		netdev_for_each_mc_addr(ha, dev) {
2237f2148a47SJeff Kirsher 			int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
2238f2148a47SJeff Kirsher 
2239f2148a47SJeff Kirsher 			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
2240f2148a47SJeff Kirsher 		}
2241f2148a47SJeff Kirsher 		iowrite32(mc_filter[0], ioaddr + MulticastFilter0);
2242f2148a47SJeff Kirsher 		iowrite32(mc_filter[1], ioaddr + MulticastFilter1);
2243f2148a47SJeff Kirsher 	}
2244f2148a47SJeff Kirsher 	/* enable/disable VLAN receive filtering */
2245ca8b6e04SAlexey Charkov 	if (rp->quirks & rqMgmt) {
2246f2148a47SJeff Kirsher 		if (dev->flags & IFF_PROMISC)
2247f2148a47SJeff Kirsher 			BYTE_REG_BITS_OFF(BCR1_VIDFR, ioaddr + PCIBusConfig1);
2248f2148a47SJeff Kirsher 		else
2249f2148a47SJeff Kirsher 			BYTE_REG_BITS_ON(BCR1_VIDFR, ioaddr + PCIBusConfig1);
2250f2148a47SJeff Kirsher 	}
2251f2148a47SJeff Kirsher 	BYTE_REG_BITS_ON(rx_mode, ioaddr + RxConfig);
2252f2148a47SJeff Kirsher }
2253f2148a47SJeff Kirsher 
2254f2148a47SJeff Kirsher static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
2255f2148a47SJeff Kirsher {
2256f7630d18SAlexey Charkov 	struct device *hwdev = dev->dev.parent;
2257f2148a47SJeff Kirsher 
225823020ab3SRick Jones 	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
225923020ab3SRick Jones 	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
2260f7630d18SAlexey Charkov 	strlcpy(info->bus_info, dev_name(hwdev), sizeof(info->bus_info));
2261f2148a47SJeff Kirsher }
2262f2148a47SJeff Kirsher 
2263f2148a47SJeff Kirsher static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2264f2148a47SJeff Kirsher {
2265f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
2266f2148a47SJeff Kirsher 	int rc;
2267f2148a47SJeff Kirsher 
22687ab87ff4SFrancois Romieu 	mutex_lock(&rp->task_lock);
2269f2148a47SJeff Kirsher 	rc = mii_ethtool_gset(&rp->mii_if, cmd);
22707ab87ff4SFrancois Romieu 	mutex_unlock(&rp->task_lock);
2271f2148a47SJeff Kirsher 
2272f2148a47SJeff Kirsher 	return rc;
2273f2148a47SJeff Kirsher }
2274f2148a47SJeff Kirsher 
2275f2148a47SJeff Kirsher static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2276f2148a47SJeff Kirsher {
2277f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
2278f2148a47SJeff Kirsher 	int rc;
2279f2148a47SJeff Kirsher 
22807ab87ff4SFrancois Romieu 	mutex_lock(&rp->task_lock);
2281f2148a47SJeff Kirsher 	rc = mii_ethtool_sset(&rp->mii_if, cmd);
2282f2148a47SJeff Kirsher 	rhine_set_carrier(&rp->mii_if);
22837ab87ff4SFrancois Romieu 	mutex_unlock(&rp->task_lock);
2284f2148a47SJeff Kirsher 
2285f2148a47SJeff Kirsher 	return rc;
2286f2148a47SJeff Kirsher }
2287f2148a47SJeff Kirsher 
2288f2148a47SJeff Kirsher static int netdev_nway_reset(struct net_device *dev)
2289f2148a47SJeff Kirsher {
2290f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
2291f2148a47SJeff Kirsher 
2292f2148a47SJeff Kirsher 	return mii_nway_restart(&rp->mii_if);
2293f2148a47SJeff Kirsher }
2294f2148a47SJeff Kirsher 
2295f2148a47SJeff Kirsher static u32 netdev_get_link(struct net_device *dev)
2296f2148a47SJeff Kirsher {
2297f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
2298f2148a47SJeff Kirsher 
2299f2148a47SJeff Kirsher 	return mii_link_ok(&rp->mii_if);
2300f2148a47SJeff Kirsher }
2301f2148a47SJeff Kirsher 
2302f2148a47SJeff Kirsher static u32 netdev_get_msglevel(struct net_device *dev)
2303f2148a47SJeff Kirsher {
2304fc3e0f8aSFrancois Romieu 	struct rhine_private *rp = netdev_priv(dev);
2305fc3e0f8aSFrancois Romieu 
2306fc3e0f8aSFrancois Romieu 	return rp->msg_enable;
2307f2148a47SJeff Kirsher }
2308f2148a47SJeff Kirsher 
2309f2148a47SJeff Kirsher static void netdev_set_msglevel(struct net_device *dev, u32 value)
2310f2148a47SJeff Kirsher {
2311fc3e0f8aSFrancois Romieu 	struct rhine_private *rp = netdev_priv(dev);
2312fc3e0f8aSFrancois Romieu 
2313fc3e0f8aSFrancois Romieu 	rp->msg_enable = value;
2314f2148a47SJeff Kirsher }
2315f2148a47SJeff Kirsher 
2316f2148a47SJeff Kirsher static void rhine_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2317f2148a47SJeff Kirsher {
2318f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
2319f2148a47SJeff Kirsher 
2320f2148a47SJeff Kirsher 	if (!(rp->quirks & rqWOL))
2321f2148a47SJeff Kirsher 		return;
2322f2148a47SJeff Kirsher 
2323f2148a47SJeff Kirsher 	spin_lock_irq(&rp->lock);
2324f2148a47SJeff Kirsher 	wol->supported = WAKE_PHY | WAKE_MAGIC |
2325f2148a47SJeff Kirsher 			 WAKE_UCAST | WAKE_MCAST | WAKE_BCAST;	/* Untested */
2326f2148a47SJeff Kirsher 	wol->wolopts = rp->wolopts;
2327f2148a47SJeff Kirsher 	spin_unlock_irq(&rp->lock);
2328f2148a47SJeff Kirsher }
2329f2148a47SJeff Kirsher 
2330f2148a47SJeff Kirsher static int rhine_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2331f2148a47SJeff Kirsher {
2332f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
2333f2148a47SJeff Kirsher 	u32 support = WAKE_PHY | WAKE_MAGIC |
2334f2148a47SJeff Kirsher 		      WAKE_UCAST | WAKE_MCAST | WAKE_BCAST;	/* Untested */
2335f2148a47SJeff Kirsher 
2336f2148a47SJeff Kirsher 	if (!(rp->quirks & rqWOL))
2337f2148a47SJeff Kirsher 		return -EINVAL;
2338f2148a47SJeff Kirsher 
2339f2148a47SJeff Kirsher 	if (wol->wolopts & ~support)
2340f2148a47SJeff Kirsher 		return -EINVAL;
2341f2148a47SJeff Kirsher 
2342f2148a47SJeff Kirsher 	spin_lock_irq(&rp->lock);
2343f2148a47SJeff Kirsher 	rp->wolopts = wol->wolopts;
2344f2148a47SJeff Kirsher 	spin_unlock_irq(&rp->lock);
2345f2148a47SJeff Kirsher 
2346f2148a47SJeff Kirsher 	return 0;
2347f2148a47SJeff Kirsher }
2348f2148a47SJeff Kirsher 
2349f2148a47SJeff Kirsher static const struct ethtool_ops netdev_ethtool_ops = {
2350f2148a47SJeff Kirsher 	.get_drvinfo		= netdev_get_drvinfo,
2351f2148a47SJeff Kirsher 	.get_settings		= netdev_get_settings,
2352f2148a47SJeff Kirsher 	.set_settings		= netdev_set_settings,
2353f2148a47SJeff Kirsher 	.nway_reset		= netdev_nway_reset,
2354f2148a47SJeff Kirsher 	.get_link		= netdev_get_link,
2355f2148a47SJeff Kirsher 	.get_msglevel		= netdev_get_msglevel,
2356f2148a47SJeff Kirsher 	.set_msglevel		= netdev_set_msglevel,
2357f2148a47SJeff Kirsher 	.get_wol		= rhine_get_wol,
2358f2148a47SJeff Kirsher 	.set_wol		= rhine_set_wol,
2359f2148a47SJeff Kirsher };
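/*
 * For illustration only, these ops back the usual ethtool plumbing, e.g.:
 *
 *	ethtool eth0			-> netdev_get_settings
 *	ethtool -s eth0 autoneg on	-> netdev_set_settings
 *	ethtool -s eth0 msglvl 0x10	-> netdev_set_msglevel
 *
 * (interface name and values are examples, not taken from this driver)
 */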
2360f2148a47SJeff Kirsher 
2361f2148a47SJeff Kirsher static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2362f2148a47SJeff Kirsher {
2363f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
2364f2148a47SJeff Kirsher 	int rc;
2365f2148a47SJeff Kirsher 
2366f2148a47SJeff Kirsher 	if (!netif_running(dev))
2367f2148a47SJeff Kirsher 		return -EINVAL;
2368f2148a47SJeff Kirsher 
23697ab87ff4SFrancois Romieu 	mutex_lock(&rp->task_lock);
2370f2148a47SJeff Kirsher 	rc = generic_mii_ioctl(&rp->mii_if, if_mii(rq), cmd, NULL);
2371f2148a47SJeff Kirsher 	rhine_set_carrier(&rp->mii_if);
23727ab87ff4SFrancois Romieu 	mutex_unlock(&rp->task_lock);
2373f2148a47SJeff Kirsher 
2374f2148a47SJeff Kirsher 	return rc;
2375f2148a47SJeff Kirsher }
2376f2148a47SJeff Kirsher 
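/*
 * Orderly shutdown: stop the deferred task, NAPI and the Tx queue first,
 * switch the MAC to loopback to quiesce it, disable interrupts and DMA,
 * then release the IRQ and the rings.
 */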
2377f2148a47SJeff Kirsher static int rhine_close(struct net_device *dev)
2378f2148a47SJeff Kirsher {
2379f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
2380f2148a47SJeff Kirsher 	void __iomem *ioaddr = rp->base;
2381f2148a47SJeff Kirsher 
23827ab87ff4SFrancois Romieu 	rhine_task_disable(rp);
2383f2148a47SJeff Kirsher 	napi_disable(&rp->napi);
2384f2148a47SJeff Kirsher 	netif_stop_queue(dev);
2385f2148a47SJeff Kirsher 
2386fc3e0f8aSFrancois Romieu 	netif_dbg(rp, ifdown, dev, "Shutting down ethercard, status was %04x\n",
2387f2148a47SJeff Kirsher 		  ioread16(ioaddr + ChipCmd));
2388f2148a47SJeff Kirsher 
2389f2148a47SJeff Kirsher 	/* Switch to loopback mode to avoid hardware races. */
2390f2148a47SJeff Kirsher 	iowrite8(rp->tx_thresh | 0x02, ioaddr + TxConfig);
2391f2148a47SJeff Kirsher 
23927ab87ff4SFrancois Romieu 	rhine_irq_disable(rp);
2393f2148a47SJeff Kirsher 
2394f2148a47SJeff Kirsher 	/* Stop the chip's Tx and Rx processes. */
2395f2148a47SJeff Kirsher 	iowrite16(CmdStop, ioaddr + ChipCmd);
2396f2148a47SJeff Kirsher 
2397f7630d18SAlexey Charkov 	free_irq(rp->irq, dev);
2398f2148a47SJeff Kirsher 	free_rbufs(dev);
2399f2148a47SJeff Kirsher 	free_tbufs(dev);
2400f2148a47SJeff Kirsher 	free_ring(dev);
2401f2148a47SJeff Kirsher 
2402f2148a47SJeff Kirsher 	return 0;
2403f2148a47SJeff Kirsher }
2404f2148a47SJeff Kirsher 
2405f2148a47SJeff Kirsher 
24062d283862SAlexey Charkov static void rhine_remove_one_pci(struct pci_dev *pdev)
2407f2148a47SJeff Kirsher {
2408f2148a47SJeff Kirsher 	struct net_device *dev = pci_get_drvdata(pdev);
2409f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
2410f2148a47SJeff Kirsher 
2411f2148a47SJeff Kirsher 	unregister_netdev(dev);
2412f2148a47SJeff Kirsher 
2413f2148a47SJeff Kirsher 	pci_iounmap(pdev, rp->base);
2414f2148a47SJeff Kirsher 	pci_release_regions(pdev);
2415f2148a47SJeff Kirsher 
2416f2148a47SJeff Kirsher 	free_netdev(dev);
2417f2148a47SJeff Kirsher 	pci_disable_device(pdev);
2418f2148a47SJeff Kirsher }
2419f2148a47SJeff Kirsher 
24202d283862SAlexey Charkov static int rhine_remove_one_platform(struct platform_device *pdev)
24212d283862SAlexey Charkov {
24222d283862SAlexey Charkov 	struct net_device *dev = platform_get_drvdata(pdev);
24232d283862SAlexey Charkov 	struct rhine_private *rp = netdev_priv(dev);
24242d283862SAlexey Charkov 
24252d283862SAlexey Charkov 	unregister_netdev(dev);
24262d283862SAlexey Charkov 
24272d283862SAlexey Charkov 	iounmap(rp->base);
24282d283862SAlexey Charkov 
24292d283862SAlexey Charkov 	free_netdev(dev);
24302d283862SAlexey Charkov 
24312d283862SAlexey Charkov 	return 0;
24322d283862SAlexey Charkov }
24332d283862SAlexey Charkov 
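/*
 * Arm Wake-on-LAN at system shutdown: program the requested wake triggers
 * and, on power-off, drop the chip into D3 unless avoid_D3 is set.
 */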
24342d283862SAlexey Charkov static void rhine_shutdown_pci(struct pci_dev *pdev)
2435f2148a47SJeff Kirsher {
2436f2148a47SJeff Kirsher 	struct net_device *dev = pci_get_drvdata(pdev);
2437f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
2438f2148a47SJeff Kirsher 	void __iomem *ioaddr = rp->base;
2439f2148a47SJeff Kirsher 
2440f2148a47SJeff Kirsher 	if (!(rp->quirks & rqWOL))
2441f2148a47SJeff Kirsher 		return; /* Nothing to do for non-WOL adapters */
2442f2148a47SJeff Kirsher 
2443f2148a47SJeff Kirsher 	rhine_power_init(dev);
2444f2148a47SJeff Kirsher 
2445f2148a47SJeff Kirsher 	/* Make sure we use pattern 0, 1 and not 4, 5 */
2446f2148a47SJeff Kirsher 	if (rp->quirks & rq6patterns)
2447f2148a47SJeff Kirsher 		iowrite8(0x04, ioaddr + WOLcgClr);
2448f2148a47SJeff Kirsher 
24497ab87ff4SFrancois Romieu 	spin_lock(&rp->lock);
24507ab87ff4SFrancois Romieu 
2451f2148a47SJeff Kirsher 	if (rp->wolopts & WAKE_MAGIC) {
2452f2148a47SJeff Kirsher 		iowrite8(WOLmagic, ioaddr + WOLcrSet);
2453f2148a47SJeff Kirsher 		/*
2454f2148a47SJeff Kirsher 		 * Turn EEPROM-controlled wake-up back on -- some hardware may
2455f2148a47SJeff Kirsher 		 * not cooperate otherwise.
2456f2148a47SJeff Kirsher 		 */
2457f2148a47SJeff Kirsher 		iowrite8(ioread8(ioaddr + ConfigA) | 0x03, ioaddr + ConfigA);
2458f2148a47SJeff Kirsher 	}
2459f2148a47SJeff Kirsher 
2460f2148a47SJeff Kirsher 	if (rp->wolopts & (WAKE_BCAST|WAKE_MCAST))
2461f2148a47SJeff Kirsher 		iowrite8(WOLbmcast, ioaddr + WOLcgSet);
2462f2148a47SJeff Kirsher 
2463f2148a47SJeff Kirsher 	if (rp->wolopts & WAKE_PHY)
2464f2148a47SJeff Kirsher 		iowrite8(WOLlnkon | WOLlnkoff, ioaddr + WOLcrSet);
2465f2148a47SJeff Kirsher 
2466f2148a47SJeff Kirsher 	if (rp->wolopts & WAKE_UCAST)
2467f2148a47SJeff Kirsher 		iowrite8(WOLucast, ioaddr + WOLcrSet);
2468f2148a47SJeff Kirsher 
2469f2148a47SJeff Kirsher 	if (rp->wolopts) {
2470f2148a47SJeff Kirsher 		/* Enable legacy WOL (for old motherboards) */
2471f2148a47SJeff Kirsher 		iowrite8(0x01, ioaddr + PwcfgSet);
2472f2148a47SJeff Kirsher 		iowrite8(ioread8(ioaddr + StickyHW) | 0x04, ioaddr + StickyHW);
2473f2148a47SJeff Kirsher 	}
2474f2148a47SJeff Kirsher 
24757ab87ff4SFrancois Romieu 	spin_unlock(&rp->lock);
24767ab87ff4SFrancois Romieu 
2477e92b9b3bSFrancois Romieu 	if (system_state == SYSTEM_POWER_OFF && !avoid_D3) {
2478f2148a47SJeff Kirsher 		iowrite8(ioread8(ioaddr + StickyHW) | 0x03, ioaddr + StickyHW);
2479f2148a47SJeff Kirsher 
2480e92b9b3bSFrancois Romieu 		pci_wake_from_d3(pdev, true);
2481e92b9b3bSFrancois Romieu 		pci_set_power_state(pdev, PCI_D3hot);
2482e92b9b3bSFrancois Romieu 	}
2483f2148a47SJeff Kirsher }
2484f2148a47SJeff Kirsher 
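/*
 * Suspend quiesces the device (task, IRQ, NAPI) and detaches it; resume
 * rebuilds the rings from scratch and reprograms the registers before
 * reattaching.
 */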
2485e92b9b3bSFrancois Romieu #ifdef CONFIG_PM_SLEEP
2486e92b9b3bSFrancois Romieu static int rhine_suspend(struct device *device)
2487f2148a47SJeff Kirsher {
2488f7630d18SAlexey Charkov 	struct net_device *dev = dev_get_drvdata(device);
2489f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
2490f2148a47SJeff Kirsher 
2491f2148a47SJeff Kirsher 	if (!netif_running(dev))
2492f2148a47SJeff Kirsher 		return 0;
2493f2148a47SJeff Kirsher 
24947ab87ff4SFrancois Romieu 	rhine_task_disable(rp);
24957ab87ff4SFrancois Romieu 	rhine_irq_disable(rp);
2496f2148a47SJeff Kirsher 	napi_disable(&rp->napi);
2497f2148a47SJeff Kirsher 
2498f2148a47SJeff Kirsher 	netif_device_detach(dev);
2499f2148a47SJeff Kirsher 
2500f7630d18SAlexey Charkov 	if (dev_is_pci(device))
25012d283862SAlexey Charkov 		rhine_shutdown_pci(to_pci_dev(device));
2502f2148a47SJeff Kirsher 
2503f2148a47SJeff Kirsher 	return 0;
2504f2148a47SJeff Kirsher }
2505f2148a47SJeff Kirsher 
2506e92b9b3bSFrancois Romieu static int rhine_resume(struct device *device)
2507f2148a47SJeff Kirsher {
2508f7630d18SAlexey Charkov 	struct net_device *dev = dev_get_drvdata(device);
2509f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
2510f2148a47SJeff Kirsher 
2511f2148a47SJeff Kirsher 	if (!netif_running(dev))
2512f2148a47SJeff Kirsher 		return 0;
2513f2148a47SJeff Kirsher 
2514f2148a47SJeff Kirsher 	enable_mmio(rp->pioaddr, rp->quirks);
2515f2148a47SJeff Kirsher 	rhine_power_init(dev);
2516f2148a47SJeff Kirsher 	free_tbufs(dev);
2517f2148a47SJeff Kirsher 	free_rbufs(dev);
2518f2148a47SJeff Kirsher 	alloc_tbufs(dev);
2519f2148a47SJeff Kirsher 	alloc_rbufs(dev);
25207ab87ff4SFrancois Romieu 	rhine_task_enable(rp);
25217ab87ff4SFrancois Romieu 	spin_lock_bh(&rp->lock);
2522f2148a47SJeff Kirsher 	init_registers(dev);
25237ab87ff4SFrancois Romieu 	spin_unlock_bh(&rp->lock);
2524f2148a47SJeff Kirsher 
2525f2148a47SJeff Kirsher 	netif_device_attach(dev);
2526f2148a47SJeff Kirsher 
2527f2148a47SJeff Kirsher 	return 0;
2528f2148a47SJeff Kirsher }
2529e92b9b3bSFrancois Romieu 
2530e92b9b3bSFrancois Romieu static SIMPLE_DEV_PM_OPS(rhine_pm_ops, rhine_suspend, rhine_resume);
2531e92b9b3bSFrancois Romieu #define RHINE_PM_OPS	(&rhine_pm_ops)
2532e92b9b3bSFrancois Romieu 
2533e92b9b3bSFrancois Romieu #else
2534e92b9b3bSFrancois Romieu 
2535e92b9b3bSFrancois Romieu #define RHINE_PM_OPS	NULL
2536e92b9b3bSFrancois Romieu 
2537e92b9b3bSFrancois Romieu #endif /* !CONFIG_PM_SLEEP */
2538f2148a47SJeff Kirsher 
25392d283862SAlexey Charkov static struct pci_driver rhine_driver_pci = {
2540f2148a47SJeff Kirsher 	.name		= DRV_NAME,
2541f2148a47SJeff Kirsher 	.id_table	= rhine_pci_tbl,
25422d283862SAlexey Charkov 	.probe		= rhine_init_one_pci,
25432d283862SAlexey Charkov 	.remove		= rhine_remove_one_pci,
25442d283862SAlexey Charkov 	.shutdown	= rhine_shutdown_pci,
2545e92b9b3bSFrancois Romieu 	.driver.pm	= RHINE_PM_OPS,
2546f2148a47SJeff Kirsher };
2547f2148a47SJeff Kirsher 
25482d283862SAlexey Charkov static struct platform_driver rhine_driver_platform = {
25492d283862SAlexey Charkov 	.probe		= rhine_init_one_platform,
25502d283862SAlexey Charkov 	.remove		= rhine_remove_one_platform,
25512d283862SAlexey Charkov 	.driver = {
25522d283862SAlexey Charkov 		.name	= DRV_NAME,
25532d283862SAlexey Charkov 		.of_match_table	= rhine_of_tbl,
25542d283862SAlexey Charkov 		.pm		= RHINE_PM_OPS,
25552d283862SAlexey Charkov 	}
25562d283862SAlexey Charkov };
25572d283862SAlexey Charkov 
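/* Boards whose BIOS cannot bring the chip back out of D3 (see rhine_init) */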
255877273eaaSSachin Kamat static struct dmi_system_id rhine_dmi_table[] __initdata = {
2559f2148a47SJeff Kirsher 	{
2560f2148a47SJeff Kirsher 		.ident = "EPIA-M",
2561f2148a47SJeff Kirsher 		.matches = {
2562f2148a47SJeff Kirsher 			DMI_MATCH(DMI_BIOS_VENDOR, "Award Software International, Inc."),
2563f2148a47SJeff Kirsher 			DMI_MATCH(DMI_BIOS_VERSION, "6.00 PG"),
2564f2148a47SJeff Kirsher 		},
2565f2148a47SJeff Kirsher 	},
2566f2148a47SJeff Kirsher 	{
2567f2148a47SJeff Kirsher 		.ident = "KV7",
2568f2148a47SJeff Kirsher 		.matches = {
2569f2148a47SJeff Kirsher 			DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies, LTD"),
2570f2148a47SJeff Kirsher 			DMI_MATCH(DMI_BIOS_VERSION, "6.00 PG"),
2571f2148a47SJeff Kirsher 		},
2572f2148a47SJeff Kirsher 	},
2573f2148a47SJeff Kirsher 	{ NULL }
2574f2148a47SJeff Kirsher };
2575f2148a47SJeff Kirsher 
2576f2148a47SJeff Kirsher static int __init rhine_init(void)
2577f2148a47SJeff Kirsher {
25782d283862SAlexey Charkov 	int ret_pci, ret_platform;
25792d283862SAlexey Charkov 
2580f2148a47SJeff Kirsher /* When built as a module, this is printed whether or not devices are found in probe. */
2581f2148a47SJeff Kirsher #ifdef MODULE
2582f2148a47SJeff Kirsher 	pr_info("%s\n", version);
2583f2148a47SJeff Kirsher #endif
2584f2148a47SJeff Kirsher 	if (dmi_check_system(rhine_dmi_table)) {
2585f2148a47SJeff Kirsher 		/* these BIOSes fail at PXE boot if chip is in D3 */
2586eb939922SRusty Russell 		avoid_D3 = true;
2587f2148a47SJeff Kirsher 		pr_warn("Broken BIOS detected, avoid_D3 enabled\n");
2588f2148a47SJeff Kirsher 	} else if (avoid_D3) {
2590f2148a47SJeff Kirsher 		pr_info("avoid_D3 set\n");
2590f2148a47SJeff Kirsher 	}
2591f2148a47SJeff Kirsher 
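	/*
	 * Rhine chips exist both as PCI devices and as platform devices;
	 * registering either bus driver successfully is enough, so fail
	 * only if both registrations failed.
	 */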
25922d283862SAlexey Charkov 	ret_pci = pci_register_driver(&rhine_driver_pci);
25932d283862SAlexey Charkov 	ret_platform = platform_driver_register(&rhine_driver_platform);
25942d283862SAlexey Charkov 	if ((ret_pci < 0) && (ret_platform < 0))
25952d283862SAlexey Charkov 		return ret_pci;
25962d283862SAlexey Charkov 
25972d283862SAlexey Charkov 	return 0;
2598f2148a47SJeff Kirsher }
2599f2148a47SJeff Kirsher 
2600f2148a47SJeff Kirsher 
2601f2148a47SJeff Kirsher static void __exit rhine_cleanup(void)
2602f2148a47SJeff Kirsher {
26032d283862SAlexey Charkov 	platform_driver_unregister(&rhine_driver_platform);
26042d283862SAlexey Charkov 	pci_unregister_driver(&rhine_driver_pci);
2605f2148a47SJeff Kirsher }
2606f2148a47SJeff Kirsher 
2607f2148a47SJeff Kirsher 
2608f2148a47SJeff Kirsher module_init(rhine_init);
2609f2148a47SJeff Kirsher module_exit(rhine_cleanup);
2610