xref: /openbmc/linux/drivers/net/ethernet/via/via-rhine.c (revision 5bdc73800dad3ef5d06977a4b90304bd34353933)
1f2148a47SJeff Kirsher /* via-rhine.c: A Linux Ethernet device driver for VIA Rhine family chips. */
2f2148a47SJeff Kirsher /*
3f2148a47SJeff Kirsher 	Written 1998-2001 by Donald Becker.
4f2148a47SJeff Kirsher 
5f2148a47SJeff Kirsher 	Current Maintainer: Roger Luethi <rl@hellgate.ch>
6f2148a47SJeff Kirsher 
7f2148a47SJeff Kirsher 	This software may be used and distributed according to the terms of
8f2148a47SJeff Kirsher 	the GNU General Public License (GPL), incorporated herein by reference.
9f2148a47SJeff Kirsher 	Drivers based on or derived from this code fall under the GPL and must
10f2148a47SJeff Kirsher 	retain the authorship, copyright and license notice.  This file is not
11f2148a47SJeff Kirsher 	a complete program and may only be used when the entire operating
12f2148a47SJeff Kirsher 	system is licensed under the GPL.
13f2148a47SJeff Kirsher 
14f2148a47SJeff Kirsher 	This driver is designed for the VIA VT86C100A Rhine-I.
15f2148a47SJeff Kirsher 	It also works with the Rhine-II (6102) and Rhine-III (6105/6105L/6105LOM
16f2148a47SJeff Kirsher 	and management NIC 6105M).
17f2148a47SJeff Kirsher 
18f2148a47SJeff Kirsher 	The author may be reached as becker@scyld.com, or C/O
19f2148a47SJeff Kirsher 	Scyld Computing Corporation
20f2148a47SJeff Kirsher 	410 Severn Ave., Suite 210
21f2148a47SJeff Kirsher 	Annapolis MD 21403
22f2148a47SJeff Kirsher 
23f2148a47SJeff Kirsher 
24f2148a47SJeff Kirsher 	This driver contains some changes from the original Donald Becker
25f2148a47SJeff Kirsher 	version. He may or may not be interested in bug reports on this
26f2148a47SJeff Kirsher 	code. You can find his versions at:
27f2148a47SJeff Kirsher 	http://www.scyld.com/network/via-rhine.html
28f2148a47SJeff Kirsher 	[link no longer provides useful info -jgarzik]
29f2148a47SJeff Kirsher 
30f2148a47SJeff Kirsher */
31f2148a47SJeff Kirsher 
32f2148a47SJeff Kirsher #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
33f2148a47SJeff Kirsher 
34f2148a47SJeff Kirsher #define DRV_NAME	"via-rhine"
35207070f5SRoger Luethi #define DRV_VERSION	"1.5.1"
36f2148a47SJeff Kirsher #define DRV_RELDATE	"2010-10-09"
37f2148a47SJeff Kirsher 
38eb939922SRusty Russell #include <linux/types.h>
39f2148a47SJeff Kirsher 
40f2148a47SJeff Kirsher /* A few user-configurable values.
41f2148a47SJeff Kirsher    These may be modified when a driver module is loaded. */
42fc3e0f8aSFrancois Romieu static int debug = 0;
43fc3e0f8aSFrancois Romieu #define RHINE_MSG_DEFAULT \
44fc3e0f8aSFrancois Romieu         (0x0000)
45f2148a47SJeff Kirsher 
46f2148a47SJeff Kirsher /* Set the copy breakpoint for the copy-only-tiny-frames scheme.
47f2148a47SJeff Kirsher    Setting to > 1518 effectively disables this feature. */
48f2148a47SJeff Kirsher #if defined(__alpha__) || defined(__arm__) || defined(__hppa__) || \
49f2148a47SJeff Kirsher 	defined(CONFIG_SPARC) || defined(__ia64__) ||		   \
50f2148a47SJeff Kirsher 	defined(__sh__) || defined(__mips__)
51f2148a47SJeff Kirsher static int rx_copybreak = 1518;
52f2148a47SJeff Kirsher #else
53f2148a47SJeff Kirsher static int rx_copybreak;
54f2148a47SJeff Kirsher #endif
55f2148a47SJeff Kirsher 
56f2148a47SJeff Kirsher /* Work-around for broken BIOSes: they are unable to get the chip back out of
57f2148a47SJeff Kirsher    power state D3 so PXE booting fails. bootparam(7): via-rhine.avoid_D3=1 */
58eb939922SRusty Russell static bool avoid_D3;
59f2148a47SJeff Kirsher 
60f2148a47SJeff Kirsher /*
61f2148a47SJeff Kirsher  * In case you are looking for 'options[]' or 'full_duplex[]', they
62f2148a47SJeff Kirsher  * are gone. Use ethtool(8) instead.
63f2148a47SJeff Kirsher  */
64f2148a47SJeff Kirsher 
65f2148a47SJeff Kirsher /* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
66f2148a47SJeff Kirsher    The Rhine has a 64 element 8390-like hash table. */
67f2148a47SJeff Kirsher static const int multicast_filter_limit = 32;
68f2148a47SJeff Kirsher 
69f2148a47SJeff Kirsher 
70f2148a47SJeff Kirsher /* Operational parameters that are set at compile time. */
71f2148a47SJeff Kirsher 
72f2148a47SJeff Kirsher /* Keep the ring sizes a power of two for compile efficiency.
73f2148a47SJeff Kirsher    The compiler will convert <unsigned>'%'<2^N> into a bit mask.
74f2148a47SJeff Kirsher    Making the Tx ring too large decreases the effectiveness of channel
75f2148a47SJeff Kirsher    bonding and packet priority.
76f2148a47SJeff Kirsher    There are no ill effects from too-large receive rings. */
77f2148a47SJeff Kirsher #define TX_RING_SIZE	16
78f2148a47SJeff Kirsher #define TX_QUEUE_LEN	10	/* Limit ring entries actually used. */
79f2148a47SJeff Kirsher #define RX_RING_SIZE	64
80f2148a47SJeff Kirsher 
81f2148a47SJeff Kirsher /* Operational parameters that usually are not changed. */
82f2148a47SJeff Kirsher 
83f2148a47SJeff Kirsher /* Time in jiffies before concluding the transmitter is hung. */
84f2148a47SJeff Kirsher #define TX_TIMEOUT	(2*HZ)
85f2148a47SJeff Kirsher 
86f2148a47SJeff Kirsher #define PKT_BUF_SZ	1536	/* Size of each temporary Rx buffer.*/
87f2148a47SJeff Kirsher 
88f2148a47SJeff Kirsher #include <linux/module.h>
89f2148a47SJeff Kirsher #include <linux/moduleparam.h>
90f2148a47SJeff Kirsher #include <linux/kernel.h>
91f2148a47SJeff Kirsher #include <linux/string.h>
92f2148a47SJeff Kirsher #include <linux/timer.h>
93f2148a47SJeff Kirsher #include <linux/errno.h>
94f2148a47SJeff Kirsher #include <linux/ioport.h>
95f2148a47SJeff Kirsher #include <linux/interrupt.h>
96f2148a47SJeff Kirsher #include <linux/pci.h>
972d283862SAlexey Charkov #include <linux/of_address.h>
982d283862SAlexey Charkov #include <linux/of_device.h>
992d283862SAlexey Charkov #include <linux/of_irq.h>
1002d283862SAlexey Charkov #include <linux/platform_device.h>
101f2148a47SJeff Kirsher #include <linux/dma-mapping.h>
102f2148a47SJeff Kirsher #include <linux/netdevice.h>
103f2148a47SJeff Kirsher #include <linux/etherdevice.h>
104f2148a47SJeff Kirsher #include <linux/skbuff.h>
105f2148a47SJeff Kirsher #include <linux/init.h>
106f2148a47SJeff Kirsher #include <linux/delay.h>
107f2148a47SJeff Kirsher #include <linux/mii.h>
108f2148a47SJeff Kirsher #include <linux/ethtool.h>
109f2148a47SJeff Kirsher #include <linux/crc32.h>
110f2148a47SJeff Kirsher #include <linux/if_vlan.h>
111f2148a47SJeff Kirsher #include <linux/bitops.h>
112f2148a47SJeff Kirsher #include <linux/workqueue.h>
113f2148a47SJeff Kirsher #include <asm/processor.h>	/* Processor type for cache alignment. */
114f2148a47SJeff Kirsher #include <asm/io.h>
115f2148a47SJeff Kirsher #include <asm/irq.h>
116f2148a47SJeff Kirsher #include <asm/uaccess.h>
117f2148a47SJeff Kirsher #include <linux/dmi.h>
118f2148a47SJeff Kirsher 
119f2148a47SJeff Kirsher /* These identify the driver base version and may not be removed. */
12076e239e1SBill Pemberton static const char version[] =
121f2148a47SJeff Kirsher 	"v1.10-LK" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker";
122f2148a47SJeff Kirsher 
123f2148a47SJeff Kirsher MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
124f2148a47SJeff Kirsher MODULE_DESCRIPTION("VIA Rhine PCI Fast Ethernet driver");
125f2148a47SJeff Kirsher MODULE_LICENSE("GPL");
126f2148a47SJeff Kirsher 
127f2148a47SJeff Kirsher module_param(debug, int, 0);
128f2148a47SJeff Kirsher module_param(rx_copybreak, int, 0);
129f2148a47SJeff Kirsher module_param(avoid_D3, bool, 0);
130fc3e0f8aSFrancois Romieu MODULE_PARM_DESC(debug, "VIA Rhine debug message flags");
131f2148a47SJeff Kirsher MODULE_PARM_DESC(rx_copybreak, "VIA Rhine copy breakpoint for copy-only-tiny-frames");
132f2148a47SJeff Kirsher MODULE_PARM_DESC(avoid_D3, "Avoid power state D3 (work-around for broken BIOSes)");
133f2148a47SJeff Kirsher 
134f2148a47SJeff Kirsher #define MCAM_SIZE	32
135f2148a47SJeff Kirsher #define VCAM_SIZE	32
136f2148a47SJeff Kirsher 
137f2148a47SJeff Kirsher /*
138f2148a47SJeff Kirsher 		Theory of Operation
139f2148a47SJeff Kirsher 
140f2148a47SJeff Kirsher I. Board Compatibility
141f2148a47SJeff Kirsher 
142f2148a47SJeff Kirsher This driver is designed for the VIA 86c100A Rhine-II PCI Fast Ethernet
143f2148a47SJeff Kirsher controller.
144f2148a47SJeff Kirsher 
145f2148a47SJeff Kirsher II. Board-specific settings
146f2148a47SJeff Kirsher 
147f2148a47SJeff Kirsher Boards with this chip are functional only in a bus-master PCI slot.
148f2148a47SJeff Kirsher 
149f2148a47SJeff Kirsher Many operational settings are loaded from the EEPROM to the Config word at
150f2148a47SJeff Kirsher offset 0x78. For most of these settings, this driver assumes that they are
151f2148a47SJeff Kirsher correct.
152f2148a47SJeff Kirsher If this driver is compiled to use PCI memory space operations the EEPROM
153f2148a47SJeff Kirsher must be configured to enable memory ops.
154f2148a47SJeff Kirsher 
155f2148a47SJeff Kirsher III. Driver operation
156f2148a47SJeff Kirsher 
157f2148a47SJeff Kirsher IIIa. Ring buffers
158f2148a47SJeff Kirsher 
159f2148a47SJeff Kirsher This driver uses two statically allocated fixed-size descriptor lists
160f2148a47SJeff Kirsher formed into rings by a branch from the final descriptor to the beginning of
161f2148a47SJeff Kirsher the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
162f2148a47SJeff Kirsher 
163f2148a47SJeff Kirsher IIIb/c. Transmit/Receive Structure
164f2148a47SJeff Kirsher 
165f2148a47SJeff Kirsher This driver attempts to use a zero-copy receive and transmit scheme.
166f2148a47SJeff Kirsher 
167f2148a47SJeff Kirsher Alas, all data buffers are required to start on a 32 bit boundary, so
168f2148a47SJeff Kirsher the driver must often copy transmit packets into bounce buffers.
169f2148a47SJeff Kirsher 
170f2148a47SJeff Kirsher The driver allocates full frame size skbuffs for the Rx ring buffers at
171f2148a47SJeff Kirsher open() time and passes the skb->data field to the chip as receive data
172f2148a47SJeff Kirsher buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
173f2148a47SJeff Kirsher a fresh skbuff is allocated and the frame is copied to the new skbuff.
174f2148a47SJeff Kirsher When the incoming frame is larger, the skbuff is passed directly up the
175f2148a47SJeff Kirsher protocol stack. Buffers consumed this way are replaced by newly allocated
176f2148a47SJeff Kirsher skbuffs in the last phase of rhine_rx().
177f2148a47SJeff Kirsher 
178f2148a47SJeff Kirsher The RX_COPYBREAK value is chosen to trade-off the memory wasted by
179f2148a47SJeff Kirsher using a full-sized skbuff for small frames vs. the copying costs of larger
180f2148a47SJeff Kirsher frames. New boards are typically used in generously configured machines
181f2148a47SJeff Kirsher and the underfilled buffers have negligible impact compared to the benefit of
182f2148a47SJeff Kirsher a single allocation size, so the default value of zero results in never
183f2148a47SJeff Kirsher copying packets. When copying is done, the cost is usually mitigated by using
184f2148a47SJeff Kirsher a combined copy/checksum routine. Copying also preloads the cache, which is
185f2148a47SJeff Kirsher most useful with small frames.
186f2148a47SJeff Kirsher 
187f2148a47SJeff Kirsher Since the VIA chips are only able to transfer data to buffers on 32 bit
188f2148a47SJeff Kirsher boundaries, the IP header at offset 14 in an ethernet frame isn't
189f2148a47SJeff Kirsher longword aligned for further processing. Copying these unaligned buffers
190f2148a47SJeff Kirsher has the beneficial effect of 16-byte aligning the IP header.
191f2148a47SJeff Kirsher 
192f2148a47SJeff Kirsher IIId. Synchronization
193f2148a47SJeff Kirsher 
194f2148a47SJeff Kirsher The driver runs as two independent, single-threaded flows of control. One
195f2148a47SJeff Kirsher is the send-packet routine, which enforces single-threaded use by the
196f2148a47SJeff Kirsher netdev_priv(dev)->lock spinlock. The other thread is the interrupt handler,
197f2148a47SJeff Kirsher which is single threaded by the hardware and interrupt handling software.
198f2148a47SJeff Kirsher 
199f2148a47SJeff Kirsher The send packet thread has partial control over the Tx ring. It locks the
200f2148a47SJeff Kirsher netdev_priv(dev)->lock whenever it's queuing a Tx packet. If the next slot in
201f2148a47SJeff Kirsher the ring is not available it stops the transmit queue by
202f2148a47SJeff Kirsher calling netif_stop_queue.
203f2148a47SJeff Kirsher 
204f2148a47SJeff Kirsher The interrupt handler has exclusive control over the Rx ring and records stats
205f2148a47SJeff Kirsher from the Tx ring. After reaping the stats, it marks the Tx queue entry as
206f2148a47SJeff Kirsher empty by incrementing the dirty_tx mark. If at least half of the entries in
207f2148a47SJeff Kirsher the Rx ring are available the transmit queue is woken up if it was stopped.
208f2148a47SJeff Kirsher 
209f2148a47SJeff Kirsher IV. Notes
210f2148a47SJeff Kirsher 
211f2148a47SJeff Kirsher IVb. References
212f2148a47SJeff Kirsher 
213f2148a47SJeff Kirsher Preliminary VT86C100A manual from http://www.via.com.tw/
214f2148a47SJeff Kirsher http://www.scyld.com/expert/100mbps.html
215f2148a47SJeff Kirsher http://www.scyld.com/expert/NWay.html
216f2148a47SJeff Kirsher ftp://ftp.via.com.tw/public/lan/Products/NIC/VT86C100A/Datasheet/VT86C100A03.pdf
217f2148a47SJeff Kirsher ftp://ftp.via.com.tw/public/lan/Products/NIC/VT6102/Datasheet/VT6102_021.PDF
218f2148a47SJeff Kirsher 
219f2148a47SJeff Kirsher 
220f2148a47SJeff Kirsher IVc. Errata
221f2148a47SJeff Kirsher 
222f2148a47SJeff Kirsher The VT86C100A manual is not reliable information.
223f2148a47SJeff Kirsher The 3043 chip does not handle unaligned transmit or receive buffers, resulting
224f2148a47SJeff Kirsher in significant performance degradation for bounce buffer copies on transmit
225f2148a47SJeff Kirsher and unaligned IP headers on receive.
226f2148a47SJeff Kirsher The chip does not pad to minimum transmit length.
227f2148a47SJeff Kirsher 
228f2148a47SJeff Kirsher */
229f2148a47SJeff Kirsher 
230f2148a47SJeff Kirsher 
231f2148a47SJeff Kirsher /* This table drives the PCI probe routines. It's mostly boilerplate in all
232f2148a47SJeff Kirsher    of the drivers, and will likely be provided by some future kernel.
233f2148a47SJeff Kirsher    Note the matching code -- the first table entry matchs all 56** cards but
234f2148a47SJeff Kirsher    second only the 1234 card.
235f2148a47SJeff Kirsher */
236f2148a47SJeff Kirsher 
237f2148a47SJeff Kirsher enum rhine_revs {
238f2148a47SJeff Kirsher 	VT86C100A	= 0x00,
239f2148a47SJeff Kirsher 	VTunknown0	= 0x20,
240f2148a47SJeff Kirsher 	VT6102		= 0x40,
241f2148a47SJeff Kirsher 	VT8231		= 0x50,	/* Integrated MAC */
242f2148a47SJeff Kirsher 	VT8233		= 0x60,	/* Integrated MAC */
243f2148a47SJeff Kirsher 	VT8235		= 0x74,	/* Integrated MAC */
244f2148a47SJeff Kirsher 	VT8237		= 0x78,	/* Integrated MAC */
245f2148a47SJeff Kirsher 	VTunknown1	= 0x7C,
246f2148a47SJeff Kirsher 	VT6105		= 0x80,
247f2148a47SJeff Kirsher 	VT6105_B0	= 0x83,
248f2148a47SJeff Kirsher 	VT6105L		= 0x8A,
249f2148a47SJeff Kirsher 	VT6107		= 0x8C,
250f2148a47SJeff Kirsher 	VTunknown2	= 0x8E,
251f2148a47SJeff Kirsher 	VT6105M		= 0x90,	/* Management adapter */
252f2148a47SJeff Kirsher };
253f2148a47SJeff Kirsher 
254f2148a47SJeff Kirsher enum rhine_quirks {
255f2148a47SJeff Kirsher 	rqWOL		= 0x0001,	/* Wake-On-LAN support */
256f2148a47SJeff Kirsher 	rqForceReset	= 0x0002,
257f2148a47SJeff Kirsher 	rq6patterns	= 0x0040,	/* 6 instead of 4 patterns for WOL */
258f2148a47SJeff Kirsher 	rqStatusWBRace	= 0x0080,	/* Tx Status Writeback Error possible */
259f2148a47SJeff Kirsher 	rqRhineI	= 0x0100,	/* See comment below */
260ca8b6e04SAlexey Charkov 	rqIntPHY	= 0x0200,	/* Integrated PHY */
261ca8b6e04SAlexey Charkov 	rqMgmt		= 0x0400,	/* Management adapter */
2625b579e21SAlexey Charkov 	rqNeedEnMMIO	= 0x0800,	/* Whether the core needs to be
2635b579e21SAlexey Charkov 					 * switched from PIO mode to MMIO
2645b579e21SAlexey Charkov 					 * (only applies to PCI)
2655b579e21SAlexey Charkov 					 */
266f2148a47SJeff Kirsher };
267f2148a47SJeff Kirsher /*
268f2148a47SJeff Kirsher  * rqRhineI: VT86C100A (aka Rhine-I) uses different bits to enable
269f2148a47SJeff Kirsher  * MMIO as well as for the collision counter and the Tx FIFO underflow
270f2148a47SJeff Kirsher  * indicator. In addition, Tx and Rx buffers need to 4 byte aligned.
271f2148a47SJeff Kirsher  */
272f2148a47SJeff Kirsher 
273f2148a47SJeff Kirsher /* Beware of PCI posted writes */
274f2148a47SJeff Kirsher #define IOSYNC	do { ioread8(ioaddr + StationAddr); } while (0)
275f2148a47SJeff Kirsher 
2769baa3c34SBenoit Taine static const struct pci_device_id rhine_pci_tbl[] = {
277f2148a47SJeff Kirsher 	{ 0x1106, 0x3043, PCI_ANY_ID, PCI_ANY_ID, },	/* VT86C100A */
278f2148a47SJeff Kirsher 	{ 0x1106, 0x3065, PCI_ANY_ID, PCI_ANY_ID, },	/* VT6102 */
279f2148a47SJeff Kirsher 	{ 0x1106, 0x3106, PCI_ANY_ID, PCI_ANY_ID, },	/* 6105{,L,LOM} */
280f2148a47SJeff Kirsher 	{ 0x1106, 0x3053, PCI_ANY_ID, PCI_ANY_ID, },	/* VT6105M */
281f2148a47SJeff Kirsher 	{ }	/* terminate list */
282f2148a47SJeff Kirsher };
283f2148a47SJeff Kirsher MODULE_DEVICE_TABLE(pci, rhine_pci_tbl);
284f2148a47SJeff Kirsher 
2852d283862SAlexey Charkov /* OpenFirmware identifiers for platform-bus devices
286ca8b6e04SAlexey Charkov  * The .data field is currently only used to store quirks
2872d283862SAlexey Charkov  */
288ca8b6e04SAlexey Charkov static u32 vt8500_quirks = rqWOL | rqForceReset | rq6patterns;
2892d283862SAlexey Charkov static struct of_device_id rhine_of_tbl[] = {
290ca8b6e04SAlexey Charkov 	{ .compatible = "via,vt8500-rhine", .data = &vt8500_quirks },
2912d283862SAlexey Charkov 	{ }	/* terminate list */
2922d283862SAlexey Charkov };
2932d283862SAlexey Charkov MODULE_DEVICE_TABLE(of, rhine_of_tbl);
294f2148a47SJeff Kirsher 
295f2148a47SJeff Kirsher /* Offsets to the device registers. */
296f2148a47SJeff Kirsher enum register_offsets {
297f2148a47SJeff Kirsher 	StationAddr=0x00, RxConfig=0x06, TxConfig=0x07, ChipCmd=0x08,
298f2148a47SJeff Kirsher 	ChipCmd1=0x09, TQWake=0x0A,
299f2148a47SJeff Kirsher 	IntrStatus=0x0C, IntrEnable=0x0E,
300f2148a47SJeff Kirsher 	MulticastFilter0=0x10, MulticastFilter1=0x14,
301f2148a47SJeff Kirsher 	RxRingPtr=0x18, TxRingPtr=0x1C, GFIFOTest=0x54,
302f2148a47SJeff Kirsher 	MIIPhyAddr=0x6C, MIIStatus=0x6D, PCIBusConfig=0x6E, PCIBusConfig1=0x6F,
303f2148a47SJeff Kirsher 	MIICmd=0x70, MIIRegAddr=0x71, MIIData=0x72, MACRegEEcsr=0x74,
304f2148a47SJeff Kirsher 	ConfigA=0x78, ConfigB=0x79, ConfigC=0x7A, ConfigD=0x7B,
305f2148a47SJeff Kirsher 	RxMissed=0x7C, RxCRCErrs=0x7E, MiscCmd=0x81,
306f2148a47SJeff Kirsher 	StickyHW=0x83, IntrStatus2=0x84,
307f2148a47SJeff Kirsher 	CamMask=0x88, CamCon=0x92, CamAddr=0x93,
308f2148a47SJeff Kirsher 	WOLcrSet=0xA0, PwcfgSet=0xA1, WOLcgSet=0xA3, WOLcrClr=0xA4,
309f2148a47SJeff Kirsher 	WOLcrClr1=0xA6, WOLcgClr=0xA7,
310f2148a47SJeff Kirsher 	PwrcsrSet=0xA8, PwrcsrSet1=0xA9, PwrcsrClr=0xAC, PwrcsrClr1=0xAD,
311f2148a47SJeff Kirsher };
312f2148a47SJeff Kirsher 
313f2148a47SJeff Kirsher /* Bits in ConfigD */
314f2148a47SJeff Kirsher enum backoff_bits {
315f2148a47SJeff Kirsher 	BackOptional=0x01, BackModify=0x02,
316f2148a47SJeff Kirsher 	BackCaptureEffect=0x04, BackRandom=0x08
317f2148a47SJeff Kirsher };
318f2148a47SJeff Kirsher 
319f2148a47SJeff Kirsher /* Bits in the TxConfig (TCR) register */
320f2148a47SJeff Kirsher enum tcr_bits {
321f2148a47SJeff Kirsher 	TCR_PQEN=0x01,
322f2148a47SJeff Kirsher 	TCR_LB0=0x02,		/* loopback[0] */
323f2148a47SJeff Kirsher 	TCR_LB1=0x04,		/* loopback[1] */
324f2148a47SJeff Kirsher 	TCR_OFSET=0x08,
325f2148a47SJeff Kirsher 	TCR_RTGOPT=0x10,
326f2148a47SJeff Kirsher 	TCR_RTFT0=0x20,
327f2148a47SJeff Kirsher 	TCR_RTFT1=0x40,
328f2148a47SJeff Kirsher 	TCR_RTSF=0x80,
329f2148a47SJeff Kirsher };
330f2148a47SJeff Kirsher 
331f2148a47SJeff Kirsher /* Bits in the CamCon (CAMC) register */
332f2148a47SJeff Kirsher enum camcon_bits {
333f2148a47SJeff Kirsher 	CAMC_CAMEN=0x01,
334f2148a47SJeff Kirsher 	CAMC_VCAMSL=0x02,
335f2148a47SJeff Kirsher 	CAMC_CAMWR=0x04,
336f2148a47SJeff Kirsher 	CAMC_CAMRD=0x08,
337f2148a47SJeff Kirsher };
338f2148a47SJeff Kirsher 
339f2148a47SJeff Kirsher /* Bits in the PCIBusConfig1 (BCR1) register */
340f2148a47SJeff Kirsher enum bcr1_bits {
341f2148a47SJeff Kirsher 	BCR1_POT0=0x01,
342f2148a47SJeff Kirsher 	BCR1_POT1=0x02,
343f2148a47SJeff Kirsher 	BCR1_POT2=0x04,
344f2148a47SJeff Kirsher 	BCR1_CTFT0=0x08,
345f2148a47SJeff Kirsher 	BCR1_CTFT1=0x10,
346f2148a47SJeff Kirsher 	BCR1_CTSF=0x20,
347f2148a47SJeff Kirsher 	BCR1_TXQNOBK=0x40,	/* for VT6105 */
348f2148a47SJeff Kirsher 	BCR1_VIDFR=0x80,	/* for VT6105 */
349f2148a47SJeff Kirsher 	BCR1_MED0=0x40,		/* for VT6102 */
350f2148a47SJeff Kirsher 	BCR1_MED1=0x80,		/* for VT6102 */
351f2148a47SJeff Kirsher };
352f2148a47SJeff Kirsher 
353f2148a47SJeff Kirsher /* Registers we check that mmio and reg are the same. */
354f2148a47SJeff Kirsher static const int mmio_verify_registers[] = {
355f2148a47SJeff Kirsher 	RxConfig, TxConfig, IntrEnable, ConfigA, ConfigB, ConfigC, ConfigD,
356f2148a47SJeff Kirsher 	0
357f2148a47SJeff Kirsher };
358f2148a47SJeff Kirsher 
359f2148a47SJeff Kirsher /* Bits in the interrupt status/mask registers. */
360f2148a47SJeff Kirsher enum intr_status_bits {
3617ab87ff4SFrancois Romieu 	IntrRxDone	= 0x0001,
3627ab87ff4SFrancois Romieu 	IntrTxDone	= 0x0002,
3637ab87ff4SFrancois Romieu 	IntrRxErr	= 0x0004,
3647ab87ff4SFrancois Romieu 	IntrTxError	= 0x0008,
3657ab87ff4SFrancois Romieu 	IntrRxEmpty	= 0x0020,
366f2148a47SJeff Kirsher 	IntrPCIErr	= 0x0040,
3677ab87ff4SFrancois Romieu 	IntrStatsMax	= 0x0080,
3687ab87ff4SFrancois Romieu 	IntrRxEarly	= 0x0100,
3697ab87ff4SFrancois Romieu 	IntrTxUnderrun	= 0x0210,
3707ab87ff4SFrancois Romieu 	IntrRxOverflow	= 0x0400,
3717ab87ff4SFrancois Romieu 	IntrRxDropped	= 0x0800,
3727ab87ff4SFrancois Romieu 	IntrRxNoBuf	= 0x1000,
3737ab87ff4SFrancois Romieu 	IntrTxAborted	= 0x2000,
3747ab87ff4SFrancois Romieu 	IntrLinkChange	= 0x4000,
375f2148a47SJeff Kirsher 	IntrRxWakeUp	= 0x8000,
376f2148a47SJeff Kirsher 	IntrTxDescRace		= 0x080000,	/* mapped from IntrStatus2 */
3777ab87ff4SFrancois Romieu 	IntrNormalSummary	= IntrRxDone | IntrTxDone,
3787ab87ff4SFrancois Romieu 	IntrTxErrSummary	= IntrTxDescRace | IntrTxAborted | IntrTxError |
3797ab87ff4SFrancois Romieu 				  IntrTxUnderrun,
380f2148a47SJeff Kirsher };
381f2148a47SJeff Kirsher 
382f2148a47SJeff Kirsher /* Bits in WOLcrSet/WOLcrClr and PwrcsrSet/PwrcsrClr */
383f2148a47SJeff Kirsher enum wol_bits {
384f2148a47SJeff Kirsher 	WOLucast	= 0x10,
385f2148a47SJeff Kirsher 	WOLmagic	= 0x20,
386f2148a47SJeff Kirsher 	WOLbmcast	= 0x30,
387f2148a47SJeff Kirsher 	WOLlnkon	= 0x40,
388f2148a47SJeff Kirsher 	WOLlnkoff	= 0x80,
389f2148a47SJeff Kirsher };
390f2148a47SJeff Kirsher 
391f2148a47SJeff Kirsher /* The Rx and Tx buffer descriptors. */
392f2148a47SJeff Kirsher struct rx_desc {
393f2148a47SJeff Kirsher 	__le32 rx_status;
394f2148a47SJeff Kirsher 	__le32 desc_length; /* Chain flag, Buffer/frame length */
395f2148a47SJeff Kirsher 	__le32 addr;
396f2148a47SJeff Kirsher 	__le32 next_desc;
397f2148a47SJeff Kirsher };
398f2148a47SJeff Kirsher struct tx_desc {
399f2148a47SJeff Kirsher 	__le32 tx_status;
400f2148a47SJeff Kirsher 	__le32 desc_length; /* Chain flag, Tx Config, Frame length */
401f2148a47SJeff Kirsher 	__le32 addr;
402f2148a47SJeff Kirsher 	__le32 next_desc;
403f2148a47SJeff Kirsher };
404f2148a47SJeff Kirsher 
405f2148a47SJeff Kirsher /* Initial value for tx_desc.desc_length, Buffer size goes to bits 0-10 */
406f2148a47SJeff Kirsher #define TXDESC		0x00e08000
407f2148a47SJeff Kirsher 
408f2148a47SJeff Kirsher enum rx_status_bits {
409f2148a47SJeff Kirsher 	RxOK=0x8000, RxWholePkt=0x0300, RxErr=0x008F
410f2148a47SJeff Kirsher };
411f2148a47SJeff Kirsher 
412f2148a47SJeff Kirsher /* Bits in *_desc.*_status */
413f2148a47SJeff Kirsher enum desc_status_bits {
414f2148a47SJeff Kirsher 	DescOwn=0x80000000
415f2148a47SJeff Kirsher };
416f2148a47SJeff Kirsher 
417f2148a47SJeff Kirsher /* Bits in *_desc.*_length */
418f2148a47SJeff Kirsher enum desc_length_bits {
419f2148a47SJeff Kirsher 	DescTag=0x00010000
420f2148a47SJeff Kirsher };
421f2148a47SJeff Kirsher 
422f2148a47SJeff Kirsher /* Bits in ChipCmd. */
423f2148a47SJeff Kirsher enum chip_cmd_bits {
424f2148a47SJeff Kirsher 	CmdInit=0x01, CmdStart=0x02, CmdStop=0x04, CmdRxOn=0x08,
425f2148a47SJeff Kirsher 	CmdTxOn=0x10, Cmd1TxDemand=0x20, CmdRxDemand=0x40,
426f2148a47SJeff Kirsher 	Cmd1EarlyRx=0x01, Cmd1EarlyTx=0x02, Cmd1FDuplex=0x04,
427f2148a47SJeff Kirsher 	Cmd1NoTxPoll=0x08, Cmd1Reset=0x80,
428f2148a47SJeff Kirsher };
429f2148a47SJeff Kirsher 
430f7b5d1b9SJamie Gloudon struct rhine_stats {
431f7b5d1b9SJamie Gloudon 	u64		packets;
432f7b5d1b9SJamie Gloudon 	u64		bytes;
433f7b5d1b9SJamie Gloudon 	struct u64_stats_sync syncp;
434f7b5d1b9SJamie Gloudon };
435f7b5d1b9SJamie Gloudon 
436f2148a47SJeff Kirsher struct rhine_private {
437f2148a47SJeff Kirsher 	/* Bit mask for configured VLAN ids */
438f2148a47SJeff Kirsher 	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
439f2148a47SJeff Kirsher 
440f2148a47SJeff Kirsher 	/* Descriptor rings */
441f2148a47SJeff Kirsher 	struct rx_desc *rx_ring;
442f2148a47SJeff Kirsher 	struct tx_desc *tx_ring;
443f2148a47SJeff Kirsher 	dma_addr_t rx_ring_dma;
444f2148a47SJeff Kirsher 	dma_addr_t tx_ring_dma;
445f2148a47SJeff Kirsher 
446f2148a47SJeff Kirsher 	/* The addresses of receive-in-place skbuffs. */
447f2148a47SJeff Kirsher 	struct sk_buff *rx_skbuff[RX_RING_SIZE];
448f2148a47SJeff Kirsher 	dma_addr_t rx_skbuff_dma[RX_RING_SIZE];
449f2148a47SJeff Kirsher 
450f2148a47SJeff Kirsher 	/* The saved address of a sent-in-place packet/buffer, for later free(). */
451f2148a47SJeff Kirsher 	struct sk_buff *tx_skbuff[TX_RING_SIZE];
452f2148a47SJeff Kirsher 	dma_addr_t tx_skbuff_dma[TX_RING_SIZE];
453f2148a47SJeff Kirsher 
454f2148a47SJeff Kirsher 	/* Tx bounce buffers (Rhine-I only) */
455f2148a47SJeff Kirsher 	unsigned char *tx_buf[TX_RING_SIZE];
456f2148a47SJeff Kirsher 	unsigned char *tx_bufs;
457f2148a47SJeff Kirsher 	dma_addr_t tx_bufs_dma;
458f2148a47SJeff Kirsher 
459f7630d18SAlexey Charkov 	int irq;
460f2148a47SJeff Kirsher 	long pioaddr;
461f2148a47SJeff Kirsher 	struct net_device *dev;
462f2148a47SJeff Kirsher 	struct napi_struct napi;
463f2148a47SJeff Kirsher 	spinlock_t lock;
4647ab87ff4SFrancois Romieu 	struct mutex task_lock;
4657ab87ff4SFrancois Romieu 	bool task_enable;
4667ab87ff4SFrancois Romieu 	struct work_struct slow_event_task;
467f2148a47SJeff Kirsher 	struct work_struct reset_task;
468f2148a47SJeff Kirsher 
469fc3e0f8aSFrancois Romieu 	u32 msg_enable;
470fc3e0f8aSFrancois Romieu 
471f2148a47SJeff Kirsher 	/* Frequently used values: keep some adjacent for cache effect. */
472f2148a47SJeff Kirsher 	u32 quirks;
473f2148a47SJeff Kirsher 	struct rx_desc *rx_head_desc;
474f2148a47SJeff Kirsher 	unsigned int cur_rx, dirty_rx;	/* Producer/consumer ring indices */
475f2148a47SJeff Kirsher 	unsigned int cur_tx, dirty_tx;
476f2148a47SJeff Kirsher 	unsigned int rx_buf_sz;		/* Based on MTU+slack. */
477f7b5d1b9SJamie Gloudon 	struct rhine_stats rx_stats;
478f7b5d1b9SJamie Gloudon 	struct rhine_stats tx_stats;
479f2148a47SJeff Kirsher 	u8 wolopts;
480f2148a47SJeff Kirsher 
481f2148a47SJeff Kirsher 	u8 tx_thresh, rx_thresh;
482f2148a47SJeff Kirsher 
483f2148a47SJeff Kirsher 	struct mii_if_info mii_if;
484f2148a47SJeff Kirsher 	void __iomem *base;
485f2148a47SJeff Kirsher };
486f2148a47SJeff Kirsher 
487f2148a47SJeff Kirsher #define BYTE_REG_BITS_ON(x, p)      do { iowrite8((ioread8((p))|(x)), (p)); } while (0)
488f2148a47SJeff Kirsher #define WORD_REG_BITS_ON(x, p)      do { iowrite16((ioread16((p))|(x)), (p)); } while (0)
489f2148a47SJeff Kirsher #define DWORD_REG_BITS_ON(x, p)     do { iowrite32((ioread32((p))|(x)), (p)); } while (0)
490f2148a47SJeff Kirsher 
491f2148a47SJeff Kirsher #define BYTE_REG_BITS_IS_ON(x, p)   (ioread8((p)) & (x))
492f2148a47SJeff Kirsher #define WORD_REG_BITS_IS_ON(x, p)   (ioread16((p)) & (x))
493f2148a47SJeff Kirsher #define DWORD_REG_BITS_IS_ON(x, p)  (ioread32((p)) & (x))
494f2148a47SJeff Kirsher 
495f2148a47SJeff Kirsher #define BYTE_REG_BITS_OFF(x, p)     do { iowrite8(ioread8((p)) & (~(x)), (p)); } while (0)
496f2148a47SJeff Kirsher #define WORD_REG_BITS_OFF(x, p)     do { iowrite16(ioread16((p)) & (~(x)), (p)); } while (0)
497f2148a47SJeff Kirsher #define DWORD_REG_BITS_OFF(x, p)    do { iowrite32(ioread32((p)) & (~(x)), (p)); } while (0)
498f2148a47SJeff Kirsher 
499f2148a47SJeff Kirsher #define BYTE_REG_BITS_SET(x, m, p)   do { iowrite8((ioread8((p)) & (~(m)))|(x), (p)); } while (0)
500f2148a47SJeff Kirsher #define WORD_REG_BITS_SET(x, m, p)   do { iowrite16((ioread16((p)) & (~(m)))|(x), (p)); } while (0)
501f2148a47SJeff Kirsher #define DWORD_REG_BITS_SET(x, m, p)  do { iowrite32((ioread32((p)) & (~(m)))|(x), (p)); } while (0)
502f2148a47SJeff Kirsher 
503f2148a47SJeff Kirsher 
504f2148a47SJeff Kirsher static int  mdio_read(struct net_device *dev, int phy_id, int location);
505f2148a47SJeff Kirsher static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
506f2148a47SJeff Kirsher static int  rhine_open(struct net_device *dev);
507f2148a47SJeff Kirsher static void rhine_reset_task(struct work_struct *work);
5087ab87ff4SFrancois Romieu static void rhine_slow_event_task(struct work_struct *work);
509f2148a47SJeff Kirsher static void rhine_tx_timeout(struct net_device *dev);
510f2148a47SJeff Kirsher static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
511f2148a47SJeff Kirsher 				  struct net_device *dev);
512f2148a47SJeff Kirsher static irqreturn_t rhine_interrupt(int irq, void *dev_instance);
513f2148a47SJeff Kirsher static void rhine_tx(struct net_device *dev);
514f2148a47SJeff Kirsher static int rhine_rx(struct net_device *dev, int limit);
515f2148a47SJeff Kirsher static void rhine_set_rx_mode(struct net_device *dev);
516f7b5d1b9SJamie Gloudon static struct rtnl_link_stats64 *rhine_get_stats64(struct net_device *dev,
517f7b5d1b9SJamie Gloudon 	       struct rtnl_link_stats64 *stats);
518f2148a47SJeff Kirsher static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
519f2148a47SJeff Kirsher static const struct ethtool_ops netdev_ethtool_ops;
520f2148a47SJeff Kirsher static int  rhine_close(struct net_device *dev);
52180d5c368SPatrick McHardy static int rhine_vlan_rx_add_vid(struct net_device *dev,
52280d5c368SPatrick McHardy 				 __be16 proto, u16 vid);
52380d5c368SPatrick McHardy static int rhine_vlan_rx_kill_vid(struct net_device *dev,
52480d5c368SPatrick McHardy 				  __be16 proto, u16 vid);
5257ab87ff4SFrancois Romieu static void rhine_restart_tx(struct net_device *dev);
526f2148a47SJeff Kirsher 
5273f8c91a7SAndreas Mohr static void rhine_wait_bit(struct rhine_private *rp, u8 reg, u8 mask, bool low)
528a384a33bSFrancois Romieu {
529a384a33bSFrancois Romieu 	void __iomem *ioaddr = rp->base;
530a384a33bSFrancois Romieu 	int i;
531a384a33bSFrancois Romieu 
532a384a33bSFrancois Romieu 	for (i = 0; i < 1024; i++) {
5333f8c91a7SAndreas Mohr 		bool has_mask_bits = !!(ioread8(ioaddr + reg) & mask);
5343f8c91a7SAndreas Mohr 
5353f8c91a7SAndreas Mohr 		if (low ^ has_mask_bits)
536a384a33bSFrancois Romieu 			break;
537a384a33bSFrancois Romieu 		udelay(10);
538a384a33bSFrancois Romieu 	}
539a384a33bSFrancois Romieu 	if (i > 64) {
540fc3e0f8aSFrancois Romieu 		netif_dbg(rp, hw, rp->dev, "%s bit wait (%02x/%02x) cycle "
5413f8c91a7SAndreas Mohr 			  "count: %04d\n", low ? "low" : "high", reg, mask, i);
542a384a33bSFrancois Romieu 	}
543a384a33bSFrancois Romieu }
544a384a33bSFrancois Romieu 
545a384a33bSFrancois Romieu static void rhine_wait_bit_high(struct rhine_private *rp, u8 reg, u8 mask)
546a384a33bSFrancois Romieu {
5473f8c91a7SAndreas Mohr 	rhine_wait_bit(rp, reg, mask, false);
548a384a33bSFrancois Romieu }
549a384a33bSFrancois Romieu 
550a384a33bSFrancois Romieu static void rhine_wait_bit_low(struct rhine_private *rp, u8 reg, u8 mask)
551a384a33bSFrancois Romieu {
5523f8c91a7SAndreas Mohr 	rhine_wait_bit(rp, reg, mask, true);
553a384a33bSFrancois Romieu }
554f2148a47SJeff Kirsher 
555a20a28bcSFrancois Romieu static u32 rhine_get_events(struct rhine_private *rp)
556f2148a47SJeff Kirsher {
557f2148a47SJeff Kirsher 	void __iomem *ioaddr = rp->base;
558f2148a47SJeff Kirsher 	u32 intr_status;
559f2148a47SJeff Kirsher 
560f2148a47SJeff Kirsher 	intr_status = ioread16(ioaddr + IntrStatus);
561f2148a47SJeff Kirsher 	/* On Rhine-II, Bit 3 indicates Tx descriptor write-back race. */
562f2148a47SJeff Kirsher 	if (rp->quirks & rqStatusWBRace)
563f2148a47SJeff Kirsher 		intr_status |= ioread8(ioaddr + IntrStatus2) << 16;
564f2148a47SJeff Kirsher 	return intr_status;
565f2148a47SJeff Kirsher }
566f2148a47SJeff Kirsher 
567a20a28bcSFrancois Romieu static void rhine_ack_events(struct rhine_private *rp, u32 mask)
568a20a28bcSFrancois Romieu {
569a20a28bcSFrancois Romieu 	void __iomem *ioaddr = rp->base;
570a20a28bcSFrancois Romieu 
571a20a28bcSFrancois Romieu 	if (rp->quirks & rqStatusWBRace)
572a20a28bcSFrancois Romieu 		iowrite8(mask >> 16, ioaddr + IntrStatus2);
573a20a28bcSFrancois Romieu 	iowrite16(mask, ioaddr + IntrStatus);
5747ab87ff4SFrancois Romieu 	mmiowb();
575a20a28bcSFrancois Romieu }
576a20a28bcSFrancois Romieu 
577f2148a47SJeff Kirsher /*
578f2148a47SJeff Kirsher  * Get power related registers into sane state.
579f2148a47SJeff Kirsher  * Notify user about past WOL event.
580f2148a47SJeff Kirsher  */
581f2148a47SJeff Kirsher static void rhine_power_init(struct net_device *dev)
582f2148a47SJeff Kirsher {
583f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
584f2148a47SJeff Kirsher 	void __iomem *ioaddr = rp->base;
585f2148a47SJeff Kirsher 	u16 wolstat;
586f2148a47SJeff Kirsher 
587f2148a47SJeff Kirsher 	if (rp->quirks & rqWOL) {
588f2148a47SJeff Kirsher 		/* Make sure chip is in power state D0 */
589f2148a47SJeff Kirsher 		iowrite8(ioread8(ioaddr + StickyHW) & 0xFC, ioaddr + StickyHW);
590f2148a47SJeff Kirsher 
591f2148a47SJeff Kirsher 		/* Disable "force PME-enable" */
592f2148a47SJeff Kirsher 		iowrite8(0x80, ioaddr + WOLcgClr);
593f2148a47SJeff Kirsher 
594f2148a47SJeff Kirsher 		/* Clear power-event config bits (WOL) */
595f2148a47SJeff Kirsher 		iowrite8(0xFF, ioaddr + WOLcrClr);
596f2148a47SJeff Kirsher 		/* More recent cards can manage two additional patterns */
597f2148a47SJeff Kirsher 		if (rp->quirks & rq6patterns)
598f2148a47SJeff Kirsher 			iowrite8(0x03, ioaddr + WOLcrClr1);
599f2148a47SJeff Kirsher 
600f2148a47SJeff Kirsher 		/* Save power-event status bits */
601f2148a47SJeff Kirsher 		wolstat = ioread8(ioaddr + PwrcsrSet);
602f2148a47SJeff Kirsher 		if (rp->quirks & rq6patterns)
603f2148a47SJeff Kirsher 			wolstat |= (ioread8(ioaddr + PwrcsrSet1) & 0x03) << 8;
604f2148a47SJeff Kirsher 
605f2148a47SJeff Kirsher 		/* Clear power-event status bits */
606f2148a47SJeff Kirsher 		iowrite8(0xFF, ioaddr + PwrcsrClr);
607f2148a47SJeff Kirsher 		if (rp->quirks & rq6patterns)
608f2148a47SJeff Kirsher 			iowrite8(0x03, ioaddr + PwrcsrClr1);
609f2148a47SJeff Kirsher 
610f2148a47SJeff Kirsher 		if (wolstat) {
611f2148a47SJeff Kirsher 			char *reason;
612f2148a47SJeff Kirsher 			switch (wolstat) {
613f2148a47SJeff Kirsher 			case WOLmagic:
614f2148a47SJeff Kirsher 				reason = "Magic packet";
615f2148a47SJeff Kirsher 				break;
616f2148a47SJeff Kirsher 			case WOLlnkon:
617f2148a47SJeff Kirsher 				reason = "Link went up";
618f2148a47SJeff Kirsher 				break;
619f2148a47SJeff Kirsher 			case WOLlnkoff:
620f2148a47SJeff Kirsher 				reason = "Link went down";
621f2148a47SJeff Kirsher 				break;
622f2148a47SJeff Kirsher 			case WOLucast:
623f2148a47SJeff Kirsher 				reason = "Unicast packet";
624f2148a47SJeff Kirsher 				break;
625f2148a47SJeff Kirsher 			case WOLbmcast:
626f2148a47SJeff Kirsher 				reason = "Multicast/broadcast packet";
627f2148a47SJeff Kirsher 				break;
628f2148a47SJeff Kirsher 			default:
629f2148a47SJeff Kirsher 				reason = "Unknown";
630f2148a47SJeff Kirsher 			}
631f2148a47SJeff Kirsher 			netdev_info(dev, "Woke system up. Reason: %s\n",
632f2148a47SJeff Kirsher 				    reason);
633f2148a47SJeff Kirsher 		}
634f2148a47SJeff Kirsher 	}
635f2148a47SJeff Kirsher }
636f2148a47SJeff Kirsher 
637f2148a47SJeff Kirsher static void rhine_chip_reset(struct net_device *dev)
638f2148a47SJeff Kirsher {
639f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
640f2148a47SJeff Kirsher 	void __iomem *ioaddr = rp->base;
641fc3e0f8aSFrancois Romieu 	u8 cmd1;
642f2148a47SJeff Kirsher 
643f2148a47SJeff Kirsher 	iowrite8(Cmd1Reset, ioaddr + ChipCmd1);
644f2148a47SJeff Kirsher 	IOSYNC;
645f2148a47SJeff Kirsher 
646f2148a47SJeff Kirsher 	if (ioread8(ioaddr + ChipCmd1) & Cmd1Reset) {
647f2148a47SJeff Kirsher 		netdev_info(dev, "Reset not complete yet. Trying harder.\n");
648f2148a47SJeff Kirsher 
649f2148a47SJeff Kirsher 		/* Force reset */
650f2148a47SJeff Kirsher 		if (rp->quirks & rqForceReset)
651f2148a47SJeff Kirsher 			iowrite8(0x40, ioaddr + MiscCmd);
652f2148a47SJeff Kirsher 
653f2148a47SJeff Kirsher 		/* Reset can take somewhat longer (rare) */
654a384a33bSFrancois Romieu 		rhine_wait_bit_low(rp, ChipCmd1, Cmd1Reset);
655f2148a47SJeff Kirsher 	}
656f2148a47SJeff Kirsher 
657fc3e0f8aSFrancois Romieu 	cmd1 = ioread8(ioaddr + ChipCmd1);
658fc3e0f8aSFrancois Romieu 	netif_info(rp, hw, dev, "Reset %s\n", (cmd1 & Cmd1Reset) ?
659f2148a47SJeff Kirsher 		   "failed" : "succeeded");
660f2148a47SJeff Kirsher }
661f2148a47SJeff Kirsher 
662f2148a47SJeff Kirsher static void enable_mmio(long pioaddr, u32 quirks)
663f2148a47SJeff Kirsher {
664f2148a47SJeff Kirsher 	int n;
6655b579e21SAlexey Charkov 
6665b579e21SAlexey Charkov 	if (quirks & rqNeedEnMMIO) {
667f2148a47SJeff Kirsher 		if (quirks & rqRhineI) {
6685b579e21SAlexey Charkov 			/* More recent docs say that this bit is reserved */
669f2148a47SJeff Kirsher 			n = inb(pioaddr + ConfigA) | 0x20;
670f2148a47SJeff Kirsher 			outb(n, pioaddr + ConfigA);
671f2148a47SJeff Kirsher 		} else {
672f2148a47SJeff Kirsher 			n = inb(pioaddr + ConfigD) | 0x80;
673f2148a47SJeff Kirsher 			outb(n, pioaddr + ConfigD);
674f2148a47SJeff Kirsher 		}
675f2148a47SJeff Kirsher 	}
6765b579e21SAlexey Charkov }
6775b579e21SAlexey Charkov 
6785b579e21SAlexey Charkov static inline int verify_mmio(struct device *hwdev,
6795b579e21SAlexey Charkov 			      long pioaddr,
6805b579e21SAlexey Charkov 			      void __iomem *ioaddr,
6815b579e21SAlexey Charkov 			      u32 quirks)
6825b579e21SAlexey Charkov {
6835b579e21SAlexey Charkov 	if (quirks & rqNeedEnMMIO) {
6845b579e21SAlexey Charkov 		int i = 0;
6855b579e21SAlexey Charkov 
6865b579e21SAlexey Charkov 		/* Check that selected MMIO registers match the PIO ones */
6875b579e21SAlexey Charkov 		while (mmio_verify_registers[i]) {
6885b579e21SAlexey Charkov 			int reg = mmio_verify_registers[i++];
6895b579e21SAlexey Charkov 			unsigned char a = inb(pioaddr+reg);
6905b579e21SAlexey Charkov 			unsigned char b = readb(ioaddr+reg);
6915b579e21SAlexey Charkov 
6925b579e21SAlexey Charkov 			if (a != b) {
6935b579e21SAlexey Charkov 				dev_err(hwdev,
6945b579e21SAlexey Charkov 					"MMIO do not match PIO [%02x] (%02x != %02x)\n",
6955b579e21SAlexey Charkov 					reg, a, b);
6965b579e21SAlexey Charkov 				return -EIO;
6975b579e21SAlexey Charkov 			}
6985b579e21SAlexey Charkov 		}
6995b579e21SAlexey Charkov 	}
7005b579e21SAlexey Charkov 	return 0;
7015b579e21SAlexey Charkov }
702f2148a47SJeff Kirsher 
703f2148a47SJeff Kirsher /*
704f2148a47SJeff Kirsher  * Loads bytes 0x00-0x05, 0x6E-0x6F, 0x78-0x7B from EEPROM
705f2148a47SJeff Kirsher  * (plus 0x6C for Rhine-I/II)
706f2148a47SJeff Kirsher  */
70776e239e1SBill Pemberton static void rhine_reload_eeprom(long pioaddr, struct net_device *dev)
708f2148a47SJeff Kirsher {
709f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
710f2148a47SJeff Kirsher 	void __iomem *ioaddr = rp->base;
711a384a33bSFrancois Romieu 	int i;
712f2148a47SJeff Kirsher 
713f2148a47SJeff Kirsher 	outb(0x20, pioaddr + MACRegEEcsr);
714a384a33bSFrancois Romieu 	for (i = 0; i < 1024; i++) {
715a384a33bSFrancois Romieu 		if (!(inb(pioaddr + MACRegEEcsr) & 0x20))
716a384a33bSFrancois Romieu 			break;
717a384a33bSFrancois Romieu 	}
718a384a33bSFrancois Romieu 	if (i > 512)
719a384a33bSFrancois Romieu 		pr_info("%4d cycles used @ %s:%d\n", i, __func__, __LINE__);
720f2148a47SJeff Kirsher 
721f2148a47SJeff Kirsher 	/*
722f2148a47SJeff Kirsher 	 * Reloading from EEPROM overwrites ConfigA-D, so we must re-enable
723f2148a47SJeff Kirsher 	 * MMIO. If reloading EEPROM was done first this could be avoided, but
724f2148a47SJeff Kirsher 	 * it is not known if that still works with the "win98-reboot" problem.
725f2148a47SJeff Kirsher 	 */
726f2148a47SJeff Kirsher 	enable_mmio(pioaddr, rp->quirks);
727f2148a47SJeff Kirsher 
728f2148a47SJeff Kirsher 	/* Turn off EEPROM-controlled wake-up (magic packet) */
729f2148a47SJeff Kirsher 	if (rp->quirks & rqWOL)
730f2148a47SJeff Kirsher 		iowrite8(ioread8(ioaddr + ConfigA) & 0xFC, ioaddr + ConfigA);
731f2148a47SJeff Kirsher 
732f2148a47SJeff Kirsher }
733f2148a47SJeff Kirsher 
734f2148a47SJeff Kirsher #ifdef CONFIG_NET_POLL_CONTROLLER
735f2148a47SJeff Kirsher static void rhine_poll(struct net_device *dev)
736f2148a47SJeff Kirsher {
73705d334ecSFrancois Romieu 	struct rhine_private *rp = netdev_priv(dev);
738f7630d18SAlexey Charkov 	const int irq = rp->irq;
73905d334ecSFrancois Romieu 
74005d334ecSFrancois Romieu 	disable_irq(irq);
74105d334ecSFrancois Romieu 	rhine_interrupt(irq, dev);
74205d334ecSFrancois Romieu 	enable_irq(irq);
743f2148a47SJeff Kirsher }
744f2148a47SJeff Kirsher #endif
745f2148a47SJeff Kirsher 
746269f3114SFrancois Romieu static void rhine_kick_tx_threshold(struct rhine_private *rp)
747269f3114SFrancois Romieu {
748269f3114SFrancois Romieu 	if (rp->tx_thresh < 0xe0) {
749269f3114SFrancois Romieu 		void __iomem *ioaddr = rp->base;
750269f3114SFrancois Romieu 
751269f3114SFrancois Romieu 		rp->tx_thresh += 0x20;
752269f3114SFrancois Romieu 		BYTE_REG_BITS_SET(rp->tx_thresh, 0x80, ioaddr + TxConfig);
753269f3114SFrancois Romieu 	}
754269f3114SFrancois Romieu }
755269f3114SFrancois Romieu 
7567ab87ff4SFrancois Romieu static void rhine_tx_err(struct rhine_private *rp, u32 status)
7577ab87ff4SFrancois Romieu {
7587ab87ff4SFrancois Romieu 	struct net_device *dev = rp->dev;
7597ab87ff4SFrancois Romieu 
7607ab87ff4SFrancois Romieu 	if (status & IntrTxAborted) {
761fc3e0f8aSFrancois Romieu 		netif_info(rp, tx_err, dev,
762fc3e0f8aSFrancois Romieu 			   "Abort %08x, frame dropped\n", status);
7637ab87ff4SFrancois Romieu 	}
7647ab87ff4SFrancois Romieu 
7657ab87ff4SFrancois Romieu 	if (status & IntrTxUnderrun) {
7667ab87ff4SFrancois Romieu 		rhine_kick_tx_threshold(rp);
767fc3e0f8aSFrancois Romieu 		netif_info(rp, tx_err ,dev, "Transmitter underrun, "
768fc3e0f8aSFrancois Romieu 			   "Tx threshold now %02x\n", rp->tx_thresh);
7697ab87ff4SFrancois Romieu 	}
7707ab87ff4SFrancois Romieu 
771fc3e0f8aSFrancois Romieu 	if (status & IntrTxDescRace)
772fc3e0f8aSFrancois Romieu 		netif_info(rp, tx_err, dev, "Tx descriptor write-back race\n");
7737ab87ff4SFrancois Romieu 
7747ab87ff4SFrancois Romieu 	if ((status & IntrTxError) &&
7757ab87ff4SFrancois Romieu 	    (status & (IntrTxAborted | IntrTxUnderrun | IntrTxDescRace)) == 0) {
7767ab87ff4SFrancois Romieu 		rhine_kick_tx_threshold(rp);
777fc3e0f8aSFrancois Romieu 		netif_info(rp, tx_err, dev, "Unspecified error. "
778fc3e0f8aSFrancois Romieu 			   "Tx threshold now %02x\n", rp->tx_thresh);
7797ab87ff4SFrancois Romieu 	}
7807ab87ff4SFrancois Romieu 
7817ab87ff4SFrancois Romieu 	rhine_restart_tx(dev);
7827ab87ff4SFrancois Romieu }
7837ab87ff4SFrancois Romieu 
7847ab87ff4SFrancois Romieu static void rhine_update_rx_crc_and_missed_errord(struct rhine_private *rp)
7857ab87ff4SFrancois Romieu {
7867ab87ff4SFrancois Romieu 	void __iomem *ioaddr = rp->base;
7877ab87ff4SFrancois Romieu 	struct net_device_stats *stats = &rp->dev->stats;
7887ab87ff4SFrancois Romieu 
7897ab87ff4SFrancois Romieu 	stats->rx_crc_errors    += ioread16(ioaddr + RxCRCErrs);
7907ab87ff4SFrancois Romieu 	stats->rx_missed_errors += ioread16(ioaddr + RxMissed);
7917ab87ff4SFrancois Romieu 
7927ab87ff4SFrancois Romieu 	/*
7937ab87ff4SFrancois Romieu 	 * Clears the "tally counters" for CRC errors and missed frames(?).
7947ab87ff4SFrancois Romieu 	 * It has been reported that some chips need a write of 0 to clear
7957ab87ff4SFrancois Romieu 	 * these, for others the counters are set to 1 when written to and
7967ab87ff4SFrancois Romieu 	 * instead cleared when read. So we clear them both ways ...
7977ab87ff4SFrancois Romieu 	 */
7987ab87ff4SFrancois Romieu 	iowrite32(0, ioaddr + RxMissed);
7997ab87ff4SFrancois Romieu 	ioread16(ioaddr + RxCRCErrs);
8007ab87ff4SFrancois Romieu 	ioread16(ioaddr + RxMissed);
8017ab87ff4SFrancois Romieu }
8027ab87ff4SFrancois Romieu 
8037ab87ff4SFrancois Romieu #define RHINE_EVENT_NAPI_RX	(IntrRxDone | \
8047ab87ff4SFrancois Romieu 				 IntrRxErr | \
8057ab87ff4SFrancois Romieu 				 IntrRxEmpty | \
8067ab87ff4SFrancois Romieu 				 IntrRxOverflow	| \
8077ab87ff4SFrancois Romieu 				 IntrRxDropped | \
8087ab87ff4SFrancois Romieu 				 IntrRxNoBuf | \
8097ab87ff4SFrancois Romieu 				 IntrRxWakeUp)
8107ab87ff4SFrancois Romieu 
8117ab87ff4SFrancois Romieu #define RHINE_EVENT_NAPI_TX_ERR	(IntrTxError | \
8127ab87ff4SFrancois Romieu 				 IntrTxAborted | \
8137ab87ff4SFrancois Romieu 				 IntrTxUnderrun | \
8147ab87ff4SFrancois Romieu 				 IntrTxDescRace)
8157ab87ff4SFrancois Romieu #define RHINE_EVENT_NAPI_TX	(IntrTxDone | RHINE_EVENT_NAPI_TX_ERR)
8167ab87ff4SFrancois Romieu 
8177ab87ff4SFrancois Romieu #define RHINE_EVENT_NAPI	(RHINE_EVENT_NAPI_RX | \
8187ab87ff4SFrancois Romieu 				 RHINE_EVENT_NAPI_TX | \
8197ab87ff4SFrancois Romieu 				 IntrStatsMax)
8207ab87ff4SFrancois Romieu #define RHINE_EVENT_SLOW	(IntrPCIErr | IntrLinkChange)
8217ab87ff4SFrancois Romieu #define RHINE_EVENT		(RHINE_EVENT_NAPI | RHINE_EVENT_SLOW)
8227ab87ff4SFrancois Romieu 
823f2148a47SJeff Kirsher static int rhine_napipoll(struct napi_struct *napi, int budget)
824f2148a47SJeff Kirsher {
825f2148a47SJeff Kirsher 	struct rhine_private *rp = container_of(napi, struct rhine_private, napi);
826f2148a47SJeff Kirsher 	struct net_device *dev = rp->dev;
827f2148a47SJeff Kirsher 	void __iomem *ioaddr = rp->base;
8287ab87ff4SFrancois Romieu 	u16 enable_mask = RHINE_EVENT & 0xffff;
8297ab87ff4SFrancois Romieu 	int work_done = 0;
8307ab87ff4SFrancois Romieu 	u32 status;
831f2148a47SJeff Kirsher 
8327ab87ff4SFrancois Romieu 	status = rhine_get_events(rp);
8337ab87ff4SFrancois Romieu 	rhine_ack_events(rp, status & ~RHINE_EVENT_SLOW);
8347ab87ff4SFrancois Romieu 
8357ab87ff4SFrancois Romieu 	if (status & RHINE_EVENT_NAPI_RX)
8367ab87ff4SFrancois Romieu 		work_done += rhine_rx(dev, budget);
8377ab87ff4SFrancois Romieu 
8387ab87ff4SFrancois Romieu 	if (status & RHINE_EVENT_NAPI_TX) {
8397ab87ff4SFrancois Romieu 		if (status & RHINE_EVENT_NAPI_TX_ERR) {
8407ab87ff4SFrancois Romieu 			/* Avoid scavenging before Tx engine turned off */
841a384a33bSFrancois Romieu 			rhine_wait_bit_low(rp, ChipCmd, CmdTxOn);
842fc3e0f8aSFrancois Romieu 			if (ioread8(ioaddr + ChipCmd) & CmdTxOn)
843fc3e0f8aSFrancois Romieu 				netif_warn(rp, tx_err, dev, "Tx still on\n");
8447ab87ff4SFrancois Romieu 		}
845fc3e0f8aSFrancois Romieu 
8467ab87ff4SFrancois Romieu 		rhine_tx(dev);
8477ab87ff4SFrancois Romieu 
8487ab87ff4SFrancois Romieu 		if (status & RHINE_EVENT_NAPI_TX_ERR)
8497ab87ff4SFrancois Romieu 			rhine_tx_err(rp, status);
8507ab87ff4SFrancois Romieu 	}
8517ab87ff4SFrancois Romieu 
8527ab87ff4SFrancois Romieu 	if (status & IntrStatsMax) {
8537ab87ff4SFrancois Romieu 		spin_lock(&rp->lock);
8547ab87ff4SFrancois Romieu 		rhine_update_rx_crc_and_missed_errord(rp);
8557ab87ff4SFrancois Romieu 		spin_unlock(&rp->lock);
8567ab87ff4SFrancois Romieu 	}
8577ab87ff4SFrancois Romieu 
8587ab87ff4SFrancois Romieu 	if (status & RHINE_EVENT_SLOW) {
8597ab87ff4SFrancois Romieu 		enable_mask &= ~RHINE_EVENT_SLOW;
8607ab87ff4SFrancois Romieu 		schedule_work(&rp->slow_event_task);
8617ab87ff4SFrancois Romieu 	}
862f2148a47SJeff Kirsher 
863f2148a47SJeff Kirsher 	if (work_done < budget) {
864f2148a47SJeff Kirsher 		napi_complete(napi);
8657ab87ff4SFrancois Romieu 		iowrite16(enable_mask, ioaddr + IntrEnable);
8667ab87ff4SFrancois Romieu 		mmiowb();
867f2148a47SJeff Kirsher 	}
868f2148a47SJeff Kirsher 	return work_done;
869f2148a47SJeff Kirsher }
870f2148a47SJeff Kirsher 
87176e239e1SBill Pemberton static void rhine_hw_init(struct net_device *dev, long pioaddr)
872f2148a47SJeff Kirsher {
873f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
874f2148a47SJeff Kirsher 
875f2148a47SJeff Kirsher 	/* Reset the chip to erase previous misconfiguration. */
876f2148a47SJeff Kirsher 	rhine_chip_reset(dev);
877f2148a47SJeff Kirsher 
878f2148a47SJeff Kirsher 	/* Rhine-I needs extra time to recuperate before EEPROM reload */
879f2148a47SJeff Kirsher 	if (rp->quirks & rqRhineI)
880f2148a47SJeff Kirsher 		msleep(5);
881f2148a47SJeff Kirsher 
882f2148a47SJeff Kirsher 	/* Reload EEPROM controlled bytes cleared by soft reset */
8832d283862SAlexey Charkov 	if (dev_is_pci(dev->dev.parent))
884f2148a47SJeff Kirsher 		rhine_reload_eeprom(pioaddr, dev);
885f2148a47SJeff Kirsher }
886f2148a47SJeff Kirsher 
887f2148a47SJeff Kirsher static const struct net_device_ops rhine_netdev_ops = {
888f2148a47SJeff Kirsher 	.ndo_open		 = rhine_open,
889f2148a47SJeff Kirsher 	.ndo_stop		 = rhine_close,
890f2148a47SJeff Kirsher 	.ndo_start_xmit		 = rhine_start_tx,
891f7b5d1b9SJamie Gloudon 	.ndo_get_stats64	 = rhine_get_stats64,
892afc4b13dSJiri Pirko 	.ndo_set_rx_mode	 = rhine_set_rx_mode,
893f2148a47SJeff Kirsher 	.ndo_change_mtu		 = eth_change_mtu,
894f2148a47SJeff Kirsher 	.ndo_validate_addr	 = eth_validate_addr,
895f2148a47SJeff Kirsher 	.ndo_set_mac_address 	 = eth_mac_addr,
896f2148a47SJeff Kirsher 	.ndo_do_ioctl		 = netdev_ioctl,
897f2148a47SJeff Kirsher 	.ndo_tx_timeout 	 = rhine_tx_timeout,
898f2148a47SJeff Kirsher 	.ndo_vlan_rx_add_vid	 = rhine_vlan_rx_add_vid,
899f2148a47SJeff Kirsher 	.ndo_vlan_rx_kill_vid	 = rhine_vlan_rx_kill_vid,
900f2148a47SJeff Kirsher #ifdef CONFIG_NET_POLL_CONTROLLER
901f2148a47SJeff Kirsher 	.ndo_poll_controller	 = rhine_poll,
902f2148a47SJeff Kirsher #endif
903f2148a47SJeff Kirsher };
904f2148a47SJeff Kirsher 
905ca8b6e04SAlexey Charkov static int rhine_init_one_common(struct device *hwdev, u32 quirks,
9062d283862SAlexey Charkov 				 long pioaddr, void __iomem *ioaddr, int irq)
907f2148a47SJeff Kirsher {
908f2148a47SJeff Kirsher 	struct net_device *dev;
909f2148a47SJeff Kirsher 	struct rhine_private *rp;
9102d283862SAlexey Charkov 	int i, rc, phy_id;
911f2148a47SJeff Kirsher 	const char *name;
912f2148a47SJeff Kirsher 
913f2148a47SJeff Kirsher 	/* this should always be supported */
914f7630d18SAlexey Charkov 	rc = dma_set_mask(hwdev, DMA_BIT_MASK(32));
915f2148a47SJeff Kirsher 	if (rc) {
916f7630d18SAlexey Charkov 		dev_err(hwdev, "32-bit DMA addresses not supported by the card!?\n");
9172d283862SAlexey Charkov 		goto err_out;
918f2148a47SJeff Kirsher 	}
919f2148a47SJeff Kirsher 
920f2148a47SJeff Kirsher 	dev = alloc_etherdev(sizeof(struct rhine_private));
921f2148a47SJeff Kirsher 	if (!dev) {
922f2148a47SJeff Kirsher 		rc = -ENOMEM;
9232d283862SAlexey Charkov 		goto err_out;
924f2148a47SJeff Kirsher 	}
925f7630d18SAlexey Charkov 	SET_NETDEV_DEV(dev, hwdev);
926f2148a47SJeff Kirsher 
927f2148a47SJeff Kirsher 	rp = netdev_priv(dev);
928f2148a47SJeff Kirsher 	rp->dev = dev;
929ca8b6e04SAlexey Charkov 	rp->quirks = quirks;
930f2148a47SJeff Kirsher 	rp->pioaddr = pioaddr;
9312d283862SAlexey Charkov 	rp->base = ioaddr;
9322d283862SAlexey Charkov 	rp->irq = irq;
933fc3e0f8aSFrancois Romieu 	rp->msg_enable = netif_msg_init(debug, RHINE_MSG_DEFAULT);
934f2148a47SJeff Kirsher 
935ca8b6e04SAlexey Charkov 	phy_id = rp->quirks & rqIntPHY ? 1 : 0;
936f2148a47SJeff Kirsher 
937827da44cSJohn Stultz 	u64_stats_init(&rp->tx_stats.syncp);
938827da44cSJohn Stultz 	u64_stats_init(&rp->rx_stats.syncp);
939827da44cSJohn Stultz 
940f2148a47SJeff Kirsher 	/* Get chip registers into a sane state */
941f2148a47SJeff Kirsher 	rhine_power_init(dev);
942f2148a47SJeff Kirsher 	rhine_hw_init(dev, pioaddr);
943f2148a47SJeff Kirsher 
944f2148a47SJeff Kirsher 	for (i = 0; i < 6; i++)
945f2148a47SJeff Kirsher 		dev->dev_addr[i] = ioread8(ioaddr + StationAddr + i);
946f2148a47SJeff Kirsher 
947f2148a47SJeff Kirsher 	if (!is_valid_ether_addr(dev->dev_addr)) {
948f2148a47SJeff Kirsher 		/* Report it and use a random ethernet address instead */
949f2148a47SJeff Kirsher 		netdev_err(dev, "Invalid MAC address: %pM\n", dev->dev_addr);
950f2cedb63SDanny Kukawka 		eth_hw_addr_random(dev);
951f2148a47SJeff Kirsher 		netdev_info(dev, "Using random MAC address: %pM\n",
952f2148a47SJeff Kirsher 			    dev->dev_addr);
953f2148a47SJeff Kirsher 	}
954f2148a47SJeff Kirsher 
955f2148a47SJeff Kirsher 	/* For Rhine-I/II, phy_id is loaded from EEPROM */
956f2148a47SJeff Kirsher 	if (!phy_id)
957f2148a47SJeff Kirsher 		phy_id = ioread8(ioaddr + 0x6C);
958f2148a47SJeff Kirsher 
959f2148a47SJeff Kirsher 	spin_lock_init(&rp->lock);
9607ab87ff4SFrancois Romieu 	mutex_init(&rp->task_lock);
961f2148a47SJeff Kirsher 	INIT_WORK(&rp->reset_task, rhine_reset_task);
9627ab87ff4SFrancois Romieu 	INIT_WORK(&rp->slow_event_task, rhine_slow_event_task);
963f2148a47SJeff Kirsher 
964f2148a47SJeff Kirsher 	rp->mii_if.dev = dev;
965f2148a47SJeff Kirsher 	rp->mii_if.mdio_read = mdio_read;
966f2148a47SJeff Kirsher 	rp->mii_if.mdio_write = mdio_write;
967f2148a47SJeff Kirsher 	rp->mii_if.phy_id_mask = 0x1f;
968f2148a47SJeff Kirsher 	rp->mii_if.reg_num_mask = 0x1f;
969f2148a47SJeff Kirsher 
970f2148a47SJeff Kirsher 	/* The chip-specific entries in the device structure. */
971f2148a47SJeff Kirsher 	dev->netdev_ops = &rhine_netdev_ops;
972e76070f2Swangweidong 	dev->ethtool_ops = &netdev_ethtool_ops;
973f2148a47SJeff Kirsher 	dev->watchdog_timeo = TX_TIMEOUT;
974f2148a47SJeff Kirsher 
975f2148a47SJeff Kirsher 	netif_napi_add(dev, &rp->napi, rhine_napipoll, 64);
976f2148a47SJeff Kirsher 
977f2148a47SJeff Kirsher 	if (rp->quirks & rqRhineI)
978f2148a47SJeff Kirsher 		dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM;
979f2148a47SJeff Kirsher 
980ca8b6e04SAlexey Charkov 	if (rp->quirks & rqMgmt)
981f646968fSPatrick McHardy 		dev->features |= NETIF_F_HW_VLAN_CTAG_TX |
982f646968fSPatrick McHardy 				 NETIF_F_HW_VLAN_CTAG_RX |
983f646968fSPatrick McHardy 				 NETIF_F_HW_VLAN_CTAG_FILTER;
984f2148a47SJeff Kirsher 
985f2148a47SJeff Kirsher 	/* dev->name not defined before register_netdev()! */
986f2148a47SJeff Kirsher 	rc = register_netdev(dev);
987f2148a47SJeff Kirsher 	if (rc)
9882d283862SAlexey Charkov 		goto err_out_free_netdev;
989f2148a47SJeff Kirsher 
990ca8b6e04SAlexey Charkov 	if (rp->quirks & rqRhineI)
991ca8b6e04SAlexey Charkov 		name = "Rhine";
992ca8b6e04SAlexey Charkov 	else if (rp->quirks & rqStatusWBRace)
993ca8b6e04SAlexey Charkov 		name = "Rhine II";
994ca8b6e04SAlexey Charkov 	else if (rp->quirks & rqMgmt)
995ca8b6e04SAlexey Charkov 		name = "Rhine III (Management Adapter)";
996ca8b6e04SAlexey Charkov 	else
997ca8b6e04SAlexey Charkov 		name = "Rhine III";
998ca8b6e04SAlexey Charkov 
999f2148a47SJeff Kirsher 	netdev_info(dev, "VIA %s at 0x%lx, %pM, IRQ %d\n",
10002d283862SAlexey Charkov 		    name, (long)ioaddr, dev->dev_addr, rp->irq);
1001f2148a47SJeff Kirsher 
1002f7630d18SAlexey Charkov 	dev_set_drvdata(hwdev, dev);
1003f2148a47SJeff Kirsher 
1004f2148a47SJeff Kirsher 	{
1005f2148a47SJeff Kirsher 		u16 mii_cmd;
1006f2148a47SJeff Kirsher 		int mii_status = mdio_read(dev, phy_id, 1);
1007f2148a47SJeff Kirsher 		mii_cmd = mdio_read(dev, phy_id, MII_BMCR) & ~BMCR_ISOLATE;
1008f2148a47SJeff Kirsher 		mdio_write(dev, phy_id, MII_BMCR, mii_cmd);
1009f2148a47SJeff Kirsher 		if (mii_status != 0xffff && mii_status != 0x0000) {
1010f2148a47SJeff Kirsher 			rp->mii_if.advertising = mdio_read(dev, phy_id, 4);
1011f2148a47SJeff Kirsher 			netdev_info(dev,
1012f2148a47SJeff Kirsher 				    "MII PHY found at address %d, status 0x%04x advertising %04x Link %04x\n",
1013f2148a47SJeff Kirsher 				    phy_id,
1014f2148a47SJeff Kirsher 				    mii_status, rp->mii_if.advertising,
1015f2148a47SJeff Kirsher 				    mdio_read(dev, phy_id, 5));
1016f2148a47SJeff Kirsher 
1017f2148a47SJeff Kirsher 			/* set IFF_RUNNING */
1018f2148a47SJeff Kirsher 			if (mii_status & BMSR_LSTATUS)
1019f2148a47SJeff Kirsher 				netif_carrier_on(dev);
1020f2148a47SJeff Kirsher 			else
1021f2148a47SJeff Kirsher 				netif_carrier_off(dev);
1022f2148a47SJeff Kirsher 
1023f2148a47SJeff Kirsher 		}
1024f2148a47SJeff Kirsher 	}
1025f2148a47SJeff Kirsher 	rp->mii_if.phy_id = phy_id;
1026fc3e0f8aSFrancois Romieu 	if (avoid_D3)
1027fc3e0f8aSFrancois Romieu 		netif_info(rp, probe, dev, "No D3 power state at shutdown\n");
1028f2148a47SJeff Kirsher 
1029f2148a47SJeff Kirsher 	return 0;
1030f2148a47SJeff Kirsher 
10312d283862SAlexey Charkov err_out_free_netdev:
10322d283862SAlexey Charkov 	free_netdev(dev);
10332d283862SAlexey Charkov err_out:
10342d283862SAlexey Charkov 	return rc;
10352d283862SAlexey Charkov }
10362d283862SAlexey Charkov 
10372d283862SAlexey Charkov static int rhine_init_one_pci(struct pci_dev *pdev,
10382d283862SAlexey Charkov 			      const struct pci_device_id *ent)
10392d283862SAlexey Charkov {
10402d283862SAlexey Charkov 	struct device *hwdev = &pdev->dev;
10415b579e21SAlexey Charkov 	int rc;
10422d283862SAlexey Charkov 	long pioaddr, memaddr;
10432d283862SAlexey Charkov 	void __iomem *ioaddr;
10442d283862SAlexey Charkov 	int io_size = pdev->revision < VTunknown0 ? 128 : 256;
10455b579e21SAlexey Charkov 
10465b579e21SAlexey Charkov /* This driver was written to use PCI memory space. Some early versions
10475b579e21SAlexey Charkov  * of the Rhine may only work correctly with I/O space accesses.
10485b579e21SAlexey Charkov  * TODO: determine for which revisions this is true and assign the flag
10495b579e21SAlexey Charkov  *	 in code as opposed to this Kconfig option (???)
10505b579e21SAlexey Charkov  */
10515b579e21SAlexey Charkov #ifdef CONFIG_VIA_RHINE_MMIO
10525b579e21SAlexey Charkov 	u32 quirks = rqNeedEnMMIO;
10532d283862SAlexey Charkov #else
10545b579e21SAlexey Charkov 	u32 quirks = 0;
10552d283862SAlexey Charkov #endif
10562d283862SAlexey Charkov 
10572d283862SAlexey Charkov /* when built into the kernel, we only print version if device is found */
10582d283862SAlexey Charkov #ifndef MODULE
10592d283862SAlexey Charkov 	pr_info_once("%s\n", version);
10602d283862SAlexey Charkov #endif
10612d283862SAlexey Charkov 
10622d283862SAlexey Charkov 	rc = pci_enable_device(pdev);
10632d283862SAlexey Charkov 	if (rc)
10642d283862SAlexey Charkov 		goto err_out;
10652d283862SAlexey Charkov 
1066ca8b6e04SAlexey Charkov 	if (pdev->revision < VTunknown0) {
10675b579e21SAlexey Charkov 		quirks |= rqRhineI;
1068ca8b6e04SAlexey Charkov 	} else if (pdev->revision >= VT6102) {
10695b579e21SAlexey Charkov 		quirks |= rqWOL | rqForceReset;
1070ca8b6e04SAlexey Charkov 		if (pdev->revision < VT6105) {
1071ca8b6e04SAlexey Charkov 			quirks |= rqStatusWBRace;
1072ca8b6e04SAlexey Charkov 		} else {
1073ca8b6e04SAlexey Charkov 			quirks |= rqIntPHY;
1074ca8b6e04SAlexey Charkov 			if (pdev->revision >= VT6105_B0)
1075ca8b6e04SAlexey Charkov 				quirks |= rq6patterns;
1076ca8b6e04SAlexey Charkov 			if (pdev->revision >= VT6105M)
1077ca8b6e04SAlexey Charkov 				quirks |= rqMgmt;
1078ca8b6e04SAlexey Charkov 		}
1079ca8b6e04SAlexey Charkov 	}
1080ca8b6e04SAlexey Charkov 
10812d283862SAlexey Charkov 	/* sanity check */
10822d283862SAlexey Charkov 	if ((pci_resource_len(pdev, 0) < io_size) ||
10832d283862SAlexey Charkov 	    (pci_resource_len(pdev, 1) < io_size)) {
10842d283862SAlexey Charkov 		rc = -EIO;
10852d283862SAlexey Charkov 		dev_err(hwdev, "Insufficient PCI resources, aborting\n");
10862d283862SAlexey Charkov 		goto err_out_pci_disable;
10872d283862SAlexey Charkov 	}
10882d283862SAlexey Charkov 
10892d283862SAlexey Charkov 	pioaddr = pci_resource_start(pdev, 0);
10902d283862SAlexey Charkov 	memaddr = pci_resource_start(pdev, 1);
10912d283862SAlexey Charkov 
10922d283862SAlexey Charkov 	pci_set_master(pdev);
10932d283862SAlexey Charkov 
10942d283862SAlexey Charkov 	rc = pci_request_regions(pdev, DRV_NAME);
10952d283862SAlexey Charkov 	if (rc)
10962d283862SAlexey Charkov 		goto err_out_pci_disable;
10972d283862SAlexey Charkov 
10985b579e21SAlexey Charkov 	ioaddr = pci_iomap(pdev, (quirks & rqNeedEnMMIO ? 1 : 0), io_size);
10992d283862SAlexey Charkov 	if (!ioaddr) {
11002d283862SAlexey Charkov 		rc = -EIO;
11012d283862SAlexey Charkov 		dev_err(hwdev,
11022d283862SAlexey Charkov 			"ioremap failed for device %s, region 0x%X @ 0x%lX\n",
11032d283862SAlexey Charkov 			dev_name(hwdev), io_size, memaddr);
11042d283862SAlexey Charkov 		goto err_out_free_res;
11052d283862SAlexey Charkov 	}
11062d283862SAlexey Charkov 
11072d283862SAlexey Charkov 	enable_mmio(pioaddr, quirks);
11082d283862SAlexey Charkov 
11095b579e21SAlexey Charkov 	rc = verify_mmio(hwdev, pioaddr, ioaddr, quirks);
11105b579e21SAlexey Charkov 	if (rc)
11112d283862SAlexey Charkov 		goto err_out_unmap;
11122d283862SAlexey Charkov 
1113ca8b6e04SAlexey Charkov 	rc = rhine_init_one_common(&pdev->dev, quirks,
11142d283862SAlexey Charkov 				   pioaddr, ioaddr, pdev->irq);
11152d283862SAlexey Charkov 	if (!rc)
11162d283862SAlexey Charkov 		return 0;
11172d283862SAlexey Charkov 
1118f2148a47SJeff Kirsher err_out_unmap:
1119f2148a47SJeff Kirsher 	pci_iounmap(pdev, ioaddr);
1120f2148a47SJeff Kirsher err_out_free_res:
1121f2148a47SJeff Kirsher 	pci_release_regions(pdev);
1122ae996154SRoger Luethi err_out_pci_disable:
1123ae996154SRoger Luethi 	pci_disable_device(pdev);
1124f2148a47SJeff Kirsher err_out:
1125f2148a47SJeff Kirsher 	return rc;
1126f2148a47SJeff Kirsher }
1127f2148a47SJeff Kirsher 
11282d283862SAlexey Charkov static int rhine_init_one_platform(struct platform_device *pdev)
11292d283862SAlexey Charkov {
11302d283862SAlexey Charkov 	const struct of_device_id *match;
1131ca8b6e04SAlexey Charkov 	const u32 *quirks;
11322d283862SAlexey Charkov 	int irq;
11332d283862SAlexey Charkov 	struct resource *res;
11342d283862SAlexey Charkov 	void __iomem *ioaddr;
11352d283862SAlexey Charkov 
11362d283862SAlexey Charkov 	match = of_match_device(rhine_of_tbl, &pdev->dev);
11372d283862SAlexey Charkov 	if (!match)
11382d283862SAlexey Charkov 		return -EINVAL;
11392d283862SAlexey Charkov 
11402d283862SAlexey Charkov 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
11412d283862SAlexey Charkov 	ioaddr = devm_ioremap_resource(&pdev->dev, res);
11422d283862SAlexey Charkov 	if (IS_ERR(ioaddr))
11432d283862SAlexey Charkov 		return PTR_ERR(ioaddr);
11442d283862SAlexey Charkov 
11452d283862SAlexey Charkov 	irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
11462d283862SAlexey Charkov 	if (!irq)
11472d283862SAlexey Charkov 		return -EINVAL;
11482d283862SAlexey Charkov 
1149ca8b6e04SAlexey Charkov 	quirks = match->data;
1150ca8b6e04SAlexey Charkov 	if (!quirks)
11512d283862SAlexey Charkov 		return -EINVAL;
11522d283862SAlexey Charkov 
1153ca8b6e04SAlexey Charkov 	return rhine_init_one_common(&pdev->dev, *quirks,
11542d283862SAlexey Charkov 				     (long)ioaddr, ioaddr, irq);
11552d283862SAlexey Charkov }
11562d283862SAlexey Charkov 
1157f2148a47SJeff Kirsher static int alloc_ring(struct net_device* dev)
1158f2148a47SJeff Kirsher {
1159f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
1160f7630d18SAlexey Charkov 	struct device *hwdev = dev->dev.parent;
1161f2148a47SJeff Kirsher 	void *ring;
1162f2148a47SJeff Kirsher 	dma_addr_t ring_dma;
1163f2148a47SJeff Kirsher 
1164f7630d18SAlexey Charkov 	ring = dma_alloc_coherent(hwdev,
1165f2148a47SJeff Kirsher 				  RX_RING_SIZE * sizeof(struct rx_desc) +
1166f2148a47SJeff Kirsher 				  TX_RING_SIZE * sizeof(struct tx_desc),
11674087c4dcSAlexey Charkov 				  &ring_dma,
11684087c4dcSAlexey Charkov 				  GFP_ATOMIC);
1169f2148a47SJeff Kirsher 	if (!ring) {
1170f2148a47SJeff Kirsher 		netdev_err(dev, "Could not allocate DMA memory\n");
1171f2148a47SJeff Kirsher 		return -ENOMEM;
1172f2148a47SJeff Kirsher 	}
1173f2148a47SJeff Kirsher 	if (rp->quirks & rqRhineI) {
1174f7630d18SAlexey Charkov 		rp->tx_bufs = dma_alloc_coherent(hwdev,
1175f2148a47SJeff Kirsher 						 PKT_BUF_SZ * TX_RING_SIZE,
11764087c4dcSAlexey Charkov 						 &rp->tx_bufs_dma,
11774087c4dcSAlexey Charkov 						 GFP_ATOMIC);
1178f2148a47SJeff Kirsher 		if (rp->tx_bufs == NULL) {
1179f7630d18SAlexey Charkov 			dma_free_coherent(hwdev,
1180f2148a47SJeff Kirsher 					  RX_RING_SIZE * sizeof(struct rx_desc) +
1181f2148a47SJeff Kirsher 					  TX_RING_SIZE * sizeof(struct tx_desc),
1182f2148a47SJeff Kirsher 					  ring, ring_dma);
1183f2148a47SJeff Kirsher 			return -ENOMEM;
1184f2148a47SJeff Kirsher 		}
1185f2148a47SJeff Kirsher 	}
1186f2148a47SJeff Kirsher 
1187f2148a47SJeff Kirsher 	rp->rx_ring = ring;
1188f2148a47SJeff Kirsher 	rp->tx_ring = ring + RX_RING_SIZE * sizeof(struct rx_desc);
1189f2148a47SJeff Kirsher 	rp->rx_ring_dma = ring_dma;
1190f2148a47SJeff Kirsher 	rp->tx_ring_dma = ring_dma + RX_RING_SIZE * sizeof(struct rx_desc);
1191f2148a47SJeff Kirsher 
1192f2148a47SJeff Kirsher 	return 0;
1193f2148a47SJeff Kirsher }
1194f2148a47SJeff Kirsher 
1195f2148a47SJeff Kirsher static void free_ring(struct net_device* dev)
1196f2148a47SJeff Kirsher {
1197f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
1198f7630d18SAlexey Charkov 	struct device *hwdev = dev->dev.parent;
1199f2148a47SJeff Kirsher 
1200f7630d18SAlexey Charkov 	dma_free_coherent(hwdev,
1201f2148a47SJeff Kirsher 			  RX_RING_SIZE * sizeof(struct rx_desc) +
1202f2148a47SJeff Kirsher 			  TX_RING_SIZE * sizeof(struct tx_desc),
1203f2148a47SJeff Kirsher 			  rp->rx_ring, rp->rx_ring_dma);
1204f2148a47SJeff Kirsher 	rp->tx_ring = NULL;
1205f2148a47SJeff Kirsher 
1206f2148a47SJeff Kirsher 	if (rp->tx_bufs)
1207f7630d18SAlexey Charkov 		dma_free_coherent(hwdev, PKT_BUF_SZ * TX_RING_SIZE,
1208f2148a47SJeff Kirsher 				  rp->tx_bufs, rp->tx_bufs_dma);
1209f2148a47SJeff Kirsher 
1210f2148a47SJeff Kirsher 	rp->tx_bufs = NULL;
1211f2148a47SJeff Kirsher 
1212f2148a47SJeff Kirsher }
1213f2148a47SJeff Kirsher 
1214f2148a47SJeff Kirsher static void alloc_rbufs(struct net_device *dev)
1215f2148a47SJeff Kirsher {
1216f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
1217f7630d18SAlexey Charkov 	struct device *hwdev = dev->dev.parent;
1218f2148a47SJeff Kirsher 	dma_addr_t next;
1219f2148a47SJeff Kirsher 	int i;
1220f2148a47SJeff Kirsher 
1221f2148a47SJeff Kirsher 	rp->dirty_rx = rp->cur_rx = 0;
1222f2148a47SJeff Kirsher 
1223f2148a47SJeff Kirsher 	rp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
1224f2148a47SJeff Kirsher 	rp->rx_head_desc = &rp->rx_ring[0];
1225f2148a47SJeff Kirsher 	next = rp->rx_ring_dma;
1226f2148a47SJeff Kirsher 
1227f2148a47SJeff Kirsher 	/* Init the ring entries */
1228f2148a47SJeff Kirsher 	for (i = 0; i < RX_RING_SIZE; i++) {
1229f2148a47SJeff Kirsher 		rp->rx_ring[i].rx_status = 0;
1230f2148a47SJeff Kirsher 		rp->rx_ring[i].desc_length = cpu_to_le32(rp->rx_buf_sz);
1231f2148a47SJeff Kirsher 		next += sizeof(struct rx_desc);
1232f2148a47SJeff Kirsher 		rp->rx_ring[i].next_desc = cpu_to_le32(next);
1233f2148a47SJeff Kirsher 		rp->rx_skbuff[i] = NULL;
1234f2148a47SJeff Kirsher 	}
1235f2148a47SJeff Kirsher 	/* Mark the last entry as wrapping the ring. */
1236f2148a47SJeff Kirsher 	rp->rx_ring[i-1].next_desc = cpu_to_le32(rp->rx_ring_dma);
1237f2148a47SJeff Kirsher 
1238f2148a47SJeff Kirsher 	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
1239f2148a47SJeff Kirsher 	for (i = 0; i < RX_RING_SIZE; i++) {
1240f2148a47SJeff Kirsher 		struct sk_buff *skb = netdev_alloc_skb(dev, rp->rx_buf_sz);
1241f2148a47SJeff Kirsher 		rp->rx_skbuff[i] = skb;
1242f2148a47SJeff Kirsher 		if (skb == NULL)
1243f2148a47SJeff Kirsher 			break;
1244f2148a47SJeff Kirsher 
1245f2148a47SJeff Kirsher 		rp->rx_skbuff_dma[i] =
1246f7630d18SAlexey Charkov 			dma_map_single(hwdev, skb->data, rp->rx_buf_sz,
12474087c4dcSAlexey Charkov 				       DMA_FROM_DEVICE);
1248f7630d18SAlexey Charkov 		if (dma_mapping_error(hwdev, rp->rx_skbuff_dma[i])) {
12499b4fe5fbSNeil Horman 			rp->rx_skbuff_dma[i] = 0;
12509b4fe5fbSNeil Horman 			dev_kfree_skb(skb);
12519b4fe5fbSNeil Horman 			break;
12529b4fe5fbSNeil Horman 		}
1253f2148a47SJeff Kirsher 		rp->rx_ring[i].addr = cpu_to_le32(rp->rx_skbuff_dma[i]);
1254f2148a47SJeff Kirsher 		rp->rx_ring[i].rx_status = cpu_to_le32(DescOwn);
1255f2148a47SJeff Kirsher 	}
1256f2148a47SJeff Kirsher 	rp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
1257f2148a47SJeff Kirsher }
1258f2148a47SJeff Kirsher 
1259f2148a47SJeff Kirsher static void free_rbufs(struct net_device* dev)
1260f2148a47SJeff Kirsher {
1261f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
1262f7630d18SAlexey Charkov 	struct device *hwdev = dev->dev.parent;
1263f2148a47SJeff Kirsher 	int i;
1264f2148a47SJeff Kirsher 
1265f2148a47SJeff Kirsher 	/* Free all the skbuffs in the Rx queue. */
1266f2148a47SJeff Kirsher 	for (i = 0; i < RX_RING_SIZE; i++) {
1267f2148a47SJeff Kirsher 		rp->rx_ring[i].rx_status = 0;
1268f2148a47SJeff Kirsher 		rp->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
1269f2148a47SJeff Kirsher 		if (rp->rx_skbuff[i]) {
1270f7630d18SAlexey Charkov 			dma_unmap_single(hwdev,
1271f2148a47SJeff Kirsher 					 rp->rx_skbuff_dma[i],
12724087c4dcSAlexey Charkov 					 rp->rx_buf_sz, DMA_FROM_DEVICE);
1273f2148a47SJeff Kirsher 			dev_kfree_skb(rp->rx_skbuff[i]);
1274f2148a47SJeff Kirsher 		}
1275f2148a47SJeff Kirsher 		rp->rx_skbuff[i] = NULL;
1276f2148a47SJeff Kirsher 	}
1277f2148a47SJeff Kirsher }
1278f2148a47SJeff Kirsher 
1279f2148a47SJeff Kirsher static void alloc_tbufs(struct net_device* dev)
1280f2148a47SJeff Kirsher {
1281f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
1282f2148a47SJeff Kirsher 	dma_addr_t next;
1283f2148a47SJeff Kirsher 	int i;
1284f2148a47SJeff Kirsher 
1285f2148a47SJeff Kirsher 	rp->dirty_tx = rp->cur_tx = 0;
1286f2148a47SJeff Kirsher 	next = rp->tx_ring_dma;
1287f2148a47SJeff Kirsher 	for (i = 0; i < TX_RING_SIZE; i++) {
1288f2148a47SJeff Kirsher 		rp->tx_skbuff[i] = NULL;
1289f2148a47SJeff Kirsher 		rp->tx_ring[i].tx_status = 0;
1290f2148a47SJeff Kirsher 		rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
1291f2148a47SJeff Kirsher 		next += sizeof(struct tx_desc);
1292f2148a47SJeff Kirsher 		rp->tx_ring[i].next_desc = cpu_to_le32(next);
1293f2148a47SJeff Kirsher 		if (rp->quirks & rqRhineI)
1294f2148a47SJeff Kirsher 			rp->tx_buf[i] = &rp->tx_bufs[i * PKT_BUF_SZ];
1295f2148a47SJeff Kirsher 	}
1296f2148a47SJeff Kirsher 	rp->tx_ring[i-1].next_desc = cpu_to_le32(rp->tx_ring_dma);
1297f2148a47SJeff Kirsher 
1298f2148a47SJeff Kirsher }
1299f2148a47SJeff Kirsher 
1300f2148a47SJeff Kirsher static void free_tbufs(struct net_device* dev)
1301f2148a47SJeff Kirsher {
1302f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
1303f7630d18SAlexey Charkov 	struct device *hwdev = dev->dev.parent;
1304f2148a47SJeff Kirsher 	int i;
1305f2148a47SJeff Kirsher 
1306f2148a47SJeff Kirsher 	for (i = 0; i < TX_RING_SIZE; i++) {
1307f2148a47SJeff Kirsher 		rp->tx_ring[i].tx_status = 0;
1308f2148a47SJeff Kirsher 		rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
1309f2148a47SJeff Kirsher 		rp->tx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
1310f2148a47SJeff Kirsher 		if (rp->tx_skbuff[i]) {
1311f2148a47SJeff Kirsher 			if (rp->tx_skbuff_dma[i]) {
1312f7630d18SAlexey Charkov 				dma_unmap_single(hwdev,
1313f2148a47SJeff Kirsher 						 rp->tx_skbuff_dma[i],
1314f2148a47SJeff Kirsher 						 rp->tx_skbuff[i]->len,
13154087c4dcSAlexey Charkov 						 DMA_TO_DEVICE);
1316f2148a47SJeff Kirsher 			}
1317f2148a47SJeff Kirsher 			dev_kfree_skb(rp->tx_skbuff[i]);
1318f2148a47SJeff Kirsher 		}
1319f2148a47SJeff Kirsher 		rp->tx_skbuff[i] = NULL;
1320f2148a47SJeff Kirsher 		rp->tx_buf[i] = NULL;
1321f2148a47SJeff Kirsher 	}
1322f2148a47SJeff Kirsher }
1323f2148a47SJeff Kirsher 
1324f2148a47SJeff Kirsher static void rhine_check_media(struct net_device *dev, unsigned int init_media)
1325f2148a47SJeff Kirsher {
1326f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
1327f2148a47SJeff Kirsher 	void __iomem *ioaddr = rp->base;
1328f2148a47SJeff Kirsher 
1329*5bdc7380SBen Hutchings 	if (!rp->mii_if.force_media)
1330fc3e0f8aSFrancois Romieu 		mii_check_media(&rp->mii_if, netif_msg_link(rp), init_media);
1331f2148a47SJeff Kirsher 
1332f2148a47SJeff Kirsher 	if (rp->mii_if.full_duplex)
1333f2148a47SJeff Kirsher 	    iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1FDuplex,
1334f2148a47SJeff Kirsher 		   ioaddr + ChipCmd1);
1335f2148a47SJeff Kirsher 	else
1336f2148a47SJeff Kirsher 	    iowrite8(ioread8(ioaddr + ChipCmd1) & ~Cmd1FDuplex,
1337f2148a47SJeff Kirsher 		   ioaddr + ChipCmd1);
1338fc3e0f8aSFrancois Romieu 
1339fc3e0f8aSFrancois Romieu 	netif_info(rp, link, dev, "force_media %d, carrier %d\n",
1340f2148a47SJeff Kirsher 		   rp->mii_if.force_media, netif_carrier_ok(dev));
1341f2148a47SJeff Kirsher }
1342f2148a47SJeff Kirsher 
1343f2148a47SJeff Kirsher /* Called after status of force_media possibly changed */
1344f2148a47SJeff Kirsher static void rhine_set_carrier(struct mii_if_info *mii)
1345f2148a47SJeff Kirsher {
1346fc3e0f8aSFrancois Romieu 	struct net_device *dev = mii->dev;
1347fc3e0f8aSFrancois Romieu 	struct rhine_private *rp = netdev_priv(dev);
1348fc3e0f8aSFrancois Romieu 
1349f2148a47SJeff Kirsher 	if (mii->force_media) {
1350f2148a47SJeff Kirsher 		/* autoneg is off: Link is always assumed to be up */
1351fc3e0f8aSFrancois Romieu 		if (!netif_carrier_ok(dev))
1352fc3e0f8aSFrancois Romieu 			netif_carrier_on(dev);
135317958438SFrançois Cachereul 	}
135417958438SFrançois Cachereul 
1355fc3e0f8aSFrancois Romieu 	rhine_check_media(dev, 0);
1356fc3e0f8aSFrancois Romieu 
1357fc3e0f8aSFrancois Romieu 	netif_info(rp, link, dev, "force_media %d, carrier %d\n",
1358fc3e0f8aSFrancois Romieu 		   mii->force_media, netif_carrier_ok(dev));
1359f2148a47SJeff Kirsher }
1360f2148a47SJeff Kirsher 
1361f2148a47SJeff Kirsher /**
1362f2148a47SJeff Kirsher  * rhine_set_cam - set CAM multicast filters
1363f2148a47SJeff Kirsher  * @ioaddr: register block of this Rhine
1364f2148a47SJeff Kirsher  * @idx: multicast CAM index [0..MCAM_SIZE-1]
1365f2148a47SJeff Kirsher  * @addr: multicast address (6 bytes)
1366f2148a47SJeff Kirsher  *
1367f2148a47SJeff Kirsher  * Load addresses into multicast filters.
1368f2148a47SJeff Kirsher  */
1369f2148a47SJeff Kirsher static void rhine_set_cam(void __iomem *ioaddr, int idx, u8 *addr)
1370f2148a47SJeff Kirsher {
1371f2148a47SJeff Kirsher 	int i;
1372f2148a47SJeff Kirsher 
1373f2148a47SJeff Kirsher 	iowrite8(CAMC_CAMEN, ioaddr + CamCon);
1374f2148a47SJeff Kirsher 	wmb();
1375f2148a47SJeff Kirsher 
1376f2148a47SJeff Kirsher 	/* Paranoid -- idx out of range should never happen */
1377f2148a47SJeff Kirsher 	idx &= (MCAM_SIZE - 1);
1378f2148a47SJeff Kirsher 
1379f2148a47SJeff Kirsher 	iowrite8((u8) idx, ioaddr + CamAddr);
1380f2148a47SJeff Kirsher 
1381f2148a47SJeff Kirsher 	for (i = 0; i < 6; i++, addr++)
1382f2148a47SJeff Kirsher 		iowrite8(*addr, ioaddr + MulticastFilter0 + i);
1383f2148a47SJeff Kirsher 	udelay(10);
1384f2148a47SJeff Kirsher 	wmb();
1385f2148a47SJeff Kirsher 
1386f2148a47SJeff Kirsher 	iowrite8(CAMC_CAMWR | CAMC_CAMEN, ioaddr + CamCon);
1387f2148a47SJeff Kirsher 	udelay(10);
1388f2148a47SJeff Kirsher 
1389f2148a47SJeff Kirsher 	iowrite8(0, ioaddr + CamCon);
1390f2148a47SJeff Kirsher }
1391f2148a47SJeff Kirsher 
1392f2148a47SJeff Kirsher /**
1393f2148a47SJeff Kirsher  * rhine_set_vlan_cam - set CAM VLAN filters
1394f2148a47SJeff Kirsher  * @ioaddr: register block of this Rhine
1395f2148a47SJeff Kirsher  * @idx: VLAN CAM index [0..VCAM_SIZE-1]
1396f2148a47SJeff Kirsher  * @addr: VLAN ID (2 bytes)
1397f2148a47SJeff Kirsher  *
1398f2148a47SJeff Kirsher  * Load addresses into VLAN filters.
1399f2148a47SJeff Kirsher  */
1400f2148a47SJeff Kirsher static void rhine_set_vlan_cam(void __iomem *ioaddr, int idx, u8 *addr)
1401f2148a47SJeff Kirsher {
1402f2148a47SJeff Kirsher 	iowrite8(CAMC_CAMEN | CAMC_VCAMSL, ioaddr + CamCon);
1403f2148a47SJeff Kirsher 	wmb();
1404f2148a47SJeff Kirsher 
1405f2148a47SJeff Kirsher 	/* Paranoid -- idx out of range should never happen */
1406f2148a47SJeff Kirsher 	idx &= (VCAM_SIZE - 1);
1407f2148a47SJeff Kirsher 
1408f2148a47SJeff Kirsher 	iowrite8((u8) idx, ioaddr + CamAddr);
1409f2148a47SJeff Kirsher 
1410f2148a47SJeff Kirsher 	iowrite16(*((u16 *) addr), ioaddr + MulticastFilter0 + 6);
1411f2148a47SJeff Kirsher 	udelay(10);
1412f2148a47SJeff Kirsher 	wmb();
1413f2148a47SJeff Kirsher 
1414f2148a47SJeff Kirsher 	iowrite8(CAMC_CAMWR | CAMC_CAMEN, ioaddr + CamCon);
1415f2148a47SJeff Kirsher 	udelay(10);
1416f2148a47SJeff Kirsher 
1417f2148a47SJeff Kirsher 	iowrite8(0, ioaddr + CamCon);
1418f2148a47SJeff Kirsher }
1419f2148a47SJeff Kirsher 
1420f2148a47SJeff Kirsher /**
1421f2148a47SJeff Kirsher  * rhine_set_cam_mask - set multicast CAM mask
1422f2148a47SJeff Kirsher  * @ioaddr: register block of this Rhine
1423f2148a47SJeff Kirsher  * @mask: multicast CAM mask
1424f2148a47SJeff Kirsher  *
1425f2148a47SJeff Kirsher  * Mask sets multicast filters active/inactive.
1426f2148a47SJeff Kirsher  */
1427f2148a47SJeff Kirsher static void rhine_set_cam_mask(void __iomem *ioaddr, u32 mask)
1428f2148a47SJeff Kirsher {
1429f2148a47SJeff Kirsher 	iowrite8(CAMC_CAMEN, ioaddr + CamCon);
1430f2148a47SJeff Kirsher 	wmb();
1431f2148a47SJeff Kirsher 
1432f2148a47SJeff Kirsher 	/* write mask */
1433f2148a47SJeff Kirsher 	iowrite32(mask, ioaddr + CamMask);
1434f2148a47SJeff Kirsher 
1435f2148a47SJeff Kirsher 	/* disable CAMEN */
1436f2148a47SJeff Kirsher 	iowrite8(0, ioaddr + CamCon);
1437f2148a47SJeff Kirsher }
1438f2148a47SJeff Kirsher 
1439f2148a47SJeff Kirsher /**
1440f2148a47SJeff Kirsher  * rhine_set_vlan_cam_mask - set VLAN CAM mask
1441f2148a47SJeff Kirsher  * @ioaddr: register block of this Rhine
1442f2148a47SJeff Kirsher  * @mask: VLAN CAM mask
1443f2148a47SJeff Kirsher  *
1444f2148a47SJeff Kirsher  * Mask sets VLAN filters active/inactive.
1445f2148a47SJeff Kirsher  */
1446f2148a47SJeff Kirsher static void rhine_set_vlan_cam_mask(void __iomem *ioaddr, u32 mask)
1447f2148a47SJeff Kirsher {
1448f2148a47SJeff Kirsher 	iowrite8(CAMC_CAMEN | CAMC_VCAMSL, ioaddr + CamCon);
1449f2148a47SJeff Kirsher 	wmb();
1450f2148a47SJeff Kirsher 
1451f2148a47SJeff Kirsher 	/* write mask */
1452f2148a47SJeff Kirsher 	iowrite32(mask, ioaddr + CamMask);
1453f2148a47SJeff Kirsher 
1454f2148a47SJeff Kirsher 	/* disable CAMEN */
1455f2148a47SJeff Kirsher 	iowrite8(0, ioaddr + CamCon);
1456f2148a47SJeff Kirsher }
1457f2148a47SJeff Kirsher 
1458f2148a47SJeff Kirsher /**
1459f2148a47SJeff Kirsher  * rhine_init_cam_filter - initialize CAM filters
1460f2148a47SJeff Kirsher  * @dev: network device
1461f2148a47SJeff Kirsher  *
1462f2148a47SJeff Kirsher  * Initialize (disable) hardware VLAN and multicast support on this
1463f2148a47SJeff Kirsher  * Rhine.
1464f2148a47SJeff Kirsher  */
1465f2148a47SJeff Kirsher static void rhine_init_cam_filter(struct net_device *dev)
1466f2148a47SJeff Kirsher {
1467f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
1468f2148a47SJeff Kirsher 	void __iomem *ioaddr = rp->base;
1469f2148a47SJeff Kirsher 
1470f2148a47SJeff Kirsher 	/* Disable all CAMs */
1471f2148a47SJeff Kirsher 	rhine_set_vlan_cam_mask(ioaddr, 0);
1472f2148a47SJeff Kirsher 	rhine_set_cam_mask(ioaddr, 0);
1473f2148a47SJeff Kirsher 
1474f2148a47SJeff Kirsher 	/* disable hardware VLAN support */
1475f2148a47SJeff Kirsher 	BYTE_REG_BITS_ON(TCR_PQEN, ioaddr + TxConfig);
1476f2148a47SJeff Kirsher 	BYTE_REG_BITS_OFF(BCR1_VIDFR, ioaddr + PCIBusConfig1);
1477f2148a47SJeff Kirsher }
1478f2148a47SJeff Kirsher 
1479f2148a47SJeff Kirsher /**
1480f2148a47SJeff Kirsher  * rhine_update_vcam - update VLAN CAM filters
1481f2148a47SJeff Kirsher  * @rp: rhine_private data of this Rhine
1482f2148a47SJeff Kirsher  *
1483f2148a47SJeff Kirsher  * Update VLAN CAM filters to match configuration change.
1484f2148a47SJeff Kirsher  */
1485f2148a47SJeff Kirsher static void rhine_update_vcam(struct net_device *dev)
1486f2148a47SJeff Kirsher {
1487f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
1488f2148a47SJeff Kirsher 	void __iomem *ioaddr = rp->base;
1489f2148a47SJeff Kirsher 	u16 vid;
1490f2148a47SJeff Kirsher 	u32 vCAMmask = 0;	/* 32 vCAMs (6105M and better) */
1491f2148a47SJeff Kirsher 	unsigned int i = 0;
1492f2148a47SJeff Kirsher 
1493f2148a47SJeff Kirsher 	for_each_set_bit(vid, rp->active_vlans, VLAN_N_VID) {
1494f2148a47SJeff Kirsher 		rhine_set_vlan_cam(ioaddr, i, (u8 *)&vid);
1495f2148a47SJeff Kirsher 		vCAMmask |= 1 << i;
1496f2148a47SJeff Kirsher 		if (++i >= VCAM_SIZE)
1497f2148a47SJeff Kirsher 			break;
1498f2148a47SJeff Kirsher 	}
1499f2148a47SJeff Kirsher 	rhine_set_vlan_cam_mask(ioaddr, vCAMmask);
1500f2148a47SJeff Kirsher }
1501f2148a47SJeff Kirsher 
150280d5c368SPatrick McHardy static int rhine_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
1503f2148a47SJeff Kirsher {
1504f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
1505f2148a47SJeff Kirsher 
15067ab87ff4SFrancois Romieu 	spin_lock_bh(&rp->lock);
1507f2148a47SJeff Kirsher 	set_bit(vid, rp->active_vlans);
1508f2148a47SJeff Kirsher 	rhine_update_vcam(dev);
15097ab87ff4SFrancois Romieu 	spin_unlock_bh(&rp->lock);
15108e586137SJiri Pirko 	return 0;
1511f2148a47SJeff Kirsher }
1512f2148a47SJeff Kirsher 
151380d5c368SPatrick McHardy static int rhine_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
1514f2148a47SJeff Kirsher {
1515f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
1516f2148a47SJeff Kirsher 
15177ab87ff4SFrancois Romieu 	spin_lock_bh(&rp->lock);
1518f2148a47SJeff Kirsher 	clear_bit(vid, rp->active_vlans);
1519f2148a47SJeff Kirsher 	rhine_update_vcam(dev);
15207ab87ff4SFrancois Romieu 	spin_unlock_bh(&rp->lock);
15218e586137SJiri Pirko 	return 0;
1522f2148a47SJeff Kirsher }
1523f2148a47SJeff Kirsher 
1524f2148a47SJeff Kirsher static void init_registers(struct net_device *dev)
1525f2148a47SJeff Kirsher {
1526f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
1527f2148a47SJeff Kirsher 	void __iomem *ioaddr = rp->base;
1528f2148a47SJeff Kirsher 	int i;
1529f2148a47SJeff Kirsher 
1530f2148a47SJeff Kirsher 	for (i = 0; i < 6; i++)
1531f2148a47SJeff Kirsher 		iowrite8(dev->dev_addr[i], ioaddr + StationAddr + i);
1532f2148a47SJeff Kirsher 
1533f2148a47SJeff Kirsher 	/* Initialize other registers. */
1534f2148a47SJeff Kirsher 	iowrite16(0x0006, ioaddr + PCIBusConfig);	/* Tune configuration??? */
1535f2148a47SJeff Kirsher 	/* Configure initial FIFO thresholds. */
1536f2148a47SJeff Kirsher 	iowrite8(0x20, ioaddr + TxConfig);
1537f2148a47SJeff Kirsher 	rp->tx_thresh = 0x20;
1538f2148a47SJeff Kirsher 	rp->rx_thresh = 0x60;		/* Written in rhine_set_rx_mode(). */
1539f2148a47SJeff Kirsher 
1540f2148a47SJeff Kirsher 	iowrite32(rp->rx_ring_dma, ioaddr + RxRingPtr);
1541f2148a47SJeff Kirsher 	iowrite32(rp->tx_ring_dma, ioaddr + TxRingPtr);
1542f2148a47SJeff Kirsher 
1543f2148a47SJeff Kirsher 	rhine_set_rx_mode(dev);
1544f2148a47SJeff Kirsher 
1545ca8b6e04SAlexey Charkov 	if (rp->quirks & rqMgmt)
1546f2148a47SJeff Kirsher 		rhine_init_cam_filter(dev);
1547f2148a47SJeff Kirsher 
1548f2148a47SJeff Kirsher 	napi_enable(&rp->napi);
1549f2148a47SJeff Kirsher 
15507ab87ff4SFrancois Romieu 	iowrite16(RHINE_EVENT & 0xffff, ioaddr + IntrEnable);
1551f2148a47SJeff Kirsher 
1552f2148a47SJeff Kirsher 	iowrite16(CmdStart | CmdTxOn | CmdRxOn | (Cmd1NoTxPoll << 8),
1553f2148a47SJeff Kirsher 	       ioaddr + ChipCmd);
1554f2148a47SJeff Kirsher 	rhine_check_media(dev, 1);
1555f2148a47SJeff Kirsher }
1556f2148a47SJeff Kirsher 
1557f2148a47SJeff Kirsher /* Enable MII link status auto-polling (required for IntrLinkChange) */
1558a384a33bSFrancois Romieu static void rhine_enable_linkmon(struct rhine_private *rp)
1559f2148a47SJeff Kirsher {
1560a384a33bSFrancois Romieu 	void __iomem *ioaddr = rp->base;
1561a384a33bSFrancois Romieu 
1562f2148a47SJeff Kirsher 	iowrite8(0, ioaddr + MIICmd);
1563f2148a47SJeff Kirsher 	iowrite8(MII_BMSR, ioaddr + MIIRegAddr);
1564f2148a47SJeff Kirsher 	iowrite8(0x80, ioaddr + MIICmd);
1565f2148a47SJeff Kirsher 
1566a384a33bSFrancois Romieu 	rhine_wait_bit_high(rp, MIIRegAddr, 0x20);
1567f2148a47SJeff Kirsher 
1568f2148a47SJeff Kirsher 	iowrite8(MII_BMSR | 0x40, ioaddr + MIIRegAddr);
1569f2148a47SJeff Kirsher }
1570f2148a47SJeff Kirsher 
1571f2148a47SJeff Kirsher /* Disable MII link status auto-polling (required for MDIO access) */
1572a384a33bSFrancois Romieu static void rhine_disable_linkmon(struct rhine_private *rp)
1573f2148a47SJeff Kirsher {
1574a384a33bSFrancois Romieu 	void __iomem *ioaddr = rp->base;
1575a384a33bSFrancois Romieu 
1576f2148a47SJeff Kirsher 	iowrite8(0, ioaddr + MIICmd);
1577f2148a47SJeff Kirsher 
1578a384a33bSFrancois Romieu 	if (rp->quirks & rqRhineI) {
1579f2148a47SJeff Kirsher 		iowrite8(0x01, ioaddr + MIIRegAddr);	// MII_BMSR
1580f2148a47SJeff Kirsher 
1581f2148a47SJeff Kirsher 		/* Can be called from ISR. Evil. */
1582f2148a47SJeff Kirsher 		mdelay(1);
1583f2148a47SJeff Kirsher 
1584f2148a47SJeff Kirsher 		/* 0x80 must be set immediately before turning it off */
1585f2148a47SJeff Kirsher 		iowrite8(0x80, ioaddr + MIICmd);
1586f2148a47SJeff Kirsher 
1587a384a33bSFrancois Romieu 		rhine_wait_bit_high(rp, MIIRegAddr, 0x20);
1588f2148a47SJeff Kirsher 
1589f2148a47SJeff Kirsher 		/* Heh. Now clear 0x80 again. */
1590f2148a47SJeff Kirsher 		iowrite8(0, ioaddr + MIICmd);
1591f2148a47SJeff Kirsher 	}
1592f2148a47SJeff Kirsher 	else
1593a384a33bSFrancois Romieu 		rhine_wait_bit_high(rp, MIIRegAddr, 0x80);
1594f2148a47SJeff Kirsher }
1595f2148a47SJeff Kirsher 
1596f2148a47SJeff Kirsher /* Read and write over the MII Management Data I/O (MDIO) interface. */
1597f2148a47SJeff Kirsher 
1598f2148a47SJeff Kirsher static int mdio_read(struct net_device *dev, int phy_id, int regnum)
1599f2148a47SJeff Kirsher {
1600f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
1601f2148a47SJeff Kirsher 	void __iomem *ioaddr = rp->base;
1602f2148a47SJeff Kirsher 	int result;
1603f2148a47SJeff Kirsher 
1604a384a33bSFrancois Romieu 	rhine_disable_linkmon(rp);
1605f2148a47SJeff Kirsher 
1606f2148a47SJeff Kirsher 	/* rhine_disable_linkmon already cleared MIICmd */
1607f2148a47SJeff Kirsher 	iowrite8(phy_id, ioaddr + MIIPhyAddr);
1608f2148a47SJeff Kirsher 	iowrite8(regnum, ioaddr + MIIRegAddr);
1609f2148a47SJeff Kirsher 	iowrite8(0x40, ioaddr + MIICmd);		/* Trigger read */
1610a384a33bSFrancois Romieu 	rhine_wait_bit_low(rp, MIICmd, 0x40);
1611f2148a47SJeff Kirsher 	result = ioread16(ioaddr + MIIData);
1612f2148a47SJeff Kirsher 
1613a384a33bSFrancois Romieu 	rhine_enable_linkmon(rp);
1614f2148a47SJeff Kirsher 	return result;
1615f2148a47SJeff Kirsher }
1616f2148a47SJeff Kirsher 
1617f2148a47SJeff Kirsher static void mdio_write(struct net_device *dev, int phy_id, int regnum, int value)
1618f2148a47SJeff Kirsher {
1619f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
1620f2148a47SJeff Kirsher 	void __iomem *ioaddr = rp->base;
1621f2148a47SJeff Kirsher 
1622a384a33bSFrancois Romieu 	rhine_disable_linkmon(rp);
1623f2148a47SJeff Kirsher 
1624f2148a47SJeff Kirsher 	/* rhine_disable_linkmon already cleared MIICmd */
1625f2148a47SJeff Kirsher 	iowrite8(phy_id, ioaddr + MIIPhyAddr);
1626f2148a47SJeff Kirsher 	iowrite8(regnum, ioaddr + MIIRegAddr);
1627f2148a47SJeff Kirsher 	iowrite16(value, ioaddr + MIIData);
1628f2148a47SJeff Kirsher 	iowrite8(0x20, ioaddr + MIICmd);		/* Trigger write */
1629a384a33bSFrancois Romieu 	rhine_wait_bit_low(rp, MIICmd, 0x20);
1630f2148a47SJeff Kirsher 
1631a384a33bSFrancois Romieu 	rhine_enable_linkmon(rp);
1632f2148a47SJeff Kirsher }
1633f2148a47SJeff Kirsher 
16347ab87ff4SFrancois Romieu static void rhine_task_disable(struct rhine_private *rp)
16357ab87ff4SFrancois Romieu {
16367ab87ff4SFrancois Romieu 	mutex_lock(&rp->task_lock);
16377ab87ff4SFrancois Romieu 	rp->task_enable = false;
16387ab87ff4SFrancois Romieu 	mutex_unlock(&rp->task_lock);
16397ab87ff4SFrancois Romieu 
16407ab87ff4SFrancois Romieu 	cancel_work_sync(&rp->slow_event_task);
16417ab87ff4SFrancois Romieu 	cancel_work_sync(&rp->reset_task);
16427ab87ff4SFrancois Romieu }
16437ab87ff4SFrancois Romieu 
16447ab87ff4SFrancois Romieu static void rhine_task_enable(struct rhine_private *rp)
16457ab87ff4SFrancois Romieu {
16467ab87ff4SFrancois Romieu 	mutex_lock(&rp->task_lock);
16477ab87ff4SFrancois Romieu 	rp->task_enable = true;
16487ab87ff4SFrancois Romieu 	mutex_unlock(&rp->task_lock);
16497ab87ff4SFrancois Romieu }
16507ab87ff4SFrancois Romieu 
1651f2148a47SJeff Kirsher static int rhine_open(struct net_device *dev)
1652f2148a47SJeff Kirsher {
1653f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
1654f2148a47SJeff Kirsher 	void __iomem *ioaddr = rp->base;
1655f2148a47SJeff Kirsher 	int rc;
1656f2148a47SJeff Kirsher 
1657f7630d18SAlexey Charkov 	rc = request_irq(rp->irq, rhine_interrupt, IRQF_SHARED, dev->name, dev);
1658f2148a47SJeff Kirsher 	if (rc)
1659f2148a47SJeff Kirsher 		return rc;
1660f2148a47SJeff Kirsher 
1661f7630d18SAlexey Charkov 	netif_dbg(rp, ifup, dev, "%s() irq %d\n", __func__, rp->irq);
1662f2148a47SJeff Kirsher 
1663f2148a47SJeff Kirsher 	rc = alloc_ring(dev);
1664f2148a47SJeff Kirsher 	if (rc) {
1665f7630d18SAlexey Charkov 		free_irq(rp->irq, dev);
1666f2148a47SJeff Kirsher 		return rc;
1667f2148a47SJeff Kirsher 	}
1668f2148a47SJeff Kirsher 	alloc_rbufs(dev);
1669f2148a47SJeff Kirsher 	alloc_tbufs(dev);
1670f2148a47SJeff Kirsher 	rhine_chip_reset(dev);
16717ab87ff4SFrancois Romieu 	rhine_task_enable(rp);
1672f2148a47SJeff Kirsher 	init_registers(dev);
1673fc3e0f8aSFrancois Romieu 
1674fc3e0f8aSFrancois Romieu 	netif_dbg(rp, ifup, dev, "%s() Done - status %04x MII status: %04x\n",
1675f2148a47SJeff Kirsher 		  __func__, ioread16(ioaddr + ChipCmd),
1676f2148a47SJeff Kirsher 		  mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
1677f2148a47SJeff Kirsher 
1678f2148a47SJeff Kirsher 	netif_start_queue(dev);
1679f2148a47SJeff Kirsher 
1680f2148a47SJeff Kirsher 	return 0;
1681f2148a47SJeff Kirsher }
1682f2148a47SJeff Kirsher 
1683f2148a47SJeff Kirsher static void rhine_reset_task(struct work_struct *work)
1684f2148a47SJeff Kirsher {
1685f2148a47SJeff Kirsher 	struct rhine_private *rp = container_of(work, struct rhine_private,
1686f2148a47SJeff Kirsher 						reset_task);
1687f2148a47SJeff Kirsher 	struct net_device *dev = rp->dev;
1688f2148a47SJeff Kirsher 
16897ab87ff4SFrancois Romieu 	mutex_lock(&rp->task_lock);
16907ab87ff4SFrancois Romieu 
16917ab87ff4SFrancois Romieu 	if (!rp->task_enable)
16927ab87ff4SFrancois Romieu 		goto out_unlock;
1693f2148a47SJeff Kirsher 
1694f2148a47SJeff Kirsher 	napi_disable(&rp->napi);
1695a926592fSRichard Weinberger 	netif_tx_disable(dev);
1696f2148a47SJeff Kirsher 	spin_lock_bh(&rp->lock);
1697f2148a47SJeff Kirsher 
1698f2148a47SJeff Kirsher 	/* clear all descriptors */
1699f2148a47SJeff Kirsher 	free_tbufs(dev);
1700f2148a47SJeff Kirsher 	free_rbufs(dev);
1701f2148a47SJeff Kirsher 	alloc_tbufs(dev);
1702f2148a47SJeff Kirsher 	alloc_rbufs(dev);
1703f2148a47SJeff Kirsher 
1704f2148a47SJeff Kirsher 	/* Reinitialize the hardware. */
1705f2148a47SJeff Kirsher 	rhine_chip_reset(dev);
1706f2148a47SJeff Kirsher 	init_registers(dev);
1707f2148a47SJeff Kirsher 
1708f2148a47SJeff Kirsher 	spin_unlock_bh(&rp->lock);
1709f2148a47SJeff Kirsher 
1710f2148a47SJeff Kirsher 	dev->trans_start = jiffies; /* prevent tx timeout */
1711f2148a47SJeff Kirsher 	dev->stats.tx_errors++;
1712f2148a47SJeff Kirsher 	netif_wake_queue(dev);
17137ab87ff4SFrancois Romieu 
17147ab87ff4SFrancois Romieu out_unlock:
17157ab87ff4SFrancois Romieu 	mutex_unlock(&rp->task_lock);
1716f2148a47SJeff Kirsher }
1717f2148a47SJeff Kirsher 
1718f2148a47SJeff Kirsher static void rhine_tx_timeout(struct net_device *dev)
1719f2148a47SJeff Kirsher {
1720f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
1721f2148a47SJeff Kirsher 	void __iomem *ioaddr = rp->base;
1722f2148a47SJeff Kirsher 
1723f2148a47SJeff Kirsher 	netdev_warn(dev, "Transmit timed out, status %04x, PHY status %04x, resetting...\n",
1724f2148a47SJeff Kirsher 		    ioread16(ioaddr + IntrStatus),
1725f2148a47SJeff Kirsher 		    mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
1726f2148a47SJeff Kirsher 
1727f2148a47SJeff Kirsher 	schedule_work(&rp->reset_task);
1728f2148a47SJeff Kirsher }
1729f2148a47SJeff Kirsher 
1730f2148a47SJeff Kirsher static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
1731f2148a47SJeff Kirsher 				  struct net_device *dev)
1732f2148a47SJeff Kirsher {
1733f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
1734f7630d18SAlexey Charkov 	struct device *hwdev = dev->dev.parent;
1735f2148a47SJeff Kirsher 	void __iomem *ioaddr = rp->base;
1736f2148a47SJeff Kirsher 	unsigned entry;
1737f2148a47SJeff Kirsher 
1738f2148a47SJeff Kirsher 	/* Caution: the write order is important here, set the field
1739f2148a47SJeff Kirsher 	   with the "ownership" bits last. */
1740f2148a47SJeff Kirsher 
1741f2148a47SJeff Kirsher 	/* Calculate the next Tx descriptor entry. */
1742f2148a47SJeff Kirsher 	entry = rp->cur_tx % TX_RING_SIZE;
1743f2148a47SJeff Kirsher 
1744f2148a47SJeff Kirsher 	if (skb_padto(skb, ETH_ZLEN))
1745f2148a47SJeff Kirsher 		return NETDEV_TX_OK;
1746f2148a47SJeff Kirsher 
1747f2148a47SJeff Kirsher 	rp->tx_skbuff[entry] = skb;
1748f2148a47SJeff Kirsher 
1749f2148a47SJeff Kirsher 	if ((rp->quirks & rqRhineI) &&
1750f2148a47SJeff Kirsher 	    (((unsigned long)skb->data & 3) || skb_shinfo(skb)->nr_frags != 0 || skb->ip_summed == CHECKSUM_PARTIAL)) {
1751f2148a47SJeff Kirsher 		/* Must use alignment buffer. */
1752f2148a47SJeff Kirsher 		if (skb->len > PKT_BUF_SZ) {
1753f2148a47SJeff Kirsher 			/* packet too long, drop it */
17544b3afc6eSEric W. Biederman 			dev_kfree_skb_any(skb);
1755f2148a47SJeff Kirsher 			rp->tx_skbuff[entry] = NULL;
1756f2148a47SJeff Kirsher 			dev->stats.tx_dropped++;
1757f2148a47SJeff Kirsher 			return NETDEV_TX_OK;
1758f2148a47SJeff Kirsher 		}
1759f2148a47SJeff Kirsher 
1760f2148a47SJeff Kirsher 		/* Padding is not copied and so must be redone. */
1761f2148a47SJeff Kirsher 		skb_copy_and_csum_dev(skb, rp->tx_buf[entry]);
1762f2148a47SJeff Kirsher 		if (skb->len < ETH_ZLEN)
1763f2148a47SJeff Kirsher 			memset(rp->tx_buf[entry] + skb->len, 0,
1764f2148a47SJeff Kirsher 			       ETH_ZLEN - skb->len);
1765f2148a47SJeff Kirsher 		rp->tx_skbuff_dma[entry] = 0;
1766f2148a47SJeff Kirsher 		rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_bufs_dma +
1767f2148a47SJeff Kirsher 						      (rp->tx_buf[entry] -
1768f2148a47SJeff Kirsher 						       rp->tx_bufs));
1769f2148a47SJeff Kirsher 	} else {
1770f2148a47SJeff Kirsher 		rp->tx_skbuff_dma[entry] =
1771f7630d18SAlexey Charkov 			dma_map_single(hwdev, skb->data, skb->len,
17724087c4dcSAlexey Charkov 				       DMA_TO_DEVICE);
1773f7630d18SAlexey Charkov 		if (dma_mapping_error(hwdev, rp->tx_skbuff_dma[entry])) {
17744b3afc6eSEric W. Biederman 			dev_kfree_skb_any(skb);
17759b4fe5fbSNeil Horman 			rp->tx_skbuff_dma[entry] = 0;
17769b4fe5fbSNeil Horman 			dev->stats.tx_dropped++;
17779b4fe5fbSNeil Horman 			return NETDEV_TX_OK;
17789b4fe5fbSNeil Horman 		}
1779f2148a47SJeff Kirsher 		rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_skbuff_dma[entry]);
1780f2148a47SJeff Kirsher 	}
1781f2148a47SJeff Kirsher 
1782f2148a47SJeff Kirsher 	rp->tx_ring[entry].desc_length =
1783f2148a47SJeff Kirsher 		cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));
1784f2148a47SJeff Kirsher 
1785df8a39deSJiri Pirko 	if (unlikely(skb_vlan_tag_present(skb))) {
1786df8a39deSJiri Pirko 		u16 vid_pcp = skb_vlan_tag_get(skb);
1787207070f5SRoger Luethi 
1788207070f5SRoger Luethi 		/* drop CFI/DEI bit, register needs VID and PCP */
1789207070f5SRoger Luethi 		vid_pcp = (vid_pcp & VLAN_VID_MASK) |
1790207070f5SRoger Luethi 			  ((vid_pcp & VLAN_PRIO_MASK) >> 1);
1791207070f5SRoger Luethi 		rp->tx_ring[entry].tx_status = cpu_to_le32((vid_pcp) << 16);
1792f2148a47SJeff Kirsher 		/* request tagging */
1793f2148a47SJeff Kirsher 		rp->tx_ring[entry].desc_length |= cpu_to_le32(0x020000);
1794f2148a47SJeff Kirsher 	}
1795f2148a47SJeff Kirsher 	else
1796f2148a47SJeff Kirsher 		rp->tx_ring[entry].tx_status = 0;
1797f2148a47SJeff Kirsher 
1798f2148a47SJeff Kirsher 	/* lock eth irq */
1799f2148a47SJeff Kirsher 	wmb();
1800f2148a47SJeff Kirsher 	rp->tx_ring[entry].tx_status |= cpu_to_le32(DescOwn);
1801f2148a47SJeff Kirsher 	wmb();
1802f2148a47SJeff Kirsher 
1803f2148a47SJeff Kirsher 	rp->cur_tx++;
1804f2148a47SJeff Kirsher 
1805f2148a47SJeff Kirsher 	/* Non-x86 Todo: explicitly flush cache lines here. */
1806f2148a47SJeff Kirsher 
1807df8a39deSJiri Pirko 	if (skb_vlan_tag_present(skb))
1808f2148a47SJeff Kirsher 		/* Tx queues are bits 7-0 (first Tx queue: bit 7) */
1809f2148a47SJeff Kirsher 		BYTE_REG_BITS_ON(1 << 7, ioaddr + TQWake);
1810f2148a47SJeff Kirsher 
1811f2148a47SJeff Kirsher 	/* Wake the potentially-idle transmit channel */
1812f2148a47SJeff Kirsher 	iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
1813f2148a47SJeff Kirsher 	       ioaddr + ChipCmd1);
1814f2148a47SJeff Kirsher 	IOSYNC;
1815f2148a47SJeff Kirsher 
1816f2148a47SJeff Kirsher 	if (rp->cur_tx == rp->dirty_tx + TX_QUEUE_LEN)
1817f2148a47SJeff Kirsher 		netif_stop_queue(dev);
1818f2148a47SJeff Kirsher 
1819fc3e0f8aSFrancois Romieu 	netif_dbg(rp, tx_queued, dev, "Transmit frame #%d queued in slot %d\n",
1820f2148a47SJeff Kirsher 		  rp->cur_tx - 1, entry);
1821fc3e0f8aSFrancois Romieu 
1822f2148a47SJeff Kirsher 	return NETDEV_TX_OK;
1823f2148a47SJeff Kirsher }
1824f2148a47SJeff Kirsher 
18257ab87ff4SFrancois Romieu static void rhine_irq_disable(struct rhine_private *rp)
18267ab87ff4SFrancois Romieu {
18277ab87ff4SFrancois Romieu 	iowrite16(0x0000, rp->base + IntrEnable);
18287ab87ff4SFrancois Romieu 	mmiowb();
18297ab87ff4SFrancois Romieu }
18307ab87ff4SFrancois Romieu 
1831f2148a47SJeff Kirsher /* The interrupt handler does all of the Rx thread work and cleans up
1832f2148a47SJeff Kirsher    after the Tx thread. */
1833f2148a47SJeff Kirsher static irqreturn_t rhine_interrupt(int irq, void *dev_instance)
1834f2148a47SJeff Kirsher {
1835f2148a47SJeff Kirsher 	struct net_device *dev = dev_instance;
1836f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
18377ab87ff4SFrancois Romieu 	u32 status;
1838f2148a47SJeff Kirsher 	int handled = 0;
1839f2148a47SJeff Kirsher 
18407ab87ff4SFrancois Romieu 	status = rhine_get_events(rp);
1841f2148a47SJeff Kirsher 
1842fc3e0f8aSFrancois Romieu 	netif_dbg(rp, intr, dev, "Interrupt, status %08x\n", status);
1843f2148a47SJeff Kirsher 
18447ab87ff4SFrancois Romieu 	if (status & RHINE_EVENT) {
18457ab87ff4SFrancois Romieu 		handled = 1;
1846f2148a47SJeff Kirsher 
18477ab87ff4SFrancois Romieu 		rhine_irq_disable(rp);
1848f2148a47SJeff Kirsher 		napi_schedule(&rp->napi);
1849f2148a47SJeff Kirsher 	}
1850f2148a47SJeff Kirsher 
18517ab87ff4SFrancois Romieu 	if (status & ~(IntrLinkChange | IntrStatsMax | RHINE_EVENT_NAPI)) {
1852fc3e0f8aSFrancois Romieu 		netif_err(rp, intr, dev, "Something Wicked happened! %08x\n",
18537ab87ff4SFrancois Romieu 			  status);
1854f2148a47SJeff Kirsher 	}
1855f2148a47SJeff Kirsher 
1856f2148a47SJeff Kirsher 	return IRQ_RETVAL(handled);
1857f2148a47SJeff Kirsher }
1858f2148a47SJeff Kirsher 
1859f2148a47SJeff Kirsher /* This routine is logically part of the interrupt handler, but isolated
1860f2148a47SJeff Kirsher    for clarity. */
1861f2148a47SJeff Kirsher static void rhine_tx(struct net_device *dev)
1862f2148a47SJeff Kirsher {
1863f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
1864f7630d18SAlexey Charkov 	struct device *hwdev = dev->dev.parent;
1865f2148a47SJeff Kirsher 	int txstatus = 0, entry = rp->dirty_tx % TX_RING_SIZE;
1866f2148a47SJeff Kirsher 
1867f2148a47SJeff Kirsher 	/* find and cleanup dirty tx descriptors */
1868f2148a47SJeff Kirsher 	while (rp->dirty_tx != rp->cur_tx) {
1869f2148a47SJeff Kirsher 		txstatus = le32_to_cpu(rp->tx_ring[entry].tx_status);
1870fc3e0f8aSFrancois Romieu 		netif_dbg(rp, tx_done, dev, "Tx scavenge %d status %08x\n",
1871f2148a47SJeff Kirsher 			  entry, txstatus);
1872f2148a47SJeff Kirsher 		if (txstatus & DescOwn)
1873f2148a47SJeff Kirsher 			break;
1874f2148a47SJeff Kirsher 		if (txstatus & 0x8000) {
1875fc3e0f8aSFrancois Romieu 			netif_dbg(rp, tx_done, dev,
1876fc3e0f8aSFrancois Romieu 				  "Transmit error, Tx status %08x\n", txstatus);
1877f2148a47SJeff Kirsher 			dev->stats.tx_errors++;
1878f2148a47SJeff Kirsher 			if (txstatus & 0x0400)
1879f2148a47SJeff Kirsher 				dev->stats.tx_carrier_errors++;
1880f2148a47SJeff Kirsher 			if (txstatus & 0x0200)
1881f2148a47SJeff Kirsher 				dev->stats.tx_window_errors++;
1882f2148a47SJeff Kirsher 			if (txstatus & 0x0100)
1883f2148a47SJeff Kirsher 				dev->stats.tx_aborted_errors++;
1884f2148a47SJeff Kirsher 			if (txstatus & 0x0080)
1885f2148a47SJeff Kirsher 				dev->stats.tx_heartbeat_errors++;
1886f2148a47SJeff Kirsher 			if (((rp->quirks & rqRhineI) && txstatus & 0x0002) ||
1887f2148a47SJeff Kirsher 			    (txstatus & 0x0800) || (txstatus & 0x1000)) {
1888f2148a47SJeff Kirsher 				dev->stats.tx_fifo_errors++;
1889f2148a47SJeff Kirsher 				rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
1890f2148a47SJeff Kirsher 				break; /* Keep the skb - we try again */
1891f2148a47SJeff Kirsher 			}
1892f2148a47SJeff Kirsher 			/* Transmitter restarted in 'abnormal' handler. */
1893f2148a47SJeff Kirsher 		} else {
1894f2148a47SJeff Kirsher 			if (rp->quirks & rqRhineI)
1895f2148a47SJeff Kirsher 				dev->stats.collisions += (txstatus >> 3) & 0x0F;
1896f2148a47SJeff Kirsher 			else
1897f2148a47SJeff Kirsher 				dev->stats.collisions += txstatus & 0x0F;
1898fc3e0f8aSFrancois Romieu 			netif_dbg(rp, tx_done, dev, "collisions: %1.1x:%1.1x\n",
1899fc3e0f8aSFrancois Romieu 				  (txstatus >> 3) & 0xF, txstatus & 0xF);
1900f7b5d1b9SJamie Gloudon 
1901f7b5d1b9SJamie Gloudon 			u64_stats_update_begin(&rp->tx_stats.syncp);
1902f7b5d1b9SJamie Gloudon 			rp->tx_stats.bytes += rp->tx_skbuff[entry]->len;
1903f7b5d1b9SJamie Gloudon 			rp->tx_stats.packets++;
1904f7b5d1b9SJamie Gloudon 			u64_stats_update_end(&rp->tx_stats.syncp);
1905f2148a47SJeff Kirsher 		}
1906f2148a47SJeff Kirsher 		/* Free the original skb. */
1907f2148a47SJeff Kirsher 		if (rp->tx_skbuff_dma[entry]) {
1908f7630d18SAlexey Charkov 			dma_unmap_single(hwdev,
1909f2148a47SJeff Kirsher 					 rp->tx_skbuff_dma[entry],
1910f2148a47SJeff Kirsher 					 rp->tx_skbuff[entry]->len,
19114087c4dcSAlexey Charkov 					 DMA_TO_DEVICE);
1912f2148a47SJeff Kirsher 		}
19134b3afc6eSEric W. Biederman 		dev_consume_skb_any(rp->tx_skbuff[entry]);
1914f2148a47SJeff Kirsher 		rp->tx_skbuff[entry] = NULL;
1915f2148a47SJeff Kirsher 		entry = (++rp->dirty_tx) % TX_RING_SIZE;
1916f2148a47SJeff Kirsher 	}
1917f2148a47SJeff Kirsher 	if ((rp->cur_tx - rp->dirty_tx) < TX_QUEUE_LEN - 4)
1918f2148a47SJeff Kirsher 		netif_wake_queue(dev);
1919f2148a47SJeff Kirsher }
1920f2148a47SJeff Kirsher 
1921f2148a47SJeff Kirsher /**
1922f2148a47SJeff Kirsher  * rhine_get_vlan_tci - extract TCI from Rx data buffer
1923f2148a47SJeff Kirsher  * @skb: pointer to sk_buff
1924f2148a47SJeff Kirsher  * @data_size: used data area of the buffer including CRC
1925f2148a47SJeff Kirsher  *
1926f2148a47SJeff Kirsher  * If hardware VLAN tag extraction is enabled and the chip indicates a 802.1Q
1927f2148a47SJeff Kirsher  * packet, the extracted 802.1Q header (2 bytes TPID + 2 bytes TCI) is 4-byte
1928f2148a47SJeff Kirsher  * aligned following the CRC.
1929f2148a47SJeff Kirsher  */
1930f2148a47SJeff Kirsher static inline u16 rhine_get_vlan_tci(struct sk_buff *skb, int data_size)
1931f2148a47SJeff Kirsher {
1932f2148a47SJeff Kirsher 	u8 *trailer = (u8 *)skb->data + ((data_size + 3) & ~3) + 2;
1933f2148a47SJeff Kirsher 	return be16_to_cpup((__be16 *)trailer);
1934f2148a47SJeff Kirsher }
1935f2148a47SJeff Kirsher 
1936f2148a47SJeff Kirsher /* Process up to limit frames from receive ring */
1937f2148a47SJeff Kirsher static int rhine_rx(struct net_device *dev, int limit)
1938f2148a47SJeff Kirsher {
1939f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
1940f7630d18SAlexey Charkov 	struct device *hwdev = dev->dev.parent;
1941f2148a47SJeff Kirsher 	int count;
1942f2148a47SJeff Kirsher 	int entry = rp->cur_rx % RX_RING_SIZE;
1943f2148a47SJeff Kirsher 
1944fc3e0f8aSFrancois Romieu 	netif_dbg(rp, rx_status, dev, "%s(), entry %d status %08x\n", __func__,
1945fc3e0f8aSFrancois Romieu 		  entry, le32_to_cpu(rp->rx_head_desc->rx_status));
1946f2148a47SJeff Kirsher 
1947f2148a47SJeff Kirsher 	/* If EOP is set on the next entry, it's a new packet. Send it up. */
1948f2148a47SJeff Kirsher 	for (count = 0; count < limit; ++count) {
1949f2148a47SJeff Kirsher 		struct rx_desc *desc = rp->rx_head_desc;
1950f2148a47SJeff Kirsher 		u32 desc_status = le32_to_cpu(desc->rx_status);
1951f2148a47SJeff Kirsher 		u32 desc_length = le32_to_cpu(desc->desc_length);
1952f2148a47SJeff Kirsher 		int data_size = desc_status >> 16;
1953f2148a47SJeff Kirsher 
1954f2148a47SJeff Kirsher 		if (desc_status & DescOwn)
1955f2148a47SJeff Kirsher 			break;
1956f2148a47SJeff Kirsher 
1957fc3e0f8aSFrancois Romieu 		netif_dbg(rp, rx_status, dev, "%s() status %08x\n", __func__,
1958fc3e0f8aSFrancois Romieu 			  desc_status);
1959f2148a47SJeff Kirsher 
1960f2148a47SJeff Kirsher 		if ((desc_status & (RxWholePkt | RxErr)) != RxWholePkt) {
1961f2148a47SJeff Kirsher 			if ((desc_status & RxWholePkt) != RxWholePkt) {
1962f2148a47SJeff Kirsher 				netdev_warn(dev,
1963f2148a47SJeff Kirsher 	"Oversized Ethernet frame spanned multiple buffers, "
1964f2148a47SJeff Kirsher 	"entry %#x length %d status %08x!\n",
1965f2148a47SJeff Kirsher 					    entry, data_size,
1966f2148a47SJeff Kirsher 					    desc_status);
1967f2148a47SJeff Kirsher 				netdev_warn(dev,
1968f2148a47SJeff Kirsher 					    "Oversized Ethernet frame %p vs %p\n",
1969f2148a47SJeff Kirsher 					    rp->rx_head_desc,
1970f2148a47SJeff Kirsher 					    &rp->rx_ring[entry]);
1971f2148a47SJeff Kirsher 				dev->stats.rx_length_errors++;
1972f2148a47SJeff Kirsher 			} else if (desc_status & RxErr) {
1973f2148a47SJeff Kirsher 				/* There was a error. */
1974fc3e0f8aSFrancois Romieu 				netif_dbg(rp, rx_err, dev,
1975fc3e0f8aSFrancois Romieu 					  "%s() Rx error %08x\n", __func__,
1976fc3e0f8aSFrancois Romieu 					  desc_status);
1977f2148a47SJeff Kirsher 				dev->stats.rx_errors++;
1978f2148a47SJeff Kirsher 				if (desc_status & 0x0030)
1979f2148a47SJeff Kirsher 					dev->stats.rx_length_errors++;
1980f2148a47SJeff Kirsher 				if (desc_status & 0x0048)
1981f2148a47SJeff Kirsher 					dev->stats.rx_fifo_errors++;
1982f2148a47SJeff Kirsher 				if (desc_status & 0x0004)
1983f2148a47SJeff Kirsher 					dev->stats.rx_frame_errors++;
1984f2148a47SJeff Kirsher 				if (desc_status & 0x0002) {
1985f2148a47SJeff Kirsher 					/* this can also be updated outside the interrupt handler */
1986f2148a47SJeff Kirsher 					spin_lock(&rp->lock);
1987f2148a47SJeff Kirsher 					dev->stats.rx_crc_errors++;
1988f2148a47SJeff Kirsher 					spin_unlock(&rp->lock);
1989f2148a47SJeff Kirsher 				}
1990f2148a47SJeff Kirsher 			}
1991f2148a47SJeff Kirsher 		} else {
1992f2148a47SJeff Kirsher 			struct sk_buff *skb = NULL;
1993f2148a47SJeff Kirsher 			/* Length should omit the CRC */
1994f2148a47SJeff Kirsher 			int pkt_len = data_size - 4;
1995f2148a47SJeff Kirsher 			u16 vlan_tci = 0;
1996f2148a47SJeff Kirsher 
1997f2148a47SJeff Kirsher 			/* Check if the packet is long enough to accept without
1998f2148a47SJeff Kirsher 			   copying to a minimally-sized skbuff. */
1999f2148a47SJeff Kirsher 			if (pkt_len < rx_copybreak)
2000f2148a47SJeff Kirsher 				skb = netdev_alloc_skb_ip_align(dev, pkt_len);
2001f2148a47SJeff Kirsher 			if (skb) {
2002f7630d18SAlexey Charkov 				dma_sync_single_for_cpu(hwdev,
2003f2148a47SJeff Kirsher 							rp->rx_skbuff_dma[entry],
2004f2148a47SJeff Kirsher 							rp->rx_buf_sz,
20054087c4dcSAlexey Charkov 							DMA_FROM_DEVICE);
2006f2148a47SJeff Kirsher 
2007f2148a47SJeff Kirsher 				skb_copy_to_linear_data(skb,
2008f2148a47SJeff Kirsher 						 rp->rx_skbuff[entry]->data,
2009f2148a47SJeff Kirsher 						 pkt_len);
2010f2148a47SJeff Kirsher 				skb_put(skb, pkt_len);
2011f7630d18SAlexey Charkov 				dma_sync_single_for_device(hwdev,
2012f2148a47SJeff Kirsher 							   rp->rx_skbuff_dma[entry],
2013f2148a47SJeff Kirsher 							   rp->rx_buf_sz,
20144087c4dcSAlexey Charkov 							   DMA_FROM_DEVICE);
2015f2148a47SJeff Kirsher 			} else {
2016f2148a47SJeff Kirsher 				skb = rp->rx_skbuff[entry];
2017f2148a47SJeff Kirsher 				if (skb == NULL) {
2018f2148a47SJeff Kirsher 					netdev_err(dev, "Inconsistent Rx descriptor chain\n");
2019f2148a47SJeff Kirsher 					break;
2020f2148a47SJeff Kirsher 				}
2021f2148a47SJeff Kirsher 				rp->rx_skbuff[entry] = NULL;
2022f2148a47SJeff Kirsher 				skb_put(skb, pkt_len);
2023f7630d18SAlexey Charkov 				dma_unmap_single(hwdev,
2024f2148a47SJeff Kirsher 						 rp->rx_skbuff_dma[entry],
2025f2148a47SJeff Kirsher 						 rp->rx_buf_sz,
20264087c4dcSAlexey Charkov 						 DMA_FROM_DEVICE);
2027f2148a47SJeff Kirsher 			}
2028f2148a47SJeff Kirsher 
2029f2148a47SJeff Kirsher 			if (unlikely(desc_length & DescTag))
2030f2148a47SJeff Kirsher 				vlan_tci = rhine_get_vlan_tci(skb, data_size);
2031f2148a47SJeff Kirsher 
2032f2148a47SJeff Kirsher 			skb->protocol = eth_type_trans(skb, dev);
2033f2148a47SJeff Kirsher 
2034f2148a47SJeff Kirsher 			if (unlikely(desc_length & DescTag))
203586a9bad3SPatrick McHardy 				__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
2036f2148a47SJeff Kirsher 			netif_receive_skb(skb);
2037f7b5d1b9SJamie Gloudon 
2038f7b5d1b9SJamie Gloudon 			u64_stats_update_begin(&rp->rx_stats.syncp);
2039f7b5d1b9SJamie Gloudon 			rp->rx_stats.bytes += pkt_len;
2040f7b5d1b9SJamie Gloudon 			rp->rx_stats.packets++;
2041f7b5d1b9SJamie Gloudon 			u64_stats_update_end(&rp->rx_stats.syncp);
2042f2148a47SJeff Kirsher 		}
2043f2148a47SJeff Kirsher 		entry = (++rp->cur_rx) % RX_RING_SIZE;
2044f2148a47SJeff Kirsher 		rp->rx_head_desc = &rp->rx_ring[entry];
2045f2148a47SJeff Kirsher 	}
2046f2148a47SJeff Kirsher 
2047f2148a47SJeff Kirsher 	/* Refill the Rx ring buffers. */
2048f2148a47SJeff Kirsher 	for (; rp->cur_rx - rp->dirty_rx > 0; rp->dirty_rx++) {
2049f2148a47SJeff Kirsher 		struct sk_buff *skb;
2050f2148a47SJeff Kirsher 		entry = rp->dirty_rx % RX_RING_SIZE;
2051f2148a47SJeff Kirsher 		if (rp->rx_skbuff[entry] == NULL) {
2052f2148a47SJeff Kirsher 			skb = netdev_alloc_skb(dev, rp->rx_buf_sz);
2053f2148a47SJeff Kirsher 			rp->rx_skbuff[entry] = skb;
2054f2148a47SJeff Kirsher 			if (skb == NULL)
2055f2148a47SJeff Kirsher 				break;	/* Better luck next round. */
2056f2148a47SJeff Kirsher 			rp->rx_skbuff_dma[entry] =
2057f7630d18SAlexey Charkov 				dma_map_single(hwdev, skb->data,
2058f2148a47SJeff Kirsher 					       rp->rx_buf_sz,
20594087c4dcSAlexey Charkov 					       DMA_FROM_DEVICE);
2060f7630d18SAlexey Charkov 			if (dma_mapping_error(hwdev,
2061f7630d18SAlexey Charkov 					      rp->rx_skbuff_dma[entry])) {
20629b4fe5fbSNeil Horman 				dev_kfree_skb(skb);
20639b4fe5fbSNeil Horman 				rp->rx_skbuff_dma[entry] = 0;
20649b4fe5fbSNeil Horman 				break;
20659b4fe5fbSNeil Horman 			}
2066f2148a47SJeff Kirsher 			rp->rx_ring[entry].addr = cpu_to_le32(rp->rx_skbuff_dma[entry]);
2067f2148a47SJeff Kirsher 		}
2068f2148a47SJeff Kirsher 		rp->rx_ring[entry].rx_status = cpu_to_le32(DescOwn);
2069f2148a47SJeff Kirsher 	}
2070f2148a47SJeff Kirsher 
2071f2148a47SJeff Kirsher 	return count;
2072f2148a47SJeff Kirsher }
2073f2148a47SJeff Kirsher 
2074f2148a47SJeff Kirsher static void rhine_restart_tx(struct net_device *dev) {
2075f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
2076f2148a47SJeff Kirsher 	void __iomem *ioaddr = rp->base;
2077f2148a47SJeff Kirsher 	int entry = rp->dirty_tx % TX_RING_SIZE;
2078f2148a47SJeff Kirsher 	u32 intr_status;
2079f2148a47SJeff Kirsher 
2080f2148a47SJeff Kirsher 	/*
2081f2148a47SJeff Kirsher 	 * If new errors occurred, we need to sort them out before doing Tx.
2082f2148a47SJeff Kirsher 	 * In that case the ISR will be back here RSN anyway.
2083f2148a47SJeff Kirsher 	 */
2084a20a28bcSFrancois Romieu 	intr_status = rhine_get_events(rp);
2085f2148a47SJeff Kirsher 
2086f2148a47SJeff Kirsher 	if ((intr_status & IntrTxErrSummary) == 0) {
2087f2148a47SJeff Kirsher 
2088f2148a47SJeff Kirsher 		/* We know better than the chip where it should continue. */
2089f2148a47SJeff Kirsher 		iowrite32(rp->tx_ring_dma + entry * sizeof(struct tx_desc),
2090f2148a47SJeff Kirsher 		       ioaddr + TxRingPtr);
2091f2148a47SJeff Kirsher 
2092f2148a47SJeff Kirsher 		iowrite8(ioread8(ioaddr + ChipCmd) | CmdTxOn,
2093f2148a47SJeff Kirsher 		       ioaddr + ChipCmd);
2094f2148a47SJeff Kirsher 
2095f2148a47SJeff Kirsher 		if (rp->tx_ring[entry].desc_length & cpu_to_le32(0x020000))
2096f2148a47SJeff Kirsher 			/* Tx queues are bits 7-0 (first Tx queue: bit 7) */
2097f2148a47SJeff Kirsher 			BYTE_REG_BITS_ON(1 << 7, ioaddr + TQWake);
2098f2148a47SJeff Kirsher 
2099f2148a47SJeff Kirsher 		iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
2100f2148a47SJeff Kirsher 		       ioaddr + ChipCmd1);
2101f2148a47SJeff Kirsher 		IOSYNC;
2102f2148a47SJeff Kirsher 	}
2103f2148a47SJeff Kirsher 	else {
2104f2148a47SJeff Kirsher 		/* This should never happen */
2105fc3e0f8aSFrancois Romieu 		netif_warn(rp, tx_err, dev, "another error occurred %08x\n",
2106fc3e0f8aSFrancois Romieu 			   intr_status);
2107f2148a47SJeff Kirsher 	}
2108f2148a47SJeff Kirsher 
2109f2148a47SJeff Kirsher }
2110f2148a47SJeff Kirsher 
21117ab87ff4SFrancois Romieu static void rhine_slow_event_task(struct work_struct *work)
2112f2148a47SJeff Kirsher {
21137ab87ff4SFrancois Romieu 	struct rhine_private *rp =
21147ab87ff4SFrancois Romieu 		container_of(work, struct rhine_private, slow_event_task);
21157ab87ff4SFrancois Romieu 	struct net_device *dev = rp->dev;
21167ab87ff4SFrancois Romieu 	u32 intr_status;
2117f2148a47SJeff Kirsher 
21187ab87ff4SFrancois Romieu 	mutex_lock(&rp->task_lock);
21197ab87ff4SFrancois Romieu 
21207ab87ff4SFrancois Romieu 	if (!rp->task_enable)
21217ab87ff4SFrancois Romieu 		goto out_unlock;
21227ab87ff4SFrancois Romieu 
21237ab87ff4SFrancois Romieu 	intr_status = rhine_get_events(rp);
21247ab87ff4SFrancois Romieu 	rhine_ack_events(rp, intr_status & RHINE_EVENT_SLOW);
2125f2148a47SJeff Kirsher 
2126f2148a47SJeff Kirsher 	if (intr_status & IntrLinkChange)
2127f2148a47SJeff Kirsher 		rhine_check_media(dev, 0);
2128f2148a47SJeff Kirsher 
2129fc3e0f8aSFrancois Romieu 	if (intr_status & IntrPCIErr)
2130fc3e0f8aSFrancois Romieu 		netif_warn(rp, hw, dev, "PCI error\n");
2131fc3e0f8aSFrancois Romieu 
2132559bcac3SDavid S. Miller 	iowrite16(RHINE_EVENT & 0xffff, rp->base + IntrEnable);
2133f2148a47SJeff Kirsher 
21347ab87ff4SFrancois Romieu out_unlock:
21357ab87ff4SFrancois Romieu 	mutex_unlock(&rp->task_lock);
2136f2148a47SJeff Kirsher }
2137f2148a47SJeff Kirsher 
2138f7b5d1b9SJamie Gloudon static struct rtnl_link_stats64 *
2139f7b5d1b9SJamie Gloudon rhine_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
2140f2148a47SJeff Kirsher {
2141f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
2142f7b5d1b9SJamie Gloudon 	unsigned int start;
2143f2148a47SJeff Kirsher 
21447ab87ff4SFrancois Romieu 	spin_lock_bh(&rp->lock);
21457ab87ff4SFrancois Romieu 	rhine_update_rx_crc_and_missed_errord(rp);
21467ab87ff4SFrancois Romieu 	spin_unlock_bh(&rp->lock);
2147f2148a47SJeff Kirsher 
2148f7b5d1b9SJamie Gloudon 	netdev_stats_to_stats64(stats, &dev->stats);
2149f7b5d1b9SJamie Gloudon 
2150f7b5d1b9SJamie Gloudon 	do {
215157a7744eSEric W. Biederman 		start = u64_stats_fetch_begin_irq(&rp->rx_stats.syncp);
2152f7b5d1b9SJamie Gloudon 		stats->rx_packets = rp->rx_stats.packets;
2153f7b5d1b9SJamie Gloudon 		stats->rx_bytes = rp->rx_stats.bytes;
215457a7744eSEric W. Biederman 	} while (u64_stats_fetch_retry_irq(&rp->rx_stats.syncp, start));
2155f7b5d1b9SJamie Gloudon 
2156f7b5d1b9SJamie Gloudon 	do {
215757a7744eSEric W. Biederman 		start = u64_stats_fetch_begin_irq(&rp->tx_stats.syncp);
2158f7b5d1b9SJamie Gloudon 		stats->tx_packets = rp->tx_stats.packets;
2159f7b5d1b9SJamie Gloudon 		stats->tx_bytes = rp->tx_stats.bytes;
216057a7744eSEric W. Biederman 	} while (u64_stats_fetch_retry_irq(&rp->tx_stats.syncp, start));
2161f7b5d1b9SJamie Gloudon 
2162f7b5d1b9SJamie Gloudon 	return stats;
2163f2148a47SJeff Kirsher }
2164f2148a47SJeff Kirsher 
2165f2148a47SJeff Kirsher static void rhine_set_rx_mode(struct net_device *dev)
2166f2148a47SJeff Kirsher {
2167f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
2168f2148a47SJeff Kirsher 	void __iomem *ioaddr = rp->base;
2169f2148a47SJeff Kirsher 	u32 mc_filter[2];	/* Multicast hash filter */
2170f2148a47SJeff Kirsher 	u8 rx_mode = 0x0C;	/* Note: 0x02=accept runt, 0x01=accept errs */
2171f2148a47SJeff Kirsher 	struct netdev_hw_addr *ha;
2172f2148a47SJeff Kirsher 
2173f2148a47SJeff Kirsher 	if (dev->flags & IFF_PROMISC) {		/* Set promiscuous. */
2174f2148a47SJeff Kirsher 		rx_mode = 0x1C;
2175f2148a47SJeff Kirsher 		iowrite32(0xffffffff, ioaddr + MulticastFilter0);
2176f2148a47SJeff Kirsher 		iowrite32(0xffffffff, ioaddr + MulticastFilter1);
2177f2148a47SJeff Kirsher 	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
2178f2148a47SJeff Kirsher 		   (dev->flags & IFF_ALLMULTI)) {
2179f2148a47SJeff Kirsher 		/* Too many to match, or accept all multicasts. */
2180f2148a47SJeff Kirsher 		iowrite32(0xffffffff, ioaddr + MulticastFilter0);
2181f2148a47SJeff Kirsher 		iowrite32(0xffffffff, ioaddr + MulticastFilter1);
2182ca8b6e04SAlexey Charkov 	} else if (rp->quirks & rqMgmt) {
2183f2148a47SJeff Kirsher 		int i = 0;
2184f2148a47SJeff Kirsher 		u32 mCAMmask = 0;	/* 32 mCAMs (6105M and better) */
2185f2148a47SJeff Kirsher 		netdev_for_each_mc_addr(ha, dev) {
2186f2148a47SJeff Kirsher 			if (i == MCAM_SIZE)
2187f2148a47SJeff Kirsher 				break;
2188f2148a47SJeff Kirsher 			rhine_set_cam(ioaddr, i, ha->addr);
2189f2148a47SJeff Kirsher 			mCAMmask |= 1 << i;
2190f2148a47SJeff Kirsher 			i++;
2191f2148a47SJeff Kirsher 		}
2192f2148a47SJeff Kirsher 		rhine_set_cam_mask(ioaddr, mCAMmask);
2193f2148a47SJeff Kirsher 	} else {
2194f2148a47SJeff Kirsher 		memset(mc_filter, 0, sizeof(mc_filter));
2195f2148a47SJeff Kirsher 		netdev_for_each_mc_addr(ha, dev) {
2196f2148a47SJeff Kirsher 			int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
2197f2148a47SJeff Kirsher 
2198f2148a47SJeff Kirsher 			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
2199f2148a47SJeff Kirsher 		}
2200f2148a47SJeff Kirsher 		iowrite32(mc_filter[0], ioaddr + MulticastFilter0);
2201f2148a47SJeff Kirsher 		iowrite32(mc_filter[1], ioaddr + MulticastFilter1);
2202f2148a47SJeff Kirsher 	}
2203f2148a47SJeff Kirsher 	/* enable/disable VLAN receive filtering */
2204ca8b6e04SAlexey Charkov 	if (rp->quirks & rqMgmt) {
2205f2148a47SJeff Kirsher 		if (dev->flags & IFF_PROMISC)
2206f2148a47SJeff Kirsher 			BYTE_REG_BITS_OFF(BCR1_VIDFR, ioaddr + PCIBusConfig1);
2207f2148a47SJeff Kirsher 		else
2208f2148a47SJeff Kirsher 			BYTE_REG_BITS_ON(BCR1_VIDFR, ioaddr + PCIBusConfig1);
2209f2148a47SJeff Kirsher 	}
2210f2148a47SJeff Kirsher 	BYTE_REG_BITS_ON(rx_mode, ioaddr + RxConfig);
2211f2148a47SJeff Kirsher }
2212f2148a47SJeff Kirsher 
2213f2148a47SJeff Kirsher static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
2214f2148a47SJeff Kirsher {
2215f7630d18SAlexey Charkov 	struct device *hwdev = dev->dev.parent;
2216f2148a47SJeff Kirsher 
221723020ab3SRick Jones 	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
221823020ab3SRick Jones 	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
2219f7630d18SAlexey Charkov 	strlcpy(info->bus_info, dev_name(hwdev), sizeof(info->bus_info));
2220f2148a47SJeff Kirsher }
2221f2148a47SJeff Kirsher 
2222f2148a47SJeff Kirsher static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2223f2148a47SJeff Kirsher {
2224f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
2225f2148a47SJeff Kirsher 	int rc;
2226f2148a47SJeff Kirsher 
22277ab87ff4SFrancois Romieu 	mutex_lock(&rp->task_lock);
2228f2148a47SJeff Kirsher 	rc = mii_ethtool_gset(&rp->mii_if, cmd);
22297ab87ff4SFrancois Romieu 	mutex_unlock(&rp->task_lock);
2230f2148a47SJeff Kirsher 
2231f2148a47SJeff Kirsher 	return rc;
2232f2148a47SJeff Kirsher }
2233f2148a47SJeff Kirsher 
2234f2148a47SJeff Kirsher static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2235f2148a47SJeff Kirsher {
2236f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
2237f2148a47SJeff Kirsher 	int rc;
2238f2148a47SJeff Kirsher 
22397ab87ff4SFrancois Romieu 	mutex_lock(&rp->task_lock);
2240f2148a47SJeff Kirsher 	rc = mii_ethtool_sset(&rp->mii_if, cmd);
2241f2148a47SJeff Kirsher 	rhine_set_carrier(&rp->mii_if);
22427ab87ff4SFrancois Romieu 	mutex_unlock(&rp->task_lock);
2243f2148a47SJeff Kirsher 
2244f2148a47SJeff Kirsher 	return rc;
2245f2148a47SJeff Kirsher }
2246f2148a47SJeff Kirsher 
2247f2148a47SJeff Kirsher static int netdev_nway_reset(struct net_device *dev)
2248f2148a47SJeff Kirsher {
2249f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
2250f2148a47SJeff Kirsher 
2251f2148a47SJeff Kirsher 	return mii_nway_restart(&rp->mii_if);
2252f2148a47SJeff Kirsher }
2253f2148a47SJeff Kirsher 
2254f2148a47SJeff Kirsher static u32 netdev_get_link(struct net_device *dev)
2255f2148a47SJeff Kirsher {
2256f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
2257f2148a47SJeff Kirsher 
2258f2148a47SJeff Kirsher 	return mii_link_ok(&rp->mii_if);
2259f2148a47SJeff Kirsher }
2260f2148a47SJeff Kirsher 
2261f2148a47SJeff Kirsher static u32 netdev_get_msglevel(struct net_device *dev)
2262f2148a47SJeff Kirsher {
2263fc3e0f8aSFrancois Romieu 	struct rhine_private *rp = netdev_priv(dev);
2264fc3e0f8aSFrancois Romieu 
2265fc3e0f8aSFrancois Romieu 	return rp->msg_enable;
2266f2148a47SJeff Kirsher }
2267f2148a47SJeff Kirsher 
2268f2148a47SJeff Kirsher static void netdev_set_msglevel(struct net_device *dev, u32 value)
2269f2148a47SJeff Kirsher {
2270fc3e0f8aSFrancois Romieu 	struct rhine_private *rp = netdev_priv(dev);
2271fc3e0f8aSFrancois Romieu 
2272fc3e0f8aSFrancois Romieu 	rp->msg_enable = value;
2273f2148a47SJeff Kirsher }
2274f2148a47SJeff Kirsher 
2275f2148a47SJeff Kirsher static void rhine_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2276f2148a47SJeff Kirsher {
2277f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
2278f2148a47SJeff Kirsher 
2279f2148a47SJeff Kirsher 	if (!(rp->quirks & rqWOL))
2280f2148a47SJeff Kirsher 		return;
2281f2148a47SJeff Kirsher 
2282f2148a47SJeff Kirsher 	spin_lock_irq(&rp->lock);
2283f2148a47SJeff Kirsher 	wol->supported = WAKE_PHY | WAKE_MAGIC |
2284f2148a47SJeff Kirsher 			 WAKE_UCAST | WAKE_MCAST | WAKE_BCAST;	/* Untested */
2285f2148a47SJeff Kirsher 	wol->wolopts = rp->wolopts;
2286f2148a47SJeff Kirsher 	spin_unlock_irq(&rp->lock);
2287f2148a47SJeff Kirsher }
2288f2148a47SJeff Kirsher 
2289f2148a47SJeff Kirsher static int rhine_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2290f2148a47SJeff Kirsher {
2291f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
2292f2148a47SJeff Kirsher 	u32 support = WAKE_PHY | WAKE_MAGIC |
2293f2148a47SJeff Kirsher 		      WAKE_UCAST | WAKE_MCAST | WAKE_BCAST;	/* Untested */
2294f2148a47SJeff Kirsher 
2295f2148a47SJeff Kirsher 	if (!(rp->quirks & rqWOL))
2296f2148a47SJeff Kirsher 		return -EINVAL;
2297f2148a47SJeff Kirsher 
2298f2148a47SJeff Kirsher 	if (wol->wolopts & ~support)
2299f2148a47SJeff Kirsher 		return -EINVAL;
2300f2148a47SJeff Kirsher 
2301f2148a47SJeff Kirsher 	spin_lock_irq(&rp->lock);
2302f2148a47SJeff Kirsher 	rp->wolopts = wol->wolopts;
2303f2148a47SJeff Kirsher 	spin_unlock_irq(&rp->lock);
2304f2148a47SJeff Kirsher 
2305f2148a47SJeff Kirsher 	return 0;
2306f2148a47SJeff Kirsher }
2307f2148a47SJeff Kirsher 
2308f2148a47SJeff Kirsher static const struct ethtool_ops netdev_ethtool_ops = {
2309f2148a47SJeff Kirsher 	.get_drvinfo		= netdev_get_drvinfo,
2310f2148a47SJeff Kirsher 	.get_settings		= netdev_get_settings,
2311f2148a47SJeff Kirsher 	.set_settings		= netdev_set_settings,
2312f2148a47SJeff Kirsher 	.nway_reset		= netdev_nway_reset,
2313f2148a47SJeff Kirsher 	.get_link		= netdev_get_link,
2314f2148a47SJeff Kirsher 	.get_msglevel		= netdev_get_msglevel,
2315f2148a47SJeff Kirsher 	.set_msglevel		= netdev_set_msglevel,
2316f2148a47SJeff Kirsher 	.get_wol		= rhine_get_wol,
2317f2148a47SJeff Kirsher 	.set_wol		= rhine_set_wol,
2318f2148a47SJeff Kirsher };
2319f2148a47SJeff Kirsher 
2320f2148a47SJeff Kirsher static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2321f2148a47SJeff Kirsher {
2322f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
2323f2148a47SJeff Kirsher 	int rc;
2324f2148a47SJeff Kirsher 
2325f2148a47SJeff Kirsher 	if (!netif_running(dev))
2326f2148a47SJeff Kirsher 		return -EINVAL;
2327f2148a47SJeff Kirsher 
23287ab87ff4SFrancois Romieu 	mutex_lock(&rp->task_lock);
2329f2148a47SJeff Kirsher 	rc = generic_mii_ioctl(&rp->mii_if, if_mii(rq), cmd, NULL);
2330f2148a47SJeff Kirsher 	rhine_set_carrier(&rp->mii_if);
23317ab87ff4SFrancois Romieu 	mutex_unlock(&rp->task_lock);
2332f2148a47SJeff Kirsher 
2333f2148a47SJeff Kirsher 	return rc;
2334f2148a47SJeff Kirsher }
2335f2148a47SJeff Kirsher 
2336f2148a47SJeff Kirsher static int rhine_close(struct net_device *dev)
2337f2148a47SJeff Kirsher {
2338f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
2339f2148a47SJeff Kirsher 	void __iomem *ioaddr = rp->base;
2340f2148a47SJeff Kirsher 
23417ab87ff4SFrancois Romieu 	rhine_task_disable(rp);
2342f2148a47SJeff Kirsher 	napi_disable(&rp->napi);
2343f2148a47SJeff Kirsher 	netif_stop_queue(dev);
2344f2148a47SJeff Kirsher 
2345fc3e0f8aSFrancois Romieu 	netif_dbg(rp, ifdown, dev, "Shutting down ethercard, status was %04x\n",
2346f2148a47SJeff Kirsher 		  ioread16(ioaddr + ChipCmd));
2347f2148a47SJeff Kirsher 
2348f2148a47SJeff Kirsher 	/* Switch to loopback mode to avoid hardware races. */
2349f2148a47SJeff Kirsher 	iowrite8(rp->tx_thresh | 0x02, ioaddr + TxConfig);
2350f2148a47SJeff Kirsher 
23517ab87ff4SFrancois Romieu 	rhine_irq_disable(rp);
2352f2148a47SJeff Kirsher 
2353f2148a47SJeff Kirsher 	/* Stop the chip's Tx and Rx processes. */
2354f2148a47SJeff Kirsher 	iowrite16(CmdStop, ioaddr + ChipCmd);
2355f2148a47SJeff Kirsher 
2356f7630d18SAlexey Charkov 	free_irq(rp->irq, dev);
2357f2148a47SJeff Kirsher 	free_rbufs(dev);
2358f2148a47SJeff Kirsher 	free_tbufs(dev);
2359f2148a47SJeff Kirsher 	free_ring(dev);
2360f2148a47SJeff Kirsher 
2361f2148a47SJeff Kirsher 	return 0;
2362f2148a47SJeff Kirsher }
2363f2148a47SJeff Kirsher 
2364f2148a47SJeff Kirsher 
23652d283862SAlexey Charkov static void rhine_remove_one_pci(struct pci_dev *pdev)
2366f2148a47SJeff Kirsher {
2367f2148a47SJeff Kirsher 	struct net_device *dev = pci_get_drvdata(pdev);
2368f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
2369f2148a47SJeff Kirsher 
2370f2148a47SJeff Kirsher 	unregister_netdev(dev);
2371f2148a47SJeff Kirsher 
2372f2148a47SJeff Kirsher 	pci_iounmap(pdev, rp->base);
2373f2148a47SJeff Kirsher 	pci_release_regions(pdev);
2374f2148a47SJeff Kirsher 
2375f2148a47SJeff Kirsher 	free_netdev(dev);
2376f2148a47SJeff Kirsher 	pci_disable_device(pdev);
2377f2148a47SJeff Kirsher }
2378f2148a47SJeff Kirsher 
23792d283862SAlexey Charkov static int rhine_remove_one_platform(struct platform_device *pdev)
23802d283862SAlexey Charkov {
23812d283862SAlexey Charkov 	struct net_device *dev = platform_get_drvdata(pdev);
23822d283862SAlexey Charkov 	struct rhine_private *rp = netdev_priv(dev);
23832d283862SAlexey Charkov 
23842d283862SAlexey Charkov 	unregister_netdev(dev);
23852d283862SAlexey Charkov 
23862d283862SAlexey Charkov 	iounmap(rp->base);
23872d283862SAlexey Charkov 
23882d283862SAlexey Charkov 	free_netdev(dev);
23892d283862SAlexey Charkov 
23902d283862SAlexey Charkov 	return 0;
23912d283862SAlexey Charkov }
23922d283862SAlexey Charkov 
23932d283862SAlexey Charkov static void rhine_shutdown_pci(struct pci_dev *pdev)
2394f2148a47SJeff Kirsher {
2395f2148a47SJeff Kirsher 	struct net_device *dev = pci_get_drvdata(pdev);
2396f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
2397f2148a47SJeff Kirsher 	void __iomem *ioaddr = rp->base;
2398f2148a47SJeff Kirsher 
2399f2148a47SJeff Kirsher 	if (!(rp->quirks & rqWOL))
2400f2148a47SJeff Kirsher 		return; /* Nothing to do for non-WOL adapters */
2401f2148a47SJeff Kirsher 
2402f2148a47SJeff Kirsher 	rhine_power_init(dev);
2403f2148a47SJeff Kirsher 
2404f2148a47SJeff Kirsher 	/* Make sure we use pattern 0, 1 and not 4, 5 */
2405f2148a47SJeff Kirsher 	if (rp->quirks & rq6patterns)
2406f2148a47SJeff Kirsher 		iowrite8(0x04, ioaddr + WOLcgClr);
2407f2148a47SJeff Kirsher 
24087ab87ff4SFrancois Romieu 	spin_lock(&rp->lock);
24097ab87ff4SFrancois Romieu 
2410f2148a47SJeff Kirsher 	if (rp->wolopts & WAKE_MAGIC) {
2411f2148a47SJeff Kirsher 		iowrite8(WOLmagic, ioaddr + WOLcrSet);
2412f2148a47SJeff Kirsher 		/*
2413f2148a47SJeff Kirsher 		 * Turn EEPROM-controlled wake-up back on -- some hardware may
2414f2148a47SJeff Kirsher 		 * not cooperate otherwise.
2415f2148a47SJeff Kirsher 		 */
2416f2148a47SJeff Kirsher 		iowrite8(ioread8(ioaddr + ConfigA) | 0x03, ioaddr + ConfigA);
2417f2148a47SJeff Kirsher 	}
2418f2148a47SJeff Kirsher 
2419f2148a47SJeff Kirsher 	if (rp->wolopts & (WAKE_BCAST|WAKE_MCAST))
2420f2148a47SJeff Kirsher 		iowrite8(WOLbmcast, ioaddr + WOLcgSet);
2421f2148a47SJeff Kirsher 
2422f2148a47SJeff Kirsher 	if (rp->wolopts & WAKE_PHY)
2423f2148a47SJeff Kirsher 		iowrite8(WOLlnkon | WOLlnkoff, ioaddr + WOLcrSet);
2424f2148a47SJeff Kirsher 
2425f2148a47SJeff Kirsher 	if (rp->wolopts & WAKE_UCAST)
2426f2148a47SJeff Kirsher 		iowrite8(WOLucast, ioaddr + WOLcrSet);
2427f2148a47SJeff Kirsher 
2428f2148a47SJeff Kirsher 	if (rp->wolopts) {
2429f2148a47SJeff Kirsher 		/* Enable legacy WOL (for old motherboards) */
2430f2148a47SJeff Kirsher 		iowrite8(0x01, ioaddr + PwcfgSet);
2431f2148a47SJeff Kirsher 		iowrite8(ioread8(ioaddr + StickyHW) | 0x04, ioaddr + StickyHW);
2432f2148a47SJeff Kirsher 	}
2433f2148a47SJeff Kirsher 
24347ab87ff4SFrancois Romieu 	spin_unlock(&rp->lock);
24357ab87ff4SFrancois Romieu 
2436e92b9b3bSFrancois Romieu 	if (system_state == SYSTEM_POWER_OFF && !avoid_D3) {
2437f2148a47SJeff Kirsher 		iowrite8(ioread8(ioaddr + StickyHW) | 0x03, ioaddr + StickyHW);
2438f2148a47SJeff Kirsher 
2439e92b9b3bSFrancois Romieu 		pci_wake_from_d3(pdev, true);
2440e92b9b3bSFrancois Romieu 		pci_set_power_state(pdev, PCI_D3hot);
2441e92b9b3bSFrancois Romieu 	}
2442f2148a47SJeff Kirsher }
2443f2148a47SJeff Kirsher 
2444e92b9b3bSFrancois Romieu #ifdef CONFIG_PM_SLEEP
2445e92b9b3bSFrancois Romieu static int rhine_suspend(struct device *device)
2446f2148a47SJeff Kirsher {
2447f7630d18SAlexey Charkov 	struct net_device *dev = dev_get_drvdata(device);
2448f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
2449f2148a47SJeff Kirsher 
2450f2148a47SJeff Kirsher 	if (!netif_running(dev))
2451f2148a47SJeff Kirsher 		return 0;
2452f2148a47SJeff Kirsher 
24537ab87ff4SFrancois Romieu 	rhine_task_disable(rp);
24547ab87ff4SFrancois Romieu 	rhine_irq_disable(rp);
2455f2148a47SJeff Kirsher 	napi_disable(&rp->napi);
2456f2148a47SJeff Kirsher 
2457f2148a47SJeff Kirsher 	netif_device_detach(dev);
2458f2148a47SJeff Kirsher 
2459f7630d18SAlexey Charkov 	if (dev_is_pci(device))
24602d283862SAlexey Charkov 		rhine_shutdown_pci(to_pci_dev(device));
2461f2148a47SJeff Kirsher 
2462f2148a47SJeff Kirsher 	return 0;
2463f2148a47SJeff Kirsher }
2464f2148a47SJeff Kirsher 
2465e92b9b3bSFrancois Romieu static int rhine_resume(struct device *device)
2466f2148a47SJeff Kirsher {
2467f7630d18SAlexey Charkov 	struct net_device *dev = dev_get_drvdata(device);
2468f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
2469f2148a47SJeff Kirsher 
2470f2148a47SJeff Kirsher 	if (!netif_running(dev))
2471f2148a47SJeff Kirsher 		return 0;
2472f2148a47SJeff Kirsher 
2473f2148a47SJeff Kirsher 	enable_mmio(rp->pioaddr, rp->quirks);
2474f2148a47SJeff Kirsher 	rhine_power_init(dev);
2475f2148a47SJeff Kirsher 	free_tbufs(dev);
2476f2148a47SJeff Kirsher 	free_rbufs(dev);
2477f2148a47SJeff Kirsher 	alloc_tbufs(dev);
2478f2148a47SJeff Kirsher 	alloc_rbufs(dev);
24797ab87ff4SFrancois Romieu 	rhine_task_enable(rp);
24807ab87ff4SFrancois Romieu 	spin_lock_bh(&rp->lock);
2481f2148a47SJeff Kirsher 	init_registers(dev);
24827ab87ff4SFrancois Romieu 	spin_unlock_bh(&rp->lock);
2483f2148a47SJeff Kirsher 
2484f2148a47SJeff Kirsher 	netif_device_attach(dev);
2485f2148a47SJeff Kirsher 
2486f2148a47SJeff Kirsher 	return 0;
2487f2148a47SJeff Kirsher }
2488e92b9b3bSFrancois Romieu 
2489e92b9b3bSFrancois Romieu static SIMPLE_DEV_PM_OPS(rhine_pm_ops, rhine_suspend, rhine_resume);
2490e92b9b3bSFrancois Romieu #define RHINE_PM_OPS	(&rhine_pm_ops)
2491e92b9b3bSFrancois Romieu 
2492e92b9b3bSFrancois Romieu #else
2493e92b9b3bSFrancois Romieu 
2494e92b9b3bSFrancois Romieu #define RHINE_PM_OPS	NULL
2495e92b9b3bSFrancois Romieu 
2496e92b9b3bSFrancois Romieu #endif /* !CONFIG_PM_SLEEP */
2497f2148a47SJeff Kirsher 
24982d283862SAlexey Charkov static struct pci_driver rhine_driver_pci = {
2499f2148a47SJeff Kirsher 	.name		= DRV_NAME,
2500f2148a47SJeff Kirsher 	.id_table	= rhine_pci_tbl,
25012d283862SAlexey Charkov 	.probe		= rhine_init_one_pci,
25022d283862SAlexey Charkov 	.remove		= rhine_remove_one_pci,
25032d283862SAlexey Charkov 	.shutdown	= rhine_shutdown_pci,
2504e92b9b3bSFrancois Romieu 	.driver.pm	= RHINE_PM_OPS,
2505f2148a47SJeff Kirsher };
2506f2148a47SJeff Kirsher 
25072d283862SAlexey Charkov static struct platform_driver rhine_driver_platform = {
25082d283862SAlexey Charkov 	.probe		= rhine_init_one_platform,
25092d283862SAlexey Charkov 	.remove		= rhine_remove_one_platform,
25102d283862SAlexey Charkov 	.driver = {
25112d283862SAlexey Charkov 		.name	= DRV_NAME,
25122d283862SAlexey Charkov 		.of_match_table	= rhine_of_tbl,
25132d283862SAlexey Charkov 		.pm		= RHINE_PM_OPS,
25142d283862SAlexey Charkov 	}
25152d283862SAlexey Charkov };
25162d283862SAlexey Charkov 
251777273eaaSSachin Kamat static struct dmi_system_id rhine_dmi_table[] __initdata = {
2518f2148a47SJeff Kirsher 	{
2519f2148a47SJeff Kirsher 		.ident = "EPIA-M",
2520f2148a47SJeff Kirsher 		.matches = {
2521f2148a47SJeff Kirsher 			DMI_MATCH(DMI_BIOS_VENDOR, "Award Software International, Inc."),
2522f2148a47SJeff Kirsher 			DMI_MATCH(DMI_BIOS_VERSION, "6.00 PG"),
2523f2148a47SJeff Kirsher 		},
2524f2148a47SJeff Kirsher 	},
2525f2148a47SJeff Kirsher 	{
2526f2148a47SJeff Kirsher 		.ident = "KV7",
2527f2148a47SJeff Kirsher 		.matches = {
2528f2148a47SJeff Kirsher 			DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies, LTD"),
2529f2148a47SJeff Kirsher 			DMI_MATCH(DMI_BIOS_VERSION, "6.00 PG"),
2530f2148a47SJeff Kirsher 		},
2531f2148a47SJeff Kirsher 	},
2532f2148a47SJeff Kirsher 	{ NULL }
2533f2148a47SJeff Kirsher };
2534f2148a47SJeff Kirsher 
2535f2148a47SJeff Kirsher static int __init rhine_init(void)
2536f2148a47SJeff Kirsher {
25372d283862SAlexey Charkov 	int ret_pci, ret_platform;
25382d283862SAlexey Charkov 
2539f2148a47SJeff Kirsher /* when a module, this is printed whether or not devices are found in probe */
2540f2148a47SJeff Kirsher #ifdef MODULE
2541f2148a47SJeff Kirsher 	pr_info("%s\n", version);
2542f2148a47SJeff Kirsher #endif
2543f2148a47SJeff Kirsher 	if (dmi_check_system(rhine_dmi_table)) {
2544f2148a47SJeff Kirsher 		/* these BIOSes fail at PXE boot if chip is in D3 */
2545eb939922SRusty Russell 		avoid_D3 = true;
2546f2148a47SJeff Kirsher 		pr_warn("Broken BIOS detected, avoid_D3 enabled\n");
2547f2148a47SJeff Kirsher 	}
2548f2148a47SJeff Kirsher 	else if (avoid_D3)
2549f2148a47SJeff Kirsher 		pr_info("avoid_D3 set\n");
2550f2148a47SJeff Kirsher 
25512d283862SAlexey Charkov 	ret_pci = pci_register_driver(&rhine_driver_pci);
25522d283862SAlexey Charkov 	ret_platform = platform_driver_register(&rhine_driver_platform);
25532d283862SAlexey Charkov 	if ((ret_pci < 0) && (ret_platform < 0))
25542d283862SAlexey Charkov 		return ret_pci;
25552d283862SAlexey Charkov 
25562d283862SAlexey Charkov 	return 0;
2557f2148a47SJeff Kirsher }
2558f2148a47SJeff Kirsher 
2559f2148a47SJeff Kirsher 
2560f2148a47SJeff Kirsher static void __exit rhine_cleanup(void)
2561f2148a47SJeff Kirsher {
25622d283862SAlexey Charkov 	platform_driver_unregister(&rhine_driver_platform);
25632d283862SAlexey Charkov 	pci_unregister_driver(&rhine_driver_pci);
2564f2148a47SJeff Kirsher }
2565f2148a47SJeff Kirsher 
2566f2148a47SJeff Kirsher 
2567f2148a47SJeff Kirsher module_init(rhine_init);
2568f2148a47SJeff Kirsher module_exit(rhine_cleanup);
2569