/* via-rhine.c: A Linux Ethernet device driver for VIA Rhine family chips. */
/*
	Written 1998-2001 by Donald Becker.

	Current Maintainer: Kevin Brace <kevinbrace@bracecomputerlab.com>

	This software may be used and distributed according to the terms of
	the GNU General Public License (GPL), incorporated herein by reference.
	Drivers based on or derived from this code fall under the GPL and must
	retain the authorship, copyright and license notice.  This file is not
	a complete program and may only be used when the entire operating
	system is licensed under the GPL.

	This driver is designed for the VIA VT86C100A Rhine-I.
	It also works with the Rhine-II (6102) and Rhine-III (6105/6105L/6105LOM
	and management NIC 6105M).

	The author may be reached as becker@scyld.com, or C/O
	Scyld Computing Corporation
	410 Severn Ave., Suite 210
	Annapolis MD 21403


	This driver contains some changes from the original Donald Becker
	version. He may or may not be interested in bug reports on this
	code. You can find his versions at:
	http://www.scyld.com/network/via-rhine.html
	[link no longer provides useful info -jgarzik]

*/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#define DRV_NAME	"via-rhine"

#include <linux/types.h>

/* A few user-configurable values.
   These may be modified when a driver module is loaded. */
static int debug = -1;
#define RHINE_MSG_DEFAULT \
        (0x0000)

/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
   Setting to > 1518 effectively disables this feature. */
#if defined(__alpha__) || defined(__arm__) || defined(__hppa__) || \
	defined(CONFIG_SPARC) || defined(__ia64__) ||		   \
	defined(__sh__) || defined(__mips__)
static int rx_copybreak = 1518;
#else
static int rx_copybreak;
#endif

/* Work-around for broken BIOSes: they are unable to get the chip back out of
   power state D3 so PXE booting fails. bootparam(7): via-rhine.avoid_D3=1 */
static bool avoid_D3;

/*
 * In case you are looking for 'options[]' or 'full_duplex[]', they
 * are gone. Use ethtool(8) instead.
 */

/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
   The Rhine has a 64 element 8390-like hash table. */
static const int multicast_filter_limit = 32;
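
/*
 * For reference, a minimal sketch of how a 64-bit hash filter of this
 * style is programmed (the real logic lives in rhine_set_rx_mode(),
 * declared below): the top six bits of the Ethernet CRC of each address
 * select one of the 64 filter bits, e.g.
 *
 *	int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
 *
 *	mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
 */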

/* Operational parameters that are set at compile time. */

/* Keep the ring sizes a power of two for compile efficiency.
 * The compiler will convert <unsigned>'%'<2^N> into a bit mask.
 * Making the Tx ring too large decreases the effectiveness of channel
 * bonding and packet priority.
 * With BQL support, we can increase TX ring safely.
 * There are no ill effects from too-large receive rings.
 */
#define TX_RING_SIZE	64
#define TX_QUEUE_LEN	(TX_RING_SIZE - 6)	/* Limit ring entries actually used. */
#define RX_RING_SIZE	64

/* Operational parameters that usually are not changed. */

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT	(2*HZ)

#define PKT_BUF_SZ	1536	/* Size of each temporary Rx buffer.*/

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>
#include <linux/if_vlan.h>
#include <linux/bitops.h>
#include <linux/workqueue.h>
#include <asm/processor.h>	/* Processor type for cache alignment. */
#include <asm/io.h>
#include <asm/irq.h>
#include <linux/uaccess.h>
#include <linux/dmi.h>

MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("VIA Rhine PCI Fast Ethernet driver");
MODULE_LICENSE("GPL");

module_param(debug, int, 0);
module_param(rx_copybreak, int, 0);
module_param(avoid_D3, bool, 0);
MODULE_PARM_DESC(debug, "VIA Rhine debug message flags");
MODULE_PARM_DESC(rx_copybreak, "VIA Rhine copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(avoid_D3, "Avoid power state D3 (work-around for broken BIOSes)");
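
/*
 * Example module usage (the parameter names are the real ones above;
 * the values shown are only an illustration):
 *
 *	modprobe via-rhine debug=5 rx_copybreak=1518 avoid_D3=1
 *
 * 'debug' is fed to netif_msg_init() at probe time, so it is a message
 * level (number of low NETIF_MSG_* bits to set) rather than a raw mask;
 * a negative value keeps RHINE_MSG_DEFAULT.
 */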

#define MCAM_SIZE	32
#define VCAM_SIZE	32

/*
		Theory of Operation

I. Board Compatibility

This driver is designed for the VIA VT86C100A Rhine-I PCI Fast Ethernet
controller.

II. Board-specific settings

Boards with this chip are functional only in a bus-master PCI slot.

Many operational settings are loaded from the EEPROM to the Config word at
offset 0x78. For most of these settings, this driver assumes that they are
correct.
If this driver is compiled to use PCI memory space operations the EEPROM
must be configured to enable memory ops.

III. Driver operation

IIIa. Ring buffers

This driver uses two statically allocated fixed-size descriptor lists
formed into rings by a branch from the final descriptor to the beginning of
the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
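
As an illustration (a sketch of the Rx ring setup done at open() time, not
a verbatim copy of it), each descriptor's next_desc holds the DMA address
of its successor, and the final descriptor wraps back to the first:

	dma_addr_t next = rp->rx_ring_dma;

	for (i = 0; i < RX_RING_SIZE; i++) {
		next += sizeof(struct rx_desc);
		rp->rx_ring[i].next_desc = cpu_to_le32(next);
	}
	rp->rx_ring[i - 1].next_desc = cpu_to_le32(rp->rx_ring_dma);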

IIIb/c. Transmit/Receive Structure

This driver attempts to use a zero-copy receive and transmit scheme.

Alas, all data buffers are required to start on a 32 bit boundary, so
the driver must often copy transmit packets into bounce buffers.

The driver allocates full frame size skbuffs for the Rx ring buffers at
open() time and passes the skb->data field to the chip as receive data
buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
a fresh skbuff is allocated and the frame is copied to the new skbuff.
When the incoming frame is larger, the skbuff is passed directly up the
protocol stack. Buffers consumed this way are replaced by newly allocated
skbuffs in the last phase of rhine_rx().
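
In outline, the receive path makes the copybreak decision roughly like
this (a sketch of the logic in rhine_rx(), not the exact code):

	if (pkt_len < rx_copybreak) {
		skb = netdev_alloc_skb_ip_align(dev, pkt_len);
		if (skb)
			skb_copy_to_linear_data(skb,
						rp->rx_skbuff[entry]->data,
						pkt_len);
	} else {
		skb = rp->rx_skbuff[entry];
		rp->rx_skbuff[entry] = NULL;
	}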

The RX_COPYBREAK value is chosen to trade off the memory wasted by
using a full-sized skbuff for small frames vs. the copying costs of larger
frames. New boards are typically used in generously configured machines
and the underfilled buffers have negligible impact compared to the benefit of
a single allocation size, so the default value of zero results in never
copying packets. When copying is done, the cost is usually mitigated by using
a combined copy/checksum routine. Copying also preloads the cache, which is
most useful with small frames.

Since the VIA chips are only able to transfer data to buffers on 32 bit
boundaries, the IP header at offset 14 in an ethernet frame isn't
longword aligned for further processing. Copying these unaligned buffers
has the beneficial effect of 16-byte aligning the IP header.

IIId. Synchronization

The driver runs as two independent, single-threaded flows of control. One
is the send-packet routine, which enforces single-threaded use by the
netdev_priv(dev)->lock spinlock. The other thread is the interrupt handler,
which is single threaded by the hardware and interrupt handling software.

The send packet thread has partial control over the Tx ring. It locks the
netdev_priv(dev)->lock whenever it's queuing a Tx packet. If the next slot in
the ring is not available it stops the transmit queue by
calling netif_stop_queue.

The interrupt handler has exclusive control over the Rx ring and records stats
from the Tx ring. After reaping the stats, it marks the Tx queue entry as
empty by incrementing the dirty_tx mark. If at least half of the entries in
the Tx ring are available the transmit queue is woken up if it was stopped.
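
A sketch of that Tx-reaping step (illustrative only; the real rhine_tx()
also unmaps DMA buffers and collects error statistics):

	while (rp->dirty_tx != rp->cur_tx) {
		unsigned int entry = rp->dirty_tx % TX_RING_SIZE;
		u32 txstatus = le32_to_cpu(rp->tx_ring[entry].tx_status);

		if (txstatus & DescOwn)
			break;
		dev_consume_skb_any(rp->tx_skbuff[entry]);
		rp->tx_skbuff[entry] = NULL;
		rp->dirty_tx++;
	}
	if (netif_queue_stopped(dev) &&
	    rp->cur_tx - rp->dirty_tx < TX_QUEUE_LEN)
		netif_wake_queue(dev);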

IV. Notes

IVb. References

Preliminary VT86C100A manual from http://www.via.com.tw/
http://www.scyld.com/expert/100mbps.html
http://www.scyld.com/expert/NWay.html
ftp://ftp.via.com.tw/public/lan/Products/NIC/VT86C100A/Datasheet/VT86C100A03.pdf
ftp://ftp.via.com.tw/public/lan/Products/NIC/VT6102/Datasheet/VT6102_021.PDF


IVc. Errata

The VT86C100A manual is not a reliable source of information.
The 3043 chip does not handle unaligned transmit or receive buffers, resulting
in significant performance degradation for bounce buffer copies on transmit
and unaligned IP headers on receive.
The chip does not pad to minimum transmit length.

*/


/* This table drives the PCI probe routines. It's mostly boilerplate in all
   of the drivers, and will likely be provided by some future kernel.
   Note the matching code -- the first table entry matches all 56** cards but
   the second only the 1234 card.
*/

enum rhine_revs {
	VT86C100A	= 0x00,
	VTunknown0	= 0x20,
	VT6102		= 0x40,
	VT8231		= 0x50,	/* Integrated MAC */
	VT8233		= 0x60,	/* Integrated MAC */
	VT8235		= 0x74,	/* Integrated MAC */
	VT8237		= 0x78,	/* Integrated MAC */
	VT8251		= 0x7C,	/* Integrated MAC */
	VT6105		= 0x80,
	VT6105_B0	= 0x83,
	VT6105L		= 0x8A,
	VT6107		= 0x8C,
	VTunknown2	= 0x8E,
	VT6105M		= 0x90,	/* Management adapter */
};

enum rhine_quirks {
	rqWOL		= 0x0001,	/* Wake-On-LAN support */
	rqForceReset	= 0x0002,
	rq6patterns	= 0x0040,	/* 6 instead of 4 patterns for WOL */
	rqStatusWBRace	= 0x0080,	/* Tx Status Writeback Error possible */
	rqRhineI	= 0x0100,	/* See comment below */
	rqIntPHY	= 0x0200,	/* Integrated PHY */
	rqMgmt		= 0x0400,	/* Management adapter */
	rqNeedEnMMIO	= 0x0800,	/* Whether the core needs to be
					 * switched from PIO mode to MMIO
					 * (only applies to PCI)
					 */
};
/*
 * rqRhineI: VT86C100A (aka Rhine-I) uses different bits to enable
 * MMIO as well as for the collision counter and the Tx FIFO underflow
 * indicator. In addition, Tx and Rx buffers need to be 4-byte aligned.
 */

/* Beware of PCI posted writes */
#define IOSYNC	do { ioread8(ioaddr + StationAddr); } while (0)
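
/*
 * Typical use (as in rhine_chip_reset() below): issue a write, then read
 * a harmless register back so the posted write is actually flushed to
 * the device before its effect is tested:
 *
 *	iowrite8(Cmd1Reset, ioaddr + ChipCmd1);
 *	IOSYNC;
 */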

static const struct pci_device_id rhine_pci_tbl[] = {
	{ 0x1106, 0x3043, PCI_ANY_ID, PCI_ANY_ID, },	/* VT86C100A */
	{ 0x1106, 0x3065, PCI_ANY_ID, PCI_ANY_ID, },	/* VT6102 */
	{ 0x1106, 0x3106, PCI_ANY_ID, PCI_ANY_ID, },	/* 6105{,L,LOM} */
	{ 0x1106, 0x3053, PCI_ANY_ID, PCI_ANY_ID, },	/* VT6105M */
	{ }	/* terminate list */
};
MODULE_DEVICE_TABLE(pci, rhine_pci_tbl);

/* OpenFirmware identifiers for platform-bus devices
 * The .data field is currently only used to store quirks
 */
static u32 vt8500_quirks = rqWOL | rqForceReset | rq6patterns;
static const struct of_device_id rhine_of_tbl[] = {
	{ .compatible = "via,vt8500-rhine", .data = &vt8500_quirks },
	{ }	/* terminate list */
};
MODULE_DEVICE_TABLE(of, rhine_of_tbl);

/* Offsets to the device registers. */
enum register_offsets {
	StationAddr=0x00, RxConfig=0x06, TxConfig=0x07, ChipCmd=0x08,
	ChipCmd1=0x09, TQWake=0x0A,
	IntrStatus=0x0C, IntrEnable=0x0E,
	MulticastFilter0=0x10, MulticastFilter1=0x14,
	RxRingPtr=0x18, TxRingPtr=0x1C, GFIFOTest=0x54,
	MIIPhyAddr=0x6C, MIIStatus=0x6D, PCIBusConfig=0x6E, PCIBusConfig1=0x6F,
	MIICmd=0x70, MIIRegAddr=0x71, MIIData=0x72, MACRegEEcsr=0x74,
	ConfigA=0x78, ConfigB=0x79, ConfigC=0x7A, ConfigD=0x7B,
	RxMissed=0x7C, RxCRCErrs=0x7E, MiscCmd=0x81,
	StickyHW=0x83, IntrStatus2=0x84,
	CamMask=0x88, CamCon=0x92, CamAddr=0x93,
	WOLcrSet=0xA0, PwcfgSet=0xA1, WOLcgSet=0xA3, WOLcrClr=0xA4,
	WOLcrClr1=0xA6, WOLcgClr=0xA7,
	PwrcsrSet=0xA8, PwrcsrSet1=0xA9, PwrcsrClr=0xAC, PwrcsrClr1=0xAD,
};

/* Bits in ConfigD */
enum backoff_bits {
	BackOptional=0x01, BackModify=0x02,
	BackCaptureEffect=0x04, BackRandom=0x08
};

/* Bits in the TxConfig (TCR) register */
enum tcr_bits {
	TCR_PQEN=0x01,
	TCR_LB0=0x02,		/* loopback[0] */
	TCR_LB1=0x04,		/* loopback[1] */
	TCR_OFSET=0x08,
	TCR_RTGOPT=0x10,
	TCR_RTFT0=0x20,
	TCR_RTFT1=0x40,
	TCR_RTSF=0x80,
};

/* Bits in the CamCon (CAMC) register */
enum camcon_bits {
	CAMC_CAMEN=0x01,
	CAMC_VCAMSL=0x02,
	CAMC_CAMWR=0x04,
	CAMC_CAMRD=0x08,
};

/* Bits in the PCIBusConfig1 (BCR1) register */
enum bcr1_bits {
	BCR1_POT0=0x01,
	BCR1_POT1=0x02,
	BCR1_POT2=0x04,
	BCR1_CTFT0=0x08,
	BCR1_CTFT1=0x10,
	BCR1_CTSF=0x20,
	BCR1_TXQNOBK=0x40,	/* for VT6105 */
	BCR1_VIDFR=0x80,	/* for VT6105 */
	BCR1_MED0=0x40,		/* for VT6102 */
	BCR1_MED1=0x80,		/* for VT6102 */
};
/* Registers we check to make sure MMIO and PIO accesses read the same. */
static const int mmio_verify_registers[] = {
	RxConfig, TxConfig, IntrEnable, ConfigA, ConfigB, ConfigC, ConfigD,
	0
};

/* Bits in the interrupt status/mask registers. */
enum intr_status_bits {
	IntrRxDone	= 0x0001,
	IntrTxDone	= 0x0002,
	IntrRxErr	= 0x0004,
	IntrTxError	= 0x0008,
	IntrRxEmpty	= 0x0020,
	IntrPCIErr	= 0x0040,
	IntrStatsMax	= 0x0080,
	IntrRxEarly	= 0x0100,
	IntrTxUnderrun	= 0x0210,
	IntrRxOverflow	= 0x0400,
	IntrRxDropped	= 0x0800,
	IntrRxNoBuf	= 0x1000,
	IntrTxAborted	= 0x2000,
	IntrLinkChange	= 0x4000,
	IntrRxWakeUp	= 0x8000,
	IntrTxDescRace		= 0x080000,	/* mapped from IntrStatus2 */
	IntrNormalSummary	= IntrRxDone | IntrTxDone,
	IntrTxErrSummary	= IntrTxDescRace | IntrTxAborted | IntrTxError |
				  IntrTxUnderrun,
};

/* Bits in WOLcrSet/WOLcrClr and PwrcsrSet/PwrcsrClr */
enum wol_bits {
	WOLucast	= 0x10,
	WOLmagic	= 0x20,
	WOLbmcast	= 0x30,
	WOLlnkon	= 0x40,
	WOLlnkoff	= 0x80,
};

/* The Rx and Tx buffer descriptors. */
struct rx_desc {
	__le32 rx_status;
	__le32 desc_length; /* Chain flag, Buffer/frame length */
	__le32 addr;
	__le32 next_desc;
};
struct tx_desc {
	__le32 tx_status;
	__le32 desc_length; /* Chain flag, Tx Config, Frame length */
	__le32 addr;
	__le32 next_desc;
};

/* Initial value for tx_desc.desc_length, Buffer size goes to bits 0-10 */
#define TXDESC		0x00e08000

enum rx_status_bits {
	RxOK=0x8000, RxWholePkt=0x0300, RxErr=0x008F
};

/* Bits in *_desc.*_status */
enum desc_status_bits {
	DescOwn=0x80000000
};

/* Bits in *_desc.*_length */
enum desc_length_bits {
	DescTag=0x00010000
};

/* Bits in ChipCmd. */
enum chip_cmd_bits {
	CmdInit=0x01, CmdStart=0x02, CmdStop=0x04, CmdRxOn=0x08,
	CmdTxOn=0x10, Cmd1TxDemand=0x20, CmdRxDemand=0x40,
	Cmd1EarlyRx=0x01, Cmd1EarlyTx=0x02, Cmd1FDuplex=0x04,
	Cmd1NoTxPoll=0x08, Cmd1Reset=0x80,
};

struct rhine_stats {
	u64		packets;
	u64		bytes;
	struct u64_stats_sync syncp;
};

struct rhine_private {
	/* Bit mask for configured VLAN ids */
	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];

	/* Descriptor rings */
	struct rx_desc *rx_ring;
	struct tx_desc *tx_ring;
	dma_addr_t rx_ring_dma;
	dma_addr_t tx_ring_dma;

	/* The addresses of receive-in-place skbuffs. */
	struct sk_buff *rx_skbuff[RX_RING_SIZE];
	dma_addr_t rx_skbuff_dma[RX_RING_SIZE];

	/* The saved address of a sent-in-place packet/buffer, for later free(). */
	struct sk_buff *tx_skbuff[TX_RING_SIZE];
	dma_addr_t tx_skbuff_dma[TX_RING_SIZE];

	/* Tx bounce buffers (Rhine-I only) */
	unsigned char *tx_buf[TX_RING_SIZE];
	unsigned char *tx_bufs;
	dma_addr_t tx_bufs_dma;

	int irq;
	long pioaddr;
	struct net_device *dev;
	struct napi_struct napi;
	spinlock_t lock;
	struct mutex task_lock;
	bool task_enable;
	struct work_struct slow_event_task;
	struct work_struct reset_task;

	u32 msg_enable;

	/* Frequently used values: keep some adjacent for cache effect. */
	u32 quirks;
	unsigned int cur_rx;
	unsigned int cur_tx, dirty_tx;
	unsigned int rx_buf_sz;		/* Based on MTU+slack. */
	struct rhine_stats rx_stats;
	struct rhine_stats tx_stats;
	u8 wolopts;

	u8 tx_thresh, rx_thresh;

	struct mii_if_info mii_if;
	void __iomem *base;
};

#define BYTE_REG_BITS_ON(x, p)      do { iowrite8((ioread8((p))|(x)), (p)); } while (0)
#define WORD_REG_BITS_ON(x, p)      do { iowrite16((ioread16((p))|(x)), (p)); } while (0)
#define DWORD_REG_BITS_ON(x, p)     do { iowrite32((ioread32((p))|(x)), (p)); } while (0)

#define BYTE_REG_BITS_IS_ON(x, p)   (ioread8((p)) & (x))
#define WORD_REG_BITS_IS_ON(x, p)   (ioread16((p)) & (x))
#define DWORD_REG_BITS_IS_ON(x, p)  (ioread32((p)) & (x))

#define BYTE_REG_BITS_OFF(x, p)     do { iowrite8(ioread8((p)) & (~(x)), (p)); } while (0)
#define WORD_REG_BITS_OFF(x, p)     do { iowrite16(ioread16((p)) & (~(x)), (p)); } while (0)
#define DWORD_REG_BITS_OFF(x, p)    do { iowrite32(ioread32((p)) & (~(x)), (p)); } while (0)

#define BYTE_REG_BITS_SET(x, m, p)   do { iowrite8((ioread8((p)) & (~(m)))|(x), (p)); } while (0)
#define WORD_REG_BITS_SET(x, m, p)   do { iowrite16((ioread16((p)) & (~(m)))|(x), (p)); } while (0)
#define DWORD_REG_BITS_SET(x, m, p)  do { iowrite32((ioread32((p)) & (~(m)))|(x), (p)); } while (0)
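
/*
 * The *_BITS_ON/OFF helpers are plain read-modify-write operations, and
 * *_BITS_SET replaces a masked field in one step. For example,
 * rhine_kick_tx_threshold() below uses
 *
 *	BYTE_REG_BITS_SET(rp->tx_thresh, 0x80, ioaddr + TxConfig);
 *
 * to update the Tx threshold field of TxConfig while leaving the other
 * bits untouched.
 */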

static int  mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
static int  rhine_open(struct net_device *dev);
static void rhine_reset_task(struct work_struct *work);
static void rhine_slow_event_task(struct work_struct *work);
static void rhine_tx_timeout(struct net_device *dev, unsigned int txqueue);
static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
				  struct net_device *dev);
static irqreturn_t rhine_interrupt(int irq, void *dev_instance);
static void rhine_tx(struct net_device *dev);
static int rhine_rx(struct net_device *dev, int limit);
static void rhine_set_rx_mode(struct net_device *dev);
static void rhine_get_stats64(struct net_device *dev,
			      struct rtnl_link_stats64 *stats);
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static const struct ethtool_ops netdev_ethtool_ops;
static int  rhine_close(struct net_device *dev);
static int rhine_vlan_rx_add_vid(struct net_device *dev,
				 __be16 proto, u16 vid);
static int rhine_vlan_rx_kill_vid(struct net_device *dev,
				  __be16 proto, u16 vid);
static void rhine_restart_tx(struct net_device *dev);

static void rhine_wait_bit(struct rhine_private *rp, u8 reg, u8 mask, bool low)
{
	void __iomem *ioaddr = rp->base;
	int i;

	for (i = 0; i < 1024; i++) {
		bool has_mask_bits = !!(ioread8(ioaddr + reg) & mask);

		if (low ^ has_mask_bits)
			break;
		udelay(10);
	}
	if (i > 64) {
		netif_dbg(rp, hw, rp->dev, "%s bit wait (%02x/%02x) cycle "
			  "count: %04d\n", low ? "low" : "high", reg, mask, i);
	}
}

static void rhine_wait_bit_high(struct rhine_private *rp, u8 reg, u8 mask)
{
	rhine_wait_bit(rp, reg, mask, false);
}

static void rhine_wait_bit_low(struct rhine_private *rp, u8 reg, u8 mask)
{
	rhine_wait_bit(rp, reg, mask, true);
}
static u32 rhine_get_events(struct rhine_private *rp)
{
	void __iomem *ioaddr = rp->base;
	u32 intr_status;

	intr_status = ioread16(ioaddr + IntrStatus);
	/* On Rhine-II, Bit 3 indicates Tx descriptor write-back race. */
	if (rp->quirks & rqStatusWBRace)
		intr_status |= ioread8(ioaddr + IntrStatus2) << 16;
	return intr_status;
}

static void rhine_ack_events(struct rhine_private *rp, u32 mask)
{
	void __iomem *ioaddr = rp->base;

	if (rp->quirks & rqStatusWBRace)
		iowrite8(mask >> 16, ioaddr + IntrStatus2);
	iowrite16(mask, ioaddr + IntrStatus);
}

/*
 * Get power related registers into sane state.
 * Notify user about past WOL event.
 */
static void rhine_power_init(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	u16 wolstat;

	if (rp->quirks & rqWOL) {
		/* Make sure chip is in power state D0 */
		iowrite8(ioread8(ioaddr + StickyHW) & 0xFC, ioaddr + StickyHW);

		/* Disable "force PME-enable" */
		iowrite8(0x80, ioaddr + WOLcgClr);

		/* Clear power-event config bits (WOL) */
		iowrite8(0xFF, ioaddr + WOLcrClr);
		/* More recent cards can manage two additional patterns */
		if (rp->quirks & rq6patterns)
			iowrite8(0x03, ioaddr + WOLcrClr1);

		/* Save power-event status bits */
		wolstat = ioread8(ioaddr + PwrcsrSet);
		if (rp->quirks & rq6patterns)
			wolstat |= (ioread8(ioaddr + PwrcsrSet1) & 0x03) << 8;

		/* Clear power-event status bits */
		iowrite8(0xFF, ioaddr + PwrcsrClr);
		if (rp->quirks & rq6patterns)
			iowrite8(0x03, ioaddr + PwrcsrClr1);

		if (wolstat) {
			char *reason;
			switch (wolstat) {
			case WOLmagic:
				reason = "Magic packet";
				break;
			case WOLlnkon:
				reason = "Link went up";
				break;
			case WOLlnkoff:
				reason = "Link went down";
				break;
			case WOLucast:
				reason = "Unicast packet";
				break;
			case WOLbmcast:
				reason = "Multicast/broadcast packet";
				break;
			default:
				reason = "Unknown";
			}
			netdev_info(dev, "Woke system up. Reason: %s\n",
				    reason);
		}
	}
}

static void rhine_chip_reset(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	u8 cmd1;

	iowrite8(Cmd1Reset, ioaddr + ChipCmd1);
	IOSYNC;

	if (ioread8(ioaddr + ChipCmd1) & Cmd1Reset) {
		netdev_info(dev, "Reset not complete yet. Trying harder.\n");

		/* Force reset */
		if (rp->quirks & rqForceReset)
			iowrite8(0x40, ioaddr + MiscCmd);

		/* Reset can take somewhat longer (rare) */
		rhine_wait_bit_low(rp, ChipCmd1, Cmd1Reset);
	}

	cmd1 = ioread8(ioaddr + ChipCmd1);
	netif_info(rp, hw, dev, "Reset %s\n", (cmd1 & Cmd1Reset) ?
		   "failed" : "succeeded");
}

static void enable_mmio(long pioaddr, u32 quirks)
{
	int n;

	if (quirks & rqNeedEnMMIO) {
		if (quirks & rqRhineI) {
			/* More recent docs say that this bit is reserved */
			n = inb(pioaddr + ConfigA) | 0x20;
			outb(n, pioaddr + ConfigA);
		} else {
			n = inb(pioaddr + ConfigD) | 0x80;
			outb(n, pioaddr + ConfigD);
		}
	}
}

static inline int verify_mmio(struct device *hwdev,
			      long pioaddr,
			      void __iomem *ioaddr,
			      u32 quirks)
{
	if (quirks & rqNeedEnMMIO) {
		int i = 0;

		/* Check that selected MMIO registers match the PIO ones */
		while (mmio_verify_registers[i]) {
			int reg = mmio_verify_registers[i++];
			unsigned char a = inb(pioaddr+reg);
			unsigned char b = readb(ioaddr+reg);

			if (a != b) {
				dev_err(hwdev,
					"MMIO does not match PIO [%02x] (%02x != %02x)\n",
					reg, a, b);
				return -EIO;
			}
		}
	}
	return 0;
}

/*
 * Loads bytes 0x00-0x05, 0x6E-0x6F, 0x78-0x7B from EEPROM
 * (plus 0x6C for Rhine-I/II)
 */
static void rhine_reload_eeprom(long pioaddr, struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	int i;

	outb(0x20, pioaddr + MACRegEEcsr);
	for (i = 0; i < 1024; i++) {
		if (!(inb(pioaddr + MACRegEEcsr) & 0x20))
			break;
	}
	if (i > 512)
		pr_info("%4d cycles used @ %s:%d\n", i, __func__, __LINE__);

	/*
	 * Reloading from EEPROM overwrites ConfigA-D, so we must re-enable
	 * MMIO. If reloading EEPROM was done first this could be avoided, but
	 * it is not known if that still works with the "win98-reboot" problem.
	 */
	enable_mmio(pioaddr, rp->quirks);

	/* Turn off EEPROM-controlled wake-up (magic packet) */
	if (rp->quirks & rqWOL)
		iowrite8(ioread8(ioaddr + ConfigA) & 0xFC, ioaddr + ConfigA);

}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void rhine_poll(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	const int irq = rp->irq;

	disable_irq(irq);
	rhine_interrupt(irq, dev);
	enable_irq(irq);
}
#endif

static void rhine_kick_tx_threshold(struct rhine_private *rp)
{
	if (rp->tx_thresh < 0xe0) {
		void __iomem *ioaddr = rp->base;

		rp->tx_thresh += 0x20;
		BYTE_REG_BITS_SET(rp->tx_thresh, 0x80, ioaddr + TxConfig);
	}
}

static void rhine_tx_err(struct rhine_private *rp, u32 status)
{
	struct net_device *dev = rp->dev;

	if (status & IntrTxAborted) {
		netif_info(rp, tx_err, dev,
			   "Abort %08x, frame dropped\n", status);
	}

	if (status & IntrTxUnderrun) {
		rhine_kick_tx_threshold(rp);
		netif_info(rp, tx_err, dev, "Transmitter underrun, "
			   "Tx threshold now %02x\n", rp->tx_thresh);
	}

	if (status & IntrTxDescRace)
		netif_info(rp, tx_err, dev, "Tx descriptor write-back race\n");

	if ((status & IntrTxError) &&
	    (status & (IntrTxAborted | IntrTxUnderrun | IntrTxDescRace)) == 0) {
		rhine_kick_tx_threshold(rp);
		netif_info(rp, tx_err, dev, "Unspecified error. "
			   "Tx threshold now %02x\n", rp->tx_thresh);
	}

	rhine_restart_tx(dev);
}

static void rhine_update_rx_crc_and_missed_errors(struct rhine_private *rp)
{
	void __iomem *ioaddr = rp->base;
	struct net_device_stats *stats = &rp->dev->stats;

	stats->rx_crc_errors    += ioread16(ioaddr + RxCRCErrs);
	stats->rx_missed_errors += ioread16(ioaddr + RxMissed);

	/*
	 * Clears the "tally counters" for CRC errors and missed frames(?).
	 * It has been reported that some chips need a write of 0 to clear
	 * these, for others the counters are set to 1 when written to and
	 * instead cleared when read. So we clear them both ways ...
	 */
	iowrite32(0, ioaddr + RxMissed);
	ioread16(ioaddr + RxCRCErrs);
	ioread16(ioaddr + RxMissed);
}
#define RHINE_EVENT_NAPI_RX	(IntrRxDone | \
				 IntrRxErr | \
				 IntrRxEmpty | \
				 IntrRxOverflow	| \
				 IntrRxDropped | \
				 IntrRxNoBuf | \
				 IntrRxWakeUp)

#define RHINE_EVENT_NAPI_TX_ERR	(IntrTxError | \
				 IntrTxAborted | \
				 IntrTxUnderrun | \
				 IntrTxDescRace)
#define RHINE_EVENT_NAPI_TX	(IntrTxDone | RHINE_EVENT_NAPI_TX_ERR)

#define RHINE_EVENT_NAPI	(RHINE_EVENT_NAPI_RX | \
				 RHINE_EVENT_NAPI_TX | \
				 IntrStatsMax)
#define RHINE_EVENT_SLOW	(IntrPCIErr | IntrLinkChange)
#define RHINE_EVENT		(RHINE_EVENT_NAPI | RHINE_EVENT_SLOW)

static int rhine_napipoll(struct napi_struct *napi, int budget)
{
	struct rhine_private *rp = container_of(napi, struct rhine_private, napi);
	struct net_device *dev = rp->dev;
	void __iomem *ioaddr = rp->base;
	u16 enable_mask = RHINE_EVENT & 0xffff;
	int work_done = 0;
	u32 status;

	status = rhine_get_events(rp);
	rhine_ack_events(rp, status & ~RHINE_EVENT_SLOW);

	if (status & RHINE_EVENT_NAPI_RX)
		work_done += rhine_rx(dev, budget);

	if (status & RHINE_EVENT_NAPI_TX) {
		if (status & RHINE_EVENT_NAPI_TX_ERR) {
			/* Avoid scavenging before Tx engine turned off */
			rhine_wait_bit_low(rp, ChipCmd, CmdTxOn);
			if (ioread8(ioaddr + ChipCmd) & CmdTxOn)
				netif_warn(rp, tx_err, dev, "Tx still on\n");
		}

		rhine_tx(dev);

		if (status & RHINE_EVENT_NAPI_TX_ERR)
			rhine_tx_err(rp, status);
	}

	if (status & IntrStatsMax) {
		spin_lock(&rp->lock);
		rhine_update_rx_crc_and_missed_errors(rp);
		spin_unlock(&rp->lock);
	}

	if (status & RHINE_EVENT_SLOW) {
		enable_mask &= ~RHINE_EVENT_SLOW;
		schedule_work(&rp->slow_event_task);
	}

	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		iowrite16(enable_mask, ioaddr + IntrEnable);
	}
	return work_done;
}
static void rhine_hw_init(struct net_device *dev, long pioaddr)
{
	struct rhine_private *rp = netdev_priv(dev);

	/* Reset the chip to erase previous misconfiguration. */
	rhine_chip_reset(dev);

	/* Rhine-I needs extra time to recuperate before EEPROM reload */
	if (rp->quirks & rqRhineI)
		msleep(5);

	/* Reload EEPROM controlled bytes cleared by soft reset */
	if (dev_is_pci(dev->dev.parent))
		rhine_reload_eeprom(pioaddr, dev);
}

static const struct net_device_ops rhine_netdev_ops = {
	.ndo_open		 = rhine_open,
	.ndo_stop		 = rhine_close,
	.ndo_start_xmit		 = rhine_start_tx,
	.ndo_get_stats64	 = rhine_get_stats64,
	.ndo_set_rx_mode	 = rhine_set_rx_mode,
	.ndo_validate_addr	 = eth_validate_addr,
	.ndo_set_mac_address	 = eth_mac_addr,
	.ndo_eth_ioctl		 = netdev_ioctl,
	.ndo_tx_timeout		 = rhine_tx_timeout,
	.ndo_vlan_rx_add_vid	 = rhine_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	 = rhine_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	 = rhine_poll,
#endif
};
static int rhine_init_one_common(struct device *hwdev, u32 quirks,
				 long pioaddr, void __iomem *ioaddr, int irq)
{
	struct net_device *dev;
	struct rhine_private *rp;
	int i, rc, phy_id;
	u8 addr[ETH_ALEN];
	const char *name;

	/* this should always be supported */
	rc = dma_set_mask(hwdev, DMA_BIT_MASK(32));
	if (rc) {
		dev_err(hwdev, "32-bit DMA addresses not supported by the card!?\n");
		goto err_out;
	}

	dev = alloc_etherdev(sizeof(struct rhine_private));
	if (!dev) {
		rc = -ENOMEM;
		goto err_out;
	}
	SET_NETDEV_DEV(dev, hwdev);

	rp = netdev_priv(dev);
	rp->dev = dev;
	rp->quirks = quirks;
	rp->pioaddr = pioaddr;
	rp->base = ioaddr;
	rp->irq = irq;
	rp->msg_enable = netif_msg_init(debug, RHINE_MSG_DEFAULT);

	phy_id = rp->quirks & rqIntPHY ? 1 : 0;

	u64_stats_init(&rp->tx_stats.syncp);
	u64_stats_init(&rp->rx_stats.syncp);

	/* Get chip registers into a sane state */
	rhine_power_init(dev);
	rhine_hw_init(dev, pioaddr);

	for (i = 0; i < ETH_ALEN; i++)
		addr[i] = ioread8(ioaddr + StationAddr + i);
	eth_hw_addr_set(dev, addr);

	if (!is_valid_ether_addr(dev->dev_addr)) {
		/* Report it and use a random ethernet address instead */
		netdev_err(dev, "Invalid MAC address: %pM\n", dev->dev_addr);
		eth_hw_addr_random(dev);
		netdev_info(dev, "Using random MAC address: %pM\n",
			    dev->dev_addr);
	}

	/* For Rhine-I/II, phy_id is loaded from EEPROM */
	if (!phy_id)
		phy_id = ioread8(ioaddr + 0x6C);

	spin_lock_init(&rp->lock);
	mutex_init(&rp->task_lock);
	INIT_WORK(&rp->reset_task, rhine_reset_task);
	INIT_WORK(&rp->slow_event_task, rhine_slow_event_task);

	rp->mii_if.dev = dev;
	rp->mii_if.mdio_read = mdio_read;
	rp->mii_if.mdio_write = mdio_write;
	rp->mii_if.phy_id_mask = 0x1f;
	rp->mii_if.reg_num_mask = 0x1f;

	/* The chip-specific entries in the device structure. */
	dev->netdev_ops = &rhine_netdev_ops;
	dev->ethtool_ops = &netdev_ethtool_ops;
	dev->watchdog_timeo = TX_TIMEOUT;

	netif_napi_add(dev, &rp->napi, rhine_napipoll);

	if (rp->quirks & rqRhineI)
		dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM;

	if (rp->quirks & rqMgmt)
		dev->features |= NETIF_F_HW_VLAN_CTAG_TX |
				 NETIF_F_HW_VLAN_CTAG_RX |
				 NETIF_F_HW_VLAN_CTAG_FILTER;

	/* dev->name not defined before register_netdev()! */
	rc = register_netdev(dev);
	if (rc)
		goto err_out_free_netdev;

	if (rp->quirks & rqRhineI)
		name = "Rhine";
	else if (rp->quirks & rqStatusWBRace)
		name = "Rhine II";
	else if (rp->quirks & rqMgmt)
		name = "Rhine III (Management Adapter)";
	else
		name = "Rhine III";

	netdev_info(dev, "VIA %s at %p, %pM, IRQ %d\n",
		    name, ioaddr, dev->dev_addr, rp->irq);

	dev_set_drvdata(hwdev, dev);

	{
		u16 mii_cmd;
		int mii_status = mdio_read(dev, phy_id, 1);
		mii_cmd = mdio_read(dev, phy_id, MII_BMCR) & ~BMCR_ISOLATE;
		mdio_write(dev, phy_id, MII_BMCR, mii_cmd);
		if (mii_status != 0xffff && mii_status != 0x0000) {
			rp->mii_if.advertising = mdio_read(dev, phy_id, 4);
			netdev_info(dev,
				    "MII PHY found at address %d, status 0x%04x advertising %04x Link %04x\n",
				    phy_id,
				    mii_status, rp->mii_if.advertising,
				    mdio_read(dev, phy_id, 5));

			/* set IFF_RUNNING */
			if (mii_status & BMSR_LSTATUS)
				netif_carrier_on(dev);
			else
				netif_carrier_off(dev);

		}
	}
	rp->mii_if.phy_id = phy_id;
	if (avoid_D3)
		netif_info(rp, probe, dev, "No D3 power state at shutdown\n");

	return 0;

err_out_free_netdev:
	free_netdev(dev);
err_out:
	return rc;
}
10292d283862SAlexey Charkov 
10302d283862SAlexey Charkov static int rhine_init_one_pci(struct pci_dev *pdev,
10312d283862SAlexey Charkov 			      const struct pci_device_id *ent)
10322d283862SAlexey Charkov {
10332d283862SAlexey Charkov 	struct device *hwdev = &pdev->dev;
10345b579e21SAlexey Charkov 	int rc;
10352d283862SAlexey Charkov 	long pioaddr, memaddr;
10362d283862SAlexey Charkov 	void __iomem *ioaddr;
10372d283862SAlexey Charkov 	int io_size = pdev->revision < VTunknown0 ? 128 : 256;
10385b579e21SAlexey Charkov 
10395b579e21SAlexey Charkov /* This driver was written to use PCI memory space. Some early versions
10405b579e21SAlexey Charkov  * of the Rhine may only work correctly with I/O space accesses.
10415b579e21SAlexey Charkov  * TODO: determine for which revisions this is true and assign the flag
10425b579e21SAlexey Charkov  *	 in code as opposed to this Kconfig option (???)
10435b579e21SAlexey Charkov  */
10445b579e21SAlexey Charkov #ifdef CONFIG_VIA_RHINE_MMIO
10455b579e21SAlexey Charkov 	u32 quirks = rqNeedEnMMIO;
10462d283862SAlexey Charkov #else
10475b579e21SAlexey Charkov 	u32 quirks = 0;
10482d283862SAlexey Charkov #endif
10492d283862SAlexey Charkov 
10502d283862SAlexey Charkov 	rc = pci_enable_device(pdev);
10512d283862SAlexey Charkov 	if (rc)
10522d283862SAlexey Charkov 		goto err_out;
10532d283862SAlexey Charkov 
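	/* The PCI revision ID encodes the chip generation; the checks below
	 * derive the feature quirks from it: Rhine-I (< VTunknown0),
	 * VT6102+ (WOL, forced reset), VT6105+ (integrated PHY),
	 * VT6105M (management features).
	 */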
1054ca8b6e04SAlexey Charkov 	if (pdev->revision < VTunknown0) {
10555b579e21SAlexey Charkov 		quirks |= rqRhineI;
1056ca8b6e04SAlexey Charkov 	} else if (pdev->revision >= VT6102) {
10575b579e21SAlexey Charkov 		quirks |= rqWOL | rqForceReset;
1058ca8b6e04SAlexey Charkov 		if (pdev->revision < VT6105) {
1059ca8b6e04SAlexey Charkov 			quirks |= rqStatusWBRace;
1060ca8b6e04SAlexey Charkov 		} else {
1061ca8b6e04SAlexey Charkov 			quirks |= rqIntPHY;
1062ca8b6e04SAlexey Charkov 			if (pdev->revision >= VT6105_B0)
1063ca8b6e04SAlexey Charkov 				quirks |= rq6patterns;
1064ca8b6e04SAlexey Charkov 			if (pdev->revision >= VT6105M)
1065ca8b6e04SAlexey Charkov 				quirks |= rqMgmt;
1066ca8b6e04SAlexey Charkov 		}
1067ca8b6e04SAlexey Charkov 	}
1068ca8b6e04SAlexey Charkov 
10692d283862SAlexey Charkov 	/* sanity check */
10702d283862SAlexey Charkov 	if ((pci_resource_len(pdev, 0) < io_size) ||
10712d283862SAlexey Charkov 	    (pci_resource_len(pdev, 1) < io_size)) {
10722d283862SAlexey Charkov 		rc = -EIO;
10732d283862SAlexey Charkov 		dev_err(hwdev, "Insufficient PCI resources, aborting\n");
10742d283862SAlexey Charkov 		goto err_out_pci_disable;
10752d283862SAlexey Charkov 	}
10762d283862SAlexey Charkov 
10772d283862SAlexey Charkov 	pioaddr = pci_resource_start(pdev, 0);
10782d283862SAlexey Charkov 	memaddr = pci_resource_start(pdev, 1);
10792d283862SAlexey Charkov 
10802d283862SAlexey Charkov 	pci_set_master(pdev);
10812d283862SAlexey Charkov 
10822d283862SAlexey Charkov 	rc = pci_request_regions(pdev, DRV_NAME);
10832d283862SAlexey Charkov 	if (rc)
10842d283862SAlexey Charkov 		goto err_out_pci_disable;
10852d283862SAlexey Charkov 
10865b579e21SAlexey Charkov 	ioaddr = pci_iomap(pdev, (quirks & rqNeedEnMMIO ? 1 : 0), io_size);
10872d283862SAlexey Charkov 	if (!ioaddr) {
10882d283862SAlexey Charkov 		rc = -EIO;
10892d283862SAlexey Charkov 		dev_err(hwdev,
10902d283862SAlexey Charkov 			"ioremap failed for device %s, region 0x%X @ 0x%lX\n",
10912d283862SAlexey Charkov 			dev_name(hwdev), io_size, memaddr);
10922d283862SAlexey Charkov 		goto err_out_free_res;
10932d283862SAlexey Charkov 	}
10942d283862SAlexey Charkov 
10952d283862SAlexey Charkov 	enable_mmio(pioaddr, quirks);
10962d283862SAlexey Charkov 
10975b579e21SAlexey Charkov 	rc = verify_mmio(hwdev, pioaddr, ioaddr, quirks);
10985b579e21SAlexey Charkov 	if (rc)
10992d283862SAlexey Charkov 		goto err_out_unmap;
11002d283862SAlexey Charkov 
1101ca8b6e04SAlexey Charkov 	rc = rhine_init_one_common(&pdev->dev, quirks,
11022d283862SAlexey Charkov 				   pioaddr, ioaddr, pdev->irq);
11032d283862SAlexey Charkov 	if (!rc)
11042d283862SAlexey Charkov 		return 0;
11052d283862SAlexey Charkov 
1106f2148a47SJeff Kirsher err_out_unmap:
1107f2148a47SJeff Kirsher 	pci_iounmap(pdev, ioaddr);
1108f2148a47SJeff Kirsher err_out_free_res:
1109f2148a47SJeff Kirsher 	pci_release_regions(pdev);
1110ae996154SRoger Luethi err_out_pci_disable:
1111ae996154SRoger Luethi 	pci_disable_device(pdev);
1112f2148a47SJeff Kirsher err_out:
1113f2148a47SJeff Kirsher 	return rc;
1114f2148a47SJeff Kirsher }
1115f2148a47SJeff Kirsher 
11162d283862SAlexey Charkov static int rhine_init_one_platform(struct platform_device *pdev)
11172d283862SAlexey Charkov {
1118ca8b6e04SAlexey Charkov 	const u32 *quirks;
11192d283862SAlexey Charkov 	int irq;
11202d283862SAlexey Charkov 	void __iomem *ioaddr;
11212d283862SAlexey Charkov 
1122b708a96dSTang Bin 	quirks = of_device_get_match_data(&pdev->dev);
1123b708a96dSTang Bin 	if (!quirks)
11242d283862SAlexey Charkov 		return -EINVAL;
11252d283862SAlexey Charkov 
11268a54d4c2SYueHaibing 	ioaddr = devm_platform_ioremap_resource(pdev, 0);
11272d283862SAlexey Charkov 	if (IS_ERR(ioaddr))
11282d283862SAlexey Charkov 		return PTR_ERR(ioaddr);
11292d283862SAlexey Charkov 
11302d283862SAlexey Charkov 	irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
11312d283862SAlexey Charkov 	if (!irq)
11322d283862SAlexey Charkov 		return -EINVAL;
11332d283862SAlexey Charkov 
1134ca8b6e04SAlexey Charkov 	return rhine_init_one_common(&pdev->dev, *quirks,
11352d283862SAlexey Charkov 				     (long)ioaddr, ioaddr, irq);
11362d283862SAlexey Charkov }
11372d283862SAlexey Charkov 
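/* The Rx and Tx descriptor rings share one coherent DMA allocation;
 * Rhine-I parts also get a block of aligned Tx bounce buffers, since they
 * cannot transmit some skbs directly (see rhine_start_tx).
 */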
1138f2148a47SJeff Kirsher static int alloc_ring(struct net_device* dev)
1139f2148a47SJeff Kirsher {
1140f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
1141f7630d18SAlexey Charkov 	struct device *hwdev = dev->dev.parent;
1142f2148a47SJeff Kirsher 	void *ring;
1143f2148a47SJeff Kirsher 	dma_addr_t ring_dma;
1144f2148a47SJeff Kirsher 
1145f7630d18SAlexey Charkov 	ring = dma_alloc_coherent(hwdev,
1146f2148a47SJeff Kirsher 				  RX_RING_SIZE * sizeof(struct rx_desc) +
1147f2148a47SJeff Kirsher 				  TX_RING_SIZE * sizeof(struct tx_desc),
11484087c4dcSAlexey Charkov 				  &ring_dma,
11494087c4dcSAlexey Charkov 				  GFP_ATOMIC);
1150f2148a47SJeff Kirsher 	if (!ring) {
1151f2148a47SJeff Kirsher 		netdev_err(dev, "Could not allocate DMA memory\n");
1152f2148a47SJeff Kirsher 		return -ENOMEM;
1153f2148a47SJeff Kirsher 	}
1154f2148a47SJeff Kirsher 	if (rp->quirks & rqRhineI) {
1155f7630d18SAlexey Charkov 		rp->tx_bufs = dma_alloc_coherent(hwdev,
1156f2148a47SJeff Kirsher 						 PKT_BUF_SZ * TX_RING_SIZE,
11574087c4dcSAlexey Charkov 						 &rp->tx_bufs_dma,
11584087c4dcSAlexey Charkov 						 GFP_ATOMIC);
1159f2148a47SJeff Kirsher 		if (rp->tx_bufs == NULL) {
1160f7630d18SAlexey Charkov 			dma_free_coherent(hwdev,
1161f2148a47SJeff Kirsher 					  RX_RING_SIZE * sizeof(struct rx_desc) +
1162f2148a47SJeff Kirsher 					  TX_RING_SIZE * sizeof(struct tx_desc),
1163f2148a47SJeff Kirsher 					  ring, ring_dma);
1164f2148a47SJeff Kirsher 			return -ENOMEM;
1165f2148a47SJeff Kirsher 		}
1166f2148a47SJeff Kirsher 	}
1167f2148a47SJeff Kirsher 
1168f2148a47SJeff Kirsher 	rp->rx_ring = ring;
1169f2148a47SJeff Kirsher 	rp->tx_ring = ring + RX_RING_SIZE * sizeof(struct rx_desc);
1170f2148a47SJeff Kirsher 	rp->rx_ring_dma = ring_dma;
1171f2148a47SJeff Kirsher 	rp->tx_ring_dma = ring_dma + RX_RING_SIZE * sizeof(struct rx_desc);
1172f2148a47SJeff Kirsher 
1173f2148a47SJeff Kirsher 	return 0;
1174f2148a47SJeff Kirsher }
1175f2148a47SJeff Kirsher 
1176f2148a47SJeff Kirsher static void free_ring(struct net_device* dev)
1177f2148a47SJeff Kirsher {
1178f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
1179f7630d18SAlexey Charkov 	struct device *hwdev = dev->dev.parent;
1180f2148a47SJeff Kirsher 
1181f7630d18SAlexey Charkov 	dma_free_coherent(hwdev,
1182f2148a47SJeff Kirsher 			  RX_RING_SIZE * sizeof(struct rx_desc) +
1183f2148a47SJeff Kirsher 			  TX_RING_SIZE * sizeof(struct tx_desc),
1184f2148a47SJeff Kirsher 			  rp->rx_ring, rp->rx_ring_dma);
1185f2148a47SJeff Kirsher 	rp->tx_ring = NULL;
1186f2148a47SJeff Kirsher 
1187f2148a47SJeff Kirsher 	if (rp->tx_bufs)
1188f7630d18SAlexey Charkov 		dma_free_coherent(hwdev, PKT_BUF_SZ * TX_RING_SIZE,
1189f2148a47SJeff Kirsher 				  rp->tx_bufs, rp->tx_bufs_dma);
1190f2148a47SJeff Kirsher 
1191f2148a47SJeff Kirsher 	rp->tx_bufs = NULL;
1192f2148a47SJeff Kirsher 
1193f2148a47SJeff Kirsher }
1194f2148a47SJeff Kirsher 
1195a21bb8baSfrançois romieu struct rhine_skb_dma {
1196a21bb8baSfrançois romieu 	struct sk_buff *skb;
1197a21bb8baSfrançois romieu 	dma_addr_t dma;
1198a21bb8baSfrançois romieu };
1199a21bb8baSfrançois romieu 
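/* Allocate an Rx skb and map it for DMA; on mapping failure the skb is
 * freed and an error returned, so callers only ever see a usable pair.
 */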
1200a21bb8baSfrançois romieu static inline int rhine_skb_dma_init(struct net_device *dev,
1201a21bb8baSfrançois romieu 				     struct rhine_skb_dma *sd)
1202f2148a47SJeff Kirsher {
1203f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
1204f7630d18SAlexey Charkov 	struct device *hwdev = dev->dev.parent;
1205a21bb8baSfrançois romieu 	const int size = rp->rx_buf_sz;
1206a21bb8baSfrançois romieu 
1207a21bb8baSfrançois romieu 	sd->skb = netdev_alloc_skb(dev, size);
1208a21bb8baSfrançois romieu 	if (!sd->skb)
1209a21bb8baSfrançois romieu 		return -ENOMEM;
1210a21bb8baSfrançois romieu 
1211a21bb8baSfrançois romieu 	sd->dma = dma_map_single(hwdev, sd->skb->data, size, DMA_FROM_DEVICE);
1212a21bb8baSfrançois romieu 	if (unlikely(dma_mapping_error(hwdev, sd->dma))) {
1213a21bb8baSfrançois romieu 		netif_err(rp, drv, dev, "Rx DMA mapping failure\n");
1214a21bb8baSfrançois romieu 		dev_kfree_skb_any(sd->skb);
1215a21bb8baSfrançois romieu 		return -EIO;
1216a21bb8baSfrançois romieu 	}
1217a21bb8baSfrançois romieu 
1218a21bb8baSfrançois romieu 	return 0;
1219a21bb8baSfrançois romieu }
1220a21bb8baSfrançois romieu 
12218709bb2cSfrançois romieu static void rhine_reset_rbufs(struct rhine_private *rp)
12228709bb2cSfrançois romieu {
12238709bb2cSfrançois romieu 	int i;
12248709bb2cSfrançois romieu 
12258709bb2cSfrançois romieu 	rp->cur_rx = 0;
12268709bb2cSfrançois romieu 
12278709bb2cSfrançois romieu 	for (i = 0; i < RX_RING_SIZE; i++)
12288709bb2cSfrançois romieu 		rp->rx_ring[i].rx_status = cpu_to_le32(DescOwn);
12298709bb2cSfrançois romieu }
12308709bb2cSfrançois romieu 
1231a21bb8baSfrançois romieu static inline void rhine_skb_dma_nic_store(struct rhine_private *rp,
1232a21bb8baSfrançois romieu 					   struct rhine_skb_dma *sd, int entry)
1233a21bb8baSfrançois romieu {
1234a21bb8baSfrançois romieu 	rp->rx_skbuff_dma[entry] = sd->dma;
1235a21bb8baSfrançois romieu 	rp->rx_skbuff[entry] = sd->skb;
1236a21bb8baSfrançois romieu 
1237a21bb8baSfrançois romieu 	rp->rx_ring[entry].addr = cpu_to_le32(sd->dma);
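	/* Order the address write before the DescOwn write that later hands
	 * this descriptor to the NIC (rhine_reset_rbufs / rhine_rx). */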
1238a21bb8baSfrançois romieu 	dma_wmb();
1239a21bb8baSfrançois romieu }
1240a21bb8baSfrançois romieu 
12418709bb2cSfrançois romieu static void free_rbufs(struct net_device* dev);
12428709bb2cSfrançois romieu 
12438709bb2cSfrançois romieu static int alloc_rbufs(struct net_device *dev)
1244a21bb8baSfrançois romieu {
1245a21bb8baSfrançois romieu 	struct rhine_private *rp = netdev_priv(dev);
1246f2148a47SJeff Kirsher 	dma_addr_t next;
1247a21bb8baSfrançois romieu 	int rc, i;
1248f2148a47SJeff Kirsher 
1249f2148a47SJeff Kirsher 	rp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
1250f2148a47SJeff Kirsher 	next = rp->rx_ring_dma;
1251f2148a47SJeff Kirsher 
1252f2148a47SJeff Kirsher 	/* Init the ring entries */
1253f2148a47SJeff Kirsher 	for (i = 0; i < RX_RING_SIZE; i++) {
1254f2148a47SJeff Kirsher 		rp->rx_ring[i].rx_status = 0;
1255f2148a47SJeff Kirsher 		rp->rx_ring[i].desc_length = cpu_to_le32(rp->rx_buf_sz);
1256f2148a47SJeff Kirsher 		next += sizeof(struct rx_desc);
1257f2148a47SJeff Kirsher 		rp->rx_ring[i].next_desc = cpu_to_le32(next);
1258f2148a47SJeff Kirsher 		rp->rx_skbuff[i] = NULL;
1259f2148a47SJeff Kirsher 	}
1260f2148a47SJeff Kirsher 	/* Mark the last entry as wrapping the ring. */
1261f2148a47SJeff Kirsher 	rp->rx_ring[i-1].next_desc = cpu_to_le32(rp->rx_ring_dma);
1262f2148a47SJeff Kirsher 
1263f2148a47SJeff Kirsher 	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
1264f2148a47SJeff Kirsher 	for (i = 0; i < RX_RING_SIZE; i++) {
1265a21bb8baSfrançois romieu 		struct rhine_skb_dma sd;
1266a21bb8baSfrançois romieu 
1267a21bb8baSfrançois romieu 		rc = rhine_skb_dma_init(dev, &sd);
12688709bb2cSfrançois romieu 		if (rc < 0) {
12698709bb2cSfrançois romieu 			free_rbufs(dev);
12708709bb2cSfrançois romieu 			goto out;
12718709bb2cSfrançois romieu 		}
1272f2148a47SJeff Kirsher 
1273a21bb8baSfrançois romieu 		rhine_skb_dma_nic_store(rp, &sd, i);
1274f2148a47SJeff Kirsher 	}
12758709bb2cSfrançois romieu 
12768709bb2cSfrançois romieu 	rhine_reset_rbufs(rp);
12778709bb2cSfrançois romieu out:
12788709bb2cSfrançois romieu 	return rc;
1279f2148a47SJeff Kirsher }
1280f2148a47SJeff Kirsher 
1281f2148a47SJeff Kirsher static void free_rbufs(struct net_device* dev)
1282f2148a47SJeff Kirsher {
1283f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
1284f7630d18SAlexey Charkov 	struct device *hwdev = dev->dev.parent;
1285f2148a47SJeff Kirsher 	int i;
1286f2148a47SJeff Kirsher 
1287f2148a47SJeff Kirsher 	/* Free all the skbuffs in the Rx queue. */
1288f2148a47SJeff Kirsher 	for (i = 0; i < RX_RING_SIZE; i++) {
1289f2148a47SJeff Kirsher 		rp->rx_ring[i].rx_status = 0;
1290f2148a47SJeff Kirsher 		rp->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
1291f2148a47SJeff Kirsher 		if (rp->rx_skbuff[i]) {
1292f7630d18SAlexey Charkov 			dma_unmap_single(hwdev,
1293f2148a47SJeff Kirsher 					 rp->rx_skbuff_dma[i],
12944087c4dcSAlexey Charkov 					 rp->rx_buf_sz, DMA_FROM_DEVICE);
1295f2148a47SJeff Kirsher 			dev_kfree_skb(rp->rx_skbuff[i]);
1296f2148a47SJeff Kirsher 		}
1297f2148a47SJeff Kirsher 		rp->rx_skbuff[i] = NULL;
1298f2148a47SJeff Kirsher 	}
1299f2148a47SJeff Kirsher }
1300f2148a47SJeff Kirsher 
1301f2148a47SJeff Kirsher static void alloc_tbufs(struct net_device* dev)
1302f2148a47SJeff Kirsher {
1303f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
1304f2148a47SJeff Kirsher 	dma_addr_t next;
1305f2148a47SJeff Kirsher 	int i;
1306f2148a47SJeff Kirsher 
1307f2148a47SJeff Kirsher 	rp->dirty_tx = rp->cur_tx = 0;
1308f2148a47SJeff Kirsher 	next = rp->tx_ring_dma;
1309f2148a47SJeff Kirsher 	for (i = 0; i < TX_RING_SIZE; i++) {
1310f2148a47SJeff Kirsher 		rp->tx_skbuff[i] = NULL;
1311f2148a47SJeff Kirsher 		rp->tx_ring[i].tx_status = 0;
1312f2148a47SJeff Kirsher 		rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
1313f2148a47SJeff Kirsher 		next += sizeof(struct tx_desc);
1314f2148a47SJeff Kirsher 		rp->tx_ring[i].next_desc = cpu_to_le32(next);
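		/* Rhine-I slots are pre-pointed at their aligned bounce
		 * buffer (used by rhine_start_tx when needed). */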
1315f2148a47SJeff Kirsher 		if (rp->quirks & rqRhineI)
1316f2148a47SJeff Kirsher 			rp->tx_buf[i] = &rp->tx_bufs[i * PKT_BUF_SZ];
1317f2148a47SJeff Kirsher 	}
1318f2148a47SJeff Kirsher 	rp->tx_ring[i-1].next_desc = cpu_to_le32(rp->tx_ring_dma);
1319f2148a47SJeff Kirsher 
132092bf2008STino Reichardt 	netdev_reset_queue(dev);
1321f2148a47SJeff Kirsher }
1322f2148a47SJeff Kirsher 
1323f2148a47SJeff Kirsher static void free_tbufs(struct net_device* dev)
1324f2148a47SJeff Kirsher {
1325f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
1326f7630d18SAlexey Charkov 	struct device *hwdev = dev->dev.parent;
1327f2148a47SJeff Kirsher 	int i;
1328f2148a47SJeff Kirsher 
1329f2148a47SJeff Kirsher 	for (i = 0; i < TX_RING_SIZE; i++) {
1330f2148a47SJeff Kirsher 		rp->tx_ring[i].tx_status = 0;
1331f2148a47SJeff Kirsher 		rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
1332f2148a47SJeff Kirsher 		rp->tx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
1333f2148a47SJeff Kirsher 		if (rp->tx_skbuff[i]) {
1334f2148a47SJeff Kirsher 			if (rp->tx_skbuff_dma[i]) {
1335f7630d18SAlexey Charkov 				dma_unmap_single(hwdev,
1336f2148a47SJeff Kirsher 						 rp->tx_skbuff_dma[i],
1337f2148a47SJeff Kirsher 						 rp->tx_skbuff[i]->len,
13384087c4dcSAlexey Charkov 						 DMA_TO_DEVICE);
1339f2148a47SJeff Kirsher 			}
1340f2148a47SJeff Kirsher 			dev_kfree_skb(rp->tx_skbuff[i]);
1341f2148a47SJeff Kirsher 		}
1342f2148a47SJeff Kirsher 		rp->tx_skbuff[i] = NULL;
1343f2148a47SJeff Kirsher 		rp->tx_buf[i] = NULL;
1344f2148a47SJeff Kirsher 	}
1345f2148a47SJeff Kirsher }
1346f2148a47SJeff Kirsher 
1347f2148a47SJeff Kirsher static void rhine_check_media(struct net_device *dev, unsigned int init_media)
1348f2148a47SJeff Kirsher {
1349f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
1350f2148a47SJeff Kirsher 	void __iomem *ioaddr = rp->base;
1351f2148a47SJeff Kirsher 
13525bdc7380SBen Hutchings 	if (!rp->mii_if.force_media)
1353fc3e0f8aSFrancois Romieu 		mii_check_media(&rp->mii_if, netif_msg_link(rp), init_media);
1354f2148a47SJeff Kirsher 
1355f2148a47SJeff Kirsher 	if (rp->mii_if.full_duplex)
1356f2148a47SJeff Kirsher 	    iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1FDuplex,
1357f2148a47SJeff Kirsher 		   ioaddr + ChipCmd1);
1358f2148a47SJeff Kirsher 	else
1359f2148a47SJeff Kirsher 	    iowrite8(ioread8(ioaddr + ChipCmd1) & ~Cmd1FDuplex,
1360f2148a47SJeff Kirsher 		   ioaddr + ChipCmd1);
1361fc3e0f8aSFrancois Romieu 
1362fc3e0f8aSFrancois Romieu 	netif_info(rp, link, dev, "force_media %d, carrier %d\n",
1363f2148a47SJeff Kirsher 		   rp->mii_if.force_media, netif_carrier_ok(dev));
1364f2148a47SJeff Kirsher }
1365f2148a47SJeff Kirsher 
1366f2148a47SJeff Kirsher /* Called after status of force_media possibly changed */
1367f2148a47SJeff Kirsher static void rhine_set_carrier(struct mii_if_info *mii)
1368f2148a47SJeff Kirsher {
1369fc3e0f8aSFrancois Romieu 	struct net_device *dev = mii->dev;
1370fc3e0f8aSFrancois Romieu 	struct rhine_private *rp = netdev_priv(dev);
1371fc3e0f8aSFrancois Romieu 
1372f2148a47SJeff Kirsher 	if (mii->force_media) {
1373f2148a47SJeff Kirsher 		/* autoneg is off: Link is always assumed to be up */
1374fc3e0f8aSFrancois Romieu 		if (!netif_carrier_ok(dev))
1375fc3e0f8aSFrancois Romieu 			netif_carrier_on(dev);
137617958438SFrançois Cachereul 	}
137717958438SFrançois Cachereul 
1378fc3e0f8aSFrancois Romieu 	rhine_check_media(dev, 0);
1379fc3e0f8aSFrancois Romieu 
1380fc3e0f8aSFrancois Romieu 	netif_info(rp, link, dev, "force_media %d, carrier %d\n",
1381fc3e0f8aSFrancois Romieu 		   mii->force_media, netif_carrier_ok(dev));
1382f2148a47SJeff Kirsher }
1383f2148a47SJeff Kirsher 
1384f2148a47SJeff Kirsher /**
1385f2148a47SJeff Kirsher  * rhine_set_cam - set CAM multicast filters
1386f2148a47SJeff Kirsher  * @ioaddr: register block of this Rhine
1387f2148a47SJeff Kirsher  * @idx: multicast CAM index [0..MCAM_SIZE-1]
1388f2148a47SJeff Kirsher  * @addr: multicast address (6 bytes)
1389f2148a47SJeff Kirsher  *
1390f2148a47SJeff Kirsher  * Load addresses into multicast filters.
1391f2148a47SJeff Kirsher  */
1392f2148a47SJeff Kirsher static void rhine_set_cam(void __iomem *ioaddr, int idx, u8 *addr)
1393f2148a47SJeff Kirsher {
1394f2148a47SJeff Kirsher 	int i;
1395f2148a47SJeff Kirsher 
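
	/* Sequence: enable CAM access, select the entry, load the address
	 * bytes, latch them with CAMC_CAMWR, then disable CAM access. */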
1396f2148a47SJeff Kirsher 	iowrite8(CAMC_CAMEN, ioaddr + CamCon);
1397f2148a47SJeff Kirsher 	wmb();
1398f2148a47SJeff Kirsher 
1399f2148a47SJeff Kirsher 	/* Paranoid -- idx out of range should never happen */
1400f2148a47SJeff Kirsher 	idx &= (MCAM_SIZE - 1);
1401f2148a47SJeff Kirsher 
1402f2148a47SJeff Kirsher 	iowrite8((u8) idx, ioaddr + CamAddr);
1403f2148a47SJeff Kirsher 
1404f2148a47SJeff Kirsher 	for (i = 0; i < 6; i++, addr++)
1405f2148a47SJeff Kirsher 		iowrite8(*addr, ioaddr + MulticastFilter0 + i);
1406f2148a47SJeff Kirsher 	udelay(10);
1407f2148a47SJeff Kirsher 	wmb();
1408f2148a47SJeff Kirsher 
1409f2148a47SJeff Kirsher 	iowrite8(CAMC_CAMWR | CAMC_CAMEN, ioaddr + CamCon);
1410f2148a47SJeff Kirsher 	udelay(10);
1411f2148a47SJeff Kirsher 
1412f2148a47SJeff Kirsher 	iowrite8(0, ioaddr + CamCon);
1413f2148a47SJeff Kirsher }
1414f2148a47SJeff Kirsher 
1415f2148a47SJeff Kirsher /**
1416f2148a47SJeff Kirsher  * rhine_set_vlan_cam - set CAM VLAN filters
1417f2148a47SJeff Kirsher  * @ioaddr: register block of this Rhine
1418f2148a47SJeff Kirsher  * @idx: VLAN CAM index [0..VCAM_SIZE-1]
1419f2148a47SJeff Kirsher  * @addr: VLAN ID (2 bytes)
1420f2148a47SJeff Kirsher  *
1421f2148a47SJeff Kirsher  * Load addresses into VLAN filters.
1422f2148a47SJeff Kirsher  */
1423f2148a47SJeff Kirsher static void rhine_set_vlan_cam(void __iomem *ioaddr, int idx, u8 *addr)
1424f2148a47SJeff Kirsher {
1425f2148a47SJeff Kirsher 	iowrite8(CAMC_CAMEN | CAMC_VCAMSL, ioaddr + CamCon);
1426f2148a47SJeff Kirsher 	wmb();
1427f2148a47SJeff Kirsher 
1428f2148a47SJeff Kirsher 	/* Paranoid -- idx out of range should never happen */
1429f2148a47SJeff Kirsher 	idx &= (VCAM_SIZE - 1);
1430f2148a47SJeff Kirsher 
1431f2148a47SJeff Kirsher 	iowrite8((u8) idx, ioaddr + CamAddr);
1432f2148a47SJeff Kirsher 
1433f2148a47SJeff Kirsher 	iowrite16(*((u16 *) addr), ioaddr + MulticastFilter0 + 6);
1434f2148a47SJeff Kirsher 	udelay(10);
1435f2148a47SJeff Kirsher 	wmb();
1436f2148a47SJeff Kirsher 
1437f2148a47SJeff Kirsher 	iowrite8(CAMC_CAMWR | CAMC_CAMEN, ioaddr + CamCon);
1438f2148a47SJeff Kirsher 	udelay(10);
1439f2148a47SJeff Kirsher 
1440f2148a47SJeff Kirsher 	iowrite8(0, ioaddr + CamCon);
1441f2148a47SJeff Kirsher }
1442f2148a47SJeff Kirsher 
1443f2148a47SJeff Kirsher /**
1444f2148a47SJeff Kirsher  * rhine_set_cam_mask - set multicast CAM mask
1445f2148a47SJeff Kirsher  * @ioaddr: register block of this Rhine
1446f2148a47SJeff Kirsher  * @mask: multicast CAM mask
1447f2148a47SJeff Kirsher  *
1448f2148a47SJeff Kirsher  * Mask sets multicast filters active/inactive.
1449f2148a47SJeff Kirsher  */
1450f2148a47SJeff Kirsher static void rhine_set_cam_mask(void __iomem *ioaddr, u32 mask)
1451f2148a47SJeff Kirsher {
1452f2148a47SJeff Kirsher 	iowrite8(CAMC_CAMEN, ioaddr + CamCon);
1453f2148a47SJeff Kirsher 	wmb();
1454f2148a47SJeff Kirsher 
1455f2148a47SJeff Kirsher 	/* write mask */
1456f2148a47SJeff Kirsher 	iowrite32(mask, ioaddr + CamMask);
1457f2148a47SJeff Kirsher 
1458f2148a47SJeff Kirsher 	/* disable CAMEN */
1459f2148a47SJeff Kirsher 	iowrite8(0, ioaddr + CamCon);
1460f2148a47SJeff Kirsher }
1461f2148a47SJeff Kirsher 
1462f2148a47SJeff Kirsher /**
1463f2148a47SJeff Kirsher  * rhine_set_vlan_cam_mask - set VLAN CAM mask
1464f2148a47SJeff Kirsher  * @ioaddr: register block of this Rhine
1465f2148a47SJeff Kirsher  * @mask: VLAN CAM mask
1466f2148a47SJeff Kirsher  *
1467f2148a47SJeff Kirsher  * Mask sets VLAN filters active/inactive.
1468f2148a47SJeff Kirsher  */
1469f2148a47SJeff Kirsher static void rhine_set_vlan_cam_mask(void __iomem *ioaddr, u32 mask)
1470f2148a47SJeff Kirsher {
1471f2148a47SJeff Kirsher 	iowrite8(CAMC_CAMEN | CAMC_VCAMSL, ioaddr + CamCon);
1472f2148a47SJeff Kirsher 	wmb();
1473f2148a47SJeff Kirsher 
1474f2148a47SJeff Kirsher 	/* write mask */
1475f2148a47SJeff Kirsher 	iowrite32(mask, ioaddr + CamMask);
1476f2148a47SJeff Kirsher 
1477f2148a47SJeff Kirsher 	/* disable CAMEN */
1478f2148a47SJeff Kirsher 	iowrite8(0, ioaddr + CamCon);
1479f2148a47SJeff Kirsher }
1480f2148a47SJeff Kirsher 
1481f2148a47SJeff Kirsher /**
1482f2148a47SJeff Kirsher  * rhine_init_cam_filter - initialize CAM filters
1483f2148a47SJeff Kirsher  * @dev: network device
1484f2148a47SJeff Kirsher  *
1485f2148a47SJeff Kirsher  * Initialize (disable) hardware VLAN and multicast support on this
1486f2148a47SJeff Kirsher  * Rhine.
1487f2148a47SJeff Kirsher  */
1488f2148a47SJeff Kirsher static void rhine_init_cam_filter(struct net_device *dev)
1489f2148a47SJeff Kirsher {
1490f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
1491f2148a47SJeff Kirsher 	void __iomem *ioaddr = rp->base;
1492f2148a47SJeff Kirsher 
1493f2148a47SJeff Kirsher 	/* Disable all CAMs */
1494f2148a47SJeff Kirsher 	rhine_set_vlan_cam_mask(ioaddr, 0);
1495f2148a47SJeff Kirsher 	rhine_set_cam_mask(ioaddr, 0);
1496f2148a47SJeff Kirsher 
1497f2148a47SJeff Kirsher 	/* disable hardware VLAN support */
1498f2148a47SJeff Kirsher 	BYTE_REG_BITS_ON(TCR_PQEN, ioaddr + TxConfig);
1499f2148a47SJeff Kirsher 	BYTE_REG_BITS_OFF(BCR1_VIDFR, ioaddr + PCIBusConfig1);
1500f2148a47SJeff Kirsher }
1501f2148a47SJeff Kirsher 
1502f2148a47SJeff Kirsher /**
1503f2148a47SJeff Kirsher  * rhine_update_vcam - update VLAN CAM filters
1504d0ea5cbdSJesse Brandeburg  * @dev: network device
1505f2148a47SJeff Kirsher  *
1506f2148a47SJeff Kirsher  * Update VLAN CAM filters to match configuration change.
1507f2148a47SJeff Kirsher  */
1508f2148a47SJeff Kirsher static void rhine_update_vcam(struct net_device *dev)
1509f2148a47SJeff Kirsher {
1510f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
1511f2148a47SJeff Kirsher 	void __iomem *ioaddr = rp->base;
1512f2148a47SJeff Kirsher 	u16 vid;
1513f2148a47SJeff Kirsher 	u32 vCAMmask = 0;	/* 32 vCAMs (6105M and better) */
1514f2148a47SJeff Kirsher 	unsigned int i = 0;
1515f2148a47SJeff Kirsher 
1516f2148a47SJeff Kirsher 	for_each_set_bit(vid, rp->active_vlans, VLAN_N_VID) {
1517f2148a47SJeff Kirsher 		rhine_set_vlan_cam(ioaddr, i, (u8 *)&vid);
1518f2148a47SJeff Kirsher 		vCAMmask |= 1 << i;
1519f2148a47SJeff Kirsher 		if (++i >= VCAM_SIZE)
1520f2148a47SJeff Kirsher 			break;
1521f2148a47SJeff Kirsher 	}
1522f2148a47SJeff Kirsher 	rhine_set_vlan_cam_mask(ioaddr, vCAMmask);
1523f2148a47SJeff Kirsher }
1524f2148a47SJeff Kirsher 
152580d5c368SPatrick McHardy static int rhine_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
1526f2148a47SJeff Kirsher {
1527f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
1528f2148a47SJeff Kirsher 
15297ab87ff4SFrancois Romieu 	spin_lock_bh(&rp->lock);
1530f2148a47SJeff Kirsher 	set_bit(vid, rp->active_vlans);
1531f2148a47SJeff Kirsher 	rhine_update_vcam(dev);
15327ab87ff4SFrancois Romieu 	spin_unlock_bh(&rp->lock);
15338e586137SJiri Pirko 	return 0;
1534f2148a47SJeff Kirsher }
1535f2148a47SJeff Kirsher 
153680d5c368SPatrick McHardy static int rhine_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
1537f2148a47SJeff Kirsher {
1538f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
1539f2148a47SJeff Kirsher 
15407ab87ff4SFrancois Romieu 	spin_lock_bh(&rp->lock);
1541f2148a47SJeff Kirsher 	clear_bit(vid, rp->active_vlans);
1542f2148a47SJeff Kirsher 	rhine_update_vcam(dev);
15437ab87ff4SFrancois Romieu 	spin_unlock_bh(&rp->lock);
15448e586137SJiri Pirko 	return 0;
1545f2148a47SJeff Kirsher }
1546f2148a47SJeff Kirsher 
1547f2148a47SJeff Kirsher static void init_registers(struct net_device *dev)
1548f2148a47SJeff Kirsher {
1549f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
1550f2148a47SJeff Kirsher 	void __iomem *ioaddr = rp->base;
1551f2148a47SJeff Kirsher 	int i;
1552f2148a47SJeff Kirsher 
1553f2148a47SJeff Kirsher 	for (i = 0; i < 6; i++)
1554f2148a47SJeff Kirsher 		iowrite8(dev->dev_addr[i], ioaddr + StationAddr + i);
1555f2148a47SJeff Kirsher 
1556f2148a47SJeff Kirsher 	/* Initialize other registers. */
1557f2148a47SJeff Kirsher 	iowrite16(0x0006, ioaddr + PCIBusConfig);	/* Tune configuration??? */
1558f2148a47SJeff Kirsher 	/* Configure initial FIFO thresholds. */
1559f2148a47SJeff Kirsher 	iowrite8(0x20, ioaddr + TxConfig);
1560f2148a47SJeff Kirsher 	rp->tx_thresh = 0x20;
1561f2148a47SJeff Kirsher 	rp->rx_thresh = 0x60;		/* Written in rhine_set_rx_mode(). */
1562f2148a47SJeff Kirsher 
1563f2148a47SJeff Kirsher 	iowrite32(rp->rx_ring_dma, ioaddr + RxRingPtr);
1564f2148a47SJeff Kirsher 	iowrite32(rp->tx_ring_dma, ioaddr + TxRingPtr);
1565f2148a47SJeff Kirsher 
1566f2148a47SJeff Kirsher 	rhine_set_rx_mode(dev);
1567f2148a47SJeff Kirsher 
1568ca8b6e04SAlexey Charkov 	if (rp->quirks & rqMgmt)
1569f2148a47SJeff Kirsher 		rhine_init_cam_filter(dev);
1570f2148a47SJeff Kirsher 
1571f2148a47SJeff Kirsher 	napi_enable(&rp->napi);
1572f2148a47SJeff Kirsher 
15737ab87ff4SFrancois Romieu 	iowrite16(RHINE_EVENT & 0xffff, ioaddr + IntrEnable);
1574f2148a47SJeff Kirsher 
1575f2148a47SJeff Kirsher 	iowrite16(CmdStart | CmdTxOn | CmdRxOn | (Cmd1NoTxPoll << 8),
1576f2148a47SJeff Kirsher 	       ioaddr + ChipCmd);
1577f2148a47SJeff Kirsher 	rhine_check_media(dev, 1);
1578f2148a47SJeff Kirsher }
1579f2148a47SJeff Kirsher 
1580f2148a47SJeff Kirsher /* Enable MII link status auto-polling (required for IntrLinkChange) */
1581a384a33bSFrancois Romieu static void rhine_enable_linkmon(struct rhine_private *rp)
1582f2148a47SJeff Kirsher {
1583a384a33bSFrancois Romieu 	void __iomem *ioaddr = rp->base;
1584a384a33bSFrancois Romieu 
1585f2148a47SJeff Kirsher 	iowrite8(0, ioaddr + MIICmd);
1586f2148a47SJeff Kirsher 	iowrite8(MII_BMSR, ioaddr + MIIRegAddr);
1587f2148a47SJeff Kirsher 	iowrite8(0x80, ioaddr + MIICmd);
1588f2148a47SJeff Kirsher 
1589a384a33bSFrancois Romieu 	rhine_wait_bit_high(rp, MIIRegAddr, 0x20);
1590f2148a47SJeff Kirsher 
1591f2148a47SJeff Kirsher 	iowrite8(MII_BMSR | 0x40, ioaddr + MIIRegAddr);
1592f2148a47SJeff Kirsher }
1593f2148a47SJeff Kirsher 
1594f2148a47SJeff Kirsher /* Disable MII link status auto-polling (required for MDIO access) */
1595a384a33bSFrancois Romieu static void rhine_disable_linkmon(struct rhine_private *rp)
1596f2148a47SJeff Kirsher {
1597a384a33bSFrancois Romieu 	void __iomem *ioaddr = rp->base;
1598a384a33bSFrancois Romieu 
1599f2148a47SJeff Kirsher 	iowrite8(0, ioaddr + MIICmd);
1600f2148a47SJeff Kirsher 
1601a384a33bSFrancois Romieu 	if (rp->quirks & rqRhineI) {
1602f2148a47SJeff Kirsher 		iowrite8(0x01, ioaddr + MIIRegAddr);	// MII_BMSR
1603f2148a47SJeff Kirsher 
1604f2148a47SJeff Kirsher 		/* Can be called from ISR. Evil. */
1605f2148a47SJeff Kirsher 		mdelay(1);
1606f2148a47SJeff Kirsher 
1607f2148a47SJeff Kirsher 		/* 0x80 must be set immediately before turning it off */
1608f2148a47SJeff Kirsher 		iowrite8(0x80, ioaddr + MIICmd);
1609f2148a47SJeff Kirsher 
1610a384a33bSFrancois Romieu 		rhine_wait_bit_high(rp, MIIRegAddr, 0x20);
1611f2148a47SJeff Kirsher 
1612f2148a47SJeff Kirsher 		/* Heh. Now clear 0x80 again. */
1613f2148a47SJeff Kirsher 		iowrite8(0, ioaddr + MIICmd);
1614f2148a47SJeff Kirsher 	}
1615f2148a47SJeff Kirsher 	else
1616a384a33bSFrancois Romieu 		rhine_wait_bit_high(rp, MIIRegAddr, 0x80);
1617f2148a47SJeff Kirsher }
1618f2148a47SJeff Kirsher 
1619f2148a47SJeff Kirsher /* Read and write over the MII Management Data I/O (MDIO) interface. */
1620f2148a47SJeff Kirsher 
1621f2148a47SJeff Kirsher static int mdio_read(struct net_device *dev, int phy_id, int regnum)
1622f2148a47SJeff Kirsher {
1623f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
1624f2148a47SJeff Kirsher 	void __iomem *ioaddr = rp->base;
1625f2148a47SJeff Kirsher 	int result;
1626f2148a47SJeff Kirsher 
1627a384a33bSFrancois Romieu 	rhine_disable_linkmon(rp);
1628f2148a47SJeff Kirsher 
1629f2148a47SJeff Kirsher 	/* rhine_disable_linkmon already cleared MIICmd */
1630f2148a47SJeff Kirsher 	iowrite8(phy_id, ioaddr + MIIPhyAddr);
1631f2148a47SJeff Kirsher 	iowrite8(regnum, ioaddr + MIIRegAddr);
1632f2148a47SJeff Kirsher 	iowrite8(0x40, ioaddr + MIICmd);		/* Trigger read */
1633a384a33bSFrancois Romieu 	rhine_wait_bit_low(rp, MIICmd, 0x40);
1634f2148a47SJeff Kirsher 	result = ioread16(ioaddr + MIIData);
1635f2148a47SJeff Kirsher 
1636a384a33bSFrancois Romieu 	rhine_enable_linkmon(rp);
1637f2148a47SJeff Kirsher 	return result;
1638f2148a47SJeff Kirsher }
1639f2148a47SJeff Kirsher 
1640f2148a47SJeff Kirsher static void mdio_write(struct net_device *dev, int phy_id, int regnum, int value)
1641f2148a47SJeff Kirsher {
1642f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
1643f2148a47SJeff Kirsher 	void __iomem *ioaddr = rp->base;
1644f2148a47SJeff Kirsher 
1645a384a33bSFrancois Romieu 	rhine_disable_linkmon(rp);
1646f2148a47SJeff Kirsher 
1647f2148a47SJeff Kirsher 	/* rhine_disable_linkmon already cleared MIICmd */
1648f2148a47SJeff Kirsher 	iowrite8(phy_id, ioaddr + MIIPhyAddr);
1649f2148a47SJeff Kirsher 	iowrite8(regnum, ioaddr + MIIRegAddr);
1650f2148a47SJeff Kirsher 	iowrite16(value, ioaddr + MIIData);
1651f2148a47SJeff Kirsher 	iowrite8(0x20, ioaddr + MIICmd);		/* Trigger write */
1652a384a33bSFrancois Romieu 	rhine_wait_bit_low(rp, MIICmd, 0x20);
1653f2148a47SJeff Kirsher 
1654a384a33bSFrancois Romieu 	rhine_enable_linkmon(rp);
1655f2148a47SJeff Kirsher }
1656f2148a47SJeff Kirsher 
16577ab87ff4SFrancois Romieu static void rhine_task_disable(struct rhine_private *rp)
16587ab87ff4SFrancois Romieu {
16597ab87ff4SFrancois Romieu 	mutex_lock(&rp->task_lock);
16607ab87ff4SFrancois Romieu 	rp->task_enable = false;
16617ab87ff4SFrancois Romieu 	mutex_unlock(&rp->task_lock);
16627ab87ff4SFrancois Romieu 
16637ab87ff4SFrancois Romieu 	cancel_work_sync(&rp->slow_event_task);
16647ab87ff4SFrancois Romieu 	cancel_work_sync(&rp->reset_task);
16657ab87ff4SFrancois Romieu }
16667ab87ff4SFrancois Romieu 
16677ab87ff4SFrancois Romieu static void rhine_task_enable(struct rhine_private *rp)
16687ab87ff4SFrancois Romieu {
16697ab87ff4SFrancois Romieu 	mutex_lock(&rp->task_lock);
16707ab87ff4SFrancois Romieu 	rp->task_enable = true;
16717ab87ff4SFrancois Romieu 	mutex_unlock(&rp->task_lock);
16727ab87ff4SFrancois Romieu }
16737ab87ff4SFrancois Romieu 
1674f2148a47SJeff Kirsher static int rhine_open(struct net_device *dev)
1675f2148a47SJeff Kirsher {
1676f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
1677f2148a47SJeff Kirsher 	void __iomem *ioaddr = rp->base;
1678f2148a47SJeff Kirsher 	int rc;
1679f2148a47SJeff Kirsher 
1680f7630d18SAlexey Charkov 	rc = request_irq(rp->irq, rhine_interrupt, IRQF_SHARED, dev->name, dev);
1681f2148a47SJeff Kirsher 	if (rc)
16824d1fd9c1Sfrançois romieu 		goto out;
1683f2148a47SJeff Kirsher 
1684f7630d18SAlexey Charkov 	netif_dbg(rp, ifup, dev, "%s() irq %d\n", __func__, rp->irq);
1685f2148a47SJeff Kirsher 
1686f2148a47SJeff Kirsher 	rc = alloc_ring(dev);
16874d1fd9c1Sfrançois romieu 	if (rc < 0)
16884d1fd9c1Sfrançois romieu 		goto out_free_irq;
16894d1fd9c1Sfrançois romieu 
16908709bb2cSfrançois romieu 	rc = alloc_rbufs(dev);
16918709bb2cSfrançois romieu 	if (rc < 0)
16928709bb2cSfrançois romieu 		goto out_free_ring;
16938709bb2cSfrançois romieu 
1694f2148a47SJeff Kirsher 	alloc_tbufs(dev);
1695d120c9a8SKevin Brace 	enable_mmio(rp->pioaddr, rp->quirks);
1696d120c9a8SKevin Brace 	rhine_power_init(dev);
1697f2148a47SJeff Kirsher 	rhine_chip_reset(dev);
16987ab87ff4SFrancois Romieu 	rhine_task_enable(rp);
1699f2148a47SJeff Kirsher 	init_registers(dev);
1700fc3e0f8aSFrancois Romieu 
1701fc3e0f8aSFrancois Romieu 	netif_dbg(rp, ifup, dev, "%s() Done - status %04x MII status: %04x\n",
1702f2148a47SJeff Kirsher 		  __func__, ioread16(ioaddr + ChipCmd),
1703f2148a47SJeff Kirsher 		  mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
1704f2148a47SJeff Kirsher 
1705f2148a47SJeff Kirsher 	netif_start_queue(dev);
1706f2148a47SJeff Kirsher 
17074d1fd9c1Sfrançois romieu out:
17084d1fd9c1Sfrançois romieu 	return rc;
17094d1fd9c1Sfrançois romieu 
17108709bb2cSfrançois romieu out_free_ring:
17118709bb2cSfrançois romieu 	free_ring(dev);
17124d1fd9c1Sfrançois romieu out_free_irq:
17134d1fd9c1Sfrançois romieu 	free_irq(rp->irq, dev);
17144d1fd9c1Sfrançois romieu 	goto out;
1715f2148a47SJeff Kirsher }
1716f2148a47SJeff Kirsher 
1717f2148a47SJeff Kirsher static void rhine_reset_task(struct work_struct *work)
1718f2148a47SJeff Kirsher {
1719f2148a47SJeff Kirsher 	struct rhine_private *rp = container_of(work, struct rhine_private,
1720f2148a47SJeff Kirsher 						reset_task);
1721f2148a47SJeff Kirsher 	struct net_device *dev = rp->dev;
1722f2148a47SJeff Kirsher 
17237ab87ff4SFrancois Romieu 	mutex_lock(&rp->task_lock);
17247ab87ff4SFrancois Romieu 
17257ab87ff4SFrancois Romieu 	if (!rp->task_enable)
17267ab87ff4SFrancois Romieu 		goto out_unlock;
1727f2148a47SJeff Kirsher 
1728f2148a47SJeff Kirsher 	napi_disable(&rp->napi);
1729a926592fSRichard Weinberger 	netif_tx_disable(dev);
1730f2148a47SJeff Kirsher 	spin_lock_bh(&rp->lock);
1731f2148a47SJeff Kirsher 
1732f2148a47SJeff Kirsher 	/* clear all descriptors */
1733f2148a47SJeff Kirsher 	free_tbufs(dev);
1734f2148a47SJeff Kirsher 	alloc_tbufs(dev);
17358709bb2cSfrançois romieu 
17368709bb2cSfrançois romieu 	rhine_reset_rbufs(rp);
1737f2148a47SJeff Kirsher 
1738f2148a47SJeff Kirsher 	/* Reinitialize the hardware. */
1739f2148a47SJeff Kirsher 	rhine_chip_reset(dev);
1740f2148a47SJeff Kirsher 	init_registers(dev);
1741f2148a47SJeff Kirsher 
1742f2148a47SJeff Kirsher 	spin_unlock_bh(&rp->lock);
1743f2148a47SJeff Kirsher 
1744860e9538SFlorian Westphal 	netif_trans_update(dev); /* prevent tx timeout */
1745f2148a47SJeff Kirsher 	dev->stats.tx_errors++;
1746f2148a47SJeff Kirsher 	netif_wake_queue(dev);
17477ab87ff4SFrancois Romieu 
17487ab87ff4SFrancois Romieu out_unlock:
17497ab87ff4SFrancois Romieu 	mutex_unlock(&rp->task_lock);
1750f2148a47SJeff Kirsher }
1751f2148a47SJeff Kirsher 
17520290bd29SMichael S. Tsirkin static void rhine_tx_timeout(struct net_device *dev, unsigned int txqueue)
1753f2148a47SJeff Kirsher {
1754f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
1755f2148a47SJeff Kirsher 	void __iomem *ioaddr = rp->base;
1756f2148a47SJeff Kirsher 
1757f2148a47SJeff Kirsher 	netdev_warn(dev, "Transmit timed out, status %04x, PHY status %04x, resetting...\n",
1758f2148a47SJeff Kirsher 		    ioread16(ioaddr + IntrStatus),
1759f2148a47SJeff Kirsher 		    mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
1760f2148a47SJeff Kirsher 
1761f2148a47SJeff Kirsher 	schedule_work(&rp->reset_task);
1762f2148a47SJeff Kirsher }
1763f2148a47SJeff Kirsher 
17643a5a883aSfrançois romieu static inline bool rhine_tx_queue_full(struct rhine_private *rp)
17653a5a883aSfrançois romieu {
17663a5a883aSfrançois romieu 	return (rp->cur_tx - rp->dirty_tx) >= TX_QUEUE_LEN;
17673a5a883aSfrançois romieu }
17683a5a883aSfrançois romieu 
1769f2148a47SJeff Kirsher static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
1770f2148a47SJeff Kirsher 				  struct net_device *dev)
1771f2148a47SJeff Kirsher {
1772f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
1773f7630d18SAlexey Charkov 	struct device *hwdev = dev->dev.parent;
1774f2148a47SJeff Kirsher 	void __iomem *ioaddr = rp->base;
1775f2148a47SJeff Kirsher 	unsigned entry;
1776f2148a47SJeff Kirsher 
1777f2148a47SJeff Kirsher 	/* Caution: the write order is important here, set the field
1778f2148a47SJeff Kirsher 	   with the "ownership" bits last. */
1779f2148a47SJeff Kirsher 
1780f2148a47SJeff Kirsher 	/* Calculate the next Tx descriptor entry. */
1781f2148a47SJeff Kirsher 	entry = rp->cur_tx % TX_RING_SIZE;
1782f2148a47SJeff Kirsher 
1783f2148a47SJeff Kirsher 	if (skb_padto(skb, ETH_ZLEN))
1784f2148a47SJeff Kirsher 		return NETDEV_TX_OK;
1785f2148a47SJeff Kirsher 
1786f2148a47SJeff Kirsher 	rp->tx_skbuff[entry] = skb;
1787f2148a47SJeff Kirsher 
1788f2148a47SJeff Kirsher 	if ((rp->quirks & rqRhineI) &&
1789f2148a47SJeff Kirsher 	    (((unsigned long)skb->data & 3) || skb_shinfo(skb)->nr_frags != 0 || skb->ip_summed == CHECKSUM_PARTIAL)) {
1790f2148a47SJeff Kirsher 		/* Must use alignment buffer. */
1791f2148a47SJeff Kirsher 		if (skb->len > PKT_BUF_SZ) {
1792f2148a47SJeff Kirsher 			/* packet too long, drop it */
17934b3afc6eSEric W. Biederman 			dev_kfree_skb_any(skb);
1794f2148a47SJeff Kirsher 			rp->tx_skbuff[entry] = NULL;
1795f2148a47SJeff Kirsher 			dev->stats.tx_dropped++;
1796f2148a47SJeff Kirsher 			return NETDEV_TX_OK;
1797f2148a47SJeff Kirsher 		}
1798f2148a47SJeff Kirsher 
1799f2148a47SJeff Kirsher 		/* Padding is not copied and so must be redone. */
1800f2148a47SJeff Kirsher 		skb_copy_and_csum_dev(skb, rp->tx_buf[entry]);
1801f2148a47SJeff Kirsher 		if (skb->len < ETH_ZLEN)
1802f2148a47SJeff Kirsher 			memset(rp->tx_buf[entry] + skb->len, 0,
1803f2148a47SJeff Kirsher 			       ETH_ZLEN - skb->len);
1804f2148a47SJeff Kirsher 		rp->tx_skbuff_dma[entry] = 0;
1805f2148a47SJeff Kirsher 		rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_bufs_dma +
1806f2148a47SJeff Kirsher 						      (rp->tx_buf[entry] -
1807f2148a47SJeff Kirsher 						       rp->tx_bufs));
1808f2148a47SJeff Kirsher 	} else {
1809f2148a47SJeff Kirsher 		rp->tx_skbuff_dma[entry] =
1810f7630d18SAlexey Charkov 			dma_map_single(hwdev, skb->data, skb->len,
18114087c4dcSAlexey Charkov 				       DMA_TO_DEVICE);
1812f7630d18SAlexey Charkov 		if (dma_mapping_error(hwdev, rp->tx_skbuff_dma[entry])) {
18134b3afc6eSEric W. Biederman 			dev_kfree_skb_any(skb);
18149b4fe5fbSNeil Horman 			rp->tx_skbuff_dma[entry] = 0;
18159b4fe5fbSNeil Horman 			dev->stats.tx_dropped++;
18169b4fe5fbSNeil Horman 			return NETDEV_TX_OK;
18179b4fe5fbSNeil Horman 		}
1818f2148a47SJeff Kirsher 		rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_skbuff_dma[entry]);
1819f2148a47SJeff Kirsher 	}
1820f2148a47SJeff Kirsher 
1821f2148a47SJeff Kirsher 	rp->tx_ring[entry].desc_length =
1822f2148a47SJeff Kirsher 		cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));
1823f2148a47SJeff Kirsher 
1824df8a39deSJiri Pirko 	if (unlikely(skb_vlan_tag_present(skb))) {
1825df8a39deSJiri Pirko 		u16 vid_pcp = skb_vlan_tag_get(skb);
1826207070f5SRoger Luethi 
1827207070f5SRoger Luethi 		/* drop CFI/DEI bit, register needs VID and PCP */
1828207070f5SRoger Luethi 		vid_pcp = (vid_pcp & VLAN_VID_MASK) |
1829207070f5SRoger Luethi 			  ((vid_pcp & VLAN_PRIO_MASK) >> 1);
1830207070f5SRoger Luethi 		rp->tx_ring[entry].tx_status = cpu_to_le32((vid_pcp) << 16);
1831f2148a47SJeff Kirsher 		/* request tagging */
1832f2148a47SJeff Kirsher 		rp->tx_ring[entry].desc_length |= cpu_to_le32(0x020000);
1833f2148a47SJeff Kirsher 	}
1834f2148a47SJeff Kirsher 	else
1835f2148a47SJeff Kirsher 		rp->tx_ring[entry].tx_status = 0;
1836f2148a47SJeff Kirsher 
183792bf2008STino Reichardt 	netdev_sent_queue(dev, skb->len);
1838f2148a47SJeff Kirsher 	/* Publish the descriptor contents before handing ownership to the NIC. */
1839e1efa872Sfrançois romieu 	dma_wmb();
1840f2148a47SJeff Kirsher 	rp->tx_ring[entry].tx_status |= cpu_to_le32(DescOwn);
1841f2148a47SJeff Kirsher 	wmb();
1842f2148a47SJeff Kirsher 
1843f2148a47SJeff Kirsher 	rp->cur_tx++;
18443a5a883aSfrançois romieu 	/*
18453a5a883aSfrançois romieu 	 * Don't let the cur_tx update linger unseen once the NIC has been
18463a5a883aSfrançois romieu 	 * handed the transmit request, as the transmit completion handler
18473a5a883aSfrançois romieu 	 * could otherwise miss it.
18483a5a883aSfrançois romieu 	 */
18493a5a883aSfrançois romieu 	smp_wmb();
1850f2148a47SJeff Kirsher 
1851f2148a47SJeff Kirsher 	/* Non-x86 Todo: explicitly flush cache lines here. */
1852f2148a47SJeff Kirsher 
1853df8a39deSJiri Pirko 	if (skb_vlan_tag_present(skb))
1854f2148a47SJeff Kirsher 		/* Tx queues are bits 7-0 (first Tx queue: bit 7) */
1855f2148a47SJeff Kirsher 		BYTE_REG_BITS_ON(1 << 7, ioaddr + TQWake);
1856f2148a47SJeff Kirsher 
1857f2148a47SJeff Kirsher 	/* Wake the potentially-idle transmit channel */
1858f2148a47SJeff Kirsher 	iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
1859f2148a47SJeff Kirsher 	       ioaddr + ChipCmd1);
1860f2148a47SJeff Kirsher 	IOSYNC;
1861f2148a47SJeff Kirsher 
18623a5a883aSfrançois romieu 	/* dirty_tx may be pessimistically out-of-sync. See rhine_tx. */
18633a5a883aSfrançois romieu 	if (rhine_tx_queue_full(rp)) {
1864f2148a47SJeff Kirsher 		netif_stop_queue(dev);
18653a5a883aSfrançois romieu 		smp_rmb();
18663a5a883aSfrançois romieu 		/* Re-check: rhine_tx may have freed slots meanwhile. */
18673a5a883aSfrançois romieu 		if (!rhine_tx_queue_full(rp))
18683a5a883aSfrançois romieu 			netif_wake_queue(dev);
18693a5a883aSfrançois romieu 	}
1870f2148a47SJeff Kirsher 
1871fc3e0f8aSFrancois Romieu 	netif_dbg(rp, tx_queued, dev, "Transmit frame #%d queued in slot %d\n",
1872f2148a47SJeff Kirsher 		  rp->cur_tx - 1, entry);
1873fc3e0f8aSFrancois Romieu 
1874f2148a47SJeff Kirsher 	return NETDEV_TX_OK;
1875f2148a47SJeff Kirsher }
1876f2148a47SJeff Kirsher 
18777ab87ff4SFrancois Romieu static void rhine_irq_disable(struct rhine_private *rp)
18787ab87ff4SFrancois Romieu {
18797ab87ff4SFrancois Romieu 	iowrite16(0x0000, rp->base + IntrEnable);
18807ab87ff4SFrancois Romieu }
18817ab87ff4SFrancois Romieu 
1882f2148a47SJeff Kirsher /* The interrupt handler does all of the Rx thread work and cleans up
1883f2148a47SJeff Kirsher    after the Tx thread. */
1884f2148a47SJeff Kirsher static irqreturn_t rhine_interrupt(int irq, void *dev_instance)
1885f2148a47SJeff Kirsher {
1886f2148a47SJeff Kirsher 	struct net_device *dev = dev_instance;
1887f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
18887ab87ff4SFrancois Romieu 	u32 status;
1889f2148a47SJeff Kirsher 	int handled = 0;
1890f2148a47SJeff Kirsher 
18917ab87ff4SFrancois Romieu 	status = rhine_get_events(rp);
1892f2148a47SJeff Kirsher 
1893fc3e0f8aSFrancois Romieu 	netif_dbg(rp, intr, dev, "Interrupt, status %08x\n", status);
1894f2148a47SJeff Kirsher 
18957ab87ff4SFrancois Romieu 	if (status & RHINE_EVENT) {
18967ab87ff4SFrancois Romieu 		handled = 1;
1897f2148a47SJeff Kirsher 
18987ab87ff4SFrancois Romieu 		rhine_irq_disable(rp);
1899f2148a47SJeff Kirsher 		napi_schedule(&rp->napi);
1900f2148a47SJeff Kirsher 	}
1901f2148a47SJeff Kirsher 
19027ab87ff4SFrancois Romieu 	if (status & ~(IntrLinkChange | IntrStatsMax | RHINE_EVENT_NAPI)) {
1903fc3e0f8aSFrancois Romieu 		netif_err(rp, intr, dev, "Something Wicked happened! %08x\n",
19047ab87ff4SFrancois Romieu 			  status);
1905f2148a47SJeff Kirsher 	}
1906f2148a47SJeff Kirsher 
1907f2148a47SJeff Kirsher 	return IRQ_RETVAL(handled);
1908f2148a47SJeff Kirsher }
1909f2148a47SJeff Kirsher 
1910f2148a47SJeff Kirsher /* This routine is logically part of the interrupt handler, but isolated
1911f2148a47SJeff Kirsher    for clarity. */
1912f2148a47SJeff Kirsher static void rhine_tx(struct net_device *dev)
1913f2148a47SJeff Kirsher {
1914f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
1915f7630d18SAlexey Charkov 	struct device *hwdev = dev->dev.parent;
191692bf2008STino Reichardt 	unsigned int pkts_compl = 0, bytes_compl = 0;
19173a5a883aSfrançois romieu 	unsigned int dirty_tx = rp->dirty_tx;
19183a5a883aSfrançois romieu 	unsigned int cur_tx;
191992bf2008STino Reichardt 	struct sk_buff *skb;
1920f2148a47SJeff Kirsher 
19213a5a883aSfrançois romieu 	/*
19223a5a883aSfrançois romieu 	 * The race with rhine_start_tx does not matter here as long as the
19233a5a883aSfrançois romieu 	 * driver enforces a value of cur_tx that was relevant when the
19243a5a883aSfrançois romieu 	 * packet was scheduled to the network chipset.
19253a5a883aSfrançois romieu 	 * Executive summary: smp_rmb() balances smp_wmb() in rhine_start_tx.
19263a5a883aSfrançois romieu 	 */
19273a5a883aSfrançois romieu 	smp_rmb();
19283a5a883aSfrançois romieu 	cur_tx = rp->cur_tx;
1929f2148a47SJeff Kirsher 	/* find and cleanup dirty tx descriptors */
19303a5a883aSfrançois romieu 	while (dirty_tx != cur_tx) {
19313a5a883aSfrançois romieu 		unsigned int entry = dirty_tx % TX_RING_SIZE;
19323a5a883aSfrançois romieu 		u32 txstatus = le32_to_cpu(rp->tx_ring[entry].tx_status);
19333a5a883aSfrançois romieu 
1934fc3e0f8aSFrancois Romieu 		netif_dbg(rp, tx_done, dev, "Tx scavenge %d status %08x\n",
1935f2148a47SJeff Kirsher 			  entry, txstatus);
1936f2148a47SJeff Kirsher 		if (txstatus & DescOwn)
1937f2148a47SJeff Kirsher 			break;
193892bf2008STino Reichardt 		skb = rp->tx_skbuff[entry];
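		/* Bit 15 flags a transmit error; the checks below map the
		 * remaining status bits onto specific error counters. */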
1939f2148a47SJeff Kirsher 		if (txstatus & 0x8000) {
1940fc3e0f8aSFrancois Romieu 			netif_dbg(rp, tx_done, dev,
1941fc3e0f8aSFrancois Romieu 				  "Transmit error, Tx status %08x\n", txstatus);
1942f2148a47SJeff Kirsher 			dev->stats.tx_errors++;
1943f2148a47SJeff Kirsher 			if (txstatus & 0x0400)
1944f2148a47SJeff Kirsher 				dev->stats.tx_carrier_errors++;
1945f2148a47SJeff Kirsher 			if (txstatus & 0x0200)
1946f2148a47SJeff Kirsher 				dev->stats.tx_window_errors++;
1947f2148a47SJeff Kirsher 			if (txstatus & 0x0100)
1948f2148a47SJeff Kirsher 				dev->stats.tx_aborted_errors++;
1949f2148a47SJeff Kirsher 			if (txstatus & 0x0080)
1950f2148a47SJeff Kirsher 				dev->stats.tx_heartbeat_errors++;
1951f2148a47SJeff Kirsher 			if (((rp->quirks & rqRhineI) && txstatus & 0x0002) ||
1952f2148a47SJeff Kirsher 			    (txstatus & 0x0800) || (txstatus & 0x1000)) {
1953f2148a47SJeff Kirsher 				dev->stats.tx_fifo_errors++;
1954f2148a47SJeff Kirsher 				rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
1955f2148a47SJeff Kirsher 				break; /* Keep the skb - we try again */
1956f2148a47SJeff Kirsher 			}
1957f2148a47SJeff Kirsher 			/* Transmitter restarted in 'abnormal' handler. */
1958f2148a47SJeff Kirsher 		} else {
1959f2148a47SJeff Kirsher 			if (rp->quirks & rqRhineI)
1960f2148a47SJeff Kirsher 				dev->stats.collisions += (txstatus >> 3) & 0x0F;
1961f2148a47SJeff Kirsher 			else
1962f2148a47SJeff Kirsher 				dev->stats.collisions += txstatus & 0x0F;
1963fc3e0f8aSFrancois Romieu 			netif_dbg(rp, tx_done, dev, "collisions: %1.1x:%1.1x\n",
1964fc3e0f8aSFrancois Romieu 				  (txstatus >> 3) & 0xF, txstatus & 0xF);
1965f7b5d1b9SJamie Gloudon 
1966f7b5d1b9SJamie Gloudon 			u64_stats_update_begin(&rp->tx_stats.syncp);
196792bf2008STino Reichardt 			rp->tx_stats.bytes += skb->len;
1968f7b5d1b9SJamie Gloudon 			rp->tx_stats.packets++;
1969f7b5d1b9SJamie Gloudon 			u64_stats_update_end(&rp->tx_stats.syncp);
1970f2148a47SJeff Kirsher 		}
1971f2148a47SJeff Kirsher 		/* Free the original skb. */
1972f2148a47SJeff Kirsher 		if (rp->tx_skbuff_dma[entry]) {
1973f7630d18SAlexey Charkov 			dma_unmap_single(hwdev,
1974f2148a47SJeff Kirsher 					 rp->tx_skbuff_dma[entry],
197592bf2008STino Reichardt 					 skb->len,
19764087c4dcSAlexey Charkov 					 DMA_TO_DEVICE);
1977f2148a47SJeff Kirsher 		}
197892bf2008STino Reichardt 		bytes_compl += skb->len;
197992bf2008STino Reichardt 		pkts_compl++;
198092bf2008STino Reichardt 		dev_consume_skb_any(skb);
1981f2148a47SJeff Kirsher 		rp->tx_skbuff[entry] = NULL;
19823a5a883aSfrançois romieu 		dirty_tx++;
1983f2148a47SJeff Kirsher 	}
198492bf2008STino Reichardt 
19853a5a883aSfrançois romieu 	rp->dirty_tx = dirty_tx;
19863a5a883aSfrançois romieu 	/* Pity we can't rely on the nearby BQL completion implicit barrier. */
19873a5a883aSfrançois romieu 	smp_wmb();
19883a5a883aSfrançois romieu 
198992bf2008STino Reichardt 	netdev_completed_queue(dev, pkts_compl, bytes_compl);
19903a5a883aSfrançois romieu 
19913a5a883aSfrançois romieu 	/* cur_tx may be optimistically out-of-sync. See rhine_start_tx. */
19923a5a883aSfrançois romieu 	if (!rhine_tx_queue_full(rp) && netif_queue_stopped(dev)) {
1993f2148a47SJeff Kirsher 		netif_wake_queue(dev);
19943a5a883aSfrançois romieu 		smp_rmb();
19953a5a883aSfrançois romieu 		/* Re-check: rhine_start_tx may have refilled the ring meanwhile. */
19963a5a883aSfrançois romieu 		if (rhine_tx_queue_full(rp))
19973a5a883aSfrançois romieu 			netif_stop_queue(dev);
19983a5a883aSfrançois romieu 	}
1999f2148a47SJeff Kirsher }
2000f2148a47SJeff Kirsher 
2001f2148a47SJeff Kirsher /**
2002f2148a47SJeff Kirsher  * rhine_get_vlan_tci - extract TCI from Rx data buffer
2003f2148a47SJeff Kirsher  * @skb: pointer to sk_buff
2004f2148a47SJeff Kirsher  * @data_size: used data area of the buffer including CRC
2005f2148a47SJeff Kirsher  *
2006f2148a47SJeff Kirsher  * If hardware VLAN tag extraction is enabled and the chip indicates a 802.1Q
2007f2148a47SJeff Kirsher  * packet, the extracted 802.1Q header (2 bytes TPID + 2 bytes TCI) is 4-byte
2008f2148a47SJeff Kirsher  * aligned following the CRC.
2009f2148a47SJeff Kirsher  */
2010f2148a47SJeff Kirsher static inline u16 rhine_get_vlan_tci(struct sk_buff *skb, int data_size)
2011f2148a47SJeff Kirsher {
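	/* Round the data area up to a 4-byte boundary, then skip the 2-byte
	 * TPID to reach the TCI. */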
2012f2148a47SJeff Kirsher 	u8 *trailer = (u8 *)skb->data + ((data_size + 3) & ~3) + 2;
2013f2148a47SJeff Kirsher 	return be16_to_cpup((__be16 *)trailer);
2014f2148a47SJeff Kirsher }
2015f2148a47SJeff Kirsher 
2016810f19bcSfrançois romieu static inline void rhine_rx_vlan_tag(struct sk_buff *skb, struct rx_desc *desc,
2017810f19bcSfrançois romieu 				     int data_size)
2018810f19bcSfrançois romieu {
2019810f19bcSfrançois romieu 	dma_rmb();
2020810f19bcSfrançois romieu 	if (unlikely(desc->desc_length & cpu_to_le32(DescTag))) {
2021810f19bcSfrançois romieu 		u16 vlan_tci;
2022810f19bcSfrançois romieu 
2023810f19bcSfrançois romieu 		vlan_tci = rhine_get_vlan_tci(skb, data_size);
2024810f19bcSfrançois romieu 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
2025810f19bcSfrançois romieu 	}
2026810f19bcSfrançois romieu }
2027810f19bcSfrançois romieu 
2028f2148a47SJeff Kirsher /* Process up to limit frames from receive ring */
2029f2148a47SJeff Kirsher static int rhine_rx(struct net_device *dev, int limit)
2030f2148a47SJeff Kirsher {
2031f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
2032f7630d18SAlexey Charkov 	struct device *hwdev = dev->dev.parent;
2033f2148a47SJeff Kirsher 	int entry = rp->cur_rx % RX_RING_SIZE;
203462ca1ba0Sfrançois romieu 	int count;
2035f2148a47SJeff Kirsher 
2036fc3e0f8aSFrancois Romieu 	netif_dbg(rp, rx_status, dev, "%s(), entry %d status %08x\n", __func__,
203762ca1ba0Sfrançois romieu 		  entry, le32_to_cpu(rp->rx_ring[entry].rx_status));
2038f2148a47SJeff Kirsher 
2039f2148a47SJeff Kirsher 	/* If EOP is set on the next entry, it's a new packet. Send it up. */
2040f2148a47SJeff Kirsher 	for (count = 0; count < limit; ++count) {
204162ca1ba0Sfrançois romieu 		struct rx_desc *desc = rp->rx_ring + entry;
2042f2148a47SJeff Kirsher 		u32 desc_status = le32_to_cpu(desc->rx_status);
2043f2148a47SJeff Kirsher 		int data_size = desc_status >> 16;
2044f2148a47SJeff Kirsher 
2045f2148a47SJeff Kirsher 		if (desc_status & DescOwn)
2046f2148a47SJeff Kirsher 			break;
2047f2148a47SJeff Kirsher 
2048fc3e0f8aSFrancois Romieu 		netif_dbg(rp, rx_status, dev, "%s() status %08x\n", __func__,
2049fc3e0f8aSFrancois Romieu 			  desc_status);
2050f2148a47SJeff Kirsher 
2051f2148a47SJeff Kirsher 		if ((desc_status & (RxWholePkt | RxErr)) != RxWholePkt) {
2052f2148a47SJeff Kirsher 			if ((desc_status & RxWholePkt) != RxWholePkt) {
2053f2148a47SJeff Kirsher 				netdev_warn(dev,
2054f2148a47SJeff Kirsher 	"Oversized Ethernet frame spanned multiple buffers, "
2055f2148a47SJeff Kirsher 	"entry %#x length %d status %08x!\n",
2056f2148a47SJeff Kirsher 					    entry, data_size,
2057f2148a47SJeff Kirsher 					    desc_status);
2058f2148a47SJeff Kirsher 				dev->stats.rx_length_errors++;
2059f2148a47SJeff Kirsher 			} else if (desc_status & RxErr) {
2060f2148a47SJeff Kirsher 				/* There was an error. */
2061fc3e0f8aSFrancois Romieu 				netif_dbg(rp, rx_err, dev,
2062fc3e0f8aSFrancois Romieu 					  "%s() Rx error %08x\n", __func__,
2063fc3e0f8aSFrancois Romieu 					  desc_status);
2064f2148a47SJeff Kirsher 				dev->stats.rx_errors++;
2065f2148a47SJeff Kirsher 				if (desc_status & 0x0030)
2066f2148a47SJeff Kirsher 					dev->stats.rx_length_errors++;
2067f2148a47SJeff Kirsher 				if (desc_status & 0x0048)
2068f2148a47SJeff Kirsher 					dev->stats.rx_fifo_errors++;
2069f2148a47SJeff Kirsher 				if (desc_status & 0x0004)
2070f2148a47SJeff Kirsher 					dev->stats.rx_frame_errors++;
2071f2148a47SJeff Kirsher 				if (desc_status & 0x0002) {
2072f2148a47SJeff Kirsher 					/* this can also be updated outside the interrupt handler */
2073f2148a47SJeff Kirsher 					spin_lock(&rp->lock);
2074f2148a47SJeff Kirsher 					dev->stats.rx_crc_errors++;
2075f2148a47SJeff Kirsher 					spin_unlock(&rp->lock);
2076f2148a47SJeff Kirsher 				}
2077f2148a47SJeff Kirsher 			}
2078f2148a47SJeff Kirsher 		} else {
2079f2148a47SJeff Kirsher 			/* Length should omit the CRC */
2080f2148a47SJeff Kirsher 			int pkt_len = data_size - 4;
20818709bb2cSfrançois romieu 			struct sk_buff *skb;
2082f2148a47SJeff Kirsher 
2083f2148a47SJeff Kirsher 			/* Check if the packet is long enough to accept without
2084f2148a47SJeff Kirsher 			   copying to a minimally-sized skbuff. */
20858709bb2cSfrançois romieu 			if (pkt_len < rx_copybreak) {
2086f2148a47SJeff Kirsher 				skb = netdev_alloc_skb_ip_align(dev, pkt_len);
20878709bb2cSfrançois romieu 				if (unlikely(!skb))
20888709bb2cSfrançois romieu 					goto drop;
20898709bb2cSfrançois romieu 
2090f7630d18SAlexey Charkov 				dma_sync_single_for_cpu(hwdev,
2091f2148a47SJeff Kirsher 							rp->rx_skbuff_dma[entry],
2092f2148a47SJeff Kirsher 							rp->rx_buf_sz,
20934087c4dcSAlexey Charkov 							DMA_FROM_DEVICE);
2094f2148a47SJeff Kirsher 
2095f2148a47SJeff Kirsher 				skb_copy_to_linear_data(skb,
2096f2148a47SJeff Kirsher 						 rp->rx_skbuff[entry]->data,
2097f2148a47SJeff Kirsher 						 pkt_len);
20988709bb2cSfrançois romieu 
2099f7630d18SAlexey Charkov 				dma_sync_single_for_device(hwdev,
2100f2148a47SJeff Kirsher 							   rp->rx_skbuff_dma[entry],
2101f2148a47SJeff Kirsher 							   rp->rx_buf_sz,
21024087c4dcSAlexey Charkov 							   DMA_FROM_DEVICE);
2103f2148a47SJeff Kirsher 			} else {
21048709bb2cSfrançois romieu 				struct rhine_skb_dma sd;
21058709bb2cSfrançois romieu 
21068709bb2cSfrançois romieu 				if (unlikely(rhine_skb_dma_init(dev, &sd) < 0))
21078709bb2cSfrançois romieu 					goto drop;
21088709bb2cSfrançois romieu 
2109f2148a47SJeff Kirsher 				skb = rp->rx_skbuff[entry];
21108709bb2cSfrançois romieu 
2111f7630d18SAlexey Charkov 				dma_unmap_single(hwdev,
2112f2148a47SJeff Kirsher 						 rp->rx_skbuff_dma[entry],
2113f2148a47SJeff Kirsher 						 rp->rx_buf_sz,
21144087c4dcSAlexey Charkov 						 DMA_FROM_DEVICE);
21158709bb2cSfrançois romieu 				rhine_skb_dma_nic_store(rp, &sd, entry);
2116f2148a47SJeff Kirsher 			}
2117f2148a47SJeff Kirsher 
21188709bb2cSfrançois romieu 			skb_put(skb, pkt_len);
2119f2148a47SJeff Kirsher 
2120810f19bcSfrançois romieu 			rhine_rx_vlan_tag(skb, desc, data_size);
2121810f19bcSfrançois romieu 
21225f715c09SAndrej Ota 			skb->protocol = eth_type_trans(skb, dev);
21235f715c09SAndrej Ota 
2124f2148a47SJeff Kirsher 			netif_receive_skb(skb);
2125f7b5d1b9SJamie Gloudon 
2126f7b5d1b9SJamie Gloudon 			u64_stats_update_begin(&rp->rx_stats.syncp);
2127f7b5d1b9SJamie Gloudon 			rp->rx_stats.bytes += pkt_len;
2128f7b5d1b9SJamie Gloudon 			rp->rx_stats.packets++;
2129f7b5d1b9SJamie Gloudon 			u64_stats_update_end(&rp->rx_stats.syncp);
2130f2148a47SJeff Kirsher 		}
21318709bb2cSfrançois romieu give_descriptor_to_nic:
21328709bb2cSfrançois romieu 		desc->rx_status = cpu_to_le32(DescOwn);
2133f2148a47SJeff Kirsher 		entry = (++rp->cur_rx) % RX_RING_SIZE;
2134f2148a47SJeff Kirsher 	}
2135f2148a47SJeff Kirsher 
2136f2148a47SJeff Kirsher 	return count;
21378709bb2cSfrançois romieu 
21388709bb2cSfrançois romieu drop:
21398709bb2cSfrançois romieu 	dev->stats.rx_dropped++;
21408709bb2cSfrançois romieu 	goto give_descriptor_to_nic;
2141f2148a47SJeff Kirsher }
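
/*
 * Editorial sketch, not driver code: a minimal NAPI poll handler built on
 * rhine_rx()'s "frames processed" return value. The driver's real poll
 * routine elsewhere in this file also services Tx completion and slow
 * events; the example_poll name and the irq re-enable helper below are
 * hypothetical.
 */
#if 0
static int example_poll(struct napi_struct *napi, int budget)
{
	struct rhine_private *rp = container_of(napi, struct rhine_private,
						napi);
	int work_done = rhine_rx(rp->dev, budget);

	/* Stop polling once the ring is drained within budget... */
	if (work_done < budget && napi_complete_done(napi, work_done)) {
		/* ...then re-enable interrupts (hypothetical helper). */
		example_enable_rx_irq(rp);
	}
	return work_done;
}
#endif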
2142f2148a47SJeff Kirsher 
2143f2148a47SJeff Kirsher static void rhine_restart_tx(struct net_device *dev) {
2144f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
2145f2148a47SJeff Kirsher 	void __iomem *ioaddr = rp->base;
2146f2148a47SJeff Kirsher 	int entry = rp->dirty_tx % TX_RING_SIZE;
2147f2148a47SJeff Kirsher 	u32 intr_status;
2148f2148a47SJeff Kirsher 
2149f2148a47SJeff Kirsher 	/*
2150f2148a47SJeff Kirsher 	 * If new errors occurred, we need to sort them out before doing Tx.
2151f2148a47SJeff Kirsher 	 * In that case the ISR will come back here soon anyway.
2152f2148a47SJeff Kirsher 	 */
2153a20a28bcSFrancois Romieu 	intr_status = rhine_get_events(rp);
2154f2148a47SJeff Kirsher 
2155f2148a47SJeff Kirsher 	if ((intr_status & IntrTxErrSummary) == 0) {
2156f2148a47SJeff Kirsher 
2157f2148a47SJeff Kirsher 		/* We know better than the chip where it should continue. */
2158f2148a47SJeff Kirsher 		iowrite32(rp->tx_ring_dma + entry * sizeof(struct tx_desc),
2159f2148a47SJeff Kirsher 		       ioaddr + TxRingPtr);
2160f2148a47SJeff Kirsher 
2161f2148a47SJeff Kirsher 		iowrite8(ioread8(ioaddr + ChipCmd) | CmdTxOn,
2162f2148a47SJeff Kirsher 		       ioaddr + ChipCmd);
2163f2148a47SJeff Kirsher 
2164f2148a47SJeff Kirsher 		if (rp->tx_ring[entry].desc_length & cpu_to_le32(0x020000))
2165f2148a47SJeff Kirsher 			/* Tx queues are bits 7-0 (first Tx queue: bit 7) */
2166f2148a47SJeff Kirsher 			BYTE_REG_BITS_ON(1 << 7, ioaddr + TQWake);
2167f2148a47SJeff Kirsher 
2168f2148a47SJeff Kirsher 		iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
2169f2148a47SJeff Kirsher 		       ioaddr + ChipCmd1);
2170f2148a47SJeff Kirsher 		IOSYNC;
2171f2148a47SJeff Kirsher 	} else {
2173f2148a47SJeff Kirsher 		/* This should never happen */
2174fc3e0f8aSFrancois Romieu 		netif_warn(rp, tx_err, dev, "another error occurred %08x\n",
2175fc3e0f8aSFrancois Romieu 			   intr_status);
2176f2148a47SJeff Kirsher 	}
2177f2148a47SJeff Kirsher 
2178f2148a47SJeff Kirsher }
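
/*
 * Editorial note: rp->dirty_tx trails rp->cur_tx, so "entry" above is the
 * oldest unreclaimed descriptor and the TxRingPtr write resumes
 * transmission at exactly the frame the error interrupted.
 */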
2179f2148a47SJeff Kirsher 
21807ab87ff4SFrancois Romieu static void rhine_slow_event_task(struct work_struct *work)
2181f2148a47SJeff Kirsher {
21827ab87ff4SFrancois Romieu 	struct rhine_private *rp =
21837ab87ff4SFrancois Romieu 		container_of(work, struct rhine_private, slow_event_task);
21847ab87ff4SFrancois Romieu 	struct net_device *dev = rp->dev;
21857ab87ff4SFrancois Romieu 	u32 intr_status;
2186f2148a47SJeff Kirsher 
21877ab87ff4SFrancois Romieu 	mutex_lock(&rp->task_lock);
21887ab87ff4SFrancois Romieu 
21897ab87ff4SFrancois Romieu 	if (!rp->task_enable)
21907ab87ff4SFrancois Romieu 		goto out_unlock;
21917ab87ff4SFrancois Romieu 
21927ab87ff4SFrancois Romieu 	intr_status = rhine_get_events(rp);
21937ab87ff4SFrancois Romieu 	rhine_ack_events(rp, intr_status & RHINE_EVENT_SLOW);
2194f2148a47SJeff Kirsher 
2195f2148a47SJeff Kirsher 	if (intr_status & IntrLinkChange)
2196f2148a47SJeff Kirsher 		rhine_check_media(dev, 0);
2197f2148a47SJeff Kirsher 
2198fc3e0f8aSFrancois Romieu 	if (intr_status & IntrPCIErr)
2199fc3e0f8aSFrancois Romieu 		netif_warn(rp, hw, dev, "PCI error\n");
2200fc3e0f8aSFrancois Romieu 
2201559bcac3SDavid S. Miller 	iowrite16(RHINE_EVENT & 0xffff, rp->base + IntrEnable);
2202f2148a47SJeff Kirsher 
22037ab87ff4SFrancois Romieu out_unlock:
22047ab87ff4SFrancois Romieu 	mutex_unlock(&rp->task_lock);
2205f2148a47SJeff Kirsher }
2206f2148a47SJeff Kirsher 
2207bc1f4470Sstephen hemminger static void
2208f7b5d1b9SJamie Gloudon rhine_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
2209f2148a47SJeff Kirsher {
2210f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
2211f7b5d1b9SJamie Gloudon 	unsigned int start;
2212f2148a47SJeff Kirsher 
22137ab87ff4SFrancois Romieu 	spin_lock_bh(&rp->lock);
22147ab87ff4SFrancois Romieu 	rhine_update_rx_crc_and_missed_errord(rp);
22157ab87ff4SFrancois Romieu 	spin_unlock_bh(&rp->lock);
2216f2148a47SJeff Kirsher 
2217f7b5d1b9SJamie Gloudon 	netdev_stats_to_stats64(stats, &dev->stats);
2218f7b5d1b9SJamie Gloudon 
2219f7b5d1b9SJamie Gloudon 	do {
2220068c38adSThomas Gleixner 		start = u64_stats_fetch_begin(&rp->rx_stats.syncp);
2221f7b5d1b9SJamie Gloudon 		stats->rx_packets = rp->rx_stats.packets;
2222f7b5d1b9SJamie Gloudon 		stats->rx_bytes = rp->rx_stats.bytes;
2223068c38adSThomas Gleixner 	} while (u64_stats_fetch_retry(&rp->rx_stats.syncp, start));
2224f7b5d1b9SJamie Gloudon 
2225f7b5d1b9SJamie Gloudon 	do {
2226068c38adSThomas Gleixner 		start = u64_stats_fetch_begin(&rp->tx_stats.syncp);
2227f7b5d1b9SJamie Gloudon 		stats->tx_packets = rp->tx_stats.packets;
2228f7b5d1b9SJamie Gloudon 		stats->tx_bytes = rp->tx_stats.bytes;
2229068c38adSThomas Gleixner 	} while (u64_stats_fetch_retry(&rp->tx_stats.syncp, start));
2230f2148a47SJeff Kirsher }
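
/*
 * Editorial note: the fetch/retry loops above pair with the
 * u64_stats_update_begin()/u64_stats_update_end() writer sections in the
 * Rx and Tx fast paths; on 32-bit SMP the seqcount retry yields a
 * torn-free 64-bit snapshot without locking the hot path.
 */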
2231f2148a47SJeff Kirsher 
2232f2148a47SJeff Kirsher static void rhine_set_rx_mode(struct net_device *dev)
2233f2148a47SJeff Kirsher {
2234f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
2235f2148a47SJeff Kirsher 	void __iomem *ioaddr = rp->base;
2236f2148a47SJeff Kirsher 	u32 mc_filter[2];	/* Multicast hash filter */
2237f2148a47SJeff Kirsher 	u8 rx_mode = 0x0C;	/* Note: 0x02=accept runt, 0x01=accept errs */
2238f2148a47SJeff Kirsher 	struct netdev_hw_addr *ha;
2239f2148a47SJeff Kirsher 
2240f2148a47SJeff Kirsher 	if (dev->flags & IFF_PROMISC) {		/* Set promiscuous. */
2241f2148a47SJeff Kirsher 		rx_mode = 0x1C;
2242f2148a47SJeff Kirsher 		iowrite32(0xffffffff, ioaddr + MulticastFilter0);
2243f2148a47SJeff Kirsher 		iowrite32(0xffffffff, ioaddr + MulticastFilter1);
2244f2148a47SJeff Kirsher 	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
2245f2148a47SJeff Kirsher 		   (dev->flags & IFF_ALLMULTI)) {
2246f2148a47SJeff Kirsher 		/* Too many to match, or accept all multicasts. */
2247f2148a47SJeff Kirsher 		iowrite32(0xffffffff, ioaddr + MulticastFilter0);
2248f2148a47SJeff Kirsher 		iowrite32(0xffffffff, ioaddr + MulticastFilter1);
2249ca8b6e04SAlexey Charkov 	} else if (rp->quirks & rqMgmt) {
2250f2148a47SJeff Kirsher 		int i = 0;
2251f2148a47SJeff Kirsher 		u32 mCAMmask = 0;	/* 32 mCAMs (6105M and better) */
2252f2148a47SJeff Kirsher 		netdev_for_each_mc_addr(ha, dev) {
2253f2148a47SJeff Kirsher 			if (i == MCAM_SIZE)
2254f2148a47SJeff Kirsher 				break;
2255f2148a47SJeff Kirsher 			rhine_set_cam(ioaddr, i, ha->addr);
2256f2148a47SJeff Kirsher 			mCAMmask |= 1 << i;
2257f2148a47SJeff Kirsher 			i++;
2258f2148a47SJeff Kirsher 		}
2259f2148a47SJeff Kirsher 		rhine_set_cam_mask(ioaddr, mCAMmask);
2260f2148a47SJeff Kirsher 	} else {
2261f2148a47SJeff Kirsher 		memset(mc_filter, 0, sizeof(mc_filter));
2262f2148a47SJeff Kirsher 		netdev_for_each_mc_addr(ha, dev) {
2263f2148a47SJeff Kirsher 			int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
2264f2148a47SJeff Kirsher 
2265f2148a47SJeff Kirsher 			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
2266f2148a47SJeff Kirsher 		}
2267f2148a47SJeff Kirsher 		iowrite32(mc_filter[0], ioaddr + MulticastFilter0);
2268f2148a47SJeff Kirsher 		iowrite32(mc_filter[1], ioaddr + MulticastFilter1);
2269f2148a47SJeff Kirsher 	}
2270f2148a47SJeff Kirsher 	/* enable/disable VLAN receive filtering */
2271ca8b6e04SAlexey Charkov 	if (rp->quirks & rqMgmt) {
2272f2148a47SJeff Kirsher 		if (dev->flags & IFF_PROMISC)
2273f2148a47SJeff Kirsher 			BYTE_REG_BITS_OFF(BCR1_VIDFR, ioaddr + PCIBusConfig1);
2274f2148a47SJeff Kirsher 		else
2275f2148a47SJeff Kirsher 			BYTE_REG_BITS_ON(BCR1_VIDFR, ioaddr + PCIBusConfig1);
2276f2148a47SJeff Kirsher 	}
2277f2148a47SJeff Kirsher 	BYTE_REG_BITS_ON(rx_mode, ioaddr + RxConfig);
2278f2148a47SJeff Kirsher }
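
/*
 * Worked example (editorial): ether_crc() returns a 32-bit CRC whose top
 * six bits (crc >> 26) select one of 64 hash-filter bits; bit_nr >> 5
 * picks MulticastFilter0 or MulticastFilter1 and 1 << (bit_nr & 31) is
 * the bit within it, so e.g. bit_nr = 37 sets bit 5 of MulticastFilter1.
 */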
2279f2148a47SJeff Kirsher 
2280f2148a47SJeff Kirsher static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
2281f2148a47SJeff Kirsher {
2282f7630d18SAlexey Charkov 	struct device *hwdev = dev->dev.parent;
2283f2148a47SJeff Kirsher 
2284f029c781SWolfram Sang 	strscpy(info->driver, DRV_NAME, sizeof(info->driver));
2285f029c781SWolfram Sang 	strscpy(info->bus_info, dev_name(hwdev), sizeof(info->bus_info));
2286f2148a47SJeff Kirsher }
2287f2148a47SJeff Kirsher 
2288f918b986SPhilippe Reynes static int netdev_get_link_ksettings(struct net_device *dev,
2289f918b986SPhilippe Reynes 				     struct ethtool_link_ksettings *cmd)
2290f2148a47SJeff Kirsher {
2291f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
2292f2148a47SJeff Kirsher 
22937ab87ff4SFrancois Romieu 	mutex_lock(&rp->task_lock);
229482c01a84Syuval.shaia@oracle.com 	mii_ethtool_get_link_ksettings(&rp->mii_if, cmd);
22957ab87ff4SFrancois Romieu 	mutex_unlock(&rp->task_lock);
2296f2148a47SJeff Kirsher 
229782c01a84Syuval.shaia@oracle.com 	return 0;
2298f2148a47SJeff Kirsher }
2299f2148a47SJeff Kirsher 
2300f918b986SPhilippe Reynes static int netdev_set_link_ksettings(struct net_device *dev,
2301f918b986SPhilippe Reynes 				     const struct ethtool_link_ksettings *cmd)
2302f2148a47SJeff Kirsher {
2303f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
2304f2148a47SJeff Kirsher 	int rc;
2305f2148a47SJeff Kirsher 
23067ab87ff4SFrancois Romieu 	mutex_lock(&rp->task_lock);
2307f918b986SPhilippe Reynes 	rc = mii_ethtool_set_link_ksettings(&rp->mii_if, cmd);
2308f2148a47SJeff Kirsher 	rhine_set_carrier(&rp->mii_if);
23097ab87ff4SFrancois Romieu 	mutex_unlock(&rp->task_lock);
2310f2148a47SJeff Kirsher 
2311f2148a47SJeff Kirsher 	return rc;
2312f2148a47SJeff Kirsher }
2313f2148a47SJeff Kirsher 
2314f2148a47SJeff Kirsher static int netdev_nway_reset(struct net_device *dev)
2315f2148a47SJeff Kirsher {
2316f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
2317f2148a47SJeff Kirsher 
2318f2148a47SJeff Kirsher 	return mii_nway_restart(&rp->mii_if);
2319f2148a47SJeff Kirsher }
2320f2148a47SJeff Kirsher 
2321f2148a47SJeff Kirsher static u32 netdev_get_link(struct net_device *dev)
2322f2148a47SJeff Kirsher {
2323f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
2324f2148a47SJeff Kirsher 
2325f2148a47SJeff Kirsher 	return mii_link_ok(&rp->mii_if);
2326f2148a47SJeff Kirsher }
2327f2148a47SJeff Kirsher 
2328f2148a47SJeff Kirsher static u32 netdev_get_msglevel(struct net_device *dev)
2329f2148a47SJeff Kirsher {
2330fc3e0f8aSFrancois Romieu 	struct rhine_private *rp = netdev_priv(dev);
2331fc3e0f8aSFrancois Romieu 
2332fc3e0f8aSFrancois Romieu 	return rp->msg_enable;
2333f2148a47SJeff Kirsher }
2334f2148a47SJeff Kirsher 
2335f2148a47SJeff Kirsher static void netdev_set_msglevel(struct net_device *dev, u32 value)
2336f2148a47SJeff Kirsher {
2337fc3e0f8aSFrancois Romieu 	struct rhine_private *rp = netdev_priv(dev);
2338fc3e0f8aSFrancois Romieu 
2339fc3e0f8aSFrancois Romieu 	rp->msg_enable = value;
2340f2148a47SJeff Kirsher }
2341f2148a47SJeff Kirsher 
2342f2148a47SJeff Kirsher static void rhine_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2343f2148a47SJeff Kirsher {
2344f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
2345f2148a47SJeff Kirsher 
2346f2148a47SJeff Kirsher 	if (!(rp->quirks & rqWOL))
2347f2148a47SJeff Kirsher 		return;
2348f2148a47SJeff Kirsher 
2349f2148a47SJeff Kirsher 	spin_lock_irq(&rp->lock);
2350f2148a47SJeff Kirsher 	wol->supported = WAKE_PHY | WAKE_MAGIC |
2351f2148a47SJeff Kirsher 			 WAKE_UCAST | WAKE_MCAST | WAKE_BCAST;	/* Untested */
2352f2148a47SJeff Kirsher 	wol->wolopts = rp->wolopts;
2353f2148a47SJeff Kirsher 	spin_unlock_irq(&rp->lock);
2354f2148a47SJeff Kirsher }
2355f2148a47SJeff Kirsher 
2356f2148a47SJeff Kirsher static int rhine_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2357f2148a47SJeff Kirsher {
2358f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
2359f2148a47SJeff Kirsher 	u32 support = WAKE_PHY | WAKE_MAGIC |
2360f2148a47SJeff Kirsher 		      WAKE_UCAST | WAKE_MCAST | WAKE_BCAST;	/* Untested */
2361f2148a47SJeff Kirsher 
2362f2148a47SJeff Kirsher 	if (!(rp->quirks & rqWOL))
2363f2148a47SJeff Kirsher 		return -EINVAL;
2364f2148a47SJeff Kirsher 
2365f2148a47SJeff Kirsher 	if (wol->wolopts & ~support)
2366f2148a47SJeff Kirsher 		return -EINVAL;
2367f2148a47SJeff Kirsher 
2368f2148a47SJeff Kirsher 	spin_lock_irq(&rp->lock);
2369f2148a47SJeff Kirsher 	rp->wolopts = wol->wolopts;
2370f2148a47SJeff Kirsher 	spin_unlock_irq(&rp->lock);
2371f2148a47SJeff Kirsher 
2372f2148a47SJeff Kirsher 	return 0;
2373f2148a47SJeff Kirsher }
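
/*
 * Editorial note: rp->wolopts stored here is consumed by
 * rhine_shutdown_pci() below, where each WAKE_* bit becomes the matching
 * WOLcrSet/WOLcgSet register write before the optional D3 transition.
 */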
2374f2148a47SJeff Kirsher 
2375f2148a47SJeff Kirsher static const struct ethtool_ops netdev_ethtool_ops = {
2376f2148a47SJeff Kirsher 	.get_drvinfo		= netdev_get_drvinfo,
2377f2148a47SJeff Kirsher 	.nway_reset		= netdev_nway_reset,
2378f2148a47SJeff Kirsher 	.get_link		= netdev_get_link,
2379f2148a47SJeff Kirsher 	.get_msglevel		= netdev_get_msglevel,
2380f2148a47SJeff Kirsher 	.set_msglevel		= netdev_set_msglevel,
2381f2148a47SJeff Kirsher 	.get_wol		= rhine_get_wol,
2382f2148a47SJeff Kirsher 	.set_wol		= rhine_set_wol,
2383f918b986SPhilippe Reynes 	.get_link_ksettings	= netdev_get_link_ksettings,
2384f918b986SPhilippe Reynes 	.set_link_ksettings	= netdev_set_link_ksettings,
2385f2148a47SJeff Kirsher };
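
/*
 * Editorial note: these hooks back ethtool(8); for example (interface
 * name hypothetical) "ethtool eth0" reports through
 * netdev_get_link_ksettings() and "ethtool -s eth0 wol g" reaches
 * rhine_set_wol() with WAKE_MAGIC set.
 */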
2386f2148a47SJeff Kirsher 
2387f2148a47SJeff Kirsher static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2388f2148a47SJeff Kirsher {
2389f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
2390f2148a47SJeff Kirsher 	int rc;
2391f2148a47SJeff Kirsher 
2392f2148a47SJeff Kirsher 	if (!netif_running(dev))
2393f2148a47SJeff Kirsher 		return -EINVAL;
2394f2148a47SJeff Kirsher 
23957ab87ff4SFrancois Romieu 	mutex_lock(&rp->task_lock);
2396f2148a47SJeff Kirsher 	rc = generic_mii_ioctl(&rp->mii_if, if_mii(rq), cmd, NULL);
2397f2148a47SJeff Kirsher 	rhine_set_carrier(&rp->mii_if);
23987ab87ff4SFrancois Romieu 	mutex_unlock(&rp->task_lock);
2399f2148a47SJeff Kirsher 
2400f2148a47SJeff Kirsher 	return rc;
2401f2148a47SJeff Kirsher }
2402f2148a47SJeff Kirsher 
2403f2148a47SJeff Kirsher static int rhine_close(struct net_device *dev)
2404f2148a47SJeff Kirsher {
2405f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
2406f2148a47SJeff Kirsher 	void __iomem *ioaddr = rp->base;
2407f2148a47SJeff Kirsher 
24087ab87ff4SFrancois Romieu 	rhine_task_disable(rp);
2409f2148a47SJeff Kirsher 	napi_disable(&rp->napi);
2410f2148a47SJeff Kirsher 	netif_stop_queue(dev);
2411f2148a47SJeff Kirsher 
2412fc3e0f8aSFrancois Romieu 	netif_dbg(rp, ifdown, dev, "Shutting down ethercard, status was %04x\n",
2413f2148a47SJeff Kirsher 		  ioread16(ioaddr + ChipCmd));
2414f2148a47SJeff Kirsher 
2415f2148a47SJeff Kirsher 	/* Switch to loopback mode to avoid hardware races. */
2416f2148a47SJeff Kirsher 	iowrite8(rp->tx_thresh | 0x02, ioaddr + TxConfig);
2417f2148a47SJeff Kirsher 
24187ab87ff4SFrancois Romieu 	rhine_irq_disable(rp);
2419f2148a47SJeff Kirsher 
2420f2148a47SJeff Kirsher 	/* Stop the chip's Tx and Rx processes. */
2421f2148a47SJeff Kirsher 	iowrite16(CmdStop, ioaddr + ChipCmd);
2422f2148a47SJeff Kirsher 
2423f7630d18SAlexey Charkov 	free_irq(rp->irq, dev);
2424f2148a47SJeff Kirsher 	free_rbufs(dev);
2425f2148a47SJeff Kirsher 	free_tbufs(dev);
2426f2148a47SJeff Kirsher 	free_ring(dev);
2427f2148a47SJeff Kirsher 
2428f2148a47SJeff Kirsher 	return 0;
2429f2148a47SJeff Kirsher }
2430f2148a47SJeff Kirsher 
2431f2148a47SJeff Kirsher 
24322d283862SAlexey Charkov static void rhine_remove_one_pci(struct pci_dev *pdev)
2433f2148a47SJeff Kirsher {
2434f2148a47SJeff Kirsher 	struct net_device *dev = pci_get_drvdata(pdev);
2435f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
2436f2148a47SJeff Kirsher 
2437f2148a47SJeff Kirsher 	unregister_netdev(dev);
2438f2148a47SJeff Kirsher 
2439f2148a47SJeff Kirsher 	pci_iounmap(pdev, rp->base);
2440f2148a47SJeff Kirsher 	pci_release_regions(pdev);
2441f2148a47SJeff Kirsher 
2442f2148a47SJeff Kirsher 	free_netdev(dev);
2443f2148a47SJeff Kirsher 	pci_disable_device(pdev);
2444f2148a47SJeff Kirsher }
2445f2148a47SJeff Kirsher 
24462d283862SAlexey Charkov static int rhine_remove_one_platform(struct platform_device *pdev)
24472d283862SAlexey Charkov {
24482d283862SAlexey Charkov 	struct net_device *dev = platform_get_drvdata(pdev);
24492d283862SAlexey Charkov 	struct rhine_private *rp = netdev_priv(dev);
24502d283862SAlexey Charkov 
24512d283862SAlexey Charkov 	unregister_netdev(dev);
24522d283862SAlexey Charkov 
24532d283862SAlexey Charkov 	iounmap(rp->base);
24542d283862SAlexey Charkov 
24552d283862SAlexey Charkov 	free_netdev(dev);
24562d283862SAlexey Charkov 
24572d283862SAlexey Charkov 	return 0;
24582d283862SAlexey Charkov }
24592d283862SAlexey Charkov 
24602d283862SAlexey Charkov static void rhine_shutdown_pci(struct pci_dev *pdev)
2461f2148a47SJeff Kirsher {
2462f2148a47SJeff Kirsher 	struct net_device *dev = pci_get_drvdata(pdev);
2463f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
2464f2148a47SJeff Kirsher 	void __iomem *ioaddr = rp->base;
2465f2148a47SJeff Kirsher 
2466f2148a47SJeff Kirsher 	if (!(rp->quirks & rqWOL))
2467f2148a47SJeff Kirsher 		return; /* Nothing to do for non-WOL adapters */
2468f2148a47SJeff Kirsher 
2469f2148a47SJeff Kirsher 	rhine_power_init(dev);
2470f2148a47SJeff Kirsher 
2471f2148a47SJeff Kirsher 	/* Make sure we use pattern 0, 1 and not 4, 5 */
2472f2148a47SJeff Kirsher 	if (rp->quirks & rq6patterns)
2473f2148a47SJeff Kirsher 		iowrite8(0x04, ioaddr + WOLcgClr);
2474f2148a47SJeff Kirsher 
24757ab87ff4SFrancois Romieu 	spin_lock(&rp->lock);
24767ab87ff4SFrancois Romieu 
2477f2148a47SJeff Kirsher 	if (rp->wolopts & WAKE_MAGIC) {
2478f2148a47SJeff Kirsher 		iowrite8(WOLmagic, ioaddr + WOLcrSet);
2479f2148a47SJeff Kirsher 		/*
2480f2148a47SJeff Kirsher 		 * Turn EEPROM-controlled wake-up back on -- some hardware may
2481f2148a47SJeff Kirsher 		 * not cooperate otherwise.
2482f2148a47SJeff Kirsher 		 */
2483f2148a47SJeff Kirsher 		iowrite8(ioread8(ioaddr + ConfigA) | 0x03, ioaddr + ConfigA);
2484f2148a47SJeff Kirsher 	}
2485f2148a47SJeff Kirsher 
2486f2148a47SJeff Kirsher 	if (rp->wolopts & (WAKE_BCAST|WAKE_MCAST))
2487f2148a47SJeff Kirsher 		iowrite8(WOLbmcast, ioaddr + WOLcgSet);
2488f2148a47SJeff Kirsher 
2489f2148a47SJeff Kirsher 	if (rp->wolopts & WAKE_PHY)
2490f2148a47SJeff Kirsher 		iowrite8(WOLlnkon | WOLlnkoff, ioaddr + WOLcrSet);
2491f2148a47SJeff Kirsher 
2492f2148a47SJeff Kirsher 	if (rp->wolopts & WAKE_UCAST)
2493f2148a47SJeff Kirsher 		iowrite8(WOLucast, ioaddr + WOLcrSet);
2494f2148a47SJeff Kirsher 
2495f2148a47SJeff Kirsher 	if (rp->wolopts) {
2496f2148a47SJeff Kirsher 		/* Enable legacy WOL (for old motherboards) */
2497f2148a47SJeff Kirsher 		iowrite8(0x01, ioaddr + PwcfgSet);
2498f2148a47SJeff Kirsher 		iowrite8(ioread8(ioaddr + StickyHW) | 0x04, ioaddr + StickyHW);
2499f2148a47SJeff Kirsher 	}
2500f2148a47SJeff Kirsher 
25017ab87ff4SFrancois Romieu 	spin_unlock(&rp->lock);
25027ab87ff4SFrancois Romieu 
2503e92b9b3bSFrancois Romieu 	if (system_state == SYSTEM_POWER_OFF && !avoid_D3) {
2504f2148a47SJeff Kirsher 		iowrite8(ioread8(ioaddr + StickyHW) | 0x03, ioaddr + StickyHW);
2505f2148a47SJeff Kirsher 
2506e92b9b3bSFrancois Romieu 		pci_wake_from_d3(pdev, true);
2507e92b9b3bSFrancois Romieu 		pci_set_power_state(pdev, PCI_D3hot);
2508e92b9b3bSFrancois Romieu 	}
2509f2148a47SJeff Kirsher }
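
/*
 * Editorial note: the D3hot transition above is skipped when avoid_D3 is
 * set (module parameter or the DMI quirk applied in rhine_init() below),
 * since some BIOSes cannot bring the chip back out of D3 and PXE boot
 * then fails.
 */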
2510f2148a47SJeff Kirsher 
2511e92b9b3bSFrancois Romieu #ifdef CONFIG_PM_SLEEP
2512e92b9b3bSFrancois Romieu static int rhine_suspend(struct device *device)
2513f2148a47SJeff Kirsher {
2514f7630d18SAlexey Charkov 	struct net_device *dev = dev_get_drvdata(device);
2515f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
2516f2148a47SJeff Kirsher 
2517f2148a47SJeff Kirsher 	if (!netif_running(dev))
2518f2148a47SJeff Kirsher 		return 0;
2519f2148a47SJeff Kirsher 
25207ab87ff4SFrancois Romieu 	rhine_task_disable(rp);
25217ab87ff4SFrancois Romieu 	rhine_irq_disable(rp);
2522f2148a47SJeff Kirsher 	napi_disable(&rp->napi);
2523f2148a47SJeff Kirsher 
2524f2148a47SJeff Kirsher 	netif_device_detach(dev);
2525f2148a47SJeff Kirsher 
2526f7630d18SAlexey Charkov 	if (dev_is_pci(device))
25272d283862SAlexey Charkov 		rhine_shutdown_pci(to_pci_dev(device));
2528f2148a47SJeff Kirsher 
2529f2148a47SJeff Kirsher 	return 0;
2530f2148a47SJeff Kirsher }
2531f2148a47SJeff Kirsher 
2532e92b9b3bSFrancois Romieu static int rhine_resume(struct device *device)
2533f2148a47SJeff Kirsher {
2534f7630d18SAlexey Charkov 	struct net_device *dev = dev_get_drvdata(device);
2535f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
2536f2148a47SJeff Kirsher 
2537f2148a47SJeff Kirsher 	if (!netif_running(dev))
2538f2148a47SJeff Kirsher 		return 0;
2539f2148a47SJeff Kirsher 
2540f2148a47SJeff Kirsher 	enable_mmio(rp->pioaddr, rp->quirks);
2541f2148a47SJeff Kirsher 	rhine_power_init(dev);
2542f2148a47SJeff Kirsher 	free_tbufs(dev);
2543f2148a47SJeff Kirsher 	alloc_tbufs(dev);
25448709bb2cSfrançois romieu 	rhine_reset_rbufs(rp);
25457ab87ff4SFrancois Romieu 	rhine_task_enable(rp);
25467ab87ff4SFrancois Romieu 	spin_lock_bh(&rp->lock);
2547f2148a47SJeff Kirsher 	init_registers(dev);
25487ab87ff4SFrancois Romieu 	spin_unlock_bh(&rp->lock);
2549f2148a47SJeff Kirsher 
2550f2148a47SJeff Kirsher 	netif_device_attach(dev);
2551f2148a47SJeff Kirsher 
2552f2148a47SJeff Kirsher 	return 0;
2553f2148a47SJeff Kirsher }
2554e92b9b3bSFrancois Romieu 
2555e92b9b3bSFrancois Romieu static SIMPLE_DEV_PM_OPS(rhine_pm_ops, rhine_suspend, rhine_resume);
2556e92b9b3bSFrancois Romieu #define RHINE_PM_OPS	(&rhine_pm_ops)
2557e92b9b3bSFrancois Romieu 
2558e92b9b3bSFrancois Romieu #else
2559e92b9b3bSFrancois Romieu 
2560e92b9b3bSFrancois Romieu #define RHINE_PM_OPS	NULL
2561e92b9b3bSFrancois Romieu 
2562e92b9b3bSFrancois Romieu #endif /* !CONFIG_PM_SLEEP */
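
/*
 * Editorial note: SIMPLE_DEV_PM_OPS() wires rhine_suspend()/rhine_resume()
 * into the system sleep callbacks only; with CONFIG_PM_SLEEP disabled,
 * RHINE_PM_OPS is NULL and neither bus driver registers PM hooks.
 */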
2563f2148a47SJeff Kirsher 
25642d283862SAlexey Charkov static struct pci_driver rhine_driver_pci = {
2565f2148a47SJeff Kirsher 	.name		= DRV_NAME,
2566f2148a47SJeff Kirsher 	.id_table	= rhine_pci_tbl,
25672d283862SAlexey Charkov 	.probe		= rhine_init_one_pci,
25682d283862SAlexey Charkov 	.remove		= rhine_remove_one_pci,
25692d283862SAlexey Charkov 	.shutdown	= rhine_shutdown_pci,
2570e92b9b3bSFrancois Romieu 	.driver.pm	= RHINE_PM_OPS,
2571f2148a47SJeff Kirsher };
2572f2148a47SJeff Kirsher 
25732d283862SAlexey Charkov static struct platform_driver rhine_driver_platform = {
25742d283862SAlexey Charkov 	.probe		= rhine_init_one_platform,
25752d283862SAlexey Charkov 	.remove		= rhine_remove_one_platform,
25762d283862SAlexey Charkov 	.driver = {
25772d283862SAlexey Charkov 		.name	= DRV_NAME,
25782d283862SAlexey Charkov 		.of_match_table	= rhine_of_tbl,
25792d283862SAlexey Charkov 		.pm		= RHINE_PM_OPS,
25802d283862SAlexey Charkov 	}
25812d283862SAlexey Charkov };
25822d283862SAlexey Charkov 
25836faadbbbSChristoph Hellwig static const struct dmi_system_id rhine_dmi_table[] __initconst = {
2584f2148a47SJeff Kirsher 	{
2585f2148a47SJeff Kirsher 		.ident = "EPIA-M",
2586f2148a47SJeff Kirsher 		.matches = {
2587f2148a47SJeff Kirsher 			DMI_MATCH(DMI_BIOS_VENDOR, "Award Software International, Inc."),
2588f2148a47SJeff Kirsher 			DMI_MATCH(DMI_BIOS_VERSION, "6.00 PG"),
2589f2148a47SJeff Kirsher 		},
2590f2148a47SJeff Kirsher 	},
2591f2148a47SJeff Kirsher 	{
2592f2148a47SJeff Kirsher 		.ident = "KV7",
2593f2148a47SJeff Kirsher 		.matches = {
2594f2148a47SJeff Kirsher 			DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies, LTD"),
2595f2148a47SJeff Kirsher 			DMI_MATCH(DMI_BIOS_VERSION, "6.00 PG"),
2596f2148a47SJeff Kirsher 		},
2597f2148a47SJeff Kirsher 	},
2598f2148a47SJeff Kirsher 	{ NULL }
2599f2148a47SJeff Kirsher };
2600f2148a47SJeff Kirsher 
2601f2148a47SJeff Kirsher static int __init rhine_init(void)
2602f2148a47SJeff Kirsher {
26032d283862SAlexey Charkov 	int ret_pci, ret_platform;
26042d283862SAlexey Charkov 
2605f2148a47SJeff Kirsher /* When built as a module, this prints whether or not any devices are found in probe. */
2606f2148a47SJeff Kirsher 	if (dmi_check_system(rhine_dmi_table)) {
2607f2148a47SJeff Kirsher 		/* these BIOSes fail at PXE boot if chip is in D3 */
2608eb939922SRusty Russell 		avoid_D3 = true;
2609f2148a47SJeff Kirsher 		pr_warn("Broken BIOS detected, avoid_D3 enabled\n");
2610f2148a47SJeff Kirsher 	} else if (avoid_D3)
2612f2148a47SJeff Kirsher 		pr_info("avoid_D3 set\n");
2613f2148a47SJeff Kirsher 
26142d283862SAlexey Charkov 	ret_pci = pci_register_driver(&rhine_driver_pci);
26152d283862SAlexey Charkov 	ret_platform = platform_driver_register(&rhine_driver_platform);
26162d283862SAlexey Charkov 	if ((ret_pci < 0) && (ret_platform < 0))
26172d283862SAlexey Charkov 		return ret_pci;
26182d283862SAlexey Charkov 
26192d283862SAlexey Charkov 	return 0;
2620f2148a47SJeff Kirsher }
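
/*
 * Editorial note: init succeeds if either bus driver registers; an error
 * (the PCI one) is returned only when both pci_register_driver() and
 * platform_driver_register() fail, so a platform-only failure is silently
 * ignored.
 */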
2621f2148a47SJeff Kirsher 
2622f2148a47SJeff Kirsher 
2623f2148a47SJeff Kirsher static void __exit rhine_cleanup(void)
2624f2148a47SJeff Kirsher {
26252d283862SAlexey Charkov 	platform_driver_unregister(&rhine_driver_platform);
26262d283862SAlexey Charkov 	pci_unregister_driver(&rhine_driver_pci);
2627f2148a47SJeff Kirsher }
2628f2148a47SJeff Kirsher 
2629f2148a47SJeff Kirsher 
2630f2148a47SJeff Kirsher module_init(rhine_init);
2631f2148a47SJeff Kirsher module_exit(rhine_cleanup);
2632