/* via-rhine.c: A Linux Ethernet device driver for VIA Rhine family chips. */
/*
	Written 1998-2001 by Donald Becker.

	Current Maintainer: Roger Luethi <rl@hellgate.ch>

	This software may be used and distributed according to the terms of
	the GNU General Public License (GPL), incorporated herein by reference.
	Drivers based on or derived from this code fall under the GPL and must
	retain the authorship, copyright and license notice.  This file is not
	a complete program and may only be used when the entire operating
	system is licensed under the GPL.

	This driver is designed for the VIA VT86C100A Rhine-I.
	It also works with the Rhine-II (6102) and Rhine-III (6105/6105L/6105LOM
	and management NIC 6105M).

	The author may be reached as becker@scyld.com, or C/O
	Scyld Computing Corporation
	410 Severn Ave., Suite 210
	Annapolis MD 21403


	This driver contains some changes from the original Donald Becker
	version. He may or may not be interested in bug reports on this
	code. You can find his versions at:
	http://www.scyld.com/network/via-rhine.html
	[link no longer provides useful info -jgarzik]

*/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#define DRV_NAME	"via-rhine"
#define DRV_VERSION	"1.5.1"
#define DRV_RELDATE	"2010-10-09"

#include <linux/types.h>

/* A few user-configurable values.
   These may be modified when a driver module is loaded. */
static int debug = 0;
#define RHINE_MSG_DEFAULT \
	(0x0000)

/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
   Setting to > 1518 effectively disables this feature. */
#if defined(__alpha__) || defined(__arm__) || defined(__hppa__) || \
	defined(CONFIG_SPARC) || defined(__ia64__) || \
	defined(__sh__) || defined(__mips__)
static int rx_copybreak = 1518;
#else
static int rx_copybreak;
#endif

/* Work-around for broken BIOSes: they are unable to get the chip back out of
   power state D3 so PXE booting fails. bootparam(7): via-rhine.avoid_D3=1 */
static bool avoid_D3;

/*
 * In case you are looking for 'options[]' or 'full_duplex[]', they
 * are gone. Use ethtool(8) instead.
 */

/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
   The Rhine has a 64 element 8390-like hash table. */
static const int multicast_filter_limit = 32;


/* Operational parameters that are set at compile time. */

/* Keep the ring sizes a power of two for compile efficiency.
 * The compiler will convert <unsigned>'%'<2^N> into a bit mask.
 * Making the Tx ring too large decreases the effectiveness of channel
 * bonding and packet priority.
 * With BQL support, we can increase TX ring safely.
 * There are no ill effects from too-large receive rings.
 */
#define TX_RING_SIZE	64
#define TX_QUEUE_LEN	(TX_RING_SIZE - 6)	/* Limit ring entries actually used. */
#define RX_RING_SIZE	64
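
/*
 * Illustrative sketch (not driver code; the helper name is made up): with
 * power-of-two ring sizes, the '%' used in ring index arithmetic compiles
 * down to a plain bit mask, so wrapping an index costs a single AND.
 */
#if 0	/* example only */
static unsigned int next_tx_slot(unsigned int cur_tx)
{
	/* for unsigned operands this is emitted as (cur_tx + 1) & (TX_RING_SIZE - 1) */
	return (cur_tx + 1) % TX_RING_SIZE;
}
#endif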

/* Operational parameters that usually are not changed. */

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT	(2*HZ)

#define PKT_BUF_SZ	1536	/* Size of each temporary Rx buffer.*/

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>
#include <linux/if_vlan.h>
#include <linux/bitops.h>
#include <linux/workqueue.h>
#include <asm/processor.h>	/* Processor type for cache alignment. */
#include <asm/io.h>
#include <asm/irq.h>
#include <linux/uaccess.h>
#include <linux/dmi.h>

/* These identify the driver base version and may not be removed. */
static const char version[] =
	"v1.10-LK" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker";

MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("VIA Rhine PCI Fast Ethernet driver");
MODULE_LICENSE("GPL");

module_param(debug, int, 0);
module_param(rx_copybreak, int, 0);
module_param(avoid_D3, bool, 0);
MODULE_PARM_DESC(debug, "VIA Rhine debug message flags");
MODULE_PARM_DESC(rx_copybreak, "VIA Rhine copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(avoid_D3, "Avoid power state D3 (work-around for broken BIOSes)");
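
/*
 * Example (illustrative): when built as a module, the parameters above can
 * be set at load time, e.g.:
 *
 *	modprobe via-rhine debug=0xffff rx_copybreak=1518 avoid_D3=1
 */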

#define MCAM_SIZE	32
#define VCAM_SIZE	32

/*
		Theory of Operation

I. Board Compatibility

This driver is designed for the VIA VT86C100A Rhine-I PCI Fast Ethernet
controller.

II. Board-specific settings

Boards with this chip are functional only in a bus-master PCI slot.

Many operational settings are loaded from the EEPROM to the Config word at
offset 0x78. For most of these settings, this driver assumes that they are
correct.
If this driver is compiled to use PCI memory space operations the EEPROM
must be configured to enable memory ops.

III. Driver operation

IIIa. Ring buffers

This driver uses two statically allocated fixed-size descriptor lists
formed into rings by a branch from the final descriptor to the beginning of
the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.

IIIb/c. Transmit/Receive Structure

This driver attempts to use a zero-copy receive and transmit scheme.

Alas, all data buffers are required to start on a 32 bit boundary, so
the driver must often copy transmit packets into bounce buffers.

The driver allocates full frame size skbuffs for the Rx ring buffers at
open() time and passes the skb->data field to the chip as receive data
buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
a fresh skbuff is allocated and the frame is copied to the new skbuff.
When the incoming frame is larger, the skbuff is passed directly up the
protocol stack. Buffers consumed this way are replaced by newly allocated
skbuffs in the last phase of rhine_rx().

The RX_COPYBREAK value is chosen to trade off the memory wasted by
using a full-sized skbuff for small frames vs. the copying costs of larger
frames. New boards are typically used in generously configured machines
and the underfilled buffers have negligible impact compared to the benefit of
a single allocation size, so the default value of zero results in never
copying packets. When copying is done, the cost is usually mitigated by using
a combined copy/checksum routine. Copying also preloads the cache, which is
most useful with small frames.

Since the VIA chips are only able to transfer data to buffers on 32 bit
boundaries, the IP header at offset 14 in an ethernet frame isn't
longword aligned for further processing. Copying these unaligned buffers
has the beneficial effect of 16-byte aligning the IP header.

IIId. Synchronization

The driver runs as two independent, single-threaded flows of control. One
is the send-packet routine, which enforces single-threaded use by the
netdev_priv(dev)->lock spinlock. The other thread is the interrupt handler,
which is single threaded by the hardware and interrupt handling software.

The send packet thread has partial control over the Tx ring. It locks the
netdev_priv(dev)->lock whenever it's queuing a Tx packet. If the next slot in
the ring is not available it stops the transmit queue by
calling netif_stop_queue.

The interrupt handler has exclusive control over the Rx ring and records stats
from the Tx ring. After reaping the stats, it marks the Tx queue entry as
empty by incrementing the dirty_tx mark. If at least half of the entries in
the Rx ring are available the transmit queue is woken up if it was stopped.

IV. Notes

IVb. References

Preliminary VT86C100A manual from http://www.via.com.tw/
http://www.scyld.com/expert/100mbps.html
http://www.scyld.com/expert/NWay.html
ftp://ftp.via.com.tw/public/lan/Products/NIC/VT86C100A/Datasheet/VT86C100A03.pdf
ftp://ftp.via.com.tw/public/lan/Products/NIC/VT6102/Datasheet/VT6102_021.PDF


IVc. Errata

The VT86C100A manual is not a reliable source of information.
The 3043 chip does not handle unaligned transmit or receive buffers, resulting
in significant performance degradation for bounce buffer copies on transmit
and unaligned IP headers on receive.
The chip does not pad to minimum transmit length.

*/
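
/*
 * Illustrative sketch of the ring layout described in IIIa above (not
 * driver code; the helper name is hypothetical): the final descriptor's
 * next_desc branches back to the first one, turning a fixed-size array of
 * descriptors into a ring.
 */
#if 0	/* example only */
static void link_descs_into_ring(struct rx_desc *ring, dma_addr_t ring_dma,
				 int n)
{
	int i;

	for (i = 0; i < n; i++)
		ring[i].next_desc = cpu_to_le32(ring_dma +
				((i + 1) % n) * sizeof(struct rx_desc));
}
#endif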


/* This table drives the PCI probe routines. It's mostly boilerplate in all
   of the drivers, and will likely be provided by some future kernel.
   Note the matching code -- the first table entry matches all 56** cards but
   the second matches only the 1234 card.
*/

enum rhine_revs {
	VT86C100A	= 0x00,
	VTunknown0	= 0x20,
	VT6102		= 0x40,
	VT8231		= 0x50,	/* Integrated MAC */
	VT8233		= 0x60,	/* Integrated MAC */
	VT8235		= 0x74,	/* Integrated MAC */
	VT8237		= 0x78,	/* Integrated MAC */
	VTunknown1	= 0x7C,
	VT6105		= 0x80,
	VT6105_B0	= 0x83,
	VT6105L		= 0x8A,
	VT6107		= 0x8C,
	VTunknown2	= 0x8E,
	VT6105M		= 0x90,	/* Management adapter */
};

enum rhine_quirks {
	rqWOL		= 0x0001,	/* Wake-On-LAN support */
	rqForceReset	= 0x0002,
	rq6patterns	= 0x0040,	/* 6 instead of 4 patterns for WOL */
	rqStatusWBRace	= 0x0080,	/* Tx Status Writeback Error possible */
	rqRhineI	= 0x0100,	/* See comment below */
	rqIntPHY	= 0x0200,	/* Integrated PHY */
	rqMgmt		= 0x0400,	/* Management adapter */
	rqNeedEnMMIO	= 0x0800,	/* Whether the core needs to be
					 * switched from PIO mode to MMIO
					 * (only applies to PCI)
					 */
};
/*
 * rqRhineI: VT86C100A (aka Rhine-I) uses different bits to enable
 * MMIO as well as for the collision counter and the Tx FIFO underflow
 * indicator. In addition, Tx and Rx buffers need to be 4 byte aligned.
 */

/* Beware of PCI posted writes */
#define IOSYNC	do { ioread8(ioaddr + StationAddr); } while (0)
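
/*
 * Example (illustrative): a read from any chip register flushes posted
 * writes out to the device, so a write that must reach the chip before the
 * driver proceeds is followed by IOSYNC, as in rhine_chip_reset():
 *
 *	iowrite8(Cmd1Reset, ioaddr + ChipCmd1);
 *	IOSYNC;
 */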

static const struct pci_device_id rhine_pci_tbl[] = {
	{ 0x1106, 0x3043, PCI_ANY_ID, PCI_ANY_ID, },	/* VT86C100A */
	{ 0x1106, 0x3065, PCI_ANY_ID, PCI_ANY_ID, },	/* VT6102 */
	{ 0x1106, 0x3106, PCI_ANY_ID, PCI_ANY_ID, },	/* 6105{,L,LOM} */
	{ 0x1106, 0x3053, PCI_ANY_ID, PCI_ANY_ID, },	/* VT6105M */
	{ }	/* terminate list */
};
MODULE_DEVICE_TABLE(pci, rhine_pci_tbl);

/* OpenFirmware identifiers for platform-bus devices
 * The .data field is currently only used to store quirks
 */
static u32 vt8500_quirks = rqWOL | rqForceReset | rq6patterns;
static const struct of_device_id rhine_of_tbl[] = {
	{ .compatible = "via,vt8500-rhine", .data = &vt8500_quirks },
	{ }	/* terminate list */
};
MODULE_DEVICE_TABLE(of, rhine_of_tbl);

/* Offsets to the device registers. */
enum register_offsets {
	StationAddr=0x00, RxConfig=0x06, TxConfig=0x07, ChipCmd=0x08,
	ChipCmd1=0x09, TQWake=0x0A,
	IntrStatus=0x0C, IntrEnable=0x0E,
	MulticastFilter0=0x10, MulticastFilter1=0x14,
	RxRingPtr=0x18, TxRingPtr=0x1C, GFIFOTest=0x54,
	MIIPhyAddr=0x6C, MIIStatus=0x6D, PCIBusConfig=0x6E, PCIBusConfig1=0x6F,
	MIICmd=0x70, MIIRegAddr=0x71, MIIData=0x72, MACRegEEcsr=0x74,
	ConfigA=0x78, ConfigB=0x79, ConfigC=0x7A, ConfigD=0x7B,
	RxMissed=0x7C, RxCRCErrs=0x7E, MiscCmd=0x81,
	StickyHW=0x83, IntrStatus2=0x84,
	CamMask=0x88, CamCon=0x92, CamAddr=0x93,
	WOLcrSet=0xA0, PwcfgSet=0xA1, WOLcgSet=0xA3, WOLcrClr=0xA4,
	WOLcrClr1=0xA6, WOLcgClr=0xA7,
	PwrcsrSet=0xA8, PwrcsrSet1=0xA9, PwrcsrClr=0xAC, PwrcsrClr1=0xAD,
};

/* Bits in ConfigD */
enum backoff_bits {
	BackOptional=0x01, BackModify=0x02,
	BackCaptureEffect=0x04, BackRandom=0x08
};

/* Bits in the TxConfig (TCR) register */
enum tcr_bits {
	TCR_PQEN=0x01,
	TCR_LB0=0x02,		/* loopback[0] */
	TCR_LB1=0x04,		/* loopback[1] */
	TCR_OFSET=0x08,
	TCR_RTGOPT=0x10,
	TCR_RTFT0=0x20,
	TCR_RTFT1=0x40,
	TCR_RTSF=0x80,
};

/* Bits in the CamCon (CAMC) register */
enum camcon_bits {
	CAMC_CAMEN=0x01,
	CAMC_VCAMSL=0x02,
	CAMC_CAMWR=0x04,
	CAMC_CAMRD=0x08,
};

/* Bits in the PCIBusConfig1 (BCR1) register */
enum bcr1_bits {
	BCR1_POT0=0x01,
	BCR1_POT1=0x02,
	BCR1_POT2=0x04,
	BCR1_CTFT0=0x08,
	BCR1_CTFT1=0x10,
	BCR1_CTSF=0x20,
	BCR1_TXQNOBK=0x40,	/* for VT6105 */
	BCR1_VIDFR=0x80,	/* for VT6105 */
	BCR1_MED0=0x40,		/* for VT6102 */
	BCR1_MED1=0x80,		/* for VT6102 */
};

/* Registers for which we verify that MMIO and PIO accesses read the same value. */
static const int mmio_verify_registers[] = {
	RxConfig, TxConfig, IntrEnable, ConfigA, ConfigB, ConfigC, ConfigD,
	0
};

/* Bits in the interrupt status/mask registers. */
enum intr_status_bits {
	IntrRxDone	= 0x0001,
	IntrTxDone	= 0x0002,
	IntrRxErr	= 0x0004,
	IntrTxError	= 0x0008,
	IntrRxEmpty	= 0x0020,
	IntrPCIErr	= 0x0040,
	IntrStatsMax	= 0x0080,
	IntrRxEarly	= 0x0100,
	IntrTxUnderrun	= 0x0210,
	IntrRxOverflow	= 0x0400,
	IntrRxDropped	= 0x0800,
	IntrRxNoBuf	= 0x1000,
	IntrTxAborted	= 0x2000,
	IntrLinkChange	= 0x4000,
	IntrRxWakeUp	= 0x8000,
	IntrTxDescRace		= 0x080000,	/* mapped from IntrStatus2 */
	IntrNormalSummary	= IntrRxDone | IntrTxDone,
	IntrTxErrSummary	= IntrTxDescRace | IntrTxAborted | IntrTxError |
				  IntrTxUnderrun,
};

/* Bits in WOLcrSet/WOLcrClr and PwrcsrSet/PwrcsrClr */
enum wol_bits {
	WOLucast	= 0x10,
	WOLmagic	= 0x20,
	WOLbmcast	= 0x30,
	WOLlnkon	= 0x40,
	WOLlnkoff	= 0x80,
};

/* The Rx and Tx buffer descriptors. */
struct rx_desc {
	__le32 rx_status;
	__le32 desc_length; /* Chain flag, Buffer/frame length */
	__le32 addr;
	__le32 next_desc;
};
struct tx_desc {
	__le32 tx_status;
	__le32 desc_length; /* Chain flag, Tx Config, Frame length */
	__le32 addr;
	__le32 next_desc;
};

/* Initial value for tx_desc.desc_length, Buffer size goes to bits 0-10 */
#define TXDESC		0x00e08000

enum rx_status_bits {
	RxOK=0x8000, RxWholePkt=0x0300, RxErr=0x008F
};

/* Bits in *_desc.*_status */
enum desc_status_bits {
	DescOwn=0x80000000
};

/* Bits in *_desc.*_length */
enum desc_length_bits {
	DescTag=0x00010000
};

/* Bits in ChipCmd. */
enum chip_cmd_bits {
	CmdInit=0x01, CmdStart=0x02, CmdStop=0x04, CmdRxOn=0x08,
	CmdTxOn=0x10, Cmd1TxDemand=0x20, CmdRxDemand=0x40,
	Cmd1EarlyRx=0x01, Cmd1EarlyTx=0x02, Cmd1FDuplex=0x04,
	Cmd1NoTxPoll=0x08, Cmd1Reset=0x80,
};

struct rhine_stats {
	u64		packets;
	u64		bytes;
	struct u64_stats_sync syncp;
};

struct rhine_private {
	/* Bit mask for configured VLAN ids */
	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];

	/* Descriptor rings */
	struct rx_desc *rx_ring;
	struct tx_desc *tx_ring;
	dma_addr_t rx_ring_dma;
	dma_addr_t tx_ring_dma;

	/* The addresses of receive-in-place skbuffs. */
	struct sk_buff *rx_skbuff[RX_RING_SIZE];
	dma_addr_t rx_skbuff_dma[RX_RING_SIZE];

	/* The saved address of a sent-in-place packet/buffer, for later free(). */
	struct sk_buff *tx_skbuff[TX_RING_SIZE];
	dma_addr_t tx_skbuff_dma[TX_RING_SIZE];

	/* Tx bounce buffers (Rhine-I only) */
	unsigned char *tx_buf[TX_RING_SIZE];
	unsigned char *tx_bufs;
	dma_addr_t tx_bufs_dma;

	int irq;
	long pioaddr;
	struct net_device *dev;
	struct napi_struct napi;
	spinlock_t lock;
	struct mutex task_lock;
	bool task_enable;
	struct work_struct slow_event_task;
	struct work_struct reset_task;

	u32 msg_enable;

	/* Frequently used values: keep some adjacent for cache effect. */
	u32 quirks;
	unsigned int cur_rx;
	unsigned int cur_tx, dirty_tx;
	unsigned int rx_buf_sz;		/* Based on MTU+slack. */
	struct rhine_stats rx_stats;
	struct rhine_stats tx_stats;
	u8 wolopts;

	u8 tx_thresh, rx_thresh;

	struct mii_if_info mii_if;
	void __iomem *base;
};

#define BYTE_REG_BITS_ON(x, p)      do { iowrite8((ioread8((p))|(x)), (p)); } while (0)
#define WORD_REG_BITS_ON(x, p)      do { iowrite16((ioread16((p))|(x)), (p)); } while (0)
#define DWORD_REG_BITS_ON(x, p)     do { iowrite32((ioread32((p))|(x)), (p)); } while (0)

#define BYTE_REG_BITS_IS_ON(x, p)   (ioread8((p)) & (x))
#define WORD_REG_BITS_IS_ON(x, p)   (ioread16((p)) & (x))
#define DWORD_REG_BITS_IS_ON(x, p)  (ioread32((p)) & (x))

#define BYTE_REG_BITS_OFF(x, p)     do { iowrite8(ioread8((p)) & (~(x)), (p)); } while (0)
#define WORD_REG_BITS_OFF(x, p)     do { iowrite16(ioread16((p)) & (~(x)), (p)); } while (0)
#define DWORD_REG_BITS_OFF(x, p)    do { iowrite32(ioread32((p)) & (~(x)), (p)); } while (0)

#define BYTE_REG_BITS_SET(x, m, p)   do { iowrite8((ioread8((p)) & (~(m)))|(x), (p)); } while (0)
#define WORD_REG_BITS_SET(x, m, p)   do { iowrite16((ioread16((p)) & (~(m)))|(x), (p)); } while (0)
#define DWORD_REG_BITS_SET(x, m, p)  do { iowrite32((ioread32((p)) & (~(m)))|(x), (p)); } while (0)
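
/*
 * Example (illustrative): these helpers perform read-modify-write accesses
 * to chip registers, e.g. setting the full-duplex bit in ChipCmd1, or
 * replacing the Tx threshold field of TxConfig the way
 * rhine_kick_tx_threshold() below does:
 *
 *	BYTE_REG_BITS_ON(Cmd1FDuplex, ioaddr + ChipCmd1);
 *	BYTE_REG_BITS_SET(rp->tx_thresh, 0x80, ioaddr + TxConfig);
 */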

static int  mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
static int  rhine_open(struct net_device *dev);
static void rhine_reset_task(struct work_struct *work);
static void rhine_slow_event_task(struct work_struct *work);
static void rhine_tx_timeout(struct net_device *dev, unsigned int txqueue);
static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
				  struct net_device *dev);
static irqreturn_t rhine_interrupt(int irq, void *dev_instance);
static void rhine_tx(struct net_device *dev);
static int rhine_rx(struct net_device *dev, int limit);
static void rhine_set_rx_mode(struct net_device *dev);
static void rhine_get_stats64(struct net_device *dev,
			      struct rtnl_link_stats64 *stats);
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static const struct ethtool_ops netdev_ethtool_ops;
static int  rhine_close(struct net_device *dev);
static int rhine_vlan_rx_add_vid(struct net_device *dev,
				 __be16 proto, u16 vid);
static int rhine_vlan_rx_kill_vid(struct net_device *dev,
				  __be16 proto, u16 vid);
static void rhine_restart_tx(struct net_device *dev);

static void rhine_wait_bit(struct rhine_private *rp, u8 reg, u8 mask, bool low)
{
	void __iomem *ioaddr = rp->base;
	int i;

	for (i = 0; i < 1024; i++) {
		bool has_mask_bits = !!(ioread8(ioaddr + reg) & mask);

		if (low ^ has_mask_bits)
			break;
		udelay(10);
	}
	if (i > 64) {
		netif_dbg(rp, hw, rp->dev, "%s bit wait (%02x/%02x) cycle "
			  "count: %04d\n", low ? "low" : "high", reg, mask, i);
	}
}

static void rhine_wait_bit_high(struct rhine_private *rp, u8 reg, u8 mask)
{
	rhine_wait_bit(rp, reg, mask, false);
}

static void rhine_wait_bit_low(struct rhine_private *rp, u8 reg, u8 mask)
{
	rhine_wait_bit(rp, reg, mask, true);
}

static u32 rhine_get_events(struct rhine_private *rp)
{
	void __iomem *ioaddr = rp->base;
	u32 intr_status;

	intr_status = ioread16(ioaddr + IntrStatus);
	/* On Rhine-II, Bit 3 indicates Tx descriptor write-back race. */
	if (rp->quirks & rqStatusWBRace)
		intr_status |= ioread8(ioaddr + IntrStatus2) << 16;
	return intr_status;
}

static void rhine_ack_events(struct rhine_private *rp, u32 mask)
{
	void __iomem *ioaddr = rp->base;

	if (rp->quirks & rqStatusWBRace)
		iowrite8(mask >> 16, ioaddr + IntrStatus2);
	iowrite16(mask, ioaddr + IntrStatus);
}

/*
 * Get power related registers into sane state.
 * Notify user about past WOL event.
 */
static void rhine_power_init(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	u16 wolstat;

	if (rp->quirks & rqWOL) {
		/* Make sure chip is in power state D0 */
		iowrite8(ioread8(ioaddr + StickyHW) & 0xFC, ioaddr + StickyHW);

		/* Disable "force PME-enable" */
		iowrite8(0x80, ioaddr + WOLcgClr);

		/* Clear power-event config bits (WOL) */
		iowrite8(0xFF, ioaddr + WOLcrClr);
		/* More recent cards can manage two additional patterns */
		if (rp->quirks & rq6patterns)
			iowrite8(0x03, ioaddr + WOLcrClr1);

		/* Save power-event status bits */
		wolstat = ioread8(ioaddr + PwrcsrSet);
		if (rp->quirks & rq6patterns)
			wolstat |= (ioread8(ioaddr + PwrcsrSet1) & 0x03) << 8;

		/* Clear power-event status bits */
		iowrite8(0xFF, ioaddr + PwrcsrClr);
		if (rp->quirks & rq6patterns)
			iowrite8(0x03, ioaddr + PwrcsrClr1);

		if (wolstat) {
			char *reason;
			switch (wolstat) {
			case WOLmagic:
				reason = "Magic packet";
				break;
			case WOLlnkon:
				reason = "Link went up";
				break;
			case WOLlnkoff:
				reason = "Link went down";
				break;
			case WOLucast:
				reason = "Unicast packet";
				break;
			case WOLbmcast:
				reason = "Multicast/broadcast packet";
				break;
			default:
				reason = "Unknown";
			}
			netdev_info(dev, "Woke system up. Reason: %s\n",
				    reason);
		}
	}
}

static void rhine_chip_reset(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	u8 cmd1;

	iowrite8(Cmd1Reset, ioaddr + ChipCmd1);
	IOSYNC;

	if (ioread8(ioaddr + ChipCmd1) & Cmd1Reset) {
		netdev_info(dev, "Reset not complete yet. Trying harder.\n");

		/* Force reset */
		if (rp->quirks & rqForceReset)
			iowrite8(0x40, ioaddr + MiscCmd);

		/* Reset can take somewhat longer (rare) */
		rhine_wait_bit_low(rp, ChipCmd1, Cmd1Reset);
	}

	cmd1 = ioread8(ioaddr + ChipCmd1);
	netif_info(rp, hw, dev, "Reset %s\n", (cmd1 & Cmd1Reset) ?
		   "failed" : "succeeded");
}

static void enable_mmio(long pioaddr, u32 quirks)
{
	int n;

	if (quirks & rqNeedEnMMIO) {
		if (quirks & rqRhineI) {
			/* More recent docs say that this bit is reserved */
			n = inb(pioaddr + ConfigA) | 0x20;
			outb(n, pioaddr + ConfigA);
		} else {
			n = inb(pioaddr + ConfigD) | 0x80;
			outb(n, pioaddr + ConfigD);
		}
	}
}

static inline int verify_mmio(struct device *hwdev,
			      long pioaddr,
			      void __iomem *ioaddr,
			      u32 quirks)
{
	if (quirks & rqNeedEnMMIO) {
		int i = 0;

		/* Check that selected MMIO registers match the PIO ones */
		while (mmio_verify_registers[i]) {
			int reg = mmio_verify_registers[i++];
			unsigned char a = inb(pioaddr+reg);
			unsigned char b = readb(ioaddr+reg);

			if (a != b) {
				dev_err(hwdev,
					"MMIO does not match PIO [%02x] (%02x != %02x)\n",
					reg, a, b);
				return -EIO;
			}
		}
	}
	return 0;
}

/*
 * Loads bytes 0x00-0x05, 0x6E-0x6F, 0x78-0x7B from EEPROM
 * (plus 0x6C for Rhine-I/II)
 */
static void rhine_reload_eeprom(long pioaddr, struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	int i;

	outb(0x20, pioaddr + MACRegEEcsr);
	for (i = 0; i < 1024; i++) {
		if (!(inb(pioaddr + MACRegEEcsr) & 0x20))
			break;
	}
	if (i > 512)
		pr_info("%4d cycles used @ %s:%d\n", i, __func__, __LINE__);

	/*
	 * Reloading from EEPROM overwrites ConfigA-D, so we must re-enable
	 * MMIO. If reloading EEPROM was done first this could be avoided, but
	 * it is not known if that still works with the "win98-reboot" problem.
	 */
	enable_mmio(pioaddr, rp->quirks);

	/* Turn off EEPROM-controlled wake-up (magic packet) */
	if (rp->quirks & rqWOL)
		iowrite8(ioread8(ioaddr + ConfigA) & 0xFC, ioaddr + ConfigA);

}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void rhine_poll(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	const int irq = rp->irq;

	disable_irq(irq);
	rhine_interrupt(irq, dev);
	enable_irq(irq);
}
#endif

static void rhine_kick_tx_threshold(struct rhine_private *rp)
{
	if (rp->tx_thresh < 0xe0) {
		void __iomem *ioaddr = rp->base;

		rp->tx_thresh += 0x20;
		BYTE_REG_BITS_SET(rp->tx_thresh, 0x80, ioaddr + TxConfig);
	}
}

static void rhine_tx_err(struct rhine_private *rp, u32 status)
{
	struct net_device *dev = rp->dev;

	if (status & IntrTxAborted) {
		netif_info(rp, tx_err, dev,
			   "Abort %08x, frame dropped\n", status);
	}

	if (status & IntrTxUnderrun) {
		rhine_kick_tx_threshold(rp);
		netif_info(rp, tx_err, dev, "Transmitter underrun, "
			   "Tx threshold now %02x\n", rp->tx_thresh);
	}

	if (status & IntrTxDescRace)
		netif_info(rp, tx_err, dev, "Tx descriptor write-back race\n");

	if ((status & IntrTxError) &&
	    (status & (IntrTxAborted | IntrTxUnderrun | IntrTxDescRace)) == 0) {
		rhine_kick_tx_threshold(rp);
		netif_info(rp, tx_err, dev, "Unspecified error. "
			   "Tx threshold now %02x\n", rp->tx_thresh);
	}

	rhine_restart_tx(dev);
}

static void rhine_update_rx_crc_and_missed_errors(struct rhine_private *rp)
{
	void __iomem *ioaddr = rp->base;
	struct net_device_stats *stats = &rp->dev->stats;

	stats->rx_crc_errors    += ioread16(ioaddr + RxCRCErrs);
	stats->rx_missed_errors += ioread16(ioaddr + RxMissed);

	/*
	 * Clears the "tally counters" for CRC errors and missed frames(?).
	 * It has been reported that some chips need a write of 0 to clear
	 * these, for others the counters are set to 1 when written to and
	 * instead cleared when read. So we clear them both ways ...
	 */
	iowrite32(0, ioaddr + RxMissed);
	ioread16(ioaddr + RxCRCErrs);
	ioread16(ioaddr + RxMissed);
}

#define RHINE_EVENT_NAPI_RX	(IntrRxDone | \
				 IntrRxErr | \
				 IntrRxEmpty | \
				 IntrRxOverflow	| \
				 IntrRxDropped | \
				 IntrRxNoBuf | \
				 IntrRxWakeUp)

#define RHINE_EVENT_NAPI_TX_ERR	(IntrTxError | \
				 IntrTxAborted | \
				 IntrTxUnderrun | \
				 IntrTxDescRace)
#define RHINE_EVENT_NAPI_TX	(IntrTxDone | RHINE_EVENT_NAPI_TX_ERR)

#define RHINE_EVENT_NAPI	(RHINE_EVENT_NAPI_RX | \
				 RHINE_EVENT_NAPI_TX | \
				 IntrStatsMax)
#define RHINE_EVENT_SLOW	(IntrPCIErr | IntrLinkChange)
#define RHINE_EVENT		(RHINE_EVENT_NAPI | RHINE_EVENT_SLOW)

static int rhine_napipoll(struct napi_struct *napi, int budget)
{
	struct rhine_private *rp = container_of(napi, struct rhine_private, napi);
	struct net_device *dev = rp->dev;
	void __iomem *ioaddr = rp->base;
	u16 enable_mask = RHINE_EVENT & 0xffff;
	int work_done = 0;
	u32 status;

	status = rhine_get_events(rp);
	rhine_ack_events(rp, status & ~RHINE_EVENT_SLOW);

	if (status & RHINE_EVENT_NAPI_RX)
		work_done += rhine_rx(dev, budget);

	if (status & RHINE_EVENT_NAPI_TX) {
		if (status & RHINE_EVENT_NAPI_TX_ERR) {
			/* Avoid scavenging before Tx engine turned off */
			rhine_wait_bit_low(rp, ChipCmd, CmdTxOn);
			if (ioread8(ioaddr + ChipCmd) & CmdTxOn)
				netif_warn(rp, tx_err, dev, "Tx still on\n");
		}

		rhine_tx(dev);

		if (status & RHINE_EVENT_NAPI_TX_ERR)
			rhine_tx_err(rp, status);
	}

	if (status & IntrStatsMax) {
		spin_lock(&rp->lock);
		rhine_update_rx_crc_and_missed_errors(rp);
		spin_unlock(&rp->lock);
	}

	if (status & RHINE_EVENT_SLOW) {
		enable_mask &= ~RHINE_EVENT_SLOW;
		schedule_work(&rp->slow_event_task);
	}

	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		iowrite16(enable_mask, ioaddr + IntrEnable);
	}
	return work_done;
}

static void rhine_hw_init(struct net_device *dev, long pioaddr)
{
	struct rhine_private *rp = netdev_priv(dev);

	/* Reset the chip to erase previous misconfiguration. */
	rhine_chip_reset(dev);

	/* Rhine-I needs extra time to recuperate before EEPROM reload */
	if (rp->quirks & rqRhineI)
		msleep(5);

	/* Reload EEPROM controlled bytes cleared by soft reset */
	if (dev_is_pci(dev->dev.parent))
		rhine_reload_eeprom(pioaddr, dev);
}

static const struct net_device_ops rhine_netdev_ops = {
	.ndo_open		 = rhine_open,
	.ndo_stop		 = rhine_close,
	.ndo_start_xmit		 = rhine_start_tx,
	.ndo_get_stats64	 = rhine_get_stats64,
	.ndo_set_rx_mode	 = rhine_set_rx_mode,
	.ndo_validate_addr	 = eth_validate_addr,
	.ndo_set_mac_address	 = eth_mac_addr,
	.ndo_do_ioctl		 = netdev_ioctl,
	.ndo_tx_timeout		 = rhine_tx_timeout,
	.ndo_vlan_rx_add_vid	 = rhine_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	 = rhine_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	 = rhine_poll,
#endif
};

static int rhine_init_one_common(struct device *hwdev, u32 quirks,
				 long pioaddr, void __iomem *ioaddr, int irq)
{
	struct net_device *dev;
	struct rhine_private *rp;
	int i, rc, phy_id;
	const char *name;

	/* this should always be supported */
	rc = dma_set_mask(hwdev, DMA_BIT_MASK(32));
	if (rc) {
		dev_err(hwdev, "32-bit DMA addresses not supported by the card!?\n");
		goto err_out;
	}

	dev = alloc_etherdev(sizeof(struct rhine_private));
	if (!dev) {
		rc = -ENOMEM;
		goto err_out;
	}
	SET_NETDEV_DEV(dev, hwdev);

	rp = netdev_priv(dev);
	rp->dev = dev;
	rp->quirks = quirks;
	rp->pioaddr = pioaddr;
	rp->base = ioaddr;
	rp->irq = irq;
	rp->msg_enable = netif_msg_init(debug, RHINE_MSG_DEFAULT);

	phy_id = rp->quirks & rqIntPHY ? 1 : 0;

	u64_stats_init(&rp->tx_stats.syncp);
	u64_stats_init(&rp->rx_stats.syncp);

	/* Get chip registers into a sane state */
	rhine_power_init(dev);
	rhine_hw_init(dev, pioaddr);

	for (i = 0; i < 6; i++)
		dev->dev_addr[i] = ioread8(ioaddr + StationAddr + i);

	if (!is_valid_ether_addr(dev->dev_addr)) {
		/* Report it and use a random ethernet address instead */
		netdev_err(dev, "Invalid MAC address: %pM\n", dev->dev_addr);
		eth_hw_addr_random(dev);
		netdev_info(dev, "Using random MAC address: %pM\n",
			    dev->dev_addr);
	}

	/* For Rhine-I/II, phy_id is loaded from EEPROM */
	if (!phy_id)
		phy_id = ioread8(ioaddr + 0x6C);

	spin_lock_init(&rp->lock);
	mutex_init(&rp->task_lock);
	INIT_WORK(&rp->reset_task, rhine_reset_task);
	INIT_WORK(&rp->slow_event_task, rhine_slow_event_task);

	rp->mii_if.dev = dev;
	rp->mii_if.mdio_read = mdio_read;
	rp->mii_if.mdio_write = mdio_write;
	rp->mii_if.phy_id_mask = 0x1f;
	rp->mii_if.reg_num_mask = 0x1f;

	/* The chip-specific entries in the device structure. */
	dev->netdev_ops = &rhine_netdev_ops;
	dev->ethtool_ops = &netdev_ethtool_ops;
	dev->watchdog_timeo = TX_TIMEOUT;

	netif_napi_add(dev, &rp->napi, rhine_napipoll, 64);

	if (rp->quirks & rqRhineI)
		dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM;

	if (rp->quirks & rqMgmt)
		dev->features |= NETIF_F_HW_VLAN_CTAG_TX |
				 NETIF_F_HW_VLAN_CTAG_RX |
				 NETIF_F_HW_VLAN_CTAG_FILTER;

	/* dev->name not defined before register_netdev()! */
	rc = register_netdev(dev);
	if (rc)
		goto err_out_free_netdev;

	if (rp->quirks & rqRhineI)
		name = "Rhine";
	else if (rp->quirks & rqStatusWBRace)
		name = "Rhine II";
	else if (rp->quirks & rqMgmt)
		name = "Rhine III (Management Adapter)";
	else
		name = "Rhine III";

	netdev_info(dev, "VIA %s at %p, %pM, IRQ %d\n",
		    name, ioaddr, dev->dev_addr, rp->irq);

	dev_set_drvdata(hwdev, dev);

	{
		u16 mii_cmd;
		int mii_status = mdio_read(dev, phy_id, 1);
		mii_cmd = mdio_read(dev, phy_id, MII_BMCR) & ~BMCR_ISOLATE;
		mdio_write(dev, phy_id, MII_BMCR, mii_cmd);
		if (mii_status != 0xffff && mii_status != 0x0000) {
			rp->mii_if.advertising = mdio_read(dev, phy_id, 4);
			netdev_info(dev,
				    "MII PHY found at address %d, status 0x%04x advertising %04x Link %04x\n",
				    phy_id,
				    mii_status, rp->mii_if.advertising,
				    mdio_read(dev, phy_id, 5));

			/* set IFF_RUNNING */
			if (mii_status & BMSR_LSTATUS)
				netif_carrier_on(dev);
			else
				netif_carrier_off(dev);

		}
	}
	rp->mii_if.phy_id = phy_id;
	if (avoid_D3)
		netif_info(rp, probe, dev, "No D3 power state at shutdown\n");

	return 0;

err_out_free_netdev:
	free_netdev(dev);
err_out:
	return rc;
}
10332d283862SAlexey Charkov 
10342d283862SAlexey Charkov static int rhine_init_one_pci(struct pci_dev *pdev,
10352d283862SAlexey Charkov 			      const struct pci_device_id *ent)
10362d283862SAlexey Charkov {
10372d283862SAlexey Charkov 	struct device *hwdev = &pdev->dev;
10385b579e21SAlexey Charkov 	int rc;
10392d283862SAlexey Charkov 	long pioaddr, memaddr;
10402d283862SAlexey Charkov 	void __iomem *ioaddr;
10412d283862SAlexey Charkov 	int io_size = pdev->revision < VTunknown0 ? 128 : 256;
10425b579e21SAlexey Charkov 
10435b579e21SAlexey Charkov /* This driver was written to use PCI memory space. Some early versions
10445b579e21SAlexey Charkov  * of the Rhine may only work correctly with I/O space accesses.
10455b579e21SAlexey Charkov  * TODO: determine for which revisions this is true and set the flag
10465b579e21SAlexey Charkov  *	 in code rather than via this Kconfig option.
10475b579e21SAlexey Charkov  */
10485b579e21SAlexey Charkov #ifdef CONFIG_VIA_RHINE_MMIO
10495b579e21SAlexey Charkov 	u32 quirks = rqNeedEnMMIO;
10502d283862SAlexey Charkov #else
10515b579e21SAlexey Charkov 	u32 quirks = 0;
10522d283862SAlexey Charkov #endif
10532d283862SAlexey Charkov 
10542d283862SAlexey Charkov /* when built into the kernel, we only print version if device is found */
10552d283862SAlexey Charkov #ifndef MODULE
10562d283862SAlexey Charkov 	pr_info_once("%s\n", version);
10572d283862SAlexey Charkov #endif
10582d283862SAlexey Charkov 
10592d283862SAlexey Charkov 	rc = pci_enable_device(pdev);
10602d283862SAlexey Charkov 	if (rc)
10612d283862SAlexey Charkov 		goto err_out;
10622d283862SAlexey Charkov 
1063ca8b6e04SAlexey Charkov 	if (pdev->revision < VTunknown0) {
10645b579e21SAlexey Charkov 		quirks |= rqRhineI;
1065ca8b6e04SAlexey Charkov 	} else if (pdev->revision >= VT6102) {
10665b579e21SAlexey Charkov 		quirks |= rqWOL | rqForceReset;
1067ca8b6e04SAlexey Charkov 		if (pdev->revision < VT6105) {
1068ca8b6e04SAlexey Charkov 			quirks |= rqStatusWBRace;
1069ca8b6e04SAlexey Charkov 		} else {
1070ca8b6e04SAlexey Charkov 			quirks |= rqIntPHY;
1071ca8b6e04SAlexey Charkov 			if (pdev->revision >= VT6105_B0)
1072ca8b6e04SAlexey Charkov 				quirks |= rq6patterns;
1073ca8b6e04SAlexey Charkov 			if (pdev->revision >= VT6105M)
1074ca8b6e04SAlexey Charkov 				quirks |= rqMgmt;
1075ca8b6e04SAlexey Charkov 		}
1076ca8b6e04SAlexey Charkov 	}
1077ca8b6e04SAlexey Charkov 
10782d283862SAlexey Charkov 	/* sanity check */
10792d283862SAlexey Charkov 	if ((pci_resource_len(pdev, 0) < io_size) ||
10802d283862SAlexey Charkov 	    (pci_resource_len(pdev, 1) < io_size)) {
10812d283862SAlexey Charkov 		rc = -EIO;
10822d283862SAlexey Charkov 		dev_err(hwdev, "Insufficient PCI resources, aborting\n");
10832d283862SAlexey Charkov 		goto err_out_pci_disable;
10842d283862SAlexey Charkov 	}
10852d283862SAlexey Charkov 
10862d283862SAlexey Charkov 	pioaddr = pci_resource_start(pdev, 0);
10872d283862SAlexey Charkov 	memaddr = pci_resource_start(pdev, 1);
10882d283862SAlexey Charkov 
10892d283862SAlexey Charkov 	pci_set_master(pdev);
10902d283862SAlexey Charkov 
10912d283862SAlexey Charkov 	rc = pci_request_regions(pdev, DRV_NAME);
10922d283862SAlexey Charkov 	if (rc)
10932d283862SAlexey Charkov 		goto err_out_pci_disable;
10942d283862SAlexey Charkov 
10955b579e21SAlexey Charkov 	ioaddr = pci_iomap(pdev, (quirks & rqNeedEnMMIO ? 1 : 0), io_size);
10962d283862SAlexey Charkov 	if (!ioaddr) {
10972d283862SAlexey Charkov 		rc = -EIO;
10982d283862SAlexey Charkov 		dev_err(hwdev,
10992d283862SAlexey Charkov 			"ioremap failed for device %s, region 0x%X @ 0x%lX\n",
11002d283862SAlexey Charkov 			dev_name(hwdev), io_size, memaddr);
11012d283862SAlexey Charkov 		goto err_out_free_res;
11022d283862SAlexey Charkov 	}
11032d283862SAlexey Charkov 
11042d283862SAlexey Charkov 	enable_mmio(pioaddr, quirks);
11052d283862SAlexey Charkov 
11065b579e21SAlexey Charkov 	rc = verify_mmio(hwdev, pioaddr, ioaddr, quirks);
11075b579e21SAlexey Charkov 	if (rc)
11082d283862SAlexey Charkov 		goto err_out_unmap;
11092d283862SAlexey Charkov 
1110ca8b6e04SAlexey Charkov 	rc = rhine_init_one_common(&pdev->dev, quirks,
11112d283862SAlexey Charkov 				   pioaddr, ioaddr, pdev->irq);
11122d283862SAlexey Charkov 	if (!rc)
11132d283862SAlexey Charkov 		return 0;
11142d283862SAlexey Charkov 
1115f2148a47SJeff Kirsher err_out_unmap:
1116f2148a47SJeff Kirsher 	pci_iounmap(pdev, ioaddr);
1117f2148a47SJeff Kirsher err_out_free_res:
1118f2148a47SJeff Kirsher 	pci_release_regions(pdev);
1119ae996154SRoger Luethi err_out_pci_disable:
1120ae996154SRoger Luethi 	pci_disable_device(pdev);
1121f2148a47SJeff Kirsher err_out:
1122f2148a47SJeff Kirsher 	return rc;
1123f2148a47SJeff Kirsher }
1124f2148a47SJeff Kirsher 
11252d283862SAlexey Charkov static int rhine_init_one_platform(struct platform_device *pdev)
11262d283862SAlexey Charkov {
11272d283862SAlexey Charkov 	const struct of_device_id *match;
1128ca8b6e04SAlexey Charkov 	const u32 *quirks;
11292d283862SAlexey Charkov 	int irq;
11302d283862SAlexey Charkov 	void __iomem *ioaddr;
11312d283862SAlexey Charkov 
11322d283862SAlexey Charkov 	match = of_match_device(rhine_of_tbl, &pdev->dev);
11332d283862SAlexey Charkov 	if (!match)
11342d283862SAlexey Charkov 		return -EINVAL;
11352d283862SAlexey Charkov 
11368a54d4c2SYueHaibing 	ioaddr = devm_platform_ioremap_resource(pdev, 0);
11372d283862SAlexey Charkov 	if (IS_ERR(ioaddr))
11382d283862SAlexey Charkov 		return PTR_ERR(ioaddr);
11392d283862SAlexey Charkov 
11402d283862SAlexey Charkov 	irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
11412d283862SAlexey Charkov 	if (!irq)
11422d283862SAlexey Charkov 		return -EINVAL;
11432d283862SAlexey Charkov 
1144ca8b6e04SAlexey Charkov 	quirks = match->data;
1145ca8b6e04SAlexey Charkov 	if (!quirks)
11462d283862SAlexey Charkov 		return -EINVAL;
11472d283862SAlexey Charkov 
1148ca8b6e04SAlexey Charkov 	return rhine_init_one_common(&pdev->dev, *quirks,
11492d283862SAlexey Charkov 				     (long)ioaddr, ioaddr, irq);
11502d283862SAlexey Charkov }
11512d283862SAlexey Charkov 
1152f2148a47SJeff Kirsher static int alloc_ring(struct net_device* dev)
1153f2148a47SJeff Kirsher {
1154f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
1155f7630d18SAlexey Charkov 	struct device *hwdev = dev->dev.parent;
1156f2148a47SJeff Kirsher 	void *ring;
1157f2148a47SJeff Kirsher 	dma_addr_t ring_dma;
1158f2148a47SJeff Kirsher 
1159f7630d18SAlexey Charkov 	ring = dma_alloc_coherent(hwdev,
1160f2148a47SJeff Kirsher 				  RX_RING_SIZE * sizeof(struct rx_desc) +
1161f2148a47SJeff Kirsher 				  TX_RING_SIZE * sizeof(struct tx_desc),
11624087c4dcSAlexey Charkov 				  &ring_dma,
11634087c4dcSAlexey Charkov 				  GFP_ATOMIC);
1164f2148a47SJeff Kirsher 	if (!ring) {
1165f2148a47SJeff Kirsher 		netdev_err(dev, "Could not allocate DMA memory\n");
1166f2148a47SJeff Kirsher 		return -ENOMEM;
1167f2148a47SJeff Kirsher 	}
1168f2148a47SJeff Kirsher 	if (rp->quirks & rqRhineI) {
1169f7630d18SAlexey Charkov 		rp->tx_bufs = dma_alloc_coherent(hwdev,
1170f2148a47SJeff Kirsher 						 PKT_BUF_SZ * TX_RING_SIZE,
11714087c4dcSAlexey Charkov 						 &rp->tx_bufs_dma,
11724087c4dcSAlexey Charkov 						 GFP_ATOMIC);
1173f2148a47SJeff Kirsher 		if (rp->tx_bufs == NULL) {
1174f7630d18SAlexey Charkov 			dma_free_coherent(hwdev,
1175f2148a47SJeff Kirsher 					  RX_RING_SIZE * sizeof(struct rx_desc) +
1176f2148a47SJeff Kirsher 					  TX_RING_SIZE * sizeof(struct tx_desc),
1177f2148a47SJeff Kirsher 					  ring, ring_dma);
1178f2148a47SJeff Kirsher 			return -ENOMEM;
1179f2148a47SJeff Kirsher 		}
1180f2148a47SJeff Kirsher 	}
1181f2148a47SJeff Kirsher 
1182f2148a47SJeff Kirsher 	rp->rx_ring = ring;
1183f2148a47SJeff Kirsher 	rp->tx_ring = ring + RX_RING_SIZE * sizeof(struct rx_desc);
1184f2148a47SJeff Kirsher 	rp->rx_ring_dma = ring_dma;
1185f2148a47SJeff Kirsher 	rp->tx_ring_dma = ring_dma + RX_RING_SIZE * sizeof(struct rx_desc);
1186f2148a47SJeff Kirsher 
1187f2148a47SJeff Kirsher 	return 0;
1188f2148a47SJeff Kirsher }
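/*
 * Illustrative sketch, not driver code: both rings live in the single
 * coherent allocation made above, so the CPU pointer and the DMA handle
 * are split at the same byte offset.  A hypothetical helper making the
 * tx side of that arithmetic explicit:
 */
static inline dma_addr_t rhine_demo_tx_ring_dma(dma_addr_t ring_dma)
{
	/* tx descriptors start right after RX_RING_SIZE rx descriptors */
	return ring_dma + RX_RING_SIZE * sizeof(struct rx_desc);
}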
1189f2148a47SJeff Kirsher 
1190f2148a47SJeff Kirsher static void free_ring(struct net_device* dev)
1191f2148a47SJeff Kirsher {
1192f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
1193f7630d18SAlexey Charkov 	struct device *hwdev = dev->dev.parent;
1194f2148a47SJeff Kirsher 
1195f7630d18SAlexey Charkov 	dma_free_coherent(hwdev,
1196f2148a47SJeff Kirsher 			  RX_RING_SIZE * sizeof(struct rx_desc) +
1197f2148a47SJeff Kirsher 			  TX_RING_SIZE * sizeof(struct tx_desc),
1198f2148a47SJeff Kirsher 			  rp->rx_ring, rp->rx_ring_dma);
1199f2148a47SJeff Kirsher 	rp->tx_ring = NULL;
1200f2148a47SJeff Kirsher 
1201f2148a47SJeff Kirsher 	if (rp->tx_bufs)
1202f7630d18SAlexey Charkov 		dma_free_coherent(hwdev, PKT_BUF_SZ * TX_RING_SIZE,
1203f2148a47SJeff Kirsher 				  rp->tx_bufs, rp->tx_bufs_dma);
1204f2148a47SJeff Kirsher 
1205f2148a47SJeff Kirsher 	rp->tx_bufs = NULL;
1206f2148a47SJeff Kirsher 
1207f2148a47SJeff Kirsher }
1208f2148a47SJeff Kirsher 
1209a21bb8baSfrançois romieu struct rhine_skb_dma {
1210a21bb8baSfrançois romieu 	struct sk_buff *skb;
1211a21bb8baSfrançois romieu 	dma_addr_t dma;
1212a21bb8baSfrançois romieu };
1213a21bb8baSfrançois romieu 
1214a21bb8baSfrançois romieu static inline int rhine_skb_dma_init(struct net_device *dev,
1215a21bb8baSfrançois romieu 				     struct rhine_skb_dma *sd)
1216f2148a47SJeff Kirsher {
1217f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
1218f7630d18SAlexey Charkov 	struct device *hwdev = dev->dev.parent;
1219a21bb8baSfrançois romieu 	const int size = rp->rx_buf_sz;
1220a21bb8baSfrançois romieu 
1221a21bb8baSfrançois romieu 	sd->skb = netdev_alloc_skb(dev, size);
1222a21bb8baSfrançois romieu 	if (!sd->skb)
1223a21bb8baSfrançois romieu 		return -ENOMEM;
1224a21bb8baSfrançois romieu 
1225a21bb8baSfrançois romieu 	sd->dma = dma_map_single(hwdev, sd->skb->data, size, DMA_FROM_DEVICE);
1226a21bb8baSfrançois romieu 	if (unlikely(dma_mapping_error(hwdev, sd->dma))) {
1227a21bb8baSfrançois romieu 		netif_err(rp, drv, dev, "Rx DMA mapping failure\n");
1228a21bb8baSfrançois romieu 		dev_kfree_skb_any(sd->skb);
1229a21bb8baSfrançois romieu 		return -EIO;
1230a21bb8baSfrançois romieu 	}
1231a21bb8baSfrançois romieu 
1232a21bb8baSfrançois romieu 	return 0;
1233a21bb8baSfrançois romieu }
1234a21bb8baSfrançois romieu 
12358709bb2cSfrançois romieu static void rhine_reset_rbufs(struct rhine_private *rp)
12368709bb2cSfrançois romieu {
12378709bb2cSfrançois romieu 	int i;
12388709bb2cSfrançois romieu 
12398709bb2cSfrançois romieu 	rp->cur_rx = 0;
12408709bb2cSfrançois romieu 
12418709bb2cSfrançois romieu 	for (i = 0; i < RX_RING_SIZE; i++)
12428709bb2cSfrançois romieu 		rp->rx_ring[i].rx_status = cpu_to_le32(DescOwn);
12438709bb2cSfrançois romieu }
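/*
 * Illustrative sketch, not driver code: DescOwn is the ownership bit of
 * the descriptor protocol.  Setting it (as above) hands the descriptor
 * to the NIC; the NIC clears it on completion.  A hypothetical check
 * for a finished rx descriptor:
 */
static inline bool rhine_demo_rx_done(const struct rx_desc *desc)
{
	/* the NIC owns the descriptor for as long as DescOwn is set */
	return !(le32_to_cpu(desc->rx_status) & DescOwn);
}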
12448709bb2cSfrançois romieu 
1245a21bb8baSfrançois romieu static inline void rhine_skb_dma_nic_store(struct rhine_private *rp,
1246a21bb8baSfrançois romieu 					   struct rhine_skb_dma *sd, int entry)
1247a21bb8baSfrançois romieu {
1248a21bb8baSfrançois romieu 	rp->rx_skbuff_dma[entry] = sd->dma;
1249a21bb8baSfrançois romieu 	rp->rx_skbuff[entry] = sd->skb;
1250a21bb8baSfrançois romieu 
1251a21bb8baSfrançois romieu 	rp->rx_ring[entry].addr = cpu_to_le32(sd->dma);
1252a21bb8baSfrançois romieu 	dma_wmb();
1253a21bb8baSfrançois romieu }
1254a21bb8baSfrançois romieu 
12558709bb2cSfrançois romieu static void free_rbufs(struct net_device* dev);
12568709bb2cSfrançois romieu 
12578709bb2cSfrançois romieu static int alloc_rbufs(struct net_device *dev)
1258a21bb8baSfrançois romieu {
1259a21bb8baSfrançois romieu 	struct rhine_private *rp = netdev_priv(dev);
1260f2148a47SJeff Kirsher 	dma_addr_t next;
1261a21bb8baSfrançois romieu 	int rc, i;
1262f2148a47SJeff Kirsher 
1263f2148a47SJeff Kirsher 	rp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
1264f2148a47SJeff Kirsher 	next = rp->rx_ring_dma;
1265f2148a47SJeff Kirsher 
1266f2148a47SJeff Kirsher 	/* Init the ring entries */
1267f2148a47SJeff Kirsher 	for (i = 0; i < RX_RING_SIZE; i++) {
1268f2148a47SJeff Kirsher 		rp->rx_ring[i].rx_status = 0;
1269f2148a47SJeff Kirsher 		rp->rx_ring[i].desc_length = cpu_to_le32(rp->rx_buf_sz);
1270f2148a47SJeff Kirsher 		next += sizeof(struct rx_desc);
1271f2148a47SJeff Kirsher 		rp->rx_ring[i].next_desc = cpu_to_le32(next);
1272f2148a47SJeff Kirsher 		rp->rx_skbuff[i] = NULL;
1273f2148a47SJeff Kirsher 	}
1274f2148a47SJeff Kirsher 	/* Mark the last entry as wrapping the ring. */
1275f2148a47SJeff Kirsher 	rp->rx_ring[i-1].next_desc = cpu_to_le32(rp->rx_ring_dma);
1276f2148a47SJeff Kirsher 
1277f2148a47SJeff Kirsher 	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
1278f2148a47SJeff Kirsher 	for (i = 0; i < RX_RING_SIZE; i++) {
1279a21bb8baSfrançois romieu 		struct rhine_skb_dma sd;
1280a21bb8baSfrançois romieu 
1281a21bb8baSfrançois romieu 		rc = rhine_skb_dma_init(dev, &sd);
12828709bb2cSfrançois romieu 		if (rc < 0) {
12838709bb2cSfrançois romieu 			free_rbufs(dev);
12848709bb2cSfrançois romieu 			goto out;
12858709bb2cSfrançois romieu 		}
1286f2148a47SJeff Kirsher 
1287a21bb8baSfrançois romieu 		rhine_skb_dma_nic_store(rp, &sd, i);
1288f2148a47SJeff Kirsher 	}
12898709bb2cSfrançois romieu 
12908709bb2cSfrançois romieu 	rhine_reset_rbufs(rp);
12918709bb2cSfrançois romieu out:
12928709bb2cSfrançois romieu 	return rc;
1293f2148a47SJeff Kirsher }
1294f2148a47SJeff Kirsher 
1295f2148a47SJeff Kirsher static void free_rbufs(struct net_device* dev)
1296f2148a47SJeff Kirsher {
1297f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
1298f7630d18SAlexey Charkov 	struct device *hwdev = dev->dev.parent;
1299f2148a47SJeff Kirsher 	int i;
1300f2148a47SJeff Kirsher 
1301f2148a47SJeff Kirsher 	/* Free all the skbuffs in the Rx queue. */
1302f2148a47SJeff Kirsher 	for (i = 0; i < RX_RING_SIZE; i++) {
1303f2148a47SJeff Kirsher 		rp->rx_ring[i].rx_status = 0;
1304f2148a47SJeff Kirsher 		rp->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
1305f2148a47SJeff Kirsher 		if (rp->rx_skbuff[i]) {
1306f7630d18SAlexey Charkov 			dma_unmap_single(hwdev,
1307f2148a47SJeff Kirsher 					 rp->rx_skbuff_dma[i],
13084087c4dcSAlexey Charkov 					 rp->rx_buf_sz, DMA_FROM_DEVICE);
1309f2148a47SJeff Kirsher 			dev_kfree_skb(rp->rx_skbuff[i]);
1310f2148a47SJeff Kirsher 		}
1311f2148a47SJeff Kirsher 		rp->rx_skbuff[i] = NULL;
1312f2148a47SJeff Kirsher 	}
1313f2148a47SJeff Kirsher }
1314f2148a47SJeff Kirsher 
1315f2148a47SJeff Kirsher static void alloc_tbufs(struct net_device* dev)
1316f2148a47SJeff Kirsher {
1317f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
1318f2148a47SJeff Kirsher 	dma_addr_t next;
1319f2148a47SJeff Kirsher 	int i;
1320f2148a47SJeff Kirsher 
1321f2148a47SJeff Kirsher 	rp->dirty_tx = rp->cur_tx = 0;
1322f2148a47SJeff Kirsher 	next = rp->tx_ring_dma;
1323f2148a47SJeff Kirsher 	for (i = 0; i < TX_RING_SIZE; i++) {
1324f2148a47SJeff Kirsher 		rp->tx_skbuff[i] = NULL;
1325f2148a47SJeff Kirsher 		rp->tx_ring[i].tx_status = 0;
1326f2148a47SJeff Kirsher 		rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
1327f2148a47SJeff Kirsher 		next += sizeof(struct tx_desc);
1328f2148a47SJeff Kirsher 		rp->tx_ring[i].next_desc = cpu_to_le32(next);
1329f2148a47SJeff Kirsher 		if (rp->quirks & rqRhineI)
1330f2148a47SJeff Kirsher 			rp->tx_buf[i] = &rp->tx_bufs[i * PKT_BUF_SZ];
1331f2148a47SJeff Kirsher 	}
1332f2148a47SJeff Kirsher 	rp->tx_ring[i-1].next_desc = cpu_to_le32(rp->tx_ring_dma);
1333f2148a47SJeff Kirsher 
133492bf2008STino Reichardt 	netdev_reset_queue(dev);
1335f2148a47SJeff Kirsher }
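/*
 * Illustrative sketch, not driver code: on Rhine-I each tx ring entry
 * owns a fixed PKT_BUF_SZ slice of the tx_bufs bounce area set up
 * above.  A hypothetical helper for the slot arithmetic:
 */
static inline void *rhine_demo_tx_slot(struct rhine_private *rp,
				       unsigned int entry)
{
	/* entry i starts i * PKT_BUF_SZ bytes into the bounce buffer */
	return rp->tx_bufs + entry * PKT_BUF_SZ;
}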
1336f2148a47SJeff Kirsher 
1337f2148a47SJeff Kirsher static void free_tbufs(struct net_device* dev)
1338f2148a47SJeff Kirsher {
1339f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
1340f7630d18SAlexey Charkov 	struct device *hwdev = dev->dev.parent;
1341f2148a47SJeff Kirsher 	int i;
1342f2148a47SJeff Kirsher 
1343f2148a47SJeff Kirsher 	for (i = 0; i < TX_RING_SIZE; i++) {
1344f2148a47SJeff Kirsher 		rp->tx_ring[i].tx_status = 0;
1345f2148a47SJeff Kirsher 		rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
1346f2148a47SJeff Kirsher 		rp->tx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
1347f2148a47SJeff Kirsher 		if (rp->tx_skbuff[i]) {
1348f2148a47SJeff Kirsher 			if (rp->tx_skbuff_dma[i]) {
1349f7630d18SAlexey Charkov 				dma_unmap_single(hwdev,
1350f2148a47SJeff Kirsher 						 rp->tx_skbuff_dma[i],
1351f2148a47SJeff Kirsher 						 rp->tx_skbuff[i]->len,
13524087c4dcSAlexey Charkov 						 DMA_TO_DEVICE);
1353f2148a47SJeff Kirsher 			}
1354f2148a47SJeff Kirsher 			dev_kfree_skb(rp->tx_skbuff[i]);
1355f2148a47SJeff Kirsher 		}
1356f2148a47SJeff Kirsher 		rp->tx_skbuff[i] = NULL;
1357f2148a47SJeff Kirsher 		rp->tx_buf[i] = NULL;
1358f2148a47SJeff Kirsher 	}
1359f2148a47SJeff Kirsher }
1360f2148a47SJeff Kirsher 
1361f2148a47SJeff Kirsher static void rhine_check_media(struct net_device *dev, unsigned int init_media)
1362f2148a47SJeff Kirsher {
1363f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
1364f2148a47SJeff Kirsher 	void __iomem *ioaddr = rp->base;
1365f2148a47SJeff Kirsher 
13665bdc7380SBen Hutchings 	if (!rp->mii_if.force_media)
1367fc3e0f8aSFrancois Romieu 		mii_check_media(&rp->mii_if, netif_msg_link(rp), init_media);
1368f2148a47SJeff Kirsher 
1369f2148a47SJeff Kirsher 	if (rp->mii_if.full_duplex)
1370f2148a47SJeff Kirsher 	    iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1FDuplex,
1371f2148a47SJeff Kirsher 		   ioaddr + ChipCmd1);
1372f2148a47SJeff Kirsher 	else
1373f2148a47SJeff Kirsher 	    iowrite8(ioread8(ioaddr + ChipCmd1) & ~Cmd1FDuplex,
1374f2148a47SJeff Kirsher 		   ioaddr + ChipCmd1);
1375fc3e0f8aSFrancois Romieu 
1376fc3e0f8aSFrancois Romieu 	netif_info(rp, link, dev, "force_media %d, carrier %d\n",
1377f2148a47SJeff Kirsher 		   rp->mii_if.force_media, netif_carrier_ok(dev));
1378f2148a47SJeff Kirsher }
1379f2148a47SJeff Kirsher 
1380f2148a47SJeff Kirsher /* Called after status of force_media possibly changed */
1381f2148a47SJeff Kirsher static void rhine_set_carrier(struct mii_if_info *mii)
1382f2148a47SJeff Kirsher {
1383fc3e0f8aSFrancois Romieu 	struct net_device *dev = mii->dev;
1384fc3e0f8aSFrancois Romieu 	struct rhine_private *rp = netdev_priv(dev);
1385fc3e0f8aSFrancois Romieu 
1386f2148a47SJeff Kirsher 	if (mii->force_media) {
1387f2148a47SJeff Kirsher 		/* autoneg is off: Link is always assumed to be up */
1388fc3e0f8aSFrancois Romieu 		if (!netif_carrier_ok(dev))
1389fc3e0f8aSFrancois Romieu 			netif_carrier_on(dev);
139017958438SFrançois Cachereul 	}
139117958438SFrançois Cachereul 
1392fc3e0f8aSFrancois Romieu 	rhine_check_media(dev, 0);
1393fc3e0f8aSFrancois Romieu 
1394fc3e0f8aSFrancois Romieu 	netif_info(rp, link, dev, "force_media %d, carrier %d\n",
1395fc3e0f8aSFrancois Romieu 		   mii->force_media, netif_carrier_ok(dev));
1396f2148a47SJeff Kirsher }
1397f2148a47SJeff Kirsher 
1398f2148a47SJeff Kirsher /**
1399f2148a47SJeff Kirsher  * rhine_set_cam - set CAM multicast filters
1400f2148a47SJeff Kirsher  * @ioaddr: register block of this Rhine
1401f2148a47SJeff Kirsher  * @idx: multicast CAM index [0..MCAM_SIZE-1]
1402f2148a47SJeff Kirsher  * @addr: multicast address (6 bytes)
1403f2148a47SJeff Kirsher  *
1404f2148a47SJeff Kirsher  * Load addresses into multicast filters.
1405f2148a47SJeff Kirsher  */
1406f2148a47SJeff Kirsher static void rhine_set_cam(void __iomem *ioaddr, int idx, u8 *addr)
1407f2148a47SJeff Kirsher {
1408f2148a47SJeff Kirsher 	int i;
1409f2148a47SJeff Kirsher 
1410f2148a47SJeff Kirsher 	iowrite8(CAMC_CAMEN, ioaddr + CamCon);
1411f2148a47SJeff Kirsher 	wmb();
1412f2148a47SJeff Kirsher 
1413f2148a47SJeff Kirsher 	/* Paranoid -- idx out of range should never happen */
1414f2148a47SJeff Kirsher 	idx &= (MCAM_SIZE - 1);
1415f2148a47SJeff Kirsher 
1416f2148a47SJeff Kirsher 	iowrite8((u8) idx, ioaddr + CamAddr);
1417f2148a47SJeff Kirsher 
1418f2148a47SJeff Kirsher 	for (i = 0; i < 6; i++, addr++)
1419f2148a47SJeff Kirsher 		iowrite8(*addr, ioaddr + MulticastFilter0 + i);
1420f2148a47SJeff Kirsher 	udelay(10);
1421f2148a47SJeff Kirsher 	wmb();
1422f2148a47SJeff Kirsher 
1423f2148a47SJeff Kirsher 	iowrite8(CAMC_CAMWR | CAMC_CAMEN, ioaddr + CamCon);
1424f2148a47SJeff Kirsher 	udelay(10);
1425f2148a47SJeff Kirsher 
1426f2148a47SJeff Kirsher 	iowrite8(0, ioaddr + CamCon);
1427f2148a47SJeff Kirsher }
1428f2148a47SJeff Kirsher 
1429f2148a47SJeff Kirsher /**
1430f2148a47SJeff Kirsher  * rhine_set_vlan_cam - set CAM VLAN filters
1431f2148a47SJeff Kirsher  * @ioaddr: register block of this Rhine
1432f2148a47SJeff Kirsher  * @idx: VLAN CAM index [0..VCAM_SIZE-1]
1433f2148a47SJeff Kirsher  * @addr: VLAN ID (2 bytes)
1434f2148a47SJeff Kirsher  *
1435f2148a47SJeff Kirsher  * Load addresses into VLAN filters.
1436f2148a47SJeff Kirsher  */
1437f2148a47SJeff Kirsher static void rhine_set_vlan_cam(void __iomem *ioaddr, int idx, u8 *addr)
1438f2148a47SJeff Kirsher {
1439f2148a47SJeff Kirsher 	iowrite8(CAMC_CAMEN | CAMC_VCAMSL, ioaddr + CamCon);
1440f2148a47SJeff Kirsher 	wmb();
1441f2148a47SJeff Kirsher 
1442f2148a47SJeff Kirsher 	/* Paranoid -- idx out of range should never happen */
1443f2148a47SJeff Kirsher 	idx &= (VCAM_SIZE - 1);
1444f2148a47SJeff Kirsher 
1445f2148a47SJeff Kirsher 	iowrite8((u8) idx, ioaddr + CamAddr);
1446f2148a47SJeff Kirsher 
1447f2148a47SJeff Kirsher 	iowrite16(*((u16 *) addr), ioaddr + MulticastFilter0 + 6);
1448f2148a47SJeff Kirsher 	udelay(10);
1449f2148a47SJeff Kirsher 	wmb();
1450f2148a47SJeff Kirsher 
1451f2148a47SJeff Kirsher 	iowrite8(CAMC_CAMWR | CAMC_CAMEN, ioaddr + CamCon);
1452f2148a47SJeff Kirsher 	udelay(10);
1453f2148a47SJeff Kirsher 
1454f2148a47SJeff Kirsher 	iowrite8(0, ioaddr + CamCon);
1455f2148a47SJeff Kirsher }
1456f2148a47SJeff Kirsher 
1457f2148a47SJeff Kirsher /**
1458f2148a47SJeff Kirsher  * rhine_set_cam_mask - set multicast CAM mask
1459f2148a47SJeff Kirsher  * @ioaddr: register block of this Rhine
1460f2148a47SJeff Kirsher  * @mask: multicast CAM mask
1461f2148a47SJeff Kirsher  *
1462f2148a47SJeff Kirsher  * Mask sets multicast filters active/inactive.
1463f2148a47SJeff Kirsher  */
1464f2148a47SJeff Kirsher static void rhine_set_cam_mask(void __iomem *ioaddr, u32 mask)
1465f2148a47SJeff Kirsher {
1466f2148a47SJeff Kirsher 	iowrite8(CAMC_CAMEN, ioaddr + CamCon);
1467f2148a47SJeff Kirsher 	wmb();
1468f2148a47SJeff Kirsher 
1469f2148a47SJeff Kirsher 	/* write mask */
1470f2148a47SJeff Kirsher 	iowrite32(mask, ioaddr + CamMask);
1471f2148a47SJeff Kirsher 
1472f2148a47SJeff Kirsher 	/* disable CAMEN */
1473f2148a47SJeff Kirsher 	iowrite8(0, ioaddr + CamCon);
1474f2148a47SJeff Kirsher }
1475f2148a47SJeff Kirsher 
1476f2148a47SJeff Kirsher /**
1477f2148a47SJeff Kirsher  * rhine_set_vlan_cam_mask - set VLAN CAM mask
1478f2148a47SJeff Kirsher  * @ioaddr: register block of this Rhine
1479f2148a47SJeff Kirsher  * @mask: VLAN CAM mask
1480f2148a47SJeff Kirsher  *
1481f2148a47SJeff Kirsher  * Mask sets VLAN filters active/inactive.
1482f2148a47SJeff Kirsher  */
1483f2148a47SJeff Kirsher static void rhine_set_vlan_cam_mask(void __iomem *ioaddr, u32 mask)
1484f2148a47SJeff Kirsher {
1485f2148a47SJeff Kirsher 	iowrite8(CAMC_CAMEN | CAMC_VCAMSL, ioaddr + CamCon);
1486f2148a47SJeff Kirsher 	wmb();
1487f2148a47SJeff Kirsher 
1488f2148a47SJeff Kirsher 	/* write mask */
1489f2148a47SJeff Kirsher 	iowrite32(mask, ioaddr + CamMask);
1490f2148a47SJeff Kirsher 
1491f2148a47SJeff Kirsher 	/* disable CAMEN */
1492f2148a47SJeff Kirsher 	iowrite8(0, ioaddr + CamCon);
1493f2148a47SJeff Kirsher }
1494f2148a47SJeff Kirsher 
1495f2148a47SJeff Kirsher /**
1496f2148a47SJeff Kirsher  * rhine_init_cam_filter - initialize CAM filters
1497f2148a47SJeff Kirsher  * @dev: network device
1498f2148a47SJeff Kirsher  *
1499f2148a47SJeff Kirsher  * Initialize (disable) hardware VLAN and multicast support on this
1500f2148a47SJeff Kirsher  * Rhine.
1501f2148a47SJeff Kirsher  */
1502f2148a47SJeff Kirsher static void rhine_init_cam_filter(struct net_device *dev)
1503f2148a47SJeff Kirsher {
1504f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
1505f2148a47SJeff Kirsher 	void __iomem *ioaddr = rp->base;
1506f2148a47SJeff Kirsher 
1507f2148a47SJeff Kirsher 	/* Disable all CAMs */
1508f2148a47SJeff Kirsher 	rhine_set_vlan_cam_mask(ioaddr, 0);
1509f2148a47SJeff Kirsher 	rhine_set_cam_mask(ioaddr, 0);
1510f2148a47SJeff Kirsher 
1511f2148a47SJeff Kirsher 	/* disable hardware VLAN support */
1512f2148a47SJeff Kirsher 	BYTE_REG_BITS_ON(TCR_PQEN, ioaddr + TxConfig);
1513f2148a47SJeff Kirsher 	BYTE_REG_BITS_OFF(BCR1_VIDFR, ioaddr + PCIBusConfig1);
1514f2148a47SJeff Kirsher }
1515f2148a47SJeff Kirsher 
1516f2148a47SJeff Kirsher /**
1517f2148a47SJeff Kirsher  * rhine_update_vcam - update VLAN CAM filters
1518f2148a47SJeff Kirsher  * @dev: network device
1519f2148a47SJeff Kirsher  *
1520f2148a47SJeff Kirsher  * Update VLAN CAM filters to match configuration change.
1521f2148a47SJeff Kirsher  */
1522f2148a47SJeff Kirsher static void rhine_update_vcam(struct net_device *dev)
1523f2148a47SJeff Kirsher {
1524f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
1525f2148a47SJeff Kirsher 	void __iomem *ioaddr = rp->base;
1526f2148a47SJeff Kirsher 	u16 vid;
1527f2148a47SJeff Kirsher 	u32 vCAMmask = 0;	/* 32 vCAMs (6105M and better) */
1528f2148a47SJeff Kirsher 	unsigned int i = 0;
1529f2148a47SJeff Kirsher 
1530f2148a47SJeff Kirsher 	for_each_set_bit(vid, rp->active_vlans, VLAN_N_VID) {
1531f2148a47SJeff Kirsher 		rhine_set_vlan_cam(ioaddr, i, (u8 *)&vid);
1532f2148a47SJeff Kirsher 		vCAMmask |= 1 << i;
1533f2148a47SJeff Kirsher 		if (++i >= VCAM_SIZE)
1534f2148a47SJeff Kirsher 			break;
1535f2148a47SJeff Kirsher 	}
1536f2148a47SJeff Kirsher 	rhine_set_vlan_cam_mask(ioaddr, vCAMmask);
1537f2148a47SJeff Kirsher }
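/*
 * Illustrative sketch, not driver code: rhine_update_vcam() programs
 * one CAM slot per active VLAN and sets the matching bit in the mask,
 * so e.g. three VLANs occupy slots 0..2 and yield a mask of 0x7.  A
 * hypothetical closed form for that accumulation:
 */
static inline u32 rhine_demo_vcam_mask(unsigned int nr_vlans)
{
	if (nr_vlans >= VCAM_SIZE)
		return (u32)((1ULL << VCAM_SIZE) - 1);
	return (1U << nr_vlans) - 1;
}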
1538f2148a47SJeff Kirsher 
153980d5c368SPatrick McHardy static int rhine_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
1540f2148a47SJeff Kirsher {
1541f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
1542f2148a47SJeff Kirsher 
15437ab87ff4SFrancois Romieu 	spin_lock_bh(&rp->lock);
1544f2148a47SJeff Kirsher 	set_bit(vid, rp->active_vlans);
1545f2148a47SJeff Kirsher 	rhine_update_vcam(dev);
15467ab87ff4SFrancois Romieu 	spin_unlock_bh(&rp->lock);
15478e586137SJiri Pirko 	return 0;
1548f2148a47SJeff Kirsher }
1549f2148a47SJeff Kirsher 
155080d5c368SPatrick McHardy static int rhine_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
1551f2148a47SJeff Kirsher {
1552f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
1553f2148a47SJeff Kirsher 
15547ab87ff4SFrancois Romieu 	spin_lock_bh(&rp->lock);
1555f2148a47SJeff Kirsher 	clear_bit(vid, rp->active_vlans);
1556f2148a47SJeff Kirsher 	rhine_update_vcam(dev);
15577ab87ff4SFrancois Romieu 	spin_unlock_bh(&rp->lock);
15588e586137SJiri Pirko 	return 0;
1559f2148a47SJeff Kirsher }
1560f2148a47SJeff Kirsher 
1561f2148a47SJeff Kirsher static void init_registers(struct net_device *dev)
1562f2148a47SJeff Kirsher {
1563f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
1564f2148a47SJeff Kirsher 	void __iomem *ioaddr = rp->base;
1565f2148a47SJeff Kirsher 	int i;
1566f2148a47SJeff Kirsher 
1567f2148a47SJeff Kirsher 	for (i = 0; i < 6; i++)
1568f2148a47SJeff Kirsher 		iowrite8(dev->dev_addr[i], ioaddr + StationAddr + i);
1569f2148a47SJeff Kirsher 
1570f2148a47SJeff Kirsher 	/* Initialize other registers. */
1571f2148a47SJeff Kirsher 	iowrite16(0x0006, ioaddr + PCIBusConfig);	/* Tune configuration (purpose unclear) */
1572f2148a47SJeff Kirsher 	/* Configure initial FIFO thresholds. */
1573f2148a47SJeff Kirsher 	iowrite8(0x20, ioaddr + TxConfig);
1574f2148a47SJeff Kirsher 	rp->tx_thresh = 0x20;
1575f2148a47SJeff Kirsher 	rp->rx_thresh = 0x60;		/* Written in rhine_set_rx_mode(). */
1576f2148a47SJeff Kirsher 
1577f2148a47SJeff Kirsher 	iowrite32(rp->rx_ring_dma, ioaddr + RxRingPtr);
1578f2148a47SJeff Kirsher 	iowrite32(rp->tx_ring_dma, ioaddr + TxRingPtr);
1579f2148a47SJeff Kirsher 
1580f2148a47SJeff Kirsher 	rhine_set_rx_mode(dev);
1581f2148a47SJeff Kirsher 
1582ca8b6e04SAlexey Charkov 	if (rp->quirks & rqMgmt)
1583f2148a47SJeff Kirsher 		rhine_init_cam_filter(dev);
1584f2148a47SJeff Kirsher 
1585f2148a47SJeff Kirsher 	napi_enable(&rp->napi);
1586f2148a47SJeff Kirsher 
15877ab87ff4SFrancois Romieu 	iowrite16(RHINE_EVENT & 0xffff, ioaddr + IntrEnable);
1588f2148a47SJeff Kirsher 
1589f2148a47SJeff Kirsher 	iowrite16(CmdStart | CmdTxOn | CmdRxOn | (Cmd1NoTxPoll << 8),
1590f2148a47SJeff Kirsher 	       ioaddr + ChipCmd);
1591f2148a47SJeff Kirsher 	rhine_check_media(dev, 1);
1592f2148a47SJeff Kirsher }
1593f2148a47SJeff Kirsher 
1594f2148a47SJeff Kirsher /* Enable MII link status auto-polling (required for IntrLinkChange) */
1595a384a33bSFrancois Romieu static void rhine_enable_linkmon(struct rhine_private *rp)
1596f2148a47SJeff Kirsher {
1597a384a33bSFrancois Romieu 	void __iomem *ioaddr = rp->base;
1598a384a33bSFrancois Romieu 
1599f2148a47SJeff Kirsher 	iowrite8(0, ioaddr + MIICmd);
1600f2148a47SJeff Kirsher 	iowrite8(MII_BMSR, ioaddr + MIIRegAddr);
1601f2148a47SJeff Kirsher 	iowrite8(0x80, ioaddr + MIICmd);
1602f2148a47SJeff Kirsher 
1603a384a33bSFrancois Romieu 	rhine_wait_bit_high(rp, MIIRegAddr, 0x20);
1604f2148a47SJeff Kirsher 
1605f2148a47SJeff Kirsher 	iowrite8(MII_BMSR | 0x40, ioaddr + MIIRegAddr);
1606f2148a47SJeff Kirsher }
1607f2148a47SJeff Kirsher 
1608f2148a47SJeff Kirsher /* Disable MII link status auto-polling (required for MDIO access) */
1609a384a33bSFrancois Romieu static void rhine_disable_linkmon(struct rhine_private *rp)
1610f2148a47SJeff Kirsher {
1611a384a33bSFrancois Romieu 	void __iomem *ioaddr = rp->base;
1612a384a33bSFrancois Romieu 
1613f2148a47SJeff Kirsher 	iowrite8(0, ioaddr + MIICmd);
1614f2148a47SJeff Kirsher 
1615a384a33bSFrancois Romieu 	if (rp->quirks & rqRhineI) {
1616f2148a47SJeff Kirsher 		iowrite8(0x01, ioaddr + MIIRegAddr);	/* MII_BMSR */
1617f2148a47SJeff Kirsher 
1618f2148a47SJeff Kirsher 		/* Can be called from ISR. Evil. */
1619f2148a47SJeff Kirsher 		mdelay(1);
1620f2148a47SJeff Kirsher 
1621f2148a47SJeff Kirsher 		/* 0x80 must be set immediately before turning it off */
1622f2148a47SJeff Kirsher 		iowrite8(0x80, ioaddr + MIICmd);
1623f2148a47SJeff Kirsher 
1624a384a33bSFrancois Romieu 		rhine_wait_bit_high(rp, MIIRegAddr, 0x20);
1625f2148a47SJeff Kirsher 
1626f2148a47SJeff Kirsher 		/* Heh. Now clear 0x80 again. */
1627f2148a47SJeff Kirsher 		iowrite8(0, ioaddr + MIICmd);
1628f2148a47SJeff Kirsher 	} else
1630a384a33bSFrancois Romieu 		rhine_wait_bit_high(rp, MIIRegAddr, 0x80);
1631f2148a47SJeff Kirsher }
1632f2148a47SJeff Kirsher 
1633f2148a47SJeff Kirsher /* Read and write over the MII Management Data I/O (MDIO) interface. */
1634f2148a47SJeff Kirsher 
1635f2148a47SJeff Kirsher static int mdio_read(struct net_device *dev, int phy_id, int regnum)
1636f2148a47SJeff Kirsher {
1637f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
1638f2148a47SJeff Kirsher 	void __iomem *ioaddr = rp->base;
1639f2148a47SJeff Kirsher 	int result;
1640f2148a47SJeff Kirsher 
1641a384a33bSFrancois Romieu 	rhine_disable_linkmon(rp);
1642f2148a47SJeff Kirsher 
1643f2148a47SJeff Kirsher 	/* rhine_disable_linkmon already cleared MIICmd */
1644f2148a47SJeff Kirsher 	iowrite8(phy_id, ioaddr + MIIPhyAddr);
1645f2148a47SJeff Kirsher 	iowrite8(regnum, ioaddr + MIIRegAddr);
1646f2148a47SJeff Kirsher 	iowrite8(0x40, ioaddr + MIICmd);		/* Trigger read */
1647a384a33bSFrancois Romieu 	rhine_wait_bit_low(rp, MIICmd, 0x40);
1648f2148a47SJeff Kirsher 	result = ioread16(ioaddr + MIIData);
1649f2148a47SJeff Kirsher 
1650a384a33bSFrancois Romieu 	rhine_enable_linkmon(rp);
1651f2148a47SJeff Kirsher 	return result;
1652f2148a47SJeff Kirsher }
1653f2148a47SJeff Kirsher 
1654f2148a47SJeff Kirsher static void mdio_write(struct net_device *dev, int phy_id, int regnum, int value)
1655f2148a47SJeff Kirsher {
1656f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
1657f2148a47SJeff Kirsher 	void __iomem *ioaddr = rp->base;
1658f2148a47SJeff Kirsher 
1659a384a33bSFrancois Romieu 	rhine_disable_linkmon(rp);
1660f2148a47SJeff Kirsher 
1661f2148a47SJeff Kirsher 	/* rhine_disable_linkmon already cleared MIICmd */
1662f2148a47SJeff Kirsher 	iowrite8(phy_id, ioaddr + MIIPhyAddr);
1663f2148a47SJeff Kirsher 	iowrite8(regnum, ioaddr + MIIRegAddr);
1664f2148a47SJeff Kirsher 	iowrite16(value, ioaddr + MIIData);
1665f2148a47SJeff Kirsher 	iowrite8(0x20, ioaddr + MIICmd);		/* Trigger write */
1666a384a33bSFrancois Romieu 	rhine_wait_bit_low(rp, MIICmd, 0x20);
1667f2148a47SJeff Kirsher 
1668a384a33bSFrancois Romieu 	rhine_enable_linkmon(rp);
1669f2148a47SJeff Kirsher }
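/*
 * Illustrative sketch, not driver code: a hypothetical caller of the
 * accessors above, checking link state.  BMSR latches link-down, so
 * MII convention is to read the register twice and trust the second
 * value.
 */
static inline bool rhine_demo_link_up(struct net_device *dev, int phy_id)
{
	mdio_read(dev, phy_id, MII_BMSR);	/* flush latched link-down */
	return mdio_read(dev, phy_id, MII_BMSR) & BMSR_LSTATUS;
}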
1670f2148a47SJeff Kirsher 
16717ab87ff4SFrancois Romieu static void rhine_task_disable(struct rhine_private *rp)
16727ab87ff4SFrancois Romieu {
16737ab87ff4SFrancois Romieu 	mutex_lock(&rp->task_lock);
16747ab87ff4SFrancois Romieu 	rp->task_enable = false;
16757ab87ff4SFrancois Romieu 	mutex_unlock(&rp->task_lock);
16767ab87ff4SFrancois Romieu 
16777ab87ff4SFrancois Romieu 	cancel_work_sync(&rp->slow_event_task);
16787ab87ff4SFrancois Romieu 	cancel_work_sync(&rp->reset_task);
16797ab87ff4SFrancois Romieu }
16807ab87ff4SFrancois Romieu 
16817ab87ff4SFrancois Romieu static void rhine_task_enable(struct rhine_private *rp)
16827ab87ff4SFrancois Romieu {
16837ab87ff4SFrancois Romieu 	mutex_lock(&rp->task_lock);
16847ab87ff4SFrancois Romieu 	rp->task_enable = true;
16857ab87ff4SFrancois Romieu 	mutex_unlock(&rp->task_lock);
16867ab87ff4SFrancois Romieu }
16877ab87ff4SFrancois Romieu 
1688f2148a47SJeff Kirsher static int rhine_open(struct net_device *dev)
1689f2148a47SJeff Kirsher {
1690f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
1691f2148a47SJeff Kirsher 	void __iomem *ioaddr = rp->base;
1692f2148a47SJeff Kirsher 	int rc;
1693f2148a47SJeff Kirsher 
1694f7630d18SAlexey Charkov 	rc = request_irq(rp->irq, rhine_interrupt, IRQF_SHARED, dev->name, dev);
1695f2148a47SJeff Kirsher 	if (rc)
16964d1fd9c1Sfrançois romieu 		goto out;
1697f2148a47SJeff Kirsher 
1698f7630d18SAlexey Charkov 	netif_dbg(rp, ifup, dev, "%s() irq %d\n", __func__, rp->irq);
1699f2148a47SJeff Kirsher 
1700f2148a47SJeff Kirsher 	rc = alloc_ring(dev);
17014d1fd9c1Sfrançois romieu 	if (rc < 0)
17024d1fd9c1Sfrançois romieu 		goto out_free_irq;
17034d1fd9c1Sfrançois romieu 
17048709bb2cSfrançois romieu 	rc = alloc_rbufs(dev);
17058709bb2cSfrançois romieu 	if (rc < 0)
17068709bb2cSfrançois romieu 		goto out_free_ring;
17078709bb2cSfrançois romieu 
1708f2148a47SJeff Kirsher 	alloc_tbufs(dev);
1709f2148a47SJeff Kirsher 	rhine_chip_reset(dev);
17107ab87ff4SFrancois Romieu 	rhine_task_enable(rp);
1711f2148a47SJeff Kirsher 	init_registers(dev);
1712fc3e0f8aSFrancois Romieu 
1713fc3e0f8aSFrancois Romieu 	netif_dbg(rp, ifup, dev, "%s() Done - status %04x MII status: %04x\n",
1714f2148a47SJeff Kirsher 		  __func__, ioread16(ioaddr + ChipCmd),
1715f2148a47SJeff Kirsher 		  mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
1716f2148a47SJeff Kirsher 
1717f2148a47SJeff Kirsher 	netif_start_queue(dev);
1718f2148a47SJeff Kirsher 
17194d1fd9c1Sfrançois romieu out:
17204d1fd9c1Sfrançois romieu 	return rc;
17214d1fd9c1Sfrançois romieu 
17228709bb2cSfrançois romieu out_free_ring:
17238709bb2cSfrançois romieu 	free_ring(dev);
17244d1fd9c1Sfrançois romieu out_free_irq:
17254d1fd9c1Sfrançois romieu 	free_irq(rp->irq, dev);
17264d1fd9c1Sfrançois romieu 	goto out;
1727f2148a47SJeff Kirsher }
1728f2148a47SJeff Kirsher 
1729f2148a47SJeff Kirsher static void rhine_reset_task(struct work_struct *work)
1730f2148a47SJeff Kirsher {
1731f2148a47SJeff Kirsher 	struct rhine_private *rp = container_of(work, struct rhine_private,
1732f2148a47SJeff Kirsher 						reset_task);
1733f2148a47SJeff Kirsher 	struct net_device *dev = rp->dev;
1734f2148a47SJeff Kirsher 
17357ab87ff4SFrancois Romieu 	mutex_lock(&rp->task_lock);
17367ab87ff4SFrancois Romieu 
17377ab87ff4SFrancois Romieu 	if (!rp->task_enable)
17387ab87ff4SFrancois Romieu 		goto out_unlock;
1739f2148a47SJeff Kirsher 
1740f2148a47SJeff Kirsher 	napi_disable(&rp->napi);
1741a926592fSRichard Weinberger 	netif_tx_disable(dev);
1742f2148a47SJeff Kirsher 	spin_lock_bh(&rp->lock);
1743f2148a47SJeff Kirsher 
1744f2148a47SJeff Kirsher 	/* clear all descriptors */
1745f2148a47SJeff Kirsher 	free_tbufs(dev);
1746f2148a47SJeff Kirsher 	alloc_tbufs(dev);
17478709bb2cSfrançois romieu 
17488709bb2cSfrançois romieu 	rhine_reset_rbufs(rp);
1749f2148a47SJeff Kirsher 
1750f2148a47SJeff Kirsher 	/* Reinitialize the hardware. */
1751f2148a47SJeff Kirsher 	rhine_chip_reset(dev);
1752f2148a47SJeff Kirsher 	init_registers(dev);
1753f2148a47SJeff Kirsher 
1754f2148a47SJeff Kirsher 	spin_unlock_bh(&rp->lock);
1755f2148a47SJeff Kirsher 
1756860e9538SFlorian Westphal 	netif_trans_update(dev); /* prevent tx timeout */
1757f2148a47SJeff Kirsher 	dev->stats.tx_errors++;
1758f2148a47SJeff Kirsher 	netif_wake_queue(dev);
17597ab87ff4SFrancois Romieu 
17607ab87ff4SFrancois Romieu out_unlock:
17617ab87ff4SFrancois Romieu 	mutex_unlock(&rp->task_lock);
1762f2148a47SJeff Kirsher }
1763f2148a47SJeff Kirsher 
1764*0290bd29SMichael S. Tsirkin static void rhine_tx_timeout(struct net_device *dev, unsigned int txqueue)
1765f2148a47SJeff Kirsher {
1766f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
1767f2148a47SJeff Kirsher 	void __iomem *ioaddr = rp->base;
1768f2148a47SJeff Kirsher 
1769f2148a47SJeff Kirsher 	netdev_warn(dev, "Transmit timed out, status %04x, PHY status %04x, resetting...\n",
1770f2148a47SJeff Kirsher 		    ioread16(ioaddr + IntrStatus),
1771f2148a47SJeff Kirsher 		    mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
1772f2148a47SJeff Kirsher 
1773f2148a47SJeff Kirsher 	schedule_work(&rp->reset_task);
1774f2148a47SJeff Kirsher }
1775f2148a47SJeff Kirsher 
17763a5a883aSfrançois romieu static inline bool rhine_tx_queue_full(struct rhine_private *rp)
17773a5a883aSfrançois romieu {
17783a5a883aSfrançois romieu 	return (rp->cur_tx - rp->dirty_tx) >= TX_QUEUE_LEN;
17793a5a883aSfrançois romieu }
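/*
 * Illustrative sketch, not driver code: cur_tx and dirty_tx are free
 * running unsigned counters, so their difference is the number of
 * in-flight descriptors even across a wrap.  With dirty_tx = 0xfffffffe
 * and cur_tx = 3 (post-wrap), 3 - 0xfffffffe is 5 modulo 2^32.
 */
static inline unsigned int rhine_demo_tx_inflight(unsigned int cur_tx,
						  unsigned int dirty_tx)
{
	/* well-defined modulo-2^32 subtraction */
	return cur_tx - dirty_tx;
}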
17803a5a883aSfrançois romieu 
1781f2148a47SJeff Kirsher static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
1782f2148a47SJeff Kirsher 				  struct net_device *dev)
1783f2148a47SJeff Kirsher {
1784f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
1785f7630d18SAlexey Charkov 	struct device *hwdev = dev->dev.parent;
1786f2148a47SJeff Kirsher 	void __iomem *ioaddr = rp->base;
1787f2148a47SJeff Kirsher 	unsigned entry;
1788f2148a47SJeff Kirsher 
1789f2148a47SJeff Kirsher 	/* Caution: the write order is important here, set the field
1790f2148a47SJeff Kirsher 	   with the "ownership" bits last. */
1791f2148a47SJeff Kirsher 
1792f2148a47SJeff Kirsher 	/* Calculate the next Tx descriptor entry. */
1793f2148a47SJeff Kirsher 	entry = rp->cur_tx % TX_RING_SIZE;
1794f2148a47SJeff Kirsher 
1795f2148a47SJeff Kirsher 	if (skb_padto(skb, ETH_ZLEN))
1796f2148a47SJeff Kirsher 		return NETDEV_TX_OK;
1797f2148a47SJeff Kirsher 
1798f2148a47SJeff Kirsher 	rp->tx_skbuff[entry] = skb;
1799f2148a47SJeff Kirsher 
1800f2148a47SJeff Kirsher 	if ((rp->quirks & rqRhineI) &&
1801f2148a47SJeff Kirsher 	    (((unsigned long)skb->data & 3) || skb_shinfo(skb)->nr_frags != 0 ||
1801f2148a47SJeff Kirsher 	     skb->ip_summed == CHECKSUM_PARTIAL)) {
1802f2148a47SJeff Kirsher 		/* Must use alignment buffer. */
1803f2148a47SJeff Kirsher 		if (skb->len > PKT_BUF_SZ) {
1804f2148a47SJeff Kirsher 			/* packet too long, drop it */
18054b3afc6eSEric W. Biederman 			dev_kfree_skb_any(skb);
1806f2148a47SJeff Kirsher 			rp->tx_skbuff[entry] = NULL;
1807f2148a47SJeff Kirsher 			dev->stats.tx_dropped++;
1808f2148a47SJeff Kirsher 			return NETDEV_TX_OK;
1809f2148a47SJeff Kirsher 		}
1810f2148a47SJeff Kirsher 
1811f2148a47SJeff Kirsher 		/* Padding is not copied and so must be redone. */
1812f2148a47SJeff Kirsher 		skb_copy_and_csum_dev(skb, rp->tx_buf[entry]);
1813f2148a47SJeff Kirsher 		if (skb->len < ETH_ZLEN)
1814f2148a47SJeff Kirsher 			memset(rp->tx_buf[entry] + skb->len, 0,
1815f2148a47SJeff Kirsher 			       ETH_ZLEN - skb->len);
1816f2148a47SJeff Kirsher 		rp->tx_skbuff_dma[entry] = 0;
1817f2148a47SJeff Kirsher 		rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_bufs_dma +
1818f2148a47SJeff Kirsher 						      (rp->tx_buf[entry] -
1819f2148a47SJeff Kirsher 						       rp->tx_bufs));
1820f2148a47SJeff Kirsher 	} else {
1821f2148a47SJeff Kirsher 		rp->tx_skbuff_dma[entry] =
1822f7630d18SAlexey Charkov 			dma_map_single(hwdev, skb->data, skb->len,
18234087c4dcSAlexey Charkov 				       DMA_TO_DEVICE);
1824f7630d18SAlexey Charkov 		if (dma_mapping_error(hwdev, rp->tx_skbuff_dma[entry])) {
18254b3afc6eSEric W. Biederman 			dev_kfree_skb_any(skb);
18269b4fe5fbSNeil Horman 			rp->tx_skbuff_dma[entry] = 0;
18279b4fe5fbSNeil Horman 			dev->stats.tx_dropped++;
18289b4fe5fbSNeil Horman 			return NETDEV_TX_OK;
18299b4fe5fbSNeil Horman 		}
1830f2148a47SJeff Kirsher 		rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_skbuff_dma[entry]);
1831f2148a47SJeff Kirsher 	}
1832f2148a47SJeff Kirsher 
1833f2148a47SJeff Kirsher 	rp->tx_ring[entry].desc_length =
1834f2148a47SJeff Kirsher 		cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));
1835f2148a47SJeff Kirsher 
1836df8a39deSJiri Pirko 	if (unlikely(skb_vlan_tag_present(skb))) {
1837df8a39deSJiri Pirko 		u16 vid_pcp = skb_vlan_tag_get(skb);
1838207070f5SRoger Luethi 
1839207070f5SRoger Luethi 		/* drop CFI/DEI bit, register needs VID and PCP */
1840207070f5SRoger Luethi 		vid_pcp = (vid_pcp & VLAN_VID_MASK) |
1841207070f5SRoger Luethi 			  ((vid_pcp & VLAN_PRIO_MASK) >> 1);
1842207070f5SRoger Luethi 		rp->tx_ring[entry].tx_status = cpu_to_le32((vid_pcp) << 16);
1843f2148a47SJeff Kirsher 		/* request tagging */
1844f2148a47SJeff Kirsher 		rp->tx_ring[entry].desc_length |= cpu_to_le32(0x020000);
1845f2148a47SJeff Kirsher 	} else
1847f2148a47SJeff Kirsher 		rp->tx_ring[entry].tx_status = 0;
1848f2148a47SJeff Kirsher 
184992bf2008STino Reichardt 	netdev_sent_queue(dev, skb->len);
1850f2148a47SJeff Kirsher 	/* Order descriptor writes before handing ownership to the NIC. */
1851e1efa872Sfrançois romieu 	dma_wmb();
1852f2148a47SJeff Kirsher 	rp->tx_ring[entry].tx_status |= cpu_to_le32(DescOwn);
1853f2148a47SJeff Kirsher 	wmb();
1854f2148a47SJeff Kirsher 
1855f2148a47SJeff Kirsher 	rp->cur_tx++;
18563a5a883aSfrançois romieu 	/*
18573a5a883aSfrançois romieu 	 * Make the cur_tx update visible before the NIC is kicked below;
18583a5a883aSfrançois romieu 	 * otherwise the transmit completion handler could miss this
18593a5a883aSfrançois romieu 	 * request. Pairs with the smp_rmb() in rhine_tx().
18603a5a883aSfrançois romieu 	 */
18613a5a883aSfrançois romieu 	smp_wmb();
1862f2148a47SJeff Kirsher 
1863f2148a47SJeff Kirsher 	/* Non-x86 Todo: explicitly flush cache lines here. */
1864f2148a47SJeff Kirsher 
1865df8a39deSJiri Pirko 	if (skb_vlan_tag_present(skb))
1866f2148a47SJeff Kirsher 		/* Tx queues are bits 7-0 (first Tx queue: bit 7) */
1867f2148a47SJeff Kirsher 		BYTE_REG_BITS_ON(1 << 7, ioaddr + TQWake);
1868f2148a47SJeff Kirsher 
1869f2148a47SJeff Kirsher 	/* Wake the potentially-idle transmit channel */
1870f2148a47SJeff Kirsher 	iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
1871f2148a47SJeff Kirsher 	       ioaddr + ChipCmd1);
1872f2148a47SJeff Kirsher 	IOSYNC;
1873f2148a47SJeff Kirsher 
18743a5a883aSfrançois romieu 	/* dirty_tx may be pessimistically out-of-sync. See rhine_tx. */
18753a5a883aSfrançois romieu 	if (rhine_tx_queue_full(rp)) {
1876f2148a47SJeff Kirsher 		netif_stop_queue(dev);
18773a5a883aSfrançois romieu 		smp_rmb();
18783a5a883aSfrançois romieu 		/* Re-check after the barrier: wake again if completions freed space. */
18793a5a883aSfrançois romieu 		if (!rhine_tx_queue_full(rp))
18803a5a883aSfrançois romieu 			netif_wake_queue(dev);
18813a5a883aSfrançois romieu 	}
1882f2148a47SJeff Kirsher 
1883fc3e0f8aSFrancois Romieu 	netif_dbg(rp, tx_queued, dev, "Transmit frame #%d queued in slot %d\n",
1884f2148a47SJeff Kirsher 		  rp->cur_tx - 1, entry);
1885fc3e0f8aSFrancois Romieu 
1886f2148a47SJeff Kirsher 	return NETDEV_TX_OK;
1887f2148a47SJeff Kirsher }
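/*
 * Illustrative sketch, not driver code: worked example of the CFI/DEI
 * drop in rhine_start_tx().  A TCI of 0xa064 (PCP 5, DEI 0, VID 0x064)
 * keeps its 12 VID bits and shifts the three PCP bits down by one,
 * giving 0x5064 for the descriptor's VLAN field.
 */
static inline u16 rhine_demo_tci_to_desc(u16 vid_pcp)
{
	return (vid_pcp & VLAN_VID_MASK) |
	       ((vid_pcp & VLAN_PRIO_MASK) >> 1);
}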
1888f2148a47SJeff Kirsher 
18897ab87ff4SFrancois Romieu static void rhine_irq_disable(struct rhine_private *rp)
18907ab87ff4SFrancois Romieu {
18917ab87ff4SFrancois Romieu 	iowrite16(0x0000, rp->base + IntrEnable);
18927ab87ff4SFrancois Romieu }
18937ab87ff4SFrancois Romieu 
1894f2148a47SJeff Kirsher /* The interrupt handler does all of the Rx thread work and cleans up
1895f2148a47SJeff Kirsher    after the Tx thread. */
1896f2148a47SJeff Kirsher static irqreturn_t rhine_interrupt(int irq, void *dev_instance)
1897f2148a47SJeff Kirsher {
1898f2148a47SJeff Kirsher 	struct net_device *dev = dev_instance;
1899f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
19007ab87ff4SFrancois Romieu 	u32 status;
1901f2148a47SJeff Kirsher 	int handled = 0;
1902f2148a47SJeff Kirsher 
19037ab87ff4SFrancois Romieu 	status = rhine_get_events(rp);
1904f2148a47SJeff Kirsher 
1905fc3e0f8aSFrancois Romieu 	netif_dbg(rp, intr, dev, "Interrupt, status %08x\n", status);
1906f2148a47SJeff Kirsher 
19077ab87ff4SFrancois Romieu 	if (status & RHINE_EVENT) {
19087ab87ff4SFrancois Romieu 		handled = 1;
1909f2148a47SJeff Kirsher 
19107ab87ff4SFrancois Romieu 		rhine_irq_disable(rp);
1911f2148a47SJeff Kirsher 		napi_schedule(&rp->napi);
1912f2148a47SJeff Kirsher 	}
1913f2148a47SJeff Kirsher 
19147ab87ff4SFrancois Romieu 	if (status & ~(IntrLinkChange | IntrStatsMax | RHINE_EVENT_NAPI)) {
1915fc3e0f8aSFrancois Romieu 		netif_err(rp, intr, dev, "Something Wicked happened! %08x\n",
19167ab87ff4SFrancois Romieu 			  status);
1917f2148a47SJeff Kirsher 	}
1918f2148a47SJeff Kirsher 
1919f2148a47SJeff Kirsher 	return IRQ_RETVAL(handled);
1920f2148a47SJeff Kirsher }
1921f2148a47SJeff Kirsher 
1922f2148a47SJeff Kirsher /* This routine is logically part of the interrupt handler, but isolated
1923f2148a47SJeff Kirsher    for clarity. */
1924f2148a47SJeff Kirsher static void rhine_tx(struct net_device *dev)
1925f2148a47SJeff Kirsher {
1926f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
1927f7630d18SAlexey Charkov 	struct device *hwdev = dev->dev.parent;
192892bf2008STino Reichardt 	unsigned int pkts_compl = 0, bytes_compl = 0;
19293a5a883aSfrançois romieu 	unsigned int dirty_tx = rp->dirty_tx;
19303a5a883aSfrançois romieu 	unsigned int cur_tx;
193192bf2008STino Reichardt 	struct sk_buff *skb;
1932f2148a47SJeff Kirsher 
19333a5a883aSfrançois romieu 	/*
19343a5a883aSfrançois romieu 	 * The race with rhine_start_tx does not matter here as long as
19353a5a883aSfrançois romieu 	 * this side observes a cur_tx value at least as recent as the
19363a5a883aSfrançois romieu 	 * one published when the packet was handed to the chipset.
19373a5a883aSfrançois romieu 	 * Executive summary: smp_rmb() balances smp_wmb() in rhine_start_tx.
19383a5a883aSfrançois romieu 	 */
19393a5a883aSfrançois romieu 	smp_rmb();
19403a5a883aSfrançois romieu 	cur_tx = rp->cur_tx;
1941f2148a47SJeff Kirsher 	/* find and clean up dirty tx descriptors */
19423a5a883aSfrançois romieu 	while (dirty_tx != cur_tx) {
19433a5a883aSfrançois romieu 		unsigned int entry = dirty_tx % TX_RING_SIZE;
19443a5a883aSfrançois romieu 		u32 txstatus = le32_to_cpu(rp->tx_ring[entry].tx_status);
19453a5a883aSfrançois romieu 
1946fc3e0f8aSFrancois Romieu 		netif_dbg(rp, tx_done, dev, "Tx scavenge %d status %08x\n",
1947f2148a47SJeff Kirsher 			  entry, txstatus);
1948f2148a47SJeff Kirsher 		if (txstatus & DescOwn)
1949f2148a47SJeff Kirsher 			break;
195092bf2008STino Reichardt 		skb = rp->tx_skbuff[entry];
1951f2148a47SJeff Kirsher 		if (txstatus & 0x8000) {
1952fc3e0f8aSFrancois Romieu 			netif_dbg(rp, tx_done, dev,
1953fc3e0f8aSFrancois Romieu 				  "Transmit error, Tx status %08x\n", txstatus);
1954f2148a47SJeff Kirsher 			dev->stats.tx_errors++;
1955f2148a47SJeff Kirsher 			if (txstatus & 0x0400)
1956f2148a47SJeff Kirsher 				dev->stats.tx_carrier_errors++;
1957f2148a47SJeff Kirsher 			if (txstatus & 0x0200)
1958f2148a47SJeff Kirsher 				dev->stats.tx_window_errors++;
1959f2148a47SJeff Kirsher 			if (txstatus & 0x0100)
1960f2148a47SJeff Kirsher 				dev->stats.tx_aborted_errors++;
1961f2148a47SJeff Kirsher 			if (txstatus & 0x0080)
1962f2148a47SJeff Kirsher 				dev->stats.tx_heartbeat_errors++;
1963f2148a47SJeff Kirsher 			if (((rp->quirks & rqRhineI) && txstatus & 0x0002) ||
1964f2148a47SJeff Kirsher 			    (txstatus & 0x0800) || (txstatus & 0x1000)) {
1965f2148a47SJeff Kirsher 				dev->stats.tx_fifo_errors++;
1966f2148a47SJeff Kirsher 				rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
1967f2148a47SJeff Kirsher 				break; /* Keep the skb - we try again */
1968f2148a47SJeff Kirsher 			}
1969f2148a47SJeff Kirsher 			/* Transmitter restarted in 'abnormal' handler. */
1970f2148a47SJeff Kirsher 		} else {
1971f2148a47SJeff Kirsher 			if (rp->quirks & rqRhineI)
1972f2148a47SJeff Kirsher 				dev->stats.collisions += (txstatus >> 3) & 0x0F;
1973f2148a47SJeff Kirsher 			else
1974f2148a47SJeff Kirsher 				dev->stats.collisions += txstatus & 0x0F;
1975fc3e0f8aSFrancois Romieu 			netif_dbg(rp, tx_done, dev, "collisions: %1.1x:%1.1x\n",
1976fc3e0f8aSFrancois Romieu 				  (txstatus >> 3) & 0xF, txstatus & 0xF);
1977f7b5d1b9SJamie Gloudon 
1978f7b5d1b9SJamie Gloudon 			u64_stats_update_begin(&rp->tx_stats.syncp);
197992bf2008STino Reichardt 			rp->tx_stats.bytes += skb->len;
1980f7b5d1b9SJamie Gloudon 			rp->tx_stats.packets++;
1981f7b5d1b9SJamie Gloudon 			u64_stats_update_end(&rp->tx_stats.syncp);
1982f2148a47SJeff Kirsher 		}
1983f2148a47SJeff Kirsher 		/* Free the original skb. */
1984f2148a47SJeff Kirsher 		if (rp->tx_skbuff_dma[entry]) {
1985f7630d18SAlexey Charkov 			dma_unmap_single(hwdev,
1986f2148a47SJeff Kirsher 					 rp->tx_skbuff_dma[entry],
198792bf2008STino Reichardt 					 skb->len,
19884087c4dcSAlexey Charkov 					 DMA_TO_DEVICE);
1989f2148a47SJeff Kirsher 		}
199092bf2008STino Reichardt 		bytes_compl += skb->len;
199192bf2008STino Reichardt 		pkts_compl++;
199292bf2008STino Reichardt 		dev_consume_skb_any(skb);
1993f2148a47SJeff Kirsher 		rp->tx_skbuff[entry] = NULL;
19943a5a883aSfrançois romieu 		dirty_tx++;
1995f2148a47SJeff Kirsher 	}
199692bf2008STino Reichardt 
19973a5a883aSfrançois romieu 	rp->dirty_tx = dirty_tx;
19983a5a883aSfrançois romieu 	/* Pity we can't rely on the nearby BQL completion implicit barrier. */
19993a5a883aSfrançois romieu 	smp_wmb();
20003a5a883aSfrançois romieu 
200192bf2008STino Reichardt 	netdev_completed_queue(dev, pkts_compl, bytes_compl);
20023a5a883aSfrançois romieu 
20033a5a883aSfrançois romieu 	/* cur_tx may be optimistically out-of-sync. See rhine_start_tx. */
20043a5a883aSfrançois romieu 	if (!rhine_tx_queue_full(rp) && netif_queue_stopped(dev)) {
2005f2148a47SJeff Kirsher 		netif_wake_queue(dev);
20063a5a883aSfrançois romieu 		smp_rmb();
20073a5a883aSfrançois romieu 		/* Re-check after the barrier: stop again if the ring refilled. */
20083a5a883aSfrançois romieu 		if (rhine_tx_queue_full(rp))
20093a5a883aSfrançois romieu 			netif_stop_queue(dev);
20103a5a883aSfrançois romieu 	}
2011f2148a47SJeff Kirsher }
2012f2148a47SJeff Kirsher 
2013f2148a47SJeff Kirsher /**
2014f2148a47SJeff Kirsher  * rhine_get_vlan_tci - extract TCI from Rx data buffer
2015f2148a47SJeff Kirsher  * @skb: pointer to sk_buff
2016f2148a47SJeff Kirsher  * @data_size: used data area of the buffer including CRC
2017f2148a47SJeff Kirsher  *
2018f2148a47SJeff Kirsher  * If hardware VLAN tag extraction is enabled and the chip indicates a 802.1Q
2019f2148a47SJeff Kirsher  * packet, the extracted 802.1Q header (2 bytes TPID + 2 bytes TCI) is 4-byte
2020f2148a47SJeff Kirsher  * aligned following the CRC.
2021f2148a47SJeff Kirsher  */
2022f2148a47SJeff Kirsher static inline u16 rhine_get_vlan_tci(struct sk_buff *skb, int data_size)
2023f2148a47SJeff Kirsher {
2024f2148a47SJeff Kirsher 	u8 *trailer = (u8 *)skb->data + ((data_size + 3) & ~3) + 2;
2025f2148a47SJeff Kirsher 	return be16_to_cpup((__be16 *)trailer);
2026f2148a47SJeff Kirsher }
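/*
 * Illustrative sketch, not driver code: the trailer arithmetic above,
 * unrolled.  For data_size = 66, (66 + 3) & ~3 rounds up to 68 and the
 * extra 2 bytes skip the TPID, so the TCI sits at skb->data + 70.
 */
static inline unsigned int rhine_demo_tci_offset(int data_size)
{
	unsigned int aligned = (data_size + 3) & ~3;	/* 4-byte align */

	return aligned + 2;				/* skip 2-byte TPID */
}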
2027f2148a47SJeff Kirsher 
2028810f19bcSfrançois romieu static inline void rhine_rx_vlan_tag(struct sk_buff *skb, struct rx_desc *desc,
2029810f19bcSfrançois romieu 				     int data_size)
2030810f19bcSfrançois romieu {
2031810f19bcSfrançois romieu 	dma_rmb();
2032810f19bcSfrançois romieu 	if (unlikely(desc->desc_length & cpu_to_le32(DescTag))) {
2033810f19bcSfrançois romieu 		u16 vlan_tci;
2034810f19bcSfrançois romieu 
2035810f19bcSfrançois romieu 		vlan_tci = rhine_get_vlan_tci(skb, data_size);
2036810f19bcSfrançois romieu 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
2037810f19bcSfrançois romieu 	}
2038810f19bcSfrançois romieu }
2039810f19bcSfrançois romieu 
2040f2148a47SJeff Kirsher /* Process up to limit frames from receive ring */
2041f2148a47SJeff Kirsher static int rhine_rx(struct net_device *dev, int limit)
2042f2148a47SJeff Kirsher {
2043f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
2044f7630d18SAlexey Charkov 	struct device *hwdev = dev->dev.parent;
2045f2148a47SJeff Kirsher 	int entry = rp->cur_rx % RX_RING_SIZE;
204662ca1ba0Sfrançois romieu 	int count;
2047f2148a47SJeff Kirsher 
2048fc3e0f8aSFrancois Romieu 	netif_dbg(rp, rx_status, dev, "%s(), entry %d status %08x\n", __func__,
204962ca1ba0Sfrançois romieu 		  entry, le32_to_cpu(rp->rx_ring[entry].rx_status));
2050f2148a47SJeff Kirsher 
2051f2148a47SJeff Kirsher 	/* If EOP is set on the next entry, it's a new packet. Send it up. */
2052f2148a47SJeff Kirsher 	for (count = 0; count < limit; ++count) {
205362ca1ba0Sfrançois romieu 		struct rx_desc *desc = rp->rx_ring + entry;
2054f2148a47SJeff Kirsher 		u32 desc_status = le32_to_cpu(desc->rx_status);
2055f2148a47SJeff Kirsher 		int data_size = desc_status >> 16;
2056f2148a47SJeff Kirsher 
2057f2148a47SJeff Kirsher 		if (desc_status & DescOwn)
2058f2148a47SJeff Kirsher 			break;
2059f2148a47SJeff Kirsher 
2060fc3e0f8aSFrancois Romieu 		netif_dbg(rp, rx_status, dev, "%s() status %08x\n", __func__,
2061fc3e0f8aSFrancois Romieu 			  desc_status);
2062f2148a47SJeff Kirsher 
2063f2148a47SJeff Kirsher 		if ((desc_status & (RxWholePkt | RxErr)) != RxWholePkt) {
2064f2148a47SJeff Kirsher 			if ((desc_status & RxWholePkt) != RxWholePkt) {
2065f2148a47SJeff Kirsher 				netdev_warn(dev,
2066f2148a47SJeff Kirsher 	"Oversized Ethernet frame spanned multiple buffers, "
2067f2148a47SJeff Kirsher 	"entry %#x length %d status %08x!\n",
2068f2148a47SJeff Kirsher 					    entry, data_size,
2069f2148a47SJeff Kirsher 					    desc_status);
2070f2148a47SJeff Kirsher 				dev->stats.rx_length_errors++;
2071f2148a47SJeff Kirsher 			} else if (desc_status & RxErr) {
2072f2148a47SJeff Kirsher 				/* There was an error. */
2073fc3e0f8aSFrancois Romieu 				netif_dbg(rp, rx_err, dev,
2074fc3e0f8aSFrancois Romieu 					  "%s() Rx error %08x\n", __func__,
2075fc3e0f8aSFrancois Romieu 					  desc_status);
2076f2148a47SJeff Kirsher 				dev->stats.rx_errors++;
2077f2148a47SJeff Kirsher 				if (desc_status & 0x0030)
2078f2148a47SJeff Kirsher 					dev->stats.rx_length_errors++;
2079f2148a47SJeff Kirsher 				if (desc_status & 0x0048)
2080f2148a47SJeff Kirsher 					dev->stats.rx_fifo_errors++;
2081f2148a47SJeff Kirsher 				if (desc_status & 0x0004)
2082f2148a47SJeff Kirsher 					dev->stats.rx_frame_errors++;
2083f2148a47SJeff Kirsher 				if (desc_status & 0x0002) {
2084f2148a47SJeff Kirsher 					/* this can also be updated outside the interrupt handler */
2085f2148a47SJeff Kirsher 					spin_lock(&rp->lock);
2086f2148a47SJeff Kirsher 					dev->stats.rx_crc_errors++;
2087f2148a47SJeff Kirsher 					spin_unlock(&rp->lock);
2088f2148a47SJeff Kirsher 				}
2089f2148a47SJeff Kirsher 			}
2090f2148a47SJeff Kirsher 		} else {
2091f2148a47SJeff Kirsher 			/* Length should omit the CRC */
2092f2148a47SJeff Kirsher 			int pkt_len = data_size - 4;
20938709bb2cSfrançois romieu 			struct sk_buff *skb;
2094f2148a47SJeff Kirsher 
2095f2148a47SJeff Kirsher 			/* Check if the packet is long enough to accept without
2096f2148a47SJeff Kirsher 			   copying to a minimally-sized skbuff. */
20978709bb2cSfrançois romieu 			if (pkt_len < rx_copybreak) {
2098f2148a47SJeff Kirsher 				skb = netdev_alloc_skb_ip_align(dev, pkt_len);
20998709bb2cSfrançois romieu 				if (unlikely(!skb))
21008709bb2cSfrançois romieu 					goto drop;
21018709bb2cSfrançois romieu 
2102f7630d18SAlexey Charkov 				dma_sync_single_for_cpu(hwdev,
2103f2148a47SJeff Kirsher 							rp->rx_skbuff_dma[entry],
2104f2148a47SJeff Kirsher 							rp->rx_buf_sz,
21054087c4dcSAlexey Charkov 							DMA_FROM_DEVICE);
2106f2148a47SJeff Kirsher 
2107f2148a47SJeff Kirsher 				skb_copy_to_linear_data(skb,
2108f2148a47SJeff Kirsher 						 rp->rx_skbuff[entry]->data,
2109f2148a47SJeff Kirsher 						 pkt_len);
21108709bb2cSfrançois romieu 
2111f7630d18SAlexey Charkov 				dma_sync_single_for_device(hwdev,
2112f2148a47SJeff Kirsher 							   rp->rx_skbuff_dma[entry],
2113f2148a47SJeff Kirsher 							   rp->rx_buf_sz,
21144087c4dcSAlexey Charkov 							   DMA_FROM_DEVICE);
2115f2148a47SJeff Kirsher 			} else {
21168709bb2cSfrançois romieu 				struct rhine_skb_dma sd;
21178709bb2cSfrançois romieu 
21188709bb2cSfrançois romieu 				if (unlikely(rhine_skb_dma_init(dev, &sd) < 0))
21198709bb2cSfrançois romieu 					goto drop;
21208709bb2cSfrançois romieu 
2121f2148a47SJeff Kirsher 				skb = rp->rx_skbuff[entry];
21228709bb2cSfrançois romieu 
2123f7630d18SAlexey Charkov 				dma_unmap_single(hwdev,
2124f2148a47SJeff Kirsher 						 rp->rx_skbuff_dma[entry],
2125f2148a47SJeff Kirsher 						 rp->rx_buf_sz,
21264087c4dcSAlexey Charkov 						 DMA_FROM_DEVICE);
21278709bb2cSfrançois romieu 				rhine_skb_dma_nic_store(rp, &sd, entry);
2128f2148a47SJeff Kirsher 			}
2129f2148a47SJeff Kirsher 
21308709bb2cSfrançois romieu 			skb_put(skb, pkt_len);
2131f2148a47SJeff Kirsher 
2132810f19bcSfrançois romieu 			rhine_rx_vlan_tag(skb, desc, data_size);
2133810f19bcSfrançois romieu 
21345f715c09SAndrej Ota 			skb->protocol = eth_type_trans(skb, dev);
21355f715c09SAndrej Ota 
2136f2148a47SJeff Kirsher 			netif_receive_skb(skb);
2137f7b5d1b9SJamie Gloudon 
2138f7b5d1b9SJamie Gloudon 			u64_stats_update_begin(&rp->rx_stats.syncp);
2139f7b5d1b9SJamie Gloudon 			rp->rx_stats.bytes += pkt_len;
2140f7b5d1b9SJamie Gloudon 			rp->rx_stats.packets++;
2141f7b5d1b9SJamie Gloudon 			u64_stats_update_end(&rp->rx_stats.syncp);
2142f2148a47SJeff Kirsher 		}
21438709bb2cSfrançois romieu give_descriptor_to_nic:
21448709bb2cSfrançois romieu 		desc->rx_status = cpu_to_le32(DescOwn);
2145f2148a47SJeff Kirsher 		entry = (++rp->cur_rx) % RX_RING_SIZE;
2146f2148a47SJeff Kirsher 	}
2147f2148a47SJeff Kirsher 
2148f2148a47SJeff Kirsher 	return count;
21498709bb2cSfrançois romieu 
21508709bb2cSfrançois romieu drop:
21518709bb2cSfrançois romieu 	dev->stats.rx_dropped++;
21528709bb2cSfrançois romieu 	goto give_descriptor_to_nic;
2153f2148a47SJeff Kirsher }
2154f2148a47SJeff Kirsher 
2155f2148a47SJeff Kirsher static void rhine_restart_tx(struct net_device *dev)
2155f2148a47SJeff Kirsher {
2156f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
2157f2148a47SJeff Kirsher 	void __iomem *ioaddr = rp->base;
2158f2148a47SJeff Kirsher 	int entry = rp->dirty_tx % TX_RING_SIZE;
2159f2148a47SJeff Kirsher 	u32 intr_status;
2160f2148a47SJeff Kirsher 
2161f2148a47SJeff Kirsher 	/*
2162f2148a47SJeff Kirsher 	 * If new errors occurred, we need to sort them out before doing Tx.
2163f2148a47SJeff Kirsher 	 * In that case the ISR will bring us back here soon anyway.
2164f2148a47SJeff Kirsher 	 */
2165a20a28bcSFrancois Romieu 	intr_status = rhine_get_events(rp);
2166f2148a47SJeff Kirsher 
2167f2148a47SJeff Kirsher 	if ((intr_status & IntrTxErrSummary) == 0) {
2169f2148a47SJeff Kirsher 		/* We know better than the chip where it should continue. */
2170f2148a47SJeff Kirsher 		iowrite32(rp->tx_ring_dma + entry * sizeof(struct tx_desc),
2171f2148a47SJeff Kirsher 		       ioaddr + TxRingPtr);
2172f2148a47SJeff Kirsher 
2173f2148a47SJeff Kirsher 		iowrite8(ioread8(ioaddr + ChipCmd) | CmdTxOn,
2174f2148a47SJeff Kirsher 		       ioaddr + ChipCmd);
2175f2148a47SJeff Kirsher 
2176f2148a47SJeff Kirsher 		if (rp->tx_ring[entry].desc_length & cpu_to_le32(0x020000))
2177f2148a47SJeff Kirsher 			/* Tx queues are bits 7-0 (first Tx queue: bit 7) */
2178f2148a47SJeff Kirsher 			BYTE_REG_BITS_ON(1 << 7, ioaddr + TQWake);
2179f2148a47SJeff Kirsher 
2180f2148a47SJeff Kirsher 		iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
2181f2148a47SJeff Kirsher 		       ioaddr + ChipCmd1);
2182f2148a47SJeff Kirsher 		IOSYNC;
2183f2148a47SJeff Kirsher 	} else {
2185f2148a47SJeff Kirsher 		/* This should never happen */
2186fc3e0f8aSFrancois Romieu 		netif_warn(rp, tx_err, dev, "another error occurred %08x\n",
2187fc3e0f8aSFrancois Romieu 			   intr_status);
2188f2148a47SJeff Kirsher 	}
2190f2148a47SJeff Kirsher }
2191f2148a47SJeff Kirsher 
21927ab87ff4SFrancois Romieu static void rhine_slow_event_task(struct work_struct *work)
2193f2148a47SJeff Kirsher {
21947ab87ff4SFrancois Romieu 	struct rhine_private *rp =
21957ab87ff4SFrancois Romieu 		container_of(work, struct rhine_private, slow_event_task);
21967ab87ff4SFrancois Romieu 	struct net_device *dev = rp->dev;
21977ab87ff4SFrancois Romieu 	u32 intr_status;
2198f2148a47SJeff Kirsher 
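	/* Link changes and PCI errors are handled here in process
	 * context; the full interrupt set is re-armed on the way out.
	 */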
21997ab87ff4SFrancois Romieu 	mutex_lock(&rp->task_lock);
22007ab87ff4SFrancois Romieu 
22017ab87ff4SFrancois Romieu 	if (!rp->task_enable)
22027ab87ff4SFrancois Romieu 		goto out_unlock;
22037ab87ff4SFrancois Romieu 
22047ab87ff4SFrancois Romieu 	intr_status = rhine_get_events(rp);
22057ab87ff4SFrancois Romieu 	rhine_ack_events(rp, intr_status & RHINE_EVENT_SLOW);
2206f2148a47SJeff Kirsher 
2207f2148a47SJeff Kirsher 	if (intr_status & IntrLinkChange)
2208f2148a47SJeff Kirsher 		rhine_check_media(dev, 0);
2209f2148a47SJeff Kirsher 
2210fc3e0f8aSFrancois Romieu 	if (intr_status & IntrPCIErr)
2211fc3e0f8aSFrancois Romieu 		netif_warn(rp, hw, dev, "PCI error\n");
2212fc3e0f8aSFrancois Romieu 
2213559bcac3SDavid S. Miller 	iowrite16(RHINE_EVENT & 0xffff, rp->base + IntrEnable);
2214f2148a47SJeff Kirsher 
22157ab87ff4SFrancois Romieu out_unlock:
22167ab87ff4SFrancois Romieu 	mutex_unlock(&rp->task_lock);
2217f2148a47SJeff Kirsher }
2218f2148a47SJeff Kirsher 
2219bc1f4470Sstephen hemminger static void
2220f7b5d1b9SJamie Gloudon rhine_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
2221f2148a47SJeff Kirsher {
2222f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
2223f7b5d1b9SJamie Gloudon 	unsigned int start;
2224f2148a47SJeff Kirsher 
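	/* Fold the chip's CRC/missed frame counters into dev->stats
	 * under the same lock the interrupt path takes for them.
	 */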
22257ab87ff4SFrancois Romieu 	spin_lock_bh(&rp->lock);
22267ab87ff4SFrancois Romieu 	rhine_update_rx_crc_and_missed_errord(rp);
22277ab87ff4SFrancois Romieu 	spin_unlock_bh(&rp->lock);
2228f2148a47SJeff Kirsher 
2229f7b5d1b9SJamie Gloudon 	netdev_stats_to_stats64(stats, &dev->stats);
2230f7b5d1b9SJamie Gloudon 
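	/* The syncp loops below retry the reads until no datapath
	 * update raced with them, so the 64-bit counters are consistent
	 * even on 32-bit machines.
	 */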
2231f7b5d1b9SJamie Gloudon 	do {
223257a7744eSEric W. Biederman 		start = u64_stats_fetch_begin_irq(&rp->rx_stats.syncp);
2233f7b5d1b9SJamie Gloudon 		stats->rx_packets = rp->rx_stats.packets;
2234f7b5d1b9SJamie Gloudon 		stats->rx_bytes = rp->rx_stats.bytes;
223557a7744eSEric W. Biederman 	} while (u64_stats_fetch_retry_irq(&rp->rx_stats.syncp, start));
2236f7b5d1b9SJamie Gloudon 
2237f7b5d1b9SJamie Gloudon 	do {
223857a7744eSEric W. Biederman 		start = u64_stats_fetch_begin_irq(&rp->tx_stats.syncp);
2239f7b5d1b9SJamie Gloudon 		stats->tx_packets = rp->tx_stats.packets;
2240f7b5d1b9SJamie Gloudon 		stats->tx_bytes = rp->tx_stats.bytes;
224157a7744eSEric W. Biederman 	} while (u64_stats_fetch_retry_irq(&rp->tx_stats.syncp, start));
2242f2148a47SJeff Kirsher }
2243f2148a47SJeff Kirsher 
2244f2148a47SJeff Kirsher static void rhine_set_rx_mode(struct net_device *dev)
2245f2148a47SJeff Kirsher {
2246f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
2247f2148a47SJeff Kirsher 	void __iomem *ioaddr = rp->base;
2248f2148a47SJeff Kirsher 	u32 mc_filter[2];	/* Multicast hash filter */
2249f2148a47SJeff Kirsher 	u8 rx_mode = 0x0C;	/* Note: 0x02=accept runt, 0x01=accept errs */
2250f2148a47SJeff Kirsher 	struct netdev_hw_addr *ha;
2251f2148a47SJeff Kirsher 
2252f2148a47SJeff Kirsher 	if (dev->flags & IFF_PROMISC) {		/* Set promiscuous. */
2253f2148a47SJeff Kirsher 		rx_mode = 0x1C;
2254f2148a47SJeff Kirsher 		iowrite32(0xffffffff, ioaddr + MulticastFilter0);
2255f2148a47SJeff Kirsher 		iowrite32(0xffffffff, ioaddr + MulticastFilter1);
2256f2148a47SJeff Kirsher 	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
2257f2148a47SJeff Kirsher 		   (dev->flags & IFF_ALLMULTI)) {
2258f2148a47SJeff Kirsher 		/* Too many to match, or accept all multicasts. */
2259f2148a47SJeff Kirsher 		iowrite32(0xffffffff, ioaddr + MulticastFilter0);
2260f2148a47SJeff Kirsher 		iowrite32(0xffffffff, ioaddr + MulticastFilter1);
2261ca8b6e04SAlexey Charkov 	} else if (rp->quirks & rqMgmt) {
2262f2148a47SJeff Kirsher 		int i = 0;
2263f2148a47SJeff Kirsher 		u32 mCAMmask = 0;	/* 32 mCAMs (6105M and better) */
2264f2148a47SJeff Kirsher 		netdev_for_each_mc_addr(ha, dev) {
2265f2148a47SJeff Kirsher 			if (i == MCAM_SIZE)
2266f2148a47SJeff Kirsher 				break;
2267f2148a47SJeff Kirsher 			rhine_set_cam(ioaddr, i, ha->addr);
2268f2148a47SJeff Kirsher 			mCAMmask |= 1 << i;
2269f2148a47SJeff Kirsher 			i++;
2270f2148a47SJeff Kirsher 		}
2271f2148a47SJeff Kirsher 		rhine_set_cam_mask(ioaddr, mCAMmask);
2272f2148a47SJeff Kirsher 	} else {
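		/* 64-entry hash filter: the top six bits of the CRC-32 of
		 * each address select one bit across the two 32-bit
		 * MulticastFilter registers.
		 */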
2273f2148a47SJeff Kirsher 		memset(mc_filter, 0, sizeof(mc_filter));
2274f2148a47SJeff Kirsher 		netdev_for_each_mc_addr(ha, dev) {
2275f2148a47SJeff Kirsher 			int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
2276f2148a47SJeff Kirsher 
2277f2148a47SJeff Kirsher 			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
2278f2148a47SJeff Kirsher 		}
2279f2148a47SJeff Kirsher 		iowrite32(mc_filter[0], ioaddr + MulticastFilter0);
2280f2148a47SJeff Kirsher 		iowrite32(mc_filter[1], ioaddr + MulticastFilter1);
2281f2148a47SJeff Kirsher 	}
2282f2148a47SJeff Kirsher 	/* enable/disable VLAN receive filtering */
2283ca8b6e04SAlexey Charkov 	if (rp->quirks & rqMgmt) {
2284f2148a47SJeff Kirsher 		if (dev->flags & IFF_PROMISC)
2285f2148a47SJeff Kirsher 			BYTE_REG_BITS_OFF(BCR1_VIDFR, ioaddr + PCIBusConfig1);
2286f2148a47SJeff Kirsher 		else
2287f2148a47SJeff Kirsher 			BYTE_REG_BITS_ON(BCR1_VIDFR, ioaddr + PCIBusConfig1);
2288f2148a47SJeff Kirsher 	}
2289f2148a47SJeff Kirsher 	BYTE_REG_BITS_ON(rx_mode, ioaddr + RxConfig);
2290f2148a47SJeff Kirsher }
2291f2148a47SJeff Kirsher 
2292f2148a47SJeff Kirsher static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
2293f2148a47SJeff Kirsher {
2294f7630d18SAlexey Charkov 	struct device *hwdev = dev->dev.parent;
2295f2148a47SJeff Kirsher 
229623020ab3SRick Jones 	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
229723020ab3SRick Jones 	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
2298f7630d18SAlexey Charkov 	strlcpy(info->bus_info, dev_name(hwdev), sizeof(info->bus_info));
2299f2148a47SJeff Kirsher }
2300f2148a47SJeff Kirsher 
2301f918b986SPhilippe Reynes static int netdev_get_link_ksettings(struct net_device *dev,
2302f918b986SPhilippe Reynes 				     struct ethtool_link_ksettings *cmd)
2303f2148a47SJeff Kirsher {
2304f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
2305f2148a47SJeff Kirsher 
23067ab87ff4SFrancois Romieu 	mutex_lock(&rp->task_lock);
230782c01a84Syuval.shaia@oracle.com 	mii_ethtool_get_link_ksettings(&rp->mii_if, cmd);
23087ab87ff4SFrancois Romieu 	mutex_unlock(&rp->task_lock);
2309f2148a47SJeff Kirsher 
231082c01a84Syuval.shaia@oracle.com 	return 0;
2311f2148a47SJeff Kirsher }
2312f2148a47SJeff Kirsher 
2313f918b986SPhilippe Reynes static int netdev_set_link_ksettings(struct net_device *dev,
2314f918b986SPhilippe Reynes 				     const struct ethtool_link_ksettings *cmd)
2315f2148a47SJeff Kirsher {
2316f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
2317f2148a47SJeff Kirsher 	int rc;
2318f2148a47SJeff Kirsher 
23197ab87ff4SFrancois Romieu 	mutex_lock(&rp->task_lock);
2320f918b986SPhilippe Reynes 	rc = mii_ethtool_set_link_ksettings(&rp->mii_if, cmd);
2321f2148a47SJeff Kirsher 	rhine_set_carrier(&rp->mii_if);
23227ab87ff4SFrancois Romieu 	mutex_unlock(&rp->task_lock);
2323f2148a47SJeff Kirsher 
2324f2148a47SJeff Kirsher 	return rc;
2325f2148a47SJeff Kirsher }
2326f2148a47SJeff Kirsher 
2327f2148a47SJeff Kirsher static int netdev_nway_reset(struct net_device *dev)
2328f2148a47SJeff Kirsher {
2329f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
2330f2148a47SJeff Kirsher 
2331f2148a47SJeff Kirsher 	return mii_nway_restart(&rp->mii_if);
2332f2148a47SJeff Kirsher }
2333f2148a47SJeff Kirsher 
2334f2148a47SJeff Kirsher static u32 netdev_get_link(struct net_device *dev)
2335f2148a47SJeff Kirsher {
2336f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
2337f2148a47SJeff Kirsher 
2338f2148a47SJeff Kirsher 	return mii_link_ok(&rp->mii_if);
2339f2148a47SJeff Kirsher }
2340f2148a47SJeff Kirsher 
2341f2148a47SJeff Kirsher static u32 netdev_get_msglevel(struct net_device *dev)
2342f2148a47SJeff Kirsher {
2343fc3e0f8aSFrancois Romieu 	struct rhine_private *rp = netdev_priv(dev);
2344fc3e0f8aSFrancois Romieu 
2345fc3e0f8aSFrancois Romieu 	return rp->msg_enable;
2346f2148a47SJeff Kirsher }
2347f2148a47SJeff Kirsher 
2348f2148a47SJeff Kirsher static void netdev_set_msglevel(struct net_device *dev, u32 value)
2349f2148a47SJeff Kirsher {
2350fc3e0f8aSFrancois Romieu 	struct rhine_private *rp = netdev_priv(dev);
2351fc3e0f8aSFrancois Romieu 
2352fc3e0f8aSFrancois Romieu 	rp->msg_enable = value;
2353f2148a47SJeff Kirsher }
2354f2148a47SJeff Kirsher 
2355f2148a47SJeff Kirsher static void rhine_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2356f2148a47SJeff Kirsher {
2357f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
2358f2148a47SJeff Kirsher 
2359f2148a47SJeff Kirsher 	if (!(rp->quirks & rqWOL))
2360f2148a47SJeff Kirsher 		return;
2361f2148a47SJeff Kirsher 
2362f2148a47SJeff Kirsher 	spin_lock_irq(&rp->lock);
2363f2148a47SJeff Kirsher 	wol->supported = WAKE_PHY | WAKE_MAGIC |
2364f2148a47SJeff Kirsher 			 WAKE_UCAST | WAKE_MCAST | WAKE_BCAST;	/* Untested */
2365f2148a47SJeff Kirsher 	wol->wolopts = rp->wolopts;
2366f2148a47SJeff Kirsher 	spin_unlock_irq(&rp->lock);
2367f2148a47SJeff Kirsher }
2368f2148a47SJeff Kirsher 
2369f2148a47SJeff Kirsher static int rhine_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2370f2148a47SJeff Kirsher {
2371f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
2372f2148a47SJeff Kirsher 	u32 support = WAKE_PHY | WAKE_MAGIC |
2373f2148a47SJeff Kirsher 		      WAKE_UCAST | WAKE_MCAST | WAKE_BCAST;	/* Untested */
2374f2148a47SJeff Kirsher 
2375f2148a47SJeff Kirsher 	if (!(rp->quirks & rqWOL))
2376f2148a47SJeff Kirsher 		return -EINVAL;
2377f2148a47SJeff Kirsher 
2378f2148a47SJeff Kirsher 	if (wol->wolopts & ~support)
2379f2148a47SJeff Kirsher 		return -EINVAL;
2380f2148a47SJeff Kirsher 
2381f2148a47SJeff Kirsher 	spin_lock_irq(&rp->lock);
2382f2148a47SJeff Kirsher 	rp->wolopts = wol->wolopts;
2383f2148a47SJeff Kirsher 	spin_unlock_irq(&rp->lock);
2384f2148a47SJeff Kirsher 
2385f2148a47SJeff Kirsher 	return 0;
2386f2148a47SJeff Kirsher }
2387f2148a47SJeff Kirsher 
2388f2148a47SJeff Kirsher static const struct ethtool_ops netdev_ethtool_ops = {
2389f2148a47SJeff Kirsher 	.get_drvinfo		= netdev_get_drvinfo,
2390f2148a47SJeff Kirsher 	.nway_reset		= netdev_nway_reset,
2391f2148a47SJeff Kirsher 	.get_link		= netdev_get_link,
2392f2148a47SJeff Kirsher 	.get_msglevel		= netdev_get_msglevel,
2393f2148a47SJeff Kirsher 	.set_msglevel		= netdev_set_msglevel,
2394f2148a47SJeff Kirsher 	.get_wol		= rhine_get_wol,
2395f2148a47SJeff Kirsher 	.set_wol		= rhine_set_wol,
2396f918b986SPhilippe Reynes 	.get_link_ksettings	= netdev_get_link_ksettings,
2397f918b986SPhilippe Reynes 	.set_link_ksettings	= netdev_set_link_ksettings,
2398f2148a47SJeff Kirsher };
2399f2148a47SJeff Kirsher 
2400f2148a47SJeff Kirsher static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2401f2148a47SJeff Kirsher {
2402f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
2403f2148a47SJeff Kirsher 	int rc;
2404f2148a47SJeff Kirsher 
2405f2148a47SJeff Kirsher 	if (!netif_running(dev))
2406f2148a47SJeff Kirsher 		return -EINVAL;
2407f2148a47SJeff Kirsher 
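	/* task_lock serializes MII access with the driver's workers,
	 * which poke the PHY as well.
	 */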
24087ab87ff4SFrancois Romieu 	mutex_lock(&rp->task_lock);
2409f2148a47SJeff Kirsher 	rc = generic_mii_ioctl(&rp->mii_if, if_mii(rq), cmd, NULL);
2410f2148a47SJeff Kirsher 	rhine_set_carrier(&rp->mii_if);
24117ab87ff4SFrancois Romieu 	mutex_unlock(&rp->task_lock);
2412f2148a47SJeff Kirsher 
2413f2148a47SJeff Kirsher 	return rc;
2414f2148a47SJeff Kirsher }
2415f2148a47SJeff Kirsher 
2416f2148a47SJeff Kirsher static int rhine_close(struct net_device *dev)
2417f2148a47SJeff Kirsher {
2418f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
2419f2148a47SJeff Kirsher 	void __iomem *ioaddr = rp->base;
2420f2148a47SJeff Kirsher 
24217ab87ff4SFrancois Romieu 	rhine_task_disable(rp);
2422f2148a47SJeff Kirsher 	napi_disable(&rp->napi);
2423f2148a47SJeff Kirsher 	netif_stop_queue(dev);
2424f2148a47SJeff Kirsher 
2425fc3e0f8aSFrancois Romieu 	netif_dbg(rp, ifdown, dev, "Shutting down ethercard, status was %04x\n",
2426f2148a47SJeff Kirsher 		  ioread16(ioaddr + ChipCmd));
2427f2148a47SJeff Kirsher 
2428f2148a47SJeff Kirsher 	/* Switch to loopback mode to avoid hardware races. */
2429f2148a47SJeff Kirsher 	iowrite8(rp->tx_thresh | 0x02, ioaddr + TxConfig);
2430f2148a47SJeff Kirsher 
24317ab87ff4SFrancois Romieu 	rhine_irq_disable(rp);
2432f2148a47SJeff Kirsher 
2433f2148a47SJeff Kirsher 	/* Stop the chip's Tx and Rx processes. */
2434f2148a47SJeff Kirsher 	iowrite16(CmdStop, ioaddr + ChipCmd);
2435f2148a47SJeff Kirsher 
2436f7630d18SAlexey Charkov 	free_irq(rp->irq, dev);
2437f2148a47SJeff Kirsher 	free_rbufs(dev);
2438f2148a47SJeff Kirsher 	free_tbufs(dev);
2439f2148a47SJeff Kirsher 	free_ring(dev);
2440f2148a47SJeff Kirsher 
2441f2148a47SJeff Kirsher 	return 0;
2442f2148a47SJeff Kirsher }
2443f2148a47SJeff Kirsher 
2444f2148a47SJeff Kirsher 
24452d283862SAlexey Charkov static void rhine_remove_one_pci(struct pci_dev *pdev)
2446f2148a47SJeff Kirsher {
2447f2148a47SJeff Kirsher 	struct net_device *dev = pci_get_drvdata(pdev);
2448f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
2449f2148a47SJeff Kirsher 
2450f2148a47SJeff Kirsher 	unregister_netdev(dev);
2451f2148a47SJeff Kirsher 
2452f2148a47SJeff Kirsher 	pci_iounmap(pdev, rp->base);
2453f2148a47SJeff Kirsher 	pci_release_regions(pdev);
2454f2148a47SJeff Kirsher 
2455f2148a47SJeff Kirsher 	free_netdev(dev);
2456f2148a47SJeff Kirsher 	pci_disable_device(pdev);
2457f2148a47SJeff Kirsher }
2458f2148a47SJeff Kirsher 
24592d283862SAlexey Charkov static int rhine_remove_one_platform(struct platform_device *pdev)
24602d283862SAlexey Charkov {
24612d283862SAlexey Charkov 	struct net_device *dev = platform_get_drvdata(pdev);
24622d283862SAlexey Charkov 	struct rhine_private *rp = netdev_priv(dev);
24632d283862SAlexey Charkov 
24642d283862SAlexey Charkov 	unregister_netdev(dev);
24652d283862SAlexey Charkov 
24662d283862SAlexey Charkov 	iounmap(rp->base);
24672d283862SAlexey Charkov 
24682d283862SAlexey Charkov 	free_netdev(dev);
24692d283862SAlexey Charkov 
24702d283862SAlexey Charkov 	return 0;
24712d283862SAlexey Charkov }
24722d283862SAlexey Charkov 
24732d283862SAlexey Charkov static void rhine_shutdown_pci(struct pci_dev *pdev)
2474f2148a47SJeff Kirsher {
2475f2148a47SJeff Kirsher 	struct net_device *dev = pci_get_drvdata(pdev);
2476f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
2477f2148a47SJeff Kirsher 	void __iomem *ioaddr = rp->base;
2478f2148a47SJeff Kirsher 
2479f2148a47SJeff Kirsher 	if (!(rp->quirks & rqWOL))
2480f2148a47SJeff Kirsher 		return; /* Nothing to do for non-WOL adapters */
2481f2148a47SJeff Kirsher 
2482f2148a47SJeff Kirsher 	rhine_power_init(dev);
2483f2148a47SJeff Kirsher 
2484f2148a47SJeff Kirsher 	/* Make sure we use pattern 0, 1 and not 4, 5 */
2485f2148a47SJeff Kirsher 	if (rp->quirks & rq6patterns)
2486f2148a47SJeff Kirsher 		iowrite8(0x04, ioaddr + WOLcgClr);
2487f2148a47SJeff Kirsher 
24887ab87ff4SFrancois Romieu 	spin_lock(&rp->lock);
24897ab87ff4SFrancois Romieu 
2490f2148a47SJeff Kirsher 	if (rp->wolopts & WAKE_MAGIC) {
2491f2148a47SJeff Kirsher 		iowrite8(WOLmagic, ioaddr + WOLcrSet);
2492f2148a47SJeff Kirsher 		/*
2493f2148a47SJeff Kirsher 		 * Turn EEPROM-controlled wake-up back on -- some hardware may
2494f2148a47SJeff Kirsher 		 * not cooperate otherwise.
2495f2148a47SJeff Kirsher 		 */
2496f2148a47SJeff Kirsher 		iowrite8(ioread8(ioaddr + ConfigA) | 0x03, ioaddr + ConfigA);
2497f2148a47SJeff Kirsher 	}
2498f2148a47SJeff Kirsher 
2499f2148a47SJeff Kirsher 	if (rp->wolopts & (WAKE_BCAST|WAKE_MCAST))
2500f2148a47SJeff Kirsher 		iowrite8(WOLbmcast, ioaddr + WOLcgSet);
2501f2148a47SJeff Kirsher 
2502f2148a47SJeff Kirsher 	if (rp->wolopts & WAKE_PHY)
2503f2148a47SJeff Kirsher 		iowrite8(WOLlnkon | WOLlnkoff, ioaddr + WOLcrSet);
2504f2148a47SJeff Kirsher 
2505f2148a47SJeff Kirsher 	if (rp->wolopts & WAKE_UCAST)
2506f2148a47SJeff Kirsher 		iowrite8(WOLucast, ioaddr + WOLcrSet);
2507f2148a47SJeff Kirsher 
2508f2148a47SJeff Kirsher 	if (rp->wolopts) {
2509f2148a47SJeff Kirsher 		/* Enable legacy WOL (for old motherboards) */
2510f2148a47SJeff Kirsher 		iowrite8(0x01, ioaddr + PwcfgSet);
2511f2148a47SJeff Kirsher 		iowrite8(ioread8(ioaddr + StickyHW) | 0x04, ioaddr + StickyHW);
2512f2148a47SJeff Kirsher 	}
2513f2148a47SJeff Kirsher 
25147ab87ff4SFrancois Romieu 	spin_unlock(&rp->lock);
25157ab87ff4SFrancois Romieu 
2516e92b9b3bSFrancois Romieu 	if (system_state == SYSTEM_POWER_OFF && !avoid_D3) {
2517f2148a47SJeff Kirsher 		iowrite8(ioread8(ioaddr + StickyHW) | 0x03, ioaddr + StickyHW);
2518f2148a47SJeff Kirsher 
2519e92b9b3bSFrancois Romieu 		pci_wake_from_d3(pdev, true);
2520e92b9b3bSFrancois Romieu 		pci_set_power_state(pdev, PCI_D3hot);
2521e92b9b3bSFrancois Romieu 	}
2522f2148a47SJeff Kirsher }
2523f2148a47SJeff Kirsher 
2524e92b9b3bSFrancois Romieu #ifdef CONFIG_PM_SLEEP
2525e92b9b3bSFrancois Romieu static int rhine_suspend(struct device *device)
2526f2148a47SJeff Kirsher {
2527f7630d18SAlexey Charkov 	struct net_device *dev = dev_get_drvdata(device);
2528f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
2529f2148a47SJeff Kirsher 
2530f2148a47SJeff Kirsher 	if (!netif_running(dev))
2531f2148a47SJeff Kirsher 		return 0;
2532f2148a47SJeff Kirsher 
25337ab87ff4SFrancois Romieu 	rhine_task_disable(rp);
25347ab87ff4SFrancois Romieu 	rhine_irq_disable(rp);
2535f2148a47SJeff Kirsher 	napi_disable(&rp->napi);
2536f2148a47SJeff Kirsher 
2537f2148a47SJeff Kirsher 	netif_device_detach(dev);
2538f2148a47SJeff Kirsher 
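	/* On PCI, arm the WOL/D3 machinery now; the platform variant
	 * has no equivalent shutdown hook.
	 */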
2539f7630d18SAlexey Charkov 	if (dev_is_pci(device))
25402d283862SAlexey Charkov 		rhine_shutdown_pci(to_pci_dev(device));
2541f2148a47SJeff Kirsher 
2542f2148a47SJeff Kirsher 	return 0;
2543f2148a47SJeff Kirsher }
2544f2148a47SJeff Kirsher 
2545e92b9b3bSFrancois Romieu static int rhine_resume(struct device *device)
2546f2148a47SJeff Kirsher {
2547f7630d18SAlexey Charkov 	struct net_device *dev = dev_get_drvdata(device);
2548f2148a47SJeff Kirsher 	struct rhine_private *rp = netdev_priv(dev);
2549f2148a47SJeff Kirsher 
2550f2148a47SJeff Kirsher 	if (!netif_running(dev))
2551f2148a47SJeff Kirsher 		return 0;
2552f2148a47SJeff Kirsher 
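	/* The chip may have lost its state while suspended: re-enable
	 * MMIO, rebuild the Tx/Rx rings and reprogram the registers.
	 */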
2553f2148a47SJeff Kirsher 	enable_mmio(rp->pioaddr, rp->quirks);
2554f2148a47SJeff Kirsher 	rhine_power_init(dev);
2555f2148a47SJeff Kirsher 	free_tbufs(dev);
2556f2148a47SJeff Kirsher 	alloc_tbufs(dev);
25578709bb2cSfrançois romieu 	rhine_reset_rbufs(rp);
25587ab87ff4SFrancois Romieu 	rhine_task_enable(rp);
25597ab87ff4SFrancois Romieu 	spin_lock_bh(&rp->lock);
2560f2148a47SJeff Kirsher 	init_registers(dev);
25617ab87ff4SFrancois Romieu 	spin_unlock_bh(&rp->lock);
2562f2148a47SJeff Kirsher 
2563f2148a47SJeff Kirsher 	netif_device_attach(dev);
2564f2148a47SJeff Kirsher 
2565f2148a47SJeff Kirsher 	return 0;
2566f2148a47SJeff Kirsher }
2567e92b9b3bSFrancois Romieu 
2568e92b9b3bSFrancois Romieu static SIMPLE_DEV_PM_OPS(rhine_pm_ops, rhine_suspend, rhine_resume);
2569e92b9b3bSFrancois Romieu #define RHINE_PM_OPS	(&rhine_pm_ops)
2570e92b9b3bSFrancois Romieu 
2571e92b9b3bSFrancois Romieu #else
2572e92b9b3bSFrancois Romieu 
2573e92b9b3bSFrancois Romieu #define RHINE_PM_OPS	NULL
2574e92b9b3bSFrancois Romieu 
2575e92b9b3bSFrancois Romieu #endif /* CONFIG_PM_SLEEP */
2576f2148a47SJeff Kirsher 
25772d283862SAlexey Charkov static struct pci_driver rhine_driver_pci = {
2578f2148a47SJeff Kirsher 	.name		= DRV_NAME,
2579f2148a47SJeff Kirsher 	.id_table	= rhine_pci_tbl,
25802d283862SAlexey Charkov 	.probe		= rhine_init_one_pci,
25812d283862SAlexey Charkov 	.remove		= rhine_remove_one_pci,
25822d283862SAlexey Charkov 	.shutdown	= rhine_shutdown_pci,
2583e92b9b3bSFrancois Romieu 	.driver.pm	= RHINE_PM_OPS,
2584f2148a47SJeff Kirsher };
2585f2148a47SJeff Kirsher 
25862d283862SAlexey Charkov static struct platform_driver rhine_driver_platform = {
25872d283862SAlexey Charkov 	.probe		= rhine_init_one_platform,
25882d283862SAlexey Charkov 	.remove		= rhine_remove_one_platform,
25892d283862SAlexey Charkov 	.driver = {
25902d283862SAlexey Charkov 		.name	= DRV_NAME,
25912d283862SAlexey Charkov 		.of_match_table	= rhine_of_tbl,
25922d283862SAlexey Charkov 		.pm		= RHINE_PM_OPS,
25932d283862SAlexey Charkov 	}
25942d283862SAlexey Charkov };
25952d283862SAlexey Charkov 
25966faadbbbSChristoph Hellwig static const struct dmi_system_id rhine_dmi_table[] __initconst = {
2597f2148a47SJeff Kirsher 	{
2598f2148a47SJeff Kirsher 		.ident = "EPIA-M",
2599f2148a47SJeff Kirsher 		.matches = {
2600f2148a47SJeff Kirsher 			DMI_MATCH(DMI_BIOS_VENDOR, "Award Software International, Inc."),
2601f2148a47SJeff Kirsher 			DMI_MATCH(DMI_BIOS_VERSION, "6.00 PG"),
2602f2148a47SJeff Kirsher 		},
2603f2148a47SJeff Kirsher 	},
2604f2148a47SJeff Kirsher 	{
2605f2148a47SJeff Kirsher 		.ident = "KV7",
2606f2148a47SJeff Kirsher 		.matches = {
2607f2148a47SJeff Kirsher 			DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies, LTD"),
2608f2148a47SJeff Kirsher 			DMI_MATCH(DMI_BIOS_VERSION, "6.00 PG"),
2609f2148a47SJeff Kirsher 		},
2610f2148a47SJeff Kirsher 	},
2611f2148a47SJeff Kirsher 	{ NULL }
2612f2148a47SJeff Kirsher };
2613f2148a47SJeff Kirsher 
2614f2148a47SJeff Kirsher static int __init rhine_init(void)
2615f2148a47SJeff Kirsher {
26162d283862SAlexey Charkov 	int ret_pci, ret_platform;
26172d283862SAlexey Charkov 
2618f2148a47SJeff Kirsher /* When built as a module, this is printed whether or not devices are found in probe. */
2619f2148a47SJeff Kirsher #ifdef MODULE
2620f2148a47SJeff Kirsher 	pr_info("%s\n", version);
2621f2148a47SJeff Kirsher #endif
2622f2148a47SJeff Kirsher 	if (dmi_check_system(rhine_dmi_table)) {
2623f2148a47SJeff Kirsher 		/* these BIOSes fail at PXE boot if chip is in D3 */
2624eb939922SRusty Russell 		avoid_D3 = true;
2625f2148a47SJeff Kirsher 		pr_warn("Broken BIOS detected, avoid_D3 enabled\n");
2626f2148a47SJeff Kirsher 	} else if (avoid_D3)
2628f2148a47SJeff Kirsher 		pr_info("avoid_D3 set\n");
2629f2148a47SJeff Kirsher 
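	/* A Rhine core can sit on PCI or (on VIA SoCs) on the platform
	 * bus; init only fails if neither bus driver registers.
	 */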
26302d283862SAlexey Charkov 	ret_pci = pci_register_driver(&rhine_driver_pci);
26312d283862SAlexey Charkov 	ret_platform = platform_driver_register(&rhine_driver_platform);
26322d283862SAlexey Charkov 	if ((ret_pci < 0) && (ret_platform < 0))
26332d283862SAlexey Charkov 		return ret_pci;
26342d283862SAlexey Charkov 
26352d283862SAlexey Charkov 	return 0;
2636f2148a47SJeff Kirsher }
2637f2148a47SJeff Kirsher 
2638f2148a47SJeff Kirsher 
2639f2148a47SJeff Kirsher static void __exit rhine_cleanup(void)
2640f2148a47SJeff Kirsher {
26412d283862SAlexey Charkov 	platform_driver_unregister(&rhine_driver_platform);
26422d283862SAlexey Charkov 	pci_unregister_driver(&rhine_driver_pci);
2643f2148a47SJeff Kirsher }
2644f2148a47SJeff Kirsher 
2645f2148a47SJeff Kirsher 
2646f2148a47SJeff Kirsher module_init(rhine_init);
2647f2148a47SJeff Kirsher module_exit(rhine_cleanup);
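
/*
 * Load-time tuning sketch, assuming avoid_D3 and rx_copybreak are wired
 * up as module parameters elsewhere in this file:
 *
 *   modprobe via-rhine avoid_D3=1 rx_copybreak=1518
 */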
2648