1 /* via-rhine.c: A Linux Ethernet device driver for VIA Rhine family chips. */
2 /*
3 	Written 1998-2001 by Donald Becker.
4 
5 	Current Maintainer: Roger Luethi <rl@hellgate.ch>
6 
7 	This software may be used and distributed according to the terms of
8 	the GNU General Public License (GPL), incorporated herein by reference.
9 	Drivers based on or derived from this code fall under the GPL and must
10 	retain the authorship, copyright and license notice.  This file is not
11 	a complete program and may only be used when the entire operating
12 	system is licensed under the GPL.
13 
14 	This driver is designed for the VIA VT86C100A Rhine-I.
15 	It also works with the Rhine-II (6102) and Rhine-III (6105/6105L/6105LOM
16 	and management NIC 6105M).
17 
18 	The author may be reached as becker@scyld.com, or C/O
19 	Scyld Computing Corporation
20 	410 Severn Ave., Suite 210
21 	Annapolis MD 21403
22 
23 
24 	This driver contains some changes from the original Donald Becker
25 	version. He may or may not be interested in bug reports on this
26 	code. You can find his versions at:
27 	http://www.scyld.com/network/via-rhine.html
28 	[link no longer provides useful info -jgarzik]
29 
30 */
31 
32 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
33 
34 #define DRV_NAME	"via-rhine"
35 #define DRV_VERSION	"1.5.1"
36 #define DRV_RELDATE	"2010-10-09"
37 
38 #include <linux/types.h>
39 
40 /* A few user-configurable values.
41    These may be modified when a driver module is loaded. */
42 static int debug;
43 #define RHINE_MSG_DEFAULT \
44         (0x0000)
45 
46 /* Set the copy breakpoint for the copy-only-tiny-frames scheme.
47    Setting to > 1518 effectively disables this feature. */
48 #if defined(__alpha__) || defined(__arm__) || defined(__hppa__) || \
49 	defined(CONFIG_SPARC) || defined(__ia64__) ||		   \
50 	defined(__sh__) || defined(__mips__)
51 static int rx_copybreak = 1518;
52 #else
53 static int rx_copybreak;
54 #endif
55 
56 /* Work-around for broken BIOSes: they are unable to get the chip back out of
57    power state D3 so PXE booting fails. bootparam(7): via-rhine.avoid_D3=1 */
58 static bool avoid_D3;
59 
60 /*
61  * In case you are looking for 'options[]' or 'full_duplex[]', they
62  * are gone. Use ethtool(8) instead.
63  */
64 
65 /* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
66    The Rhine has a 64 element 8390-like hash table. */
67 static const int multicast_filter_limit = 32;
68 
69 
70 /* Operational parameters that are set at compile time. */
71 
72 /* Keep the ring sizes a power of two for compile efficiency.
73    The compiler will convert <unsigned>'%'<2^N> into a bit mask.
74    Making the Tx ring too large decreases the effectiveness of channel
75    bonding and packet priority.
76    There are no ill effects from too-large receive rings. */
77 #define TX_RING_SIZE	16
78 #define TX_QUEUE_LEN	10	/* Limit ring entries actually used. */
79 #define RX_RING_SIZE	64
80 
81 /* Operational parameters that usually are not changed. */
82 
83 /* Time in jiffies before concluding the transmitter is hung. */
84 #define TX_TIMEOUT	(2*HZ)
85 
86 #define PKT_BUF_SZ	1536	/* Size of each temporary Rx buffer.*/
87 
88 #include <linux/module.h>
89 #include <linux/moduleparam.h>
90 #include <linux/kernel.h>
91 #include <linux/string.h>
92 #include <linux/timer.h>
93 #include <linux/errno.h>
94 #include <linux/ioport.h>
95 #include <linux/interrupt.h>
96 #include <linux/pci.h>
97 #include <linux/of_address.h>
98 #include <linux/of_device.h>
99 #include <linux/of_irq.h>
100 #include <linux/platform_device.h>
101 #include <linux/dma-mapping.h>
102 #include <linux/netdevice.h>
103 #include <linux/etherdevice.h>
104 #include <linux/skbuff.h>
105 #include <linux/init.h>
106 #include <linux/delay.h>
107 #include <linux/mii.h>
108 #include <linux/ethtool.h>
109 #include <linux/crc32.h>
110 #include <linux/if_vlan.h>
111 #include <linux/bitops.h>
112 #include <linux/workqueue.h>
113 #include <asm/processor.h>	/* Processor type for cache alignment. */
114 #include <asm/io.h>
115 #include <asm/irq.h>
116 #include <asm/uaccess.h>
117 #include <linux/dmi.h>
118 
119 /* These identify the driver base version and may not be removed. */
120 static const char version[] =
121 	"v1.10-LK" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker";
122 
123 MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
124 MODULE_DESCRIPTION("VIA Rhine PCI Fast Ethernet driver");
125 MODULE_LICENSE("GPL");
126 
127 module_param(debug, int, 0);
128 module_param(rx_copybreak, int, 0);
129 module_param(avoid_D3, bool, 0);
130 MODULE_PARM_DESC(debug, "VIA Rhine debug message flags");
131 MODULE_PARM_DESC(rx_copybreak, "VIA Rhine copy breakpoint for copy-only-tiny-frames");
132 MODULE_PARM_DESC(avoid_D3, "Avoid power state D3 (work-around for broken BIOSes)");
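
/*
 * Example usage (illustrative values only):
 *
 *	modprobe via-rhine debug=0x3f rx_copybreak=200 avoid_D3=1
 *
 * debug takes a netif_msg_* bit mask (it seeds rp->msg_enable), rx_copybreak
 * is a frame length in bytes and avoid_D3 is a boolean. When the driver is
 * built in, the same settings can be given on the kernel command line, e.g.
 * via-rhine.avoid_D3=1.
 */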
133 
134 #define MCAM_SIZE	32
135 #define VCAM_SIZE	32
136 
137 /*
138 		Theory of Operation
139 
140 I. Board Compatibility
141 
142 This driver is designed for the VIA VT86C100A (Rhine-I) PCI Fast Ethernet
143 controller. It also supports the later Rhine-II and Rhine-III chips.
144 
145 II. Board-specific settings
146 
147 Boards with this chip are functional only in a bus-master PCI slot.
148 
149 Many operational settings are loaded from the EEPROM to the Config word at
150 offset 0x78. For most of these settings, this driver assumes that they are
151 correct.
152 If this driver is compiled to use PCI memory space operations the EEPROM
153 must be configured to enable memory ops.
154 
155 III. Driver operation
156 
157 IIIa. Ring buffers
158 
159 This driver uses two statically allocated fixed-size descriptor lists
160 formed into rings by a branch from the final descriptor to the beginning of
161 the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
162 
163 IIIb/c. Transmit/Receive Structure
164 
165 This driver attempts to use a zero-copy receive and transmit scheme.
166 
167 Alas, all data buffers are required to start on a 32 bit boundary, so
168 the driver must often copy transmit packets into bounce buffers.
169 
170 The driver allocates full frame size skbuffs for the Rx ring buffers at
171 open() time and passes the skb->data field to the chip as receive data
172 buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
173 a fresh skbuff is allocated and the frame is copied to the new skbuff.
174 When the incoming frame is larger, the skbuff is passed directly up the
175 protocol stack. Buffers consumed this way are replaced by newly allocated
176 skbuffs in the last phase of rhine_rx().
177 
178 The RX_COPYBREAK value is chosen to trade-off the memory wasted by
179 using a full-sized skbuff for small frames vs. the copying costs of larger
180 frames. New boards are typically used in generously configured machines
181 and the underfilled buffers have negligible impact compared to the benefit of
182 a single allocation size, so the default value of zero results in never
183 copying packets. When copying is done, the cost is usually mitigated by using
184 a combined copy/checksum routine. Copying also preloads the cache, which is
185 most useful with small frames.
186 
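Note that on platforms that cannot cheaply handle unaligned accesses (see the
rx_copybreak initialisation above) the compiled-in default is 1518 rather than
zero, so practically every frame is copied there, which also yields an aligned
IP header.
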
187 Since the VIA chips are only able to transfer data to buffers on 32 bit
188 boundaries, the IP header at offset 14 in an ethernet frame isn't
189 longword aligned for further processing. Copying these unaligned buffers
190 has the beneficial effect of 16-byte aligning the IP header.
191 
192 IIId. Synchronization
193 
194 The driver runs as two independent, single-threaded flows of control. One
195 is the send-packet routine, which enforces single-threaded use by the
196 netdev_priv(dev)->lock spinlock. The other thread is the interrupt handler,
197 which is single threaded by the hardware and interrupt handling software.
198 
199 The send packet thread has partial control over the Tx ring. It locks the
200 netdev_priv(dev)->lock whenever it's queuing a Tx packet. If the next slot in
201 the ring is not available it stops the transmit queue by
202 calling netif_stop_queue.
203 
204 The interrupt handler has exclusive control over the Rx ring and records stats
205 from the Tx ring. After reaping the stats, it marks the Tx queue entry as
206 empty by incrementing the dirty_tx mark. If at least half of the entries in
207 the Tx ring are available the transmit queue is woken up if it was stopped.
208 
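Note: in the current code most of this processing happens in NAPI context.
rhine_interrupt() only disables chip interrupts and schedules rhine_napipoll(),
which performs the Rx and Tx work described above and re-enables interrupts
once it completes below its budget.
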
209 IV. Notes
210 
211 IVb. References
212 
213 Preliminary VT86C100A manual from http://www.via.com.tw/
214 http://www.scyld.com/expert/100mbps.html
215 http://www.scyld.com/expert/NWay.html
216 ftp://ftp.via.com.tw/public/lan/Products/NIC/VT86C100A/Datasheet/VT86C100A03.pdf
217 ftp://ftp.via.com.tw/public/lan/Products/NIC/VT6102/Datasheet/VT6102_021.PDF
218 
219 
220 IVc. Errata
221 
222 The VT86C100A manual is not a reliable source of information.
223 The 3043 chip does not handle unaligned transmit or receive buffers, resulting
224 in significant performance degradation for bounce buffer copies on transmit
225 and unaligned IP headers on receive.
226 The chip does not pad to minimum transmit length.
227 
228 */
229 
230 
231 /* This table drives the PCI probe routines. It's mostly boilerplate in all
232    of the drivers, and will likely be provided by some future kernel.
233    Each entry matches one specific VIA vendor/device ID pair; the PCI_ANY_ID
234    wildcards accept any subsystem vendor and device ID.
235 */
236 
237 enum rhine_revs {
238 	VT86C100A	= 0x00,
239 	VTunknown0	= 0x20,
240 	VT6102		= 0x40,
241 	VT8231		= 0x50,	/* Integrated MAC */
242 	VT8233		= 0x60,	/* Integrated MAC */
243 	VT8235		= 0x74,	/* Integrated MAC */
244 	VT8237		= 0x78,	/* Integrated MAC */
245 	VTunknown1	= 0x7C,
246 	VT6105		= 0x80,
247 	VT6105_B0	= 0x83,
248 	VT6105L		= 0x8A,
249 	VT6107		= 0x8C,
250 	VTunknown2	= 0x8E,
251 	VT6105M		= 0x90,	/* Management adapter */
252 };
253 
254 enum rhine_quirks {
255 	rqWOL		= 0x0001,	/* Wake-On-LAN support */
256 	rqForceReset	= 0x0002,
257 	rq6patterns	= 0x0040,	/* 6 instead of 4 patterns for WOL */
258 	rqStatusWBRace	= 0x0080,	/* Tx Status Writeback Error possible */
259 	rqRhineI	= 0x0100,	/* See comment below */
260 	rqIntPHY	= 0x0200,	/* Integrated PHY */
261 	rqMgmt		= 0x0400,	/* Management adapter */
262 	rqNeedEnMMIO	= 0x0800,	/* Whether the core needs to be
263 					 * switched from PIO mode to MMIO
264 					 * (only applies to PCI)
265 					 */
266 };
267 /*
268  * rqRhineI: VT86C100A (aka Rhine-I) uses different bits to enable
269  * MMIO as well as for the collision counter and the Tx FIFO underflow
270  * indicator. In addition, Tx and Rx buffers need to be 4-byte aligned.
271  */
272 
273 /* Beware of PCI posted writes */
274 #define IOSYNC	do { ioread8(ioaddr + StationAddr); } while (0)
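/*
 * A dummy read from a harmless register (StationAddr) forces any posted MMIO
 * writes out to the chip before execution continues.
 */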
275 
276 static const struct pci_device_id rhine_pci_tbl[] = {
277 	{ 0x1106, 0x3043, PCI_ANY_ID, PCI_ANY_ID, },	/* VT86C100A */
278 	{ 0x1106, 0x3065, PCI_ANY_ID, PCI_ANY_ID, },	/* VT6102 */
279 	{ 0x1106, 0x3106, PCI_ANY_ID, PCI_ANY_ID, },	/* 6105{,L,LOM} */
280 	{ 0x1106, 0x3053, PCI_ANY_ID, PCI_ANY_ID, },	/* VT6105M */
281 	{ }	/* terminate list */
282 };
283 MODULE_DEVICE_TABLE(pci, rhine_pci_tbl);
284 
285 /* OpenFirmware identifiers for platform-bus devices
286  * The .data field is currently only used to store quirks
287  */
288 static u32 vt8500_quirks = rqWOL | rqForceReset | rq6patterns;
289 static struct of_device_id rhine_of_tbl[] = {
290 	{ .compatible = "via,vt8500-rhine", .data = &vt8500_quirks },
291 	{ }	/* terminate list */
292 };
293 MODULE_DEVICE_TABLE(of, rhine_of_tbl);
294 
295 /* Offsets to the device registers. */
296 enum register_offsets {
297 	StationAddr=0x00, RxConfig=0x06, TxConfig=0x07, ChipCmd=0x08,
298 	ChipCmd1=0x09, TQWake=0x0A,
299 	IntrStatus=0x0C, IntrEnable=0x0E,
300 	MulticastFilter0=0x10, MulticastFilter1=0x14,
301 	RxRingPtr=0x18, TxRingPtr=0x1C, GFIFOTest=0x54,
302 	MIIPhyAddr=0x6C, MIIStatus=0x6D, PCIBusConfig=0x6E, PCIBusConfig1=0x6F,
303 	MIICmd=0x70, MIIRegAddr=0x71, MIIData=0x72, MACRegEEcsr=0x74,
304 	ConfigA=0x78, ConfigB=0x79, ConfigC=0x7A, ConfigD=0x7B,
305 	RxMissed=0x7C, RxCRCErrs=0x7E, MiscCmd=0x81,
306 	StickyHW=0x83, IntrStatus2=0x84,
307 	CamMask=0x88, CamCon=0x92, CamAddr=0x93,
308 	WOLcrSet=0xA0, PwcfgSet=0xA1, WOLcgSet=0xA3, WOLcrClr=0xA4,
309 	WOLcrClr1=0xA6, WOLcgClr=0xA7,
310 	PwrcsrSet=0xA8, PwrcsrSet1=0xA9, PwrcsrClr=0xAC, PwrcsrClr1=0xAD,
311 };
312 
313 /* Bits in ConfigD */
314 enum backoff_bits {
315 	BackOptional=0x01, BackModify=0x02,
316 	BackCaptureEffect=0x04, BackRandom=0x08
317 };
318 
319 /* Bits in the TxConfig (TCR) register */
320 enum tcr_bits {
321 	TCR_PQEN=0x01,
322 	TCR_LB0=0x02,		/* loopback[0] */
323 	TCR_LB1=0x04,		/* loopback[1] */
324 	TCR_OFSET=0x08,
325 	TCR_RTGOPT=0x10,
326 	TCR_RTFT0=0x20,
327 	TCR_RTFT1=0x40,
328 	TCR_RTSF=0x80,
329 };
330 
331 /* Bits in the CamCon (CAMC) register */
332 enum camcon_bits {
333 	CAMC_CAMEN=0x01,
334 	CAMC_VCAMSL=0x02,
335 	CAMC_CAMWR=0x04,
336 	CAMC_CAMRD=0x08,
337 };
338 
339 /* Bits in the PCIBusConfig1 (BCR1) register */
340 enum bcr1_bits {
341 	BCR1_POT0=0x01,
342 	BCR1_POT1=0x02,
343 	BCR1_POT2=0x04,
344 	BCR1_CTFT0=0x08,
345 	BCR1_CTFT1=0x10,
346 	BCR1_CTSF=0x20,
347 	BCR1_TXQNOBK=0x40,	/* for VT6105 */
348 	BCR1_VIDFR=0x80,	/* for VT6105 */
349 	BCR1_MED0=0x40,		/* for VT6102 */
350 	BCR1_MED1=0x80,		/* for VT6102 */
351 };
352 
353 /* Registers checked to verify that MMIO and PIO accesses return the same values. */
354 static const int mmio_verify_registers[] = {
355 	RxConfig, TxConfig, IntrEnable, ConfigA, ConfigB, ConfigC, ConfigD,
356 	0
357 };
358 
359 /* Bits in the interrupt status/mask registers. */
360 enum intr_status_bits {
361 	IntrRxDone	= 0x0001,
362 	IntrTxDone	= 0x0002,
363 	IntrRxErr	= 0x0004,
364 	IntrTxError	= 0x0008,
365 	IntrRxEmpty	= 0x0020,
366 	IntrPCIErr	= 0x0040,
367 	IntrStatsMax	= 0x0080,
368 	IntrRxEarly	= 0x0100,
369 	IntrTxUnderrun	= 0x0210,
370 	IntrRxOverflow	= 0x0400,
371 	IntrRxDropped	= 0x0800,
372 	IntrRxNoBuf	= 0x1000,
373 	IntrTxAborted	= 0x2000,
374 	IntrLinkChange	= 0x4000,
375 	IntrRxWakeUp	= 0x8000,
376 	IntrTxDescRace		= 0x080000,	/* mapped from IntrStatus2 */
377 	IntrNormalSummary	= IntrRxDone | IntrTxDone,
378 	IntrTxErrSummary	= IntrTxDescRace | IntrTxAborted | IntrTxError |
379 				  IntrTxUnderrun,
380 };
381 
382 /* Bits in WOLcrSet/WOLcrClr and PwrcsrSet/PwrcsrClr */
383 enum wol_bits {
384 	WOLucast	= 0x10,
385 	WOLmagic	= 0x20,
386 	WOLbmcast	= 0x30,
387 	WOLlnkon	= 0x40,
388 	WOLlnkoff	= 0x80,
389 };
390 
391 /* The Rx and Tx buffer descriptors. */
392 struct rx_desc {
393 	__le32 rx_status;
394 	__le32 desc_length; /* Chain flag, Buffer/frame length */
395 	__le32 addr;
396 	__le32 next_desc;
397 };
398 struct tx_desc {
399 	__le32 tx_status;
400 	__le32 desc_length; /* Chain flag, Tx Config, Frame length */
401 	__le32 addr;
402 	__le32 next_desc;
403 };
404 
405 /* Initial value for tx_desc.desc_length, Buffer size goes to bits 0-10 */
406 #define TXDESC		0x00e08000
407 
408 enum rx_status_bits {
409 	RxOK=0x8000, RxWholePkt=0x0300, RxErr=0x008F
410 };
411 
412 /* Bits in *_desc.*_status */
413 enum desc_status_bits {
414 	DescOwn=0x80000000
415 };
416 
417 /* Bits in *_desc.*_length */
418 enum desc_length_bits {
419 	DescTag=0x00010000
420 };
421 
422 /* Bits in ChipCmd. */
423 enum chip_cmd_bits {
424 	CmdInit=0x01, CmdStart=0x02, CmdStop=0x04, CmdRxOn=0x08,
425 	CmdTxOn=0x10, Cmd1TxDemand=0x20, CmdRxDemand=0x40,
426 	Cmd1EarlyRx=0x01, Cmd1EarlyTx=0x02, Cmd1FDuplex=0x04,
427 	Cmd1NoTxPoll=0x08, Cmd1Reset=0x80,
428 };
429 
430 struct rhine_stats {
431 	u64		packets;
432 	u64		bytes;
433 	struct u64_stats_sync syncp;
434 };
435 
436 struct rhine_private {
437 	/* Bit mask for configured VLAN ids */
438 	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
439 
440 	/* Descriptor rings */
441 	struct rx_desc *rx_ring;
442 	struct tx_desc *tx_ring;
443 	dma_addr_t rx_ring_dma;
444 	dma_addr_t tx_ring_dma;
445 
446 	/* The addresses of receive-in-place skbuffs. */
447 	struct sk_buff *rx_skbuff[RX_RING_SIZE];
448 	dma_addr_t rx_skbuff_dma[RX_RING_SIZE];
449 
450 	/* The saved address of a sent-in-place packet/buffer, for later free(). */
451 	struct sk_buff *tx_skbuff[TX_RING_SIZE];
452 	dma_addr_t tx_skbuff_dma[TX_RING_SIZE];
453 
454 	/* Tx bounce buffers (Rhine-I only) */
455 	unsigned char *tx_buf[TX_RING_SIZE];
456 	unsigned char *tx_bufs;
457 	dma_addr_t tx_bufs_dma;
458 
459 	int irq;
460 	long pioaddr;
461 	struct net_device *dev;
462 	struct napi_struct napi;
463 	spinlock_t lock;
464 	struct mutex task_lock;
465 	bool task_enable;
466 	struct work_struct slow_event_task;
467 	struct work_struct reset_task;
468 
469 	u32 msg_enable;
470 
471 	/* Frequently used values: keep some adjacent for cache effect. */
472 	u32 quirks;
473 	struct rx_desc *rx_head_desc;
474 	unsigned int cur_rx, dirty_rx;	/* Producer/consumer ring indices */
475 	unsigned int cur_tx, dirty_tx;
476 	unsigned int rx_buf_sz;		/* Based on MTU+slack. */
477 	struct rhine_stats rx_stats;
478 	struct rhine_stats tx_stats;
479 	u8 wolopts;
480 
481 	u8 tx_thresh, rx_thresh;
482 
483 	struct mii_if_info mii_if;
484 	void __iomem *base;
485 };
486 
487 #define BYTE_REG_BITS_ON(x, p)      do { iowrite8((ioread8((p))|(x)), (p)); } while (0)
488 #define WORD_REG_BITS_ON(x, p)      do { iowrite16((ioread16((p))|(x)), (p)); } while (0)
489 #define DWORD_REG_BITS_ON(x, p)     do { iowrite32((ioread32((p))|(x)), (p)); } while (0)
490 
491 #define BYTE_REG_BITS_IS_ON(x, p)   (ioread8((p)) & (x))
492 #define WORD_REG_BITS_IS_ON(x, p)   (ioread16((p)) & (x))
493 #define DWORD_REG_BITS_IS_ON(x, p)  (ioread32((p)) & (x))
494 
495 #define BYTE_REG_BITS_OFF(x, p)     do { iowrite8(ioread8((p)) & (~(x)), (p)); } while (0)
496 #define WORD_REG_BITS_OFF(x, p)     do { iowrite16(ioread16((p)) & (~(x)), (p)); } while (0)
497 #define DWORD_REG_BITS_OFF(x, p)    do { iowrite32(ioread32((p)) & (~(x)), (p)); } while (0)
498 
499 #define BYTE_REG_BITS_SET(x, m, p)   do { iowrite8((ioread8((p)) & (~(m)))|(x), (p)); } while (0)
500 #define WORD_REG_BITS_SET(x, m, p)   do { iowrite16((ioread16((p)) & (~(m)))|(x), (p)); } while (0)
501 #define DWORD_REG_BITS_SET(x, m, p)  do { iowrite32((ioread32((p)) & (~(m)))|(x), (p)); } while (0)
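
/*
 * Note: these helpers are plain read-modify-write sequences and provide no
 * atomicity of their own; callers must supply any serialization (e.g.
 * rp->lock) when a register can be modified concurrently.
 */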
502 
503 
504 static int  mdio_read(struct net_device *dev, int phy_id, int location);
505 static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
506 static int  rhine_open(struct net_device *dev);
507 static void rhine_reset_task(struct work_struct *work);
508 static void rhine_slow_event_task(struct work_struct *work);
509 static void rhine_tx_timeout(struct net_device *dev);
510 static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
511 				  struct net_device *dev);
512 static irqreturn_t rhine_interrupt(int irq, void *dev_instance);
513 static void rhine_tx(struct net_device *dev);
514 static int rhine_rx(struct net_device *dev, int limit);
515 static void rhine_set_rx_mode(struct net_device *dev);
516 static struct rtnl_link_stats64 *rhine_get_stats64(struct net_device *dev,
517 	       struct rtnl_link_stats64 *stats);
518 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
519 static const struct ethtool_ops netdev_ethtool_ops;
520 static int  rhine_close(struct net_device *dev);
521 static int rhine_vlan_rx_add_vid(struct net_device *dev,
522 				 __be16 proto, u16 vid);
523 static int rhine_vlan_rx_kill_vid(struct net_device *dev,
524 				  __be16 proto, u16 vid);
525 static void rhine_restart_tx(struct net_device *dev);
526 
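/*
 * Busy-wait (at most 1024 iterations of 10 us) for the bits in 'mask' of byte
 * register 'reg' to go low (low == true) or high (low == false), and log a
 * debug message if the wait took unusually long.
 */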
527 static void rhine_wait_bit(struct rhine_private *rp, u8 reg, u8 mask, bool low)
528 {
529 	void __iomem *ioaddr = rp->base;
530 	int i;
531 
532 	for (i = 0; i < 1024; i++) {
533 		bool has_mask_bits = !!(ioread8(ioaddr + reg) & mask);
534 
535 		if (low ^ has_mask_bits)
536 			break;
537 		udelay(10);
538 	}
539 	if (i > 64) {
540 		netif_dbg(rp, hw, rp->dev, "%s bit wait (%02x/%02x) cycle "
541 			  "count: %04d\n", low ? "low" : "high", reg, mask, i);
542 	}
543 }
544 
545 static void rhine_wait_bit_high(struct rhine_private *rp, u8 reg, u8 mask)
546 {
547 	rhine_wait_bit(rp, reg, mask, false);
548 }
549 
550 static void rhine_wait_bit_low(struct rhine_private *rp, u8 reg, u8 mask)
551 {
552 	rhine_wait_bit(rp, reg, mask, true);
553 }
554 
555 static u32 rhine_get_events(struct rhine_private *rp)
556 {
557 	void __iomem *ioaddr = rp->base;
558 	u32 intr_status;
559 
560 	intr_status = ioread16(ioaddr + IntrStatus);
561 	/* On Rhine-II, Bit 3 indicates Tx descriptor write-back race. */
562 	if (rp->quirks & rqStatusWBRace)
563 		intr_status |= ioread8(ioaddr + IntrStatus2) << 16;
564 	return intr_status;
565 }
566 
567 static void rhine_ack_events(struct rhine_private *rp, u32 mask)
568 {
569 	void __iomem *ioaddr = rp->base;
570 
571 	if (rp->quirks & rqStatusWBRace)
572 		iowrite8(mask >> 16, ioaddr + IntrStatus2);
573 	iowrite16(mask, ioaddr + IntrStatus);
574 	mmiowb();
575 }
576 
577 /*
578  * Get power related registers into sane state.
579  * Notify user about past WOL event.
580  */
581 static void rhine_power_init(struct net_device *dev)
582 {
583 	struct rhine_private *rp = netdev_priv(dev);
584 	void __iomem *ioaddr = rp->base;
585 	u16 wolstat;
586 
587 	if (rp->quirks & rqWOL) {
588 		/* Make sure chip is in power state D0 */
589 		iowrite8(ioread8(ioaddr + StickyHW) & 0xFC, ioaddr + StickyHW);
590 
591 		/* Disable "force PME-enable" */
592 		iowrite8(0x80, ioaddr + WOLcgClr);
593 
594 		/* Clear power-event config bits (WOL) */
595 		iowrite8(0xFF, ioaddr + WOLcrClr);
596 		/* More recent cards can manage two additional patterns */
597 		if (rp->quirks & rq6patterns)
598 			iowrite8(0x03, ioaddr + WOLcrClr1);
599 
600 		/* Save power-event status bits */
601 		wolstat = ioread8(ioaddr + PwrcsrSet);
602 		if (rp->quirks & rq6patterns)
603 			wolstat |= (ioread8(ioaddr + PwrcsrSet1) & 0x03) << 8;
604 
605 		/* Clear power-event status bits */
606 		iowrite8(0xFF, ioaddr + PwrcsrClr);
607 		if (rp->quirks & rq6patterns)
608 			iowrite8(0x03, ioaddr + PwrcsrClr1);
609 
610 		if (wolstat) {
611 			char *reason;
612 			switch (wolstat) {
613 			case WOLmagic:
614 				reason = "Magic packet";
615 				break;
616 			case WOLlnkon:
617 				reason = "Link went up";
618 				break;
619 			case WOLlnkoff:
620 				reason = "Link went down";
621 				break;
622 			case WOLucast:
623 				reason = "Unicast packet";
624 				break;
625 			case WOLbmcast:
626 				reason = "Multicast/broadcast packet";
627 				break;
628 			default:
629 				reason = "Unknown";
630 			}
631 			netdev_info(dev, "Woke system up. Reason: %s\n",
632 				    reason);
633 		}
634 	}
635 }
636 
637 static void rhine_chip_reset(struct net_device *dev)
638 {
639 	struct rhine_private *rp = netdev_priv(dev);
640 	void __iomem *ioaddr = rp->base;
641 	u8 cmd1;
642 
643 	iowrite8(Cmd1Reset, ioaddr + ChipCmd1);
644 	IOSYNC;
645 
646 	if (ioread8(ioaddr + ChipCmd1) & Cmd1Reset) {
647 		netdev_info(dev, "Reset not complete yet. Trying harder.\n");
648 
649 		/* Force reset */
650 		if (rp->quirks & rqForceReset)
651 			iowrite8(0x40, ioaddr + MiscCmd);
652 
653 		/* Reset can take somewhat longer (rare) */
654 		rhine_wait_bit_low(rp, ChipCmd1, Cmd1Reset);
655 	}
656 
657 	cmd1 = ioread8(ioaddr + ChipCmd1);
658 	netif_info(rp, hw, dev, "Reset %s\n", (cmd1 & Cmd1Reset) ?
659 		   "failed" : "succeeded");
660 }
661 
662 static void enable_mmio(long pioaddr, u32 quirks)
663 {
664 	int n;
665 
666 	if (quirks & rqNeedEnMMIO) {
667 		if (quirks & rqRhineI) {
668 			/* More recent docs say that this bit is reserved */
669 			n = inb(pioaddr + ConfigA) | 0x20;
670 			outb(n, pioaddr + ConfigA);
671 		} else {
672 			n = inb(pioaddr + ConfigD) | 0x80;
673 			outb(n, pioaddr + ConfigD);
674 		}
675 	}
676 }
677 
678 static inline int verify_mmio(struct device *hwdev,
679 			      long pioaddr,
680 			      void __iomem *ioaddr,
681 			      u32 quirks)
682 {
683 	if (quirks & rqNeedEnMMIO) {
684 		int i = 0;
685 
686 		/* Check that selected MMIO registers match the PIO ones */
687 		while (mmio_verify_registers[i]) {
688 			int reg = mmio_verify_registers[i++];
689 			unsigned char a = inb(pioaddr+reg);
690 			unsigned char b = readb(ioaddr+reg);
691 
692 			if (a != b) {
693 				dev_err(hwdev,
694 					"MMIO do not match PIO [%02x] (%02x != %02x)\n",
695 					reg, a, b);
696 				return -EIO;
697 			}
698 		}
699 	}
700 	return 0;
701 }
702 
703 /*
704  * Loads bytes 0x00-0x05, 0x6E-0x6F, 0x78-0x7B from EEPROM
705  * (plus 0x6C for Rhine-I/II)
706  */
707 static void rhine_reload_eeprom(long pioaddr, struct net_device *dev)
708 {
709 	struct rhine_private *rp = netdev_priv(dev);
710 	void __iomem *ioaddr = rp->base;
711 	int i;
712 
713 	outb(0x20, pioaddr + MACRegEEcsr);
714 	for (i = 0; i < 1024; i++) {
715 		if (!(inb(pioaddr + MACRegEEcsr) & 0x20))
716 			break;
717 	}
718 	if (i > 512)
719 		pr_info("%4d cycles used @ %s:%d\n", i, __func__, __LINE__);
720 
721 	/*
722 	 * Reloading from EEPROM overwrites ConfigA-D, so we must re-enable
723 	 * MMIO. If reloading EEPROM was done first this could be avoided, but
724 	 * it is not known if that still works with the "win98-reboot" problem.
725 	 */
726 	enable_mmio(pioaddr, rp->quirks);
727 
728 	/* Turn off EEPROM-controlled wake-up (magic packet) */
729 	if (rp->quirks & rqWOL)
730 		iowrite8(ioread8(ioaddr + ConfigA) & 0xFC, ioaddr + ConfigA);
731 
732 }
733 
734 #ifdef CONFIG_NET_POLL_CONTROLLER
735 static void rhine_poll(struct net_device *dev)
736 {
737 	struct rhine_private *rp = netdev_priv(dev);
738 	const int irq = rp->irq;
739 
740 	disable_irq(irq);
741 	rhine_interrupt(irq, dev);
742 	enable_irq(irq);
743 }
744 #endif
745 
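/*
 * After a transmit underrun, raise the Tx FIFO threshold in TxConfig by one
 * 0x20 step (capped at 0xe0) so the chip buffers more of a frame before it
 * starts transmitting.
 */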
746 static void rhine_kick_tx_threshold(struct rhine_private *rp)
747 {
748 	if (rp->tx_thresh < 0xe0) {
749 		void __iomem *ioaddr = rp->base;
750 
751 		rp->tx_thresh += 0x20;
752 		BYTE_REG_BITS_SET(rp->tx_thresh, 0x80, ioaddr + TxConfig);
753 	}
754 }
755 
756 static void rhine_tx_err(struct rhine_private *rp, u32 status)
757 {
758 	struct net_device *dev = rp->dev;
759 
760 	if (status & IntrTxAborted) {
761 		netif_info(rp, tx_err, dev,
762 			   "Abort %08x, frame dropped\n", status);
763 	}
764 
765 	if (status & IntrTxUnderrun) {
766 		rhine_kick_tx_threshold(rp);
767 		netif_info(rp, tx_err, dev, "Transmitter underrun, "
768 			   "Tx threshold now %02x\n", rp->tx_thresh);
769 	}
770 
771 	if (status & IntrTxDescRace)
772 		netif_info(rp, tx_err, dev, "Tx descriptor write-back race\n");
773 
774 	if ((status & IntrTxError) &&
775 	    (status & (IntrTxAborted | IntrTxUnderrun | IntrTxDescRace)) == 0) {
776 		rhine_kick_tx_threshold(rp);
777 		netif_info(rp, tx_err, dev, "Unspecified error. "
778 			   "Tx threshold now %02x\n", rp->tx_thresh);
779 	}
780 
781 	rhine_restart_tx(dev);
782 }
783 
784 static void rhine_update_rx_crc_and_missed_errord(struct rhine_private *rp)
785 {
786 	void __iomem *ioaddr = rp->base;
787 	struct net_device_stats *stats = &rp->dev->stats;
788 
789 	stats->rx_crc_errors    += ioread16(ioaddr + RxCRCErrs);
790 	stats->rx_missed_errors += ioread16(ioaddr + RxMissed);
791 
792 	/*
793 	 * Clears the "tally counters" for CRC errors and missed frames(?).
794 	 * It has been reported that some chips need a write of 0 to clear
795 	 * these, for others the counters are set to 1 when written to and
796 	 * instead cleared when read. So we clear them both ways ...
797 	 */
798 	iowrite32(0, ioaddr + RxMissed);
799 	ioread16(ioaddr + RxCRCErrs);
800 	ioread16(ioaddr + RxMissed);
801 }
802 
803 #define RHINE_EVENT_NAPI_RX	(IntrRxDone | \
804 				 IntrRxErr | \
805 				 IntrRxEmpty | \
806 				 IntrRxOverflow	| \
807 				 IntrRxDropped | \
808 				 IntrRxNoBuf | \
809 				 IntrRxWakeUp)
810 
811 #define RHINE_EVENT_NAPI_TX_ERR	(IntrTxError | \
812 				 IntrTxAborted | \
813 				 IntrTxUnderrun | \
814 				 IntrTxDescRace)
815 #define RHINE_EVENT_NAPI_TX	(IntrTxDone | RHINE_EVENT_NAPI_TX_ERR)
816 
817 #define RHINE_EVENT_NAPI	(RHINE_EVENT_NAPI_RX | \
818 				 RHINE_EVENT_NAPI_TX | \
819 				 IntrStatsMax)
820 #define RHINE_EVENT_SLOW	(IntrPCIErr | IntrLinkChange)
821 #define RHINE_EVENT		(RHINE_EVENT_NAPI | RHINE_EVENT_SLOW)
822 
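/*
 * NAPI poll handler: acknowledges and services the "fast" events (Rx, Tx and
 * statistics overflow) here, while the "slow" events (PCI error, link change)
 * are left unacknowledged and deferred to rhine_slow_event_task(), which runs
 * in process context.
 */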
823 static int rhine_napipoll(struct napi_struct *napi, int budget)
824 {
825 	struct rhine_private *rp = container_of(napi, struct rhine_private, napi);
826 	struct net_device *dev = rp->dev;
827 	void __iomem *ioaddr = rp->base;
828 	u16 enable_mask = RHINE_EVENT & 0xffff;
829 	int work_done = 0;
830 	u32 status;
831 
832 	status = rhine_get_events(rp);
833 	rhine_ack_events(rp, status & ~RHINE_EVENT_SLOW);
834 
835 	if (status & RHINE_EVENT_NAPI_RX)
836 		work_done += rhine_rx(dev, budget);
837 
838 	if (status & RHINE_EVENT_NAPI_TX) {
839 		if (status & RHINE_EVENT_NAPI_TX_ERR) {
840 			/* Avoid scavenging before Tx engine turned off */
841 			rhine_wait_bit_low(rp, ChipCmd, CmdTxOn);
842 			if (ioread8(ioaddr + ChipCmd) & CmdTxOn)
843 				netif_warn(rp, tx_err, dev, "Tx still on\n");
844 		}
845 
846 		rhine_tx(dev);
847 
848 		if (status & RHINE_EVENT_NAPI_TX_ERR)
849 			rhine_tx_err(rp, status);
850 	}
851 
852 	if (status & IntrStatsMax) {
853 		spin_lock(&rp->lock);
854 		rhine_update_rx_crc_and_missed_errord(rp);
855 		spin_unlock(&rp->lock);
856 	}
857 
858 	if (status & RHINE_EVENT_SLOW) {
859 		enable_mask &= ~RHINE_EVENT_SLOW;
860 		schedule_work(&rp->slow_event_task);
861 	}
862 
863 	if (work_done < budget) {
864 		napi_complete(napi);
865 		iowrite16(enable_mask, ioaddr + IntrEnable);
866 		mmiowb();
867 	}
868 	return work_done;
869 }
870 
871 static void rhine_hw_init(struct net_device *dev, long pioaddr)
872 {
873 	struct rhine_private *rp = netdev_priv(dev);
874 
875 	/* Reset the chip to erase previous misconfiguration. */
876 	rhine_chip_reset(dev);
877 
878 	/* Rhine-I needs extra time to recuperate before EEPROM reload */
879 	if (rp->quirks & rqRhineI)
880 		msleep(5);
881 
882 	/* Reload EEPROM controlled bytes cleared by soft reset */
883 	if (dev_is_pci(dev->dev.parent))
884 		rhine_reload_eeprom(pioaddr, dev);
885 }
886 
887 static const struct net_device_ops rhine_netdev_ops = {
888 	.ndo_open		 = rhine_open,
889 	.ndo_stop		 = rhine_close,
890 	.ndo_start_xmit		 = rhine_start_tx,
891 	.ndo_get_stats64	 = rhine_get_stats64,
892 	.ndo_set_rx_mode	 = rhine_set_rx_mode,
893 	.ndo_change_mtu		 = eth_change_mtu,
894 	.ndo_validate_addr	 = eth_validate_addr,
895 	.ndo_set_mac_address 	 = eth_mac_addr,
896 	.ndo_do_ioctl		 = netdev_ioctl,
897 	.ndo_tx_timeout 	 = rhine_tx_timeout,
898 	.ndo_vlan_rx_add_vid	 = rhine_vlan_rx_add_vid,
899 	.ndo_vlan_rx_kill_vid	 = rhine_vlan_rx_kill_vid,
900 #ifdef CONFIG_NET_POLL_CONTROLLER
901 	.ndo_poll_controller	 = rhine_poll,
902 #endif
903 };
904 
905 static int rhine_init_one_common(struct device *hwdev, u32 quirks,
906 				 long pioaddr, void __iomem *ioaddr, int irq)
907 {
908 	struct net_device *dev;
909 	struct rhine_private *rp;
910 	int i, rc, phy_id;
911 	const char *name;
912 
913 	/* this should always be supported */
914 	rc = dma_set_mask(hwdev, DMA_BIT_MASK(32));
915 	if (rc) {
916 		dev_err(hwdev, "32-bit DMA addresses not supported by the card!?\n");
917 		goto err_out;
918 	}
919 
920 	dev = alloc_etherdev(sizeof(struct rhine_private));
921 	if (!dev) {
922 		rc = -ENOMEM;
923 		goto err_out;
924 	}
925 	SET_NETDEV_DEV(dev, hwdev);
926 
927 	rp = netdev_priv(dev);
928 	rp->dev = dev;
929 	rp->quirks = quirks;
930 	rp->pioaddr = pioaddr;
931 	rp->base = ioaddr;
932 	rp->irq = irq;
933 	rp->msg_enable = netif_msg_init(debug, RHINE_MSG_DEFAULT);
934 
935 	phy_id = rp->quirks & rqIntPHY ? 1 : 0;
936 
937 	u64_stats_init(&rp->tx_stats.syncp);
938 	u64_stats_init(&rp->rx_stats.syncp);
939 
940 	/* Get chip registers into a sane state */
941 	rhine_power_init(dev);
942 	rhine_hw_init(dev, pioaddr);
943 
944 	for (i = 0; i < 6; i++)
945 		dev->dev_addr[i] = ioread8(ioaddr + StationAddr + i);
946 
947 	if (!is_valid_ether_addr(dev->dev_addr)) {
948 		/* Report it and use a random ethernet address instead */
949 		netdev_err(dev, "Invalid MAC address: %pM\n", dev->dev_addr);
950 		eth_hw_addr_random(dev);
951 		netdev_info(dev, "Using random MAC address: %pM\n",
952 			    dev->dev_addr);
953 	}
954 
955 	/* For Rhine-I/II, phy_id is loaded from EEPROM */
956 	if (!phy_id)
957 		phy_id = ioread8(ioaddr + 0x6C);
958 
959 	spin_lock_init(&rp->lock);
960 	mutex_init(&rp->task_lock);
961 	INIT_WORK(&rp->reset_task, rhine_reset_task);
962 	INIT_WORK(&rp->slow_event_task, rhine_slow_event_task);
963 
964 	rp->mii_if.dev = dev;
965 	rp->mii_if.mdio_read = mdio_read;
966 	rp->mii_if.mdio_write = mdio_write;
967 	rp->mii_if.phy_id_mask = 0x1f;
968 	rp->mii_if.reg_num_mask = 0x1f;
969 
970 	/* The chip-specific entries in the device structure. */
971 	dev->netdev_ops = &rhine_netdev_ops;
972 	dev->ethtool_ops = &netdev_ethtool_ops;
973 	dev->watchdog_timeo = TX_TIMEOUT;
974 
975 	netif_napi_add(dev, &rp->napi, rhine_napipoll, 64);
976 
977 	if (rp->quirks & rqRhineI)
978 		dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM;
979 
980 	if (rp->quirks & rqMgmt)
981 		dev->features |= NETIF_F_HW_VLAN_CTAG_TX |
982 				 NETIF_F_HW_VLAN_CTAG_RX |
983 				 NETIF_F_HW_VLAN_CTAG_FILTER;
984 
985 	/* dev->name not defined before register_netdev()! */
986 	rc = register_netdev(dev);
987 	if (rc)
988 		goto err_out_free_netdev;
989 
990 	if (rp->quirks & rqRhineI)
991 		name = "Rhine";
992 	else if (rp->quirks & rqStatusWBRace)
993 		name = "Rhine II";
994 	else if (rp->quirks & rqMgmt)
995 		name = "Rhine III (Management Adapter)";
996 	else
997 		name = "Rhine III";
998 
999 	netdev_info(dev, "VIA %s at 0x%lx, %pM, IRQ %d\n",
1000 		    name, (long)ioaddr, dev->dev_addr, rp->irq);
1001 
1002 	dev_set_drvdata(hwdev, dev);
1003 
1004 	{
1005 		u16 mii_cmd;
1006 		int mii_status = mdio_read(dev, phy_id, 1);
1007 		mii_cmd = mdio_read(dev, phy_id, MII_BMCR) & ~BMCR_ISOLATE;
1008 		mdio_write(dev, phy_id, MII_BMCR, mii_cmd);
1009 		if (mii_status != 0xffff && mii_status != 0x0000) {
1010 			rp->mii_if.advertising = mdio_read(dev, phy_id, 4);
1011 			netdev_info(dev,
1012 				    "MII PHY found at address %d, status 0x%04x advertising %04x Link %04x\n",
1013 				    phy_id,
1014 				    mii_status, rp->mii_if.advertising,
1015 				    mdio_read(dev, phy_id, 5));
1016 
1017 			/* set IFF_RUNNING */
1018 			if (mii_status & BMSR_LSTATUS)
1019 				netif_carrier_on(dev);
1020 			else
1021 				netif_carrier_off(dev);
1022 
1023 		}
1024 	}
1025 	rp->mii_if.phy_id = phy_id;
1026 	if (avoid_D3)
1027 		netif_info(rp, probe, dev, "No D3 power state at shutdown\n");
1028 
1029 	return 0;
1030 
1031 err_out_free_netdev:
1032 	free_netdev(dev);
1033 err_out:
1034 	return rc;
1035 }
1036 
1037 static int rhine_init_one_pci(struct pci_dev *pdev,
1038 			      const struct pci_device_id *ent)
1039 {
1040 	struct device *hwdev = &pdev->dev;
1041 	int rc;
1042 	long pioaddr, memaddr;
1043 	void __iomem *ioaddr;
1044 	int io_size = pdev->revision < VTunknown0 ? 128 : 256;
1045 
1046 /* This driver was written to use PCI memory space. Some early versions
1047  * of the Rhine may only work correctly with I/O space accesses.
1048  * TODO: determine for which revisions this is true and assign the flag
1049  *	 in code as opposed to this Kconfig option (???)
1050  */
1051 #ifdef CONFIG_VIA_RHINE_MMIO
1052 	u32 quirks = rqNeedEnMMIO;
1053 #else
1054 	u32 quirks = 0;
1055 #endif
1056 
1057 /* when built into the kernel, we only print version if device is found */
1058 #ifndef MODULE
1059 	pr_info_once("%s\n", version);
1060 #endif
1061 
1062 	rc = pci_enable_device(pdev);
1063 	if (rc)
1064 		goto err_out;
1065 
1066 	if (pdev->revision < VTunknown0) {
1067 		quirks |= rqRhineI;
1068 	} else if (pdev->revision >= VT6102) {
1069 		quirks |= rqWOL | rqForceReset;
1070 		if (pdev->revision < VT6105) {
1071 			quirks |= rqStatusWBRace;
1072 		} else {
1073 			quirks |= rqIntPHY;
1074 			if (pdev->revision >= VT6105_B0)
1075 				quirks |= rq6patterns;
1076 			if (pdev->revision >= VT6105M)
1077 				quirks |= rqMgmt;
1078 		}
1079 	}
1080 
1081 	/* sanity check */
1082 	if ((pci_resource_len(pdev, 0) < io_size) ||
1083 	    (pci_resource_len(pdev, 1) < io_size)) {
1084 		rc = -EIO;
1085 		dev_err(hwdev, "Insufficient PCI resources, aborting\n");
1086 		goto err_out_pci_disable;
1087 	}
1088 
1089 	pioaddr = pci_resource_start(pdev, 0);
1090 	memaddr = pci_resource_start(pdev, 1);
1091 
1092 	pci_set_master(pdev);
1093 
1094 	rc = pci_request_regions(pdev, DRV_NAME);
1095 	if (rc)
1096 		goto err_out_pci_disable;
1097 
1098 	ioaddr = pci_iomap(pdev, (quirks & rqNeedEnMMIO ? 1 : 0), io_size);
1099 	if (!ioaddr) {
1100 		rc = -EIO;
1101 		dev_err(hwdev,
1102 			"ioremap failed for device %s, region 0x%X @ 0x%lX\n",
1103 			dev_name(hwdev), io_size, memaddr);
1104 		goto err_out_free_res;
1105 	}
1106 
1107 	enable_mmio(pioaddr, quirks);
1108 
1109 	rc = verify_mmio(hwdev, pioaddr, ioaddr, quirks);
1110 	if (rc)
1111 		goto err_out_unmap;
1112 
1113 	rc = rhine_init_one_common(&pdev->dev, quirks,
1114 				   pioaddr, ioaddr, pdev->irq);
1115 	if (!rc)
1116 		return 0;
1117 
1118 err_out_unmap:
1119 	pci_iounmap(pdev, ioaddr);
1120 err_out_free_res:
1121 	pci_release_regions(pdev);
1122 err_out_pci_disable:
1123 	pci_disable_device(pdev);
1124 err_out:
1125 	return rc;
1126 }
1127 
1128 static int rhine_init_one_platform(struct platform_device *pdev)
1129 {
1130 	const struct of_device_id *match;
1131 	const u32 *quirks;
1132 	int irq;
1133 	struct resource *res;
1134 	void __iomem *ioaddr;
1135 
1136 	match = of_match_device(rhine_of_tbl, &pdev->dev);
1137 	if (!match)
1138 		return -EINVAL;
1139 
1140 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1141 	ioaddr = devm_ioremap_resource(&pdev->dev, res);
1142 	if (IS_ERR(ioaddr))
1143 		return PTR_ERR(ioaddr);
1144 
1145 	irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
1146 	if (!irq)
1147 		return -EINVAL;
1148 
1149 	quirks = match->data;
1150 	if (!quirks)
1151 		return -EINVAL;
1152 
1153 	return rhine_init_one_common(&pdev->dev, *quirks,
1154 				     (long)ioaddr, ioaddr, irq);
1155 }
1156 
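/*
 * Allocate a single coherent DMA block holding the Rx descriptor ring
 * followed by the Tx descriptor ring. Rhine-I additionally gets a block of
 * bounce buffers, since it cannot transmit from unaligned or fragmented skbs.
 */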
1157 static int alloc_ring(struct net_device* dev)
1158 {
1159 	struct rhine_private *rp = netdev_priv(dev);
1160 	struct device *hwdev = dev->dev.parent;
1161 	void *ring;
1162 	dma_addr_t ring_dma;
1163 
1164 	ring = dma_alloc_coherent(hwdev,
1165 				  RX_RING_SIZE * sizeof(struct rx_desc) +
1166 				  TX_RING_SIZE * sizeof(struct tx_desc),
1167 				  &ring_dma,
1168 				  GFP_ATOMIC);
1169 	if (!ring) {
1170 		netdev_err(dev, "Could not allocate DMA memory\n");
1171 		return -ENOMEM;
1172 	}
1173 	if (rp->quirks & rqRhineI) {
1174 		rp->tx_bufs = dma_alloc_coherent(hwdev,
1175 						 PKT_BUF_SZ * TX_RING_SIZE,
1176 						 &rp->tx_bufs_dma,
1177 						 GFP_ATOMIC);
1178 		if (rp->tx_bufs == NULL) {
1179 			dma_free_coherent(hwdev,
1180 					  RX_RING_SIZE * sizeof(struct rx_desc) +
1181 					  TX_RING_SIZE * sizeof(struct tx_desc),
1182 					  ring, ring_dma);
1183 			return -ENOMEM;
1184 		}
1185 	}
1186 
1187 	rp->rx_ring = ring;
1188 	rp->tx_ring = ring + RX_RING_SIZE * sizeof(struct rx_desc);
1189 	rp->rx_ring_dma = ring_dma;
1190 	rp->tx_ring_dma = ring_dma + RX_RING_SIZE * sizeof(struct rx_desc);
1191 
1192 	return 0;
1193 }
1194 
1195 static void free_ring(struct net_device* dev)
1196 {
1197 	struct rhine_private *rp = netdev_priv(dev);
1198 	struct device *hwdev = dev->dev.parent;
1199 
1200 	dma_free_coherent(hwdev,
1201 			  RX_RING_SIZE * sizeof(struct rx_desc) +
1202 			  TX_RING_SIZE * sizeof(struct tx_desc),
1203 			  rp->rx_ring, rp->rx_ring_dma);
1204 	rp->tx_ring = NULL;
1205 
1206 	if (rp->tx_bufs)
1207 		dma_free_coherent(hwdev, PKT_BUF_SZ * TX_RING_SIZE,
1208 				  rp->tx_bufs, rp->tx_bufs_dma);
1209 
1210 	rp->tx_bufs = NULL;
1211 
1212 }
1213 
1214 static void alloc_rbufs(struct net_device *dev)
1215 {
1216 	struct rhine_private *rp = netdev_priv(dev);
1217 	struct device *hwdev = dev->dev.parent;
1218 	dma_addr_t next;
1219 	int i;
1220 
1221 	rp->dirty_rx = rp->cur_rx = 0;
1222 
1223 	rp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
1224 	rp->rx_head_desc = &rp->rx_ring[0];
1225 	next = rp->rx_ring_dma;
1226 
1227 	/* Init the ring entries */
1228 	for (i = 0; i < RX_RING_SIZE; i++) {
1229 		rp->rx_ring[i].rx_status = 0;
1230 		rp->rx_ring[i].desc_length = cpu_to_le32(rp->rx_buf_sz);
1231 		next += sizeof(struct rx_desc);
1232 		rp->rx_ring[i].next_desc = cpu_to_le32(next);
1233 		rp->rx_skbuff[i] = NULL;
1234 	}
1235 	/* Mark the last entry as wrapping the ring. */
1236 	rp->rx_ring[i-1].next_desc = cpu_to_le32(rp->rx_ring_dma);
1237 
1238 	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
1239 	for (i = 0; i < RX_RING_SIZE; i++) {
1240 		struct sk_buff *skb = netdev_alloc_skb(dev, rp->rx_buf_sz);
1241 		rp->rx_skbuff[i] = skb;
1242 		if (skb == NULL)
1243 			break;
1244 
1245 		rp->rx_skbuff_dma[i] =
1246 			dma_map_single(hwdev, skb->data, rp->rx_buf_sz,
1247 				       DMA_FROM_DEVICE);
1248 		if (dma_mapping_error(hwdev, rp->rx_skbuff_dma[i])) {
1249 			rp->rx_skbuff_dma[i] = 0;
1250 			dev_kfree_skb(skb);
1251 			break;
1252 		}
1253 		rp->rx_ring[i].addr = cpu_to_le32(rp->rx_skbuff_dma[i]);
1254 		rp->rx_ring[i].rx_status = cpu_to_le32(DescOwn);
1255 	}
1256 	rp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
1257 }
1258 
1259 static void free_rbufs(struct net_device* dev)
1260 {
1261 	struct rhine_private *rp = netdev_priv(dev);
1262 	struct device *hwdev = dev->dev.parent;
1263 	int i;
1264 
1265 	/* Free all the skbuffs in the Rx queue. */
1266 	for (i = 0; i < RX_RING_SIZE; i++) {
1267 		rp->rx_ring[i].rx_status = 0;
1268 		rp->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
1269 		if (rp->rx_skbuff[i]) {
1270 			dma_unmap_single(hwdev,
1271 					 rp->rx_skbuff_dma[i],
1272 					 rp->rx_buf_sz, DMA_FROM_DEVICE);
1273 			dev_kfree_skb(rp->rx_skbuff[i]);
1274 		}
1275 		rp->rx_skbuff[i] = NULL;
1276 	}
1277 }
1278 
1279 static void alloc_tbufs(struct net_device* dev)
1280 {
1281 	struct rhine_private *rp = netdev_priv(dev);
1282 	dma_addr_t next;
1283 	int i;
1284 
1285 	rp->dirty_tx = rp->cur_tx = 0;
1286 	next = rp->tx_ring_dma;
1287 	for (i = 0; i < TX_RING_SIZE; i++) {
1288 		rp->tx_skbuff[i] = NULL;
1289 		rp->tx_ring[i].tx_status = 0;
1290 		rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
1291 		next += sizeof(struct tx_desc);
1292 		rp->tx_ring[i].next_desc = cpu_to_le32(next);
1293 		if (rp->quirks & rqRhineI)
1294 			rp->tx_buf[i] = &rp->tx_bufs[i * PKT_BUF_SZ];
1295 	}
1296 	rp->tx_ring[i-1].next_desc = cpu_to_le32(rp->tx_ring_dma);
1297 
1298 }
1299 
1300 static void free_tbufs(struct net_device* dev)
1301 {
1302 	struct rhine_private *rp = netdev_priv(dev);
1303 	struct device *hwdev = dev->dev.parent;
1304 	int i;
1305 
1306 	for (i = 0; i < TX_RING_SIZE; i++) {
1307 		rp->tx_ring[i].tx_status = 0;
1308 		rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
1309 		rp->tx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
1310 		if (rp->tx_skbuff[i]) {
1311 			if (rp->tx_skbuff_dma[i]) {
1312 				dma_unmap_single(hwdev,
1313 						 rp->tx_skbuff_dma[i],
1314 						 rp->tx_skbuff[i]->len,
1315 						 DMA_TO_DEVICE);
1316 			}
1317 			dev_kfree_skb(rp->tx_skbuff[i]);
1318 		}
1319 		rp->tx_skbuff[i] = NULL;
1320 		rp->tx_buf[i] = NULL;
1321 	}
1322 }
1323 
1324 static void rhine_check_media(struct net_device *dev, unsigned int init_media)
1325 {
1326 	struct rhine_private *rp = netdev_priv(dev);
1327 	void __iomem *ioaddr = rp->base;
1328 
1329 	mii_check_media(&rp->mii_if, netif_msg_link(rp), init_media);
1330 
1331 	if (rp->mii_if.full_duplex)
1332 	    iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1FDuplex,
1333 		   ioaddr + ChipCmd1);
1334 	else
1335 	    iowrite8(ioread8(ioaddr + ChipCmd1) & ~Cmd1FDuplex,
1336 		   ioaddr + ChipCmd1);
1337 
1338 	netif_info(rp, link, dev, "force_media %d, carrier %d\n",
1339 		   rp->mii_if.force_media, netif_carrier_ok(dev));
1340 }
1341 
1342 /* Called after status of force_media possibly changed */
1343 static void rhine_set_carrier(struct mii_if_info *mii)
1344 {
1345 	struct net_device *dev = mii->dev;
1346 	struct rhine_private *rp = netdev_priv(dev);
1347 
1348 	if (mii->force_media) {
1349 		/* autoneg is off: Link is always assumed to be up */
1350 		if (!netif_carrier_ok(dev))
1351 			netif_carrier_on(dev);
1352 	}
1353 
1354 	rhine_check_media(dev, 0);
1355 
1356 	netif_info(rp, link, dev, "force_media %d, carrier %d\n",
1357 		   mii->force_media, netif_carrier_ok(dev));
1358 }
1359 
1360 /**
1361  * rhine_set_cam - set CAM multicast filters
1362  * @ioaddr: register block of this Rhine
1363  * @idx: multicast CAM index [0..MCAM_SIZE-1]
1364  * @addr: multicast address (6 bytes)
1365  *
1366  * Load addresses into multicast filters.
1367  */
1368 static void rhine_set_cam(void __iomem *ioaddr, int idx, u8 *addr)
1369 {
1370 	int i;
1371 
1372 	iowrite8(CAMC_CAMEN, ioaddr + CamCon);
1373 	wmb();
1374 
1375 	/* Paranoid -- idx out of range should never happen */
1376 	idx &= (MCAM_SIZE - 1);
1377 
1378 	iowrite8((u8) idx, ioaddr + CamAddr);
1379 
1380 	for (i = 0; i < 6; i++, addr++)
1381 		iowrite8(*addr, ioaddr + MulticastFilter0 + i);
1382 	udelay(10);
1383 	wmb();
1384 
1385 	iowrite8(CAMC_CAMWR | CAMC_CAMEN, ioaddr + CamCon);
1386 	udelay(10);
1387 
1388 	iowrite8(0, ioaddr + CamCon);
1389 }
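
/*
 * CAM entries are programmed indirectly: enable CAM access via CamCon, select
 * the entry with CamAddr, stage the data through the MulticastFilter0 window,
 * latch it with CAMC_CAMWR and then disable CAM access again.
 */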
1390 
1391 /**
1392  * rhine_set_vlan_cam - set CAM VLAN filters
1393  * @ioaddr: register block of this Rhine
1394  * @idx: VLAN CAM index [0..VCAM_SIZE-1]
1395  * @addr: VLAN ID (2 bytes)
1396  *
1397  * Load addresses into VLAN filters.
1398  */
1399 static void rhine_set_vlan_cam(void __iomem *ioaddr, int idx, u8 *addr)
1400 {
1401 	iowrite8(CAMC_CAMEN | CAMC_VCAMSL, ioaddr + CamCon);
1402 	wmb();
1403 
1404 	/* Paranoid -- idx out of range should never happen */
1405 	idx &= (VCAM_SIZE - 1);
1406 
1407 	iowrite8((u8) idx, ioaddr + CamAddr);
1408 
1409 	iowrite16(*((u16 *) addr), ioaddr + MulticastFilter0 + 6);
1410 	udelay(10);
1411 	wmb();
1412 
1413 	iowrite8(CAMC_CAMWR | CAMC_CAMEN, ioaddr + CamCon);
1414 	udelay(10);
1415 
1416 	iowrite8(0, ioaddr + CamCon);
1417 }
1418 
1419 /**
1420  * rhine_set_cam_mask - set multicast CAM mask
1421  * @ioaddr: register block of this Rhine
1422  * @mask: multicast CAM mask
1423  *
1424  * Mask sets multicast filters active/inactive.
1425  */
1426 static void rhine_set_cam_mask(void __iomem *ioaddr, u32 mask)
1427 {
1428 	iowrite8(CAMC_CAMEN, ioaddr + CamCon);
1429 	wmb();
1430 
1431 	/* write mask */
1432 	iowrite32(mask, ioaddr + CamMask);
1433 
1434 	/* disable CAMEN */
1435 	iowrite8(0, ioaddr + CamCon);
1436 }
1437 
1438 /**
1439  * rhine_set_vlan_cam_mask - set VLAN CAM mask
1440  * @ioaddr: register block of this Rhine
1441  * @mask: VLAN CAM mask
1442  *
1443  * Mask sets VLAN filters active/inactive.
1444  */
1445 static void rhine_set_vlan_cam_mask(void __iomem *ioaddr, u32 mask)
1446 {
1447 	iowrite8(CAMC_CAMEN | CAMC_VCAMSL, ioaddr + CamCon);
1448 	wmb();
1449 
1450 	/* write mask */
1451 	iowrite32(mask, ioaddr + CamMask);
1452 
1453 	/* disable CAMEN */
1454 	iowrite8(0, ioaddr + CamCon);
1455 }
1456 
1457 /**
1458  * rhine_init_cam_filter - initialize CAM filters
1459  * @dev: network device
1460  *
1461  * Initialize (disable) hardware VLAN and multicast support on this
1462  * Rhine.
1463  */
1464 static void rhine_init_cam_filter(struct net_device *dev)
1465 {
1466 	struct rhine_private *rp = netdev_priv(dev);
1467 	void __iomem *ioaddr = rp->base;
1468 
1469 	/* Disable all CAMs */
1470 	rhine_set_vlan_cam_mask(ioaddr, 0);
1471 	rhine_set_cam_mask(ioaddr, 0);
1472 
1473 	/* disable hardware VLAN support */
1474 	BYTE_REG_BITS_ON(TCR_PQEN, ioaddr + TxConfig);
1475 	BYTE_REG_BITS_OFF(BCR1_VIDFR, ioaddr + PCIBusConfig1);
1476 }
1477 
1478 /**
1479  * rhine_update_vcam - update VLAN CAM filters
1480  * @dev: network device whose VLAN filter configuration changed
1481  *
1482  * Update VLAN CAM filters to match configuration change.
1483  */
1484 static void rhine_update_vcam(struct net_device *dev)
1485 {
1486 	struct rhine_private *rp = netdev_priv(dev);
1487 	void __iomem *ioaddr = rp->base;
1488 	u16 vid;
1489 	u32 vCAMmask = 0;	/* 32 vCAMs (6105M and better) */
1490 	unsigned int i = 0;
1491 
1492 	for_each_set_bit(vid, rp->active_vlans, VLAN_N_VID) {
1493 		rhine_set_vlan_cam(ioaddr, i, (u8 *)&vid);
1494 		vCAMmask |= 1 << i;
1495 		if (++i >= VCAM_SIZE)
1496 			break;
1497 	}
1498 	rhine_set_vlan_cam_mask(ioaddr, vCAMmask);
1499 }
1500 
1501 static int rhine_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
1502 {
1503 	struct rhine_private *rp = netdev_priv(dev);
1504 
1505 	spin_lock_bh(&rp->lock);
1506 	set_bit(vid, rp->active_vlans);
1507 	rhine_update_vcam(dev);
1508 	spin_unlock_bh(&rp->lock);
1509 	return 0;
1510 }
1511 
1512 static int rhine_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
1513 {
1514 	struct rhine_private *rp = netdev_priv(dev);
1515 
1516 	spin_lock_bh(&rp->lock);
1517 	clear_bit(vid, rp->active_vlans);
1518 	rhine_update_vcam(dev);
1519 	spin_unlock_bh(&rp->lock);
1520 	return 0;
1521 }
1522 
1523 static void init_registers(struct net_device *dev)
1524 {
1525 	struct rhine_private *rp = netdev_priv(dev);
1526 	void __iomem *ioaddr = rp->base;
1527 	int i;
1528 
1529 	for (i = 0; i < 6; i++)
1530 		iowrite8(dev->dev_addr[i], ioaddr + StationAddr + i);
1531 
1532 	/* Initialize other registers. */
1533 	iowrite16(0x0006, ioaddr + PCIBusConfig);	/* Tune configuration??? */
1534 	/* Configure initial FIFO thresholds. */
1535 	iowrite8(0x20, ioaddr + TxConfig);
1536 	rp->tx_thresh = 0x20;
1537 	rp->rx_thresh = 0x60;		/* Written in rhine_set_rx_mode(). */
1538 
1539 	iowrite32(rp->rx_ring_dma, ioaddr + RxRingPtr);
1540 	iowrite32(rp->tx_ring_dma, ioaddr + TxRingPtr);
1541 
1542 	rhine_set_rx_mode(dev);
1543 
1544 	if (rp->quirks & rqMgmt)
1545 		rhine_init_cam_filter(dev);
1546 
1547 	napi_enable(&rp->napi);
1548 
1549 	iowrite16(RHINE_EVENT & 0xffff, ioaddr + IntrEnable);
1550 
1551 	iowrite16(CmdStart | CmdTxOn | CmdRxOn | (Cmd1NoTxPoll << 8),
1552 	       ioaddr + ChipCmd);
1553 	rhine_check_media(dev, 1);
1554 }
1555 
1556 /* Enable MII link status auto-polling (required for IntrLinkChange) */
1557 static void rhine_enable_linkmon(struct rhine_private *rp)
1558 {
1559 	void __iomem *ioaddr = rp->base;
1560 
1561 	iowrite8(0, ioaddr + MIICmd);
1562 	iowrite8(MII_BMSR, ioaddr + MIIRegAddr);
1563 	iowrite8(0x80, ioaddr + MIICmd);
1564 
1565 	rhine_wait_bit_high(rp, MIIRegAddr, 0x20);
1566 
1567 	iowrite8(MII_BMSR | 0x40, ioaddr + MIIRegAddr);
1568 }
1569 
1570 /* Disable MII link status auto-polling (required for MDIO access) */
1571 static void rhine_disable_linkmon(struct rhine_private *rp)
1572 {
1573 	void __iomem *ioaddr = rp->base;
1574 
1575 	iowrite8(0, ioaddr + MIICmd);
1576 
1577 	if (rp->quirks & rqRhineI) {
1578 		iowrite8(0x01, ioaddr + MIIRegAddr);	// MII_BMSR
1579 
1580 		/* Can be called from ISR. Evil. */
1581 		mdelay(1);
1582 
1583 		/* 0x80 must be set immediately before turning it off */
1584 		iowrite8(0x80, ioaddr + MIICmd);
1585 
1586 		rhine_wait_bit_high(rp, MIIRegAddr, 0x20);
1587 
1588 		/* Heh. Now clear 0x80 again. */
1589 		iowrite8(0, ioaddr + MIICmd);
1590 	}
1591 	else
1592 		rhine_wait_bit_high(rp, MIIRegAddr, 0x80);
1593 }
1594 
1595 /* Read and write over the MII Management Data I/O (MDIO) interface. */
1596 
1597 static int mdio_read(struct net_device *dev, int phy_id, int regnum)
1598 {
1599 	struct rhine_private *rp = netdev_priv(dev);
1600 	void __iomem *ioaddr = rp->base;
1601 	int result;
1602 
1603 	rhine_disable_linkmon(rp);
1604 
1605 	/* rhine_disable_linkmon already cleared MIICmd */
1606 	iowrite8(phy_id, ioaddr + MIIPhyAddr);
1607 	iowrite8(regnum, ioaddr + MIIRegAddr);
1608 	iowrite8(0x40, ioaddr + MIICmd);		/* Trigger read */
1609 	rhine_wait_bit_low(rp, MIICmd, 0x40);
1610 	result = ioread16(ioaddr + MIIData);
1611 
1612 	rhine_enable_linkmon(rp);
1613 	return result;
1614 }
1615 
1616 static void mdio_write(struct net_device *dev, int phy_id, int regnum, int value)
1617 {
1618 	struct rhine_private *rp = netdev_priv(dev);
1619 	void __iomem *ioaddr = rp->base;
1620 
1621 	rhine_disable_linkmon(rp);
1622 
1623 	/* rhine_disable_linkmon already cleared MIICmd */
1624 	iowrite8(phy_id, ioaddr + MIIPhyAddr);
1625 	iowrite8(regnum, ioaddr + MIIRegAddr);
1626 	iowrite16(value, ioaddr + MIIData);
1627 	iowrite8(0x20, ioaddr + MIICmd);		/* Trigger write */
1628 	rhine_wait_bit_low(rp, MIICmd, 0x20);
1629 
1630 	rhine_enable_linkmon(rp);
1631 }
1632 
1633 static void rhine_task_disable(struct rhine_private *rp)
1634 {
1635 	mutex_lock(&rp->task_lock);
1636 	rp->task_enable = false;
1637 	mutex_unlock(&rp->task_lock);
1638 
1639 	cancel_work_sync(&rp->slow_event_task);
1640 	cancel_work_sync(&rp->reset_task);
1641 }
1642 
1643 static void rhine_task_enable(struct rhine_private *rp)
1644 {
1645 	mutex_lock(&rp->task_lock);
1646 	rp->task_enable = true;
1647 	mutex_unlock(&rp->task_lock);
1648 }
1649 
1650 static int rhine_open(struct net_device *dev)
1651 {
1652 	struct rhine_private *rp = netdev_priv(dev);
1653 	void __iomem *ioaddr = rp->base;
1654 	int rc;
1655 
1656 	rc = request_irq(rp->irq, rhine_interrupt, IRQF_SHARED, dev->name, dev);
1657 	if (rc)
1658 		return rc;
1659 
1660 	netif_dbg(rp, ifup, dev, "%s() irq %d\n", __func__, rp->irq);
1661 
1662 	rc = alloc_ring(dev);
1663 	if (rc) {
1664 		free_irq(rp->irq, dev);
1665 		return rc;
1666 	}
1667 	alloc_rbufs(dev);
1668 	alloc_tbufs(dev);
1669 	rhine_chip_reset(dev);
1670 	rhine_task_enable(rp);
1671 	init_registers(dev);
1672 
1673 	netif_dbg(rp, ifup, dev, "%s() Done - status %04x MII status: %04x\n",
1674 		  __func__, ioread16(ioaddr + ChipCmd),
1675 		  mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
1676 
1677 	netif_start_queue(dev);
1678 
1679 	return 0;
1680 }
1681 
1682 static void rhine_reset_task(struct work_struct *work)
1683 {
1684 	struct rhine_private *rp = container_of(work, struct rhine_private,
1685 						reset_task);
1686 	struct net_device *dev = rp->dev;
1687 
1688 	mutex_lock(&rp->task_lock);
1689 
1690 	if (!rp->task_enable)
1691 		goto out_unlock;
1692 
1693 	napi_disable(&rp->napi);
1694 	netif_tx_disable(dev);
1695 	spin_lock_bh(&rp->lock);
1696 
1697 	/* clear all descriptors */
1698 	free_tbufs(dev);
1699 	free_rbufs(dev);
1700 	alloc_tbufs(dev);
1701 	alloc_rbufs(dev);
1702 
1703 	/* Reinitialize the hardware. */
1704 	rhine_chip_reset(dev);
1705 	init_registers(dev);
1706 
1707 	spin_unlock_bh(&rp->lock);
1708 
1709 	dev->trans_start = jiffies; /* prevent tx timeout */
1710 	dev->stats.tx_errors++;
1711 	netif_wake_queue(dev);
1712 
1713 out_unlock:
1714 	mutex_unlock(&rp->task_lock);
1715 }
1716 
1717 static void rhine_tx_timeout(struct net_device *dev)
1718 {
1719 	struct rhine_private *rp = netdev_priv(dev);
1720 	void __iomem *ioaddr = rp->base;
1721 
1722 	netdev_warn(dev, "Transmit timed out, status %04x, PHY status %04x, resetting...\n",
1723 		    ioread16(ioaddr + IntrStatus),
1724 		    mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
1725 
1726 	schedule_work(&rp->reset_task);
1727 }
1728 
1729 static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
1730 				  struct net_device *dev)
1731 {
1732 	struct rhine_private *rp = netdev_priv(dev);
1733 	struct device *hwdev = dev->dev.parent;
1734 	void __iomem *ioaddr = rp->base;
1735 	unsigned entry;
1736 
1737 	/* Caution: the write order is important here, set the field
1738 	   with the "ownership" bits last. */
1739 
1740 	/* Calculate the next Tx descriptor entry. */
1741 	entry = rp->cur_tx % TX_RING_SIZE;
1742 
1743 	if (skb_padto(skb, ETH_ZLEN))
1744 		return NETDEV_TX_OK;
1745 
1746 	rp->tx_skbuff[entry] = skb;
1747 
1748 	if ((rp->quirks & rqRhineI) &&
1749 	    (((unsigned long)skb->data & 3) || skb_shinfo(skb)->nr_frags != 0 || skb->ip_summed == CHECKSUM_PARTIAL)) {
1750 		/* Must use alignment buffer. */
1751 		if (skb->len > PKT_BUF_SZ) {
1752 			/* packet too long, drop it */
1753 			dev_kfree_skb_any(skb);
1754 			rp->tx_skbuff[entry] = NULL;
1755 			dev->stats.tx_dropped++;
1756 			return NETDEV_TX_OK;
1757 		}
1758 
1759 		/* Padding is not copied and so must be redone. */
1760 		skb_copy_and_csum_dev(skb, rp->tx_buf[entry]);
1761 		if (skb->len < ETH_ZLEN)
1762 			memset(rp->tx_buf[entry] + skb->len, 0,
1763 			       ETH_ZLEN - skb->len);
1764 		rp->tx_skbuff_dma[entry] = 0;
1765 		rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_bufs_dma +
1766 						      (rp->tx_buf[entry] -
1767 						       rp->tx_bufs));
1768 	} else {
1769 		rp->tx_skbuff_dma[entry] =
1770 			dma_map_single(hwdev, skb->data, skb->len,
1771 				       DMA_TO_DEVICE);
1772 		if (dma_mapping_error(hwdev, rp->tx_skbuff_dma[entry])) {
1773 			dev_kfree_skb_any(skb);
1774 			rp->tx_skbuff_dma[entry] = 0;
1775 			dev->stats.tx_dropped++;
1776 			return NETDEV_TX_OK;
1777 		}
1778 		rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_skbuff_dma[entry]);
1779 	}
1780 
1781 	rp->tx_ring[entry].desc_length =
1782 		cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));
1783 
1784 	if (unlikely(vlan_tx_tag_present(skb))) {
1785 		u16 vid_pcp = vlan_tx_tag_get(skb);
1786 
1787 		/* drop CFI/DEI bit, register needs VID and PCP */
1788 		vid_pcp = (vid_pcp & VLAN_VID_MASK) |
1789 			  ((vid_pcp & VLAN_PRIO_MASK) >> 1);
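		/*
		 * e.g. TCI 0x6064 (PCP 3, DEI 0, VID 0x064) becomes 0x3064:
		 * PCP moves from bits 15-13 to bits 14-12, the DEI bit is
		 * dropped and the VID stays in bits 11-0.
		 */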
1790 		rp->tx_ring[entry].tx_status = cpu_to_le32((vid_pcp) << 16);
1791 		/* request tagging */
1792 		rp->tx_ring[entry].desc_length |= cpu_to_le32(0x020000);
	} else
1795 		rp->tx_ring[entry].tx_status = 0;
1796 
	/* Make sure the descriptor body is written before we hand
	   ownership (DescOwn) to the chip. */
	wmb();
	rp->tx_ring[entry].tx_status |= cpu_to_le32(DescOwn);
	/* Make DescOwn visible before the Tx demand poke below. */
	wmb();
1801 
1802 	rp->cur_tx++;
1803 
1804 	/* Non-x86 Todo: explicitly flush cache lines here. */
1805 
1806 	if (vlan_tx_tag_present(skb))
1807 		/* Tx queues are bits 7-0 (first Tx queue: bit 7) */
1808 		BYTE_REG_BITS_ON(1 << 7, ioaddr + TQWake);
1809 
1810 	/* Wake the potentially-idle transmit channel */
1811 	iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
1812 	       ioaddr + ChipCmd1);
1813 	IOSYNC;
1814 
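	/*
	 * Stop the queue once TX_QUEUE_LEN descriptors are outstanding;
	 * rhine_tx() wakes it again when the backlog drops below
	 * TX_QUEUE_LEN - 4.
	 */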
1815 	if (rp->cur_tx == rp->dirty_tx + TX_QUEUE_LEN)
1816 		netif_stop_queue(dev);
1817 
1818 	netif_dbg(rp, tx_queued, dev, "Transmit frame #%d queued in slot %d\n",
1819 		  rp->cur_tx - 1, entry);
1820 
1821 	return NETDEV_TX_OK;
1822 }
1823 
1824 static void rhine_irq_disable(struct rhine_private *rp)
1825 {
1826 	iowrite16(0x0000, rp->base + IntrEnable);
1827 	mmiowb();
1828 }
1829 
/* The interrupt handler itself does no ring work: it masks interrupts and
   schedules NAPI polling, which handles Rx and cleans up after the Tx thread. */
1832 static irqreturn_t rhine_interrupt(int irq, void *dev_instance)
1833 {
1834 	struct net_device *dev = dev_instance;
1835 	struct rhine_private *rp = netdev_priv(dev);
1836 	u32 status;
1837 	int handled = 0;
1838 
1839 	status = rhine_get_events(rp);
1840 
1841 	netif_dbg(rp, intr, dev, "Interrupt, status %08x\n", status);
1842 
1843 	if (status & RHINE_EVENT) {
1844 		handled = 1;
1845 
1846 		rhine_irq_disable(rp);
1847 		napi_schedule(&rp->napi);
1848 	}
1849 
1850 	if (status & ~(IntrLinkChange | IntrStatsMax | RHINE_EVENT_NAPI)) {
1851 		netif_err(rp, intr, dev, "Something Wicked happened! %08x\n",
1852 			  status);
1853 	}
1854 
1855 	return IRQ_RETVAL(handled);
1856 }
1857 
1858 /* This routine is logically part of the interrupt handler, but isolated
1859    for clarity. */
1860 static void rhine_tx(struct net_device *dev)
1861 {
1862 	struct rhine_private *rp = netdev_priv(dev);
1863 	struct device *hwdev = dev->dev.parent;
1864 	int txstatus = 0, entry = rp->dirty_tx % TX_RING_SIZE;
1865 
1866 	/* find and cleanup dirty tx descriptors */
1867 	while (rp->dirty_tx != rp->cur_tx) {
1868 		txstatus = le32_to_cpu(rp->tx_ring[entry].tx_status);
1869 		netif_dbg(rp, tx_done, dev, "Tx scavenge %d status %08x\n",
1870 			  entry, txstatus);
1871 		if (txstatus & DescOwn)
1872 			break;
1873 		if (txstatus & 0x8000) {
1874 			netif_dbg(rp, tx_done, dev,
1875 				  "Transmit error, Tx status %08x\n", txstatus);
1876 			dev->stats.tx_errors++;
1877 			if (txstatus & 0x0400)
1878 				dev->stats.tx_carrier_errors++;
1879 			if (txstatus & 0x0200)
1880 				dev->stats.tx_window_errors++;
1881 			if (txstatus & 0x0100)
1882 				dev->stats.tx_aborted_errors++;
1883 			if (txstatus & 0x0080)
1884 				dev->stats.tx_heartbeat_errors++;
1885 			if (((rp->quirks & rqRhineI) && txstatus & 0x0002) ||
1886 			    (txstatus & 0x0800) || (txstatus & 0x1000)) {
1887 				dev->stats.tx_fifo_errors++;
1888 				rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
1889 				break; /* Keep the skb - we try again */
1890 			}
1891 			/* Transmitter restarted in 'abnormal' handler. */
1892 		} else {
1893 			if (rp->quirks & rqRhineI)
1894 				dev->stats.collisions += (txstatus >> 3) & 0x0F;
1895 			else
1896 				dev->stats.collisions += txstatus & 0x0F;
1897 			netif_dbg(rp, tx_done, dev, "collisions: %1.1x:%1.1x\n",
1898 				  (txstatus >> 3) & 0xF, txstatus & 0xF);
1899 
1900 			u64_stats_update_begin(&rp->tx_stats.syncp);
1901 			rp->tx_stats.bytes += rp->tx_skbuff[entry]->len;
1902 			rp->tx_stats.packets++;
1903 			u64_stats_update_end(&rp->tx_stats.syncp);
1904 		}
1905 		/* Free the original skb. */
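		/* Frames sent from the Rhine-I bounce buffer left
		   tx_skbuff_dma[] at zero and have no mapping to undo. */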
1906 		if (rp->tx_skbuff_dma[entry]) {
1907 			dma_unmap_single(hwdev,
1908 					 rp->tx_skbuff_dma[entry],
1909 					 rp->tx_skbuff[entry]->len,
1910 					 DMA_TO_DEVICE);
1911 		}
1912 		dev_consume_skb_any(rp->tx_skbuff[entry]);
1913 		rp->tx_skbuff[entry] = NULL;
1914 		entry = (++rp->dirty_tx) % TX_RING_SIZE;
1915 	}
1916 	if ((rp->cur_tx - rp->dirty_tx) < TX_QUEUE_LEN - 4)
1917 		netif_wake_queue(dev);
1918 }
1919 
1920 /**
1921  * rhine_get_vlan_tci - extract TCI from Rx data buffer
1922  * @skb: pointer to sk_buff
1923  * @data_size: used data area of the buffer including CRC
1924  *
1925  * If hardware VLAN tag extraction is enabled and the chip indicates a 802.1Q
1926  * packet, the extracted 802.1Q header (2 bytes TPID + 2 bytes TCI) is 4-byte
1927  * aligned following the CRC.
1928  */
1929 static inline u16 rhine_get_vlan_tci(struct sk_buff *skb, int data_size)
1930 {
1931 	u8 *trailer = (u8 *)skb->data + ((data_size + 3) & ~3) + 2;
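	/* e.g. data_size 64: 802.1Q header at offset 64, TCI at 64 + 2 = 66 */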
1932 	return be16_to_cpup((__be16 *)trailer);
1933 }
1934 
1935 /* Process up to limit frames from receive ring */
1936 static int rhine_rx(struct net_device *dev, int limit)
1937 {
1938 	struct rhine_private *rp = netdev_priv(dev);
1939 	struct device *hwdev = dev->dev.parent;
1940 	int count;
1941 	int entry = rp->cur_rx % RX_RING_SIZE;
1942 
1943 	netif_dbg(rp, rx_status, dev, "%s(), entry %d status %08x\n", __func__,
1944 		  entry, le32_to_cpu(rp->rx_head_desc->rx_status));
1945 
1946 	/* If EOP is set on the next entry, it's a new packet. Send it up. */
1947 	for (count = 0; count < limit; ++count) {
1948 		struct rx_desc *desc = rp->rx_head_desc;
1949 		u32 desc_status = le32_to_cpu(desc->rx_status);
1950 		u32 desc_length = le32_to_cpu(desc->desc_length);
1951 		int data_size = desc_status >> 16;
1952 
1953 		if (desc_status & DescOwn)
1954 			break;
1955 
1956 		netif_dbg(rp, rx_status, dev, "%s() status %08x\n", __func__,
1957 			  desc_status);
1958 
1959 		if ((desc_status & (RxWholePkt | RxErr)) != RxWholePkt) {
			if ((desc_status & RxWholePkt) != RxWholePkt) {
				netdev_warn(dev,
					    "Oversized Ethernet frame spanned multiple buffers, entry %#x length %d status %08x!\n",
					    entry, data_size, desc_status);
1966 				netdev_warn(dev,
1967 					    "Oversized Ethernet frame %p vs %p\n",
1968 					    rp->rx_head_desc,
1969 					    &rp->rx_ring[entry]);
1970 				dev->stats.rx_length_errors++;
1971 			} else if (desc_status & RxErr) {
				/* There was an error. */
1973 				netif_dbg(rp, rx_err, dev,
1974 					  "%s() Rx error %08x\n", __func__,
1975 					  desc_status);
1976 				dev->stats.rx_errors++;
1977 				if (desc_status & 0x0030)
1978 					dev->stats.rx_length_errors++;
1979 				if (desc_status & 0x0048)
1980 					dev->stats.rx_fifo_errors++;
1981 				if (desc_status & 0x0004)
1982 					dev->stats.rx_frame_errors++;
1983 				if (desc_status & 0x0002) {
1984 					/* this can also be updated outside the interrupt handler */
1985 					spin_lock(&rp->lock);
1986 					dev->stats.rx_crc_errors++;
1987 					spin_unlock(&rp->lock);
1988 				}
1989 			}
1990 		} else {
1991 			struct sk_buff *skb = NULL;
1992 			/* Length should omit the CRC */
1993 			int pkt_len = data_size - 4;
1994 			u16 vlan_tci = 0;
1995 
1996 			/* Check if the packet is long enough to accept without
1997 			   copying to a minimally-sized skbuff. */
1998 			if (pkt_len < rx_copybreak)
1999 				skb = netdev_alloc_skb_ip_align(dev, pkt_len);
2000 			if (skb) {
2001 				dma_sync_single_for_cpu(hwdev,
2002 							rp->rx_skbuff_dma[entry],
2003 							rp->rx_buf_sz,
2004 							DMA_FROM_DEVICE);
2005 
2006 				skb_copy_to_linear_data(skb,
2007 						 rp->rx_skbuff[entry]->data,
2008 						 pkt_len);
2009 				skb_put(skb, pkt_len);
2010 				dma_sync_single_for_device(hwdev,
2011 							   rp->rx_skbuff_dma[entry],
2012 							   rp->rx_buf_sz,
2013 							   DMA_FROM_DEVICE);
2014 			} else {
2015 				skb = rp->rx_skbuff[entry];
2016 				if (skb == NULL) {
2017 					netdev_err(dev, "Inconsistent Rx descriptor chain\n");
2018 					break;
2019 				}
2020 				rp->rx_skbuff[entry] = NULL;
2021 				skb_put(skb, pkt_len);
2022 				dma_unmap_single(hwdev,
2023 						 rp->rx_skbuff_dma[entry],
2024 						 rp->rx_buf_sz,
2025 						 DMA_FROM_DEVICE);
2026 			}
2027 
2028 			if (unlikely(desc_length & DescTag))
2029 				vlan_tci = rhine_get_vlan_tci(skb, data_size);
2030 
2031 			skb->protocol = eth_type_trans(skb, dev);
2032 
2033 			if (unlikely(desc_length & DescTag))
2034 				__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
2035 			netif_receive_skb(skb);
2036 
2037 			u64_stats_update_begin(&rp->rx_stats.syncp);
2038 			rp->rx_stats.bytes += pkt_len;
2039 			rp->rx_stats.packets++;
2040 			u64_stats_update_end(&rp->rx_stats.syncp);
2041 		}
2042 		entry = (++rp->cur_rx) % RX_RING_SIZE;
2043 		rp->rx_head_desc = &rp->rx_ring[entry];
2044 	}
2045 
2046 	/* Refill the Rx ring buffers. */
2047 	for (; rp->cur_rx - rp->dirty_rx > 0; rp->dirty_rx++) {
2048 		struct sk_buff *skb;
2049 		entry = rp->dirty_rx % RX_RING_SIZE;
2050 		if (rp->rx_skbuff[entry] == NULL) {
2051 			skb = netdev_alloc_skb(dev, rp->rx_buf_sz);
2052 			rp->rx_skbuff[entry] = skb;
2053 			if (skb == NULL)
2054 				break;	/* Better luck next round. */
2055 			rp->rx_skbuff_dma[entry] =
2056 				dma_map_single(hwdev, skb->data,
2057 					       rp->rx_buf_sz,
2058 					       DMA_FROM_DEVICE);
2059 			if (dma_mapping_error(hwdev,
2060 					      rp->rx_skbuff_dma[entry])) {
2061 				dev_kfree_skb(skb);
2062 				rp->rx_skbuff_dma[entry] = 0;
2063 				break;
2064 			}
2065 			rp->rx_ring[entry].addr = cpu_to_le32(rp->rx_skbuff_dma[entry]);
2066 		}
2067 		rp->rx_ring[entry].rx_status = cpu_to_le32(DescOwn);
2068 	}
2069 
2070 	return count;
2071 }
2072 
static void rhine_restart_tx(struct net_device *dev)
{
2074 	struct rhine_private *rp = netdev_priv(dev);
2075 	void __iomem *ioaddr = rp->base;
2076 	int entry = rp->dirty_tx % TX_RING_SIZE;
2077 	u32 intr_status;
2078 
2079 	/*
2080 	 * If new errors occurred, we need to sort them out before doing Tx.
	 * In that case the interrupt handler will bring us back here soon anyway.
2082 	 */
2083 	intr_status = rhine_get_events(rp);
2084 
2085 	if ((intr_status & IntrTxErrSummary) == 0) {
2086 
2087 		/* We know better than the chip where it should continue. */
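		/* That is the first descriptor not yet reclaimed by rhine_tx(). */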
2088 		iowrite32(rp->tx_ring_dma + entry * sizeof(struct tx_desc),
2089 		       ioaddr + TxRingPtr);
2090 
2091 		iowrite8(ioread8(ioaddr + ChipCmd) | CmdTxOn,
2092 		       ioaddr + ChipCmd);
2093 
2094 		if (rp->tx_ring[entry].desc_length & cpu_to_le32(0x020000))
2095 			/* Tx queues are bits 7-0 (first Tx queue: bit 7) */
2096 			BYTE_REG_BITS_ON(1 << 7, ioaddr + TQWake);
2097 
2098 		iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
2099 		       ioaddr + ChipCmd1);
2100 		IOSYNC;
	} else {
2103 		/* This should never happen */
2104 		netif_warn(rp, tx_err, dev, "another error occurred %08x\n",
2105 			   intr_status);
2106 	}
2107 
2108 }
2109 
2110 static void rhine_slow_event_task(struct work_struct *work)
2111 {
2112 	struct rhine_private *rp =
2113 		container_of(work, struct rhine_private, slow_event_task);
2114 	struct net_device *dev = rp->dev;
2115 	u32 intr_status;
2116 
2117 	mutex_lock(&rp->task_lock);
2118 
2119 	if (!rp->task_enable)
2120 		goto out_unlock;
2121 
2122 	intr_status = rhine_get_events(rp);
2123 	rhine_ack_events(rp, intr_status & RHINE_EVENT_SLOW);
2124 
2125 	if (intr_status & IntrLinkChange)
2126 		rhine_check_media(dev, 0);
2127 
2128 	if (intr_status & IntrPCIErr)
2129 		netif_warn(rp, hw, dev, "PCI error\n");
2130 
2131 	iowrite16(RHINE_EVENT & 0xffff, rp->base + IntrEnable);
2132 
2133 out_unlock:
2134 	mutex_unlock(&rp->task_lock);
2135 }
2136 
2137 static struct rtnl_link_stats64 *
2138 rhine_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
2139 {
2140 	struct rhine_private *rp = netdev_priv(dev);
2141 	unsigned int start;
2142 
2143 	spin_lock_bh(&rp->lock);
2144 	rhine_update_rx_crc_and_missed_errord(rp);
2145 	spin_unlock_bh(&rp->lock);
2146 
2147 	netdev_stats_to_stats64(stats, &dev->stats);
2148 
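	/*
	 * The 64-bit Rx/Tx counters are read with u64_stats retry loops: the
	 * snapshot is retried if a writer updated them while we were copying.
	 */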
2149 	do {
2150 		start = u64_stats_fetch_begin_irq(&rp->rx_stats.syncp);
2151 		stats->rx_packets = rp->rx_stats.packets;
2152 		stats->rx_bytes = rp->rx_stats.bytes;
2153 	} while (u64_stats_fetch_retry_irq(&rp->rx_stats.syncp, start));
2154 
2155 	do {
2156 		start = u64_stats_fetch_begin_irq(&rp->tx_stats.syncp);
2157 		stats->tx_packets = rp->tx_stats.packets;
2158 		stats->tx_bytes = rp->tx_stats.bytes;
2159 	} while (u64_stats_fetch_retry_irq(&rp->tx_stats.syncp, start));
2160 
2161 	return stats;
2162 }
2163 
2164 static void rhine_set_rx_mode(struct net_device *dev)
2165 {
2166 	struct rhine_private *rp = netdev_priv(dev);
2167 	void __iomem *ioaddr = rp->base;
2168 	u32 mc_filter[2];	/* Multicast hash filter */
2169 	u8 rx_mode = 0x0C;	/* Note: 0x02=accept runt, 0x01=accept errs */
2170 	struct netdev_hw_addr *ha;
2171 
2172 	if (dev->flags & IFF_PROMISC) {		/* Set promiscuous. */
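		/* Relative to the default 0x0C, bit 0x10 enables promiscuous reception. */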
2173 		rx_mode = 0x1C;
2174 		iowrite32(0xffffffff, ioaddr + MulticastFilter0);
2175 		iowrite32(0xffffffff, ioaddr + MulticastFilter1);
2176 	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
2177 		   (dev->flags & IFF_ALLMULTI)) {
2178 		/* Too many to match, or accept all multicasts. */
2179 		iowrite32(0xffffffff, ioaddr + MulticastFilter0);
2180 		iowrite32(0xffffffff, ioaddr + MulticastFilter1);
2181 	} else if (rp->quirks & rqMgmt) {
2182 		int i = 0;
2183 		u32 mCAMmask = 0;	/* 32 mCAMs (6105M and better) */
2184 		netdev_for_each_mc_addr(ha, dev) {
2185 			if (i == MCAM_SIZE)
2186 				break;
2187 			rhine_set_cam(ioaddr, i, ha->addr);
2188 			mCAMmask |= 1 << i;
2189 			i++;
2190 		}
2191 		rhine_set_cam_mask(ioaddr, mCAMmask);
2192 	} else {
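		/*
		 * Hash filtering: the top six bits of the Ethernet CRC of each
		 * address (0..63) select one bit of the 64-bit hash table split
		 * across the two 32-bit MulticastFilter registers.
		 */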
2193 		memset(mc_filter, 0, sizeof(mc_filter));
2194 		netdev_for_each_mc_addr(ha, dev) {
2195 			int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
2196 
2197 			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
2198 		}
2199 		iowrite32(mc_filter[0], ioaddr + MulticastFilter0);
2200 		iowrite32(mc_filter[1], ioaddr + MulticastFilter1);
2201 	}
2202 	/* enable/disable VLAN receive filtering */
2203 	if (rp->quirks & rqMgmt) {
2204 		if (dev->flags & IFF_PROMISC)
2205 			BYTE_REG_BITS_OFF(BCR1_VIDFR, ioaddr + PCIBusConfig1);
2206 		else
2207 			BYTE_REG_BITS_ON(BCR1_VIDFR, ioaddr + PCIBusConfig1);
2208 	}
2209 	BYTE_REG_BITS_ON(rx_mode, ioaddr + RxConfig);
2210 }
2211 
2212 static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
2213 {
2214 	struct device *hwdev = dev->dev.parent;
2215 
2216 	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
2217 	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
2218 	strlcpy(info->bus_info, dev_name(hwdev), sizeof(info->bus_info));
2219 }
2220 
2221 static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2222 {
2223 	struct rhine_private *rp = netdev_priv(dev);
2224 	int rc;
2225 
2226 	mutex_lock(&rp->task_lock);
2227 	rc = mii_ethtool_gset(&rp->mii_if, cmd);
2228 	mutex_unlock(&rp->task_lock);
2229 
2230 	return rc;
2231 }
2232 
2233 static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2234 {
2235 	struct rhine_private *rp = netdev_priv(dev);
2236 	int rc;
2237 
2238 	mutex_lock(&rp->task_lock);
2239 	rc = mii_ethtool_sset(&rp->mii_if, cmd);
2240 	rhine_set_carrier(&rp->mii_if);
2241 	mutex_unlock(&rp->task_lock);
2242 
2243 	return rc;
2244 }
2245 
2246 static int netdev_nway_reset(struct net_device *dev)
2247 {
2248 	struct rhine_private *rp = netdev_priv(dev);
2249 
2250 	return mii_nway_restart(&rp->mii_if);
2251 }
2252 
2253 static u32 netdev_get_link(struct net_device *dev)
2254 {
2255 	struct rhine_private *rp = netdev_priv(dev);
2256 
2257 	return mii_link_ok(&rp->mii_if);
2258 }
2259 
2260 static u32 netdev_get_msglevel(struct net_device *dev)
2261 {
2262 	struct rhine_private *rp = netdev_priv(dev);
2263 
2264 	return rp->msg_enable;
2265 }
2266 
2267 static void netdev_set_msglevel(struct net_device *dev, u32 value)
2268 {
2269 	struct rhine_private *rp = netdev_priv(dev);
2270 
2271 	rp->msg_enable = value;
2272 }
2273 
2274 static void rhine_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2275 {
2276 	struct rhine_private *rp = netdev_priv(dev);
2277 
2278 	if (!(rp->quirks & rqWOL))
2279 		return;
2280 
2281 	spin_lock_irq(&rp->lock);
2282 	wol->supported = WAKE_PHY | WAKE_MAGIC |
2283 			 WAKE_UCAST | WAKE_MCAST | WAKE_BCAST;	/* Untested */
2284 	wol->wolopts = rp->wolopts;
2285 	spin_unlock_irq(&rp->lock);
2286 }
2287 
2288 static int rhine_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2289 {
2290 	struct rhine_private *rp = netdev_priv(dev);
2291 	u32 support = WAKE_PHY | WAKE_MAGIC |
2292 		      WAKE_UCAST | WAKE_MCAST | WAKE_BCAST;	/* Untested */
2293 
2294 	if (!(rp->quirks & rqWOL))
2295 		return -EINVAL;
2296 
2297 	if (wol->wolopts & ~support)
2298 		return -EINVAL;
2299 
2300 	spin_lock_irq(&rp->lock);
2301 	rp->wolopts = wol->wolopts;
2302 	spin_unlock_irq(&rp->lock);
2303 
2304 	return 0;
2305 }
2306 
2307 static const struct ethtool_ops netdev_ethtool_ops = {
2308 	.get_drvinfo		= netdev_get_drvinfo,
2309 	.get_settings		= netdev_get_settings,
2310 	.set_settings		= netdev_set_settings,
2311 	.nway_reset		= netdev_nway_reset,
2312 	.get_link		= netdev_get_link,
2313 	.get_msglevel		= netdev_get_msglevel,
2314 	.set_msglevel		= netdev_set_msglevel,
2315 	.get_wol		= rhine_get_wol,
2316 	.set_wol		= rhine_set_wol,
2317 };
2318 
2319 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2320 {
2321 	struct rhine_private *rp = netdev_priv(dev);
2322 	int rc;
2323 
2324 	if (!netif_running(dev))
2325 		return -EINVAL;
2326 
2327 	mutex_lock(&rp->task_lock);
2328 	rc = generic_mii_ioctl(&rp->mii_if, if_mii(rq), cmd, NULL);
2329 	rhine_set_carrier(&rp->mii_if);
2330 	mutex_unlock(&rp->task_lock);
2331 
2332 	return rc;
2333 }
2334 
2335 static int rhine_close(struct net_device *dev)
2336 {
2337 	struct rhine_private *rp = netdev_priv(dev);
2338 	void __iomem *ioaddr = rp->base;
2339 
2340 	rhine_task_disable(rp);
2341 	napi_disable(&rp->napi);
2342 	netif_stop_queue(dev);
2343 
2344 	netif_dbg(rp, ifdown, dev, "Shutting down ethercard, status was %04x\n",
2345 		  ioread16(ioaddr + ChipCmd));
2346 
2347 	/* Switch to loopback mode to avoid hardware races. */
2348 	iowrite8(rp->tx_thresh | 0x02, ioaddr + TxConfig);
2349 
2350 	rhine_irq_disable(rp);
2351 
2352 	/* Stop the chip's Tx and Rx processes. */
2353 	iowrite16(CmdStop, ioaddr + ChipCmd);
2354 
2355 	free_irq(rp->irq, dev);
2356 	free_rbufs(dev);
2357 	free_tbufs(dev);
2358 	free_ring(dev);
2359 
2360 	return 0;
2361 }
2362 
2363 
2364 static void rhine_remove_one_pci(struct pci_dev *pdev)
2365 {
2366 	struct net_device *dev = pci_get_drvdata(pdev);
2367 	struct rhine_private *rp = netdev_priv(dev);
2368 
2369 	unregister_netdev(dev);
2370 
2371 	pci_iounmap(pdev, rp->base);
2372 	pci_release_regions(pdev);
2373 
2374 	free_netdev(dev);
2375 	pci_disable_device(pdev);
2376 }
2377 
2378 static int rhine_remove_one_platform(struct platform_device *pdev)
2379 {
2380 	struct net_device *dev = platform_get_drvdata(pdev);
2381 	struct rhine_private *rp = netdev_priv(dev);
2382 
2383 	unregister_netdev(dev);
2384 
2385 	iounmap(rp->base);
2386 
2387 	free_netdev(dev);
2388 
2389 	return 0;
2390 }
2391 
2392 static void rhine_shutdown_pci(struct pci_dev *pdev)
2393 {
2394 	struct net_device *dev = pci_get_drvdata(pdev);
2395 	struct rhine_private *rp = netdev_priv(dev);
2396 	void __iomem *ioaddr = rp->base;
2397 
2398 	if (!(rp->quirks & rqWOL))
2399 		return; /* Nothing to do for non-WOL adapters */
2400 
2401 	rhine_power_init(dev);
2402 
2403 	/* Make sure we use pattern 0, 1 and not 4, 5 */
2404 	if (rp->quirks & rq6patterns)
2405 		iowrite8(0x04, ioaddr + WOLcgClr);
2406 
2407 	spin_lock(&rp->lock);
2408 
2409 	if (rp->wolopts & WAKE_MAGIC) {
2410 		iowrite8(WOLmagic, ioaddr + WOLcrSet);
2411 		/*
2412 		 * Turn EEPROM-controlled wake-up back on -- some hardware may
2413 		 * not cooperate otherwise.
2414 		 */
2415 		iowrite8(ioread8(ioaddr + ConfigA) | 0x03, ioaddr + ConfigA);
2416 	}
2417 
2418 	if (rp->wolopts & (WAKE_BCAST|WAKE_MCAST))
2419 		iowrite8(WOLbmcast, ioaddr + WOLcgSet);
2420 
2421 	if (rp->wolopts & WAKE_PHY)
2422 		iowrite8(WOLlnkon | WOLlnkoff, ioaddr + WOLcrSet);
2423 
2424 	if (rp->wolopts & WAKE_UCAST)
2425 		iowrite8(WOLucast, ioaddr + WOLcrSet);
2426 
2427 	if (rp->wolopts) {
2428 		/* Enable legacy WOL (for old motherboards) */
2429 		iowrite8(0x01, ioaddr + PwcfgSet);
2430 		iowrite8(ioread8(ioaddr + StickyHW) | 0x04, ioaddr + StickyHW);
2431 	}
2432 
2433 	spin_unlock(&rp->lock);
2434 
2435 	if (system_state == SYSTEM_POWER_OFF && !avoid_D3) {
2436 		iowrite8(ioread8(ioaddr + StickyHW) | 0x03, ioaddr + StickyHW);
2437 
2438 		pci_wake_from_d3(pdev, true);
2439 		pci_set_power_state(pdev, PCI_D3hot);
2440 	}
2441 }
2442 
2443 #ifdef CONFIG_PM_SLEEP
2444 static int rhine_suspend(struct device *device)
2445 {
2446 	struct net_device *dev = dev_get_drvdata(device);
2447 	struct rhine_private *rp = netdev_priv(dev);
2448 
2449 	if (!netif_running(dev))
2450 		return 0;
2451 
2452 	rhine_task_disable(rp);
2453 	rhine_irq_disable(rp);
2454 	napi_disable(&rp->napi);
2455 
2456 	netif_device_detach(dev);
2457 
2458 	if (dev_is_pci(device))
2459 		rhine_shutdown_pci(to_pci_dev(device));
2460 
2461 	return 0;
2462 }
2463 
2464 static int rhine_resume(struct device *device)
2465 {
2466 	struct net_device *dev = dev_get_drvdata(device);
2467 	struct rhine_private *rp = netdev_priv(dev);
2468 
2469 	if (!netif_running(dev))
2470 		return 0;
2471 
2472 	enable_mmio(rp->pioaddr, rp->quirks);
2473 	rhine_power_init(dev);
2474 	free_tbufs(dev);
2475 	free_rbufs(dev);
2476 	alloc_tbufs(dev);
2477 	alloc_rbufs(dev);
2478 	rhine_task_enable(rp);
2479 	spin_lock_bh(&rp->lock);
2480 	init_registers(dev);
2481 	spin_unlock_bh(&rp->lock);
2482 
2483 	netif_device_attach(dev);
2484 
2485 	return 0;
2486 }
2487 
2488 static SIMPLE_DEV_PM_OPS(rhine_pm_ops, rhine_suspend, rhine_resume);
2489 #define RHINE_PM_OPS	(&rhine_pm_ops)
2490 
2491 #else
2492 
2493 #define RHINE_PM_OPS	NULL
2494 
2495 #endif /* !CONFIG_PM_SLEEP */
2496 
2497 static struct pci_driver rhine_driver_pci = {
2498 	.name		= DRV_NAME,
2499 	.id_table	= rhine_pci_tbl,
2500 	.probe		= rhine_init_one_pci,
2501 	.remove		= rhine_remove_one_pci,
2502 	.shutdown	= rhine_shutdown_pci,
2503 	.driver.pm	= RHINE_PM_OPS,
2504 };
2505 
2506 static struct platform_driver rhine_driver_platform = {
2507 	.probe		= rhine_init_one_platform,
2508 	.remove		= rhine_remove_one_platform,
2509 	.driver = {
2510 		.name	= DRV_NAME,
2511 		.owner	= THIS_MODULE,
2512 		.of_match_table	= rhine_of_tbl,
2513 		.pm		= RHINE_PM_OPS,
2514 	}
2515 };
2516 
2517 static struct dmi_system_id rhine_dmi_table[] __initdata = {
2518 	{
2519 		.ident = "EPIA-M",
2520 		.matches = {
2521 			DMI_MATCH(DMI_BIOS_VENDOR, "Award Software International, Inc."),
2522 			DMI_MATCH(DMI_BIOS_VERSION, "6.00 PG"),
2523 		},
2524 	},
2525 	{
2526 		.ident = "KV7",
2527 		.matches = {
2528 			DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies, LTD"),
2529 			DMI_MATCH(DMI_BIOS_VERSION, "6.00 PG"),
2530 		},
2531 	},
2532 	{ NULL }
2533 };
2534 
2535 static int __init rhine_init(void)
2536 {
2537 	int ret_pci, ret_platform;
2538 
/* When built as a module, this is printed whether or not devices are found in probe */
2540 #ifdef MODULE
2541 	pr_info("%s\n", version);
2542 #endif
2543 	if (dmi_check_system(rhine_dmi_table)) {
2544 		/* these BIOSes fail at PXE boot if chip is in D3 */
2545 		avoid_D3 = true;
2546 		pr_warn("Broken BIOS detected, avoid_D3 enabled\n");
	} else if (avoid_D3) {
		pr_info("avoid_D3 set\n");
	}
2550 
2551 	ret_pci = pci_register_driver(&rhine_driver_pci);
2552 	ret_platform = platform_driver_register(&rhine_driver_platform);
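	/* Succeed if at least one of the two bus drivers registered. */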
2553 	if ((ret_pci < 0) && (ret_platform < 0))
2554 		return ret_pci;
2555 
2556 	return 0;
2557 }
2558 
2559 
2560 static void __exit rhine_cleanup(void)
2561 {
2562 	platform_driver_unregister(&rhine_driver_platform);
2563 	pci_unregister_driver(&rhine_driver_pci);
2564 }
2565 
2566 
2567 module_init(rhine_init);
2568 module_exit(rhine_cleanup);
2569