1 /* via-rhine.c: A Linux Ethernet device driver for VIA Rhine family chips. */
2 /*
3 	Written 1998-2001 by Donald Becker.
4 
5 	Current Maintainer: Roger Luethi <rl@hellgate.ch>
6 
7 	This software may be used and distributed according to the terms of
8 	the GNU General Public License (GPL), incorporated herein by reference.
9 	Drivers based on or derived from this code fall under the GPL and must
10 	retain the authorship, copyright and license notice.  This file is not
11 	a complete program and may only be used when the entire operating
12 	system is licensed under the GPL.
13 
14 	This driver is designed for the VIA VT86C100A Rhine-I.
15 	It also works with the Rhine-II (6102) and Rhine-III (6105/6105L/6105LOM
16 	and management NIC 6105M).
17 
18 	The author may be reached as becker@scyld.com, or C/O
19 	Scyld Computing Corporation
20 	410 Severn Ave., Suite 210
21 	Annapolis MD 21403
22 
23 
24 	This driver contains some changes from the original Donald Becker
25 	version. He may or may not be interested in bug reports on this
26 	code. You can find his versions at:
27 	http://www.scyld.com/network/via-rhine.html
28 	[link no longer provides useful info -jgarzik]
29 
30 */
31 
32 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
33 
34 #define DRV_NAME	"via-rhine"
35 #define DRV_VERSION	"1.5.0"
36 #define DRV_RELDATE	"2010-10-09"
37 
38 #include <linux/types.h>
39 
40 /* A few user-configurable values.
41    These may be modified when a driver module is loaded. */
42 static int debug = 0;
43 #define RHINE_MSG_DEFAULT \
44         (0x0000)
45 
46 /* Set the copy breakpoint for the copy-only-tiny-frames scheme.
47    Setting to > 1518 effectively disables this feature. */
48 #if defined(__alpha__) || defined(__arm__) || defined(__hppa__) || \
49 	defined(CONFIG_SPARC) || defined(__ia64__) ||		   \
50 	defined(__sh__) || defined(__mips__)
51 static int rx_copybreak = 1518;
52 #else
53 static int rx_copybreak;
54 #endif
55 
56 /* Work-around for broken BIOSes: they are unable to get the chip back out of
57    power state D3 so PXE booting fails. bootparam(7): via-rhine.avoid_D3=1 */
58 static bool avoid_D3;
59 
60 /*
61  * In case you are looking for 'options[]' or 'full_duplex[]', they
62  * are gone. Use ethtool(8) instead.
63  */
64 
65 /* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
66    The Rhine has a 64 element 8390-like hash table. */
67 static const int multicast_filter_limit = 32;
68 
69 
70 /* Operational parameters that are set at compile time. */
71 
72 /* Keep the ring sizes a power of two for compile efficiency.
73    The compiler will convert <unsigned>'%'<2^N> into a bit mask.
74    Making the Tx ring too large decreases the effectiveness of channel
75    bonding and packet priority.
76    There are no ill effects from too-large receive rings. */
77 #define TX_RING_SIZE	16
78 #define TX_QUEUE_LEN	10	/* Limit ring entries actually used. */
79 #define RX_RING_SIZE	64
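
/*
 * Illustrative example (not driver code): with TX_RING_SIZE == 16, the
 * ring index computation used in rhine_start_tx(),
 *
 *	entry = rp->cur_tx % TX_RING_SIZE;
 *
 * can be compiled down to the bit mask
 *
 *	entry = rp->cur_tx & (TX_RING_SIZE - 1);
 *
 * which is why the ring sizes above must remain powers of two.
 */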
80 
81 /* Operational parameters that usually are not changed. */
82 
83 /* Time in jiffies before concluding the transmitter is hung. */
84 #define TX_TIMEOUT	(2*HZ)
85 
86 #define PKT_BUF_SZ	1536	/* Size of each temporary Rx buffer.*/
87 
88 #include <linux/module.h>
89 #include <linux/moduleparam.h>
90 #include <linux/kernel.h>
91 #include <linux/string.h>
92 #include <linux/timer.h>
93 #include <linux/errno.h>
94 #include <linux/ioport.h>
95 #include <linux/interrupt.h>
96 #include <linux/pci.h>
97 #include <linux/dma-mapping.h>
98 #include <linux/netdevice.h>
99 #include <linux/etherdevice.h>
100 #include <linux/skbuff.h>
101 #include <linux/init.h>
102 #include <linux/delay.h>
103 #include <linux/mii.h>
104 #include <linux/ethtool.h>
105 #include <linux/crc32.h>
106 #include <linux/if_vlan.h>
107 #include <linux/bitops.h>
108 #include <linux/workqueue.h>
109 #include <asm/processor.h>	/* Processor type for cache alignment. */
110 #include <asm/io.h>
111 #include <asm/irq.h>
112 #include <asm/uaccess.h>
113 #include <linux/dmi.h>
114 
115 /* These identify the driver base version and may not be removed. */
116 static const char version[] =
117 	"v1.10-LK" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker";
118 
119 /* This driver was written to use PCI memory space. Some early versions
120    of the Rhine may only work correctly with I/O space accesses. */
#ifdef CONFIG_VIA_RHINE_MMIO
#define USE_MMIO
#endif
125 
126 MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
127 MODULE_DESCRIPTION("VIA Rhine PCI Fast Ethernet driver");
128 MODULE_LICENSE("GPL");
129 
130 module_param(debug, int, 0);
131 module_param(rx_copybreak, int, 0);
132 module_param(avoid_D3, bool, 0);
133 MODULE_PARM_DESC(debug, "VIA Rhine debug message flags");
134 MODULE_PARM_DESC(rx_copybreak, "VIA Rhine copy breakpoint for copy-only-tiny-frames");
135 MODULE_PARM_DESC(avoid_D3, "Avoid power state D3 (work-around for broken BIOSes)");
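
/*
 * Example usage (parameter values are illustrative): the parameters above
 * may be given at module load time,
 *
 *	modprobe via-rhine debug=0x0001 rx_copybreak=1518 avoid_D3=1
 *
 * or, with the driver built in, on the kernel command line as
 * via-rhine.avoid_D3=1 (the BIOS work-around mentioned above).
 */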
136 
137 #define MCAM_SIZE	32
138 #define VCAM_SIZE	32
139 
140 /*
141 		Theory of Operation
142 
143 I. Board Compatibility
144 
This driver is designed for the VIA VT86C100A Rhine-I PCI Fast Ethernet
controller.
147 
148 II. Board-specific settings
149 
150 Boards with this chip are functional only in a bus-master PCI slot.
151 
152 Many operational settings are loaded from the EEPROM to the Config word at
153 offset 0x78. For most of these settings, this driver assumes that they are
154 correct.
155 If this driver is compiled to use PCI memory space operations the EEPROM
156 must be configured to enable memory ops.
157 
158 III. Driver operation
159 
160 IIIa. Ring buffers
161 
This driver uses two fixed-size descriptor lists
163 formed into rings by a branch from the final descriptor to the beginning of
164 the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
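
As an illustration (a sketch only, not the actual ring sizes), a four-entry
ring is chained like this:

	desc[0].next_desc --> desc[1]
	desc[1].next_desc --> desc[2]
	desc[2].next_desc --> desc[3]
	desc[3].next_desc --> desc[0]	<-- branch back to the start

alloc_rbufs() and alloc_tbufs() below build exactly this chain from the
DMA addresses of the descriptors.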
165 
166 IIIb/c. Transmit/Receive Structure
167 
168 This driver attempts to use a zero-copy receive and transmit scheme.
169 
170 Alas, all data buffers are required to start on a 32 bit boundary, so
171 the driver must often copy transmit packets into bounce buffers.
172 
173 The driver allocates full frame size skbuffs for the Rx ring buffers at
174 open() time and passes the skb->data field to the chip as receive data
175 buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
176 a fresh skbuff is allocated and the frame is copied to the new skbuff.
177 When the incoming frame is larger, the skbuff is passed directly up the
178 protocol stack. Buffers consumed this way are replaced by newly allocated
179 skbuffs in the last phase of rhine_rx().
180 
181 The RX_COPYBREAK value is chosen to trade-off the memory wasted by
182 using a full-sized skbuff for small frames vs. the copying costs of larger
183 frames. New boards are typically used in generously configured machines
184 and the underfilled buffers have negligible impact compared to the benefit of
185 a single allocation size, so the default value of zero results in never
186 copying packets. When copying is done, the cost is usually mitigated by using
187 a combined copy/checksum routine. Copying also preloads the cache, which is
188 most useful with small frames.
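
In pseudo-code, the per-frame decision is roughly (a sketch of the logic in
rhine_rx(), not verbatim):

	if (pkt_len < rx_copybreak) {
		copy the frame into a freshly allocated small skb;
		leave the original ring buffer in place for reuse;
	} else {
		pass the full-sized ring skb up the stack;
		replace it with a newly allocated skb afterwards;
	}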
189 
190 Since the VIA chips are only able to transfer data to buffers on 32 bit
191 boundaries, the IP header at offset 14 in an ethernet frame isn't
192 longword aligned for further processing. Copying these unaligned buffers
193 has the beneficial effect of 16-byte aligning the IP header.
194 
195 IIId. Synchronization
196 
197 The driver runs as two independent, single-threaded flows of control. One
198 is the send-packet routine, which enforces single-threaded use by the
199 netdev_priv(dev)->lock spinlock. The other thread is the interrupt handler,
200 which is single threaded by the hardware and interrupt handling software.
201 
202 The send packet thread has partial control over the Tx ring. It locks the
203 netdev_priv(dev)->lock whenever it's queuing a Tx packet. If the next slot in
204 the ring is not available it stops the transmit queue by
205 calling netif_stop_queue.
206 
207 The interrupt handler has exclusive control over the Rx ring and records stats
208 from the Tx ring. After reaping the stats, it marks the Tx queue entry as
209 empty by incrementing the dirty_tx mark. If at least half of the entries in
the Tx ring are available the transmit queue is woken up if it was stopped.
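
As a rough sketch of that interaction:

	rhine_start_tx()		rhine_tx() (from NAPI poll)
	----------------		---------------------------
	fill tx_ring[cur_tx]		while (dirty_tx != cur_tx)
	wmb(); set DescOwn			reap status, free skb
	cur_tx++				dirty_tx++
	stop queue if ring full		wake queue once there is room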
211 
212 IV. Notes
213 
214 IVb. References
215 
216 Preliminary VT86C100A manual from http://www.via.com.tw/
217 http://www.scyld.com/expert/100mbps.html
218 http://www.scyld.com/expert/NWay.html
219 ftp://ftp.via.com.tw/public/lan/Products/NIC/VT86C100A/Datasheet/VT86C100A03.pdf
220 ftp://ftp.via.com.tw/public/lan/Products/NIC/VT6102/Datasheet/VT6102_021.PDF
221 
222 
223 IVc. Errata
224 
The VT86C100A manual is not a reliable source of information.
226 The 3043 chip does not handle unaligned transmit or receive buffers, resulting
227 in significant performance degradation for bounce buffer copies on transmit
228 and unaligned IP headers on receive.
229 The chip does not pad to minimum transmit length.
230 
231 */
232 
233 
234 /* This table drives the PCI probe routines. It's mostly boilerplate in all
235    of the drivers, and will likely be provided by some future kernel.
   Note the matching code -- the first table entry matches all 56** cards but
   the second only the 1234 card.
238 */
239 
240 enum rhine_revs {
241 	VT86C100A	= 0x00,
242 	VTunknown0	= 0x20,
243 	VT6102		= 0x40,
244 	VT8231		= 0x50,	/* Integrated MAC */
245 	VT8233		= 0x60,	/* Integrated MAC */
246 	VT8235		= 0x74,	/* Integrated MAC */
247 	VT8237		= 0x78,	/* Integrated MAC */
248 	VTunknown1	= 0x7C,
249 	VT6105		= 0x80,
250 	VT6105_B0	= 0x83,
251 	VT6105L		= 0x8A,
252 	VT6107		= 0x8C,
253 	VTunknown2	= 0x8E,
254 	VT6105M		= 0x90,	/* Management adapter */
255 };
256 
257 enum rhine_quirks {
258 	rqWOL		= 0x0001,	/* Wake-On-LAN support */
259 	rqForceReset	= 0x0002,
260 	rq6patterns	= 0x0040,	/* 6 instead of 4 patterns for WOL */
261 	rqStatusWBRace	= 0x0080,	/* Tx Status Writeback Error possible */
262 	rqRhineI	= 0x0100,	/* See comment below */
263 };
264 /*
265  * rqRhineI: VT86C100A (aka Rhine-I) uses different bits to enable
266  * MMIO as well as for the collision counter and the Tx FIFO underflow
 * indicator. In addition, Tx and Rx buffers need to be 4-byte aligned.
268  */
269 
270 /* Beware of PCI posted writes */
271 #define IOSYNC	do { ioread8(ioaddr + StationAddr); } while (0)
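
/*
 * Illustrative use (this is how rhine_chip_reset() below applies it): the
 * dummy read forces a posted write out to the chip before its effect is
 * tested:
 *
 *	iowrite8(Cmd1Reset, ioaddr + ChipCmd1);
 *	IOSYNC;
 */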
272 
273 static DEFINE_PCI_DEVICE_TABLE(rhine_pci_tbl) = {
274 	{ 0x1106, 0x3043, PCI_ANY_ID, PCI_ANY_ID, },	/* VT86C100A */
275 	{ 0x1106, 0x3065, PCI_ANY_ID, PCI_ANY_ID, },	/* VT6102 */
276 	{ 0x1106, 0x3106, PCI_ANY_ID, PCI_ANY_ID, },	/* 6105{,L,LOM} */
277 	{ 0x1106, 0x3053, PCI_ANY_ID, PCI_ANY_ID, },	/* VT6105M */
278 	{ }	/* terminate list */
279 };
280 MODULE_DEVICE_TABLE(pci, rhine_pci_tbl);
281 
282 
283 /* Offsets to the device registers. */
284 enum register_offsets {
285 	StationAddr=0x00, RxConfig=0x06, TxConfig=0x07, ChipCmd=0x08,
286 	ChipCmd1=0x09, TQWake=0x0A,
287 	IntrStatus=0x0C, IntrEnable=0x0E,
288 	MulticastFilter0=0x10, MulticastFilter1=0x14,
289 	RxRingPtr=0x18, TxRingPtr=0x1C, GFIFOTest=0x54,
290 	MIIPhyAddr=0x6C, MIIStatus=0x6D, PCIBusConfig=0x6E, PCIBusConfig1=0x6F,
291 	MIICmd=0x70, MIIRegAddr=0x71, MIIData=0x72, MACRegEEcsr=0x74,
292 	ConfigA=0x78, ConfigB=0x79, ConfigC=0x7A, ConfigD=0x7B,
293 	RxMissed=0x7C, RxCRCErrs=0x7E, MiscCmd=0x81,
294 	StickyHW=0x83, IntrStatus2=0x84,
295 	CamMask=0x88, CamCon=0x92, CamAddr=0x93,
296 	WOLcrSet=0xA0, PwcfgSet=0xA1, WOLcgSet=0xA3, WOLcrClr=0xA4,
297 	WOLcrClr1=0xA6, WOLcgClr=0xA7,
298 	PwrcsrSet=0xA8, PwrcsrSet1=0xA9, PwrcsrClr=0xAC, PwrcsrClr1=0xAD,
299 };
300 
301 /* Bits in ConfigD */
302 enum backoff_bits {
303 	BackOptional=0x01, BackModify=0x02,
304 	BackCaptureEffect=0x04, BackRandom=0x08
305 };
306 
307 /* Bits in the TxConfig (TCR) register */
308 enum tcr_bits {
309 	TCR_PQEN=0x01,
310 	TCR_LB0=0x02,		/* loopback[0] */
311 	TCR_LB1=0x04,		/* loopback[1] */
312 	TCR_OFSET=0x08,
313 	TCR_RTGOPT=0x10,
314 	TCR_RTFT0=0x20,
315 	TCR_RTFT1=0x40,
316 	TCR_RTSF=0x80,
317 };
318 
319 /* Bits in the CamCon (CAMC) register */
320 enum camcon_bits {
321 	CAMC_CAMEN=0x01,
322 	CAMC_VCAMSL=0x02,
323 	CAMC_CAMWR=0x04,
324 	CAMC_CAMRD=0x08,
325 };
326 
327 /* Bits in the PCIBusConfig1 (BCR1) register */
328 enum bcr1_bits {
329 	BCR1_POT0=0x01,
330 	BCR1_POT1=0x02,
331 	BCR1_POT2=0x04,
332 	BCR1_CTFT0=0x08,
333 	BCR1_CTFT1=0x10,
334 	BCR1_CTSF=0x20,
335 	BCR1_TXQNOBK=0x40,	/* for VT6105 */
336 	BCR1_VIDFR=0x80,	/* for VT6105 */
337 	BCR1_MED0=0x40,		/* for VT6102 */
338 	BCR1_MED1=0x80,		/* for VT6102 */
339 };
340 
341 #ifdef USE_MMIO
/* Registers verified to read back the same value via MMIO and PIO. */
343 static const int mmio_verify_registers[] = {
344 	RxConfig, TxConfig, IntrEnable, ConfigA, ConfigB, ConfigC, ConfigD,
345 	0
346 };
347 #endif
348 
349 /* Bits in the interrupt status/mask registers. */
350 enum intr_status_bits {
351 	IntrRxDone	= 0x0001,
352 	IntrTxDone	= 0x0002,
353 	IntrRxErr	= 0x0004,
354 	IntrTxError	= 0x0008,
355 	IntrRxEmpty	= 0x0020,
356 	IntrPCIErr	= 0x0040,
357 	IntrStatsMax	= 0x0080,
358 	IntrRxEarly	= 0x0100,
359 	IntrTxUnderrun	= 0x0210,
360 	IntrRxOverflow	= 0x0400,
361 	IntrRxDropped	= 0x0800,
362 	IntrRxNoBuf	= 0x1000,
363 	IntrTxAborted	= 0x2000,
364 	IntrLinkChange	= 0x4000,
365 	IntrRxWakeUp	= 0x8000,
366 	IntrTxDescRace		= 0x080000,	/* mapped from IntrStatus2 */
367 	IntrNormalSummary	= IntrRxDone | IntrTxDone,
368 	IntrTxErrSummary	= IntrTxDescRace | IntrTxAborted | IntrTxError |
369 				  IntrTxUnderrun,
370 };
371 
372 /* Bits in WOLcrSet/WOLcrClr and PwrcsrSet/PwrcsrClr */
373 enum wol_bits {
374 	WOLucast	= 0x10,
375 	WOLmagic	= 0x20,
376 	WOLbmcast	= 0x30,
377 	WOLlnkon	= 0x40,
378 	WOLlnkoff	= 0x80,
379 };
380 
381 /* The Rx and Tx buffer descriptors. */
382 struct rx_desc {
383 	__le32 rx_status;
384 	__le32 desc_length; /* Chain flag, Buffer/frame length */
385 	__le32 addr;
386 	__le32 next_desc;
387 };
388 struct tx_desc {
389 	__le32 tx_status;
390 	__le32 desc_length; /* Chain flag, Tx Config, Frame length */
391 	__le32 addr;
392 	__le32 next_desc;
393 };
394 
395 /* Initial value for tx_desc.desc_length, Buffer size goes to bits 0-10 */
396 #define TXDESC		0x00e08000
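
/*
 * Example: rhine_start_tx() below builds the field as
 *
 *	desc_length = cpu_to_le32(TXDESC | frame_len);
 *
 * where frame_len (shorthand for skb->len padded to at least ETH_ZLEN)
 * occupies bits 0-10 and the chain/config bits of TXDESC are left intact.
 */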
397 
398 enum rx_status_bits {
399 	RxOK=0x8000, RxWholePkt=0x0300, RxErr=0x008F
400 };
401 
402 /* Bits in *_desc.*_status */
403 enum desc_status_bits {
404 	DescOwn=0x80000000
405 };
406 
407 /* Bits in *_desc.*_length */
408 enum desc_length_bits {
409 	DescTag=0x00010000
410 };
411 
412 /* Bits in ChipCmd. */
413 enum chip_cmd_bits {
414 	CmdInit=0x01, CmdStart=0x02, CmdStop=0x04, CmdRxOn=0x08,
415 	CmdTxOn=0x10, Cmd1TxDemand=0x20, CmdRxDemand=0x40,
416 	Cmd1EarlyRx=0x01, Cmd1EarlyTx=0x02, Cmd1FDuplex=0x04,
417 	Cmd1NoTxPoll=0x08, Cmd1Reset=0x80,
418 };
419 
420 struct rhine_stats {
421 	u64		packets;
422 	u64		bytes;
423 	struct u64_stats_sync syncp;
424 };
425 
426 struct rhine_private {
427 	/* Bit mask for configured VLAN ids */
428 	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
429 
430 	/* Descriptor rings */
431 	struct rx_desc *rx_ring;
432 	struct tx_desc *tx_ring;
433 	dma_addr_t rx_ring_dma;
434 	dma_addr_t tx_ring_dma;
435 
436 	/* The addresses of receive-in-place skbuffs. */
437 	struct sk_buff *rx_skbuff[RX_RING_SIZE];
438 	dma_addr_t rx_skbuff_dma[RX_RING_SIZE];
439 
440 	/* The saved address of a sent-in-place packet/buffer, for later free(). */
441 	struct sk_buff *tx_skbuff[TX_RING_SIZE];
442 	dma_addr_t tx_skbuff_dma[TX_RING_SIZE];
443 
444 	/* Tx bounce buffers (Rhine-I only) */
445 	unsigned char *tx_buf[TX_RING_SIZE];
446 	unsigned char *tx_bufs;
447 	dma_addr_t tx_bufs_dma;
448 
449 	struct pci_dev *pdev;
450 	long pioaddr;
451 	struct net_device *dev;
452 	struct napi_struct napi;
453 	spinlock_t lock;
454 	struct mutex task_lock;
455 	bool task_enable;
456 	struct work_struct slow_event_task;
457 	struct work_struct reset_task;
458 
459 	u32 msg_enable;
460 
461 	/* Frequently used values: keep some adjacent for cache effect. */
462 	u32 quirks;
463 	struct rx_desc *rx_head_desc;
464 	unsigned int cur_rx, dirty_rx;	/* Producer/consumer ring indices */
465 	unsigned int cur_tx, dirty_tx;
466 	unsigned int rx_buf_sz;		/* Based on MTU+slack. */
467 	struct rhine_stats rx_stats;
468 	struct rhine_stats tx_stats;
469 	u8 wolopts;
470 
471 	u8 tx_thresh, rx_thresh;
472 
473 	struct mii_if_info mii_if;
474 	void __iomem *base;
475 };
476 
477 #define BYTE_REG_BITS_ON(x, p)      do { iowrite8((ioread8((p))|(x)), (p)); } while (0)
478 #define WORD_REG_BITS_ON(x, p)      do { iowrite16((ioread16((p))|(x)), (p)); } while (0)
479 #define DWORD_REG_BITS_ON(x, p)     do { iowrite32((ioread32((p))|(x)), (p)); } while (0)
480 
481 #define BYTE_REG_BITS_IS_ON(x, p)   (ioread8((p)) & (x))
482 #define WORD_REG_BITS_IS_ON(x, p)   (ioread16((p)) & (x))
483 #define DWORD_REG_BITS_IS_ON(x, p)  (ioread32((p)) & (x))
484 
485 #define BYTE_REG_BITS_OFF(x, p)     do { iowrite8(ioread8((p)) & (~(x)), (p)); } while (0)
486 #define WORD_REG_BITS_OFF(x, p)     do { iowrite16(ioread16((p)) & (~(x)), (p)); } while (0)
487 #define DWORD_REG_BITS_OFF(x, p)    do { iowrite32(ioread32((p)) & (~(x)), (p)); } while (0)
488 
489 #define BYTE_REG_BITS_SET(x, m, p)   do { iowrite8((ioread8((p)) & (~(m)))|(x), (p)); } while (0)
490 #define WORD_REG_BITS_SET(x, m, p)   do { iowrite16((ioread16((p)) & (~(m)))|(x), (p)); } while (0)
491 #define DWORD_REG_BITS_SET(x, m, p)  do { iowrite32((ioread32((p)) & (~(m)))|(x), (p)); } while (0)
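
/*
 * Usage sketch: the _SET variants perform a read-modify-write under a mask,
 * i.e. new = (old & ~mask) | bits. For example (taken from
 * rhine_kick_tx_threshold() below),
 *
 *	BYTE_REG_BITS_SET(rp->tx_thresh, 0x80, ioaddr + TxConfig);
 *
 * clears the 0x80 mask bits of TxConfig and ORs in the new threshold.
 */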
492 
493 
494 static int  mdio_read(struct net_device *dev, int phy_id, int location);
495 static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
496 static int  rhine_open(struct net_device *dev);
497 static void rhine_reset_task(struct work_struct *work);
498 static void rhine_slow_event_task(struct work_struct *work);
499 static void rhine_tx_timeout(struct net_device *dev);
500 static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
501 				  struct net_device *dev);
502 static irqreturn_t rhine_interrupt(int irq, void *dev_instance);
503 static void rhine_tx(struct net_device *dev);
504 static int rhine_rx(struct net_device *dev, int limit);
505 static void rhine_set_rx_mode(struct net_device *dev);
506 static struct rtnl_link_stats64 *rhine_get_stats64(struct net_device *dev,
507 	       struct rtnl_link_stats64 *stats);
508 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
509 static const struct ethtool_ops netdev_ethtool_ops;
510 static int  rhine_close(struct net_device *dev);
511 static int rhine_vlan_rx_add_vid(struct net_device *dev,
512 				 __be16 proto, u16 vid);
513 static int rhine_vlan_rx_kill_vid(struct net_device *dev,
514 				  __be16 proto, u16 vid);
515 static void rhine_restart_tx(struct net_device *dev);
516 
517 static void rhine_wait_bit(struct rhine_private *rp, u8 reg, u8 mask, bool low)
518 {
519 	void __iomem *ioaddr = rp->base;
520 	int i;
521 
522 	for (i = 0; i < 1024; i++) {
523 		bool has_mask_bits = !!(ioread8(ioaddr + reg) & mask);
524 
525 		if (low ^ has_mask_bits)
526 			break;
527 		udelay(10);
528 	}
529 	if (i > 64) {
		netif_dbg(rp, hw, rp->dev,
			  "%s bit wait (%02x/%02x) cycle count: %04d\n",
			  low ? "low" : "high", reg, mask, i);
532 	}
533 }
534 
535 static void rhine_wait_bit_high(struct rhine_private *rp, u8 reg, u8 mask)
536 {
537 	rhine_wait_bit(rp, reg, mask, false);
538 }
539 
540 static void rhine_wait_bit_low(struct rhine_private *rp, u8 reg, u8 mask)
541 {
542 	rhine_wait_bit(rp, reg, mask, true);
543 }
544 
545 static u32 rhine_get_events(struct rhine_private *rp)
546 {
547 	void __iomem *ioaddr = rp->base;
548 	u32 intr_status;
549 
550 	intr_status = ioread16(ioaddr + IntrStatus);
551 	/* On Rhine-II, Bit 3 indicates Tx descriptor write-back race. */
552 	if (rp->quirks & rqStatusWBRace)
553 		intr_status |= ioread8(ioaddr + IntrStatus2) << 16;
554 	return intr_status;
555 }
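
/*
 * Example: with rqStatusWBRace set, bit 3 of IntrStatus2 (0x08) is shifted
 * into the upper half of the returned word, 0x08 << 16 == 0x080000, which
 * is exactly the IntrTxDescRace value defined above.
 */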
556 
557 static void rhine_ack_events(struct rhine_private *rp, u32 mask)
558 {
559 	void __iomem *ioaddr = rp->base;
560 
561 	if (rp->quirks & rqStatusWBRace)
562 		iowrite8(mask >> 16, ioaddr + IntrStatus2);
563 	iowrite16(mask, ioaddr + IntrStatus);
564 	mmiowb();
565 }
566 
567 /*
568  * Get power related registers into sane state.
569  * Notify user about past WOL event.
570  */
571 static void rhine_power_init(struct net_device *dev)
572 {
573 	struct rhine_private *rp = netdev_priv(dev);
574 	void __iomem *ioaddr = rp->base;
575 	u16 wolstat;
576 
577 	if (rp->quirks & rqWOL) {
578 		/* Make sure chip is in power state D0 */
579 		iowrite8(ioread8(ioaddr + StickyHW) & 0xFC, ioaddr + StickyHW);
580 
581 		/* Disable "force PME-enable" */
582 		iowrite8(0x80, ioaddr + WOLcgClr);
583 
584 		/* Clear power-event config bits (WOL) */
585 		iowrite8(0xFF, ioaddr + WOLcrClr);
586 		/* More recent cards can manage two additional patterns */
587 		if (rp->quirks & rq6patterns)
588 			iowrite8(0x03, ioaddr + WOLcrClr1);
589 
590 		/* Save power-event status bits */
591 		wolstat = ioread8(ioaddr + PwrcsrSet);
592 		if (rp->quirks & rq6patterns)
593 			wolstat |= (ioread8(ioaddr + PwrcsrSet1) & 0x03) << 8;
594 
595 		/* Clear power-event status bits */
596 		iowrite8(0xFF, ioaddr + PwrcsrClr);
597 		if (rp->quirks & rq6patterns)
598 			iowrite8(0x03, ioaddr + PwrcsrClr1);
599 
600 		if (wolstat) {
601 			char *reason;
602 			switch (wolstat) {
603 			case WOLmagic:
604 				reason = "Magic packet";
605 				break;
606 			case WOLlnkon:
607 				reason = "Link went up";
608 				break;
609 			case WOLlnkoff:
610 				reason = "Link went down";
611 				break;
612 			case WOLucast:
613 				reason = "Unicast packet";
614 				break;
615 			case WOLbmcast:
616 				reason = "Multicast/broadcast packet";
617 				break;
618 			default:
619 				reason = "Unknown";
620 			}
621 			netdev_info(dev, "Woke system up. Reason: %s\n",
622 				    reason);
623 		}
624 	}
625 }
626 
627 static void rhine_chip_reset(struct net_device *dev)
628 {
629 	struct rhine_private *rp = netdev_priv(dev);
630 	void __iomem *ioaddr = rp->base;
631 	u8 cmd1;
632 
633 	iowrite8(Cmd1Reset, ioaddr + ChipCmd1);
634 	IOSYNC;
635 
636 	if (ioread8(ioaddr + ChipCmd1) & Cmd1Reset) {
637 		netdev_info(dev, "Reset not complete yet. Trying harder.\n");
638 
639 		/* Force reset */
640 		if (rp->quirks & rqForceReset)
641 			iowrite8(0x40, ioaddr + MiscCmd);
642 
643 		/* Reset can take somewhat longer (rare) */
644 		rhine_wait_bit_low(rp, ChipCmd1, Cmd1Reset);
645 	}
646 
647 	cmd1 = ioread8(ioaddr + ChipCmd1);
648 	netif_info(rp, hw, dev, "Reset %s\n", (cmd1 & Cmd1Reset) ?
649 		   "failed" : "succeeded");
650 }
651 
652 #ifdef USE_MMIO
653 static void enable_mmio(long pioaddr, u32 quirks)
654 {
655 	int n;
656 	if (quirks & rqRhineI) {
657 		/* More recent docs say that this bit is reserved ... */
658 		n = inb(pioaddr + ConfigA) | 0x20;
659 		outb(n, pioaddr + ConfigA);
660 	} else {
661 		n = inb(pioaddr + ConfigD) | 0x80;
662 		outb(n, pioaddr + ConfigD);
663 	}
664 }
665 #endif
666 
667 /*
668  * Loads bytes 0x00-0x05, 0x6E-0x6F, 0x78-0x7B from EEPROM
669  * (plus 0x6C for Rhine-I/II)
670  */
671 static void rhine_reload_eeprom(long pioaddr, struct net_device *dev)
672 {
673 	struct rhine_private *rp = netdev_priv(dev);
674 	void __iomem *ioaddr = rp->base;
675 	int i;
676 
677 	outb(0x20, pioaddr + MACRegEEcsr);
678 	for (i = 0; i < 1024; i++) {
679 		if (!(inb(pioaddr + MACRegEEcsr) & 0x20))
680 			break;
681 	}
682 	if (i > 512)
683 		pr_info("%4d cycles used @ %s:%d\n", i, __func__, __LINE__);
684 
685 #ifdef USE_MMIO
686 	/*
687 	 * Reloading from EEPROM overwrites ConfigA-D, so we must re-enable
688 	 * MMIO. If reloading EEPROM was done first this could be avoided, but
689 	 * it is not known if that still works with the "win98-reboot" problem.
690 	 */
691 	enable_mmio(pioaddr, rp->quirks);
692 #endif
693 
694 	/* Turn off EEPROM-controlled wake-up (magic packet) */
695 	if (rp->quirks & rqWOL)
696 		iowrite8(ioread8(ioaddr + ConfigA) & 0xFC, ioaddr + ConfigA);
}
699 
700 #ifdef CONFIG_NET_POLL_CONTROLLER
701 static void rhine_poll(struct net_device *dev)
702 {
703 	struct rhine_private *rp = netdev_priv(dev);
704 	const int irq = rp->pdev->irq;
705 
706 	disable_irq(irq);
707 	rhine_interrupt(irq, dev);
708 	enable_irq(irq);
709 }
710 #endif
711 
712 static void rhine_kick_tx_threshold(struct rhine_private *rp)
713 {
714 	if (rp->tx_thresh < 0xe0) {
715 		void __iomem *ioaddr = rp->base;
716 
717 		rp->tx_thresh += 0x20;
718 		BYTE_REG_BITS_SET(rp->tx_thresh, 0x80, ioaddr + TxConfig);
719 	}
720 }
721 
722 static void rhine_tx_err(struct rhine_private *rp, u32 status)
723 {
724 	struct net_device *dev = rp->dev;
725 
726 	if (status & IntrTxAborted) {
727 		netif_info(rp, tx_err, dev,
728 			   "Abort %08x, frame dropped\n", status);
729 	}
730 
731 	if (status & IntrTxUnderrun) {
732 		rhine_kick_tx_threshold(rp);
		netif_info(rp, tx_err, dev,
			   "Transmitter underrun, Tx threshold now %02x\n",
			   rp->tx_thresh);
735 	}
736 
737 	if (status & IntrTxDescRace)
738 		netif_info(rp, tx_err, dev, "Tx descriptor write-back race\n");
739 
740 	if ((status & IntrTxError) &&
741 	    (status & (IntrTxAborted | IntrTxUnderrun | IntrTxDescRace)) == 0) {
742 		rhine_kick_tx_threshold(rp);
		netif_info(rp, tx_err, dev,
			   "Unspecified error. Tx threshold now %02x\n",
			   rp->tx_thresh);
745 	}
746 
747 	rhine_restart_tx(dev);
748 }
749 
static void rhine_update_rx_crc_and_missed_errors(struct rhine_private *rp)
751 {
752 	void __iomem *ioaddr = rp->base;
753 	struct net_device_stats *stats = &rp->dev->stats;
754 
755 	stats->rx_crc_errors    += ioread16(ioaddr + RxCRCErrs);
756 	stats->rx_missed_errors += ioread16(ioaddr + RxMissed);
757 
758 	/*
759 	 * Clears the "tally counters" for CRC errors and missed frames(?).
760 	 * It has been reported that some chips need a write of 0 to clear
761 	 * these, for others the counters are set to 1 when written to and
762 	 * instead cleared when read. So we clear them both ways ...
763 	 */
764 	iowrite32(0, ioaddr + RxMissed);
765 	ioread16(ioaddr + RxCRCErrs);
766 	ioread16(ioaddr + RxMissed);
767 }
768 
769 #define RHINE_EVENT_NAPI_RX	(IntrRxDone | \
770 				 IntrRxErr | \
771 				 IntrRxEmpty | \
772 				 IntrRxOverflow	| \
773 				 IntrRxDropped | \
774 				 IntrRxNoBuf | \
775 				 IntrRxWakeUp)
776 
777 #define RHINE_EVENT_NAPI_TX_ERR	(IntrTxError | \
778 				 IntrTxAborted | \
779 				 IntrTxUnderrun | \
780 				 IntrTxDescRace)
781 #define RHINE_EVENT_NAPI_TX	(IntrTxDone | RHINE_EVENT_NAPI_TX_ERR)
782 
783 #define RHINE_EVENT_NAPI	(RHINE_EVENT_NAPI_RX | \
784 				 RHINE_EVENT_NAPI_TX | \
785 				 IntrStatsMax)
786 #define RHINE_EVENT_SLOW	(IntrPCIErr | IntrLinkChange)
787 #define RHINE_EVENT		(RHINE_EVENT_NAPI | RHINE_EVENT_SLOW)
788 
789 static int rhine_napipoll(struct napi_struct *napi, int budget)
790 {
791 	struct rhine_private *rp = container_of(napi, struct rhine_private, napi);
792 	struct net_device *dev = rp->dev;
793 	void __iomem *ioaddr = rp->base;
794 	u16 enable_mask = RHINE_EVENT & 0xffff;
795 	int work_done = 0;
796 	u32 status;
797 
798 	status = rhine_get_events(rp);
799 	rhine_ack_events(rp, status & ~RHINE_EVENT_SLOW);
800 
801 	if (status & RHINE_EVENT_NAPI_RX)
802 		work_done += rhine_rx(dev, budget);
803 
804 	if (status & RHINE_EVENT_NAPI_TX) {
805 		if (status & RHINE_EVENT_NAPI_TX_ERR) {
806 			/* Avoid scavenging before Tx engine turned off */
807 			rhine_wait_bit_low(rp, ChipCmd, CmdTxOn);
808 			if (ioread8(ioaddr + ChipCmd) & CmdTxOn)
809 				netif_warn(rp, tx_err, dev, "Tx still on\n");
810 		}
811 
812 		rhine_tx(dev);
813 
814 		if (status & RHINE_EVENT_NAPI_TX_ERR)
815 			rhine_tx_err(rp, status);
816 	}
817 
818 	if (status & IntrStatsMax) {
819 		spin_lock(&rp->lock);
		rhine_update_rx_crc_and_missed_errors(rp);
821 		spin_unlock(&rp->lock);
822 	}
823 
824 	if (status & RHINE_EVENT_SLOW) {
825 		enable_mask &= ~RHINE_EVENT_SLOW;
826 		schedule_work(&rp->slow_event_task);
827 	}
828 
829 	if (work_done < budget) {
830 		napi_complete(napi);
831 		iowrite16(enable_mask, ioaddr + IntrEnable);
832 		mmiowb();
833 	}
834 	return work_done;
835 }
836 
837 static void rhine_hw_init(struct net_device *dev, long pioaddr)
838 {
839 	struct rhine_private *rp = netdev_priv(dev);
840 
841 	/* Reset the chip to erase previous misconfiguration. */
842 	rhine_chip_reset(dev);
843 
844 	/* Rhine-I needs extra time to recuperate before EEPROM reload */
845 	if (rp->quirks & rqRhineI)
846 		msleep(5);
847 
848 	/* Reload EEPROM controlled bytes cleared by soft reset */
849 	rhine_reload_eeprom(pioaddr, dev);
850 }
851 
852 static const struct net_device_ops rhine_netdev_ops = {
853 	.ndo_open		 = rhine_open,
854 	.ndo_stop		 = rhine_close,
855 	.ndo_start_xmit		 = rhine_start_tx,
856 	.ndo_get_stats64	 = rhine_get_stats64,
857 	.ndo_set_rx_mode	 = rhine_set_rx_mode,
858 	.ndo_change_mtu		 = eth_change_mtu,
859 	.ndo_validate_addr	 = eth_validate_addr,
860 	.ndo_set_mac_address 	 = eth_mac_addr,
861 	.ndo_do_ioctl		 = netdev_ioctl,
862 	.ndo_tx_timeout 	 = rhine_tx_timeout,
863 	.ndo_vlan_rx_add_vid	 = rhine_vlan_rx_add_vid,
864 	.ndo_vlan_rx_kill_vid	 = rhine_vlan_rx_kill_vid,
865 #ifdef CONFIG_NET_POLL_CONTROLLER
866 	.ndo_poll_controller	 = rhine_poll,
867 #endif
868 };
869 
870 static int rhine_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
871 {
872 	struct net_device *dev;
873 	struct rhine_private *rp;
874 	int i, rc;
875 	u32 quirks;
876 	long pioaddr;
877 	long memaddr;
878 	void __iomem *ioaddr;
879 	int io_size, phy_id;
880 	const char *name;
881 #ifdef USE_MMIO
882 	int bar = 1;
883 #else
884 	int bar = 0;
885 #endif
886 
887 /* when built into the kernel, we only print version if device is found */
888 #ifndef MODULE
889 	pr_info_once("%s\n", version);
890 #endif
891 
892 	io_size = 256;
893 	phy_id = 0;
894 	quirks = 0;
895 	name = "Rhine";
	if (pdev->revision < VTunknown0) {
		quirks = rqRhineI;
		io_size = 128;
	} else if (pdev->revision >= VT6102) {
		quirks = rqWOL | rqForceReset;
		if (pdev->revision < VT6105) {
			name = "Rhine II";
			quirks |= rqStatusWBRace;	/* Rhine-II exclusive */
		} else {
			phy_id = 1;	/* Integrated PHY, phy_id fixed to 1 */
			if (pdev->revision >= VT6105_B0)
				quirks |= rq6patterns;
			if (pdev->revision < VT6105M)
				name = "Rhine III";
			else
				name = "Rhine III (Management Adapter)";
		}
	}
916 
917 	rc = pci_enable_device(pdev);
918 	if (rc)
919 		goto err_out;
920 
921 	/* this should always be supported */
922 	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
923 	if (rc) {
924 		dev_err(&pdev->dev,
925 			"32-bit PCI DMA addresses not supported by the card!?\n");
926 		goto err_out;
927 	}
928 
929 	/* sanity check */
930 	if ((pci_resource_len(pdev, 0) < io_size) ||
931 	    (pci_resource_len(pdev, 1) < io_size)) {
932 		rc = -EIO;
933 		dev_err(&pdev->dev, "Insufficient PCI resources, aborting\n");
934 		goto err_out;
935 	}
936 
937 	pioaddr = pci_resource_start(pdev, 0);
938 	memaddr = pci_resource_start(pdev, 1);
939 
940 	pci_set_master(pdev);
941 
942 	dev = alloc_etherdev(sizeof(struct rhine_private));
943 	if (!dev) {
944 		rc = -ENOMEM;
945 		goto err_out;
946 	}
947 	SET_NETDEV_DEV(dev, &pdev->dev);
948 
949 	rp = netdev_priv(dev);
950 	rp->dev = dev;
951 	rp->quirks = quirks;
952 	rp->pioaddr = pioaddr;
953 	rp->pdev = pdev;
954 	rp->msg_enable = netif_msg_init(debug, RHINE_MSG_DEFAULT);
955 
956 	rc = pci_request_regions(pdev, DRV_NAME);
957 	if (rc)
958 		goto err_out_free_netdev;
959 
960 	ioaddr = pci_iomap(pdev, bar, io_size);
961 	if (!ioaddr) {
962 		rc = -EIO;
963 		dev_err(&pdev->dev,
964 			"ioremap failed for device %s, region 0x%X @ 0x%lX\n",
965 			pci_name(pdev), io_size, memaddr);
966 		goto err_out_free_res;
967 	}
968 
969 #ifdef USE_MMIO
970 	enable_mmio(pioaddr, quirks);
971 
972 	/* Check that selected MMIO registers match the PIO ones */
973 	i = 0;
974 	while (mmio_verify_registers[i]) {
975 		int reg = mmio_verify_registers[i++];
976 		unsigned char a = inb(pioaddr+reg);
977 		unsigned char b = readb(ioaddr+reg);
978 		if (a != b) {
979 			rc = -EIO;
980 			dev_err(&pdev->dev,
981 				"MMIO do not match PIO [%02x] (%02x != %02x)\n",
982 				reg, a, b);
983 			goto err_out_unmap;
984 		}
985 	}
986 #endif /* USE_MMIO */
987 
988 	rp->base = ioaddr;
989 
990 	/* Get chip registers into a sane state */
991 	rhine_power_init(dev);
992 	rhine_hw_init(dev, pioaddr);
993 
994 	for (i = 0; i < 6; i++)
995 		dev->dev_addr[i] = ioread8(ioaddr + StationAddr + i);
996 
997 	if (!is_valid_ether_addr(dev->dev_addr)) {
998 		/* Report it and use a random ethernet address instead */
999 		netdev_err(dev, "Invalid MAC address: %pM\n", dev->dev_addr);
1000 		eth_hw_addr_random(dev);
1001 		netdev_info(dev, "Using random MAC address: %pM\n",
1002 			    dev->dev_addr);
1003 	}
1004 
1005 	/* For Rhine-I/II, phy_id is loaded from EEPROM */
1006 	if (!phy_id)
1007 		phy_id = ioread8(ioaddr + 0x6C);
1008 
1009 	spin_lock_init(&rp->lock);
1010 	mutex_init(&rp->task_lock);
1011 	INIT_WORK(&rp->reset_task, rhine_reset_task);
1012 	INIT_WORK(&rp->slow_event_task, rhine_slow_event_task);
1013 
1014 	rp->mii_if.dev = dev;
1015 	rp->mii_if.mdio_read = mdio_read;
1016 	rp->mii_if.mdio_write = mdio_write;
1017 	rp->mii_if.phy_id_mask = 0x1f;
1018 	rp->mii_if.reg_num_mask = 0x1f;
1019 
1020 	/* The chip-specific entries in the device structure. */
1021 	dev->netdev_ops = &rhine_netdev_ops;
	dev->ethtool_ops = &netdev_ethtool_ops;
1023 	dev->watchdog_timeo = TX_TIMEOUT;
1024 
1025 	netif_napi_add(dev, &rp->napi, rhine_napipoll, 64);
1026 
1027 	if (rp->quirks & rqRhineI)
1028 		dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM;
1029 
1030 	if (pdev->revision >= VT6105M)
1031 		dev->features |= NETIF_F_HW_VLAN_CTAG_TX |
1032 				 NETIF_F_HW_VLAN_CTAG_RX |
1033 				 NETIF_F_HW_VLAN_CTAG_FILTER;
1034 
1035 	/* dev->name not defined before register_netdev()! */
1036 	rc = register_netdev(dev);
1037 	if (rc)
1038 		goto err_out_unmap;
1039 
1040 	netdev_info(dev, "VIA %s at 0x%lx, %pM, IRQ %d\n",
1041 		    name,
1042 #ifdef USE_MMIO
1043 		    memaddr,
1044 #else
1045 		    (long)ioaddr,
1046 #endif
1047 		    dev->dev_addr, pdev->irq);
1048 
1049 	pci_set_drvdata(pdev, dev);
1050 
1051 	{
1052 		u16 mii_cmd;
1053 		int mii_status = mdio_read(dev, phy_id, 1);
1054 		mii_cmd = mdio_read(dev, phy_id, MII_BMCR) & ~BMCR_ISOLATE;
1055 		mdio_write(dev, phy_id, MII_BMCR, mii_cmd);
1056 		if (mii_status != 0xffff && mii_status != 0x0000) {
1057 			rp->mii_if.advertising = mdio_read(dev, phy_id, 4);
1058 			netdev_info(dev,
1059 				    "MII PHY found at address %d, status 0x%04x advertising %04x Link %04x\n",
1060 				    phy_id,
1061 				    mii_status, rp->mii_if.advertising,
1062 				    mdio_read(dev, phy_id, 5));
1063 
1064 			/* set IFF_RUNNING */
1065 			if (mii_status & BMSR_LSTATUS)
1066 				netif_carrier_on(dev);
1067 			else
1068 				netif_carrier_off(dev);
1069 
1070 		}
1071 	}
1072 	rp->mii_if.phy_id = phy_id;
1073 	if (avoid_D3)
1074 		netif_info(rp, probe, dev, "No D3 power state at shutdown\n");
1075 
1076 	return 0;
1077 
1078 err_out_unmap:
1079 	pci_iounmap(pdev, ioaddr);
1080 err_out_free_res:
1081 	pci_release_regions(pdev);
1082 err_out_free_netdev:
1083 	free_netdev(dev);
1084 err_out:
1085 	return rc;
1086 }
1087 
1088 static int alloc_ring(struct net_device* dev)
1089 {
1090 	struct rhine_private *rp = netdev_priv(dev);
1091 	void *ring;
1092 	dma_addr_t ring_dma;
1093 
1094 	ring = pci_alloc_consistent(rp->pdev,
1095 				    RX_RING_SIZE * sizeof(struct rx_desc) +
1096 				    TX_RING_SIZE * sizeof(struct tx_desc),
1097 				    &ring_dma);
1098 	if (!ring) {
1099 		netdev_err(dev, "Could not allocate DMA memory\n");
1100 		return -ENOMEM;
1101 	}
1102 	if (rp->quirks & rqRhineI) {
1103 		rp->tx_bufs = pci_alloc_consistent(rp->pdev,
1104 						   PKT_BUF_SZ * TX_RING_SIZE,
1105 						   &rp->tx_bufs_dma);
1106 		if (rp->tx_bufs == NULL) {
1107 			pci_free_consistent(rp->pdev,
1108 				    RX_RING_SIZE * sizeof(struct rx_desc) +
1109 				    TX_RING_SIZE * sizeof(struct tx_desc),
1110 				    ring, ring_dma);
1111 			return -ENOMEM;
1112 		}
1113 	}
1114 
1115 	rp->rx_ring = ring;
1116 	rp->tx_ring = ring + RX_RING_SIZE * sizeof(struct rx_desc);
1117 	rp->rx_ring_dma = ring_dma;
1118 	rp->tx_ring_dma = ring_dma + RX_RING_SIZE * sizeof(struct rx_desc);
1119 
1120 	return 0;
1121 }
1122 
1123 static void free_ring(struct net_device* dev)
1124 {
1125 	struct rhine_private *rp = netdev_priv(dev);
1126 
1127 	pci_free_consistent(rp->pdev,
1128 			    RX_RING_SIZE * sizeof(struct rx_desc) +
1129 			    TX_RING_SIZE * sizeof(struct tx_desc),
1130 			    rp->rx_ring, rp->rx_ring_dma);
1131 	rp->tx_ring = NULL;
1132 
1133 	if (rp->tx_bufs)
1134 		pci_free_consistent(rp->pdev, PKT_BUF_SZ * TX_RING_SIZE,
1135 				    rp->tx_bufs, rp->tx_bufs_dma);
1136 
1137 	rp->tx_bufs = NULL;
}
1140 
1141 static void alloc_rbufs(struct net_device *dev)
1142 {
1143 	struct rhine_private *rp = netdev_priv(dev);
1144 	dma_addr_t next;
1145 	int i;
1146 
1147 	rp->dirty_rx = rp->cur_rx = 0;
1148 
1149 	rp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
1150 	rp->rx_head_desc = &rp->rx_ring[0];
1151 	next = rp->rx_ring_dma;
1152 
1153 	/* Init the ring entries */
1154 	for (i = 0; i < RX_RING_SIZE; i++) {
1155 		rp->rx_ring[i].rx_status = 0;
1156 		rp->rx_ring[i].desc_length = cpu_to_le32(rp->rx_buf_sz);
1157 		next += sizeof(struct rx_desc);
1158 		rp->rx_ring[i].next_desc = cpu_to_le32(next);
1159 		rp->rx_skbuff[i] = NULL;
1160 	}
1161 	/* Mark the last entry as wrapping the ring. */
1162 	rp->rx_ring[i-1].next_desc = cpu_to_le32(rp->rx_ring_dma);
1163 
1164 	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
1165 	for (i = 0; i < RX_RING_SIZE; i++) {
1166 		struct sk_buff *skb = netdev_alloc_skb(dev, rp->rx_buf_sz);
1167 		rp->rx_skbuff[i] = skb;
1168 		if (skb == NULL)
1169 			break;
1170 
1171 		rp->rx_skbuff_dma[i] =
1172 			pci_map_single(rp->pdev, skb->data, rp->rx_buf_sz,
1173 				       PCI_DMA_FROMDEVICE);
1174 
1175 		rp->rx_ring[i].addr = cpu_to_le32(rp->rx_skbuff_dma[i]);
1176 		rp->rx_ring[i].rx_status = cpu_to_le32(DescOwn);
1177 	}
1178 	rp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
1179 }
1180 
1181 static void free_rbufs(struct net_device* dev)
1182 {
1183 	struct rhine_private *rp = netdev_priv(dev);
1184 	int i;
1185 
1186 	/* Free all the skbuffs in the Rx queue. */
1187 	for (i = 0; i < RX_RING_SIZE; i++) {
1188 		rp->rx_ring[i].rx_status = 0;
1189 		rp->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
1190 		if (rp->rx_skbuff[i]) {
1191 			pci_unmap_single(rp->pdev,
1192 					 rp->rx_skbuff_dma[i],
1193 					 rp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1194 			dev_kfree_skb(rp->rx_skbuff[i]);
1195 		}
1196 		rp->rx_skbuff[i] = NULL;
1197 	}
1198 }
1199 
1200 static void alloc_tbufs(struct net_device* dev)
1201 {
1202 	struct rhine_private *rp = netdev_priv(dev);
1203 	dma_addr_t next;
1204 	int i;
1205 
1206 	rp->dirty_tx = rp->cur_tx = 0;
1207 	next = rp->tx_ring_dma;
1208 	for (i = 0; i < TX_RING_SIZE; i++) {
1209 		rp->tx_skbuff[i] = NULL;
1210 		rp->tx_ring[i].tx_status = 0;
1211 		rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
1212 		next += sizeof(struct tx_desc);
1213 		rp->tx_ring[i].next_desc = cpu_to_le32(next);
1214 		if (rp->quirks & rqRhineI)
1215 			rp->tx_buf[i] = &rp->tx_bufs[i * PKT_BUF_SZ];
1216 	}
1217 	rp->tx_ring[i-1].next_desc = cpu_to_le32(rp->tx_ring_dma);
}
1220 
1221 static void free_tbufs(struct net_device* dev)
1222 {
1223 	struct rhine_private *rp = netdev_priv(dev);
1224 	int i;
1225 
1226 	for (i = 0; i < TX_RING_SIZE; i++) {
1227 		rp->tx_ring[i].tx_status = 0;
1228 		rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
1229 		rp->tx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
1230 		if (rp->tx_skbuff[i]) {
1231 			if (rp->tx_skbuff_dma[i]) {
1232 				pci_unmap_single(rp->pdev,
1233 						 rp->tx_skbuff_dma[i],
1234 						 rp->tx_skbuff[i]->len,
1235 						 PCI_DMA_TODEVICE);
1236 			}
1237 			dev_kfree_skb(rp->tx_skbuff[i]);
1238 		}
1239 		rp->tx_skbuff[i] = NULL;
1240 		rp->tx_buf[i] = NULL;
1241 	}
1242 }
1243 
1244 static void rhine_check_media(struct net_device *dev, unsigned int init_media)
1245 {
1246 	struct rhine_private *rp = netdev_priv(dev);
1247 	void __iomem *ioaddr = rp->base;
1248 
1249 	mii_check_media(&rp->mii_if, netif_msg_link(rp), init_media);
1250 
	if (rp->mii_if.full_duplex)
		iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1FDuplex,
			 ioaddr + ChipCmd1);
	else
		iowrite8(ioread8(ioaddr + ChipCmd1) & ~Cmd1FDuplex,
			 ioaddr + ChipCmd1);
1257 
1258 	netif_info(rp, link, dev, "force_media %d, carrier %d\n",
1259 		   rp->mii_if.force_media, netif_carrier_ok(dev));
1260 }
1261 
1262 /* Called after status of force_media possibly changed */
1263 static void rhine_set_carrier(struct mii_if_info *mii)
1264 {
1265 	struct net_device *dev = mii->dev;
1266 	struct rhine_private *rp = netdev_priv(dev);
1267 
1268 	if (mii->force_media) {
1269 		/* autoneg is off: Link is always assumed to be up */
1270 		if (!netif_carrier_ok(dev))
1271 			netif_carrier_on(dev);
	} else	/* Let MII library update carrier status */
1273 		rhine_check_media(dev, 0);
1274 
1275 	netif_info(rp, link, dev, "force_media %d, carrier %d\n",
1276 		   mii->force_media, netif_carrier_ok(dev));
1277 }
1278 
1279 /**
1280  * rhine_set_cam - set CAM multicast filters
1281  * @ioaddr: register block of this Rhine
1282  * @idx: multicast CAM index [0..MCAM_SIZE-1]
1283  * @addr: multicast address (6 bytes)
1284  *
1285  * Load addresses into multicast filters.
1286  */
1287 static void rhine_set_cam(void __iomem *ioaddr, int idx, u8 *addr)
1288 {
1289 	int i;
1290 
1291 	iowrite8(CAMC_CAMEN, ioaddr + CamCon);
1292 	wmb();
1293 
1294 	/* Paranoid -- idx out of range should never happen */
1295 	idx &= (MCAM_SIZE - 1);
1296 
1297 	iowrite8((u8) idx, ioaddr + CamAddr);
1298 
1299 	for (i = 0; i < 6; i++, addr++)
1300 		iowrite8(*addr, ioaddr + MulticastFilter0 + i);
1301 	udelay(10);
1302 	wmb();
1303 
1304 	iowrite8(CAMC_CAMWR | CAMC_CAMEN, ioaddr + CamCon);
1305 	udelay(10);
1306 
1307 	iowrite8(0, ioaddr + CamCon);
1308 }
1309 
1310 /**
1311  * rhine_set_vlan_cam - set CAM VLAN filters
1312  * @ioaddr: register block of this Rhine
1313  * @idx: VLAN CAM index [0..VCAM_SIZE-1]
1314  * @addr: VLAN ID (2 bytes)
1315  *
1316  * Load addresses into VLAN filters.
1317  */
1318 static void rhine_set_vlan_cam(void __iomem *ioaddr, int idx, u8 *addr)
1319 {
1320 	iowrite8(CAMC_CAMEN | CAMC_VCAMSL, ioaddr + CamCon);
1321 	wmb();
1322 
1323 	/* Paranoid -- idx out of range should never happen */
1324 	idx &= (VCAM_SIZE - 1);
1325 
1326 	iowrite8((u8) idx, ioaddr + CamAddr);
1327 
1328 	iowrite16(*((u16 *) addr), ioaddr + MulticastFilter0 + 6);
1329 	udelay(10);
1330 	wmb();
1331 
1332 	iowrite8(CAMC_CAMWR | CAMC_CAMEN, ioaddr + CamCon);
1333 	udelay(10);
1334 
1335 	iowrite8(0, ioaddr + CamCon);
1336 }
1337 
1338 /**
1339  * rhine_set_cam_mask - set multicast CAM mask
1340  * @ioaddr: register block of this Rhine
1341  * @mask: multicast CAM mask
1342  *
1343  * Mask sets multicast filters active/inactive.
1344  */
1345 static void rhine_set_cam_mask(void __iomem *ioaddr, u32 mask)
1346 {
1347 	iowrite8(CAMC_CAMEN, ioaddr + CamCon);
1348 	wmb();
1349 
1350 	/* write mask */
1351 	iowrite32(mask, ioaddr + CamMask);
1352 
1353 	/* disable CAMEN */
1354 	iowrite8(0, ioaddr + CamCon);
1355 }
1356 
1357 /**
1358  * rhine_set_vlan_cam_mask - set VLAN CAM mask
1359  * @ioaddr: register block of this Rhine
1360  * @mask: VLAN CAM mask
1361  *
1362  * Mask sets VLAN filters active/inactive.
1363  */
1364 static void rhine_set_vlan_cam_mask(void __iomem *ioaddr, u32 mask)
1365 {
1366 	iowrite8(CAMC_CAMEN | CAMC_VCAMSL, ioaddr + CamCon);
1367 	wmb();
1368 
1369 	/* write mask */
1370 	iowrite32(mask, ioaddr + CamMask);
1371 
1372 	/* disable CAMEN */
1373 	iowrite8(0, ioaddr + CamCon);
1374 }
1375 
1376 /**
1377  * rhine_init_cam_filter - initialize CAM filters
1378  * @dev: network device
1379  *
1380  * Initialize (disable) hardware VLAN and multicast support on this
1381  * Rhine.
1382  */
1383 static void rhine_init_cam_filter(struct net_device *dev)
1384 {
1385 	struct rhine_private *rp = netdev_priv(dev);
1386 	void __iomem *ioaddr = rp->base;
1387 
1388 	/* Disable all CAMs */
1389 	rhine_set_vlan_cam_mask(ioaddr, 0);
1390 	rhine_set_cam_mask(ioaddr, 0);
1391 
1392 	/* disable hardware VLAN support */
1393 	BYTE_REG_BITS_ON(TCR_PQEN, ioaddr + TxConfig);
1394 	BYTE_REG_BITS_OFF(BCR1_VIDFR, ioaddr + PCIBusConfig1);
1395 }
1396 
1397 /**
1398  * rhine_update_vcam - update VLAN CAM filters
 * @dev: network device
1400  *
1401  * Update VLAN CAM filters to match configuration change.
1402  */
1403 static void rhine_update_vcam(struct net_device *dev)
1404 {
1405 	struct rhine_private *rp = netdev_priv(dev);
1406 	void __iomem *ioaddr = rp->base;
1407 	u16 vid;
1408 	u32 vCAMmask = 0;	/* 32 vCAMs (6105M and better) */
1409 	unsigned int i = 0;
1410 
1411 	for_each_set_bit(vid, rp->active_vlans, VLAN_N_VID) {
1412 		rhine_set_vlan_cam(ioaddr, i, (u8 *)&vid);
1413 		vCAMmask |= 1 << i;
1414 		if (++i >= VCAM_SIZE)
1415 			break;
1416 	}
1417 	rhine_set_vlan_cam_mask(ioaddr, vCAMmask);
1418 }
1419 
1420 static int rhine_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
1421 {
1422 	struct rhine_private *rp = netdev_priv(dev);
1423 
1424 	spin_lock_bh(&rp->lock);
1425 	set_bit(vid, rp->active_vlans);
1426 	rhine_update_vcam(dev);
1427 	spin_unlock_bh(&rp->lock);
1428 	return 0;
1429 }
1430 
1431 static int rhine_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
1432 {
1433 	struct rhine_private *rp = netdev_priv(dev);
1434 
1435 	spin_lock_bh(&rp->lock);
1436 	clear_bit(vid, rp->active_vlans);
1437 	rhine_update_vcam(dev);
1438 	spin_unlock_bh(&rp->lock);
1439 	return 0;
1440 }
1441 
1442 static void init_registers(struct net_device *dev)
1443 {
1444 	struct rhine_private *rp = netdev_priv(dev);
1445 	void __iomem *ioaddr = rp->base;
1446 	int i;
1447 
1448 	for (i = 0; i < 6; i++)
1449 		iowrite8(dev->dev_addr[i], ioaddr + StationAddr + i);
1450 
1451 	/* Initialize other registers. */
1452 	iowrite16(0x0006, ioaddr + PCIBusConfig);	/* Tune configuration??? */
1453 	/* Configure initial FIFO thresholds. */
1454 	iowrite8(0x20, ioaddr + TxConfig);
1455 	rp->tx_thresh = 0x20;
1456 	rp->rx_thresh = 0x60;		/* Written in rhine_set_rx_mode(). */
1457 
1458 	iowrite32(rp->rx_ring_dma, ioaddr + RxRingPtr);
1459 	iowrite32(rp->tx_ring_dma, ioaddr + TxRingPtr);
1460 
1461 	rhine_set_rx_mode(dev);
1462 
1463 	if (rp->pdev->revision >= VT6105M)
1464 		rhine_init_cam_filter(dev);
1465 
1466 	napi_enable(&rp->napi);
1467 
1468 	iowrite16(RHINE_EVENT & 0xffff, ioaddr + IntrEnable);
1469 
1470 	iowrite16(CmdStart | CmdTxOn | CmdRxOn | (Cmd1NoTxPoll << 8),
1471 	       ioaddr + ChipCmd);
1472 	rhine_check_media(dev, 1);
1473 }
1474 
1475 /* Enable MII link status auto-polling (required for IntrLinkChange) */
1476 static void rhine_enable_linkmon(struct rhine_private *rp)
1477 {
1478 	void __iomem *ioaddr = rp->base;
1479 
1480 	iowrite8(0, ioaddr + MIICmd);
1481 	iowrite8(MII_BMSR, ioaddr + MIIRegAddr);
1482 	iowrite8(0x80, ioaddr + MIICmd);
1483 
1484 	rhine_wait_bit_high(rp, MIIRegAddr, 0x20);
1485 
1486 	iowrite8(MII_BMSR | 0x40, ioaddr + MIIRegAddr);
1487 }
1488 
1489 /* Disable MII link status auto-polling (required for MDIO access) */
1490 static void rhine_disable_linkmon(struct rhine_private *rp)
1491 {
1492 	void __iomem *ioaddr = rp->base;
1493 
1494 	iowrite8(0, ioaddr + MIICmd);
1495 
1496 	if (rp->quirks & rqRhineI) {
		iowrite8(0x01, ioaddr + MIIRegAddr);	/* MII_BMSR */
1498 
1499 		/* Can be called from ISR. Evil. */
1500 		mdelay(1);
1501 
1502 		/* 0x80 must be set immediately before turning it off */
1503 		iowrite8(0x80, ioaddr + MIICmd);
1504 
1505 		rhine_wait_bit_high(rp, MIIRegAddr, 0x20);
1506 
1507 		/* Heh. Now clear 0x80 again. */
1508 		iowrite8(0, ioaddr + MIICmd);
	} else {
		rhine_wait_bit_high(rp, MIIRegAddr, 0x80);
	}
1512 }
1513 
1514 /* Read and write over the MII Management Data I/O (MDIO) interface. */
1515 
1516 static int mdio_read(struct net_device *dev, int phy_id, int regnum)
1517 {
1518 	struct rhine_private *rp = netdev_priv(dev);
1519 	void __iomem *ioaddr = rp->base;
1520 	int result;
1521 
1522 	rhine_disable_linkmon(rp);
1523 
1524 	/* rhine_disable_linkmon already cleared MIICmd */
1525 	iowrite8(phy_id, ioaddr + MIIPhyAddr);
1526 	iowrite8(regnum, ioaddr + MIIRegAddr);
1527 	iowrite8(0x40, ioaddr + MIICmd);		/* Trigger read */
1528 	rhine_wait_bit_low(rp, MIICmd, 0x40);
1529 	result = ioread16(ioaddr + MIIData);
1530 
1531 	rhine_enable_linkmon(rp);
1532 	return result;
1533 }
1534 
1535 static void mdio_write(struct net_device *dev, int phy_id, int regnum, int value)
1536 {
1537 	struct rhine_private *rp = netdev_priv(dev);
1538 	void __iomem *ioaddr = rp->base;
1539 
1540 	rhine_disable_linkmon(rp);
1541 
1542 	/* rhine_disable_linkmon already cleared MIICmd */
1543 	iowrite8(phy_id, ioaddr + MIIPhyAddr);
1544 	iowrite8(regnum, ioaddr + MIIRegAddr);
1545 	iowrite16(value, ioaddr + MIIData);
1546 	iowrite8(0x20, ioaddr + MIICmd);		/* Trigger write */
1547 	rhine_wait_bit_low(rp, MIICmd, 0x20);
1548 
1549 	rhine_enable_linkmon(rp);
1550 }
1551 
1552 static void rhine_task_disable(struct rhine_private *rp)
1553 {
1554 	mutex_lock(&rp->task_lock);
1555 	rp->task_enable = false;
1556 	mutex_unlock(&rp->task_lock);
1557 
1558 	cancel_work_sync(&rp->slow_event_task);
1559 	cancel_work_sync(&rp->reset_task);
1560 }
1561 
1562 static void rhine_task_enable(struct rhine_private *rp)
1563 {
1564 	mutex_lock(&rp->task_lock);
1565 	rp->task_enable = true;
1566 	mutex_unlock(&rp->task_lock);
1567 }
1568 
1569 static int rhine_open(struct net_device *dev)
1570 {
1571 	struct rhine_private *rp = netdev_priv(dev);
1572 	void __iomem *ioaddr = rp->base;
1573 	int rc;
1574 
1575 	rc = request_irq(rp->pdev->irq, rhine_interrupt, IRQF_SHARED, dev->name,
1576 			dev);
1577 	if (rc)
1578 		return rc;
1579 
1580 	netif_dbg(rp, ifup, dev, "%s() irq %d\n", __func__, rp->pdev->irq);
1581 
1582 	rc = alloc_ring(dev);
1583 	if (rc) {
1584 		free_irq(rp->pdev->irq, dev);
1585 		return rc;
1586 	}
1587 	alloc_rbufs(dev);
1588 	alloc_tbufs(dev);
1589 	rhine_chip_reset(dev);
1590 	rhine_task_enable(rp);
1591 	init_registers(dev);
1592 
1593 	netif_dbg(rp, ifup, dev, "%s() Done - status %04x MII status: %04x\n",
1594 		  __func__, ioread16(ioaddr + ChipCmd),
1595 		  mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
1596 
1597 	netif_start_queue(dev);
1598 
1599 	return 0;
1600 }
1601 
1602 static void rhine_reset_task(struct work_struct *work)
1603 {
1604 	struct rhine_private *rp = container_of(work, struct rhine_private,
1605 						reset_task);
1606 	struct net_device *dev = rp->dev;
1607 
1608 	mutex_lock(&rp->task_lock);
1609 
1610 	if (!rp->task_enable)
1611 		goto out_unlock;
1612 
1613 	napi_disable(&rp->napi);
1614 	spin_lock_bh(&rp->lock);
1615 
1616 	/* clear all descriptors */
1617 	free_tbufs(dev);
1618 	free_rbufs(dev);
1619 	alloc_tbufs(dev);
1620 	alloc_rbufs(dev);
1621 
1622 	/* Reinitialize the hardware. */
1623 	rhine_chip_reset(dev);
1624 	init_registers(dev);
1625 
1626 	spin_unlock_bh(&rp->lock);
1627 
1628 	dev->trans_start = jiffies; /* prevent tx timeout */
1629 	dev->stats.tx_errors++;
1630 	netif_wake_queue(dev);
1631 
1632 out_unlock:
1633 	mutex_unlock(&rp->task_lock);
1634 }
1635 
1636 static void rhine_tx_timeout(struct net_device *dev)
1637 {
1638 	struct rhine_private *rp = netdev_priv(dev);
1639 	void __iomem *ioaddr = rp->base;
1640 
1641 	netdev_warn(dev, "Transmit timed out, status %04x, PHY status %04x, resetting...\n",
1642 		    ioread16(ioaddr + IntrStatus),
1643 		    mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
1644 
1645 	schedule_work(&rp->reset_task);
1646 }
1647 
1648 static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
1649 				  struct net_device *dev)
1650 {
1651 	struct rhine_private *rp = netdev_priv(dev);
1652 	void __iomem *ioaddr = rp->base;
1653 	unsigned entry;
1654 
1655 	/* Caution: the write order is important here, set the field
1656 	   with the "ownership" bits last. */
1657 
1658 	/* Calculate the next Tx descriptor entry. */
1659 	entry = rp->cur_tx % TX_RING_SIZE;
1660 
1661 	if (skb_padto(skb, ETH_ZLEN))
1662 		return NETDEV_TX_OK;
1663 
1664 	rp->tx_skbuff[entry] = skb;
1665 
1666 	if ((rp->quirks & rqRhineI) &&
1667 	    (((unsigned long)skb->data & 3) || skb_shinfo(skb)->nr_frags != 0 || skb->ip_summed == CHECKSUM_PARTIAL)) {
1668 		/* Must use alignment buffer. */
1669 		if (skb->len > PKT_BUF_SZ) {
1670 			/* packet too long, drop it */
1671 			dev_kfree_skb(skb);
1672 			rp->tx_skbuff[entry] = NULL;
1673 			dev->stats.tx_dropped++;
1674 			return NETDEV_TX_OK;
1675 		}
1676 
1677 		/* Padding is not copied and so must be redone. */
1678 		skb_copy_and_csum_dev(skb, rp->tx_buf[entry]);
1679 		if (skb->len < ETH_ZLEN)
1680 			memset(rp->tx_buf[entry] + skb->len, 0,
1681 			       ETH_ZLEN - skb->len);
1682 		rp->tx_skbuff_dma[entry] = 0;
1683 		rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_bufs_dma +
1684 						      (rp->tx_buf[entry] -
1685 						       rp->tx_bufs));
1686 	} else {
1687 		rp->tx_skbuff_dma[entry] =
1688 			pci_map_single(rp->pdev, skb->data, skb->len,
1689 				       PCI_DMA_TODEVICE);
1690 		rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_skbuff_dma[entry]);
1691 	}
1692 
1693 	rp->tx_ring[entry].desc_length =
1694 		cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));
1695 
1696 	if (unlikely(vlan_tx_tag_present(skb))) {
1697 		rp->tx_ring[entry].tx_status = cpu_to_le32((vlan_tx_tag_get(skb)) << 16);
1698 		/* request tagging */
1699 		rp->tx_ring[entry].desc_length |= cpu_to_le32(0x020000);
	} else {
		rp->tx_ring[entry].tx_status = 0;
	}
1703 
	/* Make sure all descriptor writes are done before setting DescOwn. */
1705 	wmb();
1706 	rp->tx_ring[entry].tx_status |= cpu_to_le32(DescOwn);
1707 	wmb();
1708 
1709 	rp->cur_tx++;
1710 
1711 	/* Non-x86 Todo: explicitly flush cache lines here. */
1712 
1713 	if (vlan_tx_tag_present(skb))
1714 		/* Tx queues are bits 7-0 (first Tx queue: bit 7) */
1715 		BYTE_REG_BITS_ON(1 << 7, ioaddr + TQWake);
1716 
1717 	/* Wake the potentially-idle transmit channel */
1718 	iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
1719 	       ioaddr + ChipCmd1);
1720 	IOSYNC;
1721 
1722 	if (rp->cur_tx == rp->dirty_tx + TX_QUEUE_LEN)
1723 		netif_stop_queue(dev);
1724 
1725 	netif_dbg(rp, tx_queued, dev, "Transmit frame #%d queued in slot %d\n",
1726 		  rp->cur_tx - 1, entry);
1727 
1728 	return NETDEV_TX_OK;
1729 }
1730 
1731 static void rhine_irq_disable(struct rhine_private *rp)
1732 {
1733 	iowrite16(0x0000, rp->base + IntrEnable);
1734 	mmiowb();
1735 }
1736 
1737 /* The interrupt handler does all of the Rx thread work and cleans up
1738    after the Tx thread. */
1739 static irqreturn_t rhine_interrupt(int irq, void *dev_instance)
1740 {
1741 	struct net_device *dev = dev_instance;
1742 	struct rhine_private *rp = netdev_priv(dev);
1743 	u32 status;
1744 	int handled = 0;
1745 
1746 	status = rhine_get_events(rp);
1747 
1748 	netif_dbg(rp, intr, dev, "Interrupt, status %08x\n", status);
1749 
1750 	if (status & RHINE_EVENT) {
1751 		handled = 1;
1752 
1753 		rhine_irq_disable(rp);
1754 		napi_schedule(&rp->napi);
1755 	}
1756 
1757 	if (status & ~(IntrLinkChange | IntrStatsMax | RHINE_EVENT_NAPI)) {
1758 		netif_err(rp, intr, dev, "Something Wicked happened! %08x\n",
1759 			  status);
1760 	}
1761 
1762 	return IRQ_RETVAL(handled);
1763 }
1764 
1765 /* This routine is logically part of the interrupt handler, but isolated
1766    for clarity. */
1767 static void rhine_tx(struct net_device *dev)
1768 {
1769 	struct rhine_private *rp = netdev_priv(dev);
1770 	int txstatus = 0, entry = rp->dirty_tx % TX_RING_SIZE;
1771 
1772 	/* find and cleanup dirty tx descriptors */
1773 	while (rp->dirty_tx != rp->cur_tx) {
1774 		txstatus = le32_to_cpu(rp->tx_ring[entry].tx_status);
1775 		netif_dbg(rp, tx_done, dev, "Tx scavenge %d status %08x\n",
1776 			  entry, txstatus);
1777 		if (txstatus & DescOwn)
1778 			break;
		if (txstatus & 0x8000) {
			netif_dbg(rp, tx_done, dev,
				  "Transmit error, Tx status %08x\n", txstatus);
			dev->stats.tx_errors++;
			if (txstatus & 0x0400)
				dev->stats.tx_carrier_errors++;
			if (txstatus & 0x0200)
				dev->stats.tx_window_errors++;
			if (txstatus & 0x0100)
				dev->stats.tx_aborted_errors++;
			if (txstatus & 0x0080)
				dev->stats.tx_heartbeat_errors++;
			if (((rp->quirks & rqRhineI) && txstatus & 0x0002) ||
			    (txstatus & 0x0800) || (txstatus & 0x1000)) {
				dev->stats.tx_fifo_errors++;
				rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
				break; /* Keep the skb - we try again */
			}
			/* Transmitter restarted in 'abnormal' handler. */
		} else {
			if (rp->quirks & rqRhineI)
				dev->stats.collisions += (txstatus >> 3) & 0x0F;
			else
				dev->stats.collisions += txstatus & 0x0F;
			netif_dbg(rp, tx_done, dev, "collisions: %1.1x:%1.1x\n",
				  (txstatus >> 3) & 0xF, txstatus & 0xF);

			u64_stats_update_begin(&rp->tx_stats.syncp);
			rp->tx_stats.bytes += rp->tx_skbuff[entry]->len;
			rp->tx_stats.packets++;
			u64_stats_update_end(&rp->tx_stats.syncp);
		}
		/* Free the original skb. */
		if (rp->tx_skbuff_dma[entry]) {
			pci_unmap_single(rp->pdev,
					 rp->tx_skbuff_dma[entry],
					 rp->tx_skbuff[entry]->len,
					 PCI_DMA_TODEVICE);
		}
		dev_kfree_skb(rp->tx_skbuff[entry]);
		rp->tx_skbuff[entry] = NULL;
		entry = (++rp->dirty_tx) % TX_RING_SIZE;
	}
	if ((rp->cur_tx - rp->dirty_tx) < TX_QUEUE_LEN - 4)
		netif_wake_queue(dev);
}

/**
 * rhine_get_vlan_tci - extract TCI from Rx data buffer
 * @skb: pointer to sk_buff
 * @data_size: used data area of the buffer including CRC
 *
 * If hardware VLAN tag extraction is enabled and the chip indicates an 802.1Q
 * packet, the extracted 802.1Q header (2 bytes TPID + 2 bytes TCI) is 4-byte
 * aligned following the CRC.
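 *
 * Example: for data_size = 64 the 802.1Q header starts at the 4-byte
 * aligned offset 64, so the TCI is read from offset 66 (after the
 * 2-byte TPID).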
 */
static inline u16 rhine_get_vlan_tci(struct sk_buff *skb, int data_size)
{
	u8 *trailer = (u8 *)skb->data + ((data_size + 3) & ~3) + 2;
	return be16_to_cpup((__be16 *)trailer);
}

/* Process up to limit frames from receive ring */
static int rhine_rx(struct net_device *dev, int limit)
{
	struct rhine_private *rp = netdev_priv(dev);
	int count;
	int entry = rp->cur_rx % RX_RING_SIZE;

	netif_dbg(rp, rx_status, dev, "%s(), entry %d status %08x\n", __func__,
		  entry, le32_to_cpu(rp->rx_head_desc->rx_status));

	/* If EOP is set on the next entry, it's a new packet. Send it up. */
	for (count = 0; count < limit; ++count) {
		struct rx_desc *desc = rp->rx_head_desc;
		u32 desc_status = le32_to_cpu(desc->rx_status);
		u32 desc_length = le32_to_cpu(desc->desc_length);
		int data_size = desc_status >> 16;

		if (desc_status & DescOwn)
			break;

		netif_dbg(rp, rx_status, dev, "%s() status %08x\n", __func__,
			  desc_status);

		if ((desc_status & (RxWholePkt | RxErr)) != RxWholePkt) {
			if ((desc_status & RxWholePkt) != RxWholePkt) {
				netdev_warn(dev,
					    "Oversized Ethernet frame spanned multiple buffers, entry %#x length %d status %08x!\n",
					    entry, data_size, desc_status);
				netdev_warn(dev,
					    "Oversized Ethernet frame %p vs %p\n",
					    rp->rx_head_desc,
					    &rp->rx_ring[entry]);
				dev->stats.rx_length_errors++;
			} else if (desc_status & RxErr) {
				/* There was an error. */
				netif_dbg(rp, rx_err, dev,
					  "%s() Rx error %08x\n", __func__,
					  desc_status);
				dev->stats.rx_errors++;
				if (desc_status & 0x0030)
					dev->stats.rx_length_errors++;
				if (desc_status & 0x0048)
					dev->stats.rx_fifo_errors++;
				if (desc_status & 0x0004)
					dev->stats.rx_frame_errors++;
				if (desc_status & 0x0002) {
					/* this can also be updated outside the interrupt handler */
					spin_lock(&rp->lock);
					dev->stats.rx_crc_errors++;
					spin_unlock(&rp->lock);
				}
			}
		}
		} else {
			struct sk_buff *skb = NULL;
			/* Length should omit the CRC */
			int pkt_len = data_size - 4;
			u16 vlan_tci = 0;

			/* Check if the packet is long enough to accept without
			   copying to a minimally-sized skbuff. */
			if (pkt_len < rx_copybreak)
				skb = netdev_alloc_skb_ip_align(dev, pkt_len);
			if (skb) {
				pci_dma_sync_single_for_cpu(rp->pdev,
							    rp->rx_skbuff_dma[entry],
							    rp->rx_buf_sz,
							    PCI_DMA_FROMDEVICE);

				skb_copy_to_linear_data(skb,
							rp->rx_skbuff[entry]->data,
							pkt_len);
				skb_put(skb, pkt_len);
				pci_dma_sync_single_for_device(rp->pdev,
							       rp->rx_skbuff_dma[entry],
							       rp->rx_buf_sz,
							       PCI_DMA_FROMDEVICE);
			} else {
				skb = rp->rx_skbuff[entry];
				if (skb == NULL) {
					netdev_err(dev, "Inconsistent Rx descriptor chain\n");
					break;
				}
				rp->rx_skbuff[entry] = NULL;
				skb_put(skb, pkt_len);
				pci_unmap_single(rp->pdev,
						 rp->rx_skbuff_dma[entry],
						 rp->rx_buf_sz,
						 PCI_DMA_FROMDEVICE);
			}

			if (unlikely(desc_length & DescTag))
				vlan_tci = rhine_get_vlan_tci(skb, data_size);

			skb->protocol = eth_type_trans(skb, dev);

			if (unlikely(desc_length & DescTag))
				__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
			netif_receive_skb(skb);

			u64_stats_update_begin(&rp->rx_stats.syncp);
			rp->rx_stats.bytes += pkt_len;
			rp->rx_stats.packets++;
			u64_stats_update_end(&rp->rx_stats.syncp);
		}
		entry = (++rp->cur_rx) % RX_RING_SIZE;
		rp->rx_head_desc = &rp->rx_ring[entry];
	}

	/* Refill the Rx ring buffers. */
	for (; rp->cur_rx - rp->dirty_rx > 0; rp->dirty_rx++) {
		struct sk_buff *skb;
		entry = rp->dirty_rx % RX_RING_SIZE;
		if (rp->rx_skbuff[entry] == NULL) {
			skb = netdev_alloc_skb(dev, rp->rx_buf_sz);
			rp->rx_skbuff[entry] = skb;
			if (skb == NULL)
				break;	/* Better luck next round. */
			rp->rx_skbuff_dma[entry] =
				pci_map_single(rp->pdev, skb->data,
					       rp->rx_buf_sz,
					       PCI_DMA_FROMDEVICE);
			rp->rx_ring[entry].addr = cpu_to_le32(rp->rx_skbuff_dma[entry]);
		}
		rp->rx_ring[entry].rx_status = cpu_to_le32(DescOwn);
	}

	return count;
}

static void rhine_restart_tx(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	int entry = rp->dirty_tx % TX_RING_SIZE;
	u32 intr_status;

	/*
	 * If new errors occurred, we need to sort them out before doing Tx.
	 * In that case the ISR will come back here soon anyway.
	 */
	intr_status = rhine_get_events(rp);

	if ((intr_status & IntrTxErrSummary) == 0) {
		/* We know better than the chip where it should continue. */
		iowrite32(rp->tx_ring_dma + entry * sizeof(struct tx_desc),
			  ioaddr + TxRingPtr);

		iowrite8(ioread8(ioaddr + ChipCmd) | CmdTxOn,
			 ioaddr + ChipCmd);

		if (rp->tx_ring[entry].desc_length & cpu_to_le32(0x020000))
			/* Tx queues are bits 7-0 (first Tx queue: bit 7) */
			BYTE_REG_BITS_ON(1 << 7, ioaddr + TQWake);

		iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
			 ioaddr + ChipCmd1);
		IOSYNC;
	} else {
		/* This should never happen */
		netif_warn(rp, tx_err, dev, "another error occurred %08x\n",
			   intr_status);
	}
}

static void rhine_slow_event_task(struct work_struct *work)
{
	struct rhine_private *rp =
		container_of(work, struct rhine_private, slow_event_task);
	struct net_device *dev = rp->dev;
	u32 intr_status;

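	/* Deferred handling of slow events (link changes, PCI errors)
	 * outside hard interrupt context; serialized by task_lock. */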
	mutex_lock(&rp->task_lock);

	if (!rp->task_enable)
		goto out_unlock;

	intr_status = rhine_get_events(rp);
	rhine_ack_events(rp, intr_status & RHINE_EVENT_SLOW);

	if (intr_status & IntrLinkChange)
		rhine_check_media(dev, 0);

	if (intr_status & IntrPCIErr)
		netif_warn(rp, hw, dev, "PCI error\n");

	iowrite16(RHINE_EVENT & 0xffff, rp->base + IntrEnable);

out_unlock:
	mutex_unlock(&rp->task_lock);
}

static struct rtnl_link_stats64 *
rhine_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct rhine_private *rp = netdev_priv(dev);
	unsigned int start;

	spin_lock_bh(&rp->lock);
	rhine_update_rx_crc_and_missed_errord(rp);
	spin_unlock_bh(&rp->lock);

	netdev_stats_to_stats64(stats, &dev->stats);

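	/* Snapshot the 64-bit counters consistently against the writers in
	 * rhine_rx()/rhine_tx() using the u64_stats seqcount. */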
	do {
		start = u64_stats_fetch_begin_bh(&rp->rx_stats.syncp);
		stats->rx_packets = rp->rx_stats.packets;
		stats->rx_bytes = rp->rx_stats.bytes;
	} while (u64_stats_fetch_retry_bh(&rp->rx_stats.syncp, start));

	do {
		start = u64_stats_fetch_begin_bh(&rp->tx_stats.syncp);
		stats->tx_packets = rp->tx_stats.packets;
		stats->tx_bytes = rp->tx_stats.bytes;
	} while (u64_stats_fetch_retry_bh(&rp->tx_stats.syncp, start));

	return stats;
}

static void rhine_set_rx_mode(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	u32 mc_filter[2];	/* Multicast hash filter */
	u8 rx_mode = 0x0C;	/* Note: 0x02=accept runt, 0x01=accept errs */
	struct netdev_hw_addr *ha;

	if (dev->flags & IFF_PROMISC) {		/* Set promiscuous. */
		rx_mode = 0x1C;
		iowrite32(0xffffffff, ioaddr + MulticastFilter0);
		iowrite32(0xffffffff, ioaddr + MulticastFilter1);
	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
		   (dev->flags & IFF_ALLMULTI)) {
		/* Too many to match, or accept all multicasts. */
		iowrite32(0xffffffff, ioaddr + MulticastFilter0);
		iowrite32(0xffffffff, ioaddr + MulticastFilter1);
	} else if (rp->pdev->revision >= VT6105M) {
		int i = 0;
		u32 mCAMmask = 0;	/* 32 mCAMs (6105M and better) */
		netdev_for_each_mc_addr(ha, dev) {
			if (i == MCAM_SIZE)
				break;
			rhine_set_cam(ioaddr, i, ha->addr);
			mCAMmask |= 1 << i;
			i++;
		}
		rhine_set_cam_mask(ioaddr, mCAMmask);
	} else {
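		/* 64-bit multicast hash filter: the top six bits of each
		 * address's Ethernet CRC select one of 64 bits spread across
		 * the two 32-bit filter registers. */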
		memset(mc_filter, 0, sizeof(mc_filter));
		netdev_for_each_mc_addr(ha, dev) {
			int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;

			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
		}
		iowrite32(mc_filter[0], ioaddr + MulticastFilter0);
		iowrite32(mc_filter[1], ioaddr + MulticastFilter1);
	}
	/* enable/disable VLAN receive filtering */
	if (rp->pdev->revision >= VT6105M) {
		if (dev->flags & IFF_PROMISC)
			BYTE_REG_BITS_OFF(BCR1_VIDFR, ioaddr + PCIBusConfig1);
		else
			BYTE_REG_BITS_ON(BCR1_VIDFR, ioaddr + PCIBusConfig1);
	}
	BYTE_REG_BITS_ON(rx_mode, ioaddr + RxConfig);
}

static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct rhine_private *rp = netdev_priv(dev);

	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(rp->pdev), sizeof(info->bus_info));
}

static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct rhine_private *rp = netdev_priv(dev);
	int rc;

	mutex_lock(&rp->task_lock);
	rc = mii_ethtool_gset(&rp->mii_if, cmd);
	mutex_unlock(&rp->task_lock);

	return rc;
}

static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct rhine_private *rp = netdev_priv(dev);
	int rc;

	mutex_lock(&rp->task_lock);
	rc = mii_ethtool_sset(&rp->mii_if, cmd);
	rhine_set_carrier(&rp->mii_if);
	mutex_unlock(&rp->task_lock);

	return rc;
}

static int netdev_nway_reset(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);

	return mii_nway_restart(&rp->mii_if);
}

static u32 netdev_get_link(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);

	return mii_link_ok(&rp->mii_if);
}

static u32 netdev_get_msglevel(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);

	return rp->msg_enable;
}

static void netdev_set_msglevel(struct net_device *dev, u32 value)
{
	struct rhine_private *rp = netdev_priv(dev);

	rp->msg_enable = value;
}

static void rhine_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct rhine_private *rp = netdev_priv(dev);

	if (!(rp->quirks & rqWOL))
		return;

	spin_lock_irq(&rp->lock);
	wol->supported = WAKE_PHY | WAKE_MAGIC |
			 WAKE_UCAST | WAKE_MCAST | WAKE_BCAST;	/* Untested */
	wol->wolopts = rp->wolopts;
	spin_unlock_irq(&rp->lock);
}

static int rhine_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct rhine_private *rp = netdev_priv(dev);
	u32 support = WAKE_PHY | WAKE_MAGIC |
		      WAKE_UCAST | WAKE_MCAST | WAKE_BCAST;	/* Untested */

	if (!(rp->quirks & rqWOL))
		return -EINVAL;

	if (wol->wolopts & ~support)
		return -EINVAL;

	spin_lock_irq(&rp->lock);
	rp->wolopts = wol->wolopts;
	spin_unlock_irq(&rp->lock);

	return 0;
}

static const struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo		= netdev_get_drvinfo,
	.get_settings		= netdev_get_settings,
	.set_settings		= netdev_set_settings,
	.nway_reset		= netdev_nway_reset,
	.get_link		= netdev_get_link,
	.get_msglevel		= netdev_get_msglevel,
	.set_msglevel		= netdev_set_msglevel,
	.get_wol		= rhine_get_wol,
	.set_wol		= rhine_set_wol,
};

static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct rhine_private *rp = netdev_priv(dev);
	int rc;

	if (!netif_running(dev))
		return -EINVAL;

	mutex_lock(&rp->task_lock);
	rc = generic_mii_ioctl(&rp->mii_if, if_mii(rq), cmd, NULL);
	rhine_set_carrier(&rp->mii_if);
	mutex_unlock(&rp->task_lock);

	return rc;
}

static int rhine_close(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	rhine_task_disable(rp);
	napi_disable(&rp->napi);
	netif_stop_queue(dev);

	netif_dbg(rp, ifdown, dev, "Shutting down ethercard, status was %04x\n",
		  ioread16(ioaddr + ChipCmd));

	/* Switch to loopback mode to avoid hardware races. */
	iowrite8(rp->tx_thresh | 0x02, ioaddr + TxConfig);

	rhine_irq_disable(rp);

	/* Stop the chip's Tx and Rx processes. */
	iowrite16(CmdStop, ioaddr + ChipCmd);

	free_irq(rp->pdev->irq, dev);
	free_rbufs(dev);
	free_tbufs(dev);
	free_ring(dev);

	return 0;
}

static void rhine_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rhine_private *rp = netdev_priv(dev);

	unregister_netdev(dev);

	pci_iounmap(pdev, rp->base);
	pci_release_regions(pdev);

	free_netdev(dev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

static void rhine_shutdown(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	if (!(rp->quirks & rqWOL))
		return; /* Nothing to do for non-WOL adapters */

	rhine_power_init(dev);

	/* Make sure we use pattern 0, 1 and not 4, 5 */
	if (rp->quirks & rq6patterns)
		iowrite8(0x04, ioaddr + WOLcgClr);

	spin_lock(&rp->lock);

	if (rp->wolopts & WAKE_MAGIC) {
		iowrite8(WOLmagic, ioaddr + WOLcrSet);
		/*
		 * Turn EEPROM-controlled wake-up back on -- some hardware may
		 * not cooperate otherwise.
		 */
		iowrite8(ioread8(ioaddr + ConfigA) | 0x03, ioaddr + ConfigA);
	}

	if (rp->wolopts & (WAKE_BCAST|WAKE_MCAST))
		iowrite8(WOLbmcast, ioaddr + WOLcgSet);

	if (rp->wolopts & WAKE_PHY)
		iowrite8(WOLlnkon | WOLlnkoff, ioaddr + WOLcrSet);

	if (rp->wolopts & WAKE_UCAST)
		iowrite8(WOLucast, ioaddr + WOLcrSet);

	if (rp->wolopts) {
		/* Enable legacy WOL (for old motherboards) */
		iowrite8(0x01, ioaddr + PwcfgSet);
		iowrite8(ioread8(ioaddr + StickyHW) | 0x04, ioaddr + StickyHW);
	}

	spin_unlock(&rp->lock);

	if (system_state == SYSTEM_POWER_OFF && !avoid_D3) {
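		/* Only for a real power-off: set the StickyHW bits (so the
		 * wake-up configuration presumably survives the power-state
		 * change), arm PCI wake-up and enter D3hot. */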
		iowrite8(ioread8(ioaddr + StickyHW) | 0x03, ioaddr + StickyHW);

		pci_wake_from_d3(pdev, true);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}

#ifdef CONFIG_PM_SLEEP
static int rhine_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rhine_private *rp = netdev_priv(dev);

	if (!netif_running(dev))
		return 0;

	rhine_task_disable(rp);
	rhine_irq_disable(rp);
	napi_disable(&rp->napi);

	netif_device_detach(dev);

	rhine_shutdown(pdev);

	return 0;
}

static int rhine_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rhine_private *rp = netdev_priv(dev);

	if (!netif_running(dev))
		return 0;

#ifdef USE_MMIO
	enable_mmio(rp->pioaddr, rp->quirks);
#endif
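	/* Ring state may be stale after suspend: rebuild the Tx/Rx rings
	 * from scratch before reprogramming the chip registers. */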
	rhine_power_init(dev);
	free_tbufs(dev);
	free_rbufs(dev);
	alloc_tbufs(dev);
	alloc_rbufs(dev);
	rhine_task_enable(rp);
	spin_lock_bh(&rp->lock);
	init_registers(dev);
	spin_unlock_bh(&rp->lock);

	netif_device_attach(dev);

	return 0;
}

static SIMPLE_DEV_PM_OPS(rhine_pm_ops, rhine_suspend, rhine_resume);
#define RHINE_PM_OPS	(&rhine_pm_ops)

#else

#define RHINE_PM_OPS	NULL

#endif /* !CONFIG_PM_SLEEP */

static struct pci_driver rhine_driver = {
	.name		= DRV_NAME,
	.id_table	= rhine_pci_tbl,
	.probe		= rhine_init_one,
	.remove		= rhine_remove_one,
	.shutdown	= rhine_shutdown,
	.driver.pm	= RHINE_PM_OPS,
};

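/* Boards whose BIOSes cannot bring the chip back out of D3, so PXE boot
 * fails unless avoid_D3 is set (see rhine_init below). */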
static struct dmi_system_id __initdata rhine_dmi_table[] = {
	{
		.ident = "EPIA-M",
		.matches = {
			DMI_MATCH(DMI_BIOS_VENDOR, "Award Software International, Inc."),
			DMI_MATCH(DMI_BIOS_VERSION, "6.00 PG"),
		},
	},
	{
		.ident = "KV7",
		.matches = {
			DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies, LTD"),
			DMI_MATCH(DMI_BIOS_VERSION, "6.00 PG"),
		},
	},
	{ NULL }
};

static int __init rhine_init(void)
{
/* when a module, this is printed whether or not devices are found in probe */
#ifdef MODULE
	pr_info("%s\n", version);
#endif
	if (dmi_check_system(rhine_dmi_table)) {
		/* these BIOSes fail at PXE boot if chip is in D3 */
		avoid_D3 = true;
		pr_warn("Broken BIOS detected, avoid_D3 enabled\n");
	} else if (avoid_D3) {
		pr_info("avoid_D3 set\n");
	}

	return pci_register_driver(&rhine_driver);
}

static void __exit rhine_cleanup(void)
{
	pci_unregister_driver(&rhine_driver);
}

module_init(rhine_init);
module_exit(rhine_cleanup);