1 /* via-rhine.c: A Linux Ethernet device driver for VIA Rhine family chips. */
2 /*
3 	Written 1998-2001 by Donald Becker.
4 
5 	Current Maintainer: Roger Luethi <rl@hellgate.ch>
6 
7 	This software may be used and distributed according to the terms of
8 	the GNU General Public License (GPL), incorporated herein by reference.
9 	Drivers based on or derived from this code fall under the GPL and must
10 	retain the authorship, copyright and license notice.  This file is not
11 	a complete program and may only be used when the entire operating
12 	system is licensed under the GPL.
13 
14 	This driver is designed for the VIA VT86C100A Rhine-I.
15 	It also works with the Rhine-II (6102) and Rhine-III (6105/6105L/6105LOM
16 	and management NIC 6105M).
17 
18 	The author may be reached as becker@scyld.com, or C/O
19 	Scyld Computing Corporation
20 	410 Severn Ave., Suite 210
21 	Annapolis MD 21403
22 
23 
24 	This driver contains some changes from the original Donald Becker
25 	version. He may or may not be interested in bug reports on this
26 	code. You can find his versions at:
27 	http://www.scyld.com/network/via-rhine.html
28 	[link no longer provides useful info -jgarzik]
29 
30 */
31 
32 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
33 
34 #define DRV_NAME	"via-rhine"
35 #define DRV_VERSION	"1.5.0"
36 #define DRV_RELDATE	"2010-10-09"
37 
38 #include <linux/types.h>
39 
40 /* A few user-configurable values.
41    These may be modified when a driver module is loaded. */
42 static int debug = 0;
43 #define RHINE_MSG_DEFAULT \
44         (0x0000)
45 
46 /* Set the copy breakpoint for the copy-only-tiny-frames scheme.
47    Setting to > 1518 effectively disables this feature. */
48 #if defined(__alpha__) || defined(__arm__) || defined(__hppa__) || \
49 	defined(CONFIG_SPARC) || defined(__ia64__) ||		   \
50 	defined(__sh__) || defined(__mips__)
51 static int rx_copybreak = 1518;
52 #else
53 static int rx_copybreak;
54 #endif
55 
56 /* Work-around for broken BIOSes: they are unable to get the chip back out of
57    power state D3 so PXE booting fails. bootparam(7): via-rhine.avoid_D3=1 */
58 static bool avoid_D3;
59 
60 /*
61  * In case you are looking for 'options[]' or 'full_duplex[]', they
62  * are gone. Use ethtool(8) instead.
63  */
64 
65 /* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
66    The Rhine has a 64 element 8390-like hash table. */
67 static const int multicast_filter_limit = 32;
68 
69 
70 /* Operational parameters that are set at compile time. */
71 
72 /* Keep the ring sizes a power of two for compile efficiency.
73    The compiler will convert <unsigned>'%'<2^N> into a bit mask.
74    Making the Tx ring too large decreases the effectiveness of channel
75    bonding and packet priority.
76    There are no ill effects from too-large receive rings. */
77 #define TX_RING_SIZE	16
78 #define TX_QUEUE_LEN	10	/* Limit ring entries actually used. */
79 #define RX_RING_SIZE	64
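
/*
 * For example, with TX_RING_SIZE = 16 an index expression such as
 * "cur_tx % TX_RING_SIZE" can be compiled down to "cur_tx & 15",
 * so no division is needed to wrap around the ring.
 */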
80 
81 /* Operational parameters that usually are not changed. */
82 
83 /* Time in jiffies before concluding the transmitter is hung. */
84 #define TX_TIMEOUT	(2*HZ)
85 
86 #define PKT_BUF_SZ	1536	/* Size of each temporary Rx buffer.*/
87 
88 #include <linux/module.h>
89 #include <linux/moduleparam.h>
90 #include <linux/kernel.h>
91 #include <linux/string.h>
92 #include <linux/timer.h>
93 #include <linux/errno.h>
94 #include <linux/ioport.h>
95 #include <linux/interrupt.h>
96 #include <linux/pci.h>
97 #include <linux/dma-mapping.h>
98 #include <linux/netdevice.h>
99 #include <linux/etherdevice.h>
100 #include <linux/skbuff.h>
101 #include <linux/init.h>
102 #include <linux/delay.h>
103 #include <linux/mii.h>
104 #include <linux/ethtool.h>
105 #include <linux/crc32.h>
106 #include <linux/if_vlan.h>
107 #include <linux/bitops.h>
108 #include <linux/workqueue.h>
109 #include <asm/processor.h>	/* Processor type for cache alignment. */
110 #include <asm/io.h>
111 #include <asm/irq.h>
112 #include <asm/uaccess.h>
113 #include <linux/dmi.h>
114 
115 /* These identify the driver base version and may not be removed. */
116 static const char version[] =
117 	"v1.10-LK" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker";
118 
119 /* This driver was written to use PCI memory space. Some early versions
120    of the Rhine may only work correctly with I/O space accesses. */
#ifdef CONFIG_VIA_RHINE_MMIO
#define USE_MMIO
#endif
125 
126 MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
127 MODULE_DESCRIPTION("VIA Rhine PCI Fast Ethernet driver");
128 MODULE_LICENSE("GPL");
129 
130 module_param(debug, int, 0);
131 module_param(rx_copybreak, int, 0);
132 module_param(avoid_D3, bool, 0);
133 MODULE_PARM_DESC(debug, "VIA Rhine debug message flags");
134 MODULE_PARM_DESC(rx_copybreak, "VIA Rhine copy breakpoint for copy-only-tiny-frames");
135 MODULE_PARM_DESC(avoid_D3, "Avoid power state D3 (work-around for broken BIOSes)");
136 
137 #define MCAM_SIZE	32
138 #define VCAM_SIZE	32
139 
140 /*
141 		Theory of Operation
142 
143 I. Board Compatibility
144 
This driver is designed for the VIA VT86C100A Rhine-I PCI Fast Ethernet
controller. It also supports the later Rhine-II and Rhine-III chips.
147 
148 II. Board-specific settings
149 
150 Boards with this chip are functional only in a bus-master PCI slot.
151 
152 Many operational settings are loaded from the EEPROM to the Config word at
153 offset 0x78. For most of these settings, this driver assumes that they are
154 correct.
If this driver is compiled to use PCI memory space operations, the EEPROM
must be configured to enable memory ops.
157 
158 III. Driver operation
159 
160 IIIa. Ring buffers
161 
162 This driver uses two statically allocated fixed-size descriptor lists
163 formed into rings by a branch from the final descriptor to the beginning of
164 the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
165 
166 IIIb/c. Transmit/Receive Structure
167 
168 This driver attempts to use a zero-copy receive and transmit scheme.
169 
170 Alas, all data buffers are required to start on a 32 bit boundary, so
171 the driver must often copy transmit packets into bounce buffers.
172 
173 The driver allocates full frame size skbuffs for the Rx ring buffers at
174 open() time and passes the skb->data field to the chip as receive data
175 buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
176 a fresh skbuff is allocated and the frame is copied to the new skbuff.
177 When the incoming frame is larger, the skbuff is passed directly up the
178 protocol stack. Buffers consumed this way are replaced by newly allocated
179 skbuffs in the last phase of rhine_rx().
180 
181 The RX_COPYBREAK value is chosen to trade-off the memory wasted by
182 using a full-sized skbuff for small frames vs. the copying costs of larger
183 frames. New boards are typically used in generously configured machines
184 and the underfilled buffers have negligible impact compared to the benefit of
185 a single allocation size, so the default value of zero results in never
186 copying packets. When copying is done, the cost is usually mitigated by using
187 a combined copy/checksum routine. Copying also preloads the cache, which is
188 most useful with small frames.
189 
190 Since the VIA chips are only able to transfer data to buffers on 32 bit
191 boundaries, the IP header at offset 14 in an ethernet frame isn't
192 longword aligned for further processing. Copying these unaligned buffers
193 has the beneficial effect of 16-byte aligning the IP header.
194 
195 IIId. Synchronization
196 
197 The driver runs as two independent, single-threaded flows of control. One
198 is the send-packet routine, which enforces single-threaded use by the
199 netdev_priv(dev)->lock spinlock. The other thread is the interrupt handler,
200 which is single threaded by the hardware and interrupt handling software.
201 
202 The send packet thread has partial control over the Tx ring. It locks the
203 netdev_priv(dev)->lock whenever it's queuing a Tx packet. If the next slot in
204 the ring is not available it stops the transmit queue by
205 calling netif_stop_queue.
206 
207 The interrupt handler has exclusive control over the Rx ring and records stats
208 from the Tx ring. After reaping the stats, it marks the Tx queue entry as
empty by incrementing the dirty_tx mark. If the transmit queue was stopped,
it is woken up again once enough Tx ring entries have been freed.
211 
212 IV. Notes
213 
214 IVb. References
215 
216 Preliminary VT86C100A manual from http://www.via.com.tw/
217 http://www.scyld.com/expert/100mbps.html
218 http://www.scyld.com/expert/NWay.html
219 ftp://ftp.via.com.tw/public/lan/Products/NIC/VT86C100A/Datasheet/VT86C100A03.pdf
220 ftp://ftp.via.com.tw/public/lan/Products/NIC/VT6102/Datasheet/VT6102_021.PDF
221 
222 
223 IVc. Errata
224 
The VT86C100A manual is not a reliable source of information.
226 The 3043 chip does not handle unaligned transmit or receive buffers, resulting
227 in significant performance degradation for bounce buffer copies on transmit
228 and unaligned IP headers on receive.
229 The chip does not pad to minimum transmit length.
230 
231 */
232 
233 
234 /* This table drives the PCI probe routines. It's mostly boilerplate in all
235    of the drivers, and will likely be provided by some future kernel.
   Note the matching code -- every entry here matches on vendor and device ID
   only and accepts any subsystem vendor/device IDs.
238 */
239 
240 enum rhine_revs {
241 	VT86C100A	= 0x00,
242 	VTunknown0	= 0x20,
243 	VT6102		= 0x40,
244 	VT8231		= 0x50,	/* Integrated MAC */
245 	VT8233		= 0x60,	/* Integrated MAC */
246 	VT8235		= 0x74,	/* Integrated MAC */
247 	VT8237		= 0x78,	/* Integrated MAC */
248 	VTunknown1	= 0x7C,
249 	VT6105		= 0x80,
250 	VT6105_B0	= 0x83,
251 	VT6105L		= 0x8A,
252 	VT6107		= 0x8C,
253 	VTunknown2	= 0x8E,
254 	VT6105M		= 0x90,	/* Management adapter */
255 };
256 
257 enum rhine_quirks {
258 	rqWOL		= 0x0001,	/* Wake-On-LAN support */
259 	rqForceReset	= 0x0002,
260 	rq6patterns	= 0x0040,	/* 6 instead of 4 patterns for WOL */
261 	rqStatusWBRace	= 0x0080,	/* Tx Status Writeback Error possible */
262 	rqRhineI	= 0x0100,	/* See comment below */
263 };
264 /*
265  * rqRhineI: VT86C100A (aka Rhine-I) uses different bits to enable
266  * MMIO as well as for the collision counter and the Tx FIFO underflow
 * indicator. In addition, Tx and Rx buffers need to be 4-byte aligned.
268  */
269 
270 /* Beware of PCI posted writes */
271 #define IOSYNC	do { ioread8(ioaddr + StationAddr); } while (0)
272 
273 static DEFINE_PCI_DEVICE_TABLE(rhine_pci_tbl) = {
274 	{ 0x1106, 0x3043, PCI_ANY_ID, PCI_ANY_ID, },	/* VT86C100A */
275 	{ 0x1106, 0x3065, PCI_ANY_ID, PCI_ANY_ID, },	/* VT6102 */
276 	{ 0x1106, 0x3106, PCI_ANY_ID, PCI_ANY_ID, },	/* 6105{,L,LOM} */
277 	{ 0x1106, 0x3053, PCI_ANY_ID, PCI_ANY_ID, },	/* VT6105M */
278 	{ }	/* terminate list */
279 };
280 MODULE_DEVICE_TABLE(pci, rhine_pci_tbl);
281 
282 
283 /* Offsets to the device registers. */
284 enum register_offsets {
285 	StationAddr=0x00, RxConfig=0x06, TxConfig=0x07, ChipCmd=0x08,
286 	ChipCmd1=0x09, TQWake=0x0A,
287 	IntrStatus=0x0C, IntrEnable=0x0E,
288 	MulticastFilter0=0x10, MulticastFilter1=0x14,
289 	RxRingPtr=0x18, TxRingPtr=0x1C, GFIFOTest=0x54,
290 	MIIPhyAddr=0x6C, MIIStatus=0x6D, PCIBusConfig=0x6E, PCIBusConfig1=0x6F,
291 	MIICmd=0x70, MIIRegAddr=0x71, MIIData=0x72, MACRegEEcsr=0x74,
292 	ConfigA=0x78, ConfigB=0x79, ConfigC=0x7A, ConfigD=0x7B,
293 	RxMissed=0x7C, RxCRCErrs=0x7E, MiscCmd=0x81,
294 	StickyHW=0x83, IntrStatus2=0x84,
295 	CamMask=0x88, CamCon=0x92, CamAddr=0x93,
296 	WOLcrSet=0xA0, PwcfgSet=0xA1, WOLcgSet=0xA3, WOLcrClr=0xA4,
297 	WOLcrClr1=0xA6, WOLcgClr=0xA7,
298 	PwrcsrSet=0xA8, PwrcsrSet1=0xA9, PwrcsrClr=0xAC, PwrcsrClr1=0xAD,
299 };
300 
301 /* Bits in ConfigD */
302 enum backoff_bits {
303 	BackOptional=0x01, BackModify=0x02,
304 	BackCaptureEffect=0x04, BackRandom=0x08
305 };
306 
307 /* Bits in the TxConfig (TCR) register */
308 enum tcr_bits {
309 	TCR_PQEN=0x01,
310 	TCR_LB0=0x02,		/* loopback[0] */
311 	TCR_LB1=0x04,		/* loopback[1] */
312 	TCR_OFSET=0x08,
313 	TCR_RTGOPT=0x10,
314 	TCR_RTFT0=0x20,
315 	TCR_RTFT1=0x40,
316 	TCR_RTSF=0x80,
317 };
318 
319 /* Bits in the CamCon (CAMC) register */
320 enum camcon_bits {
321 	CAMC_CAMEN=0x01,
322 	CAMC_VCAMSL=0x02,
323 	CAMC_CAMWR=0x04,
324 	CAMC_CAMRD=0x08,
325 };
326 
327 /* Bits in the PCIBusConfig1 (BCR1) register */
328 enum bcr1_bits {
329 	BCR1_POT0=0x01,
330 	BCR1_POT1=0x02,
331 	BCR1_POT2=0x04,
332 	BCR1_CTFT0=0x08,
333 	BCR1_CTFT1=0x10,
334 	BCR1_CTSF=0x20,
335 	BCR1_TXQNOBK=0x40,	/* for VT6105 */
336 	BCR1_VIDFR=0x80,	/* for VT6105 */
337 	BCR1_MED0=0x40,		/* for VT6102 */
338 	BCR1_MED1=0x80,		/* for VT6102 */
339 };
340 
341 #ifdef USE_MMIO
/* Registers we check to make sure MMIO and PIO accesses match. */
343 static const int mmio_verify_registers[] = {
344 	RxConfig, TxConfig, IntrEnable, ConfigA, ConfigB, ConfigC, ConfigD,
345 	0
346 };
347 #endif
348 
349 /* Bits in the interrupt status/mask registers. */
350 enum intr_status_bits {
351 	IntrRxDone	= 0x0001,
352 	IntrTxDone	= 0x0002,
353 	IntrRxErr	= 0x0004,
354 	IntrTxError	= 0x0008,
355 	IntrRxEmpty	= 0x0020,
356 	IntrPCIErr	= 0x0040,
357 	IntrStatsMax	= 0x0080,
358 	IntrRxEarly	= 0x0100,
359 	IntrTxUnderrun	= 0x0210,
360 	IntrRxOverflow	= 0x0400,
361 	IntrRxDropped	= 0x0800,
362 	IntrRxNoBuf	= 0x1000,
363 	IntrTxAborted	= 0x2000,
364 	IntrLinkChange	= 0x4000,
365 	IntrRxWakeUp	= 0x8000,
366 	IntrTxDescRace		= 0x080000,	/* mapped from IntrStatus2 */
367 	IntrNormalSummary	= IntrRxDone | IntrTxDone,
368 	IntrTxErrSummary	= IntrTxDescRace | IntrTxAborted | IntrTxError |
369 				  IntrTxUnderrun,
370 };
371 
372 /* Bits in WOLcrSet/WOLcrClr and PwrcsrSet/PwrcsrClr */
373 enum wol_bits {
374 	WOLucast	= 0x10,
375 	WOLmagic	= 0x20,
376 	WOLbmcast	= 0x30,
377 	WOLlnkon	= 0x40,
378 	WOLlnkoff	= 0x80,
379 };
380 
381 /* The Rx and Tx buffer descriptors. */
382 struct rx_desc {
383 	__le32 rx_status;
384 	__le32 desc_length; /* Chain flag, Buffer/frame length */
385 	__le32 addr;
386 	__le32 next_desc;
387 };
388 struct tx_desc {
389 	__le32 tx_status;
390 	__le32 desc_length; /* Chain flag, Tx Config, Frame length */
391 	__le32 addr;
392 	__le32 next_desc;
393 };
394 
395 /* Initial value for tx_desc.desc_length, Buffer size goes to bits 0-10 */
396 #define TXDESC		0x00e08000
397 
398 enum rx_status_bits {
399 	RxOK=0x8000, RxWholePkt=0x0300, RxErr=0x008F
400 };
401 
402 /* Bits in *_desc.*_status */
403 enum desc_status_bits {
404 	DescOwn=0x80000000
405 };
406 
407 /* Bits in *_desc.*_length */
408 enum desc_length_bits {
409 	DescTag=0x00010000
410 };
411 
412 /* Bits in ChipCmd. */
413 enum chip_cmd_bits {
414 	CmdInit=0x01, CmdStart=0x02, CmdStop=0x04, CmdRxOn=0x08,
415 	CmdTxOn=0x10, Cmd1TxDemand=0x20, CmdRxDemand=0x40,
416 	Cmd1EarlyRx=0x01, Cmd1EarlyTx=0x02, Cmd1FDuplex=0x04,
417 	Cmd1NoTxPoll=0x08, Cmd1Reset=0x80,
418 };
419 
420 struct rhine_stats {
421 	u64		packets;
422 	u64		bytes;
423 	struct u64_stats_sync syncp;
424 };
425 
426 struct rhine_private {
427 	/* Bit mask for configured VLAN ids */
428 	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
429 
430 	/* Descriptor rings */
431 	struct rx_desc *rx_ring;
432 	struct tx_desc *tx_ring;
433 	dma_addr_t rx_ring_dma;
434 	dma_addr_t tx_ring_dma;
435 
436 	/* The addresses of receive-in-place skbuffs. */
437 	struct sk_buff *rx_skbuff[RX_RING_SIZE];
438 	dma_addr_t rx_skbuff_dma[RX_RING_SIZE];
439 
440 	/* The saved address of a sent-in-place packet/buffer, for later free(). */
441 	struct sk_buff *tx_skbuff[TX_RING_SIZE];
442 	dma_addr_t tx_skbuff_dma[TX_RING_SIZE];
443 
444 	/* Tx bounce buffers (Rhine-I only) */
445 	unsigned char *tx_buf[TX_RING_SIZE];
446 	unsigned char *tx_bufs;
447 	dma_addr_t tx_bufs_dma;
448 
449 	struct pci_dev *pdev;
450 	long pioaddr;
451 	struct net_device *dev;
452 	struct napi_struct napi;
453 	spinlock_t lock;
454 	struct mutex task_lock;
455 	bool task_enable;
456 	struct work_struct slow_event_task;
457 	struct work_struct reset_task;
458 
459 	u32 msg_enable;
460 
461 	/* Frequently used values: keep some adjacent for cache effect. */
462 	u32 quirks;
463 	struct rx_desc *rx_head_desc;
464 	unsigned int cur_rx, dirty_rx;	/* Producer/consumer ring indices */
465 	unsigned int cur_tx, dirty_tx;
466 	unsigned int rx_buf_sz;		/* Based on MTU+slack. */
467 	struct rhine_stats rx_stats;
468 	struct rhine_stats tx_stats;
469 	u8 wolopts;
470 
471 	u8 tx_thresh, rx_thresh;
472 
473 	struct mii_if_info mii_if;
474 	void __iomem *base;
475 };
476 
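/*
 * Read-modify-write helpers for setting, testing, clearing and masked
 * updating of bits in byte/word/dword chip registers via the
 * ioread/iowrite accessors.
 */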
477 #define BYTE_REG_BITS_ON(x, p)      do { iowrite8((ioread8((p))|(x)), (p)); } while (0)
478 #define WORD_REG_BITS_ON(x, p)      do { iowrite16((ioread16((p))|(x)), (p)); } while (0)
479 #define DWORD_REG_BITS_ON(x, p)     do { iowrite32((ioread32((p))|(x)), (p)); } while (0)
480 
481 #define BYTE_REG_BITS_IS_ON(x, p)   (ioread8((p)) & (x))
482 #define WORD_REG_BITS_IS_ON(x, p)   (ioread16((p)) & (x))
483 #define DWORD_REG_BITS_IS_ON(x, p)  (ioread32((p)) & (x))
484 
485 #define BYTE_REG_BITS_OFF(x, p)     do { iowrite8(ioread8((p)) & (~(x)), (p)); } while (0)
486 #define WORD_REG_BITS_OFF(x, p)     do { iowrite16(ioread16((p)) & (~(x)), (p)); } while (0)
487 #define DWORD_REG_BITS_OFF(x, p)    do { iowrite32(ioread32((p)) & (~(x)), (p)); } while (0)
488 
489 #define BYTE_REG_BITS_SET(x, m, p)   do { iowrite8((ioread8((p)) & (~(m)))|(x), (p)); } while (0)
490 #define WORD_REG_BITS_SET(x, m, p)   do { iowrite16((ioread16((p)) & (~(m)))|(x), (p)); } while (0)
491 #define DWORD_REG_BITS_SET(x, m, p)  do { iowrite32((ioread32((p)) & (~(m)))|(x), (p)); } while (0)
492 
493 
494 static int  mdio_read(struct net_device *dev, int phy_id, int location);
495 static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
496 static int  rhine_open(struct net_device *dev);
497 static void rhine_reset_task(struct work_struct *work);
498 static void rhine_slow_event_task(struct work_struct *work);
499 static void rhine_tx_timeout(struct net_device *dev);
500 static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
501 				  struct net_device *dev);
502 static irqreturn_t rhine_interrupt(int irq, void *dev_instance);
503 static void rhine_tx(struct net_device *dev);
504 static int rhine_rx(struct net_device *dev, int limit);
505 static void rhine_set_rx_mode(struct net_device *dev);
506 static struct rtnl_link_stats64 *rhine_get_stats64(struct net_device *dev,
507 	       struct rtnl_link_stats64 *stats);
508 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
509 static const struct ethtool_ops netdev_ethtool_ops;
510 static int  rhine_close(struct net_device *dev);
511 static int rhine_vlan_rx_add_vid(struct net_device *dev,
512 				 __be16 proto, u16 vid);
513 static int rhine_vlan_rx_kill_vid(struct net_device *dev,
514 				  __be16 proto, u16 vid);
515 static void rhine_restart_tx(struct net_device *dev);
516 
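/*
 * Poll a chip register until the bits in 'mask' are set (low == false)
 * or cleared (low == true). Gives up after 1024 iterations of 10 us and
 * logs a debug message if the wait took more than 64 iterations.
 */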
517 static void rhine_wait_bit(struct rhine_private *rp, u8 reg, u8 mask, bool low)
518 {
519 	void __iomem *ioaddr = rp->base;
520 	int i;
521 
522 	for (i = 0; i < 1024; i++) {
523 		bool has_mask_bits = !!(ioread8(ioaddr + reg) & mask);
524 
525 		if (low ^ has_mask_bits)
526 			break;
527 		udelay(10);
528 	}
529 	if (i > 64) {
530 		netif_dbg(rp, hw, rp->dev, "%s bit wait (%02x/%02x) cycle "
531 			  "count: %04d\n", low ? "low" : "high", reg, mask, i);
532 	}
533 }
534 
535 static void rhine_wait_bit_high(struct rhine_private *rp, u8 reg, u8 mask)
536 {
537 	rhine_wait_bit(rp, reg, mask, false);
538 }
539 
540 static void rhine_wait_bit_low(struct rhine_private *rp, u8 reg, u8 mask)
541 {
542 	rhine_wait_bit(rp, reg, mask, true);
543 }
544 
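/*
 * Gather pending interrupt events into a single word. On chips with the
 * Tx status write-back race quirk, the IntrStatus2 bits are folded into
 * bits 16-23 of the result.
 */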
545 static u32 rhine_get_events(struct rhine_private *rp)
546 {
547 	void __iomem *ioaddr = rp->base;
548 	u32 intr_status;
549 
550 	intr_status = ioread16(ioaddr + IntrStatus);
551 	/* On Rhine-II, Bit 3 indicates Tx descriptor write-back race. */
552 	if (rp->quirks & rqStatusWBRace)
553 		intr_status |= ioread8(ioaddr + IntrStatus2) << 16;
554 	return intr_status;
555 }
556 
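/*
 * Acknowledge interrupt events by writing the given mask back to
 * IntrStatus (and IntrStatus2 where applicable).
 */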
557 static void rhine_ack_events(struct rhine_private *rp, u32 mask)
558 {
559 	void __iomem *ioaddr = rp->base;
560 
561 	if (rp->quirks & rqStatusWBRace)
562 		iowrite8(mask >> 16, ioaddr + IntrStatus2);
563 	iowrite16(mask, ioaddr + IntrStatus);
564 	mmiowb();
565 }
566 
567 /*
568  * Get power related registers into sane state.
569  * Notify user about past WOL event.
570  */
571 static void rhine_power_init(struct net_device *dev)
572 {
573 	struct rhine_private *rp = netdev_priv(dev);
574 	void __iomem *ioaddr = rp->base;
575 	u16 wolstat;
576 
577 	if (rp->quirks & rqWOL) {
578 		/* Make sure chip is in power state D0 */
579 		iowrite8(ioread8(ioaddr + StickyHW) & 0xFC, ioaddr + StickyHW);
580 
581 		/* Disable "force PME-enable" */
582 		iowrite8(0x80, ioaddr + WOLcgClr);
583 
584 		/* Clear power-event config bits (WOL) */
585 		iowrite8(0xFF, ioaddr + WOLcrClr);
586 		/* More recent cards can manage two additional patterns */
587 		if (rp->quirks & rq6patterns)
588 			iowrite8(0x03, ioaddr + WOLcrClr1);
589 
590 		/* Save power-event status bits */
591 		wolstat = ioread8(ioaddr + PwrcsrSet);
592 		if (rp->quirks & rq6patterns)
593 			wolstat |= (ioread8(ioaddr + PwrcsrSet1) & 0x03) << 8;
594 
595 		/* Clear power-event status bits */
596 		iowrite8(0xFF, ioaddr + PwrcsrClr);
597 		if (rp->quirks & rq6patterns)
598 			iowrite8(0x03, ioaddr + PwrcsrClr1);
599 
600 		if (wolstat) {
601 			char *reason;
602 			switch (wolstat) {
603 			case WOLmagic:
604 				reason = "Magic packet";
605 				break;
606 			case WOLlnkon:
607 				reason = "Link went up";
608 				break;
609 			case WOLlnkoff:
610 				reason = "Link went down";
611 				break;
612 			case WOLucast:
613 				reason = "Unicast packet";
614 				break;
615 			case WOLbmcast:
616 				reason = "Multicast/broadcast packet";
617 				break;
618 			default:
619 				reason = "Unknown";
620 			}
621 			netdev_info(dev, "Woke system up. Reason: %s\n",
622 				    reason);
623 		}
624 	}
625 }
626 
627 static void rhine_chip_reset(struct net_device *dev)
628 {
629 	struct rhine_private *rp = netdev_priv(dev);
630 	void __iomem *ioaddr = rp->base;
631 	u8 cmd1;
632 
633 	iowrite8(Cmd1Reset, ioaddr + ChipCmd1);
634 	IOSYNC;
635 
636 	if (ioread8(ioaddr + ChipCmd1) & Cmd1Reset) {
637 		netdev_info(dev, "Reset not complete yet. Trying harder.\n");
638 
639 		/* Force reset */
640 		if (rp->quirks & rqForceReset)
641 			iowrite8(0x40, ioaddr + MiscCmd);
642 
643 		/* Reset can take somewhat longer (rare) */
644 		rhine_wait_bit_low(rp, ChipCmd1, Cmd1Reset);
645 	}
646 
647 	cmd1 = ioread8(ioaddr + ChipCmd1);
648 	netif_info(rp, hw, dev, "Reset %s\n", (cmd1 & Cmd1Reset) ?
649 		   "failed" : "succeeded");
650 }
651 
652 #ifdef USE_MMIO
653 static void enable_mmio(long pioaddr, u32 quirks)
654 {
655 	int n;
656 	if (quirks & rqRhineI) {
657 		/* More recent docs say that this bit is reserved ... */
658 		n = inb(pioaddr + ConfigA) | 0x20;
659 		outb(n, pioaddr + ConfigA);
660 	} else {
661 		n = inb(pioaddr + ConfigD) | 0x80;
662 		outb(n, pioaddr + ConfigD);
663 	}
664 }
665 #endif
666 
667 /*
668  * Loads bytes 0x00-0x05, 0x6E-0x6F, 0x78-0x7B from EEPROM
669  * (plus 0x6C for Rhine-I/II)
670  */
671 static void rhine_reload_eeprom(long pioaddr, struct net_device *dev)
672 {
673 	struct rhine_private *rp = netdev_priv(dev);
674 	void __iomem *ioaddr = rp->base;
675 	int i;
676 
677 	outb(0x20, pioaddr + MACRegEEcsr);
678 	for (i = 0; i < 1024; i++) {
679 		if (!(inb(pioaddr + MACRegEEcsr) & 0x20))
680 			break;
681 	}
682 	if (i > 512)
683 		pr_info("%4d cycles used @ %s:%d\n", i, __func__, __LINE__);
684 
685 #ifdef USE_MMIO
686 	/*
687 	 * Reloading from EEPROM overwrites ConfigA-D, so we must re-enable
688 	 * MMIO. If reloading EEPROM was done first this could be avoided, but
689 	 * it is not known if that still works with the "win98-reboot" problem.
690 	 */
691 	enable_mmio(pioaddr, rp->quirks);
692 #endif
693 
694 	/* Turn off EEPROM-controlled wake-up (magic packet) */
695 	if (rp->quirks & rqWOL)
696 		iowrite8(ioread8(ioaddr + ConfigA) & 0xFC, ioaddr + ConfigA);
697 
698 }
699 
700 #ifdef CONFIG_NET_POLL_CONTROLLER
701 static void rhine_poll(struct net_device *dev)
702 {
703 	struct rhine_private *rp = netdev_priv(dev);
704 	const int irq = rp->pdev->irq;
705 
706 	disable_irq(irq);
707 	rhine_interrupt(irq, dev);
708 	enable_irq(irq);
709 }
710 #endif
711 
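/*
 * Raise the Tx FIFO threshold in TxConfig by one step (0x20), up to a
 * maximum of 0xe0. Called after transmitter underruns and unspecified
 * Tx errors.
 */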
712 static void rhine_kick_tx_threshold(struct rhine_private *rp)
713 {
714 	if (rp->tx_thresh < 0xe0) {
715 		void __iomem *ioaddr = rp->base;
716 
717 		rp->tx_thresh += 0x20;
718 		BYTE_REG_BITS_SET(rp->tx_thresh, 0x80, ioaddr + TxConfig);
719 	}
720 }
721 
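/*
 * Handle the Tx error causes reported in the event status (abort,
 * underrun, descriptor write-back race or unspecified error) and
 * restart the transmitter afterwards.
 */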
722 static void rhine_tx_err(struct rhine_private *rp, u32 status)
723 {
724 	struct net_device *dev = rp->dev;
725 
726 	if (status & IntrTxAborted) {
727 		netif_info(rp, tx_err, dev,
728 			   "Abort %08x, frame dropped\n", status);
729 	}
730 
731 	if (status & IntrTxUnderrun) {
732 		rhine_kick_tx_threshold(rp);
		netif_info(rp, tx_err, dev, "Transmitter underrun, "
734 			   "Tx threshold now %02x\n", rp->tx_thresh);
735 	}
736 
737 	if (status & IntrTxDescRace)
738 		netif_info(rp, tx_err, dev, "Tx descriptor write-back race\n");
739 
740 	if ((status & IntrTxError) &&
741 	    (status & (IntrTxAborted | IntrTxUnderrun | IntrTxDescRace)) == 0) {
742 		rhine_kick_tx_threshold(rp);
743 		netif_info(rp, tx_err, dev, "Unspecified error. "
744 			   "Tx threshold now %02x\n", rp->tx_thresh);
745 	}
746 
747 	rhine_restart_tx(dev);
748 }
749 
static void rhine_update_rx_crc_and_missed_errors(struct rhine_private *rp)
751 {
752 	void __iomem *ioaddr = rp->base;
753 	struct net_device_stats *stats = &rp->dev->stats;
754 
755 	stats->rx_crc_errors    += ioread16(ioaddr + RxCRCErrs);
756 	stats->rx_missed_errors += ioread16(ioaddr + RxMissed);
757 
758 	/*
759 	 * Clears the "tally counters" for CRC errors and missed frames(?).
760 	 * It has been reported that some chips need a write of 0 to clear
761 	 * these, for others the counters are set to 1 when written to and
762 	 * instead cleared when read. So we clear them both ways ...
763 	 */
764 	iowrite32(0, ioaddr + RxMissed);
765 	ioread16(ioaddr + RxCRCErrs);
766 	ioread16(ioaddr + RxMissed);
767 }
768 
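/*
 * Interrupt events grouped by how they are handled: Rx, Tx and the
 * stats-full events are processed in NAPI context, while the slow
 * events (PCI error, link change) are deferred to slow_event_task.
 */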
769 #define RHINE_EVENT_NAPI_RX	(IntrRxDone | \
770 				 IntrRxErr | \
771 				 IntrRxEmpty | \
772 				 IntrRxOverflow	| \
773 				 IntrRxDropped | \
774 				 IntrRxNoBuf | \
775 				 IntrRxWakeUp)
776 
777 #define RHINE_EVENT_NAPI_TX_ERR	(IntrTxError | \
778 				 IntrTxAborted | \
779 				 IntrTxUnderrun | \
780 				 IntrTxDescRace)
781 #define RHINE_EVENT_NAPI_TX	(IntrTxDone | RHINE_EVENT_NAPI_TX_ERR)
782 
783 #define RHINE_EVENT_NAPI	(RHINE_EVENT_NAPI_RX | \
784 				 RHINE_EVENT_NAPI_TX | \
785 				 IntrStatsMax)
786 #define RHINE_EVENT_SLOW	(IntrPCIErr | IntrLinkChange)
787 #define RHINE_EVENT		(RHINE_EVENT_NAPI | RHINE_EVENT_SLOW)
788 
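/*
 * NAPI poll handler: acknowledge the fast events, receive up to 'budget'
 * frames, reap completed Tx descriptors, defer slow events to the
 * workqueue and re-enable interrupts once the budget was not exhausted.
 */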
789 static int rhine_napipoll(struct napi_struct *napi, int budget)
790 {
791 	struct rhine_private *rp = container_of(napi, struct rhine_private, napi);
792 	struct net_device *dev = rp->dev;
793 	void __iomem *ioaddr = rp->base;
794 	u16 enable_mask = RHINE_EVENT & 0xffff;
795 	int work_done = 0;
796 	u32 status;
797 
798 	status = rhine_get_events(rp);
799 	rhine_ack_events(rp, status & ~RHINE_EVENT_SLOW);
800 
801 	if (status & RHINE_EVENT_NAPI_RX)
802 		work_done += rhine_rx(dev, budget);
803 
804 	if (status & RHINE_EVENT_NAPI_TX) {
805 		if (status & RHINE_EVENT_NAPI_TX_ERR) {
			/* Avoid scavenging before the Tx engine is turned off */
807 			rhine_wait_bit_low(rp, ChipCmd, CmdTxOn);
808 			if (ioread8(ioaddr + ChipCmd) & CmdTxOn)
809 				netif_warn(rp, tx_err, dev, "Tx still on\n");
810 		}
811 
812 		rhine_tx(dev);
813 
814 		if (status & RHINE_EVENT_NAPI_TX_ERR)
815 			rhine_tx_err(rp, status);
816 	}
817 
818 	if (status & IntrStatsMax) {
819 		spin_lock(&rp->lock);
		rhine_update_rx_crc_and_missed_errors(rp);
821 		spin_unlock(&rp->lock);
822 	}
823 
824 	if (status & RHINE_EVENT_SLOW) {
825 		enable_mask &= ~RHINE_EVENT_SLOW;
826 		schedule_work(&rp->slow_event_task);
827 	}
828 
829 	if (work_done < budget) {
830 		napi_complete(napi);
831 		iowrite16(enable_mask, ioaddr + IntrEnable);
832 		mmiowb();
833 	}
834 	return work_done;
835 }
836 
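/*
 * Soft-reset the chip to erase any previous configuration and reload
 * the EEPROM-controlled registers; Rhine-I needs a short pause between
 * the two steps.
 */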
837 static void rhine_hw_init(struct net_device *dev, long pioaddr)
838 {
839 	struct rhine_private *rp = netdev_priv(dev);
840 
841 	/* Reset the chip to erase previous misconfiguration. */
842 	rhine_chip_reset(dev);
843 
844 	/* Rhine-I needs extra time to recuperate before EEPROM reload */
845 	if (rp->quirks & rqRhineI)
846 		msleep(5);
847 
848 	/* Reload EEPROM controlled bytes cleared by soft reset */
849 	rhine_reload_eeprom(pioaddr, dev);
850 }
851 
852 static const struct net_device_ops rhine_netdev_ops = {
853 	.ndo_open		 = rhine_open,
854 	.ndo_stop		 = rhine_close,
855 	.ndo_start_xmit		 = rhine_start_tx,
856 	.ndo_get_stats64	 = rhine_get_stats64,
857 	.ndo_set_rx_mode	 = rhine_set_rx_mode,
858 	.ndo_change_mtu		 = eth_change_mtu,
859 	.ndo_validate_addr	 = eth_validate_addr,
860 	.ndo_set_mac_address 	 = eth_mac_addr,
861 	.ndo_do_ioctl		 = netdev_ioctl,
862 	.ndo_tx_timeout 	 = rhine_tx_timeout,
863 	.ndo_vlan_rx_add_vid	 = rhine_vlan_rx_add_vid,
864 	.ndo_vlan_rx_kill_vid	 = rhine_vlan_rx_kill_vid,
865 #ifdef CONFIG_NET_POLL_CONTROLLER
866 	.ndo_poll_controller	 = rhine_poll,
867 #endif
868 };
869 
870 static int rhine_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
871 {
872 	struct net_device *dev;
873 	struct rhine_private *rp;
874 	int i, rc;
875 	u32 quirks;
876 	long pioaddr;
877 	long memaddr;
878 	void __iomem *ioaddr;
879 	int io_size, phy_id;
880 	const char *name;
881 #ifdef USE_MMIO
882 	int bar = 1;
883 #else
884 	int bar = 0;
885 #endif
886 
887 /* when built into the kernel, we only print version if device is found */
888 #ifndef MODULE
889 	pr_info_once("%s\n", version);
890 #endif
891 
892 	io_size = 256;
893 	phy_id = 0;
894 	quirks = 0;
895 	name = "Rhine";
896 	if (pdev->revision < VTunknown0) {
897 		quirks = rqRhineI;
898 		io_size = 128;
899 	}
900 	else if (pdev->revision >= VT6102) {
901 		quirks = rqWOL | rqForceReset;
902 		if (pdev->revision < VT6105) {
903 			name = "Rhine II";
904 			quirks |= rqStatusWBRace;	/* Rhine-II exclusive */
905 		}
906 		else {
907 			phy_id = 1;	/* Integrated PHY, phy_id fixed to 1 */
908 			if (pdev->revision >= VT6105_B0)
909 				quirks |= rq6patterns;
910 			if (pdev->revision < VT6105M)
911 				name = "Rhine III";
912 			else
913 				name = "Rhine III (Management Adapter)";
914 		}
915 	}
916 
917 	rc = pci_enable_device(pdev);
918 	if (rc)
919 		goto err_out;
920 
921 	/* this should always be supported */
922 	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
923 	if (rc) {
924 		dev_err(&pdev->dev,
925 			"32-bit PCI DMA addresses not supported by the card!?\n");
926 		goto err_out;
927 	}
928 
929 	/* sanity check */
930 	if ((pci_resource_len(pdev, 0) < io_size) ||
931 	    (pci_resource_len(pdev, 1) < io_size)) {
932 		rc = -EIO;
933 		dev_err(&pdev->dev, "Insufficient PCI resources, aborting\n");
934 		goto err_out;
935 	}
936 
937 	pioaddr = pci_resource_start(pdev, 0);
938 	memaddr = pci_resource_start(pdev, 1);
939 
940 	pci_set_master(pdev);
941 
942 	dev = alloc_etherdev(sizeof(struct rhine_private));
943 	if (!dev) {
944 		rc = -ENOMEM;
945 		goto err_out;
946 	}
947 	SET_NETDEV_DEV(dev, &pdev->dev);
948 
949 	rp = netdev_priv(dev);
950 	rp->dev = dev;
951 	rp->quirks = quirks;
952 	rp->pioaddr = pioaddr;
953 	rp->pdev = pdev;
954 	rp->msg_enable = netif_msg_init(debug, RHINE_MSG_DEFAULT);
955 
956 	rc = pci_request_regions(pdev, DRV_NAME);
957 	if (rc)
958 		goto err_out_free_netdev;
959 
960 	ioaddr = pci_iomap(pdev, bar, io_size);
961 	if (!ioaddr) {
962 		rc = -EIO;
963 		dev_err(&pdev->dev,
964 			"ioremap failed for device %s, region 0x%X @ 0x%lX\n",
965 			pci_name(pdev), io_size, memaddr);
966 		goto err_out_free_res;
967 	}
968 
969 #ifdef USE_MMIO
970 	enable_mmio(pioaddr, quirks);
971 
972 	/* Check that selected MMIO registers match the PIO ones */
973 	i = 0;
974 	while (mmio_verify_registers[i]) {
975 		int reg = mmio_verify_registers[i++];
976 		unsigned char a = inb(pioaddr+reg);
977 		unsigned char b = readb(ioaddr+reg);
978 		if (a != b) {
979 			rc = -EIO;
980 			dev_err(&pdev->dev,
				"MMIO does not match PIO [%02x] (%02x != %02x)\n",
982 				reg, a, b);
983 			goto err_out_unmap;
984 		}
985 	}
986 #endif /* USE_MMIO */
987 
988 	rp->base = ioaddr;
989 
990 	/* Get chip registers into a sane state */
991 	rhine_power_init(dev);
992 	rhine_hw_init(dev, pioaddr);
993 
994 	for (i = 0; i < 6; i++)
995 		dev->dev_addr[i] = ioread8(ioaddr + StationAddr + i);
996 
997 	if (!is_valid_ether_addr(dev->dev_addr)) {
998 		/* Report it and use a random ethernet address instead */
999 		netdev_err(dev, "Invalid MAC address: %pM\n", dev->dev_addr);
1000 		eth_hw_addr_random(dev);
1001 		netdev_info(dev, "Using random MAC address: %pM\n",
1002 			    dev->dev_addr);
1003 	}
1004 
1005 	/* For Rhine-I/II, phy_id is loaded from EEPROM */
1006 	if (!phy_id)
1007 		phy_id = ioread8(ioaddr + 0x6C);
1008 
1009 	spin_lock_init(&rp->lock);
1010 	mutex_init(&rp->task_lock);
1011 	INIT_WORK(&rp->reset_task, rhine_reset_task);
1012 	INIT_WORK(&rp->slow_event_task, rhine_slow_event_task);
1013 
1014 	rp->mii_if.dev = dev;
1015 	rp->mii_if.mdio_read = mdio_read;
1016 	rp->mii_if.mdio_write = mdio_write;
1017 	rp->mii_if.phy_id_mask = 0x1f;
1018 	rp->mii_if.reg_num_mask = 0x1f;
1019 
1020 	/* The chip-specific entries in the device structure. */
1021 	dev->netdev_ops = &rhine_netdev_ops;
	dev->ethtool_ops = &netdev_ethtool_ops;
1023 	dev->watchdog_timeo = TX_TIMEOUT;
1024 
1025 	netif_napi_add(dev, &rp->napi, rhine_napipoll, 64);
1026 
1027 	if (rp->quirks & rqRhineI)
1028 		dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM;
1029 
1030 	if (pdev->revision >= VT6105M)
1031 		dev->features |= NETIF_F_HW_VLAN_CTAG_TX |
1032 				 NETIF_F_HW_VLAN_CTAG_RX |
1033 				 NETIF_F_HW_VLAN_CTAG_FILTER;
1034 
1035 	/* dev->name not defined before register_netdev()! */
1036 	rc = register_netdev(dev);
1037 	if (rc)
1038 		goto err_out_unmap;
1039 
1040 	netdev_info(dev, "VIA %s at 0x%lx, %pM, IRQ %d\n",
1041 		    name,
1042 #ifdef USE_MMIO
1043 		    memaddr,
1044 #else
1045 		    (long)ioaddr,
1046 #endif
1047 		    dev->dev_addr, pdev->irq);
1048 
1049 	pci_set_drvdata(pdev, dev);
1050 
1051 	{
1052 		u16 mii_cmd;
1053 		int mii_status = mdio_read(dev, phy_id, 1);
1054 		mii_cmd = mdio_read(dev, phy_id, MII_BMCR) & ~BMCR_ISOLATE;
1055 		mdio_write(dev, phy_id, MII_BMCR, mii_cmd);
1056 		if (mii_status != 0xffff && mii_status != 0x0000) {
1057 			rp->mii_if.advertising = mdio_read(dev, phy_id, 4);
1058 			netdev_info(dev,
1059 				    "MII PHY found at address %d, status 0x%04x advertising %04x Link %04x\n",
1060 				    phy_id,
1061 				    mii_status, rp->mii_if.advertising,
1062 				    mdio_read(dev, phy_id, 5));
1063 
1064 			/* set IFF_RUNNING */
1065 			if (mii_status & BMSR_LSTATUS)
1066 				netif_carrier_on(dev);
1067 			else
1068 				netif_carrier_off(dev);
1069 
1070 		}
1071 	}
1072 	rp->mii_if.phy_id = phy_id;
1073 	if (avoid_D3)
1074 		netif_info(rp, probe, dev, "No D3 power state at shutdown\n");
1075 
1076 	return 0;
1077 
1078 err_out_unmap:
1079 	pci_iounmap(pdev, ioaddr);
1080 err_out_free_res:
1081 	pci_release_regions(pdev);
1082 err_out_free_netdev:
1083 	free_netdev(dev);
1084 err_out:
1085 	return rc;
1086 }
1087 
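/*
 * Allocate a single coherent DMA block holding the Rx and Tx descriptor
 * rings back to back; Rhine-I additionally gets a block of Tx bounce
 * buffers.
 */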
1088 static int alloc_ring(struct net_device* dev)
1089 {
1090 	struct rhine_private *rp = netdev_priv(dev);
1091 	void *ring;
1092 	dma_addr_t ring_dma;
1093 
1094 	ring = pci_alloc_consistent(rp->pdev,
1095 				    RX_RING_SIZE * sizeof(struct rx_desc) +
1096 				    TX_RING_SIZE * sizeof(struct tx_desc),
1097 				    &ring_dma);
1098 	if (!ring) {
1099 		netdev_err(dev, "Could not allocate DMA memory\n");
1100 		return -ENOMEM;
1101 	}
1102 	if (rp->quirks & rqRhineI) {
1103 		rp->tx_bufs = pci_alloc_consistent(rp->pdev,
1104 						   PKT_BUF_SZ * TX_RING_SIZE,
1105 						   &rp->tx_bufs_dma);
1106 		if (rp->tx_bufs == NULL) {
1107 			pci_free_consistent(rp->pdev,
1108 				    RX_RING_SIZE * sizeof(struct rx_desc) +
1109 				    TX_RING_SIZE * sizeof(struct tx_desc),
1110 				    ring, ring_dma);
1111 			return -ENOMEM;
1112 		}
1113 	}
1114 
1115 	rp->rx_ring = ring;
1116 	rp->tx_ring = ring + RX_RING_SIZE * sizeof(struct rx_desc);
1117 	rp->rx_ring_dma = ring_dma;
1118 	rp->tx_ring_dma = ring_dma + RX_RING_SIZE * sizeof(struct rx_desc);
1119 
1120 	return 0;
1121 }
1122 
1123 static void free_ring(struct net_device* dev)
1124 {
1125 	struct rhine_private *rp = netdev_priv(dev);
1126 
1127 	pci_free_consistent(rp->pdev,
1128 			    RX_RING_SIZE * sizeof(struct rx_desc) +
1129 			    TX_RING_SIZE * sizeof(struct tx_desc),
1130 			    rp->rx_ring, rp->rx_ring_dma);
1131 	rp->tx_ring = NULL;
1132 
1133 	if (rp->tx_bufs)
1134 		pci_free_consistent(rp->pdev, PKT_BUF_SZ * TX_RING_SIZE,
1135 				    rp->tx_bufs, rp->tx_bufs_dma);
1136 
1137 	rp->tx_bufs = NULL;
1138 
1139 }
1140 
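/*
 * Link the Rx descriptors into a ring and fill them with freshly
 * allocated, DMA-mapped skbuffs, handing ownership (DescOwn) to the
 * chip.
 */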
1141 static void alloc_rbufs(struct net_device *dev)
1142 {
1143 	struct rhine_private *rp = netdev_priv(dev);
1144 	dma_addr_t next;
1145 	int i;
1146 
1147 	rp->dirty_rx = rp->cur_rx = 0;
1148 
1149 	rp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
1150 	rp->rx_head_desc = &rp->rx_ring[0];
1151 	next = rp->rx_ring_dma;
1152 
1153 	/* Init the ring entries */
1154 	for (i = 0; i < RX_RING_SIZE; i++) {
1155 		rp->rx_ring[i].rx_status = 0;
1156 		rp->rx_ring[i].desc_length = cpu_to_le32(rp->rx_buf_sz);
1157 		next += sizeof(struct rx_desc);
1158 		rp->rx_ring[i].next_desc = cpu_to_le32(next);
1159 		rp->rx_skbuff[i] = NULL;
1160 	}
1161 	/* Mark the last entry as wrapping the ring. */
1162 	rp->rx_ring[i-1].next_desc = cpu_to_le32(rp->rx_ring_dma);
1163 
1164 	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
1165 	for (i = 0; i < RX_RING_SIZE; i++) {
1166 		struct sk_buff *skb = netdev_alloc_skb(dev, rp->rx_buf_sz);
1167 		rp->rx_skbuff[i] = skb;
1168 		if (skb == NULL)
1169 			break;
1170 
1171 		rp->rx_skbuff_dma[i] =
1172 			pci_map_single(rp->pdev, skb->data, rp->rx_buf_sz,
1173 				       PCI_DMA_FROMDEVICE);
1174 		if (dma_mapping_error(&rp->pdev->dev, rp->rx_skbuff_dma[i])) {
1175 			rp->rx_skbuff_dma[i] = 0;
1176 			dev_kfree_skb(skb);
1177 			break;
1178 		}
1179 		rp->rx_ring[i].addr = cpu_to_le32(rp->rx_skbuff_dma[i]);
1180 		rp->rx_ring[i].rx_status = cpu_to_le32(DescOwn);
1181 	}
1182 	rp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
1183 }
1184 
1185 static void free_rbufs(struct net_device* dev)
1186 {
1187 	struct rhine_private *rp = netdev_priv(dev);
1188 	int i;
1189 
1190 	/* Free all the skbuffs in the Rx queue. */
1191 	for (i = 0; i < RX_RING_SIZE; i++) {
1192 		rp->rx_ring[i].rx_status = 0;
1193 		rp->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
1194 		if (rp->rx_skbuff[i]) {
1195 			pci_unmap_single(rp->pdev,
1196 					 rp->rx_skbuff_dma[i],
1197 					 rp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1198 			dev_kfree_skb(rp->rx_skbuff[i]);
1199 		}
1200 		rp->rx_skbuff[i] = NULL;
1201 	}
1202 }
1203 
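/*
 * Link the Tx descriptors into a ring; on Rhine-I each entry is also
 * assigned its slot in the bounce buffer area.
 */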
1204 static void alloc_tbufs(struct net_device* dev)
1205 {
1206 	struct rhine_private *rp = netdev_priv(dev);
1207 	dma_addr_t next;
1208 	int i;
1209 
1210 	rp->dirty_tx = rp->cur_tx = 0;
1211 	next = rp->tx_ring_dma;
1212 	for (i = 0; i < TX_RING_SIZE; i++) {
1213 		rp->tx_skbuff[i] = NULL;
1214 		rp->tx_ring[i].tx_status = 0;
1215 		rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
1216 		next += sizeof(struct tx_desc);
1217 		rp->tx_ring[i].next_desc = cpu_to_le32(next);
1218 		if (rp->quirks & rqRhineI)
1219 			rp->tx_buf[i] = &rp->tx_bufs[i * PKT_BUF_SZ];
1220 	}
1221 	rp->tx_ring[i-1].next_desc = cpu_to_le32(rp->tx_ring_dma);
1222 
1223 }
1224 
1225 static void free_tbufs(struct net_device* dev)
1226 {
1227 	struct rhine_private *rp = netdev_priv(dev);
1228 	int i;
1229 
1230 	for (i = 0; i < TX_RING_SIZE; i++) {
1231 		rp->tx_ring[i].tx_status = 0;
1232 		rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
1233 		rp->tx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
1234 		if (rp->tx_skbuff[i]) {
1235 			if (rp->tx_skbuff_dma[i]) {
1236 				pci_unmap_single(rp->pdev,
1237 						 rp->tx_skbuff_dma[i],
1238 						 rp->tx_skbuff[i]->len,
1239 						 PCI_DMA_TODEVICE);
1240 			}
1241 			dev_kfree_skb(rp->tx_skbuff[i]);
1242 		}
1243 		rp->tx_skbuff[i] = NULL;
1244 		rp->tx_buf[i] = NULL;
1245 	}
1246 }
1247 
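/*
 * Let the MII library (re)check the link and mirror the negotiated
 * duplex setting into the Cmd1FDuplex bit of ChipCmd1.
 */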
1248 static void rhine_check_media(struct net_device *dev, unsigned int init_media)
1249 {
1250 	struct rhine_private *rp = netdev_priv(dev);
1251 	void __iomem *ioaddr = rp->base;
1252 
1253 	mii_check_media(&rp->mii_if, netif_msg_link(rp), init_media);
1254 
1255 	if (rp->mii_if.full_duplex)
1256 	    iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1FDuplex,
1257 		   ioaddr + ChipCmd1);
1258 	else
1259 	    iowrite8(ioread8(ioaddr + ChipCmd1) & ~Cmd1FDuplex,
1260 		   ioaddr + ChipCmd1);
1261 
1262 	netif_info(rp, link, dev, "force_media %d, carrier %d\n",
1263 		   rp->mii_if.force_media, netif_carrier_ok(dev));
1264 }
1265 
1266 /* Called after status of force_media possibly changed */
1267 static void rhine_set_carrier(struct mii_if_info *mii)
1268 {
1269 	struct net_device *dev = mii->dev;
1270 	struct rhine_private *rp = netdev_priv(dev);
1271 
1272 	if (mii->force_media) {
1273 		/* autoneg is off: Link is always assumed to be up */
1274 		if (!netif_carrier_ok(dev))
1275 			netif_carrier_on(dev);
	} else	/* Let the MII library update the carrier status */
1277 		rhine_check_media(dev, 0);
1278 
1279 	netif_info(rp, link, dev, "force_media %d, carrier %d\n",
1280 		   mii->force_media, netif_carrier_ok(dev));
1281 }
1282 
1283 /**
1284  * rhine_set_cam - set CAM multicast filters
1285  * @ioaddr: register block of this Rhine
1286  * @idx: multicast CAM index [0..MCAM_SIZE-1]
1287  * @addr: multicast address (6 bytes)
1288  *
1289  * Load addresses into multicast filters.
1290  */
1291 static void rhine_set_cam(void __iomem *ioaddr, int idx, u8 *addr)
1292 {
1293 	int i;
1294 
1295 	iowrite8(CAMC_CAMEN, ioaddr + CamCon);
1296 	wmb();
1297 
1298 	/* Paranoid -- idx out of range should never happen */
1299 	idx &= (MCAM_SIZE - 1);
1300 
1301 	iowrite8((u8) idx, ioaddr + CamAddr);
1302 
1303 	for (i = 0; i < 6; i++, addr++)
1304 		iowrite8(*addr, ioaddr + MulticastFilter0 + i);
1305 	udelay(10);
1306 	wmb();
1307 
1308 	iowrite8(CAMC_CAMWR | CAMC_CAMEN, ioaddr + CamCon);
1309 	udelay(10);
1310 
1311 	iowrite8(0, ioaddr + CamCon);
1312 }
1313 
1314 /**
1315  * rhine_set_vlan_cam - set CAM VLAN filters
1316  * @ioaddr: register block of this Rhine
1317  * @idx: VLAN CAM index [0..VCAM_SIZE-1]
1318  * @addr: VLAN ID (2 bytes)
1319  *
1320  * Load addresses into VLAN filters.
1321  */
1322 static void rhine_set_vlan_cam(void __iomem *ioaddr, int idx, u8 *addr)
1323 {
1324 	iowrite8(CAMC_CAMEN | CAMC_VCAMSL, ioaddr + CamCon);
1325 	wmb();
1326 
1327 	/* Paranoid -- idx out of range should never happen */
1328 	idx &= (VCAM_SIZE - 1);
1329 
1330 	iowrite8((u8) idx, ioaddr + CamAddr);
1331 
1332 	iowrite16(*((u16 *) addr), ioaddr + MulticastFilter0 + 6);
1333 	udelay(10);
1334 	wmb();
1335 
1336 	iowrite8(CAMC_CAMWR | CAMC_CAMEN, ioaddr + CamCon);
1337 	udelay(10);
1338 
1339 	iowrite8(0, ioaddr + CamCon);
1340 }
1341 
1342 /**
1343  * rhine_set_cam_mask - set multicast CAM mask
1344  * @ioaddr: register block of this Rhine
1345  * @mask: multicast CAM mask
1346  *
1347  * Mask sets multicast filters active/inactive.
1348  */
1349 static void rhine_set_cam_mask(void __iomem *ioaddr, u32 mask)
1350 {
1351 	iowrite8(CAMC_CAMEN, ioaddr + CamCon);
1352 	wmb();
1353 
1354 	/* write mask */
1355 	iowrite32(mask, ioaddr + CamMask);
1356 
1357 	/* disable CAMEN */
1358 	iowrite8(0, ioaddr + CamCon);
1359 }
1360 
1361 /**
1362  * rhine_set_vlan_cam_mask - set VLAN CAM mask
1363  * @ioaddr: register block of this Rhine
1364  * @mask: VLAN CAM mask
1365  *
1366  * Mask sets VLAN filters active/inactive.
1367  */
1368 static void rhine_set_vlan_cam_mask(void __iomem *ioaddr, u32 mask)
1369 {
1370 	iowrite8(CAMC_CAMEN | CAMC_VCAMSL, ioaddr + CamCon);
1371 	wmb();
1372 
1373 	/* write mask */
1374 	iowrite32(mask, ioaddr + CamMask);
1375 
1376 	/* disable CAMEN */
1377 	iowrite8(0, ioaddr + CamCon);
1378 }
1379 
1380 /**
1381  * rhine_init_cam_filter - initialize CAM filters
1382  * @dev: network device
1383  *
1384  * Initialize (disable) hardware VLAN and multicast support on this
1385  * Rhine.
1386  */
1387 static void rhine_init_cam_filter(struct net_device *dev)
1388 {
1389 	struct rhine_private *rp = netdev_priv(dev);
1390 	void __iomem *ioaddr = rp->base;
1391 
1392 	/* Disable all CAMs */
1393 	rhine_set_vlan_cam_mask(ioaddr, 0);
1394 	rhine_set_cam_mask(ioaddr, 0);
1395 
1396 	/* disable hardware VLAN support */
1397 	BYTE_REG_BITS_ON(TCR_PQEN, ioaddr + TxConfig);
1398 	BYTE_REG_BITS_OFF(BCR1_VIDFR, ioaddr + PCIBusConfig1);
1399 }
1400 
1401 /**
1402  * rhine_update_vcam - update VLAN CAM filters
 * @dev: network device
1404  *
1405  * Update VLAN CAM filters to match configuration change.
1406  */
1407 static void rhine_update_vcam(struct net_device *dev)
1408 {
1409 	struct rhine_private *rp = netdev_priv(dev);
1410 	void __iomem *ioaddr = rp->base;
1411 	u16 vid;
1412 	u32 vCAMmask = 0;	/* 32 vCAMs (6105M and better) */
1413 	unsigned int i = 0;
1414 
1415 	for_each_set_bit(vid, rp->active_vlans, VLAN_N_VID) {
1416 		rhine_set_vlan_cam(ioaddr, i, (u8 *)&vid);
1417 		vCAMmask |= 1 << i;
1418 		if (++i >= VCAM_SIZE)
1419 			break;
1420 	}
1421 	rhine_set_vlan_cam_mask(ioaddr, vCAMmask);
1422 }
1423 
1424 static int rhine_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
1425 {
1426 	struct rhine_private *rp = netdev_priv(dev);
1427 
1428 	spin_lock_bh(&rp->lock);
1429 	set_bit(vid, rp->active_vlans);
1430 	rhine_update_vcam(dev);
1431 	spin_unlock_bh(&rp->lock);
1432 	return 0;
1433 }
1434 
1435 static int rhine_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
1436 {
1437 	struct rhine_private *rp = netdev_priv(dev);
1438 
1439 	spin_lock_bh(&rp->lock);
1440 	clear_bit(vid, rp->active_vlans);
1441 	rhine_update_vcam(dev);
1442 	spin_unlock_bh(&rp->lock);
1443 	return 0;
1444 }
1445 
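/*
 * Program the station address, FIFO thresholds, ring base addresses and
 * Rx mode, enable NAPI and interrupts, start the transmitter and
 * receiver, and check the media state.
 */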
1446 static void init_registers(struct net_device *dev)
1447 {
1448 	struct rhine_private *rp = netdev_priv(dev);
1449 	void __iomem *ioaddr = rp->base;
1450 	int i;
1451 
1452 	for (i = 0; i < 6; i++)
1453 		iowrite8(dev->dev_addr[i], ioaddr + StationAddr + i);
1454 
1455 	/* Initialize other registers. */
1456 	iowrite16(0x0006, ioaddr + PCIBusConfig);	/* Tune configuration??? */
1457 	/* Configure initial FIFO thresholds. */
1458 	iowrite8(0x20, ioaddr + TxConfig);
1459 	rp->tx_thresh = 0x20;
1460 	rp->rx_thresh = 0x60;		/* Written in rhine_set_rx_mode(). */
1461 
1462 	iowrite32(rp->rx_ring_dma, ioaddr + RxRingPtr);
1463 	iowrite32(rp->tx_ring_dma, ioaddr + TxRingPtr);
1464 
1465 	rhine_set_rx_mode(dev);
1466 
1467 	if (rp->pdev->revision >= VT6105M)
1468 		rhine_init_cam_filter(dev);
1469 
1470 	napi_enable(&rp->napi);
1471 
1472 	iowrite16(RHINE_EVENT & 0xffff, ioaddr + IntrEnable);
1473 
1474 	iowrite16(CmdStart | CmdTxOn | CmdRxOn | (Cmd1NoTxPoll << 8),
1475 	       ioaddr + ChipCmd);
1476 	rhine_check_media(dev, 1);
1477 }
1478 
1479 /* Enable MII link status auto-polling (required for IntrLinkChange) */
1480 static void rhine_enable_linkmon(struct rhine_private *rp)
1481 {
1482 	void __iomem *ioaddr = rp->base;
1483 
1484 	iowrite8(0, ioaddr + MIICmd);
1485 	iowrite8(MII_BMSR, ioaddr + MIIRegAddr);
1486 	iowrite8(0x80, ioaddr + MIICmd);
1487 
1488 	rhine_wait_bit_high(rp, MIIRegAddr, 0x20);
1489 
1490 	iowrite8(MII_BMSR | 0x40, ioaddr + MIIRegAddr);
1491 }
1492 
1493 /* Disable MII link status auto-polling (required for MDIO access) */
1494 static void rhine_disable_linkmon(struct rhine_private *rp)
1495 {
1496 	void __iomem *ioaddr = rp->base;
1497 
1498 	iowrite8(0, ioaddr + MIICmd);
1499 
1500 	if (rp->quirks & rqRhineI) {
		iowrite8(0x01, ioaddr + MIIRegAddr);	/* MII_BMSR */
1502 
1503 		/* Can be called from ISR. Evil. */
1504 		mdelay(1);
1505 
1506 		/* 0x80 must be set immediately before turning it off */
1507 		iowrite8(0x80, ioaddr + MIICmd);
1508 
1509 		rhine_wait_bit_high(rp, MIIRegAddr, 0x20);
1510 
1511 		/* Heh. Now clear 0x80 again. */
1512 		iowrite8(0, ioaddr + MIICmd);
1513 	}
1514 	else
1515 		rhine_wait_bit_high(rp, MIIRegAddr, 0x80);
1516 }
1517 
1518 /* Read and write over the MII Management Data I/O (MDIO) interface. */
1519 
1520 static int mdio_read(struct net_device *dev, int phy_id, int regnum)
1521 {
1522 	struct rhine_private *rp = netdev_priv(dev);
1523 	void __iomem *ioaddr = rp->base;
1524 	int result;
1525 
1526 	rhine_disable_linkmon(rp);
1527 
1528 	/* rhine_disable_linkmon already cleared MIICmd */
1529 	iowrite8(phy_id, ioaddr + MIIPhyAddr);
1530 	iowrite8(regnum, ioaddr + MIIRegAddr);
1531 	iowrite8(0x40, ioaddr + MIICmd);		/* Trigger read */
1532 	rhine_wait_bit_low(rp, MIICmd, 0x40);
1533 	result = ioread16(ioaddr + MIIData);
1534 
1535 	rhine_enable_linkmon(rp);
1536 	return result;
1537 }
1538 
1539 static void mdio_write(struct net_device *dev, int phy_id, int regnum, int value)
1540 {
1541 	struct rhine_private *rp = netdev_priv(dev);
1542 	void __iomem *ioaddr = rp->base;
1543 
1544 	rhine_disable_linkmon(rp);
1545 
1546 	/* rhine_disable_linkmon already cleared MIICmd */
1547 	iowrite8(phy_id, ioaddr + MIIPhyAddr);
1548 	iowrite8(regnum, ioaddr + MIIRegAddr);
1549 	iowrite16(value, ioaddr + MIIData);
1550 	iowrite8(0x20, ioaddr + MIICmd);		/* Trigger write */
1551 	rhine_wait_bit_low(rp, MIICmd, 0x20);
1552 
1553 	rhine_enable_linkmon(rp);
1554 }
1555 
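/*
 * Disable deferred work: clear task_enable under the task lock and wait
 * for any queued reset or slow-event work to finish.
 */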
1556 static void rhine_task_disable(struct rhine_private *rp)
1557 {
1558 	mutex_lock(&rp->task_lock);
1559 	rp->task_enable = false;
1560 	mutex_unlock(&rp->task_lock);
1561 
1562 	cancel_work_sync(&rp->slow_event_task);
1563 	cancel_work_sync(&rp->reset_task);
1564 }
1565 
1566 static void rhine_task_enable(struct rhine_private *rp)
1567 {
1568 	mutex_lock(&rp->task_lock);
1569 	rp->task_enable = true;
1570 	mutex_unlock(&rp->task_lock);
1571 }
1572 
1573 static int rhine_open(struct net_device *dev)
1574 {
1575 	struct rhine_private *rp = netdev_priv(dev);
1576 	void __iomem *ioaddr = rp->base;
1577 	int rc;
1578 
1579 	rc = request_irq(rp->pdev->irq, rhine_interrupt, IRQF_SHARED, dev->name,
1580 			dev);
1581 	if (rc)
1582 		return rc;
1583 
1584 	netif_dbg(rp, ifup, dev, "%s() irq %d\n", __func__, rp->pdev->irq);
1585 
1586 	rc = alloc_ring(dev);
1587 	if (rc) {
1588 		free_irq(rp->pdev->irq, dev);
1589 		return rc;
1590 	}
1591 	alloc_rbufs(dev);
1592 	alloc_tbufs(dev);
1593 	rhine_chip_reset(dev);
1594 	rhine_task_enable(rp);
1595 	init_registers(dev);
1596 
1597 	netif_dbg(rp, ifup, dev, "%s() Done - status %04x MII status: %04x\n",
1598 		  __func__, ioread16(ioaddr + ChipCmd),
1599 		  mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
1600 
1601 	netif_start_queue(dev);
1602 
1603 	return 0;
1604 }
1605 
1606 static void rhine_reset_task(struct work_struct *work)
1607 {
1608 	struct rhine_private *rp = container_of(work, struct rhine_private,
1609 						reset_task);
1610 	struct net_device *dev = rp->dev;
1611 
1612 	mutex_lock(&rp->task_lock);
1613 
1614 	if (!rp->task_enable)
1615 		goto out_unlock;
1616 
1617 	napi_disable(&rp->napi);
1618 	spin_lock_bh(&rp->lock);
1619 
1620 	/* clear all descriptors */
1621 	free_tbufs(dev);
1622 	free_rbufs(dev);
1623 	alloc_tbufs(dev);
1624 	alloc_rbufs(dev);
1625 
1626 	/* Reinitialize the hardware. */
1627 	rhine_chip_reset(dev);
1628 	init_registers(dev);
1629 
1630 	spin_unlock_bh(&rp->lock);
1631 
1632 	dev->trans_start = jiffies; /* prevent tx timeout */
1633 	dev->stats.tx_errors++;
1634 	netif_wake_queue(dev);
1635 
1636 out_unlock:
1637 	mutex_unlock(&rp->task_lock);
1638 }
1639 
1640 static void rhine_tx_timeout(struct net_device *dev)
1641 {
1642 	struct rhine_private *rp = netdev_priv(dev);
1643 	void __iomem *ioaddr = rp->base;
1644 
1645 	netdev_warn(dev, "Transmit timed out, status %04x, PHY status %04x, resetting...\n",
1646 		    ioread16(ioaddr + IntrStatus),
1647 		    mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
1648 
1649 	schedule_work(&rp->reset_task);
1650 }
1651 
1652 static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
1653 				  struct net_device *dev)
1654 {
1655 	struct rhine_private *rp = netdev_priv(dev);
1656 	void __iomem *ioaddr = rp->base;
1657 	unsigned entry;
1658 
1659 	/* Caution: the write order is important here, set the field
1660 	   with the "ownership" bits last. */
1661 
1662 	/* Calculate the next Tx descriptor entry. */
1663 	entry = rp->cur_tx % TX_RING_SIZE;
1664 
1665 	if (skb_padto(skb, ETH_ZLEN))
1666 		return NETDEV_TX_OK;
1667 
1668 	rp->tx_skbuff[entry] = skb;
1669 
1670 	if ((rp->quirks & rqRhineI) &&
1671 	    (((unsigned long)skb->data & 3) || skb_shinfo(skb)->nr_frags != 0 || skb->ip_summed == CHECKSUM_PARTIAL)) {
1672 		/* Must use alignment buffer. */
1673 		if (skb->len > PKT_BUF_SZ) {
1674 			/* packet too long, drop it */
1675 			dev_kfree_skb(skb);
1676 			rp->tx_skbuff[entry] = NULL;
1677 			dev->stats.tx_dropped++;
1678 			return NETDEV_TX_OK;
1679 		}
1680 
1681 		/* Padding is not copied and so must be redone. */
1682 		skb_copy_and_csum_dev(skb, rp->tx_buf[entry]);
1683 		if (skb->len < ETH_ZLEN)
1684 			memset(rp->tx_buf[entry] + skb->len, 0,
1685 			       ETH_ZLEN - skb->len);
1686 		rp->tx_skbuff_dma[entry] = 0;
1687 		rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_bufs_dma +
1688 						      (rp->tx_buf[entry] -
1689 						       rp->tx_bufs));
1690 	} else {
1691 		rp->tx_skbuff_dma[entry] =
1692 			pci_map_single(rp->pdev, skb->data, skb->len,
1693 				       PCI_DMA_TODEVICE);
1694 		if (dma_mapping_error(&rp->pdev->dev, rp->tx_skbuff_dma[entry])) {
1695 			dev_kfree_skb(skb);
1696 			rp->tx_skbuff_dma[entry] = 0;
1697 			dev->stats.tx_dropped++;
1698 			return NETDEV_TX_OK;
1699 		}
1700 		rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_skbuff_dma[entry]);
1701 	}
1702 
1703 	rp->tx_ring[entry].desc_length =
1704 		cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));
1705 
1706 	if (unlikely(vlan_tx_tag_present(skb))) {
1707 		rp->tx_ring[entry].tx_status = cpu_to_le32((vlan_tx_tag_get(skb)) << 16);
1708 		/* request tagging */
1709 		rp->tx_ring[entry].desc_length |= cpu_to_le32(0x020000);
1710 	}
1711 	else
1712 		rp->tx_ring[entry].tx_status = 0;
1713 
	/* Make sure the descriptor is fully written before handing it to the chip. */
1715 	wmb();
1716 	rp->tx_ring[entry].tx_status |= cpu_to_le32(DescOwn);
1717 	wmb();
1718 
1719 	rp->cur_tx++;
1720 
1721 	/* Non-x86 Todo: explicitly flush cache lines here. */
1722 
1723 	if (vlan_tx_tag_present(skb))
1724 		/* Tx queues are bits 7-0 (first Tx queue: bit 7) */
1725 		BYTE_REG_BITS_ON(1 << 7, ioaddr + TQWake);
1726 
1727 	/* Wake the potentially-idle transmit channel */
1728 	iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
1729 	       ioaddr + ChipCmd1);
1730 	IOSYNC;
1731 
1732 	if (rp->cur_tx == rp->dirty_tx + TX_QUEUE_LEN)
1733 		netif_stop_queue(dev);
1734 
1735 	netif_dbg(rp, tx_queued, dev, "Transmit frame #%d queued in slot %d\n",
1736 		  rp->cur_tx - 1, entry);
1737 
1738 	return NETDEV_TX_OK;
1739 }
1740 
1741 static void rhine_irq_disable(struct rhine_private *rp)
1742 {
1743 	iowrite16(0x0000, rp->base + IntrEnable);
1744 	mmiowb();
1745 }
1746 
1747 /* The interrupt handler does all of the Rx thread work and cleans up
1748    after the Tx thread. */
1749 static irqreturn_t rhine_interrupt(int irq, void *dev_instance)
1750 {
1751 	struct net_device *dev = dev_instance;
1752 	struct rhine_private *rp = netdev_priv(dev);
1753 	u32 status;
1754 	int handled = 0;
1755 
1756 	status = rhine_get_events(rp);
1757 
1758 	netif_dbg(rp, intr, dev, "Interrupt, status %08x\n", status);
1759 
1760 	if (status & RHINE_EVENT) {
1761 		handled = 1;
1762 
1763 		rhine_irq_disable(rp);
1764 		napi_schedule(&rp->napi);
1765 	}
1766 
1767 	if (status & ~(IntrLinkChange | IntrStatsMax | RHINE_EVENT_NAPI)) {
1768 		netif_err(rp, intr, dev, "Something Wicked happened! %08x\n",
1769 			  status);
1770 	}
1771 
1772 	return IRQ_RETVAL(handled);
1773 }
1774 
1775 /* This routine is logically part of the interrupt handler, but isolated
1776    for clarity. */
1777 static void rhine_tx(struct net_device *dev)
1778 {
1779 	struct rhine_private *rp = netdev_priv(dev);
1780 	int txstatus = 0, entry = rp->dirty_tx % TX_RING_SIZE;
1781 
	/* find and clean up dirty Tx descriptors */
1783 	while (rp->dirty_tx != rp->cur_tx) {
1784 		txstatus = le32_to_cpu(rp->tx_ring[entry].tx_status);
1785 		netif_dbg(rp, tx_done, dev, "Tx scavenge %d status %08x\n",
1786 			  entry, txstatus);
1787 		if (txstatus & DescOwn)
1788 			break;
1789 		if (txstatus & 0x8000) {
1790 			netif_dbg(rp, tx_done, dev,
1791 				  "Transmit error, Tx status %08x\n", txstatus);
1792 			dev->stats.tx_errors++;
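			/*
			 * Map the descriptor error bits onto the standard
			 * netdev error counters.  A Tx FIFO error is
			 * handled separately below so the frame can be
			 * retransmitted.
			 */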
1793 			if (txstatus & 0x0400)
1794 				dev->stats.tx_carrier_errors++;
1795 			if (txstatus & 0x0200)
1796 				dev->stats.tx_window_errors++;
1797 			if (txstatus & 0x0100)
1798 				dev->stats.tx_aborted_errors++;
1799 			if (txstatus & 0x0080)
1800 				dev->stats.tx_heartbeat_errors++;
1801 			if (((rp->quirks & rqRhineI) && txstatus & 0x0002) ||
1802 			    (txstatus & 0x0800) || (txstatus & 0x1000)) {
1803 				dev->stats.tx_fifo_errors++;
1804 				rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
1805 				break; /* Keep the skb - we try again */
1806 			}
1807 			/* Transmitter restarted in 'abnormal' handler. */
1808 		} else {
1809 			if (rp->quirks & rqRhineI)
1810 				dev->stats.collisions += (txstatus >> 3) & 0x0F;
1811 			else
1812 				dev->stats.collisions += txstatus & 0x0F;
1813 			netif_dbg(rp, tx_done, dev, "collisions: %1.1x:%1.1x\n",
1814 				  (txstatus >> 3) & 0xF, txstatus & 0xF);
1815 
1816 			u64_stats_update_begin(&rp->tx_stats.syncp);
1817 			rp->tx_stats.bytes += rp->tx_skbuff[entry]->len;
1818 			rp->tx_stats.packets++;
1819 			u64_stats_update_end(&rp->tx_stats.syncp);
1820 		}
1821 		/* Free the original skb. */
1822 		if (rp->tx_skbuff_dma[entry]) {
1823 			pci_unmap_single(rp->pdev,
1824 					 rp->tx_skbuff_dma[entry],
1825 					 rp->tx_skbuff[entry]->len,
1826 					 PCI_DMA_TODEVICE);
1827 		}
1828 		dev_kfree_skb(rp->tx_skbuff[entry]);
1829 		rp->tx_skbuff[entry] = NULL;
1830 		entry = (++rp->dirty_tx) % TX_RING_SIZE;
1831 	}
1832 	if ((rp->cur_tx - rp->dirty_tx) < TX_QUEUE_LEN - 4)
1833 		netif_wake_queue(dev);
1834 }
1835 
1836 /**
1837  * rhine_get_vlan_tci - extract TCI from Rx data buffer
1838  * @skb: pointer to sk_buff
1839  * @data_size: used data area of the buffer including CRC
1840  *
1841  * If hardware VLAN tag extraction is enabled and the chip indicates a 802.1Q
1842  * packet, the extracted 802.1Q header (2 bytes TPID + 2 bytes TCI) is 4-byte
1843  * aligned following the CRC.
1844  */
1845 static inline u16 rhine_get_vlan_tci(struct sk_buff *skb, int data_size)
1846 {
1847 	u8 *trailer = (u8 *)skb->data + ((data_size + 3) & ~3) + 2;
1848 	return be16_to_cpup((__be16 *)trailer);
1849 }
1850 
1851 /* Process up to limit frames from receive ring */
1852 static int rhine_rx(struct net_device *dev, int limit)
1853 {
1854 	struct rhine_private *rp = netdev_priv(dev);
1855 	int count;
1856 	int entry = rp->cur_rx % RX_RING_SIZE;
1857 
1858 	netif_dbg(rp, rx_status, dev, "%s(), entry %d status %08x\n", __func__,
1859 		  entry, le32_to_cpu(rp->rx_head_desc->rx_status));
1860 
1861 	/* If EOP is set on the next entry, it's a new packet. Send it up. */
1862 	for (count = 0; count < limit; ++count) {
1863 		struct rx_desc *desc = rp->rx_head_desc;
1864 		u32 desc_status = le32_to_cpu(desc->rx_status);
1865 		u32 desc_length = le32_to_cpu(desc->desc_length);
1866 		int data_size = desc_status >> 16;
1867 
1868 		if (desc_status & DescOwn)
1869 			break;
1870 
1871 		netif_dbg(rp, rx_status, dev, "%s() status %08x\n", __func__,
1872 			  desc_status);
1873 
1874 		if ((desc_status & (RxWholePkt | RxErr)) != RxWholePkt) {
1875 			if ((desc_status & RxWholePkt) != RxWholePkt) {
1876 				netdev_warn(dev,
1877 	"Oversized Ethernet frame spanned multiple buffers, "
1878 	"entry %#x length %d status %08x!\n",
1879 					    entry, data_size,
1880 					    desc_status);
1881 				netdev_warn(dev,
1882 					    "Oversized Ethernet frame %p vs %p\n",
1883 					    rp->rx_head_desc,
1884 					    &rp->rx_ring[entry]);
1885 				dev->stats.rx_length_errors++;
1886 			} else if (desc_status & RxErr) {
				/* There was an error. */
1888 				netif_dbg(rp, rx_err, dev,
1889 					  "%s() Rx error %08x\n", __func__,
1890 					  desc_status);
1891 				dev->stats.rx_errors++;
1892 				if (desc_status & 0x0030)
1893 					dev->stats.rx_length_errors++;
1894 				if (desc_status & 0x0048)
1895 					dev->stats.rx_fifo_errors++;
1896 				if (desc_status & 0x0004)
1897 					dev->stats.rx_frame_errors++;
1898 				if (desc_status & 0x0002) {
1899 					/* this can also be updated outside the interrupt handler */
1900 					spin_lock(&rp->lock);
1901 					dev->stats.rx_crc_errors++;
1902 					spin_unlock(&rp->lock);
1903 				}
1904 			}
1905 		} else {
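			/*
			 * A complete, error-free frame.  Short packets are
			 * copied into a freshly allocated skb so the large
			 * Rx buffer can stay in the ring (rx_copybreak).
			 */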
1906 			struct sk_buff *skb = NULL;
1907 			/* Length should omit the CRC */
1908 			int pkt_len = data_size - 4;
1909 			u16 vlan_tci = 0;
1910 
1911 			/* Check if the packet is long enough to accept without
1912 			   copying to a minimally-sized skbuff. */
1913 			if (pkt_len < rx_copybreak)
1914 				skb = netdev_alloc_skb_ip_align(dev, pkt_len);
1915 			if (skb) {
1916 				pci_dma_sync_single_for_cpu(rp->pdev,
1917 							    rp->rx_skbuff_dma[entry],
1918 							    rp->rx_buf_sz,
1919 							    PCI_DMA_FROMDEVICE);
1920 
1921 				skb_copy_to_linear_data(skb,
1922 						 rp->rx_skbuff[entry]->data,
1923 						 pkt_len);
1924 				skb_put(skb, pkt_len);
1925 				pci_dma_sync_single_for_device(rp->pdev,
1926 							       rp->rx_skbuff_dma[entry],
1927 							       rp->rx_buf_sz,
1928 							       PCI_DMA_FROMDEVICE);
1929 			} else {
1930 				skb = rp->rx_skbuff[entry];
1931 				if (skb == NULL) {
1932 					netdev_err(dev, "Inconsistent Rx descriptor chain\n");
1933 					break;
1934 				}
1935 				rp->rx_skbuff[entry] = NULL;
1936 				skb_put(skb, pkt_len);
1937 				pci_unmap_single(rp->pdev,
1938 						 rp->rx_skbuff_dma[entry],
1939 						 rp->rx_buf_sz,
1940 						 PCI_DMA_FROMDEVICE);
1941 			}
1942 
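			/*
			 * The chip stripped the 802.1Q tag into the buffer
			 * trailer; pass it to the stack via the VLAN
			 * acceleration path.
			 */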
1943 			if (unlikely(desc_length & DescTag))
1944 				vlan_tci = rhine_get_vlan_tci(skb, data_size);
1945 
1946 			skb->protocol = eth_type_trans(skb, dev);
1947 
1948 			if (unlikely(desc_length & DescTag))
1949 				__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
1950 			netif_receive_skb(skb);
1951 
1952 			u64_stats_update_begin(&rp->rx_stats.syncp);
1953 			rp->rx_stats.bytes += pkt_len;
1954 			rp->rx_stats.packets++;
1955 			u64_stats_update_end(&rp->rx_stats.syncp);
1956 		}
1957 		entry = (++rp->cur_rx) % RX_RING_SIZE;
1958 		rp->rx_head_desc = &rp->rx_ring[entry];
1959 	}
1960 
1961 	/* Refill the Rx ring buffers. */
1962 	for (; rp->cur_rx - rp->dirty_rx > 0; rp->dirty_rx++) {
1963 		struct sk_buff *skb;
1964 		entry = rp->dirty_rx % RX_RING_SIZE;
1965 		if (rp->rx_skbuff[entry] == NULL) {
1966 			skb = netdev_alloc_skb(dev, rp->rx_buf_sz);
1967 			rp->rx_skbuff[entry] = skb;
1968 			if (skb == NULL)
1969 				break;	/* Better luck next round. */
1970 			rp->rx_skbuff_dma[entry] =
1971 				pci_map_single(rp->pdev, skb->data,
1972 					       rp->rx_buf_sz,
1973 					       PCI_DMA_FROMDEVICE);
1974 			if (dma_mapping_error(&rp->pdev->dev, rp->rx_skbuff_dma[entry])) {
1975 				dev_kfree_skb(skb);
1976 				rp->rx_skbuff_dma[entry] = 0;
1977 				break;
1978 			}
1979 			rp->rx_ring[entry].addr = cpu_to_le32(rp->rx_skbuff_dma[entry]);
1980 		}
1981 		rp->rx_ring[entry].rx_status = cpu_to_le32(DescOwn);
1982 	}
1983 
1984 	return count;
1985 }
1986 
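/*
 * Resume transmission at the first descriptor the chip has not completed
 * yet, unless new Tx errors are pending; in that case the error path will
 * bring us back here shortly anyway.
 */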
static void rhine_restart_tx(struct net_device *dev)
{
1988 	struct rhine_private *rp = netdev_priv(dev);
1989 	void __iomem *ioaddr = rp->base;
1990 	int entry = rp->dirty_tx % TX_RING_SIZE;
1991 	u32 intr_status;
1992 
1993 	/*
1994 	 * If new errors occurred, we need to sort them out before doing Tx.
1995 	 * In that case the ISR will be back here RSN anyway.
1996 	 */
1997 	intr_status = rhine_get_events(rp);
1998 
1999 	if ((intr_status & IntrTxErrSummary) == 0) {
2000 
2001 		/* We know better than the chip where it should continue. */
2002 		iowrite32(rp->tx_ring_dma + entry * sizeof(struct tx_desc),
2003 		       ioaddr + TxRingPtr);
2004 
2005 		iowrite8(ioread8(ioaddr + ChipCmd) | CmdTxOn,
2006 		       ioaddr + ChipCmd);
2007 
2008 		if (rp->tx_ring[entry].desc_length & cpu_to_le32(0x020000))
2009 			/* Tx queues are bits 7-0 (first Tx queue: bit 7) */
2010 			BYTE_REG_BITS_ON(1 << 7, ioaddr + TQWake);
2011 
2012 		iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
2013 		       ioaddr + ChipCmd1);
2014 		IOSYNC;
	} else {
2017 		/* This should never happen */
2018 		netif_warn(rp, tx_err, dev, "another error occurred %08x\n",
2019 			   intr_status);
2020 	}
2022 }
2023 
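/*
 * Deferred (process context) handling of slow events: link changes and
 * PCI errors.  Re-enables the chip's interrupt sources when done.
 */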
2024 static void rhine_slow_event_task(struct work_struct *work)
2025 {
2026 	struct rhine_private *rp =
2027 		container_of(work, struct rhine_private, slow_event_task);
2028 	struct net_device *dev = rp->dev;
2029 	u32 intr_status;
2030 
2031 	mutex_lock(&rp->task_lock);
2032 
2033 	if (!rp->task_enable)
2034 		goto out_unlock;
2035 
2036 	intr_status = rhine_get_events(rp);
2037 	rhine_ack_events(rp, intr_status & RHINE_EVENT_SLOW);
2038 
2039 	if (intr_status & IntrLinkChange)
2040 		rhine_check_media(dev, 0);
2041 
2042 	if (intr_status & IntrPCIErr)
2043 		netif_warn(rp, hw, dev, "PCI error\n");
2044 
2045 	iowrite16(RHINE_EVENT & 0xffff, rp->base + IntrEnable);
2046 
2047 out_unlock:
2048 	mutex_unlock(&rp->task_lock);
2049 }
2050 
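/*
 * Fold the chip's CRC/missed counters into dev->stats under the lock,
 * then snapshot the 64-bit Rx/Tx soft counters inside u64_stats retry
 * loops so readers never observe torn values.
 */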
2051 static struct rtnl_link_stats64 *
2052 rhine_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
2053 {
2054 	struct rhine_private *rp = netdev_priv(dev);
2055 	unsigned int start;
2056 
2057 	spin_lock_bh(&rp->lock);
2058 	rhine_update_rx_crc_and_missed_errord(rp);
2059 	spin_unlock_bh(&rp->lock);
2060 
2061 	netdev_stats_to_stats64(stats, &dev->stats);
2062 
2063 	do {
2064 		start = u64_stats_fetch_begin_bh(&rp->rx_stats.syncp);
2065 		stats->rx_packets = rp->rx_stats.packets;
2066 		stats->rx_bytes = rp->rx_stats.bytes;
2067 	} while (u64_stats_fetch_retry_bh(&rp->rx_stats.syncp, start));
2068 
2069 	do {
2070 		start = u64_stats_fetch_begin_bh(&rp->tx_stats.syncp);
2071 		stats->tx_packets = rp->tx_stats.packets;
2072 		stats->tx_bytes = rp->tx_stats.bytes;
2073 	} while (u64_stats_fetch_retry_bh(&rp->tx_stats.syncp, start));
2074 
2075 	return stats;
2076 }
2077 
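/*
 * Program the receive filter: promiscuous mode accepts everything,
 * IFF_ALLMULTI (or too many multicast addresses) opens the hash filter
 * completely, VT6105M and newer use the multicast CAM, and older chips
 * use the 64-bit multicast hash filter.
 */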
2078 static void rhine_set_rx_mode(struct net_device *dev)
2079 {
2080 	struct rhine_private *rp = netdev_priv(dev);
2081 	void __iomem *ioaddr = rp->base;
2082 	u32 mc_filter[2];	/* Multicast hash filter */
2083 	u8 rx_mode = 0x0C;	/* Note: 0x02=accept runt, 0x01=accept errs */
2084 	struct netdev_hw_addr *ha;
2085 
2086 	if (dev->flags & IFF_PROMISC) {		/* Set promiscuous. */
2087 		rx_mode = 0x1C;
2088 		iowrite32(0xffffffff, ioaddr + MulticastFilter0);
2089 		iowrite32(0xffffffff, ioaddr + MulticastFilter1);
2090 	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
2091 		   (dev->flags & IFF_ALLMULTI)) {
2092 		/* Too many to match, or accept all multicasts. */
2093 		iowrite32(0xffffffff, ioaddr + MulticastFilter0);
2094 		iowrite32(0xffffffff, ioaddr + MulticastFilter1);
2095 	} else if (rp->pdev->revision >= VT6105M) {
2096 		int i = 0;
2097 		u32 mCAMmask = 0;	/* 32 mCAMs (6105M and better) */
2098 		netdev_for_each_mc_addr(ha, dev) {
2099 			if (i == MCAM_SIZE)
2100 				break;
2101 			rhine_set_cam(ioaddr, i, ha->addr);
2102 			mCAMmask |= 1 << i;
2103 			i++;
2104 		}
2105 		rhine_set_cam_mask(ioaddr, mCAMmask);
2106 	} else {
2107 		memset(mc_filter, 0, sizeof(mc_filter));
2108 		netdev_for_each_mc_addr(ha, dev) {
2109 			int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
2110 
2111 			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
2112 		}
2113 		iowrite32(mc_filter[0], ioaddr + MulticastFilter0);
2114 		iowrite32(mc_filter[1], ioaddr + MulticastFilter1);
2115 	}
2116 	/* enable/disable VLAN receive filtering */
2117 	if (rp->pdev->revision >= VT6105M) {
2118 		if (dev->flags & IFF_PROMISC)
2119 			BYTE_REG_BITS_OFF(BCR1_VIDFR, ioaddr + PCIBusConfig1);
2120 		else
2121 			BYTE_REG_BITS_ON(BCR1_VIDFR, ioaddr + PCIBusConfig1);
2122 	}
2123 	BYTE_REG_BITS_ON(rx_mode, ioaddr + RxConfig);
2124 }
2125 
2126 static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
2127 {
2128 	struct rhine_private *rp = netdev_priv(dev);
2129 
2130 	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
2131 	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
2132 	strlcpy(info->bus_info, pci_name(rp->pdev), sizeof(info->bus_info));
2133 }
2134 
2135 static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2136 {
2137 	struct rhine_private *rp = netdev_priv(dev);
2138 	int rc;
2139 
2140 	mutex_lock(&rp->task_lock);
2141 	rc = mii_ethtool_gset(&rp->mii_if, cmd);
2142 	mutex_unlock(&rp->task_lock);
2143 
2144 	return rc;
2145 }
2146 
2147 static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2148 {
2149 	struct rhine_private *rp = netdev_priv(dev);
2150 	int rc;
2151 
2152 	mutex_lock(&rp->task_lock);
2153 	rc = mii_ethtool_sset(&rp->mii_if, cmd);
2154 	rhine_set_carrier(&rp->mii_if);
2155 	mutex_unlock(&rp->task_lock);
2156 
2157 	return rc;
2158 }
2159 
2160 static int netdev_nway_reset(struct net_device *dev)
2161 {
2162 	struct rhine_private *rp = netdev_priv(dev);
2163 
2164 	return mii_nway_restart(&rp->mii_if);
2165 }
2166 
2167 static u32 netdev_get_link(struct net_device *dev)
2168 {
2169 	struct rhine_private *rp = netdev_priv(dev);
2170 
2171 	return mii_link_ok(&rp->mii_if);
2172 }
2173 
2174 static u32 netdev_get_msglevel(struct net_device *dev)
2175 {
2176 	struct rhine_private *rp = netdev_priv(dev);
2177 
2178 	return rp->msg_enable;
2179 }
2180 
2181 static void netdev_set_msglevel(struct net_device *dev, u32 value)
2182 {
2183 	struct rhine_private *rp = netdev_priv(dev);
2184 
2185 	rp->msg_enable = value;
2186 }
2187 
2188 static void rhine_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2189 {
2190 	struct rhine_private *rp = netdev_priv(dev);
2191 
2192 	if (!(rp->quirks & rqWOL))
2193 		return;
2194 
2195 	spin_lock_irq(&rp->lock);
2196 	wol->supported = WAKE_PHY | WAKE_MAGIC |
2197 			 WAKE_UCAST | WAKE_MCAST | WAKE_BCAST;	/* Untested */
2198 	wol->wolopts = rp->wolopts;
2199 	spin_unlock_irq(&rp->lock);
2200 }
2201 
2202 static int rhine_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2203 {
2204 	struct rhine_private *rp = netdev_priv(dev);
2205 	u32 support = WAKE_PHY | WAKE_MAGIC |
2206 		      WAKE_UCAST | WAKE_MCAST | WAKE_BCAST;	/* Untested */
2207 
2208 	if (!(rp->quirks & rqWOL))
2209 		return -EINVAL;
2210 
2211 	if (wol->wolopts & ~support)
2212 		return -EINVAL;
2213 
2214 	spin_lock_irq(&rp->lock);
2215 	rp->wolopts = wol->wolopts;
2216 	spin_unlock_irq(&rp->lock);
2217 
2218 	return 0;
2219 }
2220 
2221 static const struct ethtool_ops netdev_ethtool_ops = {
2222 	.get_drvinfo		= netdev_get_drvinfo,
2223 	.get_settings		= netdev_get_settings,
2224 	.set_settings		= netdev_set_settings,
2225 	.nway_reset		= netdev_nway_reset,
2226 	.get_link		= netdev_get_link,
2227 	.get_msglevel		= netdev_get_msglevel,
2228 	.set_msglevel		= netdev_set_msglevel,
2229 	.get_wol		= rhine_get_wol,
2230 	.set_wol		= rhine_set_wol,
2231 };
2232 
2233 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2234 {
2235 	struct rhine_private *rp = netdev_priv(dev);
2236 	int rc;
2237 
2238 	if (!netif_running(dev))
2239 		return -EINVAL;
2240 
2241 	mutex_lock(&rp->task_lock);
2242 	rc = generic_mii_ioctl(&rp->mii_if, if_mii(rq), cmd, NULL);
2243 	rhine_set_carrier(&rp->mii_if);
2244 	mutex_unlock(&rp->task_lock);
2245 
2246 	return rc;
2247 }
2248 
2249 static int rhine_close(struct net_device *dev)
2250 {
2251 	struct rhine_private *rp = netdev_priv(dev);
2252 	void __iomem *ioaddr = rp->base;
2253 
2254 	rhine_task_disable(rp);
2255 	napi_disable(&rp->napi);
2256 	netif_stop_queue(dev);
2257 
2258 	netif_dbg(rp, ifdown, dev, "Shutting down ethercard, status was %04x\n",
2259 		  ioread16(ioaddr + ChipCmd));
2260 
2261 	/* Switch to loopback mode to avoid hardware races. */
2262 	iowrite8(rp->tx_thresh | 0x02, ioaddr + TxConfig);
2263 
2264 	rhine_irq_disable(rp);
2265 
2266 	/* Stop the chip's Tx and Rx processes. */
2267 	iowrite16(CmdStop, ioaddr + ChipCmd);
2268 
2269 	free_irq(rp->pdev->irq, dev);
2270 	free_rbufs(dev);
2271 	free_tbufs(dev);
2272 	free_ring(dev);
2273 
2274 	return 0;
2275 }
2276 
2277 
2278 static void rhine_remove_one(struct pci_dev *pdev)
2279 {
2280 	struct net_device *dev = pci_get_drvdata(pdev);
2281 	struct rhine_private *rp = netdev_priv(dev);
2282 
2283 	unregister_netdev(dev);
2284 
2285 	pci_iounmap(pdev, rp->base);
2286 	pci_release_regions(pdev);
2287 
2288 	free_netdev(dev);
2289 	pci_disable_device(pdev);
2290 	pci_set_drvdata(pdev, NULL);
2291 }
2292 
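/*
 * Arm the Wake-on-LAN sources selected via ethtool and, on power-off,
 * put the chip into D3 unless avoid_D3 is set.
 */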
static void rhine_shutdown(struct pci_dev *pdev)
2294 {
2295 	struct net_device *dev = pci_get_drvdata(pdev);
2296 	struct rhine_private *rp = netdev_priv(dev);
2297 	void __iomem *ioaddr = rp->base;
2298 
2299 	if (!(rp->quirks & rqWOL))
2300 		return; /* Nothing to do for non-WOL adapters */
2301 
2302 	rhine_power_init(dev);
2303 
2304 	/* Make sure we use pattern 0, 1 and not 4, 5 */
2305 	if (rp->quirks & rq6patterns)
2306 		iowrite8(0x04, ioaddr + WOLcgClr);
2307 
2308 	spin_lock(&rp->lock);
2309 
2310 	if (rp->wolopts & WAKE_MAGIC) {
2311 		iowrite8(WOLmagic, ioaddr + WOLcrSet);
2312 		/*
2313 		 * Turn EEPROM-controlled wake-up back on -- some hardware may
2314 		 * not cooperate otherwise.
2315 		 */
2316 		iowrite8(ioread8(ioaddr + ConfigA) | 0x03, ioaddr + ConfigA);
2317 	}
2318 
2319 	if (rp->wolopts & (WAKE_BCAST|WAKE_MCAST))
2320 		iowrite8(WOLbmcast, ioaddr + WOLcgSet);
2321 
2322 	if (rp->wolopts & WAKE_PHY)
2323 		iowrite8(WOLlnkon | WOLlnkoff, ioaddr + WOLcrSet);
2324 
2325 	if (rp->wolopts & WAKE_UCAST)
2326 		iowrite8(WOLucast, ioaddr + WOLcrSet);
2327 
2328 	if (rp->wolopts) {
2329 		/* Enable legacy WOL (for old motherboards) */
2330 		iowrite8(0x01, ioaddr + PwcfgSet);
2331 		iowrite8(ioread8(ioaddr + StickyHW) | 0x04, ioaddr + StickyHW);
2332 	}
2333 
2334 	spin_unlock(&rp->lock);
2335 
2336 	if (system_state == SYSTEM_POWER_OFF && !avoid_D3) {
2337 		iowrite8(ioread8(ioaddr + StickyHW) | 0x03, ioaddr + StickyHW);
2338 
2339 		pci_wake_from_d3(pdev, true);
2340 		pci_set_power_state(pdev, PCI_D3hot);
2341 	}
2342 }
2343 
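/*
 * Power management: quiesce the device and arm wake-up state on suspend,
 * rebuild the rings and reinitialize the chip on resume.
 */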
2344 #ifdef CONFIG_PM_SLEEP
2345 static int rhine_suspend(struct device *device)
2346 {
2347 	struct pci_dev *pdev = to_pci_dev(device);
2348 	struct net_device *dev = pci_get_drvdata(pdev);
2349 	struct rhine_private *rp = netdev_priv(dev);
2350 
2351 	if (!netif_running(dev))
2352 		return 0;
2353 
2354 	rhine_task_disable(rp);
2355 	rhine_irq_disable(rp);
2356 	napi_disable(&rp->napi);
2357 
2358 	netif_device_detach(dev);
2359 
2360 	rhine_shutdown(pdev);
2361 
2362 	return 0;
2363 }
2364 
2365 static int rhine_resume(struct device *device)
2366 {
2367 	struct pci_dev *pdev = to_pci_dev(device);
2368 	struct net_device *dev = pci_get_drvdata(pdev);
2369 	struct rhine_private *rp = netdev_priv(dev);
2370 
2371 	if (!netif_running(dev))
2372 		return 0;
2373 
2374 #ifdef USE_MMIO
2375 	enable_mmio(rp->pioaddr, rp->quirks);
2376 #endif
2377 	rhine_power_init(dev);
2378 	free_tbufs(dev);
2379 	free_rbufs(dev);
2380 	alloc_tbufs(dev);
2381 	alloc_rbufs(dev);
2382 	rhine_task_enable(rp);
2383 	spin_lock_bh(&rp->lock);
2384 	init_registers(dev);
2385 	spin_unlock_bh(&rp->lock);
2386 
2387 	netif_device_attach(dev);
2388 
2389 	return 0;
2390 }
2391 
2392 static SIMPLE_DEV_PM_OPS(rhine_pm_ops, rhine_suspend, rhine_resume);
2393 #define RHINE_PM_OPS	(&rhine_pm_ops)
2394 
2395 #else
2396 
2397 #define RHINE_PM_OPS	NULL
2398 
2399 #endif /* !CONFIG_PM_SLEEP */
2400 
2401 static struct pci_driver rhine_driver = {
2402 	.name		= DRV_NAME,
2403 	.id_table	= rhine_pci_tbl,
2404 	.probe		= rhine_init_one,
2405 	.remove		= rhine_remove_one,
2406 	.shutdown	= rhine_shutdown,
2407 	.driver.pm	= RHINE_PM_OPS,
2408 };
2409 
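/*
 * Systems whose BIOS cannot bring the chip back out of power state D3:
 * force avoid_D3 for them so PXE boot keeps working (see rhine_init()).
 */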
2410 static struct dmi_system_id rhine_dmi_table[] __initdata = {
2411 	{
2412 		.ident = "EPIA-M",
2413 		.matches = {
2414 			DMI_MATCH(DMI_BIOS_VENDOR, "Award Software International, Inc."),
2415 			DMI_MATCH(DMI_BIOS_VERSION, "6.00 PG"),
2416 		},
2417 	},
2418 	{
2419 		.ident = "KV7",
2420 		.matches = {
2421 			DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies, LTD"),
2422 			DMI_MATCH(DMI_BIOS_VERSION, "6.00 PG"),
2423 		},
2424 	},
2425 	{ NULL }
2426 };
2427 
2428 static int __init rhine_init(void)
2429 {
/* When built as a module, this is printed even if no devices are found in probe */
2431 #ifdef MODULE
2432 	pr_info("%s\n", version);
2433 #endif
2434 	if (dmi_check_system(rhine_dmi_table)) {
2435 		/* these BIOSes fail at PXE boot if chip is in D3 */
2436 		avoid_D3 = true;
2437 		pr_warn("Broken BIOS detected, avoid_D3 enabled\n");
	} else if (avoid_D3)
2440 		pr_info("avoid_D3 set\n");
2441 
2442 	return pci_register_driver(&rhine_driver);
2443 }
2444 
2445 
2446 static void __exit rhine_cleanup(void)
2447 {
2448 	pci_unregister_driver(&rhine_driver);
2449 }
2450 
2451 
2452 module_init(rhine_init);
2453 module_exit(rhine_cleanup);
2454