1 /* via-rhine.c: A Linux Ethernet device driver for VIA Rhine family chips. */
2 /*
3 	Written 1998-2001 by Donald Becker.
4 
5 	Current Maintainer: Roger Luethi <rl@hellgate.ch>
6 
7 	This software may be used and distributed according to the terms of
8 	the GNU General Public License (GPL), incorporated herein by reference.
9 	Drivers based on or derived from this code fall under the GPL and must
10 	retain the authorship, copyright and license notice.  This file is not
11 	a complete program and may only be used when the entire operating
12 	system is licensed under the GPL.
13 
14 	This driver is designed for the VIA VT86C100A Rhine-I.
15 	It also works with the Rhine-II (6102) and Rhine-III (6105/6105L/6105LOM
16 	and management NIC 6105M).
17 
18 	The author may be reached as becker@scyld.com, or C/O
19 	Scyld Computing Corporation
20 	410 Severn Ave., Suite 210
21 	Annapolis MD 21403
22 
23 
24 	This driver contains some changes from the original Donald Becker
25 	version. He may or may not be interested in bug reports on this
26 	code. You can find his versions at:
27 	http://www.scyld.com/network/via-rhine.html
28 	[link no longer provides useful info -jgarzik]
29 
30 */
31 
32 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
33 
34 #define DRV_NAME	"via-rhine"
35 #define DRV_VERSION	"1.5.1"
36 #define DRV_RELDATE	"2010-10-09"
37 
38 #include <linux/types.h>
39 
40 /* A few user-configurable values.
41    These may be modified when a driver module is loaded. */
42 static int debug = 0;
43 #define RHINE_MSG_DEFAULT \
44         (0x0000)
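/*
 * 'debug' is handed to netif_msg_init() together with RHINE_MSG_DEFAULT in
 * rhine_init_one_common() to seed rp->msg_enable, which gates the
 * netif_dbg()/netif_info()/netif_warn() messages throughout this driver.
 */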
45 
/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
   Frames shorter than rx_copybreak are copied into a freshly allocated
   skbuff; 0 disables copying, while a value above 1518 copies every frame. */
48 #if defined(__alpha__) || defined(__arm__) || defined(__hppa__) || \
49 	defined(CONFIG_SPARC) || defined(__ia64__) ||		   \
50 	defined(__sh__) || defined(__mips__)
51 static int rx_copybreak = 1518;
52 #else
53 static int rx_copybreak;
54 #endif
55 
56 /* Work-around for broken BIOSes: they are unable to get the chip back out of
57    power state D3 so PXE booting fails. bootparam(7): via-rhine.avoid_D3=1 */
58 static bool avoid_D3;
59 
60 /*
61  * In case you are looking for 'options[]' or 'full_duplex[]', they
62  * are gone. Use ethtool(8) instead.
63  */
64 
65 /* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
66    The Rhine has a 64 element 8390-like hash table. */
67 static const int multicast_filter_limit = 32;
68 
69 
70 /* Operational parameters that are set at compile time. */
71 
72 /* Keep the ring sizes a power of two for compile efficiency.
73  * The compiler will convert <unsigned>'%'<2^N> into a bit mask.
74  * Making the Tx ring too large decreases the effectiveness of channel
75  * bonding and packet priority.
 * With BQL support, we can safely increase the Tx ring size.
77  * There are no ill effects from too-large receive rings.
78  */
79 #define TX_RING_SIZE	64
80 #define TX_QUEUE_LEN	(TX_RING_SIZE - 6)	/* Limit ring entries actually used. */
81 #define RX_RING_SIZE	64
82 
83 /* Operational parameters that usually are not changed. */
84 
85 /* Time in jiffies before concluding the transmitter is hung. */
86 #define TX_TIMEOUT	(2*HZ)
87 
88 #define PKT_BUF_SZ	1536	/* Size of each temporary Rx buffer.*/
89 
90 #include <linux/module.h>
91 #include <linux/moduleparam.h>
92 #include <linux/kernel.h>
93 #include <linux/string.h>
94 #include <linux/timer.h>
95 #include <linux/errno.h>
96 #include <linux/ioport.h>
97 #include <linux/interrupt.h>
98 #include <linux/pci.h>
99 #include <linux/of_device.h>
100 #include <linux/of_irq.h>
101 #include <linux/platform_device.h>
102 #include <linux/dma-mapping.h>
103 #include <linux/netdevice.h>
104 #include <linux/etherdevice.h>
105 #include <linux/skbuff.h>
106 #include <linux/init.h>
107 #include <linux/delay.h>
108 #include <linux/mii.h>
109 #include <linux/ethtool.h>
110 #include <linux/crc32.h>
111 #include <linux/if_vlan.h>
112 #include <linux/bitops.h>
113 #include <linux/workqueue.h>
114 #include <asm/processor.h>	/* Processor type for cache alignment. */
115 #include <asm/io.h>
116 #include <asm/irq.h>
117 #include <linux/uaccess.h>
118 #include <linux/dmi.h>
119 
120 /* These identify the driver base version and may not be removed. */
121 static const char version[] =
122 	"v1.10-LK" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker";
123 
124 MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
125 MODULE_DESCRIPTION("VIA Rhine PCI Fast Ethernet driver");
126 MODULE_LICENSE("GPL");
127 
128 module_param(debug, int, 0);
129 module_param(rx_copybreak, int, 0);
130 module_param(avoid_D3, bool, 0);
131 MODULE_PARM_DESC(debug, "VIA Rhine debug message flags");
132 MODULE_PARM_DESC(rx_copybreak, "VIA Rhine copy breakpoint for copy-only-tiny-frames");
133 MODULE_PARM_DESC(avoid_D3, "Avoid power state D3 (work-around for broken BIOSes)");
134 
135 #define MCAM_SIZE	32
136 #define VCAM_SIZE	32
137 
138 /*
139 		Theory of Operation
140 
141 I. Board Compatibility
142 
This driver is designed for the VIA VT86C100A Rhine-I PCI Fast Ethernet
controller and its Rhine-II/Rhine-III successors.
145 
146 II. Board-specific settings
147 
148 Boards with this chip are functional only in a bus-master PCI slot.
149 
150 Many operational settings are loaded from the EEPROM to the Config word at
151 offset 0x78. For most of these settings, this driver assumes that they are
152 correct.
If this driver is compiled to use PCI memory space operations, the EEPROM
must be configured to enable memory ops.
155 
156 III. Driver operation
157 
158 IIIa. Ring buffers
159 
160 This driver uses two statically allocated fixed-size descriptor lists
161 formed into rings by a branch from the final descriptor to the beginning of
162 the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
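
Sketched from alloc_rbufs() below (alloc_tbufs() is analogous for Tx), the
chaining looks roughly like this:

	next = rp->rx_ring_dma;
	for (i = 0; i < RX_RING_SIZE; i++) {
		next += sizeof(struct rx_desc);
		rp->rx_ring[i].next_desc = cpu_to_le32(next);
	}
	rp->rx_ring[i - 1].next_desc = cpu_to_le32(rp->rx_ring_dma);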
163 
164 IIIb/c. Transmit/Receive Structure
165 
166 This driver attempts to use a zero-copy receive and transmit scheme.
167 
168 Alas, all data buffers are required to start on a 32 bit boundary, so
169 the driver must often copy transmit packets into bounce buffers.
170 
171 The driver allocates full frame size skbuffs for the Rx ring buffers at
172 open() time and passes the skb->data field to the chip as receive data
173 buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
174 a fresh skbuff is allocated and the frame is copied to the new skbuff.
175 When the incoming frame is larger, the skbuff is passed directly up the
176 protocol stack. Buffers consumed this way are replaced by newly allocated
177 skbuffs in the last phase of rhine_rx().
178 
179 The RX_COPYBREAK value is chosen to trade-off the memory wasted by
180 using a full-sized skbuff for small frames vs. the copying costs of larger
181 frames. New boards are typically used in generously configured machines
182 and the underfilled buffers have negligible impact compared to the benefit of
a single allocation size, so the default value of zero results in never
copying packets (on architectures that cannot handle unaligned receive
buffers the default is 1518 instead, so nearly every frame is copied).
When copying is done, the cost is usually mitigated by using
185 a combined copy/checksum routine. Copying also preloads the cache, which is
186 most useful with small frames.
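
In outline, rhine_rx() below decides per frame:

	if (pkt_len < rx_copybreak)
		copy the frame into a fresh small skbuff and leave the
		original ring buffer in place;
	else
		pass the ring skbuff up the stack and refill the slot
		with a newly allocated skbuff;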
187 
188 Since the VIA chips are only able to transfer data to buffers on 32 bit
189 boundaries, the IP header at offset 14 in an ethernet frame isn't
190 longword aligned for further processing. Copying these unaligned buffers
191 has the beneficial effect of 16-byte aligning the IP header.
192 
193 IIId. Synchronization
194 
195 The driver runs as two independent, single-threaded flows of control. One
196 is the send-packet routine, which enforces single-threaded use by the
197 netdev_priv(dev)->lock spinlock. The other thread is the interrupt handler,
198 which is single threaded by the hardware and interrupt handling software.
199 
200 The send packet thread has partial control over the Tx ring. It locks the
201 netdev_priv(dev)->lock whenever it's queuing a Tx packet. If the next slot in
202 the ring is not available it stops the transmit queue by
203 calling netif_stop_queue.
204 
205 The interrupt handler has exclusive control over the Rx ring and records stats
206 from the Tx ring. After reaping the stats, it marks the Tx queue entry as
empty by incrementing the dirty_tx mark. Once enough entries in the Tx ring
have been reclaimed, the transmit queue is woken up if it was stopped.
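
In code terms, rhine_start_tx() advances cur_tx and calls netif_stop_queue()
once cur_tx - dirty_tx reaches TX_QUEUE_LEN (see rhine_tx_queue_full());
rhine_tx() advances dirty_tx as descriptors complete and wakes the queue
again when there is room.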
209 
210 IV. Notes
211 
212 IVb. References
213 
214 Preliminary VT86C100A manual from http://www.via.com.tw/
215 http://www.scyld.com/expert/100mbps.html
216 http://www.scyld.com/expert/NWay.html
217 ftp://ftp.via.com.tw/public/lan/Products/NIC/VT86C100A/Datasheet/VT86C100A03.pdf
218 ftp://ftp.via.com.tw/public/lan/Products/NIC/VT6102/Datasheet/VT6102_021.PDF
219 
220 
221 IVc. Errata
222 
The VT86C100A manual is not a reliable source of information.
224 The 3043 chip does not handle unaligned transmit or receive buffers, resulting
225 in significant performance degradation for bounce buffer copies on transmit
226 and unaligned IP headers on receive.
227 The chip does not pad to minimum transmit length.
228 
229 */
230 
231 
/* This table drives the PCI probe routines. It's mostly boilerplate in all
   of the drivers, and will likely be provided by some future kernel.
   Note the matching code -- PCI_ANY_ID in the subvendor/subdevice fields
   makes each entry match any subsystem IDs for that device.
*/
237 
238 enum rhine_revs {
239 	VT86C100A	= 0x00,
240 	VTunknown0	= 0x20,
241 	VT6102		= 0x40,
242 	VT8231		= 0x50,	/* Integrated MAC */
243 	VT8233		= 0x60,	/* Integrated MAC */
244 	VT8235		= 0x74,	/* Integrated MAC */
245 	VT8237		= 0x78,	/* Integrated MAC */
246 	VTunknown1	= 0x7C,
247 	VT6105		= 0x80,
248 	VT6105_B0	= 0x83,
249 	VT6105L		= 0x8A,
250 	VT6107		= 0x8C,
251 	VTunknown2	= 0x8E,
252 	VT6105M		= 0x90,	/* Management adapter */
253 };
254 
255 enum rhine_quirks {
256 	rqWOL		= 0x0001,	/* Wake-On-LAN support */
257 	rqForceReset	= 0x0002,
258 	rq6patterns	= 0x0040,	/* 6 instead of 4 patterns for WOL */
259 	rqStatusWBRace	= 0x0080,	/* Tx Status Writeback Error possible */
260 	rqRhineI	= 0x0100,	/* See comment below */
261 	rqIntPHY	= 0x0200,	/* Integrated PHY */
262 	rqMgmt		= 0x0400,	/* Management adapter */
263 	rqNeedEnMMIO	= 0x0800,	/* Whether the core needs to be
264 					 * switched from PIO mode to MMIO
265 					 * (only applies to PCI)
266 					 */
267 };
268 /*
269  * rqRhineI: VT86C100A (aka Rhine-I) uses different bits to enable
270  * MMIO as well as for the collision counter and the Tx FIFO underflow
 * indicator. In addition, Tx and Rx buffers need to be 4-byte aligned.
272  */
273 
274 /* Beware of PCI posted writes */
275 #define IOSYNC	do { ioread8(ioaddr + StationAddr); } while (0)
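/*
 * IOSYNC does a dummy read so that posted PCI writes reach the chip before
 * execution continues; StationAddr is read simply because doing so has no
 * side effects.
 */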
276 
277 static const struct pci_device_id rhine_pci_tbl[] = {
278 	{ 0x1106, 0x3043, PCI_ANY_ID, PCI_ANY_ID, },	/* VT86C100A */
279 	{ 0x1106, 0x3065, PCI_ANY_ID, PCI_ANY_ID, },	/* VT6102 */
280 	{ 0x1106, 0x3106, PCI_ANY_ID, PCI_ANY_ID, },	/* 6105{,L,LOM} */
281 	{ 0x1106, 0x3053, PCI_ANY_ID, PCI_ANY_ID, },	/* VT6105M */
282 	{ }	/* terminate list */
283 };
284 MODULE_DEVICE_TABLE(pci, rhine_pci_tbl);
285 
286 /* OpenFirmware identifiers for platform-bus devices
287  * The .data field is currently only used to store quirks
288  */
289 static u32 vt8500_quirks = rqWOL | rqForceReset | rq6patterns;
290 static const struct of_device_id rhine_of_tbl[] = {
291 	{ .compatible = "via,vt8500-rhine", .data = &vt8500_quirks },
292 	{ }	/* terminate list */
293 };
294 MODULE_DEVICE_TABLE(of, rhine_of_tbl);
295 
296 /* Offsets to the device registers. */
297 enum register_offsets {
298 	StationAddr=0x00, RxConfig=0x06, TxConfig=0x07, ChipCmd=0x08,
299 	ChipCmd1=0x09, TQWake=0x0A,
300 	IntrStatus=0x0C, IntrEnable=0x0E,
301 	MulticastFilter0=0x10, MulticastFilter1=0x14,
302 	RxRingPtr=0x18, TxRingPtr=0x1C, GFIFOTest=0x54,
303 	MIIPhyAddr=0x6C, MIIStatus=0x6D, PCIBusConfig=0x6E, PCIBusConfig1=0x6F,
304 	MIICmd=0x70, MIIRegAddr=0x71, MIIData=0x72, MACRegEEcsr=0x74,
305 	ConfigA=0x78, ConfigB=0x79, ConfigC=0x7A, ConfigD=0x7B,
306 	RxMissed=0x7C, RxCRCErrs=0x7E, MiscCmd=0x81,
307 	StickyHW=0x83, IntrStatus2=0x84,
308 	CamMask=0x88, CamCon=0x92, CamAddr=0x93,
309 	WOLcrSet=0xA0, PwcfgSet=0xA1, WOLcgSet=0xA3, WOLcrClr=0xA4,
310 	WOLcrClr1=0xA6, WOLcgClr=0xA7,
311 	PwrcsrSet=0xA8, PwrcsrSet1=0xA9, PwrcsrClr=0xAC, PwrcsrClr1=0xAD,
312 };
313 
314 /* Bits in ConfigD */
315 enum backoff_bits {
316 	BackOptional=0x01, BackModify=0x02,
317 	BackCaptureEffect=0x04, BackRandom=0x08
318 };
319 
320 /* Bits in the TxConfig (TCR) register */
321 enum tcr_bits {
322 	TCR_PQEN=0x01,
323 	TCR_LB0=0x02,		/* loopback[0] */
324 	TCR_LB1=0x04,		/* loopback[1] */
325 	TCR_OFSET=0x08,
326 	TCR_RTGOPT=0x10,
327 	TCR_RTFT0=0x20,
328 	TCR_RTFT1=0x40,
329 	TCR_RTSF=0x80,
330 };
331 
332 /* Bits in the CamCon (CAMC) register */
333 enum camcon_bits {
334 	CAMC_CAMEN=0x01,
335 	CAMC_VCAMSL=0x02,
336 	CAMC_CAMWR=0x04,
337 	CAMC_CAMRD=0x08,
338 };
339 
340 /* Bits in the PCIBusConfig1 (BCR1) register */
341 enum bcr1_bits {
342 	BCR1_POT0=0x01,
343 	BCR1_POT1=0x02,
344 	BCR1_POT2=0x04,
345 	BCR1_CTFT0=0x08,
346 	BCR1_CTFT1=0x10,
347 	BCR1_CTSF=0x20,
348 	BCR1_TXQNOBK=0x40,	/* for VT6105 */
349 	BCR1_VIDFR=0x80,	/* for VT6105 */
350 	BCR1_MED0=0x40,		/* for VT6102 */
351 	BCR1_MED1=0x80,		/* for VT6102 */
352 };
353 
/* Registers for which we verify that MMIO and PIO accesses return the same value. */
355 static const int mmio_verify_registers[] = {
356 	RxConfig, TxConfig, IntrEnable, ConfigA, ConfigB, ConfigC, ConfigD,
357 	0
358 };
359 
360 /* Bits in the interrupt status/mask registers. */
361 enum intr_status_bits {
362 	IntrRxDone	= 0x0001,
363 	IntrTxDone	= 0x0002,
364 	IntrRxErr	= 0x0004,
365 	IntrTxError	= 0x0008,
366 	IntrRxEmpty	= 0x0020,
367 	IntrPCIErr	= 0x0040,
368 	IntrStatsMax	= 0x0080,
369 	IntrRxEarly	= 0x0100,
370 	IntrTxUnderrun	= 0x0210,
371 	IntrRxOverflow	= 0x0400,
372 	IntrRxDropped	= 0x0800,
373 	IntrRxNoBuf	= 0x1000,
374 	IntrTxAborted	= 0x2000,
375 	IntrLinkChange	= 0x4000,
376 	IntrRxWakeUp	= 0x8000,
377 	IntrTxDescRace		= 0x080000,	/* mapped from IntrStatus2 */
378 	IntrNormalSummary	= IntrRxDone | IntrTxDone,
379 	IntrTxErrSummary	= IntrTxDescRace | IntrTxAborted | IntrTxError |
380 				  IntrTxUnderrun,
381 };
382 
383 /* Bits in WOLcrSet/WOLcrClr and PwrcsrSet/PwrcsrClr */
384 enum wol_bits {
385 	WOLucast	= 0x10,
386 	WOLmagic	= 0x20,
387 	WOLbmcast	= 0x30,
388 	WOLlnkon	= 0x40,
389 	WOLlnkoff	= 0x80,
390 };
391 
392 /* The Rx and Tx buffer descriptors. */
393 struct rx_desc {
394 	__le32 rx_status;
395 	__le32 desc_length; /* Chain flag, Buffer/frame length */
396 	__le32 addr;
397 	__le32 next_desc;
398 };
399 struct tx_desc {
400 	__le32 tx_status;
401 	__le32 desc_length; /* Chain flag, Tx Config, Frame length */
402 	__le32 addr;
403 	__le32 next_desc;
404 };
405 
406 /* Initial value for tx_desc.desc_length, Buffer size goes to bits 0-10 */
407 #define TXDESC		0x00e08000
408 
409 enum rx_status_bits {
410 	RxOK=0x8000, RxWholePkt=0x0300, RxErr=0x008F
411 };
412 
413 /* Bits in *_desc.*_status */
414 enum desc_status_bits {
415 	DescOwn=0x80000000
416 };
417 
418 /* Bits in *_desc.*_length */
419 enum desc_length_bits {
420 	DescTag=0x00010000
421 };
422 
423 /* Bits in ChipCmd. */
424 enum chip_cmd_bits {
425 	CmdInit=0x01, CmdStart=0x02, CmdStop=0x04, CmdRxOn=0x08,
426 	CmdTxOn=0x10, Cmd1TxDemand=0x20, CmdRxDemand=0x40,
427 	Cmd1EarlyRx=0x01, Cmd1EarlyTx=0x02, Cmd1FDuplex=0x04,
428 	Cmd1NoTxPoll=0x08, Cmd1Reset=0x80,
429 };
430 
431 struct rhine_stats {
432 	u64		packets;
433 	u64		bytes;
434 	struct u64_stats_sync syncp;
435 };
436 
437 struct rhine_private {
438 	/* Bit mask for configured VLAN ids */
439 	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
440 
441 	/* Descriptor rings */
442 	struct rx_desc *rx_ring;
443 	struct tx_desc *tx_ring;
444 	dma_addr_t rx_ring_dma;
445 	dma_addr_t tx_ring_dma;
446 
447 	/* The addresses of receive-in-place skbuffs. */
448 	struct sk_buff *rx_skbuff[RX_RING_SIZE];
449 	dma_addr_t rx_skbuff_dma[RX_RING_SIZE];
450 
451 	/* The saved address of a sent-in-place packet/buffer, for later free(). */
452 	struct sk_buff *tx_skbuff[TX_RING_SIZE];
453 	dma_addr_t tx_skbuff_dma[TX_RING_SIZE];
454 
455 	/* Tx bounce buffers (Rhine-I only) */
456 	unsigned char *tx_buf[TX_RING_SIZE];
457 	unsigned char *tx_bufs;
458 	dma_addr_t tx_bufs_dma;
459 
460 	int irq;
461 	long pioaddr;
462 	struct net_device *dev;
463 	struct napi_struct napi;
464 	spinlock_t lock;
465 	struct mutex task_lock;
466 	bool task_enable;
467 	struct work_struct slow_event_task;
468 	struct work_struct reset_task;
469 
470 	u32 msg_enable;
471 
472 	/* Frequently used values: keep some adjacent for cache effect. */
473 	u32 quirks;
474 	unsigned int cur_rx;
475 	unsigned int cur_tx, dirty_tx;
476 	unsigned int rx_buf_sz;		/* Based on MTU+slack. */
477 	struct rhine_stats rx_stats;
478 	struct rhine_stats tx_stats;
479 	u8 wolopts;
480 
481 	u8 tx_thresh, rx_thresh;
482 
483 	struct mii_if_info mii_if;
484 	void __iomem *base;
485 };
486 
487 #define BYTE_REG_BITS_ON(x, p)      do { iowrite8((ioread8((p))|(x)), (p)); } while (0)
488 #define WORD_REG_BITS_ON(x, p)      do { iowrite16((ioread16((p))|(x)), (p)); } while (0)
489 #define DWORD_REG_BITS_ON(x, p)     do { iowrite32((ioread32((p))|(x)), (p)); } while (0)
490 
491 #define BYTE_REG_BITS_IS_ON(x, p)   (ioread8((p)) & (x))
492 #define WORD_REG_BITS_IS_ON(x, p)   (ioread16((p)) & (x))
493 #define DWORD_REG_BITS_IS_ON(x, p)  (ioread32((p)) & (x))
494 
495 #define BYTE_REG_BITS_OFF(x, p)     do { iowrite8(ioread8((p)) & (~(x)), (p)); } while (0)
496 #define WORD_REG_BITS_OFF(x, p)     do { iowrite16(ioread16((p)) & (~(x)), (p)); } while (0)
497 #define DWORD_REG_BITS_OFF(x, p)    do { iowrite32(ioread32((p)) & (~(x)), (p)); } while (0)
498 
499 #define BYTE_REG_BITS_SET(x, m, p)   do { iowrite8((ioread8((p)) & (~(m)))|(x), (p)); } while (0)
500 #define WORD_REG_BITS_SET(x, m, p)   do { iowrite16((ioread16((p)) & (~(m)))|(x), (p)); } while (0)
501 #define DWORD_REG_BITS_SET(x, m, p)  do { iowrite32((ioread32((p)) & (~(m)))|(x), (p)); } while (0)
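
/*
 * The macros above perform read-modify-write accesses to chip registers.
 * For example, rhine_init_cam_filter() below uses
 * BYTE_REG_BITS_ON(TCR_PQEN, ioaddr + TxConfig) to set a single bit while
 * leaving the rest of TxConfig untouched.
 */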
502 
503 
504 static int  mdio_read(struct net_device *dev, int phy_id, int location);
505 static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
506 static int  rhine_open(struct net_device *dev);
507 static void rhine_reset_task(struct work_struct *work);
508 static void rhine_slow_event_task(struct work_struct *work);
509 static void rhine_tx_timeout(struct net_device *dev);
510 static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
511 				  struct net_device *dev);
512 static irqreturn_t rhine_interrupt(int irq, void *dev_instance);
513 static void rhine_tx(struct net_device *dev);
514 static int rhine_rx(struct net_device *dev, int limit);
515 static void rhine_set_rx_mode(struct net_device *dev);
516 static void rhine_get_stats64(struct net_device *dev,
517 			      struct rtnl_link_stats64 *stats);
518 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
519 static const struct ethtool_ops netdev_ethtool_ops;
520 static int  rhine_close(struct net_device *dev);
521 static int rhine_vlan_rx_add_vid(struct net_device *dev,
522 				 __be16 proto, u16 vid);
523 static int rhine_vlan_rx_kill_vid(struct net_device *dev,
524 				  __be16 proto, u16 vid);
525 static void rhine_restart_tx(struct net_device *dev);
526 
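/*
 * Poll (up to 1024 * 10 us) for a register bit to reach the requested
 * state; unusually long waits are reported via netif_dbg().
 */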
527 static void rhine_wait_bit(struct rhine_private *rp, u8 reg, u8 mask, bool low)
528 {
529 	void __iomem *ioaddr = rp->base;
530 	int i;
531 
532 	for (i = 0; i < 1024; i++) {
533 		bool has_mask_bits = !!(ioread8(ioaddr + reg) & mask);
534 
535 		if (low ^ has_mask_bits)
536 			break;
537 		udelay(10);
538 	}
539 	if (i > 64) {
540 		netif_dbg(rp, hw, rp->dev, "%s bit wait (%02x/%02x) cycle "
541 			  "count: %04d\n", low ? "low" : "high", reg, mask, i);
542 	}
543 }
544 
545 static void rhine_wait_bit_high(struct rhine_private *rp, u8 reg, u8 mask)
546 {
547 	rhine_wait_bit(rp, reg, mask, false);
548 }
549 
550 static void rhine_wait_bit_low(struct rhine_private *rp, u8 reg, u8 mask)
551 {
552 	rhine_wait_bit(rp, reg, mask, true);
553 }
554 
555 static u32 rhine_get_events(struct rhine_private *rp)
556 {
557 	void __iomem *ioaddr = rp->base;
558 	u32 intr_status;
559 
560 	intr_status = ioread16(ioaddr + IntrStatus);
561 	/* On Rhine-II, Bit 3 indicates Tx descriptor write-back race. */
562 	if (rp->quirks & rqStatusWBRace)
563 		intr_status |= ioread8(ioaddr + IntrStatus2) << 16;
564 	return intr_status;
565 }
566 
567 static void rhine_ack_events(struct rhine_private *rp, u32 mask)
568 {
569 	void __iomem *ioaddr = rp->base;
570 
571 	if (rp->quirks & rqStatusWBRace)
572 		iowrite8(mask >> 16, ioaddr + IntrStatus2);
573 	iowrite16(mask, ioaddr + IntrStatus);
574 }
575 
576 /*
577  * Get power related registers into sane state.
578  * Notify user about past WOL event.
579  */
580 static void rhine_power_init(struct net_device *dev)
581 {
582 	struct rhine_private *rp = netdev_priv(dev);
583 	void __iomem *ioaddr = rp->base;
584 	u16 wolstat;
585 
586 	if (rp->quirks & rqWOL) {
587 		/* Make sure chip is in power state D0 */
588 		iowrite8(ioread8(ioaddr + StickyHW) & 0xFC, ioaddr + StickyHW);
589 
590 		/* Disable "force PME-enable" */
591 		iowrite8(0x80, ioaddr + WOLcgClr);
592 
593 		/* Clear power-event config bits (WOL) */
594 		iowrite8(0xFF, ioaddr + WOLcrClr);
595 		/* More recent cards can manage two additional patterns */
596 		if (rp->quirks & rq6patterns)
597 			iowrite8(0x03, ioaddr + WOLcrClr1);
598 
599 		/* Save power-event status bits */
600 		wolstat = ioread8(ioaddr + PwrcsrSet);
601 		if (rp->quirks & rq6patterns)
602 			wolstat |= (ioread8(ioaddr + PwrcsrSet1) & 0x03) << 8;
603 
604 		/* Clear power-event status bits */
605 		iowrite8(0xFF, ioaddr + PwrcsrClr);
606 		if (rp->quirks & rq6patterns)
607 			iowrite8(0x03, ioaddr + PwrcsrClr1);
608 
609 		if (wolstat) {
610 			char *reason;
611 			switch (wolstat) {
612 			case WOLmagic:
613 				reason = "Magic packet";
614 				break;
615 			case WOLlnkon:
616 				reason = "Link went up";
617 				break;
618 			case WOLlnkoff:
619 				reason = "Link went down";
620 				break;
621 			case WOLucast:
622 				reason = "Unicast packet";
623 				break;
624 			case WOLbmcast:
625 				reason = "Multicast/broadcast packet";
626 				break;
627 			default:
628 				reason = "Unknown";
629 			}
630 			netdev_info(dev, "Woke system up. Reason: %s\n",
631 				    reason);
632 		}
633 	}
634 }
635 
636 static void rhine_chip_reset(struct net_device *dev)
637 {
638 	struct rhine_private *rp = netdev_priv(dev);
639 	void __iomem *ioaddr = rp->base;
640 	u8 cmd1;
641 
642 	iowrite8(Cmd1Reset, ioaddr + ChipCmd1);
643 	IOSYNC;
644 
645 	if (ioread8(ioaddr + ChipCmd1) & Cmd1Reset) {
646 		netdev_info(dev, "Reset not complete yet. Trying harder.\n");
647 
648 		/* Force reset */
649 		if (rp->quirks & rqForceReset)
650 			iowrite8(0x40, ioaddr + MiscCmd);
651 
652 		/* Reset can take somewhat longer (rare) */
653 		rhine_wait_bit_low(rp, ChipCmd1, Cmd1Reset);
654 	}
655 
656 	cmd1 = ioread8(ioaddr + ChipCmd1);
657 	netif_info(rp, hw, dev, "Reset %s\n", (cmd1 & Cmd1Reset) ?
658 		   "failed" : "succeeded");
659 }
660 
661 static void enable_mmio(long pioaddr, u32 quirks)
662 {
663 	int n;
664 
665 	if (quirks & rqNeedEnMMIO) {
666 		if (quirks & rqRhineI) {
667 			/* More recent docs say that this bit is reserved */
668 			n = inb(pioaddr + ConfigA) | 0x20;
669 			outb(n, pioaddr + ConfigA);
670 		} else {
671 			n = inb(pioaddr + ConfigD) | 0x80;
672 			outb(n, pioaddr + ConfigD);
673 		}
674 	}
675 }
676 
677 static inline int verify_mmio(struct device *hwdev,
678 			      long pioaddr,
679 			      void __iomem *ioaddr,
680 			      u32 quirks)
681 {
682 	if (quirks & rqNeedEnMMIO) {
683 		int i = 0;
684 
685 		/* Check that selected MMIO registers match the PIO ones */
686 		while (mmio_verify_registers[i]) {
687 			int reg = mmio_verify_registers[i++];
688 			unsigned char a = inb(pioaddr+reg);
689 			unsigned char b = readb(ioaddr+reg);
690 
691 			if (a != b) {
692 				dev_err(hwdev,
693 					"MMIO do not match PIO [%02x] (%02x != %02x)\n",
694 					reg, a, b);
695 				return -EIO;
696 			}
697 		}
698 	}
699 	return 0;
700 }
701 
702 /*
703  * Loads bytes 0x00-0x05, 0x6E-0x6F, 0x78-0x7B from EEPROM
704  * (plus 0x6C for Rhine-I/II)
705  */
706 static void rhine_reload_eeprom(long pioaddr, struct net_device *dev)
707 {
708 	struct rhine_private *rp = netdev_priv(dev);
709 	void __iomem *ioaddr = rp->base;
710 	int i;
711 
712 	outb(0x20, pioaddr + MACRegEEcsr);
713 	for (i = 0; i < 1024; i++) {
714 		if (!(inb(pioaddr + MACRegEEcsr) & 0x20))
715 			break;
716 	}
717 	if (i > 512)
718 		pr_info("%4d cycles used @ %s:%d\n", i, __func__, __LINE__);
719 
720 	/*
721 	 * Reloading from EEPROM overwrites ConfigA-D, so we must re-enable
722 	 * MMIO. If reloading EEPROM was done first this could be avoided, but
723 	 * it is not known if that still works with the "win98-reboot" problem.
724 	 */
725 	enable_mmio(pioaddr, rp->quirks);
726 
727 	/* Turn off EEPROM-controlled wake-up (magic packet) */
728 	if (rp->quirks & rqWOL)
729 		iowrite8(ioread8(ioaddr + ConfigA) & 0xFC, ioaddr + ConfigA);
730 
731 }
732 
733 #ifdef CONFIG_NET_POLL_CONTROLLER
734 static void rhine_poll(struct net_device *dev)
735 {
736 	struct rhine_private *rp = netdev_priv(dev);
737 	const int irq = rp->irq;
738 
739 	disable_irq(irq);
740 	rhine_interrupt(irq, dev);
741 	enable_irq(irq);
742 }
743 #endif
744 
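/*
 * Raise the Tx FIFO threshold by one step (0x20), capped at 0xe0, and merge
 * the new value into TxConfig. Called after Tx underruns and unspecified Tx
 * errors (see rhine_tx_err() below).
 */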
745 static void rhine_kick_tx_threshold(struct rhine_private *rp)
746 {
747 	if (rp->tx_thresh < 0xe0) {
748 		void __iomem *ioaddr = rp->base;
749 
750 		rp->tx_thresh += 0x20;
751 		BYTE_REG_BITS_SET(rp->tx_thresh, 0x80, ioaddr + TxConfig);
752 	}
753 }
754 
755 static void rhine_tx_err(struct rhine_private *rp, u32 status)
756 {
757 	struct net_device *dev = rp->dev;
758 
759 	if (status & IntrTxAborted) {
760 		netif_info(rp, tx_err, dev,
761 			   "Abort %08x, frame dropped\n", status);
762 	}
763 
764 	if (status & IntrTxUnderrun) {
765 		rhine_kick_tx_threshold(rp);
		netif_info(rp, tx_err, dev, "Transmitter underrun, "
767 			   "Tx threshold now %02x\n", rp->tx_thresh);
768 	}
769 
770 	if (status & IntrTxDescRace)
771 		netif_info(rp, tx_err, dev, "Tx descriptor write-back race\n");
772 
773 	if ((status & IntrTxError) &&
774 	    (status & (IntrTxAborted | IntrTxUnderrun | IntrTxDescRace)) == 0) {
775 		rhine_kick_tx_threshold(rp);
776 		netif_info(rp, tx_err, dev, "Unspecified error. "
777 			   "Tx threshold now %02x\n", rp->tx_thresh);
778 	}
779 
780 	rhine_restart_tx(dev);
781 }
782 
783 static void rhine_update_rx_crc_and_missed_errord(struct rhine_private *rp)
784 {
785 	void __iomem *ioaddr = rp->base;
786 	struct net_device_stats *stats = &rp->dev->stats;
787 
788 	stats->rx_crc_errors    += ioread16(ioaddr + RxCRCErrs);
789 	stats->rx_missed_errors += ioread16(ioaddr + RxMissed);
790 
791 	/*
792 	 * Clears the "tally counters" for CRC errors and missed frames(?).
793 	 * It has been reported that some chips need a write of 0 to clear
794 	 * these, for others the counters are set to 1 when written to and
795 	 * instead cleared when read. So we clear them both ways ...
796 	 */
797 	iowrite32(0, ioaddr + RxMissed);
798 	ioread16(ioaddr + RxCRCErrs);
799 	ioread16(ioaddr + RxMissed);
800 }
801 
802 #define RHINE_EVENT_NAPI_RX	(IntrRxDone | \
803 				 IntrRxErr | \
804 				 IntrRxEmpty | \
805 				 IntrRxOverflow	| \
806 				 IntrRxDropped | \
807 				 IntrRxNoBuf | \
808 				 IntrRxWakeUp)
809 
810 #define RHINE_EVENT_NAPI_TX_ERR	(IntrTxError | \
811 				 IntrTxAborted | \
812 				 IntrTxUnderrun | \
813 				 IntrTxDescRace)
814 #define RHINE_EVENT_NAPI_TX	(IntrTxDone | RHINE_EVENT_NAPI_TX_ERR)
815 
816 #define RHINE_EVENT_NAPI	(RHINE_EVENT_NAPI_RX | \
817 				 RHINE_EVENT_NAPI_TX | \
818 				 IntrStatsMax)
819 #define RHINE_EVENT_SLOW	(IntrPCIErr | IntrLinkChange)
820 #define RHINE_EVENT		(RHINE_EVENT_NAPI | RHINE_EVENT_SLOW)
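
/*
 * RHINE_EVENT_NAPI covers the interrupt sources handled in NAPI context by
 * rhine_napipoll(); RHINE_EVENT_SLOW (PCI errors, link changes) is not
 * acknowledged there but handed off to rhine_slow_event_task via
 * schedule_work().
 */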
821 
822 static int rhine_napipoll(struct napi_struct *napi, int budget)
823 {
824 	struct rhine_private *rp = container_of(napi, struct rhine_private, napi);
825 	struct net_device *dev = rp->dev;
826 	void __iomem *ioaddr = rp->base;
827 	u16 enable_mask = RHINE_EVENT & 0xffff;
828 	int work_done = 0;
829 	u32 status;
830 
831 	status = rhine_get_events(rp);
832 	rhine_ack_events(rp, status & ~RHINE_EVENT_SLOW);
833 
834 	if (status & RHINE_EVENT_NAPI_RX)
835 		work_done += rhine_rx(dev, budget);
836 
837 	if (status & RHINE_EVENT_NAPI_TX) {
838 		if (status & RHINE_EVENT_NAPI_TX_ERR) {
839 			/* Avoid scavenging before Tx engine turned off */
840 			rhine_wait_bit_low(rp, ChipCmd, CmdTxOn);
841 			if (ioread8(ioaddr + ChipCmd) & CmdTxOn)
842 				netif_warn(rp, tx_err, dev, "Tx still on\n");
843 		}
844 
845 		rhine_tx(dev);
846 
847 		if (status & RHINE_EVENT_NAPI_TX_ERR)
848 			rhine_tx_err(rp, status);
849 	}
850 
851 	if (status & IntrStatsMax) {
852 		spin_lock(&rp->lock);
853 		rhine_update_rx_crc_and_missed_errord(rp);
854 		spin_unlock(&rp->lock);
855 	}
856 
857 	if (status & RHINE_EVENT_SLOW) {
858 		enable_mask &= ~RHINE_EVENT_SLOW;
859 		schedule_work(&rp->slow_event_task);
860 	}
861 
862 	if (work_done < budget) {
863 		napi_complete_done(napi, work_done);
864 		iowrite16(enable_mask, ioaddr + IntrEnable);
865 	}
866 	return work_done;
867 }
868 
869 static void rhine_hw_init(struct net_device *dev, long pioaddr)
870 {
871 	struct rhine_private *rp = netdev_priv(dev);
872 
873 	/* Reset the chip to erase previous misconfiguration. */
874 	rhine_chip_reset(dev);
875 
876 	/* Rhine-I needs extra time to recuperate before EEPROM reload */
877 	if (rp->quirks & rqRhineI)
878 		msleep(5);
879 
880 	/* Reload EEPROM controlled bytes cleared by soft reset */
881 	if (dev_is_pci(dev->dev.parent))
882 		rhine_reload_eeprom(pioaddr, dev);
883 }
884 
885 static const struct net_device_ops rhine_netdev_ops = {
886 	.ndo_open		 = rhine_open,
887 	.ndo_stop		 = rhine_close,
888 	.ndo_start_xmit		 = rhine_start_tx,
889 	.ndo_get_stats64	 = rhine_get_stats64,
890 	.ndo_set_rx_mode	 = rhine_set_rx_mode,
891 	.ndo_validate_addr	 = eth_validate_addr,
892 	.ndo_set_mac_address 	 = eth_mac_addr,
893 	.ndo_do_ioctl		 = netdev_ioctl,
894 	.ndo_tx_timeout 	 = rhine_tx_timeout,
895 	.ndo_vlan_rx_add_vid	 = rhine_vlan_rx_add_vid,
896 	.ndo_vlan_rx_kill_vid	 = rhine_vlan_rx_kill_vid,
897 #ifdef CONFIG_NET_POLL_CONTROLLER
898 	.ndo_poll_controller	 = rhine_poll,
899 #endif
900 };
901 
902 static int rhine_init_one_common(struct device *hwdev, u32 quirks,
903 				 long pioaddr, void __iomem *ioaddr, int irq)
904 {
905 	struct net_device *dev;
906 	struct rhine_private *rp;
907 	int i, rc, phy_id;
908 	const char *name;
909 
910 	/* this should always be supported */
911 	rc = dma_set_mask(hwdev, DMA_BIT_MASK(32));
912 	if (rc) {
913 		dev_err(hwdev, "32-bit DMA addresses not supported by the card!?\n");
914 		goto err_out;
915 	}
916 
917 	dev = alloc_etherdev(sizeof(struct rhine_private));
918 	if (!dev) {
919 		rc = -ENOMEM;
920 		goto err_out;
921 	}
922 	SET_NETDEV_DEV(dev, hwdev);
923 
924 	rp = netdev_priv(dev);
925 	rp->dev = dev;
926 	rp->quirks = quirks;
927 	rp->pioaddr = pioaddr;
928 	rp->base = ioaddr;
929 	rp->irq = irq;
930 	rp->msg_enable = netif_msg_init(debug, RHINE_MSG_DEFAULT);
931 
932 	phy_id = rp->quirks & rqIntPHY ? 1 : 0;
933 
934 	u64_stats_init(&rp->tx_stats.syncp);
935 	u64_stats_init(&rp->rx_stats.syncp);
936 
937 	/* Get chip registers into a sane state */
938 	rhine_power_init(dev);
939 	rhine_hw_init(dev, pioaddr);
940 
941 	for (i = 0; i < 6; i++)
942 		dev->dev_addr[i] = ioread8(ioaddr + StationAddr + i);
943 
944 	if (!is_valid_ether_addr(dev->dev_addr)) {
945 		/* Report it and use a random ethernet address instead */
946 		netdev_err(dev, "Invalid MAC address: %pM\n", dev->dev_addr);
947 		eth_hw_addr_random(dev);
948 		netdev_info(dev, "Using random MAC address: %pM\n",
949 			    dev->dev_addr);
950 	}
951 
952 	/* For Rhine-I/II, phy_id is loaded from EEPROM */
953 	if (!phy_id)
954 		phy_id = ioread8(ioaddr + 0x6C);
955 
956 	spin_lock_init(&rp->lock);
957 	mutex_init(&rp->task_lock);
958 	INIT_WORK(&rp->reset_task, rhine_reset_task);
959 	INIT_WORK(&rp->slow_event_task, rhine_slow_event_task);
960 
961 	rp->mii_if.dev = dev;
962 	rp->mii_if.mdio_read = mdio_read;
963 	rp->mii_if.mdio_write = mdio_write;
964 	rp->mii_if.phy_id_mask = 0x1f;
965 	rp->mii_if.reg_num_mask = 0x1f;
966 
967 	/* The chip-specific entries in the device structure. */
968 	dev->netdev_ops = &rhine_netdev_ops;
969 	dev->ethtool_ops = &netdev_ethtool_ops;
970 	dev->watchdog_timeo = TX_TIMEOUT;
971 
972 	netif_napi_add(dev, &rp->napi, rhine_napipoll, 64);
973 
974 	if (rp->quirks & rqRhineI)
975 		dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM;
976 
977 	if (rp->quirks & rqMgmt)
978 		dev->features |= NETIF_F_HW_VLAN_CTAG_TX |
979 				 NETIF_F_HW_VLAN_CTAG_RX |
980 				 NETIF_F_HW_VLAN_CTAG_FILTER;
981 
982 	/* dev->name not defined before register_netdev()! */
983 	rc = register_netdev(dev);
984 	if (rc)
985 		goto err_out_free_netdev;
986 
987 	if (rp->quirks & rqRhineI)
988 		name = "Rhine";
989 	else if (rp->quirks & rqStatusWBRace)
990 		name = "Rhine II";
991 	else if (rp->quirks & rqMgmt)
992 		name = "Rhine III (Management Adapter)";
993 	else
994 		name = "Rhine III";
995 
996 	netdev_info(dev, "VIA %s at %p, %pM, IRQ %d\n",
997 		    name, ioaddr, dev->dev_addr, rp->irq);
998 
999 	dev_set_drvdata(hwdev, dev);
1000 
1001 	{
1002 		u16 mii_cmd;
1003 		int mii_status = mdio_read(dev, phy_id, 1);
1004 		mii_cmd = mdio_read(dev, phy_id, MII_BMCR) & ~BMCR_ISOLATE;
1005 		mdio_write(dev, phy_id, MII_BMCR, mii_cmd);
1006 		if (mii_status != 0xffff && mii_status != 0x0000) {
1007 			rp->mii_if.advertising = mdio_read(dev, phy_id, 4);
1008 			netdev_info(dev,
1009 				    "MII PHY found at address %d, status 0x%04x advertising %04x Link %04x\n",
1010 				    phy_id,
1011 				    mii_status, rp->mii_if.advertising,
1012 				    mdio_read(dev, phy_id, 5));
1013 
1014 			/* set IFF_RUNNING */
1015 			if (mii_status & BMSR_LSTATUS)
1016 				netif_carrier_on(dev);
1017 			else
1018 				netif_carrier_off(dev);
1019 
1020 		}
1021 	}
1022 	rp->mii_if.phy_id = phy_id;
1023 	if (avoid_D3)
1024 		netif_info(rp, probe, dev, "No D3 power state at shutdown\n");
1025 
1026 	return 0;
1027 
1028 err_out_free_netdev:
1029 	free_netdev(dev);
1030 err_out:
1031 	return rc;
1032 }
1033 
1034 static int rhine_init_one_pci(struct pci_dev *pdev,
1035 			      const struct pci_device_id *ent)
1036 {
1037 	struct device *hwdev = &pdev->dev;
1038 	int rc;
1039 	long pioaddr, memaddr;
1040 	void __iomem *ioaddr;
1041 	int io_size = pdev->revision < VTunknown0 ? 128 : 256;
1042 
1043 /* This driver was written to use PCI memory space. Some early versions
1044  * of the Rhine may only work correctly with I/O space accesses.
1045  * TODO: determine for which revisions this is true and assign the flag
1046  *	 in code as opposed to this Kconfig option (???)
1047  */
1048 #ifdef CONFIG_VIA_RHINE_MMIO
1049 	u32 quirks = rqNeedEnMMIO;
1050 #else
1051 	u32 quirks = 0;
1052 #endif
1053 
1054 /* when built into the kernel, we only print version if device is found */
1055 #ifndef MODULE
1056 	pr_info_once("%s\n", version);
1057 #endif
1058 
1059 	rc = pci_enable_device(pdev);
1060 	if (rc)
1061 		goto err_out;
1062 
1063 	if (pdev->revision < VTunknown0) {
1064 		quirks |= rqRhineI;
1065 	} else if (pdev->revision >= VT6102) {
1066 		quirks |= rqWOL | rqForceReset;
1067 		if (pdev->revision < VT6105) {
1068 			quirks |= rqStatusWBRace;
1069 		} else {
1070 			quirks |= rqIntPHY;
1071 			if (pdev->revision >= VT6105_B0)
1072 				quirks |= rq6patterns;
1073 			if (pdev->revision >= VT6105M)
1074 				quirks |= rqMgmt;
1075 		}
1076 	}
1077 
1078 	/* sanity check */
1079 	if ((pci_resource_len(pdev, 0) < io_size) ||
1080 	    (pci_resource_len(pdev, 1) < io_size)) {
1081 		rc = -EIO;
1082 		dev_err(hwdev, "Insufficient PCI resources, aborting\n");
1083 		goto err_out_pci_disable;
1084 	}
1085 
1086 	pioaddr = pci_resource_start(pdev, 0);
1087 	memaddr = pci_resource_start(pdev, 1);
1088 
1089 	pci_set_master(pdev);
1090 
1091 	rc = pci_request_regions(pdev, DRV_NAME);
1092 	if (rc)
1093 		goto err_out_pci_disable;
1094 
1095 	ioaddr = pci_iomap(pdev, (quirks & rqNeedEnMMIO ? 1 : 0), io_size);
1096 	if (!ioaddr) {
1097 		rc = -EIO;
1098 		dev_err(hwdev,
1099 			"ioremap failed for device %s, region 0x%X @ 0x%lX\n",
1100 			dev_name(hwdev), io_size, memaddr);
1101 		goto err_out_free_res;
1102 	}
1103 
1104 	enable_mmio(pioaddr, quirks);
1105 
1106 	rc = verify_mmio(hwdev, pioaddr, ioaddr, quirks);
1107 	if (rc)
1108 		goto err_out_unmap;
1109 
1110 	rc = rhine_init_one_common(&pdev->dev, quirks,
1111 				   pioaddr, ioaddr, pdev->irq);
1112 	if (!rc)
1113 		return 0;
1114 
1115 err_out_unmap:
1116 	pci_iounmap(pdev, ioaddr);
1117 err_out_free_res:
1118 	pci_release_regions(pdev);
1119 err_out_pci_disable:
1120 	pci_disable_device(pdev);
1121 err_out:
1122 	return rc;
1123 }
1124 
1125 static int rhine_init_one_platform(struct platform_device *pdev)
1126 {
1127 	const struct of_device_id *match;
1128 	const u32 *quirks;
1129 	int irq;
1130 	struct resource *res;
1131 	void __iomem *ioaddr;
1132 
1133 	match = of_match_device(rhine_of_tbl, &pdev->dev);
1134 	if (!match)
1135 		return -EINVAL;
1136 
1137 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1138 	ioaddr = devm_ioremap_resource(&pdev->dev, res);
1139 	if (IS_ERR(ioaddr))
1140 		return PTR_ERR(ioaddr);
1141 
1142 	irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
1143 	if (!irq)
1144 		return -EINVAL;
1145 
1146 	quirks = match->data;
1147 	if (!quirks)
1148 		return -EINVAL;
1149 
1150 	return rhine_init_one_common(&pdev->dev, *quirks,
1151 				     (long)ioaddr, ioaddr, irq);
1152 }
1153 
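/*
 * Both descriptor rings live in a single coherent DMA allocation:
 * RX_RING_SIZE Rx descriptors followed immediately by TX_RING_SIZE Tx
 * descriptors. Rhine-I additionally gets a block of bounce buffers used
 * for Tx alignment.
 */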
1154 static int alloc_ring(struct net_device* dev)
1155 {
1156 	struct rhine_private *rp = netdev_priv(dev);
1157 	struct device *hwdev = dev->dev.parent;
1158 	void *ring;
1159 	dma_addr_t ring_dma;
1160 
1161 	ring = dma_alloc_coherent(hwdev,
1162 				  RX_RING_SIZE * sizeof(struct rx_desc) +
1163 				  TX_RING_SIZE * sizeof(struct tx_desc),
1164 				  &ring_dma,
1165 				  GFP_ATOMIC);
1166 	if (!ring) {
1167 		netdev_err(dev, "Could not allocate DMA memory\n");
1168 		return -ENOMEM;
1169 	}
1170 	if (rp->quirks & rqRhineI) {
1171 		rp->tx_bufs = dma_alloc_coherent(hwdev,
1172 						 PKT_BUF_SZ * TX_RING_SIZE,
1173 						 &rp->tx_bufs_dma,
1174 						 GFP_ATOMIC);
1175 		if (rp->tx_bufs == NULL) {
1176 			dma_free_coherent(hwdev,
1177 					  RX_RING_SIZE * sizeof(struct rx_desc) +
1178 					  TX_RING_SIZE * sizeof(struct tx_desc),
1179 					  ring, ring_dma);
1180 			return -ENOMEM;
1181 		}
1182 	}
1183 
1184 	rp->rx_ring = ring;
1185 	rp->tx_ring = ring + RX_RING_SIZE * sizeof(struct rx_desc);
1186 	rp->rx_ring_dma = ring_dma;
1187 	rp->tx_ring_dma = ring_dma + RX_RING_SIZE * sizeof(struct rx_desc);
1188 
1189 	return 0;
1190 }
1191 
1192 static void free_ring(struct net_device* dev)
1193 {
1194 	struct rhine_private *rp = netdev_priv(dev);
1195 	struct device *hwdev = dev->dev.parent;
1196 
1197 	dma_free_coherent(hwdev,
1198 			  RX_RING_SIZE * sizeof(struct rx_desc) +
1199 			  TX_RING_SIZE * sizeof(struct tx_desc),
1200 			  rp->rx_ring, rp->rx_ring_dma);
1201 	rp->tx_ring = NULL;
1202 
1203 	if (rp->tx_bufs)
1204 		dma_free_coherent(hwdev, PKT_BUF_SZ * TX_RING_SIZE,
1205 				  rp->tx_bufs, rp->tx_bufs_dma);
1206 
1207 	rp->tx_bufs = NULL;
1208 
1209 }
1210 
1211 struct rhine_skb_dma {
1212 	struct sk_buff *skb;
1213 	dma_addr_t dma;
1214 };
1215 
1216 static inline int rhine_skb_dma_init(struct net_device *dev,
1217 				     struct rhine_skb_dma *sd)
1218 {
1219 	struct rhine_private *rp = netdev_priv(dev);
1220 	struct device *hwdev = dev->dev.parent;
1221 	const int size = rp->rx_buf_sz;
1222 
1223 	sd->skb = netdev_alloc_skb(dev, size);
1224 	if (!sd->skb)
1225 		return -ENOMEM;
1226 
1227 	sd->dma = dma_map_single(hwdev, sd->skb->data, size, DMA_FROM_DEVICE);
1228 	if (unlikely(dma_mapping_error(hwdev, sd->dma))) {
1229 		netif_err(rp, drv, dev, "Rx DMA mapping failure\n");
1230 		dev_kfree_skb_any(sd->skb);
1231 		return -EIO;
1232 	}
1233 
1234 	return 0;
1235 }
1236 
1237 static void rhine_reset_rbufs(struct rhine_private *rp)
1238 {
1239 	int i;
1240 
1241 	rp->cur_rx = 0;
1242 
1243 	for (i = 0; i < RX_RING_SIZE; i++)
1244 		rp->rx_ring[i].rx_status = cpu_to_le32(DescOwn);
1245 }
1246 
1247 static inline void rhine_skb_dma_nic_store(struct rhine_private *rp,
1248 					   struct rhine_skb_dma *sd, int entry)
1249 {
1250 	rp->rx_skbuff_dma[entry] = sd->dma;
1251 	rp->rx_skbuff[entry] = sd->skb;
1252 
1253 	rp->rx_ring[entry].addr = cpu_to_le32(sd->dma);
1254 	dma_wmb();
1255 }
1256 
1257 static void free_rbufs(struct net_device* dev);
1258 
1259 static int alloc_rbufs(struct net_device *dev)
1260 {
1261 	struct rhine_private *rp = netdev_priv(dev);
1262 	dma_addr_t next;
1263 	int rc, i;
1264 
1265 	rp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
1266 	next = rp->rx_ring_dma;
1267 
1268 	/* Init the ring entries */
1269 	for (i = 0; i < RX_RING_SIZE; i++) {
1270 		rp->rx_ring[i].rx_status = 0;
1271 		rp->rx_ring[i].desc_length = cpu_to_le32(rp->rx_buf_sz);
1272 		next += sizeof(struct rx_desc);
1273 		rp->rx_ring[i].next_desc = cpu_to_le32(next);
1274 		rp->rx_skbuff[i] = NULL;
1275 	}
1276 	/* Mark the last entry as wrapping the ring. */
1277 	rp->rx_ring[i-1].next_desc = cpu_to_le32(rp->rx_ring_dma);
1278 
1279 	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
1280 	for (i = 0; i < RX_RING_SIZE; i++) {
1281 		struct rhine_skb_dma sd;
1282 
1283 		rc = rhine_skb_dma_init(dev, &sd);
1284 		if (rc < 0) {
1285 			free_rbufs(dev);
1286 			goto out;
1287 		}
1288 
1289 		rhine_skb_dma_nic_store(rp, &sd, i);
1290 	}
1291 
1292 	rhine_reset_rbufs(rp);
1293 out:
1294 	return rc;
1295 }
1296 
1297 static void free_rbufs(struct net_device* dev)
1298 {
1299 	struct rhine_private *rp = netdev_priv(dev);
1300 	struct device *hwdev = dev->dev.parent;
1301 	int i;
1302 
1303 	/* Free all the skbuffs in the Rx queue. */
1304 	for (i = 0; i < RX_RING_SIZE; i++) {
1305 		rp->rx_ring[i].rx_status = 0;
1306 		rp->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
1307 		if (rp->rx_skbuff[i]) {
1308 			dma_unmap_single(hwdev,
1309 					 rp->rx_skbuff_dma[i],
1310 					 rp->rx_buf_sz, DMA_FROM_DEVICE);
1311 			dev_kfree_skb(rp->rx_skbuff[i]);
1312 		}
1313 		rp->rx_skbuff[i] = NULL;
1314 	}
1315 }
1316 
1317 static void alloc_tbufs(struct net_device* dev)
1318 {
1319 	struct rhine_private *rp = netdev_priv(dev);
1320 	dma_addr_t next;
1321 	int i;
1322 
1323 	rp->dirty_tx = rp->cur_tx = 0;
1324 	next = rp->tx_ring_dma;
1325 	for (i = 0; i < TX_RING_SIZE; i++) {
1326 		rp->tx_skbuff[i] = NULL;
1327 		rp->tx_ring[i].tx_status = 0;
1328 		rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
1329 		next += sizeof(struct tx_desc);
1330 		rp->tx_ring[i].next_desc = cpu_to_le32(next);
1331 		if (rp->quirks & rqRhineI)
1332 			rp->tx_buf[i] = &rp->tx_bufs[i * PKT_BUF_SZ];
1333 	}
1334 	rp->tx_ring[i-1].next_desc = cpu_to_le32(rp->tx_ring_dma);
1335 
1336 	netdev_reset_queue(dev);
1337 }
1338 
1339 static void free_tbufs(struct net_device* dev)
1340 {
1341 	struct rhine_private *rp = netdev_priv(dev);
1342 	struct device *hwdev = dev->dev.parent;
1343 	int i;
1344 
1345 	for (i = 0; i < TX_RING_SIZE; i++) {
1346 		rp->tx_ring[i].tx_status = 0;
1347 		rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
1348 		rp->tx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
1349 		if (rp->tx_skbuff[i]) {
1350 			if (rp->tx_skbuff_dma[i]) {
1351 				dma_unmap_single(hwdev,
1352 						 rp->tx_skbuff_dma[i],
1353 						 rp->tx_skbuff[i]->len,
1354 						 DMA_TO_DEVICE);
1355 			}
1356 			dev_kfree_skb(rp->tx_skbuff[i]);
1357 		}
1358 		rp->tx_skbuff[i] = NULL;
1359 		rp->tx_buf[i] = NULL;
1360 	}
1361 }
1362 
1363 static void rhine_check_media(struct net_device *dev, unsigned int init_media)
1364 {
1365 	struct rhine_private *rp = netdev_priv(dev);
1366 	void __iomem *ioaddr = rp->base;
1367 
1368 	if (!rp->mii_if.force_media)
1369 		mii_check_media(&rp->mii_if, netif_msg_link(rp), init_media);
1370 
1371 	if (rp->mii_if.full_duplex)
1372 	    iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1FDuplex,
1373 		   ioaddr + ChipCmd1);
1374 	else
1375 	    iowrite8(ioread8(ioaddr + ChipCmd1) & ~Cmd1FDuplex,
1376 		   ioaddr + ChipCmd1);
1377 
1378 	netif_info(rp, link, dev, "force_media %d, carrier %d\n",
1379 		   rp->mii_if.force_media, netif_carrier_ok(dev));
1380 }
1381 
1382 /* Called after status of force_media possibly changed */
1383 static void rhine_set_carrier(struct mii_if_info *mii)
1384 {
1385 	struct net_device *dev = mii->dev;
1386 	struct rhine_private *rp = netdev_priv(dev);
1387 
1388 	if (mii->force_media) {
1389 		/* autoneg is off: Link is always assumed to be up */
1390 		if (!netif_carrier_ok(dev))
1391 			netif_carrier_on(dev);
1392 	}
1393 
1394 	rhine_check_media(dev, 0);
1395 
1396 	netif_info(rp, link, dev, "force_media %d, carrier %d\n",
1397 		   mii->force_media, netif_carrier_ok(dev));
1398 }
1399 
1400 /**
1401  * rhine_set_cam - set CAM multicast filters
1402  * @ioaddr: register block of this Rhine
1403  * @idx: multicast CAM index [0..MCAM_SIZE-1]
1404  * @addr: multicast address (6 bytes)
1405  *
1406  * Load addresses into multicast filters.
1407  */
1408 static void rhine_set_cam(void __iomem *ioaddr, int idx, u8 *addr)
1409 {
1410 	int i;
1411 
1412 	iowrite8(CAMC_CAMEN, ioaddr + CamCon);
1413 	wmb();
1414 
1415 	/* Paranoid -- idx out of range should never happen */
1416 	idx &= (MCAM_SIZE - 1);
1417 
1418 	iowrite8((u8) idx, ioaddr + CamAddr);
1419 
1420 	for (i = 0; i < 6; i++, addr++)
1421 		iowrite8(*addr, ioaddr + MulticastFilter0 + i);
1422 	udelay(10);
1423 	wmb();
1424 
1425 	iowrite8(CAMC_CAMWR | CAMC_CAMEN, ioaddr + CamCon);
1426 	udelay(10);
1427 
1428 	iowrite8(0, ioaddr + CamCon);
1429 }
1430 
1431 /**
1432  * rhine_set_vlan_cam - set CAM VLAN filters
1433  * @ioaddr: register block of this Rhine
1434  * @idx: VLAN CAM index [0..VCAM_SIZE-1]
1435  * @addr: VLAN ID (2 bytes)
1436  *
1437  * Load addresses into VLAN filters.
1438  */
1439 static void rhine_set_vlan_cam(void __iomem *ioaddr, int idx, u8 *addr)
1440 {
1441 	iowrite8(CAMC_CAMEN | CAMC_VCAMSL, ioaddr + CamCon);
1442 	wmb();
1443 
1444 	/* Paranoid -- idx out of range should never happen */
1445 	idx &= (VCAM_SIZE - 1);
1446 
1447 	iowrite8((u8) idx, ioaddr + CamAddr);
1448 
1449 	iowrite16(*((u16 *) addr), ioaddr + MulticastFilter0 + 6);
1450 	udelay(10);
1451 	wmb();
1452 
1453 	iowrite8(CAMC_CAMWR | CAMC_CAMEN, ioaddr + CamCon);
1454 	udelay(10);
1455 
1456 	iowrite8(0, ioaddr + CamCon);
1457 }
1458 
1459 /**
1460  * rhine_set_cam_mask - set multicast CAM mask
1461  * @ioaddr: register block of this Rhine
1462  * @mask: multicast CAM mask
1463  *
1464  * Mask sets multicast filters active/inactive.
1465  */
1466 static void rhine_set_cam_mask(void __iomem *ioaddr, u32 mask)
1467 {
1468 	iowrite8(CAMC_CAMEN, ioaddr + CamCon);
1469 	wmb();
1470 
1471 	/* write mask */
1472 	iowrite32(mask, ioaddr + CamMask);
1473 
1474 	/* disable CAMEN */
1475 	iowrite8(0, ioaddr + CamCon);
1476 }
1477 
1478 /**
1479  * rhine_set_vlan_cam_mask - set VLAN CAM mask
1480  * @ioaddr: register block of this Rhine
1481  * @mask: VLAN CAM mask
1482  *
1483  * Mask sets VLAN filters active/inactive.
1484  */
1485 static void rhine_set_vlan_cam_mask(void __iomem *ioaddr, u32 mask)
1486 {
1487 	iowrite8(CAMC_CAMEN | CAMC_VCAMSL, ioaddr + CamCon);
1488 	wmb();
1489 
1490 	/* write mask */
1491 	iowrite32(mask, ioaddr + CamMask);
1492 
1493 	/* disable CAMEN */
1494 	iowrite8(0, ioaddr + CamCon);
1495 }
1496 
1497 /**
1498  * rhine_init_cam_filter - initialize CAM filters
1499  * @dev: network device
1500  *
1501  * Initialize (disable) hardware VLAN and multicast support on this
1502  * Rhine.
1503  */
1504 static void rhine_init_cam_filter(struct net_device *dev)
1505 {
1506 	struct rhine_private *rp = netdev_priv(dev);
1507 	void __iomem *ioaddr = rp->base;
1508 
1509 	/* Disable all CAMs */
1510 	rhine_set_vlan_cam_mask(ioaddr, 0);
1511 	rhine_set_cam_mask(ioaddr, 0);
1512 
1513 	/* disable hardware VLAN support */
1514 	BYTE_REG_BITS_ON(TCR_PQEN, ioaddr + TxConfig);
1515 	BYTE_REG_BITS_OFF(BCR1_VIDFR, ioaddr + PCIBusConfig1);
1516 }
1517 
1518 /**
1519  * rhine_update_vcam - update VLAN CAM filters
 * @dev: rhine network device
1521  *
1522  * Update VLAN CAM filters to match configuration change.
1523  */
1524 static void rhine_update_vcam(struct net_device *dev)
1525 {
1526 	struct rhine_private *rp = netdev_priv(dev);
1527 	void __iomem *ioaddr = rp->base;
1528 	u16 vid;
1529 	u32 vCAMmask = 0;	/* 32 vCAMs (6105M and better) */
1530 	unsigned int i = 0;
1531 
1532 	for_each_set_bit(vid, rp->active_vlans, VLAN_N_VID) {
1533 		rhine_set_vlan_cam(ioaddr, i, (u8 *)&vid);
1534 		vCAMmask |= 1 << i;
1535 		if (++i >= VCAM_SIZE)
1536 			break;
1537 	}
1538 	rhine_set_vlan_cam_mask(ioaddr, vCAMmask);
1539 }
1540 
1541 static int rhine_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
1542 {
1543 	struct rhine_private *rp = netdev_priv(dev);
1544 
1545 	spin_lock_bh(&rp->lock);
1546 	set_bit(vid, rp->active_vlans);
1547 	rhine_update_vcam(dev);
1548 	spin_unlock_bh(&rp->lock);
1549 	return 0;
1550 }
1551 
1552 static int rhine_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
1553 {
1554 	struct rhine_private *rp = netdev_priv(dev);
1555 
1556 	spin_lock_bh(&rp->lock);
1557 	clear_bit(vid, rp->active_vlans);
1558 	rhine_update_vcam(dev);
1559 	spin_unlock_bh(&rp->lock);
1560 	return 0;
1561 }
1562 
1563 static void init_registers(struct net_device *dev)
1564 {
1565 	struct rhine_private *rp = netdev_priv(dev);
1566 	void __iomem *ioaddr = rp->base;
1567 	int i;
1568 
1569 	for (i = 0; i < 6; i++)
1570 		iowrite8(dev->dev_addr[i], ioaddr + StationAddr + i);
1571 
1572 	/* Initialize other registers. */
1573 	iowrite16(0x0006, ioaddr + PCIBusConfig);	/* Tune configuration??? */
1574 	/* Configure initial FIFO thresholds. */
1575 	iowrite8(0x20, ioaddr + TxConfig);
1576 	rp->tx_thresh = 0x20;
1577 	rp->rx_thresh = 0x60;		/* Written in rhine_set_rx_mode(). */
1578 
1579 	iowrite32(rp->rx_ring_dma, ioaddr + RxRingPtr);
1580 	iowrite32(rp->tx_ring_dma, ioaddr + TxRingPtr);
1581 
1582 	rhine_set_rx_mode(dev);
1583 
1584 	if (rp->quirks & rqMgmt)
1585 		rhine_init_cam_filter(dev);
1586 
1587 	napi_enable(&rp->napi);
1588 
1589 	iowrite16(RHINE_EVENT & 0xffff, ioaddr + IntrEnable);
1590 
1591 	iowrite16(CmdStart | CmdTxOn | CmdRxOn | (Cmd1NoTxPoll << 8),
1592 	       ioaddr + ChipCmd);
1593 	rhine_check_media(dev, 1);
1594 }
1595 
1596 /* Enable MII link status auto-polling (required for IntrLinkChange) */
1597 static void rhine_enable_linkmon(struct rhine_private *rp)
1598 {
1599 	void __iomem *ioaddr = rp->base;
1600 
1601 	iowrite8(0, ioaddr + MIICmd);
1602 	iowrite8(MII_BMSR, ioaddr + MIIRegAddr);
1603 	iowrite8(0x80, ioaddr + MIICmd);
1604 
1605 	rhine_wait_bit_high(rp, MIIRegAddr, 0x20);
1606 
1607 	iowrite8(MII_BMSR | 0x40, ioaddr + MIIRegAddr);
1608 }
1609 
1610 /* Disable MII link status auto-polling (required for MDIO access) */
1611 static void rhine_disable_linkmon(struct rhine_private *rp)
1612 {
1613 	void __iomem *ioaddr = rp->base;
1614 
1615 	iowrite8(0, ioaddr + MIICmd);
1616 
1617 	if (rp->quirks & rqRhineI) {
		iowrite8(0x01, ioaddr + MIIRegAddr);	/* MII_BMSR */
1619 
1620 		/* Can be called from ISR. Evil. */
1621 		mdelay(1);
1622 
1623 		/* 0x80 must be set immediately before turning it off */
1624 		iowrite8(0x80, ioaddr + MIICmd);
1625 
1626 		rhine_wait_bit_high(rp, MIIRegAddr, 0x20);
1627 
1628 		/* Heh. Now clear 0x80 again. */
1629 		iowrite8(0, ioaddr + MIICmd);
1630 	}
1631 	else
1632 		rhine_wait_bit_high(rp, MIIRegAddr, 0x80);
1633 }
1634 
1635 /* Read and write over the MII Management Data I/O (MDIO) interface. */
1636 
1637 static int mdio_read(struct net_device *dev, int phy_id, int regnum)
1638 {
1639 	struct rhine_private *rp = netdev_priv(dev);
1640 	void __iomem *ioaddr = rp->base;
1641 	int result;
1642 
1643 	rhine_disable_linkmon(rp);
1644 
1645 	/* rhine_disable_linkmon already cleared MIICmd */
1646 	iowrite8(phy_id, ioaddr + MIIPhyAddr);
1647 	iowrite8(regnum, ioaddr + MIIRegAddr);
1648 	iowrite8(0x40, ioaddr + MIICmd);		/* Trigger read */
1649 	rhine_wait_bit_low(rp, MIICmd, 0x40);
1650 	result = ioread16(ioaddr + MIIData);
1651 
1652 	rhine_enable_linkmon(rp);
1653 	return result;
1654 }
1655 
1656 static void mdio_write(struct net_device *dev, int phy_id, int regnum, int value)
1657 {
1658 	struct rhine_private *rp = netdev_priv(dev);
1659 	void __iomem *ioaddr = rp->base;
1660 
1661 	rhine_disable_linkmon(rp);
1662 
1663 	/* rhine_disable_linkmon already cleared MIICmd */
1664 	iowrite8(phy_id, ioaddr + MIIPhyAddr);
1665 	iowrite8(regnum, ioaddr + MIIRegAddr);
1666 	iowrite16(value, ioaddr + MIIData);
1667 	iowrite8(0x20, ioaddr + MIICmd);		/* Trigger write */
1668 	rhine_wait_bit_low(rp, MIICmd, 0x20);
1669 
1670 	rhine_enable_linkmon(rp);
1671 }
1672 
1673 static void rhine_task_disable(struct rhine_private *rp)
1674 {
1675 	mutex_lock(&rp->task_lock);
1676 	rp->task_enable = false;
1677 	mutex_unlock(&rp->task_lock);
1678 
1679 	cancel_work_sync(&rp->slow_event_task);
1680 	cancel_work_sync(&rp->reset_task);
1681 }
1682 
1683 static void rhine_task_enable(struct rhine_private *rp)
1684 {
1685 	mutex_lock(&rp->task_lock);
1686 	rp->task_enable = true;
1687 	mutex_unlock(&rp->task_lock);
1688 }
1689 
1690 static int rhine_open(struct net_device *dev)
1691 {
1692 	struct rhine_private *rp = netdev_priv(dev);
1693 	void __iomem *ioaddr = rp->base;
1694 	int rc;
1695 
1696 	rc = request_irq(rp->irq, rhine_interrupt, IRQF_SHARED, dev->name, dev);
1697 	if (rc)
1698 		goto out;
1699 
1700 	netif_dbg(rp, ifup, dev, "%s() irq %d\n", __func__, rp->irq);
1701 
1702 	rc = alloc_ring(dev);
1703 	if (rc < 0)
1704 		goto out_free_irq;
1705 
1706 	rc = alloc_rbufs(dev);
1707 	if (rc < 0)
1708 		goto out_free_ring;
1709 
1710 	alloc_tbufs(dev);
1711 	rhine_chip_reset(dev);
1712 	rhine_task_enable(rp);
1713 	init_registers(dev);
1714 
1715 	netif_dbg(rp, ifup, dev, "%s() Done - status %04x MII status: %04x\n",
1716 		  __func__, ioread16(ioaddr + ChipCmd),
1717 		  mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
1718 
1719 	netif_start_queue(dev);
1720 
1721 out:
1722 	return rc;
1723 
1724 out_free_ring:
1725 	free_ring(dev);
1726 out_free_irq:
1727 	free_irq(rp->irq, dev);
1728 	goto out;
1729 }
1730 
1731 static void rhine_reset_task(struct work_struct *work)
1732 {
1733 	struct rhine_private *rp = container_of(work, struct rhine_private,
1734 						reset_task);
1735 	struct net_device *dev = rp->dev;
1736 
1737 	mutex_lock(&rp->task_lock);
1738 
1739 	if (!rp->task_enable)
1740 		goto out_unlock;
1741 
1742 	napi_disable(&rp->napi);
1743 	netif_tx_disable(dev);
1744 	spin_lock_bh(&rp->lock);
1745 
1746 	/* clear all descriptors */
1747 	free_tbufs(dev);
1748 	alloc_tbufs(dev);
1749 
1750 	rhine_reset_rbufs(rp);
1751 
1752 	/* Reinitialize the hardware. */
1753 	rhine_chip_reset(dev);
1754 	init_registers(dev);
1755 
1756 	spin_unlock_bh(&rp->lock);
1757 
1758 	netif_trans_update(dev); /* prevent tx timeout */
1759 	dev->stats.tx_errors++;
1760 	netif_wake_queue(dev);
1761 
1762 out_unlock:
1763 	mutex_unlock(&rp->task_lock);
1764 }
1765 
1766 static void rhine_tx_timeout(struct net_device *dev)
1767 {
1768 	struct rhine_private *rp = netdev_priv(dev);
1769 	void __iomem *ioaddr = rp->base;
1770 
1771 	netdev_warn(dev, "Transmit timed out, status %04x, PHY status %04x, resetting...\n",
1772 		    ioread16(ioaddr + IntrStatus),
1773 		    mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
1774 
1775 	schedule_work(&rp->reset_task);
1776 }
1777 
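/* True once TX_QUEUE_LEN or more Tx descriptors are outstanding. */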
1778 static inline bool rhine_tx_queue_full(struct rhine_private *rp)
1779 {
1780 	return (rp->cur_tx - rp->dirty_tx) >= TX_QUEUE_LEN;
1781 }
1782 
1783 static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
1784 				  struct net_device *dev)
1785 {
1786 	struct rhine_private *rp = netdev_priv(dev);
1787 	struct device *hwdev = dev->dev.parent;
1788 	void __iomem *ioaddr = rp->base;
1789 	unsigned entry;
1790 
1791 	/* Caution: the write order is important here, set the field
1792 	   with the "ownership" bits last. */
1793 
1794 	/* Calculate the next Tx descriptor entry. */
1795 	entry = rp->cur_tx % TX_RING_SIZE;
1796 
1797 	if (skb_padto(skb, ETH_ZLEN))
1798 		return NETDEV_TX_OK;
1799 
1800 	rp->tx_skbuff[entry] = skb;
1801 
1802 	if ((rp->quirks & rqRhineI) &&
1803 	    (((unsigned long)skb->data & 3) || skb_shinfo(skb)->nr_frags != 0 || skb->ip_summed == CHECKSUM_PARTIAL)) {
1804 		/* Rhine-I needs aligned, linear Tx buffers and no pending checksum: use the bounce buffer. */
1805 		if (skb->len > PKT_BUF_SZ) {
1806 			/* packet too long, drop it */
1807 			dev_kfree_skb_any(skb);
1808 			rp->tx_skbuff[entry] = NULL;
1809 			dev->stats.tx_dropped++;
1810 			return NETDEV_TX_OK;
1811 		}
1812 
1813 		/* Padding is not copied and so must be redone. */
1814 		skb_copy_and_csum_dev(skb, rp->tx_buf[entry]);
1815 		if (skb->len < ETH_ZLEN)
1816 			memset(rp->tx_buf[entry] + skb->len, 0,
1817 			       ETH_ZLEN - skb->len);
1818 		rp->tx_skbuff_dma[entry] = 0;
1819 		rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_bufs_dma +
1820 						      (rp->tx_buf[entry] -
1821 						       rp->tx_bufs));
1822 	} else {
1823 		rp->tx_skbuff_dma[entry] =
1824 			dma_map_single(hwdev, skb->data, skb->len,
1825 				       DMA_TO_DEVICE);
1826 		if (dma_mapping_error(hwdev, rp->tx_skbuff_dma[entry])) {
1827 			dev_kfree_skb_any(skb);
1828 			rp->tx_skbuff_dma[entry] = 0;
1829 			dev->stats.tx_dropped++;
1830 			return NETDEV_TX_OK;
1831 		}
1832 		rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_skbuff_dma[entry]);
1833 	}
1834 
1835 	rp->tx_ring[entry].desc_length =
1836 		cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));
1837 
1838 	if (unlikely(skb_vlan_tag_present(skb))) {
1839 		u16 vid_pcp = skb_vlan_tag_get(skb);
1840 
1841 		/* drop CFI/DEI bit, register needs VID and PCP */
1842 		vid_pcp = (vid_pcp & VLAN_VID_MASK) |
1843 			  ((vid_pcp & VLAN_PRIO_MASK) >> 1);
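		/*
		 * E.g. PCP 5, VID 100: tag 0xa064 becomes 0x5064 (PCP moves
		 * down to bits 14-12, the DEI bit is dropped).
		 */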
1844 		rp->tx_ring[entry].tx_status = cpu_to_le32((vid_pcp) << 16);
1845 		/* request tagging */
1846 		rp->tx_ring[entry].desc_length |= cpu_to_le32(0x020000);
1847 	} else
1849 		rp->tx_ring[entry].tx_status = 0;
1850 
1851 	netdev_sent_queue(dev, skb->len);
1852 	/* Publish the descriptor contents before handing ownership to the chip. */
1853 	dma_wmb();
1854 	rp->tx_ring[entry].tx_status |= cpu_to_le32(DescOwn);
1855 	wmb();
1856 
1857 	rp->cur_tx++;
1858 	/*
1859 	 * Make the cur_tx update visible to other CPUs promptly once the NIC
1860 	 * has seen the transmit request, or the transmit completion handler
1861 	 * could miss this entry.  Pairs with the smp_rmb() in rhine_tx().
1862 	 */
1863 	smp_wmb();
1864 
1865 	/* Non-x86 Todo: explicitly flush cache lines here. */
1866 
1867 	if (skb_vlan_tag_present(skb))
1868 		/* Tx queues are bits 7-0 (first Tx queue: bit 7) */
1869 		BYTE_REG_BITS_ON(1 << 7, ioaddr + TQWake);
1870 
1871 	/* Wake the potentially-idle transmit channel */
1872 	iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
1873 	       ioaddr + ChipCmd1);
1874 	IOSYNC;
1875 
1876 	/* dirty_tx may be pessimistically out-of-sync. See rhine_tx. */
1877 	if (rhine_tx_queue_full(rp)) {
1878 		netif_stop_queue(dev);
1879 		smp_rmb();
1880 		/* Re-check: rhine_tx() may have freed entries in the meantime. */
1881 		if (!rhine_tx_queue_full(rp))
1882 			netif_wake_queue(dev);
1883 	}
1884 
1885 	netif_dbg(rp, tx_queued, dev, "Transmit frame #%d queued in slot %d\n",
1886 		  rp->cur_tx - 1, entry);
1887 
1888 	return NETDEV_TX_OK;
1889 }
1890 
1891 static void rhine_irq_disable(struct rhine_private *rp)
1892 {
1893 	iowrite16(0x0000, rp->base + IntrEnable);
1894 }
1895 
1896 /* The interrupt handler does little work itself: on a fast event it masks
1897    chip interrupts and schedules NAPI, which does the Rx/Tx processing. */
1898 static irqreturn_t rhine_interrupt(int irq, void *dev_instance)
1899 {
1900 	struct net_device *dev = dev_instance;
1901 	struct rhine_private *rp = netdev_priv(dev);
1902 	u32 status;
1903 	int handled = 0;
1904 
1905 	status = rhine_get_events(rp);
1906 
1907 	netif_dbg(rp, intr, dev, "Interrupt, status %08x\n", status);
1908 
1909 	if (status & RHINE_EVENT) {
1910 		handled = 1;
1911 
1912 		rhine_irq_disable(rp);
1913 		napi_schedule(&rp->napi);
1914 	}
1915 
1916 	if (status & ~(IntrLinkChange | IntrStatsMax | RHINE_EVENT_NAPI)) {
1917 		netif_err(rp, intr, dev, "Something Wicked happened! %08x\n",
1918 			  status);
1919 	}
1920 
1921 	return IRQ_RETVAL(handled);
1922 }
1923 
1924 /* This routine is logically part of the interrupt handler, but isolated
1925    for clarity. */
1926 static void rhine_tx(struct net_device *dev)
1927 {
1928 	struct rhine_private *rp = netdev_priv(dev);
1929 	struct device *hwdev = dev->dev.parent;
1930 	unsigned int pkts_compl = 0, bytes_compl = 0;
1931 	unsigned int dirty_tx = rp->dirty_tx;
1932 	unsigned int cur_tx;
1933 	struct sk_buff *skb;
1934 
1935 	/*
1936 	 * The race with rhine_start_tx does not matter here as long as the
1937 	 * driver enforces a value of cur_tx that was relevant when the
1938 	 * packet was scheduled to the network chipset.
1939 	 * Executive summary: smp_rmb() balances smp_wmb() in rhine_start_tx.
1940 	 */
1941 	smp_rmb();
1942 	cur_tx = rp->cur_tx;
1943 	/* find and cleanup dirty tx descriptors */
1944 	while (dirty_tx != cur_tx) {
1945 		unsigned int entry = dirty_tx % TX_RING_SIZE;
1946 		u32 txstatus = le32_to_cpu(rp->tx_ring[entry].tx_status);
1947 
1948 		netif_dbg(rp, tx_done, dev, "Tx scavenge %d status %08x\n",
1949 			  entry, txstatus);
1950 		if (txstatus & DescOwn)
1951 			break;
1952 		skb = rp->tx_skbuff[entry];
1953 		if (txstatus & 0x8000) {
1954 			netif_dbg(rp, tx_done, dev,
1955 				  "Transmit error, Tx status %08x\n", txstatus);
1956 			dev->stats.tx_errors++;
1957 			if (txstatus & 0x0400)
1958 				dev->stats.tx_carrier_errors++;
1959 			if (txstatus & 0x0200)
1960 				dev->stats.tx_window_errors++;
1961 			if (txstatus & 0x0100)
1962 				dev->stats.tx_aborted_errors++;
1963 			if (txstatus & 0x0080)
1964 				dev->stats.tx_heartbeat_errors++;
1965 			if (((rp->quirks & rqRhineI) && txstatus & 0x0002) ||
1966 			    (txstatus & 0x0800) || (txstatus & 0x1000)) {
1967 				dev->stats.tx_fifo_errors++;
1968 				rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
1969 				break; /* Keep the skb - we try again */
1970 			}
1971 			/* Transmitter restarted in 'abnormal' handler. */
1972 		} else {
1973 			if (rp->quirks & rqRhineI)
1974 				dev->stats.collisions += (txstatus >> 3) & 0x0F;
1975 			else
1976 				dev->stats.collisions += txstatus & 0x0F;
1977 			netif_dbg(rp, tx_done, dev, "collisions: %1.1x:%1.1x\n",
1978 				  (txstatus >> 3) & 0xF, txstatus & 0xF);
1979 
1980 			u64_stats_update_begin(&rp->tx_stats.syncp);
1981 			rp->tx_stats.bytes += skb->len;
1982 			rp->tx_stats.packets++;
1983 			u64_stats_update_end(&rp->tx_stats.syncp);
1984 		}
1985 		/* Free the original skb. */
1986 		if (rp->tx_skbuff_dma[entry]) {
1987 			dma_unmap_single(hwdev,
1988 					 rp->tx_skbuff_dma[entry],
1989 					 skb->len,
1990 					 DMA_TO_DEVICE);
1991 		}
1992 		bytes_compl += skb->len;
1993 		pkts_compl++;
1994 		dev_consume_skb_any(skb);
1995 		rp->tx_skbuff[entry] = NULL;
1996 		dirty_tx++;
1997 	}
1998 
1999 	rp->dirty_tx = dirty_tx;
2000 	/* Pity we can't rely on the nearby BQL completion implicit barrier. */
2001 	smp_wmb();
2002 
2003 	netdev_completed_queue(dev, pkts_compl, bytes_compl);
2004 
2005 	/* cur_tx may be optimistically out-of-sync. See rhine_start_tx. */
2006 	if (!rhine_tx_queue_full(rp) && netif_queue_stopped(dev)) {
2007 		netif_wake_queue(dev);
2008 		smp_rmb();
2009 		/* Re-check: rhine_start_tx() may have filled the ring again. */
2010 		if (rhine_tx_queue_full(rp))
2011 			netif_stop_queue(dev);
2012 	}
2013 }
2014 
2015 /**
2016  * rhine_get_vlan_tci - extract TCI from Rx data buffer
2017  * @skb: pointer to sk_buff
2018  * @data_size: used data area of the buffer including CRC
2019  *
2020  * If hardware VLAN tag extraction is enabled and the chip indicates a 802.1Q
2021  * packet, the extracted 802.1Q header (2 bytes TPID + 2 bytes TCI) is 4-byte
2022  * aligned following the CRC.
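 *
 * Example: for a minimum-size frame data_size is 64 (60 bytes of payload
 * plus the 4-byte CRC); rounding up to a multiple of 4 leaves the tag at
 * offset 64 (2 bytes TPID, 2 bytes TCI), so the TCI is read from offset 66.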
2023  */
2024 static inline u16 rhine_get_vlan_tci(struct sk_buff *skb, int data_size)
2025 {
2026 	u8 *trailer = (u8 *)skb->data + ((data_size + 3) & ~3) + 2;
2027 	return be16_to_cpup((__be16 *)trailer);
2028 }
2029 
2030 static inline void rhine_rx_vlan_tag(struct sk_buff *skb, struct rx_desc *desc,
2031 				     int data_size)
2032 {
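	/*
	 * dma_rmb() orders the caller's DescOwn check before our reads of
	 * desc_length and of the tag data the chip wrote into the buffer.
	 */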
2033 	dma_rmb();
2034 	if (unlikely(desc->desc_length & cpu_to_le32(DescTag))) {
2035 		u16 vlan_tci;
2036 
2037 		vlan_tci = rhine_get_vlan_tci(skb, data_size);
2038 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
2039 	}
2040 }
2041 
2042 /* Process up to limit frames from receive ring */
2043 static int rhine_rx(struct net_device *dev, int limit)
2044 {
2045 	struct rhine_private *rp = netdev_priv(dev);
2046 	struct device *hwdev = dev->dev.parent;
2047 	int entry = rp->cur_rx % RX_RING_SIZE;
2048 	int count;
2049 
2050 	netif_dbg(rp, rx_status, dev, "%s(), entry %d status %08x\n", __func__,
2051 		  entry, le32_to_cpu(rp->rx_ring[entry].rx_status));
2052 
2053 	/* Process descriptors until we hit one still owned by the chip. */
2054 	for (count = 0; count < limit; ++count) {
2055 		struct rx_desc *desc = rp->rx_ring + entry;
2056 		u32 desc_status = le32_to_cpu(desc->rx_status);
2057 		int data_size = desc_status >> 16;
2058 
2059 		if (desc_status & DescOwn)
2060 			break;
2061 
2062 		netif_dbg(rp, rx_status, dev, "%s() status %08x\n", __func__,
2063 			  desc_status);
2064 
2065 		if ((desc_status & (RxWholePkt | RxErr)) != RxWholePkt) {
2066 			if ((desc_status & RxWholePkt) != RxWholePkt) {
2067 				netdev_warn(dev,
2068 					    "Oversized Ethernet frame spanned multiple buffers, "
2069 					    "entry %#x length %d status %08x!\n",
2070 					    entry, data_size,
2071 					    desc_status);
2072 				dev->stats.rx_length_errors++;
2073 			} else if (desc_status & RxErr) {
2074 				/* There was an error. */
2075 				netif_dbg(rp, rx_err, dev,
2076 					  "%s() Rx error %08x\n", __func__,
2077 					  desc_status);
2078 				dev->stats.rx_errors++;
2079 				if (desc_status & 0x0030)
2080 					dev->stats.rx_length_errors++;
2081 				if (desc_status & 0x0048)
2082 					dev->stats.rx_fifo_errors++;
2083 				if (desc_status & 0x0004)
2084 					dev->stats.rx_frame_errors++;
2085 				if (desc_status & 0x0002) {
2086 					/* this can also be updated outside the interrupt handler */
2087 					spin_lock(&rp->lock);
2088 					dev->stats.rx_crc_errors++;
2089 					spin_unlock(&rp->lock);
2090 				}
2091 			}
2092 		} else {
2093 			/* Length should omit the CRC */
2094 			int pkt_len = data_size - 4;
2095 			struct sk_buff *skb;
2096 
2097 			/* Check if the packet is long enough to accept without
2098 			   copying to a minimally-sized skbuff. */
2099 			if (pkt_len < rx_copybreak) {
2100 				skb = netdev_alloc_skb_ip_align(dev, pkt_len);
2101 				if (unlikely(!skb))
2102 					goto drop;
2103 
2104 				dma_sync_single_for_cpu(hwdev,
2105 							rp->rx_skbuff_dma[entry],
2106 							rp->rx_buf_sz,
2107 							DMA_FROM_DEVICE);
2108 
2109 				skb_copy_to_linear_data(skb,
2110 						 rp->rx_skbuff[entry]->data,
2111 						 pkt_len);
2112 
2113 				dma_sync_single_for_device(hwdev,
2114 							   rp->rx_skbuff_dma[entry],
2115 							   rp->rx_buf_sz,
2116 							   DMA_FROM_DEVICE);
2117 			} else {
2118 				struct rhine_skb_dma sd;
2119 
2120 				if (unlikely(rhine_skb_dma_init(dev, &sd) < 0))
2121 					goto drop;
2122 
2123 				skb = rp->rx_skbuff[entry];
2124 
2125 				dma_unmap_single(hwdev,
2126 						 rp->rx_skbuff_dma[entry],
2127 						 rp->rx_buf_sz,
2128 						 DMA_FROM_DEVICE);
2129 				rhine_skb_dma_nic_store(rp, &sd, entry);
2130 			}
2131 
2132 			skb_put(skb, pkt_len);
2133 
2134 			rhine_rx_vlan_tag(skb, desc, data_size);
2135 
2136 			skb->protocol = eth_type_trans(skb, dev);
2137 
2138 			netif_receive_skb(skb);
2139 
2140 			u64_stats_update_begin(&rp->rx_stats.syncp);
2141 			rp->rx_stats.bytes += pkt_len;
2142 			rp->rx_stats.packets++;
2143 			u64_stats_update_end(&rp->rx_stats.syncp);
2144 		}
2145 give_descriptor_to_nic:
2146 		desc->rx_status = cpu_to_le32(DescOwn);
2147 		entry = (++rp->cur_rx) % RX_RING_SIZE;
2148 	}
2149 
2150 	return count;
2151 
2152 drop:
2153 	dev->stats.rx_dropped++;
2154 	goto give_descriptor_to_nic;
2155 }
2156 
2157 static void rhine_restart_tx(struct net_device *dev)
{
2158 	struct rhine_private *rp = netdev_priv(dev);
2159 	void __iomem *ioaddr = rp->base;
2160 	int entry = rp->dirty_tx % TX_RING_SIZE;
2161 	u32 intr_status;
2162 
2163 	/*
2164 	 * If new errors occurred, we need to sort them out before doing Tx.
2165 	 * In that case the interrupt handler will be back here soon anyway.
2166 	 */
2167 	intr_status = rhine_get_events(rp);
2168 
2169 	if ((intr_status & IntrTxErrSummary) == 0) {
2170 
2171 		/* We know better than the chip where it should continue. */
2172 		iowrite32(rp->tx_ring_dma + entry * sizeof(struct tx_desc),
2173 		       ioaddr + TxRingPtr);
2174 
2175 		iowrite8(ioread8(ioaddr + ChipCmd) | CmdTxOn,
2176 		       ioaddr + ChipCmd);
2177 
2178 		if (rp->tx_ring[entry].desc_length & cpu_to_le32(0x020000))
2179 			/* Tx queues are bits 7-0 (first Tx queue: bit 7) */
2180 			BYTE_REG_BITS_ON(1 << 7, ioaddr + TQWake);
2181 
2182 		iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
2183 		       ioaddr + ChipCmd1);
2184 		IOSYNC;
2185 	} else {
2187 		/* This should never happen */
2188 		netif_warn(rp, tx_err, dev, "another error occurred %08x\n",
2189 			   intr_status);
2190 	}
2192 }
2193 
2194 static void rhine_slow_event_task(struct work_struct *work)
2195 {
2196 	struct rhine_private *rp =
2197 		container_of(work, struct rhine_private, slow_event_task);
2198 	struct net_device *dev = rp->dev;
2199 	u32 intr_status;
2200 
2201 	mutex_lock(&rp->task_lock);
2202 
2203 	if (!rp->task_enable)
2204 		goto out_unlock;
2205 
2206 	intr_status = rhine_get_events(rp);
2207 	rhine_ack_events(rp, intr_status & RHINE_EVENT_SLOW);
2208 
2209 	if (intr_status & IntrLinkChange)
2210 		rhine_check_media(dev, 0);
2211 
2212 	if (intr_status & IntrPCIErr)
2213 		netif_warn(rp, hw, dev, "PCI error\n");
2214 
2215 	iowrite16(RHINE_EVENT & 0xffff, rp->base + IntrEnable);
2216 
2217 out_unlock:
2218 	mutex_unlock(&rp->task_lock);
2219 }
2220 
2221 static void
2222 rhine_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
2223 {
2224 	struct rhine_private *rp = netdev_priv(dev);
2225 	unsigned int start;
2226 
2227 	spin_lock_bh(&rp->lock);
2228 	rhine_update_rx_crc_and_missed_errord(rp);
2229 	spin_unlock_bh(&rp->lock);
2230 
2231 	netdev_stats_to_stats64(stats, &dev->stats);
2232 
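	/*
	 * The fetch/retry loops give a consistent snapshot of the 64-bit
	 * counters even on 32-bit hosts, where they cannot be read atomically.
	 */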
2233 	do {
2234 		start = u64_stats_fetch_begin_irq(&rp->rx_stats.syncp);
2235 		stats->rx_packets = rp->rx_stats.packets;
2236 		stats->rx_bytes = rp->rx_stats.bytes;
2237 	} while (u64_stats_fetch_retry_irq(&rp->rx_stats.syncp, start));
2238 
2239 	do {
2240 		start = u64_stats_fetch_begin_irq(&rp->tx_stats.syncp);
2241 		stats->tx_packets = rp->tx_stats.packets;
2242 		stats->tx_bytes = rp->tx_stats.bytes;
2243 	} while (u64_stats_fetch_retry_irq(&rp->tx_stats.syncp, start));
2244 }
2245 
2246 static void rhine_set_rx_mode(struct net_device *dev)
2247 {
2248 	struct rhine_private *rp = netdev_priv(dev);
2249 	void __iomem *ioaddr = rp->base;
2250 	u32 mc_filter[2];	/* Multicast hash filter */
2251 	u8 rx_mode = 0x0C;	/* Note: 0x02=accept runt, 0x01=accept errs */
2252 	struct netdev_hw_addr *ha;
2253 
2254 	if (dev->flags & IFF_PROMISC) {		/* Set promiscuous. */
2255 		rx_mode = 0x1C;
2256 		iowrite32(0xffffffff, ioaddr + MulticastFilter0);
2257 		iowrite32(0xffffffff, ioaddr + MulticastFilter1);
2258 	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
2259 		   (dev->flags & IFF_ALLMULTI)) {
2260 		/* Too many to match, or accept all multicasts. */
2261 		iowrite32(0xffffffff, ioaddr + MulticastFilter0);
2262 		iowrite32(0xffffffff, ioaddr + MulticastFilter1);
2263 	} else if (rp->quirks & rqMgmt) {
2264 		int i = 0;
2265 		u32 mCAMmask = 0;	/* 32 mCAMs (6105M and better) */
2266 		netdev_for_each_mc_addr(ha, dev) {
2267 			if (i == MCAM_SIZE)
2268 				break;
2269 			rhine_set_cam(ioaddr, i, ha->addr);
2270 			mCAMmask |= 1 << i;
2271 			i++;
2272 		}
2273 		rhine_set_cam_mask(ioaddr, mCAMmask);
2274 	} else {
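		/*
		 * 64-bin hash filter: the top six bits of the CRC of each
		 * address pick one bit in a 64-bit filter split across two
		 * 32-bit registers.  E.g. bin 45 sets bit 13 (45 & 31) of
		 * MulticastFilter1 (45 >> 5 == 1).
		 */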
2275 		memset(mc_filter, 0, sizeof(mc_filter));
2276 		netdev_for_each_mc_addr(ha, dev) {
2277 			int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
2278 
2279 			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
2280 		}
2281 		iowrite32(mc_filter[0], ioaddr + MulticastFilter0);
2282 		iowrite32(mc_filter[1], ioaddr + MulticastFilter1);
2283 	}
2284 	/* enable/disable VLAN receive filtering */
2285 	if (rp->quirks & rqMgmt) {
2286 		if (dev->flags & IFF_PROMISC)
2287 			BYTE_REG_BITS_OFF(BCR1_VIDFR, ioaddr + PCIBusConfig1);
2288 		else
2289 			BYTE_REG_BITS_ON(BCR1_VIDFR, ioaddr + PCIBusConfig1);
2290 	}
2291 	BYTE_REG_BITS_ON(rx_mode, ioaddr + RxConfig);
2292 }
2293 
2294 static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
2295 {
2296 	struct device *hwdev = dev->dev.parent;
2297 
2298 	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
2299 	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
2300 	strlcpy(info->bus_info, dev_name(hwdev), sizeof(info->bus_info));
2301 }
2302 
2303 static int netdev_get_link_ksettings(struct net_device *dev,
2304 				     struct ethtool_link_ksettings *cmd)
2305 {
2306 	struct rhine_private *rp = netdev_priv(dev);
2307 
2308 	mutex_lock(&rp->task_lock);
2309 	mii_ethtool_get_link_ksettings(&rp->mii_if, cmd);
2310 	mutex_unlock(&rp->task_lock);
2311 
2312 	return 0;
2313 }
2314 
2315 static int netdev_set_link_ksettings(struct net_device *dev,
2316 				     const struct ethtool_link_ksettings *cmd)
2317 {
2318 	struct rhine_private *rp = netdev_priv(dev);
2319 	int rc;
2320 
2321 	mutex_lock(&rp->task_lock);
2322 	rc = mii_ethtool_set_link_ksettings(&rp->mii_if, cmd);
2323 	rhine_set_carrier(&rp->mii_if);
2324 	mutex_unlock(&rp->task_lock);
2325 
2326 	return rc;
2327 }
2328 
2329 static int netdev_nway_reset(struct net_device *dev)
2330 {
2331 	struct rhine_private *rp = netdev_priv(dev);
2332 
2333 	return mii_nway_restart(&rp->mii_if);
2334 }
2335 
2336 static u32 netdev_get_link(struct net_device *dev)
2337 {
2338 	struct rhine_private *rp = netdev_priv(dev);
2339 
2340 	return mii_link_ok(&rp->mii_if);
2341 }
2342 
2343 static u32 netdev_get_msglevel(struct net_device *dev)
2344 {
2345 	struct rhine_private *rp = netdev_priv(dev);
2346 
2347 	return rp->msg_enable;
2348 }
2349 
2350 static void netdev_set_msglevel(struct net_device *dev, u32 value)
2351 {
2352 	struct rhine_private *rp = netdev_priv(dev);
2353 
2354 	rp->msg_enable = value;
2355 }
2356 
2357 static void rhine_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2358 {
2359 	struct rhine_private *rp = netdev_priv(dev);
2360 
2361 	if (!(rp->quirks & rqWOL))
2362 		return;
2363 
2364 	spin_lock_irq(&rp->lock);
2365 	wol->supported = WAKE_PHY | WAKE_MAGIC |
2366 			 WAKE_UCAST | WAKE_MCAST | WAKE_BCAST;	/* Untested */
2367 	wol->wolopts = rp->wolopts;
2368 	spin_unlock_irq(&rp->lock);
2369 }
2370 
2371 static int rhine_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2372 {
2373 	struct rhine_private *rp = netdev_priv(dev);
2374 	u32 support = WAKE_PHY | WAKE_MAGIC |
2375 		      WAKE_UCAST | WAKE_MCAST | WAKE_BCAST;	/* Untested */
2376 
2377 	if (!(rp->quirks & rqWOL))
2378 		return -EINVAL;
2379 
2380 	if (wol->wolopts & ~support)
2381 		return -EINVAL;
2382 
2383 	spin_lock_irq(&rp->lock);
2384 	rp->wolopts = wol->wolopts;
2385 	spin_unlock_irq(&rp->lock);
2386 
2387 	return 0;
2388 }
2389 
2390 static const struct ethtool_ops netdev_ethtool_ops = {
2391 	.get_drvinfo		= netdev_get_drvinfo,
2392 	.nway_reset		= netdev_nway_reset,
2393 	.get_link		= netdev_get_link,
2394 	.get_msglevel		= netdev_get_msglevel,
2395 	.set_msglevel		= netdev_set_msglevel,
2396 	.get_wol		= rhine_get_wol,
2397 	.set_wol		= rhine_set_wol,
2398 	.get_link_ksettings	= netdev_get_link_ksettings,
2399 	.set_link_ksettings	= netdev_set_link_ksettings,
2400 };
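
/*
 * These hooks back ethtool(8).  Illustrative invocations (interface name is
 * hypothetical):
 *
 *   ethtool eth0                                    # link state and settings
 *   ethtool -s eth0 speed 100 duplex full autoneg off
 *   ethtool -s eth0 wol g                           # wake on magic packet
 */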
2401 
2402 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2403 {
2404 	struct rhine_private *rp = netdev_priv(dev);
2405 	int rc;
2406 
2407 	if (!netif_running(dev))
2408 		return -EINVAL;
2409 
2410 	mutex_lock(&rp->task_lock);
2411 	rc = generic_mii_ioctl(&rp->mii_if, if_mii(rq), cmd, NULL);
2412 	rhine_set_carrier(&rp->mii_if);
2413 	mutex_unlock(&rp->task_lock);
2414 
2415 	return rc;
2416 }
2417 
2418 static int rhine_close(struct net_device *dev)
2419 {
2420 	struct rhine_private *rp = netdev_priv(dev);
2421 	void __iomem *ioaddr = rp->base;
2422 
2423 	rhine_task_disable(rp);
2424 	napi_disable(&rp->napi);
2425 	netif_stop_queue(dev);
2426 
2427 	netif_dbg(rp, ifdown, dev, "Shutting down ethercard, status was %04x\n",
2428 		  ioread16(ioaddr + ChipCmd));
2429 
2430 	/* Switch to loopback mode to avoid hardware races. */
2431 	iowrite8(rp->tx_thresh | 0x02, ioaddr + TxConfig);
2432 
2433 	rhine_irq_disable(rp);
2434 
2435 	/* Stop the chip's Tx and Rx processes. */
2436 	iowrite16(CmdStop, ioaddr + ChipCmd);
2437 
2438 	free_irq(rp->irq, dev);
2439 	free_rbufs(dev);
2440 	free_tbufs(dev);
2441 	free_ring(dev);
2442 
2443 	return 0;
2444 }
2445 
2446 
2447 static void rhine_remove_one_pci(struct pci_dev *pdev)
2448 {
2449 	struct net_device *dev = pci_get_drvdata(pdev);
2450 	struct rhine_private *rp = netdev_priv(dev);
2451 
2452 	unregister_netdev(dev);
2453 
2454 	pci_iounmap(pdev, rp->base);
2455 	pci_release_regions(pdev);
2456 
2457 	free_netdev(dev);
2458 	pci_disable_device(pdev);
2459 }
2460 
2461 static int rhine_remove_one_platform(struct platform_device *pdev)
2462 {
2463 	struct net_device *dev = platform_get_drvdata(pdev);
2464 	struct rhine_private *rp = netdev_priv(dev);
2465 
2466 	unregister_netdev(dev);
2467 
2468 	iounmap(rp->base);
2469 
2470 	free_netdev(dev);
2471 
2472 	return 0;
2473 }
2474 
2475 static void rhine_shutdown_pci(struct pci_dev *pdev)
2476 {
2477 	struct net_device *dev = pci_get_drvdata(pdev);
2478 	struct rhine_private *rp = netdev_priv(dev);
2479 	void __iomem *ioaddr = rp->base;
2480 
2481 	if (!(rp->quirks & rqWOL))
2482 		return; /* Nothing to do for non-WOL adapters */
2483 
2484 	rhine_power_init(dev);
2485 
2486 	/* Make sure we use pattern 0, 1 and not 4, 5 */
2487 	if (rp->quirks & rq6patterns)
2488 		iowrite8(0x04, ioaddr + WOLcgClr);
2489 
2490 	spin_lock(&rp->lock);
2491 
2492 	if (rp->wolopts & WAKE_MAGIC) {
2493 		iowrite8(WOLmagic, ioaddr + WOLcrSet);
2494 		/*
2495 		 * Turn EEPROM-controlled wake-up back on -- some hardware may
2496 		 * not cooperate otherwise.
2497 		 */
2498 		iowrite8(ioread8(ioaddr + ConfigA) | 0x03, ioaddr + ConfigA);
2499 	}
2500 
2501 	if (rp->wolopts & (WAKE_BCAST|WAKE_MCAST))
2502 		iowrite8(WOLbmcast, ioaddr + WOLcgSet);
2503 
2504 	if (rp->wolopts & WAKE_PHY)
2505 		iowrite8(WOLlnkon | WOLlnkoff, ioaddr + WOLcrSet);
2506 
2507 	if (rp->wolopts & WAKE_UCAST)
2508 		iowrite8(WOLucast, ioaddr + WOLcrSet);
2509 
2510 	if (rp->wolopts) {
2511 		/* Enable legacy WOL (for old motherboards) */
2512 		iowrite8(0x01, ioaddr + PwcfgSet);
2513 		iowrite8(ioread8(ioaddr + StickyHW) | 0x04, ioaddr + StickyHW);
2514 	}
2515 
2516 	spin_unlock(&rp->lock);
2517 
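	/*
	 * Only drop the chip into D3hot on a real power-off, and only when
	 * the BIOS is not known to mishandle it (see avoid_D3).
	 */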
2518 	if (system_state == SYSTEM_POWER_OFF && !avoid_D3) {
2519 		iowrite8(ioread8(ioaddr + StickyHW) | 0x03, ioaddr + StickyHW);
2520 
2521 		pci_wake_from_d3(pdev, true);
2522 		pci_set_power_state(pdev, PCI_D3hot);
2523 	}
2524 }
2525 
2526 #ifdef CONFIG_PM_SLEEP
2527 static int rhine_suspend(struct device *device)
2528 {
2529 	struct net_device *dev = dev_get_drvdata(device);
2530 	struct rhine_private *rp = netdev_priv(dev);
2531 
2532 	if (!netif_running(dev))
2533 		return 0;
2534 
2535 	rhine_task_disable(rp);
2536 	rhine_irq_disable(rp);
2537 	napi_disable(&rp->napi);
2538 
2539 	netif_device_detach(dev);
2540 
2541 	if (dev_is_pci(device))
2542 		rhine_shutdown_pci(to_pci_dev(device));
2543 
2544 	return 0;
2545 }
2546 
2547 static int rhine_resume(struct device *device)
2548 {
2549 	struct net_device *dev = dev_get_drvdata(device);
2550 	struct rhine_private *rp = netdev_priv(dev);
2551 
2552 	if (!netif_running(dev))
2553 		return 0;
2554 
2555 	enable_mmio(rp->pioaddr, rp->quirks);
2556 	rhine_power_init(dev);
2557 	free_tbufs(dev);
2558 	alloc_tbufs(dev);
2559 	rhine_reset_rbufs(rp);
2560 	rhine_task_enable(rp);
2561 	spin_lock_bh(&rp->lock);
2562 	init_registers(dev);
2563 	spin_unlock_bh(&rp->lock);
2564 
2565 	netif_device_attach(dev);
2566 
2567 	return 0;
2568 }
2569 
2570 static SIMPLE_DEV_PM_OPS(rhine_pm_ops, rhine_suspend, rhine_resume);
2571 #define RHINE_PM_OPS	(&rhine_pm_ops)
2572 
2573 #else
2574 
2575 #define RHINE_PM_OPS	NULL
2576 
2577 #endif /* !CONFIG_PM_SLEEP */
2578 
2579 static struct pci_driver rhine_driver_pci = {
2580 	.name		= DRV_NAME,
2581 	.id_table	= rhine_pci_tbl,
2582 	.probe		= rhine_init_one_pci,
2583 	.remove		= rhine_remove_one_pci,
2584 	.shutdown	= rhine_shutdown_pci,
2585 	.driver.pm	= RHINE_PM_OPS,
2586 };
2587 
2588 static struct platform_driver rhine_driver_platform = {
2589 	.probe		= rhine_init_one_platform,
2590 	.remove		= rhine_remove_one_platform,
2591 	.driver = {
2592 		.name	= DRV_NAME,
2593 		.of_match_table	= rhine_of_tbl,
2594 		.pm		= RHINE_PM_OPS,
2595 	}
2596 };
2597 
2598 static const struct dmi_system_id rhine_dmi_table[] __initconst = {
2599 	{
2600 		.ident = "EPIA-M",
2601 		.matches = {
2602 			DMI_MATCH(DMI_BIOS_VENDOR, "Award Software International, Inc."),
2603 			DMI_MATCH(DMI_BIOS_VERSION, "6.00 PG"),
2604 		},
2605 	},
2606 	{
2607 		.ident = "KV7",
2608 		.matches = {
2609 			DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies, LTD"),
2610 			DMI_MATCH(DMI_BIOS_VERSION, "6.00 PG"),
2611 		},
2612 	},
2613 	{ NULL }
2614 };
2615 
2616 static int __init rhine_init(void)
2617 {
2618 	int ret_pci, ret_platform;
2619 
2620 /* When built as a module, print the version whether or not any devices are found in probe. */
2621 #ifdef MODULE
2622 	pr_info("%s\n", version);
2623 #endif
2624 	if (dmi_check_system(rhine_dmi_table)) {
2625 		/* these BIOSes fail at PXE boot if chip is in D3 */
2626 		avoid_D3 = true;
2627 		pr_warn("Broken BIOS detected, avoid_D3 enabled\n");
2628 	} else if (avoid_D3)
2630 		pr_info("avoid_D3 set\n");
2631 
2632 	ret_pci = pci_register_driver(&rhine_driver_pci);
2633 	ret_platform = platform_driver_register(&rhine_driver_platform);
2634 	if ((ret_pci < 0) && (ret_platform < 0))
2635 		return ret_pci;
2636 
2637 	return 0;
2638 }
2639 
2640 
2641 static void __exit rhine_cleanup(void)
2642 {
2643 	platform_driver_unregister(&rhine_driver_platform);
2644 	pci_unregister_driver(&rhine_driver_pci);
2645 }
2646 
2647 
2648 module_init(rhine_init);
2649 module_exit(rhine_cleanup);
2650