1 /* 8139cp.c: A Linux PCI Ethernet driver for the RealTek 8139C+ chips. */
2 /*
3 	Copyright 2001-2004 Jeff Garzik <jgarzik@pobox.com>
4 
5 	Copyright (C) 2001, 2002 David S. Miller (davem@redhat.com) [tg3.c]
6 	Copyright (C) 2000, 2001 David S. Miller (davem@redhat.com) [sungem.c]
7 	Copyright 2001 Manfred Spraul				    [natsemi.c]
8 	Copyright 1999-2001 by Donald Becker.			    [natsemi.c]
	Written 1997-2001 by Donald Becker.			    [8139too.c]
10 	Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>. [acenic.c]
11 
12 	This software may be used and distributed according to the terms of
13 	the GNU General Public License (GPL), incorporated herein by reference.
14 	Drivers based on or derived from this code fall under the GPL and must
15 	retain the authorship, copyright and license notice.  This file is not
16 	a complete program and may only be used when the entire operating
17 	system is licensed under the GPL.
18 
19 	See the file COPYING in this distribution for more information.
20 
21 	Contributors:
22 
23 		Wake-on-LAN support - Felipe Damasio <felipewd@terra.com.br>
24 		PCI suspend/resume  - Felipe Damasio <felipewd@terra.com.br>
25 		LinkChg interrupt   - Felipe Damasio <felipewd@terra.com.br>
26 
27 	TODO:
28 	* Test Tx checksumming thoroughly
29 
30 	Low priority TODO:
31 	* Complete reset on PciErr
32 	* Consider Rx interrupt mitigation using TimerIntr
33 	* Investigate using skb->priority with h/w VLAN priority
34 	* Investigate using High Priority Tx Queue with skb->priority
35 	* Adjust Rx FIFO threshold and Max Rx DMA burst on Rx FIFO error
36 	* Adjust Tx FIFO threshold and Max Tx DMA burst on Tx FIFO error
37 	* Implement Tx software interrupt mitigation via
38 	  Tx descriptor bit
39 	* The real minimum of CP_MIN_MTU is 4 bytes.  However,
40 	  for this to be supported, one must(?) turn on packet padding.
41 	* Support external MII transceivers (patch available)
42 
43 	NOTES:
	* TX checksumming is considered experimental.  It is off by
	  default; use ethtool to turn it on.
46 
47  */
48 
49 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
50 
51 #define DRV_NAME		"8139cp"
52 #define DRV_VERSION		"1.3"
53 #define DRV_RELDATE		"Mar 22, 2004"
54 
55 
56 #include <linux/module.h>
57 #include <linux/moduleparam.h>
58 #include <linux/kernel.h>
59 #include <linux/compiler.h>
60 #include <linux/netdevice.h>
61 #include <linux/etherdevice.h>
62 #include <linux/init.h>
63 #include <linux/interrupt.h>
64 #include <linux/pci.h>
65 #include <linux/dma-mapping.h>
66 #include <linux/delay.h>
67 #include <linux/ethtool.h>
68 #include <linux/gfp.h>
69 #include <linux/mii.h>
70 #include <linux/if_vlan.h>
71 #include <linux/crc32.h>
72 #include <linux/in.h>
73 #include <linux/ip.h>
74 #include <linux/tcp.h>
75 #include <linux/udp.h>
76 #include <linux/cache.h>
77 #include <asm/io.h>
78 #include <asm/irq.h>
79 #include <asm/uaccess.h>
80 
81 /* These identify the driver base version and may not be removed. */
82 static char version[] =
83 DRV_NAME ": 10/100 PCI Ethernet driver v" DRV_VERSION " (" DRV_RELDATE ")\n";
84 
85 MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>");
86 MODULE_DESCRIPTION("RealTek RTL-8139C+ series 10/100 PCI Ethernet driver");
87 MODULE_VERSION(DRV_VERSION);
88 MODULE_LICENSE("GPL");
89 
90 static int debug = -1;
91 module_param(debug, int, 0);
92 MODULE_PARM_DESC (debug, "8139cp: bitmapped message enable number");
93 
94 /* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
95    The RTL chips use a 64 element hash table based on the Ethernet CRC.  */
96 static int multicast_filter_limit = 32;
97 module_param(multicast_filter_limit, int, 0);
98 MODULE_PARM_DESC (multicast_filter_limit, "8139cp: maximum number of filtered multicast addresses");
99 
100 #define CP_DEF_MSG_ENABLE	(NETIF_MSG_DRV		| \
101 				 NETIF_MSG_PROBE 	| \
102 				 NETIF_MSG_LINK)
103 #define CP_NUM_STATS		14	/* struct cp_dma_stats, plus one */
104 #define CP_STATS_SIZE		64	/* size in bytes of DMA stats block */
105 #define CP_REGS_SIZE		(0xff + 1)
106 #define CP_REGS_VER		1		/* version 1 */
107 #define CP_RX_RING_SIZE		64
108 #define CP_TX_RING_SIZE		64
109 #define CP_RING_BYTES		\
110 		((sizeof(struct cp_desc) * CP_RX_RING_SIZE) +	\
111 		 (sizeof(struct cp_desc) * CP_TX_RING_SIZE) +	\
112 		 CP_STATS_SIZE)
113 #define NEXT_TX(N)		(((N) + 1) & (CP_TX_RING_SIZE - 1))
114 #define NEXT_RX(N)		(((N) + 1) & (CP_RX_RING_SIZE - 1))
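/* Number of free Tx descriptors.  One slot is always left unused so that
 * tx_head == tx_tail unambiguously means "ring empty".
 */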
115 #define TX_BUFFS_AVAIL(CP)					\
116 	(((CP)->tx_tail <= (CP)->tx_head) ?			\
117 	  (CP)->tx_tail + (CP_TX_RING_SIZE - 1) - (CP)->tx_head :	\
118 	  (CP)->tx_tail - (CP)->tx_head - 1)
119 
120 #define PKT_BUF_SZ		1536	/* Size of each temporary Rx buffer.*/
121 #define CP_INTERNAL_PHY		32
122 
123 /* The following settings are log_2(bytes)-4:  0 == 16 bytes .. 6==1024, 7==end of packet. */
124 #define RX_FIFO_THRESH		5	/* Rx buffer level before first PCI xfer.  */
125 #define RX_DMA_BURST		4	/* Maximum PCI burst, '4' is 256 */
126 #define TX_DMA_BURST		6	/* Maximum PCI burst, '6' is 1024 */
127 #define TX_EARLY_THRESH		256	/* Early Tx threshold, in bytes */
128 
129 /* Time in jiffies before concluding the transmitter is hung. */
130 #define TX_TIMEOUT		(6*HZ)
131 
132 /* hardware minimum and maximum for a single frame's data payload */
133 #define CP_MIN_MTU		60	/* TODO: allow lower, but pad */
134 #define CP_MAX_MTU		4096
135 
136 enum {
137 	/* NIC register offsets */
138 	MAC0		= 0x00,	/* Ethernet hardware address. */
139 	MAR0		= 0x08,	/* Multicast filter. */
140 	StatsAddr	= 0x10,	/* 64-bit start addr of 64-byte DMA stats blk */
141 	TxRingAddr	= 0x20, /* 64-bit start addr of Tx ring */
142 	HiTxRingAddr	= 0x28, /* 64-bit start addr of high priority Tx ring */
143 	Cmd		= 0x37, /* Command register */
144 	IntrMask	= 0x3C, /* Interrupt mask */
145 	IntrStatus	= 0x3E, /* Interrupt status */
146 	TxConfig	= 0x40, /* Tx configuration */
147 	ChipVersion	= 0x43, /* 8-bit chip version, inside TxConfig */
148 	RxConfig	= 0x44, /* Rx configuration */
149 	RxMissed	= 0x4C,	/* 24 bits valid, write clears */
150 	Cfg9346		= 0x50, /* EEPROM select/control; Cfg reg [un]lock */
151 	Config1		= 0x52, /* Config1 */
152 	Config3		= 0x59, /* Config3 */
153 	Config4		= 0x5A, /* Config4 */
154 	MultiIntr	= 0x5C, /* Multiple interrupt select */
155 	BasicModeCtrl	= 0x62,	/* MII BMCR */
156 	BasicModeStatus	= 0x64, /* MII BMSR */
157 	NWayAdvert	= 0x66, /* MII ADVERTISE */
158 	NWayLPAR	= 0x68, /* MII LPA */
159 	NWayExpansion	= 0x6A, /* MII Expansion */
160 	Config5		= 0xD8,	/* Config5 */
161 	TxPoll		= 0xD9,	/* Tell chip to check Tx descriptors for work */
162 	RxMaxSize	= 0xDA, /* Max size of an Rx packet (8169 only) */
163 	CpCmd		= 0xE0, /* C+ Command register (C+ mode only) */
164 	IntrMitigate	= 0xE2,	/* rx/tx interrupt mitigation control */
165 	RxRingAddr	= 0xE4, /* 64-bit start addr of Rx ring */
166 	TxThresh	= 0xEC, /* Early Tx threshold */
167 	OldRxBufAddr	= 0x30, /* DMA address of Rx ring buffer (C mode) */
168 	OldTSD0		= 0x10, /* DMA address of first Tx desc (C mode) */
169 
170 	/* Tx and Rx status descriptors */
171 	DescOwn		= (1 << 31), /* Descriptor is owned by NIC */
172 	RingEnd		= (1 << 30), /* End of descriptor ring */
173 	FirstFrag	= (1 << 29), /* First segment of a packet */
174 	LastFrag	= (1 << 28), /* Final segment of a packet */
175 	LargeSend	= (1 << 27), /* TCP Large Send Offload (TSO) */
176 	MSSShift	= 16,	     /* MSS value position */
177 	MSSMask		= 0xfff,     /* MSS value: 11 bits */
178 	TxError		= (1 << 23), /* Tx error summary */
179 	RxError		= (1 << 20), /* Rx error summary */
180 	IPCS		= (1 << 18), /* Calculate IP checksum */
181 	UDPCS		= (1 << 17), /* Calculate UDP/IP checksum */
182 	TCPCS		= (1 << 16), /* Calculate TCP/IP checksum */
183 	TxVlanTag	= (1 << 17), /* Add VLAN tag */
184 	RxVlanTagged	= (1 << 16), /* Rx VLAN tag available */
185 	IPFail		= (1 << 15), /* IP checksum failed */
186 	UDPFail		= (1 << 14), /* UDP/IP checksum failed */
187 	TCPFail		= (1 << 13), /* TCP/IP checksum failed */
188 	NormalTxPoll	= (1 << 6),  /* One or more normal Tx packets to send */
189 	PID1		= (1 << 17), /* 2 protocol id bits:  0==non-IP, */
190 	PID0		= (1 << 16), /* 1==UDP/IP, 2==TCP/IP, 3==IP */
191 	RxProtoTCP	= 1,
192 	RxProtoUDP	= 2,
193 	RxProtoIP	= 3,
194 	TxFIFOUnder	= (1 << 25), /* Tx FIFO underrun */
195 	TxOWC		= (1 << 22), /* Tx Out-of-window collision */
196 	TxLinkFail	= (1 << 21), /* Link failed during Tx of packet */
197 	TxMaxCol	= (1 << 20), /* Tx aborted due to excessive collisions */
198 	TxColCntShift	= 16,	     /* Shift, to get 4-bit Tx collision cnt */
199 	TxColCntMask	= 0x01 | 0x02 | 0x04 | 0x08, /* 4-bit collision count */
200 	RxErrFrame	= (1 << 27), /* Rx frame alignment error */
201 	RxMcast		= (1 << 26), /* Rx multicast packet rcv'd */
202 	RxErrCRC	= (1 << 18), /* Rx CRC error */
203 	RxErrRunt	= (1 << 19), /* Rx error, packet < 64 bytes */
204 	RxErrLong	= (1 << 21), /* Rx error, packet > 4096 bytes */
205 	RxErrFIFO	= (1 << 22), /* Rx error, FIFO overflowed, pkt bad */
206 
207 	/* StatsAddr register */
208 	DumpStats	= (1 << 3),  /* Begin stats dump */
209 
210 	/* RxConfig register */
211 	RxCfgFIFOShift	= 13,	     /* Shift, to get Rx FIFO thresh value */
212 	RxCfgDMAShift	= 8,	     /* Shift, to get Rx Max DMA value */
213 	AcceptErr	= 0x20,	     /* Accept packets with CRC errors */
214 	AcceptRunt	= 0x10,	     /* Accept runt (<64 bytes) packets */
215 	AcceptBroadcast	= 0x08,	     /* Accept broadcast packets */
216 	AcceptMulticast	= 0x04,	     /* Accept multicast packets */
217 	AcceptMyPhys	= 0x02,	     /* Accept pkts with our MAC as dest */
218 	AcceptAllPhys	= 0x01,	     /* Accept all pkts w/ physical dest */
219 
220 	/* IntrMask / IntrStatus registers */
221 	PciErr		= (1 << 15), /* System error on the PCI bus */
222 	TimerIntr	= (1 << 14), /* Asserted when TCTR reaches TimerInt value */
223 	LenChg		= (1 << 13), /* Cable length change */
224 	SWInt		= (1 << 8),  /* Software-requested interrupt */
225 	TxEmpty		= (1 << 7),  /* No Tx descriptors available */
226 	RxFIFOOvr	= (1 << 6),  /* Rx FIFO Overflow */
227 	LinkChg		= (1 << 5),  /* Packet underrun, or link change */
228 	RxEmpty		= (1 << 4),  /* No Rx descriptors available */
229 	TxErr		= (1 << 3),  /* Tx error */
230 	TxOK		= (1 << 2),  /* Tx packet sent */
231 	RxErr		= (1 << 1),  /* Rx error */
232 	RxOK		= (1 << 0),  /* Rx packet received */
233 	IntrResvd	= (1 << 10), /* reserved, according to RealTek engineers,
234 					but hardware likes to raise it */
235 
236 	IntrAll		= PciErr | TimerIntr | LenChg | SWInt | TxEmpty |
237 			  RxFIFOOvr | LinkChg | RxEmpty | TxErr | TxOK |
238 			  RxErr | RxOK | IntrResvd,
239 
240 	/* C mode command register */
241 	CmdReset	= (1 << 4),  /* Enable to reset; self-clearing */
242 	RxOn		= (1 << 3),  /* Rx mode enable */
243 	TxOn		= (1 << 2),  /* Tx mode enable */
244 
245 	/* C+ mode command register */
246 	RxVlanOn	= (1 << 6),  /* Rx VLAN de-tagging enable */
247 	RxChkSum	= (1 << 5),  /* Rx checksum offload enable */
248 	PCIDAC		= (1 << 4),  /* PCI Dual Address Cycle (64-bit PCI) */
249 	PCIMulRW	= (1 << 3),  /* Enable PCI read/write multiple */
250 	CpRxOn		= (1 << 1),  /* Rx mode enable */
251 	CpTxOn		= (1 << 0),  /* Tx mode enable */
252 
	/* Cfg9346 EEPROM control register */
254 	Cfg9346_Lock	= 0x00,	     /* Lock ConfigX/MII register access */
255 	Cfg9346_Unlock	= 0xC0,	     /* Unlock ConfigX/MII register access */
256 
257 	/* TxConfig register */
258 	IFG		= (1 << 25) | (1 << 24), /* standard IEEE interframe gap */
259 	TxDMAShift	= 8,	     /* DMA burst value (0-7) is shift this many bits */
260 
261 	/* Early Tx Threshold register */
262 	TxThreshMask	= 0x3f,	     /* Mask bits 5-0 */
263 	TxThreshMax	= 2048,	     /* Max early Tx threshold */
264 
265 	/* Config1 register */
266 	DriverLoaded	= (1 << 5),  /* Software marker, driver is loaded */
267 	LWACT           = (1 << 4),  /* LWAKE active mode */
268 	PMEnable	= (1 << 0),  /* Enable various PM features of chip */
269 
270 	/* Config3 register */
271 	PARMEnable	= (1 << 6),  /* Enable auto-loading of PHY parms */
272 	MagicPacket     = (1 << 5),  /* Wake up when receives a Magic Packet */
273 	LinkUp          = (1 << 4),  /* Wake up when the cable connection is re-established */
274 
275 	/* Config4 register */
276 	LWPTN           = (1 << 1),  /* LWAKE Pattern */
277 	LWPME           = (1 << 4),  /* LANWAKE vs PMEB */
278 
279 	/* Config5 register */
280 	BWF             = (1 << 6),  /* Accept Broadcast wakeup frame */
281 	MWF             = (1 << 5),  /* Accept Multicast wakeup frame */
282 	UWF             = (1 << 4),  /* Accept Unicast wakeup frame */
283 	LANWake         = (1 << 1),  /* Enable LANWake signal */
284 	PMEStatus	= (1 << 0),  /* PME status can be reset by PCI RST# */
285 
286 	cp_norx_intr_mask = PciErr | LinkChg | TxOK | TxErr | TxEmpty,
287 	cp_rx_intr_mask = RxOK | RxErr | RxEmpty | RxFIFOOvr,
288 	cp_intr_mask = cp_rx_intr_mask | cp_norx_intr_mask,
289 };
290 
291 static const unsigned int cp_rx_config =
292 	  (RX_FIFO_THRESH << RxCfgFIFOShift) |
293 	  (RX_DMA_BURST << RxCfgDMAShift);
294 
295 struct cp_desc {
296 	__le32		opts1;
297 	__le32		opts2;
298 	__le64		addr;
299 };
300 
301 struct cp_dma_stats {
302 	__le64			tx_ok;
303 	__le64			rx_ok;
304 	__le64			tx_err;
305 	__le32			rx_err;
306 	__le16			rx_fifo;
307 	__le16			frame_align;
308 	__le32			tx_ok_1col;
309 	__le32			tx_ok_mcol;
310 	__le64			rx_ok_phys;
311 	__le64			rx_ok_bcast;
312 	__le32			rx_ok_mcast;
313 	__le16			tx_abort;
314 	__le16			tx_underrun;
315 } __packed;
316 
317 struct cp_extra_stats {
318 	unsigned long		rx_frags;
319 };
320 
321 struct cp_private {
322 	void			__iomem *regs;
323 	struct net_device	*dev;
324 	spinlock_t		lock;
325 	u32			msg_enable;
326 
327 	struct napi_struct	napi;
328 
329 	struct pci_dev		*pdev;
330 	u32			rx_config;
331 	u16			cpcmd;
332 
333 	struct cp_extra_stats	cp_stats;
334 
335 	unsigned		rx_head		____cacheline_aligned;
336 	unsigned		rx_tail;
337 	struct cp_desc		*rx_ring;
338 	struct sk_buff		*rx_skb[CP_RX_RING_SIZE];
339 
340 	unsigned		tx_head		____cacheline_aligned;
341 	unsigned		tx_tail;
342 	struct cp_desc		*tx_ring;
343 	struct sk_buff		*tx_skb[CP_TX_RING_SIZE];
344 
345 	unsigned		rx_buf_sz;
346 	unsigned		wol_enabled : 1; /* Is Wake-on-LAN enabled? */
347 
348 	dma_addr_t		ring_dma;
349 
350 	struct mii_if_info	mii_if;
351 };
352 
353 #define cpr8(reg)	readb(cp->regs + (reg))
354 #define cpr16(reg)	readw(cp->regs + (reg))
355 #define cpr32(reg)	readl(cp->regs + (reg))
356 #define cpw8(reg,val)	writeb((val), cp->regs + (reg))
357 #define cpw16(reg,val)	writew((val), cp->regs + (reg))
358 #define cpw32(reg,val)	writel((val), cp->regs + (reg))
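/* The _f variants read the register back after writing, forcing the posted
 * MMIO write to reach the chip before we continue.
 */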
359 #define cpw8_f(reg,val) do {			\
360 	writeb((val), cp->regs + (reg));	\
361 	readb(cp->regs + (reg));		\
362 	} while (0)
363 #define cpw16_f(reg,val) do {			\
364 	writew((val), cp->regs + (reg));	\
365 	readw(cp->regs + (reg));		\
366 	} while (0)
367 #define cpw32_f(reg,val) do {			\
368 	writel((val), cp->regs + (reg));	\
369 	readl(cp->regs + (reg));		\
370 	} while (0)
371 
372 
373 static void __cp_set_rx_mode (struct net_device *dev);
374 static void cp_tx (struct cp_private *cp);
375 static void cp_clean_rings (struct cp_private *cp);
376 #ifdef CONFIG_NET_POLL_CONTROLLER
377 static void cp_poll_controller(struct net_device *dev);
378 #endif
379 static int cp_get_eeprom_len(struct net_device *dev);
380 static int cp_get_eeprom(struct net_device *dev,
381 			 struct ethtool_eeprom *eeprom, u8 *data);
382 static int cp_set_eeprom(struct net_device *dev,
383 			 struct ethtool_eeprom *eeprom, u8 *data);
384 
385 static DEFINE_PCI_DEVICE_TABLE(cp_pci_tbl) = {
386 	{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK,	PCI_DEVICE_ID_REALTEK_8139), },
387 	{ PCI_DEVICE(PCI_VENDOR_ID_TTTECH,	PCI_DEVICE_ID_TTTECH_MC322), },
388 	{ },
389 };
390 MODULE_DEVICE_TABLE(pci, cp_pci_tbl);
391 
392 static struct {
393 	const char str[ETH_GSTRING_LEN];
394 } ethtool_stats_keys[] = {
395 	{ "tx_ok" },
396 	{ "rx_ok" },
397 	{ "tx_err" },
398 	{ "rx_err" },
399 	{ "rx_fifo" },
400 	{ "frame_align" },
401 	{ "tx_ok_1col" },
402 	{ "tx_ok_mcol" },
403 	{ "rx_ok_phys" },
404 	{ "rx_ok_bcast" },
405 	{ "rx_ok_mcast" },
406 	{ "tx_abort" },
407 	{ "tx_underrun" },
408 	{ "rx_frags" },
409 };
410 
411 
412 static inline void cp_set_rxbufsize (struct cp_private *cp)
413 {
414 	unsigned int mtu = cp->dev->mtu;
415 
416 	if (mtu > ETH_DATA_LEN)
417 		/* MTU + ethernet header + FCS + optional VLAN tag */
418 		cp->rx_buf_sz = mtu + ETH_HLEN + 8;
419 	else
420 		cp->rx_buf_sz = PKT_BUF_SZ;
421 }
422 
423 static inline void cp_rx_skb (struct cp_private *cp, struct sk_buff *skb,
424 			      struct cp_desc *desc)
425 {
426 	u32 opts2 = le32_to_cpu(desc->opts2);
427 
428 	skb->protocol = eth_type_trans (skb, cp->dev);
429 
430 	cp->dev->stats.rx_packets++;
431 	cp->dev->stats.rx_bytes += skb->len;
432 
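	/* opts2 carries the 802.1Q tag with its bytes swapped; undo that
	 * before handing the tag to the stack.
	 */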
433 	if (opts2 & RxVlanTagged)
434 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), swab16(opts2 & 0xffff));
435 
436 	napi_gro_receive(&cp->napi, skb);
437 }
438 
439 static void cp_rx_err_acct (struct cp_private *cp, unsigned rx_tail,
440 			    u32 status, u32 len)
441 {
442 	netif_dbg(cp, rx_err, cp->dev, "rx err, slot %d status 0x%x len %d\n",
443 		  rx_tail, status, len);
444 	cp->dev->stats.rx_errors++;
445 	if (status & RxErrFrame)
446 		cp->dev->stats.rx_frame_errors++;
447 	if (status & RxErrCRC)
448 		cp->dev->stats.rx_crc_errors++;
449 	if ((status & RxErrRunt) || (status & RxErrLong))
450 		cp->dev->stats.rx_length_errors++;
451 	if ((status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag))
452 		cp->dev->stats.rx_length_errors++;
453 	if (status & RxErrFIFO)
454 		cp->dev->stats.rx_fifo_errors++;
455 }
456 
457 static inline unsigned int cp_rx_csum_ok (u32 status)
458 {
459 	unsigned int protocol = (status >> 16) & 0x3;
460 
461 	if (((protocol == RxProtoTCP) && !(status & TCPFail)) ||
462 	    ((protocol == RxProtoUDP) && !(status & UDPFail)))
463 		return 1;
464 	else
465 		return 0;
466 }
467 
468 static int cp_rx_poll(struct napi_struct *napi, int budget)
469 {
470 	struct cp_private *cp = container_of(napi, struct cp_private, napi);
471 	struct net_device *dev = cp->dev;
472 	unsigned int rx_tail = cp->rx_tail;
473 	int rx;
474 
475 rx_status_loop:
476 	rx = 0;
477 	cpw16(IntrStatus, cp_rx_intr_mask);
478 
479 	while (1) {
480 		u32 status, len;
481 		dma_addr_t mapping;
482 		struct sk_buff *skb, *new_skb;
483 		struct cp_desc *desc;
484 		const unsigned buflen = cp->rx_buf_sz;
485 
486 		skb = cp->rx_skb[rx_tail];
487 		BUG_ON(!skb);
488 
489 		desc = &cp->rx_ring[rx_tail];
490 		status = le32_to_cpu(desc->opts1);
491 		if (status & DescOwn)
492 			break;
493 
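		/* The hardware length includes the 4-byte FCS; strip it. */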
494 		len = (status & 0x1fff) - 4;
495 		mapping = le64_to_cpu(desc->addr);
496 
497 		if ((status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag)) {
498 			/* we don't support incoming fragmented frames.
499 			 * instead, we attempt to ensure that the
500 			 * pre-allocated RX skbs are properly sized such
501 			 * that RX fragments are never encountered
502 			 */
503 			cp_rx_err_acct(cp, rx_tail, status, len);
504 			dev->stats.rx_dropped++;
505 			cp->cp_stats.rx_frags++;
506 			goto rx_next;
507 		}
508 
509 		if (status & (RxError | RxErrFIFO)) {
510 			cp_rx_err_acct(cp, rx_tail, status, len);
511 			goto rx_next;
512 		}
513 
514 		netif_dbg(cp, rx_status, dev, "rx slot %d status 0x%x len %d\n",
515 			  rx_tail, status, len);
516 
517 		new_skb = netdev_alloc_skb_ip_align(dev, buflen);
518 		if (!new_skb) {
519 			dev->stats.rx_dropped++;
520 			goto rx_next;
521 		}
522 
523 		dma_unmap_single(&cp->pdev->dev, mapping,
524 				 buflen, PCI_DMA_FROMDEVICE);
525 
526 		/* Handle checksum offloading for incoming packets. */
527 		if (cp_rx_csum_ok(status))
528 			skb->ip_summed = CHECKSUM_UNNECESSARY;
529 		else
530 			skb_checksum_none_assert(skb);
531 
532 		skb_put(skb, len);
533 
534 		mapping = dma_map_single(&cp->pdev->dev, new_skb->data, buflen,
535 					 PCI_DMA_FROMDEVICE);
536 		cp->rx_skb[rx_tail] = new_skb;
537 
538 		cp_rx_skb(cp, skb, desc);
539 		rx++;
540 
541 rx_next:
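		/* Return the descriptor (and its buffer, new or reused) to the NIC. */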
542 		cp->rx_ring[rx_tail].opts2 = 0;
543 		cp->rx_ring[rx_tail].addr = cpu_to_le64(mapping);
544 		if (rx_tail == (CP_RX_RING_SIZE - 1))
545 			desc->opts1 = cpu_to_le32(DescOwn | RingEnd |
546 						  cp->rx_buf_sz);
547 		else
548 			desc->opts1 = cpu_to_le32(DescOwn | cp->rx_buf_sz);
549 		rx_tail = NEXT_RX(rx_tail);
550 
551 		if (rx >= budget)
552 			break;
553 	}
554 
555 	cp->rx_tail = rx_tail;
556 
557 	/* if we did not reach work limit, then we're done with
558 	 * this round of polling
559 	 */
560 	if (rx < budget) {
561 		unsigned long flags;
562 
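		/* Re-check the hardware status: a packet may have arrived
		 * after the loop above finished.  If so, poll again instead
		 * of re-enabling interrupts.
		 */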
563 		if (cpr16(IntrStatus) & cp_rx_intr_mask)
564 			goto rx_status_loop;
565 
566 		napi_gro_flush(napi, false);
567 		spin_lock_irqsave(&cp->lock, flags);
568 		__napi_complete(napi);
569 		cpw16_f(IntrMask, cp_intr_mask);
570 		spin_unlock_irqrestore(&cp->lock, flags);
571 	}
572 
573 	return rx;
574 }
575 
576 static irqreturn_t cp_interrupt (int irq, void *dev_instance)
577 {
578 	struct net_device *dev = dev_instance;
579 	struct cp_private *cp;
580 	int handled = 0;
581 	u16 status;
582 
583 	if (unlikely(dev == NULL))
584 		return IRQ_NONE;
585 	cp = netdev_priv(dev);
586 
587 	spin_lock(&cp->lock);
588 
589 	status = cpr16(IntrStatus);
590 	if (!status || (status == 0xFFFF))
591 		goto out_unlock;
592 
593 	handled = 1;
594 
595 	netif_dbg(cp, intr, dev, "intr, status %04x cmd %02x cpcmd %04x\n",
596 		  status, cpr8(Cmd), cpr16(CpCmd));
597 
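	/* Acknowledge everything except the Rx bits; cp_rx_poll() acks those
	 * itself, so no Rx events are lost before NAPI runs.
	 */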
598 	cpw16(IntrStatus, status & ~cp_rx_intr_mask);
599 
	/* close possible races with dev_close */
601 	if (unlikely(!netif_running(dev))) {
602 		cpw16(IntrMask, 0);
603 		goto out_unlock;
604 	}
605 
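	/* Rx work: mask further Rx interrupts and hand the rest over to NAPI. */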
606 	if (status & (RxOK | RxErr | RxEmpty | RxFIFOOvr))
607 		if (napi_schedule_prep(&cp->napi)) {
608 			cpw16_f(IntrMask, cp_norx_intr_mask);
609 			__napi_schedule(&cp->napi);
610 		}
611 
612 	if (status & (TxOK | TxErr | TxEmpty | SWInt))
613 		cp_tx(cp);
614 	if (status & LinkChg)
615 		mii_check_media(&cp->mii_if, netif_msg_link(cp), false);
616 
617 
618 	if (status & PciErr) {
619 		u16 pci_status;
620 
621 		pci_read_config_word(cp->pdev, PCI_STATUS, &pci_status);
622 		pci_write_config_word(cp->pdev, PCI_STATUS, pci_status);
623 		netdev_err(dev, "PCI bus error, status=%04x, PCI status=%04x\n",
624 			   status, pci_status);
625 
626 		/* TODO: reset hardware */
627 	}
628 
629 out_unlock:
630 	spin_unlock(&cp->lock);
631 
632 	return IRQ_RETVAL(handled);
633 }
634 
635 #ifdef CONFIG_NET_POLL_CONTROLLER
636 /*
637  * Polling receive - used by netconsole and other diagnostic tools
638  * to allow network i/o with interrupts disabled.
639  */
640 static void cp_poll_controller(struct net_device *dev)
641 {
642 	struct cp_private *cp = netdev_priv(dev);
643 	const int irq = cp->pdev->irq;
644 
645 	disable_irq(irq);
646 	cp_interrupt(irq, dev);
647 	enable_irq(irq);
648 }
649 #endif
650 
651 static void cp_tx (struct cp_private *cp)
652 {
653 	unsigned tx_head = cp->tx_head;
654 	unsigned tx_tail = cp->tx_tail;
655 	unsigned bytes_compl = 0, pkts_compl = 0;
656 
657 	while (tx_tail != tx_head) {
658 		struct cp_desc *txd = cp->tx_ring + tx_tail;
659 		struct sk_buff *skb;
660 		u32 status;
661 
662 		rmb();
663 		status = le32_to_cpu(txd->opts1);
664 		if (status & DescOwn)
665 			break;
666 
667 		skb = cp->tx_skb[tx_tail];
668 		BUG_ON(!skb);
669 
670 		dma_unmap_single(&cp->pdev->dev, le64_to_cpu(txd->addr),
671 				 le32_to_cpu(txd->opts1) & 0xffff,
672 				 PCI_DMA_TODEVICE);
673 
674 		bytes_compl += skb->len;
675 		pkts_compl++;
676 
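		/* Only the descriptor carrying LastFrag owns the skb and
		 * reports the final transmit status.
		 */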
677 		if (status & LastFrag) {
678 			if (status & (TxError | TxFIFOUnder)) {
679 				netif_dbg(cp, tx_err, cp->dev,
680 					  "tx err, status 0x%x\n", status);
681 				cp->dev->stats.tx_errors++;
682 				if (status & TxOWC)
683 					cp->dev->stats.tx_window_errors++;
684 				if (status & TxMaxCol)
685 					cp->dev->stats.tx_aborted_errors++;
686 				if (status & TxLinkFail)
687 					cp->dev->stats.tx_carrier_errors++;
688 				if (status & TxFIFOUnder)
689 					cp->dev->stats.tx_fifo_errors++;
690 			} else {
691 				cp->dev->stats.collisions +=
692 					((status >> TxColCntShift) & TxColCntMask);
693 				cp->dev->stats.tx_packets++;
694 				cp->dev->stats.tx_bytes += skb->len;
695 				netif_dbg(cp, tx_done, cp->dev,
696 					  "tx done, slot %d\n", tx_tail);
697 			}
698 			dev_kfree_skb_irq(skb);
699 		}
700 
701 		cp->tx_skb[tx_tail] = NULL;
702 
703 		tx_tail = NEXT_TX(tx_tail);
704 	}
705 
706 	cp->tx_tail = tx_tail;
707 
708 	netdev_completed_queue(cp->dev, pkts_compl, bytes_compl);
709 	if (TX_BUFFS_AVAIL(cp) > (MAX_SKB_FRAGS + 1))
710 		netif_wake_queue(cp->dev);
711 }
712 
713 static inline u32 cp_tx_vlan_tag(struct sk_buff *skb)
714 {
715 	return vlan_tx_tag_present(skb) ?
716 		TxVlanTag | swab16(vlan_tx_tag_get(skb)) : 0x00;
717 }
718 
719 static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
720 					struct net_device *dev)
721 {
722 	struct cp_private *cp = netdev_priv(dev);
723 	unsigned entry;
724 	u32 eor, flags;
725 	unsigned long intr_flags;
726 	__le32 opts2;
727 	int mss = 0;
728 
729 	spin_lock_irqsave(&cp->lock, intr_flags);
730 
731 	/* This is a hard error, log it. */
732 	if (TX_BUFFS_AVAIL(cp) <= (skb_shinfo(skb)->nr_frags + 1)) {
733 		netif_stop_queue(dev);
734 		spin_unlock_irqrestore(&cp->lock, intr_flags);
735 		netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
736 		return NETDEV_TX_BUSY;
737 	}
738 
739 	entry = cp->tx_head;
740 	eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
741 	mss = skb_shinfo(skb)->gso_size;
742 
743 	opts2 = cpu_to_le32(cp_tx_vlan_tag(skb));
744 
745 	if (skb_shinfo(skb)->nr_frags == 0) {
746 		struct cp_desc *txd = &cp->tx_ring[entry];
747 		u32 len;
748 		dma_addr_t mapping;
749 
750 		len = skb->len;
751 		mapping = dma_map_single(&cp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE);
752 		txd->opts2 = opts2;
753 		txd->addr = cpu_to_le64(mapping);
754 		wmb();
755 
756 		flags = eor | len | DescOwn | FirstFrag | LastFrag;
757 
758 		if (mss)
759 			flags |= LargeSend | ((mss & MSSMask) << MSSShift);
760 		else if (skb->ip_summed == CHECKSUM_PARTIAL) {
761 			const struct iphdr *ip = ip_hdr(skb);
762 			if (ip->protocol == IPPROTO_TCP)
763 				flags |= IPCS | TCPCS;
764 			else if (ip->protocol == IPPROTO_UDP)
765 				flags |= IPCS | UDPCS;
766 			else
767 				WARN_ON(1);	/* we need a WARN() */
768 		}
769 
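		/* Write opts1 (with DescOwn) last so the chip never sees a
		 * half-initialized descriptor.
		 */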
770 		txd->opts1 = cpu_to_le32(flags);
771 		wmb();
772 
773 		cp->tx_skb[entry] = skb;
774 		entry = NEXT_TX(entry);
775 	} else {
776 		struct cp_desc *txd;
777 		u32 first_len, first_eor;
778 		dma_addr_t first_mapping;
779 		int frag, first_entry = entry;
780 		const struct iphdr *ip = ip_hdr(skb);
781 
782 		/* We must give this initial chunk to the device last.
783 		 * Otherwise we could race with the device.
784 		 */
785 		first_eor = eor;
786 		first_len = skb_headlen(skb);
787 		first_mapping = dma_map_single(&cp->pdev->dev, skb->data,
788 					       first_len, PCI_DMA_TODEVICE);
789 		cp->tx_skb[entry] = skb;
790 		entry = NEXT_TX(entry);
791 
792 		for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
793 			const skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
794 			u32 len;
795 			u32 ctrl;
796 			dma_addr_t mapping;
797 
798 			len = skb_frag_size(this_frag);
799 			mapping = dma_map_single(&cp->pdev->dev,
800 						 skb_frag_address(this_frag),
801 						 len, PCI_DMA_TODEVICE);
802 			eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
803 
804 			ctrl = eor | len | DescOwn;
805 
806 			if (mss)
807 				ctrl |= LargeSend |
808 					((mss & MSSMask) << MSSShift);
809 			else if (skb->ip_summed == CHECKSUM_PARTIAL) {
810 				if (ip->protocol == IPPROTO_TCP)
811 					ctrl |= IPCS | TCPCS;
812 				else if (ip->protocol == IPPROTO_UDP)
813 					ctrl |= IPCS | UDPCS;
814 				else
815 					BUG();
816 			}
817 
818 			if (frag == skb_shinfo(skb)->nr_frags - 1)
819 				ctrl |= LastFrag;
820 
821 			txd = &cp->tx_ring[entry];
822 			txd->opts2 = opts2;
823 			txd->addr = cpu_to_le64(mapping);
824 			wmb();
825 
826 			txd->opts1 = cpu_to_le32(ctrl);
827 			wmb();
828 
829 			cp->tx_skb[entry] = skb;
830 			entry = NEXT_TX(entry);
831 		}
832 
833 		txd = &cp->tx_ring[first_entry];
834 		txd->opts2 = opts2;
835 		txd->addr = cpu_to_le64(first_mapping);
836 		wmb();
837 
838 		if (skb->ip_summed == CHECKSUM_PARTIAL) {
839 			if (ip->protocol == IPPROTO_TCP)
840 				txd->opts1 = cpu_to_le32(first_eor | first_len |
841 							 FirstFrag | DescOwn |
842 							 IPCS | TCPCS);
843 			else if (ip->protocol == IPPROTO_UDP)
844 				txd->opts1 = cpu_to_le32(first_eor | first_len |
845 							 FirstFrag | DescOwn |
846 							 IPCS | UDPCS);
847 			else
848 				BUG();
849 		} else
850 			txd->opts1 = cpu_to_le32(first_eor | first_len |
851 						 FirstFrag | DescOwn);
852 		wmb();
853 	}
854 	cp->tx_head = entry;
855 
856 	netdev_sent_queue(dev, skb->len);
857 	netif_dbg(cp, tx_queued, cp->dev, "tx queued, slot %d, skblen %d\n",
858 		  entry, skb->len);
859 	if (TX_BUFFS_AVAIL(cp) <= (MAX_SKB_FRAGS + 1))
860 		netif_stop_queue(dev);
861 
862 	spin_unlock_irqrestore(&cp->lock, intr_flags);
863 
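	/* Kick the chip so it re-reads the normal-priority Tx descriptors. */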
864 	cpw8(TxPoll, NormalTxPoll);
865 
866 	return NETDEV_TX_OK;
867 }
868 
869 /* Set or clear the multicast filter for this adaptor.
870    This routine is not state sensitive and need not be SMP locked. */
871 
872 static void __cp_set_rx_mode (struct net_device *dev)
873 {
874 	struct cp_private *cp = netdev_priv(dev);
875 	u32 mc_filter[2];	/* Multicast hash filter */
876 	int rx_mode;
877 
878 	/* Note: do not reorder, GCC is clever about common statements. */
879 	if (dev->flags & IFF_PROMISC) {
880 		/* Unconditionally log net taps. */
881 		rx_mode =
882 		    AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
883 		    AcceptAllPhys;
884 		mc_filter[1] = mc_filter[0] = 0xffffffff;
885 	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
886 		   (dev->flags & IFF_ALLMULTI)) {
887 		/* Too many to filter perfectly -- accept all multicasts. */
888 		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
889 		mc_filter[1] = mc_filter[0] = 0xffffffff;
890 	} else {
891 		struct netdev_hw_addr *ha;
892 		rx_mode = AcceptBroadcast | AcceptMyPhys;
893 		mc_filter[1] = mc_filter[0] = 0;
894 		netdev_for_each_mc_addr(ha, dev) {
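			/* The top 6 bits of the CRC select one of the 64
			 * hash-filter bits. */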
895 			int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
896 
897 			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
898 			rx_mode |= AcceptMulticast;
899 		}
900 	}
901 
902 	/* We can safely update without stopping the chip. */
903 	cp->rx_config = cp_rx_config | rx_mode;
904 	cpw32_f(RxConfig, cp->rx_config);
905 
906 	cpw32_f (MAR0 + 0, mc_filter[0]);
907 	cpw32_f (MAR0 + 4, mc_filter[1]);
908 }
909 
910 static void cp_set_rx_mode (struct net_device *dev)
911 {
912 	unsigned long flags;
913 	struct cp_private *cp = netdev_priv(dev);
914 
915 	spin_lock_irqsave (&cp->lock, flags);
916 	__cp_set_rx_mode(dev);
917 	spin_unlock_irqrestore (&cp->lock, flags);
918 }
919 
920 static void __cp_get_stats(struct cp_private *cp)
921 {
922 	/* only lower 24 bits valid; write any value to clear */
923 	cp->dev->stats.rx_missed_errors += (cpr32 (RxMissed) & 0xffffff);
924 	cpw32 (RxMissed, 0);
925 }
926 
927 static struct net_device_stats *cp_get_stats(struct net_device *dev)
928 {
929 	struct cp_private *cp = netdev_priv(dev);
930 	unsigned long flags;
931 
	/* The chip only needs to report frames it silently dropped. */
933 	spin_lock_irqsave(&cp->lock, flags);
	if (netif_running(dev) && netif_device_present(dev))
		__cp_get_stats(cp);
936 	spin_unlock_irqrestore(&cp->lock, flags);
937 
938 	return &dev->stats;
939 }
940 
941 static void cp_stop_hw (struct cp_private *cp)
942 {
943 	cpw16(IntrStatus, ~(cpr16(IntrStatus)));
944 	cpw16_f(IntrMask, 0);
945 	cpw8(Cmd, 0);
946 	cpw16_f(CpCmd, 0);
947 	cpw16_f(IntrStatus, ~(cpr16(IntrStatus)));
948 
949 	cp->rx_tail = 0;
950 	cp->tx_head = cp->tx_tail = 0;
951 
952 	netdev_reset_queue(cp->dev);
953 }
954 
955 static void cp_reset_hw (struct cp_private *cp)
956 {
957 	unsigned work = 1000;
958 
959 	cpw8(Cmd, CmdReset);
960 
961 	while (work--) {
962 		if (!(cpr8(Cmd) & CmdReset))
963 			return;
964 
965 		schedule_timeout_uninterruptible(10);
966 	}
967 
968 	netdev_err(cp->dev, "hardware reset timeout\n");
969 }
970 
971 static inline void cp_start_hw (struct cp_private *cp)
972 {
973 	dma_addr_t ring_dma;
974 
975 	cpw16(CpCmd, cp->cpcmd);
976 
977 	/*
978 	 * These (at least TxRingAddr) need to be configured after the
979 	 * corresponding bits in CpCmd are enabled. Datasheet v1.6 §6.33
980 	 * (C+ Command Register) recommends that these and more be configured
981 	 * *after* the [RT]xEnable bits in CpCmd are set. And on some hardware
982 	 * it's been observed that the TxRingAddr is actually reset to garbage
983 	 * when C+ mode Tx is enabled in CpCmd.
984 	 */
985 	cpw32_f(HiTxRingAddr, 0);
986 	cpw32_f(HiTxRingAddr + 4, 0);
987 
988 	ring_dma = cp->ring_dma;
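	/* Program each ring address as two 32-bit halves; the double shift
	 * avoids an undefined 64-bit shift when dma_addr_t is 32 bits wide.
	 */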
989 	cpw32_f(RxRingAddr, ring_dma & 0xffffffff);
990 	cpw32_f(RxRingAddr + 4, (ring_dma >> 16) >> 16);
991 
992 	ring_dma += sizeof(struct cp_desc) * CP_RX_RING_SIZE;
993 	cpw32_f(TxRingAddr, ring_dma & 0xffffffff);
994 	cpw32_f(TxRingAddr + 4, (ring_dma >> 16) >> 16);
995 
996 	/*
997 	 * Strictly speaking, the datasheet says this should be enabled
998 	 * *before* setting the descriptor addresses. But what, then, would
999 	 * prevent it from doing DMA to random unconfigured addresses?
1000 	 * This variant appears to work fine.
1001 	 */
1002 	cpw8(Cmd, RxOn | TxOn);
1003 
1004 	netdev_reset_queue(cp->dev);
1005 }
1006 
1007 static void cp_enable_irq(struct cp_private *cp)
1008 {
1009 	cpw16_f(IntrMask, cp_intr_mask);
1010 }
1011 
1012 static void cp_init_hw (struct cp_private *cp)
1013 {
1014 	struct net_device *dev = cp->dev;
1015 
1016 	cp_reset_hw(cp);
1017 
1018 	cpw8_f (Cfg9346, Cfg9346_Unlock);
1019 
1020 	/* Restore our idea of the MAC address. */
1021 	cpw32_f (MAC0 + 0, le32_to_cpu (*(__le32 *) (dev->dev_addr + 0)));
1022 	cpw32_f (MAC0 + 4, le32_to_cpu (*(__le32 *) (dev->dev_addr + 4)));
1023 
1024 	cp_start_hw(cp);
1025 	cpw8(TxThresh, 0x06); /* XXX convert magic num to a constant */
1026 
1027 	__cp_set_rx_mode(dev);
1028 	cpw32_f (TxConfig, IFG | (TX_DMA_BURST << TxDMAShift));
1029 
1030 	cpw8(Config1, cpr8(Config1) | DriverLoaded | PMEnable);
1031 	/* Disable Wake-on-LAN. Can be turned on with ETHTOOL_SWOL */
1032 	cpw8(Config3, PARMEnable);
1033 	cp->wol_enabled = 0;
1034 
1035 	cpw8(Config5, cpr8(Config5) & PMEStatus);
1036 
1037 	cpw16(MultiIntr, 0);
1038 
1039 	cpw8_f(Cfg9346, Cfg9346_Lock);
1040 }
1041 
1042 static int cp_refill_rx(struct cp_private *cp)
1043 {
1044 	struct net_device *dev = cp->dev;
1045 	unsigned i;
1046 
1047 	for (i = 0; i < CP_RX_RING_SIZE; i++) {
1048 		struct sk_buff *skb;
1049 		dma_addr_t mapping;
1050 
1051 		skb = netdev_alloc_skb_ip_align(dev, cp->rx_buf_sz);
1052 		if (!skb)
1053 			goto err_out;
1054 
1055 		mapping = dma_map_single(&cp->pdev->dev, skb->data,
1056 					 cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1057 		cp->rx_skb[i] = skb;
1058 
1059 		cp->rx_ring[i].opts2 = 0;
1060 		cp->rx_ring[i].addr = cpu_to_le64(mapping);
1061 		if (i == (CP_RX_RING_SIZE - 1))
1062 			cp->rx_ring[i].opts1 =
1063 				cpu_to_le32(DescOwn | RingEnd | cp->rx_buf_sz);
1064 		else
1065 			cp->rx_ring[i].opts1 =
1066 				cpu_to_le32(DescOwn | cp->rx_buf_sz);
1067 	}
1068 
1069 	return 0;
1070 
1071 err_out:
1072 	cp_clean_rings(cp);
1073 	return -ENOMEM;
1074 }
1075 
1076 static void cp_init_rings_index (struct cp_private *cp)
1077 {
1078 	cp->rx_tail = 0;
1079 	cp->tx_head = cp->tx_tail = 0;
1080 }
1081 
1082 static int cp_init_rings (struct cp_private *cp)
1083 {
1084 	memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);
1085 	cp->tx_ring[CP_TX_RING_SIZE - 1].opts1 = cpu_to_le32(RingEnd);
1086 
1087 	cp_init_rings_index(cp);
1088 
1089 	return cp_refill_rx (cp);
1090 }
1091 
1092 static int cp_alloc_rings (struct cp_private *cp)
1093 {
1094 	struct device *d = &cp->pdev->dev;
1095 	void *mem;
1096 	int rc;
1097 
1098 	mem = dma_alloc_coherent(d, CP_RING_BYTES, &cp->ring_dma, GFP_KERNEL);
1099 	if (!mem)
1100 		return -ENOMEM;
1101 
1102 	cp->rx_ring = mem;
1103 	cp->tx_ring = &cp->rx_ring[CP_RX_RING_SIZE];
1104 
1105 	rc = cp_init_rings(cp);
1106 	if (rc < 0)
1107 		dma_free_coherent(d, CP_RING_BYTES, cp->rx_ring, cp->ring_dma);
1108 
1109 	return rc;
1110 }
1111 
1112 static void cp_clean_rings (struct cp_private *cp)
1113 {
1114 	struct cp_desc *desc;
1115 	unsigned i;
1116 
1117 	for (i = 0; i < CP_RX_RING_SIZE; i++) {
1118 		if (cp->rx_skb[i]) {
1119 			desc = cp->rx_ring + i;
1120 			dma_unmap_single(&cp->pdev->dev,le64_to_cpu(desc->addr),
1121 					 cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1122 			dev_kfree_skb(cp->rx_skb[i]);
1123 		}
1124 	}
1125 
1126 	for (i = 0; i < CP_TX_RING_SIZE; i++) {
1127 		if (cp->tx_skb[i]) {
1128 			struct sk_buff *skb = cp->tx_skb[i];
1129 
1130 			desc = cp->tx_ring + i;
1131 			dma_unmap_single(&cp->pdev->dev,le64_to_cpu(desc->addr),
1132 					 le32_to_cpu(desc->opts1) & 0xffff,
1133 					 PCI_DMA_TODEVICE);
1134 			if (le32_to_cpu(desc->opts1) & LastFrag)
1135 				dev_kfree_skb(skb);
1136 			cp->dev->stats.tx_dropped++;
1137 		}
1138 	}
1139 	netdev_reset_queue(cp->dev);
1140 
1141 	memset(cp->rx_ring, 0, sizeof(struct cp_desc) * CP_RX_RING_SIZE);
1142 	memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);
1143 
1144 	memset(cp->rx_skb, 0, sizeof(struct sk_buff *) * CP_RX_RING_SIZE);
1145 	memset(cp->tx_skb, 0, sizeof(struct sk_buff *) * CP_TX_RING_SIZE);
1146 }
1147 
1148 static void cp_free_rings (struct cp_private *cp)
1149 {
1150 	cp_clean_rings(cp);
1151 	dma_free_coherent(&cp->pdev->dev, CP_RING_BYTES, cp->rx_ring,
1152 			  cp->ring_dma);
1153 	cp->rx_ring = NULL;
1154 	cp->tx_ring = NULL;
1155 }
1156 
1157 static int cp_open (struct net_device *dev)
1158 {
1159 	struct cp_private *cp = netdev_priv(dev);
1160 	const int irq = cp->pdev->irq;
1161 	int rc;
1162 
1163 	netif_dbg(cp, ifup, dev, "enabling interface\n");
1164 
1165 	rc = cp_alloc_rings(cp);
1166 	if (rc)
1167 		return rc;
1168 
1169 	napi_enable(&cp->napi);
1170 
1171 	cp_init_hw(cp);
1172 
1173 	rc = request_irq(irq, cp_interrupt, IRQF_SHARED, dev->name, dev);
1174 	if (rc)
1175 		goto err_out_hw;
1176 
1177 	cp_enable_irq(cp);
1178 
1179 	netif_carrier_off(dev);
1180 	mii_check_media(&cp->mii_if, netif_msg_link(cp), true);
1181 	netif_start_queue(dev);
1182 
1183 	return 0;
1184 
1185 err_out_hw:
1186 	napi_disable(&cp->napi);
1187 	cp_stop_hw(cp);
1188 	cp_free_rings(cp);
1189 	return rc;
1190 }
1191 
1192 static int cp_close (struct net_device *dev)
1193 {
1194 	struct cp_private *cp = netdev_priv(dev);
1195 	unsigned long flags;
1196 
1197 	napi_disable(&cp->napi);
1198 
1199 	netif_dbg(cp, ifdown, dev, "disabling interface\n");
1200 
1201 	spin_lock_irqsave(&cp->lock, flags);
1202 
1203 	netif_stop_queue(dev);
1204 	netif_carrier_off(dev);
1205 
1206 	cp_stop_hw(cp);
1207 
1208 	spin_unlock_irqrestore(&cp->lock, flags);
1209 
1210 	free_irq(cp->pdev->irq, dev);
1211 
1212 	cp_free_rings(cp);
1213 	return 0;
1214 }
1215 
1216 static void cp_tx_timeout(struct net_device *dev)
1217 {
1218 	struct cp_private *cp = netdev_priv(dev);
1219 	unsigned long flags;
1220 	int rc;
1221 
1222 	netdev_warn(dev, "Transmit timeout, status %2x %4x %4x %4x\n",
1223 		    cpr8(Cmd), cpr16(CpCmd),
1224 		    cpr16(IntrStatus), cpr16(IntrMask));
1225 
1226 	spin_lock_irqsave(&cp->lock, flags);
1227 
1228 	cp_stop_hw(cp);
1229 	cp_clean_rings(cp);
1230 	rc = cp_init_rings(cp);
1231 	cp_start_hw(cp);
1232 	cp_enable_irq(cp);
1233 
1234 	netif_wake_queue(dev);
1235 
1236 	spin_unlock_irqrestore(&cp->lock, flags);
1237 }
1238 
1239 static int cp_change_mtu(struct net_device *dev, int new_mtu)
1240 {
1241 	struct cp_private *cp = netdev_priv(dev);
1242 
1243 	/* check for invalid MTU, according to hardware limits */
1244 	if (new_mtu < CP_MIN_MTU || new_mtu > CP_MAX_MTU)
1245 		return -EINVAL;
1246 
1247 	/* if network interface not up, no need for complexity */
1248 	if (!netif_running(dev)) {
1249 		dev->mtu = new_mtu;
1250 		cp_set_rxbufsize(cp);	/* set new rx buf size */
1251 		return 0;
1252 	}
1253 
1254 	/* network IS up, close it, reset MTU, and come up again. */
1255 	cp_close(dev);
1256 	dev->mtu = new_mtu;
1257 	cp_set_rxbufsize(cp);
1258 	return cp_open(dev);
1259 }
1260 
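/* Map MII register numbers onto the chip's memory-mapped copies of the
 * internal PHY registers; zero entries are not reachable through this window.
 */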
1261 static const char mii_2_8139_map[8] = {
1262 	BasicModeCtrl,
1263 	BasicModeStatus,
1264 	0,
1265 	0,
1266 	NWayAdvert,
1267 	NWayLPAR,
1268 	NWayExpansion,
1269 	0
1270 };
1271 
1272 static int mdio_read(struct net_device *dev, int phy_id, int location)
1273 {
1274 	struct cp_private *cp = netdev_priv(dev);
1275 
1276 	return location < 8 && mii_2_8139_map[location] ?
1277 	       readw(cp->regs + mii_2_8139_map[location]) : 0;
1278 }
1279 
1280 
1281 static void mdio_write(struct net_device *dev, int phy_id, int location,
1282 		       int value)
1283 {
1284 	struct cp_private *cp = netdev_priv(dev);
1285 
1286 	if (location == 0) {
1287 		cpw8(Cfg9346, Cfg9346_Unlock);
1288 		cpw16(BasicModeCtrl, value);
1289 		cpw8(Cfg9346, Cfg9346_Lock);
1290 	} else if (location < 8 && mii_2_8139_map[location])
1291 		cpw16(mii_2_8139_map[location], value);
1292 }
1293 
1294 /* Set the ethtool Wake-on-LAN settings */
1295 static int netdev_set_wol (struct cp_private *cp,
1296 			   const struct ethtool_wolinfo *wol)
1297 {
1298 	u8 options;
1299 
1300 	options = cpr8 (Config3) & ~(LinkUp | MagicPacket);
1301 	/* If WOL is being disabled, no need for complexity */
1302 	if (wol->wolopts) {
1303 		if (wol->wolopts & WAKE_PHY)	options |= LinkUp;
1304 		if (wol->wolopts & WAKE_MAGIC)	options |= MagicPacket;
1305 	}
1306 
1307 	cpw8 (Cfg9346, Cfg9346_Unlock);
1308 	cpw8 (Config3, options);
1309 	cpw8 (Cfg9346, Cfg9346_Lock);
1310 
1311 	options = 0; /* Paranoia setting */
1312 	options = cpr8 (Config5) & ~(UWF | MWF | BWF);
1313 	/* If WOL is being disabled, no need for complexity */
1314 	if (wol->wolopts) {
1315 		if (wol->wolopts & WAKE_UCAST)  options |= UWF;
1316 		if (wol->wolopts & WAKE_BCAST)	options |= BWF;
1317 		if (wol->wolopts & WAKE_MCAST)	options |= MWF;
1318 	}
1319 
1320 	cpw8 (Config5, options);
1321 
1322 	cp->wol_enabled = (wol->wolopts) ? 1 : 0;
1323 
1324 	return 0;
1325 }
1326 
1327 /* Get the ethtool Wake-on-LAN settings */
1328 static void netdev_get_wol (struct cp_private *cp,
1329 	             struct ethtool_wolinfo *wol)
1330 {
1331 	u8 options;
1332 
1333 	wol->wolopts   = 0; /* Start from scratch */
1334 	wol->supported = WAKE_PHY   | WAKE_BCAST | WAKE_MAGIC |
1335 		         WAKE_MCAST | WAKE_UCAST;
1336 	/* We don't need to go on if WOL is disabled */
1337 	if (!cp->wol_enabled) return;
1338 
1339 	options        = cpr8 (Config3);
1340 	if (options & LinkUp)        wol->wolopts |= WAKE_PHY;
1341 	if (options & MagicPacket)   wol->wolopts |= WAKE_MAGIC;
1342 
1343 	options        = 0; /* Paranoia setting */
1344 	options        = cpr8 (Config5);
1345 	if (options & UWF)           wol->wolopts |= WAKE_UCAST;
1346 	if (options & BWF)           wol->wolopts |= WAKE_BCAST;
1347 	if (options & MWF)           wol->wolopts |= WAKE_MCAST;
1348 }
1349 
1350 static void cp_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
1351 {
1352 	struct cp_private *cp = netdev_priv(dev);
1353 
1354 	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
1355 	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
1356 	strlcpy(info->bus_info, pci_name(cp->pdev), sizeof(info->bus_info));
1357 }
1358 
1359 static void cp_get_ringparam(struct net_device *dev,
1360 				struct ethtool_ringparam *ring)
1361 {
1362 	ring->rx_max_pending = CP_RX_RING_SIZE;
1363 	ring->tx_max_pending = CP_TX_RING_SIZE;
1364 	ring->rx_pending = CP_RX_RING_SIZE;
1365 	ring->tx_pending = CP_TX_RING_SIZE;
1366 }
1367 
1368 static int cp_get_regs_len(struct net_device *dev)
1369 {
1370 	return CP_REGS_SIZE;
1371 }
1372 
1373 static int cp_get_sset_count (struct net_device *dev, int sset)
1374 {
1375 	switch (sset) {
1376 	case ETH_SS_STATS:
1377 		return CP_NUM_STATS;
1378 	default:
1379 		return -EOPNOTSUPP;
1380 	}
1381 }
1382 
1383 static int cp_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1384 {
1385 	struct cp_private *cp = netdev_priv(dev);
1386 	int rc;
1387 	unsigned long flags;
1388 
1389 	spin_lock_irqsave(&cp->lock, flags);
1390 	rc = mii_ethtool_gset(&cp->mii_if, cmd);
1391 	spin_unlock_irqrestore(&cp->lock, flags);
1392 
1393 	return rc;
1394 }
1395 
1396 static int cp_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1397 {
1398 	struct cp_private *cp = netdev_priv(dev);
1399 	int rc;
1400 	unsigned long flags;
1401 
1402 	spin_lock_irqsave(&cp->lock, flags);
1403 	rc = mii_ethtool_sset(&cp->mii_if, cmd);
1404 	spin_unlock_irqrestore(&cp->lock, flags);
1405 
1406 	return rc;
1407 }
1408 
1409 static int cp_nway_reset(struct net_device *dev)
1410 {
1411 	struct cp_private *cp = netdev_priv(dev);
1412 	return mii_nway_restart(&cp->mii_if);
1413 }
1414 
1415 static u32 cp_get_msglevel(struct net_device *dev)
1416 {
1417 	struct cp_private *cp = netdev_priv(dev);
1418 	return cp->msg_enable;
1419 }
1420 
1421 static void cp_set_msglevel(struct net_device *dev, u32 value)
1422 {
1423 	struct cp_private *cp = netdev_priv(dev);
1424 	cp->msg_enable = value;
1425 }
1426 
1427 static int cp_set_features(struct net_device *dev, netdev_features_t features)
1428 {
1429 	struct cp_private *cp = netdev_priv(dev);
1430 	unsigned long flags;
1431 
1432 	if (!((dev->features ^ features) & NETIF_F_RXCSUM))
1433 		return 0;
1434 
1435 	spin_lock_irqsave(&cp->lock, flags);
1436 
1437 	if (features & NETIF_F_RXCSUM)
1438 		cp->cpcmd |= RxChkSum;
1439 	else
1440 		cp->cpcmd &= ~RxChkSum;
1441 
1442 	if (features & NETIF_F_HW_VLAN_CTAG_RX)
1443 		cp->cpcmd |= RxVlanOn;
1444 	else
1445 		cp->cpcmd &= ~RxVlanOn;
1446 
1447 	cpw16_f(CpCmd, cp->cpcmd);
1448 	spin_unlock_irqrestore(&cp->lock, flags);
1449 
1450 	return 0;
1451 }
1452 
1453 static void cp_get_regs(struct net_device *dev, struct ethtool_regs *regs,
1454 		        void *p)
1455 {
1456 	struct cp_private *cp = netdev_priv(dev);
1457 	unsigned long flags;
1458 
1459 	if (regs->len < CP_REGS_SIZE)
1460 		return /* -EINVAL */;
1461 
1462 	regs->version = CP_REGS_VER;
1463 
1464 	spin_lock_irqsave(&cp->lock, flags);
1465 	memcpy_fromio(p, cp->regs, CP_REGS_SIZE);
1466 	spin_unlock_irqrestore(&cp->lock, flags);
1467 }
1468 
1469 static void cp_get_wol (struct net_device *dev, struct ethtool_wolinfo *wol)
1470 {
1471 	struct cp_private *cp = netdev_priv(dev);
1472 	unsigned long flags;
1473 
1474 	spin_lock_irqsave (&cp->lock, flags);
1475 	netdev_get_wol (cp, wol);
1476 	spin_unlock_irqrestore (&cp->lock, flags);
1477 }
1478 
1479 static int cp_set_wol (struct net_device *dev, struct ethtool_wolinfo *wol)
1480 {
1481 	struct cp_private *cp = netdev_priv(dev);
1482 	unsigned long flags;
1483 	int rc;
1484 
1485 	spin_lock_irqsave (&cp->lock, flags);
1486 	rc = netdev_set_wol (cp, wol);
1487 	spin_unlock_irqrestore (&cp->lock, flags);
1488 
1489 	return rc;
1490 }
1491 
1492 static void cp_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
1493 {
1494 	switch (stringset) {
1495 	case ETH_SS_STATS:
1496 		memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
1497 		break;
1498 	default:
1499 		BUG();
1500 		break;
1501 	}
1502 }
1503 
1504 static void cp_get_ethtool_stats (struct net_device *dev,
1505 				  struct ethtool_stats *estats, u64 *tmp_stats)
1506 {
1507 	struct cp_private *cp = netdev_priv(dev);
1508 	struct cp_dma_stats *nic_stats;
1509 	dma_addr_t dma;
1510 	int i;
1511 
1512 	nic_stats = dma_alloc_coherent(&cp->pdev->dev, sizeof(*nic_stats),
1513 				       &dma, GFP_KERNEL);
1514 	if (!nic_stats)
1515 		return;
1516 
1517 	/* begin NIC statistics dump */
1518 	cpw32(StatsAddr + 4, (u64)dma >> 32);
1519 	cpw32(StatsAddr, ((u64)dma & DMA_BIT_MASK(32)) | DumpStats);
1520 	cpr32(StatsAddr);
1521 
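	/* The chip clears DumpStats once the 64-byte statistics block has
	 * been written back to the buffer.
	 */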
1522 	for (i = 0; i < 1000; i++) {
1523 		if ((cpr32(StatsAddr) & DumpStats) == 0)
1524 			break;
1525 		udelay(10);
1526 	}
1527 	cpw32(StatsAddr, 0);
1528 	cpw32(StatsAddr + 4, 0);
1529 	cpr32(StatsAddr);
1530 
1531 	i = 0;
1532 	tmp_stats[i++] = le64_to_cpu(nic_stats->tx_ok);
1533 	tmp_stats[i++] = le64_to_cpu(nic_stats->rx_ok);
1534 	tmp_stats[i++] = le64_to_cpu(nic_stats->tx_err);
1535 	tmp_stats[i++] = le32_to_cpu(nic_stats->rx_err);
1536 	tmp_stats[i++] = le16_to_cpu(nic_stats->rx_fifo);
1537 	tmp_stats[i++] = le16_to_cpu(nic_stats->frame_align);
1538 	tmp_stats[i++] = le32_to_cpu(nic_stats->tx_ok_1col);
1539 	tmp_stats[i++] = le32_to_cpu(nic_stats->tx_ok_mcol);
1540 	tmp_stats[i++] = le64_to_cpu(nic_stats->rx_ok_phys);
1541 	tmp_stats[i++] = le64_to_cpu(nic_stats->rx_ok_bcast);
1542 	tmp_stats[i++] = le32_to_cpu(nic_stats->rx_ok_mcast);
1543 	tmp_stats[i++] = le16_to_cpu(nic_stats->tx_abort);
1544 	tmp_stats[i++] = le16_to_cpu(nic_stats->tx_underrun);
1545 	tmp_stats[i++] = cp->cp_stats.rx_frags;
1546 	BUG_ON(i != CP_NUM_STATS);
1547 
1548 	dma_free_coherent(&cp->pdev->dev, sizeof(*nic_stats), nic_stats, dma);
1549 }
1550 
1551 static const struct ethtool_ops cp_ethtool_ops = {
1552 	.get_drvinfo		= cp_get_drvinfo,
1553 	.get_regs_len		= cp_get_regs_len,
1554 	.get_sset_count		= cp_get_sset_count,
1555 	.get_settings		= cp_get_settings,
1556 	.set_settings		= cp_set_settings,
1557 	.nway_reset		= cp_nway_reset,
1558 	.get_link		= ethtool_op_get_link,
1559 	.get_msglevel		= cp_get_msglevel,
1560 	.set_msglevel		= cp_set_msglevel,
1561 	.get_regs		= cp_get_regs,
1562 	.get_wol		= cp_get_wol,
1563 	.set_wol		= cp_set_wol,
1564 	.get_strings		= cp_get_strings,
1565 	.get_ethtool_stats	= cp_get_ethtool_stats,
1566 	.get_eeprom_len		= cp_get_eeprom_len,
1567 	.get_eeprom		= cp_get_eeprom,
1568 	.set_eeprom		= cp_set_eeprom,
1569 	.get_ringparam		= cp_get_ringparam,
1570 };
1571 
1572 static int cp_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
1573 {
1574 	struct cp_private *cp = netdev_priv(dev);
1575 	int rc;
1576 	unsigned long flags;
1577 
1578 	if (!netif_running(dev))
1579 		return -EINVAL;
1580 
1581 	spin_lock_irqsave(&cp->lock, flags);
1582 	rc = generic_mii_ioctl(&cp->mii_if, if_mii(rq), cmd, NULL);
1583 	spin_unlock_irqrestore(&cp->lock, flags);
1584 	return rc;
1585 }
1586 
1587 static int cp_set_mac_address(struct net_device *dev, void *p)
1588 {
1589 	struct cp_private *cp = netdev_priv(dev);
1590 	struct sockaddr *addr = p;
1591 
1592 	if (!is_valid_ether_addr(addr->sa_data))
1593 		return -EADDRNOTAVAIL;
1594 
1595 	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1596 
1597 	spin_lock_irq(&cp->lock);
1598 
1599 	cpw8_f(Cfg9346, Cfg9346_Unlock);
1600 	cpw32_f(MAC0 + 0, le32_to_cpu (*(__le32 *) (dev->dev_addr + 0)));
1601 	cpw32_f(MAC0 + 4, le32_to_cpu (*(__le32 *) (dev->dev_addr + 4)));
1602 	cpw8_f(Cfg9346, Cfg9346_Lock);
1603 
1604 	spin_unlock_irq(&cp->lock);
1605 
1606 	return 0;
1607 }
1608 
1609 /* Serial EEPROM section. */
1610 
1611 /*  EEPROM_Ctrl bits. */
1612 #define EE_SHIFT_CLK	0x04	/* EEPROM shift clock. */
1613 #define EE_CS			0x08	/* EEPROM chip select. */
1614 #define EE_DATA_WRITE	0x02	/* EEPROM chip data in. */
1615 #define EE_WRITE_0		0x00
1616 #define EE_WRITE_1		0x02
1617 #define EE_DATA_READ	0x01	/* EEPROM chip data out. */
1618 #define EE_ENB			(0x80 | EE_CS)
1619 
1620 /* Delay between EEPROM clock transitions.
   No extra delay is needed with 33 MHz PCI, but 66 MHz may change this.
1622  */
1623 
1624 #define eeprom_delay()	readb(ee_addr)
1625 
/* The EEPROM commands include the always-set leading bit. */
1627 #define EE_EXTEND_CMD	(4)
1628 #define EE_WRITE_CMD	(5)
1629 #define EE_READ_CMD		(6)
1630 #define EE_ERASE_CMD	(7)
1631 
1632 #define EE_EWDS_ADDR	(0)
1633 #define EE_WRAL_ADDR	(1)
1634 #define EE_ERAL_ADDR	(2)
1635 #define EE_EWEN_ADDR	(3)
1636 
1637 #define CP_EEPROM_MAGIC PCI_DEVICE_ID_REALTEK_8139
1638 
1639 static void eeprom_cmd_start(void __iomem *ee_addr)
1640 {
1641 	writeb (EE_ENB & ~EE_CS, ee_addr);
1642 	writeb (EE_ENB, ee_addr);
1643 	eeprom_delay ();
1644 }
1645 
1646 static void eeprom_cmd(void __iomem *ee_addr, int cmd, int cmd_len)
1647 {
1648 	int i;
1649 
1650 	/* Shift the command bits out. */
1651 	for (i = cmd_len - 1; i >= 0; i--) {
1652 		int dataval = (cmd & (1 << i)) ? EE_DATA_WRITE : 0;
1653 		writeb (EE_ENB | dataval, ee_addr);
1654 		eeprom_delay ();
1655 		writeb (EE_ENB | dataval | EE_SHIFT_CLK, ee_addr);
1656 		eeprom_delay ();
1657 	}
1658 	writeb (EE_ENB, ee_addr);
1659 	eeprom_delay ();
1660 }
1661 
1662 static void eeprom_cmd_end(void __iomem *ee_addr)
1663 {
1664 	writeb(0, ee_addr);
1665 	eeprom_delay ();
1666 }
1667 
1668 static void eeprom_extend_cmd(void __iomem *ee_addr, int extend_cmd,
1669 			      int addr_len)
1670 {
1671 	int cmd = (EE_EXTEND_CMD << addr_len) | (extend_cmd << (addr_len - 2));
1672 
1673 	eeprom_cmd_start(ee_addr);
1674 	eeprom_cmd(ee_addr, cmd, 3 + addr_len);
1675 	eeprom_cmd_end(ee_addr);
1676 }
1677 
1678 static u16 read_eeprom (void __iomem *ioaddr, int location, int addr_len)
1679 {
1680 	int i;
1681 	u16 retval = 0;
1682 	void __iomem *ee_addr = ioaddr + Cfg9346;
1683 	int read_cmd = location | (EE_READ_CMD << addr_len);
1684 
1685 	eeprom_cmd_start(ee_addr);
1686 	eeprom_cmd(ee_addr, read_cmd, 3 + addr_len);
1687 
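	/* Clock in the 16 data bits, MSB first. */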
1688 	for (i = 16; i > 0; i--) {
1689 		writeb (EE_ENB | EE_SHIFT_CLK, ee_addr);
1690 		eeprom_delay ();
1691 		retval =
1692 		    (retval << 1) | ((readb (ee_addr) & EE_DATA_READ) ? 1 :
1693 				     0);
1694 		writeb (EE_ENB, ee_addr);
1695 		eeprom_delay ();
1696 	}
1697 
1698 	eeprom_cmd_end(ee_addr);
1699 
1700 	return retval;
1701 }
1702 
1703 static void write_eeprom(void __iomem *ioaddr, int location, u16 val,
1704 			 int addr_len)
1705 {
1706 	int i;
1707 	void __iomem *ee_addr = ioaddr + Cfg9346;
1708 	int write_cmd = location | (EE_WRITE_CMD << addr_len);
1709 
1710 	eeprom_extend_cmd(ee_addr, EE_EWEN_ADDR, addr_len);
1711 
1712 	eeprom_cmd_start(ee_addr);
1713 	eeprom_cmd(ee_addr, write_cmd, 3 + addr_len);
1714 	eeprom_cmd(ee_addr, val, 16);
1715 	eeprom_cmd_end(ee_addr);
1716 
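	/* Poll until the EEPROM signals that its internal write cycle has
	 * finished.
	 */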
1717 	eeprom_cmd_start(ee_addr);
1718 	for (i = 0; i < 20000; i++)
1719 		if (readb(ee_addr) & EE_DATA_READ)
1720 			break;
1721 	eeprom_cmd_end(ee_addr);
1722 
1723 	eeprom_extend_cmd(ee_addr, EE_EWDS_ADDR, addr_len);
1724 }
1725 
1726 static int cp_get_eeprom_len(struct net_device *dev)
1727 {
1728 	struct cp_private *cp = netdev_priv(dev);
1729 	int size;
1730 
1731 	spin_lock_irq(&cp->lock);
1732 	size = read_eeprom(cp->regs, 0, 8) == 0x8129 ? 256 : 128;
1733 	spin_unlock_irq(&cp->lock);
1734 
1735 	return size;
1736 }
1737 
1738 static int cp_get_eeprom(struct net_device *dev,
1739 			 struct ethtool_eeprom *eeprom, u8 *data)
1740 {
1741 	struct cp_private *cp = netdev_priv(dev);
1742 	unsigned int addr_len;
1743 	u16 val;
1744 	u32 offset = eeprom->offset >> 1;
1745 	u32 len = eeprom->len;
1746 	u32 i = 0;
1747 
1748 	eeprom->magic = CP_EEPROM_MAGIC;
1749 
1750 	spin_lock_irq(&cp->lock);
1751 
1752 	addr_len = read_eeprom(cp->regs, 0, 8) == 0x8129 ? 8 : 6;
1753 
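	/* The EEPROM is addressed in 16-bit words; handle a leading odd byte,
	 * then whole words, then a trailing odd byte separately.
	 */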
1754 	if (eeprom->offset & 1) {
1755 		val = read_eeprom(cp->regs, offset, addr_len);
1756 		data[i++] = (u8)(val >> 8);
1757 		offset++;
1758 	}
1759 
1760 	while (i < len - 1) {
1761 		val = read_eeprom(cp->regs, offset, addr_len);
1762 		data[i++] = (u8)val;
1763 		data[i++] = (u8)(val >> 8);
1764 		offset++;
1765 	}
1766 
1767 	if (i < len) {
1768 		val = read_eeprom(cp->regs, offset, addr_len);
1769 		data[i] = (u8)val;
1770 	}
1771 
1772 	spin_unlock_irq(&cp->lock);
1773 	return 0;
1774 }
1775 
1776 static int cp_set_eeprom(struct net_device *dev,
1777 			 struct ethtool_eeprom *eeprom, u8 *data)
1778 {
1779 	struct cp_private *cp = netdev_priv(dev);
1780 	unsigned int addr_len;
1781 	u16 val;
1782 	u32 offset = eeprom->offset >> 1;
1783 	u32 len = eeprom->len;
1784 	u32 i = 0;
1785 
1786 	if (eeprom->magic != CP_EEPROM_MAGIC)
1787 		return -EINVAL;
1788 
1789 	spin_lock_irq(&cp->lock);
1790 
1791 	addr_len = read_eeprom(cp->regs, 0, 8) == 0x8129 ? 8 : 6;
1792 
1793 	if (eeprom->offset & 1) {
1794 		val = read_eeprom(cp->regs, offset, addr_len) & 0xff;
1795 		val |= (u16)data[i++] << 8;
1796 		write_eeprom(cp->regs, offset, val, addr_len);
1797 		offset++;
1798 	}
1799 
1800 	while (i < len - 1) {
1801 		val = (u16)data[i++];
1802 		val |= (u16)data[i++] << 8;
1803 		write_eeprom(cp->regs, offset, val, addr_len);
1804 		offset++;
1805 	}
1806 
1807 	if (i < len) {
1808 		val = read_eeprom(cp->regs, offset, addr_len) & 0xff00;
1809 		val |= (u16)data[i];
1810 		write_eeprom(cp->regs, offset, val, addr_len);
1811 	}
1812 
1813 	spin_unlock_irq(&cp->lock);
1814 	return 0;
1815 }
1816 
/* Arm PME# generation and put the board into the D3hot low-power state so it can signal wakeup */
1818 static void cp_set_d3_state (struct cp_private *cp)
1819 {
1820 	pci_enable_wake (cp->pdev, 0, 1); /* Enable PME# generation */
1821 	pci_set_power_state (cp->pdev, PCI_D3hot);
1822 }
1823 
1824 static const struct net_device_ops cp_netdev_ops = {
1825 	.ndo_open		= cp_open,
1826 	.ndo_stop		= cp_close,
1827 	.ndo_validate_addr	= eth_validate_addr,
1828 	.ndo_set_mac_address 	= cp_set_mac_address,
1829 	.ndo_set_rx_mode	= cp_set_rx_mode,
1830 	.ndo_get_stats		= cp_get_stats,
1831 	.ndo_do_ioctl		= cp_ioctl,
1832 	.ndo_start_xmit		= cp_start_xmit,
1833 	.ndo_tx_timeout		= cp_tx_timeout,
1834 	.ndo_set_features	= cp_set_features,
1835 	.ndo_change_mtu		= cp_change_mtu,
1836 
1837 #ifdef CONFIG_NET_POLL_CONTROLLER
1838 	.ndo_poll_controller	= cp_poll_controller,
1839 #endif
1840 };
1841 
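/* PCI probe: reject plain RTL8139 chips (revision < 0x20, which the
 * 8139too driver handles), allocate and initialize the net_device,
 * enable the PCI device, map the MMIO registers, choose a DMA mask,
 * read the station address from the EEPROM and register the netdev.
 */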
1842 static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
1843 {
1844 	struct net_device *dev;
1845 	struct cp_private *cp;
1846 	int rc;
1847 	void __iomem *regs;
1848 	resource_size_t pciaddr;
1849 	unsigned int addr_len, i, pci_using_dac;
1850 
1851 #ifndef MODULE
1852 	static int version_printed;
1853 	if (version_printed++ == 0)
1854 		pr_info("%s", version);
1855 #endif
1856 
1857 	if (pdev->vendor == PCI_VENDOR_ID_REALTEK &&
1858 	    pdev->device == PCI_DEVICE_ID_REALTEK_8139 && pdev->revision < 0x20) {
1859 		dev_info(&pdev->dev,
1860 			 "This (id %04x:%04x rev %02x) is not an 8139C+ compatible chip, use 8139too\n",
1861 			 pdev->vendor, pdev->device, pdev->revision);
1862 		return -ENODEV;
1863 	}
1864 
1865 	dev = alloc_etherdev(sizeof(struct cp_private));
1866 	if (!dev)
1867 		return -ENOMEM;
1868 	SET_NETDEV_DEV(dev, &pdev->dev);
1869 
1870 	cp = netdev_priv(dev);
1871 	cp->pdev = pdev;
1872 	cp->dev = dev;
1873 	cp->msg_enable = (debug < 0 ? CP_DEF_MSG_ENABLE : debug);
1874 	spin_lock_init (&cp->lock);
1875 	cp->mii_if.dev = dev;
1876 	cp->mii_if.mdio_read = mdio_read;
1877 	cp->mii_if.mdio_write = mdio_write;
1878 	cp->mii_if.phy_id = CP_INTERNAL_PHY;
1879 	cp->mii_if.phy_id_mask = 0x1f;
1880 	cp->mii_if.reg_num_mask = 0x1f;
1881 	cp_set_rxbufsize(cp);
1882 
1883 	rc = pci_enable_device(pdev);
1884 	if (rc)
1885 		goto err_out_free;
1886 
1887 	rc = pci_set_mwi(pdev);
1888 	if (rc)
1889 		goto err_out_disable;
1890 
1891 	rc = pci_request_regions(pdev, DRV_NAME);
1892 	if (rc)
1893 		goto err_out_mwi;
1894 
1895 	pciaddr = pci_resource_start(pdev, 1);
1896 	if (!pciaddr) {
1897 		rc = -EIO;
1898 		dev_err(&pdev->dev, "no MMIO resource\n");
1899 		goto err_out_res;
1900 	}
1901 	if (pci_resource_len(pdev, 1) < CP_REGS_SIZE) {
1902 		rc = -EIO;
1903 		dev_err(&pdev->dev, "MMIO resource (%llx) too small\n",
1904 		       (unsigned long long)pci_resource_len(pdev, 1));
1905 		goto err_out_res;
1906 	}
1907 
1908 	/* Configure DMA attributes. */
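	/* Try a 64-bit DMA mask first and fall back to 32-bit.  A
	 * present-day driver would likely collapse this into a single
	 * dma_set_mask_and_coherent() call; the legacy
	 * pci_set_dma_mask()/pci_set_consistent_dma_mask() pair is used
	 * here.
	 */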
1909 	if ((sizeof(dma_addr_t) > 4) &&
1910 	    !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) &&
1911 	    !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
1912 		pci_using_dac = 1;
1913 	} else {
1914 		pci_using_dac = 0;
1915 
1916 		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
1917 		if (rc) {
1918 			dev_err(&pdev->dev,
1919 				"No usable DMA configuration, aborting\n");
1920 			goto err_out_res;
1921 		}
1922 		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
1923 		if (rc) {
1924 			dev_err(&pdev->dev,
1925 				"No usable consistent DMA configuration, aborting\n");
1926 			goto err_out_res;
1927 		}
1928 	}
1929 
1930 	cp->cpcmd = (pci_using_dac ? PCIDAC : 0) |
1931 		    PCIMulRW | RxChkSum | CpRxOn | CpTxOn;
1932 
1933 	dev->features |= NETIF_F_RXCSUM;
1934 	dev->hw_features |= NETIF_F_RXCSUM;
1935 
1936 	regs = ioremap(pciaddr, CP_REGS_SIZE);
1937 	if (!regs) {
1938 		rc = -EIO;
1939 		dev_err(&pdev->dev, "Cannot map PCI MMIO (%Lx@%Lx)\n",
1940 			(unsigned long long)pci_resource_len(pdev, 1),
1941 		       (unsigned long long)pciaddr);
1942 		goto err_out_res;
1943 	}
1944 	cp->regs = regs;
1945 
1946 	cp_stop_hw(cp);
1947 
1948 	/* read MAC address from EEPROM */
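	/* The station address lives in EEPROM words 7..9; each word is
	 * stored low byte first, hence the cpu_to_le16() conversion into
	 * the __le16 view of dev->dev_addr below.
	 */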
1949 	addr_len = read_eeprom (regs, 0, 8) == 0x8129 ? 8 : 6;
1950 	for (i = 0; i < 3; i++)
1951 		((__le16 *) (dev->dev_addr))[i] =
1952 		    cpu_to_le16(read_eeprom (regs, i + 7, addr_len));
1953 
1954 	dev->netdev_ops = &cp_netdev_ops;
1955 	netif_napi_add(dev, &cp->napi, cp_rx_poll, 16);
1956 	dev->ethtool_ops = &cp_ethtool_ops;
1957 	dev->watchdog_timeo = TX_TIMEOUT;
1958 
1959 	dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
1960 
1961 	if (pci_using_dac)
1962 		dev->features |= NETIF_F_HIGHDMA;
1963 
	/* Tx offloads (scatter-gather, IP checksumming, TSO) are still
	 * considered experimental, so advertise them only in hw_features:
	 * they stay disabled by default and can be enabled via ethtool -K.
	 */
1965 	dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
1966 		NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
1967 	dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
1968 		NETIF_F_HIGHDMA;
1969 
1970 	rc = register_netdev(dev);
1971 	if (rc)
1972 		goto err_out_iomap;
1973 
1974 	netdev_info(dev, "RTL-8139C+ at 0x%p, %pM, IRQ %d\n",
1975 		    regs, dev->dev_addr, pdev->irq);
1976 
1977 	pci_set_drvdata(pdev, dev);
1978 
	/* enable PCI bus mastering (memory-write-invalidate was already
	   enabled above via pci_set_mwi) */
1980 	pci_set_master(pdev);
1981 
1982 	if (cp->wol_enabled)
1983 		cp_set_d3_state (cp);
1984 
1985 	return 0;
1986 
1987 err_out_iomap:
1988 	iounmap(regs);
1989 err_out_res:
1990 	pci_release_regions(pdev);
1991 err_out_mwi:
1992 	pci_clear_mwi(pdev);
1993 err_out_disable:
1994 	pci_disable_device(pdev);
1995 err_out_free:
1996 	free_netdev(dev);
1997 	return rc;
1998 }
1999 
2000 static void cp_remove_one (struct pci_dev *pdev)
2001 {
2002 	struct net_device *dev = pci_get_drvdata(pdev);
2003 	struct cp_private *cp = netdev_priv(dev);
2004 
2005 	unregister_netdev(dev);
2006 	iounmap(cp->regs);
2007 	if (cp->wol_enabled)
2008 		pci_set_power_state (pdev, PCI_D0);
2009 	pci_release_regions(pdev);
2010 	pci_clear_mwi(pdev);
2011 	pci_disable_device(pdev);
2012 	pci_set_drvdata(pdev, NULL);
2013 	free_netdev(dev);
2014 }
2015 
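/* Legacy PCI power-management entry points.  A more modern driver would
 * typically expose these through a struct dev_pm_ops (for example via
 * SIMPLE_DEV_PM_OPS), but the older pci_driver suspend/resume hooks are
 * retained here.
 */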
2016 #ifdef CONFIG_PM
2017 static int cp_suspend (struct pci_dev *pdev, pm_message_t state)
2018 {
2019 	struct net_device *dev = pci_get_drvdata(pdev);
2020 	struct cp_private *cp = netdev_priv(dev);
2021 	unsigned long flags;
2022 
2023 	if (!netif_running(dev))
2024 		return 0;
2025 
2026 	netif_device_detach (dev);
2027 	netif_stop_queue (dev);
2028 
2029 	spin_lock_irqsave (&cp->lock, flags);
2030 
2031 	/* Disable Rx and Tx */
2032 	cpw16 (IntrMask, 0);
	cpw8  (Cmd, cpr8 (Cmd) & ~(RxOn | TxOn));
2034 
2035 	spin_unlock_irqrestore (&cp->lock, flags);
2036 
2037 	pci_save_state(pdev);
2038 	pci_enable_wake(pdev, pci_choose_state(pdev, state), cp->wol_enabled);
2039 	pci_set_power_state(pdev, pci_choose_state(pdev, state));
2040 
2041 	return 0;
2042 }
2043 
2044 static int cp_resume (struct pci_dev *pdev)
2045 {
2046 	struct net_device *dev = pci_get_drvdata (pdev);
2047 	struct cp_private *cp = netdev_priv(dev);
2048 	unsigned long flags;
2049 
2050 	if (!netif_running(dev))
2051 		return 0;
2052 
2053 	netif_device_attach (dev);
2054 
2055 	pci_set_power_state(pdev, PCI_D0);
2056 	pci_restore_state(pdev);
2057 	pci_enable_wake(pdev, PCI_D0, 0);
2058 
	/* FIXME: breakage may occur if the Rx ring buffer is depleted */
2060 	cp_init_rings_index (cp);
2061 	cp_init_hw (cp);
2062 	cp_enable_irq(cp);
2063 	netif_start_queue (dev);
2064 
2065 	spin_lock_irqsave (&cp->lock, flags);
2066 
2067 	mii_check_media(&cp->mii_if, netif_msg_link(cp), false);
2068 
2069 	spin_unlock_irqrestore (&cp->lock, flags);
2070 
2071 	return 0;
2072 }
2073 #endif /* CONFIG_PM */
2074 
2075 static struct pci_driver cp_driver = {
2076 	.name         = DRV_NAME,
2077 	.id_table     = cp_pci_tbl,
2078 	.probe        =	cp_init_one,
2079 	.remove       = cp_remove_one,
2080 #ifdef CONFIG_PM
2081 	.resume       = cp_resume,
2082 	.suspend      = cp_suspend,
2083 #endif
2084 };
2085 
2086 static int __init cp_init (void)
2087 {
2088 #ifdef MODULE
2089 	pr_info("%s", version);
2090 #endif
2091 	return pci_register_driver(&cp_driver);
2092 }
2093 
2094 static void __exit cp_exit (void)
2095 {
2096 	pci_unregister_driver (&cp_driver);
2097 }
2098 
2099 module_init(cp_init);
2100 module_exit(cp_exit);
2101