1 /* 8139cp.c: A Linux PCI Ethernet driver for the RealTek 8139C+ chips. */
2 /*
3 	Copyright 2001-2004 Jeff Garzik <jgarzik@pobox.com>
4 
5 	Copyright (C) 2001, 2002 David S. Miller (davem@redhat.com) [tg3.c]
6 	Copyright (C) 2000, 2001 David S. Miller (davem@redhat.com) [sungem.c]
7 	Copyright 2001 Manfred Spraul				    [natsemi.c]
8 	Copyright 1999-2001 by Donald Becker.			    [natsemi.c]
9        	Written 1997-2001 by Donald Becker.			    [8139too.c]
10 	Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>. [acenic.c]
11 
12 	This software may be used and distributed according to the terms of
13 	the GNU General Public License (GPL), incorporated herein by reference.
14 	Drivers based on or derived from this code fall under the GPL and must
15 	retain the authorship, copyright and license notice.  This file is not
16 	a complete program and may only be used when the entire operating
17 	system is licensed under the GPL.
18 
19 	See the file COPYING in this distribution for more information.
20 
21 	Contributors:
22 
23 		Wake-on-LAN support - Felipe Damasio <felipewd@terra.com.br>
24 		PCI suspend/resume  - Felipe Damasio <felipewd@terra.com.br>
25 		LinkChg interrupt   - Felipe Damasio <felipewd@terra.com.br>
26 
27 	TODO:
28 	* Test Tx checksumming thoroughly
29 
30 	Low priority TODO:
31 	* Complete reset on PciErr
32 	* Consider Rx interrupt mitigation using TimerIntr
33 	* Investigate using skb->priority with h/w VLAN priority
34 	* Investigate using High Priority Tx Queue with skb->priority
35 	* Adjust Rx FIFO threshold and Max Rx DMA burst on Rx FIFO error
36 	* Adjust Tx FIFO threshold and Max Tx DMA burst on Tx FIFO error
37 	* Implement Tx software interrupt mitigation via
38 	  Tx descriptor bit
39 	* The real minimum of CP_MIN_MTU is 4 bytes.  However,
40 	  for this to be supported, one must(?) turn on packet padding.
41 	* Support external MII transceivers (patch available)
42 
43 	NOTES:
44 	* TX checksumming is considered experimental.  It is off by
45 	  default; use ethtool to turn it on.
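	  (For example "ethtool -K ethX tx on", where "ethX" stands in for
	  the actual interface name.)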
46 
47  */
48 
49 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
50 
51 #define DRV_NAME		"8139cp"
52 #define DRV_VERSION		"1.3"
53 #define DRV_RELDATE		"Mar 22, 2004"
54 
55 
56 #include <linux/module.h>
57 #include <linux/moduleparam.h>
58 #include <linux/kernel.h>
59 #include <linux/compiler.h>
60 #include <linux/netdevice.h>
61 #include <linux/etherdevice.h>
62 #include <linux/init.h>
63 #include <linux/interrupt.h>
64 #include <linux/pci.h>
65 #include <linux/dma-mapping.h>
66 #include <linux/delay.h>
67 #include <linux/ethtool.h>
68 #include <linux/gfp.h>
69 #include <linux/mii.h>
70 #include <linux/if_vlan.h>
71 #include <linux/crc32.h>
72 #include <linux/in.h>
73 #include <linux/ip.h>
74 #include <linux/tcp.h>
75 #include <linux/udp.h>
76 #include <linux/cache.h>
77 #include <asm/io.h>
78 #include <asm/irq.h>
79 #include <asm/uaccess.h>
80 
81 /* These identify the driver base version and may not be removed. */
82 static char version[] =
83 DRV_NAME ": 10/100 PCI Ethernet driver v" DRV_VERSION " (" DRV_RELDATE ")\n";
84 
85 MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>");
86 MODULE_DESCRIPTION("RealTek RTL-8139C+ series 10/100 PCI Ethernet driver");
87 MODULE_VERSION(DRV_VERSION);
88 MODULE_LICENSE("GPL");
89 
90 static int debug = -1;
91 module_param(debug, int, 0);
92 MODULE_PARM_DESC (debug, "8139cp: bitmapped message enable number");
93 
94 /* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
95    The RTL chips use a 64-element hash table based on the Ethernet CRC.  */
96 static int multicast_filter_limit = 32;
97 module_param(multicast_filter_limit, int, 0);
98 MODULE_PARM_DESC (multicast_filter_limit, "8139cp: maximum number of filtered multicast addresses");
99 
100 #define CP_DEF_MSG_ENABLE	(NETIF_MSG_DRV		| \
101 				 NETIF_MSG_PROBE 	| \
102 				 NETIF_MSG_LINK)
103 #define CP_NUM_STATS		14	/* struct cp_dma_stats, plus one */
104 #define CP_STATS_SIZE		64	/* size in bytes of DMA stats block */
105 #define CP_REGS_SIZE		(0xff + 1)
106 #define CP_REGS_VER		1		/* version 1 */
107 #define CP_RX_RING_SIZE		64
108 #define CP_TX_RING_SIZE		64
109 #define CP_RING_BYTES		\
110 		((sizeof(struct cp_desc) * CP_RX_RING_SIZE) +	\
111 		 (sizeof(struct cp_desc) * CP_TX_RING_SIZE) +	\
112 		 CP_STATS_SIZE)
113 #define NEXT_TX(N)		(((N) + 1) & (CP_TX_RING_SIZE - 1))
114 #define NEXT_RX(N)		(((N) + 1) & (CP_RX_RING_SIZE - 1))
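/* Number of free Tx descriptors.  One slot is always left unused so that
 * a completely full ring (head one slot behind tail) can be told apart
 * from a completely empty one (head == tail).
 */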
115 #define TX_BUFFS_AVAIL(CP)					\
116 	(((CP)->tx_tail <= (CP)->tx_head) ?			\
117 	  (CP)->tx_tail + (CP_TX_RING_SIZE - 1) - (CP)->tx_head :	\
118 	  (CP)->tx_tail - (CP)->tx_head - 1)
119 
120 #define PKT_BUF_SZ		1536	/* Size of each temporary Rx buffer.*/
121 #define CP_INTERNAL_PHY		32
122 
123 /* The following settings are log_2(bytes)-4:  0 == 16 bytes .. 6==1024, 7==end of packet. */
124 #define RX_FIFO_THRESH		5	/* Rx buffer level before first PCI xfer.  */
125 #define RX_DMA_BURST		4	/* Maximum PCI burst, '4' is 256 */
126 #define TX_DMA_BURST		6	/* Maximum PCI burst, '6' is 1024 */
127 #define TX_EARLY_THRESH		256	/* Early Tx threshold, in bytes */
128 
129 /* Time in jiffies before concluding the transmitter is hung. */
130 #define TX_TIMEOUT		(6*HZ)
131 
132 /* hardware minimum and maximum for a single frame's data payload */
133 #define CP_MIN_MTU		60	/* TODO: allow lower, but pad */
134 #define CP_MAX_MTU		4096
135 
136 enum {
137 	/* NIC register offsets */
138 	MAC0		= 0x00,	/* Ethernet hardware address. */
139 	MAR0		= 0x08,	/* Multicast filter. */
140 	StatsAddr	= 0x10,	/* 64-bit start addr of 64-byte DMA stats blk */
141 	TxRingAddr	= 0x20, /* 64-bit start addr of Tx ring */
142 	HiTxRingAddr	= 0x28, /* 64-bit start addr of high priority Tx ring */
143 	Cmd		= 0x37, /* Command register */
144 	IntrMask	= 0x3C, /* Interrupt mask */
145 	IntrStatus	= 0x3E, /* Interrupt status */
146 	TxConfig	= 0x40, /* Tx configuration */
147 	ChipVersion	= 0x43, /* 8-bit chip version, inside TxConfig */
148 	RxConfig	= 0x44, /* Rx configuration */
149 	RxMissed	= 0x4C,	/* 24 bits valid, write clears */
150 	Cfg9346		= 0x50, /* EEPROM select/control; Cfg reg [un]lock */
151 	Config1		= 0x52, /* Config1 */
152 	Config3		= 0x59, /* Config3 */
153 	Config4		= 0x5A, /* Config4 */
154 	MultiIntr	= 0x5C, /* Multiple interrupt select */
155 	BasicModeCtrl	= 0x62,	/* MII BMCR */
156 	BasicModeStatus	= 0x64, /* MII BMSR */
157 	NWayAdvert	= 0x66, /* MII ADVERTISE */
158 	NWayLPAR	= 0x68, /* MII LPA */
159 	NWayExpansion	= 0x6A, /* MII Expansion */
160 	Config5		= 0xD8,	/* Config5 */
161 	TxPoll		= 0xD9,	/* Tell chip to check Tx descriptors for work */
162 	RxMaxSize	= 0xDA, /* Max size of an Rx packet (8169 only) */
163 	CpCmd		= 0xE0, /* C+ Command register (C+ mode only) */
164 	IntrMitigate	= 0xE2,	/* rx/tx interrupt mitigation control */
165 	RxRingAddr	= 0xE4, /* 64-bit start addr of Rx ring */
166 	TxThresh	= 0xEC, /* Early Tx threshold */
167 	OldRxBufAddr	= 0x30, /* DMA address of Rx ring buffer (C mode) */
168 	OldTSD0		= 0x10, /* DMA address of first Tx desc (C mode) */
169 
170 	/* Tx and Rx status descriptors */
171 	DescOwn		= (1 << 31), /* Descriptor is owned by NIC */
172 	RingEnd		= (1 << 30), /* End of descriptor ring */
173 	FirstFrag	= (1 << 29), /* First segment of a packet */
174 	LastFrag	= (1 << 28), /* Final segment of a packet */
175 	LargeSend	= (1 << 27), /* TCP Large Send Offload (TSO) */
176 	MSSShift	= 16,	     /* MSS value position */
177 	MSSMask		= 0xfff,     /* MSS value: 11 bits */
178 	TxError		= (1 << 23), /* Tx error summary */
179 	RxError		= (1 << 20), /* Rx error summary */
180 	IPCS		= (1 << 18), /* Calculate IP checksum */
181 	UDPCS		= (1 << 17), /* Calculate UDP/IP checksum */
182 	TCPCS		= (1 << 16), /* Calculate TCP/IP checksum */
183 	TxVlanTag	= (1 << 17), /* Add VLAN tag */
184 	RxVlanTagged	= (1 << 16), /* Rx VLAN tag available */
185 	IPFail		= (1 << 15), /* IP checksum failed */
186 	UDPFail		= (1 << 14), /* UDP/IP checksum failed */
187 	TCPFail		= (1 << 13), /* TCP/IP checksum failed */
188 	NormalTxPoll	= (1 << 6),  /* One or more normal Tx packets to send */
189 	PID1		= (1 << 17), /* 2 protocol id bits:  0==non-IP, */
190 	PID0		= (1 << 16), /* 1==TCP/IP, 2==UDP/IP, 3==IP */
191 	RxProtoTCP	= 1,
192 	RxProtoUDP	= 2,
193 	RxProtoIP	= 3,
194 	TxFIFOUnder	= (1 << 25), /* Tx FIFO underrun */
195 	TxOWC		= (1 << 22), /* Tx Out-of-window collision */
196 	TxLinkFail	= (1 << 21), /* Link failed during Tx of packet */
197 	TxMaxCol	= (1 << 20), /* Tx aborted due to excessive collisions */
198 	TxColCntShift	= 16,	     /* Shift, to get 4-bit Tx collision cnt */
199 	TxColCntMask	= 0x01 | 0x02 | 0x04 | 0x08, /* 4-bit collision count */
200 	RxErrFrame	= (1 << 27), /* Rx frame alignment error */
201 	RxMcast		= (1 << 26), /* Rx multicast packet rcv'd */
202 	RxErrCRC	= (1 << 18), /* Rx CRC error */
203 	RxErrRunt	= (1 << 19), /* Rx error, packet < 64 bytes */
204 	RxErrLong	= (1 << 21), /* Rx error, packet > 4096 bytes */
205 	RxErrFIFO	= (1 << 22), /* Rx error, FIFO overflowed, pkt bad */
206 
207 	/* StatsAddr register */
208 	DumpStats	= (1 << 3),  /* Begin stats dump */
209 
210 	/* RxConfig register */
211 	RxCfgFIFOShift	= 13,	     /* Shift, to get Rx FIFO thresh value */
212 	RxCfgDMAShift	= 8,	     /* Shift, to get Rx Max DMA value */
213 	AcceptErr	= 0x20,	     /* Accept packets with CRC errors */
214 	AcceptRunt	= 0x10,	     /* Accept runt (<64 bytes) packets */
215 	AcceptBroadcast	= 0x08,	     /* Accept broadcast packets */
216 	AcceptMulticast	= 0x04,	     /* Accept multicast packets */
217 	AcceptMyPhys	= 0x02,	     /* Accept pkts with our MAC as dest */
218 	AcceptAllPhys	= 0x01,	     /* Accept all pkts w/ physical dest */
219 
220 	/* IntrMask / IntrStatus registers */
221 	PciErr		= (1 << 15), /* System error on the PCI bus */
222 	TimerIntr	= (1 << 14), /* Asserted when TCTR reaches TimerInt value */
223 	LenChg		= (1 << 13), /* Cable length change */
224 	SWInt		= (1 << 8),  /* Software-requested interrupt */
225 	TxEmpty		= (1 << 7),  /* No Tx descriptors available */
226 	RxFIFOOvr	= (1 << 6),  /* Rx FIFO Overflow */
227 	LinkChg		= (1 << 5),  /* Packet underrun, or link change */
228 	RxEmpty		= (1 << 4),  /* No Rx descriptors available */
229 	TxErr		= (1 << 3),  /* Tx error */
230 	TxOK		= (1 << 2),  /* Tx packet sent */
231 	RxErr		= (1 << 1),  /* Rx error */
232 	RxOK		= (1 << 0),  /* Rx packet received */
233 	IntrResvd	= (1 << 10), /* reserved, according to RealTek engineers,
234 					but hardware likes to raise it */
235 
236 	IntrAll		= PciErr | TimerIntr | LenChg | SWInt | TxEmpty |
237 			  RxFIFOOvr | LinkChg | RxEmpty | TxErr | TxOK |
238 			  RxErr | RxOK | IntrResvd,
239 
240 	/* C mode command register */
241 	CmdReset	= (1 << 4),  /* Enable to reset; self-clearing */
242 	RxOn		= (1 << 3),  /* Rx mode enable */
243 	TxOn		= (1 << 2),  /* Tx mode enable */
244 
245 	/* C+ mode command register */
246 	RxVlanOn	= (1 << 6),  /* Rx VLAN de-tagging enable */
247 	RxChkSum	= (1 << 5),  /* Rx checksum offload enable */
248 	PCIDAC		= (1 << 4),  /* PCI Dual Address Cycle (64-bit PCI) */
249 	PCIMulRW	= (1 << 3),  /* Enable PCI read/write multiple */
250 	CpRxOn		= (1 << 1),  /* Rx mode enable */
251 	CpTxOn		= (1 << 0),  /* Tx mode enable */
252 
253 	/* Cfg9346 EEPROM control register */
254 	Cfg9346_Lock	= 0x00,	     /* Lock ConfigX/MII register access */
255 	Cfg9346_Unlock	= 0xC0,	     /* Unlock ConfigX/MII register access */
256 
257 	/* TxConfig register */
258 	IFG		= (1 << 25) | (1 << 24), /* standard IEEE interframe gap */
259 	TxDMAShift	= 8,	     /* DMA burst value (0-7) is shift this many bits */
260 
261 	/* Early Tx Threshold register */
262 	TxThreshMask	= 0x3f,	     /* Mask bits 5-0 */
263 	TxThreshMax	= 2048,	     /* Max early Tx threshold */
264 
265 	/* Config1 register */
266 	DriverLoaded	= (1 << 5),  /* Software marker, driver is loaded */
267 	LWACT           = (1 << 4),  /* LWAKE active mode */
268 	PMEnable	= (1 << 0),  /* Enable various PM features of chip */
269 
270 	/* Config3 register */
271 	PARMEnable	= (1 << 6),  /* Enable auto-loading of PHY parms */
272 	MagicPacket     = (1 << 5),  /* Wake up when a Magic Packet is received */
273 	LinkUp          = (1 << 4),  /* Wake up when the cable connection is re-established */
274 
275 	/* Config4 register */
276 	LWPTN           = (1 << 1),  /* LWAKE Pattern */
277 	LWPME           = (1 << 4),  /* LANWAKE vs PMEB */
278 
279 	/* Config5 register */
280 	BWF             = (1 << 6),  /* Accept Broadcast wakeup frame */
281 	MWF             = (1 << 5),  /* Accept Multicast wakeup frame */
282 	UWF             = (1 << 4),  /* Accept Unicast wakeup frame */
283 	LANWake         = (1 << 1),  /* Enable LANWake signal */
284 	PMEStatus	= (1 << 0),  /* PME status can be reset by PCI RST# */
285 
286 	cp_norx_intr_mask = PciErr | LinkChg | TxOK | TxErr | TxEmpty,
287 	cp_rx_intr_mask = RxOK | RxErr | RxEmpty | RxFIFOOvr,
288 	cp_intr_mask = cp_rx_intr_mask | cp_norx_intr_mask,
289 };
290 
291 static const unsigned int cp_rx_config =
292 	  (RX_FIFO_THRESH << RxCfgFIFOShift) |
293 	  (RX_DMA_BURST << RxCfgDMAShift);
294 
295 struct cp_desc {
296 	__le32		opts1;
297 	__le32		opts2;
298 	__le64		addr;
299 };
300 
301 struct cp_dma_stats {
302 	__le64			tx_ok;
303 	__le64			rx_ok;
304 	__le64			tx_err;
305 	__le32			rx_err;
306 	__le16			rx_fifo;
307 	__le16			frame_align;
308 	__le32			tx_ok_1col;
309 	__le32			tx_ok_mcol;
310 	__le64			rx_ok_phys;
311 	__le64			rx_ok_bcast;
312 	__le32			rx_ok_mcast;
313 	__le16			tx_abort;
314 	__le16			tx_underrun;
315 } __packed;
316 
317 struct cp_extra_stats {
318 	unsigned long		rx_frags;
319 };
320 
321 struct cp_private {
322 	void			__iomem *regs;
323 	struct net_device	*dev;
324 	spinlock_t		lock;
325 	u32			msg_enable;
326 
327 	struct napi_struct	napi;
328 
329 	struct pci_dev		*pdev;
330 	u32			rx_config;
331 	u16			cpcmd;
332 
333 	struct cp_extra_stats	cp_stats;
334 
335 	unsigned		rx_head		____cacheline_aligned;
336 	unsigned		rx_tail;
337 	struct cp_desc		*rx_ring;
338 	struct sk_buff		*rx_skb[CP_RX_RING_SIZE];
339 
340 	unsigned		tx_head		____cacheline_aligned;
341 	unsigned		tx_tail;
342 	struct cp_desc		*tx_ring;
343 	struct sk_buff		*tx_skb[CP_TX_RING_SIZE];
344 
345 	unsigned		rx_buf_sz;
346 	unsigned		wol_enabled : 1; /* Is Wake-on-LAN enabled? */
347 
348 	dma_addr_t		ring_dma;
349 
350 	struct mii_if_info	mii_if;
351 };
352 
353 #define cpr8(reg)	readb(cp->regs + (reg))
354 #define cpr16(reg)	readw(cp->regs + (reg))
355 #define cpr32(reg)	readl(cp->regs + (reg))
356 #define cpw8(reg,val)	writeb((val), cp->regs + (reg))
357 #define cpw16(reg,val)	writew((val), cp->regs + (reg))
358 #define cpw32(reg,val)	writel((val), cp->regs + (reg))
359 #define cpw8_f(reg,val) do {			\
360 	writeb((val), cp->regs + (reg));	\
361 	readb(cp->regs + (reg));		\
362 	} while (0)
363 #define cpw16_f(reg,val) do {			\
364 	writew((val), cp->regs + (reg));	\
365 	readw(cp->regs + (reg));		\
366 	} while (0)
367 #define cpw32_f(reg,val) do {			\
368 	writel((val), cp->regs + (reg));	\
369 	readl(cp->regs + (reg));		\
370 	} while (0)
371 
372 
373 static void __cp_set_rx_mode (struct net_device *dev);
374 static void cp_tx (struct cp_private *cp);
375 static void cp_clean_rings (struct cp_private *cp);
376 #ifdef CONFIG_NET_POLL_CONTROLLER
377 static void cp_poll_controller(struct net_device *dev);
378 #endif
379 static int cp_get_eeprom_len(struct net_device *dev);
380 static int cp_get_eeprom(struct net_device *dev,
381 			 struct ethtool_eeprom *eeprom, u8 *data);
382 static int cp_set_eeprom(struct net_device *dev,
383 			 struct ethtool_eeprom *eeprom, u8 *data);
384 
385 static DEFINE_PCI_DEVICE_TABLE(cp_pci_tbl) = {
386 	{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK,	PCI_DEVICE_ID_REALTEK_8139), },
387 	{ PCI_DEVICE(PCI_VENDOR_ID_TTTECH,	PCI_DEVICE_ID_TTTECH_MC322), },
388 	{ },
389 };
390 MODULE_DEVICE_TABLE(pci, cp_pci_tbl);
391 
392 static struct {
393 	const char str[ETH_GSTRING_LEN];
394 } ethtool_stats_keys[] = {
395 	{ "tx_ok" },
396 	{ "rx_ok" },
397 	{ "tx_err" },
398 	{ "rx_err" },
399 	{ "rx_fifo" },
400 	{ "frame_align" },
401 	{ "tx_ok_1col" },
402 	{ "tx_ok_mcol" },
403 	{ "rx_ok_phys" },
404 	{ "rx_ok_bcast" },
405 	{ "rx_ok_mcast" },
406 	{ "tx_abort" },
407 	{ "tx_underrun" },
408 	{ "rx_frags" },
409 };
410 
411 
412 static inline void cp_set_rxbufsize (struct cp_private *cp)
413 {
414 	unsigned int mtu = cp->dev->mtu;
415 
416 	if (mtu > ETH_DATA_LEN)
417 		/* MTU + ethernet header + 4-byte FCS + optional 4-byte VLAN tag */
418 		cp->rx_buf_sz = mtu + ETH_HLEN + 8;
419 	else
420 		cp->rx_buf_sz = PKT_BUF_SZ;
421 }
422 
423 static inline void cp_rx_skb (struct cp_private *cp, struct sk_buff *skb,
424 			      struct cp_desc *desc)
425 {
426 	u32 opts2 = le32_to_cpu(desc->opts2);
427 
428 	skb->protocol = eth_type_trans (skb, cp->dev);
429 
430 	cp->dev->stats.rx_packets++;
431 	cp->dev->stats.rx_bytes += skb->len;
432 
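	/* The chip stores the 802.1Q tag in big-endian (network) byte order
	 * inside the little-endian opts2 word, hence the swab16().
	 */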
433 	if (opts2 & RxVlanTagged)
434 		__vlan_hwaccel_put_tag(skb, swab16(opts2 & 0xffff));
435 
436 	napi_gro_receive(&cp->napi, skb);
437 }
438 
439 static void cp_rx_err_acct (struct cp_private *cp, unsigned rx_tail,
440 			    u32 status, u32 len)
441 {
442 	netif_dbg(cp, rx_err, cp->dev, "rx err, slot %d status 0x%x len %d\n",
443 		  rx_tail, status, len);
444 	cp->dev->stats.rx_errors++;
445 	if (status & RxErrFrame)
446 		cp->dev->stats.rx_frame_errors++;
447 	if (status & RxErrCRC)
448 		cp->dev->stats.rx_crc_errors++;
449 	if ((status & RxErrRunt) || (status & RxErrLong))
450 		cp->dev->stats.rx_length_errors++;
451 	if ((status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag))
452 		cp->dev->stats.rx_length_errors++;
453 	if (status & RxErrFIFO)
454 		cp->dev->stats.rx_fifo_errors++;
455 }
456 
457 static inline unsigned int cp_rx_csum_ok (u32 status)
458 {
459 	unsigned int protocol = (status >> 16) & 0x3;
460 
461 	if (((protocol == RxProtoTCP) && !(status & TCPFail)) ||
462 	    ((protocol == RxProtoUDP) && !(status & UDPFail)))
463 		return 1;
464 	else
465 		return 0;
466 }
467 
468 static int cp_rx_poll(struct napi_struct *napi, int budget)
469 {
470 	struct cp_private *cp = container_of(napi, struct cp_private, napi);
471 	struct net_device *dev = cp->dev;
472 	unsigned int rx_tail = cp->rx_tail;
473 	int rx;
474 
475 rx_status_loop:
476 	rx = 0;
477 	cpw16(IntrStatus, cp_rx_intr_mask);
478 
479 	while (1) {
480 		u32 status, len;
481 		dma_addr_t mapping;
482 		struct sk_buff *skb, *new_skb;
483 		struct cp_desc *desc;
484 		const unsigned buflen = cp->rx_buf_sz;
485 
486 		skb = cp->rx_skb[rx_tail];
487 		BUG_ON(!skb);
488 
489 		desc = &cp->rx_ring[rx_tail];
490 		status = le32_to_cpu(desc->opts1);
491 		if (status & DescOwn)
492 			break;
493 
494 		len = (status & 0x1fff) - 4;	/* reported length includes the 4-byte FCS */
495 		mapping = le64_to_cpu(desc->addr);
496 
497 		if ((status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag)) {
498 			/* we don't support incoming fragmented frames.
499 			 * instead, we attempt to ensure that the
500 			 * pre-allocated RX skbs are properly sized such
501 			 * that RX fragments are never encountered
502 			 */
503 			cp_rx_err_acct(cp, rx_tail, status, len);
504 			dev->stats.rx_dropped++;
505 			cp->cp_stats.rx_frags++;
506 			goto rx_next;
507 		}
508 
509 		if (status & (RxError | RxErrFIFO)) {
510 			cp_rx_err_acct(cp, rx_tail, status, len);
511 			goto rx_next;
512 		}
513 
514 		netif_dbg(cp, rx_status, dev, "rx slot %d status 0x%x len %d\n",
515 			  rx_tail, status, len);
516 
517 		new_skb = netdev_alloc_skb_ip_align(dev, buflen);
518 		if (!new_skb) {
519 			dev->stats.rx_dropped++;
520 			goto rx_next;
521 		}
522 
523 		dma_unmap_single(&cp->pdev->dev, mapping,
524 				 buflen, PCI_DMA_FROMDEVICE);
525 
526 		/* Handle checksum offloading for incoming packets. */
527 		if (cp_rx_csum_ok(status))
528 			skb->ip_summed = CHECKSUM_UNNECESSARY;
529 		else
530 			skb_checksum_none_assert(skb);
531 
532 		skb_put(skb, len);
533 
534 		mapping = dma_map_single(&cp->pdev->dev, new_skb->data, buflen,
535 					 PCI_DMA_FROMDEVICE);
536 		cp->rx_skb[rx_tail] = new_skb;
537 
538 		cp_rx_skb(cp, skb, desc);
539 		rx++;
540 
541 rx_next:
542 		cp->rx_ring[rx_tail].opts2 = 0;
543 		cp->rx_ring[rx_tail].addr = cpu_to_le64(mapping);
544 		if (rx_tail == (CP_RX_RING_SIZE - 1))
545 			desc->opts1 = cpu_to_le32(DescOwn | RingEnd |
546 						  cp->rx_buf_sz);
547 		else
548 			desc->opts1 = cpu_to_le32(DescOwn | cp->rx_buf_sz);
549 		rx_tail = NEXT_RX(rx_tail);
550 
551 		if (rx >= budget)
552 			break;
553 	}
554 
555 	cp->rx_tail = rx_tail;
556 
557 	/* if we did not reach work limit, then we're done with
558 	 * this round of polling
559 	 */
560 	if (rx < budget) {
561 		unsigned long flags;
562 
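		/* Re-check the status register so that packets which arrived
		 * while we were polling are handled now rather than via a
		 * fresh interrupt.
		 */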
563 		if (cpr16(IntrStatus) & cp_rx_intr_mask)
564 			goto rx_status_loop;
565 
566 		napi_gro_flush(napi, false);
567 		spin_lock_irqsave(&cp->lock, flags);
568 		__napi_complete(napi);
569 		cpw16_f(IntrMask, cp_intr_mask);
570 		spin_unlock_irqrestore(&cp->lock, flags);
571 	}
572 
573 	return rx;
574 }
575 
576 static irqreturn_t cp_interrupt (int irq, void *dev_instance)
577 {
578 	struct net_device *dev = dev_instance;
579 	struct cp_private *cp;
580 	u16 status;
581 
582 	if (unlikely(dev == NULL))
583 		return IRQ_NONE;
584 	cp = netdev_priv(dev);
585 
586 	status = cpr16(IntrStatus);
587 	if (!status || (status == 0xFFFF))
588 		return IRQ_NONE;
589 
590 	netif_dbg(cp, intr, dev, "intr, status %04x cmd %02x cpcmd %04x\n",
591 		  status, cpr8(Cmd), cpr16(CpCmd));
592 
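	/* Acknowledge everything except the Rx bits here; those are
	 * acknowledged by cp_rx_poll() when NAPI polling runs.
	 */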
593 	cpw16(IntrStatus, status & ~cp_rx_intr_mask);
594 
595 	spin_lock(&cp->lock);
596 
597 	/* close possible races with dev_close */
598 	if (unlikely(!netif_running(dev))) {
599 		cpw16(IntrMask, 0);
600 		spin_unlock(&cp->lock);
601 		return IRQ_HANDLED;
602 	}
603 
604 	if (status & (RxOK | RxErr | RxEmpty | RxFIFOOvr))
605 		if (napi_schedule_prep(&cp->napi)) {
606 			cpw16_f(IntrMask, cp_norx_intr_mask);
607 			__napi_schedule(&cp->napi);
608 		}
609 
610 	if (status & (TxOK | TxErr | TxEmpty | SWInt))
611 		cp_tx(cp);
612 	if (status & LinkChg)
613 		mii_check_media(&cp->mii_if, netif_msg_link(cp), false);
614 
615 	spin_unlock(&cp->lock);
616 
617 	if (status & PciErr) {
618 		u16 pci_status;
619 
620 		pci_read_config_word(cp->pdev, PCI_STATUS, &pci_status);
621 		pci_write_config_word(cp->pdev, PCI_STATUS, pci_status);
622 		netdev_err(dev, "PCI bus error, status=%04x, PCI status=%04x\n",
623 			   status, pci_status);
624 
625 		/* TODO: reset hardware */
626 	}
627 
628 	return IRQ_HANDLED;
629 }
630 
631 #ifdef CONFIG_NET_POLL_CONTROLLER
632 /*
633  * Polling receive - used by netconsole and other diagnostic tools
634  * to allow network i/o with interrupts disabled.
635  */
636 static void cp_poll_controller(struct net_device *dev)
637 {
638 	struct cp_private *cp = netdev_priv(dev);
639 	const int irq = cp->pdev->irq;
640 
641 	disable_irq(irq);
642 	cp_interrupt(irq, dev);
643 	enable_irq(irq);
644 }
645 #endif
646 
647 static void cp_tx (struct cp_private *cp)
648 {
649 	unsigned tx_head = cp->tx_head;
650 	unsigned tx_tail = cp->tx_tail;
651 
652 	while (tx_tail != tx_head) {
653 		struct cp_desc *txd = cp->tx_ring + tx_tail;
654 		struct sk_buff *skb;
655 		u32 status;
656 
657 		rmb();
658 		status = le32_to_cpu(txd->opts1);
659 		if (status & DescOwn)
660 			break;
661 
662 		skb = cp->tx_skb[tx_tail];
663 		BUG_ON(!skb);
664 
665 		dma_unmap_single(&cp->pdev->dev, le64_to_cpu(txd->addr),
666 				 le32_to_cpu(txd->opts1) & 0xffff,
667 				 PCI_DMA_TODEVICE);
668 
669 		if (status & LastFrag) {
670 			if (status & (TxError | TxFIFOUnder)) {
671 				netif_dbg(cp, tx_err, cp->dev,
672 					  "tx err, status 0x%x\n", status);
673 				cp->dev->stats.tx_errors++;
674 				if (status & TxOWC)
675 					cp->dev->stats.tx_window_errors++;
676 				if (status & TxMaxCol)
677 					cp->dev->stats.tx_aborted_errors++;
678 				if (status & TxLinkFail)
679 					cp->dev->stats.tx_carrier_errors++;
680 				if (status & TxFIFOUnder)
681 					cp->dev->stats.tx_fifo_errors++;
682 			} else {
683 				cp->dev->stats.collisions +=
684 					((status >> TxColCntShift) & TxColCntMask);
685 				cp->dev->stats.tx_packets++;
686 				cp->dev->stats.tx_bytes += skb->len;
687 				netif_dbg(cp, tx_done, cp->dev,
688 					  "tx done, slot %d\n", tx_tail);
689 			}
690 			dev_kfree_skb_irq(skb);
691 		}
692 
693 		cp->tx_skb[tx_tail] = NULL;
694 
695 		tx_tail = NEXT_TX(tx_tail);
696 	}
697 
698 	cp->tx_tail = tx_tail;
699 
700 	if (TX_BUFFS_AVAIL(cp) > (MAX_SKB_FRAGS + 1))
701 		netif_wake_queue(cp->dev);
702 }
703 
704 static inline u32 cp_tx_vlan_tag(struct sk_buff *skb)
705 {
706 	return vlan_tx_tag_present(skb) ?
707 		TxVlanTag | swab16(vlan_tx_tag_get(skb)) : 0x00;
708 }
709 
710 static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
711 					struct net_device *dev)
712 {
713 	struct cp_private *cp = netdev_priv(dev);
714 	unsigned entry;
715 	u32 eor, flags;
716 	unsigned long intr_flags;
717 	__le32 opts2;
718 	int mss = 0;
719 
720 	spin_lock_irqsave(&cp->lock, intr_flags);
721 
722 	/* This is a hard error, log it. */
723 	if (TX_BUFFS_AVAIL(cp) <= (skb_shinfo(skb)->nr_frags + 1)) {
724 		netif_stop_queue(dev);
725 		spin_unlock_irqrestore(&cp->lock, intr_flags);
726 		netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
727 		return NETDEV_TX_BUSY;
728 	}
729 
730 	entry = cp->tx_head;
731 	eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
732 	mss = skb_shinfo(skb)->gso_size;
733 
734 	opts2 = cpu_to_le32(cp_tx_vlan_tag(skb));
735 
736 	if (skb_shinfo(skb)->nr_frags == 0) {
737 		struct cp_desc *txd = &cp->tx_ring[entry];
738 		u32 len;
739 		dma_addr_t mapping;
740 
741 		len = skb->len;
742 		mapping = dma_map_single(&cp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE);
743 		txd->opts2 = opts2;
744 		txd->addr = cpu_to_le64(mapping);
745 		wmb();
746 
747 		flags = eor | len | DescOwn | FirstFrag | LastFrag;
748 
749 		if (mss)
750 			flags |= LargeSend | ((mss & MSSMask) << MSSShift);
751 		else if (skb->ip_summed == CHECKSUM_PARTIAL) {
752 			const struct iphdr *ip = ip_hdr(skb);
753 			if (ip->protocol == IPPROTO_TCP)
754 				flags |= IPCS | TCPCS;
755 			else if (ip->protocol == IPPROTO_UDP)
756 				flags |= IPCS | UDPCS;
757 			else
758 				WARN_ON(1);
759 		}
760 
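		/* Once DescOwn is set the NIC is free to use the descriptor,
		 * so addr/opts2 above must be visible first; the wmb() above
		 * enforces that ordering.
		 */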
761 		txd->opts1 = cpu_to_le32(flags);
762 		wmb();
763 
764 		cp->tx_skb[entry] = skb;
765 		entry = NEXT_TX(entry);
766 	} else {
767 		struct cp_desc *txd;
768 		u32 first_len, first_eor;
769 		dma_addr_t first_mapping;
770 		int frag, first_entry = entry;
771 		const struct iphdr *ip = ip_hdr(skb);
772 
773 		/* We must give this initial chunk to the device last.
774 		 * Otherwise we could race with the device.
775 		 */
776 		first_eor = eor;
777 		first_len = skb_headlen(skb);
778 		first_mapping = dma_map_single(&cp->pdev->dev, skb->data,
779 					       first_len, PCI_DMA_TODEVICE);
780 		cp->tx_skb[entry] = skb;
781 		entry = NEXT_TX(entry);
782 
783 		for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
784 			const skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
785 			u32 len;
786 			u32 ctrl;
787 			dma_addr_t mapping;
788 
789 			len = skb_frag_size(this_frag);
790 			mapping = dma_map_single(&cp->pdev->dev,
791 						 skb_frag_address(this_frag),
792 						 len, PCI_DMA_TODEVICE);
793 			eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
794 
795 			ctrl = eor | len | DescOwn;
796 
797 			if (mss)
798 				ctrl |= LargeSend |
799 					((mss & MSSMask) << MSSShift);
800 			else if (skb->ip_summed == CHECKSUM_PARTIAL) {
801 				if (ip->protocol == IPPROTO_TCP)
802 					ctrl |= IPCS | TCPCS;
803 				else if (ip->protocol == IPPROTO_UDP)
804 					ctrl |= IPCS | UDPCS;
805 				else
806 					BUG();
807 			}
808 
809 			if (frag == skb_shinfo(skb)->nr_frags - 1)
810 				ctrl |= LastFrag;
811 
812 			txd = &cp->tx_ring[entry];
813 			txd->opts2 = opts2;
814 			txd->addr = cpu_to_le64(mapping);
815 			wmb();
816 
817 			txd->opts1 = cpu_to_le32(ctrl);
818 			wmb();
819 
820 			cp->tx_skb[entry] = skb;
821 			entry = NEXT_TX(entry);
822 		}
823 
824 		txd = &cp->tx_ring[first_entry];
825 		txd->opts2 = opts2;
826 		txd->addr = cpu_to_le64(first_mapping);
827 		wmb();
828 
829 		if (skb->ip_summed == CHECKSUM_PARTIAL) {
830 			if (ip->protocol == IPPROTO_TCP)
831 				txd->opts1 = cpu_to_le32(first_eor | first_len |
832 							 FirstFrag | DescOwn |
833 							 IPCS | TCPCS);
834 			else if (ip->protocol == IPPROTO_UDP)
835 				txd->opts1 = cpu_to_le32(first_eor | first_len |
836 							 FirstFrag | DescOwn |
837 							 IPCS | UDPCS);
838 			else
839 				BUG();
840 		} else
841 			txd->opts1 = cpu_to_le32(first_eor | first_len |
842 						 FirstFrag | DescOwn);
843 		wmb();
844 	}
845 	cp->tx_head = entry;
846 	netif_dbg(cp, tx_queued, cp->dev, "tx queued, slot %d, skblen %d\n",
847 		  entry, skb->len);
848 	if (TX_BUFFS_AVAIL(cp) <= (MAX_SKB_FRAGS + 1))
849 		netif_stop_queue(dev);
850 
851 	spin_unlock_irqrestore(&cp->lock, intr_flags);
852 
853 	cpw8(TxPoll, NormalTxPoll);
854 
855 	return NETDEV_TX_OK;
856 }
857 
858 /* Set or clear the multicast filter for this adaptor.
859    This routine is not state sensitive and need not be SMP locked. */
860 
861 static void __cp_set_rx_mode (struct net_device *dev)
862 {
863 	struct cp_private *cp = netdev_priv(dev);
864 	u32 mc_filter[2];	/* Multicast hash filter */
865 	int rx_mode;
866 
867 	/* Note: do not reorder, GCC is clever about common statements. */
868 	if (dev->flags & IFF_PROMISC) {
869 		/* Unconditionally log net taps. */
870 		rx_mode =
871 		    AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
872 		    AcceptAllPhys;
873 		mc_filter[1] = mc_filter[0] = 0xffffffff;
874 	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
875 		   (dev->flags & IFF_ALLMULTI)) {
876 		/* Too many to filter perfectly -- accept all multicasts. */
877 		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
878 		mc_filter[1] = mc_filter[0] = 0xffffffff;
879 	} else {
880 		struct netdev_hw_addr *ha;
881 		rx_mode = AcceptBroadcast | AcceptMyPhys;
882 		mc_filter[1] = mc_filter[0] = 0;
883 		netdev_for_each_mc_addr(ha, dev) {
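			/* The top 6 bits of the CRC select one of the 64
			 * multicast filter bits (MAR0..MAR7).
			 */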
884 			int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
885 
886 			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
887 			rx_mode |= AcceptMulticast;
888 		}
889 	}
890 
891 	/* We can safely update without stopping the chip. */
892 	cp->rx_config = cp_rx_config | rx_mode;
893 	cpw32_f(RxConfig, cp->rx_config);
894 
895 	cpw32_f (MAR0 + 0, mc_filter[0]);
896 	cpw32_f (MAR0 + 4, mc_filter[1]);
897 }
898 
899 static void cp_set_rx_mode (struct net_device *dev)
900 {
901 	unsigned long flags;
902 	struct cp_private *cp = netdev_priv(dev);
903 
904 	spin_lock_irqsave (&cp->lock, flags);
905 	__cp_set_rx_mode(dev);
906 	spin_unlock_irqrestore (&cp->lock, flags);
907 }
908 
909 static void __cp_get_stats(struct cp_private *cp)
910 {
911 	/* only lower 24 bits valid; write any value to clear */
912 	cp->dev->stats.rx_missed_errors += (cpr32 (RxMissed) & 0xffffff);
913 	cpw32 (RxMissed, 0);
914 }
915 
916 static struct net_device_stats *cp_get_stats(struct net_device *dev)
917 {
918 	struct cp_private *cp = netdev_priv(dev);
919 	unsigned long flags;
920 
921 	/* The chip only needs to report frames it silently dropped. */
922 	spin_lock_irqsave(&cp->lock, flags);
923  	if (netif_running(dev) && netif_device_present(dev))
924  		__cp_get_stats(cp);
925 	spin_unlock_irqrestore(&cp->lock, flags);
926 
927 	return &dev->stats;
928 }
929 
930 static void cp_stop_hw (struct cp_private *cp)
931 {
932 	cpw16(IntrStatus, ~(cpr16(IntrStatus)));
933 	cpw16_f(IntrMask, 0);
934 	cpw8(Cmd, 0);
935 	cpw16_f(CpCmd, 0);
936 	cpw16_f(IntrStatus, ~(cpr16(IntrStatus)));
937 
938 	cp->rx_tail = 0;
939 	cp->tx_head = cp->tx_tail = 0;
940 }
941 
942 static void cp_reset_hw (struct cp_private *cp)
943 {
944 	unsigned work = 1000;
945 
946 	cpw8(Cmd, CmdReset);
947 
948 	while (work--) {
949 		if (!(cpr8(Cmd) & CmdReset))
950 			return;
951 
952 		schedule_timeout_uninterruptible(10);
953 	}
954 
955 	netdev_err(cp->dev, "hardware reset timeout\n");
956 }
957 
958 static inline void cp_start_hw (struct cp_private *cp)
959 {
960 	cpw16(CpCmd, cp->cpcmd);
961 	cpw8(Cmd, RxOn | TxOn);
962 }
963 
964 static void cp_enable_irq(struct cp_private *cp)
965 {
966 	cpw16_f(IntrMask, cp_intr_mask);
967 }
968 
969 static void cp_init_hw (struct cp_private *cp)
970 {
971 	struct net_device *dev = cp->dev;
972 	dma_addr_t ring_dma;
973 
974 	cp_reset_hw(cp);
975 
976 	cpw8_f (Cfg9346, Cfg9346_Unlock);
977 
978 	/* Restore our idea of the MAC address. */
979 	cpw32_f (MAC0 + 0, le32_to_cpu (*(__le32 *) (dev->dev_addr + 0)));
980 	cpw32_f (MAC0 + 4, le32_to_cpu (*(__le32 *) (dev->dev_addr + 4)));
981 
982 	cp_start_hw(cp);
983 	cpw8(TxThresh, 0x06); /* XXX convert magic num to a constant */
984 
985 	__cp_set_rx_mode(dev);
986 	cpw32_f (TxConfig, IFG | (TX_DMA_BURST << TxDMAShift));
987 
988 	cpw8(Config1, cpr8(Config1) | DriverLoaded | PMEnable);
989 	/* Disable Wake-on-LAN. Can be turned on with ETHTOOL_SWOL */
990 	cpw8(Config3, PARMEnable);
991 	cp->wol_enabled = 0;
992 
993 	cpw8(Config5, cpr8(Config5) & PMEStatus);
994 
995 	cpw32_f(HiTxRingAddr, 0);
996 	cpw32_f(HiTxRingAddr + 4, 0);
997 
998 	ring_dma = cp->ring_dma;
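	/* Program the 64-bit ring addresses.  The high word is extracted
	 * with two 16-bit shifts so the expression remains valid when
	 * dma_addr_t is only 32 bits wide.
	 */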
999 	cpw32_f(RxRingAddr, ring_dma & 0xffffffff);
1000 	cpw32_f(RxRingAddr + 4, (ring_dma >> 16) >> 16);
1001 
1002 	ring_dma += sizeof(struct cp_desc) * CP_RX_RING_SIZE;
1003 	cpw32_f(TxRingAddr, ring_dma & 0xffffffff);
1004 	cpw32_f(TxRingAddr + 4, (ring_dma >> 16) >> 16);
1005 
1006 	cpw16(MultiIntr, 0);
1007 
1008 	cpw8_f(Cfg9346, Cfg9346_Lock);
1009 }
1010 
1011 static int cp_refill_rx(struct cp_private *cp)
1012 {
1013 	struct net_device *dev = cp->dev;
1014 	unsigned i;
1015 
1016 	for (i = 0; i < CP_RX_RING_SIZE; i++) {
1017 		struct sk_buff *skb;
1018 		dma_addr_t mapping;
1019 
1020 		skb = netdev_alloc_skb_ip_align(dev, cp->rx_buf_sz);
1021 		if (!skb)
1022 			goto err_out;
1023 
1024 		mapping = dma_map_single(&cp->pdev->dev, skb->data,
1025 					 cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1026 		cp->rx_skb[i] = skb;
1027 
1028 		cp->rx_ring[i].opts2 = 0;
1029 		cp->rx_ring[i].addr = cpu_to_le64(mapping);
1030 		if (i == (CP_RX_RING_SIZE - 1))
1031 			cp->rx_ring[i].opts1 =
1032 				cpu_to_le32(DescOwn | RingEnd | cp->rx_buf_sz);
1033 		else
1034 			cp->rx_ring[i].opts1 =
1035 				cpu_to_le32(DescOwn | cp->rx_buf_sz);
1036 	}
1037 
1038 	return 0;
1039 
1040 err_out:
1041 	cp_clean_rings(cp);
1042 	return -ENOMEM;
1043 }
1044 
1045 static void cp_init_rings_index (struct cp_private *cp)
1046 {
1047 	cp->rx_tail = 0;
1048 	cp->tx_head = cp->tx_tail = 0;
1049 }
1050 
1051 static int cp_init_rings (struct cp_private *cp)
1052 {
1053 	memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);
1054 	cp->tx_ring[CP_TX_RING_SIZE - 1].opts1 = cpu_to_le32(RingEnd);
1055 
1056 	cp_init_rings_index(cp);
1057 
1058 	return cp_refill_rx (cp);
1059 }
1060 
1061 static int cp_alloc_rings (struct cp_private *cp)
1062 {
1063 	void *mem;
1064 
1065 	mem = dma_alloc_coherent(&cp->pdev->dev, CP_RING_BYTES,
1066 				 &cp->ring_dma, GFP_KERNEL);
1067 	if (!mem)
1068 		return -ENOMEM;
1069 
1070 	cp->rx_ring = mem;
1071 	cp->tx_ring = &cp->rx_ring[CP_RX_RING_SIZE];
1072 
1073 	return cp_init_rings(cp);
1074 }
1075 
1076 static void cp_clean_rings (struct cp_private *cp)
1077 {
1078 	struct cp_desc *desc;
1079 	unsigned i;
1080 
1081 	for (i = 0; i < CP_RX_RING_SIZE; i++) {
1082 		if (cp->rx_skb[i]) {
1083 			desc = cp->rx_ring + i;
1084 			dma_unmap_single(&cp->pdev->dev,le64_to_cpu(desc->addr),
1085 					 cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1086 			dev_kfree_skb(cp->rx_skb[i]);
1087 		}
1088 	}
1089 
1090 	for (i = 0; i < CP_TX_RING_SIZE; i++) {
1091 		if (cp->tx_skb[i]) {
1092 			struct sk_buff *skb = cp->tx_skb[i];
1093 
1094 			desc = cp->tx_ring + i;
1095 			dma_unmap_single(&cp->pdev->dev,le64_to_cpu(desc->addr),
1096 					 le32_to_cpu(desc->opts1) & 0xffff,
1097 					 PCI_DMA_TODEVICE);
1098 			if (le32_to_cpu(desc->opts1) & LastFrag)
1099 				dev_kfree_skb(skb);
1100 			cp->dev->stats.tx_dropped++;
1101 		}
1102 	}
1103 
1104 	memset(cp->rx_ring, 0, sizeof(struct cp_desc) * CP_RX_RING_SIZE);
1105 	memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);
1106 
1107 	memset(cp->rx_skb, 0, sizeof(struct sk_buff *) * CP_RX_RING_SIZE);
1108 	memset(cp->tx_skb, 0, sizeof(struct sk_buff *) * CP_TX_RING_SIZE);
1109 }
1110 
1111 static void cp_free_rings (struct cp_private *cp)
1112 {
1113 	cp_clean_rings(cp);
1114 	dma_free_coherent(&cp->pdev->dev, CP_RING_BYTES, cp->rx_ring,
1115 			  cp->ring_dma);
1116 	cp->rx_ring = NULL;
1117 	cp->tx_ring = NULL;
1118 }
1119 
1120 static int cp_open (struct net_device *dev)
1121 {
1122 	struct cp_private *cp = netdev_priv(dev);
1123 	const int irq = cp->pdev->irq;
1124 	int rc;
1125 
1126 	netif_dbg(cp, ifup, dev, "enabling interface\n");
1127 
1128 	rc = cp_alloc_rings(cp);
1129 	if (rc)
1130 		return rc;
1131 
1132 	napi_enable(&cp->napi);
1133 
1134 	cp_init_hw(cp);
1135 
1136 	rc = request_irq(irq, cp_interrupt, IRQF_SHARED, dev->name, dev);
1137 	if (rc)
1138 		goto err_out_hw;
1139 
1140 	cp_enable_irq(cp);
1141 
1142 	netif_carrier_off(dev);
1143 	mii_check_media(&cp->mii_if, netif_msg_link(cp), true);
1144 	netif_start_queue(dev);
1145 
1146 	return 0;
1147 
1148 err_out_hw:
1149 	napi_disable(&cp->napi);
1150 	cp_stop_hw(cp);
1151 	cp_free_rings(cp);
1152 	return rc;
1153 }
1154 
1155 static int cp_close (struct net_device *dev)
1156 {
1157 	struct cp_private *cp = netdev_priv(dev);
1158 	unsigned long flags;
1159 
1160 	napi_disable(&cp->napi);
1161 
1162 	netif_dbg(cp, ifdown, dev, "disabling interface\n");
1163 
1164 	spin_lock_irqsave(&cp->lock, flags);
1165 
1166 	netif_stop_queue(dev);
1167 	netif_carrier_off(dev);
1168 
1169 	cp_stop_hw(cp);
1170 
1171 	spin_unlock_irqrestore(&cp->lock, flags);
1172 
1173 	free_irq(cp->pdev->irq, dev);
1174 
1175 	cp_free_rings(cp);
1176 	return 0;
1177 }
1178 
1179 static void cp_tx_timeout(struct net_device *dev)
1180 {
1181 	struct cp_private *cp = netdev_priv(dev);
1182 	unsigned long flags;
1183 	int rc;
1184 
1185 	netdev_warn(dev, "Transmit timeout, status %2x %4x %4x %4x\n",
1186 		    cpr8(Cmd), cpr16(CpCmd),
1187 		    cpr16(IntrStatus), cpr16(IntrMask));
1188 
1189 	spin_lock_irqsave(&cp->lock, flags);
1190 
1191 	cp_stop_hw(cp);
1192 	cp_clean_rings(cp);
1193 	rc = cp_init_rings(cp);
1194 	cp_start_hw(cp);
1195 
1196 	netif_wake_queue(dev);
1197 
1198 	spin_unlock_irqrestore(&cp->lock, flags);
1199 }
1200 
1201 #ifdef BROKEN
1202 static int cp_change_mtu(struct net_device *dev, int new_mtu)
1203 {
1204 	struct cp_private *cp = netdev_priv(dev);
1205 	int rc;
1206 	unsigned long flags;
1207 
1208 	/* check for invalid MTU, according to hardware limits */
1209 	if (new_mtu < CP_MIN_MTU || new_mtu > CP_MAX_MTU)
1210 		return -EINVAL;
1211 
1212 	/* if network interface not up, no need for complexity */
1213 	if (!netif_running(dev)) {
1214 		dev->mtu = new_mtu;
1215 		cp_set_rxbufsize(cp);	/* set new rx buf size */
1216 		return 0;
1217 	}
1218 
1219 	spin_lock_irqsave(&cp->lock, flags);
1220 
1221 	cp_stop_hw(cp);			/* stop h/w and free rings */
1222 	cp_clean_rings(cp);
1223 
1224 	dev->mtu = new_mtu;
1225 	cp_set_rxbufsize(cp);		/* set new rx buf size */
1226 
1227 	rc = cp_init_rings(cp);		/* realloc and restart h/w */
1228 	cp_start_hw(cp);
1229 
1230 	spin_unlock_irqrestore(&cp->lock, flags);
1231 
1232 	return rc;
1233 }
1234 #endif /* BROKEN */
1235 
1236 static const char mii_2_8139_map[8] = {
1237 	BasicModeCtrl,
1238 	BasicModeStatus,
1239 	0,
1240 	0,
1241 	NWayAdvert,
1242 	NWayLPAR,
1243 	NWayExpansion,
1244 	0
1245 };
1246 
1247 static int mdio_read(struct net_device *dev, int phy_id, int location)
1248 {
1249 	struct cp_private *cp = netdev_priv(dev);
1250 
1251 	return location < 8 && mii_2_8139_map[location] ?
1252 	       readw(cp->regs + mii_2_8139_map[location]) : 0;
1253 }
1254 
1255 
1256 static void mdio_write(struct net_device *dev, int phy_id, int location,
1257 		       int value)
1258 {
1259 	struct cp_private *cp = netdev_priv(dev);
1260 
1261 	if (location == 0) {
1262 		cpw8(Cfg9346, Cfg9346_Unlock);
1263 		cpw16(BasicModeCtrl, value);
1264 		cpw8(Cfg9346, Cfg9346_Lock);
1265 	} else if (location < 8 && mii_2_8139_map[location])
1266 		cpw16(mii_2_8139_map[location], value);
1267 }
1268 
1269 /* Set the ethtool Wake-on-LAN settings */
1270 static int netdev_set_wol (struct cp_private *cp,
1271 			   const struct ethtool_wolinfo *wol)
1272 {
1273 	u8 options;
1274 
1275 	options = cpr8 (Config3) & ~(LinkUp | MagicPacket);
1276 	/* If WOL is being disabled, no need for complexity */
1277 	if (wol->wolopts) {
1278 		if (wol->wolopts & WAKE_PHY)	options |= LinkUp;
1279 		if (wol->wolopts & WAKE_MAGIC)	options |= MagicPacket;
1280 	}
1281 
1282 	cpw8 (Cfg9346, Cfg9346_Unlock);
1283 	cpw8 (Config3, options);
1284 	cpw8 (Cfg9346, Cfg9346_Lock);
1285 
1286 	options = 0; /* Paranoia setting */
1287 	options = cpr8 (Config5) & ~(UWF | MWF | BWF);
1288 	/* If WOL is being disabled, no need for complexity */
1289 	if (wol->wolopts) {
1290 		if (wol->wolopts & WAKE_UCAST)  options |= UWF;
1291 		if (wol->wolopts & WAKE_BCAST)	options |= BWF;
1292 		if (wol->wolopts & WAKE_MCAST)	options |= MWF;
1293 	}
1294 
1295 	cpw8 (Config5, options);
1296 
1297 	cp->wol_enabled = (wol->wolopts) ? 1 : 0;
1298 
1299 	return 0;
1300 }
1301 
1302 /* Get the ethtool Wake-on-LAN settings */
1303 static void netdev_get_wol (struct cp_private *cp,
1304 	             struct ethtool_wolinfo *wol)
1305 {
1306 	u8 options;
1307 
1308 	wol->wolopts   = 0; /* Start from scratch */
1309 	wol->supported = WAKE_PHY   | WAKE_BCAST | WAKE_MAGIC |
1310 		         WAKE_MCAST | WAKE_UCAST;
1311 	/* We don't need to go on if WOL is disabled */
1312 	if (!cp->wol_enabled) return;
1313 
1314 	options        = cpr8 (Config3);
1315 	if (options & LinkUp)        wol->wolopts |= WAKE_PHY;
1316 	if (options & MagicPacket)   wol->wolopts |= WAKE_MAGIC;
1317 
1318 	options        = 0; /* Paranoia setting */
1319 	options        = cpr8 (Config5);
1320 	if (options & UWF)           wol->wolopts |= WAKE_UCAST;
1321 	if (options & BWF)           wol->wolopts |= WAKE_BCAST;
1322 	if (options & MWF)           wol->wolopts |= WAKE_MCAST;
1323 }
1324 
1325 static void cp_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
1326 {
1327 	struct cp_private *cp = netdev_priv(dev);
1328 
1329 	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
1330 	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
1331 	strlcpy(info->bus_info, pci_name(cp->pdev), sizeof(info->bus_info));
1332 }
1333 
1334 static void cp_get_ringparam(struct net_device *dev,
1335 				struct ethtool_ringparam *ring)
1336 {
1337 	ring->rx_max_pending = CP_RX_RING_SIZE;
1338 	ring->tx_max_pending = CP_TX_RING_SIZE;
1339 	ring->rx_pending = CP_RX_RING_SIZE;
1340 	ring->tx_pending = CP_TX_RING_SIZE;
1341 }
1342 
1343 static int cp_get_regs_len(struct net_device *dev)
1344 {
1345 	return CP_REGS_SIZE;
1346 }
1347 
1348 static int cp_get_sset_count (struct net_device *dev, int sset)
1349 {
1350 	switch (sset) {
1351 	case ETH_SS_STATS:
1352 		return CP_NUM_STATS;
1353 	default:
1354 		return -EOPNOTSUPP;
1355 	}
1356 }
1357 
1358 static int cp_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1359 {
1360 	struct cp_private *cp = netdev_priv(dev);
1361 	int rc;
1362 	unsigned long flags;
1363 
1364 	spin_lock_irqsave(&cp->lock, flags);
1365 	rc = mii_ethtool_gset(&cp->mii_if, cmd);
1366 	spin_unlock_irqrestore(&cp->lock, flags);
1367 
1368 	return rc;
1369 }
1370 
1371 static int cp_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1372 {
1373 	struct cp_private *cp = netdev_priv(dev);
1374 	int rc;
1375 	unsigned long flags;
1376 
1377 	spin_lock_irqsave(&cp->lock, flags);
1378 	rc = mii_ethtool_sset(&cp->mii_if, cmd);
1379 	spin_unlock_irqrestore(&cp->lock, flags);
1380 
1381 	return rc;
1382 }
1383 
1384 static int cp_nway_reset(struct net_device *dev)
1385 {
1386 	struct cp_private *cp = netdev_priv(dev);
1387 	return mii_nway_restart(&cp->mii_if);
1388 }
1389 
1390 static u32 cp_get_msglevel(struct net_device *dev)
1391 {
1392 	struct cp_private *cp = netdev_priv(dev);
1393 	return cp->msg_enable;
1394 }
1395 
1396 static void cp_set_msglevel(struct net_device *dev, u32 value)
1397 {
1398 	struct cp_private *cp = netdev_priv(dev);
1399 	cp->msg_enable = value;
1400 }
1401 
1402 static int cp_set_features(struct net_device *dev, netdev_features_t features)
1403 {
1404 	struct cp_private *cp = netdev_priv(dev);
1405 	unsigned long flags;
1406 
1407 	if (!((dev->features ^ features) & NETIF_F_RXCSUM))
1408 		return 0;
1409 
1410 	spin_lock_irqsave(&cp->lock, flags);
1411 
1412 	if (features & NETIF_F_RXCSUM)
1413 		cp->cpcmd |= RxChkSum;
1414 	else
1415 		cp->cpcmd &= ~RxChkSum;
1416 
1417 	if (features & NETIF_F_HW_VLAN_RX)
1418 		cp->cpcmd |= RxVlanOn;
1419 	else
1420 		cp->cpcmd &= ~RxVlanOn;
1421 
1422 	cpw16_f(CpCmd, cp->cpcmd);
1423 	spin_unlock_irqrestore(&cp->lock, flags);
1424 
1425 	return 0;
1426 }
1427 
1428 static void cp_get_regs(struct net_device *dev, struct ethtool_regs *regs,
1429 		        void *p)
1430 {
1431 	struct cp_private *cp = netdev_priv(dev);
1432 	unsigned long flags;
1433 
1434 	if (regs->len < CP_REGS_SIZE)
1435 		return /* -EINVAL */;
1436 
1437 	regs->version = CP_REGS_VER;
1438 
1439 	spin_lock_irqsave(&cp->lock, flags);
1440 	memcpy_fromio(p, cp->regs, CP_REGS_SIZE);
1441 	spin_unlock_irqrestore(&cp->lock, flags);
1442 }
1443 
1444 static void cp_get_wol (struct net_device *dev, struct ethtool_wolinfo *wol)
1445 {
1446 	struct cp_private *cp = netdev_priv(dev);
1447 	unsigned long flags;
1448 
1449 	spin_lock_irqsave (&cp->lock, flags);
1450 	netdev_get_wol (cp, wol);
1451 	spin_unlock_irqrestore (&cp->lock, flags);
1452 }
1453 
1454 static int cp_set_wol (struct net_device *dev, struct ethtool_wolinfo *wol)
1455 {
1456 	struct cp_private *cp = netdev_priv(dev);
1457 	unsigned long flags;
1458 	int rc;
1459 
1460 	spin_lock_irqsave (&cp->lock, flags);
1461 	rc = netdev_set_wol (cp, wol);
1462 	spin_unlock_irqrestore (&cp->lock, flags);
1463 
1464 	return rc;
1465 }
1466 
1467 static void cp_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
1468 {
1469 	switch (stringset) {
1470 	case ETH_SS_STATS:
1471 		memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
1472 		break;
1473 	default:
1474 		BUG();
1475 		break;
1476 	}
1477 }
1478 
1479 static void cp_get_ethtool_stats (struct net_device *dev,
1480 				  struct ethtool_stats *estats, u64 *tmp_stats)
1481 {
1482 	struct cp_private *cp = netdev_priv(dev);
1483 	struct cp_dma_stats *nic_stats;
1484 	dma_addr_t dma;
1485 	int i;
1486 
1487 	nic_stats = dma_alloc_coherent(&cp->pdev->dev, sizeof(*nic_stats),
1488 				       &dma, GFP_KERNEL);
1489 	if (!nic_stats)
1490 		return;
1491 
1492 	/* begin NIC statistics dump */
1493 	cpw32(StatsAddr + 4, (u64)dma >> 32);
1494 	cpw32(StatsAddr, ((u64)dma & DMA_BIT_MASK(32)) | DumpStats);
1495 	cpr32(StatsAddr);
1496 
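	/* The chip clears DumpStats once the DMA dump has completed; poll
	 * for that for up to ~10 ms (1000 iterations of 10 us).
	 */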
1497 	for (i = 0; i < 1000; i++) {
1498 		if ((cpr32(StatsAddr) & DumpStats) == 0)
1499 			break;
1500 		udelay(10);
1501 	}
1502 	cpw32(StatsAddr, 0);
1503 	cpw32(StatsAddr + 4, 0);
1504 	cpr32(StatsAddr);
1505 
1506 	i = 0;
1507 	tmp_stats[i++] = le64_to_cpu(nic_stats->tx_ok);
1508 	tmp_stats[i++] = le64_to_cpu(nic_stats->rx_ok);
1509 	tmp_stats[i++] = le64_to_cpu(nic_stats->tx_err);
1510 	tmp_stats[i++] = le32_to_cpu(nic_stats->rx_err);
1511 	tmp_stats[i++] = le16_to_cpu(nic_stats->rx_fifo);
1512 	tmp_stats[i++] = le16_to_cpu(nic_stats->frame_align);
1513 	tmp_stats[i++] = le32_to_cpu(nic_stats->tx_ok_1col);
1514 	tmp_stats[i++] = le32_to_cpu(nic_stats->tx_ok_mcol);
1515 	tmp_stats[i++] = le64_to_cpu(nic_stats->rx_ok_phys);
1516 	tmp_stats[i++] = le64_to_cpu(nic_stats->rx_ok_bcast);
1517 	tmp_stats[i++] = le32_to_cpu(nic_stats->rx_ok_mcast);
1518 	tmp_stats[i++] = le16_to_cpu(nic_stats->tx_abort);
1519 	tmp_stats[i++] = le16_to_cpu(nic_stats->tx_underrun);
1520 	tmp_stats[i++] = cp->cp_stats.rx_frags;
1521 	BUG_ON(i != CP_NUM_STATS);
1522 
1523 	dma_free_coherent(&cp->pdev->dev, sizeof(*nic_stats), nic_stats, dma);
1524 }
1525 
1526 static const struct ethtool_ops cp_ethtool_ops = {
1527 	.get_drvinfo		= cp_get_drvinfo,
1528 	.get_regs_len		= cp_get_regs_len,
1529 	.get_sset_count		= cp_get_sset_count,
1530 	.get_settings		= cp_get_settings,
1531 	.set_settings		= cp_set_settings,
1532 	.nway_reset		= cp_nway_reset,
1533 	.get_link		= ethtool_op_get_link,
1534 	.get_msglevel		= cp_get_msglevel,
1535 	.set_msglevel		= cp_set_msglevel,
1536 	.get_regs		= cp_get_regs,
1537 	.get_wol		= cp_get_wol,
1538 	.set_wol		= cp_set_wol,
1539 	.get_strings		= cp_get_strings,
1540 	.get_ethtool_stats	= cp_get_ethtool_stats,
1541 	.get_eeprom_len		= cp_get_eeprom_len,
1542 	.get_eeprom		= cp_get_eeprom,
1543 	.set_eeprom		= cp_set_eeprom,
1544 	.get_ringparam		= cp_get_ringparam,
1545 };
1546 
1547 static int cp_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
1548 {
1549 	struct cp_private *cp = netdev_priv(dev);
1550 	int rc;
1551 	unsigned long flags;
1552 
1553 	if (!netif_running(dev))
1554 		return -EINVAL;
1555 
1556 	spin_lock_irqsave(&cp->lock, flags);
1557 	rc = generic_mii_ioctl(&cp->mii_if, if_mii(rq), cmd, NULL);
1558 	spin_unlock_irqrestore(&cp->lock, flags);
1559 	return rc;
1560 }
1561 
1562 static int cp_set_mac_address(struct net_device *dev, void *p)
1563 {
1564 	struct cp_private *cp = netdev_priv(dev);
1565 	struct sockaddr *addr = p;
1566 
1567 	if (!is_valid_ether_addr(addr->sa_data))
1568 		return -EADDRNOTAVAIL;
1569 
1570 	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1571 
1572 	spin_lock_irq(&cp->lock);
1573 
1574 	cpw8_f(Cfg9346, Cfg9346_Unlock);
1575 	cpw32_f(MAC0 + 0, le32_to_cpu (*(__le32 *) (dev->dev_addr + 0)));
1576 	cpw32_f(MAC0 + 4, le32_to_cpu (*(__le32 *) (dev->dev_addr + 4)));
1577 	cpw8_f(Cfg9346, Cfg9346_Lock);
1578 
1579 	spin_unlock_irq(&cp->lock);
1580 
1581 	return 0;
1582 }
1583 
1584 /* Serial EEPROM section. */
1585 
1586 /*  EEPROM_Ctrl bits. */
1587 #define EE_SHIFT_CLK	0x04	/* EEPROM shift clock. */
1588 #define EE_CS			0x08	/* EEPROM chip select. */
1589 #define EE_DATA_WRITE	0x02	/* EEPROM chip data in. */
1590 #define EE_WRITE_0		0x00
1591 #define EE_WRITE_1		0x02
1592 #define EE_DATA_READ	0x01	/* EEPROM chip data out. */
1593 #define EE_ENB			(0x80 | EE_CS)
1594 
1595 /* Delay between EEPROM clock transitions.
1596    No extra delay is needed with 33 MHz PCI, but 66 MHz may change this.
1597  */
1598 
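/* Note: eeprom_delay() relies on a local "ee_addr" variable being in scope
 * at each call site; the dummy read provides the delay.
 */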
1599 #define eeprom_delay()	readb(ee_addr)
1600 
1601 /* The EEPROM commands include the always-set leading bit. */
1602 #define EE_EXTEND_CMD	(4)
1603 #define EE_WRITE_CMD	(5)
1604 #define EE_READ_CMD		(6)
1605 #define EE_ERASE_CMD	(7)
1606 
1607 #define EE_EWDS_ADDR	(0)
1608 #define EE_WRAL_ADDR	(1)
1609 #define EE_ERAL_ADDR	(2)
1610 #define EE_EWEN_ADDR	(3)
1611 
1612 #define CP_EEPROM_MAGIC PCI_DEVICE_ID_REALTEK_8139
1613 
1614 static void eeprom_cmd_start(void __iomem *ee_addr)
1615 {
1616 	writeb (EE_ENB & ~EE_CS, ee_addr);
1617 	writeb (EE_ENB, ee_addr);
1618 	eeprom_delay ();
1619 }
1620 
1621 static void eeprom_cmd(void __iomem *ee_addr, int cmd, int cmd_len)
1622 {
1623 	int i;
1624 
1625 	/* Shift the command bits out. */
1626 	for (i = cmd_len - 1; i >= 0; i--) {
1627 		int dataval = (cmd & (1 << i)) ? EE_DATA_WRITE : 0;
1628 		writeb (EE_ENB | dataval, ee_addr);
1629 		eeprom_delay ();
1630 		writeb (EE_ENB | dataval | EE_SHIFT_CLK, ee_addr);
1631 		eeprom_delay ();
1632 	}
1633 	writeb (EE_ENB, ee_addr);
1634 	eeprom_delay ();
1635 }
1636 
1637 static void eeprom_cmd_end(void __iomem *ee_addr)
1638 {
1639 	writeb(0, ee_addr);
1640 	eeprom_delay ();
1641 }
1642 
1643 static void eeprom_extend_cmd(void __iomem *ee_addr, int extend_cmd,
1644 			      int addr_len)
1645 {
1646 	int cmd = (EE_EXTEND_CMD << addr_len) | (extend_cmd << (addr_len - 2));
1647 
1648 	eeprom_cmd_start(ee_addr);
1649 	eeprom_cmd(ee_addr, cmd, 3 + addr_len);
1650 	eeprom_cmd_end(ee_addr);
1651 }
1652 
1653 static u16 read_eeprom (void __iomem *ioaddr, int location, int addr_len)
1654 {
1655 	int i;
1656 	u16 retval = 0;
1657 	void __iomem *ee_addr = ioaddr + Cfg9346;
1658 	int read_cmd = location | (EE_READ_CMD << addr_len);
1659 
1660 	eeprom_cmd_start(ee_addr);
1661 	eeprom_cmd(ee_addr, read_cmd, 3 + addr_len);
1662 
1663 	for (i = 16; i > 0; i--) {
1664 		writeb (EE_ENB | EE_SHIFT_CLK, ee_addr);
1665 		eeprom_delay ();
1666 		retval =
1667 		    (retval << 1) | ((readb (ee_addr) & EE_DATA_READ) ? 1 :
1668 				     0);
1669 		writeb (EE_ENB, ee_addr);
1670 		eeprom_delay ();
1671 	}
1672 
1673 	eeprom_cmd_end(ee_addr);
1674 
1675 	return retval;
1676 }
1677 
1678 static void write_eeprom(void __iomem *ioaddr, int location, u16 val,
1679 			 int addr_len)
1680 {
1681 	int i;
1682 	void __iomem *ee_addr = ioaddr + Cfg9346;
1683 	int write_cmd = location | (EE_WRITE_CMD << addr_len);
1684 
1685 	eeprom_extend_cmd(ee_addr, EE_EWEN_ADDR, addr_len);
1686 
1687 	eeprom_cmd_start(ee_addr);
1688 	eeprom_cmd(ee_addr, write_cmd, 3 + addr_len);
1689 	eeprom_cmd(ee_addr, val, 16);
1690 	eeprom_cmd_end(ee_addr);
1691 
1692 	eeprom_cmd_start(ee_addr);
1693 	for (i = 0; i < 20000; i++)
1694 		if (readb(ee_addr) & EE_DATA_READ)
1695 			break;
1696 	eeprom_cmd_end(ee_addr);
1697 
1698 	eeprom_extend_cmd(ee_addr, EE_EWDS_ADDR, addr_len);
1699 }
1700 
1701 static int cp_get_eeprom_len(struct net_device *dev)
1702 {
1703 	struct cp_private *cp = netdev_priv(dev);
1704 	int size;
1705 
1706 	spin_lock_irq(&cp->lock);
1707 	size = read_eeprom(cp->regs, 0, 8) == 0x8129 ? 256 : 128;
1708 	spin_unlock_irq(&cp->lock);
1709 
1710 	return size;
1711 }
1712 
1713 static int cp_get_eeprom(struct net_device *dev,
1714 			 struct ethtool_eeprom *eeprom, u8 *data)
1715 {
1716 	struct cp_private *cp = netdev_priv(dev);
1717 	unsigned int addr_len;
1718 	u16 val;
1719 	u32 offset = eeprom->offset >> 1;
1720 	u32 len = eeprom->len;
1721 	u32 i = 0;
1722 
1723 	eeprom->magic = CP_EEPROM_MAGIC;
1724 
1725 	spin_lock_irq(&cp->lock);
1726 
1727 	addr_len = read_eeprom(cp->regs, 0, 8) == 0x8129 ? 8 : 6;
1728 
1729 	if (eeprom->offset & 1) {
1730 		val = read_eeprom(cp->regs, offset, addr_len);
1731 		data[i++] = (u8)(val >> 8);
1732 		offset++;
1733 	}
1734 
1735 	while (i < len - 1) {
1736 		val = read_eeprom(cp->regs, offset, addr_len);
1737 		data[i++] = (u8)val;
1738 		data[i++] = (u8)(val >> 8);
1739 		offset++;
1740 	}
1741 
1742 	if (i < len) {
1743 		val = read_eeprom(cp->regs, offset, addr_len);
1744 		data[i] = (u8)val;
1745 	}
1746 
1747 	spin_unlock_irq(&cp->lock);
1748 	return 0;
1749 }
1750 
1751 static int cp_set_eeprom(struct net_device *dev,
1752 			 struct ethtool_eeprom *eeprom, u8 *data)
1753 {
1754 	struct cp_private *cp = netdev_priv(dev);
1755 	unsigned int addr_len;
1756 	u16 val;
1757 	u32 offset = eeprom->offset >> 1;
1758 	u32 len = eeprom->len;
1759 	u32 i = 0;
1760 
1761 	if (eeprom->magic != CP_EEPROM_MAGIC)
1762 		return -EINVAL;
1763 
1764 	spin_lock_irq(&cp->lock);
1765 
1766 	addr_len = read_eeprom(cp->regs, 0, 8) == 0x8129 ? 8 : 6;
1767 
1768 	if (eeprom->offset & 1) {
1769 		val = read_eeprom(cp->regs, offset, addr_len) & 0xff;
1770 		val |= (u16)data[i++] << 8;
1771 		write_eeprom(cp->regs, offset, val, addr_len);
1772 		offset++;
1773 	}
1774 
1775 	while (i < len - 1) {
1776 		val = (u16)data[i++];
1777 		val |= (u16)data[i++] << 8;
1778 		write_eeprom(cp->regs, offset, val, addr_len);
1779 		offset++;
1780 	}
1781 
1782 	if (i < len) {
1783 		val = read_eeprom(cp->regs, offset, addr_len) & 0xff00;
1784 		val |= (u16)data[i];
1785 		write_eeprom(cp->regs, offset, val, addr_len);
1786 	}
1787 
1788 	spin_unlock_irq(&cp->lock);
1789 	return 0;
1790 }
1791 
1792 /* Put the board into D3hot state and wait for a WakeUp signal */
1793 static void cp_set_d3_state (struct cp_private *cp)
1794 {
1795 	pci_enable_wake (cp->pdev, 0, 1); /* Enable PME# generation */
1796 	pci_set_power_state (cp->pdev, PCI_D3hot);
1797 }
1798 
1799 static const struct net_device_ops cp_netdev_ops = {
1800 	.ndo_open		= cp_open,
1801 	.ndo_stop		= cp_close,
1802 	.ndo_validate_addr	= eth_validate_addr,
1803 	.ndo_set_mac_address 	= cp_set_mac_address,
1804 	.ndo_set_rx_mode	= cp_set_rx_mode,
1805 	.ndo_get_stats		= cp_get_stats,
1806 	.ndo_do_ioctl		= cp_ioctl,
1807 	.ndo_start_xmit		= cp_start_xmit,
1808 	.ndo_tx_timeout		= cp_tx_timeout,
1809 	.ndo_set_features	= cp_set_features,
1810 #ifdef BROKEN
1811 	.ndo_change_mtu		= cp_change_mtu,
1812 #endif
1813 
1814 #ifdef CONFIG_NET_POLL_CONTROLLER
1815 	.ndo_poll_controller	= cp_poll_controller,
1816 #endif
1817 };
1818 
1819 static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
1820 {
1821 	struct net_device *dev;
1822 	struct cp_private *cp;
1823 	int rc;
1824 	void __iomem *regs;
1825 	resource_size_t pciaddr;
1826 	unsigned int addr_len, i, pci_using_dac;
1827 
1828 #ifndef MODULE
1829 	static int version_printed;
1830 	if (version_printed++ == 0)
1831 		pr_info("%s", version);
1832 #endif
1833 
1834 	if (pdev->vendor == PCI_VENDOR_ID_REALTEK &&
1835 	    pdev->device == PCI_DEVICE_ID_REALTEK_8139 && pdev->revision < 0x20) {
1836 		dev_info(&pdev->dev,
1837 			 "This (id %04x:%04x rev %02x) is not an 8139C+ compatible chip, use 8139too\n",
1838 			 pdev->vendor, pdev->device, pdev->revision);
1839 		return -ENODEV;
1840 	}
1841 
1842 	dev = alloc_etherdev(sizeof(struct cp_private));
1843 	if (!dev)
1844 		return -ENOMEM;
1845 	SET_NETDEV_DEV(dev, &pdev->dev);
1846 
1847 	cp = netdev_priv(dev);
1848 	cp->pdev = pdev;
1849 	cp->dev = dev;
1850 	cp->msg_enable = (debug < 0 ? CP_DEF_MSG_ENABLE : debug);
1851 	spin_lock_init (&cp->lock);
1852 	cp->mii_if.dev = dev;
1853 	cp->mii_if.mdio_read = mdio_read;
1854 	cp->mii_if.mdio_write = mdio_write;
1855 	cp->mii_if.phy_id = CP_INTERNAL_PHY;
1856 	cp->mii_if.phy_id_mask = 0x1f;
1857 	cp->mii_if.reg_num_mask = 0x1f;
1858 	cp_set_rxbufsize(cp);
1859 
1860 	rc = pci_enable_device(pdev);
1861 	if (rc)
1862 		goto err_out_free;
1863 
1864 	rc = pci_set_mwi(pdev);
1865 	if (rc)
1866 		goto err_out_disable;
1867 
1868 	rc = pci_request_regions(pdev, DRV_NAME);
1869 	if (rc)
1870 		goto err_out_mwi;
1871 
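	/* The C+ register block is reached through the BAR 1 memory window. */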
1872 	pciaddr = pci_resource_start(pdev, 1);
1873 	if (!pciaddr) {
1874 		rc = -EIO;
1875 		dev_err(&pdev->dev, "no MMIO resource\n");
1876 		goto err_out_res;
1877 	}
1878 	if (pci_resource_len(pdev, 1) < CP_REGS_SIZE) {
1879 		rc = -EIO;
1880 		dev_err(&pdev->dev, "MMIO resource (%llx) too small\n",
1881 			(unsigned long long)pci_resource_len(pdev, 1));
1882 		goto err_out_res;
1883 	}
1884 
1885 	/* Configure DMA attributes. */
1886 	if ((sizeof(dma_addr_t) > 4) &&
1887 	    !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) &&
1888 	    !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
1889 		pci_using_dac = 1;
1890 	} else {
1891 		pci_using_dac = 0;
1892 
1893 		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
1894 		if (rc) {
1895 			dev_err(&pdev->dev,
1896 				"No usable DMA configuration, aborting\n");
1897 			goto err_out_res;
1898 		}
1899 		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
1900 		if (rc) {
1901 			dev_err(&pdev->dev,
1902 				"No usable consistent DMA configuration, aborting\n");
1903 			goto err_out_res;
1904 		}
1905 	}
1906 
1907 	cp->cpcmd = (pci_using_dac ? PCIDAC : 0) |
1908 		    PCIMulRW | RxChkSum | CpRxOn | CpTxOn;
1909 
1910 	dev->features |= NETIF_F_RXCSUM;
1911 	dev->hw_features |= NETIF_F_RXCSUM;
1912 
1913 	regs = ioremap(pciaddr, CP_REGS_SIZE);
1914 	if (!regs) {
1915 		rc = -EIO;
1916 		dev_err(&pdev->dev, "Cannot map PCI MMIO (%Lx@%Lx)\n",
1917 			(unsigned long long)pci_resource_len(pdev, 1),
1918 			(unsigned long long)pciaddr);
1919 		goto err_out_res;
1920 	}
1921 	cp->regs = regs;
1922 
1923 	cp_stop_hw(cp);
1924 
1925 	/* read MAC address from EEPROM */
1926 	addr_len = read_eeprom (regs, 0, 8) == 0x8129 ? 8 : 6;
1927 	for (i = 0; i < 3; i++)
1928 		((__le16 *) (dev->dev_addr))[i] =
1929 		    cpu_to_le16(read_eeprom (regs, i + 7, addr_len));
1930 	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
1931 
1932 	dev->netdev_ops = &cp_netdev_ops;
1933 	netif_napi_add(dev, &cp->napi, cp_rx_poll, 16);
1934 	dev->ethtool_ops = &cp_ethtool_ops;
1935 	dev->watchdog_timeo = TX_TIMEOUT;
1936 
1937 	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
1938 
1939 	if (pci_using_dac)
1940 		dev->features |= NETIF_F_HIGHDMA;
1941 
1942 	/* disabled by default until verified */
1943 	dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
1944 		NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
1945 	dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
1946 		NETIF_F_HIGHDMA;
1947 
1948 	rc = register_netdev(dev);
1949 	if (rc)
1950 		goto err_out_iomap;
1951 
1952 	netdev_info(dev, "RTL-8139C+ at 0x%p, %pM, IRQ %d\n",
1953 		    regs, dev->dev_addr, pdev->irq);
1954 
1955 	pci_set_drvdata(pdev, dev);
1956 
1957 	/* enable busmastering and memory-write-invalidate */
1958 	pci_set_master(pdev);
1959 
1960 	if (cp->wol_enabled)
1961 		cp_set_d3_state (cp);
1962 
1963 	return 0;
1964 
1965 err_out_iomap:
1966 	iounmap(regs);
1967 err_out_res:
1968 	pci_release_regions(pdev);
1969 err_out_mwi:
1970 	pci_clear_mwi(pdev);
1971 err_out_disable:
1972 	pci_disable_device(pdev);
1973 err_out_free:
1974 	free_netdev(dev);
1975 	return rc;
1976 }
1977 
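/* PCI remove: tear down in the reverse order of cp_init_one.  A board that
 * was left in a low-power state for WOL is brought back to D0 before the
 * PCI resources are released.
 */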
1978 static void cp_remove_one (struct pci_dev *pdev)
1979 {
1980 	struct net_device *dev = pci_get_drvdata(pdev);
1981 	struct cp_private *cp = netdev_priv(dev);
1982 
1983 	unregister_netdev(dev);
1984 	iounmap(cp->regs);
1985 	if (cp->wol_enabled)
1986 		pci_set_power_state (pdev, PCI_D0);
1987 	pci_release_regions(pdev);
1988 	pci_clear_mwi(pdev);
1989 	pci_disable_device(pdev);
1990 	pci_set_drvdata(pdev, NULL);
1991 	free_netdev(dev);
1992 }
1993 
1994 #ifdef CONFIG_PM
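/* Legacy PCI PM suspend hook: detach the interface, mask interrupts and
 * stop Rx/Tx, then save PCI state and arm PME# (if WOL is enabled) before
 * entering the sleep state chosen by the PCI core.
 */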
1995 static int cp_suspend (struct pci_dev *pdev, pm_message_t state)
1996 {
1997 	struct net_device *dev = pci_get_drvdata(pdev);
1998 	struct cp_private *cp = netdev_priv(dev);
1999 	unsigned long flags;
2000 
2001 	if (!netif_running(dev))
2002 		return 0;
2003 
2004 	netif_device_detach (dev);
2005 	netif_stop_queue (dev);
2006 
2007 	spin_lock_irqsave (&cp->lock, flags);
2008 
2009 	/* Disable Rx and Tx */
2010 	cpw16 (IntrMask, 0);
2011 	cpw8  (Cmd, cpr8 (Cmd) & ~(RxOn | TxOn));
2012 
2013 	spin_unlock_irqrestore (&cp->lock, flags);
2014 
2015 	pci_save_state(pdev);
2016 	pci_enable_wake(pdev, pci_choose_state(pdev, state), cp->wol_enabled);
2017 	pci_set_power_state(pdev, pci_choose_state(pdev, state));
2018 
2019 	return 0;
2020 }
2021 
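/* Legacy PCI PM resume hook: restore power and PCI state, rebuild the ring
 * indices, reinitialise the hardware, re-enable interrupts and recheck the
 * link.
 */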
2022 static int cp_resume (struct pci_dev *pdev)
2023 {
2024 	struct net_device *dev = pci_get_drvdata (pdev);
2025 	struct cp_private *cp = netdev_priv(dev);
2026 	unsigned long flags;
2027 
2028 	if (!netif_running(dev))
2029 		return 0;
2030 
2031 	netif_device_attach (dev);
2032 
2033 	pci_set_power_state(pdev, PCI_D0);
2034 	pci_restore_state(pdev);
2035 	pci_enable_wake(pdev, PCI_D0, 0);
2036 
2037 	/* FIXME: resume may misbehave if the Rx ring buffer was depleted */
2038 	cp_init_rings_index (cp);
2039 	cp_init_hw (cp);
2040 	cp_enable_irq(cp);
2041 	netif_start_queue (dev);
2042 
2043 	spin_lock_irqsave (&cp->lock, flags);
2044 
2045 	mii_check_media(&cp->mii_if, netif_msg_link(cp), false);
2046 
2047 	spin_unlock_irqrestore (&cp->lock, flags);
2048 
2049 	return 0;
2050 }
2051 #endif /* CONFIG_PM */
2052 
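/* Driver glue: match the IDs in cp_pci_tbl and dispatch probe/remove (and,
 * under CONFIG_PM, suspend/resume) to the handlers above.
 */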
2053 static struct pci_driver cp_driver = {
2054 	.name         = DRV_NAME,
2055 	.id_table     = cp_pci_tbl,
2056 	.probe        = cp_init_one,
2057 	.remove       = cp_remove_one,
2058 #ifdef CONFIG_PM
2059 	.resume       = cp_resume,
2060 	.suspend      = cp_suspend,
2061 #endif
2062 };
2063 
2064 static int __init cp_init (void)
2065 {
2066 #ifdef MODULE
2067 	pr_info("%s", version);
2068 #endif
2069 	return pci_register_driver(&cp_driver);
2070 }
2071 
2072 static void __exit cp_exit (void)
2073 {
2074 	pci_unregister_driver (&cp_driver);
2075 }
2076 
2077 module_init(cp_init);
2078 module_exit(cp_exit);
2079