1 /* 8139cp.c: A Linux PCI Ethernet driver for the RealTek 8139C+ chips. */
2 /*
3 	Copyright 2001-2004 Jeff Garzik <jgarzik@pobox.com>
4 
5 	Copyright (C) 2001, 2002 David S. Miller (davem@redhat.com) [tg3.c]
6 	Copyright (C) 2000, 2001 David S. Miller (davem@redhat.com) [sungem.c]
7 	Copyright 2001 Manfred Spraul				    [natsemi.c]
8 	Copyright 1999-2001 by Donald Becker.			    [natsemi.c]
	Written 1997-2001 by Donald Becker.			    [8139too.c]
10 	Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>. [acenic.c]
11 
12 	This software may be used and distributed according to the terms of
13 	the GNU General Public License (GPL), incorporated herein by reference.
14 	Drivers based on or derived from this code fall under the GPL and must
15 	retain the authorship, copyright and license notice.  This file is not
16 	a complete program and may only be used when the entire operating
17 	system is licensed under the GPL.
18 
19 	See the file COPYING in this distribution for more information.
20 
21 	Contributors:
22 
23 		Wake-on-LAN support - Felipe Damasio <felipewd@terra.com.br>
24 		PCI suspend/resume  - Felipe Damasio <felipewd@terra.com.br>
25 		LinkChg interrupt   - Felipe Damasio <felipewd@terra.com.br>
26 
27 	TODO:
28 	* Test Tx checksumming thoroughly
29 
30 	Low priority TODO:
31 	* Complete reset on PciErr
32 	* Consider Rx interrupt mitigation using TimerIntr
33 	* Investigate using skb->priority with h/w VLAN priority
34 	* Investigate using High Priority Tx Queue with skb->priority
35 	* Adjust Rx FIFO threshold and Max Rx DMA burst on Rx FIFO error
36 	* Adjust Tx FIFO threshold and Max Tx DMA burst on Tx FIFO error
37 	* Implement Tx software interrupt mitigation via
38 	  Tx descriptor bit
39 	* The real minimum of CP_MIN_MTU is 4 bytes.  However,
40 	  for this to be supported, one must(?) turn on packet padding.
41 	* Support external MII transceivers (patch available)
42 
43 	NOTES:
44 	* TX checksumming is considered experimental.  It is off by
	  default; use ethtool to turn it on.
46 
47  */
48 
49 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
50 
51 #define DRV_NAME		"8139cp"
52 #define DRV_VERSION		"1.3"
53 #define DRV_RELDATE		"Mar 22, 2004"
54 
55 
56 #include <linux/module.h>
57 #include <linux/moduleparam.h>
58 #include <linux/kernel.h>
59 #include <linux/compiler.h>
60 #include <linux/netdevice.h>
61 #include <linux/etherdevice.h>
62 #include <linux/init.h>
63 #include <linux/interrupt.h>
64 #include <linux/pci.h>
65 #include <linux/dma-mapping.h>
66 #include <linux/delay.h>
67 #include <linux/ethtool.h>
68 #include <linux/gfp.h>
69 #include <linux/mii.h>
70 #include <linux/if_vlan.h>
71 #include <linux/crc32.h>
72 #include <linux/in.h>
73 #include <linux/ip.h>
74 #include <linux/tcp.h>
75 #include <linux/udp.h>
76 #include <linux/cache.h>
77 #include <asm/io.h>
78 #include <asm/irq.h>
79 #include <asm/uaccess.h>
80 
81 /* These identify the driver base version and may not be removed. */
82 static char version[] =
83 DRV_NAME ": 10/100 PCI Ethernet driver v" DRV_VERSION " (" DRV_RELDATE ")\n";
84 
85 MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>");
86 MODULE_DESCRIPTION("RealTek RTL-8139C+ series 10/100 PCI Ethernet driver");
87 MODULE_VERSION(DRV_VERSION);
88 MODULE_LICENSE("GPL");
89 
90 static int debug = -1;
91 module_param(debug, int, 0);
92 MODULE_PARM_DESC (debug, "8139cp: bitmapped message enable number");
93 
94 /* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
95    The RTL chips use a 64 element hash table based on the Ethernet CRC.  */
96 static int multicast_filter_limit = 32;
97 module_param(multicast_filter_limit, int, 0);
98 MODULE_PARM_DESC (multicast_filter_limit, "8139cp: maximum number of filtered multicast addresses");
99 
100 #define CP_DEF_MSG_ENABLE	(NETIF_MSG_DRV		| \
101 				 NETIF_MSG_PROBE 	| \
102 				 NETIF_MSG_LINK)
103 #define CP_NUM_STATS		14	/* struct cp_dma_stats, plus one */
104 #define CP_STATS_SIZE		64	/* size in bytes of DMA stats block */
105 #define CP_REGS_SIZE		(0xff + 1)
106 #define CP_REGS_VER		1		/* version 1 */
107 #define CP_RX_RING_SIZE		64
108 #define CP_TX_RING_SIZE		64
109 #define CP_RING_BYTES		\
110 		((sizeof(struct cp_desc) * CP_RX_RING_SIZE) +	\
111 		 (sizeof(struct cp_desc) * CP_TX_RING_SIZE) +	\
112 		 CP_STATS_SIZE)
113 #define NEXT_TX(N)		(((N) + 1) & (CP_TX_RING_SIZE - 1))
114 #define NEXT_RX(N)		(((N) + 1) & (CP_RX_RING_SIZE - 1))
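/* One Tx descriptor is always left unused so that a completely full ring
 * can be distinguished from an empty one; hence the "- 1" terms below.
 */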
115 #define TX_BUFFS_AVAIL(CP)					\
116 	(((CP)->tx_tail <= (CP)->tx_head) ?			\
117 	  (CP)->tx_tail + (CP_TX_RING_SIZE - 1) - (CP)->tx_head :	\
118 	  (CP)->tx_tail - (CP)->tx_head - 1)
119 
120 #define PKT_BUF_SZ		1536	/* Size of each temporary Rx buffer.*/
121 #define CP_INTERNAL_PHY		32
122 
123 /* The following settings are log_2(bytes)-4:  0 == 16 bytes .. 6==1024, 7==end of packet. */
124 #define RX_FIFO_THRESH		5	/* Rx buffer level before first PCI xfer.  */
125 #define RX_DMA_BURST		4	/* Maximum PCI burst, '4' is 256 */
126 #define TX_DMA_BURST		6	/* Maximum PCI burst, '6' is 1024 */
127 #define TX_EARLY_THRESH		256	/* Early Tx threshold, in bytes */
128 
129 /* Time in jiffies before concluding the transmitter is hung. */
130 #define TX_TIMEOUT		(6*HZ)
131 
132 /* hardware minimum and maximum for a single frame's data payload */
133 #define CP_MIN_MTU		60	/* TODO: allow lower, but pad */
134 #define CP_MAX_MTU		4096
135 
136 enum {
137 	/* NIC register offsets */
138 	MAC0		= 0x00,	/* Ethernet hardware address. */
139 	MAR0		= 0x08,	/* Multicast filter. */
140 	StatsAddr	= 0x10,	/* 64-bit start addr of 64-byte DMA stats blk */
141 	TxRingAddr	= 0x20, /* 64-bit start addr of Tx ring */
142 	HiTxRingAddr	= 0x28, /* 64-bit start addr of high priority Tx ring */
143 	Cmd		= 0x37, /* Command register */
144 	IntrMask	= 0x3C, /* Interrupt mask */
145 	IntrStatus	= 0x3E, /* Interrupt status */
146 	TxConfig	= 0x40, /* Tx configuration */
147 	ChipVersion	= 0x43, /* 8-bit chip version, inside TxConfig */
148 	RxConfig	= 0x44, /* Rx configuration */
149 	RxMissed	= 0x4C,	/* 24 bits valid, write clears */
150 	Cfg9346		= 0x50, /* EEPROM select/control; Cfg reg [un]lock */
151 	Config1		= 0x52, /* Config1 */
152 	Config3		= 0x59, /* Config3 */
153 	Config4		= 0x5A, /* Config4 */
154 	MultiIntr	= 0x5C, /* Multiple interrupt select */
155 	BasicModeCtrl	= 0x62,	/* MII BMCR */
156 	BasicModeStatus	= 0x64, /* MII BMSR */
157 	NWayAdvert	= 0x66, /* MII ADVERTISE */
158 	NWayLPAR	= 0x68, /* MII LPA */
159 	NWayExpansion	= 0x6A, /* MII Expansion */
160 	Config5		= 0xD8,	/* Config5 */
161 	TxPoll		= 0xD9,	/* Tell chip to check Tx descriptors for work */
162 	RxMaxSize	= 0xDA, /* Max size of an Rx packet (8169 only) */
163 	CpCmd		= 0xE0, /* C+ Command register (C+ mode only) */
164 	IntrMitigate	= 0xE2,	/* rx/tx interrupt mitigation control */
165 	RxRingAddr	= 0xE4, /* 64-bit start addr of Rx ring */
166 	TxThresh	= 0xEC, /* Early Tx threshold */
167 	OldRxBufAddr	= 0x30, /* DMA address of Rx ring buffer (C mode) */
168 	OldTSD0		= 0x10, /* DMA address of first Tx desc (C mode) */
169 
170 	/* Tx and Rx status descriptors */
171 	DescOwn		= (1 << 31), /* Descriptor is owned by NIC */
172 	RingEnd		= (1 << 30), /* End of descriptor ring */
173 	FirstFrag	= (1 << 29), /* First segment of a packet */
174 	LastFrag	= (1 << 28), /* Final segment of a packet */
175 	LargeSend	= (1 << 27), /* TCP Large Send Offload (TSO) */
176 	MSSShift	= 16,	     /* MSS value position */
177 	MSSMask		= 0xfff,     /* MSS value: 11 bits */
178 	TxError		= (1 << 23), /* Tx error summary */
179 	RxError		= (1 << 20), /* Rx error summary */
180 	IPCS		= (1 << 18), /* Calculate IP checksum */
181 	UDPCS		= (1 << 17), /* Calculate UDP/IP checksum */
182 	TCPCS		= (1 << 16), /* Calculate TCP/IP checksum */
183 	TxVlanTag	= (1 << 17), /* Add VLAN tag */
184 	RxVlanTagged	= (1 << 16), /* Rx VLAN tag available */
185 	IPFail		= (1 << 15), /* IP checksum failed */
186 	UDPFail		= (1 << 14), /* UDP/IP checksum failed */
187 	TCPFail		= (1 << 13), /* TCP/IP checksum failed */
188 	NormalTxPoll	= (1 << 6),  /* One or more normal Tx packets to send */
189 	PID1		= (1 << 17), /* 2 protocol id bits:  0==non-IP, */
190 	PID0		= (1 << 16), /* 1==UDP/IP, 2==TCP/IP, 3==IP */
191 	RxProtoTCP	= 1,
192 	RxProtoUDP	= 2,
193 	RxProtoIP	= 3,
194 	TxFIFOUnder	= (1 << 25), /* Tx FIFO underrun */
195 	TxOWC		= (1 << 22), /* Tx Out-of-window collision */
196 	TxLinkFail	= (1 << 21), /* Link failed during Tx of packet */
197 	TxMaxCol	= (1 << 20), /* Tx aborted due to excessive collisions */
198 	TxColCntShift	= 16,	     /* Shift, to get 4-bit Tx collision cnt */
199 	TxColCntMask	= 0x01 | 0x02 | 0x04 | 0x08, /* 4-bit collision count */
200 	RxErrFrame	= (1 << 27), /* Rx frame alignment error */
201 	RxMcast		= (1 << 26), /* Rx multicast packet rcv'd */
202 	RxErrCRC	= (1 << 18), /* Rx CRC error */
203 	RxErrRunt	= (1 << 19), /* Rx error, packet < 64 bytes */
204 	RxErrLong	= (1 << 21), /* Rx error, packet > 4096 bytes */
205 	RxErrFIFO	= (1 << 22), /* Rx error, FIFO overflowed, pkt bad */
206 
207 	/* StatsAddr register */
208 	DumpStats	= (1 << 3),  /* Begin stats dump */
209 
210 	/* RxConfig register */
211 	RxCfgFIFOShift	= 13,	     /* Shift, to get Rx FIFO thresh value */
212 	RxCfgDMAShift	= 8,	     /* Shift, to get Rx Max DMA value */
213 	AcceptErr	= 0x20,	     /* Accept packets with CRC errors */
214 	AcceptRunt	= 0x10,	     /* Accept runt (<64 bytes) packets */
215 	AcceptBroadcast	= 0x08,	     /* Accept broadcast packets */
216 	AcceptMulticast	= 0x04,	     /* Accept multicast packets */
217 	AcceptMyPhys	= 0x02,	     /* Accept pkts with our MAC as dest */
218 	AcceptAllPhys	= 0x01,	     /* Accept all pkts w/ physical dest */
219 
220 	/* IntrMask / IntrStatus registers */
221 	PciErr		= (1 << 15), /* System error on the PCI bus */
222 	TimerIntr	= (1 << 14), /* Asserted when TCTR reaches TimerInt value */
223 	LenChg		= (1 << 13), /* Cable length change */
224 	SWInt		= (1 << 8),  /* Software-requested interrupt */
225 	TxEmpty		= (1 << 7),  /* No Tx descriptors available */
226 	RxFIFOOvr	= (1 << 6),  /* Rx FIFO Overflow */
227 	LinkChg		= (1 << 5),  /* Packet underrun, or link change */
228 	RxEmpty		= (1 << 4),  /* No Rx descriptors available */
229 	TxErr		= (1 << 3),  /* Tx error */
230 	TxOK		= (1 << 2),  /* Tx packet sent */
231 	RxErr		= (1 << 1),  /* Rx error */
232 	RxOK		= (1 << 0),  /* Rx packet received */
233 	IntrResvd	= (1 << 10), /* reserved, according to RealTek engineers,
234 					but hardware likes to raise it */
235 
236 	IntrAll		= PciErr | TimerIntr | LenChg | SWInt | TxEmpty |
237 			  RxFIFOOvr | LinkChg | RxEmpty | TxErr | TxOK |
238 			  RxErr | RxOK | IntrResvd,
239 
240 	/* C mode command register */
241 	CmdReset	= (1 << 4),  /* Enable to reset; self-clearing */
242 	RxOn		= (1 << 3),  /* Rx mode enable */
243 	TxOn		= (1 << 2),  /* Tx mode enable */
244 
245 	/* C+ mode command register */
246 	RxVlanOn	= (1 << 6),  /* Rx VLAN de-tagging enable */
247 	RxChkSum	= (1 << 5),  /* Rx checksum offload enable */
248 	PCIDAC		= (1 << 4),  /* PCI Dual Address Cycle (64-bit PCI) */
249 	PCIMulRW	= (1 << 3),  /* Enable PCI read/write multiple */
250 	CpRxOn		= (1 << 1),  /* Rx mode enable */
251 	CpTxOn		= (1 << 0),  /* Tx mode enable */
252 
	/* Cfg9346 EEPROM control register */
254 	Cfg9346_Lock	= 0x00,	     /* Lock ConfigX/MII register access */
255 	Cfg9346_Unlock	= 0xC0,	     /* Unlock ConfigX/MII register access */
256 
257 	/* TxConfig register */
258 	IFG		= (1 << 25) | (1 << 24), /* standard IEEE interframe gap */
259 	TxDMAShift	= 8,	     /* DMA burst value (0-7) is shift this many bits */
260 
261 	/* Early Tx Threshold register */
262 	TxThreshMask	= 0x3f,	     /* Mask bits 5-0 */
263 	TxThreshMax	= 2048,	     /* Max early Tx threshold */
264 
265 	/* Config1 register */
266 	DriverLoaded	= (1 << 5),  /* Software marker, driver is loaded */
267 	LWACT           = (1 << 4),  /* LWAKE active mode */
268 	PMEnable	= (1 << 0),  /* Enable various PM features of chip */
269 
270 	/* Config3 register */
271 	PARMEnable	= (1 << 6),  /* Enable auto-loading of PHY parms */
	MagicPacket     = (1 << 5),  /* Wake up when a Magic Packet is received */
273 	LinkUp          = (1 << 4),  /* Wake up when the cable connection is re-established */
274 
275 	/* Config4 register */
276 	LWPTN           = (1 << 1),  /* LWAKE Pattern */
277 	LWPME           = (1 << 4),  /* LANWAKE vs PMEB */
278 
279 	/* Config5 register */
280 	BWF             = (1 << 6),  /* Accept Broadcast wakeup frame */
281 	MWF             = (1 << 5),  /* Accept Multicast wakeup frame */
282 	UWF             = (1 << 4),  /* Accept Unicast wakeup frame */
283 	LANWake         = (1 << 1),  /* Enable LANWake signal */
284 	PMEStatus	= (1 << 0),  /* PME status can be reset by PCI RST# */
285 
286 	cp_norx_intr_mask = PciErr | LinkChg | TxOK | TxErr | TxEmpty,
287 	cp_rx_intr_mask = RxOK | RxErr | RxEmpty | RxFIFOOvr,
288 	cp_intr_mask = cp_rx_intr_mask | cp_norx_intr_mask,
289 };
290 
291 static const unsigned int cp_rx_config =
292 	  (RX_FIFO_THRESH << RxCfgFIFOShift) |
293 	  (RX_DMA_BURST << RxCfgDMAShift);
294 
295 struct cp_desc {
296 	__le32		opts1;
297 	__le32		opts2;
298 	__le64		addr;
299 };
300 
301 struct cp_dma_stats {
302 	__le64			tx_ok;
303 	__le64			rx_ok;
304 	__le64			tx_err;
305 	__le32			rx_err;
306 	__le16			rx_fifo;
307 	__le16			frame_align;
308 	__le32			tx_ok_1col;
309 	__le32			tx_ok_mcol;
310 	__le64			rx_ok_phys;
311 	__le64			rx_ok_bcast;
312 	__le32			rx_ok_mcast;
313 	__le16			tx_abort;
314 	__le16			tx_underrun;
315 } __packed;
316 
317 struct cp_extra_stats {
318 	unsigned long		rx_frags;
319 };
320 
321 struct cp_private {
322 	void			__iomem *regs;
323 	struct net_device	*dev;
324 	spinlock_t		lock;
325 	u32			msg_enable;
326 
327 	struct napi_struct	napi;
328 
329 	struct pci_dev		*pdev;
330 	u32			rx_config;
331 	u16			cpcmd;
332 
333 	struct cp_extra_stats	cp_stats;
334 
335 	unsigned		rx_head		____cacheline_aligned;
336 	unsigned		rx_tail;
337 	struct cp_desc		*rx_ring;
338 	struct sk_buff		*rx_skb[CP_RX_RING_SIZE];
339 
340 	unsigned		tx_head		____cacheline_aligned;
341 	unsigned		tx_tail;
342 	struct cp_desc		*tx_ring;
343 	struct sk_buff		*tx_skb[CP_TX_RING_SIZE];
344 
345 	unsigned		rx_buf_sz;
346 	unsigned		wol_enabled : 1; /* Is Wake-on-LAN enabled? */
347 
348 	dma_addr_t		ring_dma;
349 
350 	struct mii_if_info	mii_if;
351 };
352 
353 #define cpr8(reg)	readb(cp->regs + (reg))
354 #define cpr16(reg)	readw(cp->regs + (reg))
355 #define cpr32(reg)	readl(cp->regs + (reg))
356 #define cpw8(reg,val)	writeb((val), cp->regs + (reg))
357 #define cpw16(reg,val)	writew((val), cp->regs + (reg))
358 #define cpw32(reg,val)	writel((val), cp->regs + (reg))
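/* The *_f ("flush") variants read the register back so that the posted
 * MMIO write is known to have reached the chip before they return.
 */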
359 #define cpw8_f(reg,val) do {			\
360 	writeb((val), cp->regs + (reg));	\
361 	readb(cp->regs + (reg));		\
362 	} while (0)
363 #define cpw16_f(reg,val) do {			\
364 	writew((val), cp->regs + (reg));	\
365 	readw(cp->regs + (reg));		\
366 	} while (0)
367 #define cpw32_f(reg,val) do {			\
368 	writel((val), cp->regs + (reg));	\
369 	readl(cp->regs + (reg));		\
370 	} while (0)
371 
372 
373 static void __cp_set_rx_mode (struct net_device *dev);
374 static void cp_tx (struct cp_private *cp);
375 static void cp_clean_rings (struct cp_private *cp);
376 #ifdef CONFIG_NET_POLL_CONTROLLER
377 static void cp_poll_controller(struct net_device *dev);
378 #endif
379 static int cp_get_eeprom_len(struct net_device *dev);
380 static int cp_get_eeprom(struct net_device *dev,
381 			 struct ethtool_eeprom *eeprom, u8 *data);
382 static int cp_set_eeprom(struct net_device *dev,
383 			 struct ethtool_eeprom *eeprom, u8 *data);
384 
385 static DEFINE_PCI_DEVICE_TABLE(cp_pci_tbl) = {
386 	{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK,	PCI_DEVICE_ID_REALTEK_8139), },
387 	{ PCI_DEVICE(PCI_VENDOR_ID_TTTECH,	PCI_DEVICE_ID_TTTECH_MC322), },
388 	{ },
389 };
390 MODULE_DEVICE_TABLE(pci, cp_pci_tbl);
391 
392 static struct {
393 	const char str[ETH_GSTRING_LEN];
394 } ethtool_stats_keys[] = {
395 	{ "tx_ok" },
396 	{ "rx_ok" },
397 	{ "tx_err" },
398 	{ "rx_err" },
399 	{ "rx_fifo" },
400 	{ "frame_align" },
401 	{ "tx_ok_1col" },
402 	{ "tx_ok_mcol" },
403 	{ "rx_ok_phys" },
404 	{ "rx_ok_bcast" },
405 	{ "rx_ok_mcast" },
406 	{ "tx_abort" },
407 	{ "tx_underrun" },
408 	{ "rx_frags" },
409 };
410 
411 
412 static inline void cp_set_rxbufsize (struct cp_private *cp)
413 {
414 	unsigned int mtu = cp->dev->mtu;
415 
416 	if (mtu > ETH_DATA_LEN)
		/* MTU + Ethernet header + 4-byte FCS + optional 4-byte VLAN tag */
418 		cp->rx_buf_sz = mtu + ETH_HLEN + 8;
419 	else
420 		cp->rx_buf_sz = PKT_BUF_SZ;
421 }
422 
423 static inline void cp_rx_skb (struct cp_private *cp, struct sk_buff *skb,
424 			      struct cp_desc *desc)
425 {
426 	u32 opts2 = le32_to_cpu(desc->opts2);
427 
428 	skb->protocol = eth_type_trans (skb, cp->dev);
429 
430 	cp->dev->stats.rx_packets++;
431 	cp->dev->stats.rx_bytes += skb->len;
432 
433 	if (opts2 & RxVlanTagged)
434 		__vlan_hwaccel_put_tag(skb, swab16(opts2 & 0xffff));
435 
436 	napi_gro_receive(&cp->napi, skb);
437 }
438 
439 static void cp_rx_err_acct (struct cp_private *cp, unsigned rx_tail,
440 			    u32 status, u32 len)
441 {
442 	netif_dbg(cp, rx_err, cp->dev, "rx err, slot %d status 0x%x len %d\n",
443 		  rx_tail, status, len);
444 	cp->dev->stats.rx_errors++;
445 	if (status & RxErrFrame)
446 		cp->dev->stats.rx_frame_errors++;
447 	if (status & RxErrCRC)
448 		cp->dev->stats.rx_crc_errors++;
449 	if ((status & RxErrRunt) || (status & RxErrLong))
450 		cp->dev->stats.rx_length_errors++;
451 	if ((status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag))
452 		cp->dev->stats.rx_length_errors++;
453 	if (status & RxErrFIFO)
454 		cp->dev->stats.rx_fifo_errors++;
455 }
456 
457 static inline unsigned int cp_rx_csum_ok (u32 status)
458 {
459 	unsigned int protocol = (status >> 16) & 0x3;
460 
461 	if (((protocol == RxProtoTCP) && !(status & TCPFail)) ||
462 	    ((protocol == RxProtoUDP) && !(status & UDPFail)))
463 		return 1;
464 	else
465 		return 0;
466 }
467 
468 static int cp_rx_poll(struct napi_struct *napi, int budget)
469 {
470 	struct cp_private *cp = container_of(napi, struct cp_private, napi);
471 	struct net_device *dev = cp->dev;
472 	unsigned int rx_tail = cp->rx_tail;
473 	int rx;
474 
475 rx_status_loop:
476 	rx = 0;
477 	cpw16(IntrStatus, cp_rx_intr_mask);
478 
479 	while (1) {
480 		u32 status, len;
481 		dma_addr_t mapping;
482 		struct sk_buff *skb, *new_skb;
483 		struct cp_desc *desc;
484 		const unsigned buflen = cp->rx_buf_sz;
485 
486 		skb = cp->rx_skb[rx_tail];
487 		BUG_ON(!skb);
488 
489 		desc = &cp->rx_ring[rx_tail];
490 		status = le32_to_cpu(desc->opts1);
491 		if (status & DescOwn)
492 			break;
493 
494 		len = (status & 0x1fff) - 4;
495 		mapping = le64_to_cpu(desc->addr);
496 
497 		if ((status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag)) {
498 			/* we don't support incoming fragmented frames.
499 			 * instead, we attempt to ensure that the
500 			 * pre-allocated RX skbs are properly sized such
501 			 * that RX fragments are never encountered
502 			 */
503 			cp_rx_err_acct(cp, rx_tail, status, len);
504 			dev->stats.rx_dropped++;
505 			cp->cp_stats.rx_frags++;
506 			goto rx_next;
507 		}
508 
509 		if (status & (RxError | RxErrFIFO)) {
510 			cp_rx_err_acct(cp, rx_tail, status, len);
511 			goto rx_next;
512 		}
513 
514 		netif_dbg(cp, rx_status, dev, "rx slot %d status 0x%x len %d\n",
515 			  rx_tail, status, len);
516 
517 		new_skb = netdev_alloc_skb_ip_align(dev, buflen);
518 		if (!new_skb) {
519 			dev->stats.rx_dropped++;
520 			goto rx_next;
521 		}
522 
523 		dma_unmap_single(&cp->pdev->dev, mapping,
524 				 buflen, PCI_DMA_FROMDEVICE);
525 
526 		/* Handle checksum offloading for incoming packets. */
527 		if (cp_rx_csum_ok(status))
528 			skb->ip_summed = CHECKSUM_UNNECESSARY;
529 		else
530 			skb_checksum_none_assert(skb);
531 
532 		skb_put(skb, len);
533 
534 		mapping = dma_map_single(&cp->pdev->dev, new_skb->data, buflen,
535 					 PCI_DMA_FROMDEVICE);
536 		cp->rx_skb[rx_tail] = new_skb;
537 
538 		cp_rx_skb(cp, skb, desc);
539 		rx++;
540 
541 rx_next:
542 		cp->rx_ring[rx_tail].opts2 = 0;
543 		cp->rx_ring[rx_tail].addr = cpu_to_le64(mapping);
544 		if (rx_tail == (CP_RX_RING_SIZE - 1))
545 			desc->opts1 = cpu_to_le32(DescOwn | RingEnd |
546 						  cp->rx_buf_sz);
547 		else
548 			desc->opts1 = cpu_to_le32(DescOwn | cp->rx_buf_sz);
549 		rx_tail = NEXT_RX(rx_tail);
550 
551 		if (rx >= budget)
552 			break;
553 	}
554 
555 	cp->rx_tail = rx_tail;
556 
557 	/* if we did not reach work limit, then we're done with
558 	 * this round of polling
559 	 */
560 	if (rx < budget) {
561 		unsigned long flags;
562 
		/* Re-check for Rx work that arrived while we were processing,
		 * so NAPI is not completed with events still pending.
		 */
		if (cpr16(IntrStatus) & cp_rx_intr_mask)
			goto rx_status_loop;
565 
566 		napi_gro_flush(napi, false);
567 		spin_lock_irqsave(&cp->lock, flags);
568 		__napi_complete(napi);
569 		cpw16_f(IntrMask, cp_intr_mask);
570 		spin_unlock_irqrestore(&cp->lock, flags);
571 	}
572 
573 	return rx;
574 }
575 
576 static irqreturn_t cp_interrupt (int irq, void *dev_instance)
577 {
578 	struct net_device *dev = dev_instance;
579 	struct cp_private *cp;
580 	int handled = 0;
581 	u16 status;
582 
583 	if (unlikely(dev == NULL))
584 		return IRQ_NONE;
585 	cp = netdev_priv(dev);
586 
587 	spin_lock(&cp->lock);
588 
589 	status = cpr16(IntrStatus);
590 	if (!status || (status == 0xFFFF))
591 		goto out_unlock;
592 
593 	handled = 1;
594 
595 	netif_dbg(cp, intr, dev, "intr, status %04x cmd %02x cpcmd %04x\n",
596 		  status, cpr8(Cmd), cpr16(CpCmd));
597 
	/* Ack everything except Rx events; cp_rx_poll() acks those itself. */
	cpw16(IntrStatus, status & ~cp_rx_intr_mask);
599 
	/* close possible races with dev_close */
601 	if (unlikely(!netif_running(dev))) {
602 		cpw16(IntrMask, 0);
603 		goto out_unlock;
604 	}
605 
606 	if (status & (RxOK | RxErr | RxEmpty | RxFIFOOvr))
607 		if (napi_schedule_prep(&cp->napi)) {
608 			cpw16_f(IntrMask, cp_norx_intr_mask);
609 			__napi_schedule(&cp->napi);
610 		}
611 
612 	if (status & (TxOK | TxErr | TxEmpty | SWInt))
613 		cp_tx(cp);
614 	if (status & LinkChg)
615 		mii_check_media(&cp->mii_if, netif_msg_link(cp), false);
616 
617 
618 	if (status & PciErr) {
619 		u16 pci_status;
620 
621 		pci_read_config_word(cp->pdev, PCI_STATUS, &pci_status);
622 		pci_write_config_word(cp->pdev, PCI_STATUS, pci_status);
623 		netdev_err(dev, "PCI bus error, status=%04x, PCI status=%04x\n",
624 			   status, pci_status);
625 
626 		/* TODO: reset hardware */
627 	}
628 
629 out_unlock:
630 	spin_unlock(&cp->lock);
631 
632 	return IRQ_RETVAL(handled);
633 }
634 
635 #ifdef CONFIG_NET_POLL_CONTROLLER
636 /*
637  * Polling receive - used by netconsole and other diagnostic tools
638  * to allow network i/o with interrupts disabled.
639  */
640 static void cp_poll_controller(struct net_device *dev)
641 {
642 	struct cp_private *cp = netdev_priv(dev);
643 	const int irq = cp->pdev->irq;
644 
645 	disable_irq(irq);
646 	cp_interrupt(irq, dev);
647 	enable_irq(irq);
648 }
649 #endif
650 
651 static void cp_tx (struct cp_private *cp)
652 {
653 	unsigned tx_head = cp->tx_head;
654 	unsigned tx_tail = cp->tx_tail;
655 	unsigned bytes_compl = 0, pkts_compl = 0;
656 
657 	while (tx_tail != tx_head) {
658 		struct cp_desc *txd = cp->tx_ring + tx_tail;
659 		struct sk_buff *skb;
660 		u32 status;
661 
662 		rmb();
663 		status = le32_to_cpu(txd->opts1);
664 		if (status & DescOwn)
665 			break;
666 
667 		skb = cp->tx_skb[tx_tail];
668 		BUG_ON(!skb);
669 
670 		dma_unmap_single(&cp->pdev->dev, le64_to_cpu(txd->addr),
671 				 le32_to_cpu(txd->opts1) & 0xffff,
672 				 PCI_DMA_TODEVICE);
673 
674 		bytes_compl += skb->len;
675 		pkts_compl++;
676 
677 		if (status & LastFrag) {
678 			if (status & (TxError | TxFIFOUnder)) {
679 				netif_dbg(cp, tx_err, cp->dev,
680 					  "tx err, status 0x%x\n", status);
681 				cp->dev->stats.tx_errors++;
682 				if (status & TxOWC)
683 					cp->dev->stats.tx_window_errors++;
684 				if (status & TxMaxCol)
685 					cp->dev->stats.tx_aborted_errors++;
686 				if (status & TxLinkFail)
687 					cp->dev->stats.tx_carrier_errors++;
688 				if (status & TxFIFOUnder)
689 					cp->dev->stats.tx_fifo_errors++;
690 			} else {
691 				cp->dev->stats.collisions +=
692 					((status >> TxColCntShift) & TxColCntMask);
693 				cp->dev->stats.tx_packets++;
694 				cp->dev->stats.tx_bytes += skb->len;
695 				netif_dbg(cp, tx_done, cp->dev,
696 					  "tx done, slot %d\n", tx_tail);
697 			}
698 			dev_kfree_skb_irq(skb);
699 		}
700 
701 		cp->tx_skb[tx_tail] = NULL;
702 
703 		tx_tail = NEXT_TX(tx_tail);
704 	}
705 
706 	cp->tx_tail = tx_tail;
707 
708 	netdev_completed_queue(cp->dev, pkts_compl, bytes_compl);
709 	if (TX_BUFFS_AVAIL(cp) > (MAX_SKB_FRAGS + 1))
710 		netif_wake_queue(cp->dev);
711 }
712 
713 static inline u32 cp_tx_vlan_tag(struct sk_buff *skb)
714 {
715 	return vlan_tx_tag_present(skb) ?
716 		TxVlanTag | swab16(vlan_tx_tag_get(skb)) : 0x00;
717 }
718 
719 static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
720 					struct net_device *dev)
721 {
722 	struct cp_private *cp = netdev_priv(dev);
723 	unsigned entry;
724 	u32 eor, flags;
725 	unsigned long intr_flags;
726 	__le32 opts2;
727 	int mss = 0;
728 
729 	spin_lock_irqsave(&cp->lock, intr_flags);
730 
731 	/* This is a hard error, log it. */
732 	if (TX_BUFFS_AVAIL(cp) <= (skb_shinfo(skb)->nr_frags + 1)) {
733 		netif_stop_queue(dev);
734 		spin_unlock_irqrestore(&cp->lock, intr_flags);
735 		netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
736 		return NETDEV_TX_BUSY;
737 	}
738 
739 	entry = cp->tx_head;
740 	eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
741 	mss = skb_shinfo(skb)->gso_size;
742 
743 	opts2 = cpu_to_le32(cp_tx_vlan_tag(skb));
744 
745 	if (skb_shinfo(skb)->nr_frags == 0) {
746 		struct cp_desc *txd = &cp->tx_ring[entry];
747 		u32 len;
748 		dma_addr_t mapping;
749 
750 		len = skb->len;
751 		mapping = dma_map_single(&cp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE);
752 		txd->opts2 = opts2;
753 		txd->addr = cpu_to_le64(mapping);
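		/* Make the buffer address and opts2 visible before opts1,
		 * which hands the descriptor to the NIC via DescOwn.
		 */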
754 		wmb();
755 
756 		flags = eor | len | DescOwn | FirstFrag | LastFrag;
757 
758 		if (mss)
759 			flags |= LargeSend | ((mss & MSSMask) << MSSShift);
760 		else if (skb->ip_summed == CHECKSUM_PARTIAL) {
761 			const struct iphdr *ip = ip_hdr(skb);
762 			if (ip->protocol == IPPROTO_TCP)
763 				flags |= IPCS | TCPCS;
764 			else if (ip->protocol == IPPROTO_UDP)
765 				flags |= IPCS | UDPCS;
766 			else
767 				WARN_ON(1);	/* we need a WARN() */
768 		}
769 
770 		txd->opts1 = cpu_to_le32(flags);
771 		wmb();
772 
773 		cp->tx_skb[entry] = skb;
774 		entry = NEXT_TX(entry);
775 	} else {
776 		struct cp_desc *txd;
777 		u32 first_len, first_eor;
778 		dma_addr_t first_mapping;
779 		int frag, first_entry = entry;
780 		const struct iphdr *ip = ip_hdr(skb);
781 
782 		/* We must give this initial chunk to the device last.
783 		 * Otherwise we could race with the device.
784 		 */
785 		first_eor = eor;
786 		first_len = skb_headlen(skb);
787 		first_mapping = dma_map_single(&cp->pdev->dev, skb->data,
788 					       first_len, PCI_DMA_TODEVICE);
789 		cp->tx_skb[entry] = skb;
790 		entry = NEXT_TX(entry);
791 
792 		for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
793 			const skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
794 			u32 len;
795 			u32 ctrl;
796 			dma_addr_t mapping;
797 
798 			len = skb_frag_size(this_frag);
799 			mapping = dma_map_single(&cp->pdev->dev,
800 						 skb_frag_address(this_frag),
801 						 len, PCI_DMA_TODEVICE);
802 			eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
803 
804 			ctrl = eor | len | DescOwn;
805 
806 			if (mss)
807 				ctrl |= LargeSend |
808 					((mss & MSSMask) << MSSShift);
809 			else if (skb->ip_summed == CHECKSUM_PARTIAL) {
810 				if (ip->protocol == IPPROTO_TCP)
811 					ctrl |= IPCS | TCPCS;
812 				else if (ip->protocol == IPPROTO_UDP)
813 					ctrl |= IPCS | UDPCS;
814 				else
815 					BUG();
816 			}
817 
818 			if (frag == skb_shinfo(skb)->nr_frags - 1)
819 				ctrl |= LastFrag;
820 
821 			txd = &cp->tx_ring[entry];
822 			txd->opts2 = opts2;
823 			txd->addr = cpu_to_le64(mapping);
824 			wmb();
825 
826 			txd->opts1 = cpu_to_le32(ctrl);
827 			wmb();
828 
829 			cp->tx_skb[entry] = skb;
830 			entry = NEXT_TX(entry);
831 		}
832 
833 		txd = &cp->tx_ring[first_entry];
834 		txd->opts2 = opts2;
835 		txd->addr = cpu_to_le64(first_mapping);
836 		wmb();
837 
838 		if (skb->ip_summed == CHECKSUM_PARTIAL) {
839 			if (ip->protocol == IPPROTO_TCP)
840 				txd->opts1 = cpu_to_le32(first_eor | first_len |
841 							 FirstFrag | DescOwn |
842 							 IPCS | TCPCS);
843 			else if (ip->protocol == IPPROTO_UDP)
844 				txd->opts1 = cpu_to_le32(first_eor | first_len |
845 							 FirstFrag | DescOwn |
846 							 IPCS | UDPCS);
847 			else
848 				BUG();
849 		} else
850 			txd->opts1 = cpu_to_le32(first_eor | first_len |
851 						 FirstFrag | DescOwn);
852 		wmb();
853 	}
854 	cp->tx_head = entry;
855 
856 	netdev_sent_queue(dev, skb->len);
857 	netif_dbg(cp, tx_queued, cp->dev, "tx queued, slot %d, skblen %d\n",
858 		  entry, skb->len);
859 	if (TX_BUFFS_AVAIL(cp) <= (MAX_SKB_FRAGS + 1))
860 		netif_stop_queue(dev);
861 
862 	spin_unlock_irqrestore(&cp->lock, intr_flags);
863 
864 	cpw8(TxPoll, NormalTxPoll);
865 
866 	return NETDEV_TX_OK;
867 }
868 
869 /* Set or clear the multicast filter for this adaptor.
870    This routine is not state sensitive and need not be SMP locked. */
871 
872 static void __cp_set_rx_mode (struct net_device *dev)
873 {
874 	struct cp_private *cp = netdev_priv(dev);
875 	u32 mc_filter[2];	/* Multicast hash filter */
876 	int rx_mode;
877 
878 	/* Note: do not reorder, GCC is clever about common statements. */
879 	if (dev->flags & IFF_PROMISC) {
880 		/* Unconditionally log net taps. */
881 		rx_mode =
882 		    AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
883 		    AcceptAllPhys;
884 		mc_filter[1] = mc_filter[0] = 0xffffffff;
885 	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
886 		   (dev->flags & IFF_ALLMULTI)) {
887 		/* Too many to filter perfectly -- accept all multicasts. */
888 		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
889 		mc_filter[1] = mc_filter[0] = 0xffffffff;
890 	} else {
891 		struct netdev_hw_addr *ha;
892 		rx_mode = AcceptBroadcast | AcceptMyPhys;
893 		mc_filter[1] = mc_filter[0] = 0;
894 		netdev_for_each_mc_addr(ha, dev) {
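			/* The hash index is the top 6 bits of the CRC,
			 * selecting one of the 64 bits in MAR0..MAR7.
			 */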
895 			int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
896 
897 			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
898 			rx_mode |= AcceptMulticast;
899 		}
900 	}
901 
902 	/* We can safely update without stopping the chip. */
903 	cp->rx_config = cp_rx_config | rx_mode;
904 	cpw32_f(RxConfig, cp->rx_config);
905 
906 	cpw32_f (MAR0 + 0, mc_filter[0]);
907 	cpw32_f (MAR0 + 4, mc_filter[1]);
908 }
909 
910 static void cp_set_rx_mode (struct net_device *dev)
911 {
912 	unsigned long flags;
913 	struct cp_private *cp = netdev_priv(dev);
914 
915 	spin_lock_irqsave (&cp->lock, flags);
916 	__cp_set_rx_mode(dev);
917 	spin_unlock_irqrestore (&cp->lock, flags);
918 }
919 
920 static void __cp_get_stats(struct cp_private *cp)
921 {
922 	/* only lower 24 bits valid; write any value to clear */
923 	cp->dev->stats.rx_missed_errors += (cpr32 (RxMissed) & 0xffffff);
924 	cpw32 (RxMissed, 0);
925 }
926 
927 static struct net_device_stats *cp_get_stats(struct net_device *dev)
928 {
929 	struct cp_private *cp = netdev_priv(dev);
930 	unsigned long flags;
931 
	/* The chip only needs to report frames it silently dropped. */
	spin_lock_irqsave(&cp->lock, flags);
	if (netif_running(dev) && netif_device_present(dev))
		__cp_get_stats(cp);
936 	spin_unlock_irqrestore(&cp->lock, flags);
937 
938 	return &dev->stats;
939 }
940 
941 static void cp_stop_hw (struct cp_private *cp)
942 {
943 	cpw16(IntrStatus, ~(cpr16(IntrStatus)));
944 	cpw16_f(IntrMask, 0);
945 	cpw8(Cmd, 0);
946 	cpw16_f(CpCmd, 0);
947 	cpw16_f(IntrStatus, ~(cpr16(IntrStatus)));
948 
949 	cp->rx_tail = 0;
950 	cp->tx_head = cp->tx_tail = 0;
951 
952 	netdev_reset_queue(cp->dev);
953 }
954 
955 static void cp_reset_hw (struct cp_private *cp)
956 {
957 	unsigned work = 1000;
958 
959 	cpw8(Cmd, CmdReset);
960 
961 	while (work--) {
962 		if (!(cpr8(Cmd) & CmdReset))
963 			return;
964 
965 		schedule_timeout_uninterruptible(10);
966 	}
967 
968 	netdev_err(cp->dev, "hardware reset timeout\n");
969 }
970 
971 static inline void cp_start_hw (struct cp_private *cp)
972 {
973 	dma_addr_t ring_dma;
974 
975 	cpw16(CpCmd, cp->cpcmd);
976 
977 	/*
978 	 * These (at least TxRingAddr) need to be configured after the
979 	 * corresponding bits in CpCmd are enabled. Datasheet v1.6 §6.33
980 	 * (C+ Command Register) recommends that these and more be configured
981 	 * *after* the [RT]xEnable bits in CpCmd are set. And on some hardware
982 	 * it's been observed that the TxRingAddr is actually reset to garbage
983 	 * when C+ mode Tx is enabled in CpCmd.
984 	 */
985 	cpw32_f(HiTxRingAddr, 0);
986 	cpw32_f(HiTxRingAddr + 4, 0);
987 
988 	ring_dma = cp->ring_dma;
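	/* Program each 64-bit ring address as two 32-bit halves.  The double
	 * 16-bit shift avoids an invalid shift count when dma_addr_t is only
	 * 32 bits wide.
	 */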
989 	cpw32_f(RxRingAddr, ring_dma & 0xffffffff);
990 	cpw32_f(RxRingAddr + 4, (ring_dma >> 16) >> 16);
991 
992 	ring_dma += sizeof(struct cp_desc) * CP_RX_RING_SIZE;
993 	cpw32_f(TxRingAddr, ring_dma & 0xffffffff);
994 	cpw32_f(TxRingAddr + 4, (ring_dma >> 16) >> 16);
995 
996 	/*
997 	 * Strictly speaking, the datasheet says this should be enabled
998 	 * *before* setting the descriptor addresses. But what, then, would
999 	 * prevent it from doing DMA to random unconfigured addresses?
1000 	 * This variant appears to work fine.
1001 	 */
1002 	cpw8(Cmd, RxOn | TxOn);
1003 
1004 	netdev_reset_queue(cp->dev);
1005 }
1006 
1007 static void cp_enable_irq(struct cp_private *cp)
1008 {
1009 	cpw16_f(IntrMask, cp_intr_mask);
1010 }
1011 
1012 static void cp_init_hw (struct cp_private *cp)
1013 {
1014 	struct net_device *dev = cp->dev;
1015 
1016 	cp_reset_hw(cp);
1017 
1018 	cpw8_f (Cfg9346, Cfg9346_Unlock);
1019 
1020 	/* Restore our idea of the MAC address. */
1021 	cpw32_f (MAC0 + 0, le32_to_cpu (*(__le32 *) (dev->dev_addr + 0)));
1022 	cpw32_f (MAC0 + 4, le32_to_cpu (*(__le32 *) (dev->dev_addr + 4)));
1023 
1024 	cp_start_hw(cp);
1025 	cpw8(TxThresh, 0x06); /* XXX convert magic num to a constant */
1026 
1027 	__cp_set_rx_mode(dev);
1028 	cpw32_f (TxConfig, IFG | (TX_DMA_BURST << TxDMAShift));
1029 
1030 	cpw8(Config1, cpr8(Config1) | DriverLoaded | PMEnable);
1031 	/* Disable Wake-on-LAN. Can be turned on with ETHTOOL_SWOL */
1032 	cpw8(Config3, PARMEnable);
1033 	cp->wol_enabled = 0;
1034 
1035 	cpw8(Config5, cpr8(Config5) & PMEStatus);
1036 
1037 	cpw16(MultiIntr, 0);
1038 
1039 	cpw8_f(Cfg9346, Cfg9346_Lock);
1040 }
1041 
1042 static int cp_refill_rx(struct cp_private *cp)
1043 {
1044 	struct net_device *dev = cp->dev;
1045 	unsigned i;
1046 
1047 	for (i = 0; i < CP_RX_RING_SIZE; i++) {
1048 		struct sk_buff *skb;
1049 		dma_addr_t mapping;
1050 
1051 		skb = netdev_alloc_skb_ip_align(dev, cp->rx_buf_sz);
1052 		if (!skb)
1053 			goto err_out;
1054 
1055 		mapping = dma_map_single(&cp->pdev->dev, skb->data,
1056 					 cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1057 		cp->rx_skb[i] = skb;
1058 
1059 		cp->rx_ring[i].opts2 = 0;
1060 		cp->rx_ring[i].addr = cpu_to_le64(mapping);
1061 		if (i == (CP_RX_RING_SIZE - 1))
1062 			cp->rx_ring[i].opts1 =
1063 				cpu_to_le32(DescOwn | RingEnd | cp->rx_buf_sz);
1064 		else
1065 			cp->rx_ring[i].opts1 =
1066 				cpu_to_le32(DescOwn | cp->rx_buf_sz);
1067 	}
1068 
1069 	return 0;
1070 
1071 err_out:
1072 	cp_clean_rings(cp);
1073 	return -ENOMEM;
1074 }
1075 
1076 static void cp_init_rings_index (struct cp_private *cp)
1077 {
1078 	cp->rx_tail = 0;
1079 	cp->tx_head = cp->tx_tail = 0;
1080 }
1081 
1082 static int cp_init_rings (struct cp_private *cp)
1083 {
1084 	memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);
1085 	cp->tx_ring[CP_TX_RING_SIZE - 1].opts1 = cpu_to_le32(RingEnd);
1086 
1087 	cp_init_rings_index(cp);
1088 
1089 	return cp_refill_rx (cp);
1090 }
1091 
1092 static int cp_alloc_rings (struct cp_private *cp)
1093 {
1094 	struct device *d = &cp->pdev->dev;
1095 	void *mem;
1096 	int rc;
1097 
1098 	mem = dma_alloc_coherent(d, CP_RING_BYTES, &cp->ring_dma, GFP_KERNEL);
1099 	if (!mem)
1100 		return -ENOMEM;
1101 
	/* A single coherent allocation holds the Rx ring, then the Tx ring,
	 * plus room for the DMA stats block (see CP_RING_BYTES).
	 */
	cp->rx_ring = mem;
	cp->tx_ring = &cp->rx_ring[CP_RX_RING_SIZE];
1104 
1105 	rc = cp_init_rings(cp);
1106 	if (rc < 0)
1107 		dma_free_coherent(d, CP_RING_BYTES, cp->rx_ring, cp->ring_dma);
1108 
1109 	return rc;
1110 }
1111 
1112 static void cp_clean_rings (struct cp_private *cp)
1113 {
1114 	struct cp_desc *desc;
1115 	unsigned i;
1116 
1117 	for (i = 0; i < CP_RX_RING_SIZE; i++) {
1118 		if (cp->rx_skb[i]) {
1119 			desc = cp->rx_ring + i;
1120 			dma_unmap_single(&cp->pdev->dev,le64_to_cpu(desc->addr),
1121 					 cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1122 			dev_kfree_skb(cp->rx_skb[i]);
1123 		}
1124 	}
1125 
1126 	for (i = 0; i < CP_TX_RING_SIZE; i++) {
1127 		if (cp->tx_skb[i]) {
1128 			struct sk_buff *skb = cp->tx_skb[i];
1129 
1130 			desc = cp->tx_ring + i;
1131 			dma_unmap_single(&cp->pdev->dev,le64_to_cpu(desc->addr),
1132 					 le32_to_cpu(desc->opts1) & 0xffff,
1133 					 PCI_DMA_TODEVICE);
1134 			if (le32_to_cpu(desc->opts1) & LastFrag)
1135 				dev_kfree_skb(skb);
1136 			cp->dev->stats.tx_dropped++;
1137 		}
1138 	}
1139 
1140 	memset(cp->rx_ring, 0, sizeof(struct cp_desc) * CP_RX_RING_SIZE);
1141 	memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);
1142 
1143 	memset(cp->rx_skb, 0, sizeof(struct sk_buff *) * CP_RX_RING_SIZE);
1144 	memset(cp->tx_skb, 0, sizeof(struct sk_buff *) * CP_TX_RING_SIZE);
1145 }
1146 
1147 static void cp_free_rings (struct cp_private *cp)
1148 {
1149 	cp_clean_rings(cp);
1150 	dma_free_coherent(&cp->pdev->dev, CP_RING_BYTES, cp->rx_ring,
1151 			  cp->ring_dma);
1152 	cp->rx_ring = NULL;
1153 	cp->tx_ring = NULL;
1154 }
1155 
1156 static int cp_open (struct net_device *dev)
1157 {
1158 	struct cp_private *cp = netdev_priv(dev);
1159 	const int irq = cp->pdev->irq;
1160 	int rc;
1161 
1162 	netif_dbg(cp, ifup, dev, "enabling interface\n");
1163 
1164 	rc = cp_alloc_rings(cp);
1165 	if (rc)
1166 		return rc;
1167 
1168 	napi_enable(&cp->napi);
1169 
1170 	cp_init_hw(cp);
1171 
1172 	rc = request_irq(irq, cp_interrupt, IRQF_SHARED, dev->name, dev);
1173 	if (rc)
1174 		goto err_out_hw;
1175 
1176 	cp_enable_irq(cp);
1177 
1178 	netif_carrier_off(dev);
1179 	mii_check_media(&cp->mii_if, netif_msg_link(cp), true);
1180 	netif_start_queue(dev);
1181 
1182 	return 0;
1183 
1184 err_out_hw:
1185 	napi_disable(&cp->napi);
1186 	cp_stop_hw(cp);
1187 	cp_free_rings(cp);
1188 	return rc;
1189 }
1190 
1191 static int cp_close (struct net_device *dev)
1192 {
1193 	struct cp_private *cp = netdev_priv(dev);
1194 	unsigned long flags;
1195 
1196 	napi_disable(&cp->napi);
1197 
1198 	netif_dbg(cp, ifdown, dev, "disabling interface\n");
1199 
1200 	spin_lock_irqsave(&cp->lock, flags);
1201 
1202 	netif_stop_queue(dev);
1203 	netif_carrier_off(dev);
1204 
1205 	cp_stop_hw(cp);
1206 
1207 	spin_unlock_irqrestore(&cp->lock, flags);
1208 
1209 	free_irq(cp->pdev->irq, dev);
1210 
1211 	cp_free_rings(cp);
1212 	return 0;
1213 }
1214 
1215 static void cp_tx_timeout(struct net_device *dev)
1216 {
1217 	struct cp_private *cp = netdev_priv(dev);
1218 	unsigned long flags;
1219 	int rc;
1220 
1221 	netdev_warn(dev, "Transmit timeout, status %2x %4x %4x %4x\n",
1222 		    cpr8(Cmd), cpr16(CpCmd),
1223 		    cpr16(IntrStatus), cpr16(IntrMask));
1224 
1225 	spin_lock_irqsave(&cp->lock, flags);
1226 
1227 	cp_stop_hw(cp);
1228 	cp_clean_rings(cp);
1229 	rc = cp_init_rings(cp);
1230 	cp_start_hw(cp);
1231 	cp_enable_irq(cp);
1232 
1233 	netif_wake_queue(dev);
1234 
1235 	spin_unlock_irqrestore(&cp->lock, flags);
1236 }
1237 
1238 static int cp_change_mtu(struct net_device *dev, int new_mtu)
1239 {
1240 	struct cp_private *cp = netdev_priv(dev);
1241 
1242 	/* check for invalid MTU, according to hardware limits */
1243 	if (new_mtu < CP_MIN_MTU || new_mtu > CP_MAX_MTU)
1244 		return -EINVAL;
1245 
1246 	/* if network interface not up, no need for complexity */
1247 	if (!netif_running(dev)) {
1248 		dev->mtu = new_mtu;
1249 		cp_set_rxbufsize(cp);	/* set new rx buf size */
1250 		return 0;
1251 	}
1252 
1253 	/* network IS up, close it, reset MTU, and come up again. */
1254 	cp_close(dev);
1255 	dev->mtu = new_mtu;
1256 	cp_set_rxbufsize(cp);
1257 	return cp_open(dev);
1258 }
1259 
/* Map MII register numbers onto the matching 8139C+ registers for the
 * internal PHY; zero entries are unimplemented and read back as 0.
 */
static const char mii_2_8139_map[8] = {
1261 	BasicModeCtrl,
1262 	BasicModeStatus,
1263 	0,
1264 	0,
1265 	NWayAdvert,
1266 	NWayLPAR,
1267 	NWayExpansion,
1268 	0
1269 };
1270 
1271 static int mdio_read(struct net_device *dev, int phy_id, int location)
1272 {
1273 	struct cp_private *cp = netdev_priv(dev);
1274 
1275 	return location < 8 && mii_2_8139_map[location] ?
1276 	       readw(cp->regs + mii_2_8139_map[location]) : 0;
1277 }
1278 
1279 
1280 static void mdio_write(struct net_device *dev, int phy_id, int location,
1281 		       int value)
1282 {
1283 	struct cp_private *cp = netdev_priv(dev);
1284 
1285 	if (location == 0) {
1286 		cpw8(Cfg9346, Cfg9346_Unlock);
1287 		cpw16(BasicModeCtrl, value);
1288 		cpw8(Cfg9346, Cfg9346_Lock);
1289 	} else if (location < 8 && mii_2_8139_map[location])
1290 		cpw16(mii_2_8139_map[location], value);
1291 }
1292 
1293 /* Set the ethtool Wake-on-LAN settings */
1294 static int netdev_set_wol (struct cp_private *cp,
1295 			   const struct ethtool_wolinfo *wol)
1296 {
1297 	u8 options;
1298 
1299 	options = cpr8 (Config3) & ~(LinkUp | MagicPacket);
1300 	/* If WOL is being disabled, no need for complexity */
1301 	if (wol->wolopts) {
1302 		if (wol->wolopts & WAKE_PHY)	options |= LinkUp;
1303 		if (wol->wolopts & WAKE_MAGIC)	options |= MagicPacket;
1304 	}
1305 
1306 	cpw8 (Cfg9346, Cfg9346_Unlock);
1307 	cpw8 (Config3, options);
1308 	cpw8 (Cfg9346, Cfg9346_Lock);
1309 
1310 	options = 0; /* Paranoia setting */
1311 	options = cpr8 (Config5) & ~(UWF | MWF | BWF);
1312 	/* If WOL is being disabled, no need for complexity */
1313 	if (wol->wolopts) {
1314 		if (wol->wolopts & WAKE_UCAST)  options |= UWF;
1315 		if (wol->wolopts & WAKE_BCAST)	options |= BWF;
1316 		if (wol->wolopts & WAKE_MCAST)	options |= MWF;
1317 	}
1318 
1319 	cpw8 (Config5, options);
1320 
1321 	cp->wol_enabled = (wol->wolopts) ? 1 : 0;
1322 
1323 	return 0;
1324 }
1325 
1326 /* Get the ethtool Wake-on-LAN settings */
1327 static void netdev_get_wol (struct cp_private *cp,
1328 	             struct ethtool_wolinfo *wol)
1329 {
1330 	u8 options;
1331 
1332 	wol->wolopts   = 0; /* Start from scratch */
1333 	wol->supported = WAKE_PHY   | WAKE_BCAST | WAKE_MAGIC |
1334 		         WAKE_MCAST | WAKE_UCAST;
1335 	/* We don't need to go on if WOL is disabled */
1336 	if (!cp->wol_enabled) return;
1337 
1338 	options        = cpr8 (Config3);
1339 	if (options & LinkUp)        wol->wolopts |= WAKE_PHY;
1340 	if (options & MagicPacket)   wol->wolopts |= WAKE_MAGIC;
1341 
1342 	options        = 0; /* Paranoia setting */
1343 	options        = cpr8 (Config5);
1344 	if (options & UWF)           wol->wolopts |= WAKE_UCAST;
1345 	if (options & BWF)           wol->wolopts |= WAKE_BCAST;
1346 	if (options & MWF)           wol->wolopts |= WAKE_MCAST;
1347 }
1348 
1349 static void cp_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
1350 {
1351 	struct cp_private *cp = netdev_priv(dev);
1352 
1353 	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
1354 	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
1355 	strlcpy(info->bus_info, pci_name(cp->pdev), sizeof(info->bus_info));
1356 }
1357 
1358 static void cp_get_ringparam(struct net_device *dev,
1359 				struct ethtool_ringparam *ring)
1360 {
1361 	ring->rx_max_pending = CP_RX_RING_SIZE;
1362 	ring->tx_max_pending = CP_TX_RING_SIZE;
1363 	ring->rx_pending = CP_RX_RING_SIZE;
1364 	ring->tx_pending = CP_TX_RING_SIZE;
1365 }
1366 
1367 static int cp_get_regs_len(struct net_device *dev)
1368 {
1369 	return CP_REGS_SIZE;
1370 }
1371 
1372 static int cp_get_sset_count (struct net_device *dev, int sset)
1373 {
1374 	switch (sset) {
1375 	case ETH_SS_STATS:
1376 		return CP_NUM_STATS;
1377 	default:
1378 		return -EOPNOTSUPP;
1379 	}
1380 }
1381 
1382 static int cp_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1383 {
1384 	struct cp_private *cp = netdev_priv(dev);
1385 	int rc;
1386 	unsigned long flags;
1387 
1388 	spin_lock_irqsave(&cp->lock, flags);
1389 	rc = mii_ethtool_gset(&cp->mii_if, cmd);
1390 	spin_unlock_irqrestore(&cp->lock, flags);
1391 
1392 	return rc;
1393 }
1394 
1395 static int cp_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1396 {
1397 	struct cp_private *cp = netdev_priv(dev);
1398 	int rc;
1399 	unsigned long flags;
1400 
1401 	spin_lock_irqsave(&cp->lock, flags);
1402 	rc = mii_ethtool_sset(&cp->mii_if, cmd);
1403 	spin_unlock_irqrestore(&cp->lock, flags);
1404 
1405 	return rc;
1406 }
1407 
1408 static int cp_nway_reset(struct net_device *dev)
1409 {
1410 	struct cp_private *cp = netdev_priv(dev);
1411 	return mii_nway_restart(&cp->mii_if);
1412 }
1413 
1414 static u32 cp_get_msglevel(struct net_device *dev)
1415 {
1416 	struct cp_private *cp = netdev_priv(dev);
1417 	return cp->msg_enable;
1418 }
1419 
1420 static void cp_set_msglevel(struct net_device *dev, u32 value)
1421 {
1422 	struct cp_private *cp = netdev_priv(dev);
1423 	cp->msg_enable = value;
1424 }
1425 
1426 static int cp_set_features(struct net_device *dev, netdev_features_t features)
1427 {
1428 	struct cp_private *cp = netdev_priv(dev);
1429 	unsigned long flags;
1430 
1431 	if (!((dev->features ^ features) & NETIF_F_RXCSUM))
1432 		return 0;
1433 
1434 	spin_lock_irqsave(&cp->lock, flags);
1435 
1436 	if (features & NETIF_F_RXCSUM)
1437 		cp->cpcmd |= RxChkSum;
1438 	else
1439 		cp->cpcmd &= ~RxChkSum;
1440 
1441 	if (features & NETIF_F_HW_VLAN_RX)
1442 		cp->cpcmd |= RxVlanOn;
1443 	else
1444 		cp->cpcmd &= ~RxVlanOn;
1445 
1446 	cpw16_f(CpCmd, cp->cpcmd);
1447 	spin_unlock_irqrestore(&cp->lock, flags);
1448 
1449 	return 0;
1450 }
1451 
1452 static void cp_get_regs(struct net_device *dev, struct ethtool_regs *regs,
1453 		        void *p)
1454 {
1455 	struct cp_private *cp = netdev_priv(dev);
1456 	unsigned long flags;
1457 
1458 	if (regs->len < CP_REGS_SIZE)
1459 		return /* -EINVAL */;
1460 
1461 	regs->version = CP_REGS_VER;
1462 
1463 	spin_lock_irqsave(&cp->lock, flags);
1464 	memcpy_fromio(p, cp->regs, CP_REGS_SIZE);
1465 	spin_unlock_irqrestore(&cp->lock, flags);
1466 }
1467 
1468 static void cp_get_wol (struct net_device *dev, struct ethtool_wolinfo *wol)
1469 {
1470 	struct cp_private *cp = netdev_priv(dev);
1471 	unsigned long flags;
1472 
1473 	spin_lock_irqsave (&cp->lock, flags);
1474 	netdev_get_wol (cp, wol);
1475 	spin_unlock_irqrestore (&cp->lock, flags);
1476 }
1477 
1478 static int cp_set_wol (struct net_device *dev, struct ethtool_wolinfo *wol)
1479 {
1480 	struct cp_private *cp = netdev_priv(dev);
1481 	unsigned long flags;
1482 	int rc;
1483 
1484 	spin_lock_irqsave (&cp->lock, flags);
1485 	rc = netdev_set_wol (cp, wol);
1486 	spin_unlock_irqrestore (&cp->lock, flags);
1487 
1488 	return rc;
1489 }
1490 
1491 static void cp_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
1492 {
1493 	switch (stringset) {
1494 	case ETH_SS_STATS:
1495 		memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
1496 		break;
1497 	default:
1498 		BUG();
1499 		break;
1500 	}
1501 }
1502 
1503 static void cp_get_ethtool_stats (struct net_device *dev,
1504 				  struct ethtool_stats *estats, u64 *tmp_stats)
1505 {
1506 	struct cp_private *cp = netdev_priv(dev);
1507 	struct cp_dma_stats *nic_stats;
1508 	dma_addr_t dma;
1509 	int i;
1510 
1511 	nic_stats = dma_alloc_coherent(&cp->pdev->dev, sizeof(*nic_stats),
1512 				       &dma, GFP_KERNEL);
1513 	if (!nic_stats)
1514 		return;
1515 
1516 	/* begin NIC statistics dump */
1517 	cpw32(StatsAddr + 4, (u64)dma >> 32);
1518 	cpw32(StatsAddr, ((u64)dma & DMA_BIT_MASK(32)) | DumpStats);
1519 	cpr32(StatsAddr);
1520 
	/* The chip clears DumpStats once the stats block DMA completes;
	 * poll for up to ~10ms.
	 */
	for (i = 0; i < 1000; i++) {
1522 		if ((cpr32(StatsAddr) & DumpStats) == 0)
1523 			break;
1524 		udelay(10);
1525 	}
1526 	cpw32(StatsAddr, 0);
1527 	cpw32(StatsAddr + 4, 0);
1528 	cpr32(StatsAddr);
1529 
1530 	i = 0;
1531 	tmp_stats[i++] = le64_to_cpu(nic_stats->tx_ok);
1532 	tmp_stats[i++] = le64_to_cpu(nic_stats->rx_ok);
1533 	tmp_stats[i++] = le64_to_cpu(nic_stats->tx_err);
1534 	tmp_stats[i++] = le32_to_cpu(nic_stats->rx_err);
1535 	tmp_stats[i++] = le16_to_cpu(nic_stats->rx_fifo);
1536 	tmp_stats[i++] = le16_to_cpu(nic_stats->frame_align);
1537 	tmp_stats[i++] = le32_to_cpu(nic_stats->tx_ok_1col);
1538 	tmp_stats[i++] = le32_to_cpu(nic_stats->tx_ok_mcol);
1539 	tmp_stats[i++] = le64_to_cpu(nic_stats->rx_ok_phys);
1540 	tmp_stats[i++] = le64_to_cpu(nic_stats->rx_ok_bcast);
1541 	tmp_stats[i++] = le32_to_cpu(nic_stats->rx_ok_mcast);
1542 	tmp_stats[i++] = le16_to_cpu(nic_stats->tx_abort);
1543 	tmp_stats[i++] = le16_to_cpu(nic_stats->tx_underrun);
1544 	tmp_stats[i++] = cp->cp_stats.rx_frags;
1545 	BUG_ON(i != CP_NUM_STATS);
1546 
1547 	dma_free_coherent(&cp->pdev->dev, sizeof(*nic_stats), nic_stats, dma);
1548 }
1549 
1550 static const struct ethtool_ops cp_ethtool_ops = {
1551 	.get_drvinfo		= cp_get_drvinfo,
1552 	.get_regs_len		= cp_get_regs_len,
1553 	.get_sset_count		= cp_get_sset_count,
1554 	.get_settings		= cp_get_settings,
1555 	.set_settings		= cp_set_settings,
1556 	.nway_reset		= cp_nway_reset,
1557 	.get_link		= ethtool_op_get_link,
1558 	.get_msglevel		= cp_get_msglevel,
1559 	.set_msglevel		= cp_set_msglevel,
1560 	.get_regs		= cp_get_regs,
1561 	.get_wol		= cp_get_wol,
1562 	.set_wol		= cp_set_wol,
1563 	.get_strings		= cp_get_strings,
1564 	.get_ethtool_stats	= cp_get_ethtool_stats,
1565 	.get_eeprom_len		= cp_get_eeprom_len,
1566 	.get_eeprom		= cp_get_eeprom,
1567 	.set_eeprom		= cp_set_eeprom,
1568 	.get_ringparam		= cp_get_ringparam,
1569 };
1570 
1571 static int cp_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
1572 {
1573 	struct cp_private *cp = netdev_priv(dev);
1574 	int rc;
1575 	unsigned long flags;
1576 
1577 	if (!netif_running(dev))
1578 		return -EINVAL;
1579 
1580 	spin_lock_irqsave(&cp->lock, flags);
1581 	rc = generic_mii_ioctl(&cp->mii_if, if_mii(rq), cmd, NULL);
1582 	spin_unlock_irqrestore(&cp->lock, flags);
1583 	return rc;
1584 }
1585 
1586 static int cp_set_mac_address(struct net_device *dev, void *p)
1587 {
1588 	struct cp_private *cp = netdev_priv(dev);
1589 	struct sockaddr *addr = p;
1590 
1591 	if (!is_valid_ether_addr(addr->sa_data))
1592 		return -EADDRNOTAVAIL;
1593 
1594 	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1595 
1596 	spin_lock_irq(&cp->lock);
1597 
1598 	cpw8_f(Cfg9346, Cfg9346_Unlock);
1599 	cpw32_f(MAC0 + 0, le32_to_cpu (*(__le32 *) (dev->dev_addr + 0)));
1600 	cpw32_f(MAC0 + 4, le32_to_cpu (*(__le32 *) (dev->dev_addr + 4)));
1601 	cpw8_f(Cfg9346, Cfg9346_Lock);
1602 
1603 	spin_unlock_irq(&cp->lock);
1604 
1605 	return 0;
1606 }
1607 
/* Serial EEPROM section.  The 93C46/93C56-style EEPROM is bit-banged
   through the Cfg9346 register. */
1609 
1610 /*  EEPROM_Ctrl bits. */
1611 #define EE_SHIFT_CLK	0x04	/* EEPROM shift clock. */
1612 #define EE_CS			0x08	/* EEPROM chip select. */
1613 #define EE_DATA_WRITE	0x02	/* EEPROM chip data in. */
1614 #define EE_WRITE_0		0x00
1615 #define EE_WRITE_1		0x02
1616 #define EE_DATA_READ	0x01	/* EEPROM chip data out. */
1617 #define EE_ENB			(0x80 | EE_CS)
1618 
1619 /* Delay between EEPROM clock transitions.
   No extra delay is needed with 33 MHz PCI, but 66 MHz may change this.
1621  */
1622 
1623 #define eeprom_delay()	readb(ee_addr)
1624 
/* The EEPROM commands include the always-set leading bit. */
1626 #define EE_EXTEND_CMD	(4)
1627 #define EE_WRITE_CMD	(5)
1628 #define EE_READ_CMD		(6)
1629 #define EE_ERASE_CMD	(7)
1630 
1631 #define EE_EWDS_ADDR	(0)
1632 #define EE_WRAL_ADDR	(1)
1633 #define EE_ERAL_ADDR	(2)
1634 #define EE_EWEN_ADDR	(3)
1635 
1636 #define CP_EEPROM_MAGIC PCI_DEVICE_ID_REALTEK_8139
1637 
1638 static void eeprom_cmd_start(void __iomem *ee_addr)
1639 {
1640 	writeb (EE_ENB & ~EE_CS, ee_addr);
1641 	writeb (EE_ENB, ee_addr);
1642 	eeprom_delay ();
1643 }
1644 
1645 static void eeprom_cmd(void __iomem *ee_addr, int cmd, int cmd_len)
1646 {
1647 	int i;
1648 
1649 	/* Shift the command bits out. */
1650 	for (i = cmd_len - 1; i >= 0; i--) {
1651 		int dataval = (cmd & (1 << i)) ? EE_DATA_WRITE : 0;
1652 		writeb (EE_ENB | dataval, ee_addr);
1653 		eeprom_delay ();
1654 		writeb (EE_ENB | dataval | EE_SHIFT_CLK, ee_addr);
1655 		eeprom_delay ();
1656 	}
1657 	writeb (EE_ENB, ee_addr);
1658 	eeprom_delay ();
1659 }
1660 
1661 static void eeprom_cmd_end(void __iomem *ee_addr)
1662 {
1663 	writeb(0, ee_addr);
1664 	eeprom_delay ();
1665 }
1666 
1667 static void eeprom_extend_cmd(void __iomem *ee_addr, int extend_cmd,
1668 			      int addr_len)
1669 {
1670 	int cmd = (EE_EXTEND_CMD << addr_len) | (extend_cmd << (addr_len - 2));
1671 
1672 	eeprom_cmd_start(ee_addr);
1673 	eeprom_cmd(ee_addr, cmd, 3 + addr_len);
1674 	eeprom_cmd_end(ee_addr);
1675 }
1676 
1677 static u16 read_eeprom (void __iomem *ioaddr, int location, int addr_len)
1678 {
1679 	int i;
1680 	u16 retval = 0;
1681 	void __iomem *ee_addr = ioaddr + Cfg9346;
1682 	int read_cmd = location | (EE_READ_CMD << addr_len);
1683 
1684 	eeprom_cmd_start(ee_addr);
1685 	eeprom_cmd(ee_addr, read_cmd, 3 + addr_len);
1686 
1687 	for (i = 16; i > 0; i--) {
1688 		writeb (EE_ENB | EE_SHIFT_CLK, ee_addr);
1689 		eeprom_delay ();
1690 		retval =
1691 		    (retval << 1) | ((readb (ee_addr) & EE_DATA_READ) ? 1 :
1692 				     0);
1693 		writeb (EE_ENB, ee_addr);
1694 		eeprom_delay ();
1695 	}
1696 
1697 	eeprom_cmd_end(ee_addr);
1698 
1699 	return retval;
1700 }
1701 
1702 static void write_eeprom(void __iomem *ioaddr, int location, u16 val,
1703 			 int addr_len)
1704 {
1705 	int i;
1706 	void __iomem *ee_addr = ioaddr + Cfg9346;
1707 	int write_cmd = location | (EE_WRITE_CMD << addr_len);
1708 
	/* Enable writes (EWEN), program the word, wait for the part to signal
	 * completion, then disable writes again (EWDS).
	 */
	eeprom_extend_cmd(ee_addr, EE_EWEN_ADDR, addr_len);
1710 
1711 	eeprom_cmd_start(ee_addr);
1712 	eeprom_cmd(ee_addr, write_cmd, 3 + addr_len);
1713 	eeprom_cmd(ee_addr, val, 16);
1714 	eeprom_cmd_end(ee_addr);
1715 
1716 	eeprom_cmd_start(ee_addr);
1717 	for (i = 0; i < 20000; i++)
1718 		if (readb(ee_addr) & EE_DATA_READ)
1719 			break;
1720 	eeprom_cmd_end(ee_addr);
1721 
1722 	eeprom_extend_cmd(ee_addr, EE_EWDS_ADDR, addr_len);
1723 }
1724 
1725 static int cp_get_eeprom_len(struct net_device *dev)
1726 {
1727 	struct cp_private *cp = netdev_priv(dev);
1728 	int size;
1729 
1730 	spin_lock_irq(&cp->lock);
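	/* Word 0 reads 0x8129 when the larger 93C56-style part (8-bit
	 * addressing, 256 bytes) is fitted; otherwise assume a 93C46
	 * (6-bit addressing, 128 bytes).
	 */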
1731 	size = read_eeprom(cp->regs, 0, 8) == 0x8129 ? 256 : 128;
1732 	spin_unlock_irq(&cp->lock);
1733 
1734 	return size;
1735 }
1736 
1737 static int cp_get_eeprom(struct net_device *dev,
1738 			 struct ethtool_eeprom *eeprom, u8 *data)
1739 {
1740 	struct cp_private *cp = netdev_priv(dev);
1741 	unsigned int addr_len;
1742 	u16 val;
1743 	u32 offset = eeprom->offset >> 1;
1744 	u32 len = eeprom->len;
1745 	u32 i = 0;
1746 
1747 	eeprom->magic = CP_EEPROM_MAGIC;
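	/* The EEPROM is word (16-bit) addressed; handle odd byte offsets and
	 * odd lengths at the edges.
	 */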
1748 
1749 	spin_lock_irq(&cp->lock);
1750 
1751 	addr_len = read_eeprom(cp->regs, 0, 8) == 0x8129 ? 8 : 6;
1752 
1753 	if (eeprom->offset & 1) {
1754 		val = read_eeprom(cp->regs, offset, addr_len);
1755 		data[i++] = (u8)(val >> 8);
1756 		offset++;
1757 	}
1758 
1759 	while (i < len - 1) {
1760 		val = read_eeprom(cp->regs, offset, addr_len);
1761 		data[i++] = (u8)val;
1762 		data[i++] = (u8)(val >> 8);
1763 		offset++;
1764 	}
1765 
1766 	if (i < len) {
1767 		val = read_eeprom(cp->regs, offset, addr_len);
1768 		data[i] = (u8)val;
1769 	}
1770 
1771 	spin_unlock_irq(&cp->lock);
1772 	return 0;
1773 }
1774 
1775 static int cp_set_eeprom(struct net_device *dev,
1776 			 struct ethtool_eeprom *eeprom, u8 *data)
1777 {
1778 	struct cp_private *cp = netdev_priv(dev);
1779 	unsigned int addr_len;
1780 	u16 val;
1781 	u32 offset = eeprom->offset >> 1;
1782 	u32 len = eeprom->len;
1783 	u32 i = 0;
1784 
1785 	if (eeprom->magic != CP_EEPROM_MAGIC)
1786 		return -EINVAL;
1787 
1788 	spin_lock_irq(&cp->lock);
1789 
1790 	addr_len = read_eeprom(cp->regs, 0, 8) == 0x8129 ? 8 : 6;
1791 
1792 	if (eeprom->offset & 1) {
1793 		val = read_eeprom(cp->regs, offset, addr_len) & 0xff;
1794 		val |= (u16)data[i++] << 8;
1795 		write_eeprom(cp->regs, offset, val, addr_len);
1796 		offset++;
1797 	}
1798 
1799 	while (i < len - 1) {
1800 		val = (u16)data[i++];
1801 		val |= (u16)data[i++] << 8;
1802 		write_eeprom(cp->regs, offset, val, addr_len);
1803 		offset++;
1804 	}
1805 
1806 	if (i < len) {
1807 		val = read_eeprom(cp->regs, offset, addr_len) & 0xff00;
1808 		val |= (u16)data[i];
1809 		write_eeprom(cp->regs, offset, val, addr_len);
1810 	}
1811 
1812 	spin_unlock_irq(&cp->lock);
1813 	return 0;
1814 }
1815 
1816 /* Enable PME# generation and put the board into the D3hot low-power state
1816    until a wake-up event arrives */
1817 static void cp_set_d3_state (struct cp_private *cp)
1818 {
1819 	pci_enable_wake (cp->pdev, PCI_D0, 1); /* Enable PME# generation */
1820 	pci_set_power_state (cp->pdev, PCI_D3hot);
1821 }
1822 
1823 static const struct net_device_ops cp_netdev_ops = {
1824 	.ndo_open		= cp_open,
1825 	.ndo_stop		= cp_close,
1826 	.ndo_validate_addr	= eth_validate_addr,
1827 	.ndo_set_mac_address 	= cp_set_mac_address,
1828 	.ndo_set_rx_mode	= cp_set_rx_mode,
1829 	.ndo_get_stats		= cp_get_stats,
1830 	.ndo_do_ioctl		= cp_ioctl,
1831 	.ndo_start_xmit		= cp_start_xmit,
1832 	.ndo_tx_timeout		= cp_tx_timeout,
1833 	.ndo_set_features	= cp_set_features,
1834 	.ndo_change_mtu		= cp_change_mtu,
1835 
1836 #ifdef CONFIG_NET_POLL_CONTROLLER
1837 	.ndo_poll_controller	= cp_poll_controller,
1838 #endif
1839 };
1840 
1841 static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
1842 {
1843 	struct net_device *dev;
1844 	struct cp_private *cp;
1845 	int rc;
1846 	void __iomem *regs;
1847 	resource_size_t pciaddr;
1848 	unsigned int addr_len, i, pci_using_dac;
1849 
1850 #ifndef MODULE
1851 	static int version_printed;
1852 	if (version_printed++ == 0)
1853 		pr_info("%s", version);
1854 #endif
1855 
1856 	if (pdev->vendor == PCI_VENDOR_ID_REALTEK &&
1857 	    pdev->device == PCI_DEVICE_ID_REALTEK_8139 && pdev->revision < 0x20) {
1858 		dev_info(&pdev->dev,
1859 			 "This (id %04x:%04x rev %02x) is not an 8139C+ compatible chip, use 8139too\n",
1860 			 pdev->vendor, pdev->device, pdev->revision);
1861 		return -ENODEV;
1862 	}
1863 
1864 	dev = alloc_etherdev(sizeof(struct cp_private));
1865 	if (!dev)
1866 		return -ENOMEM;
1867 	SET_NETDEV_DEV(dev, &pdev->dev);
1868 
1869 	cp = netdev_priv(dev);
1870 	cp->pdev = pdev;
1871 	cp->dev = dev;
1872 	cp->msg_enable = (debug < 0 ? CP_DEF_MSG_ENABLE : debug);
1873 	spin_lock_init (&cp->lock);
1874 	cp->mii_if.dev = dev;
1875 	cp->mii_if.mdio_read = mdio_read;
1876 	cp->mii_if.mdio_write = mdio_write;
1877 	cp->mii_if.phy_id = CP_INTERNAL_PHY;
1878 	cp->mii_if.phy_id_mask = 0x1f;
1879 	cp->mii_if.reg_num_mask = 0x1f;
1880 	cp_set_rxbufsize(cp);
1881 
1882 	rc = pci_enable_device(pdev);
1883 	if (rc)
1884 		goto err_out_free;
1885 
1886 	rc = pci_set_mwi(pdev);
1887 	if (rc)
1888 		goto err_out_disable;
1889 
1890 	rc = pci_request_regions(pdev, DRV_NAME);
1891 	if (rc)
1892 		goto err_out_mwi;
1893 
1894 	pciaddr = pci_resource_start(pdev, 1);
1895 	if (!pciaddr) {
1896 		rc = -EIO;
1897 		dev_err(&pdev->dev, "no MMIO resource\n");
1898 		goto err_out_res;
1899 	}
1900 	if (pci_resource_len(pdev, 1) < CP_REGS_SIZE) {
1901 		rc = -EIO;
1902 		dev_err(&pdev->dev, "MMIO resource (%llx) too small\n",
1903 		       (unsigned long long)pci_resource_len(pdev, 1));
1904 		goto err_out_res;
1905 	}
1906 
1907 	/* Configure DMA attributes. */
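	/* Prefer a 64-bit DMA mask when dma_addr_t can hold it; otherwise fall
	 * back to 32-bit.  pci_using_dac later selects the PCIDAC command bit
	 * and NETIF_F_HIGHDMA.
	 */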
1908 	if ((sizeof(dma_addr_t) > 4) &&
1909 	    !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) &&
1910 	    !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
1911 		pci_using_dac = 1;
1912 	} else {
1913 		pci_using_dac = 0;
1914 
1915 		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
1916 		if (rc) {
1917 			dev_err(&pdev->dev,
1918 				"No usable DMA configuration, aborting\n");
1919 			goto err_out_res;
1920 		}
1921 		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
1922 		if (rc) {
1923 			dev_err(&pdev->dev,
1924 				"No usable consistent DMA configuration, aborting\n");
1925 			goto err_out_res;
1926 		}
1927 	}
1928 
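	/* Cached C+ command register value; written to the chip by the
	 * hardware init path.
	 */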
1929 	cp->cpcmd = (pci_using_dac ? PCIDAC : 0) |
1930 		    PCIMulRW | RxChkSum | CpRxOn | CpTxOn;
1931 
1932 	dev->features |= NETIF_F_RXCSUM;
1933 	dev->hw_features |= NETIF_F_RXCSUM;
1934 
1935 	regs = ioremap(pciaddr, CP_REGS_SIZE);
1936 	if (!regs) {
1937 		rc = -EIO;
1938 		dev_err(&pdev->dev, "Cannot map PCI MMIO (%Lx@%Lx)\n",
1939 			(unsigned long long)pci_resource_len(pdev, 1),
1940 		       (unsigned long long)pciaddr);
1941 		goto err_out_res;
1942 	}
1943 	cp->regs = regs;
1944 
1945 	cp_stop_hw(cp);
1946 
1947 	/* read MAC address from EEPROM */
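	/* the station address occupies EEPROM words 7..9 (three 16-bit words) */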
1948 	addr_len = read_eeprom (regs, 0, 8) == 0x8129 ? 8 : 6;
1949 	for (i = 0; i < 3; i++)
1950 		((__le16 *) (dev->dev_addr))[i] =
1951 		    cpu_to_le16(read_eeprom (regs, i + 7, addr_len));
1952 
1953 	dev->netdev_ops = &cp_netdev_ops;
1954 	netif_napi_add(dev, &cp->napi, cp_rx_poll, 16);
1955 	dev->ethtool_ops = &cp_ethtool_ops;
1956 	dev->watchdog_timeo = TX_TIMEOUT;
1957 
1958 	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
1959 
1960 	if (pci_using_dac)
1961 		dev->features |= NETIF_F_HIGHDMA;
1962 
1963 	/* disabled by default until verified */
1964 	dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
1965 		NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
1966 	dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
1967 		NETIF_F_HIGHDMA;
1968 
1969 	rc = register_netdev(dev);
1970 	if (rc)
1971 		goto err_out_iomap;
1972 
1973 	netdev_info(dev, "RTL-8139C+ at 0x%p, %pM, IRQ %d\n",
1974 		    regs, dev->dev_addr, pdev->irq);
1975 
1976 	pci_set_drvdata(pdev, dev);
1977 
1978 	/* enable busmastering and memory-write-invalidate */
1979 	pci_set_master(pdev);
1980 
1981 	if (cp->wol_enabled)
1982 		cp_set_d3_state (cp);
1983 
1984 	return 0;
1985 
1986 err_out_iomap:
1987 	iounmap(regs);
1988 err_out_res:
1989 	pci_release_regions(pdev);
1990 err_out_mwi:
1991 	pci_clear_mwi(pdev);
1992 err_out_disable:
1993 	pci_disable_device(pdev);
1994 err_out_free:
1995 	free_netdev(dev);
1996 	return rc;
1997 }
1998 
1999 static void cp_remove_one (struct pci_dev *pdev)
2000 {
2001 	struct net_device *dev = pci_get_drvdata(pdev);
2002 	struct cp_private *cp = netdev_priv(dev);
2003 
2004 	unregister_netdev(dev);
2005 	iounmap(cp->regs);
2006 	if (cp->wol_enabled)
2007 		pci_set_power_state (pdev, PCI_D0);
2008 	pci_release_regions(pdev);
2009 	pci_clear_mwi(pdev);
2010 	pci_disable_device(pdev);
2011 	pci_set_drvdata(pdev, NULL);
2012 	free_netdev(dev);
2013 }
2014 
2015 #ifdef CONFIG_PM
2016 static int cp_suspend (struct pci_dev *pdev, pm_message_t state)
2017 {
2018 	struct net_device *dev = pci_get_drvdata(pdev);
2019 	struct cp_private *cp = netdev_priv(dev);
2020 	unsigned long flags;
2021 
2022 	if (!netif_running(dev))
2023 		return 0;
2024 
2025 	netif_device_detach (dev);
2026 	netif_stop_queue (dev);
2027 
2028 	spin_lock_irqsave (&cp->lock, flags);
2029 
2030 	/* Disable Rx and Tx */
2031 	cpw16 (IntrMask, 0);
2032 	cpw8  (Cmd, cpr8 (Cmd) & ~(RxOn | TxOn));
2033 
2034 	spin_unlock_irqrestore (&cp->lock, flags);
2035 
2036 	pci_save_state(pdev);
2037 	pci_enable_wake(pdev, pci_choose_state(pdev, state), cp->wol_enabled);
2038 	pci_set_power_state(pdev, pci_choose_state(pdev, state));
2039 
2040 	return 0;
2041 }
2042 
2043 static int cp_resume (struct pci_dev *pdev)
2044 {
2045 	struct net_device *dev = pci_get_drvdata (pdev);
2046 	struct cp_private *cp = netdev_priv(dev);
2047 	unsigned long flags;
2048 
2049 	if (!netif_running(dev))
2050 		return 0;
2051 
2052 	netif_device_attach (dev);
2053 
2054 	pci_set_power_state(pdev, PCI_D0);
2055 	pci_restore_state(pdev);
2056 	pci_enable_wake(pdev, PCI_D0, 0);
2057 
2058 	/* FIXME: things may go wrong if the Rx ring buffer is depleted */
2059 	cp_init_rings_index (cp);
2060 	cp_init_hw (cp);
2061 	cp_enable_irq(cp);
2062 	netif_start_queue (dev);
2063 
2064 	spin_lock_irqsave (&cp->lock, flags);
2065 
2066 	mii_check_media(&cp->mii_if, netif_msg_link(cp), false);
2067 
2068 	spin_unlock_irqrestore (&cp->lock, flags);
2069 
2070 	return 0;
2071 }
2072 #endif /* CONFIG_PM */
2073 
2074 static struct pci_driver cp_driver = {
2075 	.name         = DRV_NAME,
2076 	.id_table     = cp_pci_tbl,
2077 	.probe        = cp_init_one,
2078 	.remove       = cp_remove_one,
2079 #ifdef CONFIG_PM
2080 	.resume       = cp_resume,
2081 	.suspend      = cp_suspend,
2082 #endif
2083 };
2084 
2085 static int __init cp_init (void)
2086 {
2087 #ifdef MODULE
2088 	pr_info("%s", version);
2089 #endif
2090 	return pci_register_driver(&cp_driver);
2091 }
2092 
2093 static void __exit cp_exit (void)
2094 {
2095 	pci_unregister_driver (&cp_driver);
2096 }
2097 
2098 module_init(cp_init);
2099 module_exit(cp_exit);
2100