1 /* winbond-840.c: A Linux PCI network adapter device driver. */
2 /*
3 	Written 1998-2001 by Donald Becker.
4 
5 	This software may be used and distributed according to the terms of
6 	the GNU General Public License (GPL), incorporated herein by reference.
7 	Drivers based on or derived from this code fall under the GPL and must
8 	retain the authorship, copyright and license notice.  This file is not
9 	a complete program and may only be used when the entire operating
10 	system is licensed under the GPL.
11 
12 	The author may be reached as becker@scyld.com, or C/O
13 	Scyld Computing Corporation
14 	410 Severn Ave., Suite 210
15 	Annapolis MD 21403
16 
17 	Support and updates available at
18 	http://www.scyld.com/network/drivers.html
19 
20 	Do not remove the copyright information.
21 	Do not change the version information unless an improvement has been made.
22 	Merely removing my name, as Compex has done in the past, does not count
23 	as an improvement.
24 
25 	Changelog:
26 	* ported to 2.4
27 		???
28 	* spin lock update, memory barriers, new style dma mappings
29 		limit each tx buffer to < 1024 bytes
		remove DescIntr from Rx descriptors (that's a Tx flag)
31 		remove next pointer from Tx descriptors
32 		synchronize tx_q_bytes
33 		software reset in tx_timeout
34 			Copyright (C) 2000 Manfred Spraul
35 	* further cleanups
36 		power management.
37 		support for big endian descriptors
38 			Copyright (C) 2001 Manfred Spraul
39   	* ethtool support (jgarzik)
40 	* Replace some MII-related magic numbers with constants (jgarzik)
41 
42 	TODO:
43 	* enable pci_power_off
44 	* Wake-On-LAN
45 */
46 
47 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
48 
49 #define DRV_NAME	"winbond-840"
50 
51 /* Automatically extracted configuration info:
52 probe-func: winbond840_probe
53 config-in: tristate 'Winbond W89c840 Ethernet support' CONFIG_WINBOND_840
54 
55 c-help-name: Winbond W89c840 PCI Ethernet support
56 c-help-symbol: CONFIG_WINBOND_840
57 c-help: This driver is for the Winbond W89c840 chip.  It also works with
58 c-help: the TX9882 chip on the Compex RL100-ATX board.
59 c-help: More specific information and updates are available from
60 c-help: http://www.scyld.com/network/drivers.html
61 */
62 
63 /* The user-configurable values.
64    These may be modified when a driver module is loaded.*/
65 
static int debug = 1;			/* 1 normal messages, 0 quiet .. 7 verbose. */
static int max_interrupt_work = 20;	/* Maximum events handled per interrupt. */
/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
   The '840 uses a 64 element hash table based on the Ethernet CRC.  */
static int multicast_filter_limit = 32;

/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
   Setting to > 1518 effectively disables this feature. */
static int rx_copybreak;

/* Used to pass the media type, etc.
   Both 'options[]' and 'full_duplex[]' should exist for driver
   interoperability.
   The media type is usually passed in 'options[]'.
*/
#define MAX_UNITS 8		/* More are supported, limit only on options */
static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};	/* -1: not set */
static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};	/* -1: not set */
84 
85 /* Operational parameters that are set at compile time. */
86 
87 /* Keep the ring sizes a power of two for compile efficiency.
88    The compiler will convert <unsigned>'%'<2^N> into a bit mask.
89    Making the Tx ring too large decreases the effectiveness of channel
90    bonding and packet priority.
91    There are no ill effects from too-large receive rings. */
92 #define TX_QUEUE_LEN	10		/* Limit ring entries actually used.  */
93 #define TX_QUEUE_LEN_RESTART	5
94 
95 #define TX_BUFLIMIT	(1024-128)
96 
97 /* The presumed FIFO size for working around the Tx-FIFO-overflow bug.
98    To avoid overflowing we don't queue again until we have room for a
99    full-size packet.
100  */
101 #define TX_FIFO_SIZE (2048)
102 #define TX_BUG_FIFO_LIMIT (TX_FIFO_SIZE-1514-16)
103 
104 
105 /* Operational parameters that usually are not changed. */
106 /* Time in jiffies before concluding the transmitter is hung. */
107 #define TX_TIMEOUT  (2*HZ)
108 
109 /* Include files, designed to support most kernel versions 2.0.0 and later. */
110 #include <linux/module.h>
111 #include <linux/kernel.h>
112 #include <linux/string.h>
113 #include <linux/timer.h>
114 #include <linux/errno.h>
115 #include <linux/ioport.h>
116 #include <linux/interrupt.h>
117 #include <linux/pci.h>
118 #include <linux/dma-mapping.h>
119 #include <linux/netdevice.h>
120 #include <linux/etherdevice.h>
121 #include <linux/skbuff.h>
122 #include <linux/init.h>
123 #include <linux/delay.h>
124 #include <linux/ethtool.h>
125 #include <linux/mii.h>
126 #include <linux/rtnetlink.h>
127 #include <linux/crc32.h>
128 #include <linux/bitops.h>
129 #include <linux/uaccess.h>
130 #include <asm/processor.h>		/* Processor type for cache alignment. */
131 #include <asm/io.h>
132 #include <asm/irq.h>
133 
134 #include "tulip.h"
135 
136 #undef PKT_BUF_SZ			/* tulip.h also defines this */
137 #define PKT_BUF_SZ		1536	/* Size of each temporary Rx buffer.*/
138 
139 MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
140 MODULE_DESCRIPTION("Winbond W89c840 Ethernet driver");
141 MODULE_LICENSE("GPL");
142 
143 module_param(max_interrupt_work, int, 0);
144 module_param(debug, int, 0);
145 module_param(rx_copybreak, int, 0);
146 module_param(multicast_filter_limit, int, 0);
147 module_param_array(options, int, NULL, 0);
148 module_param_array(full_duplex, int, NULL, 0);
149 MODULE_PARM_DESC(max_interrupt_work, "winbond-840 maximum events handled per interrupt");
150 MODULE_PARM_DESC(debug, "winbond-840 debug level (0-6)");
151 MODULE_PARM_DESC(rx_copybreak, "winbond-840 copy breakpoint for copy-only-tiny-frames");
152 MODULE_PARM_DESC(multicast_filter_limit, "winbond-840 maximum number of filtered multicast addresses");
153 MODULE_PARM_DESC(options, "winbond-840: Bits 0-3: media type, bit 17: full duplex");
154 MODULE_PARM_DESC(full_duplex, "winbond-840 full duplex setting(s) (1)");
155 
156 /*
157 				Theory of Operation
158 
159 I. Board Compatibility
160 
161 This driver is for the Winbond w89c840 chip.
162 
163 II. Board-specific settings
164 
165 None.
166 
167 III. Driver operation
168 
169 This chip is very similar to the Digital 21*4* "Tulip" family.  The first
170 twelve registers and the descriptor format are nearly identical.  Read a
171 Tulip manual for operational details.
172 
173 A significant difference is that the multicast filter and station address are
174 stored in registers rather than loaded through a pseudo-transmit packet.
175 
176 Unlike the Tulip, transmit buffers are limited to 1KB.  To transmit a
177 full-sized packet we must use both data buffers in a descriptor.  Thus the
178 driver uses ring mode where descriptors are implicitly sequential in memory,
179 rather than using the second descriptor address as a chain pointer to
180 subsequent descriptors.
181 
182 IV. Notes
183 
184 If you are going to almost clone a Tulip, why not go all the way and avoid
185 the need for a new driver?
186 
187 IVb. References
188 
189 http://www.scyld.com/expert/100mbps.html
190 http://www.scyld.com/expert/NWay.html
191 http://www.winbond.com.tw/
192 
193 IVc. Errata
194 
195 A horrible bug exists in the transmit FIFO.  Apparently the chip doesn't
196 correctly detect a full FIFO, and queuing more than 2048 bytes may result in
197 silent data corruption.
198 
199 Test with 'ping -s 10000' on a fast computer.
200 
201 */
202 
203 
204 
205 /*
206   PCI probe table.
207 */
/* Capability flags stored in pci_id_tbl[].drv_flags (np->drv_flags). */
enum chip_capability_flags {
	CanHaveMII=1, HasBrokenTx=2, AlwaysFDX=4, FDXOnNoMII=8,
};
211 
/* PCI IDs; driver_data (last field) indexes pci_id_tbl[] below. */
static const struct pci_device_id w840_pci_tbl[] = {
	{ 0x1050, 0x0840, PCI_ANY_ID, 0x8153,     0, 0, 0 },	/* subsystem 0x8153 variant */
	{ 0x1050, 0x0840, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },	/* generic Winbond W89c840 */
	{ 0x11f6, 0x2011, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 },	/* Compex RL100-ATX */
	{ }
};
MODULE_DEVICE_TABLE(pci, w840_pci_tbl);
219 
/* Length of the CSR region handed to pci_iomap() in w840_probe1(). */
enum {
	netdev_res_size		= 128,	/* size of PCI BAR resource */
};
223 
/* Human-readable board name plus capability flags for each table entry. */
struct pci_id_info {
        const char *name;
        int drv_flags;		/* Driver use, intended as capability flags. */
};
228 
/* Indexed by w840_pci_tbl[].driver_data (becomes np->chip_id). */
static const struct pci_id_info pci_id_tbl[] = {
	{ 				/* Sometime a Level-One switch card. */
	  "Winbond W89c840",	CanHaveMII | HasBrokenTx | FDXOnNoMII},
	{ "Winbond W89c840",	CanHaveMII | HasBrokenTx},
	{ "Compex RL100-ATX",	CanHaveMII | HasBrokenTx},
	{ }	/* terminate list. */
};
236 
237 /* This driver was written to use PCI memory space, however some x86 systems
238    work only with I/O space accesses. See CONFIG_TULIP_MMIO in .config
239 */
240 
241 /* Offsets to the Command and Status Registers, "CSRs".
242    While similar to the Tulip, these registers are longword aligned.
243    Note: It's not useful to define symbolic names for every register bit in
244    the device.  The name can only partially document the semantics and make
245    the driver longer and more difficult to read.
246 */
enum w840_offsets {
	PCIBusCfg=0x00, TxStartDemand=0x04, RxStartDemand=0x08,
	RxRingPtr=0x0C, TxRingPtr=0x10,
	IntrStatus=0x14, NetworkConfig=0x18, IntrEnable=0x1C,
	/* EECtrl and MIICtrl alias one register: serial EEPROM and MDIO bits. */
	RxMissed=0x20, EECtrl=0x24, MIICtrl=0x24, BootRom=0x28, GPTimer=0x2C,
	CurRxDescAddr=0x30, CurRxBufAddr=0x34,			/* Debug use */
	MulticastFilter0=0x38, MulticastFilter1=0x3C, StationAddr=0x40,
	CurTxDescAddr=0x4C, CurTxBufAddr=0x50,
};
256 
257 /* Bits in the NetworkConfig register. */
/* OR'ed into CSR6 (NetworkConfig) via __set_rx_mode(). */
enum rx_mode_bits {
	AcceptErr=0x80,
	RxAcceptBroadcast=0x20, AcceptMulticast=0x10,
	RxAcceptAllPhys=0x08, AcceptMyPhys=0x02,
};
263 
/* Bits in the EECtrl/MIICtrl register used to bit-bang the MDIO protocol
   (see mdio_sync()/mdio_read()/mdio_write()). */
enum mii_reg_bits {
	MDIO_ShiftClk=0x10000, MDIO_DataIn=0x80000, MDIO_DataOut=0x20000,
	MDIO_EnbOutput=0x40000, MDIO_EnbIn = 0x00000,
};
268 
269 /* The Tulip Rx and Tx buffer descriptors. */
struct w840_rx_desc {
	s32 status;	/* DescOwned hands the descriptor to the chip. */
	s32 length;	/* Buffer size; DescEndRing marks the ring wrap. */
	u32 buffer1;	/* Bus address of the receive buffer. */
	u32 buffer2;	/* Not written by this driver (implicit ring mode). */
};
276 
struct w840_tx_desc {
	s32 status;	/* DescOwned plus completion status. */
	s32 length;	/* Buffer lengths and control flags (e.g. DescWholePkt). */
	u32 buffer1, buffer2;	/* Bus addresses; both may be used since Tx buffers are limited to <1KB. */
};
282 
#define MII_CNT		1 /* winbond only supports one MII */
/* Per-device driver state, reached via netdev_priv(dev). */
struct netdev_private {
	struct w840_rx_desc *rx_ring;	/* Rx descriptor ring (coherent DMA block). */
	dma_addr_t	rx_addr[RX_RING_SIZE];	/* Bus addresses of Rx data buffers. */
	struct w840_tx_desc *tx_ring;	/* Tx ring; follows the Rx ring in the same block. */
	dma_addr_t	tx_addr[TX_RING_SIZE];	/* Bus addresses of Tx data buffers. */
	dma_addr_t ring_dma_addr;	/* Bus address of the combined Rx+Tx ring block. */
	/* The addresses of receive-in-place skbuffs. */
	struct sk_buff* rx_skbuff[RX_RING_SIZE];
	/* The saved address of a sent-in-place packet/buffer, for later free(). */
	struct sk_buff* tx_skbuff[TX_RING_SIZE];
	struct net_device_stats stats;
	struct timer_list timer;	/* Media monitoring timer. */
	/* Frequently used values: keep some adjacent for cache effect. */
	spinlock_t lock;
	int chip_id, drv_flags;		/* Index into and flags from pci_id_tbl[]. */
	struct pci_dev *pci_dev;
	int csr6;			/* Shadow of the NetworkConfig register. */
	struct w840_rx_desc *rx_head_desc;
	unsigned int cur_rx, dirty_rx;		/* Producer/consumer ring indices */
	unsigned int rx_buf_sz;				/* Based on MTU+slack. */
	unsigned int cur_tx, dirty_tx;
	unsigned int tx_q_bytes;	/* Bytes queued for Tx (FIFO-overflow workaround). */
	unsigned int tx_full;				/* The Tx queue is full. */
	/* MII transceiver section. */
	int mii_cnt;						/* MII device addresses. */
	unsigned char phys[MII_CNT];		/* MII device addresses, but only the first is used */
	u32 mii;			/* PHY ID: (PHYSID1 << 16) | PHYSID2. */
	struct mii_if_info mii_if;
	void __iomem *base_addr;	/* Mapped CSR base from pci_iomap(). */
};
314 
315 static int  eeprom_read(void __iomem *ioaddr, int location);
316 static int  mdio_read(struct net_device *dev, int phy_id, int location);
317 static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
318 static int  netdev_open(struct net_device *dev);
319 static int  update_link(struct net_device *dev);
320 static void netdev_timer(struct timer_list *t);
321 static void init_rxtx_rings(struct net_device *dev);
322 static void free_rxtx_rings(struct netdev_private *np);
323 static void init_registers(struct net_device *dev);
324 static void tx_timeout(struct net_device *dev, unsigned int txqueue);
325 static int alloc_ringdesc(struct net_device *dev);
326 static void free_ringdesc(struct netdev_private *np);
327 static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev);
328 static irqreturn_t intr_handler(int irq, void *dev_instance);
329 static void netdev_error(struct net_device *dev, int intr_status);
330 static int  netdev_rx(struct net_device *dev);
331 static u32 __set_rx_mode(struct net_device *dev);
332 static void set_rx_mode(struct net_device *dev);
333 static struct net_device_stats *get_stats(struct net_device *dev);
334 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
335 static const struct ethtool_ops netdev_ethtool_ops;
336 static int  netdev_close(struct net_device *dev);
337 
/* net_device callbacks; installed on the device in w840_probe1(). */
static const struct net_device_ops netdev_ops = {
	.ndo_open		= netdev_open,
	.ndo_stop		= netdev_close,
	.ndo_start_xmit		= start_tx,
	.ndo_get_stats		= get_stats,
	.ndo_set_rx_mode	= set_rx_mode,
	.ndo_do_ioctl		= netdev_ioctl,
	.ndo_tx_timeout		= tx_timeout,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};
349 
350 static int w840_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
351 {
352 	struct net_device *dev;
353 	struct netdev_private *np;
354 	static int find_cnt;
355 	int chip_idx = ent->driver_data;
356 	int irq;
357 	int i, option = find_cnt < MAX_UNITS ? options[find_cnt] : 0;
358 	void __iomem *ioaddr;
359 
360 	i = pci_enable_device(pdev);
361 	if (i) return i;
362 
363 	pci_set_master(pdev);
364 
365 	irq = pdev->irq;
366 
367 	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
368 		pr_warn("Device %s disabled due to DMA limitations\n",
369 			pci_name(pdev));
370 		return -EIO;
371 	}
372 	dev = alloc_etherdev(sizeof(*np));
373 	if (!dev)
374 		return -ENOMEM;
375 	SET_NETDEV_DEV(dev, &pdev->dev);
376 
377 	if (pci_request_regions(pdev, DRV_NAME))
378 		goto err_out_netdev;
379 
380 	ioaddr = pci_iomap(pdev, TULIP_BAR, netdev_res_size);
381 	if (!ioaddr)
382 		goto err_out_free_res;
383 
384 	for (i = 0; i < 3; i++)
385 		((__le16 *)dev->dev_addr)[i] = cpu_to_le16(eeprom_read(ioaddr, i));
386 
387 	/* Reset the chip to erase previous misconfiguration.
388 	   No hold time required! */
389 	iowrite32(0x00000001, ioaddr + PCIBusCfg);
390 
391 	np = netdev_priv(dev);
392 	np->pci_dev = pdev;
393 	np->chip_id = chip_idx;
394 	np->drv_flags = pci_id_tbl[chip_idx].drv_flags;
395 	spin_lock_init(&np->lock);
396 	np->mii_if.dev = dev;
397 	np->mii_if.mdio_read = mdio_read;
398 	np->mii_if.mdio_write = mdio_write;
399 	np->base_addr = ioaddr;
400 
401 	pci_set_drvdata(pdev, dev);
402 
403 	if (dev->mem_start)
404 		option = dev->mem_start;
405 
406 	/* The lower four bits are the media type. */
407 	if (option > 0) {
408 		if (option & 0x200)
409 			np->mii_if.full_duplex = 1;
410 		if (option & 15)
411 			dev_info(&dev->dev,
412 				 "ignoring user supplied media type %d",
413 				 option & 15);
414 	}
415 	if (find_cnt < MAX_UNITS  &&  full_duplex[find_cnt] > 0)
416 		np->mii_if.full_duplex = 1;
417 
418 	if (np->mii_if.full_duplex)
419 		np->mii_if.force_media = 1;
420 
421 	/* The chip-specific entries in the device structure. */
422 	dev->netdev_ops = &netdev_ops;
423 	dev->ethtool_ops = &netdev_ethtool_ops;
424 	dev->watchdog_timeo = TX_TIMEOUT;
425 
426 	i = register_netdev(dev);
427 	if (i)
428 		goto err_out_cleardev;
429 
430 	dev_info(&dev->dev, "%s at %p, %pM, IRQ %d\n",
431 		 pci_id_tbl[chip_idx].name, ioaddr, dev->dev_addr, irq);
432 
433 	if (np->drv_flags & CanHaveMII) {
434 		int phy, phy_idx = 0;
435 		for (phy = 1; phy < 32 && phy_idx < MII_CNT; phy++) {
436 			int mii_status = mdio_read(dev, phy, MII_BMSR);
437 			if (mii_status != 0xffff  &&  mii_status != 0x0000) {
438 				np->phys[phy_idx++] = phy;
439 				np->mii_if.advertising = mdio_read(dev, phy, MII_ADVERTISE);
440 				np->mii = (mdio_read(dev, phy, MII_PHYSID1) << 16)+
441 						mdio_read(dev, phy, MII_PHYSID2);
442 				dev_info(&dev->dev,
443 					 "MII PHY %08xh found at address %d, status 0x%04x advertising %04x\n",
444 					 np->mii, phy, mii_status,
445 					 np->mii_if.advertising);
446 			}
447 		}
448 		np->mii_cnt = phy_idx;
449 		np->mii_if.phy_id = np->phys[0];
450 		if (phy_idx == 0) {
451 			dev_warn(&dev->dev,
452 				 "MII PHY not found -- this device may not operate correctly\n");
453 		}
454 	}
455 
456 	find_cnt++;
457 	return 0;
458 
459 err_out_cleardev:
460 	pci_iounmap(pdev, ioaddr);
461 err_out_free_res:
462 	pci_release_regions(pdev);
463 err_out_netdev:
464 	free_netdev (dev);
465 	return -ENODEV;
466 }
467 
468 
469 /* Read the EEPROM and MII Management Data I/O (MDIO) interfaces.  These are
470    often serial bit streams generated by the host processor.
471    The example below is for the common 93c46 EEPROM, 64 16 bit words. */
472 
473 /* Delay between EEPROM clock transitions.
474    No extra delay is needed with 33Mhz PCI, but future 66Mhz access may need
475    a delay.  Note that pre-2.0.34 kernels had a cache-alignment bug that
476    made udelay() unreliable.
477    The old method of using an ISA access as a delay, __SLOW_DOWN_IO__, is
478    deprecated.
479 */
480 #define eeprom_delay(ee_addr)	ioread32(ee_addr)
481 
/* Bit definitions for the EECtrl register (93c46 serial EEPROM interface). */
enum EEPROM_Ctrl_Bits {
	EE_ShiftClk=0x02, EE_Write0=0x801, EE_Write1=0x805,
	EE_ChipSelect=0x801, EE_DataIn=0x08,
};
486 
/* The EEPROM commands include the always-set leading bit. */
enum EEPROM_Cmds {
	/* Opcode shifted past the six address bits (64-word device). */
	EE_WriteCmd=(5 << 6), EE_ReadCmd=(6 << 6), EE_EraseCmd=(7 << 6),
};
491 
/* Read one 16-bit word at @location from the 93c46-style serial EEPROM.
 * @addr is the mapped CSR base; the serial interface lives at EECtrl.
 * Bit-bangs the read command out, then clocks the 16 data bits back in.
 */
static int eeprom_read(void __iomem *addr, int location)
{
	int i;
	int retval = 0;
	void __iomem *ee_addr = addr + EECtrl;
	int read_cmd = location | EE_ReadCmd;
	iowrite32(EE_ChipSelect, ee_addr);

	/* Shift the read command bits out. */
	for (i = 10; i >= 0; i--) {
		short dataval = (read_cmd & (1 << i)) ? EE_Write1 : EE_Write0;
		iowrite32(dataval, ee_addr);
		eeprom_delay(ee_addr);
		iowrite32(dataval | EE_ShiftClk, ee_addr);	/* rising clock edge */
		eeprom_delay(ee_addr);
	}
	iowrite32(EE_ChipSelect, ee_addr);
	eeprom_delay(ee_addr);

	/* Clock the 16 data bits back in, MSB first. */
	for (i = 16; i > 0; i--) {
		iowrite32(EE_ChipSelect | EE_ShiftClk, ee_addr);
		eeprom_delay(ee_addr);
		retval = (retval << 1) | ((ioread32(ee_addr) & EE_DataIn) ? 1 : 0);
		iowrite32(EE_ChipSelect, ee_addr);
		eeprom_delay(ee_addr);
	}

	/* Terminate the EEPROM access. */
	iowrite32(0, ee_addr);
	return retval;
}
523 
524 /*  MII transceiver control section.
525 	Read and write the MII registers using software-generated serial
526 	MDIO protocol.  See the MII specifications or DP83840A data sheet
527 	for details.
528 
529 	The maximum data clock rate is 2.5 Mhz.  The minimum timing is usually
530 	met by back-to-back 33Mhz PCI cycles. */
531 #define mdio_delay(mdio_addr) ioread32(mdio_addr)
532 
/* Set iff a MII transceiver on any interface requires mdio preamble.
   This is only set with older transceivers, so the extra
   code size of a per-interface flag is not worthwhile. */
536 static char mii_preamble_required = 1;
537 
538 #define MDIO_WRITE0 (MDIO_EnbOutput)
539 #define MDIO_WRITE1 (MDIO_DataOut | MDIO_EnbOutput)
540 
541 /* Generate the preamble required for initial synchronization and
542    a few older transceivers. */
543 static void mdio_sync(void __iomem *mdio_addr)
544 {
545 	int bits = 32;
546 
547 	/* Establish sync by sending at least 32 logic ones. */
548 	while (--bits >= 0) {
549 		iowrite32(MDIO_WRITE1, mdio_addr);
550 		mdio_delay(mdio_addr);
551 		iowrite32(MDIO_WRITE1 | MDIO_ShiftClk, mdio_addr);
552 		mdio_delay(mdio_addr);
553 	}
554 }
555 
/* Read MII register @location of PHY @phy_id via the bit-banged MDIO
 * interface at MIICtrl.  Returns the 16-bit register value; callers
 * treat 0xffff as "no PHY present".
 */
static int mdio_read(struct net_device *dev, int phy_id, int location)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *mdio_addr = np->base_addr + MIICtrl;
	int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;	/* read frame header */
	int i, retval = 0;

	if (mii_preamble_required)
		mdio_sync(mdio_addr);

	/* Shift the read command bits out. */
	for (i = 15; i >= 0; i--) {
		int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;

		iowrite32(dataval, mdio_addr);
		mdio_delay(mdio_addr);
		iowrite32(dataval | MDIO_ShiftClk, mdio_addr);
		mdio_delay(mdio_addr);
	}
	/* Read the two transition, 16 data, and wire-idle bits. */
	for (i = 20; i > 0; i--) {
		iowrite32(MDIO_EnbIn, mdio_addr);
		mdio_delay(mdio_addr);
		retval = (retval << 1) | ((ioread32(mdio_addr) & MDIO_DataIn) ? 1 : 0);
		iowrite32(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
		mdio_delay(mdio_addr);
	}
	return (retval>>1) & 0xffff;	/* drop the trailing idle bit */
}
585 
586 static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
587 {
588 	struct netdev_private *np = netdev_priv(dev);
589 	void __iomem *mdio_addr = np->base_addr + MIICtrl;
590 	int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location<<18) | value;
591 	int i;
592 
593 	if (location == 4  &&  phy_id == np->phys[0])
594 		np->mii_if.advertising = value;
595 
596 	if (mii_preamble_required)
597 		mdio_sync(mdio_addr);
598 
599 	/* Shift the command bits out. */
600 	for (i = 31; i >= 0; i--) {
601 		int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
602 
603 		iowrite32(dataval, mdio_addr);
604 		mdio_delay(mdio_addr);
605 		iowrite32(dataval | MDIO_ShiftClk, mdio_addr);
606 		mdio_delay(mdio_addr);
607 	}
608 	/* Clear out extra bits. */
609 	for (i = 2; i > 0; i--) {
610 		iowrite32(MDIO_EnbIn, mdio_addr);
611 		mdio_delay(mdio_addr);
612 		iowrite32(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
613 		mdio_delay(mdio_addr);
614 	}
615 }
616 
617 
/* ndo_open: reset the chip, install the (shared) interrupt handler,
 * allocate and initialize the descriptor rings, program the chip
 * registers and arm the 1-second link-check timer.
 * Returns 0 or the negative errno from request_irq()/alloc_ringdesc().
 */
static int netdev_open(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;
	const int irq = np->pci_dev->irq;
	int i;

	iowrite32(0x00000001, ioaddr + PCIBusCfg);		/* Reset */

	/* Mark the device absent until setup finishes: update_csr6()
	   forces everything stopped while !netif_device_present(). */
	netif_device_detach(dev);
	i = request_irq(irq, intr_handler, IRQF_SHARED, dev->name, dev);
	if (i)
		goto out_err;

	if (debug > 1)
		netdev_dbg(dev, "w89c840_open() irq %d\n", irq);

	if((i=alloc_ringdesc(dev)))
		goto out_err;

	spin_lock_irq(&np->lock);
	netif_device_attach(dev);
	init_registers(dev);
	spin_unlock_irq(&np->lock);

	netif_start_queue(dev);
	if (debug > 2)
		netdev_dbg(dev, "Done netdev_open()\n");

	/* Set the timer to check for link beat. */
	timer_setup(&np->timer, netdev_timer, 0);
	np->timer.expires = jiffies + 1*HZ;
	add_timer(&np->timer);
	return 0;
out_err:
	netif_device_attach(dev);	/* undo the detach on failure */
	return i;
}
656 
657 #define MII_DAVICOM_DM9101	0x0181b800
658 
659 static int update_link(struct net_device *dev)
660 {
661 	struct netdev_private *np = netdev_priv(dev);
662 	int duplex, fasteth, result, mii_reg;
663 
664 	/* BSMR */
665 	mii_reg = mdio_read(dev, np->phys[0], MII_BMSR);
666 
667 	if (mii_reg == 0xffff)
668 		return np->csr6;
669 	/* reread: the link status bit is sticky */
670 	mii_reg = mdio_read(dev, np->phys[0], MII_BMSR);
671 	if (!(mii_reg & 0x4)) {
672 		if (netif_carrier_ok(dev)) {
673 			if (debug)
674 				dev_info(&dev->dev,
675 					 "MII #%d reports no link. Disabling watchdog\n",
676 					 np->phys[0]);
677 			netif_carrier_off(dev);
678 		}
679 		return np->csr6;
680 	}
681 	if (!netif_carrier_ok(dev)) {
682 		if (debug)
683 			dev_info(&dev->dev,
684 				 "MII #%d link is back. Enabling watchdog\n",
685 				 np->phys[0]);
686 		netif_carrier_on(dev);
687 	}
688 
689 	if ((np->mii & ~0xf) == MII_DAVICOM_DM9101) {
690 		/* If the link partner doesn't support autonegotiation
691 		 * the MII detects it's abilities with the "parallel detection".
692 		 * Some MIIs update the LPA register to the result of the parallel
693 		 * detection, some don't.
694 		 * The Davicom PHY [at least 0181b800] doesn't.
695 		 * Instead bit 9 and 13 of the BMCR are updated to the result
696 		 * of the negotiation..
697 		 */
698 		mii_reg = mdio_read(dev, np->phys[0], MII_BMCR);
699 		duplex = mii_reg & BMCR_FULLDPLX;
700 		fasteth = mii_reg & BMCR_SPEED100;
701 	} else {
702 		int negotiated;
703 		mii_reg	= mdio_read(dev, np->phys[0], MII_LPA);
704 		negotiated = mii_reg & np->mii_if.advertising;
705 
706 		duplex = (negotiated & LPA_100FULL) || ((negotiated & 0x02C0) == LPA_10FULL);
707 		fasteth = negotiated & 0x380;
708 	}
709 	duplex |= np->mii_if.force_media;
710 	/* remove fastether and fullduplex */
711 	result = np->csr6 & ~0x20000200;
712 	if (duplex)
713 		result |= 0x200;
714 	if (fasteth)
715 		result |= 0x20000000;
716 	if (result != np->csr6 && debug)
717 		dev_info(&dev->dev,
718 			 "Setting %dMBit-%s-duplex based on MII#%d\n",
719 			 fasteth ? 100 : 10, duplex ? "full" : "half",
720 			 np->phys[0]);
721 	return result;
722 }
723 
724 #define RXTX_TIMEOUT	2000
/* Switch CSR6 (NetworkConfig) to @new: stop the Tx and Rx processes,
 * poll IntrStatus until both state machines report stopped (bounded by
 * RXTX_TIMEOUT iterations of udelay(1)), then write the new value to
 * restart them.  Callers hold np->lock.  If the device is marked
 * absent, @new is forced to 0 so everything stays stopped.
 */
static inline void update_csr6(struct net_device *dev, int new)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;
	int limit = RXTX_TIMEOUT;

	if (!netif_device_present(dev))
		new = 0;
	if (new==np->csr6)
		return;
	/* stop both Tx and Rx processes */
	iowrite32(np->csr6 & ~0x2002, ioaddr + NetworkConfig);
	/* wait until they have really stopped */
	for (;;) {
		int csr5 = ioread32(ioaddr + IntrStatus);
		int t;

		t = (csr5 >> 17) & 0x07;	/* Rx process state */
		if (t==0||t==1) {
			/* rx stopped */
			t = (csr5 >> 20) & 0x07;	/* Tx process state */
			if (t==0||t==1)
				break;
		}

		limit--;
		if(!limit) {
			dev_info(&dev->dev,
				 "couldn't stop rxtx, IntrStatus %xh\n", csr5);
			break;
		}
		udelay(1);
	}
	np->csr6 = new;
	/* and restart them with the new configuration */
	iowrite32(np->csr6, ioaddr + NetworkConfig);
	if (new & 0x200)
		np->mii_if.full_duplex = 1;	/* 0x200 = full-duplex bit */
}
764 
765 static void netdev_timer(struct timer_list *t)
766 {
767 	struct netdev_private *np = from_timer(np, t, timer);
768 	struct net_device *dev = pci_get_drvdata(np->pci_dev);
769 	void __iomem *ioaddr = np->base_addr;
770 
771 	if (debug > 2)
772 		netdev_dbg(dev, "Media selection timer tick, status %08x config %08x\n",
773 			   ioread32(ioaddr + IntrStatus),
774 			   ioread32(ioaddr + NetworkConfig));
775 	spin_lock_irq(&np->lock);
776 	update_csr6(dev, update_link(dev));
777 	spin_unlock_irq(&np->lock);
778 	np->timer.expires = jiffies + 10*HZ;
779 	add_timer(&np->timer);
780 }
781 
/* Initialize the Rx and Tx descriptor rings inside the coherent block
 * allocated by alloc_ringdesc(): the Tx ring directly follows the Rx
 * ring.  Rx buffers are allocated and DMA-mapped here, and the ring
 * base addresses are programmed into the chip.
 */
static void init_rxtx_rings(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int i;

	np->rx_head_desc = &np->rx_ring[0];
	/* Tx ring shares the allocation, right after the Rx descriptors. */
	np->tx_ring = (struct w840_tx_desc*)&np->rx_ring[RX_RING_SIZE];

	/* Initial all Rx descriptors. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].length = np->rx_buf_sz;
		np->rx_ring[i].status = 0;	/* not yet owned by the chip */
		np->rx_skbuff[i] = NULL;
	}
	/* Mark the last entry as wrapping the ring. */
	np->rx_ring[i-1].length |= DescEndRing;

	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz);
		np->rx_skbuff[i] = skb;
		if (skb == NULL)
			break;
		np->rx_addr[i] = pci_map_single(np->pci_dev,skb->data,
					np->rx_buf_sz,PCI_DMA_FROMDEVICE);

		np->rx_ring[i].buffer1 = np->rx_addr[i];
		np->rx_ring[i].status = DescOwned;	/* hand it to the chip */
	}

	np->cur_rx = 0;
	/* If allocations failed this wraps "negative" (modulo arithmetic)
	   so the driver can tell how many buffers are missing. */
	np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);

	/* Initialize the Tx descriptors */
	for (i = 0; i < TX_RING_SIZE; i++) {
		np->tx_skbuff[i] = NULL;
		np->tx_ring[i].status = 0;
	}
	np->tx_full = 0;
	np->tx_q_bytes = np->dirty_tx = np->cur_tx = 0;

	/* Tell the chip where both rings live. */
	iowrite32(np->ring_dma_addr, np->base_addr + RxRingPtr);
	iowrite32(np->ring_dma_addr+sizeof(struct w840_rx_desc)*RX_RING_SIZE,
		np->base_addr + TxRingPtr);

}
828 
/* Unmap and free every skbuff attached to the Rx and Tx rings and clear
 * descriptor ownership.  Callers stop or reset the chip first (see
 * tx_timeout()), so no DMA is in flight.
 */
static void free_rxtx_rings(struct netdev_private* np)
{
	int i;
	/* Free all the skbuffs in the Rx queue. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].status = 0;	/* reclaim from the chip */
		if (np->rx_skbuff[i]) {
			pci_unmap_single(np->pci_dev,
						np->rx_addr[i],
						np->rx_skbuff[i]->len,
						PCI_DMA_FROMDEVICE);
			dev_kfree_skb(np->rx_skbuff[i]);
		}
		np->rx_skbuff[i] = NULL;
	}
	/* And any packets still waiting to be transmitted. */
	for (i = 0; i < TX_RING_SIZE; i++) {
		if (np->tx_skbuff[i]) {
			pci_unmap_single(np->pci_dev,
						np->tx_addr[i],
						np->tx_skbuff[i]->len,
						PCI_DMA_TODEVICE);
			dev_kfree_skb(np->tx_skbuff[i]);
		}
		np->tx_skbuff[i] = NULL;
	}
}
855 
/* Program the chip for operation: station address, bus/burst
 * configuration (CSR0), Rx mode, CSR6 and the interrupt mask; finally
 * kick the Rx poll demand.  Called with np->lock held, after the rings
 * have been initialized.
 */
static void init_registers(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;
	int i;

	for (i = 0; i < 6; i++)
		iowrite8(dev->dev_addr[i], ioaddr + StationAddr + i);

	/* Initialize other registers. */
#ifdef __BIG_ENDIAN
	i = (1<<20);	/* Big-endian descriptors */
#else
	i = 0;
#endif
	i |= (0x04<<2);		/* skip length 4 u32 */
	i |= 0x02;		/* give Rx priority */

	/* Configure the PCI bus bursts and FIFO thresholds.
	   486: Set 8 longword cache alignment, 8 longword burst.
	   586: Set 16 longword cache alignment, no burst limit.
	   Cache alignment bits 15:14	     Burst length 13:8
		0000	<not allowed> 		0000 align to cache	0800 8 longwords
		4000	8  longwords		0100 1 longword		1000 16 longwords
		8000	16 longwords		0200 2 longwords	2000 32 longwords
		C000	32  longwords		0400 4 longwords */

#if defined (__i386__) && !defined(MODULE)
	/* When not a module we can work around broken '486 PCI boards. */
	if (boot_cpu_data.x86 <= 4) {
		i |= 0x4800;
		dev_info(&dev->dev,
			 "This is a 386/486 PCI system, setting cache alignment to 8 longwords\n");
	} else {
		i |= 0xE000;
	}
#elif defined(__powerpc__) || defined(__i386__) || defined(__alpha__) || defined(__ia64__) || defined(__x86_64__)
	i |= 0xE000;
#elif defined(CONFIG_SPARC) || defined (CONFIG_PARISC) || defined(CONFIG_ARM)
	i |= 0x4800;
#else
	dev_warn(&dev->dev, "unknown CPU architecture, using default csr0 setting\n");
	i |= 0x4800;
#endif
	iowrite32(i, ioaddr + PCIBusCfg);

	np->csr6 = 0;
	/* 128 byte Tx threshold;
		Transmit on; Receive on; */
	update_csr6(dev, 0x00022002 | update_link(dev) | __set_rx_mode(dev));

	/* Clear and Enable interrupts by setting the interrupt mask. */
	iowrite32(0x1A0F5, ioaddr + IntrStatus);
	iowrite32(0x1A0F5, ioaddr + IntrEnable);

	iowrite32(0, ioaddr + RxStartDemand);	/* Rx poll demand */
}
913 
/* ndo_tx_timeout: dump the ring state for debugging, then perform a
 * software reset and rebuild rings and registers, because dirty_tx and
 * the chip's internal Tx descriptor pointer can fall out of sync under
 * high load.
 */
static void tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;
	const int irq = np->pci_dev->irq;

	dev_warn(&dev->dev, "Transmit timed out, status %08x, resetting...\n",
		 ioread32(ioaddr + IntrStatus));

	{
		int i;
		printk(KERN_DEBUG "  Rx ring %p: ", np->rx_ring);
		for (i = 0; i < RX_RING_SIZE; i++)
			printk(KERN_CONT " %08x", (unsigned int)np->rx_ring[i].status);
		printk(KERN_CONT "\n");
		printk(KERN_DEBUG "  Tx ring %p: ", np->tx_ring);
		for (i = 0; i < TX_RING_SIZE; i++)
			printk(KERN_CONT " %08x", np->tx_ring[i].status);
		printk(KERN_CONT "\n");
	}
	printk(KERN_DEBUG "Tx cur %d Tx dirty %d Tx Full %d, q bytes %d\n",
	       np->cur_tx, np->dirty_tx, np->tx_full, np->tx_q_bytes);
	printk(KERN_DEBUG "Tx Descriptor addr %xh\n", ioread32(ioaddr+0x4C));

	disable_irq(irq);
	spin_lock_irq(&np->lock);
	/*
	 * Under high load dirty_tx and the internal tx descriptor pointer
	 * come out of sync, thus perform a software reset and reinitialize
	 * everything.
	 */

	iowrite32(1, np->base_addr+PCIBusCfg);	/* software reset */
	udelay(1);

	free_rxtx_rings(np);
	init_rxtx_rings(dev);
	init_registers(dev);
	spin_unlock_irq(&np->lock);
	enable_irq(irq);

	netif_wake_queue(dev);
	netif_trans_update(dev); /* prevent tx timeout */
	np->stats.tx_errors++;
}
959 
960 /* Initialize the Rx and Tx rings, along with various 'dev' bits. */
961 static int alloc_ringdesc(struct net_device *dev)
962 {
963 	struct netdev_private *np = netdev_priv(dev);
964 
965 	np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
966 
967 	np->rx_ring = pci_alloc_consistent(np->pci_dev,
968 			sizeof(struct w840_rx_desc)*RX_RING_SIZE +
969 			sizeof(struct w840_tx_desc)*TX_RING_SIZE,
970 			&np->ring_dma_addr);
971 	if(!np->rx_ring)
972 		return -ENOMEM;
973 	init_rxtx_rings(dev);
974 	return 0;
975 }
976 
977 static void free_ringdesc(struct netdev_private *np)
978 {
979 	pci_free_consistent(np->pci_dev,
980 			sizeof(struct w840_rx_desc)*RX_RING_SIZE +
981 			sizeof(struct w840_tx_desc)*TX_RING_SIZE,
982 			np->rx_ring, np->ring_dma_addr);
983 
984 }
985 
/*
 * ndo_start_xmit handler: queue one skb on the Tx descriptor ring.
 * Always consumes the skb and returns NETDEV_TX_OK; back-pressure is
 * applied by stopping the queue when the ring (or the chip-bug byte
 * budget) is exhausted.
 */
static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	unsigned entry;

	/* Caution: the write order is important here, set the field
	   with the "ownership" bits last. */

	/* Calculate the next Tx descriptor entry. */
	entry = np->cur_tx % TX_RING_SIZE;

	np->tx_addr[entry] = pci_map_single(np->pci_dev,
				skb->data,skb->len, PCI_DMA_TODEVICE);
	np->tx_skbuff[entry] = skb;

	np->tx_ring[entry].buffer1 = np->tx_addr[entry];
	if (skb->len < TX_BUFLIMIT) {
		np->tx_ring[entry].length = DescWholePkt | skb->len;
	} else {
		/* Packets of TX_BUFLIMIT bytes or more are split across the
		   descriptor's two buffer pointers; the second buffer's
		   length occupies the bits above bit 11 of the length word. */
		int len = skb->len - TX_BUFLIMIT;

		np->tx_ring[entry].buffer2 = np->tx_addr[entry]+TX_BUFLIMIT;
		np->tx_ring[entry].length = DescWholePkt | (len << 11) | TX_BUFLIMIT;
	}
	/* Mark the last descriptor so the chip wraps back to the start. */
	if(entry == TX_RING_SIZE-1)
		np->tx_ring[entry].length |= DescEndRing;

	/* Now acquire the irq spinlock.
	 * The difficult race is the ordering between
	 * increasing np->cur_tx and setting DescOwned:
	 * - if np->cur_tx is increased first the interrupt
	 *   handler could consider the packet as transmitted
	 *   since DescOwned is cleared.
	 * - If DescOwned is set first the NIC could report the
	 *   packet as sent, but the interrupt handler would ignore it
	 *   since the np->cur_tx was not yet increased.
	 */
	spin_lock_irq(&np->lock);
	np->cur_tx++;

	wmb(); /* flush length, buffer1, buffer2 */
	np->tx_ring[entry].status = DescOwned;
	wmb(); /* flush status and kick the hardware */
	iowrite32(0, np->base_addr + TxStartDemand);
	np->tx_q_bytes += skb->len;
	/* Work around horrible bug in the chip by marking the queue as full
	   when we do not have FIFO room for a maximum sized packet. */
	if (np->cur_tx - np->dirty_tx > TX_QUEUE_LEN ||
		((np->drv_flags & HasBrokenTx) && np->tx_q_bytes > TX_BUG_FIFO_LIMIT)) {
		netif_stop_queue(dev);
		wmb();
		np->tx_full = 1;
	}
	spin_unlock_irq(&np->lock);

	if (debug > 4) {
		netdev_dbg(dev, "Transmit frame #%d queued in slot %d\n",
			   np->cur_tx, entry);
	}
	return NETDEV_TX_OK;
}
1047 
/*
 * Reclaim completed Tx descriptors: account errors/bytes/packets, unmap
 * and free each transmitted skb, and restart the queue once enough ring
 * and FIFO budget is available again.  Called from the interrupt handler
 * with np->lock held.
 */
static void netdev_tx_done(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
		int entry = np->dirty_tx % TX_RING_SIZE;
		int tx_status = np->tx_ring[entry].status;

		/* Negative status means DescOwned is still set: the chip
		   has not finished with this descriptor yet. */
		if (tx_status < 0)
			break;
		if (tx_status & 0x8000) { 	/* There was an error, log it. */
#ifndef final_version
			if (debug > 1)
				netdev_dbg(dev, "Transmit error, Tx status %08x\n",
					   tx_status);
#endif
			/* Classify the error by the individual status bits. */
			np->stats.tx_errors++;
			if (tx_status & 0x0104) np->stats.tx_aborted_errors++;
			if (tx_status & 0x0C80) np->stats.tx_carrier_errors++;
			if (tx_status & 0x0200) np->stats.tx_window_errors++;
			if (tx_status & 0x0002) np->stats.tx_fifo_errors++;
			if ((tx_status & 0x0080) && np->mii_if.full_duplex == 0)
				np->stats.tx_heartbeat_errors++;
		} else {
#ifndef final_version
			if (debug > 3)
				netdev_dbg(dev, "Transmit slot %d ok, Tx status %08x\n",
					   entry, tx_status);
#endif
			np->stats.tx_bytes += np->tx_skbuff[entry]->len;
			np->stats.collisions += (tx_status >> 3) & 15;
			np->stats.tx_packets++;
		}
		/* Free the original skb. */
		pci_unmap_single(np->pci_dev,np->tx_addr[entry],
					np->tx_skbuff[entry]->len,
					PCI_DMA_TODEVICE);
		np->tx_q_bytes -= np->tx_skbuff[entry]->len;
		dev_kfree_skb_irq(np->tx_skbuff[entry]);
		np->tx_skbuff[entry] = NULL;
	}
	/* Wake the queue only when both the descriptor count and the
	   chip-bug FIFO byte budget have enough headroom again. */
	if (np->tx_full &&
		np->cur_tx - np->dirty_tx < TX_QUEUE_LEN_RESTART &&
		np->tx_q_bytes < TX_BUG_FIFO_LIMIT) {
		/* The ring is no longer full, clear tbusy. */
		np->tx_full = 0;
		wmb();
		netif_wake_queue(dev);
	}
}
1097 
1098 /* The interrupt handler does all of the Rx thread work and cleans up
1099    after the Tx thread. */
static irqreturn_t intr_handler(int irq, void *dev_instance)
{
	struct net_device *dev = (struct net_device *)dev_instance;
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;
	int work_limit = max_interrupt_work;
	int handled = 0;

	/* Detached (e.g. suspended) devices must not be touched. */
	if (!netif_device_present(dev))
		return IRQ_NONE;
	do {
		u32 intr_status = ioread32(ioaddr + IntrStatus);

		/* Acknowledge all of the current interrupt sources ASAP. */
		iowrite32(intr_status & 0x001ffff, ioaddr + IntrStatus);

		if (debug > 4)
			netdev_dbg(dev, "Interrupt, status %04x\n", intr_status);

		/* No summary bit set: nothing (more) for us to do. */
		if ((intr_status & (NormalIntr|AbnormalIntr)) == 0)
			break;

		handled = 1;

		if (intr_status & (RxIntr | RxNoBuf))
			netdev_rx(dev);
		/* Restart the Rx engine if it ran out of buffers. */
		if (intr_status & RxNoBuf)
			iowrite32(0, ioaddr + RxStartDemand);

		if (intr_status & (TxNoBuf | TxIntr) &&
			np->cur_tx != np->dirty_tx) {
			spin_lock(&np->lock);
			netdev_tx_done(dev);
			spin_unlock(&np->lock);
		}

		/* Abnormal error summary/uncommon events handlers. */
		if (intr_status & (AbnormalIntr | TxFIFOUnderflow | SystemError |
						   TimerInt | TxDied))
			netdev_error(dev, intr_status);

		if (--work_limit < 0) {
			dev_warn(&dev->dev,
				 "Too much work at interrupt, status=0x%04x\n",
				 intr_status);
			/* Set the timer to re-enable the other interrupts after
			   10*82usec ticks. */
			spin_lock(&np->lock);
			if (netif_device_present(dev)) {
				iowrite32(AbnormalIntr | TimerInt, ioaddr + IntrEnable);
				iowrite32(10, ioaddr + GPTimer);
			}
			spin_unlock(&np->lock);
			break;
		}
	} while (1);

	if (debug > 3)
		netdev_dbg(dev, "exiting interrupt, status=%#4.4x\n",
			   ioread32(ioaddr + IntrStatus));
	return IRQ_RETVAL(handled);
}
1162 
1163 /* This routine is logically part of the interrupt handler, but separated
1164    for clarity and better register allocation. */
static int netdev_rx(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int entry = np->cur_rx % RX_RING_SIZE;
	/* Never process more descriptors than are outstanding. */
	int work_limit = np->dirty_rx + RX_RING_SIZE - np->cur_rx;

	if (debug > 4) {
		netdev_dbg(dev, " In netdev_rx(), entry %d status %04x\n",
			   entry, np->rx_ring[entry].status);
	}

	/* If EOP is set on the next entry, it's a new packet. Send it up. */
	while (--work_limit >= 0) {
		struct w840_rx_desc *desc = np->rx_head_desc;
		s32 status = desc->status;

		if (debug > 4)
			netdev_dbg(dev, "  netdev_rx() status was %08x\n",
				   status);
		/* Negative status = DescOwned still set: chip owns it. */
		if (status < 0)
			break;
		if ((status & 0x38008300) != 0x0300) {
			if ((status & 0x38000300) != 0x0300) {
				/* Ignore earlier buffers. */
				if ((status & 0xffff) != 0x7fff) {
					dev_warn(&dev->dev,
						 "Oversized Ethernet frame spanned multiple buffers, entry %#x status %04x!\n",
						 np->cur_rx, status);
					np->stats.rx_length_errors++;
				}
			} else if (status & 0x8000) {
				/* There was a fatal error. */
				if (debug > 2)
					netdev_dbg(dev, "Receive error, Rx status %08x\n",
						   status);
				np->stats.rx_errors++; /* end of a packet.*/
				if (status & 0x0890) np->stats.rx_length_errors++;
				if (status & 0x004C) np->stats.rx_frame_errors++;
				if (status & 0x0002) np->stats.rx_crc_errors++;
			}
		} else {
			struct sk_buff *skb;
			/* Omit the four octet CRC from the length. */
			int pkt_len = ((status >> 16) & 0x7ff) - 4;

#ifndef final_version
			if (debug > 4)
				netdev_dbg(dev, "  netdev_rx() normal Rx pkt length %d status %x\n",
					   pkt_len, status);
#endif
			/* Check if the packet is long enough to accept without copying
			   to a minimally-sized skbuff. */
			if (pkt_len < rx_copybreak &&
			    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
				skb_reserve(skb, 2);	/* 16 byte align the IP header */
				/* Small packet: copy into a fresh skb and leave
				   the original DMA buffer in the ring. */
				pci_dma_sync_single_for_cpu(np->pci_dev,np->rx_addr[entry],
							    np->rx_skbuff[entry]->len,
							    PCI_DMA_FROMDEVICE);
				skb_copy_to_linear_data(skb, np->rx_skbuff[entry]->data, pkt_len);
				skb_put(skb, pkt_len);
				pci_dma_sync_single_for_device(np->pci_dev,np->rx_addr[entry],
							       np->rx_skbuff[entry]->len,
							       PCI_DMA_FROMDEVICE);
			} else {
				/* Large packet: hand the ring's skb upstream;
				   the refill loop below replaces it. */
				pci_unmap_single(np->pci_dev,np->rx_addr[entry],
							np->rx_skbuff[entry]->len,
							PCI_DMA_FROMDEVICE);
				skb_put(skb = np->rx_skbuff[entry], pkt_len);
				np->rx_skbuff[entry] = NULL;
			}
#ifndef final_version				/* Remove after testing. */
			/* You will want this info for the initial debug. */
			if (debug > 5)
				netdev_dbg(dev, "  Rx data %pM %pM %02x%02x %pI4\n",
					   &skb->data[0], &skb->data[6],
					   skb->data[12], skb->data[13],
					   &skb->data[14]);
#endif
			skb->protocol = eth_type_trans(skb, dev);
			netif_rx(skb);
			np->stats.rx_packets++;
			np->stats.rx_bytes += pkt_len;
		}
		entry = (++np->cur_rx) % RX_RING_SIZE;
		np->rx_head_desc = &np->rx_ring[entry];
	}

	/* Refill the Rx ring buffers. */
	for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
		struct sk_buff *skb;
		entry = np->dirty_rx % RX_RING_SIZE;
		if (np->rx_skbuff[entry] == NULL) {
			skb = netdev_alloc_skb(dev, np->rx_buf_sz);
			np->rx_skbuff[entry] = skb;
			if (skb == NULL)
				break;			/* Better luck next round. */
			np->rx_addr[entry] = pci_map_single(np->pci_dev,
							skb->data,
							np->rx_buf_sz, PCI_DMA_FROMDEVICE);
			np->rx_ring[entry].buffer1 = np->rx_addr[entry];
		}
		/* Descriptor fields must be visible before ownership flips. */
		wmb();
		np->rx_ring[entry].status = DescOwned;
	}

	return 0;
}
1272 
/*
 * Handle the abnormal-interrupt summary: raise the Tx FIFO threshold on
 * underflow, count missed Rx frames, and re-enable interrupts when the
 * "too much work" timer fires.  Called from the interrupt handler.
 */
static void netdev_error(struct net_device *dev, int intr_status)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;

	if (debug > 2)
		netdev_dbg(dev, "Abnormal event, %08x\n", intr_status);
	/* All-ones status: presumably the chip is gone (reads float high);
	   don't touch the registers.  TODO confirm against hotplug path. */
	if (intr_status == 0xffffffff)
		return;
	spin_lock(&np->lock);
	if (intr_status & TxFIFOUnderflow) {
		int new;
		/* Bump up the Tx threshold */
#if 0
		/* This causes lots of dropped packets,
		 * and under high load even tx_timeouts
		 */
		new = np->csr6 + 0x4000;
#else
		/* Threshold lives in csr6 bits 20-14; double it, capping
		   at 127 = store-and-forward (load full packet first). */
		new = (np->csr6 >> 14)&0x7f;
		if (new < 64)
			new *= 2;
		 else
		 	new = 127; /* load full packet before starting */
		new = (np->csr6 & ~(0x7F << 14)) | (new<<14);
#endif
		netdev_dbg(dev, "Tx underflow, new csr6 %08x\n", new);
		update_csr6(dev, new);
	}
	if (intr_status & RxDied) {		/* Missed a Rx frame. */
		np->stats.rx_errors++;
	}
	if (intr_status & TimerInt) {
		/* Re-enable other interrupts. */
		if (netif_device_present(dev))
			iowrite32(0x1A0F5, ioaddr + IntrEnable);
	}
	np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;
	iowrite32(0, ioaddr + RxStartDemand);
	spin_unlock(&np->lock);
}
1314 
1315 static struct net_device_stats *get_stats(struct net_device *dev)
1316 {
1317 	struct netdev_private *np = netdev_priv(dev);
1318 	void __iomem *ioaddr = np->base_addr;
1319 
1320 	/* The chip only need report frame silently dropped. */
1321 	spin_lock_irq(&np->lock);
1322 	if (netif_running(dev) && netif_device_present(dev))
1323 		np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;
1324 	spin_unlock_irq(&np->lock);
1325 
1326 	return &np->stats;
1327 }
1328 
1329 
1330 static u32 __set_rx_mode(struct net_device *dev)
1331 {
1332 	struct netdev_private *np = netdev_priv(dev);
1333 	void __iomem *ioaddr = np->base_addr;
1334 	u32 mc_filter[2];			/* Multicast hash filter */
1335 	u32 rx_mode;
1336 
1337 	if (dev->flags & IFF_PROMISC) {			/* Set promiscuous. */
1338 		memset(mc_filter, 0xff, sizeof(mc_filter));
1339 		rx_mode = RxAcceptBroadcast | AcceptMulticast | RxAcceptAllPhys
1340 			| AcceptMyPhys;
1341 	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
1342 		   (dev->flags & IFF_ALLMULTI)) {
1343 		/* Too many to match, or accept all multicasts. */
1344 		memset(mc_filter, 0xff, sizeof(mc_filter));
1345 		rx_mode = RxAcceptBroadcast | AcceptMulticast | AcceptMyPhys;
1346 	} else {
1347 		struct netdev_hw_addr *ha;
1348 
1349 		memset(mc_filter, 0, sizeof(mc_filter));
1350 		netdev_for_each_mc_addr(ha, dev) {
1351 			int filbit;
1352 
1353 			filbit = (ether_crc(ETH_ALEN, ha->addr) >> 26) ^ 0x3F;
1354 			filbit &= 0x3f;
1355 			mc_filter[filbit >> 5] |= 1 << (filbit & 31);
1356 		}
1357 		rx_mode = RxAcceptBroadcast | AcceptMulticast | AcceptMyPhys;
1358 	}
1359 	iowrite32(mc_filter[0], ioaddr + MulticastFilter0);
1360 	iowrite32(mc_filter[1], ioaddr + MulticastFilter1);
1361 	return rx_mode;
1362 }
1363 
1364 static void set_rx_mode(struct net_device *dev)
1365 {
1366 	struct netdev_private *np = netdev_priv(dev);
1367 	u32 rx_mode = __set_rx_mode(dev);
1368 	spin_lock_irq(&np->lock);
1369 	update_csr6(dev, (np->csr6 & ~0x00F8) | rx_mode);
1370 	spin_unlock_irq(&np->lock);
1371 }
1372 
1373 static void netdev_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
1374 {
1375 	struct netdev_private *np = netdev_priv(dev);
1376 
1377 	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
1378 	strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
1379 }
1380 
1381 static int netdev_get_link_ksettings(struct net_device *dev,
1382 				     struct ethtool_link_ksettings *cmd)
1383 {
1384 	struct netdev_private *np = netdev_priv(dev);
1385 
1386 	spin_lock_irq(&np->lock);
1387 	mii_ethtool_get_link_ksettings(&np->mii_if, cmd);
1388 	spin_unlock_irq(&np->lock);
1389 
1390 	return 0;
1391 }
1392 
1393 static int netdev_set_link_ksettings(struct net_device *dev,
1394 				     const struct ethtool_link_ksettings *cmd)
1395 {
1396 	struct netdev_private *np = netdev_priv(dev);
1397 	int rc;
1398 
1399 	spin_lock_irq(&np->lock);
1400 	rc = mii_ethtool_set_link_ksettings(&np->mii_if, cmd);
1401 	spin_unlock_irq(&np->lock);
1402 
1403 	return rc;
1404 }
1405 
1406 static int netdev_nway_reset(struct net_device *dev)
1407 {
1408 	struct netdev_private *np = netdev_priv(dev);
1409 	return mii_nway_restart(&np->mii_if);
1410 }
1411 
1412 static u32 netdev_get_link(struct net_device *dev)
1413 {
1414 	struct netdev_private *np = netdev_priv(dev);
1415 	return mii_link_ok(&np->mii_if);
1416 }
1417 
/* ethtool: report the driver-wide debug verbosity level. */
static u32 netdev_get_msglevel(struct net_device *dev)
{
	return debug;
}
1422 
/* ethtool: set the driver-wide debug verbosity level. */
static void netdev_set_msglevel(struct net_device *dev, u32 value)
{
	debug = value;
}
1427 
/* ethtool operations supported by this driver. */
static const struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo		= netdev_get_drvinfo,
	.nway_reset		= netdev_nway_reset,
	.get_link		= netdev_get_link,
	.get_msglevel		= netdev_get_msglevel,
	.set_msglevel		= netdev_set_msglevel,
	.get_link_ksettings	= netdev_get_link_ksettings,
	.set_link_ksettings	= netdev_set_link_ksettings,
};
1437 
1438 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1439 {
1440 	struct mii_ioctl_data *data = if_mii(rq);
1441 	struct netdev_private *np = netdev_priv(dev);
1442 
1443 	switch(cmd) {
1444 	case SIOCGMIIPHY:		/* Get address of MII PHY in use. */
1445 		data->phy_id = ((struct netdev_private *)netdev_priv(dev))->phys[0] & 0x1f;
1446 		/* Fall Through */
1447 
1448 	case SIOCGMIIREG:		/* Read MII PHY register. */
1449 		spin_lock_irq(&np->lock);
1450 		data->val_out = mdio_read(dev, data->phy_id & 0x1f, data->reg_num & 0x1f);
1451 		spin_unlock_irq(&np->lock);
1452 		return 0;
1453 
1454 	case SIOCSMIIREG:		/* Write MII PHY register. */
1455 		spin_lock_irq(&np->lock);
1456 		mdio_write(dev, data->phy_id & 0x1f, data->reg_num & 0x1f, data->val_in);
1457 		spin_unlock_irq(&np->lock);
1458 		return 0;
1459 	default:
1460 		return -EOPNOTSUPP;
1461 	}
1462 }
1463 
/*
 * ndo_stop handler: quiesce the chip, release the IRQ, and free the
 * descriptor rings and their buffers.  The detach/attach dance around
 * free_irq() keeps the interrupt handler from touching the hardware
 * while it is being shut down.
 */
static int netdev_close(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;

	netif_stop_queue(dev);

	if (debug > 1) {
		netdev_dbg(dev, "Shutting down ethercard, status was %08x Config %08x\n",
			   ioread32(ioaddr + IntrStatus),
			   ioread32(ioaddr + NetworkConfig));
		netdev_dbg(dev, "Queue pointers were Tx %d / %d,  Rx %d / %d\n",
			   np->cur_tx, np->dirty_tx,
			   np->cur_rx, np->dirty_rx);
	}

 	/* Stop the chip's Tx and Rx processes. */
	spin_lock_irq(&np->lock);
	netif_device_detach(dev);
	update_csr6(dev, 0);
	iowrite32(0x0000, ioaddr + IntrEnable);
	spin_unlock_irq(&np->lock);

	free_irq(np->pci_dev->irq, dev);
	wmb();
	netif_device_attach(dev);

	/* All-ones config read suggests the chip is gone; skip it then. */
	if (ioread32(ioaddr + NetworkConfig) != 0xffffffff)
		np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;

#ifdef __i386__
	if (debug > 2) {
		int i;

		printk(KERN_DEBUG"  Tx ring at %p:\n", np->tx_ring);
		for (i = 0; i < TX_RING_SIZE; i++)
			printk(KERN_DEBUG " #%d desc. %04x %04x %08x\n",
			       i, np->tx_ring[i].length,
			       np->tx_ring[i].status, np->tx_ring[i].buffer1);
		printk(KERN_DEBUG "  Rx ring %p:\n", np->rx_ring);
		for (i = 0; i < RX_RING_SIZE; i++) {
			printk(KERN_DEBUG " #%d desc. %04x %04x %08x\n",
			       i, np->rx_ring[i].length,
			       np->rx_ring[i].status, np->rx_ring[i].buffer1);
		}
	}
#endif /* __i386__ debugging only */

	del_timer_sync(&np->timer);

	free_rxtx_rings(np);
	free_ringdesc(np);

	return 0;
}
1519 
1520 static void w840_remove1(struct pci_dev *pdev)
1521 {
1522 	struct net_device *dev = pci_get_drvdata(pdev);
1523 
1524 	if (dev) {
1525 		struct netdev_private *np = netdev_priv(dev);
1526 		unregister_netdev(dev);
1527 		pci_release_regions(pdev);
1528 		pci_iounmap(pdev, np->base_addr);
1529 		free_netdev(dev);
1530 	}
1531 }
1532 
1533 #ifdef CONFIG_PM
1534 
1535 /*
1536  * suspend/resume synchronization:
1537  * - open, close, do_ioctl:
1538  * 	rtnl_lock, & netif_device_detach after the rtnl_unlock.
1539  * - get_stats:
1540  * 	spin_lock_irq(np->lock), doesn't touch hw if not present
1541  * - start_xmit:
1542  * 	synchronize_irq + netif_tx_disable;
1543  * - tx_timeout:
1544  * 	netif_device_detach + netif_tx_disable;
1545  * - set_multicast_list
1546  * 	netif_device_detach + netif_tx_disable;
1547  * - interrupt handler
1548  * 	doesn't touch hw if not present, synchronize_irq waits for
1549  * 	running instances of the interrupt handler.
1550  *
1551  * Disabling hw requires clearing csr6 & IntrEnable.
 * update_csr6 and all functions that write IntrEnable check netif_device_present
 * before setting any bits.
1554  *
 * Detach must occur while holding spin_lock_irq(): interrupts from a detached
 * device would cause an irq storm.
1557  */
/*
 * Suspend callback: stop the chip, detach the netdev so no path touches
 * the hardware, and free the ring buffers.  See the synchronization
 * notes above for how concurrent paths are kept out.
 */
static int w840_suspend (struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata (pdev);
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;

	rtnl_lock();
	if (netif_running (dev)) {
		del_timer_sync(&np->timer);

		spin_lock_irq(&np->lock);
		netif_device_detach(dev);
		update_csr6(dev, 0);
		iowrite32(0, ioaddr + IntrEnable);
		spin_unlock_irq(&np->lock);

		/* Wait out any in-flight interrupt handler, then make sure
		   the Tx path cannot run again. */
		synchronize_irq(np->pci_dev->irq);
		netif_tx_disable(dev);

		np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;

		/* no more hardware accesses behind this line. */

		BUG_ON(np->csr6 || ioread32(ioaddr + IntrEnable));

		/* pci_power_off(pdev, -1); */

		free_rxtx_rings(np);
	} else {
		netif_device_detach(dev);
	}
	rtnl_unlock();
	return 0;
}
1592 
/*
 * Resume callback: re-enable the PCI device, software-reset the chip,
 * and rebuild rings and registers before re-attaching the netdev.
 */
static int w840_resume (struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata (pdev);
	struct netdev_private *np = netdev_priv(dev);
	int retval = 0;

	rtnl_lock();
	if (netif_device_present(dev))
		goto out; /* device not suspended */
	if (netif_running(dev)) {
		if ((retval = pci_enable_device(pdev))) {
			dev_err(&dev->dev,
				"pci_enable_device failed in resume\n");
			goto out;
		}
		spin_lock_irq(&np->lock);
		/* Software reset; the read flushes the posted write. */
		iowrite32(1, np->base_addr+PCIBusCfg);
		ioread32(np->base_addr+PCIBusCfg);
		udelay(1);
		netif_device_attach(dev);
		init_rxtx_rings(dev);
		init_registers(dev);
		spin_unlock_irq(&np->lock);

		netif_wake_queue(dev);

		mod_timer(&np->timer, jiffies + 1*HZ);
	} else {
		netif_device_attach(dev);
	}
out:
	rtnl_unlock();
	return retval;
}
1627 #endif
1628 
/* PCI driver glue: probe/remove and (optionally) suspend/resume hooks. */
static struct pci_driver w840_driver = {
	.name		= DRV_NAME,
	.id_table	= w840_pci_tbl,
	.probe		= w840_probe1,
	.remove		= w840_remove1,
#ifdef CONFIG_PM
	.suspend	= w840_suspend,
	.resume		= w840_resume,
#endif
};
1639 
/* Module entry point: register the PCI driver. */
static int __init w840_init(void)
{
	return pci_register_driver(&w840_driver);
}
1644 
/* Module exit point: unregister the PCI driver. */
static void __exit w840_exit(void)
{
	pci_unregister_driver(&w840_driver);
}
1649 
/* Hook the init/exit routines into the module load/unload machinery. */
module_init(w840_init);
module_exit(w840_exit);
1652