xref: /openbmc/linux/drivers/net/ethernet/dec/tulip/dmfe.c (revision 36fe4655)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3     A Davicom DM9102/DM9102A/DM9102A+DM9801/DM9102A+DM9802 NIC fast
4     ethernet driver for Linux.
5     Copyright (C) 1997  Sten Wang
6 
7 
8     DAVICOM Web-Site: www.davicom.com.tw
9 
10     Author: Sten Wang, 886-3-5798797-8517, E-mail: sten_wang@davicom.com.tw
11     Maintainer: Tobias Ringstrom <tori@unhappy.mine.nu>
12 
13     (C)Copyright 1997-1998 DAVICOM Semiconductor,Inc. All Rights Reserved.
14 
15     Marcelo Tosatti <marcelo@conectiva.com.br> :
16     Made it compile in 2.3 (device to net_device)
17 
18     Alan Cox <alan@lxorguk.ukuu.org.uk> :
19     Cleaned up for kernel merge.
20     Removed the back compatibility support
21     Reformatted, fixing spelling etc as I went
22     Removed IRQ 0-15 assumption
23 
24     Jeff Garzik <jgarzik@pobox.com> :
25     Updated to use new PCI driver API.
26     Resource usage cleanups.
27     Report driver version to user.
28 
29     Tobias Ringstrom <tori@unhappy.mine.nu> :
30     Cleaned up and added SMP safety.  Thanks go to Jeff Garzik,
31     Andrew Morton and Frank Davis for the SMP safety fixes.
32 
33     Vojtech Pavlik <vojtech@suse.cz> :
34     Cleaned up pointer arithmetic.
35     Fixed a lot of 64bit issues.
36     Cleaned up printk()s a bit.
37     Fixed some obvious big endian problems.
38 
39     Tobias Ringstrom <tori@unhappy.mine.nu> :
40     Use time_after for jiffies calculation.  Added ethtool
41     support.  Updated PCI resource allocation.  Do not
42     forget to unmap PCI mapped skbs.
43 
44     Alan Cox <alan@lxorguk.ukuu.org.uk>
45     Added new PCI identifiers provided by Clear Zhang at ALi
46     for their 1563 ethernet device.
47 
48     TODO
49 
50     Check on 64 bit boxes.
51     Check and fix on big endian boxes.
52 
53     Test and make sure PCI latency is now correct for all cases.
54 */
55 
56 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
57 
58 #define DRV_NAME	"dmfe"
59 
60 #include <linux/module.h>
61 #include <linux/kernel.h>
62 #include <linux/string.h>
63 #include <linux/timer.h>
64 #include <linux/ptrace.h>
65 #include <linux/errno.h>
66 #include <linux/ioport.h>
67 #include <linux/interrupt.h>
68 #include <linux/pci.h>
69 #include <linux/dma-mapping.h>
70 #include <linux/init.h>
71 #include <linux/netdevice.h>
72 #include <linux/etherdevice.h>
73 #include <linux/ethtool.h>
74 #include <linux/skbuff.h>
75 #include <linux/delay.h>
76 #include <linux/spinlock.h>
77 #include <linux/crc32.h>
78 #include <linux/bitops.h>
79 
80 #include <asm/processor.h>
81 #include <asm/io.h>
82 #include <asm/dma.h>
83 #include <linux/uaccess.h>
84 #include <asm/irq.h>
85 
86 #ifdef CONFIG_TULIP_DM910X
87 #include <linux/of.h>
88 #endif
89 
90 
91 /* Board/System/Debug information/definition ---------------- */
92 #define PCI_DM9132_ID   0x91321282      /* Davicom DM9132 ID */
93 #define PCI_DM9102_ID   0x91021282      /* Davicom DM9102 ID */
94 #define PCI_DM9100_ID   0x91001282      /* Davicom DM9100 ID */
95 #define PCI_DM9009_ID   0x90091282      /* Davicom DM9009 ID */
96 
97 #define DM9102_IO_SIZE  0x80
98 #define DM9102A_IO_SIZE 0x100
99 #define TX_MAX_SEND_CNT 0x1             /* Maximum tx packets in flight at once */
100 #define TX_DESC_CNT     0x10            /* Allocated Tx descriptors */
101 #define RX_DESC_CNT     0x20            /* Allocated Rx descriptors */
102 #define TX_FREE_DESC_CNT (TX_DESC_CNT - 2)	/* Max TX packet count */
103 #define TX_WAKE_DESC_CNT (TX_DESC_CNT - 3)	/* TX wakeup count */
104 #define DESC_ALL_CNT    (TX_DESC_CNT + RX_DESC_CNT)
105 #define TX_BUF_ALLOC    0x600
106 #define RX_ALLOC_SIZE   0x620
107 #define DM910X_RESET    1
108 #define CR0_DEFAULT     0x00E00000      /* TX & RX burst mode */
109 #define CR6_DEFAULT     0x00080000      /* HD */
110 #define CR7_DEFAULT     0x180c1
111 #define CR15_DEFAULT    0x06            /* TxJabber RxWatchdog */
112 #define TDES0_ERR_MASK  0x4302          /* TXJT, LC, EC, FUE */
113 #define MAX_PACKET_SIZE 1514
114 #define DMFE_MAX_MULTICAST 14
115 #define RX_COPY_SIZE	100
116 #define MAX_CHECK_PACKET 0x8000
117 #define DM9801_NOISE_FLOOR 8
118 #define DM9802_NOISE_FLOOR 5
119 
120 #define DMFE_WOL_LINKCHANGE	0x20000000
121 #define DMFE_WOL_SAMPLEPACKET	0x10000000
122 #define DMFE_WOL_MAGICPACKET	0x08000000
123 
124 
125 #define DMFE_10MHF      0
126 #define DMFE_100MHF     1
127 #define DMFE_10MFD      4
128 #define DMFE_100MFD     5
129 #define DMFE_AUTO       8
130 #define DMFE_1M_HPNA    0x10
131 
132 #define DMFE_TXTH_72	0x400000	/* TX TH 72 byte */
133 #define DMFE_TXTH_96	0x404000	/* TX TH 96 byte */
134 #define DMFE_TXTH_128	0x0000		/* TX TH 128 byte */
135 #define DMFE_TXTH_256	0x4000		/* TX TH 256 byte */
136 #define DMFE_TXTH_512	0x8000		/* TX TH 512 byte */
137 #define DMFE_TXTH_1K	0xC000		/* TX TH 1K  byte */
138 
139 #define DMFE_TIMER_WUT  (jiffies + HZ * 1)/* timer wakeup time : 1 second */
140 #define DMFE_TX_TIMEOUT ((3*HZ)/2)	/* tx packet time-out time: 1.5 s */
141 #define DMFE_TX_KICK 	(HZ/2)	/* tx packet kick-out time: 0.5 s */
142 
143 #define dw32(reg, val)	iowrite32(val, ioaddr + (reg))
144 #define dw16(reg, val)	iowrite16(val, ioaddr + (reg))
145 #define dr32(reg)	ioread32(ioaddr + (reg))
146 #define dr16(reg)	ioread16(ioaddr + (reg))
147 #define dr8(reg)	ioread8(ioaddr + (reg))
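
/*
 * Usage sketch for the accessor macros above: they assume a local
 * `void __iomem *ioaddr` is in scope, so most routines start with
 *
 *	void __iomem *ioaddr = db->ioaddr;
 *
 * and then touch registers directly, e.g. dw32(DCR7, 0) to mask all
 * interrupts or dr32(DCR5) to latch the status register, as in
 * dmfe_interrupt() below.
 */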
148 
149 #define DMFE_DBUG(dbug_now, msg, value)			\
150 	do {						\
151 		if (dmfe_debug || (dbug_now))		\
152 			pr_err("%s %lx\n",		\
153 			       (msg), (long) (value));	\
154 	} while (0)
155 
156 #define SHOW_MEDIA_TYPE(mode)				\
157 	pr_info("Change Speed to %sMbps %s duplex\n",	\
158 		(mode & 1) ? "100":"10",		\
159 		(mode & 4) ? "full":"half");
160 
161 
162 /* CR9 definition: SROM/MII */
163 #define CR9_SROM_READ   0x4800
164 #define CR9_SRCS        0x1
165 #define CR9_SRCLK       0x2
166 #define CR9_CRDOUT      0x8
167 #define SROM_DATA_0     0x0
168 #define SROM_DATA_1     0x4
169 #define PHY_DATA_1      0x20000
170 #define PHY_DATA_0      0x00000
171 #define MDCLKH          0x10000
172 
173 #define PHY_POWER_DOWN	0x800
174 
175 #define SROM_V41_CODE   0x14
176 
177 #define __CHK_IO_SIZE(pci_id, dev_rev) \
178  (( ((pci_id)==PCI_DM9132_ID) || ((dev_rev) >= 0x30) ) ? \
179 	DM9102A_IO_SIZE: DM9102_IO_SIZE)
180 
181 #define CHK_IO_SIZE(pci_dev) \
182 	(__CHK_IO_SIZE(((pci_dev)->device << 16) | (pci_dev)->vendor, \
183 	(pci_dev)->revision))
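
/*
 * For example, a DM9102 (vendor 0x1282, device 0x9102) packs to
 * 0x91021282 == PCI_DM9102_ID, so CHK_IO_SIZE() grants the larger
 * DM9102A_IO_SIZE window only to a DM9132 or to revisions >= 0x30.
 */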
184 
185 /* Structure/enum declaration ------------------------------- */
186 struct tx_desc {
187         __le32 tdes0, tdes1, tdes2, tdes3; /* Data for the card */
188         char *tx_buf_ptr;               /* Data for us */
189         struct tx_desc *next_tx_desc;
190 } __attribute__(( aligned(32) ));
191 
192 struct rx_desc {
193 	__le32 rdes0, rdes1, rdes2, rdes3; /* Data for the card */
194 	struct sk_buff *rx_skb_ptr;	/* Data for us */
195 	struct rx_desc *next_rx_desc;
196 } __attribute__(( aligned(32) ));
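
/*
 * Descriptor layout note: bit 31 of tdes0/rdes0 is the ownership bit
 * (0x80000000 == owned by the NIC) and tdes3/rdes3 hold the bus address
 * of the next descriptor, so both rings run in the chip's chained mode
 * (see dmfe_descriptor_init() below).
 */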
197 
198 struct dmfe_board_info {
199 	u32 chip_id;			/* Chip vendor/Device ID */
200 	u8 chip_revision;		/* Chip revision */
201 	struct net_device *next_dev;	/* next device */
202 	struct pci_dev *pdev;		/* PCI device */
203 	spinlock_t lock;
204 
205 	void __iomem *ioaddr;		/* I/O base address */
206 	u32 cr0_data;
207 	u32 cr5_data;
208 	u32 cr6_data;
209 	u32 cr7_data;
210 	u32 cr15_data;
211 
212 	/* pointer for memory physical address */
213 	dma_addr_t buf_pool_dma_ptr;	/* Tx buffer pool memory */
214 	dma_addr_t buf_pool_dma_start;	/* Tx buffer pool align dword */
215 	dma_addr_t desc_pool_dma_ptr;	/* descriptor pool memory */
216 	dma_addr_t first_tx_desc_dma;
217 	dma_addr_t first_rx_desc_dma;
218 
219 	/* descriptor pointer */
220 	unsigned char *buf_pool_ptr;	/* Tx buffer pool memory */
221 	unsigned char *buf_pool_start;	/* Tx buffer pool align dword */
222 	unsigned char *desc_pool_ptr;	/* descriptor pool memory */
223 	struct tx_desc *first_tx_desc;
224 	struct tx_desc *tx_insert_ptr;
225 	struct tx_desc *tx_remove_ptr;
226 	struct rx_desc *first_rx_desc;
227 	struct rx_desc *rx_insert_ptr;
228 	struct rx_desc *rx_ready_ptr;	/* next received packet pointer */
229 	unsigned long tx_packet_cnt;	/* in-flight transmit packet count */
230 	unsigned long tx_queue_cnt;	/* packets waiting to be sent */
231 	unsigned long rx_avail_cnt;	/* available rx descriptor count */
232 	unsigned long interval_rx_cnt;	/* rx packets per timer interval */
233 
234 	u16 HPNA_command;		/* For HPNA register 16 */
235 	u16 HPNA_timer;			/* For HPNA remote device check */
236 	u16 dbug_cnt;
237 	u16 NIC_capability;		/* NIC media capability */
238 	u16 PHY_reg4;			/* Saved Phyxcer register 4 value */
239 
240 	u8 HPNA_present;		/* 0:none, 1:DM9801, 2:DM9802 */
241 	u8 chip_type;			/* Keep DM9102A chip type */
242 	u8 media_mode;			/* user-specified media mode */
243 	u8 op_mode;			/* actual working media mode */
244 	u8 phy_addr;
245 	u8 wait_reset;			/* Hardware failed, need to reset */
246 	u8 dm910x_chk_mode;		/* Operating mode check */
247 	u8 first_in_callback;		/* Flag to record state */
248 	u8 wol_mode;			/* user WOL settings */
249 	struct timer_list timer;
250 
251 	/* Driver defined statistic counter */
252 	unsigned long tx_fifo_underrun;
253 	unsigned long tx_loss_carrier;
254 	unsigned long tx_no_carrier;
255 	unsigned long tx_late_collision;
256 	unsigned long tx_excessive_collision;
257 	unsigned long tx_jabber_timeout;
258 	unsigned long reset_count;
259 	unsigned long reset_cr8;
260 	unsigned long reset_fatal;
261 	unsigned long reset_TXtimeout;
262 
263 	/* NIC SROM data */
264 	unsigned char srom[128];
265 };
266 
267 enum dmfe_offsets {
268 	DCR0 = 0x00, DCR1 = 0x08, DCR2 = 0x10, DCR3 = 0x18, DCR4 = 0x20,
269 	DCR5 = 0x28, DCR6 = 0x30, DCR7 = 0x38, DCR8 = 0x40, DCR9 = 0x48,
270 	DCR10 = 0x50, DCR11 = 0x58, DCR12 = 0x60, DCR13 = 0x68, DCR14 = 0x70,
271 	DCR15 = 0x78
272 };
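
/*
 * Each DCR is a 32-bit register, but consecutive registers sit 8 bytes
 * apart, the CSR spacing shared by the tulip/21x4x family this driver
 * lives alongside; hence dr32()/dw32() take the DCRx offset directly.
 */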
273 
274 enum dmfe_CR6_bits {
275 	CR6_RXSC = 0x2, CR6_PBF = 0x8, CR6_PM = 0x40, CR6_PAM = 0x80,
276 	CR6_FDM = 0x200, CR6_TXSC = 0x2000, CR6_STI = 0x100000,
277 	CR6_SFT = 0x200000, CR6_RXA = 0x40000000, CR6_NO_PURGE = 0x20000000
278 };
279 
280 /* Global variable declaration ----------------------------- */
281 static int dmfe_debug;
282 static unsigned char dmfe_media_mode = DMFE_AUTO;
283 static u32 dmfe_cr6_user_set;
284 
285 /* For module input parameter */
286 static int debug;
287 static u32 cr6set;
288 static unsigned char mode = 8;
289 static u8 chkmode = 1;
290 static u8 HPNA_mode;		/* Default: Low Power/High Speed */
291 static u8 HPNA_rx_cmd;		/* Default: Disable Rx remote command */
292 static u8 HPNA_tx_cmd;		/* Default: Don't issue remote command */
293 static u8 HPNA_NoiseFloor;	/* Default: HPNA NoiseFloor */
294 static u8 SF_mode;		/* Special Function: 1:VLAN, 2:RX Flow Control
295 				   4: TX pause packet */
296 
297 
298 /* function declaration ------------------------------------- */
299 static int dmfe_open(struct net_device *);
300 static netdev_tx_t dmfe_start_xmit(struct sk_buff *, struct net_device *);
301 static int dmfe_stop(struct net_device *);
302 static void dmfe_set_filter_mode(struct net_device *);
303 static const struct ethtool_ops netdev_ethtool_ops;
304 static u16 read_srom_word(void __iomem *, int);
305 static irqreturn_t dmfe_interrupt(int , void *);
306 #ifdef CONFIG_NET_POLL_CONTROLLER
307 static void poll_dmfe (struct net_device *dev);
308 #endif
309 static void dmfe_descriptor_init(struct net_device *);
310 static void allocate_rx_buffer(struct net_device *);
311 static void update_cr6(u32, void __iomem *);
312 static void send_filter_frame(struct net_device *);
313 static void dm9132_id_table(struct net_device *);
314 static u16 dmfe_phy_read(void __iomem *, u8, u8, u32);
315 static void dmfe_phy_write(void __iomem *, u8, u8, u16, u32);
316 static void dmfe_phy_write_1bit(void __iomem *, u32);
317 static u16 dmfe_phy_read_1bit(void __iomem *);
318 static u8 dmfe_sense_speed(struct dmfe_board_info *);
319 static void dmfe_process_mode(struct dmfe_board_info *);
320 static void dmfe_timer(struct timer_list *);
321 static inline u32 cal_CRC(unsigned char *, unsigned int, u8);
322 static void dmfe_rx_packet(struct net_device *, struct dmfe_board_info *);
323 static void dmfe_free_tx_pkt(struct net_device *, struct dmfe_board_info *);
324 static void dmfe_reuse_skb(struct dmfe_board_info *, struct sk_buff *);
325 static void dmfe_dynamic_reset(struct net_device *);
326 static void dmfe_free_rxbuffer(struct dmfe_board_info *);
327 static void dmfe_init_dm910x(struct net_device *);
328 static void dmfe_parse_srom(struct dmfe_board_info *);
329 static void dmfe_program_DM9801(struct dmfe_board_info *, int);
330 static void dmfe_program_DM9802(struct dmfe_board_info *);
331 static void dmfe_HPNA_remote_cmd_chk(struct dmfe_board_info * );
332 static void dmfe_set_phyxcer(struct dmfe_board_info *);
333 
334 /* DM910X network board routine ---------------------------- */
335 
336 static const struct net_device_ops netdev_ops = {
337 	.ndo_open 		= dmfe_open,
338 	.ndo_stop		= dmfe_stop,
339 	.ndo_start_xmit		= dmfe_start_xmit,
340 	.ndo_set_rx_mode	= dmfe_set_filter_mode,
341 	.ndo_set_mac_address	= eth_mac_addr,
342 	.ndo_validate_addr	= eth_validate_addr,
343 #ifdef CONFIG_NET_POLL_CONTROLLER
344 	.ndo_poll_controller	= poll_dmfe,
345 #endif
346 };
347 
348 /*
349  *	Search for a DM910X board, allocate space and register it
350  */
351 
352 static int dmfe_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
353 {
354 	struct dmfe_board_info *db;	/* board information structure */
355 	struct net_device *dev;
356 	u32 pci_pmr;
357 	int i, err;
358 
359 	DMFE_DBUG(0, "dmfe_init_one()", 0);
360 
361 	/*
362 	 *	SPARC on-board DM910x chips should be handled by the main
363 	 *	tulip driver, except for early DM9100s.
364 	 */
365 #ifdef CONFIG_TULIP_DM910X
366 	if ((ent->driver_data == PCI_DM9100_ID && pdev->revision >= 0x30) ||
367 	    ent->driver_data == PCI_DM9102_ID) {
368 		struct device_node *dp = pci_device_to_OF_node(pdev);
369 
370 		if (dp && of_get_property(dp, "local-mac-address", NULL)) {
371 			pr_info("skipping on-board DM910x (use tulip)\n");
372 			return -ENODEV;
373 		}
374 	}
375 #endif
376 
377 	/* Init network device */
378 	dev = alloc_etherdev(sizeof(*db));
379 	if (dev == NULL)
380 		return -ENOMEM;
381 	SET_NETDEV_DEV(dev, &pdev->dev);
382 
383 	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
384 		pr_warn("32-bit PCI DMA not available\n");
385 		err = -ENODEV;
386 		goto err_out_free;
387 	}
388 
389 	/* Enable Master/IO access, Disable memory access */
390 	err = pci_enable_device(pdev);
391 	if (err)
392 		goto err_out_free;
393 
394 	if (!pci_resource_start(pdev, 0)) {
395 		pr_err("I/O base is zero\n");
396 		err = -ENODEV;
397 		goto err_out_disable;
398 	}
399 
400 	if (pci_resource_len(pdev, 0) < (CHK_IO_SIZE(pdev)) ) {
401 		pr_err("Allocated I/O size too small\n");
402 		err = -ENODEV;
403 		goto err_out_disable;
404 	}
405 
406 #if 0	/* pci_{enable_device,set_master} sets minimum latency for us now */
407 
408 	/* Set Latency Timer 80h */
409 	/* FIXME: setting values > 32 breaks some SiS 559x stuff.
410 	   Need a PCI quirk.. */
411 
412 	pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x80);
413 #endif
414 
415 	if (pci_request_regions(pdev, DRV_NAME)) {
416 		pr_err("Failed to request PCI regions\n");
417 		err = -ENODEV;
418 		goto err_out_disable;
419 	}
420 
421 	/* Init system & device */
422 	db = netdev_priv(dev);
423 
424 	/* Allocate Tx/Rx descriptor memory */
425 	db->desc_pool_ptr = dma_alloc_coherent(&pdev->dev,
426 					       sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20,
427 					       &db->desc_pool_dma_ptr, GFP_KERNEL);
428 	if (!db->desc_pool_ptr) {
429 		err = -ENOMEM;
430 		goto err_out_res;
431 	}
432 
433 	db->buf_pool_ptr = dma_alloc_coherent(&pdev->dev,
434 					      TX_BUF_ALLOC * TX_DESC_CNT + 4,
435 					      &db->buf_pool_dma_ptr, GFP_KERNEL);
436 	if (!db->buf_pool_ptr) {
437 		err = -ENOMEM;
438 		goto err_out_free_desc;
439 	}
440 
441 	db->first_tx_desc = (struct tx_desc *) db->desc_pool_ptr;
442 	db->first_tx_desc_dma = db->desc_pool_dma_ptr;
443 	db->buf_pool_start = db->buf_pool_ptr;
444 	db->buf_pool_dma_start = db->buf_pool_dma_ptr;
445 
446 	db->chip_id = ent->driver_data;
447 	/* IO type range. */
448 	db->ioaddr = pci_iomap(pdev, 0, 0);
449 	if (!db->ioaddr) {
450 		err = -ENOMEM;
451 		goto err_out_free_buf;
452 	}
453 
454 	db->chip_revision = pdev->revision;
455 	db->wol_mode = 0;
456 
457 	db->pdev = pdev;
458 
459 	pci_set_drvdata(pdev, dev);
460 	dev->netdev_ops = &netdev_ops;
461 	dev->ethtool_ops = &netdev_ethtool_ops;
462 	netif_carrier_off(dev);
463 	spin_lock_init(&db->lock);
464 
465 	pci_read_config_dword(pdev, 0x50, &pci_pmr);
466 	pci_pmr &= 0x70000;
467 	if ( (pci_pmr == 0x10000) && (db->chip_revision == 0x31) )
468 		db->chip_type = 1;	/* DM9102A E3 */
469 	else
470 		db->chip_type = 0;
471 
472 	/* read 64 words of srom data */
473 	for (i = 0; i < 64; i++) {
474 		((__le16 *) db->srom)[i] =
475 			cpu_to_le16(read_srom_word(db->ioaddr, i));
476 	}
477 
478 	/* Set Node address */
479 	for (i = 0; i < 6; i++)
480 		dev->dev_addr[i] = db->srom[20 + i];
481 
482 	err = register_netdev (dev);
483 	if (err)
484 		goto err_out_unmap;
485 
486 	dev_info(&dev->dev, "Davicom DM%04lx at pci%s, %pM, irq %d\n",
487 		 ent->driver_data >> 16,
488 		 pci_name(pdev), dev->dev_addr, pdev->irq);
489 
490 	pci_set_master(pdev);
491 
492 	return 0;
493 
494 err_out_unmap:
495 	pci_iounmap(pdev, db->ioaddr);
496 err_out_free_buf:
497 	dma_free_coherent(&pdev->dev, TX_BUF_ALLOC * TX_DESC_CNT + 4,
498 			  db->buf_pool_ptr, db->buf_pool_dma_ptr);
499 err_out_free_desc:
500 	dma_free_coherent(&pdev->dev,
501 			  sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20,
502 			  db->desc_pool_ptr, db->desc_pool_dma_ptr);
503 err_out_res:
504 	pci_release_regions(pdev);
505 err_out_disable:
506 	pci_disable_device(pdev);
507 err_out_free:
508 	free_netdev(dev);
509 
510 	return err;
511 }
512 
513 
514 static void dmfe_remove_one(struct pci_dev *pdev)
515 {
516 	struct net_device *dev = pci_get_drvdata(pdev);
517 	struct dmfe_board_info *db = netdev_priv(dev);
518 
519 	DMFE_DBUG(0, "dmfe_remove_one()", 0);
520 
521  	if (dev) {
522 
523 		unregister_netdev(dev);
524 		pci_iounmap(db->pdev, db->ioaddr);
525 		dma_free_coherent(&db->pdev->dev,
526 				  sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20,
527 				  db->desc_pool_ptr, db->desc_pool_dma_ptr);
528 		dma_free_coherent(&db->pdev->dev,
529 				  TX_BUF_ALLOC * TX_DESC_CNT + 4,
530 				  db->buf_pool_ptr, db->buf_pool_dma_ptr);
531 		pci_release_regions(pdev);
532 		free_netdev(dev);	/* free board information */
533 	}
534 
535 	DMFE_DBUG(0, "dmfe_remove_one() exit", 0);
536 }
537 
538 
539 /*
540  *	Open the interface.
541  *	The interface is opened whenever "ifconfig" activates it.
542  */
543 
544 static int dmfe_open(struct net_device *dev)
545 {
546 	struct dmfe_board_info *db = netdev_priv(dev);
547 	const int irq = db->pdev->irq;
548 	int ret;
549 
550 	DMFE_DBUG(0, "dmfe_open", 0);
551 
552 	ret = request_irq(irq, dmfe_interrupt, IRQF_SHARED, dev->name, dev);
553 	if (ret)
554 		return ret;
555 
556 	/* system variable init */
557 	db->cr6_data = CR6_DEFAULT | dmfe_cr6_user_set;
558 	db->tx_packet_cnt = 0;
559 	db->tx_queue_cnt = 0;
560 	db->rx_avail_cnt = 0;
561 	db->wait_reset = 0;
562 
563 	db->first_in_callback = 0;
564 	db->NIC_capability = 0xf;	/* All capabilities */
565 	db->PHY_reg4 = 0x1e0;
566 
567 	/* CR6 operation mode decision */
568 	if ( !chkmode || (db->chip_id == PCI_DM9132_ID) ||
569 		(db->chip_revision >= 0x30) ) {
570     		db->cr6_data |= DMFE_TXTH_256;
571 		db->cr0_data = CR0_DEFAULT;
572 		db->dm910x_chk_mode=4;		/* Enter the normal mode */
573  	} else {
574 		db->cr6_data |= CR6_SFT;	/* Store & Forward mode */
575 		db->cr0_data = 0;
576 		db->dm910x_chk_mode = 1;	/* Enter the check mode */
577 	}
578 
579 	/* Initialize DM910X board */
580 	dmfe_init_dm910x(dev);
581 
582 	/* Activate system interface */
583 	netif_wake_queue(dev);
584 
585 	/* set and activate a timer */
586 	timer_setup(&db->timer, dmfe_timer, 0);
587 	db->timer.expires = DMFE_TIMER_WUT + HZ * 2;
588 	add_timer(&db->timer);
589 
590 	return 0;
591 }
592 
593 
594 /*	Initialize DM910X board
595  *	Reset DM910X board
596  *	Initialize TX/Rx descriptor chain structure
597  *	Send the set-up frame
598  *	Enable Tx/Rx machine
599  */
600 
601 static void dmfe_init_dm910x(struct net_device *dev)
602 {
603 	struct dmfe_board_info *db = netdev_priv(dev);
604 	void __iomem *ioaddr = db->ioaddr;
605 
606 	DMFE_DBUG(0, "dmfe_init_dm910x()", 0);
607 
608 	/* Reset DM910x MAC controller */
609 	dw32(DCR0, DM910X_RESET);	/* RESET MAC */
610 	udelay(100);
611 	dw32(DCR0, db->cr0_data);
612 	udelay(5);
613 
614 	/* PHY addr: DM9102(A)/DM9132/DM9801, phy address = 1 */
615 	db->phy_addr = 1;
616 
617 	/* Parse SROM and media mode */
618 	dmfe_parse_srom(db);
619 	db->media_mode = dmfe_media_mode;
620 
621 	/* RESET Phyxcer Chip by GPR port bit 7 */
622 	dw32(DCR12, 0x180);		/* Make bit 7 an output port */
623 	if (db->chip_id == PCI_DM9009_ID) {
624 		dw32(DCR12, 0x80);	/* Issue RESET signal */
625 		mdelay(300);			/* Delay 300 ms */
626 	}
627 	dw32(DCR12, 0x0);	/* Clear RESET signal */
628 
629 	/* Process Phyxcer Media Mode */
630 	if ( !(db->media_mode & 0x10) )	/* Force 1M mode */
631 		dmfe_set_phyxcer(db);
632 
633 	/* Media Mode Process */
634 	if ( !(db->media_mode & DMFE_AUTO) )
635 		db->op_mode = db->media_mode; 	/* Force Mode */
636 
637 	/* Initialize Transmit/Receive descriptor and CR3/4 */
638 	dmfe_descriptor_init(dev);
639 
640 	/* Init CR6 to program DM910x operation */
641 	update_cr6(db->cr6_data, ioaddr);
642 
643 	/* Send setup frame */
644 	if (db->chip_id == PCI_DM9132_ID)
645 		dm9132_id_table(dev);	/* DM9132 */
646 	else
647 		send_filter_frame(dev);	/* DM9102/DM9102A */
648 
649 	/* Init CR7, interrupt active bit */
650 	db->cr7_data = CR7_DEFAULT;
651 	dw32(DCR7, db->cr7_data);
652 
653 	/* Init CR15, Tx jabber and Rx watchdog timer */
654 	dw32(DCR15, db->cr15_data);
655 
656 	/* Enable DM910X Tx/Rx function */
657 	db->cr6_data |= CR6_RXSC | CR6_TXSC | 0x40000;
658 	update_cr6(db->cr6_data, ioaddr);
659 }
660 
661 
662 /*
663  *	Start hardware transmission:
664  *	send a packet from the upper layer to the media.
665  */
666 
667 static netdev_tx_t dmfe_start_xmit(struct sk_buff *skb,
668 					 struct net_device *dev)
669 {
670 	struct dmfe_board_info *db = netdev_priv(dev);
671 	void __iomem *ioaddr = db->ioaddr;
672 	struct tx_desc *txptr;
673 	unsigned long flags;
674 
675 	DMFE_DBUG(0, "dmfe_start_xmit", 0);
676 
677 	/* Too large packet check */
678 	if (skb->len > MAX_PACKET_SIZE) {
679 		pr_err("big packet = %d\n", (u16)skb->len);
680 		dev_kfree_skb_any(skb);
681 		return NETDEV_TX_OK;
682 	}
683 
684 	/* Resource flag check */
685 	netif_stop_queue(dev);
686 
687 	spin_lock_irqsave(&db->lock, flags);
688 
689 	/* No Tx resource check, this should not normally happen */
690 	if (db->tx_queue_cnt >= TX_FREE_DESC_CNT) {
691 		spin_unlock_irqrestore(&db->lock, flags);
692 		pr_err("No Tx resource %ld\n", db->tx_queue_cnt);
693 		return NETDEV_TX_BUSY;
694 	}
695 
696 	/* Disable NIC interrupt */
697 	dw32(DCR7, 0);
698 
699 	/* transmit this packet */
700 	txptr = db->tx_insert_ptr;
701 	skb_copy_from_linear_data(skb, txptr->tx_buf_ptr, skb->len);
702 	txptr->tdes1 = cpu_to_le32(0xe1000000 | skb->len);
703 
704 	/* Point to next transmit free descriptor */
705 	db->tx_insert_ptr = txptr->next_tx_desc;
706 
707 	/* Transmit Packet Process */
708 	if ( (!db->tx_queue_cnt) && (db->tx_packet_cnt < TX_MAX_SEND_CNT) ) {
709 		txptr->tdes0 = cpu_to_le32(0x80000000);	/* Set owner bit */
710 		db->tx_packet_cnt++;			/* Ready to send */
711 		dw32(DCR1, 0x1);			/* Issue Tx polling */
712 		netif_trans_update(dev);		/* saved time stamp */
713 	} else {
714 		db->tx_queue_cnt++;			/* queue TX packet */
715 		dw32(DCR1, 0x1);			/* Issue Tx polling */
716 	}
717 
718 	/* Tx resource check */
719 	if ( db->tx_queue_cnt < TX_FREE_DESC_CNT )
720 		netif_wake_queue(dev);
721 
722 	/* Restore CR7 to enable interrupt */
723 	spin_unlock_irqrestore(&db->lock, flags);
724 	dw32(DCR7, db->cr7_data);
725 
726 	/* free this SKB */
727 	dev_consume_skb_any(skb);
728 
729 	return NETDEV_TX_OK;
730 }
731 
732 
733 /*
734  *	Stop the interface.
735  *	The interface is stopped when it is brought down.
736  */
737 
738 static int dmfe_stop(struct net_device *dev)
739 {
740 	struct dmfe_board_info *db = netdev_priv(dev);
741 	void __iomem *ioaddr = db->ioaddr;
742 
743 	DMFE_DBUG(0, "dmfe_stop", 0);
744 
745 	/* disable system */
746 	netif_stop_queue(dev);
747 
748 	/* delete timer */
749 	del_timer_sync(&db->timer);
750 
751 	/* Reset & stop DM910X board */
752 	dw32(DCR0, DM910X_RESET);
753 	udelay(100);
754 	dmfe_phy_write(ioaddr, db->phy_addr, 0, 0x8000, db->chip_id);
755 
756 	/* free interrupt */
757 	free_irq(db->pdev->irq, dev);
758 
759 	/* free allocated rx buffer */
760 	dmfe_free_rxbuffer(db);
761 
762 #if 0
763 	/* show statistic counter */
764 	printk("FU:%lx EC:%lx LC:%lx NC:%lx LOC:%lx TXJT:%lx RESET:%lx RCR8:%lx FAL:%lx TT:%lx\n",
765 	       db->tx_fifo_underrun, db->tx_excessive_collision,
766 	       db->tx_late_collision, db->tx_no_carrier, db->tx_loss_carrier,
767 	       db->tx_jabber_timeout, db->reset_count, db->reset_cr8,
768 	       db->reset_fatal, db->reset_TXtimeout);
769 #endif
770 
771 	return 0;
772 }
773 
774 
775 /*
776  *	DM9102 interrupt handler:
777  *	pass received packets to the upper layer, free transmitted packets
778  */
779 
780 static irqreturn_t dmfe_interrupt(int irq, void *dev_id)
781 {
782 	struct net_device *dev = dev_id;
783 	struct dmfe_board_info *db = netdev_priv(dev);
784 	void __iomem *ioaddr = db->ioaddr;
785 	unsigned long flags;
786 
787 	DMFE_DBUG(0, "dmfe_interrupt()", 0);
788 
789 	spin_lock_irqsave(&db->lock, flags);
790 
791 	/* Got DM910X status */
792 	db->cr5_data = dr32(DCR5);
793 	dw32(DCR5, db->cr5_data);
794 	if ( !(db->cr5_data & 0xc1) ) {
795 		spin_unlock_irqrestore(&db->lock, flags);
796 		return IRQ_HANDLED;
797 	}
798 
799 	/* Disable all interrupts in CR7 to solve the interrupt edge problem */
800 	dw32(DCR7, 0);
801 
802 	/* Check system status */
803 	if (db->cr5_data & 0x2000) {
804 		/* a system bus error happened */
805 		DMFE_DBUG(1, "System bus error happened. CR5=", db->cr5_data);
806 		db->reset_fatal++;
807 		db->wait_reset = 1;	/* Need to RESET */
808 		spin_unlock_irqrestore(&db->lock, flags);
809 		return IRQ_HANDLED;
810 	}
811 
812 	/* Receive the incoming packet */
813 	if ( (db->cr5_data & 0x40) && db->rx_avail_cnt )
814 		dmfe_rx_packet(dev, db);
815 
816 	/* reallocate rx descriptor buffer */
817 	if (db->rx_avail_cnt<RX_DESC_CNT)
818 		allocate_rx_buffer(dev);
819 
820 	/* Free the transmitted descriptor */
821 	if ( db->cr5_data & 0x01)
822 		dmfe_free_tx_pkt(dev, db);
823 
824 	/* Mode Check */
825 	if (db->dm910x_chk_mode & 0x2) {
826 		db->dm910x_chk_mode = 0x4;
827 		db->cr6_data |= 0x100;
828 		update_cr6(db->cr6_data, ioaddr);
829 	}
830 
831 	/* Restore CR7 to enable interrupt mask */
832 	dw32(DCR7, db->cr7_data);
833 
834 	spin_unlock_irqrestore(&db->lock, flags);
835 	return IRQ_HANDLED;
836 }
837 
838 
839 #ifdef CONFIG_NET_POLL_CONTROLLER
840 /*
841  * Polling 'interrupt' - used by things like netconsole to send skbs
842  * without having to re-enable interrupts. It's not called while
843  * the interrupt routine is executing.
844  */
845 
846 static void poll_dmfe (struct net_device *dev)
847 {
848 	struct dmfe_board_info *db = netdev_priv(dev);
849 	const int irq = db->pdev->irq;
850 
851 	/* disable_irq here is not very nice, but with the lockless
852 	   interrupt handler we have no other choice. */
853 	disable_irq(irq);
854 	dmfe_interrupt (irq, dev);
855 	enable_irq(irq);
856 }
857 #endif
858 
859 /*
860  *	Free TX resource after TX complete
861  */
862 
863 static void dmfe_free_tx_pkt(struct net_device *dev, struct dmfe_board_info *db)
864 {
865 	struct tx_desc *txptr;
866 	void __iomem *ioaddr = db->ioaddr;
867 	u32 tdes0;
868 
869 	txptr = db->tx_remove_ptr;
870 	while(db->tx_packet_cnt) {
871 		tdes0 = le32_to_cpu(txptr->tdes0);
872 		if (tdes0 & 0x80000000)
873 			break;
874 
875 		/* A packet transmission completed */
876 		db->tx_packet_cnt--;
877 		dev->stats.tx_packets++;
878 
879 		/* Transmit statistic counter */
880 		if ( tdes0 != 0x7fffffff ) {
881 			dev->stats.collisions += (tdes0 >> 3) & 0xf;
882 			dev->stats.tx_bytes += le32_to_cpu(txptr->tdes1) & 0x7ff;
883 			if (tdes0 & TDES0_ERR_MASK) {
884 				dev->stats.tx_errors++;
885 
886 				if (tdes0 & 0x0002) {	/* UnderRun */
887 					db->tx_fifo_underrun++;
888 					if ( !(db->cr6_data & CR6_SFT) ) {
889 						db->cr6_data = db->cr6_data | CR6_SFT;
890 						update_cr6(db->cr6_data, ioaddr);
891 					}
892 				}
893 				if (tdes0 & 0x0100)
894 					db->tx_excessive_collision++;
895 				if (tdes0 & 0x0200)
896 					db->tx_late_collision++;
897 				if (tdes0 & 0x0400)
898 					db->tx_no_carrier++;
899 				if (tdes0 & 0x0800)
900 					db->tx_loss_carrier++;
901 				if (tdes0 & 0x4000)
902 					db->tx_jabber_timeout++;
903 			}
904 		}
905 
906     		txptr = txptr->next_tx_desc;
907 	}/* End of while */
908 
909 	/* Update TX remove pointer to next */
910 	db->tx_remove_ptr = txptr;
911 
912 	/* Send the Tx packet in queue */
913 	if ( (db->tx_packet_cnt < TX_MAX_SEND_CNT) && db->tx_queue_cnt ) {
914 		txptr->tdes0 = cpu_to_le32(0x80000000);	/* Set owner bit */
915 		db->tx_packet_cnt++;			/* Ready to send */
916 		db->tx_queue_cnt--;
917 		dw32(DCR1, 0x1);			/* Issue Tx polling */
918 		netif_trans_update(dev);		/* saved time stamp */
919 	}
920 
921 	/* Resource available check */
922 	if ( db->tx_queue_cnt < TX_WAKE_DESC_CNT )
923 		netif_wake_queue(dev);	/* Active upper layer, send again */
924 }
925 
926 
927 /*
928  *	Calculate the CRC value of the Rx packet
929  *	flag = 	1 : return the reverse CRC (for the received packet CRC)
930  *		0 : return the normal CRC (for Hash Table index)
931  */
932 
933 static inline u32 cal_CRC(unsigned char * Data, unsigned int Len, u8 flag)
934 {
935 	u32 crc = crc32(~0, Data, Len);
936 	if (flag) crc = ~crc;
937 	return crc;
938 }
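
/*
 * For example, dm9132_id_table() below derives a 6-bit hash-table index
 * from the normal (flag == 0) CRC of a multicast address:
 *
 *	hash_val = cal_CRC((char *)ha->addr, 6, 0) & 0x3f;
 */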
939 
940 
941 /*
942  *	Receive the incoming packet and pass it to the upper layer
943  */
944 
945 static void dmfe_rx_packet(struct net_device *dev, struct dmfe_board_info *db)
946 {
947 	struct rx_desc *rxptr;
948 	struct sk_buff *skb, *newskb;
949 	int rxlen;
950 	u32 rdes0;
951 
952 	rxptr = db->rx_ready_ptr;
953 
954 	while(db->rx_avail_cnt) {
955 		rdes0 = le32_to_cpu(rxptr->rdes0);
956 		if (rdes0 & 0x80000000)	/* packet owner check */
957 			break;
958 
959 		db->rx_avail_cnt--;
960 		db->interval_rx_cnt++;
961 
962 		dma_unmap_single(&db->pdev->dev, le32_to_cpu(rxptr->rdes2),
963 				 RX_ALLOC_SIZE, DMA_FROM_DEVICE);
964 
965 		if ( (rdes0 & 0x300) != 0x300) {
966 			/* A packet without First/Last flag */
967 			/* reuse this SKB */
968 			DMFE_DBUG(0, "Reuse SK buffer, rdes0", rdes0);
969 			dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
970 		} else {
971 			/* A packet with First/Last flag */
972 			rxlen = ( (rdes0 >> 16) & 0x3fff) - 4;
973 
974 			/* error summary bit check */
975 			if (rdes0 & 0x8000) {
976 				/* This is an error packet */
977 				dev->stats.rx_errors++;
978 				if (rdes0 & 1)
979 					dev->stats.rx_fifo_errors++;
980 				if (rdes0 & 2)
981 					dev->stats.rx_crc_errors++;
982 				if (rdes0 & 0x80)
983 					dev->stats.rx_length_errors++;
984 			}
985 
986 			if ( !(rdes0 & 0x8000) ||
987 				((db->cr6_data & CR6_PM) && (rxlen>6)) ) {
988 				skb = rxptr->rx_skb_ptr;
989 
990 				/* Check the received packet's CRC if required */
991 				if ( (db->dm910x_chk_mode & 1) &&
992 					(cal_CRC(skb->data, rxlen, 1) !=
993 					(*(u32 *) (skb->data+rxlen) ))) { /* FIXME (?) */
994 					/* Found an erroneous received packet */
995 					dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
996 					db->dm910x_chk_mode = 3;
997 				} else {
998 					/* Good packet, send to upper layer */
999 					/* Short packets use a new SKB */
1000 					if ((rxlen < RX_COPY_SIZE) &&
1001 						((newskb = netdev_alloc_skb(dev, rxlen + 2))
1002 						!= NULL)) {
1003 
1004 						skb = newskb;
1005 						/* size less than COPY_SIZE, allocate a rxlen SKB */
1006 						skb_reserve(skb, 2); /* 16byte align */
1007 						skb_copy_from_linear_data(rxptr->rx_skb_ptr,
1008 							  skb_put(skb, rxlen),
1009 									  rxlen);
1010 						dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
1011 					} else
1012 						skb_put(skb, rxlen);
1013 
1014 					skb->protocol = eth_type_trans(skb, dev);
1015 					netif_rx(skb);
1016 					dev->stats.rx_packets++;
1017 					dev->stats.rx_bytes += rxlen;
1018 				}
1019 			} else {
1020 				/* Reuse the SKB when the packet contains an error */
1021 				DMFE_DBUG(0, "Reuse SK buffer, rdes0", rdes0);
1022 				dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
1023 			}
1024 		}
1025 
1026 		rxptr = rxptr->next_rx_desc;
1027 	}
1028 
1029 	db->rx_ready_ptr = rxptr;
1030 }
1031 
1032 /*
1033  * Set DM910X multicast address
1034  */
1035 
1036 static void dmfe_set_filter_mode(struct net_device *dev)
1037 {
1038 	struct dmfe_board_info *db = netdev_priv(dev);
1039 	unsigned long flags;
1040 	int mc_count = netdev_mc_count(dev);
1041 
1042 	DMFE_DBUG(0, "dmfe_set_filter_mode()", 0);
1043 	spin_lock_irqsave(&db->lock, flags);
1044 
1045 	if (dev->flags & IFF_PROMISC) {
1046 		DMFE_DBUG(0, "Enable PROM Mode", 0);
1047 		db->cr6_data |= CR6_PM | CR6_PBF;
1048 		update_cr6(db->cr6_data, db->ioaddr);
1049 		spin_unlock_irqrestore(&db->lock, flags);
1050 		return;
1051 	}
1052 
1053 	if (dev->flags & IFF_ALLMULTI || mc_count > DMFE_MAX_MULTICAST) {
1054 		DMFE_DBUG(0, "Pass all multicast address", mc_count);
1055 		db->cr6_data &= ~(CR6_PM | CR6_PBF);
1056 		db->cr6_data |= CR6_PAM;
1057 		spin_unlock_irqrestore(&db->lock, flags);
1058 		return;
1059 	}
1060 
1061 	DMFE_DBUG(0, "Set multicast address", mc_count);
1062 	if (db->chip_id == PCI_DM9132_ID)
1063 		dm9132_id_table(dev);	/* DM9132 */
1064 	else
1065 		send_filter_frame(dev);	/* DM9102/DM9102A */
1066 	spin_unlock_irqrestore(&db->lock, flags);
1067 }
1068 
1069 /*
1070  * 	Ethtool interface
1071  */
1072 
1073 static void dmfe_ethtool_get_drvinfo(struct net_device *dev,
1074 			       struct ethtool_drvinfo *info)
1075 {
1076 	struct dmfe_board_info *np = netdev_priv(dev);
1077 
1078 	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
1079 	strlcpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info));
1080 }
1081 
1082 static int dmfe_ethtool_set_wol(struct net_device *dev,
1083 				struct ethtool_wolinfo *wolinfo)
1084 {
1085 	struct dmfe_board_info *db = netdev_priv(dev);
1086 
1087 	if (wolinfo->wolopts & (WAKE_UCAST | WAKE_MCAST | WAKE_BCAST |
1088 		   		WAKE_ARP | WAKE_MAGICSECURE))
1089 		   return -EOPNOTSUPP;
1090 
1091 	db->wol_mode = wolinfo->wolopts;
1092 	return 0;
1093 }
1094 
1095 static void dmfe_ethtool_get_wol(struct net_device *dev,
1096 				 struct ethtool_wolinfo *wolinfo)
1097 {
1098 	struct dmfe_board_info *db = netdev_priv(dev);
1099 
1100 	wolinfo->supported = WAKE_PHY | WAKE_MAGIC;
1101 	wolinfo->wolopts = db->wol_mode;
1102 }
1103 
1104 
1105 static const struct ethtool_ops netdev_ethtool_ops = {
1106 	.get_drvinfo		= dmfe_ethtool_get_drvinfo,
1107 	.get_link               = ethtool_op_get_link,
1108 	.set_wol		= dmfe_ethtool_set_wol,
1109 	.get_wol		= dmfe_ethtool_get_wol,
1110 };
1111 
1112 /*
1113  *	A periodic timer routine
1114  *	Dynamic media sense, allocate Rx buffer...
1115  */
1116 
1117 static void dmfe_timer(struct timer_list *t)
1118 {
1119 	struct dmfe_board_info *db = from_timer(db, t, timer);
1120 	struct net_device *dev = pci_get_drvdata(db->pdev);
1121 	void __iomem *ioaddr = db->ioaddr;
1122 	u32 tmp_cr8;
1123 	unsigned char tmp_cr12;
1124  	unsigned long flags;
1125 
1126 	int link_ok, link_ok_phy;
1127 
1128 	DMFE_DBUG(0, "dmfe_timer()", 0);
1129 	spin_lock_irqsave(&db->lock, flags);
1130 
1131 	/* Media mode process when link is OK, before entering this routine */
1132 	if (db->first_in_callback == 0) {
1133 		db->first_in_callback = 1;
1134 		if (db->chip_type && (db->chip_id==PCI_DM9102_ID)) {
1135 			db->cr6_data &= ~0x40000;
1136 			update_cr6(db->cr6_data, ioaddr);
1137 			dmfe_phy_write(ioaddr, db->phy_addr, 0, 0x1000, db->chip_id);
1138 			db->cr6_data |= 0x40000;
1139 			update_cr6(db->cr6_data, ioaddr);
1140 			db->timer.expires = DMFE_TIMER_WUT + HZ * 2;
1141 			add_timer(&db->timer);
1142 			spin_unlock_irqrestore(&db->lock, flags);
1143 			return;
1144 		}
1145 	}
1146 
1147 
1148 	/* Operating Mode Check */
1149 	if ( (db->dm910x_chk_mode & 0x1) &&
1150 		(dev->stats.rx_packets > MAX_CHECK_PACKET) )
1151 		db->dm910x_chk_mode = 0x4;
1152 
1153 	/* Dynamic reset DM910X : system error or transmit time-out */
1154 	tmp_cr8 = dr32(DCR8);
1155 	if ( (db->interval_rx_cnt==0) && (tmp_cr8) ) {
1156 		db->reset_cr8++;
1157 		db->wait_reset = 1;
1158 	}
1159 	db->interval_rx_cnt = 0;
1160 
1161 	/* TX polling kick monitor */
1162 	if ( db->tx_packet_cnt &&
1163 	     time_after(jiffies, dev_trans_start(dev) + DMFE_TX_KICK) ) {
1164 		dw32(DCR1, 0x1);   /* Tx polling again */
1165 
1166 		/* TX Timeout */
1167 		if (time_after(jiffies, dev_trans_start(dev) + DMFE_TX_TIMEOUT) ) {
1168 			db->reset_TXtimeout++;
1169 			db->wait_reset = 1;
1170 			dev_warn(&dev->dev, "Tx timeout - resetting\n");
1171 		}
1172 	}
1173 
1174 	if (db->wait_reset) {
1175 		DMFE_DBUG(0, "Dynamic Reset device", db->tx_packet_cnt);
1176 		db->reset_count++;
1177 		dmfe_dynamic_reset(dev);
1178 		db->first_in_callback = 0;
1179 		db->timer.expires = DMFE_TIMER_WUT;
1180 		add_timer(&db->timer);
1181 		spin_unlock_irqrestore(&db->lock, flags);
1182 		return;
1183 	}
1184 
1185 	/* Link status check, Dynamic media type change */
1186 	if (db->chip_id == PCI_DM9132_ID)
1187 		tmp_cr12 = dr8(DCR9 + 3);	/* DM9132 */
1188 	else
1189 		tmp_cr12 = dr8(DCR12);		/* DM9102/DM9102A */
1190 
1191 	if ( ((db->chip_id == PCI_DM9102_ID) &&
1192 		(db->chip_revision == 0x30)) ||
1193 		((db->chip_id == PCI_DM9132_ID) &&
1194 		(db->chip_revision == 0x10)) ) {
1195 		/* DM9102A Chip */
1196 		if (tmp_cr12 & 2)
1197 			link_ok = 0;
1198 		else
1199 			link_ok = 1;
1200 	}
1201 	else
1202 		/*0x43 is used instead of 0x3 because bit 6 should represent
1203 			link status of external PHY */
1204 		link_ok = (tmp_cr12 & 0x43) ? 1 : 0;
1205 
1206 
1207 	/* If the chip reports that the link has failed, it could be because
1208 	   the external PHY link status pin is not connected correctly to
1209 	   the chip. To be sure, ask the PHY too.
1210 	*/
1211 
1212 	/* need a dummy read because of PHY's register latch*/
1213 	dmfe_phy_read (db->ioaddr, db->phy_addr, 1, db->chip_id);
1214 	link_ok_phy = (dmfe_phy_read (db->ioaddr,
1215 				      db->phy_addr, 1, db->chip_id) & 0x4) ? 1 : 0;
1216 
1217 	if (link_ok_phy != link_ok) {
1218 		DMFE_DBUG (0, "PHY and chip report different link status", 0);
1219 		link_ok = link_ok | link_ok_phy;
1220  	}
1221 
1222 	if ( !link_ok && netif_carrier_ok(dev)) {
1223 		/* Link Failed */
1224 		DMFE_DBUG(0, "Link Failed", tmp_cr12);
1225 		netif_carrier_off(dev);
1226 
1227 		/* For Force 10/100M Half/Full mode: Enable Auto-Nego mode */
1228 		/* AUTO mode or forced 1M Homerun/Longrun doesn't need this */
1229 		if ( !(db->media_mode & 0x38) )
1230 			dmfe_phy_write(db->ioaddr, db->phy_addr,
1231 				       0, 0x1000, db->chip_id);
1232 
1233 		/* AUTO mode, if INT phyxcer link failed, select EXT device */
1234 		if (db->media_mode & DMFE_AUTO) {
1235 			/* 10/100M link failed, use 1M Home-Net */
1236 			db->cr6_data|=0x00040000;	/* bit18=1, MII */
1237 			db->cr6_data&=~0x00000200;	/* bit9=0, HD mode */
1238 			update_cr6(db->cr6_data, ioaddr);
1239 		}
1240 	} else if (!netif_carrier_ok(dev)) {
1241 
1242 		DMFE_DBUG(0, "Link OK", tmp_cr12);
1243 
1244 		/* Auto Sense Speed */
1245 		if ( !(db->media_mode & DMFE_AUTO) || !dmfe_sense_speed(db)) {
1246 			netif_carrier_on(dev);
1247 			SHOW_MEDIA_TYPE(db->op_mode);
1248 		}
1249 
1250 		dmfe_process_mode(db);
1251 	}
1252 
1253 	/* HPNA remote command check */
1254 	if (db->HPNA_command & 0xf00) {
1255 		db->HPNA_timer--;
1256 		if (!db->HPNA_timer)
1257 			dmfe_HPNA_remote_cmd_chk(db);
1258 	}
1259 
1260 	/* Timer active again */
1261 	db->timer.expires = DMFE_TIMER_WUT;
1262 	add_timer(&db->timer);
1263 	spin_unlock_irqrestore(&db->lock, flags);
1264 }
1265 
1266 
1267 /*
1268  *	Dynamic reset the DM910X board
1269  *	Stop DM910X board
1270  *	Free Tx/Rx allocated memory
1271  *	Reset DM910X board
1272  *	Re-initialize DM910X board
1273  */
1274 
1275 static void dmfe_dynamic_reset(struct net_device *dev)
1276 {
1277 	struct dmfe_board_info *db = netdev_priv(dev);
1278 	void __iomem *ioaddr = db->ioaddr;
1279 
1280 	DMFE_DBUG(0, "dmfe_dynamic_reset()", 0);
1281 
1282 	/* Stop MAC controller */
1283 	db->cr6_data &= ~(CR6_RXSC | CR6_TXSC);	/* Disable Tx/Rx */
1284 	update_cr6(db->cr6_data, ioaddr);
1285 	dw32(DCR7, 0);				/* Disable Interrupt */
1286 	dw32(DCR5, dr32(DCR5));
1287 
1288 	/* Disable upper layer interface */
1289 	netif_stop_queue(dev);
1290 
1291 	/* Free allocated Rx buffers */
1292 	dmfe_free_rxbuffer(db);
1293 
1294 	/* system variable init */
1295 	db->tx_packet_cnt = 0;
1296 	db->tx_queue_cnt = 0;
1297 	db->rx_avail_cnt = 0;
1298 	netif_carrier_off(dev);
1299 	db->wait_reset = 0;
1300 
1301 	/* Re-initialize DM910X board */
1302 	dmfe_init_dm910x(dev);
1303 
1304 	/* Restart upper layer interface */
1305 	netif_wake_queue(dev);
1306 }
1307 
1308 
1309 /*
1310  *	free all allocated rx buffer
1311  */
1312 
1313 static void dmfe_free_rxbuffer(struct dmfe_board_info * db)
1314 {
1315 	DMFE_DBUG(0, "dmfe_free_rxbuffer()", 0);
1316 
1317 	/* free allocated rx buffer */
1318 	while (db->rx_avail_cnt) {
1319 		dev_kfree_skb(db->rx_ready_ptr->rx_skb_ptr);
1320 		db->rx_ready_ptr = db->rx_ready_ptr->next_rx_desc;
1321 		db->rx_avail_cnt--;
1322 	}
1323 }
1324 
1325 
1326 /*
1327  *	Reuse the SK buffer
1328  */
1329 
1330 static void dmfe_reuse_skb(struct dmfe_board_info *db, struct sk_buff * skb)
1331 {
1332 	struct rx_desc *rxptr = db->rx_insert_ptr;
1333 
1334 	if (!(rxptr->rdes0 & cpu_to_le32(0x80000000))) {
1335 		rxptr->rx_skb_ptr = skb;
1336 		rxptr->rdes2 = cpu_to_le32(dma_map_single(&db->pdev->dev, skb->data,
1337 							  RX_ALLOC_SIZE, DMA_FROM_DEVICE));
1338 		wmb();
1339 		rxptr->rdes0 = cpu_to_le32(0x80000000);
1340 		db->rx_avail_cnt++;
1341 		db->rx_insert_ptr = rxptr->next_rx_desc;
1342 	} else
1343 		DMFE_DBUG(0, "SK Buffer reuse method error", db->rx_avail_cnt);
1344 }
1345 
1346 
1347 /*
1348  *	Initialize Transmit/Receive descriptors
1349  *	using a chain structure, and allocate Tx/Rx buffers
1350  */
1351 
1352 static void dmfe_descriptor_init(struct net_device *dev)
1353 {
1354 	struct dmfe_board_info *db = netdev_priv(dev);
1355 	void __iomem *ioaddr = db->ioaddr;
1356 	struct tx_desc *tmp_tx;
1357 	struct rx_desc *tmp_rx;
1358 	unsigned char *tmp_buf;
1359 	dma_addr_t tmp_tx_dma, tmp_rx_dma;
1360 	dma_addr_t tmp_buf_dma;
1361 	int i;
1362 
1363 	DMFE_DBUG(0, "dmfe_descriptor_init()", 0);
1364 
1365 	/* tx descriptor start pointer */
1366 	db->tx_insert_ptr = db->first_tx_desc;
1367 	db->tx_remove_ptr = db->first_tx_desc;
1368 	dw32(DCR4, db->first_tx_desc_dma);     /* TX DESC address */
1369 
1370 	/* rx descriptor start pointer */
1371 	db->first_rx_desc = (void *)db->first_tx_desc +
1372 			sizeof(struct tx_desc) * TX_DESC_CNT;
1373 
1374 	db->first_rx_desc_dma =  db->first_tx_desc_dma +
1375 			sizeof(struct tx_desc) * TX_DESC_CNT;
1376 	db->rx_insert_ptr = db->first_rx_desc;
1377 	db->rx_ready_ptr = db->first_rx_desc;
1378 	dw32(DCR3, db->first_rx_desc_dma);		/* RX DESC address */
1379 
1380 	/* Init Transmit chain */
1381 	tmp_buf = db->buf_pool_start;
1382 	tmp_buf_dma = db->buf_pool_dma_start;
1383 	tmp_tx_dma = db->first_tx_desc_dma;
1384 	for (tmp_tx = db->first_tx_desc, i = 0; i < TX_DESC_CNT; i++, tmp_tx++) {
1385 		tmp_tx->tx_buf_ptr = tmp_buf;
1386 		tmp_tx->tdes0 = cpu_to_le32(0);
1387 		tmp_tx->tdes1 = cpu_to_le32(0x81000000);	/* IC, chain */
1388 		tmp_tx->tdes2 = cpu_to_le32(tmp_buf_dma);
1389 		tmp_tx_dma += sizeof(struct tx_desc);
1390 		tmp_tx->tdes3 = cpu_to_le32(tmp_tx_dma);
1391 		tmp_tx->next_tx_desc = tmp_tx + 1;
1392 		tmp_buf = tmp_buf + TX_BUF_ALLOC;
1393 		tmp_buf_dma = tmp_buf_dma + TX_BUF_ALLOC;
1394 	}
1395 	(--tmp_tx)->tdes3 = cpu_to_le32(db->first_tx_desc_dma);
1396 	tmp_tx->next_tx_desc = db->first_tx_desc;
1397 
1398 	 /* Init Receive descriptor chain */
1399 	tmp_rx_dma=db->first_rx_desc_dma;
1400 	for (tmp_rx = db->first_rx_desc, i = 0; i < RX_DESC_CNT; i++, tmp_rx++) {
1401 		tmp_rx->rdes0 = cpu_to_le32(0);
1402 		tmp_rx->rdes1 = cpu_to_le32(0x01000600);
1403 		tmp_rx_dma += sizeof(struct rx_desc);
1404 		tmp_rx->rdes3 = cpu_to_le32(tmp_rx_dma);
1405 		tmp_rx->next_rx_desc = tmp_rx + 1;
1406 	}
1407 	(--tmp_rx)->rdes3 = cpu_to_le32(db->first_rx_desc_dma);
1408 	tmp_rx->next_rx_desc = db->first_rx_desc;
1409 
1410 	/* pre-allocate Rx buffer */
1411 	allocate_rx_buffer(dev);
1412 }
1413 
1414 
1415 /*
1416  *	Update CR6 value
1417  *	First stop the DM910X, then write the new value and restart
1418  */
1419 
1420 static void update_cr6(u32 cr6_data, void __iomem *ioaddr)
1421 {
1422 	u32 cr6_tmp;
1423 
1424 	cr6_tmp = cr6_data & ~0x2002;           /* stop Tx/Rx */
1425 	dw32(DCR6, cr6_tmp);
1426 	udelay(5);
1427 	dw32(DCR6, cr6_data);
1428 	udelay(5);
1429 }
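
/*
 * The ~0x2002 mask clears CR6_RXSC (0x2) and CR6_TXSC (0x2000), the
 * Rx/Tx start bits, so the chip is quiesced before the new CR6 value
 * is written and Tx/Rx restart.
 */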
1430 
1431 
1432 /*
1433  *	Send a setup frame for DM9132
1434  *	This setup frame initializes the DM910X address filter mode
1435  */
1436 
1437 static void dm9132_id_table(struct net_device *dev)
1438 {
1439 	struct dmfe_board_info *db = netdev_priv(dev);
1440 	void __iomem *ioaddr = db->ioaddr + 0xc0;
1441 	u16 *addrptr = (u16 *)dev->dev_addr;
1442 	struct netdev_hw_addr *ha;
1443 	u16 i, hash_table[4];
1444 
1445 	/* Node address */
1446 	for (i = 0; i < 3; i++) {
1447 		dw16(0, addrptr[i]);
1448 		ioaddr += 4;
1449 	}
1450 
1451 	/* Clear Hash Table */
1452 	memset(hash_table, 0, sizeof(hash_table));
1453 
1454 	/* broadcast address */
1455 	hash_table[3] = 0x8000;
1456 
1457 	/* the multicast address in Hash Table : 64 bits */
1458 	netdev_for_each_mc_addr(ha, dev) {
1459 		u32 hash_val = cal_CRC((char *)ha->addr, 6, 0) & 0x3f;
1460 
1461 		hash_table[hash_val / 16] |= (u16) 1 << (hash_val % 16);
1462 	}
1463 
1464 	/* Write the hash table to MAC MD table */
1465 	for (i = 0; i < 4; i++, ioaddr += 4)
1466 		dw16(0, hash_table[i]);
1467 }
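
/*
 * The 64-bit multicast hash is stored as four 16-bit words, so the
 * hash_table[3] = 0x8000 assignment above pins bit 63, which this chip
 * uses for the broadcast address; every multicast address then sets
 * bit (CRC & 0x3f).
 */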
1468 
1469 
1470 /*
1471  *	Send a setup frame for DM9102/DM9102A
1472  *	This setup frame initializes the DM910X address filter mode
1473  */
1474 
1475 static void send_filter_frame(struct net_device *dev)
1476 {
1477 	struct dmfe_board_info *db = netdev_priv(dev);
1478 	struct netdev_hw_addr *ha;
1479 	struct tx_desc *txptr;
1480 	u16 * addrptr;
1481 	u32 * suptr;
1482 	int i;
1483 
1484 	DMFE_DBUG(0, "send_filter_frame()", 0);
1485 
1486 	txptr = db->tx_insert_ptr;
1487 	suptr = (u32 *) txptr->tx_buf_ptr;
1488 
1489 	/* Node address */
1490 	addrptr = (u16 *) dev->dev_addr;
1491 	*suptr++ = addrptr[0];
1492 	*suptr++ = addrptr[1];
1493 	*suptr++ = addrptr[2];
1494 
1495 	/* broadcast address */
1496 	*suptr++ = 0xffff;
1497 	*suptr++ = 0xffff;
1498 	*suptr++ = 0xffff;
1499 
1500 	/* fit the multicast address */
1501 	netdev_for_each_mc_addr(ha, dev) {
1502 		addrptr = (u16 *) ha->addr;
1503 		*suptr++ = addrptr[0];
1504 		*suptr++ = addrptr[1];
1505 		*suptr++ = addrptr[2];
1506 	}
1507 
1508 	for (i = netdev_mc_count(dev); i < 14; i++) {
1509 		*suptr++ = 0xffff;
1510 		*suptr++ = 0xffff;
1511 		*suptr++ = 0xffff;
1512 	}
1513 
1514 	/* prepare the setup frame */
1515 	db->tx_insert_ptr = txptr->next_tx_desc;
1516 	txptr->tdes1 = cpu_to_le32(0x890000c0);
1517 
1518 	/* Resource Check and Send the setup packet */
1519 	if (!db->tx_packet_cnt) {
1520 		void __iomem *ioaddr = db->ioaddr;
1521 
1522 		/* Resource Empty */
1523 		db->tx_packet_cnt++;
1524 		txptr->tdes0 = cpu_to_le32(0x80000000);
1525 		update_cr6(db->cr6_data | 0x2000, ioaddr);
1526 		dw32(DCR1, 0x1);	/* Issue Tx polling */
1527 		update_cr6(db->cr6_data, ioaddr);
1528 		netif_trans_update(dev);
1529 	} else
1530 		db->tx_queue_cnt++;	/* Put in TX queue */
1531 }
1532 
1533 
1534 /*
1535  *	Allocate rx buffers,
1536  *	filling as many Rx descriptors as possible
1537  */
1538 
1539 static void allocate_rx_buffer(struct net_device *dev)
1540 {
1541 	struct dmfe_board_info *db = netdev_priv(dev);
1542 	struct rx_desc *rxptr;
1543 	struct sk_buff *skb;
1544 
1545 	rxptr = db->rx_insert_ptr;
1546 
1547 	while(db->rx_avail_cnt < RX_DESC_CNT) {
1548 		if ( ( skb = netdev_alloc_skb(dev, RX_ALLOC_SIZE) ) == NULL )
1549 			break;
1550 		rxptr->rx_skb_ptr = skb; /* FIXME (?) */
1551 		rxptr->rdes2 = cpu_to_le32(dma_map_single(&db->pdev->dev, skb->data,
1552 							  RX_ALLOC_SIZE, DMA_FROM_DEVICE));
1553 		wmb();
1554 		rxptr->rdes0 = cpu_to_le32(0x80000000);
1555 		rxptr = rxptr->next_rx_desc;
1556 		db->rx_avail_cnt++;
1557 	}
1558 
1559 	db->rx_insert_ptr = rxptr;
1560 }
1561 
1562 static void srom_clk_write(void __iomem *ioaddr, u32 data)
1563 {
1564 	static const u32 cmd[] = {
1565 		CR9_SROM_READ | CR9_SRCS,
1566 		CR9_SROM_READ | CR9_SRCS | CR9_SRCLK,
1567 		CR9_SROM_READ | CR9_SRCS
1568 	};
1569 	int i;
1570 
1571 	for (i = 0; i < ARRAY_SIZE(cmd); i++) {
1572 		dw32(DCR9, data | cmd[i]);
1573 		udelay(5);
1574 	}
1575 }
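
/*
 * Each call clocks one command/address bit into the serial EEPROM:
 * with chip select (CR9_SRCS) held, SRCLK is raised and dropped around
 * the data bit passed in `data`, the usual three-wire sequence for a
 * 93C46-style part (the exact EEPROM type is an assumption here).
 */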
1576 
1577 /*
1578  *	Read one word data from the serial ROM
1579  */
1580 static u16 read_srom_word(void __iomem *ioaddr, int offset)
1581 {
1582 	u16 srom_data;
1583 	int i;
1584 
1585 	dw32(DCR9, CR9_SROM_READ);
1586 	udelay(5);
1587 	dw32(DCR9, CR9_SROM_READ | CR9_SRCS);
1588 	udelay(5);
1589 
1590 	/* Send the Read Command 110b */
1591 	srom_clk_write(ioaddr, SROM_DATA_1);
1592 	srom_clk_write(ioaddr, SROM_DATA_1);
1593 	srom_clk_write(ioaddr, SROM_DATA_0);
1594 
1595 	/* Send the offset */
1596 	for (i = 5; i >= 0; i--) {
1597 		srom_data = (offset & (1 << i)) ? SROM_DATA_1 : SROM_DATA_0;
1598 		srom_clk_write(ioaddr, srom_data);
1599 	}
1600 
1601 	dw32(DCR9, CR9_SROM_READ | CR9_SRCS);
1602 	udelay(5);
1603 
1604 	for (i = 16; i > 0; i--) {
1605 		dw32(DCR9, CR9_SROM_READ | CR9_SRCS | CR9_SRCLK);
1606 		udelay(5);
1607 		srom_data = (srom_data << 1) |
1608 				((dr32(DCR9) & CR9_CRDOUT) ? 1 : 0);
1609 		dw32(DCR9, CR9_SROM_READ | CR9_SRCS);
1610 		udelay(5);
1611 	}
1612 
1613 	dw32(DCR9, CR9_SROM_READ);
1614 	udelay(5);
1615 	return srom_data;
1616 }
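
/*
 * For example, dmfe_init_one() snapshots the whole 128-byte SROM as
 * 64 little-endian words:
 *
 *	for (i = 0; i < 64; i++)
 *		((__le16 *)db->srom)[i] =
 *			cpu_to_le16(read_srom_word(db->ioaddr, i));
 */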
1617 
1618 
1619 /*
1620  *	Auto sense the media mode
1621  */
1622 
1623 static u8 dmfe_sense_speed(struct dmfe_board_info *db)
1624 {
1625 	void __iomem *ioaddr = db->ioaddr;
1626 	u8 ErrFlag = 0;
1627 	u16 phy_mode;
1628 
1629 	/* CR6 bit18=0, select 10/100M */
1630 	update_cr6(db->cr6_data & ~0x40000, ioaddr);
1631 
1632 	phy_mode = dmfe_phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id);
1633 	phy_mode = dmfe_phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id);
1634 
1635 	if ( (phy_mode & 0x24) == 0x24 ) {
1636 		if (db->chip_id == PCI_DM9132_ID)	/* DM9132 */
1637 			phy_mode = dmfe_phy_read(db->ioaddr,
1638 						 db->phy_addr, 7, db->chip_id) & 0xf000;
1639 		else 				/* DM9102/DM9102A */
1640 			phy_mode = dmfe_phy_read(db->ioaddr,
1641 						 db->phy_addr, 17, db->chip_id) & 0xf000;
1642 		switch (phy_mode) {
1643 		case 0x1000: db->op_mode = DMFE_10MHF; break;
1644 		case 0x2000: db->op_mode = DMFE_10MFD; break;
1645 		case 0x4000: db->op_mode = DMFE_100MHF; break;
1646 		case 0x8000: db->op_mode = DMFE_100MFD; break;
1647 		default: db->op_mode = DMFE_10MHF;
1648 			ErrFlag = 1;
1649 			break;
1650 		}
1651 	} else {
1652 		db->op_mode = DMFE_10MHF;
1653 		DMFE_DBUG(0, "Link Failed :", phy_mode);
1654 		ErrFlag = 1;
1655 	}
1656 
1657 	return ErrFlag;
1658 }
1659 
1660 
1661 /*
1662  *	Set 10/100 phyxcer capability
1663  *	AUTO mode : phyxcer register4 is NIC capability
1664  *	Force mode: phyxcer register4 is the force media
1665  */
1666 
1667 static void dmfe_set_phyxcer(struct dmfe_board_info *db)
1668 {
1669 	void __iomem *ioaddr = db->ioaddr;
1670 	u16 phy_reg;
1671 
1672 	/* Select 10/100M phyxcer */
1673 	db->cr6_data &= ~0x40000;
1674 	update_cr6(db->cr6_data, ioaddr);
1675 
1676 	/* DM9009 Chip: Phyxcer reg18 bit12=0 */
1677 	if (db->chip_id == PCI_DM9009_ID) {
1678 		phy_reg = dmfe_phy_read(db->ioaddr,
1679 					db->phy_addr, 18, db->chip_id) & ~0x1000;
1680 
1681 		dmfe_phy_write(db->ioaddr,
1682 			       db->phy_addr, 18, phy_reg, db->chip_id);
1683 	}
1684 
1685 	/* Phyxcer capability setting */
1686 	phy_reg = dmfe_phy_read(db->ioaddr, db->phy_addr, 4, db->chip_id) & ~0x01e0;
1687 
1688 	if (db->media_mode & DMFE_AUTO) {
1689 		/* AUTO Mode */
1690 		phy_reg |= db->PHY_reg4;
1691 	} else {
1692 		/* Force Mode */
1693 		switch(db->media_mode) {
1694 		case DMFE_10MHF: phy_reg |= 0x20; break;
1695 		case DMFE_10MFD: phy_reg |= 0x40; break;
1696 		case DMFE_100MHF: phy_reg |= 0x80; break;
1697 		case DMFE_100MFD: phy_reg |= 0x100; break;
1698 		}
1699 		if (db->chip_id == PCI_DM9009_ID) phy_reg &= 0x61;
1700 	}
1701 
1702   	/* Write new capability to Phyxcer Reg4 */
1703 	if ( !(phy_reg & 0x01e0)) {
1704 		phy_reg|=db->PHY_reg4;
1705 		db->media_mode|=DMFE_AUTO;
1706 	}
1707 	dmfe_phy_write(db->ioaddr, db->phy_addr, 4, phy_reg, db->chip_id);
1708 
1709  	/* Restart Auto-Negotiation */
1710 	if ( db->chip_type && (db->chip_id == PCI_DM9102_ID) )
1711 		dmfe_phy_write(db->ioaddr, db->phy_addr, 0, 0x1800, db->chip_id);
1712 	if ( !db->chip_type )
1713 		dmfe_phy_write(db->ioaddr, db->phy_addr, 0, 0x1200, db->chip_id);
1714 }
1715 
1716 
1717 /*
1718  *	Process op-mode
1719  *	AUTO mode : PHY controller in Auto-negotiation Mode
1720  *	Force mode: PHY controller in force mode with HUB
1721  *			N-way force capability with SWITCH
1722  */
1723 
1724 static void dmfe_process_mode(struct dmfe_board_info *db)
1725 {
1726 	u16 phy_reg;
1727 
1728 	/* Full Duplex Mode Check */
1729 	if (db->op_mode & 0x4)
1730 		db->cr6_data |= CR6_FDM;	/* Set Full Duplex Bit */
1731 	else
1732 		db->cr6_data &= ~CR6_FDM;	/* Clear Full Duplex Bit */
1733 
1734 	/* Transceiver selection */
1735 	if (db->op_mode & 0x10)		/* 1M HomePNA */
1736 		db->cr6_data |= 0x40000;/* External MII select */
1737 	else
1738 		db->cr6_data &= ~0x40000;/* Internal 10/100 transceiver */
1739 
1740 	update_cr6(db->cr6_data, db->ioaddr);
1741 
1742 	/* 10/100M phyxcer force mode needed */
1743 	if ( !(db->media_mode & 0x18)) {
1744 		/* Force Mode */
1745 		phy_reg = dmfe_phy_read(db->ioaddr, db->phy_addr, 6, db->chip_id);
1746 		if ( !(phy_reg & 0x1) ) {
1747 			/* partner without N-Way capability */
1748 			phy_reg = 0x0;
1749 			switch(db->op_mode) {
1750 			case DMFE_10MHF: phy_reg = 0x0; break;
1751 			case DMFE_10MFD: phy_reg = 0x100; break;
1752 			case DMFE_100MHF: phy_reg = 0x2000; break;
1753 			case DMFE_100MFD: phy_reg = 0x2100; break;
1754 			}
1755 			dmfe_phy_write(db->ioaddr,
1756 				       db->phy_addr, 0, phy_reg, db->chip_id);
1757        			if ( db->chip_type && (db->chip_id == PCI_DM9102_ID) )
1758 				mdelay(20);
1759 			dmfe_phy_write(db->ioaddr,
1760 				       db->phy_addr, 0, phy_reg, db->chip_id);
1761 		}
1762 	}
1763 }
1764 
1765 
1766 /*
1767  *	Write a word to Phy register
1768  */
1769 
1770 static void dmfe_phy_write(void __iomem *ioaddr, u8 phy_addr, u8 offset,
1771 			   u16 phy_data, u32 chip_id)
1772 {
1773 	u16 i;
1774 
1775 	if (chip_id == PCI_DM9132_ID) {
1776 		dw16(0x80 + offset * 4, phy_data);
1777 	} else {
1778 		/* DM9102/DM9102A Chip */
1779 
1780 		/* Send 35 synchronization clocks to the PHY controller */
1781 		for (i = 0; i < 35; i++)
1782 			dmfe_phy_write_1bit(ioaddr, PHY_DATA_1);
1783 
1784 		/* Send start command(01) to Phy */
1785 		dmfe_phy_write_1bit(ioaddr, PHY_DATA_0);
1786 		dmfe_phy_write_1bit(ioaddr, PHY_DATA_1);
1787 
1788 		/* Send write command(01) to Phy */
1789 		dmfe_phy_write_1bit(ioaddr, PHY_DATA_0);
1790 		dmfe_phy_write_1bit(ioaddr, PHY_DATA_1);
1791 
1792 		/* Send Phy address */
1793 		for (i = 0x10; i > 0; i = i >> 1)
1794 			dmfe_phy_write_1bit(ioaddr,
1795 					    phy_addr & i ? PHY_DATA_1 : PHY_DATA_0);
1796 
1797 		/* Send register address */
1798 		for (i = 0x10; i > 0; i = i >> 1)
1799 			dmfe_phy_write_1bit(ioaddr,
1800 					    offset & i ? PHY_DATA_1 : PHY_DATA_0);
1801 
1802 		/* write transition */
1803 		dmfe_phy_write_1bit(ioaddr, PHY_DATA_1);
1804 		dmfe_phy_write_1bit(ioaddr, PHY_DATA_0);
1805 
1806 		/* Write a word data to PHY controller */
1807 		for ( i = 0x8000; i > 0; i >>= 1)
1808 			dmfe_phy_write_1bit(ioaddr,
1809 					    phy_data & i ? PHY_DATA_1 : PHY_DATA_0);
1810 	}
1811 }
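
/*
 * Taken together, the writes above bit-bang a standard IEEE 802.3
 * clause-22 MDIO write frame: a preamble of 1s, start (01), write
 * opcode (01), the 5-bit PHY address, the 5-bit register address, a
 * turnaround (10), then the 16 data bits MSB first.
 */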
1812 
1813 
1814 /*
1815  *	Read a word data from phy register
1816  */
1817 
1818 static u16 dmfe_phy_read(void __iomem *ioaddr, u8 phy_addr, u8 offset, u32 chip_id)
1819 {
1820 	int i;
1821 	u16 phy_data;
1822 
1823 	if (chip_id == PCI_DM9132_ID) {
1824 		/* DM9132 Chip */
1825 		phy_data = dr16(0x80 + offset * 4);
1826 	} else {
1827 		/* DM9102/DM9102A Chip */
1828 
1829 		/* Send 35 synchronization clocks (preamble) to PHY controller */
1830 		for (i = 0; i < 35; i++)
1831 			dmfe_phy_write_1bit(ioaddr, PHY_DATA_1);
1832 
1833 		/* Send start command(01) to Phy */
1834 		dmfe_phy_write_1bit(ioaddr, PHY_DATA_0);
1835 		dmfe_phy_write_1bit(ioaddr, PHY_DATA_1);
1836 
1837 		/* Send read command(10) to Phy */
1838 		dmfe_phy_write_1bit(ioaddr, PHY_DATA_1);
1839 		dmfe_phy_write_1bit(ioaddr, PHY_DATA_0);
1840 
1841 		/* Send Phy address */
1842 		for (i = 0x10; i > 0; i = i >> 1)
1843 			dmfe_phy_write_1bit(ioaddr,
1844 					    phy_addr & i ? PHY_DATA_1 : PHY_DATA_0);
1845 
1846 		/* Send register address */
1847 		for (i = 0x10; i > 0; i = i >> 1)
1848 			dmfe_phy_write_1bit(ioaddr,
1849 					    offset & i ? PHY_DATA_1 : PHY_DATA_0);
1850 
1851 		/* Skip the turnaround bit */
1852 		dmfe_phy_read_1bit(ioaddr);
1853 
1854 		/* Read 16-bit data, MSB first */
1855 		for (phy_data = 0, i = 0; i < 16; i++) {
1856 			phy_data <<= 1;
1857 			phy_data |= dmfe_phy_read_1bit(ioaddr);
1858 		}
1859 	}
1860 
1861 	return phy_data;
1862 }
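
/*
 *	The read sequence mirrors the write frame above, except the opcode
 *	is 10 (read) and the turnaround is driven by the PHY: one bit is
 *	discarded, then 16 data bits are shifted in MSB first.  E.g. the
 *	link status word lives in BMSR (clause 22 register 1):
 *
 *		u16 bmsr = dmfe_phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id);
 */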
1863 
1864 
1865 /*
1866  *	Write one bit data to Phy Controller
1867  */
1868 
1869 static void dmfe_phy_write_1bit(void __iomem *ioaddr, u32 phy_data)
1870 {
1871 	dw32(DCR9, phy_data);		/* MII Clock Low */
1872 	udelay(1);
1873 	dw32(DCR9, phy_data | MDCLKH);	/* MII Clock High */
1874 	udelay(1);
1875 	dw32(DCR9, phy_data);		/* MII Clock Low */
1876 	udelay(1);
1877 }
1878 
1879 
1880 /*
1881  *	Read one bit phy data from PHY controller
1882  */
1883 
1884 static u16 dmfe_phy_read_1bit(void __iomem *ioaddr)
1885 {
1886 	u16 phy_data;
1887 
1888 	dw32(DCR9, 0x50000);
1889 	udelay(1);
1890 	phy_data = (dr32(DCR9) >> 19) & 0x1;
1891 	dw32(DCR9, 0x40000);
1892 	udelay(1);
1893 
1894 	return phy_data;
1895 }
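
/*
 *	Reading aid for the two 1-bit helpers above, as used in this file:
 *	in DCR9, bit 16 (MDCLKH) is the MDC clock, bit 17 carries outgoing
 *	MDIO data (PHY_DATA_1/PHY_DATA_0), bit 18 (the 0x40000 common to
 *	both read values) turns the MDIO pin around for input, and incoming
 *	data is sampled from bit 19 (hence the "dr32(DCR9) >> 19" above).
 */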
1896 
1897 
1898 /*
1899  *	Parse SROM and set media mode
1900  */
1901 
1902 static void dmfe_parse_srom(struct dmfe_board_info *db)
1903 {
1904 	char *srom = db->srom;
1905 	int dmfe_mode, tmp_reg;
1906 
1907 	DMFE_DBUG(0, "dmfe_parse_srom() ", 0);
1908 
1909 	/* Init CR15 */
1910 	db->cr15_data = CR15_DEFAULT;
1911 
1912 	/* Check SROM Version */
1913 	if ( ( (int) srom[18] & 0xff) == SROM_V41_CODE) {
1914 		/* SROM V4.01 */
1915 		/* Get NIC support media mode */
1916 		db->NIC_capability = le16_to_cpup((__le16 *) (srom + 34));
1917 		db->PHY_reg4 = 0;
1918 		for (tmp_reg = 1; tmp_reg < 0x10; tmp_reg <<= 1) {
1919 			switch( db->NIC_capability & tmp_reg ) {
1920 			case 0x1: db->PHY_reg4 |= 0x0020; break;
1921 			case 0x2: db->PHY_reg4 |= 0x0040; break;
1922 			case 0x4: db->PHY_reg4 |= 0x0080; break;
1923 			case 0x8: db->PHY_reg4 |= 0x0100; break;
1924 			}
1925 		}
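		/*
		 * Reading aid: the mapping above builds the clause 22
		 * auto-negotiation advertisement register (ANAR, PHY reg 4),
		 * where 0x0020/0x0040/0x0080/0x0100 advertise 10BASE-T
		 * half/full duplex and 100BASE-TX half/full duplex.
		 */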
1926 
1927 		/* Media Mode Force or not check */
1928 		dmfe_mode = (le32_to_cpup((__le32 *) (srom + 34)) &
1929 			     le32_to_cpup((__le32 *) (srom + 36)));
1930 		switch(dmfe_mode) {
1931 		case 0x4: dmfe_media_mode = DMFE_100MHF; break;	/* 100MHF */
1932 		case 0x2: dmfe_media_mode = DMFE_10MFD; break;	/* 10MFD */
1933 		case 0x8: dmfe_media_mode = DMFE_100MFD; break;	/* 100MFD */
1934 		case 0x100:
1935 		case 0x200: dmfe_media_mode = DMFE_1M_HPNA; break;/* HomePNA */
1936 		}
1937 
1938 		/* Special Function setting */
1939 		/* VLAN function */
1940 		if ( (SF_mode & 0x1) || (srom[43] & 0x80) )
1941 			db->cr15_data |= 0x40;
1942 
1943 		/* Flow Control */
1944 		if ( (SF_mode & 0x2) || (srom[40] & 0x1) )
1945 			db->cr15_data |= 0x400;
1946 
1947 		/* TX pause packet */
1948 		if ( (SF_mode & 0x4) || (srom[40] & 0xe) )
1949 			db->cr15_data |= 0x9800;
1950 	}
1951 
1952 	/* Parse HPNA parameter */
1953 	db->HPNA_command = 1;
1954 
1955 	/* Accept remote command or not */
1956 	if (HPNA_rx_cmd == 0)
1957 		db->HPNA_command |= 0x8000;
1958 
1959 	 /* Issue remote command & operation mode */
1960 	if (HPNA_tx_cmd == 1)
1961 		switch(HPNA_mode) {	/* Issue Remote Command */
1962 		case 0: db->HPNA_command |= 0x0904; break;
1963 		case 1: db->HPNA_command |= 0x0a00; break;
1964 		case 2: db->HPNA_command |= 0x0506; break;
1965 		case 3: db->HPNA_command |= 0x0602; break;
1966 		}
1967 	else
1968 		switch(HPNA_mode) {	/* Don't Issue */
1969 		case 0: db->HPNA_command |= 0x0004; break;
1970 		case 1: db->HPNA_command |= 0x0000; break;
1971 		case 2: db->HPNA_command |= 0x0006; break;
1972 		case 3: db->HPNA_command |= 0x0002; break;
1973 		}
1974 
1975 	/* Check DM9801 or DM9802 present or not */
1976 	db->HPNA_present = 0;
1977 	update_cr6(db->cr6_data | 0x40000, db->ioaddr);
1978 	tmp_reg = dmfe_phy_read(db->ioaddr, db->phy_addr, 3, db->chip_id);
1979 	if ( ( tmp_reg & 0xfff0 ) == 0xb900 ) {
1980 		/* DM9801 or DM9802 present */
1981 		db->HPNA_timer = 8;
1982 		if ( dmfe_phy_read(db->ioaddr, db->phy_addr, 31, db->chip_id) == 0x4404) {
1983 			/* DM9801 HomeRun */
1984 			db->HPNA_present = 1;
1985 			dmfe_program_DM9801(db, tmp_reg);
1986 		} else {
1987 			/* DM9802 LongRun */
1988 			db->HPNA_present = 2;
1989 			dmfe_program_DM9802(db);
1990 		}
1991 	}
1992 
1993 }
1994 
1995 
1996 /*
1997  *	Init HomeRun DM9801
1998  */
1999 
2000 static void dmfe_program_DM9801(struct dmfe_board_info *db, int HPNA_rev)
2001 {
2002 	uint reg17, reg25;
2003 
2004 	if (!HPNA_NoiseFloor) HPNA_NoiseFloor = DM9801_NOISE_FLOOR;
2005 	switch(HPNA_rev) {
2006 	case 0xb900: /* DM9801 E3 */
2007 		db->HPNA_command |= 0x1000;
2008 		reg25 = dmfe_phy_read(db->ioaddr, db->phy_addr, 24, db->chip_id);
2009 		reg25 = ( (reg25 + HPNA_NoiseFloor) & 0xff) | 0xf000;
2010 		reg17 = dmfe_phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id);
2011 		break;
2012 	case 0xb901: /* DM9801 E4 */
2013 		reg25 = dmfe_phy_read(db->ioaddr, db->phy_addr, 25, db->chip_id);
2014 		reg25 = (reg25 & 0xff00) + HPNA_NoiseFloor;
2015 		reg17 = dmfe_phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id);
2016 		reg17 = (reg17 & 0xfff0) + HPNA_NoiseFloor + 3;
2017 		break;
2018 	case 0xb902: /* DM9801 E5 */
2019 	case 0xb903: /* DM9801 E6 */
2020 	default:
2021 		db->HPNA_command |= 0x1000;
2022 		reg25 = dmfe_phy_read(db->ioaddr, db->phy_addr, 25, db->chip_id);
2023 		reg25 = (reg25 & 0xff00) + HPNA_NoiseFloor - 5;
2024 		reg17 = dmfe_phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id);
2025 		reg17 = (reg17 & 0xfff0) + HPNA_NoiseFloor;
2026 		break;
2027 	}
2028 	dmfe_phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command, db->chip_id);
2029 	dmfe_phy_write(db->ioaddr, db->phy_addr, 17, reg17, db->chip_id);
2030 	dmfe_phy_write(db->ioaddr, db->phy_addr, 25, reg25, db->chip_id);
2031 }
2032 
2033 
2034 /*
2035  *	Init HomeRun DM9802
2036  */
2037 
2038 static void dmfe_program_DM9802(struct dmfe_board_info *db)
2039 {
2040 	uint phy_reg;
2041 
2042 	if (!HPNA_NoiseFloor) HPNA_NoiseFloor = DM9802_NOISE_FLOOR;
2043 	dmfe_phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command, db->chip_id);
2044 	phy_reg = dmfe_phy_read(db->ioaddr, db->phy_addr, 25, db->chip_id);
2045 	phy_reg = ( phy_reg & 0xff00) + HPNA_NoiseFloor;
2046 	dmfe_phy_write(db->ioaddr, db->phy_addr, 25, phy_reg, db->chip_id);
2047 }
2048 
2049 
2050 /*
2051  *	Check remote HPNA power and speed status. If not correct,
2052  *	issue command again.
2053 */
2054 
2055 static void dmfe_HPNA_remote_cmd_chk(struct dmfe_board_info *db)
2056 {
2057 	uint phy_reg;
2058 
2059 	/* Get remote device status */
2060 	phy_reg = dmfe_phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id) & 0x60;
2061 	switch(phy_reg) {
2062 	case 0x00: phy_reg = 0x0a00; break; /* LP/LS */
2063 	case 0x20: phy_reg = 0x0900; break; /* LP/HS */
2064 	case 0x40: phy_reg = 0x0600; break; /* HP/LS */
2065 	case 0x60: phy_reg = 0x0500; break; /* HP/HS */
2066 	}
2067 
2068 	/* Check whether the remote device status matches our setting */
2069 	if (phy_reg != (db->HPNA_command & 0x0f00)) {
2070 		dmfe_phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command,
2071 			       db->chip_id);
2072 		db->HPNA_timer = 8;
2073 	} else
2074 		db->HPNA_timer = 600;	/* Match; check again every 10 minutes */
2075 }
2076 
2077 
2078 
2079 static const struct pci_device_id dmfe_pci_tbl[] = {
2080 	{ 0x1282, 0x9132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9132_ID },
2081 	{ 0x1282, 0x9102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9102_ID },
2082 	{ 0x1282, 0x9100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9100_ID },
2083 	{ 0x1282, 0x9009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9009_ID },
2084 	{ 0, }
2085 };
2086 MODULE_DEVICE_TABLE(pci, dmfe_pci_tbl);
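
/*
 *	Note: the last field of each table entry above is .driver_data.
 *	The probe routine receives the matched entry as its
 *	"const struct pci_device_id *" argument and can recover the chip
 *	ID from it, e.g. (illustrative, parameter name assumed):
 *
 *		db->chip_id = ent->driver_data;
 */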
2087 
2088 static int __maybe_unused dmfe_suspend(struct device *dev_d)
2089 {
2090 	struct net_device *dev = dev_get_drvdata(dev_d);
2091 	struct dmfe_board_info *db = netdev_priv(dev);
2092 	void __iomem *ioaddr = db->ioaddr;
2093 
2094 	/* Disable upper layer interface */
2095 	netif_device_detach(dev);
2096 
2097 	/* Disable Tx/Rx */
2098 	db->cr6_data &= ~(CR6_RXSC | CR6_TXSC);
2099 	update_cr6(db->cr6_data, ioaddr);
2100 
2101 	/* Disable Interrupt */
2102 	dw32(DCR7, 0);
2103 	dw32(DCR5, dr32(DCR5));
2104 
2105 	/* Free RX buffers */
2106 	dmfe_free_rxbuffer(db);
2107 
2108 	/* Enable WOL */
2109 	device_wakeup_enable(dev_d);
2110 
2111 	return 0;
2112 }
2113 
2114 static int __maybe_unused dmfe_resume(struct device *dev_d)
2115 {
2116 	struct net_device *dev = dev_get_drvdata(dev_d);
2117 
2118 	/* Re-initialize DM910X board */
2119 	dmfe_init_dm910x(dev);
2120 
2121 	/* Disable WOL */
2122 	device_wakeup_disable(dev_d);
2123 
2124 	/* Restart upper layer interface */
2125 	netif_device_attach(dev);
2126 
2127 	return 0;
2128 }
2129 
2130 static SIMPLE_DEV_PM_OPS(dmfe_pm_ops, dmfe_suspend, dmfe_resume);
2131 
2132 static struct pci_driver dmfe_driver = {
2133 	.name		= "dmfe",
2134 	.id_table	= dmfe_pci_tbl,
2135 	.probe		= dmfe_init_one,
2136 	.remove		= dmfe_remove_one,
2137 	.driver.pm	= &dmfe_pm_ops,
2138 };
2139 
2140 MODULE_AUTHOR("Sten Wang, sten_wang@davicom.com.tw");
2141 MODULE_DESCRIPTION("Davicom DM910X fast ethernet driver");
2142 MODULE_LICENSE("GPL");
2143 
2144 module_param(debug, int, 0);
2145 module_param(mode, byte, 0);
2146 module_param(cr6set, int, 0);
2147 module_param(chkmode, byte, 0);
2148 module_param(HPNA_mode, byte, 0);
2149 module_param(HPNA_rx_cmd, byte, 0);
2150 module_param(HPNA_tx_cmd, byte, 0);
2151 module_param(HPNA_NoiseFloor, byte, 0);
2152 module_param(SF_mode, byte, 0);
2153 MODULE_PARM_DESC(debug, "Davicom DM9xxx enable debugging (0-1)");
2154 MODULE_PARM_DESC(mode, "Davicom DM9xxx: "
2155 		"Bit 0: 10/100Mbps, bit 2: duplex, bit 8: HomePNA");
2156 
2157 MODULE_PARM_DESC(SF_mode, "Davicom DM9xxx special function "
2158 		"(bit 0: VLAN, bit 1: Flow Control, bit 2: TX pause packet)");
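
/*
 *	Usage sketch for the parameters above, assuming the mode encoding
 *	given in its description (bit 0: 10/100Mbps, bit 2: duplex):
 *	forcing 100Mbps full duplex with the VLAN special function enabled
 *	at load time:
 *
 *		modprobe dmfe mode=5 SF_mode=1
 */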
2159 
2160 /*	Description:
2161  *	When the user loads the module with insmod/modprobe, the kernel
2162  *	invokes this init routine to initialize and register the driver.
2163  */
2164 
2165 static int __init dmfe_init_module(void)
2166 {
2169 	DMFE_DBUG(0, "init_module() ", debug);
2170 
2171 	if (debug)
2172 		dmfe_debug = debug;	/* set debug flag */
2173 	if (cr6set)
2174 		dmfe_cr6_user_set = cr6set;
2175 
2176 	switch (mode) {
2177 	case DMFE_10MHF:
2178 	case DMFE_100MHF:
2179 	case DMFE_10MFD:
2180 	case DMFE_100MFD:
2181 	case DMFE_1M_HPNA:
2182 		dmfe_media_mode = mode;
2183 		break;
2184 	default:
2185 		dmfe_media_mode = DMFE_AUTO;
2186 		break;
2187 	}
2188 
2189 	if (HPNA_mode > 4)
2190 		HPNA_mode = 0;		/* Default: LP/HS */
2191 	if (HPNA_rx_cmd > 1)
2192 		HPNA_rx_cmd = 0;	/* Default: Ignored remote cmd */
2193 	if (HPNA_tx_cmd > 1)
2194 		HPNA_tx_cmd = 0;	/* Default: Don't issue remote cmd */
2195 	if (HPNA_NoiseFloor > 15)
2196 		HPNA_NoiseFloor = 0;
2197 
2198 	return pci_register_driver(&dmfe_driver);
2203 }
2204 
2205 
2206 /*
2207  *	Description:
2208  *	When the user removes the module with rmmod, the kernel invokes
2209  *	this exit routine to unregister the driver.
2210  */
2211 
2212 static void __exit dmfe_cleanup_module(void)
2213 {
2214 	DMFE_DBUG(0, "dmfe_cleanup_module() ", debug);
2215 	pci_unregister_driver(&dmfe_driver);
2216 }
2217 
2218 module_init(dmfe_init_module);
2219 module_exit(dmfe_cleanup_module);
2220