xref: /openbmc/linux/drivers/net/ethernet/dec/tulip/dmfe.c (revision a88394cfb58007cca945699545469017beb0d206)
1*a88394cfSJeff Kirsher /*
2*a88394cfSJeff Kirsher     A Davicom DM9102/DM9102A/DM9102A+DM9801/DM9102A+DM9802 NIC fast
3*a88394cfSJeff Kirsher     ethernet driver for Linux.
4*a88394cfSJeff Kirsher     Copyright (C) 1997  Sten Wang
5*a88394cfSJeff Kirsher 
6*a88394cfSJeff Kirsher     This program is free software; you can redistribute it and/or
7*a88394cfSJeff Kirsher     modify it under the terms of the GNU General Public License
8*a88394cfSJeff Kirsher     as published by the Free Software Foundation; either version 2
9*a88394cfSJeff Kirsher     of the License, or (at your option) any later version.
10*a88394cfSJeff Kirsher 
11*a88394cfSJeff Kirsher     This program is distributed in the hope that it will be useful,
12*a88394cfSJeff Kirsher     but WITHOUT ANY WARRANTY; without even the implied warranty of
13*a88394cfSJeff Kirsher     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14*a88394cfSJeff Kirsher     GNU General Public License for more details.
15*a88394cfSJeff Kirsher 
16*a88394cfSJeff Kirsher     DAVICOM Web-Site: www.davicom.com.tw
17*a88394cfSJeff Kirsher 
18*a88394cfSJeff Kirsher     Author: Sten Wang, 886-3-5798797-8517, E-mail: sten_wang@davicom.com.tw
19*a88394cfSJeff Kirsher     Maintainer: Tobias Ringstrom <tori@unhappy.mine.nu>
20*a88394cfSJeff Kirsher 
21*a88394cfSJeff Kirsher     (C)Copyright 1997-1998 DAVICOM Semiconductor,Inc. All Rights Reserved.
22*a88394cfSJeff Kirsher 
23*a88394cfSJeff Kirsher     Marcelo Tosatti <marcelo@conectiva.com.br> :
24*a88394cfSJeff Kirsher     Made it compile in 2.3 (device to net_device)
25*a88394cfSJeff Kirsher 
26*a88394cfSJeff Kirsher     Alan Cox <alan@lxorguk.ukuu.org.uk> :
27*a88394cfSJeff Kirsher     Cleaned up for kernel merge.
28*a88394cfSJeff Kirsher     Removed the back compatibility support
29*a88394cfSJeff Kirsher     Reformatted, fixing spelling etc as I went
30*a88394cfSJeff Kirsher     Removed IRQ 0-15 assumption
31*a88394cfSJeff Kirsher 
32*a88394cfSJeff Kirsher     Jeff Garzik <jgarzik@pobox.com> :
33*a88394cfSJeff Kirsher     Updated to use new PCI driver API.
34*a88394cfSJeff Kirsher     Resource usage cleanups.
35*a88394cfSJeff Kirsher     Report driver version to user.
36*a88394cfSJeff Kirsher 
37*a88394cfSJeff Kirsher     Tobias Ringstrom <tori@unhappy.mine.nu> :
38*a88394cfSJeff Kirsher     Cleaned up and added SMP safety.  Thanks go to Jeff Garzik,
39*a88394cfSJeff Kirsher     Andrew Morton and Frank Davis for the SMP safety fixes.
40*a88394cfSJeff Kirsher 
41*a88394cfSJeff Kirsher     Vojtech Pavlik <vojtech@suse.cz> :
42*a88394cfSJeff Kirsher     Cleaned up pointer arithmetic.
43*a88394cfSJeff Kirsher     Fixed a lot of 64bit issues.
44*a88394cfSJeff Kirsher     Cleaned up printk()s a bit.
45*a88394cfSJeff Kirsher     Fixed some obvious big endian problems.
46*a88394cfSJeff Kirsher 
47*a88394cfSJeff Kirsher     Tobias Ringstrom <tori@unhappy.mine.nu> :
48*a88394cfSJeff Kirsher     Use time_after for jiffies calculation.  Added ethtool
49*a88394cfSJeff Kirsher     support.  Updated PCI resource allocation.  Do not
50*a88394cfSJeff Kirsher     forget to unmap PCI mapped skbs.
51*a88394cfSJeff Kirsher 
52*a88394cfSJeff Kirsher     Alan Cox <alan@lxorguk.ukuu.org.uk>
53*a88394cfSJeff Kirsher     Added new PCI identifiers provided by Clear Zhang at ALi
54*a88394cfSJeff Kirsher     for their 1563 ethernet device.
55*a88394cfSJeff Kirsher 
56*a88394cfSJeff Kirsher     TODO
57*a88394cfSJeff Kirsher 
58*a88394cfSJeff Kirsher     Check on 64 bit boxes.
59*a88394cfSJeff Kirsher     Check and fix on big endian boxes.
60*a88394cfSJeff Kirsher 
61*a88394cfSJeff Kirsher     Test and make sure PCI latency is now correct for all cases.
62*a88394cfSJeff Kirsher */
63*a88394cfSJeff Kirsher 
64*a88394cfSJeff Kirsher #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
65*a88394cfSJeff Kirsher 
66*a88394cfSJeff Kirsher #define DRV_NAME	"dmfe"
67*a88394cfSJeff Kirsher #define DRV_VERSION	"1.36.4"
68*a88394cfSJeff Kirsher #define DRV_RELDATE	"2002-01-17"
69*a88394cfSJeff Kirsher 
70*a88394cfSJeff Kirsher #include <linux/module.h>
71*a88394cfSJeff Kirsher #include <linux/kernel.h>
72*a88394cfSJeff Kirsher #include <linux/string.h>
73*a88394cfSJeff Kirsher #include <linux/timer.h>
74*a88394cfSJeff Kirsher #include <linux/ptrace.h>
75*a88394cfSJeff Kirsher #include <linux/errno.h>
76*a88394cfSJeff Kirsher #include <linux/ioport.h>
77*a88394cfSJeff Kirsher #include <linux/interrupt.h>
78*a88394cfSJeff Kirsher #include <linux/pci.h>
79*a88394cfSJeff Kirsher #include <linux/dma-mapping.h>
80*a88394cfSJeff Kirsher #include <linux/init.h>
81*a88394cfSJeff Kirsher #include <linux/netdevice.h>
82*a88394cfSJeff Kirsher #include <linux/etherdevice.h>
83*a88394cfSJeff Kirsher #include <linux/ethtool.h>
84*a88394cfSJeff Kirsher #include <linux/skbuff.h>
85*a88394cfSJeff Kirsher #include <linux/delay.h>
86*a88394cfSJeff Kirsher #include <linux/spinlock.h>
87*a88394cfSJeff Kirsher #include <linux/crc32.h>
88*a88394cfSJeff Kirsher #include <linux/bitops.h>
89*a88394cfSJeff Kirsher 
90*a88394cfSJeff Kirsher #include <asm/processor.h>
91*a88394cfSJeff Kirsher #include <asm/io.h>
92*a88394cfSJeff Kirsher #include <asm/dma.h>
93*a88394cfSJeff Kirsher #include <asm/uaccess.h>
94*a88394cfSJeff Kirsher #include <asm/irq.h>
95*a88394cfSJeff Kirsher 
96*a88394cfSJeff Kirsher #ifdef CONFIG_TULIP_DM910X
97*a88394cfSJeff Kirsher #include <linux/of.h>
98*a88394cfSJeff Kirsher #endif
99*a88394cfSJeff Kirsher 
100*a88394cfSJeff Kirsher 
101*a88394cfSJeff Kirsher /* Board/System/Debug information/definition ---------------- */
102*a88394cfSJeff Kirsher #define PCI_DM9132_ID   0x91321282      /* Davicom DM9132 ID */
103*a88394cfSJeff Kirsher #define PCI_DM9102_ID   0x91021282      /* Davicom DM9102 ID */
104*a88394cfSJeff Kirsher #define PCI_DM9100_ID   0x91001282      /* Davicom DM9100 ID */
105*a88394cfSJeff Kirsher #define PCI_DM9009_ID   0x90091282      /* Davicom DM9009 ID */
106*a88394cfSJeff Kirsher 
107*a88394cfSJeff Kirsher #define DM9102_IO_SIZE  0x80
108*a88394cfSJeff Kirsher #define DM9102A_IO_SIZE 0x100
109*a88394cfSJeff Kirsher #define TX_MAX_SEND_CNT 0x1             /* Maximum tx packets in flight at one time */
110*a88394cfSJeff Kirsher #define TX_DESC_CNT     0x10            /* Allocated Tx descriptors */
111*a88394cfSJeff Kirsher #define RX_DESC_CNT     0x20            /* Allocated Rx descriptors */
112*a88394cfSJeff Kirsher #define TX_FREE_DESC_CNT (TX_DESC_CNT - 2)	/* Max TX packet count */
113*a88394cfSJeff Kirsher #define TX_WAKE_DESC_CNT (TX_DESC_CNT - 3)	/* TX wakeup count */
114*a88394cfSJeff Kirsher #define DESC_ALL_CNT    (TX_DESC_CNT + RX_DESC_CNT)
115*a88394cfSJeff Kirsher #define TX_BUF_ALLOC    0x600
116*a88394cfSJeff Kirsher #define RX_ALLOC_SIZE   0x620
117*a88394cfSJeff Kirsher #define DM910X_RESET    1
118*a88394cfSJeff Kirsher #define CR0_DEFAULT     0x00E00000      /* TX & RX burst mode */
119*a88394cfSJeff Kirsher #define CR6_DEFAULT     0x00080000      /* HD */
120*a88394cfSJeff Kirsher #define CR7_DEFAULT     0x180c1
121*a88394cfSJeff Kirsher #define CR15_DEFAULT    0x06            /* TxJabber RxWatchdog */
122*a88394cfSJeff Kirsher #define TDES0_ERR_MASK  0x4302          /* TXJT, LC, EC, FUE */
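/*
 * TDES0_ERR_MASK (0x4302) decodes as 0x4000 (TXJT, jabber time-out),
 * 0x0200 (LC, late collision), 0x0100 (EC, excessive collision) and
 * 0x0002 (FUE, FIFO underrun); dmfe_free_tx_pkt() below inspects the
 * same bits individually when updating the error counters.
 */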
123*a88394cfSJeff Kirsher #define MAX_PACKET_SIZE 1514
124*a88394cfSJeff Kirsher #define DMFE_MAX_MULTICAST 14
125*a88394cfSJeff Kirsher #define RX_COPY_SIZE	100
126*a88394cfSJeff Kirsher #define MAX_CHECK_PACKET 0x8000
127*a88394cfSJeff Kirsher #define DM9801_NOISE_FLOOR 8
128*a88394cfSJeff Kirsher #define DM9802_NOISE_FLOOR 5
129*a88394cfSJeff Kirsher 
130*a88394cfSJeff Kirsher #define DMFE_WOL_LINKCHANGE	0x20000000
131*a88394cfSJeff Kirsher #define DMFE_WOL_SAMPLEPACKET	0x10000000
132*a88394cfSJeff Kirsher #define DMFE_WOL_MAGICPACKET	0x08000000
133*a88394cfSJeff Kirsher 
134*a88394cfSJeff Kirsher 
135*a88394cfSJeff Kirsher #define DMFE_10MHF      0
136*a88394cfSJeff Kirsher #define DMFE_100MHF     1
137*a88394cfSJeff Kirsher #define DMFE_10MFD      4
138*a88394cfSJeff Kirsher #define DMFE_100MFD     5
139*a88394cfSJeff Kirsher #define DMFE_AUTO       8
140*a88394cfSJeff Kirsher #define DMFE_1M_HPNA    0x10
141*a88394cfSJeff Kirsher 
142*a88394cfSJeff Kirsher #define DMFE_TXTH_72	0x400000	/* TX TH 72 byte */
143*a88394cfSJeff Kirsher #define DMFE_TXTH_96	0x404000	/* TX TH 96 byte */
144*a88394cfSJeff Kirsher #define DMFE_TXTH_128	0x0000		/* TX TH 128 byte */
145*a88394cfSJeff Kirsher #define DMFE_TXTH_256	0x4000		/* TX TH 256 byte */
146*a88394cfSJeff Kirsher #define DMFE_TXTH_512	0x8000		/* TX TH 512 byte */
147*a88394cfSJeff Kirsher #define DMFE_TXTH_1K	0xC000		/* TX TH 1K  byte */
148*a88394cfSJeff Kirsher 
149*a88394cfSJeff Kirsher #define DMFE_TIMER_WUT  (jiffies + HZ * 1)	/* timer wakeup time : 1 second */
150*a88394cfSJeff Kirsher #define DMFE_TX_TIMEOUT ((3*HZ)/2)	/* tx packet time-out time 1.5 s */
151*a88394cfSJeff Kirsher #define DMFE_TX_KICK 	(HZ/2)	/* tx packet kick-out time 0.5 s */
152*a88394cfSJeff Kirsher 
153*a88394cfSJeff Kirsher #define DMFE_DBUG(dbug_now, msg, value)			\
154*a88394cfSJeff Kirsher 	do {						\
155*a88394cfSJeff Kirsher 		if (dmfe_debug || (dbug_now))		\
156*a88394cfSJeff Kirsher 			pr_err("%s %lx\n",		\
157*a88394cfSJeff Kirsher 			       (msg), (long) (value));	\
158*a88394cfSJeff Kirsher 	} while (0)
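/*
 * For example, DMFE_DBUG(0, "dmfe_open", 0) is printed only when the
 * dmfe_debug flag is set, while a non-zero dbug_now argument forces the
 * message out unconditionally.
 */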
159*a88394cfSJeff Kirsher 
160*a88394cfSJeff Kirsher #define SHOW_MEDIA_TYPE(mode)				\
161*a88394cfSJeff Kirsher 	pr_info("Change Speed to %sMbps %s duplex\n",	\
162*a88394cfSJeff Kirsher 		(mode & 1) ? "100":"10",		\
163*a88394cfSJeff Kirsher 		(mode & 4) ? "full":"half");
164*a88394cfSJeff Kirsher 
165*a88394cfSJeff Kirsher 
166*a88394cfSJeff Kirsher /* CR9 definition: SROM/MII */
167*a88394cfSJeff Kirsher #define CR9_SROM_READ   0x4800
168*a88394cfSJeff Kirsher #define CR9_SRCS        0x1
169*a88394cfSJeff Kirsher #define CR9_SRCLK       0x2
170*a88394cfSJeff Kirsher #define CR9_CRDOUT      0x8
171*a88394cfSJeff Kirsher #define SROM_DATA_0     0x0
172*a88394cfSJeff Kirsher #define SROM_DATA_1     0x4
173*a88394cfSJeff Kirsher #define PHY_DATA_1      0x20000
174*a88394cfSJeff Kirsher #define PHY_DATA_0      0x00000
175*a88394cfSJeff Kirsher #define MDCLKH          0x10000
176*a88394cfSJeff Kirsher 
177*a88394cfSJeff Kirsher #define PHY_POWER_DOWN	0x800
178*a88394cfSJeff Kirsher 
179*a88394cfSJeff Kirsher #define SROM_V41_CODE   0x14
180*a88394cfSJeff Kirsher 
181*a88394cfSJeff Kirsher #define SROM_CLK_WRITE(data, ioaddr) \
182*a88394cfSJeff Kirsher 	outl(data|CR9_SROM_READ|CR9_SRCS,ioaddr); \
183*a88394cfSJeff Kirsher 	udelay(5); \
184*a88394cfSJeff Kirsher 	outl(data|CR9_SROM_READ|CR9_SRCS|CR9_SRCLK,ioaddr); \
185*a88394cfSJeff Kirsher 	udelay(5); \
186*a88394cfSJeff Kirsher 	outl(data|CR9_SROM_READ|CR9_SRCS,ioaddr); \
187*a88394cfSJeff Kirsher 	udelay(5);
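/*
 * SROM_CLK_WRITE shifts one bit towards the serial ROM: with chip select
 * (CR9_SRCS) and the read command held, SRCLK is pulsed low-high-low around
 * the data bit.  Note that the macro expands to several statements, so it is
 * only safe where a full compound statement is acceptable.
 */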
188*a88394cfSJeff Kirsher 
189*a88394cfSJeff Kirsher #define __CHK_IO_SIZE(pci_id, dev_rev) \
190*a88394cfSJeff Kirsher  (( ((pci_id)==PCI_DM9132_ID) || ((dev_rev) >= 0x30) ) ? \
191*a88394cfSJeff Kirsher 	DM9102A_IO_SIZE: DM9102_IO_SIZE)
192*a88394cfSJeff Kirsher 
193*a88394cfSJeff Kirsher #define CHK_IO_SIZE(pci_dev) \
194*a88394cfSJeff Kirsher 	(__CHK_IO_SIZE(((pci_dev)->device << 16) | (pci_dev)->vendor, \
195*a88394cfSJeff Kirsher 	(pci_dev)->revision))
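/*
 * CHK_IO_SIZE() rebuilds the 32-bit chip id from the PCI device/vendor pair
 * and selects the larger register window for newer parts.  For example, a
 * DM9102A (device 0x9102, vendor 0x1282, revision >= 0x30) yields
 * DM9102A_IO_SIZE (0x100), while an early DM9102 falls back to
 * DM9102_IO_SIZE (0x80).
 */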
196*a88394cfSJeff Kirsher 
197*a88394cfSJeff Kirsher /* Sten Check */
198*a88394cfSJeff Kirsher #define DEVICE net_device
199*a88394cfSJeff Kirsher 
200*a88394cfSJeff Kirsher /* Structure/enum declaration ------------------------------- */
201*a88394cfSJeff Kirsher struct tx_desc {
202*a88394cfSJeff Kirsher         __le32 tdes0, tdes1, tdes2, tdes3; /* Data for the card */
203*a88394cfSJeff Kirsher         char *tx_buf_ptr;               /* Data for us */
204*a88394cfSJeff Kirsher         struct tx_desc *next_tx_desc;
205*a88394cfSJeff Kirsher } __attribute__(( aligned(32) ));
206*a88394cfSJeff Kirsher 
207*a88394cfSJeff Kirsher struct rx_desc {
208*a88394cfSJeff Kirsher 	__le32 rdes0, rdes1, rdes2, rdes3; /* Data for the card */
209*a88394cfSJeff Kirsher 	struct sk_buff *rx_skb_ptr;	/* Data for us */
210*a88394cfSJeff Kirsher 	struct rx_desc *next_rx_desc;
211*a88394cfSJeff Kirsher } __attribute__(( aligned(32) ));
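/*
 * Both descriptor types are 32-byte aligned: the leading four __le32 words
 * form the hardware descriptor that the DM910X reads over DMA, while the
 * trailing pointers are driver-only bookkeeping used to walk the ring and
 * to find the matching buffer or skb.
 */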
212*a88394cfSJeff Kirsher 
213*a88394cfSJeff Kirsher struct dmfe_board_info {
214*a88394cfSJeff Kirsher 	u32 chip_id;			/* Chip vendor/Device ID */
215*a88394cfSJeff Kirsher 	u8 chip_revision;		/* Chip revision */
216*a88394cfSJeff Kirsher 	struct DEVICE *next_dev;	/* next device */
217*a88394cfSJeff Kirsher 	struct pci_dev *pdev;		/* PCI device */
218*a88394cfSJeff Kirsher 	spinlock_t lock;
219*a88394cfSJeff Kirsher 
220*a88394cfSJeff Kirsher 	long ioaddr;			/* I/O base address */
221*a88394cfSJeff Kirsher 	u32 cr0_data;
222*a88394cfSJeff Kirsher 	u32 cr5_data;
223*a88394cfSJeff Kirsher 	u32 cr6_data;
224*a88394cfSJeff Kirsher 	u32 cr7_data;
225*a88394cfSJeff Kirsher 	u32 cr15_data;
226*a88394cfSJeff Kirsher 
227*a88394cfSJeff Kirsher 	/* pointer for memory physical address */
228*a88394cfSJeff Kirsher 	dma_addr_t buf_pool_dma_ptr;	/* Tx buffer pool memory */
229*a88394cfSJeff Kirsher 	dma_addr_t buf_pool_dma_start;	/* Tx buffer pool align dword */
230*a88394cfSJeff Kirsher 	dma_addr_t desc_pool_dma_ptr;	/* descriptor pool memory */
231*a88394cfSJeff Kirsher 	dma_addr_t first_tx_desc_dma;
232*a88394cfSJeff Kirsher 	dma_addr_t first_rx_desc_dma;
233*a88394cfSJeff Kirsher 
234*a88394cfSJeff Kirsher 	/* descriptor pointer */
235*a88394cfSJeff Kirsher 	unsigned char *buf_pool_ptr;	/* Tx buffer pool memory */
236*a88394cfSJeff Kirsher 	unsigned char *buf_pool_start;	/* Tx buffer pool align dword */
237*a88394cfSJeff Kirsher 	unsigned char *desc_pool_ptr;	/* descriptor pool memory */
238*a88394cfSJeff Kirsher 	struct tx_desc *first_tx_desc;
239*a88394cfSJeff Kirsher 	struct tx_desc *tx_insert_ptr;
240*a88394cfSJeff Kirsher 	struct tx_desc *tx_remove_ptr;
241*a88394cfSJeff Kirsher 	struct rx_desc *first_rx_desc;
242*a88394cfSJeff Kirsher 	struct rx_desc *rx_insert_ptr;
243*a88394cfSJeff Kirsher 	struct rx_desc *rx_ready_ptr;	/* next descriptor holding a received packet */
244*a88394cfSJeff Kirsher 	unsigned long tx_packet_cnt;	/* tx packets handed to the NIC (in flight) */
245*a88394cfSJeff Kirsher 	unsigned long tx_queue_cnt;	/* tx packets queued, waiting to be sent */
246*a88394cfSJeff Kirsher 	unsigned long rx_avail_cnt;	/* available rx descriptor count */
247*a88394cfSJeff Kirsher 	unsigned long interval_rx_cnt;	/* rx packets received per timer interval */
248*a88394cfSJeff Kirsher 
249*a88394cfSJeff Kirsher 	u16 HPNA_command;		/* For HPNA register 16 */
250*a88394cfSJeff Kirsher 	u16 HPNA_timer;			/* For HPNA remote device check */
251*a88394cfSJeff Kirsher 	u16 dbug_cnt;
252*a88394cfSJeff Kirsher 	u16 NIC_capability;		/* NIC media capability */
253*a88394cfSJeff Kirsher 	u16 PHY_reg4;			/* Saved Phyxcer register 4 value */
254*a88394cfSJeff Kirsher 
255*a88394cfSJeff Kirsher 	u8 HPNA_present;		/* 0:none, 1:DM9801, 2:DM9802 */
256*a88394cfSJeff Kirsher 	u8 chip_type;			/* Keep DM9102A chip type */
257*a88394cfSJeff Kirsher 	u8 media_mode;			/* user specify media mode */
258*a88394cfSJeff Kirsher 	u8 op_mode;			/* actual working media mode */
259*a88394cfSJeff Kirsher 	u8 phy_addr;
260*a88394cfSJeff Kirsher 	u8 wait_reset;			/* Hardware failed, need to reset */
261*a88394cfSJeff Kirsher 	u8 dm910x_chk_mode;		/* Operating mode check */
262*a88394cfSJeff Kirsher 	u8 first_in_callback;		/* Flag to record state */
263*a88394cfSJeff Kirsher 	u8 wol_mode;			/* user WOL settings */
264*a88394cfSJeff Kirsher 	struct timer_list timer;
265*a88394cfSJeff Kirsher 
266*a88394cfSJeff Kirsher 	/* Driver defined statistic counter */
267*a88394cfSJeff Kirsher 	unsigned long tx_fifo_underrun;
268*a88394cfSJeff Kirsher 	unsigned long tx_loss_carrier;
269*a88394cfSJeff Kirsher 	unsigned long tx_no_carrier;
270*a88394cfSJeff Kirsher 	unsigned long tx_late_collision;
271*a88394cfSJeff Kirsher 	unsigned long tx_excessive_collision;
272*a88394cfSJeff Kirsher 	unsigned long tx_jabber_timeout;
273*a88394cfSJeff Kirsher 	unsigned long reset_count;
274*a88394cfSJeff Kirsher 	unsigned long reset_cr8;
275*a88394cfSJeff Kirsher 	unsigned long reset_fatal;
276*a88394cfSJeff Kirsher 	unsigned long reset_TXtimeout;
277*a88394cfSJeff Kirsher 
278*a88394cfSJeff Kirsher 	/* NIC SROM data */
279*a88394cfSJeff Kirsher 	unsigned char srom[128];
280*a88394cfSJeff Kirsher };
281*a88394cfSJeff Kirsher 
282*a88394cfSJeff Kirsher enum dmfe_offsets {
283*a88394cfSJeff Kirsher 	DCR0 = 0x00, DCR1 = 0x08, DCR2 = 0x10, DCR3 = 0x18, DCR4 = 0x20,
284*a88394cfSJeff Kirsher 	DCR5 = 0x28, DCR6 = 0x30, DCR7 = 0x38, DCR8 = 0x40, DCR9 = 0x48,
285*a88394cfSJeff Kirsher 	DCR10 = 0x50, DCR11 = 0x58, DCR12 = 0x60, DCR13 = 0x68, DCR14 = 0x70,
286*a88394cfSJeff Kirsher 	DCR15 = 0x78
287*a88394cfSJeff Kirsher };
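/*
 * The DCRn offsets step by 8 bytes, i.e. register n lives at ioaddr + n * 8;
 * for example the interrupt mask is written with
 * outl(db->cr7_data, ioaddr + DCR7), DCR7 being 0x38.
 */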
288*a88394cfSJeff Kirsher 
289*a88394cfSJeff Kirsher enum dmfe_CR6_bits {
290*a88394cfSJeff Kirsher 	CR6_RXSC = 0x2, CR6_PBF = 0x8, CR6_PM = 0x40, CR6_PAM = 0x80,
291*a88394cfSJeff Kirsher 	CR6_FDM = 0x200, CR6_TXSC = 0x2000, CR6_STI = 0x100000,
292*a88394cfSJeff Kirsher 	CR6_SFT = 0x200000, CR6_RXA = 0x40000000, CR6_NO_PURGE = 0x20000000
293*a88394cfSJeff Kirsher };
294*a88394cfSJeff Kirsher 
295*a88394cfSJeff Kirsher /* Global variable declaration ----------------------------- */
296*a88394cfSJeff Kirsher static int __devinitdata printed_version;
297*a88394cfSJeff Kirsher static const char version[] __devinitconst =
298*a88394cfSJeff Kirsher 	"Davicom DM9xxx net driver, version " DRV_VERSION " (" DRV_RELDATE ")";
299*a88394cfSJeff Kirsher 
300*a88394cfSJeff Kirsher static int dmfe_debug;
301*a88394cfSJeff Kirsher static unsigned char dmfe_media_mode = DMFE_AUTO;
302*a88394cfSJeff Kirsher static u32 dmfe_cr6_user_set;
303*a88394cfSJeff Kirsher 
304*a88394cfSJeff Kirsher /* For module input parameter */
305*a88394cfSJeff Kirsher static int debug;
306*a88394cfSJeff Kirsher static u32 cr6set;
307*a88394cfSJeff Kirsher static unsigned char mode = 8;
308*a88394cfSJeff Kirsher static u8 chkmode = 1;
309*a88394cfSJeff Kirsher static u8 HPNA_mode;		/* Default: Low Power/High Speed */
310*a88394cfSJeff Kirsher static u8 HPNA_rx_cmd;		/* Default: Disable Rx remote command */
311*a88394cfSJeff Kirsher static u8 HPNA_tx_cmd;		/* Default: Don't issue remote command */
312*a88394cfSJeff Kirsher static u8 HPNA_NoiseFloor;	/* Default: HPNA NoiseFloor */
313*a88394cfSJeff Kirsher static u8 SF_mode;		/* Special Function: 1:VLAN, 2:RX Flow Control
314*a88394cfSJeff Kirsher 				   4: TX pause packet */
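/*
 * These variables back the module input parameters; their values follow the
 * DMFE_* media defines above, e.g. mode=0 would force 10M half duplex
 * (DMFE_10MHF) while the default mode=8 selects DMFE_AUTO.
 */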
315*a88394cfSJeff Kirsher 
316*a88394cfSJeff Kirsher 
317*a88394cfSJeff Kirsher /* function declaration ------------------------------------- */
318*a88394cfSJeff Kirsher static int dmfe_open(struct DEVICE *);
319*a88394cfSJeff Kirsher static netdev_tx_t dmfe_start_xmit(struct sk_buff *, struct DEVICE *);
320*a88394cfSJeff Kirsher static int dmfe_stop(struct DEVICE *);
321*a88394cfSJeff Kirsher static void dmfe_set_filter_mode(struct DEVICE *);
322*a88394cfSJeff Kirsher static const struct ethtool_ops netdev_ethtool_ops;
323*a88394cfSJeff Kirsher static u16 read_srom_word(long ,int);
324*a88394cfSJeff Kirsher static irqreturn_t dmfe_interrupt(int , void *);
325*a88394cfSJeff Kirsher #ifdef CONFIG_NET_POLL_CONTROLLER
326*a88394cfSJeff Kirsher static void poll_dmfe (struct net_device *dev);
327*a88394cfSJeff Kirsher #endif
328*a88394cfSJeff Kirsher static void dmfe_descriptor_init(struct dmfe_board_info *, unsigned long);
329*a88394cfSJeff Kirsher static void allocate_rx_buffer(struct dmfe_board_info *);
330*a88394cfSJeff Kirsher static void update_cr6(u32, unsigned long);
331*a88394cfSJeff Kirsher static void send_filter_frame(struct DEVICE *);
332*a88394cfSJeff Kirsher static void dm9132_id_table(struct DEVICE *);
333*a88394cfSJeff Kirsher static u16 phy_read(unsigned long, u8, u8, u32);
334*a88394cfSJeff Kirsher static void phy_write(unsigned long, u8, u8, u16, u32);
335*a88394cfSJeff Kirsher static void phy_write_1bit(unsigned long, u32);
336*a88394cfSJeff Kirsher static u16 phy_read_1bit(unsigned long);
337*a88394cfSJeff Kirsher static u8 dmfe_sense_speed(struct dmfe_board_info *);
338*a88394cfSJeff Kirsher static void dmfe_process_mode(struct dmfe_board_info *);
339*a88394cfSJeff Kirsher static void dmfe_timer(unsigned long);
340*a88394cfSJeff Kirsher static inline u32 cal_CRC(unsigned char *, unsigned int, u8);
341*a88394cfSJeff Kirsher static void dmfe_rx_packet(struct DEVICE *, struct dmfe_board_info *);
342*a88394cfSJeff Kirsher static void dmfe_free_tx_pkt(struct DEVICE *, struct dmfe_board_info *);
343*a88394cfSJeff Kirsher static void dmfe_reuse_skb(struct dmfe_board_info *, struct sk_buff *);
344*a88394cfSJeff Kirsher static void dmfe_dynamic_reset(struct DEVICE *);
345*a88394cfSJeff Kirsher static void dmfe_free_rxbuffer(struct dmfe_board_info *);
346*a88394cfSJeff Kirsher static void dmfe_init_dm910x(struct DEVICE *);
347*a88394cfSJeff Kirsher static void dmfe_parse_srom(struct dmfe_board_info *);
348*a88394cfSJeff Kirsher static void dmfe_program_DM9801(struct dmfe_board_info *, int);
349*a88394cfSJeff Kirsher static void dmfe_program_DM9802(struct dmfe_board_info *);
350*a88394cfSJeff Kirsher static void dmfe_HPNA_remote_cmd_chk(struct dmfe_board_info * );
351*a88394cfSJeff Kirsher static void dmfe_set_phyxcer(struct dmfe_board_info *);
352*a88394cfSJeff Kirsher 
353*a88394cfSJeff Kirsher /* DM910X network board routine ---------------------------- */
354*a88394cfSJeff Kirsher 
355*a88394cfSJeff Kirsher static const struct net_device_ops netdev_ops = {
356*a88394cfSJeff Kirsher 	.ndo_open 		= dmfe_open,
357*a88394cfSJeff Kirsher 	.ndo_stop		= dmfe_stop,
358*a88394cfSJeff Kirsher 	.ndo_start_xmit		= dmfe_start_xmit,
359*a88394cfSJeff Kirsher 	.ndo_set_multicast_list = dmfe_set_filter_mode,
360*a88394cfSJeff Kirsher 	.ndo_change_mtu		= eth_change_mtu,
361*a88394cfSJeff Kirsher 	.ndo_set_mac_address	= eth_mac_addr,
362*a88394cfSJeff Kirsher 	.ndo_validate_addr	= eth_validate_addr,
363*a88394cfSJeff Kirsher #ifdef CONFIG_NET_POLL_CONTROLLER
364*a88394cfSJeff Kirsher 	.ndo_poll_controller	= poll_dmfe,
365*a88394cfSJeff Kirsher #endif
366*a88394cfSJeff Kirsher };
367*a88394cfSJeff Kirsher 
368*a88394cfSJeff Kirsher /*
369*a88394cfSJeff Kirsher  *	Search for a DM910X board, allocate space and register it
370*a88394cfSJeff Kirsher  */
371*a88394cfSJeff Kirsher 
372*a88394cfSJeff Kirsher static int __devinit dmfe_init_one (struct pci_dev *pdev,
373*a88394cfSJeff Kirsher 				    const struct pci_device_id *ent)
374*a88394cfSJeff Kirsher {
375*a88394cfSJeff Kirsher 	struct dmfe_board_info *db;	/* board information structure */
376*a88394cfSJeff Kirsher 	struct net_device *dev;
377*a88394cfSJeff Kirsher 	u32 pci_pmr;
378*a88394cfSJeff Kirsher 	int i, err;
379*a88394cfSJeff Kirsher 
380*a88394cfSJeff Kirsher 	DMFE_DBUG(0, "dmfe_init_one()", 0);
381*a88394cfSJeff Kirsher 
382*a88394cfSJeff Kirsher 	if (!printed_version++)
383*a88394cfSJeff Kirsher 		pr_info("%s\n", version);
384*a88394cfSJeff Kirsher 
385*a88394cfSJeff Kirsher 	/*
386*a88394cfSJeff Kirsher 	 *	SPARC on-board DM910x chips should be handled by the main
387*a88394cfSJeff Kirsher 	 *	tulip driver, except for early DM9100s.
388*a88394cfSJeff Kirsher 	 */
389*a88394cfSJeff Kirsher #ifdef CONFIG_TULIP_DM910X
390*a88394cfSJeff Kirsher 	if ((ent->driver_data == PCI_DM9100_ID && pdev->revision >= 0x30) ||
391*a88394cfSJeff Kirsher 	    ent->driver_data == PCI_DM9102_ID) {
392*a88394cfSJeff Kirsher 		struct device_node *dp = pci_device_to_OF_node(pdev);
393*a88394cfSJeff Kirsher 
394*a88394cfSJeff Kirsher 		if (dp && of_get_property(dp, "local-mac-address", NULL)) {
395*a88394cfSJeff Kirsher 			pr_info("skipping on-board DM910x (use tulip)\n");
396*a88394cfSJeff Kirsher 			return -ENODEV;
397*a88394cfSJeff Kirsher 		}
398*a88394cfSJeff Kirsher 	}
399*a88394cfSJeff Kirsher #endif
400*a88394cfSJeff Kirsher 
401*a88394cfSJeff Kirsher 	/* Init network device */
402*a88394cfSJeff Kirsher 	dev = alloc_etherdev(sizeof(*db));
403*a88394cfSJeff Kirsher 	if (dev == NULL)
404*a88394cfSJeff Kirsher 		return -ENOMEM;
405*a88394cfSJeff Kirsher 	SET_NETDEV_DEV(dev, &pdev->dev);
406*a88394cfSJeff Kirsher 
407*a88394cfSJeff Kirsher 	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
408*a88394cfSJeff Kirsher 		pr_warn("32-bit PCI DMA not available\n");
409*a88394cfSJeff Kirsher 		err = -ENODEV;
410*a88394cfSJeff Kirsher 		goto err_out_free;
411*a88394cfSJeff Kirsher 	}
412*a88394cfSJeff Kirsher 
413*a88394cfSJeff Kirsher 	/* Enable Master/IO access, Disable memory access */
414*a88394cfSJeff Kirsher 	err = pci_enable_device(pdev);
415*a88394cfSJeff Kirsher 	if (err)
416*a88394cfSJeff Kirsher 		goto err_out_free;
417*a88394cfSJeff Kirsher 
418*a88394cfSJeff Kirsher 	if (!pci_resource_start(pdev, 0)) {
419*a88394cfSJeff Kirsher 		pr_err("I/O base is zero\n");
420*a88394cfSJeff Kirsher 		err = -ENODEV;
421*a88394cfSJeff Kirsher 		goto err_out_disable;
422*a88394cfSJeff Kirsher 	}
423*a88394cfSJeff Kirsher 
424*a88394cfSJeff Kirsher 	if (pci_resource_len(pdev, 0) < (CHK_IO_SIZE(pdev)) ) {
425*a88394cfSJeff Kirsher 		pr_err("Allocated I/O size too small\n");
426*a88394cfSJeff Kirsher 		err = -ENODEV;
427*a88394cfSJeff Kirsher 		goto err_out_disable;
428*a88394cfSJeff Kirsher 	}
429*a88394cfSJeff Kirsher 
430*a88394cfSJeff Kirsher #if 0	/* pci_{enable_device,set_master} sets minimum latency for us now */
431*a88394cfSJeff Kirsher 
432*a88394cfSJeff Kirsher 	/* Set Latency Timer 80h */
433*a88394cfSJeff Kirsher 	/* FIXME: setting values > 32 breaks some SiS 559x stuff.
434*a88394cfSJeff Kirsher 	   Need a PCI quirk.. */
435*a88394cfSJeff Kirsher 
436*a88394cfSJeff Kirsher 	pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x80);
437*a88394cfSJeff Kirsher #endif
438*a88394cfSJeff Kirsher 
439*a88394cfSJeff Kirsher 	if (pci_request_regions(pdev, DRV_NAME)) {
440*a88394cfSJeff Kirsher 		pr_err("Failed to request PCI regions\n");
441*a88394cfSJeff Kirsher 		err = -ENODEV;
442*a88394cfSJeff Kirsher 		goto err_out_disable;
443*a88394cfSJeff Kirsher 	}
444*a88394cfSJeff Kirsher 
445*a88394cfSJeff Kirsher 	/* Init system & device */
446*a88394cfSJeff Kirsher 	db = netdev_priv(dev);
447*a88394cfSJeff Kirsher 
448*a88394cfSJeff Kirsher 	/* Allocate Tx/Rx descriptor memory */
449*a88394cfSJeff Kirsher 	db->desc_pool_ptr = pci_alloc_consistent(pdev, sizeof(struct tx_desc) *
450*a88394cfSJeff Kirsher 			DESC_ALL_CNT + 0x20, &db->desc_pool_dma_ptr);
451*a88394cfSJeff Kirsher 	if (!db->desc_pool_ptr)
452*a88394cfSJeff Kirsher 		goto err_out_res;
453*a88394cfSJeff Kirsher 
454*a88394cfSJeff Kirsher 	db->buf_pool_ptr = pci_alloc_consistent(pdev, TX_BUF_ALLOC *
455*a88394cfSJeff Kirsher 			TX_DESC_CNT + 4, &db->buf_pool_dma_ptr);
456*a88394cfSJeff Kirsher 	if (!db->buf_pool_ptr)
457*a88394cfSJeff Kirsher 		goto err_out_free_desc;
458*a88394cfSJeff Kirsher 
459*a88394cfSJeff Kirsher 	db->first_tx_desc = (struct tx_desc *) db->desc_pool_ptr;
460*a88394cfSJeff Kirsher 	db->first_tx_desc_dma = db->desc_pool_dma_ptr;
461*a88394cfSJeff Kirsher 	db->buf_pool_start = db->buf_pool_ptr;
462*a88394cfSJeff Kirsher 	db->buf_pool_dma_start = db->buf_pool_dma_ptr;
463*a88394cfSJeff Kirsher 
464*a88394cfSJeff Kirsher 	db->chip_id = ent->driver_data;
465*a88394cfSJeff Kirsher 	db->ioaddr = pci_resource_start(pdev, 0);
466*a88394cfSJeff Kirsher 	db->chip_revision = pdev->revision;
467*a88394cfSJeff Kirsher 	db->wol_mode = 0;
468*a88394cfSJeff Kirsher 
469*a88394cfSJeff Kirsher 	db->pdev = pdev;
470*a88394cfSJeff Kirsher 
471*a88394cfSJeff Kirsher 	dev->base_addr = db->ioaddr;
472*a88394cfSJeff Kirsher 	dev->irq = pdev->irq;
473*a88394cfSJeff Kirsher 	pci_set_drvdata(pdev, dev);
474*a88394cfSJeff Kirsher 	dev->netdev_ops = &netdev_ops;
475*a88394cfSJeff Kirsher 	dev->ethtool_ops = &netdev_ethtool_ops;
476*a88394cfSJeff Kirsher 	netif_carrier_off(dev);
477*a88394cfSJeff Kirsher 	spin_lock_init(&db->lock);
478*a88394cfSJeff Kirsher 
479*a88394cfSJeff Kirsher 	pci_read_config_dword(pdev, 0x50, &pci_pmr);
480*a88394cfSJeff Kirsher 	pci_pmr &= 0x70000;
481*a88394cfSJeff Kirsher 	if ( (pci_pmr == 0x10000) && (db->chip_revision == 0x31) )
482*a88394cfSJeff Kirsher 		db->chip_type = 1;	/* DM9102A E3 */
483*a88394cfSJeff Kirsher 	else
484*a88394cfSJeff Kirsher 		db->chip_type = 0;
485*a88394cfSJeff Kirsher 
486*a88394cfSJeff Kirsher 	/* read 64 word srom data */
487*a88394cfSJeff Kirsher 	for (i = 0; i < 64; i++)
488*a88394cfSJeff Kirsher 		((__le16 *) db->srom)[i] =
489*a88394cfSJeff Kirsher 			cpu_to_le16(read_srom_word(db->ioaddr, i));
490*a88394cfSJeff Kirsher 
491*a88394cfSJeff Kirsher 	/* Set Node address */
492*a88394cfSJeff Kirsher 	for (i = 0; i < 6; i++)
493*a88394cfSJeff Kirsher 		dev->dev_addr[i] = db->srom[20 + i];
494*a88394cfSJeff Kirsher 
495*a88394cfSJeff Kirsher 	err = register_netdev (dev);
496*a88394cfSJeff Kirsher 	if (err)
497*a88394cfSJeff Kirsher 		goto err_out_free_buf;
498*a88394cfSJeff Kirsher 
499*a88394cfSJeff Kirsher 	dev_info(&dev->dev, "Davicom DM%04lx at pci%s, %pM, irq %d\n",
500*a88394cfSJeff Kirsher 		 ent->driver_data >> 16,
501*a88394cfSJeff Kirsher 		 pci_name(pdev), dev->dev_addr, dev->irq);
502*a88394cfSJeff Kirsher 
503*a88394cfSJeff Kirsher 	pci_set_master(pdev);
504*a88394cfSJeff Kirsher 
505*a88394cfSJeff Kirsher 	return 0;
506*a88394cfSJeff Kirsher 
507*a88394cfSJeff Kirsher err_out_free_buf:
508*a88394cfSJeff Kirsher 	pci_free_consistent(pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4,
509*a88394cfSJeff Kirsher 			    db->buf_pool_ptr, db->buf_pool_dma_ptr);
510*a88394cfSJeff Kirsher err_out_free_desc:
511*a88394cfSJeff Kirsher 	pci_free_consistent(pdev, sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20,
512*a88394cfSJeff Kirsher 			    db->desc_pool_ptr, db->desc_pool_dma_ptr);
513*a88394cfSJeff Kirsher err_out_res:
514*a88394cfSJeff Kirsher 	pci_release_regions(pdev);
515*a88394cfSJeff Kirsher err_out_disable:
516*a88394cfSJeff Kirsher 	pci_disable_device(pdev);
517*a88394cfSJeff Kirsher err_out_free:
518*a88394cfSJeff Kirsher 	pci_set_drvdata(pdev, NULL);
519*a88394cfSJeff Kirsher 	free_netdev(dev);
520*a88394cfSJeff Kirsher 
521*a88394cfSJeff Kirsher 	return err;
522*a88394cfSJeff Kirsher }
523*a88394cfSJeff Kirsher 
524*a88394cfSJeff Kirsher 
525*a88394cfSJeff Kirsher static void __devexit dmfe_remove_one (struct pci_dev *pdev)
526*a88394cfSJeff Kirsher {
527*a88394cfSJeff Kirsher 	struct net_device *dev = pci_get_drvdata(pdev);
528*a88394cfSJeff Kirsher 	struct dmfe_board_info *db = netdev_priv(dev);
529*a88394cfSJeff Kirsher 
530*a88394cfSJeff Kirsher 	DMFE_DBUG(0, "dmfe_remove_one()", 0);
531*a88394cfSJeff Kirsher 
532*a88394cfSJeff Kirsher  	if (dev) {
533*a88394cfSJeff Kirsher 
534*a88394cfSJeff Kirsher 		unregister_netdev(dev);
535*a88394cfSJeff Kirsher 
536*a88394cfSJeff Kirsher 		pci_free_consistent(db->pdev, sizeof(struct tx_desc) *
537*a88394cfSJeff Kirsher 					DESC_ALL_CNT + 0x20, db->desc_pool_ptr,
538*a88394cfSJeff Kirsher  					db->desc_pool_dma_ptr);
539*a88394cfSJeff Kirsher 		pci_free_consistent(db->pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4,
540*a88394cfSJeff Kirsher 					db->buf_pool_ptr, db->buf_pool_dma_ptr);
541*a88394cfSJeff Kirsher 		pci_release_regions(pdev);
542*a88394cfSJeff Kirsher 		free_netdev(dev);	/* free board information */
543*a88394cfSJeff Kirsher 
544*a88394cfSJeff Kirsher 		pci_set_drvdata(pdev, NULL);
545*a88394cfSJeff Kirsher 	}
546*a88394cfSJeff Kirsher 
547*a88394cfSJeff Kirsher 	DMFE_DBUG(0, "dmfe_remove_one() exit", 0);
548*a88394cfSJeff Kirsher }
549*a88394cfSJeff Kirsher 
550*a88394cfSJeff Kirsher 
551*a88394cfSJeff Kirsher /*
552*a88394cfSJeff Kirsher  *	Open the interface.
553*a88394cfSJeff Kirsher  *	The interface is opened whenever "ifconfig" activates it.
554*a88394cfSJeff Kirsher  */
555*a88394cfSJeff Kirsher 
556*a88394cfSJeff Kirsher static int dmfe_open(struct DEVICE *dev)
557*a88394cfSJeff Kirsher {
558*a88394cfSJeff Kirsher 	int ret;
559*a88394cfSJeff Kirsher 	struct dmfe_board_info *db = netdev_priv(dev);
560*a88394cfSJeff Kirsher 
561*a88394cfSJeff Kirsher 	DMFE_DBUG(0, "dmfe_open", 0);
562*a88394cfSJeff Kirsher 
563*a88394cfSJeff Kirsher 	ret = request_irq(dev->irq, dmfe_interrupt,
564*a88394cfSJeff Kirsher 			  IRQF_SHARED, dev->name, dev);
565*a88394cfSJeff Kirsher 	if (ret)
566*a88394cfSJeff Kirsher 		return ret;
567*a88394cfSJeff Kirsher 
568*a88394cfSJeff Kirsher 	/* system variable init */
569*a88394cfSJeff Kirsher 	db->cr6_data = CR6_DEFAULT | dmfe_cr6_user_set;
570*a88394cfSJeff Kirsher 	db->tx_packet_cnt = 0;
571*a88394cfSJeff Kirsher 	db->tx_queue_cnt = 0;
572*a88394cfSJeff Kirsher 	db->rx_avail_cnt = 0;
573*a88394cfSJeff Kirsher 	db->wait_reset = 0;
574*a88394cfSJeff Kirsher 
575*a88394cfSJeff Kirsher 	db->first_in_callback = 0;
576*a88394cfSJeff Kirsher 	db->NIC_capability = 0xf;	/* All capability*/
577*a88394cfSJeff Kirsher 	db->PHY_reg4 = 0x1e0;
578*a88394cfSJeff Kirsher 
579*a88394cfSJeff Kirsher 	/* CR6 operation mode decision */
580*a88394cfSJeff Kirsher 	if ( !chkmode || (db->chip_id == PCI_DM9132_ID) ||
581*a88394cfSJeff Kirsher 		(db->chip_revision >= 0x30) ) {
582*a88394cfSJeff Kirsher     		db->cr6_data |= DMFE_TXTH_256;
583*a88394cfSJeff Kirsher 		db->cr0_data = CR0_DEFAULT;
584*a88394cfSJeff Kirsher 		db->dm910x_chk_mode=4;		/* Enter the normal mode */
585*a88394cfSJeff Kirsher  	} else {
586*a88394cfSJeff Kirsher 		db->cr6_data |= CR6_SFT;	/* Store & Forward mode */
587*a88394cfSJeff Kirsher 		db->cr0_data = 0;
588*a88394cfSJeff Kirsher 		db->dm910x_chk_mode = 1;	/* Enter the check mode */
589*a88394cfSJeff Kirsher 	}
590*a88394cfSJeff Kirsher 
591*a88394cfSJeff Kirsher 	/* Initialize DM910X board */
592*a88394cfSJeff Kirsher 	dmfe_init_dm910x(dev);
593*a88394cfSJeff Kirsher 
594*a88394cfSJeff Kirsher 	/* Activate system interface */
595*a88394cfSJeff Kirsher 	netif_wake_queue(dev);
596*a88394cfSJeff Kirsher 
597*a88394cfSJeff Kirsher 	/* set and start the driver timer */
598*a88394cfSJeff Kirsher 	init_timer(&db->timer);
599*a88394cfSJeff Kirsher 	db->timer.expires = DMFE_TIMER_WUT + HZ * 2;
600*a88394cfSJeff Kirsher 	db->timer.data = (unsigned long)dev;
601*a88394cfSJeff Kirsher 	db->timer.function = dmfe_timer;
602*a88394cfSJeff Kirsher 	add_timer(&db->timer);
603*a88394cfSJeff Kirsher 
604*a88394cfSJeff Kirsher 	return 0;
605*a88394cfSJeff Kirsher }
606*a88394cfSJeff Kirsher 
607*a88394cfSJeff Kirsher 
608*a88394cfSJeff Kirsher /*	Initialize DM910X board
609*a88394cfSJeff Kirsher  *	Reset DM910X board
610*a88394cfSJeff Kirsher  *	Initialize TX/Rx descriptor chain structure
611*a88394cfSJeff Kirsher  *	Send the set-up frame
612*a88394cfSJeff Kirsher  *	Enable Tx/Rx machine
613*a88394cfSJeff Kirsher  */
614*a88394cfSJeff Kirsher 
615*a88394cfSJeff Kirsher static void dmfe_init_dm910x(struct DEVICE *dev)
616*a88394cfSJeff Kirsher {
617*a88394cfSJeff Kirsher 	struct dmfe_board_info *db = netdev_priv(dev);
618*a88394cfSJeff Kirsher 	unsigned long ioaddr = db->ioaddr;
619*a88394cfSJeff Kirsher 
620*a88394cfSJeff Kirsher 	DMFE_DBUG(0, "dmfe_init_dm910x()", 0);
621*a88394cfSJeff Kirsher 
622*a88394cfSJeff Kirsher 	/* Reset DM910x MAC controller */
623*a88394cfSJeff Kirsher 	outl(DM910X_RESET, ioaddr + DCR0);	/* RESET MAC */
624*a88394cfSJeff Kirsher 	udelay(100);
625*a88394cfSJeff Kirsher 	outl(db->cr0_data, ioaddr + DCR0);
626*a88394cfSJeff Kirsher 	udelay(5);
627*a88394cfSJeff Kirsher 
628*a88394cfSJeff Kirsher 	/* Phy addr : DM9102(A)/DM9132/9801, phy address = 1 */
629*a88394cfSJeff Kirsher 	db->phy_addr = 1;
630*a88394cfSJeff Kirsher 
631*a88394cfSJeff Kirsher 	/* Parse SROM and media mode */
632*a88394cfSJeff Kirsher 	dmfe_parse_srom(db);
633*a88394cfSJeff Kirsher 	db->media_mode = dmfe_media_mode;
634*a88394cfSJeff Kirsher 
635*a88394cfSJeff Kirsher 	/* RESET Phyxcer Chip by GPR port bit 7 */
636*a88394cfSJeff Kirsher 	outl(0x180, ioaddr + DCR12);		/* Let bit 7 output port */
637*a88394cfSJeff Kirsher 	if (db->chip_id == PCI_DM9009_ID) {
638*a88394cfSJeff Kirsher 		outl(0x80, ioaddr + DCR12);	/* Issue RESET signal */
639*a88394cfSJeff Kirsher 		mdelay(300);			/* Delay 300 ms */
640*a88394cfSJeff Kirsher 	}
641*a88394cfSJeff Kirsher 	outl(0x0, ioaddr + DCR12);	/* Clear RESET signal */
642*a88394cfSJeff Kirsher 
643*a88394cfSJeff Kirsher 	/* Process Phyxcer Media Mode */
644*a88394cfSJeff Kirsher 	if ( !(db->media_mode & 0x10) )	/* Force 1M mode */
645*a88394cfSJeff Kirsher 		dmfe_set_phyxcer(db);
646*a88394cfSJeff Kirsher 
647*a88394cfSJeff Kirsher 	/* Media Mode Process */
648*a88394cfSJeff Kirsher 	if ( !(db->media_mode & DMFE_AUTO) )
649*a88394cfSJeff Kirsher 		db->op_mode = db->media_mode; 	/* Force Mode */
650*a88394cfSJeff Kirsher 
651*a88394cfSJeff Kirsher 	/* Initialize Transmit/Receive descriptor and CR3/4 */
652*a88394cfSJeff Kirsher 	dmfe_descriptor_init(db, ioaddr);
653*a88394cfSJeff Kirsher 
654*a88394cfSJeff Kirsher 	/* Init CR6 to program DM910x operation */
655*a88394cfSJeff Kirsher 	update_cr6(db->cr6_data, ioaddr);
656*a88394cfSJeff Kirsher 
657*a88394cfSJeff Kirsher 	/* Send setup frame */
658*a88394cfSJeff Kirsher 	if (db->chip_id == PCI_DM9132_ID)
659*a88394cfSJeff Kirsher 		dm9132_id_table(dev);	/* DM9132 */
660*a88394cfSJeff Kirsher 	else
661*a88394cfSJeff Kirsher 		send_filter_frame(dev);	/* DM9102/DM9102A */
662*a88394cfSJeff Kirsher 
663*a88394cfSJeff Kirsher 	/* Init CR7, interrupt active bit */
664*a88394cfSJeff Kirsher 	db->cr7_data = CR7_DEFAULT;
665*a88394cfSJeff Kirsher 	outl(db->cr7_data, ioaddr + DCR7);
666*a88394cfSJeff Kirsher 
667*a88394cfSJeff Kirsher 	/* Init CR15, Tx jabber and Rx watchdog timer */
668*a88394cfSJeff Kirsher 	outl(db->cr15_data, ioaddr + DCR15);
669*a88394cfSJeff Kirsher 
670*a88394cfSJeff Kirsher 	/* Enable DM910X Tx/Rx function */
671*a88394cfSJeff Kirsher 	db->cr6_data |= CR6_RXSC | CR6_TXSC | 0x40000;
672*a88394cfSJeff Kirsher 	update_cr6(db->cr6_data, ioaddr);
673*a88394cfSJeff Kirsher }
674*a88394cfSJeff Kirsher 
675*a88394cfSJeff Kirsher 
676*a88394cfSJeff Kirsher /*
677*a88394cfSJeff Kirsher  *	Hardware start transmission.
678*a88394cfSJeff Kirsher  *	Send a packet to media from the upper layer.
679*a88394cfSJeff Kirsher  */
680*a88394cfSJeff Kirsher 
681*a88394cfSJeff Kirsher static netdev_tx_t dmfe_start_xmit(struct sk_buff *skb,
682*a88394cfSJeff Kirsher 					 struct DEVICE *dev)
683*a88394cfSJeff Kirsher {
684*a88394cfSJeff Kirsher 	struct dmfe_board_info *db = netdev_priv(dev);
685*a88394cfSJeff Kirsher 	struct tx_desc *txptr;
686*a88394cfSJeff Kirsher 	unsigned long flags;
687*a88394cfSJeff Kirsher 
688*a88394cfSJeff Kirsher 	DMFE_DBUG(0, "dmfe_start_xmit", 0);
689*a88394cfSJeff Kirsher 
690*a88394cfSJeff Kirsher 	/* Too large packet check */
691*a88394cfSJeff Kirsher 	if (skb->len > MAX_PACKET_SIZE) {
692*a88394cfSJeff Kirsher 		pr_err("big packet = %d\n", (u16)skb->len);
693*a88394cfSJeff Kirsher 		dev_kfree_skb(skb);
694*a88394cfSJeff Kirsher 		return NETDEV_TX_OK;
695*a88394cfSJeff Kirsher 	}
696*a88394cfSJeff Kirsher 
697*a88394cfSJeff Kirsher 	/* Resource flag check */
698*a88394cfSJeff Kirsher 	netif_stop_queue(dev);
699*a88394cfSJeff Kirsher 
700*a88394cfSJeff Kirsher 	spin_lock_irqsave(&db->lock, flags);
701*a88394cfSJeff Kirsher 
702*a88394cfSJeff Kirsher 	/* No Tx resource check, it should never happen normally */
703*a88394cfSJeff Kirsher 	if (db->tx_queue_cnt >= TX_FREE_DESC_CNT) {
704*a88394cfSJeff Kirsher 		spin_unlock_irqrestore(&db->lock, flags);
705*a88394cfSJeff Kirsher 		pr_err("No Tx resource %ld\n", db->tx_queue_cnt);
706*a88394cfSJeff Kirsher 		return NETDEV_TX_BUSY;
707*a88394cfSJeff Kirsher 	}
708*a88394cfSJeff Kirsher 
709*a88394cfSJeff Kirsher 	/* Disable NIC interrupt */
710*a88394cfSJeff Kirsher 	outl(0, dev->base_addr + DCR7);
711*a88394cfSJeff Kirsher 
712*a88394cfSJeff Kirsher 	/* transmit this packet */
713*a88394cfSJeff Kirsher 	txptr = db->tx_insert_ptr;
714*a88394cfSJeff Kirsher 	skb_copy_from_linear_data(skb, txptr->tx_buf_ptr, skb->len);
715*a88394cfSJeff Kirsher 	txptr->tdes1 = cpu_to_le32(0xe1000000 | skb->len);
716*a88394cfSJeff Kirsher 
717*a88394cfSJeff Kirsher 	/* Point to next transmit free descriptor */
718*a88394cfSJeff Kirsher 	db->tx_insert_ptr = txptr->next_tx_desc;
719*a88394cfSJeff Kirsher 
720*a88394cfSJeff Kirsher 	/* Transmit Packet Process */
721*a88394cfSJeff Kirsher 	if ( (!db->tx_queue_cnt) && (db->tx_packet_cnt < TX_MAX_SEND_CNT) ) {
722*a88394cfSJeff Kirsher 		txptr->tdes0 = cpu_to_le32(0x80000000);	/* Set owner bit */
723*a88394cfSJeff Kirsher 		db->tx_packet_cnt++;			/* Ready to send */
724*a88394cfSJeff Kirsher 		outl(0x1, dev->base_addr + DCR1);	/* Issue Tx polling */
725*a88394cfSJeff Kirsher 		dev->trans_start = jiffies;		/* saved time stamp */
726*a88394cfSJeff Kirsher 	} else {
727*a88394cfSJeff Kirsher 		db->tx_queue_cnt++;			/* queue TX packet */
728*a88394cfSJeff Kirsher 		outl(0x1, dev->base_addr + DCR1);	/* Issue Tx polling */
729*a88394cfSJeff Kirsher 	}
730*a88394cfSJeff Kirsher 
731*a88394cfSJeff Kirsher 	/* Tx resource check */
732*a88394cfSJeff Kirsher 	if ( db->tx_queue_cnt < TX_FREE_DESC_CNT )
733*a88394cfSJeff Kirsher 		netif_wake_queue(dev);
734*a88394cfSJeff Kirsher 
735*a88394cfSJeff Kirsher 	/* Restore CR7 to enable interrupt */
736*a88394cfSJeff Kirsher 	spin_unlock_irqrestore(&db->lock, flags);
737*a88394cfSJeff Kirsher 	outl(db->cr7_data, dev->base_addr + DCR7);
738*a88394cfSJeff Kirsher 
739*a88394cfSJeff Kirsher 	/* free this SKB */
740*a88394cfSJeff Kirsher 	dev_kfree_skb(skb);
741*a88394cfSJeff Kirsher 
742*a88394cfSJeff Kirsher 	return NETDEV_TX_OK;
743*a88394cfSJeff Kirsher }
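/*
 * Roughly, the transmit path copies the frame into a pre-allocated
 * TX_BUF_ALLOC-sized buffer, writes the length into TDES1, hands the
 * descriptor to the chip by setting the owner bit (0x80000000) in TDES0,
 * and pokes CR1 to issue a transmit poll; completions are reaped in
 * dmfe_free_tx_pkt() once the chip clears the owner bit again.
 */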
744*a88394cfSJeff Kirsher 
745*a88394cfSJeff Kirsher 
746*a88394cfSJeff Kirsher /*
747*a88394cfSJeff Kirsher  *	Stop the interface.
748*a88394cfSJeff Kirsher  *	The interface is stopped when it is brought down.
749*a88394cfSJeff Kirsher  */
750*a88394cfSJeff Kirsher 
751*a88394cfSJeff Kirsher static int dmfe_stop(struct DEVICE *dev)
752*a88394cfSJeff Kirsher {
753*a88394cfSJeff Kirsher 	struct dmfe_board_info *db = netdev_priv(dev);
754*a88394cfSJeff Kirsher 	unsigned long ioaddr = dev->base_addr;
755*a88394cfSJeff Kirsher 
756*a88394cfSJeff Kirsher 	DMFE_DBUG(0, "dmfe_stop", 0);
757*a88394cfSJeff Kirsher 
758*a88394cfSJeff Kirsher 	/* disable system */
759*a88394cfSJeff Kirsher 	netif_stop_queue(dev);
760*a88394cfSJeff Kirsher 
761*a88394cfSJeff Kirsher 	/* delete timer */
762*a88394cfSJeff Kirsher 	del_timer_sync(&db->timer);
763*a88394cfSJeff Kirsher 
764*a88394cfSJeff Kirsher 	/* Reset & stop DM910X board */
765*a88394cfSJeff Kirsher 	outl(DM910X_RESET, ioaddr + DCR0);
766*a88394cfSJeff Kirsher 	udelay(5);
767*a88394cfSJeff Kirsher 	phy_write(db->ioaddr, db->phy_addr, 0, 0x8000, db->chip_id);
768*a88394cfSJeff Kirsher 
769*a88394cfSJeff Kirsher 	/* free interrupt */
770*a88394cfSJeff Kirsher 	free_irq(dev->irq, dev);
771*a88394cfSJeff Kirsher 
772*a88394cfSJeff Kirsher 	/* free allocated rx buffer */
773*a88394cfSJeff Kirsher 	dmfe_free_rxbuffer(db);
774*a88394cfSJeff Kirsher 
775*a88394cfSJeff Kirsher #if 0
776*a88394cfSJeff Kirsher 	/* show statistic counter */
777*a88394cfSJeff Kirsher 	printk("FU:%lx EC:%lx LC:%lx NC:%lx LOC:%lx TXJT:%lx RESET:%lx RCR8:%lx FAL:%lx TT:%lx\n",
778*a88394cfSJeff Kirsher 	       db->tx_fifo_underrun, db->tx_excessive_collision,
779*a88394cfSJeff Kirsher 	       db->tx_late_collision, db->tx_no_carrier, db->tx_loss_carrier,
780*a88394cfSJeff Kirsher 	       db->tx_jabber_timeout, db->reset_count, db->reset_cr8,
781*a88394cfSJeff Kirsher 	       db->reset_fatal, db->reset_TXtimeout);
782*a88394cfSJeff Kirsher #endif
783*a88394cfSJeff Kirsher 
784*a88394cfSJeff Kirsher 	return 0;
785*a88394cfSJeff Kirsher }
786*a88394cfSJeff Kirsher 
787*a88394cfSJeff Kirsher 
788*a88394cfSJeff Kirsher /*
789*a88394cfSJeff Kirsher  *	DM9102 interrupt handler
790*a88394cfSJeff Kirsher  *	pass received packets to the upper layer, free transmitted packets
791*a88394cfSJeff Kirsher  */
792*a88394cfSJeff Kirsher 
793*a88394cfSJeff Kirsher static irqreturn_t dmfe_interrupt(int irq, void *dev_id)
794*a88394cfSJeff Kirsher {
795*a88394cfSJeff Kirsher 	struct DEVICE *dev = dev_id;
796*a88394cfSJeff Kirsher 	struct dmfe_board_info *db = netdev_priv(dev);
797*a88394cfSJeff Kirsher 	unsigned long ioaddr = dev->base_addr;
798*a88394cfSJeff Kirsher 	unsigned long flags;
799*a88394cfSJeff Kirsher 
800*a88394cfSJeff Kirsher 	DMFE_DBUG(0, "dmfe_interrupt()", 0);
801*a88394cfSJeff Kirsher 
802*a88394cfSJeff Kirsher 	spin_lock_irqsave(&db->lock, flags);
803*a88394cfSJeff Kirsher 
804*a88394cfSJeff Kirsher 	/* Got DM910X status */
805*a88394cfSJeff Kirsher 	db->cr5_data = inl(ioaddr + DCR5);
806*a88394cfSJeff Kirsher 	outl(db->cr5_data, ioaddr + DCR5);
807*a88394cfSJeff Kirsher 	if ( !(db->cr5_data & 0xc1) ) {
808*a88394cfSJeff Kirsher 		spin_unlock_irqrestore(&db->lock, flags);
809*a88394cfSJeff Kirsher 		return IRQ_HANDLED;
810*a88394cfSJeff Kirsher 	}
811*a88394cfSJeff Kirsher 
812*a88394cfSJeff Kirsher 	/* Disable all interrupts in CR7 to solve the interrupt edge problem */
813*a88394cfSJeff Kirsher 	outl(0, ioaddr + DCR7);
814*a88394cfSJeff Kirsher 
815*a88394cfSJeff Kirsher 	/* Check system status */
816*a88394cfSJeff Kirsher 	if (db->cr5_data & 0x2000) {
817*a88394cfSJeff Kirsher 		/* system bus error happened */
818*a88394cfSJeff Kirsher 		DMFE_DBUG(1, "System bus error happen. CR5=", db->cr5_data);
819*a88394cfSJeff Kirsher 		db->reset_fatal++;
820*a88394cfSJeff Kirsher 		db->wait_reset = 1;	/* Need to RESET */
821*a88394cfSJeff Kirsher 		spin_unlock_irqrestore(&db->lock, flags);
822*a88394cfSJeff Kirsher 		return IRQ_HANDLED;
823*a88394cfSJeff Kirsher 	}
824*a88394cfSJeff Kirsher 
825*a88394cfSJeff Kirsher 	 /* Receive incoming packets */
826*a88394cfSJeff Kirsher 	if ( (db->cr5_data & 0x40) && db->rx_avail_cnt )
827*a88394cfSJeff Kirsher 		dmfe_rx_packet(dev, db);
828*a88394cfSJeff Kirsher 
829*a88394cfSJeff Kirsher 	/* reallocate rx descriptor buffer */
830*a88394cfSJeff Kirsher 	if (db->rx_avail_cnt<RX_DESC_CNT)
831*a88394cfSJeff Kirsher 		allocate_rx_buffer(db);
832*a88394cfSJeff Kirsher 
833*a88394cfSJeff Kirsher 	/* Free the transmitted descriptor */
834*a88394cfSJeff Kirsher 	if ( db->cr5_data & 0x01)
835*a88394cfSJeff Kirsher 		dmfe_free_tx_pkt(dev, db);
836*a88394cfSJeff Kirsher 
837*a88394cfSJeff Kirsher 	/* Mode Check */
838*a88394cfSJeff Kirsher 	if (db->dm910x_chk_mode & 0x2) {
839*a88394cfSJeff Kirsher 		db->dm910x_chk_mode = 0x4;
840*a88394cfSJeff Kirsher 		db->cr6_data |= 0x100;
841*a88394cfSJeff Kirsher 		update_cr6(db->cr6_data, db->ioaddr);
842*a88394cfSJeff Kirsher 	}
843*a88394cfSJeff Kirsher 
844*a88394cfSJeff Kirsher 	/* Restore CR7 to enable interrupt mask */
845*a88394cfSJeff Kirsher 	outl(db->cr7_data, ioaddr + DCR7);
846*a88394cfSJeff Kirsher 
847*a88394cfSJeff Kirsher 	spin_unlock_irqrestore(&db->lock, flags);
848*a88394cfSJeff Kirsher 	return IRQ_HANDLED;
849*a88394cfSJeff Kirsher }
850*a88394cfSJeff Kirsher 
851*a88394cfSJeff Kirsher 
852*a88394cfSJeff Kirsher #ifdef CONFIG_NET_POLL_CONTROLLER
853*a88394cfSJeff Kirsher /*
854*a88394cfSJeff Kirsher  * Polling 'interrupt' - used by things like netconsole to send skbs
855*a88394cfSJeff Kirsher  * without having to re-enable interrupts. It's not called while
856*a88394cfSJeff Kirsher  * the interrupt routine is executing.
857*a88394cfSJeff Kirsher  */
858*a88394cfSJeff Kirsher 
859*a88394cfSJeff Kirsher static void poll_dmfe (struct net_device *dev)
860*a88394cfSJeff Kirsher {
861*a88394cfSJeff Kirsher 	/* disable_irq here is not very nice, but with the lockless
862*a88394cfSJeff Kirsher 	   interrupt handler we have no other choice. */
863*a88394cfSJeff Kirsher 	disable_irq(dev->irq);
864*a88394cfSJeff Kirsher 	dmfe_interrupt (dev->irq, dev);
865*a88394cfSJeff Kirsher 	enable_irq(dev->irq);
866*a88394cfSJeff Kirsher }
867*a88394cfSJeff Kirsher #endif
868*a88394cfSJeff Kirsher 
869*a88394cfSJeff Kirsher /*
870*a88394cfSJeff Kirsher  *	Free TX resource after TX complete
871*a88394cfSJeff Kirsher  */
872*a88394cfSJeff Kirsher 
873*a88394cfSJeff Kirsher static void dmfe_free_tx_pkt(struct DEVICE *dev, struct dmfe_board_info * db)
874*a88394cfSJeff Kirsher {
875*a88394cfSJeff Kirsher 	struct tx_desc *txptr;
876*a88394cfSJeff Kirsher 	unsigned long ioaddr = dev->base_addr;
877*a88394cfSJeff Kirsher 	u32 tdes0;
878*a88394cfSJeff Kirsher 
879*a88394cfSJeff Kirsher 	txptr = db->tx_remove_ptr;
880*a88394cfSJeff Kirsher 	while(db->tx_packet_cnt) {
881*a88394cfSJeff Kirsher 		tdes0 = le32_to_cpu(txptr->tdes0);
882*a88394cfSJeff Kirsher 		if (tdes0 & 0x80000000)
883*a88394cfSJeff Kirsher 			break;
884*a88394cfSJeff Kirsher 
885*a88394cfSJeff Kirsher 		/* A packet transmission completed */
886*a88394cfSJeff Kirsher 		db->tx_packet_cnt--;
887*a88394cfSJeff Kirsher 		dev->stats.tx_packets++;
888*a88394cfSJeff Kirsher 
889*a88394cfSJeff Kirsher 		/* Transmit statistic counter */
890*a88394cfSJeff Kirsher 		if ( tdes0 != 0x7fffffff ) {
891*a88394cfSJeff Kirsher 			dev->stats.collisions += (tdes0 >> 3) & 0xf;
892*a88394cfSJeff Kirsher 			dev->stats.tx_bytes += le32_to_cpu(txptr->tdes1) & 0x7ff;
893*a88394cfSJeff Kirsher 			if (tdes0 & TDES0_ERR_MASK) {
894*a88394cfSJeff Kirsher 				dev->stats.tx_errors++;
895*a88394cfSJeff Kirsher 
896*a88394cfSJeff Kirsher 				if (tdes0 & 0x0002) {	/* UnderRun */
897*a88394cfSJeff Kirsher 					db->tx_fifo_underrun++;
898*a88394cfSJeff Kirsher 					if ( !(db->cr6_data & CR6_SFT) ) {
899*a88394cfSJeff Kirsher 						db->cr6_data = db->cr6_data | CR6_SFT;
900*a88394cfSJeff Kirsher 						update_cr6(db->cr6_data, db->ioaddr);
901*a88394cfSJeff Kirsher 					}
902*a88394cfSJeff Kirsher 				}
903*a88394cfSJeff Kirsher 				if (tdes0 & 0x0100)
904*a88394cfSJeff Kirsher 					db->tx_excessive_collision++;
905*a88394cfSJeff Kirsher 				if (tdes0 & 0x0200)
906*a88394cfSJeff Kirsher 					db->tx_late_collision++;
907*a88394cfSJeff Kirsher 				if (tdes0 & 0x0400)
908*a88394cfSJeff Kirsher 					db->tx_no_carrier++;
909*a88394cfSJeff Kirsher 				if (tdes0 & 0x0800)
910*a88394cfSJeff Kirsher 					db->tx_loss_carrier++;
911*a88394cfSJeff Kirsher 				if (tdes0 & 0x4000)
912*a88394cfSJeff Kirsher 					db->tx_jabber_timeout++;
913*a88394cfSJeff Kirsher 			}
914*a88394cfSJeff Kirsher 		}
915*a88394cfSJeff Kirsher 
916*a88394cfSJeff Kirsher     		txptr = txptr->next_tx_desc;
917*a88394cfSJeff Kirsher 	}/* End of while */
918*a88394cfSJeff Kirsher 
919*a88394cfSJeff Kirsher 	/* Update TX remove pointer to next */
920*a88394cfSJeff Kirsher 	db->tx_remove_ptr = txptr;
921*a88394cfSJeff Kirsher 
922*a88394cfSJeff Kirsher 	/* Send the Tx packet in queue */
923*a88394cfSJeff Kirsher 	if ( (db->tx_packet_cnt < TX_MAX_SEND_CNT) && db->tx_queue_cnt ) {
924*a88394cfSJeff Kirsher 		txptr->tdes0 = cpu_to_le32(0x80000000);	/* Set owner bit */
925*a88394cfSJeff Kirsher 		db->tx_packet_cnt++;			/* Ready to send */
926*a88394cfSJeff Kirsher 		db->tx_queue_cnt--;
927*a88394cfSJeff Kirsher 		outl(0x1, ioaddr + DCR1);		/* Issue Tx polling */
928*a88394cfSJeff Kirsher 		dev->trans_start = jiffies;		/* saved time stamp */
929*a88394cfSJeff Kirsher 	}
930*a88394cfSJeff Kirsher 
931*a88394cfSJeff Kirsher 	/* Resource available check */
932*a88394cfSJeff Kirsher 	if ( db->tx_queue_cnt < TX_WAKE_DESC_CNT )
933*a88394cfSJeff Kirsher 		netif_wake_queue(dev);	/* Wake upper layer to send again */
934*a88394cfSJeff Kirsher }
935*a88394cfSJeff Kirsher 
936*a88394cfSJeff Kirsher 
937*a88394cfSJeff Kirsher /*
938*a88394cfSJeff Kirsher  *	Calculate the CRC value of the Rx packet
939*a88394cfSJeff Kirsher  *	flag = 	1 : return the reverse CRC (for the received packet CRC)
940*a88394cfSJeff Kirsher  *		0 : return the normal CRC (for Hash Table index)
941*a88394cfSJeff Kirsher  */
942*a88394cfSJeff Kirsher 
943*a88394cfSJeff Kirsher static inline u32 cal_CRC(unsigned char * Data, unsigned int Len, u8 flag)
944*a88394cfSJeff Kirsher {
945*a88394cfSJeff Kirsher 	u32 crc = crc32(~0, Data, Len);
946*a88394cfSJeff Kirsher 	if (flag) crc = ~crc;
947*a88394cfSJeff Kirsher 	return crc;
948*a88394cfSJeff Kirsher }
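/*
 * In this driver flag=1 is used when validating a received frame: the result
 * of cal_CRC(skb->data, rxlen, 1) is compared against the 32-bit FCS that
 * follows the payload (see dmfe_rx_packet), while flag=0 would give the
 * plain CRC used as a hash table index for the multicast filter.
 */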
949*a88394cfSJeff Kirsher 
950*a88394cfSJeff Kirsher 
951*a88394cfSJeff Kirsher /*
952*a88394cfSJeff Kirsher  *	Receive incoming packets and pass them to the upper layer
953*a88394cfSJeff Kirsher  */
954*a88394cfSJeff Kirsher 
955*a88394cfSJeff Kirsher static void dmfe_rx_packet(struct DEVICE *dev, struct dmfe_board_info * db)
956*a88394cfSJeff Kirsher {
957*a88394cfSJeff Kirsher 	struct rx_desc *rxptr;
958*a88394cfSJeff Kirsher 	struct sk_buff *skb, *newskb;
959*a88394cfSJeff Kirsher 	int rxlen;
960*a88394cfSJeff Kirsher 	u32 rdes0;
961*a88394cfSJeff Kirsher 
962*a88394cfSJeff Kirsher 	rxptr = db->rx_ready_ptr;
963*a88394cfSJeff Kirsher 
964*a88394cfSJeff Kirsher 	while(db->rx_avail_cnt) {
965*a88394cfSJeff Kirsher 		rdes0 = le32_to_cpu(rxptr->rdes0);
966*a88394cfSJeff Kirsher 		if (rdes0 & 0x80000000)	/* packet owner check */
967*a88394cfSJeff Kirsher 			break;
968*a88394cfSJeff Kirsher 
969*a88394cfSJeff Kirsher 		db->rx_avail_cnt--;
970*a88394cfSJeff Kirsher 		db->interval_rx_cnt++;
971*a88394cfSJeff Kirsher 
972*a88394cfSJeff Kirsher 		pci_unmap_single(db->pdev, le32_to_cpu(rxptr->rdes2),
973*a88394cfSJeff Kirsher 				 RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE);
974*a88394cfSJeff Kirsher 
975*a88394cfSJeff Kirsher 		if ( (rdes0 & 0x300) != 0x300) {
976*a88394cfSJeff Kirsher 			/* A packet without First/Last flag */
977*a88394cfSJeff Kirsher 			/* reuse this SKB */
978*a88394cfSJeff Kirsher 			DMFE_DBUG(0, "Reuse SK buffer, rdes0", rdes0);
979*a88394cfSJeff Kirsher 			dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
980*a88394cfSJeff Kirsher 		} else {
981*a88394cfSJeff Kirsher 			/* A packet with First/Last flag */
982*a88394cfSJeff Kirsher 			rxlen = ( (rdes0 >> 16) & 0x3fff) - 4;
983*a88394cfSJeff Kirsher 
984*a88394cfSJeff Kirsher 			/* error summary bit check */
985*a88394cfSJeff Kirsher 			if (rdes0 & 0x8000) {
986*a88394cfSJeff Kirsher 				/* This is an error packet */
987*a88394cfSJeff Kirsher 				dev->stats.rx_errors++;
988*a88394cfSJeff Kirsher 				if (rdes0 & 1)
989*a88394cfSJeff Kirsher 					dev->stats.rx_fifo_errors++;
990*a88394cfSJeff Kirsher 				if (rdes0 & 2)
991*a88394cfSJeff Kirsher 					dev->stats.rx_crc_errors++;
992*a88394cfSJeff Kirsher 				if (rdes0 & 0x80)
993*a88394cfSJeff Kirsher 					dev->stats.rx_length_errors++;
994*a88394cfSJeff Kirsher 			}
995*a88394cfSJeff Kirsher 
996*a88394cfSJeff Kirsher 			if ( !(rdes0 & 0x8000) ||
997*a88394cfSJeff Kirsher 				((db->cr6_data & CR6_PM) && (rxlen>6)) ) {
998*a88394cfSJeff Kirsher 				skb = rxptr->rx_skb_ptr;
999*a88394cfSJeff Kirsher 
1000*a88394cfSJeff Kirsher 				/* Check received packet CRC if required */
1001*a88394cfSJeff Kirsher 				if ( (db->dm910x_chk_mode & 1) &&
1002*a88394cfSJeff Kirsher 					(cal_CRC(skb->data, rxlen, 1) !=
1003*a88394cfSJeff Kirsher 					(*(u32 *) (skb->data+rxlen) ))) { /* FIXME (?) */
1004*a88394cfSJeff Kirsher 					/* Found an erroneous received packet */
1005*a88394cfSJeff Kirsher 					dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
1006*a88394cfSJeff Kirsher 					db->dm910x_chk_mode = 3;
1007*a88394cfSJeff Kirsher 				} else {
1008*a88394cfSJeff Kirsher 					/* Good packet, send to upper layer */
1009*a88394cfSJeff Kirsher 					/* Short packets are copied into a new SKB */
1010*a88394cfSJeff Kirsher 					if ((rxlen < RX_COPY_SIZE) &&
1011*a88394cfSJeff Kirsher 						((newskb = dev_alloc_skb(rxlen + 2))
1012*a88394cfSJeff Kirsher 						!= NULL)) {
1013*a88394cfSJeff Kirsher 
1014*a88394cfSJeff Kirsher 						skb = newskb;
1015*a88394cfSJeff Kirsher 						/* size less than RX_COPY_SIZE, allocate an rxlen-sized SKB */
1016*a88394cfSJeff Kirsher 						skb_reserve(skb, 2); /* 16byte align */
1017*a88394cfSJeff Kirsher 						skb_copy_from_linear_data(rxptr->rx_skb_ptr,
1018*a88394cfSJeff Kirsher 							  skb_put(skb, rxlen),
1019*a88394cfSJeff Kirsher 									  rxlen);
1020*a88394cfSJeff Kirsher 						dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
1021*a88394cfSJeff Kirsher 					} else
1022*a88394cfSJeff Kirsher 						skb_put(skb, rxlen);
1023*a88394cfSJeff Kirsher 
1024*a88394cfSJeff Kirsher 					skb->protocol = eth_type_trans(skb, dev);
1025*a88394cfSJeff Kirsher 					netif_rx(skb);
1026*a88394cfSJeff Kirsher 					dev->stats.rx_packets++;
1027*a88394cfSJeff Kirsher 					dev->stats.rx_bytes += rxlen;
1028*a88394cfSJeff Kirsher 				}
1029*a88394cfSJeff Kirsher 			} else {
1030*a88394cfSJeff Kirsher 				/* Reuse SKB buffer when the packet is error */
1031*a88394cfSJeff Kirsher 				DMFE_DBUG(0, "Reuse SK buffer, rdes0", rdes0);
1032*a88394cfSJeff Kirsher 				dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
1033*a88394cfSJeff Kirsher 			}
1034*a88394cfSJeff Kirsher 		}
1035*a88394cfSJeff Kirsher 
1036*a88394cfSJeff Kirsher 		rxptr = rxptr->next_rx_desc;
1037*a88394cfSJeff Kirsher 	}
1038*a88394cfSJeff Kirsher 
1039*a88394cfSJeff Kirsher 	db->rx_ready_ptr = rxptr;
1040*a88394cfSJeff Kirsher }
1041*a88394cfSJeff Kirsher 
1042*a88394cfSJeff Kirsher /*
1043*a88394cfSJeff Kirsher  * Set DM910X multicast address
1044*a88394cfSJeff Kirsher  */
1045*a88394cfSJeff Kirsher 
1046*a88394cfSJeff Kirsher static void dmfe_set_filter_mode(struct DEVICE * dev)
1047*a88394cfSJeff Kirsher {
1048*a88394cfSJeff Kirsher 	struct dmfe_board_info *db = netdev_priv(dev);
1049*a88394cfSJeff Kirsher 	unsigned long flags;
1050*a88394cfSJeff Kirsher 	int mc_count = netdev_mc_count(dev);
1051*a88394cfSJeff Kirsher 
1052*a88394cfSJeff Kirsher 	DMFE_DBUG(0, "dmfe_set_filter_mode()", 0);
1053*a88394cfSJeff Kirsher 	spin_lock_irqsave(&db->lock, flags);
1054*a88394cfSJeff Kirsher 
1055*a88394cfSJeff Kirsher 	if (dev->flags & IFF_PROMISC) {
1056*a88394cfSJeff Kirsher 		DMFE_DBUG(0, "Enable PROM Mode", 0);
1057*a88394cfSJeff Kirsher 		db->cr6_data |= CR6_PM | CR6_PBF;
1058*a88394cfSJeff Kirsher 		update_cr6(db->cr6_data, db->ioaddr);
1059*a88394cfSJeff Kirsher 		spin_unlock_irqrestore(&db->lock, flags);
1060*a88394cfSJeff Kirsher 		return;
1061*a88394cfSJeff Kirsher 	}
1062*a88394cfSJeff Kirsher 
1063*a88394cfSJeff Kirsher 	if (dev->flags & IFF_ALLMULTI || mc_count > DMFE_MAX_MULTICAST) {
1064*a88394cfSJeff Kirsher 		DMFE_DBUG(0, "Pass all multicast address", mc_count);
1065*a88394cfSJeff Kirsher 		db->cr6_data &= ~(CR6_PM | CR6_PBF);
1066*a88394cfSJeff Kirsher 		db->cr6_data |= CR6_PAM;
1067*a88394cfSJeff Kirsher 		spin_unlock_irqrestore(&db->lock, flags);
1068*a88394cfSJeff Kirsher 		return;
1069*a88394cfSJeff Kirsher 	}
1070*a88394cfSJeff Kirsher 
1071*a88394cfSJeff Kirsher 	DMFE_DBUG(0, "Set multicast address", mc_count);
1072*a88394cfSJeff Kirsher 	if (db->chip_id == PCI_DM9132_ID)
1073*a88394cfSJeff Kirsher 		dm9132_id_table(dev);	/* DM9132 */
1074*a88394cfSJeff Kirsher 	else
1075*a88394cfSJeff Kirsher 		send_filter_frame(dev);	/* DM9102/DM9102A */
1076*a88394cfSJeff Kirsher 	spin_unlock_irqrestore(&db->lock, flags);
1077*a88394cfSJeff Kirsher }
1078*a88394cfSJeff Kirsher 
1079*a88394cfSJeff Kirsher /*
1080*a88394cfSJeff Kirsher  * 	Ethtool interface
1081*a88394cfSJeff Kirsher  */
1082*a88394cfSJeff Kirsher 
1083*a88394cfSJeff Kirsher static void dmfe_ethtool_get_drvinfo(struct net_device *dev,
1084*a88394cfSJeff Kirsher 			       struct ethtool_drvinfo *info)
1085*a88394cfSJeff Kirsher {
1086*a88394cfSJeff Kirsher 	struct dmfe_board_info *np = netdev_priv(dev);
1087*a88394cfSJeff Kirsher 
1088*a88394cfSJeff Kirsher 	strcpy(info->driver, DRV_NAME);
1089*a88394cfSJeff Kirsher 	strcpy(info->version, DRV_VERSION);
1090*a88394cfSJeff Kirsher 	if (np->pdev)
1091*a88394cfSJeff Kirsher 		strcpy(info->bus_info, pci_name(np->pdev));
1092*a88394cfSJeff Kirsher 	else
1093*a88394cfSJeff Kirsher 		sprintf(info->bus_info, "EISA 0x%lx %d",
1094*a88394cfSJeff Kirsher 			dev->base_addr, dev->irq);
1095*a88394cfSJeff Kirsher }
1096*a88394cfSJeff Kirsher 
1097*a88394cfSJeff Kirsher static int dmfe_ethtool_set_wol(struct net_device *dev,
1098*a88394cfSJeff Kirsher 				struct ethtool_wolinfo *wolinfo)
1099*a88394cfSJeff Kirsher {
1100*a88394cfSJeff Kirsher 	struct dmfe_board_info *db = netdev_priv(dev);
1101*a88394cfSJeff Kirsher 
1102*a88394cfSJeff Kirsher 	if (wolinfo->wolopts & (WAKE_UCAST | WAKE_MCAST | WAKE_BCAST |
1103*a88394cfSJeff Kirsher 				WAKE_ARP | WAKE_MAGICSECURE))
1104*a88394cfSJeff Kirsher 		return -EOPNOTSUPP;
1105*a88394cfSJeff Kirsher 
1106*a88394cfSJeff Kirsher 	db->wol_mode = wolinfo->wolopts;
1107*a88394cfSJeff Kirsher 	return 0;
1108*a88394cfSJeff Kirsher }
1109*a88394cfSJeff Kirsher 
1110*a88394cfSJeff Kirsher static void dmfe_ethtool_get_wol(struct net_device *dev,
1111*a88394cfSJeff Kirsher 				 struct ethtool_wolinfo *wolinfo)
1112*a88394cfSJeff Kirsher {
1113*a88394cfSJeff Kirsher 	struct dmfe_board_info *db = netdev_priv(dev);
1114*a88394cfSJeff Kirsher 
1115*a88394cfSJeff Kirsher 	wolinfo->supported = WAKE_PHY | WAKE_MAGIC;
1116*a88394cfSJeff Kirsher 	wolinfo->wolopts = db->wol_mode;
1117*a88394cfSJeff Kirsher }
1118*a88394cfSJeff Kirsher 
1119*a88394cfSJeff Kirsher 
1120*a88394cfSJeff Kirsher static const struct ethtool_ops netdev_ethtool_ops = {
1121*a88394cfSJeff Kirsher 	.get_drvinfo		= dmfe_ethtool_get_drvinfo,
1122*a88394cfSJeff Kirsher 	.get_link               = ethtool_op_get_link,
1123*a88394cfSJeff Kirsher 	.set_wol		= dmfe_ethtool_set_wol,
1124*a88394cfSJeff Kirsher 	.get_wol		= dmfe_ethtool_get_wol,
1125*a88394cfSJeff Kirsher };
1126*a88394cfSJeff Kirsher 
1127*a88394cfSJeff Kirsher /*
1128*a88394cfSJeff Kirsher  *	A periodic timer routine
1129*a88394cfSJeff Kirsher  *	Dynamic media sense, allocate Rx buffer...
1130*a88394cfSJeff Kirsher  */
1131*a88394cfSJeff Kirsher 
1132*a88394cfSJeff Kirsher static void dmfe_timer(unsigned long data)
1133*a88394cfSJeff Kirsher {
1134*a88394cfSJeff Kirsher 	u32 tmp_cr8;
1135*a88394cfSJeff Kirsher 	unsigned char tmp_cr12;
1136*a88394cfSJeff Kirsher 	struct DEVICE *dev = (struct DEVICE *) data;
1137*a88394cfSJeff Kirsher 	struct dmfe_board_info *db = netdev_priv(dev);
1138*a88394cfSJeff Kirsher  	unsigned long flags;
1139*a88394cfSJeff Kirsher 
1140*a88394cfSJeff Kirsher 	int link_ok, link_ok_phy;
1141*a88394cfSJeff Kirsher 
1142*a88394cfSJeff Kirsher 	DMFE_DBUG(0, "dmfe_timer()", 0);
1143*a88394cfSJeff Kirsher 	spin_lock_irqsave(&db->lock, flags);
1144*a88394cfSJeff Kirsher 
1145*a88394cfSJeff Kirsher 	/* Media mode processing when the link was OK before entering this routine */
1146*a88394cfSJeff Kirsher 	if (db->first_in_callback == 0) {
1147*a88394cfSJeff Kirsher 		db->first_in_callback = 1;
1148*a88394cfSJeff Kirsher 		if (db->chip_type && (db->chip_id==PCI_DM9102_ID)) {
1149*a88394cfSJeff Kirsher 			db->cr6_data &= ~0x40000;
1150*a88394cfSJeff Kirsher 			update_cr6(db->cr6_data, db->ioaddr);
1151*a88394cfSJeff Kirsher 			phy_write(db->ioaddr,
1152*a88394cfSJeff Kirsher 				  db->phy_addr, 0, 0x1000, db->chip_id);
1153*a88394cfSJeff Kirsher 			db->cr6_data |= 0x40000;
1154*a88394cfSJeff Kirsher 			update_cr6(db->cr6_data, db->ioaddr);
1155*a88394cfSJeff Kirsher 			db->timer.expires = DMFE_TIMER_WUT + HZ * 2;
1156*a88394cfSJeff Kirsher 			add_timer(&db->timer);
1157*a88394cfSJeff Kirsher 			spin_unlock_irqrestore(&db->lock, flags);
1158*a88394cfSJeff Kirsher 			return;
1159*a88394cfSJeff Kirsher 		}
1160*a88394cfSJeff Kirsher 	}
1161*a88394cfSJeff Kirsher 
1162*a88394cfSJeff Kirsher 
1163*a88394cfSJeff Kirsher 	/* Operating Mode Check */
1164*a88394cfSJeff Kirsher 	if ( (db->dm910x_chk_mode & 0x1) &&
1165*a88394cfSJeff Kirsher 		(dev->stats.rx_packets > MAX_CHECK_PACKET) )
1166*a88394cfSJeff Kirsher 		db->dm910x_chk_mode = 0x4;
1167*a88394cfSJeff Kirsher 
1168*a88394cfSJeff Kirsher 	/* Dynamic reset DM910X : system error or transmit time-out */
1169*a88394cfSJeff Kirsher 	tmp_cr8 = inl(db->ioaddr + DCR8);
1170*a88394cfSJeff Kirsher 	if ( (db->interval_rx_cnt==0) && (tmp_cr8) ) {
1171*a88394cfSJeff Kirsher 		db->reset_cr8++;
1172*a88394cfSJeff Kirsher 		db->wait_reset = 1;
1173*a88394cfSJeff Kirsher 	}
1174*a88394cfSJeff Kirsher 	db->interval_rx_cnt = 0;
1175*a88394cfSJeff Kirsher 
1176*a88394cfSJeff Kirsher 	/* TX polling kick monitor */
1177*a88394cfSJeff Kirsher 	if ( db->tx_packet_cnt &&
1178*a88394cfSJeff Kirsher 	     time_after(jiffies, dev_trans_start(dev) + DMFE_TX_KICK) ) {
1179*a88394cfSJeff Kirsher 		outl(0x1, dev->base_addr + DCR1);   /* Tx polling again */
1180*a88394cfSJeff Kirsher 
1181*a88394cfSJeff Kirsher 		/* TX Timeout */
1182*a88394cfSJeff Kirsher 		if (time_after(jiffies, dev_trans_start(dev) + DMFE_TX_TIMEOUT) ) {
1183*a88394cfSJeff Kirsher 			db->reset_TXtimeout++;
1184*a88394cfSJeff Kirsher 			db->wait_reset = 1;
1185*a88394cfSJeff Kirsher 			dev_warn(&dev->dev, "Tx timeout - resetting\n");
1186*a88394cfSJeff Kirsher 		}
1187*a88394cfSJeff Kirsher 	}
1188*a88394cfSJeff Kirsher 
1189*a88394cfSJeff Kirsher 	if (db->wait_reset) {
1190*a88394cfSJeff Kirsher 		DMFE_DBUG(0, "Dynamic Reset device", db->tx_packet_cnt);
1191*a88394cfSJeff Kirsher 		db->reset_count++;
1192*a88394cfSJeff Kirsher 		dmfe_dynamic_reset(dev);
1193*a88394cfSJeff Kirsher 		db->first_in_callback = 0;
1194*a88394cfSJeff Kirsher 		db->timer.expires = DMFE_TIMER_WUT;
1195*a88394cfSJeff Kirsher 		add_timer(&db->timer);
1196*a88394cfSJeff Kirsher 		spin_unlock_irqrestore(&db->lock, flags);
1197*a88394cfSJeff Kirsher 		return;
1198*a88394cfSJeff Kirsher 	}
1199*a88394cfSJeff Kirsher 
1200*a88394cfSJeff Kirsher 	/* Link status check, Dynamic media type change */
1201*a88394cfSJeff Kirsher 	if (db->chip_id == PCI_DM9132_ID)
1202*a88394cfSJeff Kirsher 		tmp_cr12 = inb(db->ioaddr + DCR9 + 3);	/* DM9132 */
1203*a88394cfSJeff Kirsher 	else
1204*a88394cfSJeff Kirsher 		tmp_cr12 = inb(db->ioaddr + DCR12);	/* DM9102/DM9102A */
1205*a88394cfSJeff Kirsher 
1206*a88394cfSJeff Kirsher 	if ( ((db->chip_id == PCI_DM9102_ID) &&
1207*a88394cfSJeff Kirsher 		(db->chip_revision == 0x30)) ||
1208*a88394cfSJeff Kirsher 		((db->chip_id == PCI_DM9132_ID) &&
1209*a88394cfSJeff Kirsher 		(db->chip_revision == 0x10)) ) {
1210*a88394cfSJeff Kirsher 		/* DM9102A Chip */
1211*a88394cfSJeff Kirsher 		if (tmp_cr12 & 2)
1212*a88394cfSJeff Kirsher 			link_ok = 0;
1213*a88394cfSJeff Kirsher 		else
1214*a88394cfSJeff Kirsher 			link_ok = 1;
1215*a88394cfSJeff Kirsher 	}
1216*a88394cfSJeff Kirsher 	else
1217*a88394cfSJeff Kirsher 		/* 0x43 is used instead of 0x3 because bit 6 should represent
1218*a88394cfSJeff Kirsher 		   the link status of the external PHY */
1219*a88394cfSJeff Kirsher 		link_ok = (tmp_cr12 & 0x43) ? 1 : 0;
1220*a88394cfSJeff Kirsher 
1221*a88394cfSJeff Kirsher 
1222*a88394cfSJeff Kirsher 	/* If the chip reports that the link has failed, it could be because the
1223*a88394cfSJeff Kirsher 	   external PHY link status pin is not connected correctly to the chip.
1224*a88394cfSJeff Kirsher 	   To be sure, ask the PHY too.
1225*a88394cfSJeff Kirsher 	*/
1226*a88394cfSJeff Kirsher 
1227*a88394cfSJeff Kirsher 	/* A dummy read is needed because the PHY's status register is latched */
1228*a88394cfSJeff Kirsher 	phy_read (db->ioaddr, db->phy_addr, 1, db->chip_id);
1229*a88394cfSJeff Kirsher 	link_ok_phy = (phy_read (db->ioaddr,
1230*a88394cfSJeff Kirsher 		       db->phy_addr, 1, db->chip_id) & 0x4) ? 1 : 0;
1231*a88394cfSJeff Kirsher 
1232*a88394cfSJeff Kirsher 	if (link_ok_phy != link_ok) {
1233*a88394cfSJeff Kirsher 		DMFE_DBUG (0, "PHY and chip report different link status", 0);
1234*a88394cfSJeff Kirsher 		link_ok = link_ok | link_ok_phy;
1235*a88394cfSJeff Kirsher  	}
1236*a88394cfSJeff Kirsher 
1237*a88394cfSJeff Kirsher 	if ( !link_ok && netif_carrier_ok(dev)) {
1238*a88394cfSJeff Kirsher 		/* Link Failed */
1239*a88394cfSJeff Kirsher 		DMFE_DBUG(0, "Link Failed", tmp_cr12);
1240*a88394cfSJeff Kirsher 		netif_carrier_off(dev);
1241*a88394cfSJeff Kirsher 
1242*a88394cfSJeff Kirsher 		/* For forced 10/100M half/full mode: enable auto-negotiation mode */
1243*a88394cfSJeff Kirsher 		/* Not needed for AUTO or forced 1M HomeRun/LongRun */
1244*a88394cfSJeff Kirsher 		if ( !(db->media_mode & 0x38) )
1245*a88394cfSJeff Kirsher 			phy_write(db->ioaddr, db->phy_addr,
1246*a88394cfSJeff Kirsher 				  0, 0x1000, db->chip_id);
1247*a88394cfSJeff Kirsher 
1248*a88394cfSJeff Kirsher 		/* AUTO mode, if INT phyxcer link failed, select EXT device */
1249*a88394cfSJeff Kirsher 		if (db->media_mode & DMFE_AUTO) {
1250*a88394cfSJeff Kirsher 			/* 10/100M link failed, use 1M Home-Net */
1251*a88394cfSJeff Kirsher 			db->cr6_data|=0x00040000;	/* bit18=1, MII */
1252*a88394cfSJeff Kirsher 			db->cr6_data&=~0x00000200;	/* bit9=0, HD mode */
1253*a88394cfSJeff Kirsher 			update_cr6(db->cr6_data, db->ioaddr);
1254*a88394cfSJeff Kirsher 		}
1255*a88394cfSJeff Kirsher 	} else if (!netif_carrier_ok(dev)) {
1256*a88394cfSJeff Kirsher 
1257*a88394cfSJeff Kirsher 		DMFE_DBUG(0, "Link OK", tmp_cr12);
1258*a88394cfSJeff Kirsher 
1259*a88394cfSJeff Kirsher 		/* Auto Sense Speed */
1260*a88394cfSJeff Kirsher 		if ( !(db->media_mode & DMFE_AUTO) || !dmfe_sense_speed(db)) {
1261*a88394cfSJeff Kirsher 			netif_carrier_on(dev);
1262*a88394cfSJeff Kirsher 			SHOW_MEDIA_TYPE(db->op_mode);
1263*a88394cfSJeff Kirsher 		}
1264*a88394cfSJeff Kirsher 
1265*a88394cfSJeff Kirsher 		dmfe_process_mode(db);
1266*a88394cfSJeff Kirsher 	}
1267*a88394cfSJeff Kirsher 
1268*a88394cfSJeff Kirsher 	/* HPNA remote command check */
1269*a88394cfSJeff Kirsher 	if (db->HPNA_command & 0xf00) {
1270*a88394cfSJeff Kirsher 		db->HPNA_timer--;
1271*a88394cfSJeff Kirsher 		if (!db->HPNA_timer)
1272*a88394cfSJeff Kirsher 			dmfe_HPNA_remote_cmd_chk(db);
1273*a88394cfSJeff Kirsher 	}
1274*a88394cfSJeff Kirsher 
1275*a88394cfSJeff Kirsher 	/* Timer active again */
1276*a88394cfSJeff Kirsher 	db->timer.expires = DMFE_TIMER_WUT;
1277*a88394cfSJeff Kirsher 	add_timer(&db->timer);
1278*a88394cfSJeff Kirsher 	spin_unlock_irqrestore(&db->lock, flags);
1279*a88394cfSJeff Kirsher }
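
/*
 * Summary of the periodic checks above: CR8 is treated as a fatal-error
 * indicator and schedules a dynamic reset when nothing was received in the
 * last interval, a transmit that has made no progress for DMFE_TX_KICK
 * jiffies is re-polled through CR1 (and declared a timeout after
 * DMFE_TX_TIMEOUT), and the link state is derived from both the chip
 * (CR12, or CR9+3 on the DM9132) and the PHY status register, since the
 * two can disagree when the external PHY link pin is not wired correctly.
 */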
1280*a88394cfSJeff Kirsher 
1281*a88394cfSJeff Kirsher 
1282*a88394cfSJeff Kirsher /*
1283*a88394cfSJeff Kirsher  *	Dynamic reset the DM910X board
1284*a88394cfSJeff Kirsher  *	Stop DM910X board
1285*a88394cfSJeff Kirsher  *	Free Tx/Rx allocated memory
1286*a88394cfSJeff Kirsher  *	Reset DM910X board
1287*a88394cfSJeff Kirsher  *	Re-initialize DM910X board
1288*a88394cfSJeff Kirsher  */
1289*a88394cfSJeff Kirsher 
1290*a88394cfSJeff Kirsher static void dmfe_dynamic_reset(struct DEVICE *dev)
1291*a88394cfSJeff Kirsher {
1292*a88394cfSJeff Kirsher 	struct dmfe_board_info *db = netdev_priv(dev);
1293*a88394cfSJeff Kirsher 
1294*a88394cfSJeff Kirsher 	DMFE_DBUG(0, "dmfe_dynamic_reset()", 0);
1295*a88394cfSJeff Kirsher 
1296*a88394cfSJeff Kirsher 	/* Stop MAC controller */
1297*a88394cfSJeff Kirsher 	db->cr6_data &= ~(CR6_RXSC | CR6_TXSC);	/* Disable Tx/Rx */
1298*a88394cfSJeff Kirsher 	update_cr6(db->cr6_data, dev->base_addr);
1299*a88394cfSJeff Kirsher 	outl(0, dev->base_addr + DCR7);		/* Disable Interrupt */
1300*a88394cfSJeff Kirsher 	outl(inl(dev->base_addr + DCR5), dev->base_addr + DCR5);
1301*a88394cfSJeff Kirsher 
1302*a88394cfSJeff Kirsher 	/* Disable upper layer interface */
1303*a88394cfSJeff Kirsher 	netif_stop_queue(dev);
1304*a88394cfSJeff Kirsher 
1305*a88394cfSJeff Kirsher 	/* Free allocated Rx buffers */
1306*a88394cfSJeff Kirsher 	dmfe_free_rxbuffer(db);
1307*a88394cfSJeff Kirsher 
1308*a88394cfSJeff Kirsher 	/* system variable init */
1309*a88394cfSJeff Kirsher 	db->tx_packet_cnt = 0;
1310*a88394cfSJeff Kirsher 	db->tx_queue_cnt = 0;
1311*a88394cfSJeff Kirsher 	db->rx_avail_cnt = 0;
1312*a88394cfSJeff Kirsher 	netif_carrier_off(dev);
1313*a88394cfSJeff Kirsher 	db->wait_reset = 0;
1314*a88394cfSJeff Kirsher 
1315*a88394cfSJeff Kirsher 	/* Re-initialize DM910X board */
1316*a88394cfSJeff Kirsher 	dmfe_init_dm910x(dev);
1317*a88394cfSJeff Kirsher 
1318*a88394cfSJeff Kirsher 	/* Restart upper layer interface */
1319*a88394cfSJeff Kirsher 	netif_wake_queue(dev);
1320*a88394cfSJeff Kirsher }
1321*a88394cfSJeff Kirsher 
1322*a88394cfSJeff Kirsher 
1323*a88394cfSJeff Kirsher /*
1324*a88394cfSJeff Kirsher  *	Free all allocated Rx buffers
1325*a88394cfSJeff Kirsher  */
1326*a88394cfSJeff Kirsher 
1327*a88394cfSJeff Kirsher static void dmfe_free_rxbuffer(struct dmfe_board_info * db)
1328*a88394cfSJeff Kirsher {
1329*a88394cfSJeff Kirsher 	DMFE_DBUG(0, "dmfe_free_rxbuffer()", 0);
1330*a88394cfSJeff Kirsher 
1331*a88394cfSJeff Kirsher 	/* free allocated rx buffer */
1332*a88394cfSJeff Kirsher 	while (db->rx_avail_cnt) {
1333*a88394cfSJeff Kirsher 		dev_kfree_skb(db->rx_ready_ptr->rx_skb_ptr);
1334*a88394cfSJeff Kirsher 		db->rx_ready_ptr = db->rx_ready_ptr->next_rx_desc;
1335*a88394cfSJeff Kirsher 		db->rx_avail_cnt--;
1336*a88394cfSJeff Kirsher 	}
1337*a88394cfSJeff Kirsher }
1338*a88394cfSJeff Kirsher 
1339*a88394cfSJeff Kirsher 
1340*a88394cfSJeff Kirsher /*
1341*a88394cfSJeff Kirsher  *	Reuse the SK buffer
1342*a88394cfSJeff Kirsher  */
1343*a88394cfSJeff Kirsher 
1344*a88394cfSJeff Kirsher static void dmfe_reuse_skb(struct dmfe_board_info *db, struct sk_buff * skb)
1345*a88394cfSJeff Kirsher {
1346*a88394cfSJeff Kirsher 	struct rx_desc *rxptr = db->rx_insert_ptr;
1347*a88394cfSJeff Kirsher 
1348*a88394cfSJeff Kirsher 	if (!(rxptr->rdes0 & cpu_to_le32(0x80000000))) {
1349*a88394cfSJeff Kirsher 		rxptr->rx_skb_ptr = skb;
1350*a88394cfSJeff Kirsher 		rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev,
1351*a88394cfSJeff Kirsher 			    skb->data, RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) );
1352*a88394cfSJeff Kirsher 		wmb();
1353*a88394cfSJeff Kirsher 		rxptr->rdes0 = cpu_to_le32(0x80000000);
1354*a88394cfSJeff Kirsher 		db->rx_avail_cnt++;
1355*a88394cfSJeff Kirsher 		db->rx_insert_ptr = rxptr->next_rx_desc;
1356*a88394cfSJeff Kirsher 	} else
1357*a88394cfSJeff Kirsher 		DMFE_DBUG(0, "SK Buffer reuse method error", db->rx_avail_cnt);
1358*a88394cfSJeff Kirsher }
1359*a88394cfSJeff Kirsher 
1360*a88394cfSJeff Kirsher 
1361*a88394cfSJeff Kirsher /*
1362*a88394cfSJeff Kirsher  *	Initialize transmit/receive descriptors
1363*a88394cfSJeff Kirsher  *	Using a chained structure, and allocate Tx/Rx buffers
1364*a88394cfSJeff Kirsher  */
1365*a88394cfSJeff Kirsher 
1366*a88394cfSJeff Kirsher static void dmfe_descriptor_init(struct dmfe_board_info *db, unsigned long ioaddr)
1367*a88394cfSJeff Kirsher {
1368*a88394cfSJeff Kirsher 	struct tx_desc *tmp_tx;
1369*a88394cfSJeff Kirsher 	struct rx_desc *tmp_rx;
1370*a88394cfSJeff Kirsher 	unsigned char *tmp_buf;
1371*a88394cfSJeff Kirsher 	dma_addr_t tmp_tx_dma, tmp_rx_dma;
1372*a88394cfSJeff Kirsher 	dma_addr_t tmp_buf_dma;
1373*a88394cfSJeff Kirsher 	int i;
1374*a88394cfSJeff Kirsher 
1375*a88394cfSJeff Kirsher 	DMFE_DBUG(0, "dmfe_descriptor_init()", 0);
1376*a88394cfSJeff Kirsher 
1377*a88394cfSJeff Kirsher 	/* tx descriptor start pointer */
1378*a88394cfSJeff Kirsher 	db->tx_insert_ptr = db->first_tx_desc;
1379*a88394cfSJeff Kirsher 	db->tx_remove_ptr = db->first_tx_desc;
1380*a88394cfSJeff Kirsher 	outl(db->first_tx_desc_dma, ioaddr + DCR4);     /* TX DESC address */
1381*a88394cfSJeff Kirsher 
1382*a88394cfSJeff Kirsher 	/* rx descriptor start pointer */
1383*a88394cfSJeff Kirsher 	db->first_rx_desc = (void *)db->first_tx_desc +
1384*a88394cfSJeff Kirsher 			sizeof(struct tx_desc) * TX_DESC_CNT;
1385*a88394cfSJeff Kirsher 
1386*a88394cfSJeff Kirsher 	db->first_rx_desc_dma =  db->first_tx_desc_dma +
1387*a88394cfSJeff Kirsher 			sizeof(struct tx_desc) * TX_DESC_CNT;
1388*a88394cfSJeff Kirsher 	db->rx_insert_ptr = db->first_rx_desc;
1389*a88394cfSJeff Kirsher 	db->rx_ready_ptr = db->first_rx_desc;
1390*a88394cfSJeff Kirsher 	outl(db->first_rx_desc_dma, ioaddr + DCR3);	/* RX DESC address */
1391*a88394cfSJeff Kirsher 
1392*a88394cfSJeff Kirsher 	/* Init Transmit chain */
1393*a88394cfSJeff Kirsher 	tmp_buf = db->buf_pool_start;
1394*a88394cfSJeff Kirsher 	tmp_buf_dma = db->buf_pool_dma_start;
1395*a88394cfSJeff Kirsher 	tmp_tx_dma = db->first_tx_desc_dma;
1396*a88394cfSJeff Kirsher 	for (tmp_tx = db->first_tx_desc, i = 0; i < TX_DESC_CNT; i++, tmp_tx++) {
1397*a88394cfSJeff Kirsher 		tmp_tx->tx_buf_ptr = tmp_buf;
1398*a88394cfSJeff Kirsher 		tmp_tx->tdes0 = cpu_to_le32(0);
1399*a88394cfSJeff Kirsher 		tmp_tx->tdes1 = cpu_to_le32(0x81000000);	/* IC, chain */
1400*a88394cfSJeff Kirsher 		tmp_tx->tdes2 = cpu_to_le32(tmp_buf_dma);
1401*a88394cfSJeff Kirsher 		tmp_tx_dma += sizeof(struct tx_desc);
1402*a88394cfSJeff Kirsher 		tmp_tx->tdes3 = cpu_to_le32(tmp_tx_dma);
1403*a88394cfSJeff Kirsher 		tmp_tx->next_tx_desc = tmp_tx + 1;
1404*a88394cfSJeff Kirsher 		tmp_buf = tmp_buf + TX_BUF_ALLOC;
1405*a88394cfSJeff Kirsher 		tmp_buf_dma = tmp_buf_dma + TX_BUF_ALLOC;
1406*a88394cfSJeff Kirsher 	}
1407*a88394cfSJeff Kirsher 	(--tmp_tx)->tdes3 = cpu_to_le32(db->first_tx_desc_dma);
1408*a88394cfSJeff Kirsher 	tmp_tx->next_tx_desc = db->first_tx_desc;
1409*a88394cfSJeff Kirsher 
1410*a88394cfSJeff Kirsher 	 /* Init Receive descriptor chain */
1411*a88394cfSJeff Kirsher 	tmp_rx_dma=db->first_rx_desc_dma;
1412*a88394cfSJeff Kirsher 	for (tmp_rx = db->first_rx_desc, i = 0; i < RX_DESC_CNT; i++, tmp_rx++) {
1413*a88394cfSJeff Kirsher 		tmp_rx->rdes0 = cpu_to_le32(0);
1414*a88394cfSJeff Kirsher 		tmp_rx->rdes1 = cpu_to_le32(0x01000600);
1415*a88394cfSJeff Kirsher 		tmp_rx_dma += sizeof(struct rx_desc);
1416*a88394cfSJeff Kirsher 		tmp_rx->rdes3 = cpu_to_le32(tmp_rx_dma);
1417*a88394cfSJeff Kirsher 		tmp_rx->next_rx_desc = tmp_rx + 1;
1418*a88394cfSJeff Kirsher 	}
1419*a88394cfSJeff Kirsher 	(--tmp_rx)->rdes3 = cpu_to_le32(db->first_rx_desc_dma);
1420*a88394cfSJeff Kirsher 	tmp_rx->next_rx_desc = db->first_rx_desc;
1421*a88394cfSJeff Kirsher 
1422*a88394cfSJeff Kirsher 	/* pre-allocate Rx buffer */
1423*a88394cfSJeff Kirsher 	allocate_rx_buffer(db);
1424*a88394cfSJeff Kirsher }
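
/*
 * Descriptor layout sketch, derived from the init loops above: both rings
 * share one DMA block, with the receive ring placed directly behind the
 * TX_DESC_CNT transmit descriptors.  Each descriptor's tdes3/rdes3 word
 * carries the bus address of the next descriptor and next_tx_desc /
 * next_rx_desc the corresponding virtual address; the last entry wraps
 * back to the first, so the hardware walks both rings in chained mode.
 */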
1425*a88394cfSJeff Kirsher 
1426*a88394cfSJeff Kirsher 
1427*a88394cfSJeff Kirsher /*
1428*a88394cfSJeff Kirsher  *	Update CR6 value
1429*a88394cfSJeff Kirsher  *	First stop the DM910X, then write the new value and restart
1430*a88394cfSJeff Kirsher  */
1431*a88394cfSJeff Kirsher 
1432*a88394cfSJeff Kirsher static void update_cr6(u32 cr6_data, unsigned long ioaddr)
1433*a88394cfSJeff Kirsher {
1434*a88394cfSJeff Kirsher 	u32 cr6_tmp;
1435*a88394cfSJeff Kirsher 
1436*a88394cfSJeff Kirsher 	cr6_tmp = cr6_data & ~0x2002;           /* stop Tx/Rx */
1437*a88394cfSJeff Kirsher 	outl(cr6_tmp, ioaddr + DCR6);
1438*a88394cfSJeff Kirsher 	udelay(5);
1439*a88394cfSJeff Kirsher 	outl(cr6_data, ioaddr + DCR6);
1440*a88394cfSJeff Kirsher 	udelay(5);
1441*a88394cfSJeff Kirsher }
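
/*
 * The 0x2002 mask above clears CR6 bit 1 and bit 13 - presumably the same
 * bits as the CR6_RXSC | CR6_TXSC pair used elsewhere in this driver - so
 * the MAC is quiesced for a few microseconds before the new operating mode
 * is latched.  Callers simply pass the cached value, e.g.:
 *
 *	update_cr6(db->cr6_data, db->ioaddr);
 */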
1442*a88394cfSJeff Kirsher 
1443*a88394cfSJeff Kirsher 
1444*a88394cfSJeff Kirsher /*
1445*a88394cfSJeff Kirsher  *	Send a setup frame for DM9132
1446*a88394cfSJeff Kirsher  *	This setup frame initializes the DM910X address filter mode
1447*a88394cfSJeff Kirsher */
1448*a88394cfSJeff Kirsher 
1449*a88394cfSJeff Kirsher static void dm9132_id_table(struct DEVICE *dev)
1450*a88394cfSJeff Kirsher {
1451*a88394cfSJeff Kirsher 	struct netdev_hw_addr *ha;
1452*a88394cfSJeff Kirsher 	u16 * addrptr;
1453*a88394cfSJeff Kirsher 	unsigned long ioaddr = dev->base_addr+0xc0;		/* ID Table */
1454*a88394cfSJeff Kirsher 	u32 hash_val;
1455*a88394cfSJeff Kirsher 	u16 i, hash_table[4];
1456*a88394cfSJeff Kirsher 
1457*a88394cfSJeff Kirsher 	DMFE_DBUG(0, "dm9132_id_table()", 0);
1458*a88394cfSJeff Kirsher 
1459*a88394cfSJeff Kirsher 	/* Node address */
1460*a88394cfSJeff Kirsher 	addrptr = (u16 *) dev->dev_addr;
1461*a88394cfSJeff Kirsher 	outw(addrptr[0], ioaddr);
1462*a88394cfSJeff Kirsher 	ioaddr += 4;
1463*a88394cfSJeff Kirsher 	outw(addrptr[1], ioaddr);
1464*a88394cfSJeff Kirsher 	ioaddr += 4;
1465*a88394cfSJeff Kirsher 	outw(addrptr[2], ioaddr);
1466*a88394cfSJeff Kirsher 	ioaddr += 4;
1467*a88394cfSJeff Kirsher 
1468*a88394cfSJeff Kirsher 	/* Clear Hash Table */
1469*a88394cfSJeff Kirsher 	memset(hash_table, 0, sizeof(hash_table));
1470*a88394cfSJeff Kirsher 
1471*a88394cfSJeff Kirsher 	/* broadcast address */
1472*a88394cfSJeff Kirsher 	hash_table[3] = 0x8000;
1473*a88394cfSJeff Kirsher 
1474*a88394cfSJeff Kirsher 	/* the multicast address in Hash Table : 64 bits */
1475*a88394cfSJeff Kirsher 	netdev_for_each_mc_addr(ha, dev) {
1476*a88394cfSJeff Kirsher 		hash_val = cal_CRC((char *) ha->addr, 6, 0) & 0x3f;
1477*a88394cfSJeff Kirsher 		hash_table[hash_val / 16] |= (u16) 1 << (hash_val % 16);
1478*a88394cfSJeff Kirsher 	}
1479*a88394cfSJeff Kirsher 
1480*a88394cfSJeff Kirsher 	/* Write the hash table to MAC MD table */
1481*a88394cfSJeff Kirsher 	for (i = 0; i < 4; i++, ioaddr += 4)
1482*a88394cfSJeff Kirsher 		outw(hash_table[i], ioaddr);
1483*a88394cfSJeff Kirsher }
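
/*
 * Hash placement example (illustrative only, using the formula above): a
 * multicast address whose 6-bit CRC value is 0x2a sets bit 0x2a % 16 = 10
 * of hash_table[0x2a / 16] = hash_table[2]; bit 63 (hash_table[3], bit 15)
 * is pre-set for the broadcast address.
 */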
1484*a88394cfSJeff Kirsher 
1485*a88394cfSJeff Kirsher 
1486*a88394cfSJeff Kirsher /*
1487*a88394cfSJeff Kirsher  *	Send a setup frame for DM9102/DM9102A
1488*a88394cfSJeff Kirsher  *	This setup frame initializes the DM910X address filter mode
1489*a88394cfSJeff Kirsher  */
1490*a88394cfSJeff Kirsher 
1491*a88394cfSJeff Kirsher static void send_filter_frame(struct DEVICE *dev)
1492*a88394cfSJeff Kirsher {
1493*a88394cfSJeff Kirsher 	struct dmfe_board_info *db = netdev_priv(dev);
1494*a88394cfSJeff Kirsher 	struct netdev_hw_addr *ha;
1495*a88394cfSJeff Kirsher 	struct tx_desc *txptr;
1496*a88394cfSJeff Kirsher 	u16 * addrptr;
1497*a88394cfSJeff Kirsher 	u32 * suptr;
1498*a88394cfSJeff Kirsher 	int i;
1499*a88394cfSJeff Kirsher 
1500*a88394cfSJeff Kirsher 	DMFE_DBUG(0, "send_filter_frame()", 0);
1501*a88394cfSJeff Kirsher 
1502*a88394cfSJeff Kirsher 	txptr = db->tx_insert_ptr;
1503*a88394cfSJeff Kirsher 	suptr = (u32 *) txptr->tx_buf_ptr;
1504*a88394cfSJeff Kirsher 
1505*a88394cfSJeff Kirsher 	/* Node address */
1506*a88394cfSJeff Kirsher 	addrptr = (u16 *) dev->dev_addr;
1507*a88394cfSJeff Kirsher 	*suptr++ = addrptr[0];
1508*a88394cfSJeff Kirsher 	*suptr++ = addrptr[1];
1509*a88394cfSJeff Kirsher 	*suptr++ = addrptr[2];
1510*a88394cfSJeff Kirsher 
1511*a88394cfSJeff Kirsher 	/* broadcast address */
1512*a88394cfSJeff Kirsher 	*suptr++ = 0xffff;
1513*a88394cfSJeff Kirsher 	*suptr++ = 0xffff;
1514*a88394cfSJeff Kirsher 	*suptr++ = 0xffff;
1515*a88394cfSJeff Kirsher 
1516*a88394cfSJeff Kirsher 	/* Fill in the multicast addresses */
1517*a88394cfSJeff Kirsher 	netdev_for_each_mc_addr(ha, dev) {
1518*a88394cfSJeff Kirsher 		addrptr = (u16 *) ha->addr;
1519*a88394cfSJeff Kirsher 		*suptr++ = addrptr[0];
1520*a88394cfSJeff Kirsher 		*suptr++ = addrptr[1];
1521*a88394cfSJeff Kirsher 		*suptr++ = addrptr[2];
1522*a88394cfSJeff Kirsher 	}
1523*a88394cfSJeff Kirsher 
1524*a88394cfSJeff Kirsher 	for (i = netdev_mc_count(dev); i < 14; i++) {
1525*a88394cfSJeff Kirsher 		*suptr++ = 0xffff;
1526*a88394cfSJeff Kirsher 		*suptr++ = 0xffff;
1527*a88394cfSJeff Kirsher 		*suptr++ = 0xffff;
1528*a88394cfSJeff Kirsher 	}
1529*a88394cfSJeff Kirsher 
1530*a88394cfSJeff Kirsher 	/* prepare the setup frame */
1531*a88394cfSJeff Kirsher 	db->tx_insert_ptr = txptr->next_tx_desc;
1532*a88394cfSJeff Kirsher 	txptr->tdes1 = cpu_to_le32(0x890000c0);
1533*a88394cfSJeff Kirsher 
1534*a88394cfSJeff Kirsher 	/* Resource Check and Send the setup packet */
1535*a88394cfSJeff Kirsher 	if (!db->tx_packet_cnt) {
1536*a88394cfSJeff Kirsher 		/* No transmit in progress: send the setup frame immediately */
1537*a88394cfSJeff Kirsher 		db->tx_packet_cnt++;
1538*a88394cfSJeff Kirsher 		txptr->tdes0 = cpu_to_le32(0x80000000);
1539*a88394cfSJeff Kirsher 		update_cr6(db->cr6_data | 0x2000, dev->base_addr);
1540*a88394cfSJeff Kirsher 		outl(0x1, dev->base_addr + DCR1);	/* Issue Tx polling */
1541*a88394cfSJeff Kirsher 		update_cr6(db->cr6_data, dev->base_addr);
1542*a88394cfSJeff Kirsher 		dev->trans_start = jiffies;
1543*a88394cfSJeff Kirsher 	} else
1544*a88394cfSJeff Kirsher 		db->tx_queue_cnt++;	/* Put in TX queue */
1545*a88394cfSJeff Kirsher }
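
/*
 * Setup-frame layout used above: 16 perfect-filter entries of three 16-bit
 * words each, every word stored in the low half of a 32-bit slot - the node
 * address, one broadcast entry, the multicast list, and broadcast padding
 * for the unused slots (16 entries * 12 bytes = 192 bytes).  The tdes1
 * value 0x890000c0 appears to mark the buffer as a setup frame of 0xc0
 * (192) bytes; if another transmit is already in flight the frame is only
 * queued and sent later.
 */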
1546*a88394cfSJeff Kirsher 
1547*a88394cfSJeff Kirsher 
1548*a88394cfSJeff Kirsher /*
1549*a88394cfSJeff Kirsher  *	Allocate Rx buffers,
1550*a88394cfSJeff Kirsher  *	allocating as many as possible
1551*a88394cfSJeff Kirsher  */
1552*a88394cfSJeff Kirsher 
1553*a88394cfSJeff Kirsher static void allocate_rx_buffer(struct dmfe_board_info *db)
1554*a88394cfSJeff Kirsher {
1555*a88394cfSJeff Kirsher 	struct rx_desc *rxptr;
1556*a88394cfSJeff Kirsher 	struct sk_buff *skb;
1557*a88394cfSJeff Kirsher 
1558*a88394cfSJeff Kirsher 	rxptr = db->rx_insert_ptr;
1559*a88394cfSJeff Kirsher 
1560*a88394cfSJeff Kirsher 	while(db->rx_avail_cnt < RX_DESC_CNT) {
1561*a88394cfSJeff Kirsher 		if ( ( skb = dev_alloc_skb(RX_ALLOC_SIZE) ) == NULL )
1562*a88394cfSJeff Kirsher 			break;
1563*a88394cfSJeff Kirsher 		rxptr->rx_skb_ptr = skb; /* FIXME (?) */
1564*a88394cfSJeff Kirsher 		rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev, skb->data,
1565*a88394cfSJeff Kirsher 				    RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) );
1566*a88394cfSJeff Kirsher 		wmb();
1567*a88394cfSJeff Kirsher 		rxptr->rdes0 = cpu_to_le32(0x80000000);
1568*a88394cfSJeff Kirsher 		rxptr = rxptr->next_rx_desc;
1569*a88394cfSJeff Kirsher 		db->rx_avail_cnt++;
1570*a88394cfSJeff Kirsher 	}
1571*a88394cfSJeff Kirsher 
1572*a88394cfSJeff Kirsher 	db->rx_insert_ptr = rxptr;
1573*a88394cfSJeff Kirsher }
1574*a88394cfSJeff Kirsher 
1575*a88394cfSJeff Kirsher 
1576*a88394cfSJeff Kirsher /*
1577*a88394cfSJeff Kirsher  *	Read one word data from the serial ROM
1578*a88394cfSJeff Kirsher  */
1579*a88394cfSJeff Kirsher 
1580*a88394cfSJeff Kirsher static u16 read_srom_word(long ioaddr, int offset)
1581*a88394cfSJeff Kirsher {
1582*a88394cfSJeff Kirsher 	int i;
1583*a88394cfSJeff Kirsher 	u16 srom_data = 0;
1584*a88394cfSJeff Kirsher 	long cr9_ioaddr = ioaddr + DCR9;
1585*a88394cfSJeff Kirsher 
1586*a88394cfSJeff Kirsher 	outl(CR9_SROM_READ, cr9_ioaddr);
1587*a88394cfSJeff Kirsher 	outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr);
1588*a88394cfSJeff Kirsher 
1589*a88394cfSJeff Kirsher 	/* Send the Read Command 110b */
1590*a88394cfSJeff Kirsher 	SROM_CLK_WRITE(SROM_DATA_1, cr9_ioaddr);
1591*a88394cfSJeff Kirsher 	SROM_CLK_WRITE(SROM_DATA_1, cr9_ioaddr);
1592*a88394cfSJeff Kirsher 	SROM_CLK_WRITE(SROM_DATA_0, cr9_ioaddr);
1593*a88394cfSJeff Kirsher 
1594*a88394cfSJeff Kirsher 	/* Send the offset */
1595*a88394cfSJeff Kirsher 	for (i = 5; i >= 0; i--) {
1596*a88394cfSJeff Kirsher 		srom_data = (offset & (1 << i)) ? SROM_DATA_1 : SROM_DATA_0;
1597*a88394cfSJeff Kirsher 		SROM_CLK_WRITE(srom_data, cr9_ioaddr);
1598*a88394cfSJeff Kirsher 	}
1599*a88394cfSJeff Kirsher 
1600*a88394cfSJeff Kirsher 	outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr);
1601*a88394cfSJeff Kirsher 
1602*a88394cfSJeff Kirsher 	for (i = 16; i > 0; i--) {
1603*a88394cfSJeff Kirsher 		outl(CR9_SROM_READ | CR9_SRCS | CR9_SRCLK, cr9_ioaddr);
1604*a88394cfSJeff Kirsher 		udelay(5);
1605*a88394cfSJeff Kirsher 		srom_data = (srom_data << 1) |
1606*a88394cfSJeff Kirsher 				((inl(cr9_ioaddr) & CR9_CRDOUT) ? 1 : 0);
1607*a88394cfSJeff Kirsher 		outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr);
1608*a88394cfSJeff Kirsher 		udelay(5);
1609*a88394cfSJeff Kirsher 	}
1610*a88394cfSJeff Kirsher 
1611*a88394cfSJeff Kirsher 	outl(CR9_SROM_READ, cr9_ioaddr);
1612*a88394cfSJeff Kirsher 	return srom_data;
1613*a88394cfSJeff Kirsher }
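
/*
 * The sequence above follows the usual 93C46-style serial EEPROM read:
 * select the chip through CR9, clock out the "110" read opcode and a 6-bit
 * word offset MSB first, then clock 16 data bits back in MSB first.
 */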
1614*a88394cfSJeff Kirsher 
1615*a88394cfSJeff Kirsher 
1616*a88394cfSJeff Kirsher /*
1617*a88394cfSJeff Kirsher  *	Auto sense the media mode
1618*a88394cfSJeff Kirsher  */
1619*a88394cfSJeff Kirsher 
1620*a88394cfSJeff Kirsher static u8 dmfe_sense_speed(struct dmfe_board_info * db)
1621*a88394cfSJeff Kirsher {
1622*a88394cfSJeff Kirsher 	u8 ErrFlag = 0;
1623*a88394cfSJeff Kirsher 	u16 phy_mode;
1624*a88394cfSJeff Kirsher 
1625*a88394cfSJeff Kirsher 	/* CR6 bit18=0, select 10/100M */
1626*a88394cfSJeff Kirsher 	update_cr6( (db->cr6_data & ~0x40000), db->ioaddr);
1627*a88394cfSJeff Kirsher 
1628*a88394cfSJeff Kirsher 	phy_mode = phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id);
1629*a88394cfSJeff Kirsher 	phy_mode = phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id);
1630*a88394cfSJeff Kirsher 
1631*a88394cfSJeff Kirsher 	if ( (phy_mode & 0x24) == 0x24 ) {
1632*a88394cfSJeff Kirsher 		if (db->chip_id == PCI_DM9132_ID)	/* DM9132 */
1633*a88394cfSJeff Kirsher 			phy_mode = phy_read(db->ioaddr,
1634*a88394cfSJeff Kirsher 				    db->phy_addr, 7, db->chip_id) & 0xf000;
1635*a88394cfSJeff Kirsher 		else 				/* DM9102/DM9102A */
1636*a88394cfSJeff Kirsher 			phy_mode = phy_read(db->ioaddr,
1637*a88394cfSJeff Kirsher 				    db->phy_addr, 17, db->chip_id) & 0xf000;
1638*a88394cfSJeff Kirsher 		switch (phy_mode) {
1639*a88394cfSJeff Kirsher 		case 0x1000: db->op_mode = DMFE_10MHF; break;
1640*a88394cfSJeff Kirsher 		case 0x2000: db->op_mode = DMFE_10MFD; break;
1641*a88394cfSJeff Kirsher 		case 0x4000: db->op_mode = DMFE_100MHF; break;
1642*a88394cfSJeff Kirsher 		case 0x8000: db->op_mode = DMFE_100MFD; break;
1643*a88394cfSJeff Kirsher 		default: db->op_mode = DMFE_10MHF;
1644*a88394cfSJeff Kirsher 			ErrFlag = 1;
1645*a88394cfSJeff Kirsher 			break;
1646*a88394cfSJeff Kirsher 		}
1647*a88394cfSJeff Kirsher 	} else {
1648*a88394cfSJeff Kirsher 		db->op_mode = DMFE_10MHF;
1649*a88394cfSJeff Kirsher 		DMFE_DBUG(0, "Link Failed :", phy_mode);
1650*a88394cfSJeff Kirsher 		ErrFlag = 1;
1651*a88394cfSJeff Kirsher 	}
1652*a88394cfSJeff Kirsher 
1653*a88394cfSJeff Kirsher 	return ErrFlag;
1654*a88394cfSJeff Kirsher }
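
/*
 * Note: PHY register 1 (status) is read twice above because its link bit
 * is latched; the 0x24 mask tests the auto-negotiation-complete and
 * link-status bits together.  The resolved speed/duplex is then taken from
 * the top nibble of a vendor register (7 on the DM9132, 17 on the
 * DM9102/DM9102A).
 */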
1655*a88394cfSJeff Kirsher 
1656*a88394cfSJeff Kirsher 
1657*a88394cfSJeff Kirsher /*
1658*a88394cfSJeff Kirsher  *	Set 10/100 phyxcer capability
1659*a88394cfSJeff Kirsher  *	AUTO mode : phyxcer register4 is NIC capability
1660*a88394cfSJeff Kirsher  *	Force mode: phyxcer register4 is the forced media
1661*a88394cfSJeff Kirsher  */
1662*a88394cfSJeff Kirsher 
1663*a88394cfSJeff Kirsher static void dmfe_set_phyxcer(struct dmfe_board_info *db)
1664*a88394cfSJeff Kirsher {
1665*a88394cfSJeff Kirsher 	u16 phy_reg;
1666*a88394cfSJeff Kirsher 
1667*a88394cfSJeff Kirsher 	/* Select 10/100M phyxcer */
1668*a88394cfSJeff Kirsher 	db->cr6_data &= ~0x40000;
1669*a88394cfSJeff Kirsher 	update_cr6(db->cr6_data, db->ioaddr);
1670*a88394cfSJeff Kirsher 
1671*a88394cfSJeff Kirsher 	/* DM9009 Chip: Phyxcer reg18 bit12=0 */
1672*a88394cfSJeff Kirsher 	if (db->chip_id == PCI_DM9009_ID) {
1673*a88394cfSJeff Kirsher 		phy_reg = phy_read(db->ioaddr,
1674*a88394cfSJeff Kirsher 				   db->phy_addr, 18, db->chip_id) & ~0x1000;
1675*a88394cfSJeff Kirsher 
1676*a88394cfSJeff Kirsher 		phy_write(db->ioaddr,
1677*a88394cfSJeff Kirsher 			  db->phy_addr, 18, phy_reg, db->chip_id);
1678*a88394cfSJeff Kirsher 	}
1679*a88394cfSJeff Kirsher 
1680*a88394cfSJeff Kirsher 	/* Phyxcer capability setting */
1681*a88394cfSJeff Kirsher 	phy_reg = phy_read(db->ioaddr, db->phy_addr, 4, db->chip_id) & ~0x01e0;
1682*a88394cfSJeff Kirsher 
1683*a88394cfSJeff Kirsher 	if (db->media_mode & DMFE_AUTO) {
1684*a88394cfSJeff Kirsher 		/* AUTO Mode */
1685*a88394cfSJeff Kirsher 		phy_reg |= db->PHY_reg4;
1686*a88394cfSJeff Kirsher 	} else {
1687*a88394cfSJeff Kirsher 		/* Force Mode */
1688*a88394cfSJeff Kirsher 		switch(db->media_mode) {
1689*a88394cfSJeff Kirsher 		case DMFE_10MHF: phy_reg |= 0x20; break;
1690*a88394cfSJeff Kirsher 		case DMFE_10MFD: phy_reg |= 0x40; break;
1691*a88394cfSJeff Kirsher 		case DMFE_100MHF: phy_reg |= 0x80; break;
1692*a88394cfSJeff Kirsher 		case DMFE_100MFD: phy_reg |= 0x100; break;
1693*a88394cfSJeff Kirsher 		}
1694*a88394cfSJeff Kirsher 		if (db->chip_id == PCI_DM9009_ID) phy_reg &= 0x61;
1695*a88394cfSJeff Kirsher 	}
1696*a88394cfSJeff Kirsher 
1697*a88394cfSJeff Kirsher   	/* Write new capability to Phyxcer Reg4 */
1698*a88394cfSJeff Kirsher 	if ( !(phy_reg & 0x01e0)) {
1699*a88394cfSJeff Kirsher 		phy_reg|=db->PHY_reg4;
1700*a88394cfSJeff Kirsher 		db->media_mode|=DMFE_AUTO;
1701*a88394cfSJeff Kirsher 	}
1702*a88394cfSJeff Kirsher 	phy_write(db->ioaddr, db->phy_addr, 4, phy_reg, db->chip_id);
1703*a88394cfSJeff Kirsher 
1704*a88394cfSJeff Kirsher  	/* Restart Auto-Negotiation */
1705*a88394cfSJeff Kirsher 	if ( db->chip_type && (db->chip_id == PCI_DM9102_ID) )
1706*a88394cfSJeff Kirsher 		phy_write(db->ioaddr, db->phy_addr, 0, 0x1800, db->chip_id);
1707*a88394cfSJeff Kirsher 	if ( !db->chip_type )
1708*a88394cfSJeff Kirsher 		phy_write(db->ioaddr, db->phy_addr, 0, 0x1200, db->chip_id);
1709*a88394cfSJeff Kirsher }
1710*a88394cfSJeff Kirsher 
1711*a88394cfSJeff Kirsher 
1712*a88394cfSJeff Kirsher /*
1713*a88394cfSJeff Kirsher  *	Process op-mode
1714*a88394cfSJeff Kirsher  *	AUTO mode : PHY controller in Auto-negotiation Mode
1715*a88394cfSJeff Kirsher  *	Force mode: PHY controller in force mode with HUB
1716*a88394cfSJeff Kirsher  *			N-way force capability with SWITCH
1717*a88394cfSJeff Kirsher  */
1718*a88394cfSJeff Kirsher 
1719*a88394cfSJeff Kirsher static void dmfe_process_mode(struct dmfe_board_info *db)
1720*a88394cfSJeff Kirsher {
1721*a88394cfSJeff Kirsher 	u16 phy_reg;
1722*a88394cfSJeff Kirsher 
1723*a88394cfSJeff Kirsher 	/* Full Duplex Mode Check */
1724*a88394cfSJeff Kirsher 	if (db->op_mode & 0x4)
1725*a88394cfSJeff Kirsher 		db->cr6_data |= CR6_FDM;	/* Set Full Duplex Bit */
1726*a88394cfSJeff Kirsher 	else
1727*a88394cfSJeff Kirsher 		db->cr6_data &= ~CR6_FDM;	/* Clear Full Duplex Bit */
1728*a88394cfSJeff Kirsher 
1729*a88394cfSJeff Kirsher 	/* Transceiver Selection */
1730*a88394cfSJeff Kirsher 	if (db->op_mode & 0x10)		/* 1M HomePNA */
1731*a88394cfSJeff Kirsher 		db->cr6_data |= 0x40000;/* External MII select */
1732*a88394cfSJeff Kirsher 	else
1733*a88394cfSJeff Kirsher 		db->cr6_data &= ~0x40000;/* Internal 10/100 transceiver */
1734*a88394cfSJeff Kirsher 
1735*a88394cfSJeff Kirsher 	update_cr6(db->cr6_data, db->ioaddr);
1736*a88394cfSJeff Kirsher 
1737*a88394cfSJeff Kirsher 	/* 10/100M phyxcer force mode needed */
1738*a88394cfSJeff Kirsher 	if ( !(db->media_mode & 0x18)) {
1739*a88394cfSJeff Kirsher 		/* Force Mode */
1740*a88394cfSJeff Kirsher 		phy_reg = phy_read(db->ioaddr, db->phy_addr, 6, db->chip_id);
1741*a88394cfSJeff Kirsher 		if ( !(phy_reg & 0x1) ) {
1742*a88394cfSJeff Kirsher 			/* partner without N-Way capability */
1743*a88394cfSJeff Kirsher 			phy_reg = 0x0;
1744*a88394cfSJeff Kirsher 			switch(db->op_mode) {
1745*a88394cfSJeff Kirsher 			case DMFE_10MHF: phy_reg = 0x0; break;
1746*a88394cfSJeff Kirsher 			case DMFE_10MFD: phy_reg = 0x100; break;
1747*a88394cfSJeff Kirsher 			case DMFE_100MHF: phy_reg = 0x2000; break;
1748*a88394cfSJeff Kirsher 			case DMFE_100MFD: phy_reg = 0x2100; break;
1749*a88394cfSJeff Kirsher 			}
1750*a88394cfSJeff Kirsher 			phy_write(db->ioaddr,
1751*a88394cfSJeff Kirsher 				  db->phy_addr, 0, phy_reg, db->chip_id);
1752*a88394cfSJeff Kirsher        			if ( db->chip_type && (db->chip_id == PCI_DM9102_ID) )
1753*a88394cfSJeff Kirsher 				mdelay(20);
1754*a88394cfSJeff Kirsher 			phy_write(db->ioaddr,
1755*a88394cfSJeff Kirsher 				  db->phy_addr, 0, phy_reg, db->chip_id);
1756*a88394cfSJeff Kirsher 		}
1757*a88394cfSJeff Kirsher 	}
1758*a88394cfSJeff Kirsher }
1759*a88394cfSJeff Kirsher 
1760*a88394cfSJeff Kirsher 
1761*a88394cfSJeff Kirsher /*
1762*a88394cfSJeff Kirsher  *	Write a word to Phy register
1763*a88394cfSJeff Kirsher  */
1764*a88394cfSJeff Kirsher 
1765*a88394cfSJeff Kirsher static void phy_write(unsigned long iobase, u8 phy_addr, u8 offset,
1766*a88394cfSJeff Kirsher 		      u16 phy_data, u32 chip_id)
1767*a88394cfSJeff Kirsher {
1768*a88394cfSJeff Kirsher 	u16 i;
1769*a88394cfSJeff Kirsher 	unsigned long ioaddr;
1770*a88394cfSJeff Kirsher 
1771*a88394cfSJeff Kirsher 	if (chip_id == PCI_DM9132_ID) {
1772*a88394cfSJeff Kirsher 		ioaddr = iobase + 0x80 + offset * 4;
1773*a88394cfSJeff Kirsher 		outw(phy_data, ioaddr);
1774*a88394cfSJeff Kirsher 	} else {
1775*a88394cfSJeff Kirsher 		/* DM9102/DM9102A Chip */
1776*a88394cfSJeff Kirsher 		ioaddr = iobase + DCR9;
1777*a88394cfSJeff Kirsher 
1778*a88394cfSJeff Kirsher 		/* Send 35 synchronization clock pulses (preamble) to the PHY controller */
1779*a88394cfSJeff Kirsher 		for (i = 0; i < 35; i++)
1780*a88394cfSJeff Kirsher 			phy_write_1bit(ioaddr, PHY_DATA_1);
1781*a88394cfSJeff Kirsher 
1782*a88394cfSJeff Kirsher 		/* Send start command(01) to Phy */
1783*a88394cfSJeff Kirsher 		phy_write_1bit(ioaddr, PHY_DATA_0);
1784*a88394cfSJeff Kirsher 		phy_write_1bit(ioaddr, PHY_DATA_1);
1785*a88394cfSJeff Kirsher 
1786*a88394cfSJeff Kirsher 		/* Send write command(01) to Phy */
1787*a88394cfSJeff Kirsher 		phy_write_1bit(ioaddr, PHY_DATA_0);
1788*a88394cfSJeff Kirsher 		phy_write_1bit(ioaddr, PHY_DATA_1);
1789*a88394cfSJeff Kirsher 
1790*a88394cfSJeff Kirsher 		/* Send Phy address */
1791*a88394cfSJeff Kirsher 		for (i = 0x10; i > 0; i = i >> 1)
1792*a88394cfSJeff Kirsher 			phy_write_1bit(ioaddr,
1793*a88394cfSJeff Kirsher 				       phy_addr & i ? PHY_DATA_1 : PHY_DATA_0);
1794*a88394cfSJeff Kirsher 
1795*a88394cfSJeff Kirsher 		/* Send register address */
1796*a88394cfSJeff Kirsher 		for (i = 0x10; i > 0; i = i >> 1)
1797*a88394cfSJeff Kirsher 			phy_write_1bit(ioaddr,
1798*a88394cfSJeff Kirsher 				       offset & i ? PHY_DATA_1 : PHY_DATA_0);
1799*a88394cfSJeff Kirsher 
1800*a88394cfSJeff Kirsher 		/* Write turnaround transition (10) */
1801*a88394cfSJeff Kirsher 		phy_write_1bit(ioaddr, PHY_DATA_1);
1802*a88394cfSJeff Kirsher 		phy_write_1bit(ioaddr, PHY_DATA_0);
1803*a88394cfSJeff Kirsher 
1804*a88394cfSJeff Kirsher 		/* Write a word data to PHY controller */
1805*a88394cfSJeff Kirsher 		for ( i = 0x8000; i > 0; i >>= 1)
1806*a88394cfSJeff Kirsher 			phy_write_1bit(ioaddr,
1807*a88394cfSJeff Kirsher 				       phy_data & i ? PHY_DATA_1 : PHY_DATA_0);
1808*a88394cfSJeff Kirsher 	}
1809*a88394cfSJeff Kirsher }
1810*a88394cfSJeff Kirsher 
1811*a88394cfSJeff Kirsher 
1812*a88394cfSJeff Kirsher /*
1813*a88394cfSJeff Kirsher  *	Read a word data from phy register
1814*a88394cfSJeff Kirsher  */
1815*a88394cfSJeff Kirsher 
1816*a88394cfSJeff Kirsher static u16 phy_read(unsigned long iobase, u8 phy_addr, u8 offset, u32 chip_id)
1817*a88394cfSJeff Kirsher {
1818*a88394cfSJeff Kirsher 	int i;
1819*a88394cfSJeff Kirsher 	u16 phy_data;
1820*a88394cfSJeff Kirsher 	unsigned long ioaddr;
1821*a88394cfSJeff Kirsher 
1822*a88394cfSJeff Kirsher 	if (chip_id == PCI_DM9132_ID) {
1823*a88394cfSJeff Kirsher 		/* DM9132 Chip */
1824*a88394cfSJeff Kirsher 		ioaddr = iobase + 0x80 + offset * 4;
1825*a88394cfSJeff Kirsher 		phy_data = inw(ioaddr);
1826*a88394cfSJeff Kirsher 	} else {
1827*a88394cfSJeff Kirsher 		/* DM9102/DM9102A Chip */
1828*a88394cfSJeff Kirsher 		ioaddr = iobase + DCR9;
1829*a88394cfSJeff Kirsher 
1830*a88394cfSJeff Kirsher 		/* Send 35 synchronization clock pulses (preamble) to the PHY controller */
1831*a88394cfSJeff Kirsher 		for (i = 0; i < 35; i++)
1832*a88394cfSJeff Kirsher 			phy_write_1bit(ioaddr, PHY_DATA_1);
1833*a88394cfSJeff Kirsher 
1834*a88394cfSJeff Kirsher 		/* Send start command(01) to Phy */
1835*a88394cfSJeff Kirsher 		phy_write_1bit(ioaddr, PHY_DATA_0);
1836*a88394cfSJeff Kirsher 		phy_write_1bit(ioaddr, PHY_DATA_1);
1837*a88394cfSJeff Kirsher 
1838*a88394cfSJeff Kirsher 		/* Send read command(10) to Phy */
1839*a88394cfSJeff Kirsher 		phy_write_1bit(ioaddr, PHY_DATA_1);
1840*a88394cfSJeff Kirsher 		phy_write_1bit(ioaddr, PHY_DATA_0);
1841*a88394cfSJeff Kirsher 
1842*a88394cfSJeff Kirsher 		/* Send Phy address */
1843*a88394cfSJeff Kirsher 		for (i = 0x10; i > 0; i = i >> 1)
1844*a88394cfSJeff Kirsher 			phy_write_1bit(ioaddr,
1845*a88394cfSJeff Kirsher 				       phy_addr & i ? PHY_DATA_1 : PHY_DATA_0);
1846*a88394cfSJeff Kirsher 
1847*a88394cfSJeff Kirsher 		/* Send register address */
1848*a88394cfSJeff Kirsher 		for (i = 0x10; i > 0; i = i >> 1)
1849*a88394cfSJeff Kirsher 			phy_write_1bit(ioaddr,
1850*a88394cfSJeff Kirsher 				       offset & i ? PHY_DATA_1 : PHY_DATA_0);
1851*a88394cfSJeff Kirsher 
1852*a88394cfSJeff Kirsher 		/* Skip transition state */
1853*a88394cfSJeff Kirsher 		phy_read_1bit(ioaddr);
1854*a88394cfSJeff Kirsher 
1855*a88394cfSJeff Kirsher 		/* read 16bit data */
1856*a88394cfSJeff Kirsher 		for (phy_data = 0, i = 0; i < 16; i++) {
1857*a88394cfSJeff Kirsher 			phy_data <<= 1;
1858*a88394cfSJeff Kirsher 			phy_data |= phy_read_1bit(ioaddr);
1859*a88394cfSJeff Kirsher 		}
1860*a88394cfSJeff Kirsher 	}
1861*a88394cfSJeff Kirsher 
1862*a88394cfSJeff Kirsher 	return phy_data;
1863*a88394cfSJeff Kirsher }
1864*a88394cfSJeff Kirsher 
1865*a88394cfSJeff Kirsher 
1866*a88394cfSJeff Kirsher /*
1867*a88394cfSJeff Kirsher  *	Write one bit data to Phy Controller
1868*a88394cfSJeff Kirsher  */
1869*a88394cfSJeff Kirsher 
1870*a88394cfSJeff Kirsher static void phy_write_1bit(unsigned long ioaddr, u32 phy_data)
1871*a88394cfSJeff Kirsher {
1872*a88394cfSJeff Kirsher 	outl(phy_data, ioaddr);			/* MII Clock Low */
1873*a88394cfSJeff Kirsher 	udelay(1);
1874*a88394cfSJeff Kirsher 	outl(phy_data | MDCLKH, ioaddr);	/* MII Clock High */
1875*a88394cfSJeff Kirsher 	udelay(1);
1876*a88394cfSJeff Kirsher 	outl(phy_data, ioaddr);			/* MII Clock Low */
1877*a88394cfSJeff Kirsher 	udelay(1);
1878*a88394cfSJeff Kirsher }
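
/*
 * Bit-banged MII management write: the data bit is held on the MDIO line
 * while the management clock is toggled low-high-low with ~1us settle
 * time, so the PHY samples the bit on the rising clock edge.
 */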
1879*a88394cfSJeff Kirsher 
1880*a88394cfSJeff Kirsher 
1881*a88394cfSJeff Kirsher /*
1882*a88394cfSJeff Kirsher  *	Read one bit phy data from PHY controller
1883*a88394cfSJeff Kirsher  */
1884*a88394cfSJeff Kirsher 
1885*a88394cfSJeff Kirsher static u16 phy_read_1bit(unsigned long ioaddr)
1886*a88394cfSJeff Kirsher {
1887*a88394cfSJeff Kirsher 	u16 phy_data;
1888*a88394cfSJeff Kirsher 
1889*a88394cfSJeff Kirsher 	outl(0x50000, ioaddr);
1890*a88394cfSJeff Kirsher 	udelay(1);
1891*a88394cfSJeff Kirsher 	phy_data = ( inl(ioaddr) >> 19 ) & 0x1;
1892*a88394cfSJeff Kirsher 	outl(0x40000, ioaddr);
1893*a88394cfSJeff Kirsher 	udelay(1);
1894*a88394cfSJeff Kirsher 
1895*a88394cfSJeff Kirsher 	return phy_data;
1896*a88394cfSJeff Kirsher }
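
/*
 * Bit-banged MII management read: bit 16 of CR9 is toggled as the
 * management clock while bit 18 (0x40000) stays set - presumably keeping
 * the data pin in input mode - and the PHY's bit is sampled from bit 19
 * while the clock is high.
 */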
1897*a88394cfSJeff Kirsher 
1898*a88394cfSJeff Kirsher 
1899*a88394cfSJeff Kirsher /*
1900*a88394cfSJeff Kirsher  *	Parse the SROM and media mode
1901*a88394cfSJeff Kirsher  */
1902*a88394cfSJeff Kirsher 
1903*a88394cfSJeff Kirsher static void dmfe_parse_srom(struct dmfe_board_info * db)
1904*a88394cfSJeff Kirsher {
1905*a88394cfSJeff Kirsher 	char * srom = db->srom;
1906*a88394cfSJeff Kirsher 	int dmfe_mode, tmp_reg;
1907*a88394cfSJeff Kirsher 
1908*a88394cfSJeff Kirsher 	DMFE_DBUG(0, "dmfe_parse_srom() ", 0);
1909*a88394cfSJeff Kirsher 
1910*a88394cfSJeff Kirsher 	/* Init CR15 */
1911*a88394cfSJeff Kirsher 	db->cr15_data = CR15_DEFAULT;
1912*a88394cfSJeff Kirsher 
1913*a88394cfSJeff Kirsher 	/* Check SROM Version */
1914*a88394cfSJeff Kirsher 	if ( ( (int) srom[18] & 0xff) == SROM_V41_CODE) {
1915*a88394cfSJeff Kirsher 		/* SROM V4.01 */
1916*a88394cfSJeff Kirsher 		/* Get NIC support media mode */
1917*a88394cfSJeff Kirsher 		db->NIC_capability = le16_to_cpup((__le16 *) (srom + 34));
1918*a88394cfSJeff Kirsher 		db->PHY_reg4 = 0;
1919*a88394cfSJeff Kirsher 		for (tmp_reg = 1; tmp_reg < 0x10; tmp_reg <<= 1) {
1920*a88394cfSJeff Kirsher 			switch( db->NIC_capability & tmp_reg ) {
1921*a88394cfSJeff Kirsher 			case 0x1: db->PHY_reg4 |= 0x0020; break;
1922*a88394cfSJeff Kirsher 			case 0x2: db->PHY_reg4 |= 0x0040; break;
1923*a88394cfSJeff Kirsher 			case 0x4: db->PHY_reg4 |= 0x0080; break;
1924*a88394cfSJeff Kirsher 			case 0x8: db->PHY_reg4 |= 0x0100; break;
1925*a88394cfSJeff Kirsher 			}
1926*a88394cfSJeff Kirsher 		}
1927*a88394cfSJeff Kirsher 
1928*a88394cfSJeff Kirsher 		/* Check whether the media mode is forced or not */
1929*a88394cfSJeff Kirsher 		dmfe_mode = (le32_to_cpup((__le32 *) (srom + 34)) &
1930*a88394cfSJeff Kirsher 			     le32_to_cpup((__le32 *) (srom + 36)));
1931*a88394cfSJeff Kirsher 		switch(dmfe_mode) {
1932*a88394cfSJeff Kirsher 		case 0x4: dmfe_media_mode = DMFE_100MHF; break;	/* 100MHF */
1933*a88394cfSJeff Kirsher 		case 0x2: dmfe_media_mode = DMFE_10MFD; break;	/* 10MFD */
1934*a88394cfSJeff Kirsher 		case 0x8: dmfe_media_mode = DMFE_100MFD; break;	/* 100MFD */
1935*a88394cfSJeff Kirsher 		case 0x100:
1936*a88394cfSJeff Kirsher 		case 0x200: dmfe_media_mode = DMFE_1M_HPNA; break;/* HomePNA */
1937*a88394cfSJeff Kirsher 		}
1938*a88394cfSJeff Kirsher 
1939*a88394cfSJeff Kirsher 		/* Special Function setting */
1940*a88394cfSJeff Kirsher 		/* VLAN function */
1941*a88394cfSJeff Kirsher 		if ( (SF_mode & 0x1) || (srom[43] & 0x80) )
1942*a88394cfSJeff Kirsher 			db->cr15_data |= 0x40;
1943*a88394cfSJeff Kirsher 
1944*a88394cfSJeff Kirsher 		/* Flow Control */
1945*a88394cfSJeff Kirsher 		if ( (SF_mode & 0x2) || (srom[40] & 0x1) )
1946*a88394cfSJeff Kirsher 			db->cr15_data |= 0x400;
1947*a88394cfSJeff Kirsher 
1948*a88394cfSJeff Kirsher 		/* TX pause packet */
1949*a88394cfSJeff Kirsher 		if ( (SF_mode & 0x4) || (srom[40] & 0xe) )
1950*a88394cfSJeff Kirsher 			db->cr15_data |= 0x9800;
1951*a88394cfSJeff Kirsher 	}
1952*a88394cfSJeff Kirsher 
1953*a88394cfSJeff Kirsher 	/* Parse HPNA parameter */
1954*a88394cfSJeff Kirsher 	db->HPNA_command = 1;
1955*a88394cfSJeff Kirsher 
1956*a88394cfSJeff Kirsher 	/* Accept remote command or not */
1957*a88394cfSJeff Kirsher 	if (HPNA_rx_cmd == 0)
1958*a88394cfSJeff Kirsher 		db->HPNA_command |= 0x8000;
1959*a88394cfSJeff Kirsher 
1960*a88394cfSJeff Kirsher 	 /* Issue remote command & operation mode */
1961*a88394cfSJeff Kirsher 	if (HPNA_tx_cmd == 1)
1962*a88394cfSJeff Kirsher 		switch(HPNA_mode) {	/* Issue Remote Command */
1963*a88394cfSJeff Kirsher 		case 0: db->HPNA_command |= 0x0904; break;
1964*a88394cfSJeff Kirsher 		case 1: db->HPNA_command |= 0x0a00; break;
1965*a88394cfSJeff Kirsher 		case 2: db->HPNA_command |= 0x0506; break;
1966*a88394cfSJeff Kirsher 		case 3: db->HPNA_command |= 0x0602; break;
1967*a88394cfSJeff Kirsher 		}
1968*a88394cfSJeff Kirsher 	else
1969*a88394cfSJeff Kirsher 		switch(HPNA_mode) {	/* Don't Issue */
1970*a88394cfSJeff Kirsher 		case 0: db->HPNA_command |= 0x0004; break;
1971*a88394cfSJeff Kirsher 		case 1: db->HPNA_command |= 0x0000; break;
1972*a88394cfSJeff Kirsher 		case 2: db->HPNA_command |= 0x0006; break;
1973*a88394cfSJeff Kirsher 		case 3: db->HPNA_command |= 0x0002; break;
1974*a88394cfSJeff Kirsher 		}
1975*a88394cfSJeff Kirsher 
1976*a88394cfSJeff Kirsher 	/* Check DM9801 or DM9802 present or not */
1977*a88394cfSJeff Kirsher 	db->HPNA_present = 0;
1978*a88394cfSJeff Kirsher 	update_cr6(db->cr6_data|0x40000, db->ioaddr);
1979*a88394cfSJeff Kirsher 	tmp_reg = phy_read(db->ioaddr, db->phy_addr, 3, db->chip_id);
1980*a88394cfSJeff Kirsher 	if ( ( tmp_reg & 0xfff0 ) == 0xb900 ) {
1981*a88394cfSJeff Kirsher 		/* DM9801 or DM9802 present */
1982*a88394cfSJeff Kirsher 		db->HPNA_timer = 8;
1983*a88394cfSJeff Kirsher 		if ( phy_read(db->ioaddr, db->phy_addr, 31, db->chip_id) == 0x4404) {
1984*a88394cfSJeff Kirsher 			/* DM9801 HomeRun */
1985*a88394cfSJeff Kirsher 			db->HPNA_present = 1;
1986*a88394cfSJeff Kirsher 			dmfe_program_DM9801(db, tmp_reg);
1987*a88394cfSJeff Kirsher 		} else {
1988*a88394cfSJeff Kirsher 			/* DM9802 LongRun */
1989*a88394cfSJeff Kirsher 			db->HPNA_present = 2;
1990*a88394cfSJeff Kirsher 			dmfe_program_DM9802(db);
1991*a88394cfSJeff Kirsher 		}
1992*a88394cfSJeff Kirsher 	}
1993*a88394cfSJeff Kirsher 
1994*a88394cfSJeff Kirsher }
1995*a88394cfSJeff Kirsher 
1996*a88394cfSJeff Kirsher 
1997*a88394cfSJeff Kirsher /*
1998*a88394cfSJeff Kirsher  *	Init HomeRun DM9801
1999*a88394cfSJeff Kirsher  */
2000*a88394cfSJeff Kirsher 
2001*a88394cfSJeff Kirsher static void dmfe_program_DM9801(struct dmfe_board_info * db, int HPNA_rev)
2002*a88394cfSJeff Kirsher {
2003*a88394cfSJeff Kirsher 	uint reg17, reg25;
2004*a88394cfSJeff Kirsher 
2005*a88394cfSJeff Kirsher 	if ( !HPNA_NoiseFloor ) HPNA_NoiseFloor = DM9801_NOISE_FLOOR;
2006*a88394cfSJeff Kirsher 	switch(HPNA_rev) {
2007*a88394cfSJeff Kirsher 	case 0xb900: /* DM9801 E3 */
2008*a88394cfSJeff Kirsher 		db->HPNA_command |= 0x1000;
2009*a88394cfSJeff Kirsher 		reg25 = phy_read(db->ioaddr, db->phy_addr, 24, db->chip_id);
2010*a88394cfSJeff Kirsher 		reg25 = ( (reg25 + HPNA_NoiseFloor) & 0xff) | 0xf000;
2011*a88394cfSJeff Kirsher 		reg17 = phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id);
2012*a88394cfSJeff Kirsher 		break;
2013*a88394cfSJeff Kirsher 	case 0xb901: /* DM9801 E4 */
2014*a88394cfSJeff Kirsher 		reg25 = phy_read(db->ioaddr, db->phy_addr, 25, db->chip_id);
2015*a88394cfSJeff Kirsher 		reg25 = (reg25 & 0xff00) + HPNA_NoiseFloor;
2016*a88394cfSJeff Kirsher 		reg17 = phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id);
2017*a88394cfSJeff Kirsher 		reg17 = (reg17 & 0xfff0) + HPNA_NoiseFloor + 3;
2018*a88394cfSJeff Kirsher 		break;
2019*a88394cfSJeff Kirsher 	case 0xb902: /* DM9801 E5 */
2020*a88394cfSJeff Kirsher 	case 0xb903: /* DM9801 E6 */
2021*a88394cfSJeff Kirsher 	default:
2022*a88394cfSJeff Kirsher 		db->HPNA_command |= 0x1000;
2023*a88394cfSJeff Kirsher 		reg25 = phy_read(db->ioaddr, db->phy_addr, 25, db->chip_id);
2024*a88394cfSJeff Kirsher 		reg25 = (reg25 & 0xff00) + HPNA_NoiseFloor - 5;
2025*a88394cfSJeff Kirsher 		reg17 = phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id);
2026*a88394cfSJeff Kirsher 		reg17 = (reg17 & 0xfff0) + HPNA_NoiseFloor;
2027*a88394cfSJeff Kirsher 		break;
2028*a88394cfSJeff Kirsher 	}
2029*a88394cfSJeff Kirsher 	phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command, db->chip_id);
2030*a88394cfSJeff Kirsher 	phy_write(db->ioaddr, db->phy_addr, 17, reg17, db->chip_id);
2031*a88394cfSJeff Kirsher 	phy_write(db->ioaddr, db->phy_addr, 25, reg25, db->chip_id);
2032*a88394cfSJeff Kirsher }
2033*a88394cfSJeff Kirsher 
2034*a88394cfSJeff Kirsher 
2035*a88394cfSJeff Kirsher /*
2036*a88394cfSJeff Kirsher  *	Init LongRun DM9802
2037*a88394cfSJeff Kirsher  */
2038*a88394cfSJeff Kirsher 
2039*a88394cfSJeff Kirsher static void dmfe_program_DM9802(struct dmfe_board_info * db)
2040*a88394cfSJeff Kirsher {
2041*a88394cfSJeff Kirsher 	uint phy_reg;
2042*a88394cfSJeff Kirsher 
2043*a88394cfSJeff Kirsher 	if ( !HPNA_NoiseFloor ) HPNA_NoiseFloor = DM9802_NOISE_FLOOR;
2044*a88394cfSJeff Kirsher 	phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command, db->chip_id);
2045*a88394cfSJeff Kirsher 	phy_reg = phy_read(db->ioaddr, db->phy_addr, 25, db->chip_id);
2046*a88394cfSJeff Kirsher 	phy_reg = ( phy_reg & 0xff00) + HPNA_NoiseFloor;
2047*a88394cfSJeff Kirsher 	phy_write(db->ioaddr, db->phy_addr, 25, phy_reg, db->chip_id);
2048*a88394cfSJeff Kirsher }
2049*a88394cfSJeff Kirsher 
2050*a88394cfSJeff Kirsher 
2051*a88394cfSJeff Kirsher /*
2052*a88394cfSJeff Kirsher  *	Check remote HPNA power and speed status. If not correct,
2053*a88394cfSJeff Kirsher  *	issue command again.
2054*a88394cfSJeff Kirsher */
2055*a88394cfSJeff Kirsher 
2056*a88394cfSJeff Kirsher static void dmfe_HPNA_remote_cmd_chk(struct dmfe_board_info * db)
2057*a88394cfSJeff Kirsher {
2058*a88394cfSJeff Kirsher 	uint phy_reg;
2059*a88394cfSJeff Kirsher 
2060*a88394cfSJeff Kirsher 	/* Get remote device status */
2061*a88394cfSJeff Kirsher 	phy_reg = phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id) & 0x60;
2062*a88394cfSJeff Kirsher 	switch(phy_reg) {
2063*a88394cfSJeff Kirsher 	case 0x00: phy_reg = 0x0a00;break; /* LP/LS */
2064*a88394cfSJeff Kirsher 	case 0x20: phy_reg = 0x0900;break; /* LP/HS */
2065*a88394cfSJeff Kirsher 	case 0x40: phy_reg = 0x0600;break; /* HP/LS */
2066*a88394cfSJeff Kirsher 	case 0x60: phy_reg = 0x0500;break; /* HP/HS */
2067*a88394cfSJeff Kirsher 	}
2068*a88394cfSJeff Kirsher 
2069*a88394cfSJeff Kirsher 	/* Check whether the remote device status matches our setting or not */
2070*a88394cfSJeff Kirsher 	if ( phy_reg != (db->HPNA_command & 0x0f00) ) {
2071*a88394cfSJeff Kirsher 		phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command,
2072*a88394cfSJeff Kirsher 			  db->chip_id);
2073*a88394cfSJeff Kirsher 		db->HPNA_timer=8;
2074*a88394cfSJeff Kirsher 	} else
2075*a88394cfSJeff Kirsher 		db->HPNA_timer=600;	/* Match, every 10 minutes, check */
2076*a88394cfSJeff Kirsher }
2077*a88394cfSJeff Kirsher 
2078*a88394cfSJeff Kirsher 
2079*a88394cfSJeff Kirsher 
2080*a88394cfSJeff Kirsher static DEFINE_PCI_DEVICE_TABLE(dmfe_pci_tbl) = {
2081*a88394cfSJeff Kirsher 	{ 0x1282, 0x9132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9132_ID },
2082*a88394cfSJeff Kirsher 	{ 0x1282, 0x9102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9102_ID },
2083*a88394cfSJeff Kirsher 	{ 0x1282, 0x9100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9100_ID },
2084*a88394cfSJeff Kirsher 	{ 0x1282, 0x9009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9009_ID },
2085*a88394cfSJeff Kirsher 	{ 0, }
2086*a88394cfSJeff Kirsher };
2087*a88394cfSJeff Kirsher MODULE_DEVICE_TABLE(pci, dmfe_pci_tbl);
2088*a88394cfSJeff Kirsher 
2089*a88394cfSJeff Kirsher 
2090*a88394cfSJeff Kirsher #ifdef CONFIG_PM
2091*a88394cfSJeff Kirsher static int dmfe_suspend(struct pci_dev *pci_dev, pm_message_t state)
2092*a88394cfSJeff Kirsher {
2093*a88394cfSJeff Kirsher 	struct net_device *dev = pci_get_drvdata(pci_dev);
2094*a88394cfSJeff Kirsher 	struct dmfe_board_info *db = netdev_priv(dev);
2095*a88394cfSJeff Kirsher 	u32 tmp;
2096*a88394cfSJeff Kirsher 
2097*a88394cfSJeff Kirsher 	/* Disable upper layer interface */
2098*a88394cfSJeff Kirsher 	netif_device_detach(dev);
2099*a88394cfSJeff Kirsher 
2100*a88394cfSJeff Kirsher 	/* Disable Tx/Rx */
2101*a88394cfSJeff Kirsher 	db->cr6_data &= ~(CR6_RXSC | CR6_TXSC);
2102*a88394cfSJeff Kirsher 	update_cr6(db->cr6_data, dev->base_addr);
2103*a88394cfSJeff Kirsher 
2104*a88394cfSJeff Kirsher 	/* Disable Interrupt */
2105*a88394cfSJeff Kirsher 	outl(0, dev->base_addr + DCR7);
2106*a88394cfSJeff Kirsher 	outl(inl (dev->base_addr + DCR5), dev->base_addr + DCR5);
2107*a88394cfSJeff Kirsher 
2108*a88394cfSJeff Kirsher 	/* Free RX buffers */
2109*a88394cfSJeff Kirsher 	dmfe_free_rxbuffer(db);
2110*a88394cfSJeff Kirsher 
2111*a88394cfSJeff Kirsher 	/* Enable WOL */
2112*a88394cfSJeff Kirsher 	pci_read_config_dword(pci_dev, 0x40, &tmp);
2113*a88394cfSJeff Kirsher 	tmp &= ~(DMFE_WOL_LINKCHANGE|DMFE_WOL_MAGICPACKET);
2114*a88394cfSJeff Kirsher 
2115*a88394cfSJeff Kirsher 	if (db->wol_mode & WAKE_PHY)
2116*a88394cfSJeff Kirsher 		tmp |= DMFE_WOL_LINKCHANGE;
2117*a88394cfSJeff Kirsher 	if (db->wol_mode & WAKE_MAGIC)
2118*a88394cfSJeff Kirsher 		tmp |= DMFE_WOL_MAGICPACKET;
2119*a88394cfSJeff Kirsher 
2120*a88394cfSJeff Kirsher 	pci_write_config_dword(pci_dev, 0x40, tmp);
2121*a88394cfSJeff Kirsher 
2122*a88394cfSJeff Kirsher 	pci_enable_wake(pci_dev, PCI_D3hot, 1);
2123*a88394cfSJeff Kirsher 	pci_enable_wake(pci_dev, PCI_D3cold, 1);
2124*a88394cfSJeff Kirsher 
2125*a88394cfSJeff Kirsher 	/* Power down the device */
2126*a88394cfSJeff Kirsher 	pci_save_state(pci_dev);
2127*a88394cfSJeff Kirsher 	pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state));
2128*a88394cfSJeff Kirsher 
2129*a88394cfSJeff Kirsher 	return 0;
2130*a88394cfSJeff Kirsher }
2131*a88394cfSJeff Kirsher 
2132*a88394cfSJeff Kirsher static int dmfe_resume(struct pci_dev *pci_dev)
2133*a88394cfSJeff Kirsher {
2134*a88394cfSJeff Kirsher 	struct net_device *dev = pci_get_drvdata(pci_dev);
2135*a88394cfSJeff Kirsher 	u32 tmp;
2136*a88394cfSJeff Kirsher 
2137*a88394cfSJeff Kirsher 	pci_set_power_state(pci_dev, PCI_D0);
2138*a88394cfSJeff Kirsher 	pci_restore_state(pci_dev);
2139*a88394cfSJeff Kirsher 
2140*a88394cfSJeff Kirsher 	/* Re-initialize DM910X board */
2141*a88394cfSJeff Kirsher 	dmfe_init_dm910x(dev);
2142*a88394cfSJeff Kirsher 
2143*a88394cfSJeff Kirsher 	/* Disable WOL */
2144*a88394cfSJeff Kirsher 	pci_read_config_dword(pci_dev, 0x40, &tmp);
2145*a88394cfSJeff Kirsher 
2146*a88394cfSJeff Kirsher 	tmp &= ~(DMFE_WOL_LINKCHANGE | DMFE_WOL_MAGICPACKET);
2147*a88394cfSJeff Kirsher 	pci_write_config_dword(pci_dev, 0x40, tmp);
2148*a88394cfSJeff Kirsher 
2149*a88394cfSJeff Kirsher 	pci_enable_wake(pci_dev, PCI_D3hot, 0);
2150*a88394cfSJeff Kirsher 	pci_enable_wake(pci_dev, PCI_D3cold, 0);
2151*a88394cfSJeff Kirsher 
2152*a88394cfSJeff Kirsher 	/* Restart upper layer interface */
2153*a88394cfSJeff Kirsher 	netif_device_attach(dev);
2154*a88394cfSJeff Kirsher 
2155*a88394cfSJeff Kirsher 	return 0;
2156*a88394cfSJeff Kirsher }
2157*a88394cfSJeff Kirsher #else
2158*a88394cfSJeff Kirsher #define dmfe_suspend NULL
2159*a88394cfSJeff Kirsher #define dmfe_resume NULL
2160*a88394cfSJeff Kirsher #endif
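/* Without CONFIG_PM, dmfe_suspend/dmfe_resume are NULL, so the pci_driver
 * below registers no power-management callbacks. */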
2161*a88394cfSJeff Kirsher 
2162*a88394cfSJeff Kirsher static struct pci_driver dmfe_driver = {
2163*a88394cfSJeff Kirsher 	.name		= "dmfe",
2164*a88394cfSJeff Kirsher 	.id_table	= dmfe_pci_tbl,
2165*a88394cfSJeff Kirsher 	.probe		= dmfe_init_one,
2166*a88394cfSJeff Kirsher 	.remove		= __devexit_p(dmfe_remove_one),
2167*a88394cfSJeff Kirsher 	.suspend	= dmfe_suspend,
2168*a88394cfSJeff Kirsher 	.resume		= dmfe_resume
2169*a88394cfSJeff Kirsher };
2170*a88394cfSJeff Kirsher 
2171*a88394cfSJeff Kirsher MODULE_AUTHOR("Sten Wang, sten_wang@davicom.com.tw");
2172*a88394cfSJeff Kirsher MODULE_DESCRIPTION("Davicom DM910X fast ethernet driver");
2173*a88394cfSJeff Kirsher MODULE_LICENSE("GPL");
2174*a88394cfSJeff Kirsher MODULE_VERSION(DRV_VERSION);
2175*a88394cfSJeff Kirsher 
2176*a88394cfSJeff Kirsher module_param(debug, int, 0);
2177*a88394cfSJeff Kirsher module_param(mode, byte, 0);
2178*a88394cfSJeff Kirsher module_param(cr6set, int, 0);
2179*a88394cfSJeff Kirsher module_param(chkmode, byte, 0);
2180*a88394cfSJeff Kirsher module_param(HPNA_mode, byte, 0);
2181*a88394cfSJeff Kirsher module_param(HPNA_rx_cmd, byte, 0);
2182*a88394cfSJeff Kirsher module_param(HPNA_tx_cmd, byte, 0);
2183*a88394cfSJeff Kirsher module_param(HPNA_NoiseFloor, byte, 0);
2184*a88394cfSJeff Kirsher module_param(SF_mode, byte, 0);
2185*a88394cfSJeff Kirsher MODULE_PARM_DESC(debug, "Davicom DM9xxx enable debugging (0-1)");
2186*a88394cfSJeff Kirsher MODULE_PARM_DESC(mode, "Davicom DM9xxx: "
2187*a88394cfSJeff Kirsher 		"Bit 0: 10/100Mbps, bit 2: duplex, bit 8: HomePNA");
2188*a88394cfSJeff Kirsher 
2189*a88394cfSJeff Kirsher MODULE_PARM_DESC(SF_mode, "Davicom DM9xxx special function "
2190*a88394cfSJeff Kirsher 		"(bit 0: VLAN, bit 1: Flow Control, bit 2: TX pause packet)");
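/*
 *	Illustrative usage (hypothetical values, not a recommendation):
 *	"modprobe dmfe debug=1 SF_mode=1" would enable debug output and the
 *	VLAN special function described above; any "mode" value outside the
 *	recognized media modes falls back to DMFE_AUTO, as the switch in
 *	dmfe_init_module() below shows.
 */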
2191*a88394cfSJeff Kirsher 
2192*a88394cfSJeff Kirsher /*	Description:
2193*a88394cfSJeff Kirsher  *	When the user loads the module with insmod, the system invokes
2194*a88394cfSJeff Kirsher  *	init_module() to initialize and register the driver.
2195*a88394cfSJeff Kirsher  */
2196*a88394cfSJeff Kirsher 
2197*a88394cfSJeff Kirsher static int __init dmfe_init_module(void)
2198*a88394cfSJeff Kirsher {
2199*a88394cfSJeff Kirsher 	int rc;
2200*a88394cfSJeff Kirsher 
2201*a88394cfSJeff Kirsher 	pr_info("%s\n", version);
2202*a88394cfSJeff Kirsher 	printed_version = 1;
2203*a88394cfSJeff Kirsher 
2204*a88394cfSJeff Kirsher 	DMFE_DBUG(0, "init_module() ", debug);
2205*a88394cfSJeff Kirsher 
2206*a88394cfSJeff Kirsher 	if (debug)
2207*a88394cfSJeff Kirsher 		dmfe_debug = debug;	/* set debug flag */
2208*a88394cfSJeff Kirsher 	if (cr6set)
2209*a88394cfSJeff Kirsher 		dmfe_cr6_user_set = cr6set;
2210*a88394cfSJeff Kirsher 
2211*a88394cfSJeff Kirsher 	switch (mode) {
2212*a88394cfSJeff Kirsher 	case DMFE_10MHF:
2213*a88394cfSJeff Kirsher 	case DMFE_100MHF:
2214*a88394cfSJeff Kirsher 	case DMFE_10MFD:
2215*a88394cfSJeff Kirsher 	case DMFE_100MFD:
2216*a88394cfSJeff Kirsher 	case DMFE_1M_HPNA:
2217*a88394cfSJeff Kirsher 		dmfe_media_mode = mode;
2218*a88394cfSJeff Kirsher 		break;
2219*a88394cfSJeff Kirsher 	default: dmfe_media_mode = DMFE_AUTO;
2220*a88394cfSJeff Kirsher 		break;
2221*a88394cfSJeff Kirsher 	}
2222*a88394cfSJeff Kirsher 
2223*a88394cfSJeff Kirsher 	if (HPNA_mode > 4)
2224*a88394cfSJeff Kirsher 		HPNA_mode = 0;		/* Default: LP/HS */
2225*a88394cfSJeff Kirsher 	if (HPNA_rx_cmd > 1)
2226*a88394cfSJeff Kirsher 		HPNA_rx_cmd = 0;	/* Default: Ignored remote cmd */
2227*a88394cfSJeff Kirsher 	if (HPNA_tx_cmd > 1)
2228*a88394cfSJeff Kirsher 		HPNA_tx_cmd = 0;	/* Default: Don't issue remote cmd */
2229*a88394cfSJeff Kirsher 	if (HPNA_NoiseFloor > 15)
2230*a88394cfSJeff Kirsher 		HPNA_NoiseFloor = 0;
2231*a88394cfSJeff Kirsher 
2232*a88394cfSJeff Kirsher 	rc = pci_register_driver(&dmfe_driver);
2233*a88394cfSJeff Kirsher 	if (rc < 0)
2234*a88394cfSJeff Kirsher 		return rc;
2235*a88394cfSJeff Kirsher 
2236*a88394cfSJeff Kirsher 	return 0;
2237*a88394cfSJeff Kirsher }
2238*a88394cfSJeff Kirsher 
2239*a88394cfSJeff Kirsher 
2240*a88394cfSJeff Kirsher /*
2241*a88394cfSJeff Kirsher  *	Description:
2242*a88394cfSJeff Kirsher  *	When the user removes the module with rmmod, the system invokes the
2243*a88394cfSJeff Kirsher  *	cleanup handler below to unregister all registered services.
2244*a88394cfSJeff Kirsher  */
2245*a88394cfSJeff Kirsher 
2246*a88394cfSJeff Kirsher static void __exit dmfe_cleanup_module(void)
2247*a88394cfSJeff Kirsher {
2248*a88394cfSJeff Kirsher 	DMFE_DBUG(0, "dmfe_cleanup_module() ", debug);
2249*a88394cfSJeff Kirsher 	pci_unregister_driver(&dmfe_driver);
2250*a88394cfSJeff Kirsher }
2251*a88394cfSJeff Kirsher 
2252*a88394cfSJeff Kirsher module_init(dmfe_init_module);
2253*a88394cfSJeff Kirsher module_exit(dmfe_cleanup_module);
2254