/*
 * tc35815.c: A TOSHIBA TC35815CF PCI 10/100Mbps ethernet driver for linux.
 *
 * Based on skeleton.c by Donald Becker.
 *
 * This driver is a replacement for an older, less-maintained version.
 * This is the header of the older version:
 *	-----<snip>-----
 *	Copyright 2001 MontaVista Software Inc.
 *	Author: MontaVista Software, Inc.
 *		ahennessy@mvista.com
 *	Copyright (C) 2000-2001 Toshiba Corporation
 *	static const char *version =
 *		"tc35815.c:v0.00 26/07/2000 by Toshiba Corporation\n";
 *	-----<snip>-----
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * (C) Copyright TOSHIBA CORPORATION 2004-2005
 * All Rights Reserved.
 */

#define DRV_VERSION	"1.39"
static const char version[] = "tc35815.c:v" DRV_VERSION "\n";
#define MODNAME			"tc35815"

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/if_vlan.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/phy.h>
#include <linux/workqueue.h>
#include <linux/platform_device.h>
#include <linux/prefetch.h>
#include <asm/io.h>
#include <asm/byteorder.h>

enum tc35815_chiptype {
	TC35815CF = 0,
	TC35815_NWU,
	TC35815_TX4939,
};

/* indexed by tc35815_chiptype, above */
static const struct {
	const char *name;
} chip_info[] = {
	{ "TOSHIBA TC35815CF 10/100BaseTX" },
	{ "TOSHIBA TC35815 with Wake on LAN" },
	{ "TOSHIBA TC35815/TX4939" },
};

static const struct pci_device_id tc35815_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_TC35815CF), .driver_data = TC35815CF },
	{PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_TC35815_NWU), .driver_data = TC35815_NWU },
	{PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_TC35815_TX4939), .driver_data = TC35815_TX4939 },
	{0,}
};
MODULE_DEVICE_TABLE(pci, tc35815_pci_tbl);

/* see MODULE_PARM_DESC */
static struct tc35815_options {
	int speed;
	int duplex;
} options;
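
/*
 * As wired up by the mask logic in tc_mii_probe() below: speed=10 or
 * speed=100 restricts the link to that speed; duplex=1 forces half
 * duplex and duplex=2 forces full duplex; 0 (the default) leaves
 * autonegotiation unrestricted.  The module_param/MODULE_PARM_DESC
 * declarations referenced above live later in this file.
 */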

/*
 * Registers
 */
struct tc35815_regs {
	__u32 DMA_Ctl;		/* 0x00 */
	__u32 TxFrmPtr;
	__u32 TxThrsh;
	__u32 TxPollCtr;
	__u32 BLFrmPtr;
	__u32 RxFragSize;
	__u32 Int_En;
	__u32 FDA_Bas;
	__u32 FDA_Lim;		/* 0x20 */
	__u32 Int_Src;
	__u32 unused0[2];
	__u32 PauseCnt;
	__u32 RemPauCnt;
	__u32 TxCtlFrmStat;
	__u32 unused1;
	__u32 MAC_Ctl;		/* 0x40 */
	__u32 CAM_Ctl;
	__u32 Tx_Ctl;
	__u32 Tx_Stat;
	__u32 Rx_Ctl;
	__u32 Rx_Stat;
	__u32 MD_Data;
	__u32 MD_CA;
	__u32 CAM_Adr;		/* 0x60 */
	__u32 CAM_Data;
	__u32 CAM_Ena;
	__u32 PROM_Ctl;
	__u32 PROM_Data;
	__u32 Algn_Cnt;
	__u32 CRC_Cnt;
	__u32 Miss_Cnt;
};

/*
 * Bit assignments
 */
/* DMA_Ctl bit assign ------------------------------------------------------- */
#define DMA_RxAlign	       0x00c00000 /* 1:Reception Alignment	     */
#define DMA_RxAlign_1	       0x00400000
#define DMA_RxAlign_2	       0x00800000
#define DMA_RxAlign_3	       0x00c00000
#define DMA_M66EnStat	       0x00080000 /* 1:66MHz Enable State	     */
#define DMA_IntMask	       0x00040000 /* 1:Interrupt mask		     */
#define DMA_SWIntReq	       0x00020000 /* 1:Software Interrupt request    */
#define DMA_TxWakeUp	       0x00010000 /* 1:Transmit Wake Up		     */
#define DMA_RxBigE	       0x00008000 /* 1:Receive Big Endian	     */
#define DMA_TxBigE	       0x00004000 /* 1:Transmit Big Endian	     */
#define DMA_TestMode	       0x00002000 /* 1:Test Mode		     */
#define DMA_PowrMgmnt	       0x00001000 /* 1:Power Management		     */
#define DMA_DmBurst_Mask       0x000001fc /* DMA Burst size		     */

/* RxFragSize bit assign ---------------------------------------------------- */
#define RxFrag_EnPack	       0x00008000 /* 1:Enable Packing		     */
#define RxFrag_MinFragMask     0x00000ffc /* Minimum Fragment		     */

/* MAC_Ctl bit assign ------------------------------------------------------- */
#define MAC_Link10	       0x00008000 /* 1:Link Status 10Mbits	     */
#define MAC_EnMissRoll	       0x00002000 /* 1:Enable Missed Roll	     */
#define MAC_MissRoll	       0x00000400 /* 1:Missed Roll		     */
#define MAC_Loop10	       0x00000080 /* 1:Loop 10 Mbps		     */
#define MAC_Conn_Auto	       0x00000000 /*00:Connection mode (Automatic)   */
#define MAC_Conn_10M	       0x00000020 /*01:		       (10Mbps endec)*/
#define MAC_Conn_Mll	       0x00000040 /*10:		       (MII clock)   */
#define MAC_MacLoop	       0x00000010 /* 1:MAC Loopback		     */
#define MAC_FullDup	       0x00000008 /* 1:Full Duplex 0:Half Duplex     */
#define MAC_Reset	       0x00000004 /* 1:Software Reset		     */
#define MAC_HaltImm	       0x00000002 /* 1:Halt Immediate		     */
#define MAC_HaltReq	       0x00000001 /* 1:Halt request		     */

/* PROM_Ctl bit assign ------------------------------------------------------ */
#define PROM_Busy	       0x00008000 /* 1:Busy (Start Operation)	     */
#define PROM_Read	       0x00004000 /*10:Read operation		     */
#define PROM_Write	       0x00002000 /*01:Write operation		     */
#define PROM_Erase	       0x00006000 /*11:Erase operation		     */
					  /*00:Enable or Disable Writing,    */
					  /*	  as specified in PROM_Addr. */
#define PROM_Addr_Ena	       0x00000030 /*11xxxx:PROM Write enable	     */
					  /*00xxxx:	      disable	     */

/* CAM_Ctl bit assign ------------------------------------------------------- */
#define CAM_CompEn	       0x00000010 /* 1:CAM Compare Enable	     */
#define CAM_NegCAM	       0x00000008 /* 1:Reject packets CAM recognizes,*/
					  /*			accept other */
#define CAM_BroadAcc	       0x00000004 /* 1:Broadcast accept		     */
#define CAM_GroupAcc	       0x00000002 /* 1:Multicast accept		     */
#define CAM_StationAcc	       0x00000001 /* 1:Unicast accept		     */

/* CAM_Ena bit assign ------------------------------------------------------- */
#define CAM_ENTRY_MAX		       21   /* CAM Data entry max count	     */
#define CAM_Ena_Mask ((1<<CAM_ENTRY_MAX)-1) /* CAM Enable bits (Max 21bits)  */
#define CAM_Ena_Bit(index)	(1 << (index))
#define CAM_ENTRY_DESTINATION	0
#define CAM_ENTRY_SOURCE	1
#define CAM_ENTRY_MACCTL	20

/* Tx_Ctl bit assign -------------------------------------------------------- */
#define Tx_En		       0x00000001 /* 1:Transmit enable		     */
#define Tx_TxHalt	       0x00000002 /* 1:Transmit Halt Request	     */
#define Tx_NoPad	       0x00000004 /* 1:Suppress Padding		     */
#define Tx_NoCRC	       0x00000008 /* 1:Suppress CRC		     */
#define Tx_FBack	       0x00000010 /* 1:Fast Back-off		     */
#define Tx_EnUnder	       0x00000100 /* 1:Enable Underrun		     */
#define Tx_EnExDefer	       0x00000200 /* 1:Enable Excessive Deferral     */
#define Tx_EnLCarr	       0x00000400 /* 1:Enable Lost Carrier	     */
#define Tx_EnExColl	       0x00000800 /* 1:Enable Excessive Collision    */
#define Tx_EnLateColl	       0x00001000 /* 1:Enable Late Collision	     */
#define Tx_EnTxPar	       0x00002000 /* 1:Enable Transmit Parity	     */
#define Tx_EnComp	       0x00004000 /* 1:Enable Completion	     */

/* Tx_Stat bit assign ------------------------------------------------------- */
#define Tx_TxColl_MASK	       0x0000000F /* Tx Collision Count		     */
#define Tx_ExColl	       0x00000010 /* Excessive Collision	     */
#define Tx_TXDefer	       0x00000020 /* Transmit Deferred		     */
#define Tx_Paused	       0x00000040 /* Transmit Paused		     */
#define Tx_IntTx	       0x00000080 /* Interrupt on Tx		     */
#define Tx_Under	       0x00000100 /* Underrun			     */
#define Tx_Defer	       0x00000200 /* Deferral			     */
#define Tx_NCarr	       0x00000400 /* No Carrier			     */
#define Tx_10Stat	       0x00000800 /* 10Mbps Status		     */
#define Tx_LateColl	       0x00001000 /* Late Collision		     */
#define Tx_TxPar	       0x00002000 /* Tx Parity Error		     */
#define Tx_Comp		       0x00004000 /* Completion			     */
#define Tx_Halted	       0x00008000 /* Tx Halted			     */
#define Tx_SQErr	       0x00010000 /* Signal Quality Error (SQE)	     */

/* Rx_Ctl bit assign -------------------------------------------------------- */
#define Rx_EnGood	       0x00004000 /* 1:Enable Good		     */
#define Rx_EnRxPar	       0x00002000 /* 1:Enable Receive Parity	     */
#define Rx_EnLongErr	       0x00000800 /* 1:Enable Long Error	     */
#define Rx_EnOver	       0x00000400 /* 1:Enable OverFlow		     */
#define Rx_EnCRCErr	       0x00000200 /* 1:Enable CRC Error		     */
#define Rx_EnAlign	       0x00000100 /* 1:Enable Alignment		     */
#define Rx_IgnoreCRC	       0x00000040 /* 1:Ignore CRC Value		     */
#define Rx_StripCRC	       0x00000010 /* 1:Strip CRC Value		     */
#define Rx_ShortEn	       0x00000008 /* 1:Short Enable		     */
#define Rx_LongEn	       0x00000004 /* 1:Long Enable		     */
#define Rx_RxHalt	       0x00000002 /* 1:Receive Halt Request	     */
#define Rx_RxEn		       0x00000001 /* 1:Receive Interrupt Enable	     */

/* Rx_Stat bit assign ------------------------------------------------------- */
#define Rx_Halted	       0x00008000 /* Rx Halted			     */
#define Rx_Good		       0x00004000 /* Rx Good			     */
#define Rx_RxPar	       0x00002000 /* Rx Parity Error		     */
#define Rx_TypePkt	       0x00001000 /* Rx Type Packet		     */
#define Rx_LongErr	       0x00000800 /* Rx Long Error		     */
#define Rx_Over		       0x00000400 /* Rx Overflow		     */
#define Rx_CRCErr	       0x00000200 /* Rx CRC Error		     */
#define Rx_Align	       0x00000100 /* Rx Alignment Error		     */
#define Rx_10Stat	       0x00000080 /* Rx 10Mbps Status		     */
#define Rx_IntRx	       0x00000040 /* Rx Interrupt		     */
#define Rx_CtlRecd	       0x00000020 /* Rx Control Receive		     */
#define Rx_InLenErr	       0x00000010 /* Rx In Range Frame Length Error  */

#define Rx_Stat_Mask	       0x0000FFF0 /* Rx All Status Mask		     */

/* Int_En bit assign -------------------------------------------------------- */
#define Int_NRAbtEn	       0x00000800 /* 1:Non-recoverable Abort Enable  */
#define Int_TxCtlCmpEn	       0x00000400 /* 1:Transmit Ctl Complete Enable  */
#define Int_DmParErrEn	       0x00000200 /* 1:DMA Parity Error Enable	     */
#define Int_DParDEn	       0x00000100 /* 1:Data Parity Error Enable	     */
#define Int_EarNotEn	       0x00000080 /* 1:Early Notify Enable	     */
#define Int_DParErrEn	       0x00000040 /* 1:Detected Parity Error Enable  */
#define Int_SSysErrEn	       0x00000020 /* 1:Signalled System Error Enable */
#define Int_RMasAbtEn	       0x00000010 /* 1:Received Master Abort Enable  */
#define Int_RTargAbtEn	       0x00000008 /* 1:Received Target Abort Enable  */
#define Int_STargAbtEn	       0x00000004 /* 1:Signalled Target Abort Enable */
#define Int_BLExEn	       0x00000002 /* 1:Buffer List Exhausted Enable  */
#define Int_FDAExEn	       0x00000001 /* 1:Free Descriptor Area	     */
					  /*		   Exhausted Enable  */

/* Int_Src bit assign ------------------------------------------------------- */
#define Int_NRabt	       0x00004000 /* 1:Non Recoverable error	     */
#define Int_DmParErrStat       0x00002000 /* 1:DMA Parity Error & Clear	     */
#define Int_BLEx	       0x00001000 /* 1:Buffer List Empty & Clear     */
#define Int_FDAEx	       0x00000800 /* 1:FDA Empty & Clear	     */
#define Int_IntNRAbt	       0x00000400 /* 1:Non Recoverable Abort	     */
#define Int_IntCmp	       0x00000200 /* 1:MAC control packet complete   */
#define Int_IntExBD	       0x00000100 /* 1:Interrupt Extra BD & Clear    */
#define Int_DmParErr	       0x00000080 /* 1:DMA Parity Error & Clear	     */
#define Int_IntEarNot	       0x00000040 /* 1:Receive Data write & Clear    */
#define Int_SWInt	       0x00000020 /* 1:Software request & Clear	     */
#define Int_IntBLEx	       0x00000010 /* 1:Buffer List Empty & Clear     */
#define Int_IntFDAEx	       0x00000008 /* 1:FDA Empty & Clear	     */
#define Int_IntPCI	       0x00000004 /* 1:PCI controller & Clear	     */
#define Int_IntMacRx	       0x00000002 /* 1:Rx controller & Clear	     */
#define Int_IntMacTx	       0x00000001 /* 1:Tx controller & Clear	     */

/* MD_CA bit assign --------------------------------------------------------- */
#define MD_CA_PreSup	       0x00001000 /* 1:Preamble Suppress	     */
#define MD_CA_Busy	       0x00000800 /* 1:Busy (Start Operation)	     */
#define MD_CA_Wr	       0x00000400 /* 1:Write 0:Read		     */


/*
 * Descriptors
 */

/* Frame descriptor */
struct FDesc {
	volatile __u32 FDNext;
	volatile __u32 FDSystem;
	volatile __u32 FDStat;
	volatile __u32 FDCtl;
};

/* Buffer descriptor */
struct BDesc {
	volatile __u32 BuffData;
	volatile __u32 BDCtl;
};

#define FD_ALIGN	16

/* Frame Descriptor bit assign ---------------------------------------------- */
#define FD_FDLength_MASK       0x0000FFFF /* Length MASK		     */
#define FD_BDCnt_MASK	       0x001F0000 /* BD count MASK in FD	     */
#define FD_FrmOpt_MASK	       0x7C000000 /* Frame option MASK		     */
#define FD_FrmOpt_BigEndian    0x40000000 /* Tx/Rx */
#define FD_FrmOpt_IntTx	       0x20000000 /* Tx only */
#define FD_FrmOpt_NoCRC	       0x10000000 /* Tx only */
#define FD_FrmOpt_NoPadding    0x08000000 /* Tx only */
#define FD_FrmOpt_Packing      0x04000000 /* Rx only */
#define FD_CownsFD	       0x80000000 /* FD Controller owner bit	     */
#define FD_Next_EOL	       0x00000001 /* FD EOL indicator		     */
#define FD_BDCnt_SHIFT	       16

/* Buffer Descriptor bit assign --------------------------------------------- */
#define BD_BuffLength_MASK     0x0000FFFF /* Receive Data Size		     */
#define BD_RxBDID_MASK	       0x00FF0000 /* BD ID Number MASK		     */
#define BD_RxBDSeqN_MASK       0x7F000000 /* Rx BD Sequence Number	     */
#define BD_CownsBD	       0x80000000 /* BD Controller owner bit	     */
#define BD_RxBDID_SHIFT	       16
#define BD_RxBDSeqN_SHIFT      24


/* Some useful constants. */

#define TX_CTL_CMD	(Tx_EnTxPar | Tx_EnLateColl | \
	Tx_EnExColl | Tx_EnLCarr | Tx_EnExDefer | Tx_EnUnder | \
	Tx_En)	/* maybe  0x7b01 */
/* Do not use Rx_StripCRC -- it causes trouble under BLEx/FDAEx conditions */
#define RX_CTL_CMD	(Rx_EnGood | Rx_EnRxPar | Rx_EnLongErr | Rx_EnOver \
	| Rx_EnCRCErr | Rx_EnAlign | Rx_RxEn) /* maybe 0x6f01 */
#define INT_EN_CMD  (Int_NRAbtEn | \
	Int_DmParErrEn | Int_DParDEn | Int_DParErrEn | \
	Int_SSysErrEn  | Int_RMasAbtEn | Int_RTargAbtEn | \
	Int_STargAbtEn | \
	Int_BLExEn  | Int_FDAExEn) /* maybe 0xb7f */
#define DMA_CTL_CMD	DMA_BURST_SIZE
#define HAVE_DMA_RXALIGN(lp)	likely((lp)->chiptype != TC35815CF)

/* Tuning parameters */
#define DMA_BURST_SIZE	32
#define TX_THRESHOLD	1024
/* Threshold raised to the maximum packet size for hosts with poor PCI transfer ability. */
#define TX_THRESHOLD_MAX 1536
/* Keep the maximum threshold once underrun errors have occurred this many times. */
#define TX_THRESHOLD_KEEP_LIMIT 10

/* 16 + RX_BUF_NUM * 8 + RX_FD_NUM * 16 + TX_FD_NUM * 32 <= PAGE_SIZE*FD_PAGE_NUM */
#define FD_PAGE_NUM 4
#define RX_BUF_NUM	128	/* < 256 */
#define RX_FD_NUM	256	/* >= 32 */
#define TX_FD_NUM	128
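/*
 * With the defaults above the constraint works out to
 * 16 + 128*8 + 256*16 + 128*32 = 9232 bytes, which fits in
 * FD_PAGE_NUM pages even for 4 KiB pages (4 * 4096 = 16384).
 */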
#if RX_CTL_CMD & Rx_LongEn
#define RX_BUF_SIZE	PAGE_SIZE
#elif RX_CTL_CMD & Rx_StripCRC
#define RX_BUF_SIZE	\
	L1_CACHE_ALIGN(ETH_FRAME_LEN + VLAN_HLEN + NET_IP_ALIGN)
#else
#define RX_BUF_SIZE	\
	L1_CACHE_ALIGN(ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN + NET_IP_ALIGN)
#endif
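/*
 * In other words: if oversized frames may be received (Rx_LongEn), fall
 * back to a full page per buffer; otherwise size the buffer for one
 * VLAN-tagged Ethernet frame, including the FCS only when the chip does
 * not strip it (Rx_StripCRC unset, the default here).
 */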
#define RX_FD_RESERVE	(2 / 2)	/* max 2 BD per RxFD */
#define NAPI_WEIGHT	16

struct TxFD {
	struct FDesc fd;
	struct BDesc bd;
	struct BDesc unused;
};

struct RxFD {
	struct FDesc fd;
	struct BDesc bd[];	/* variable length */
};

struct FrFD {
	struct FDesc fd;
	struct BDesc bd[RX_BUF_NUM];
};


#define tc_readl(addr)	ioread32(addr)
#define tc_writel(d, addr)	iowrite32(d, addr)

#define TC35815_TX_TIMEOUT  msecs_to_jiffies(400)

/* Information that needs to be kept for each controller. */
struct tc35815_local {
	struct pci_dev *pci_dev;

	struct net_device *dev;
	struct napi_struct napi;

	/* statistics */
	struct {
		int max_tx_qlen;
		int tx_ints;
		int rx_ints;
		int tx_underrun;
	} lstats;

	/* Tx control lock.  This protects the transmit buffer ring
	 * state along with the "tx full" state of the driver.  This
	 * means all netif_queue flow control actions are protected
	 * by this lock as well.
	 */
	spinlock_t lock;
	spinlock_t rx_lock;

	struct mii_bus *mii_bus;
	int duplex;
	int speed;
	int link;
	struct work_struct restart_work;

	/*
	 * Transmitting: Batch Mode.
	 *	1 BD in 1 TxFD.
	 * Receiving: Non-Packing Mode.
	 *	1 circular FD for Free Buffer List.
	 *	RX_BUF_NUM BD in Free Buffer FD.
	 *	One Free Buffer BD has ETH_FRAME_LEN data buffer.
	 */
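	/*
	 * Concretely: each transmit occupies exactly one TxFD carrying a
	 * single BD, the chip reports completed receives through RxFDs,
	 * and every receive buffer hangs off the one circular FrFD at
	 * fbl_ptr below.
	 */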
	void *fd_buf;	/* for TxFD, RxFD, FrFD */
	dma_addr_t fd_buf_dma;
	struct TxFD *tfd_base;
	unsigned int tfd_start;
	unsigned int tfd_end;
	struct RxFD *rfd_base;
	struct RxFD *rfd_limit;
	struct RxFD *rfd_cur;
	struct FrFD *fbl_ptr;
	unsigned int fbl_count;
	struct {
		struct sk_buff *skb;
		dma_addr_t skb_dma;
	} tx_skbs[TX_FD_NUM], rx_skbs[RX_BUF_NUM];
	u32 msg_enable;
	enum tc35815_chiptype chiptype;
};

static inline dma_addr_t fd_virt_to_bus(struct tc35815_local *lp, void *virt)
{
	return lp->fd_buf_dma + ((u8 *)virt - (u8 *)lp->fd_buf);
}
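
/*
 * All descriptors live inside the single coherent fd_buf allocation, so
 * converting a CPU pointer to a bus address is just an offset from
 * fd_buf_dma; e.g. fd_virt_to_bus(lp, lp->tfd_base) is the DMA address
 * of the first TxFD.
 */
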
#ifdef DEBUG
static inline void *fd_bus_to_virt(struct tc35815_local *lp, dma_addr_t bus)
{
	return (void *)((u8 *)lp->fd_buf + (bus - lp->fd_buf_dma));
}
#endif
static struct sk_buff *alloc_rxbuf_skb(struct net_device *dev,
				       struct pci_dev *hwdev,
				       dma_addr_t *dma_handle)
{
	struct sk_buff *skb;
	skb = netdev_alloc_skb(dev, RX_BUF_SIZE);
	if (!skb)
		return NULL;
	*dma_handle = dma_map_single(&hwdev->dev, skb->data, RX_BUF_SIZE,
				     DMA_FROM_DEVICE);
	if (dma_mapping_error(&hwdev->dev, *dma_handle)) {
		dev_kfree_skb_any(skb);
		return NULL;
	}
	skb_reserve(skb, 2);	/* make the IP header 4-byte aligned */
	return skb;
}
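
/*
 * Note that the buffer is DMA-mapped before skb_reserve(), so the chip
 * sees the unshifted buffer start.  Chips with the DMA_RxAlign
 * capability can apply the 2-byte offset in hardware; on the TC35815CF
 * (see HAVE_DMA_RXALIGN) tc35815_rx() has to memmove() the data into
 * place instead.
 */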

static void free_rxbuf_skb(struct pci_dev *hwdev, struct sk_buff *skb, dma_addr_t dma_handle)
{
	dma_unmap_single(&hwdev->dev, dma_handle, RX_BUF_SIZE,
			 DMA_FROM_DEVICE);
	dev_kfree_skb_any(skb);
}

/* Index to functions, as function prototypes. */

static int	tc35815_open(struct net_device *dev);
static netdev_tx_t	tc35815_send_packet(struct sk_buff *skb,
					    struct net_device *dev);
static irqreturn_t	tc35815_interrupt(int irq, void *dev_id);
static int	tc35815_rx(struct net_device *dev, int limit);
static int	tc35815_poll(struct napi_struct *napi, int budget);
static void	tc35815_txdone(struct net_device *dev);
static int	tc35815_close(struct net_device *dev);
static struct	net_device_stats *tc35815_get_stats(struct net_device *dev);
static void	tc35815_set_multicast_list(struct net_device *dev);
static void	tc35815_tx_timeout(struct net_device *dev, unsigned int txqueue);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void	tc35815_poll_controller(struct net_device *dev);
#endif
static const struct ethtool_ops tc35815_ethtool_ops;

/* Example routines you must write ;->. */
static void	tc35815_chip_reset(struct net_device *dev);
static void	tc35815_chip_init(struct net_device *dev);

#ifdef DEBUG
static void	panic_queues(struct net_device *dev);
#endif

static void tc35815_restart_work(struct work_struct *work);

static int tc_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
	struct net_device *dev = bus->priv;
	struct tc35815_regs __iomem *tr =
		(struct tc35815_regs __iomem *)dev->base_addr;
	unsigned long timeout = jiffies + HZ;

	tc_writel(MD_CA_Busy | (mii_id << 5) | (regnum & 0x1f), &tr->MD_CA);
	udelay(12); /* it takes 32 x 400ns at least */
	while (tc_readl(&tr->MD_CA) & MD_CA_Busy) {
		if (time_after(jiffies, timeout))
			return -EIO;
		cpu_relax();
	}
	return tc_readl(&tr->MD_Data) & 0xffff;
}

static int tc_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 val)
{
	struct net_device *dev = bus->priv;
	struct tc35815_regs __iomem *tr =
		(struct tc35815_regs __iomem *)dev->base_addr;
	unsigned long timeout = jiffies + HZ;

	tc_writel(val, &tr->MD_Data);
	tc_writel(MD_CA_Busy | MD_CA_Wr | (mii_id << 5) | (regnum & 0x1f),
		  &tr->MD_CA);
	udelay(12); /* it takes 32 x 400ns at least */
	while (tc_readl(&tr->MD_CA) & MD_CA_Busy) {
		if (time_after(jiffies, timeout))
			return -EIO;
		cpu_relax();
	}
	return 0;
}
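
/*
 * These two hooks are what the phylib core calls once the bus is
 * registered in tc_mii_init() below; e.g. the phy_read(phydev, MII_BMCR)
 * in tc_handle_link_change() ends up in tc_mdio_read().  Each MDIO
 * transaction is a busy-wait on MD_CA_Busy with a one-second timeout.
 */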

static void tc_handle_link_change(struct net_device *dev)
{
	struct tc35815_local *lp = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;
	unsigned long flags;
	int status_change = 0;

	spin_lock_irqsave(&lp->lock, flags);
	if (phydev->link &&
	    (lp->speed != phydev->speed || lp->duplex != phydev->duplex)) {
		struct tc35815_regs __iomem *tr =
			(struct tc35815_regs __iomem *)dev->base_addr;
		u32 reg;

		reg = tc_readl(&tr->MAC_Ctl);
		reg |= MAC_HaltReq;
		tc_writel(reg, &tr->MAC_Ctl);
		if (phydev->duplex == DUPLEX_FULL)
			reg |= MAC_FullDup;
		else
			reg &= ~MAC_FullDup;
		tc_writel(reg, &tr->MAC_Ctl);
		reg &= ~MAC_HaltReq;
		tc_writel(reg, &tr->MAC_Ctl);

		/*
		 * TX4939 PCFG.SPEEDn bit will be changed on
		 * NETDEV_CHANGE event.
		 */
		/*
		 * WORKAROUND: enable LostCrS only for half-duplex
		 * operation.
		 * (TX4939 does not have EnLCarr)
		 */
		if (phydev->duplex == DUPLEX_HALF &&
		    lp->chiptype != TC35815_TX4939)
			tc_writel(tc_readl(&tr->Tx_Ctl) | Tx_EnLCarr,
				  &tr->Tx_Ctl);

		lp->speed = phydev->speed;
		lp->duplex = phydev->duplex;
		status_change = 1;
	}

	if (phydev->link != lp->link) {
		if (phydev->link) {
			/* delayed promiscuous enabling */
			if (dev->flags & IFF_PROMISC)
				tc35815_set_multicast_list(dev);
		} else {
			lp->speed = 0;
			lp->duplex = -1;
		}
		lp->link = phydev->link;

		status_change = 1;
	}
	spin_unlock_irqrestore(&lp->lock, flags);

	if (status_change && netif_msg_link(lp)) {
		phy_print_status(phydev);
		pr_debug("%s: MII BMCR %04x BMSR %04x LPA %04x\n",
			 dev->name,
			 phy_read(phydev, MII_BMCR),
			 phy_read(phydev, MII_BMSR),
			 phy_read(phydev, MII_LPA));
	}
}

static int tc_mii_probe(struct net_device *dev)
{
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
	struct tc35815_local *lp = netdev_priv(dev);
	struct phy_device *phydev;

	phydev = phy_find_first(lp->mii_bus);
	if (!phydev) {
		printk(KERN_ERR "%s: no PHY found\n", dev->name);
		return -ENODEV;
	}

	/* attach the mac to the phy */
	phydev = phy_connect(dev, phydev_name(phydev),
			     &tc_handle_link_change,
			     lp->chiptype == TC35815_TX4939 ? PHY_INTERFACE_MODE_RMII : PHY_INTERFACE_MODE_MII);
	if (IS_ERR(phydev)) {
		printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
		return PTR_ERR(phydev);
	}

	phy_attached_info(phydev);

	/* mask with MAC supported features */
	phy_set_max_speed(phydev, SPEED_100);
	if (options.speed == 10) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, mask);
		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, mask);
	} else if (options.speed == 100) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, mask);
		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, mask);
	}
	if (options.duplex == 1) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, mask);
		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, mask);
	} else if (options.duplex == 2) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, mask);
		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, mask);
	}
	linkmode_andnot(phydev->supported, phydev->supported, mask);
	linkmode_copy(phydev->advertising, phydev->supported);

	lp->link = 0;
	lp->speed = 0;
	lp->duplex = -1;

	return 0;
}

static int tc_mii_init(struct net_device *dev)
{
	struct tc35815_local *lp = netdev_priv(dev);
	int err;

	lp->mii_bus = mdiobus_alloc();
	if (lp->mii_bus == NULL) {
		err = -ENOMEM;
		goto err_out;
	}

	lp->mii_bus->name = "tc35815_mii_bus";
	lp->mii_bus->read = tc_mdio_read;
	lp->mii_bus->write = tc_mdio_write;
	snprintf(lp->mii_bus->id, MII_BUS_ID_SIZE, "%x", pci_dev_id(lp->pci_dev));
	lp->mii_bus->priv = dev;
	lp->mii_bus->parent = &lp->pci_dev->dev;
	err = mdiobus_register(lp->mii_bus);
	if (err)
		goto err_out_free_mii_bus;
	err = tc_mii_probe(dev);
	if (err)
		goto err_out_unregister_bus;
	return 0;

err_out_unregister_bus:
	mdiobus_unregister(lp->mii_bus);
err_out_free_mii_bus:
	mdiobus_free(lp->mii_bus);
err_out:
	return err;
}

#ifdef CONFIG_CPU_TX49XX
/*
 * Find a platform_device providing a MAC address.  The platform code
 * should provide a "tc35815-mac" device with a MAC address in its
 * platform_data.
 */
static int tc35815_mac_match(struct device *dev, const void *data)
{
	struct platform_device *plat_dev = to_platform_device(dev);
	const struct pci_dev *pci_dev = data;
	unsigned int id = pci_dev->irq;
	return !strcmp(plat_dev->name, "tc35815-mac") && plat_dev->id == id;
}

static int tc35815_read_plat_dev_addr(struct net_device *dev)
{
	struct tc35815_local *lp = netdev_priv(dev);
	struct device *pd = bus_find_device(&platform_bus_type, NULL,
					    lp->pci_dev, tc35815_mac_match);
	if (pd) {
		if (pd->platform_data)
			eth_hw_addr_set(dev, pd->platform_data);
		put_device(pd);
		return is_valid_ether_addr(dev->dev_addr) ? 0 : -ENODEV;
	}
	return -ENODEV;
}
#else
static int tc35815_read_plat_dev_addr(struct net_device *dev)
{
	return -ENODEV;
}
#endif

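/*
 * Read the station address from the MAC address PROM: the loop below
 * fetches three 16-bit words at PROM word offsets 2..4, low byte first
 * (so word 2 supplies addr[0] and addr[1], and so on).
 */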
static int tc35815_init_dev_addr(struct net_device *dev)
{
	struct tc35815_regs __iomem *tr =
		(struct tc35815_regs __iomem *)dev->base_addr;
	u8 addr[ETH_ALEN];
	int i;

	while (tc_readl(&tr->PROM_Ctl) & PROM_Busy)
		;
	for (i = 0; i < 6; i += 2) {
		unsigned short data;
		tc_writel(PROM_Busy | PROM_Read | (i / 2 + 2), &tr->PROM_Ctl);
		while (tc_readl(&tr->PROM_Ctl) & PROM_Busy)
			;
		data = tc_readl(&tr->PROM_Data);
		addr[i] = data & 0xff;
		addr[i+1] = data >> 8;
	}
	eth_hw_addr_set(dev, addr);
	if (!is_valid_ether_addr(dev->dev_addr))
		return tc35815_read_plat_dev_addr(dev);
	return 0;
}

static const struct net_device_ops tc35815_netdev_ops = {
	.ndo_open		= tc35815_open,
	.ndo_stop		= tc35815_close,
	.ndo_start_xmit		= tc35815_send_packet,
	.ndo_get_stats		= tc35815_get_stats,
	.ndo_set_rx_mode	= tc35815_set_multicast_list,
	.ndo_tx_timeout		= tc35815_tx_timeout,
	.ndo_eth_ioctl		= phy_do_ioctl_running,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= tc35815_poll_controller,
#endif
};

static int tc35815_init_one(struct pci_dev *pdev,
			    const struct pci_device_id *ent)
{
	void __iomem *ioaddr = NULL;
	struct net_device *dev;
	struct tc35815_local *lp;
	int rc;

	static int printed_version;
	if (!printed_version++) {
		printk(version);
		dev_printk(KERN_DEBUG, &pdev->dev,
			   "speed:%d duplex:%d\n",
			   options.speed, options.duplex);
	}

	if (!pdev->irq) {
		dev_warn(&pdev->dev, "no IRQ assigned.\n");
		return -ENODEV;
	}

	/* dev zeroed in alloc_etherdev */
	dev = alloc_etherdev(sizeof(*lp));
	if (dev == NULL)
		return -ENOMEM;

	SET_NETDEV_DEV(dev, &pdev->dev);
	lp = netdev_priv(dev);
	lp->dev = dev;

	/* enable device (incl. PCI PM wakeup), and bus-mastering */
	rc = pcim_enable_device(pdev);
	if (rc)
		goto err_out;
	rc = pcim_iomap_regions(pdev, 1 << 1, MODNAME);
	if (rc)
		goto err_out;
	pci_set_master(pdev);
	ioaddr = pcim_iomap_table(pdev)[1];

	/* Initialize the device structure. */
	dev->netdev_ops = &tc35815_netdev_ops;
	dev->ethtool_ops = &tc35815_ethtool_ops;
	dev->watchdog_timeo = TC35815_TX_TIMEOUT;
	netif_napi_add_weight(dev, &lp->napi, tc35815_poll, NAPI_WEIGHT);

	dev->irq = pdev->irq;
	dev->base_addr = (unsigned long)ioaddr;

	INIT_WORK(&lp->restart_work, tc35815_restart_work);
	spin_lock_init(&lp->lock);
	spin_lock_init(&lp->rx_lock);
	lp->pci_dev = pdev;
	lp->chiptype = ent->driver_data;

	lp->msg_enable = NETIF_MSG_TX_ERR | NETIF_MSG_HW | NETIF_MSG_DRV | NETIF_MSG_LINK;
	pci_set_drvdata(pdev, dev);

	/* Soft reset the chip. */
	tc35815_chip_reset(dev);

	/* Retrieve the ethernet address. */
	if (tc35815_init_dev_addr(dev)) {
		dev_warn(&pdev->dev, "not valid ether addr\n");
		eth_hw_addr_random(dev);
	}

	rc = register_netdev(dev);
	if (rc)
		goto err_out;

	printk(KERN_INFO "%s: %s at 0x%lx, %pM, IRQ %d\n",
		dev->name,
		chip_info[ent->driver_data].name,
		dev->base_addr,
		dev->dev_addr,
		dev->irq);

	rc = tc_mii_init(dev);
	if (rc)
		goto err_out_unregister;

	return 0;

err_out_unregister:
	unregister_netdev(dev);
err_out:
	free_netdev(dev);
	return rc;
}


static void tc35815_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tc35815_local *lp = netdev_priv(dev);

	phy_disconnect(dev->phydev);
	mdiobus_unregister(lp->mii_bus);
	mdiobus_free(lp->mii_bus);
	unregister_netdev(dev);
	free_netdev(dev);
}

static int
tc35815_init_queues(struct net_device *dev)
{
	struct tc35815_local *lp = netdev_priv(dev);
	int i;
	unsigned long fd_addr;

	if (!lp->fd_buf) {
		BUG_ON(sizeof(struct FDesc) +
		       sizeof(struct BDesc) * RX_BUF_NUM +
		       sizeof(struct FDesc) * RX_FD_NUM +
		       sizeof(struct TxFD) * TX_FD_NUM >
		       PAGE_SIZE * FD_PAGE_NUM);

		lp->fd_buf = dma_alloc_coherent(&lp->pci_dev->dev,
						PAGE_SIZE * FD_PAGE_NUM,
						&lp->fd_buf_dma, GFP_ATOMIC);
		if (!lp->fd_buf)
			return -ENOMEM;
		for (i = 0; i < RX_BUF_NUM; i++) {
			lp->rx_skbs[i].skb =
				alloc_rxbuf_skb(dev, lp->pci_dev,
						&lp->rx_skbs[i].skb_dma);
			if (!lp->rx_skbs[i].skb) {
				while (--i >= 0) {
					free_rxbuf_skb(lp->pci_dev,
						       lp->rx_skbs[i].skb,
						       lp->rx_skbs[i].skb_dma);
					lp->rx_skbs[i].skb = NULL;
				}
				dma_free_coherent(&lp->pci_dev->dev,
						  PAGE_SIZE * FD_PAGE_NUM,
						  lp->fd_buf, lp->fd_buf_dma);
				lp->fd_buf = NULL;
				return -ENOMEM;
			}
		}
		printk(KERN_DEBUG "%s: FD buf %p DataBuf",
		       dev->name, lp->fd_buf);
		printk("\n");
	} else {
		for (i = 0; i < FD_PAGE_NUM; i++)
			clear_page((void *)((unsigned long)lp->fd_buf +
					    i * PAGE_SIZE));
	}
	fd_addr = (unsigned long)lp->fd_buf;
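
	/*
	 * Carve fd_buf into three consecutive regions: RX_FD_NUM RxFDs
	 * first, then TX_FD_NUM TxFDs, and finally the single FrFD that
	 * holds the free receive buffer list.
	 */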

	/* Free Descriptors (for Receive) */
	lp->rfd_base = (struct RxFD *)fd_addr;
	fd_addr += sizeof(struct RxFD) * RX_FD_NUM;
	for (i = 0; i < RX_FD_NUM; i++)
		lp->rfd_base[i].fd.FDCtl = cpu_to_le32(FD_CownsFD);
	lp->rfd_cur = lp->rfd_base;
	lp->rfd_limit = (struct RxFD *)fd_addr - (RX_FD_RESERVE + 1);

	/* Transmit Descriptors */
	lp->tfd_base = (struct TxFD *)fd_addr;
	fd_addr += sizeof(struct TxFD) * TX_FD_NUM;
	for (i = 0; i < TX_FD_NUM; i++) {
		lp->tfd_base[i].fd.FDNext = cpu_to_le32(fd_virt_to_bus(lp, &lp->tfd_base[i+1]));
		lp->tfd_base[i].fd.FDSystem = cpu_to_le32(0xffffffff);
		lp->tfd_base[i].fd.FDCtl = cpu_to_le32(0);
	}
	lp->tfd_base[TX_FD_NUM-1].fd.FDNext = cpu_to_le32(fd_virt_to_bus(lp, &lp->tfd_base[0]));
	lp->tfd_start = 0;
	lp->tfd_end = 0;

	/* Buffer List (for Receive) */
	lp->fbl_ptr = (struct FrFD *)fd_addr;
	lp->fbl_ptr->fd.FDNext = cpu_to_le32(fd_virt_to_bus(lp, lp->fbl_ptr));
	lp->fbl_ptr->fd.FDCtl = cpu_to_le32(RX_BUF_NUM | FD_CownsFD);
	/*
	 * Move all allocated skbs to the head of the rx_skbs[] array.
	 * fbl_count might not be RX_BUF_NUM if alloc_rxbuf_skb() in
	 * tc35815_rx() has failed.
	 */
	lp->fbl_count = 0;
	for (i = 0; i < RX_BUF_NUM; i++) {
		if (lp->rx_skbs[i].skb) {
			if (i != lp->fbl_count) {
				lp->rx_skbs[lp->fbl_count].skb =
					lp->rx_skbs[i].skb;
				lp->rx_skbs[lp->fbl_count].skb_dma =
					lp->rx_skbs[i].skb_dma;
			}
			lp->fbl_count++;
		}
	}
	for (i = 0; i < RX_BUF_NUM; i++) {
		if (i >= lp->fbl_count) {
			lp->fbl_ptr->bd[i].BuffData = 0;
			lp->fbl_ptr->bd[i].BDCtl = 0;
			continue;
		}
		lp->fbl_ptr->bd[i].BuffData =
			cpu_to_le32(lp->rx_skbs[i].skb_dma);
		/* BDID is index of FrFD.bd[] */
		lp->fbl_ptr->bd[i].BDCtl =
			cpu_to_le32(BD_CownsBD | (i << BD_RxBDID_SHIFT) |
				    RX_BUF_SIZE);
	}

	printk(KERN_DEBUG "%s: TxFD %p RxFD %p FrFD %p\n",
	       dev->name, lp->tfd_base, lp->rfd_base, lp->fbl_ptr);
	return 0;
}

static void
tc35815_clear_queues(struct net_device *dev)
{
	struct tc35815_local *lp = netdev_priv(dev);
	int i;

	for (i = 0; i < TX_FD_NUM; i++) {
		u32 fdsystem = le32_to_cpu(lp->tfd_base[i].fd.FDSystem);
		struct sk_buff *skb =
			fdsystem != 0xffffffff ?
			lp->tx_skbs[fdsystem].skb : NULL;
#ifdef DEBUG
		if (lp->tx_skbs[i].skb != skb) {
			printk("%s: tx_skbs mismatch(%d).\n", dev->name, i);
			panic_queues(dev);
		}
#else
		BUG_ON(lp->tx_skbs[i].skb != skb);
#endif
		if (skb) {
			dma_unmap_single(&lp->pci_dev->dev,
					 lp->tx_skbs[i].skb_dma, skb->len,
					 DMA_TO_DEVICE);
			lp->tx_skbs[i].skb = NULL;
			lp->tx_skbs[i].skb_dma = 0;
			dev_kfree_skb_any(skb);
		}
		lp->tfd_base[i].fd.FDSystem = cpu_to_le32(0xffffffff);
	}

	tc35815_init_queues(dev);
}

static void
tc35815_free_queues(struct net_device *dev)
{
	struct tc35815_local *lp = netdev_priv(dev);
	int i;

	if (lp->tfd_base) {
		for (i = 0; i < TX_FD_NUM; i++) {
			u32 fdsystem = le32_to_cpu(lp->tfd_base[i].fd.FDSystem);
			struct sk_buff *skb =
				fdsystem != 0xffffffff ?
				lp->tx_skbs[fdsystem].skb : NULL;
#ifdef DEBUG
			if (lp->tx_skbs[i].skb != skb) {
				printk("%s: tx_skbs mismatch(%d).\n", dev->name, i);
				panic_queues(dev);
			}
#else
			BUG_ON(lp->tx_skbs[i].skb != skb);
#endif
			if (skb) {
				dma_unmap_single(&lp->pci_dev->dev,
						 lp->tx_skbs[i].skb_dma,
						 skb->len, DMA_TO_DEVICE);
				dev_kfree_skb(skb);
				lp->tx_skbs[i].skb = NULL;
				lp->tx_skbs[i].skb_dma = 0;
			}
			lp->tfd_base[i].fd.FDSystem = cpu_to_le32(0xffffffff);
		}
	}

	lp->rfd_base = NULL;
	lp->rfd_limit = NULL;
	lp->rfd_cur = NULL;
	lp->fbl_ptr = NULL;

	for (i = 0; i < RX_BUF_NUM; i++) {
		if (lp->rx_skbs[i].skb) {
			free_rxbuf_skb(lp->pci_dev, lp->rx_skbs[i].skb,
				       lp->rx_skbs[i].skb_dma);
			lp->rx_skbs[i].skb = NULL;
		}
	}
	if (lp->fd_buf) {
		dma_free_coherent(&lp->pci_dev->dev, PAGE_SIZE * FD_PAGE_NUM,
				  lp->fd_buf, lp->fd_buf_dma);
		lp->fd_buf = NULL;
	}
}

static void
dump_txfd(struct TxFD *fd)
{
	printk("TxFD(%p): %08x %08x %08x %08x\n", fd,
	       le32_to_cpu(fd->fd.FDNext),
	       le32_to_cpu(fd->fd.FDSystem),
	       le32_to_cpu(fd->fd.FDStat),
	       le32_to_cpu(fd->fd.FDCtl));
	printk("BD: ");
	printk(" %08x %08x",
	       le32_to_cpu(fd->bd.BuffData),
	       le32_to_cpu(fd->bd.BDCtl));
	printk("\n");
}

static int
dump_rxfd(struct RxFD *fd)
{
	int i, bd_count = (le32_to_cpu(fd->fd.FDCtl) & FD_BDCnt_MASK) >> FD_BDCnt_SHIFT;
	if (bd_count > 8)
		bd_count = 8;
	printk("RxFD(%p): %08x %08x %08x %08x\n", fd,
	       le32_to_cpu(fd->fd.FDNext),
	       le32_to_cpu(fd->fd.FDSystem),
	       le32_to_cpu(fd->fd.FDStat),
	       le32_to_cpu(fd->fd.FDCtl));
	if (le32_to_cpu(fd->fd.FDCtl) & FD_CownsFD)
		return 0;
	printk("BD: ");
	for (i = 0; i < bd_count; i++)
		printk(" %08x %08x",
		       le32_to_cpu(fd->bd[i].BuffData),
		       le32_to_cpu(fd->bd[i].BDCtl));
	printk("\n");
	return bd_count;
}

#ifdef DEBUG
static void
dump_frfd(struct FrFD *fd)
{
	int i;
	printk("FrFD(%p): %08x %08x %08x %08x\n", fd,
	       le32_to_cpu(fd->fd.FDNext),
	       le32_to_cpu(fd->fd.FDSystem),
	       le32_to_cpu(fd->fd.FDStat),
	       le32_to_cpu(fd->fd.FDCtl));
	printk("BD: ");
	for (i = 0; i < RX_BUF_NUM; i++)
		printk(" %08x %08x",
		       le32_to_cpu(fd->bd[i].BuffData),
		       le32_to_cpu(fd->bd[i].BDCtl));
	printk("\n");
}

static void
panic_queues(struct net_device *dev)
{
	struct tc35815_local *lp = netdev_priv(dev);
	int i;

	printk("TxFD base %p, start %u, end %u\n",
	       lp->tfd_base, lp->tfd_start, lp->tfd_end);
	printk("RxFD base %p limit %p cur %p\n",
	       lp->rfd_base, lp->rfd_limit, lp->rfd_cur);
	printk("FrFD %p\n", lp->fbl_ptr);
	for (i = 0; i < TX_FD_NUM; i++)
		dump_txfd(&lp->tfd_base[i]);
	for (i = 0; i < RX_FD_NUM; i++) {
		int bd_count = dump_rxfd(&lp->rfd_base[i]);
		i += (bd_count + 1) / 2;	/* skip BDs */
	}
	dump_frfd(lp->fbl_ptr);
	panic("%s: Illegal queue state.", dev->name);
}
#endif

static void print_eth(const u8 *add)
{
	printk(KERN_DEBUG "print_eth(%p)\n", add);
	printk(KERN_DEBUG " %pM => %pM : %02x%02x\n",
		add + 6, add, add[12], add[13]);
}

static int tc35815_tx_full(struct net_device *dev)
{
	struct tc35815_local *lp = netdev_priv(dev);
	return (lp->tfd_start + 1) % TX_FD_NUM == lp->tfd_end;
}
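
/*
 * The ring deliberately keeps one slot unused so that "full" and "empty"
 * are distinguishable: tfd_start == tfd_end means empty, while full is
 * one short of wrapping.  With TX_FD_NUM = 128 that allows at most 127
 * in-flight TxFDs; e.g. tfd_end = 5, tfd_start = 4 reads as full.
 */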

static void tc35815_restart(struct net_device *dev)
{
	struct tc35815_local *lp = netdev_priv(dev);
	int ret;

	if (dev->phydev) {
		ret = phy_init_hw(dev->phydev);
		if (ret)
			printk(KERN_ERR "%s: PHY init failed.\n", dev->name);
	}

	spin_lock_bh(&lp->rx_lock);
	spin_lock_irq(&lp->lock);
	tc35815_chip_reset(dev);
	tc35815_clear_queues(dev);
	tc35815_chip_init(dev);
	/* Reconfigure CAM again since tc35815_chip_init() initializes it. */
	tc35815_set_multicast_list(dev);
	spin_unlock_irq(&lp->lock);
	spin_unlock_bh(&lp->rx_lock);

	netif_wake_queue(dev);
}

static void tc35815_restart_work(struct work_struct *work)
{
	struct tc35815_local *lp =
		container_of(work, struct tc35815_local, restart_work);
	struct net_device *dev = lp->dev;

	tc35815_restart(dev);
}

static void tc35815_schedule_restart(struct net_device *dev)
{
	struct tc35815_local *lp = netdev_priv(dev);
	struct tc35815_regs __iomem *tr =
		(struct tc35815_regs __iomem *)dev->base_addr;
	unsigned long flags;

	/* disable interrupts */
	spin_lock_irqsave(&lp->lock, flags);
	tc_writel(0, &tr->Int_En);
	tc_writel(tc_readl(&tr->DMA_Ctl) | DMA_IntMask, &tr->DMA_Ctl);
	schedule_work(&lp->restart_work);
	spin_unlock_irqrestore(&lp->lock, flags);
}

static void tc35815_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct tc35815_regs __iomem *tr =
		(struct tc35815_regs __iomem *)dev->base_addr;

	printk(KERN_WARNING "%s: transmit timed out, status %#x\n",
	       dev->name, tc_readl(&tr->Tx_Stat));

	/* Try to restart the adaptor. */
	tc35815_schedule_restart(dev);
	dev->stats.tx_errors++;
}

/*
 * Open/initialize the controller. This is called (in the current kernel)
 * sometime after booting when the 'ifconfig' program is run.
 *
 * This routine should set everything up anew at each open, even
 * registers that "should" only need to be set once at boot, so that
 * there is a non-reboot way to recover if something goes wrong.
 */
static int
tc35815_open(struct net_device *dev)
{
	struct tc35815_local *lp = netdev_priv(dev);

	/*
	 * This is used if the interrupt line can be turned off (shared).
	 * See 3c503.c for an example of selecting the IRQ at config-time.
	 */
	if (request_irq(dev->irq, tc35815_interrupt, IRQF_SHARED,
			dev->name, dev))
		return -EAGAIN;

	tc35815_chip_reset(dev);

	if (tc35815_init_queues(dev) != 0) {
		free_irq(dev->irq, dev);
		return -EAGAIN;
	}

	napi_enable(&lp->napi);

	/* Reset the hardware here. Don't forget to set the station address. */
	spin_lock_irq(&lp->lock);
	tc35815_chip_init(dev);
	spin_unlock_irq(&lp->lock);

	netif_carrier_off(dev);
	/* schedule a link state check */
	phy_start(dev->phydev);

	/* We are now ready to accept transmit requests from
	 * the networking queueing layer.
	 */
	netif_start_queue(dev);

	return 0;
}

/* This will only be invoked if your driver is _not_ in XOFF state.
 * What this means is that you need not check it, and that this
 * invariant will hold if you make sure that the netif_*_queue()
 * calls are done at the proper times.
 */
static netdev_tx_t
tc35815_send_packet(struct sk_buff *skb, struct net_device *dev)
{
	struct tc35815_local *lp = netdev_priv(dev);
	struct TxFD *txfd;
	unsigned long flags;

	/* If some error occurs while trying to transmit this
	 * packet, you should return '1' from this function.
	 * In such a case you _may not_ do anything to the
	 * SKB, it is still owned by the network queueing
	 * layer when an error is returned.  This means you
	 * may not modify any SKB fields, you may not free
	 * the SKB, etc.
	 */

	/* This is the most common case for modern hardware.
	 * The spinlock protects this code from the TX complete
	 * hardware interrupt handler.  Queue flow control is
	 * thus managed under this lock as well.
	 */
	spin_lock_irqsave(&lp->lock, flags);

	/* failsafe... (handle txdone now if half of FDs are used) */
	if ((lp->tfd_start + TX_FD_NUM - lp->tfd_end) % TX_FD_NUM >
	    TX_FD_NUM / 2)
		tc35815_txdone(dev);

	if (netif_msg_pktdata(lp))
		print_eth(skb->data);
#ifdef DEBUG
	if (lp->tx_skbs[lp->tfd_start].skb) {
		printk("%s: tx_skbs conflict.\n", dev->name);
		panic_queues(dev);
	}
#else
	BUG_ON(lp->tx_skbs[lp->tfd_start].skb);
#endif
	lp->tx_skbs[lp->tfd_start].skb = skb;
	lp->tx_skbs[lp->tfd_start].skb_dma = dma_map_single(&lp->pci_dev->dev,
							    skb->data,
							    skb->len,
							    DMA_TO_DEVICE);

	/* add to ring */
	txfd = &lp->tfd_base[lp->tfd_start];
	txfd->bd.BuffData = cpu_to_le32(lp->tx_skbs[lp->tfd_start].skb_dma);
	txfd->bd.BDCtl = cpu_to_le32(skb->len);
	txfd->fd.FDSystem = cpu_to_le32(lp->tfd_start);
	txfd->fd.FDCtl = cpu_to_le32(FD_CownsFD | (1 << FD_BDCnt_SHIFT));

	if (lp->tfd_start == lp->tfd_end) {
		struct tc35815_regs __iomem *tr =
			(struct tc35815_regs __iomem *)dev->base_addr;
		/* Start DMA Transmitter. */
		txfd->fd.FDNext |= cpu_to_le32(FD_Next_EOL);
		txfd->fd.FDCtl |= cpu_to_le32(FD_FrmOpt_IntTx);
		if (netif_msg_tx_queued(lp)) {
			printk("%s: starting TxFD.\n", dev->name);
			dump_txfd(txfd);
		}
		tc_writel(fd_virt_to_bus(lp, txfd), &tr->TxFrmPtr);
	} else {
		txfd->fd.FDNext &= cpu_to_le32(~FD_Next_EOL);
		if (netif_msg_tx_queued(lp)) {
			printk("%s: queueing TxFD.\n", dev->name);
			dump_txfd(txfd);
		}
	}
	lp->tfd_start = (lp->tfd_start + 1) % TX_FD_NUM;

	/* If we just used up the very last entry in the
	 * TX ring on this device, tell the queueing
	 * layer to send no more.
	 */
	if (tc35815_tx_full(dev)) {
		if (netif_msg_tx_queued(lp))
			printk(KERN_WARNING "%s: TxFD Exhausted.\n", dev->name);
		netif_stop_queue(dev);
	}

	/* When the TX completion hw interrupt arrives, this
	 * is when the transmit statistics are updated.
	 */

	spin_unlock_irqrestore(&lp->lock, flags);
	return NETDEV_TX_OK;
}

#define FATAL_ERROR_INT \
	(Int_IntPCI | Int_DmParErr | Int_IntNRAbt)
static void tc35815_fatal_error_interrupt(struct net_device *dev, u32 status)
{
	static int count;
	printk(KERN_WARNING "%s: Fatal Error Interrupt (%#x):",
	       dev->name, status);
	if (status & Int_IntPCI)
		printk(" IntPCI");
	if (status & Int_DmParErr)
		printk(" DmParErr");
	if (status & Int_IntNRAbt)
		printk(" IntNRAbt");
	printk("\n");
	if (count++ > 100)
		panic("%s: Too many fatal errors.", dev->name);
	printk(KERN_WARNING "%s: Resetting ...\n", dev->name);
	/* Try to restart the adaptor. */
	tc35815_schedule_restart(dev);
}

static int tc35815_do_interrupt(struct net_device *dev, u32 status, int limit)
{
	struct tc35815_local *lp = netdev_priv(dev);
	int ret = -1;

	/* Fatal errors... */
	if (status & FATAL_ERROR_INT) {
		tc35815_fatal_error_interrupt(dev, status);
		return 0;
	}
	/* recoverable errors */
	if (status & Int_IntFDAEx) {
		if (netif_msg_rx_err(lp))
			dev_warn(&dev->dev,
				 "Free Descriptor Area Exhausted (%#x).\n",
				 status);
		dev->stats.rx_dropped++;
		ret = 0;
	}
	if (status & Int_IntBLEx) {
		if (netif_msg_rx_err(lp))
			dev_warn(&dev->dev,
				 "Buffer List Exhausted (%#x).\n",
				 status);
		dev->stats.rx_dropped++;
		ret = 0;
	}
	if (status & Int_IntExBD) {
		if (netif_msg_rx_err(lp))
			dev_warn(&dev->dev,
				 "Excessive Buffer Descriptors (%#x).\n",
				 status);
		dev->stats.rx_length_errors++;
		ret = 0;
	}

	/* normal notification */
	if (status & Int_IntMacRx) {
		/* Got a packet(s). */
		ret = tc35815_rx(dev, limit);
		lp->lstats.rx_ints++;
	}
	if (status & Int_IntMacTx) {
		/* Transmit complete. */
		lp->lstats.tx_ints++;
		spin_lock_irq(&lp->lock);
		tc35815_txdone(dev);
		spin_unlock_irq(&lp->lock);
		if (ret < 0)
			ret = 0;
	}
	return ret;
}

/*
 * The typical workload of the driver:
 * Handle the network interface interrupts.
 */
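/*
 * Interrupt/NAPI handshake: the hard handler only masks further chip
 * interrupts via DMA_IntMask and schedules NAPI; tc35815_poll() then
 * reads and acknowledges Int_Src with interrupts masked.  Seeing
 * DMA_IntMask already set here means the IRQ cannot be ours.
 */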
static irqreturn_t tc35815_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tc35815_local *lp = netdev_priv(dev);
	struct tc35815_regs __iomem *tr =
		(struct tc35815_regs __iomem *)dev->base_addr;
	u32 dmactl = tc_readl(&tr->DMA_Ctl);

	if (!(dmactl & DMA_IntMask)) {
		/* disable interrupts */
		tc_writel(dmactl | DMA_IntMask, &tr->DMA_Ctl);
		if (napi_schedule_prep(&lp->napi))
			__napi_schedule(&lp->napi);
		else {
			printk(KERN_ERR "%s: interrupt taken in poll\n",
			       dev->name);
			BUG();
		}
		(void)tc_readl(&tr->Int_Src);	/* flush */
		return IRQ_HANDLED;
	}
	return IRQ_NONE;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void tc35815_poll_controller(struct net_device *dev)
{
	disable_irq(dev->irq);
	tc35815_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif
1460  
1461  /* We have a good packet(s), get it/them out of the buffers. */
1462  static int
tc35815_rx(struct net_device * dev,int limit)1463  tc35815_rx(struct net_device *dev, int limit)
1464  {
1465  	struct tc35815_local *lp = netdev_priv(dev);
1466  	unsigned int fdctl;
1467  	int i;
1468  	int received = 0;
1469  
1470  	while (!((fdctl = le32_to_cpu(lp->rfd_cur->fd.FDCtl)) & FD_CownsFD)) {
1471  		int status = le32_to_cpu(lp->rfd_cur->fd.FDStat);
1472  		int pkt_len = fdctl & FD_FDLength_MASK;
1473  		int bd_count = (fdctl & FD_BDCnt_MASK) >> FD_BDCnt_SHIFT;
1474  #ifdef DEBUG
1475  		struct RxFD *next_rfd;
1476  #endif
1477  #if (RX_CTL_CMD & Rx_StripCRC) == 0
1478  		pkt_len -= ETH_FCS_LEN;
1479  #endif
1480  
1481  		if (netif_msg_rx_status(lp))
1482  			dump_rxfd(lp->rfd_cur);
1483  		if (status & Rx_Good) {
1484  			struct sk_buff *skb;
1485  			unsigned char *data;
1486  			int cur_bd;
1487  
1488  			if (--limit < 0)
1489  				break;
1490  			BUG_ON(bd_count > 1);
1491  			cur_bd = (le32_to_cpu(lp->rfd_cur->bd[0].BDCtl)
1492  				  & BD_RxBDID_MASK) >> BD_RxBDID_SHIFT;
1493  #ifdef DEBUG
1494  			if (cur_bd >= RX_BUF_NUM) {
1495  				printk("%s: invalid BDID.\n", dev->name);
1496  				panic_queues(dev);
1497  			}
1498  			BUG_ON(lp->rx_skbs[cur_bd].skb_dma !=
1499  			       (le32_to_cpu(lp->rfd_cur->bd[0].BuffData) & ~3));
1500  			if (!lp->rx_skbs[cur_bd].skb) {
1501  				printk("%s: NULL skb.\n", dev->name);
1502  				panic_queues(dev);
1503  			}
1504  #else
1505  			BUG_ON(cur_bd >= RX_BUF_NUM);
1506  #endif
1507  			skb = lp->rx_skbs[cur_bd].skb;
1508  			prefetch(skb->data);
1509  			lp->rx_skbs[cur_bd].skb = NULL;
1510  			dma_unmap_single(&lp->pci_dev->dev,
1511  					 lp->rx_skbs[cur_bd].skb_dma,
1512  					 RX_BUF_SIZE, DMA_FROM_DEVICE);
1513  			if (!HAVE_DMA_RXALIGN(lp) && NET_IP_ALIGN != 0)
1514  				memmove(skb->data, skb->data - NET_IP_ALIGN,
1515  					pkt_len);
1516  			data = skb_put(skb, pkt_len);
1517  			if (netif_msg_pktdata(lp))
1518  				print_eth(data);
1519  			skb->protocol = eth_type_trans(skb, dev);
1520  			netif_receive_skb(skb);
1521  			received++;
1522  			dev->stats.rx_packets++;
1523  			dev->stats.rx_bytes += pkt_len;
1524  		} else {
1525  			dev->stats.rx_errors++;
1526  			if (netif_msg_rx_err(lp))
1527  				dev_info(&dev->dev, "Rx error (status %x)\n",
1528  					 status & Rx_Stat_Mask);
1529  			/* WORKAROUND: LongErr and CRCErr means Overflow. */
1530  			if ((status & Rx_LongErr) && (status & Rx_CRCErr)) {
1531  				status &= ~(Rx_LongErr|Rx_CRCErr);
1532  				status |= Rx_Over;
1533  			}
1534  			if (status & Rx_LongErr)
1535  				dev->stats.rx_length_errors++;
1536  			if (status & Rx_Over)
1537  				dev->stats.rx_fifo_errors++;
1538  			if (status & Rx_CRCErr)
1539  				dev->stats.rx_crc_errors++;
1540  			if (status & Rx_Align)
1541  				dev->stats.rx_frame_errors++;
1542  		}
1543  
1544  		if (bd_count > 0) {
1545  			/* put Free Buffer back to controller */
1546  			int bdctl = le32_to_cpu(lp->rfd_cur->bd[bd_count - 1].BDCtl);
1547  			unsigned char id =
1548  				(bdctl & BD_RxBDID_MASK) >> BD_RxBDID_SHIFT;
1549  #ifdef DEBUG
1550  			if (id >= RX_BUF_NUM) {
1551  				printk("%s: invalid BDID.\n", dev->name);
1552  				panic_queues(dev);
1553  			}
1554  #else
1555  			BUG_ON(id >= RX_BUF_NUM);
1556  #endif
1557  			/* free old buffers */
1558  			lp->fbl_count--;
1559  			while (lp->fbl_count < RX_BUF_NUM)
1560  			{
1561  				unsigned char curid =
1562  					(id + 1 + lp->fbl_count) % RX_BUF_NUM;
1563  				struct BDesc *bd = &lp->fbl_ptr->bd[curid];
1564  #ifdef DEBUG
1565  				bdctl = le32_to_cpu(bd->BDCtl);
1566  				if (bdctl & BD_CownsBD) {
1567  					printk("%s: Freeing invalid BD.\n",
1568  					       dev->name);
1569  					panic_queues(dev);
1570  				}
1571  #endif
1572  				/* pass BD to controller */
1573  				if (!lp->rx_skbs[curid].skb) {
1574  					lp->rx_skbs[curid].skb =
1575  						alloc_rxbuf_skb(dev,
1576  								lp->pci_dev,
1577  								&lp->rx_skbs[curid].skb_dma);
1578  					if (!lp->rx_skbs[curid].skb)
1579  						break; /* try on next reception */
1580  					bd->BuffData = cpu_to_le32(lp->rx_skbs[curid].skb_dma);
1581  				}
1582  				/* Note: BDLength was modified by the chip. */
1583  				bd->BDCtl = cpu_to_le32(BD_CownsBD |
1584  							(curid << BD_RxBDID_SHIFT) |
1585  							RX_BUF_SIZE);
1586  				lp->fbl_count++;
1587  			}
1588  		}
1589  
1590  		/* put RxFD back to controller */
1591  #ifdef DEBUG
1592  		next_rfd = fd_bus_to_virt(lp,
1593  					  le32_to_cpu(lp->rfd_cur->fd.FDNext));
1594  		if (next_rfd < lp->rfd_base || next_rfd > lp->rfd_limit) {
1595  			printk("%s: RxFD FDNext invalid.\n", dev->name);
1596  			panic_queues(dev);
1597  		}
1598  #endif
1599  		for (i = 0; i < (bd_count + 1) / 2 + 1; i++) {
1600  			/* pass FD to controller */
1601  #ifdef DEBUG
1602  			lp->rfd_cur->fd.FDNext = cpu_to_le32(0xdeaddead);
1603  #else
1604  			lp->rfd_cur->fd.FDNext = cpu_to_le32(FD_Next_EOL);
1605  #endif
1606  			lp->rfd_cur->fd.FDCtl = cpu_to_le32(FD_CownsFD);
1607  			lp->rfd_cur++;
1608  		}
1609  		if (lp->rfd_cur > lp->rfd_limit)
1610  			lp->rfd_cur = lp->rfd_base;
1611  #ifdef DEBUG
1612  		if (lp->rfd_cur != next_rfd)
1613  			printk("rfd_cur = %p, next_rfd %p\n",
1614  			       lp->rfd_cur, next_rfd);
1615  #endif
1616  	}
1617  
1618  	return received;
1619  }
1620  
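/*
 * Illustrative sketch (not part of the driver; kept out of the build):
 * a worked example of the free-buffer-list index arithmetic in the
 * refill loop above, assuming RX_BUF_NUM == 32 (the real value may
 * differ).
 */
#if 0
	/* Two buffers are outstanding (fbl_count == 30 of 32) and the
	 * last returned id was 5.  The loop re-arms the missing entries
	 * in order, wrapping modulo the ring size:
	 */
	unsigned char id = 5, fbl_count = 30;
	unsigned char first  = (id + 1 + fbl_count) % 32;	/* == 4 */
	unsigned char second = (id + 1 + (fbl_count + 1)) % 32;	/* == 5 */
#endif
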
1621  static int tc35815_poll(struct napi_struct *napi, int budget)
1622  {
1623  	struct tc35815_local *lp = container_of(napi, struct tc35815_local, napi);
1624  	struct net_device *dev = lp->dev;
1625  	struct tc35815_regs __iomem *tr =
1626  		(struct tc35815_regs __iomem *)dev->base_addr;
1627  	int received = 0, handled;
1628  	u32 status;
1629  
1630  	if (budget <= 0)
1631  		return received;
1632  
1633  	spin_lock(&lp->rx_lock);
1634  	status = tc_readl(&tr->Int_Src);
1635  	do {
1636  		/* BLEx, FDAEx will be cleared later */
1637  		tc_writel(status & ~(Int_BLEx | Int_FDAEx),
1638  			  &tr->Int_Src);	/* write to clear */
1639  
1640  		handled = tc35815_do_interrupt(dev, status, budget - received);
1641  		if (status & (Int_BLEx | Int_FDAEx))
1642  			tc_writel(status & (Int_BLEx | Int_FDAEx),
1643  				  &tr->Int_Src);
1644  		if (handled >= 0) {
1645  			received += handled;
1646  			if (received >= budget)
1647  				break;
1648  		}
1649  		status = tc_readl(&tr->Int_Src);
1650  	} while (status);
1651  	spin_unlock(&lp->rx_lock);
1652  
1653  	if (received < budget) {
1654  		napi_complete_done(napi, received);
1655  		/* enable interrupts */
1656  		tc_writel(tc_readl(&tr->DMA_Ctl) & ~DMA_IntMask, &tr->DMA_Ctl);
1657  	}
1658  	return received;
1659  }
1660  
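/*
 * Illustrative sketch (not part of the driver; kept out of the build):
 * tc35815_poll() above follows the standard NAPI contract, condensed
 * here with hypothetical helpers in place of the chip-specific work
 * and the DMA_Ctl unmask.
 */
#if 0
static int example_poll(struct napi_struct *napi, int budget)
{
	int work_done = example_process_rx(napi, budget);	/* <= budget */

	if (work_done < budget && napi_complete_done(napi, work_done))
		example_unmask_chip_irqs(napi);	/* hypothetical helper */
	return work_done;	/* never report more than budget */
}
#endif
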
1661  #define TX_STA_ERR	(Tx_ExColl|Tx_Under|Tx_Defer|Tx_NCarr|Tx_LateColl|Tx_TxPar|Tx_SQErr)
1662  
1663  static void
1664  tc35815_check_tx_stat(struct net_device *dev, int status)
1665  {
1666  	struct tc35815_local *lp = netdev_priv(dev);
1667  	const char *msg = NULL;
1668  
1669  	/* count collisions */
1670  	if (status & Tx_ExColl)
1671  		dev->stats.collisions += 16;
1672  	if (status & Tx_TxColl_MASK)
1673  		dev->stats.collisions += status & Tx_TxColl_MASK;
1674  
1675  	/* TX4939 does not have NCarr */
1676  	if (lp->chiptype == TC35815_TX4939)
1677  		status &= ~Tx_NCarr;
1678  	/* WORKAROUND: ignore LostCrS in full duplex operation */
1679  	if (!lp->link || lp->duplex == DUPLEX_FULL)
1680  		status &= ~Tx_NCarr;
1681  
1682  	if (!(status & TX_STA_ERR)) {
1683  		/* no error. */
1684  		dev->stats.tx_packets++;
1685  		return;
1686  	}
1687  
1688  	dev->stats.tx_errors++;
1689  	if (status & Tx_ExColl) {
1690  		dev->stats.tx_aborted_errors++;
1691  		msg = "Excessive Collision.";
1692  	}
1693  	if (status & Tx_Under) {
1694  		dev->stats.tx_fifo_errors++;
1695  		msg = "Tx FIFO Underrun.";
1696  		if (lp->lstats.tx_underrun < TX_THRESHOLD_KEEP_LIMIT) {
1697  			lp->lstats.tx_underrun++;
1698  			if (lp->lstats.tx_underrun >= TX_THRESHOLD_KEEP_LIMIT) {
1699  				struct tc35815_regs __iomem *tr =
1700  					(struct tc35815_regs __iomem *)dev->base_addr;
1701  				tc_writel(TX_THRESHOLD_MAX, &tr->TxThrsh);
1702  				msg = "Tx FIFO Underrun. Changed Tx threshold to max.";
1703  			}
1704  		}
1705  	}
1706  	if (status & Tx_Defer) {
1707  		dev->stats.tx_fifo_errors++;
1708  		msg = "Excessive Deferral.";
1709  	}
1710  	if (status & Tx_NCarr) {
1711  		dev->stats.tx_carrier_errors++;
1712  		msg = "Lost Carrier Sense.";
1713  	}
1714  	if (status & Tx_LateColl) {
1715  		dev->stats.tx_aborted_errors++;
1716  		msg = "Late Collision.";
1717  	}
1718  	if (status & Tx_TxPar) {
1719  		dev->stats.tx_fifo_errors++;
1720  		msg = "Transmit Parity Error.";
1721  	}
1722  	if (status & Tx_SQErr) {
1723  		dev->stats.tx_heartbeat_errors++;
1724  		msg = "Signal Quality Error.";
1725  	}
1726  	if (msg && netif_msg_tx_err(lp))
1727  		printk(KERN_WARNING "%s: %s (%#x)\n", dev->name, msg, status);
1728  }
1729  
1730  /* This handles TX complete events posted by the device
1731   * via interrupts.
1732   */
1733  static void
1734  tc35815_txdone(struct net_device *dev)
1735  {
1736  	struct tc35815_local *lp = netdev_priv(dev);
1737  	struct TxFD *txfd;
1738  	unsigned int fdctl;
1739  
1740  	txfd = &lp->tfd_base[lp->tfd_end];
1741  	while (lp->tfd_start != lp->tfd_end &&
1742  	       !((fdctl = le32_to_cpu(txfd->fd.FDCtl)) & FD_CownsFD)) {
1743  		int status = le32_to_cpu(txfd->fd.FDStat);
1744  		struct sk_buff *skb;
1745  		unsigned long fdnext = le32_to_cpu(txfd->fd.FDNext);
1746  		u32 fdsystem = le32_to_cpu(txfd->fd.FDSystem);
1747  
1748  		if (netif_msg_tx_done(lp)) {
1749  			printk("%s: complete TxFD.\n", dev->name);
1750  			dump_txfd(txfd);
1751  		}
1752  		tc35815_check_tx_stat(dev, status);
1753  
1754  		skb = fdsystem != 0xffffffff ?
1755  			lp->tx_skbs[fdsystem].skb : NULL;
1756  #ifdef DEBUG
1757  		if (lp->tx_skbs[lp->tfd_end].skb != skb) {
1758  			printk("%s: tx_skbs mismatch.\n", dev->name);
1759  			panic_queues(dev);
1760  		}
1761  #else
1762  		BUG_ON(lp->tx_skbs[lp->tfd_end].skb != skb);
1763  #endif
1764  		if (skb) {
1765  			dev->stats.tx_bytes += skb->len;
1766  			dma_unmap_single(&lp->pci_dev->dev,
1767  					 lp->tx_skbs[lp->tfd_end].skb_dma,
1768  					 skb->len, DMA_TO_DEVICE);
1769  			lp->tx_skbs[lp->tfd_end].skb = NULL;
1770  			lp->tx_skbs[lp->tfd_end].skb_dma = 0;
1771  			dev_kfree_skb_any(skb);
1772  		}
1773  		txfd->fd.FDSystem = cpu_to_le32(0xffffffff);
1774  
1775  		lp->tfd_end = (lp->tfd_end + 1) % TX_FD_NUM;
1776  		txfd = &lp->tfd_base[lp->tfd_end];
1777  #ifdef DEBUG
1778  		if ((fdnext & ~FD_Next_EOL) != fd_virt_to_bus(lp, txfd)) {
1779  			printk("%s: TxFD FDNext invalid.\n", dev->name);
1780  			panic_queues(dev);
1781  		}
1782  #endif
1783  		if (fdnext & FD_Next_EOL) {
1784  			/* The DMA transmitter has stopped at end-of-list. */
1785  			if (lp->tfd_end != lp->tfd_start) {
1786  				struct tc35815_regs __iomem *tr =
1787  					(struct tc35815_regs __iomem *)dev->base_addr;
1788  				int head = (lp->tfd_start + TX_FD_NUM - 1) % TX_FD_NUM;
1789  				struct TxFD *txhead = &lp->tfd_base[head];
1790  				int qlen = (lp->tfd_start + TX_FD_NUM
1791  					    - lp->tfd_end) % TX_FD_NUM;
1792  
1793  #ifdef DEBUG
1794  				if (!(le32_to_cpu(txfd->fd.FDCtl) & FD_CownsFD)) {
1795  					printk("%s: TxFD FDCtl invalid.\n", dev->name);
1796  					panic_queues(dev);
1797  				}
1798  #endif
1799  				/* log max queue length */
1800  				if (lp->lstats.max_tx_qlen < qlen)
1801  					lp->lstats.max_tx_qlen = qlen;
1802  
1803  
1804  				/* start DMA Transmitter again */
1805  				txhead->fd.FDNext |= cpu_to_le32(FD_Next_EOL);
1806  				txhead->fd.FDCtl |= cpu_to_le32(FD_FrmOpt_IntTx);
1807  				if (netif_msg_tx_queued(lp)) {
1808  					printk("%s: start TxFD on queue.\n",
1809  					       dev->name);
1810  					dump_txfd(txfd);
1811  				}
1812  				tc_writel(fd_virt_to_bus(lp, txfd), &tr->TxFrmPtr);
1813  			}
1814  			break;
1815  		}
1816  	}
1817  
1818  	/* If we had stopped the queue due to a "tx full"
1819  	 * condition, and space has now been made available,
1820  	 * wake up the queue.
1821  	 */
1822  	if (netif_queue_stopped(dev) && !tc35815_tx_full(dev))
1823  		netif_wake_queue(dev);
1824  }
1825  
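/*
 * Illustrative sketch (not part of the driver; kept out of the build):
 * tfd_start (producer) and tfd_end (consumer) form a ring.  A worked
 * example of the queue-length arithmetic above, assuming
 * TX_FD_NUM == 64 (the real value may differ).
 */
#if 0
	int tfd_start = 3, tfd_end = 60;
	int qlen = (tfd_start + 64 - tfd_end) % 64;	/* == 7 FDs in flight */
#endif
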
1826  /* The inverse routine to tc35815_open(). */
1827  static int
1828  tc35815_close(struct net_device *dev)
1829  {
1830  	struct tc35815_local *lp = netdev_priv(dev);
1831  
1832  	netif_stop_queue(dev);
1833  	napi_disable(&lp->napi);
1834  	if (dev->phydev)
1835  		phy_stop(dev->phydev);
1836  	cancel_work_sync(&lp->restart_work);
1837  
1838  	/* Flush the Tx and disable Rx here. */
1839  	tc35815_chip_reset(dev);
1840  	free_irq(dev->irq, dev);
1841  
1842  	tc35815_free_queues(dev);
1843  
1844  	return 0;
1845  
1846  }
1847  
1848  /*
1849   * Get the current statistics.
1850   * This may be called with the card open or closed.
1851   */
1852  static struct net_device_stats *tc35815_get_stats(struct net_device *dev)
1853  {
1854  	struct tc35815_regs __iomem *tr =
1855  		(struct tc35815_regs __iomem *)dev->base_addr;
1856  	if (netif_running(dev))
1857  		/* Update the statistics from the device registers. */
1858  		dev->stats.rx_missed_errors += tc_readl(&tr->Miss_Cnt);
1859  
1860  	return &dev->stats;
1861  }
1862  
1863  static void tc35815_set_cam_entry(struct net_device *dev, int index,
1864  				  const unsigned char *addr)
1865  {
1866  	struct tc35815_local *lp = netdev_priv(dev);
1867  	struct tc35815_regs __iomem *tr =
1868  		(struct tc35815_regs __iomem *)dev->base_addr;
1869  	int cam_index = index * 6;
1870  	u32 cam_data;
1871  	u32 saved_addr;
1872  
1873  	saved_addr = tc_readl(&tr->CAM_Adr);
1874  
1875  	if (netif_msg_hw(lp))
1876  		printk(KERN_DEBUG "%s: CAM %d: %pM\n",
1877  			dev->name, index, addr);
1878  	if (index & 1) {
1879  		/* read modify write */
1880  		tc_writel(cam_index - 2, &tr->CAM_Adr);
1881  		cam_data = tc_readl(&tr->CAM_Data) & 0xffff0000;
1882  		cam_data |= addr[0] << 8 | addr[1];
1883  		tc_writel(cam_data, &tr->CAM_Data);
1884  		/* write whole word */
1885  		tc_writel(cam_index + 2, &tr->CAM_Adr);
1886  		cam_data = (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) | addr[5];
1887  		tc_writel(cam_data, &tr->CAM_Data);
1888  	} else {
1889  		/* write whole word */
1890  		tc_writel(cam_index, &tr->CAM_Adr);
1891  		cam_data = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3];
1892  		tc_writel(cam_data, &tr->CAM_Data);
1893  		/* read modify write */
1894  		tc_writel(cam_index + 4, &tr->CAM_Adr);
1895  		cam_data = tc_readl(&tr->CAM_Data) & 0x0000ffff;
1896  		cam_data |= addr[4] << 24 | (addr[5] << 16);
1897  		tc_writel(cam_data, &tr->CAM_Data);
1898  	}
1899  
1900  	tc_writel(saved_addr, &tr->CAM_Adr);
1901  }
1902  
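/*
 * A worked example of the CAM packing above (descriptive only): each
 * CAM entry is 6 bytes packed big-endian into 32-bit words, so
 * odd-indexed entries straddle a word boundary.  For index 1
 * (cam_index == 6) and the address 00:11:22:33:44:55:
 *
 *	word at CAM_Adr 4:  xxxx0011   (read-modify-write keeps xxxx)
 *	word at CAM_Adr 8:  22334455   (whole-word write)
 */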
1903  
1904  /*
1905   * Set or clear the multicast filter for this adaptor.
1906   * IFF_PROMISC set:	promiscuous mode, receive all packets
1907   * IFF_ALLMULTI set (or MC list too long):	receive all multicast packets
1908   * otherwise:		multicast mode, receive normal and MC packets,
1909   *			and do best-effort filtering via the CAM.
1910   */
1911  static void
1912  tc35815_set_multicast_list(struct net_device *dev)
1913  {
1914  	struct tc35815_regs __iomem *tr =
1915  		(struct tc35815_regs __iomem *)dev->base_addr;
1916  
1917  	if (dev->flags & IFF_PROMISC) {
1918  		/* With some (possibly all) 100M half-duplex hubs, the controller
1919  		 * hangs if promiscuous mode is enabled before link-up...
1920  		 */
1921  		struct tc35815_local *lp = netdev_priv(dev);
1922  
1923  		if (!lp->link)
1924  			return;
1925  		/* Enable promiscuous mode */
1926  		tc_writel(CAM_CompEn | CAM_BroadAcc | CAM_GroupAcc | CAM_StationAcc, &tr->CAM_Ctl);
1927  	} else if ((dev->flags & IFF_ALLMULTI) ||
1928  		  netdev_mc_count(dev) > CAM_ENTRY_MAX - 3) {
1929  		/* CAM entries 0, 1, and 20 are reserved. */
1930  		/* Disable promiscuous mode, use normal mode. */
1931  		tc_writel(CAM_CompEn | CAM_BroadAcc | CAM_GroupAcc, &tr->CAM_Ctl);
1932  	} else if (!netdev_mc_empty(dev)) {
1933  		struct netdev_hw_addr *ha;
1934  		int i;
1935  		int ena_bits = CAM_Ena_Bit(CAM_ENTRY_SOURCE);
1936  
1937  		tc_writel(0, &tr->CAM_Ctl);
1938  		/* Walk the address list, and load the filter */
1939  		i = 0;
1940  		netdev_for_each_mc_addr(ha, dev) {
1941  			/* entries 0 and 1 are reserved. */
1942  			tc35815_set_cam_entry(dev, i + 2, ha->addr);
1943  			ena_bits |= CAM_Ena_Bit(i + 2);
1944  			i++;
1945  		}
1946  		tc_writel(ena_bits, &tr->CAM_Ena);
1947  		tc_writel(CAM_CompEn | CAM_BroadAcc, &tr->CAM_Ctl);
1948  	} else {
1949  		tc_writel(CAM_Ena_Bit(CAM_ENTRY_SOURCE), &tr->CAM_Ena);
1950  		tc_writel(CAM_CompEn | CAM_BroadAcc, &tr->CAM_Ctl);
1951  	}
1952  }
1953  
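/*
 * Illustrative sketch (not part of the driver; kept out of the build):
 * CAM_Ena holds one enable bit per CAM entry.  How the bits accumulate
 * for two multicast addresses loaded into entries 2 and 3, assuming
 * CAM_Ena_Bit(n) expands to (1 << n).
 */
#if 0
	int ena_bits = CAM_Ena_Bit(CAM_ENTRY_SOURCE);	/* station address */
	ena_bits |= CAM_Ena_Bit(2) | CAM_Ena_Bit(3);	/* MC entries */
	/* ena_bits is then written to &tr->CAM_Ena */
#endif
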
1954  static void tc35815_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1955  {
1956  	struct tc35815_local *lp = netdev_priv(dev);
1957  
1958  	strscpy(info->driver, MODNAME, sizeof(info->driver));
1959  	strscpy(info->version, DRV_VERSION, sizeof(info->version));
1960  	strscpy(info->bus_info, pci_name(lp->pci_dev), sizeof(info->bus_info));
1961  }
1962  
1963  static u32 tc35815_get_msglevel(struct net_device *dev)
1964  {
1965  	struct tc35815_local *lp = netdev_priv(dev);
1966  	return lp->msg_enable;
1967  }
1968  
1969  static void tc35815_set_msglevel(struct net_device *dev, u32 datum)
1970  {
1971  	struct tc35815_local *lp = netdev_priv(dev);
1972  	lp->msg_enable = datum;
1973  }
1974  
1975  static int tc35815_get_sset_count(struct net_device *dev, int sset)
1976  {
1977  	struct tc35815_local *lp = netdev_priv(dev);
1978  
1979  	switch (sset) {
1980  	case ETH_SS_STATS:
1981  		return sizeof(lp->lstats) / sizeof(int);
1982  	default:
1983  		return -EOPNOTSUPP;
1984  	}
1985  }
1986  
1987  static void tc35815_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data)
1988  {
1989  	struct tc35815_local *lp = netdev_priv(dev);
1990  	data[0] = lp->lstats.max_tx_qlen;
1991  	data[1] = lp->lstats.tx_ints;
1992  	data[2] = lp->lstats.rx_ints;
1993  	data[3] = lp->lstats.tx_underrun;
1994  }
1995  
1996  static struct {
1997  	const char str[ETH_GSTRING_LEN];
1998  } ethtool_stats_keys[] = {
1999  	{ "max_tx_qlen" },
2000  	{ "tx_ints" },
2001  	{ "rx_ints" },
2002  	{ "tx_underrun" },
2003  };
2004  
2005  static void tc35815_get_strings(struct net_device *dev, u32 stringset, u8 *data)
2006  {
2007  	memcpy(data, ethtool_stats_keys, sizeof(ethtool_stats_keys));
2008  }
2009  
2010  static const struct ethtool_ops tc35815_ethtool_ops = {
2011  	.get_drvinfo		= tc35815_get_drvinfo,
2012  	.get_link		= ethtool_op_get_link,
2013  	.get_msglevel		= tc35815_get_msglevel,
2014  	.set_msglevel		= tc35815_set_msglevel,
2015  	.get_strings		= tc35815_get_strings,
2016  	.get_sset_count		= tc35815_get_sset_count,
2017  	.get_ethtool_stats	= tc35815_get_ethtool_stats,
2018  	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
2019  	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
2020  };
2021  
2022  static void tc35815_chip_reset(struct net_device *dev)
2023  {
2024  	struct tc35815_regs __iomem *tr =
2025  		(struct tc35815_regs __iomem *)dev->base_addr;
2026  	int i;
2027  	/* reset the controller */
2028  	tc_writel(MAC_Reset, &tr->MAC_Ctl);
2029  	udelay(4); /* spec: wait at least 3200ns */
2030  	i = 0;
2031  	while (tc_readl(&tr->MAC_Ctl) & MAC_Reset) {
2032  		if (i++ > 100) {
2033  			printk(KERN_ERR "%s: MAC reset failed.\n", dev->name);
2034  			break;
2035  		}
2036  		mdelay(1);
2037  	}
2038  	tc_writel(0, &tr->MAC_Ctl);
2039  
2040  	/* initialize registers to their default values */
2041  	tc_writel(0, &tr->DMA_Ctl);
2042  	tc_writel(0, &tr->TxThrsh);
2043  	tc_writel(0, &tr->TxPollCtr);
2044  	tc_writel(0, &tr->RxFragSize);
2045  	tc_writel(0, &tr->Int_En);
2046  	tc_writel(0, &tr->FDA_Bas);
2047  	tc_writel(0, &tr->FDA_Lim);
2048  	tc_writel(0xffffffff, &tr->Int_Src);	/* Write 1 to clear */
2049  	tc_writel(0, &tr->CAM_Ctl);
2050  	tc_writel(0, &tr->Tx_Ctl);
2051  	tc_writel(0, &tr->Rx_Ctl);
2052  	tc_writel(0, &tr->CAM_Ena);
2053  	(void)tc_readl(&tr->Miss_Cnt);	/* Read to clear */
2054  
2055  	/* initialize internal SRAM */
2056  	tc_writel(DMA_TestMode, &tr->DMA_Ctl);
2057  	for (i = 0; i < 0x1000; i += 4) {
2058  		tc_writel(i, &tr->CAM_Adr);
2059  		tc_writel(0, &tr->CAM_Data);
2060  	}
2061  	tc_writel(0, &tr->DMA_Ctl);
2062  }
2063  
2064  static void tc35815_chip_init(struct net_device *dev)
2065  {
2066  	struct tc35815_local *lp = netdev_priv(dev);
2067  	struct tc35815_regs __iomem *tr =
2068  		(struct tc35815_regs __iomem *)dev->base_addr;
2069  	unsigned long txctl = TX_CTL_CMD;
2070  
2071  	/* load station address to CAM */
2072  	tc35815_set_cam_entry(dev, CAM_ENTRY_SOURCE, dev->dev_addr);
2073  
2074  	/* Enable CAM (broadcast and unicast) */
2075  	tc_writel(CAM_Ena_Bit(CAM_ENTRY_SOURCE), &tr->CAM_Ena);
2076  	tc_writel(CAM_CompEn | CAM_BroadAcc, &tr->CAM_Ctl);
2077  
2078  	/* Use DMA_RxAlign_2 to make IP header 4-byte aligned. */
2079  	if (HAVE_DMA_RXALIGN(lp))
2080  		tc_writel(DMA_BURST_SIZE | DMA_RxAlign_2, &tr->DMA_Ctl);
2081  	else
2082  		tc_writel(DMA_BURST_SIZE, &tr->DMA_Ctl);
2083  	tc_writel(0, &tr->TxPollCtr);	/* Batch mode */
2084  	tc_writel(TX_THRESHOLD, &tr->TxThrsh);
2085  	tc_writel(INT_EN_CMD, &tr->Int_En);
2086  
2087  	/* set queues */
2088  	tc_writel(fd_virt_to_bus(lp, lp->rfd_base), &tr->FDA_Bas);
2089  	tc_writel((unsigned long)lp->rfd_limit - (unsigned long)lp->rfd_base,
2090  		  &tr->FDA_Lim);
2091  	/*
2092  	 * Activation method:
2093  	 * First, enable the MAC Transmitter and the DMA Receive circuits.
2094  	 * Then enable the DMA Transmitter and the MAC Receive circuits.
2095  	 */
2096  	tc_writel(fd_virt_to_bus(lp, lp->fbl_ptr), &tr->BLFrmPtr);	/* start DMA receiver */
2097  	tc_writel(RX_CTL_CMD, &tr->Rx_Ctl);	/* start MAC receiver */
2098  
2099  	/* start MAC transmitter */
2100  	/* TX4939 does not have EnLCarr */
2101  	if (lp->chiptype == TC35815_TX4939)
2102  		txctl &= ~Tx_EnLCarr;
2103  	/* WORKAROUND: ignore LostCrS in full duplex operation */
2104  	if (!dev->phydev || !lp->link || lp->duplex == DUPLEX_FULL)
2105  		txctl &= ~Tx_EnLCarr;
2106  	tc_writel(txctl, &tr->Tx_Ctl);
2107  }
2108  
2109  #ifdef CONFIG_PM
2110  static int tc35815_suspend(struct pci_dev *pdev, pm_message_t state)
2111  {
2112  	struct net_device *dev = pci_get_drvdata(pdev);
2113  	struct tc35815_local *lp = netdev_priv(dev);
2114  	unsigned long flags;
2115  
2116  	pci_save_state(pdev);
2117  	if (!netif_running(dev))
2118  		return 0;
2119  	netif_device_detach(dev);
2120  	if (dev->phydev)
2121  		phy_stop(dev->phydev);
2122  	spin_lock_irqsave(&lp->lock, flags);
2123  	tc35815_chip_reset(dev);
2124  	spin_unlock_irqrestore(&lp->lock, flags);
2125  	pci_set_power_state(pdev, PCI_D3hot);
2126  	return 0;
2127  }
2128  
2129  static int tc35815_resume(struct pci_dev *pdev)
2130  {
2131  	struct net_device *dev = pci_get_drvdata(pdev);
2132  
2133  	pci_restore_state(pdev);
2134  	if (!netif_running(dev))
2135  		return 0;
2136  	pci_set_power_state(pdev, PCI_D0);
2137  	tc35815_restart(dev);
2138  	netif_carrier_off(dev);
2139  	if (dev->phydev)
2140  		phy_start(dev->phydev);
2141  	netif_device_attach(dev);
2142  	return 0;
2143  }
2144  #endif /* CONFIG_PM */
2145  
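/*
 * Illustrative sketch (not part of the driver; kept out of the build):
 * the hooks above use the legacy PCI suspend/resume interface.  For
 * comparison only, the modern dev_pm_ops wiring, where example_suspend
 * and example_resume are hypothetical callbacks taking a
 * struct device *.
 */
#if 0
static SIMPLE_DEV_PM_OPS(example_pm_ops, example_suspend, example_resume);
/* and in struct pci_driver:  .driver.pm = &example_pm_ops, */
#endif
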
2146  static struct pci_driver tc35815_pci_driver = {
2147  	.name		= MODNAME,
2148  	.id_table	= tc35815_pci_tbl,
2149  	.probe		= tc35815_init_one,
2150  	.remove		= tc35815_remove_one,
2151  #ifdef CONFIG_PM
2152  	.suspend	= tc35815_suspend,
2153  	.resume		= tc35815_resume,
2154  #endif
2155  };
2156  
2157  module_param_named(speed, options.speed, int, 0);
2158  MODULE_PARM_DESC(speed, "0:auto, 10:10Mbps, 100:100Mbps");
2159  module_param_named(duplex, options.duplex, int, 0);
2160  MODULE_PARM_DESC(duplex, "0:auto, 1:half, 2:full");
2161  
2162  module_pci_driver(tc35815_pci_driver);
2163  MODULE_DESCRIPTION("TOSHIBA TC35815 PCI 10M/100M Ethernet driver");
2164  MODULE_LICENSE("GPL");
2165