xref: /openbmc/linux/drivers/net/ethernet/packetengines/yellowfin.c (revision 9baa3c34ac4e27f7e062f266f50cc5dbea26a6c1)
1554f4ffdSJeff Kirsher /* yellowfin.c: A Packet Engines G-NIC ethernet driver for linux. */
2554f4ffdSJeff Kirsher /*
3554f4ffdSJeff Kirsher 	Written 1997-2001 by Donald Becker.
4554f4ffdSJeff Kirsher 
5554f4ffdSJeff Kirsher 	This software may be used and distributed according to the terms of
6554f4ffdSJeff Kirsher 	the GNU General Public License (GPL), incorporated herein by reference.
7554f4ffdSJeff Kirsher 	Drivers based on or derived from this code fall under the GPL and must
8554f4ffdSJeff Kirsher 	retain the authorship, copyright and license notice.  This file is not
9554f4ffdSJeff Kirsher 	a complete program and may only be used when the entire operating
10554f4ffdSJeff Kirsher 	system is licensed under the GPL.
11554f4ffdSJeff Kirsher 
12554f4ffdSJeff Kirsher 	This driver is for the Packet Engines G-NIC PCI Gigabit Ethernet adapter.
13554f4ffdSJeff Kirsher 	It also supports the Symbios Logic version of the same chip core.
14554f4ffdSJeff Kirsher 
15554f4ffdSJeff Kirsher 	The author may be reached as becker@scyld.com, or C/O
16554f4ffdSJeff Kirsher 	Scyld Computing Corporation
17554f4ffdSJeff Kirsher 	410 Severn Ave., Suite 210
18554f4ffdSJeff Kirsher 	Annapolis MD 21403
19554f4ffdSJeff Kirsher 
20554f4ffdSJeff Kirsher 	Support and updates available at
21554f4ffdSJeff Kirsher 	http://www.scyld.com/network/yellowfin.html
22554f4ffdSJeff Kirsher 	[link no longer provides useful info -jgarzik]
23554f4ffdSJeff Kirsher 
24554f4ffdSJeff Kirsher */
25554f4ffdSJeff Kirsher 
26554f4ffdSJeff Kirsher #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
27554f4ffdSJeff Kirsher 
28554f4ffdSJeff Kirsher #define DRV_NAME	"yellowfin"
29554f4ffdSJeff Kirsher #define DRV_VERSION	"2.1"
30554f4ffdSJeff Kirsher #define DRV_RELDATE	"Sep 11, 2006"
31554f4ffdSJeff Kirsher 
32554f4ffdSJeff Kirsher /* The user-configurable values.
33554f4ffdSJeff Kirsher    These may be modified when a driver module is loaded.*/
34554f4ffdSJeff Kirsher 
35554f4ffdSJeff Kirsher static int debug = 1;			/* 1 normal messages, 0 quiet .. 7 verbose. */
36554f4ffdSJeff Kirsher /* Maximum events (Rx packets, etc.) to handle at each interrupt. */
37554f4ffdSJeff Kirsher static int max_interrupt_work = 20;
38554f4ffdSJeff Kirsher static int mtu;
39554f4ffdSJeff Kirsher #ifdef YF_PROTOTYPE			/* Support for prototype hardware errata. */
40554f4ffdSJeff Kirsher /* System-wide count of bogus-rx frames. */
41554f4ffdSJeff Kirsher static int bogus_rx;
42554f4ffdSJeff Kirsher static int dma_ctrl = 0x004A0263; 			/* Constrained by errata */
43554f4ffdSJeff Kirsher static int fifo_cfg = 0x0020;				/* Bypass external Tx FIFO. */
44554f4ffdSJeff Kirsher #elif defined(YF_NEW)					/* A future perfect board :->.  */
45554f4ffdSJeff Kirsher static int dma_ctrl = 0x00CAC277;			/* Override when loading module! */
46554f4ffdSJeff Kirsher static int fifo_cfg = 0x0028;
47554f4ffdSJeff Kirsher #else
48554f4ffdSJeff Kirsher static const int dma_ctrl = 0x004A0263; 			/* Constrained by errata */
49554f4ffdSJeff Kirsher static const int fifo_cfg = 0x0020;				/* Bypass external Tx FIFO. */
50554f4ffdSJeff Kirsher #endif
51554f4ffdSJeff Kirsher 
52554f4ffdSJeff Kirsher /* Set the copy breakpoint for the copy-only-tiny-frames scheme.
53554f4ffdSJeff Kirsher    Setting to > 1514 effectively disables this feature. */
54554f4ffdSJeff Kirsher static int rx_copybreak;
55554f4ffdSJeff Kirsher 
56554f4ffdSJeff Kirsher /* Used to pass the media type, etc.
57554f4ffdSJeff Kirsher    No media types are currently defined.  These exist for driver
58554f4ffdSJeff Kirsher    interoperability.
59554f4ffdSJeff Kirsher */
60554f4ffdSJeff Kirsher #define MAX_UNITS 8				/* More are supported, limit only on options */
61554f4ffdSJeff Kirsher static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
62554f4ffdSJeff Kirsher static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
63554f4ffdSJeff Kirsher 
64554f4ffdSJeff Kirsher /* Do ugly workaround for GX server chipset errata. */
65554f4ffdSJeff Kirsher static int gx_fix;
66554f4ffdSJeff Kirsher 
67554f4ffdSJeff Kirsher /* Operational parameters that are set at compile time. */
68554f4ffdSJeff Kirsher 
69554f4ffdSJeff Kirsher /* Keep the ring sizes a power of two for efficiency.
70554f4ffdSJeff Kirsher    Making the Tx ring too long decreases the effectiveness of channel
71554f4ffdSJeff Kirsher    bonding and packet priority.
72554f4ffdSJeff Kirsher    There are no ill effects from too-large receive rings. */
73554f4ffdSJeff Kirsher #define TX_RING_SIZE	16
74554f4ffdSJeff Kirsher #define TX_QUEUE_SIZE	12		/* Must be > 4 && <= TX_RING_SIZE */
75554f4ffdSJeff Kirsher #define RX_RING_SIZE	64
76554f4ffdSJeff Kirsher #define STATUS_TOTAL_SIZE	TX_RING_SIZE*sizeof(struct tx_status_words)
77554f4ffdSJeff Kirsher #define TX_TOTAL_SIZE		2*TX_RING_SIZE*sizeof(struct yellowfin_desc)
78554f4ffdSJeff Kirsher #define RX_TOTAL_SIZE		RX_RING_SIZE*sizeof(struct yellowfin_desc)
79554f4ffdSJeff Kirsher 
80554f4ffdSJeff Kirsher /* Operational parameters that usually are not changed. */
81554f4ffdSJeff Kirsher /* Time in jiffies before concluding the transmitter is hung. */
82554f4ffdSJeff Kirsher #define TX_TIMEOUT  (2*HZ)
83554f4ffdSJeff Kirsher #define PKT_BUF_SZ		1536			/* Size of each temporary Rx buffer.*/
84554f4ffdSJeff Kirsher 
85554f4ffdSJeff Kirsher #define yellowfin_debug debug
86554f4ffdSJeff Kirsher 
87554f4ffdSJeff Kirsher #include <linux/module.h>
88554f4ffdSJeff Kirsher #include <linux/kernel.h>
89554f4ffdSJeff Kirsher #include <linux/string.h>
90554f4ffdSJeff Kirsher #include <linux/timer.h>
91554f4ffdSJeff Kirsher #include <linux/errno.h>
92554f4ffdSJeff Kirsher #include <linux/ioport.h>
93554f4ffdSJeff Kirsher #include <linux/interrupt.h>
94554f4ffdSJeff Kirsher #include <linux/pci.h>
95554f4ffdSJeff Kirsher #include <linux/init.h>
96554f4ffdSJeff Kirsher #include <linux/mii.h>
97554f4ffdSJeff Kirsher #include <linux/netdevice.h>
98554f4ffdSJeff Kirsher #include <linux/etherdevice.h>
99554f4ffdSJeff Kirsher #include <linux/skbuff.h>
100554f4ffdSJeff Kirsher #include <linux/ethtool.h>
101554f4ffdSJeff Kirsher #include <linux/crc32.h>
102554f4ffdSJeff Kirsher #include <linux/bitops.h>
103554f4ffdSJeff Kirsher #include <asm/uaccess.h>
104554f4ffdSJeff Kirsher #include <asm/processor.h>		/* Processor type for cache alignment. */
105554f4ffdSJeff Kirsher #include <asm/unaligned.h>
106554f4ffdSJeff Kirsher #include <asm/io.h>
107554f4ffdSJeff Kirsher 
108554f4ffdSJeff Kirsher /* These identify the driver base version and may not be removed. */
109134c1f15SBill Pemberton static const char version[] =
110554f4ffdSJeff Kirsher   KERN_INFO DRV_NAME ".c:v1.05  1/09/2001  Written by Donald Becker <becker@scyld.com>\n"
111554f4ffdSJeff Kirsher   "  (unofficial 2.4.x port, " DRV_VERSION ", " DRV_RELDATE ")\n";
112554f4ffdSJeff Kirsher 
113554f4ffdSJeff Kirsher MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
114554f4ffdSJeff Kirsher MODULE_DESCRIPTION("Packet Engines Yellowfin G-NIC Gigabit Ethernet driver");
115554f4ffdSJeff Kirsher MODULE_LICENSE("GPL");
116554f4ffdSJeff Kirsher 
117554f4ffdSJeff Kirsher module_param(max_interrupt_work, int, 0);
118554f4ffdSJeff Kirsher module_param(mtu, int, 0);
119554f4ffdSJeff Kirsher module_param(debug, int, 0);
120554f4ffdSJeff Kirsher module_param(rx_copybreak, int, 0);
121554f4ffdSJeff Kirsher module_param_array(options, int, NULL, 0);
122554f4ffdSJeff Kirsher module_param_array(full_duplex, int, NULL, 0);
123554f4ffdSJeff Kirsher module_param(gx_fix, int, 0);
124554f4ffdSJeff Kirsher MODULE_PARM_DESC(max_interrupt_work, "G-NIC maximum events handled per interrupt");
125554f4ffdSJeff Kirsher MODULE_PARM_DESC(mtu, "G-NIC MTU (all boards)");
126554f4ffdSJeff Kirsher MODULE_PARM_DESC(debug, "G-NIC debug level (0-7)");
127554f4ffdSJeff Kirsher MODULE_PARM_DESC(rx_copybreak, "G-NIC copy breakpoint for copy-only-tiny-frames");
128554f4ffdSJeff Kirsher MODULE_PARM_DESC(options, "G-NIC: Bits 0-3: media type, bit 17: full duplex");
129554f4ffdSJeff Kirsher MODULE_PARM_DESC(full_duplex, "G-NIC full duplex setting(s) (1)");
130554f4ffdSJeff Kirsher MODULE_PARM_DESC(gx_fix, "G-NIC: enable GX server chipset bug workaround (0-1)");
131554f4ffdSJeff Kirsher 
132554f4ffdSJeff Kirsher /*
133554f4ffdSJeff Kirsher 				Theory of Operation
134554f4ffdSJeff Kirsher 
135554f4ffdSJeff Kirsher I. Board Compatibility
136554f4ffdSJeff Kirsher 
137554f4ffdSJeff Kirsher This device driver is designed for the Packet Engines "Yellowfin" Gigabit
138554f4ffdSJeff Kirsher Ethernet adapter.  The G-NIC 64-bit PCI card is supported, as well as the
139554f4ffdSJeff Kirsher Symbios 53C885E dual function chip.
140554f4ffdSJeff Kirsher 
141554f4ffdSJeff Kirsher II. Board-specific settings
142554f4ffdSJeff Kirsher 
143554f4ffdSJeff Kirsher PCI bus devices are configured by the system at boot time, so no jumpers
144554f4ffdSJeff Kirsher need to be set on the board.  The system BIOS preferably should assign the
145554f4ffdSJeff Kirsher PCI INTA signal to an otherwise unused system IRQ line.
146554f4ffdSJeff Kirsher Note: Kernel versions earlier than 1.3.73 do not support shared PCI
147554f4ffdSJeff Kirsher interrupt lines.
148554f4ffdSJeff Kirsher 
149554f4ffdSJeff Kirsher III. Driver operation
150554f4ffdSJeff Kirsher 
151554f4ffdSJeff Kirsher IIIa. Ring buffers
152554f4ffdSJeff Kirsher 
153554f4ffdSJeff Kirsher The Yellowfin uses the Descriptor Based DMA Architecture specified by Apple.
154554f4ffdSJeff Kirsher This is a descriptor list scheme similar to that used by the EEPro100 and
155554f4ffdSJeff Kirsher Tulip.  This driver uses two statically allocated fixed-size descriptor lists
156554f4ffdSJeff Kirsher formed into rings by a branch from the final descriptor to the beginning of
157554f4ffdSJeff Kirsher the list.  The ring sizes are set at compile time by RX/TX_RING_SIZE.
158554f4ffdSJeff Kirsher 
159554f4ffdSJeff Kirsher The driver allocates full frame size skbuffs for the Rx ring buffers at
160554f4ffdSJeff Kirsher open() time and passes the skb->data field to the Yellowfin as receive data
161554f4ffdSJeff Kirsher buffers.  When an incoming frame is less than RX_COPYBREAK bytes long,
162554f4ffdSJeff Kirsher a fresh skbuff is allocated and the frame is copied to the new skbuff.
163554f4ffdSJeff Kirsher When the incoming frame is larger, the skbuff is passed directly up the
164554f4ffdSJeff Kirsher protocol stack and replaced by a newly allocated skbuff.
165554f4ffdSJeff Kirsher 
166554f4ffdSJeff Kirsher The RX_COPYBREAK value is chosen to trade-off the memory wasted by
167554f4ffdSJeff Kirsher using a full-sized skbuff for small frames vs. the copying costs of larger
168554f4ffdSJeff Kirsher frames.  For small frames the copying cost is negligible (esp. considering
169554f4ffdSJeff Kirsher that we are pre-loading the cache with immediately useful header
170554f4ffdSJeff Kirsher information).  For large frames the copying cost is non-trivial, and the
171554f4ffdSJeff Kirsher larger copy might flush the cache of useful data.
172554f4ffdSJeff Kirsher 
173554f4ffdSJeff Kirsher IIIC. Synchronization
174554f4ffdSJeff Kirsher 
175554f4ffdSJeff Kirsher The driver runs as two independent, single-threaded flows of control.  One
176554f4ffdSJeff Kirsher is the send-packet routine, which enforces single-threaded use by the
177554f4ffdSJeff Kirsher dev->tbusy flag.  The other thread is the interrupt handler, which is single
178554f4ffdSJeff Kirsher threaded by the hardware and other software.
179554f4ffdSJeff Kirsher 
180554f4ffdSJeff Kirsher The send packet thread has partial control over the Tx ring and 'dev->tbusy'
181554f4ffdSJeff Kirsher flag.  It sets the tbusy flag whenever it's queuing a Tx packet. If the next
182554f4ffdSJeff Kirsher queue slot is empty, it clears the tbusy flag when finished otherwise it sets
183554f4ffdSJeff Kirsher the 'yp->tx_full' flag.
184554f4ffdSJeff Kirsher 
185554f4ffdSJeff Kirsher The interrupt handler has exclusive control over the Rx ring and records stats
186554f4ffdSJeff Kirsher from the Tx ring.  After reaping the stats, it marks the Tx queue entry as
187554f4ffdSJeff Kirsher empty by incrementing the dirty_tx mark. Iff the 'yp->tx_full' flag is set, it
188554f4ffdSJeff Kirsher clears both the tx_full and tbusy flags.
189554f4ffdSJeff Kirsher 
190554f4ffdSJeff Kirsher IV. Notes
191554f4ffdSJeff Kirsher 
192554f4ffdSJeff Kirsher Thanks to Kim Stearns of Packet Engines for providing a pair of G-NIC boards.
193554f4ffdSJeff Kirsher Thanks to Bruce Faust of Digitalscape for providing both their SYM53C885 board
and an AlphaStation to verify the Alpha port!
195554f4ffdSJeff Kirsher 
196554f4ffdSJeff Kirsher IVb. References
197554f4ffdSJeff Kirsher 
198554f4ffdSJeff Kirsher Yellowfin Engineering Design Specification, 4/23/97 Preliminary/Confidential
199554f4ffdSJeff Kirsher Symbios SYM53C885 PCI-SCSI/Fast Ethernet Multifunction Controller Preliminary
200554f4ffdSJeff Kirsher    Data Manual v3.0
201554f4ffdSJeff Kirsher http://cesdis.gsfc.nasa.gov/linux/misc/NWay.html
202554f4ffdSJeff Kirsher http://cesdis.gsfc.nasa.gov/linux/misc/100mbps.html
203554f4ffdSJeff Kirsher 
204554f4ffdSJeff Kirsher IVc. Errata
205554f4ffdSJeff Kirsher 
206554f4ffdSJeff Kirsher See Packet Engines confidential appendix (prototype chips only).
207554f4ffdSJeff Kirsher */
208554f4ffdSJeff Kirsher 
209554f4ffdSJeff Kirsher 
210554f4ffdSJeff Kirsher 
/* Per-board capability/quirk flags.  Stored in pci_id_tbl[].drv_flags
   and copied to yellowfin_private.drv_flags at probe time. */
enum capability_flags {
	HasMII=1, FullTxStatus=2, IsGigabit=4, HasMulticastBug=8, FullRxStatus=16,
	HasMACAddrBug=32, /* Only on early revs.  */
	DontUseEeprom=64, /* Don't read the MAC from the EEPROM. */
};
216554f4ffdSJeff Kirsher 
/* The PCI I/O space extent. */
enum {
	YELLOWFIN_SIZE	= 0x100,	/* Bytes mapped with pci_iomap() at probe. */
};
221554f4ffdSJeff Kirsher 
/* Static description of one supported chip.  NOTE(review): only .name
   and .drv_flags are referenced in this file's probe path; the .id
   match data looks unused here (PCI matching goes through
   yellowfin_pci_tbl) -- confirm before relying on it. */
struct pci_id_info {
        const char *name;
        struct match_info {
                int     pci, pci_mask, subsystem, subsystem_mask;
                int revision, revision_mask;                            /* Only 8 bits. */
        } id;
        int drv_flags;                          /* Driver use, intended as capability flags. */
};
230554f4ffdSJeff Kirsher 
/* Known chips, indexed by the driver_data value in yellowfin_pci_tbl. */
static const struct pci_id_info pci_id_tbl[] = {
	{"Yellowfin G-NIC Gigabit Ethernet", { 0x07021000, 0xffffffff},
	 FullTxStatus | IsGigabit | HasMulticastBug | HasMACAddrBug | DontUseEeprom},
	{"Symbios SYM83C885", { 0x07011000, 0xffffffff},
	  HasMII | DontUseEeprom },
	{ }
};
238554f4ffdSJeff Kirsher 
/* PCI IDs this driver binds to; driver_data is an index into pci_id_tbl. */
static const struct pci_device_id yellowfin_pci_tbl[] = {
	{ 0x1000, 0x0702, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
	{ 0x1000, 0x0701, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
	{ }
};
MODULE_DEVICE_TABLE (pci, yellowfin_pci_tbl);
245554f4ffdSJeff Kirsher 
246554f4ffdSJeff Kirsher 
/* Offsets to the Yellowfin registers.  Various sizes and alignments;
   accessed through ioread/iowrite on the BAR mapped into
   yellowfin_private.base. */
enum yellowfin_offsets {
	TxCtrl=0x00, TxStatus=0x04, TxPtr=0x0C,
	TxIntrSel=0x10, TxBranchSel=0x14, TxWaitSel=0x18,
	RxCtrl=0x40, RxStatus=0x44, RxPtr=0x4C,
	RxIntrSel=0x50, RxBranchSel=0x54, RxWaitSel=0x58,
	EventStatus=0x80, IntrEnb=0x82, IntrClear=0x84, IntrStatus=0x86,
	ChipRev=0x8C, DMACtrl=0x90, TxThreshold=0x94,
	Cnfg=0xA0, FrameGap0=0xA2, FrameGap1=0xA4,
	MII_Cmd=0xA6, MII_Addr=0xA8, MII_Wr_Data=0xAA, MII_Rd_Data=0xAC,
	MII_Status=0xAE,
	RxDepth=0xB8, FlowCtrl=0xBC,
	AddrMode=0xD0, StnAddr=0xD2, HashTbl=0xD8, FIFOcfg=0xF8,
	EEStatus=0xF0, EECtrl=0xF1, EEAddr=0xF2, EERead=0xF3, EEWrite=0xF4,
	EEFeature=0xF5,
};
263554f4ffdSJeff Kirsher 
/* The Yellowfin Rx and Tx buffer descriptors.
   Elements are written as 32 bit for endian portability. */
struct yellowfin_desc {
	__le32 dbdma_cmd;	/* Command/control bits (see desc_cmd_bits). */
	__le32 addr;		/* Bus address of the data buffer. */
	__le32 branch_addr;	/* Bus address of the branch-target descriptor. */
	__le32 result_status;	/* Completion status (see desc_status_bits). */
};
272554f4ffdSJeff Kirsher 
/* Hardware Tx status write-back words, one struct per Tx ring entry
   (see STATUS_TOTAL_SIZE).  Field order is swapped by host endianness
   so the chip's 16-bit word layout is preserved either way. */
struct tx_status_words {
#ifdef __BIG_ENDIAN
	u16 tx_errs;
	u16 tx_cnt;
	u16 paused;
	u16 total_tx_cnt;
#else  /* Little endian chips. */
	u16 tx_cnt;
	u16 tx_errs;
	u16 total_tx_cnt;
	u16 paused;
#endif /* __BIG_ENDIAN */
};
286554f4ffdSJeff Kirsher 
/* Bits in yellowfin_desc.dbdma_cmd: the top nibble selects the DBDMA
   command, the lower bits select branch/interrupt/wait conditions. */
enum desc_cmd_bits {
	CMD_TX_PKT=0x10000000, CMD_RX_BUF=0x20000000, CMD_TXSTATUS=0x30000000,
	CMD_NOP=0x60000000, CMD_STOP=0x70000000,
	BRANCH_ALWAYS=0x0C0000, INTR_ALWAYS=0x300000, WAIT_ALWAYS=0x030000,
	BRANCH_IFTRUE=0x040000,
};
294554f4ffdSJeff Kirsher 
/* Bits in yellowfin_desc.result_status */
enum desc_status_bits { RX_EOP=0x0040, };
297554f4ffdSJeff Kirsher 
/* Bits in the interrupt status/mask registers
   (EventStatus/IntrEnb/IntrClear/IntrStatus). */
enum intr_status_bits {
	IntrRxDone=0x01, IntrRxInvalid=0x02, IntrRxPCIFault=0x04,IntrRxPCIErr=0x08,
	IntrTxDone=0x10, IntrTxInvalid=0x20, IntrTxPCIFault=0x40,IntrTxPCIErr=0x80,
	IntrEarlyRx=0x100, IntrWakeup=0x200, };
303554f4ffdSJeff Kirsher 
#define PRIV_ALIGN	31 	/* Required alignment mask */
#define MII_CNT		4
/* Per-device driver state, reached via netdev_priv(dev). */
struct yellowfin_private {
	/* Descriptor rings first for alignment.
	   Tx requires a second descriptor for status. */
	struct yellowfin_desc *rx_ring;		/* RX_RING_SIZE descriptors. */
	struct yellowfin_desc *tx_ring;		/* 2*TX_RING_SIZE descriptors (data+status pairs). */
	struct sk_buff* rx_skbuff[RX_RING_SIZE];
	struct sk_buff* tx_skbuff[TX_RING_SIZE];
	dma_addr_t rx_ring_dma;			/* Bus address of rx_ring. */
	dma_addr_t tx_ring_dma;			/* Bus address of tx_ring. */

	struct tx_status_words *tx_status;	/* Tx status write-back area. */
	dma_addr_t tx_status_dma;		/* Bus address of tx_status. */

	struct timer_list timer;	/* Media selection timer. */
	/* Frequently used and paired value: keep adjacent for cache effect. */
	int chip_id, drv_flags;		/* Index into pci_id_tbl, and its drv_flags. */
	struct pci_dev *pci_dev;
	unsigned int cur_rx, dirty_rx;		/* Producer/consumer ring indices */
	unsigned int rx_buf_sz;				/* Based on MTU+slack. */
	struct tx_status_words *tx_tail_desc;
	unsigned int cur_tx, dirty_tx;
	int tx_threshold;
	unsigned int tx_full:1;				/* The Tx queue is full. */
	unsigned int full_duplex:1;			/* Full-duplex operation requested. */
	unsigned int duplex_lock:1;
	unsigned int medialock:1;			/* Do not sense media. */
	unsigned int default_port:4;		/* Last dev->if_port value. */
	/* MII transceiver section. */
	int mii_cnt;						/* MII device addresses. */
	u16 advertising;					/* NWay media advertisement */
	unsigned char phys[MII_CNT];		/* MII device addresses, only first one used */
	spinlock_t lock;
	void __iomem *base;			/* Mapped register BAR (pci_iomap). */
};
340554f4ffdSJeff Kirsher 
341554f4ffdSJeff Kirsher static int read_eeprom(void __iomem *ioaddr, int location);
342554f4ffdSJeff Kirsher static int mdio_read(void __iomem *ioaddr, int phy_id, int location);
343554f4ffdSJeff Kirsher static void mdio_write(void __iomem *ioaddr, int phy_id, int location, int value);
344554f4ffdSJeff Kirsher static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
345554f4ffdSJeff Kirsher static int yellowfin_open(struct net_device *dev);
346554f4ffdSJeff Kirsher static void yellowfin_timer(unsigned long data);
347554f4ffdSJeff Kirsher static void yellowfin_tx_timeout(struct net_device *dev);
348554f4ffdSJeff Kirsher static int yellowfin_init_ring(struct net_device *dev);
349554f4ffdSJeff Kirsher static netdev_tx_t yellowfin_start_xmit(struct sk_buff *skb,
350554f4ffdSJeff Kirsher 					struct net_device *dev);
351554f4ffdSJeff Kirsher static irqreturn_t yellowfin_interrupt(int irq, void *dev_instance);
352554f4ffdSJeff Kirsher static int yellowfin_rx(struct net_device *dev);
353554f4ffdSJeff Kirsher static void yellowfin_error(struct net_device *dev, int intr_status);
354554f4ffdSJeff Kirsher static int yellowfin_close(struct net_device *dev);
355554f4ffdSJeff Kirsher static void set_rx_mode(struct net_device *dev);
356554f4ffdSJeff Kirsher static const struct ethtool_ops ethtool_ops;
357554f4ffdSJeff Kirsher 
/* net_device callbacks: generic eth_* helpers cover MTU changes and
   address handling, the rest is chip-specific. */
static const struct net_device_ops netdev_ops = {
	.ndo_open 		= yellowfin_open,
	.ndo_stop 		= yellowfin_close,
	.ndo_start_xmit 	= yellowfin_start_xmit,
	.ndo_set_rx_mode	= set_rx_mode,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address 	= eth_mac_addr,
	.ndo_do_ioctl 		= netdev_ioctl,
	.ndo_tx_timeout 	= yellowfin_tx_timeout,
};
369554f4ffdSJeff Kirsher 
370134c1f15SBill Pemberton static int yellowfin_init_one(struct pci_dev *pdev,
371554f4ffdSJeff Kirsher 			      const struct pci_device_id *ent)
372554f4ffdSJeff Kirsher {
373554f4ffdSJeff Kirsher 	struct net_device *dev;
374554f4ffdSJeff Kirsher 	struct yellowfin_private *np;
375554f4ffdSJeff Kirsher 	int irq;
376554f4ffdSJeff Kirsher 	int chip_idx = ent->driver_data;
377554f4ffdSJeff Kirsher 	static int find_cnt;
378554f4ffdSJeff Kirsher 	void __iomem *ioaddr;
379554f4ffdSJeff Kirsher 	int i, option = find_cnt < MAX_UNITS ? options[find_cnt] : 0;
380554f4ffdSJeff Kirsher 	int drv_flags = pci_id_tbl[chip_idx].drv_flags;
381554f4ffdSJeff Kirsher         void *ring_space;
382554f4ffdSJeff Kirsher         dma_addr_t ring_dma;
383554f4ffdSJeff Kirsher #ifdef USE_IO_OPS
384554f4ffdSJeff Kirsher 	int bar = 0;
385554f4ffdSJeff Kirsher #else
386554f4ffdSJeff Kirsher 	int bar = 1;
387554f4ffdSJeff Kirsher #endif
388554f4ffdSJeff Kirsher 
389554f4ffdSJeff Kirsher /* when built into the kernel, we only print version if device is found */
390554f4ffdSJeff Kirsher #ifndef MODULE
391554f4ffdSJeff Kirsher 	static int printed_version;
392554f4ffdSJeff Kirsher 	if (!printed_version++)
393554f4ffdSJeff Kirsher 		printk(version);
394554f4ffdSJeff Kirsher #endif
395554f4ffdSJeff Kirsher 
396554f4ffdSJeff Kirsher 	i = pci_enable_device(pdev);
397554f4ffdSJeff Kirsher 	if (i) return i;
398554f4ffdSJeff Kirsher 
399554f4ffdSJeff Kirsher 	dev = alloc_etherdev(sizeof(*np));
40041de8d4cSJoe Perches 	if (!dev)
401554f4ffdSJeff Kirsher 		return -ENOMEM;
40241de8d4cSJoe Perches 
403554f4ffdSJeff Kirsher 	SET_NETDEV_DEV(dev, &pdev->dev);
404554f4ffdSJeff Kirsher 
405554f4ffdSJeff Kirsher 	np = netdev_priv(dev);
406554f4ffdSJeff Kirsher 
407554f4ffdSJeff Kirsher 	if (pci_request_regions(pdev, DRV_NAME))
408554f4ffdSJeff Kirsher 		goto err_out_free_netdev;
409554f4ffdSJeff Kirsher 
410554f4ffdSJeff Kirsher 	pci_set_master (pdev);
411554f4ffdSJeff Kirsher 
412554f4ffdSJeff Kirsher 	ioaddr = pci_iomap(pdev, bar, YELLOWFIN_SIZE);
413554f4ffdSJeff Kirsher 	if (!ioaddr)
414554f4ffdSJeff Kirsher 		goto err_out_free_res;
415554f4ffdSJeff Kirsher 
416554f4ffdSJeff Kirsher 	irq = pdev->irq;
417554f4ffdSJeff Kirsher 
418554f4ffdSJeff Kirsher 	if (drv_flags & DontUseEeprom)
419554f4ffdSJeff Kirsher 		for (i = 0; i < 6; i++)
420554f4ffdSJeff Kirsher 			dev->dev_addr[i] = ioread8(ioaddr + StnAddr + i);
421554f4ffdSJeff Kirsher 	else {
422554f4ffdSJeff Kirsher 		int ee_offset = (read_eeprom(ioaddr, 6) == 0xff ? 0x100 : 0);
423554f4ffdSJeff Kirsher 		for (i = 0; i < 6; i++)
424554f4ffdSJeff Kirsher 			dev->dev_addr[i] = read_eeprom(ioaddr, ee_offset + i);
425554f4ffdSJeff Kirsher 	}
426554f4ffdSJeff Kirsher 
427554f4ffdSJeff Kirsher 	/* Reset the chip. */
428554f4ffdSJeff Kirsher 	iowrite32(0x80000000, ioaddr + DMACtrl);
429554f4ffdSJeff Kirsher 
430554f4ffdSJeff Kirsher 	pci_set_drvdata(pdev, dev);
431554f4ffdSJeff Kirsher 	spin_lock_init(&np->lock);
432554f4ffdSJeff Kirsher 
433554f4ffdSJeff Kirsher 	np->pci_dev = pdev;
434554f4ffdSJeff Kirsher 	np->chip_id = chip_idx;
435554f4ffdSJeff Kirsher 	np->drv_flags = drv_flags;
436554f4ffdSJeff Kirsher 	np->base = ioaddr;
437554f4ffdSJeff Kirsher 
438554f4ffdSJeff Kirsher 	ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
439554f4ffdSJeff Kirsher 	if (!ring_space)
440554f4ffdSJeff Kirsher 		goto err_out_cleardev;
441554f4ffdSJeff Kirsher 	np->tx_ring = ring_space;
442554f4ffdSJeff Kirsher 	np->tx_ring_dma = ring_dma;
443554f4ffdSJeff Kirsher 
444554f4ffdSJeff Kirsher 	ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
445554f4ffdSJeff Kirsher 	if (!ring_space)
446554f4ffdSJeff Kirsher 		goto err_out_unmap_tx;
447554f4ffdSJeff Kirsher 	np->rx_ring = ring_space;
448554f4ffdSJeff Kirsher 	np->rx_ring_dma = ring_dma;
449554f4ffdSJeff Kirsher 
450554f4ffdSJeff Kirsher 	ring_space = pci_alloc_consistent(pdev, STATUS_TOTAL_SIZE, &ring_dma);
451554f4ffdSJeff Kirsher 	if (!ring_space)
452554f4ffdSJeff Kirsher 		goto err_out_unmap_rx;
453554f4ffdSJeff Kirsher 	np->tx_status = ring_space;
454554f4ffdSJeff Kirsher 	np->tx_status_dma = ring_dma;
455554f4ffdSJeff Kirsher 
456554f4ffdSJeff Kirsher 	if (dev->mem_start)
457554f4ffdSJeff Kirsher 		option = dev->mem_start;
458554f4ffdSJeff Kirsher 
459554f4ffdSJeff Kirsher 	/* The lower four bits are the media type. */
460554f4ffdSJeff Kirsher 	if (option > 0) {
461554f4ffdSJeff Kirsher 		if (option & 0x200)
462554f4ffdSJeff Kirsher 			np->full_duplex = 1;
463554f4ffdSJeff Kirsher 		np->default_port = option & 15;
464554f4ffdSJeff Kirsher 		if (np->default_port)
465554f4ffdSJeff Kirsher 			np->medialock = 1;
466554f4ffdSJeff Kirsher 	}
467554f4ffdSJeff Kirsher 	if (find_cnt < MAX_UNITS  &&  full_duplex[find_cnt] > 0)
468554f4ffdSJeff Kirsher 		np->full_duplex = 1;
469554f4ffdSJeff Kirsher 
470554f4ffdSJeff Kirsher 	if (np->full_duplex)
471554f4ffdSJeff Kirsher 		np->duplex_lock = 1;
472554f4ffdSJeff Kirsher 
473554f4ffdSJeff Kirsher 	/* The Yellowfin-specific entries in the device structure. */
474554f4ffdSJeff Kirsher 	dev->netdev_ops = &netdev_ops;
4757ad24ea4SWilfried Klaebe 	dev->ethtool_ops = &ethtool_ops;
476554f4ffdSJeff Kirsher 	dev->watchdog_timeo = TX_TIMEOUT;
477554f4ffdSJeff Kirsher 
478554f4ffdSJeff Kirsher 	if (mtu)
479554f4ffdSJeff Kirsher 		dev->mtu = mtu;
480554f4ffdSJeff Kirsher 
481554f4ffdSJeff Kirsher 	i = register_netdev(dev);
482554f4ffdSJeff Kirsher 	if (i)
483554f4ffdSJeff Kirsher 		goto err_out_unmap_status;
484554f4ffdSJeff Kirsher 
485554f4ffdSJeff Kirsher 	netdev_info(dev, "%s type %8x at %p, %pM, IRQ %d\n",
486554f4ffdSJeff Kirsher 		    pci_id_tbl[chip_idx].name,
487554f4ffdSJeff Kirsher 		    ioread32(ioaddr + ChipRev), ioaddr,
488554f4ffdSJeff Kirsher 		    dev->dev_addr, irq);
489554f4ffdSJeff Kirsher 
490554f4ffdSJeff Kirsher 	if (np->drv_flags & HasMII) {
491554f4ffdSJeff Kirsher 		int phy, phy_idx = 0;
492554f4ffdSJeff Kirsher 		for (phy = 0; phy < 32 && phy_idx < MII_CNT; phy++) {
493554f4ffdSJeff Kirsher 			int mii_status = mdio_read(ioaddr, phy, 1);
494554f4ffdSJeff Kirsher 			if (mii_status != 0xffff  &&  mii_status != 0x0000) {
495554f4ffdSJeff Kirsher 				np->phys[phy_idx++] = phy;
496554f4ffdSJeff Kirsher 				np->advertising = mdio_read(ioaddr, phy, 4);
497554f4ffdSJeff Kirsher 				netdev_info(dev, "MII PHY found at address %d, status 0x%04x advertising %04x\n",
498554f4ffdSJeff Kirsher 					    phy, mii_status, np->advertising);
499554f4ffdSJeff Kirsher 			}
500554f4ffdSJeff Kirsher 		}
501554f4ffdSJeff Kirsher 		np->mii_cnt = phy_idx;
502554f4ffdSJeff Kirsher 	}
503554f4ffdSJeff Kirsher 
504554f4ffdSJeff Kirsher 	find_cnt++;
505554f4ffdSJeff Kirsher 
506554f4ffdSJeff Kirsher 	return 0;
507554f4ffdSJeff Kirsher 
508554f4ffdSJeff Kirsher err_out_unmap_status:
509554f4ffdSJeff Kirsher         pci_free_consistent(pdev, STATUS_TOTAL_SIZE, np->tx_status,
510554f4ffdSJeff Kirsher 		np->tx_status_dma);
511554f4ffdSJeff Kirsher err_out_unmap_rx:
512554f4ffdSJeff Kirsher         pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
513554f4ffdSJeff Kirsher err_out_unmap_tx:
514554f4ffdSJeff Kirsher         pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
515554f4ffdSJeff Kirsher err_out_cleardev:
516554f4ffdSJeff Kirsher 	pci_iounmap(pdev, ioaddr);
517554f4ffdSJeff Kirsher err_out_free_res:
518554f4ffdSJeff Kirsher 	pci_release_regions(pdev);
519554f4ffdSJeff Kirsher err_out_free_netdev:
520554f4ffdSJeff Kirsher 	free_netdev (dev);
521554f4ffdSJeff Kirsher 	return -ENODEV;
522554f4ffdSJeff Kirsher }
523554f4ffdSJeff Kirsher 
524134c1f15SBill Pemberton static int read_eeprom(void __iomem *ioaddr, int location)
525554f4ffdSJeff Kirsher {
526554f4ffdSJeff Kirsher 	int bogus_cnt = 10000;		/* Typical 33Mhz: 1050 ticks */
527554f4ffdSJeff Kirsher 
528554f4ffdSJeff Kirsher 	iowrite8(location, ioaddr + EEAddr);
529554f4ffdSJeff Kirsher 	iowrite8(0x30 | ((location >> 8) & 7), ioaddr + EECtrl);
530554f4ffdSJeff Kirsher 	while ((ioread8(ioaddr + EEStatus) & 0x80)  &&  --bogus_cnt > 0)
531554f4ffdSJeff Kirsher 		;
532554f4ffdSJeff Kirsher 	return ioread8(ioaddr + EERead);
533554f4ffdSJeff Kirsher }
534554f4ffdSJeff Kirsher 
/* MII Management Data I/O accesses.
536554f4ffdSJeff Kirsher    These routines assume the MDIO controller is idle, and do not exit until
537554f4ffdSJeff Kirsher    the command is finished. */
538554f4ffdSJeff Kirsher 
539554f4ffdSJeff Kirsher static int mdio_read(void __iomem *ioaddr, int phy_id, int location)
540554f4ffdSJeff Kirsher {
541554f4ffdSJeff Kirsher 	int i;
542554f4ffdSJeff Kirsher 
543554f4ffdSJeff Kirsher 	iowrite16((phy_id<<8) + location, ioaddr + MII_Addr);
544554f4ffdSJeff Kirsher 	iowrite16(1, ioaddr + MII_Cmd);
545554f4ffdSJeff Kirsher 	for (i = 10000; i >= 0; i--)
546554f4ffdSJeff Kirsher 		if ((ioread16(ioaddr + MII_Status) & 1) == 0)
547554f4ffdSJeff Kirsher 			break;
548554f4ffdSJeff Kirsher 	return ioread16(ioaddr + MII_Rd_Data);
549554f4ffdSJeff Kirsher }
550554f4ffdSJeff Kirsher 
551554f4ffdSJeff Kirsher static void mdio_write(void __iomem *ioaddr, int phy_id, int location, int value)
552554f4ffdSJeff Kirsher {
553554f4ffdSJeff Kirsher 	int i;
554554f4ffdSJeff Kirsher 
555554f4ffdSJeff Kirsher 	iowrite16((phy_id<<8) + location, ioaddr + MII_Addr);
556554f4ffdSJeff Kirsher 	iowrite16(value, ioaddr + MII_Wr_Data);
557554f4ffdSJeff Kirsher 
558554f4ffdSJeff Kirsher 	/* Wait for the command to finish. */
559554f4ffdSJeff Kirsher 	for (i = 10000; i >= 0; i--)
560554f4ffdSJeff Kirsher 		if ((ioread16(ioaddr + MII_Status) & 1) == 0)
561554f4ffdSJeff Kirsher 			break;
562554f4ffdSJeff Kirsher }
563554f4ffdSJeff Kirsher 
564554f4ffdSJeff Kirsher 
565554f4ffdSJeff Kirsher static int yellowfin_open(struct net_device *dev)
566554f4ffdSJeff Kirsher {
567554f4ffdSJeff Kirsher 	struct yellowfin_private *yp = netdev_priv(dev);
5680c18acc1SFrancois Romieu 	const int irq = yp->pci_dev->irq;
569554f4ffdSJeff Kirsher 	void __iomem *ioaddr = yp->base;
5700c18acc1SFrancois Romieu 	int i, rc;
571554f4ffdSJeff Kirsher 
572554f4ffdSJeff Kirsher 	/* Reset the chip. */
573554f4ffdSJeff Kirsher 	iowrite32(0x80000000, ioaddr + DMACtrl);
574554f4ffdSJeff Kirsher 
5750c18acc1SFrancois Romieu 	rc = request_irq(irq, yellowfin_interrupt, IRQF_SHARED, dev->name, dev);
5760c18acc1SFrancois Romieu 	if (rc)
5770c18acc1SFrancois Romieu 		return rc;
578554f4ffdSJeff Kirsher 
5790c18acc1SFrancois Romieu 	rc = yellowfin_init_ring(dev);
5800c18acc1SFrancois Romieu 	if (rc < 0)
5810c18acc1SFrancois Romieu 		goto err_free_irq;
582554f4ffdSJeff Kirsher 
583554f4ffdSJeff Kirsher 	iowrite32(yp->rx_ring_dma, ioaddr + RxPtr);
584554f4ffdSJeff Kirsher 	iowrite32(yp->tx_ring_dma, ioaddr + TxPtr);
585554f4ffdSJeff Kirsher 
586554f4ffdSJeff Kirsher 	for (i = 0; i < 6; i++)
587554f4ffdSJeff Kirsher 		iowrite8(dev->dev_addr[i], ioaddr + StnAddr + i);
588554f4ffdSJeff Kirsher 
589554f4ffdSJeff Kirsher 	/* Set up various condition 'select' registers.
590554f4ffdSJeff Kirsher 	   There are no options here. */
591554f4ffdSJeff Kirsher 	iowrite32(0x00800080, ioaddr + TxIntrSel); 	/* Interrupt on Tx abort */
592554f4ffdSJeff Kirsher 	iowrite32(0x00800080, ioaddr + TxBranchSel);	/* Branch on Tx abort */
593554f4ffdSJeff Kirsher 	iowrite32(0x00400040, ioaddr + TxWaitSel); 	/* Wait on Tx status */
594554f4ffdSJeff Kirsher 	iowrite32(0x00400040, ioaddr + RxIntrSel);	/* Interrupt on Rx done */
595554f4ffdSJeff Kirsher 	iowrite32(0x00400040, ioaddr + RxBranchSel);	/* Branch on Rx error */
596554f4ffdSJeff Kirsher 	iowrite32(0x00400040, ioaddr + RxWaitSel);	/* Wait on Rx done */
597554f4ffdSJeff Kirsher 
598554f4ffdSJeff Kirsher 	/* Initialize other registers: with so many this eventually this will
599554f4ffdSJeff Kirsher 	   converted to an offset/value list. */
600554f4ffdSJeff Kirsher 	iowrite32(dma_ctrl, ioaddr + DMACtrl);
601554f4ffdSJeff Kirsher 	iowrite16(fifo_cfg, ioaddr + FIFOcfg);
602554f4ffdSJeff Kirsher 	/* Enable automatic generation of flow control frames, period 0xffff. */
603554f4ffdSJeff Kirsher 	iowrite32(0x0030FFFF, ioaddr + FlowCtrl);
604554f4ffdSJeff Kirsher 
605554f4ffdSJeff Kirsher 	yp->tx_threshold = 32;
606554f4ffdSJeff Kirsher 	iowrite32(yp->tx_threshold, ioaddr + TxThreshold);
607554f4ffdSJeff Kirsher 
608554f4ffdSJeff Kirsher 	if (dev->if_port == 0)
609554f4ffdSJeff Kirsher 		dev->if_port = yp->default_port;
610554f4ffdSJeff Kirsher 
611554f4ffdSJeff Kirsher 	netif_start_queue(dev);
612554f4ffdSJeff Kirsher 
613554f4ffdSJeff Kirsher 	/* Setting the Rx mode will start the Rx process. */
614554f4ffdSJeff Kirsher 	if (yp->drv_flags & IsGigabit) {
615554f4ffdSJeff Kirsher 		/* We are always in full-duplex mode with gigabit! */
616554f4ffdSJeff Kirsher 		yp->full_duplex = 1;
617554f4ffdSJeff Kirsher 		iowrite16(0x01CF, ioaddr + Cnfg);
618554f4ffdSJeff Kirsher 	} else {
619554f4ffdSJeff Kirsher 		iowrite16(0x0018, ioaddr + FrameGap0); /* 0060/4060 for non-MII 10baseT */
620554f4ffdSJeff Kirsher 		iowrite16(0x1018, ioaddr + FrameGap1);
621554f4ffdSJeff Kirsher 		iowrite16(0x101C | (yp->full_duplex ? 2 : 0), ioaddr + Cnfg);
622554f4ffdSJeff Kirsher 	}
623554f4ffdSJeff Kirsher 	set_rx_mode(dev);
624554f4ffdSJeff Kirsher 
625554f4ffdSJeff Kirsher 	/* Enable interrupts by setting the interrupt mask. */
626554f4ffdSJeff Kirsher 	iowrite16(0x81ff, ioaddr + IntrEnb);			/* See enum intr_status_bits */
627554f4ffdSJeff Kirsher 	iowrite16(0x0000, ioaddr + EventStatus);		/* Clear non-interrupting events */
628554f4ffdSJeff Kirsher 	iowrite32(0x80008000, ioaddr + RxCtrl);		/* Start Rx and Tx channels. */
629554f4ffdSJeff Kirsher 	iowrite32(0x80008000, ioaddr + TxCtrl);
630554f4ffdSJeff Kirsher 
631554f4ffdSJeff Kirsher 	if (yellowfin_debug > 2) {
632554f4ffdSJeff Kirsher 		netdev_printk(KERN_DEBUG, dev, "Done %s()\n", __func__);
633554f4ffdSJeff Kirsher 	}
634554f4ffdSJeff Kirsher 
635554f4ffdSJeff Kirsher 	/* Set the timer to check for link beat. */
636554f4ffdSJeff Kirsher 	init_timer(&yp->timer);
637554f4ffdSJeff Kirsher 	yp->timer.expires = jiffies + 3*HZ;
638554f4ffdSJeff Kirsher 	yp->timer.data = (unsigned long)dev;
639554f4ffdSJeff Kirsher 	yp->timer.function = yellowfin_timer;				/* timer handler */
640554f4ffdSJeff Kirsher 	add_timer(&yp->timer);
6410c18acc1SFrancois Romieu out:
6420c18acc1SFrancois Romieu 	return rc;
643554f4ffdSJeff Kirsher 
6440c18acc1SFrancois Romieu err_free_irq:
6450c18acc1SFrancois Romieu 	free_irq(irq, dev);
6460c18acc1SFrancois Romieu 	goto out;
647554f4ffdSJeff Kirsher }
648554f4ffdSJeff Kirsher 
649554f4ffdSJeff Kirsher static void yellowfin_timer(unsigned long data)
650554f4ffdSJeff Kirsher {
651554f4ffdSJeff Kirsher 	struct net_device *dev = (struct net_device *)data;
652554f4ffdSJeff Kirsher 	struct yellowfin_private *yp = netdev_priv(dev);
653554f4ffdSJeff Kirsher 	void __iomem *ioaddr = yp->base;
654554f4ffdSJeff Kirsher 	int next_tick = 60*HZ;
655554f4ffdSJeff Kirsher 
656554f4ffdSJeff Kirsher 	if (yellowfin_debug > 3) {
657554f4ffdSJeff Kirsher 		netdev_printk(KERN_DEBUG, dev, "Yellowfin timer tick, status %08x\n",
658554f4ffdSJeff Kirsher 			      ioread16(ioaddr + IntrStatus));
659554f4ffdSJeff Kirsher 	}
660554f4ffdSJeff Kirsher 
661554f4ffdSJeff Kirsher 	if (yp->mii_cnt) {
662554f4ffdSJeff Kirsher 		int bmsr = mdio_read(ioaddr, yp->phys[0], MII_BMSR);
663554f4ffdSJeff Kirsher 		int lpa = mdio_read(ioaddr, yp->phys[0], MII_LPA);
664554f4ffdSJeff Kirsher 		int negotiated = lpa & yp->advertising;
665554f4ffdSJeff Kirsher 		if (yellowfin_debug > 1)
666554f4ffdSJeff Kirsher 			netdev_printk(KERN_DEBUG, dev, "MII #%d status register is %04x, link partner capability %04x\n",
667554f4ffdSJeff Kirsher 				      yp->phys[0], bmsr, lpa);
668554f4ffdSJeff Kirsher 
669554f4ffdSJeff Kirsher 		yp->full_duplex = mii_duplex(yp->duplex_lock, negotiated);
670554f4ffdSJeff Kirsher 
671554f4ffdSJeff Kirsher 		iowrite16(0x101C | (yp->full_duplex ? 2 : 0), ioaddr + Cnfg);
672554f4ffdSJeff Kirsher 
673554f4ffdSJeff Kirsher 		if (bmsr & BMSR_LSTATUS)
674554f4ffdSJeff Kirsher 			next_tick = 60*HZ;
675554f4ffdSJeff Kirsher 		else
676554f4ffdSJeff Kirsher 			next_tick = 3*HZ;
677554f4ffdSJeff Kirsher 	}
678554f4ffdSJeff Kirsher 
679554f4ffdSJeff Kirsher 	yp->timer.expires = jiffies + next_tick;
680554f4ffdSJeff Kirsher 	add_timer(&yp->timer);
681554f4ffdSJeff Kirsher }
682554f4ffdSJeff Kirsher 
683554f4ffdSJeff Kirsher static void yellowfin_tx_timeout(struct net_device *dev)
684554f4ffdSJeff Kirsher {
685554f4ffdSJeff Kirsher 	struct yellowfin_private *yp = netdev_priv(dev);
686554f4ffdSJeff Kirsher 	void __iomem *ioaddr = yp->base;
687554f4ffdSJeff Kirsher 
688554f4ffdSJeff Kirsher 	netdev_warn(dev, "Yellowfin transmit timed out at %d/%d Tx status %04x, Rx status %04x, resetting...\n",
689554f4ffdSJeff Kirsher 		    yp->cur_tx, yp->dirty_tx,
690554f4ffdSJeff Kirsher 		    ioread32(ioaddr + TxStatus),
691554f4ffdSJeff Kirsher 		    ioread32(ioaddr + RxStatus));
692554f4ffdSJeff Kirsher 
693554f4ffdSJeff Kirsher 	/* Note: these should be KERN_DEBUG. */
694554f4ffdSJeff Kirsher 	if (yellowfin_debug) {
695554f4ffdSJeff Kirsher 		int i;
696554f4ffdSJeff Kirsher 		pr_warning("  Rx ring %p: ", yp->rx_ring);
697554f4ffdSJeff Kirsher 		for (i = 0; i < RX_RING_SIZE; i++)
698554f4ffdSJeff Kirsher 			pr_cont(" %08x", yp->rx_ring[i].result_status);
699554f4ffdSJeff Kirsher 		pr_cont("\n");
700554f4ffdSJeff Kirsher 		pr_warning("  Tx ring %p: ", yp->tx_ring);
701554f4ffdSJeff Kirsher 		for (i = 0; i < TX_RING_SIZE; i++)
702554f4ffdSJeff Kirsher 			pr_cont(" %04x /%08x",
703554f4ffdSJeff Kirsher 			       yp->tx_status[i].tx_errs,
704554f4ffdSJeff Kirsher 			       yp->tx_ring[i].result_status);
705554f4ffdSJeff Kirsher 		pr_cont("\n");
706554f4ffdSJeff Kirsher 	}
707554f4ffdSJeff Kirsher 
708554f4ffdSJeff Kirsher 	/* If the hardware is found to hang regularly, we will update the code
709554f4ffdSJeff Kirsher 	   to reinitialize the chip here. */
710554f4ffdSJeff Kirsher 	dev->if_port = 0;
711554f4ffdSJeff Kirsher 
712554f4ffdSJeff Kirsher 	/* Wake the potentially-idle transmit channel. */
713554f4ffdSJeff Kirsher 	iowrite32(0x10001000, yp->base + TxCtrl);
714554f4ffdSJeff Kirsher 	if (yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE)
715554f4ffdSJeff Kirsher 		netif_wake_queue (dev);		/* Typical path */
716554f4ffdSJeff Kirsher 
717554f4ffdSJeff Kirsher 	dev->trans_start = jiffies; /* prevent tx timeout */
718554f4ffdSJeff Kirsher 	dev->stats.tx_errors++;
719554f4ffdSJeff Kirsher }
720554f4ffdSJeff Kirsher 
721554f4ffdSJeff Kirsher /* Initialize the Rx and Tx rings, along with various 'dev' bits. */
722554f4ffdSJeff Kirsher static int yellowfin_init_ring(struct net_device *dev)
723554f4ffdSJeff Kirsher {
724554f4ffdSJeff Kirsher 	struct yellowfin_private *yp = netdev_priv(dev);
725554f4ffdSJeff Kirsher 	int i, j;
726554f4ffdSJeff Kirsher 
727554f4ffdSJeff Kirsher 	yp->tx_full = 0;
728554f4ffdSJeff Kirsher 	yp->cur_rx = yp->cur_tx = 0;
729554f4ffdSJeff Kirsher 	yp->dirty_tx = 0;
730554f4ffdSJeff Kirsher 
731554f4ffdSJeff Kirsher 	yp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
732554f4ffdSJeff Kirsher 
733554f4ffdSJeff Kirsher 	for (i = 0; i < RX_RING_SIZE; i++) {
734554f4ffdSJeff Kirsher 		yp->rx_ring[i].dbdma_cmd =
735554f4ffdSJeff Kirsher 			cpu_to_le32(CMD_RX_BUF | INTR_ALWAYS | yp->rx_buf_sz);
736554f4ffdSJeff Kirsher 		yp->rx_ring[i].branch_addr = cpu_to_le32(yp->rx_ring_dma +
737554f4ffdSJeff Kirsher 			((i+1)%RX_RING_SIZE)*sizeof(struct yellowfin_desc));
738554f4ffdSJeff Kirsher 	}
739554f4ffdSJeff Kirsher 
740554f4ffdSJeff Kirsher 	for (i = 0; i < RX_RING_SIZE; i++) {
741dae2e9f4SPradeep A. Dalvi 		struct sk_buff *skb = netdev_alloc_skb(dev, yp->rx_buf_sz + 2);
742554f4ffdSJeff Kirsher 		yp->rx_skbuff[i] = skb;
743554f4ffdSJeff Kirsher 		if (skb == NULL)
744554f4ffdSJeff Kirsher 			break;
745554f4ffdSJeff Kirsher 		skb_reserve(skb, 2);	/* 16 byte align the IP header. */
746554f4ffdSJeff Kirsher 		yp->rx_ring[i].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
747554f4ffdSJeff Kirsher 			skb->data, yp->rx_buf_sz, PCI_DMA_FROMDEVICE));
748554f4ffdSJeff Kirsher 	}
749554f4ffdSJeff Kirsher 	if (i != RX_RING_SIZE) {
750554f4ffdSJeff Kirsher 		for (j = 0; j < i; j++)
751554f4ffdSJeff Kirsher 			dev_kfree_skb(yp->rx_skbuff[j]);
752554f4ffdSJeff Kirsher 		return -ENOMEM;
753554f4ffdSJeff Kirsher 	}
754554f4ffdSJeff Kirsher 	yp->rx_ring[i-1].dbdma_cmd = cpu_to_le32(CMD_STOP);
755554f4ffdSJeff Kirsher 	yp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
756554f4ffdSJeff Kirsher 
757554f4ffdSJeff Kirsher #define NO_TXSTATS
758554f4ffdSJeff Kirsher #ifdef NO_TXSTATS
759554f4ffdSJeff Kirsher 	/* In this mode the Tx ring needs only a single descriptor. */
760554f4ffdSJeff Kirsher 	for (i = 0; i < TX_RING_SIZE; i++) {
761554f4ffdSJeff Kirsher 		yp->tx_skbuff[i] = NULL;
762554f4ffdSJeff Kirsher 		yp->tx_ring[i].dbdma_cmd = cpu_to_le32(CMD_STOP);
763554f4ffdSJeff Kirsher 		yp->tx_ring[i].branch_addr = cpu_to_le32(yp->tx_ring_dma +
764554f4ffdSJeff Kirsher 			((i+1)%TX_RING_SIZE)*sizeof(struct yellowfin_desc));
765554f4ffdSJeff Kirsher 	}
766554f4ffdSJeff Kirsher 	/* Wrap ring */
767554f4ffdSJeff Kirsher 	yp->tx_ring[--i].dbdma_cmd = cpu_to_le32(CMD_STOP | BRANCH_ALWAYS);
768554f4ffdSJeff Kirsher #else
769554f4ffdSJeff Kirsher {
770554f4ffdSJeff Kirsher 	/* Tx ring needs a pair of descriptors, the second for the status. */
771554f4ffdSJeff Kirsher 	for (i = 0; i < TX_RING_SIZE; i++) {
772554f4ffdSJeff Kirsher 		j = 2*i;
773554f4ffdSJeff Kirsher 		yp->tx_skbuff[i] = 0;
774554f4ffdSJeff Kirsher 		/* Branch on Tx error. */
775554f4ffdSJeff Kirsher 		yp->tx_ring[j].dbdma_cmd = cpu_to_le32(CMD_STOP);
776554f4ffdSJeff Kirsher 		yp->tx_ring[j].branch_addr = cpu_to_le32(yp->tx_ring_dma +
777554f4ffdSJeff Kirsher 			(j+1)*sizeof(struct yellowfin_desc));
778554f4ffdSJeff Kirsher 		j++;
779554f4ffdSJeff Kirsher 		if (yp->flags & FullTxStatus) {
780554f4ffdSJeff Kirsher 			yp->tx_ring[j].dbdma_cmd =
781554f4ffdSJeff Kirsher 				cpu_to_le32(CMD_TXSTATUS | sizeof(*yp->tx_status));
782554f4ffdSJeff Kirsher 			yp->tx_ring[j].request_cnt = sizeof(*yp->tx_status);
783554f4ffdSJeff Kirsher 			yp->tx_ring[j].addr = cpu_to_le32(yp->tx_status_dma +
784554f4ffdSJeff Kirsher 				i*sizeof(struct tx_status_words));
785554f4ffdSJeff Kirsher 		} else {
786554f4ffdSJeff Kirsher 			/* Symbios chips write only tx_errs word. */
787554f4ffdSJeff Kirsher 			yp->tx_ring[j].dbdma_cmd =
788554f4ffdSJeff Kirsher 				cpu_to_le32(CMD_TXSTATUS | INTR_ALWAYS | 2);
789554f4ffdSJeff Kirsher 			yp->tx_ring[j].request_cnt = 2;
790554f4ffdSJeff Kirsher 			/* Om pade ummmmm... */
791554f4ffdSJeff Kirsher 			yp->tx_ring[j].addr = cpu_to_le32(yp->tx_status_dma +
792554f4ffdSJeff Kirsher 				i*sizeof(struct tx_status_words) +
793554f4ffdSJeff Kirsher 				&(yp->tx_status[0].tx_errs) -
794554f4ffdSJeff Kirsher 				&(yp->tx_status[0]));
795554f4ffdSJeff Kirsher 		}
796554f4ffdSJeff Kirsher 		yp->tx_ring[j].branch_addr = cpu_to_le32(yp->tx_ring_dma +
797554f4ffdSJeff Kirsher 			((j+1)%(2*TX_RING_SIZE))*sizeof(struct yellowfin_desc));
798554f4ffdSJeff Kirsher 	}
799554f4ffdSJeff Kirsher 	/* Wrap ring */
800554f4ffdSJeff Kirsher 	yp->tx_ring[++j].dbdma_cmd |= cpu_to_le32(BRANCH_ALWAYS | INTR_ALWAYS);
801554f4ffdSJeff Kirsher }
802554f4ffdSJeff Kirsher #endif
803554f4ffdSJeff Kirsher 	yp->tx_tail_desc = &yp->tx_status[0];
804554f4ffdSJeff Kirsher 	return 0;
805554f4ffdSJeff Kirsher }
806554f4ffdSJeff Kirsher 
807554f4ffdSJeff Kirsher static netdev_tx_t yellowfin_start_xmit(struct sk_buff *skb,
808554f4ffdSJeff Kirsher 					struct net_device *dev)
809554f4ffdSJeff Kirsher {
810554f4ffdSJeff Kirsher 	struct yellowfin_private *yp = netdev_priv(dev);
811554f4ffdSJeff Kirsher 	unsigned entry;
812554f4ffdSJeff Kirsher 	int len = skb->len;
813554f4ffdSJeff Kirsher 
814554f4ffdSJeff Kirsher 	netif_stop_queue (dev);
815554f4ffdSJeff Kirsher 
816554f4ffdSJeff Kirsher 	/* Note: Ordering is important here, set the field with the
817554f4ffdSJeff Kirsher 	   "ownership" bit last, and only then increment cur_tx. */
818554f4ffdSJeff Kirsher 
819554f4ffdSJeff Kirsher 	/* Calculate the next Tx descriptor entry. */
820554f4ffdSJeff Kirsher 	entry = yp->cur_tx % TX_RING_SIZE;
821554f4ffdSJeff Kirsher 
822554f4ffdSJeff Kirsher 	if (gx_fix) {	/* Note: only works for paddable protocols e.g.  IP. */
823554f4ffdSJeff Kirsher 		int cacheline_end = ((unsigned long)skb->data + skb->len) % 32;
824554f4ffdSJeff Kirsher 		/* Fix GX chipset errata. */
825554f4ffdSJeff Kirsher 		if (cacheline_end > 24  || cacheline_end == 0) {
826554f4ffdSJeff Kirsher 			len = skb->len + 32 - cacheline_end + 1;
827554f4ffdSJeff Kirsher 			if (skb_padto(skb, len)) {
828554f4ffdSJeff Kirsher 				yp->tx_skbuff[entry] = NULL;
829554f4ffdSJeff Kirsher 				netif_wake_queue(dev);
830554f4ffdSJeff Kirsher 				return NETDEV_TX_OK;
831554f4ffdSJeff Kirsher 			}
832554f4ffdSJeff Kirsher 		}
833554f4ffdSJeff Kirsher 	}
834554f4ffdSJeff Kirsher 	yp->tx_skbuff[entry] = skb;
835554f4ffdSJeff Kirsher 
836554f4ffdSJeff Kirsher #ifdef NO_TXSTATS
837554f4ffdSJeff Kirsher 	yp->tx_ring[entry].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
838554f4ffdSJeff Kirsher 		skb->data, len, PCI_DMA_TODEVICE));
839554f4ffdSJeff Kirsher 	yp->tx_ring[entry].result_status = 0;
840554f4ffdSJeff Kirsher 	if (entry >= TX_RING_SIZE-1) {
841554f4ffdSJeff Kirsher 		/* New stop command. */
842554f4ffdSJeff Kirsher 		yp->tx_ring[0].dbdma_cmd = cpu_to_le32(CMD_STOP);
843554f4ffdSJeff Kirsher 		yp->tx_ring[TX_RING_SIZE-1].dbdma_cmd =
844554f4ffdSJeff Kirsher 			cpu_to_le32(CMD_TX_PKT|BRANCH_ALWAYS | len);
845554f4ffdSJeff Kirsher 	} else {
846554f4ffdSJeff Kirsher 		yp->tx_ring[entry+1].dbdma_cmd = cpu_to_le32(CMD_STOP);
847554f4ffdSJeff Kirsher 		yp->tx_ring[entry].dbdma_cmd =
848554f4ffdSJeff Kirsher 			cpu_to_le32(CMD_TX_PKT | BRANCH_IFTRUE | len);
849554f4ffdSJeff Kirsher 	}
850554f4ffdSJeff Kirsher 	yp->cur_tx++;
851554f4ffdSJeff Kirsher #else
852554f4ffdSJeff Kirsher 	yp->tx_ring[entry<<1].request_cnt = len;
853554f4ffdSJeff Kirsher 	yp->tx_ring[entry<<1].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
854554f4ffdSJeff Kirsher 		skb->data, len, PCI_DMA_TODEVICE));
855554f4ffdSJeff Kirsher 	/* The input_last (status-write) command is constant, but we must
856554f4ffdSJeff Kirsher 	   rewrite the subsequent 'stop' command. */
857554f4ffdSJeff Kirsher 
858554f4ffdSJeff Kirsher 	yp->cur_tx++;
859554f4ffdSJeff Kirsher 	{
860554f4ffdSJeff Kirsher 		unsigned next_entry = yp->cur_tx % TX_RING_SIZE;
861554f4ffdSJeff Kirsher 		yp->tx_ring[next_entry<<1].dbdma_cmd = cpu_to_le32(CMD_STOP);
862554f4ffdSJeff Kirsher 	}
863554f4ffdSJeff Kirsher 	/* Final step -- overwrite the old 'stop' command. */
864554f4ffdSJeff Kirsher 
865554f4ffdSJeff Kirsher 	yp->tx_ring[entry<<1].dbdma_cmd =
866554f4ffdSJeff Kirsher 		cpu_to_le32( ((entry % 6) == 0 ? CMD_TX_PKT|INTR_ALWAYS|BRANCH_IFTRUE :
867554f4ffdSJeff Kirsher 					  CMD_TX_PKT | BRANCH_IFTRUE) | len);
868554f4ffdSJeff Kirsher #endif
869554f4ffdSJeff Kirsher 
870554f4ffdSJeff Kirsher 	/* Non-x86 Todo: explicitly flush cache lines here. */
871554f4ffdSJeff Kirsher 
872554f4ffdSJeff Kirsher 	/* Wake the potentially-idle transmit channel. */
873554f4ffdSJeff Kirsher 	iowrite32(0x10001000, yp->base + TxCtrl);
874554f4ffdSJeff Kirsher 
875554f4ffdSJeff Kirsher 	if (yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE)
876554f4ffdSJeff Kirsher 		netif_start_queue (dev);		/* Typical path */
877554f4ffdSJeff Kirsher 	else
878554f4ffdSJeff Kirsher 		yp->tx_full = 1;
879554f4ffdSJeff Kirsher 
880554f4ffdSJeff Kirsher 	if (yellowfin_debug > 4) {
881554f4ffdSJeff Kirsher 		netdev_printk(KERN_DEBUG, dev, "Yellowfin transmit frame #%d queued in slot %d\n",
882554f4ffdSJeff Kirsher 			      yp->cur_tx, entry);
883554f4ffdSJeff Kirsher 	}
884554f4ffdSJeff Kirsher 	return NETDEV_TX_OK;
885554f4ffdSJeff Kirsher }
886554f4ffdSJeff Kirsher 
887554f4ffdSJeff Kirsher /* The interrupt handler does all of the Rx thread work and cleans up
888554f4ffdSJeff Kirsher    after the Tx thread. */
889554f4ffdSJeff Kirsher static irqreturn_t yellowfin_interrupt(int irq, void *dev_instance)
890554f4ffdSJeff Kirsher {
891554f4ffdSJeff Kirsher 	struct net_device *dev = dev_instance;
892554f4ffdSJeff Kirsher 	struct yellowfin_private *yp;
893554f4ffdSJeff Kirsher 	void __iomem *ioaddr;
894554f4ffdSJeff Kirsher 	int boguscnt = max_interrupt_work;
895554f4ffdSJeff Kirsher 	unsigned int handled = 0;
896554f4ffdSJeff Kirsher 
897554f4ffdSJeff Kirsher 	yp = netdev_priv(dev);
898554f4ffdSJeff Kirsher 	ioaddr = yp->base;
899554f4ffdSJeff Kirsher 
900554f4ffdSJeff Kirsher 	spin_lock (&yp->lock);
901554f4ffdSJeff Kirsher 
902554f4ffdSJeff Kirsher 	do {
903554f4ffdSJeff Kirsher 		u16 intr_status = ioread16(ioaddr + IntrClear);
904554f4ffdSJeff Kirsher 
905554f4ffdSJeff Kirsher 		if (yellowfin_debug > 4)
906554f4ffdSJeff Kirsher 			netdev_printk(KERN_DEBUG, dev, "Yellowfin interrupt, status %04x\n",
907554f4ffdSJeff Kirsher 				      intr_status);
908554f4ffdSJeff Kirsher 
909554f4ffdSJeff Kirsher 		if (intr_status == 0)
910554f4ffdSJeff Kirsher 			break;
911554f4ffdSJeff Kirsher 		handled = 1;
912554f4ffdSJeff Kirsher 
913554f4ffdSJeff Kirsher 		if (intr_status & (IntrRxDone | IntrEarlyRx)) {
914554f4ffdSJeff Kirsher 			yellowfin_rx(dev);
915554f4ffdSJeff Kirsher 			iowrite32(0x10001000, ioaddr + RxCtrl);		/* Wake Rx engine. */
916554f4ffdSJeff Kirsher 		}
917554f4ffdSJeff Kirsher 
918554f4ffdSJeff Kirsher #ifdef NO_TXSTATS
919554f4ffdSJeff Kirsher 		for (; yp->cur_tx - yp->dirty_tx > 0; yp->dirty_tx++) {
920554f4ffdSJeff Kirsher 			int entry = yp->dirty_tx % TX_RING_SIZE;
921554f4ffdSJeff Kirsher 			struct sk_buff *skb;
922554f4ffdSJeff Kirsher 
923554f4ffdSJeff Kirsher 			if (yp->tx_ring[entry].result_status == 0)
924554f4ffdSJeff Kirsher 				break;
925554f4ffdSJeff Kirsher 			skb = yp->tx_skbuff[entry];
926554f4ffdSJeff Kirsher 			dev->stats.tx_packets++;
927554f4ffdSJeff Kirsher 			dev->stats.tx_bytes += skb->len;
928554f4ffdSJeff Kirsher 			/* Free the original skb. */
929554f4ffdSJeff Kirsher 			pci_unmap_single(yp->pci_dev, le32_to_cpu(yp->tx_ring[entry].addr),
930554f4ffdSJeff Kirsher 				skb->len, PCI_DMA_TODEVICE);
931554f4ffdSJeff Kirsher 			dev_kfree_skb_irq(skb);
932554f4ffdSJeff Kirsher 			yp->tx_skbuff[entry] = NULL;
933554f4ffdSJeff Kirsher 		}
934554f4ffdSJeff Kirsher 		if (yp->tx_full &&
935554f4ffdSJeff Kirsher 		    yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE - 4) {
936554f4ffdSJeff Kirsher 			/* The ring is no longer full, clear tbusy. */
937554f4ffdSJeff Kirsher 			yp->tx_full = 0;
938554f4ffdSJeff Kirsher 			netif_wake_queue(dev);
939554f4ffdSJeff Kirsher 		}
940554f4ffdSJeff Kirsher #else
941554f4ffdSJeff Kirsher 		if ((intr_status & IntrTxDone) || (yp->tx_tail_desc->tx_errs)) {
942554f4ffdSJeff Kirsher 			unsigned dirty_tx = yp->dirty_tx;
943554f4ffdSJeff Kirsher 
944554f4ffdSJeff Kirsher 			for (dirty_tx = yp->dirty_tx; yp->cur_tx - dirty_tx > 0;
945554f4ffdSJeff Kirsher 				 dirty_tx++) {
946554f4ffdSJeff Kirsher 				/* Todo: optimize this. */
947554f4ffdSJeff Kirsher 				int entry = dirty_tx % TX_RING_SIZE;
948554f4ffdSJeff Kirsher 				u16 tx_errs = yp->tx_status[entry].tx_errs;
949554f4ffdSJeff Kirsher 				struct sk_buff *skb;
950554f4ffdSJeff Kirsher 
951554f4ffdSJeff Kirsher #ifndef final_version
952554f4ffdSJeff Kirsher 				if (yellowfin_debug > 5)
953554f4ffdSJeff Kirsher 					netdev_printk(KERN_DEBUG, dev, "Tx queue %d check, Tx status %04x %04x %04x %04x\n",
954554f4ffdSJeff Kirsher 						      entry,
955554f4ffdSJeff Kirsher 						      yp->tx_status[entry].tx_cnt,
956554f4ffdSJeff Kirsher 						      yp->tx_status[entry].tx_errs,
957554f4ffdSJeff Kirsher 						      yp->tx_status[entry].total_tx_cnt,
958554f4ffdSJeff Kirsher 						      yp->tx_status[entry].paused);
959554f4ffdSJeff Kirsher #endif
960554f4ffdSJeff Kirsher 				if (tx_errs == 0)
961554f4ffdSJeff Kirsher 					break;	/* It still hasn't been Txed */
962554f4ffdSJeff Kirsher 				skb = yp->tx_skbuff[entry];
963554f4ffdSJeff Kirsher 				if (tx_errs & 0xF810) {
964554f4ffdSJeff Kirsher 					/* There was an major error, log it. */
965554f4ffdSJeff Kirsher #ifndef final_version
966554f4ffdSJeff Kirsher 					if (yellowfin_debug > 1)
967554f4ffdSJeff Kirsher 						netdev_printk(KERN_DEBUG, dev, "Transmit error, Tx status %04x\n",
968554f4ffdSJeff Kirsher 							      tx_errs);
969554f4ffdSJeff Kirsher #endif
970554f4ffdSJeff Kirsher 					dev->stats.tx_errors++;
971554f4ffdSJeff Kirsher 					if (tx_errs & 0xF800) dev->stats.tx_aborted_errors++;
972554f4ffdSJeff Kirsher 					if (tx_errs & 0x0800) dev->stats.tx_carrier_errors++;
973554f4ffdSJeff Kirsher 					if (tx_errs & 0x2000) dev->stats.tx_window_errors++;
974554f4ffdSJeff Kirsher 					if (tx_errs & 0x8000) dev->stats.tx_fifo_errors++;
975554f4ffdSJeff Kirsher 				} else {
976554f4ffdSJeff Kirsher #ifndef final_version
977554f4ffdSJeff Kirsher 					if (yellowfin_debug > 4)
978554f4ffdSJeff Kirsher 						netdev_printk(KERN_DEBUG, dev, "Normal transmit, Tx status %04x\n",
979554f4ffdSJeff Kirsher 							      tx_errs);
980554f4ffdSJeff Kirsher #endif
981554f4ffdSJeff Kirsher 					dev->stats.tx_bytes += skb->len;
982554f4ffdSJeff Kirsher 					dev->stats.collisions += tx_errs & 15;
983554f4ffdSJeff Kirsher 					dev->stats.tx_packets++;
984554f4ffdSJeff Kirsher 				}
985554f4ffdSJeff Kirsher 				/* Free the original skb. */
986554f4ffdSJeff Kirsher 				pci_unmap_single(yp->pci_dev,
987554f4ffdSJeff Kirsher 					yp->tx_ring[entry<<1].addr, skb->len,
988554f4ffdSJeff Kirsher 					PCI_DMA_TODEVICE);
989554f4ffdSJeff Kirsher 				dev_kfree_skb_irq(skb);
990554f4ffdSJeff Kirsher 				yp->tx_skbuff[entry] = 0;
991554f4ffdSJeff Kirsher 				/* Mark status as empty. */
992554f4ffdSJeff Kirsher 				yp->tx_status[entry].tx_errs = 0;
993554f4ffdSJeff Kirsher 			}
994554f4ffdSJeff Kirsher 
995554f4ffdSJeff Kirsher #ifndef final_version
996554f4ffdSJeff Kirsher 			if (yp->cur_tx - dirty_tx > TX_RING_SIZE) {
997554f4ffdSJeff Kirsher 				netdev_err(dev, "Out-of-sync dirty pointer, %d vs. %d, full=%d\n",
998554f4ffdSJeff Kirsher 					   dirty_tx, yp->cur_tx, yp->tx_full);
999554f4ffdSJeff Kirsher 				dirty_tx += TX_RING_SIZE;
1000554f4ffdSJeff Kirsher 			}
1001554f4ffdSJeff Kirsher #endif
1002554f4ffdSJeff Kirsher 
1003554f4ffdSJeff Kirsher 			if (yp->tx_full &&
1004554f4ffdSJeff Kirsher 			    yp->cur_tx - dirty_tx < TX_QUEUE_SIZE - 2) {
1005554f4ffdSJeff Kirsher 				/* The ring is no longer full, clear tbusy. */
1006554f4ffdSJeff Kirsher 				yp->tx_full = 0;
1007554f4ffdSJeff Kirsher 				netif_wake_queue(dev);
1008554f4ffdSJeff Kirsher 			}
1009554f4ffdSJeff Kirsher 
1010554f4ffdSJeff Kirsher 			yp->dirty_tx = dirty_tx;
1011554f4ffdSJeff Kirsher 			yp->tx_tail_desc = &yp->tx_status[dirty_tx % TX_RING_SIZE];
1012554f4ffdSJeff Kirsher 		}
1013554f4ffdSJeff Kirsher #endif
1014554f4ffdSJeff Kirsher 
1015554f4ffdSJeff Kirsher 		/* Log errors and other uncommon events. */
1016554f4ffdSJeff Kirsher 		if (intr_status & 0x2ee)	/* Abnormal error summary. */
1017554f4ffdSJeff Kirsher 			yellowfin_error(dev, intr_status);
1018554f4ffdSJeff Kirsher 
1019554f4ffdSJeff Kirsher 		if (--boguscnt < 0) {
1020554f4ffdSJeff Kirsher 			netdev_warn(dev, "Too much work at interrupt, status=%#04x\n",
1021554f4ffdSJeff Kirsher 				    intr_status);
1022554f4ffdSJeff Kirsher 			break;
1023554f4ffdSJeff Kirsher 		}
1024554f4ffdSJeff Kirsher 	} while (1);
1025554f4ffdSJeff Kirsher 
1026554f4ffdSJeff Kirsher 	if (yellowfin_debug > 3)
1027554f4ffdSJeff Kirsher 		netdev_printk(KERN_DEBUG, dev, "exiting interrupt, status=%#04x\n",
1028554f4ffdSJeff Kirsher 			      ioread16(ioaddr + IntrStatus));
1029554f4ffdSJeff Kirsher 
1030554f4ffdSJeff Kirsher 	spin_unlock (&yp->lock);
1031554f4ffdSJeff Kirsher 	return IRQ_RETVAL(handled);
1032554f4ffdSJeff Kirsher }
1033554f4ffdSJeff Kirsher 
1034554f4ffdSJeff Kirsher /* This routine is logically part of the interrupt handler, but separated
1035554f4ffdSJeff Kirsher    for clarity and better register allocation. */
1036554f4ffdSJeff Kirsher static int yellowfin_rx(struct net_device *dev)
1037554f4ffdSJeff Kirsher {
1038554f4ffdSJeff Kirsher 	struct yellowfin_private *yp = netdev_priv(dev);
1039554f4ffdSJeff Kirsher 	int entry = yp->cur_rx % RX_RING_SIZE;
1040554f4ffdSJeff Kirsher 	int boguscnt = yp->dirty_rx + RX_RING_SIZE - yp->cur_rx;
1041554f4ffdSJeff Kirsher 
1042554f4ffdSJeff Kirsher 	if (yellowfin_debug > 4) {
1043554f4ffdSJeff Kirsher 		printk(KERN_DEBUG " In yellowfin_rx(), entry %d status %08x\n",
1044554f4ffdSJeff Kirsher 			   entry, yp->rx_ring[entry].result_status);
1045554f4ffdSJeff Kirsher 		printk(KERN_DEBUG "   #%d desc. %08x %08x %08x\n",
1046554f4ffdSJeff Kirsher 			   entry, yp->rx_ring[entry].dbdma_cmd, yp->rx_ring[entry].addr,
1047554f4ffdSJeff Kirsher 			   yp->rx_ring[entry].result_status);
1048554f4ffdSJeff Kirsher 	}
1049554f4ffdSJeff Kirsher 
1050554f4ffdSJeff Kirsher 	/* If EOP is set on the next entry, it's a new packet. Send it up. */
1051554f4ffdSJeff Kirsher 	while (1) {
1052554f4ffdSJeff Kirsher 		struct yellowfin_desc *desc = &yp->rx_ring[entry];
1053554f4ffdSJeff Kirsher 		struct sk_buff *rx_skb = yp->rx_skbuff[entry];
1054554f4ffdSJeff Kirsher 		s16 frame_status;
1055554f4ffdSJeff Kirsher 		u16 desc_status;
10563a8e87ecSdingtianhong 		int data_size, yf_size;
1057554f4ffdSJeff Kirsher 		u8 *buf_addr;
1058554f4ffdSJeff Kirsher 
1059554f4ffdSJeff Kirsher 		if(!desc->result_status)
1060554f4ffdSJeff Kirsher 			break;
1061554f4ffdSJeff Kirsher 		pci_dma_sync_single_for_cpu(yp->pci_dev, le32_to_cpu(desc->addr),
1062554f4ffdSJeff Kirsher 			yp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1063554f4ffdSJeff Kirsher 		desc_status = le32_to_cpu(desc->result_status) >> 16;
1064554f4ffdSJeff Kirsher 		buf_addr = rx_skb->data;
1065554f4ffdSJeff Kirsher 		data_size = (le32_to_cpu(desc->dbdma_cmd) -
1066554f4ffdSJeff Kirsher 			le32_to_cpu(desc->result_status)) & 0xffff;
1067554f4ffdSJeff Kirsher 		frame_status = get_unaligned_le16(&(buf_addr[data_size - 2]));
1068554f4ffdSJeff Kirsher 		if (yellowfin_debug > 4)
1069554f4ffdSJeff Kirsher 			printk(KERN_DEBUG "  %s() status was %04x\n",
1070554f4ffdSJeff Kirsher 			       __func__, frame_status);
1071554f4ffdSJeff Kirsher 		if (--boguscnt < 0)
1072554f4ffdSJeff Kirsher 			break;
10733a8e87ecSdingtianhong 
10743a8e87ecSdingtianhong 		yf_size = sizeof(struct yellowfin_desc);
10753a8e87ecSdingtianhong 
1076554f4ffdSJeff Kirsher 		if ( ! (desc_status & RX_EOP)) {
1077554f4ffdSJeff Kirsher 			if (data_size != 0)
1078554f4ffdSJeff Kirsher 				netdev_warn(dev, "Oversized Ethernet frame spanned multiple buffers, status %04x, data_size %d!\n",
1079554f4ffdSJeff Kirsher 					    desc_status, data_size);
1080554f4ffdSJeff Kirsher 			dev->stats.rx_length_errors++;
1081554f4ffdSJeff Kirsher 		} else if ((yp->drv_flags & IsGigabit)  &&  (frame_status & 0x0038)) {
1082554f4ffdSJeff Kirsher 			/* There was a error. */
1083554f4ffdSJeff Kirsher 			if (yellowfin_debug > 3)
1084554f4ffdSJeff Kirsher 				printk(KERN_DEBUG "  %s() Rx error was %04x\n",
1085554f4ffdSJeff Kirsher 				       __func__, frame_status);
1086554f4ffdSJeff Kirsher 			dev->stats.rx_errors++;
1087554f4ffdSJeff Kirsher 			if (frame_status & 0x0060) dev->stats.rx_length_errors++;
1088554f4ffdSJeff Kirsher 			if (frame_status & 0x0008) dev->stats.rx_frame_errors++;
1089554f4ffdSJeff Kirsher 			if (frame_status & 0x0010) dev->stats.rx_crc_errors++;
1090554f4ffdSJeff Kirsher 			if (frame_status < 0) dev->stats.rx_dropped++;
1091554f4ffdSJeff Kirsher 		} else if ( !(yp->drv_flags & IsGigabit)  &&
1092554f4ffdSJeff Kirsher 				   ((buf_addr[data_size-1] & 0x85) || buf_addr[data_size-2] & 0xC0)) {
1093554f4ffdSJeff Kirsher 			u8 status1 = buf_addr[data_size-2];
1094554f4ffdSJeff Kirsher 			u8 status2 = buf_addr[data_size-1];
1095554f4ffdSJeff Kirsher 			dev->stats.rx_errors++;
1096554f4ffdSJeff Kirsher 			if (status1 & 0xC0) dev->stats.rx_length_errors++;
1097554f4ffdSJeff Kirsher 			if (status2 & 0x03) dev->stats.rx_frame_errors++;
1098554f4ffdSJeff Kirsher 			if (status2 & 0x04) dev->stats.rx_crc_errors++;
1099554f4ffdSJeff Kirsher 			if (status2 & 0x80) dev->stats.rx_dropped++;
1100554f4ffdSJeff Kirsher #ifdef YF_PROTOTYPE		/* Support for prototype hardware errata. */
1101554f4ffdSJeff Kirsher 		} else if ((yp->flags & HasMACAddrBug)  &&
11023a8e87ecSdingtianhong 			!ether_addr_equal(le32_to_cpu(yp->rx_ring_dma +
11033a8e87ecSdingtianhong 						      entry * yf_size),
11043a8e87ecSdingtianhong 					  dev->dev_addr) &&
11053a8e87ecSdingtianhong 			!ether_addr_equal(le32_to_cpu(yp->rx_ring_dma +
11063a8e87ecSdingtianhong 						      entry * yf_size),
11073a8e87ecSdingtianhong 					  "\377\377\377\377\377\377")) {
1108554f4ffdSJeff Kirsher 			if (bogus_rx++ == 0)
1109554f4ffdSJeff Kirsher 				netdev_warn(dev, "Bad frame to %pM\n",
1110554f4ffdSJeff Kirsher 					    buf_addr);
1111554f4ffdSJeff Kirsher #endif
1112554f4ffdSJeff Kirsher 		} else {
1113554f4ffdSJeff Kirsher 			struct sk_buff *skb;
1114554f4ffdSJeff Kirsher 			int pkt_len = data_size -
1115554f4ffdSJeff Kirsher 				(yp->chip_id ? 7 : 8 + buf_addr[data_size - 8]);
1116554f4ffdSJeff Kirsher 			/* To verify: Yellowfin Length should omit the CRC! */
1117554f4ffdSJeff Kirsher 
1118554f4ffdSJeff Kirsher #ifndef final_version
1119554f4ffdSJeff Kirsher 			if (yellowfin_debug > 4)
1120554f4ffdSJeff Kirsher 				printk(KERN_DEBUG "  %s() normal Rx pkt length %d of %d, bogus_cnt %d\n",
1121554f4ffdSJeff Kirsher 				       __func__, pkt_len, data_size, boguscnt);
1122554f4ffdSJeff Kirsher #endif
1123554f4ffdSJeff Kirsher 			/* Check if the packet is long enough to just pass up the skbuff
1124554f4ffdSJeff Kirsher 			   without copying to a properly sized skbuff. */
1125554f4ffdSJeff Kirsher 			if (pkt_len > rx_copybreak) {
1126554f4ffdSJeff Kirsher 				skb_put(skb = rx_skb, pkt_len);
1127554f4ffdSJeff Kirsher 				pci_unmap_single(yp->pci_dev,
1128554f4ffdSJeff Kirsher 					le32_to_cpu(yp->rx_ring[entry].addr),
1129554f4ffdSJeff Kirsher 					yp->rx_buf_sz,
1130554f4ffdSJeff Kirsher 					PCI_DMA_FROMDEVICE);
1131554f4ffdSJeff Kirsher 				yp->rx_skbuff[entry] = NULL;
1132554f4ffdSJeff Kirsher 			} else {
1133dae2e9f4SPradeep A. Dalvi 				skb = netdev_alloc_skb(dev, pkt_len + 2);
1134554f4ffdSJeff Kirsher 				if (skb == NULL)
1135554f4ffdSJeff Kirsher 					break;
1136554f4ffdSJeff Kirsher 				skb_reserve(skb, 2);	/* 16 byte align the IP header */
1137554f4ffdSJeff Kirsher 				skb_copy_to_linear_data(skb, rx_skb->data, pkt_len);
1138554f4ffdSJeff Kirsher 				skb_put(skb, pkt_len);
1139554f4ffdSJeff Kirsher 				pci_dma_sync_single_for_device(yp->pci_dev,
1140554f4ffdSJeff Kirsher 								le32_to_cpu(desc->addr),
1141554f4ffdSJeff Kirsher 								yp->rx_buf_sz,
1142554f4ffdSJeff Kirsher 								PCI_DMA_FROMDEVICE);
1143554f4ffdSJeff Kirsher 			}
1144554f4ffdSJeff Kirsher 			skb->protocol = eth_type_trans(skb, dev);
1145554f4ffdSJeff Kirsher 			netif_rx(skb);
1146554f4ffdSJeff Kirsher 			dev->stats.rx_packets++;
1147554f4ffdSJeff Kirsher 			dev->stats.rx_bytes += pkt_len;
1148554f4ffdSJeff Kirsher 		}
1149554f4ffdSJeff Kirsher 		entry = (++yp->cur_rx) % RX_RING_SIZE;
1150554f4ffdSJeff Kirsher 	}
1151554f4ffdSJeff Kirsher 
1152554f4ffdSJeff Kirsher 	/* Refill the Rx ring buffers. */
1153554f4ffdSJeff Kirsher 	for (; yp->cur_rx - yp->dirty_rx > 0; yp->dirty_rx++) {
1154554f4ffdSJeff Kirsher 		entry = yp->dirty_rx % RX_RING_SIZE;
1155554f4ffdSJeff Kirsher 		if (yp->rx_skbuff[entry] == NULL) {
1156dae2e9f4SPradeep A. Dalvi 			struct sk_buff *skb = netdev_alloc_skb(dev, yp->rx_buf_sz + 2);
1157554f4ffdSJeff Kirsher 			if (skb == NULL)
1158554f4ffdSJeff Kirsher 				break;				/* Better luck next round. */
1159554f4ffdSJeff Kirsher 			yp->rx_skbuff[entry] = skb;
1160554f4ffdSJeff Kirsher 			skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
1161554f4ffdSJeff Kirsher 			yp->rx_ring[entry].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
1162554f4ffdSJeff Kirsher 				skb->data, yp->rx_buf_sz, PCI_DMA_FROMDEVICE));
1163554f4ffdSJeff Kirsher 		}
1164554f4ffdSJeff Kirsher 		yp->rx_ring[entry].dbdma_cmd = cpu_to_le32(CMD_STOP);
1165554f4ffdSJeff Kirsher 		yp->rx_ring[entry].result_status = 0;	/* Clear complete bit. */
1166554f4ffdSJeff Kirsher 		if (entry != 0)
1167554f4ffdSJeff Kirsher 			yp->rx_ring[entry - 1].dbdma_cmd =
1168554f4ffdSJeff Kirsher 				cpu_to_le32(CMD_RX_BUF | INTR_ALWAYS | yp->rx_buf_sz);
1169554f4ffdSJeff Kirsher 		else
1170554f4ffdSJeff Kirsher 			yp->rx_ring[RX_RING_SIZE - 1].dbdma_cmd =
1171554f4ffdSJeff Kirsher 				cpu_to_le32(CMD_RX_BUF | INTR_ALWAYS | BRANCH_ALWAYS
1172554f4ffdSJeff Kirsher 							| yp->rx_buf_sz);
1173554f4ffdSJeff Kirsher 	}
1174554f4ffdSJeff Kirsher 
1175554f4ffdSJeff Kirsher 	return 0;
1176554f4ffdSJeff Kirsher }
1177554f4ffdSJeff Kirsher 
1178554f4ffdSJeff Kirsher static void yellowfin_error(struct net_device *dev, int intr_status)
1179554f4ffdSJeff Kirsher {
1180554f4ffdSJeff Kirsher 	netdev_err(dev, "Something Wicked happened! %04x\n", intr_status);
1181554f4ffdSJeff Kirsher 	/* Hmmmmm, it's not clear what to do here. */
1182554f4ffdSJeff Kirsher 	if (intr_status & (IntrTxPCIErr | IntrTxPCIFault))
1183554f4ffdSJeff Kirsher 		dev->stats.tx_errors++;
1184554f4ffdSJeff Kirsher 	if (intr_status & (IntrRxPCIErr | IntrRxPCIFault))
1185554f4ffdSJeff Kirsher 		dev->stats.rx_errors++;
1186554f4ffdSJeff Kirsher }
1187554f4ffdSJeff Kirsher 
/*
 * ndo_stop: quiesce the chip and release everything yellowfin_open()
 * acquired.  Order matters: stop the queue and mask interrupts first,
 * then halt the DMA engines, then kill the timer, and only afterwards
 * free the IRQ and the ring buffers.
 */
static int yellowfin_close(struct net_device *dev)
{
	struct yellowfin_private *yp = netdev_priv(dev);
	void __iomem *ioaddr = yp->base;
	int i;

	/* Keep the stack from queueing any more transmits. */
	netif_stop_queue (dev);

	if (yellowfin_debug > 1) {
		netdev_printk(KERN_DEBUG, dev, "Shutting down ethercard, status was Tx %04x Rx %04x Int %02x\n",
			      ioread16(ioaddr + TxStatus),
			      ioread16(ioaddr + RxStatus),
			      ioread16(ioaddr + IntrStatus));
		netdev_printk(KERN_DEBUG, dev, "Queue pointers were Tx %d / %d,  Rx %d / %d\n",
			      yp->cur_tx, yp->dirty_tx,
			      yp->cur_rx, yp->dirty_rx);
	}

	/* Disable interrupts by clearing the interrupt mask. */
	iowrite16(0x0000, ioaddr + IntrEnb);

	/* Stop the chip's Tx and Rx processes. */
	iowrite32(0x80000000, ioaddr + RxCtrl);
	iowrite32(0x80000000, ioaddr + TxCtrl);

	/* Media-check timer; must die before the rings it may reference go away. */
	del_timer(&yp->timer);

#if defined(__i386__)
	/* Heavy debug-only ring dump; reads descriptors the chip just stopped using. */
	if (yellowfin_debug > 2) {
		printk(KERN_DEBUG "  Tx ring at %08llx:\n",
				(unsigned long long)yp->tx_ring_dma);
		for (i = 0; i < TX_RING_SIZE*2; i++)
			printk(KERN_DEBUG " %c #%d desc. %08x %08x %08x %08x\n",
				   ioread32(ioaddr + TxPtr) == (long)&yp->tx_ring[i] ? '>' : ' ',
				   i, yp->tx_ring[i].dbdma_cmd, yp->tx_ring[i].addr,
				   yp->tx_ring[i].branch_addr, yp->tx_ring[i].result_status);
		printk(KERN_DEBUG "  Tx status %p:\n", yp->tx_status);
		for (i = 0; i < TX_RING_SIZE; i++)
			printk(KERN_DEBUG "   #%d status %04x %04x %04x %04x\n",
				   i, yp->tx_status[i].tx_cnt, yp->tx_status[i].tx_errs,
				   yp->tx_status[i].total_tx_cnt, yp->tx_status[i].paused);

		printk(KERN_DEBUG "  Rx ring %08llx:\n",
				(unsigned long long)yp->rx_ring_dma);
		for (i = 0; i < RX_RING_SIZE; i++) {
			printk(KERN_DEBUG " %c #%d desc. %08x %08x %08x\n",
				   ioread32(ioaddr + RxPtr) == (long)&yp->rx_ring[i] ? '>' : ' ',
				   i, yp->rx_ring[i].dbdma_cmd, yp->rx_ring[i].addr,
				   yp->rx_ring[i].result_status);
			if (yellowfin_debug > 6) {
				/* 0x69 appears to be a fill byte; dump buffers that were written to.
				   NOTE(review): casts the DMA address to a CPU pointer — only
				   plausible on i386 with a 1:1 mapping; confirm before reuse. */
				if (get_unaligned((u8*)yp->rx_ring[i].addr) != 0x69) {
					int j;

					printk(KERN_DEBUG);
					for (j = 0; j < 0x50; j++)
						pr_cont(" %04x",
							get_unaligned(((u16*)yp->rx_ring[i].addr) + j));
					pr_cont("\n");
				}
			}
		}
	}
#endif /* __i386__ debugging only */

	free_irq(yp->pci_dev->irq, dev);

	/* Free all the skbuffs in the Rx queue. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		yp->rx_ring[i].dbdma_cmd = cpu_to_le32(CMD_STOP);
		yp->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
		if (yp->rx_skbuff[i]) {
			dev_kfree_skb(yp->rx_skbuff[i]);
		}
		yp->rx_skbuff[i] = NULL;
	}
	/* And any skbs still pending on the Tx ring. */
	for (i = 0; i < TX_RING_SIZE; i++) {
		if (yp->tx_skbuff[i])
			dev_kfree_skb(yp->tx_skbuff[i]);
		yp->tx_skbuff[i] = NULL;
	}

#ifdef YF_PROTOTYPE			/* Support for prototype hardware errata. */
	if (yellowfin_debug > 0) {
		netdev_printk(KERN_DEBUG, dev, "Received %d frames that we should not have\n",
			      bogus_rx);
	}
#endif

	return 0;
}
1278554f4ffdSJeff Kirsher 
1279554f4ffdSJeff Kirsher /* Set or clear the multicast filter for this adaptor. */
1280554f4ffdSJeff Kirsher 
1281554f4ffdSJeff Kirsher static void set_rx_mode(struct net_device *dev)
1282554f4ffdSJeff Kirsher {
1283554f4ffdSJeff Kirsher 	struct yellowfin_private *yp = netdev_priv(dev);
1284554f4ffdSJeff Kirsher 	void __iomem *ioaddr = yp->base;
1285554f4ffdSJeff Kirsher 	u16 cfg_value = ioread16(ioaddr + Cnfg);
1286554f4ffdSJeff Kirsher 
1287554f4ffdSJeff Kirsher 	/* Stop the Rx process to change any value. */
1288554f4ffdSJeff Kirsher 	iowrite16(cfg_value & ~0x1000, ioaddr + Cnfg);
1289554f4ffdSJeff Kirsher 	if (dev->flags & IFF_PROMISC) {			/* Set promiscuous. */
1290554f4ffdSJeff Kirsher 		iowrite16(0x000F, ioaddr + AddrMode);
1291554f4ffdSJeff Kirsher 	} else if ((netdev_mc_count(dev) > 64) ||
1292554f4ffdSJeff Kirsher 		   (dev->flags & IFF_ALLMULTI)) {
1293554f4ffdSJeff Kirsher 		/* Too many to filter well, or accept all multicasts. */
1294554f4ffdSJeff Kirsher 		iowrite16(0x000B, ioaddr + AddrMode);
1295554f4ffdSJeff Kirsher 	} else if (!netdev_mc_empty(dev)) { /* Must use the multicast hash table. */
1296554f4ffdSJeff Kirsher 		struct netdev_hw_addr *ha;
1297554f4ffdSJeff Kirsher 		u16 hash_table[4];
1298554f4ffdSJeff Kirsher 		int i;
1299554f4ffdSJeff Kirsher 
1300554f4ffdSJeff Kirsher 		memset(hash_table, 0, sizeof(hash_table));
1301554f4ffdSJeff Kirsher 		netdev_for_each_mc_addr(ha, dev) {
1302554f4ffdSJeff Kirsher 			unsigned int bit;
1303554f4ffdSJeff Kirsher 
1304554f4ffdSJeff Kirsher 			/* Due to a bug in the early chip versions, multiple filter
1305554f4ffdSJeff Kirsher 			   slots must be set for each address. */
1306554f4ffdSJeff Kirsher 			if (yp->drv_flags & HasMulticastBug) {
1307554f4ffdSJeff Kirsher 				bit = (ether_crc_le(3, ha->addr) >> 3) & 0x3f;
1308554f4ffdSJeff Kirsher 				hash_table[bit >> 4] |= (1 << bit);
1309554f4ffdSJeff Kirsher 				bit = (ether_crc_le(4, ha->addr) >> 3) & 0x3f;
1310554f4ffdSJeff Kirsher 				hash_table[bit >> 4] |= (1 << bit);
1311554f4ffdSJeff Kirsher 				bit = (ether_crc_le(5, ha->addr) >> 3) & 0x3f;
1312554f4ffdSJeff Kirsher 				hash_table[bit >> 4] |= (1 << bit);
1313554f4ffdSJeff Kirsher 			}
1314554f4ffdSJeff Kirsher 			bit = (ether_crc_le(6, ha->addr) >> 3) & 0x3f;
1315554f4ffdSJeff Kirsher 			hash_table[bit >> 4] |= (1 << bit);
1316554f4ffdSJeff Kirsher 		}
1317554f4ffdSJeff Kirsher 		/* Copy the hash table to the chip. */
1318554f4ffdSJeff Kirsher 		for (i = 0; i < 4; i++)
1319554f4ffdSJeff Kirsher 			iowrite16(hash_table[i], ioaddr + HashTbl + i*2);
1320554f4ffdSJeff Kirsher 		iowrite16(0x0003, ioaddr + AddrMode);
1321554f4ffdSJeff Kirsher 	} else {					/* Normal, unicast/broadcast-only mode. */
1322554f4ffdSJeff Kirsher 		iowrite16(0x0001, ioaddr + AddrMode);
1323554f4ffdSJeff Kirsher 	}
1324554f4ffdSJeff Kirsher 	/* Restart the Rx process. */
1325554f4ffdSJeff Kirsher 	iowrite16(cfg_value | 0x1000, ioaddr + Cnfg);
1326554f4ffdSJeff Kirsher }
1327554f4ffdSJeff Kirsher 
1328554f4ffdSJeff Kirsher static void yellowfin_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1329554f4ffdSJeff Kirsher {
1330554f4ffdSJeff Kirsher 	struct yellowfin_private *np = netdev_priv(dev);
13317826d43fSJiri Pirko 
13327826d43fSJiri Pirko 	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
13337826d43fSJiri Pirko 	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
13347826d43fSJiri Pirko 	strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
1335554f4ffdSJeff Kirsher }
1336554f4ffdSJeff Kirsher 
/* ethtool hooks: only drvinfo is supported; everything else falls back
 * to the core defaults. */
static const struct ethtool_ops ethtool_ops = {
	.get_drvinfo = yellowfin_get_drvinfo
};
1340554f4ffdSJeff Kirsher 
1341554f4ffdSJeff Kirsher static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1342554f4ffdSJeff Kirsher {
1343554f4ffdSJeff Kirsher 	struct yellowfin_private *np = netdev_priv(dev);
1344554f4ffdSJeff Kirsher 	void __iomem *ioaddr = np->base;
1345554f4ffdSJeff Kirsher 	struct mii_ioctl_data *data = if_mii(rq);
1346554f4ffdSJeff Kirsher 
1347554f4ffdSJeff Kirsher 	switch(cmd) {
1348554f4ffdSJeff Kirsher 	case SIOCGMIIPHY:		/* Get address of MII PHY in use. */
1349554f4ffdSJeff Kirsher 		data->phy_id = np->phys[0] & 0x1f;
1350554f4ffdSJeff Kirsher 		/* Fall Through */
1351554f4ffdSJeff Kirsher 
1352554f4ffdSJeff Kirsher 	case SIOCGMIIREG:		/* Read MII PHY register. */
1353554f4ffdSJeff Kirsher 		data->val_out = mdio_read(ioaddr, data->phy_id & 0x1f, data->reg_num & 0x1f);
1354554f4ffdSJeff Kirsher 		return 0;
1355554f4ffdSJeff Kirsher 
1356554f4ffdSJeff Kirsher 	case SIOCSMIIREG:		/* Write MII PHY register. */
1357554f4ffdSJeff Kirsher 		if (data->phy_id == np->phys[0]) {
1358554f4ffdSJeff Kirsher 			u16 value = data->val_in;
1359554f4ffdSJeff Kirsher 			switch (data->reg_num) {
1360554f4ffdSJeff Kirsher 			case 0:
1361554f4ffdSJeff Kirsher 				/* Check for autonegotiation on or reset. */
1362554f4ffdSJeff Kirsher 				np->medialock = (value & 0x9000) ? 0 : 1;
1363554f4ffdSJeff Kirsher 				if (np->medialock)
1364554f4ffdSJeff Kirsher 					np->full_duplex = (value & 0x0100) ? 1 : 0;
1365554f4ffdSJeff Kirsher 				break;
1366554f4ffdSJeff Kirsher 			case 4: np->advertising = value; break;
1367554f4ffdSJeff Kirsher 			}
1368554f4ffdSJeff Kirsher 			/* Perhaps check_duplex(dev), depending on chip semantics. */
1369554f4ffdSJeff Kirsher 		}
1370554f4ffdSJeff Kirsher 		mdio_write(ioaddr, data->phy_id & 0x1f, data->reg_num & 0x1f, data->val_in);
1371554f4ffdSJeff Kirsher 		return 0;
1372554f4ffdSJeff Kirsher 	default:
1373554f4ffdSJeff Kirsher 		return -EOPNOTSUPP;
1374554f4ffdSJeff Kirsher 	}
1375554f4ffdSJeff Kirsher }
1376554f4ffdSJeff Kirsher 
1377554f4ffdSJeff Kirsher 
1378134c1f15SBill Pemberton static void yellowfin_remove_one(struct pci_dev *pdev)
1379554f4ffdSJeff Kirsher {
1380554f4ffdSJeff Kirsher 	struct net_device *dev = pci_get_drvdata(pdev);
1381554f4ffdSJeff Kirsher 	struct yellowfin_private *np;
1382554f4ffdSJeff Kirsher 
1383554f4ffdSJeff Kirsher 	BUG_ON(!dev);
1384554f4ffdSJeff Kirsher 	np = netdev_priv(dev);
1385554f4ffdSJeff Kirsher 
1386554f4ffdSJeff Kirsher         pci_free_consistent(pdev, STATUS_TOTAL_SIZE, np->tx_status,
1387554f4ffdSJeff Kirsher 		np->tx_status_dma);
1388554f4ffdSJeff Kirsher 	pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
1389554f4ffdSJeff Kirsher 	pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
1390554f4ffdSJeff Kirsher 	unregister_netdev (dev);
1391554f4ffdSJeff Kirsher 
1392554f4ffdSJeff Kirsher 	pci_iounmap(pdev, np->base);
1393554f4ffdSJeff Kirsher 
1394554f4ffdSJeff Kirsher 	pci_release_regions (pdev);
1395554f4ffdSJeff Kirsher 
1396554f4ffdSJeff Kirsher 	free_netdev (dev);
1397554f4ffdSJeff Kirsher }
1398554f4ffdSJeff Kirsher 
1399554f4ffdSJeff Kirsher 
/* PCI driver glue: matches the IDs in yellowfin_pci_tbl and routes
 * probe/remove to the handlers above. */
static struct pci_driver yellowfin_driver = {
	.name		= DRV_NAME,
	.id_table	= yellowfin_pci_tbl,
	.probe		= yellowfin_init_one,
	.remove		= yellowfin_remove_one,
};
1406554f4ffdSJeff Kirsher 
1407554f4ffdSJeff Kirsher 
/* Module entry point: announce the driver (module builds only) and
 * register with the PCI core. */
static int __init yellowfin_init (void)
{
/* when a module, this is printed whether or not devices are found in probe */
#ifdef MODULE
	/* NOTE(review): non-literal format string; safe only because "version"
	   is a driver-internal constant with no conversion specifiers. */
	printk(version);
#endif
	return pci_register_driver(&yellowfin_driver);
}
1416554f4ffdSJeff Kirsher 
1417554f4ffdSJeff Kirsher 
/* Module exit point: detach from the PCI core; per-device teardown runs
 * via yellowfin_remove_one(). */
static void __exit yellowfin_cleanup (void)
{
	pci_unregister_driver (&yellowfin_driver);
}
1422554f4ffdSJeff Kirsher 
1423554f4ffdSJeff Kirsher 
1424554f4ffdSJeff Kirsher module_init(yellowfin_init);
1425554f4ffdSJeff Kirsher module_exit(yellowfin_cleanup);
1426