/* yellowfin.c: A Packet Engines G-NIC ethernet driver for linux. */
/*
	Written 1997-2001 by Donald Becker.

	This software may be used and distributed according to the terms of
	the GNU General Public License (GPL), incorporated herein by reference.
	Drivers based on or derived from this code fall under the GPL and must
	retain the authorship, copyright and license notice.  This file is not
	a complete program and may only be used when the entire operating
	system is licensed under the GPL.

	This driver is for the Packet Engines G-NIC PCI Gigabit Ethernet adapter.
	It also supports the Symbios Logic version of the same chip core.

	The author may be reached as becker@scyld.com, or C/O
	Scyld Computing Corporation
	410 Severn Ave., Suite 210
	Annapolis MD 21403

	Support and updates available at
	http://www.scyld.com/network/yellowfin.html
	[link no longer provides useful info -jgarzik]

*/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#define DRV_NAME	"yellowfin"
#define DRV_VERSION	"2.1"
#define DRV_RELDATE	"Sep 11, 2006"

/* The user-configurable values.
   These may be modified when a driver module is loaded.*/

static int debug = 1;			/* 1 normal messages, 0 quiet .. 7 verbose. */
/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
static int max_interrupt_work = 20;
static int mtu;
#ifdef YF_PROTOTYPE			/* Support for prototype hardware errata. */
/* System-wide count of bogus-rx frames. */
static int bogus_rx;
static int dma_ctrl = 0x004A0263;	/* Constrained by errata */
static int fifo_cfg = 0x0020;		/* Bypass external Tx FIFO. */
#elif defined(YF_NEW)			/* A future perfect board :->. */
static int dma_ctrl = 0x00CAC277;	/* Override when loading module! */
static int fifo_cfg = 0x0028;
#else
static const int dma_ctrl = 0x004A0263;	/* Constrained by errata */
static const int fifo_cfg = 0x0020;	/* Bypass external Tx FIFO. */
#endif

/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
   Setting to > 1514 effectively disables this feature. */
static int rx_copybreak;

/* Used to pass the media type, etc.
   No media types are currently defined.  These exist for driver
   interoperability.
*/
#define MAX_UNITS 8			/* More are supported, limit only on options */
static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};

/* Do ugly workaround for GX server chipset errata. */
static int gx_fix;

/* Operational parameters that are set at compile time. */

/* Keep the ring sizes a power of two for efficiency.
   Making the Tx ring too long decreases the effectiveness of channel
   bonding and packet priority.
   There are no ill effects from too-large receive rings. */
#define TX_RING_SIZE	16
#define TX_QUEUE_SIZE	12		/* Must be > 4 && <= TX_RING_SIZE */
#define RX_RING_SIZE	64
#define STATUS_TOTAL_SIZE	TX_RING_SIZE*sizeof(struct tx_status_words)
#define TX_TOTAL_SIZE		2*TX_RING_SIZE*sizeof(struct yellowfin_desc)
#define RX_TOTAL_SIZE		RX_RING_SIZE*sizeof(struct yellowfin_desc)
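
/* A minimal compile-time sanity sketch, not part of the original driver:
   it assumes a C11-capable compiler (as current kernels require) and
   simply restates the constraints documented above. */
_Static_assert(TX_QUEUE_SIZE > 4 && TX_QUEUE_SIZE <= TX_RING_SIZE,
	       "TX_QUEUE_SIZE must be > 4 and <= TX_RING_SIZE");
_Static_assert((TX_RING_SIZE & (TX_RING_SIZE - 1)) == 0 &&
	       (RX_RING_SIZE & (RX_RING_SIZE - 1)) == 0,
	       "keep the ring sizes a power of two");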

/* Operational parameters that usually are not changed. */
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT	(2*HZ)
#define PKT_BUF_SZ	1536		/* Size of each temporary Rx buffer.*/

#define yellowfin_debug debug

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/mii.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>
#include <linux/bitops.h>
#include <linux/uaccess.h>
#include <asm/processor.h>		/* Processor type for cache alignment. */
#include <asm/unaligned.h>
#include <asm/io.h>

/* These identify the driver base version and may not be removed. */
static const char version[] =
  KERN_INFO DRV_NAME ".c:v1.05 1/09/2001 Written by Donald Becker <becker@scyld.com>\n"
  " (unofficial 2.4.x port, " DRV_VERSION ", " DRV_RELDATE ")\n";

MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("Packet Engines Yellowfin G-NIC Gigabit Ethernet driver");
MODULE_LICENSE("GPL");

module_param(max_interrupt_work, int, 0);
module_param(mtu, int, 0);
module_param(debug, int, 0);
module_param(rx_copybreak, int, 0);
module_param_array(options, int, NULL, 0);
module_param_array(full_duplex, int, NULL, 0);
module_param(gx_fix, int, 0);
MODULE_PARM_DESC(max_interrupt_work, "G-NIC maximum events handled per interrupt");
MODULE_PARM_DESC(mtu, "G-NIC MTU (all boards)");
MODULE_PARM_DESC(debug, "G-NIC debug level (0-7)");
MODULE_PARM_DESC(rx_copybreak, "G-NIC copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(options, "G-NIC: Bits 0-3: media type, bit 17: full duplex");
MODULE_PARM_DESC(full_duplex, "G-NIC full duplex setting(s) (1)");
MODULE_PARM_DESC(gx_fix, "G-NIC: enable GX server chipset bug workaround (0-1)");

/*
				Theory of Operation

I. Board Compatibility

This device driver is designed for the Packet Engines "Yellowfin" Gigabit
Ethernet adapter.  The G-NIC 64-bit PCI card is supported, as well as the
Symbios 53C885E dual function chip.

II. Board-specific settings

PCI bus devices are configured by the system at boot time, so no jumpers
need to be set on the board.  The system BIOS should preferably assign the
PCI INTA signal to an otherwise unused system IRQ line.
Note: Kernel versions earlier than 1.3.73 do not support shared PCI
interrupt lines.

III. Driver operation

IIIa. Ring buffers

The Yellowfin uses the Descriptor Based DMA Architecture specified by Apple.
This is a descriptor list scheme similar to that used by the EEPro100 and
Tulip.  This driver uses two statically allocated fixed-size descriptor lists
formed into rings by a branch from the final descriptor to the beginning of
the list.  The ring sizes are set at compile time by RX/TX_RING_SIZE.

The driver allocates full frame size skbuffs for the Rx ring buffers at
open() time and passes the skb->data field to the Yellowfin as receive data
buffers.  When an incoming frame is less than RX_COPYBREAK bytes long,
a fresh skbuff is allocated and the frame is copied to the new skbuff.
When the incoming frame is larger, the skbuff is passed directly up the
protocol stack and replaced by a newly allocated skbuff.

The RX_COPYBREAK value is chosen to trade-off the memory wasted by
using a full-sized skbuff for small frames vs. the copying costs of larger
frames.  For small frames the copying cost is negligible (esp. considering
that we are pre-loading the cache with immediately useful header
information).  For large frames the copying cost is non-trivial, and the
larger copy might flush the cache of useful data.
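
As an illustrative sketch only (the real receive path later in this file
differs in detail), the copybreak decision amounts to:

	if (pkt_len < rx_copybreak) {
		skb = netdev_alloc_skb(dev, pkt_len + 2);
		... copy the data and recycle the ring skbuff ...
	} else {
		... pass the ring skbuff up, then refill the slot ...
	}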

IIIb. Synchronization

The driver runs as two independent, single-threaded flows of control.  One
is the send-packet routine, which enforces single-threaded use by the
dev->tbusy flag.  The other thread is the interrupt handler, which is single
threaded by the hardware and other software.

The send packet thread has partial control over the Tx ring and 'dev->tbusy'
flag.  It sets the tbusy flag whenever it's queuing a Tx packet.  If the next
queue slot is empty, it clears the tbusy flag when finished; otherwise it sets
the 'yp->tx_full' flag.

The interrupt handler has exclusive control over the Rx ring and records stats
from the Tx ring.  After reaping the stats, it marks the Tx queue entry as
empty by incrementing the dirty_tx mark.  Iff the 'yp->tx_full' flag is set, it
clears both the tx_full and tbusy flags.
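
For example, with TX_RING_SIZE 16 and TX_QUEUE_SIZE 12, cur_tx = 30 and
dirty_tx = 20 map to ring entries 30 % 16 = 14 and 20 % 16 = 4, with
30 - 20 = 10 packets in flight; the queue is declared full once
cur_tx - dirty_tx reaches TX_QUEUE_SIZE.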

IV. Notes

Thanks to Kim Stearns of Packet Engines for providing a pair of G-NIC boards.
Thanks to Bruce Faust of Digitalscape for providing both their SYM53C885 board
and an AlphaStation to verify the Alpha port!

IVb. References

Yellowfin Engineering Design Specification, 4/23/97 Preliminary/Confidential
Symbios SYM53C885 PCI-SCSI/Fast Ethernet Multifunction Controller Preliminary
Data Manual v3.0
http://cesdis.gsfc.nasa.gov/linux/misc/NWay.html
http://cesdis.gsfc.nasa.gov/linux/misc/100mbps.html

IVc. Errata

See Packet Engines confidential appendix (prototype chips only).
*/



enum capability_flags {
	HasMII=1, FullTxStatus=2, IsGigabit=4, HasMulticastBug=8, FullRxStatus=16,
	HasMACAddrBug=32, /* Only on early revs. */
	DontUseEeprom=64, /* Don't read the MAC from the EEPROM. */
};

/* The PCI I/O space extent. */
enum {
	YELLOWFIN_SIZE	= 0x100,
};

struct pci_id_info {
	const char *name;
	struct match_info {
		int	pci, pci_mask, subsystem, subsystem_mask;
		int	revision, revision_mask;	/* Only 8 bits. */
	} id;
	int drv_flags;			/* Driver use, intended as capability flags. */
};

static const struct pci_id_info pci_id_tbl[] = {
	{"Yellowfin G-NIC Gigabit Ethernet", { 0x07021000, 0xffffffff},
	 FullTxStatus | IsGigabit | HasMulticastBug | HasMACAddrBug | DontUseEeprom},
	{"Symbios SYM53C885", { 0x07011000, 0xffffffff},
	 HasMII | DontUseEeprom },
	{ }
};

static const struct pci_device_id yellowfin_pci_tbl[] = {
	{ 0x1000, 0x0702, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
	{ 0x1000, 0x0701, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
	{ }
};
MODULE_DEVICE_TABLE (pci, yellowfin_pci_tbl);


/* Offsets to the Yellowfin registers.  Various sizes and alignments. */
enum yellowfin_offsets {
	TxCtrl=0x00, TxStatus=0x04, TxPtr=0x0C,
	TxIntrSel=0x10, TxBranchSel=0x14, TxWaitSel=0x18,
	RxCtrl=0x40, RxStatus=0x44, RxPtr=0x4C,
	RxIntrSel=0x50, RxBranchSel=0x54, RxWaitSel=0x58,
	EventStatus=0x80, IntrEnb=0x82, IntrClear=0x84, IntrStatus=0x86,
	ChipRev=0x8C, DMACtrl=0x90, TxThreshold=0x94,
	Cnfg=0xA0, FrameGap0=0xA2, FrameGap1=0xA4,
	MII_Cmd=0xA6, MII_Addr=0xA8, MII_Wr_Data=0xAA, MII_Rd_Data=0xAC,
	MII_Status=0xAE,
	RxDepth=0xB8, FlowCtrl=0xBC,
	AddrMode=0xD0, StnAddr=0xD2, HashTbl=0xD8, FIFOcfg=0xF8,
	EEStatus=0xF0, EECtrl=0xF1, EEAddr=0xF2, EERead=0xF3, EEWrite=0xF4,
	EEFeature=0xF5,
};

/* The Yellowfin Rx and Tx buffer descriptors.
   Elements are written as 32 bit for endian portability. */
struct yellowfin_desc {
	__le32 dbdma_cmd;
	__le32 addr;
	__le32 branch_addr;
	__le32 result_status;
};

struct tx_status_words {
#ifdef __BIG_ENDIAN
	u16 tx_errs;
	u16 tx_cnt;
	u16 paused;
	u16 total_tx_cnt;
#else  /* Little endian chips. */
	u16 tx_cnt;
	u16 tx_errs;
	u16 total_tx_cnt;
	u16 paused;
#endif /* __BIG_ENDIAN */
};

/* Bits in yellowfin_desc.cmd */
enum desc_cmd_bits {
	CMD_TX_PKT=0x10000000, CMD_RX_BUF=0x20000000, CMD_TXSTATUS=0x30000000,
	CMD_NOP=0x60000000, CMD_STOP=0x70000000,
	BRANCH_ALWAYS=0x0C0000, INTR_ALWAYS=0x300000, WAIT_ALWAYS=0x030000,
	BRANCH_IFTRUE=0x040000,
};

/* Bits in yellowfin_desc.status */
enum desc_status_bits { RX_EOP=0x0040, };

/* Bits in the interrupt status/mask registers. */
enum intr_status_bits {
	IntrRxDone=0x01, IntrRxInvalid=0x02, IntrRxPCIFault=0x04, IntrRxPCIErr=0x08,
	IntrTxDone=0x10, IntrTxInvalid=0x20, IntrTxPCIFault=0x40, IntrTxPCIErr=0x80,
	IntrEarlyRx=0x100, IntrWakeup=0x200, };

#define PRIV_ALIGN	31	/* Required alignment mask */
#define MII_CNT		4
struct yellowfin_private {
	/* Descriptor rings first for alignment.
	   Tx requires a second descriptor for status. */
	struct yellowfin_desc *rx_ring;
	struct yellowfin_desc *tx_ring;
	struct sk_buff* rx_skbuff[RX_RING_SIZE];
	struct sk_buff* tx_skbuff[TX_RING_SIZE];
	dma_addr_t rx_ring_dma;
	dma_addr_t tx_ring_dma;

	struct tx_status_words *tx_status;
	dma_addr_t tx_status_dma;

	struct timer_list timer;	/* Media selection timer. */
	/* Frequently used and paired value: keep adjacent for cache effect. */
	int chip_id, drv_flags;
	struct pci_dev *pci_dev;
	unsigned int cur_rx, dirty_rx;	/* Producer/consumer ring indices */
	unsigned int rx_buf_sz;		/* Based on MTU+slack. */
	struct tx_status_words *tx_tail_desc;
	unsigned int cur_tx, dirty_tx;
	int tx_threshold;
	unsigned int tx_full:1;		/* The Tx queue is full. */
	unsigned int full_duplex:1;	/* Full-duplex operation requested. */
	unsigned int duplex_lock:1;
	unsigned int medialock:1;	/* Do not sense media. */
	unsigned int default_port:4;	/* Last dev->if_port value. */
	/* MII transceiver section. */
	int mii_cnt;			/* MII device addresses. */
	u16 advertising;		/* NWay media advertisement */
	unsigned char phys[MII_CNT];	/* MII device addresses, only first one used */
	spinlock_t lock;
	void __iomem *base;
};

static int read_eeprom(void __iomem *ioaddr, int location);
static int mdio_read(void __iomem *ioaddr, int phy_id, int location);
static void mdio_write(void __iomem *ioaddr, int phy_id, int location, int value);
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static int yellowfin_open(struct net_device *dev);
static void yellowfin_timer(struct timer_list *t);
static void yellowfin_tx_timeout(struct net_device *dev, unsigned int txqueue);
static int yellowfin_init_ring(struct net_device *dev);
static netdev_tx_t yellowfin_start_xmit(struct sk_buff *skb,
					struct net_device *dev);
static irqreturn_t yellowfin_interrupt(int irq, void *dev_instance);
static int yellowfin_rx(struct net_device *dev);
static void yellowfin_error(struct net_device *dev, int intr_status);
static int yellowfin_close(struct net_device *dev);
static void set_rx_mode(struct net_device *dev);
static const struct ethtool_ops ethtool_ops;

static const struct net_device_ops netdev_ops = {
	.ndo_open		= yellowfin_open,
	.ndo_stop		= yellowfin_close,
	.ndo_start_xmit		= yellowfin_start_xmit,
	.ndo_set_rx_mode	= set_rx_mode,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_eth_ioctl		= netdev_ioctl,
	.ndo_tx_timeout		= yellowfin_tx_timeout,
};

static int yellowfin_init_one(struct pci_dev *pdev,
			      const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct yellowfin_private *np;
	int irq;
	int chip_idx = ent->driver_data;
	static int find_cnt;
	void __iomem *ioaddr;
	int i, option = find_cnt < MAX_UNITS ? options[find_cnt] : 0;
	int drv_flags = pci_id_tbl[chip_idx].drv_flags;
	void *ring_space;
	dma_addr_t ring_dma;
#ifdef USE_IO_OPS
	int bar = 0;
#else
	int bar = 1;
#endif
	u8 addr[ETH_ALEN];

	/* when built into the kernel, we only print version if device is found */
#ifndef MODULE
	static int printed_version;
	if (!printed_version++)
		printk(version);
#endif

	i = pci_enable_device(pdev);
	if (i) return i;

	dev = alloc_etherdev(sizeof(*np));
	if (!dev)
		return -ENOMEM;

	SET_NETDEV_DEV(dev, &pdev->dev);

	np = netdev_priv(dev);

	if (pci_request_regions(pdev, DRV_NAME))
		goto err_out_free_netdev;

	pci_set_master (pdev);

	ioaddr = pci_iomap(pdev, bar, YELLOWFIN_SIZE);
	if (!ioaddr)
		goto err_out_free_res;

	irq = pdev->irq;

	if (drv_flags & DontUseEeprom)
		for (i = 0; i < 6; i++)
			addr[i] = ioread8(ioaddr + StnAddr + i);
	else {
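		/* If offset 6 reads back as 0xff, the station address
		   apparently lives in the upper half of the EEPROM. */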
		int ee_offset = (read_eeprom(ioaddr, 6) == 0xff ? 0x100 : 0);
		for (i = 0; i < 6; i++)
			addr[i] = read_eeprom(ioaddr, ee_offset + i);
	}
	eth_hw_addr_set(dev, addr);

	/* Reset the chip. */
	iowrite32(0x80000000, ioaddr + DMACtrl);

	pci_set_drvdata(pdev, dev);
	spin_lock_init(&np->lock);

	np->pci_dev = pdev;
	np->chip_id = chip_idx;
	np->drv_flags = drv_flags;
	np->base = ioaddr;

	ring_space = dma_alloc_coherent(&pdev->dev, TX_TOTAL_SIZE, &ring_dma,
					GFP_KERNEL);
	if (!ring_space)
		goto err_out_cleardev;
	np->tx_ring = ring_space;
	np->tx_ring_dma = ring_dma;

	ring_space = dma_alloc_coherent(&pdev->dev, RX_TOTAL_SIZE, &ring_dma,
					GFP_KERNEL);
	if (!ring_space)
		goto err_out_unmap_tx;
	np->rx_ring = ring_space;
	np->rx_ring_dma = ring_dma;

	ring_space = dma_alloc_coherent(&pdev->dev, STATUS_TOTAL_SIZE,
					&ring_dma, GFP_KERNEL);
	if (!ring_space)
		goto err_out_unmap_rx;
	np->tx_status = ring_space;
	np->tx_status_dma = ring_dma;

	if (dev->mem_start)
		option = dev->mem_start;

	/* The lower four bits are the media type. */
	if (option > 0) {
		if (option & 0x200)
			np->full_duplex = 1;
		np->default_port = option & 15;
		if (np->default_port)
			np->medialock = 1;
	}
	if (find_cnt < MAX_UNITS && full_duplex[find_cnt] > 0)
		np->full_duplex = 1;

	if (np->full_duplex)
		np->duplex_lock = 1;

	/* The Yellowfin-specific entries in the device structure. */
	dev->netdev_ops = &netdev_ops;
	dev->ethtool_ops = &ethtool_ops;
	dev->watchdog_timeo = TX_TIMEOUT;

	if (mtu)
		dev->mtu = mtu;

	i = register_netdev(dev);
	if (i)
		goto err_out_unmap_status;

	netdev_info(dev, "%s type %8x at %p, %pM, IRQ %d\n",
		    pci_id_tbl[chip_idx].name,
		    ioread32(ioaddr + ChipRev), ioaddr,
		    dev->dev_addr, irq);

	if (np->drv_flags & HasMII) {
		int phy, phy_idx = 0;
		for (phy = 0; phy < 32 && phy_idx < MII_CNT; phy++) {
			int mii_status = mdio_read(ioaddr, phy, 1);
			if (mii_status != 0xffff && mii_status != 0x0000) {
				np->phys[phy_idx++] = phy;
				np->advertising = mdio_read(ioaddr, phy, 4);
				netdev_info(dev, "MII PHY found at address %d, status 0x%04x advertising %04x\n",
					    phy, mii_status, np->advertising);
			}
		}
		np->mii_cnt = phy_idx;
	}

	find_cnt++;

	return 0;

err_out_unmap_status:
	dma_free_coherent(&pdev->dev, STATUS_TOTAL_SIZE, np->tx_status,
			  np->tx_status_dma);
err_out_unmap_rx:
	dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, np->rx_ring,
			  np->rx_ring_dma);
err_out_unmap_tx:
	dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, np->tx_ring,
			  np->tx_ring_dma);
err_out_cleardev:
	pci_iounmap(pdev, ioaddr);
err_out_free_res:
	pci_release_regions(pdev);
err_out_free_netdev:
	free_netdev (dev);
	return -ENODEV;
}

static int read_eeprom(void __iomem *ioaddr, int location)
{
	int bogus_cnt = 10000;		/* Typical 33 MHz: 1050 ticks */

	iowrite8(location, ioaddr + EEAddr);
	iowrite8(0x30 | ((location >> 8) & 7), ioaddr + EECtrl);
	while ((ioread8(ioaddr + EEStatus) & 0x80) && --bogus_cnt > 0)
		;
	return ioread8(ioaddr + EERead);
}

/* MII Management Data I/O accesses.
   These routines assume the MDIO controller is idle, and do not exit until
   the command is finished. */

static int mdio_read(void __iomem *ioaddr, int phy_id, int location)
{
	int i;

	iowrite16((phy_id<<8) + location, ioaddr + MII_Addr);
	iowrite16(1, ioaddr + MII_Cmd);
	for (i = 10000; i >= 0; i--)
		if ((ioread16(ioaddr + MII_Status) & 1) == 0)
			break;
	return ioread16(ioaddr + MII_Rd_Data);
}

static void mdio_write(void __iomem *ioaddr, int phy_id, int location, int value)
{
	int i;

	iowrite16((phy_id<<8) + location, ioaddr + MII_Addr);
	iowrite16(value, ioaddr + MII_Wr_Data);

	/* Wait for the command to finish. */
	for (i = 10000; i >= 0; i--)
		if ((ioread16(ioaddr + MII_Status) & 1) == 0)
			break;
}
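
#if 0	/* Illustrative sketch only, not used by the driver: one way these
	 * accessors compose, e.g. restarting autonegotiation on a PHY
	 * (BMCR bit names come from <linux/mii.h>, included above).
	 */
static void example_restart_aneg(void __iomem *ioaddr, int phy_id)
{
	int bmcr = mdio_read(ioaddr, phy_id, MII_BMCR);

	mdio_write(ioaddr, phy_id, MII_BMCR,
		   bmcr | BMCR_ANENABLE | BMCR_ANRESTART);
}
#endif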


static int yellowfin_open(struct net_device *dev)
{
	struct yellowfin_private *yp = netdev_priv(dev);
	const int irq = yp->pci_dev->irq;
	void __iomem *ioaddr = yp->base;
	int i, rc;

	/* Reset the chip. */
	iowrite32(0x80000000, ioaddr + DMACtrl);

	rc = request_irq(irq, yellowfin_interrupt, IRQF_SHARED, dev->name, dev);
	if (rc)
		return rc;

	rc = yellowfin_init_ring(dev);
	if (rc < 0)
		goto err_free_irq;

	iowrite32(yp->rx_ring_dma, ioaddr + RxPtr);
	iowrite32(yp->tx_ring_dma, ioaddr + TxPtr);

	for (i = 0; i < 6; i++)
		iowrite8(dev->dev_addr[i], ioaddr + StnAddr + i);

	/* Set up various condition 'select' registers.
	   There are no options here. */
	iowrite32(0x00800080, ioaddr + TxIntrSel);	/* Interrupt on Tx abort */
	iowrite32(0x00800080, ioaddr + TxBranchSel);	/* Branch on Tx abort */
	iowrite32(0x00400040, ioaddr + TxWaitSel);	/* Wait on Tx status */
	iowrite32(0x00400040, ioaddr + RxIntrSel);	/* Interrupt on Rx done */
	iowrite32(0x00400040, ioaddr + RxBranchSel);	/* Branch on Rx error */
	iowrite32(0x00400040, ioaddr + RxWaitSel);	/* Wait on Rx done */
	/* Initialize other registers: with so many, this will eventually be
	   converted to an offset/value list. */
	iowrite32(dma_ctrl, ioaddr + DMACtrl);
	iowrite16(fifo_cfg, ioaddr + FIFOcfg);
	/* Enable automatic generation of flow control frames, period 0xffff. */
	iowrite32(0x0030FFFF, ioaddr + FlowCtrl);

	yp->tx_threshold = 32;
	iowrite32(yp->tx_threshold, ioaddr + TxThreshold);

	if (dev->if_port == 0)
		dev->if_port = yp->default_port;

	netif_start_queue(dev);

	/* Setting the Rx mode will start the Rx process. */
	if (yp->drv_flags & IsGigabit) {
		/* We are always in full-duplex mode with gigabit! */
		yp->full_duplex = 1;
		iowrite16(0x01CF, ioaddr + Cnfg);
	} else {
		iowrite16(0x0018, ioaddr + FrameGap0); /* 0060/4060 for non-MII 10baseT */
		iowrite16(0x1018, ioaddr + FrameGap1);
		iowrite16(0x101C | (yp->full_duplex ? 2 : 0), ioaddr + Cnfg);
	}
	set_rx_mode(dev);

	/* Enable interrupts by setting the interrupt mask. */
	iowrite16(0x81ff, ioaddr + IntrEnb);		/* See enum intr_status_bits */
	iowrite16(0x0000, ioaddr + EventStatus);	/* Clear non-interrupting events */
	iowrite32(0x80008000, ioaddr + RxCtrl);		/* Start Rx and Tx channels. */
	iowrite32(0x80008000, ioaddr + TxCtrl);

	if (yellowfin_debug > 2) {
		netdev_printk(KERN_DEBUG, dev, "Done %s()\n", __func__);
	}

	/* Set the timer to check for link beat. */
	timer_setup(&yp->timer, yellowfin_timer, 0);
	yp->timer.expires = jiffies + 3*HZ;
	add_timer(&yp->timer);
out:
	return rc;

err_free_irq:
	free_irq(irq, dev);
	goto out;
}

static void yellowfin_timer(struct timer_list *t)
{
	struct yellowfin_private *yp = from_timer(yp, t, timer);
	struct net_device *dev = pci_get_drvdata(yp->pci_dev);
	void __iomem *ioaddr = yp->base;
	int next_tick = 60*HZ;

	if (yellowfin_debug > 3) {
		netdev_printk(KERN_DEBUG, dev, "Yellowfin timer tick, status %08x\n",
			      ioread16(ioaddr + IntrStatus));
	}

	if (yp->mii_cnt) {
		int bmsr = mdio_read(ioaddr, yp->phys[0], MII_BMSR);
		int lpa = mdio_read(ioaddr, yp->phys[0], MII_LPA);
		int negotiated = lpa & yp->advertising;
		if (yellowfin_debug > 1)
			netdev_printk(KERN_DEBUG, dev, "MII #%d status register is %04x, link partner capability %04x\n",
				      yp->phys[0], bmsr, lpa);

		yp->full_duplex = mii_duplex(yp->duplex_lock, negotiated);

		iowrite16(0x101C | (yp->full_duplex ? 2 : 0), ioaddr + Cnfg);

		if (bmsr & BMSR_LSTATUS)
			next_tick = 60*HZ;
		else
			next_tick = 3*HZ;
	}

	yp->timer.expires = jiffies + next_tick;
	add_timer(&yp->timer);
}

static void yellowfin_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct yellowfin_private *yp = netdev_priv(dev);
	void __iomem *ioaddr = yp->base;

	netdev_warn(dev, "Yellowfin transmit timed out at %d/%d Tx status %04x, Rx status %04x, resetting...\n",
		    yp->cur_tx, yp->dirty_tx,
		    ioread32(ioaddr + TxStatus),
		    ioread32(ioaddr + RxStatus));

	/* Note: these should be KERN_DEBUG. */
	if (yellowfin_debug) {
		int i;
		pr_warn(" Rx ring %p: ", yp->rx_ring);
		for (i = 0; i < RX_RING_SIZE; i++)
			pr_cont(" %08x", yp->rx_ring[i].result_status);
		pr_cont("\n");
		pr_warn(" Tx ring %p: ", yp->tx_ring);
		for (i = 0; i < TX_RING_SIZE; i++)
			pr_cont(" %04x /%08x",
				yp->tx_status[i].tx_errs,
				yp->tx_ring[i].result_status);
		pr_cont("\n");
	}

	/* If the hardware is found to hang regularly, we will update the code
	   to reinitialize the chip here. */
	dev->if_port = 0;

	/* Wake the potentially-idle transmit channel. */
	iowrite32(0x10001000, yp->base + TxCtrl);
	if (yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE)
		netif_wake_queue (dev);		/* Typical path */

	netif_trans_update(dev); /* prevent tx timeout */
	dev->stats.tx_errors++;
}

/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static int yellowfin_init_ring(struct net_device *dev)
{
	struct yellowfin_private *yp = netdev_priv(dev);
	int i, j;

	yp->tx_full = 0;
	yp->cur_rx = yp->cur_tx = 0;
	yp->dirty_tx = 0;

	yp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);

	for (i = 0; i < RX_RING_SIZE; i++) {
		yp->rx_ring[i].dbdma_cmd =
			cpu_to_le32(CMD_RX_BUF | INTR_ALWAYS | yp->rx_buf_sz);
		yp->rx_ring[i].branch_addr = cpu_to_le32(yp->rx_ring_dma +
			((i+1)%RX_RING_SIZE)*sizeof(struct yellowfin_desc));
	}

	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb = netdev_alloc_skb(dev, yp->rx_buf_sz + 2);
		yp->rx_skbuff[i] = skb;
		if (skb == NULL)
			break;
		skb_reserve(skb, 2);	/* 16 byte align the IP header. */
		yp->rx_ring[i].addr = cpu_to_le32(dma_map_single(&yp->pci_dev->dev,
								 skb->data,
								 yp->rx_buf_sz,
								 DMA_FROM_DEVICE));
	}
	if (i != RX_RING_SIZE) {
		for (j = 0; j < i; j++)
			dev_kfree_skb(yp->rx_skbuff[j]);
		return -ENOMEM;
	}
	yp->rx_ring[i-1].dbdma_cmd = cpu_to_le32(CMD_STOP);
	yp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);

#define NO_TXSTATS
#ifdef NO_TXSTATS
	/* In this mode the Tx ring needs only a single descriptor. */
	for (i = 0; i < TX_RING_SIZE; i++) {
		yp->tx_skbuff[i] = NULL;
		yp->tx_ring[i].dbdma_cmd = cpu_to_le32(CMD_STOP);
		yp->tx_ring[i].branch_addr = cpu_to_le32(yp->tx_ring_dma +
			((i+1)%TX_RING_SIZE)*sizeof(struct yellowfin_desc));
	}
	/* Wrap ring */
	yp->tx_ring[--i].dbdma_cmd = cpu_to_le32(CMD_STOP | BRANCH_ALWAYS);
#else
{
	/* Tx ring needs a pair of descriptors, the second for the status. */
	for (i = 0; i < TX_RING_SIZE; i++) {
		j = 2*i;
		yp->tx_skbuff[i] = NULL;
		/* Branch on Tx error. */
		yp->tx_ring[j].dbdma_cmd = cpu_to_le32(CMD_STOP);
		yp->tx_ring[j].branch_addr = cpu_to_le32(yp->tx_ring_dma +
			(j+1)*sizeof(struct yellowfin_desc));
		j++;
		if (yp->drv_flags & FullTxStatus) {
			yp->tx_ring[j].dbdma_cmd =
				cpu_to_le32(CMD_TXSTATUS | sizeof(*yp->tx_status));
			yp->tx_ring[j].request_cnt = sizeof(*yp->tx_status);
			yp->tx_ring[j].addr = cpu_to_le32(yp->tx_status_dma +
				i*sizeof(struct tx_status_words));
		} else {
			/* Symbios chips write only tx_errs word. */
			yp->tx_ring[j].dbdma_cmd =
				cpu_to_le32(CMD_TXSTATUS | INTR_ALWAYS | 2);
			yp->tx_ring[j].request_cnt = 2;
			/* Om pade ummmmm... */
			yp->tx_ring[j].addr = cpu_to_le32(yp->tx_status_dma +
				i*sizeof(struct tx_status_words) +
				&(yp->tx_status[0].tx_errs) -
				&(yp->tx_status[0]));
		}
		yp->tx_ring[j].branch_addr = cpu_to_le32(yp->tx_ring_dma +
			((j+1)%(2*TX_RING_SIZE))*sizeof(struct yellowfin_desc));
	}
	/* Wrap ring */
	yp->tx_ring[++j].dbdma_cmd |= cpu_to_le32(BRANCH_ALWAYS | INTR_ALWAYS);
}
#endif
	yp->tx_tail_desc = &yp->tx_status[0];
	return 0;
}

static netdev_tx_t yellowfin_start_xmit(struct sk_buff *skb,
					struct net_device *dev)
{
	struct yellowfin_private *yp = netdev_priv(dev);
	unsigned entry;
	int len = skb->len;

	netif_stop_queue (dev);

	/* Note: Ordering is important here, set the field with the
	   "ownership" bit last, and only then increment cur_tx. */

	/* Calculate the next Tx descriptor entry. */
	entry = yp->cur_tx % TX_RING_SIZE;

	if (gx_fix) {	/* Note: only works for paddable protocols e.g. IP. */
		int cacheline_end = ((unsigned long)skb->data + skb->len) % 32;
		/* Fix GX chipset errata. */
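		/* (As the check below implies, the workaround pads any frame
		   that would otherwise end in the last eight bytes of a
		   32-byte cache line.) */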
		if (cacheline_end > 24 || cacheline_end == 0) {
			len = skb->len + 32 - cacheline_end + 1;
			if (skb_padto(skb, len)) {
				yp->tx_skbuff[entry] = NULL;
				netif_wake_queue(dev);
				return NETDEV_TX_OK;
			}
		}
	}
	yp->tx_skbuff[entry] = skb;

#ifdef NO_TXSTATS
	yp->tx_ring[entry].addr = cpu_to_le32(dma_map_single(&yp->pci_dev->dev,
							     skb->data,
							     len, DMA_TO_DEVICE));
	yp->tx_ring[entry].result_status = 0;
	if (entry >= TX_RING_SIZE-1) {
		/* New stop command. */
		yp->tx_ring[0].dbdma_cmd = cpu_to_le32(CMD_STOP);
		yp->tx_ring[TX_RING_SIZE-1].dbdma_cmd =
			cpu_to_le32(CMD_TX_PKT|BRANCH_ALWAYS | len);
	} else {
		yp->tx_ring[entry+1].dbdma_cmd = cpu_to_le32(CMD_STOP);
		yp->tx_ring[entry].dbdma_cmd =
			cpu_to_le32(CMD_TX_PKT | BRANCH_IFTRUE | len);
	}
	yp->cur_tx++;
#else
	yp->tx_ring[entry<<1].request_cnt = len;
	yp->tx_ring[entry<<1].addr = cpu_to_le32(dma_map_single(&yp->pci_dev->dev,
								skb->data,
								len, DMA_TO_DEVICE));
	/* The input_last (status-write) command is constant, but we must
	   rewrite the subsequent 'stop' command. */

	yp->cur_tx++;
	{
		unsigned next_entry = yp->cur_tx % TX_RING_SIZE;
		yp->tx_ring[next_entry<<1].dbdma_cmd = cpu_to_le32(CMD_STOP);
	}
	/* Final step -- overwrite the old 'stop' command. */

	yp->tx_ring[entry<<1].dbdma_cmd =
		cpu_to_le32( ((entry % 6) == 0 ? CMD_TX_PKT|INTR_ALWAYS|BRANCH_IFTRUE :
					  CMD_TX_PKT | BRANCH_IFTRUE) | len);
#endif

	/* Non-x86 Todo: explicitly flush cache lines here. */

	/* Wake the potentially-idle transmit channel. */
	iowrite32(0x10001000, yp->base + TxCtrl);

	if (yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE)
		netif_start_queue (dev);		/* Typical path */
	else
		yp->tx_full = 1;

	if (yellowfin_debug > 4) {
		netdev_printk(KERN_DEBUG, dev, "Yellowfin transmit frame #%d queued in slot %d\n",
			      yp->cur_tx, entry);
	}
	return NETDEV_TX_OK;
}

/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
static irqreturn_t yellowfin_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct yellowfin_private *yp;
	void __iomem *ioaddr;
	int boguscnt = max_interrupt_work;
	unsigned int handled = 0;

	yp = netdev_priv(dev);
	ioaddr = yp->base;

	spin_lock (&yp->lock);

	do {
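		/* Reading IntrClear evidently returns and acknowledges the
		   pending interrupt sources in one access. */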
		u16 intr_status = ioread16(ioaddr + IntrClear);

		if (yellowfin_debug > 4)
			netdev_printk(KERN_DEBUG, dev, "Yellowfin interrupt, status %04x\n",
				      intr_status);

		if (intr_status == 0)
			break;
		handled = 1;

		if (intr_status & (IntrRxDone | IntrEarlyRx)) {
			yellowfin_rx(dev);
			iowrite32(0x10001000, ioaddr + RxCtrl);	/* Wake Rx engine. */
		}

#ifdef NO_TXSTATS
		for (; yp->cur_tx - yp->dirty_tx > 0; yp->dirty_tx++) {
			int entry = yp->dirty_tx % TX_RING_SIZE;
			struct sk_buff *skb;

			if (yp->tx_ring[entry].result_status == 0)
				break;
			skb = yp->tx_skbuff[entry];
			dev->stats.tx_packets++;
			dev->stats.tx_bytes += skb->len;
			/* Free the original skb. */
			dma_unmap_single(&yp->pci_dev->dev,
					 le32_to_cpu(yp->tx_ring[entry].addr),
					 skb->len, DMA_TO_DEVICE);
			dev_consume_skb_irq(skb);
			yp->tx_skbuff[entry] = NULL;
		}
		if (yp->tx_full &&
		    yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE - 4) {
			/* The ring is no longer full, clear tbusy. */
			yp->tx_full = 0;
			netif_wake_queue(dev);
		}
#else
		if ((intr_status & IntrTxDone) || (yp->tx_tail_desc->tx_errs)) {
			unsigned dirty_tx = yp->dirty_tx;

			for (dirty_tx = yp->dirty_tx; yp->cur_tx - dirty_tx > 0;
			     dirty_tx++) {
				/* Todo: optimize this. */
				int entry = dirty_tx % TX_RING_SIZE;
				u16 tx_errs = yp->tx_status[entry].tx_errs;
				struct sk_buff *skb;

#ifndef final_version
				if (yellowfin_debug > 5)
					netdev_printk(KERN_DEBUG, dev, "Tx queue %d check, Tx status %04x %04x %04x %04x\n",
						      entry,
						      yp->tx_status[entry].tx_cnt,
						      yp->tx_status[entry].tx_errs,
						      yp->tx_status[entry].total_tx_cnt,
						      yp->tx_status[entry].paused);
#endif
				if (tx_errs == 0)
					break;	/* It still hasn't been Txed */
				skb = yp->tx_skbuff[entry];
				if (tx_errs & 0xF810) {
					/* There was a major error; log it. */
#ifndef final_version
					if (yellowfin_debug > 1)
						netdev_printk(KERN_DEBUG, dev, "Transmit error, Tx status %04x\n",
							      tx_errs);
#endif
					dev->stats.tx_errors++;
					if (tx_errs & 0xF800) dev->stats.tx_aborted_errors++;
					if (tx_errs & 0x0800) dev->stats.tx_carrier_errors++;
					if (tx_errs & 0x2000) dev->stats.tx_window_errors++;
					if (tx_errs & 0x8000) dev->stats.tx_fifo_errors++;
				} else {
#ifndef final_version
					if (yellowfin_debug > 4)
						netdev_printk(KERN_DEBUG, dev, "Normal transmit, Tx status %04x\n",
							      tx_errs);
#endif
					dev->stats.tx_bytes += skb->len;
					dev->stats.collisions += tx_errs & 15;
					dev->stats.tx_packets++;
				}
				/* Free the original skb. */
				dma_unmap_single(&yp->pci_dev->dev,
						 yp->tx_ring[entry << 1].addr,
						 skb->len, DMA_TO_DEVICE);
				dev_consume_skb_irq(skb);
				yp->tx_skbuff[entry] = NULL;
1000554f4ffdSJeff Kirsher /* Mark status as empty. */
1001554f4ffdSJeff Kirsher yp->tx_status[entry].tx_errs = 0;
1002554f4ffdSJeff Kirsher }

#ifndef final_version
			if (yp->cur_tx - dirty_tx > TX_RING_SIZE) {
				netdev_err(dev, "Out-of-sync dirty pointer, %d vs. %d, full=%d\n",
					   dirty_tx, yp->cur_tx, yp->tx_full);
				dirty_tx += TX_RING_SIZE;
			}
#endif

			if (yp->tx_full &&
			    yp->cur_tx - dirty_tx < TX_QUEUE_SIZE - 2) {
				/* The ring is no longer full, clear tbusy. */
				yp->tx_full = 0;
				netif_wake_queue(dev);
			}

			yp->dirty_tx = dirty_tx;
			yp->tx_tail_desc = &yp->tx_status[dirty_tx % TX_RING_SIZE];
		}
#endif

		/* Log errors and other uncommon events. */
		if (intr_status & 0x2ee)	/* Abnormal error summary. */
			yellowfin_error(dev, intr_status);

		if (--boguscnt < 0) {
			netdev_warn(dev, "Too much work at interrupt, status=%#04x\n",
				    intr_status);
			break;
		}
	} while (1);

	if (yellowfin_debug > 3)
		netdev_printk(KERN_DEBUG, dev, "exiting interrupt, status=%#04x\n",
			      ioread16(ioaddr + IntrStatus));

	spin_unlock(&yp->lock);
	return IRQ_RETVAL(handled);
}

/* This routine is logically part of the interrupt handler, but separated
   for clarity and better register allocation. */
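/* Sketch of the scheme implemented below: the chip appends a two-byte frame
 * status to the tail of each received buffer, so frame_status is read from
 * the last two bytes of the data area rather than from the descriptor.  A
 * descriptor whose result_status is still zero has not completed, and a
 * completed descriptor without RX_EOP set means the frame spanned multiple
 * buffers, which is accounted as a length error. */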
static int yellowfin_rx(struct net_device *dev)
{
	struct yellowfin_private *yp = netdev_priv(dev);
	int entry = yp->cur_rx % RX_RING_SIZE;
	int boguscnt = yp->dirty_rx + RX_RING_SIZE - yp->cur_rx;

	if (yellowfin_debug > 4) {
		printk(KERN_DEBUG " In yellowfin_rx(), entry %d status %08x\n",
		       entry, yp->rx_ring[entry].result_status);
		printk(KERN_DEBUG " #%d desc. %08x %08x %08x\n",
		       entry, yp->rx_ring[entry].dbdma_cmd, yp->rx_ring[entry].addr,
		       yp->rx_ring[entry].result_status);
	}

	/* If EOP is set on the next entry, it's a new packet. Send it up. */
	while (1) {
		struct yellowfin_desc *desc = &yp->rx_ring[entry];
		struct sk_buff *rx_skb = yp->rx_skbuff[entry];
		s16 frame_status;
		u16 desc_status;
		int data_size, __maybe_unused yf_size;
		u8 *buf_addr;

		if (!desc->result_status)
			break;
		dma_sync_single_for_cpu(&yp->pci_dev->dev,
					le32_to_cpu(desc->addr),
					yp->rx_buf_sz, DMA_FROM_DEVICE);
		desc_status = le32_to_cpu(desc->result_status) >> 16;
		buf_addr = rx_skb->data;
		data_size = (le32_to_cpu(desc->dbdma_cmd) -
			     le32_to_cpu(desc->result_status)) & 0xffff;
		frame_status = get_unaligned_le16(&(buf_addr[data_size - 2]));
		if (yellowfin_debug > 4)
			printk(KERN_DEBUG " %s() status was %04x\n",
			       __func__, frame_status);
		if (--boguscnt < 0)
			break;

		yf_size = sizeof(struct yellowfin_desc);

		if (!(desc_status & RX_EOP)) {
			if (data_size != 0)
				netdev_warn(dev, "Oversized Ethernet frame spanned multiple buffers, status %04x, data_size %d!\n",
					    desc_status, data_size);
			dev->stats.rx_length_errors++;
		} else if ((yp->drv_flags & IsGigabit) && (frame_status & 0x0038)) {
			/* There was an error. */
			if (yellowfin_debug > 3)
				printk(KERN_DEBUG " %s() Rx error was %04x\n",
				       __func__, frame_status);
			dev->stats.rx_errors++;
			if (frame_status & 0x0060) dev->stats.rx_length_errors++;
			if (frame_status & 0x0008) dev->stats.rx_frame_errors++;
			if (frame_status & 0x0010) dev->stats.rx_crc_errors++;
			if (frame_status < 0) dev->stats.rx_dropped++;
		} else if (!(yp->drv_flags & IsGigabit) &&
			   ((buf_addr[data_size-1] & 0x85) || buf_addr[data_size-2] & 0xC0)) {
			u8 status1 = buf_addr[data_size-2];
			u8 status2 = buf_addr[data_size-1];
			dev->stats.rx_errors++;
			if (status1 & 0xC0) dev->stats.rx_length_errors++;
			if (status2 & 0x03) dev->stats.rx_frame_errors++;
			if (status2 & 0x04) dev->stats.rx_crc_errors++;
			if (status2 & 0x80) dev->stats.rx_dropped++;
#ifdef YF_PROTOTYPE	/* Support for prototype hardware errata. */
		} else if ((yp->flags & HasMACAddrBug) &&
			   !ether_addr_equal(le32_to_cpu(yp->rx_ring_dma +
							 entry * yf_size),
					     dev->dev_addr) &&
			   !ether_addr_equal(le32_to_cpu(yp->rx_ring_dma +
							 entry * yf_size),
					     "\377\377\377\377\377\377")) {
			if (bogus_rx++ == 0)
				netdev_warn(dev, "Bad frame to %pM\n",
					    buf_addr);
#endif
		} else {
			struct sk_buff *skb;
			int pkt_len = data_size -
				(yp->chip_id ? 7 : 8 + buf_addr[data_size - 8]);
			/* To verify: Yellowfin Length should omit the CRC! */

#ifndef final_version
			if (yellowfin_debug > 4)
				printk(KERN_DEBUG " %s() normal Rx pkt length %d of %d, bogus_cnt %d\n",
				       __func__, pkt_len, data_size, boguscnt);
#endif
			/* Check if the packet is long enough to just pass up the skbuff
			   without copying to a properly sized skbuff. */
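			/* Illustrative example: rx_copybreak defaults to 0,
			 * so every frame takes the zero-copy path below.
			 * Setting it to, say, 200 would instead copy frames
			 * of up to 200 bytes into a freshly allocated skb
			 * and leave the ring buffer mapped for reuse. */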
			if (pkt_len > rx_copybreak) {
				skb = rx_skb;
				skb_put(skb, pkt_len);
				dma_unmap_single(&yp->pci_dev->dev,
						 le32_to_cpu(yp->rx_ring[entry].addr),
						 yp->rx_buf_sz,
						 DMA_FROM_DEVICE);
				yp->rx_skbuff[entry] = NULL;
			} else {
				skb = netdev_alloc_skb(dev, pkt_len + 2);
				if (skb == NULL)
					break;
				skb_reserve(skb, 2);	/* 16 byte align the IP header */
				skb_copy_to_linear_data(skb, rx_skb->data, pkt_len);
				skb_put(skb, pkt_len);
				dma_sync_single_for_device(&yp->pci_dev->dev,
							   le32_to_cpu(desc->addr),
							   yp->rx_buf_sz,
							   DMA_FROM_DEVICE);
			}
			skb->protocol = eth_type_trans(skb, dev);
			netif_rx(skb);
			dev->stats.rx_packets++;
			dev->stats.rx_bytes += pkt_len;
		}
		entry = (++yp->cur_rx) % RX_RING_SIZE;
	}

	/* Refill the Rx ring buffers. */
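	/* Note on the convention used below: a newly recycled entry is first
	 * fenced off with CMD_STOP, and only then is the *previous* entry
	 * re-armed with CMD_RX_BUF, so the DBDMA engine never runs past a
	 * descriptor whose buffer is not ready.  The last ring entry also
	 * sets BRANCH_ALWAYS so the chain wraps back to entry 0. */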
	for (; yp->cur_rx - yp->dirty_rx > 0; yp->dirty_rx++) {
		entry = yp->dirty_rx % RX_RING_SIZE;
		if (yp->rx_skbuff[entry] == NULL) {
			struct sk_buff *skb = netdev_alloc_skb(dev, yp->rx_buf_sz + 2);
			if (skb == NULL)
				break;		/* Better luck next round. */
			yp->rx_skbuff[entry] = skb;
			skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
			yp->rx_ring[entry].addr = cpu_to_le32(dma_map_single(&yp->pci_dev->dev,
									     skb->data,
									     yp->rx_buf_sz,
									     DMA_FROM_DEVICE));
		}
		yp->rx_ring[entry].dbdma_cmd = cpu_to_le32(CMD_STOP);
		yp->rx_ring[entry].result_status = 0;	/* Clear complete bit. */
		if (entry != 0)
			yp->rx_ring[entry - 1].dbdma_cmd =
				cpu_to_le32(CMD_RX_BUF | INTR_ALWAYS | yp->rx_buf_sz);
		else
			yp->rx_ring[RX_RING_SIZE - 1].dbdma_cmd =
				cpu_to_le32(CMD_RX_BUF | INTR_ALWAYS | BRANCH_ALWAYS
					    | yp->rx_buf_sz);
	}

	return 0;
}

static void yellowfin_error(struct net_device *dev, int intr_status)
{
	netdev_err(dev, "Something Wicked happened! %04x\n", intr_status);
	/* Hmmmmm, it's not clear what to do here. */
	if (intr_status & (IntrTxPCIErr | IntrTxPCIFault))
		dev->stats.tx_errors++;
	if (intr_status & (IntrRxPCIErr | IntrRxPCIFault))
		dev->stats.rx_errors++;
}

static int yellowfin_close(struct net_device *dev)
{
	struct yellowfin_private *yp = netdev_priv(dev);
	void __iomem *ioaddr = yp->base;
	int i;

	netif_stop_queue(dev);

	if (yellowfin_debug > 1) {
		netdev_printk(KERN_DEBUG, dev, "Shutting down ethercard, status was Tx %04x Rx %04x Int %02x\n",
			      ioread16(ioaddr + TxStatus),
			      ioread16(ioaddr + RxStatus),
			      ioread16(ioaddr + IntrStatus));
		netdev_printk(KERN_DEBUG, dev, "Queue pointers were Tx %d / %d, Rx %d / %d\n",
			      yp->cur_tx, yp->dirty_tx,
			      yp->cur_rx, yp->dirty_rx);
	}

	/* Disable interrupts by clearing the interrupt mask. */
	iowrite16(0x0000, ioaddr + IntrEnb);

	/* Stop the chip's Tx and Rx processes. */
	iowrite32(0x80000000, ioaddr + RxCtrl);
	iowrite32(0x80000000, ioaddr + TxCtrl);

	del_timer(&yp->timer);

#if defined(__i386__)
	if (yellowfin_debug > 2) {
		printk(KERN_DEBUG " Tx ring at %08llx:\n",
		       (unsigned long long)yp->tx_ring_dma);
		for (i = 0; i < TX_RING_SIZE*2; i++)
			printk(KERN_DEBUG " %c #%d desc. %08x %08x %08x %08x\n",
			       ioread32(ioaddr + TxPtr) == (long)&yp->tx_ring[i] ? '>' : ' ',
			       i, yp->tx_ring[i].dbdma_cmd, yp->tx_ring[i].addr,
			       yp->tx_ring[i].branch_addr, yp->tx_ring[i].result_status);
		printk(KERN_DEBUG " Tx status %p:\n", yp->tx_status);
		for (i = 0; i < TX_RING_SIZE; i++)
			printk(KERN_DEBUG " #%d status %04x %04x %04x %04x\n",
			       i, yp->tx_status[i].tx_cnt, yp->tx_status[i].tx_errs,
			       yp->tx_status[i].total_tx_cnt, yp->tx_status[i].paused);

		printk(KERN_DEBUG " Rx ring %08llx:\n",
		       (unsigned long long)yp->rx_ring_dma);
		for (i = 0; i < RX_RING_SIZE; i++) {
			printk(KERN_DEBUG " %c #%d desc. %08x %08x %08x\n",
			       ioread32(ioaddr + RxPtr) == (long)&yp->rx_ring[i] ? '>' : ' ',
			       i, yp->rx_ring[i].dbdma_cmd, yp->rx_ring[i].addr,
			       yp->rx_ring[i].result_status);
			if (yellowfin_debug > 6) {
				if (get_unaligned((u8*)yp->rx_ring[i].addr) != 0x69) {
					int j;

					printk(KERN_DEBUG);
					for (j = 0; j < 0x50; j++)
						pr_cont(" %04x",
							get_unaligned(((u16*)yp->rx_ring[i].addr) + j));
					pr_cont("\n");
				}
			}
		}
	}
#endif /* __i386__ debugging only */

	free_irq(yp->pci_dev->irq, dev);

	/* Free all the skbuffs in the Rx queue. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		yp->rx_ring[i].dbdma_cmd = cpu_to_le32(CMD_STOP);
		yp->rx_ring[i].addr = cpu_to_le32(0xBADF00D0);	/* An invalid address. */
		if (yp->rx_skbuff[i])
			dev_kfree_skb(yp->rx_skbuff[i]);
		yp->rx_skbuff[i] = NULL;
	}
	for (i = 0; i < TX_RING_SIZE; i++) {
		dev_kfree_skb(yp->tx_skbuff[i]);
		yp->tx_skbuff[i] = NULL;
	}

#ifdef YF_PROTOTYPE	/* Support for prototype hardware errata. */
	if (yellowfin_debug > 0) {
		netdev_printk(KERN_DEBUG, dev, "Received %d frames that we should not have\n",
			      bogus_rx);
	}
#endif

	return 0;
}

/* Set or clear the multicast filter for this adaptor. */

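/* The hash filter below is a 64-bit table held as four 16-bit words: a CRC
 * of the multicast address selects a bit index in 0..63, bit >> 4 picks the
 * word and the low four bits pick the bit within it.  On chips with
 * HasMulticastBug, CRCs over shorter address prefixes are hashed as well so
 * that several filter slots are set per address. */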
static void set_rx_mode(struct net_device *dev)
{
	struct yellowfin_private *yp = netdev_priv(dev);
	void __iomem *ioaddr = yp->base;
	u16 cfg_value = ioread16(ioaddr + Cnfg);

	/* Stop the Rx process to change any value. */
	iowrite16(cfg_value & ~0x1000, ioaddr + Cnfg);
	if (dev->flags & IFF_PROMISC) {		/* Set promiscuous. */
		iowrite16(0x000F, ioaddr + AddrMode);
	} else if ((netdev_mc_count(dev) > 64) ||
		   (dev->flags & IFF_ALLMULTI)) {
		/* Too many to filter well, or accept all multicasts. */
		iowrite16(0x000B, ioaddr + AddrMode);
	} else if (!netdev_mc_empty(dev)) {	/* Must use the multicast hash table. */
		struct netdev_hw_addr *ha;
		u16 hash_table[4];
		int i;

		memset(hash_table, 0, sizeof(hash_table));
		netdev_for_each_mc_addr(ha, dev) {
			unsigned int bit;

			/* Due to a bug in the early chip versions, multiple filter
			   slots must be set for each address. */
			if (yp->drv_flags & HasMulticastBug) {
				bit = (ether_crc_le(3, ha->addr) >> 3) & 0x3f;
				hash_table[bit >> 4] |= (1 << (bit & 15));
				bit = (ether_crc_le(4, ha->addr) >> 3) & 0x3f;
				hash_table[bit >> 4] |= (1 << (bit & 15));
				bit = (ether_crc_le(5, ha->addr) >> 3) & 0x3f;
				hash_table[bit >> 4] |= (1 << (bit & 15));
			}
			/* Mask the shift count to the bit's position within its
			   16-bit word; an unmasked (1 << bit) would overshift
			   for bit indices of 16 and above. */
			bit = (ether_crc_le(6, ha->addr) >> 3) & 0x3f;
			hash_table[bit >> 4] |= (1 << (bit & 15));
		}
		/* Copy the hash table to the chip. */
		for (i = 0; i < 4; i++)
			iowrite16(hash_table[i], ioaddr + HashTbl + i*2);
		iowrite16(0x0003, ioaddr + AddrMode);
	} else {			/* Normal, unicast/broadcast-only mode. */
		iowrite16(0x0001, ioaddr + AddrMode);
	}
	/* Restart the Rx process. */
	iowrite16(cfg_value | 0x1000, ioaddr + Cnfg);
}

static void yellowfin_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct yellowfin_private *np = netdev_priv(dev);

	strscpy(info->driver, DRV_NAME, sizeof(info->driver));
	strscpy(info->version, DRV_VERSION, sizeof(info->version));
	strscpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}

static const struct ethtool_ops ethtool_ops = {
	.get_drvinfo = yellowfin_get_drvinfo
};
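
/* With only .get_drvinfo wired up, a userspace query such as
 * `ethtool -i <interface>` should report just the driver name, version and
 * PCI bus address filled in above; other ethtool operations are left
 * unimplemented. */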

static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct yellowfin_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	struct mii_ioctl_data *data = if_mii(rq);

	switch (cmd) {
	case SIOCGMIIPHY:	/* Get address of MII PHY in use. */
		data->phy_id = np->phys[0] & 0x1f;
		fallthrough;

	case SIOCGMIIREG:	/* Read MII PHY register. */
		data->val_out = mdio_read(ioaddr, data->phy_id & 0x1f, data->reg_num & 0x1f);
		return 0;

	case SIOCSMIIREG:	/* Write MII PHY register. */
		if (data->phy_id == np->phys[0]) {
			u16 value = data->val_in;
			switch (data->reg_num) {
			case 0:
				/* Check for autonegotiation on or reset. */
				np->medialock = (value & 0x9000) ? 0 : 1;
				if (np->medialock)
					np->full_duplex = (value & 0x0100) ? 1 : 0;
				break;
			case 4: np->advertising = value; break;
			}
			/* Perhaps check_duplex(dev), depending on chip semantics. */
		}
		mdio_write(ioaddr, data->phy_id & 0x1f, data->reg_num & 0x1f, data->val_in);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
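
/* Illustrative userspace sketch (hypothetical interface name, error handling
 * elided) exercising the MII ioctls handled above; fd is assumed to be an
 * open AF_INET socket:
 *
 *	struct ifreq ifr = { .ifr_name = "eth0" };
 *	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;
 *
 *	ioctl(fd, SIOCGMIIPHY, &ifr);	// kernel fills mii->phy_id
 *	mii->reg_num = 1;		// MII basic status register (BMSR)
 *	ioctl(fd, SIOCGMIIREG, &ifr);	// mii->val_out = register value
 */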


static void yellowfin_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct yellowfin_private *np;

	BUG_ON(!dev);
	np = netdev_priv(dev);

	dma_free_coherent(&pdev->dev, STATUS_TOTAL_SIZE, np->tx_status,
			  np->tx_status_dma);
	dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, np->rx_ring,
			  np->rx_ring_dma);
	dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, np->tx_ring,
			  np->tx_ring_dma);
	unregister_netdev(dev);

	pci_iounmap(pdev, np->base);

	pci_release_regions(pdev);

	free_netdev(dev);
}


static struct pci_driver yellowfin_driver = {
	.name		= DRV_NAME,
	.id_table	= yellowfin_pci_tbl,
	.probe		= yellowfin_init_one,
	.remove		= yellowfin_remove_one,
};


static int __init yellowfin_init(void)
{
	/* when a module, this is printed whether or not devices are found in probe */
#ifdef MODULE
	printk(version);
#endif
	return pci_register_driver(&yellowfin_driver);
}


static void __exit yellowfin_cleanup(void)
{
	pci_unregister_driver(&yellowfin_driver);
}


module_init(yellowfin_init);
module_exit(yellowfin_cleanup);
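
/* Usage note: assuming the user-configurable values declared near the top of
 * this file (debug, rx_copybreak, mtu, ...) are exported as module
 * parameters elsewhere in the file, a verbose load would look like:
 *
 *	modprobe yellowfin debug=3 rx_copybreak=200
 */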