19bba23b0SJeff Kirsher /* starfire.c: Linux device driver for the Adaptec Starfire network adapter. */
29bba23b0SJeff Kirsher /*
39bba23b0SJeff Kirsher Written 1998-2000 by Donald Becker.
49bba23b0SJeff Kirsher
59bba23b0SJeff Kirsher Current maintainer is Ion Badulescu <ionut ta badula tod org>. Please
69bba23b0SJeff Kirsher send all bug reports to me, and not to Donald Becker, as this code
79bba23b0SJeff Kirsher has been heavily modified from Donald's original version.
89bba23b0SJeff Kirsher
99bba23b0SJeff Kirsher This software may be used and distributed according to the terms of
109bba23b0SJeff Kirsher the GNU General Public License (GPL), incorporated herein by reference.
119bba23b0SJeff Kirsher Drivers based on or derived from this code fall under the GPL and must
129bba23b0SJeff Kirsher retain the authorship, copyright and license notice. This file is not
139bba23b0SJeff Kirsher a complete program and may only be used when the entire operating
149bba23b0SJeff Kirsher system is licensed under the GPL.
159bba23b0SJeff Kirsher
169bba23b0SJeff Kirsher The information below comes from Donald Becker's original driver:
179bba23b0SJeff Kirsher
189bba23b0SJeff Kirsher The author may be reached as becker@scyld.com, or C/O
199bba23b0SJeff Kirsher Scyld Computing Corporation
209bba23b0SJeff Kirsher 410 Severn Ave., Suite 210
219bba23b0SJeff Kirsher Annapolis MD 21403
229bba23b0SJeff Kirsher
239bba23b0SJeff Kirsher Support and updates available at
249bba23b0SJeff Kirsher http://www.scyld.com/network/starfire.html
259bba23b0SJeff Kirsher [link no longer provides useful info -jgarzik]
269bba23b0SJeff Kirsher
279bba23b0SJeff Kirsher */
289bba23b0SJeff Kirsher
#define DRV_NAME "starfire" /* Canonical driver name. */
309bba23b0SJeff Kirsher
319bba23b0SJeff Kirsher #include <linux/interrupt.h>
329bba23b0SJeff Kirsher #include <linux/module.h>
339bba23b0SJeff Kirsher #include <linux/kernel.h>
349bba23b0SJeff Kirsher #include <linux/pci.h>
359bba23b0SJeff Kirsher #include <linux/netdevice.h>
369bba23b0SJeff Kirsher #include <linux/etherdevice.h>
379bba23b0SJeff Kirsher #include <linux/init.h>
389bba23b0SJeff Kirsher #include <linux/delay.h>
399bba23b0SJeff Kirsher #include <linux/crc32.h>
409bba23b0SJeff Kirsher #include <linux/ethtool.h>
419bba23b0SJeff Kirsher #include <linux/mii.h>
429bba23b0SJeff Kirsher #include <linux/if_vlan.h>
439bba23b0SJeff Kirsher #include <linux/mm.h>
449bba23b0SJeff Kirsher #include <linux/firmware.h>
459bba23b0SJeff Kirsher #include <asm/processor.h> /* Processor type for cache alignment. */
467c0f6ba6SLinus Torvalds #include <linux/uaccess.h>
479bba23b0SJeff Kirsher #include <asm/io.h>
489bba23b0SJeff Kirsher
/*
 * The current frame processor firmware fails to checksum a fragment
 * of length 1. If and when this is fixed, the #define below can be removed.
 */
#define HAS_BROKEN_FIRMWARE

/*
 * If using the broken firmware, data must be padded to the next 32-bit boundary.
 */
#ifdef HAS_BROKEN_FIRMWARE
#define PADDING_MASK 3
#endif

/*
 * Define this if using the driver with the zero-copy patch
 */
#define ZEROCOPY

/* Hardware VLAN acceleration is compiled in only when the kernel has
   802.1q VLAN support available (built-in or as a module). */
#if IS_ENABLED(CONFIG_VLAN_8021Q)
#define VLAN_SUPPORT
#endif
709bba23b0SJeff Kirsher
/* The user-configurable values.
   These may be modified when a driver module is loaded.*/

/* Used for tuning interrupt latency vs. overhead. */
static int intr_latency;	/* Maximum interrupt latency, in microseconds. */
static int small_frames;	/* Rx frames up to this size bypass interrupt
				   latency mitigation (0,64,128,256,512). */

static int debug = 1;			/* 1 normal messages, 0 quiet .. 7 verbose. */
static int max_interrupt_work = 20;	/* Maximum events handled per interrupt. */
static int mtu;				/* MTU (module parameter). */
/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
   The Starfire has a 512 element hash table based on the Ethernet CRC. */
static const int multicast_filter_limit = 512;
/* Whether to do TCP/UDP checksums in hardware */
static int enable_hw_cksum = 1;
869bba23b0SJeff Kirsher
#define PKT_BUF_SZ	1536		/* Size of each temporary Rx buffer.*/
/*
 * Set the copy breakpoint for the copy-only-tiny-frames scheme.
 * Setting to > 1518 effectively disables this feature.
 *
 * NOTE:
 * The ia64 doesn't allow for unaligned loads even of integers being
 * misaligned on a 2 byte boundary. Thus always force copying of
 * packets as the starfire doesn't allow for misaligned DMAs ;-(
 * 23/10/2000 - Jes
 *
 * The Alpha and the Sparc don't like unaligned loads, either. On Sparc64,
 * at least, having unaligned frames leads to a rather serious performance
 * penalty. -Ion
 */
#if defined(__ia64__) || defined(__alpha__) || defined(__sparc__)
static int rx_copybreak = PKT_BUF_SZ;	/* Always copy: see alignment note above. */
#else
static int rx_copybreak /* = 0 */;
#endif

/* PCI DMA burst size -- on sparc64 we want to force it to 64 bytes, on the others the default of 128 is fine. */
#ifdef __sparc__
#define DMA_BURST_SIZE 64
#else
#define DMA_BURST_SIZE 128
#endif
1149bba23b0SJeff Kirsher
/* Operational parameters that are set at compile time. */

/* The "native" ring sizes are either 256 or 2048.
   However in some modes a descriptor may be marked to wrap the ring earlier.
*/
#define RX_RING_SIZE	256
#define TX_RING_SIZE	32
/* The completion queues are fixed at 1024 entries i.e. 4K or 8KB. */
#define DONE_Q_SIZE	1024
/* All queues must be aligned on a 256-byte boundary */
#define QUEUE_ALIGN	256

/* Select the hardware Rx queue-size encoding matching RX_RING_SIZE. */
#if RX_RING_SIZE > 256
#define RX_Q_ENTRIES Rx2048QEntries
#else
#define RX_Q_ENTRIES Rx256QEntries
#endif

/* Operational parameters that usually are not changed. */
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT	(2 * HZ)

/* Pick descriptor-address width and byte-order helpers to match dma_addr_t,
   so descriptors are always written in the chip's little-endian layout. */
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
/* 64-bit dma_addr_t */
#define ADDR_64BITS	/* This chip uses 64 bit addresses. */
#define netdrv_addr_t __le64
#define cpu_to_dma(x) cpu_to_le64(x)
#define dma_to_cpu(x) le64_to_cpu(x)
#define RX_DESC_Q_ADDR_SIZE RxDescQAddr64bit
#define TX_DESC_Q_ADDR_SIZE TxDescQAddr64bit
#define RX_COMPL_Q_ADDR_SIZE RxComplQAddr64bit
#define TX_COMPL_Q_ADDR_SIZE TxComplQAddr64bit
#define RX_DESC_ADDR_SIZE RxDescAddr64bit
#else  /* 32-bit dma_addr_t */
#define netdrv_addr_t __le32
#define cpu_to_dma(x) cpu_to_le32(x)
#define dma_to_cpu(x) le32_to_cpu(x)
#define RX_DESC_Q_ADDR_SIZE RxDescQAddr32bit
#define TX_DESC_Q_ADDR_SIZE TxDescQAddr32bit
#define RX_COMPL_Q_ADDR_SIZE RxComplQAddr32bit
#define TX_COMPL_Q_ADDR_SIZE TxComplQAddr32bit
#define RX_DESC_ADDR_SIZE RxDescAddr32bit
#endif

/* Convenience aliases over the skb API. */
#define skb_first_frag_len(skb)	skb_headlen(skb)
#define skb_num_frags(skb) (skb_shinfo(skb)->nr_frags + 1)

/* Firmware names */
#define FIRMWARE_RX	"adaptec/starfire_rx.bin"
#define FIRMWARE_TX	"adaptec/starfire_tx.bin"
1659bba23b0SJeff Kirsher
1669bba23b0SJeff Kirsher MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
1679bba23b0SJeff Kirsher MODULE_DESCRIPTION("Adaptec Starfire Ethernet driver");
1689bba23b0SJeff Kirsher MODULE_LICENSE("GPL");
1699bba23b0SJeff Kirsher MODULE_FIRMWARE(FIRMWARE_RX);
1709bba23b0SJeff Kirsher MODULE_FIRMWARE(FIRMWARE_TX);
1719bba23b0SJeff Kirsher
1729bba23b0SJeff Kirsher module_param(max_interrupt_work, int, 0);
1739bba23b0SJeff Kirsher module_param(mtu, int, 0);
1749bba23b0SJeff Kirsher module_param(debug, int, 0);
1759bba23b0SJeff Kirsher module_param(rx_copybreak, int, 0);
1769bba23b0SJeff Kirsher module_param(intr_latency, int, 0);
1779bba23b0SJeff Kirsher module_param(small_frames, int, 0);
1789bba23b0SJeff Kirsher module_param(enable_hw_cksum, int, 0);
1799bba23b0SJeff Kirsher MODULE_PARM_DESC(max_interrupt_work, "Maximum events handled per interrupt");
1809bba23b0SJeff Kirsher MODULE_PARM_DESC(mtu, "MTU (all boards)");
1819bba23b0SJeff Kirsher MODULE_PARM_DESC(debug, "Debug level (0-6)");
1829bba23b0SJeff Kirsher MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
1839bba23b0SJeff Kirsher MODULE_PARM_DESC(intr_latency, "Maximum interrupt latency, in microseconds");
1849bba23b0SJeff Kirsher MODULE_PARM_DESC(small_frames, "Maximum size of receive frames that bypass interrupt latency (0,64,128,256,512)");
1859bba23b0SJeff Kirsher MODULE_PARM_DESC(enable_hw_cksum, "Enable/disable hardware cksum support (0/1)");
1869bba23b0SJeff Kirsher
1879bba23b0SJeff Kirsher /*
1889bba23b0SJeff Kirsher Theory of Operation
1899bba23b0SJeff Kirsher
1909bba23b0SJeff Kirsher I. Board Compatibility
1919bba23b0SJeff Kirsher
1929bba23b0SJeff Kirsher This driver is for the Adaptec 6915 "Starfire" 64 bit PCI Ethernet adapter.
1939bba23b0SJeff Kirsher
1949bba23b0SJeff Kirsher II. Board-specific settings
1959bba23b0SJeff Kirsher
1969bba23b0SJeff Kirsher III. Driver operation
1979bba23b0SJeff Kirsher
1989bba23b0SJeff Kirsher IIIa. Ring buffers
1999bba23b0SJeff Kirsher
2009bba23b0SJeff Kirsher The Starfire hardware uses multiple fixed-size descriptor queues/rings. The
2019bba23b0SJeff Kirsher ring sizes are set fixed by the hardware, but may optionally be wrapped
2029bba23b0SJeff Kirsher earlier by the END bit in the descriptor.
2039bba23b0SJeff Kirsher This driver uses that hardware queue size for the Rx ring, where a large
number of entries has no ill effect beyond increasing the potential backlog.
2059bba23b0SJeff Kirsher The Tx ring is wrapped with the END bit, since a large hardware Tx queue
2069bba23b0SJeff Kirsher disables the queue layer priority ordering and we have no mechanism to
2079bba23b0SJeff Kirsher utilize the hardware two-level priority queue. When modifying the
2089bba23b0SJeff Kirsher RX/TX_RING_SIZE pay close attention to page sizes and the ring-empty warning
2099bba23b0SJeff Kirsher levels.
2109bba23b0SJeff Kirsher
2119bba23b0SJeff Kirsher IIIb/c. Transmit/Receive Structure
2129bba23b0SJeff Kirsher
2139bba23b0SJeff Kirsher See the Adaptec manual for the many possible structures, and options for
2149bba23b0SJeff Kirsher each structure. There are far too many to document all of them here.
2159bba23b0SJeff Kirsher
2169bba23b0SJeff Kirsher For transmit this driver uses type 0/1 transmit descriptors (depending
2179bba23b0SJeff Kirsher on the 32/64 bitness of the architecture), and relies on automatic
2189bba23b0SJeff Kirsher minimum-length padding. It does not use the completion queue
2199bba23b0SJeff Kirsher consumer index, but instead checks for non-zero status entries.
2209bba23b0SJeff Kirsher
2219bba23b0SJeff Kirsher For receive this driver uses type 2/3 receive descriptors. The driver
2229bba23b0SJeff Kirsher allocates full frame size skbuffs for the Rx ring buffers, so all frames
2239bba23b0SJeff Kirsher should fit in a single descriptor. The driver does not use the completion
2249bba23b0SJeff Kirsher queue consumer index, but instead checks for non-zero status entries.
2259bba23b0SJeff Kirsher
2269bba23b0SJeff Kirsher When an incoming frame is less than RX_COPYBREAK bytes long, a fresh skbuff
2279bba23b0SJeff Kirsher is allocated and the frame is copied to the new skbuff. When the incoming
2289bba23b0SJeff Kirsher frame is larger, the skbuff is passed directly up the protocol stack.
2299bba23b0SJeff Kirsher Buffers consumed this way are replaced by newly allocated skbuffs in a later
2309bba23b0SJeff Kirsher phase of receive.
2319bba23b0SJeff Kirsher
2329bba23b0SJeff Kirsher A notable aspect of operation is that unaligned buffers are not permitted by
2339bba23b0SJeff Kirsher the Starfire hardware. Thus the IP header at offset 14 in an ethernet frame
isn't longword aligned, which may cause problems on some machines,
2359bba23b0SJeff Kirsher e.g. Alphas and IA64. For these architectures, the driver is forced to copy
2369bba23b0SJeff Kirsher the frame into a new skbuff unconditionally. Copied frames are put into the
2379bba23b0SJeff Kirsher skbuff at an offset of "+2", thus 16-byte aligning the IP header.
2389bba23b0SJeff Kirsher
2399bba23b0SJeff Kirsher IIId. Synchronization
2409bba23b0SJeff Kirsher
2419bba23b0SJeff Kirsher The driver runs as two independent, single-threaded flows of control. One
2429bba23b0SJeff Kirsher is the send-packet routine, which enforces single-threaded use by the
2439bba23b0SJeff Kirsher dev->tbusy flag. The other thread is the interrupt handler, which is single
2449bba23b0SJeff Kirsher threaded by the hardware and interrupt handling software.
2459bba23b0SJeff Kirsher
2469bba23b0SJeff Kirsher The send packet thread has partial control over the Tx ring and the netif_queue
2479bba23b0SJeff Kirsher status. If the number of free Tx slots in the ring falls below a certain number
2489bba23b0SJeff Kirsher (currently hardcoded to 4), it signals the upper layer to stop the queue.
2499bba23b0SJeff Kirsher
2509bba23b0SJeff Kirsher The interrupt handler has exclusive control over the Rx ring and records stats
2519bba23b0SJeff Kirsher from the Tx ring. After reaping the stats, it marks the Tx queue entry as
2529bba23b0SJeff Kirsher empty by incrementing the dirty_tx mark. Iff the netif_queue is stopped and the
number of free Tx slots is above the threshold, it signals the upper layer to
2549bba23b0SJeff Kirsher restart the queue.
2559bba23b0SJeff Kirsher
2569bba23b0SJeff Kirsher IV. Notes
2579bba23b0SJeff Kirsher
2589bba23b0SJeff Kirsher IVb. References
2599bba23b0SJeff Kirsher
2609bba23b0SJeff Kirsher The Adaptec Starfire manuals, available only from Adaptec.
2619bba23b0SJeff Kirsher http://www.scyld.com/expert/100mbps.html
2629bba23b0SJeff Kirsher http://www.scyld.com/expert/NWay.html
2639bba23b0SJeff Kirsher
2649bba23b0SJeff Kirsher IVc. Errata
2659bba23b0SJeff Kirsher
2669bba23b0SJeff Kirsher - StopOnPerr is broken, don't enable
2679bba23b0SJeff Kirsher - Hardware ethernet padding exposes random data, perform software padding
2689bba23b0SJeff Kirsher instead (unverified -- works correctly for all the hardware I have)
2699bba23b0SJeff Kirsher
2709bba23b0SJeff Kirsher */
2719bba23b0SJeff Kirsher
2729bba23b0SJeff Kirsher
2739bba23b0SJeff Kirsher
/* Per-chip capability flags, stored in chip_info.drv_flags below. */
enum chip_capability_flags {CanHaveMII=1, };

/* Index into netdrv_tbl[], carried in the PCI table's driver_data. */
enum chipset {
	CH_6915 = 0,
};

static const struct pci_device_id starfire_pci_tbl[] = {
	{ PCI_VDEVICE(ADAPTEC, 0x6915), CH_6915 },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, starfire_pci_tbl);

/* A chip capabilities table, matching the CH_xxx entries in xxx_pci_tbl[] above. */
static const struct chip_info {
	const char *name;	/* Human-readable board name. */
	int drv_flags;		/* chip_capability_flags for this board. */
} netdrv_tbl[] = {
	{ "Adaptec Starfire 6915", CanHaveMII },
};
2939bba23b0SJeff Kirsher
2949bba23b0SJeff Kirsher
/* Offsets to the device registers.
   Unlike software-only systems, device drivers interact with complex hardware.
   It's not useful to define symbolic names for every register bit in the
   device. The name can only partially document the semantics and make
   the driver longer and more difficult to read.
   In general, only the important configuration values or bits changed
   multiple times should be defined symbolically.
*/
enum register_offsets {
	PCIDeviceConfig=0x50040, GenCtrl=0x50070, IntrTimerCtrl=0x50074,
	IntrClear=0x50080, IntrStatus=0x50084, IntrEnable=0x50088,
	MIICtrl=0x52000, TxStationAddr=0x50120, EEPROMCtrl=0x51000,
	GPIOCtrl=0x5008C, TxDescCtrl=0x50090,
	TxRingPtr=0x50098, HiPriTxRingPtr=0x50094, /* Low and High priority. */
	TxRingHiAddr=0x5009C, /* 64 bit address extension. */
	TxProducerIdx=0x500A0, TxConsumerIdx=0x500A4,
	TxThreshold=0x500B0,
	CompletionHiAddr=0x500B4, TxCompletionAddr=0x500B8,
	RxCompletionAddr=0x500BC, RxCompletionQ2Addr=0x500C0,
	CompletionQConsumerIdx=0x500C4, RxDMACtrl=0x500D0,
	RxDescQCtrl=0x500D4, RxDescQHiAddr=0x500DC, RxDescQAddr=0x500E0,
	RxDescQIdx=0x500E8, RxDMAStatus=0x500F0, RxFilterMode=0x500F4,
	TxMode=0x55000, VlanType=0x55064,
	PerfFilterTable=0x56000, HashTable=0x56100,
	TxGfpMem=0x58000, RxGfpMem=0x5a000,	/* Tx/Rx frame-processor (GFP) memory. */
};
3219bba23b0SJeff Kirsher
/*
 * Bits in the interrupt status/mask registers.
 * Warning: setting Intr[Ab]NormalSummary in the IntrEnable register
 * enables all the interrupt sources that are or'ed into those status bits.
 */
enum intr_status_bits {
	IntrLinkChange=0xf0000000, IntrStatsMax=0x08000000,
	IntrAbnormalSummary=0x02000000, IntrGeneralTimer=0x01000000,
	IntrSoftware=0x800000, IntrRxComplQ1Low=0x400000,
	IntrTxComplQLow=0x200000, IntrPCI=0x100000,
	IntrDMAErr=0x080000, IntrTxDataLow=0x040000,
	IntrRxComplQ2Low=0x020000, IntrRxDescQ1Low=0x010000,
	IntrNormalSummary=0x8000, IntrTxDone=0x4000,
	IntrTxDMADone=0x2000, IntrTxEmpty=0x1000,
	IntrEarlyRxQ2=0x0800, IntrEarlyRxQ1=0x0400,
	IntrRxQ2Done=0x0200, IntrRxQ1Done=0x0100,
	IntrRxGFPDead=0x80, IntrRxDescQ2Low=0x40,
	IntrNoTxCsum=0x20, IntrTxBadID=0x10,
	IntrHiPriTxBadID=0x08, IntrRxGfp=0x04,
	IntrTxGfp=0x02, IntrPCIPad=0x01,
	/* Composite masks built from the bits above -- not single bits. */
	IntrRxDone=IntrRxQ2Done | IntrRxQ1Done,
	IntrRxEmpty=IntrRxDescQ1Low | IntrRxDescQ2Low,
	IntrNormalMask=0xff00, IntrAbnormalMask=0x3ff00fe,
};
3479bba23b0SJeff Kirsher
/* Bits in the RxFilterMode register (OR'ed together to build the filter mode). */
enum rx_mode_bits {
	AcceptBroadcast=0x04, AcceptAllMulticast=0x02, AcceptAll=0x01,
	AcceptMulticast=0x10, PerfectFilter=0x40, HashFilter=0x30,
	PerfectFilterVlan=0x80, MinVLANPrio=0xE000, VlanMode=0x0200,
	WakeupOnGFP=0x0800,
};

/* Bits in the TxMode register */
enum tx_mode_bits {
	MiiSoftReset=0x8000, MIILoopback=0x4000,
	TxFlowEnable=0x0800, RxFlowEnable=0x0400,
	PadEnable=0x04, FullDuplex=0x02, HugeFrame=0x01,
};
3629bba23b0SJeff Kirsher
/* Bits in the TxDescCtrl register.
   The *Shift values are bit positions of multi-bit fields, not masks. */
enum tx_ctrl_bits {
	TxDescSpaceUnlim=0x00, TxDescSpace32=0x10, TxDescSpace64=0x20,
	TxDescSpace128=0x30, TxDescSpace256=0x40,
	TxDescType0=0x00, TxDescType1=0x01, TxDescType2=0x02,
	TxDescType3=0x03, TxDescType4=0x04,
	TxNoDMACompletion=0x08,
	TxDescQAddr64bit=0x80, TxDescQAddr32bit=0,
	TxHiPriFIFOThreshShift=24, TxPadLenShift=16,
	TxDMABurstSizeShift=8,
};

/* Bits in the RxDescQCtrl register. */
enum rx_ctrl_bits {
	RxBufferLenShift=16, RxMinDescrThreshShift=0,
	RxPrefetchMode=0x8000, RxVariableQ=0x2000,
	Rx2048QEntries=0x4000, Rx256QEntries=0,
	RxDescAddr64bit=0x1000, RxDescAddr32bit=0,
	RxDescQAddr64bit=0x0100, RxDescQAddr32bit=0,
	RxDescSpace4=0x000, RxDescSpace8=0x100,
	RxDescSpace16=0x200, RxDescSpace32=0x300,
	RxDescSpace64=0x400, RxDescSpace128=0x500,
	RxConsumerWrEn=0x80,
};

/* Bits in the RxDMACtrl register. */
enum rx_dmactrl_bits {
	RxReportBadFrames=0x80000000, RxDMAShortFrames=0x40000000,
	RxDMABadFrames=0x20000000, RxDMACrcErrorFrames=0x10000000,
	RxDMAControlFrame=0x08000000, RxDMAPauseFrame=0x04000000,
	RxChecksumIgnore=0, RxChecksumRejectTCPUDP=0x02000000,
	RxChecksumRejectTCPOnly=0x01000000,
	RxCompletionQ2Enable=0x800000,
	RxDMAQ2Disable=0, RxDMAQ2FPOnly=0x100000,
	RxDMAQ2SmallPkt=0x200000, RxDMAQ2HighPrio=0x300000,
	RxDMAQ2NonIP=0x400000,
	RxUseBackupQueue=0x080000, RxDMACRC=0x040000,
	RxEarlyIntThreshShift=12, RxHighPrioThreshShift=8,
	RxBurstSizeShift=0,
};
4039bba23b0SJeff Kirsher
/* Bits in the RxCompletionAddr register */
enum rx_compl_bits {
	RxComplQAddr64bit=0x80, RxComplQAddr32bit=0,
	RxComplProducerWrEn=0x40,
	RxComplType0=0x00, RxComplType1=0x10,
	RxComplType2=0x20, RxComplType3=0x30,
	RxComplThreshShift=0,
};

/* Bits in the TxCompletionAddr register */
enum tx_compl_bits {
	TxComplQAddr64bit=0x80, TxComplQAddr32bit=0,
	TxComplProducerWrEn=0x40,
	TxComplIntrStatus=0x20,
	CommonQueueMode=0x10,
	TxComplThreshShift=0,
};

/* Bits in the GenCtrl register */
enum gen_ctrl_bits {
	RxEnable=0x05, TxEnable=0x0a,
	RxGFPEnable=0x10, TxGFPEnable=0x20,
};

/* Bits in the IntrTimerCtrl register.
   SmallFrame* values select the frame-size threshold for SmallFrameBypass. */
enum intr_ctrl_bits {
	Timer10X=0x800, EnableIntrMasking=0x60, SmallFrameBypass=0x100,
	SmallFrame64=0, SmallFrame128=0x200, SmallFrame256=0x400, SmallFrame512=0x600,
	IntrLatencyMask=0x1f,
};
4349bba23b0SJeff Kirsher
/* The Rx and Tx buffer descriptors. */
struct starfire_rx_desc {
	netdrv_addr_t rxaddr;	/* DMA address of the receive buffer. */
};
/* Flag bits for rxaddr (NOTE(review): presumably OR'ed into its low bits,
   which a suitably aligned buffer address leaves clear -- confirm at usage). */
enum rx_desc_bits {
	RxDescValid=1, RxDescEndRing=2,
};

/* Completion queue entry. */
struct short_rx_done_desc {
	__le32 status;			/* Low 16 bits is length. */
};
struct basic_rx_done_desc {
	__le32 status;			/* Low 16 bits is length. */
	__le16 vlanid;
	__le16 status2;
};
struct csum_rx_done_desc {
	__le32 status;			/* Low 16 bits is length. */
	__le16 csum;			/* Partial checksum */
	__le16 status2;
};
struct full_rx_done_desc {
	__le32 status;			/* Low 16 bits is length. */
	__le16 status3;
	__le16 status2;
	__le16 vlanid;
	__le16 csum;			/* partial checksum */
	__le32 timestamp;
};
/* XXX: this is ugly and I'm not sure it's worth the trouble -Ion */
/* Select the completion entry layout: the full (type 3) layout is only
   needed when VLAN ids must be extracted; otherwise the csum (type 2)
   layout suffices. */
#ifdef VLAN_SUPPORT
typedef struct full_rx_done_desc rx_done_desc;
#define RxComplType RxComplType3
#else  /* not VLAN_SUPPORT */
typedef struct csum_rx_done_desc rx_done_desc;
#define RxComplType RxComplType2
#endif /* not VLAN_SUPPORT */

/* Bits in the Rx completion status word. */
enum rx_done_bits {
	RxOK=0x20000000, RxFIFOErr=0x10000000, RxBufQ2=0x08000000,
};
4779bba23b0SJeff Kirsher
/* Type 1 Tx descriptor. */
struct starfire_tx_desc_1 {
	__le32 status;		/* Upper bits are status, lower 16 length. */
	__le32 addr;		/* 32-bit DMA address of the buffer. */
};

/* Type 2 Tx descriptor. */
struct starfire_tx_desc_2 {
	__le32 status;		/* Upper bits are status, lower 16 length. */
	__le32 reserved;
	__le64 addr;		/* 64-bit DMA address of the buffer. */
};

/* Pick the descriptor type matching the DMA address width (see ADDR_64BITS). */
#ifdef ADDR_64BITS
typedef struct starfire_tx_desc_2 starfire_tx_desc;
#define TX_DESC_TYPE TxDescType2
#else  /* not ADDR_64BITS */
typedef struct starfire_tx_desc_1 starfire_tx_desc;
#define TX_DESC_TYPE TxDescType1
#endif /* not ADDR_64BITS */
#define TX_DESC_SPACING TxDescSpaceUnlim

/* Bits in the Tx descriptor status word. */
enum tx_desc_bits {
	TxDescID=0xB0000000,
	TxCRCEn=0x01000000, TxDescIntr=0x08000000,
	TxRingWrap=0x04000000, TxCalTCP=0x02000000,
};
/* Tx completion queue entry. */
struct tx_done_desc {
	__le32 status;		/* timestamp, index. */
#if 0
	__le32 intrstatus;	/* interrupt status */
#endif
};
5119bba23b0SJeff Kirsher
/* Host-side bookkeeping for one Rx ring slot: the skb and its DMA mapping. */
struct rx_ring_info {
	struct sk_buff *skb;
	dma_addr_t mapping;
};
/* Host-side bookkeeping for one Tx ring slot. */
struct tx_ring_info {
	struct sk_buff *skb;
	dma_addr_t mapping;
	unsigned int used_slots;	/* NOTE(review): appears to count descriptor
					   slots consumed by this skb -- confirm
					   against the Tx path. */
};
5219bba23b0SJeff Kirsher
#define PHY_CNT		2
/* Per-interface driver state (hung off the net_device's private area). */
struct netdev_private {
	/* Descriptor rings first for alignment. */
	struct starfire_rx_desc *rx_ring;
	starfire_tx_desc *tx_ring;
	dma_addr_t rx_ring_dma;		/* Bus address of rx_ring. */
	dma_addr_t tx_ring_dma;		/* Bus address of tx_ring. */
	/* The addresses of rx/tx-in-place skbuffs. */
	struct rx_ring_info rx_info[RX_RING_SIZE];
	struct tx_ring_info tx_info[TX_RING_SIZE];
	/* Pointers to completion queues (full pages). */
	rx_done_desc *rx_done_q;
	dma_addr_t rx_done_q_dma;
	unsigned int rx_done;		/* Index into rx_done_q. */
	struct tx_done_desc *tx_done_q;
	dma_addr_t tx_done_q_dma;
	unsigned int tx_done;		/* Index into tx_done_q. */
	struct napi_struct napi;
	struct net_device *dev;		/* Back-pointer to our net_device. */
	struct pci_dev *pci_dev;
#ifdef VLAN_SUPPORT
	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];	/* Bitmap of active VLAN ids. */
#endif
	void *queue_mem;		/* Backing storage for the queues (queue_mem_size bytes). */
	dma_addr_t queue_mem_dma;
	size_t queue_mem_size;

	/* Frequently used values: keep some adjacent for cache effect. */
	spinlock_t lock;
	unsigned int cur_rx, dirty_rx;	/* Producer/consumer ring indices */
	unsigned int cur_tx, dirty_tx, reap_tx;
	unsigned int rx_buf_sz;		/* Based on MTU+slack. */
	/* These values keep track of the transceiver/media in use. */
	int speed100;			/* Set if speed == 100MBit. */
	u32 tx_mode;			/* Shadow of the TxMode register. */
	u32 intr_timer_ctrl;		/* Shadow of the IntrTimerCtrl register. */
	u8 tx_threshold;
	/* MII transceiver section. */
	struct mii_if_info mii_if;		/* MII lib hooks/info */
	int phy_cnt;			/* MII device addresses. */
	unsigned char phys[PHY_CNT];	/* MII device addresses. */
	void __iomem *base;		/* Mapped device registers. */
};
5659bba23b0SJeff Kirsher
5669bba23b0SJeff Kirsher
5679bba23b0SJeff Kirsher static int mdio_read(struct net_device *dev, int phy_id, int location);
5689bba23b0SJeff Kirsher static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
5699bba23b0SJeff Kirsher static int netdev_open(struct net_device *dev);
5709bba23b0SJeff Kirsher static void check_duplex(struct net_device *dev);
5710290bd29SMichael S. Tsirkin static void tx_timeout(struct net_device *dev, unsigned int txqueue);
5729bba23b0SJeff Kirsher static void init_ring(struct net_device *dev);
5739bba23b0SJeff Kirsher static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev);
5749bba23b0SJeff Kirsher static irqreturn_t intr_handler(int irq, void *dev_instance);
5759bba23b0SJeff Kirsher static void netdev_error(struct net_device *dev, int intr_status);
5769bba23b0SJeff Kirsher static int __netdev_rx(struct net_device *dev, int *quota);
5779bba23b0SJeff Kirsher static int netdev_poll(struct napi_struct *napi, int budget);
5789bba23b0SJeff Kirsher static void refill_rx_ring(struct net_device *dev);
5799bba23b0SJeff Kirsher static void netdev_error(struct net_device *dev, int intr_status);
5809bba23b0SJeff Kirsher static void set_rx_mode(struct net_device *dev);
5819bba23b0SJeff Kirsher static struct net_device_stats *get_stats(struct net_device *dev);
5829bba23b0SJeff Kirsher static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
5839bba23b0SJeff Kirsher static int netdev_close(struct net_device *dev);
5849bba23b0SJeff Kirsher static void netdev_media_change(struct net_device *dev);
5859bba23b0SJeff Kirsher static const struct ethtool_ops ethtool_ops;
5869bba23b0SJeff Kirsher
5879bba23b0SJeff Kirsher
5889bba23b0SJeff Kirsher #ifdef VLAN_SUPPORT
/* ndo_vlan_rx_add_vid: record @vid and push the updated filter to the chip. */
static int netdev_vlan_rx_add_vid(struct net_device *dev,
				  __be16 proto, u16 vid)
{
	struct netdev_private *priv = netdev_priv(dev);

	spin_lock(&priv->lock);
	if (debug > 1)
		printk("%s: Adding vlanid %d to vlan filter\n", dev->name, vid);
	set_bit(vid, priv->active_vlans);
	/* Reprogram the hardware filter while still holding the lock. */
	set_rx_mode(dev);
	spin_unlock(&priv->lock);

	return 0;
}
6039bba23b0SJeff Kirsher
/* ndo_vlan_rx_kill_vid: drop @vid and push the updated filter to the chip. */
static int netdev_vlan_rx_kill_vid(struct net_device *dev,
				   __be16 proto, u16 vid)
{
	struct netdev_private *priv = netdev_priv(dev);

	spin_lock(&priv->lock);
	if (debug > 1)
		printk("%s: removing vlanid %d from vlan filter\n", dev->name, vid);
	clear_bit(vid, priv->active_vlans);
	/* Reprogram the hardware filter while still holding the lock. */
	set_rx_mode(dev);
	spin_unlock(&priv->lock);

	return 0;
}
6189bba23b0SJeff Kirsher #endif /* VLAN_SUPPORT */
6199bba23b0SJeff Kirsher
6209bba23b0SJeff Kirsher
/* net_device callbacks wired up in starfire_init_one(). */
static const struct net_device_ops netdev_ops = {
	.ndo_open		= netdev_open,
	.ndo_stop		= netdev_close,
	.ndo_start_xmit		= start_tx,
	.ndo_tx_timeout		= tx_timeout,
	.ndo_get_stats		= get_stats,
	.ndo_set_rx_mode	= set_rx_mode,
	.ndo_eth_ioctl		= netdev_ioctl,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
#ifdef VLAN_SUPPORT
	.ndo_vlan_rx_add_vid	= netdev_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= netdev_vlan_rx_kill_vid,
#endif
};
6369bba23b0SJeff Kirsher
/*
 * PCI probe entry point: enable and map the device, read the MAC address
 * out of the serial EEPROM, reset the chip, scan for an MII PHY and
 * register the net device.  Returns 0 on success, negative errno on
 * failure (most failures are collapsed into -ENODEV).
 */
static int starfire_init_one(struct pci_dev *pdev,
			     const struct pci_device_id *ent)
{
	struct device *d = &pdev->dev;
	struct netdev_private *np;
	int i, irq, chip_idx = ent->driver_data;
	struct net_device *dev;
	u8 addr[ETH_ALEN];
	long ioaddr;
	void __iomem *base;
	int drv_flags, io_size;
	int boguscnt;

	if (pci_enable_device (pdev))
		return -EIO;

	ioaddr = pci_resource_start(pdev, 0);
	io_size = pci_resource_len(pdev, 0);
	if (!ioaddr || ((pci_resource_flags(pdev, 0) & IORESOURCE_MEM) == 0)) {
		dev_err(d, "no PCI MEM resources, aborting\n");
		return -ENODEV;
	}

	dev = alloc_etherdev(sizeof(*np));
	if (!dev)
		return -ENOMEM;

	SET_NETDEV_DEV(dev, &pdev->dev);

	irq = pdev->irq;

	if (pci_request_regions (pdev, DRV_NAME)) {
		dev_err(d, "cannot reserve PCI resources, aborting\n");
		goto err_out_free_netdev;
	}

	base = ioremap(ioaddr, io_size);
	if (!base) {
		dev_err(d, "cannot remap %#x @ %#lx, aborting\n",
			io_size, ioaddr);
		goto err_out_free_res;
	}

	pci_set_master(pdev);

	/* enable MWI -- it vastly improves Rx performance on sparc64 */
	pci_try_set_mwi(pdev);

#ifdef ZEROCOPY
	/* Starfire can do TCP/UDP checksumming */
	if (enable_hw_cksum)
		dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
#endif /* ZEROCOPY */

#ifdef VLAN_SUPPORT
	dev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
#endif /* VLAN_SUPPORT */
#ifdef ADDR_64BITS
	dev->features |= NETIF_F_HIGHDMA;
#endif /* ADDR_64BITS */

	/* Serial EEPROM reads are hidden by the hardware. */
	/* The MAC address is stored in reverse order at EEPROMCtrl+15..20. */
	for (i = 0; i < 6; i++)
		addr[i] = readb(base + EEPROMCtrl + 20 - i);
	eth_hw_addr_set(dev, addr);

#if ! defined(final_version) /* Dump the EEPROM contents during development. */
	if (debug > 4)
		for (i = 0; i < 0x20; i++)
			printk("%2.2x%s",
			       (unsigned int)readb(base + EEPROMCtrl + i),
			       i % 16 != 15 ? " " : "\n");
#endif

	/* Issue soft reset */
	writel(MiiSoftReset, base + TxMode);
	udelay(1000);
	writel(0, base + TxMode);

	/* Reset the chip to erase previous misconfiguration. */
	/* Poll up to ~10ms for the self-clearing reset bit to drop. */
	writel(1, base + PCIDeviceConfig);
	boguscnt = 1000;
	while (--boguscnt > 0) {
		udelay(10);
		if ((readl(base + PCIDeviceConfig) & 1) == 0)
			break;
	}
	if (boguscnt == 0)
		printk("%s: chipset reset never completed!\n", dev->name);
	/* wait a little longer */
	udelay(1000);

	np = netdev_priv(dev);
	np->dev = dev;
	np->base = base;
	spin_lock_init(&np->lock);
	pci_set_drvdata(pdev, dev);

	np->pci_dev = pdev;

	/* Hook the generic MII library up to our mdio accessors. */
	np->mii_if.dev = dev;
	np->mii_if.mdio_read = mdio_read;
	np->mii_if.mdio_write = mdio_write;
	np->mii_if.phy_id_mask = 0x1f;
	np->mii_if.reg_num_mask = 0x1f;

	drv_flags = netdrv_tbl[chip_idx].drv_flags;

	np->speed100 = 1;

	/* timer resolution is 128 * 0.8us */
	np->intr_timer_ctrl = (((intr_latency * 10) / 1024) & IntrLatencyMask) |
		Timer10X | EnableIntrMasking;

	/* Optionally let small frames bypass interrupt mitigation;
	   the hardware only supports the four thresholds below. */
	if (small_frames > 0) {
		np->intr_timer_ctrl |= SmallFrameBypass;
		switch (small_frames) {
		case 1 ... 64:
			np->intr_timer_ctrl |= SmallFrame64;
			break;
		case 65 ... 128:
			np->intr_timer_ctrl |= SmallFrame128;
			break;
		case 129 ... 256:
			np->intr_timer_ctrl |= SmallFrame256;
			break;
		default:
			np->intr_timer_ctrl |= SmallFrame512;
			if (small_frames > 512)
				printk("Adjusting small_frames down to 512\n");
			break;
		}
	}

	dev->netdev_ops = &netdev_ops;
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->ethtool_ops = &ethtool_ops;

	netif_napi_add_weight(dev, &np->napi, netdev_poll, max_interrupt_work);

	if (mtu)
		dev->mtu = mtu;

	if (register_netdev(dev))
		goto err_out_cleardev;

	printk(KERN_INFO "%s: %s at %p, %pM, IRQ %d.\n",
	       dev->name, netdrv_tbl[chip_idx].name, base,
	       dev->dev_addr, irq);

	if (drv_flags & CanHaveMII) {
		int phy, phy_idx = 0;
		int mii_status;
		/* Scan all 32 MII addresses; reset each candidate PHY and
		   wait for the reset bit to self-clear before probing it. */
		for (phy = 0; phy < 32 && phy_idx < PHY_CNT; phy++) {
			mdio_write(dev, phy, MII_BMCR, BMCR_RESET);
			msleep(100);
			boguscnt = 1000;
			while (--boguscnt > 0)
				if ((mdio_read(dev, phy, MII_BMCR) & BMCR_RESET) == 0)
					break;
			if (boguscnt == 0) {
				printk("%s: PHY#%d reset never completed!\n", dev->name, phy);
				continue;
			}
			mii_status = mdio_read(dev, phy, MII_BMSR);
			if (mii_status != 0) {
				np->phys[phy_idx++] = phy;
				np->mii_if.advertising = mdio_read(dev, phy, MII_ADVERTISE);
				printk(KERN_INFO "%s: MII PHY found at address %d, status "
				       "%#4.4x advertising %#4.4x.\n",
				       dev->name, phy, mii_status, np->mii_if.advertising);
				/* there can be only one PHY on-board */
				break;
			}
		}
		np->phy_cnt = phy_idx;
		if (np->phy_cnt > 0)
			np->mii_if.phy_id = np->phys[0];
		else
			memset(&np->mii_if, 0, sizeof(np->mii_if));
	}

	printk(KERN_INFO "%s: scatter-gather and hardware TCP cksumming %s.\n",
	       dev->name, enable_hw_cksum ? "enabled" : "disabled");
	return 0;

err_out_cleardev:
	iounmap(base);
err_out_free_res:
	pci_release_regions (pdev);
err_out_free_netdev:
	free_netdev(dev);
	return -ENODEV;
}
8319bba23b0SJeff Kirsher
8329bba23b0SJeff Kirsher
8339bba23b0SJeff Kirsher /* Read the MII Management Data I/O (MDIO) interfaces. */
mdio_read(struct net_device * dev,int phy_id,int location)8349bba23b0SJeff Kirsher static int mdio_read(struct net_device *dev, int phy_id, int location)
8359bba23b0SJeff Kirsher {
8369bba23b0SJeff Kirsher struct netdev_private *np = netdev_priv(dev);
8379bba23b0SJeff Kirsher void __iomem *mdio_addr = np->base + MIICtrl + (phy_id<<7) + (location<<2);
8389bba23b0SJeff Kirsher int result, boguscnt=1000;
8399bba23b0SJeff Kirsher /* ??? Should we add a busy-wait here? */
8409bba23b0SJeff Kirsher do {
8419bba23b0SJeff Kirsher result = readl(mdio_addr);
8429bba23b0SJeff Kirsher } while ((result & 0xC0000000) != 0x80000000 && --boguscnt > 0);
8439bba23b0SJeff Kirsher if (boguscnt == 0)
8449bba23b0SJeff Kirsher return 0;
8459bba23b0SJeff Kirsher if ((result & 0xffff) == 0xffff)
8469bba23b0SJeff Kirsher return 0;
8479bba23b0SJeff Kirsher return result & 0xffff;
8489bba23b0SJeff Kirsher }
8499bba23b0SJeff Kirsher
8509bba23b0SJeff Kirsher
mdio_write(struct net_device * dev,int phy_id,int location,int value)8519bba23b0SJeff Kirsher static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
8529bba23b0SJeff Kirsher {
8539bba23b0SJeff Kirsher struct netdev_private *np = netdev_priv(dev);
8549bba23b0SJeff Kirsher void __iomem *mdio_addr = np->base + MIICtrl + (phy_id<<7) + (location<<2);
8559bba23b0SJeff Kirsher writel(value, mdio_addr);
8569bba23b0SJeff Kirsher /* The busy-wait will occur before a read. */
8579bba23b0SJeff Kirsher }
8589bba23b0SJeff Kirsher
8599bba23b0SJeff Kirsher
/*
 * ndo_open: bring the interface up.  Allocates the (lazily created)
 * coherent queue memory, programs the descriptor/completion queue
 * registers, loads the Rx/Tx frame-processor firmware and enables
 * interrupts.  On any failure after the IRQ is grabbed, netdev_close()
 * is invoked to unwind.
 */
static int netdev_open(struct net_device *dev)
{
	const struct firmware *fw_rx, *fw_tx;
	const __be32 *fw_rx_data, *fw_tx_data;
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	const int irq = np->pci_dev->irq;
	int i, retval;
	size_t tx_size, rx_size;
	size_t tx_done_q_size, rx_done_q_size, tx_ring_size, rx_ring_size;

	/* Do we ever need to reset the chip??? */

	retval = request_irq(irq, intr_handler, IRQF_SHARED, dev->name, dev);
	if (retval)
		return retval;

	/* Disable the Rx and Tx, and reset the chip. */
	writel(0, ioaddr + GenCtrl);
	writel(1, ioaddr + PCIDeviceConfig);
	if (debug > 1)
		printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
		       dev->name, irq);

	/* Allocate the various queues. */
	/* One coherent allocation is carved into the two completion queues
	   and the two descriptor rings, each rounded up to QUEUE_ALIGN.
	   It is kept across close/open and only freed on device removal. */
	if (!np->queue_mem) {
		tx_done_q_size = ((sizeof(struct tx_done_desc) * DONE_Q_SIZE + QUEUE_ALIGN - 1) / QUEUE_ALIGN) * QUEUE_ALIGN;
		rx_done_q_size = ((sizeof(rx_done_desc) * DONE_Q_SIZE + QUEUE_ALIGN - 1) / QUEUE_ALIGN) * QUEUE_ALIGN;
		tx_ring_size = ((sizeof(starfire_tx_desc) * TX_RING_SIZE + QUEUE_ALIGN - 1) / QUEUE_ALIGN) * QUEUE_ALIGN;
		rx_ring_size = sizeof(struct starfire_rx_desc) * RX_RING_SIZE;
		np->queue_mem_size = tx_done_q_size + rx_done_q_size + tx_ring_size + rx_ring_size;
		np->queue_mem = dma_alloc_coherent(&np->pci_dev->dev,
						   np->queue_mem_size,
						   &np->queue_mem_dma, GFP_ATOMIC);
		if (np->queue_mem == NULL) {
			free_irq(irq, dev);
			return -ENOMEM;
		}

		np->tx_done_q     = np->queue_mem;
		np->tx_done_q_dma = np->queue_mem_dma;
		np->rx_done_q     = (void *) np->tx_done_q + tx_done_q_size;
		np->rx_done_q_dma = np->tx_done_q_dma + tx_done_q_size;
		np->tx_ring       = (void *) np->rx_done_q + rx_done_q_size;
		np->tx_ring_dma   = np->rx_done_q_dma + rx_done_q_size;
		np->rx_ring       = (void *) np->tx_ring + tx_ring_size;
		np->rx_ring_dma   = np->tx_ring_dma + tx_ring_size;
	}

	/* Start with no carrier, it gets adjusted later */
	netif_carrier_off(dev);
	init_ring(dev);
	/* Set the size of the Rx buffers. */
	writel((np->rx_buf_sz << RxBufferLenShift) |
	       (0 << RxMinDescrThreshShift) |
	       RxPrefetchMode | RxVariableQ |
	       RX_Q_ENTRIES |
	       RX_DESC_Q_ADDR_SIZE | RX_DESC_ADDR_SIZE |
	       RxDescSpace4,
	       ioaddr + RxDescQCtrl);

	/* Set up the Rx DMA controller. */
	writel(RxChecksumIgnore |
	       (0 << RxEarlyIntThreshShift) |
	       (6 << RxHighPrioThreshShift) |
	       ((DMA_BURST_SIZE / 32) << RxBurstSizeShift),
	       ioaddr + RxDMACtrl);

	/* Set Tx descriptor */
	writel((2 << TxHiPriFIFOThreshShift) |
	       (0 << TxPadLenShift) |
	       ((DMA_BURST_SIZE / 32) << TxDMABurstSizeShift) |
	       TX_DESC_Q_ADDR_SIZE |
	       TX_DESC_SPACING | TX_DESC_TYPE,
	       ioaddr + TxDescCtrl);

	/* All queues share the top 32 bits of the 64-bit base address. */
	writel( (np->queue_mem_dma >> 16) >> 16, ioaddr + RxDescQHiAddr);
	writel( (np->queue_mem_dma >> 16) >> 16, ioaddr + TxRingHiAddr);
	writel( (np->queue_mem_dma >> 16) >> 16, ioaddr + CompletionHiAddr);
	writel(np->rx_ring_dma, ioaddr + RxDescQAddr);
	writel(np->tx_ring_dma, ioaddr + TxRingPtr);

	writel(np->tx_done_q_dma, ioaddr + TxCompletionAddr);
	writel(np->rx_done_q_dma |
	       RxComplType |
	       (0 << RxComplThreshShift),
	       ioaddr + RxCompletionAddr);

	if (debug > 1)
		printk(KERN_DEBUG "%s: Filling in the station address.\n", dev->name);

	/* Fill both the Tx SA register and the Rx perfect filter. */
	/* The Tx station address is written byte-reversed. */
	for (i = 0; i < 6; i++)
		writeb(dev->dev_addr[i], ioaddr + TxStationAddr + 5 - i);
	/* The first entry is special because it bypasses the VLAN filter.
	   Don't use it. */
	writew(0, ioaddr + PerfFilterTable);
	writew(0, ioaddr + PerfFilterTable + 4);
	writew(0, ioaddr + PerfFilterTable + 8);
	for (i = 1; i < 16; i++) {
		const __be16 *eaddrs = (const __be16 *)dev->dev_addr;
		void __iomem *setup_frm = ioaddr + PerfFilterTable + i * 16;
		/* Each 16-byte filter entry takes the address as three
		   16-bit words, most-significant word last. */
		writew(be16_to_cpu(eaddrs[2]), setup_frm); setup_frm += 4;
		writew(be16_to_cpu(eaddrs[1]), setup_frm); setup_frm += 4;
		writew(be16_to_cpu(eaddrs[0]), setup_frm); setup_frm += 8;
	}

	/* Initialize other registers. */
	/* Configure the PCI bus bursts and FIFO thresholds. */
	np->tx_mode = TxFlowEnable|RxFlowEnable|PadEnable;	/* modified when link is up. */
	writel(MiiSoftReset | np->tx_mode, ioaddr + TxMode);
	udelay(1000);
	writel(np->tx_mode, ioaddr + TxMode);
	np->tx_threshold = 4;
	writel(np->tx_threshold, ioaddr + TxThreshold);

	writel(np->intr_timer_ctrl, ioaddr + IntrTimerCtrl);

	napi_enable(&np->napi);

	netif_start_queue(dev);

	if (debug > 1)
		printk(KERN_DEBUG "%s: Setting the Rx and Tx modes.\n", dev->name);
	set_rx_mode(dev);

	np->mii_if.advertising = mdio_read(dev, np->phys[0], MII_ADVERTISE);
	check_duplex(dev);

	/* Enable GPIO interrupts on link change */
	writel(0x0f00ff00, ioaddr + GPIOCtrl);

	/* Set the interrupt mask */
	writel(IntrRxDone | IntrRxEmpty | IntrDMAErr |
	       IntrTxDMADone | IntrStatsMax | IntrLinkChange |
	       IntrRxGFPDead | IntrNoTxCsum | IntrTxBadID,
	       ioaddr + IntrEnable);
	/* Enable PCI interrupts. */
	writel(0x00800000 | readl(ioaddr + PCIDeviceConfig),
	       ioaddr + PCIDeviceConfig);

#ifdef VLAN_SUPPORT
	/* Set VLAN type to 802.1q */
	writel(ETH_P_8021Q, ioaddr + VlanType);
#endif /* VLAN_SUPPORT */

	/* Load the frame-processor firmware; both images must be whole
	   32-bit words.  On failure we fall through to netdev_close(). */
	retval = request_firmware(&fw_rx, FIRMWARE_RX, &np->pci_dev->dev);
	if (retval) {
		printk(KERN_ERR "starfire: Failed to load firmware \"%s\"\n",
		       FIRMWARE_RX);
		goto out_init;	/* Already failed */
	}
	if (fw_rx->size % 4) {
		printk(KERN_ERR "starfire: bogus length %zu in \"%s\"\n",
		       fw_rx->size, FIRMWARE_RX);
		retval = -EINVAL;
		goto out_rx;
	}
	retval = request_firmware(&fw_tx, FIRMWARE_TX, &np->pci_dev->dev);
	if (retval) {
		printk(KERN_ERR "starfire: Failed to load firmware \"%s\"\n",
		       FIRMWARE_TX);
		goto out_rx;
	}
	if (fw_tx->size % 4) {
		printk(KERN_ERR "starfire: bogus length %zu in \"%s\"\n",
		       fw_tx->size, FIRMWARE_TX);
		retval = -EINVAL;
		goto out_tx;
	}
	fw_rx_data = (const __be32 *)&fw_rx->data[0];
	fw_tx_data = (const __be32 *)&fw_tx->data[0];
	rx_size = fw_rx->size / 4;
	tx_size = fw_tx->size / 4;

	/* Load Rx/Tx firmware into the frame processors */
	for (i = 0; i < rx_size; i++)
		writel(be32_to_cpup(&fw_rx_data[i]), ioaddr + RxGfpMem + i * 4);
	for (i = 0; i < tx_size; i++)
		writel(be32_to_cpup(&fw_tx_data[i]), ioaddr + TxGfpMem + i * 4);
	if (enable_hw_cksum)
		/* Enable the Rx and Tx units, and the Rx/Tx frame processors. */
		writel(TxEnable|TxGFPEnable|RxEnable|RxGFPEnable, ioaddr + GenCtrl);
	else
		/* Enable the Rx and Tx units only. */
		writel(TxEnable|RxEnable, ioaddr + GenCtrl);

	if (debug > 1)
		printk(KERN_DEBUG "%s: Done netdev_open().\n",
		       dev->name);

out_tx:
	release_firmware(fw_tx);
out_rx:
	release_firmware(fw_rx);
out_init:
	if (retval)
		netdev_close(dev);
	return retval;
}
10609bba23b0SJeff Kirsher
10619bba23b0SJeff Kirsher
check_duplex(struct net_device * dev)10629bba23b0SJeff Kirsher static void check_duplex(struct net_device *dev)
10639bba23b0SJeff Kirsher {
10649bba23b0SJeff Kirsher struct netdev_private *np = netdev_priv(dev);
10659bba23b0SJeff Kirsher u16 reg0;
10669bba23b0SJeff Kirsher int silly_count = 1000;
10679bba23b0SJeff Kirsher
10689bba23b0SJeff Kirsher mdio_write(dev, np->phys[0], MII_ADVERTISE, np->mii_if.advertising);
10699bba23b0SJeff Kirsher mdio_write(dev, np->phys[0], MII_BMCR, BMCR_RESET);
10709bba23b0SJeff Kirsher udelay(500);
10719bba23b0SJeff Kirsher while (--silly_count && mdio_read(dev, np->phys[0], MII_BMCR) & BMCR_RESET)
10729bba23b0SJeff Kirsher /* do nothing */;
10739bba23b0SJeff Kirsher if (!silly_count) {
10749bba23b0SJeff Kirsher printk("%s: MII reset failed!\n", dev->name);
10759bba23b0SJeff Kirsher return;
10769bba23b0SJeff Kirsher }
10779bba23b0SJeff Kirsher
10789bba23b0SJeff Kirsher reg0 = mdio_read(dev, np->phys[0], MII_BMCR);
10799bba23b0SJeff Kirsher
10809bba23b0SJeff Kirsher if (!np->mii_if.force_media) {
10819bba23b0SJeff Kirsher reg0 |= BMCR_ANENABLE | BMCR_ANRESTART;
10829bba23b0SJeff Kirsher } else {
10839bba23b0SJeff Kirsher reg0 &= ~(BMCR_ANENABLE | BMCR_ANRESTART);
10849bba23b0SJeff Kirsher if (np->speed100)
10859bba23b0SJeff Kirsher reg0 |= BMCR_SPEED100;
10869bba23b0SJeff Kirsher if (np->mii_if.full_duplex)
10879bba23b0SJeff Kirsher reg0 |= BMCR_FULLDPLX;
10889bba23b0SJeff Kirsher printk(KERN_DEBUG "%s: Link forced to %sMbit %s-duplex\n",
10899bba23b0SJeff Kirsher dev->name,
10909bba23b0SJeff Kirsher np->speed100 ? "100" : "10",
10919bba23b0SJeff Kirsher np->mii_if.full_duplex ? "full" : "half");
10929bba23b0SJeff Kirsher }
10939bba23b0SJeff Kirsher mdio_write(dev, np->phys[0], MII_BMCR, reg0);
10949bba23b0SJeff Kirsher }
10959bba23b0SJeff Kirsher
10969bba23b0SJeff Kirsher
tx_timeout(struct net_device * dev,unsigned int txqueue)10970290bd29SMichael S. Tsirkin static void tx_timeout(struct net_device *dev, unsigned int txqueue)
10989bba23b0SJeff Kirsher {
10999bba23b0SJeff Kirsher struct netdev_private *np = netdev_priv(dev);
11009bba23b0SJeff Kirsher void __iomem *ioaddr = np->base;
11019bba23b0SJeff Kirsher int old_debug;
11029bba23b0SJeff Kirsher
11039bba23b0SJeff Kirsher printk(KERN_WARNING "%s: Transmit timed out, status %#8.8x, "
11049bba23b0SJeff Kirsher "resetting...\n", dev->name, (int) readl(ioaddr + IntrStatus));
11059bba23b0SJeff Kirsher
11069bba23b0SJeff Kirsher /* Perhaps we should reinitialize the hardware here. */
11079bba23b0SJeff Kirsher
11089bba23b0SJeff Kirsher /*
11099bba23b0SJeff Kirsher * Stop and restart the interface.
11109bba23b0SJeff Kirsher * Cheat and increase the debug level temporarily.
11119bba23b0SJeff Kirsher */
11129bba23b0SJeff Kirsher old_debug = debug;
11139bba23b0SJeff Kirsher debug = 2;
11149bba23b0SJeff Kirsher netdev_close(dev);
11159bba23b0SJeff Kirsher netdev_open(dev);
11169bba23b0SJeff Kirsher debug = old_debug;
11179bba23b0SJeff Kirsher
11189bba23b0SJeff Kirsher /* Trigger an immediate transmit demand. */
11199bba23b0SJeff Kirsher
1120860e9538SFlorian Westphal netif_trans_update(dev); /* prevent tx timeout */
11219bba23b0SJeff Kirsher dev->stats.tx_errors++;
11229bba23b0SJeff Kirsher netif_wake_queue(dev);
11239bba23b0SJeff Kirsher }
11249bba23b0SJeff Kirsher
11259bba23b0SJeff Kirsher
11269bba23b0SJeff Kirsher /* Initialize the Rx and Tx rings, along with various 'dev' bits. */
/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static void init_ring(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int i;

	np->cur_rx = np->cur_tx = np->reap_tx = 0;
	np->dirty_rx = np->dirty_tx = np->rx_done = np->tx_done = 0;

	np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);

	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz);
		np->rx_info[i].skb = skb;
		if (skb == NULL)
			break;
		np->rx_info[i].mapping = dma_map_single(&np->pci_dev->dev,
							skb->data,
							np->rx_buf_sz,
							DMA_FROM_DEVICE);
		/* On mapping failure, free the skb and stop filling here;
		   the remainder of the ring is cleared below. */
		if (dma_mapping_error(&np->pci_dev->dev, np->rx_info[i].mapping)) {
			dev_kfree_skb(skb);
			np->rx_info[i].skb = NULL;
			break;
		}
		/* Grrr, we cannot offset to correctly align the IP header. */
		np->rx_ring[i].rxaddr = cpu_to_dma(np->rx_info[i].mapping | RxDescValid);
	}
	/* Tell the chip the index of the last valid Rx descriptor. */
	writew(i - 1, np->base + RxDescQIdx);
	/* dirty_rx trails cur_rx by the number of filled entries; if the
	   ring is only partially filled this intentionally wraps negative
	   (as unsigned) so refill logic knows how many slots are empty. */
	np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);

	/* Clear the remainder of the Rx buffer ring. */
	for ( ; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].rxaddr = 0;
		np->rx_info[i].skb = NULL;
		np->rx_info[i].mapping = 0;
	}
	/* Mark the last entry as wrapping the ring. */
	np->rx_ring[RX_RING_SIZE - 1].rxaddr |= cpu_to_dma(RxDescEndRing);

	/* Clear the completion rings. */
	for (i = 0; i < DONE_Q_SIZE; i++) {
		np->rx_done_q[i].status = 0;
		np->tx_done_q[i].status = 0;
	}

	for (i = 0; i < TX_RING_SIZE; i++)
		memset(&np->tx_info[i], 0, sizeof(np->tx_info[i]));
}
11769bba23b0SJeff Kirsher
11779bba23b0SJeff Kirsher
start_tx(struct sk_buff * skb,struct net_device * dev)11789bba23b0SJeff Kirsher static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
11799bba23b0SJeff Kirsher {
11809bba23b0SJeff Kirsher struct netdev_private *np = netdev_priv(dev);
11819bba23b0SJeff Kirsher unsigned int entry;
1182d1156b48SAlexey Khoroshilov unsigned int prev_tx;
11839bba23b0SJeff Kirsher u32 status;
1184d1156b48SAlexey Khoroshilov int i, j;
11859bba23b0SJeff Kirsher
11869bba23b0SJeff Kirsher /*
11879bba23b0SJeff Kirsher * be cautious here, wrapping the queue has weird semantics
11889bba23b0SJeff Kirsher * and we may not have enough slots even when it seems we do.
11899bba23b0SJeff Kirsher */
11909bba23b0SJeff Kirsher if ((np->cur_tx - np->dirty_tx) + skb_num_frags(skb) * 2 > TX_RING_SIZE) {
11919bba23b0SJeff Kirsher netif_stop_queue(dev);
11929bba23b0SJeff Kirsher return NETDEV_TX_BUSY;
11939bba23b0SJeff Kirsher }
11949bba23b0SJeff Kirsher
11959bba23b0SJeff Kirsher #if defined(ZEROCOPY) && defined(HAS_BROKEN_FIRMWARE)
11969bba23b0SJeff Kirsher if (skb->ip_summed == CHECKSUM_PARTIAL) {
11979bba23b0SJeff Kirsher if (skb_padto(skb, (skb->len + PADDING_MASK) & ~PADDING_MASK))
11989bba23b0SJeff Kirsher return NETDEV_TX_OK;
11999bba23b0SJeff Kirsher }
12009bba23b0SJeff Kirsher #endif /* ZEROCOPY && HAS_BROKEN_FIRMWARE */
12019bba23b0SJeff Kirsher
1202d1156b48SAlexey Khoroshilov prev_tx = np->cur_tx;
12039bba23b0SJeff Kirsher entry = np->cur_tx % TX_RING_SIZE;
12049bba23b0SJeff Kirsher for (i = 0; i < skb_num_frags(skb); i++) {
12059bba23b0SJeff Kirsher int wrap_ring = 0;
12069bba23b0SJeff Kirsher status = TxDescID;
12079bba23b0SJeff Kirsher
12089bba23b0SJeff Kirsher if (i == 0) {
12099bba23b0SJeff Kirsher np->tx_info[entry].skb = skb;
12109bba23b0SJeff Kirsher status |= TxCRCEn;
12119bba23b0SJeff Kirsher if (entry >= TX_RING_SIZE - skb_num_frags(skb)) {
12129bba23b0SJeff Kirsher status |= TxRingWrap;
12139bba23b0SJeff Kirsher wrap_ring = 1;
12149bba23b0SJeff Kirsher }
12159bba23b0SJeff Kirsher if (np->reap_tx) {
12169bba23b0SJeff Kirsher status |= TxDescIntr;
12179bba23b0SJeff Kirsher np->reap_tx = 0;
12189bba23b0SJeff Kirsher }
12199bba23b0SJeff Kirsher if (skb->ip_summed == CHECKSUM_PARTIAL) {
12209bba23b0SJeff Kirsher status |= TxCalTCP;
12219bba23b0SJeff Kirsher dev->stats.tx_compressed++;
12229bba23b0SJeff Kirsher }
12239bba23b0SJeff Kirsher status |= skb_first_frag_len(skb) | (skb_num_frags(skb) << 16);
12249bba23b0SJeff Kirsher
12259bba23b0SJeff Kirsher np->tx_info[entry].mapping =
12265d63cceaSChristophe JAILLET dma_map_single(&np->pci_dev->dev, skb->data,
12275d63cceaSChristophe JAILLET skb_first_frag_len(skb),
12285d63cceaSChristophe JAILLET DMA_TO_DEVICE);
12299bba23b0SJeff Kirsher } else {
12309e903e08SEric Dumazet const skb_frag_t *this_frag = &skb_shinfo(skb)->frags[i - 1];
12319e903e08SEric Dumazet status |= skb_frag_size(this_frag);
12329bba23b0SJeff Kirsher np->tx_info[entry].mapping =
12335d63cceaSChristophe JAILLET dma_map_single(&np->pci_dev->dev,
12340cd83cc0SIan Campbell skb_frag_address(this_frag),
12359e903e08SEric Dumazet skb_frag_size(this_frag),
12365d63cceaSChristophe JAILLET DMA_TO_DEVICE);
12379bba23b0SJeff Kirsher }
12385d63cceaSChristophe JAILLET if (dma_mapping_error(&np->pci_dev->dev, np->tx_info[entry].mapping)) {
1239d1156b48SAlexey Khoroshilov dev->stats.tx_dropped++;
1240d1156b48SAlexey Khoroshilov goto err_out;
1241d1156b48SAlexey Khoroshilov }
12429bba23b0SJeff Kirsher
12439bba23b0SJeff Kirsher np->tx_ring[entry].addr = cpu_to_dma(np->tx_info[entry].mapping);
12449bba23b0SJeff Kirsher np->tx_ring[entry].status = cpu_to_le32(status);
12459bba23b0SJeff Kirsher if (debug > 3)
12469bba23b0SJeff Kirsher printk(KERN_DEBUG "%s: Tx #%d/#%d slot %d status %#8.8x.\n",
12479bba23b0SJeff Kirsher dev->name, np->cur_tx, np->dirty_tx,
12489bba23b0SJeff Kirsher entry, status);
12499bba23b0SJeff Kirsher if (wrap_ring) {
12509bba23b0SJeff Kirsher np->tx_info[entry].used_slots = TX_RING_SIZE - entry;
12519bba23b0SJeff Kirsher np->cur_tx += np->tx_info[entry].used_slots;
12529bba23b0SJeff Kirsher entry = 0;
12539bba23b0SJeff Kirsher } else {
12549bba23b0SJeff Kirsher np->tx_info[entry].used_slots = 1;
12559bba23b0SJeff Kirsher np->cur_tx += np->tx_info[entry].used_slots;
12569bba23b0SJeff Kirsher entry++;
12579bba23b0SJeff Kirsher }
12589bba23b0SJeff Kirsher /* scavenge the tx descriptors twice per TX_RING_SIZE */
12599bba23b0SJeff Kirsher if (np->cur_tx % (TX_RING_SIZE / 2) == 0)
12609bba23b0SJeff Kirsher np->reap_tx = 1;
12619bba23b0SJeff Kirsher }
12629bba23b0SJeff Kirsher
12639bba23b0SJeff Kirsher /* Non-x86: explicitly flush descriptor cache lines here. */
12649bba23b0SJeff Kirsher /* Ensure all descriptors are written back before the transmit is
12659bba23b0SJeff Kirsher initiated. - Jes */
12669bba23b0SJeff Kirsher wmb();
12679bba23b0SJeff Kirsher
12689bba23b0SJeff Kirsher /* Update the producer index. */
12699bba23b0SJeff Kirsher writel(entry * (sizeof(starfire_tx_desc) / 8), np->base + TxProducerIdx);
12709bba23b0SJeff Kirsher
12719bba23b0SJeff Kirsher /* 4 is arbitrary, but should be ok */
12729bba23b0SJeff Kirsher if ((np->cur_tx - np->dirty_tx) + 4 > TX_RING_SIZE)
12739bba23b0SJeff Kirsher netif_stop_queue(dev);
12749bba23b0SJeff Kirsher
12759bba23b0SJeff Kirsher return NETDEV_TX_OK;
12769bba23b0SJeff Kirsher
1277d1156b48SAlexey Khoroshilov err_out:
1278d1156b48SAlexey Khoroshilov entry = prev_tx % TX_RING_SIZE;
1279d1156b48SAlexey Khoroshilov np->tx_info[entry].skb = NULL;
1280d1156b48SAlexey Khoroshilov if (i > 0) {
12815d63cceaSChristophe JAILLET dma_unmap_single(&np->pci_dev->dev,
1282d1156b48SAlexey Khoroshilov np->tx_info[entry].mapping,
12835d63cceaSChristophe JAILLET skb_first_frag_len(skb), DMA_TO_DEVICE);
1284d1156b48SAlexey Khoroshilov np->tx_info[entry].mapping = 0;
1285d1156b48SAlexey Khoroshilov entry = (entry + np->tx_info[entry].used_slots) % TX_RING_SIZE;
1286d1156b48SAlexey Khoroshilov for (j = 1; j < i; j++) {
12875d63cceaSChristophe JAILLET dma_unmap_single(&np->pci_dev->dev,
1288d1156b48SAlexey Khoroshilov np->tx_info[entry].mapping,
12895d63cceaSChristophe JAILLET skb_frag_size(&skb_shinfo(skb)->frags[j - 1]),
12905d63cceaSChristophe JAILLET DMA_TO_DEVICE);
1291d1156b48SAlexey Khoroshilov entry++;
1292d1156b48SAlexey Khoroshilov }
1293d1156b48SAlexey Khoroshilov }
1294d1156b48SAlexey Khoroshilov dev_kfree_skb_any(skb);
1295d1156b48SAlexey Khoroshilov np->cur_tx = prev_tx;
1296d1156b48SAlexey Khoroshilov return NETDEV_TX_OK;
1297d1156b48SAlexey Khoroshilov }
12989bba23b0SJeff Kirsher
12999bba23b0SJeff Kirsher /* The interrupt handler does all of the Rx thread work and cleans up
13009bba23b0SJeff Kirsher after the Tx thread. */
intr_handler(int irq,void * dev_instance)13019bba23b0SJeff Kirsher static irqreturn_t intr_handler(int irq, void *dev_instance)
13029bba23b0SJeff Kirsher {
13039bba23b0SJeff Kirsher struct net_device *dev = dev_instance;
13049bba23b0SJeff Kirsher struct netdev_private *np = netdev_priv(dev);
13059bba23b0SJeff Kirsher void __iomem *ioaddr = np->base;
13069bba23b0SJeff Kirsher int boguscnt = max_interrupt_work;
13079bba23b0SJeff Kirsher int consumer;
13089bba23b0SJeff Kirsher int tx_status;
13099bba23b0SJeff Kirsher int handled = 0;
13109bba23b0SJeff Kirsher
13119bba23b0SJeff Kirsher do {
13129bba23b0SJeff Kirsher u32 intr_status = readl(ioaddr + IntrClear);
13139bba23b0SJeff Kirsher
13149bba23b0SJeff Kirsher if (debug > 4)
13159bba23b0SJeff Kirsher printk(KERN_DEBUG "%s: Interrupt status %#8.8x.\n",
13169bba23b0SJeff Kirsher dev->name, intr_status);
13179bba23b0SJeff Kirsher
13189bba23b0SJeff Kirsher if (intr_status == 0 || intr_status == (u32) -1)
13199bba23b0SJeff Kirsher break;
13209bba23b0SJeff Kirsher
13219bba23b0SJeff Kirsher handled = 1;
13229bba23b0SJeff Kirsher
13239bba23b0SJeff Kirsher if (intr_status & (IntrRxDone | IntrRxEmpty)) {
13249bba23b0SJeff Kirsher u32 enable;
13259bba23b0SJeff Kirsher
13269bba23b0SJeff Kirsher if (likely(napi_schedule_prep(&np->napi))) {
13279bba23b0SJeff Kirsher __napi_schedule(&np->napi);
13289bba23b0SJeff Kirsher enable = readl(ioaddr + IntrEnable);
13299bba23b0SJeff Kirsher enable &= ~(IntrRxDone | IntrRxEmpty);
13309bba23b0SJeff Kirsher writel(enable, ioaddr + IntrEnable);
13319bba23b0SJeff Kirsher /* flush PCI posting buffers */
13329bba23b0SJeff Kirsher readl(ioaddr + IntrEnable);
13339bba23b0SJeff Kirsher } else {
13349bba23b0SJeff Kirsher /* Paranoia check */
13359bba23b0SJeff Kirsher enable = readl(ioaddr + IntrEnable);
13369bba23b0SJeff Kirsher if (enable & (IntrRxDone | IntrRxEmpty)) {
13379bba23b0SJeff Kirsher printk(KERN_INFO
13389bba23b0SJeff Kirsher "%s: interrupt while in poll!\n",
13399bba23b0SJeff Kirsher dev->name);
13409bba23b0SJeff Kirsher enable &= ~(IntrRxDone | IntrRxEmpty);
13419bba23b0SJeff Kirsher writel(enable, ioaddr + IntrEnable);
13429bba23b0SJeff Kirsher }
13439bba23b0SJeff Kirsher }
13449bba23b0SJeff Kirsher }
13459bba23b0SJeff Kirsher
13469bba23b0SJeff Kirsher /* Scavenge the skbuff list based on the Tx-done queue.
13479bba23b0SJeff Kirsher There are redundant checks here that may be cleaned up
13489bba23b0SJeff Kirsher after the driver has proven to be reliable. */
13499bba23b0SJeff Kirsher consumer = readl(ioaddr + TxConsumerIdx);
13509bba23b0SJeff Kirsher if (debug > 3)
13519bba23b0SJeff Kirsher printk(KERN_DEBUG "%s: Tx Consumer index is %d.\n",
13529bba23b0SJeff Kirsher dev->name, consumer);
13539bba23b0SJeff Kirsher
13549bba23b0SJeff Kirsher while ((tx_status = le32_to_cpu(np->tx_done_q[np->tx_done].status)) != 0) {
13559bba23b0SJeff Kirsher if (debug > 3)
13569bba23b0SJeff Kirsher printk(KERN_DEBUG "%s: Tx completion #%d entry %d is %#8.8x.\n",
13579bba23b0SJeff Kirsher dev->name, np->dirty_tx, np->tx_done, tx_status);
13589bba23b0SJeff Kirsher if ((tx_status & 0xe0000000) == 0xa0000000) {
13599bba23b0SJeff Kirsher dev->stats.tx_packets++;
13609bba23b0SJeff Kirsher } else if ((tx_status & 0xe0000000) == 0x80000000) {
13619bba23b0SJeff Kirsher u16 entry = (tx_status & 0x7fff) / sizeof(starfire_tx_desc);
13629bba23b0SJeff Kirsher struct sk_buff *skb = np->tx_info[entry].skb;
13639bba23b0SJeff Kirsher np->tx_info[entry].skb = NULL;
13645d63cceaSChristophe JAILLET dma_unmap_single(&np->pci_dev->dev,
13659bba23b0SJeff Kirsher np->tx_info[entry].mapping,
13669bba23b0SJeff Kirsher skb_first_frag_len(skb),
13675d63cceaSChristophe JAILLET DMA_TO_DEVICE);
13689bba23b0SJeff Kirsher np->tx_info[entry].mapping = 0;
13699bba23b0SJeff Kirsher np->dirty_tx += np->tx_info[entry].used_slots;
13709bba23b0SJeff Kirsher entry = (entry + np->tx_info[entry].used_slots) % TX_RING_SIZE;
13719bba23b0SJeff Kirsher {
13729bba23b0SJeff Kirsher int i;
13739bba23b0SJeff Kirsher for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
13745d63cceaSChristophe JAILLET dma_unmap_single(&np->pci_dev->dev,
13759bba23b0SJeff Kirsher np->tx_info[entry].mapping,
13769e903e08SEric Dumazet skb_frag_size(&skb_shinfo(skb)->frags[i]),
13775d63cceaSChristophe JAILLET DMA_TO_DEVICE);
13789bba23b0SJeff Kirsher np->dirty_tx++;
13799bba23b0SJeff Kirsher entry++;
13809bba23b0SJeff Kirsher }
13819bba23b0SJeff Kirsher }
13829bba23b0SJeff Kirsher
1383e772261bSYang Wei dev_consume_skb_irq(skb);
13849bba23b0SJeff Kirsher }
13859bba23b0SJeff Kirsher np->tx_done_q[np->tx_done].status = 0;
13869bba23b0SJeff Kirsher np->tx_done = (np->tx_done + 1) % DONE_Q_SIZE;
13879bba23b0SJeff Kirsher }
13889bba23b0SJeff Kirsher writew(np->tx_done, ioaddr + CompletionQConsumerIdx + 2);
13899bba23b0SJeff Kirsher
13909bba23b0SJeff Kirsher if (netif_queue_stopped(dev) &&
13919bba23b0SJeff Kirsher (np->cur_tx - np->dirty_tx + 4 < TX_RING_SIZE)) {
13929bba23b0SJeff Kirsher /* The ring is no longer full, wake the queue. */
13939bba23b0SJeff Kirsher netif_wake_queue(dev);
13949bba23b0SJeff Kirsher }
13959bba23b0SJeff Kirsher
13969bba23b0SJeff Kirsher /* Stats overflow */
13979bba23b0SJeff Kirsher if (intr_status & IntrStatsMax)
13989bba23b0SJeff Kirsher get_stats(dev);
13999bba23b0SJeff Kirsher
14009bba23b0SJeff Kirsher /* Media change interrupt. */
14019bba23b0SJeff Kirsher if (intr_status & IntrLinkChange)
14029bba23b0SJeff Kirsher netdev_media_change(dev);
14039bba23b0SJeff Kirsher
14049bba23b0SJeff Kirsher /* Abnormal error summary/uncommon events handlers. */
14059bba23b0SJeff Kirsher if (intr_status & IntrAbnormalSummary)
14069bba23b0SJeff Kirsher netdev_error(dev, intr_status);
14079bba23b0SJeff Kirsher
14089bba23b0SJeff Kirsher if (--boguscnt < 0) {
14099bba23b0SJeff Kirsher if (debug > 1)
14109bba23b0SJeff Kirsher printk(KERN_WARNING "%s: Too much work at interrupt, "
14119bba23b0SJeff Kirsher "status=%#8.8x.\n",
14129bba23b0SJeff Kirsher dev->name, intr_status);
14139bba23b0SJeff Kirsher break;
14149bba23b0SJeff Kirsher }
14159bba23b0SJeff Kirsher } while (1);
14169bba23b0SJeff Kirsher
14179bba23b0SJeff Kirsher if (debug > 4)
14189bba23b0SJeff Kirsher printk(KERN_DEBUG "%s: exiting interrupt, status=%#8.8x.\n",
14199bba23b0SJeff Kirsher dev->name, (int) readl(ioaddr + IntrStatus));
14209bba23b0SJeff Kirsher return IRQ_RETVAL(handled);
14219bba23b0SJeff Kirsher }
14229bba23b0SJeff Kirsher
14239bba23b0SJeff Kirsher
14249bba23b0SJeff Kirsher /*
14259bba23b0SJeff Kirsher * This routine is logically part of the interrupt/poll handler, but separated
14269bba23b0SJeff Kirsher * for clarity and better register allocation.
14279bba23b0SJeff Kirsher */
__netdev_rx(struct net_device * dev,int * quota)14289bba23b0SJeff Kirsher static int __netdev_rx(struct net_device *dev, int *quota)
14299bba23b0SJeff Kirsher {
14309bba23b0SJeff Kirsher struct netdev_private *np = netdev_priv(dev);
14319bba23b0SJeff Kirsher u32 desc_status;
14329bba23b0SJeff Kirsher int retcode = 0;
14339bba23b0SJeff Kirsher
14349bba23b0SJeff Kirsher /* If EOP is set on the next entry, it's a new packet. Send it up. */
14359bba23b0SJeff Kirsher while ((desc_status = le32_to_cpu(np->rx_done_q[np->rx_done].status)) != 0) {
14369bba23b0SJeff Kirsher struct sk_buff *skb;
14379bba23b0SJeff Kirsher u16 pkt_len;
14389bba23b0SJeff Kirsher int entry;
14399bba23b0SJeff Kirsher rx_done_desc *desc = &np->rx_done_q[np->rx_done];
14409bba23b0SJeff Kirsher
14419bba23b0SJeff Kirsher if (debug > 4)
14429bba23b0SJeff Kirsher printk(KERN_DEBUG " netdev_rx() status of %d was %#8.8x.\n", np->rx_done, desc_status);
14439bba23b0SJeff Kirsher if (!(desc_status & RxOK)) {
14449bba23b0SJeff Kirsher /* There was an error. */
14459bba23b0SJeff Kirsher if (debug > 2)
14469bba23b0SJeff Kirsher printk(KERN_DEBUG " netdev_rx() Rx error was %#8.8x.\n", desc_status);
14479bba23b0SJeff Kirsher dev->stats.rx_errors++;
14489bba23b0SJeff Kirsher if (desc_status & RxFIFOErr)
14499bba23b0SJeff Kirsher dev->stats.rx_fifo_errors++;
14509bba23b0SJeff Kirsher goto next_rx;
14519bba23b0SJeff Kirsher }
14529bba23b0SJeff Kirsher
14539bba23b0SJeff Kirsher if (*quota <= 0) { /* out of rx quota */
14549bba23b0SJeff Kirsher retcode = 1;
14559bba23b0SJeff Kirsher goto out;
14569bba23b0SJeff Kirsher }
14579bba23b0SJeff Kirsher (*quota)--;
14589bba23b0SJeff Kirsher
14599bba23b0SJeff Kirsher pkt_len = desc_status; /* Implicitly Truncate */
14609bba23b0SJeff Kirsher entry = (desc_status >> 16) & 0x7ff;
14619bba23b0SJeff Kirsher
14629bba23b0SJeff Kirsher if (debug > 4)
14639bba23b0SJeff Kirsher printk(KERN_DEBUG " netdev_rx() normal Rx pkt length %d, quota %d.\n", pkt_len, *quota);
14649bba23b0SJeff Kirsher /* Check if the packet is long enough to accept without copying
14659bba23b0SJeff Kirsher to a minimally-sized skbuff. */
14669bba23b0SJeff Kirsher if (pkt_len < rx_copybreak &&
14671d266430SPradeep A Dalvi (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
14689bba23b0SJeff Kirsher skb_reserve(skb, 2); /* 16 byte align the IP header */
14695d63cceaSChristophe JAILLET dma_sync_single_for_cpu(&np->pci_dev->dev,
14709bba23b0SJeff Kirsher np->rx_info[entry].mapping,
14715d63cceaSChristophe JAILLET pkt_len, DMA_FROM_DEVICE);
14729bba23b0SJeff Kirsher skb_copy_to_linear_data(skb, np->rx_info[entry].skb->data, pkt_len);
14735d63cceaSChristophe JAILLET dma_sync_single_for_device(&np->pci_dev->dev,
14749bba23b0SJeff Kirsher np->rx_info[entry].mapping,
14755d63cceaSChristophe JAILLET pkt_len, DMA_FROM_DEVICE);
14769bba23b0SJeff Kirsher skb_put(skb, pkt_len);
14779bba23b0SJeff Kirsher } else {
14785d63cceaSChristophe JAILLET dma_unmap_single(&np->pci_dev->dev,
14795d63cceaSChristophe JAILLET np->rx_info[entry].mapping,
14805d63cceaSChristophe JAILLET np->rx_buf_sz, DMA_FROM_DEVICE);
14819bba23b0SJeff Kirsher skb = np->rx_info[entry].skb;
14829bba23b0SJeff Kirsher skb_put(skb, pkt_len);
14839bba23b0SJeff Kirsher np->rx_info[entry].skb = NULL;
14849bba23b0SJeff Kirsher np->rx_info[entry].mapping = 0;
14859bba23b0SJeff Kirsher }
14869bba23b0SJeff Kirsher #ifndef final_version /* Remove after testing. */
14879bba23b0SJeff Kirsher /* You will want this info for the initial debug. */
14889bba23b0SJeff Kirsher if (debug > 5) {
14899bba23b0SJeff Kirsher printk(KERN_DEBUG " Rx data %pM %pM %2.2x%2.2x.\n",
14909bba23b0SJeff Kirsher skb->data, skb->data + 6,
14919bba23b0SJeff Kirsher skb->data[12], skb->data[13]);
14929bba23b0SJeff Kirsher }
14939bba23b0SJeff Kirsher #endif
14949bba23b0SJeff Kirsher
14959bba23b0SJeff Kirsher skb->protocol = eth_type_trans(skb, dev);
14969bba23b0SJeff Kirsher #ifdef VLAN_SUPPORT
14979bba23b0SJeff Kirsher if (debug > 4)
14989bba23b0SJeff Kirsher printk(KERN_DEBUG " netdev_rx() status2 of %d was %#4.4x.\n", np->rx_done, le16_to_cpu(desc->status2));
14999bba23b0SJeff Kirsher #endif
15009bba23b0SJeff Kirsher if (le16_to_cpu(desc->status2) & 0x0100) {
15019bba23b0SJeff Kirsher skb->ip_summed = CHECKSUM_UNNECESSARY;
15029bba23b0SJeff Kirsher dev->stats.rx_compressed++;
15039bba23b0SJeff Kirsher }
15049bba23b0SJeff Kirsher /*
15059bba23b0SJeff Kirsher * This feature doesn't seem to be working, at least
15069bba23b0SJeff Kirsher * with the two firmware versions I have. If the GFP sees
15079bba23b0SJeff Kirsher * an IP fragment, it either ignores it completely, or reports
15089bba23b0SJeff Kirsher * "bad checksum" on it.
15099bba23b0SJeff Kirsher *
15109bba23b0SJeff Kirsher * Maybe I missed something -- corrections are welcome.
15119bba23b0SJeff Kirsher * Until then, the printk stays. :-) -Ion
15129bba23b0SJeff Kirsher */
15139bba23b0SJeff Kirsher else if (le16_to_cpu(desc->status2) & 0x0040) {
15149bba23b0SJeff Kirsher skb->ip_summed = CHECKSUM_COMPLETE;
15159bba23b0SJeff Kirsher skb->csum = le16_to_cpu(desc->csum);
15169bba23b0SJeff Kirsher printk(KERN_DEBUG "%s: checksum_hw, status2 = %#x\n", dev->name, le16_to_cpu(desc->status2));
15179bba23b0SJeff Kirsher }
15189bba23b0SJeff Kirsher #ifdef VLAN_SUPPORT
15199bba23b0SJeff Kirsher if (le16_to_cpu(desc->status2) & 0x0200) {
15209bba23b0SJeff Kirsher u16 vlid = le16_to_cpu(desc->vlanid);
15219bba23b0SJeff Kirsher
15229bba23b0SJeff Kirsher if (debug > 4) {
15239bba23b0SJeff Kirsher printk(KERN_DEBUG " netdev_rx() vlanid = %d\n",
15249bba23b0SJeff Kirsher vlid);
15259bba23b0SJeff Kirsher }
152686a9bad3SPatrick McHardy __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlid);
15279bba23b0SJeff Kirsher }
15289bba23b0SJeff Kirsher #endif /* VLAN_SUPPORT */
15299bba23b0SJeff Kirsher netif_receive_skb(skb);
15309bba23b0SJeff Kirsher dev->stats.rx_packets++;
15319bba23b0SJeff Kirsher
15329bba23b0SJeff Kirsher next_rx:
15339bba23b0SJeff Kirsher np->cur_rx++;
15349bba23b0SJeff Kirsher desc->status = 0;
15359bba23b0SJeff Kirsher np->rx_done = (np->rx_done + 1) % DONE_Q_SIZE;
15369bba23b0SJeff Kirsher }
15379bba23b0SJeff Kirsher
15389bba23b0SJeff Kirsher if (*quota == 0) { /* out of rx quota */
15399bba23b0SJeff Kirsher retcode = 1;
15409bba23b0SJeff Kirsher goto out;
15419bba23b0SJeff Kirsher }
15429bba23b0SJeff Kirsher writew(np->rx_done, np->base + CompletionQConsumerIdx);
15439bba23b0SJeff Kirsher
15449bba23b0SJeff Kirsher out:
15459bba23b0SJeff Kirsher refill_rx_ring(dev);
15469bba23b0SJeff Kirsher if (debug > 5)
15479bba23b0SJeff Kirsher printk(KERN_DEBUG " exiting netdev_rx(): %d, status of %d was %#8.8x.\n",
15489bba23b0SJeff Kirsher retcode, np->rx_done, desc_status);
15499bba23b0SJeff Kirsher return retcode;
15509bba23b0SJeff Kirsher }
15519bba23b0SJeff Kirsher
netdev_poll(struct napi_struct * napi,int budget)15529bba23b0SJeff Kirsher static int netdev_poll(struct napi_struct *napi, int budget)
15539bba23b0SJeff Kirsher {
15549bba23b0SJeff Kirsher struct netdev_private *np = container_of(napi, struct netdev_private, napi);
15559bba23b0SJeff Kirsher struct net_device *dev = np->dev;
15569bba23b0SJeff Kirsher u32 intr_status;
15579bba23b0SJeff Kirsher void __iomem *ioaddr = np->base;
15589bba23b0SJeff Kirsher int quota = budget;
15599bba23b0SJeff Kirsher
15609bba23b0SJeff Kirsher do {
15619bba23b0SJeff Kirsher writel(IntrRxDone | IntrRxEmpty, ioaddr + IntrClear);
15629bba23b0SJeff Kirsher
15639bba23b0SJeff Kirsher if (__netdev_rx(dev, "a))
15649bba23b0SJeff Kirsher goto out;
15659bba23b0SJeff Kirsher
15669bba23b0SJeff Kirsher intr_status = readl(ioaddr + IntrStatus);
15679bba23b0SJeff Kirsher } while (intr_status & (IntrRxDone | IntrRxEmpty));
15689bba23b0SJeff Kirsher
15699bba23b0SJeff Kirsher napi_complete(napi);
15709bba23b0SJeff Kirsher intr_status = readl(ioaddr + IntrEnable);
15719bba23b0SJeff Kirsher intr_status |= IntrRxDone | IntrRxEmpty;
15729bba23b0SJeff Kirsher writel(intr_status, ioaddr + IntrEnable);
15739bba23b0SJeff Kirsher
15749bba23b0SJeff Kirsher out:
15759bba23b0SJeff Kirsher if (debug > 5)
15769bba23b0SJeff Kirsher printk(KERN_DEBUG " exiting netdev_poll(): %d.\n",
15779bba23b0SJeff Kirsher budget - quota);
15789bba23b0SJeff Kirsher
15799bba23b0SJeff Kirsher /* Restart Rx engine if stopped. */
15809bba23b0SJeff Kirsher return budget - quota;
15819bba23b0SJeff Kirsher }
15829bba23b0SJeff Kirsher
refill_rx_ring(struct net_device * dev)15839bba23b0SJeff Kirsher static void refill_rx_ring(struct net_device *dev)
15849bba23b0SJeff Kirsher {
15859bba23b0SJeff Kirsher struct netdev_private *np = netdev_priv(dev);
15869bba23b0SJeff Kirsher struct sk_buff *skb;
15879bba23b0SJeff Kirsher int entry = -1;
15889bba23b0SJeff Kirsher
15899bba23b0SJeff Kirsher /* Refill the Rx ring buffers. */
15909bba23b0SJeff Kirsher for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
15919bba23b0SJeff Kirsher entry = np->dirty_rx % RX_RING_SIZE;
15929bba23b0SJeff Kirsher if (np->rx_info[entry].skb == NULL) {
15931d266430SPradeep A Dalvi skb = netdev_alloc_skb(dev, np->rx_buf_sz);
15949bba23b0SJeff Kirsher np->rx_info[entry].skb = skb;
15959bba23b0SJeff Kirsher if (skb == NULL)
15969bba23b0SJeff Kirsher break; /* Better luck next round. */
15979bba23b0SJeff Kirsher np->rx_info[entry].mapping =
15985d63cceaSChristophe JAILLET dma_map_single(&np->pci_dev->dev, skb->data,
15995d63cceaSChristophe JAILLET np->rx_buf_sz, DMA_FROM_DEVICE);
16005d63cceaSChristophe JAILLET if (dma_mapping_error(&np->pci_dev->dev, np->rx_info[entry].mapping)) {
1601d1156b48SAlexey Khoroshilov dev_kfree_skb(skb);
1602d1156b48SAlexey Khoroshilov np->rx_info[entry].skb = NULL;
1603d1156b48SAlexey Khoroshilov break;
1604d1156b48SAlexey Khoroshilov }
16059bba23b0SJeff Kirsher np->rx_ring[entry].rxaddr =
16069bba23b0SJeff Kirsher cpu_to_dma(np->rx_info[entry].mapping | RxDescValid);
16079bba23b0SJeff Kirsher }
16089bba23b0SJeff Kirsher if (entry == RX_RING_SIZE - 1)
16099bba23b0SJeff Kirsher np->rx_ring[entry].rxaddr |= cpu_to_dma(RxDescEndRing);
16109bba23b0SJeff Kirsher }
16119bba23b0SJeff Kirsher if (entry >= 0)
16129bba23b0SJeff Kirsher writew(entry, np->base + RxDescQIdx);
16139bba23b0SJeff Kirsher }
16149bba23b0SJeff Kirsher
16159bba23b0SJeff Kirsher
netdev_media_change(struct net_device * dev)16169bba23b0SJeff Kirsher static void netdev_media_change(struct net_device *dev)
16179bba23b0SJeff Kirsher {
16189bba23b0SJeff Kirsher struct netdev_private *np = netdev_priv(dev);
16199bba23b0SJeff Kirsher void __iomem *ioaddr = np->base;
16209bba23b0SJeff Kirsher u16 reg0, reg1, reg4, reg5;
16219bba23b0SJeff Kirsher u32 new_tx_mode;
16229bba23b0SJeff Kirsher u32 new_intr_timer_ctrl;
16239bba23b0SJeff Kirsher
16249bba23b0SJeff Kirsher /* reset status first */
16259bba23b0SJeff Kirsher mdio_read(dev, np->phys[0], MII_BMCR);
16269bba23b0SJeff Kirsher mdio_read(dev, np->phys[0], MII_BMSR);
16279bba23b0SJeff Kirsher
16289bba23b0SJeff Kirsher reg0 = mdio_read(dev, np->phys[0], MII_BMCR);
16299bba23b0SJeff Kirsher reg1 = mdio_read(dev, np->phys[0], MII_BMSR);
16309bba23b0SJeff Kirsher
16319bba23b0SJeff Kirsher if (reg1 & BMSR_LSTATUS) {
16329bba23b0SJeff Kirsher /* link is up */
16339bba23b0SJeff Kirsher if (reg0 & BMCR_ANENABLE) {
16349bba23b0SJeff Kirsher /* autonegotiation is enabled */
16359bba23b0SJeff Kirsher reg4 = mdio_read(dev, np->phys[0], MII_ADVERTISE);
16369bba23b0SJeff Kirsher reg5 = mdio_read(dev, np->phys[0], MII_LPA);
16379bba23b0SJeff Kirsher if (reg4 & ADVERTISE_100FULL && reg5 & LPA_100FULL) {
16389bba23b0SJeff Kirsher np->speed100 = 1;
16399bba23b0SJeff Kirsher np->mii_if.full_duplex = 1;
16409bba23b0SJeff Kirsher } else if (reg4 & ADVERTISE_100HALF && reg5 & LPA_100HALF) {
16419bba23b0SJeff Kirsher np->speed100 = 1;
16429bba23b0SJeff Kirsher np->mii_if.full_duplex = 0;
16439bba23b0SJeff Kirsher } else if (reg4 & ADVERTISE_10FULL && reg5 & LPA_10FULL) {
16449bba23b0SJeff Kirsher np->speed100 = 0;
16459bba23b0SJeff Kirsher np->mii_if.full_duplex = 1;
16469bba23b0SJeff Kirsher } else {
16479bba23b0SJeff Kirsher np->speed100 = 0;
16489bba23b0SJeff Kirsher np->mii_if.full_duplex = 0;
16499bba23b0SJeff Kirsher }
16509bba23b0SJeff Kirsher } else {
16519bba23b0SJeff Kirsher /* autonegotiation is disabled */
16529bba23b0SJeff Kirsher if (reg0 & BMCR_SPEED100)
16539bba23b0SJeff Kirsher np->speed100 = 1;
16549bba23b0SJeff Kirsher else
16559bba23b0SJeff Kirsher np->speed100 = 0;
16569bba23b0SJeff Kirsher if (reg0 & BMCR_FULLDPLX)
16579bba23b0SJeff Kirsher np->mii_if.full_duplex = 1;
16589bba23b0SJeff Kirsher else
16599bba23b0SJeff Kirsher np->mii_if.full_duplex = 0;
16609bba23b0SJeff Kirsher }
16619bba23b0SJeff Kirsher netif_carrier_on(dev);
16629bba23b0SJeff Kirsher printk(KERN_DEBUG "%s: Link is up, running at %sMbit %s-duplex\n",
16639bba23b0SJeff Kirsher dev->name,
16649bba23b0SJeff Kirsher np->speed100 ? "100" : "10",
16659bba23b0SJeff Kirsher np->mii_if.full_duplex ? "full" : "half");
16669bba23b0SJeff Kirsher
16679bba23b0SJeff Kirsher new_tx_mode = np->tx_mode & ~FullDuplex; /* duplex setting */
16689bba23b0SJeff Kirsher if (np->mii_if.full_duplex)
16699bba23b0SJeff Kirsher new_tx_mode |= FullDuplex;
16709bba23b0SJeff Kirsher if (np->tx_mode != new_tx_mode) {
16719bba23b0SJeff Kirsher np->tx_mode = new_tx_mode;
16729bba23b0SJeff Kirsher writel(np->tx_mode | MiiSoftReset, ioaddr + TxMode);
16739bba23b0SJeff Kirsher udelay(1000);
16749bba23b0SJeff Kirsher writel(np->tx_mode, ioaddr + TxMode);
16759bba23b0SJeff Kirsher }
16769bba23b0SJeff Kirsher
16779bba23b0SJeff Kirsher new_intr_timer_ctrl = np->intr_timer_ctrl & ~Timer10X;
16789bba23b0SJeff Kirsher if (np->speed100)
16799bba23b0SJeff Kirsher new_intr_timer_ctrl |= Timer10X;
16809bba23b0SJeff Kirsher if (np->intr_timer_ctrl != new_intr_timer_ctrl) {
16819bba23b0SJeff Kirsher np->intr_timer_ctrl = new_intr_timer_ctrl;
16829bba23b0SJeff Kirsher writel(new_intr_timer_ctrl, ioaddr + IntrTimerCtrl);
16839bba23b0SJeff Kirsher }
16849bba23b0SJeff Kirsher } else {
16859bba23b0SJeff Kirsher netif_carrier_off(dev);
16869bba23b0SJeff Kirsher printk(KERN_DEBUG "%s: Link is down\n", dev->name);
16879bba23b0SJeff Kirsher }
16889bba23b0SJeff Kirsher }
16899bba23b0SJeff Kirsher
16909bba23b0SJeff Kirsher
netdev_error(struct net_device * dev,int intr_status)16919bba23b0SJeff Kirsher static void netdev_error(struct net_device *dev, int intr_status)
16929bba23b0SJeff Kirsher {
16939bba23b0SJeff Kirsher struct netdev_private *np = netdev_priv(dev);
16949bba23b0SJeff Kirsher
16959bba23b0SJeff Kirsher /* Came close to underrunning the Tx FIFO, increase threshold. */
16969bba23b0SJeff Kirsher if (intr_status & IntrTxDataLow) {
16979bba23b0SJeff Kirsher if (np->tx_threshold <= PKT_BUF_SZ / 16) {
16989bba23b0SJeff Kirsher writel(++np->tx_threshold, np->base + TxThreshold);
16999bba23b0SJeff Kirsher printk(KERN_NOTICE "%s: PCI bus congestion, increasing Tx FIFO threshold to %d bytes\n",
17009bba23b0SJeff Kirsher dev->name, np->tx_threshold * 16);
17019bba23b0SJeff Kirsher } else
17029bba23b0SJeff Kirsher printk(KERN_WARNING "%s: PCI Tx underflow -- adapter is probably malfunctioning\n", dev->name);
17039bba23b0SJeff Kirsher }
17049bba23b0SJeff Kirsher if (intr_status & IntrRxGFPDead) {
17059bba23b0SJeff Kirsher dev->stats.rx_fifo_errors++;
17069bba23b0SJeff Kirsher dev->stats.rx_errors++;
17079bba23b0SJeff Kirsher }
17089bba23b0SJeff Kirsher if (intr_status & (IntrNoTxCsum | IntrDMAErr)) {
17099bba23b0SJeff Kirsher dev->stats.tx_fifo_errors++;
17109bba23b0SJeff Kirsher dev->stats.tx_errors++;
17119bba23b0SJeff Kirsher }
17129bba23b0SJeff Kirsher if ((intr_status & ~(IntrNormalMask | IntrAbnormalSummary | IntrLinkChange | IntrStatsMax | IntrTxDataLow | IntrRxGFPDead | IntrNoTxCsum | IntrPCIPad)) && debug)
17139bba23b0SJeff Kirsher printk(KERN_ERR "%s: Something Wicked happened! %#8.8x.\n",
17149bba23b0SJeff Kirsher dev->name, intr_status);
17159bba23b0SJeff Kirsher }
17169bba23b0SJeff Kirsher
17179bba23b0SJeff Kirsher
/* Copy the adapter's hardware statistics counters into dev->stats and
 * return them.  The counters live in MMIO space in the 0x57000 statistics
 * block; each is copied wholesale except rx_dropped, which is accumulated
 * and then reset in the hardware.
 */
static struct net_device_stats *get_stats(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;

	/* This adapter architecture needs no SMP locks. */
	dev->stats.tx_bytes = readl(ioaddr + 0x57010);
	dev->stats.rx_bytes = readl(ioaddr + 0x57044);
	dev->stats.tx_packets = readl(ioaddr + 0x57000);
	/* Sum of two hardware abort counters. */
	dev->stats.tx_aborted_errors =
		readl(ioaddr + 0x57024) + readl(ioaddr + 0x57028);
	dev->stats.tx_window_errors = readl(ioaddr + 0x57018);
	/* Sum of two hardware collision counters. */
	dev->stats.collisions =
		readl(ioaddr + 0x57004) + readl(ioaddr + 0x57008);

	/* The chip only need report frame silently dropped. */
	/* Accumulate the running drop count, then zero the hardware counter. */
	dev->stats.rx_dropped += readw(ioaddr + RxDMAStatus);
	writew(0, ioaddr + RxDMAStatus);
	dev->stats.rx_crc_errors = readl(ioaddr + 0x5703C);
	dev->stats.rx_frame_errors = readl(ioaddr + 0x57040);
	dev->stats.rx_length_errors = readl(ioaddr + 0x57058);
	dev->stats.rx_missed_errors = readl(ioaddr + 0x5707C);

	return &dev->stats;
}
17439bba23b0SJeff Kirsher
17449bba23b0SJeff Kirsher #ifdef VLAN_SUPPORT
/* Program the hardware VLAN filter table with the currently active VLAN
 * IDs (one 16-byte-strided slot each, at most 32 entries) and return the
 * RxFilterMode bits that enable VLAN filtering.
 */
static u32 set_vlan_mode(struct netdev_private *np)
{
	u32 ret = VlanMode;
	u16 vid;
	void __iomem *filter_addr = np->base + HashTable + 8;
	int vlan_count = 0;

	for_each_set_bit(vid, np->active_vlans, VLAN_N_VID) {
		if (vlan_count == 32)
			break;
		writew(vid, filter_addr);
		filter_addr += 16;	/* filter slots are 16 bytes apart */
		vlan_count++;
	}
	/* NOTE(review): when vlan_count == 32 the while loop below can never
	 * execute, so unused slots are never zero-filled and PerfectFilterVlan
	 * is only set with a full table.  The condition looks like it was
	 * meant to be "< 32" -- confirm against the hardware documentation
	 * before changing. */
	if (vlan_count == 32) {
		ret |= PerfectFilterVlan;
		while (vlan_count < 32) {
			writew(0, filter_addr);
			filter_addr += 16;
			vlan_count++;
		}
	}
	return ret;
}
17699bba23b0SJeff Kirsher #endif /* VLAN_SUPPORT */
17709bba23b0SJeff Kirsher
/* Program the receive filter according to the interface flags and the
 * multicast list.  One of four modes is selected: promiscuous, accept-all
 * multicast, a 16-entry perfect filter (up to 14 multicast addresses, the
 * first two entries being reserved), or a 32x16-bit multicast hash table.
 * The chosen mode bits are written to RxFilterMode last.
 */
static void set_rx_mode(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	u32 rx_mode = MinVLANPrio;
	struct netdev_hw_addr *ha;
	int i;

#ifdef VLAN_SUPPORT
	rx_mode |= set_vlan_mode(np);
#endif /* VLAN_SUPPORT */

	if (dev->flags & IFF_PROMISC) {	/* Set promiscuous. */
		rx_mode |= AcceptAll;
	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
		   (dev->flags & IFF_ALLMULTI)) {
		/* Too many to match, or accept all multicasts. */
		rx_mode |= AcceptBroadcast|AcceptAllMulticast|PerfectFilter;
	} else if (netdev_mc_count(dev) <= 14) {
		/* Use the 16 element perfect filter, skip first two entries. */
		void __iomem *filter_addr = ioaddr + PerfFilterTable + 2 * 16;
		const __be16 *eaddrs;
		netdev_for_each_mc_addr(ha, dev) {
			/* The hardware takes the address 16 bits at a time,
			   in reverse word order (eaddrs[2] first). */
			eaddrs = (__be16 *) ha->addr;
			writew(be16_to_cpu(eaddrs[2]), filter_addr); filter_addr += 4;
			writew(be16_to_cpu(eaddrs[1]), filter_addr); filter_addr += 4;
			writew(be16_to_cpu(eaddrs[0]), filter_addr); filter_addr += 8;
		}
		/* Fill the unused filter entries with our own station address. */
		eaddrs = (const __be16 *)dev->dev_addr;
		i = netdev_mc_count(dev) + 2;
		while (i++ < 16) {
			writew(be16_to_cpu(eaddrs[0]), filter_addr); filter_addr += 4;
			writew(be16_to_cpu(eaddrs[1]), filter_addr); filter_addr += 4;
			writew(be16_to_cpu(eaddrs[2]), filter_addr); filter_addr += 8;
		}
		rx_mode |= AcceptBroadcast|PerfectFilter;
	} else {
		/* Must use a multicast hash table. */
		void __iomem *filter_addr;
		const __be16 *eaddrs;
		__le16 mc_filter[32] __attribute__ ((aligned(sizeof(long)))); /* Multicast hash filter */

		/* Build the 32x16-bit hash table in memory first. */
		memset(mc_filter, 0, sizeof(mc_filter));
		netdev_for_each_mc_addr(ha, dev) {
			/* The chip uses the upper 9 CRC bits
			   as index into the hash table */
			int bit_nr = ether_crc_le(ETH_ALEN, ha->addr) >> 23;
			__le32 *fptr = (__le32 *) &mc_filter[(bit_nr >> 4) & ~1];

			*fptr |= cpu_to_le32(1 << (bit_nr & 31));
		}
		/* Clear the perfect filter list, skip first two entries. */
		filter_addr = ioaddr + PerfFilterTable + 2 * 16;
		eaddrs = (const __be16 *)dev->dev_addr;
		for (i = 2; i < 16; i++) {
			writew(be16_to_cpu(eaddrs[0]), filter_addr); filter_addr += 4;
			writew(be16_to_cpu(eaddrs[1]), filter_addr); filter_addr += 4;
			writew(be16_to_cpu(eaddrs[2]), filter_addr); filter_addr += 8;
		}
		/* Copy the hash table into the chip (16-byte stride). */
		for (filter_addr = ioaddr + HashTable, i = 0; i < 32; filter_addr+= 16, i++)
			writew(mc_filter[i], filter_addr);
		rx_mode |= AcceptBroadcast|PerfectFilter|HashFilter;
	}
	writel(rx_mode, ioaddr + RxFilterMode);
}
18369bba23b0SJeff Kirsher
check_if_running(struct net_device * dev)18379bba23b0SJeff Kirsher static int check_if_running(struct net_device *dev)
18389bba23b0SJeff Kirsher {
18399bba23b0SJeff Kirsher if (!netif_running(dev))
18409bba23b0SJeff Kirsher return -EINVAL;
18419bba23b0SJeff Kirsher return 0;
18429bba23b0SJeff Kirsher }
18439bba23b0SJeff Kirsher
get_drvinfo(struct net_device * dev,struct ethtool_drvinfo * info)18449bba23b0SJeff Kirsher static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
18459bba23b0SJeff Kirsher {
18469bba23b0SJeff Kirsher struct netdev_private *np = netdev_priv(dev);
1847*f029c781SWolfram Sang strscpy(info->driver, DRV_NAME, sizeof(info->driver));
1848*f029c781SWolfram Sang strscpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
18499bba23b0SJeff Kirsher }
18509bba23b0SJeff Kirsher
get_link_ksettings(struct net_device * dev,struct ethtool_link_ksettings * cmd)1851f1cd5aa0SPhilippe Reynes static int get_link_ksettings(struct net_device *dev,
1852f1cd5aa0SPhilippe Reynes struct ethtool_link_ksettings *cmd)
18539bba23b0SJeff Kirsher {
18549bba23b0SJeff Kirsher struct netdev_private *np = netdev_priv(dev);
18559bba23b0SJeff Kirsher spin_lock_irq(&np->lock);
1856f1cd5aa0SPhilippe Reynes mii_ethtool_get_link_ksettings(&np->mii_if, cmd);
18579bba23b0SJeff Kirsher spin_unlock_irq(&np->lock);
18589bba23b0SJeff Kirsher return 0;
18599bba23b0SJeff Kirsher }
18609bba23b0SJeff Kirsher
set_link_ksettings(struct net_device * dev,const struct ethtool_link_ksettings * cmd)1861f1cd5aa0SPhilippe Reynes static int set_link_ksettings(struct net_device *dev,
1862f1cd5aa0SPhilippe Reynes const struct ethtool_link_ksettings *cmd)
18639bba23b0SJeff Kirsher {
18649bba23b0SJeff Kirsher struct netdev_private *np = netdev_priv(dev);
18659bba23b0SJeff Kirsher int res;
18669bba23b0SJeff Kirsher spin_lock_irq(&np->lock);
1867f1cd5aa0SPhilippe Reynes res = mii_ethtool_set_link_ksettings(&np->mii_if, cmd);
18689bba23b0SJeff Kirsher spin_unlock_irq(&np->lock);
18699bba23b0SJeff Kirsher check_duplex(dev);
18709bba23b0SJeff Kirsher return res;
18719bba23b0SJeff Kirsher }
18729bba23b0SJeff Kirsher
nway_reset(struct net_device * dev)18739bba23b0SJeff Kirsher static int nway_reset(struct net_device *dev)
18749bba23b0SJeff Kirsher {
18759bba23b0SJeff Kirsher struct netdev_private *np = netdev_priv(dev);
18769bba23b0SJeff Kirsher return mii_nway_restart(&np->mii_if);
18779bba23b0SJeff Kirsher }
18789bba23b0SJeff Kirsher
get_link(struct net_device * dev)18799bba23b0SJeff Kirsher static u32 get_link(struct net_device *dev)
18809bba23b0SJeff Kirsher {
18819bba23b0SJeff Kirsher struct netdev_private *np = netdev_priv(dev);
18829bba23b0SJeff Kirsher return mii_link_ok(&np->mii_if);
18839bba23b0SJeff Kirsher }
18849bba23b0SJeff Kirsher
/* ethtool ->get_msglevel: report the driver-global debug level
 * (shared across all starfire devices, not per-netdev). */
static u32 get_msglevel(struct net_device *dev)
{
	return debug;
}
18899bba23b0SJeff Kirsher
/* ethtool ->set_msglevel: set the driver-global debug level; affects
 * every starfire device, since "debug" is a module-wide variable. */
static void set_msglevel(struct net_device *dev, u32 val)
{
	debug = val;
}
18949bba23b0SJeff Kirsher
/* ethtool operation table.  The .begin hook (check_if_running) returns
 * -EINVAL while the interface is down, so the other handlers may assume
 * the device has been opened. */
static const struct ethtool_ops ethtool_ops = {
	.begin = check_if_running,
	.get_drvinfo = get_drvinfo,
	.nway_reset = nway_reset,
	.get_link = get_link,
	.get_msglevel = get_msglevel,
	.set_msglevel = set_msglevel,
	.get_link_ksettings = get_link_ksettings,
	.set_link_ksettings = set_link_ksettings,
};
19059bba23b0SJeff Kirsher
netdev_ioctl(struct net_device * dev,struct ifreq * rq,int cmd)19069bba23b0SJeff Kirsher static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
19079bba23b0SJeff Kirsher {
19089bba23b0SJeff Kirsher struct netdev_private *np = netdev_priv(dev);
19099bba23b0SJeff Kirsher struct mii_ioctl_data *data = if_mii(rq);
19109bba23b0SJeff Kirsher int rc;
19119bba23b0SJeff Kirsher
19129bba23b0SJeff Kirsher if (!netif_running(dev))
19139bba23b0SJeff Kirsher return -EINVAL;
19149bba23b0SJeff Kirsher
19159bba23b0SJeff Kirsher spin_lock_irq(&np->lock);
19169bba23b0SJeff Kirsher rc = generic_mii_ioctl(&np->mii_if, data, cmd, NULL);
19179bba23b0SJeff Kirsher spin_unlock_irq(&np->lock);
19189bba23b0SJeff Kirsher
19199bba23b0SJeff Kirsher if ((cmd == SIOCSMIIREG) && (data->phy_id == np->phys[0]))
19209bba23b0SJeff Kirsher check_duplex(dev);
19219bba23b0SJeff Kirsher
19229bba23b0SJeff Kirsher return rc;
19239bba23b0SJeff Kirsher }
19249bba23b0SJeff Kirsher
/* Bring the interface down: stop the transmit queue and NAPI, mask and
 * halt the chip, release the IRQ, then free every Rx/Tx skb along with
 * its streaming DMA mapping.  The teardown order matters: interrupts are
 * masked and the chip stopped before the IRQ and buffers are released.
 */
static int netdev_close(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	int i;

	netif_stop_queue(dev);

	napi_disable(&np->napi);

	if (debug > 1) {
		printk(KERN_DEBUG "%s: Shutting down ethercard, Intr status %#8.8x.\n",
			   dev->name, (int) readl(ioaddr + IntrStatus));
		printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d,  Rx %d / %d.\n",
		       dev->name, np->cur_tx, np->dirty_tx,
		       np->cur_rx, np->dirty_rx);
	}

	/* Disable interrupts by clearing the interrupt mask. */
	writel(0, ioaddr + IntrEnable);

	/* Stop the chip's Tx and Rx processes. */
	writel(0, ioaddr + GenCtrl);
	readl(ioaddr + GenCtrl);	/* read back to flush the posted write */

	/* Optional dump of the first few descriptors of each ring. */
	if (debug > 5) {
		printk(KERN_DEBUG"  Tx ring at %#llx:\n",
		       (long long) np->tx_ring_dma);
		for (i = 0; i < 8 /* TX_RING_SIZE is huge! */; i++)
			printk(KERN_DEBUG " #%d desc. %#8.8x %#llx -> %#8.8x.\n",
			       i, le32_to_cpu(np->tx_ring[i].status),
			       (long long) dma_to_cpu(np->tx_ring[i].addr),
			       le32_to_cpu(np->tx_done_q[i].status));
		printk(KERN_DEBUG "  Rx ring at %#llx -> %p:\n",
		       (long long) np->rx_ring_dma, np->rx_done_q);
		if (np->rx_done_q)
			for (i = 0; i < 8 /* RX_RING_SIZE */; i++) {
				printk(KERN_DEBUG " #%d desc. %#llx -> %#8.8x\n",
				       i, (long long) dma_to_cpu(np->rx_ring[i].rxaddr), le32_to_cpu(np->rx_done_q[i].status));
		}
	}

	free_irq(np->pci_dev->irq, dev);

	/* Free all the skbuffs in the Rx queue. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].rxaddr = cpu_to_dma(0xBADF00D0); /* An invalid address. */
		if (np->rx_info[i].skb != NULL) {
			dma_unmap_single(&np->pci_dev->dev,
					 np->rx_info[i].mapping,
					 np->rx_buf_sz, DMA_FROM_DEVICE);
			dev_kfree_skb(np->rx_info[i].skb);
		}
		np->rx_info[i].skb = NULL;
		np->rx_info[i].mapping = 0;
	}
	/* Free any skbs still pending in the Tx ring. */
	for (i = 0; i < TX_RING_SIZE; i++) {
		struct sk_buff *skb = np->tx_info[i].skb;
		if (skb == NULL)
			continue;
		dma_unmap_single(&np->pci_dev->dev, np->tx_info[i].mapping,
				 skb_first_frag_len(skb), DMA_TO_DEVICE);
		np->tx_info[i].mapping = 0;
		dev_kfree_skb(skb);
		np->tx_info[i].skb = NULL;
	}

	return 0;
}
19949bba23b0SJeff Kirsher
starfire_suspend(struct device * dev_d)1995a7c48c72SVaibhav Gupta static int __maybe_unused starfire_suspend(struct device *dev_d)
19969bba23b0SJeff Kirsher {
1997a7c48c72SVaibhav Gupta struct net_device *dev = dev_get_drvdata(dev_d);
19989bba23b0SJeff Kirsher
19999bba23b0SJeff Kirsher if (netif_running(dev)) {
20009bba23b0SJeff Kirsher netif_device_detach(dev);
20019bba23b0SJeff Kirsher netdev_close(dev);
20029bba23b0SJeff Kirsher }
20039bba23b0SJeff Kirsher
20049bba23b0SJeff Kirsher return 0;
20059bba23b0SJeff Kirsher }
20069bba23b0SJeff Kirsher
starfire_resume(struct device * dev_d)2007a7c48c72SVaibhav Gupta static int __maybe_unused starfire_resume(struct device *dev_d)
20089bba23b0SJeff Kirsher {
2009a7c48c72SVaibhav Gupta struct net_device *dev = dev_get_drvdata(dev_d);
20109bba23b0SJeff Kirsher
20119bba23b0SJeff Kirsher if (netif_running(dev)) {
20129bba23b0SJeff Kirsher netdev_open(dev);
20139bba23b0SJeff Kirsher netif_device_attach(dev);
20149bba23b0SJeff Kirsher }
20159bba23b0SJeff Kirsher
20169bba23b0SJeff Kirsher return 0;
20179bba23b0SJeff Kirsher }
20189bba23b0SJeff Kirsher
/* PCI ->remove: unregister the netdev, free the coherent queue memory,
 * power the chip down to D3hot, and release MMIO/PCI resources.  The
 * final free_netdev() also frees np, so np must not be touched after it.
 */
static void starfire_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct netdev_private *np = netdev_priv(dev);

	BUG_ON(!dev);

	unregister_netdev(dev);

	if (np->queue_mem)
		dma_free_coherent(&pdev->dev, np->queue_mem_size,
				  np->queue_mem, np->queue_mem_dma);


	/* XXX: add wakeup code -- requires firmware for MagicPacket */
	pci_set_power_state(pdev, PCI_D3hot);	/* go to sleep in D3 mode */
	pci_disable_device(pdev);

	iounmap(np->base);
	pci_release_regions(pdev);

	free_netdev(dev);			/* Will also free np!! */
}
20429bba23b0SJeff Kirsher
2043a7c48c72SVaibhav Gupta static SIMPLE_DEV_PM_OPS(starfire_pm_ops, starfire_suspend, starfire_resume);
20449bba23b0SJeff Kirsher
/* PCI driver glue: probe/remove handlers, PM ops, and the device ID
 * table declared earlier in this file. */
static struct pci_driver starfire_driver = {
	.name		= DRV_NAME,
	.probe		= starfire_init_one,
	.remove		= starfire_remove_one,
	.driver.pm	= &starfire_pm_ops,
	.id_table	= starfire_pci_tbl,
};
20529bba23b0SJeff Kirsher
20539bba23b0SJeff Kirsher
/* Module init: announce NAPI mode (module builds only) and register the
 * PCI driver.  Returns 0 or a negative errno from pci_register_driver(). */
static int __init starfire_init (void)
{
/* when a module, this is printed whether or not devices are found in probe */
#ifdef MODULE
	printk(KERN_INFO DRV_NAME ": polling (NAPI) enabled\n");
#endif

	/* The driver stores DMA addresses in netdrv_addr_t; fail the build
	   if it is not exactly as wide as this platform's dma_addr_t. */
	BUILD_BUG_ON(sizeof(dma_addr_t) != sizeof(netdrv_addr_t));

	return pci_register_driver(&starfire_driver);
}
20659bba23b0SJeff Kirsher
20669bba23b0SJeff Kirsher
/* Module exit: unregister the PCI driver (which in turn invokes
 * starfire_remove_one() for each bound device). */
static void __exit starfire_cleanup (void)
{
	pci_unregister_driver (&starfire_driver);
}
20719bba23b0SJeff Kirsher
20729bba23b0SJeff Kirsher
/* Module entry/exit points. */
module_init(starfire_init);
module_exit(starfire_cleanup);
2075