/* via-rhine.c: A Linux Ethernet device driver for VIA Rhine family chips. */
/*
	Written 1998-2001 by Donald Becker.

	Current Maintainer: Kevin Brace <kevinbrace@bracecomputerlab.com>

	This software may be used and distributed according to the terms of
	the GNU General Public License (GPL), incorporated herein by reference.
	Drivers based on or derived from this code fall under the GPL and must
	retain the authorship, copyright and license notice. This file is not
	a complete program and may only be used when the entire operating
	system is licensed under the GPL.

	This driver is designed for the VIA VT86C100A Rhine-I.
	It also works with the Rhine-II (6102) and Rhine-III (6105/6105L/6105LOM
	and management NIC 6105M).

	The author may be reached as becker@scyld.com, or C/O
	Scyld Computing Corporation
	410 Severn Ave., Suite 210
	Annapolis MD 21403


	This driver contains some changes from the original Donald Becker
	version. He may or may not be interested in bug reports on this
	code. You can find his versions at:
	http://www.scyld.com/network/via-rhine.html
	[link no longer provides useful info -jgarzik]

*/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#define DRV_NAME	"via-rhine"

#include <linux/types.h>

/* A few user-configurable values.
   These may be modified when a driver module is loaded. */
static int debug = 0;
#define RHINE_MSG_DEFAULT \
	(0x0000)

/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
   Setting to > 1518 effectively disables this feature. */
#if defined(__alpha__) || defined(__arm__) || defined(__hppa__) || \
	defined(CONFIG_SPARC) || defined(__ia64__) || \
	defined(__sh__) || defined(__mips__)
static int rx_copybreak = 1518;
#else
static int rx_copybreak;
#endif

/* Work-around for broken BIOSes: they are unable to get the chip back out of
   power state D3 so PXE booting fails. bootparam(7): via-rhine.avoid_D3=1 */
static bool avoid_D3;

/*
 * In case you are looking for 'options[]' or 'full_duplex[]', they
 * are gone. Use ethtool(8) instead.
 */
/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
   The Rhine has a 64-element, 8390-like hash table. */
static const int multicast_filter_limit = 32;


/* Operational parameters that are set at compile time. */

/* Keep the ring sizes a power of two for compile efficiency.
 * The compiler will convert <unsigned>'%'<2^N> into a bit mask.
 * Making the Tx ring too large decreases the effectiveness of channel
 * bonding and packet priority.
 * With BQL support, we can increase TX ring safely.
 * There are no ill effects from too-large receive rings.
 */
#define TX_RING_SIZE	64
#define TX_QUEUE_LEN	(TX_RING_SIZE - 6)	/* Limit ring entries actually used. */
#define RX_RING_SIZE	64
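
/*
 * Illustrative sketch, not part of the driver: because the ring sizes
 * above are powers of two, wrapping a ring index with '%' costs only a
 * bit mask. rhine_tx_ring_next() is a hypothetical helper; the driver
 * itself open-codes "entry % TX_RING_SIZE" where needed.
 */
static inline unsigned int rhine_tx_ring_next(unsigned int entry)
{
	/* compiles down to (entry + 1) & (TX_RING_SIZE - 1) */
	return (entry + 1) % TX_RING_SIZE;
}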

/* Operational parameters that usually are not changed. */

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT	(2*HZ)

#define PKT_BUF_SZ	1536	/* Size of each temporary Rx buffer. */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>
#include <linux/if_vlan.h>
#include <linux/bitops.h>
#include <linux/workqueue.h>
#include <asm/processor.h>	/* Processor type for cache alignment. */
#include <asm/io.h>
#include <asm/irq.h>
#include <linux/uaccess.h>
#include <linux/dmi.h>

MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("VIA Rhine PCI Fast Ethernet driver");
MODULE_LICENSE("GPL");

module_param(debug, int, 0);
module_param(rx_copybreak, int, 0);
module_param(avoid_D3, bool, 0);
MODULE_PARM_DESC(debug, "VIA Rhine debug message flags");
MODULE_PARM_DESC(rx_copybreak, "VIA Rhine copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(avoid_D3, "Avoid power state D3 (work-around for broken BIOSes)");

#define MCAM_SIZE	32
#define VCAM_SIZE	32

/*
		Theory of Operation

I. Board Compatibility

This driver is designed for the VIA VT86C100A Rhine-I PCI Fast Ethernet
controller and its Rhine-II/III successors.

II. Board-specific settings

Boards with this chip are functional only in a bus-master PCI slot.

Many operational settings are loaded from the EEPROM to the Config word at
offset 0x78. For most of these settings, this driver assumes that they are
correct.
If this driver is compiled to use PCI memory space operations the EEPROM
must be configured to enable memory ops.

III. Driver operation

IIIa. Ring buffers

This driver uses two statically allocated fixed-size descriptor lists
formed into rings by a branch from the final descriptor to the beginning of
the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.

IIIb/c. Transmit/Receive Structure

This driver attempts to use a zero-copy receive and transmit scheme.

Alas, all data buffers are required to start on a 32 bit boundary, so
the driver must often copy transmit packets into bounce buffers.

The driver allocates full frame size skbuffs for the Rx ring buffers at
open() time and passes the skb->data field to the chip as receive data
buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
a fresh skbuff is allocated and the frame is copied to the new skbuff.
When the incoming frame is larger, the skbuff is passed directly up the
protocol stack. Buffers consumed this way are replaced by newly allocated
skbuffs in the last phase of rhine_rx().

The RX_COPYBREAK value is chosen to trade off the memory wasted by
using a full-sized skbuff for small frames vs. the copying costs of larger
frames. New boards are typically used in generously configured machines
and the underfilled buffers have negligible impact compared to the benefit of
a single allocation size, so the default value of zero results in never
copying packets. When copying is done, the cost is usually mitigated by using
a combined copy/checksum routine. Copying also preloads the cache, which is
most useful with small frames.

Since the VIA chips are only able to transfer data to buffers on 32 bit
boundaries, the IP header at offset 14 in an ethernet frame isn't
longword aligned for further processing. Copying these unaligned buffers
has the beneficial effect of 16-byte aligning the IP header.

IIId. Synchronization

The driver runs as two independent, single-threaded flows of control. One
is the send-packet routine, which enforces single-threaded use by the
netdev_priv(dev)->lock spinlock. The other thread is the interrupt handler,
which is single threaded by the hardware and interrupt handling software.

The send packet thread has partial control over the Tx ring. It locks the
netdev_priv(dev)->lock whenever it's queuing a Tx packet. If the next slot in
the ring is not available it stops the transmit queue by
calling netif_stop_queue.

The interrupt handler has exclusive control over the Rx ring and records stats
from the Tx ring. After reaping the stats, it marks the Tx queue entry as
empty by incrementing the dirty_tx mark. If at least half of the entries in
the Rx ring are available the transmit queue is woken up if it was stopped.

IV. Notes

IVb. References

Preliminary VT86C100A manual from http://www.via.com.tw/
http://www.scyld.com/expert/100mbps.html
http://www.scyld.com/expert/NWay.html
ftp://ftp.via.com.tw/public/lan/Products/NIC/VT86C100A/Datasheet/VT86C100A03.pdf
ftp://ftp.via.com.tw/public/lan/Products/NIC/VT6102/Datasheet/VT6102_021.PDF


IVc. Errata

The VT86C100A manual is not a reliable source of information.
The 3043 chip does not handle unaligned transmit or receive buffers, resulting
in significant performance degradation for bounce buffer copies on transmit
and unaligned IP headers on receive.
The chip does not pad to minimum transmit length.

*/
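
/*
 * Minimal sketch of the copy-only-tiny-frames decision described in
 * IIIb/c above. This is an illustrative, hypothetical helper, not the
 * driver's actual receive path (see rhine_rx()): frames shorter than
 * rx_copybreak are copied into a small freshly allocated skb so that
 * the original full-sized buffer can be recycled to the chip.
 */
static inline struct sk_buff *rhine_copybreak_sketch(struct net_device *dev,
						     struct sk_buff *rx_skb,
						     int pkt_len)
{
	struct sk_buff *skb = rx_skb;

	if (pkt_len < rx_copybreak) {
		skb = netdev_alloc_skb_ip_align(dev, pkt_len);
		if (skb)
			skb_copy_to_linear_data(skb, rx_skb->data, pkt_len);
	}
	return skb;
}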


/* This table drives the PCI probe routines. It's mostly boilerplate in all
   of the drivers, and will likely be provided by some future kernel.
   Note the matching code -- the first table entry matches all 56** cards but
   the second matches only the 1234 card.
*/

enum rhine_revs {
	VT86C100A	= 0x00,
	VTunknown0	= 0x20,
	VT6102		= 0x40,
	VT8231		= 0x50,	/* Integrated MAC */
	VT8233		= 0x60,	/* Integrated MAC */
	VT8235		= 0x74,	/* Integrated MAC */
	VT8237		= 0x78,	/* Integrated MAC */
	VT8251		= 0x7C,	/* Integrated MAC */
	VT6105		= 0x80,
	VT6105_B0	= 0x83,
	VT6105L		= 0x8A,
	VT6107		= 0x8C,
	VTunknown2	= 0x8E,
	VT6105M		= 0x90,	/* Management adapter */
};

enum rhine_quirks {
	rqWOL		= 0x0001,	/* Wake-On-LAN support */
	rqForceReset	= 0x0002,
	rq6patterns	= 0x0040,	/* 6 instead of 4 patterns for WOL */
	rqStatusWBRace	= 0x0080,	/* Tx Status Writeback Error possible */
	rqRhineI	= 0x0100,	/* See comment below */
	rqIntPHY	= 0x0200,	/* Integrated PHY */
	rqMgmt		= 0x0400,	/* Management adapter */
	rqNeedEnMMIO	= 0x0800,	/* Whether the core needs to be
					 * switched from PIO mode to MMIO
					 * (only applies to PCI)
					 */
};
/*
 * rqRhineI: VT86C100A (aka Rhine-I) uses different bits to enable
 * MMIO as well as for the collision counter and the Tx FIFO underflow
 * indicator. In addition, Tx and Rx buffers need to be 4-byte aligned.
 */

/* Beware of PCI posted writes */
#define IOSYNC	do { ioread8(ioaddr + StationAddr); } while (0)

static const struct pci_device_id rhine_pci_tbl[] = {
	{ 0x1106, 0x3043, PCI_ANY_ID, PCI_ANY_ID, },	/* VT86C100A */
	{ 0x1106, 0x3065, PCI_ANY_ID, PCI_ANY_ID, },	/* VT6102 */
	{ 0x1106, 0x3106, PCI_ANY_ID, PCI_ANY_ID, },	/* 6105{,L,LOM} */
	{ 0x1106, 0x3053, PCI_ANY_ID, PCI_ANY_ID, },	/* VT6105M */
	{ }	/* terminate list */
};
MODULE_DEVICE_TABLE(pci, rhine_pci_tbl);

/* OpenFirmware identifiers for platform-bus devices
 * The .data field is currently only used to store quirks
 */
static u32 vt8500_quirks = rqWOL | rqForceReset | rq6patterns;
static const struct of_device_id rhine_of_tbl[] = {
	{ .compatible = "via,vt8500-rhine", .data = &vt8500_quirks },
	{ }	/* terminate list */
};
MODULE_DEVICE_TABLE(of, rhine_of_tbl);

/* Offsets to the device registers. */
enum register_offsets {
	StationAddr=0x00, RxConfig=0x06, TxConfig=0x07, ChipCmd=0x08,
	ChipCmd1=0x09, TQWake=0x0A,
	IntrStatus=0x0C, IntrEnable=0x0E,
	MulticastFilter0=0x10, MulticastFilter1=0x14,
	RxRingPtr=0x18, TxRingPtr=0x1C, GFIFOTest=0x54,
	MIIPhyAddr=0x6C, MIIStatus=0x6D, PCIBusConfig=0x6E, PCIBusConfig1=0x6F,
	MIICmd=0x70, MIIRegAddr=0x71, MIIData=0x72, MACRegEEcsr=0x74,
	ConfigA=0x78, ConfigB=0x79, ConfigC=0x7A, ConfigD=0x7B,
	RxMissed=0x7C, RxCRCErrs=0x7E, MiscCmd=0x81,
	StickyHW=0x83, IntrStatus2=0x84,
	CamMask=0x88, CamCon=0x92, CamAddr=0x93,
	WOLcrSet=0xA0, PwcfgSet=0xA1, WOLcgSet=0xA3, WOLcrClr=0xA4,
	WOLcrClr1=0xA6, WOLcgClr=0xA7,
	PwrcsrSet=0xA8, PwrcsrSet1=0xA9, PwrcsrClr=0xAC, PwrcsrClr1=0xAD,
};
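
/*
 * Usage sketch, not from the driver: IOSYNC (or any read-back of a
 * harmless register such as StationAddr) forces posted PCI writes out
 * of the host bridge before the CPU proceeds. rhine_irq_disable_sync()
 * below is a hypothetical example of the pattern.
 */
static inline void rhine_irq_disable_sync(void __iomem *ioaddr)
{
	iowrite16(0x0000, ioaddr + IntrEnable);	/* mask all interrupts... */
	ioread8(ioaddr + StationAddr);		/* ...and flush the posted write */
}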

/* Bits in ConfigD */
enum backoff_bits {
	BackOptional=0x01, BackModify=0x02,
	BackCaptureEffect=0x04, BackRandom=0x08
};

/* Bits in the TxConfig (TCR) register */
enum tcr_bits {
	TCR_PQEN=0x01,
	TCR_LB0=0x02,		/* loopback[0] */
	TCR_LB1=0x04,		/* loopback[1] */
	TCR_OFSET=0x08,
	TCR_RTGOPT=0x10,
	TCR_RTFT0=0x20,
	TCR_RTFT1=0x40,
	TCR_RTSF=0x80,
};

/* Bits in the CamCon (CAMC) register */
enum camcon_bits {
	CAMC_CAMEN=0x01,
	CAMC_VCAMSL=0x02,
	CAMC_CAMWR=0x04,
	CAMC_CAMRD=0x08,
};

/* Bits in the PCIBusConfig1 (BCR1) register */
enum bcr1_bits {
	BCR1_POT0=0x01,
	BCR1_POT1=0x02,
	BCR1_POT2=0x04,
	BCR1_CTFT0=0x08,
	BCR1_CTFT1=0x10,
	BCR1_CTSF=0x20,
	BCR1_TXQNOBK=0x40,	/* for VT6105 */
	BCR1_VIDFR=0x80,	/* for VT6105 */
	BCR1_MED0=0x40,		/* for VT6102 */
	BCR1_MED1=0x80,		/* for VT6102 */
};

/* Registers we check to verify that MMIO and PIO accesses agree. */
static const int mmio_verify_registers[] = {
	RxConfig, TxConfig, IntrEnable, ConfigA, ConfigB, ConfigC, ConfigD,
	0
};

/* Bits in the interrupt status/mask registers. */
enum intr_status_bits {
	IntrRxDone	= 0x0001,
	IntrTxDone	= 0x0002,
	IntrRxErr	= 0x0004,
	IntrTxError	= 0x0008,
	IntrRxEmpty	= 0x0020,
	IntrPCIErr	= 0x0040,
	IntrStatsMax	= 0x0080,
	IntrRxEarly	= 0x0100,
	IntrTxUnderrun	= 0x0210,
	IntrRxOverflow	= 0x0400,
	IntrRxDropped	= 0x0800,
	IntrRxNoBuf	= 0x1000,
	IntrTxAborted	= 0x2000,
	IntrLinkChange	= 0x4000,
	IntrRxWakeUp	= 0x8000,
	IntrTxDescRace	= 0x080000,	/* mapped from IntrStatus2 */
	IntrNormalSummary	= IntrRxDone | IntrTxDone,
	IntrTxErrSummary	= IntrTxDescRace | IntrTxAborted | IntrTxError |
				  IntrTxUnderrun,
};

/* Bits in WOLcrSet/WOLcrClr and PwrcsrSet/PwrcsrClr */
enum wol_bits {
	WOLucast	= 0x10,
	WOLmagic	= 0x20,
	WOLbmcast	= 0x30,
	WOLlnkon	= 0x40,
	WOLlnkoff	= 0x80,
};

/* The Rx and Tx buffer descriptors. */
struct rx_desc {
	__le32 rx_status;
	__le32 desc_length;	/* Chain flag, Buffer/frame length */
	__le32 addr;
	__le32 next_desc;
};
struct tx_desc {
	__le32 tx_status;
	__le32 desc_length;	/* Chain flag, Tx Config, Frame length */
	__le32 addr;
	__le32 next_desc;
};

/* Initial value for tx_desc.desc_length, Buffer size goes to bits 0-10 */
#define TXDESC		0x00e08000

enum rx_status_bits {
	RxOK=0x8000, RxWholePkt=0x0300, RxErr=0x008F
};

/* Bits in *_desc.*_status */
enum desc_status_bits {
	DescOwn=0x80000000
};

/* Bits in *_desc.*_length */
enum desc_length_bits {
	DescTag=0x00010000
};
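
/*
 * Sketch of the DescOwn ownership handshake, as an illustrative,
 * hypothetical helper (the driver open-codes this in its ring setup
 * and rhine_rx()): the CPU fills in the descriptor, then sets DescOwn
 * to hand the buffer to the NIC; the NIC clears DescOwn on write-back.
 */
static inline void rhine_give_rx_desc_sketch(struct rx_desc *desc,
					     u32 buf_size)
{
	desc->desc_length = cpu_to_le32(buf_size);
	/* make the buffer details visible to the device before DescOwn */
	dma_wmb();
	desc->rx_status = cpu_to_le32(DescOwn);
}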

/* Bits in ChipCmd. */
enum chip_cmd_bits {
	CmdInit=0x01, CmdStart=0x02, CmdStop=0x04, CmdRxOn=0x08,
	CmdTxOn=0x10, Cmd1TxDemand=0x20, CmdRxDemand=0x40,
	Cmd1EarlyRx=0x01, Cmd1EarlyTx=0x02, Cmd1FDuplex=0x04,
	Cmd1NoTxPoll=0x08, Cmd1Reset=0x80,
};

struct rhine_stats {
	u64	packets;
	u64	bytes;
	struct u64_stats_sync syncp;
};

struct rhine_private {
	/* Bit mask for configured VLAN ids */
	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];

	/* Descriptor rings */
	struct rx_desc *rx_ring;
	struct tx_desc *tx_ring;
	dma_addr_t rx_ring_dma;
	dma_addr_t tx_ring_dma;

	/* The addresses of receive-in-place skbuffs. */
	struct sk_buff *rx_skbuff[RX_RING_SIZE];
	dma_addr_t rx_skbuff_dma[RX_RING_SIZE];

	/* The saved address of a sent-in-place packet/buffer, for later free(). */
	struct sk_buff *tx_skbuff[TX_RING_SIZE];
	dma_addr_t tx_skbuff_dma[TX_RING_SIZE];

	/* Tx bounce buffers (Rhine-I only) */
	unsigned char *tx_buf[TX_RING_SIZE];
	unsigned char *tx_bufs;
	dma_addr_t tx_bufs_dma;

	int irq;
	long pioaddr;
	struct net_device *dev;
	struct napi_struct napi;
	spinlock_t lock;
	struct mutex task_lock;
	bool task_enable;
	struct work_struct slow_event_task;
	struct work_struct reset_task;

	u32 msg_enable;

	/* Frequently used values: keep some adjacent for cache effect. */
	u32 quirks;
	unsigned int cur_rx;
	unsigned int cur_tx, dirty_tx;
	unsigned int rx_buf_sz;	/* Based on MTU+slack. */
	struct rhine_stats rx_stats;
	struct rhine_stats tx_stats;
	u8 wolopts;

	u8 tx_thresh, rx_thresh;

	struct mii_if_info mii_if;
	void __iomem *base;
};

#define BYTE_REG_BITS_ON(x, p)      do { iowrite8((ioread8((p))|(x)), (p)); } while (0)
#define WORD_REG_BITS_ON(x, p)      do { iowrite16((ioread16((p))|(x)), (p)); } while (0)
#define DWORD_REG_BITS_ON(x, p)     do { iowrite32((ioread32((p))|(x)), (p)); } while (0)

#define BYTE_REG_BITS_IS_ON(x, p)   (ioread8((p)) & (x))
#define WORD_REG_BITS_IS_ON(x, p)   (ioread16((p)) & (x))
#define DWORD_REG_BITS_IS_ON(x, p)  (ioread32((p)) & (x))

#define BYTE_REG_BITS_OFF(x, p)     do { iowrite8(ioread8((p)) & (~(x)), (p)); } while (0)
#define WORD_REG_BITS_OFF(x, p)     do { iowrite16(ioread16((p)) & (~(x)), (p)); } while (0)
#define DWORD_REG_BITS_OFF(x, p)    do { iowrite32(ioread32((p)) & (~(x)), (p)); } while (0)

#define BYTE_REG_BITS_SET(x, m, p)  do { iowrite8((ioread8((p)) & (~(m)))|(x), (p)); } while (0)
#define WORD_REG_BITS_SET(x, m, p)  do { iowrite16((ioread16((p)) & (~(m)))|(x), (p)); } while (0)
#define DWORD_REG_BITS_SET(x, m, p) do { iowrite32((ioread32((p)) & (~(m)))|(x), (p)); } while (0)
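
/*
 * Usage sketch, hypothetical and not part of the driver: the *_ON
 * macros OR bits into a register, *_OFF clears them, and *_SET
 * rewrites a field under a mask, all as read-modify-write cycles.
 * For example, enabling the VT6105 VLAN filter bit in BCR1:
 */
static inline void rhine_enable_vlan_filter_sketch(void __iomem *ioaddr)
{
	BYTE_REG_BITS_ON(BCR1_VIDFR, ioaddr + PCIBusConfig1);
}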

static int  mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
static int  rhine_open(struct net_device *dev);
static void rhine_reset_task(struct work_struct *work);
static void rhine_slow_event_task(struct work_struct *work);
static void rhine_tx_timeout(struct net_device *dev, unsigned int txqueue);
static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
				  struct net_device *dev);
static irqreturn_t rhine_interrupt(int irq, void *dev_instance);
static void rhine_tx(struct net_device *dev);
static int rhine_rx(struct net_device *dev, int limit);
static void rhine_set_rx_mode(struct net_device *dev);
static void rhine_get_stats64(struct net_device *dev,
			      struct rtnl_link_stats64 *stats);
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static const struct ethtool_ops netdev_ethtool_ops;
static int  rhine_close(struct net_device *dev);
static int rhine_vlan_rx_add_vid(struct net_device *dev,
				 __be16 proto, u16 vid);
static int rhine_vlan_rx_kill_vid(struct net_device *dev,
				  __be16 proto, u16 vid);
static void rhine_restart_tx(struct net_device *dev);

static void rhine_wait_bit(struct rhine_private *rp, u8 reg, u8 mask, bool low)
{
	void __iomem *ioaddr = rp->base;
	int i;

	for (i = 0; i < 1024; i++) {
		bool has_mask_bits = !!(ioread8(ioaddr + reg) & mask);

		if (low ^ has_mask_bits)
			break;
		udelay(10);
	}
	if (i > 64) {
		netif_dbg(rp, hw, rp->dev, "%s bit wait (%02x/%02x) cycle "
			  "count: %04d\n", low ? "low" : "high", reg, mask, i);
	}
}

static void rhine_wait_bit_high(struct rhine_private *rp, u8 reg, u8 mask)
{
	rhine_wait_bit(rp, reg, mask, false);
}

static void rhine_wait_bit_low(struct rhine_private *rp, u8 reg, u8 mask)
{
	rhine_wait_bit(rp, reg, mask, true);
}

static u32 rhine_get_events(struct rhine_private *rp)
{
	void __iomem *ioaddr = rp->base;
	u32 intr_status;

	intr_status = ioread16(ioaddr + IntrStatus);
	/* On Rhine-II, Bit 3 indicates Tx descriptor write-back race. */
	if (rp->quirks & rqStatusWBRace)
		intr_status |= ioread8(ioaddr + IntrStatus2) << 16;
	return intr_status;
}

static void rhine_ack_events(struct rhine_private *rp, u32 mask)
{
	void __iomem *ioaddr = rp->base;

	if (rp->quirks & rqStatusWBRace)
		iowrite8(mask >> 16, ioaddr + IntrStatus2);
	iowrite16(mask, ioaddr + IntrStatus);
}
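
/*
 * Usage sketch (hypothetical; the real consumers are the interrupt and
 * NAPI paths): IntrStatus2 is folded into bits 23:16 of the synthetic
 * event word, so acking IntrTxDescRace (0x080000) writes 0x08 back to
 * IntrStatus2 while the low 16 bits go to IntrStatus.
 */
static inline void rhine_ack_tx_race_sketch(struct rhine_private *rp)
{
	u32 events = rhine_get_events(rp);

	if (events & IntrTxDescRace)
		rhine_ack_events(rp, IntrTxDescRace);
}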

/*
 * Get power related registers into sane state.
 * Notify user about past WOL event.
 */
static void rhine_power_init(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	u16 wolstat;

	if (rp->quirks & rqWOL) {
		/* Make sure chip is in power state D0 */
		iowrite8(ioread8(ioaddr + StickyHW) & 0xFC, ioaddr + StickyHW);

		/* Disable "force PME-enable" */
		iowrite8(0x80, ioaddr + WOLcgClr);

		/* Clear power-event config bits (WOL) */
		iowrite8(0xFF, ioaddr + WOLcrClr);
		/* More recent cards can manage two additional patterns */
		if (rp->quirks & rq6patterns)
			iowrite8(0x03, ioaddr + WOLcrClr1);

		/* Save power-event status bits */
		wolstat = ioread8(ioaddr + PwrcsrSet);
		if (rp->quirks & rq6patterns)
			wolstat |= (ioread8(ioaddr + PwrcsrSet1) & 0x03) << 8;

		/* Clear power-event status bits */
		iowrite8(0xFF, ioaddr + PwrcsrClr);
		if (rp->quirks & rq6patterns)
			iowrite8(0x03, ioaddr + PwrcsrClr1);

		if (wolstat) {
			char *reason;
			switch (wolstat) {
			case WOLmagic:
				reason = "Magic packet";
				break;
			case WOLlnkon:
				reason = "Link went up";
				break;
			case WOLlnkoff:
				reason = "Link went down";
				break;
			case WOLucast:
				reason = "Unicast packet";
				break;
			case WOLbmcast:
				reason = "Multicast/broadcast packet";
				break;
			default:
				reason = "Unknown";
			}
			netdev_info(dev, "Woke system up. Reason: %s\n",
				    reason);
		}
	}
}

static void rhine_chip_reset(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	u8 cmd1;

	iowrite8(Cmd1Reset, ioaddr + ChipCmd1);
	IOSYNC;

	if (ioread8(ioaddr + ChipCmd1) & Cmd1Reset) {
		netdev_info(dev, "Reset not complete yet. Trying harder.\n");

		/* Force reset */
		if (rp->quirks & rqForceReset)
			iowrite8(0x40, ioaddr + MiscCmd);

		/* Reset can take somewhat longer (rare) */
		rhine_wait_bit_low(rp, ChipCmd1, Cmd1Reset);
	}

	cmd1 = ioread8(ioaddr + ChipCmd1);
	netif_info(rp, hw, dev, "Reset %s\n", (cmd1 & Cmd1Reset) ?
		   "failed" : "succeeded");
}

static void enable_mmio(long pioaddr, u32 quirks)
{
	int n;

	if (quirks & rqNeedEnMMIO) {
		if (quirks & rqRhineI) {
			/* More recent docs say that this bit is reserved */
			n = inb(pioaddr + ConfigA) | 0x20;
			outb(n, pioaddr + ConfigA);
		} else {
			n = inb(pioaddr + ConfigD) | 0x80;
			outb(n, pioaddr + ConfigD);
		}
	}
}

static inline int verify_mmio(struct device *hwdev,
			      long pioaddr,
			      void __iomem *ioaddr,
			      u32 quirks)
{
	if (quirks & rqNeedEnMMIO) {
		int i = 0;

		/* Check that selected MMIO registers match the PIO ones */
		while (mmio_verify_registers[i]) {
			int reg = mmio_verify_registers[i++];
			unsigned char a = inb(pioaddr+reg);
			unsigned char b = readb(ioaddr+reg);

			if (a != b) {
				dev_err(hwdev,
					"MMIO do not match PIO [%02x] (%02x != %02x)\n",
					reg, a, b);
				return -EIO;
			}
		}
	}
	return 0;
}

/*
 * Loads bytes 0x00-0x05, 0x6E-0x6F, 0x78-0x7B from EEPROM
 * (plus 0x6C for Rhine-I/II)
 */
static void rhine_reload_eeprom(long pioaddr, struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	int i;

	outb(0x20, pioaddr + MACRegEEcsr);
	for (i = 0; i < 1024; i++) {
		if (!(inb(pioaddr + MACRegEEcsr) & 0x20))
			break;
	}
	if (i > 512)
		pr_info("%4d cycles used @ %s:%d\n", i, __func__, __LINE__);

	/*
	 * Reloading from EEPROM overwrites ConfigA-D, so we must re-enable
	 * MMIO. If reloading EEPROM was done first this could be avoided, but
	 * it is not known if that still works with the "win98-reboot" problem.
	 */
	enable_mmio(pioaddr, rp->quirks);

	/* Turn off EEPROM-controlled wake-up (magic packet) */
	if (rp->quirks & rqWOL)
		iowrite8(ioread8(ioaddr + ConfigA) & 0xFC, ioaddr + ConfigA);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void rhine_poll(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	const int irq = rp->irq;

	disable_irq(irq);
	rhine_interrupt(irq, dev);
	enable_irq(irq);
}
#endif

static void rhine_kick_tx_threshold(struct rhine_private *rp)
{
	if (rp->tx_thresh < 0xe0) {
		void __iomem *ioaddr = rp->base;

		rp->tx_thresh += 0x20;
		BYTE_REG_BITS_SET(rp->tx_thresh, 0x80, ioaddr + TxConfig);
	}
}
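
/*
 * Worked example (an assumption about the intent, not driver code):
 * starting from a Tx threshold byte of 0x20, successive underruns walk
 * the TxConfig threshold field 0x20 -> 0x40 -> 0x60 -> ... -> 0xe0.
 * At 0xe0 the TCR_RTSF bit is set, the transmitter effectively runs
 * store-and-forward, and rhine_kick_tx_threshold() becomes a no-op.
 */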

static void rhine_tx_err(struct rhine_private *rp, u32 status)
{
	struct net_device *dev = rp->dev;

	if (status & IntrTxAborted) {
		netif_info(rp, tx_err, dev,
			   "Abort %08x, frame dropped\n", status);
	}

	if (status & IntrTxUnderrun) {
		rhine_kick_tx_threshold(rp);
		netif_info(rp, tx_err, dev, "Transmitter underrun, "
			   "Tx threshold now %02x\n", rp->tx_thresh);
	}

	if (status & IntrTxDescRace)
		netif_info(rp, tx_err, dev, "Tx descriptor write-back race\n");

	if ((status & IntrTxError) &&
	    (status & (IntrTxAborted | IntrTxUnderrun | IntrTxDescRace)) == 0) {
		rhine_kick_tx_threshold(rp);
		netif_info(rp, tx_err, dev, "Unspecified error. "
			   "Tx threshold now %02x\n", rp->tx_thresh);
	}

	rhine_restart_tx(dev);
}

static void rhine_update_rx_crc_and_missed_errord(struct rhine_private *rp)
{
	void __iomem *ioaddr = rp->base;
	struct net_device_stats *stats = &rp->dev->stats;

	stats->rx_crc_errors    += ioread16(ioaddr + RxCRCErrs);
	stats->rx_missed_errors += ioread16(ioaddr + RxMissed);

	/*
	 * Clears the "tally counters" for CRC errors and missed frames(?).
	 * It has been reported that some chips need a write of 0 to clear
	 * these, for others the counters are set to 1 when written to and
	 * instead cleared when read. So we clear them both ways ...
	 */
	iowrite32(0, ioaddr + RxMissed);
	ioread16(ioaddr + RxCRCErrs);
	ioread16(ioaddr + RxMissed);
}

#define RHINE_EVENT_NAPI_RX	(IntrRxDone | \
				 IntrRxErr | \
				 IntrRxEmpty | \
				 IntrRxOverflow | \
				 IntrRxDropped | \
				 IntrRxNoBuf | \
				 IntrRxWakeUp)

#define RHINE_EVENT_NAPI_TX_ERR	(IntrTxError | \
				 IntrTxAborted | \
				 IntrTxUnderrun | \
				 IntrTxDescRace)
#define RHINE_EVENT_NAPI_TX	(IntrTxDone | RHINE_EVENT_NAPI_TX_ERR)

#define RHINE_EVENT_NAPI	(RHINE_EVENT_NAPI_RX | \
				 RHINE_EVENT_NAPI_TX | \
				 IntrStatsMax)
#define RHINE_EVENT_SLOW	(IntrPCIErr | IntrLinkChange)
#define RHINE_EVENT		(RHINE_EVENT_NAPI | RHINE_EVENT_SLOW)

static int rhine_napipoll(struct napi_struct *napi, int budget)
{
	struct rhine_private *rp = container_of(napi, struct rhine_private, napi);
	struct net_device *dev = rp->dev;
	void __iomem *ioaddr = rp->base;
	u16 enable_mask = RHINE_EVENT & 0xffff;
	int work_done = 0;
	u32 status;

	status = rhine_get_events(rp);
	rhine_ack_events(rp, status & ~RHINE_EVENT_SLOW);

	if (status & RHINE_EVENT_NAPI_RX)
		work_done += rhine_rx(dev, budget);

	if (status & RHINE_EVENT_NAPI_TX) {
		if (status & RHINE_EVENT_NAPI_TX_ERR) {
			/* Avoid scavenging before the Tx engine is turned off */
			rhine_wait_bit_low(rp, ChipCmd, CmdTxOn);
			if (ioread8(ioaddr + ChipCmd) & CmdTxOn)
				netif_warn(rp, tx_err, dev, "Tx still on\n");
		}

		rhine_tx(dev);

		if (status & RHINE_EVENT_NAPI_TX_ERR)
			rhine_tx_err(rp, status);
	}

	if (status & IntrStatsMax) {
		spin_lock(&rp->lock);
		rhine_update_rx_crc_and_missed_errord(rp);
		spin_unlock(&rp->lock);
	}

	if (status & RHINE_EVENT_SLOW) {
		enable_mask &= ~RHINE_EVENT_SLOW;
		schedule_work(&rp->slow_event_task);
	}

	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		iowrite16(enable_mask, ioaddr + IntrEnable);
	}
	return work_done;
}

static void rhine_hw_init(struct net_device *dev, long pioaddr)
{
	struct rhine_private *rp = netdev_priv(dev);

	/* Reset the chip to erase previous misconfiguration. */
	rhine_chip_reset(dev);

	/* Rhine-I needs extra time to recuperate before EEPROM reload */
	if (rp->quirks & rqRhineI)
		msleep(5);

	/* Reload EEPROM controlled bytes cleared by soft reset */
	if (dev_is_pci(dev->dev.parent))
		rhine_reload_eeprom(pioaddr, dev);
}

static const struct net_device_ops rhine_netdev_ops = {
	.ndo_open		 = rhine_open,
	.ndo_stop		 = rhine_close,
	.ndo_start_xmit		 = rhine_start_tx,
	.ndo_get_stats64	 = rhine_get_stats64,
	.ndo_set_rx_mode	 = rhine_set_rx_mode,
	.ndo_validate_addr	 = eth_validate_addr,
	.ndo_set_mac_address	 = eth_mac_addr,
	.ndo_eth_ioctl		 = netdev_ioctl,
	.ndo_tx_timeout		 = rhine_tx_timeout,
	.ndo_vlan_rx_add_vid	 = rhine_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	 = rhine_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	 = rhine_poll,
#endif
};

static int rhine_init_one_common(struct device *hwdev, u32 quirks,
				 long pioaddr, void __iomem *ioaddr, int irq)
{
	struct net_device *dev;
	struct rhine_private *rp;
	int i, rc, phy_id;
	const char *name;

	/* this should always be supported */
	rc = dma_set_mask(hwdev, DMA_BIT_MASK(32));
	if (rc) {
		dev_err(hwdev, "32-bit DMA addresses not supported by the card!?\n");
		goto err_out;
	}

	dev = alloc_etherdev(sizeof(struct rhine_private));
	if (!dev) {
		rc = -ENOMEM;
		goto err_out;
	}
	SET_NETDEV_DEV(dev, hwdev);

	rp = netdev_priv(dev);
	rp->dev = dev;
	rp->quirks = quirks;
	rp->pioaddr = pioaddr;
	rp->base = ioaddr;
	rp->irq = irq;
	rp->msg_enable = netif_msg_init(debug, RHINE_MSG_DEFAULT);

	phy_id = rp->quirks & rqIntPHY ? 1 : 0;

	u64_stats_init(&rp->tx_stats.syncp);
	u64_stats_init(&rp->rx_stats.syncp);

	/* Get chip registers into a sane state */
	rhine_power_init(dev);
	rhine_hw_init(dev, pioaddr);

	for (i = 0; i < 6; i++)
		dev->dev_addr[i] = ioread8(ioaddr + StationAddr + i);

	if (!is_valid_ether_addr(dev->dev_addr)) {
		/* Report it and use a random ethernet address instead */
		netdev_err(dev, "Invalid MAC address: %pM\n", dev->dev_addr);
		eth_hw_addr_random(dev);
		netdev_info(dev, "Using random MAC address: %pM\n",
			    dev->dev_addr);
	}

	/* For Rhine-I/II, phy_id is loaded from EEPROM */
	if (!phy_id)
		phy_id = ioread8(ioaddr + 0x6C);

	spin_lock_init(&rp->lock);
	mutex_init(&rp->task_lock);
	INIT_WORK(&rp->reset_task, rhine_reset_task);
	INIT_WORK(&rp->slow_event_task, rhine_slow_event_task);

	rp->mii_if.dev = dev;
	rp->mii_if.mdio_read = mdio_read;
	rp->mii_if.mdio_write = mdio_write;
	rp->mii_if.phy_id_mask = 0x1f;
	rp->mii_if.reg_num_mask = 0x1f;

	/* The chip-specific entries in the device structure. */
	dev->netdev_ops = &rhine_netdev_ops;
	dev->ethtool_ops = &netdev_ethtool_ops;
	dev->watchdog_timeo = TX_TIMEOUT;

	netif_napi_add(dev, &rp->napi, rhine_napipoll, 64);

	if (rp->quirks & rqRhineI)
		dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM;

	if (rp->quirks & rqMgmt)
		dev->features |= NETIF_F_HW_VLAN_CTAG_TX |
				 NETIF_F_HW_VLAN_CTAG_RX |
				 NETIF_F_HW_VLAN_CTAG_FILTER;

	/* dev->name not defined before register_netdev()! */
	rc = register_netdev(dev);
	if (rc)
		goto err_out_free_netdev;

	if (rp->quirks & rqRhineI)
		name = "Rhine";
	else if (rp->quirks & rqStatusWBRace)
		name = "Rhine II";
	else if (rp->quirks & rqMgmt)
		name = "Rhine III (Management Adapter)";
	else
		name = "Rhine III";

	netdev_info(dev, "VIA %s at %p, %pM, IRQ %d\n",
		    name, ioaddr, dev->dev_addr, rp->irq);

	dev_set_drvdata(hwdev, dev);

	{
		u16 mii_cmd;
		int mii_status = mdio_read(dev, phy_id, 1);
		mii_cmd = mdio_read(dev, phy_id, MII_BMCR) & ~BMCR_ISOLATE;
		mdio_write(dev, phy_id, MII_BMCR, mii_cmd);
		if (mii_status != 0xffff && mii_status != 0x0000) {
			rp->mii_if.advertising = mdio_read(dev, phy_id, 4);
			netdev_info(dev,
				    "MII PHY found at address %d, status 0x%04x advertising %04x Link %04x\n",
				    phy_id,
				    mii_status, rp->mii_if.advertising,
				    mdio_read(dev, phy_id, 5));

			/* set IFF_RUNNING */
			if (mii_status & BMSR_LSTATUS)
				netif_carrier_on(dev);
			else
				netif_carrier_off(dev);
		}
	}
	rp->mii_if.phy_id = phy_id;
	if (avoid_D3)
		netif_info(rp, probe, dev, "No D3 power state at shutdown\n");

	return 0;

err_out_free_netdev:
	free_netdev(dev);
err_out:
	return rc;
}

static int rhine_init_one_pci(struct pci_dev *pdev,
			      const struct pci_device_id *ent)
{
	struct device *hwdev = &pdev->dev;
	int rc;
	long pioaddr, memaddr;
	void __iomem *ioaddr;
	int io_size = pdev->revision < VTunknown0 ? 128 : 256;

	/* This driver was written to use PCI memory space. Some early versions
	 * of the Rhine may only work correctly with I/O space accesses.
	 * TODO: determine for which revisions this is true and assign the flag
	 * in code as opposed to this Kconfig option (???)
	 */
#ifdef CONFIG_VIA_RHINE_MMIO
	u32 quirks = rqNeedEnMMIO;
#else
	u32 quirks = 0;
#endif

	rc = pci_enable_device(pdev);
	if (rc)
		goto err_out;

	if (pdev->revision < VTunknown0) {
		quirks |= rqRhineI;
	} else if (pdev->revision >= VT6102) {
		quirks |= rqWOL | rqForceReset;
		if (pdev->revision < VT6105) {
			quirks |= rqStatusWBRace;
		} else {
			quirks |= rqIntPHY;
			if (pdev->revision >= VT6105_B0)
				quirks |= rq6patterns;
			if (pdev->revision >= VT6105M)
				quirks |= rqMgmt;
		}
	}

	/* sanity check */
	if ((pci_resource_len(pdev, 0) < io_size) ||
	    (pci_resource_len(pdev, 1) < io_size)) {
		rc = -EIO;
		dev_err(hwdev, "Insufficient PCI resources, aborting\n");
		goto err_out_pci_disable;
	}

	pioaddr = pci_resource_start(pdev, 0);
	memaddr = pci_resource_start(pdev, 1);

	pci_set_master(pdev);

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc)
		goto err_out_pci_disable;

	ioaddr = pci_iomap(pdev, (quirks & rqNeedEnMMIO ? 1 : 0), io_size);
	if (!ioaddr) {
		rc = -EIO;
		dev_err(hwdev,
			"ioremap failed for device %s, region 0x%X @ 0x%lX\n",
			dev_name(hwdev), io_size, memaddr);
		goto err_out_free_res;
	}

	enable_mmio(pioaddr, quirks);

	rc = verify_mmio(hwdev, pioaddr, ioaddr, quirks);
	if (rc)
		goto err_out_unmap;

	rc = rhine_init_one_common(&pdev->dev, quirks,
				   pioaddr, ioaddr, pdev->irq);
	if (!rc)
		return 0;

err_out_unmap:
	pci_iounmap(pdev, ioaddr);
err_out_free_res:
	pci_release_regions(pdev);
err_out_pci_disable:
	pci_disable_device(pdev);
err_out:
	return rc;
}

static int rhine_init_one_platform(struct platform_device *pdev)
{
	const struct of_device_id *match;
	const u32 *quirks;
	int irq;
	void __iomem *ioaddr;

	match = of_match_device(rhine_of_tbl, &pdev->dev);
	if (!match)
		return -EINVAL;

	ioaddr = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(ioaddr))
		return PTR_ERR(ioaddr);

	irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
	if (!irq)
		return -EINVAL;

	quirks = match->data;
	if (!quirks)
		return -EINVAL;

	return rhine_init_one_common(&pdev->dev, *quirks,
				     (long)ioaddr, ioaddr, irq);
}
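
/*
 * Illustrative device-tree node for the platform-bus probe above; the
 * unit address, register size and interrupt number are assumptions for
 * example purposes, not taken from a specific board file:
 *
 *	ethernet@d8004000 {
 *		compatible = "via,vt8500-rhine";
 *		reg = <0xd8004000 0x100>;
 *		interrupts = <10>;
 *	};
 */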
1 : 0), io_size); 10852d283862SAlexey Charkov if (!ioaddr) { 10862d283862SAlexey Charkov rc = -EIO; 10872d283862SAlexey Charkov dev_err(hwdev, 10882d283862SAlexey Charkov "ioremap failed for device %s, region 0x%X @ 0x%lX\n", 10892d283862SAlexey Charkov dev_name(hwdev), io_size, memaddr); 10902d283862SAlexey Charkov goto err_out_free_res; 10912d283862SAlexey Charkov } 10922d283862SAlexey Charkov 10932d283862SAlexey Charkov enable_mmio(pioaddr, quirks); 10942d283862SAlexey Charkov 10955b579e21SAlexey Charkov rc = verify_mmio(hwdev, pioaddr, ioaddr, quirks); 10965b579e21SAlexey Charkov if (rc) 10972d283862SAlexey Charkov goto err_out_unmap; 10982d283862SAlexey Charkov 1099ca8b6e04SAlexey Charkov rc = rhine_init_one_common(&pdev->dev, quirks, 11002d283862SAlexey Charkov pioaddr, ioaddr, pdev->irq); 11012d283862SAlexey Charkov if (!rc) 11022d283862SAlexey Charkov return 0; 11032d283862SAlexey Charkov 1104f2148a47SJeff Kirsher err_out_unmap: 1105f2148a47SJeff Kirsher pci_iounmap(pdev, ioaddr); 1106f2148a47SJeff Kirsher err_out_free_res: 1107f2148a47SJeff Kirsher pci_release_regions(pdev); 1108ae996154SRoger Luethi err_out_pci_disable: 1109ae996154SRoger Luethi pci_disable_device(pdev); 1110f2148a47SJeff Kirsher err_out: 1111f2148a47SJeff Kirsher return rc; 1112f2148a47SJeff Kirsher } 1113f2148a47SJeff Kirsher 11142d283862SAlexey Charkov static int rhine_init_one_platform(struct platform_device *pdev) 11152d283862SAlexey Charkov { 11162d283862SAlexey Charkov const struct of_device_id *match; 1117ca8b6e04SAlexey Charkov const u32 *quirks; 11182d283862SAlexey Charkov int irq; 11192d283862SAlexey Charkov void __iomem *ioaddr; 11202d283862SAlexey Charkov 11212d283862SAlexey Charkov match = of_match_device(rhine_of_tbl, &pdev->dev); 11222d283862SAlexey Charkov if (!match) 11232d283862SAlexey Charkov return -EINVAL; 11242d283862SAlexey Charkov 11258a54d4c2SYueHaibing ioaddr = devm_platform_ioremap_resource(pdev, 0); 11262d283862SAlexey Charkov if (IS_ERR(ioaddr)) 11272d283862SAlexey Charkov return PTR_ERR(ioaddr); 11282d283862SAlexey Charkov 11292d283862SAlexey Charkov irq = irq_of_parse_and_map(pdev->dev.of_node, 0); 11302d283862SAlexey Charkov if (!irq) 11312d283862SAlexey Charkov return -EINVAL; 11322d283862SAlexey Charkov 1133ca8b6e04SAlexey Charkov quirks = match->data; 1134ca8b6e04SAlexey Charkov if (!quirks) 11352d283862SAlexey Charkov return -EINVAL; 11362d283862SAlexey Charkov 1137ca8b6e04SAlexey Charkov return rhine_init_one_common(&pdev->dev, *quirks, 11382d283862SAlexey Charkov (long)ioaddr, ioaddr, irq); 11392d283862SAlexey Charkov } 11402d283862SAlexey Charkov 1141f2148a47SJeff Kirsher static int alloc_ring(struct net_device* dev) 1142f2148a47SJeff Kirsher { 1143f2148a47SJeff Kirsher struct rhine_private *rp = netdev_priv(dev); 1144f7630d18SAlexey Charkov struct device *hwdev = dev->dev.parent; 1145f2148a47SJeff Kirsher void *ring; 1146f2148a47SJeff Kirsher dma_addr_t ring_dma; 1147f2148a47SJeff Kirsher 1148f7630d18SAlexey Charkov ring = dma_alloc_coherent(hwdev, 1149f2148a47SJeff Kirsher RX_RING_SIZE * sizeof(struct rx_desc) + 1150f2148a47SJeff Kirsher TX_RING_SIZE * sizeof(struct tx_desc), 11514087c4dcSAlexey Charkov &ring_dma, 11524087c4dcSAlexey Charkov GFP_ATOMIC); 1153f2148a47SJeff Kirsher if (!ring) { 1154f2148a47SJeff Kirsher netdev_err(dev, "Could not allocate DMA memory\n"); 1155f2148a47SJeff Kirsher return -ENOMEM; 1156f2148a47SJeff Kirsher } 1157f2148a47SJeff Kirsher if (rp->quirks & rqRhineI) { 1158f7630d18SAlexey Charkov rp->tx_bufs = dma_alloc_coherent(hwdev, 
1159f2148a47SJeff Kirsher PKT_BUF_SZ * TX_RING_SIZE, 11604087c4dcSAlexey Charkov &rp->tx_bufs_dma, 11614087c4dcSAlexey Charkov GFP_ATOMIC); 1162f2148a47SJeff Kirsher if (rp->tx_bufs == NULL) { 1163f7630d18SAlexey Charkov dma_free_coherent(hwdev, 1164f2148a47SJeff Kirsher RX_RING_SIZE * sizeof(struct rx_desc) + 1165f2148a47SJeff Kirsher TX_RING_SIZE * sizeof(struct tx_desc), 1166f2148a47SJeff Kirsher ring, ring_dma); 1167f2148a47SJeff Kirsher return -ENOMEM; 1168f2148a47SJeff Kirsher } 1169f2148a47SJeff Kirsher } 1170f2148a47SJeff Kirsher 1171f2148a47SJeff Kirsher rp->rx_ring = ring; 1172f2148a47SJeff Kirsher rp->tx_ring = ring + RX_RING_SIZE * sizeof(struct rx_desc); 1173f2148a47SJeff Kirsher rp->rx_ring_dma = ring_dma; 1174f2148a47SJeff Kirsher rp->tx_ring_dma = ring_dma + RX_RING_SIZE * sizeof(struct rx_desc); 1175f2148a47SJeff Kirsher 1176f2148a47SJeff Kirsher return 0; 1177f2148a47SJeff Kirsher } 1178f2148a47SJeff Kirsher 1179f2148a47SJeff Kirsher static void free_ring(struct net_device* dev) 1180f2148a47SJeff Kirsher { 1181f2148a47SJeff Kirsher struct rhine_private *rp = netdev_priv(dev); 1182f7630d18SAlexey Charkov struct device *hwdev = dev->dev.parent; 1183f2148a47SJeff Kirsher 1184f7630d18SAlexey Charkov dma_free_coherent(hwdev, 1185f2148a47SJeff Kirsher RX_RING_SIZE * sizeof(struct rx_desc) + 1186f2148a47SJeff Kirsher TX_RING_SIZE * sizeof(struct tx_desc), 1187f2148a47SJeff Kirsher rp->rx_ring, rp->rx_ring_dma); 1188f2148a47SJeff Kirsher rp->tx_ring = NULL; 1189f2148a47SJeff Kirsher 1190f2148a47SJeff Kirsher if (rp->tx_bufs) 1191f7630d18SAlexey Charkov dma_free_coherent(hwdev, PKT_BUF_SZ * TX_RING_SIZE, 1192f2148a47SJeff Kirsher rp->tx_bufs, rp->tx_bufs_dma); 1193f2148a47SJeff Kirsher 1194f2148a47SJeff Kirsher rp->tx_bufs = NULL; 1195f2148a47SJeff Kirsher 1196f2148a47SJeff Kirsher } 1197f2148a47SJeff Kirsher 1198a21bb8baSfrançois romieu struct rhine_skb_dma { 1199a21bb8baSfrançois romieu struct sk_buff *skb; 1200a21bb8baSfrançois romieu dma_addr_t dma; 1201a21bb8baSfrançois romieu }; 1202a21bb8baSfrançois romieu 1203a21bb8baSfrançois romieu static inline int rhine_skb_dma_init(struct net_device *dev, 1204a21bb8baSfrançois romieu struct rhine_skb_dma *sd) 1205f2148a47SJeff Kirsher { 1206f2148a47SJeff Kirsher struct rhine_private *rp = netdev_priv(dev); 1207f7630d18SAlexey Charkov struct device *hwdev = dev->dev.parent; 1208a21bb8baSfrançois romieu const int size = rp->rx_buf_sz; 1209a21bb8baSfrançois romieu 1210a21bb8baSfrançois romieu sd->skb = netdev_alloc_skb(dev, size); 1211a21bb8baSfrançois romieu if (!sd->skb) 1212a21bb8baSfrançois romieu return -ENOMEM; 1213a21bb8baSfrançois romieu 1214a21bb8baSfrançois romieu sd->dma = dma_map_single(hwdev, sd->skb->data, size, DMA_FROM_DEVICE); 1215a21bb8baSfrançois romieu if (unlikely(dma_mapping_error(hwdev, sd->dma))) { 1216a21bb8baSfrançois romieu netif_err(rp, drv, dev, "Rx DMA mapping failure\n"); 1217a21bb8baSfrançois romieu dev_kfree_skb_any(sd->skb); 1218a21bb8baSfrançois romieu return -EIO; 1219a21bb8baSfrançois romieu } 1220a21bb8baSfrançois romieu 1221a21bb8baSfrançois romieu return 0; 1222a21bb8baSfrançois romieu } 1223a21bb8baSfrançois romieu 12248709bb2cSfrançois romieu static void rhine_reset_rbufs(struct rhine_private *rp) 12258709bb2cSfrançois romieu { 12268709bb2cSfrançois romieu int i; 12278709bb2cSfrançois romieu 12288709bb2cSfrançois romieu rp->cur_rx = 0; 12298709bb2cSfrançois romieu 12308709bb2cSfrançois romieu for (i = 0; i < RX_RING_SIZE; i++) 12318709bb2cSfrançois romieu rp->rx_ring[i].rx_status = 
cpu_to_le32(DescOwn); 12328709bb2cSfrançois romieu } 12338709bb2cSfrançois romieu 1234a21bb8baSfrançois romieu static inline void rhine_skb_dma_nic_store(struct rhine_private *rp, 1235a21bb8baSfrançois romieu struct rhine_skb_dma *sd, int entry) 1236a21bb8baSfrançois romieu { 1237a21bb8baSfrançois romieu rp->rx_skbuff_dma[entry] = sd->dma; 1238a21bb8baSfrançois romieu rp->rx_skbuff[entry] = sd->skb; 1239a21bb8baSfrançois romieu 1240a21bb8baSfrançois romieu rp->rx_ring[entry].addr = cpu_to_le32(sd->dma); 1241a21bb8baSfrançois romieu dma_wmb(); 1242a21bb8baSfrançois romieu } 1243a21bb8baSfrançois romieu 12448709bb2cSfrançois romieu static void free_rbufs(struct net_device* dev); 12458709bb2cSfrançois romieu 12468709bb2cSfrançois romieu static int alloc_rbufs(struct net_device *dev) 1247a21bb8baSfrançois romieu { 1248a21bb8baSfrançois romieu struct rhine_private *rp = netdev_priv(dev); 1249f2148a47SJeff Kirsher dma_addr_t next; 1250a21bb8baSfrançois romieu int rc, i; 1251f2148a47SJeff Kirsher 1252f2148a47SJeff Kirsher rp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32); 1253f2148a47SJeff Kirsher next = rp->rx_ring_dma; 1254f2148a47SJeff Kirsher 1255f2148a47SJeff Kirsher /* Init the ring entries */ 1256f2148a47SJeff Kirsher for (i = 0; i < RX_RING_SIZE; i++) { 1257f2148a47SJeff Kirsher rp->rx_ring[i].rx_status = 0; 1258f2148a47SJeff Kirsher rp->rx_ring[i].desc_length = cpu_to_le32(rp->rx_buf_sz); 1259f2148a47SJeff Kirsher next += sizeof(struct rx_desc); 1260f2148a47SJeff Kirsher rp->rx_ring[i].next_desc = cpu_to_le32(next); 1261f2148a47SJeff Kirsher rp->rx_skbuff[i] = NULL; 1262f2148a47SJeff Kirsher } 1263f2148a47SJeff Kirsher /* Mark the last entry as wrapping the ring. */ 1264f2148a47SJeff Kirsher rp->rx_ring[i-1].next_desc = cpu_to_le32(rp->rx_ring_dma); 1265f2148a47SJeff Kirsher 1266f2148a47SJeff Kirsher /* Fill in the Rx buffers. Handle allocation failure gracefully. */ 1267f2148a47SJeff Kirsher for (i = 0; i < RX_RING_SIZE; i++) { 1268a21bb8baSfrançois romieu struct rhine_skb_dma sd; 1269a21bb8baSfrançois romieu 1270a21bb8baSfrançois romieu rc = rhine_skb_dma_init(dev, &sd); 12718709bb2cSfrançois romieu if (rc < 0) { 12728709bb2cSfrançois romieu free_rbufs(dev); 12738709bb2cSfrançois romieu goto out; 12748709bb2cSfrançois romieu } 1275f2148a47SJeff Kirsher 1276a21bb8baSfrançois romieu rhine_skb_dma_nic_store(rp, &sd, i); 1277f2148a47SJeff Kirsher } 12788709bb2cSfrançois romieu 12798709bb2cSfrançois romieu rhine_reset_rbufs(rp); 12808709bb2cSfrançois romieu out: 12818709bb2cSfrançois romieu return rc; 1282f2148a47SJeff Kirsher } 1283f2148a47SJeff Kirsher 1284f2148a47SJeff Kirsher static void free_rbufs(struct net_device* dev) 1285f2148a47SJeff Kirsher { 1286f2148a47SJeff Kirsher struct rhine_private *rp = netdev_priv(dev); 1287f7630d18SAlexey Charkov struct device *hwdev = dev->dev.parent; 1288f2148a47SJeff Kirsher int i; 1289f2148a47SJeff Kirsher 1290f2148a47SJeff Kirsher /* Free all the skbuffs in the Rx queue. */ 1291f2148a47SJeff Kirsher for (i = 0; i < RX_RING_SIZE; i++) { 1292f2148a47SJeff Kirsher rp->rx_ring[i].rx_status = 0; 1293f2148a47SJeff Kirsher rp->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. 
*/ 1294f2148a47SJeff Kirsher if (rp->rx_skbuff[i]) { 1295f7630d18SAlexey Charkov dma_unmap_single(hwdev, 1296f2148a47SJeff Kirsher rp->rx_skbuff_dma[i], 12974087c4dcSAlexey Charkov rp->rx_buf_sz, DMA_FROM_DEVICE); 1298f2148a47SJeff Kirsher dev_kfree_skb(rp->rx_skbuff[i]); 1299f2148a47SJeff Kirsher } 1300f2148a47SJeff Kirsher rp->rx_skbuff[i] = NULL; 1301f2148a47SJeff Kirsher } 1302f2148a47SJeff Kirsher } 1303f2148a47SJeff Kirsher 1304f2148a47SJeff Kirsher static void alloc_tbufs(struct net_device* dev) 1305f2148a47SJeff Kirsher { 1306f2148a47SJeff Kirsher struct rhine_private *rp = netdev_priv(dev); 1307f2148a47SJeff Kirsher dma_addr_t next; 1308f2148a47SJeff Kirsher int i; 1309f2148a47SJeff Kirsher 1310f2148a47SJeff Kirsher rp->dirty_tx = rp->cur_tx = 0; 1311f2148a47SJeff Kirsher next = rp->tx_ring_dma; 1312f2148a47SJeff Kirsher for (i = 0; i < TX_RING_SIZE; i++) { 1313f2148a47SJeff Kirsher rp->tx_skbuff[i] = NULL; 1314f2148a47SJeff Kirsher rp->tx_ring[i].tx_status = 0; 1315f2148a47SJeff Kirsher rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC); 1316f2148a47SJeff Kirsher next += sizeof(struct tx_desc); 1317f2148a47SJeff Kirsher rp->tx_ring[i].next_desc = cpu_to_le32(next); 1318f2148a47SJeff Kirsher if (rp->quirks & rqRhineI) 1319f2148a47SJeff Kirsher rp->tx_buf[i] = &rp->tx_bufs[i * PKT_BUF_SZ]; 1320f2148a47SJeff Kirsher } 1321f2148a47SJeff Kirsher rp->tx_ring[i-1].next_desc = cpu_to_le32(rp->tx_ring_dma); 1322f2148a47SJeff Kirsher 132392bf2008STino Reichardt netdev_reset_queue(dev); 1324f2148a47SJeff Kirsher } 1325f2148a47SJeff Kirsher 1326f2148a47SJeff Kirsher static void free_tbufs(struct net_device* dev) 1327f2148a47SJeff Kirsher { 1328f2148a47SJeff Kirsher struct rhine_private *rp = netdev_priv(dev); 1329f7630d18SAlexey Charkov struct device *hwdev = dev->dev.parent; 1330f2148a47SJeff Kirsher int i; 1331f2148a47SJeff Kirsher 1332f2148a47SJeff Kirsher for (i = 0; i < TX_RING_SIZE; i++) { 1333f2148a47SJeff Kirsher rp->tx_ring[i].tx_status = 0; 1334f2148a47SJeff Kirsher rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC); 1335f2148a47SJeff Kirsher rp->tx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. 
*/ 1336f2148a47SJeff Kirsher if (rp->tx_skbuff[i]) { 1337f2148a47SJeff Kirsher if (rp->tx_skbuff_dma[i]) { 1338f7630d18SAlexey Charkov dma_unmap_single(hwdev, 1339f2148a47SJeff Kirsher rp->tx_skbuff_dma[i], 1340f2148a47SJeff Kirsher rp->tx_skbuff[i]->len, 13414087c4dcSAlexey Charkov DMA_TO_DEVICE); 1342f2148a47SJeff Kirsher } 1343f2148a47SJeff Kirsher dev_kfree_skb(rp->tx_skbuff[i]); 1344f2148a47SJeff Kirsher } 1345f2148a47SJeff Kirsher rp->tx_skbuff[i] = NULL; 1346f2148a47SJeff Kirsher rp->tx_buf[i] = NULL; 1347f2148a47SJeff Kirsher } 1348f2148a47SJeff Kirsher } 1349f2148a47SJeff Kirsher 1350f2148a47SJeff Kirsher static void rhine_check_media(struct net_device *dev, unsigned int init_media) 1351f2148a47SJeff Kirsher { 1352f2148a47SJeff Kirsher struct rhine_private *rp = netdev_priv(dev); 1353f2148a47SJeff Kirsher void __iomem *ioaddr = rp->base; 1354f2148a47SJeff Kirsher 13555bdc7380SBen Hutchings if (!rp->mii_if.force_media) 1356fc3e0f8aSFrancois Romieu mii_check_media(&rp->mii_if, netif_msg_link(rp), init_media); 1357f2148a47SJeff Kirsher 1358f2148a47SJeff Kirsher if (rp->mii_if.full_duplex) 1359f2148a47SJeff Kirsher iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1FDuplex, 1360f2148a47SJeff Kirsher ioaddr + ChipCmd1); 1361f2148a47SJeff Kirsher else 1362f2148a47SJeff Kirsher iowrite8(ioread8(ioaddr + ChipCmd1) & ~Cmd1FDuplex, 1363f2148a47SJeff Kirsher ioaddr + ChipCmd1); 1364fc3e0f8aSFrancois Romieu 1365fc3e0f8aSFrancois Romieu netif_info(rp, link, dev, "force_media %d, carrier %d\n", 1366f2148a47SJeff Kirsher rp->mii_if.force_media, netif_carrier_ok(dev)); 1367f2148a47SJeff Kirsher } 1368f2148a47SJeff Kirsher 1369f2148a47SJeff Kirsher /* Called after status of force_media possibly changed */ 1370f2148a47SJeff Kirsher static void rhine_set_carrier(struct mii_if_info *mii) 1371f2148a47SJeff Kirsher { 1372fc3e0f8aSFrancois Romieu struct net_device *dev = mii->dev; 1373fc3e0f8aSFrancois Romieu struct rhine_private *rp = netdev_priv(dev); 1374fc3e0f8aSFrancois Romieu 1375f2148a47SJeff Kirsher if (mii->force_media) { 1376f2148a47SJeff Kirsher /* autoneg is off: Link is always assumed to be up */ 1377fc3e0f8aSFrancois Romieu if (!netif_carrier_ok(dev)) 1378fc3e0f8aSFrancois Romieu netif_carrier_on(dev); 137917958438SFrançois Cachereul } 138017958438SFrançois Cachereul 1381fc3e0f8aSFrancois Romieu rhine_check_media(dev, 0); 1382fc3e0f8aSFrancois Romieu 1383fc3e0f8aSFrancois Romieu netif_info(rp, link, dev, "force_media %d, carrier %d\n", 1384fc3e0f8aSFrancois Romieu mii->force_media, netif_carrier_ok(dev)); 1385f2148a47SJeff Kirsher } 1386f2148a47SJeff Kirsher 1387f2148a47SJeff Kirsher /** 1388f2148a47SJeff Kirsher * rhine_set_cam - set CAM multicast filters 1389f2148a47SJeff Kirsher * @ioaddr: register block of this Rhine 1390f2148a47SJeff Kirsher * @idx: multicast CAM index [0..MCAM_SIZE-1] 1391f2148a47SJeff Kirsher * @addr: multicast address (6 bytes) 1392f2148a47SJeff Kirsher * 1393f2148a47SJeff Kirsher * Load addresses into multicast filters. 
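 *
 * A sketch of the programming sequence, read off the register writes
 * below rather than a VIA datasheet: enter CAM mode (CAMC_CAMEN),
 * latch the slot index through CamAddr, present the six address octets
 * via the MulticastFilter0 window, commit with CAMC_CAMWR, then clear
 * CamCon to leave CAM mode.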
1394f2148a47SJeff Kirsher */ 1395f2148a47SJeff Kirsher static void rhine_set_cam(void __iomem *ioaddr, int idx, u8 *addr) 1396f2148a47SJeff Kirsher { 1397f2148a47SJeff Kirsher int i; 1398f2148a47SJeff Kirsher 1399f2148a47SJeff Kirsher iowrite8(CAMC_CAMEN, ioaddr + CamCon); 1400f2148a47SJeff Kirsher wmb(); 1401f2148a47SJeff Kirsher 1402f2148a47SJeff Kirsher /* Paranoid -- idx out of range should never happen */ 1403f2148a47SJeff Kirsher idx &= (MCAM_SIZE - 1); 1404f2148a47SJeff Kirsher 1405f2148a47SJeff Kirsher iowrite8((u8) idx, ioaddr + CamAddr); 1406f2148a47SJeff Kirsher 1407f2148a47SJeff Kirsher for (i = 0; i < 6; i++, addr++) 1408f2148a47SJeff Kirsher iowrite8(*addr, ioaddr + MulticastFilter0 + i); 1409f2148a47SJeff Kirsher udelay(10); 1410f2148a47SJeff Kirsher wmb(); 1411f2148a47SJeff Kirsher 1412f2148a47SJeff Kirsher iowrite8(CAMC_CAMWR | CAMC_CAMEN, ioaddr + CamCon); 1413f2148a47SJeff Kirsher udelay(10); 1414f2148a47SJeff Kirsher 1415f2148a47SJeff Kirsher iowrite8(0, ioaddr + CamCon); 1416f2148a47SJeff Kirsher } 1417f2148a47SJeff Kirsher 1418f2148a47SJeff Kirsher /** 1419f2148a47SJeff Kirsher * rhine_set_vlan_cam - set CAM VLAN filters 1420f2148a47SJeff Kirsher * @ioaddr: register block of this Rhine 1421f2148a47SJeff Kirsher * @idx: VLAN CAM index [0..VCAM_SIZE-1] 1422f2148a47SJeff Kirsher * @addr: VLAN ID (2 bytes) 1423f2148a47SJeff Kirsher * 1424f2148a47SJeff Kirsher * Load addresses into VLAN filters. 1425f2148a47SJeff Kirsher */ 1426f2148a47SJeff Kirsher static void rhine_set_vlan_cam(void __iomem *ioaddr, int idx, u8 *addr) 1427f2148a47SJeff Kirsher { 1428f2148a47SJeff Kirsher iowrite8(CAMC_CAMEN | CAMC_VCAMSL, ioaddr + CamCon); 1429f2148a47SJeff Kirsher wmb(); 1430f2148a47SJeff Kirsher 1431f2148a47SJeff Kirsher /* Paranoid -- idx out of range should never happen */ 1432f2148a47SJeff Kirsher idx &= (VCAM_SIZE - 1); 1433f2148a47SJeff Kirsher 1434f2148a47SJeff Kirsher iowrite8((u8) idx, ioaddr + CamAddr); 1435f2148a47SJeff Kirsher 1436f2148a47SJeff Kirsher iowrite16(*((u16 *) addr), ioaddr + MulticastFilter0 + 6); 1437f2148a47SJeff Kirsher udelay(10); 1438f2148a47SJeff Kirsher wmb(); 1439f2148a47SJeff Kirsher 1440f2148a47SJeff Kirsher iowrite8(CAMC_CAMWR | CAMC_CAMEN, ioaddr + CamCon); 1441f2148a47SJeff Kirsher udelay(10); 1442f2148a47SJeff Kirsher 1443f2148a47SJeff Kirsher iowrite8(0, ioaddr + CamCon); 1444f2148a47SJeff Kirsher } 1445f2148a47SJeff Kirsher 1446f2148a47SJeff Kirsher /** 1447f2148a47SJeff Kirsher * rhine_set_cam_mask - set multicast CAM mask 1448f2148a47SJeff Kirsher * @ioaddr: register block of this Rhine 1449f2148a47SJeff Kirsher * @mask: multicast CAM mask 1450f2148a47SJeff Kirsher * 1451f2148a47SJeff Kirsher * Mask sets multicast filters active/inactive. 
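 *
 * Each bit of @mask appears to enable the CAM slot of the same index;
 * rhine_set_rx_mode() builds it that way (mCAMmask |= 1 << i) while
 * loading addresses. Writing the mask also happens in CAM mode, hence
 * the CAMC_CAMEN bracket below.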
1452f2148a47SJeff Kirsher */ 1453f2148a47SJeff Kirsher static void rhine_set_cam_mask(void __iomem *ioaddr, u32 mask) 1454f2148a47SJeff Kirsher { 1455f2148a47SJeff Kirsher iowrite8(CAMC_CAMEN, ioaddr + CamCon); 1456f2148a47SJeff Kirsher wmb(); 1457f2148a47SJeff Kirsher 1458f2148a47SJeff Kirsher /* write mask */ 1459f2148a47SJeff Kirsher iowrite32(mask, ioaddr + CamMask); 1460f2148a47SJeff Kirsher 1461f2148a47SJeff Kirsher /* disable CAMEN */ 1462f2148a47SJeff Kirsher iowrite8(0, ioaddr + CamCon); 1463f2148a47SJeff Kirsher } 1464f2148a47SJeff Kirsher 1465f2148a47SJeff Kirsher /** 1466f2148a47SJeff Kirsher * rhine_set_vlan_cam_mask - set VLAN CAM mask 1467f2148a47SJeff Kirsher * @ioaddr: register block of this Rhine 1468f2148a47SJeff Kirsher * @mask: VLAN CAM mask 1469f2148a47SJeff Kirsher * 1470f2148a47SJeff Kirsher * Mask sets VLAN filters active/inactive. 1471f2148a47SJeff Kirsher */ 1472f2148a47SJeff Kirsher static void rhine_set_vlan_cam_mask(void __iomem *ioaddr, u32 mask) 1473f2148a47SJeff Kirsher { 1474f2148a47SJeff Kirsher iowrite8(CAMC_CAMEN | CAMC_VCAMSL, ioaddr + CamCon); 1475f2148a47SJeff Kirsher wmb(); 1476f2148a47SJeff Kirsher 1477f2148a47SJeff Kirsher /* write mask */ 1478f2148a47SJeff Kirsher iowrite32(mask, ioaddr + CamMask); 1479f2148a47SJeff Kirsher 1480f2148a47SJeff Kirsher /* disable CAMEN */ 1481f2148a47SJeff Kirsher iowrite8(0, ioaddr + CamCon); 1482f2148a47SJeff Kirsher } 1483f2148a47SJeff Kirsher 1484f2148a47SJeff Kirsher /** 1485f2148a47SJeff Kirsher * rhine_init_cam_filter - initialize CAM filters 1486f2148a47SJeff Kirsher * @dev: network device 1487f2148a47SJeff Kirsher * 1488f2148a47SJeff Kirsher * Initialize (disable) hardware VLAN and multicast support on this 1489f2148a47SJeff Kirsher * Rhine. 1490f2148a47SJeff Kirsher */ 1491f2148a47SJeff Kirsher static void rhine_init_cam_filter(struct net_device *dev) 1492f2148a47SJeff Kirsher { 1493f2148a47SJeff Kirsher struct rhine_private *rp = netdev_priv(dev); 1494f2148a47SJeff Kirsher void __iomem *ioaddr = rp->base; 1495f2148a47SJeff Kirsher 1496f2148a47SJeff Kirsher /* Disable all CAMs */ 1497f2148a47SJeff Kirsher rhine_set_vlan_cam_mask(ioaddr, 0); 1498f2148a47SJeff Kirsher rhine_set_cam_mask(ioaddr, 0); 1499f2148a47SJeff Kirsher 1500f2148a47SJeff Kirsher /* disable hardware VLAN support */ 1501f2148a47SJeff Kirsher BYTE_REG_BITS_ON(TCR_PQEN, ioaddr + TxConfig); 1502f2148a47SJeff Kirsher BYTE_REG_BITS_OFF(BCR1_VIDFR, ioaddr + PCIBusConfig1); 1503f2148a47SJeff Kirsher } 1504f2148a47SJeff Kirsher 1505f2148a47SJeff Kirsher /** 1506f2148a47SJeff Kirsher * rhine_update_vcam - update VLAN CAM filters 1507d0ea5cbdSJesse Brandeburg * @dev: network device 1508f2148a47SJeff Kirsher * 1509f2148a47SJeff Kirsher * Update VLAN CAM filters to match configuration change. 
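 *
 * Judging from the loop below, active VLAN ids are packed into
 * consecutive CAM slots, at most VCAM_SIZE of them, and exactly those
 * slots are activated through the VLAN CAM mask; ids beyond VCAM_SIZE
 * never reach the hardware filter.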
1510f2148a47SJeff Kirsher */ 1511f2148a47SJeff Kirsher static void rhine_update_vcam(struct net_device *dev) 1512f2148a47SJeff Kirsher { 1513f2148a47SJeff Kirsher struct rhine_private *rp = netdev_priv(dev); 1514f2148a47SJeff Kirsher void __iomem *ioaddr = rp->base; 1515f2148a47SJeff Kirsher u16 vid; 1516f2148a47SJeff Kirsher u32 vCAMmask = 0; /* 32 vCAMs (6105M and better) */ 1517f2148a47SJeff Kirsher unsigned int i = 0; 1518f2148a47SJeff Kirsher 1519f2148a47SJeff Kirsher for_each_set_bit(vid, rp->active_vlans, VLAN_N_VID) { 1520f2148a47SJeff Kirsher rhine_set_vlan_cam(ioaddr, i, (u8 *)&vid); 1521f2148a47SJeff Kirsher vCAMmask |= 1 << i; 1522f2148a47SJeff Kirsher if (++i >= VCAM_SIZE) 1523f2148a47SJeff Kirsher break; 1524f2148a47SJeff Kirsher } 1525f2148a47SJeff Kirsher rhine_set_vlan_cam_mask(ioaddr, vCAMmask); 1526f2148a47SJeff Kirsher } 1527f2148a47SJeff Kirsher 152880d5c368SPatrick McHardy static int rhine_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid) 1529f2148a47SJeff Kirsher { 1530f2148a47SJeff Kirsher struct rhine_private *rp = netdev_priv(dev); 1531f2148a47SJeff Kirsher 15327ab87ff4SFrancois Romieu spin_lock_bh(&rp->lock); 1533f2148a47SJeff Kirsher set_bit(vid, rp->active_vlans); 1534f2148a47SJeff Kirsher rhine_update_vcam(dev); 15357ab87ff4SFrancois Romieu spin_unlock_bh(&rp->lock); 15368e586137SJiri Pirko return 0; 1537f2148a47SJeff Kirsher } 1538f2148a47SJeff Kirsher 153980d5c368SPatrick McHardy static int rhine_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid) 1540f2148a47SJeff Kirsher { 1541f2148a47SJeff Kirsher struct rhine_private *rp = netdev_priv(dev); 1542f2148a47SJeff Kirsher 15437ab87ff4SFrancois Romieu spin_lock_bh(&rp->lock); 1544f2148a47SJeff Kirsher clear_bit(vid, rp->active_vlans); 1545f2148a47SJeff Kirsher rhine_update_vcam(dev); 15467ab87ff4SFrancois Romieu spin_unlock_bh(&rp->lock); 15478e586137SJiri Pirko return 0; 1548f2148a47SJeff Kirsher } 1549f2148a47SJeff Kirsher 1550f2148a47SJeff Kirsher static void init_registers(struct net_device *dev) 1551f2148a47SJeff Kirsher { 1552f2148a47SJeff Kirsher struct rhine_private *rp = netdev_priv(dev); 1553f2148a47SJeff Kirsher void __iomem *ioaddr = rp->base; 1554f2148a47SJeff Kirsher int i; 1555f2148a47SJeff Kirsher 1556f2148a47SJeff Kirsher for (i = 0; i < 6; i++) 1557f2148a47SJeff Kirsher iowrite8(dev->dev_addr[i], ioaddr + StationAddr + i); 1558f2148a47SJeff Kirsher 1559f2148a47SJeff Kirsher /* Initialize other registers. */ 1560f2148a47SJeff Kirsher iowrite16(0x0006, ioaddr + PCIBusConfig); /* Tune configuration??? */ 1561f2148a47SJeff Kirsher /* Configure initial FIFO thresholds. */ 1562f2148a47SJeff Kirsher iowrite8(0x20, ioaddr + TxConfig); 1563f2148a47SJeff Kirsher rp->tx_thresh = 0x20; 1564f2148a47SJeff Kirsher rp->rx_thresh = 0x60; /* Written in rhine_set_rx_mode(). 
*/ 1565f2148a47SJeff Kirsher 1566f2148a47SJeff Kirsher iowrite32(rp->rx_ring_dma, ioaddr + RxRingPtr); 1567f2148a47SJeff Kirsher iowrite32(rp->tx_ring_dma, ioaddr + TxRingPtr); 1568f2148a47SJeff Kirsher 1569f2148a47SJeff Kirsher rhine_set_rx_mode(dev); 1570f2148a47SJeff Kirsher 1571ca8b6e04SAlexey Charkov if (rp->quirks & rqMgmt) 1572f2148a47SJeff Kirsher rhine_init_cam_filter(dev); 1573f2148a47SJeff Kirsher 1574f2148a47SJeff Kirsher napi_enable(&rp->napi); 1575f2148a47SJeff Kirsher 15767ab87ff4SFrancois Romieu iowrite16(RHINE_EVENT & 0xffff, ioaddr + IntrEnable); 1577f2148a47SJeff Kirsher 1578f2148a47SJeff Kirsher iowrite16(CmdStart | CmdTxOn | CmdRxOn | (Cmd1NoTxPoll << 8), 1579f2148a47SJeff Kirsher ioaddr + ChipCmd); 1580f2148a47SJeff Kirsher rhine_check_media(dev, 1); 1581f2148a47SJeff Kirsher } 1582f2148a47SJeff Kirsher 1583f2148a47SJeff Kirsher /* Enable MII link status auto-polling (required for IntrLinkChange) */ 1584a384a33bSFrancois Romieu static void rhine_enable_linkmon(struct rhine_private *rp) 1585f2148a47SJeff Kirsher { 1586a384a33bSFrancois Romieu void __iomem *ioaddr = rp->base; 1587a384a33bSFrancois Romieu 1588f2148a47SJeff Kirsher iowrite8(0, ioaddr + MIICmd); 1589f2148a47SJeff Kirsher iowrite8(MII_BMSR, ioaddr + MIIRegAddr); 1590f2148a47SJeff Kirsher iowrite8(0x80, ioaddr + MIICmd); 1591f2148a47SJeff Kirsher 1592a384a33bSFrancois Romieu rhine_wait_bit_high(rp, MIIRegAddr, 0x20); 1593f2148a47SJeff Kirsher 1594f2148a47SJeff Kirsher iowrite8(MII_BMSR | 0x40, ioaddr + MIIRegAddr); 1595f2148a47SJeff Kirsher } 1596f2148a47SJeff Kirsher 1597f2148a47SJeff Kirsher /* Disable MII link status auto-polling (required for MDIO access) */ 1598a384a33bSFrancois Romieu static void rhine_disable_linkmon(struct rhine_private *rp) 1599f2148a47SJeff Kirsher { 1600a384a33bSFrancois Romieu void __iomem *ioaddr = rp->base; 1601a384a33bSFrancois Romieu 1602f2148a47SJeff Kirsher iowrite8(0, ioaddr + MIICmd); 1603f2148a47SJeff Kirsher 1604a384a33bSFrancois Romieu if (rp->quirks & rqRhineI) { 1605f2148a47SJeff Kirsher iowrite8(0x01, ioaddr + MIIRegAddr); // MII_BMSR 1606f2148a47SJeff Kirsher 1607f2148a47SJeff Kirsher /* Can be called from ISR. Evil. */ 1608f2148a47SJeff Kirsher mdelay(1); 1609f2148a47SJeff Kirsher 1610f2148a47SJeff Kirsher /* 0x80 must be set immediately before turning it off */ 1611f2148a47SJeff Kirsher iowrite8(0x80, ioaddr + MIICmd); 1612f2148a47SJeff Kirsher 1613a384a33bSFrancois Romieu rhine_wait_bit_high(rp, MIIRegAddr, 0x20); 1614f2148a47SJeff Kirsher 1615f2148a47SJeff Kirsher /* Heh. Now clear 0x80 again. */ 1616f2148a47SJeff Kirsher iowrite8(0, ioaddr + MIICmd); 1617f2148a47SJeff Kirsher } 1618f2148a47SJeff Kirsher else 1619a384a33bSFrancois Romieu rhine_wait_bit_high(rp, MIIRegAddr, 0x80); 1620f2148a47SJeff Kirsher } 1621f2148a47SJeff Kirsher 1622f2148a47SJeff Kirsher /* Read and write over the MII Management Data I/O (MDIO) interface. 
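
   Protocol as implemented below (register semantics inferred from this
   code, not from documentation): pause link auto-polling first, since
   it owns the MII engine; load MIIPhyAddr/MIIRegAddr, trigger a read
   with 0x40 (a write with 0x20) in MIICmd, and spin until the trigger
   bit clears before touching MIIData; finally resume auto-polling.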
*/ 1623f2148a47SJeff Kirsher 1624f2148a47SJeff Kirsher static int mdio_read(struct net_device *dev, int phy_id, int regnum) 1625f2148a47SJeff Kirsher { 1626f2148a47SJeff Kirsher struct rhine_private *rp = netdev_priv(dev); 1627f2148a47SJeff Kirsher void __iomem *ioaddr = rp->base; 1628f2148a47SJeff Kirsher int result; 1629f2148a47SJeff Kirsher 1630a384a33bSFrancois Romieu rhine_disable_linkmon(rp); 1631f2148a47SJeff Kirsher 1632f2148a47SJeff Kirsher /* rhine_disable_linkmon already cleared MIICmd */ 1633f2148a47SJeff Kirsher iowrite8(phy_id, ioaddr + MIIPhyAddr); 1634f2148a47SJeff Kirsher iowrite8(regnum, ioaddr + MIIRegAddr); 1635f2148a47SJeff Kirsher iowrite8(0x40, ioaddr + MIICmd); /* Trigger read */ 1636a384a33bSFrancois Romieu rhine_wait_bit_low(rp, MIICmd, 0x40); 1637f2148a47SJeff Kirsher result = ioread16(ioaddr + MIIData); 1638f2148a47SJeff Kirsher 1639a384a33bSFrancois Romieu rhine_enable_linkmon(rp); 1640f2148a47SJeff Kirsher return result; 1641f2148a47SJeff Kirsher } 1642f2148a47SJeff Kirsher 1643f2148a47SJeff Kirsher static void mdio_write(struct net_device *dev, int phy_id, int regnum, int value) 1644f2148a47SJeff Kirsher { 1645f2148a47SJeff Kirsher struct rhine_private *rp = netdev_priv(dev); 1646f2148a47SJeff Kirsher void __iomem *ioaddr = rp->base; 1647f2148a47SJeff Kirsher 1648a384a33bSFrancois Romieu rhine_disable_linkmon(rp); 1649f2148a47SJeff Kirsher 1650f2148a47SJeff Kirsher /* rhine_disable_linkmon already cleared MIICmd */ 1651f2148a47SJeff Kirsher iowrite8(phy_id, ioaddr + MIIPhyAddr); 1652f2148a47SJeff Kirsher iowrite8(regnum, ioaddr + MIIRegAddr); 1653f2148a47SJeff Kirsher iowrite16(value, ioaddr + MIIData); 1654f2148a47SJeff Kirsher iowrite8(0x20, ioaddr + MIICmd); /* Trigger write */ 1655a384a33bSFrancois Romieu rhine_wait_bit_low(rp, MIICmd, 0x20); 1656f2148a47SJeff Kirsher 1657a384a33bSFrancois Romieu rhine_enable_linkmon(rp); 1658f2148a47SJeff Kirsher } 1659f2148a47SJeff Kirsher 16607ab87ff4SFrancois Romieu static void rhine_task_disable(struct rhine_private *rp) 16617ab87ff4SFrancois Romieu { 16627ab87ff4SFrancois Romieu mutex_lock(&rp->task_lock); 16637ab87ff4SFrancois Romieu rp->task_enable = false; 16647ab87ff4SFrancois Romieu mutex_unlock(&rp->task_lock); 16657ab87ff4SFrancois Romieu 16667ab87ff4SFrancois Romieu cancel_work_sync(&rp->slow_event_task); 16677ab87ff4SFrancois Romieu cancel_work_sync(&rp->reset_task); 16687ab87ff4SFrancois Romieu } 16697ab87ff4SFrancois Romieu 16707ab87ff4SFrancois Romieu static void rhine_task_enable(struct rhine_private *rp) 16717ab87ff4SFrancois Romieu { 16727ab87ff4SFrancois Romieu mutex_lock(&rp->task_lock); 16737ab87ff4SFrancois Romieu rp->task_enable = true; 16747ab87ff4SFrancois Romieu mutex_unlock(&rp->task_lock); 16757ab87ff4SFrancois Romieu } 16767ab87ff4SFrancois Romieu 1677f2148a47SJeff Kirsher static int rhine_open(struct net_device *dev) 1678f2148a47SJeff Kirsher { 1679f2148a47SJeff Kirsher struct rhine_private *rp = netdev_priv(dev); 1680f2148a47SJeff Kirsher void __iomem *ioaddr = rp->base; 1681f2148a47SJeff Kirsher int rc; 1682f2148a47SJeff Kirsher 1683f7630d18SAlexey Charkov rc = request_irq(rp->irq, rhine_interrupt, IRQF_SHARED, dev->name, dev); 1684f2148a47SJeff Kirsher if (rc) 16854d1fd9c1Sfrançois romieu goto out; 1686f2148a47SJeff Kirsher 1687f7630d18SAlexey Charkov netif_dbg(rp, ifup, dev, "%s() irq %d\n", __func__, rp->irq); 1688f2148a47SJeff Kirsher 1689f2148a47SJeff Kirsher rc = alloc_ring(dev); 16904d1fd9c1Sfrançois romieu if (rc < 0) 16914d1fd9c1Sfrançois romieu goto out_free_irq; 
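
	/*
	 * Bring-up continues below: Rx buffers, Tx buffers, MMIO/power/
	 * chip reset, deferred tasks, then register init; the error
	 * labels at the bottom unwind the ring and the irq in reverse
	 * order. Nothing is visible to the stack before
	 * netif_start_queue().
	 */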
16924d1fd9c1Sfrançois romieu 16938709bb2cSfrançois romieu rc = alloc_rbufs(dev); 16948709bb2cSfrançois romieu if (rc < 0) 16958709bb2cSfrançois romieu goto out_free_ring; 16968709bb2cSfrançois romieu 1697f2148a47SJeff Kirsher alloc_tbufs(dev); 1698d120c9a8SKevin Brace enable_mmio(rp->pioaddr, rp->quirks); 1699d120c9a8SKevin Brace rhine_power_init(dev); 1700f2148a47SJeff Kirsher rhine_chip_reset(dev); 17017ab87ff4SFrancois Romieu rhine_task_enable(rp); 1702f2148a47SJeff Kirsher init_registers(dev); 1703fc3e0f8aSFrancois Romieu 1704fc3e0f8aSFrancois Romieu netif_dbg(rp, ifup, dev, "%s() Done - status %04x MII status: %04x\n", 1705f2148a47SJeff Kirsher __func__, ioread16(ioaddr + ChipCmd), 1706f2148a47SJeff Kirsher mdio_read(dev, rp->mii_if.phy_id, MII_BMSR)); 1707f2148a47SJeff Kirsher 1708f2148a47SJeff Kirsher netif_start_queue(dev); 1709f2148a47SJeff Kirsher 17104d1fd9c1Sfrançois romieu out: 17114d1fd9c1Sfrançois romieu return rc; 17124d1fd9c1Sfrançois romieu 17138709bb2cSfrançois romieu out_free_ring: 17148709bb2cSfrançois romieu free_ring(dev); 17154d1fd9c1Sfrançois romieu out_free_irq: 17164d1fd9c1Sfrançois romieu free_irq(rp->irq, dev); 17174d1fd9c1Sfrançois romieu goto out; 1718f2148a47SJeff Kirsher } 1719f2148a47SJeff Kirsher 1720f2148a47SJeff Kirsher static void rhine_reset_task(struct work_struct *work) 1721f2148a47SJeff Kirsher { 1722f2148a47SJeff Kirsher struct rhine_private *rp = container_of(work, struct rhine_private, 1723f2148a47SJeff Kirsher reset_task); 1724f2148a47SJeff Kirsher struct net_device *dev = rp->dev; 1725f2148a47SJeff Kirsher 17267ab87ff4SFrancois Romieu mutex_lock(&rp->task_lock); 17277ab87ff4SFrancois Romieu 17287ab87ff4SFrancois Romieu if (!rp->task_enable) 17297ab87ff4SFrancois Romieu goto out_unlock; 1730f2148a47SJeff Kirsher 1731f2148a47SJeff Kirsher napi_disable(&rp->napi); 1732a926592fSRichard Weinberger netif_tx_disable(dev); 1733f2148a47SJeff Kirsher spin_lock_bh(&rp->lock); 1734f2148a47SJeff Kirsher 1735f2148a47SJeff Kirsher /* clear all descriptors */ 1736f2148a47SJeff Kirsher free_tbufs(dev); 1737f2148a47SJeff Kirsher alloc_tbufs(dev); 17388709bb2cSfrançois romieu 17398709bb2cSfrançois romieu rhine_reset_rbufs(rp); 1740f2148a47SJeff Kirsher 1741f2148a47SJeff Kirsher /* Reinitialize the hardware. */ 1742f2148a47SJeff Kirsher rhine_chip_reset(dev); 1743f2148a47SJeff Kirsher init_registers(dev); 1744f2148a47SJeff Kirsher 1745f2148a47SJeff Kirsher spin_unlock_bh(&rp->lock); 1746f2148a47SJeff Kirsher 1747860e9538SFlorian Westphal netif_trans_update(dev); /* prevent tx timeout */ 1748f2148a47SJeff Kirsher dev->stats.tx_errors++; 1749f2148a47SJeff Kirsher netif_wake_queue(dev); 17507ab87ff4SFrancois Romieu 17517ab87ff4SFrancois Romieu out_unlock: 17527ab87ff4SFrancois Romieu mutex_unlock(&rp->task_lock); 1753f2148a47SJeff Kirsher } 1754f2148a47SJeff Kirsher 17550290bd29SMichael S. 
Tsirkin static void rhine_tx_timeout(struct net_device *dev, unsigned int txqueue) 1756f2148a47SJeff Kirsher { 1757f2148a47SJeff Kirsher struct rhine_private *rp = netdev_priv(dev); 1758f2148a47SJeff Kirsher void __iomem *ioaddr = rp->base; 1759f2148a47SJeff Kirsher 1760f2148a47SJeff Kirsher netdev_warn(dev, "Transmit timed out, status %04x, PHY status %04x, resetting...\n", 1761f2148a47SJeff Kirsher ioread16(ioaddr + IntrStatus), 1762f2148a47SJeff Kirsher mdio_read(dev, rp->mii_if.phy_id, MII_BMSR)); 1763f2148a47SJeff Kirsher 1764f2148a47SJeff Kirsher schedule_work(&rp->reset_task); 1765f2148a47SJeff Kirsher } 1766f2148a47SJeff Kirsher 17673a5a883aSfrançois romieu static inline bool rhine_tx_queue_full(struct rhine_private *rp) 17683a5a883aSfrançois romieu { 17693a5a883aSfrançois romieu return (rp->cur_tx - rp->dirty_tx) >= TX_QUEUE_LEN; 17703a5a883aSfrançois romieu } 17713a5a883aSfrançois romieu 1772f2148a47SJeff Kirsher static netdev_tx_t rhine_start_tx(struct sk_buff *skb, 1773f2148a47SJeff Kirsher struct net_device *dev) 1774f2148a47SJeff Kirsher { 1775f2148a47SJeff Kirsher struct rhine_private *rp = netdev_priv(dev); 1776f7630d18SAlexey Charkov struct device *hwdev = dev->dev.parent; 1777f2148a47SJeff Kirsher void __iomem *ioaddr = rp->base; 1778f2148a47SJeff Kirsher unsigned entry; 1779f2148a47SJeff Kirsher 1780f2148a47SJeff Kirsher /* Caution: the write order is important here, set the field 1781f2148a47SJeff Kirsher with the "ownership" bits last. */ 1782f2148a47SJeff Kirsher 1783f2148a47SJeff Kirsher /* Calculate the next Tx descriptor entry. */ 1784f2148a47SJeff Kirsher entry = rp->cur_tx % TX_RING_SIZE; 1785f2148a47SJeff Kirsher 1786f2148a47SJeff Kirsher if (skb_padto(skb, ETH_ZLEN)) 1787f2148a47SJeff Kirsher return NETDEV_TX_OK; 1788f2148a47SJeff Kirsher 1789f2148a47SJeff Kirsher rp->tx_skbuff[entry] = skb; 1790f2148a47SJeff Kirsher 1791f2148a47SJeff Kirsher if ((rp->quirks & rqRhineI) && 1792f2148a47SJeff Kirsher (((unsigned long)skb->data & 3) || skb_shinfo(skb)->nr_frags != 0 || skb->ip_summed == CHECKSUM_PARTIAL)) { 1793f2148a47SJeff Kirsher /* Must use alignment buffer. */ 1794f2148a47SJeff Kirsher if (skb->len > PKT_BUF_SZ) { 1795f2148a47SJeff Kirsher /* packet too long, drop it */ 17964b3afc6eSEric W. Biederman dev_kfree_skb_any(skb); 1797f2148a47SJeff Kirsher rp->tx_skbuff[entry] = NULL; 1798f2148a47SJeff Kirsher dev->stats.tx_dropped++; 1799f2148a47SJeff Kirsher return NETDEV_TX_OK; 1800f2148a47SJeff Kirsher } 1801f2148a47SJeff Kirsher 1802f2148a47SJeff Kirsher /* Padding is not copied and so must be redone. */ 1803f2148a47SJeff Kirsher skb_copy_and_csum_dev(skb, rp->tx_buf[entry]); 1804f2148a47SJeff Kirsher if (skb->len < ETH_ZLEN) 1805f2148a47SJeff Kirsher memset(rp->tx_buf[entry] + skb->len, 0, 1806f2148a47SJeff Kirsher ETH_ZLEN - skb->len); 1807f2148a47SJeff Kirsher rp->tx_skbuff_dma[entry] = 0; 1808f2148a47SJeff Kirsher rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_bufs_dma + 1809f2148a47SJeff Kirsher (rp->tx_buf[entry] - 1810f2148a47SJeff Kirsher rp->tx_bufs)); 1811f2148a47SJeff Kirsher } else { 1812f2148a47SJeff Kirsher rp->tx_skbuff_dma[entry] = 1813f7630d18SAlexey Charkov dma_map_single(hwdev, skb->data, skb->len, 18144087c4dcSAlexey Charkov DMA_TO_DEVICE); 1815f7630d18SAlexey Charkov if (dma_mapping_error(hwdev, rp->tx_skbuff_dma[entry])) { 18164b3afc6eSEric W. 
Biederman dev_kfree_skb_any(skb); 18179b4fe5fbSNeil Horman rp->tx_skbuff_dma[entry] = 0; 18189b4fe5fbSNeil Horman dev->stats.tx_dropped++; 18199b4fe5fbSNeil Horman return NETDEV_TX_OK; 18209b4fe5fbSNeil Horman } 1821f2148a47SJeff Kirsher rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_skbuff_dma[entry]); 1822f2148a47SJeff Kirsher } 1823f2148a47SJeff Kirsher 1824f2148a47SJeff Kirsher rp->tx_ring[entry].desc_length = 1825f2148a47SJeff Kirsher cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN)); 1826f2148a47SJeff Kirsher 1827df8a39deSJiri Pirko if (unlikely(skb_vlan_tag_present(skb))) { 1828df8a39deSJiri Pirko u16 vid_pcp = skb_vlan_tag_get(skb); 1829207070f5SRoger Luethi 1830207070f5SRoger Luethi /* drop CFI/DEI bit, register needs VID and PCP */ 1831207070f5SRoger Luethi vid_pcp = (vid_pcp & VLAN_VID_MASK) | 1832207070f5SRoger Luethi ((vid_pcp & VLAN_PRIO_MASK) >> 1); 1833207070f5SRoger Luethi rp->tx_ring[entry].tx_status = cpu_to_le32((vid_pcp) << 16); 1834f2148a47SJeff Kirsher /* request tagging */ 1835f2148a47SJeff Kirsher rp->tx_ring[entry].desc_length |= cpu_to_le32(0x020000); 1836f2148a47SJeff Kirsher } 1837f2148a47SJeff Kirsher else 1838f2148a47SJeff Kirsher rp->tx_ring[entry].tx_status = 0; 1839f2148a47SJeff Kirsher 184092bf2008STino Reichardt netdev_sent_queue(dev, skb->len); 1841f2148a47SJeff Kirsher /* lock eth irq */ 1842e1efa872Sfrançois romieu dma_wmb(); 1843f2148a47SJeff Kirsher rp->tx_ring[entry].tx_status |= cpu_to_le32(DescOwn); 1844f2148a47SJeff Kirsher wmb(); 1845f2148a47SJeff Kirsher 1846f2148a47SJeff Kirsher rp->cur_tx++; 18473a5a883aSfrançois romieu /* 18483a5a883aSfrançois romieu * Nobody wants the cur_tx write to rot for ages once the NIC has 18493a5a883aSfrançois romieu * seen the transmit request, especially as the transmit completion 18503a5a883aSfrançois romieu * handler could miss it. 18513a5a883aSfrançois romieu */ 18523a5a883aSfrançois romieu smp_wmb(); 1853f2148a47SJeff Kirsher 1854f2148a47SJeff Kirsher /* Non-x86 Todo: explicitly flush cache lines here. */ 1855f2148a47SJeff Kirsher 1856df8a39deSJiri Pirko if (skb_vlan_tag_present(skb)) 1857f2148a47SJeff Kirsher /* Tx queues are bits 7-0 (first Tx queue: bit 7) */ 1858f2148a47SJeff Kirsher BYTE_REG_BITS_ON(1 << 7, ioaddr + TQWake); 1859f2148a47SJeff Kirsher 1860f2148a47SJeff Kirsher /* Wake the potentially-idle transmit channel */ 1861f2148a47SJeff Kirsher iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand, 1862f2148a47SJeff Kirsher ioaddr + ChipCmd1); 1863f2148a47SJeff Kirsher IOSYNC; 1864f2148a47SJeff Kirsher 18653a5a883aSfrançois romieu /* dirty_tx may be pessimistically out-of-sync. See rhine_tx. */ 18663a5a883aSfrançois romieu if (rhine_tx_queue_full(rp)) { 1867f2148a47SJeff Kirsher netif_stop_queue(dev); 18683a5a883aSfrançois romieu smp_rmb(); 18693a5a883aSfrançois romieu /* Rejuvenate. 
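	   Stop first, re-check after the smp_rmb(): if rhine_tx freed
	   slots between the fullness test and netif_stop_queue(), this
	   second look wakes the queue again so it cannot stay stopped
	   while the ring is no longer full.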
*/ 18703a5a883aSfrançois romieu if (!rhine_tx_queue_full(rp)) 18713a5a883aSfrançois romieu netif_wake_queue(dev); 18723a5a883aSfrançois romieu } 1873f2148a47SJeff Kirsher 1874fc3e0f8aSFrancois Romieu netif_dbg(rp, tx_queued, dev, "Transmit frame #%d queued in slot %d\n", 1875f2148a47SJeff Kirsher rp->cur_tx - 1, entry); 1876fc3e0f8aSFrancois Romieu 1877f2148a47SJeff Kirsher return NETDEV_TX_OK; 1878f2148a47SJeff Kirsher } 1879f2148a47SJeff Kirsher 18807ab87ff4SFrancois Romieu static void rhine_irq_disable(struct rhine_private *rp) 18817ab87ff4SFrancois Romieu { 18827ab87ff4SFrancois Romieu iowrite16(0x0000, rp->base + IntrEnable); 18837ab87ff4SFrancois Romieu } 18847ab87ff4SFrancois Romieu 1885f2148a47SJeff Kirsher /* The interrupt handler does all of the Rx thread work and cleans up 1886f2148a47SJeff Kirsher after the Tx thread. */ 1887f2148a47SJeff Kirsher static irqreturn_t rhine_interrupt(int irq, void *dev_instance) 1888f2148a47SJeff Kirsher { 1889f2148a47SJeff Kirsher struct net_device *dev = dev_instance; 1890f2148a47SJeff Kirsher struct rhine_private *rp = netdev_priv(dev); 18917ab87ff4SFrancois Romieu u32 status; 1892f2148a47SJeff Kirsher int handled = 0; 1893f2148a47SJeff Kirsher 18947ab87ff4SFrancois Romieu status = rhine_get_events(rp); 1895f2148a47SJeff Kirsher 1896fc3e0f8aSFrancois Romieu netif_dbg(rp, intr, dev, "Interrupt, status %08x\n", status); 1897f2148a47SJeff Kirsher 18987ab87ff4SFrancois Romieu if (status & RHINE_EVENT) { 18997ab87ff4SFrancois Romieu handled = 1; 1900f2148a47SJeff Kirsher 19017ab87ff4SFrancois Romieu rhine_irq_disable(rp); 1902f2148a47SJeff Kirsher napi_schedule(&rp->napi); 1903f2148a47SJeff Kirsher } 1904f2148a47SJeff Kirsher 19057ab87ff4SFrancois Romieu if (status & ~(IntrLinkChange | IntrStatsMax | RHINE_EVENT_NAPI)) { 1906fc3e0f8aSFrancois Romieu netif_err(rp, intr, dev, "Something Wicked happened! %08x\n", 19077ab87ff4SFrancois Romieu status); 1908f2148a47SJeff Kirsher } 1909f2148a47SJeff Kirsher 1910f2148a47SJeff Kirsher return IRQ_RETVAL(handled); 1911f2148a47SJeff Kirsher } 1912f2148a47SJeff Kirsher 1913f2148a47SJeff Kirsher /* This routine is logically part of the interrupt handler, but isolated 1914f2148a47SJeff Kirsher for clarity. */ 1915f2148a47SJeff Kirsher static void rhine_tx(struct net_device *dev) 1916f2148a47SJeff Kirsher { 1917f2148a47SJeff Kirsher struct rhine_private *rp = netdev_priv(dev); 1918f7630d18SAlexey Charkov struct device *hwdev = dev->dev.parent; 191992bf2008STino Reichardt unsigned int pkts_compl = 0, bytes_compl = 0; 19203a5a883aSfrançois romieu unsigned int dirty_tx = rp->dirty_tx; 19213a5a883aSfrançois romieu unsigned int cur_tx; 192292bf2008STino Reichardt struct sk_buff *skb; 1923f2148a47SJeff Kirsher 19243a5a883aSfrançois romieu /* 19253a5a883aSfrançois romieu * The race with rhine_start_tx does not matter here as long as the 19263a5a883aSfrançois romieu * driver enforces a value of cur_tx that was relevant when the 19273a5a883aSfrançois romieu * packet was scheduled to the network chipset. 19283a5a883aSfrançois romieu * Executive summary: smp_rmb() balances smp_wmb() in rhine_start_tx. 
19293a5a883aSfrançois romieu */ 19303a5a883aSfrançois romieu smp_rmb(); 19313a5a883aSfrançois romieu cur_tx = rp->cur_tx; 1932f2148a47SJeff Kirsher /* find and cleanup dirty tx descriptors */ 19333a5a883aSfrançois romieu while (dirty_tx != cur_tx) { 19343a5a883aSfrançois romieu unsigned int entry = dirty_tx % TX_RING_SIZE; 19353a5a883aSfrançois romieu u32 txstatus = le32_to_cpu(rp->tx_ring[entry].tx_status); 19363a5a883aSfrançois romieu 1937fc3e0f8aSFrancois Romieu netif_dbg(rp, tx_done, dev, "Tx scavenge %d status %08x\n", 1938f2148a47SJeff Kirsher entry, txstatus); 1939f2148a47SJeff Kirsher if (txstatus & DescOwn) 1940f2148a47SJeff Kirsher break; 194192bf2008STino Reichardt skb = rp->tx_skbuff[entry]; 1942f2148a47SJeff Kirsher if (txstatus & 0x8000) { 1943fc3e0f8aSFrancois Romieu netif_dbg(rp, tx_done, dev, 1944fc3e0f8aSFrancois Romieu "Transmit error, Tx status %08x\n", txstatus); 1945f2148a47SJeff Kirsher dev->stats.tx_errors++; 1946f2148a47SJeff Kirsher if (txstatus & 0x0400) 1947f2148a47SJeff Kirsher dev->stats.tx_carrier_errors++; 1948f2148a47SJeff Kirsher if (txstatus & 0x0200) 1949f2148a47SJeff Kirsher dev->stats.tx_window_errors++; 1950f2148a47SJeff Kirsher if (txstatus & 0x0100) 1951f2148a47SJeff Kirsher dev->stats.tx_aborted_errors++; 1952f2148a47SJeff Kirsher if (txstatus & 0x0080) 1953f2148a47SJeff Kirsher dev->stats.tx_heartbeat_errors++; 1954f2148a47SJeff Kirsher if (((rp->quirks & rqRhineI) && txstatus & 0x0002) || 1955f2148a47SJeff Kirsher (txstatus & 0x0800) || (txstatus & 0x1000)) { 1956f2148a47SJeff Kirsher dev->stats.tx_fifo_errors++; 1957f2148a47SJeff Kirsher rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn); 1958f2148a47SJeff Kirsher break; /* Keep the skb - we try again */ 1959f2148a47SJeff Kirsher } 1960f2148a47SJeff Kirsher /* Transmitter restarted in 'abnormal' handler. */ 1961f2148a47SJeff Kirsher } else { 1962f2148a47SJeff Kirsher if (rp->quirks & rqRhineI) 1963f2148a47SJeff Kirsher dev->stats.collisions += (txstatus >> 3) & 0x0F; 1964f2148a47SJeff Kirsher else 1965f2148a47SJeff Kirsher dev->stats.collisions += txstatus & 0x0F; 1966fc3e0f8aSFrancois Romieu netif_dbg(rp, tx_done, dev, "collisions: %1.1x:%1.1x\n", 1967fc3e0f8aSFrancois Romieu (txstatus >> 3) & 0xF, txstatus & 0xF); 1968f7b5d1b9SJamie Gloudon 1969f7b5d1b9SJamie Gloudon u64_stats_update_begin(&rp->tx_stats.syncp); 197092bf2008STino Reichardt rp->tx_stats.bytes += skb->len; 1971f7b5d1b9SJamie Gloudon rp->tx_stats.packets++; 1972f7b5d1b9SJamie Gloudon u64_stats_update_end(&rp->tx_stats.syncp); 1973f2148a47SJeff Kirsher } 1974f2148a47SJeff Kirsher /* Free the original skb. */ 1975f2148a47SJeff Kirsher if (rp->tx_skbuff_dma[entry]) { 1976f7630d18SAlexey Charkov dma_unmap_single(hwdev, 1977f2148a47SJeff Kirsher rp->tx_skbuff_dma[entry], 197892bf2008STino Reichardt skb->len, 19794087c4dcSAlexey Charkov DMA_TO_DEVICE); 1980f2148a47SJeff Kirsher } 198192bf2008STino Reichardt bytes_compl += skb->len; 198292bf2008STino Reichardt pkts_compl++; 198392bf2008STino Reichardt dev_consume_skb_any(skb); 1984f2148a47SJeff Kirsher rp->tx_skbuff[entry] = NULL; 19853a5a883aSfrançois romieu dirty_tx++; 1986f2148a47SJeff Kirsher } 198792bf2008STino Reichardt 19883a5a883aSfrançois romieu rp->dirty_tx = dirty_tx; 19893a5a883aSfrançois romieu /* Pity we can't rely on the nearby BQL completion implicit barrier. 
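	   Hence the explicit smp_wmb() below: it publishes the dirty_tx
	   update before the queue may be woken, so the re-check in
	   rhine_start_tx should not observe a stale value.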
*/ 19903a5a883aSfrançois romieu smp_wmb(); 19913a5a883aSfrançois romieu 199292bf2008STino Reichardt netdev_completed_queue(dev, pkts_compl, bytes_compl); 19933a5a883aSfrançois romieu 19943a5a883aSfrançois romieu /* cur_tx may be optimistically out-of-sync. See rhine_start_tx. */ 19953a5a883aSfrançois romieu if (!rhine_tx_queue_full(rp) && netif_queue_stopped(dev)) { 1996f2148a47SJeff Kirsher netif_wake_queue(dev); 19973a5a883aSfrançois romieu smp_rmb(); 19983a5a883aSfrançois romieu /* Rejuvenate. */ 19993a5a883aSfrançois romieu if (rhine_tx_queue_full(rp)) 20003a5a883aSfrançois romieu netif_stop_queue(dev); 20013a5a883aSfrançois romieu } 2002f2148a47SJeff Kirsher } 2003f2148a47SJeff Kirsher 2004f2148a47SJeff Kirsher /** 2005f2148a47SJeff Kirsher * rhine_get_vlan_tci - extract TCI from Rx data buffer 2006f2148a47SJeff Kirsher * @skb: pointer to sk_buff 2007f2148a47SJeff Kirsher * @data_size: used data area of the buffer including CRC 2008f2148a47SJeff Kirsher * 2009f2148a47SJeff Kirsher * If hardware VLAN tag extraction is enabled and the chip indicates an 802.1Q 2010f2148a47SJeff Kirsher * packet, the extracted 802.1Q header (2 bytes TPID + 2 bytes TCI) is 4-byte 2011f2148a47SJeff Kirsher * aligned following the CRC. 2012f2148a47SJeff Kirsher */ 2013f2148a47SJeff Kirsher static inline u16 rhine_get_vlan_tci(struct sk_buff *skb, int data_size) 2014f2148a47SJeff Kirsher { 2015f2148a47SJeff Kirsher u8 *trailer = (u8 *)skb->data + ((data_size + 3) & ~3) + 2; 2016f2148a47SJeff Kirsher return be16_to_cpup((__be16 *)trailer); 2017f2148a47SJeff Kirsher } 2018f2148a47SJeff Kirsher 2019810f19bcSfrançois romieu static inline void rhine_rx_vlan_tag(struct sk_buff *skb, struct rx_desc *desc, 2020810f19bcSfrançois romieu int data_size) 2021810f19bcSfrançois romieu { 2022810f19bcSfrançois romieu dma_rmb(); 2023810f19bcSfrançois romieu if (unlikely(desc->desc_length & cpu_to_le32(DescTag))) { 2024810f19bcSfrançois romieu u16 vlan_tci; 2025810f19bcSfrançois romieu 2026810f19bcSfrançois romieu vlan_tci = rhine_get_vlan_tci(skb, data_size); 2027810f19bcSfrançois romieu __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci); 2028810f19bcSfrançois romieu } 2029810f19bcSfrançois romieu } 2030810f19bcSfrançois romieu 2031f2148a47SJeff Kirsher /* Process up to limit frames from receive ring */ 2032f2148a47SJeff Kirsher static int rhine_rx(struct net_device *dev, int limit) 2033f2148a47SJeff Kirsher { 2034f2148a47SJeff Kirsher struct rhine_private *rp = netdev_priv(dev); 2035f7630d18SAlexey Charkov struct device *hwdev = dev->dev.parent; 2036f2148a47SJeff Kirsher int entry = rp->cur_rx % RX_RING_SIZE; 203762ca1ba0Sfrançois romieu int count; 2038f2148a47SJeff Kirsher 2039fc3e0f8aSFrancois Romieu netif_dbg(rp, rx_status, dev, "%s(), entry %d status %08x\n", __func__, 204062ca1ba0Sfrançois romieu entry, le32_to_cpu(rp->rx_ring[entry].rx_status)); 2041f2148a47SJeff Kirsher 2042f2148a47SJeff Kirsher /* If EOP is set on the next entry, it's a new packet. Send it up. 
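	   The loop below visits at most 'limit' descriptors and stops at
	   the first one still owned by the NIC (DescOwn set); each
	   visited entry is either accounted as an error or handed to the
	   stack, and its descriptor is returned to the NIC afterwards.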
*/ 2043f2148a47SJeff Kirsher for (count = 0; count < limit; ++count) { 204462ca1ba0Sfrançois romieu struct rx_desc *desc = rp->rx_ring + entry; 2045f2148a47SJeff Kirsher u32 desc_status = le32_to_cpu(desc->rx_status); 2046f2148a47SJeff Kirsher int data_size = desc_status >> 16; 2047f2148a47SJeff Kirsher 2048f2148a47SJeff Kirsher if (desc_status & DescOwn) 2049f2148a47SJeff Kirsher break; 2050f2148a47SJeff Kirsher 2051fc3e0f8aSFrancois Romieu netif_dbg(rp, rx_status, dev, "%s() status %08x\n", __func__, 2052fc3e0f8aSFrancois Romieu desc_status); 2053f2148a47SJeff Kirsher 2054f2148a47SJeff Kirsher if ((desc_status & (RxWholePkt | RxErr)) != RxWholePkt) { 2055f2148a47SJeff Kirsher if ((desc_status & RxWholePkt) != RxWholePkt) { 2056f2148a47SJeff Kirsher netdev_warn(dev, 2057f2148a47SJeff Kirsher "Oversized Ethernet frame spanned multiple buffers, " 2058f2148a47SJeff Kirsher "entry %#x length %d status %08x!\n", 2059f2148a47SJeff Kirsher entry, data_size, 2060f2148a47SJeff Kirsher desc_status); 2061f2148a47SJeff Kirsher dev->stats.rx_length_errors++; 2062f2148a47SJeff Kirsher } else if (desc_status & RxErr) { 2063f2148a47SJeff Kirsher /* There was an error. */ 2064fc3e0f8aSFrancois Romieu netif_dbg(rp, rx_err, dev, 2065fc3e0f8aSFrancois Romieu "%s() Rx error %08x\n", __func__, 2066fc3e0f8aSFrancois Romieu desc_status); 2067f2148a47SJeff Kirsher dev->stats.rx_errors++; 2068f2148a47SJeff Kirsher if (desc_status & 0x0030) 2069f2148a47SJeff Kirsher dev->stats.rx_length_errors++; 2070f2148a47SJeff Kirsher if (desc_status & 0x0048) 2071f2148a47SJeff Kirsher dev->stats.rx_fifo_errors++; 2072f2148a47SJeff Kirsher if (desc_status & 0x0004) 2073f2148a47SJeff Kirsher dev->stats.rx_frame_errors++; 2074f2148a47SJeff Kirsher if (desc_status & 0x0002) { 2075f2148a47SJeff Kirsher /* this can also be updated outside the interrupt handler */ 2076f2148a47SJeff Kirsher spin_lock(&rp->lock); 2077f2148a47SJeff Kirsher dev->stats.rx_crc_errors++; 2078f2148a47SJeff Kirsher spin_unlock(&rp->lock); 2079f2148a47SJeff Kirsher } 2080f2148a47SJeff Kirsher } 2081f2148a47SJeff Kirsher } else { 2082f2148a47SJeff Kirsher /* Length should omit the CRC */ 2083f2148a47SJeff Kirsher int pkt_len = data_size - 4; 20848709bb2cSfrançois romieu struct sk_buff *skb; 2085f2148a47SJeff Kirsher 2086f2148a47SJeff Kirsher /* Check if the packet is long enough to accept without 2087f2148a47SJeff Kirsher copying to a minimally-sized skbuff. 
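	   Two paths follow: frames under rx_copybreak are copied into a
	   freshly allocated small skb and the mapped DMA buffer stays in
	   the ring, while larger frames are passed up whole, but only
	   once a replacement buffer has been mapped, so an allocation
	   failure drops the frame instead of leaking a ring slot.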
*/ 20888709bb2cSfrançois romieu if (pkt_len < rx_copybreak) { 2089f2148a47SJeff Kirsher skb = netdev_alloc_skb_ip_align(dev, pkt_len); 20908709bb2cSfrançois romieu if (unlikely(!skb)) 20918709bb2cSfrançois romieu goto drop; 20928709bb2cSfrançois romieu 2093f7630d18SAlexey Charkov dma_sync_single_for_cpu(hwdev, 2094f2148a47SJeff Kirsher rp->rx_skbuff_dma[entry], 2095f2148a47SJeff Kirsher rp->rx_buf_sz, 20964087c4dcSAlexey Charkov DMA_FROM_DEVICE); 2097f2148a47SJeff Kirsher 2098f2148a47SJeff Kirsher skb_copy_to_linear_data(skb, 2099f2148a47SJeff Kirsher rp->rx_skbuff[entry]->data, 2100f2148a47SJeff Kirsher pkt_len); 21018709bb2cSfrançois romieu 2102f7630d18SAlexey Charkov dma_sync_single_for_device(hwdev, 2103f2148a47SJeff Kirsher rp->rx_skbuff_dma[entry], 2104f2148a47SJeff Kirsher rp->rx_buf_sz, 21054087c4dcSAlexey Charkov DMA_FROM_DEVICE); 2106f2148a47SJeff Kirsher } else { 21078709bb2cSfrançois romieu struct rhine_skb_dma sd; 21088709bb2cSfrançois romieu 21098709bb2cSfrançois romieu if (unlikely(rhine_skb_dma_init(dev, &sd) < 0)) 21108709bb2cSfrançois romieu goto drop; 21118709bb2cSfrançois romieu 2112f2148a47SJeff Kirsher skb = rp->rx_skbuff[entry]; 21138709bb2cSfrançois romieu 2114f7630d18SAlexey Charkov dma_unmap_single(hwdev, 2115f2148a47SJeff Kirsher rp->rx_skbuff_dma[entry], 2116f2148a47SJeff Kirsher rp->rx_buf_sz, 21174087c4dcSAlexey Charkov DMA_FROM_DEVICE); 21188709bb2cSfrançois romieu rhine_skb_dma_nic_store(rp, &sd, entry); 2119f2148a47SJeff Kirsher } 2120f2148a47SJeff Kirsher 21218709bb2cSfrançois romieu skb_put(skb, pkt_len); 2122f2148a47SJeff Kirsher 2123810f19bcSfrançois romieu rhine_rx_vlan_tag(skb, desc, data_size); 2124810f19bcSfrançois romieu 21255f715c09SAndrej Ota skb->protocol = eth_type_trans(skb, dev); 21265f715c09SAndrej Ota 2127f2148a47SJeff Kirsher netif_receive_skb(skb); 2128f7b5d1b9SJamie Gloudon 2129f7b5d1b9SJamie Gloudon u64_stats_update_begin(&rp->rx_stats.syncp); 2130f7b5d1b9SJamie Gloudon rp->rx_stats.bytes += pkt_len; 2131f7b5d1b9SJamie Gloudon rp->rx_stats.packets++; 2132f7b5d1b9SJamie Gloudon u64_stats_update_end(&rp->rx_stats.syncp); 2133f2148a47SJeff Kirsher } 21348709bb2cSfrançois romieu give_descriptor_to_nic: 21358709bb2cSfrançois romieu desc->rx_status = cpu_to_le32(DescOwn); 2136f2148a47SJeff Kirsher entry = (++rp->cur_rx) % RX_RING_SIZE; 2137f2148a47SJeff Kirsher } 2138f2148a47SJeff Kirsher 2139f2148a47SJeff Kirsher return count; 21408709bb2cSfrançois romieu 21418709bb2cSfrançois romieu drop: 21428709bb2cSfrançois romieu dev->stats.rx_dropped++; 21438709bb2cSfrançois romieu goto give_descriptor_to_nic; 2144f2148a47SJeff Kirsher } 2145f2148a47SJeff Kirsher 2146f2148a47SJeff Kirsher static void rhine_restart_tx(struct net_device *dev) { 2147f2148a47SJeff Kirsher struct rhine_private *rp = netdev_priv(dev); 2148f2148a47SJeff Kirsher void __iomem *ioaddr = rp->base; 2149f2148a47SJeff Kirsher int entry = rp->dirty_tx % TX_RING_SIZE; 2150f2148a47SJeff Kirsher u32 intr_status; 2151f2148a47SJeff Kirsher 2152f2148a47SJeff Kirsher /* 2153f2148a47SJeff Kirsher * If new errors occurred, we need to sort them out before doing Tx. 2154f2148a47SJeff Kirsher * In that case the ISR will be back here RSN anyway. 2155f2148a47SJeff Kirsher */ 2156a20a28bcSFrancois Romieu intr_status = rhine_get_events(rp); 2157f2148a47SJeff Kirsher 2158f2148a47SJeff Kirsher if ((intr_status & IntrTxErrSummary) == 0) { 2159f2148a47SJeff Kirsher 2160f2148a47SJeff Kirsher /* We know better than the chip where it should continue. 
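	   That is: reload TxRingPtr with the descriptor at dirty_tx (the
	   oldest entry not yet reclaimed) before re-issuing CmdTxOn, so
	   transmission resumes exactly where the reclaim logic left off.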
*/ 2161f2148a47SJeff Kirsher iowrite32(rp->tx_ring_dma + entry * sizeof(struct tx_desc), 2162f2148a47SJeff Kirsher ioaddr + TxRingPtr); 2163f2148a47SJeff Kirsher 2164f2148a47SJeff Kirsher iowrite8(ioread8(ioaddr + ChipCmd) | CmdTxOn, 2165f2148a47SJeff Kirsher ioaddr + ChipCmd); 2166f2148a47SJeff Kirsher 2167f2148a47SJeff Kirsher if (rp->tx_ring[entry].desc_length & cpu_to_le32(0x020000)) 2168f2148a47SJeff Kirsher /* Tx queues are bits 7-0 (first Tx queue: bit 7) */ 2169f2148a47SJeff Kirsher BYTE_REG_BITS_ON(1 << 7, ioaddr + TQWake); 2170f2148a47SJeff Kirsher 2171f2148a47SJeff Kirsher iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand, 2172f2148a47SJeff Kirsher ioaddr + ChipCmd1); 2173f2148a47SJeff Kirsher IOSYNC; 2174f2148a47SJeff Kirsher } 2175f2148a47SJeff Kirsher else { 2176f2148a47SJeff Kirsher /* This should never happen */ 2177fc3e0f8aSFrancois Romieu netif_warn(rp, tx_err, dev, "another error occurred %08x\n", 2178fc3e0f8aSFrancois Romieu intr_status); 2179f2148a47SJeff Kirsher } 2180f2148a47SJeff Kirsher 2181f2148a47SJeff Kirsher } 2182f2148a47SJeff Kirsher 21837ab87ff4SFrancois Romieu static void rhine_slow_event_task(struct work_struct *work) 2184f2148a47SJeff Kirsher { 21857ab87ff4SFrancois Romieu struct rhine_private *rp = 21867ab87ff4SFrancois Romieu container_of(work, struct rhine_private, slow_event_task); 21877ab87ff4SFrancois Romieu struct net_device *dev = rp->dev; 21887ab87ff4SFrancois Romieu u32 intr_status; 2189f2148a47SJeff Kirsher 21907ab87ff4SFrancois Romieu mutex_lock(&rp->task_lock); 21917ab87ff4SFrancois Romieu 21927ab87ff4SFrancois Romieu if (!rp->task_enable) 21937ab87ff4SFrancois Romieu goto out_unlock; 21947ab87ff4SFrancois Romieu 21957ab87ff4SFrancois Romieu intr_status = rhine_get_events(rp); 21967ab87ff4SFrancois Romieu rhine_ack_events(rp, intr_status & RHINE_EVENT_SLOW); 2197f2148a47SJeff Kirsher 2198f2148a47SJeff Kirsher if (intr_status & IntrLinkChange) 2199f2148a47SJeff Kirsher rhine_check_media(dev, 0); 2200f2148a47SJeff Kirsher 2201fc3e0f8aSFrancois Romieu if (intr_status & IntrPCIErr) 2202fc3e0f8aSFrancois Romieu netif_warn(rp, hw, dev, "PCI error\n"); 2203fc3e0f8aSFrancois Romieu 2204559bcac3SDavid S. Miller iowrite16(RHINE_EVENT & 0xffff, rp->base + IntrEnable); 2205f2148a47SJeff Kirsher 22067ab87ff4SFrancois Romieu out_unlock: 22077ab87ff4SFrancois Romieu mutex_unlock(&rp->task_lock); 2208f2148a47SJeff Kirsher } 2209f2148a47SJeff Kirsher 2210bc1f4470Sstephen hemminger static void 2211f7b5d1b9SJamie Gloudon rhine_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) 2212f2148a47SJeff Kirsher { 2213f2148a47SJeff Kirsher struct rhine_private *rp = netdev_priv(dev); 2214f7b5d1b9SJamie Gloudon unsigned int start; 2215f2148a47SJeff Kirsher 22167ab87ff4SFrancois Romieu spin_lock_bh(&rp->lock); 22177ab87ff4SFrancois Romieu rhine_update_rx_crc_and_missed_errord(rp); 22187ab87ff4SFrancois Romieu spin_unlock_bh(&rp->lock); 2219f2148a47SJeff Kirsher 2220f7b5d1b9SJamie Gloudon netdev_stats_to_stats64(stats, &dev->stats); 2221f7b5d1b9SJamie Gloudon 2222f7b5d1b9SJamie Gloudon do { 222357a7744eSEric W. Biederman start = u64_stats_fetch_begin_irq(&rp->rx_stats.syncp); 2224f7b5d1b9SJamie Gloudon stats->rx_packets = rp->rx_stats.packets; 2225f7b5d1b9SJamie Gloudon stats->rx_bytes = rp->rx_stats.bytes; 222657a7744eSEric W. Biederman } while (u64_stats_fetch_retry_irq(&rp->rx_stats.syncp, start)); 2227f7b5d1b9SJamie Gloudon 2228f7b5d1b9SJamie Gloudon do { 222957a7744eSEric W. 

static void rhine_set_rx_mode(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	u32 mc_filter[2];	/* Multicast hash filter */
	u8 rx_mode = 0x0C;	/* Note: 0x02=accept runt, 0x01=accept errs */
	struct netdev_hw_addr *ha;

	if (dev->flags & IFF_PROMISC) {		/* Set promiscuous. */
		rx_mode = 0x1C;
		iowrite32(0xffffffff, ioaddr + MulticastFilter0);
		iowrite32(0xffffffff, ioaddr + MulticastFilter1);
	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
		   (dev->flags & IFF_ALLMULTI)) {
		/* Too many to match, or accept all multicasts. */
		iowrite32(0xffffffff, ioaddr + MulticastFilter0);
		iowrite32(0xffffffff, ioaddr + MulticastFilter1);
	} else if (rp->quirks & rqMgmt) {
		int i = 0;
		u32 mCAMmask = 0;	/* 32 mCAMs (6105M and better) */

		netdev_for_each_mc_addr(ha, dev) {
			if (i == MCAM_SIZE)
				break;
			rhine_set_cam(ioaddr, i, ha->addr);
			mCAMmask |= 1 << i;
			i++;
		}
		rhine_set_cam_mask(ioaddr, mCAMmask);
	} else {
		memset(mc_filter, 0, sizeof(mc_filter));
		netdev_for_each_mc_addr(ha, dev) {
			int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;

			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
		}
		iowrite32(mc_filter[0], ioaddr + MulticastFilter0);
		iowrite32(mc_filter[1], ioaddr + MulticastFilter1);
	}
	/* enable/disable VLAN receive filtering */
	if (rp->quirks & rqMgmt) {
		if (dev->flags & IFF_PROMISC)
			BYTE_REG_BITS_OFF(BCR1_VIDFR, ioaddr + PCIBusConfig1);
		else
			BYTE_REG_BITS_ON(BCR1_VIDFR, ioaddr + PCIBusConfig1);
	}
	BYTE_REG_BITS_ON(rx_mode, ioaddr + RxConfig);
}

static void netdev_get_drvinfo(struct net_device *dev,
			       struct ethtool_drvinfo *info)
{
	struct device *hwdev = dev->dev.parent;

	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->bus_info, dev_name(hwdev), sizeof(info->bus_info));
}
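/*
 * How the 64-bit hash filter above maps an address to a bit (worked
 * example, value made up): ether_crc() yields a 32-bit CRC of the 6-byte
 * MAC, and the top six bits (>> 26) select one of 64 hash bins.  Bit 5 of
 * bit_nr (bit_nr >> 5) picks MulticastFilter0 or MulticastFilter1, and
 * the low five bits (bit_nr & 31) pick the bit within that register.
 * E.g. bit_nr == 37 sets bit 5 of mc_filter[1].  Hash collisions are
 * harmless: the filter may pass extra multicast frames, and the stack
 * filters out the rest in software.
 */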

static int netdev_get_link_ksettings(struct net_device *dev,
				     struct ethtool_link_ksettings *cmd)
{
	struct rhine_private *rp = netdev_priv(dev);

	mutex_lock(&rp->task_lock);
	mii_ethtool_get_link_ksettings(&rp->mii_if, cmd);
	mutex_unlock(&rp->task_lock);

	return 0;
}

static int netdev_set_link_ksettings(struct net_device *dev,
				     const struct ethtool_link_ksettings *cmd)
{
	struct rhine_private *rp = netdev_priv(dev);
	int rc;

	mutex_lock(&rp->task_lock);
	rc = mii_ethtool_set_link_ksettings(&rp->mii_if, cmd);
	rhine_set_carrier(&rp->mii_if);
	mutex_unlock(&rp->task_lock);

	return rc;
}

static int netdev_nway_reset(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);

	return mii_nway_restart(&rp->mii_if);
}

static u32 netdev_get_link(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);

	return mii_link_ok(&rp->mii_if);
}

static u32 netdev_get_msglevel(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);

	return rp->msg_enable;
}

static void netdev_set_msglevel(struct net_device *dev, u32 value)
{
	struct rhine_private *rp = netdev_priv(dev);

	rp->msg_enable = value;
}

static void rhine_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct rhine_private *rp = netdev_priv(dev);

	if (!(rp->quirks & rqWOL))
		return;

	spin_lock_irq(&rp->lock);
	wol->supported = WAKE_PHY | WAKE_MAGIC |
			 WAKE_UCAST | WAKE_MCAST | WAKE_BCAST;	/* Untested */
	wol->wolopts = rp->wolopts;
	spin_unlock_irq(&rp->lock);
}
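/*
 * The WAKE_* flags reported above correspond to ethtool(8) wake-on-LAN
 * letters: p (PHY/link), u (unicast), m (multicast), b (broadcast) and
 * g (magic packet).  A typical way to arm magic-packet wake-up from
 * userspace (interface name illustrative):
 *
 *	ethtool -s eth0 wol g
 *
 * rhine_set_wol() below only records the request; the WOL registers are
 * actually programmed by rhine_shutdown_pci() at power-off time.
 */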

static int rhine_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct rhine_private *rp = netdev_priv(dev);
	u32 support = WAKE_PHY | WAKE_MAGIC |
		      WAKE_UCAST | WAKE_MCAST | WAKE_BCAST;	/* Untested */

	if (!(rp->quirks & rqWOL))
		return -EINVAL;

	if (wol->wolopts & ~support)
		return -EINVAL;

	spin_lock_irq(&rp->lock);
	rp->wolopts = wol->wolopts;
	spin_unlock_irq(&rp->lock);

	return 0;
}

static const struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo		= netdev_get_drvinfo,
	.nway_reset		= netdev_nway_reset,
	.get_link		= netdev_get_link,
	.get_msglevel		= netdev_get_msglevel,
	.set_msglevel		= netdev_set_msglevel,
	.get_wol		= rhine_get_wol,
	.set_wol		= rhine_set_wol,
	.get_link_ksettings	= netdev_get_link_ksettings,
	.set_link_ksettings	= netdev_set_link_ksettings,
};

static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct rhine_private *rp = netdev_priv(dev);
	int rc;

	if (!netif_running(dev))
		return -EINVAL;

	mutex_lock(&rp->task_lock);
	rc = generic_mii_ioctl(&rp->mii_if, if_mii(rq), cmd, NULL);
	rhine_set_carrier(&rp->mii_if);
	mutex_unlock(&rp->task_lock);

	return rc;
}
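/*
 * Design note: rp->task_lock is the common serializer for everything that
 * touches the MII interface from process context -- the link_ksettings
 * handlers, the SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG path through
 * generic_mii_ioctl() above, and rhine_slow_event_task() -- so MDIO
 * register accesses never interleave with the slow-event work item.
 */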

static int rhine_close(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	rhine_task_disable(rp);
	napi_disable(&rp->napi);
	netif_stop_queue(dev);

	netif_dbg(rp, ifdown, dev, "Shutting down ethercard, status was %04x\n",
		  ioread16(ioaddr + ChipCmd));

	/* Switch to loopback mode to avoid hardware races. */
	iowrite8(rp->tx_thresh | 0x02, ioaddr + TxConfig);

	rhine_irq_disable(rp);

	/* Stop the chip's Tx and Rx processes. */
	iowrite16(CmdStop, ioaddr + ChipCmd);

	free_irq(rp->irq, dev);
	free_rbufs(dev);
	free_tbufs(dev);
	free_ring(dev);

	return 0;
}

static void rhine_remove_one_pci(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rhine_private *rp = netdev_priv(dev);

	unregister_netdev(dev);

	pci_iounmap(pdev, rp->base);
	pci_release_regions(pdev);

	free_netdev(dev);
	pci_disable_device(pdev);
}

static int rhine_remove_one_platform(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct rhine_private *rp = netdev_priv(dev);

	unregister_netdev(dev);

	iounmap(rp->base);

	free_netdev(dev);

	return 0;
}
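/*
 * Teardown ordering in both remove paths: unregister_netdev() runs first,
 * so the stack can no longer open the device or submit packets, then the
 * registers are unmapped, then free_netdev() releases the device.  Only
 * the PCI variant releases I/O regions and disables the device; the
 * platform bus has no equivalent enable/region bookkeeping to undo.
 */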

static void rhine_shutdown_pci(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	if (!(rp->quirks & rqWOL))
		return; /* Nothing to do for non-WOL adapters */

	rhine_power_init(dev);

	/* Make sure we use pattern 0, 1 and not 4, 5 */
	if (rp->quirks & rq6patterns)
		iowrite8(0x04, ioaddr + WOLcgClr);

	spin_lock(&rp->lock);

	if (rp->wolopts & WAKE_MAGIC) {
		iowrite8(WOLmagic, ioaddr + WOLcrSet);
		/*
		 * Turn EEPROM-controlled wake-up back on -- some hardware may
		 * not cooperate otherwise.
		 */
		iowrite8(ioread8(ioaddr + ConfigA) | 0x03, ioaddr + ConfigA);
	}

	if (rp->wolopts & (WAKE_BCAST|WAKE_MCAST))
		iowrite8(WOLbmcast, ioaddr + WOLcgSet);

	if (rp->wolopts & WAKE_PHY)
		iowrite8(WOLlnkon | WOLlnkoff, ioaddr + WOLcrSet);

	if (rp->wolopts & WAKE_UCAST)
		iowrite8(WOLucast, ioaddr + WOLcrSet);

	if (rp->wolopts) {
		/* Enable legacy WOL (for old motherboards) */
		iowrite8(0x01, ioaddr + PwcfgSet);
		iowrite8(ioread8(ioaddr + StickyHW) | 0x04, ioaddr + StickyHW);
	}

	spin_unlock(&rp->lock);

	if (system_state == SYSTEM_POWER_OFF && !avoid_D3) {
		iowrite8(ioread8(ioaddr + StickyHW) | 0x03, ioaddr + StickyHW);

		pci_wake_from_d3(pdev, true);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}

#ifdef CONFIG_PM_SLEEP
static int rhine_suspend(struct device *device)
{
	struct net_device *dev = dev_get_drvdata(device);
	struct rhine_private *rp = netdev_priv(dev);

	if (!netif_running(dev))
		return 0;

	rhine_task_disable(rp);
	rhine_irq_disable(rp);
	napi_disable(&rp->napi);

	netif_device_detach(dev);

	if (dev_is_pci(device))
		rhine_shutdown_pci(to_pci_dev(device));

	return 0;
}

static int rhine_resume(struct device *device)
{
	struct net_device *dev = dev_get_drvdata(device);
	struct rhine_private *rp = netdev_priv(dev);

	if (!netif_running(dev))
		return 0;

	enable_mmio(rp->pioaddr, rp->quirks);
	rhine_power_init(dev);
	free_tbufs(dev);
	alloc_tbufs(dev);
	rhine_reset_rbufs(rp);
	rhine_task_enable(rp);
	spin_lock_bh(&rp->lock);
	init_registers(dev);
	spin_unlock_bh(&rp->lock);

	netif_device_attach(dev);

	return 0;
}
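/*
 * Suspend/resume flow: suspend quiesces the slow-event task, interrupts
 * and NAPI, detaches the net device and, on PCI, reuses
 * rhine_shutdown_pci() to arm any requested wake-up sources.  Resume
 * assumes the chip lost its state across the sleep: it re-enables MMIO,
 * re-runs power init, rebuilds the Tx buffers and resets the Rx ring
 * before reprogramming the registers.
 */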

static SIMPLE_DEV_PM_OPS(rhine_pm_ops, rhine_suspend, rhine_resume);
#define RHINE_PM_OPS	(&rhine_pm_ops)

#else

#define RHINE_PM_OPS	NULL

#endif /* !CONFIG_PM_SLEEP */

static struct pci_driver rhine_driver_pci = {
	.name		= DRV_NAME,
	.id_table	= rhine_pci_tbl,
	.probe		= rhine_init_one_pci,
	.remove		= rhine_remove_one_pci,
	.shutdown	= rhine_shutdown_pci,
	.driver.pm	= RHINE_PM_OPS,
};

static struct platform_driver rhine_driver_platform = {
	.probe		= rhine_init_one_platform,
	.remove		= rhine_remove_one_platform,
	.driver = {
		.name		= DRV_NAME,
		.of_match_table	= rhine_of_tbl,
		.pm		= RHINE_PM_OPS,
	}
};

static const struct dmi_system_id rhine_dmi_table[] __initconst = {
	{
		.ident = "EPIA-M",
		.matches = {
			DMI_MATCH(DMI_BIOS_VENDOR, "Award Software International, Inc."),
			DMI_MATCH(DMI_BIOS_VERSION, "6.00 PG"),
		},
	},
	{
		.ident = "KV7",
		.matches = {
			DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies, LTD"),
			DMI_MATCH(DMI_BIOS_VERSION, "6.00 PG"),
		},
	},
	{ NULL }
};

static int __init rhine_init(void)
{
	int ret_pci, ret_platform;

	/* when a module, this is printed whether or not devices are found in probe */
	if (dmi_check_system(rhine_dmi_table)) {
		/* these BIOSes fail at PXE boot if chip is in D3 */
		avoid_D3 = true;
		pr_warn("Broken BIOS detected, avoid_D3 enabled\n");
	} else if (avoid_D3) {
		pr_info("avoid_D3 set\n");
	}

	ret_pci = pci_register_driver(&rhine_driver_pci);
	ret_platform = platform_driver_register(&rhine_driver_platform);
	if ((ret_pci < 0) && (ret_platform < 0))
		return ret_pci;

	return 0;
}

static void __exit rhine_cleanup(void)
{
	platform_driver_unregister(&rhine_driver_platform);
	pci_unregister_driver(&rhine_driver_pci);
}
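/*
 * Registration note: the module backs both a PCI driver and an OF/platform
 * driver and is considered loaded if either registration succeeds; only
 * when both fail does rhine_init() report an error (the PCI error code is
 * the one returned).  rhine_cleanup() unregisters in the reverse order.
 */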

module_init(rhine_init);
module_exit(rhine_cleanup);