/* via-rhine.c: A Linux Ethernet device driver for VIA Rhine family chips. */
/*
	Written 1998-2001 by Donald Becker.

	Current Maintainer: Roger Luethi <rl@hellgate.ch>

	This software may be used and distributed according to the terms of
	the GNU General Public License (GPL), incorporated herein by reference.
	Drivers based on or derived from this code fall under the GPL and must
	retain the authorship, copyright and license notice. This file is not
	a complete program and may only be used when the entire operating
	system is licensed under the GPL.

	This driver is designed for the VIA VT86C100A Rhine-I.
	It also works with the Rhine-II (6102) and Rhine-III (6105/6105L/6105LOM
	and management NIC 6105M).

	The author may be reached as becker@scyld.com, or C/O
	Scyld Computing Corporation
	410 Severn Ave., Suite 210
	Annapolis MD 21403


	This driver contains some changes from the original Donald Becker
	version. He may or may not be interested in bug reports on this
	code. You can find his versions at:
	http://www.scyld.com/network/via-rhine.html
	[link no longer provides useful info -jgarzik]

*/

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#define DRV_NAME	"via-rhine"
#define DRV_VERSION	"1.5.1"
#define DRV_RELDATE	"2010-10-09"

#include <linux/types.h>

/* A few user-configurable values.
   These may be modified when a driver module is loaded. */
static int debug = 0;
#define RHINE_MSG_DEFAULT \
	(0x0000)

/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
   Setting to > 1518 effectively disables this feature. */
#if defined(__alpha__) || defined(__arm__) || defined(__hppa__) || \
	defined(CONFIG_SPARC) || defined(__ia64__) || \
	defined(__sh__) || defined(__mips__)
static int rx_copybreak = 1518;
#else
static int rx_copybreak;
#endif

/* Work-around for broken BIOSes: they are unable to get the chip back out of
   power state D3 so PXE booting fails. bootparam(7): via-rhine.avoid_D3=1 */
static bool avoid_D3;

/*
 * In case you are looking for 'options[]' or 'full_duplex[]', they
 * are gone. Use ethtool(8) instead.
 */
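
/*
 * Example (hypothetical values): the three parameters above may be set
 * at module load time, e.g.
 *
 *	modprobe via-rhine debug=0x3f rx_copybreak=1518 avoid_D3=1
 *
 * to enable verbose message logging, copy every frame of up to 1518
 * bytes into a fresh skb, and skip the D3 power transition at shutdown.
 */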

/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
   The Rhine has a 64 element 8390-like hash table. */
static const int multicast_filter_limit = 32;


/* Operational parameters that are set at compile time. */

/* Keep the ring sizes a power of two for compile efficiency.
   The compiler will convert <unsigned>'%'<2^N> into a bit mask.
   Making the Tx ring too large decreases the effectiveness of channel
   bonding and packet priority.
   There are no ill effects from too-large receive rings. */
#define TX_RING_SIZE	16
#define TX_QUEUE_LEN	10	/* Limit ring entries actually used. */
#define RX_RING_SIZE	64

/* Operational parameters that usually are not changed. */

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT	(2*HZ)

#define PKT_BUF_SZ	1536	/* Size of each temporary Rx buffer.*/

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>
#include <linux/if_vlan.h>
#include <linux/bitops.h>
#include <linux/workqueue.h>
#include <asm/processor.h>	/* Processor type for cache alignment. */
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <linux/dmi.h>

/* These identify the driver base version and may not be removed. */
static const char version[] =
	"v1.10-LK" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker";

/* This driver was written to use PCI memory space. Some early versions
   of the Rhine may only work correctly with I/O space accesses. */
#ifdef CONFIG_VIA_RHINE_MMIO
#define USE_MMIO
#endif
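
/*
 * Note (from rhine_init_one_pci() below): with USE_MMIO defined the
 * driver maps PCI BAR 1 (memory space) and verifies a handful of
 * registers against their PIO counterparts; without it, BAR 0 (I/O
 * ports) is used for all register accesses.
 */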

MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("VIA Rhine PCI Fast Ethernet driver");
MODULE_LICENSE("GPL");

module_param(debug, int, 0);
module_param(rx_copybreak, int, 0);
module_param(avoid_D3, bool, 0);
MODULE_PARM_DESC(debug, "VIA Rhine debug message flags");
MODULE_PARM_DESC(rx_copybreak, "VIA Rhine copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(avoid_D3, "Avoid power state D3 (work-around for broken BIOSes)");

#define MCAM_SIZE	32
#define VCAM_SIZE	32

/*
		Theory of Operation

I. Board Compatibility

This driver is designed for the VIA 86c100A Rhine-I PCI Fast Ethernet
controller.

II. Board-specific settings

Boards with this chip are functional only in a bus-master PCI slot.

Many operational settings are loaded from the EEPROM to the Config word at
offset 0x78. For most of these settings, this driver assumes that they are
correct.
If this driver is compiled to use PCI memory space operations the EEPROM
must be configured to enable memory ops.

III. Driver operation

IIIa. Ring buffers

This driver uses two statically allocated fixed-size descriptor lists
formed into rings by a branch from the final descriptor to the beginning of
the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.

IIIb/c. Transmit/Receive Structure

This driver attempts to use a zero-copy receive and transmit scheme.

Alas, all data buffers are required to start on a 32 bit boundary, so
the driver must often copy transmit packets into bounce buffers.

The driver allocates full frame size skbuffs for the Rx ring buffers at
open() time and passes the skb->data field to the chip as receive data
buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
a fresh skbuff is allocated and the frame is copied to the new skbuff.
When the incoming frame is larger, the skbuff is passed directly up the
protocol stack. Buffers consumed this way are replaced by newly allocated
skbuffs in the last phase of rhine_rx().

The RX_COPYBREAK value is chosen to trade off the memory wasted by
using a full-sized skbuff for small frames vs. the copying costs of larger
frames. New boards are typically used in generously configured machines
and the underfilled buffers have negligible impact compared to the benefit of
a single allocation size, so the default value of zero results in never
copying packets. When copying is done, the cost is usually mitigated by using
a combined copy/checksum routine. Copying also preloads the cache, which is
most useful with small frames.

Since the VIA chips are only able to transfer data to buffers on 32 bit
boundaries, the IP header at offset 14 in an ethernet frame isn't
longword aligned for further processing. Copying these unaligned buffers
has the beneficial effect of 16-byte aligning the IP header.

IIId. Synchronization

The driver runs as two independent, single-threaded flows of control. One
is the send-packet routine, which enforces single-threaded use by the
netdev_priv(dev)->lock spinlock. The other thread is the interrupt handler,
which is single threaded by the hardware and interrupt handling software.

The send packet thread has partial control over the Tx ring. It locks the
netdev_priv(dev)->lock whenever it's queuing a Tx packet. If the next slot in
the ring is not available it stops the transmit queue by
calling netif_stop_queue.

The interrupt handler has exclusive control over the Rx ring and records stats
from the Tx ring. After reaping the stats, it marks the Tx queue entry as
empty by incrementing the dirty_tx mark. If at least half of the entries in
the Rx ring are available the transmit queue is woken up if it was stopped.

IV. Notes

IVb. References

Preliminary VT86C100A manual from http://www.via.com.tw/
http://www.scyld.com/expert/100mbps.html
http://www.scyld.com/expert/NWay.html
ftp://ftp.via.com.tw/public/lan/Products/NIC/VT86C100A/Datasheet/VT86C100A03.pdf
ftp://ftp.via.com.tw/public/lan/Products/NIC/VT6102/Datasheet/VT6102_021.PDF


IVc. Errata

The VT86C100A manual is not reliable information.
The 3043 chip does not handle unaligned transmit or receive buffers, resulting
in significant performance degradation for bounce buffer copies on transmit
and unaligned IP headers on receive.
The chip does not pad to minimum transmit length.

*/
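
/*
 * Illustrative sketch of the copybreak decision described in IIIb/c
 * above; the real receive path in rhine_rx() implements this, with DMA
 * syncs and error handling around it:
 *
 *	if (pkt_len < rx_copybreak)
 *		copy the frame into a freshly allocated skb and leave
 *		the original ring buffer in place for reuse;
 *	else
 *		pass the ring skb up the stack and allocate + map a
 *		replacement buffer for the descriptor.
 */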


/* This table drives the PCI probe routines. It's mostly boilerplate in all
   of the drivers, and will likely be provided by some future kernel.
   Note the matching code -- the first table entry matches all 56** cards but
   the second only the 1234 card.
*/

enum rhine_revs {
	VT86C100A	= 0x00,
	VTunknown0	= 0x20,
	VT6102		= 0x40,
	VT8231		= 0x50,	/* Integrated MAC */
	VT8233		= 0x60,	/* Integrated MAC */
	VT8235		= 0x74,	/* Integrated MAC */
	VT8237		= 0x78,	/* Integrated MAC */
	VTunknown1	= 0x7C,
	VT6105		= 0x80,
	VT6105_B0	= 0x83,
	VT6105L		= 0x8A,
	VT6107		= 0x8C,
	VTunknown2	= 0x8E,
	VT6105M		= 0x90,	/* Management adapter */
};

enum rhine_quirks {
	rqWOL		= 0x0001,	/* Wake-On-LAN support */
	rqForceReset	= 0x0002,
	rq6patterns	= 0x0040,	/* 6 instead of 4 patterns for WOL */
	rqStatusWBRace	= 0x0080,	/* Tx Status Writeback Error possible */
	rqRhineI	= 0x0100,	/* See comment below */
	rqIntPHY	= 0x0200,	/* Integrated PHY */
	rqMgmt		= 0x0400,	/* Management adapter */
};
/*
 * rqRhineI: VT86C100A (aka Rhine-I) uses different bits to enable
 * MMIO as well as for the collision counter and the Tx FIFO underflow
 * indicator. In addition, Tx and Rx buffers need to be 4 byte aligned.
 */
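
/*
 * Example of how the quirk bits combine (derived from the revision
 * checks in rhine_init_one_pci() below): a VT6105M (revision >= 0x90)
 * ends up with rqWOL | rqForceReset | rqIntPHY | rq6patterns | rqMgmt,
 * while an original VT86C100A gets plain rqRhineI.
 */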

/* Beware of PCI posted writes */
#define IOSYNC	do { ioread8(ioaddr + StationAddr); } while (0)
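
/*
 * IOSYNC works by reading a harmless register, which forces preceding
 * posted writes out to the chip; rhine_chip_reset() below, for
 * instance, issues it right after writing Cmd1Reset so the reset
 * command actually reaches the chip before its status is polled.
 */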

static DEFINE_PCI_DEVICE_TABLE(rhine_pci_tbl) = {
	{ 0x1106, 0x3043, PCI_ANY_ID, PCI_ANY_ID, },	/* VT86C100A */
	{ 0x1106, 0x3065, PCI_ANY_ID, PCI_ANY_ID, },	/* VT6102 */
	{ 0x1106, 0x3106, PCI_ANY_ID, PCI_ANY_ID, },	/* 6105{,L,LOM} */
	{ 0x1106, 0x3053, PCI_ANY_ID, PCI_ANY_ID, },	/* VT6105M */
	{ }	/* terminate list */
};
MODULE_DEVICE_TABLE(pci, rhine_pci_tbl);

/* OpenFirmware identifiers for platform-bus devices
 * The .data field is currently only used to store quirks
 */
static u32 vt8500_quirks = rqWOL | rqForceReset | rq6patterns;
static struct of_device_id rhine_of_tbl[] = {
	{ .compatible = "via,vt8500-rhine", .data = &vt8500_quirks },
	{ }	/* terminate list */
};
MODULE_DEVICE_TABLE(of, rhine_of_tbl);

/* Offsets to the device registers. */
enum register_offsets {
	StationAddr=0x00, RxConfig=0x06, TxConfig=0x07, ChipCmd=0x08,
	ChipCmd1=0x09, TQWake=0x0A,
	IntrStatus=0x0C, IntrEnable=0x0E,
	MulticastFilter0=0x10, MulticastFilter1=0x14,
	RxRingPtr=0x18, TxRingPtr=0x1C, GFIFOTest=0x54,
	MIIPhyAddr=0x6C, MIIStatus=0x6D, PCIBusConfig=0x6E, PCIBusConfig1=0x6F,
	MIICmd=0x70, MIIRegAddr=0x71, MIIData=0x72, MACRegEEcsr=0x74,
	ConfigA=0x78, ConfigB=0x79, ConfigC=0x7A, ConfigD=0x7B,
	RxMissed=0x7C, RxCRCErrs=0x7E, MiscCmd=0x81,
	StickyHW=0x83, IntrStatus2=0x84,
	CamMask=0x88, CamCon=0x92, CamAddr=0x93,
	WOLcrSet=0xA0, PwcfgSet=0xA1, WOLcgSet=0xA3, WOLcrClr=0xA4,
	WOLcrClr1=0xA6, WOLcgClr=0xA7,
	PwrcsrSet=0xA8, PwrcsrSet1=0xA9, PwrcsrClr=0xAC, PwrcsrClr1=0xAD,
};

/* Bits in ConfigD */
enum backoff_bits {
	BackOptional=0x01, BackModify=0x02,
	BackCaptureEffect=0x04, BackRandom=0x08
};

/* Bits in the TxConfig (TCR) register */
enum tcr_bits {
	TCR_PQEN=0x01,
	TCR_LB0=0x02,		/* loopback[0] */
	TCR_LB1=0x04,		/* loopback[1] */
	TCR_OFSET=0x08,
	TCR_RTGOPT=0x10,
	TCR_RTFT0=0x20,
	TCR_RTFT1=0x40,
	TCR_RTSF=0x80,
};

/* Bits in the CamCon (CAMC) register */
enum camcon_bits {
	CAMC_CAMEN=0x01,
	CAMC_VCAMSL=0x02,
	CAMC_CAMWR=0x04,
	CAMC_CAMRD=0x08,
};

/* Bits in the PCIBusConfig1 (BCR1) register */
enum bcr1_bits {
	BCR1_POT0=0x01,
	BCR1_POT1=0x02,
	BCR1_POT2=0x04,
	BCR1_CTFT0=0x08,
	BCR1_CTFT1=0x10,
	BCR1_CTSF=0x20,
	BCR1_TXQNOBK=0x40,	/* for VT6105 */
	BCR1_VIDFR=0x80,	/* for VT6105 */
	BCR1_MED0=0x40,		/* for VT6102 */
	BCR1_MED1=0x80,		/* for VT6102 */
};

#ifdef USE_MMIO
/* Registers whose MMIO and PIO values we verify to be the same. */
static const int mmio_verify_registers[] = {
	RxConfig, TxConfig, IntrEnable, ConfigA, ConfigB, ConfigC, ConfigD,
	0
};
#endif

/* Bits in the interrupt status/mask registers. */
enum intr_status_bits {
	IntrRxDone	= 0x0001,
	IntrTxDone	= 0x0002,
	IntrRxErr	= 0x0004,
	IntrTxError	= 0x0008,
	IntrRxEmpty	= 0x0020,
	IntrPCIErr	= 0x0040,
	IntrStatsMax	= 0x0080,
	IntrRxEarly	= 0x0100,
	IntrTxUnderrun	= 0x0210,
	IntrRxOverflow	= 0x0400,
	IntrRxDropped	= 0x0800,
	IntrRxNoBuf	= 0x1000,
	IntrTxAborted	= 0x2000,
	IntrLinkChange	= 0x4000,
	IntrRxWakeUp	= 0x8000,
	IntrTxDescRace	= 0x080000,	/* mapped from IntrStatus2 */
	IntrNormalSummary	= IntrRxDone | IntrTxDone,
	IntrTxErrSummary	= IntrTxDescRace | IntrTxAborted | IntrTxError |
				  IntrTxUnderrun,
};

/* Bits in WOLcrSet/WOLcrClr and PwrcsrSet/PwrcsrClr */
enum wol_bits {
	WOLucast	= 0x10,
	WOLmagic	= 0x20,
	WOLbmcast	= 0x30,
	WOLlnkon	= 0x40,
	WOLlnkoff	= 0x80,
};

/* The Rx and Tx buffer descriptors. */
struct rx_desc {
	__le32 rx_status;
	__le32 desc_length;	/* Chain flag, Buffer/frame length */
	__le32 addr;
	__le32 next_desc;
};
struct tx_desc {
	__le32 tx_status;
	__le32 desc_length;	/* Chain flag, Tx Config, Frame length */
	__le32 addr;
	__le32 next_desc;
};

/* Initial value for tx_desc.desc_length, Buffer size goes to bits 0-10 */
#define TXDESC		0x00e08000

enum rx_status_bits {
	RxOK=0x8000, RxWholePkt=0x0300, RxErr=0x008F
};

/* Bits in *_desc.*_status */
enum desc_status_bits {
	DescOwn=0x80000000
};

/* Bits in *_desc.*_length */
enum desc_length_bits {
	DescTag=0x00010000
};

/* Bits in ChipCmd. */
enum chip_cmd_bits {
	CmdInit=0x01, CmdStart=0x02, CmdStop=0x04, CmdRxOn=0x08,
	CmdTxOn=0x10, Cmd1TxDemand=0x20, CmdRxDemand=0x40,
	Cmd1EarlyRx=0x01, Cmd1EarlyTx=0x02, Cmd1FDuplex=0x04,
	Cmd1NoTxPoll=0x08, Cmd1Reset=0x80,
};
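
/*
 * Ownership hand-off implied by DescOwn above: the driver gives a
 * descriptor to the chip by setting DescOwn in its status word and the
 * chip clears the bit when it is done with the buffer, so the Rx and
 * Tx scavenging loops must stop at the first descriptor that still
 * has DescOwn set.
 */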

struct rhine_stats {
	u64	packets;
	u64	bytes;
	struct u64_stats_sync syncp;
};

struct rhine_private {
	/* Bit mask for configured VLAN ids */
	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];

	/* Descriptor rings */
	struct rx_desc *rx_ring;
	struct tx_desc *tx_ring;
	dma_addr_t rx_ring_dma;
	dma_addr_t tx_ring_dma;

	/* The addresses of receive-in-place skbuffs. */
	struct sk_buff *rx_skbuff[RX_RING_SIZE];
	dma_addr_t rx_skbuff_dma[RX_RING_SIZE];

	/* The saved address of a sent-in-place packet/buffer, for later free(). */
	struct sk_buff *tx_skbuff[TX_RING_SIZE];
	dma_addr_t tx_skbuff_dma[TX_RING_SIZE];

	/* Tx bounce buffers (Rhine-I only) */
	unsigned char *tx_buf[TX_RING_SIZE];
	unsigned char *tx_bufs;
	dma_addr_t tx_bufs_dma;

	int irq;
	long pioaddr;
	struct net_device *dev;
	struct napi_struct napi;
	spinlock_t lock;
	struct mutex task_lock;
	bool task_enable;
	struct work_struct slow_event_task;
	struct work_struct reset_task;

	u32 msg_enable;

	/* Frequently used values: keep some adjacent for cache effect. */
	u32 quirks;
	struct rx_desc *rx_head_desc;
	unsigned int cur_rx, dirty_rx;	/* Producer/consumer ring indices */
	unsigned int cur_tx, dirty_tx;
	unsigned int rx_buf_sz;		/* Based on MTU+slack. */
	struct rhine_stats rx_stats;
	struct rhine_stats tx_stats;
	u8 wolopts;

	u8 tx_thresh, rx_thresh;

	struct mii_if_info mii_if;
	void __iomem *base;
};

#define BYTE_REG_BITS_ON(x, p)      do { iowrite8((ioread8((p))|(x)), (p)); } while (0)
#define WORD_REG_BITS_ON(x, p)      do { iowrite16((ioread16((p))|(x)), (p)); } while (0)
#define DWORD_REG_BITS_ON(x, p)     do { iowrite32((ioread32((p))|(x)), (p)); } while (0)

#define BYTE_REG_BITS_IS_ON(x, p)   (ioread8((p)) & (x))
#define WORD_REG_BITS_IS_ON(x, p)   (ioread16((p)) & (x))
#define DWORD_REG_BITS_IS_ON(x, p)  (ioread32((p)) & (x))

#define BYTE_REG_BITS_OFF(x, p)     do { iowrite8(ioread8((p)) & (~(x)), (p)); } while (0)
#define WORD_REG_BITS_OFF(x, p)     do { iowrite16(ioread16((p)) & (~(x)), (p)); } while (0)
#define DWORD_REG_BITS_OFF(x, p)    do { iowrite32(ioread32((p)) & (~(x)), (p)); } while (0)

#define BYTE_REG_BITS_SET(x, m, p)  do { iowrite8((ioread8((p)) & (~(m)))|(x), (p)); } while (0)
#define WORD_REG_BITS_SET(x, m, p)  do { iowrite16((ioread16((p)) & (~(m)))|(x), (p)); } while (0)
#define DWORD_REG_BITS_SET(x, m, p) do { iowrite32((ioread32((p)) & (~(m)))|(x), (p)); } while (0)
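
/*
 * Usage sketch for the accessors above (illustrative): forcing the
 * full-duplex bit in ChipCmd1 on without disturbing its neighbours is
 *
 *	BYTE_REG_BITS_ON(Cmd1FDuplex, ioaddr + ChipCmd1);
 *
 * while the _SET variants rewrite a whole masked field, e.g. the Tx
 * threshold update in rhine_kick_tx_threshold() below:
 *
 *	BYTE_REG_BITS_SET(rp->tx_thresh, 0x80, ioaddr + TxConfig);
 */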


static int  mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
static int  rhine_open(struct net_device *dev);
static void rhine_reset_task(struct work_struct *work);
static void rhine_slow_event_task(struct work_struct *work);
static void rhine_tx_timeout(struct net_device *dev);
static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
				  struct net_device *dev);
static irqreturn_t rhine_interrupt(int irq, void *dev_instance);
static void rhine_tx(struct net_device *dev);
static int rhine_rx(struct net_device *dev, int limit);
static void rhine_set_rx_mode(struct net_device *dev);
static struct rtnl_link_stats64 *rhine_get_stats64(struct net_device *dev,
	       struct rtnl_link_stats64 *stats);
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static const struct ethtool_ops netdev_ethtool_ops;
static int  rhine_close(struct net_device *dev);
static int rhine_vlan_rx_add_vid(struct net_device *dev,
				 __be16 proto, u16 vid);
static int rhine_vlan_rx_kill_vid(struct net_device *dev,
				  __be16 proto, u16 vid);
static void rhine_restart_tx(struct net_device *dev);

static void rhine_wait_bit(struct rhine_private *rp, u8 reg, u8 mask, bool low)
{
	void __iomem *ioaddr = rp->base;
	int i;

	for (i = 0; i < 1024; i++) {
		bool has_mask_bits = !!(ioread8(ioaddr + reg) & mask);

		if (low ^ has_mask_bits)
			break;
		udelay(10);
	}
	if (i > 64) {
		netif_dbg(rp, hw, rp->dev, "%s bit wait (%02x/%02x) cycle "
			  "count: %04d\n", low ? "low" : "high", reg, mask, i);
	}
}

static void rhine_wait_bit_high(struct rhine_private *rp, u8 reg, u8 mask)
{
	rhine_wait_bit(rp, reg, mask, false);
}

static void rhine_wait_bit_low(struct rhine_private *rp, u8 reg, u8 mask)
{
	rhine_wait_bit(rp, reg, mask, true);
}
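
/*
 * Note on the bounds above: rhine_wait_bit() polls at most 1024 times
 * with a 10 us delay, i.e. it gives up after roughly 10 ms, and logs a
 * debug message whenever a wait needed more than 64 iterations.
 */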

static u32 rhine_get_events(struct rhine_private *rp)
{
	void __iomem *ioaddr = rp->base;
	u32 intr_status;

	intr_status = ioread16(ioaddr + IntrStatus);
	/* On Rhine-II, Bit 3 indicates Tx descriptor write-back race. */
	if (rp->quirks & rqStatusWBRace)
		intr_status |= ioread8(ioaddr + IntrStatus2) << 16;
	return intr_status;
}

static void rhine_ack_events(struct rhine_private *rp, u32 mask)
{
	void __iomem *ioaddr = rp->base;

	if (rp->quirks & rqStatusWBRace)
		iowrite8(mask >> 16, ioaddr + IntrStatus2);
	iowrite16(mask, ioaddr + IntrStatus);
	mmiowb();
}

/*
 * Get power related registers into sane state.
 * Notify user about past WOL event.
 */
static void rhine_power_init(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	u16 wolstat;

	if (rp->quirks & rqWOL) {
		/* Make sure chip is in power state D0 */
		iowrite8(ioread8(ioaddr + StickyHW) & 0xFC, ioaddr + StickyHW);

		/* Disable "force PME-enable" */
		iowrite8(0x80, ioaddr + WOLcgClr);

		/* Clear power-event config bits (WOL) */
		iowrite8(0xFF, ioaddr + WOLcrClr);
		/* More recent cards can manage two additional patterns */
		if (rp->quirks & rq6patterns)
			iowrite8(0x03, ioaddr + WOLcrClr1);

		/* Save power-event status bits */
		wolstat = ioread8(ioaddr + PwrcsrSet);
		if (rp->quirks & rq6patterns)
			wolstat |= (ioread8(ioaddr + PwrcsrSet1) & 0x03) << 8;

		/* Clear power-event status bits */
		iowrite8(0xFF, ioaddr + PwrcsrClr);
		if (rp->quirks & rq6patterns)
			iowrite8(0x03, ioaddr + PwrcsrClr1);

		if (wolstat) {
			char *reason;
			switch (wolstat) {
			case WOLmagic:
				reason = "Magic packet";
				break;
			case WOLlnkon:
				reason = "Link went up";
				break;
			case WOLlnkoff:
				reason = "Link went down";
				break;
			case WOLucast:
				reason = "Unicast packet";
				break;
			case WOLbmcast:
				reason = "Multicast/broadcast packet";
				break;
			default:
				reason = "Unknown";
			}
			netdev_info(dev, "Woke system up. Reason: %s\n",
				    reason);
		}
	}
}
Reason: %s\n", 637f2148a47SJeff Kirsher reason); 638f2148a47SJeff Kirsher } 639f2148a47SJeff Kirsher } 640f2148a47SJeff Kirsher } 641f2148a47SJeff Kirsher 642f2148a47SJeff Kirsher static void rhine_chip_reset(struct net_device *dev) 643f2148a47SJeff Kirsher { 644f2148a47SJeff Kirsher struct rhine_private *rp = netdev_priv(dev); 645f2148a47SJeff Kirsher void __iomem *ioaddr = rp->base; 646fc3e0f8aSFrancois Romieu u8 cmd1; 647f2148a47SJeff Kirsher 648f2148a47SJeff Kirsher iowrite8(Cmd1Reset, ioaddr + ChipCmd1); 649f2148a47SJeff Kirsher IOSYNC; 650f2148a47SJeff Kirsher 651f2148a47SJeff Kirsher if (ioread8(ioaddr + ChipCmd1) & Cmd1Reset) { 652f2148a47SJeff Kirsher netdev_info(dev, "Reset not complete yet. Trying harder.\n"); 653f2148a47SJeff Kirsher 654f2148a47SJeff Kirsher /* Force reset */ 655f2148a47SJeff Kirsher if (rp->quirks & rqForceReset) 656f2148a47SJeff Kirsher iowrite8(0x40, ioaddr + MiscCmd); 657f2148a47SJeff Kirsher 658f2148a47SJeff Kirsher /* Reset can take somewhat longer (rare) */ 659a384a33bSFrancois Romieu rhine_wait_bit_low(rp, ChipCmd1, Cmd1Reset); 660f2148a47SJeff Kirsher } 661f2148a47SJeff Kirsher 662fc3e0f8aSFrancois Romieu cmd1 = ioread8(ioaddr + ChipCmd1); 663fc3e0f8aSFrancois Romieu netif_info(rp, hw, dev, "Reset %s\n", (cmd1 & Cmd1Reset) ? 664f2148a47SJeff Kirsher "failed" : "succeeded"); 665f2148a47SJeff Kirsher } 666f2148a47SJeff Kirsher 667f2148a47SJeff Kirsher #ifdef USE_MMIO 668f2148a47SJeff Kirsher static void enable_mmio(long pioaddr, u32 quirks) 669f2148a47SJeff Kirsher { 670f2148a47SJeff Kirsher int n; 671f2148a47SJeff Kirsher if (quirks & rqRhineI) { 672f2148a47SJeff Kirsher /* More recent docs say that this bit is reserved ... */ 673f2148a47SJeff Kirsher n = inb(pioaddr + ConfigA) | 0x20; 674f2148a47SJeff Kirsher outb(n, pioaddr + ConfigA); 675f2148a47SJeff Kirsher } else { 676f2148a47SJeff Kirsher n = inb(pioaddr + ConfigD) | 0x80; 677f2148a47SJeff Kirsher outb(n, pioaddr + ConfigD); 678f2148a47SJeff Kirsher } 679f2148a47SJeff Kirsher } 680f2148a47SJeff Kirsher #endif 681f2148a47SJeff Kirsher 682f2148a47SJeff Kirsher /* 683f2148a47SJeff Kirsher * Loads bytes 0x00-0x05, 0x6E-0x6F, 0x78-0x7B from EEPROM 684f2148a47SJeff Kirsher * (plus 0x6C for Rhine-I/II) 685f2148a47SJeff Kirsher */ 68676e239e1SBill Pemberton static void rhine_reload_eeprom(long pioaddr, struct net_device *dev) 687f2148a47SJeff Kirsher { 688f2148a47SJeff Kirsher struct rhine_private *rp = netdev_priv(dev); 689f2148a47SJeff Kirsher void __iomem *ioaddr = rp->base; 690a384a33bSFrancois Romieu int i; 691f2148a47SJeff Kirsher 692f2148a47SJeff Kirsher outb(0x20, pioaddr + MACRegEEcsr); 693a384a33bSFrancois Romieu for (i = 0; i < 1024; i++) { 694a384a33bSFrancois Romieu if (!(inb(pioaddr + MACRegEEcsr) & 0x20)) 695a384a33bSFrancois Romieu break; 696a384a33bSFrancois Romieu } 697a384a33bSFrancois Romieu if (i > 512) 698a384a33bSFrancois Romieu pr_info("%4d cycles used @ %s:%d\n", i, __func__, __LINE__); 699f2148a47SJeff Kirsher 700f2148a47SJeff Kirsher #ifdef USE_MMIO 701f2148a47SJeff Kirsher /* 702f2148a47SJeff Kirsher * Reloading from EEPROM overwrites ConfigA-D, so we must re-enable 703f2148a47SJeff Kirsher * MMIO. If reloading EEPROM was done first this could be avoided, but 704f2148a47SJeff Kirsher * it is not known if that still works with the "win98-reboot" problem. 

#ifdef CONFIG_NET_POLL_CONTROLLER
static void rhine_poll(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	const int irq = rp->irq;

	disable_irq(irq);
	rhine_interrupt(irq, dev);
	enable_irq(irq);
}
#endif

static void rhine_kick_tx_threshold(struct rhine_private *rp)
{
	if (rp->tx_thresh < 0xe0) {
		void __iomem *ioaddr = rp->base;

		rp->tx_thresh += 0x20;
		BYTE_REG_BITS_SET(rp->tx_thresh, 0x80, ioaddr + TxConfig);
	}
}

static void rhine_tx_err(struct rhine_private *rp, u32 status)
{
	struct net_device *dev = rp->dev;

	if (status & IntrTxAborted) {
		netif_info(rp, tx_err, dev,
			   "Abort %08x, frame dropped\n", status);
	}

	if (status & IntrTxUnderrun) {
		rhine_kick_tx_threshold(rp);
		netif_info(rp, tx_err, dev, "Transmitter underrun, "
			   "Tx threshold now %02x\n", rp->tx_thresh);
	}

	if (status & IntrTxDescRace)
		netif_info(rp, tx_err, dev, "Tx descriptor write-back race\n");

	if ((status & IntrTxError) &&
	    (status & (IntrTxAborted | IntrTxUnderrun | IntrTxDescRace)) == 0) {
		rhine_kick_tx_threshold(rp);
		netif_info(rp, tx_err, dev, "Unspecified error. "
			   "Tx threshold now %02x\n", rp->tx_thresh);
	}

	rhine_restart_tx(dev);
}
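
/*
 * Note on the threshold kick above: tx_thresh is raised in 0x20 steps
 * and capped at 0xe0, so every underrun makes the transmitter wait for
 * more FIFO data before starting a frame, trading a little latency for
 * fewer underruns on slow buses.
 */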
" 759fc3e0f8aSFrancois Romieu "Tx threshold now %02x\n", rp->tx_thresh); 7607ab87ff4SFrancois Romieu } 7617ab87ff4SFrancois Romieu 7627ab87ff4SFrancois Romieu rhine_restart_tx(dev); 7637ab87ff4SFrancois Romieu } 7647ab87ff4SFrancois Romieu 7657ab87ff4SFrancois Romieu static void rhine_update_rx_crc_and_missed_errord(struct rhine_private *rp) 7667ab87ff4SFrancois Romieu { 7677ab87ff4SFrancois Romieu void __iomem *ioaddr = rp->base; 7687ab87ff4SFrancois Romieu struct net_device_stats *stats = &rp->dev->stats; 7697ab87ff4SFrancois Romieu 7707ab87ff4SFrancois Romieu stats->rx_crc_errors += ioread16(ioaddr + RxCRCErrs); 7717ab87ff4SFrancois Romieu stats->rx_missed_errors += ioread16(ioaddr + RxMissed); 7727ab87ff4SFrancois Romieu 7737ab87ff4SFrancois Romieu /* 7747ab87ff4SFrancois Romieu * Clears the "tally counters" for CRC errors and missed frames(?). 7757ab87ff4SFrancois Romieu * It has been reported that some chips need a write of 0 to clear 7767ab87ff4SFrancois Romieu * these, for others the counters are set to 1 when written to and 7777ab87ff4SFrancois Romieu * instead cleared when read. So we clear them both ways ... 7787ab87ff4SFrancois Romieu */ 7797ab87ff4SFrancois Romieu iowrite32(0, ioaddr + RxMissed); 7807ab87ff4SFrancois Romieu ioread16(ioaddr + RxCRCErrs); 7817ab87ff4SFrancois Romieu ioread16(ioaddr + RxMissed); 7827ab87ff4SFrancois Romieu } 7837ab87ff4SFrancois Romieu 7847ab87ff4SFrancois Romieu #define RHINE_EVENT_NAPI_RX (IntrRxDone | \ 7857ab87ff4SFrancois Romieu IntrRxErr | \ 7867ab87ff4SFrancois Romieu IntrRxEmpty | \ 7877ab87ff4SFrancois Romieu IntrRxOverflow | \ 7887ab87ff4SFrancois Romieu IntrRxDropped | \ 7897ab87ff4SFrancois Romieu IntrRxNoBuf | \ 7907ab87ff4SFrancois Romieu IntrRxWakeUp) 7917ab87ff4SFrancois Romieu 7927ab87ff4SFrancois Romieu #define RHINE_EVENT_NAPI_TX_ERR (IntrTxError | \ 7937ab87ff4SFrancois Romieu IntrTxAborted | \ 7947ab87ff4SFrancois Romieu IntrTxUnderrun | \ 7957ab87ff4SFrancois Romieu IntrTxDescRace) 7967ab87ff4SFrancois Romieu #define RHINE_EVENT_NAPI_TX (IntrTxDone | RHINE_EVENT_NAPI_TX_ERR) 7977ab87ff4SFrancois Romieu 7987ab87ff4SFrancois Romieu #define RHINE_EVENT_NAPI (RHINE_EVENT_NAPI_RX | \ 7997ab87ff4SFrancois Romieu RHINE_EVENT_NAPI_TX | \ 8007ab87ff4SFrancois Romieu IntrStatsMax) 8017ab87ff4SFrancois Romieu #define RHINE_EVENT_SLOW (IntrPCIErr | IntrLinkChange) 8027ab87ff4SFrancois Romieu #define RHINE_EVENT (RHINE_EVENT_NAPI | RHINE_EVENT_SLOW) 8037ab87ff4SFrancois Romieu 804f2148a47SJeff Kirsher static int rhine_napipoll(struct napi_struct *napi, int budget) 805f2148a47SJeff Kirsher { 806f2148a47SJeff Kirsher struct rhine_private *rp = container_of(napi, struct rhine_private, napi); 807f2148a47SJeff Kirsher struct net_device *dev = rp->dev; 808f2148a47SJeff Kirsher void __iomem *ioaddr = rp->base; 8097ab87ff4SFrancois Romieu u16 enable_mask = RHINE_EVENT & 0xffff; 8107ab87ff4SFrancois Romieu int work_done = 0; 8117ab87ff4SFrancois Romieu u32 status; 812f2148a47SJeff Kirsher 8137ab87ff4SFrancois Romieu status = rhine_get_events(rp); 8147ab87ff4SFrancois Romieu rhine_ack_events(rp, status & ~RHINE_EVENT_SLOW); 8157ab87ff4SFrancois Romieu 8167ab87ff4SFrancois Romieu if (status & RHINE_EVENT_NAPI_RX) 8177ab87ff4SFrancois Romieu work_done += rhine_rx(dev, budget); 8187ab87ff4SFrancois Romieu 8197ab87ff4SFrancois Romieu if (status & RHINE_EVENT_NAPI_TX) { 8207ab87ff4SFrancois Romieu if (status & RHINE_EVENT_NAPI_TX_ERR) { 8217ab87ff4SFrancois Romieu /* Avoid scavenging before Tx engine turned off */ 822a384a33bSFrancois Romieu 

static int rhine_napipoll(struct napi_struct *napi, int budget)
{
	struct rhine_private *rp = container_of(napi, struct rhine_private, napi);
	struct net_device *dev = rp->dev;
	void __iomem *ioaddr = rp->base;
	u16 enable_mask = RHINE_EVENT & 0xffff;
	int work_done = 0;
	u32 status;

	status = rhine_get_events(rp);
	rhine_ack_events(rp, status & ~RHINE_EVENT_SLOW);

	if (status & RHINE_EVENT_NAPI_RX)
		work_done += rhine_rx(dev, budget);

	if (status & RHINE_EVENT_NAPI_TX) {
		if (status & RHINE_EVENT_NAPI_TX_ERR) {
			/* Avoid scavenging before Tx engine turned off */
			rhine_wait_bit_low(rp, ChipCmd, CmdTxOn);
			if (ioread8(ioaddr + ChipCmd) & CmdTxOn)
				netif_warn(rp, tx_err, dev, "Tx still on\n");
		}

		rhine_tx(dev);

		if (status & RHINE_EVENT_NAPI_TX_ERR)
			rhine_tx_err(rp, status);
	}

	if (status & IntrStatsMax) {
		spin_lock(&rp->lock);
		rhine_update_rx_crc_and_missed_errors(rp);
		spin_unlock(&rp->lock);
	}

	if (status & RHINE_EVENT_SLOW) {
		enable_mask &= ~RHINE_EVENT_SLOW;
		schedule_work(&rp->slow_event_task);
	}

	if (work_done < budget) {
		napi_complete(napi);
		iowrite16(enable_mask, ioaddr + IntrEnable);
		mmiowb();
	}
	return work_done;
}

static void rhine_hw_init(struct net_device *dev, long pioaddr)
{
	struct rhine_private *rp = netdev_priv(dev);

	/* Reset the chip to erase previous misconfiguration. */
	rhine_chip_reset(dev);

	/* Rhine-I needs extra time to recuperate before EEPROM reload */
	if (rp->quirks & rqRhineI)
		msleep(5);

	/* Reload EEPROM controlled bytes cleared by soft reset */
	if (dev_is_pci(dev->dev.parent))
		rhine_reload_eeprom(pioaddr, dev);
}

static const struct net_device_ops rhine_netdev_ops = {
	.ndo_open		 = rhine_open,
	.ndo_stop		 = rhine_close,
	.ndo_start_xmit		 = rhine_start_tx,
	.ndo_get_stats64	 = rhine_get_stats64,
	.ndo_set_rx_mode	 = rhine_set_rx_mode,
	.ndo_change_mtu		 = eth_change_mtu,
	.ndo_validate_addr	 = eth_validate_addr,
	.ndo_set_mac_address	 = eth_mac_addr,
	.ndo_do_ioctl		 = netdev_ioctl,
	.ndo_tx_timeout		 = rhine_tx_timeout,
	.ndo_vlan_rx_add_vid	 = rhine_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	 = rhine_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	 = rhine_poll,
#endif
};

static int rhine_init_one_common(struct device *hwdev, u32 quirks,
				 long pioaddr, void __iomem *ioaddr, int irq)
{
	struct net_device *dev;
	struct rhine_private *rp;
	int i, rc, phy_id;
	const char *name;

	/* this should always be supported */
	rc = dma_set_mask(hwdev, DMA_BIT_MASK(32));
	if (rc) {
		dev_err(hwdev, "32-bit DMA addresses not supported by the card!?\n");
		goto err_out;
	}

	dev = alloc_etherdev(sizeof(struct rhine_private));
	if (!dev) {
		rc = -ENOMEM;
		goto err_out;
	}
	SET_NETDEV_DEV(dev, hwdev);

	rp = netdev_priv(dev);
	rp->dev = dev;
	rp->quirks = quirks;
	rp->pioaddr = pioaddr;
	rp->base = ioaddr;
	rp->irq = irq;
	rp->msg_enable = netif_msg_init(debug, RHINE_MSG_DEFAULT);

	phy_id = rp->quirks & rqIntPHY ? 1 : 0;

	u64_stats_init(&rp->tx_stats.syncp);
	u64_stats_init(&rp->rx_stats.syncp);

	/* Get chip registers into a sane state */
	rhine_power_init(dev);
	rhine_hw_init(dev, pioaddr);

	for (i = 0; i < 6; i++)
		dev->dev_addr[i] = ioread8(ioaddr + StationAddr + i);

	if (!is_valid_ether_addr(dev->dev_addr)) {
		/* Report it and use a random ethernet address instead */
		netdev_err(dev, "Invalid MAC address: %pM\n", dev->dev_addr);
		eth_hw_addr_random(dev);
		netdev_info(dev, "Using random MAC address: %pM\n",
			    dev->dev_addr);
	}

	/* For Rhine-I/II, phy_id is loaded from EEPROM */
	if (!phy_id)
		phy_id = ioread8(ioaddr + 0x6C);

	spin_lock_init(&rp->lock);
	mutex_init(&rp->task_lock);
	INIT_WORK(&rp->reset_task, rhine_reset_task);
	INIT_WORK(&rp->slow_event_task, rhine_slow_event_task);

	rp->mii_if.dev = dev;
	rp->mii_if.mdio_read = mdio_read;
	rp->mii_if.mdio_write = mdio_write;
	rp->mii_if.phy_id_mask = 0x1f;
	rp->mii_if.reg_num_mask = 0x1f;

	/* The chip-specific entries in the device structure. */
	dev->netdev_ops = &rhine_netdev_ops;
	dev->ethtool_ops = &netdev_ethtool_ops;
	dev->watchdog_timeo = TX_TIMEOUT;

	netif_napi_add(dev, &rp->napi, rhine_napipoll, 64);

	if (rp->quirks & rqRhineI)
		dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM;

	if (rp->quirks & rqMgmt)
		dev->features |= NETIF_F_HW_VLAN_CTAG_TX |
				 NETIF_F_HW_VLAN_CTAG_RX |
				 NETIF_F_HW_VLAN_CTAG_FILTER;

	/* dev->name not defined before register_netdev()! */
	rc = register_netdev(dev);
	if (rc)
		goto err_out_free_netdev;

	if (rp->quirks & rqRhineI)
		name = "Rhine";
	else if (rp->quirks & rqStatusWBRace)
		name = "Rhine II";
	else if (rp->quirks & rqMgmt)
		name = "Rhine III (Management Adapter)";
	else
		name = "Rhine III";

	netdev_info(dev, "VIA %s at 0x%lx, %pM, IRQ %d\n",
		    name, (long)ioaddr, dev->dev_addr, rp->irq);

	dev_set_drvdata(hwdev, dev);

	{
		u16 mii_cmd;
		int mii_status = mdio_read(dev, phy_id, 1);
		mii_cmd = mdio_read(dev, phy_id, MII_BMCR) & ~BMCR_ISOLATE;
		mdio_write(dev, phy_id, MII_BMCR, mii_cmd);
		if (mii_status != 0xffff && mii_status != 0x0000) {
			rp->mii_if.advertising = mdio_read(dev, phy_id, 4);
			netdev_info(dev,
				    "MII PHY found at address %d, status 0x%04x advertising %04x Link %04x\n",
				    phy_id,
				    mii_status, rp->mii_if.advertising,
				    mdio_read(dev, phy_id, 5));

			/* set IFF_RUNNING */
			if (mii_status & BMSR_LSTATUS)
				netif_carrier_on(dev);
			else
				netif_carrier_off(dev);

		}
	}
	rp->mii_if.phy_id = phy_id;
	if (avoid_D3)
		netif_info(rp, probe, dev, "No D3 power state at shutdown\n");

	return 0;

err_out_free_netdev:
	free_netdev(dev);
err_out:
	return rc;
}
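
/*
 * Both probe paths funnel into rhine_init_one_common() above: the PCI
 * path below derives its quirks from the chip revision, while the
 * platform (device tree) path takes them from the matched entry in
 * rhine_of_tbl.
 */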

static int rhine_init_one_pci(struct pci_dev *pdev,
			      const struct pci_device_id *ent)
{
	struct device *hwdev = &pdev->dev;
	int i, rc;
	long pioaddr, memaddr;
	void __iomem *ioaddr;
	int io_size = pdev->revision < VTunknown0 ? 128 : 256;
	u32 quirks;
#ifdef USE_MMIO
	int bar = 1;
#else
	int bar = 0;
#endif

/* when built into the kernel, we only print version if device is found */
#ifndef MODULE
	pr_info_once("%s\n", version);
#endif

	rc = pci_enable_device(pdev);
	if (rc)
		goto err_out;

	if (pdev->revision < VTunknown0) {
		quirks = rqRhineI;
	} else if (pdev->revision >= VT6102) {
		quirks = rqWOL | rqForceReset;
		if (pdev->revision < VT6105) {
			quirks |= rqStatusWBRace;
		} else {
			quirks |= rqIntPHY;
			if (pdev->revision >= VT6105_B0)
				quirks |= rq6patterns;
			if (pdev->revision >= VT6105M)
				quirks |= rqMgmt;
		}
	}

	/* sanity check */
	if ((pci_resource_len(pdev, 0) < io_size) ||
	    (pci_resource_len(pdev, 1) < io_size)) {
		rc = -EIO;
		dev_err(hwdev, "Insufficient PCI resources, aborting\n");
		goto err_out_pci_disable;
	}

	pioaddr = pci_resource_start(pdev, 0);
	memaddr = pci_resource_start(pdev, 1);

	pci_set_master(pdev);

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc)
		goto err_out_pci_disable;

	ioaddr = pci_iomap(pdev, bar, io_size);
	if (!ioaddr) {
		rc = -EIO;
		dev_err(hwdev,
			"ioremap failed for device %s, region 0x%X @ 0x%lX\n",
			dev_name(hwdev), io_size, memaddr);
		goto err_out_free_res;
	}

#ifdef USE_MMIO
	enable_mmio(pioaddr, quirks);

	/* Check that selected MMIO registers match the PIO ones */
	i = 0;
	while (mmio_verify_registers[i]) {
		int reg = mmio_verify_registers[i++];
		unsigned char a = inb(pioaddr+reg);
		unsigned char b = readb(ioaddr+reg);

		if (a != b) {
			rc = -EIO;
			dev_err(hwdev,
				"MMIO does not match PIO [%02x] (%02x != %02x)\n",
				reg, a, b);
			goto err_out_unmap;
		}
	}
#endif /* USE_MMIO */

	rc = rhine_init_one_common(&pdev->dev, quirks,
				   pioaddr, ioaddr, pdev->irq);
	if (!rc)
		return 0;

err_out_unmap:
	pci_iounmap(pdev, ioaddr);
err_out_free_res:
	pci_release_regions(pdev);
err_out_pci_disable:
	pci_disable_device(pdev);
err_out:
	return rc;
}
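
/*
 * Device tree sketch for the platform path below (hypothetical node;
 * only the compatible string is taken from rhine_of_tbl, the address
 * and interrupt number are made up for illustration):
 *
 *	ethernet@d8004000 {
 *		compatible = "via,vt8500-rhine";
 *		reg = <0xd8004000 0x100>;
 *		interrupts = <10>;
 *	};
 */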
dev_err(hwdev,
10962d283862SAlexey Charkov "MMIO does not match PIO [%02x] (%02x != %02x)\n",
10972d283862SAlexey Charkov reg, a, b);
10982d283862SAlexey Charkov goto err_out_unmap;
10992d283862SAlexey Charkov }
11002d283862SAlexey Charkov }
11012d283862SAlexey Charkov #endif /* USE_MMIO */
11022d283862SAlexey Charkov
1103*ca8b6e04SAlexey Charkov rc = rhine_init_one_common(&pdev->dev, quirks,
11042d283862SAlexey Charkov pioaddr, ioaddr, pdev->irq);
11052d283862SAlexey Charkov if (!rc)
11062d283862SAlexey Charkov return 0;
11072d283862SAlexey Charkov
1108f2148a47SJeff Kirsher err_out_unmap:
1109f2148a47SJeff Kirsher pci_iounmap(pdev, ioaddr);
1110f2148a47SJeff Kirsher err_out_free_res:
1111f2148a47SJeff Kirsher pci_release_regions(pdev);
1112ae996154SRoger Luethi err_out_pci_disable:
1113ae996154SRoger Luethi pci_disable_device(pdev);
1114f2148a47SJeff Kirsher err_out:
1115f2148a47SJeff Kirsher return rc;
1116f2148a47SJeff Kirsher }
1117f2148a47SJeff Kirsher
11182d283862SAlexey Charkov static int rhine_init_one_platform(struct platform_device *pdev)
11192d283862SAlexey Charkov {
11202d283862SAlexey Charkov const struct of_device_id *match;
1121*ca8b6e04SAlexey Charkov const u32 *quirks;
11222d283862SAlexey Charkov int irq;
11232d283862SAlexey Charkov struct resource *res;
11242d283862SAlexey Charkov void __iomem *ioaddr;
11252d283862SAlexey Charkov
11262d283862SAlexey Charkov match = of_match_device(rhine_of_tbl, &pdev->dev);
11272d283862SAlexey Charkov if (!match)
11282d283862SAlexey Charkov return -EINVAL;
11292d283862SAlexey Charkov
11302d283862SAlexey Charkov res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
11312d283862SAlexey Charkov ioaddr = devm_ioremap_resource(&pdev->dev, res);
11322d283862SAlexey Charkov if (IS_ERR(ioaddr))
11332d283862SAlexey Charkov return PTR_ERR(ioaddr);
11342d283862SAlexey Charkov
11352d283862SAlexey Charkov irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
11362d283862SAlexey Charkov if (!irq)
11372d283862SAlexey Charkov return -EINVAL;
11382d283862SAlexey Charkov
1139*ca8b6e04SAlexey Charkov quirks = match->data;
1140*ca8b6e04SAlexey Charkov if (!quirks)
11412d283862SAlexey Charkov return -EINVAL;
11422d283862SAlexey Charkov
1143*ca8b6e04SAlexey Charkov return rhine_init_one_common(&pdev->dev, *quirks,
11442d283862SAlexey Charkov (long)ioaddr, ioaddr, irq);
11452d283862SAlexey Charkov }
11462d283862SAlexey Charkov
1147f2148a47SJeff Kirsher static int alloc_ring(struct net_device* dev)
1148f2148a47SJeff Kirsher {
1149f2148a47SJeff Kirsher struct rhine_private *rp = netdev_priv(dev);
1150f7630d18SAlexey Charkov struct device *hwdev = dev->dev.parent;
1151f2148a47SJeff Kirsher void *ring;
1152f2148a47SJeff Kirsher dma_addr_t ring_dma;
1153f2148a47SJeff Kirsher
1154f7630d18SAlexey Charkov ring = dma_alloc_coherent(hwdev,
1155f2148a47SJeff Kirsher RX_RING_SIZE * sizeof(struct rx_desc) +
1156f2148a47SJeff Kirsher TX_RING_SIZE * sizeof(struct tx_desc),
11574087c4dcSAlexey Charkov &ring_dma,
11584087c4dcSAlexey Charkov GFP_ATOMIC);
1159f2148a47SJeff Kirsher if (!ring) {
1160f2148a47SJeff Kirsher netdev_err(dev, "Could not allocate DMA memory\n");
1161f2148a47SJeff Kirsher return -ENOMEM;
1162f2148a47SJeff Kirsher }
1163f2148a47SJeff Kirsher if (rp->quirks & rqRhineI) {
1164f7630d18SAlexey Charkov rp->tx_bufs = dma_alloc_coherent(hwdev,
1165f2148a47SJeff Kirsher PKT_BUF_SZ * TX_RING_SIZE,
11664087c4dcSAlexey Charkov &rp->tx_bufs_dma,
11674087c4dcSAlexey Charkov GFP_ATOMIC);
1168f2148a47SJeff Kirsher if (rp->tx_bufs == NULL) {
1169f7630d18SAlexey Charkov
dma_free_coherent(hwdev, 1170f2148a47SJeff Kirsher RX_RING_SIZE * sizeof(struct rx_desc) + 1171f2148a47SJeff Kirsher TX_RING_SIZE * sizeof(struct tx_desc), 1172f2148a47SJeff Kirsher ring, ring_dma); 1173f2148a47SJeff Kirsher return -ENOMEM; 1174f2148a47SJeff Kirsher } 1175f2148a47SJeff Kirsher } 1176f2148a47SJeff Kirsher 1177f2148a47SJeff Kirsher rp->rx_ring = ring; 1178f2148a47SJeff Kirsher rp->tx_ring = ring + RX_RING_SIZE * sizeof(struct rx_desc); 1179f2148a47SJeff Kirsher rp->rx_ring_dma = ring_dma; 1180f2148a47SJeff Kirsher rp->tx_ring_dma = ring_dma + RX_RING_SIZE * sizeof(struct rx_desc); 1181f2148a47SJeff Kirsher 1182f2148a47SJeff Kirsher return 0; 1183f2148a47SJeff Kirsher } 1184f2148a47SJeff Kirsher 1185f2148a47SJeff Kirsher static void free_ring(struct net_device* dev) 1186f2148a47SJeff Kirsher { 1187f2148a47SJeff Kirsher struct rhine_private *rp = netdev_priv(dev); 1188f7630d18SAlexey Charkov struct device *hwdev = dev->dev.parent; 1189f2148a47SJeff Kirsher 1190f7630d18SAlexey Charkov dma_free_coherent(hwdev, 1191f2148a47SJeff Kirsher RX_RING_SIZE * sizeof(struct rx_desc) + 1192f2148a47SJeff Kirsher TX_RING_SIZE * sizeof(struct tx_desc), 1193f2148a47SJeff Kirsher rp->rx_ring, rp->rx_ring_dma); 1194f2148a47SJeff Kirsher rp->tx_ring = NULL; 1195f2148a47SJeff Kirsher 1196f2148a47SJeff Kirsher if (rp->tx_bufs) 1197f7630d18SAlexey Charkov dma_free_coherent(hwdev, PKT_BUF_SZ * TX_RING_SIZE, 1198f2148a47SJeff Kirsher rp->tx_bufs, rp->tx_bufs_dma); 1199f2148a47SJeff Kirsher 1200f2148a47SJeff Kirsher rp->tx_bufs = NULL; 1201f2148a47SJeff Kirsher 1202f2148a47SJeff Kirsher } 1203f2148a47SJeff Kirsher 1204f2148a47SJeff Kirsher static void alloc_rbufs(struct net_device *dev) 1205f2148a47SJeff Kirsher { 1206f2148a47SJeff Kirsher struct rhine_private *rp = netdev_priv(dev); 1207f7630d18SAlexey Charkov struct device *hwdev = dev->dev.parent; 1208f2148a47SJeff Kirsher dma_addr_t next; 1209f2148a47SJeff Kirsher int i; 1210f2148a47SJeff Kirsher 1211f2148a47SJeff Kirsher rp->dirty_rx = rp->cur_rx = 0; 1212f2148a47SJeff Kirsher 1213f2148a47SJeff Kirsher rp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32); 1214f2148a47SJeff Kirsher rp->rx_head_desc = &rp->rx_ring[0]; 1215f2148a47SJeff Kirsher next = rp->rx_ring_dma; 1216f2148a47SJeff Kirsher 1217f2148a47SJeff Kirsher /* Init the ring entries */ 1218f2148a47SJeff Kirsher for (i = 0; i < RX_RING_SIZE; i++) { 1219f2148a47SJeff Kirsher rp->rx_ring[i].rx_status = 0; 1220f2148a47SJeff Kirsher rp->rx_ring[i].desc_length = cpu_to_le32(rp->rx_buf_sz); 1221f2148a47SJeff Kirsher next += sizeof(struct rx_desc); 1222f2148a47SJeff Kirsher rp->rx_ring[i].next_desc = cpu_to_le32(next); 1223f2148a47SJeff Kirsher rp->rx_skbuff[i] = NULL; 1224f2148a47SJeff Kirsher } 1225f2148a47SJeff Kirsher /* Mark the last entry as wrapping the ring. */ 1226f2148a47SJeff Kirsher rp->rx_ring[i-1].next_desc = cpu_to_le32(rp->rx_ring_dma); 1227f2148a47SJeff Kirsher 1228f2148a47SJeff Kirsher /* Fill in the Rx buffers. Handle allocation failure gracefully. 
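   If an skb allocation or DMA mapping below fails, the loop simply stops
   early; dirty_rx is then set to (i - RX_RING_SIZE), an unsigned encoding
   of the refill deficit, so the refill pass at the end of rhine_rx() will
   retry the missing entries later.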
*/ 1229f2148a47SJeff Kirsher for (i = 0; i < RX_RING_SIZE; i++) { 1230f2148a47SJeff Kirsher struct sk_buff *skb = netdev_alloc_skb(dev, rp->rx_buf_sz); 1231f2148a47SJeff Kirsher rp->rx_skbuff[i] = skb; 1232f2148a47SJeff Kirsher if (skb == NULL) 1233f2148a47SJeff Kirsher break; 1234f2148a47SJeff Kirsher 1235f2148a47SJeff Kirsher rp->rx_skbuff_dma[i] = 1236f7630d18SAlexey Charkov dma_map_single(hwdev, skb->data, rp->rx_buf_sz, 12374087c4dcSAlexey Charkov DMA_FROM_DEVICE); 1238f7630d18SAlexey Charkov if (dma_mapping_error(hwdev, rp->rx_skbuff_dma[i])) { 12399b4fe5fbSNeil Horman rp->rx_skbuff_dma[i] = 0; 12409b4fe5fbSNeil Horman dev_kfree_skb(skb); 12419b4fe5fbSNeil Horman break; 12429b4fe5fbSNeil Horman } 1243f2148a47SJeff Kirsher rp->rx_ring[i].addr = cpu_to_le32(rp->rx_skbuff_dma[i]); 1244f2148a47SJeff Kirsher rp->rx_ring[i].rx_status = cpu_to_le32(DescOwn); 1245f2148a47SJeff Kirsher } 1246f2148a47SJeff Kirsher rp->dirty_rx = (unsigned int)(i - RX_RING_SIZE); 1247f2148a47SJeff Kirsher } 1248f2148a47SJeff Kirsher 1249f2148a47SJeff Kirsher static void free_rbufs(struct net_device* dev) 1250f2148a47SJeff Kirsher { 1251f2148a47SJeff Kirsher struct rhine_private *rp = netdev_priv(dev); 1252f7630d18SAlexey Charkov struct device *hwdev = dev->dev.parent; 1253f2148a47SJeff Kirsher int i; 1254f2148a47SJeff Kirsher 1255f2148a47SJeff Kirsher /* Free all the skbuffs in the Rx queue. */ 1256f2148a47SJeff Kirsher for (i = 0; i < RX_RING_SIZE; i++) { 1257f2148a47SJeff Kirsher rp->rx_ring[i].rx_status = 0; 1258f2148a47SJeff Kirsher rp->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */ 1259f2148a47SJeff Kirsher if (rp->rx_skbuff[i]) { 1260f7630d18SAlexey Charkov dma_unmap_single(hwdev, 1261f2148a47SJeff Kirsher rp->rx_skbuff_dma[i], 12624087c4dcSAlexey Charkov rp->rx_buf_sz, DMA_FROM_DEVICE); 1263f2148a47SJeff Kirsher dev_kfree_skb(rp->rx_skbuff[i]); 1264f2148a47SJeff Kirsher } 1265f2148a47SJeff Kirsher rp->rx_skbuff[i] = NULL; 1266f2148a47SJeff Kirsher } 1267f2148a47SJeff Kirsher } 1268f2148a47SJeff Kirsher 1269f2148a47SJeff Kirsher static void alloc_tbufs(struct net_device* dev) 1270f2148a47SJeff Kirsher { 1271f2148a47SJeff Kirsher struct rhine_private *rp = netdev_priv(dev); 1272f2148a47SJeff Kirsher dma_addr_t next; 1273f2148a47SJeff Kirsher int i; 1274f2148a47SJeff Kirsher 1275f2148a47SJeff Kirsher rp->dirty_tx = rp->cur_tx = 0; 1276f2148a47SJeff Kirsher next = rp->tx_ring_dma; 1277f2148a47SJeff Kirsher for (i = 0; i < TX_RING_SIZE; i++) { 1278f2148a47SJeff Kirsher rp->tx_skbuff[i] = NULL; 1279f2148a47SJeff Kirsher rp->tx_ring[i].tx_status = 0; 1280f2148a47SJeff Kirsher rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC); 1281f2148a47SJeff Kirsher next += sizeof(struct tx_desc); 1282f2148a47SJeff Kirsher rp->tx_ring[i].next_desc = cpu_to_le32(next); 1283f2148a47SJeff Kirsher if (rp->quirks & rqRhineI) 1284f2148a47SJeff Kirsher rp->tx_buf[i] = &rp->tx_bufs[i * PKT_BUF_SZ]; 1285f2148a47SJeff Kirsher } 1286f2148a47SJeff Kirsher rp->tx_ring[i-1].next_desc = cpu_to_le32(rp->tx_ring_dma); 1287f2148a47SJeff Kirsher 1288f2148a47SJeff Kirsher } 1289f2148a47SJeff Kirsher 1290f2148a47SJeff Kirsher static void free_tbufs(struct net_device* dev) 1291f2148a47SJeff Kirsher { 1292f2148a47SJeff Kirsher struct rhine_private *rp = netdev_priv(dev); 1293f7630d18SAlexey Charkov struct device *hwdev = dev->dev.parent; 1294f2148a47SJeff Kirsher int i; 1295f2148a47SJeff Kirsher 1296f2148a47SJeff Kirsher for (i = 0; i < TX_RING_SIZE; i++) { 1297f2148a47SJeff Kirsher rp->tx_ring[i].tx_status = 0; 
1298f2148a47SJeff Kirsher rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
1299f2148a47SJeff Kirsher rp->tx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
1300f2148a47SJeff Kirsher if (rp->tx_skbuff[i]) {
1301f2148a47SJeff Kirsher if (rp->tx_skbuff_dma[i]) {
1302f7630d18SAlexey Charkov dma_unmap_single(hwdev,
1303f2148a47SJeff Kirsher rp->tx_skbuff_dma[i],
1304f2148a47SJeff Kirsher rp->tx_skbuff[i]->len,
13054087c4dcSAlexey Charkov DMA_TO_DEVICE);
1306f2148a47SJeff Kirsher }
1307f2148a47SJeff Kirsher dev_kfree_skb(rp->tx_skbuff[i]);
1308f2148a47SJeff Kirsher }
1309f2148a47SJeff Kirsher rp->tx_skbuff[i] = NULL;
1310f2148a47SJeff Kirsher rp->tx_buf[i] = NULL;
1311f2148a47SJeff Kirsher }
1312f2148a47SJeff Kirsher }
1313f2148a47SJeff Kirsher
1314f2148a47SJeff Kirsher static void rhine_check_media(struct net_device *dev, unsigned int init_media)
1315f2148a47SJeff Kirsher {
1316f2148a47SJeff Kirsher struct rhine_private *rp = netdev_priv(dev);
1317f2148a47SJeff Kirsher void __iomem *ioaddr = rp->base;
1318f2148a47SJeff Kirsher
1319fc3e0f8aSFrancois Romieu mii_check_media(&rp->mii_if, netif_msg_link(rp), init_media);
1320f2148a47SJeff Kirsher
1321f2148a47SJeff Kirsher if (rp->mii_if.full_duplex)
1322f2148a47SJeff Kirsher iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1FDuplex,
1323f2148a47SJeff Kirsher ioaddr + ChipCmd1);
1324f2148a47SJeff Kirsher else
1325f2148a47SJeff Kirsher iowrite8(ioread8(ioaddr + ChipCmd1) & ~Cmd1FDuplex,
1326f2148a47SJeff Kirsher ioaddr + ChipCmd1);
1327fc3e0f8aSFrancois Romieu
1328fc3e0f8aSFrancois Romieu netif_info(rp, link, dev, "force_media %d, carrier %d\n",
1329f2148a47SJeff Kirsher rp->mii_if.force_media, netif_carrier_ok(dev));
1330f2148a47SJeff Kirsher }
1331f2148a47SJeff Kirsher
1332f2148a47SJeff Kirsher /* Called after the status of force_media possibly changed */
1333f2148a47SJeff Kirsher static void rhine_set_carrier(struct mii_if_info *mii)
1334f2148a47SJeff Kirsher {
1335fc3e0f8aSFrancois Romieu struct net_device *dev = mii->dev;
1336fc3e0f8aSFrancois Romieu struct rhine_private *rp = netdev_priv(dev);
1337fc3e0f8aSFrancois Romieu
1338f2148a47SJeff Kirsher if (mii->force_media) {
1339f2148a47SJeff Kirsher /* autoneg is off: Link is always assumed to be up */
1340fc3e0f8aSFrancois Romieu if (!netif_carrier_ok(dev))
1341fc3e0f8aSFrancois Romieu netif_carrier_on(dev);
1342fc3e0f8aSFrancois Romieu } else /* Let MII library update carrier status */
1343fc3e0f8aSFrancois Romieu rhine_check_media(dev, 0);
1344fc3e0f8aSFrancois Romieu
1345fc3e0f8aSFrancois Romieu netif_info(rp, link, dev, "force_media %d, carrier %d\n",
1346fc3e0f8aSFrancois Romieu mii->force_media, netif_carrier_ok(dev));
1347f2148a47SJeff Kirsher }
1348f2148a47SJeff Kirsher
1349f2148a47SJeff Kirsher /**
1350f2148a47SJeff Kirsher * rhine_set_cam - set CAM multicast filters
1351f2148a47SJeff Kirsher * @ioaddr: register block of this Rhine
1352f2148a47SJeff Kirsher * @idx: multicast CAM index [0..MCAM_SIZE-1]
1353f2148a47SJeff Kirsher * @addr: multicast address (6 bytes)
1354f2148a47SJeff Kirsher *
1355f2148a47SJeff Kirsher * Load addresses into multicast filters.
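 *
 * A usage sketch (hypothetical caller context; this mirrors what
 * rhine_set_rx_mode() does further below):
 *
 *	int i = 0;
 *	u32 mCAMmask = 0;
 *
 *	netdev_for_each_mc_addr(ha, dev) {
 *		if (i == MCAM_SIZE)
 *			break;
 *		rhine_set_cam(ioaddr, i, ha->addr);
 *		mCAMmask |= 1 << i;
 *		i++;
 *	}
 *	rhine_set_cam_mask(ioaddr, mCAMmask);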
1356f2148a47SJeff Kirsher */ 1357f2148a47SJeff Kirsher static void rhine_set_cam(void __iomem *ioaddr, int idx, u8 *addr) 1358f2148a47SJeff Kirsher { 1359f2148a47SJeff Kirsher int i; 1360f2148a47SJeff Kirsher 1361f2148a47SJeff Kirsher iowrite8(CAMC_CAMEN, ioaddr + CamCon); 1362f2148a47SJeff Kirsher wmb(); 1363f2148a47SJeff Kirsher 1364f2148a47SJeff Kirsher /* Paranoid -- idx out of range should never happen */ 1365f2148a47SJeff Kirsher idx &= (MCAM_SIZE - 1); 1366f2148a47SJeff Kirsher 1367f2148a47SJeff Kirsher iowrite8((u8) idx, ioaddr + CamAddr); 1368f2148a47SJeff Kirsher 1369f2148a47SJeff Kirsher for (i = 0; i < 6; i++, addr++) 1370f2148a47SJeff Kirsher iowrite8(*addr, ioaddr + MulticastFilter0 + i); 1371f2148a47SJeff Kirsher udelay(10); 1372f2148a47SJeff Kirsher wmb(); 1373f2148a47SJeff Kirsher 1374f2148a47SJeff Kirsher iowrite8(CAMC_CAMWR | CAMC_CAMEN, ioaddr + CamCon); 1375f2148a47SJeff Kirsher udelay(10); 1376f2148a47SJeff Kirsher 1377f2148a47SJeff Kirsher iowrite8(0, ioaddr + CamCon); 1378f2148a47SJeff Kirsher } 1379f2148a47SJeff Kirsher 1380f2148a47SJeff Kirsher /** 1381f2148a47SJeff Kirsher * rhine_set_vlan_cam - set CAM VLAN filters 1382f2148a47SJeff Kirsher * @ioaddr: register block of this Rhine 1383f2148a47SJeff Kirsher * @idx: VLAN CAM index [0..VCAM_SIZE-1] 1384f2148a47SJeff Kirsher * @addr: VLAN ID (2 bytes) 1385f2148a47SJeff Kirsher * 1386f2148a47SJeff Kirsher * Load addresses into VLAN filters. 1387f2148a47SJeff Kirsher */ 1388f2148a47SJeff Kirsher static void rhine_set_vlan_cam(void __iomem *ioaddr, int idx, u8 *addr) 1389f2148a47SJeff Kirsher { 1390f2148a47SJeff Kirsher iowrite8(CAMC_CAMEN | CAMC_VCAMSL, ioaddr + CamCon); 1391f2148a47SJeff Kirsher wmb(); 1392f2148a47SJeff Kirsher 1393f2148a47SJeff Kirsher /* Paranoid -- idx out of range should never happen */ 1394f2148a47SJeff Kirsher idx &= (VCAM_SIZE - 1); 1395f2148a47SJeff Kirsher 1396f2148a47SJeff Kirsher iowrite8((u8) idx, ioaddr + CamAddr); 1397f2148a47SJeff Kirsher 1398f2148a47SJeff Kirsher iowrite16(*((u16 *) addr), ioaddr + MulticastFilter0 + 6); 1399f2148a47SJeff Kirsher udelay(10); 1400f2148a47SJeff Kirsher wmb(); 1401f2148a47SJeff Kirsher 1402f2148a47SJeff Kirsher iowrite8(CAMC_CAMWR | CAMC_CAMEN, ioaddr + CamCon); 1403f2148a47SJeff Kirsher udelay(10); 1404f2148a47SJeff Kirsher 1405f2148a47SJeff Kirsher iowrite8(0, ioaddr + CamCon); 1406f2148a47SJeff Kirsher } 1407f2148a47SJeff Kirsher 1408f2148a47SJeff Kirsher /** 1409f2148a47SJeff Kirsher * rhine_set_cam_mask - set multicast CAM mask 1410f2148a47SJeff Kirsher * @ioaddr: register block of this Rhine 1411f2148a47SJeff Kirsher * @mask: multicast CAM mask 1412f2148a47SJeff Kirsher * 1413f2148a47SJeff Kirsher * Mask sets multicast filters active/inactive. 
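 *
 * Bit i of @mask activates the multicast CAM entry with the same index
 * (as programmed by rhine_set_cam()); e.g. a mask of 0x00000007 keeps
 * entries 0..2 active and disables the rest.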
1414f2148a47SJeff Kirsher */
1415f2148a47SJeff Kirsher static void rhine_set_cam_mask(void __iomem *ioaddr, u32 mask)
1416f2148a47SJeff Kirsher {
1417f2148a47SJeff Kirsher iowrite8(CAMC_CAMEN, ioaddr + CamCon);
1418f2148a47SJeff Kirsher wmb();
1419f2148a47SJeff Kirsher
1420f2148a47SJeff Kirsher /* write mask */
1421f2148a47SJeff Kirsher iowrite32(mask, ioaddr + CamMask);
1422f2148a47SJeff Kirsher
1423f2148a47SJeff Kirsher /* disable CAMEN */
1424f2148a47SJeff Kirsher iowrite8(0, ioaddr + CamCon);
1425f2148a47SJeff Kirsher }
1426f2148a47SJeff Kirsher
1427f2148a47SJeff Kirsher /**
1428f2148a47SJeff Kirsher * rhine_set_vlan_cam_mask - set VLAN CAM mask
1429f2148a47SJeff Kirsher * @ioaddr: register block of this Rhine
1430f2148a47SJeff Kirsher * @mask: VLAN CAM mask
1431f2148a47SJeff Kirsher *
1432f2148a47SJeff Kirsher * Mask sets VLAN filters active/inactive.
1433f2148a47SJeff Kirsher */
1434f2148a47SJeff Kirsher static void rhine_set_vlan_cam_mask(void __iomem *ioaddr, u32 mask)
1435f2148a47SJeff Kirsher {
1436f2148a47SJeff Kirsher iowrite8(CAMC_CAMEN | CAMC_VCAMSL, ioaddr + CamCon);
1437f2148a47SJeff Kirsher wmb();
1438f2148a47SJeff Kirsher
1439f2148a47SJeff Kirsher /* write mask */
1440f2148a47SJeff Kirsher iowrite32(mask, ioaddr + CamMask);
1441f2148a47SJeff Kirsher
1442f2148a47SJeff Kirsher /* disable CAMEN */
1443f2148a47SJeff Kirsher iowrite8(0, ioaddr + CamCon);
1444f2148a47SJeff Kirsher }
1445f2148a47SJeff Kirsher
1446f2148a47SJeff Kirsher /**
1447f2148a47SJeff Kirsher * rhine_init_cam_filter - initialize CAM filters
1448f2148a47SJeff Kirsher * @dev: network device
1449f2148a47SJeff Kirsher *
1450f2148a47SJeff Kirsher * Initialize (disable) hardware VLAN and multicast support on this
1451f2148a47SJeff Kirsher * Rhine.
1452f2148a47SJeff Kirsher */
1453f2148a47SJeff Kirsher static void rhine_init_cam_filter(struct net_device *dev)
1454f2148a47SJeff Kirsher {
1455f2148a47SJeff Kirsher struct rhine_private *rp = netdev_priv(dev);
1456f2148a47SJeff Kirsher void __iomem *ioaddr = rp->base;
1457f2148a47SJeff Kirsher
1458f2148a47SJeff Kirsher /* Disable all CAMs */
1459f2148a47SJeff Kirsher rhine_set_vlan_cam_mask(ioaddr, 0);
1460f2148a47SJeff Kirsher rhine_set_cam_mask(ioaddr, 0);
1461f2148a47SJeff Kirsher
1462f2148a47SJeff Kirsher /* disable hardware VLAN support */
1463f2148a47SJeff Kirsher BYTE_REG_BITS_ON(TCR_PQEN, ioaddr + TxConfig);
1464f2148a47SJeff Kirsher BYTE_REG_BITS_OFF(BCR1_VIDFR, ioaddr + PCIBusConfig1);
1465f2148a47SJeff Kirsher }
1466f2148a47SJeff Kirsher
1467f2148a47SJeff Kirsher /**
1468f2148a47SJeff Kirsher * rhine_update_vcam - update VLAN CAM filters
1469f2148a47SJeff Kirsher * @dev: network device
1470f2148a47SJeff Kirsher *
1471f2148a47SJeff Kirsher * Update VLAN CAM filters to match configuration change.
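 *
 * Called from the VLAN add/kill handlers below, which hold rp->lock
 * while rp->active_vlans is changed and the vCAMs are reprogrammed.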
1472f2148a47SJeff Kirsher */ 1473f2148a47SJeff Kirsher static void rhine_update_vcam(struct net_device *dev) 1474f2148a47SJeff Kirsher { 1475f2148a47SJeff Kirsher struct rhine_private *rp = netdev_priv(dev); 1476f2148a47SJeff Kirsher void __iomem *ioaddr = rp->base; 1477f2148a47SJeff Kirsher u16 vid; 1478f2148a47SJeff Kirsher u32 vCAMmask = 0; /* 32 vCAMs (6105M and better) */ 1479f2148a47SJeff Kirsher unsigned int i = 0; 1480f2148a47SJeff Kirsher 1481f2148a47SJeff Kirsher for_each_set_bit(vid, rp->active_vlans, VLAN_N_VID) { 1482f2148a47SJeff Kirsher rhine_set_vlan_cam(ioaddr, i, (u8 *)&vid); 1483f2148a47SJeff Kirsher vCAMmask |= 1 << i; 1484f2148a47SJeff Kirsher if (++i >= VCAM_SIZE) 1485f2148a47SJeff Kirsher break; 1486f2148a47SJeff Kirsher } 1487f2148a47SJeff Kirsher rhine_set_vlan_cam_mask(ioaddr, vCAMmask); 1488f2148a47SJeff Kirsher } 1489f2148a47SJeff Kirsher 149080d5c368SPatrick McHardy static int rhine_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid) 1491f2148a47SJeff Kirsher { 1492f2148a47SJeff Kirsher struct rhine_private *rp = netdev_priv(dev); 1493f2148a47SJeff Kirsher 14947ab87ff4SFrancois Romieu spin_lock_bh(&rp->lock); 1495f2148a47SJeff Kirsher set_bit(vid, rp->active_vlans); 1496f2148a47SJeff Kirsher rhine_update_vcam(dev); 14977ab87ff4SFrancois Romieu spin_unlock_bh(&rp->lock); 14988e586137SJiri Pirko return 0; 1499f2148a47SJeff Kirsher } 1500f2148a47SJeff Kirsher 150180d5c368SPatrick McHardy static int rhine_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid) 1502f2148a47SJeff Kirsher { 1503f2148a47SJeff Kirsher struct rhine_private *rp = netdev_priv(dev); 1504f2148a47SJeff Kirsher 15057ab87ff4SFrancois Romieu spin_lock_bh(&rp->lock); 1506f2148a47SJeff Kirsher clear_bit(vid, rp->active_vlans); 1507f2148a47SJeff Kirsher rhine_update_vcam(dev); 15087ab87ff4SFrancois Romieu spin_unlock_bh(&rp->lock); 15098e586137SJiri Pirko return 0; 1510f2148a47SJeff Kirsher } 1511f2148a47SJeff Kirsher 1512f2148a47SJeff Kirsher static void init_registers(struct net_device *dev) 1513f2148a47SJeff Kirsher { 1514f2148a47SJeff Kirsher struct rhine_private *rp = netdev_priv(dev); 1515f2148a47SJeff Kirsher void __iomem *ioaddr = rp->base; 1516f2148a47SJeff Kirsher int i; 1517f2148a47SJeff Kirsher 1518f2148a47SJeff Kirsher for (i = 0; i < 6; i++) 1519f2148a47SJeff Kirsher iowrite8(dev->dev_addr[i], ioaddr + StationAddr + i); 1520f2148a47SJeff Kirsher 1521f2148a47SJeff Kirsher /* Initialize other registers. */ 1522f2148a47SJeff Kirsher iowrite16(0x0006, ioaddr + PCIBusConfig); /* Tune configuration??? */ 1523f2148a47SJeff Kirsher /* Configure initial FIFO thresholds. */ 1524f2148a47SJeff Kirsher iowrite8(0x20, ioaddr + TxConfig); 1525f2148a47SJeff Kirsher rp->tx_thresh = 0x20; 1526f2148a47SJeff Kirsher rp->rx_thresh = 0x60; /* Written in rhine_set_rx_mode(). 
*/ 1527f2148a47SJeff Kirsher 1528f2148a47SJeff Kirsher iowrite32(rp->rx_ring_dma, ioaddr + RxRingPtr); 1529f2148a47SJeff Kirsher iowrite32(rp->tx_ring_dma, ioaddr + TxRingPtr); 1530f2148a47SJeff Kirsher 1531f2148a47SJeff Kirsher rhine_set_rx_mode(dev); 1532f2148a47SJeff Kirsher 1533*ca8b6e04SAlexey Charkov if (rp->quirks & rqMgmt) 1534f2148a47SJeff Kirsher rhine_init_cam_filter(dev); 1535f2148a47SJeff Kirsher 1536f2148a47SJeff Kirsher napi_enable(&rp->napi); 1537f2148a47SJeff Kirsher 15387ab87ff4SFrancois Romieu iowrite16(RHINE_EVENT & 0xffff, ioaddr + IntrEnable); 1539f2148a47SJeff Kirsher 1540f2148a47SJeff Kirsher iowrite16(CmdStart | CmdTxOn | CmdRxOn | (Cmd1NoTxPoll << 8), 1541f2148a47SJeff Kirsher ioaddr + ChipCmd); 1542f2148a47SJeff Kirsher rhine_check_media(dev, 1); 1543f2148a47SJeff Kirsher } 1544f2148a47SJeff Kirsher 1545f2148a47SJeff Kirsher /* Enable MII link status auto-polling (required for IntrLinkChange) */ 1546a384a33bSFrancois Romieu static void rhine_enable_linkmon(struct rhine_private *rp) 1547f2148a47SJeff Kirsher { 1548a384a33bSFrancois Romieu void __iomem *ioaddr = rp->base; 1549a384a33bSFrancois Romieu 1550f2148a47SJeff Kirsher iowrite8(0, ioaddr + MIICmd); 1551f2148a47SJeff Kirsher iowrite8(MII_BMSR, ioaddr + MIIRegAddr); 1552f2148a47SJeff Kirsher iowrite8(0x80, ioaddr + MIICmd); 1553f2148a47SJeff Kirsher 1554a384a33bSFrancois Romieu rhine_wait_bit_high(rp, MIIRegAddr, 0x20); 1555f2148a47SJeff Kirsher 1556f2148a47SJeff Kirsher iowrite8(MII_BMSR | 0x40, ioaddr + MIIRegAddr); 1557f2148a47SJeff Kirsher } 1558f2148a47SJeff Kirsher 1559f2148a47SJeff Kirsher /* Disable MII link status auto-polling (required for MDIO access) */ 1560a384a33bSFrancois Romieu static void rhine_disable_linkmon(struct rhine_private *rp) 1561f2148a47SJeff Kirsher { 1562a384a33bSFrancois Romieu void __iomem *ioaddr = rp->base; 1563a384a33bSFrancois Romieu 1564f2148a47SJeff Kirsher iowrite8(0, ioaddr + MIICmd); 1565f2148a47SJeff Kirsher 1566a384a33bSFrancois Romieu if (rp->quirks & rqRhineI) { 1567f2148a47SJeff Kirsher iowrite8(0x01, ioaddr + MIIRegAddr); // MII_BMSR 1568f2148a47SJeff Kirsher 1569f2148a47SJeff Kirsher /* Can be called from ISR. Evil. */ 1570f2148a47SJeff Kirsher mdelay(1); 1571f2148a47SJeff Kirsher 1572f2148a47SJeff Kirsher /* 0x80 must be set immediately before turning it off */ 1573f2148a47SJeff Kirsher iowrite8(0x80, ioaddr + MIICmd); 1574f2148a47SJeff Kirsher 1575a384a33bSFrancois Romieu rhine_wait_bit_high(rp, MIIRegAddr, 0x20); 1576f2148a47SJeff Kirsher 1577f2148a47SJeff Kirsher /* Heh. Now clear 0x80 again. */ 1578f2148a47SJeff Kirsher iowrite8(0, ioaddr + MIICmd); 1579f2148a47SJeff Kirsher } 1580f2148a47SJeff Kirsher else 1581a384a33bSFrancois Romieu rhine_wait_bit_high(rp, MIIRegAddr, 0x80); 1582f2148a47SJeff Kirsher } 1583f2148a47SJeff Kirsher 1584f2148a47SJeff Kirsher /* Read and write over the MII Management Data I/O (MDIO) interface. 
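   A usage sketch (hypothetical; both helpers are defined below, and
   MII_BMSR/BMSR_LSTATUS come from <linux/mii.h>):

	int bmsr = mdio_read(dev, rp->mii_if.phy_id, MII_BMSR);

	if (bmsr & BMSR_LSTATUS)
		netif_carrier_on(dev);

   Note that every access temporarily disables the chip's link-status
   auto-polling via rhine_disable_linkmon()/rhine_enable_linkmon().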
*/ 1585f2148a47SJeff Kirsher 1586f2148a47SJeff Kirsher static int mdio_read(struct net_device *dev, int phy_id, int regnum) 1587f2148a47SJeff Kirsher { 1588f2148a47SJeff Kirsher struct rhine_private *rp = netdev_priv(dev); 1589f2148a47SJeff Kirsher void __iomem *ioaddr = rp->base; 1590f2148a47SJeff Kirsher int result; 1591f2148a47SJeff Kirsher 1592a384a33bSFrancois Romieu rhine_disable_linkmon(rp); 1593f2148a47SJeff Kirsher 1594f2148a47SJeff Kirsher /* rhine_disable_linkmon already cleared MIICmd */ 1595f2148a47SJeff Kirsher iowrite8(phy_id, ioaddr + MIIPhyAddr); 1596f2148a47SJeff Kirsher iowrite8(regnum, ioaddr + MIIRegAddr); 1597f2148a47SJeff Kirsher iowrite8(0x40, ioaddr + MIICmd); /* Trigger read */ 1598a384a33bSFrancois Romieu rhine_wait_bit_low(rp, MIICmd, 0x40); 1599f2148a47SJeff Kirsher result = ioread16(ioaddr + MIIData); 1600f2148a47SJeff Kirsher 1601a384a33bSFrancois Romieu rhine_enable_linkmon(rp); 1602f2148a47SJeff Kirsher return result; 1603f2148a47SJeff Kirsher } 1604f2148a47SJeff Kirsher 1605f2148a47SJeff Kirsher static void mdio_write(struct net_device *dev, int phy_id, int regnum, int value) 1606f2148a47SJeff Kirsher { 1607f2148a47SJeff Kirsher struct rhine_private *rp = netdev_priv(dev); 1608f2148a47SJeff Kirsher void __iomem *ioaddr = rp->base; 1609f2148a47SJeff Kirsher 1610a384a33bSFrancois Romieu rhine_disable_linkmon(rp); 1611f2148a47SJeff Kirsher 1612f2148a47SJeff Kirsher /* rhine_disable_linkmon already cleared MIICmd */ 1613f2148a47SJeff Kirsher iowrite8(phy_id, ioaddr + MIIPhyAddr); 1614f2148a47SJeff Kirsher iowrite8(regnum, ioaddr + MIIRegAddr); 1615f2148a47SJeff Kirsher iowrite16(value, ioaddr + MIIData); 1616f2148a47SJeff Kirsher iowrite8(0x20, ioaddr + MIICmd); /* Trigger write */ 1617a384a33bSFrancois Romieu rhine_wait_bit_low(rp, MIICmd, 0x20); 1618f2148a47SJeff Kirsher 1619a384a33bSFrancois Romieu rhine_enable_linkmon(rp); 1620f2148a47SJeff Kirsher } 1621f2148a47SJeff Kirsher 16227ab87ff4SFrancois Romieu static void rhine_task_disable(struct rhine_private *rp) 16237ab87ff4SFrancois Romieu { 16247ab87ff4SFrancois Romieu mutex_lock(&rp->task_lock); 16257ab87ff4SFrancois Romieu rp->task_enable = false; 16267ab87ff4SFrancois Romieu mutex_unlock(&rp->task_lock); 16277ab87ff4SFrancois Romieu 16287ab87ff4SFrancois Romieu cancel_work_sync(&rp->slow_event_task); 16297ab87ff4SFrancois Romieu cancel_work_sync(&rp->reset_task); 16307ab87ff4SFrancois Romieu } 16317ab87ff4SFrancois Romieu 16327ab87ff4SFrancois Romieu static void rhine_task_enable(struct rhine_private *rp) 16337ab87ff4SFrancois Romieu { 16347ab87ff4SFrancois Romieu mutex_lock(&rp->task_lock); 16357ab87ff4SFrancois Romieu rp->task_enable = true; 16367ab87ff4SFrancois Romieu mutex_unlock(&rp->task_lock); 16377ab87ff4SFrancois Romieu } 16387ab87ff4SFrancois Romieu 1639f2148a47SJeff Kirsher static int rhine_open(struct net_device *dev) 1640f2148a47SJeff Kirsher { 1641f2148a47SJeff Kirsher struct rhine_private *rp = netdev_priv(dev); 1642f2148a47SJeff Kirsher void __iomem *ioaddr = rp->base; 1643f2148a47SJeff Kirsher int rc; 1644f2148a47SJeff Kirsher 1645f7630d18SAlexey Charkov rc = request_irq(rp->irq, rhine_interrupt, IRQF_SHARED, dev->name, dev); 1646f2148a47SJeff Kirsher if (rc) 1647f2148a47SJeff Kirsher return rc; 1648f2148a47SJeff Kirsher 1649f7630d18SAlexey Charkov netif_dbg(rp, ifup, dev, "%s() irq %d\n", __func__, rp->irq); 1650f2148a47SJeff Kirsher 1651f2148a47SJeff Kirsher rc = alloc_ring(dev); 1652f2148a47SJeff Kirsher if (rc) { 1653f7630d18SAlexey Charkov free_irq(rp->irq, dev); 1654f2148a47SJeff 
Kirsher return rc; 1655f2148a47SJeff Kirsher } 1656f2148a47SJeff Kirsher alloc_rbufs(dev); 1657f2148a47SJeff Kirsher alloc_tbufs(dev); 1658f2148a47SJeff Kirsher rhine_chip_reset(dev); 16597ab87ff4SFrancois Romieu rhine_task_enable(rp); 1660f2148a47SJeff Kirsher init_registers(dev); 1661fc3e0f8aSFrancois Romieu 1662fc3e0f8aSFrancois Romieu netif_dbg(rp, ifup, dev, "%s() Done - status %04x MII status: %04x\n", 1663f2148a47SJeff Kirsher __func__, ioread16(ioaddr + ChipCmd), 1664f2148a47SJeff Kirsher mdio_read(dev, rp->mii_if.phy_id, MII_BMSR)); 1665f2148a47SJeff Kirsher 1666f2148a47SJeff Kirsher netif_start_queue(dev); 1667f2148a47SJeff Kirsher 1668f2148a47SJeff Kirsher return 0; 1669f2148a47SJeff Kirsher } 1670f2148a47SJeff Kirsher 1671f2148a47SJeff Kirsher static void rhine_reset_task(struct work_struct *work) 1672f2148a47SJeff Kirsher { 1673f2148a47SJeff Kirsher struct rhine_private *rp = container_of(work, struct rhine_private, 1674f2148a47SJeff Kirsher reset_task); 1675f2148a47SJeff Kirsher struct net_device *dev = rp->dev; 1676f2148a47SJeff Kirsher 16777ab87ff4SFrancois Romieu mutex_lock(&rp->task_lock); 16787ab87ff4SFrancois Romieu 16797ab87ff4SFrancois Romieu if (!rp->task_enable) 16807ab87ff4SFrancois Romieu goto out_unlock; 1681f2148a47SJeff Kirsher 1682f2148a47SJeff Kirsher napi_disable(&rp->napi); 1683a926592fSRichard Weinberger netif_tx_disable(dev); 1684f2148a47SJeff Kirsher spin_lock_bh(&rp->lock); 1685f2148a47SJeff Kirsher 1686f2148a47SJeff Kirsher /* clear all descriptors */ 1687f2148a47SJeff Kirsher free_tbufs(dev); 1688f2148a47SJeff Kirsher free_rbufs(dev); 1689f2148a47SJeff Kirsher alloc_tbufs(dev); 1690f2148a47SJeff Kirsher alloc_rbufs(dev); 1691f2148a47SJeff Kirsher 1692f2148a47SJeff Kirsher /* Reinitialize the hardware. */ 1693f2148a47SJeff Kirsher rhine_chip_reset(dev); 1694f2148a47SJeff Kirsher init_registers(dev); 1695f2148a47SJeff Kirsher 1696f2148a47SJeff Kirsher spin_unlock_bh(&rp->lock); 1697f2148a47SJeff Kirsher 1698f2148a47SJeff Kirsher dev->trans_start = jiffies; /* prevent tx timeout */ 1699f2148a47SJeff Kirsher dev->stats.tx_errors++; 1700f2148a47SJeff Kirsher netif_wake_queue(dev); 17017ab87ff4SFrancois Romieu 17027ab87ff4SFrancois Romieu out_unlock: 17037ab87ff4SFrancois Romieu mutex_unlock(&rp->task_lock); 1704f2148a47SJeff Kirsher } 1705f2148a47SJeff Kirsher 1706f2148a47SJeff Kirsher static void rhine_tx_timeout(struct net_device *dev) 1707f2148a47SJeff Kirsher { 1708f2148a47SJeff Kirsher struct rhine_private *rp = netdev_priv(dev); 1709f2148a47SJeff Kirsher void __iomem *ioaddr = rp->base; 1710f2148a47SJeff Kirsher 1711f2148a47SJeff Kirsher netdev_warn(dev, "Transmit timed out, status %04x, PHY status %04x, resetting...\n", 1712f2148a47SJeff Kirsher ioread16(ioaddr + IntrStatus), 1713f2148a47SJeff Kirsher mdio_read(dev, rp->mii_if.phy_id, MII_BMSR)); 1714f2148a47SJeff Kirsher 1715f2148a47SJeff Kirsher schedule_work(&rp->reset_task); 1716f2148a47SJeff Kirsher } 1717f2148a47SJeff Kirsher 1718f2148a47SJeff Kirsher static netdev_tx_t rhine_start_tx(struct sk_buff *skb, 1719f2148a47SJeff Kirsher struct net_device *dev) 1720f2148a47SJeff Kirsher { 1721f2148a47SJeff Kirsher struct rhine_private *rp = netdev_priv(dev); 1722f7630d18SAlexey Charkov struct device *hwdev = dev->dev.parent; 1723f2148a47SJeff Kirsher void __iomem *ioaddr = rp->base; 1724f2148a47SJeff Kirsher unsigned entry; 1725f2148a47SJeff Kirsher 1726f2148a47SJeff Kirsher /* Caution: the write order is important here, set the field 1727f2148a47SJeff Kirsher with the "ownership" bits last. 
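   The descriptor's addr and desc_length must be visible to the device
   before DescOwn is handed over, which is why the tx_status update
   below is bracketed by wmb() barriers.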
*/ 1728f2148a47SJeff Kirsher 1729f2148a47SJeff Kirsher /* Calculate the next Tx descriptor entry. */ 1730f2148a47SJeff Kirsher entry = rp->cur_tx % TX_RING_SIZE; 1731f2148a47SJeff Kirsher 1732f2148a47SJeff Kirsher if (skb_padto(skb, ETH_ZLEN)) 1733f2148a47SJeff Kirsher return NETDEV_TX_OK; 1734f2148a47SJeff Kirsher 1735f2148a47SJeff Kirsher rp->tx_skbuff[entry] = skb; 1736f2148a47SJeff Kirsher 1737f2148a47SJeff Kirsher if ((rp->quirks & rqRhineI) && 1738f2148a47SJeff Kirsher (((unsigned long)skb->data & 3) || skb_shinfo(skb)->nr_frags != 0 || skb->ip_summed == CHECKSUM_PARTIAL)) { 1739f2148a47SJeff Kirsher /* Must use alignment buffer. */ 1740f2148a47SJeff Kirsher if (skb->len > PKT_BUF_SZ) { 1741f2148a47SJeff Kirsher /* packet too long, drop it */ 17424b3afc6eSEric W. Biederman dev_kfree_skb_any(skb); 1743f2148a47SJeff Kirsher rp->tx_skbuff[entry] = NULL; 1744f2148a47SJeff Kirsher dev->stats.tx_dropped++; 1745f2148a47SJeff Kirsher return NETDEV_TX_OK; 1746f2148a47SJeff Kirsher } 1747f2148a47SJeff Kirsher 1748f2148a47SJeff Kirsher /* Padding is not copied and so must be redone. */ 1749f2148a47SJeff Kirsher skb_copy_and_csum_dev(skb, rp->tx_buf[entry]); 1750f2148a47SJeff Kirsher if (skb->len < ETH_ZLEN) 1751f2148a47SJeff Kirsher memset(rp->tx_buf[entry] + skb->len, 0, 1752f2148a47SJeff Kirsher ETH_ZLEN - skb->len); 1753f2148a47SJeff Kirsher rp->tx_skbuff_dma[entry] = 0; 1754f2148a47SJeff Kirsher rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_bufs_dma + 1755f2148a47SJeff Kirsher (rp->tx_buf[entry] - 1756f2148a47SJeff Kirsher rp->tx_bufs)); 1757f2148a47SJeff Kirsher } else { 1758f2148a47SJeff Kirsher rp->tx_skbuff_dma[entry] = 1759f7630d18SAlexey Charkov dma_map_single(hwdev, skb->data, skb->len, 17604087c4dcSAlexey Charkov DMA_TO_DEVICE); 1761f7630d18SAlexey Charkov if (dma_mapping_error(hwdev, rp->tx_skbuff_dma[entry])) { 17624b3afc6eSEric W. Biederman dev_kfree_skb_any(skb); 17639b4fe5fbSNeil Horman rp->tx_skbuff_dma[entry] = 0; 17649b4fe5fbSNeil Horman dev->stats.tx_dropped++; 17659b4fe5fbSNeil Horman return NETDEV_TX_OK; 17669b4fe5fbSNeil Horman } 1767f2148a47SJeff Kirsher rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_skbuff_dma[entry]); 1768f2148a47SJeff Kirsher } 1769f2148a47SJeff Kirsher 1770f2148a47SJeff Kirsher rp->tx_ring[entry].desc_length = 1771f2148a47SJeff Kirsher cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN)); 1772f2148a47SJeff Kirsher 1773f2148a47SJeff Kirsher if (unlikely(vlan_tx_tag_present(skb))) { 1774207070f5SRoger Luethi u16 vid_pcp = vlan_tx_tag_get(skb); 1775207070f5SRoger Luethi 1776207070f5SRoger Luethi /* drop CFI/DEI bit, register needs VID and PCP */ 1777207070f5SRoger Luethi vid_pcp = (vid_pcp & VLAN_VID_MASK) | 1778207070f5SRoger Luethi ((vid_pcp & VLAN_PRIO_MASK) >> 1); 1779207070f5SRoger Luethi rp->tx_ring[entry].tx_status = cpu_to_le32((vid_pcp) << 16); 1780f2148a47SJeff Kirsher /* request tagging */ 1781f2148a47SJeff Kirsher rp->tx_ring[entry].desc_length |= cpu_to_le32(0x020000); 1782f2148a47SJeff Kirsher } 1783f2148a47SJeff Kirsher else 1784f2148a47SJeff Kirsher rp->tx_ring[entry].tx_status = 0; 1785f2148a47SJeff Kirsher 1786f2148a47SJeff Kirsher /* lock eth irq */ 1787f2148a47SJeff Kirsher wmb(); 1788f2148a47SJeff Kirsher rp->tx_ring[entry].tx_status |= cpu_to_le32(DescOwn); 1789f2148a47SJeff Kirsher wmb(); 1790f2148a47SJeff Kirsher 1791f2148a47SJeff Kirsher rp->cur_tx++; 1792f2148a47SJeff Kirsher 1793f2148a47SJeff Kirsher /* Non-x86 Todo: explicitly flush cache lines here. 
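   (The ring itself is allocated with dma_alloc_coherent() in
   alloc_ring(), so the wmb() pairs above should already provide the
   ordering needed on weakly-ordered platforms.)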
*/ 1794f2148a47SJeff Kirsher 1795f2148a47SJeff Kirsher if (vlan_tx_tag_present(skb)) 1796f2148a47SJeff Kirsher /* Tx queues are bits 7-0 (first Tx queue: bit 7) */ 1797f2148a47SJeff Kirsher BYTE_REG_BITS_ON(1 << 7, ioaddr + TQWake); 1798f2148a47SJeff Kirsher 1799f2148a47SJeff Kirsher /* Wake the potentially-idle transmit channel */ 1800f2148a47SJeff Kirsher iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand, 1801f2148a47SJeff Kirsher ioaddr + ChipCmd1); 1802f2148a47SJeff Kirsher IOSYNC; 1803f2148a47SJeff Kirsher 1804f2148a47SJeff Kirsher if (rp->cur_tx == rp->dirty_tx + TX_QUEUE_LEN) 1805f2148a47SJeff Kirsher netif_stop_queue(dev); 1806f2148a47SJeff Kirsher 1807fc3e0f8aSFrancois Romieu netif_dbg(rp, tx_queued, dev, "Transmit frame #%d queued in slot %d\n", 1808f2148a47SJeff Kirsher rp->cur_tx - 1, entry); 1809fc3e0f8aSFrancois Romieu 1810f2148a47SJeff Kirsher return NETDEV_TX_OK; 1811f2148a47SJeff Kirsher } 1812f2148a47SJeff Kirsher 18137ab87ff4SFrancois Romieu static void rhine_irq_disable(struct rhine_private *rp) 18147ab87ff4SFrancois Romieu { 18157ab87ff4SFrancois Romieu iowrite16(0x0000, rp->base + IntrEnable); 18167ab87ff4SFrancois Romieu mmiowb(); 18177ab87ff4SFrancois Romieu } 18187ab87ff4SFrancois Romieu 1819f2148a47SJeff Kirsher /* The interrupt handler does all of the Rx thread work and cleans up 1820f2148a47SJeff Kirsher after the Tx thread. */ 1821f2148a47SJeff Kirsher static irqreturn_t rhine_interrupt(int irq, void *dev_instance) 1822f2148a47SJeff Kirsher { 1823f2148a47SJeff Kirsher struct net_device *dev = dev_instance; 1824f2148a47SJeff Kirsher struct rhine_private *rp = netdev_priv(dev); 18257ab87ff4SFrancois Romieu u32 status; 1826f2148a47SJeff Kirsher int handled = 0; 1827f2148a47SJeff Kirsher 18287ab87ff4SFrancois Romieu status = rhine_get_events(rp); 1829f2148a47SJeff Kirsher 1830fc3e0f8aSFrancois Romieu netif_dbg(rp, intr, dev, "Interrupt, status %08x\n", status); 1831f2148a47SJeff Kirsher 18327ab87ff4SFrancois Romieu if (status & RHINE_EVENT) { 18337ab87ff4SFrancois Romieu handled = 1; 1834f2148a47SJeff Kirsher 18357ab87ff4SFrancois Romieu rhine_irq_disable(rp); 1836f2148a47SJeff Kirsher napi_schedule(&rp->napi); 1837f2148a47SJeff Kirsher } 1838f2148a47SJeff Kirsher 18397ab87ff4SFrancois Romieu if (status & ~(IntrLinkChange | IntrStatsMax | RHINE_EVENT_NAPI)) { 1840fc3e0f8aSFrancois Romieu netif_err(rp, intr, dev, "Something Wicked happened! %08x\n", 18417ab87ff4SFrancois Romieu status); 1842f2148a47SJeff Kirsher } 1843f2148a47SJeff Kirsher 1844f2148a47SJeff Kirsher return IRQ_RETVAL(handled); 1845f2148a47SJeff Kirsher } 1846f2148a47SJeff Kirsher 1847f2148a47SJeff Kirsher /* This routine is logically part of the interrupt handler, but isolated 1848f2148a47SJeff Kirsher for clarity. 
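*/

/*
 * A sketch, not part of the original driver: cur_tx and dirty_tx are
 * free-running counters, so the number of descriptors the hardware still
 * owns is their wrap-safe unsigned difference. rhine_tx() below drains
 * this count, and rhine_start_tx() bounds it by TX_QUEUE_LEN.
 */
static inline unsigned int rhine_tx_inflight(struct rhine_private *rp)
{
	return rp->cur_tx - rp->dirty_tx;
}

/*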
*/ 1849f2148a47SJeff Kirsher static void rhine_tx(struct net_device *dev) 1850f2148a47SJeff Kirsher { 1851f2148a47SJeff Kirsher struct rhine_private *rp = netdev_priv(dev); 1852f7630d18SAlexey Charkov struct device *hwdev = dev->dev.parent; 1853f2148a47SJeff Kirsher int txstatus = 0, entry = rp->dirty_tx % TX_RING_SIZE; 1854f2148a47SJeff Kirsher 1855f2148a47SJeff Kirsher /* find and cleanup dirty tx descriptors */ 1856f2148a47SJeff Kirsher while (rp->dirty_tx != rp->cur_tx) { 1857f2148a47SJeff Kirsher txstatus = le32_to_cpu(rp->tx_ring[entry].tx_status); 1858fc3e0f8aSFrancois Romieu netif_dbg(rp, tx_done, dev, "Tx scavenge %d status %08x\n", 1859f2148a47SJeff Kirsher entry, txstatus); 1860f2148a47SJeff Kirsher if (txstatus & DescOwn) 1861f2148a47SJeff Kirsher break; 1862f2148a47SJeff Kirsher if (txstatus & 0x8000) { 1863fc3e0f8aSFrancois Romieu netif_dbg(rp, tx_done, dev, 1864fc3e0f8aSFrancois Romieu "Transmit error, Tx status %08x\n", txstatus); 1865f2148a47SJeff Kirsher dev->stats.tx_errors++; 1866f2148a47SJeff Kirsher if (txstatus & 0x0400) 1867f2148a47SJeff Kirsher dev->stats.tx_carrier_errors++; 1868f2148a47SJeff Kirsher if (txstatus & 0x0200) 1869f2148a47SJeff Kirsher dev->stats.tx_window_errors++; 1870f2148a47SJeff Kirsher if (txstatus & 0x0100) 1871f2148a47SJeff Kirsher dev->stats.tx_aborted_errors++; 1872f2148a47SJeff Kirsher if (txstatus & 0x0080) 1873f2148a47SJeff Kirsher dev->stats.tx_heartbeat_errors++; 1874f2148a47SJeff Kirsher if (((rp->quirks & rqRhineI) && txstatus & 0x0002) || 1875f2148a47SJeff Kirsher (txstatus & 0x0800) || (txstatus & 0x1000)) { 1876f2148a47SJeff Kirsher dev->stats.tx_fifo_errors++; 1877f2148a47SJeff Kirsher rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn); 1878f2148a47SJeff Kirsher break; /* Keep the skb - we try again */ 1879f2148a47SJeff Kirsher } 1880f2148a47SJeff Kirsher /* Transmitter restarted in 'abnormal' handler. */ 1881f2148a47SJeff Kirsher } else { 1882f2148a47SJeff Kirsher if (rp->quirks & rqRhineI) 1883f2148a47SJeff Kirsher dev->stats.collisions += (txstatus >> 3) & 0x0F; 1884f2148a47SJeff Kirsher else 1885f2148a47SJeff Kirsher dev->stats.collisions += txstatus & 0x0F; 1886fc3e0f8aSFrancois Romieu netif_dbg(rp, tx_done, dev, "collisions: %1.1x:%1.1x\n", 1887fc3e0f8aSFrancois Romieu (txstatus >> 3) & 0xF, txstatus & 0xF); 1888f7b5d1b9SJamie Gloudon 1889f7b5d1b9SJamie Gloudon u64_stats_update_begin(&rp->tx_stats.syncp); 1890f7b5d1b9SJamie Gloudon rp->tx_stats.bytes += rp->tx_skbuff[entry]->len; 1891f7b5d1b9SJamie Gloudon rp->tx_stats.packets++; 1892f7b5d1b9SJamie Gloudon u64_stats_update_end(&rp->tx_stats.syncp); 1893f2148a47SJeff Kirsher } 1894f2148a47SJeff Kirsher /* Free the original skb. */ 1895f2148a47SJeff Kirsher if (rp->tx_skbuff_dma[entry]) { 1896f7630d18SAlexey Charkov dma_unmap_single(hwdev, 1897f2148a47SJeff Kirsher rp->tx_skbuff_dma[entry], 1898f2148a47SJeff Kirsher rp->tx_skbuff[entry]->len, 18994087c4dcSAlexey Charkov DMA_TO_DEVICE); 1900f2148a47SJeff Kirsher } 19014b3afc6eSEric W. 
Biederman dev_consume_skb_any(rp->tx_skbuff[entry]);
1902f2148a47SJeff Kirsher rp->tx_skbuff[entry] = NULL;
1903f2148a47SJeff Kirsher entry = (++rp->dirty_tx) % TX_RING_SIZE;
1904f2148a47SJeff Kirsher }
1905f2148a47SJeff Kirsher if ((rp->cur_tx - rp->dirty_tx) < TX_QUEUE_LEN - 4)
1906f2148a47SJeff Kirsher netif_wake_queue(dev);
1907f2148a47SJeff Kirsher }
1908f2148a47SJeff Kirsher
1909f2148a47SJeff Kirsher /**
1910f2148a47SJeff Kirsher * rhine_get_vlan_tci - extract TCI from Rx data buffer
1911f2148a47SJeff Kirsher * @skb: pointer to sk_buff
1912f2148a47SJeff Kirsher * @data_size: used data area of the buffer including CRC
1913f2148a47SJeff Kirsher *
1914f2148a47SJeff Kirsher * If hardware VLAN tag extraction is enabled and the chip indicates an 802.1Q
1915f2148a47SJeff Kirsher * packet, the extracted 802.1Q header (2 bytes TPID + 2 bytes TCI) is 4-byte
1916f2148a47SJeff Kirsher * aligned following the CRC.
1917f2148a47SJeff Kirsher */
1918f2148a47SJeff Kirsher static inline u16 rhine_get_vlan_tci(struct sk_buff *skb, int data_size)
1919f2148a47SJeff Kirsher {
1920f2148a47SJeff Kirsher u8 *trailer = (u8 *)skb->data + ((data_size + 3) & ~3) + 2;
1921f2148a47SJeff Kirsher return be16_to_cpup((__be16 *)trailer);
1922f2148a47SJeff Kirsher }
1923f2148a47SJeff Kirsher
1924f2148a47SJeff Kirsher /* Process up to limit frames from receive ring */
1925f2148a47SJeff Kirsher static int rhine_rx(struct net_device *dev, int limit)
1926f2148a47SJeff Kirsher {
1927f2148a47SJeff Kirsher struct rhine_private *rp = netdev_priv(dev);
1928f7630d18SAlexey Charkov struct device *hwdev = dev->dev.parent;
1929f2148a47SJeff Kirsher int count;
1930f2148a47SJeff Kirsher int entry = rp->cur_rx % RX_RING_SIZE;
1931f2148a47SJeff Kirsher
1932fc3e0f8aSFrancois Romieu netif_dbg(rp, rx_status, dev, "%s(), entry %d status %08x\n", __func__,
1933fc3e0f8aSFrancois Romieu entry, le32_to_cpu(rp->rx_head_desc->rx_status));
1934f2148a47SJeff Kirsher
1935f2148a47SJeff Kirsher /* If EOP is set on the next entry, it's a new packet. Send it up. */
1936f2148a47SJeff Kirsher for (count = 0; count < limit; ++count) {
1937f2148a47SJeff Kirsher struct rx_desc *desc = rp->rx_head_desc;
1938f2148a47SJeff Kirsher u32 desc_status = le32_to_cpu(desc->rx_status);
1939f2148a47SJeff Kirsher u32 desc_length = le32_to_cpu(desc->desc_length);
1940f2148a47SJeff Kirsher int data_size = desc_status >> 16;
1941f2148a47SJeff Kirsher
1942f2148a47SJeff Kirsher if (desc_status & DescOwn)
1943f2148a47SJeff Kirsher break;
1944f2148a47SJeff Kirsher
1945fc3e0f8aSFrancois Romieu netif_dbg(rp, rx_status, dev, "%s() status %08x\n", __func__,
1946fc3e0f8aSFrancois Romieu desc_status);
1947f2148a47SJeff Kirsher
1948f2148a47SJeff Kirsher if ((desc_status & (RxWholePkt | RxErr)) != RxWholePkt) {
1949f2148a47SJeff Kirsher if ((desc_status & RxWholePkt) != RxWholePkt) {
1950f2148a47SJeff Kirsher netdev_warn(dev,
1951f2148a47SJeff Kirsher "Oversized Ethernet frame spanned multiple buffers, "
1952f2148a47SJeff Kirsher "entry %#x length %d status %08x!\n",
1953f2148a47SJeff Kirsher entry, data_size,
1954f2148a47SJeff Kirsher desc_status);
1955f2148a47SJeff Kirsher netdev_warn(dev,
1956f2148a47SJeff Kirsher "Oversized Ethernet frame %p vs %p\n",
1957f2148a47SJeff Kirsher rp->rx_head_desc,
1958f2148a47SJeff Kirsher &rp->rx_ring[entry]);
1959f2148a47SJeff Kirsher dev->stats.rx_length_errors++;
1960f2148a47SJeff Kirsher } else if (desc_status & RxErr) {
1961f2148a47SJeff Kirsher /* There was an error.
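   The tests below decode the error bits into stats: 0x0030 ->
   rx_length_errors, 0x0048 -> rx_fifo_errors, 0x0004 -> rx_frame_errors,
   0x0002 -> rx_crc_errors (the CRC counter is also updated outside the
   interrupt handler, hence the lock).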
*/ 1962fc3e0f8aSFrancois Romieu netif_dbg(rp, rx_err, dev, 1963fc3e0f8aSFrancois Romieu "%s() Rx error %08x\n", __func__, 1964fc3e0f8aSFrancois Romieu desc_status); 1965f2148a47SJeff Kirsher dev->stats.rx_errors++; 1966f2148a47SJeff Kirsher if (desc_status & 0x0030) 1967f2148a47SJeff Kirsher dev->stats.rx_length_errors++; 1968f2148a47SJeff Kirsher if (desc_status & 0x0048) 1969f2148a47SJeff Kirsher dev->stats.rx_fifo_errors++; 1970f2148a47SJeff Kirsher if (desc_status & 0x0004) 1971f2148a47SJeff Kirsher dev->stats.rx_frame_errors++; 1972f2148a47SJeff Kirsher if (desc_status & 0x0002) { 1973f2148a47SJeff Kirsher /* this can also be updated outside the interrupt handler */ 1974f2148a47SJeff Kirsher spin_lock(&rp->lock); 1975f2148a47SJeff Kirsher dev->stats.rx_crc_errors++; 1976f2148a47SJeff Kirsher spin_unlock(&rp->lock); 1977f2148a47SJeff Kirsher } 1978f2148a47SJeff Kirsher } 1979f2148a47SJeff Kirsher } else { 1980f2148a47SJeff Kirsher struct sk_buff *skb = NULL; 1981f2148a47SJeff Kirsher /* Length should omit the CRC */ 1982f2148a47SJeff Kirsher int pkt_len = data_size - 4; 1983f2148a47SJeff Kirsher u16 vlan_tci = 0; 1984f2148a47SJeff Kirsher 1985f2148a47SJeff Kirsher /* Check if the packet is long enough to accept without 1986f2148a47SJeff Kirsher copying to a minimally-sized skbuff. */ 1987f2148a47SJeff Kirsher if (pkt_len < rx_copybreak) 1988f2148a47SJeff Kirsher skb = netdev_alloc_skb_ip_align(dev, pkt_len); 1989f2148a47SJeff Kirsher if (skb) { 1990f7630d18SAlexey Charkov dma_sync_single_for_cpu(hwdev, 1991f2148a47SJeff Kirsher rp->rx_skbuff_dma[entry], 1992f2148a47SJeff Kirsher rp->rx_buf_sz, 19934087c4dcSAlexey Charkov DMA_FROM_DEVICE); 1994f2148a47SJeff Kirsher 1995f2148a47SJeff Kirsher skb_copy_to_linear_data(skb, 1996f2148a47SJeff Kirsher rp->rx_skbuff[entry]->data, 1997f2148a47SJeff Kirsher pkt_len); 1998f2148a47SJeff Kirsher skb_put(skb, pkt_len); 1999f7630d18SAlexey Charkov dma_sync_single_for_device(hwdev, 2000f2148a47SJeff Kirsher rp->rx_skbuff_dma[entry], 2001f2148a47SJeff Kirsher rp->rx_buf_sz, 20024087c4dcSAlexey Charkov DMA_FROM_DEVICE); 2003f2148a47SJeff Kirsher } else { 2004f2148a47SJeff Kirsher skb = rp->rx_skbuff[entry]; 2005f2148a47SJeff Kirsher if (skb == NULL) { 2006f2148a47SJeff Kirsher netdev_err(dev, "Inconsistent Rx descriptor chain\n"); 2007f2148a47SJeff Kirsher break; 2008f2148a47SJeff Kirsher } 2009f2148a47SJeff Kirsher rp->rx_skbuff[entry] = NULL; 2010f2148a47SJeff Kirsher skb_put(skb, pkt_len); 2011f7630d18SAlexey Charkov dma_unmap_single(hwdev, 2012f2148a47SJeff Kirsher rp->rx_skbuff_dma[entry], 2013f2148a47SJeff Kirsher rp->rx_buf_sz, 20144087c4dcSAlexey Charkov DMA_FROM_DEVICE); 2015f2148a47SJeff Kirsher } 2016f2148a47SJeff Kirsher 2017f2148a47SJeff Kirsher if (unlikely(desc_length & DescTag)) 2018f2148a47SJeff Kirsher vlan_tci = rhine_get_vlan_tci(skb, data_size); 2019f2148a47SJeff Kirsher 2020f2148a47SJeff Kirsher skb->protocol = eth_type_trans(skb, dev); 2021f2148a47SJeff Kirsher 2022f2148a47SJeff Kirsher if (unlikely(desc_length & DescTag)) 202386a9bad3SPatrick McHardy __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci); 2024f2148a47SJeff Kirsher netif_receive_skb(skb); 2025f7b5d1b9SJamie Gloudon 2026f7b5d1b9SJamie Gloudon u64_stats_update_begin(&rp->rx_stats.syncp); 2027f7b5d1b9SJamie Gloudon rp->rx_stats.bytes += pkt_len; 2028f7b5d1b9SJamie Gloudon rp->rx_stats.packets++; 2029f7b5d1b9SJamie Gloudon u64_stats_update_end(&rp->rx_stats.syncp); 2030f2148a47SJeff Kirsher } 2031f2148a47SJeff Kirsher entry = (++rp->cur_rx) % RX_RING_SIZE; 
2032f2148a47SJeff Kirsher rp->rx_head_desc = &rp->rx_ring[entry]; 2033f2148a47SJeff Kirsher } 2034f2148a47SJeff Kirsher 2035f2148a47SJeff Kirsher /* Refill the Rx ring buffers. */ 2036f2148a47SJeff Kirsher for (; rp->cur_rx - rp->dirty_rx > 0; rp->dirty_rx++) { 2037f2148a47SJeff Kirsher struct sk_buff *skb; 2038f2148a47SJeff Kirsher entry = rp->dirty_rx % RX_RING_SIZE; 2039f2148a47SJeff Kirsher if (rp->rx_skbuff[entry] == NULL) { 2040f2148a47SJeff Kirsher skb = netdev_alloc_skb(dev, rp->rx_buf_sz); 2041f2148a47SJeff Kirsher rp->rx_skbuff[entry] = skb; 2042f2148a47SJeff Kirsher if (skb == NULL) 2043f2148a47SJeff Kirsher break; /* Better luck next round. */ 2044f2148a47SJeff Kirsher rp->rx_skbuff_dma[entry] = 2045f7630d18SAlexey Charkov dma_map_single(hwdev, skb->data, 2046f2148a47SJeff Kirsher rp->rx_buf_sz, 20474087c4dcSAlexey Charkov DMA_FROM_DEVICE); 2048f7630d18SAlexey Charkov if (dma_mapping_error(hwdev, 2049f7630d18SAlexey Charkov rp->rx_skbuff_dma[entry])) { 20509b4fe5fbSNeil Horman dev_kfree_skb(skb); 20519b4fe5fbSNeil Horman rp->rx_skbuff_dma[entry] = 0; 20529b4fe5fbSNeil Horman break; 20539b4fe5fbSNeil Horman } 2054f2148a47SJeff Kirsher rp->rx_ring[entry].addr = cpu_to_le32(rp->rx_skbuff_dma[entry]); 2055f2148a47SJeff Kirsher } 2056f2148a47SJeff Kirsher rp->rx_ring[entry].rx_status = cpu_to_le32(DescOwn); 2057f2148a47SJeff Kirsher } 2058f2148a47SJeff Kirsher 2059f2148a47SJeff Kirsher return count; 2060f2148a47SJeff Kirsher } 2061f2148a47SJeff Kirsher 2062f2148a47SJeff Kirsher static void rhine_restart_tx(struct net_device *dev) { 2063f2148a47SJeff Kirsher struct rhine_private *rp = netdev_priv(dev); 2064f2148a47SJeff Kirsher void __iomem *ioaddr = rp->base; 2065f2148a47SJeff Kirsher int entry = rp->dirty_tx % TX_RING_SIZE; 2066f2148a47SJeff Kirsher u32 intr_status; 2067f2148a47SJeff Kirsher 2068f2148a47SJeff Kirsher /* 2069f2148a47SJeff Kirsher * If new errors occurred, we need to sort them out before doing Tx. 2070f2148a47SJeff Kirsher * In that case the ISR will be back here RSN anyway. 2071f2148a47SJeff Kirsher */ 2072a20a28bcSFrancois Romieu intr_status = rhine_get_events(rp); 2073f2148a47SJeff Kirsher 2074f2148a47SJeff Kirsher if ((intr_status & IntrTxErrSummary) == 0) { 2075f2148a47SJeff Kirsher 2076f2148a47SJeff Kirsher /* We know better than the chip where it should continue. 
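   Reloading TxRingPtr with the dirty_tx descriptor below makes the DMA
   engine resume from the oldest frame the driver has not yet reclaimed,
   rather than from wherever the chip's internal pointer stopped.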
*/ 2077f2148a47SJeff Kirsher iowrite32(rp->tx_ring_dma + entry * sizeof(struct tx_desc), 2078f2148a47SJeff Kirsher ioaddr + TxRingPtr); 2079f2148a47SJeff Kirsher 2080f2148a47SJeff Kirsher iowrite8(ioread8(ioaddr + ChipCmd) | CmdTxOn, 2081f2148a47SJeff Kirsher ioaddr + ChipCmd); 2082f2148a47SJeff Kirsher 2083f2148a47SJeff Kirsher if (rp->tx_ring[entry].desc_length & cpu_to_le32(0x020000)) 2084f2148a47SJeff Kirsher /* Tx queues are bits 7-0 (first Tx queue: bit 7) */ 2085f2148a47SJeff Kirsher BYTE_REG_BITS_ON(1 << 7, ioaddr + TQWake); 2086f2148a47SJeff Kirsher 2087f2148a47SJeff Kirsher iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand, 2088f2148a47SJeff Kirsher ioaddr + ChipCmd1); 2089f2148a47SJeff Kirsher IOSYNC; 2090f2148a47SJeff Kirsher } 2091f2148a47SJeff Kirsher else { 2092f2148a47SJeff Kirsher /* This should never happen */ 2093fc3e0f8aSFrancois Romieu netif_warn(rp, tx_err, dev, "another error occurred %08x\n", 2094fc3e0f8aSFrancois Romieu intr_status); 2095f2148a47SJeff Kirsher } 2096f2148a47SJeff Kirsher 2097f2148a47SJeff Kirsher } 2098f2148a47SJeff Kirsher 20997ab87ff4SFrancois Romieu static void rhine_slow_event_task(struct work_struct *work) 2100f2148a47SJeff Kirsher { 21017ab87ff4SFrancois Romieu struct rhine_private *rp = 21027ab87ff4SFrancois Romieu container_of(work, struct rhine_private, slow_event_task); 21037ab87ff4SFrancois Romieu struct net_device *dev = rp->dev; 21047ab87ff4SFrancois Romieu u32 intr_status; 2105f2148a47SJeff Kirsher 21067ab87ff4SFrancois Romieu mutex_lock(&rp->task_lock); 21077ab87ff4SFrancois Romieu 21087ab87ff4SFrancois Romieu if (!rp->task_enable) 21097ab87ff4SFrancois Romieu goto out_unlock; 21107ab87ff4SFrancois Romieu 21117ab87ff4SFrancois Romieu intr_status = rhine_get_events(rp); 21127ab87ff4SFrancois Romieu rhine_ack_events(rp, intr_status & RHINE_EVENT_SLOW); 2113f2148a47SJeff Kirsher 2114f2148a47SJeff Kirsher if (intr_status & IntrLinkChange) 2115f2148a47SJeff Kirsher rhine_check_media(dev, 0); 2116f2148a47SJeff Kirsher 2117fc3e0f8aSFrancois Romieu if (intr_status & IntrPCIErr) 2118fc3e0f8aSFrancois Romieu netif_warn(rp, hw, dev, "PCI error\n"); 2119fc3e0f8aSFrancois Romieu 2120559bcac3SDavid S. Miller iowrite16(RHINE_EVENT & 0xffff, rp->base + IntrEnable); 2121f2148a47SJeff Kirsher 21227ab87ff4SFrancois Romieu out_unlock: 21237ab87ff4SFrancois Romieu mutex_unlock(&rp->task_lock); 2124f2148a47SJeff Kirsher } 2125f2148a47SJeff Kirsher 2126f7b5d1b9SJamie Gloudon static struct rtnl_link_stats64 * 2127f7b5d1b9SJamie Gloudon rhine_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) 2128f2148a47SJeff Kirsher { 2129f2148a47SJeff Kirsher struct rhine_private *rp = netdev_priv(dev); 2130f7b5d1b9SJamie Gloudon unsigned int start; 2131f2148a47SJeff Kirsher 21327ab87ff4SFrancois Romieu spin_lock_bh(&rp->lock); 21337ab87ff4SFrancois Romieu rhine_update_rx_crc_and_missed_errord(rp); 21347ab87ff4SFrancois Romieu spin_unlock_bh(&rp->lock); 2135f2148a47SJeff Kirsher 2136f7b5d1b9SJamie Gloudon netdev_stats_to_stats64(stats, &dev->stats); 2137f7b5d1b9SJamie Gloudon 2138f7b5d1b9SJamie Gloudon do { 213957a7744eSEric W. Biederman start = u64_stats_fetch_begin_irq(&rp->rx_stats.syncp); 2140f7b5d1b9SJamie Gloudon stats->rx_packets = rp->rx_stats.packets; 2141f7b5d1b9SJamie Gloudon stats->rx_bytes = rp->rx_stats.bytes; 214257a7744eSEric W. Biederman } while (u64_stats_fetch_retry_irq(&rp->rx_stats.syncp, start)); 2143f7b5d1b9SJamie Gloudon 2144f7b5d1b9SJamie Gloudon do { 214557a7744eSEric W. 
Biederman start = u64_stats_fetch_begin_irq(&rp->tx_stats.syncp); 2146f7b5d1b9SJamie Gloudon stats->tx_packets = rp->tx_stats.packets; 2147f7b5d1b9SJamie Gloudon stats->tx_bytes = rp->tx_stats.bytes; 214857a7744eSEric W. Biederman } while (u64_stats_fetch_retry_irq(&rp->tx_stats.syncp, start)); 2149f7b5d1b9SJamie Gloudon 2150f7b5d1b9SJamie Gloudon return stats; 2151f2148a47SJeff Kirsher } 2152f2148a47SJeff Kirsher 2153f2148a47SJeff Kirsher static void rhine_set_rx_mode(struct net_device *dev) 2154f2148a47SJeff Kirsher { 2155f2148a47SJeff Kirsher struct rhine_private *rp = netdev_priv(dev); 2156f2148a47SJeff Kirsher void __iomem *ioaddr = rp->base; 2157f2148a47SJeff Kirsher u32 mc_filter[2]; /* Multicast hash filter */ 2158f2148a47SJeff Kirsher u8 rx_mode = 0x0C; /* Note: 0x02=accept runt, 0x01=accept errs */ 2159f2148a47SJeff Kirsher struct netdev_hw_addr *ha; 2160f2148a47SJeff Kirsher 2161f2148a47SJeff Kirsher if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */ 2162f2148a47SJeff Kirsher rx_mode = 0x1C; 2163f2148a47SJeff Kirsher iowrite32(0xffffffff, ioaddr + MulticastFilter0); 2164f2148a47SJeff Kirsher iowrite32(0xffffffff, ioaddr + MulticastFilter1); 2165f2148a47SJeff Kirsher } else if ((netdev_mc_count(dev) > multicast_filter_limit) || 2166f2148a47SJeff Kirsher (dev->flags & IFF_ALLMULTI)) { 2167f2148a47SJeff Kirsher /* Too many to match, or accept all multicasts. */ 2168f2148a47SJeff Kirsher iowrite32(0xffffffff, ioaddr + MulticastFilter0); 2169f2148a47SJeff Kirsher iowrite32(0xffffffff, ioaddr + MulticastFilter1); 2170*ca8b6e04SAlexey Charkov } else if (rp->quirks & rqMgmt) { 2171f2148a47SJeff Kirsher int i = 0; 2172f2148a47SJeff Kirsher u32 mCAMmask = 0; /* 32 mCAMs (6105M and better) */ 2173f2148a47SJeff Kirsher netdev_for_each_mc_addr(ha, dev) { 2174f2148a47SJeff Kirsher if (i == MCAM_SIZE) 2175f2148a47SJeff Kirsher break; 2176f2148a47SJeff Kirsher rhine_set_cam(ioaddr, i, ha->addr); 2177f2148a47SJeff Kirsher mCAMmask |= 1 << i; 2178f2148a47SJeff Kirsher i++; 2179f2148a47SJeff Kirsher } 2180f2148a47SJeff Kirsher rhine_set_cam_mask(ioaddr, mCAMmask); 2181f2148a47SJeff Kirsher } else { 2182f2148a47SJeff Kirsher memset(mc_filter, 0, sizeof(mc_filter)); 2183f2148a47SJeff Kirsher netdev_for_each_mc_addr(ha, dev) { 2184f2148a47SJeff Kirsher int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26; 2185f2148a47SJeff Kirsher 2186f2148a47SJeff Kirsher mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31); 2187f2148a47SJeff Kirsher } 2188f2148a47SJeff Kirsher iowrite32(mc_filter[0], ioaddr + MulticastFilter0); 2189f2148a47SJeff Kirsher iowrite32(mc_filter[1], ioaddr + MulticastFilter1); 2190f2148a47SJeff Kirsher } 2191f2148a47SJeff Kirsher /* enable/disable VLAN receive filtering */ 2192*ca8b6e04SAlexey Charkov if (rp->quirks & rqMgmt) { 2193f2148a47SJeff Kirsher if (dev->flags & IFF_PROMISC) 2194f2148a47SJeff Kirsher BYTE_REG_BITS_OFF(BCR1_VIDFR, ioaddr + PCIBusConfig1); 2195f2148a47SJeff Kirsher else 2196f2148a47SJeff Kirsher BYTE_REG_BITS_ON(BCR1_VIDFR, ioaddr + PCIBusConfig1); 2197f2148a47SJeff Kirsher } 2198f2148a47SJeff Kirsher BYTE_REG_BITS_ON(rx_mode, ioaddr + RxConfig); 2199f2148a47SJeff Kirsher } 2200f2148a47SJeff Kirsher 2201f2148a47SJeff Kirsher static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) 2202f2148a47SJeff Kirsher { 2203f7630d18SAlexey Charkov struct device *hwdev = dev->dev.parent; 2204f2148a47SJeff Kirsher 220523020ab3SRick Jones strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); 220623020ab3SRick Jones strlcpy(info->version, DRV_VERSION, 
sizeof(info->version)); 2207f7630d18SAlexey Charkov strlcpy(info->bus_info, dev_name(hwdev), sizeof(info->bus_info)); 2208f2148a47SJeff Kirsher } 2209f2148a47SJeff Kirsher 2210f2148a47SJeff Kirsher static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 2211f2148a47SJeff Kirsher { 2212f2148a47SJeff Kirsher struct rhine_private *rp = netdev_priv(dev); 2213f2148a47SJeff Kirsher int rc; 2214f2148a47SJeff Kirsher 22157ab87ff4SFrancois Romieu mutex_lock(&rp->task_lock); 2216f2148a47SJeff Kirsher rc = mii_ethtool_gset(&rp->mii_if, cmd); 22177ab87ff4SFrancois Romieu mutex_unlock(&rp->task_lock); 2218f2148a47SJeff Kirsher 2219f2148a47SJeff Kirsher return rc; 2220f2148a47SJeff Kirsher } 2221f2148a47SJeff Kirsher 2222f2148a47SJeff Kirsher static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) 2223f2148a47SJeff Kirsher { 2224f2148a47SJeff Kirsher struct rhine_private *rp = netdev_priv(dev); 2225f2148a47SJeff Kirsher int rc; 2226f2148a47SJeff Kirsher 22277ab87ff4SFrancois Romieu mutex_lock(&rp->task_lock); 2228f2148a47SJeff Kirsher rc = mii_ethtool_sset(&rp->mii_if, cmd); 2229f2148a47SJeff Kirsher rhine_set_carrier(&rp->mii_if); 22307ab87ff4SFrancois Romieu mutex_unlock(&rp->task_lock); 2231f2148a47SJeff Kirsher 2232f2148a47SJeff Kirsher return rc; 2233f2148a47SJeff Kirsher } 2234f2148a47SJeff Kirsher 2235f2148a47SJeff Kirsher static int netdev_nway_reset(struct net_device *dev) 2236f2148a47SJeff Kirsher { 2237f2148a47SJeff Kirsher struct rhine_private *rp = netdev_priv(dev); 2238f2148a47SJeff Kirsher 2239f2148a47SJeff Kirsher return mii_nway_restart(&rp->mii_if); 2240f2148a47SJeff Kirsher } 2241f2148a47SJeff Kirsher 2242f2148a47SJeff Kirsher static u32 netdev_get_link(struct net_device *dev) 2243f2148a47SJeff Kirsher { 2244f2148a47SJeff Kirsher struct rhine_private *rp = netdev_priv(dev); 2245f2148a47SJeff Kirsher 2246f2148a47SJeff Kirsher return mii_link_ok(&rp->mii_if); 2247f2148a47SJeff Kirsher } 2248f2148a47SJeff Kirsher 2249f2148a47SJeff Kirsher static u32 netdev_get_msglevel(struct net_device *dev) 2250f2148a47SJeff Kirsher { 2251fc3e0f8aSFrancois Romieu struct rhine_private *rp = netdev_priv(dev); 2252fc3e0f8aSFrancois Romieu 2253fc3e0f8aSFrancois Romieu return rp->msg_enable; 2254f2148a47SJeff Kirsher } 2255f2148a47SJeff Kirsher 2256f2148a47SJeff Kirsher static void netdev_set_msglevel(struct net_device *dev, u32 value) 2257f2148a47SJeff Kirsher { 2258fc3e0f8aSFrancois Romieu struct rhine_private *rp = netdev_priv(dev); 2259fc3e0f8aSFrancois Romieu 2260fc3e0f8aSFrancois Romieu rp->msg_enable = value; 2261f2148a47SJeff Kirsher } 2262f2148a47SJeff Kirsher 2263f2148a47SJeff Kirsher static void rhine_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) 2264f2148a47SJeff Kirsher { 2265f2148a47SJeff Kirsher struct rhine_private *rp = netdev_priv(dev); 2266f2148a47SJeff Kirsher 2267f2148a47SJeff Kirsher if (!(rp->quirks & rqWOL)) 2268f2148a47SJeff Kirsher return; 2269f2148a47SJeff Kirsher 2270f2148a47SJeff Kirsher spin_lock_irq(&rp->lock); 2271f2148a47SJeff Kirsher wol->supported = WAKE_PHY | WAKE_MAGIC | 2272f2148a47SJeff Kirsher WAKE_UCAST | WAKE_MCAST | WAKE_BCAST; /* Untested */ 2273f2148a47SJeff Kirsher wol->wolopts = rp->wolopts; 2274f2148a47SJeff Kirsher spin_unlock_irq(&rp->lock); 2275f2148a47SJeff Kirsher } 2276f2148a47SJeff Kirsher 2277f2148a47SJeff Kirsher static int rhine_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) 2278f2148a47SJeff Kirsher { 2279f2148a47SJeff Kirsher struct rhine_private *rp = netdev_priv(dev); 
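	/*
	 * Note: this handler only validates and latches the requested wake
	 * mask under rp->lock; no chip registers are written here. The
	 * latched rp->wolopts is applied to the WOL registers later, in
	 * rhine_shutdown_pci() at power-off time, and chips without the
	 * rqWOL quirk reject every request with -EINVAL.
	 */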
2280f2148a47SJeff Kirsher u32 support = WAKE_PHY | WAKE_MAGIC | 2281f2148a47SJeff Kirsher WAKE_UCAST | WAKE_MCAST | WAKE_BCAST; /* Untested */ 2282f2148a47SJeff Kirsher 2283f2148a47SJeff Kirsher if (!(rp->quirks & rqWOL)) 2284f2148a47SJeff Kirsher return -EINVAL; 2285f2148a47SJeff Kirsher 2286f2148a47SJeff Kirsher if (wol->wolopts & ~support) 2287f2148a47SJeff Kirsher return -EINVAL; 2288f2148a47SJeff Kirsher 2289f2148a47SJeff Kirsher spin_lock_irq(&rp->lock); 2290f2148a47SJeff Kirsher rp->wolopts = wol->wolopts; 2291f2148a47SJeff Kirsher spin_unlock_irq(&rp->lock); 2292f2148a47SJeff Kirsher 2293f2148a47SJeff Kirsher return 0; 2294f2148a47SJeff Kirsher } 2295f2148a47SJeff Kirsher 2296f2148a47SJeff Kirsher static const struct ethtool_ops netdev_ethtool_ops = { 2297f2148a47SJeff Kirsher .get_drvinfo = netdev_get_drvinfo, 2298f2148a47SJeff Kirsher .get_settings = netdev_get_settings, 2299f2148a47SJeff Kirsher .set_settings = netdev_set_settings, 2300f2148a47SJeff Kirsher .nway_reset = netdev_nway_reset, 2301f2148a47SJeff Kirsher .get_link = netdev_get_link, 2302f2148a47SJeff Kirsher .get_msglevel = netdev_get_msglevel, 2303f2148a47SJeff Kirsher .set_msglevel = netdev_set_msglevel, 2304f2148a47SJeff Kirsher .get_wol = rhine_get_wol, 2305f2148a47SJeff Kirsher .set_wol = rhine_set_wol, 2306f2148a47SJeff Kirsher }; 2307f2148a47SJeff Kirsher 2308f2148a47SJeff Kirsher static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 2309f2148a47SJeff Kirsher { 2310f2148a47SJeff Kirsher struct rhine_private *rp = netdev_priv(dev); 2311f2148a47SJeff Kirsher int rc; 2312f2148a47SJeff Kirsher 2313f2148a47SJeff Kirsher if (!netif_running(dev)) 2314f2148a47SJeff Kirsher return -EINVAL; 2315f2148a47SJeff Kirsher 23167ab87ff4SFrancois Romieu mutex_lock(&rp->task_lock); 2317f2148a47SJeff Kirsher rc = generic_mii_ioctl(&rp->mii_if, if_mii(rq), cmd, NULL); 2318f2148a47SJeff Kirsher rhine_set_carrier(&rp->mii_if); 23197ab87ff4SFrancois Romieu mutex_unlock(&rp->task_lock); 2320f2148a47SJeff Kirsher 2321f2148a47SJeff Kirsher return rc; 2322f2148a47SJeff Kirsher } 2323f2148a47SJeff Kirsher 2324f2148a47SJeff Kirsher static int rhine_close(struct net_device *dev) 2325f2148a47SJeff Kirsher { 2326f2148a47SJeff Kirsher struct rhine_private *rp = netdev_priv(dev); 2327f2148a47SJeff Kirsher void __iomem *ioaddr = rp->base; 2328f2148a47SJeff Kirsher 23297ab87ff4SFrancois Romieu rhine_task_disable(rp); 2330f2148a47SJeff Kirsher napi_disable(&rp->napi); 2331f2148a47SJeff Kirsher netif_stop_queue(dev); 2332f2148a47SJeff Kirsher 2333fc3e0f8aSFrancois Romieu netif_dbg(rp, ifdown, dev, "Shutting down ethercard, status was %04x\n", 2334f2148a47SJeff Kirsher ioread16(ioaddr + ChipCmd)); 2335f2148a47SJeff Kirsher 2336f2148a47SJeff Kirsher /* Switch to loopback mode to avoid hardware races. */ 2337f2148a47SJeff Kirsher iowrite8(rp->tx_thresh | 0x02, ioaddr + TxConfig); 2338f2148a47SJeff Kirsher 23397ab87ff4SFrancois Romieu rhine_irq_disable(rp); 2340f2148a47SJeff Kirsher 2341f2148a47SJeff Kirsher /* Stop the chip's Tx and Rx processes. 
*/ 2342f2148a47SJeff Kirsher iowrite16(CmdStop, ioaddr + ChipCmd); 2343f2148a47SJeff Kirsher 2344f7630d18SAlexey Charkov free_irq(rp->irq, dev); 2345f2148a47SJeff Kirsher free_rbufs(dev); 2346f2148a47SJeff Kirsher free_tbufs(dev); 2347f2148a47SJeff Kirsher free_ring(dev); 2348f2148a47SJeff Kirsher 2349f2148a47SJeff Kirsher return 0; 2350f2148a47SJeff Kirsher } 2351f2148a47SJeff Kirsher 2352f2148a47SJeff Kirsher 23532d283862SAlexey Charkov static void rhine_remove_one_pci(struct pci_dev *pdev) 2354f2148a47SJeff Kirsher { 2355f2148a47SJeff Kirsher struct net_device *dev = pci_get_drvdata(pdev); 2356f2148a47SJeff Kirsher struct rhine_private *rp = netdev_priv(dev); 2357f2148a47SJeff Kirsher 2358f2148a47SJeff Kirsher unregister_netdev(dev); 2359f2148a47SJeff Kirsher 2360f2148a47SJeff Kirsher pci_iounmap(pdev, rp->base); 2361f2148a47SJeff Kirsher pci_release_regions(pdev); 2362f2148a47SJeff Kirsher 2363f2148a47SJeff Kirsher free_netdev(dev); 2364f2148a47SJeff Kirsher pci_disable_device(pdev); 2365f2148a47SJeff Kirsher } 2366f2148a47SJeff Kirsher 23672d283862SAlexey Charkov static int rhine_remove_one_platform(struct platform_device *pdev) 23682d283862SAlexey Charkov { 23692d283862SAlexey Charkov struct net_device *dev = platform_get_drvdata(pdev); 23702d283862SAlexey Charkov struct rhine_private *rp = netdev_priv(dev); 23712d283862SAlexey Charkov 23722d283862SAlexey Charkov unregister_netdev(dev); 23732d283862SAlexey Charkov 23742d283862SAlexey Charkov iounmap(rp->base); 23752d283862SAlexey Charkov 23762d283862SAlexey Charkov free_netdev(dev); 23772d283862SAlexey Charkov 23782d283862SAlexey Charkov return 0; 23792d283862SAlexey Charkov } 23802d283862SAlexey Charkov 23812d283862SAlexey Charkov static void rhine_shutdown_pci(struct pci_dev *pdev) 2382f2148a47SJeff Kirsher { 2383f2148a47SJeff Kirsher struct net_device *dev = pci_get_drvdata(pdev); 2384f2148a47SJeff Kirsher struct rhine_private *rp = netdev_priv(dev); 2385f2148a47SJeff Kirsher void __iomem *ioaddr = rp->base; 2386f2148a47SJeff Kirsher 2387f2148a47SJeff Kirsher if (!(rp->quirks & rqWOL)) 2388f2148a47SJeff Kirsher return; /* Nothing to do for non-WOL adapters */ 2389f2148a47SJeff Kirsher 2390f2148a47SJeff Kirsher rhine_power_init(dev); 2391f2148a47SJeff Kirsher 2392f2148a47SJeff Kirsher /* Make sure we use pattern 0, 1 and not 4, 5 */ 2393f2148a47SJeff Kirsher if (rp->quirks & rq6patterns) 2394f2148a47SJeff Kirsher iowrite8(0x04, ioaddr + WOLcgClr); 2395f2148a47SJeff Kirsher 23967ab87ff4SFrancois Romieu spin_lock(&rp->lock); 23977ab87ff4SFrancois Romieu 2398f2148a47SJeff Kirsher if (rp->wolopts & WAKE_MAGIC) { 2399f2148a47SJeff Kirsher iowrite8(WOLmagic, ioaddr + WOLcrSet); 2400f2148a47SJeff Kirsher /* 2401f2148a47SJeff Kirsher * Turn EEPROM-controlled wake-up back on -- some hardware may 2402f2148a47SJeff Kirsher * not cooperate otherwise. 
2403f2148a47SJeff Kirsher */ 2404f2148a47SJeff Kirsher iowrite8(ioread8(ioaddr + ConfigA) | 0x03, ioaddr + ConfigA); 2405f2148a47SJeff Kirsher } 2406f2148a47SJeff Kirsher 2407f2148a47SJeff Kirsher if (rp->wolopts & (WAKE_BCAST|WAKE_MCAST)) 2408f2148a47SJeff Kirsher iowrite8(WOLbmcast, ioaddr + WOLcgSet); 2409f2148a47SJeff Kirsher 2410f2148a47SJeff Kirsher if (rp->wolopts & WAKE_PHY) 2411f2148a47SJeff Kirsher iowrite8(WOLlnkon | WOLlnkoff, ioaddr + WOLcrSet); 2412f2148a47SJeff Kirsher 2413f2148a47SJeff Kirsher if (rp->wolopts & WAKE_UCAST) 2414f2148a47SJeff Kirsher iowrite8(WOLucast, ioaddr + WOLcrSet); 2415f2148a47SJeff Kirsher 2416f2148a47SJeff Kirsher if (rp->wolopts) { 2417f2148a47SJeff Kirsher /* Enable legacy WOL (for old motherboards) */ 2418f2148a47SJeff Kirsher iowrite8(0x01, ioaddr + PwcfgSet); 2419f2148a47SJeff Kirsher iowrite8(ioread8(ioaddr + StickyHW) | 0x04, ioaddr + StickyHW); 2420f2148a47SJeff Kirsher } 2421f2148a47SJeff Kirsher 24227ab87ff4SFrancois Romieu spin_unlock(&rp->lock); 24237ab87ff4SFrancois Romieu 2424e92b9b3bSFrancois Romieu if (system_state == SYSTEM_POWER_OFF && !avoid_D3) { 2425f2148a47SJeff Kirsher iowrite8(ioread8(ioaddr + StickyHW) | 0x03, ioaddr + StickyHW); 2426f2148a47SJeff Kirsher 2427e92b9b3bSFrancois Romieu pci_wake_from_d3(pdev, true); 2428e92b9b3bSFrancois Romieu pci_set_power_state(pdev, PCI_D3hot); 2429e92b9b3bSFrancois Romieu } 2430f2148a47SJeff Kirsher } 2431f2148a47SJeff Kirsher 2432e92b9b3bSFrancois Romieu #ifdef CONFIG_PM_SLEEP 2433e92b9b3bSFrancois Romieu static int rhine_suspend(struct device *device) 2434f2148a47SJeff Kirsher { 2435f7630d18SAlexey Charkov struct net_device *dev = dev_get_drvdata(device); 2436f2148a47SJeff Kirsher struct rhine_private *rp = netdev_priv(dev); 2437f2148a47SJeff Kirsher 2438f2148a47SJeff Kirsher if (!netif_running(dev)) 2439f2148a47SJeff Kirsher return 0; 2440f2148a47SJeff Kirsher 24417ab87ff4SFrancois Romieu rhine_task_disable(rp); 24427ab87ff4SFrancois Romieu rhine_irq_disable(rp); 2443f2148a47SJeff Kirsher napi_disable(&rp->napi); 2444f2148a47SJeff Kirsher 2445f2148a47SJeff Kirsher netif_device_detach(dev); 2446f2148a47SJeff Kirsher 2447f7630d18SAlexey Charkov if (dev_is_pci(device)) 24482d283862SAlexey Charkov rhine_shutdown_pci(to_pci_dev(device)); 2449f2148a47SJeff Kirsher 2450f2148a47SJeff Kirsher return 0; 2451f2148a47SJeff Kirsher } 2452f2148a47SJeff Kirsher 2453e92b9b3bSFrancois Romieu static int rhine_resume(struct device *device) 2454f2148a47SJeff Kirsher { 2455f7630d18SAlexey Charkov struct net_device *dev = dev_get_drvdata(device); 2456f2148a47SJeff Kirsher struct rhine_private *rp = netdev_priv(dev); 2457f2148a47SJeff Kirsher 2458f2148a47SJeff Kirsher if (!netif_running(dev)) 2459f2148a47SJeff Kirsher return 0; 2460f2148a47SJeff Kirsher 2461f2148a47SJeff Kirsher #ifdef USE_MMIO 2462f2148a47SJeff Kirsher enable_mmio(rp->pioaddr, rp->quirks); 2463f2148a47SJeff Kirsher #endif 2464f2148a47SJeff Kirsher rhine_power_init(dev); 2465f2148a47SJeff Kirsher free_tbufs(dev); 2466f2148a47SJeff Kirsher free_rbufs(dev); 2467f2148a47SJeff Kirsher alloc_tbufs(dev); 2468f2148a47SJeff Kirsher alloc_rbufs(dev); 24697ab87ff4SFrancois Romieu rhine_task_enable(rp); 24707ab87ff4SFrancois Romieu spin_lock_bh(&rp->lock); 2471f2148a47SJeff Kirsher init_registers(dev); 24727ab87ff4SFrancois Romieu spin_unlock_bh(&rp->lock); 2473f2148a47SJeff Kirsher 2474f2148a47SJeff Kirsher netif_device_attach(dev); 2475f2148a47SJeff Kirsher 2476f2148a47SJeff Kirsher return 0; 2477f2148a47SJeff Kirsher } 2478e92b9b3bSFrancois Romieu 
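/*
 * For reference, SIMPLE_DEV_PM_OPS() from <linux/pm.h> expands to roughly
 * the structure sketched below (paraphrased, not copied from a particular
 * kernel version): the same suspend/resume pair is wired to every
 * system-sleep transition, covering suspend-to-RAM as well as the
 * hibernation freeze/thaw/poweroff/restore steps.
 */
#if 0
static const struct dev_pm_ops rhine_pm_ops = {
	.suspend	= rhine_suspend,
	.resume		= rhine_resume,
	.freeze		= rhine_suspend,	/* hibernation image creation */
	.thaw		= rhine_resume,
	.poweroff	= rhine_suspend,
	.restore	= rhine_resume,		/* boot after hibernation */
};
#endif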
2479e92b9b3bSFrancois Romieu static SIMPLE_DEV_PM_OPS(rhine_pm_ops, rhine_suspend, rhine_resume); 2480e92b9b3bSFrancois Romieu #define RHINE_PM_OPS (&rhine_pm_ops) 2481e92b9b3bSFrancois Romieu 2482e92b9b3bSFrancois Romieu #else 2483e92b9b3bSFrancois Romieu 2484e92b9b3bSFrancois Romieu #define RHINE_PM_OPS NULL 2485e92b9b3bSFrancois Romieu 2486e92b9b3bSFrancois Romieu #endif /* !CONFIG_PM_SLEEP */ 2487f2148a47SJeff Kirsher 24882d283862SAlexey Charkov static struct pci_driver rhine_driver_pci = { 2489f2148a47SJeff Kirsher .name = DRV_NAME, 2490f2148a47SJeff Kirsher .id_table = rhine_pci_tbl, 24912d283862SAlexey Charkov .probe = rhine_init_one_pci, 24922d283862SAlexey Charkov .remove = rhine_remove_one_pci, 24932d283862SAlexey Charkov .shutdown = rhine_shutdown_pci, 2494e92b9b3bSFrancois Romieu .driver.pm = RHINE_PM_OPS, 2495f2148a47SJeff Kirsher }; 2496f2148a47SJeff Kirsher 24972d283862SAlexey Charkov static struct platform_driver rhine_driver_platform = { 24982d283862SAlexey Charkov .probe = rhine_init_one_platform, 24992d283862SAlexey Charkov .remove = rhine_remove_one_platform, 25002d283862SAlexey Charkov .driver = { 25012d283862SAlexey Charkov .name = DRV_NAME, 25022d283862SAlexey Charkov .owner = THIS_MODULE, 25032d283862SAlexey Charkov .of_match_table = rhine_of_tbl, 25042d283862SAlexey Charkov .pm = RHINE_PM_OPS, 25052d283862SAlexey Charkov } 25062d283862SAlexey Charkov }; 25072d283862SAlexey Charkov 250877273eaaSSachin Kamat static struct dmi_system_id rhine_dmi_table[] __initdata = { 2509f2148a47SJeff Kirsher { 2510f2148a47SJeff Kirsher .ident = "EPIA-M", 2511f2148a47SJeff Kirsher .matches = { 2512f2148a47SJeff Kirsher DMI_MATCH(DMI_BIOS_VENDOR, "Award Software International, Inc."), 2513f2148a47SJeff Kirsher DMI_MATCH(DMI_BIOS_VERSION, "6.00 PG"), 2514f2148a47SJeff Kirsher }, 2515f2148a47SJeff Kirsher }, 2516f2148a47SJeff Kirsher { 2517f2148a47SJeff Kirsher .ident = "KV7", 2518f2148a47SJeff Kirsher .matches = { 2519f2148a47SJeff Kirsher DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies, LTD"), 2520f2148a47SJeff Kirsher DMI_MATCH(DMI_BIOS_VERSION, "6.00 PG"), 2521f2148a47SJeff Kirsher }, 2522f2148a47SJeff Kirsher }, 2523f2148a47SJeff Kirsher { NULL } 2524f2148a47SJeff Kirsher }; 2525f2148a47SJeff Kirsher 2526f2148a47SJeff Kirsher static int __init rhine_init(void) 2527f2148a47SJeff Kirsher { 25282d283862SAlexey Charkov int ret_pci, ret_platform; 25292d283862SAlexey Charkov 2530f2148a47SJeff Kirsher /* when a module, this is printed whether or not devices are found in probe */ 2531f2148a47SJeff Kirsher #ifdef MODULE 2532f2148a47SJeff Kirsher pr_info("%s\n", version); 2533f2148a47SJeff Kirsher #endif 2534f2148a47SJeff Kirsher if (dmi_check_system(rhine_dmi_table)) { 2535f2148a47SJeff Kirsher /* these BIOSes fail at PXE boot if chip is in D3 */ 2536eb939922SRusty Russell avoid_D3 = true; 2537f2148a47SJeff Kirsher pr_warn("Broken BIOS detected, avoid_D3 enabled\n"); 2538f2148a47SJeff Kirsher } 2539f2148a47SJeff Kirsher else if (avoid_D3) 2540f2148a47SJeff Kirsher pr_info("avoid_D3 set\n"); 2541f2148a47SJeff Kirsher 25422d283862SAlexey Charkov ret_pci = pci_register_driver(&rhine_driver_pci); 25432d283862SAlexey Charkov ret_platform = platform_driver_register(&rhine_driver_platform); 25442d283862SAlexey Charkov if ((ret_pci < 0) && (ret_platform < 0)) 25452d283862SAlexey Charkov return ret_pci; 25462d283862SAlexey Charkov 25472d283862SAlexey Charkov return 0; 2548f2148a47SJeff Kirsher } 2549f2148a47SJeff Kirsher 2550f2148a47SJeff Kirsher 2551f2148a47SJeff Kirsher static void __exit 
rhine_cleanup(void) 2552f2148a47SJeff Kirsher { 25532d283862SAlexey Charkov platform_driver_unregister(&rhine_driver_platform); 25542d283862SAlexey Charkov pci_unregister_driver(&rhine_driver_pci); 2555f2148a47SJeff Kirsher } 2556f2148a47SJeff Kirsher 2557f2148a47SJeff Kirsher 2558f2148a47SJeff Kirsher module_init(rhine_init); 2559f2148a47SJeff Kirsher module_exit(rhine_cleanup);
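/*
 * Loading succeeds when either bus registration succeeds: only if both
 * pci_register_driver() and platform_driver_register() fail does
 * rhine_init() return an error (the PCI one). Unloading reverses the
 * registration order: platform first, then PCI. A stricter init that
 * unwinds on partial failure could look like the hypothetical sketch
 * below; the code above instead tolerates one half failing, presumably
 * because a given system normally carries only PCI or only platform
 * Rhine devices.
 */
#if 0
static int __init rhine_init_strict(void)
{
	int ret;

	ret = pci_register_driver(&rhine_driver_pci);
	if (ret < 0)
		return ret;

	ret = platform_driver_register(&rhine_driver_platform);
	if (ret < 0)
		pci_unregister_driver(&rhine_driver_pci);

	return ret;
}
#endif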