1f2148a47SJeff Kirsher /* via-rhine.c: A Linux Ethernet device driver for VIA Rhine family chips. */ 2f2148a47SJeff Kirsher /* 3f2148a47SJeff Kirsher Written 1998-2001 by Donald Becker. 4f2148a47SJeff Kirsher 5f2148a47SJeff Kirsher Current Maintainer: Roger Luethi <rl@hellgate.ch> 6f2148a47SJeff Kirsher 7f2148a47SJeff Kirsher This software may be used and distributed according to the terms of 8f2148a47SJeff Kirsher the GNU General Public License (GPL), incorporated herein by reference. 9f2148a47SJeff Kirsher Drivers based on or derived from this code fall under the GPL and must 10f2148a47SJeff Kirsher retain the authorship, copyright and license notice. This file is not 11f2148a47SJeff Kirsher a complete program and may only be used when the entire operating 12f2148a47SJeff Kirsher system is licensed under the GPL. 13f2148a47SJeff Kirsher 14f2148a47SJeff Kirsher This driver is designed for the VIA VT86C100A Rhine-I. 15f2148a47SJeff Kirsher It also works with the Rhine-II (6102) and Rhine-III (6105/6105L/6105LOM 16f2148a47SJeff Kirsher and management NIC 6105M). 17f2148a47SJeff Kirsher 18f2148a47SJeff Kirsher The author may be reached as becker@scyld.com, or C/O 19f2148a47SJeff Kirsher Scyld Computing Corporation 20f2148a47SJeff Kirsher 410 Severn Ave., Suite 210 21f2148a47SJeff Kirsher Annapolis MD 21403 22f2148a47SJeff Kirsher 23f2148a47SJeff Kirsher 24f2148a47SJeff Kirsher This driver contains some changes from the original Donald Becker 25f2148a47SJeff Kirsher version. He may or may not be interested in bug reports on this 26f2148a47SJeff Kirsher code. 
You can find his versions at: 27f2148a47SJeff Kirsher http://www.scyld.com/network/via-rhine.html 28f2148a47SJeff Kirsher [link no longer provides useful info -jgarzik] 29f2148a47SJeff Kirsher 30f2148a47SJeff Kirsher */ 31f2148a47SJeff Kirsher 32f2148a47SJeff Kirsher #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 33f2148a47SJeff Kirsher 34f2148a47SJeff Kirsher #define DRV_NAME "via-rhine" 35207070f5SRoger Luethi #define DRV_VERSION "1.5.1" 36f2148a47SJeff Kirsher #define DRV_RELDATE "2010-10-09" 37f2148a47SJeff Kirsher 38eb939922SRusty Russell #include <linux/types.h> 39f2148a47SJeff Kirsher 40f2148a47SJeff Kirsher /* A few user-configurable values. 41f2148a47SJeff Kirsher These may be modified when a driver module is loaded. */ 42fc3e0f8aSFrancois Romieu static int debug = 0; 43fc3e0f8aSFrancois Romieu #define RHINE_MSG_DEFAULT \ 44fc3e0f8aSFrancois Romieu (0x0000) 45f2148a47SJeff Kirsher 46f2148a47SJeff Kirsher /* Set the copy breakpoint for the copy-only-tiny-frames scheme. 47f2148a47SJeff Kirsher Setting to > 1518 effectively disables this feature. */ 48f2148a47SJeff Kirsher #if defined(__alpha__) || defined(__arm__) || defined(__hppa__) || \ 49f2148a47SJeff Kirsher defined(CONFIG_SPARC) || defined(__ia64__) || \ 50f2148a47SJeff Kirsher defined(__sh__) || defined(__mips__) 51f2148a47SJeff Kirsher static int rx_copybreak = 1518; 52f2148a47SJeff Kirsher #else 53f2148a47SJeff Kirsher static int rx_copybreak; 54f2148a47SJeff Kirsher #endif 55f2148a47SJeff Kirsher 56f2148a47SJeff Kirsher /* Work-around for broken BIOSes: they are unable to get the chip back out of 57f2148a47SJeff Kirsher power state D3 so PXE booting fails. bootparam(7): via-rhine.avoid_D3=1 */ 58eb939922SRusty Russell static bool avoid_D3; 59f2148a47SJeff Kirsher 60f2148a47SJeff Kirsher /* 61f2148a47SJeff Kirsher * In case you are looking for 'options[]' or 'full_duplex[]', they 62f2148a47SJeff Kirsher * are gone. Use ethtool(8) instead. 
63f2148a47SJeff Kirsher */ 64f2148a47SJeff Kirsher 65f2148a47SJeff Kirsher /* Maximum number of multicast addresses to filter (vs. rx-all-multicast). 66f2148a47SJeff Kirsher The Rhine has a 64 element 8390-like hash table. */ 67f2148a47SJeff Kirsher static const int multicast_filter_limit = 32; 68f2148a47SJeff Kirsher 69f2148a47SJeff Kirsher 70f2148a47SJeff Kirsher /* Operational parameters that are set at compile time. */ 71f2148a47SJeff Kirsher 72f2148a47SJeff Kirsher /* Keep the ring sizes a power of two for compile efficiency. 7392bf2008STino Reichardt * The compiler will convert <unsigned>'%'<2^N> into a bit mask. 7492bf2008STino Reichardt * Making the Tx ring too large decreases the effectiveness of channel 7592bf2008STino Reichardt * bonding and packet priority. 7692bf2008STino Reichardt * With BQL support, we can increase TX ring safely. 7792bf2008STino Reichardt * There are no ill effects from too-large receive rings. 7892bf2008STino Reichardt */ 7992bf2008STino Reichardt #define TX_RING_SIZE 64 8092bf2008STino Reichardt #define TX_QUEUE_LEN (TX_RING_SIZE - 6) /* Limit ring entries actually used. */ 81f2148a47SJeff Kirsher #define RX_RING_SIZE 64 82f2148a47SJeff Kirsher 83f2148a47SJeff Kirsher /* Operational parameters that usually are not changed. */ 84f2148a47SJeff Kirsher 85f2148a47SJeff Kirsher /* Time in jiffies before concluding the transmitter is hung. 
*/ 86f2148a47SJeff Kirsher #define TX_TIMEOUT (2*HZ) 87f2148a47SJeff Kirsher 88f2148a47SJeff Kirsher #define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/ 89f2148a47SJeff Kirsher 90f2148a47SJeff Kirsher #include <linux/module.h> 91f2148a47SJeff Kirsher #include <linux/moduleparam.h> 92f2148a47SJeff Kirsher #include <linux/kernel.h> 93f2148a47SJeff Kirsher #include <linux/string.h> 94f2148a47SJeff Kirsher #include <linux/timer.h> 95f2148a47SJeff Kirsher #include <linux/errno.h> 96f2148a47SJeff Kirsher #include <linux/ioport.h> 97f2148a47SJeff Kirsher #include <linux/interrupt.h> 98f2148a47SJeff Kirsher #include <linux/pci.h> 992d283862SAlexey Charkov #include <linux/of_device.h> 1002d283862SAlexey Charkov #include <linux/of_irq.h> 1012d283862SAlexey Charkov #include <linux/platform_device.h> 102f2148a47SJeff Kirsher #include <linux/dma-mapping.h> 103f2148a47SJeff Kirsher #include <linux/netdevice.h> 104f2148a47SJeff Kirsher #include <linux/etherdevice.h> 105f2148a47SJeff Kirsher #include <linux/skbuff.h> 106f2148a47SJeff Kirsher #include <linux/init.h> 107f2148a47SJeff Kirsher #include <linux/delay.h> 108f2148a47SJeff Kirsher #include <linux/mii.h> 109f2148a47SJeff Kirsher #include <linux/ethtool.h> 110f2148a47SJeff Kirsher #include <linux/crc32.h> 111f2148a47SJeff Kirsher #include <linux/if_vlan.h> 112f2148a47SJeff Kirsher #include <linux/bitops.h> 113f2148a47SJeff Kirsher #include <linux/workqueue.h> 114f2148a47SJeff Kirsher #include <asm/processor.h> /* Processor type for cache alignment. */ 115f2148a47SJeff Kirsher #include <asm/io.h> 116f2148a47SJeff Kirsher #include <asm/irq.h> 117f2148a47SJeff Kirsher #include <asm/uaccess.h> 118f2148a47SJeff Kirsher #include <linux/dmi.h> 119f2148a47SJeff Kirsher 120f2148a47SJeff Kirsher /* These identify the driver base version and may not be removed. 
*/ 12176e239e1SBill Pemberton static const char version[] = 122f2148a47SJeff Kirsher "v1.10-LK" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker"; 123f2148a47SJeff Kirsher 124f2148a47SJeff Kirsher MODULE_AUTHOR("Donald Becker <becker@scyld.com>"); 125f2148a47SJeff Kirsher MODULE_DESCRIPTION("VIA Rhine PCI Fast Ethernet driver"); 126f2148a47SJeff Kirsher MODULE_LICENSE("GPL"); 127f2148a47SJeff Kirsher 128f2148a47SJeff Kirsher module_param(debug, int, 0); 129f2148a47SJeff Kirsher module_param(rx_copybreak, int, 0); 130f2148a47SJeff Kirsher module_param(avoid_D3, bool, 0); 131fc3e0f8aSFrancois Romieu MODULE_PARM_DESC(debug, "VIA Rhine debug message flags"); 132f2148a47SJeff Kirsher MODULE_PARM_DESC(rx_copybreak, "VIA Rhine copy breakpoint for copy-only-tiny-frames"); 133f2148a47SJeff Kirsher MODULE_PARM_DESC(avoid_D3, "Avoid power state D3 (work-around for broken BIOSes)"); 134f2148a47SJeff Kirsher 135f2148a47SJeff Kirsher #define MCAM_SIZE 32 136f2148a47SJeff Kirsher #define VCAM_SIZE 32 137f2148a47SJeff Kirsher 138f2148a47SJeff Kirsher /* 139f2148a47SJeff Kirsher Theory of Operation 140f2148a47SJeff Kirsher 141f2148a47SJeff Kirsher I. Board Compatibility 142f2148a47SJeff Kirsher 143f2148a47SJeff Kirsher This driver is designed for the VIA 86c100A Rhine-II PCI Fast Ethernet 144f2148a47SJeff Kirsher controller. 145f2148a47SJeff Kirsher 146f2148a47SJeff Kirsher II. Board-specific settings 147f2148a47SJeff Kirsher 148f2148a47SJeff Kirsher Boards with this chip are functional only in a bus-master PCI slot. 149f2148a47SJeff Kirsher 150f2148a47SJeff Kirsher Many operational settings are loaded from the EEPROM to the Config word at 151f2148a47SJeff Kirsher offset 0x78. For most of these settings, this driver assumes that they are 152f2148a47SJeff Kirsher correct. 153f2148a47SJeff Kirsher If this driver is compiled to use PCI memory space operations the EEPROM 154f2148a47SJeff Kirsher must be configured to enable memory ops. 
155f2148a47SJeff Kirsher 156f2148a47SJeff Kirsher III. Driver operation 157f2148a47SJeff Kirsher 158f2148a47SJeff Kirsher IIIa. Ring buffers 159f2148a47SJeff Kirsher 160f2148a47SJeff Kirsher This driver uses two statically allocated fixed-size descriptor lists 161f2148a47SJeff Kirsher formed into rings by a branch from the final descriptor to the beginning of 162f2148a47SJeff Kirsher the list. The ring sizes are set at compile time by RX/TX_RING_SIZE. 163f2148a47SJeff Kirsher 164f2148a47SJeff Kirsher IIIb/c. Transmit/Receive Structure 165f2148a47SJeff Kirsher 166f2148a47SJeff Kirsher This driver attempts to use a zero-copy receive and transmit scheme. 167f2148a47SJeff Kirsher 168f2148a47SJeff Kirsher Alas, all data buffers are required to start on a 32 bit boundary, so 169f2148a47SJeff Kirsher the driver must often copy transmit packets into bounce buffers. 170f2148a47SJeff Kirsher 171f2148a47SJeff Kirsher The driver allocates full frame size skbuffs for the Rx ring buffers at 172f2148a47SJeff Kirsher open() time and passes the skb->data field to the chip as receive data 173f2148a47SJeff Kirsher buffers. When an incoming frame is less than RX_COPYBREAK bytes long, 174f2148a47SJeff Kirsher a fresh skbuff is allocated and the frame is copied to the new skbuff. 175f2148a47SJeff Kirsher When the incoming frame is larger, the skbuff is passed directly up the 176f2148a47SJeff Kirsher protocol stack. Buffers consumed this way are replaced by newly allocated 177f2148a47SJeff Kirsher skbuffs in the last phase of rhine_rx(). 178f2148a47SJeff Kirsher 179f2148a47SJeff Kirsher The RX_COPYBREAK value is chosen to trade-off the memory wasted by 180f2148a47SJeff Kirsher using a full-sized skbuff for small frames vs. the copying costs of larger 181f2148a47SJeff Kirsher frames. 
New boards are typically used in generously configured machines 182f2148a47SJeff Kirsher and the underfilled buffers have negligible impact compared to the benefit of 183f2148a47SJeff Kirsher a single allocation size, so the default value of zero results in never 184f2148a47SJeff Kirsher copying packets. When copying is done, the cost is usually mitigated by using 185f2148a47SJeff Kirsher a combined copy/checksum routine. Copying also preloads the cache, which is 186f2148a47SJeff Kirsher most useful with small frames. 187f2148a47SJeff Kirsher 188f2148a47SJeff Kirsher Since the VIA chips are only able to transfer data to buffers on 32 bit 189f2148a47SJeff Kirsher boundaries, the IP header at offset 14 in an ethernet frame isn't 190f2148a47SJeff Kirsher longword aligned for further processing. Copying these unaligned buffers 191f2148a47SJeff Kirsher has the beneficial effect of 16-byte aligning the IP header. 192f2148a47SJeff Kirsher 193f2148a47SJeff Kirsher IIId. Synchronization 194f2148a47SJeff Kirsher 195f2148a47SJeff Kirsher The driver runs as two independent, single-threaded flows of control. One 196f2148a47SJeff Kirsher is the send-packet routine, which enforces single-threaded use by the 197f2148a47SJeff Kirsher netdev_priv(dev)->lock spinlock. The other thread is the interrupt handler, 198f2148a47SJeff Kirsher which is single threaded by the hardware and interrupt handling software. 199f2148a47SJeff Kirsher 200f2148a47SJeff Kirsher The send packet thread has partial control over the Tx ring. It locks the 201f2148a47SJeff Kirsher netdev_priv(dev)->lock whenever it's queuing a Tx packet. If the next slot in 202f2148a47SJeff Kirsher the ring is not available it stops the transmit queue by 203f2148a47SJeff Kirsher calling netif_stop_queue. 204f2148a47SJeff Kirsher 205f2148a47SJeff Kirsher The interrupt handler has exclusive control over the Rx ring and records stats 206f2148a47SJeff Kirsher from the Tx ring. 
After reaping the stats, it marks the Tx queue entry as 207f2148a47SJeff Kirsher empty by incrementing the dirty_tx mark. If at least half of the entries in 208f2148a47SJeff Kirsher the Rx ring are available the transmit queue is woken up if it was stopped. 209f2148a47SJeff Kirsher 210f2148a47SJeff Kirsher IV. Notes 211f2148a47SJeff Kirsher 212f2148a47SJeff Kirsher IVb. References 213f2148a47SJeff Kirsher 214f2148a47SJeff Kirsher Preliminary VT86C100A manual from http://www.via.com.tw/ 215f2148a47SJeff Kirsher http://www.scyld.com/expert/100mbps.html 216f2148a47SJeff Kirsher http://www.scyld.com/expert/NWay.html 217f2148a47SJeff Kirsher ftp://ftp.via.com.tw/public/lan/Products/NIC/VT86C100A/Datasheet/VT86C100A03.pdf 218f2148a47SJeff Kirsher ftp://ftp.via.com.tw/public/lan/Products/NIC/VT6102/Datasheet/VT6102_021.PDF 219f2148a47SJeff Kirsher 220f2148a47SJeff Kirsher 221f2148a47SJeff Kirsher IVc. Errata 222f2148a47SJeff Kirsher 223f2148a47SJeff Kirsher The VT86C100A manual is not reliable information. 224f2148a47SJeff Kirsher The 3043 chip does not handle unaligned transmit or receive buffers, resulting 225f2148a47SJeff Kirsher in significant performance degradation for bounce buffer copies on transmit 226f2148a47SJeff Kirsher and unaligned IP headers on receive. 227f2148a47SJeff Kirsher The chip does not pad to minimum transmit length. 228f2148a47SJeff Kirsher 229f2148a47SJeff Kirsher */ 230f2148a47SJeff Kirsher 231f2148a47SJeff Kirsher 232f2148a47SJeff Kirsher /* This table drives the PCI probe routines. It's mostly boilerplate in all 233f2148a47SJeff Kirsher of the drivers, and will likely be provided by some future kernel. 234f2148a47SJeff Kirsher Note the matching code -- the first table entry matches all 56** cards but 235f2148a47SJeff Kirsher second only the 1234 card.
236f2148a47SJeff Kirsher */ 237f2148a47SJeff Kirsher 238f2148a47SJeff Kirsher enum rhine_revs { 239f2148a47SJeff Kirsher VT86C100A = 0x00, 240f2148a47SJeff Kirsher VTunknown0 = 0x20, 241f2148a47SJeff Kirsher VT6102 = 0x40, 242f2148a47SJeff Kirsher VT8231 = 0x50, /* Integrated MAC */ 243f2148a47SJeff Kirsher VT8233 = 0x60, /* Integrated MAC */ 244f2148a47SJeff Kirsher VT8235 = 0x74, /* Integrated MAC */ 245f2148a47SJeff Kirsher VT8237 = 0x78, /* Integrated MAC */ 246f2148a47SJeff Kirsher VTunknown1 = 0x7C, 247f2148a47SJeff Kirsher VT6105 = 0x80, 248f2148a47SJeff Kirsher VT6105_B0 = 0x83, 249f2148a47SJeff Kirsher VT6105L = 0x8A, 250f2148a47SJeff Kirsher VT6107 = 0x8C, 251f2148a47SJeff Kirsher VTunknown2 = 0x8E, 252f2148a47SJeff Kirsher VT6105M = 0x90, /* Management adapter */ 253f2148a47SJeff Kirsher }; 254f2148a47SJeff Kirsher 255f2148a47SJeff Kirsher enum rhine_quirks { 256f2148a47SJeff Kirsher rqWOL = 0x0001, /* Wake-On-LAN support */ 257f2148a47SJeff Kirsher rqForceReset = 0x0002, 258f2148a47SJeff Kirsher rq6patterns = 0x0040, /* 6 instead of 4 patterns for WOL */ 259f2148a47SJeff Kirsher rqStatusWBRace = 0x0080, /* Tx Status Writeback Error possible */ 260f2148a47SJeff Kirsher rqRhineI = 0x0100, /* See comment below */ 261ca8b6e04SAlexey Charkov rqIntPHY = 0x0200, /* Integrated PHY */ 262ca8b6e04SAlexey Charkov rqMgmt = 0x0400, /* Management adapter */ 2635b579e21SAlexey Charkov rqNeedEnMMIO = 0x0800, /* Whether the core needs to be 2645b579e21SAlexey Charkov * switched from PIO mode to MMIO 2655b579e21SAlexey Charkov * (only applies to PCI) 2665b579e21SAlexey Charkov */ 267f2148a47SJeff Kirsher }; 268f2148a47SJeff Kirsher /* 269f2148a47SJeff Kirsher * rqRhineI: VT86C100A (aka Rhine-I) uses different bits to enable 270f2148a47SJeff Kirsher * MMIO as well as for the collision counter and the Tx FIFO underflow 271f2148a47SJeff Kirsher * indicator. In addition, Tx and Rx buffers need to be 4 byte aligned.
272f2148a47SJeff Kirsher */ 273f2148a47SJeff Kirsher 274f2148a47SJeff Kirsher /* Beware of PCI posted writes */ 275f2148a47SJeff Kirsher #define IOSYNC do { ioread8(ioaddr + StationAddr); } while (0) 276f2148a47SJeff Kirsher 2779baa3c34SBenoit Taine static const struct pci_device_id rhine_pci_tbl[] = { 278f2148a47SJeff Kirsher { 0x1106, 0x3043, PCI_ANY_ID, PCI_ANY_ID, }, /* VT86C100A */ 279f2148a47SJeff Kirsher { 0x1106, 0x3065, PCI_ANY_ID, PCI_ANY_ID, }, /* VT6102 */ 280f2148a47SJeff Kirsher { 0x1106, 0x3106, PCI_ANY_ID, PCI_ANY_ID, }, /* 6105{,L,LOM} */ 281f2148a47SJeff Kirsher { 0x1106, 0x3053, PCI_ANY_ID, PCI_ANY_ID, }, /* VT6105M */ 282f2148a47SJeff Kirsher { } /* terminate list */ 283f2148a47SJeff Kirsher }; 284f2148a47SJeff Kirsher MODULE_DEVICE_TABLE(pci, rhine_pci_tbl); 285f2148a47SJeff Kirsher 2862d283862SAlexey Charkov /* OpenFirmware identifiers for platform-bus devices 287ca8b6e04SAlexey Charkov * The .data field is currently only used to store quirks 2882d283862SAlexey Charkov */ 289ca8b6e04SAlexey Charkov static u32 vt8500_quirks = rqWOL | rqForceReset | rq6patterns; 290d2b75a3fSFabian Frederick static const struct of_device_id rhine_of_tbl[] = { 291ca8b6e04SAlexey Charkov { .compatible = "via,vt8500-rhine", .data = &vt8500_quirks }, 2922d283862SAlexey Charkov { } /* terminate list */ 2932d283862SAlexey Charkov }; 2942d283862SAlexey Charkov MODULE_DEVICE_TABLE(of, rhine_of_tbl); 295f2148a47SJeff Kirsher 296f2148a47SJeff Kirsher /* Offsets to the device registers. 
*/ 297f2148a47SJeff Kirsher enum register_offsets { 298f2148a47SJeff Kirsher StationAddr=0x00, RxConfig=0x06, TxConfig=0x07, ChipCmd=0x08, 299f2148a47SJeff Kirsher ChipCmd1=0x09, TQWake=0x0A, 300f2148a47SJeff Kirsher IntrStatus=0x0C, IntrEnable=0x0E, 301f2148a47SJeff Kirsher MulticastFilter0=0x10, MulticastFilter1=0x14, 302f2148a47SJeff Kirsher RxRingPtr=0x18, TxRingPtr=0x1C, GFIFOTest=0x54, 303f2148a47SJeff Kirsher MIIPhyAddr=0x6C, MIIStatus=0x6D, PCIBusConfig=0x6E, PCIBusConfig1=0x6F, 304f2148a47SJeff Kirsher MIICmd=0x70, MIIRegAddr=0x71, MIIData=0x72, MACRegEEcsr=0x74, 305f2148a47SJeff Kirsher ConfigA=0x78, ConfigB=0x79, ConfigC=0x7A, ConfigD=0x7B, 306f2148a47SJeff Kirsher RxMissed=0x7C, RxCRCErrs=0x7E, MiscCmd=0x81, 307f2148a47SJeff Kirsher StickyHW=0x83, IntrStatus2=0x84, 308f2148a47SJeff Kirsher CamMask=0x88, CamCon=0x92, CamAddr=0x93, 309f2148a47SJeff Kirsher WOLcrSet=0xA0, PwcfgSet=0xA1, WOLcgSet=0xA3, WOLcrClr=0xA4, 310f2148a47SJeff Kirsher WOLcrClr1=0xA6, WOLcgClr=0xA7, 311f2148a47SJeff Kirsher PwrcsrSet=0xA8, PwrcsrSet1=0xA9, PwrcsrClr=0xAC, PwrcsrClr1=0xAD, 312f2148a47SJeff Kirsher }; 313f2148a47SJeff Kirsher 314f2148a47SJeff Kirsher /* Bits in ConfigD */ 315f2148a47SJeff Kirsher enum backoff_bits { 316f2148a47SJeff Kirsher BackOptional=0x01, BackModify=0x02, 317f2148a47SJeff Kirsher BackCaptureEffect=0x04, BackRandom=0x08 318f2148a47SJeff Kirsher }; 319f2148a47SJeff Kirsher 320f2148a47SJeff Kirsher /* Bits in the TxConfig (TCR) register */ 321f2148a47SJeff Kirsher enum tcr_bits { 322f2148a47SJeff Kirsher TCR_PQEN=0x01, 323f2148a47SJeff Kirsher TCR_LB0=0x02, /* loopback[0] */ 324f2148a47SJeff Kirsher TCR_LB1=0x04, /* loopback[1] */ 325f2148a47SJeff Kirsher TCR_OFSET=0x08, 326f2148a47SJeff Kirsher TCR_RTGOPT=0x10, 327f2148a47SJeff Kirsher TCR_RTFT0=0x20, 328f2148a47SJeff Kirsher TCR_RTFT1=0x40, 329f2148a47SJeff Kirsher TCR_RTSF=0x80, 330f2148a47SJeff Kirsher }; 331f2148a47SJeff Kirsher 332f2148a47SJeff Kirsher /* Bits in the CamCon (CAMC) register */ 
333f2148a47SJeff Kirsher enum camcon_bits { 334f2148a47SJeff Kirsher CAMC_CAMEN=0x01, 335f2148a47SJeff Kirsher CAMC_VCAMSL=0x02, 336f2148a47SJeff Kirsher CAMC_CAMWR=0x04, 337f2148a47SJeff Kirsher CAMC_CAMRD=0x08, 338f2148a47SJeff Kirsher }; 339f2148a47SJeff Kirsher 340f2148a47SJeff Kirsher /* Bits in the PCIBusConfig1 (BCR1) register */ 341f2148a47SJeff Kirsher enum bcr1_bits { 342f2148a47SJeff Kirsher BCR1_POT0=0x01, 343f2148a47SJeff Kirsher BCR1_POT1=0x02, 344f2148a47SJeff Kirsher BCR1_POT2=0x04, 345f2148a47SJeff Kirsher BCR1_CTFT0=0x08, 346f2148a47SJeff Kirsher BCR1_CTFT1=0x10, 347f2148a47SJeff Kirsher BCR1_CTSF=0x20, 348f2148a47SJeff Kirsher BCR1_TXQNOBK=0x40, /* for VT6105 */ 349f2148a47SJeff Kirsher BCR1_VIDFR=0x80, /* for VT6105 */ 350f2148a47SJeff Kirsher BCR1_MED0=0x40, /* for VT6102 */ 351f2148a47SJeff Kirsher BCR1_MED1=0x80, /* for VT6102 */ 352f2148a47SJeff Kirsher }; 353f2148a47SJeff Kirsher 354f2148a47SJeff Kirsher /* Registers we check that mmio and reg are the same. */ 355f2148a47SJeff Kirsher static const int mmio_verify_registers[] = { 356f2148a47SJeff Kirsher RxConfig, TxConfig, IntrEnable, ConfigA, ConfigB, ConfigC, ConfigD, 357f2148a47SJeff Kirsher 0 358f2148a47SJeff Kirsher }; 359f2148a47SJeff Kirsher 360f2148a47SJeff Kirsher /* Bits in the interrupt status/mask registers. 
*/ 361f2148a47SJeff Kirsher enum intr_status_bits { 3627ab87ff4SFrancois Romieu IntrRxDone = 0x0001, 3637ab87ff4SFrancois Romieu IntrTxDone = 0x0002, 3647ab87ff4SFrancois Romieu IntrRxErr = 0x0004, 3657ab87ff4SFrancois Romieu IntrTxError = 0x0008, 3667ab87ff4SFrancois Romieu IntrRxEmpty = 0x0020, 367f2148a47SJeff Kirsher IntrPCIErr = 0x0040, 3687ab87ff4SFrancois Romieu IntrStatsMax = 0x0080, 3697ab87ff4SFrancois Romieu IntrRxEarly = 0x0100, 3707ab87ff4SFrancois Romieu IntrTxUnderrun = 0x0210, 3717ab87ff4SFrancois Romieu IntrRxOverflow = 0x0400, 3727ab87ff4SFrancois Romieu IntrRxDropped = 0x0800, 3737ab87ff4SFrancois Romieu IntrRxNoBuf = 0x1000, 3747ab87ff4SFrancois Romieu IntrTxAborted = 0x2000, 3757ab87ff4SFrancois Romieu IntrLinkChange = 0x4000, 376f2148a47SJeff Kirsher IntrRxWakeUp = 0x8000, 377f2148a47SJeff Kirsher IntrTxDescRace = 0x080000, /* mapped from IntrStatus2 */ 3787ab87ff4SFrancois Romieu IntrNormalSummary = IntrRxDone | IntrTxDone, 3797ab87ff4SFrancois Romieu IntrTxErrSummary = IntrTxDescRace | IntrTxAborted | IntrTxError | 3807ab87ff4SFrancois Romieu IntrTxUnderrun, 381f2148a47SJeff Kirsher }; 382f2148a47SJeff Kirsher 383f2148a47SJeff Kirsher /* Bits in WOLcrSet/WOLcrClr and PwrcsrSet/PwrcsrClr */ 384f2148a47SJeff Kirsher enum wol_bits { 385f2148a47SJeff Kirsher WOLucast = 0x10, 386f2148a47SJeff Kirsher WOLmagic = 0x20, 387f2148a47SJeff Kirsher WOLbmcast = 0x30, 388f2148a47SJeff Kirsher WOLlnkon = 0x40, 389f2148a47SJeff Kirsher WOLlnkoff = 0x80, 390f2148a47SJeff Kirsher }; 391f2148a47SJeff Kirsher 392f2148a47SJeff Kirsher /* The Rx and Tx buffer descriptors. 
*/ 393f2148a47SJeff Kirsher struct rx_desc { 394f2148a47SJeff Kirsher __le32 rx_status; 395f2148a47SJeff Kirsher __le32 desc_length; /* Chain flag, Buffer/frame length */ 396f2148a47SJeff Kirsher __le32 addr; 397f2148a47SJeff Kirsher __le32 next_desc; 398f2148a47SJeff Kirsher }; 399f2148a47SJeff Kirsher struct tx_desc { 400f2148a47SJeff Kirsher __le32 tx_status; 401f2148a47SJeff Kirsher __le32 desc_length; /* Chain flag, Tx Config, Frame length */ 402f2148a47SJeff Kirsher __le32 addr; 403f2148a47SJeff Kirsher __le32 next_desc; 404f2148a47SJeff Kirsher }; 405f2148a47SJeff Kirsher 406f2148a47SJeff Kirsher /* Initial value for tx_desc.desc_length, Buffer size goes to bits 0-10 */ 407f2148a47SJeff Kirsher #define TXDESC 0x00e08000 408f2148a47SJeff Kirsher 409f2148a47SJeff Kirsher enum rx_status_bits { 410f2148a47SJeff Kirsher RxOK=0x8000, RxWholePkt=0x0300, RxErr=0x008F 411f2148a47SJeff Kirsher }; 412f2148a47SJeff Kirsher 413f2148a47SJeff Kirsher /* Bits in *_desc.*_status */ 414f2148a47SJeff Kirsher enum desc_status_bits { 415f2148a47SJeff Kirsher DescOwn=0x80000000 416f2148a47SJeff Kirsher }; 417f2148a47SJeff Kirsher 418f2148a47SJeff Kirsher /* Bits in *_desc.*_length */ 419f2148a47SJeff Kirsher enum desc_length_bits { 420f2148a47SJeff Kirsher DescTag=0x00010000 421f2148a47SJeff Kirsher }; 422f2148a47SJeff Kirsher 423f2148a47SJeff Kirsher /* Bits in ChipCmd. 
*/ 424f2148a47SJeff Kirsher enum chip_cmd_bits { 425f2148a47SJeff Kirsher CmdInit=0x01, CmdStart=0x02, CmdStop=0x04, CmdRxOn=0x08, 426f2148a47SJeff Kirsher CmdTxOn=0x10, Cmd1TxDemand=0x20, CmdRxDemand=0x40, 427f2148a47SJeff Kirsher Cmd1EarlyRx=0x01, Cmd1EarlyTx=0x02, Cmd1FDuplex=0x04, 428f2148a47SJeff Kirsher Cmd1NoTxPoll=0x08, Cmd1Reset=0x80, 429f2148a47SJeff Kirsher }; 430f2148a47SJeff Kirsher 431f7b5d1b9SJamie Gloudon struct rhine_stats { 432f7b5d1b9SJamie Gloudon u64 packets; 433f7b5d1b9SJamie Gloudon u64 bytes; 434f7b5d1b9SJamie Gloudon struct u64_stats_sync syncp; 435f7b5d1b9SJamie Gloudon }; 436f7b5d1b9SJamie Gloudon 437f2148a47SJeff Kirsher struct rhine_private { 438f2148a47SJeff Kirsher /* Bit mask for configured VLAN ids */ 439f2148a47SJeff Kirsher unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; 440f2148a47SJeff Kirsher 441f2148a47SJeff Kirsher /* Descriptor rings */ 442f2148a47SJeff Kirsher struct rx_desc *rx_ring; 443f2148a47SJeff Kirsher struct tx_desc *tx_ring; 444f2148a47SJeff Kirsher dma_addr_t rx_ring_dma; 445f2148a47SJeff Kirsher dma_addr_t tx_ring_dma; 446f2148a47SJeff Kirsher 447f2148a47SJeff Kirsher /* The addresses of receive-in-place skbuffs. */ 448f2148a47SJeff Kirsher struct sk_buff *rx_skbuff[RX_RING_SIZE]; 449f2148a47SJeff Kirsher dma_addr_t rx_skbuff_dma[RX_RING_SIZE]; 450f2148a47SJeff Kirsher 451f2148a47SJeff Kirsher /* The saved address of a sent-in-place packet/buffer, for later free(). 
*/ 452f2148a47SJeff Kirsher struct sk_buff *tx_skbuff[TX_RING_SIZE]; 453f2148a47SJeff Kirsher dma_addr_t tx_skbuff_dma[TX_RING_SIZE]; 454f2148a47SJeff Kirsher 455f2148a47SJeff Kirsher /* Tx bounce buffers (Rhine-I only) */ 456f2148a47SJeff Kirsher unsigned char *tx_buf[TX_RING_SIZE]; 457f2148a47SJeff Kirsher unsigned char *tx_bufs; 458f2148a47SJeff Kirsher dma_addr_t tx_bufs_dma; 459f2148a47SJeff Kirsher 460f7630d18SAlexey Charkov int irq; 461f2148a47SJeff Kirsher long pioaddr; 462f2148a47SJeff Kirsher struct net_device *dev; 463f2148a47SJeff Kirsher struct napi_struct napi; 464f2148a47SJeff Kirsher spinlock_t lock; 4657ab87ff4SFrancois Romieu struct mutex task_lock; 4667ab87ff4SFrancois Romieu bool task_enable; 4677ab87ff4SFrancois Romieu struct work_struct slow_event_task; 468f2148a47SJeff Kirsher struct work_struct reset_task; 469f2148a47SJeff Kirsher 470fc3e0f8aSFrancois Romieu u32 msg_enable; 471fc3e0f8aSFrancois Romieu 472f2148a47SJeff Kirsher /* Frequently used values: keep some adjacent for cache effect. */ 473f2148a47SJeff Kirsher u32 quirks; 4748709bb2cSfrançois romieu unsigned int cur_rx; 475f2148a47SJeff Kirsher unsigned int cur_tx, dirty_tx; 476f2148a47SJeff Kirsher unsigned int rx_buf_sz; /* Based on MTU+slack. 
*/ 477f7b5d1b9SJamie Gloudon struct rhine_stats rx_stats; 478f7b5d1b9SJamie Gloudon struct rhine_stats tx_stats; 479f2148a47SJeff Kirsher u8 wolopts; 480f2148a47SJeff Kirsher 481f2148a47SJeff Kirsher u8 tx_thresh, rx_thresh; 482f2148a47SJeff Kirsher 483f2148a47SJeff Kirsher struct mii_if_info mii_if; 484f2148a47SJeff Kirsher void __iomem *base; 485f2148a47SJeff Kirsher }; 486f2148a47SJeff Kirsher 487f2148a47SJeff Kirsher #define BYTE_REG_BITS_ON(x, p) do { iowrite8((ioread8((p))|(x)), (p)); } while (0) 488f2148a47SJeff Kirsher #define WORD_REG_BITS_ON(x, p) do { iowrite16((ioread16((p))|(x)), (p)); } while (0) 489f2148a47SJeff Kirsher #define DWORD_REG_BITS_ON(x, p) do { iowrite32((ioread32((p))|(x)), (p)); } while (0) 490f2148a47SJeff Kirsher 491f2148a47SJeff Kirsher #define BYTE_REG_BITS_IS_ON(x, p) (ioread8((p)) & (x)) 492f2148a47SJeff Kirsher #define WORD_REG_BITS_IS_ON(x, p) (ioread16((p)) & (x)) 493f2148a47SJeff Kirsher #define DWORD_REG_BITS_IS_ON(x, p) (ioread32((p)) & (x)) 494f2148a47SJeff Kirsher 495f2148a47SJeff Kirsher #define BYTE_REG_BITS_OFF(x, p) do { iowrite8(ioread8((p)) & (~(x)), (p)); } while (0) 496f2148a47SJeff Kirsher #define WORD_REG_BITS_OFF(x, p) do { iowrite16(ioread16((p)) & (~(x)), (p)); } while (0) 497f2148a47SJeff Kirsher #define DWORD_REG_BITS_OFF(x, p) do { iowrite32(ioread32((p)) & (~(x)), (p)); } while (0) 498f2148a47SJeff Kirsher 499f2148a47SJeff Kirsher #define BYTE_REG_BITS_SET(x, m, p) do { iowrite8((ioread8((p)) & (~(m)))|(x), (p)); } while (0) 500f2148a47SJeff Kirsher #define WORD_REG_BITS_SET(x, m, p) do { iowrite16((ioread16((p)) & (~(m)))|(x), (p)); } while (0) 501f2148a47SJeff Kirsher #define DWORD_REG_BITS_SET(x, m, p) do { iowrite32((ioread32((p)) & (~(m)))|(x), (p)); } while (0) 502f2148a47SJeff Kirsher 503f2148a47SJeff Kirsher 504f2148a47SJeff Kirsher static int mdio_read(struct net_device *dev, int phy_id, int location); 505f2148a47SJeff Kirsher static void mdio_write(struct net_device *dev, int phy_id, int 
location, int value); 506f2148a47SJeff Kirsher static int rhine_open(struct net_device *dev); 507f2148a47SJeff Kirsher static void rhine_reset_task(struct work_struct *work); 5087ab87ff4SFrancois Romieu static void rhine_slow_event_task(struct work_struct *work); 509f2148a47SJeff Kirsher static void rhine_tx_timeout(struct net_device *dev); 510f2148a47SJeff Kirsher static netdev_tx_t rhine_start_tx(struct sk_buff *skb, 511f2148a47SJeff Kirsher struct net_device *dev); 512f2148a47SJeff Kirsher static irqreturn_t rhine_interrupt(int irq, void *dev_instance); 513f2148a47SJeff Kirsher static void rhine_tx(struct net_device *dev); 514f2148a47SJeff Kirsher static int rhine_rx(struct net_device *dev, int limit); 515f2148a47SJeff Kirsher static void rhine_set_rx_mode(struct net_device *dev); 516f7b5d1b9SJamie Gloudon static struct rtnl_link_stats64 *rhine_get_stats64(struct net_device *dev, 517f7b5d1b9SJamie Gloudon struct rtnl_link_stats64 *stats); 518f2148a47SJeff Kirsher static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); 519f2148a47SJeff Kirsher static const struct ethtool_ops netdev_ethtool_ops; 520f2148a47SJeff Kirsher static int rhine_close(struct net_device *dev); 52180d5c368SPatrick McHardy static int rhine_vlan_rx_add_vid(struct net_device *dev, 52280d5c368SPatrick McHardy __be16 proto, u16 vid); 52380d5c368SPatrick McHardy static int rhine_vlan_rx_kill_vid(struct net_device *dev, 52480d5c368SPatrick McHardy __be16 proto, u16 vid); 5257ab87ff4SFrancois Romieu static void rhine_restart_tx(struct net_device *dev); 526f2148a47SJeff Kirsher 5273f8c91a7SAndreas Mohr static void rhine_wait_bit(struct rhine_private *rp, u8 reg, u8 mask, bool low) 528a384a33bSFrancois Romieu { 529a384a33bSFrancois Romieu void __iomem *ioaddr = rp->base; 530a384a33bSFrancois Romieu int i; 531a384a33bSFrancois Romieu 532a384a33bSFrancois Romieu for (i = 0; i < 1024; i++) { 5333f8c91a7SAndreas Mohr bool has_mask_bits = !!(ioread8(ioaddr + reg) & mask); 
5343f8c91a7SAndreas Mohr 5353f8c91a7SAndreas Mohr if (low ^ has_mask_bits) 536a384a33bSFrancois Romieu break; 537a384a33bSFrancois Romieu udelay(10); 538a384a33bSFrancois Romieu } 539a384a33bSFrancois Romieu if (i > 64) { 540fc3e0f8aSFrancois Romieu netif_dbg(rp, hw, rp->dev, "%s bit wait (%02x/%02x) cycle " 5413f8c91a7SAndreas Mohr "count: %04d\n", low ? "low" : "high", reg, mask, i); 542a384a33bSFrancois Romieu } 543a384a33bSFrancois Romieu } 544a384a33bSFrancois Romieu 545a384a33bSFrancois Romieu static void rhine_wait_bit_high(struct rhine_private *rp, u8 reg, u8 mask) 546a384a33bSFrancois Romieu { 5473f8c91a7SAndreas Mohr rhine_wait_bit(rp, reg, mask, false); 548a384a33bSFrancois Romieu } 549a384a33bSFrancois Romieu 550a384a33bSFrancois Romieu static void rhine_wait_bit_low(struct rhine_private *rp, u8 reg, u8 mask) 551a384a33bSFrancois Romieu { 5523f8c91a7SAndreas Mohr rhine_wait_bit(rp, reg, mask, true); 553a384a33bSFrancois Romieu } 554f2148a47SJeff Kirsher 555a20a28bcSFrancois Romieu static u32 rhine_get_events(struct rhine_private *rp) 556f2148a47SJeff Kirsher { 557f2148a47SJeff Kirsher void __iomem *ioaddr = rp->base; 558f2148a47SJeff Kirsher u32 intr_status; 559f2148a47SJeff Kirsher 560f2148a47SJeff Kirsher intr_status = ioread16(ioaddr + IntrStatus); 561f2148a47SJeff Kirsher /* On Rhine-II, Bit 3 indicates Tx descriptor write-back race. 
*/ 562f2148a47SJeff Kirsher if (rp->quirks & rqStatusWBRace) 563f2148a47SJeff Kirsher intr_status |= ioread8(ioaddr + IntrStatus2) << 16; 564f2148a47SJeff Kirsher return intr_status; 565f2148a47SJeff Kirsher } 566f2148a47SJeff Kirsher 567a20a28bcSFrancois Romieu /* Acknowledge (clear) the events in @mask; on chips with the write-back race quirk, bits 16+ are written back to IntrStatus2. */ static void rhine_ack_events(struct rhine_private *rp, u32 mask) 568a20a28bcSFrancois Romieu { 569a20a28bcSFrancois Romieu void __iomem *ioaddr = rp->base; 570a20a28bcSFrancois Romieu 571a20a28bcSFrancois Romieu if (rp->quirks & rqStatusWBRace) 572a20a28bcSFrancois Romieu iowrite8(mask >> 16, ioaddr + IntrStatus2); 573a20a28bcSFrancois Romieu iowrite16(mask, ioaddr + IntrStatus); 5747ab87ff4SFrancois Romieu mmiowb(); 575a20a28bcSFrancois Romieu } 576a20a28bcSFrancois Romieu 577f2148a47SJeff Kirsher /* 578f2148a47SJeff Kirsher * Get power related registers into sane state. 579f2148a47SJeff Kirsher * Notify user about past WOL event. 580f2148a47SJeff Kirsher */ 581f2148a47SJeff Kirsher static void rhine_power_init(struct net_device *dev) 582f2148a47SJeff Kirsher { 583f2148a47SJeff Kirsher struct rhine_private *rp = netdev_priv(dev); 584f2148a47SJeff Kirsher void __iomem *ioaddr = rp->base; 585f2148a47SJeff Kirsher u16 wolstat; 586f2148a47SJeff Kirsher 587f2148a47SJeff Kirsher if (rp->quirks & rqWOL) { 588f2148a47SJeff Kirsher /* Make sure chip is in power state D0 */ 589f2148a47SJeff Kirsher iowrite8(ioread8(ioaddr + StickyHW) & 0xFC, ioaddr + StickyHW); 590f2148a47SJeff Kirsher 591f2148a47SJeff Kirsher /* Disable "force PME-enable" */ 592f2148a47SJeff Kirsher iowrite8(0x80, ioaddr + WOLcgClr); 593f2148a47SJeff Kirsher 594f2148a47SJeff Kirsher /* Clear power-event config bits (WOL) */ 595f2148a47SJeff Kirsher iowrite8(0xFF, ioaddr + WOLcrClr); 596f2148a47SJeff Kirsher /* More recent cards can manage two additional patterns */ 597f2148a47SJeff Kirsher if (rp->quirks & rq6patterns) 598f2148a47SJeff Kirsher iowrite8(0x03, ioaddr + WOLcrClr1); 599f2148a47SJeff Kirsher 600f2148a47SJeff Kirsher /* Save power-event
status bits */ 601f2148a47SJeff Kirsher wolstat = ioread8(ioaddr + PwrcsrSet); 602f2148a47SJeff Kirsher if (rp->quirks & rq6patterns) 603f2148a47SJeff Kirsher wolstat |= (ioread8(ioaddr + PwrcsrSet1) & 0x03) << 8; 604f2148a47SJeff Kirsher 605f2148a47SJeff Kirsher /* Clear power-event status bits */ 606f2148a47SJeff Kirsher iowrite8(0xFF, ioaddr + PwrcsrClr); 607f2148a47SJeff Kirsher if (rp->quirks & rq6patterns) 608f2148a47SJeff Kirsher iowrite8(0x03, ioaddr + PwrcsrClr1); 609f2148a47SJeff Kirsher 610f2148a47SJeff Kirsher if (wolstat) { 611f2148a47SJeff Kirsher char *reason; 612f2148a47SJeff Kirsher switch (wolstat) { 613f2148a47SJeff Kirsher case WOLmagic: 614f2148a47SJeff Kirsher reason = "Magic packet"; 615f2148a47SJeff Kirsher break; 616f2148a47SJeff Kirsher case WOLlnkon: 617f2148a47SJeff Kirsher reason = "Link went up"; 618f2148a47SJeff Kirsher break; 619f2148a47SJeff Kirsher case WOLlnkoff: 620f2148a47SJeff Kirsher reason = "Link went down"; 621f2148a47SJeff Kirsher break; 622f2148a47SJeff Kirsher case WOLucast: 623f2148a47SJeff Kirsher reason = "Unicast packet"; 624f2148a47SJeff Kirsher break; 625f2148a47SJeff Kirsher case WOLbmcast: 626f2148a47SJeff Kirsher reason = "Multicast/broadcast packet"; 627f2148a47SJeff Kirsher break; 628f2148a47SJeff Kirsher default: 629f2148a47SJeff Kirsher reason = "Unknown"; 630f2148a47SJeff Kirsher } 631f2148a47SJeff Kirsher netdev_info(dev, "Woke system up. 
Reason: %s\n", 632f2148a47SJeff Kirsher reason); 633f2148a47SJeff Kirsher } 634f2148a47SJeff Kirsher } 635f2148a47SJeff Kirsher } 636f2148a47SJeff Kirsher 637f2148a47SJeff Kirsher static void rhine_chip_reset(struct net_device *dev) 638f2148a47SJeff Kirsher { 639f2148a47SJeff Kirsher struct rhine_private *rp = netdev_priv(dev); 640f2148a47SJeff Kirsher void __iomem *ioaddr = rp->base; 641fc3e0f8aSFrancois Romieu u8 cmd1; 642f2148a47SJeff Kirsher 643f2148a47SJeff Kirsher iowrite8(Cmd1Reset, ioaddr + ChipCmd1); 644f2148a47SJeff Kirsher IOSYNC; 645f2148a47SJeff Kirsher 646f2148a47SJeff Kirsher if (ioread8(ioaddr + ChipCmd1) & Cmd1Reset) { 647f2148a47SJeff Kirsher netdev_info(dev, "Reset not complete yet. Trying harder.\n"); 648f2148a47SJeff Kirsher 649f2148a47SJeff Kirsher /* Force reset */ 650f2148a47SJeff Kirsher if (rp->quirks & rqForceReset) 651f2148a47SJeff Kirsher iowrite8(0x40, ioaddr + MiscCmd); 652f2148a47SJeff Kirsher 653f2148a47SJeff Kirsher /* Reset can take somewhat longer (rare) */ 654a384a33bSFrancois Romieu rhine_wait_bit_low(rp, ChipCmd1, Cmd1Reset); 655f2148a47SJeff Kirsher } 656f2148a47SJeff Kirsher 657fc3e0f8aSFrancois Romieu cmd1 = ioread8(ioaddr + ChipCmd1); 658fc3e0f8aSFrancois Romieu netif_info(rp, hw, dev, "Reset %s\n", (cmd1 & Cmd1Reset) ? 
659f2148a47SJeff Kirsher "failed" : "succeeded"); 660f2148a47SJeff Kirsher } 661f2148a47SJeff Kirsher 662f2148a47SJeff Kirsher /* Enable memory-mapped register access via ConfigA (Rhine-I) or ConfigD, for chips flagged rqNeedEnMMIO; no-op otherwise. */ static void enable_mmio(long pioaddr, u32 quirks) 663f2148a47SJeff Kirsher { 664f2148a47SJeff Kirsher int n; 6655b579e21SAlexey Charkov 6665b579e21SAlexey Charkov if (quirks & rqNeedEnMMIO) { 667f2148a47SJeff Kirsher if (quirks & rqRhineI) { 6685b579e21SAlexey Charkov /* More recent docs say that this bit is reserved */ 669f2148a47SJeff Kirsher n = inb(pioaddr + ConfigA) | 0x20; 670f2148a47SJeff Kirsher outb(n, pioaddr + ConfigA); 671f2148a47SJeff Kirsher } else { 672f2148a47SJeff Kirsher n = inb(pioaddr + ConfigD) | 0x80; 673f2148a47SJeff Kirsher outb(n, pioaddr + ConfigD); 674f2148a47SJeff Kirsher } 675f2148a47SJeff Kirsher } 6765b579e21SAlexey Charkov } 6775b579e21SAlexey Charkov 6785b579e21SAlexey Charkov /* Cross-check that selected registers read identically via MMIO and PIO; returns 0 on success or -EIO on the first mismatch. */ static inline int verify_mmio(struct device *hwdev, 6795b579e21SAlexey Charkov long pioaddr, 6805b579e21SAlexey Charkov void __iomem *ioaddr, 6815b579e21SAlexey Charkov u32 quirks) 6825b579e21SAlexey Charkov { 6835b579e21SAlexey Charkov if (quirks & rqNeedEnMMIO) { 6845b579e21SAlexey Charkov int i = 0; 6855b579e21SAlexey Charkov 6865b579e21SAlexey Charkov /* Check that selected MMIO registers match the PIO ones */ 6875b579e21SAlexey Charkov while (mmio_verify_registers[i]) { 6885b579e21SAlexey Charkov int reg = mmio_verify_registers[i++]; 6895b579e21SAlexey Charkov unsigned char a = inb(pioaddr+reg); 6905b579e21SAlexey Charkov unsigned char b = readb(ioaddr+reg); 6915b579e21SAlexey Charkov 6925b579e21SAlexey Charkov if (a != b) { 6935b579e21SAlexey Charkov dev_err(hwdev, 6945b579e21SAlexey Charkov "MMIO do not match PIO [%02x] (%02x != %02x)\n", 6955b579e21SAlexey Charkov reg, a, b); 6965b579e21SAlexey Charkov return -EIO; 6975b579e21SAlexey Charkov } 6985b579e21SAlexey Charkov } 6995b579e21SAlexey Charkov } 7005b579e21SAlexey Charkov return 0; 7015b579e21SAlexey Charkov } 702f2148a47SJeff Kirsher 703f2148a47SJeff Kirsher /*
704f2148a47SJeff Kirsher * Loads bytes 0x00-0x05, 0x6E-0x6F, 0x78-0x7B from EEPROM 705f2148a47SJeff Kirsher * (plus 0x6C for Rhine-I/II) 706f2148a47SJeff Kirsher */ 70776e239e1SBill Pemberton static void rhine_reload_eeprom(long pioaddr, struct net_device *dev) 708f2148a47SJeff Kirsher { 709f2148a47SJeff Kirsher struct rhine_private *rp = netdev_priv(dev); 710f2148a47SJeff Kirsher void __iomem *ioaddr = rp->base; 711a384a33bSFrancois Romieu int i; 712f2148a47SJeff Kirsher 713f2148a47SJeff Kirsher outb(0x20, pioaddr + MACRegEEcsr); 714a384a33bSFrancois Romieu for (i = 0; i < 1024; i++) { 715a384a33bSFrancois Romieu if (!(inb(pioaddr + MACRegEEcsr) & 0x20)) 716a384a33bSFrancois Romieu break; 717a384a33bSFrancois Romieu } 718a384a33bSFrancois Romieu if (i > 512) 719a384a33bSFrancois Romieu pr_info("%4d cycles used @ %s:%d\n", i, __func__, __LINE__); 720f2148a47SJeff Kirsher 721f2148a47SJeff Kirsher /* 722f2148a47SJeff Kirsher * Reloading from EEPROM overwrites ConfigA-D, so we must re-enable 723f2148a47SJeff Kirsher * MMIO. If reloading EEPROM was done first this could be avoided, but 724f2148a47SJeff Kirsher * it is not known if that still works with the "win98-reboot" problem. 
725f2148a47SJeff Kirsher */ 726f2148a47SJeff Kirsher enable_mmio(pioaddr, rp->quirks); 727f2148a47SJeff Kirsher 728f2148a47SJeff Kirsher /* Turn off EEPROM-controlled wake-up (magic packet) */ 729f2148a47SJeff Kirsher if (rp->quirks & rqWOL) 730f2148a47SJeff Kirsher iowrite8(ioread8(ioaddr + ConfigA) & 0xFC, ioaddr + ConfigA); 731f2148a47SJeff Kirsher 732f2148a47SJeff Kirsher } 733f2148a47SJeff Kirsher 734f2148a47SJeff Kirsher #ifdef CONFIG_NET_POLL_CONTROLLER 735f2148a47SJeff Kirsher /* Netpoll hook: run the interrupt handler with the device IRQ disabled. */ static void rhine_poll(struct net_device *dev) 736f2148a47SJeff Kirsher { 73705d334ecSFrancois Romieu struct rhine_private *rp = netdev_priv(dev); 738f7630d18SAlexey Charkov const int irq = rp->irq; 73905d334ecSFrancois Romieu 74005d334ecSFrancois Romieu disable_irq(irq); 74105d334ecSFrancois Romieu rhine_interrupt(irq, dev); 74205d334ecSFrancois Romieu enable_irq(irq); 743f2148a47SJeff Kirsher } 744f2148a47SJeff Kirsher #endif 745f2148a47SJeff Kirsher 746269f3114SFrancois Romieu /* Raise the Tx threshold by 0x20, capped at 0xe0, and program it into TxConfig. */ static void rhine_kick_tx_threshold(struct rhine_private *rp) 747269f3114SFrancois Romieu { 748269f3114SFrancois Romieu if (rp->tx_thresh < 0xe0) { 749269f3114SFrancois Romieu void __iomem *ioaddr = rp->base; 750269f3114SFrancois Romieu 751269f3114SFrancois Romieu rp->tx_thresh += 0x20; 752269f3114SFrancois Romieu BYTE_REG_BITS_SET(rp->tx_thresh, 0x80, ioaddr + TxConfig); 753269f3114SFrancois Romieu } 754269f3114SFrancois Romieu } 755269f3114SFrancois Romieu 7567ab87ff4SFrancois Romieu /* Report Tx error events from @status, bump the Tx threshold where that can help, then restart the transmitter. */ static void rhine_tx_err(struct rhine_private *rp, u32 status) 7577ab87ff4SFrancois Romieu { 7587ab87ff4SFrancois Romieu struct net_device *dev = rp->dev; 7597ab87ff4SFrancois Romieu 7607ab87ff4SFrancois Romieu if (status & IntrTxAborted) { 761fc3e0f8aSFrancois Romieu netif_info(rp, tx_err, dev, 762fc3e0f8aSFrancois Romieu "Abort %08x, frame dropped\n", status); 7637ab87ff4SFrancois Romieu } 7647ab87ff4SFrancois Romieu 7657ab87ff4SFrancois Romieu if (status & IntrTxUnderrun) { 7667ab87ff4SFrancois Romieu
rhine_kick_tx_threshold(rp); 767fc3e0f8aSFrancois Romieu netif_info(rp, tx_err ,dev, "Transmitter underrun, " 768fc3e0f8aSFrancois Romieu "Tx threshold now %02x\n", rp->tx_thresh); 7697ab87ff4SFrancois Romieu } 7707ab87ff4SFrancois Romieu 771fc3e0f8aSFrancois Romieu if (status & IntrTxDescRace) 772fc3e0f8aSFrancois Romieu netif_info(rp, tx_err, dev, "Tx descriptor write-back race\n"); 7737ab87ff4SFrancois Romieu 7747ab87ff4SFrancois Romieu if ((status & IntrTxError) && 7757ab87ff4SFrancois Romieu (status & (IntrTxAborted | IntrTxUnderrun | IntrTxDescRace)) == 0) { 7767ab87ff4SFrancois Romieu rhine_kick_tx_threshold(rp); 777fc3e0f8aSFrancois Romieu netif_info(rp, tx_err, dev, "Unspecified error. " 778fc3e0f8aSFrancois Romieu "Tx threshold now %02x\n", rp->tx_thresh); 7797ab87ff4SFrancois Romieu } 7807ab87ff4SFrancois Romieu 7817ab87ff4SFrancois Romieu rhine_restart_tx(dev); 7827ab87ff4SFrancois Romieu } 7837ab87ff4SFrancois Romieu 7847ab87ff4SFrancois Romieu static void rhine_update_rx_crc_and_missed_errord(struct rhine_private *rp) 7857ab87ff4SFrancois Romieu { 7867ab87ff4SFrancois Romieu void __iomem *ioaddr = rp->base; 7877ab87ff4SFrancois Romieu struct net_device_stats *stats = &rp->dev->stats; 7887ab87ff4SFrancois Romieu 7897ab87ff4SFrancois Romieu stats->rx_crc_errors += ioread16(ioaddr + RxCRCErrs); 7907ab87ff4SFrancois Romieu stats->rx_missed_errors += ioread16(ioaddr + RxMissed); 7917ab87ff4SFrancois Romieu 7927ab87ff4SFrancois Romieu /* 7937ab87ff4SFrancois Romieu * Clears the "tally counters" for CRC errors and missed frames(?). 7947ab87ff4SFrancois Romieu * It has been reported that some chips need a write of 0 to clear 7957ab87ff4SFrancois Romieu * these, for others the counters are set to 1 when written to and 7967ab87ff4SFrancois Romieu * instead cleared when read. So we clear them both ways ... 
7977ab87ff4SFrancois Romieu */ 7987ab87ff4SFrancois Romieu iowrite32(0, ioaddr + RxMissed); 7997ab87ff4SFrancois Romieu ioread16(ioaddr + RxCRCErrs); 8007ab87ff4SFrancois Romieu ioread16(ioaddr + RxMissed); 8017ab87ff4SFrancois Romieu } 8027ab87ff4SFrancois Romieu /* Interrupt event grouping: RHINE_EVENT_NAPI_* are handled in the NAPI poll loop; RHINE_EVENT_SLOW is deferred to slow_event_task. */ 8037ab87ff4SFrancois Romieu #define RHINE_EVENT_NAPI_RX (IntrRxDone | \ 8047ab87ff4SFrancois Romieu IntrRxErr | \ 8057ab87ff4SFrancois Romieu IntrRxEmpty | \ 8067ab87ff4SFrancois Romieu IntrRxOverflow | \ 8077ab87ff4SFrancois Romieu IntrRxDropped | \ 8087ab87ff4SFrancois Romieu IntrRxNoBuf | \ 8097ab87ff4SFrancois Romieu IntrRxWakeUp) 8107ab87ff4SFrancois Romieu 8117ab87ff4SFrancois Romieu #define RHINE_EVENT_NAPI_TX_ERR (IntrTxError | \ 8127ab87ff4SFrancois Romieu IntrTxAborted | \ 8137ab87ff4SFrancois Romieu IntrTxUnderrun | \ 8147ab87ff4SFrancois Romieu IntrTxDescRace) 8157ab87ff4SFrancois Romieu #define RHINE_EVENT_NAPI_TX (IntrTxDone | RHINE_EVENT_NAPI_TX_ERR) 8167ab87ff4SFrancois Romieu 8177ab87ff4SFrancois Romieu #define RHINE_EVENT_NAPI (RHINE_EVENT_NAPI_RX | \ 8187ab87ff4SFrancois Romieu RHINE_EVENT_NAPI_TX | \ 8197ab87ff4SFrancois Romieu IntrStatsMax) 8207ab87ff4SFrancois Romieu #define RHINE_EVENT_SLOW (IntrPCIErr | IntrLinkChange) 8217ab87ff4SFrancois Romieu #define RHINE_EVENT (RHINE_EVENT_NAPI | RHINE_EVENT_SLOW) 8227ab87ff4SFrancois Romieu 823f2148a47SJeff Kirsher /* NAPI poll callback: read and ack fast events, process Rx/Tx work within @budget, hand slow events to the work queue. */ static int rhine_napipoll(struct napi_struct *napi, int budget) 824f2148a47SJeff Kirsher { 825f2148a47SJeff Kirsher struct rhine_private *rp = container_of(napi, struct rhine_private, napi); 826f2148a47SJeff Kirsher struct net_device *dev = rp->dev; 827f2148a47SJeff Kirsher void __iomem *ioaddr = rp->base; 8287ab87ff4SFrancois Romieu u16 enable_mask = RHINE_EVENT & 0xffff; 8297ab87ff4SFrancois Romieu int work_done = 0; 8307ab87ff4SFrancois Romieu u32 status; 831f2148a47SJeff Kirsher 8327ab87ff4SFrancois Romieu status = rhine_get_events(rp); 8337ab87ff4SFrancois Romieu rhine_ack_events(rp, status & ~RHINE_EVENT_SLOW);
8347ab87ff4SFrancois Romieu 8357ab87ff4SFrancois Romieu if (status & RHINE_EVENT_NAPI_RX) 8367ab87ff4SFrancois Romieu work_done += rhine_rx(dev, budget); 8377ab87ff4SFrancois Romieu 8387ab87ff4SFrancois Romieu if (status & RHINE_EVENT_NAPI_TX) { 8397ab87ff4SFrancois Romieu if (status & RHINE_EVENT_NAPI_TX_ERR) { 8407ab87ff4SFrancois Romieu /* Avoid scavenging before Tx engine turned off */ 841a384a33bSFrancois Romieu rhine_wait_bit_low(rp, ChipCmd, CmdTxOn); 842fc3e0f8aSFrancois Romieu if (ioread8(ioaddr + ChipCmd) & CmdTxOn) 843fc3e0f8aSFrancois Romieu netif_warn(rp, tx_err, dev, "Tx still on\n"); 8447ab87ff4SFrancois Romieu } 845fc3e0f8aSFrancois Romieu 8467ab87ff4SFrancois Romieu rhine_tx(dev); 8477ab87ff4SFrancois Romieu 8487ab87ff4SFrancois Romieu if (status & RHINE_EVENT_NAPI_TX_ERR) 8497ab87ff4SFrancois Romieu rhine_tx_err(rp, status); 8507ab87ff4SFrancois Romieu } 8517ab87ff4SFrancois Romieu 8527ab87ff4SFrancois Romieu if (status & IntrStatsMax) { 8537ab87ff4SFrancois Romieu spin_lock(&rp->lock); 8547ab87ff4SFrancois Romieu rhine_update_rx_crc_and_missed_errord(rp); 8557ab87ff4SFrancois Romieu spin_unlock(&rp->lock); 8567ab87ff4SFrancois Romieu } 8577ab87ff4SFrancois Romieu 8587ab87ff4SFrancois Romieu if (status & RHINE_EVENT_SLOW) { 8597ab87ff4SFrancois Romieu enable_mask &= ~RHINE_EVENT_SLOW; 8607ab87ff4SFrancois Romieu schedule_work(&rp->slow_event_task); 8617ab87ff4SFrancois Romieu } 862f2148a47SJeff Kirsher 863f2148a47SJeff Kirsher if (work_done < budget) { 864f2148a47SJeff Kirsher napi_complete(napi); 8657ab87ff4SFrancois Romieu iowrite16(enable_mask, ioaddr + IntrEnable); 8667ab87ff4SFrancois Romieu mmiowb(); 867f2148a47SJeff Kirsher } 868f2148a47SJeff Kirsher return work_done; 869f2148a47SJeff Kirsher } 870f2148a47SJeff Kirsher 87176e239e1SBill Pemberton static void rhine_hw_init(struct net_device *dev, long pioaddr) 872f2148a47SJeff Kirsher { 873f2148a47SJeff Kirsher struct rhine_private *rp = netdev_priv(dev); 874f2148a47SJeff Kirsher 
875f2148a47SJeff Kirsher /* Reset the chip to erase previous misconfiguration. */ 876f2148a47SJeff Kirsher rhine_chip_reset(dev); 877f2148a47SJeff Kirsher 878f2148a47SJeff Kirsher /* Rhine-I needs extra time to recuperate before EEPROM reload */ 879f2148a47SJeff Kirsher if (rp->quirks & rqRhineI) 880f2148a47SJeff Kirsher msleep(5); 881f2148a47SJeff Kirsher 882f2148a47SJeff Kirsher /* Reload EEPROM controlled bytes cleared by soft reset */ 8832d283862SAlexey Charkov if (dev_is_pci(dev->dev.parent)) 884f2148a47SJeff Kirsher rhine_reload_eeprom(pioaddr, dev); 885f2148a47SJeff Kirsher } 886f2148a47SJeff Kirsher /* Net device callbacks, shared by the PCI and platform probe paths. */ 887f2148a47SJeff Kirsher static const struct net_device_ops rhine_netdev_ops = { 888f2148a47SJeff Kirsher .ndo_open = rhine_open, 889f2148a47SJeff Kirsher .ndo_stop = rhine_close, 890f2148a47SJeff Kirsher .ndo_start_xmit = rhine_start_tx, 891f7b5d1b9SJamie Gloudon .ndo_get_stats64 = rhine_get_stats64, 892afc4b13dSJiri Pirko .ndo_set_rx_mode = rhine_set_rx_mode, 893f2148a47SJeff Kirsher .ndo_change_mtu = eth_change_mtu, 894f2148a47SJeff Kirsher .ndo_validate_addr = eth_validate_addr, 895f2148a47SJeff Kirsher .ndo_set_mac_address = eth_mac_addr, 896f2148a47SJeff Kirsher .ndo_do_ioctl = netdev_ioctl, 897f2148a47SJeff Kirsher .ndo_tx_timeout = rhine_tx_timeout, 898f2148a47SJeff Kirsher .ndo_vlan_rx_add_vid = rhine_vlan_rx_add_vid, 899f2148a47SJeff Kirsher .ndo_vlan_rx_kill_vid = rhine_vlan_rx_kill_vid, 900f2148a47SJeff Kirsher #ifdef CONFIG_NET_POLL_CONTROLLER 901f2148a47SJeff Kirsher .ndo_poll_controller = rhine_poll, 902f2148a47SJeff Kirsher #endif 903f2148a47SJeff Kirsher }; 904f2148a47SJeff Kirsher 905ca8b6e04SAlexey Charkov /* Bus-independent probe: set up DMA mask, allocate the netdev, read the MAC address, and register the device. */ static int rhine_init_one_common(struct device *hwdev, u32 quirks, 9062d283862SAlexey Charkov long pioaddr, void __iomem *ioaddr, int irq) 907f2148a47SJeff Kirsher { 908f2148a47SJeff Kirsher struct net_device *dev; 909f2148a47SJeff Kirsher struct rhine_private *rp; 9102d283862SAlexey Charkov int i, rc, phy_id; 911f2148a47SJeff Kirsher const
char *name; 912f2148a47SJeff Kirsher 913f2148a47SJeff Kirsher /* this should always be supported */ 914f7630d18SAlexey Charkov rc = dma_set_mask(hwdev, DMA_BIT_MASK(32)); 915f2148a47SJeff Kirsher if (rc) { 916f7630d18SAlexey Charkov dev_err(hwdev, "32-bit DMA addresses not supported by the card!?\n"); 9172d283862SAlexey Charkov goto err_out; 918f2148a47SJeff Kirsher } 919f2148a47SJeff Kirsher 920f2148a47SJeff Kirsher dev = alloc_etherdev(sizeof(struct rhine_private)); 921f2148a47SJeff Kirsher if (!dev) { 922f2148a47SJeff Kirsher rc = -ENOMEM; 9232d283862SAlexey Charkov goto err_out; 924f2148a47SJeff Kirsher } 925f7630d18SAlexey Charkov SET_NETDEV_DEV(dev, hwdev); 926f2148a47SJeff Kirsher 927f2148a47SJeff Kirsher rp = netdev_priv(dev); 928f2148a47SJeff Kirsher rp->dev = dev; 929ca8b6e04SAlexey Charkov rp->quirks = quirks; 930f2148a47SJeff Kirsher rp->pioaddr = pioaddr; 9312d283862SAlexey Charkov rp->base = ioaddr; 9322d283862SAlexey Charkov rp->irq = irq; 933fc3e0f8aSFrancois Romieu rp->msg_enable = netif_msg_init(debug, RHINE_MSG_DEFAULT); 934f2148a47SJeff Kirsher 935ca8b6e04SAlexey Charkov phy_id = rp->quirks & rqIntPHY ? 
1 : 0; 936f2148a47SJeff Kirsher 937827da44cSJohn Stultz u64_stats_init(&rp->tx_stats.syncp); 938827da44cSJohn Stultz u64_stats_init(&rp->rx_stats.syncp); 939827da44cSJohn Stultz 940f2148a47SJeff Kirsher /* Get chip registers into a sane state */ 941f2148a47SJeff Kirsher rhine_power_init(dev); 942f2148a47SJeff Kirsher rhine_hw_init(dev, pioaddr); 943f2148a47SJeff Kirsher 944f2148a47SJeff Kirsher for (i = 0; i < 6; i++) 945f2148a47SJeff Kirsher dev->dev_addr[i] = ioread8(ioaddr + StationAddr + i); 946f2148a47SJeff Kirsher 947f2148a47SJeff Kirsher if (!is_valid_ether_addr(dev->dev_addr)) { 948f2148a47SJeff Kirsher /* Report it and use a random ethernet address instead */ 949f2148a47SJeff Kirsher netdev_err(dev, "Invalid MAC address: %pM\n", dev->dev_addr); 950f2cedb63SDanny Kukawka eth_hw_addr_random(dev); 951f2148a47SJeff Kirsher netdev_info(dev, "Using random MAC address: %pM\n", 952f2148a47SJeff Kirsher dev->dev_addr); 953f2148a47SJeff Kirsher } 954f2148a47SJeff Kirsher 955f2148a47SJeff Kirsher /* For Rhine-I/II, phy_id is loaded from EEPROM */ 956f2148a47SJeff Kirsher if (!phy_id) 957f2148a47SJeff Kirsher phy_id = ioread8(ioaddr + 0x6C); 958f2148a47SJeff Kirsher 959f2148a47SJeff Kirsher spin_lock_init(&rp->lock); 9607ab87ff4SFrancois Romieu mutex_init(&rp->task_lock); 961f2148a47SJeff Kirsher INIT_WORK(&rp->reset_task, rhine_reset_task); 9627ab87ff4SFrancois Romieu INIT_WORK(&rp->slow_event_task, rhine_slow_event_task); 963f2148a47SJeff Kirsher 964f2148a47SJeff Kirsher rp->mii_if.dev = dev; 965f2148a47SJeff Kirsher rp->mii_if.mdio_read = mdio_read; 966f2148a47SJeff Kirsher rp->mii_if.mdio_write = mdio_write; 967f2148a47SJeff Kirsher rp->mii_if.phy_id_mask = 0x1f; 968f2148a47SJeff Kirsher rp->mii_if.reg_num_mask = 0x1f; 969f2148a47SJeff Kirsher 970f2148a47SJeff Kirsher /* The chip-specific entries in the device structure. 
*/ 971f2148a47SJeff Kirsher dev->netdev_ops = &rhine_netdev_ops; 972e76070f2Swangweidong dev->ethtool_ops = &netdev_ethtool_ops; 973f2148a47SJeff Kirsher dev->watchdog_timeo = TX_TIMEOUT; 974f2148a47SJeff Kirsher 975f2148a47SJeff Kirsher netif_napi_add(dev, &rp->napi, rhine_napipoll, 64); 976f2148a47SJeff Kirsher 977f2148a47SJeff Kirsher if (rp->quirks & rqRhineI) 978f2148a47SJeff Kirsher dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM; 979f2148a47SJeff Kirsher 980ca8b6e04SAlexey Charkov if (rp->quirks & rqMgmt) 981f646968fSPatrick McHardy dev->features |= NETIF_F_HW_VLAN_CTAG_TX | 982f646968fSPatrick McHardy NETIF_F_HW_VLAN_CTAG_RX | 983f646968fSPatrick McHardy NETIF_F_HW_VLAN_CTAG_FILTER; 984f2148a47SJeff Kirsher 985f2148a47SJeff Kirsher /* dev->name not defined before register_netdev()! */ 986f2148a47SJeff Kirsher rc = register_netdev(dev); 987f2148a47SJeff Kirsher if (rc) 9882d283862SAlexey Charkov goto err_out_free_netdev; 989f2148a47SJeff Kirsher 990ca8b6e04SAlexey Charkov if (rp->quirks & rqRhineI) 991ca8b6e04SAlexey Charkov name = "Rhine"; 992ca8b6e04SAlexey Charkov else if (rp->quirks & rqStatusWBRace) 993ca8b6e04SAlexey Charkov name = "Rhine II"; 994ca8b6e04SAlexey Charkov else if (rp->quirks & rqMgmt) 995ca8b6e04SAlexey Charkov name = "Rhine III (Management Adapter)"; 996ca8b6e04SAlexey Charkov else 997ca8b6e04SAlexey Charkov name = "Rhine III"; 998ca8b6e04SAlexey Charkov 999f2148a47SJeff Kirsher netdev_info(dev, "VIA %s at 0x%lx, %pM, IRQ %d\n", 10002d283862SAlexey Charkov name, (long)ioaddr, dev->dev_addr, rp->irq); 1001f2148a47SJeff Kirsher 1002f7630d18SAlexey Charkov dev_set_drvdata(hwdev, dev); 1003f2148a47SJeff Kirsher 1004f2148a47SJeff Kirsher { 1005f2148a47SJeff Kirsher u16 mii_cmd; 1006f2148a47SJeff Kirsher int mii_status = mdio_read(dev, phy_id, 1); 1007f2148a47SJeff Kirsher mii_cmd = mdio_read(dev, phy_id, MII_BMCR) & ~BMCR_ISOLATE; 1008f2148a47SJeff Kirsher mdio_write(dev, phy_id, MII_BMCR, mii_cmd); 1009f2148a47SJeff Kirsher if (mii_status != 
0xffff && mii_status != 0x0000) { 1010f2148a47SJeff Kirsher rp->mii_if.advertising = mdio_read(dev, phy_id, 4); 1011f2148a47SJeff Kirsher netdev_info(dev, 1012f2148a47SJeff Kirsher "MII PHY found at address %d, status 0x%04x advertising %04x Link %04x\n", 1013f2148a47SJeff Kirsher phy_id, 1014f2148a47SJeff Kirsher mii_status, rp->mii_if.advertising, 1015f2148a47SJeff Kirsher mdio_read(dev, phy_id, 5)); 1016f2148a47SJeff Kirsher 1017f2148a47SJeff Kirsher /* set IFF_RUNNING */ 1018f2148a47SJeff Kirsher if (mii_status & BMSR_LSTATUS) 1019f2148a47SJeff Kirsher netif_carrier_on(dev); 1020f2148a47SJeff Kirsher else 1021f2148a47SJeff Kirsher netif_carrier_off(dev); 1022f2148a47SJeff Kirsher 1023f2148a47SJeff Kirsher } 1024f2148a47SJeff Kirsher } 1025f2148a47SJeff Kirsher rp->mii_if.phy_id = phy_id; 1026fc3e0f8aSFrancois Romieu if (avoid_D3) 1027fc3e0f8aSFrancois Romieu netif_info(rp, probe, dev, "No D3 power state at shutdown\n"); 1028f2148a47SJeff Kirsher 1029f2148a47SJeff Kirsher return 0; 1030f2148a47SJeff Kirsher 10312d283862SAlexey Charkov err_out_free_netdev: 10322d283862SAlexey Charkov free_netdev(dev); 10332d283862SAlexey Charkov err_out: 10342d283862SAlexey Charkov return rc; 10352d283862SAlexey Charkov } 10362d283862SAlexey Charkov 10372d283862SAlexey Charkov static int rhine_init_one_pci(struct pci_dev *pdev, 10382d283862SAlexey Charkov const struct pci_device_id *ent) 10392d283862SAlexey Charkov { 10402d283862SAlexey Charkov struct device *hwdev = &pdev->dev; 10415b579e21SAlexey Charkov int rc; 10422d283862SAlexey Charkov long pioaddr, memaddr; 10432d283862SAlexey Charkov void __iomem *ioaddr; 10442d283862SAlexey Charkov int io_size = pdev->revision < VTunknown0 ? 128 : 256; 10455b579e21SAlexey Charkov 10465b579e21SAlexey Charkov /* This driver was written to use PCI memory space. Some early versions 10475b579e21SAlexey Charkov * of the Rhine may only work correctly with I/O space accesses. 
10485b579e21SAlexey Charkov * TODO: determine for which revisions this is true and assign the flag 10495b579e21SAlexey Charkov * in code as opposed to this Kconfig option (???) 10505b579e21SAlexey Charkov */ 10515b579e21SAlexey Charkov #ifdef CONFIG_VIA_RHINE_MMIO 10525b579e21SAlexey Charkov u32 quirks = rqNeedEnMMIO; 10532d283862SAlexey Charkov #else 10545b579e21SAlexey Charkov u32 quirks = 0; 10552d283862SAlexey Charkov #endif 10562d283862SAlexey Charkov 10572d283862SAlexey Charkov /* when built into the kernel, we only print version if device is found */ 10582d283862SAlexey Charkov #ifndef MODULE 10592d283862SAlexey Charkov pr_info_once("%s\n", version); 10602d283862SAlexey Charkov #endif 10612d283862SAlexey Charkov 10622d283862SAlexey Charkov rc = pci_enable_device(pdev); 10632d283862SAlexey Charkov if (rc) 10642d283862SAlexey Charkov goto err_out; 10652d283862SAlexey Charkov 1066ca8b6e04SAlexey Charkov if (pdev->revision < VTunknown0) { 10675b579e21SAlexey Charkov quirks |= rqRhineI; 1068ca8b6e04SAlexey Charkov } else if (pdev->revision >= VT6102) { 10695b579e21SAlexey Charkov quirks |= rqWOL | rqForceReset; 1070ca8b6e04SAlexey Charkov if (pdev->revision < VT6105) { 1071ca8b6e04SAlexey Charkov quirks |= rqStatusWBRace; 1072ca8b6e04SAlexey Charkov } else { 1073ca8b6e04SAlexey Charkov quirks |= rqIntPHY; 1074ca8b6e04SAlexey Charkov if (pdev->revision >= VT6105_B0) 1075ca8b6e04SAlexey Charkov quirks |= rq6patterns; 1076ca8b6e04SAlexey Charkov if (pdev->revision >= VT6105M) 1077ca8b6e04SAlexey Charkov quirks |= rqMgmt; 1078ca8b6e04SAlexey Charkov } 1079ca8b6e04SAlexey Charkov } 1080ca8b6e04SAlexey Charkov 10812d283862SAlexey Charkov /* sanity check */ 10822d283862SAlexey Charkov if ((pci_resource_len(pdev, 0) < io_size) || 10832d283862SAlexey Charkov (pci_resource_len(pdev, 1) < io_size)) { 10842d283862SAlexey Charkov rc = -EIO; 10852d283862SAlexey Charkov dev_err(hwdev, "Insufficient PCI resources, aborting\n"); 10862d283862SAlexey Charkov goto 
err_out_pci_disable; 10872d283862SAlexey Charkov } 10882d283862SAlexey Charkov 10892d283862SAlexey Charkov pioaddr = pci_resource_start(pdev, 0); 10902d283862SAlexey Charkov memaddr = pci_resource_start(pdev, 1); 10912d283862SAlexey Charkov 10922d283862SAlexey Charkov pci_set_master(pdev); 10932d283862SAlexey Charkov 10942d283862SAlexey Charkov rc = pci_request_regions(pdev, DRV_NAME); 10952d283862SAlexey Charkov if (rc) 10962d283862SAlexey Charkov goto err_out_pci_disable; 10972d283862SAlexey Charkov 10985b579e21SAlexey Charkov ioaddr = pci_iomap(pdev, (quirks & rqNeedEnMMIO ? 1 : 0), io_size); 10992d283862SAlexey Charkov if (!ioaddr) { 11002d283862SAlexey Charkov rc = -EIO; 11012d283862SAlexey Charkov dev_err(hwdev, 11022d283862SAlexey Charkov "ioremap failed for device %s, region 0x%X @ 0x%lX\n", 11032d283862SAlexey Charkov dev_name(hwdev), io_size, memaddr); 11042d283862SAlexey Charkov goto err_out_free_res; 11052d283862SAlexey Charkov } 11062d283862SAlexey Charkov 11072d283862SAlexey Charkov enable_mmio(pioaddr, quirks); 11082d283862SAlexey Charkov 11095b579e21SAlexey Charkov rc = verify_mmio(hwdev, pioaddr, ioaddr, quirks); 11105b579e21SAlexey Charkov if (rc) 11112d283862SAlexey Charkov goto err_out_unmap; 11122d283862SAlexey Charkov 1113ca8b6e04SAlexey Charkov rc = rhine_init_one_common(&pdev->dev, quirks, 11142d283862SAlexey Charkov pioaddr, ioaddr, pdev->irq); 11152d283862SAlexey Charkov if (!rc) 11162d283862SAlexey Charkov return 0; 11172d283862SAlexey Charkov 1118f2148a47SJeff Kirsher err_out_unmap: 1119f2148a47SJeff Kirsher pci_iounmap(pdev, ioaddr); 1120f2148a47SJeff Kirsher err_out_free_res: 1121f2148a47SJeff Kirsher pci_release_regions(pdev); 1122ae996154SRoger Luethi err_out_pci_disable: 1123ae996154SRoger Luethi pci_disable_device(pdev); 1124f2148a47SJeff Kirsher err_out: 1125f2148a47SJeff Kirsher return rc; 1126f2148a47SJeff Kirsher } 1127f2148a47SJeff Kirsher 11282d283862SAlexey Charkov static int rhine_init_one_platform(struct platform_device 
						  *pdev)
{
	/* OF (device-tree) probe path: map the register block, resolve the
	 * IRQ and the per-chip quirk flags from the match table, then fall
	 * through to the bus-independent init. */
	const struct of_device_id *match;
	const u32 *quirks;
	int irq;
	struct resource *res;
	void __iomem *ioaddr;

	match = of_match_device(rhine_of_tbl, &pdev->dev);
	if (!match)
		return -EINVAL;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	ioaddr = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(ioaddr))
		return PTR_ERR(ioaddr);

	irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
	if (!irq)
		return -EINVAL;

	/* Quirk flags are carried as a u32 in the OF match data. */
	quirks = match->data;
	if (!quirks)
		return -EINVAL;

	return rhine_init_one_common(&pdev->dev, *quirks,
				     (long)ioaddr, ioaddr, irq);
}

/*
 * Allocate one coherent DMA block holding both descriptor rings: the Rx
 * ring first, the Tx ring immediately behind it.  On Rhine-I an extra
 * bounce-buffer area (rp->tx_bufs) is allocated for Tx.
 * Returns 0 on success or -ENOMEM.
 */
static int alloc_ring(struct net_device* dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	struct device *hwdev = dev->dev.parent;
	void *ring;
	dma_addr_t ring_dma;

	ring = dma_alloc_coherent(hwdev,
				  RX_RING_SIZE * sizeof(struct rx_desc) +
				  TX_RING_SIZE * sizeof(struct tx_desc),
				  &ring_dma,
				  GFP_ATOMIC);
	if (!ring) {
		netdev_err(dev, "Could not allocate DMA memory\n");
		return -ENOMEM;
	}
	if (rp->quirks & rqRhineI) {
		/* Rhine-I needs a per-slot aligned Tx bounce buffer area. */
		rp->tx_bufs = dma_alloc_coherent(hwdev,
						 PKT_BUF_SZ * TX_RING_SIZE,
						 &rp->tx_bufs_dma,
						 GFP_ATOMIC);
		if (rp->tx_bufs == NULL) {
			/* Unwind the ring allocation above. */
			dma_free_coherent(hwdev,
					  RX_RING_SIZE * sizeof(struct rx_desc) +
					  TX_RING_SIZE * sizeof(struct tx_desc),
					  ring, ring_dma);
			return -ENOMEM;
		}
	}

	/* Carve the single block into the two rings (CPU and bus views). */
	rp->rx_ring = ring;
	rp->tx_ring = ring + RX_RING_SIZE * sizeof(struct rx_desc);
	rp->rx_ring_dma = ring_dma;
	rp->tx_ring_dma = ring_dma + RX_RING_SIZE * sizeof(struct rx_desc);

	return 0;
}

/* Release the descriptor rings and, if present, the Rhine-I Tx bounce
 * buffers allocated by alloc_ring(). */
static void free_ring(struct net_device* dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	struct device *hwdev = dev->dev.parent;

	dma_free_coherent(hwdev,
			  RX_RING_SIZE * sizeof(struct rx_desc) +
			  TX_RING_SIZE * sizeof(struct tx_desc),
			  rp->rx_ring, rp->rx_ring_dma);
	rp->tx_ring = NULL;

	if (rp->tx_bufs)
		dma_free_coherent(hwdev, PKT_BUF_SZ * TX_RING_SIZE,
				  rp->tx_bufs, rp->tx_bufs_dma);

	rp->tx_bufs = NULL;

}

/* An skb together with its streaming DMA mapping, used while (re)filling
 * the Rx ring. */
struct rhine_skb_dma {
	struct sk_buff *skb;
	dma_addr_t dma;
};

/*
 * Allocate one Rx skb of rp->rx_buf_sz bytes and map it for device
 * (DMA_FROM_DEVICE) access.  On mapping failure the skb is freed again.
 * Returns 0, -ENOMEM (alloc) or -EIO (mapping).
 */
static inline int rhine_skb_dma_init(struct net_device *dev,
				     struct rhine_skb_dma *sd)
{
	struct rhine_private *rp = netdev_priv(dev);
	struct device *hwdev = dev->dev.parent;
	const int size = rp->rx_buf_sz;

	sd->skb = netdev_alloc_skb(dev, size);
	if (!sd->skb)
		return -ENOMEM;

	sd->dma = dma_map_single(hwdev, sd->skb->data, size, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(hwdev, sd->dma))) {
		netif_err(rp, drv, dev, "Rx DMA mapping failure\n");
		dev_kfree_skb_any(sd->skb);
		return -EIO;
	}

	return 0;
}

/* Rewind the Rx ring to slot 0 and hand every descriptor back to the NIC
 * by setting DescOwn. */
static void rhine_reset_rbufs(struct rhine_private *rp)
{
	int i;

	rp->cur_rx = 0;

	for (i = 0; i < RX_RING_SIZE; i++)
		rp->rx_ring[i].rx_status = cpu_to_le32(DescOwn);
}

/* Record an allocated+mapped Rx skb in ring slot @entry and publish its
 * bus address in the descriptor. */
static inline void rhine_skb_dma_nic_store(struct rhine_private *rp,
					   struct rhine_skb_dma *sd, int entry)
{
	rp->rx_skbuff_dma[entry] = sd->dma;
	rp->rx_skbuff[entry] = sd->skb;

	rp->rx_ring[entry].addr = cpu_to_le32(sd->dma);
	/* Make the buffer address visible to the device before ownership is
	 * later granted via DescOwn (see rhine_reset_rbufs). */
	dma_wmb();
}

static void free_rbufs(struct net_device* dev);

/*
 * Initialize the Rx descriptor ring (chained, last entry wrapping back to
 * the first) and populate every slot with a freshly mapped skb.  On any
 * allocation/mapping failure all already-attached skbs are released.
 * Returns 0 or a negative errno from rhine_skb_dma_init().
 */
static int alloc_rbufs(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	dma_addr_t next;
	int rc, i;

	/* Buffer size follows the MTU; PKT_BUF_SZ covers standard frames. */
	rp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
	next = rp->rx_ring_dma;

	/* Init the ring entries */
	for (i = 0; i < RX_RING_SIZE; i++) {
		rp->rx_ring[i].rx_status = 0;
		rp->rx_ring[i].desc_length = cpu_to_le32(rp->rx_buf_sz);
		next += sizeof(struct rx_desc);
		rp->rx_ring[i].next_desc = cpu_to_le32(next);
		rp->rx_skbuff[i] = NULL;
	}
	/* Mark the last entry as wrapping the ring. */
	rp->rx_ring[i-1].next_desc = cpu_to_le32(rp->rx_ring_dma);

	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct rhine_skb_dma sd;

		rc = rhine_skb_dma_init(dev, &sd);
		if (rc < 0) {
			free_rbufs(dev);
			goto out;
		}

		rhine_skb_dma_nic_store(rp, &sd, i);
	}

	rhine_reset_rbufs(rp);
out:
	/* On success rc is 0 from the last rhine_skb_dma_init() call. */
	return rc;
}

/* Undo alloc_rbufs(): unmap and free every Rx skb and poison the
 * descriptor addresses. */
static void free_rbufs(struct net_device* dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	struct device *hwdev = dev->dev.parent;
	int i;

	/* Free all the skbuffs in the Rx queue. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		rp->rx_ring[i].rx_status = 0;
		rp->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
		if (rp->rx_skbuff[i]) {
			dma_unmap_single(hwdev,
					 rp->rx_skbuff_dma[i],
					 rp->rx_buf_sz, DMA_FROM_DEVICE);
			dev_kfree_skb(rp->rx_skbuff[i]);
		}
		rp->rx_skbuff[i] = NULL;
	}
}

/*
 * Initialize the (chained, wrapping) Tx descriptor ring.  No skbs are
 * attached here; on Rhine-I each slot is pointed at its share of the
 * pre-allocated bounce-buffer area instead.
 */
static void alloc_tbufs(struct net_device* dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	dma_addr_t next;
	int i;

	rp->dirty_tx = rp->cur_tx = 0;
	next = rp->tx_ring_dma;
	for (i = 0; i < TX_RING_SIZE; i++) {
		rp->tx_skbuff[i] = NULL;
		rp->tx_ring[i].tx_status = 0;
		rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
		next += sizeof(struct tx_desc);
		rp->tx_ring[i].next_desc = cpu_to_le32(next);
		if (rp->quirks & rqRhineI)
			rp->tx_buf[i] = &rp->tx_bufs[i * PKT_BUF_SZ];
	}
	/* Last descriptor wraps back to the start of the ring. */
	rp->tx_ring[i-1].next_desc = cpu_to_le32(rp->tx_ring_dma);

	/* Keep BQL accounting in sync with the emptied ring. */
	netdev_reset_queue(dev);
}

/* Release all in-flight Tx skbs (unmapping those with a streaming DMA
 * mapping) and poison the descriptors. */
static void free_tbufs(struct net_device* dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	struct device *hwdev = dev->dev.parent;
	int i;

	for (i = 0; i < TX_RING_SIZE; i++) {
		rp->tx_ring[i].tx_status = 0;
		rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
		rp->tx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
		if (rp->tx_skbuff[i]) {
			/* tx_skbuff_dma is 0 for Rhine-I bounce-buffer slots,
			 * which have no streaming mapping to undo. */
			if (rp->tx_skbuff_dma[i]) {
				dma_unmap_single(hwdev,
						 rp->tx_skbuff_dma[i],
						 rp->tx_skbuff[i]->len,
						 DMA_TO_DEVICE);
			}
			dev_kfree_skb(rp->tx_skbuff[i]);
		}
		rp->tx_skbuff[i] = NULL;
		rp->tx_buf[i] = NULL;
	}
}

/*
 * Re-check link/duplex via the MII library (unless duplex is forced) and
 * mirror the resulting duplex setting into the chip's ChipCmd1 register.
 * @init_media: non-zero on the first check after (re)initialization.
 */
static void rhine_check_media(struct net_device *dev, unsigned int init_media)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	if (!rp->mii_if.force_media)
		mii_check_media(&rp->mii_if, netif_msg_link(rp), init_media);

	if (rp->mii_if.full_duplex)
		iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1FDuplex,
			 ioaddr + ChipCmd1);
	else
		iowrite8(ioread8(ioaddr + ChipCmd1) & ~Cmd1FDuplex,
			 ioaddr + ChipCmd1);

	netif_info(rp, link, dev, "force_media %d, carrier %d\n",
		   rp->mii_if.force_media, netif_carrier_ok(dev));
}

/* Called after status of force_media possibly changed */
static void rhine_set_carrier(struct mii_if_info *mii)
{
	struct net_device *dev = mii->dev;
	struct rhine_private *rp = netdev_priv(dev);

	if (mii->force_media) {
		/* autoneg is off: Link is always assumed to be up */
		if (!netif_carrier_ok(dev))
			netif_carrier_on(dev);
	}

	rhine_check_media(dev, 0);

	/* NOTE(review): rhine_check_media() already logs the same line;
	 * this second message appears intentionally kept for this path. */
	netif_info(rp, link, dev, "force_media %d, carrier %d\n",
		   mii->force_media, netif_carrier_ok(dev));
}

/**
 * rhine_set_cam - set CAM multicast filters
 * @ioaddr: register block of this Rhine
 * @idx: multicast CAM index [0..MCAM_SIZE-1]
 * @addr: multicast address (6 bytes)
 *
 * Load addresses into multicast filters.
 */
static void rhine_set_cam(void __iomem *ioaddr, int idx, u8 *addr)
{
	int i;

	/* Enable CAM access before programming the entry. */
	iowrite8(CAMC_CAMEN, ioaddr + CamCon);
	wmb();

	/* Paranoid -- idx out of range should never happen */
	idx &= (MCAM_SIZE - 1);

	iowrite8((u8) idx, ioaddr + CamAddr);

	/* Write the 6-byte MAC address one byte at a time. */
	for (i = 0; i < 6; i++, addr++)
		iowrite8(*addr, ioaddr + MulticastFilter0 + i);
	udelay(10);
	wmb();

	/* Commit the entry, then drop out of CAM access mode. */
	iowrite8(CAMC_CAMWR | CAMC_CAMEN, ioaddr + CamCon);
	udelay(10);

	iowrite8(0, ioaddr + CamCon);
}

/**
 * rhine_set_vlan_cam - set CAM VLAN filters
 * @ioaddr: register block of this Rhine
 * @idx: VLAN CAM index [0..VCAM_SIZE-1]
 * @addr: VLAN ID (2 bytes)
 *
 * Load addresses into VLAN filters.
 */
static void rhine_set_vlan_cam(void __iomem *ioaddr, int idx, u8 *addr)
{
	/* CAMC_VCAMSL selects the VLAN CAM bank instead of the MAC one. */
	iowrite8(CAMC_CAMEN | CAMC_VCAMSL, ioaddr + CamCon);
	wmb();

	/* Paranoid -- idx out of range should never happen */
	idx &= (VCAM_SIZE - 1);

	iowrite8((u8) idx, ioaddr + CamAddr);

	/* The 16-bit VLAN ID goes right behind the 6 MAC filter bytes. */
	iowrite16(*((u16 *) addr), ioaddr + MulticastFilter0 + 6);
	udelay(10);
	wmb();

	/* Commit the entry, then drop out of CAM access mode. */
	iowrite8(CAMC_CAMWR | CAMC_CAMEN, ioaddr + CamCon);
	udelay(10);

	iowrite8(0, ioaddr + CamCon);
}

/**
 * rhine_set_cam_mask - set multicast CAM mask
 * @ioaddr: register block of this Rhine
 * @mask: multicast CAM mask
 *
 * Mask sets multicast filters active/inactive.
 */
static void rhine_set_cam_mask(void __iomem *ioaddr, u32 mask)
{
	/* Enter CAM access mode (MAC bank). */
	iowrite8(CAMC_CAMEN, ioaddr + CamCon);
	wmb();

	/* write mask */
	iowrite32(mask, ioaddr + CamMask);

	/* disable CAMEN */
	iowrite8(0, ioaddr + CamCon);
}

/**
 * rhine_set_vlan_cam_mask - set VLAN CAM mask
 * @ioaddr: register block of this Rhine
 * @mask: VLAN CAM mask
 *
 * Mask sets VLAN filters active/inactive.
 */
static void rhine_set_vlan_cam_mask(void __iomem *ioaddr, u32 mask)
{
	/* Enter CAM access mode, VLAN bank (CAMC_VCAMSL). */
	iowrite8(CAMC_CAMEN | CAMC_VCAMSL, ioaddr + CamCon);
	wmb();

	/* write mask */
	iowrite32(mask, ioaddr + CamMask);

	/* disable CAMEN */
	iowrite8(0, ioaddr + CamCon);
}

/**
 * rhine_init_cam_filter - initialize CAM filters
 * @dev: network device
 *
 * Initialize (disable) hardware VLAN and multicast support on this
 * Rhine.
 */
static void rhine_init_cam_filter(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	/* Disable all CAMs */
	rhine_set_vlan_cam_mask(ioaddr, 0);
	rhine_set_cam_mask(ioaddr, 0);

	/* disable hardware VLAN support */
	BYTE_REG_BITS_ON(TCR_PQEN, ioaddr + TxConfig);
	BYTE_REG_BITS_OFF(BCR1_VIDFR, ioaddr + PCIBusConfig1);
}

/**
 * rhine_update_vcam - update VLAN CAM filters
 * @dev: network device whose rhine_private holds the active VLAN set
 *
 * Update VLAN CAM filters to match configuration change.
1526f2148a47SJeff Kirsher */ 1527f2148a47SJeff Kirsher static void rhine_update_vcam(struct net_device *dev) 1528f2148a47SJeff Kirsher { 1529f2148a47SJeff Kirsher struct rhine_private *rp = netdev_priv(dev); 1530f2148a47SJeff Kirsher void __iomem *ioaddr = rp->base; 1531f2148a47SJeff Kirsher u16 vid; 1532f2148a47SJeff Kirsher u32 vCAMmask = 0; /* 32 vCAMs (6105M and better) */ 1533f2148a47SJeff Kirsher unsigned int i = 0; 1534f2148a47SJeff Kirsher 1535f2148a47SJeff Kirsher for_each_set_bit(vid, rp->active_vlans, VLAN_N_VID) { 1536f2148a47SJeff Kirsher rhine_set_vlan_cam(ioaddr, i, (u8 *)&vid); 1537f2148a47SJeff Kirsher vCAMmask |= 1 << i; 1538f2148a47SJeff Kirsher if (++i >= VCAM_SIZE) 1539f2148a47SJeff Kirsher break; 1540f2148a47SJeff Kirsher } 1541f2148a47SJeff Kirsher rhine_set_vlan_cam_mask(ioaddr, vCAMmask); 1542f2148a47SJeff Kirsher } 1543f2148a47SJeff Kirsher 154480d5c368SPatrick McHardy static int rhine_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid) 1545f2148a47SJeff Kirsher { 1546f2148a47SJeff Kirsher struct rhine_private *rp = netdev_priv(dev); 1547f2148a47SJeff Kirsher 15487ab87ff4SFrancois Romieu spin_lock_bh(&rp->lock); 1549f2148a47SJeff Kirsher set_bit(vid, rp->active_vlans); 1550f2148a47SJeff Kirsher rhine_update_vcam(dev); 15517ab87ff4SFrancois Romieu spin_unlock_bh(&rp->lock); 15528e586137SJiri Pirko return 0; 1553f2148a47SJeff Kirsher } 1554f2148a47SJeff Kirsher 155580d5c368SPatrick McHardy static int rhine_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid) 1556f2148a47SJeff Kirsher { 1557f2148a47SJeff Kirsher struct rhine_private *rp = netdev_priv(dev); 1558f2148a47SJeff Kirsher 15597ab87ff4SFrancois Romieu spin_lock_bh(&rp->lock); 1560f2148a47SJeff Kirsher clear_bit(vid, rp->active_vlans); 1561f2148a47SJeff Kirsher rhine_update_vcam(dev); 15627ab87ff4SFrancois Romieu spin_unlock_bh(&rp->lock); 15638e586137SJiri Pirko return 0; 1564f2148a47SJeff Kirsher } 1565f2148a47SJeff Kirsher 1566f2148a47SJeff Kirsher static void 
init_registers(struct net_device *dev)
{
	/* Program the station address, FIFO thresholds and ring base
	 * addresses, enable NAPI + interrupts, and start the Tx/Rx engines.
	 * Called with the rings already allocated and initialized. */
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	int i;

	for (i = 0; i < 6; i++)
		iowrite8(dev->dev_addr[i], ioaddr + StationAddr + i);

	/* Initialize other registers. */
	iowrite16(0x0006, ioaddr + PCIBusConfig);	/* Tune configuration??? */
	/* Configure initial FIFO thresholds. */
	iowrite8(0x20, ioaddr + TxConfig);
	rp->tx_thresh = 0x20;
	rp->rx_thresh = 0x60;		/* Written in rhine_set_rx_mode(). */

	iowrite32(rp->rx_ring_dma, ioaddr + RxRingPtr);
	iowrite32(rp->tx_ring_dma, ioaddr + TxRingPtr);

	rhine_set_rx_mode(dev);

	/* Hardware CAM filtering only exists on management-class chips. */
	if (rp->quirks & rqMgmt)
		rhine_init_cam_filter(dev);

	napi_enable(&rp->napi);

	iowrite16(RHINE_EVENT & 0xffff, ioaddr + IntrEnable);

	iowrite16(CmdStart | CmdTxOn | CmdRxOn | (Cmd1NoTxPoll << 8),
	       ioaddr + ChipCmd);
	rhine_check_media(dev, 1);
}

/* Enable MII link status auto-polling (required for IntrLinkChange) */
static void rhine_enable_linkmon(struct rhine_private *rp)
{
	void __iomem *ioaddr = rp->base;

	iowrite8(0, ioaddr + MIICmd);
	iowrite8(MII_BMSR, ioaddr + MIIRegAddr);
	iowrite8(0x80, ioaddr + MIICmd);

	/* Wait for the poll to become active before enabling auto-poll. */
	rhine_wait_bit_high(rp, MIIRegAddr, 0x20);

	iowrite8(MII_BMSR | 0x40, ioaddr + MIIRegAddr);
}

/* Disable MII link status auto-polling (required for MDIO access) */
static void rhine_disable_linkmon(struct rhine_private *rp)
{
	void __iomem *ioaddr = rp->base;

	iowrite8(0, ioaddr + MIICmd);

	/* Rhine-I needs an elaborate shutdown sequence for auto-polling. */
	if (rp->quirks & rqRhineI) {
		iowrite8(0x01, ioaddr + MIIRegAddr);	// MII_BMSR

		/* Can be called from ISR. Evil. */
		mdelay(1);

		/* 0x80 must be set immediately before turning it off */
		iowrite8(0x80, ioaddr + MIICmd);

		rhine_wait_bit_high(rp, MIIRegAddr, 0x20);

		/* Heh. Now clear 0x80 again. */
		iowrite8(0, ioaddr + MIICmd);
	}
	else
		rhine_wait_bit_high(rp, MIIRegAddr, 0x80);
}

/* Read and write over the MII Management Data I/O (MDIO) interface.
*/ 1639f2148a47SJeff Kirsher 1640f2148a47SJeff Kirsher static int mdio_read(struct net_device *dev, int phy_id, int regnum) 1641f2148a47SJeff Kirsher { 1642f2148a47SJeff Kirsher struct rhine_private *rp = netdev_priv(dev); 1643f2148a47SJeff Kirsher void __iomem *ioaddr = rp->base; 1644f2148a47SJeff Kirsher int result; 1645f2148a47SJeff Kirsher 1646a384a33bSFrancois Romieu rhine_disable_linkmon(rp); 1647f2148a47SJeff Kirsher 1648f2148a47SJeff Kirsher /* rhine_disable_linkmon already cleared MIICmd */ 1649f2148a47SJeff Kirsher iowrite8(phy_id, ioaddr + MIIPhyAddr); 1650f2148a47SJeff Kirsher iowrite8(regnum, ioaddr + MIIRegAddr); 1651f2148a47SJeff Kirsher iowrite8(0x40, ioaddr + MIICmd); /* Trigger read */ 1652a384a33bSFrancois Romieu rhine_wait_bit_low(rp, MIICmd, 0x40); 1653f2148a47SJeff Kirsher result = ioread16(ioaddr + MIIData); 1654f2148a47SJeff Kirsher 1655a384a33bSFrancois Romieu rhine_enable_linkmon(rp); 1656f2148a47SJeff Kirsher return result; 1657f2148a47SJeff Kirsher } 1658f2148a47SJeff Kirsher 1659f2148a47SJeff Kirsher static void mdio_write(struct net_device *dev, int phy_id, int regnum, int value) 1660f2148a47SJeff Kirsher { 1661f2148a47SJeff Kirsher struct rhine_private *rp = netdev_priv(dev); 1662f2148a47SJeff Kirsher void __iomem *ioaddr = rp->base; 1663f2148a47SJeff Kirsher 1664a384a33bSFrancois Romieu rhine_disable_linkmon(rp); 1665f2148a47SJeff Kirsher 1666f2148a47SJeff Kirsher /* rhine_disable_linkmon already cleared MIICmd */ 1667f2148a47SJeff Kirsher iowrite8(phy_id, ioaddr + MIIPhyAddr); 1668f2148a47SJeff Kirsher iowrite8(regnum, ioaddr + MIIRegAddr); 1669f2148a47SJeff Kirsher iowrite16(value, ioaddr + MIIData); 1670f2148a47SJeff Kirsher iowrite8(0x20, ioaddr + MIICmd); /* Trigger write */ 1671a384a33bSFrancois Romieu rhine_wait_bit_low(rp, MIICmd, 0x20); 1672f2148a47SJeff Kirsher 1673a384a33bSFrancois Romieu rhine_enable_linkmon(rp); 1674f2148a47SJeff Kirsher } 1675f2148a47SJeff Kirsher 16767ab87ff4SFrancois Romieu static void 
rhine_task_disable(struct rhine_private *rp)
{
	/* Forbid further scheduling of the deferred tasks, then wait for any
	 * in-flight instance to finish. */
	mutex_lock(&rp->task_lock);
	rp->task_enable = false;
	mutex_unlock(&rp->task_lock);

	cancel_work_sync(&rp->slow_event_task);
	cancel_work_sync(&rp->reset_task);
}

/* Re-allow the deferred reset/slow-event tasks to run. */
static void rhine_task_enable(struct rhine_private *rp)
{
	mutex_lock(&rp->task_lock);
	rp->task_enable = true;
	mutex_unlock(&rp->task_lock);
}

/*
 * ndo_open: grab the (shared) IRQ, allocate and fill both rings, reset
 * and program the chip, and start the transmit queue.  Errors unwind in
 * reverse order via gotos.
 */
static int rhine_open(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	int rc;

	rc = request_irq(rp->irq, rhine_interrupt, IRQF_SHARED, dev->name, dev);
	if (rc)
		goto out;

	netif_dbg(rp, ifup, dev, "%s() irq %d\n", __func__, rp->irq);

	rc = alloc_ring(dev);
	if (rc < 0)
		goto out_free_irq;

	rc = alloc_rbufs(dev);
	if (rc < 0)
		goto out_free_ring;

	alloc_tbufs(dev);
	rhine_chip_reset(dev);
	rhine_task_enable(rp);
	init_registers(dev);

	netif_dbg(rp, ifup, dev, "%s() Done - status %04x MII status: %04x\n",
		  __func__, ioread16(ioaddr + ChipCmd),
		  mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));

	netif_start_queue(dev);

out:
	return rc;

out_free_ring:
	free_ring(dev);
out_free_irq:
	free_irq(rp->irq, dev);
	goto out;
}

/*
 * Deferred reset worker: tear down and rebuild the descriptor rings and
 * reinitialize the chip.  Bails out (without resetting) if the tasks were
 * disabled, e.g. during ifdown.
 */
static void rhine_reset_task(struct work_struct *work)
{
	struct rhine_private *rp = container_of(work, struct rhine_private,
						reset_task);
	struct net_device *dev = rp->dev;

	mutex_lock(&rp->task_lock);

	if (!rp->task_enable)
		goto out_unlock;

	napi_disable(&rp->napi);
	netif_tx_disable(dev);
	spin_lock_bh(&rp->lock);

	/* clear all descriptors */
	free_tbufs(dev);
	alloc_tbufs(dev);

	rhine_reset_rbufs(rp);

	/* Reinitialize the hardware.
	 */
	rhine_chip_reset(dev);
	init_registers(dev);

	spin_unlock_bh(&rp->lock);

	netif_trans_update(dev);	/* prevent tx timeout */
	dev->stats.tx_errors++;
	netif_wake_queue(dev);

out_unlock:
	mutex_unlock(&rp->task_lock);
}

/* ndo_tx_timeout: log chip/PHY status and hand recovery off to the
 * deferred reset task (cannot reset from this context). */
static void rhine_tx_timeout(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	netdev_warn(dev, "Transmit timed out, status %04x, PHY status %04x, resetting...\n",
		    ioread16(ioaddr + IntrStatus),
		    mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));

	schedule_work(&rp->reset_task);
}

/* True when all TX_QUEUE_LEN slots between producer (cur_tx) and
 * consumer (dirty_tx) are in use. */
static inline bool rhine_tx_queue_full(struct rhine_private *rp)
{
	return (rp->cur_tx - rp->dirty_tx) >= TX_QUEUE_LEN;
}

/*
 * ndo_start_xmit: place @skb into the next Tx descriptor slot and kick
 * the chip.  Rhine-I frames that are misaligned, fragmented or need a
 * checksum are first linearized into the per-slot bounce buffer; all
 * other frames get a streaming DMA mapping.  Always returns
 * NETDEV_TX_OK (undeliverable frames are dropped and counted).
 */
static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
				  struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	struct device *hwdev = dev->dev.parent;
	void __iomem *ioaddr = rp->base;
	unsigned entry;

	/* Caution: the write order is important here, set the field
	   with the "ownership" bits last. */

	/* Calculate the next Tx descriptor entry. */
	entry = rp->cur_tx % TX_RING_SIZE;

	if (skb_padto(skb, ETH_ZLEN))
		return NETDEV_TX_OK;

	rp->tx_skbuff[entry] = skb;

	if ((rp->quirks & rqRhineI) &&
	    (((unsigned long)skb->data & 3) || skb_shinfo(skb)->nr_frags != 0 || skb->ip_summed == CHECKSUM_PARTIAL)) {
		/* Must use alignment buffer. */
		if (skb->len > PKT_BUF_SZ) {
			/* packet too long, drop it */
			dev_kfree_skb_any(skb);
			rp->tx_skbuff[entry] = NULL;
			dev->stats.tx_dropped++;
			return NETDEV_TX_OK;
		}

		/* Padding is not copied and so must be redone. */
		skb_copy_and_csum_dev(skb, rp->tx_buf[entry]);
		if (skb->len < ETH_ZLEN)
			memset(rp->tx_buf[entry] + skb->len, 0,
			       ETH_ZLEN - skb->len);
		/* dma 0 marks "bounce buffer, nothing to unmap" for
		 * free_tbufs()/completion. */
		rp->tx_skbuff_dma[entry] = 0;
		rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_bufs_dma +
						      (rp->tx_buf[entry] -
						       rp->tx_bufs));
	} else {
		rp->tx_skbuff_dma[entry] =
			dma_map_single(hwdev, skb->data, skb->len,
				       DMA_TO_DEVICE);
		if (dma_mapping_error(hwdev, rp->tx_skbuff_dma[entry])) {
			dev_kfree_skb_any(skb);
			rp->tx_skbuff_dma[entry] = 0;
			dev->stats.tx_dropped++;
			return NETDEV_TX_OK;
		}
		rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_skbuff_dma[entry]);
	}

	rp->tx_ring[entry].desc_length =
		cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));

	if (unlikely(skb_vlan_tag_present(skb))) {
		u16 vid_pcp = skb_vlan_tag_get(skb);

		/* drop CFI/DEI bit, register needs VID and PCP */
		vid_pcp = (vid_pcp & VLAN_VID_MASK) |
			  ((vid_pcp & VLAN_PRIO_MASK) >> 1);
		rp->tx_ring[entry].tx_status = cpu_to_le32((vid_pcp) << 16);
		/* request tagging */
		rp->tx_ring[entry].desc_length |= cpu_to_le32(0x020000);
	}
	else
		rp->tx_ring[entry].tx_status = 0;

	netdev_sent_queue(dev, skb->len);
	/* lock eth irq */
	/* Descriptor contents must be visible to the device before DescOwn
	 * hands it over. */
	dma_wmb();
	rp->tx_ring[entry].tx_status |= cpu_to_le32(DescOwn);
	wmb();

	rp->cur_tx++;
	/*
	 * Nobody wants cur_tx write to rot for ages after the NIC will have
	 * seen the transmit request, especially as the transmit completion
	 * handler could miss it.
	 */
	smp_wmb();

	/* Non-x86 Todo: explicitly flush cache lines here. */

	if (skb_vlan_tag_present(skb))
		/* Tx queues are bits 7-0 (first Tx queue: bit 7) */
		BYTE_REG_BITS_ON(1 << 7, ioaddr + TQWake);

	/* Wake the potentially-idle transmit channel */
	iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
	       ioaddr + ChipCmd1);
	IOSYNC;

	/* dirty_tx may be pessimistically out-of-sync. See rhine_tx. */
	if (rhine_tx_queue_full(rp)) {
		netif_stop_queue(dev);
		smp_rmb();
		/* Rejuvenate. */
		if (!rhine_tx_queue_full(rp))
			netif_wake_queue(dev);
	}

	netif_dbg(rp, tx_queued, dev, "Transmit frame #%d queued in slot %d\n",
		  rp->cur_tx - 1, entry);

	return NETDEV_TX_OK;
}

/* Mask all chip interrupt sources; mmiowb() orders the MMIO write before
 * any following unlock. */
static void rhine_irq_disable(struct rhine_private *rp)
{
	iowrite16(0x0000, rp->base + IntrEnable);
	mmiowb();
}

/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread.
*/ 1902f2148a47SJeff Kirsher static irqreturn_t rhine_interrupt(int irq, void *dev_instance) 1903f2148a47SJeff Kirsher { 1904f2148a47SJeff Kirsher struct net_device *dev = dev_instance; 1905f2148a47SJeff Kirsher struct rhine_private *rp = netdev_priv(dev); 19067ab87ff4SFrancois Romieu u32 status; 1907f2148a47SJeff Kirsher int handled = 0; 1908f2148a47SJeff Kirsher 19097ab87ff4SFrancois Romieu status = rhine_get_events(rp); 1910f2148a47SJeff Kirsher 1911fc3e0f8aSFrancois Romieu netif_dbg(rp, intr, dev, "Interrupt, status %08x\n", status); 1912f2148a47SJeff Kirsher 19137ab87ff4SFrancois Romieu if (status & RHINE_EVENT) { 19147ab87ff4SFrancois Romieu handled = 1; 1915f2148a47SJeff Kirsher 19167ab87ff4SFrancois Romieu rhine_irq_disable(rp); 1917f2148a47SJeff Kirsher napi_schedule(&rp->napi); 1918f2148a47SJeff Kirsher } 1919f2148a47SJeff Kirsher 19207ab87ff4SFrancois Romieu if (status & ~(IntrLinkChange | IntrStatsMax | RHINE_EVENT_NAPI)) { 1921fc3e0f8aSFrancois Romieu netif_err(rp, intr, dev, "Something Wicked happened! %08x\n", 19227ab87ff4SFrancois Romieu status); 1923f2148a47SJeff Kirsher } 1924f2148a47SJeff Kirsher 1925f2148a47SJeff Kirsher return IRQ_RETVAL(handled); 1926f2148a47SJeff Kirsher } 1927f2148a47SJeff Kirsher 1928f2148a47SJeff Kirsher /* This routine is logically part of the interrupt handler, but isolated 1929f2148a47SJeff Kirsher for clarity. 
 */
/* Reclaim completed Tx descriptors: free/unmap skbs, account errors and
   collisions, report BQL completions and wake the queue when room frees up. */
static void rhine_tx(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	struct device *hwdev = dev->dev.parent;
	unsigned int pkts_compl = 0, bytes_compl = 0;
	unsigned int dirty_tx = rp->dirty_tx;
	unsigned int cur_tx;
	struct sk_buff *skb;

	/*
	 * The race with rhine_start_tx does not matter here as long as the
	 * driver enforces a value of cur_tx that was relevant when the
	 * packet was scheduled to the network chipset.
	 * Executive summary: smp_rmb() balances smp_wmb() in rhine_start_tx.
	 */
	smp_rmb();
	cur_tx = rp->cur_tx;
	/* find and cleanup dirty tx descriptors */
	while (dirty_tx != cur_tx) {
		unsigned int entry = dirty_tx % TX_RING_SIZE;
		u32 txstatus = le32_to_cpu(rp->tx_ring[entry].tx_status);

		netif_dbg(rp, tx_done, dev, "Tx scavenge %d status %08x\n",
			  entry, txstatus);
		if (txstatus & DescOwn)
			break;
		skb = rp->tx_skbuff[entry];
		if (txstatus & 0x8000) {
			/* Error summary bit set: decode individual error
			   bits into the matching netdev counters. */
			netif_dbg(rp, tx_done, dev,
				  "Transmit error, Tx status %08x\n", txstatus);
			dev->stats.tx_errors++;
			if (txstatus & 0x0400)
				dev->stats.tx_carrier_errors++;
			if (txstatus & 0x0200)
				dev->stats.tx_window_errors++;
			if (txstatus & 0x0100)
				dev->stats.tx_aborted_errors++;
			if (txstatus & 0x0080)
				dev->stats.tx_heartbeat_errors++;
			if (((rp->quirks & rqRhineI) && txstatus & 0x0002) ||
			    (txstatus & 0x0800) || (txstatus & 0x1000)) {
				/* FIFO underflow: hand the descriptor back to
				   the chip and retry this frame later. */
				dev->stats.tx_fifo_errors++;
				rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
				break; /* Keep the skb - we try again */
			}
			/* Transmitter restarted in 'abnormal' handler. */
		} else {
			if (rp->quirks & rqRhineI)
				dev->stats.collisions += (txstatus >> 3) & 0x0F;
			else
				dev->stats.collisions += txstatus & 0x0F;
			netif_dbg(rp, tx_done, dev, "collisions: %1.1x:%1.1x\n",
				  (txstatus >> 3) & 0xF, txstatus & 0xF);

			u64_stats_update_begin(&rp->tx_stats.syncp);
			rp->tx_stats.bytes += skb->len;
			rp->tx_stats.packets++;
			u64_stats_update_end(&rp->tx_stats.syncp);
		}
		/* Free the original skb.
		 */
		if (rp->tx_skbuff_dma[entry]) {
			/* dma==0 means the Rhine-I alignment buffer was used;
			   only real mappings need to be unmapped. */
			dma_unmap_single(hwdev,
					 rp->tx_skbuff_dma[entry],
					 skb->len,
					 DMA_TO_DEVICE);
		}
		bytes_compl += skb->len;
		pkts_compl++;
		dev_consume_skb_any(skb);
		rp->tx_skbuff[entry] = NULL;
		dirty_tx++;
	}

	rp->dirty_tx = dirty_tx;
	/* Pity we can't rely on the nearby BQL completion implicit barrier. */
	smp_wmb();

	netdev_completed_queue(dev, pkts_compl, bytes_compl);

	/* cur_tx may be optimistically out-of-sync. See rhine_start_tx. */
	if (!rhine_tx_queue_full(rp) && netif_queue_stopped(dev)) {
		netif_wake_queue(dev);
		smp_rmb();
		/* Rejuvenate. */
		if (rhine_tx_queue_full(rp))
			netif_stop_queue(dev);
	}
}

/**
 * rhine_get_vlan_tci - extract TCI from Rx data buffer
 * @skb: pointer to sk_buff
 * @data_size: used data area of the buffer including CRC
 *
 * If hardware VLAN tag extraction is enabled and the chip indicates a 802.1Q
 * packet, the extracted 802.1Q header (2 bytes TPID + 2 bytes TCI) is 4-byte
 * aligned following the CRC.
 */
static inline u16 rhine_get_vlan_tci(struct sk_buff *skb, int data_size)
{
	/* Round the used area up to a 4-byte boundary, then skip the
	   2-byte TPID to land on the TCI. */
	u8 *trailer = (u8 *)skb->data + ((data_size + 3) & ~3) + 2;
	return be16_to_cpup((__be16 *)trailer);
}

/* If the descriptor carries DescTag, pull the hardware-extracted VLAN TCI
   out of the buffer trailer and attach it to the skb. */
static inline void rhine_rx_vlan_tag(struct sk_buff *skb, struct rx_desc *desc,
				     int data_size)
{
	/* dma_rmb() ensures desc_length is read after the DescOwn check
	   that preceded this call. */
	dma_rmb();
	if (unlikely(desc->desc_length & cpu_to_le32(DescTag))) {
		u16 vlan_tci;

		vlan_tci = rhine_get_vlan_tci(skb, data_size);
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
	}
}

/* Process up to limit frames from receive ring */
static int rhine_rx(struct net_device *dev, int limit)
{
	struct rhine_private *rp = netdev_priv(dev);
	struct device *hwdev = dev->dev.parent;
	int entry = rp->cur_rx % RX_RING_SIZE;
	int count;

	netif_dbg(rp, rx_status, dev, "%s(), entry %d status %08x\n", __func__,
		  entry, le32_to_cpu(rp->rx_ring[entry].rx_status));

	/* If EOP is set on the next entry, it's a new packet. Send it up.
	 */
	for (count = 0; count < limit; ++count) {
		struct rx_desc *desc = rp->rx_ring + entry;
		u32 desc_status = le32_to_cpu(desc->rx_status);
		int data_size = desc_status >> 16;

		if (desc_status & DescOwn)
			break;

		netif_dbg(rp, rx_status, dev, "%s() status %08x\n", __func__,
			  desc_status);

		if ((desc_status & (RxWholePkt | RxErr)) != RxWholePkt) {
			if ((desc_status & RxWholePkt) != RxWholePkt) {
				netdev_warn(dev,
					    "Oversized Ethernet frame spanned multiple buffers, "
					    "entry %#x length %d status %08x!\n",
					    entry, data_size,
					    desc_status);
				dev->stats.rx_length_errors++;
			} else if (desc_status & RxErr) {
				/* There was a error.
				 */
				netif_dbg(rp, rx_err, dev,
					  "%s() Rx error %08x\n", __func__,
					  desc_status);
				dev->stats.rx_errors++;
				if (desc_status & 0x0030)
					dev->stats.rx_length_errors++;
				if (desc_status & 0x0048)
					dev->stats.rx_fifo_errors++;
				if (desc_status & 0x0004)
					dev->stats.rx_frame_errors++;
				if (desc_status & 0x0002) {
					/* this can also be updated outside the interrupt handler */
					spin_lock(&rp->lock);
					dev->stats.rx_crc_errors++;
					spin_unlock(&rp->lock);
				}
			}
		} else {
			/* Length should omit the CRC */
			int pkt_len = data_size - 4;
			struct sk_buff *skb;

			/* Check if the packet is long enough to accept without
			   copying to a minimally-sized skbuff.
			 */
			if (pkt_len < rx_copybreak) {
				/* Small frame: copy into a fresh skb and keep
				   the original DMA buffer in the ring. */
				skb = netdev_alloc_skb_ip_align(dev, pkt_len);
				if (unlikely(!skb))
					goto drop;

				dma_sync_single_for_cpu(hwdev,
							rp->rx_skbuff_dma[entry],
							rp->rx_buf_sz,
							DMA_FROM_DEVICE);

				skb_copy_to_linear_data(skb,
							rp->rx_skbuff[entry]->data,
							pkt_len);

				dma_sync_single_for_device(hwdev,
							   rp->rx_skbuff_dma[entry],
							   rp->rx_buf_sz,
							   DMA_FROM_DEVICE);
			} else {
				/* Large frame: map a replacement buffer first
				   so the ring slot is never left empty, then
				   hand the filled buffer up the stack. */
				struct rhine_skb_dma sd;

				if (unlikely(rhine_skb_dma_init(dev, &sd) < 0))
					goto drop;

				skb = rp->rx_skbuff[entry];

				dma_unmap_single(hwdev,
						 rp->rx_skbuff_dma[entry],
						 rp->rx_buf_sz,
						 DMA_FROM_DEVICE);
				rhine_skb_dma_nic_store(rp, &sd, entry);
			}

			skb_put(skb, pkt_len);

			rhine_rx_vlan_tag(skb, desc, data_size);

			skb->protocol = eth_type_trans(skb, dev);

			netif_receive_skb(skb);

			u64_stats_update_begin(&rp->rx_stats.syncp);
			rp->rx_stats.bytes += pkt_len;
			rp->rx_stats.packets++;
			u64_stats_update_end(&rp->rx_stats.syncp);
		}
give_descriptor_to_nic:
		desc->rx_status = cpu_to_le32(DescOwn);
		entry = (++rp->cur_rx) % RX_RING_SIZE;
	}

	return count;

drop:
	/* Allocation/mapping failure: count the drop and recycle the
	   descriptor so the ring keeps flowing. */
	dev->stats.rx_dropped++;
	goto give_descriptor_to_nic;
}

/* Restart the Tx engine after an error, resuming at the first descriptor
   that has not been reclaimed yet (dirty_tx). */
static void rhine_restart_tx(struct net_device *dev) {
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	int entry = rp->dirty_tx % TX_RING_SIZE;
	u32 intr_status;

	/*
	 * If new errors occurred, we need to sort them out before doing Tx.
	 * In that case the ISR will be back here RSN anyway.
	 */
	intr_status = rhine_get_events(rp);

	if ((intr_status & IntrTxErrSummary) == 0) {

		/* We know better than the chip where it should continue.
		 */
		iowrite32(rp->tx_ring_dma + entry * sizeof(struct tx_desc),
			  ioaddr + TxRingPtr);

		iowrite8(ioread8(ioaddr + ChipCmd) | CmdTxOn,
			 ioaddr + ChipCmd);

		/* 0x020000 in desc_length is the "request VLAN tagging" bit
		   set by the transmit path. */
		if (rp->tx_ring[entry].desc_length & cpu_to_le32(0x020000))
			/* Tx queues are bits 7-0 (first Tx queue: bit 7) */
			BYTE_REG_BITS_ON(1 << 7, ioaddr + TQWake);

		iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
			 ioaddr + ChipCmd1);
		IOSYNC;
	}
	else {
		/* This should never happen */
		netif_warn(rp, tx_err, dev, "another error occurred %08x\n",
			   intr_status);
	}

}

/* Workqueue handler for slow events (link change, PCI error) kept out of
   the IRQ/NAPI fast path; serialized against open/close via task_lock. */
static void rhine_slow_event_task(struct work_struct *work)
{
	struct rhine_private *rp =
		container_of(work, struct rhine_private, slow_event_task);
	struct net_device *dev = rp->dev;
	u32 intr_status;

	mutex_lock(&rp->task_lock);

	if (!rp->task_enable)
		goto out_unlock;

	intr_status = rhine_get_events(rp);
	rhine_ack_events(rp, intr_status & RHINE_EVENT_SLOW);

	if (intr_status & IntrLinkChange)
		rhine_check_media(dev, 0);

	if (intr_status & IntrPCIErr)
		netif_warn(rp, hw, dev, "PCI error\n");

	/* Re-enable the interrupt sources masked by rhine_irq_disable(). */
	iowrite16(RHINE_EVENT & 0xffff, rp->base + IntrEnable);

out_unlock:
	mutex_unlock(&rp->task_lock);
}

/* ndo_get_stats64: fold hardware CRC/missed counters into dev->stats, then
   read the 64-bit Rx/Tx counters under their u64_stats retry loops. */
static struct rtnl_link_stats64 *
rhine_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct rhine_private *rp = netdev_priv(dev);
	unsigned int start;

	spin_lock_bh(&rp->lock);
	/* NOTE(review): helper name is spelled "errord" at its definition
	   elsewhere in this file — looks like an upstream typo; confirm. */
	rhine_update_rx_crc_and_missed_errord(rp);
	spin_unlock_bh(&rp->lock);

	netdev_stats_to_stats64(stats, &dev->stats);

	do {
		start = u64_stats_fetch_begin_irq(&rp->rx_stats.syncp);
		stats->rx_packets = rp->rx_stats.packets;
		stats->rx_bytes = rp->rx_stats.bytes;
	} while (u64_stats_fetch_retry_irq(&rp->rx_stats.syncp, start));

	do {
		start = u64_stats_fetch_begin_irq(&rp->tx_stats.syncp);
		stats->tx_packets = rp->tx_stats.packets;
		stats->tx_bytes = rp->tx_stats.bytes;
	} while (u64_stats_fetch_retry_irq(&rp->tx_stats.syncp, start));

	return stats;
}

/* Program the Rx filter: promiscuous, all-multicast, per-address CAM
   (management chips), or the 64-bit multicast hash. */
static void rhine_set_rx_mode(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	u32 mc_filter[2];	/* Multicast hash filter */
	u8 rx_mode = 0x0C;	/* Note: 0x02=accept runt, 0x01=accept errs */
	struct netdev_hw_addr *ha;

	if (dev->flags & IFF_PROMISC) {		/* Set promiscuous. */
		rx_mode = 0x1C;
		iowrite32(0xffffffff, ioaddr + MulticastFilter0);
		iowrite32(0xffffffff, ioaddr + MulticastFilter1);
	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
		   (dev->flags & IFF_ALLMULTI)) {
		/* Too many to match, or accept all multicasts.
		 */
		iowrite32(0xffffffff, ioaddr + MulticastFilter0);
		iowrite32(0xffffffff, ioaddr + MulticastFilter1);
	} else if (rp->quirks & rqMgmt) {
		/* Management chips match multicast addresses exactly via
		   a CAM instead of the hash filter. */
		int i = 0;
		u32 mCAMmask = 0;	/* 32 mCAMs (6105M and better) */
		netdev_for_each_mc_addr(ha, dev) {
			if (i == MCAM_SIZE)
				break;
			rhine_set_cam(ioaddr, i, ha->addr);
			mCAMmask |= 1 << i;
			i++;
		}
		rhine_set_cam_mask(ioaddr, mCAMmask);
	} else {
		memset(mc_filter, 0, sizeof(mc_filter));
		netdev_for_each_mc_addr(ha, dev) {
			/* Top 6 bits of the CRC index one of 64 hash bits. */
			int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;

			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
		}
		iowrite32(mc_filter[0], ioaddr + MulticastFilter0);
		iowrite32(mc_filter[1], ioaddr + MulticastFilter1);
	}
	/* enable/disable VLAN receive filtering */
	if (rp->quirks & rqMgmt) {
		if (dev->flags & IFF_PROMISC)
			BYTE_REG_BITS_OFF(BCR1_VIDFR, ioaddr + PCIBusConfig1);
		else
			BYTE_REG_BITS_ON(BCR1_VIDFR, ioaddr + PCIBusConfig1);
	}
	BYTE_REG_BITS_ON(rx_mode, ioaddr + RxConfig);
}

/* ethtool -i: report driver name, version and bus location. */
static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct device *hwdev = dev->dev.parent;

	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, dev_name(hwdev), sizeof(info->bus_info));
}

/* ethtool get_settings: read link parameters from the MII, serialized by
   task_lock against the slow-event worker. */
static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct rhine_private *rp = netdev_priv(dev);
	int rc;

	mutex_lock(&rp->task_lock);
	rc = mii_ethtool_gset(&rp->mii_if, cmd);
	mutex_unlock(&rp->task_lock);

	return rc;
}

/* ethtool set_settings: write link parameters to the MII and refresh the
   carrier state. */
static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct rhine_private *rp = netdev_priv(dev);
	int rc;

	mutex_lock(&rp->task_lock);
	rc = mii_ethtool_sset(&rp->mii_if, cmd);
	rhine_set_carrier(&rp->mii_if);
	mutex_unlock(&rp->task_lock);

	return rc;
}

/* ethtool nway_reset: restart MII autonegotiation. */
static int netdev_nway_reset(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);

	return mii_nway_restart(&rp->mii_if);
}

/* ethtool get_link: report MII link status. */
static u32 netdev_get_link(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);

	return mii_link_ok(&rp->mii_if);
}

/* ethtool get_msglevel: return the netif message-enable bitmap. */
static u32 netdev_get_msglevel(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);

	return rp->msg_enable;
}

/* ethtool set_msglevel: set the netif message-enable bitmap. */
static void netdev_set_msglevel(struct net_device *dev, u32 value)
{
	struct rhine_private *rp = netdev_priv(dev);

	rp->msg_enable = value;
}

/* ethtool get_wol: report supported and currently enabled wake-up modes
   (WOL-capable chips only). */
static void rhine_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct rhine_private *rp = netdev_priv(dev);

	if (!(rp->quirks & rqWOL))
		return;

	spin_lock_irq(&rp->lock);
	wol->supported = WAKE_PHY | WAKE_MAGIC |
			 WAKE_UCAST | WAKE_MCAST | WAKE_BCAST;	/* Untested */
	wol->wolopts = rp->wolopts;
	spin_unlock_irq(&rp->lock);
}

/* ethtool set_wol: validate and store the requested wake-up modes; the
   hardware is actually armed in rhine_shutdown_pci(). */
static int rhine_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct rhine_private *rp = netdev_priv(dev);
	u32 support = WAKE_PHY | WAKE_MAGIC |
		      WAKE_UCAST | WAKE_MCAST | WAKE_BCAST;	/* Untested */

	if (!(rp->quirks & rqWOL))
		return -EINVAL;

	if (wol->wolopts & ~support)
		return -EINVAL;

	spin_lock_irq(&rp->lock);
	rp->wolopts = wol->wolopts;
	spin_unlock_irq(&rp->lock);

	return 0;
}

static const struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo		= netdev_get_drvinfo,
	.get_settings		= netdev_get_settings,
	.set_settings		= netdev_set_settings,
	.nway_reset		= netdev_nway_reset,
	.get_link		= netdev_get_link,
	.get_msglevel		= netdev_get_msglevel,
	.set_msglevel		= netdev_set_msglevel,
	.get_wol		= rhine_get_wol,
	.set_wol		= rhine_set_wol,
};

/* ndo_do_ioctl: forward MII ioctls to the generic helper; only valid while
   the interface is up. */
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct rhine_private *rp = netdev_priv(dev);
	int rc;

	if (!netif_running(dev))
		return -EINVAL;

	mutex_lock(&rp->task_lock);
	rc = generic_mii_ioctl(&rp->mii_if, if_mii(rq), cmd, NULL);
	rhine_set_carrier(&rp->mii_if);
	mutex_unlock(&rp->task_lock);

	return rc;
}

/* ndo_stop: quiesce NAPI/worker, stop the chip, release the IRQ and free
   all ring buffers. */
static int rhine_close(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	rhine_task_disable(rp);
	napi_disable(&rp->napi);
	netif_stop_queue(dev);

	netif_dbg(rp, ifdown, dev, "Shutting down ethercard, status was %04x\n",
		  ioread16(ioaddr + ChipCmd));

	/* Switch to loopback mode to avoid hardware races. */
	iowrite8(rp->tx_thresh | 0x02, ioaddr + TxConfig);

	rhine_irq_disable(rp);

	/* Stop the chip's Tx and Rx processes. */
	iowrite16(CmdStop, ioaddr + ChipCmd);

	free_irq(rp->irq, dev);
	free_rbufs(dev);
	free_tbufs(dev);
	free_ring(dev);

	return 0;
}


/* PCI removal: unregister the netdev and release PCI resources. */
static void rhine_remove_one_pci(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rhine_private *rp = netdev_priv(dev);

	unregister_netdev(dev);

	pci_iounmap(pdev, rp->base);
	pci_release_regions(pdev);

	free_netdev(dev);
	pci_disable_device(pdev);
}

/* Platform-device removal counterpart of rhine_remove_one_pci(). */
static int rhine_remove_one_platform(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct rhine_private *rp = netdev_priv(dev);

	unregister_netdev(dev);

	iounmap(rp->base);

	free_netdev(dev);

	return 0;
}

/* PCI shutdown hook: arm the configured Wake-on-LAN sources and, on power
   off, put the chip into D3 so it can wake the system. */
static void rhine_shutdown_pci(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	if (!(rp->quirks & rqWOL))
		return; /* Nothing to do for non-WOL adapters */

	rhine_power_init(dev);

	/* Make sure we use pattern 0, 1 and not 4, 5 */
	if (rp->quirks & rq6patterns)
		iowrite8(0x04, ioaddr + WOLcgClr);

	spin_lock(&rp->lock);

	if (rp->wolopts & WAKE_MAGIC) {
		iowrite8(WOLmagic, ioaddr + WOLcrSet);
		/*
		 * Turn EEPROM-controlled wake-up back on -- some hardware may
		 * not cooperate otherwise.
		 */
		iowrite8(ioread8(ioaddr + ConfigA) | 0x03, ioaddr + ConfigA);
	}

	if (rp->wolopts & (WAKE_BCAST|WAKE_MCAST))
		iowrite8(WOLbmcast, ioaddr + WOLcgSet);

	if (rp->wolopts & WAKE_PHY)
		iowrite8(WOLlnkon | WOLlnkoff, ioaddr + WOLcrSet);

	if (rp->wolopts & WAKE_UCAST)
		iowrite8(WOLucast, ioaddr + WOLcrSet);

	if (rp->wolopts) {
		/* Enable legacy WOL (for old motherboards) */
		iowrite8(0x01, ioaddr + PwcfgSet);
		iowrite8(ioread8(ioaddr + StickyHW) | 0x04, ioaddr + StickyHW);
	}

	spin_unlock(&rp->lock);

	if (system_state == SYSTEM_POWER_OFF && !avoid_D3) {
		/* avoid_D3 is the module parameter working around BIOSes
		   that cannot bring the chip back out of D3. */
		iowrite8(ioread8(ioaddr + StickyHW) | 0x03, ioaddr + StickyHW);

		pci_wake_from_d3(pdev, true);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}

#ifdef CONFIG_PM_SLEEP
static int rhine_suspend(struct device *device)
{
	struct net_device *dev = dev_get_drvdata(device);
	struct rhine_private *rp = netdev_priv(dev);

	if (!netif_running(dev))
		return 0;

	rhine_task_disable(rp);
	rhine_irq_disable(rp);
2542f2148a47SJeff Kirsher napi_disable(&rp->napi); 2543f2148a47SJeff Kirsher 2544f2148a47SJeff Kirsher netif_device_detach(dev); 2545f2148a47SJeff Kirsher 2546f7630d18SAlexey Charkov if (dev_is_pci(device)) 25472d283862SAlexey Charkov rhine_shutdown_pci(to_pci_dev(device)); 2548f2148a47SJeff Kirsher 2549f2148a47SJeff Kirsher return 0; 2550f2148a47SJeff Kirsher } 2551f2148a47SJeff Kirsher 2552e92b9b3bSFrancois Romieu static int rhine_resume(struct device *device) 2553f2148a47SJeff Kirsher { 2554f7630d18SAlexey Charkov struct net_device *dev = dev_get_drvdata(device); 2555f2148a47SJeff Kirsher struct rhine_private *rp = netdev_priv(dev); 2556f2148a47SJeff Kirsher 2557f2148a47SJeff Kirsher if (!netif_running(dev)) 2558f2148a47SJeff Kirsher return 0; 2559f2148a47SJeff Kirsher 2560f2148a47SJeff Kirsher enable_mmio(rp->pioaddr, rp->quirks); 2561f2148a47SJeff Kirsher rhine_power_init(dev); 2562f2148a47SJeff Kirsher free_tbufs(dev); 2563f2148a47SJeff Kirsher alloc_tbufs(dev); 25648709bb2cSfrançois romieu rhine_reset_rbufs(rp); 25657ab87ff4SFrancois Romieu rhine_task_enable(rp); 25667ab87ff4SFrancois Romieu spin_lock_bh(&rp->lock); 2567f2148a47SJeff Kirsher init_registers(dev); 25687ab87ff4SFrancois Romieu spin_unlock_bh(&rp->lock); 2569f2148a47SJeff Kirsher 2570f2148a47SJeff Kirsher netif_device_attach(dev); 2571f2148a47SJeff Kirsher 2572f2148a47SJeff Kirsher return 0; 2573f2148a47SJeff Kirsher } 2574e92b9b3bSFrancois Romieu 2575e92b9b3bSFrancois Romieu static SIMPLE_DEV_PM_OPS(rhine_pm_ops, rhine_suspend, rhine_resume); 2576e92b9b3bSFrancois Romieu #define RHINE_PM_OPS (&rhine_pm_ops) 2577e92b9b3bSFrancois Romieu 2578e92b9b3bSFrancois Romieu #else 2579e92b9b3bSFrancois Romieu 2580e92b9b3bSFrancois Romieu #define RHINE_PM_OPS NULL 2581e92b9b3bSFrancois Romieu 2582e92b9b3bSFrancois Romieu #endif /* !CONFIG_PM_SLEEP */ 2583f2148a47SJeff Kirsher 25842d283862SAlexey Charkov static struct pci_driver rhine_driver_pci = { 2585f2148a47SJeff Kirsher .name = DRV_NAME, 
2586f2148a47SJeff Kirsher .id_table = rhine_pci_tbl, 25872d283862SAlexey Charkov .probe = rhine_init_one_pci, 25882d283862SAlexey Charkov .remove = rhine_remove_one_pci, 25892d283862SAlexey Charkov .shutdown = rhine_shutdown_pci, 2590e92b9b3bSFrancois Romieu .driver.pm = RHINE_PM_OPS, 2591f2148a47SJeff Kirsher }; 2592f2148a47SJeff Kirsher 25932d283862SAlexey Charkov static struct platform_driver rhine_driver_platform = { 25942d283862SAlexey Charkov .probe = rhine_init_one_platform, 25952d283862SAlexey Charkov .remove = rhine_remove_one_platform, 25962d283862SAlexey Charkov .driver = { 25972d283862SAlexey Charkov .name = DRV_NAME, 25982d283862SAlexey Charkov .of_match_table = rhine_of_tbl, 25992d283862SAlexey Charkov .pm = RHINE_PM_OPS, 26002d283862SAlexey Charkov } 26012d283862SAlexey Charkov }; 26022d283862SAlexey Charkov 260377273eaaSSachin Kamat static struct dmi_system_id rhine_dmi_table[] __initdata = { 2604f2148a47SJeff Kirsher { 2605f2148a47SJeff Kirsher .ident = "EPIA-M", 2606f2148a47SJeff Kirsher .matches = { 2607f2148a47SJeff Kirsher DMI_MATCH(DMI_BIOS_VENDOR, "Award Software International, Inc."), 2608f2148a47SJeff Kirsher DMI_MATCH(DMI_BIOS_VERSION, "6.00 PG"), 2609f2148a47SJeff Kirsher }, 2610f2148a47SJeff Kirsher }, 2611f2148a47SJeff Kirsher { 2612f2148a47SJeff Kirsher .ident = "KV7", 2613f2148a47SJeff Kirsher .matches = { 2614f2148a47SJeff Kirsher DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies, LTD"), 2615f2148a47SJeff Kirsher DMI_MATCH(DMI_BIOS_VERSION, "6.00 PG"), 2616f2148a47SJeff Kirsher }, 2617f2148a47SJeff Kirsher }, 2618f2148a47SJeff Kirsher { NULL } 2619f2148a47SJeff Kirsher }; 2620f2148a47SJeff Kirsher 2621f2148a47SJeff Kirsher static int __init rhine_init(void) 2622f2148a47SJeff Kirsher { 26232d283862SAlexey Charkov int ret_pci, ret_platform; 26242d283862SAlexey Charkov 2625f2148a47SJeff Kirsher /* when a module, this is printed whether or not devices are found in probe */ 2626f2148a47SJeff Kirsher #ifdef MODULE 2627f2148a47SJeff Kirsher 
pr_info("%s\n", version); 2628f2148a47SJeff Kirsher #endif 2629f2148a47SJeff Kirsher if (dmi_check_system(rhine_dmi_table)) { 2630f2148a47SJeff Kirsher /* these BIOSes fail at PXE boot if chip is in D3 */ 2631eb939922SRusty Russell avoid_D3 = true; 2632f2148a47SJeff Kirsher pr_warn("Broken BIOS detected, avoid_D3 enabled\n"); 2633f2148a47SJeff Kirsher } 2634f2148a47SJeff Kirsher else if (avoid_D3) 2635f2148a47SJeff Kirsher pr_info("avoid_D3 set\n"); 2636f2148a47SJeff Kirsher 26372d283862SAlexey Charkov ret_pci = pci_register_driver(&rhine_driver_pci); 26382d283862SAlexey Charkov ret_platform = platform_driver_register(&rhine_driver_platform); 26392d283862SAlexey Charkov if ((ret_pci < 0) && (ret_platform < 0)) 26402d283862SAlexey Charkov return ret_pci; 26412d283862SAlexey Charkov 26422d283862SAlexey Charkov return 0; 2643f2148a47SJeff Kirsher } 2644f2148a47SJeff Kirsher 2645f2148a47SJeff Kirsher 2646f2148a47SJeff Kirsher static void __exit rhine_cleanup(void) 2647f2148a47SJeff Kirsher { 26482d283862SAlexey Charkov platform_driver_unregister(&rhine_driver_platform); 26492d283862SAlexey Charkov pci_unregister_driver(&rhine_driver_pci); 2650f2148a47SJeff Kirsher } 2651f2148a47SJeff Kirsher 2652f2148a47SJeff Kirsher 2653f2148a47SJeff Kirsher module_init(rhine_init); 2654f2148a47SJeff Kirsher module_exit(rhine_cleanup); 2655