/* via-rhine.c: A Linux Ethernet device driver for VIA Rhine family chips. */
/*
	Written 1998-2001 by Donald Becker.

	Current Maintainer: Roger Luethi <rl@hellgate.ch>

	This software may be used and distributed according to the terms of
	the GNU General Public License (GPL), incorporated herein by reference.
	Drivers based on or derived from this code fall under the GPL and must
	retain the authorship, copyright and license notice.  This file is not
	a complete program and may only be used when the entire operating
	system is licensed under the GPL.

	This driver is designed for the VIA VT86C100A Rhine-I.
	It also works with the Rhine-II (6102) and Rhine-III (6105/6105L/6105LOM
	and management NIC 6105M).

	The author may be reached as becker@scyld.com, or C/O
	Scyld Computing Corporation
	410 Severn Ave., Suite 210
	Annapolis MD 21403


	This driver contains some changes from the original Donald Becker
	version. He may or may not be interested in bug reports on this
	code. You can find his versions at:
	http://www.scyld.com/network/via-rhine.html
	[link no longer provides useful info -jgarzik]

*/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#define DRV_NAME	"via-rhine"
#define DRV_VERSION	"1.5.0"
#define DRV_RELDATE	"2010-10-09"


/* A few user-configurable values.
   These may be modified when a driver module is loaded. */

#define DEBUG
static int debug = 1;	/* 1 normal messages, 0 quiet .. 7 verbose. */
static int max_interrupt_work = 20;

/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
   Setting to > 1518 effectively disables this feature. */
#if defined(__alpha__) || defined(__arm__) || defined(__hppa__) || \
	defined(CONFIG_SPARC) || defined(__ia64__) || \
	defined(__sh__) || defined(__mips__)
static int rx_copybreak = 1518;
#else
static int rx_copybreak;
#endif

/* Work-around for broken BIOSes: they are unable to get the chip back out of
   power state D3 so PXE booting fails.
   bootparam(7): via-rhine.avoid_D3=1 */
static int avoid_D3;

/*
 * In case you are looking for 'options[]' or 'full_duplex[]', they
 * are gone. Use ethtool(8) instead.
 */

/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
   The Rhine has a 64 element 8390-like hash table. */
static const int multicast_filter_limit = 32;


/* Operational parameters that are set at compile time. */

/* Keep the ring sizes a power of two for compile efficiency.
   The compiler will convert <unsigned>'%'<2^N> into a bit mask.
   Making the Tx ring too large decreases the effectiveness of channel
   bonding and packet priority.
   There are no ill effects from too-large receive rings. */
#define TX_RING_SIZE	16
#define TX_QUEUE_LEN	10	/* Limit ring entries actually used. */
#define RX_RING_SIZE	64

/* Operational parameters that usually are not changed. */

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT	(2*HZ)

#define PKT_BUF_SZ	1536	/* Size of each temporary Rx buffer.*/

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>
#include <linux/if_vlan.h>
#include <linux/bitops.h>
#include <linux/workqueue.h>
#include <asm/processor.h>	/* Processor type for cache alignment. */
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <linux/dmi.h>

/* These identify the driver base version and may not be removed. */
static const char version[] __devinitconst =
	"v1.10-LK" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker";

/* This driver was written to use PCI memory space.
   Some early versions of the Rhine may only work correctly with I/O space accesses. */
#ifdef CONFIG_VIA_RHINE_MMIO
#define USE_MMIO
#else
#endif

MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("VIA Rhine PCI Fast Ethernet driver");
MODULE_LICENSE("GPL");

module_param(max_interrupt_work, int, 0);
module_param(debug, int, 0);
module_param(rx_copybreak, int, 0);
module_param(avoid_D3, bool, 0);
MODULE_PARM_DESC(max_interrupt_work, "VIA Rhine maximum events handled per interrupt");
MODULE_PARM_DESC(debug, "VIA Rhine debug level (0-7)");
MODULE_PARM_DESC(rx_copybreak, "VIA Rhine copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(avoid_D3, "Avoid power state D3 (work-around for broken BIOSes)");

#define MCAM_SIZE	32
#define VCAM_SIZE	32

/*
		Theory of Operation

I. Board Compatibility

This driver is designed for the VIA 86c100A Rhine-II PCI Fast Ethernet
controller.

II. Board-specific settings

Boards with this chip are functional only in a bus-master PCI slot.

Many operational settings are loaded from the EEPROM to the Config word at
offset 0x78. For most of these settings, this driver assumes that they are
correct.
If this driver is compiled to use PCI memory space operations the EEPROM
must be configured to enable memory ops.

III. Driver operation

IIIa. Ring buffers

This driver uses two statically allocated fixed-size descriptor lists
formed into rings by a branch from the final descriptor to the beginning of
the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.

IIIb/c. Transmit/Receive Structure

This driver attempts to use a zero-copy receive and transmit scheme.

Alas, all data buffers are required to start on a 32 bit boundary, so
the driver must often copy transmit packets into bounce buffers.

The driver allocates full frame size skbuffs for the Rx ring buffers at
open() time and passes the skb->data field to the chip as receive data
buffers.
When an incoming frame is less than RX_COPYBREAK bytes long,
a fresh skbuff is allocated and the frame is copied to the new skbuff.
When the incoming frame is larger, the skbuff is passed directly up the
protocol stack. Buffers consumed this way are replaced by newly allocated
skbuffs in the last phase of rhine_rx().

The RX_COPYBREAK value is chosen to trade off the memory wasted by
using a full-sized skbuff for small frames vs. the copying costs of larger
frames. New boards are typically used in generously configured machines
and the underfilled buffers have negligible impact compared to the benefit of
a single allocation size, so the default value of zero results in never
copying packets. When copying is done, the cost is usually mitigated by using
a combined copy/checksum routine. Copying also preloads the cache, which is
most useful with small frames.

Since the VIA chips are only able to transfer data to buffers on 32 bit
boundaries, the IP header at offset 14 in an ethernet frame isn't
longword aligned for further processing. Copying these unaligned buffers
has the beneficial effect of 16-byte aligning the IP header.

IIId. Synchronization

The driver runs as two independent, single-threaded flows of control. One
is the send-packet routine, which enforces single-threaded use by the
netdev_priv(dev)->lock spinlock. The other thread is the interrupt handler,
which is single threaded by the hardware and interrupt handling software.

The send packet thread has partial control over the Tx ring. It locks the
netdev_priv(dev)->lock whenever it's queuing a Tx packet. If the next slot in
the ring is not available it stops the transmit queue by
calling netif_stop_queue.

The interrupt handler has exclusive control over the Rx ring and records stats
from the Tx ring. After reaping the stats, it marks the Tx queue entry as
empty by incrementing the dirty_tx mark. If at least half of the entries in
the Rx ring are available the transmit queue is woken up if it was stopped.

IV. Notes

IVb. References
Preliminary VT86C100A manual from http://www.via.com.tw/
http://www.scyld.com/expert/100mbps.html
http://www.scyld.com/expert/NWay.html
ftp://ftp.via.com.tw/public/lan/Products/NIC/VT86C100A/Datasheet/VT86C100A03.pdf
ftp://ftp.via.com.tw/public/lan/Products/NIC/VT6102/Datasheet/VT6102_021.PDF


IVc. Errata

The VT86C100A manual is not a reliable source of information.
The 3043 chip does not handle unaligned transmit or receive buffers, resulting
in significant performance degradation for bounce buffer copies on transmit
and unaligned IP headers on receive.
The chip does not pad to minimum transmit length.

*/


/* This table drives the PCI probe routines. It's mostly boilerplate in all
   of the drivers, and will likely be provided by some future kernel.
   Note the matching code -- the first table entry matches all 56** cards but
   second only the 1234 card.
*/

enum rhine_revs {
	VT86C100A	= 0x00,
	VTunknown0	= 0x20,
	VT6102		= 0x40,
	VT8231		= 0x50,	/* Integrated MAC */
	VT8233		= 0x60,	/* Integrated MAC */
	VT8235		= 0x74,	/* Integrated MAC */
	VT8237		= 0x78,	/* Integrated MAC */
	VTunknown1	= 0x7C,
	VT6105		= 0x80,
	VT6105_B0	= 0x83,
	VT6105L		= 0x8A,
	VT6107		= 0x8C,
	VTunknown2	= 0x8E,
	VT6105M		= 0x90,	/* Management adapter */
};

enum rhine_quirks {
	rqWOL		= 0x0001,	/* Wake-On-LAN support */
	rqForceReset	= 0x0002,
	rq6patterns	= 0x0040,	/* 6 instead of 4 patterns for WOL */
	rqStatusWBRace	= 0x0080,	/* Tx Status Writeback Error possible */
	rqRhineI	= 0x0100,	/* See comment below */
};
/*
 * rqRhineI: VT86C100A (aka Rhine-I) uses different bits to enable
 * MMIO as well as for the collision counter and the Tx FIFO underflow
 * indicator. In addition, Tx and Rx buffers need to be 4 byte aligned.
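 * (Compare enable_mmio() below: Rhine-I turns on bit 0x20 in ConfigA to
 * enable MMIO, while later chips turn on bit 0x80 in ConfigD.)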
 */

/* Beware of PCI posted writes */
#define IOSYNC	do { ioread8(ioaddr + StationAddr); } while (0)

static DEFINE_PCI_DEVICE_TABLE(rhine_pci_tbl) = {
	{ 0x1106, 0x3043, PCI_ANY_ID, PCI_ANY_ID, },	/* VT86C100A */
	{ 0x1106, 0x3065, PCI_ANY_ID, PCI_ANY_ID, },	/* VT6102 */
	{ 0x1106, 0x3106, PCI_ANY_ID, PCI_ANY_ID, },	/* 6105{,L,LOM} */
	{ 0x1106, 0x3053, PCI_ANY_ID, PCI_ANY_ID, },	/* VT6105M */
	{ }	/* terminate list */
};
MODULE_DEVICE_TABLE(pci, rhine_pci_tbl);


/* Offsets to the device registers. */
enum register_offsets {
	StationAddr=0x00, RxConfig=0x06, TxConfig=0x07, ChipCmd=0x08,
	ChipCmd1=0x09, TQWake=0x0A,
	IntrStatus=0x0C, IntrEnable=0x0E,
	MulticastFilter0=0x10, MulticastFilter1=0x14,
	RxRingPtr=0x18, TxRingPtr=0x1C, GFIFOTest=0x54,
	MIIPhyAddr=0x6C, MIIStatus=0x6D, PCIBusConfig=0x6E, PCIBusConfig1=0x6F,
	MIICmd=0x70, MIIRegAddr=0x71, MIIData=0x72, MACRegEEcsr=0x74,
	ConfigA=0x78, ConfigB=0x79, ConfigC=0x7A, ConfigD=0x7B,
	RxMissed=0x7C, RxCRCErrs=0x7E, MiscCmd=0x81,
	StickyHW=0x83, IntrStatus2=0x84,
	CamMask=0x88, CamCon=0x92, CamAddr=0x93,
	WOLcrSet=0xA0, PwcfgSet=0xA1, WOLcgSet=0xA3, WOLcrClr=0xA4,
	WOLcrClr1=0xA6, WOLcgClr=0xA7,
	PwrcsrSet=0xA8, PwrcsrSet1=0xA9, PwrcsrClr=0xAC, PwrcsrClr1=0xAD,
};

/* Bits in ConfigD */
enum backoff_bits {
	BackOptional=0x01, BackModify=0x02,
	BackCaptureEffect=0x04, BackRandom=0x08
};

/* Bits in the TxConfig (TCR) register */
enum tcr_bits {
	TCR_PQEN=0x01,
	TCR_LB0=0x02,		/* loopback[0] */
	TCR_LB1=0x04,		/* loopback[1] */
	TCR_OFSET=0x08,
	TCR_RTGOPT=0x10,
	TCR_RTFT0=0x20,
	TCR_RTFT1=0x40,
	TCR_RTSF=0x80,
};

/* Bits in the CamCon (CAMC) register */
enum camcon_bits {
	CAMC_CAMEN=0x01,
	CAMC_VCAMSL=0x02,
	CAMC_CAMWR=0x04,
	CAMC_CAMRD=0x08,
};

/* Bits in the PCIBusConfig1 (BCR1) register */
enum bcr1_bits {
	BCR1_POT0=0x01,
	BCR1_POT1=0x02,
	BCR1_POT2=0x04,
	BCR1_CTFT0=0x08,
	BCR1_CTFT1=0x10,
	BCR1_CTSF=0x20,
	BCR1_TXQNOBK=0x40,	/* for VT6105 */
	BCR1_VIDFR=0x80,	/* for VT6105 */
	BCR1_MED0=0x40,		/* for VT6102 */
	BCR1_MED1=0x80,		/* for VT6102 */
};

#ifdef USE_MMIO
/* Registers we check that mmio and reg are the same. */
static const int mmio_verify_registers[] = {
	RxConfig, TxConfig, IntrEnable, ConfigA, ConfigB, ConfigC, ConfigD,
	0
};
#endif

/* Bits in the interrupt status/mask registers. */
enum intr_status_bits {
	IntrRxDone=0x0001, IntrRxErr=0x0004, IntrRxEmpty=0x0020,
	IntrTxDone=0x0002, IntrTxError=0x0008, IntrTxUnderrun=0x0210,
	IntrPCIErr=0x0040,
	IntrStatsMax=0x0080, IntrRxEarly=0x0100,
	IntrRxOverflow=0x0400, IntrRxDropped=0x0800, IntrRxNoBuf=0x1000,
	IntrTxAborted=0x2000, IntrLinkChange=0x4000,
	IntrRxWakeUp=0x8000,
	IntrNormalSummary=0x0003, IntrAbnormalSummary=0xC260,
	IntrTxDescRace=0x080000,	/* mapped from IntrStatus2 */
	IntrTxErrSummary=0x082218,
};

/* Bits in WOLcrSet/WOLcrClr and PwrcsrSet/PwrcsrClr */
enum wol_bits {
	WOLucast	= 0x10,
	WOLmagic	= 0x20,
	WOLbmcast	= 0x30,
	WOLlnkon	= 0x40,
	WOLlnkoff	= 0x80,
};

/* The Rx and Tx buffer descriptors. */
struct rx_desc {
	__le32 rx_status;
	__le32 desc_length;	/* Chain flag, Buffer/frame length */
	__le32 addr;
	__le32 next_desc;
};
struct tx_desc {
	__le32 tx_status;
	__le32 desc_length;	/* Chain flag, Tx Config, Frame length */
	__le32 addr;
	__le32 next_desc;
};

/* Initial value for tx_desc.desc_length, Buffer size goes to bits 0-10 */
#define TXDESC		0x00e08000

enum rx_status_bits {
	RxOK=0x8000, RxWholePkt=0x0300, RxErr=0x008F
};

/* Bits in *_desc.*_status */
enum desc_status_bits {
	DescOwn=0x80000000
};

/* Bits in *_desc.*_length */
enum desc_length_bits {
	DescTag=0x00010000
};

/* Bits in ChipCmd. */
enum chip_cmd_bits {
	CmdInit=0x01, CmdStart=0x02, CmdStop=0x04, CmdRxOn=0x08,
	CmdTxOn=0x10, Cmd1TxDemand=0x20, CmdRxDemand=0x40,
	Cmd1EarlyRx=0x01, Cmd1EarlyTx=0x02, Cmd1FDuplex=0x04,
	Cmd1NoTxPoll=0x08, Cmd1Reset=0x80,
};

struct rhine_private {
	/* Bit mask for configured VLAN ids */
	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];

	/* Descriptor rings */
	struct rx_desc *rx_ring;
	struct tx_desc *tx_ring;
	dma_addr_t rx_ring_dma;
	dma_addr_t tx_ring_dma;

	/* The addresses of receive-in-place skbuffs. */
	struct sk_buff *rx_skbuff[RX_RING_SIZE];
	dma_addr_t rx_skbuff_dma[RX_RING_SIZE];

	/* The saved address of a sent-in-place packet/buffer, for later free(). */
	struct sk_buff *tx_skbuff[TX_RING_SIZE];
	dma_addr_t tx_skbuff_dma[TX_RING_SIZE];

	/* Tx bounce buffers (Rhine-I only) */
	unsigned char *tx_buf[TX_RING_SIZE];
	unsigned char *tx_bufs;
	dma_addr_t tx_bufs_dma;

	struct pci_dev *pdev;
	long pioaddr;
	struct net_device *dev;
	struct napi_struct napi;
	spinlock_t lock;
	struct work_struct reset_task;

	/* Frequently used values: keep some adjacent for cache effect. */
	u32 quirks;
	struct rx_desc *rx_head_desc;
	unsigned int cur_rx, dirty_rx;	/* Producer/consumer ring indices */
	unsigned int cur_tx, dirty_tx;
	unsigned int rx_buf_sz;		/* Based on MTU+slack. */
	u8 wolopts;

	u8 tx_thresh, rx_thresh;

	struct mii_if_info mii_if;
	void __iomem *base;
};

#define BYTE_REG_BITS_ON(x, p)      do { iowrite8((ioread8((p))|(x)), (p)); } while (0)
#define WORD_REG_BITS_ON(x, p)      do { iowrite16((ioread16((p))|(x)), (p)); } while (0)
#define DWORD_REG_BITS_ON(x, p)     do { iowrite32((ioread32((p))|(x)), (p)); } while (0)

#define BYTE_REG_BITS_IS_ON(x, p)   (ioread8((p)) & (x))
#define WORD_REG_BITS_IS_ON(x, p)   (ioread16((p)) & (x))
#define DWORD_REG_BITS_IS_ON(x, p)  (ioread32((p)) & (x))

#define BYTE_REG_BITS_OFF(x, p)     do { iowrite8(ioread8((p)) & (~(x)), (p)); } while (0)
#define WORD_REG_BITS_OFF(x, p)     do { iowrite16(ioread16((p)) & (~(x)), (p)); } while (0)
#define DWORD_REG_BITS_OFF(x, p)    do { iowrite32(ioread32((p)) & (~(x)), (p)); } while (0)

#define BYTE_REG_BITS_SET(x, m, p)   do { iowrite8((ioread8((p)) & (~(m)))|(x), (p)); } while (0)
#define WORD_REG_BITS_SET(x, m, p)   do { iowrite16((ioread16((p)) & (~(m)))|(x), (p)); } while (0)
#define DWORD_REG_BITS_SET(x, m, p)  do { iowrite32((ioread32((p)) & (~(m)))|(x), (p)); } while (0)


static int  mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
static int  rhine_open(struct net_device *dev);
static void rhine_reset_task(struct work_struct *work);
static void rhine_tx_timeout(struct net_device *dev);
static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
				  struct net_device *dev);
static irqreturn_t rhine_interrupt(int irq, void *dev_instance);
static void rhine_tx(struct net_device *dev);
static int rhine_rx(struct net_device *dev, int limit);
static void rhine_error(struct net_device *dev, int intr_status);
static void rhine_set_rx_mode(struct net_device *dev);
static struct net_device_stats *rhine_get_stats(struct net_device *dev);
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static const struct ethtool_ops netdev_ethtool_ops;
static int  rhine_close(struct net_device *dev);
static void rhine_shutdown (struct pci_dev *pdev);
static void rhine_vlan_rx_add_vid(struct net_device *dev, unsigned short vid);
static void rhine_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid);
static void rhine_set_cam(void __iomem *ioaddr, int idx, u8 *addr);
static void rhine_set_vlan_cam(void __iomem *ioaddr, int idx, u8 *addr);
static void rhine_set_cam_mask(void __iomem *ioaddr, u32 mask);
static void rhine_set_vlan_cam_mask(void __iomem *ioaddr, u32 mask);
static void rhine_init_cam_filter(struct net_device *dev);
static void rhine_update_vcam(struct net_device *dev);

#define RHINE_WAIT_FOR(condition)					\
do {									\
	int i = 1024;							\
	while (!(condition) && --i)					\
		;							\
	if (debug > 1 && i < 512)					\
		pr_info("%4d cycles used @ %s:%d\n",			\
			1024 - i, __func__, __LINE__);			\
} while (0)

static inline u32 get_intr_status(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	u32 intr_status;

	intr_status = ioread16(ioaddr + IntrStatus);
	/* On Rhine-II, Bit 3 indicates Tx descriptor write-back race. */
	if (rp->quirks & rqStatusWBRace)
		intr_status |= ioread8(ioaddr + IntrStatus2) << 16;
	return intr_status;
}

/*
 * Get power related registers into sane state.
 * Notify user about past WOL event.
 */
static void rhine_power_init(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	u16 wolstat;

	if (rp->quirks & rqWOL) {
		/* Make sure chip is in power state D0 */
		iowrite8(ioread8(ioaddr + StickyHW) & 0xFC, ioaddr + StickyHW);

		/* Disable "force PME-enable" */
		iowrite8(0x80, ioaddr + WOLcgClr);

		/* Clear power-event config bits (WOL) */
		iowrite8(0xFF, ioaddr + WOLcrClr);
		/* More recent cards can manage two additional patterns */
		if (rp->quirks & rq6patterns)
			iowrite8(0x03, ioaddr + WOLcrClr1);

		/* Save power-event status bits */
		wolstat = ioread8(ioaddr + PwrcsrSet);
		if (rp->quirks & rq6patterns)
			wolstat |= (ioread8(ioaddr + PwrcsrSet1) & 0x03) << 8;

		/* Clear power-event status bits */
		iowrite8(0xFF, ioaddr + PwrcsrClr);
		if (rp->quirks & rq6patterns)
			iowrite8(0x03, ioaddr + PwrcsrClr1);

		if (wolstat) {
			char *reason;
			switch (wolstat) {
			case WOLmagic:
				reason = "Magic packet";
				break;
			case WOLlnkon:
				reason = "Link went up";
				break;
			case WOLlnkoff:
				reason = "Link went down";
				break;
			case WOLucast:
				reason = "Unicast packet";
				break;
			case WOLbmcast:
				reason = "Multicast/broadcast packet";
				break;
			default:
				reason = "Unknown";
			}
			netdev_info(dev, "Woke system up. Reason: %s\n",
				    reason);
		}
	}
}

static void rhine_chip_reset(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	iowrite8(Cmd1Reset, ioaddr + ChipCmd1);
	IOSYNC;

	if (ioread8(ioaddr + ChipCmd1) & Cmd1Reset) {
		netdev_info(dev, "Reset not complete yet. Trying harder.\n");

		/* Force reset */
		if (rp->quirks & rqForceReset)
			iowrite8(0x40, ioaddr + MiscCmd);

		/* Reset can take somewhat longer (rare) */
		RHINE_WAIT_FOR(!(ioread8(ioaddr + ChipCmd1) & Cmd1Reset));
	}

	if (debug > 1)
		netdev_info(dev, "Reset %s\n",
			    (ioread8(ioaddr + ChipCmd1) & Cmd1Reset) ?
			    "failed" : "succeeded");
}

#ifdef USE_MMIO
static void enable_mmio(long pioaddr, u32 quirks)
{
	int n;
	if (quirks & rqRhineI) {
		/* More recent docs say that this bit is reserved ... */
		n = inb(pioaddr + ConfigA) | 0x20;
		outb(n, pioaddr + ConfigA);
	} else {
		n = inb(pioaddr + ConfigD) | 0x80;
		outb(n, pioaddr + ConfigD);
	}
}
#endif

/*
 * Loads bytes 0x00-0x05, 0x6E-0x6F, 0x78-0x7B from EEPROM
 * (plus 0x6C for Rhine-I/II)
 */
static void __devinit rhine_reload_eeprom(long pioaddr, struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	outb(0x20, pioaddr + MACRegEEcsr);
	RHINE_WAIT_FOR(!(inb(pioaddr + MACRegEEcsr) & 0x20));

#ifdef USE_MMIO
	/*
	 * Reloading from EEPROM overwrites ConfigA-D, so we must re-enable
	 * MMIO. If reloading EEPROM was done first this could be avoided, but
	 * it is not known if that still works with the "win98-reboot" problem.
	 */
	enable_mmio(pioaddr, rp->quirks);
#endif

	/* Turn off EEPROM-controlled wake-up (magic packet) */
	if (rp->quirks & rqWOL)
		iowrite8(ioread8(ioaddr + ConfigA) & 0xFC, ioaddr + ConfigA);

}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void rhine_poll(struct net_device *dev)
{
	disable_irq(dev->irq);
	rhine_interrupt(dev->irq, (void *)dev);
	enable_irq(dev->irq);
}
#endif

static int rhine_napipoll(struct napi_struct *napi, int budget)
{
	struct rhine_private *rp = container_of(napi, struct rhine_private, napi);
	struct net_device *dev = rp->dev;
	void __iomem *ioaddr = rp->base;
	int work_done;

	work_done = rhine_rx(dev, budget);

	if (work_done < budget) {
		napi_complete(napi);

		iowrite16(IntrRxDone | IntrRxErr | IntrRxEmpty| IntrRxOverflow |
			  IntrRxDropped | IntrRxNoBuf | IntrTxAborted |
			  IntrTxDone | IntrTxError | IntrTxUnderrun |
			  IntrPCIErr | IntrStatsMax | IntrLinkChange,
			  ioaddr + IntrEnable);
	}
	return work_done;
}

static void __devinit rhine_hw_init(struct net_device *dev, long pioaddr)
{
	struct rhine_private *rp = netdev_priv(dev);

	/* Reset the chip to erase previous misconfiguration. */
	rhine_chip_reset(dev);

	/* Rhine-I needs extra time to recuperate before EEPROM reload */
	if (rp->quirks & rqRhineI)
		msleep(5);

	/* Reload EEPROM controlled bytes cleared by soft reset */
	rhine_reload_eeprom(pioaddr, dev);
}

static const struct net_device_ops rhine_netdev_ops = {
	.ndo_open		 = rhine_open,
	.ndo_stop		 = rhine_close,
	.ndo_start_xmit		 = rhine_start_tx,
	.ndo_get_stats		 = rhine_get_stats,
	.ndo_set_multicast_list	 = rhine_set_rx_mode,
	.ndo_change_mtu		 = eth_change_mtu,
	.ndo_validate_addr	 = eth_validate_addr,
	.ndo_set_mac_address	 = eth_mac_addr,
	.ndo_do_ioctl		 = netdev_ioctl,
	.ndo_tx_timeout		 = rhine_tx_timeout,
	.ndo_vlan_rx_add_vid	 = rhine_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	 = rhine_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	 = rhine_poll,
#endif
};

static int __devinit rhine_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct rhine_private *rp;
	int i, rc;
	u32 quirks;
	long pioaddr;
	long memaddr;
	void __iomem *ioaddr;
	int io_size, phy_id;
	const char *name;
#ifdef USE_MMIO
	int bar = 1;
#else
	int bar = 0;
#endif

/* when built into the kernel, we only print version if device is found */
#ifndef MODULE
	pr_info_once("%s\n", version);
#endif

	io_size = 256;
	phy_id = 0;
	quirks = 0;
	name = "Rhine";
	if (pdev->revision < VTunknown0) {
		quirks = rqRhineI;
		io_size = 128;
	}
	else if (pdev->revision >= VT6102) {
		quirks = rqWOL | rqForceReset;
		if (pdev->revision < VT6105) {
			name = "Rhine II";
			quirks |= rqStatusWBRace;	/* Rhine-II exclusive */
		}
		else {
			phy_id = 1;	/* Integrated PHY, phy_id fixed to 1 */
			if (pdev->revision >= VT6105_B0)
				quirks |= rq6patterns;
			if (pdev->revision < VT6105M)
				name = "Rhine III";
			else
				name = "Rhine III (Management Adapter)";
		}
	}

	rc = pci_enable_device(pdev);
	if (rc)
		goto err_out;

	/* this should always be supported */
	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (rc) {
		dev_err(&pdev->dev,
			"32-bit PCI DMA addresses not supported by the card!?\n");
		goto err_out;
	}

	/* sanity check */
	if ((pci_resource_len(pdev, 0) < io_size) ||
	    (pci_resource_len(pdev, 1) < io_size)) {
		rc = -EIO;
		dev_err(&pdev->dev, "Insufficient PCI resources, aborting\n");
		goto err_out;
	}

	pioaddr = pci_resource_start(pdev, 0);
	memaddr = pci_resource_start(pdev, 1);

	pci_set_master(pdev);

	dev = alloc_etherdev(sizeof(struct rhine_private));
	if (!dev) {
		rc = -ENOMEM;
		dev_err(&pdev->dev, "alloc_etherdev failed\n");
		goto err_out;
	}
	SET_NETDEV_DEV(dev, &pdev->dev);

	rp = netdev_priv(dev);
	rp->dev = dev;
	rp->quirks = quirks;
	rp->pioaddr = pioaddr;
	rp->pdev = pdev;

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc)
		goto err_out_free_netdev;

	ioaddr = pci_iomap(pdev, bar, io_size);
	if (!ioaddr) {
		rc = -EIO;
		dev_err(&pdev->dev,
			"ioremap failed for device %s, region 0x%X @ 0x%lX\n",
			pci_name(pdev), io_size, memaddr);
		goto err_out_free_res;
	}

#ifdef USE_MMIO
	enable_mmio(pioaddr, quirks);

	/* Check that selected MMIO registers match the PIO ones */
	i = 0;
	while (mmio_verify_registers[i]) {
		int reg = mmio_verify_registers[i++];
		unsigned char a = inb(pioaddr+reg);
		unsigned char b = readb(ioaddr+reg);
		if (a != b) {
			rc = -EIO;
			dev_err(&pdev->dev,
				"MMIO do not match PIO [%02x] (%02x != %02x)\n",
				reg, a, b);
			goto err_out_unmap;
		}
	}
#endif /* USE_MMIO */

	dev->base_addr = (unsigned long)ioaddr;
	rp->base = ioaddr;

	/* Get chip registers into a sane state */
	rhine_power_init(dev);
	rhine_hw_init(dev, pioaddr);

	for (i = 0; i < 6; i++)
		dev->dev_addr[i] = ioread8(ioaddr + StationAddr + i);

	if (!is_valid_ether_addr(dev->dev_addr)) {
		/* Report it and use a random ethernet address instead */
		netdev_err(dev, "Invalid MAC address: %pM\n", dev->dev_addr);
		random_ether_addr(dev->dev_addr);
		netdev_info(dev, "Using random MAC address: %pM\n",
			    dev->dev_addr);
	}
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);

	/* For Rhine-I/II, phy_id is loaded from EEPROM */
	if (!phy_id)
		phy_id = ioread8(ioaddr + 0x6C);

	dev->irq = pdev->irq;

	spin_lock_init(&rp->lock);
	INIT_WORK(&rp->reset_task, rhine_reset_task);

	rp->mii_if.dev = dev;
	rp->mii_if.mdio_read = mdio_read;
	rp->mii_if.mdio_write = mdio_write;
	rp->mii_if.phy_id_mask = 0x1f;
	rp->mii_if.reg_num_mask = 0x1f;

	/* The chip-specific entries in the device structure. */
	dev->netdev_ops = &rhine_netdev_ops;
	dev->ethtool_ops = &netdev_ethtool_ops,
	dev->watchdog_timeo = TX_TIMEOUT;

	netif_napi_add(dev, &rp->napi, rhine_napipoll, 64);

	if (rp->quirks & rqRhineI)
		dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM;

	if (pdev->revision >= VT6105M)
		dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
		NETIF_F_HW_VLAN_FILTER;

	/* dev->name not defined before register_netdev()! */
	rc = register_netdev(dev);
	if (rc)
		goto err_out_unmap;

	netdev_info(dev, "VIA %s at 0x%lx, %pM, IRQ %d\n",
		    name,
#ifdef USE_MMIO
		    memaddr,
#else
		    (long)ioaddr,
#endif
		    dev->dev_addr, pdev->irq);

	pci_set_drvdata(pdev, dev);

	{
		u16 mii_cmd;
		int mii_status = mdio_read(dev, phy_id, 1);
		mii_cmd = mdio_read(dev, phy_id, MII_BMCR) & ~BMCR_ISOLATE;
		mdio_write(dev, phy_id, MII_BMCR, mii_cmd);
		if (mii_status != 0xffff && mii_status != 0x0000) {
			rp->mii_if.advertising = mdio_read(dev, phy_id, 4);
			netdev_info(dev,
				    "MII PHY found at address %d, status 0x%04x advertising %04x Link %04x\n",
				    phy_id,
				    mii_status, rp->mii_if.advertising,
				    mdio_read(dev, phy_id, 5));

			/* set IFF_RUNNING */
			if (mii_status & BMSR_LSTATUS)
				netif_carrier_on(dev);
			else
				netif_carrier_off(dev);

		}
	}
	rp->mii_if.phy_id = phy_id;
	if (debug > 1 && avoid_D3)
		netdev_info(dev, "No D3 power state at shutdown\n");

	return 0;

err_out_unmap:
	pci_iounmap(pdev, ioaddr);
err_out_free_res:
	pci_release_regions(pdev);
err_out_free_netdev:
	free_netdev(dev);
err_out:
	return rc;
}

static int alloc_ring(struct net_device* dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void *ring;
	dma_addr_t ring_dma;

	ring = pci_alloc_consistent(rp->pdev,
				    RX_RING_SIZE * sizeof(struct rx_desc) +
				    TX_RING_SIZE * sizeof(struct tx_desc),
				    &ring_dma);
	if (!ring) {
		netdev_err(dev, "Could not allocate DMA memory\n");
		return -ENOMEM;
	}
	if (rp->quirks & rqRhineI) {
		rp->tx_bufs = pci_alloc_consistent(rp->pdev,
						   PKT_BUF_SZ * TX_RING_SIZE,
						   &rp->tx_bufs_dma);
		if (rp->tx_bufs == NULL) {
			pci_free_consistent(rp->pdev,
					    RX_RING_SIZE * sizeof(struct rx_desc) +
					    TX_RING_SIZE * sizeof(struct tx_desc),
					    ring, ring_dma);
			return -ENOMEM;
		}
	}

	rp->rx_ring = ring;
	rp->tx_ring = ring + RX_RING_SIZE * sizeof(struct rx_desc);
	rp->rx_ring_dma = ring_dma;
	rp->tx_ring_dma = ring_dma + RX_RING_SIZE * sizeof(struct rx_desc);

	return 0;
}

static void free_ring(struct net_device* dev)
{
	struct rhine_private *rp = netdev_priv(dev);

	pci_free_consistent(rp->pdev,
			    RX_RING_SIZE * sizeof(struct rx_desc) +
			    TX_RING_SIZE * sizeof(struct tx_desc),
			    rp->rx_ring, rp->rx_ring_dma);
	rp->tx_ring = NULL;

	if (rp->tx_bufs)
		pci_free_consistent(rp->pdev, PKT_BUF_SZ * TX_RING_SIZE,
				    rp->tx_bufs, rp->tx_bufs_dma);

	rp->tx_bufs = NULL;

}

static void alloc_rbufs(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	dma_addr_t next;
	int i;

	rp->dirty_rx = rp->cur_rx = 0;

	rp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
	rp->rx_head_desc = &rp->rx_ring[0];
	next = rp->rx_ring_dma;

	/* Init the ring entries */
	for (i = 0; i < RX_RING_SIZE; i++) {
		rp->rx_ring[i].rx_status = 0;
		rp->rx_ring[i].desc_length = cpu_to_le32(rp->rx_buf_sz);
		next += sizeof(struct rx_desc);
		rp->rx_ring[i].next_desc = cpu_to_le32(next);
		rp->rx_skbuff[i] = NULL;
	}
	/* Mark the last entry as wrapping the ring. */
	rp->rx_ring[i-1].next_desc = cpu_to_le32(rp->rx_ring_dma);

	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb = netdev_alloc_skb(dev, rp->rx_buf_sz);
		rp->rx_skbuff[i] = skb;
		if (skb == NULL)
			break;
		skb->dev = dev;		/* Mark as being used by this device. */

		rp->rx_skbuff_dma[i] =
			pci_map_single(rp->pdev, skb->data, rp->rx_buf_sz,
				       PCI_DMA_FROMDEVICE);

		rp->rx_ring[i].addr = cpu_to_le32(rp->rx_skbuff_dma[i]);
		rp->rx_ring[i].rx_status = cpu_to_le32(DescOwn);
	}
	rp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
}

static void free_rbufs(struct net_device* dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	int i;

	/* Free all the skbuffs in the Rx queue. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		rp->rx_ring[i].rx_status = 0;
		rp->rx_ring[i].addr = cpu_to_le32(0xBADF00D0);	/* An invalid address. */
		if (rp->rx_skbuff[i]) {
			pci_unmap_single(rp->pdev,
					 rp->rx_skbuff_dma[i],
					 rp->rx_buf_sz, PCI_DMA_FROMDEVICE);
			dev_kfree_skb(rp->rx_skbuff[i]);
		}
		rp->rx_skbuff[i] = NULL;
	}
}

static void alloc_tbufs(struct net_device* dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	dma_addr_t next;
	int i;

	rp->dirty_tx = rp->cur_tx = 0;
	next = rp->tx_ring_dma;
	for (i = 0; i < TX_RING_SIZE; i++) {
		rp->tx_skbuff[i] = NULL;
		rp->tx_ring[i].tx_status = 0;
		rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
		next += sizeof(struct tx_desc);
		rp->tx_ring[i].next_desc = cpu_to_le32(next);
		if (rp->quirks & rqRhineI)
			rp->tx_buf[i] = &rp->tx_bufs[i * PKT_BUF_SZ];
	}
	rp->tx_ring[i-1].next_desc = cpu_to_le32(rp->tx_ring_dma);

}

static void free_tbufs(struct net_device* dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	int i;

	for (i = 0; i < TX_RING_SIZE; i++) {
		rp->tx_ring[i].tx_status = 0;
		rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
		rp->tx_ring[i].addr = cpu_to_le32(0xBADF00D0);	/* An invalid address. */
*/
1076*f2148a47SJeff Kirsher if (rp->tx_skbuff[i]) {
1077*f2148a47SJeff Kirsher if (rp->tx_skbuff_dma[i]) {
1078*f2148a47SJeff Kirsher pci_unmap_single(rp->pdev,
1079*f2148a47SJeff Kirsher rp->tx_skbuff_dma[i],
1080*f2148a47SJeff Kirsher rp->tx_skbuff[i]->len,
1081*f2148a47SJeff Kirsher PCI_DMA_TODEVICE);
1082*f2148a47SJeff Kirsher }
1083*f2148a47SJeff Kirsher dev_kfree_skb(rp->tx_skbuff[i]);
1084*f2148a47SJeff Kirsher }
1085*f2148a47SJeff Kirsher rp->tx_skbuff[i] = NULL;
1086*f2148a47SJeff Kirsher rp->tx_buf[i] = NULL;
1087*f2148a47SJeff Kirsher }
1088*f2148a47SJeff Kirsher }
1089*f2148a47SJeff Kirsher
1090*f2148a47SJeff Kirsher static void rhine_check_media(struct net_device *dev, unsigned int init_media)
1091*f2148a47SJeff Kirsher {
1092*f2148a47SJeff Kirsher struct rhine_private *rp = netdev_priv(dev);
1093*f2148a47SJeff Kirsher void __iomem *ioaddr = rp->base;
1094*f2148a47SJeff Kirsher
1095*f2148a47SJeff Kirsher mii_check_media(&rp->mii_if, debug, init_media);
1096*f2148a47SJeff Kirsher
1097*f2148a47SJeff Kirsher if (rp->mii_if.full_duplex)
1098*f2148a47SJeff Kirsher iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1FDuplex,
1099*f2148a47SJeff Kirsher ioaddr + ChipCmd1);
1100*f2148a47SJeff Kirsher else
1101*f2148a47SJeff Kirsher iowrite8(ioread8(ioaddr + ChipCmd1) & ~Cmd1FDuplex,
1102*f2148a47SJeff Kirsher ioaddr + ChipCmd1);
1103*f2148a47SJeff Kirsher if (debug > 1)
1104*f2148a47SJeff Kirsher netdev_info(dev, "force_media %d, carrier %d\n",
1105*f2148a47SJeff Kirsher rp->mii_if.force_media, netif_carrier_ok(dev));
1106*f2148a47SJeff Kirsher }
1107*f2148a47SJeff Kirsher
1108*f2148a47SJeff Kirsher /* Called after status of force_media possibly changed */
1109*f2148a47SJeff Kirsher static void rhine_set_carrier(struct mii_if_info *mii)
1110*f2148a47SJeff Kirsher {
1111*f2148a47SJeff Kirsher if (mii->force_media) {
1112*f2148a47SJeff Kirsher /* autoneg is off: Link is always assumed to be up */
1113*f2148a47SJeff Kirsher if (!netif_carrier_ok(mii->dev))
1114*f2148a47SJeff Kirsher netif_carrier_on(mii->dev);
1115*f2148a47SJeff Kirsher }
1116*f2148a47SJeff Kirsher else /* Let MII library update carrier status */
1117*f2148a47SJeff Kirsher rhine_check_media(mii->dev, 0);
1118*f2148a47SJeff Kirsher if (debug > 1)
1119*f2148a47SJeff Kirsher netdev_info(mii->dev, "force_media %d, carrier %d\n",
1120*f2148a47SJeff Kirsher mii->force_media, netif_carrier_ok(mii->dev));
1121*f2148a47SJeff Kirsher }
1122*f2148a47SJeff Kirsher
1123*f2148a47SJeff Kirsher /**
1124*f2148a47SJeff Kirsher * rhine_set_cam - set CAM multicast filters
1125*f2148a47SJeff Kirsher * @ioaddr: register block of this Rhine
1126*f2148a47SJeff Kirsher * @idx: multicast CAM index [0..MCAM_SIZE-1]
1127*f2148a47SJeff Kirsher * @addr: multicast address (6 bytes)
1128*f2148a47SJeff Kirsher *
1129*f2148a47SJeff Kirsher * Load addresses into multicast filters.
1130*f2148a47SJeff Kirsher */ 1131*f2148a47SJeff Kirsher static void rhine_set_cam(void __iomem *ioaddr, int idx, u8 *addr) 1132*f2148a47SJeff Kirsher { 1133*f2148a47SJeff Kirsher int i; 1134*f2148a47SJeff Kirsher 1135*f2148a47SJeff Kirsher iowrite8(CAMC_CAMEN, ioaddr + CamCon); 1136*f2148a47SJeff Kirsher wmb(); 1137*f2148a47SJeff Kirsher 1138*f2148a47SJeff Kirsher /* Paranoid -- idx out of range should never happen */ 1139*f2148a47SJeff Kirsher idx &= (MCAM_SIZE - 1); 1140*f2148a47SJeff Kirsher 1141*f2148a47SJeff Kirsher iowrite8((u8) idx, ioaddr + CamAddr); 1142*f2148a47SJeff Kirsher 1143*f2148a47SJeff Kirsher for (i = 0; i < 6; i++, addr++) 1144*f2148a47SJeff Kirsher iowrite8(*addr, ioaddr + MulticastFilter0 + i); 1145*f2148a47SJeff Kirsher udelay(10); 1146*f2148a47SJeff Kirsher wmb(); 1147*f2148a47SJeff Kirsher 1148*f2148a47SJeff Kirsher iowrite8(CAMC_CAMWR | CAMC_CAMEN, ioaddr + CamCon); 1149*f2148a47SJeff Kirsher udelay(10); 1150*f2148a47SJeff Kirsher 1151*f2148a47SJeff Kirsher iowrite8(0, ioaddr + CamCon); 1152*f2148a47SJeff Kirsher } 1153*f2148a47SJeff Kirsher 1154*f2148a47SJeff Kirsher /** 1155*f2148a47SJeff Kirsher * rhine_set_vlan_cam - set CAM VLAN filters 1156*f2148a47SJeff Kirsher * @ioaddr: register block of this Rhine 1157*f2148a47SJeff Kirsher * @idx: VLAN CAM index [0..VCAM_SIZE-1] 1158*f2148a47SJeff Kirsher * @addr: VLAN ID (2 bytes) 1159*f2148a47SJeff Kirsher * 1160*f2148a47SJeff Kirsher * Load addresses into VLAN filters. 1161*f2148a47SJeff Kirsher */ 1162*f2148a47SJeff Kirsher static void rhine_set_vlan_cam(void __iomem *ioaddr, int idx, u8 *addr) 1163*f2148a47SJeff Kirsher { 1164*f2148a47SJeff Kirsher iowrite8(CAMC_CAMEN | CAMC_VCAMSL, ioaddr + CamCon); 1165*f2148a47SJeff Kirsher wmb(); 1166*f2148a47SJeff Kirsher 1167*f2148a47SJeff Kirsher /* Paranoid -- idx out of range should never happen */ 1168*f2148a47SJeff Kirsher idx &= (VCAM_SIZE - 1); 1169*f2148a47SJeff Kirsher 1170*f2148a47SJeff Kirsher iowrite8((u8) idx, ioaddr + CamAddr); 1171*f2148a47SJeff Kirsher 1172*f2148a47SJeff Kirsher iowrite16(*((u16 *) addr), ioaddr + MulticastFilter0 + 6); 1173*f2148a47SJeff Kirsher udelay(10); 1174*f2148a47SJeff Kirsher wmb(); 1175*f2148a47SJeff Kirsher 1176*f2148a47SJeff Kirsher iowrite8(CAMC_CAMWR | CAMC_CAMEN, ioaddr + CamCon); 1177*f2148a47SJeff Kirsher udelay(10); 1178*f2148a47SJeff Kirsher 1179*f2148a47SJeff Kirsher iowrite8(0, ioaddr + CamCon); 1180*f2148a47SJeff Kirsher } 1181*f2148a47SJeff Kirsher 1182*f2148a47SJeff Kirsher /** 1183*f2148a47SJeff Kirsher * rhine_set_cam_mask - set multicast CAM mask 1184*f2148a47SJeff Kirsher * @ioaddr: register block of this Rhine 1185*f2148a47SJeff Kirsher * @mask: multicast CAM mask 1186*f2148a47SJeff Kirsher * 1187*f2148a47SJeff Kirsher * Mask sets multicast filters active/inactive. 
1188*f2148a47SJeff Kirsher */ 1189*f2148a47SJeff Kirsher static void rhine_set_cam_mask(void __iomem *ioaddr, u32 mask) 1190*f2148a47SJeff Kirsher { 1191*f2148a47SJeff Kirsher iowrite8(CAMC_CAMEN, ioaddr + CamCon); 1192*f2148a47SJeff Kirsher wmb(); 1193*f2148a47SJeff Kirsher 1194*f2148a47SJeff Kirsher /* write mask */ 1195*f2148a47SJeff Kirsher iowrite32(mask, ioaddr + CamMask); 1196*f2148a47SJeff Kirsher 1197*f2148a47SJeff Kirsher /* disable CAMEN */ 1198*f2148a47SJeff Kirsher iowrite8(0, ioaddr + CamCon); 1199*f2148a47SJeff Kirsher } 1200*f2148a47SJeff Kirsher 1201*f2148a47SJeff Kirsher /** 1202*f2148a47SJeff Kirsher * rhine_set_vlan_cam_mask - set VLAN CAM mask 1203*f2148a47SJeff Kirsher * @ioaddr: register block of this Rhine 1204*f2148a47SJeff Kirsher * @mask: VLAN CAM mask 1205*f2148a47SJeff Kirsher * 1206*f2148a47SJeff Kirsher * Mask sets VLAN filters active/inactive. 1207*f2148a47SJeff Kirsher */ 1208*f2148a47SJeff Kirsher static void rhine_set_vlan_cam_mask(void __iomem *ioaddr, u32 mask) 1209*f2148a47SJeff Kirsher { 1210*f2148a47SJeff Kirsher iowrite8(CAMC_CAMEN | CAMC_VCAMSL, ioaddr + CamCon); 1211*f2148a47SJeff Kirsher wmb(); 1212*f2148a47SJeff Kirsher 1213*f2148a47SJeff Kirsher /* write mask */ 1214*f2148a47SJeff Kirsher iowrite32(mask, ioaddr + CamMask); 1215*f2148a47SJeff Kirsher 1216*f2148a47SJeff Kirsher /* disable CAMEN */ 1217*f2148a47SJeff Kirsher iowrite8(0, ioaddr + CamCon); 1218*f2148a47SJeff Kirsher } 1219*f2148a47SJeff Kirsher 1220*f2148a47SJeff Kirsher /** 1221*f2148a47SJeff Kirsher * rhine_init_cam_filter - initialize CAM filters 1222*f2148a47SJeff Kirsher * @dev: network device 1223*f2148a47SJeff Kirsher * 1224*f2148a47SJeff Kirsher * Initialize (disable) hardware VLAN and multicast support on this 1225*f2148a47SJeff Kirsher * Rhine. 1226*f2148a47SJeff Kirsher */ 1227*f2148a47SJeff Kirsher static void rhine_init_cam_filter(struct net_device *dev) 1228*f2148a47SJeff Kirsher { 1229*f2148a47SJeff Kirsher struct rhine_private *rp = netdev_priv(dev); 1230*f2148a47SJeff Kirsher void __iomem *ioaddr = rp->base; 1231*f2148a47SJeff Kirsher 1232*f2148a47SJeff Kirsher /* Disable all CAMs */ 1233*f2148a47SJeff Kirsher rhine_set_vlan_cam_mask(ioaddr, 0); 1234*f2148a47SJeff Kirsher rhine_set_cam_mask(ioaddr, 0); 1235*f2148a47SJeff Kirsher 1236*f2148a47SJeff Kirsher /* disable hardware VLAN support */ 1237*f2148a47SJeff Kirsher BYTE_REG_BITS_ON(TCR_PQEN, ioaddr + TxConfig); 1238*f2148a47SJeff Kirsher BYTE_REG_BITS_OFF(BCR1_VIDFR, ioaddr + PCIBusConfig1); 1239*f2148a47SJeff Kirsher } 1240*f2148a47SJeff Kirsher 1241*f2148a47SJeff Kirsher /** 1242*f2148a47SJeff Kirsher * rhine_update_vcam - update VLAN CAM filters 1243*f2148a47SJeff Kirsher * @rp: rhine_private data of this Rhine 1244*f2148a47SJeff Kirsher * 1245*f2148a47SJeff Kirsher * Update VLAN CAM filters to match configuration change. 
1246*f2148a47SJeff Kirsher */ 1247*f2148a47SJeff Kirsher static void rhine_update_vcam(struct net_device *dev) 1248*f2148a47SJeff Kirsher { 1249*f2148a47SJeff Kirsher struct rhine_private *rp = netdev_priv(dev); 1250*f2148a47SJeff Kirsher void __iomem *ioaddr = rp->base; 1251*f2148a47SJeff Kirsher u16 vid; 1252*f2148a47SJeff Kirsher u32 vCAMmask = 0; /* 32 vCAMs (6105M and better) */ 1253*f2148a47SJeff Kirsher unsigned int i = 0; 1254*f2148a47SJeff Kirsher 1255*f2148a47SJeff Kirsher for_each_set_bit(vid, rp->active_vlans, VLAN_N_VID) { 1256*f2148a47SJeff Kirsher rhine_set_vlan_cam(ioaddr, i, (u8 *)&vid); 1257*f2148a47SJeff Kirsher vCAMmask |= 1 << i; 1258*f2148a47SJeff Kirsher if (++i >= VCAM_SIZE) 1259*f2148a47SJeff Kirsher break; 1260*f2148a47SJeff Kirsher } 1261*f2148a47SJeff Kirsher rhine_set_vlan_cam_mask(ioaddr, vCAMmask); 1262*f2148a47SJeff Kirsher } 1263*f2148a47SJeff Kirsher 1264*f2148a47SJeff Kirsher static void rhine_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) 1265*f2148a47SJeff Kirsher { 1266*f2148a47SJeff Kirsher struct rhine_private *rp = netdev_priv(dev); 1267*f2148a47SJeff Kirsher 1268*f2148a47SJeff Kirsher spin_lock_irq(&rp->lock); 1269*f2148a47SJeff Kirsher set_bit(vid, rp->active_vlans); 1270*f2148a47SJeff Kirsher rhine_update_vcam(dev); 1271*f2148a47SJeff Kirsher spin_unlock_irq(&rp->lock); 1272*f2148a47SJeff Kirsher } 1273*f2148a47SJeff Kirsher 1274*f2148a47SJeff Kirsher static void rhine_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) 1275*f2148a47SJeff Kirsher { 1276*f2148a47SJeff Kirsher struct rhine_private *rp = netdev_priv(dev); 1277*f2148a47SJeff Kirsher 1278*f2148a47SJeff Kirsher spin_lock_irq(&rp->lock); 1279*f2148a47SJeff Kirsher clear_bit(vid, rp->active_vlans); 1280*f2148a47SJeff Kirsher rhine_update_vcam(dev); 1281*f2148a47SJeff Kirsher spin_unlock_irq(&rp->lock); 1282*f2148a47SJeff Kirsher } 1283*f2148a47SJeff Kirsher 1284*f2148a47SJeff Kirsher static void init_registers(struct net_device *dev) 1285*f2148a47SJeff Kirsher { 1286*f2148a47SJeff Kirsher struct rhine_private *rp = netdev_priv(dev); 1287*f2148a47SJeff Kirsher void __iomem *ioaddr = rp->base; 1288*f2148a47SJeff Kirsher int i; 1289*f2148a47SJeff Kirsher 1290*f2148a47SJeff Kirsher for (i = 0; i < 6; i++) 1291*f2148a47SJeff Kirsher iowrite8(dev->dev_addr[i], ioaddr + StationAddr + i); 1292*f2148a47SJeff Kirsher 1293*f2148a47SJeff Kirsher /* Initialize other registers. */ 1294*f2148a47SJeff Kirsher iowrite16(0x0006, ioaddr + PCIBusConfig); /* Tune configuration??? */ 1295*f2148a47SJeff Kirsher /* Configure initial FIFO thresholds. */ 1296*f2148a47SJeff Kirsher iowrite8(0x20, ioaddr + TxConfig); 1297*f2148a47SJeff Kirsher rp->tx_thresh = 0x20; 1298*f2148a47SJeff Kirsher rp->rx_thresh = 0x60; /* Written in rhine_set_rx_mode(). */ 1299*f2148a47SJeff Kirsher 1300*f2148a47SJeff Kirsher iowrite32(rp->rx_ring_dma, ioaddr + RxRingPtr); 1301*f2148a47SJeff Kirsher iowrite32(rp->tx_ring_dma, ioaddr + TxRingPtr); 1302*f2148a47SJeff Kirsher 1303*f2148a47SJeff Kirsher rhine_set_rx_mode(dev); 1304*f2148a47SJeff Kirsher 1305*f2148a47SJeff Kirsher if (rp->pdev->revision >= VT6105M) 1306*f2148a47SJeff Kirsher rhine_init_cam_filter(dev); 1307*f2148a47SJeff Kirsher 1308*f2148a47SJeff Kirsher napi_enable(&rp->napi); 1309*f2148a47SJeff Kirsher 1310*f2148a47SJeff Kirsher /* Enable interrupts by setting the interrupt mask. 
*/ 1311*f2148a47SJeff Kirsher iowrite16(IntrRxDone | IntrRxErr | IntrRxEmpty| IntrRxOverflow | 1312*f2148a47SJeff Kirsher IntrRxDropped | IntrRxNoBuf | IntrTxAborted | 1313*f2148a47SJeff Kirsher IntrTxDone | IntrTxError | IntrTxUnderrun | 1314*f2148a47SJeff Kirsher IntrPCIErr | IntrStatsMax | IntrLinkChange, 1315*f2148a47SJeff Kirsher ioaddr + IntrEnable); 1316*f2148a47SJeff Kirsher 1317*f2148a47SJeff Kirsher iowrite16(CmdStart | CmdTxOn | CmdRxOn | (Cmd1NoTxPoll << 8), 1318*f2148a47SJeff Kirsher ioaddr + ChipCmd); 1319*f2148a47SJeff Kirsher rhine_check_media(dev, 1); 1320*f2148a47SJeff Kirsher } 1321*f2148a47SJeff Kirsher 1322*f2148a47SJeff Kirsher /* Enable MII link status auto-polling (required for IntrLinkChange) */ 1323*f2148a47SJeff Kirsher static void rhine_enable_linkmon(void __iomem *ioaddr) 1324*f2148a47SJeff Kirsher { 1325*f2148a47SJeff Kirsher iowrite8(0, ioaddr + MIICmd); 1326*f2148a47SJeff Kirsher iowrite8(MII_BMSR, ioaddr + MIIRegAddr); 1327*f2148a47SJeff Kirsher iowrite8(0x80, ioaddr + MIICmd); 1328*f2148a47SJeff Kirsher 1329*f2148a47SJeff Kirsher RHINE_WAIT_FOR((ioread8(ioaddr + MIIRegAddr) & 0x20)); 1330*f2148a47SJeff Kirsher 1331*f2148a47SJeff Kirsher iowrite8(MII_BMSR | 0x40, ioaddr + MIIRegAddr); 1332*f2148a47SJeff Kirsher } 1333*f2148a47SJeff Kirsher 1334*f2148a47SJeff Kirsher /* Disable MII link status auto-polling (required for MDIO access) */ 1335*f2148a47SJeff Kirsher static void rhine_disable_linkmon(void __iomem *ioaddr, u32 quirks) 1336*f2148a47SJeff Kirsher { 1337*f2148a47SJeff Kirsher iowrite8(0, ioaddr + MIICmd); 1338*f2148a47SJeff Kirsher 1339*f2148a47SJeff Kirsher if (quirks & rqRhineI) { 1340*f2148a47SJeff Kirsher iowrite8(0x01, ioaddr + MIIRegAddr); // MII_BMSR 1341*f2148a47SJeff Kirsher 1342*f2148a47SJeff Kirsher /* Can be called from ISR. Evil. */ 1343*f2148a47SJeff Kirsher mdelay(1); 1344*f2148a47SJeff Kirsher 1345*f2148a47SJeff Kirsher /* 0x80 must be set immediately before turning it off */ 1346*f2148a47SJeff Kirsher iowrite8(0x80, ioaddr + MIICmd); 1347*f2148a47SJeff Kirsher 1348*f2148a47SJeff Kirsher RHINE_WAIT_FOR(ioread8(ioaddr + MIIRegAddr) & 0x20); 1349*f2148a47SJeff Kirsher 1350*f2148a47SJeff Kirsher /* Heh. Now clear 0x80 again. */ 1351*f2148a47SJeff Kirsher iowrite8(0, ioaddr + MIICmd); 1352*f2148a47SJeff Kirsher } 1353*f2148a47SJeff Kirsher else 1354*f2148a47SJeff Kirsher RHINE_WAIT_FOR(ioread8(ioaddr + MIIRegAddr) & 0x80); 1355*f2148a47SJeff Kirsher } 1356*f2148a47SJeff Kirsher 1357*f2148a47SJeff Kirsher /* Read and write over the MII Management Data I/O (MDIO) interface. 
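   Each access pauses MII link auto-polling with rhine_disable_linkmon() and re-enables it via rhine_enable_linkmon() afterwards, since polling and MDIO commands share the MIICmd register.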
*/ 1358*f2148a47SJeff Kirsher 1359*f2148a47SJeff Kirsher static int mdio_read(struct net_device *dev, int phy_id, int regnum) 1360*f2148a47SJeff Kirsher { 1361*f2148a47SJeff Kirsher struct rhine_private *rp = netdev_priv(dev); 1362*f2148a47SJeff Kirsher void __iomem *ioaddr = rp->base; 1363*f2148a47SJeff Kirsher int result; 1364*f2148a47SJeff Kirsher 1365*f2148a47SJeff Kirsher rhine_disable_linkmon(ioaddr, rp->quirks); 1366*f2148a47SJeff Kirsher 1367*f2148a47SJeff Kirsher /* rhine_disable_linkmon already cleared MIICmd */ 1368*f2148a47SJeff Kirsher iowrite8(phy_id, ioaddr + MIIPhyAddr); 1369*f2148a47SJeff Kirsher iowrite8(regnum, ioaddr + MIIRegAddr); 1370*f2148a47SJeff Kirsher iowrite8(0x40, ioaddr + MIICmd); /* Trigger read */ 1371*f2148a47SJeff Kirsher RHINE_WAIT_FOR(!(ioread8(ioaddr + MIICmd) & 0x40)); 1372*f2148a47SJeff Kirsher result = ioread16(ioaddr + MIIData); 1373*f2148a47SJeff Kirsher 1374*f2148a47SJeff Kirsher rhine_enable_linkmon(ioaddr); 1375*f2148a47SJeff Kirsher return result; 1376*f2148a47SJeff Kirsher } 1377*f2148a47SJeff Kirsher 1378*f2148a47SJeff Kirsher static void mdio_write(struct net_device *dev, int phy_id, int regnum, int value) 1379*f2148a47SJeff Kirsher { 1380*f2148a47SJeff Kirsher struct rhine_private *rp = netdev_priv(dev); 1381*f2148a47SJeff Kirsher void __iomem *ioaddr = rp->base; 1382*f2148a47SJeff Kirsher 1383*f2148a47SJeff Kirsher rhine_disable_linkmon(ioaddr, rp->quirks); 1384*f2148a47SJeff Kirsher 1385*f2148a47SJeff Kirsher /* rhine_disable_linkmon already cleared MIICmd */ 1386*f2148a47SJeff Kirsher iowrite8(phy_id, ioaddr + MIIPhyAddr); 1387*f2148a47SJeff Kirsher iowrite8(regnum, ioaddr + MIIRegAddr); 1388*f2148a47SJeff Kirsher iowrite16(value, ioaddr + MIIData); 1389*f2148a47SJeff Kirsher iowrite8(0x20, ioaddr + MIICmd); /* Trigger write */ 1390*f2148a47SJeff Kirsher RHINE_WAIT_FOR(!(ioread8(ioaddr + MIICmd) & 0x20)); 1391*f2148a47SJeff Kirsher 1392*f2148a47SJeff Kirsher rhine_enable_linkmon(ioaddr); 1393*f2148a47SJeff Kirsher } 1394*f2148a47SJeff Kirsher 1395*f2148a47SJeff Kirsher static int rhine_open(struct net_device *dev) 1396*f2148a47SJeff Kirsher { 1397*f2148a47SJeff Kirsher struct rhine_private *rp = netdev_priv(dev); 1398*f2148a47SJeff Kirsher void __iomem *ioaddr = rp->base; 1399*f2148a47SJeff Kirsher int rc; 1400*f2148a47SJeff Kirsher 1401*f2148a47SJeff Kirsher rc = request_irq(rp->pdev->irq, rhine_interrupt, IRQF_SHARED, dev->name, 1402*f2148a47SJeff Kirsher dev); 1403*f2148a47SJeff Kirsher if (rc) 1404*f2148a47SJeff Kirsher return rc; 1405*f2148a47SJeff Kirsher 1406*f2148a47SJeff Kirsher if (debug > 1) 1407*f2148a47SJeff Kirsher netdev_dbg(dev, "%s() irq %d\n", __func__, rp->pdev->irq); 1408*f2148a47SJeff Kirsher 1409*f2148a47SJeff Kirsher rc = alloc_ring(dev); 1410*f2148a47SJeff Kirsher if (rc) { 1411*f2148a47SJeff Kirsher free_irq(rp->pdev->irq, dev); 1412*f2148a47SJeff Kirsher return rc; 1413*f2148a47SJeff Kirsher } 1414*f2148a47SJeff Kirsher alloc_rbufs(dev); 1415*f2148a47SJeff Kirsher alloc_tbufs(dev); 1416*f2148a47SJeff Kirsher rhine_chip_reset(dev); 1417*f2148a47SJeff Kirsher init_registers(dev); 1418*f2148a47SJeff Kirsher if (debug > 2) 1419*f2148a47SJeff Kirsher netdev_dbg(dev, "%s() Done - status %04x MII status: %04x\n", 1420*f2148a47SJeff Kirsher __func__, ioread16(ioaddr + ChipCmd), 1421*f2148a47SJeff Kirsher mdio_read(dev, rp->mii_if.phy_id, MII_BMSR)); 1422*f2148a47SJeff Kirsher 1423*f2148a47SJeff Kirsher netif_start_queue(dev); 1424*f2148a47SJeff Kirsher 1425*f2148a47SJeff Kirsher return 0; 1426*f2148a47SJeff Kirsher } 
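/*
 * Illustrative sketch (not from the original driver): one way the
 * mdio_read()/mdio_write() accessors above can be used.  The MII BMSR
 * latches link-down events, so it is read twice to obtain the current
 * link state.  The helper name is hypothetical; the driver itself gets
 * this information through mii_check_media()/mii_link_ok() from the mii
 * library.
 */
#if 0	/* example only, not built */
static bool rhine_example_link_up(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);

	/* First read returns the latched value, the second the live one. */
	mdio_read(dev, rp->mii_if.phy_id, MII_BMSR);
	return !!(mdio_read(dev, rp->mii_if.phy_id, MII_BMSR) & BMSR_LSTATUS);
}
#endif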
1427*f2148a47SJeff Kirsher 1428*f2148a47SJeff Kirsher static void rhine_reset_task(struct work_struct *work) 1429*f2148a47SJeff Kirsher { 1430*f2148a47SJeff Kirsher struct rhine_private *rp = container_of(work, struct rhine_private, 1431*f2148a47SJeff Kirsher reset_task); 1432*f2148a47SJeff Kirsher struct net_device *dev = rp->dev; 1433*f2148a47SJeff Kirsher 1434*f2148a47SJeff Kirsher /* protect against concurrent rx interrupts */ 1435*f2148a47SJeff Kirsher disable_irq(rp->pdev->irq); 1436*f2148a47SJeff Kirsher 1437*f2148a47SJeff Kirsher napi_disable(&rp->napi); 1438*f2148a47SJeff Kirsher 1439*f2148a47SJeff Kirsher spin_lock_bh(&rp->lock); 1440*f2148a47SJeff Kirsher 1441*f2148a47SJeff Kirsher /* clear all descriptors */ 1442*f2148a47SJeff Kirsher free_tbufs(dev); 1443*f2148a47SJeff Kirsher free_rbufs(dev); 1444*f2148a47SJeff Kirsher alloc_tbufs(dev); 1445*f2148a47SJeff Kirsher alloc_rbufs(dev); 1446*f2148a47SJeff Kirsher 1447*f2148a47SJeff Kirsher /* Reinitialize the hardware. */ 1448*f2148a47SJeff Kirsher rhine_chip_reset(dev); 1449*f2148a47SJeff Kirsher init_registers(dev); 1450*f2148a47SJeff Kirsher 1451*f2148a47SJeff Kirsher spin_unlock_bh(&rp->lock); 1452*f2148a47SJeff Kirsher enable_irq(rp->pdev->irq); 1453*f2148a47SJeff Kirsher 1454*f2148a47SJeff Kirsher dev->trans_start = jiffies; /* prevent tx timeout */ 1455*f2148a47SJeff Kirsher dev->stats.tx_errors++; 1456*f2148a47SJeff Kirsher netif_wake_queue(dev); 1457*f2148a47SJeff Kirsher } 1458*f2148a47SJeff Kirsher 1459*f2148a47SJeff Kirsher static void rhine_tx_timeout(struct net_device *dev) 1460*f2148a47SJeff Kirsher { 1461*f2148a47SJeff Kirsher struct rhine_private *rp = netdev_priv(dev); 1462*f2148a47SJeff Kirsher void __iomem *ioaddr = rp->base; 1463*f2148a47SJeff Kirsher 1464*f2148a47SJeff Kirsher netdev_warn(dev, "Transmit timed out, status %04x, PHY status %04x, resetting...\n", 1465*f2148a47SJeff Kirsher ioread16(ioaddr + IntrStatus), 1466*f2148a47SJeff Kirsher mdio_read(dev, rp->mii_if.phy_id, MII_BMSR)); 1467*f2148a47SJeff Kirsher 1468*f2148a47SJeff Kirsher schedule_work(&rp->reset_task); 1469*f2148a47SJeff Kirsher } 1470*f2148a47SJeff Kirsher 1471*f2148a47SJeff Kirsher static netdev_tx_t rhine_start_tx(struct sk_buff *skb, 1472*f2148a47SJeff Kirsher struct net_device *dev) 1473*f2148a47SJeff Kirsher { 1474*f2148a47SJeff Kirsher struct rhine_private *rp = netdev_priv(dev); 1475*f2148a47SJeff Kirsher void __iomem *ioaddr = rp->base; 1476*f2148a47SJeff Kirsher unsigned entry; 1477*f2148a47SJeff Kirsher unsigned long flags; 1478*f2148a47SJeff Kirsher 1479*f2148a47SJeff Kirsher /* Caution: the write order is important here, set the field 1480*f2148a47SJeff Kirsher with the "ownership" bits last. */ 1481*f2148a47SJeff Kirsher 1482*f2148a47SJeff Kirsher /* Calculate the next Tx descriptor entry. */ 1483*f2148a47SJeff Kirsher entry = rp->cur_tx % TX_RING_SIZE; 1484*f2148a47SJeff Kirsher 1485*f2148a47SJeff Kirsher if (skb_padto(skb, ETH_ZLEN)) 1486*f2148a47SJeff Kirsher return NETDEV_TX_OK; 1487*f2148a47SJeff Kirsher 1488*f2148a47SJeff Kirsher rp->tx_skbuff[entry] = skb; 1489*f2148a47SJeff Kirsher 1490*f2148a47SJeff Kirsher if ((rp->quirks & rqRhineI) && 1491*f2148a47SJeff Kirsher (((unsigned long)skb->data & 3) || skb_shinfo(skb)->nr_frags != 0 || skb->ip_summed == CHECKSUM_PARTIAL)) { 1492*f2148a47SJeff Kirsher /* Must use alignment buffer. 
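   Rhine-I cannot send this skb in place (data not 4-byte aligned, fragmented, or checksum offload requested), so it is copied, with the checksum filled in, into the preallocated tx_bufs bounce area.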
*/ 1493*f2148a47SJeff Kirsher if (skb->len > PKT_BUF_SZ) { 1494*f2148a47SJeff Kirsher /* packet too long, drop it */ 1495*f2148a47SJeff Kirsher dev_kfree_skb(skb); 1496*f2148a47SJeff Kirsher rp->tx_skbuff[entry] = NULL; 1497*f2148a47SJeff Kirsher dev->stats.tx_dropped++; 1498*f2148a47SJeff Kirsher return NETDEV_TX_OK; 1499*f2148a47SJeff Kirsher } 1500*f2148a47SJeff Kirsher 1501*f2148a47SJeff Kirsher /* Padding is not copied and so must be redone. */ 1502*f2148a47SJeff Kirsher skb_copy_and_csum_dev(skb, rp->tx_buf[entry]); 1503*f2148a47SJeff Kirsher if (skb->len < ETH_ZLEN) 1504*f2148a47SJeff Kirsher memset(rp->tx_buf[entry] + skb->len, 0, 1505*f2148a47SJeff Kirsher ETH_ZLEN - skb->len); 1506*f2148a47SJeff Kirsher rp->tx_skbuff_dma[entry] = 0; 1507*f2148a47SJeff Kirsher rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_bufs_dma + 1508*f2148a47SJeff Kirsher (rp->tx_buf[entry] - 1509*f2148a47SJeff Kirsher rp->tx_bufs)); 1510*f2148a47SJeff Kirsher } else { 1511*f2148a47SJeff Kirsher rp->tx_skbuff_dma[entry] = 1512*f2148a47SJeff Kirsher pci_map_single(rp->pdev, skb->data, skb->len, 1513*f2148a47SJeff Kirsher PCI_DMA_TODEVICE); 1514*f2148a47SJeff Kirsher rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_skbuff_dma[entry]); 1515*f2148a47SJeff Kirsher } 1516*f2148a47SJeff Kirsher 1517*f2148a47SJeff Kirsher rp->tx_ring[entry].desc_length = 1518*f2148a47SJeff Kirsher cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN)); 1519*f2148a47SJeff Kirsher 1520*f2148a47SJeff Kirsher if (unlikely(vlan_tx_tag_present(skb))) { 1521*f2148a47SJeff Kirsher rp->tx_ring[entry].tx_status = cpu_to_le32((vlan_tx_tag_get(skb)) << 16); 1522*f2148a47SJeff Kirsher /* request tagging */ 1523*f2148a47SJeff Kirsher rp->tx_ring[entry].desc_length |= cpu_to_le32(0x020000); 1524*f2148a47SJeff Kirsher } 1525*f2148a47SJeff Kirsher else 1526*f2148a47SJeff Kirsher rp->tx_ring[entry].tx_status = 0; 1527*f2148a47SJeff Kirsher 1528*f2148a47SJeff Kirsher /* lock eth irq */ 1529*f2148a47SJeff Kirsher spin_lock_irqsave(&rp->lock, flags); 1530*f2148a47SJeff Kirsher wmb(); 1531*f2148a47SJeff Kirsher rp->tx_ring[entry].tx_status |= cpu_to_le32(DescOwn); 1532*f2148a47SJeff Kirsher wmb(); 1533*f2148a47SJeff Kirsher 1534*f2148a47SJeff Kirsher rp->cur_tx++; 1535*f2148a47SJeff Kirsher 1536*f2148a47SJeff Kirsher /* Non-x86 Todo: explicitly flush cache lines here. 
*/ 1537*f2148a47SJeff Kirsher 1538*f2148a47SJeff Kirsher if (vlan_tx_tag_present(skb)) 1539*f2148a47SJeff Kirsher /* Tx queues are bits 7-0 (first Tx queue: bit 7) */ 1540*f2148a47SJeff Kirsher BYTE_REG_BITS_ON(1 << 7, ioaddr + TQWake); 1541*f2148a47SJeff Kirsher 1542*f2148a47SJeff Kirsher /* Wake the potentially-idle transmit channel */ 1543*f2148a47SJeff Kirsher iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand, 1544*f2148a47SJeff Kirsher ioaddr + ChipCmd1); 1545*f2148a47SJeff Kirsher IOSYNC; 1546*f2148a47SJeff Kirsher 1547*f2148a47SJeff Kirsher if (rp->cur_tx == rp->dirty_tx + TX_QUEUE_LEN) 1548*f2148a47SJeff Kirsher netif_stop_queue(dev); 1549*f2148a47SJeff Kirsher 1550*f2148a47SJeff Kirsher spin_unlock_irqrestore(&rp->lock, flags); 1551*f2148a47SJeff Kirsher 1552*f2148a47SJeff Kirsher if (debug > 4) { 1553*f2148a47SJeff Kirsher netdev_dbg(dev, "Transmit frame #%d queued in slot %d\n", 1554*f2148a47SJeff Kirsher rp->cur_tx-1, entry); 1555*f2148a47SJeff Kirsher } 1556*f2148a47SJeff Kirsher return NETDEV_TX_OK; 1557*f2148a47SJeff Kirsher } 1558*f2148a47SJeff Kirsher 1559*f2148a47SJeff Kirsher /* The interrupt handler does all of the Rx thread work and cleans up 1560*f2148a47SJeff Kirsher after the Tx thread. */ 1561*f2148a47SJeff Kirsher static irqreturn_t rhine_interrupt(int irq, void *dev_instance) 1562*f2148a47SJeff Kirsher { 1563*f2148a47SJeff Kirsher struct net_device *dev = dev_instance; 1564*f2148a47SJeff Kirsher struct rhine_private *rp = netdev_priv(dev); 1565*f2148a47SJeff Kirsher void __iomem *ioaddr = rp->base; 1566*f2148a47SJeff Kirsher u32 intr_status; 1567*f2148a47SJeff Kirsher int boguscnt = max_interrupt_work; 1568*f2148a47SJeff Kirsher int handled = 0; 1569*f2148a47SJeff Kirsher 1570*f2148a47SJeff Kirsher while ((intr_status = get_intr_status(dev))) { 1571*f2148a47SJeff Kirsher handled = 1; 1572*f2148a47SJeff Kirsher 1573*f2148a47SJeff Kirsher /* Acknowledge all of the current interrupt sources ASAP. 
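   The Tx descriptor race flag is reported in IntrStatus2 and needs its own 8-bit clear before the 16-bit write that acknowledges everything else in IntrStatus.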
*/ 1574*f2148a47SJeff Kirsher if (intr_status & IntrTxDescRace) 1575*f2148a47SJeff Kirsher iowrite8(0x08, ioaddr + IntrStatus2); 1576*f2148a47SJeff Kirsher iowrite16(intr_status & 0xffff, ioaddr + IntrStatus); 1577*f2148a47SJeff Kirsher IOSYNC; 1578*f2148a47SJeff Kirsher 1579*f2148a47SJeff Kirsher if (debug > 4) 1580*f2148a47SJeff Kirsher netdev_dbg(dev, "Interrupt, status %08x\n", 1581*f2148a47SJeff Kirsher intr_status); 1582*f2148a47SJeff Kirsher 1583*f2148a47SJeff Kirsher if (intr_status & (IntrRxDone | IntrRxErr | IntrRxDropped | 1584*f2148a47SJeff Kirsher IntrRxWakeUp | IntrRxEmpty | IntrRxNoBuf)) { 1585*f2148a47SJeff Kirsher iowrite16(IntrTxAborted | 1586*f2148a47SJeff Kirsher IntrTxDone | IntrTxError | IntrTxUnderrun | 1587*f2148a47SJeff Kirsher IntrPCIErr | IntrStatsMax | IntrLinkChange, 1588*f2148a47SJeff Kirsher ioaddr + IntrEnable); 1589*f2148a47SJeff Kirsher 1590*f2148a47SJeff Kirsher napi_schedule(&rp->napi); 1591*f2148a47SJeff Kirsher } 1592*f2148a47SJeff Kirsher 1593*f2148a47SJeff Kirsher if (intr_status & (IntrTxErrSummary | IntrTxDone)) { 1594*f2148a47SJeff Kirsher if (intr_status & IntrTxErrSummary) { 1595*f2148a47SJeff Kirsher /* Avoid scavenging before Tx engine turned off */ 1596*f2148a47SJeff Kirsher RHINE_WAIT_FOR(!(ioread8(ioaddr+ChipCmd) & CmdTxOn)); 1597*f2148a47SJeff Kirsher if (debug > 2 && 1598*f2148a47SJeff Kirsher ioread8(ioaddr+ChipCmd) & CmdTxOn) 1599*f2148a47SJeff Kirsher netdev_warn(dev, 1600*f2148a47SJeff Kirsher "%s: Tx engine still on\n", 1601*f2148a47SJeff Kirsher __func__); 1602*f2148a47SJeff Kirsher } 1603*f2148a47SJeff Kirsher rhine_tx(dev); 1604*f2148a47SJeff Kirsher } 1605*f2148a47SJeff Kirsher 1606*f2148a47SJeff Kirsher /* Abnormal error summary/uncommon events handlers. */ 1607*f2148a47SJeff Kirsher if (intr_status & (IntrPCIErr | IntrLinkChange | 1608*f2148a47SJeff Kirsher IntrStatsMax | IntrTxError | IntrTxAborted | 1609*f2148a47SJeff Kirsher IntrTxUnderrun | IntrTxDescRace)) 1610*f2148a47SJeff Kirsher rhine_error(dev, intr_status); 1611*f2148a47SJeff Kirsher 1612*f2148a47SJeff Kirsher if (--boguscnt < 0) { 1613*f2148a47SJeff Kirsher netdev_warn(dev, "Too much work at interrupt, status=%#08x\n", 1614*f2148a47SJeff Kirsher intr_status); 1615*f2148a47SJeff Kirsher break; 1616*f2148a47SJeff Kirsher } 1617*f2148a47SJeff Kirsher } 1618*f2148a47SJeff Kirsher 1619*f2148a47SJeff Kirsher if (debug > 3) 1620*f2148a47SJeff Kirsher netdev_dbg(dev, "exiting interrupt, status=%08x\n", 1621*f2148a47SJeff Kirsher ioread16(ioaddr + IntrStatus)); 1622*f2148a47SJeff Kirsher return IRQ_RETVAL(handled); 1623*f2148a47SJeff Kirsher } 1624*f2148a47SJeff Kirsher 1625*f2148a47SJeff Kirsher /* This routine is logically part of the interrupt handler, but isolated 1626*f2148a47SJeff Kirsher for clarity. 
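   It runs under rp->lock, reclaims descriptors the chip has released (DescOwn clear), updates the error and byte counters, and wakes the queue once enough ring slots are free again.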
*/ 1627*f2148a47SJeff Kirsher static void rhine_tx(struct net_device *dev) 1628*f2148a47SJeff Kirsher { 1629*f2148a47SJeff Kirsher struct rhine_private *rp = netdev_priv(dev); 1630*f2148a47SJeff Kirsher int txstatus = 0, entry = rp->dirty_tx % TX_RING_SIZE; 1631*f2148a47SJeff Kirsher 1632*f2148a47SJeff Kirsher spin_lock(&rp->lock); 1633*f2148a47SJeff Kirsher 1634*f2148a47SJeff Kirsher /* find and cleanup dirty tx descriptors */ 1635*f2148a47SJeff Kirsher while (rp->dirty_tx != rp->cur_tx) { 1636*f2148a47SJeff Kirsher txstatus = le32_to_cpu(rp->tx_ring[entry].tx_status); 1637*f2148a47SJeff Kirsher if (debug > 6) 1638*f2148a47SJeff Kirsher netdev_dbg(dev, "Tx scavenge %d status %08x\n", 1639*f2148a47SJeff Kirsher entry, txstatus); 1640*f2148a47SJeff Kirsher if (txstatus & DescOwn) 1641*f2148a47SJeff Kirsher break; 1642*f2148a47SJeff Kirsher if (txstatus & 0x8000) { 1643*f2148a47SJeff Kirsher if (debug > 1) 1644*f2148a47SJeff Kirsher netdev_dbg(dev, "Transmit error, Tx status %08x\n", 1645*f2148a47SJeff Kirsher txstatus); 1646*f2148a47SJeff Kirsher dev->stats.tx_errors++; 1647*f2148a47SJeff Kirsher if (txstatus & 0x0400) 1648*f2148a47SJeff Kirsher dev->stats.tx_carrier_errors++; 1649*f2148a47SJeff Kirsher if (txstatus & 0x0200) 1650*f2148a47SJeff Kirsher dev->stats.tx_window_errors++; 1651*f2148a47SJeff Kirsher if (txstatus & 0x0100) 1652*f2148a47SJeff Kirsher dev->stats.tx_aborted_errors++; 1653*f2148a47SJeff Kirsher if (txstatus & 0x0080) 1654*f2148a47SJeff Kirsher dev->stats.tx_heartbeat_errors++; 1655*f2148a47SJeff Kirsher if (((rp->quirks & rqRhineI) && txstatus & 0x0002) || 1656*f2148a47SJeff Kirsher (txstatus & 0x0800) || (txstatus & 0x1000)) { 1657*f2148a47SJeff Kirsher dev->stats.tx_fifo_errors++; 1658*f2148a47SJeff Kirsher rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn); 1659*f2148a47SJeff Kirsher break; /* Keep the skb - we try again */ 1660*f2148a47SJeff Kirsher } 1661*f2148a47SJeff Kirsher /* Transmitter restarted in 'abnormal' handler. */ 1662*f2148a47SJeff Kirsher } else { 1663*f2148a47SJeff Kirsher if (rp->quirks & rqRhineI) 1664*f2148a47SJeff Kirsher dev->stats.collisions += (txstatus >> 3) & 0x0F; 1665*f2148a47SJeff Kirsher else 1666*f2148a47SJeff Kirsher dev->stats.collisions += txstatus & 0x0F; 1667*f2148a47SJeff Kirsher if (debug > 6) 1668*f2148a47SJeff Kirsher netdev_dbg(dev, "collisions: %1.1x:%1.1x\n", 1669*f2148a47SJeff Kirsher (txstatus >> 3) & 0xF, 1670*f2148a47SJeff Kirsher txstatus & 0xF); 1671*f2148a47SJeff Kirsher dev->stats.tx_bytes += rp->tx_skbuff[entry]->len; 1672*f2148a47SJeff Kirsher dev->stats.tx_packets++; 1673*f2148a47SJeff Kirsher } 1674*f2148a47SJeff Kirsher /* Free the original skb. 
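   The DMA mapping is unmapped only when one was created; Rhine-I frames sent through the bounce buffer left tx_skbuff_dma[entry] at zero and have nothing to unmap.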
*/ 1675*f2148a47SJeff Kirsher if (rp->tx_skbuff_dma[entry]) { 1676*f2148a47SJeff Kirsher pci_unmap_single(rp->pdev, 1677*f2148a47SJeff Kirsher rp->tx_skbuff_dma[entry], 1678*f2148a47SJeff Kirsher rp->tx_skbuff[entry]->len, 1679*f2148a47SJeff Kirsher PCI_DMA_TODEVICE); 1680*f2148a47SJeff Kirsher } 1681*f2148a47SJeff Kirsher dev_kfree_skb_irq(rp->tx_skbuff[entry]); 1682*f2148a47SJeff Kirsher rp->tx_skbuff[entry] = NULL; 1683*f2148a47SJeff Kirsher entry = (++rp->dirty_tx) % TX_RING_SIZE; 1684*f2148a47SJeff Kirsher } 1685*f2148a47SJeff Kirsher if ((rp->cur_tx - rp->dirty_tx) < TX_QUEUE_LEN - 4) 1686*f2148a47SJeff Kirsher netif_wake_queue(dev); 1687*f2148a47SJeff Kirsher 1688*f2148a47SJeff Kirsher spin_unlock(&rp->lock); 1689*f2148a47SJeff Kirsher } 1690*f2148a47SJeff Kirsher 1691*f2148a47SJeff Kirsher /** 1692*f2148a47SJeff Kirsher * rhine_get_vlan_tci - extract TCI from Rx data buffer 1693*f2148a47SJeff Kirsher * @skb: pointer to sk_buff 1694*f2148a47SJeff Kirsher * @data_size: used data area of the buffer including CRC 1695*f2148a47SJeff Kirsher * 1696*f2148a47SJeff Kirsher * If hardware VLAN tag extraction is enabled and the chip indicates a 802.1Q 1697*f2148a47SJeff Kirsher * packet, the extracted 802.1Q header (2 bytes TPID + 2 bytes TCI) is 4-byte 1698*f2148a47SJeff Kirsher * aligned following the CRC. 1699*f2148a47SJeff Kirsher */ 1700*f2148a47SJeff Kirsher static inline u16 rhine_get_vlan_tci(struct sk_buff *skb, int data_size) 1701*f2148a47SJeff Kirsher { 1702*f2148a47SJeff Kirsher u8 *trailer = (u8 *)skb->data + ((data_size + 3) & ~3) + 2; 1703*f2148a47SJeff Kirsher return be16_to_cpup((__be16 *)trailer); 1704*f2148a47SJeff Kirsher } 1705*f2148a47SJeff Kirsher 1706*f2148a47SJeff Kirsher /* Process up to limit frames from receive ring */ 1707*f2148a47SJeff Kirsher static int rhine_rx(struct net_device *dev, int limit) 1708*f2148a47SJeff Kirsher { 1709*f2148a47SJeff Kirsher struct rhine_private *rp = netdev_priv(dev); 1710*f2148a47SJeff Kirsher int count; 1711*f2148a47SJeff Kirsher int entry = rp->cur_rx % RX_RING_SIZE; 1712*f2148a47SJeff Kirsher 1713*f2148a47SJeff Kirsher if (debug > 4) { 1714*f2148a47SJeff Kirsher netdev_dbg(dev, "%s(), entry %d status %08x\n", 1715*f2148a47SJeff Kirsher __func__, entry, 1716*f2148a47SJeff Kirsher le32_to_cpu(rp->rx_head_desc->rx_status)); 1717*f2148a47SJeff Kirsher } 1718*f2148a47SJeff Kirsher 1719*f2148a47SJeff Kirsher /* If EOP is set on the next entry, it's a new packet. Send it up. 
*/ 1720*f2148a47SJeff Kirsher for (count = 0; count < limit; ++count) { 1721*f2148a47SJeff Kirsher struct rx_desc *desc = rp->rx_head_desc; 1722*f2148a47SJeff Kirsher u32 desc_status = le32_to_cpu(desc->rx_status); 1723*f2148a47SJeff Kirsher u32 desc_length = le32_to_cpu(desc->desc_length); 1724*f2148a47SJeff Kirsher int data_size = desc_status >> 16; 1725*f2148a47SJeff Kirsher 1726*f2148a47SJeff Kirsher if (desc_status & DescOwn) 1727*f2148a47SJeff Kirsher break; 1728*f2148a47SJeff Kirsher 1729*f2148a47SJeff Kirsher if (debug > 4) 1730*f2148a47SJeff Kirsher netdev_dbg(dev, "%s() status is %08x\n", 1731*f2148a47SJeff Kirsher __func__, desc_status); 1732*f2148a47SJeff Kirsher 1733*f2148a47SJeff Kirsher if ((desc_status & (RxWholePkt | RxErr)) != RxWholePkt) { 1734*f2148a47SJeff Kirsher if ((desc_status & RxWholePkt) != RxWholePkt) { 1735*f2148a47SJeff Kirsher netdev_warn(dev, 1736*f2148a47SJeff Kirsher "Oversized Ethernet frame spanned multiple buffers, " 1737*f2148a47SJeff Kirsher "entry %#x length %d status %08x!\n", 1738*f2148a47SJeff Kirsher entry, data_size, 1739*f2148a47SJeff Kirsher desc_status); 1740*f2148a47SJeff Kirsher netdev_warn(dev, 1741*f2148a47SJeff Kirsher "Oversized Ethernet frame %p vs %p\n", 1742*f2148a47SJeff Kirsher rp->rx_head_desc, 1743*f2148a47SJeff Kirsher &rp->rx_ring[entry]); 1744*f2148a47SJeff Kirsher dev->stats.rx_length_errors++; 1745*f2148a47SJeff Kirsher } else if (desc_status & RxErr) { 1746*f2148a47SJeff Kirsher /* There was a error. */ 1747*f2148a47SJeff Kirsher if (debug > 2) 1748*f2148a47SJeff Kirsher netdev_dbg(dev, "%s() Rx error was %08x\n", 1749*f2148a47SJeff Kirsher __func__, desc_status); 1750*f2148a47SJeff Kirsher dev->stats.rx_errors++; 1751*f2148a47SJeff Kirsher if (desc_status & 0x0030) 1752*f2148a47SJeff Kirsher dev->stats.rx_length_errors++; 1753*f2148a47SJeff Kirsher if (desc_status & 0x0048) 1754*f2148a47SJeff Kirsher dev->stats.rx_fifo_errors++; 1755*f2148a47SJeff Kirsher if (desc_status & 0x0004) 1756*f2148a47SJeff Kirsher dev->stats.rx_frame_errors++; 1757*f2148a47SJeff Kirsher if (desc_status & 0x0002) { 1758*f2148a47SJeff Kirsher /* this can also be updated outside the interrupt handler */ 1759*f2148a47SJeff Kirsher spin_lock(&rp->lock); 1760*f2148a47SJeff Kirsher dev->stats.rx_crc_errors++; 1761*f2148a47SJeff Kirsher spin_unlock(&rp->lock); 1762*f2148a47SJeff Kirsher } 1763*f2148a47SJeff Kirsher } 1764*f2148a47SJeff Kirsher } else { 1765*f2148a47SJeff Kirsher struct sk_buff *skb = NULL; 1766*f2148a47SJeff Kirsher /* Length should omit the CRC */ 1767*f2148a47SJeff Kirsher int pkt_len = data_size - 4; 1768*f2148a47SJeff Kirsher u16 vlan_tci = 0; 1769*f2148a47SJeff Kirsher 1770*f2148a47SJeff Kirsher /* Check if the packet is long enough to accept without 1771*f2148a47SJeff Kirsher copying to a minimally-sized skbuff. 
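   Frames shorter than rx_copybreak are copied into a fresh, IP-aligned skb so the mapped ring buffer stays in place for reuse; larger frames hand the original buffer up the stack and the ring slot is refilled further below.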
*/ 1772*f2148a47SJeff Kirsher if (pkt_len < rx_copybreak) 1773*f2148a47SJeff Kirsher skb = netdev_alloc_skb_ip_align(dev, pkt_len); 1774*f2148a47SJeff Kirsher if (skb) { 1775*f2148a47SJeff Kirsher pci_dma_sync_single_for_cpu(rp->pdev, 1776*f2148a47SJeff Kirsher rp->rx_skbuff_dma[entry], 1777*f2148a47SJeff Kirsher rp->rx_buf_sz, 1778*f2148a47SJeff Kirsher PCI_DMA_FROMDEVICE); 1779*f2148a47SJeff Kirsher 1780*f2148a47SJeff Kirsher skb_copy_to_linear_data(skb, 1781*f2148a47SJeff Kirsher rp->rx_skbuff[entry]->data, 1782*f2148a47SJeff Kirsher pkt_len); 1783*f2148a47SJeff Kirsher skb_put(skb, pkt_len); 1784*f2148a47SJeff Kirsher pci_dma_sync_single_for_device(rp->pdev, 1785*f2148a47SJeff Kirsher rp->rx_skbuff_dma[entry], 1786*f2148a47SJeff Kirsher rp->rx_buf_sz, 1787*f2148a47SJeff Kirsher PCI_DMA_FROMDEVICE); 1788*f2148a47SJeff Kirsher } else { 1789*f2148a47SJeff Kirsher skb = rp->rx_skbuff[entry]; 1790*f2148a47SJeff Kirsher if (skb == NULL) { 1791*f2148a47SJeff Kirsher netdev_err(dev, "Inconsistent Rx descriptor chain\n"); 1792*f2148a47SJeff Kirsher break; 1793*f2148a47SJeff Kirsher } 1794*f2148a47SJeff Kirsher rp->rx_skbuff[entry] = NULL; 1795*f2148a47SJeff Kirsher skb_put(skb, pkt_len); 1796*f2148a47SJeff Kirsher pci_unmap_single(rp->pdev, 1797*f2148a47SJeff Kirsher rp->rx_skbuff_dma[entry], 1798*f2148a47SJeff Kirsher rp->rx_buf_sz, 1799*f2148a47SJeff Kirsher PCI_DMA_FROMDEVICE); 1800*f2148a47SJeff Kirsher } 1801*f2148a47SJeff Kirsher 1802*f2148a47SJeff Kirsher if (unlikely(desc_length & DescTag)) 1803*f2148a47SJeff Kirsher vlan_tci = rhine_get_vlan_tci(skb, data_size); 1804*f2148a47SJeff Kirsher 1805*f2148a47SJeff Kirsher skb->protocol = eth_type_trans(skb, dev); 1806*f2148a47SJeff Kirsher 1807*f2148a47SJeff Kirsher if (unlikely(desc_length & DescTag)) 1808*f2148a47SJeff Kirsher __vlan_hwaccel_put_tag(skb, vlan_tci); 1809*f2148a47SJeff Kirsher netif_receive_skb(skb); 1810*f2148a47SJeff Kirsher dev->stats.rx_bytes += pkt_len; 1811*f2148a47SJeff Kirsher dev->stats.rx_packets++; 1812*f2148a47SJeff Kirsher } 1813*f2148a47SJeff Kirsher entry = (++rp->cur_rx) % RX_RING_SIZE; 1814*f2148a47SJeff Kirsher rp->rx_head_desc = &rp->rx_ring[entry]; 1815*f2148a47SJeff Kirsher } 1816*f2148a47SJeff Kirsher 1817*f2148a47SJeff Kirsher /* Refill the Rx ring buffers. */ 1818*f2148a47SJeff Kirsher for (; rp->cur_rx - rp->dirty_rx > 0; rp->dirty_rx++) { 1819*f2148a47SJeff Kirsher struct sk_buff *skb; 1820*f2148a47SJeff Kirsher entry = rp->dirty_rx % RX_RING_SIZE; 1821*f2148a47SJeff Kirsher if (rp->rx_skbuff[entry] == NULL) { 1822*f2148a47SJeff Kirsher skb = netdev_alloc_skb(dev, rp->rx_buf_sz); 1823*f2148a47SJeff Kirsher rp->rx_skbuff[entry] = skb; 1824*f2148a47SJeff Kirsher if (skb == NULL) 1825*f2148a47SJeff Kirsher break; /* Better luck next round. */ 1826*f2148a47SJeff Kirsher skb->dev = dev; /* Mark as being used by this device. */ 1827*f2148a47SJeff Kirsher rp->rx_skbuff_dma[entry] = 1828*f2148a47SJeff Kirsher pci_map_single(rp->pdev, skb->data, 1829*f2148a47SJeff Kirsher rp->rx_buf_sz, 1830*f2148a47SJeff Kirsher PCI_DMA_FROMDEVICE); 1831*f2148a47SJeff Kirsher rp->rx_ring[entry].addr = cpu_to_le32(rp->rx_skbuff_dma[entry]); 1832*f2148a47SJeff Kirsher } 1833*f2148a47SJeff Kirsher rp->rx_ring[entry].rx_status = cpu_to_le32(DescOwn); 1834*f2148a47SJeff Kirsher } 1835*f2148a47SJeff Kirsher 1836*f2148a47SJeff Kirsher return count; 1837*f2148a47SJeff Kirsher } 1838*f2148a47SJeff Kirsher 1839*f2148a47SJeff Kirsher /* 1840*f2148a47SJeff Kirsher * Clears the "tally counters" for CRC errors and missed frames(?). 
1841*f2148a47SJeff Kirsher * It has been reported that some chips need a write of 0 to clear 1842*f2148a47SJeff Kirsher * these, for others the counters are set to 1 when written to and 1843*f2148a47SJeff Kirsher * instead cleared when read. So we clear them both ways ... 1844*f2148a47SJeff Kirsher */ 1845*f2148a47SJeff Kirsher static inline void clear_tally_counters(void __iomem *ioaddr) 1846*f2148a47SJeff Kirsher { 1847*f2148a47SJeff Kirsher iowrite32(0, ioaddr + RxMissed); 1848*f2148a47SJeff Kirsher ioread16(ioaddr + RxCRCErrs); 1849*f2148a47SJeff Kirsher ioread16(ioaddr + RxMissed); 1850*f2148a47SJeff Kirsher } 1851*f2148a47SJeff Kirsher 1852*f2148a47SJeff Kirsher static void rhine_restart_tx(struct net_device *dev) { 1853*f2148a47SJeff Kirsher struct rhine_private *rp = netdev_priv(dev); 1854*f2148a47SJeff Kirsher void __iomem *ioaddr = rp->base; 1855*f2148a47SJeff Kirsher int entry = rp->dirty_tx % TX_RING_SIZE; 1856*f2148a47SJeff Kirsher u32 intr_status; 1857*f2148a47SJeff Kirsher 1858*f2148a47SJeff Kirsher /* 1859*f2148a47SJeff Kirsher * If new errors occurred, we need to sort them out before doing Tx. 1860*f2148a47SJeff Kirsher * In that case the ISR will be back here RSN anyway. 1861*f2148a47SJeff Kirsher */ 1862*f2148a47SJeff Kirsher intr_status = get_intr_status(dev); 1863*f2148a47SJeff Kirsher 1864*f2148a47SJeff Kirsher if ((intr_status & IntrTxErrSummary) == 0) { 1865*f2148a47SJeff Kirsher 1866*f2148a47SJeff Kirsher /* We know better than the chip where it should continue. */ 1867*f2148a47SJeff Kirsher iowrite32(rp->tx_ring_dma + entry * sizeof(struct tx_desc), 1868*f2148a47SJeff Kirsher ioaddr + TxRingPtr); 1869*f2148a47SJeff Kirsher 1870*f2148a47SJeff Kirsher iowrite8(ioread8(ioaddr + ChipCmd) | CmdTxOn, 1871*f2148a47SJeff Kirsher ioaddr + ChipCmd); 1872*f2148a47SJeff Kirsher 1873*f2148a47SJeff Kirsher if (rp->tx_ring[entry].desc_length & cpu_to_le32(0x020000)) 1874*f2148a47SJeff Kirsher /* Tx queues are bits 7-0 (first Tx queue: bit 7) */ 1875*f2148a47SJeff Kirsher BYTE_REG_BITS_ON(1 << 7, ioaddr + TQWake); 1876*f2148a47SJeff Kirsher 1877*f2148a47SJeff Kirsher iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand, 1878*f2148a47SJeff Kirsher ioaddr + ChipCmd1); 1879*f2148a47SJeff Kirsher IOSYNC; 1880*f2148a47SJeff Kirsher } 1881*f2148a47SJeff Kirsher else { 1882*f2148a47SJeff Kirsher /* This should never happen */ 1883*f2148a47SJeff Kirsher if (debug > 1) 1884*f2148a47SJeff Kirsher netdev_warn(dev, "%s() Another error occurred %08x\n", 1885*f2148a47SJeff Kirsher __func__, intr_status); 1886*f2148a47SJeff Kirsher } 1887*f2148a47SJeff Kirsher 1888*f2148a47SJeff Kirsher } 1889*f2148a47SJeff Kirsher 1890*f2148a47SJeff Kirsher static void rhine_error(struct net_device *dev, int intr_status) 1891*f2148a47SJeff Kirsher { 1892*f2148a47SJeff Kirsher struct rhine_private *rp = netdev_priv(dev); 1893*f2148a47SJeff Kirsher void __iomem *ioaddr = rp->base; 1894*f2148a47SJeff Kirsher 1895*f2148a47SJeff Kirsher spin_lock(&rp->lock); 1896*f2148a47SJeff Kirsher 1897*f2148a47SJeff Kirsher if (intr_status & IntrLinkChange) 1898*f2148a47SJeff Kirsher rhine_check_media(dev, 0); 1899*f2148a47SJeff Kirsher if (intr_status & IntrStatsMax) { 1900*f2148a47SJeff Kirsher dev->stats.rx_crc_errors += ioread16(ioaddr + RxCRCErrs); 1901*f2148a47SJeff Kirsher dev->stats.rx_missed_errors += ioread16(ioaddr + RxMissed); 1902*f2148a47SJeff Kirsher clear_tally_counters(ioaddr); 1903*f2148a47SJeff Kirsher } 1904*f2148a47SJeff Kirsher if (intr_status & IntrTxAborted) { 1905*f2148a47SJeff Kirsher if (debug > 1) 
1906*f2148a47SJeff Kirsher netdev_info(dev, "Abort %08x, frame dropped\n", 1907*f2148a47SJeff Kirsher intr_status); 1908*f2148a47SJeff Kirsher } 1909*f2148a47SJeff Kirsher if (intr_status & IntrTxUnderrun) { 1910*f2148a47SJeff Kirsher if (rp->tx_thresh < 0xE0) 1911*f2148a47SJeff Kirsher BYTE_REG_BITS_SET((rp->tx_thresh += 0x20), 0x80, ioaddr + TxConfig); 1912*f2148a47SJeff Kirsher if (debug > 1) 1913*f2148a47SJeff Kirsher netdev_info(dev, "Transmitter underrun, Tx threshold now %02x\n", 1914*f2148a47SJeff Kirsher rp->tx_thresh); 1915*f2148a47SJeff Kirsher } 1916*f2148a47SJeff Kirsher if (intr_status & IntrTxDescRace) { 1917*f2148a47SJeff Kirsher if (debug > 2) 1918*f2148a47SJeff Kirsher netdev_info(dev, "Tx descriptor write-back race\n"); 1919*f2148a47SJeff Kirsher } 1920*f2148a47SJeff Kirsher if ((intr_status & IntrTxError) && 1921*f2148a47SJeff Kirsher (intr_status & (IntrTxAborted | 1922*f2148a47SJeff Kirsher IntrTxUnderrun | IntrTxDescRace)) == 0) { 1923*f2148a47SJeff Kirsher if (rp->tx_thresh < 0xE0) { 1924*f2148a47SJeff Kirsher BYTE_REG_BITS_SET((rp->tx_thresh += 0x20), 0x80, ioaddr + TxConfig); 1925*f2148a47SJeff Kirsher } 1926*f2148a47SJeff Kirsher if (debug > 1) 1927*f2148a47SJeff Kirsher netdev_info(dev, "Unspecified error. Tx threshold now %02x\n", 1928*f2148a47SJeff Kirsher rp->tx_thresh); 1929*f2148a47SJeff Kirsher } 1930*f2148a47SJeff Kirsher if (intr_status & (IntrTxAborted | IntrTxUnderrun | IntrTxDescRace | 1931*f2148a47SJeff Kirsher IntrTxError)) 1932*f2148a47SJeff Kirsher rhine_restart_tx(dev); 1933*f2148a47SJeff Kirsher 1934*f2148a47SJeff Kirsher if (intr_status & ~(IntrLinkChange | IntrStatsMax | IntrTxUnderrun | 1935*f2148a47SJeff Kirsher IntrTxError | IntrTxAborted | IntrNormalSummary | 1936*f2148a47SJeff Kirsher IntrTxDescRace)) { 1937*f2148a47SJeff Kirsher if (debug > 1) 1938*f2148a47SJeff Kirsher netdev_err(dev, "Something Wicked happened! %08x\n", 1939*f2148a47SJeff Kirsher intr_status); 1940*f2148a47SJeff Kirsher } 1941*f2148a47SJeff Kirsher 1942*f2148a47SJeff Kirsher spin_unlock(&rp->lock); 1943*f2148a47SJeff Kirsher } 1944*f2148a47SJeff Kirsher 1945*f2148a47SJeff Kirsher static struct net_device_stats *rhine_get_stats(struct net_device *dev) 1946*f2148a47SJeff Kirsher { 1947*f2148a47SJeff Kirsher struct rhine_private *rp = netdev_priv(dev); 1948*f2148a47SJeff Kirsher void __iomem *ioaddr = rp->base; 1949*f2148a47SJeff Kirsher unsigned long flags; 1950*f2148a47SJeff Kirsher 1951*f2148a47SJeff Kirsher spin_lock_irqsave(&rp->lock, flags); 1952*f2148a47SJeff Kirsher dev->stats.rx_crc_errors += ioread16(ioaddr + RxCRCErrs); 1953*f2148a47SJeff Kirsher dev->stats.rx_missed_errors += ioread16(ioaddr + RxMissed); 1954*f2148a47SJeff Kirsher clear_tally_counters(ioaddr); 1955*f2148a47SJeff Kirsher spin_unlock_irqrestore(&rp->lock, flags); 1956*f2148a47SJeff Kirsher 1957*f2148a47SJeff Kirsher return &dev->stats; 1958*f2148a47SJeff Kirsher } 1959*f2148a47SJeff Kirsher 1960*f2148a47SJeff Kirsher static void rhine_set_rx_mode(struct net_device *dev) 1961*f2148a47SJeff Kirsher { 1962*f2148a47SJeff Kirsher struct rhine_private *rp = netdev_priv(dev); 1963*f2148a47SJeff Kirsher void __iomem *ioaddr = rp->base; 1964*f2148a47SJeff Kirsher u32 mc_filter[2]; /* Multicast hash filter */ 1965*f2148a47SJeff Kirsher u8 rx_mode = 0x0C; /* Note: 0x02=accept runt, 0x01=accept errs */ 1966*f2148a47SJeff Kirsher struct netdev_hw_addr *ha; 1967*f2148a47SJeff Kirsher 1968*f2148a47SJeff Kirsher if (dev->flags & IFF_PROMISC) { /* Set promiscuous. 
*/ 1969*f2148a47SJeff Kirsher rx_mode = 0x1C; 1970*f2148a47SJeff Kirsher iowrite32(0xffffffff, ioaddr + MulticastFilter0); 1971*f2148a47SJeff Kirsher iowrite32(0xffffffff, ioaddr + MulticastFilter1); 1972*f2148a47SJeff Kirsher } else if ((netdev_mc_count(dev) > multicast_filter_limit) || 1973*f2148a47SJeff Kirsher (dev->flags & IFF_ALLMULTI)) { 1974*f2148a47SJeff Kirsher /* Too many to match, or accept all multicasts. */ 1975*f2148a47SJeff Kirsher iowrite32(0xffffffff, ioaddr + MulticastFilter0); 1976*f2148a47SJeff Kirsher iowrite32(0xffffffff, ioaddr + MulticastFilter1); 1977*f2148a47SJeff Kirsher } else if (rp->pdev->revision >= VT6105M) { 1978*f2148a47SJeff Kirsher int i = 0; 1979*f2148a47SJeff Kirsher u32 mCAMmask = 0; /* 32 mCAMs (6105M and better) */ 1980*f2148a47SJeff Kirsher netdev_for_each_mc_addr(ha, dev) { 1981*f2148a47SJeff Kirsher if (i == MCAM_SIZE) 1982*f2148a47SJeff Kirsher break; 1983*f2148a47SJeff Kirsher rhine_set_cam(ioaddr, i, ha->addr); 1984*f2148a47SJeff Kirsher mCAMmask |= 1 << i; 1985*f2148a47SJeff Kirsher i++; 1986*f2148a47SJeff Kirsher } 1987*f2148a47SJeff Kirsher rhine_set_cam_mask(ioaddr, mCAMmask); 1988*f2148a47SJeff Kirsher } else { 1989*f2148a47SJeff Kirsher memset(mc_filter, 0, sizeof(mc_filter)); 1990*f2148a47SJeff Kirsher netdev_for_each_mc_addr(ha, dev) { 1991*f2148a47SJeff Kirsher int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26; 1992*f2148a47SJeff Kirsher 1993*f2148a47SJeff Kirsher mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31); 1994*f2148a47SJeff Kirsher } 1995*f2148a47SJeff Kirsher iowrite32(mc_filter[0], ioaddr + MulticastFilter0); 1996*f2148a47SJeff Kirsher iowrite32(mc_filter[1], ioaddr + MulticastFilter1); 1997*f2148a47SJeff Kirsher } 1998*f2148a47SJeff Kirsher /* enable/disable VLAN receive filtering */ 1999*f2148a47SJeff Kirsher if (rp->pdev->revision >= VT6105M) { 2000*f2148a47SJeff Kirsher if (dev->flags & IFF_PROMISC) 2001*f2148a47SJeff Kirsher BYTE_REG_BITS_OFF(BCR1_VIDFR, ioaddr + PCIBusConfig1); 2002*f2148a47SJeff Kirsher else 2003*f2148a47SJeff Kirsher BYTE_REG_BITS_ON(BCR1_VIDFR, ioaddr + PCIBusConfig1); 2004*f2148a47SJeff Kirsher } 2005*f2148a47SJeff Kirsher BYTE_REG_BITS_ON(rx_mode, ioaddr + RxConfig); 2006*f2148a47SJeff Kirsher } 2007*f2148a47SJeff Kirsher 2008*f2148a47SJeff Kirsher static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) 2009*f2148a47SJeff Kirsher { 2010*f2148a47SJeff Kirsher struct rhine_private *rp = netdev_priv(dev); 2011*f2148a47SJeff Kirsher 2012*f2148a47SJeff Kirsher strcpy(info->driver, DRV_NAME); 2013*f2148a47SJeff Kirsher strcpy(info->version, DRV_VERSION); 2014*f2148a47SJeff Kirsher strcpy(info->bus_info, pci_name(rp->pdev)); 2015*f2148a47SJeff Kirsher } 2016*f2148a47SJeff Kirsher 2017*f2148a47SJeff Kirsher static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 2018*f2148a47SJeff Kirsher { 2019*f2148a47SJeff Kirsher struct rhine_private *rp = netdev_priv(dev); 2020*f2148a47SJeff Kirsher int rc; 2021*f2148a47SJeff Kirsher 2022*f2148a47SJeff Kirsher spin_lock_irq(&rp->lock); 2023*f2148a47SJeff Kirsher rc = mii_ethtool_gset(&rp->mii_if, cmd); 2024*f2148a47SJeff Kirsher spin_unlock_irq(&rp->lock); 2025*f2148a47SJeff Kirsher 2026*f2148a47SJeff Kirsher return rc; 2027*f2148a47SJeff Kirsher } 2028*f2148a47SJeff Kirsher 2029*f2148a47SJeff Kirsher static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) 2030*f2148a47SJeff Kirsher { 2031*f2148a47SJeff Kirsher struct rhine_private *rp = netdev_priv(dev); 2032*f2148a47SJeff Kirsher int rc; 
2033*f2148a47SJeff Kirsher 2034*f2148a47SJeff Kirsher spin_lock_irq(&rp->lock); 2035*f2148a47SJeff Kirsher rc = mii_ethtool_sset(&rp->mii_if, cmd); 2036*f2148a47SJeff Kirsher spin_unlock_irq(&rp->lock); 2037*f2148a47SJeff Kirsher rhine_set_carrier(&rp->mii_if); 2038*f2148a47SJeff Kirsher 2039*f2148a47SJeff Kirsher return rc; 2040*f2148a47SJeff Kirsher } 2041*f2148a47SJeff Kirsher 2042*f2148a47SJeff Kirsher static int netdev_nway_reset(struct net_device *dev) 2043*f2148a47SJeff Kirsher { 2044*f2148a47SJeff Kirsher struct rhine_private *rp = netdev_priv(dev); 2045*f2148a47SJeff Kirsher 2046*f2148a47SJeff Kirsher return mii_nway_restart(&rp->mii_if); 2047*f2148a47SJeff Kirsher } 2048*f2148a47SJeff Kirsher 2049*f2148a47SJeff Kirsher static u32 netdev_get_link(struct net_device *dev) 2050*f2148a47SJeff Kirsher { 2051*f2148a47SJeff Kirsher struct rhine_private *rp = netdev_priv(dev); 2052*f2148a47SJeff Kirsher 2053*f2148a47SJeff Kirsher return mii_link_ok(&rp->mii_if); 2054*f2148a47SJeff Kirsher } 2055*f2148a47SJeff Kirsher 2056*f2148a47SJeff Kirsher static u32 netdev_get_msglevel(struct net_device *dev) 2057*f2148a47SJeff Kirsher { 2058*f2148a47SJeff Kirsher return debug; 2059*f2148a47SJeff Kirsher } 2060*f2148a47SJeff Kirsher 2061*f2148a47SJeff Kirsher static void netdev_set_msglevel(struct net_device *dev, u32 value) 2062*f2148a47SJeff Kirsher { 2063*f2148a47SJeff Kirsher debug = value; 2064*f2148a47SJeff Kirsher } 2065*f2148a47SJeff Kirsher 2066*f2148a47SJeff Kirsher static void rhine_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) 2067*f2148a47SJeff Kirsher { 2068*f2148a47SJeff Kirsher struct rhine_private *rp = netdev_priv(dev); 2069*f2148a47SJeff Kirsher 2070*f2148a47SJeff Kirsher if (!(rp->quirks & rqWOL)) 2071*f2148a47SJeff Kirsher return; 2072*f2148a47SJeff Kirsher 2073*f2148a47SJeff Kirsher spin_lock_irq(&rp->lock); 2074*f2148a47SJeff Kirsher wol->supported = WAKE_PHY | WAKE_MAGIC | 2075*f2148a47SJeff Kirsher WAKE_UCAST | WAKE_MCAST | WAKE_BCAST; /* Untested */ 2076*f2148a47SJeff Kirsher wol->wolopts = rp->wolopts; 2077*f2148a47SJeff Kirsher spin_unlock_irq(&rp->lock); 2078*f2148a47SJeff Kirsher } 2079*f2148a47SJeff Kirsher 2080*f2148a47SJeff Kirsher static int rhine_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) 2081*f2148a47SJeff Kirsher { 2082*f2148a47SJeff Kirsher struct rhine_private *rp = netdev_priv(dev); 2083*f2148a47SJeff Kirsher u32 support = WAKE_PHY | WAKE_MAGIC | 2084*f2148a47SJeff Kirsher WAKE_UCAST | WAKE_MCAST | WAKE_BCAST; /* Untested */ 2085*f2148a47SJeff Kirsher 2086*f2148a47SJeff Kirsher if (!(rp->quirks & rqWOL)) 2087*f2148a47SJeff Kirsher return -EINVAL; 2088*f2148a47SJeff Kirsher 2089*f2148a47SJeff Kirsher if (wol->wolopts & ~support) 2090*f2148a47SJeff Kirsher return -EINVAL; 2091*f2148a47SJeff Kirsher 2092*f2148a47SJeff Kirsher spin_lock_irq(&rp->lock); 2093*f2148a47SJeff Kirsher rp->wolopts = wol->wolopts; 2094*f2148a47SJeff Kirsher spin_unlock_irq(&rp->lock); 2095*f2148a47SJeff Kirsher 2096*f2148a47SJeff Kirsher return 0; 2097*f2148a47SJeff Kirsher } 2098*f2148a47SJeff Kirsher 2099*f2148a47SJeff Kirsher static const struct ethtool_ops netdev_ethtool_ops = { 2100*f2148a47SJeff Kirsher .get_drvinfo = netdev_get_drvinfo, 2101*f2148a47SJeff Kirsher .get_settings = netdev_get_settings, 2102*f2148a47SJeff Kirsher .set_settings = netdev_set_settings, 2103*f2148a47SJeff Kirsher .nway_reset = netdev_nway_reset, 2104*f2148a47SJeff Kirsher .get_link = netdev_get_link, 2105*f2148a47SJeff Kirsher .get_msglevel = netdev_get_msglevel, 
2106*f2148a47SJeff Kirsher .set_msglevel = netdev_set_msglevel, 2107*f2148a47SJeff Kirsher .get_wol = rhine_get_wol, 2108*f2148a47SJeff Kirsher .set_wol = rhine_set_wol, 2109*f2148a47SJeff Kirsher }; 2110*f2148a47SJeff Kirsher 2111*f2148a47SJeff Kirsher static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 2112*f2148a47SJeff Kirsher { 2113*f2148a47SJeff Kirsher struct rhine_private *rp = netdev_priv(dev); 2114*f2148a47SJeff Kirsher int rc; 2115*f2148a47SJeff Kirsher 2116*f2148a47SJeff Kirsher if (!netif_running(dev)) 2117*f2148a47SJeff Kirsher return -EINVAL; 2118*f2148a47SJeff Kirsher 2119*f2148a47SJeff Kirsher spin_lock_irq(&rp->lock); 2120*f2148a47SJeff Kirsher rc = generic_mii_ioctl(&rp->mii_if, if_mii(rq), cmd, NULL); 2121*f2148a47SJeff Kirsher spin_unlock_irq(&rp->lock); 2122*f2148a47SJeff Kirsher rhine_set_carrier(&rp->mii_if); 2123*f2148a47SJeff Kirsher 2124*f2148a47SJeff Kirsher return rc; 2125*f2148a47SJeff Kirsher } 2126*f2148a47SJeff Kirsher 2127*f2148a47SJeff Kirsher static int rhine_close(struct net_device *dev) 2128*f2148a47SJeff Kirsher { 2129*f2148a47SJeff Kirsher struct rhine_private *rp = netdev_priv(dev); 2130*f2148a47SJeff Kirsher void __iomem *ioaddr = rp->base; 2131*f2148a47SJeff Kirsher 2132*f2148a47SJeff Kirsher napi_disable(&rp->napi); 2133*f2148a47SJeff Kirsher cancel_work_sync(&rp->reset_task); 2134*f2148a47SJeff Kirsher netif_stop_queue(dev); 2135*f2148a47SJeff Kirsher 2136*f2148a47SJeff Kirsher spin_lock_irq(&rp->lock); 2137*f2148a47SJeff Kirsher 2138*f2148a47SJeff Kirsher if (debug > 1) 2139*f2148a47SJeff Kirsher netdev_dbg(dev, "Shutting down ethercard, status was %04x\n", 2140*f2148a47SJeff Kirsher ioread16(ioaddr + ChipCmd)); 2141*f2148a47SJeff Kirsher 2142*f2148a47SJeff Kirsher /* Switch to loopback mode to avoid hardware races. */ 2143*f2148a47SJeff Kirsher iowrite8(rp->tx_thresh | 0x02, ioaddr + TxConfig); 2144*f2148a47SJeff Kirsher 2145*f2148a47SJeff Kirsher /* Disable interrupts by clearing the interrupt mask. */ 2146*f2148a47SJeff Kirsher iowrite16(0x0000, ioaddr + IntrEnable); 2147*f2148a47SJeff Kirsher 2148*f2148a47SJeff Kirsher /* Stop the chip's Tx and Rx processes. 
*/ 2149*f2148a47SJeff Kirsher iowrite16(CmdStop, ioaddr + ChipCmd); 2150*f2148a47SJeff Kirsher 2151*f2148a47SJeff Kirsher spin_unlock_irq(&rp->lock); 2152*f2148a47SJeff Kirsher 2153*f2148a47SJeff Kirsher free_irq(rp->pdev->irq, dev); 2154*f2148a47SJeff Kirsher free_rbufs(dev); 2155*f2148a47SJeff Kirsher free_tbufs(dev); 2156*f2148a47SJeff Kirsher free_ring(dev); 2157*f2148a47SJeff Kirsher 2158*f2148a47SJeff Kirsher return 0; 2159*f2148a47SJeff Kirsher } 2160*f2148a47SJeff Kirsher 2161*f2148a47SJeff Kirsher 2162*f2148a47SJeff Kirsher static void __devexit rhine_remove_one(struct pci_dev *pdev) 2163*f2148a47SJeff Kirsher { 2164*f2148a47SJeff Kirsher struct net_device *dev = pci_get_drvdata(pdev); 2165*f2148a47SJeff Kirsher struct rhine_private *rp = netdev_priv(dev); 2166*f2148a47SJeff Kirsher 2167*f2148a47SJeff Kirsher unregister_netdev(dev); 2168*f2148a47SJeff Kirsher 2169*f2148a47SJeff Kirsher pci_iounmap(pdev, rp->base); 2170*f2148a47SJeff Kirsher pci_release_regions(pdev); 2171*f2148a47SJeff Kirsher 2172*f2148a47SJeff Kirsher free_netdev(dev); 2173*f2148a47SJeff Kirsher pci_disable_device(pdev); 2174*f2148a47SJeff Kirsher pci_set_drvdata(pdev, NULL); 2175*f2148a47SJeff Kirsher } 2176*f2148a47SJeff Kirsher 2177*f2148a47SJeff Kirsher static void rhine_shutdown (struct pci_dev *pdev) 2178*f2148a47SJeff Kirsher { 2179*f2148a47SJeff Kirsher struct net_device *dev = pci_get_drvdata(pdev); 2180*f2148a47SJeff Kirsher struct rhine_private *rp = netdev_priv(dev); 2181*f2148a47SJeff Kirsher void __iomem *ioaddr = rp->base; 2182*f2148a47SJeff Kirsher 2183*f2148a47SJeff Kirsher if (!(rp->quirks & rqWOL)) 2184*f2148a47SJeff Kirsher return; /* Nothing to do for non-WOL adapters */ 2185*f2148a47SJeff Kirsher 2186*f2148a47SJeff Kirsher rhine_power_init(dev); 2187*f2148a47SJeff Kirsher 2188*f2148a47SJeff Kirsher /* Make sure we use pattern 0, 1 and not 4, 5 */ 2189*f2148a47SJeff Kirsher if (rp->quirks & rq6patterns) 2190*f2148a47SJeff Kirsher iowrite8(0x04, ioaddr + WOLcgClr); 2191*f2148a47SJeff Kirsher 2192*f2148a47SJeff Kirsher if (rp->wolopts & WAKE_MAGIC) { 2193*f2148a47SJeff Kirsher iowrite8(WOLmagic, ioaddr + WOLcrSet); 2194*f2148a47SJeff Kirsher /* 2195*f2148a47SJeff Kirsher * Turn EEPROM-controlled wake-up back on -- some hardware may 2196*f2148a47SJeff Kirsher * not cooperate otherwise. 
static void rhine_shutdown (struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	if (!(rp->quirks & rqWOL))
		return; /* Nothing to do for non-WOL adapters */

	rhine_power_init(dev);

	/* Make sure we use pattern 0, 1 and not 4, 5 */
	if (rp->quirks & rq6patterns)
		iowrite8(0x04, ioaddr + WOLcgClr);

	if (rp->wolopts & WAKE_MAGIC) {
		iowrite8(WOLmagic, ioaddr + WOLcrSet);
		/*
		 * Turn EEPROM-controlled wake-up back on -- some hardware may
		 * not cooperate otherwise.
		 */
		iowrite8(ioread8(ioaddr + ConfigA) | 0x03, ioaddr + ConfigA);
	}

	if (rp->wolopts & (WAKE_BCAST|WAKE_MCAST))
		iowrite8(WOLbmcast, ioaddr + WOLcgSet);

	if (rp->wolopts & WAKE_PHY)
		iowrite8(WOLlnkon | WOLlnkoff, ioaddr + WOLcrSet);

	if (rp->wolopts & WAKE_UCAST)
		iowrite8(WOLucast, ioaddr + WOLcrSet);

	if (rp->wolopts) {
		/* Enable legacy WOL (for old motherboards) */
		iowrite8(0x01, ioaddr + PwcfgSet);
		iowrite8(ioread8(ioaddr + StickyHW) | 0x04, ioaddr + StickyHW);
	}

	/* Hit power state D3 (sleep) */
	if (!avoid_D3)
		iowrite8(ioread8(ioaddr + StickyHW) | 0x03, ioaddr + StickyHW);

	/* TODO: Check use of pci_enable_wake() */

}

#ifdef CONFIG_PM
static int rhine_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rhine_private *rp = netdev_priv(dev);
	unsigned long flags;

	if (!netif_running(dev))
		return 0;

	napi_disable(&rp->napi);

	netif_device_detach(dev);
	pci_save_state(pdev);

	spin_lock_irqsave(&rp->lock, flags);
	rhine_shutdown(pdev);
	spin_unlock_irqrestore(&rp->lock, flags);

	free_irq(dev->irq, dev);
	return 0;
}

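/*
 * Resume: re-request the IRQ, bring the device back to D0 and restore PCI
 * config space, then rebuild the Tx/Rx rings and reprogram the chip registers
 * before reattaching the interface.
 */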
"failed" : "succeeded", ret); 2264*f2148a47SJeff Kirsher 2265*f2148a47SJeff Kirsher pci_restore_state(pdev); 2266*f2148a47SJeff Kirsher 2267*f2148a47SJeff Kirsher spin_lock_irqsave(&rp->lock, flags); 2268*f2148a47SJeff Kirsher #ifdef USE_MMIO 2269*f2148a47SJeff Kirsher enable_mmio(rp->pioaddr, rp->quirks); 2270*f2148a47SJeff Kirsher #endif 2271*f2148a47SJeff Kirsher rhine_power_init(dev); 2272*f2148a47SJeff Kirsher free_tbufs(dev); 2273*f2148a47SJeff Kirsher free_rbufs(dev); 2274*f2148a47SJeff Kirsher alloc_tbufs(dev); 2275*f2148a47SJeff Kirsher alloc_rbufs(dev); 2276*f2148a47SJeff Kirsher init_registers(dev); 2277*f2148a47SJeff Kirsher spin_unlock_irqrestore(&rp->lock, flags); 2278*f2148a47SJeff Kirsher 2279*f2148a47SJeff Kirsher netif_device_attach(dev); 2280*f2148a47SJeff Kirsher 2281*f2148a47SJeff Kirsher return 0; 2282*f2148a47SJeff Kirsher } 2283*f2148a47SJeff Kirsher #endif /* CONFIG_PM */ 2284*f2148a47SJeff Kirsher 2285*f2148a47SJeff Kirsher static struct pci_driver rhine_driver = { 2286*f2148a47SJeff Kirsher .name = DRV_NAME, 2287*f2148a47SJeff Kirsher .id_table = rhine_pci_tbl, 2288*f2148a47SJeff Kirsher .probe = rhine_init_one, 2289*f2148a47SJeff Kirsher .remove = __devexit_p(rhine_remove_one), 2290*f2148a47SJeff Kirsher #ifdef CONFIG_PM 2291*f2148a47SJeff Kirsher .suspend = rhine_suspend, 2292*f2148a47SJeff Kirsher .resume = rhine_resume, 2293*f2148a47SJeff Kirsher #endif /* CONFIG_PM */ 2294*f2148a47SJeff Kirsher .shutdown = rhine_shutdown, 2295*f2148a47SJeff Kirsher }; 2296*f2148a47SJeff Kirsher 2297*f2148a47SJeff Kirsher static struct dmi_system_id __initdata rhine_dmi_table[] = { 2298*f2148a47SJeff Kirsher { 2299*f2148a47SJeff Kirsher .ident = "EPIA-M", 2300*f2148a47SJeff Kirsher .matches = { 2301*f2148a47SJeff Kirsher DMI_MATCH(DMI_BIOS_VENDOR, "Award Software International, Inc."), 2302*f2148a47SJeff Kirsher DMI_MATCH(DMI_BIOS_VERSION, "6.00 PG"), 2303*f2148a47SJeff Kirsher }, 2304*f2148a47SJeff Kirsher }, 2305*f2148a47SJeff Kirsher { 2306*f2148a47SJeff Kirsher .ident = "KV7", 2307*f2148a47SJeff Kirsher .matches = { 2308*f2148a47SJeff Kirsher DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies, LTD"), 2309*f2148a47SJeff Kirsher DMI_MATCH(DMI_BIOS_VERSION, "6.00 PG"), 2310*f2148a47SJeff Kirsher }, 2311*f2148a47SJeff Kirsher }, 2312*f2148a47SJeff Kirsher { NULL } 2313*f2148a47SJeff Kirsher }; 2314*f2148a47SJeff Kirsher 2315*f2148a47SJeff Kirsher static int __init rhine_init(void) 2316*f2148a47SJeff Kirsher { 2317*f2148a47SJeff Kirsher /* when a module, this is printed whether or not devices are found in probe */ 2318*f2148a47SJeff Kirsher #ifdef MODULE 2319*f2148a47SJeff Kirsher pr_info("%s\n", version); 2320*f2148a47SJeff Kirsher #endif 2321*f2148a47SJeff Kirsher if (dmi_check_system(rhine_dmi_table)) { 2322*f2148a47SJeff Kirsher /* these BIOSes fail at PXE boot if chip is in D3 */ 2323*f2148a47SJeff Kirsher avoid_D3 = 1; 2324*f2148a47SJeff Kirsher pr_warn("Broken BIOS detected, avoid_D3 enabled\n"); 2325*f2148a47SJeff Kirsher } 2326*f2148a47SJeff Kirsher else if (avoid_D3) 2327*f2148a47SJeff Kirsher pr_info("avoid_D3 set\n"); 2328*f2148a47SJeff Kirsher 2329*f2148a47SJeff Kirsher return pci_register_driver(&rhine_driver); 2330*f2148a47SJeff Kirsher } 2331*f2148a47SJeff Kirsher 2332*f2148a47SJeff Kirsher 2333*f2148a47SJeff Kirsher static void __exit rhine_cleanup(void) 2334*f2148a47SJeff Kirsher { 2335*f2148a47SJeff Kirsher pci_unregister_driver(&rhine_driver); 2336*f2148a47SJeff Kirsher } 2337*f2148a47SJeff Kirsher 2338*f2148a47SJeff Kirsher 2339*f2148a47SJeff Kirsher 
module_init(rhine_init);
module_exit(rhine_cleanup);