/*
 * forcedeth: Ethernet driver for NVIDIA nForce media access controllers.
 *
 * Note: This driver is a cleanroom reimplementation based on reverse
 *	engineered documentation written by Carl-Daniel Hailfinger
 *	and Andrew de Quincey.
 *
 * NVIDIA, nForce and other NVIDIA marks are trademarks or registered
 * trademarks of NVIDIA Corporation in the United States and other
 * countries.
 *
 * Copyright (C) 2003,4,5 Manfred Spraul
 * Copyright (C) 2004 Andrew de Quincey (wol support)
 * Copyright (C) 2004 Carl-Daniel Hailfinger (invalid MAC handling, insane
 *	IRQ rate fixes, bigendian fixes, cleanups, verification)
 * Copyright (c) 2004,2005,2006,2007,2008,2009 NVIDIA Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Known bugs:
 * We suspect that on some hardware no TX done interrupts are generated.
 * This means recovery from netif_stop_queue only happens if the hw timer
 * interrupt fires (100 times/second, configurable with NVREG_POLL_DEFAULT)
 * and the timer is active in the IRQMask, or if a rx packet arrives by chance.
 * If your hardware reliably generates tx done interrupts, then you can remove
 * DEV_NEED_TIMERIRQ from the driver_data flags.
 * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
 * superfluous timer interrupts from the nic.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#define FORCEDETH_VERSION		"0.64"
#define DRV_NAME			"forcedeth"

#include <linux/module.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>
#include <linux/timer.h>
#include <linux/skbuff.h>
#include <linux/mii.h>
#include <linux/random.h>
#include <linux/if_vlan.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/prefetch.h>
#include <linux/u64_stats_sync.h>
#include <linux/io.h>

#include <asm/irq.h>

#define TX_WORK_PER_LOOP  64
#define RX_WORK_PER_LOOP  64

/*
 * Hardware access:
 */

#define DEV_NEED_TIMERIRQ          0x0000001  /* set the timer irq flag in the irq mask */
#define DEV_NEED_LINKTIMER         0x0000002  /* poll link settings. Relies on the timer irq */
#define DEV_HAS_LARGEDESC          0x0000004  /* device supports jumbo frames and needs packet format 2 */
#define DEV_HAS_HIGH_DMA           0x0000008  /* device supports 64bit dma */
#define DEV_HAS_CHECKSUM           0x0000010  /* device supports tx and rx checksum offloads */
#define DEV_HAS_VLAN               0x0000020  /* device supports vlan tagging and stripping */
#define DEV_HAS_MSI                0x0000040  /* device supports MSI */
#define DEV_HAS_MSI_X              0x0000080  /* device supports MSI-X */
#define DEV_HAS_POWER_CNTRL        0x0000100  /* device supports power savings */
#define DEV_HAS_STATISTICS_V1      0x0000200  /* device supports hw statistics version 1 */
#define DEV_HAS_STATISTICS_V2      0x0000400  /* device supports hw statistics version 2 */
#define DEV_HAS_STATISTICS_V3      0x0000800  /* device supports hw statistics version 3 */
#define DEV_HAS_STATISTICS_V12     0x0000600  /* device supports hw statistics version 1 and 2 */
#define DEV_HAS_STATISTICS_V123    0x0000e00  /* device supports hw statistics version 1, 2, and 3 */
#define DEV_HAS_TEST_EXTENDED      0x0001000  /* device supports extended diagnostic test */
#define DEV_HAS_MGMT_UNIT          0x0002000  /* device supports management unit */
#define DEV_HAS_CORRECT_MACADDR    0x0004000  /* device supports correct mac address order */
#define DEV_HAS_COLLISION_FIX      0x0008000  /* device supports tx collision fix */
#define DEV_HAS_PAUSEFRAME_TX_V1   0x0010000  /* device supports tx pause frames version 1 */
#define DEV_HAS_PAUSEFRAME_TX_V2   0x0020000  /* device supports tx pause frames version 2 */
#define DEV_HAS_PAUSEFRAME_TX_V3   0x0040000  /* device supports tx pause frames version 3 */
#define DEV_NEED_TX_LIMIT          0x0080000  /* device needs to limit tx */
#define DEV_NEED_TX_LIMIT2         0x0180000  /* device needs to limit tx, except for some revs */
#define DEV_HAS_GEAR_MODE          0x0200000  /* device supports gear mode */
#define DEV_NEED_PHY_INIT_FIX      0x0400000  /* device needs specific phy workaround */
#define DEV_NEED_LOW_POWER_FIX     0x0800000  /* device needs special power up workaround */
#define DEV_NEED_MSI_FIX           0x1000000  /* device needs msi workaround */

enum {
	NvRegIrqStatus = 0x000,
#define NVREG_IRQSTAT_MIIEVENT	0x040
#define NVREG_IRQSTAT_MASK		0x83ff
	NvRegIrqMask = 0x004,
#define NVREG_IRQ_RX_ERROR		0x0001
#define NVREG_IRQ_RX			0x0002
#define NVREG_IRQ_RX_NOBUF		0x0004
#define NVREG_IRQ_TX_ERR		0x0008
#define NVREG_IRQ_TX_OK			0x0010
#define NVREG_IRQ_TIMER			0x0020
#define NVREG_IRQ_LINK			0x0040
#define NVREG_IRQ_RX_FORCED		0x0080
#define NVREG_IRQ_TX_FORCED		0x0100
#define NVREG_IRQ_RECOVER_ERROR		0x8200
#define NVREG_IRQMASK_THROUGHPUT	0x00df
#define NVREG_IRQMASK_CPU		0x0060
#define NVREG_IRQ_TX_ALL		(NVREG_IRQ_TX_ERR|NVREG_IRQ_TX_OK|NVREG_IRQ_TX_FORCED)
#define NVREG_IRQ_RX_ALL		(NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_RX_FORCED)
#define NVREG_IRQ_OTHER			(NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_RECOVER_ERROR)

	NvRegUnknownSetupReg6 = 0x008,
#define NVREG_UNKSETUP6_VAL		3

/*
 * NVREG_POLL_DEFAULT is the interval length of the timer source on the nic
 * NVREG_POLL_DEFAULT=97 would result in an interval length of 1 ms
 */
	NvRegPollingInterval = 0x00c,
#define NVREG_POLL_DEFAULT_THROUGHPUT	65535 /* backup tx cleanup if loop max reached */
#define NVREG_POLL_DEFAULT_CPU	13
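
/*
 * Worked example for NvRegPollingInterval above (a sketch of the
 * relationship also stated at the poll_interval module option below):
 * the register counts in units of 2^10/100 us, i.e.
 * value = (time_in_micro_secs * 100) / (2^10), so a 1 ms interval gives
 * (1000 * 100) / 1024 ~= 97, matching "NVREG_POLL_DEFAULT=97" above.
 */
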
	NvRegMSIMap0 = 0x020,
	NvRegMSIMap1 = 0x024,
	NvRegMSIIrqMask = 0x030,
#define NVREG_MSI_VECTOR_0_ENABLED 0x01

	NvRegMisc1 = 0x080,
#define NVREG_MISC1_PAUSE_TX	0x01
#define NVREG_MISC1_HD		0x02
#define NVREG_MISC1_FORCE	0x3b0f3c

	NvRegMacReset = 0x34,
#define NVREG_MAC_RESET_ASSERT	0x0F3
	NvRegTransmitterControl = 0x084,
#define NVREG_XMITCTL_START	0x01
#define NVREG_XMITCTL_MGMT_ST	0x40000000
#define NVREG_XMITCTL_SYNC_MASK		0x000f0000
#define NVREG_XMITCTL_SYNC_NOT_READY	0x0
#define NVREG_XMITCTL_SYNC_PHY_INIT	0x00040000
#define NVREG_XMITCTL_MGMT_SEMA_MASK	0x00000f00
#define NVREG_XMITCTL_MGMT_SEMA_FREE	0x0
#define NVREG_XMITCTL_HOST_SEMA_MASK	0x0000f000
#define NVREG_XMITCTL_HOST_SEMA_ACQ	0x0000f000
#define NVREG_XMITCTL_HOST_LOADED	0x00004000
#define NVREG_XMITCTL_TX_PATH_EN	0x01000000
#define NVREG_XMITCTL_DATA_START	0x00100000
#define NVREG_XMITCTL_DATA_READY	0x00010000
#define NVREG_XMITCTL_DATA_ERROR	0x00020000
	NvRegTransmitterStatus = 0x088,
#define NVREG_XMITSTAT_BUSY	0x01

	NvRegPacketFilterFlags = 0x8c,
#define NVREG_PFF_PAUSE_RX	0x08
#define NVREG_PFF_ALWAYS	0x7F0000
#define NVREG_PFF_PROMISC	0x80
#define NVREG_PFF_MYADDR	0x20
#define NVREG_PFF_LOOPBACK	0x10

	NvRegOffloadConfig = 0x90,
#define NVREG_OFFLOAD_HOMEPHY	0x601
#define NVREG_OFFLOAD_NORMAL	RX_NIC_BUFSIZE
	NvRegReceiverControl = 0x094,
#define NVREG_RCVCTL_START	0x01
#define NVREG_RCVCTL_RX_PATH_EN	0x01000000
	NvRegReceiverStatus = 0x98,
#define NVREG_RCVSTAT_BUSY	0x01

	NvRegSlotTime = 0x9c,
#define NVREG_SLOTTIME_LEGBF_ENABLED	0x80000000
#define NVREG_SLOTTIME_10_100_FULL	0x00007f00
#define NVREG_SLOTTIME_1000_FULL	0x0003ff00
#define NVREG_SLOTTIME_HALF		0x0000ff00
#define NVREG_SLOTTIME_DEFAULT		0x00007f00
#define NVREG_SLOTTIME_MASK		0x000000ff

	NvRegTxDeferral = 0xA0,
#define NVREG_TX_DEFERRAL_DEFAULT		0x15050f
#define NVREG_TX_DEFERRAL_RGMII_10_100		0x16070f
#define NVREG_TX_DEFERRAL_RGMII_1000		0x14050f
#define NVREG_TX_DEFERRAL_RGMII_STRETCH_10	0x16190f
#define NVREG_TX_DEFERRAL_RGMII_STRETCH_100	0x16300f
#define NVREG_TX_DEFERRAL_MII_STRETCH		0x152000
	NvRegRxDeferral = 0xA4,
#define NVREG_RX_DEFERRAL_DEFAULT	0x16
	NvRegMacAddrA = 0xA8,
	NvRegMacAddrB = 0xAC,
	NvRegMulticastAddrA = 0xB0,
#define NVREG_MCASTADDRA_FORCE	0x01
	NvRegMulticastAddrB = 0xB4,
	NvRegMulticastMaskA = 0xB8,
#define NVREG_MCASTMASKA_NONE		0xffffffff
	NvRegMulticastMaskB = 0xBC,
#define NVREG_MCASTMASKB_NONE		0xffff

	NvRegPhyInterface = 0xC0,
#define PHY_RGMII		0x10000000
	NvRegBackOffControl = 0xC4,
#define NVREG_BKOFFCTRL_DEFAULT			0x70000000
#define NVREG_BKOFFCTRL_SEED_MASK		0x000003ff
#define NVREG_BKOFFCTRL_SELECT			24
#define NVREG_BKOFFCTRL_GEAR			12

	NvRegTxRingPhysAddr = 0x100,
	NvRegRxRingPhysAddr = 0x104,
	NvRegRingSizes = 0x108,
#define NVREG_RINGSZ_TXSHIFT 0
#define NVREG_RINGSZ_RXSHIFT 16
	NvRegTransmitPoll = 0x10c,
#define NVREG_TRANSMITPOLL_MAC_ADDR_REV	0x00008000
	NvRegLinkSpeed = 0x110,
#define NVREG_LINKSPEED_FORCE	0x10000
#define NVREG_LINKSPEED_10	1000
#define NVREG_LINKSPEED_100	100
#define NVREG_LINKSPEED_1000	50
#define NVREG_LINKSPEED_MASK	(0xFFF)
	NvRegUnknownSetupReg5 = 0x130,
#define NVREG_UNKSETUP5_BIT31	(1<<31)
	NvRegTxWatermark = 0x13c,
#define NVREG_TX_WM_DESC1_DEFAULT	0x0200010
#define NVREG_TX_WM_DESC2_3_DEFAULT	0x1e08000
#define NVREG_TX_WM_DESC2_3_1000	0xfe08000
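
/*
 * Note on the NVREG_TXRXCTL_DESC_* values below: they select which
 * descriptor format the MAC uses, and elsewhere in the driver the value
 * matching np->desc_ver (DESC_VER_1/2/3) is folded into
 * np->txrxctl_bits before being written to NvRegTxRxControl.
 */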
	NvRegTxRxControl = 0x144,
#define NVREG_TXRXCTL_KICK	0x0001
#define NVREG_TXRXCTL_BIT1	0x0002
#define NVREG_TXRXCTL_BIT2	0x0004
#define NVREG_TXRXCTL_IDLE	0x0008
#define NVREG_TXRXCTL_RESET	0x0010
#define NVREG_TXRXCTL_RXCHECK	0x0400
#define NVREG_TXRXCTL_DESC_1	0
#define NVREG_TXRXCTL_DESC_2	0x002100
#define NVREG_TXRXCTL_DESC_3	0xc02200
#define NVREG_TXRXCTL_VLANSTRIP 0x00040
#define NVREG_TXRXCTL_VLANINS	0x00080
	NvRegTxRingPhysAddrHigh = 0x148,
	NvRegRxRingPhysAddrHigh = 0x14C,
	NvRegTxPauseFrame = 0x170,
#define NVREG_TX_PAUSEFRAME_DISABLE	0x0fff0080
#define NVREG_TX_PAUSEFRAME_ENABLE_V1	0x01800010
#define NVREG_TX_PAUSEFRAME_ENABLE_V2	0x056003f0
#define NVREG_TX_PAUSEFRAME_ENABLE_V3	0x09f00880
	NvRegTxPauseFrameLimit = 0x174,
#define NVREG_TX_PAUSEFRAMELIMIT_ENABLE	0x00010000
	NvRegMIIStatus = 0x180,
#define NVREG_MIISTAT_ERROR		0x0001
#define NVREG_MIISTAT_LINKCHANGE	0x0008
#define NVREG_MIISTAT_MASK_RW		0x0007
#define NVREG_MIISTAT_MASK_ALL		0x000f
	NvRegMIIMask = 0x184,
#define NVREG_MII_LINKCHANGE		0x0008

	NvRegAdapterControl = 0x188,
#define NVREG_ADAPTCTL_START	0x02
#define NVREG_ADAPTCTL_LINKUP	0x04
#define NVREG_ADAPTCTL_PHYVALID	0x40000
#define NVREG_ADAPTCTL_RUNNING	0x100000
#define NVREG_ADAPTCTL_PHYSHIFT	24
	NvRegMIISpeed = 0x18c,
#define NVREG_MIISPEED_BIT8	(1<<8)
#define NVREG_MIIDELAY	5
	NvRegMIIControl = 0x190,
#define NVREG_MIICTL_INUSE	0x08000
#define NVREG_MIICTL_WRITE	0x00400
#define NVREG_MIICTL_ADDRSHIFT	5
	NvRegMIIData = 0x194,
	NvRegTxUnicast = 0x1a0,
	NvRegTxMulticast = 0x1a4,
	NvRegTxBroadcast = 0x1a8,
	NvRegWakeUpFlags = 0x200,
#define NVREG_WAKEUPFLAGS_VAL		0x7770
#define NVREG_WAKEUPFLAGS_BUSYSHIFT	24
#define NVREG_WAKEUPFLAGS_ENABLESHIFT	16
#define NVREG_WAKEUPFLAGS_D3SHIFT	12
#define NVREG_WAKEUPFLAGS_D2SHIFT	8
#define NVREG_WAKEUPFLAGS_D1SHIFT	4
#define NVREG_WAKEUPFLAGS_D0SHIFT	0
#define NVREG_WAKEUPFLAGS_ACCEPT_MAGPAT		0x01
#define NVREG_WAKEUPFLAGS_ACCEPT_WAKEUPPAT	0x02
#define NVREG_WAKEUPFLAGS_ACCEPT_LINKCHANGE	0x04
#define NVREG_WAKEUPFLAGS_ENABLE	0x1111

	NvRegMgmtUnitGetVersion = 0x204,
#define NVREG_MGMTUNITGETVERSION	0x01
	NvRegMgmtUnitVersion = 0x208,
#define NVREG_MGMTUNITVERSION		0x08
	NvRegPowerCap = 0x268,
#define NVREG_POWERCAP_D3SUPP	(1<<30)
#define NVREG_POWERCAP_D2SUPP	(1<<26)
#define NVREG_POWERCAP_D1SUPP	(1<<25)
	NvRegPowerState = 0x26c,
#define NVREG_POWERSTATE_POWEREDUP	0x8000
#define NVREG_POWERSTATE_VALID		0x0100
#define NVREG_POWERSTATE_MASK		0x0003
#define NVREG_POWERSTATE_D0		0x0000
#define NVREG_POWERSTATE_D1		0x0001
#define NVREG_POWERSTATE_D2		0x0002
#define NVREG_POWERSTATE_D3		0x0003
	NvRegMgmtUnitControl = 0x278,
#define NVREG_MGMTUNITCONTROL_INUSE	0x20000
	NvRegTxCnt = 0x280,
	NvRegTxZeroReXmt = 0x284,
	NvRegTxOneReXmt = 0x288,
	NvRegTxManyReXmt = 0x28c,
	NvRegTxLateCol = 0x290,
	NvRegTxUnderflow = 0x294,
	NvRegTxLossCarrier = 0x298,
	NvRegTxExcessDef = 0x29c,
	NvRegTxRetryErr = 0x2a0,
	NvRegRxFrameErr = 0x2a4,
	NvRegRxExtraByte = 0x2a8,
	NvRegRxLateCol = 0x2ac,
	NvRegRxRunt = 0x2b0,
	NvRegRxFrameTooLong = 0x2b4,
	NvRegRxOverflow = 0x2b8,
	NvRegRxFCSErr = 0x2bc,
	NvRegRxFrameAlignErr = 0x2c0,
	NvRegRxLenErr = 0x2c4,
	NvRegRxUnicast = 0x2c8,
	NvRegRxMulticast = 0x2cc,
	NvRegRxBroadcast = 0x2d0,
	NvRegTxDef = 0x2d4,
	NvRegTxFrame = 0x2d8,
	NvRegRxCnt = 0x2dc,
	NvRegTxPause = 0x2e0,
	NvRegRxPause = 0x2e4,
	NvRegRxDropFrame = 0x2e8,
	NvRegVlanControl = 0x300,
#define NVREG_VLANCONTROL_ENABLE	0x2000
	NvRegMSIXMap0 = 0x3e0,
	NvRegMSIXMap1 = 0x3e4,
	NvRegMSIXIrqStatus = 0x3f0,

	NvRegPowerState2 = 0x600,
#define NVREG_POWERSTATE2_POWERUP_MASK		0x0F15
#define NVREG_POWERSTATE2_POWERUP_REV_A3	0x0001
#define NVREG_POWERSTATE2_PHY_RESET		0x0004
#define NVREG_POWERSTATE2_GATE_CLOCKS		0x0F00
};

/* Big endian: should work, but is untested */
struct ring_desc {
	__le32 buf;
	__le32 flaglen;
};

struct ring_desc_ex {
	__le32 bufhigh;
	__le32 buflow;
	__le32 txvlan;
	__le32 flaglen;
};

union ring_type {
	struct ring_desc *orig;
	struct ring_desc_ex *ex;
};

#define FLAG_MASK_V1 0xffff0000
#define FLAG_MASK_V2 0xffffc000
#define LEN_MASK_V1 (0xffffffff ^ FLAG_MASK_V1)
#define LEN_MASK_V2 (0xffffffff ^ FLAG_MASK_V2)

#define NV_TX_LASTPACKET	(1<<16)
#define NV_TX_RETRYERROR	(1<<19)
#define NV_TX_RETRYCOUNT_MASK	(0xF<<20)
#define NV_TX_FORCED_INTERRUPT	(1<<24)
#define NV_TX_DEFERRED		(1<<26)
#define NV_TX_CARRIERLOST	(1<<27)
#define NV_TX_LATECOLLISION	(1<<28)
#define NV_TX_UNDERFLOW		(1<<29)
#define NV_TX_ERROR		(1<<30)
#define NV_TX_VALID		(1<<31)

#define NV_TX2_LASTPACKET	(1<<29)
#define NV_TX2_RETRYERROR	(1<<18)
#define NV_TX2_RETRYCOUNT_MASK	(0xF<<19)
#define NV_TX2_FORCED_INTERRUPT	(1<<30)
#define NV_TX2_DEFERRED		(1<<25)
#define NV_TX2_CARRIERLOST	(1<<26)
#define NV_TX2_LATECOLLISION	(1<<27)
#define NV_TX2_UNDERFLOW	(1<<28)
/* error and valid are the same for both */
#define NV_TX2_ERROR		(1<<30)
#define NV_TX2_VALID		(1<<31)
#define NV_TX2_TSO		(1<<28)
#define NV_TX2_TSO_SHIFT	14
#define NV_TX2_TSO_MAX_SHIFT	14
#define NV_TX2_TSO_MAX_SIZE	(1<<NV_TX2_TSO_MAX_SHIFT)
#define NV_TX2_CHECKSUM_L3	(1<<27)
#define NV_TX2_CHECKSUM_L4	(1<<26)

#define NV_TX3_VLAN_TAG_PRESENT (1<<18)

#define NV_RX_DESCRIPTORVALID	(1<<16)
#define NV_RX_MISSEDFRAME	(1<<17)
#define NV_RX_SUBTRACT1		(1<<18)
#define NV_RX_ERROR1		(1<<23)
#define NV_RX_ERROR2		(1<<24)
#define NV_RX_ERROR3		(1<<25)
#define NV_RX_ERROR4		(1<<26)
#define NV_RX_CRCERR		(1<<27)
#define NV_RX_OVERFLOW		(1<<28)
#define NV_RX_FRAMINGERR	(1<<29)
#define NV_RX_ERROR		(1<<30)
#define NV_RX_AVAIL		(1<<31)
#define NV_RX_ERROR_MASK	(NV_RX_ERROR1|NV_RX_ERROR2|NV_RX_ERROR3|NV_RX_ERROR4|NV_RX_CRCERR|NV_RX_OVERFLOW|NV_RX_FRAMINGERR)

#define NV_RX2_CHECKSUMMASK	(0x1C000000)
#define NV_RX2_CHECKSUM_IP	(0x10000000)
#define NV_RX2_CHECKSUM_IP_TCP	(0x14000000)
#define NV_RX2_CHECKSUM_IP_UDP	(0x18000000)
#define NV_RX2_DESCRIPTORVALID	(1<<29)
#define NV_RX2_SUBTRACT1	(1<<25)
#define NV_RX2_ERROR1		(1<<18)
#define NV_RX2_ERROR2		(1<<19)
#define NV_RX2_ERROR3		(1<<20)
#define NV_RX2_ERROR4		(1<<21)
#define NV_RX2_CRCERR		(1<<22)
#define NV_RX2_OVERFLOW		(1<<23)
#define NV_RX2_FRAMINGERR	(1<<24)
/* error and avail are the same for both */
#define NV_RX2_ERROR		(1<<30)
#define NV_RX2_AVAIL		(1<<31)
#define NV_RX2_ERROR_MASK	(NV_RX2_ERROR1|NV_RX2_ERROR2|NV_RX2_ERROR3|NV_RX2_ERROR4|NV_RX2_CRCERR|NV_RX2_OVERFLOW|NV_RX2_FRAMINGERR)

#define NV_RX3_VLAN_TAG_PRESENT (1<<16)
#define NV_RX3_VLAN_TAG_MASK	(0x0000FFFF)
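
/*
 * Illustrative sketch (this helper is not part of the driver and the
 * name is ours): how the v2/v3 RX checksum bits above combine. The
 * checksum status is a small code inside flaglen, so a packet whose
 * checksum was verified by hardware matches one of the IP_TCP/IP_UDP
 * patterns exactly; the test is an equality check on the masked value,
 * not an AND of independent flag bits.
 */
static inline bool nv_rx2_csum_verified_example(u32 flaglen)
{
	u32 csum = flaglen & NV_RX2_CHECKSUMMASK;

	return csum == NV_RX2_CHECKSUM_IP_TCP ||
	       csum == NV_RX2_CHECKSUM_IP_UDP;
}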

/* Miscellaneous hardware related defines: */
#define NV_PCI_REGSZ_VER1	0x270
#define NV_PCI_REGSZ_VER2	0x2d4
#define NV_PCI_REGSZ_VER3	0x604
#define NV_PCI_REGSZ_MAX	0x604

/* various timeout delays: all in usec */
#define NV_TXRX_RESET_DELAY	4
#define NV_TXSTOP_DELAY1	10
#define NV_TXSTOP_DELAY1MAX	500000
#define NV_TXSTOP_DELAY2	100
#define NV_RXSTOP_DELAY1	10
#define NV_RXSTOP_DELAY1MAX	500000
#define NV_RXSTOP_DELAY2	100
#define NV_SETUP5_DELAY		5
#define NV_SETUP5_DELAYMAX	50000
#define NV_POWERUP_DELAY	5
#define NV_POWERUP_DELAYMAX	5000
#define NV_MIIBUSY_DELAY	50
#define NV_MIIPHY_DELAY		10
#define NV_MIIPHY_DELAYMAX	10000
#define NV_MAC_RESET_DELAY	64

#define NV_WAKEUPPATTERNS	5
#define NV_WAKEUPMASKENTRIES	4

/* General driver defaults */
#define NV_WATCHDOG_TIMEO	(5*HZ)

#define RX_RING_DEFAULT		512
#define TX_RING_DEFAULT		256
#define RX_RING_MIN		128
#define TX_RING_MIN		64
#define RING_MAX_DESC_VER_1	1024
#define RING_MAX_DESC_VER_2_3	16384

/* rx/tx mac addr + type + vlan + align + slack*/
#define NV_RX_HEADERS		(64)
/* even more slack. */
#define NV_RX_ALLOC_PAD		(64)

/* maximum mtu size */
#define NV_PKTLIMIT_1	ETH_DATA_LEN	/* hard limit not known */
#define NV_PKTLIMIT_2	9100	/* Actual limit according to NVidia: 9202 */

#define OOM_REFILL	(1+HZ/20)
#define POLL_WAIT	(1+HZ/100)
#define LINK_TIMEOUT	(3*HZ)
#define STATS_INTERVAL	(10*HZ)

/*
 * desc_ver values:
 * The nic supports three different descriptor types:
 * - DESC_VER_1: Original
 * - DESC_VER_2: support for jumbo frames.
 * - DESC_VER_3: 64-bit format.
 */
#define DESC_VER_1	1
#define DESC_VER_2	2
#define DESC_VER_3	3
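
/*
 * Put differently: DESC_VER_1 and DESC_VER_2 use the 8-byte struct
 * ring_desc with a single 32-bit buffer address, while DESC_VER_3 uses
 * the 16-byte struct ring_desc_ex whose bufhigh/buflow pair carries a
 * 64-bit address; nv_optimized() below keys the two code paths off
 * this distinction.
 */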

/* PHY defines */
#define PHY_OUI_MARVELL		0x5043
#define PHY_OUI_CICADA		0x03f1
#define PHY_OUI_VITESSE		0x01c1
#define PHY_OUI_REALTEK		0x0732
#define PHY_OUI_REALTEK2	0x0020
#define PHYID1_OUI_MASK		0x03ff
#define PHYID1_OUI_SHFT		6
#define PHYID2_OUI_MASK		0xfc00
#define PHYID2_OUI_SHFT		10
#define PHYID2_MODEL_MASK		0x03f0
#define PHY_MODEL_REALTEK_8211		0x0110
#define PHY_REV_MASK			0x0001
#define PHY_REV_REALTEK_8211B		0x0000
#define PHY_REV_REALTEK_8211C		0x0001
#define PHY_MODEL_REALTEK_8201		0x0200
#define PHY_MODEL_MARVELL_E3016		0x0220
#define PHY_MARVELL_E3016_INITMASK	0x0300
#define PHY_CICADA_INIT1	0x0f000
#define PHY_CICADA_INIT2	0x0e00
#define PHY_CICADA_INIT3	0x01000
#define PHY_CICADA_INIT4	0x0200
#define PHY_CICADA_INIT5	0x0004
#define PHY_CICADA_INIT6	0x02000
#define PHY_VITESSE_INIT_REG1	0x1f
#define PHY_VITESSE_INIT_REG2	0x10
#define PHY_VITESSE_INIT_REG3	0x11
#define PHY_VITESSE_INIT_REG4	0x12
#define PHY_VITESSE_INIT_MSK1	0xc
#define PHY_VITESSE_INIT_MSK2	0x0180
#define PHY_VITESSE_INIT1	0x52b5
#define PHY_VITESSE_INIT2	0xaf8a
#define PHY_VITESSE_INIT3	0x8
#define PHY_VITESSE_INIT4	0x8f8a
#define PHY_VITESSE_INIT5	0xaf86
#define PHY_VITESSE_INIT6	0x8f86
#define PHY_VITESSE_INIT7	0xaf82
#define PHY_VITESSE_INIT8	0x0100
#define PHY_VITESSE_INIT9	0x8f82
#define PHY_VITESSE_INIT10	0x0
#define PHY_REALTEK_INIT_REG1	0x1f
#define PHY_REALTEK_INIT_REG2	0x19
#define PHY_REALTEK_INIT_REG3	0x13
#define PHY_REALTEK_INIT_REG4	0x14
#define PHY_REALTEK_INIT_REG5	0x18
#define PHY_REALTEK_INIT_REG6	0x11
#define PHY_REALTEK_INIT_REG7	0x01
#define PHY_REALTEK_INIT1	0x0000
#define PHY_REALTEK_INIT2	0x8e00
#define PHY_REALTEK_INIT3	0x0001
#define PHY_REALTEK_INIT4	0xad17
#define PHY_REALTEK_INIT5	0xfb54
#define PHY_REALTEK_INIT6	0xf5c7
#define PHY_REALTEK_INIT7	0x1000
#define PHY_REALTEK_INIT8	0x0003
#define PHY_REALTEK_INIT9	0x0008
#define PHY_REALTEK_INIT10	0x0005
#define PHY_REALTEK_INIT11	0x0200
#define PHY_REALTEK_INIT_MSK1	0x0003

#define PHY_GIGABIT	0x0100

#define PHY_TIMEOUT	0x1
#define PHY_ERROR	0x2

#define PHY_100		0x1
#define PHY_1000	0x2
#define PHY_HALF	0x100

#define NV_PAUSEFRAME_RX_CAPABLE 0x0001
#define NV_PAUSEFRAME_TX_CAPABLE 0x0002
#define NV_PAUSEFRAME_RX_ENABLE  0x0004
#define NV_PAUSEFRAME_TX_ENABLE  0x0008
#define NV_PAUSEFRAME_RX_REQ     0x0010
#define NV_PAUSEFRAME_TX_REQ     0x0020
#define NV_PAUSEFRAME_AUTONEG    0x0040

/* MSI/MSI-X defines */
#define NV_MSI_X_MAX_VECTORS	8
#define NV_MSI_X_VECTORS_MASK	0x000f
#define NV_MSI_CAPABLE		0x0010
#define NV_MSI_X_CAPABLE	0x0020
#define NV_MSI_ENABLED		0x0040
#define NV_MSI_X_ENABLED	0x0080

#define NV_MSI_X_VECTOR_ALL	0x0
#define NV_MSI_X_VECTOR_RX	0x0
#define NV_MSI_X_VECTOR_TX	0x1
#define NV_MSI_X_VECTOR_OTHER	0x2

#define NV_MSI_PRIV_OFFSET	0x68
#define NV_MSI_PRIV_VALUE	0xffffffff

#define NV_RESTART_TX		0x1
#define NV_RESTART_RX		0x2

#define NV_TX_LIMIT_COUNT	16

#define NV_DYNAMIC_THRESHOLD		4
#define NV_DYNAMIC_MAX_QUIET_COUNT	2048

/* statistics */
struct nv_ethtool_str {
	char name[ETH_GSTRING_LEN];
};

static const struct nv_ethtool_str nv_estats_str[] = {
	{ "tx_bytes" }, /* includes Ethernet FCS CRC */
	{ "tx_zero_rexmt" },
	{ "tx_one_rexmt" },
	{ "tx_many_rexmt" },
	{ "tx_late_collision" },
	{ "tx_fifo_errors" },
	{ "tx_carrier_errors" },
	{ "tx_excess_deferral" },
	{ "tx_retry_error" },
	{ "rx_frame_error" },
	{ "rx_extra_byte" },
	{ "rx_late_collision" },
	{ "rx_runt" },
	{ "rx_frame_too_long" },
	{ "rx_over_errors" },
	{ "rx_crc_errors" },
	{ "rx_frame_align_error" },
	{ "rx_length_error" },
	{ "rx_unicast" },
	{ "rx_multicast" },
	{ "rx_broadcast" },
	{ "rx_packets" },
	{ "rx_errors_total" },
	{ "tx_errors_total" },

	/* version 2 stats */
	{ "tx_deferral" },
	{ "tx_packets" },
	{ "rx_bytes" }, /* includes Ethernet FCS CRC */
	{ "tx_pause" },
	{ "rx_pause" },
	{ "rx_drop_frame" },

	/* version 3 stats */
	{ "tx_unicast" },
	{ "tx_multicast" },
	{ "tx_broadcast" }
};
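
/*
 * Note: the string order above must stay in lockstep with the field
 * order of struct nv_ethtool_stats below; the NV_DEV_STATISTICS_V*_COUNT
 * macros derive the per-version counts from the struct layout, so a
 * field or string added in only one place would skew every later entry.
 */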

struct nv_ethtool_stats {
	u64 tx_bytes; /* should be ifconfig->tx_bytes + 4*tx_packets */
	u64 tx_zero_rexmt;
	u64 tx_one_rexmt;
	u64 tx_many_rexmt;
	u64 tx_late_collision;
	u64 tx_fifo_errors;
	u64 tx_carrier_errors;
	u64 tx_excess_deferral;
	u64 tx_retry_error;
	u64 rx_frame_error;
	u64 rx_extra_byte;
	u64 rx_late_collision;
	u64 rx_runt;
	u64 rx_frame_too_long;
	u64 rx_over_errors;
	u64 rx_crc_errors;
	u64 rx_frame_align_error;
	u64 rx_length_error;
	u64 rx_unicast;
	u64 rx_multicast;
	u64 rx_broadcast;
	u64 rx_packets; /* should be ifconfig->rx_packets */
	u64 rx_errors_total;
	u64 tx_errors_total;

	/* version 2 stats */
	u64 tx_deferral;
	u64 tx_packets; /* should be ifconfig->tx_packets */
	u64 rx_bytes; /* should be ifconfig->rx_bytes + 4*rx_packets */
	u64 tx_pause;
	u64 rx_pause;
	u64 rx_drop_frame;

	/* version 3 stats */
	u64 tx_unicast;
	u64 tx_multicast;
	u64 tx_broadcast;
};

#define NV_DEV_STATISTICS_V3_COUNT (sizeof(struct nv_ethtool_stats)/sizeof(u64))
#define NV_DEV_STATISTICS_V2_COUNT (NV_DEV_STATISTICS_V3_COUNT - 3)
#define NV_DEV_STATISTICS_V1_COUNT (NV_DEV_STATISTICS_V2_COUNT - 6)

/* diagnostics */
#define NV_TEST_COUNT_BASE 3
#define NV_TEST_COUNT_EXTENDED 4

static const struct nv_ethtool_str nv_etests_str[] = {
	{ "link      (online/offline)" },
	{ "register  (offline)       " },
	{ "interrupt (offline)       " },
	{ "loopback  (offline)       " }
};

struct register_test {
	__u32 reg;
	__u32 mask;
};

static const struct register_test nv_registers_test[] = {
	{ NvRegUnknownSetupReg6, 0x01 },
	{ NvRegMisc1, 0x03c },
	{ NvRegOffloadConfig, 0x03ff },
	{ NvRegMulticastAddrA, 0xffffffff },
	{ NvRegTxWatermark, 0x0ff },
	{ NvRegWakeUpFlags, 0x07777 },
	{ 0, 0 }
};

struct nv_skb_map {
	struct sk_buff *skb;
	dma_addr_t dma;
	unsigned int dma_len:31;
	unsigned int dma_single:1;
	struct ring_desc_ex *first_tx_desc;
	struct nv_skb_map *next_tx_ctx;
};

/*
 * SMP locking:
 * All hardware access under netdev_priv(dev)->lock, except the performance
 * critical parts:
 * - rx is (pseudo-) lockless: it relies on the single-threading provided
 *	by the arch code for interrupts.
 * - tx setup is lockless: it relies on netif_tx_lock. Actual submission
 *	needs netdev_priv(dev)->lock :-(
 * - set_multicast_list: preparation lockless, relies on netif_tx_lock.
 *
 * Hardware stats updates are protected by hwstats_lock:
 * - updated by nv_do_stats_poll (timer). This is meant to avoid
 *   integer wraparound in the NIC stats registers, at low frequency
 *   (0.1 Hz)
 * - updated by nv_get_ethtool_stats + nv_get_stats64
 *
 * Software stats are accessed only through 64b synchronization points
 * and are not subject to other synchronization techniques (single
 * update thread on the TX or RX paths).
 */
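
/*
 * Sketch of the "64b synchronization points" mentioned above (the
 * swstats_*_syncp members of struct fe_priv below). The single writer
 * brackets its update:
 *
 *	u64_stats_update_begin(&np->swstats_rx_syncp);
 *	np->stat_rx_packets++;
 *	u64_stats_update_end(&np->swstats_rx_syncp);
 *
 * and readers loop with u64_stats_fetch_begin/u64_stats_fetch_retry
 * until they see a consistent snapshot, as nv_get_stats64() does.
 */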

/* in dev: base, irq */
struct fe_priv {
	spinlock_t lock;

	struct net_device *dev;
	struct napi_struct napi;

	/* hardware stats are updated in syscall and timer */
	spinlock_t hwstats_lock;
	struct nv_ethtool_stats estats;

	int in_shutdown;
	u32 linkspeed;
	int duplex;
	int autoneg;
	int fixed_mode;
	int phyaddr;
	int wolenabled;
	unsigned int phy_oui;
	unsigned int phy_model;
	unsigned int phy_rev;
	u16 gigabit;
	int intr_test;
	int recover_error;
	int quiet_count;

	/* General data: RO fields */
	dma_addr_t ring_addr;
	struct pci_dev *pci_dev;
	u32 orig_mac[2];
	u32 events;
	u32 irqmask;
	u32 desc_ver;
	u32 txrxctl_bits;
	u32 vlanctl_bits;
	u32 driver_data;
	u32 device_id;
	u32 register_size;
	u32 mac_in_use;
	int mgmt_version;
	int mgmt_sema;

	void __iomem *base;

	/* rx specific fields.
	 * Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
	 */
	union ring_type get_rx, put_rx, first_rx, last_rx;
	struct nv_skb_map *get_rx_ctx, *put_rx_ctx;
	struct nv_skb_map *first_rx_ctx, *last_rx_ctx;
	struct nv_skb_map *rx_skb;

	union ring_type rx_ring;
	unsigned int rx_buf_sz;
	unsigned int pkt_limit;
	struct timer_list oom_kick;
	struct timer_list nic_poll;
	struct timer_list stats_poll;
	u32 nic_poll_irq;
	int rx_ring_size;

	/* RX software stats */
	struct u64_stats_sync swstats_rx_syncp;
	u64 stat_rx_packets;
	u64 stat_rx_bytes; /* not always available in HW */
	u64 stat_rx_missed_errors;
	u64 stat_rx_dropped;

	/* media detection workaround.
	 * Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
	 */
	int need_linktimer;
	unsigned long link_timeout;
	/*
	 * tx specific fields.
	 */
	union ring_type get_tx, put_tx, last_tx;
	struct nv_skb_map *get_tx_ctx, *put_tx_ctx;
	struct nv_skb_map *last_tx_ctx;
	struct nv_skb_map *tx_skb;

	union ring_type tx_ring;
	u32 tx_flags;
	int tx_ring_size;
	int tx_limit;
	u32 tx_pkts_in_progress;
	struct nv_skb_map *tx_change_owner;
	struct nv_skb_map *tx_end_flip;
	int tx_stop;

	/* TX software stats */
	struct u64_stats_sync swstats_tx_syncp;
	u64 stat_tx_packets; /* not always available in HW */
	u64 stat_tx_bytes;
	u64 stat_tx_dropped;

	/* msi/msi-x fields */
	u32 msi_flags;
	struct msix_entry msi_x_entry[NV_MSI_X_MAX_VECTORS];

	/* flow control */
	u32 pause_flags;

	/* power saved state */
	u32 saved_config_space[NV_PCI_REGSZ_MAX/4];

	/* for different msi-x irq type */
	char name_rx[IFNAMSIZ + 3];       /* -rx    */
	char name_tx[IFNAMSIZ + 3];       /* -tx    */
	char name_other[IFNAMSIZ + 6];    /* -other */
};

/*
 * Maximum number of loops until we assume that a bit in the irq mask
 * is stuck. Overridable with module param.
 */
static int max_interrupt_work = 4;

/*
 * Optimization can be either throughput mode or cpu mode
 *
 * Throughput Mode: Every tx and rx packet will generate an interrupt.
 * CPU Mode: Interrupts are controlled by a timer.
 */
enum {
	NV_OPTIMIZATION_MODE_THROUGHPUT,
	NV_OPTIMIZATION_MODE_CPU,
	NV_OPTIMIZATION_MODE_DYNAMIC
};
static int optimization_mode = NV_OPTIMIZATION_MODE_DYNAMIC;

/*
 * Poll interval for timer irq
 *
 * This interval determines how frequently an interrupt is generated.
 * The value is determined by [(time_in_micro_secs * 100) / (2^10)]
 * Min = 0, and Max = 65535
 */
static int poll_interval = -1;

/*
 * MSI interrupts
 */
enum {
	NV_MSI_INT_DISABLED,
	NV_MSI_INT_ENABLED
};
static int msi = NV_MSI_INT_ENABLED;

/*
 * MSIX interrupts
 */
enum {
	NV_MSIX_INT_DISABLED,
	NV_MSIX_INT_ENABLED
};
static int msix = NV_MSIX_INT_ENABLED;

/*
 * DMA 64bit
 */
enum {
	NV_DMA_64BIT_DISABLED,
	NV_DMA_64BIT_ENABLED
};
static int dma_64bit = NV_DMA_64BIT_ENABLED;

/*
 * Debug output control for tx_timeout
 */
static bool debug_tx_timeout = false;
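
/*
 * As with max_interrupt_work above, these knobs (and phy_cross /
 * phy_power_down below) are intended to be overridable as module
 * parameters; the module_param() declarations live near the bottom of
 * the file.
 */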

/*
 * Crossover Detection
 * Realtek 8201 phy + some OEM boards do not work properly.
 */
enum {
	NV_CROSSOVER_DETECTION_DISABLED,
	NV_CROSSOVER_DETECTION_ENABLED
};
static int phy_cross = NV_CROSSOVER_DETECTION_DISABLED;

/*
 * Power down phy when interface is down (persists through reboot;
 * older Linux and other OSes may not power it up again)
 */
static int phy_power_down;

static inline struct fe_priv *get_nvpriv(struct net_device *dev)
{
	return netdev_priv(dev);
}

static inline u8 __iomem *get_hwbase(struct net_device *dev)
{
	return ((struct fe_priv *)netdev_priv(dev))->base;
}

static inline void pci_push(u8 __iomem *base)
{
	/* force out pending posted writes */
	readl(base);
}

static inline u32 nv_descr_getlength(struct ring_desc *prd, u32 v)
{
	return le32_to_cpu(prd->flaglen)
		& ((v == DESC_VER_1) ? LEN_MASK_V1 : LEN_MASK_V2);
}

static inline u32 nv_descr_getlength_ex(struct ring_desc_ex *prd, u32 v)
{
	return le32_to_cpu(prd->flaglen) & LEN_MASK_V2;
}

static bool nv_optimized(struct fe_priv *np)
{
	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
		return false;
	return true;
}

static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target,
		     int delay, int delaymax)
{
	u8 __iomem *base = get_hwbase(dev);

	pci_push(base);
	do {
		udelay(delay);
		delaymax -= delay;
		if (delaymax < 0)
			return 1;
	} while ((readl(base + offset) & mask) != target);
	return 0;
}

#define NV_SETUP_RX_RING 0x01
#define NV_SETUP_TX_RING 0x02

static inline u32 dma_low(dma_addr_t addr)
{
	return addr;
}

static inline u32 dma_high(dma_addr_t addr)
{
	return addr>>31>>1;	/* 0 if 32bit, shift down by 32 if 64bit */
}
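
/*
 * The double shift in dma_high() is deliberate: when dma_addr_t is a
 * 32-bit type, a single "addr >> 32" would be undefined behavior in C
 * (shift count equal to the type width), whereas ">> 31 >> 1" is well
 * defined and yields 0, as the comment above notes.
 */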

static void setup_hw_rings(struct net_device *dev, int rxtx_flags)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);

	if (!nv_optimized(np)) {
		if (rxtx_flags & NV_SETUP_RX_RING)
			writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr);
		if (rxtx_flags & NV_SETUP_TX_RING)
			writel(dma_low(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
	} else {
		if (rxtx_flags & NV_SETUP_RX_RING) {
			writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr);
			writel(dma_high(np->ring_addr), base + NvRegRxRingPhysAddrHigh);
		}
		if (rxtx_flags & NV_SETUP_TX_RING) {
			writel(dma_low(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr);
			writel(dma_high(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddrHigh);
		}
	}
}

static void free_rings(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!nv_optimized(np)) {
		if (np->rx_ring.orig)
			dma_free_coherent(&np->pci_dev->dev,
					  sizeof(struct ring_desc) *
					  (np->rx_ring_size +
					  np->tx_ring_size),
					  np->rx_ring.orig, np->ring_addr);
	} else {
		if (np->rx_ring.ex)
			dma_free_coherent(&np->pci_dev->dev,
					  sizeof(struct ring_desc_ex) *
					  (np->rx_ring_size +
					  np->tx_ring_size),
					  np->rx_ring.ex, np->ring_addr);
	}
	kfree(np->rx_skb);
	kfree(np->tx_skb);
}

static int using_multi_irqs(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
	    ((np->msi_flags & NV_MSI_X_ENABLED) &&
	     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1)))
		return 0;
	else
		return 1;
}

static void nv_txrx_gate(struct net_device *dev, bool gate)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 powerstate;

	if (!np->mac_in_use &&
	    (np->driver_data & DEV_HAS_POWER_CNTRL)) {
		powerstate = readl(base + NvRegPowerState2);
		if (gate)
			powerstate |= NVREG_POWERSTATE2_GATE_CLOCKS;
		else
			powerstate &= ~NVREG_POWERSTATE2_GATE_CLOCKS;
		writel(powerstate, base + NvRegPowerState2);
	}
}

static void nv_enable_irq(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			enable_irq(np->pci_dev->irq);
	} else {
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
	}
}

static void nv_disable_irq(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			disable_irq(np->pci_dev->irq);
	} else {
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
	}
}

/* In MSIX mode, a write to irqmask behaves as XOR */
static void nv_enable_hw_interrupts(struct net_device *dev, u32 mask)
{
	u8 __iomem *base = get_hwbase(dev);

	writel(mask, base + NvRegIrqMask);
}

static void nv_disable_hw_interrupts(struct net_device *dev, u32 mask)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);

	if (np->msi_flags & NV_MSI_X_ENABLED) {
		writel(mask, base + NvRegIrqMask);
	} else {
		if (np->msi_flags & NV_MSI_ENABLED)
			writel(0, base + NvRegMSIIrqMask);
		writel(0, base + NvRegIrqMask);
	}
}

static void nv_napi_enable(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	napi_enable(&np->napi);
}

static void nv_napi_disable(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	napi_disable(&np->napi);
}

#define MII_READ	(-1)
/* mii_rw: read/write a register on the PHY.
 *
 * Caller must guarantee serialization
 */
static int mii_rw(struct net_device *dev, int addr, int miireg, int value)
{
	u8 __iomem *base = get_hwbase(dev);
	u32 reg;
	int retval;

	writel(NVREG_MIISTAT_MASK_RW, base + NvRegMIIStatus);

	reg = readl(base + NvRegMIIControl);
	if (reg & NVREG_MIICTL_INUSE) {
		writel(NVREG_MIICTL_INUSE, base + NvRegMIIControl);
		udelay(NV_MIIBUSY_DELAY);
	}

	reg = (addr << NVREG_MIICTL_ADDRSHIFT) | miireg;
	if (value != MII_READ) {
		writel(value, base + NvRegMIIData);
		reg |= NVREG_MIICTL_WRITE;
	}
	writel(reg, base + NvRegMIIControl);

	if (reg_delay(dev, NvRegMIIControl, NVREG_MIICTL_INUSE, 0,
			NV_MIIPHY_DELAY, NV_MIIPHY_DELAYMAX)) {
		retval = -1;
	} else if (value != MII_READ) {
		/* it was a write operation - fewer failures are detectable */
		retval = 0;
	} else if (readl(base + NvRegMIIStatus) & NVREG_MIISTAT_ERROR) {
		retval = -1;
	} else {
		retval = readl(base + NvRegMIIData);
	}

	return retval;
}
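
/*
 * Usage sketch for mii_rw(): passing MII_READ as the value performs a
 * read, any other value performs a write, e.g.
 *
 *	int bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
 *	mii_rw(dev, np->phyaddr, MII_BMCR, bmcr | BMCR_ANRESTART);
 *
 * Reads return the register value or -1 on failure; writes return 0 on
 * success and -1 on failure, which is why the PHY init helpers below
 * treat any nonzero return from a write as PHY_ERROR.
 */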

static int phy_reset(struct net_device *dev, u32 bmcr_setup)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 miicontrol;
	unsigned int tries = 0;

	miicontrol = BMCR_RESET | bmcr_setup;
	if (mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol))
		return -1;

	/* wait for 500ms */
	msleep(500);

	/* must wait till reset is deasserted */
	while (miicontrol & BMCR_RESET) {
		usleep_range(10000, 20000);
		miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
		/* FIXME: 100 tries seem excessive */
		if (tries++ > 100)
			return -1;
	}
	return 0;
}

static int init_realtek_8211b(struct net_device *dev, struct fe_priv *np)
{
	static const struct {
		int reg;
		int init;
	} ri[] = {
		{ PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1 },
		{ PHY_REALTEK_INIT_REG2, PHY_REALTEK_INIT2 },
		{ PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3 },
		{ PHY_REALTEK_INIT_REG3, PHY_REALTEK_INIT4 },
		{ PHY_REALTEK_INIT_REG4, PHY_REALTEK_INIT5 },
		{ PHY_REALTEK_INIT_REG5, PHY_REALTEK_INIT6 },
		{ PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1 },
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(ri); i++) {
		if (mii_rw(dev, np->phyaddr, ri[i].reg, ri[i].init))
			return PHY_ERROR;
	}

	return 0;
}

static int init_realtek_8211c(struct net_device *dev, struct fe_priv *np)
{
	u32 reg;
	u8 __iomem *base = get_hwbase(dev);
	u32 powerstate = readl(base + NvRegPowerState2);

	/* need to perform hw phy reset */
	powerstate |= NVREG_POWERSTATE2_PHY_RESET;
	writel(powerstate, base + NvRegPowerState2);
	msleep(25);

	powerstate &= ~NVREG_POWERSTATE2_PHY_RESET;
	writel(powerstate, base + NvRegPowerState2);
	msleep(25);

	reg = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, MII_READ);
	reg |= PHY_REALTEK_INIT9;
	if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, reg))
		return PHY_ERROR;
	if (mii_rw(dev, np->phyaddr,
		   PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT10))
		return PHY_ERROR;
	reg = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG7, MII_READ);
	if (!(reg & PHY_REALTEK_INIT11)) {
		reg |= PHY_REALTEK_INIT11;
		if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG7, reg))
			return PHY_ERROR;
	}
	if (mii_rw(dev, np->phyaddr,
		   PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1))
		return PHY_ERROR;

	return 0;
}

static int init_realtek_8201(struct net_device *dev, struct fe_priv *np)
{
	u32 phy_reserved;

	if (np->driver_data & DEV_NEED_PHY_INIT_FIX) {
		phy_reserved = mii_rw(dev, np->phyaddr,
				      PHY_REALTEK_INIT_REG6, MII_READ);
		phy_reserved |= PHY_REALTEK_INIT7;
		if (mii_rw(dev, np->phyaddr,
			   PHY_REALTEK_INIT_REG6, phy_reserved))
			return PHY_ERROR;
	}

	return 0;
}

static int init_realtek_8201_cross(struct net_device *dev, struct fe_priv *np)
{
	u32 phy_reserved;

	if (phy_cross == NV_CROSSOVER_DETECTION_DISABLED) {
		if (mii_rw(dev, np->phyaddr,
			   PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3))
			return PHY_ERROR;
		phy_reserved = mii_rw(dev, np->phyaddr,
				      PHY_REALTEK_INIT_REG2, MII_READ);
		phy_reserved &= ~PHY_REALTEK_INIT_MSK1;
		phy_reserved |= PHY_REALTEK_INIT3;
		if (mii_rw(dev, np->phyaddr,
			   PHY_REALTEK_INIT_REG2, phy_reserved))
			return PHY_ERROR;
		if (mii_rw(dev, np->phyaddr,
			   PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1))
			return PHY_ERROR;
	}

	return 0;
}

static int init_cicada(struct net_device *dev, struct fe_priv *np,
		       u32 phyinterface)
{
	u32 phy_reserved;

	if (phyinterface & PHY_RGMII) {
		phy_reserved = mii_rw(dev, np->phyaddr, MII_RESV1, MII_READ);
		phy_reserved &= ~(PHY_CICADA_INIT1 | PHY_CICADA_INIT2);
		phy_reserved |= (PHY_CICADA_INIT3 | PHY_CICADA_INIT4);
		if (mii_rw(dev, np->phyaddr, MII_RESV1, phy_reserved))
			return PHY_ERROR;
		phy_reserved = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
		phy_reserved |= PHY_CICADA_INIT5;
		if (mii_rw(dev, np->phyaddr, MII_NCONFIG, phy_reserved))
			return PHY_ERROR;
	}
	phy_reserved = mii_rw(dev, np->phyaddr, MII_SREVISION, MII_READ);
	phy_reserved |= PHY_CICADA_INIT6;
	if (mii_rw(dev, np->phyaddr, MII_SREVISION, phy_reserved))
		return PHY_ERROR;

	return 0;
}

static int init_vitesse(struct net_device *dev, struct fe_priv *np)
{
	u32 phy_reserved;

	if (mii_rw(dev, np->phyaddr,
		   PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT1))
		return PHY_ERROR;
	if (mii_rw(dev, np->phyaddr,
		   PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT2))
		return PHY_ERROR;
	phy_reserved = mii_rw(dev, np->phyaddr,
			      PHY_VITESSE_INIT_REG4, MII_READ);
	if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved))
		return PHY_ERROR;
	phy_reserved = mii_rw(dev, np->phyaddr,
			      PHY_VITESSE_INIT_REG3, MII_READ);
	phy_reserved &= ~PHY_VITESSE_INIT_MSK1;
	phy_reserved |= PHY_VITESSE_INIT3;
	if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved))
		return PHY_ERROR;
	if (mii_rw(dev, np->phyaddr,
		   PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT4))
		return PHY_ERROR;
	if (mii_rw(dev, np->phyaddr,
		   PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT5))
		return PHY_ERROR;
	phy_reserved = mii_rw(dev, np->phyaddr,
			      PHY_VITESSE_INIT_REG4, MII_READ);
	phy_reserved &= ~PHY_VITESSE_INIT_MSK1;
	phy_reserved |= PHY_VITESSE_INIT3;
	if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved))
		return PHY_ERROR;
	phy_reserved = mii_rw(dev, np->phyaddr,
			      PHY_VITESSE_INIT_REG3, MII_READ);
	if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved))
		return PHY_ERROR;
	if (mii_rw(dev, np->phyaddr,
		   PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT6))
		return PHY_ERROR;
	if (mii_rw(dev, np->phyaddr,
		   PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT7))
		return PHY_ERROR;
	phy_reserved = mii_rw(dev, np->phyaddr,
			      PHY_VITESSE_INIT_REG4, MII_READ);
	if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved))
		return PHY_ERROR;
	phy_reserved = mii_rw(dev, np->phyaddr,
			      PHY_VITESSE_INIT_REG3, MII_READ);
	phy_reserved &= ~PHY_VITESSE_INIT_MSK2;
	phy_reserved |= PHY_VITESSE_INIT8;
	if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved))
		return PHY_ERROR;
	if (mii_rw(dev, np->phyaddr,
		   PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT9))
		return PHY_ERROR;
	if (mii_rw(dev, np->phyaddr,
		   PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT10))
		return PHY_ERROR;

	return 0;
}

static int phy_init(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 phyinterface;
	u32 mii_status, mii_control, mii_control_1000, reg;

	/* phy errata for E3016 phy */
	if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
		reg = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
		reg &= ~PHY_MARVELL_E3016_INITMASK;
		if (mii_rw(dev, np->phyaddr, MII_NCONFIG, reg)) {
			netdev_info(dev, "%s: phy write to errata reg failed\n",
				    pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}
	if (np->phy_oui == PHY_OUI_REALTEK) {
		if (np->phy_model == PHY_MODEL_REALTEK_8211 &&
		    np->phy_rev == PHY_REV_REALTEK_8211B) {
			if (init_realtek_8211b(dev, np)) {
				netdev_info(dev, "%s: phy init failed\n",
					    pci_name(np->pci_dev));
				return PHY_ERROR;
			}
		} else if (np->phy_model == PHY_MODEL_REALTEK_8211 &&
			   np->phy_rev == PHY_REV_REALTEK_8211C) {
			if (init_realtek_8211c(dev, np)) {
				netdev_info(dev, "%s: phy init failed\n",
					    pci_name(np->pci_dev));
				return PHY_ERROR;
			}
		} else if (np->phy_model == PHY_MODEL_REALTEK_8201) {
			if (init_realtek_8201(dev, np)) {
				netdev_info(dev, "%s: phy init failed\n",
					    pci_name(np->pci_dev));
				return PHY_ERROR;
			}
		}
	}

	/* set advertise register */
	reg = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
	reg |= (ADVERTISE_10HALF | ADVERTISE_10FULL |
		ADVERTISE_100HALF | ADVERTISE_100FULL |
		ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP);
	if (mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg)) {
		netdev_info(dev, "%s: phy write to advertise failed\n",
			    pci_name(np->pci_dev));
		return PHY_ERROR;
	}

	/* get phy interface type */
	phyinterface = readl(base + NvRegPhyInterface);

	/* see if gigabit phy */
	mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
	if (mii_status & PHY_GIGABIT) {
		np->gigabit = PHY_GIGABIT;
		mii_control_1000 = mii_rw(dev, np->phyaddr,
					  MII_CTRL1000, MII_READ);
		mii_control_1000 &= ~ADVERTISE_1000HALF;
		if (phyinterface & PHY_RGMII)
			mii_control_1000 |= ADVERTISE_1000FULL;
		else
			mii_control_1000 &= ~ADVERTISE_1000FULL;

		if (mii_rw(dev, np->phyaddr, MII_CTRL1000, mii_control_1000)) {
			netdev_info(dev, "%s: phy init failed\n",
				    pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	} else
		np->gigabit = 0;

	mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
	mii_control |= BMCR_ANENABLE;

	if (np->phy_oui == PHY_OUI_REALTEK &&
	    np->phy_model == PHY_MODEL_REALTEK_8211 &&
	    np->phy_rev == PHY_REV_REALTEK_8211C) {
		/* start autoneg since we already performed hw reset above */
		mii_control |= BMCR_ANRESTART;
		if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) {
			netdev_info(dev, "%s: phy init failed\n",
				    pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	} else {
		/* reset the phy
		 * (certain phys need bmcr to be setup with reset)
		 */
		if (phy_reset(dev, mii_control)) {
			netdev_info(dev, "%s: phy reset failed\n",
				    pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}

	/* phy vendor specific configuration */
	if (np->phy_oui == PHY_OUI_CICADA) {
		if (init_cicada(dev, np, phyinterface)) {
			netdev_info(dev, "%s: phy init failed\n",
				    pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	} else if (np->phy_oui == PHY_OUI_VITESSE) {
		if (init_vitesse(dev, np)) {
			netdev_info(dev, "%s: phy init failed\n",
				    pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	} else if (np->phy_oui == PHY_OUI_REALTEK) {
		if (np->phy_model == PHY_MODEL_REALTEK_8211 &&
		    np->phy_rev == PHY_REV_REALTEK_8211B) {
			/* reset could have cleared these out, set them back */
			if (init_realtek_8211b(dev, np)) {
				netdev_info(dev, "%s: phy init failed\n",
					    pci_name(np->pci_dev));
				return PHY_ERROR;
			}
		} else if (np->phy_model == PHY_MODEL_REALTEK_8201) {
			if (init_realtek_8201(dev, np) ||
			    init_realtek_8201_cross(dev, np)) {
				netdev_info(dev, "%s: phy init failed\n",
					    pci_name(np->pci_dev));
				return PHY_ERROR;
			}
		}
	}

	/* some phys clear out pause advertisement on reset, set it back */
	mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg);

	/* restart auto negotiation, power down phy */
	mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
	mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE);
	if (phy_power_down)
		mii_control |= BMCR_PDOWN;
	if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control))
		return PHY_ERROR;

	return 0;
}
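
/*
 * phy_init() follows the convention of the helpers above: it returns 0
 * on success and PHY_ERROR when any MII write fails, leaving the PHY
 * with autonegotiation (re)started, or additionally powered down when
 * the phy_power_down option is set.
 */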

static void nv_start_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 rx_ctrl = readl(base + NvRegReceiverControl);

	/* Already running? Stop it. */
	if ((readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) && !np->mac_in_use) {
		rx_ctrl &= ~NVREG_RCVCTL_START;
		writel(rx_ctrl, base + NvRegReceiverControl);
		pci_push(base);
	}
	writel(np->linkspeed, base + NvRegLinkSpeed);
	pci_push(base);
	rx_ctrl |= NVREG_RCVCTL_START;
	if (np->mac_in_use)
		rx_ctrl &= ~NVREG_RCVCTL_RX_PATH_EN;
	writel(rx_ctrl, base + NvRegReceiverControl);
	pci_push(base);
}

static void nv_stop_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 rx_ctrl = readl(base + NvRegReceiverControl);

	if (!np->mac_in_use)
		rx_ctrl &= ~NVREG_RCVCTL_START;
	else
		rx_ctrl |= NVREG_RCVCTL_RX_PATH_EN;
	writel(rx_ctrl, base + NvRegReceiverControl);
	if (reg_delay(dev, NvRegReceiverStatus, NVREG_RCVSTAT_BUSY, 0,
		      NV_RXSTOP_DELAY1, NV_RXSTOP_DELAY1MAX))
		netdev_info(dev, "%s: ReceiverStatus remained busy\n",
			    __func__);

	udelay(NV_RXSTOP_DELAY2);
	if (!np->mac_in_use)
		writel(0, base + NvRegLinkSpeed);
}

static void nv_start_tx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 tx_ctrl = readl(base + NvRegTransmitterControl);

	tx_ctrl |= NVREG_XMITCTL_START;
	if (np->mac_in_use)
		tx_ctrl &= ~NVREG_XMITCTL_TX_PATH_EN;
	writel(tx_ctrl, base + NvRegTransmitterControl);
	pci_push(base);
}

static void nv_stop_tx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 tx_ctrl = readl(base + NvRegTransmitterControl);

	if (!np->mac_in_use)
		tx_ctrl &= ~NVREG_XMITCTL_START;
	else
		tx_ctrl |= NVREG_XMITCTL_TX_PATH_EN;
	writel(tx_ctrl, base + NvRegTransmitterControl);
	if (reg_delay(dev, NvRegTransmitterStatus, NVREG_XMITSTAT_BUSY, 0,
		      NV_TXSTOP_DELAY1, NV_TXSTOP_DELAY1MAX))
		netdev_info(dev, "%s: TransmitterStatus remained busy\n",
			    __func__);

	udelay(NV_TXSTOP_DELAY2);
	if (!np->mac_in_use)
		writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV,
		       base + NvRegTransmitPoll);
}

static void nv_start_rxtx(struct net_device *dev)
{
	nv_start_rx(dev);
	nv_start_tx(dev);
}

static void nv_stop_rxtx(struct net_device *dev)
{
	nv_stop_rx(dev);
	nv_stop_tx(dev);
}

static void nv_txrx_reset(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);
	udelay(NV_TXRX_RESET_DELAY);
	writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);
}
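
/*
 * nv_txrx_reset() above and nv_mac_reset() below share the same
 * assert-delay-deassert pattern: set the reset bit, flush the posted
 * write with pci_push(), wait the documented delay, then clear the bit
 * and flush again so the device has actually left reset before the
 * caller proceeds.
 */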

static void nv_mac_reset(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 temp1, temp2, temp3;

	writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);

	/* save registers since they will be cleared on reset */
	temp1 = readl(base + NvRegMacAddrA);
	temp2 = readl(base + NvRegMacAddrB);
	temp3 = readl(base + NvRegTransmitPoll);

	writel(NVREG_MAC_RESET_ASSERT, base + NvRegMacReset);
	pci_push(base);
	udelay(NV_MAC_RESET_DELAY);
	writel(0, base + NvRegMacReset);
	pci_push(base);
	udelay(NV_MAC_RESET_DELAY);

	/* restore saved registers */
	writel(temp1, base + NvRegMacAddrA);
	writel(temp2, base + NvRegMacAddrB);
	writel(temp3, base + NvRegTransmitPoll);

	writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);
}

/* Caller must appropriately lock netdev_priv(dev)->hwstats_lock */
static void nv_update_stats(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	/* If it happens that this is run in top-half context, then
	 * replace the spin_lock of hwstats_lock with
	 * spin_lock_irqsave() in calling functions. */
	WARN_ONCE(in_irq(), "forcedeth: estats spin_lock(_bh) from top-half");
	assert_spin_locked(&np->hwstats_lock);

	/* query hardware */
	np->estats.tx_bytes += readl(base + NvRegTxCnt);
	np->estats.tx_zero_rexmt += readl(base + NvRegTxZeroReXmt);
	np->estats.tx_one_rexmt += readl(base + NvRegTxOneReXmt);
	np->estats.tx_many_rexmt += readl(base + NvRegTxManyReXmt);
	np->estats.tx_late_collision += readl(base + NvRegTxLateCol);
	np->estats.tx_fifo_errors += readl(base + NvRegTxUnderflow);
	np->estats.tx_carrier_errors += readl(base + NvRegTxLossCarrier);
	np->estats.tx_excess_deferral += readl(base + NvRegTxExcessDef);
	np->estats.tx_retry_error += readl(base + NvRegTxRetryErr);
	np->estats.rx_frame_error += readl(base + NvRegRxFrameErr);
	np->estats.rx_extra_byte += readl(base + NvRegRxExtraByte);
	np->estats.rx_late_collision += readl(base + NvRegRxLateCol);
	np->estats.rx_runt += readl(base + NvRegRxRunt);
	np->estats.rx_frame_too_long += readl(base + NvRegRxFrameTooLong);
	np->estats.rx_over_errors += readl(base + NvRegRxOverflow);
	np->estats.rx_crc_errors += readl(base + NvRegRxFCSErr);
	np->estats.rx_frame_align_error += readl(base + NvRegRxFrameAlignErr);
	np->estats.rx_length_error += readl(base + NvRegRxLenErr);
	np->estats.rx_unicast += readl(base + NvRegRxUnicast);
	np->estats.rx_multicast += readl(base + NvRegRxMulticast);
	np->estats.rx_broadcast += readl(base + NvRegRxBroadcast);
	np->estats.rx_packets =
		np->estats.rx_unicast +
		np->estats.rx_multicast +
		np->estats.rx_broadcast;
	np->estats.rx_errors_total =
		np->estats.rx_crc_errors +
		np->estats.rx_over_errors +
		np->estats.rx_frame_error +
		(np->estats.rx_frame_align_error - np->estats.rx_extra_byte) +
		np->estats.rx_late_collision +
		np->estats.rx_runt +
		np->estats.rx_frame_too_long;
	np->estats.tx_errors_total =
		np->estats.tx_late_collision +
		np->estats.tx_fifo_errors +
		np->estats.tx_carrier_errors +
		np->estats.tx_excess_deferral +
		np->estats.tx_retry_error;

	if (np->driver_data & DEV_HAS_STATISTICS_V2) {
		np->estats.tx_deferral += readl(base + NvRegTxDef);
		np->estats.tx_packets += readl(base + NvRegTxFrame);
		np->estats.rx_bytes += readl(base + NvRegRxCnt);
		np->estats.tx_pause += readl(base + NvRegTxPause);
		np->estats.rx_pause += readl(base + NvRegRxPause);
		np->estats.rx_drop_frame += readl(base + NvRegRxDropFrame);
		np->estats.rx_errors_total += np->estats.rx_drop_frame;
	}

	if (np->driver_data & DEV_HAS_STATISTICS_V3) {
		np->estats.tx_unicast += readl(base + NvRegTxUnicast);
		np->estats.tx_multicast += readl(base + NvRegTxMulticast);
		np->estats.tx_broadcast += readl(base + NvRegTxBroadcast);
	}
}
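
/*
 * Caller-side sketch for nv_update_stats(): the accumulators in
 * np->estats are only consistent under hwstats_lock, and the WARN_ONCE
 * above forbids top-half context, so callers take the lock in
 * bottom-half context around the call:
 *
 *	spin_lock_bh(&np->hwstats_lock);
 *	nv_update_stats(dev);
 *	spin_unlock_bh(&np->hwstats_lock);
 *
 * as nv_get_stats64() does below.
 */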
+= readl(base + NvRegTxMulticast); 1732 np->estats.tx_broadcast += readl(base + NvRegTxBroadcast); 1733 } 1734 } 1735 1736 /* 1737 * nv_get_stats64: dev->ndo_get_stats64 function 1738 * Get latest stats value from the nic. 1739 * Called with read_lock(&dev_base_lock) held for read - 1740 * only synchronized against unregister_netdevice. 1741 */ 1742 static void 1743 nv_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *storage) 1744 __acquires(&netdev_priv(dev)->hwstats_lock) 1745 __releases(&netdev_priv(dev)->hwstats_lock) 1746 { 1747 struct fe_priv *np = netdev_priv(dev); 1748 unsigned int syncp_start; 1749 1750 /* 1751 * Note: because HW stats are not always available and for 1752 * consistency reasons, the following ifconfig stats are 1753 * managed by software: rx_bytes, tx_bytes, rx_packets and 1754 * tx_packets. The related hardware stats reported by ethtool 1755 * should be equivalent to these ifconfig stats, with 4 1756 * additional bytes per packet (Ethernet FCS CRC), except for 1757 * tx_packets when TSO kicks in. 1758 */ 1759 1760 /* software stats */ 1761 do { 1762 syncp_start = u64_stats_fetch_begin_irq(&np->swstats_rx_syncp); 1763 storage->rx_packets = np->stat_rx_packets; 1764 storage->rx_bytes = np->stat_rx_bytes; 1765 storage->rx_dropped = np->stat_rx_dropped; 1766 storage->rx_missed_errors = np->stat_rx_missed_errors; 1767 } while (u64_stats_fetch_retry_irq(&np->swstats_rx_syncp, syncp_start)); 1768 1769 do { 1770 syncp_start = u64_stats_fetch_begin_irq(&np->swstats_tx_syncp); 1771 storage->tx_packets = np->stat_tx_packets; 1772 storage->tx_bytes = np->stat_tx_bytes; 1773 storage->tx_dropped = np->stat_tx_dropped; 1774 } while (u64_stats_fetch_retry_irq(&np->swstats_tx_syncp, syncp_start)); 1775 1776 /* If the nic supports hw counters then retrieve latest values */ 1777 if (np->driver_data & DEV_HAS_STATISTICS_V123) { 1778 spin_lock_bh(&np->hwstats_lock); 1779 1780 nv_update_stats(dev); 1781 1782 /* generic stats */ 1783 storage->rx_errors = np->estats.rx_errors_total; 1784 storage->tx_errors = np->estats.tx_errors_total; 1785 1786 /* meaningful only when NIC supports stats v3 */ 1787 storage->multicast = np->estats.rx_multicast; 1788 1789 /* detailed rx_errors */ 1790 storage->rx_length_errors = np->estats.rx_length_error; 1791 storage->rx_over_errors = np->estats.rx_over_errors; 1792 storage->rx_crc_errors = np->estats.rx_crc_errors; 1793 storage->rx_frame_errors = np->estats.rx_frame_align_error; 1794 storage->rx_fifo_errors = np->estats.rx_drop_frame; 1795 1796 /* detailed tx_errors */ 1797 storage->tx_carrier_errors = np->estats.tx_carrier_errors; 1798 storage->tx_fifo_errors = np->estats.tx_fifo_errors; 1799 1800 spin_unlock_bh(&np->hwstats_lock); 1801 } 1802 } 1803 1804 /* 1805 * nv_alloc_rx: fill rx ring entries. 
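 * (The ring is refilled until put_rx sits one slot behind get_rx, so the
 * two pointers can never alias on a completely full ring.)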
1806 * Return 1 if the allocations for the skbs failed and the 1807 * rx engine is without available descriptors 1808 */ 1809 static int nv_alloc_rx(struct net_device *dev) 1810 { 1811 struct fe_priv *np = netdev_priv(dev); 1812 struct ring_desc *less_rx; 1813 1814 less_rx = np->get_rx.orig; 1815 if (less_rx-- == np->first_rx.orig) 1816 less_rx = np->last_rx.orig; 1817 1818 while (np->put_rx.orig != less_rx) { 1819 struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz + NV_RX_ALLOC_PAD); 1820 if (likely(skb)) { 1821 np->put_rx_ctx->skb = skb; 1822 np->put_rx_ctx->dma = dma_map_single(&np->pci_dev->dev, 1823 skb->data, 1824 skb_tailroom(skb), 1825 DMA_FROM_DEVICE); 1826 if (unlikely(dma_mapping_error(&np->pci_dev->dev, 1827 np->put_rx_ctx->dma))) { 1828 kfree_skb(skb); 1829 goto packet_dropped; 1830 } 1831 np->put_rx_ctx->dma_len = skb_tailroom(skb); 1832 np->put_rx.orig->buf = cpu_to_le32(np->put_rx_ctx->dma); 1833 wmb(); 1834 np->put_rx.orig->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL); 1835 if (unlikely(np->put_rx.orig++ == np->last_rx.orig)) 1836 np->put_rx.orig = np->first_rx.orig; 1837 if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx)) 1838 np->put_rx_ctx = np->first_rx_ctx; 1839 } else { 1840 packet_dropped: 1841 u64_stats_update_begin(&np->swstats_rx_syncp); 1842 np->stat_rx_dropped++; 1843 u64_stats_update_end(&np->swstats_rx_syncp); 1844 return 1; 1845 } 1846 } 1847 return 0; 1848 } 1849 1850 static int nv_alloc_rx_optimized(struct net_device *dev) 1851 { 1852 struct fe_priv *np = netdev_priv(dev); 1853 struct ring_desc_ex *less_rx; 1854 1855 less_rx = np->get_rx.ex; 1856 if (less_rx-- == np->first_rx.ex) 1857 less_rx = np->last_rx.ex; 1858 1859 while (np->put_rx.ex != less_rx) { 1860 struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz + NV_RX_ALLOC_PAD); 1861 if (likely(skb)) { 1862 np->put_rx_ctx->skb = skb; 1863 np->put_rx_ctx->dma = dma_map_single(&np->pci_dev->dev, 1864 skb->data, 1865 skb_tailroom(skb), 1866 DMA_FROM_DEVICE); 1867 if (unlikely(dma_mapping_error(&np->pci_dev->dev, 1868 np->put_rx_ctx->dma))) { 1869 kfree_skb(skb); 1870 goto packet_dropped; 1871 } 1872 np->put_rx_ctx->dma_len = skb_tailroom(skb); 1873 np->put_rx.ex->bufhigh = cpu_to_le32(dma_high(np->put_rx_ctx->dma)); 1874 np->put_rx.ex->buflow = cpu_to_le32(dma_low(np->put_rx_ctx->dma)); 1875 wmb(); 1876 np->put_rx.ex->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL); 1877 if (unlikely(np->put_rx.ex++ == np->last_rx.ex)) 1878 np->put_rx.ex = np->first_rx.ex; 1879 if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx)) 1880 np->put_rx_ctx = np->first_rx_ctx; 1881 } else { 1882 packet_dropped: 1883 u64_stats_update_begin(&np->swstats_rx_syncp); 1884 np->stat_rx_dropped++; 1885 u64_stats_update_end(&np->swstats_rx_syncp); 1886 return 1; 1887 } 1888 } 1889 return 0; 1890 } 1891 1892 /* If rx bufs are exhausted, this is called after 50ms to attempt a refill */ 1893 static void nv_do_rx_refill(struct timer_list *t) 1894 { 1895 struct fe_priv *np = from_timer(np, t, oom_kick); 1896 1897 /* Just reschedule NAPI rx processing */ 1898 napi_schedule(&np->napi); 1899 } 1900 1901 static void nv_init_rx(struct net_device *dev) 1902 { 1903 struct fe_priv *np = netdev_priv(dev); 1904 int i; 1905 1906 np->get_rx = np->put_rx = np->first_rx = np->rx_ring; 1907 1908 if (!nv_optimized(np)) 1909 np->last_rx.orig = &np->rx_ring.orig[np->rx_ring_size-1]; 1910 else 1911 np->last_rx.ex = &np->rx_ring.ex[np->rx_ring_size-1]; 1912 np->get_rx_ctx = np->put_rx_ctx = np->first_rx_ctx = np->rx_skb; 1913 np->last_rx_ctx =
&np->rx_skb[np->rx_ring_size-1]; 1914 1915 for (i = 0; i < np->rx_ring_size; i++) { 1916 if (!nv_optimized(np)) { 1917 np->rx_ring.orig[i].flaglen = 0; 1918 np->rx_ring.orig[i].buf = 0; 1919 } else { 1920 np->rx_ring.ex[i].flaglen = 0; 1921 np->rx_ring.ex[i].txvlan = 0; 1922 np->rx_ring.ex[i].bufhigh = 0; 1923 np->rx_ring.ex[i].buflow = 0; 1924 } 1925 np->rx_skb[i].skb = NULL; 1926 np->rx_skb[i].dma = 0; 1927 } 1928 } 1929 1930 static void nv_init_tx(struct net_device *dev) 1931 { 1932 struct fe_priv *np = netdev_priv(dev); 1933 int i; 1934 1935 np->get_tx = np->tx_ring; 1936 np->put_tx = np->tx_ring; 1937 1938 if (!nv_optimized(np)) 1939 np->last_tx.orig = &np->tx_ring.orig[np->tx_ring_size-1]; 1940 else 1941 np->last_tx.ex = &np->tx_ring.ex[np->tx_ring_size-1]; 1942 np->get_tx_ctx = np->tx_skb; 1943 np->put_tx_ctx = np->tx_skb; 1944 np->last_tx_ctx = &np->tx_skb[np->tx_ring_size-1]; 1945 netdev_reset_queue(np->dev); 1946 np->tx_pkts_in_progress = 0; 1947 np->tx_change_owner = NULL; 1948 np->tx_end_flip = NULL; 1949 np->tx_stop = 0; 1950 1951 for (i = 0; i < np->tx_ring_size; i++) { 1952 if (!nv_optimized(np)) { 1953 np->tx_ring.orig[i].flaglen = 0; 1954 np->tx_ring.orig[i].buf = 0; 1955 } else { 1956 np->tx_ring.ex[i].flaglen = 0; 1957 np->tx_ring.ex[i].txvlan = 0; 1958 np->tx_ring.ex[i].bufhigh = 0; 1959 np->tx_ring.ex[i].buflow = 0; 1960 } 1961 np->tx_skb[i].skb = NULL; 1962 np->tx_skb[i].dma = 0; 1963 np->tx_skb[i].dma_len = 0; 1964 np->tx_skb[i].dma_single = 0; 1965 np->tx_skb[i].first_tx_desc = NULL; 1966 np->tx_skb[i].next_tx_ctx = NULL; 1967 } 1968 } 1969 1970 static int nv_init_ring(struct net_device *dev) 1971 { 1972 struct fe_priv *np = netdev_priv(dev); 1973 1974 nv_init_tx(dev); 1975 nv_init_rx(dev); 1976 1977 if (!nv_optimized(np)) 1978 return nv_alloc_rx(dev); 1979 else 1980 return nv_alloc_rx_optimized(dev); 1981 } 1982 1983 static void nv_unmap_txskb(struct fe_priv *np, struct nv_skb_map *tx_skb) 1984 { 1985 if (tx_skb->dma) { 1986 if (tx_skb->dma_single) 1987 dma_unmap_single(&np->pci_dev->dev, tx_skb->dma, 1988 tx_skb->dma_len, 1989 DMA_TO_DEVICE); 1990 else 1991 dma_unmap_page(&np->pci_dev->dev, tx_skb->dma, 1992 tx_skb->dma_len, 1993 DMA_TO_DEVICE); 1994 tx_skb->dma = 0; 1995 } 1996 } 1997 1998 static int nv_release_txskb(struct fe_priv *np, struct nv_skb_map *tx_skb) 1999 { 2000 nv_unmap_txskb(np, tx_skb); 2001 if (tx_skb->skb) { 2002 dev_kfree_skb_any(tx_skb->skb); 2003 tx_skb->skb = NULL; 2004 return 1; 2005 } 2006 return 0; 2007 } 2008 2009 static void nv_drain_tx(struct net_device *dev) 2010 { 2011 struct fe_priv *np = netdev_priv(dev); 2012 unsigned int i; 2013 2014 for (i = 0; i < np->tx_ring_size; i++) { 2015 if (!nv_optimized(np)) { 2016 np->tx_ring.orig[i].flaglen = 0; 2017 np->tx_ring.orig[i].buf = 0; 2018 } else { 2019 np->tx_ring.ex[i].flaglen = 0; 2020 np->tx_ring.ex[i].txvlan = 0; 2021 np->tx_ring.ex[i].bufhigh = 0; 2022 np->tx_ring.ex[i].buflow = 0; 2023 } 2024 if (nv_release_txskb(np, &np->tx_skb[i])) { 2025 u64_stats_update_begin(&np->swstats_tx_syncp); 2026 np->stat_tx_dropped++; 2027 u64_stats_update_end(&np->swstats_tx_syncp); 2028 } 2029 np->tx_skb[i].dma = 0; 2030 np->tx_skb[i].dma_len = 0; 2031 np->tx_skb[i].dma_single = 0; 2032 np->tx_skb[i].first_tx_desc = NULL; 2033 np->tx_skb[i].next_tx_ctx = NULL; 2034 } 2035 np->tx_pkts_in_progress = 0; 2036 np->tx_change_owner = NULL; 2037 np->tx_end_flip = NULL; 2038 } 2039 2040 static void nv_drain_rx(struct net_device *dev) 2041 { 2042 struct fe_priv *np = netdev_priv(dev); 2043 int i; 2044 2045 for 
(i = 0; i < np->rx_ring_size; i++) { 2046 if (!nv_optimized(np)) { 2047 np->rx_ring.orig[i].flaglen = 0; 2048 np->rx_ring.orig[i].buf = 0; 2049 } else { 2050 np->rx_ring.ex[i].flaglen = 0; 2051 np->rx_ring.ex[i].txvlan = 0; 2052 np->rx_ring.ex[i].bufhigh = 0; 2053 np->rx_ring.ex[i].buflow = 0; 2054 } 2055 wmb(); 2056 if (np->rx_skb[i].skb) { 2057 dma_unmap_single(&np->pci_dev->dev, np->rx_skb[i].dma, 2058 (skb_end_pointer(np->rx_skb[i].skb) - 2059 np->rx_skb[i].skb->data), 2060 DMA_FROM_DEVICE); 2061 dev_kfree_skb(np->rx_skb[i].skb); 2062 np->rx_skb[i].skb = NULL; 2063 } 2064 } 2065 } 2066 2067 static void nv_drain_rxtx(struct net_device *dev) 2068 { 2069 nv_drain_tx(dev); 2070 nv_drain_rx(dev); 2071 } 2072 2073 static inline u32 nv_get_empty_tx_slots(struct fe_priv *np) 2074 { 2075 return (u32)(np->tx_ring_size - ((np->tx_ring_size + (np->put_tx_ctx - np->get_tx_ctx)) % np->tx_ring_size)); 2076 } 2077 2078 static void nv_legacybackoff_reseed(struct net_device *dev) 2079 { 2080 u8 __iomem *base = get_hwbase(dev); 2081 u32 reg; 2082 u32 low; 2083 int tx_status = 0; 2084 2085 reg = readl(base + NvRegSlotTime) & ~NVREG_SLOTTIME_MASK; 2086 get_random_bytes(&low, sizeof(low)); 2087 reg |= low & NVREG_SLOTTIME_MASK; 2088 2089 /* Need to stop tx before change takes effect. 2090 * Caller has already gained np->lock. 2091 */ 2092 tx_status = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_START; 2093 if (tx_status) 2094 nv_stop_tx(dev); 2095 nv_stop_rx(dev); 2096 writel(reg, base + NvRegSlotTime); 2097 if (tx_status) 2098 nv_start_tx(dev); 2099 nv_start_rx(dev); 2100 } 2101 2102 /* Gear Backoff Seeds */ 2103 #define BACKOFF_SEEDSET_ROWS 8 2104 #define BACKOFF_SEEDSET_LFSRS 15 2105 2106 /* Known Good seed sets */ 2107 static const u32 main_seedset[BACKOFF_SEEDSET_ROWS][BACKOFF_SEEDSET_LFSRS] = { 2108 {145, 155, 165, 175, 185, 196, 235, 245, 255, 265, 275, 285, 660, 690, 874}, 2109 {245, 255, 265, 575, 385, 298, 335, 345, 355, 366, 375, 385, 761, 790, 974}, 2110 {145, 155, 165, 175, 185, 196, 235, 245, 255, 265, 275, 285, 660, 690, 874}, 2111 {245, 255, 265, 575, 385, 298, 335, 345, 355, 366, 375, 386, 761, 790, 974}, 2112 {266, 265, 276, 585, 397, 208, 345, 355, 365, 376, 385, 396, 771, 700, 984}, 2113 {266, 265, 276, 586, 397, 208, 346, 355, 365, 376, 285, 396, 771, 700, 984}, 2114 {366, 365, 376, 686, 497, 308, 447, 455, 466, 476, 485, 496, 871, 800, 84}, 2115 {466, 465, 476, 786, 597, 408, 547, 555, 566, 576, 585, 597, 971, 900, 184} }; 2116 2117 static const u32 gear_seedset[BACKOFF_SEEDSET_ROWS][BACKOFF_SEEDSET_LFSRS] = { 2118 {251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375, 30, 295}, 2119 {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395}, 2120 {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 397}, 2121 {251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375, 30, 295}, 2122 {251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375, 30, 295}, 2123 {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395}, 2124 {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395}, 2125 {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395} }; 2126 2127 static void nv_gear_backoff_reseed(struct net_device *dev) 2128 { 2129 u8 __iomem *base = get_hwbase(dev); 2130 u32 miniseed1, miniseed2, miniseed2_reversed, miniseed3, miniseed3_reversed; 2131 u32 temp, seedset, combinedSeed; 2132 int i; 2133 2134 /* Setup seed for free running LFSR */ 2135 /* We are going to read the 
random number generator 3 times 2136 and swizzle bits around to increase randomness */ 2137 get_random_bytes(&miniseed1, sizeof(miniseed1)); 2138 miniseed1 &= 0x0fff; 2139 if (miniseed1 == 0) 2140 miniseed1 = 0xabc; 2141 2142 get_random_bytes(&miniseed2, sizeof(miniseed2)); 2143 miniseed2 &= 0x0fff; 2144 if (miniseed2 == 0) 2145 miniseed2 = 0xabc; 2146 miniseed2_reversed = 2147 ((miniseed2 & 0xF00) >> 8) | 2148 (miniseed2 & 0x0F0) | 2149 ((miniseed2 & 0x00F) << 8); 2150 2151 get_random_bytes(&miniseed3, sizeof(miniseed3)); 2152 miniseed3 &= 0x0fff; 2153 if (miniseed3 == 0) 2154 miniseed3 = 0xabc; 2155 miniseed3_reversed = 2156 ((miniseed3 & 0xF00) >> 8) | 2157 (miniseed3 & 0x0F0) | 2158 ((miniseed3 & 0x00F) << 8); 2159 2160 combinedSeed = ((miniseed1 ^ miniseed2_reversed) << 12) | 2161 (miniseed2 ^ miniseed3_reversed); 2162 2163 /* Seeds cannot be zero */ 2164 if ((combinedSeed & NVREG_BKOFFCTRL_SEED_MASK) == 0) 2165 combinedSeed |= 0x08; 2166 if ((combinedSeed & (NVREG_BKOFFCTRL_SEED_MASK << NVREG_BKOFFCTRL_GEAR)) == 0) 2167 combinedSeed |= 0x8000; 2168 2169 /* No need to disable tx here */ 2170 temp = NVREG_BKOFFCTRL_DEFAULT | (0 << NVREG_BKOFFCTRL_SELECT); 2171 temp |= combinedSeed & NVREG_BKOFFCTRL_SEED_MASK; 2172 temp |= combinedSeed >> NVREG_BKOFFCTRL_GEAR; 2173 writel(temp, base + NvRegBackOffControl); 2174 2175 /* Setup seeds for all gear LFSRs. */ 2176 get_random_bytes(&seedset, sizeof(seedset)); 2177 seedset = seedset % BACKOFF_SEEDSET_ROWS; 2178 for (i = 1; i <= BACKOFF_SEEDSET_LFSRS; i++) { 2179 temp = NVREG_BKOFFCTRL_DEFAULT | (i << NVREG_BKOFFCTRL_SELECT); 2180 temp |= main_seedset[seedset][i-1] & 0x3ff; 2181 temp |= ((gear_seedset[seedset][i-1] & 0x3ff) << NVREG_BKOFFCTRL_GEAR); 2182 writel(temp, base + NvRegBackOffControl); 2183 } 2184 } 2185 2186 /* 2187 * nv_start_xmit: dev->hard_start_xmit function 2188 * Called with netif_tx_lock held. 2189 */ 2190 static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev) 2191 { 2192 struct fe_priv *np = netdev_priv(dev); 2193 u32 tx_flags = 0; 2194 u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET); 2195 unsigned int fragments = skb_shinfo(skb)->nr_frags; 2196 unsigned int i; 2197 u32 offset = 0; 2198 u32 bcnt; 2199 u32 size = skb_headlen(skb); 2200 u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0); 2201 u32 empty_slots; 2202 struct ring_desc *put_tx; 2203 struct ring_desc *start_tx; 2204 struct ring_desc *prev_tx; 2205 struct nv_skb_map *prev_tx_ctx; 2206 struct nv_skb_map *tmp_tx_ctx = NULL, *start_tx_ctx = NULL; 2207 unsigned long flags; 2208 2209 /* add fragments to entries count */ 2210 for (i = 0; i < fragments; i++) { 2211 u32 frag_size = skb_frag_size(&skb_shinfo(skb)->frags[i]); 2212 2213 entries += (frag_size >> NV_TX2_TSO_MAX_SHIFT) + 2214 ((frag_size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0); 2215 } 2216 2217 spin_lock_irqsave(&np->lock, flags); 2218 empty_slots = nv_get_empty_tx_slots(np); 2219 if (unlikely(empty_slots <= entries)) { 2220 netif_stop_queue(dev); 2221 np->tx_stop = 1; 2222 spin_unlock_irqrestore(&np->lock, flags); 2223 return NETDEV_TX_BUSY; 2224 } 2225 spin_unlock_irqrestore(&np->lock, flags); 2226 2227 start_tx = put_tx = np->put_tx.orig; 2228 2229 /* setup the header buffer */ 2230 do { 2231 bcnt = (size > NV_TX2_TSO_MAX_SIZE) ?
NV_TX2_TSO_MAX_SIZE : size; 2232 np->put_tx_ctx->dma = dma_map_single(&np->pci_dev->dev, 2233 skb->data + offset, bcnt, 2234 DMA_TO_DEVICE); 2235 if (unlikely(dma_mapping_error(&np->pci_dev->dev, 2236 np->put_tx_ctx->dma))) { 2237 /* on DMA mapping error - drop the packet */ 2238 dev_kfree_skb_any(skb); 2239 u64_stats_update_begin(&np->swstats_tx_syncp); 2240 np->stat_tx_dropped++; 2241 u64_stats_update_end(&np->swstats_tx_syncp); 2242 return NETDEV_TX_OK; 2243 } 2244 np->put_tx_ctx->dma_len = bcnt; 2245 np->put_tx_ctx->dma_single = 1; 2246 put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma); 2247 put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags); 2248 2249 tx_flags = np->tx_flags; 2250 offset += bcnt; 2251 size -= bcnt; 2252 if (unlikely(put_tx++ == np->last_tx.orig)) 2253 put_tx = np->tx_ring.orig; 2254 if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx)) 2255 np->put_tx_ctx = np->tx_skb; 2256 } while (size); 2257 2258 /* setup the fragments */ 2259 for (i = 0; i < fragments; i++) { 2260 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 2261 u32 frag_size = skb_frag_size(frag); 2262 offset = 0; 2263 2264 do { 2265 if (!start_tx_ctx) 2266 start_tx_ctx = tmp_tx_ctx = np->put_tx_ctx; 2267 2268 bcnt = (frag_size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : frag_size; 2269 np->put_tx_ctx->dma = skb_frag_dma_map( 2270 &np->pci_dev->dev, 2271 frag, offset, 2272 bcnt, 2273 DMA_TO_DEVICE); 2274 if (unlikely(dma_mapping_error(&np->pci_dev->dev, 2275 np->put_tx_ctx->dma))) { 2276 2277 /* Unwind the mapped fragments */ 2278 do { 2279 nv_unmap_txskb(np, start_tx_ctx); 2280 if (unlikely(tmp_tx_ctx++ == np->last_tx_ctx)) 2281 tmp_tx_ctx = np->tx_skb; 2282 } while (tmp_tx_ctx != np->put_tx_ctx); 2283 dev_kfree_skb_any(skb); 2284 np->put_tx_ctx = start_tx_ctx; 2285 u64_stats_update_begin(&np->swstats_tx_syncp); 2286 np->stat_tx_dropped++; 2287 u64_stats_update_end(&np->swstats_tx_syncp); 2288 return NETDEV_TX_OK; 2289 } 2290 2291 np->put_tx_ctx->dma_len = bcnt; 2292 np->put_tx_ctx->dma_single = 0; 2293 put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma); 2294 put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags); 2295 2296 offset += bcnt; 2297 frag_size -= bcnt; 2298 if (unlikely(put_tx++ == np->last_tx.orig)) 2299 put_tx = np->tx_ring.orig; 2300 if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx)) 2301 np->put_tx_ctx = np->tx_skb; 2302 } while (frag_size); 2303 } 2304 2305 if (unlikely(put_tx == np->tx_ring.orig)) 2306 prev_tx = np->last_tx.orig; 2307 else 2308 prev_tx = put_tx - 1; 2309 2310 if (unlikely(np->put_tx_ctx == np->tx_skb)) 2311 prev_tx_ctx = np->last_tx_ctx; 2312 else 2313 prev_tx_ctx = np->put_tx_ctx - 1; 2314 2315 /* set last fragment flag */ 2316 prev_tx->flaglen |= cpu_to_le32(tx_flags_extra); 2317 2318 /* save skb in this slot's context area */ 2319 prev_tx_ctx->skb = skb; 2320 2321 if (skb_is_gso(skb)) 2322 tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT); 2323 else 2324 tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ? 
2325 NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0; 2326 2327 spin_lock_irqsave(&np->lock, flags); 2328 2329 /* set tx flags */ 2330 start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra); 2331 2332 netdev_sent_queue(np->dev, skb->len); 2333 2334 skb_tx_timestamp(skb); 2335 2336 np->put_tx.orig = put_tx; 2337 2338 spin_unlock_irqrestore(&np->lock, flags); 2339 2340 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 2341 return NETDEV_TX_OK; 2342 } 2343 2344 static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb, 2345 struct net_device *dev) 2346 { 2347 struct fe_priv *np = netdev_priv(dev); 2348 u32 tx_flags = 0; 2349 u32 tx_flags_extra; 2350 unsigned int fragments = skb_shinfo(skb)->nr_frags; 2351 unsigned int i; 2352 u32 offset = 0; 2353 u32 bcnt; 2354 u32 size = skb_headlen(skb); 2355 u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0); 2356 u32 empty_slots; 2357 struct ring_desc_ex *put_tx; 2358 struct ring_desc_ex *start_tx; 2359 struct ring_desc_ex *prev_tx; 2360 struct nv_skb_map *prev_tx_ctx; 2361 struct nv_skb_map *start_tx_ctx = NULL; 2362 struct nv_skb_map *tmp_tx_ctx = NULL; 2363 unsigned long flags; 2364 2365 /* add fragments to entries count */ 2366 for (i = 0; i < fragments; i++) { 2367 u32 frag_size = skb_frag_size(&skb_shinfo(skb)->frags[i]); 2368 2369 entries += (frag_size >> NV_TX2_TSO_MAX_SHIFT) + 2370 ((frag_size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0); 2371 } 2372 2373 spin_lock_irqsave(&np->lock, flags); 2374 empty_slots = nv_get_empty_tx_slots(np); 2375 if (unlikely(empty_slots <= entries)) { 2376 netif_stop_queue(dev); 2377 np->tx_stop = 1; 2378 spin_unlock_irqrestore(&np->lock, flags); 2379 return NETDEV_TX_BUSY; 2380 } 2381 spin_unlock_irqrestore(&np->lock, flags); 2382 2383 start_tx = put_tx = np->put_tx.ex; 2384 start_tx_ctx = np->put_tx_ctx; 2385 2386 /* setup the header buffer */ 2387 do { 2388 bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size; 2389 np->put_tx_ctx->dma = dma_map_single(&np->pci_dev->dev, 2390 skb->data + offset, bcnt, 2391 DMA_TO_DEVICE); 2392 if (unlikely(dma_mapping_error(&np->pci_dev->dev, 2393 np->put_tx_ctx->dma))) { 2394 /* on DMA mapping error - drop the packet */ 2395 dev_kfree_skb_any(skb); 2396 u64_stats_update_begin(&np->swstats_tx_syncp); 2397 np->stat_tx_dropped++; 2398 u64_stats_update_end(&np->swstats_tx_syncp); 2399 return NETDEV_TX_OK; 2400 } 2401 np->put_tx_ctx->dma_len = bcnt; 2402 np->put_tx_ctx->dma_single = 1; 2403 put_tx->bufhigh = cpu_to_le32(dma_high(np->put_tx_ctx->dma)); 2404 put_tx->buflow = cpu_to_le32(dma_low(np->put_tx_ctx->dma)); 2405 put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags); 2406 2407 tx_flags = NV_TX2_VALID; 2408 offset += bcnt; 2409 size -= bcnt; 2410 if (unlikely(put_tx++ == np->last_tx.ex)) 2411 put_tx = np->tx_ring.ex; 2412 if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx)) 2413 np->put_tx_ctx = np->tx_skb; 2414 } while (size); 2415 2416 /* setup the fragments */ 2417 for (i = 0; i < fragments; i++) { 2418 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 2419 u32 frag_size = skb_frag_size(frag); 2420 offset = 0; 2421 2422 do { 2423 bcnt = (frag_size > NV_TX2_TSO_MAX_SIZE) ? 
NV_TX2_TSO_MAX_SIZE : frag_size; 2424 if (!start_tx_ctx) 2425 start_tx_ctx = tmp_tx_ctx = np->put_tx_ctx; 2426 np->put_tx_ctx->dma = skb_frag_dma_map( 2427 &np->pci_dev->dev, 2428 frag, offset, 2429 bcnt, 2430 DMA_TO_DEVICE); 2431 2432 if (unlikely(dma_mapping_error(&np->pci_dev->dev, 2433 np->put_tx_ctx->dma))) { 2434 2435 /* Unwind the mapped fragments */ 2436 do { 2437 nv_unmap_txskb(np, start_tx_ctx); 2438 if (unlikely(tmp_tx_ctx++ == np->last_tx_ctx)) 2439 tmp_tx_ctx = np->tx_skb; 2440 } while (tmp_tx_ctx != np->put_tx_ctx); 2441 dev_kfree_skb_any(skb); 2442 np->put_tx_ctx = start_tx_ctx; 2443 u64_stats_update_begin(&np->swstats_tx_syncp); 2444 np->stat_tx_dropped++; 2445 u64_stats_update_end(&np->swstats_tx_syncp); 2446 return NETDEV_TX_OK; 2447 } 2448 np->put_tx_ctx->dma_len = bcnt; 2449 np->put_tx_ctx->dma_single = 0; 2450 put_tx->bufhigh = cpu_to_le32(dma_high(np->put_tx_ctx->dma)); 2451 put_tx->buflow = cpu_to_le32(dma_low(np->put_tx_ctx->dma)); 2452 put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags); 2453 2454 offset += bcnt; 2455 frag_size -= bcnt; 2456 if (unlikely(put_tx++ == np->last_tx.ex)) 2457 put_tx = np->tx_ring.ex; 2458 if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx)) 2459 np->put_tx_ctx = np->tx_skb; 2460 } while (frag_size); 2461 } 2462 2463 if (unlikely(put_tx == np->tx_ring.ex)) 2464 prev_tx = np->last_tx.ex; 2465 else 2466 prev_tx = put_tx - 1; 2467 2468 if (unlikely(np->put_tx_ctx == np->tx_skb)) 2469 prev_tx_ctx = np->last_tx_ctx; 2470 else 2471 prev_tx_ctx = np->put_tx_ctx - 1; 2472 2473 /* set last fragment flag */ 2474 prev_tx->flaglen |= cpu_to_le32(NV_TX2_LASTPACKET); 2475 2476 /* save skb in this slot's context area */ 2477 prev_tx_ctx->skb = skb; 2478 2479 if (skb_is_gso(skb)) 2480 tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT); 2481 else 2482 tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ? 2483 NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0; 2484 2485 /* vlan tag */ 2486 if (skb_vlan_tag_present(skb)) 2487 start_tx->txvlan = cpu_to_le32(NV_TX3_VLAN_TAG_PRESENT | 2488 skb_vlan_tag_get(skb)); 2489 else 2490 start_tx->txvlan = 0; 2491 2492 spin_lock_irqsave(&np->lock, flags); 2493 2494 if (np->tx_limit) { 2495 /* Limit the number of outstanding tx. Set up all fragments, but 2496 * do not set the VALID bit on the first descriptor. Save a pointer 2497 * to that descriptor and to the next skb_map element as well.
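 * As earlier packets complete, nv_tx_flip_ownership() sets the VALID bit
 * on the saved descriptor and releases the deferred packet to the hardware.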
2498 */ 2499 2500 if (np->tx_pkts_in_progress == NV_TX_LIMIT_COUNT) { 2501 if (!np->tx_change_owner) 2502 np->tx_change_owner = start_tx_ctx; 2503 2504 /* remove VALID bit */ 2505 tx_flags &= ~NV_TX2_VALID; 2506 start_tx_ctx->first_tx_desc = start_tx; 2507 start_tx_ctx->next_tx_ctx = np->put_tx_ctx; 2508 np->tx_end_flip = np->put_tx_ctx; 2509 } else { 2510 np->tx_pkts_in_progress++; 2511 } 2512 } 2513 2514 /* set tx flags */ 2515 start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra); 2516 2517 netdev_sent_queue(np->dev, skb->len); 2518 2519 skb_tx_timestamp(skb); 2520 2521 np->put_tx.ex = put_tx; 2522 2523 spin_unlock_irqrestore(&np->lock, flags); 2524 2525 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 2526 return NETDEV_TX_OK; 2527 } 2528 2529 static inline void nv_tx_flip_ownership(struct net_device *dev) 2530 { 2531 struct fe_priv *np = netdev_priv(dev); 2532 2533 np->tx_pkts_in_progress--; 2534 if (np->tx_change_owner) { 2535 np->tx_change_owner->first_tx_desc->flaglen |= 2536 cpu_to_le32(NV_TX2_VALID); 2537 np->tx_pkts_in_progress++; 2538 2539 np->tx_change_owner = np->tx_change_owner->next_tx_ctx; 2540 if (np->tx_change_owner == np->tx_end_flip) 2541 np->tx_change_owner = NULL; 2542 2543 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 2544 } 2545 } 2546 2547 /* 2548 * nv_tx_done: check for completed packets, release the skbs. 2549 * 2550 * Caller must own np->lock. 2551 */ 2552 static int nv_tx_done(struct net_device *dev, int limit) 2553 { 2554 struct fe_priv *np = netdev_priv(dev); 2555 u32 flags; 2556 int tx_work = 0; 2557 struct ring_desc *orig_get_tx = np->get_tx.orig; 2558 unsigned int bytes_compl = 0; 2559 2560 while ((np->get_tx.orig != np->put_tx.orig) && 2561 !((flags = le32_to_cpu(np->get_tx.orig->flaglen)) & NV_TX_VALID) && 2562 (tx_work < limit)) { 2563 2564 nv_unmap_txskb(np, np->get_tx_ctx); 2565 2566 if (np->desc_ver == DESC_VER_1) { 2567 if (flags & NV_TX_LASTPACKET) { 2568 if (unlikely(flags & NV_TX_ERROR)) { 2569 if ((flags & NV_TX_RETRYERROR) 2570 && !(flags & NV_TX_RETRYCOUNT_MASK)) 2571 nv_legacybackoff_reseed(dev); 2572 } else { 2573 u64_stats_update_begin(&np->swstats_tx_syncp); 2574 np->stat_tx_packets++; 2575 np->stat_tx_bytes += np->get_tx_ctx->skb->len; 2576 u64_stats_update_end(&np->swstats_tx_syncp); 2577 } 2578 bytes_compl += np->get_tx_ctx->skb->len; 2579 dev_kfree_skb_any(np->get_tx_ctx->skb); 2580 np->get_tx_ctx->skb = NULL; 2581 tx_work++; 2582 } 2583 } else { 2584 if (flags & NV_TX2_LASTPACKET) { 2585 if (unlikely(flags & NV_TX2_ERROR)) { 2586 if ((flags & NV_TX2_RETRYERROR) 2587 && !(flags & NV_TX2_RETRYCOUNT_MASK)) 2588 nv_legacybackoff_reseed(dev); 2589 } else { 2590 u64_stats_update_begin(&np->swstats_tx_syncp); 2591 np->stat_tx_packets++; 2592 np->stat_tx_bytes += np->get_tx_ctx->skb->len; 2593 u64_stats_update_end(&np->swstats_tx_syncp); 2594 } 2595 bytes_compl += np->get_tx_ctx->skb->len; 2596 dev_kfree_skb_any(np->get_tx_ctx->skb); 2597 np->get_tx_ctx->skb = NULL; 2598 tx_work++; 2599 } 2600 } 2601 if (unlikely(np->get_tx.orig++ == np->last_tx.orig)) 2602 np->get_tx.orig = np->tx_ring.orig; 2603 if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx)) 2604 np->get_tx_ctx = np->tx_skb; 2605 } 2606 2607 netdev_completed_queue(np->dev, tx_work, bytes_compl); 2608 2609 if (unlikely((np->tx_stop == 1) && (np->get_tx.orig != orig_get_tx))) { 2610 np->tx_stop = 0; 2611 netif_wake_queue(dev); 2612 } 2613 return tx_work; 2614 } 2615 2616 static int nv_tx_done_optimized(struct 
net_device *dev, int limit) 2617 { 2618 struct fe_priv *np = netdev_priv(dev); 2619 u32 flags; 2620 int tx_work = 0; 2621 struct ring_desc_ex *orig_get_tx = np->get_tx.ex; 2622 unsigned long bytes_cleaned = 0; 2623 2624 while ((np->get_tx.ex != np->put_tx.ex) && 2625 !((flags = le32_to_cpu(np->get_tx.ex->flaglen)) & NV_TX2_VALID) && 2626 (tx_work < limit)) { 2627 2628 nv_unmap_txskb(np, np->get_tx_ctx); 2629 2630 if (flags & NV_TX2_LASTPACKET) { 2631 if (unlikely(flags & NV_TX2_ERROR)) { 2632 if ((flags & NV_TX2_RETRYERROR) 2633 && !(flags & NV_TX2_RETRYCOUNT_MASK)) { 2634 if (np->driver_data & DEV_HAS_GEAR_MODE) 2635 nv_gear_backoff_reseed(dev); 2636 else 2637 nv_legacybackoff_reseed(dev); 2638 } 2639 } else { 2640 u64_stats_update_begin(&np->swstats_tx_syncp); 2641 np->stat_tx_packets++; 2642 np->stat_tx_bytes += np->get_tx_ctx->skb->len; 2643 u64_stats_update_end(&np->swstats_tx_syncp); 2644 } 2645 2646 bytes_cleaned += np->get_tx_ctx->skb->len; 2647 dev_kfree_skb_any(np->get_tx_ctx->skb); 2648 np->get_tx_ctx->skb = NULL; 2649 tx_work++; 2650 2651 if (np->tx_limit) 2652 nv_tx_flip_ownership(dev); 2653 } 2654 2655 if (unlikely(np->get_tx.ex++ == np->last_tx.ex)) 2656 np->get_tx.ex = np->tx_ring.ex; 2657 if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx)) 2658 np->get_tx_ctx = np->tx_skb; 2659 } 2660 2661 netdev_completed_queue(np->dev, tx_work, bytes_cleaned); 2662 2663 if (unlikely((np->tx_stop == 1) && (np->get_tx.ex != orig_get_tx))) { 2664 np->tx_stop = 0; 2665 netif_wake_queue(dev); 2666 } 2667 return tx_work; 2668 } 2669 2670 /* 2671 * nv_tx_timeout: dev->tx_timeout function 2672 * Called with netif_tx_lock held. 2673 */ 2674 static void nv_tx_timeout(struct net_device *dev) 2675 { 2676 struct fe_priv *np = netdev_priv(dev); 2677 u8 __iomem *base = get_hwbase(dev); 2678 u32 status; 2679 union ring_type put_tx; 2680 int saved_tx_limit; 2681 2682 if (np->msi_flags & NV_MSI_X_ENABLED) 2683 status = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK; 2684 else 2685 status = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK; 2686 2687 netdev_warn(dev, "Got tx_timeout. 
irq status: %08x\n", status); 2688 2689 if (unlikely(debug_tx_timeout)) { 2690 int i; 2691 2692 netdev_info(dev, "Ring at %lx\n", (unsigned long)np->ring_addr); 2693 netdev_info(dev, "Dumping tx registers\n"); 2694 for (i = 0; i <= np->register_size; i += 32) { 2695 netdev_info(dev, 2696 "%3x: %08x %08x %08x %08x " 2697 "%08x %08x %08x %08x\n", 2698 i, 2699 readl(base + i + 0), readl(base + i + 4), 2700 readl(base + i + 8), readl(base + i + 12), 2701 readl(base + i + 16), readl(base + i + 20), 2702 readl(base + i + 24), readl(base + i + 28)); 2703 } 2704 netdev_info(dev, "Dumping tx ring\n"); 2705 for (i = 0; i < np->tx_ring_size; i += 4) { 2706 if (!nv_optimized(np)) { 2707 netdev_info(dev, 2708 "%03x: %08x %08x // %08x %08x " 2709 "// %08x %08x // %08x %08x\n", 2710 i, 2711 le32_to_cpu(np->tx_ring.orig[i].buf), 2712 le32_to_cpu(np->tx_ring.orig[i].flaglen), 2713 le32_to_cpu(np->tx_ring.orig[i+1].buf), 2714 le32_to_cpu(np->tx_ring.orig[i+1].flaglen), 2715 le32_to_cpu(np->tx_ring.orig[i+2].buf), 2716 le32_to_cpu(np->tx_ring.orig[i+2].flaglen), 2717 le32_to_cpu(np->tx_ring.orig[i+3].buf), 2718 le32_to_cpu(np->tx_ring.orig[i+3].flaglen)); 2719 } else { 2720 netdev_info(dev, 2721 "%03x: %08x %08x %08x " 2722 "// %08x %08x %08x " 2723 "// %08x %08x %08x " 2724 "// %08x %08x %08x\n", 2725 i, 2726 le32_to_cpu(np->tx_ring.ex[i].bufhigh), 2727 le32_to_cpu(np->tx_ring.ex[i].buflow), 2728 le32_to_cpu(np->tx_ring.ex[i].flaglen), 2729 le32_to_cpu(np->tx_ring.ex[i+1].bufhigh), 2730 le32_to_cpu(np->tx_ring.ex[i+1].buflow), 2731 le32_to_cpu(np->tx_ring.ex[i+1].flaglen), 2732 le32_to_cpu(np->tx_ring.ex[i+2].bufhigh), 2733 le32_to_cpu(np->tx_ring.ex[i+2].buflow), 2734 le32_to_cpu(np->tx_ring.ex[i+2].flaglen), 2735 le32_to_cpu(np->tx_ring.ex[i+3].bufhigh), 2736 le32_to_cpu(np->tx_ring.ex[i+3].buflow), 2737 le32_to_cpu(np->tx_ring.ex[i+3].flaglen)); 2738 } 2739 } 2740 } 2741 2742 spin_lock_irq(&np->lock); 2743 2744 /* 1) stop tx engine */ 2745 nv_stop_tx(dev); 2746 2747 /* 2) complete any outstanding tx and do not give HW any limited tx pkts */ 2748 saved_tx_limit = np->tx_limit; 2749 np->tx_limit = 0; /* prevent giving HW any limited pkts */ 2750 np->tx_stop = 0; /* prevent waking tx queue */ 2751 if (!nv_optimized(np)) 2752 nv_tx_done(dev, np->tx_ring_size); 2753 else 2754 nv_tx_done_optimized(dev, np->tx_ring_size); 2755 2756 /* save current HW position */ 2757 if (np->tx_change_owner) 2758 put_tx.ex = np->tx_change_owner->first_tx_desc; 2759 else 2760 put_tx = np->put_tx; 2761 2762 /* 3) clear all tx state */ 2763 nv_drain_tx(dev); 2764 nv_init_tx(dev); 2765 2766 /* 4) restore state to current HW position */ 2767 np->get_tx = np->put_tx = put_tx; 2768 np->tx_limit = saved_tx_limit; 2769 2770 /* 5) restart tx engine */ 2771 nv_start_tx(dev); 2772 netif_wake_queue(dev); 2773 spin_unlock_irq(&np->lock); 2774 } 2775 2776 /* 2777 * Called when the nic notices a mismatch between the actual data len on the 2778 * wire and the len indicated in the 802 header 2779 */ 2780 static int nv_getlen(struct net_device *dev, void *packet, int datalen) 2781 { 2782 int hdrlen; /* length of the 802 header */ 2783 int protolen; /* length as stored in the proto field */ 2784 2785 /* 1) calculate len according to header */ 2786 if (((struct vlan_ethhdr *)packet)->h_vlan_proto == htons(ETH_P_8021Q)) { 2787 protolen = ntohs(((struct vlan_ethhdr *)packet)->h_vlan_encapsulated_proto); 2788 hdrlen = VLAN_HLEN; 2789 } else { 2790 protolen = ntohs(((struct ethhdr *)packet)->h_proto); 2791 hdrlen = ETH_HLEN; 2792 } 2793 if (protolen > 
ETH_DATA_LEN) 2794 return datalen; /* Value in proto field not a len, no checks possible */ 2795 2796 protolen += hdrlen; 2797 /* consistency checks: */ 2798 if (datalen > ETH_ZLEN) { 2799 if (datalen >= protolen) { 2800 /* more data on wire than in 802 header, trim off 2801 * additional data. 2802 */ 2803 return protolen; 2804 } else { 2805 /* less data on wire than mentioned in header. 2806 * Discard the packet. 2807 */ 2808 return -1; 2809 } 2810 } else { 2811 /* short packet. Accept only if 802 values are also short */ 2812 if (protolen > ETH_ZLEN) { 2813 return -1; 2814 } 2815 return datalen; 2816 } 2817 } 2818 2819 static int nv_rx_process(struct net_device *dev, int limit) 2820 { 2821 struct fe_priv *np = netdev_priv(dev); 2822 u32 flags; 2823 int rx_work = 0; 2824 struct sk_buff *skb; 2825 int len; 2826 2827 while ((np->get_rx.orig != np->put_rx.orig) && 2828 !((flags = le32_to_cpu(np->get_rx.orig->flaglen)) & NV_RX_AVAIL) && 2829 (rx_work < limit)) { 2830 2831 /* 2832 * the packet is for us - immediately tear down the pci mapping. 2833 * TODO: check if a prefetch of the first cacheline improves 2834 * the performance. 2835 */ 2836 dma_unmap_single(&np->pci_dev->dev, np->get_rx_ctx->dma, 2837 np->get_rx_ctx->dma_len, 2838 DMA_FROM_DEVICE); 2839 skb = np->get_rx_ctx->skb; 2840 np->get_rx_ctx->skb = NULL; 2841 2842 /* look at what we actually got: */ 2843 if (np->desc_ver == DESC_VER_1) { 2844 if (likely(flags & NV_RX_DESCRIPTORVALID)) { 2845 len = flags & LEN_MASK_V1; 2846 if (unlikely(flags & NV_RX_ERROR)) { 2847 if ((flags & NV_RX_ERROR_MASK) == NV_RX_ERROR4) { 2848 len = nv_getlen(dev, skb->data, len); 2849 if (len < 0) { 2850 dev_kfree_skb(skb); 2851 goto next_pkt; 2852 } 2853 } 2854 /* framing errors are soft errors */ 2855 else if ((flags & NV_RX_ERROR_MASK) == NV_RX_FRAMINGERR) { 2856 if (flags & NV_RX_SUBTRACT1) 2857 len--; 2858 } 2859 /* the rest are hard errors */ 2860 else { 2861 if (flags & NV_RX_MISSEDFRAME) { 2862 u64_stats_update_begin(&np->swstats_rx_syncp); 2863 np->stat_rx_missed_errors++; 2864 u64_stats_update_end(&np->swstats_rx_syncp); 2865 } 2866 dev_kfree_skb(skb); 2867 goto next_pkt; 2868 } 2869 } 2870 } else { 2871 dev_kfree_skb(skb); 2872 goto next_pkt; 2873 } 2874 } else { 2875 if (likely(flags & NV_RX2_DESCRIPTORVALID)) { 2876 len = flags & LEN_MASK_V2; 2877 if (unlikely(flags & NV_RX2_ERROR)) { 2878 if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_ERROR4) { 2879 len = nv_getlen(dev, skb->data, len); 2880 if (len < 0) { 2881 dev_kfree_skb(skb); 2882 goto next_pkt; 2883 } 2884 } 2885 /* framing errors are soft errors */ 2886 else if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_FRAMINGERR) { 2887 if (flags & NV_RX2_SUBTRACT1) 2888 len--; 2889 } 2890 /* the rest are hard errors */ 2891 else { 2892 dev_kfree_skb(skb); 2893 goto next_pkt; 2894 } 2895 } 2896 if (((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_TCP) || /*ip and tcp */ 2897 ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_UDP)) /*ip and udp */ 2898 skb->ip_summed = CHECKSUM_UNNECESSARY; 2899 } else { 2900 dev_kfree_skb(skb); 2901 goto next_pkt; 2902 } 2903 } 2904 /* got a valid packet - forward it to the network core */ 2905 skb_put(skb, len); 2906 skb->protocol = eth_type_trans(skb, dev); 2907 napi_gro_receive(&np->napi, skb); 2908 u64_stats_update_begin(&np->swstats_rx_syncp); 2909 np->stat_rx_packets++; 2910 np->stat_rx_bytes += len; 2911 u64_stats_update_end(&np->swstats_rx_syncp); 2912 next_pkt: 2913 if (unlikely(np->get_rx.orig++ == np->last_rx.orig)) 2914 np->get_rx.orig = np->first_rx.orig;
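 /* advance the skb context pointer in lockstep with the descriptor pointer */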
2915 if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx)) 2916 np->get_rx_ctx = np->first_rx_ctx; 2917 2918 rx_work++; 2919 } 2920 2921 return rx_work; 2922 } 2923 2924 static int nv_rx_process_optimized(struct net_device *dev, int limit) 2925 { 2926 struct fe_priv *np = netdev_priv(dev); 2927 u32 flags; 2928 u32 vlanflags = 0; 2929 int rx_work = 0; 2930 struct sk_buff *skb; 2931 int len; 2932 2933 while ((np->get_rx.ex != np->put_rx.ex) && 2934 !((flags = le32_to_cpu(np->get_rx.ex->flaglen)) & NV_RX2_AVAIL) && 2935 (rx_work < limit)) { 2936 2937 /* 2938 * the packet is for us - immediately tear down the pci mapping. 2939 * TODO: check if a prefetch of the first cacheline improves 2940 * the performance. 2941 */ 2942 dma_unmap_single(&np->pci_dev->dev, np->get_rx_ctx->dma, 2943 np->get_rx_ctx->dma_len, 2944 DMA_FROM_DEVICE); 2945 skb = np->get_rx_ctx->skb; 2946 np->get_rx_ctx->skb = NULL; 2947 2948 /* look at what we actually got: */ 2949 if (likely(flags & NV_RX2_DESCRIPTORVALID)) { 2950 len = flags & LEN_MASK_V2; 2951 if (unlikely(flags & NV_RX2_ERROR)) { 2952 if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_ERROR4) { 2953 len = nv_getlen(dev, skb->data, len); 2954 if (len < 0) { 2955 dev_kfree_skb(skb); 2956 goto next_pkt; 2957 } 2958 } 2959 /* framing errors are soft errors */ 2960 else if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_FRAMINGERR) { 2961 if (flags & NV_RX2_SUBTRACT1) 2962 len--; 2963 } 2964 /* the rest are hard errors */ 2965 else { 2966 dev_kfree_skb(skb); 2967 goto next_pkt; 2968 } 2969 } 2970 2971 if (((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_TCP) || /*ip and tcp */ 2972 ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_UDP)) /*ip and udp */ 2973 skb->ip_summed = CHECKSUM_UNNECESSARY; 2974 2975 /* got a valid packet - forward it to the network core */ 2976 skb_put(skb, len); 2977 skb->protocol = eth_type_trans(skb, dev); 2978 prefetch(skb->data); 2979 2980 vlanflags = le32_to_cpu(np->get_rx.ex->buflow); 2981 2982 /* 2983 * We still need to check NETIF_F_HW_VLAN_CTAG_RX 2984 * here: even if vlan rx accel is disabled, 2985 * NV_RX3_VLAN_TAG_PRESENT is pseudo-randomly set. 2986 */ 2987 if (dev->features & NETIF_F_HW_VLAN_CTAG_RX && 2988 vlanflags & NV_RX3_VLAN_TAG_PRESENT) { 2989 u16 vid = vlanflags & NV_RX3_VLAN_TAG_MASK; 2990 2991 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); 2992 } 2993 napi_gro_receive(&np->napi, skb); 2994 u64_stats_update_begin(&np->swstats_rx_syncp); 2995 np->stat_rx_packets++; 2996 np->stat_rx_bytes += len; 2997 u64_stats_update_end(&np->swstats_rx_syncp); 2998 } else { 2999 dev_kfree_skb(skb); 3000 } 3001 next_pkt: 3002 if (unlikely(np->get_rx.ex++ == np->last_rx.ex)) 3003 np->get_rx.ex = np->first_rx.ex; 3004 if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx)) 3005 np->get_rx_ctx = np->first_rx_ctx; 3006 3007 rx_work++; 3008 } 3009 3010 return rx_work; 3011 } 3012 3013 static void set_bufsize(struct net_device *dev) 3014 { 3015 struct fe_priv *np = netdev_priv(dev); 3016 3017 if (dev->mtu <= ETH_DATA_LEN) 3018 np->rx_buf_sz = ETH_DATA_LEN + NV_RX_HEADERS; 3019 else 3020 np->rx_buf_sz = dev->mtu + NV_RX_HEADERS; 3021 } 3022 3023 /* 3024 * nv_change_mtu: dev->change_mtu function 3025 * Called with dev_base_lock held for read.
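 * (MTU changes are further synchronized against open by the rtnl lock,
 * which the caller holds; see the note in the function body.)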
3026 */ 3027 static int nv_change_mtu(struct net_device *dev, int new_mtu) 3028 { 3029 struct fe_priv *np = netdev_priv(dev); 3030 int old_mtu; 3031 3032 old_mtu = dev->mtu; 3033 dev->mtu = new_mtu; 3034 3035 /* return early if the buffer sizes will not change */ 3036 if (old_mtu <= ETH_DATA_LEN && new_mtu <= ETH_DATA_LEN) 3037 return 0; 3038 3039 /* synchronized against open: rtnl_lock() held by caller */ 3040 if (netif_running(dev)) { 3041 u8 __iomem *base = get_hwbase(dev); 3042 /* 3043 * It seems that the nic preloads valid ring entries into an 3044 * internal buffer. The procedure for flushing everything is 3045 * guessed; there is probably a simpler approach. 3046 * Changing the MTU is a rare event, so it shouldn't matter. 3047 */ 3048 nv_disable_irq(dev); 3049 nv_napi_disable(dev); 3050 netif_tx_lock_bh(dev); 3051 netif_addr_lock(dev); 3052 spin_lock(&np->lock); 3053 /* stop engines */ 3054 nv_stop_rxtx(dev); 3055 nv_txrx_reset(dev); 3056 /* drain rx and tx queues */ 3057 nv_drain_rxtx(dev); 3058 /* reinit driver view of the rx queue */ 3059 set_bufsize(dev); 3060 if (nv_init_ring(dev)) { 3061 if (!np->in_shutdown) 3062 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); 3063 } 3064 /* reinit nic view of the rx queue */ 3065 writel(np->rx_buf_sz, base + NvRegOffloadConfig); 3066 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); 3067 writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), 3068 base + NvRegRingSizes); 3069 pci_push(base); 3070 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 3071 pci_push(base); 3072 3073 /* restart rx and tx engines */ 3074 nv_start_rxtx(dev); 3075 spin_unlock(&np->lock); 3076 netif_addr_unlock(dev); 3077 netif_tx_unlock_bh(dev); 3078 nv_napi_enable(dev); 3079 nv_enable_irq(dev); 3080 } 3081 return 0; 3082 } 3083 3084 static void nv_copy_mac_to_hw(struct net_device *dev) 3085 { 3086 u8 __iomem *base = get_hwbase(dev); 3087 u32 mac[2]; 3088 3089 mac[0] = (dev->dev_addr[0] << 0) + (dev->dev_addr[1] << 8) + 3090 (dev->dev_addr[2] << 16) + (dev->dev_addr[3] << 24); 3091 mac[1] = (dev->dev_addr[4] << 0) + (dev->dev_addr[5] << 8); 3092 3093 writel(mac[0], base + NvRegMacAddrA); 3094 writel(mac[1], base + NvRegMacAddrB); 3095 } 3096 3097 /* 3098 * nv_set_mac_address: dev->set_mac_address function 3099 * Called with rtnl_lock() held. 3100 */ 3101 static int nv_set_mac_address(struct net_device *dev, void *addr) 3102 { 3103 struct fe_priv *np = netdev_priv(dev); 3104 struct sockaddr *macaddr = (struct sockaddr *)addr; 3105 3106 if (!is_valid_ether_addr(macaddr->sa_data)) 3107 return -EADDRNOTAVAIL; 3108 3109 /* synchronized against open: rtnl_lock() held by caller */ 3110 memcpy(dev->dev_addr, macaddr->sa_data, ETH_ALEN); 3111 3112 if (netif_running(dev)) { 3113 netif_tx_lock_bh(dev); 3114 netif_addr_lock(dev); 3115 spin_lock_irq(&np->lock); 3116 3117 /* stop rx engine */ 3118 nv_stop_rx(dev); 3119 3120 /* set mac address */ 3121 nv_copy_mac_to_hw(dev); 3122 3123 /* restart rx engine */ 3124 nv_start_rx(dev); 3125 spin_unlock_irq(&np->lock); 3126 netif_addr_unlock(dev); 3127 netif_tx_unlock_bh(dev); 3128 } else { 3129 nv_copy_mac_to_hw(dev); 3130 } 3131 return 0; 3132 } 3133 3134 /* 3135 * nv_set_multicast: dev->set_multicast function 3136 * Called with netif_tx_lock held.
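 * The rx engine is stopped while the filter registers are rewritten;
 * note the nv_stop_rx()/nv_start_rx() pair around the register writes.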
3137 */ 3138 static void nv_set_multicast(struct net_device *dev) 3139 { 3140 struct fe_priv *np = netdev_priv(dev); 3141 u8 __iomem *base = get_hwbase(dev); 3142 u32 addr[2]; 3143 u32 mask[2]; 3144 u32 pff = readl(base + NvRegPacketFilterFlags) & NVREG_PFF_PAUSE_RX; 3145 3146 memset(addr, 0, sizeof(addr)); 3147 memset(mask, 0, sizeof(mask)); 3148 3149 if (dev->flags & IFF_PROMISC) { 3150 pff |= NVREG_PFF_PROMISC; 3151 } else { 3152 pff |= NVREG_PFF_MYADDR; 3153 3154 if (dev->flags & IFF_ALLMULTI || !netdev_mc_empty(dev)) { 3155 u32 alwaysOff[2]; 3156 u32 alwaysOn[2]; 3157 3158 alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0xffffffff; 3159 if (dev->flags & IFF_ALLMULTI) { 3160 alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0; 3161 } else { 3162 struct netdev_hw_addr *ha; 3163 3164 netdev_for_each_mc_addr(ha, dev) { 3165 unsigned char *hw_addr = ha->addr; 3166 u32 a, b; 3167 3168 a = le32_to_cpu(*(__le32 *) hw_addr); 3169 b = le16_to_cpu(*(__le16 *) (&hw_addr[4])); 3170 alwaysOn[0] &= a; 3171 alwaysOff[0] &= ~a; 3172 alwaysOn[1] &= b; 3173 alwaysOff[1] &= ~b; 3174 } 3175 } 3176 addr[0] = alwaysOn[0]; 3177 addr[1] = alwaysOn[1]; 3178 mask[0] = alwaysOn[0] | alwaysOff[0]; 3179 mask[1] = alwaysOn[1] | alwaysOff[1]; 3180 } else { 3181 mask[0] = NVREG_MCASTMASKA_NONE; 3182 mask[1] = NVREG_MCASTMASKB_NONE; 3183 } 3184 } 3185 addr[0] |= NVREG_MCASTADDRA_FORCE; 3186 pff |= NVREG_PFF_ALWAYS; 3187 spin_lock_irq(&np->lock); 3188 nv_stop_rx(dev); 3189 writel(addr[0], base + NvRegMulticastAddrA); 3190 writel(addr[1], base + NvRegMulticastAddrB); 3191 writel(mask[0], base + NvRegMulticastMaskA); 3192 writel(mask[1], base + NvRegMulticastMaskB); 3193 writel(pff, base + NvRegPacketFilterFlags); 3194 nv_start_rx(dev); 3195 spin_unlock_irq(&np->lock); 3196 } 3197 3198 static void nv_update_pause(struct net_device *dev, u32 pause_flags) 3199 { 3200 struct fe_priv *np = netdev_priv(dev); 3201 u8 __iomem *base = get_hwbase(dev); 3202 3203 np->pause_flags &= ~(NV_PAUSEFRAME_TX_ENABLE | NV_PAUSEFRAME_RX_ENABLE); 3204 3205 if (np->pause_flags & NV_PAUSEFRAME_RX_CAPABLE) { 3206 u32 pff = readl(base + NvRegPacketFilterFlags) & ~NVREG_PFF_PAUSE_RX; 3207 if (pause_flags & NV_PAUSEFRAME_RX_ENABLE) { 3208 writel(pff|NVREG_PFF_PAUSE_RX, base + NvRegPacketFilterFlags); 3209 np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE; 3210 } else { 3211 writel(pff, base + NvRegPacketFilterFlags); 3212 } 3213 } 3214 if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE) { 3215 u32 regmisc = readl(base + NvRegMisc1) & ~NVREG_MISC1_PAUSE_TX; 3216 if (pause_flags & NV_PAUSEFRAME_TX_ENABLE) { 3217 u32 pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V1; 3218 if (np->driver_data & DEV_HAS_PAUSEFRAME_TX_V2) 3219 pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V2; 3220 if (np->driver_data & DEV_HAS_PAUSEFRAME_TX_V3) { 3221 pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V3; 3222 /* limit the number of tx pause frames to a default of 8 */ 3223 writel(readl(base + NvRegTxPauseFrameLimit)|NVREG_TX_PAUSEFRAMELIMIT_ENABLE, base + NvRegTxPauseFrameLimit); 3224 } 3225 writel(pause_enable, base + NvRegTxPauseFrame); 3226 writel(regmisc|NVREG_MISC1_PAUSE_TX, base + NvRegMisc1); 3227 np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE; 3228 } else { 3229 writel(NVREG_TX_PAUSEFRAME_DISABLE, base + NvRegTxPauseFrame); 3230 writel(regmisc, base + NvRegMisc1); 3231 } 3232 } 3233 } 3234 3235 static void nv_force_linkspeed(struct net_device *dev, int speed, int duplex) 3236 { 3237 struct fe_priv *np = netdev_priv(dev); 3238 u8 __iomem *base = get_hwbase(dev); 3239 u32 
phyreg, txreg; 3240 int mii_status; 3241 3242 np->linkspeed = NVREG_LINKSPEED_FORCE|speed; 3243 np->duplex = duplex; 3244 3245 /* see if gigabit phy */ 3246 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); 3247 if (mii_status & PHY_GIGABIT) { 3248 np->gigabit = PHY_GIGABIT; 3249 phyreg = readl(base + NvRegSlotTime); 3250 phyreg &= ~(0x3FF00); 3251 if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_10) 3252 phyreg |= NVREG_SLOTTIME_10_100_FULL; 3253 else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_100) 3254 phyreg |= NVREG_SLOTTIME_10_100_FULL; 3255 else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_1000) 3256 phyreg |= NVREG_SLOTTIME_1000_FULL; 3257 writel(phyreg, base + NvRegSlotTime); 3258 } 3259 3260 phyreg = readl(base + NvRegPhyInterface); 3261 phyreg &= ~(PHY_HALF|PHY_100|PHY_1000); 3262 if (np->duplex == 0) 3263 phyreg |= PHY_HALF; 3264 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_100) 3265 phyreg |= PHY_100; 3266 else if ((np->linkspeed & NVREG_LINKSPEED_MASK) == 3267 NVREG_LINKSPEED_1000) 3268 phyreg |= PHY_1000; 3269 writel(phyreg, base + NvRegPhyInterface); 3270 3271 if (phyreg & PHY_RGMII) { 3272 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == 3273 NVREG_LINKSPEED_1000) 3274 txreg = NVREG_TX_DEFERRAL_RGMII_1000; 3275 else 3276 txreg = NVREG_TX_DEFERRAL_RGMII_10_100; 3277 } else { 3278 txreg = NVREG_TX_DEFERRAL_DEFAULT; 3279 } 3280 writel(txreg, base + NvRegTxDeferral); 3281 3282 if (np->desc_ver == DESC_VER_1) { 3283 txreg = NVREG_TX_WM_DESC1_DEFAULT; 3284 } else { 3285 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == 3286 NVREG_LINKSPEED_1000) 3287 txreg = NVREG_TX_WM_DESC2_3_1000; 3288 else 3289 txreg = NVREG_TX_WM_DESC2_3_DEFAULT; 3290 } 3291 writel(txreg, base + NvRegTxWatermark); 3292 3293 writel(NVREG_MISC1_FORCE | (np->duplex ? 0 : NVREG_MISC1_HD), 3294 base + NvRegMisc1); 3295 pci_push(base); 3296 writel(np->linkspeed, base + NvRegLinkSpeed); 3297 pci_push(base); 3298 } 3299 3300 /** 3301 * nv_update_linkspeed - Setup the MAC according to the link partner 3302 * @dev: Network device to be configured 3303 * 3304 * The function queries the PHY and checks if there is a link partner. 3305 * If yes, then it sets up the MAC accordingly. Otherwise, the MAC is 3306 * set to 10 MBit HD. 3307 * 3308 * The function returns 0 if there is no link partner and 1 if there is 3309 * a good link partner. 3310 */ 3311 static int nv_update_linkspeed(struct net_device *dev) 3312 { 3313 struct fe_priv *np = netdev_priv(dev); 3314 u8 __iomem *base = get_hwbase(dev); 3315 int adv = 0; 3316 int lpa = 0; 3317 int adv_lpa, adv_pause, lpa_pause; 3318 int newls = np->linkspeed; 3319 int newdup = np->duplex; 3320 int mii_status; 3321 u32 bmcr; 3322 int retval = 0; 3323 u32 control_1000, status_1000, phyreg, pause_flags, txreg; 3324 u32 txrxFlags = 0; 3325 u32 phy_exp; 3326 3327 /* If device loopback is enabled, set carrier on and enable max link 3328 * speed. 3329 */ 3330 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); 3331 if (bmcr & BMCR_LOOPBACK) { 3332 if (netif_running(dev)) { 3333 nv_force_linkspeed(dev, NVREG_LINKSPEED_1000, 1); 3334 if (!netif_carrier_ok(dev)) 3335 netif_carrier_on(dev); 3336 } 3337 return 1; 3338 } 3339 3340 /* BMSR_LSTATUS is latched, read it twice: 3341 * we want the current value. 
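 * (BMSR_LSTATUS is latched low: the first read returns and clears any
 * stored link-down event, the second read reflects the live link state.)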
3342 */ 3343 mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); 3344 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); 3345 3346 if (!(mii_status & BMSR_LSTATUS)) { 3347 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; 3348 newdup = 0; 3349 retval = 0; 3350 goto set_speed; 3351 } 3352 3353 if (np->autoneg == 0) { 3354 if (np->fixed_mode & LPA_100FULL) { 3355 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100; 3356 newdup = 1; 3357 } else if (np->fixed_mode & LPA_100HALF) { 3358 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100; 3359 newdup = 0; 3360 } else if (np->fixed_mode & LPA_10FULL) { 3361 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; 3362 newdup = 1; 3363 } else { 3364 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; 3365 newdup = 0; 3366 } 3367 retval = 1; 3368 goto set_speed; 3369 } 3370 /* check auto negotiation is complete */ 3371 if (!(mii_status & BMSR_ANEGCOMPLETE)) { 3372 /* still in autonegotiation - configure nic for 10 MBit HD and wait. */ 3373 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; 3374 newdup = 0; 3375 retval = 0; 3376 goto set_speed; 3377 } 3378 3379 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); 3380 lpa = mii_rw(dev, np->phyaddr, MII_LPA, MII_READ); 3381 3382 retval = 1; 3383 if (np->gigabit == PHY_GIGABIT) { 3384 control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ); 3385 status_1000 = mii_rw(dev, np->phyaddr, MII_STAT1000, MII_READ); 3386 3387 if ((control_1000 & ADVERTISE_1000FULL) && 3388 (status_1000 & LPA_1000FULL)) { 3389 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_1000; 3390 newdup = 1; 3391 goto set_speed; 3392 } 3393 } 3394 3395 /* FIXME: handle parallel detection properly */ 3396 adv_lpa = lpa & adv; 3397 if (adv_lpa & LPA_100FULL) { 3398 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100; 3399 newdup = 1; 3400 } else if (adv_lpa & LPA_100HALF) { 3401 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100; 3402 newdup = 0; 3403 } else if (adv_lpa & LPA_10FULL) { 3404 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; 3405 newdup = 1; 3406 } else if (adv_lpa & LPA_10HALF) { 3407 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; 3408 newdup = 0; 3409 } else { 3410 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; 3411 newdup = 0; 3412 } 3413 3414 set_speed: 3415 if (np->duplex == newdup && np->linkspeed == newls) 3416 return retval; 3417 3418 np->duplex = newdup; 3419 np->linkspeed = newls; 3420 3421 /* The transmitter and receiver must be restarted for safe update */ 3422 if (readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_START) { 3423 txrxFlags |= NV_RESTART_TX; 3424 nv_stop_tx(dev); 3425 } 3426 if (readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) { 3427 txrxFlags |= NV_RESTART_RX; 3428 nv_stop_rx(dev); 3429 } 3430 3431 if (np->gigabit == PHY_GIGABIT) { 3432 phyreg = readl(base + NvRegSlotTime); 3433 phyreg &= ~(0x3FF00); 3434 if (((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_10) || 3435 ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_100)) 3436 phyreg |= NVREG_SLOTTIME_10_100_FULL; 3437 else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_1000) 3438 phyreg |= NVREG_SLOTTIME_1000_FULL; 3439 writel(phyreg, base + NvRegSlotTime); 3440 } 3441 3442 phyreg = readl(base + NvRegPhyInterface); 3443 phyreg &= ~(PHY_HALF|PHY_100|PHY_1000); 3444 if (np->duplex == 0) 3445 phyreg |= PHY_HALF; 3446 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_100) 3447 phyreg |= PHY_100; 3448 else if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000) 3449 phyreg |= PHY_1000; 3450 writel(phyreg, base + 
NvRegPhyInterface); 3451 3452 phy_exp = mii_rw(dev, np->phyaddr, MII_EXPANSION, MII_READ) & EXPANSION_NWAY; /* autoneg capable */ 3453 if (phyreg & PHY_RGMII) { 3454 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000) { 3455 txreg = NVREG_TX_DEFERRAL_RGMII_1000; 3456 } else { 3457 if (!phy_exp && !np->duplex && (np->driver_data & DEV_HAS_COLLISION_FIX)) { 3458 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_10) 3459 txreg = NVREG_TX_DEFERRAL_RGMII_STRETCH_10; 3460 else 3461 txreg = NVREG_TX_DEFERRAL_RGMII_STRETCH_100; 3462 } else { 3463 txreg = NVREG_TX_DEFERRAL_RGMII_10_100; 3464 } 3465 } 3466 } else { 3467 if (!phy_exp && !np->duplex && (np->driver_data & DEV_HAS_COLLISION_FIX)) 3468 txreg = NVREG_TX_DEFERRAL_MII_STRETCH; 3469 else 3470 txreg = NVREG_TX_DEFERRAL_DEFAULT; 3471 } 3472 writel(txreg, base + NvRegTxDeferral); 3473 3474 if (np->desc_ver == DESC_VER_1) { 3475 txreg = NVREG_TX_WM_DESC1_DEFAULT; 3476 } else { 3477 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000) 3478 txreg = NVREG_TX_WM_DESC2_3_1000; 3479 else 3480 txreg = NVREG_TX_WM_DESC2_3_DEFAULT; 3481 } 3482 writel(txreg, base + NvRegTxWatermark); 3483 3484 writel(NVREG_MISC1_FORCE | (np->duplex ? 0 : NVREG_MISC1_HD), 3485 base + NvRegMisc1); 3486 pci_push(base); 3487 writel(np->linkspeed, base + NvRegLinkSpeed); 3488 pci_push(base); 3489 3490 pause_flags = 0; 3491 /* setup pause frame */ 3492 if (netif_running(dev) && (np->duplex != 0)) { 3493 if (np->autoneg && np->pause_flags & NV_PAUSEFRAME_AUTONEG) { 3494 adv_pause = adv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); 3495 lpa_pause = lpa & (LPA_PAUSE_CAP | LPA_PAUSE_ASYM); 3496 3497 switch (adv_pause) { 3498 case ADVERTISE_PAUSE_CAP: 3499 if (lpa_pause & LPA_PAUSE_CAP) { 3500 pause_flags |= NV_PAUSEFRAME_RX_ENABLE; 3501 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) 3502 pause_flags |= NV_PAUSEFRAME_TX_ENABLE; 3503 } 3504 break; 3505 case ADVERTISE_PAUSE_ASYM: 3506 if (lpa_pause == (LPA_PAUSE_CAP | LPA_PAUSE_ASYM)) 3507 pause_flags |= NV_PAUSEFRAME_TX_ENABLE; 3508 break; 3509 case ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM: 3510 if (lpa_pause & LPA_PAUSE_CAP) { 3511 pause_flags |= NV_PAUSEFRAME_RX_ENABLE; 3512 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) 3513 pause_flags |= NV_PAUSEFRAME_TX_ENABLE; 3514 } 3515 if (lpa_pause == LPA_PAUSE_ASYM) 3516 pause_flags |= NV_PAUSEFRAME_RX_ENABLE; 3517 break; 3518 } 3519 } else { 3520 pause_flags = np->pause_flags; 3521 } 3522 } 3523 nv_update_pause(dev, pause_flags); 3524 3525 if (txrxFlags & NV_RESTART_TX) 3526 nv_start_tx(dev); 3527 if (txrxFlags & NV_RESTART_RX) 3528 nv_start_rx(dev); 3529 3530 return retval; 3531 } 3532 3533 static void nv_linkchange(struct net_device *dev) 3534 { 3535 if (nv_update_linkspeed(dev)) { 3536 if (!netif_carrier_ok(dev)) { 3537 netif_carrier_on(dev); 3538 netdev_info(dev, "link up\n"); 3539 nv_txrx_gate(dev, false); 3540 nv_start_rx(dev); 3541 } 3542 } else { 3543 if (netif_carrier_ok(dev)) { 3544 netif_carrier_off(dev); 3545 netdev_info(dev, "link down\n"); 3546 nv_txrx_gate(dev, true); 3547 nv_stop_rx(dev); 3548 } 3549 } 3550 } 3551 3552 static void nv_link_irq(struct net_device *dev) 3553 { 3554 u8 __iomem *base = get_hwbase(dev); 3555 u32 miistat; 3556 3557 miistat = readl(base + NvRegMIIStatus); 3558 writel(NVREG_MIISTAT_LINKCHANGE, base + NvRegMIIStatus); 3559 3560 if (miistat & (NVREG_MIISTAT_LINKCHANGE)) 3561 nv_linkchange(dev); 3562 } 3563 3564 static void nv_msi_workaround(struct fe_priv *np) 3565 { 3566 3567 /* Need to toggle the msi irq 
mask within the ethernet device,
 * otherwise, future interrupts will not be detected.
 */
    if (np->msi_flags & NV_MSI_ENABLED) {
        u8 __iomem *base = np->base;

        writel(0, base + NvRegMSIIrqMask);
        writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
    }
}

static inline int nv_change_interrupt_mode(struct net_device *dev, int total_work)
{
    struct fe_priv *np = netdev_priv(dev);

    if (optimization_mode == NV_OPTIMIZATION_MODE_DYNAMIC) {
        if (total_work > NV_DYNAMIC_THRESHOLD) {
            /* transition to poll based interrupts */
            np->quiet_count = 0;
            if (np->irqmask != NVREG_IRQMASK_CPU) {
                np->irqmask = NVREG_IRQMASK_CPU;
                return 1;
            }
        } else {
            if (np->quiet_count < NV_DYNAMIC_MAX_QUIET_COUNT) {
                np->quiet_count++;
            } else {
                /* reached a period of low activity, switch
                 * to per tx/rx packet interrupts */
                if (np->irqmask != NVREG_IRQMASK_THROUGHPUT) {
                    np->irqmask = NVREG_IRQMASK_THROUGHPUT;
                    return 1;
                }
            }
        }
    }
    return 0;
}

static irqreturn_t nv_nic_irq(int foo, void *data)
{
    struct net_device *dev = (struct net_device *) data;
    struct fe_priv *np = netdev_priv(dev);
    u8 __iomem *base = get_hwbase(dev);

    if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
        np->events = readl(base + NvRegIrqStatus);
        writel(np->events, base + NvRegIrqStatus);
    } else {
        np->events = readl(base + NvRegMSIXIrqStatus);
        writel(np->events, base + NvRegMSIXIrqStatus);
    }
    if (!(np->events & np->irqmask))
        return IRQ_NONE;

    nv_msi_workaround(np);

    if (napi_schedule_prep(&np->napi)) {
        /*
         * Disable further irqs (msix not enabled with napi)
         */
        writel(0, base + NvRegIrqMask);
        __napi_schedule(&np->napi);
    }

    return IRQ_HANDLED;
}

/* All _optimized functions are used to help increase performance
 * (reduce CPU and increase throughput). They use descriptor version 3,
 * compiler directives, and reduce memory accesses.
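 *
 * Note: nv_nic_irq_optimized() below is currently identical in body to
 * nv_nic_irq(); the fast-path difference lies in which helpers the NAPI
 * poll dispatches to later (nv_tx_done_optimized()/nv_rx_process_optimized()
 * instead of nv_tx_done()/nv_rx_process()).
 * nv_change_interrupt_mode() above complements this: under
 * NV_OPTIMIZATION_MODE_DYNAMIC a busy poll round (more than
 * NV_DYNAMIC_THRESHOLD units of work) re-arms np->irqmask with
 * NVREG_IRQMASK_CPU, i.e. timer-paced polling, while
 * NV_DYNAMIC_MAX_QUIET_COUNT consecutive quiet rounds fall back to
 * NVREG_IRQMASK_THROUGHPUT, i.e. per-packet tx/rx interrupts.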
3638 */ 3639 static irqreturn_t nv_nic_irq_optimized(int foo, void *data) 3640 { 3641 struct net_device *dev = (struct net_device *) data; 3642 struct fe_priv *np = netdev_priv(dev); 3643 u8 __iomem *base = get_hwbase(dev); 3644 3645 if (!(np->msi_flags & NV_MSI_X_ENABLED)) { 3646 np->events = readl(base + NvRegIrqStatus); 3647 writel(np->events, base + NvRegIrqStatus); 3648 } else { 3649 np->events = readl(base + NvRegMSIXIrqStatus); 3650 writel(np->events, base + NvRegMSIXIrqStatus); 3651 } 3652 if (!(np->events & np->irqmask)) 3653 return IRQ_NONE; 3654 3655 nv_msi_workaround(np); 3656 3657 if (napi_schedule_prep(&np->napi)) { 3658 /* 3659 * Disable further irq's (msix not enabled with napi) 3660 */ 3661 writel(0, base + NvRegIrqMask); 3662 __napi_schedule(&np->napi); 3663 } 3664 3665 return IRQ_HANDLED; 3666 } 3667 3668 static irqreturn_t nv_nic_irq_tx(int foo, void *data) 3669 { 3670 struct net_device *dev = (struct net_device *) data; 3671 struct fe_priv *np = netdev_priv(dev); 3672 u8 __iomem *base = get_hwbase(dev); 3673 u32 events; 3674 int i; 3675 unsigned long flags; 3676 3677 for (i = 0;; i++) { 3678 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_TX_ALL; 3679 writel(events, base + NvRegMSIXIrqStatus); 3680 netdev_dbg(dev, "tx irq events: %08x\n", events); 3681 if (!(events & np->irqmask)) 3682 break; 3683 3684 spin_lock_irqsave(&np->lock, flags); 3685 nv_tx_done_optimized(dev, TX_WORK_PER_LOOP); 3686 spin_unlock_irqrestore(&np->lock, flags); 3687 3688 if (unlikely(i > max_interrupt_work)) { 3689 spin_lock_irqsave(&np->lock, flags); 3690 /* disable interrupts on the nic */ 3691 writel(NVREG_IRQ_TX_ALL, base + NvRegIrqMask); 3692 pci_push(base); 3693 3694 if (!np->in_shutdown) { 3695 np->nic_poll_irq |= NVREG_IRQ_TX_ALL; 3696 mod_timer(&np->nic_poll, jiffies + POLL_WAIT); 3697 } 3698 spin_unlock_irqrestore(&np->lock, flags); 3699 netdev_dbg(dev, "%s: too many iterations (%d)\n", 3700 __func__, i); 3701 break; 3702 } 3703 3704 } 3705 3706 return IRQ_RETVAL(i); 3707 } 3708 3709 static int nv_napi_poll(struct napi_struct *napi, int budget) 3710 { 3711 struct fe_priv *np = container_of(napi, struct fe_priv, napi); 3712 struct net_device *dev = np->dev; 3713 u8 __iomem *base = get_hwbase(dev); 3714 unsigned long flags; 3715 int retcode; 3716 int rx_count, tx_work = 0, rx_work = 0; 3717 3718 do { 3719 if (!nv_optimized(np)) { 3720 spin_lock_irqsave(&np->lock, flags); 3721 tx_work += nv_tx_done(dev, np->tx_ring_size); 3722 spin_unlock_irqrestore(&np->lock, flags); 3723 3724 rx_count = nv_rx_process(dev, budget - rx_work); 3725 retcode = nv_alloc_rx(dev); 3726 } else { 3727 spin_lock_irqsave(&np->lock, flags); 3728 tx_work += nv_tx_done_optimized(dev, np->tx_ring_size); 3729 spin_unlock_irqrestore(&np->lock, flags); 3730 3731 rx_count = nv_rx_process_optimized(dev, 3732 budget - rx_work); 3733 retcode = nv_alloc_rx_optimized(dev); 3734 } 3735 } while (retcode == 0 && 3736 rx_count > 0 && (rx_work += rx_count) < budget); 3737 3738 if (retcode) { 3739 spin_lock_irqsave(&np->lock, flags); 3740 if (!np->in_shutdown) 3741 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); 3742 spin_unlock_irqrestore(&np->lock, flags); 3743 } 3744 3745 nv_change_interrupt_mode(dev, tx_work + rx_work); 3746 3747 if (unlikely(np->events & NVREG_IRQ_LINK)) { 3748 spin_lock_irqsave(&np->lock, flags); 3749 nv_link_irq(dev); 3750 spin_unlock_irqrestore(&np->lock, flags); 3751 } 3752 if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) { 3753 spin_lock_irqsave(&np->lock, flags); 3754 
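        /* link timer expired without a link irq: re-check the link by
         * polling the PHY; np->need_linktimer selects this fallback on
         * chips where link interrupts alone are not sufficient
         */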
nv_linkchange(dev); 3755 spin_unlock_irqrestore(&np->lock, flags); 3756 np->link_timeout = jiffies + LINK_TIMEOUT; 3757 } 3758 if (unlikely(np->events & NVREG_IRQ_RECOVER_ERROR)) { 3759 spin_lock_irqsave(&np->lock, flags); 3760 if (!np->in_shutdown) { 3761 np->nic_poll_irq = np->irqmask; 3762 np->recover_error = 1; 3763 mod_timer(&np->nic_poll, jiffies + POLL_WAIT); 3764 } 3765 spin_unlock_irqrestore(&np->lock, flags); 3766 napi_complete(napi); 3767 return rx_work; 3768 } 3769 3770 if (rx_work < budget) { 3771 /* re-enable interrupts 3772 (msix not enabled in napi) */ 3773 napi_complete_done(napi, rx_work); 3774 3775 writel(np->irqmask, base + NvRegIrqMask); 3776 } 3777 return rx_work; 3778 } 3779 3780 static irqreturn_t nv_nic_irq_rx(int foo, void *data) 3781 { 3782 struct net_device *dev = (struct net_device *) data; 3783 struct fe_priv *np = netdev_priv(dev); 3784 u8 __iomem *base = get_hwbase(dev); 3785 u32 events; 3786 int i; 3787 unsigned long flags; 3788 3789 for (i = 0;; i++) { 3790 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL; 3791 writel(events, base + NvRegMSIXIrqStatus); 3792 netdev_dbg(dev, "rx irq events: %08x\n", events); 3793 if (!(events & np->irqmask)) 3794 break; 3795 3796 if (nv_rx_process_optimized(dev, RX_WORK_PER_LOOP)) { 3797 if (unlikely(nv_alloc_rx_optimized(dev))) { 3798 spin_lock_irqsave(&np->lock, flags); 3799 if (!np->in_shutdown) 3800 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); 3801 spin_unlock_irqrestore(&np->lock, flags); 3802 } 3803 } 3804 3805 if (unlikely(i > max_interrupt_work)) { 3806 spin_lock_irqsave(&np->lock, flags); 3807 /* disable interrupts on the nic */ 3808 writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask); 3809 pci_push(base); 3810 3811 if (!np->in_shutdown) { 3812 np->nic_poll_irq |= NVREG_IRQ_RX_ALL; 3813 mod_timer(&np->nic_poll, jiffies + POLL_WAIT); 3814 } 3815 spin_unlock_irqrestore(&np->lock, flags); 3816 netdev_dbg(dev, "%s: too many iterations (%d)\n", 3817 __func__, i); 3818 break; 3819 } 3820 } 3821 3822 return IRQ_RETVAL(i); 3823 } 3824 3825 static irqreturn_t nv_nic_irq_other(int foo, void *data) 3826 { 3827 struct net_device *dev = (struct net_device *) data; 3828 struct fe_priv *np = netdev_priv(dev); 3829 u8 __iomem *base = get_hwbase(dev); 3830 u32 events; 3831 int i; 3832 unsigned long flags; 3833 3834 for (i = 0;; i++) { 3835 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_OTHER; 3836 writel(events, base + NvRegMSIXIrqStatus); 3837 netdev_dbg(dev, "irq events: %08x\n", events); 3838 if (!(events & np->irqmask)) 3839 break; 3840 3841 /* check tx in case we reached max loop limit in tx isr */ 3842 spin_lock_irqsave(&np->lock, flags); 3843 nv_tx_done_optimized(dev, TX_WORK_PER_LOOP); 3844 spin_unlock_irqrestore(&np->lock, flags); 3845 3846 if (events & NVREG_IRQ_LINK) { 3847 spin_lock_irqsave(&np->lock, flags); 3848 nv_link_irq(dev); 3849 spin_unlock_irqrestore(&np->lock, flags); 3850 } 3851 if (np->need_linktimer && time_after(jiffies, np->link_timeout)) { 3852 spin_lock_irqsave(&np->lock, flags); 3853 nv_linkchange(dev); 3854 spin_unlock_irqrestore(&np->lock, flags); 3855 np->link_timeout = jiffies + LINK_TIMEOUT; 3856 } 3857 if (events & NVREG_IRQ_RECOVER_ERROR) { 3858 spin_lock_irqsave(&np->lock, flags); 3859 /* disable interrupts on the nic */ 3860 writel(NVREG_IRQ_OTHER, base + NvRegIrqMask); 3861 pci_push(base); 3862 3863 if (!np->in_shutdown) { 3864 np->nic_poll_irq |= NVREG_IRQ_OTHER; 3865 np->recover_error = 1; 3866 mod_timer(&np->nic_poll, jiffies + POLL_WAIT); 3867 } 3868 
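            /* the actual recovery (engine stop, mac reset, ring
             * reinit) runs from nv_do_nic_poll() when the timer armed
             * above fires and sees np->recover_error set
             */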
spin_unlock_irqrestore(&np->lock, flags); 3869 break; 3870 } 3871 if (unlikely(i > max_interrupt_work)) { 3872 spin_lock_irqsave(&np->lock, flags); 3873 /* disable interrupts on the nic */ 3874 writel(NVREG_IRQ_OTHER, base + NvRegIrqMask); 3875 pci_push(base); 3876 3877 if (!np->in_shutdown) { 3878 np->nic_poll_irq |= NVREG_IRQ_OTHER; 3879 mod_timer(&np->nic_poll, jiffies + POLL_WAIT); 3880 } 3881 spin_unlock_irqrestore(&np->lock, flags); 3882 netdev_dbg(dev, "%s: too many iterations (%d)\n", 3883 __func__, i); 3884 break; 3885 } 3886 3887 } 3888 3889 return IRQ_RETVAL(i); 3890 } 3891 3892 static irqreturn_t nv_nic_irq_test(int foo, void *data) 3893 { 3894 struct net_device *dev = (struct net_device *) data; 3895 struct fe_priv *np = netdev_priv(dev); 3896 u8 __iomem *base = get_hwbase(dev); 3897 u32 events; 3898 3899 if (!(np->msi_flags & NV_MSI_X_ENABLED)) { 3900 events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK; 3901 writel(events & NVREG_IRQ_TIMER, base + NvRegIrqStatus); 3902 } else { 3903 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK; 3904 writel(events & NVREG_IRQ_TIMER, base + NvRegMSIXIrqStatus); 3905 } 3906 pci_push(base); 3907 if (!(events & NVREG_IRQ_TIMER)) 3908 return IRQ_RETVAL(0); 3909 3910 nv_msi_workaround(np); 3911 3912 spin_lock(&np->lock); 3913 np->intr_test = 1; 3914 spin_unlock(&np->lock); 3915 3916 return IRQ_RETVAL(1); 3917 } 3918 3919 static void set_msix_vector_map(struct net_device *dev, u32 vector, u32 irqmask) 3920 { 3921 u8 __iomem *base = get_hwbase(dev); 3922 int i; 3923 u32 msixmap = 0; 3924 3925 /* Each interrupt bit can be mapped to a MSIX vector (4 bits). 3926 * MSIXMap0 represents the first 8 interrupts and MSIXMap1 represents 3927 * the remaining 8 interrupts. 3928 */ 3929 for (i = 0; i < 8; i++) { 3930 if ((irqmask >> i) & 0x1) 3931 msixmap |= vector << (i << 2); 3932 } 3933 writel(readl(base + NvRegMSIXMap0) | msixmap, base + NvRegMSIXMap0); 3934 3935 msixmap = 0; 3936 for (i = 0; i < 8; i++) { 3937 if ((irqmask >> (i + 8)) & 0x1) 3938 msixmap |= vector << (i << 2); 3939 } 3940 writel(readl(base + NvRegMSIXMap1) | msixmap, base + NvRegMSIXMap1); 3941 } 3942 3943 static int nv_request_irq(struct net_device *dev, int intr_test) 3944 { 3945 struct fe_priv *np = get_nvpriv(dev); 3946 u8 __iomem *base = get_hwbase(dev); 3947 int ret; 3948 int i; 3949 irqreturn_t (*handler)(int foo, void *data); 3950 3951 if (intr_test) { 3952 handler = nv_nic_irq_test; 3953 } else { 3954 if (nv_optimized(np)) 3955 handler = nv_nic_irq_optimized; 3956 else 3957 handler = nv_nic_irq; 3958 } 3959 3960 if (np->msi_flags & NV_MSI_X_CAPABLE) { 3961 for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) 3962 np->msi_x_entry[i].entry = i; 3963 ret = pci_enable_msix_range(np->pci_dev, 3964 np->msi_x_entry, 3965 np->msi_flags & NV_MSI_X_VECTORS_MASK, 3966 np->msi_flags & NV_MSI_X_VECTORS_MASK); 3967 if (ret > 0) { 3968 np->msi_flags |= NV_MSI_X_ENABLED; 3969 if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT && !intr_test) { 3970 /* Request irq for rx handling */ 3971 sprintf(np->name_rx, "%s-rx", dev->name); 3972 ret = request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, 3973 nv_nic_irq_rx, IRQF_SHARED, np->name_rx, dev); 3974 if (ret) { 3975 netdev_info(dev, 3976 "request_irq failed for rx %d\n", 3977 ret); 3978 pci_disable_msix(np->pci_dev); 3979 np->msi_flags &= ~NV_MSI_X_ENABLED; 3980 goto out_err; 3981 } 3982 /* Request irq for tx handling */ 3983 sprintf(np->name_tx, "%s-tx", dev->name); 3984 ret = 
request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, 3985 nv_nic_irq_tx, IRQF_SHARED, np->name_tx, dev); 3986 if (ret) { 3987 netdev_info(dev, 3988 "request_irq failed for tx %d\n", 3989 ret); 3990 pci_disable_msix(np->pci_dev); 3991 np->msi_flags &= ~NV_MSI_X_ENABLED; 3992 goto out_free_rx; 3993 } 3994 /* Request irq for link and timer handling */ 3995 sprintf(np->name_other, "%s-other", dev->name); 3996 ret = request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector, 3997 nv_nic_irq_other, IRQF_SHARED, np->name_other, dev); 3998 if (ret) { 3999 netdev_info(dev, 4000 "request_irq failed for link %d\n", 4001 ret); 4002 pci_disable_msix(np->pci_dev); 4003 np->msi_flags &= ~NV_MSI_X_ENABLED; 4004 goto out_free_tx; 4005 } 4006 /* map interrupts to their respective vector */ 4007 writel(0, base + NvRegMSIXMap0); 4008 writel(0, base + NvRegMSIXMap1); 4009 set_msix_vector_map(dev, NV_MSI_X_VECTOR_RX, NVREG_IRQ_RX_ALL); 4010 set_msix_vector_map(dev, NV_MSI_X_VECTOR_TX, NVREG_IRQ_TX_ALL); 4011 set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER); 4012 } else { 4013 /* Request irq for all interrupts */ 4014 ret = request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, 4015 handler, IRQF_SHARED, dev->name, dev); 4016 if (ret) { 4017 netdev_info(dev, 4018 "request_irq failed %d\n", 4019 ret); 4020 pci_disable_msix(np->pci_dev); 4021 np->msi_flags &= ~NV_MSI_X_ENABLED; 4022 goto out_err; 4023 } 4024 4025 /* map interrupts to vector 0 */ 4026 writel(0, base + NvRegMSIXMap0); 4027 writel(0, base + NvRegMSIXMap1); 4028 } 4029 netdev_info(dev, "MSI-X enabled\n"); 4030 return 0; 4031 } 4032 } 4033 if (np->msi_flags & NV_MSI_CAPABLE) { 4034 ret = pci_enable_msi(np->pci_dev); 4035 if (ret == 0) { 4036 np->msi_flags |= NV_MSI_ENABLED; 4037 ret = request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev); 4038 if (ret) { 4039 netdev_info(dev, "request_irq failed %d\n", 4040 ret); 4041 pci_disable_msi(np->pci_dev); 4042 np->msi_flags &= ~NV_MSI_ENABLED; 4043 goto out_err; 4044 } 4045 4046 /* map interrupts to vector 0 */ 4047 writel(0, base + NvRegMSIMap0); 4048 writel(0, base + NvRegMSIMap1); 4049 /* enable msi vector 0 */ 4050 writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask); 4051 netdev_info(dev, "MSI enabled\n"); 4052 return 0; 4053 } 4054 } 4055 4056 if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0) 4057 goto out_err; 4058 4059 return 0; 4060 out_free_tx: 4061 free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, dev); 4062 out_free_rx: 4063 free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, dev); 4064 out_err: 4065 return 1; 4066 } 4067 4068 static void nv_free_irq(struct net_device *dev) 4069 { 4070 struct fe_priv *np = get_nvpriv(dev); 4071 int i; 4072 4073 if (np->msi_flags & NV_MSI_X_ENABLED) { 4074 for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) 4075 free_irq(np->msi_x_entry[i].vector, dev); 4076 pci_disable_msix(np->pci_dev); 4077 np->msi_flags &= ~NV_MSI_X_ENABLED; 4078 } else { 4079 free_irq(np->pci_dev->irq, dev); 4080 if (np->msi_flags & NV_MSI_ENABLED) { 4081 pci_disable_msi(np->pci_dev); 4082 np->msi_flags &= ~NV_MSI_ENABLED; 4083 } 4084 } 4085 } 4086 4087 static void nv_do_nic_poll(struct timer_list *t) 4088 { 4089 struct fe_priv *np = from_timer(np, t, nic_poll); 4090 struct net_device *dev = np->dev; 4091 u8 __iomem *base = get_hwbase(dev); 4092 u32 mask = 0; 4093 unsigned long flags; 4094 unsigned int irq = 0; 4095 4096 /* 4097 * First disable irq(s) and then 4098 * reenable interrupts on the nic, we have to do 
this before calling 4099 * nv_nic_irq because that may decide to do otherwise 4100 */ 4101 4102 if (!using_multi_irqs(dev)) { 4103 if (np->msi_flags & NV_MSI_X_ENABLED) 4104 irq = np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector; 4105 else 4106 irq = np->pci_dev->irq; 4107 mask = np->irqmask; 4108 } else { 4109 if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) { 4110 irq = np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector; 4111 mask |= NVREG_IRQ_RX_ALL; 4112 } 4113 if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) { 4114 irq = np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector; 4115 mask |= NVREG_IRQ_TX_ALL; 4116 } 4117 if (np->nic_poll_irq & NVREG_IRQ_OTHER) { 4118 irq = np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector; 4119 mask |= NVREG_IRQ_OTHER; 4120 } 4121 } 4122 4123 disable_irq_nosync_lockdep_irqsave(irq, &flags); 4124 synchronize_irq(irq); 4125 4126 if (np->recover_error) { 4127 np->recover_error = 0; 4128 netdev_info(dev, "MAC in recoverable error state\n"); 4129 if (netif_running(dev)) { 4130 netif_tx_lock_bh(dev); 4131 netif_addr_lock(dev); 4132 spin_lock(&np->lock); 4133 /* stop engines */ 4134 nv_stop_rxtx(dev); 4135 if (np->driver_data & DEV_HAS_POWER_CNTRL) 4136 nv_mac_reset(dev); 4137 nv_txrx_reset(dev); 4138 /* drain rx queue */ 4139 nv_drain_rxtx(dev); 4140 /* reinit driver view of the rx queue */ 4141 set_bufsize(dev); 4142 if (nv_init_ring(dev)) { 4143 if (!np->in_shutdown) 4144 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); 4145 } 4146 /* reinit nic view of the rx queue */ 4147 writel(np->rx_buf_sz, base + NvRegOffloadConfig); 4148 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); 4149 writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), 4150 base + NvRegRingSizes); 4151 pci_push(base); 4152 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 4153 pci_push(base); 4154 /* clear interrupts */ 4155 if (!(np->msi_flags & NV_MSI_X_ENABLED)) 4156 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); 4157 else 4158 writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus); 4159 4160 /* restart rx engine */ 4161 nv_start_rxtx(dev); 4162 spin_unlock(&np->lock); 4163 netif_addr_unlock(dev); 4164 netif_tx_unlock_bh(dev); 4165 } 4166 } 4167 4168 writel(mask, base + NvRegIrqMask); 4169 pci_push(base); 4170 4171 if (!using_multi_irqs(dev)) { 4172 np->nic_poll_irq = 0; 4173 if (nv_optimized(np)) 4174 nv_nic_irq_optimized(0, dev); 4175 else 4176 nv_nic_irq(0, dev); 4177 } else { 4178 if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) { 4179 np->nic_poll_irq &= ~NVREG_IRQ_RX_ALL; 4180 nv_nic_irq_rx(0, dev); 4181 } 4182 if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) { 4183 np->nic_poll_irq &= ~NVREG_IRQ_TX_ALL; 4184 nv_nic_irq_tx(0, dev); 4185 } 4186 if (np->nic_poll_irq & NVREG_IRQ_OTHER) { 4187 np->nic_poll_irq &= ~NVREG_IRQ_OTHER; 4188 nv_nic_irq_other(0, dev); 4189 } 4190 } 4191 4192 enable_irq_lockdep_irqrestore(irq, &flags); 4193 } 4194 4195 #ifdef CONFIG_NET_POLL_CONTROLLER 4196 static void nv_poll_controller(struct net_device *dev) 4197 { 4198 struct fe_priv *np = netdev_priv(dev); 4199 4200 nv_do_nic_poll(&np->nic_poll); 4201 } 4202 #endif 4203 4204 static void nv_do_stats_poll(struct timer_list *t) 4205 __acquires(&netdev_priv(dev)->hwstats_lock) 4206 __releases(&netdev_priv(dev)->hwstats_lock) 4207 { 4208 struct fe_priv *np = from_timer(np, t, stats_poll); 4209 struct net_device *dev = np->dev; 4210 4211 /* If lock is currently taken, the stats are being refreshed 4212 * and hence fresh enough */ 4213 if (spin_trylock(&np->hwstats_lock)) { 4214 
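        /* contention is expected only with readers such as
         * nv_get_ethtool_stats(), which refresh np->estats themselves
         * under the same hwstats_lock
         */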
nv_update_stats(dev); 4215 spin_unlock(&np->hwstats_lock); 4216 } 4217 4218 if (!np->in_shutdown) 4219 mod_timer(&np->stats_poll, 4220 round_jiffies(jiffies + STATS_INTERVAL)); 4221 } 4222 4223 static void nv_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) 4224 { 4225 struct fe_priv *np = netdev_priv(dev); 4226 strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); 4227 strlcpy(info->version, FORCEDETH_VERSION, sizeof(info->version)); 4228 strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info)); 4229 } 4230 4231 static void nv_get_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo) 4232 { 4233 struct fe_priv *np = netdev_priv(dev); 4234 wolinfo->supported = WAKE_MAGIC; 4235 4236 spin_lock_irq(&np->lock); 4237 if (np->wolenabled) 4238 wolinfo->wolopts = WAKE_MAGIC; 4239 spin_unlock_irq(&np->lock); 4240 } 4241 4242 static int nv_set_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo) 4243 { 4244 struct fe_priv *np = netdev_priv(dev); 4245 u8 __iomem *base = get_hwbase(dev); 4246 u32 flags = 0; 4247 4248 if (wolinfo->wolopts == 0) { 4249 np->wolenabled = 0; 4250 } else if (wolinfo->wolopts & WAKE_MAGIC) { 4251 np->wolenabled = 1; 4252 flags = NVREG_WAKEUPFLAGS_ENABLE; 4253 } 4254 if (netif_running(dev)) { 4255 spin_lock_irq(&np->lock); 4256 writel(flags, base + NvRegWakeUpFlags); 4257 spin_unlock_irq(&np->lock); 4258 } 4259 device_set_wakeup_enable(&np->pci_dev->dev, np->wolenabled); 4260 return 0; 4261 } 4262 4263 static int nv_get_link_ksettings(struct net_device *dev, 4264 struct ethtool_link_ksettings *cmd) 4265 { 4266 struct fe_priv *np = netdev_priv(dev); 4267 u32 speed, supported, advertising; 4268 int adv; 4269 4270 spin_lock_irq(&np->lock); 4271 cmd->base.port = PORT_MII; 4272 if (!netif_running(dev)) { 4273 /* We do not track link speed / duplex setting if the 4274 * interface is disabled. 
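 * (nv_update_linkspeed() reads MII_BMSR twice because the link-status
 * bit is latched; only the second read reflects the current state.)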
Force a link check */ 4275 if (nv_update_linkspeed(dev)) { 4276 netif_carrier_on(dev); 4277 } else { 4278 netif_carrier_off(dev); 4279 } 4280 } 4281 4282 if (netif_carrier_ok(dev)) { 4283 switch (np->linkspeed & (NVREG_LINKSPEED_MASK)) { 4284 case NVREG_LINKSPEED_10: 4285 speed = SPEED_10; 4286 break; 4287 case NVREG_LINKSPEED_100: 4288 speed = SPEED_100; 4289 break; 4290 case NVREG_LINKSPEED_1000: 4291 speed = SPEED_1000; 4292 break; 4293 default: 4294 speed = -1; 4295 break; 4296 } 4297 cmd->base.duplex = DUPLEX_HALF; 4298 if (np->duplex) 4299 cmd->base.duplex = DUPLEX_FULL; 4300 } else { 4301 speed = SPEED_UNKNOWN; 4302 cmd->base.duplex = DUPLEX_UNKNOWN; 4303 } 4304 cmd->base.speed = speed; 4305 cmd->base.autoneg = np->autoneg; 4306 4307 advertising = ADVERTISED_MII; 4308 if (np->autoneg) { 4309 advertising |= ADVERTISED_Autoneg; 4310 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); 4311 if (adv & ADVERTISE_10HALF) 4312 advertising |= ADVERTISED_10baseT_Half; 4313 if (adv & ADVERTISE_10FULL) 4314 advertising |= ADVERTISED_10baseT_Full; 4315 if (adv & ADVERTISE_100HALF) 4316 advertising |= ADVERTISED_100baseT_Half; 4317 if (adv & ADVERTISE_100FULL) 4318 advertising |= ADVERTISED_100baseT_Full; 4319 if (np->gigabit == PHY_GIGABIT) { 4320 adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ); 4321 if (adv & ADVERTISE_1000FULL) 4322 advertising |= ADVERTISED_1000baseT_Full; 4323 } 4324 } 4325 supported = (SUPPORTED_Autoneg | 4326 SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | 4327 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | 4328 SUPPORTED_MII); 4329 if (np->gigabit == PHY_GIGABIT) 4330 supported |= SUPPORTED_1000baseT_Full; 4331 4332 cmd->base.phy_address = np->phyaddr; 4333 4334 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, 4335 supported); 4336 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, 4337 advertising); 4338 4339 /* ignore maxtxpkt, maxrxpkt for now */ 4340 spin_unlock_irq(&np->lock); 4341 return 0; 4342 } 4343 4344 static int nv_set_link_ksettings(struct net_device *dev, 4345 const struct ethtool_link_ksettings *cmd) 4346 { 4347 struct fe_priv *np = netdev_priv(dev); 4348 u32 speed = cmd->base.speed; 4349 u32 advertising; 4350 4351 ethtool_convert_link_mode_to_legacy_u32(&advertising, 4352 cmd->link_modes.advertising); 4353 4354 if (cmd->base.port != PORT_MII) 4355 return -EINVAL; 4356 if (cmd->base.phy_address != np->phyaddr) { 4357 /* TODO: support switching between multiple phys. Should be 4358 * trivial, but not enabled due to lack of test hardware. */ 4359 return -EINVAL; 4360 } 4361 if (cmd->base.autoneg == AUTONEG_ENABLE) { 4362 u32 mask; 4363 4364 mask = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | 4365 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full; 4366 if (np->gigabit == PHY_GIGABIT) 4367 mask |= ADVERTISED_1000baseT_Full; 4368 4369 if ((advertising & mask) == 0) 4370 return -EINVAL; 4371 4372 } else if (cmd->base.autoneg == AUTONEG_DISABLE) { 4373 /* Note: autonegotiation disable, speed 1000 intentionally 4374 * forbidden - no one should need that. 
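 * (1000BASE-T also requires autonegotiation for master/slave
 * resolution, so a forced gigabit link would not come up anyway.)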
*/ 4375 4376 if (speed != SPEED_10 && speed != SPEED_100) 4377 return -EINVAL; 4378 if (cmd->base.duplex != DUPLEX_HALF && 4379 cmd->base.duplex != DUPLEX_FULL) 4380 return -EINVAL; 4381 } else { 4382 return -EINVAL; 4383 } 4384 4385 netif_carrier_off(dev); 4386 if (netif_running(dev)) { 4387 unsigned long flags; 4388 4389 nv_disable_irq(dev); 4390 netif_tx_lock_bh(dev); 4391 netif_addr_lock(dev); 4392 /* with plain spinlock lockdep complains */ 4393 spin_lock_irqsave(&np->lock, flags); 4394 /* stop engines */ 4395 /* FIXME: 4396 * this can take some time, and interrupts are disabled 4397 * due to spin_lock_irqsave, but let's hope no daemon 4398 * is going to change the settings very often... 4399 * Worst case: 4400 * NV_RXSTOP_DELAY1MAX + NV_TXSTOP_DELAY1MAX 4401 * + some minor delays, which is up to a second approximately 4402 */ 4403 nv_stop_rxtx(dev); 4404 spin_unlock_irqrestore(&np->lock, flags); 4405 netif_addr_unlock(dev); 4406 netif_tx_unlock_bh(dev); 4407 } 4408 4409 if (cmd->base.autoneg == AUTONEG_ENABLE) { 4410 int adv, bmcr; 4411 4412 np->autoneg = 1; 4413 4414 /* advertise only what has been requested */ 4415 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); 4416 adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); 4417 if (advertising & ADVERTISED_10baseT_Half) 4418 adv |= ADVERTISE_10HALF; 4419 if (advertising & ADVERTISED_10baseT_Full) 4420 adv |= ADVERTISE_10FULL; 4421 if (advertising & ADVERTISED_100baseT_Half) 4422 adv |= ADVERTISE_100HALF; 4423 if (advertising & ADVERTISED_100baseT_Full) 4424 adv |= ADVERTISE_100FULL; 4425 if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisements but disable tx pause */ 4426 adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; 4427 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) 4428 adv |= ADVERTISE_PAUSE_ASYM; 4429 mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv); 4430 4431 if (np->gigabit == PHY_GIGABIT) { 4432 adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ); 4433 adv &= ~ADVERTISE_1000FULL; 4434 if (advertising & ADVERTISED_1000baseT_Full) 4435 adv |= ADVERTISE_1000FULL; 4436 mii_rw(dev, np->phyaddr, MII_CTRL1000, adv); 4437 } 4438 4439 if (netif_running(dev)) 4440 netdev_info(dev, "link down\n"); 4441 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); 4442 if (np->phy_model == PHY_MODEL_MARVELL_E3016) { 4443 bmcr |= BMCR_ANENABLE; 4444 /* reset the phy in order for settings to stick, 4445 * and cause autoneg to start */ 4446 if (phy_reset(dev, bmcr)) { 4447 netdev_info(dev, "phy reset failed\n"); 4448 return -EINVAL; 4449 } 4450 } else { 4451 bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART); 4452 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr); 4453 } 4454 } else { 4455 int adv, bmcr; 4456 4457 np->autoneg = 0; 4458 4459 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); 4460 adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); 4461 if (speed == SPEED_10 && cmd->base.duplex == DUPLEX_HALF) 4462 adv |= ADVERTISE_10HALF; 4463 if (speed == SPEED_10 && cmd->base.duplex == DUPLEX_FULL) 4464 adv |= ADVERTISE_10FULL; 4465 if (speed == SPEED_100 && cmd->base.duplex == DUPLEX_HALF) 4466 adv |= ADVERTISE_100HALF; 4467 if (speed == SPEED_100 && cmd->base.duplex == DUPLEX_FULL) 4468 adv |= ADVERTISE_100FULL; 4469 np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE); 4470 if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) {/* for rx we set both advertisements but disable tx pause */ 4471 adv |= ADVERTISE_PAUSE_CAP | 
ADVERTISE_PAUSE_ASYM; 4472 np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE; 4473 } 4474 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) { 4475 adv |= ADVERTISE_PAUSE_ASYM; 4476 np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE; 4477 } 4478 mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv); 4479 np->fixed_mode = adv; 4480 4481 if (np->gigabit == PHY_GIGABIT) { 4482 adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ); 4483 adv &= ~ADVERTISE_1000FULL; 4484 mii_rw(dev, np->phyaddr, MII_CTRL1000, adv); 4485 } 4486 4487 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); 4488 bmcr &= ~(BMCR_ANENABLE|BMCR_SPEED100|BMCR_SPEED1000|BMCR_FULLDPLX); 4489 if (np->fixed_mode & (ADVERTISE_10FULL|ADVERTISE_100FULL)) 4490 bmcr |= BMCR_FULLDPLX; 4491 if (np->fixed_mode & (ADVERTISE_100HALF|ADVERTISE_100FULL)) 4492 bmcr |= BMCR_SPEED100; 4493 if (np->phy_oui == PHY_OUI_MARVELL) { 4494 /* reset the phy in order for forced mode settings to stick */ 4495 if (phy_reset(dev, bmcr)) { 4496 netdev_info(dev, "phy reset failed\n"); 4497 return -EINVAL; 4498 } 4499 } else { 4500 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr); 4501 if (netif_running(dev)) { 4502 /* Wait a bit and then reconfigure the nic. */ 4503 udelay(10); 4504 nv_linkchange(dev); 4505 } 4506 } 4507 } 4508 4509 if (netif_running(dev)) { 4510 nv_start_rxtx(dev); 4511 nv_enable_irq(dev); 4512 } 4513 4514 return 0; 4515 } 4516 4517 #define FORCEDETH_REGS_VER 1 4518 4519 static int nv_get_regs_len(struct net_device *dev) 4520 { 4521 struct fe_priv *np = netdev_priv(dev); 4522 return np->register_size; 4523 } 4524 4525 static void nv_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf) 4526 { 4527 struct fe_priv *np = netdev_priv(dev); 4528 u8 __iomem *base = get_hwbase(dev); 4529 u32 *rbuf = buf; 4530 int i; 4531 4532 regs->version = FORCEDETH_REGS_VER; 4533 spin_lock_irq(&np->lock); 4534 for (i = 0; i < np->register_size/sizeof(u32); i++) 4535 rbuf[i] = readl(base + i*sizeof(u32)); 4536 spin_unlock_irq(&np->lock); 4537 } 4538 4539 static int nv_nway_reset(struct net_device *dev) 4540 { 4541 struct fe_priv *np = netdev_priv(dev); 4542 int ret; 4543 4544 if (np->autoneg) { 4545 int bmcr; 4546 4547 netif_carrier_off(dev); 4548 if (netif_running(dev)) { 4549 nv_disable_irq(dev); 4550 netif_tx_lock_bh(dev); 4551 netif_addr_lock(dev); 4552 spin_lock(&np->lock); 4553 /* stop engines */ 4554 nv_stop_rxtx(dev); 4555 spin_unlock(&np->lock); 4556 netif_addr_unlock(dev); 4557 netif_tx_unlock_bh(dev); 4558 netdev_info(dev, "link down\n"); 4559 } 4560 4561 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); 4562 if (np->phy_model == PHY_MODEL_MARVELL_E3016) { 4563 bmcr |= BMCR_ANENABLE; 4564 /* reset the phy in order for settings to stick*/ 4565 if (phy_reset(dev, bmcr)) { 4566 netdev_info(dev, "phy reset failed\n"); 4567 return -EINVAL; 4568 } 4569 } else { 4570 bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART); 4571 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr); 4572 } 4573 4574 if (netif_running(dev)) { 4575 nv_start_rxtx(dev); 4576 nv_enable_irq(dev); 4577 } 4578 ret = 0; 4579 } else { 4580 ret = -EINVAL; 4581 } 4582 4583 return ret; 4584 } 4585 4586 static void nv_get_ringparam(struct net_device *dev, struct ethtool_ringparam* ring) 4587 { 4588 struct fe_priv *np = netdev_priv(dev); 4589 4590 ring->rx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3; 4591 ring->tx_max_pending = (np->desc_ver == DESC_VER_1) ? 
RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3; 4592 4593 ring->rx_pending = np->rx_ring_size; 4594 ring->tx_pending = np->tx_ring_size; 4595 } 4596 4597 static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ring) 4598 { 4599 struct fe_priv *np = netdev_priv(dev); 4600 u8 __iomem *base = get_hwbase(dev); 4601 u8 *rxtx_ring, *rx_skbuff, *tx_skbuff; 4602 dma_addr_t ring_addr; 4603 4604 if (ring->rx_pending < RX_RING_MIN || 4605 ring->tx_pending < TX_RING_MIN || 4606 ring->rx_mini_pending != 0 || 4607 ring->rx_jumbo_pending != 0 || 4608 (np->desc_ver == DESC_VER_1 && 4609 (ring->rx_pending > RING_MAX_DESC_VER_1 || 4610 ring->tx_pending > RING_MAX_DESC_VER_1)) || 4611 (np->desc_ver != DESC_VER_1 && 4612 (ring->rx_pending > RING_MAX_DESC_VER_2_3 || 4613 ring->tx_pending > RING_MAX_DESC_VER_2_3))) { 4614 return -EINVAL; 4615 } 4616 4617 /* allocate new rings */ 4618 if (!nv_optimized(np)) { 4619 rxtx_ring = dma_alloc_coherent(&np->pci_dev->dev, 4620 sizeof(struct ring_desc) * 4621 (ring->rx_pending + 4622 ring->tx_pending), 4623 &ring_addr, GFP_ATOMIC); 4624 } else { 4625 rxtx_ring = dma_alloc_coherent(&np->pci_dev->dev, 4626 sizeof(struct ring_desc_ex) * 4627 (ring->rx_pending + 4628 ring->tx_pending), 4629 &ring_addr, GFP_ATOMIC); 4630 } 4631 rx_skbuff = kmalloc(sizeof(struct nv_skb_map) * ring->rx_pending, GFP_KERNEL); 4632 tx_skbuff = kmalloc(sizeof(struct nv_skb_map) * ring->tx_pending, GFP_KERNEL); 4633 if (!rxtx_ring || !rx_skbuff || !tx_skbuff) { 4634 /* fall back to old rings */ 4635 if (!nv_optimized(np)) { 4636 if (rxtx_ring) 4637 dma_free_coherent(&np->pci_dev->dev, 4638 sizeof(struct ring_desc) * 4639 (ring->rx_pending + 4640 ring->tx_pending), 4641 rxtx_ring, ring_addr); 4642 } else { 4643 if (rxtx_ring) 4644 dma_free_coherent(&np->pci_dev->dev, 4645 sizeof(struct ring_desc_ex) * 4646 (ring->rx_pending + 4647 ring->tx_pending), 4648 rxtx_ring, ring_addr); 4649 } 4650 4651 kfree(rx_skbuff); 4652 kfree(tx_skbuff); 4653 goto exit; 4654 } 4655 4656 if (netif_running(dev)) { 4657 nv_disable_irq(dev); 4658 nv_napi_disable(dev); 4659 netif_tx_lock_bh(dev); 4660 netif_addr_lock(dev); 4661 spin_lock(&np->lock); 4662 /* stop engines */ 4663 nv_stop_rxtx(dev); 4664 nv_txrx_reset(dev); 4665 /* drain queues */ 4666 nv_drain_rxtx(dev); 4667 /* delete queues */ 4668 free_rings(dev); 4669 } 4670 4671 /* set new values */ 4672 np->rx_ring_size = ring->rx_pending; 4673 np->tx_ring_size = ring->tx_pending; 4674 4675 if (!nv_optimized(np)) { 4676 np->rx_ring.orig = (struct ring_desc *)rxtx_ring; 4677 np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size]; 4678 } else { 4679 np->rx_ring.ex = (struct ring_desc_ex *)rxtx_ring; 4680 np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size]; 4681 } 4682 np->rx_skb = (struct nv_skb_map *)rx_skbuff; 4683 np->tx_skb = (struct nv_skb_map *)tx_skbuff; 4684 np->ring_addr = ring_addr; 4685 4686 memset(np->rx_skb, 0, sizeof(struct nv_skb_map) * np->rx_ring_size); 4687 memset(np->tx_skb, 0, sizeof(struct nv_skb_map) * np->tx_ring_size); 4688 4689 if (netif_running(dev)) { 4690 /* reinit driver view of the queues */ 4691 set_bufsize(dev); 4692 if (nv_init_ring(dev)) { 4693 if (!np->in_shutdown) 4694 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); 4695 } 4696 4697 /* reinit nic view of the queues */ 4698 writel(np->rx_buf_sz, base + NvRegOffloadConfig); 4699 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); 4700 writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), 4701 base + NvRegRingSizes); 
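        /* ring sizes are programmed as "entries - 1": e.g. a 512-entry
         * rx ring is written as 511 in the RXSHIFT field
         */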
4702 pci_push(base); 4703 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 4704 pci_push(base); 4705 4706 /* restart engines */ 4707 nv_start_rxtx(dev); 4708 spin_unlock(&np->lock); 4709 netif_addr_unlock(dev); 4710 netif_tx_unlock_bh(dev); 4711 nv_napi_enable(dev); 4712 nv_enable_irq(dev); 4713 } 4714 return 0; 4715 exit: 4716 return -ENOMEM; 4717 } 4718 4719 static void nv_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam* pause) 4720 { 4721 struct fe_priv *np = netdev_priv(dev); 4722 4723 pause->autoneg = (np->pause_flags & NV_PAUSEFRAME_AUTONEG) != 0; 4724 pause->rx_pause = (np->pause_flags & NV_PAUSEFRAME_RX_ENABLE) != 0; 4725 pause->tx_pause = (np->pause_flags & NV_PAUSEFRAME_TX_ENABLE) != 0; 4726 } 4727 4728 static int nv_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam* pause) 4729 { 4730 struct fe_priv *np = netdev_priv(dev); 4731 int adv, bmcr; 4732 4733 if ((!np->autoneg && np->duplex == 0) || 4734 (np->autoneg && !pause->autoneg && np->duplex == 0)) { 4735 netdev_info(dev, "can not set pause settings when forced link is in half duplex\n"); 4736 return -EINVAL; 4737 } 4738 if (pause->tx_pause && !(np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)) { 4739 netdev_info(dev, "hardware does not support tx pause frames\n"); 4740 return -EINVAL; 4741 } 4742 4743 netif_carrier_off(dev); 4744 if (netif_running(dev)) { 4745 nv_disable_irq(dev); 4746 netif_tx_lock_bh(dev); 4747 netif_addr_lock(dev); 4748 spin_lock(&np->lock); 4749 /* stop engines */ 4750 nv_stop_rxtx(dev); 4751 spin_unlock(&np->lock); 4752 netif_addr_unlock(dev); 4753 netif_tx_unlock_bh(dev); 4754 } 4755 4756 np->pause_flags &= ~(NV_PAUSEFRAME_RX_REQ|NV_PAUSEFRAME_TX_REQ); 4757 if (pause->rx_pause) 4758 np->pause_flags |= NV_PAUSEFRAME_RX_REQ; 4759 if (pause->tx_pause) 4760 np->pause_flags |= NV_PAUSEFRAME_TX_REQ; 4761 4762 if (np->autoneg && pause->autoneg) { 4763 np->pause_flags |= NV_PAUSEFRAME_AUTONEG; 4764 4765 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); 4766 adv &= ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); 4767 if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisements but disable tx pause */ 4768 adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; 4769 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) 4770 adv |= ADVERTISE_PAUSE_ASYM; 4771 mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv); 4772 4773 if (netif_running(dev)) 4774 netdev_info(dev, "link down\n"); 4775 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); 4776 bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART); 4777 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr); 4778 } else { 4779 np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE); 4780 if (pause->rx_pause) 4781 np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE; 4782 if (pause->tx_pause) 4783 np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE; 4784 4785 if (!netif_running(dev)) 4786 nv_update_linkspeed(dev); 4787 else 4788 nv_update_pause(dev, np->pause_flags); 4789 } 4790 4791 if (netif_running(dev)) { 4792 nv_start_rxtx(dev); 4793 nv_enable_irq(dev); 4794 } 4795 return 0; 4796 } 4797 4798 static int nv_set_loopback(struct net_device *dev, netdev_features_t features) 4799 { 4800 struct fe_priv *np = netdev_priv(dev); 4801 unsigned long flags; 4802 u32 miicontrol; 4803 int err, retval = 0; 4804 4805 spin_lock_irqsave(&np->lock, flags); 4806 miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); 4807 if (features & NETIF_F_LOOPBACK) { 4808 if (miicontrol & BMCR_LOOPBACK) { 4809 
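            /* BMCR_LOOPBACK already set: nothing to reprogram */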
spin_unlock_irqrestore(&np->lock, flags); 4810 netdev_info(dev, "Loopback already enabled\n"); 4811 return 0; 4812 } 4813 nv_disable_irq(dev); 4814 /* Turn on loopback mode */ 4815 miicontrol |= BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000; 4816 err = mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol); 4817 if (err) { 4818 retval = PHY_ERROR; 4819 spin_unlock_irqrestore(&np->lock, flags); 4820 phy_init(dev); 4821 } else { 4822 if (netif_running(dev)) { 4823 /* Force 1000 Mbps full-duplex */ 4824 nv_force_linkspeed(dev, NVREG_LINKSPEED_1000, 4825 1); 4826 /* Force link up */ 4827 netif_carrier_on(dev); 4828 } 4829 spin_unlock_irqrestore(&np->lock, flags); 4830 netdev_info(dev, 4831 "Internal PHY loopback mode enabled.\n"); 4832 } 4833 } else { 4834 if (!(miicontrol & BMCR_LOOPBACK)) { 4835 spin_unlock_irqrestore(&np->lock, flags); 4836 netdev_info(dev, "Loopback already disabled\n"); 4837 return 0; 4838 } 4839 nv_disable_irq(dev); 4840 /* Turn off loopback */ 4841 spin_unlock_irqrestore(&np->lock, flags); 4842 netdev_info(dev, "Internal PHY loopback mode disabled.\n"); 4843 phy_init(dev); 4844 } 4845 msleep(500); 4846 spin_lock_irqsave(&np->lock, flags); 4847 nv_enable_irq(dev); 4848 spin_unlock_irqrestore(&np->lock, flags); 4849 4850 return retval; 4851 } 4852 4853 static netdev_features_t nv_fix_features(struct net_device *dev, 4854 netdev_features_t features) 4855 { 4856 /* vlan is dependent on rx checksum offload */ 4857 if (features & (NETIF_F_HW_VLAN_CTAG_TX|NETIF_F_HW_VLAN_CTAG_RX)) 4858 features |= NETIF_F_RXCSUM; 4859 4860 return features; 4861 } 4862 4863 static void nv_vlan_mode(struct net_device *dev, netdev_features_t features) 4864 { 4865 struct fe_priv *np = get_nvpriv(dev); 4866 4867 spin_lock_irq(&np->lock); 4868 4869 if (features & NETIF_F_HW_VLAN_CTAG_RX) 4870 np->txrxctl_bits |= NVREG_TXRXCTL_VLANSTRIP; 4871 else 4872 np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANSTRIP; 4873 4874 if (features & NETIF_F_HW_VLAN_CTAG_TX) 4875 np->txrxctl_bits |= NVREG_TXRXCTL_VLANINS; 4876 else 4877 np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANINS; 4878 4879 writel(np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 4880 4881 spin_unlock_irq(&np->lock); 4882 } 4883 4884 static int nv_set_features(struct net_device *dev, netdev_features_t features) 4885 { 4886 struct fe_priv *np = netdev_priv(dev); 4887 u8 __iomem *base = get_hwbase(dev); 4888 netdev_features_t changed = dev->features ^ features; 4889 int retval; 4890 4891 if ((changed & NETIF_F_LOOPBACK) && netif_running(dev)) { 4892 retval = nv_set_loopback(dev, features); 4893 if (retval != 0) 4894 return retval; 4895 } 4896 4897 if (changed & NETIF_F_RXCSUM) { 4898 spin_lock_irq(&np->lock); 4899 4900 if (features & NETIF_F_RXCSUM) 4901 np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK; 4902 else 4903 np->txrxctl_bits &= ~NVREG_TXRXCTL_RXCHECK; 4904 4905 if (netif_running(dev)) 4906 writel(np->txrxctl_bits, base + NvRegTxRxControl); 4907 4908 spin_unlock_irq(&np->lock); 4909 } 4910 4911 if (changed & (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX)) 4912 nv_vlan_mode(dev, features); 4913 4914 return 0; 4915 } 4916 4917 static int nv_get_sset_count(struct net_device *dev, int sset) 4918 { 4919 struct fe_priv *np = netdev_priv(dev); 4920 4921 switch (sset) { 4922 case ETH_SS_TEST: 4923 if (np->driver_data & DEV_HAS_TEST_EXTENDED) 4924 return NV_TEST_COUNT_EXTENDED; 4925 else 4926 return NV_TEST_COUNT_BASE; 4927 case ETH_SS_STATS: 4928 if (np->driver_data & DEV_HAS_STATISTICS_V3) 4929 return NV_DEV_STATISTICS_V3_COUNT; 4930 else if (np->driver_data & 
DEV_HAS_STATISTICS_V2) 4931 return NV_DEV_STATISTICS_V2_COUNT; 4932 else if (np->driver_data & DEV_HAS_STATISTICS_V1) 4933 return NV_DEV_STATISTICS_V1_COUNT; 4934 else 4935 return 0; 4936 default: 4937 return -EOPNOTSUPP; 4938 } 4939 } 4940 4941 static void nv_get_ethtool_stats(struct net_device *dev, 4942 struct ethtool_stats *estats, u64 *buffer) 4943 __acquires(&netdev_priv(dev)->hwstats_lock) 4944 __releases(&netdev_priv(dev)->hwstats_lock) 4945 { 4946 struct fe_priv *np = netdev_priv(dev); 4947 4948 spin_lock_bh(&np->hwstats_lock); 4949 nv_update_stats(dev); 4950 memcpy(buffer, &np->estats, 4951 nv_get_sset_count(dev, ETH_SS_STATS)*sizeof(u64)); 4952 spin_unlock_bh(&np->hwstats_lock); 4953 } 4954 4955 static int nv_link_test(struct net_device *dev) 4956 { 4957 struct fe_priv *np = netdev_priv(dev); 4958 int mii_status; 4959 4960 mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); 4961 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); 4962 4963 /* check phy link status */ 4964 if (!(mii_status & BMSR_LSTATUS)) 4965 return 0; 4966 else 4967 return 1; 4968 } 4969 4970 static int nv_register_test(struct net_device *dev) 4971 { 4972 u8 __iomem *base = get_hwbase(dev); 4973 int i = 0; 4974 u32 orig_read, new_read; 4975 4976 do { 4977 orig_read = readl(base + nv_registers_test[i].reg); 4978 4979 /* xor with mask to toggle bits */ 4980 orig_read ^= nv_registers_test[i].mask; 4981 4982 writel(orig_read, base + nv_registers_test[i].reg); 4983 4984 new_read = readl(base + nv_registers_test[i].reg); 4985 4986 if ((new_read & nv_registers_test[i].mask) != (orig_read & nv_registers_test[i].mask)) 4987 return 0; 4988 4989 /* restore original value */ 4990 orig_read ^= nv_registers_test[i].mask; 4991 writel(orig_read, base + nv_registers_test[i].reg); 4992 4993 } while (nv_registers_test[++i].reg != 0); 4994 4995 return 1; 4996 } 4997 4998 static int nv_interrupt_test(struct net_device *dev) 4999 { 5000 struct fe_priv *np = netdev_priv(dev); 5001 u8 __iomem *base = get_hwbase(dev); 5002 int ret = 1; 5003 int testcnt; 5004 u32 save_msi_flags, save_poll_interval = 0; 5005 5006 if (netif_running(dev)) { 5007 /* free current irq */ 5008 nv_free_irq(dev); 5009 save_poll_interval = readl(base+NvRegPollingInterval); 5010 } 5011 5012 /* flag to test interrupt handler */ 5013 np->intr_test = 0; 5014 5015 /* setup test irq */ 5016 save_msi_flags = np->msi_flags; 5017 np->msi_flags &= ~NV_MSI_X_VECTORS_MASK; 5018 np->msi_flags |= 0x001; /* setup 1 vector */ 5019 if (nv_request_irq(dev, 1)) 5020 return 0; 5021 5022 /* setup timer interrupt */ 5023 writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval); 5024 writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6); 5025 5026 nv_enable_hw_interrupts(dev, NVREG_IRQ_TIMER); 5027 5028 /* wait for at least one interrupt */ 5029 msleep(100); 5030 5031 spin_lock_irq(&np->lock); 5032 5033 /* flag should be set within ISR */ 5034 testcnt = np->intr_test; 5035 if (!testcnt) 5036 ret = 2; 5037 5038 nv_disable_hw_interrupts(dev, NVREG_IRQ_TIMER); 5039 if (!(np->msi_flags & NV_MSI_X_ENABLED)) 5040 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); 5041 else 5042 writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus); 5043 5044 spin_unlock_irq(&np->lock); 5045 5046 nv_free_irq(dev); 5047 5048 np->msi_flags = save_msi_flags; 5049 5050 if (netif_running(dev)) { 5051 writel(save_poll_interval, base + NvRegPollingInterval); 5052 writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6); 5053 /* restore original irq */ 5054 if (nv_request_irq(dev, 0)) 5055 return 0; 5056 } 
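    /* ret: 1 = timer interrupt seen (pass), 2 = np->intr_test was never
     * set by the ISR; 0 is returned earlier only for irq setup failures
     */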
5057 5058 return ret; 5059 } 5060 5061 static int nv_loopback_test(struct net_device *dev) 5062 { 5063 struct fe_priv *np = netdev_priv(dev); 5064 u8 __iomem *base = get_hwbase(dev); 5065 struct sk_buff *tx_skb, *rx_skb; 5066 dma_addr_t test_dma_addr; 5067 u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET); 5068 u32 flags; 5069 int len, i, pkt_len; 5070 u8 *pkt_data; 5071 u32 filter_flags = 0; 5072 u32 misc1_flags = 0; 5073 int ret = 1; 5074 5075 if (netif_running(dev)) { 5076 nv_disable_irq(dev); 5077 filter_flags = readl(base + NvRegPacketFilterFlags); 5078 misc1_flags = readl(base + NvRegMisc1); 5079 } else { 5080 nv_txrx_reset(dev); 5081 } 5082 5083 /* reinit driver view of the rx queue */ 5084 set_bufsize(dev); 5085 nv_init_ring(dev); 5086 5087 /* setup hardware for loopback */ 5088 writel(NVREG_MISC1_FORCE, base + NvRegMisc1); 5089 writel(NVREG_PFF_ALWAYS | NVREG_PFF_LOOPBACK, base + NvRegPacketFilterFlags); 5090 5091 /* reinit nic view of the rx queue */ 5092 writel(np->rx_buf_sz, base + NvRegOffloadConfig); 5093 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); 5094 writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), 5095 base + NvRegRingSizes); 5096 pci_push(base); 5097 5098 /* restart rx engine */ 5099 nv_start_rxtx(dev); 5100 5101 /* setup packet for tx */ 5102 pkt_len = ETH_DATA_LEN; 5103 tx_skb = netdev_alloc_skb(dev, pkt_len); 5104 if (!tx_skb) { 5105 ret = 0; 5106 goto out; 5107 } 5108 test_dma_addr = dma_map_single(&np->pci_dev->dev, tx_skb->data, 5109 skb_tailroom(tx_skb), 5110 DMA_FROM_DEVICE); 5111 if (unlikely(dma_mapping_error(&np->pci_dev->dev, 5112 test_dma_addr))) { 5113 dev_kfree_skb_any(tx_skb); 5114 goto out; 5115 } 5116 pkt_data = skb_put(tx_skb, pkt_len); 5117 for (i = 0; i < pkt_len; i++) 5118 pkt_data[i] = (u8)(i & 0xff); 5119 5120 if (!nv_optimized(np)) { 5121 np->tx_ring.orig[0].buf = cpu_to_le32(test_dma_addr); 5122 np->tx_ring.orig[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra); 5123 } else { 5124 np->tx_ring.ex[0].bufhigh = cpu_to_le32(dma_high(test_dma_addr)); 5125 np->tx_ring.ex[0].buflow = cpu_to_le32(dma_low(test_dma_addr)); 5126 np->tx_ring.ex[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra); 5127 } 5128 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 5129 pci_push(get_hwbase(dev)); 5130 5131 msleep(500); 5132 5133 /* check for rx of the packet */ 5134 if (!nv_optimized(np)) { 5135 flags = le32_to_cpu(np->rx_ring.orig[0].flaglen); 5136 len = nv_descr_getlength(&np->rx_ring.orig[0], np->desc_ver); 5137 5138 } else { 5139 flags = le32_to_cpu(np->rx_ring.ex[0].flaglen); 5140 len = nv_descr_getlength_ex(&np->rx_ring.ex[0], np->desc_ver); 5141 } 5142 5143 if (flags & NV_RX_AVAIL) { 5144 ret = 0; 5145 } else if (np->desc_ver == DESC_VER_1) { 5146 if (flags & NV_RX_ERROR) 5147 ret = 0; 5148 } else { 5149 if (flags & NV_RX2_ERROR) 5150 ret = 0; 5151 } 5152 5153 if (ret) { 5154 if (len != pkt_len) { 5155 ret = 0; 5156 } else { 5157 rx_skb = np->rx_skb[0].skb; 5158 for (i = 0; i < pkt_len; i++) { 5159 if (rx_skb->data[i] != (u8)(i & 0xff)) { 5160 ret = 0; 5161 break; 5162 } 5163 } 5164 } 5165 } 5166 5167 dma_unmap_single(&np->pci_dev->dev, test_dma_addr, 5168 (skb_end_pointer(tx_skb) - tx_skb->data), 5169 DMA_TO_DEVICE); 5170 dev_kfree_skb_any(tx_skb); 5171 out: 5172 /* stop engines */ 5173 nv_stop_rxtx(dev); 5174 nv_txrx_reset(dev); 5175 /* drain rx queue */ 5176 nv_drain_rxtx(dev); 
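    /* ret: 1 = frame looped back intact, 0 = tx/rx error, length
     * mismatch or payload corruption
     */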
5177 5178 if (netif_running(dev)) { 5179 writel(misc1_flags, base + NvRegMisc1); 5180 writel(filter_flags, base + NvRegPacketFilterFlags); 5181 nv_enable_irq(dev); 5182 } 5183 5184 return ret; 5185 } 5186 5187 static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64 *buffer) 5188 { 5189 struct fe_priv *np = netdev_priv(dev); 5190 u8 __iomem *base = get_hwbase(dev); 5191 int result, count; 5192 5193 count = nv_get_sset_count(dev, ETH_SS_TEST); 5194 memset(buffer, 0, count * sizeof(u64)); 5195 5196 if (!nv_link_test(dev)) { 5197 test->flags |= ETH_TEST_FL_FAILED; 5198 buffer[0] = 1; 5199 } 5200 5201 if (test->flags & ETH_TEST_FL_OFFLINE) { 5202 if (netif_running(dev)) { 5203 netif_stop_queue(dev); 5204 nv_napi_disable(dev); 5205 netif_tx_lock_bh(dev); 5206 netif_addr_lock(dev); 5207 spin_lock_irq(&np->lock); 5208 nv_disable_hw_interrupts(dev, np->irqmask); 5209 if (!(np->msi_flags & NV_MSI_X_ENABLED)) 5210 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); 5211 else 5212 writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus); 5213 /* stop engines */ 5214 nv_stop_rxtx(dev); 5215 nv_txrx_reset(dev); 5216 /* drain rx queue */ 5217 nv_drain_rxtx(dev); 5218 spin_unlock_irq(&np->lock); 5219 netif_addr_unlock(dev); 5220 netif_tx_unlock_bh(dev); 5221 } 5222 5223 if (!nv_register_test(dev)) { 5224 test->flags |= ETH_TEST_FL_FAILED; 5225 buffer[1] = 1; 5226 } 5227 5228 result = nv_interrupt_test(dev); 5229 if (result != 1) { 5230 test->flags |= ETH_TEST_FL_FAILED; 5231 buffer[2] = 1; 5232 } 5233 if (result == 0) { 5234 /* bail out */ 5235 return; 5236 } 5237 5238 if (count > NV_TEST_COUNT_BASE && !nv_loopback_test(dev)) { 5239 test->flags |= ETH_TEST_FL_FAILED; 5240 buffer[3] = 1; 5241 } 5242 5243 if (netif_running(dev)) { 5244 /* reinit driver view of the rx queue */ 5245 set_bufsize(dev); 5246 if (nv_init_ring(dev)) { 5247 if (!np->in_shutdown) 5248 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); 5249 } 5250 /* reinit nic view of the rx queue */ 5251 writel(np->rx_buf_sz, base + NvRegOffloadConfig); 5252 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); 5253 writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), 5254 base + NvRegRingSizes); 5255 pci_push(base); 5256 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 5257 pci_push(base); 5258 /* restart rx engine */ 5259 nv_start_rxtx(dev); 5260 netif_start_queue(dev); 5261 nv_napi_enable(dev); 5262 nv_enable_hw_interrupts(dev, np->irqmask); 5263 } 5264 } 5265 } 5266 5267 static void nv_get_strings(struct net_device *dev, u32 stringset, u8 *buffer) 5268 { 5269 switch (stringset) { 5270 case ETH_SS_STATS: 5271 memcpy(buffer, &nv_estats_str, nv_get_sset_count(dev, ETH_SS_STATS)*sizeof(struct nv_ethtool_str)); 5272 break; 5273 case ETH_SS_TEST: 5274 memcpy(buffer, &nv_etests_str, nv_get_sset_count(dev, ETH_SS_TEST)*sizeof(struct nv_ethtool_str)); 5275 break; 5276 } 5277 } 5278 5279 static const struct ethtool_ops ops = { 5280 .get_drvinfo = nv_get_drvinfo, 5281 .get_link = ethtool_op_get_link, 5282 .get_wol = nv_get_wol, 5283 .set_wol = nv_set_wol, 5284 .get_regs_len = nv_get_regs_len, 5285 .get_regs = nv_get_regs, 5286 .nway_reset = nv_nway_reset, 5287 .get_ringparam = nv_get_ringparam, 5288 .set_ringparam = nv_set_ringparam, 5289 .get_pauseparam = nv_get_pauseparam, 5290 .set_pauseparam = nv_set_pauseparam, 5291 .get_strings = nv_get_strings, 5292 .get_ethtool_stats = nv_get_ethtool_stats, 5293 .get_sset_count = nv_get_sset_count, 5294 
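    /* offline self-test = link + register + interrupt tests, plus
     * loopback on DEV_HAS_TEST_EXTENDED parts; see nv_self_test()
     */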
.self_test = nv_self_test, 5295 .get_ts_info = ethtool_op_get_ts_info, 5296 .get_link_ksettings = nv_get_link_ksettings, 5297 .set_link_ksettings = nv_set_link_ksettings, 5298 }; 5299 5300 /* The mgmt unit and driver use a semaphore to access the phy during init */ 5301 static int nv_mgmt_acquire_sema(struct net_device *dev) 5302 { 5303 struct fe_priv *np = netdev_priv(dev); 5304 u8 __iomem *base = get_hwbase(dev); 5305 int i; 5306 u32 tx_ctrl, mgmt_sema; 5307 5308 for (i = 0; i < 10; i++) { 5309 mgmt_sema = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_SEMA_MASK; 5310 if (mgmt_sema == NVREG_XMITCTL_MGMT_SEMA_FREE) 5311 break; 5312 msleep(500); 5313 } 5314 5315 if (mgmt_sema != NVREG_XMITCTL_MGMT_SEMA_FREE) 5316 return 0; 5317 5318 for (i = 0; i < 2; i++) { 5319 tx_ctrl = readl(base + NvRegTransmitterControl); 5320 tx_ctrl |= NVREG_XMITCTL_HOST_SEMA_ACQ; 5321 writel(tx_ctrl, base + NvRegTransmitterControl); 5322 5323 /* verify that semaphore was acquired */ 5324 tx_ctrl = readl(base + NvRegTransmitterControl); 5325 if (((tx_ctrl & NVREG_XMITCTL_HOST_SEMA_MASK) == NVREG_XMITCTL_HOST_SEMA_ACQ) && 5326 ((tx_ctrl & NVREG_XMITCTL_MGMT_SEMA_MASK) == NVREG_XMITCTL_MGMT_SEMA_FREE)) { 5327 np->mgmt_sema = 1; 5328 return 1; 5329 } else 5330 udelay(50); 5331 } 5332 5333 return 0; 5334 } 5335 5336 static void nv_mgmt_release_sema(struct net_device *dev) 5337 { 5338 struct fe_priv *np = netdev_priv(dev); 5339 u8 __iomem *base = get_hwbase(dev); 5340 u32 tx_ctrl; 5341 5342 if (np->driver_data & DEV_HAS_MGMT_UNIT) { 5343 if (np->mgmt_sema) { 5344 tx_ctrl = readl(base + NvRegTransmitterControl); 5345 tx_ctrl &= ~NVREG_XMITCTL_HOST_SEMA_ACQ; 5346 writel(tx_ctrl, base + NvRegTransmitterControl); 5347 } 5348 } 5349 } 5350 5351 5352 static int nv_mgmt_get_version(struct net_device *dev) 5353 { 5354 struct fe_priv *np = netdev_priv(dev); 5355 u8 __iomem *base = get_hwbase(dev); 5356 u32 data_ready = readl(base + NvRegTransmitterControl); 5357 u32 data_ready2 = 0; 5358 unsigned long start; 5359 int ready = 0; 5360 5361 writel(NVREG_MGMTUNITGETVERSION, base + NvRegMgmtUnitGetVersion); 5362 writel(data_ready ^ NVREG_XMITCTL_DATA_START, base + NvRegTransmitterControl); 5363 start = jiffies; 5364 while (time_before(jiffies, start + 5*HZ)) { 5365 data_ready2 = readl(base + NvRegTransmitterControl); 5366 if ((data_ready & NVREG_XMITCTL_DATA_READY) != (data_ready2 & NVREG_XMITCTL_DATA_READY)) { 5367 ready = 1; 5368 break; 5369 } 5370 schedule_timeout_uninterruptible(1); 5371 } 5372 5373 if (!ready || (data_ready2 & NVREG_XMITCTL_DATA_ERROR)) 5374 return 0; 5375 5376 np->mgmt_version = readl(base + NvRegMgmtUnitVersion) & NVREG_MGMTUNITVERSION; 5377 5378 return 1; 5379 } 5380 5381 static int nv_open(struct net_device *dev) 5382 { 5383 struct fe_priv *np = netdev_priv(dev); 5384 u8 __iomem *base = get_hwbase(dev); 5385 int ret = 1; 5386 int oom, i; 5387 u32 low; 5388 5389 /* power up phy */ 5390 mii_rw(dev, np->phyaddr, MII_BMCR, 5391 mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ) & ~BMCR_PDOWN); 5392 5393 nv_txrx_gate(dev, false); 5394 /* erase previous misconfiguration */ 5395 if (np->driver_data & DEV_HAS_POWER_CNTRL) 5396 nv_mac_reset(dev); 5397 writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA); 5398 writel(0, base + NvRegMulticastAddrB); 5399 writel(NVREG_MCASTMASKA_NONE, base + NvRegMulticastMaskA); 5400 writel(NVREG_MCASTMASKB_NONE, base + NvRegMulticastMaskB); 5401 writel(0, base + NvRegPacketFilterFlags); 5402 5403 writel(0, base + NvRegTransmitterControl); 5404 writel(0, base + 
NvRegReceiverControl); 5405 5406 writel(0, base + NvRegAdapterControl); 5407 5408 if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE) 5409 writel(NVREG_TX_PAUSEFRAME_DISABLE, base + NvRegTxPauseFrame); 5410 5411 /* initialize descriptor rings */ 5412 set_bufsize(dev); 5413 oom = nv_init_ring(dev); 5414 5415 writel(0, base + NvRegLinkSpeed); 5416 writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll); 5417 nv_txrx_reset(dev); 5418 writel(0, base + NvRegUnknownSetupReg6); 5419 5420 np->in_shutdown = 0; 5421 5422 /* give hw rings */ 5423 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); 5424 writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), 5425 base + NvRegRingSizes); 5426 5427 writel(np->linkspeed, base + NvRegLinkSpeed); 5428 if (np->desc_ver == DESC_VER_1) 5429 writel(NVREG_TX_WM_DESC1_DEFAULT, base + NvRegTxWatermark); 5430 else 5431 writel(NVREG_TX_WM_DESC2_3_DEFAULT, base + NvRegTxWatermark); 5432 writel(np->txrxctl_bits, base + NvRegTxRxControl); 5433 writel(np->vlanctl_bits, base + NvRegVlanControl); 5434 pci_push(base); 5435 writel(NVREG_TXRXCTL_BIT1|np->txrxctl_bits, base + NvRegTxRxControl); 5436 if (reg_delay(dev, NvRegUnknownSetupReg5, 5437 NVREG_UNKSETUP5_BIT31, NVREG_UNKSETUP5_BIT31, 5438 NV_SETUP5_DELAY, NV_SETUP5_DELAYMAX)) 5439 netdev_info(dev, 5440 "%s: SetupReg5, Bit 31 remained off\n", __func__); 5441 5442 writel(0, base + NvRegMIIMask); 5443 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); 5444 writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus); 5445 5446 writel(NVREG_MISC1_FORCE | NVREG_MISC1_HD, base + NvRegMisc1); 5447 writel(readl(base + NvRegTransmitterStatus), base + NvRegTransmitterStatus); 5448 writel(NVREG_PFF_ALWAYS, base + NvRegPacketFilterFlags); 5449 writel(np->rx_buf_sz, base + NvRegOffloadConfig); 5450 5451 writel(readl(base + NvRegReceiverStatus), base + NvRegReceiverStatus); 5452 5453 get_random_bytes(&low, sizeof(low)); 5454 low &= NVREG_SLOTTIME_MASK; 5455 if (np->desc_ver == DESC_VER_1) { 5456 writel(low|NVREG_SLOTTIME_DEFAULT, base + NvRegSlotTime); 5457 } else { 5458 if (!(np->driver_data & DEV_HAS_GEAR_MODE)) { 5459 /* setup legacy backoff */ 5460 writel(NVREG_SLOTTIME_LEGBF_ENABLED|NVREG_SLOTTIME_10_100_FULL|low, base + NvRegSlotTime); 5461 } else { 5462 writel(NVREG_SLOTTIME_10_100_FULL, base + NvRegSlotTime); 5463 nv_gear_backoff_reseed(dev); 5464 } 5465 } 5466 writel(NVREG_TX_DEFERRAL_DEFAULT, base + NvRegTxDeferral); 5467 writel(NVREG_RX_DEFERRAL_DEFAULT, base + NvRegRxDeferral); 5468 if (poll_interval == -1) { 5469 if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT) 5470 writel(NVREG_POLL_DEFAULT_THROUGHPUT, base + NvRegPollingInterval); 5471 else 5472 writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval); 5473 } else 5474 writel(poll_interval & 0xFFFF, base + NvRegPollingInterval); 5475 writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6); 5476 writel((np->phyaddr << NVREG_ADAPTCTL_PHYSHIFT)|NVREG_ADAPTCTL_PHYVALID|NVREG_ADAPTCTL_RUNNING, 5477 base + NvRegAdapterControl); 5478 writel(NVREG_MIISPEED_BIT8|NVREG_MIIDELAY, base + NvRegMIISpeed); 5479 writel(NVREG_MII_LINKCHANGE, base + NvRegMIIMask); 5480 if (np->wolenabled) 5481 writel(NVREG_WAKEUPFLAGS_ENABLE , base + NvRegWakeUpFlags); 5482 5483 i = readl(base + NvRegPowerState); 5484 if ((i & NVREG_POWERSTATE_POWEREDUP) == 0) 5485 writel(NVREG_POWERSTATE_POWEREDUP|i, base + NvRegPowerState); 5486 5487 pci_push(base); 5488 udelay(10); 5489 writel(readl(base + 
	writel(readl(base + NvRegPowerState) | NVREG_POWERSTATE_VALID, base + NvRegPowerState);

	nv_disable_hw_interrupts(dev, np->irqmask);
	pci_push(base);
	writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
	writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
	pci_push(base);

	if (nv_request_irq(dev, 0))
		goto out_drain;

	/* ask for interrupts */
	nv_enable_hw_interrupts(dev, np->irqmask);

	spin_lock_irq(&np->lock);
	writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
	writel(0, base + NvRegMulticastAddrB);
	writel(NVREG_MCASTMASKA_NONE, base + NvRegMulticastMaskA);
	writel(NVREG_MCASTMASKB_NONE, base + NvRegMulticastMaskB);
	writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags);
	/* One manual link speed update: Interrupts are enabled, future link
	 * speed changes cause interrupts and are handled by nv_link_irq().
	 */
	{
		u32 miistat;
		miistat = readl(base + NvRegMIIStatus);
		writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
	}
	/* set linkspeed to invalid value, thus force nv_update_linkspeed
	 * to init hw */
	np->linkspeed = 0;
	ret = nv_update_linkspeed(dev);
	nv_start_rxtx(dev);
	netif_start_queue(dev);
	nv_napi_enable(dev);

	if (ret) {
		netif_carrier_on(dev);
	} else {
		netdev_info(dev, "no link during initialization\n");
		netif_carrier_off(dev);
	}
	if (oom)
		mod_timer(&np->oom_kick, jiffies + OOM_REFILL);

	/* start statistics timer */
	if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_STATISTICS_V3))
		mod_timer(&np->stats_poll,
			  round_jiffies(jiffies + STATS_INTERVAL));

	spin_unlock_irq(&np->lock);
	/* If the loopback feature was set while the device was down, make sure
	 * that it's set correctly now.
	 */
	if (dev->features & NETIF_F_LOOPBACK)
		nv_set_loopback(dev, dev->features);

	return 0;
out_drain:
	nv_drain_rxtx(dev);
	return ret;
}

static int nv_close(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base;

	spin_lock_irq(&np->lock);
	np->in_shutdown = 1;
	spin_unlock_irq(&np->lock);
	nv_napi_disable(dev);
	synchronize_irq(np->pci_dev->irq);

	del_timer_sync(&np->oom_kick);
	del_timer_sync(&np->nic_poll);
	del_timer_sync(&np->stats_poll);

	netif_stop_queue(dev);
	spin_lock_irq(&np->lock);
	nv_update_pause(dev, 0); /* otherwise stop_tx bricks NIC */
	nv_stop_rxtx(dev);
	nv_txrx_reset(dev);

	/* disable interrupts on the nic or we will lock up */
	base = get_hwbase(dev);
	nv_disable_hw_interrupts(dev, np->irqmask);
	pci_push(base);

	spin_unlock_irq(&np->lock);

	nv_free_irq(dev);

	nv_drain_rxtx(dev);

	if (np->wolenabled || !phy_power_down) {
		nv_txrx_gate(dev, false);
		writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags);
		nv_start_rx(dev);
	} else {
		/* power down phy */
		mii_rw(dev, np->phyaddr, MII_BMCR,
		       mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ)|BMCR_PDOWN);
		nv_txrx_gate(dev, true);
	}

	/* FIXME: power down nic */

	return 0;
}

static const struct net_device_ops nv_netdev_ops = {
	.ndo_open = nv_open,
	.ndo_stop = nv_close,
	.ndo_get_stats64 = nv_get_stats64,
	.ndo_start_xmit = nv_start_xmit,
	.ndo_tx_timeout = nv_tx_timeout,
	.ndo_change_mtu = nv_change_mtu,
	.ndo_fix_features = nv_fix_features,
	.ndo_set_features = nv_set_features,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_mac_address = nv_set_mac_address,
	.ndo_set_rx_mode = nv_set_multicast,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = nv_poll_controller,
#endif
};

static const struct net_device_ops nv_netdev_ops_optimized = {
	.ndo_open = nv_open,
	.ndo_stop = nv_close,
	.ndo_get_stats64 = nv_get_stats64,
	.ndo_start_xmit = nv_start_xmit_optimized,
	.ndo_tx_timeout = nv_tx_timeout,
	.ndo_change_mtu = nv_change_mtu,
	.ndo_fix_features = nv_fix_features,
	.ndo_set_features = nv_set_features,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_mac_address = nv_set_mac_address,
	.ndo_set_rx_mode = nv_set_multicast,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = nv_poll_controller,
#endif
};

static int nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
{
	struct net_device *dev;
	struct fe_priv *np;
	unsigned long addr;
	u8 __iomem *base;
	int err, i;
	u32 powerstate, txreg;
	u32 phystate_orig = 0, phystate;
	int phyinitialized = 0;
	static int printed_version;

	if (!printed_version++)
		pr_info("Reverse Engineered nForce ethernet driver. Version %s.\n",
			FORCEDETH_VERSION);
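	/* Probe order below: allocate the netdev, map the register window,
	 * pick a descriptor format from the driver_data flags, read and
	 * sanity-check the MAC address, locate the phy, then register.
	 */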
	dev = alloc_etherdev(sizeof(struct fe_priv));
	err = -ENOMEM;
	if (!dev)
		goto out;

	np = netdev_priv(dev);
	np->dev = dev;
	np->pci_dev = pci_dev;
	spin_lock_init(&np->lock);
	spin_lock_init(&np->hwstats_lock);
	SET_NETDEV_DEV(dev, &pci_dev->dev);
	u64_stats_init(&np->swstats_rx_syncp);
	u64_stats_init(&np->swstats_tx_syncp);

	timer_setup(&np->oom_kick, nv_do_rx_refill, 0);
	timer_setup(&np->nic_poll, nv_do_nic_poll, 0);
	timer_setup(&np->stats_poll, nv_do_stats_poll, TIMER_DEFERRABLE);

	err = pci_enable_device(pci_dev);
	if (err)
		goto out_free;

	pci_set_master(pci_dev);

	err = pci_request_regions(pci_dev, DRV_NAME);
	if (err < 0)
		goto out_disable;

	if (id->driver_data & (DEV_HAS_VLAN|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V2|DEV_HAS_STATISTICS_V3))
		np->register_size = NV_PCI_REGSZ_VER3;
	else if (id->driver_data & DEV_HAS_STATISTICS_V1)
		np->register_size = NV_PCI_REGSZ_VER2;
	else
		np->register_size = NV_PCI_REGSZ_VER1;

	err = -EINVAL;
	addr = 0;
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		if (pci_resource_flags(pci_dev, i) & IORESOURCE_MEM &&
		    pci_resource_len(pci_dev, i) >= np->register_size) {
			addr = pci_resource_start(pci_dev, i);
			break;
		}
	}
	if (i == DEVICE_COUNT_RESOURCE) {
		dev_info(&pci_dev->dev, "Couldn't find register window\n");
		goto out_relreg;
	}

	/* copy of driver data */
	np->driver_data = id->driver_data;
	/* copy of device id */
	np->device_id = id->device;

	/* handle different descriptor versions */
	if (id->driver_data & DEV_HAS_HIGH_DMA) {
		/* packet format 3: supports 40-bit addressing */
		np->desc_ver = DESC_VER_3;
		np->txrxctl_bits = NVREG_TXRXCTL_DESC_3;
		if (dma_64bit) {
			if (pci_set_dma_mask(pci_dev, DMA_BIT_MASK(39)))
				dev_info(&pci_dev->dev,
					 "64-bit DMA failed, using 32-bit addressing\n");
			else
				dev->features |= NETIF_F_HIGHDMA;
			if (pci_set_consistent_dma_mask(pci_dev, DMA_BIT_MASK(39))) {
				dev_info(&pci_dev->dev,
					 "64-bit DMA (consistent) failed, using 32-bit ring buffers\n");
			}
		}
	} else if (id->driver_data & DEV_HAS_LARGEDESC) {
		/* packet format 2: supports jumbo frames */
		np->desc_ver = DESC_VER_2;
		np->txrxctl_bits = NVREG_TXRXCTL_DESC_2;
	} else {
		/* original packet format */
		np->desc_ver = DESC_VER_1;
		np->txrxctl_bits = NVREG_TXRXCTL_DESC_1;
	}

	np->pkt_limit = NV_PKTLIMIT_1;
	if (id->driver_data & DEV_HAS_LARGEDESC)
		np->pkt_limit = NV_PKTLIMIT_2;

	if (id->driver_data & DEV_HAS_CHECKSUM) {
		np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
		dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_SG |
			NETIF_F_TSO | NETIF_F_RXCSUM;
	}

	np->vlanctl_bits = 0;
	if (id->driver_data & DEV_HAS_VLAN) {
		np->vlanctl_bits = NVREG_VLANCONTROL_ENABLE;
		dev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX |
				    NETIF_F_HW_VLAN_CTAG_TX;
	}

	dev->features |= dev->hw_features;
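	/* Everything advertised in hw_features above can be toggled at
	 * runtime (e.g. via ethtool -K); dev->features is the currently
	 * active subset.
	 */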
	/* Add loopback capability to the device. */
	dev->hw_features |= NETIF_F_LOOPBACK;

	/* MTU range: 64 - 1500 or 9100 */
	dev->min_mtu = ETH_ZLEN + ETH_FCS_LEN;
	dev->max_mtu = np->pkt_limit;

	np->pause_flags = NV_PAUSEFRAME_RX_CAPABLE | NV_PAUSEFRAME_RX_REQ | NV_PAUSEFRAME_AUTONEG;
	if ((id->driver_data & DEV_HAS_PAUSEFRAME_TX_V1) ||
	    (id->driver_data & DEV_HAS_PAUSEFRAME_TX_V2) ||
	    (id->driver_data & DEV_HAS_PAUSEFRAME_TX_V3)) {
		np->pause_flags |= NV_PAUSEFRAME_TX_CAPABLE | NV_PAUSEFRAME_TX_REQ;
	}

	err = -ENOMEM;
	np->base = ioremap(addr, np->register_size);
	if (!np->base)
		goto out_relreg;

	np->rx_ring_size = RX_RING_DEFAULT;
	np->tx_ring_size = TX_RING_DEFAULT;

	if (!nv_optimized(np)) {
		np->rx_ring.orig = dma_alloc_coherent(&pci_dev->dev,
						      sizeof(struct ring_desc) *
						      (np->rx_ring_size +
						       np->tx_ring_size),
						      &np->ring_addr,
						      GFP_ATOMIC);
		if (!np->rx_ring.orig)
			goto out_unmap;
		np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
	} else {
		np->rx_ring.ex = dma_alloc_coherent(&pci_dev->dev,
						    sizeof(struct ring_desc_ex) *
						    (np->rx_ring_size +
						     np->tx_ring_size),
						    &np->ring_addr, GFP_ATOMIC);
		if (!np->rx_ring.ex)
			goto out_unmap;
		np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
	}
	np->rx_skb = kcalloc(np->rx_ring_size, sizeof(struct nv_skb_map), GFP_KERNEL);
	np->tx_skb = kcalloc(np->tx_ring_size, sizeof(struct nv_skb_map), GFP_KERNEL);
	if (!np->rx_skb || !np->tx_skb)
		goto out_freering;

	if (!nv_optimized(np))
		dev->netdev_ops = &nv_netdev_ops;
	else
		dev->netdev_ops = &nv_netdev_ops_optimized;

	netif_napi_add(dev, &np->napi, nv_napi_poll, RX_WORK_PER_LOOP);
	dev->ethtool_ops = &ops;
	dev->watchdog_timeo = NV_WATCHDOG_TIMEO;

	pci_set_drvdata(pci_dev, dev);

	/* read the mac address */
	base = get_hwbase(dev);
	np->orig_mac[0] = readl(base + NvRegMacAddrA);
	np->orig_mac[1] = readl(base + NvRegMacAddrB);

	/* check the workaround bit for correct mac address order */
	txreg = readl(base + NvRegTransmitPoll);
	if (id->driver_data & DEV_HAS_CORRECT_MACADDR) {
		/* mac address is already in correct order */
		dev->dev_addr[0] = (np->orig_mac[0] >> 0) & 0xff;
		dev->dev_addr[1] = (np->orig_mac[0] >> 8) & 0xff;
		dev->dev_addr[2] = (np->orig_mac[0] >> 16) & 0xff;
		dev->dev_addr[3] = (np->orig_mac[0] >> 24) & 0xff;
		dev->dev_addr[4] = (np->orig_mac[1] >> 0) & 0xff;
		dev->dev_addr[5] = (np->orig_mac[1] >> 8) & 0xff;
	} else if (txreg & NVREG_TRANSMITPOLL_MAC_ADDR_REV) {
		/* mac address is already in correct order */
		dev->dev_addr[0] = (np->orig_mac[0] >> 0) & 0xff;
		dev->dev_addr[1] = (np->orig_mac[0] >> 8) & 0xff;
		dev->dev_addr[2] = (np->orig_mac[0] >> 16) & 0xff;
		dev->dev_addr[3] = (np->orig_mac[0] >> 24) & 0xff;
		dev->dev_addr[4] = (np->orig_mac[1] >> 0) & 0xff;
		dev->dev_addr[5] = (np->orig_mac[1] >> 8) & 0xff;
		/*
		 * Set orig mac address back to the reversed version.
		 * This flag will be cleared during low power transition.
		 * Therefore, we should always put back the reversed address.
		 */
		np->orig_mac[0] = (dev->dev_addr[5] << 0) + (dev->dev_addr[4] << 8) +
			(dev->dev_addr[3] << 16) + (dev->dev_addr[2] << 24);
		np->orig_mac[1] = (dev->dev_addr[1] << 0) + (dev->dev_addr[0] << 8);
	} else {
		/* need to reverse mac address to correct order */
		dev->dev_addr[0] = (np->orig_mac[1] >> 8) & 0xff;
		dev->dev_addr[1] = (np->orig_mac[1] >> 0) & 0xff;
		dev->dev_addr[2] = (np->orig_mac[0] >> 24) & 0xff;
		dev->dev_addr[3] = (np->orig_mac[0] >> 16) & 0xff;
		dev->dev_addr[4] = (np->orig_mac[0] >> 8) & 0xff;
		dev->dev_addr[5] = (np->orig_mac[0] >> 0) & 0xff;
		writel(txreg|NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll);
		dev_dbg(&pci_dev->dev,
			"%s: set workaround bit for reversed mac addr\n",
			__func__);
	}

	if (!is_valid_ether_addr(dev->dev_addr)) {
		/*
		 * Bad mac address. At least one bios sets the mac address
		 * to 01:23:45:67:89:ab
		 */
		dev_err(&pci_dev->dev,
			"Invalid MAC address detected: %pM - Please complain to your hardware vendor.\n",
			dev->dev_addr);
		eth_hw_addr_random(dev);
		dev_err(&pci_dev->dev,
			"Using random MAC address: %pM\n", dev->dev_addr);
	}

	/* set mac address */
	nv_copy_mac_to_hw(dev);

	/* disable WOL */
	writel(0, base + NvRegWakeUpFlags);
	np->wolenabled = 0;
	device_set_wakeup_enable(&pci_dev->dev, false);

	if (id->driver_data & DEV_HAS_POWER_CNTRL) {

		/* take phy and nic out of low power mode */
		powerstate = readl(base + NvRegPowerState2);
		powerstate &= ~NVREG_POWERSTATE2_POWERUP_MASK;
		if ((id->driver_data & DEV_NEED_LOW_POWER_FIX) &&
		    pci_dev->revision >= 0xA3)
			powerstate |= NVREG_POWERSTATE2_POWERUP_REV_A3;
		writel(powerstate, base + NvRegPowerState2);
	}

	if (np->desc_ver == DESC_VER_1)
		np->tx_flags = NV_TX_VALID;
	else
		np->tx_flags = NV_TX2_VALID;

	np->msi_flags = 0;
	if ((id->driver_data & DEV_HAS_MSI) && msi)
		np->msi_flags |= NV_MSI_CAPABLE;

	if ((id->driver_data & DEV_HAS_MSI_X) && msix) {
		/* MSI-X has had reported issues when the irq mask is
		 * modified, as happens with napi, so it is disabled for now.
		 */
#if 0
		np->msi_flags |= NV_MSI_X_CAPABLE;
#endif
	}

	if (optimization_mode == NV_OPTIMIZATION_MODE_CPU) {
		np->irqmask = NVREG_IRQMASK_CPU;
		if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */
			np->msi_flags |= 0x0001;
	} else if (optimization_mode == NV_OPTIMIZATION_MODE_DYNAMIC &&
		   !(id->driver_data & DEV_NEED_TIMERIRQ)) {
		/* start off in throughput mode */
		np->irqmask = NVREG_IRQMASK_THROUGHPUT;
		/* remove support for msix mode */
		np->msi_flags &= ~NV_MSI_X_CAPABLE;
	} else {
		optimization_mode = NV_OPTIMIZATION_MODE_THROUGHPUT;
		np->irqmask = NVREG_IRQMASK_THROUGHPUT;
		if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */
			np->msi_flags |= 0x0003;
	}

	if (id->driver_data & DEV_NEED_TIMERIRQ)
		np->irqmask |= NVREG_IRQ_TIMER;
	if (id->driver_data & DEV_NEED_LINKTIMER) {
		np->need_linktimer = 1;
		np->link_timeout = jiffies + LINK_TIMEOUT;
	} else {
		np->need_linktimer = 0;
	}

	/* Limit the number of tx's outstanding for hw bug */
	if (id->driver_data & DEV_NEED_TX_LIMIT) {
		np->tx_limit = 1;
		if (((id->driver_data & DEV_NEED_TX_LIMIT2) == DEV_NEED_TX_LIMIT2) &&
		    pci_dev->revision >= 0xA2)
			np->tx_limit = 0;
	}
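	/* np->tx_limit throttles the number of outstanding tx descriptors in
	 * the xmit paths; revisions >= 0xA2 of the TX_LIMIT2 devices are
	 * believed not to need the workaround.
	 */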
	/* clear phy state and temporarily halt phy interrupts */
	writel(0, base + NvRegMIIMask);
	phystate = readl(base + NvRegAdapterControl);
	if (phystate & NVREG_ADAPTCTL_RUNNING) {
		phystate_orig = 1;
		phystate &= ~NVREG_ADAPTCTL_RUNNING;
		writel(phystate, base + NvRegAdapterControl);
	}
	writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);

	if (id->driver_data & DEV_HAS_MGMT_UNIT) {
		/* is the management unit running on the mac? */
		if ((readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_ST) &&
		    (readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_PHY_INIT) &&
		    nv_mgmt_acquire_sema(dev) &&
		    nv_mgmt_get_version(dev)) {
			np->mac_in_use = 1;
			if (np->mgmt_version > 0)
				np->mac_in_use = readl(base + NvRegMgmtUnitControl) & NVREG_MGMTUNITCONTROL_INUSE;
			/* has the management unit already set up the phy? */
			if (np->mac_in_use &&
			    ((readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_MASK) ==
			     NVREG_XMITCTL_SYNC_PHY_INIT)) {
				/* phy is initialized by mgmt unit */
				phyinitialized = 1;
			} else {
				/* we need to init the phy */
			}
		}
	}

	/* find a suitable phy */
	for (i = 1; i <= 32; i++) {
		int id1, id2;
		int phyaddr = i & 0x1F;

		spin_lock_irq(&np->lock);
		id1 = mii_rw(dev, phyaddr, MII_PHYSID1, MII_READ);
		spin_unlock_irq(&np->lock);
		if (id1 < 0 || id1 == 0xffff)
			continue;
		spin_lock_irq(&np->lock);
		id2 = mii_rw(dev, phyaddr, MII_PHYSID2, MII_READ);
		spin_unlock_irq(&np->lock);
		if (id2 < 0 || id2 == 0xffff)
			continue;

		np->phy_model = id2 & PHYID2_MODEL_MASK;
		id1 = (id1 & PHYID1_OUI_MASK) << PHYID1_OUI_SHFT;
		id2 = (id2 & PHYID2_OUI_MASK) >> PHYID2_OUI_SHFT;
		np->phyaddr = phyaddr;
		np->phy_oui = id1 | id2;

		/* Realtek hardcoded phy id1 to all zeros on certain phys */
		if (np->phy_oui == PHY_OUI_REALTEK2)
			np->phy_oui = PHY_OUI_REALTEK;
		/* Setup phy revision for Realtek */
		if (np->phy_oui == PHY_OUI_REALTEK && np->phy_model == PHY_MODEL_REALTEK_8211)
			np->phy_rev = mii_rw(dev, phyaddr, MII_RESV1, MII_READ) & PHY_REV_MASK;

		break;
	}
	if (i == 33) {
		dev_info(&pci_dev->dev, "open: Could not find a valid PHY\n");
		goto out_error;
	}

	if (!phyinitialized) {
		/* reset it */
		phy_init(dev);
	} else {
		/* see if it is a gigabit phy */
		u32 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
		if (mii_status & PHY_GIGABIT)
			np->gigabit = PHY_GIGABIT;
	}

	/* set default link speed settings */
	np->linkspeed = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
	np->duplex = 0;
	np->autoneg = 1;

	err = register_netdev(dev);
	if (err) {
		dev_info(&pci_dev->dev, "unable to register netdev: %d\n", err);
		goto out_error;
	}

	netif_carrier_off(dev);
	/* Some NICs freeze when TX pause is enabled while NIC is
	 * down, and this stays across warm reboots. The sequence
	 * below should be enough to recover from that state.
	 */
	nv_update_pause(dev, 0);
	nv_start_tx(dev);
	nv_stop_tx(dev);

	if (id->driver_data & DEV_HAS_VLAN)
		nv_vlan_mode(dev, dev->features);

	dev_info(&pci_dev->dev, "ifname %s, PHY OUI 0x%x @ %d, addr %pM\n",
		 dev->name, np->phy_oui, np->phyaddr, dev->dev_addr);

	dev_info(&pci_dev->dev, "%s%s%s%s%s%s%s%s%s%s%sdesc-v%u\n",
		 dev->features & NETIF_F_HIGHDMA ? "highdma " : "",
		 dev->features & (NETIF_F_IP_CSUM | NETIF_F_SG) ?
			"csum " : "",
		 dev->features & (NETIF_F_HW_VLAN_CTAG_RX |
				  NETIF_F_HW_VLAN_CTAG_TX) ?
			"vlan " : "",
		 dev->features & (NETIF_F_LOOPBACK) ?
			"loopback " : "",
		 id->driver_data & DEV_HAS_POWER_CNTRL ? "pwrctl " : "",
		 id->driver_data & DEV_HAS_MGMT_UNIT ? "mgmt " : "",
		 id->driver_data & DEV_NEED_TIMERIRQ ? "timirq " : "",
		 np->gigabit == PHY_GIGABIT ? "gbit " : "",
		 np->need_linktimer ? "lnktim " : "",
		 np->msi_flags & NV_MSI_CAPABLE ? "msi " : "",
		 np->msi_flags & NV_MSI_X_CAPABLE ? "msi-x " : "",
		 np->desc_ver);

	return 0;

out_error:
	if (phystate_orig)
		writel(phystate|NVREG_ADAPTCTL_RUNNING, base + NvRegAdapterControl);
out_freering:
	free_rings(dev);
out_unmap:
	iounmap(get_hwbase(dev));
out_relreg:
	pci_release_regions(pci_dev);
out_disable:
	pci_disable_device(pci_dev);
out_free:
	free_netdev(dev);
out:
	return err;
}

static void nv_restore_phy(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u16 phy_reserved, mii_control;

	if (np->phy_oui == PHY_OUI_REALTEK &&
	    np->phy_model == PHY_MODEL_REALTEK_8201 &&
	    phy_cross == NV_CROSSOVER_DETECTION_DISABLED) {
		mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3);
		phy_reserved = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, MII_READ);
		phy_reserved &= ~PHY_REALTEK_INIT_MSK1;
		phy_reserved |= PHY_REALTEK_INIT8;
		mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, phy_reserved);
		mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1);

		/* restart auto negotiation */
		mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
		mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE);
		mii_rw(dev, np->phyaddr, MII_BMCR, mii_control);
	}
}
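/* Teardown in nv_remove() below mirrors probe in reverse: unregister the
 * netdev, undo the MAC-address and phy workarounds so a later re-probe (or
 * kexec'd kernel) sees pristine hardware, release the mgmt semaphore, then
 * free all resources.
 */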
static void nv_restore_mac_addr(struct pci_dev *pci_dev)
{
	struct net_device *dev = pci_get_drvdata(pci_dev);
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	/* special op: write back the misordered MAC address - otherwise
	 * the next nv_probe would see a wrong address.
	 */
	writel(np->orig_mac[0], base + NvRegMacAddrA);
	writel(np->orig_mac[1], base + NvRegMacAddrB);
	writel(readl(base + NvRegTransmitPoll) & ~NVREG_TRANSMITPOLL_MAC_ADDR_REV,
	       base + NvRegTransmitPoll);
}

static void nv_remove(struct pci_dev *pci_dev)
{
	struct net_device *dev = pci_get_drvdata(pci_dev);

	unregister_netdev(dev);

	nv_restore_mac_addr(pci_dev);

	/* restore any phy related changes */
	nv_restore_phy(dev);

	nv_mgmt_release_sema(dev);

	/* free all structures */
	free_rings(dev);
	iounmap(get_hwbase(dev));
	pci_release_regions(pci_dev);
	pci_disable_device(pci_dev);
	free_netdev(dev);
}

#ifdef CONFIG_PM_SLEEP
static int nv_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	int i;

	if (netif_running(dev)) {
		/* Gross. */
		nv_close(dev);
	}
	netif_device_detach(dev);

	/* save non-pci configuration space */
	for (i = 0; i <= np->register_size/sizeof(u32); i++)
		np->saved_config_space[i] = readl(base + i*sizeof(u32));

	return 0;
}

static int nv_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	int i, rc = 0;

	/* restore non-pci configuration space */
	for (i = 0; i <= np->register_size/sizeof(u32); i++)
		writel(np->saved_config_space[i], base+i*sizeof(u32));

	if (np->driver_data & DEV_NEED_MSI_FIX)
		pci_write_config_dword(pdev, NV_MSI_PRIV_OFFSET, NV_MSI_PRIV_VALUE);

	/* restore phy state, including autoneg */
	phy_init(dev);

	netif_device_attach(dev);
	if (netif_running(dev)) {
		rc = nv_open(dev);
		nv_set_multicast(dev);
	}
	return rc;
}

static SIMPLE_DEV_PM_OPS(nv_pm_ops, nv_suspend, nv_resume);
#define NV_PM_OPS (&nv_pm_ops)

#else
#define NV_PM_OPS NULL
#endif /* CONFIG_PM_SLEEP */

#ifdef CONFIG_PM
static void nv_shutdown(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct fe_priv *np = netdev_priv(dev);

	if (netif_running(dev))
		nv_close(dev);

	/*
	 * Restore the MAC so a kernel started by kexec won't get confused.
	 * If we really go for poweroff, we must not restore the MAC,
	 * otherwise the MAC for WOL will be reversed at least on some boards.
	 */
	if (system_state != SYSTEM_POWER_OFF)
		nv_restore_mac_addr(pdev);

	pci_disable_device(pdev);
	/*
	 * Apparently it is not possible to reinitialise from D3 hot,
	 * only put the device into D3 if we really go for poweroff.
	 */
	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, np->wolenabled);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}
#else
#define nv_shutdown NULL
#endif /* CONFIG_PM */

static const struct pci_device_id pci_tbl[] = {
	{ /* nForce Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x01C3),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
	},
	{ /* nForce2 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0066),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
	},
	{ /* nForce3 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x00D6),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
	},
	{ /* nForce3 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0086),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
	},
	{ /* nForce3 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x008C),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
	},
	{ /* nForce3 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x00E6),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
	},
	{ /* nForce3 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x00DF),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
	},
	{ /* CK804 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0056),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
	},
	{ /* CK804 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0057),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
	},
	{ /* MCP04 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0037),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
	},
	{ /* MCP04 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0038),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
	},
	{ /* MCP51 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0268),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V1|DEV_NEED_LOW_POWER_FIX,
	},
	{ /* MCP51 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0269),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V1|DEV_NEED_LOW_POWER_FIX,
	},
	{ /* MCP55 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0372),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_NEED_TX_LIMIT|DEV_NEED_MSI_FIX,
	},
	{ /* MCP55 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0373),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_NEED_TX_LIMIT|DEV_NEED_MSI_FIX,
	},
	{ /* MCP61 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x03E5),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX,
	},
	{ /* MCP61 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x03E6),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX,
	},
	{ /* MCP61 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x03EE),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX,
	},
	{ /* MCP61 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x03EF),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX,
	},
	{ /* MCP65 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0450),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
	},
	{ /* MCP65 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0451),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
	},
	{ /* MCP65 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0452),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
	},
	{ /* MCP65 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0453),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
	},
	{ /* MCP67 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x054C),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
	},
	{ /* MCP67 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x054D),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
	},
	{ /* MCP67 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x054E),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
	},
	{ /* MCP67 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x054F),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
	},
	{ /* MCP73 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x07DC),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
	},
	{ /* MCP73 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x07DD),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
	},
	{ /* MCP73 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x07DE),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
	},
	{ /* MCP73 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x07DF),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
	},
	{ /* MCP77 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0760),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
	},
	{ /* MCP77 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0761),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
	},
	{ /* MCP77 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0762),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
	},
	{ /* MCP77 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0763),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
	},
	{ /* MCP79 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0AB0),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
	},
	{ /* MCP79 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0AB1),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
	},
	{ /* MCP79 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0AB2),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
	},
	{ /* MCP79 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0AB3),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
	},
	{ /* MCP89 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0D7D),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX,
	},
	{0,},
};

static struct pci_driver forcedeth_pci_driver = {
	.name = DRV_NAME,
	.id_table = pci_tbl,
	.probe = nv_probe,
	.remove = nv_remove,
	.shutdown = nv_shutdown,
	.driver.pm = NV_PM_OPS,
};

module_param(max_interrupt_work, int, 0);
MODULE_PARM_DESC(max_interrupt_work, "forcedeth maximum events handled per interrupt");
module_param(optimization_mode, int, 0);
MODULE_PARM_DESC(optimization_mode, "In throughput mode (0), every tx & rx packet will generate an interrupt. In CPU mode (1), interrupts are controlled by a timer. In dynamic mode (2), the mode toggles between throughput and CPU mode based on network load.");
module_param(poll_interval, int, 0);
MODULE_PARM_DESC(poll_interval, "Interval determines how frequently the timer interrupt is generated, as [(time_in_micro_secs * 100) / (2^10)]. Min is 0 and Max is 65535.");
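/* Worked example for the poll_interval formula above: a value of 97
 * corresponds to roughly (97 * 2^10) / 100 ~= 993 microseconds, i.e.
 * about one timer interrupt per millisecond; a 2 ms interval would need
 * (2000 * 100) / 2^10 ~= 195.
 */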
module_param(msi, int, 0);
MODULE_PARM_DESC(msi, "MSI interrupts are enabled by setting to 1 and disabled by setting to 0.");
module_param(msix, int, 0);
MODULE_PARM_DESC(msix, "MSIX interrupts are enabled by setting to 1 and disabled by setting to 0.");
module_param(dma_64bit, int, 0);
MODULE_PARM_DESC(dma_64bit, "High DMA is enabled by setting to 1 and disabled by setting to 0.");
module_param(phy_cross, int, 0);
MODULE_PARM_DESC(phy_cross, "Phy crossover detection for Realtek 8201 phy is enabled by setting to 1 and disabled by setting to 0.");
module_param(phy_power_down, int, 0);
MODULE_PARM_DESC(phy_power_down, "Power down phy and disable link when interface is down (1), or leave phy powered up (0).");
module_param(debug_tx_timeout, bool, 0);
MODULE_PARM_DESC(debug_tx_timeout,
		 "Dump tx related registers and ring when tx_timeout happens");

module_pci_driver(forcedeth_pci_driver);
MODULE_AUTHOR("Manfred Spraul <manfred@colorfullife.com>");
MODULE_DESCRIPTION("Reverse Engineered nForce ethernet driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, pci_tbl);
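/* Example usage (illustrative, using the module parameters declared above):
 * load the driver in dynamic interrupt moderation mode with MSI enabled and
 * an explicit ~2 ms timer interval:
 *
 *   modprobe forcedeth optimization_mode=2 msi=1 poll_interval=195
 */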