/*
 * forcedeth: Ethernet driver for NVIDIA nForce media access controllers.
 *
 * Note: This driver is a cleanroom reimplementation based on reverse
 * engineered documentation written by Carl-Daniel Hailfinger
 * and Andrew de Quincey.
 *
 * NVIDIA, nForce and other NVIDIA marks are trademarks or registered
 * trademarks of NVIDIA Corporation in the United States and other
 * countries.
 *
 * Copyright (C) 2003,4,5 Manfred Spraul
 * Copyright (C) 2004 Andrew de Quincey (wol support)
 * Copyright (C) 2004 Carl-Daniel Hailfinger (invalid MAC handling, insane
 *		IRQ rate fixes, bigendian fixes, cleanups, verification)
 * Copyright (c) 2004,2005,2006,2007,2008,2009 NVIDIA Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Known bugs:
 * We suspect that on some hardware no TX done interrupts are generated.
 * This means recovery from netif_stop_queue only happens if the hw timer
 * interrupt fires (100 times/second, configurable with NVREG_POLL_DEFAULT)
 * and the timer is active in the IRQMask, or if a rx packet arrives by chance.
 * If your hardware reliably generates tx done interrupts, then you can remove
 * DEV_NEED_TIMERIRQ from the driver_data flags.
 * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
 * superfluous timer interrupts from the nic.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#define FORCEDETH_VERSION		"0.64"
#define DRV_NAME			"forcedeth"

#include <linux/module.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>
#include <linux/timer.h>
#include <linux/skbuff.h>
#include <linux/mii.h>
#include <linux/random.h>
#include <linux/if_vlan.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/prefetch.h>
#include <linux/u64_stats_sync.h>
#include <linux/io.h>

#include <asm/irq.h>

#define TX_WORK_PER_LOOP  64
#define RX_WORK_PER_LOOP  64

/*
 * Hardware access:
 */

#define DEV_NEED_TIMERIRQ          0x0000001  /* set the timer irq flag in the irq mask */
#define DEV_NEED_LINKTIMER         0x0000002  /* poll link settings. Relies on the timer irq */
#define DEV_HAS_LARGEDESC          0x0000004  /* device supports jumbo frames and needs packet format 2 */
#define DEV_HAS_HIGH_DMA           0x0000008  /* device supports 64bit dma */
#define DEV_HAS_CHECKSUM           0x0000010  /* device supports tx and rx checksum offloads */
#define DEV_HAS_VLAN               0x0000020  /* device supports vlan tagging and striping */
#define DEV_HAS_MSI                0x0000040  /* device supports MSI */
#define DEV_HAS_MSI_X              0x0000080  /* device supports MSI-X */
#define DEV_HAS_POWER_CNTRL        0x0000100  /* device supports power savings */
#define DEV_HAS_STATISTICS_V1      0x0000200  /* device supports hw statistics version 1 */
#define DEV_HAS_STATISTICS_V2      0x0000400  /* device supports hw statistics version 2 */
#define DEV_HAS_STATISTICS_V3      0x0000800  /* device supports hw statistics version 3 */
#define DEV_HAS_STATISTICS_V12     0x0000600  /* device supports hw statistics version 1 and 2 */
#define DEV_HAS_STATISTICS_V123    0x0000e00  /* device supports hw statistics version 1, 2, and 3 */
#define DEV_HAS_TEST_EXTENDED      0x0001000  /* device supports extended diagnostic test */
#define DEV_HAS_MGMT_UNIT          0x0002000  /* device supports management unit */
#define DEV_HAS_CORRECT_MACADDR    0x0004000  /* device supports correct mac address order */
#define DEV_HAS_COLLISION_FIX      0x0008000  /* device supports tx collision fix */
#define DEV_HAS_PAUSEFRAME_TX_V1   0x0010000  /* device supports tx pause frames version 1 */
#define DEV_HAS_PAUSEFRAME_TX_V2   0x0020000  /* device supports tx pause frames version 2 */
#define DEV_HAS_PAUSEFRAME_TX_V3   0x0040000  /* device supports tx pause frames version 3 */
#define DEV_NEED_TX_LIMIT          0x0080000  /* device needs to limit tx */
#define DEV_NEED_TX_LIMIT2         0x0180000  /* device needs to limit tx, except for some revs */
#define DEV_HAS_GEAR_MODE          0x0200000  /* device supports gear mode */
#define DEV_NEED_PHY_INIT_FIX      0x0400000  /* device needs specific phy workaround */
#define DEV_NEED_LOW_POWER_FIX     0x0800000  /* device needs special power up workaround */
#define DEV_NEED_MSI_FIX           0x1000000  /* device needs msi workaround */
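/*
 * Editorial note: the combined statistics flags are simply ORed single
 * flags, e.g. DEV_HAS_STATISTICS_V12 = DEV_HAS_STATISTICS_V1 |
 * DEV_HAS_STATISTICS_V2 (0x0000200 | 0x0000400 = 0x0000600), and
 * DEV_HAS_STATISTICS_V123 = 0x0000e00 additionally ORs in
 * DEV_HAS_STATISTICS_V3. Likewise DEV_NEED_TX_LIMIT2 (0x0180000)
 * includes the DEV_NEED_TX_LIMIT bit.
 */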
enum {
	NvRegIrqStatus = 0x000,
#define NVREG_IRQSTAT_MIIEVENT	0x040
#define NVREG_IRQSTAT_MASK		0x83ff
	NvRegIrqMask = 0x004,
#define NVREG_IRQ_RX_ERROR		0x0001
#define NVREG_IRQ_RX			0x0002
#define NVREG_IRQ_RX_NOBUF		0x0004
#define NVREG_IRQ_TX_ERR		0x0008
#define NVREG_IRQ_TX_OK			0x0010
#define NVREG_IRQ_TIMER			0x0020
#define NVREG_IRQ_LINK			0x0040
#define NVREG_IRQ_RX_FORCED		0x0080
#define NVREG_IRQ_TX_FORCED		0x0100
#define NVREG_IRQ_RECOVER_ERROR		0x8200
#define NVREG_IRQMASK_THROUGHPUT	0x00df
#define NVREG_IRQMASK_CPU		0x0060
#define NVREG_IRQ_TX_ALL		(NVREG_IRQ_TX_ERR|NVREG_IRQ_TX_OK|NVREG_IRQ_TX_FORCED)
#define NVREG_IRQ_RX_ALL		(NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_RX_FORCED)
#define NVREG_IRQ_OTHER			(NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_RECOVER_ERROR)

	NvRegUnknownSetupReg6 = 0x008,
#define NVREG_UNKSETUP6_VAL		3

/*
 * NVREG_POLL_DEFAULT is the interval length of the timer source on the nic
 * NVREG_POLL_DEFAULT=97 would result in an interval length of 1 ms
 */
	NvRegPollingInterval = 0x00c,
#define NVREG_POLL_DEFAULT_THROUGHPUT	65535 /* backup tx cleanup if loop max reached */
#define NVREG_POLL_DEFAULT_CPU	13
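/*
 * Editorial worked example for the comment above: with the formula
 * value = (interval_in_usec * 100) / 2^10 documented at the
 * poll_interval module parameter further down, a 1000 usec (1 ms)
 * interval gives (1000 * 100) / 1024 = 97.6, matching
 * NVREG_POLL_DEFAULT = 97 above.
 */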
	NvRegMSIMap0 = 0x020,
	NvRegMSIMap1 = 0x024,
	NvRegMSIIrqMask = 0x030,
#define NVREG_MSI_VECTOR_0_ENABLED 0x01

	NvRegMisc1 = 0x080,
#define NVREG_MISC1_PAUSE_TX	0x01
#define NVREG_MISC1_HD		0x02
#define NVREG_MISC1_FORCE	0x3b0f3c

	NvRegMacReset = 0x34,
#define NVREG_MAC_RESET_ASSERT	0x0F3
	NvRegTransmitterControl = 0x084,
#define NVREG_XMITCTL_START	0x01
#define NVREG_XMITCTL_MGMT_ST	0x40000000
#define NVREG_XMITCTL_SYNC_MASK		0x000f0000
#define NVREG_XMITCTL_SYNC_NOT_READY	0x0
#define NVREG_XMITCTL_SYNC_PHY_INIT	0x00040000
#define NVREG_XMITCTL_MGMT_SEMA_MASK	0x00000f00
#define NVREG_XMITCTL_MGMT_SEMA_FREE	0x0
#define NVREG_XMITCTL_HOST_SEMA_MASK	0x0000f000
#define NVREG_XMITCTL_HOST_SEMA_ACQ	0x0000f000
#define NVREG_XMITCTL_HOST_LOADED	0x00004000
#define NVREG_XMITCTL_TX_PATH_EN	0x01000000
#define NVREG_XMITCTL_DATA_START	0x00100000
#define NVREG_XMITCTL_DATA_READY	0x00010000
#define NVREG_XMITCTL_DATA_ERROR	0x00020000
	NvRegTransmitterStatus = 0x088,
#define NVREG_XMITSTAT_BUSY	0x01

	NvRegPacketFilterFlags = 0x8c,
#define NVREG_PFF_PAUSE_RX	0x08
#define NVREG_PFF_ALWAYS	0x7F0000
#define NVREG_PFF_PROMISC	0x80
#define NVREG_PFF_MYADDR	0x20
#define NVREG_PFF_LOOPBACK	0x10

	NvRegOffloadConfig = 0x90,
#define NVREG_OFFLOAD_HOMEPHY	0x601
#define NVREG_OFFLOAD_NORMAL	RX_NIC_BUFSIZE
	NvRegReceiverControl = 0x094,
#define NVREG_RCVCTL_START	0x01
#define NVREG_RCVCTL_RX_PATH_EN	0x01000000
	NvRegReceiverStatus = 0x98,
#define NVREG_RCVSTAT_BUSY	0x01

	NvRegSlotTime = 0x9c,
#define NVREG_SLOTTIME_LEGBF_ENABLED	0x80000000
#define NVREG_SLOTTIME_10_100_FULL	0x00007f00
#define NVREG_SLOTTIME_1000_FULL	0x0003ff00
#define NVREG_SLOTTIME_HALF		0x0000ff00
#define NVREG_SLOTTIME_DEFAULT		0x00007f00
#define NVREG_SLOTTIME_MASK		0x000000ff

	NvRegTxDeferral = 0xA0,
#define NVREG_TX_DEFERRAL_DEFAULT		0x15050f
#define NVREG_TX_DEFERRAL_RGMII_10_100		0x16070f
#define NVREG_TX_DEFERRAL_RGMII_1000		0x14050f
#define NVREG_TX_DEFERRAL_RGMII_STRETCH_10	0x16190f
#define NVREG_TX_DEFERRAL_RGMII_STRETCH_100	0x16300f
#define NVREG_TX_DEFERRAL_MII_STRETCH		0x152000
	NvRegRxDeferral = 0xA4,
#define NVREG_RX_DEFERRAL_DEFAULT	0x16
	NvRegMacAddrA = 0xA8,
	NvRegMacAddrB = 0xAC,
	NvRegMulticastAddrA = 0xB0,
#define NVREG_MCASTADDRA_FORCE	0x01
	NvRegMulticastAddrB = 0xB4,
	NvRegMulticastMaskA = 0xB8,
#define NVREG_MCASTMASKA_NONE		0xffffffff
	NvRegMulticastMaskB = 0xBC,
#define NVREG_MCASTMASKB_NONE		0xffff

	NvRegPhyInterface = 0xC0,
#define PHY_RGMII		0x10000000
	NvRegBackOffControl = 0xC4,
#define NVREG_BKOFFCTRL_DEFAULT			0x70000000
#define NVREG_BKOFFCTRL_SEED_MASK		0x000003ff
#define NVREG_BKOFFCTRL_SELECT			24
#define NVREG_BKOFFCTRL_GEAR			12

	NvRegTxRingPhysAddr = 0x100,
	NvRegRxRingPhysAddr = 0x104,
	NvRegRingSizes = 0x108,
#define NVREG_RINGSZ_TXSHIFT 0
#define NVREG_RINGSZ_RXSHIFT 16
	NvRegTransmitPoll = 0x10c,
#define NVREG_TRANSMITPOLL_MAC_ADDR_REV	0x00008000
	NvRegLinkSpeed = 0x110,
#define NVREG_LINKSPEED_FORCE 0x10000
#define NVREG_LINKSPEED_10	1000
#define NVREG_LINKSPEED_100	100
#define NVREG_LINKSPEED_1000	50
#define NVREG_LINKSPEED_MASK	(0xFFF)
	NvRegUnknownSetupReg5 = 0x130,
#define NVREG_UNKSETUP5_BIT31	(1<<31)
	NvRegTxWatermark = 0x13c,
#define NVREG_TX_WM_DESC1_DEFAULT	0x0200010
#define NVREG_TX_WM_DESC2_3_DEFAULT	0x1e08000
#define NVREG_TX_WM_DESC2_3_1000	0xfe08000
	NvRegTxRxControl = 0x144,
#define NVREG_TXRXCTL_KICK	0x0001
#define NVREG_TXRXCTL_BIT1	0x0002
#define NVREG_TXRXCTL_BIT2	0x0004
#define NVREG_TXRXCTL_IDLE	0x0008
#define NVREG_TXRXCTL_RESET	0x0010
#define NVREG_TXRXCTL_RXCHECK	0x0400
#define NVREG_TXRXCTL_DESC_1	0
#define NVREG_TXRXCTL_DESC_2	0x002100
#define NVREG_TXRXCTL_DESC_3	0xc02200
#define NVREG_TXRXCTL_VLANSTRIP 0x00040
#define NVREG_TXRXCTL_VLANINS	0x00080
	NvRegTxRingPhysAddrHigh = 0x148,
	NvRegRxRingPhysAddrHigh = 0x14C,
	NvRegTxPauseFrame = 0x170,
#define NVREG_TX_PAUSEFRAME_DISABLE	0x0fff0080
#define NVREG_TX_PAUSEFRAME_ENABLE_V1	0x01800010
#define NVREG_TX_PAUSEFRAME_ENABLE_V2	0x056003f0
#define NVREG_TX_PAUSEFRAME_ENABLE_V3	0x09f00880
	NvRegTxPauseFrameLimit = 0x174,
#define NVREG_TX_PAUSEFRAMELIMIT_ENABLE	0x00010000
	NvRegMIIStatus = 0x180,
#define NVREG_MIISTAT_ERROR		0x0001
#define NVREG_MIISTAT_LINKCHANGE	0x0008
#define NVREG_MIISTAT_MASK_RW		0x0007
#define NVREG_MIISTAT_MASK_ALL		0x000f
	NvRegMIIMask = 0x184,
#define NVREG_MII_LINKCHANGE		0x0008

	NvRegAdapterControl = 0x188,
#define NVREG_ADAPTCTL_START	0x02
#define NVREG_ADAPTCTL_LINKUP	0x04
#define NVREG_ADAPTCTL_PHYVALID	0x40000
#define NVREG_ADAPTCTL_RUNNING	0x100000
#define NVREG_ADAPTCTL_PHYSHIFT	24
	NvRegMIISpeed = 0x18c,
#define NVREG_MIISPEED_BIT8	(1<<8)
#define NVREG_MIIDELAY	5
	NvRegMIIControl = 0x190,
#define NVREG_MIICTL_INUSE	0x08000
#define NVREG_MIICTL_WRITE	0x00400
#define NVREG_MIICTL_ADDRSHIFT	5
	NvRegMIIData = 0x194,
	NvRegTxUnicast = 0x1a0,
	NvRegTxMulticast = 0x1a4,
	NvRegTxBroadcast = 0x1a8,
	NvRegWakeUpFlags = 0x200,
#define NVREG_WAKEUPFLAGS_VAL		0x7770
#define NVREG_WAKEUPFLAGS_BUSYSHIFT	24
#define NVREG_WAKEUPFLAGS_ENABLESHIFT	16
#define NVREG_WAKEUPFLAGS_D3SHIFT	12
#define NVREG_WAKEUPFLAGS_D2SHIFT	8
#define NVREG_WAKEUPFLAGS_D1SHIFT	4
#define NVREG_WAKEUPFLAGS_D0SHIFT	0
#define NVREG_WAKEUPFLAGS_ACCEPT_MAGPAT		0x01
#define NVREG_WAKEUPFLAGS_ACCEPT_WAKEUPPAT	0x02
#define NVREG_WAKEUPFLAGS_ACCEPT_LINKCHANGE	0x04
#define NVREG_WAKEUPFLAGS_ENABLE	0x1111

	NvRegMgmtUnitGetVersion = 0x204,
#define NVREG_MGMTUNITGETVERSION	0x01
	NvRegMgmtUnitVersion = 0x208,
#define NVREG_MGMTUNITVERSION		0x08
	NvRegPowerCap = 0x268,
#define NVREG_POWERCAP_D3SUPP	(1<<30)
#define NVREG_POWERCAP_D2SUPP	(1<<26)
#define NVREG_POWERCAP_D1SUPP	(1<<25)
	NvRegPowerState = 0x26c,
#define NVREG_POWERSTATE_POWEREDUP	0x8000
#define NVREG_POWERSTATE_VALID		0x0100
#define NVREG_POWERSTATE_MASK		0x0003
#define NVREG_POWERSTATE_D0		0x0000
#define NVREG_POWERSTATE_D1		0x0001
#define NVREG_POWERSTATE_D2		0x0002
#define NVREG_POWERSTATE_D3		0x0003
	NvRegMgmtUnitControl = 0x278,
#define NVREG_MGMTUNITCONTROL_INUSE	0x20000
	NvRegTxCnt = 0x280,
	NvRegTxZeroReXmt = 0x284,
	NvRegTxOneReXmt = 0x288,
	NvRegTxManyReXmt = 0x28c,
	NvRegTxLateCol = 0x290,
	NvRegTxUnderflow = 0x294,
	NvRegTxLossCarrier = 0x298,
	NvRegTxExcessDef = 0x29c,
	NvRegTxRetryErr = 0x2a0,
	NvRegRxFrameErr = 0x2a4,
	NvRegRxExtraByte = 0x2a8,
	NvRegRxLateCol = 0x2ac,
	NvRegRxRunt = 0x2b0,
	NvRegRxFrameTooLong = 0x2b4,
	NvRegRxOverflow = 0x2b8,
	NvRegRxFCSErr = 0x2bc,
	NvRegRxFrameAlignErr = 0x2c0,
	NvRegRxLenErr = 0x2c4,
	NvRegRxUnicast = 0x2c8,
	NvRegRxMulticast = 0x2cc,
	NvRegRxBroadcast = 0x2d0,
	NvRegTxDef = 0x2d4,
	NvRegTxFrame = 0x2d8,
	NvRegRxCnt = 0x2dc,
	NvRegTxPause = 0x2e0,
	NvRegRxPause = 0x2e4,
	NvRegRxDropFrame = 0x2e8,
	NvRegVlanControl = 0x300,
#define NVREG_VLANCONTROL_ENABLE	0x2000
	NvRegMSIXMap0 = 0x3e0,
	NvRegMSIXMap1 = 0x3e4,
	NvRegMSIXIrqStatus = 0x3f0,

	NvRegPowerState2 = 0x600,
#define NVREG_POWERSTATE2_POWERUP_MASK		0x0F15
#define NVREG_POWERSTATE2_POWERUP_REV_A3	0x0001
#define NVREG_POWERSTATE2_PHY_RESET		0x0004
#define NVREG_POWERSTATE2_GATE_CLOCKS		0x0F00
};
/* Big endian: should work, but is untested */
struct ring_desc {
	__le32 buf;
	__le32 flaglen;
};

struct ring_desc_ex {
	__le32 bufhigh;
	__le32 buflow;
	__le32 txvlan;
	__le32 flaglen;
};

union ring_type {
	struct ring_desc *orig;
	struct ring_desc_ex *ex;
};

#define FLAG_MASK_V1 0xffff0000
#define FLAG_MASK_V2 0xffffc000
#define LEN_MASK_V1 (0xffffffff ^ FLAG_MASK_V1)
#define LEN_MASK_V2 (0xffffffff ^ FLAG_MASK_V2)
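/*
 * Editorial note: each descriptor packs the buffer length and the
 * status/command bits into the single little-endian word 'flaglen'.
 * Extracting both halves for a v1 descriptor looks like:
 *
 *	u32 flaglen = le32_to_cpu(desc->flaglen);
 *	u32 len   = flaglen & LEN_MASK_V1;	// buffer/packet length
 *	u32 flags = flaglen & FLAG_MASK_V1;	// NV_TX_ / NV_RX_ bits
 *
 * nv_descr_getlength() further down implements the length half of this
 * for both mask versions.
 */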
#define NV_TX_LASTPACKET	(1<<16)
#define NV_TX_RETRYERROR	(1<<19)
#define NV_TX_RETRYCOUNT_MASK	(0xF<<20)
#define NV_TX_FORCED_INTERRUPT	(1<<24)
#define NV_TX_DEFERRED		(1<<26)
#define NV_TX_CARRIERLOST	(1<<27)
#define NV_TX_LATECOLLISION	(1<<28)
#define NV_TX_UNDERFLOW		(1<<29)
#define NV_TX_ERROR		(1<<30)
#define NV_TX_VALID		(1<<31)

#define NV_TX2_LASTPACKET	(1<<29)
#define NV_TX2_RETRYERROR	(1<<18)
#define NV_TX2_RETRYCOUNT_MASK	(0xF<<19)
#define NV_TX2_FORCED_INTERRUPT	(1<<30)
#define NV_TX2_DEFERRED		(1<<25)
#define NV_TX2_CARRIERLOST	(1<<26)
#define NV_TX2_LATECOLLISION	(1<<27)
#define NV_TX2_UNDERFLOW	(1<<28)
/* error and valid are the same for both */
#define NV_TX2_ERROR		(1<<30)
#define NV_TX2_VALID		(1<<31)
#define NV_TX2_TSO		(1<<28)
#define NV_TX2_TSO_SHIFT	14
#define NV_TX2_TSO_MAX_SHIFT	14
#define NV_TX2_TSO_MAX_SIZE	(1<<NV_TX2_TSO_MAX_SHIFT)
#define NV_TX2_CHECKSUM_L3	(1<<27)
#define NV_TX2_CHECKSUM_L4	(1<<26)

#define NV_TX3_VLAN_TAG_PRESENT (1<<18)

#define NV_RX_DESCRIPTORVALID	(1<<16)
#define NV_RX_MISSEDFRAME	(1<<17)
#define NV_RX_SUBTRACT1		(1<<18)
#define NV_RX_ERROR1		(1<<23)
#define NV_RX_ERROR2		(1<<24)
#define NV_RX_ERROR3		(1<<25)
#define NV_RX_ERROR4		(1<<26)
#define NV_RX_CRCERR		(1<<27)
#define NV_RX_OVERFLOW		(1<<28)
#define NV_RX_FRAMINGERR	(1<<29)
#define NV_RX_ERROR		(1<<30)
#define NV_RX_AVAIL		(1<<31)
#define NV_RX_ERROR_MASK	(NV_RX_ERROR1|NV_RX_ERROR2|NV_RX_ERROR3|NV_RX_ERROR4|NV_RX_CRCERR|NV_RX_OVERFLOW|NV_RX_FRAMINGERR)

#define NV_RX2_CHECKSUMMASK	(0x1C000000)
#define NV_RX2_CHECKSUM_IP	(0x10000000)
#define NV_RX2_CHECKSUM_IP_TCP	(0x14000000)
#define NV_RX2_CHECKSUM_IP_UDP	(0x18000000)
#define NV_RX2_DESCRIPTORVALID	(1<<29)
#define NV_RX2_SUBTRACT1	(1<<25)
#define NV_RX2_ERROR1		(1<<18)
#define NV_RX2_ERROR2		(1<<19)
#define NV_RX2_ERROR3		(1<<20)
#define NV_RX2_ERROR4		(1<<21)
#define NV_RX2_CRCERR		(1<<22)
#define NV_RX2_OVERFLOW		(1<<23)
#define NV_RX2_FRAMINGERR	(1<<24)
/* error and avail are the same for both */
#define NV_RX2_ERROR		(1<<30)
#define NV_RX2_AVAIL		(1<<31)
#define NV_RX2_ERROR_MASK	(NV_RX2_ERROR1|NV_RX2_ERROR2|NV_RX2_ERROR3|NV_RX2_ERROR4|NV_RX2_CRCERR|NV_RX2_OVERFLOW|NV_RX2_FRAMINGERR)

#define NV_RX3_VLAN_TAG_PRESENT (1<<16)
#define NV_RX3_VLAN_TAG_MASK	(0x0000FFFF)
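/*
 * Editorial note: the NV_RX2_CHECKSUM_IP / _IP_TCP / _IP_UDP values
 * above are encodings of the 3-bit field NV_RX2_CHECKSUMMASK (bits
 * 26-28), not independent flags; a receive path compares
 * (flaglen & NV_RX2_CHECKSUMMASK) against one of these values.
 */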
/* Miscellaneous hardware related defines: */
#define NV_PCI_REGSZ_VER1	0x270
#define NV_PCI_REGSZ_VER2	0x2d4
#define NV_PCI_REGSZ_VER3	0x604
#define NV_PCI_REGSZ_MAX	0x604

/* various timeout delays: all in usec */
#define NV_TXRX_RESET_DELAY	4
#define NV_TXSTOP_DELAY1	10
#define NV_TXSTOP_DELAY1MAX	500000
#define NV_TXSTOP_DELAY2	100
#define NV_RXSTOP_DELAY1	10
#define NV_RXSTOP_DELAY1MAX	500000
#define NV_RXSTOP_DELAY2	100
#define NV_SETUP5_DELAY		5
#define NV_SETUP5_DELAYMAX	50000
#define NV_POWERUP_DELAY	5
#define NV_POWERUP_DELAYMAX	5000
#define NV_MIIBUSY_DELAY	50
#define NV_MIIPHY_DELAY		10
#define NV_MIIPHY_DELAYMAX	10000
#define NV_MAC_RESET_DELAY	64

#define NV_WAKEUPPATTERNS	5
#define NV_WAKEUPMASKENTRIES	4

/* General driver defaults */
#define NV_WATCHDOG_TIMEO	(5*HZ)

#define RX_RING_DEFAULT		512
#define TX_RING_DEFAULT		256
#define RX_RING_MIN		128
#define TX_RING_MIN		64
#define RING_MAX_DESC_VER_1	1024
#define RING_MAX_DESC_VER_2_3	16384

/* rx/tx mac addr + type + vlan + align + slack*/
#define NV_RX_HEADERS		(64)
/* even more slack. */
#define NV_RX_ALLOC_PAD		(64)

/* maximum mtu size */
#define NV_PKTLIMIT_1	ETH_DATA_LEN	/* hard limit not known */
#define NV_PKTLIMIT_2	9100	/* Actual limit according to NVidia: 9202 */

#define OOM_REFILL	(1+HZ/20)
#define POLL_WAIT	(1+HZ/100)
#define LINK_TIMEOUT	(3*HZ)
#define STATS_INTERVAL	(10*HZ)

/*
 * desc_ver values:
 * The nic supports three different descriptor types:
 * - DESC_VER_1: Original
 * - DESC_VER_2: support for jumbo frames.
 * - DESC_VER_3: 64-bit format.
 */
#define DESC_VER_1	1
#define DESC_VER_2	2
#define DESC_VER_3	3
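/*
 * Editorial note: DESC_VER_1 and DESC_VER_2 use the two-word
 * struct ring_desc layout above, while DESC_VER_3 uses the four-word
 * struct ring_desc_ex layout with 64-bit buffer addresses;
 * nv_optimized() further down keys off exactly this distinction.
 */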
/* PHY defines */
#define PHY_OUI_MARVELL		0x5043
#define PHY_OUI_CICADA		0x03f1
#define PHY_OUI_VITESSE		0x01c1
#define PHY_OUI_REALTEK		0x0732
#define PHY_OUI_REALTEK2	0x0020
#define PHYID1_OUI_MASK		0x03ff
#define PHYID1_OUI_SHFT		6
#define PHYID2_OUI_MASK		0xfc00
#define PHYID2_OUI_SHFT		10
#define PHYID2_MODEL_MASK		0x03f0
#define PHY_MODEL_REALTEK_8211		0x0110
#define PHY_REV_MASK			0x0001
#define PHY_REV_REALTEK_8211B		0x0000
#define PHY_REV_REALTEK_8211C		0x0001
#define PHY_MODEL_REALTEK_8201		0x0200
#define PHY_MODEL_MARVELL_E3016		0x0220
#define PHY_MARVELL_E3016_INITMASK	0x0300
#define PHY_CICADA_INIT1	0x0f000
#define PHY_CICADA_INIT2	0x0e00
#define PHY_CICADA_INIT3	0x01000
#define PHY_CICADA_INIT4	0x0200
#define PHY_CICADA_INIT5	0x0004
#define PHY_CICADA_INIT6	0x02000
#define PHY_VITESSE_INIT_REG1	0x1f
#define PHY_VITESSE_INIT_REG2	0x10
#define PHY_VITESSE_INIT_REG3	0x11
#define PHY_VITESSE_INIT_REG4	0x12
#define PHY_VITESSE_INIT_MSK1	0xc
#define PHY_VITESSE_INIT_MSK2	0x0180
#define PHY_VITESSE_INIT1	0x52b5
#define PHY_VITESSE_INIT2	0xaf8a
#define PHY_VITESSE_INIT3	0x8
#define PHY_VITESSE_INIT4	0x8f8a
#define PHY_VITESSE_INIT5	0xaf86
#define PHY_VITESSE_INIT6	0x8f86
#define PHY_VITESSE_INIT7	0xaf82
#define PHY_VITESSE_INIT8	0x0100
#define PHY_VITESSE_INIT9	0x8f82
#define PHY_VITESSE_INIT10	0x0
#define PHY_REALTEK_INIT_REG1	0x1f
#define PHY_REALTEK_INIT_REG2	0x19
#define PHY_REALTEK_INIT_REG3	0x13
#define PHY_REALTEK_INIT_REG4	0x14
#define PHY_REALTEK_INIT_REG5	0x18
#define PHY_REALTEK_INIT_REG6	0x11
#define PHY_REALTEK_INIT_REG7	0x01
#define PHY_REALTEK_INIT1	0x0000
#define PHY_REALTEK_INIT2	0x8e00
#define PHY_REALTEK_INIT3	0x0001
#define PHY_REALTEK_INIT4	0xad17
#define PHY_REALTEK_INIT5	0xfb54
#define PHY_REALTEK_INIT6	0xf5c7
#define PHY_REALTEK_INIT7	0x1000
#define PHY_REALTEK_INIT8	0x0003
#define PHY_REALTEK_INIT9	0x0008
#define PHY_REALTEK_INIT10	0x0005
#define PHY_REALTEK_INIT11	0x0200
#define PHY_REALTEK_INIT_MSK1	0x0003

#define PHY_GIGABIT	0x0100

#define PHY_TIMEOUT	0x1
#define PHY_ERROR	0x2

#define PHY_100		0x1
#define PHY_1000	0x2
#define PHY_HALF	0x100

#define NV_PAUSEFRAME_RX_CAPABLE 0x0001
#define NV_PAUSEFRAME_TX_CAPABLE 0x0002
#define NV_PAUSEFRAME_RX_ENABLE  0x0004
#define NV_PAUSEFRAME_TX_ENABLE  0x0008
#define NV_PAUSEFRAME_RX_REQ     0x0010
#define NV_PAUSEFRAME_TX_REQ     0x0020
#define NV_PAUSEFRAME_AUTONEG    0x0040

/* MSI/MSI-X defines */
#define NV_MSI_X_MAX_VECTORS  8
#define NV_MSI_X_VECTORS_MASK 0x000f
#define NV_MSI_CAPABLE        0x0010
#define NV_MSI_X_CAPABLE      0x0020
#define NV_MSI_ENABLED        0x0040
#define NV_MSI_X_ENABLED      0x0080

#define NV_MSI_X_VECTOR_ALL   0x0
#define NV_MSI_X_VECTOR_RX    0x0
#define NV_MSI_X_VECTOR_TX    0x1
#define NV_MSI_X_VECTOR_OTHER 0x2

#define NV_MSI_PRIV_OFFSET 0x68
#define NV_MSI_PRIV_VALUE  0xffffffff

#define NV_RESTART_TX         0x1
#define NV_RESTART_RX         0x2

#define NV_TX_LIMIT_COUNT     16

#define NV_DYNAMIC_THRESHOLD        4
#define NV_DYNAMIC_MAX_QUIET_COUNT  2048
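/*
 * Editorial note: the nv_estats_str[] table below and
 * struct nv_ethtool_stats after it must stay in the same order and
 * grow in lockstep; the NV_DEV_STATISTICS_V*_COUNT macros derive the
 * number of exported counters directly from the struct layout
 * (sizeof(struct) / sizeof(u64)).
 */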
/* statistics */
struct nv_ethtool_str {
	char name[ETH_GSTRING_LEN];
};

static const struct nv_ethtool_str nv_estats_str[] = {
	{ "tx_bytes" }, /* includes Ethernet FCS CRC */
	{ "tx_zero_rexmt" },
	{ "tx_one_rexmt" },
	{ "tx_many_rexmt" },
	{ "tx_late_collision" },
	{ "tx_fifo_errors" },
	{ "tx_carrier_errors" },
	{ "tx_excess_deferral" },
	{ "tx_retry_error" },
	{ "rx_frame_error" },
	{ "rx_extra_byte" },
	{ "rx_late_collision" },
	{ "rx_runt" },
	{ "rx_frame_too_long" },
	{ "rx_over_errors" },
	{ "rx_crc_errors" },
	{ "rx_frame_align_error" },
	{ "rx_length_error" },
	{ "rx_unicast" },
	{ "rx_multicast" },
	{ "rx_broadcast" },
	{ "rx_packets" },
	{ "rx_errors_total" },
	{ "tx_errors_total" },

	/* version 2 stats */
	{ "tx_deferral" },
	{ "tx_packets" },
	{ "rx_bytes" }, /* includes Ethernet FCS CRC */
	{ "tx_pause" },
	{ "rx_pause" },
	{ "rx_drop_frame" },

	/* version 3 stats */
	{ "tx_unicast" },
	{ "tx_multicast" },
	{ "tx_broadcast" }
};

struct nv_ethtool_stats {
	u64 tx_bytes; /* should be ifconfig->tx_bytes + 4*tx_packets */
	u64 tx_zero_rexmt;
	u64 tx_one_rexmt;
	u64 tx_many_rexmt;
	u64 tx_late_collision;
	u64 tx_fifo_errors;
	u64 tx_carrier_errors;
	u64 tx_excess_deferral;
	u64 tx_retry_error;
	u64 rx_frame_error;
	u64 rx_extra_byte;
	u64 rx_late_collision;
	u64 rx_runt;
	u64 rx_frame_too_long;
	u64 rx_over_errors;
	u64 rx_crc_errors;
	u64 rx_frame_align_error;
	u64 rx_length_error;
	u64 rx_unicast;
	u64 rx_multicast;
	u64 rx_broadcast;
	u64 rx_packets; /* should be ifconfig->rx_packets */
	u64 rx_errors_total;
	u64 tx_errors_total;

	/* version 2 stats */
	u64 tx_deferral;
	u64 tx_packets; /* should be ifconfig->tx_packets */
	u64 rx_bytes; /* should be ifconfig->rx_bytes + 4*rx_packets */
	u64 tx_pause;
	u64 rx_pause;
	u64 rx_drop_frame;

	/* version 3 stats */
	u64 tx_unicast;
	u64 tx_multicast;
	u64 tx_broadcast;
};

#define NV_DEV_STATISTICS_V3_COUNT (sizeof(struct nv_ethtool_stats)/sizeof(u64))
#define NV_DEV_STATISTICS_V2_COUNT (NV_DEV_STATISTICS_V3_COUNT - 3)
#define NV_DEV_STATISTICS_V1_COUNT (NV_DEV_STATISTICS_V2_COUNT - 6)
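/*
 * Editorial note, spelling out the arithmetic above: the struct holds
 * 24 version-1 counters, 6 version-2 counters and 3 version-3
 * counters, so V3_COUNT = 33, V2_COUNT = 33 - 3 = 30 and
 * V1_COUNT = 30 - 6 = 24.
 */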
/* diagnostics */
#define NV_TEST_COUNT_BASE 3
#define NV_TEST_COUNT_EXTENDED 4

static const struct nv_ethtool_str nv_etests_str[] = {
	{ "link (online/offline)" },
	{ "register (offline) " },
	{ "interrupt (offline) " },
	{ "loopback (offline) " }
};

struct register_test {
	__u32 reg;
	__u32 mask;
};

static const struct register_test nv_registers_test[] = {
	{ NvRegUnknownSetupReg6, 0x01 },
	{ NvRegMisc1, 0x03c },
	{ NvRegOffloadConfig, 0x03ff },
	{ NvRegMulticastAddrA, 0xffffffff },
	{ NvRegTxWatermark, 0x0ff },
	{ NvRegWakeUpFlags, 0x07777 },
	{ 0, 0 }
};

struct nv_skb_map {
	struct sk_buff *skb;
	dma_addr_t dma;
	unsigned int dma_len:31;
	unsigned int dma_single:1;
	struct ring_desc_ex *first_tx_desc;
	struct nv_skb_map *next_tx_ctx;
};

/*
 * SMP locking:
 * All hardware access under netdev_priv(dev)->lock, except the performance
 * critical parts:
 * - rx is (pseudo-) lockless: it relies on the single-threading provided
 *	by the arch code for interrupts.
 * - tx setup is lockless: it relies on netif_tx_lock. Actual submission
 *	needs netdev_priv(dev)->lock :-(
 * - set_multicast_list: preparation lockless, relies on netif_tx_lock.
 *
 * Hardware stats updates are protected by hwstats_lock:
 * - updated by nv_do_stats_poll (timer). This is meant to avoid
 *	integer wraparound in the NIC stats registers, at low frequency
 *	(0.1 Hz)
 * - updated by nv_get_ethtool_stats + nv_get_stats64
 *
 * Software stats are accessed only through 64b synchronization points
 * and are not subject to other synchronization techniques (single
 * update thread on the TX or RX paths).
 */

/* in dev: base, irq */
struct fe_priv {
	spinlock_t lock;

	struct net_device *dev;
	struct napi_struct napi;

	/* hardware stats are updated in syscall and timer */
	spinlock_t hwstats_lock;
	struct nv_ethtool_stats estats;

	int in_shutdown;
	u32 linkspeed;
	int duplex;
	int autoneg;
	int fixed_mode;
	int phyaddr;
	int wolenabled;
	unsigned int phy_oui;
	unsigned int phy_model;
	unsigned int phy_rev;
	u16 gigabit;
	int intr_test;
	int recover_error;
	int quiet_count;

	/* General data: RO fields */
	dma_addr_t ring_addr;
	struct pci_dev *pci_dev;
	u32 orig_mac[2];
	u32 events;
	u32 irqmask;
	u32 desc_ver;
	u32 txrxctl_bits;
	u32 vlanctl_bits;
	u32 driver_data;
	u32 device_id;
	u32 register_size;
	u32 mac_in_use;
	int mgmt_version;
	int mgmt_sema;

	void __iomem *base;

	/* rx specific fields.
	 * Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
	 */
	union ring_type get_rx, put_rx, first_rx, last_rx;
	struct nv_skb_map *get_rx_ctx, *put_rx_ctx;
	struct nv_skb_map *first_rx_ctx, *last_rx_ctx;
	struct nv_skb_map *rx_skb;

	union ring_type rx_ring;
	unsigned int rx_buf_sz;
	unsigned int pkt_limit;
	struct timer_list oom_kick;
	struct timer_list nic_poll;
	struct timer_list stats_poll;
	u32 nic_poll_irq;
	int rx_ring_size;

	/* RX software stats */
	struct u64_stats_sync swstats_rx_syncp;
	u64 stat_rx_packets;
	u64 stat_rx_bytes; /* not always available in HW */
	u64 stat_rx_missed_errors;
	u64 stat_rx_dropped;

	/* media detection workaround.
	 * Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
	 */
	int need_linktimer;
	unsigned long link_timeout;
	/*
	 * tx specific fields.
	 */
	union ring_type get_tx, put_tx, first_tx, last_tx;
	struct nv_skb_map *get_tx_ctx, *put_tx_ctx;
	struct nv_skb_map *first_tx_ctx, *last_tx_ctx;
	struct nv_skb_map *tx_skb;

	union ring_type tx_ring;
	u32 tx_flags;
	int tx_ring_size;
	int tx_limit;
	u32 tx_pkts_in_progress;
	struct nv_skb_map *tx_change_owner;
	struct nv_skb_map *tx_end_flip;
	int tx_stop;

	/* TX software stats */
	struct u64_stats_sync swstats_tx_syncp;
	u64 stat_tx_packets; /* not always available in HW */
	u64 stat_tx_bytes;
	u64 stat_tx_dropped;

	/* msi/msi-x fields */
	u32 msi_flags;
	struct msix_entry msi_x_entry[NV_MSI_X_MAX_VECTORS];

	/* flow control */
	u32 pause_flags;

	/* power saved state */
	u32 saved_config_space[NV_PCI_REGSZ_MAX/4];

	/* for different msi-x irq type */
	char name_rx[IFNAMSIZ + 3];       /* -rx    */
	char name_tx[IFNAMSIZ + 3];       /* -tx    */
	char name_other[IFNAMSIZ + 6];    /* -other */
};

/*
 * Maximum number of loops until we assume that a bit in the irq mask
 * is stuck. Overridable with module param.
 */
static int max_interrupt_work = 4;

/*
 * Optimization can be either throughput mode or cpu mode
 *
 * Throughput Mode: Every tx and rx packet will generate an interrupt.
 * CPU Mode: Interrupts are controlled by a timer.
 */
enum {
	NV_OPTIMIZATION_MODE_THROUGHPUT,
	NV_OPTIMIZATION_MODE_CPU,
	NV_OPTIMIZATION_MODE_DYNAMIC
};
static int optimization_mode = NV_OPTIMIZATION_MODE_DYNAMIC;

/*
 * Poll interval for timer irq
 *
 * This interval determines how frequently an interrupt is generated.
 * The value is determined by [(time_in_micro_secs * 100) / (2^10)]
 * Min = 0, and Max = 65535
 */
static int poll_interval = -1;

/*
 * MSI interrupts
 */
enum {
	NV_MSI_INT_DISABLED,
	NV_MSI_INT_ENABLED
};
static int msi = NV_MSI_INT_ENABLED;

/*
 * MSIX interrupts
 */
enum {
	NV_MSIX_INT_DISABLED,
	NV_MSIX_INT_ENABLED
};
static int msix = NV_MSIX_INT_ENABLED;

/*
 * DMA 64bit
 */
enum {
	NV_DMA_64BIT_DISABLED,
	NV_DMA_64BIT_ENABLED
};
static int dma_64bit = NV_DMA_64BIT_ENABLED;

/*
 * Debug output control for tx_timeout
 */
static bool debug_tx_timeout = false;

/*
 * Crossover Detection
 * Realtek 8201 phy + some OEM boards do not work properly.
 */
enum {
	NV_CROSSOVER_DETECTION_DISABLED,
	NV_CROSSOVER_DETECTION_ENABLED
};
static int phy_cross = NV_CROSSOVER_DETECTION_DISABLED;

/*
 * Power down phy when interface is down (persists through reboot;
 * older Linux and other OSes may not power it up again)
 */
static int phy_power_down;
static inline struct fe_priv *get_nvpriv(struct net_device *dev)
{
	return netdev_priv(dev);
}

static inline u8 __iomem *get_hwbase(struct net_device *dev)
{
	return ((struct fe_priv *)netdev_priv(dev))->base;
}

static inline void pci_push(u8 __iomem *base)
{
	/* force out pending posted writes */
	readl(base);
}

static inline u32 nv_descr_getlength(struct ring_desc *prd, u32 v)
{
	return le32_to_cpu(prd->flaglen)
		& ((v == DESC_VER_1) ? LEN_MASK_V1 : LEN_MASK_V2);
}

static inline u32 nv_descr_getlength_ex(struct ring_desc_ex *prd, u32 v)
{
	return le32_to_cpu(prd->flaglen) & LEN_MASK_V2;
}

static bool nv_optimized(struct fe_priv *np)
{
	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
		return false;
	return true;
}

static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target,
		     int delay, int delaymax)
{
	u8 __iomem *base = get_hwbase(dev);

	pci_push(base);
	do {
		udelay(delay);
		delaymax -= delay;
		if (delaymax < 0)
			return 1;
	} while ((readl(base + offset) & mask) != target);
	return 0;
}

#define NV_SETUP_RX_RING 0x01
#define NV_SETUP_TX_RING 0x02

static inline u32 dma_low(dma_addr_t addr)
{
	return addr;
}

static inline u32 dma_high(dma_addr_t addr)
{
	return addr>>31>>1;	/* 0 if 32bit, shift down by 32 if 64bit */
}

static void setup_hw_rings(struct net_device *dev, int rxtx_flags)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);

	if (!nv_optimized(np)) {
		if (rxtx_flags & NV_SETUP_RX_RING)
			writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr);
		if (rxtx_flags & NV_SETUP_TX_RING)
			writel(dma_low(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
	} else {
		if (rxtx_flags & NV_SETUP_RX_RING) {
			writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr);
			writel(dma_high(np->ring_addr), base + NvRegRxRingPhysAddrHigh);
		}
		if (rxtx_flags & NV_SETUP_TX_RING) {
			writel(dma_low(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr);
			writel(dma_high(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddrHigh);
		}
	}
}

static void free_rings(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!nv_optimized(np)) {
		if (np->rx_ring.orig)
			pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size),
					    np->rx_ring.orig, np->ring_addr);
	} else {
		if (np->rx_ring.ex)
			pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size),
					    np->rx_ring.ex, np->ring_addr);
	}
	kfree(np->rx_skb);
	kfree(np->tx_skb);
}
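/*
 * Editorial notes on the helpers above:
 * - dma_high() shifts in two steps (>>31, then >>1) instead of >>32
 *   because shifting a 32-bit dma_addr_t by its full width would be
 *   undefined behaviour in C; split this way it safely yields 0 on
 *   32-bit configurations.
 * - setup_hw_rings() and free_rings() assume a single contiguous DMA
 *   allocation holding the rx descriptors first, immediately followed
 *   by the tx descriptors, which is why the tx ring base is computed
 *   as ring_addr + rx_ring_size * sizeof(descriptor).
 */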
static int using_multi_irqs(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
	    ((np->msi_flags & NV_MSI_X_ENABLED) &&
	     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1)))
		return 0;
	else
		return 1;
}

static void nv_txrx_gate(struct net_device *dev, bool gate)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 powerstate;

	if (!np->mac_in_use &&
	    (np->driver_data & DEV_HAS_POWER_CNTRL)) {
		powerstate = readl(base + NvRegPowerState2);
		if (gate)
			powerstate |= NVREG_POWERSTATE2_GATE_CLOCKS;
		else
			powerstate &= ~NVREG_POWERSTATE2_GATE_CLOCKS;
		writel(powerstate, base + NvRegPowerState2);
	}
}

static void nv_enable_irq(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			enable_irq(np->pci_dev->irq);
	} else {
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
	}
}

static void nv_disable_irq(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			disable_irq(np->pci_dev->irq);
	} else {
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
	}
}

/* In MSIX mode, a write to irqmask behaves as XOR */
static void nv_enable_hw_interrupts(struct net_device *dev, u32 mask)
{
	u8 __iomem *base = get_hwbase(dev);

	writel(mask, base + NvRegIrqMask);
}

static void nv_disable_hw_interrupts(struct net_device *dev, u32 mask)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);

	if (np->msi_flags & NV_MSI_X_ENABLED) {
		writel(mask, base + NvRegIrqMask);
	} else {
		if (np->msi_flags & NV_MSI_ENABLED)
			writel(0, base + NvRegMSIIrqMask);
		writel(0, base + NvRegIrqMask);
	}
}

static void nv_napi_enable(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	napi_enable(&np->napi);
}

static void nv_napi_disable(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	napi_disable(&np->napi);
}

#define MII_READ	(-1)
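/*
 * Editorial usage sketch for mii_rw() below: it doubles as read and
 * write accessor. Passing MII_READ (-1) as 'value' performs a read and
 * returns the register contents (or -1 on error); any other value
 * performs a write and returns 0 on success, e.g.:
 *
 *	int bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
 *	mii_rw(dev, np->phyaddr, MII_BMCR, bmcr | BMCR_ANRESTART);
 */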
/* mii_rw: read/write a register on the PHY.
 *
 * Caller must guarantee serialization
 */
static int mii_rw(struct net_device *dev, int addr, int miireg, int value)
{
	u8 __iomem *base = get_hwbase(dev);
	u32 reg;
	int retval;

	writel(NVREG_MIISTAT_MASK_RW, base + NvRegMIIStatus);

	reg = readl(base + NvRegMIIControl);
	if (reg & NVREG_MIICTL_INUSE) {
		writel(NVREG_MIICTL_INUSE, base + NvRegMIIControl);
		udelay(NV_MIIBUSY_DELAY);
	}

	reg = (addr << NVREG_MIICTL_ADDRSHIFT) | miireg;
	if (value != MII_READ) {
		writel(value, base + NvRegMIIData);
		reg |= NVREG_MIICTL_WRITE;
	}
	writel(reg, base + NvRegMIIControl);

	if (reg_delay(dev, NvRegMIIControl, NVREG_MIICTL_INUSE, 0,
			NV_MIIPHY_DELAY, NV_MIIPHY_DELAYMAX)) {
		retval = -1;
	} else if (value != MII_READ) {
		/* it was a write operation - fewer failures are detectable */
		retval = 0;
	} else if (readl(base + NvRegMIIStatus) & NVREG_MIISTAT_ERROR) {
		retval = -1;
	} else {
		retval = readl(base + NvRegMIIData);
	}

	return retval;
}

static int phy_reset(struct net_device *dev, u32 bmcr_setup)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 miicontrol;
	unsigned int tries = 0;

	miicontrol = BMCR_RESET | bmcr_setup;
	if (mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol))
		return -1;

	/* wait for 500ms */
	msleep(500);

	/* must wait till reset is deasserted */
	while (miicontrol & BMCR_RESET) {
		usleep_range(10000, 20000);
		miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
		/* FIXME: 100 tries seem excessive */
		if (tries++ > 100)
			return -1;
	}
	return 0;
}

static int init_realtek_8211b(struct net_device *dev, struct fe_priv *np)
{
	static const struct {
		int reg;
		int init;
	} ri[] = {
		{ PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1 },
		{ PHY_REALTEK_INIT_REG2, PHY_REALTEK_INIT2 },
		{ PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3 },
		{ PHY_REALTEK_INIT_REG3, PHY_REALTEK_INIT4 },
		{ PHY_REALTEK_INIT_REG4, PHY_REALTEK_INIT5 },
		{ PHY_REALTEK_INIT_REG5, PHY_REALTEK_INIT6 },
		{ PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1 },
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(ri); i++) {
		if (mii_rw(dev, np->phyaddr, ri[i].reg, ri[i].init))
			return PHY_ERROR;
	}

	return 0;
}
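/*
 * Editorial note (unverified assumption): PHY_REALTEK_INIT_REG1 is
 * register 0x1f, which appears to act as the page-select register on
 * these Realtek PHYs, so the ri[] table above seems to bracket its
 * magic writes with page selects and to return to the default page
 * (PHY_REALTEK_INIT1) at the end.
 */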
static int init_realtek_8211c(struct net_device *dev, struct fe_priv *np)
{
	u32 reg;
	u8 __iomem *base = get_hwbase(dev);
	u32 powerstate = readl(base + NvRegPowerState2);

	/* need to perform hw phy reset */
	powerstate |= NVREG_POWERSTATE2_PHY_RESET;
	writel(powerstate, base + NvRegPowerState2);
	msleep(25);

	powerstate &= ~NVREG_POWERSTATE2_PHY_RESET;
	writel(powerstate, base + NvRegPowerState2);
	msleep(25);

	reg = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, MII_READ);
	reg |= PHY_REALTEK_INIT9;
	if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, reg))
		return PHY_ERROR;
	if (mii_rw(dev, np->phyaddr,
		   PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT10))
		return PHY_ERROR;
	reg = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG7, MII_READ);
	if (!(reg & PHY_REALTEK_INIT11)) {
		reg |= PHY_REALTEK_INIT11;
		if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG7, reg))
			return PHY_ERROR;
	}
	if (mii_rw(dev, np->phyaddr,
		   PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1))
		return PHY_ERROR;

	return 0;
}

static int init_realtek_8201(struct net_device *dev, struct fe_priv *np)
{
	u32 phy_reserved;

	if (np->driver_data & DEV_NEED_PHY_INIT_FIX) {
		phy_reserved = mii_rw(dev, np->phyaddr,
				      PHY_REALTEK_INIT_REG6, MII_READ);
		phy_reserved |= PHY_REALTEK_INIT7;
		if (mii_rw(dev, np->phyaddr,
			   PHY_REALTEK_INIT_REG6, phy_reserved))
			return PHY_ERROR;
	}

	return 0;
}

static int init_realtek_8201_cross(struct net_device *dev, struct fe_priv *np)
{
	u32 phy_reserved;

	if (phy_cross == NV_CROSSOVER_DETECTION_DISABLED) {
		if (mii_rw(dev, np->phyaddr,
			   PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3))
			return PHY_ERROR;
		phy_reserved = mii_rw(dev, np->phyaddr,
				      PHY_REALTEK_INIT_REG2, MII_READ);
		phy_reserved &= ~PHY_REALTEK_INIT_MSK1;
		phy_reserved |= PHY_REALTEK_INIT3;
		if (mii_rw(dev, np->phyaddr,
			   PHY_REALTEK_INIT_REG2, phy_reserved))
			return PHY_ERROR;
		if (mii_rw(dev, np->phyaddr,
			   PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1))
			return PHY_ERROR;
	}

	return 0;
}

static int init_cicada(struct net_device *dev, struct fe_priv *np,
		       u32 phyinterface)
{
	u32 phy_reserved;

	if (phyinterface & PHY_RGMII) {
		phy_reserved = mii_rw(dev, np->phyaddr, MII_RESV1, MII_READ);
		phy_reserved &= ~(PHY_CICADA_INIT1 | PHY_CICADA_INIT2);
		phy_reserved |= (PHY_CICADA_INIT3 | PHY_CICADA_INIT4);
		if (mii_rw(dev, np->phyaddr, MII_RESV1, phy_reserved))
			return PHY_ERROR;
		phy_reserved = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
		phy_reserved |= PHY_CICADA_INIT5;
		if (mii_rw(dev, np->phyaddr, MII_NCONFIG, phy_reserved))
			return PHY_ERROR;
	}
	phy_reserved = mii_rw(dev, np->phyaddr, MII_SREVISION, MII_READ);
	phy_reserved |= PHY_CICADA_INIT6;
	if (mii_rw(dev, np->phyaddr, MII_SREVISION, phy_reserved))
		return PHY_ERROR;

	return 0;
}
static int init_vitesse(struct net_device *dev, struct fe_priv *np)
{
	u32 phy_reserved;

	if (mii_rw(dev, np->phyaddr,
		   PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT1))
		return PHY_ERROR;
	if (mii_rw(dev, np->phyaddr,
		   PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT2))
		return PHY_ERROR;
	phy_reserved = mii_rw(dev, np->phyaddr,
			      PHY_VITESSE_INIT_REG4, MII_READ);
	if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved))
		return PHY_ERROR;
	phy_reserved = mii_rw(dev, np->phyaddr,
			      PHY_VITESSE_INIT_REG3, MII_READ);
	phy_reserved &= ~PHY_VITESSE_INIT_MSK1;
	phy_reserved |= PHY_VITESSE_INIT3;
	if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved))
		return PHY_ERROR;
	if (mii_rw(dev, np->phyaddr,
		   PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT4))
		return PHY_ERROR;
	if (mii_rw(dev, np->phyaddr,
		   PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT5))
		return PHY_ERROR;
	phy_reserved = mii_rw(dev, np->phyaddr,
			      PHY_VITESSE_INIT_REG4, MII_READ);
	phy_reserved &= ~PHY_VITESSE_INIT_MSK1;
	phy_reserved |= PHY_VITESSE_INIT3;
	if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved))
		return PHY_ERROR;
	phy_reserved = mii_rw(dev, np->phyaddr,
			      PHY_VITESSE_INIT_REG3, MII_READ);
	if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved))
		return PHY_ERROR;
	if (mii_rw(dev, np->phyaddr,
		   PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT6))
		return PHY_ERROR;
	if (mii_rw(dev, np->phyaddr,
		   PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT7))
		return PHY_ERROR;
	phy_reserved = mii_rw(dev, np->phyaddr,
			      PHY_VITESSE_INIT_REG4, MII_READ);
	if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved))
		return PHY_ERROR;
	phy_reserved = mii_rw(dev, np->phyaddr,
			      PHY_VITESSE_INIT_REG3, MII_READ);
	phy_reserved &= ~PHY_VITESSE_INIT_MSK2;
	phy_reserved |= PHY_VITESSE_INIT8;
	if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved))
		return PHY_ERROR;
	if (mii_rw(dev, np->phyaddr,
		   PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT9))
		return PHY_ERROR;
	if (mii_rw(dev, np->phyaddr,
		   PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT10))
		return PHY_ERROR;

	return 0;
}
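/*
 * Editorial note: the vendor init helpers above all return 0 on
 * success or PHY_ERROR on a failed MDIO write; phy_init() below picks
 * the right helper from (phy_oui, phy_model, phy_rev) and re-runs the
 * Realtek fixups after the BMCR reset, since the reset can clear them.
 */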
static int phy_init(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 phyinterface;
	u32 mii_status, mii_control, mii_control_1000, reg;

	/* phy errata for E3016 phy */
	if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
		reg = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
		reg &= ~PHY_MARVELL_E3016_INITMASK;
		if (mii_rw(dev, np->phyaddr, MII_NCONFIG, reg)) {
			netdev_info(dev, "%s: phy write to errata reg failed\n",
				    pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}
	if (np->phy_oui == PHY_OUI_REALTEK) {
		if (np->phy_model == PHY_MODEL_REALTEK_8211 &&
		    np->phy_rev == PHY_REV_REALTEK_8211B) {
			if (init_realtek_8211b(dev, np)) {
				netdev_info(dev, "%s: phy init failed\n",
					    pci_name(np->pci_dev));
				return PHY_ERROR;
			}
		} else if (np->phy_model == PHY_MODEL_REALTEK_8211 &&
			   np->phy_rev == PHY_REV_REALTEK_8211C) {
			if (init_realtek_8211c(dev, np)) {
				netdev_info(dev, "%s: phy init failed\n",
					    pci_name(np->pci_dev));
				return PHY_ERROR;
			}
		} else if (np->phy_model == PHY_MODEL_REALTEK_8201) {
			if (init_realtek_8201(dev, np)) {
				netdev_info(dev, "%s: phy init failed\n",
					    pci_name(np->pci_dev));
				return PHY_ERROR;
			}
		}
	}

	/* set advertise register */
	reg = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
	reg |= (ADVERTISE_10HALF | ADVERTISE_10FULL |
		ADVERTISE_100HALF | ADVERTISE_100FULL |
		ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP);
	if (mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg)) {
		netdev_info(dev, "%s: phy write to advertise failed\n",
			    pci_name(np->pci_dev));
		return PHY_ERROR;
	}

	/* get phy interface type */
	phyinterface = readl(base + NvRegPhyInterface);

	/* see if gigabit phy */
	mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
	if (mii_status & PHY_GIGABIT) {
		np->gigabit = PHY_GIGABIT;
		mii_control_1000 = mii_rw(dev, np->phyaddr,
					  MII_CTRL1000, MII_READ);
		mii_control_1000 &= ~ADVERTISE_1000HALF;
		if (phyinterface & PHY_RGMII)
			mii_control_1000 |= ADVERTISE_1000FULL;
		else
			mii_control_1000 &= ~ADVERTISE_1000FULL;

		if (mii_rw(dev, np->phyaddr, MII_CTRL1000, mii_control_1000)) {
			netdev_info(dev, "%s: phy init failed\n",
				    pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	} else
		np->gigabit = 0;

	mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
	mii_control |= BMCR_ANENABLE;

	if (np->phy_oui == PHY_OUI_REALTEK &&
	    np->phy_model == PHY_MODEL_REALTEK_8211 &&
	    np->phy_rev == PHY_REV_REALTEK_8211C) {
		/* start autoneg since we already performed hw reset above */
		mii_control |= BMCR_ANRESTART;
		if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) {
			netdev_info(dev, "%s: phy init failed\n",
				    pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	} else {
		/* reset the phy
		 * (certain phys need bmcr to be setup with reset)
		 */
		if (phy_reset(dev, mii_control)) {
			netdev_info(dev, "%s: phy reset failed\n",
				    pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}

	/* phy vendor specific configuration */
	if (np->phy_oui == PHY_OUI_CICADA) {
		if (init_cicada(dev, np, phyinterface)) {
			netdev_info(dev, "%s: phy init failed\n",
				    pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	} else if (np->phy_oui == PHY_OUI_VITESSE) {
		if (init_vitesse(dev, np)) {
			netdev_info(dev, "%s: phy init failed\n",
				    pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	} else if (np->phy_oui == PHY_OUI_REALTEK) {
		if (np->phy_model == PHY_MODEL_REALTEK_8211 &&
		    np->phy_rev == PHY_REV_REALTEK_8211B) {
			/* reset could have cleared these out, set them back */
			if (init_realtek_8211b(dev, np)) {
				netdev_info(dev, "%s: phy init failed\n",
					    pci_name(np->pci_dev));
				return PHY_ERROR;
			}
		} else if (np->phy_model == PHY_MODEL_REALTEK_8201) {
			if (init_realtek_8201(dev, np) ||
			    init_realtek_8201_cross(dev, np)) {
				netdev_info(dev, "%s: phy init failed\n",
					    pci_name(np->pci_dev));
				return PHY_ERROR;
			}
		}
	}

	/* some phys clear out pause advertisement on reset, set it back */
	mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg);

	/* restart auto negotiation, power down phy */
	mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
	mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE);
	if (phy_power_down)
		mii_control |= BMCR_PDOWN;
	if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control))
		return PHY_ERROR;

	return 0;
}
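/*
 * Editorial note: the start/stop helpers below special-case
 * np->mac_in_use (MAC shared with the management unit): instead of
 * starting or stopping the engine itself they only toggle the rx/tx
 * path enables (NVREG_RCVCTL_RX_PATH_EN / NVREG_XMITCTL_TX_PATH_EN),
 * leaving the engine running for the management unit.
 */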
static void nv_start_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 rx_ctrl = readl(base + NvRegReceiverControl);

	/* Already running? Stop it. */
	if ((readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) && !np->mac_in_use) {
		rx_ctrl &= ~NVREG_RCVCTL_START;
		writel(rx_ctrl, base + NvRegReceiverControl);
		pci_push(base);
	}
	writel(np->linkspeed, base + NvRegLinkSpeed);
	pci_push(base);
	rx_ctrl |= NVREG_RCVCTL_START;
	if (np->mac_in_use)
		rx_ctrl &= ~NVREG_RCVCTL_RX_PATH_EN;
	writel(rx_ctrl, base + NvRegReceiverControl);
	pci_push(base);
}

static void nv_stop_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 rx_ctrl = readl(base + NvRegReceiverControl);

	if (!np->mac_in_use)
		rx_ctrl &= ~NVREG_RCVCTL_START;
	else
		rx_ctrl |= NVREG_RCVCTL_RX_PATH_EN;
	writel(rx_ctrl, base + NvRegReceiverControl);
	if (reg_delay(dev, NvRegReceiverStatus, NVREG_RCVSTAT_BUSY, 0,
		      NV_RXSTOP_DELAY1, NV_RXSTOP_DELAY1MAX))
		netdev_info(dev, "%s: ReceiverStatus remained busy\n",
			    __func__);

	udelay(NV_RXSTOP_DELAY2);
	if (!np->mac_in_use)
		writel(0, base + NvRegLinkSpeed);
}

static void nv_start_tx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 tx_ctrl = readl(base + NvRegTransmitterControl);

	tx_ctrl |= NVREG_XMITCTL_START;
	if (np->mac_in_use)
		tx_ctrl &= ~NVREG_XMITCTL_TX_PATH_EN;
	writel(tx_ctrl, base + NvRegTransmitterControl);
	pci_push(base);
}

static void nv_stop_tx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 tx_ctrl = readl(base + NvRegTransmitterControl);

	if (!np->mac_in_use)
		tx_ctrl &= ~NVREG_XMITCTL_START;
	else
		tx_ctrl |= NVREG_XMITCTL_TX_PATH_EN;
	writel(tx_ctrl, base + NvRegTransmitterControl);
	if (reg_delay(dev, NvRegTransmitterStatus, NVREG_XMITSTAT_BUSY, 0,
		      NV_TXSTOP_DELAY1, NV_TXSTOP_DELAY1MAX))
		netdev_info(dev, "%s: TransmitterStatus remained busy\n",
			    __func__);

	udelay(NV_TXSTOP_DELAY2);
	if (!np->mac_in_use)
		writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV,
		       base + NvRegTransmitPoll);
}

static void nv_start_rxtx(struct net_device *dev)
{
	nv_start_rx(dev);
	nv_start_tx(dev);
}

static void nv_stop_rxtx(struct net_device *dev)
{
	nv_stop_rx(dev);
	nv_stop_tx(dev);
}

static void nv_txrx_reset(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);
	udelay(NV_TXRX_RESET_DELAY);
	writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);
}

static void nv_mac_reset(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 temp1, temp2, temp3;

	writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);

	/* save registers since they will be cleared on reset */
	temp1 = readl(base + NvRegMacAddrA);
	temp2 = readl(base + NvRegMacAddrB);
	temp3 = readl(base + NvRegTransmitPoll);

	writel(NVREG_MAC_RESET_ASSERT, base + NvRegMacReset);
	pci_push(base);
	udelay(NV_MAC_RESET_DELAY);
	writel(0, base + NvRegMacReset);
	pci_push(base);
	udelay(NV_MAC_RESET_DELAY);

	/* restore saved registers */
	writel(temp1, base + NvRegMacAddrA);
	writel(temp2, base + NvRegMacAddrB);
	writel(temp3, base + NvRegTransmitPoll);

	writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);
}
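/*
 * Editorial note (inferred from the code): nv_update_stats() below
 * accumulates with '+=', which implies the NIC's 32-bit counters reset
 * on read; polling them every STATS_INTERVAL (10 s) keeps them from
 * wrapping unnoticed, as the SMP locking comment near the top of the
 * file explains.
 */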
/* Caller must appropriately lock netdev_priv(dev)->hwstats_lock */
static void nv_update_stats(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	/* If it happens that this is run in top-half context, then
	 * replace the spin_lock of hwstats_lock with
	 * spin_lock_irqsave() in calling functions. */
	WARN_ONCE(in_irq(), "forcedeth: estats spin_lock(_bh) from top-half");
	assert_spin_locked(&np->hwstats_lock);

	/* query hardware */
	np->estats.tx_bytes += readl(base + NvRegTxCnt);
	np->estats.tx_zero_rexmt += readl(base + NvRegTxZeroReXmt);
	np->estats.tx_one_rexmt += readl(base + NvRegTxOneReXmt);
	np->estats.tx_many_rexmt += readl(base + NvRegTxManyReXmt);
	np->estats.tx_late_collision += readl(base + NvRegTxLateCol);
	np->estats.tx_fifo_errors += readl(base + NvRegTxUnderflow);
	np->estats.tx_carrier_errors += readl(base + NvRegTxLossCarrier);
	np->estats.tx_excess_deferral += readl(base + NvRegTxExcessDef);
	np->estats.tx_retry_error += readl(base + NvRegTxRetryErr);
	np->estats.rx_frame_error += readl(base + NvRegRxFrameErr);
	np->estats.rx_extra_byte += readl(base + NvRegRxExtraByte);
	np->estats.rx_late_collision += readl(base + NvRegRxLateCol);
	np->estats.rx_runt += readl(base + NvRegRxRunt);
	np->estats.rx_frame_too_long += readl(base + NvRegRxFrameTooLong);
	np->estats.rx_over_errors += readl(base + NvRegRxOverflow);
	np->estats.rx_crc_errors += readl(base + NvRegRxFCSErr);
	np->estats.rx_frame_align_error += readl(base + NvRegRxFrameAlignErr);
	np->estats.rx_length_error += readl(base + NvRegRxLenErr);
	np->estats.rx_unicast += readl(base + NvRegRxUnicast);
	np->estats.rx_multicast += readl(base + NvRegRxMulticast);
	np->estats.rx_broadcast += readl(base + NvRegRxBroadcast);
	np->estats.rx_packets =
		np->estats.rx_unicast +
		np->estats.rx_multicast +
		np->estats.rx_broadcast;
	np->estats.rx_errors_total =
		np->estats.rx_crc_errors +
		np->estats.rx_over_errors +
		np->estats.rx_frame_error +
		(np->estats.rx_frame_align_error - np->estats.rx_extra_byte) +
		np->estats.rx_late_collision +
		np->estats.rx_runt +
		np->estats.rx_frame_too_long;
	np->estats.tx_errors_total =
		np->estats.tx_late_collision +
		np->estats.tx_fifo_errors +
		np->estats.tx_carrier_errors +
		np->estats.tx_excess_deferral +
		np->estats.tx_retry_error;

	if (np->driver_data & DEV_HAS_STATISTICS_V2) {
		np->estats.tx_deferral += readl(base + NvRegTxDef);
		np->estats.tx_packets += readl(base + NvRegTxFrame);
		np->estats.rx_bytes += readl(base + NvRegRxCnt);
		np->estats.tx_pause += readl(base + NvRegTxPause);
		np->estats.rx_pause += readl(base + NvRegRxPause);
		np->estats.rx_drop_frame += readl(base + NvRegRxDropFrame);
		np->estats.rx_errors_total += np->estats.rx_drop_frame;
	}

	if (np->driver_data & DEV_HAS_STATISTICS_V3) {
		np->estats.tx_unicast += readl(base + NvRegTxUnicast);
		np->estats.tx_multicast += readl(base + NvRegTxMulticast);
		np->estats.tx_broadcast += readl(base + NvRegTxBroadcast);
	}
}
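/*
 * Editorial note: the u64_stats_fetch_begin_irq()/_retry_irq() loops
 * in nv_get_stats64() below are the standard seqcount pattern for
 * reading 64-bit software counters consistently on 32-bit SMP hosts:
 * if a writer updated the counters while they were being copied, the
 * copy is retried.
 */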
+= readl(base + NvRegTxMulticast); 1726 np->estats.tx_broadcast += readl(base + NvRegTxBroadcast); 1727 } 1728 } 1729 1730 /* 1731 * nv_get_stats64: dev->ndo_get_stats64 function 1732 * Get latest stats value from the nic. 1733 * Called with read_lock(&dev_base_lock) held for read - 1734 * only synchronized against unregister_netdevice. 1735 */ 1736 static void 1737 nv_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *storage) 1738 __acquires(&netdev_priv(dev)->hwstats_lock) 1739 __releases(&netdev_priv(dev)->hwstats_lock) 1740 { 1741 struct fe_priv *np = netdev_priv(dev); 1742 unsigned int syncp_start; 1743 1744 /* 1745 * Note: because HW stats are not always available and for 1746 * consistency reasons, the following ifconfig stats are 1747 * managed by software: rx_bytes, tx_bytes, rx_packets and 1748 * tx_packets. The related hardware stats reported by ethtool 1749 * should be equivalent to these ifconfig stats, with 4 1750 * additional bytes per packet (Ethernet FCS CRC), except for 1751 * tx_packets when TSO kicks in. 1752 */ 1753 1754 /* software stats */ 1755 do { 1756 syncp_start = u64_stats_fetch_begin_irq(&np->swstats_rx_syncp); 1757 storage->rx_packets = np->stat_rx_packets; 1758 storage->rx_bytes = np->stat_rx_bytes; 1759 storage->rx_dropped = np->stat_rx_dropped; 1760 storage->rx_missed_errors = np->stat_rx_missed_errors; 1761 } while (u64_stats_fetch_retry_irq(&np->swstats_rx_syncp, syncp_start)); 1762 1763 do { 1764 syncp_start = u64_stats_fetch_begin_irq(&np->swstats_tx_syncp); 1765 storage->tx_packets = np->stat_tx_packets; 1766 storage->tx_bytes = np->stat_tx_bytes; 1767 storage->tx_dropped = np->stat_tx_dropped; 1768 } while (u64_stats_fetch_retry_irq(&np->swstats_tx_syncp, syncp_start)); 1769 1770 /* If the nic supports hw counters then retrieve latest values */ 1771 if (np->driver_data & DEV_HAS_STATISTICS_V123) { 1772 spin_lock_bh(&np->hwstats_lock); 1773 1774 nv_update_stats(dev); 1775 1776 /* generic stats */ 1777 storage->rx_errors = np->estats.rx_errors_total; 1778 storage->tx_errors = np->estats.tx_errors_total; 1779 1780 /* meaningful only when NIC supports stats v3 */ 1781 storage->multicast = np->estats.rx_multicast; 1782 1783 /* detailed rx_errors */ 1784 storage->rx_length_errors = np->estats.rx_length_error; 1785 storage->rx_over_errors = np->estats.rx_over_errors; 1786 storage->rx_crc_errors = np->estats.rx_crc_errors; 1787 storage->rx_frame_errors = np->estats.rx_frame_align_error; 1788 storage->rx_fifo_errors = np->estats.rx_drop_frame; 1789 1790 /* detailed tx_errors */ 1791 storage->tx_carrier_errors = np->estats.tx_carrier_errors; 1792 storage->tx_fifo_errors = np->estats.tx_fifo_errors; 1793 1794 spin_unlock_bh(&np->hwstats_lock); 1795 } 1796 } 1797 1798 /* 1799 * nv_alloc_rx: fill rx ring entries. 
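 * The ring is refilled to one slot short of get_rx (the less_rx
 * sentinel below): if put_rx were allowed to advance all the way to
 * get_rx, a completely full ring would be indistinguishable from a
 * completely empty one.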
 * Return 1 if the allocations for the skbs failed and the
 * rx engine is without available descriptors.
 */
static int nv_alloc_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	struct ring_desc *less_rx;

	less_rx = np->get_rx.orig;
	if (less_rx-- == np->first_rx.orig)
		less_rx = np->last_rx.orig;

	while (np->put_rx.orig != less_rx) {
		struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz + NV_RX_ALLOC_PAD);
		if (skb) {
			np->put_rx_ctx->skb = skb;
			np->put_rx_ctx->dma = pci_map_single(np->pci_dev,
							     skb->data,
							     skb_tailroom(skb),
							     PCI_DMA_FROMDEVICE);
			if (pci_dma_mapping_error(np->pci_dev,
						  np->put_rx_ctx->dma)) {
				kfree_skb(skb);
				goto packet_dropped;
			}
			np->put_rx_ctx->dma_len = skb_tailroom(skb);
			np->put_rx.orig->buf = cpu_to_le32(np->put_rx_ctx->dma);
			wmb();
			np->put_rx.orig->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL);
			if (unlikely(np->put_rx.orig++ == np->last_rx.orig))
				np->put_rx.orig = np->first_rx.orig;
			if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
				np->put_rx_ctx = np->first_rx_ctx;
		} else {
packet_dropped:
			u64_stats_update_begin(&np->swstats_rx_syncp);
			np->stat_rx_dropped++;
			u64_stats_update_end(&np->swstats_rx_syncp);
			return 1;
		}
	}
	return 0;
}

static int nv_alloc_rx_optimized(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	struct ring_desc_ex *less_rx;

	less_rx = np->get_rx.ex;
	if (less_rx-- == np->first_rx.ex)
		less_rx = np->last_rx.ex;

	while (np->put_rx.ex != less_rx) {
		struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz + NV_RX_ALLOC_PAD);
		if (skb) {
			np->put_rx_ctx->skb = skb;
			np->put_rx_ctx->dma = pci_map_single(np->pci_dev,
							     skb->data,
							     skb_tailroom(skb),
							     PCI_DMA_FROMDEVICE);
			if (pci_dma_mapping_error(np->pci_dev,
						  np->put_rx_ctx->dma)) {
				kfree_skb(skb);
				goto packet_dropped;
			}
			np->put_rx_ctx->dma_len = skb_tailroom(skb);
			np->put_rx.ex->bufhigh = cpu_to_le32(dma_high(np->put_rx_ctx->dma));
			np->put_rx.ex->buflow = cpu_to_le32(dma_low(np->put_rx_ctx->dma));
			wmb();
			np->put_rx.ex->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL);
			if (unlikely(np->put_rx.ex++ == np->last_rx.ex))
				np->put_rx.ex = np->first_rx.ex;
			if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
				np->put_rx_ctx = np->first_rx_ctx;
		} else {
packet_dropped:
			u64_stats_update_begin(&np->swstats_rx_syncp);
			np->stat_rx_dropped++;
			u64_stats_update_end(&np->swstats_rx_syncp);
			return 1;
		}
	}
	return 0;
}

/* If rx buffers are exhausted, this runs 50 ms later to retry the refill */
static void nv_do_rx_refill(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);

	/* Just reschedule NAPI rx processing */
	napi_schedule(&np->napi);
}

static void nv_init_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	int i;

	np->get_rx = np->put_rx = np->first_rx = np->rx_ring;

	if (!nv_optimized(np))
		np->last_rx.orig = &np->rx_ring.orig[np->rx_ring_size-1];
	else
		np->last_rx.ex = &np->rx_ring.ex[np->rx_ring_size-1];
	np->get_rx_ctx = np->put_rx_ctx = np->first_rx_ctx = np->rx_skb;
	np->last_rx_ctx =
&np->rx_skb[np->rx_ring_size-1]; 1909 1910 for (i = 0; i < np->rx_ring_size; i++) { 1911 if (!nv_optimized(np)) { 1912 np->rx_ring.orig[i].flaglen = 0; 1913 np->rx_ring.orig[i].buf = 0; 1914 } else { 1915 np->rx_ring.ex[i].flaglen = 0; 1916 np->rx_ring.ex[i].txvlan = 0; 1917 np->rx_ring.ex[i].bufhigh = 0; 1918 np->rx_ring.ex[i].buflow = 0; 1919 } 1920 np->rx_skb[i].skb = NULL; 1921 np->rx_skb[i].dma = 0; 1922 } 1923 } 1924 1925 static void nv_init_tx(struct net_device *dev) 1926 { 1927 struct fe_priv *np = netdev_priv(dev); 1928 int i; 1929 1930 np->get_tx = np->put_tx = np->first_tx = np->tx_ring; 1931 1932 if (!nv_optimized(np)) 1933 np->last_tx.orig = &np->tx_ring.orig[np->tx_ring_size-1]; 1934 else 1935 np->last_tx.ex = &np->tx_ring.ex[np->tx_ring_size-1]; 1936 np->get_tx_ctx = np->put_tx_ctx = np->first_tx_ctx = np->tx_skb; 1937 np->last_tx_ctx = &np->tx_skb[np->tx_ring_size-1]; 1938 netdev_reset_queue(np->dev); 1939 np->tx_pkts_in_progress = 0; 1940 np->tx_change_owner = NULL; 1941 np->tx_end_flip = NULL; 1942 np->tx_stop = 0; 1943 1944 for (i = 0; i < np->tx_ring_size; i++) { 1945 if (!nv_optimized(np)) { 1946 np->tx_ring.orig[i].flaglen = 0; 1947 np->tx_ring.orig[i].buf = 0; 1948 } else { 1949 np->tx_ring.ex[i].flaglen = 0; 1950 np->tx_ring.ex[i].txvlan = 0; 1951 np->tx_ring.ex[i].bufhigh = 0; 1952 np->tx_ring.ex[i].buflow = 0; 1953 } 1954 np->tx_skb[i].skb = NULL; 1955 np->tx_skb[i].dma = 0; 1956 np->tx_skb[i].dma_len = 0; 1957 np->tx_skb[i].dma_single = 0; 1958 np->tx_skb[i].first_tx_desc = NULL; 1959 np->tx_skb[i].next_tx_ctx = NULL; 1960 } 1961 } 1962 1963 static int nv_init_ring(struct net_device *dev) 1964 { 1965 struct fe_priv *np = netdev_priv(dev); 1966 1967 nv_init_tx(dev); 1968 nv_init_rx(dev); 1969 1970 if (!nv_optimized(np)) 1971 return nv_alloc_rx(dev); 1972 else 1973 return nv_alloc_rx_optimized(dev); 1974 } 1975 1976 static void nv_unmap_txskb(struct fe_priv *np, struct nv_skb_map *tx_skb) 1977 { 1978 if (tx_skb->dma) { 1979 if (tx_skb->dma_single) 1980 pci_unmap_single(np->pci_dev, tx_skb->dma, 1981 tx_skb->dma_len, 1982 PCI_DMA_TODEVICE); 1983 else 1984 pci_unmap_page(np->pci_dev, tx_skb->dma, 1985 tx_skb->dma_len, 1986 PCI_DMA_TODEVICE); 1987 tx_skb->dma = 0; 1988 } 1989 } 1990 1991 static int nv_release_txskb(struct fe_priv *np, struct nv_skb_map *tx_skb) 1992 { 1993 nv_unmap_txskb(np, tx_skb); 1994 if (tx_skb->skb) { 1995 dev_kfree_skb_any(tx_skb->skb); 1996 tx_skb->skb = NULL; 1997 return 1; 1998 } 1999 return 0; 2000 } 2001 2002 static void nv_drain_tx(struct net_device *dev) 2003 { 2004 struct fe_priv *np = netdev_priv(dev); 2005 unsigned int i; 2006 2007 for (i = 0; i < np->tx_ring_size; i++) { 2008 if (!nv_optimized(np)) { 2009 np->tx_ring.orig[i].flaglen = 0; 2010 np->tx_ring.orig[i].buf = 0; 2011 } else { 2012 np->tx_ring.ex[i].flaglen = 0; 2013 np->tx_ring.ex[i].txvlan = 0; 2014 np->tx_ring.ex[i].bufhigh = 0; 2015 np->tx_ring.ex[i].buflow = 0; 2016 } 2017 if (nv_release_txskb(np, &np->tx_skb[i])) { 2018 u64_stats_update_begin(&np->swstats_tx_syncp); 2019 np->stat_tx_dropped++; 2020 u64_stats_update_end(&np->swstats_tx_syncp); 2021 } 2022 np->tx_skb[i].dma = 0; 2023 np->tx_skb[i].dma_len = 0; 2024 np->tx_skb[i].dma_single = 0; 2025 np->tx_skb[i].first_tx_desc = NULL; 2026 np->tx_skb[i].next_tx_ctx = NULL; 2027 } 2028 np->tx_pkts_in_progress = 0; 2029 np->tx_change_owner = NULL; 2030 np->tx_end_flip = NULL; 2031 } 2032 2033 static void nv_drain_rx(struct net_device *dev) 2034 { 2035 struct fe_priv *np = netdev_priv(dev); 2036 int i; 2037 2038 for (i = 0; 
i < np->rx_ring_size; i++) { 2039 if (!nv_optimized(np)) { 2040 np->rx_ring.orig[i].flaglen = 0; 2041 np->rx_ring.orig[i].buf = 0; 2042 } else { 2043 np->rx_ring.ex[i].flaglen = 0; 2044 np->rx_ring.ex[i].txvlan = 0; 2045 np->rx_ring.ex[i].bufhigh = 0; 2046 np->rx_ring.ex[i].buflow = 0; 2047 } 2048 wmb(); 2049 if (np->rx_skb[i].skb) { 2050 pci_unmap_single(np->pci_dev, np->rx_skb[i].dma, 2051 (skb_end_pointer(np->rx_skb[i].skb) - 2052 np->rx_skb[i].skb->data), 2053 PCI_DMA_FROMDEVICE); 2054 dev_kfree_skb(np->rx_skb[i].skb); 2055 np->rx_skb[i].skb = NULL; 2056 } 2057 } 2058 } 2059 2060 static void nv_drain_rxtx(struct net_device *dev) 2061 { 2062 nv_drain_tx(dev); 2063 nv_drain_rx(dev); 2064 } 2065 2066 static inline u32 nv_get_empty_tx_slots(struct fe_priv *np) 2067 { 2068 return (u32)(np->tx_ring_size - ((np->tx_ring_size + (np->put_tx_ctx - np->get_tx_ctx)) % np->tx_ring_size)); 2069 } 2070 2071 static void nv_legacybackoff_reseed(struct net_device *dev) 2072 { 2073 u8 __iomem *base = get_hwbase(dev); 2074 u32 reg; 2075 u32 low; 2076 int tx_status = 0; 2077 2078 reg = readl(base + NvRegSlotTime) & ~NVREG_SLOTTIME_MASK; 2079 get_random_bytes(&low, sizeof(low)); 2080 reg |= low & NVREG_SLOTTIME_MASK; 2081 2082 /* Need to stop tx before change takes effect. 2083 * Caller has already gained np->lock. 2084 */ 2085 tx_status = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_START; 2086 if (tx_status) 2087 nv_stop_tx(dev); 2088 nv_stop_rx(dev); 2089 writel(reg, base + NvRegSlotTime); 2090 if (tx_status) 2091 nv_start_tx(dev); 2092 nv_start_rx(dev); 2093 } 2094 2095 /* Gear Backoff Seeds */ 2096 #define BACKOFF_SEEDSET_ROWS 8 2097 #define BACKOFF_SEEDSET_LFSRS 15 2098 2099 /* Known Good seed sets */ 2100 static const u32 main_seedset[BACKOFF_SEEDSET_ROWS][BACKOFF_SEEDSET_LFSRS] = { 2101 {145, 155, 165, 175, 185, 196, 235, 245, 255, 265, 275, 285, 660, 690, 874}, 2102 {245, 255, 265, 575, 385, 298, 335, 345, 355, 366, 375, 385, 761, 790, 974}, 2103 {145, 155, 165, 175, 185, 196, 235, 245, 255, 265, 275, 285, 660, 690, 874}, 2104 {245, 255, 265, 575, 385, 298, 335, 345, 355, 366, 375, 386, 761, 790, 974}, 2105 {266, 265, 276, 585, 397, 208, 345, 355, 365, 376, 385, 396, 771, 700, 984}, 2106 {266, 265, 276, 586, 397, 208, 346, 355, 365, 376, 285, 396, 771, 700, 984}, 2107 {366, 365, 376, 686, 497, 308, 447, 455, 466, 476, 485, 496, 871, 800, 84}, 2108 {466, 465, 476, 786, 597, 408, 547, 555, 566, 576, 585, 597, 971, 900, 184} }; 2109 2110 static const u32 gear_seedset[BACKOFF_SEEDSET_ROWS][BACKOFF_SEEDSET_LFSRS] = { 2111 {251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375, 30, 295}, 2112 {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395}, 2113 {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 397}, 2114 {251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375, 30, 295}, 2115 {251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375, 30, 295}, 2116 {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395}, 2117 {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395}, 2118 {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395} }; 2119 2120 static void nv_gear_backoff_reseed(struct net_device *dev) 2121 { 2122 u8 __iomem *base = get_hwbase(dev); 2123 u32 miniseed1, miniseed2, miniseed2_reversed, miniseed3, miniseed3_reversed; 2124 u32 temp, seedset, combinedSeed; 2125 int i; 2126 2127 /* Setup seed for free running LFSR */ 2128 /* We are going to read the time stamp 
counter 3 times 2129 and swizzle bits around to increase randomness */ 2130 get_random_bytes(&miniseed1, sizeof(miniseed1)); 2131 miniseed1 &= 0x0fff; 2132 if (miniseed1 == 0) 2133 miniseed1 = 0xabc; 2134 2135 get_random_bytes(&miniseed2, sizeof(miniseed2)); 2136 miniseed2 &= 0x0fff; 2137 if (miniseed2 == 0) 2138 miniseed2 = 0xabc; 2139 miniseed2_reversed = 2140 ((miniseed2 & 0xF00) >> 8) | 2141 (miniseed2 & 0x0F0) | 2142 ((miniseed2 & 0x00F) << 8); 2143 2144 get_random_bytes(&miniseed3, sizeof(miniseed3)); 2145 miniseed3 &= 0x0fff; 2146 if (miniseed3 == 0) 2147 miniseed3 = 0xabc; 2148 miniseed3_reversed = 2149 ((miniseed3 & 0xF00) >> 8) | 2150 (miniseed3 & 0x0F0) | 2151 ((miniseed3 & 0x00F) << 8); 2152 2153 combinedSeed = ((miniseed1 ^ miniseed2_reversed) << 12) | 2154 (miniseed2 ^ miniseed3_reversed); 2155 2156 /* Seeds can not be zero */ 2157 if ((combinedSeed & NVREG_BKOFFCTRL_SEED_MASK) == 0) 2158 combinedSeed |= 0x08; 2159 if ((combinedSeed & (NVREG_BKOFFCTRL_SEED_MASK << NVREG_BKOFFCTRL_GEAR)) == 0) 2160 combinedSeed |= 0x8000; 2161 2162 /* No need to disable tx here */ 2163 temp = NVREG_BKOFFCTRL_DEFAULT | (0 << NVREG_BKOFFCTRL_SELECT); 2164 temp |= combinedSeed & NVREG_BKOFFCTRL_SEED_MASK; 2165 temp |= combinedSeed >> NVREG_BKOFFCTRL_GEAR; 2166 writel(temp, base + NvRegBackOffControl); 2167 2168 /* Setup seeds for all gear LFSRs. */ 2169 get_random_bytes(&seedset, sizeof(seedset)); 2170 seedset = seedset % BACKOFF_SEEDSET_ROWS; 2171 for (i = 1; i <= BACKOFF_SEEDSET_LFSRS; i++) { 2172 temp = NVREG_BKOFFCTRL_DEFAULT | (i << NVREG_BKOFFCTRL_SELECT); 2173 temp |= main_seedset[seedset][i-1] & 0x3ff; 2174 temp |= ((gear_seedset[seedset][i-1] & 0x3ff) << NVREG_BKOFFCTRL_GEAR); 2175 writel(temp, base + NvRegBackOffControl); 2176 } 2177 } 2178 2179 /* 2180 * nv_start_xmit: dev->hard_start_xmit function 2181 * Called with netif_tx_lock held. 2182 */ 2183 static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev) 2184 { 2185 struct fe_priv *np = netdev_priv(dev); 2186 u32 tx_flags = 0; 2187 u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET); 2188 unsigned int fragments = skb_shinfo(skb)->nr_frags; 2189 unsigned int i; 2190 u32 offset = 0; 2191 u32 bcnt; 2192 u32 size = skb_headlen(skb); 2193 u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0); 2194 u32 empty_slots; 2195 struct ring_desc *put_tx; 2196 struct ring_desc *start_tx; 2197 struct ring_desc *prev_tx; 2198 struct nv_skb_map *prev_tx_ctx; 2199 struct nv_skb_map *tmp_tx_ctx = NULL, *start_tx_ctx = NULL; 2200 unsigned long flags; 2201 2202 /* add fragments to entries count */ 2203 for (i = 0; i < fragments; i++) { 2204 u32 frag_size = skb_frag_size(&skb_shinfo(skb)->frags[i]); 2205 2206 entries += (frag_size >> NV_TX2_TSO_MAX_SHIFT) + 2207 ((frag_size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0); 2208 } 2209 2210 spin_lock_irqsave(&np->lock, flags); 2211 empty_slots = nv_get_empty_tx_slots(np); 2212 if (unlikely(empty_slots <= entries)) { 2213 netif_stop_queue(dev); 2214 np->tx_stop = 1; 2215 spin_unlock_irqrestore(&np->lock, flags); 2216 return NETDEV_TX_BUSY; 2217 } 2218 spin_unlock_irqrestore(&np->lock, flags); 2219 2220 start_tx = put_tx = np->put_tx.orig; 2221 2222 /* setup the header buffer */ 2223 do { 2224 prev_tx = put_tx; 2225 prev_tx_ctx = np->put_tx_ctx; 2226 bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? 
NV_TX2_TSO_MAX_SIZE : size; 2227 np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt, 2228 PCI_DMA_TODEVICE); 2229 if (pci_dma_mapping_error(np->pci_dev, 2230 np->put_tx_ctx->dma)) { 2231 /* on DMA mapping error - drop the packet */ 2232 dev_kfree_skb_any(skb); 2233 u64_stats_update_begin(&np->swstats_tx_syncp); 2234 np->stat_tx_dropped++; 2235 u64_stats_update_end(&np->swstats_tx_syncp); 2236 return NETDEV_TX_OK; 2237 } 2238 np->put_tx_ctx->dma_len = bcnt; 2239 np->put_tx_ctx->dma_single = 1; 2240 put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma); 2241 put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags); 2242 2243 tx_flags = np->tx_flags; 2244 offset += bcnt; 2245 size -= bcnt; 2246 if (unlikely(put_tx++ == np->last_tx.orig)) 2247 put_tx = np->first_tx.orig; 2248 if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx)) 2249 np->put_tx_ctx = np->first_tx_ctx; 2250 } while (size); 2251 2252 /* setup the fragments */ 2253 for (i = 0; i < fragments; i++) { 2254 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 2255 u32 frag_size = skb_frag_size(frag); 2256 offset = 0; 2257 2258 do { 2259 prev_tx = put_tx; 2260 prev_tx_ctx = np->put_tx_ctx; 2261 if (!start_tx_ctx) 2262 start_tx_ctx = tmp_tx_ctx = np->put_tx_ctx; 2263 2264 bcnt = (frag_size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : frag_size; 2265 np->put_tx_ctx->dma = skb_frag_dma_map( 2266 &np->pci_dev->dev, 2267 frag, offset, 2268 bcnt, 2269 DMA_TO_DEVICE); 2270 if (dma_mapping_error(&np->pci_dev->dev, np->put_tx_ctx->dma)) { 2271 2272 /* Unwind the mapped fragments */ 2273 do { 2274 nv_unmap_txskb(np, start_tx_ctx); 2275 if (unlikely(tmp_tx_ctx++ == np->last_tx_ctx)) 2276 tmp_tx_ctx = np->first_tx_ctx; 2277 } while (tmp_tx_ctx != np->put_tx_ctx); 2278 dev_kfree_skb_any(skb); 2279 np->put_tx_ctx = start_tx_ctx; 2280 u64_stats_update_begin(&np->swstats_tx_syncp); 2281 np->stat_tx_dropped++; 2282 u64_stats_update_end(&np->swstats_tx_syncp); 2283 return NETDEV_TX_OK; 2284 } 2285 2286 np->put_tx_ctx->dma_len = bcnt; 2287 np->put_tx_ctx->dma_single = 0; 2288 put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma); 2289 put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags); 2290 2291 offset += bcnt; 2292 frag_size -= bcnt; 2293 if (unlikely(put_tx++ == np->last_tx.orig)) 2294 put_tx = np->first_tx.orig; 2295 if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx)) 2296 np->put_tx_ctx = np->first_tx_ctx; 2297 } while (frag_size); 2298 } 2299 2300 /* set last fragment flag */ 2301 prev_tx->flaglen |= cpu_to_le32(tx_flags_extra); 2302 2303 /* save skb in this slot's context area */ 2304 prev_tx_ctx->skb = skb; 2305 2306 if (skb_is_gso(skb)) 2307 tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT); 2308 else 2309 tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ? 
2310 NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0; 2311 2312 spin_lock_irqsave(&np->lock, flags); 2313 2314 /* set tx flags */ 2315 start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra); 2316 2317 netdev_sent_queue(np->dev, skb->len); 2318 2319 skb_tx_timestamp(skb); 2320 2321 np->put_tx.orig = put_tx; 2322 2323 spin_unlock_irqrestore(&np->lock, flags); 2324 2325 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 2326 return NETDEV_TX_OK; 2327 } 2328 2329 static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb, 2330 struct net_device *dev) 2331 { 2332 struct fe_priv *np = netdev_priv(dev); 2333 u32 tx_flags = 0; 2334 u32 tx_flags_extra; 2335 unsigned int fragments = skb_shinfo(skb)->nr_frags; 2336 unsigned int i; 2337 u32 offset = 0; 2338 u32 bcnt; 2339 u32 size = skb_headlen(skb); 2340 u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0); 2341 u32 empty_slots; 2342 struct ring_desc_ex *put_tx; 2343 struct ring_desc_ex *start_tx; 2344 struct ring_desc_ex *prev_tx; 2345 struct nv_skb_map *prev_tx_ctx; 2346 struct nv_skb_map *start_tx_ctx = NULL; 2347 struct nv_skb_map *tmp_tx_ctx = NULL; 2348 unsigned long flags; 2349 2350 /* add fragments to entries count */ 2351 for (i = 0; i < fragments; i++) { 2352 u32 frag_size = skb_frag_size(&skb_shinfo(skb)->frags[i]); 2353 2354 entries += (frag_size >> NV_TX2_TSO_MAX_SHIFT) + 2355 ((frag_size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0); 2356 } 2357 2358 spin_lock_irqsave(&np->lock, flags); 2359 empty_slots = nv_get_empty_tx_slots(np); 2360 if (unlikely(empty_slots <= entries)) { 2361 netif_stop_queue(dev); 2362 np->tx_stop = 1; 2363 spin_unlock_irqrestore(&np->lock, flags); 2364 return NETDEV_TX_BUSY; 2365 } 2366 spin_unlock_irqrestore(&np->lock, flags); 2367 2368 start_tx = put_tx = np->put_tx.ex; 2369 start_tx_ctx = np->put_tx_ctx; 2370 2371 /* setup the header buffer */ 2372 do { 2373 prev_tx = put_tx; 2374 prev_tx_ctx = np->put_tx_ctx; 2375 bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size; 2376 np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt, 2377 PCI_DMA_TODEVICE); 2378 if (pci_dma_mapping_error(np->pci_dev, 2379 np->put_tx_ctx->dma)) { 2380 /* on DMA mapping error - drop the packet */ 2381 dev_kfree_skb_any(skb); 2382 u64_stats_update_begin(&np->swstats_tx_syncp); 2383 np->stat_tx_dropped++; 2384 u64_stats_update_end(&np->swstats_tx_syncp); 2385 return NETDEV_TX_OK; 2386 } 2387 np->put_tx_ctx->dma_len = bcnt; 2388 np->put_tx_ctx->dma_single = 1; 2389 put_tx->bufhigh = cpu_to_le32(dma_high(np->put_tx_ctx->dma)); 2390 put_tx->buflow = cpu_to_le32(dma_low(np->put_tx_ctx->dma)); 2391 put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags); 2392 2393 tx_flags = NV_TX2_VALID; 2394 offset += bcnt; 2395 size -= bcnt; 2396 if (unlikely(put_tx++ == np->last_tx.ex)) 2397 put_tx = np->first_tx.ex; 2398 if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx)) 2399 np->put_tx_ctx = np->first_tx_ctx; 2400 } while (size); 2401 2402 /* setup the fragments */ 2403 for (i = 0; i < fragments; i++) { 2404 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 2405 u32 frag_size = skb_frag_size(frag); 2406 offset = 0; 2407 2408 do { 2409 prev_tx = put_tx; 2410 prev_tx_ctx = np->put_tx_ctx; 2411 bcnt = (frag_size > NV_TX2_TSO_MAX_SIZE) ? 
NV_TX2_TSO_MAX_SIZE : frag_size;
			if (!start_tx_ctx)
				start_tx_ctx = tmp_tx_ctx = np->put_tx_ctx;
			np->put_tx_ctx->dma = skb_frag_dma_map(
							&np->pci_dev->dev,
							frag, offset,
							bcnt,
							DMA_TO_DEVICE);

			if (dma_mapping_error(&np->pci_dev->dev, np->put_tx_ctx->dma)) {

				/* Unwind the mapped fragments */
				do {
					nv_unmap_txskb(np, start_tx_ctx);
					if (unlikely(tmp_tx_ctx++ == np->last_tx_ctx))
						tmp_tx_ctx = np->first_tx_ctx;
				} while (tmp_tx_ctx != np->put_tx_ctx);
				dev_kfree_skb_any(skb);
				np->put_tx_ctx = start_tx_ctx;
				u64_stats_update_begin(&np->swstats_tx_syncp);
				np->stat_tx_dropped++;
				u64_stats_update_end(&np->swstats_tx_syncp);
				return NETDEV_TX_OK;
			}
			np->put_tx_ctx->dma_len = bcnt;
			np->put_tx_ctx->dma_single = 0;
			put_tx->bufhigh = cpu_to_le32(dma_high(np->put_tx_ctx->dma));
			put_tx->buflow = cpu_to_le32(dma_low(np->put_tx_ctx->dma));
			put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);

			offset += bcnt;
			frag_size -= bcnt;
			if (unlikely(put_tx++ == np->last_tx.ex))
				put_tx = np->first_tx.ex;
			if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
				np->put_tx_ctx = np->first_tx_ctx;
		} while (frag_size);
	}

	/* set last fragment flag */
	prev_tx->flaglen |= cpu_to_le32(NV_TX2_LASTPACKET);

	/* save skb in this slot's context area */
	prev_tx_ctx->skb = skb;

	if (skb_is_gso(skb))
		tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
	else
		tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ?
			 NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0;

	/* vlan tag */
	if (skb_vlan_tag_present(skb))
		start_tx->txvlan = cpu_to_le32(NV_TX3_VLAN_TAG_PRESENT |
					skb_vlan_tag_get(skb));
	else
		start_tx->txvlan = 0;

	spin_lock_irqsave(&np->lock, flags);

	if (np->tx_limit) {
		/* Limit the number of outstanding tx. Set up all fragments, but
		 * do not set the VALID bit on the first descriptor. Save pointers
		 * to that descriptor and to the next skb_map element.
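		 *
		 * Illustrative walk-through (not part of the original comment):
		 * once NV_TX_LIMIT_COUNT packets are owned by the hardware,
		 * each further packet is queued with VALID clear on its first
		 * descriptor; as completions come back, nv_tx_flip_ownership()
		 * sets VALID on the oldest deferred packet and re-kicks the
		 * transmitter, so the hardware never owns more than
		 * NV_TX_LIMIT_COUNT packets at once.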
2475 */ 2476 2477 if (np->tx_pkts_in_progress == NV_TX_LIMIT_COUNT) { 2478 if (!np->tx_change_owner) 2479 np->tx_change_owner = start_tx_ctx; 2480 2481 /* remove VALID bit */ 2482 tx_flags &= ~NV_TX2_VALID; 2483 start_tx_ctx->first_tx_desc = start_tx; 2484 start_tx_ctx->next_tx_ctx = np->put_tx_ctx; 2485 np->tx_end_flip = np->put_tx_ctx; 2486 } else { 2487 np->tx_pkts_in_progress++; 2488 } 2489 } 2490 2491 /* set tx flags */ 2492 start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra); 2493 2494 netdev_sent_queue(np->dev, skb->len); 2495 2496 skb_tx_timestamp(skb); 2497 2498 np->put_tx.ex = put_tx; 2499 2500 spin_unlock_irqrestore(&np->lock, flags); 2501 2502 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 2503 return NETDEV_TX_OK; 2504 } 2505 2506 static inline void nv_tx_flip_ownership(struct net_device *dev) 2507 { 2508 struct fe_priv *np = netdev_priv(dev); 2509 2510 np->tx_pkts_in_progress--; 2511 if (np->tx_change_owner) { 2512 np->tx_change_owner->first_tx_desc->flaglen |= 2513 cpu_to_le32(NV_TX2_VALID); 2514 np->tx_pkts_in_progress++; 2515 2516 np->tx_change_owner = np->tx_change_owner->next_tx_ctx; 2517 if (np->tx_change_owner == np->tx_end_flip) 2518 np->tx_change_owner = NULL; 2519 2520 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 2521 } 2522 } 2523 2524 /* 2525 * nv_tx_done: check for completed packets, release the skbs. 2526 * 2527 * Caller must own np->lock. 2528 */ 2529 static int nv_tx_done(struct net_device *dev, int limit) 2530 { 2531 struct fe_priv *np = netdev_priv(dev); 2532 u32 flags; 2533 int tx_work = 0; 2534 struct ring_desc *orig_get_tx = np->get_tx.orig; 2535 unsigned int bytes_compl = 0; 2536 2537 while ((np->get_tx.orig != np->put_tx.orig) && 2538 !((flags = le32_to_cpu(np->get_tx.orig->flaglen)) & NV_TX_VALID) && 2539 (tx_work < limit)) { 2540 2541 nv_unmap_txskb(np, np->get_tx_ctx); 2542 2543 if (np->desc_ver == DESC_VER_1) { 2544 if (flags & NV_TX_LASTPACKET) { 2545 if (flags & NV_TX_ERROR) { 2546 if ((flags & NV_TX_RETRYERROR) 2547 && !(flags & NV_TX_RETRYCOUNT_MASK)) 2548 nv_legacybackoff_reseed(dev); 2549 } else { 2550 u64_stats_update_begin(&np->swstats_tx_syncp); 2551 np->stat_tx_packets++; 2552 np->stat_tx_bytes += np->get_tx_ctx->skb->len; 2553 u64_stats_update_end(&np->swstats_tx_syncp); 2554 } 2555 bytes_compl += np->get_tx_ctx->skb->len; 2556 dev_kfree_skb_any(np->get_tx_ctx->skb); 2557 np->get_tx_ctx->skb = NULL; 2558 tx_work++; 2559 } 2560 } else { 2561 if (flags & NV_TX2_LASTPACKET) { 2562 if (flags & NV_TX2_ERROR) { 2563 if ((flags & NV_TX2_RETRYERROR) 2564 && !(flags & NV_TX2_RETRYCOUNT_MASK)) 2565 nv_legacybackoff_reseed(dev); 2566 } else { 2567 u64_stats_update_begin(&np->swstats_tx_syncp); 2568 np->stat_tx_packets++; 2569 np->stat_tx_bytes += np->get_tx_ctx->skb->len; 2570 u64_stats_update_end(&np->swstats_tx_syncp); 2571 } 2572 bytes_compl += np->get_tx_ctx->skb->len; 2573 dev_kfree_skb_any(np->get_tx_ctx->skb); 2574 np->get_tx_ctx->skb = NULL; 2575 tx_work++; 2576 } 2577 } 2578 if (unlikely(np->get_tx.orig++ == np->last_tx.orig)) 2579 np->get_tx.orig = np->first_tx.orig; 2580 if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx)) 2581 np->get_tx_ctx = np->first_tx_ctx; 2582 } 2583 2584 netdev_completed_queue(np->dev, tx_work, bytes_compl); 2585 2586 if (unlikely((np->tx_stop == 1) && (np->get_tx.orig != orig_get_tx))) { 2587 np->tx_stop = 0; 2588 netif_wake_queue(dev); 2589 } 2590 return tx_work; 2591 } 2592 2593 static int nv_tx_done_optimized(struct net_device *dev, 
int limit) 2594 { 2595 struct fe_priv *np = netdev_priv(dev); 2596 u32 flags; 2597 int tx_work = 0; 2598 struct ring_desc_ex *orig_get_tx = np->get_tx.ex; 2599 unsigned long bytes_cleaned = 0; 2600 2601 while ((np->get_tx.ex != np->put_tx.ex) && 2602 !((flags = le32_to_cpu(np->get_tx.ex->flaglen)) & NV_TX2_VALID) && 2603 (tx_work < limit)) { 2604 2605 nv_unmap_txskb(np, np->get_tx_ctx); 2606 2607 if (flags & NV_TX2_LASTPACKET) { 2608 if (flags & NV_TX2_ERROR) { 2609 if ((flags & NV_TX2_RETRYERROR) 2610 && !(flags & NV_TX2_RETRYCOUNT_MASK)) { 2611 if (np->driver_data & DEV_HAS_GEAR_MODE) 2612 nv_gear_backoff_reseed(dev); 2613 else 2614 nv_legacybackoff_reseed(dev); 2615 } 2616 } else { 2617 u64_stats_update_begin(&np->swstats_tx_syncp); 2618 np->stat_tx_packets++; 2619 np->stat_tx_bytes += np->get_tx_ctx->skb->len; 2620 u64_stats_update_end(&np->swstats_tx_syncp); 2621 } 2622 2623 bytes_cleaned += np->get_tx_ctx->skb->len; 2624 dev_kfree_skb_any(np->get_tx_ctx->skb); 2625 np->get_tx_ctx->skb = NULL; 2626 tx_work++; 2627 2628 if (np->tx_limit) 2629 nv_tx_flip_ownership(dev); 2630 } 2631 2632 if (unlikely(np->get_tx.ex++ == np->last_tx.ex)) 2633 np->get_tx.ex = np->first_tx.ex; 2634 if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx)) 2635 np->get_tx_ctx = np->first_tx_ctx; 2636 } 2637 2638 netdev_completed_queue(np->dev, tx_work, bytes_cleaned); 2639 2640 if (unlikely((np->tx_stop == 1) && (np->get_tx.ex != orig_get_tx))) { 2641 np->tx_stop = 0; 2642 netif_wake_queue(dev); 2643 } 2644 return tx_work; 2645 } 2646 2647 /* 2648 * nv_tx_timeout: dev->tx_timeout function 2649 * Called with netif_tx_lock held. 2650 */ 2651 static void nv_tx_timeout(struct net_device *dev) 2652 { 2653 struct fe_priv *np = netdev_priv(dev); 2654 u8 __iomem *base = get_hwbase(dev); 2655 u32 status; 2656 union ring_type put_tx; 2657 int saved_tx_limit; 2658 2659 if (np->msi_flags & NV_MSI_X_ENABLED) 2660 status = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK; 2661 else 2662 status = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK; 2663 2664 netdev_warn(dev, "Got tx_timeout. 
irq status: %08x\n", status); 2665 2666 if (unlikely(debug_tx_timeout)) { 2667 int i; 2668 2669 netdev_info(dev, "Ring at %lx\n", (unsigned long)np->ring_addr); 2670 netdev_info(dev, "Dumping tx registers\n"); 2671 for (i = 0; i <= np->register_size; i += 32) { 2672 netdev_info(dev, 2673 "%3x: %08x %08x %08x %08x " 2674 "%08x %08x %08x %08x\n", 2675 i, 2676 readl(base + i + 0), readl(base + i + 4), 2677 readl(base + i + 8), readl(base + i + 12), 2678 readl(base + i + 16), readl(base + i + 20), 2679 readl(base + i + 24), readl(base + i + 28)); 2680 } 2681 netdev_info(dev, "Dumping tx ring\n"); 2682 for (i = 0; i < np->tx_ring_size; i += 4) { 2683 if (!nv_optimized(np)) { 2684 netdev_info(dev, 2685 "%03x: %08x %08x // %08x %08x " 2686 "// %08x %08x // %08x %08x\n", 2687 i, 2688 le32_to_cpu(np->tx_ring.orig[i].buf), 2689 le32_to_cpu(np->tx_ring.orig[i].flaglen), 2690 le32_to_cpu(np->tx_ring.orig[i+1].buf), 2691 le32_to_cpu(np->tx_ring.orig[i+1].flaglen), 2692 le32_to_cpu(np->tx_ring.orig[i+2].buf), 2693 le32_to_cpu(np->tx_ring.orig[i+2].flaglen), 2694 le32_to_cpu(np->tx_ring.orig[i+3].buf), 2695 le32_to_cpu(np->tx_ring.orig[i+3].flaglen)); 2696 } else { 2697 netdev_info(dev, 2698 "%03x: %08x %08x %08x " 2699 "// %08x %08x %08x " 2700 "// %08x %08x %08x " 2701 "// %08x %08x %08x\n", 2702 i, 2703 le32_to_cpu(np->tx_ring.ex[i].bufhigh), 2704 le32_to_cpu(np->tx_ring.ex[i].buflow), 2705 le32_to_cpu(np->tx_ring.ex[i].flaglen), 2706 le32_to_cpu(np->tx_ring.ex[i+1].bufhigh), 2707 le32_to_cpu(np->tx_ring.ex[i+1].buflow), 2708 le32_to_cpu(np->tx_ring.ex[i+1].flaglen), 2709 le32_to_cpu(np->tx_ring.ex[i+2].bufhigh), 2710 le32_to_cpu(np->tx_ring.ex[i+2].buflow), 2711 le32_to_cpu(np->tx_ring.ex[i+2].flaglen), 2712 le32_to_cpu(np->tx_ring.ex[i+3].bufhigh), 2713 le32_to_cpu(np->tx_ring.ex[i+3].buflow), 2714 le32_to_cpu(np->tx_ring.ex[i+3].flaglen)); 2715 } 2716 } 2717 } 2718 2719 spin_lock_irq(&np->lock); 2720 2721 /* 1) stop tx engine */ 2722 nv_stop_tx(dev); 2723 2724 /* 2) complete any outstanding tx and do not give HW any limited tx pkts */ 2725 saved_tx_limit = np->tx_limit; 2726 np->tx_limit = 0; /* prevent giving HW any limited pkts */ 2727 np->tx_stop = 0; /* prevent waking tx queue */ 2728 if (!nv_optimized(np)) 2729 nv_tx_done(dev, np->tx_ring_size); 2730 else 2731 nv_tx_done_optimized(dev, np->tx_ring_size); 2732 2733 /* save current HW position */ 2734 if (np->tx_change_owner) 2735 put_tx.ex = np->tx_change_owner->first_tx_desc; 2736 else 2737 put_tx = np->put_tx; 2738 2739 /* 3) clear all tx state */ 2740 nv_drain_tx(dev); 2741 nv_init_tx(dev); 2742 2743 /* 4) restore state to current HW position */ 2744 np->get_tx = np->put_tx = put_tx; 2745 np->tx_limit = saved_tx_limit; 2746 2747 /* 5) restart tx engine */ 2748 nv_start_tx(dev); 2749 netif_wake_queue(dev); 2750 spin_unlock_irq(&np->lock); 2751 } 2752 2753 /* 2754 * Called when the nic notices a mismatch between the actual data len on the 2755 * wire and the len indicated in the 802 header 2756 */ 2757 static int nv_getlen(struct net_device *dev, void *packet, int datalen) 2758 { 2759 int hdrlen; /* length of the 802 header */ 2760 int protolen; /* length as stored in the proto field */ 2761 2762 /* 1) calculate len according to header */ 2763 if (((struct vlan_ethhdr *)packet)->h_vlan_proto == htons(ETH_P_8021Q)) { 2764 protolen = ntohs(((struct vlan_ethhdr *)packet)->h_vlan_encapsulated_proto); 2765 hdrlen = VLAN_HLEN; 2766 } else { 2767 protolen = ntohs(((struct ethhdr *)packet)->h_proto); 2768 hdrlen = ETH_HLEN; 2769 } 2770 if (protolen > 
ETH_DATA_LEN)
		return datalen; /* Value in proto field not a len, no checks possible */

	protolen += hdrlen;
	/* consistency checks: */
	if (datalen > ETH_ZLEN) {
		if (datalen >= protolen) {
			/* more data on wire than in 802 header, trim off
			 * additional data.
			 */
			return protolen;
		} else {
			/* less data on wire than mentioned in header.
			 * Discard the packet.
			 */
			return -1;
		}
	} else {
		/* short packet. Accept only if 802 values are also short */
		if (protolen > ETH_ZLEN) {
			return -1;
		}
		return datalen;
	}
}

static int nv_rx_process(struct net_device *dev, int limit)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 flags;
	int rx_work = 0;
	struct sk_buff *skb;
	int len;

	while ((np->get_rx.orig != np->put_rx.orig) &&
	      !((flags = le32_to_cpu(np->get_rx.orig->flaglen)) & NV_RX_AVAIL) &&
		(rx_work < limit)) {

		/*
		 * the packet is for us - immediately tear down the pci mapping.
		 * TODO: check if a prefetch of the first cacheline improves
		 * the performance.
		 */
		pci_unmap_single(np->pci_dev, np->get_rx_ctx->dma,
				 np->get_rx_ctx->dma_len,
				 PCI_DMA_FROMDEVICE);
		skb = np->get_rx_ctx->skb;
		np->get_rx_ctx->skb = NULL;

		/* look at what we actually got: */
		if (np->desc_ver == DESC_VER_1) {
			if (likely(flags & NV_RX_DESCRIPTORVALID)) {
				len = flags & LEN_MASK_V1;
				if (unlikely(flags & NV_RX_ERROR)) {
					if ((flags & NV_RX_ERROR_MASK) == NV_RX_ERROR4) {
						len = nv_getlen(dev, skb->data, len);
						if (len < 0) {
							dev_kfree_skb(skb);
							goto next_pkt;
						}
					}
					/* framing errors are soft errors */
					else if ((flags & NV_RX_ERROR_MASK) == NV_RX_FRAMINGERR) {
						if (flags & NV_RX_SUBTRACT1)
							len--;
					}
					/* the rest are hard errors */
					else {
						if (flags & NV_RX_MISSEDFRAME) {
							u64_stats_update_begin(&np->swstats_rx_syncp);
							np->stat_rx_missed_errors++;
							u64_stats_update_end(&np->swstats_rx_syncp);
						}
						dev_kfree_skb(skb);
						goto next_pkt;
					}
				}
			} else {
				dev_kfree_skb(skb);
				goto next_pkt;
			}
		} else {
			if (likely(flags & NV_RX2_DESCRIPTORVALID)) {
				len = flags & LEN_MASK_V2;
				if (unlikely(flags & NV_RX2_ERROR)) {
					if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_ERROR4) {
						len = nv_getlen(dev, skb->data, len);
						if (len < 0) {
							dev_kfree_skb(skb);
							goto next_pkt;
						}
					}
					/* framing errors are soft errors */
					else if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_FRAMINGERR) {
						if (flags & NV_RX2_SUBTRACT1)
							len--;
					}
					/* the rest are hard errors */
					else {
						dev_kfree_skb(skb);
						goto next_pkt;
					}
				}
				if (((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_TCP) || /* ip and tcp */
				    ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_UDP))   /* ip and udp */
					skb->ip_summed = CHECKSUM_UNNECESSARY;
			} else {
				dev_kfree_skb(skb);
				goto next_pkt;
			}
		}
		/* got a valid packet - forward it to the network core */
		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, dev);
		napi_gro_receive(&np->napi, skb);
		u64_stats_update_begin(&np->swstats_rx_syncp);
		np->stat_rx_packets++;
		np->stat_rx_bytes += len;
		u64_stats_update_end(&np->swstats_rx_syncp);
next_pkt:
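		/* Advance and wrap the ring pointers. A hypothetical
		 * index-based equivalent would be
		 *
		 *	idx = (idx + 1) % ring_size;
		 *
		 * the post-increment-and-compare form below avoids the
		 * modulo in this hot loop.
		 */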
		if (unlikely(np->get_rx.orig++ == np->last_rx.orig))
			np->get_rx.orig = np->first_rx.orig;
		if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx))
			np->get_rx_ctx = np->first_rx_ctx;

		rx_work++;
	}

	return rx_work;
}

static int nv_rx_process_optimized(struct net_device *dev, int limit)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 flags;
	u32 vlanflags = 0;
	int rx_work = 0;
	struct sk_buff *skb;
	int len;

	while ((np->get_rx.ex != np->put_rx.ex) &&
	      !((flags = le32_to_cpu(np->get_rx.ex->flaglen)) & NV_RX2_AVAIL) &&
	      (rx_work < limit)) {

		/*
		 * the packet is for us - immediately tear down the pci mapping.
		 * TODO: check if a prefetch of the first cacheline improves
		 * the performance.
		 */
		pci_unmap_single(np->pci_dev, np->get_rx_ctx->dma,
				 np->get_rx_ctx->dma_len,
				 PCI_DMA_FROMDEVICE);
		skb = np->get_rx_ctx->skb;
		np->get_rx_ctx->skb = NULL;

		/* look at what we actually got: */
		if (likely(flags & NV_RX2_DESCRIPTORVALID)) {
			len = flags & LEN_MASK_V2;
			if (unlikely(flags & NV_RX2_ERROR)) {
				if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_ERROR4) {
					len = nv_getlen(dev, skb->data, len);
					if (len < 0) {
						dev_kfree_skb(skb);
						goto next_pkt;
					}
				}
				/* framing errors are soft errors */
				else if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_FRAMINGERR) {
					if (flags & NV_RX2_SUBTRACT1)
						len--;
				}
				/* the rest are hard errors */
				else {
					dev_kfree_skb(skb);
					goto next_pkt;
				}
			}

			if (((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_TCP) || /* ip and tcp */
			    ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_UDP))   /* ip and udp */
				skb->ip_summed = CHECKSUM_UNNECESSARY;

			/* got a valid packet - forward it to the network core */
			skb_put(skb, len);
			skb->protocol = eth_type_trans(skb, dev);
			prefetch(skb->data);

			vlanflags = le32_to_cpu(np->get_rx.ex->buflow);

			/*
			 * There is a need to check for NETIF_F_HW_VLAN_CTAG_RX
			 * here. Even if vlan rx accel is disabled,
			 * NV_RX3_VLAN_TAG_PRESENT is pseudo-randomly set.
			 */
			if (dev->features & NETIF_F_HW_VLAN_CTAG_RX &&
			    vlanflags & NV_RX3_VLAN_TAG_PRESENT) {
				u16 vid = vlanflags & NV_RX3_VLAN_TAG_MASK;

				__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
			}
			napi_gro_receive(&np->napi, skb);
			u64_stats_update_begin(&np->swstats_rx_syncp);
			np->stat_rx_packets++;
			np->stat_rx_bytes += len;
			u64_stats_update_end(&np->swstats_rx_syncp);
		} else {
			dev_kfree_skb(skb);
		}
next_pkt:
		if (unlikely(np->get_rx.ex++ == np->last_rx.ex))
			np->get_rx.ex = np->first_rx.ex;
		if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx))
			np->get_rx_ctx = np->first_rx_ctx;

		rx_work++;
	}

	return rx_work;
}

static void set_bufsize(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);

	if (dev->mtu <= ETH_DATA_LEN)
		np->rx_buf_sz = ETH_DATA_LEN + NV_RX_HEADERS;
	else
		np->rx_buf_sz = dev->mtu + NV_RX_HEADERS;
}

/*
 * nv_change_mtu: dev->change_mtu function
 * Called with dev_base_lock held for read.
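 * Crossing the standard-frame boundary changes np->rx_buf_sz, so the
 * function quiesces the device completely: IRQs and NAPI are disabled,
 * the engines stopped, both rings drained and rebuilt with the new
 * buffer size, and the NIC's ring addresses, ring sizes and offload
 * config are reprogrammed before everything is restarted.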
3003 */ 3004 static int nv_change_mtu(struct net_device *dev, int new_mtu) 3005 { 3006 struct fe_priv *np = netdev_priv(dev); 3007 int old_mtu; 3008 3009 old_mtu = dev->mtu; 3010 dev->mtu = new_mtu; 3011 3012 /* return early if the buffer sizes will not change */ 3013 if (old_mtu <= ETH_DATA_LEN && new_mtu <= ETH_DATA_LEN) 3014 return 0; 3015 3016 /* synchronized against open : rtnl_lock() held by caller */ 3017 if (netif_running(dev)) { 3018 u8 __iomem *base = get_hwbase(dev); 3019 /* 3020 * It seems that the nic preloads valid ring entries into an 3021 * internal buffer. The procedure for flushing everything is 3022 * guessed, there is probably a simpler approach. 3023 * Changing the MTU is a rare event, it shouldn't matter. 3024 */ 3025 nv_disable_irq(dev); 3026 nv_napi_disable(dev); 3027 netif_tx_lock_bh(dev); 3028 netif_addr_lock(dev); 3029 spin_lock(&np->lock); 3030 /* stop engines */ 3031 nv_stop_rxtx(dev); 3032 nv_txrx_reset(dev); 3033 /* drain rx queue */ 3034 nv_drain_rxtx(dev); 3035 /* reinit driver view of the rx queue */ 3036 set_bufsize(dev); 3037 if (nv_init_ring(dev)) { 3038 if (!np->in_shutdown) 3039 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); 3040 } 3041 /* reinit nic view of the rx queue */ 3042 writel(np->rx_buf_sz, base + NvRegOffloadConfig); 3043 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); 3044 writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), 3045 base + NvRegRingSizes); 3046 pci_push(base); 3047 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 3048 pci_push(base); 3049 3050 /* restart rx engine */ 3051 nv_start_rxtx(dev); 3052 spin_unlock(&np->lock); 3053 netif_addr_unlock(dev); 3054 netif_tx_unlock_bh(dev); 3055 nv_napi_enable(dev); 3056 nv_enable_irq(dev); 3057 } 3058 return 0; 3059 } 3060 3061 static void nv_copy_mac_to_hw(struct net_device *dev) 3062 { 3063 u8 __iomem *base = get_hwbase(dev); 3064 u32 mac[2]; 3065 3066 mac[0] = (dev->dev_addr[0] << 0) + (dev->dev_addr[1] << 8) + 3067 (dev->dev_addr[2] << 16) + (dev->dev_addr[3] << 24); 3068 mac[1] = (dev->dev_addr[4] << 0) + (dev->dev_addr[5] << 8); 3069 3070 writel(mac[0], base + NvRegMacAddrA); 3071 writel(mac[1], base + NvRegMacAddrB); 3072 } 3073 3074 /* 3075 * nv_set_mac_address: dev->set_mac_address function 3076 * Called with rtnl_lock() held. 3077 */ 3078 static int nv_set_mac_address(struct net_device *dev, void *addr) 3079 { 3080 struct fe_priv *np = netdev_priv(dev); 3081 struct sockaddr *macaddr = (struct sockaddr *)addr; 3082 3083 if (!is_valid_ether_addr(macaddr->sa_data)) 3084 return -EADDRNOTAVAIL; 3085 3086 /* synchronized against open : rtnl_lock() held by caller */ 3087 memcpy(dev->dev_addr, macaddr->sa_data, ETH_ALEN); 3088 3089 if (netif_running(dev)) { 3090 netif_tx_lock_bh(dev); 3091 netif_addr_lock(dev); 3092 spin_lock_irq(&np->lock); 3093 3094 /* stop rx engine */ 3095 nv_stop_rx(dev); 3096 3097 /* set mac address */ 3098 nv_copy_mac_to_hw(dev); 3099 3100 /* restart rx engine */ 3101 nv_start_rx(dev); 3102 spin_unlock_irq(&np->lock); 3103 netif_addr_unlock(dev); 3104 netif_tx_unlock_bh(dev); 3105 } else { 3106 nv_copy_mac_to_hw(dev); 3107 } 3108 return 0; 3109 } 3110 3111 /* 3112 * nv_set_multicast: dev->set_multicast function 3113 * Called with netif_tx_lock held. 
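 *
 * The hardware filter matches on (addr, mask) pairs: the multicast list
 * is collapsed into alwaysOn (bits set in every address) and alwaysOff
 * (bits clear in every address), and mask = alwaysOn | alwaysOff keeps
 * only the bit positions on which all addresses agree. Illustrative
 * example with two 4-bit addresses 0011b and 1011b: alwaysOn = 0011b,
 * alwaysOff = 0100b, so addr = 0011b and mask = 0111b, and the
 * differing top bit is ignored so both addresses match.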
3114 */ 3115 static void nv_set_multicast(struct net_device *dev) 3116 { 3117 struct fe_priv *np = netdev_priv(dev); 3118 u8 __iomem *base = get_hwbase(dev); 3119 u32 addr[2]; 3120 u32 mask[2]; 3121 u32 pff = readl(base + NvRegPacketFilterFlags) & NVREG_PFF_PAUSE_RX; 3122 3123 memset(addr, 0, sizeof(addr)); 3124 memset(mask, 0, sizeof(mask)); 3125 3126 if (dev->flags & IFF_PROMISC) { 3127 pff |= NVREG_PFF_PROMISC; 3128 } else { 3129 pff |= NVREG_PFF_MYADDR; 3130 3131 if (dev->flags & IFF_ALLMULTI || !netdev_mc_empty(dev)) { 3132 u32 alwaysOff[2]; 3133 u32 alwaysOn[2]; 3134 3135 alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0xffffffff; 3136 if (dev->flags & IFF_ALLMULTI) { 3137 alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0; 3138 } else { 3139 struct netdev_hw_addr *ha; 3140 3141 netdev_for_each_mc_addr(ha, dev) { 3142 unsigned char *hw_addr = ha->addr; 3143 u32 a, b; 3144 3145 a = le32_to_cpu(*(__le32 *) hw_addr); 3146 b = le16_to_cpu(*(__le16 *) (&hw_addr[4])); 3147 alwaysOn[0] &= a; 3148 alwaysOff[0] &= ~a; 3149 alwaysOn[1] &= b; 3150 alwaysOff[1] &= ~b; 3151 } 3152 } 3153 addr[0] = alwaysOn[0]; 3154 addr[1] = alwaysOn[1]; 3155 mask[0] = alwaysOn[0] | alwaysOff[0]; 3156 mask[1] = alwaysOn[1] | alwaysOff[1]; 3157 } else { 3158 mask[0] = NVREG_MCASTMASKA_NONE; 3159 mask[1] = NVREG_MCASTMASKB_NONE; 3160 } 3161 } 3162 addr[0] |= NVREG_MCASTADDRA_FORCE; 3163 pff |= NVREG_PFF_ALWAYS; 3164 spin_lock_irq(&np->lock); 3165 nv_stop_rx(dev); 3166 writel(addr[0], base + NvRegMulticastAddrA); 3167 writel(addr[1], base + NvRegMulticastAddrB); 3168 writel(mask[0], base + NvRegMulticastMaskA); 3169 writel(mask[1], base + NvRegMulticastMaskB); 3170 writel(pff, base + NvRegPacketFilterFlags); 3171 nv_start_rx(dev); 3172 spin_unlock_irq(&np->lock); 3173 } 3174 3175 static void nv_update_pause(struct net_device *dev, u32 pause_flags) 3176 { 3177 struct fe_priv *np = netdev_priv(dev); 3178 u8 __iomem *base = get_hwbase(dev); 3179 3180 np->pause_flags &= ~(NV_PAUSEFRAME_TX_ENABLE | NV_PAUSEFRAME_RX_ENABLE); 3181 3182 if (np->pause_flags & NV_PAUSEFRAME_RX_CAPABLE) { 3183 u32 pff = readl(base + NvRegPacketFilterFlags) & ~NVREG_PFF_PAUSE_RX; 3184 if (pause_flags & NV_PAUSEFRAME_RX_ENABLE) { 3185 writel(pff|NVREG_PFF_PAUSE_RX, base + NvRegPacketFilterFlags); 3186 np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE; 3187 } else { 3188 writel(pff, base + NvRegPacketFilterFlags); 3189 } 3190 } 3191 if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE) { 3192 u32 regmisc = readl(base + NvRegMisc1) & ~NVREG_MISC1_PAUSE_TX; 3193 if (pause_flags & NV_PAUSEFRAME_TX_ENABLE) { 3194 u32 pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V1; 3195 if (np->driver_data & DEV_HAS_PAUSEFRAME_TX_V2) 3196 pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V2; 3197 if (np->driver_data & DEV_HAS_PAUSEFRAME_TX_V3) { 3198 pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V3; 3199 /* limit the number of tx pause frames to a default of 8 */ 3200 writel(readl(base + NvRegTxPauseFrameLimit)|NVREG_TX_PAUSEFRAMELIMIT_ENABLE, base + NvRegTxPauseFrameLimit); 3201 } 3202 writel(pause_enable, base + NvRegTxPauseFrame); 3203 writel(regmisc|NVREG_MISC1_PAUSE_TX, base + NvRegMisc1); 3204 np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE; 3205 } else { 3206 writel(NVREG_TX_PAUSEFRAME_DISABLE, base + NvRegTxPauseFrame); 3207 writel(regmisc, base + NvRegMisc1); 3208 } 3209 } 3210 } 3211 3212 static void nv_force_linkspeed(struct net_device *dev, int speed, int duplex) 3213 { 3214 struct fe_priv *np = netdev_priv(dev); 3215 u8 __iomem *base = get_hwbase(dev); 3216 u32 
phyreg, txreg; 3217 int mii_status; 3218 3219 np->linkspeed = NVREG_LINKSPEED_FORCE|speed; 3220 np->duplex = duplex; 3221 3222 /* see if gigabit phy */ 3223 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); 3224 if (mii_status & PHY_GIGABIT) { 3225 np->gigabit = PHY_GIGABIT; 3226 phyreg = readl(base + NvRegSlotTime); 3227 phyreg &= ~(0x3FF00); 3228 if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_10) 3229 phyreg |= NVREG_SLOTTIME_10_100_FULL; 3230 else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_100) 3231 phyreg |= NVREG_SLOTTIME_10_100_FULL; 3232 else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_1000) 3233 phyreg |= NVREG_SLOTTIME_1000_FULL; 3234 writel(phyreg, base + NvRegSlotTime); 3235 } 3236 3237 phyreg = readl(base + NvRegPhyInterface); 3238 phyreg &= ~(PHY_HALF|PHY_100|PHY_1000); 3239 if (np->duplex == 0) 3240 phyreg |= PHY_HALF; 3241 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_100) 3242 phyreg |= PHY_100; 3243 else if ((np->linkspeed & NVREG_LINKSPEED_MASK) == 3244 NVREG_LINKSPEED_1000) 3245 phyreg |= PHY_1000; 3246 writel(phyreg, base + NvRegPhyInterface); 3247 3248 if (phyreg & PHY_RGMII) { 3249 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == 3250 NVREG_LINKSPEED_1000) 3251 txreg = NVREG_TX_DEFERRAL_RGMII_1000; 3252 else 3253 txreg = NVREG_TX_DEFERRAL_RGMII_10_100; 3254 } else { 3255 txreg = NVREG_TX_DEFERRAL_DEFAULT; 3256 } 3257 writel(txreg, base + NvRegTxDeferral); 3258 3259 if (np->desc_ver == DESC_VER_1) { 3260 txreg = NVREG_TX_WM_DESC1_DEFAULT; 3261 } else { 3262 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == 3263 NVREG_LINKSPEED_1000) 3264 txreg = NVREG_TX_WM_DESC2_3_1000; 3265 else 3266 txreg = NVREG_TX_WM_DESC2_3_DEFAULT; 3267 } 3268 writel(txreg, base + NvRegTxWatermark); 3269 3270 writel(NVREG_MISC1_FORCE | (np->duplex ? 0 : NVREG_MISC1_HD), 3271 base + NvRegMisc1); 3272 pci_push(base); 3273 writel(np->linkspeed, base + NvRegLinkSpeed); 3274 pci_push(base); 3275 } 3276 3277 /** 3278 * nv_update_linkspeed - Setup the MAC according to the link partner 3279 * @dev: Network device to be configured 3280 * 3281 * The function queries the PHY and checks if there is a link partner. 3282 * If yes, then it sets up the MAC accordingly. Otherwise, the MAC is 3283 * set to 10 MBit HD. 3284 * 3285 * The function returns 0 if there is no link partner and 1 if there is 3286 * a good link partner. 3287 */ 3288 static int nv_update_linkspeed(struct net_device *dev) 3289 { 3290 struct fe_priv *np = netdev_priv(dev); 3291 u8 __iomem *base = get_hwbase(dev); 3292 int adv = 0; 3293 int lpa = 0; 3294 int adv_lpa, adv_pause, lpa_pause; 3295 int newls = np->linkspeed; 3296 int newdup = np->duplex; 3297 int mii_status; 3298 u32 bmcr; 3299 int retval = 0; 3300 u32 control_1000, status_1000, phyreg, pause_flags, txreg; 3301 u32 txrxFlags = 0; 3302 u32 phy_exp; 3303 3304 /* If device loopback is enabled, set carrier on and enable max link 3305 * speed. 3306 */ 3307 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); 3308 if (bmcr & BMCR_LOOPBACK) { 3309 if (netif_running(dev)) { 3310 nv_force_linkspeed(dev, NVREG_LINKSPEED_1000, 1); 3311 if (!netif_carrier_ok(dev)) 3312 netif_carrier_on(dev); 3313 } 3314 return 1; 3315 } 3316 3317 /* BMSR_LSTATUS is latched, read it twice: 3318 * we want the current value. 
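	 * (BMSR_LSTATUS latches low on any link failure and is only
	 * cleared by a read, so the first read may report a stale
	 * link-down event; the second read returns the live link state.)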
3319 */ 3320 mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); 3321 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); 3322 3323 if (!(mii_status & BMSR_LSTATUS)) { 3324 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; 3325 newdup = 0; 3326 retval = 0; 3327 goto set_speed; 3328 } 3329 3330 if (np->autoneg == 0) { 3331 if (np->fixed_mode & LPA_100FULL) { 3332 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100; 3333 newdup = 1; 3334 } else if (np->fixed_mode & LPA_100HALF) { 3335 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100; 3336 newdup = 0; 3337 } else if (np->fixed_mode & LPA_10FULL) { 3338 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; 3339 newdup = 1; 3340 } else { 3341 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; 3342 newdup = 0; 3343 } 3344 retval = 1; 3345 goto set_speed; 3346 } 3347 /* check auto negotiation is complete */ 3348 if (!(mii_status & BMSR_ANEGCOMPLETE)) { 3349 /* still in autonegotiation - configure nic for 10 MBit HD and wait. */ 3350 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; 3351 newdup = 0; 3352 retval = 0; 3353 goto set_speed; 3354 } 3355 3356 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); 3357 lpa = mii_rw(dev, np->phyaddr, MII_LPA, MII_READ); 3358 3359 retval = 1; 3360 if (np->gigabit == PHY_GIGABIT) { 3361 control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ); 3362 status_1000 = mii_rw(dev, np->phyaddr, MII_STAT1000, MII_READ); 3363 3364 if ((control_1000 & ADVERTISE_1000FULL) && 3365 (status_1000 & LPA_1000FULL)) { 3366 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_1000; 3367 newdup = 1; 3368 goto set_speed; 3369 } 3370 } 3371 3372 /* FIXME: handle parallel detection properly */ 3373 adv_lpa = lpa & adv; 3374 if (adv_lpa & LPA_100FULL) { 3375 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100; 3376 newdup = 1; 3377 } else if (adv_lpa & LPA_100HALF) { 3378 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100; 3379 newdup = 0; 3380 } else if (adv_lpa & LPA_10FULL) { 3381 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; 3382 newdup = 1; 3383 } else if (adv_lpa & LPA_10HALF) { 3384 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; 3385 newdup = 0; 3386 } else { 3387 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; 3388 newdup = 0; 3389 } 3390 3391 set_speed: 3392 if (np->duplex == newdup && np->linkspeed == newls) 3393 return retval; 3394 3395 np->duplex = newdup; 3396 np->linkspeed = newls; 3397 3398 /* The transmitter and receiver must be restarted for safe update */ 3399 if (readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_START) { 3400 txrxFlags |= NV_RESTART_TX; 3401 nv_stop_tx(dev); 3402 } 3403 if (readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) { 3404 txrxFlags |= NV_RESTART_RX; 3405 nv_stop_rx(dev); 3406 } 3407 3408 if (np->gigabit == PHY_GIGABIT) { 3409 phyreg = readl(base + NvRegSlotTime); 3410 phyreg &= ~(0x3FF00); 3411 if (((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_10) || 3412 ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_100)) 3413 phyreg |= NVREG_SLOTTIME_10_100_FULL; 3414 else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_1000) 3415 phyreg |= NVREG_SLOTTIME_1000_FULL; 3416 writel(phyreg, base + NvRegSlotTime); 3417 } 3418 3419 phyreg = readl(base + NvRegPhyInterface); 3420 phyreg &= ~(PHY_HALF|PHY_100|PHY_1000); 3421 if (np->duplex == 0) 3422 phyreg |= PHY_HALF; 3423 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_100) 3424 phyreg |= PHY_100; 3425 else if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000) 3426 phyreg |= PHY_1000; 3427 writel(phyreg, base + 
NvRegPhyInterface); 3428 3429 phy_exp = mii_rw(dev, np->phyaddr, MII_EXPANSION, MII_READ) & EXPANSION_NWAY; /* autoneg capable */ 3430 if (phyreg & PHY_RGMII) { 3431 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000) { 3432 txreg = NVREG_TX_DEFERRAL_RGMII_1000; 3433 } else { 3434 if (!phy_exp && !np->duplex && (np->driver_data & DEV_HAS_COLLISION_FIX)) { 3435 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_10) 3436 txreg = NVREG_TX_DEFERRAL_RGMII_STRETCH_10; 3437 else 3438 txreg = NVREG_TX_DEFERRAL_RGMII_STRETCH_100; 3439 } else { 3440 txreg = NVREG_TX_DEFERRAL_RGMII_10_100; 3441 } 3442 } 3443 } else { 3444 if (!phy_exp && !np->duplex && (np->driver_data & DEV_HAS_COLLISION_FIX)) 3445 txreg = NVREG_TX_DEFERRAL_MII_STRETCH; 3446 else 3447 txreg = NVREG_TX_DEFERRAL_DEFAULT; 3448 } 3449 writel(txreg, base + NvRegTxDeferral); 3450 3451 if (np->desc_ver == DESC_VER_1) { 3452 txreg = NVREG_TX_WM_DESC1_DEFAULT; 3453 } else { 3454 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000) 3455 txreg = NVREG_TX_WM_DESC2_3_1000; 3456 else 3457 txreg = NVREG_TX_WM_DESC2_3_DEFAULT; 3458 } 3459 writel(txreg, base + NvRegTxWatermark); 3460 3461 writel(NVREG_MISC1_FORCE | (np->duplex ? 0 : NVREG_MISC1_HD), 3462 base + NvRegMisc1); 3463 pci_push(base); 3464 writel(np->linkspeed, base + NvRegLinkSpeed); 3465 pci_push(base); 3466 3467 pause_flags = 0; 3468 /* setup pause frame */ 3469 if (netif_running(dev) && (np->duplex != 0)) { 3470 if (np->autoneg && np->pause_flags & NV_PAUSEFRAME_AUTONEG) { 3471 adv_pause = adv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); 3472 lpa_pause = lpa & (LPA_PAUSE_CAP | LPA_PAUSE_ASYM); 3473 3474 switch (adv_pause) { 3475 case ADVERTISE_PAUSE_CAP: 3476 if (lpa_pause & LPA_PAUSE_CAP) { 3477 pause_flags |= NV_PAUSEFRAME_RX_ENABLE; 3478 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) 3479 pause_flags |= NV_PAUSEFRAME_TX_ENABLE; 3480 } 3481 break; 3482 case ADVERTISE_PAUSE_ASYM: 3483 if (lpa_pause == (LPA_PAUSE_CAP | LPA_PAUSE_ASYM)) 3484 pause_flags |= NV_PAUSEFRAME_TX_ENABLE; 3485 break; 3486 case ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM: 3487 if (lpa_pause & LPA_PAUSE_CAP) { 3488 pause_flags |= NV_PAUSEFRAME_RX_ENABLE; 3489 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) 3490 pause_flags |= NV_PAUSEFRAME_TX_ENABLE; 3491 } 3492 if (lpa_pause == LPA_PAUSE_ASYM) 3493 pause_flags |= NV_PAUSEFRAME_RX_ENABLE; 3494 break; 3495 } 3496 } else { 3497 pause_flags = np->pause_flags; 3498 } 3499 } 3500 nv_update_pause(dev, pause_flags); 3501 3502 if (txrxFlags & NV_RESTART_TX) 3503 nv_start_tx(dev); 3504 if (txrxFlags & NV_RESTART_RX) 3505 nv_start_rx(dev); 3506 3507 return retval; 3508 } 3509 3510 static void nv_linkchange(struct net_device *dev) 3511 { 3512 if (nv_update_linkspeed(dev)) { 3513 if (!netif_carrier_ok(dev)) { 3514 netif_carrier_on(dev); 3515 netdev_info(dev, "link up\n"); 3516 nv_txrx_gate(dev, false); 3517 nv_start_rx(dev); 3518 } 3519 } else { 3520 if (netif_carrier_ok(dev)) { 3521 netif_carrier_off(dev); 3522 netdev_info(dev, "link down\n"); 3523 nv_txrx_gate(dev, true); 3524 nv_stop_rx(dev); 3525 } 3526 } 3527 } 3528 3529 static void nv_link_irq(struct net_device *dev) 3530 { 3531 u8 __iomem *base = get_hwbase(dev); 3532 u32 miistat; 3533 3534 miistat = readl(base + NvRegMIIStatus); 3535 writel(NVREG_MIISTAT_LINKCHANGE, base + NvRegMIIStatus); 3536 3537 if (miistat & (NVREG_MIISTAT_LINKCHANGE)) 3538 nv_linkchange(dev); 3539 } 3540 3541 static void nv_msi_workaround(struct fe_priv *np) 3542 { 3543 3544 /* Need to toggle the msi irq 
mask within the ethernet device, 3545 * otherwise future interrupts will not be detected. 3546 */ 3547 if (np->msi_flags & NV_MSI_ENABLED) { 3548 u8 __iomem *base = np->base; 3549 3550 writel(0, base + NvRegMSIIrqMask); 3551 writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask); 3552 } 3553 } 3554 3555 static inline int nv_change_interrupt_mode(struct net_device *dev, int total_work) 3556 { 3557 struct fe_priv *np = netdev_priv(dev); 3558 3559 if (optimization_mode == NV_OPTIMIZATION_MODE_DYNAMIC) { 3560 if (total_work > NV_DYNAMIC_THRESHOLD) { 3561 /* transition to poll-based interrupts */ 3562 np->quiet_count = 0; 3563 if (np->irqmask != NVREG_IRQMASK_CPU) { 3564 np->irqmask = NVREG_IRQMASK_CPU; 3565 return 1; 3566 } 3567 } else { 3568 if (np->quiet_count < NV_DYNAMIC_MAX_QUIET_COUNT) { 3569 np->quiet_count++; 3570 } else { 3571 /* reached a period of low activity, switch 3572 to per tx/rx packet interrupts */ 3573 if (np->irqmask != NVREG_IRQMASK_THROUGHPUT) { 3574 np->irqmask = NVREG_IRQMASK_THROUGHPUT; 3575 return 1; 3576 } 3577 } 3578 } 3579 } 3580 return 0; 3581 } 3582 3583 static irqreturn_t nv_nic_irq(int foo, void *data) 3584 { 3585 struct net_device *dev = (struct net_device *) data; 3586 struct fe_priv *np = netdev_priv(dev); 3587 u8 __iomem *base = get_hwbase(dev); 3588 3589 if (!(np->msi_flags & NV_MSI_X_ENABLED)) { 3590 np->events = readl(base + NvRegIrqStatus); 3591 writel(np->events, base + NvRegIrqStatus); 3592 } else { 3593 np->events = readl(base + NvRegMSIXIrqStatus); 3594 writel(np->events, base + NvRegMSIXIrqStatus); 3595 } 3596 if (!(np->events & np->irqmask)) 3597 return IRQ_NONE; 3598 3599 nv_msi_workaround(np); 3600 3601 if (napi_schedule_prep(&np->napi)) { 3602 /* 3603 * Disable further irqs (msix not enabled with napi) 3604 */ 3605 writel(0, base + NvRegIrqMask); 3606 __napi_schedule(&np->napi); 3607 } 3608 3609 return IRQ_HANDLED; 3610 } 3611 3612 /* All _optimized functions are used to help increase performance 3613 * (reduce CPU and increase throughput). They use descriptor version 3, 3614 * compiler directives, and reduce memory accesses.
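 *
 * For reference, a minimal illustrative sketch (assumption: the field and
 * helper names are the ones used elsewhere in this file, e.g. in
 * nv_loopback_test): the legacy descriptor carries a single 32-bit buffer
 * pointer, while descriptor version 3 splits the DMA address across two
 * 32-bit words:
 *
 *   np->tx_ring.orig[i].buf   = cpu_to_le32(dma);
 *   np->tx_ring.ex[i].bufhigh = cpu_to_le32(dma_high(dma));
 *   np->tx_ring.ex[i].buflow  = cpu_to_le32(dma_low(dma));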
3615 */ 3616 static irqreturn_t nv_nic_irq_optimized(int foo, void *data) 3617 { 3618 struct net_device *dev = (struct net_device *) data; 3619 struct fe_priv *np = netdev_priv(dev); 3620 u8 __iomem *base = get_hwbase(dev); 3621 3622 if (!(np->msi_flags & NV_MSI_X_ENABLED)) { 3623 np->events = readl(base + NvRegIrqStatus); 3624 writel(np->events, base + NvRegIrqStatus); 3625 } else { 3626 np->events = readl(base + NvRegMSIXIrqStatus); 3627 writel(np->events, base + NvRegMSIXIrqStatus); 3628 } 3629 if (!(np->events & np->irqmask)) 3630 return IRQ_NONE; 3631 3632 nv_msi_workaround(np); 3633 3634 if (napi_schedule_prep(&np->napi)) { 3635 /* 3636 * Disable further irqs (msix not enabled with napi) 3637 */ 3638 writel(0, base + NvRegIrqMask); 3639 __napi_schedule(&np->napi); 3640 } 3641 3642 return IRQ_HANDLED; 3643 } 3644 3645 static irqreturn_t nv_nic_irq_tx(int foo, void *data) 3646 { 3647 struct net_device *dev = (struct net_device *) data; 3648 struct fe_priv *np = netdev_priv(dev); 3649 u8 __iomem *base = get_hwbase(dev); 3650 u32 events; 3651 int i; 3652 unsigned long flags; 3653 3654 for (i = 0;; i++) { 3655 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_TX_ALL; 3656 writel(events, base + NvRegMSIXIrqStatus); 3657 netdev_dbg(dev, "tx irq events: %08x\n", events); 3658 if (!(events & np->irqmask)) 3659 break; 3660 3661 spin_lock_irqsave(&np->lock, flags); 3662 nv_tx_done_optimized(dev, TX_WORK_PER_LOOP); 3663 spin_unlock_irqrestore(&np->lock, flags); 3664 3665 if (unlikely(i > max_interrupt_work)) { 3666 spin_lock_irqsave(&np->lock, flags); 3667 /* disable interrupts on the nic */ 3668 writel(NVREG_IRQ_TX_ALL, base + NvRegIrqMask); 3669 pci_push(base); 3670 3671 if (!np->in_shutdown) { 3672 np->nic_poll_irq |= NVREG_IRQ_TX_ALL; 3673 mod_timer(&np->nic_poll, jiffies + POLL_WAIT); 3674 } 3675 spin_unlock_irqrestore(&np->lock, flags); 3676 netdev_dbg(dev, "%s: too many iterations (%d)\n", 3677 __func__, i); 3678 break; 3679 } 3680 3681 } 3682 3683 return IRQ_RETVAL(i); 3684 } 3685 3686 static int nv_napi_poll(struct napi_struct *napi, int budget) 3687 { 3688 struct fe_priv *np = container_of(napi, struct fe_priv, napi); 3689 struct net_device *dev = np->dev; 3690 u8 __iomem *base = get_hwbase(dev); 3691 unsigned long flags; 3692 int retcode; 3693 int rx_count, tx_work = 0, rx_work = 0; 3694 3695 do { 3696 if (!nv_optimized(np)) { 3697 spin_lock_irqsave(&np->lock, flags); 3698 tx_work += nv_tx_done(dev, np->tx_ring_size); 3699 spin_unlock_irqrestore(&np->lock, flags); 3700 3701 rx_count = nv_rx_process(dev, budget - rx_work); 3702 retcode = nv_alloc_rx(dev); 3703 } else { 3704 spin_lock_irqsave(&np->lock, flags); 3705 tx_work += nv_tx_done_optimized(dev, np->tx_ring_size); 3706 spin_unlock_irqrestore(&np->lock, flags); 3707 3708 rx_count = nv_rx_process_optimized(dev, 3709 budget - rx_work); 3710 retcode = nv_alloc_rx_optimized(dev); 3711 } 3712 } while (retcode == 0 && 3713 rx_count > 0 && (rx_work += rx_count) < budget); 3714 3715 if (retcode) { 3716 spin_lock_irqsave(&np->lock, flags); 3717 if (!np->in_shutdown) 3718 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); 3719 spin_unlock_irqrestore(&np->lock, flags); 3720 } 3721 3722 nv_change_interrupt_mode(dev, tx_work + rx_work); 3723 3724 if (unlikely(np->events & NVREG_IRQ_LINK)) { 3725 spin_lock_irqsave(&np->lock, flags); 3726 nv_link_irq(dev); 3727 spin_unlock_irqrestore(&np->lock, flags); 3728 } 3729 if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) { 3730 spin_lock_irqsave(&np->lock, flags); 3731 
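/* Chips flagged DEV_NEED_LINKTIMER do not reliably raise a link
 * interrupt, so once link_timeout expires the PHY link state is
 * polled explicitly here (see np->need_linktimer).
 */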
nv_linkchange(dev); 3732 spin_unlock_irqrestore(&np->lock, flags); 3733 np->link_timeout = jiffies + LINK_TIMEOUT; 3734 } 3735 if (unlikely(np->events & NVREG_IRQ_RECOVER_ERROR)) { 3736 spin_lock_irqsave(&np->lock, flags); 3737 if (!np->in_shutdown) { 3738 np->nic_poll_irq = np->irqmask; 3739 np->recover_error = 1; 3740 mod_timer(&np->nic_poll, jiffies + POLL_WAIT); 3741 } 3742 spin_unlock_irqrestore(&np->lock, flags); 3743 napi_complete(napi); 3744 return rx_work; 3745 } 3746 3747 if (rx_work < budget) { 3748 /* re-enable interrupts 3749 (msix not enabled in napi) */ 3750 napi_complete_done(napi, rx_work); 3751 3752 writel(np->irqmask, base + NvRegIrqMask); 3753 } 3754 return rx_work; 3755 } 3756 3757 static irqreturn_t nv_nic_irq_rx(int foo, void *data) 3758 { 3759 struct net_device *dev = (struct net_device *) data; 3760 struct fe_priv *np = netdev_priv(dev); 3761 u8 __iomem *base = get_hwbase(dev); 3762 u32 events; 3763 int i; 3764 unsigned long flags; 3765 3766 for (i = 0;; i++) { 3767 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL; 3768 writel(events, base + NvRegMSIXIrqStatus); 3769 netdev_dbg(dev, "rx irq events: %08x\n", events); 3770 if (!(events & np->irqmask)) 3771 break; 3772 3773 if (nv_rx_process_optimized(dev, RX_WORK_PER_LOOP)) { 3774 if (unlikely(nv_alloc_rx_optimized(dev))) { 3775 spin_lock_irqsave(&np->lock, flags); 3776 if (!np->in_shutdown) 3777 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); 3778 spin_unlock_irqrestore(&np->lock, flags); 3779 } 3780 } 3781 3782 if (unlikely(i > max_interrupt_work)) { 3783 spin_lock_irqsave(&np->lock, flags); 3784 /* disable interrupts on the nic */ 3785 writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask); 3786 pci_push(base); 3787 3788 if (!np->in_shutdown) { 3789 np->nic_poll_irq |= NVREG_IRQ_RX_ALL; 3790 mod_timer(&np->nic_poll, jiffies + POLL_WAIT); 3791 } 3792 spin_unlock_irqrestore(&np->lock, flags); 3793 netdev_dbg(dev, "%s: too many iterations (%d)\n", 3794 __func__, i); 3795 break; 3796 } 3797 } 3798 3799 return IRQ_RETVAL(i); 3800 } 3801 3802 static irqreturn_t nv_nic_irq_other(int foo, void *data) 3803 { 3804 struct net_device *dev = (struct net_device *) data; 3805 struct fe_priv *np = netdev_priv(dev); 3806 u8 __iomem *base = get_hwbase(dev); 3807 u32 events; 3808 int i; 3809 unsigned long flags; 3810 3811 for (i = 0;; i++) { 3812 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_OTHER; 3813 writel(events, base + NvRegMSIXIrqStatus); 3814 netdev_dbg(dev, "irq events: %08x\n", events); 3815 if (!(events & np->irqmask)) 3816 break; 3817 3818 /* check tx in case we reached max loop limit in tx isr */ 3819 spin_lock_irqsave(&np->lock, flags); 3820 nv_tx_done_optimized(dev, TX_WORK_PER_LOOP); 3821 spin_unlock_irqrestore(&np->lock, flags); 3822 3823 if (events & NVREG_IRQ_LINK) { 3824 spin_lock_irqsave(&np->lock, flags); 3825 nv_link_irq(dev); 3826 spin_unlock_irqrestore(&np->lock, flags); 3827 } 3828 if (np->need_linktimer && time_after(jiffies, np->link_timeout)) { 3829 spin_lock_irqsave(&np->lock, flags); 3830 nv_linkchange(dev); 3831 spin_unlock_irqrestore(&np->lock, flags); 3832 np->link_timeout = jiffies + LINK_TIMEOUT; 3833 } 3834 if (events & NVREG_IRQ_RECOVER_ERROR) { 3835 spin_lock_irqsave(&np->lock, flags); 3836 /* disable interrupts on the nic */ 3837 writel(NVREG_IRQ_OTHER, base + NvRegIrqMask); 3838 pci_push(base); 3839 3840 if (!np->in_shutdown) { 3841 np->nic_poll_irq |= NVREG_IRQ_OTHER; 3842 np->recover_error = 1; 3843 mod_timer(&np->nic_poll, jiffies + POLL_WAIT); 3844 } 3845 
spin_unlock_irqrestore(&np->lock, flags); 3846 break; 3847 } 3848 if (unlikely(i > max_interrupt_work)) { 3849 spin_lock_irqsave(&np->lock, flags); 3850 /* disable interrupts on the nic */ 3851 writel(NVREG_IRQ_OTHER, base + NvRegIrqMask); 3852 pci_push(base); 3853 3854 if (!np->in_shutdown) { 3855 np->nic_poll_irq |= NVREG_IRQ_OTHER; 3856 mod_timer(&np->nic_poll, jiffies + POLL_WAIT); 3857 } 3858 spin_unlock_irqrestore(&np->lock, flags); 3859 netdev_dbg(dev, "%s: too many iterations (%d)\n", 3860 __func__, i); 3861 break; 3862 } 3863 3864 } 3865 3866 return IRQ_RETVAL(i); 3867 } 3868 3869 static irqreturn_t nv_nic_irq_test(int foo, void *data) 3870 { 3871 struct net_device *dev = (struct net_device *) data; 3872 struct fe_priv *np = netdev_priv(dev); 3873 u8 __iomem *base = get_hwbase(dev); 3874 u32 events; 3875 3876 if (!(np->msi_flags & NV_MSI_X_ENABLED)) { 3877 events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK; 3878 writel(events & NVREG_IRQ_TIMER, base + NvRegIrqStatus); 3879 } else { 3880 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK; 3881 writel(events & NVREG_IRQ_TIMER, base + NvRegMSIXIrqStatus); 3882 } 3883 pci_push(base); 3884 if (!(events & NVREG_IRQ_TIMER)) 3885 return IRQ_RETVAL(0); 3886 3887 nv_msi_workaround(np); 3888 3889 spin_lock(&np->lock); 3890 np->intr_test = 1; 3891 spin_unlock(&np->lock); 3892 3893 return IRQ_RETVAL(1); 3894 } 3895 3896 static void set_msix_vector_map(struct net_device *dev, u32 vector, u32 irqmask) 3897 { 3898 u8 __iomem *base = get_hwbase(dev); 3899 int i; 3900 u32 msixmap = 0; 3901 3902 /* Each interrupt bit can be mapped to an MSI-X vector, using 4 bits 3903 * per interrupt bit. NvRegMSIXMap0 holds the mapping for the first 8 3904 * interrupt bits, NvRegMSIXMap1 for the remaining 8. For example, 3905 * mapping interrupt bit 3 to vector 2 sets msixmap |= 2 << 12. */ 3906 for (i = 0; i < 8; i++) { 3907 if ((irqmask >> i) & 0x1) 3908 msixmap |= vector << (i << 2); 3909 } 3910 writel(readl(base + NvRegMSIXMap0) | msixmap, base + NvRegMSIXMap0); 3911 3912 msixmap = 0; 3913 for (i = 0; i < 8; i++) { 3914 if ((irqmask >> (i + 8)) & 0x1) 3915 msixmap |= vector << (i << 2); 3916 } 3917 writel(readl(base + NvRegMSIXMap1) | msixmap, base + NvRegMSIXMap1); 3918 } 3919 3920 static int nv_request_irq(struct net_device *dev, int intr_test) 3921 { 3922 struct fe_priv *np = get_nvpriv(dev); 3923 u8 __iomem *base = get_hwbase(dev); 3924 int ret; 3925 int i; 3926 irqreturn_t (*handler)(int foo, void *data); 3927 3928 if (intr_test) { 3929 handler = nv_nic_irq_test; 3930 } else { 3931 if (nv_optimized(np)) 3932 handler = nv_nic_irq_optimized; 3933 else 3934 handler = nv_nic_irq; 3935 } 3936 3937 if (np->msi_flags & NV_MSI_X_CAPABLE) { 3938 for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) 3939 np->msi_x_entry[i].entry = i; 3940 ret = pci_enable_msix_range(np->pci_dev, 3941 np->msi_x_entry, 3942 np->msi_flags & NV_MSI_X_VECTORS_MASK, 3943 np->msi_flags & NV_MSI_X_VECTORS_MASK); 3944 if (ret > 0) { 3945 np->msi_flags |= NV_MSI_X_ENABLED; 3946 if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT && !intr_test) { 3947 /* Request irq for rx handling */ 3948 sprintf(np->name_rx, "%s-rx", dev->name); 3949 ret = request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, 3950 nv_nic_irq_rx, IRQF_SHARED, np->name_rx, dev); 3951 if (ret) { 3952 netdev_info(dev, 3953 "request_irq failed for rx %d\n", 3954 ret); 3955 pci_disable_msix(np->pci_dev); 3956 np->msi_flags &= ~NV_MSI_X_ENABLED; 3957 goto out_err; 3958 } 3959 /* Request irq for tx handling */ 3960 sprintf(np->name_tx, "%s-tx", dev->name); 3961 ret = 
request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, 3962 nv_nic_irq_tx, IRQF_SHARED, np->name_tx, dev); 3963 if (ret) { 3964 netdev_info(dev, 3965 "request_irq failed for tx %d\n", 3966 ret); 3967 pci_disable_msix(np->pci_dev); 3968 np->msi_flags &= ~NV_MSI_X_ENABLED; 3969 goto out_free_rx; 3970 } 3971 /* Request irq for link and timer handling */ 3972 sprintf(np->name_other, "%s-other", dev->name); 3973 ret = request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector, 3974 nv_nic_irq_other, IRQF_SHARED, np->name_other, dev); 3975 if (ret) { 3976 netdev_info(dev, 3977 "request_irq failed for link %d\n", 3978 ret); 3979 pci_disable_msix(np->pci_dev); 3980 np->msi_flags &= ~NV_MSI_X_ENABLED; 3981 goto out_free_tx; 3982 } 3983 /* map interrupts to their respective vector */ 3984 writel(0, base + NvRegMSIXMap0); 3985 writel(0, base + NvRegMSIXMap1); 3986 set_msix_vector_map(dev, NV_MSI_X_VECTOR_RX, NVREG_IRQ_RX_ALL); 3987 set_msix_vector_map(dev, NV_MSI_X_VECTOR_TX, NVREG_IRQ_TX_ALL); 3988 set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER); 3989 } else { 3990 /* Request irq for all interrupts */ 3991 ret = request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, 3992 handler, IRQF_SHARED, dev->name, dev); 3993 if (ret) { 3994 netdev_info(dev, 3995 "request_irq failed %d\n", 3996 ret); 3997 pci_disable_msix(np->pci_dev); 3998 np->msi_flags &= ~NV_MSI_X_ENABLED; 3999 goto out_err; 4000 } 4001 4002 /* map interrupts to vector 0 */ 4003 writel(0, base + NvRegMSIXMap0); 4004 writel(0, base + NvRegMSIXMap1); 4005 } 4006 netdev_info(dev, "MSI-X enabled\n"); 4007 return 0; 4008 } 4009 } 4010 if (np->msi_flags & NV_MSI_CAPABLE) { 4011 ret = pci_enable_msi(np->pci_dev); 4012 if (ret == 0) { 4013 np->msi_flags |= NV_MSI_ENABLED; 4014 ret = request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev); 4015 if (ret) { 4016 netdev_info(dev, "request_irq failed %d\n", 4017 ret); 4018 pci_disable_msi(np->pci_dev); 4019 np->msi_flags &= ~NV_MSI_ENABLED; 4020 goto out_err; 4021 } 4022 4023 /* map interrupts to vector 0 */ 4024 writel(0, base + NvRegMSIMap0); 4025 writel(0, base + NvRegMSIMap1); 4026 /* enable msi vector 0 */ 4027 writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask); 4028 netdev_info(dev, "MSI enabled\n"); 4029 return 0; 4030 } 4031 } 4032 4033 if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0) 4034 goto out_err; 4035 4036 return 0; 4037 out_free_tx: 4038 free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, dev); 4039 out_free_rx: 4040 free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, dev); 4041 out_err: 4042 return 1; 4043 } 4044 4045 static void nv_free_irq(struct net_device *dev) 4046 { 4047 struct fe_priv *np = get_nvpriv(dev); 4048 int i; 4049 4050 if (np->msi_flags & NV_MSI_X_ENABLED) { 4051 for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) 4052 free_irq(np->msi_x_entry[i].vector, dev); 4053 pci_disable_msix(np->pci_dev); 4054 np->msi_flags &= ~NV_MSI_X_ENABLED; 4055 } else { 4056 free_irq(np->pci_dev->irq, dev); 4057 if (np->msi_flags & NV_MSI_ENABLED) { 4058 pci_disable_msi(np->pci_dev); 4059 np->msi_flags &= ~NV_MSI_ENABLED; 4060 } 4061 } 4062 } 4063 4064 static void nv_do_nic_poll(unsigned long data) 4065 { 4066 struct net_device *dev = (struct net_device *) data; 4067 struct fe_priv *np = netdev_priv(dev); 4068 u8 __iomem *base = get_hwbase(dev); 4069 u32 mask = 0; 4070 unsigned long flags; 4071 unsigned int irq = 0; 4072 4073 /* 4074 * First disable irq(s) and then 4075 * reenable interrupts on the nic, we have 
to do this before calling 4076 * nv_nic_irq because that may decide to do otherwise 4077 */ 4078 4079 if (!using_multi_irqs(dev)) { 4080 if (np->msi_flags & NV_MSI_X_ENABLED) 4081 irq = np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector; 4082 else 4083 irq = np->pci_dev->irq; 4084 mask = np->irqmask; 4085 } else { 4086 if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) { 4087 irq = np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector; 4088 mask |= NVREG_IRQ_RX_ALL; 4089 } 4090 if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) { 4091 irq = np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector; 4092 mask |= NVREG_IRQ_TX_ALL; 4093 } 4094 if (np->nic_poll_irq & NVREG_IRQ_OTHER) { 4095 irq = np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector; 4096 mask |= NVREG_IRQ_OTHER; 4097 } 4098 } 4099 4100 disable_irq_nosync_lockdep_irqsave(irq, &flags); 4101 synchronize_irq(irq); 4102 4103 if (np->recover_error) { 4104 np->recover_error = 0; 4105 netdev_info(dev, "MAC in recoverable error state\n"); 4106 if (netif_running(dev)) { 4107 netif_tx_lock_bh(dev); 4108 netif_addr_lock(dev); 4109 spin_lock(&np->lock); 4110 /* stop engines */ 4111 nv_stop_rxtx(dev); 4112 if (np->driver_data & DEV_HAS_POWER_CNTRL) 4113 nv_mac_reset(dev); 4114 nv_txrx_reset(dev); 4115 /* drain rx queue */ 4116 nv_drain_rxtx(dev); 4117 /* reinit driver view of the rx queue */ 4118 set_bufsize(dev); 4119 if (nv_init_ring(dev)) { 4120 if (!np->in_shutdown) 4121 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); 4122 } 4123 /* reinit nic view of the rx queue */ 4124 writel(np->rx_buf_sz, base + NvRegOffloadConfig); 4125 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); 4126 writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), 4127 base + NvRegRingSizes); 4128 pci_push(base); 4129 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 4130 pci_push(base); 4131 /* clear interrupts */ 4132 if (!(np->msi_flags & NV_MSI_X_ENABLED)) 4133 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); 4134 else 4135 writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus); 4136 4137 /* restart rx engine */ 4138 nv_start_rxtx(dev); 4139 spin_unlock(&np->lock); 4140 netif_addr_unlock(dev); 4141 netif_tx_unlock_bh(dev); 4142 } 4143 } 4144 4145 writel(mask, base + NvRegIrqMask); 4146 pci_push(base); 4147 4148 if (!using_multi_irqs(dev)) { 4149 np->nic_poll_irq = 0; 4150 if (nv_optimized(np)) 4151 nv_nic_irq_optimized(0, dev); 4152 else 4153 nv_nic_irq(0, dev); 4154 } else { 4155 if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) { 4156 np->nic_poll_irq &= ~NVREG_IRQ_RX_ALL; 4157 nv_nic_irq_rx(0, dev); 4158 } 4159 if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) { 4160 np->nic_poll_irq &= ~NVREG_IRQ_TX_ALL; 4161 nv_nic_irq_tx(0, dev); 4162 } 4163 if (np->nic_poll_irq & NVREG_IRQ_OTHER) { 4164 np->nic_poll_irq &= ~NVREG_IRQ_OTHER; 4165 nv_nic_irq_other(0, dev); 4166 } 4167 } 4168 4169 enable_irq_lockdep_irqrestore(irq, &flags); 4170 } 4171 4172 #ifdef CONFIG_NET_POLL_CONTROLLER 4173 static void nv_poll_controller(struct net_device *dev) 4174 { 4175 nv_do_nic_poll((unsigned long) dev); 4176 } 4177 #endif 4178 4179 static void nv_do_stats_poll(unsigned long data) 4180 __acquires(&netdev_priv(dev)->hwstats_lock) 4181 __releases(&netdev_priv(dev)->hwstats_lock) 4182 { 4183 struct net_device *dev = (struct net_device *) data; 4184 struct fe_priv *np = netdev_priv(dev); 4185 4186 /* If lock is currently taken, the stats are being refreshed 4187 * and hence fresh enough */ 4188 if (spin_trylock(&np->hwstats_lock)) { 4189 nv_update_stats(dev); 4190 
spin_unlock(&np->hwstats_lock); 4191 } 4192 4193 if (!np->in_shutdown) 4194 mod_timer(&np->stats_poll, 4195 round_jiffies(jiffies + STATS_INTERVAL)); 4196 } 4197 4198 static void nv_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) 4199 { 4200 struct fe_priv *np = netdev_priv(dev); 4201 strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); 4202 strlcpy(info->version, FORCEDETH_VERSION, sizeof(info->version)); 4203 strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info)); 4204 } 4205 4206 static void nv_get_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo) 4207 { 4208 struct fe_priv *np = netdev_priv(dev); 4209 wolinfo->supported = WAKE_MAGIC; 4210 4211 spin_lock_irq(&np->lock); 4212 if (np->wolenabled) 4213 wolinfo->wolopts = WAKE_MAGIC; 4214 spin_unlock_irq(&np->lock); 4215 } 4216 4217 static int nv_set_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo) 4218 { 4219 struct fe_priv *np = netdev_priv(dev); 4220 u8 __iomem *base = get_hwbase(dev); 4221 u32 flags = 0; 4222 4223 if (wolinfo->wolopts == 0) { 4224 np->wolenabled = 0; 4225 } else if (wolinfo->wolopts & WAKE_MAGIC) { 4226 np->wolenabled = 1; 4227 flags = NVREG_WAKEUPFLAGS_ENABLE; 4228 } 4229 if (netif_running(dev)) { 4230 spin_lock_irq(&np->lock); 4231 writel(flags, base + NvRegWakeUpFlags); 4232 spin_unlock_irq(&np->lock); 4233 } 4234 device_set_wakeup_enable(&np->pci_dev->dev, np->wolenabled); 4235 return 0; 4236 } 4237 4238 static int nv_get_link_ksettings(struct net_device *dev, 4239 struct ethtool_link_ksettings *cmd) 4240 { 4241 struct fe_priv *np = netdev_priv(dev); 4242 u32 speed, supported, advertising; 4243 int adv; 4244 4245 spin_lock_irq(&np->lock); 4246 cmd->base.port = PORT_MII; 4247 if (!netif_running(dev)) { 4248 /* We do not track link speed / duplex setting if the 4249 * interface is disabled. 
Force a link check */ 4250 if (nv_update_linkspeed(dev)) { 4251 netif_carrier_on(dev); 4252 } else { 4253 netif_carrier_off(dev); 4254 } 4255 } 4256 4257 if (netif_carrier_ok(dev)) { 4258 switch (np->linkspeed & (NVREG_LINKSPEED_MASK)) { 4259 case NVREG_LINKSPEED_10: 4260 speed = SPEED_10; 4261 break; 4262 case NVREG_LINKSPEED_100: 4263 speed = SPEED_100; 4264 break; 4265 case NVREG_LINKSPEED_1000: 4266 speed = SPEED_1000; 4267 break; 4268 default: 4269 speed = -1; 4270 break; 4271 } 4272 cmd->base.duplex = DUPLEX_HALF; 4273 if (np->duplex) 4274 cmd->base.duplex = DUPLEX_FULL; 4275 } else { 4276 speed = SPEED_UNKNOWN; 4277 cmd->base.duplex = DUPLEX_UNKNOWN; 4278 } 4279 cmd->base.speed = speed; 4280 cmd->base.autoneg = np->autoneg; 4281 4282 advertising = ADVERTISED_MII; 4283 if (np->autoneg) { 4284 advertising |= ADVERTISED_Autoneg; 4285 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); 4286 if (adv & ADVERTISE_10HALF) 4287 advertising |= ADVERTISED_10baseT_Half; 4288 if (adv & ADVERTISE_10FULL) 4289 advertising |= ADVERTISED_10baseT_Full; 4290 if (adv & ADVERTISE_100HALF) 4291 advertising |= ADVERTISED_100baseT_Half; 4292 if (adv & ADVERTISE_100FULL) 4293 advertising |= ADVERTISED_100baseT_Full; 4294 if (np->gigabit == PHY_GIGABIT) { 4295 adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ); 4296 if (adv & ADVERTISE_1000FULL) 4297 advertising |= ADVERTISED_1000baseT_Full; 4298 } 4299 } 4300 supported = (SUPPORTED_Autoneg | 4301 SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | 4302 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | 4303 SUPPORTED_MII); 4304 if (np->gigabit == PHY_GIGABIT) 4305 supported |= SUPPORTED_1000baseT_Full; 4306 4307 cmd->base.phy_address = np->phyaddr; 4308 4309 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, 4310 supported); 4311 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, 4312 advertising); 4313 4314 /* ignore maxtxpkt, maxrxpkt for now */ 4315 spin_unlock_irq(&np->lock); 4316 return 0; 4317 } 4318 4319 static int nv_set_link_ksettings(struct net_device *dev, 4320 const struct ethtool_link_ksettings *cmd) 4321 { 4322 struct fe_priv *np = netdev_priv(dev); 4323 u32 speed = cmd->base.speed; 4324 u32 advertising; 4325 4326 ethtool_convert_link_mode_to_legacy_u32(&advertising, 4327 cmd->link_modes.advertising); 4328 4329 if (cmd->base.port != PORT_MII) 4330 return -EINVAL; 4331 if (cmd->base.phy_address != np->phyaddr) { 4332 /* TODO: support switching between multiple phys. Should be 4333 * trivial, but not enabled due to lack of test hardware. */ 4334 return -EINVAL; 4335 } 4336 if (cmd->base.autoneg == AUTONEG_ENABLE) { 4337 u32 mask; 4338 4339 mask = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | 4340 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full; 4341 if (np->gigabit == PHY_GIGABIT) 4342 mask |= ADVERTISED_1000baseT_Full; 4343 4344 if ((advertising & mask) == 0) 4345 return -EINVAL; 4346 4347 } else if (cmd->base.autoneg == AUTONEG_DISABLE) { 4348 /* Note: autonegotiation disable, speed 1000 intentionally 4349 * forbidden - no one should need that. 
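 * (Probable reason: 1000BASE-T resolves its master/slave roles during
 * autonegotiation, so a forced gigabit link generally cannot come up.)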
*/ 4350 4351 if (speed != SPEED_10 && speed != SPEED_100) 4352 return -EINVAL; 4353 if (cmd->base.duplex != DUPLEX_HALF && 4354 cmd->base.duplex != DUPLEX_FULL) 4355 return -EINVAL; 4356 } else { 4357 return -EINVAL; 4358 } 4359 4360 netif_carrier_off(dev); 4361 if (netif_running(dev)) { 4362 unsigned long flags; 4363 4364 nv_disable_irq(dev); 4365 netif_tx_lock_bh(dev); 4366 netif_addr_lock(dev); 4367 /* with plain spinlock lockdep complains */ 4368 spin_lock_irqsave(&np->lock, flags); 4369 /* stop engines */ 4370 /* FIXME: 4371 * this can take some time, and interrupts are disabled 4372 * due to spin_lock_irqsave, but let's hope no daemon 4373 * is going to change the settings very often... 4374 * Worst case: 4375 * NV_RXSTOP_DELAY1MAX + NV_TXSTOP_DELAY1MAX 4376 * + some minor delays, which is up to a second approximately 4377 */ 4378 nv_stop_rxtx(dev); 4379 spin_unlock_irqrestore(&np->lock, flags); 4380 netif_addr_unlock(dev); 4381 netif_tx_unlock_bh(dev); 4382 } 4383 4384 if (cmd->base.autoneg == AUTONEG_ENABLE) { 4385 int adv, bmcr; 4386 4387 np->autoneg = 1; 4388 4389 /* advertise only what has been requested */ 4390 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); 4391 adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); 4392 if (advertising & ADVERTISED_10baseT_Half) 4393 adv |= ADVERTISE_10HALF; 4394 if (advertising & ADVERTISED_10baseT_Full) 4395 adv |= ADVERTISE_10FULL; 4396 if (advertising & ADVERTISED_100baseT_Half) 4397 adv |= ADVERTISE_100HALF; 4398 if (advertising & ADVERTISED_100baseT_Full) 4399 adv |= ADVERTISE_100FULL; 4400 if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisements but disable tx pause */ 4401 adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; 4402 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) 4403 adv |= ADVERTISE_PAUSE_ASYM; 4404 mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv); 4405 4406 if (np->gigabit == PHY_GIGABIT) { 4407 adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ); 4408 adv &= ~ADVERTISE_1000FULL; 4409 if (advertising & ADVERTISED_1000baseT_Full) 4410 adv |= ADVERTISE_1000FULL; 4411 mii_rw(dev, np->phyaddr, MII_CTRL1000, adv); 4412 } 4413 4414 if (netif_running(dev)) 4415 netdev_info(dev, "link down\n"); 4416 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); 4417 if (np->phy_model == PHY_MODEL_MARVELL_E3016) { 4418 bmcr |= BMCR_ANENABLE; 4419 /* reset the phy in order for settings to stick, 4420 * and cause autoneg to start */ 4421 if (phy_reset(dev, bmcr)) { 4422 netdev_info(dev, "phy reset failed\n"); 4423 return -EINVAL; 4424 } 4425 } else { 4426 bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART); 4427 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr); 4428 } 4429 } else { 4430 int adv, bmcr; 4431 4432 np->autoneg = 0; 4433 4434 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); 4435 adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); 4436 if (speed == SPEED_10 && cmd->base.duplex == DUPLEX_HALF) 4437 adv |= ADVERTISE_10HALF; 4438 if (speed == SPEED_10 && cmd->base.duplex == DUPLEX_FULL) 4439 adv |= ADVERTISE_10FULL; 4440 if (speed == SPEED_100 && cmd->base.duplex == DUPLEX_HALF) 4441 adv |= ADVERTISE_100HALF; 4442 if (speed == SPEED_100 && cmd->base.duplex == DUPLEX_FULL) 4443 adv |= ADVERTISE_100FULL; 4444 np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE); 4445 if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) {/* for rx we set both advertisements but disable tx pause */ 4446 adv |= ADVERTISE_PAUSE_CAP | 
ADVERTISE_PAUSE_ASYM; 4447 np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE; 4448 } 4449 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) { 4450 adv |= ADVERTISE_PAUSE_ASYM; 4451 np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE; 4452 } 4453 mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv); 4454 np->fixed_mode = adv; 4455 4456 if (np->gigabit == PHY_GIGABIT) { 4457 adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ); 4458 adv &= ~ADVERTISE_1000FULL; 4459 mii_rw(dev, np->phyaddr, MII_CTRL1000, adv); 4460 } 4461 4462 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); 4463 bmcr &= ~(BMCR_ANENABLE|BMCR_SPEED100|BMCR_SPEED1000|BMCR_FULLDPLX); 4464 if (np->fixed_mode & (ADVERTISE_10FULL|ADVERTISE_100FULL)) 4465 bmcr |= BMCR_FULLDPLX; 4466 if (np->fixed_mode & (ADVERTISE_100HALF|ADVERTISE_100FULL)) 4467 bmcr |= BMCR_SPEED100; 4468 if (np->phy_oui == PHY_OUI_MARVELL) { 4469 /* reset the phy in order for forced mode settings to stick */ 4470 if (phy_reset(dev, bmcr)) { 4471 netdev_info(dev, "phy reset failed\n"); 4472 return -EINVAL; 4473 } 4474 } else { 4475 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr); 4476 if (netif_running(dev)) { 4477 /* Wait a bit and then reconfigure the nic. */ 4478 udelay(10); 4479 nv_linkchange(dev); 4480 } 4481 } 4482 } 4483 4484 if (netif_running(dev)) { 4485 nv_start_rxtx(dev); 4486 nv_enable_irq(dev); 4487 } 4488 4489 return 0; 4490 } 4491 4492 #define FORCEDETH_REGS_VER 1 4493 4494 static int nv_get_regs_len(struct net_device *dev) 4495 { 4496 struct fe_priv *np = netdev_priv(dev); 4497 return np->register_size; 4498 } 4499 4500 static void nv_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf) 4501 { 4502 struct fe_priv *np = netdev_priv(dev); 4503 u8 __iomem *base = get_hwbase(dev); 4504 u32 *rbuf = buf; 4505 int i; 4506 4507 regs->version = FORCEDETH_REGS_VER; 4508 spin_lock_irq(&np->lock); 4509 for (i = 0; i < np->register_size/sizeof(u32); i++) 4510 rbuf[i] = readl(base + i*sizeof(u32)); 4511 spin_unlock_irq(&np->lock); 4512 } 4513 4514 static int nv_nway_reset(struct net_device *dev) 4515 { 4516 struct fe_priv *np = netdev_priv(dev); 4517 int ret; 4518 4519 if (np->autoneg) { 4520 int bmcr; 4521 4522 netif_carrier_off(dev); 4523 if (netif_running(dev)) { 4524 nv_disable_irq(dev); 4525 netif_tx_lock_bh(dev); 4526 netif_addr_lock(dev); 4527 spin_lock(&np->lock); 4528 /* stop engines */ 4529 nv_stop_rxtx(dev); 4530 spin_unlock(&np->lock); 4531 netif_addr_unlock(dev); 4532 netif_tx_unlock_bh(dev); 4533 netdev_info(dev, "link down\n"); 4534 } 4535 4536 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); 4537 if (np->phy_model == PHY_MODEL_MARVELL_E3016) { 4538 bmcr |= BMCR_ANENABLE; 4539 /* reset the phy in order for settings to stick*/ 4540 if (phy_reset(dev, bmcr)) { 4541 netdev_info(dev, "phy reset failed\n"); 4542 return -EINVAL; 4543 } 4544 } else { 4545 bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART); 4546 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr); 4547 } 4548 4549 if (netif_running(dev)) { 4550 nv_start_rxtx(dev); 4551 nv_enable_irq(dev); 4552 } 4553 ret = 0; 4554 } else { 4555 ret = -EINVAL; 4556 } 4557 4558 return ret; 4559 } 4560 4561 static void nv_get_ringparam(struct net_device *dev, struct ethtool_ringparam* ring) 4562 { 4563 struct fe_priv *np = netdev_priv(dev); 4564 4565 ring->rx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3; 4566 ring->tx_max_pending = (np->desc_ver == DESC_VER_1) ? 
RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3; 4567 4568 ring->rx_pending = np->rx_ring_size; 4569 ring->tx_pending = np->tx_ring_size; 4570 } 4571 4572 static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ring) 4573 { 4574 struct fe_priv *np = netdev_priv(dev); 4575 u8 __iomem *base = get_hwbase(dev); 4576 u8 *rxtx_ring, *rx_skbuff, *tx_skbuff; 4577 dma_addr_t ring_addr; 4578 4579 if (ring->rx_pending < RX_RING_MIN || 4580 ring->tx_pending < TX_RING_MIN || 4581 ring->rx_mini_pending != 0 || 4582 ring->rx_jumbo_pending != 0 || 4583 (np->desc_ver == DESC_VER_1 && 4584 (ring->rx_pending > RING_MAX_DESC_VER_1 || 4585 ring->tx_pending > RING_MAX_DESC_VER_1)) || 4586 (np->desc_ver != DESC_VER_1 && 4587 (ring->rx_pending > RING_MAX_DESC_VER_2_3 || 4588 ring->tx_pending > RING_MAX_DESC_VER_2_3))) { 4589 return -EINVAL; 4590 } 4591 4592 /* allocate new rings */ 4593 if (!nv_optimized(np)) { 4594 rxtx_ring = pci_alloc_consistent(np->pci_dev, 4595 sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending), 4596 &ring_addr); 4597 } else { 4598 rxtx_ring = pci_alloc_consistent(np->pci_dev, 4599 sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending), 4600 &ring_addr); 4601 } 4602 rx_skbuff = kmalloc(sizeof(struct nv_skb_map) * ring->rx_pending, GFP_KERNEL); 4603 tx_skbuff = kmalloc(sizeof(struct nv_skb_map) * ring->tx_pending, GFP_KERNEL); 4604 if (!rxtx_ring || !rx_skbuff || !tx_skbuff) { 4605 /* fall back to old rings */ 4606 if (!nv_optimized(np)) { 4607 if (rxtx_ring) 4608 pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending), 4609 rxtx_ring, ring_addr); 4610 } else { 4611 if (rxtx_ring) 4612 pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending), 4613 rxtx_ring, ring_addr); 4614 } 4615 4616 kfree(rx_skbuff); 4617 kfree(tx_skbuff); 4618 goto exit; 4619 } 4620 4621 if (netif_running(dev)) { 4622 nv_disable_irq(dev); 4623 nv_napi_disable(dev); 4624 netif_tx_lock_bh(dev); 4625 netif_addr_lock(dev); 4626 spin_lock(&np->lock); 4627 /* stop engines */ 4628 nv_stop_rxtx(dev); 4629 nv_txrx_reset(dev); 4630 /* drain queues */ 4631 nv_drain_rxtx(dev); 4632 /* delete queues */ 4633 free_rings(dev); 4634 } 4635 4636 /* set new values */ 4637 np->rx_ring_size = ring->rx_pending; 4638 np->tx_ring_size = ring->tx_pending; 4639 4640 if (!nv_optimized(np)) { 4641 np->rx_ring.orig = (struct ring_desc *)rxtx_ring; 4642 np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size]; 4643 } else { 4644 np->rx_ring.ex = (struct ring_desc_ex *)rxtx_ring; 4645 np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size]; 4646 } 4647 np->rx_skb = (struct nv_skb_map *)rx_skbuff; 4648 np->tx_skb = (struct nv_skb_map *)tx_skbuff; 4649 np->ring_addr = ring_addr; 4650 4651 memset(np->rx_skb, 0, sizeof(struct nv_skb_map) * np->rx_ring_size); 4652 memset(np->tx_skb, 0, sizeof(struct nv_skb_map) * np->tx_ring_size); 4653 4654 if (netif_running(dev)) { 4655 /* reinit driver view of the queues */ 4656 set_bufsize(dev); 4657 if (nv_init_ring(dev)) { 4658 if (!np->in_shutdown) 4659 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); 4660 } 4661 4662 /* reinit nic view of the queues */ 4663 writel(np->rx_buf_sz, base + NvRegOffloadConfig); 4664 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); 4665 writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), 4666 base + NvRegRingSizes); 4667 pci_push(base); 4668 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + 
NvRegTxRxControl); 4669 pci_push(base); 4670 4671 /* restart engines */ 4672 nv_start_rxtx(dev); 4673 spin_unlock(&np->lock); 4674 netif_addr_unlock(dev); 4675 netif_tx_unlock_bh(dev); 4676 nv_napi_enable(dev); 4677 nv_enable_irq(dev); 4678 } 4679 return 0; 4680 exit: 4681 return -ENOMEM; 4682 } 4683 4684 static void nv_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam* pause) 4685 { 4686 struct fe_priv *np = netdev_priv(dev); 4687 4688 pause->autoneg = (np->pause_flags & NV_PAUSEFRAME_AUTONEG) != 0; 4689 pause->rx_pause = (np->pause_flags & NV_PAUSEFRAME_RX_ENABLE) != 0; 4690 pause->tx_pause = (np->pause_flags & NV_PAUSEFRAME_TX_ENABLE) != 0; 4691 } 4692 4693 static int nv_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam* pause) 4694 { 4695 struct fe_priv *np = netdev_priv(dev); 4696 int adv, bmcr; 4697 4698 if ((!np->autoneg && np->duplex == 0) || 4699 (np->autoneg && !pause->autoneg && np->duplex == 0)) { 4700 netdev_info(dev, "can not set pause settings when forced link is in half duplex\n"); 4701 return -EINVAL; 4702 } 4703 if (pause->tx_pause && !(np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)) { 4704 netdev_info(dev, "hardware does not support tx pause frames\n"); 4705 return -EINVAL; 4706 } 4707 4708 netif_carrier_off(dev); 4709 if (netif_running(dev)) { 4710 nv_disable_irq(dev); 4711 netif_tx_lock_bh(dev); 4712 netif_addr_lock(dev); 4713 spin_lock(&np->lock); 4714 /* stop engines */ 4715 nv_stop_rxtx(dev); 4716 spin_unlock(&np->lock); 4717 netif_addr_unlock(dev); 4718 netif_tx_unlock_bh(dev); 4719 } 4720 4721 np->pause_flags &= ~(NV_PAUSEFRAME_RX_REQ|NV_PAUSEFRAME_TX_REQ); 4722 if (pause->rx_pause) 4723 np->pause_flags |= NV_PAUSEFRAME_RX_REQ; 4724 if (pause->tx_pause) 4725 np->pause_flags |= NV_PAUSEFRAME_TX_REQ; 4726 4727 if (np->autoneg && pause->autoneg) { 4728 np->pause_flags |= NV_PAUSEFRAME_AUTONEG; 4729 4730 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); 4731 adv &= ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); 4732 if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisements but disable tx pause */ 4733 adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; 4734 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) 4735 adv |= ADVERTISE_PAUSE_ASYM; 4736 mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv); 4737 4738 if (netif_running(dev)) 4739 netdev_info(dev, "link down\n"); 4740 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); 4741 bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART); 4742 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr); 4743 } else { 4744 np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE); 4745 if (pause->rx_pause) 4746 np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE; 4747 if (pause->tx_pause) 4748 np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE; 4749 4750 if (!netif_running(dev)) 4751 nv_update_linkspeed(dev); 4752 else 4753 nv_update_pause(dev, np->pause_flags); 4754 } 4755 4756 if (netif_running(dev)) { 4757 nv_start_rxtx(dev); 4758 nv_enable_irq(dev); 4759 } 4760 return 0; 4761 } 4762 4763 static int nv_set_loopback(struct net_device *dev, netdev_features_t features) 4764 { 4765 struct fe_priv *np = netdev_priv(dev); 4766 unsigned long flags; 4767 u32 miicontrol; 4768 int err, retval = 0; 4769 4770 spin_lock_irqsave(&np->lock, flags); 4771 miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); 4772 if (features & NETIF_F_LOOPBACK) { 4773 if (miicontrol & BMCR_LOOPBACK) { 4774 spin_unlock_irqrestore(&np->lock, flags); 4775 netdev_info(dev, "Loopback already enabled\n"); 4776 return 0; 4777 } 
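/* Enable path, as implemented below: disable interrupts, set BMCR
 * loopback plus forced 1000 Mbps full duplex on the PHY, then force
 * the MAC to the matching speed so that looped-back frames are
 * accepted.
 */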
4778 nv_disable_irq(dev); 4779 /* Turn on loopback mode */ 4780 miicontrol |= BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000; 4781 err = mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol); 4782 if (err) { 4783 retval = PHY_ERROR; 4784 spin_unlock_irqrestore(&np->lock, flags); 4785 phy_init(dev); 4786 } else { 4787 if (netif_running(dev)) { 4788 /* Force 1000 Mbps full-duplex */ 4789 nv_force_linkspeed(dev, NVREG_LINKSPEED_1000, 4790 1); 4791 /* Force link up */ 4792 netif_carrier_on(dev); 4793 } 4794 spin_unlock_irqrestore(&np->lock, flags); 4795 netdev_info(dev, 4796 "Internal PHY loopback mode enabled.\n"); 4797 } 4798 } else { 4799 if (!(miicontrol & BMCR_LOOPBACK)) { 4800 spin_unlock_irqrestore(&np->lock, flags); 4801 netdev_info(dev, "Loopback already disabled\n"); 4802 return 0; 4803 } 4804 nv_disable_irq(dev); 4805 /* Turn off loopback */ 4806 spin_unlock_irqrestore(&np->lock, flags); 4807 netdev_info(dev, "Internal PHY loopback mode disabled.\n"); 4808 phy_init(dev); 4809 } 4810 msleep(500); 4811 spin_lock_irqsave(&np->lock, flags); 4812 nv_enable_irq(dev); 4813 spin_unlock_irqrestore(&np->lock, flags); 4814 4815 return retval; 4816 } 4817 4818 static netdev_features_t nv_fix_features(struct net_device *dev, 4819 netdev_features_t features) 4820 { 4821 /* vlan is dependent on rx checksum offload */ 4822 if (features & (NETIF_F_HW_VLAN_CTAG_TX|NETIF_F_HW_VLAN_CTAG_RX)) 4823 features |= NETIF_F_RXCSUM; 4824 4825 return features; 4826 } 4827 4828 static void nv_vlan_mode(struct net_device *dev, netdev_features_t features) 4829 { 4830 struct fe_priv *np = get_nvpriv(dev); 4831 4832 spin_lock_irq(&np->lock); 4833 4834 if (features & NETIF_F_HW_VLAN_CTAG_RX) 4835 np->txrxctl_bits |= NVREG_TXRXCTL_VLANSTRIP; 4836 else 4837 np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANSTRIP; 4838 4839 if (features & NETIF_F_HW_VLAN_CTAG_TX) 4840 np->txrxctl_bits |= NVREG_TXRXCTL_VLANINS; 4841 else 4842 np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANINS; 4843 4844 writel(np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 4845 4846 spin_unlock_irq(&np->lock); 4847 } 4848 4849 static int nv_set_features(struct net_device *dev, netdev_features_t features) 4850 { 4851 struct fe_priv *np = netdev_priv(dev); 4852 u8 __iomem *base = get_hwbase(dev); 4853 netdev_features_t changed = dev->features ^ features; 4854 int retval; 4855 4856 if ((changed & NETIF_F_LOOPBACK) && netif_running(dev)) { 4857 retval = nv_set_loopback(dev, features); 4858 if (retval != 0) 4859 return retval; 4860 } 4861 4862 if (changed & NETIF_F_RXCSUM) { 4863 spin_lock_irq(&np->lock); 4864 4865 if (features & NETIF_F_RXCSUM) 4866 np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK; 4867 else 4868 np->txrxctl_bits &= ~NVREG_TXRXCTL_RXCHECK; 4869 4870 if (netif_running(dev)) 4871 writel(np->txrxctl_bits, base + NvRegTxRxControl); 4872 4873 spin_unlock_irq(&np->lock); 4874 } 4875 4876 if (changed & (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX)) 4877 nv_vlan_mode(dev, features); 4878 4879 return 0; 4880 } 4881 4882 static int nv_get_sset_count(struct net_device *dev, int sset) 4883 { 4884 struct fe_priv *np = netdev_priv(dev); 4885 4886 switch (sset) { 4887 case ETH_SS_TEST: 4888 if (np->driver_data & DEV_HAS_TEST_EXTENDED) 4889 return NV_TEST_COUNT_EXTENDED; 4890 else 4891 return NV_TEST_COUNT_BASE; 4892 case ETH_SS_STATS: 4893 if (np->driver_data & DEV_HAS_STATISTICS_V3) 4894 return NV_DEV_STATISTICS_V3_COUNT; 4895 else if (np->driver_data & DEV_HAS_STATISTICS_V2) 4896 return NV_DEV_STATISTICS_V2_COUNT; 4897 else if (np->driver_data & DEV_HAS_STATISTICS_V1) 4898 
return NV_DEV_STATISTICS_V1_COUNT; 4899 else 4900 return 0; 4901 default: 4902 return -EOPNOTSUPP; 4903 } 4904 } 4905 4906 static void nv_get_ethtool_stats(struct net_device *dev, 4907 struct ethtool_stats *estats, u64 *buffer) 4908 __acquires(&netdev_priv(dev)->hwstats_lock) 4909 __releases(&netdev_priv(dev)->hwstats_lock) 4910 { 4911 struct fe_priv *np = netdev_priv(dev); 4912 4913 spin_lock_bh(&np->hwstats_lock); 4914 nv_update_stats(dev); 4915 memcpy(buffer, &np->estats, 4916 nv_get_sset_count(dev, ETH_SS_STATS)*sizeof(u64)); 4917 spin_unlock_bh(&np->hwstats_lock); 4918 } 4919 4920 static int nv_link_test(struct net_device *dev) 4921 { 4922 struct fe_priv *np = netdev_priv(dev); 4923 int mii_status; 4924 4925 mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); 4926 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); 4927 4928 /* check phy link status */ 4929 if (!(mii_status & BMSR_LSTATUS)) 4930 return 0; 4931 else 4932 return 1; 4933 } 4934 4935 static int nv_register_test(struct net_device *dev) 4936 { 4937 u8 __iomem *base = get_hwbase(dev); 4938 int i = 0; 4939 u32 orig_read, new_read; 4940 4941 do { 4942 orig_read = readl(base + nv_registers_test[i].reg); 4943 4944 /* xor with mask to toggle bits */ 4945 orig_read ^= nv_registers_test[i].mask; 4946 4947 writel(orig_read, base + nv_registers_test[i].reg); 4948 4949 new_read = readl(base + nv_registers_test[i].reg); 4950 4951 if ((new_read & nv_registers_test[i].mask) != (orig_read & nv_registers_test[i].mask)) 4952 return 0; 4953 4954 /* restore original value */ 4955 orig_read ^= nv_registers_test[i].mask; 4956 writel(orig_read, base + nv_registers_test[i].reg); 4957 4958 } while (nv_registers_test[++i].reg != 0); 4959 4960 return 1; 4961 } 4962 4963 static int nv_interrupt_test(struct net_device *dev) 4964 { 4965 struct fe_priv *np = netdev_priv(dev); 4966 u8 __iomem *base = get_hwbase(dev); 4967 int ret = 1; 4968 int testcnt; 4969 u32 save_msi_flags, save_poll_interval = 0; 4970 4971 if (netif_running(dev)) { 4972 /* free current irq */ 4973 nv_free_irq(dev); 4974 save_poll_interval = readl(base+NvRegPollingInterval); 4975 } 4976 4977 /* flag to test interrupt handler */ 4978 np->intr_test = 0; 4979 4980 /* setup test irq */ 4981 save_msi_flags = np->msi_flags; 4982 np->msi_flags &= ~NV_MSI_X_VECTORS_MASK; 4983 np->msi_flags |= 0x001; /* setup 1 vector */ 4984 if (nv_request_irq(dev, 1)) 4985 return 0; 4986 4987 /* setup timer interrupt */ 4988 writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval); 4989 writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6); 4990 4991 nv_enable_hw_interrupts(dev, NVREG_IRQ_TIMER); 4992 4993 /* wait for at least one interrupt */ 4994 msleep(100); 4995 4996 spin_lock_irq(&np->lock); 4997 4998 /* flag should be set within ISR */ 4999 testcnt = np->intr_test; 5000 if (!testcnt) 5001 ret = 2; 5002 5003 nv_disable_hw_interrupts(dev, NVREG_IRQ_TIMER); 5004 if (!(np->msi_flags & NV_MSI_X_ENABLED)) 5005 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); 5006 else 5007 writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus); 5008 5009 spin_unlock_irq(&np->lock); 5010 5011 nv_free_irq(dev); 5012 5013 np->msi_flags = save_msi_flags; 5014 5015 if (netif_running(dev)) { 5016 writel(save_poll_interval, base + NvRegPollingInterval); 5017 writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6); 5018 /* restore original irq */ 5019 if (nv_request_irq(dev, 0)) 5020 return 0; 5021 } 5022 5023 return ret; 5024 } 5025 5026 static int nv_loopback_test(struct net_device *dev) 5027 { 5028 struct fe_priv *np = 
netdev_priv(dev); 5029 u8 __iomem *base = get_hwbase(dev); 5030 struct sk_buff *tx_skb, *rx_skb; 5031 dma_addr_t test_dma_addr; 5032 u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET); 5033 u32 flags; 5034 int len, i, pkt_len; 5035 u8 *pkt_data; 5036 u32 filter_flags = 0; 5037 u32 misc1_flags = 0; 5038 int ret = 1; 5039 5040 if (netif_running(dev)) { 5041 nv_disable_irq(dev); 5042 filter_flags = readl(base + NvRegPacketFilterFlags); 5043 misc1_flags = readl(base + NvRegMisc1); 5044 } else { 5045 nv_txrx_reset(dev); 5046 } 5047 5048 /* reinit driver view of the rx queue */ 5049 set_bufsize(dev); 5050 nv_init_ring(dev); 5051 5052 /* setup hardware for loopback */ 5053 writel(NVREG_MISC1_FORCE, base + NvRegMisc1); 5054 writel(NVREG_PFF_ALWAYS | NVREG_PFF_LOOPBACK, base + NvRegPacketFilterFlags); 5055 5056 /* reinit nic view of the rx queue */ 5057 writel(np->rx_buf_sz, base + NvRegOffloadConfig); 5058 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); 5059 writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), 5060 base + NvRegRingSizes); 5061 pci_push(base); 5062 5063 /* restart rx engine */ 5064 nv_start_rxtx(dev); 5065 5066 /* setup packet for tx */ 5067 pkt_len = ETH_DATA_LEN; 5068 tx_skb = netdev_alloc_skb(dev, pkt_len); 5069 if (!tx_skb) { 5070 ret = 0; 5071 goto out; 5072 } 5073 test_dma_addr = pci_map_single(np->pci_dev, tx_skb->data, 5074 skb_tailroom(tx_skb), 5075 PCI_DMA_FROMDEVICE); 5076 if (pci_dma_mapping_error(np->pci_dev, 5077 test_dma_addr)) { 5078 dev_kfree_skb_any(tx_skb); 5079 goto out; 5080 } 5081 pkt_data = skb_put(tx_skb, pkt_len); 5082 for (i = 0; i < pkt_len; i++) 5083 pkt_data[i] = (u8)(i & 0xff); 5084 5085 if (!nv_optimized(np)) { 5086 np->tx_ring.orig[0].buf = cpu_to_le32(test_dma_addr); 5087 np->tx_ring.orig[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra); 5088 } else { 5089 np->tx_ring.ex[0].bufhigh = cpu_to_le32(dma_high(test_dma_addr)); 5090 np->tx_ring.ex[0].buflow = cpu_to_le32(dma_low(test_dma_addr)); 5091 np->tx_ring.ex[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra); 5092 } 5093 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 5094 pci_push(get_hwbase(dev)); 5095 5096 msleep(500); 5097 5098 /* check for rx of the packet */ 5099 if (!nv_optimized(np)) { 5100 flags = le32_to_cpu(np->rx_ring.orig[0].flaglen); 5101 len = nv_descr_getlength(&np->rx_ring.orig[0], np->desc_ver); 5102 5103 } else { 5104 flags = le32_to_cpu(np->rx_ring.ex[0].flaglen); 5105 len = nv_descr_getlength_ex(&np->rx_ring.ex[0], np->desc_ver); 5106 } 5107 5108 if (flags & NV_RX_AVAIL) { 5109 ret = 0; 5110 } else if (np->desc_ver == DESC_VER_1) { 5111 if (flags & NV_RX_ERROR) 5112 ret = 0; 5113 } else { 5114 if (flags & NV_RX2_ERROR) 5115 ret = 0; 5116 } 5117 5118 if (ret) { 5119 if (len != pkt_len) { 5120 ret = 0; 5121 } else { 5122 rx_skb = np->rx_skb[0].skb; 5123 for (i = 0; i < pkt_len; i++) { 5124 if (rx_skb->data[i] != (u8)(i & 0xff)) { 5125 ret = 0; 5126 break; 5127 } 5128 } 5129 } 5130 } 5131 5132 pci_unmap_single(np->pci_dev, test_dma_addr, 5133 (skb_end_pointer(tx_skb) - tx_skb->data), 5134 PCI_DMA_TODEVICE); 5135 dev_kfree_skb_any(tx_skb); 5136 out: 5137 /* stop engines */ 5138 nv_stop_rxtx(dev); 5139 nv_txrx_reset(dev); 5140 /* drain rx queue */ 5141 nv_drain_rxtx(dev); 5142 5143 if (netif_running(dev)) { 5144 writel(misc1_flags, base + NvRegMisc1); 5145 writel(filter_flags, base + NvRegPacketFilterFlags); 5146 
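/* Interrupts stayed disabled for the duration of the test; re-enable
 * them only now that the original packet filter and Misc1 settings
 * have been restored above.
 */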
nv_enable_irq(dev); 5147 } 5148 5149 return ret; 5150 } 5151 5152 static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64 *buffer) 5153 { 5154 struct fe_priv *np = netdev_priv(dev); 5155 u8 __iomem *base = get_hwbase(dev); 5156 int result, count; 5157 5158 count = nv_get_sset_count(dev, ETH_SS_TEST); 5159 memset(buffer, 0, count * sizeof(u64)); 5160 5161 if (!nv_link_test(dev)) { 5162 test->flags |= ETH_TEST_FL_FAILED; 5163 buffer[0] = 1; 5164 } 5165 5166 if (test->flags & ETH_TEST_FL_OFFLINE) { 5167 if (netif_running(dev)) { 5168 netif_stop_queue(dev); 5169 nv_napi_disable(dev); 5170 netif_tx_lock_bh(dev); 5171 netif_addr_lock(dev); 5172 spin_lock_irq(&np->lock); 5173 nv_disable_hw_interrupts(dev, np->irqmask); 5174 if (!(np->msi_flags & NV_MSI_X_ENABLED)) 5175 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); 5176 else 5177 writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus); 5178 /* stop engines */ 5179 nv_stop_rxtx(dev); 5180 nv_txrx_reset(dev); 5181 /* drain rx queue */ 5182 nv_drain_rxtx(dev); 5183 spin_unlock_irq(&np->lock); 5184 netif_addr_unlock(dev); 5185 netif_tx_unlock_bh(dev); 5186 } 5187 5188 if (!nv_register_test(dev)) { 5189 test->flags |= ETH_TEST_FL_FAILED; 5190 buffer[1] = 1; 5191 } 5192 5193 result = nv_interrupt_test(dev); 5194 if (result != 1) { 5195 test->flags |= ETH_TEST_FL_FAILED; 5196 buffer[2] = 1; 5197 } 5198 if (result == 0) { 5199 /* bail out */ 5200 return; 5201 } 5202 5203 if (count > NV_TEST_COUNT_BASE && !nv_loopback_test(dev)) { 5204 test->flags |= ETH_TEST_FL_FAILED; 5205 buffer[3] = 1; 5206 } 5207 5208 if (netif_running(dev)) { 5209 /* reinit driver view of the rx queue */ 5210 set_bufsize(dev); 5211 if (nv_init_ring(dev)) { 5212 if (!np->in_shutdown) 5213 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); 5214 } 5215 /* reinit nic view of the rx queue */ 5216 writel(np->rx_buf_sz, base + NvRegOffloadConfig); 5217 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); 5218 writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), 5219 base + NvRegRingSizes); 5220 pci_push(base); 5221 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 5222 pci_push(base); 5223 /* restart rx engine */ 5224 nv_start_rxtx(dev); 5225 netif_start_queue(dev); 5226 nv_napi_enable(dev); 5227 nv_enable_hw_interrupts(dev, np->irqmask); 5228 } 5229 } 5230 } 5231 5232 static void nv_get_strings(struct net_device *dev, u32 stringset, u8 *buffer) 5233 { 5234 switch (stringset) { 5235 case ETH_SS_STATS: 5236 memcpy(buffer, &nv_estats_str, nv_get_sset_count(dev, ETH_SS_STATS)*sizeof(struct nv_ethtool_str)); 5237 break; 5238 case ETH_SS_TEST: 5239 memcpy(buffer, &nv_etests_str, nv_get_sset_count(dev, ETH_SS_TEST)*sizeof(struct nv_ethtool_str)); 5240 break; 5241 } 5242 } 5243 5244 static const struct ethtool_ops ops = { 5245 .get_drvinfo = nv_get_drvinfo, 5246 .get_link = ethtool_op_get_link, 5247 .get_wol = nv_get_wol, 5248 .set_wol = nv_set_wol, 5249 .get_regs_len = nv_get_regs_len, 5250 .get_regs = nv_get_regs, 5251 .nway_reset = nv_nway_reset, 5252 .get_ringparam = nv_get_ringparam, 5253 .set_ringparam = nv_set_ringparam, 5254 .get_pauseparam = nv_get_pauseparam, 5255 .set_pauseparam = nv_set_pauseparam, 5256 .get_strings = nv_get_strings, 5257 .get_ethtool_stats = nv_get_ethtool_stats, 5258 .get_sset_count = nv_get_sset_count, 5259 .self_test = nv_self_test, 5260 .get_ts_info = ethtool_op_get_ts_info, 5261 .get_link_ksettings = nv_get_link_ksettings, 5262 .set_link_ksettings = 
nv_set_link_ksettings, 5263 }; 5264 5265 /* The mgmt unit and driver use a semaphore to access the phy during init */ 5266 static int nv_mgmt_acquire_sema(struct net_device *dev) 5267 { 5268 struct fe_priv *np = netdev_priv(dev); 5269 u8 __iomem *base = get_hwbase(dev); 5270 int i; 5271 u32 tx_ctrl, mgmt_sema; 5272 5273 for (i = 0; i < 10; i++) { 5274 mgmt_sema = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_SEMA_MASK; 5275 if (mgmt_sema == NVREG_XMITCTL_MGMT_SEMA_FREE) 5276 break; 5277 msleep(500); 5278 } 5279 5280 if (mgmt_sema != NVREG_XMITCTL_MGMT_SEMA_FREE) 5281 return 0; 5282 5283 for (i = 0; i < 2; i++) { 5284 tx_ctrl = readl(base + NvRegTransmitterControl); 5285 tx_ctrl |= NVREG_XMITCTL_HOST_SEMA_ACQ; 5286 writel(tx_ctrl, base + NvRegTransmitterControl); 5287 5288 /* verify that semaphore was acquired */ 5289 tx_ctrl = readl(base + NvRegTransmitterControl); 5290 if (((tx_ctrl & NVREG_XMITCTL_HOST_SEMA_MASK) == NVREG_XMITCTL_HOST_SEMA_ACQ) && 5291 ((tx_ctrl & NVREG_XMITCTL_MGMT_SEMA_MASK) == NVREG_XMITCTL_MGMT_SEMA_FREE)) { 5292 np->mgmt_sema = 1; 5293 return 1; 5294 } else 5295 udelay(50); 5296 } 5297 5298 return 0; 5299 } 5300 5301 static void nv_mgmt_release_sema(struct net_device *dev) 5302 { 5303 struct fe_priv *np = netdev_priv(dev); 5304 u8 __iomem *base = get_hwbase(dev); 5305 u32 tx_ctrl; 5306 5307 if (np->driver_data & DEV_HAS_MGMT_UNIT) { 5308 if (np->mgmt_sema) { 5309 tx_ctrl = readl(base + NvRegTransmitterControl); 5310 tx_ctrl &= ~NVREG_XMITCTL_HOST_SEMA_ACQ; 5311 writel(tx_ctrl, base + NvRegTransmitterControl); 5312 } 5313 } 5314 } 5315 5316 5317 static int nv_mgmt_get_version(struct net_device *dev) 5318 { 5319 struct fe_priv *np = netdev_priv(dev); 5320 u8 __iomem *base = get_hwbase(dev); 5321 u32 data_ready = readl(base + NvRegTransmitterControl); 5322 u32 data_ready2 = 0; 5323 unsigned long start; 5324 int ready = 0; 5325 5326 writel(NVREG_MGMTUNITGETVERSION, base + NvRegMgmtUnitGetVersion); 5327 writel(data_ready ^ NVREG_XMITCTL_DATA_START, base + NvRegTransmitterControl); 5328 start = jiffies; 5329 while (time_before(jiffies, start + 5*HZ)) { 5330 data_ready2 = readl(base + NvRegTransmitterControl); 5331 if ((data_ready & NVREG_XMITCTL_DATA_READY) != (data_ready2 & NVREG_XMITCTL_DATA_READY)) { 5332 ready = 1; 5333 break; 5334 } 5335 schedule_timeout_uninterruptible(1); 5336 } 5337 5338 if (!ready || (data_ready2 & NVREG_XMITCTL_DATA_ERROR)) 5339 return 0; 5340 5341 np->mgmt_version = readl(base + NvRegMgmtUnitVersion) & NVREG_MGMTUNITVERSION; 5342 5343 return 1; 5344 } 5345 5346 static int nv_open(struct net_device *dev) 5347 { 5348 struct fe_priv *np = netdev_priv(dev); 5349 u8 __iomem *base = get_hwbase(dev); 5350 int ret = 1; 5351 int oom, i; 5352 u32 low; 5353 5354 /* power up phy */ 5355 mii_rw(dev, np->phyaddr, MII_BMCR, 5356 mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ) & ~BMCR_PDOWN); 5357 5358 nv_txrx_gate(dev, false); 5359 /* erase previous misconfiguration */ 5360 if (np->driver_data & DEV_HAS_POWER_CNTRL) 5361 nv_mac_reset(dev); 5362 writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA); 5363 writel(0, base + NvRegMulticastAddrB); 5364 writel(NVREG_MCASTMASKA_NONE, base + NvRegMulticastMaskA); 5365 writel(NVREG_MCASTMASKB_NONE, base + NvRegMulticastMaskB); 5366 writel(0, base + NvRegPacketFilterFlags); 5367 5368 writel(0, base + NvRegTransmitterControl); 5369 writel(0, base + NvRegReceiverControl); 5370 5371 writel(0, base + NvRegAdapterControl); 5372 5373 if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE) 5374 
writel(NVREG_TX_PAUSEFRAME_DISABLE, base + NvRegTxPauseFrame); 5375 5376 /* initialize descriptor rings */ 5377 set_bufsize(dev); 5378 oom = nv_init_ring(dev); 5379 5380 writel(0, base + NvRegLinkSpeed); 5381 writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll); 5382 nv_txrx_reset(dev); 5383 writel(0, base + NvRegUnknownSetupReg6); 5384 5385 np->in_shutdown = 0; 5386 5387 /* give hw rings */ 5388 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); 5389 writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), 5390 base + NvRegRingSizes); 5391 5392 writel(np->linkspeed, base + NvRegLinkSpeed); 5393 if (np->desc_ver == DESC_VER_1) 5394 writel(NVREG_TX_WM_DESC1_DEFAULT, base + NvRegTxWatermark); 5395 else 5396 writel(NVREG_TX_WM_DESC2_3_DEFAULT, base + NvRegTxWatermark); 5397 writel(np->txrxctl_bits, base + NvRegTxRxControl); 5398 writel(np->vlanctl_bits, base + NvRegVlanControl); 5399 pci_push(base); 5400 writel(NVREG_TXRXCTL_BIT1|np->txrxctl_bits, base + NvRegTxRxControl); 5401 if (reg_delay(dev, NvRegUnknownSetupReg5, 5402 NVREG_UNKSETUP5_BIT31, NVREG_UNKSETUP5_BIT31, 5403 NV_SETUP5_DELAY, NV_SETUP5_DELAYMAX)) 5404 netdev_info(dev, 5405 "%s: SetupReg5, Bit 31 remained off\n", __func__); 5406 5407 writel(0, base + NvRegMIIMask); 5408 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); 5409 writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus); 5410 5411 writel(NVREG_MISC1_FORCE | NVREG_MISC1_HD, base + NvRegMisc1); 5412 writel(readl(base + NvRegTransmitterStatus), base + NvRegTransmitterStatus); 5413 writel(NVREG_PFF_ALWAYS, base + NvRegPacketFilterFlags); 5414 writel(np->rx_buf_sz, base + NvRegOffloadConfig); 5415 5416 writel(readl(base + NvRegReceiverStatus), base + NvRegReceiverStatus); 5417 5418 get_random_bytes(&low, sizeof(low)); 5419 low &= NVREG_SLOTTIME_MASK; 5420 if (np->desc_ver == DESC_VER_1) { 5421 writel(low|NVREG_SLOTTIME_DEFAULT, base + NvRegSlotTime); 5422 } else { 5423 if (!(np->driver_data & DEV_HAS_GEAR_MODE)) { 5424 /* setup legacy backoff */ 5425 writel(NVREG_SLOTTIME_LEGBF_ENABLED|NVREG_SLOTTIME_10_100_FULL|low, base + NvRegSlotTime); 5426 } else { 5427 writel(NVREG_SLOTTIME_10_100_FULL, base + NvRegSlotTime); 5428 nv_gear_backoff_reseed(dev); 5429 } 5430 } 5431 writel(NVREG_TX_DEFERRAL_DEFAULT, base + NvRegTxDeferral); 5432 writel(NVREG_RX_DEFERRAL_DEFAULT, base + NvRegRxDeferral); 5433 if (poll_interval == -1) { 5434 if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT) 5435 writel(NVREG_POLL_DEFAULT_THROUGHPUT, base + NvRegPollingInterval); 5436 else 5437 writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval); 5438 } else 5439 writel(poll_interval & 0xFFFF, base + NvRegPollingInterval); 5440 writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6); 5441 writel((np->phyaddr << NVREG_ADAPTCTL_PHYSHIFT)|NVREG_ADAPTCTL_PHYVALID|NVREG_ADAPTCTL_RUNNING, 5442 base + NvRegAdapterControl); 5443 writel(NVREG_MIISPEED_BIT8|NVREG_MIIDELAY, base + NvRegMIISpeed); 5444 writel(NVREG_MII_LINKCHANGE, base + NvRegMIIMask); 5445 if (np->wolenabled) 5446 writel(NVREG_WAKEUPFLAGS_ENABLE , base + NvRegWakeUpFlags); 5447 5448 i = readl(base + NvRegPowerState); 5449 if ((i & NVREG_POWERSTATE_POWEREDUP) == 0) 5450 writel(NVREG_POWERSTATE_POWEREDUP|i, base + NvRegPowerState); 5451 5452 pci_push(base); 5453 udelay(10); 5454 writel(readl(base + NvRegPowerState) | NVREG_POWERSTATE_VALID, base + NvRegPowerState); 5455 5456 nv_disable_hw_interrupts(dev, np->irqmask); 5457 pci_push(base); 
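/* With hw interrupts masked off, flush any stale MII and irq status
 * bits so that nothing left over from before the reset fires as soon
 * as the irq is requested below.
 */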
5458 writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus); 5459 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); 5460 pci_push(base); 5461 5462 if (nv_request_irq(dev, 0)) 5463 goto out_drain; 5464 5465 /* ask for interrupts */ 5466 nv_enable_hw_interrupts(dev, np->irqmask); 5467 5468 spin_lock_irq(&np->lock); 5469 writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA); 5470 writel(0, base + NvRegMulticastAddrB); 5471 writel(NVREG_MCASTMASKA_NONE, base + NvRegMulticastMaskA); 5472 writel(NVREG_MCASTMASKB_NONE, base + NvRegMulticastMaskB); 5473 writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags); 5474 /* One manual link speed update: Interrupts are enabled, future link 5475 * speed changes cause interrupts and are handled by nv_link_irq(). 5476 */ 5477 { 5478 u32 miistat; 5479 miistat = readl(base + NvRegMIIStatus); 5480 writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus); 5481 } 5482 /* set linkspeed to invalid value, thus force nv_update_linkspeed 5483 * to init hw */ 5484 np->linkspeed = 0; 5485 ret = nv_update_linkspeed(dev); 5486 nv_start_rxtx(dev); 5487 netif_start_queue(dev); 5488 nv_napi_enable(dev); 5489 5490 if (ret) { 5491 netif_carrier_on(dev); 5492 } else { 5493 netdev_info(dev, "no link during initialization\n"); 5494 netif_carrier_off(dev); 5495 } 5496 if (oom) 5497 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); 5498 5499 /* start statistics timer */ 5500 if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_STATISTICS_V3)) 5501 mod_timer(&np->stats_poll, 5502 round_jiffies(jiffies + STATS_INTERVAL)); 5503 5504 spin_unlock_irq(&np->lock); 5505 5506 /* If the loopback feature was set while the device was down, make sure 5507 * that it's set correctly now. 5508 */ 5509 if (dev->features & NETIF_F_LOOPBACK) 5510 nv_set_loopback(dev, dev->features); 5511 5512 return 0; 5513 out_drain: 5514 nv_drain_rxtx(dev); 5515 return ret; 5516 } 5517 5518 static int nv_close(struct net_device *dev) 5519 { 5520 struct fe_priv *np = netdev_priv(dev); 5521 u8 __iomem *base; 5522 5523 spin_lock_irq(&np->lock); 5524 np->in_shutdown = 1; 5525 spin_unlock_irq(&np->lock); 5526 nv_napi_disable(dev); 5527 synchronize_irq(np->pci_dev->irq); 5528 5529 del_timer_sync(&np->oom_kick); 5530 del_timer_sync(&np->nic_poll); 5531 del_timer_sync(&np->stats_poll); 5532 5533 netif_stop_queue(dev); 5534 spin_lock_irq(&np->lock); 5535 nv_update_pause(dev, 0); /* otherwise stop_tx bricks NIC */ 5536 nv_stop_rxtx(dev); 5537 nv_txrx_reset(dev); 5538 5539 /* disable interrupts on the nic or we will lock up */ 5540 base = get_hwbase(dev); 5541 nv_disable_hw_interrupts(dev, np->irqmask); 5542 pci_push(base); 5543 5544 spin_unlock_irq(&np->lock); 5545 5546 nv_free_irq(dev); 5547 5548 nv_drain_rxtx(dev); 5549 5550 if (np->wolenabled || !phy_power_down) { 5551 nv_txrx_gate(dev, false); 5552 writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags); 5553 nv_start_rx(dev); 5554 } else { 5555 /* power down phy */ 5556 mii_rw(dev, np->phyaddr, MII_BMCR, 5557 mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ)|BMCR_PDOWN); 5558 nv_txrx_gate(dev, true); 5559 } 5560 5561 /* FIXME: power down nic */ 5562 5563 return 0; 5564 } 5565 5566 static const struct net_device_ops nv_netdev_ops = { 5567 .ndo_open = nv_open, 5568 .ndo_stop = nv_close, 5569 .ndo_get_stats64 = nv_get_stats64, 5570 .ndo_start_xmit = nv_start_xmit, 5571 .ndo_tx_timeout = nv_tx_timeout, 5572 .ndo_change_mtu = nv_change_mtu, 5573 .ndo_fix_features = nv_fix_features, 5574 .ndo_set_features = nv_set_features, 
5575 .ndo_validate_addr = eth_validate_addr, 5576 .ndo_set_mac_address = nv_set_mac_address, 5577 .ndo_set_rx_mode = nv_set_multicast, 5578 #ifdef CONFIG_NET_POLL_CONTROLLER 5579 .ndo_poll_controller = nv_poll_controller, 5580 #endif 5581 }; 5582 5583 static const struct net_device_ops nv_netdev_ops_optimized = { 5584 .ndo_open = nv_open, 5585 .ndo_stop = nv_close, 5586 .ndo_get_stats64 = nv_get_stats64, 5587 .ndo_start_xmit = nv_start_xmit_optimized, 5588 .ndo_tx_timeout = nv_tx_timeout, 5589 .ndo_change_mtu = nv_change_mtu, 5590 .ndo_fix_features = nv_fix_features, 5591 .ndo_set_features = nv_set_features, 5592 .ndo_validate_addr = eth_validate_addr, 5593 .ndo_set_mac_address = nv_set_mac_address, 5594 .ndo_set_rx_mode = nv_set_multicast, 5595 #ifdef CONFIG_NET_POLL_CONTROLLER 5596 .ndo_poll_controller = nv_poll_controller, 5597 #endif 5598 }; 5599 5600 static int nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id) 5601 { 5602 struct net_device *dev; 5603 struct fe_priv *np; 5604 unsigned long addr; 5605 u8 __iomem *base; 5606 int err, i; 5607 u32 powerstate, txreg; 5608 u32 phystate_orig = 0, phystate; 5609 int phyinitialized = 0; 5610 static int printed_version; 5611 5612 if (!printed_version++) 5613 pr_info("Reverse Engineered nForce ethernet driver. Version %s.\n", 5614 FORCEDETH_VERSION); 5615 5616 dev = alloc_etherdev(sizeof(struct fe_priv)); 5617 err = -ENOMEM; 5618 if (!dev) 5619 goto out; 5620 5621 np = netdev_priv(dev); 5622 np->dev = dev; 5623 np->pci_dev = pci_dev; 5624 spin_lock_init(&np->lock); 5625 spin_lock_init(&np->hwstats_lock); 5626 SET_NETDEV_DEV(dev, &pci_dev->dev); 5627 u64_stats_init(&np->swstats_rx_syncp); 5628 u64_stats_init(&np->swstats_tx_syncp); 5629 5630 setup_timer(&np->oom_kick, nv_do_rx_refill, (unsigned long)dev); 5631 setup_timer(&np->nic_poll, nv_do_nic_poll, (unsigned long)dev); 5632 init_timer_deferrable(&np->stats_poll); 5633 np->stats_poll.data = (unsigned long) dev; 5634 np->stats_poll.function = nv_do_stats_poll; /* timer handler */ 5635 5636 err = pci_enable_device(pci_dev); 5637 if (err) 5638 goto out_free; 5639 5640 pci_set_master(pci_dev); 5641 5642 err = pci_request_regions(pci_dev, DRV_NAME); 5643 if (err < 0) 5644 goto out_disable; 5645 5646 if (id->driver_data & (DEV_HAS_VLAN|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V2|DEV_HAS_STATISTICS_V3)) 5647 np->register_size = NV_PCI_REGSZ_VER3; 5648 else if (id->driver_data & DEV_HAS_STATISTICS_V1) 5649 np->register_size = NV_PCI_REGSZ_VER2; 5650 else 5651 np->register_size = NV_PCI_REGSZ_VER1; 5652 5653 err = -EINVAL; 5654 addr = 0; 5655 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { 5656 if (pci_resource_flags(pci_dev, i) & IORESOURCE_MEM && 5657 pci_resource_len(pci_dev, i) >= np->register_size) { 5658 addr = pci_resource_start(pci_dev, i); 5659 break; 5660 } 5661 } 5662 if (i == DEVICE_COUNT_RESOURCE) { 5663 dev_info(&pci_dev->dev, "Couldn't find register window\n"); 5664 goto out_relreg; 5665 } 5666 5667 /* copy of driver data */ 5668 np->driver_data = id->driver_data; 5669 /* copy of device id */ 5670 np->device_id = id->device; 5671 5672 /* handle different descriptor versions */ 5673 if (id->driver_data & DEV_HAS_HIGH_DMA) { 5674 /* packet format 3: supports 40-bit addressing */ 5675 np->desc_ver = DESC_VER_3; 5676 np->txrxctl_bits = NVREG_TXRXCTL_DESC_3; 5677 if (dma_64bit) { 5678 if (pci_set_dma_mask(pci_dev, DMA_BIT_MASK(39))) 5679 dev_info(&pci_dev->dev, 5680 "64-bit DMA failed, using 32-bit addressing\n"); 5681 else 5682 dev->features |= NETIF_F_HIGHDMA; 5683 
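/*
 * The streaming DMA mask requested above is 39 bits (DMA_BIT_MASK(39));
 * the coherent mask for the descriptor rings is requested separately
 * below and may fail independently, in which case 32-bit ring buffers
 * are used (see the message below).
 */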
if (pci_set_consistent_dma_mask(pci_dev, DMA_BIT_MASK(39))) { 5684 dev_info(&pci_dev->dev, 5685 "64-bit DMA (consistent) failed, using 32-bit ring buffers\n"); 5686 } 5687 } 5688 } else if (id->driver_data & DEV_HAS_LARGEDESC) { 5689 /* packet format 2: supports jumbo frames */ 5690 np->desc_ver = DESC_VER_2; 5691 np->txrxctl_bits = NVREG_TXRXCTL_DESC_2; 5692 } else { 5693 /* original packet format */ 5694 np->desc_ver = DESC_VER_1; 5695 np->txrxctl_bits = NVREG_TXRXCTL_DESC_1; 5696 } 5697 5698 np->pkt_limit = NV_PKTLIMIT_1; 5699 if (id->driver_data & DEV_HAS_LARGEDESC) 5700 np->pkt_limit = NV_PKTLIMIT_2; 5701 5702 if (id->driver_data & DEV_HAS_CHECKSUM) { 5703 np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK; 5704 dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_SG | 5705 NETIF_F_TSO | NETIF_F_RXCSUM; 5706 } 5707 5708 np->vlanctl_bits = 0; 5709 if (id->driver_data & DEV_HAS_VLAN) { 5710 np->vlanctl_bits = NVREG_VLANCONTROL_ENABLE; 5711 dev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX | 5712 NETIF_F_HW_VLAN_CTAG_TX; 5713 } 5714 5715 dev->features |= dev->hw_features; 5716 5717 /* Add loopback capability to the device. */ 5718 dev->hw_features |= NETIF_F_LOOPBACK; 5719 5720 /* MTU range: 64 - 1500 or 9100 */ 5721 dev->min_mtu = ETH_ZLEN + ETH_FCS_LEN; 5722 dev->max_mtu = np->pkt_limit; 5723 5724 np->pause_flags = NV_PAUSEFRAME_RX_CAPABLE | NV_PAUSEFRAME_RX_REQ | NV_PAUSEFRAME_AUTONEG; 5725 if ((id->driver_data & DEV_HAS_PAUSEFRAME_TX_V1) || 5726 (id->driver_data & DEV_HAS_PAUSEFRAME_TX_V2) || 5727 (id->driver_data & DEV_HAS_PAUSEFRAME_TX_V3)) { 5728 np->pause_flags |= NV_PAUSEFRAME_TX_CAPABLE | NV_PAUSEFRAME_TX_REQ; 5729 } 5730 5731 err = -ENOMEM; 5732 np->base = ioremap(addr, np->register_size); 5733 if (!np->base) 5734 goto out_relreg; 5735 5736 np->rx_ring_size = RX_RING_DEFAULT; 5737 np->tx_ring_size = TX_RING_DEFAULT; 5738 5739 if (!nv_optimized(np)) { 5740 np->rx_ring.orig = pci_alloc_consistent(pci_dev, 5741 sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size), 5742 &np->ring_addr); 5743 if (!np->rx_ring.orig) 5744 goto out_unmap; 5745 np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size]; 5746 } else { 5747 np->rx_ring.ex = pci_alloc_consistent(pci_dev, 5748 sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size), 5749 &np->ring_addr); 5750 if (!np->rx_ring.ex) 5751 goto out_unmap; 5752 np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size]; 5753 } 5754 np->rx_skb = kcalloc(np->rx_ring_size, sizeof(struct nv_skb_map), GFP_KERNEL); 5755 np->tx_skb = kcalloc(np->tx_ring_size, sizeof(struct nv_skb_map), GFP_KERNEL); 5756 if (!np->rx_skb || !np->tx_skb) 5757 goto out_freering; 5758 5759 if (!nv_optimized(np)) 5760 dev->netdev_ops = &nv_netdev_ops; 5761 else 5762 dev->netdev_ops = &nv_netdev_ops_optimized; 5763 5764 netif_napi_add(dev, &np->napi, nv_napi_poll, RX_WORK_PER_LOOP); 5765 dev->ethtool_ops = &ops; 5766 dev->watchdog_timeo = NV_WATCHDOG_TIMEO; 5767 5768 pci_set_drvdata(pci_dev, dev); 5769 5770 /* read the mac address */ 5771 base = get_hwbase(dev); 5772 np->orig_mac[0] = readl(base + NvRegMacAddrA); 5773 np->orig_mac[1] = readl(base + NvRegMacAddrB); 5774 5775 /* check the workaround bit for correct mac address order */ 5776 txreg = readl(base + NvRegTransmitPoll); 5777 if (id->driver_data & DEV_HAS_CORRECT_MACADDR) { 5778 /* mac address is already in correct order */ 5779 dev->dev_addr[0] = (np->orig_mac[0] >> 0) & 0xff; 5780 dev->dev_addr[1] = (np->orig_mac[0] >> 8) & 0xff; 5781 dev->dev_addr[2] = (np->orig_mac[0] >> 16) & 0xff; 5782 dev->dev_addr[3] = (np->orig_mac[0] 
>> 24) & 0xff;
5783 dev->dev_addr[4] = (np->orig_mac[1] >> 0) & 0xff;
5784 dev->dev_addr[5] = (np->orig_mac[1] >> 8) & 0xff;
5785 } else if (txreg & NVREG_TRANSMITPOLL_MAC_ADDR_REV) {
5786 /* mac address is already in correct order */
5787 dev->dev_addr[0] = (np->orig_mac[0] >> 0) & 0xff;
5788 dev->dev_addr[1] = (np->orig_mac[0] >> 8) & 0xff;
5789 dev->dev_addr[2] = (np->orig_mac[0] >> 16) & 0xff;
5790 dev->dev_addr[3] = (np->orig_mac[0] >> 24) & 0xff;
5791 dev->dev_addr[4] = (np->orig_mac[1] >> 0) & 0xff;
5792 dev->dev_addr[5] = (np->orig_mac[1] >> 8) & 0xff;
5793 /*
5794 * Set orig mac address back to the reversed version.
5795 * This flag will be cleared during low power transition.
5796 * Therefore, we should always put back the reversed address.
5797 */
5798 np->orig_mac[0] = (dev->dev_addr[5] << 0) + (dev->dev_addr[4] << 8) +
5799 (dev->dev_addr[3] << 16) + (dev->dev_addr[2] << 24);
5800 np->orig_mac[1] = (dev->dev_addr[1] << 0) + (dev->dev_addr[0] << 8);
5801 } else {
5802 /* need to reverse mac address to correct order */
5803 dev->dev_addr[0] = (np->orig_mac[1] >> 8) & 0xff;
5804 dev->dev_addr[1] = (np->orig_mac[1] >> 0) & 0xff;
5805 dev->dev_addr[2] = (np->orig_mac[0] >> 24) & 0xff;
5806 dev->dev_addr[3] = (np->orig_mac[0] >> 16) & 0xff;
5807 dev->dev_addr[4] = (np->orig_mac[0] >> 8) & 0xff;
5808 dev->dev_addr[5] = (np->orig_mac[0] >> 0) & 0xff;
5809 writel(txreg|NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll);
5810 dev_dbg(&pci_dev->dev,
5811 "%s: set workaround bit for reversed mac addr\n",
5812 __func__);
5813 }
5814
5815 if (!is_valid_ether_addr(dev->dev_addr)) {
5816 /*
5817 * Bad mac address. At least one bios sets the mac address
5818 * to 01:23:45:67:89:ab
5819 */
5820 dev_err(&pci_dev->dev,
5821 "Invalid MAC address detected: %pM - Please complain to your hardware vendor.\n",
5822 dev->dev_addr);
5823 eth_hw_addr_random(dev);
5824 dev_err(&pci_dev->dev,
5825 "Using random MAC address: %pM\n", dev->dev_addr);
5826 }
5827
5828 /* set mac address */
5829 nv_copy_mac_to_hw(dev);
5830
5831 /* disable WOL */
5832 writel(0, base + NvRegWakeUpFlags);
5833 np->wolenabled = 0;
5834 device_set_wakeup_enable(&pci_dev->dev, false);
5835
5836 if (id->driver_data & DEV_HAS_POWER_CNTRL) {
5837
5838 /* take phy and nic out of low power mode */
5839 powerstate = readl(base + NvRegPowerState2);
5840 powerstate &= ~NVREG_POWERSTATE2_POWERUP_MASK;
5841 if ((id->driver_data & DEV_NEED_LOW_POWER_FIX) &&
5842 pci_dev->revision >= 0xA3)
5843 powerstate |= NVREG_POWERSTATE2_POWERUP_REV_A3;
5844 writel(powerstate, base + NvRegPowerState2);
5845 }
5846
5847 if (np->desc_ver == DESC_VER_1)
5848 np->tx_flags = NV_TX_VALID;
5849 else
5850 np->tx_flags = NV_TX2_VALID;
5851
5852 np->msi_flags = 0;
5853 if ((id->driver_data & DEV_HAS_MSI) && msi)
5854 np->msi_flags |= NV_MSI_CAPABLE;
5855
5856 if ((id->driver_data & DEV_HAS_MSI_X) && msix) {
5857 /* MSI-X has had reported issues when the irq mask is modified,
5858 * as happens with NAPI; keep it disabled for now.
5859 */
5860 #if 0
5861 np->msi_flags |= NV_MSI_X_CAPABLE;
5862 #endif
5863 }
5864
5865 if (optimization_mode == NV_OPTIMIZATION_MODE_CPU) {
5866 np->irqmask = NVREG_IRQMASK_CPU;
5867 if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */
5868 np->msi_flags |= 0x0001;
5869 } else if (optimization_mode == NV_OPTIMIZATION_MODE_DYNAMIC &&
5870 !(id->driver_data & DEV_NEED_TIMERIRQ)) {
5871 /* start off in throughput mode */
5872 np->irqmask = NVREG_IRQMASK_THROUGHPUT;
5873 /* remove support for msix mode */
5874 np->msi_flags &= ~NV_MSI_X_CAPABLE;
5875 } else {
5876 optimization_mode = NV_OPTIMIZATION_MODE_THROUGHPUT;
5877 np->irqmask = NVREG_IRQMASK_THROUGHPUT;
5878 if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */
5879 np->msi_flags |= 0x0003;
5880 }
5881
5882 if (id->driver_data & DEV_NEED_TIMERIRQ)
5883 np->irqmask |= NVREG_IRQ_TIMER;
5884 if (id->driver_data & DEV_NEED_LINKTIMER) {
5885 np->need_linktimer = 1;
5886 np->link_timeout = jiffies + LINK_TIMEOUT;
5887 } else {
5888 np->need_linktimer = 0;
5889 }
5890
5891 /* Limit the number of outstanding tx descriptors to work around a hw bug */
5892 if (id->driver_data & DEV_NEED_TX_LIMIT) {
5893 np->tx_limit = 1;
5894 if (((id->driver_data & DEV_NEED_TX_LIMIT2) == DEV_NEED_TX_LIMIT2) &&
5895 pci_dev->revision >= 0xA2)
5896 np->tx_limit = 0;
5897 }
5898
5899 /* clear phy state and temporarily halt phy interrupts */
5900 writel(0, base + NvRegMIIMask);
5901 phystate = readl(base + NvRegAdapterControl);
5902 if (phystate & NVREG_ADAPTCTL_RUNNING) {
5903 phystate_orig = 1;
5904 phystate &= ~NVREG_ADAPTCTL_RUNNING;
5905 writel(phystate, base + NvRegAdapterControl);
5906 }
5907 writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
5908
5909 if (id->driver_data & DEV_HAS_MGMT_UNIT) {
5910 /* management unit running on the mac? */
5911 if ((readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_ST) &&
5912 (readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_PHY_INIT) &&
5913 nv_mgmt_acquire_sema(dev) &&
5914 nv_mgmt_get_version(dev)) {
5915 np->mac_in_use = 1;
5916 if (np->mgmt_version > 0)
5917 np->mac_in_use = readl(base + NvRegMgmtUnitControl) & NVREG_MGMTUNITCONTROL_INUSE;
5918 /* has the management unit already set up the phy? */
5919 if (np->mac_in_use &&
5920 ((readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_MASK) ==
5921 NVREG_XMITCTL_SYNC_PHY_INIT)) {
5922 /* phy is inited by mgmt unit */
5923 phyinitialized = 1;
5924 } else {
5925 /* we need to init the phy */
5926 }
5927 }
5928 }
5929
5930 /* find a suitable phy */
5931 for (i = 1; i <= 32; i++) {
5932 int id1, id2;
5933 int phyaddr = i & 0x1F;
5934
5935 spin_lock_irq(&np->lock);
5936 id1 = mii_rw(dev, phyaddr, MII_PHYSID1, MII_READ);
5937 spin_unlock_irq(&np->lock);
5938 if (id1 < 0 || id1 == 0xffff)
5939 continue;
5940 spin_lock_irq(&np->lock);
5941 id2 = mii_rw(dev, phyaddr, MII_PHYSID2, MII_READ);
5942 spin_unlock_irq(&np->lock);
5943 if (id2 < 0 || id2 == 0xffff)
5944 continue;
5945
5946 np->phy_model = id2 & PHYID2_MODEL_MASK;
5947 id1 = (id1 & PHYID1_OUI_MASK) << PHYID1_OUI_SHFT;
5948 id2 = (id2 & PHYID2_OUI_MASK) >> PHYID2_OUI_SHFT;
5949 np->phyaddr = phyaddr;
5950 np->phy_oui = id1 | id2;
5951
5952 /* Realtek hardcoded phy id1 to all zeros on certain phys */
5953 if (np->phy_oui == PHY_OUI_REALTEK2)
5954 np->phy_oui = PHY_OUI_REALTEK;
5955 /* Setup phy revision for Realtek */
5956 if (np->phy_oui == PHY_OUI_REALTEK && np->phy_model == PHY_MODEL_REALTEK_8211)
5957 np->phy_rev = mii_rw(dev, phyaddr, MII_RESV1, MII_READ) & PHY_REV_MASK;
5958
5959 break;
5960 }
5961 if (i == 33) {
5962 dev_info(&pci_dev->dev, "open: Could not find a valid PHY\n");
5963 goto out_error;
5964 }
5965
5966 if (!phyinitialized) {
5967 /* reset it */
5968 phy_init(dev);
5969 } else {
5970 /* see if it is a gigabit phy */
5971 u32 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
5972 if (mii_status & PHY_GIGABIT)
5973 np->gigabit = PHY_GIGABIT;
5974 }
5975
5976 /* set default link speed settings */
5977 np->linkspeed = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
5978 np->duplex = 0;
5979 np->autoneg = 1;
5980
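/*
 * The link defaults above (forced 10 Mbit, half duplex, autoneg
 * enabled) are only placeholders: nv_open() zeroes np->linkspeed and
 * calls nv_update_linkspeed() to negotiate the real parameters.
 */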
5981 err = register_netdev(dev); 5982 if (err) { 5983 dev_info(&pci_dev->dev, "unable to register netdev: %d\n", err); 5984 goto out_error; 5985 } 5986 5987 netif_carrier_off(dev); 5988 5989 /* Some NICs freeze when TX pause is enabled while NIC is 5990 * down, and this stays across warm reboots. The sequence 5991 * below should be enough to recover from that state. 5992 */ 5993 nv_update_pause(dev, 0); 5994 nv_start_tx(dev); 5995 nv_stop_tx(dev); 5996 5997 if (id->driver_data & DEV_HAS_VLAN) 5998 nv_vlan_mode(dev, dev->features); 5999 6000 dev_info(&pci_dev->dev, "ifname %s, PHY OUI 0x%x @ %d, addr %pM\n", 6001 dev->name, np->phy_oui, np->phyaddr, dev->dev_addr); 6002 6003 dev_info(&pci_dev->dev, "%s%s%s%s%s%s%s%s%s%s%sdesc-v%u\n", 6004 dev->features & NETIF_F_HIGHDMA ? "highdma " : "", 6005 dev->features & (NETIF_F_IP_CSUM | NETIF_F_SG) ? 6006 "csum " : "", 6007 dev->features & (NETIF_F_HW_VLAN_CTAG_RX | 6008 NETIF_F_HW_VLAN_CTAG_TX) ? 6009 "vlan " : "", 6010 dev->features & (NETIF_F_LOOPBACK) ? 6011 "loopback " : "", 6012 id->driver_data & DEV_HAS_POWER_CNTRL ? "pwrctl " : "", 6013 id->driver_data & DEV_HAS_MGMT_UNIT ? "mgmt " : "", 6014 id->driver_data & DEV_NEED_TIMERIRQ ? "timirq " : "", 6015 np->gigabit == PHY_GIGABIT ? "gbit " : "", 6016 np->need_linktimer ? "lnktim " : "", 6017 np->msi_flags & NV_MSI_CAPABLE ? "msi " : "", 6018 np->msi_flags & NV_MSI_X_CAPABLE ? "msi-x " : "", 6019 np->desc_ver); 6020 6021 return 0; 6022 6023 out_error: 6024 if (phystate_orig) 6025 writel(phystate|NVREG_ADAPTCTL_RUNNING, base + NvRegAdapterControl); 6026 out_freering: 6027 free_rings(dev); 6028 out_unmap: 6029 iounmap(get_hwbase(dev)); 6030 out_relreg: 6031 pci_release_regions(pci_dev); 6032 out_disable: 6033 pci_disable_device(pci_dev); 6034 out_free: 6035 free_netdev(dev); 6036 out: 6037 return err; 6038 } 6039 6040 static void nv_restore_phy(struct net_device *dev) 6041 { 6042 struct fe_priv *np = netdev_priv(dev); 6043 u16 phy_reserved, mii_control; 6044 6045 if (np->phy_oui == PHY_OUI_REALTEK && 6046 np->phy_model == PHY_MODEL_REALTEK_8201 && 6047 phy_cross == NV_CROSSOVER_DETECTION_DISABLED) { 6048 mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3); 6049 phy_reserved = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, MII_READ); 6050 phy_reserved &= ~PHY_REALTEK_INIT_MSK1; 6051 phy_reserved |= PHY_REALTEK_INIT8; 6052 mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, phy_reserved); 6053 mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1); 6054 6055 /* restart auto negotiation */ 6056 mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); 6057 mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE); 6058 mii_rw(dev, np->phyaddr, MII_BMCR, mii_control); 6059 } 6060 } 6061 6062 static void nv_restore_mac_addr(struct pci_dev *pci_dev) 6063 { 6064 struct net_device *dev = pci_get_drvdata(pci_dev); 6065 struct fe_priv *np = netdev_priv(dev); 6066 u8 __iomem *base = get_hwbase(dev); 6067 6068 /* special op: write back the misordered MAC address - otherwise 6069 * the next nv_probe would see a wrong address. 
6070 */
6071 writel(np->orig_mac[0], base + NvRegMacAddrA);
6072 writel(np->orig_mac[1], base + NvRegMacAddrB);
6073 writel(readl(base + NvRegTransmitPoll) & ~NVREG_TRANSMITPOLL_MAC_ADDR_REV,
6074 base + NvRegTransmitPoll);
6075 }
6076
6077 static void nv_remove(struct pci_dev *pci_dev)
6078 {
6079 struct net_device *dev = pci_get_drvdata(pci_dev);
6080
6081 unregister_netdev(dev);
6082
6083 nv_restore_mac_addr(pci_dev);
6084
6085 /* restore any phy related changes */
6086 nv_restore_phy(dev);
6087
6088 nv_mgmt_release_sema(dev);
6089
6090 /* free all structures */
6091 free_rings(dev);
6092 iounmap(get_hwbase(dev));
6093 pci_release_regions(pci_dev);
6094 pci_disable_device(pci_dev);
6095 free_netdev(dev);
6096 }
6097
6098 #ifdef CONFIG_PM_SLEEP
6099 static int nv_suspend(struct device *device)
6100 {
6101 struct pci_dev *pdev = to_pci_dev(device);
6102 struct net_device *dev = pci_get_drvdata(pdev);
6103 struct fe_priv *np = netdev_priv(dev);
6104 u8 __iomem *base = get_hwbase(dev);
6105 int i;
6106
6107 if (netif_running(dev)) {
6108 /* Gross, but simplest: close the device here; nv_resume() reopens it. */
6109 nv_close(dev);
6110 }
6111 netif_device_detach(dev);
6112
6113 /* save non-pci configuration space */
6114 for (i = 0; i < np->register_size/sizeof(u32); i++)
6115 np->saved_config_space[i] = readl(base + i*sizeof(u32));
6116
6117 return 0;
6118 }
6119
6120 static int nv_resume(struct device *device)
6121 {
6122 struct pci_dev *pdev = to_pci_dev(device);
6123 struct net_device *dev = pci_get_drvdata(pdev);
6124 struct fe_priv *np = netdev_priv(dev);
6125 u8 __iomem *base = get_hwbase(dev);
6126 int i, rc = 0;
6127
6128 /* restore non-pci configuration space */
6129 for (i = 0; i < np->register_size/sizeof(u32); i++)
6130 writel(np->saved_config_space[i], base+i*sizeof(u32));
6131
6132 if (np->driver_data & DEV_NEED_MSI_FIX)
6133 pci_write_config_dword(pdev, NV_MSI_PRIV_OFFSET, NV_MSI_PRIV_VALUE);
6134
6135 /* restore phy state, including autoneg */
6136 phy_init(dev);
6137
6138 netif_device_attach(dev);
6139 if (netif_running(dev)) {
6140 rc = nv_open(dev);
6141 nv_set_multicast(dev);
6142 }
6143 return rc;
6144 }
6145
6146 static SIMPLE_DEV_PM_OPS(nv_pm_ops, nv_suspend, nv_resume);
6147 #define NV_PM_OPS (&nv_pm_ops)
6148
6149 #else
6150 #define NV_PM_OPS NULL
6151 #endif /* CONFIG_PM_SLEEP */
6152
6153 #ifdef CONFIG_PM
6154 static void nv_shutdown(struct pci_dev *pdev)
6155 {
6156 struct net_device *dev = pci_get_drvdata(pdev);
6157 struct fe_priv *np = netdev_priv(dev);
6158
6159 if (netif_running(dev))
6160 nv_close(dev);
6161
6162 /*
6163 * Restore the MAC so a kernel started by kexec won't get confused.
6164 * If we really go for poweroff, we must not restore the MAC,
6165 * otherwise the MAC for WOL will be reversed at least on some boards.
6166 */
6167 if (system_state != SYSTEM_POWER_OFF)
6168 nv_restore_mac_addr(pdev);
6169
6170 pci_disable_device(pdev);
6171 /*
6172 * Apparently it is not possible to reinitialise from D3 hot,
6173 * only put the device into D3 if we really go for poweroff.
6174 */ 6175 if (system_state == SYSTEM_POWER_OFF) { 6176 pci_wake_from_d3(pdev, np->wolenabled); 6177 pci_set_power_state(pdev, PCI_D3hot); 6178 } 6179 } 6180 #else 6181 #define nv_shutdown NULL 6182 #endif /* CONFIG_PM */ 6183 6184 static const struct pci_device_id pci_tbl[] = { 6185 { /* nForce Ethernet Controller */ 6186 PCI_DEVICE(0x10DE, 0x01C3), 6187 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER, 6188 }, 6189 { /* nForce2 Ethernet Controller */ 6190 PCI_DEVICE(0x10DE, 0x0066), 6191 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER, 6192 }, 6193 { /* nForce3 Ethernet Controller */ 6194 PCI_DEVICE(0x10DE, 0x00D6), 6195 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER, 6196 }, 6197 { /* nForce3 Ethernet Controller */ 6198 PCI_DEVICE(0x10DE, 0x0086), 6199 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM, 6200 }, 6201 { /* nForce3 Ethernet Controller */ 6202 PCI_DEVICE(0x10DE, 0x008C), 6203 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM, 6204 }, 6205 { /* nForce3 Ethernet Controller */ 6206 PCI_DEVICE(0x10DE, 0x00E6), 6207 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM, 6208 }, 6209 { /* nForce3 Ethernet Controller */ 6210 PCI_DEVICE(0x10DE, 0x00DF), 6211 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM, 6212 }, 6213 { /* CK804 Ethernet Controller */ 6214 PCI_DEVICE(0x10DE, 0x0056), 6215 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT, 6216 }, 6217 { /* CK804 Ethernet Controller */ 6218 PCI_DEVICE(0x10DE, 0x0057), 6219 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT, 6220 }, 6221 { /* MCP04 Ethernet Controller */ 6222 PCI_DEVICE(0x10DE, 0x0037), 6223 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT, 6224 }, 6225 { /* MCP04 Ethernet Controller */ 6226 PCI_DEVICE(0x10DE, 0x0038), 6227 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT, 6228 }, 6229 { /* MCP51 Ethernet Controller */ 6230 PCI_DEVICE(0x10DE, 0x0268), 6231 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V1|DEV_NEED_LOW_POWER_FIX, 6232 }, 6233 { /* MCP51 Ethernet Controller */ 6234 PCI_DEVICE(0x10DE, 0x0269), 6235 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V1|DEV_NEED_LOW_POWER_FIX, 6236 }, 6237 { /* MCP55 Ethernet Controller */ 6238 PCI_DEVICE(0x10DE, 0x0372), 6239 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_NEED_TX_LIMIT|DEV_NEED_MSI_FIX, 6240 }, 6241 { /* MCP55 Ethernet Controller */ 6242 PCI_DEVICE(0x10DE, 0x0373), 6243 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_NEED_TX_LIMIT|DEV_NEED_MSI_FIX, 6244 }, 6245 { /* MCP61 Ethernet Controller */ 6246 PCI_DEVICE(0x10DE, 0x03E5), 6247 .driver_data = 
DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX, 6248 }, 6249 { /* MCP61 Ethernet Controller */ 6250 PCI_DEVICE(0x10DE, 0x03E6), 6251 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX, 6252 }, 6253 { /* MCP61 Ethernet Controller */ 6254 PCI_DEVICE(0x10DE, 0x03EE), 6255 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX, 6256 }, 6257 { /* MCP61 Ethernet Controller */ 6258 PCI_DEVICE(0x10DE, 0x03EF), 6259 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX, 6260 }, 6261 { /* MCP65 Ethernet Controller */ 6262 PCI_DEVICE(0x10DE, 0x0450), 6263 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX, 6264 }, 6265 { /* MCP65 Ethernet Controller */ 6266 PCI_DEVICE(0x10DE, 0x0451), 6267 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX, 6268 }, 6269 { /* MCP65 Ethernet Controller */ 6270 PCI_DEVICE(0x10DE, 0x0452), 6271 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX, 6272 }, 6273 { /* MCP65 Ethernet Controller */ 6274 PCI_DEVICE(0x10DE, 0x0453), 6275 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX, 6276 }, 6277 { /* MCP67 Ethernet Controller */ 6278 PCI_DEVICE(0x10DE, 0x054C), 6279 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX, 6280 }, 6281 { /* MCP67 Ethernet Controller */ 6282 PCI_DEVICE(0x10DE, 0x054D), 6283 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX, 6284 }, 6285 { /* MCP67 Ethernet Controller */ 6286 PCI_DEVICE(0x10DE, 0x054E), 6287 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX, 6288 }, 6289 { /* MCP67 Ethernet Controller */ 6290 PCI_DEVICE(0x10DE, 0x054F), 6291 .driver_data = 
DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX, 6292 }, 6293 { /* MCP73 Ethernet Controller */ 6294 PCI_DEVICE(0x10DE, 0x07DC), 6295 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX, 6296 }, 6297 { /* MCP73 Ethernet Controller */ 6298 PCI_DEVICE(0x10DE, 0x07DD), 6299 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX, 6300 }, 6301 { /* MCP73 Ethernet Controller */ 6302 PCI_DEVICE(0x10DE, 0x07DE), 6303 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX, 6304 }, 6305 { /* MCP73 Ethernet Controller */ 6306 PCI_DEVICE(0x10DE, 0x07DF), 6307 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX, 6308 }, 6309 { /* MCP77 Ethernet Controller */ 6310 PCI_DEVICE(0x10DE, 0x0760), 6311 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX, 6312 }, 6313 { /* MCP77 Ethernet Controller */ 6314 PCI_DEVICE(0x10DE, 0x0761), 6315 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX, 6316 }, 6317 { /* MCP77 Ethernet Controller */ 6318 PCI_DEVICE(0x10DE, 0x0762), 6319 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX, 6320 }, 6321 { /* MCP77 Ethernet Controller */ 6322 PCI_DEVICE(0x10DE, 0x0763), 6323 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX, 6324 }, 6325 { /* MCP79 Ethernet Controller */ 6326 PCI_DEVICE(0x10DE, 0x0AB0), 6327 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX, 
6328 },
6329 { /* MCP79 Ethernet Controller */
6330 PCI_DEVICE(0x10DE, 0x0AB1),
6331 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
6332 },
6333 { /* MCP79 Ethernet Controller */
6334 PCI_DEVICE(0x10DE, 0x0AB2),
6335 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
6336 },
6337 { /* MCP79 Ethernet Controller */
6338 PCI_DEVICE(0x10DE, 0x0AB3),
6339 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
6340 },
6341 { /* MCP89 Ethernet Controller */
6342 PCI_DEVICE(0x10DE, 0x0D7D),
6343 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX,
6344 },
6345 {0,},
6346 };
6347
6348 static struct pci_driver forcedeth_pci_driver = {
6349 .name = DRV_NAME,
6350 .id_table = pci_tbl,
6351 .probe = nv_probe,
6352 .remove = nv_remove,
6353 .shutdown = nv_shutdown,
6354 .driver.pm = NV_PM_OPS,
6355 };
6356
6357 module_param(max_interrupt_work, int, 0);
6358 MODULE_PARM_DESC(max_interrupt_work, "forcedeth maximum events handled per interrupt");
6359 module_param(optimization_mode, int, 0);
6360 MODULE_PARM_DESC(optimization_mode, "In throughput mode (0), every tx & rx packet will generate an interrupt. In CPU mode (1), interrupts are controlled by a timer. In dynamic mode (2), the mode toggles between throughput and CPU mode based on network load.");
6361 module_param(poll_interval, int, 0);
6362 MODULE_PARM_DESC(poll_interval, "The interval determines how frequently the timer interrupt fires; the value written is [(time_in_micro_secs * 100) / (2^10)]. The minimum is 0 and the maximum is 65535.");
6363 module_param(msi, int, 0);
6364 MODULE_PARM_DESC(msi, "MSI interrupts are enabled by setting to 1 and disabled by setting to 0.");
6365 module_param(msix, int, 0);
6366 MODULE_PARM_DESC(msix, "MSI-X interrupts are enabled by setting to 1 and disabled by setting to 0.");
6367 module_param(dma_64bit, int, 0);
6368 MODULE_PARM_DESC(dma_64bit, "High DMA is enabled by setting to 1 and disabled by setting to 0.");
6369 module_param(phy_cross, int, 0);
6370 MODULE_PARM_DESC(phy_cross, "Phy crossover detection for Realtek 8201 phy is enabled by setting to 1 and disabled by setting to 0.");
6371 module_param(phy_power_down, int, 0);
6372 MODULE_PARM_DESC(phy_power_down, "Power down phy and disable link when interface is down (1), or leave phy powered up (0).");
6373 module_param(debug_tx_timeout, bool, 0);
6374 MODULE_PARM_DESC(debug_tx_timeout,
6375 "Dump tx related registers and ring when tx_timeout happens");
6376
6377 module_pci_driver(forcedeth_pci_driver);
6378 MODULE_AUTHOR("Manfred Spraul <manfred@colorfullife.com>");
6379 MODULE_DESCRIPTION("Reverse Engineered nForce ethernet driver");
6380 MODULE_LICENSE("GPL");
6381 MODULE_DEVICE_TABLE(pci, pci_tbl);
6382
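/*
 * Example invocation (illustrative values only; see the
 * MODULE_PARM_DESC strings above): load the driver in dynamic
 * interrupt mode with MSI enabled:
 *   modprobe forcedeth optimization_mode=2 msi=1
 */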