/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2006-2013 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/i2c.h>
#include <linux/mii.h>
#include <linux/slab.h>
#include "net_driver.h"
#include "bitfield.h"
#include "efx.h"
#include "nic.h"
#include "farch_regs.h"
#include "io.h"
#include "phy.h"
#include "workarounds.h"
#include "selftest.h"
#include "mdio_10g.h"

/* Hardware control for SFC4000 (aka Falcon). */

/**************************************************************************
 *
 * NIC stats
 *
 **************************************************************************
 */

#define FALCON_MAC_STATS_SIZE 0x100

#define XgRxOctets_offset 0x0
#define XgRxOctets_WIDTH 48
#define XgRxOctetsOK_offset 0x8
#define XgRxOctetsOK_WIDTH 48
#define XgRxPkts_offset 0x10
#define XgRxPkts_WIDTH 32
#define XgRxPktsOK_offset 0x14
#define XgRxPktsOK_WIDTH 32
#define XgRxBroadcastPkts_offset 0x18
#define XgRxBroadcastPkts_WIDTH 32
#define XgRxMulticastPkts_offset 0x1C
#define XgRxMulticastPkts_WIDTH 32
#define XgRxUnicastPkts_offset 0x20
#define XgRxUnicastPkts_WIDTH 32
#define XgRxUndersizePkts_offset 0x24
#define XgRxUndersizePkts_WIDTH 32
#define XgRxOversizePkts_offset 0x28
#define XgRxOversizePkts_WIDTH 32
#define XgRxJabberPkts_offset 0x2C
#define XgRxJabberPkts_WIDTH 32
#define XgRxUndersizeFCSerrorPkts_offset 0x30
#define XgRxUndersizeFCSerrorPkts_WIDTH 32
#define XgRxDropEvents_offset 0x34
#define XgRxDropEvents_WIDTH 32
#define XgRxFCSerrorPkts_offset 0x38
#define XgRxFCSerrorPkts_WIDTH 32
#define XgRxAlignError_offset 0x3C
#define XgRxAlignError_WIDTH 32
#define XgRxSymbolError_offset 0x40
#define XgRxSymbolError_WIDTH 32
#define XgRxInternalMACError_offset 0x44
#define XgRxInternalMACError_WIDTH 32
#define XgRxControlPkts_offset 0x48
#define XgRxControlPkts_WIDTH 32
#define XgRxPausePkts_offset 0x4C
#define XgRxPausePkts_WIDTH 32
#define XgRxPkts64Octets_offset 0x50
#define XgRxPkts64Octets_WIDTH 32
#define XgRxPkts65to127Octets_offset 0x54
#define XgRxPkts65to127Octets_WIDTH 32
#define XgRxPkts128to255Octets_offset 0x58
#define XgRxPkts128to255Octets_WIDTH 32
#define XgRxPkts256to511Octets_offset 0x5C
#define XgRxPkts256to511Octets_WIDTH 32
#define XgRxPkts512to1023Octets_offset 0x60
#define XgRxPkts512to1023Octets_WIDTH 32
#define XgRxPkts1024to15xxOctets_offset 0x64
#define XgRxPkts1024to15xxOctets_WIDTH 32
#define XgRxPkts15xxtoMaxOctets_offset 0x68
#define XgRxPkts15xxtoMaxOctets_WIDTH 32
#define XgRxLengthError_offset 0x6C
#define XgRxLengthError_WIDTH 32
#define XgTxPkts_offset 0x80
#define XgTxPkts_WIDTH 32
#define XgTxOctets_offset 0x88
#define XgTxOctets_WIDTH 48
#define XgTxMulticastPkts_offset 0x90
#define XgTxMulticastPkts_WIDTH 32
#define XgTxBroadcastPkts_offset 0x94
#define XgTxBroadcastPkts_WIDTH 32
#define XgTxUnicastPkts_offset 0x98
#define XgTxUnicastPkts_WIDTH 32
#define XgTxControlPkts_offset 0x9C
#define XgTxControlPkts_WIDTH 32
#define XgTxPausePkts_offset 0xA0
#define XgTxPausePkts_WIDTH 32
#define XgTxPkts64Octets_offset 0xA4
#define XgTxPkts64Octets_WIDTH 32
#define XgTxPkts65to127Octets_offset 0xA8
#define XgTxPkts65to127Octets_WIDTH 32
#define XgTxPkts128to255Octets_offset 0xAC
#define XgTxPkts128to255Octets_WIDTH 32
#define XgTxPkts256to511Octets_offset 0xB0
#define XgTxPkts256to511Octets_WIDTH 32
#define XgTxPkts512to1023Octets_offset 0xB4
#define XgTxPkts512to1023Octets_WIDTH 32
#define XgTxPkts1024to15xxOctets_offset 0xB8
#define XgTxPkts1024to15xxOctets_WIDTH 32
#define XgTxPkts1519toMaxOctets_offset 0xBC
#define XgTxPkts1519toMaxOctets_WIDTH 32
#define XgTxUndersizePkts_offset 0xC0
#define XgTxUndersizePkts_WIDTH 32
#define XgTxOversizePkts_offset 0xC4
#define XgTxOversizePkts_WIDTH 32
#define XgTxNonTcpUdpPkt_offset 0xC8
#define XgTxNonTcpUdpPkt_WIDTH 16
#define XgTxMacSrcErrPkt_offset 0xCC
#define XgTxMacSrcErrPkt_WIDTH 16
#define XgTxIpSrcErrPkt_offset 0xD0
#define XgTxIpSrcErrPkt_WIDTH 16
#define XgDmaDone_offset 0xD4
#define XgDmaDone_WIDTH 32

#define FALCON_XMAC_STATS_DMA_FLAG(efx)					\
	(*(u32 *)((efx)->stats_buffer.addr + XgDmaDone_offset))

#define FALCON_DMA_STAT(ext_name, hw_name)				\
	[FALCON_STAT_ ## ext_name] =					\
	{ #ext_name,							\
	  /* 48-bit stats are zero-padded to 64 on DMA */		\
	  hw_name ## _ ## WIDTH == 48 ? 64 : hw_name ## _ ## WIDTH,	\
	  hw_name ## _ ## offset }
#define FALCON_OTHER_STAT(ext_name)					\
	[FALCON_STAT_ ## ext_name] = { #ext_name, 0, 0 }
#define GENERIC_SW_STAT(ext_name)					\
	[GENERIC_STAT_ ## ext_name] = { #ext_name, 0, 0 }
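
/* For illustration: a worked expansion of FALCON_DMA_STAT, where
 * FALCON_STAT_tx_bytes is the matching index in the FALCON_STAT_* enum.
 * FALCON_DMA_STAT(tx_bytes, XgTxOctets) expands to
 *
 *	[FALCON_STAT_tx_bytes] = { "tx_bytes", 64, 0x88 }
 *
 * where the width is promoted from XgTxOctets_WIDTH (48) to 64 because
 * 48-bit counters are zero-padded to 64 bits in the DMA buffer, and 0x88
 * is XgTxOctets_offset.
 */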

static const struct ef4_hw_stat_desc falcon_stat_desc[FALCON_STAT_COUNT] = {
	FALCON_DMA_STAT(tx_bytes, XgTxOctets),
	FALCON_DMA_STAT(tx_packets, XgTxPkts),
	FALCON_DMA_STAT(tx_pause, XgTxPausePkts),
	FALCON_DMA_STAT(tx_control, XgTxControlPkts),
	FALCON_DMA_STAT(tx_unicast, XgTxUnicastPkts),
	FALCON_DMA_STAT(tx_multicast, XgTxMulticastPkts),
	FALCON_DMA_STAT(tx_broadcast, XgTxBroadcastPkts),
	FALCON_DMA_STAT(tx_lt64, XgTxUndersizePkts),
	FALCON_DMA_STAT(tx_64, XgTxPkts64Octets),
	FALCON_DMA_STAT(tx_65_to_127, XgTxPkts65to127Octets),
	FALCON_DMA_STAT(tx_128_to_255, XgTxPkts128to255Octets),
	FALCON_DMA_STAT(tx_256_to_511, XgTxPkts256to511Octets),
	FALCON_DMA_STAT(tx_512_to_1023, XgTxPkts512to1023Octets),
	FALCON_DMA_STAT(tx_1024_to_15xx, XgTxPkts1024to15xxOctets),
	FALCON_DMA_STAT(tx_15xx_to_jumbo, XgTxPkts1519toMaxOctets),
	FALCON_DMA_STAT(tx_gtjumbo, XgTxOversizePkts),
	FALCON_DMA_STAT(tx_non_tcpudp, XgTxNonTcpUdpPkt),
	FALCON_DMA_STAT(tx_mac_src_error, XgTxMacSrcErrPkt),
	FALCON_DMA_STAT(tx_ip_src_error, XgTxIpSrcErrPkt),
	FALCON_DMA_STAT(rx_bytes, XgRxOctets),
	FALCON_DMA_STAT(rx_good_bytes, XgRxOctetsOK),
	FALCON_OTHER_STAT(rx_bad_bytes),
	FALCON_DMA_STAT(rx_packets, XgRxPkts),
	FALCON_DMA_STAT(rx_good, XgRxPktsOK),
	FALCON_DMA_STAT(rx_bad, XgRxFCSerrorPkts),
	FALCON_DMA_STAT(rx_pause, XgRxPausePkts),
	FALCON_DMA_STAT(rx_control, XgRxControlPkts),
	FALCON_DMA_STAT(rx_unicast, XgRxUnicastPkts),
	FALCON_DMA_STAT(rx_multicast, XgRxMulticastPkts),
	FALCON_DMA_STAT(rx_broadcast, XgRxBroadcastPkts),
	FALCON_DMA_STAT(rx_lt64, XgRxUndersizePkts),
	FALCON_DMA_STAT(rx_64, XgRxPkts64Octets),
	FALCON_DMA_STAT(rx_65_to_127, XgRxPkts65to127Octets),
	FALCON_DMA_STAT(rx_128_to_255, XgRxPkts128to255Octets),
	FALCON_DMA_STAT(rx_256_to_511, XgRxPkts256to511Octets),
	FALCON_DMA_STAT(rx_512_to_1023, XgRxPkts512to1023Octets),
	FALCON_DMA_STAT(rx_1024_to_15xx, XgRxPkts1024to15xxOctets),
	FALCON_DMA_STAT(rx_15xx_to_jumbo, XgRxPkts15xxtoMaxOctets),
	FALCON_DMA_STAT(rx_gtjumbo, XgRxOversizePkts),
	FALCON_DMA_STAT(rx_bad_lt64, XgRxUndersizeFCSerrorPkts),
	FALCON_DMA_STAT(rx_bad_gtjumbo, XgRxJabberPkts),
	FALCON_DMA_STAT(rx_overflow, XgRxDropEvents),
	FALCON_DMA_STAT(rx_symbol_error, XgRxSymbolError),
	FALCON_DMA_STAT(rx_align_error, XgRxAlignError),
	FALCON_DMA_STAT(rx_length_error, XgRxLengthError),
	FALCON_DMA_STAT(rx_internal_error, XgRxInternalMACError),
	FALCON_OTHER_STAT(rx_nodesc_drop_cnt),
	GENERIC_SW_STAT(rx_nodesc_trunc),
	GENERIC_SW_STAT(rx_noskb_drops),
};
static const unsigned long falcon_stat_mask[] = {
	[0 ... BITS_TO_LONGS(FALCON_STAT_COUNT) - 1] = ~0UL,
};

/**************************************************************************
 *
 * Basic SPI command set and bit definitions
 *
 *************************************************************************/

#define SPI_WRSR 0x01		/* Write status register */
#define SPI_WRITE 0x02		/* Write data to memory array */
#define SPI_READ 0x03		/* Read data from memory array */
#define SPI_WRDI 0x04		/* Reset write enable latch */
#define SPI_RDSR 0x05		/* Read status register */
#define SPI_WREN 0x06		/* Set write enable latch */
#define SPI_SST_EWSR 0x50	/* SST: Enable write to status register */

#define SPI_STATUS_WPEN 0x80	/* Write-protect pin enabled */
#define SPI_STATUS_BP2 0x10	/* Block protection bit 2 */
#define SPI_STATUS_BP1 0x08	/* Block protection bit 1 */
#define SPI_STATUS_BP0 0x04	/* Block protection bit 0 */
#define SPI_STATUS_WEN 0x02	/* State of the write enable latch */
#define SPI_STATUS_NRDY 0x01	/* Device busy flag */
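
/* Together these opcodes implement the standard serial-flash protocol:
 * a write is WREN (set the write enable latch), then WRITE, then polling
 * RDSR until the NRDY bit clears.  A hedged sketch in terms of the
 * falcon_spi_cmd() helper defined later in this file:
 *
 *	falcon_spi_cmd(efx, spi, SPI_WREN, -1, NULL, NULL, 0);
 *	falcon_spi_cmd(efx, spi, SPI_WRITE, addr, buf, NULL, len);
 *	do {
 *		falcon_spi_cmd(efx, spi, SPI_RDSR, -1, NULL,
 *			       &status, sizeof(status));
 *	} while (status & SPI_STATUS_NRDY);
 *
 * falcon_spi_write() below is the real implementation, adding command
 * munging, timeouts and read-back verification on top of this sequence.
 */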

/**************************************************************************
 *
 * Non-volatile memory layout
 *
 **************************************************************************
 */

/* SFC4000 flash is partitioned into:
 *     0-0x400       chip and board config (see struct falcon_nvconfig)
 *     0x400-0x8000  unused (or may contain VPD if EEPROM not present)
 *     0x8000-end    boot code (mapped to PCI expansion ROM)
 * SFC4000 small EEPROM (size < 0x400) is used for VPD only.
 * SFC4000 large EEPROM (size >= 0x400) is partitioned into:
 *     0-0x400       chip and board config
 *     0x400-0x800   configurable VPD
 *     0x800-0x1800  boot config
 * Aside from the chip and board config, all of these are optional and may
 * be absent or truncated depending on the devices used.
 */
#define FALCON_NVCONFIG_END 0x400U
#define FALCON_FLASH_BOOTCODE_START 0x8000U
#define FALCON_EEPROM_BOOTCONFIG_START 0x800U
#define FALCON_EEPROM_BOOTCONFIG_END 0x1800U

/* Board configuration v2 (v1 is obsolete; later versions are compatible) */
struct falcon_nvconfig_board_v2 {
	__le16 nports;
	u8 port0_phy_addr;
	u8 port0_phy_type;
	u8 port1_phy_addr;
	u8 port1_phy_type;
	__le16 asic_sub_revision;
	__le16 board_revision;
} __packed;

/* Board configuration v3 extra information */
struct falcon_nvconfig_board_v3 {
	__le32 spi_device_type[2];
} __packed;

/* Bit numbers for spi_device_type */
#define SPI_DEV_TYPE_SIZE_LBN 0
#define SPI_DEV_TYPE_SIZE_WIDTH 5
#define SPI_DEV_TYPE_ADDR_LEN_LBN 6
#define SPI_DEV_TYPE_ADDR_LEN_WIDTH 2
#define SPI_DEV_TYPE_ERASE_CMD_LBN 8
#define SPI_DEV_TYPE_ERASE_CMD_WIDTH 8
#define SPI_DEV_TYPE_ERASE_SIZE_LBN 16
#define SPI_DEV_TYPE_ERASE_SIZE_WIDTH 5
#define SPI_DEV_TYPE_BLOCK_SIZE_LBN 24
#define SPI_DEV_TYPE_BLOCK_SIZE_WIDTH 5
#define SPI_DEV_TYPE_FIELD(type, field)					\
	(((type) >> EF4_LOW_BIT(field)) & EF4_MASK32(EF4_WIDTH(field)))
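
/* A worked example of SPI_DEV_TYPE_FIELD using default_flash_type, defined
 * below: its SIZE field is 17, so
 * SPI_DEV_TYPE_FIELD(default_flash_type, SPI_DEV_TYPE_SIZE) == 17 and the
 * device holds 1 << 17 = 128 KB; its ERASE_SIZE field is 15, giving a
 * 1 << 15 = 32 KB erase block; its BLOCK_SIZE field is 8, giving a
 * 1 << 8 = 256 byte write block.  The size fields encode log2 of the size
 * in bytes.
 */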

#define FALCON_NVCONFIG_OFFSET 0x300

#define FALCON_NVCONFIG_BOARD_MAGIC_NUM 0xFA1C
struct falcon_nvconfig {
	ef4_oword_t ee_vpd_cfg_reg;			/* 0x300 */
	u8 mac_address[2][8];				/* 0x310 */
	ef4_oword_t pcie_sd_ctl0123_reg;		/* 0x320 */
	ef4_oword_t pcie_sd_ctl45_reg;			/* 0x330 */
	ef4_oword_t pcie_pcs_ctl_stat_reg;		/* 0x340 */
	ef4_oword_t hw_init_reg;			/* 0x350 */
	ef4_oword_t nic_stat_reg;			/* 0x360 */
	ef4_oword_t glb_ctl_reg;			/* 0x370 */
	ef4_oword_t srm_cfg_reg;			/* 0x380 */
	ef4_oword_t spare_reg;				/* 0x390 */
	__le16 board_magic_num;				/* 0x3A0 */
	__le16 board_struct_ver;
	__le16 board_checksum;
	struct falcon_nvconfig_board_v2 board_v2;
	ef4_oword_t ee_base_page_reg;			/* 0x3B0 */
	struct falcon_nvconfig_board_v3 board_v3;	/* 0x3C0 */
} __packed;

/*************************************************************************/

static int falcon_reset_hw(struct ef4_nic *efx, enum reset_type method);
static void falcon_reconfigure_mac_wrapper(struct ef4_nic *efx);

static const unsigned int
/* "Large" EEPROM device: Atmel AT25640 or similar
 * 8 KB, 16-bit address, 32 B write block */
large_eeprom_type = ((13 << SPI_DEV_TYPE_SIZE_LBN)
		     | (2 << SPI_DEV_TYPE_ADDR_LEN_LBN)
		     | (5 << SPI_DEV_TYPE_BLOCK_SIZE_LBN)),
/* Default flash device: Atmel AT25F1024
 * 128 KB, 24-bit address, 32 KB erase block, 256 B write block */
default_flash_type = ((17 << SPI_DEV_TYPE_SIZE_LBN)
		      | (3 << SPI_DEV_TYPE_ADDR_LEN_LBN)
		      | (0x52 << SPI_DEV_TYPE_ERASE_CMD_LBN)
		      | (15 << SPI_DEV_TYPE_ERASE_SIZE_LBN)
		      | (8 << SPI_DEV_TYPE_BLOCK_SIZE_LBN));

/**************************************************************************
 *
 * I2C bus - this is a bit-bashing interface using GPIO pins
 * Note that it uses the output enables to tristate the outputs
 * SDA is the data pin and SCL is the clock
 *
 **************************************************************************
 */
static void falcon_setsda(void *data, int state)
{
	struct ef4_nic *efx = (struct ef4_nic *)data;
	ef4_oword_t reg;

	ef4_reado(efx, &reg, FR_AB_GPIO_CTL);
	EF4_SET_OWORD_FIELD(reg, FRF_AB_GPIO3_OEN, !state);
	ef4_writeo(efx, &reg, FR_AB_GPIO_CTL);
}

static void falcon_setscl(void *data, int state)
{
	struct ef4_nic *efx = (struct ef4_nic *)data;
	ef4_oword_t reg;

	ef4_reado(efx, &reg, FR_AB_GPIO_CTL);
	EF4_SET_OWORD_FIELD(reg, FRF_AB_GPIO0_OEN, !state);
	ef4_writeo(efx, &reg, FR_AB_GPIO_CTL);
}

static int falcon_getsda(void *data)
{
	struct ef4_nic *efx = (struct ef4_nic *)data;
	ef4_oword_t reg;

	ef4_reado(efx, &reg, FR_AB_GPIO_CTL);
	return EF4_OWORD_FIELD(reg, FRF_AB_GPIO3_IN);
}

static int falcon_getscl(void *data)
{
	struct ef4_nic *efx = (struct ef4_nic *)data;
	ef4_oword_t reg;

	ef4_reado(efx, &reg, FR_AB_GPIO_CTL);
	return EF4_OWORD_FIELD(reg, FRF_AB_GPIO0_IN);
}

static const struct i2c_algo_bit_data falcon_i2c_bit_operations = {
	.setsda		= falcon_setsda,
	.setscl		= falcon_setscl,
	.getsda		= falcon_getsda,
	.getscl		= falcon_getscl,
	.udelay		= 5,
	/* Wait up to 50 ms for slave to let us pull SCL high */
	.timeout	= DIV_ROUND_UP(HZ, 20),
};
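
/* The setsda/setscl operations above write the GPIO *output enable* with
 * !state rather than a data register: enabling the output drives the line
 * low, while disabling it tristates the pin and lets the external pull-up
 * raise it, which is exactly the open-drain behaviour I2C requires.  A
 * hedged sketch of how such an algorithm is typically bound to an adapter
 * with the in-kernel i2c-algo-bit layer (the actual registration is done
 * elsewhere in this driver, not in this section):
 *
 *	efx->i2c_data = falcon_i2c_bit_operations;
 *	efx->i2c_data.data = efx;
 *	efx->i2c_adap.algo_data = &efx->i2c_data;
 *	rc = i2c_bit_add_bus(&efx->i2c_adap);
 */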

static void falcon_push_irq_moderation(struct ef4_channel *channel)
{
	ef4_dword_t timer_cmd;
	struct ef4_nic *efx = channel->efx;

	/* Set timer register */
	if (channel->irq_moderation_us) {
		unsigned int ticks;

		ticks = ef4_usecs_to_ticks(efx, channel->irq_moderation_us);
		EF4_POPULATE_DWORD_2(timer_cmd,
				     FRF_AB_TC_TIMER_MODE,
				     FFE_BB_TIMER_MODE_INT_HLDOFF,
				     FRF_AB_TC_TIMER_VAL,
				     ticks - 1);
	} else {
		EF4_POPULATE_DWORD_2(timer_cmd,
				     FRF_AB_TC_TIMER_MODE,
				     FFE_BB_TIMER_MODE_DIS,
				     FRF_AB_TC_TIMER_VAL, 0);
	}
	BUILD_BUG_ON(FR_AA_TIMER_COMMAND_KER != FR_BZ_TIMER_COMMAND_P0);
	ef4_writed_page_locked(efx, &timer_cmd, FR_BZ_TIMER_COMMAND_P0,
			       channel->channel);
}

static void falcon_deconfigure_mac_wrapper(struct ef4_nic *efx);

static void falcon_prepare_flush(struct ef4_nic *efx)
{
	falcon_deconfigure_mac_wrapper(efx);

	/* Wait for the TX and RX FIFOs to get to the next packet boundary
	 * (~1 ms without back-pressure), then drain the remainder of the
	 * FIFOs at data path speeds (negligible), with a healthy margin. */
	msleep(10);
}

/* Acknowledge a legacy interrupt from Falcon
 *
 * This acknowledges a legacy (not MSI) interrupt via INT_ACK_KER_REG.
 *
 * Due to SFC bug 3706 (silicon revision <=A1) reads can be duplicated in
 * the BIU.  Interrupt acknowledge is read-sensitive, so we must write
 * instead (then read to ensure the BIU collector is flushed).
 *
 * NB most hardware supports MSI interrupts
 */
static inline void falcon_irq_ack_a1(struct ef4_nic *efx)
{
	ef4_dword_t reg;

	EF4_POPULATE_DWORD_1(reg, FRF_AA_INT_ACK_KER_FIELD, 0xb7eb7e);
	ef4_writed(efx, &reg, FR_AA_INT_ACK_KER);
	ef4_readd(efx, &reg, FR_AA_WORK_AROUND_BROKEN_PCI_READS);
}

static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
{
	struct ef4_nic *efx = dev_id;
	ef4_oword_t *int_ker = efx->irq_status.addr;
	int syserr;
	int queues;

	/* Check to see if this is our interrupt.  If it isn't, we
	 * exit without having touched the hardware.
	 */
	if (unlikely(EF4_OWORD_IS_ZERO(*int_ker))) {
		netif_vdbg(efx, intr, efx->net_dev,
			   "IRQ %d on CPU %d not for me\n", irq,
			   raw_smp_processor_id());
		return IRQ_NONE;
	}
	efx->last_irq_cpu = raw_smp_processor_id();
	netif_vdbg(efx, intr, efx->net_dev,
		   "IRQ %d on CPU %d status " EF4_OWORD_FMT "\n",
		   irq, raw_smp_processor_id(), EF4_OWORD_VAL(*int_ker));

	if (!likely(ACCESS_ONCE(efx->irq_soft_enabled)))
		return IRQ_HANDLED;

	/* Check to see if we have a serious error condition */
	syserr = EF4_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
	if (unlikely(syserr))
		return ef4_farch_fatal_interrupt(efx);

	/* Determine interrupting queues, clear interrupt status
	 * register and acknowledge the device interrupt.
	 */
	BUILD_BUG_ON(FSF_AZ_NET_IVEC_INT_Q_WIDTH > EF4_MAX_CHANNELS);
	queues = EF4_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_INT_Q);
	EF4_ZERO_OWORD(*int_ker);
	wmb(); /* Ensure the vector is cleared before interrupt ack */
	falcon_irq_ack_a1(efx);

	if (queues & 1)
		ef4_schedule_channel_irq(ef4_get_channel(efx, 0));
	if (queues & 2)
		ef4_schedule_channel_irq(ef4_get_channel(efx, 1));
	return IRQ_HANDLED;
}

/**************************************************************************
 *
 * RSS
 *
 **************************************************************************
 */
static int dummy_rx_push_rss_config(struct ef4_nic *efx, bool user,
				    const u32 *rx_indir_table)
{
	(void) efx;
	(void) user;
	(void) rx_indir_table;
	return -ENOSYS;
}

static int falcon_b0_rx_push_rss_config(struct ef4_nic *efx, bool user,
					const u32 *rx_indir_table)
{
	ef4_oword_t temp;

	(void) user;
	/* Set hash key for IPv4 */
	memcpy(&temp, efx->rx_hash_key, sizeof(temp));
	ef4_writeo(efx, &temp, FR_BZ_RX_RSS_TKEY);

	memcpy(efx->rx_indir_table, rx_indir_table,
	       sizeof(efx->rx_indir_table));
	ef4_farch_rx_push_indir_table(efx);
	return 0;
}

/**************************************************************************
 *
 * EEPROM/flash
 *
 **************************************************************************
 */

#define FALCON_SPI_MAX_LEN sizeof(ef4_oword_t)

static int falcon_spi_poll(struct ef4_nic *efx)
{
	ef4_oword_t reg;
	ef4_reado(efx, &reg, FR_AB_EE_SPI_HCMD);
	return EF4_OWORD_FIELD(reg, FRF_AB_EE_SPI_HCMD_CMD_EN) ? -EBUSY : 0;
}

/* Wait for SPI command completion */
static int falcon_spi_wait(struct ef4_nic *efx)
{
	/* Most commands will finish quickly, so we start polling at
	 * very short intervals.  Sometimes the command may have to
	 * wait for VPD or expansion ROM access outside of our
	 * control, so we allow up to 100 ms.
	 */
	unsigned long timeout = jiffies + 1 + DIV_ROUND_UP(HZ, 10);
	int i;

	for (i = 0; i < 10; i++) {
		if (!falcon_spi_poll(efx))
			return 0;
		udelay(10);
	}

	for (;;) {
		if (!falcon_spi_poll(efx))
			return 0;
		if (time_after_eq(jiffies, timeout)) {
			netif_err(efx, hw, efx->net_dev,
				  "timed out waiting for SPI\n");
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	}
}

static int
falcon_spi_cmd(struct ef4_nic *efx, const struct falcon_spi_device *spi,
	       unsigned int command, int address,
	       const void *in, void *out, size_t len)
{
	bool addressed = (address >= 0);
	bool reading = (out != NULL);
	ef4_oword_t reg;
	int rc;

	/* Input validation */
	if (len > FALCON_SPI_MAX_LEN)
		return -EINVAL;

	/* Check that previous command is not still running */
	rc = falcon_spi_poll(efx);
	if (rc)
		return rc;

	/* Program address register, if we have an address */
	if (addressed) {
		EF4_POPULATE_OWORD_1(reg, FRF_AB_EE_SPI_HADR_ADR, address);
		ef4_writeo(efx, &reg, FR_AB_EE_SPI_HADR);
	}

	/* Program data register, if we have data */
	if (in != NULL) {
		memcpy(&reg, in, len);
		ef4_writeo(efx, &reg, FR_AB_EE_SPI_HDATA);
	}

	/* Issue read/write command */
	EF4_POPULATE_OWORD_7(reg,
			     FRF_AB_EE_SPI_HCMD_CMD_EN, 1,
			     FRF_AB_EE_SPI_HCMD_SF_SEL, spi->device_id,
			     FRF_AB_EE_SPI_HCMD_DABCNT, len,
			     FRF_AB_EE_SPI_HCMD_READ, reading,
			     FRF_AB_EE_SPI_HCMD_DUBCNT, 0,
			     FRF_AB_EE_SPI_HCMD_ADBCNT,
			     (addressed ? spi->addr_len : 0),
			     FRF_AB_EE_SPI_HCMD_ENC, command);
	ef4_writeo(efx, &reg, FR_AB_EE_SPI_HCMD);

	/* Wait for read/write to complete */
	rc = falcon_spi_wait(efx);
	if (rc)
		return rc;

	/* Read data */
	if (out != NULL) {
		ef4_reado(efx, &reg, FR_AB_EE_SPI_HDATA);
		memcpy(out, &reg, len);
	}

	return 0;
}

static inline u8
falcon_spi_munge_command(const struct falcon_spi_device *spi,
			 const u8 command, const unsigned int address)
{
	return command | (((address >> 8) & spi->munge_address) << 3);
}

static int
falcon_spi_read(struct ef4_nic *efx, const struct falcon_spi_device *spi,
		loff_t start, size_t len, size_t *retlen, u8 *buffer)
{
	size_t block_len, pos = 0;
	unsigned int command;
	int rc = 0;

	while (pos < len) {
		block_len = min(len - pos, FALCON_SPI_MAX_LEN);

		command = falcon_spi_munge_command(spi, SPI_READ, start + pos);
		rc = falcon_spi_cmd(efx, spi, command, start + pos, NULL,
				    buffer + pos, block_len);
		if (rc)
			break;
		pos += block_len;

		/* Avoid locking up the system */
		cond_resched();
		if (signal_pending(current)) {
			rc = -EINTR;
			break;
		}
	}

	if (retlen)
		*retlen = pos;
	return rc;
}
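
/* A hedged usage sketch for the reader: reading the first bytes of the
 * boot configuration area from a large EEPROM (assuming spi_eeprom is
 * present; callers in this file hold nic_data->spi_lock around such
 * accesses):
 *
 *	u8 buf[16];
 *	size_t retlen;
 *	int rc = falcon_spi_read(efx, &nic_data->spi_eeprom,
 *				 FALCON_EEPROM_BOOTCONFIG_START,
 *				 sizeof(buf), &retlen, buf);
 *
 * Note the command munging above: for devices with more address bits than
 * address bytes (munge_address set), falcon_spi_munge_command() folds
 * address bit 8 into bit 3 of the opcode, which is where such small
 * EEPROMs expect it.
 */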

#ifdef CONFIG_SFC_FALCON_MTD

struct falcon_mtd_partition {
	struct ef4_mtd_partition common;
	const struct falcon_spi_device *spi;
	size_t offset;
};

#define to_falcon_mtd_partition(mtd)				\
	container_of(mtd, struct falcon_mtd_partition, common.mtd)

static size_t
falcon_spi_write_limit(const struct falcon_spi_device *spi, size_t start)
{
	return min(FALCON_SPI_MAX_LEN,
		   (spi->block_size - (start & (spi->block_size - 1))));
}

/* Wait up to 10 ms for buffered write completion */
static int
falcon_spi_wait_write(struct ef4_nic *efx, const struct falcon_spi_device *spi)
{
	unsigned long timeout = jiffies + 1 + DIV_ROUND_UP(HZ, 100);
	u8 status;
	int rc;

	for (;;) {
		rc = falcon_spi_cmd(efx, spi, SPI_RDSR, -1, NULL,
				    &status, sizeof(status));
		if (rc)
			return rc;
		if (!(status & SPI_STATUS_NRDY))
			return 0;
		if (time_after_eq(jiffies, timeout)) {
			netif_err(efx, hw, efx->net_dev,
				  "SPI write timeout on device %d"
				  " last status=0x%02x\n",
				  spi->device_id, status);
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	}
}

static int
falcon_spi_write(struct ef4_nic *efx, const struct falcon_spi_device *spi,
		 loff_t start, size_t len, size_t *retlen, const u8 *buffer)
{
	u8 verify_buffer[FALCON_SPI_MAX_LEN];
	size_t block_len, pos = 0;
	unsigned int command;
	int rc = 0;

	while (pos < len) {
		rc = falcon_spi_cmd(efx, spi, SPI_WREN, -1, NULL, NULL, 0);
		if (rc)
			break;

		block_len = min(len - pos,
				falcon_spi_write_limit(spi, start + pos));
		command = falcon_spi_munge_command(spi, SPI_WRITE, start + pos);
		rc = falcon_spi_cmd(efx, spi, command, start + pos,
				    buffer + pos, NULL, block_len);
		if (rc)
			break;

		rc = falcon_spi_wait_write(efx, spi);
		if (rc)
			break;

		/* Read the block back and verify it before moving on */
		command = falcon_spi_munge_command(spi, SPI_READ, start + pos);
		rc = falcon_spi_cmd(efx, spi, command, start + pos,
				    NULL, verify_buffer, block_len);
		if (rc)
			break;
		if (memcmp(verify_buffer, buffer + pos, block_len)) {
			rc = -EIO;
			break;
		}

		pos += block_len;

		/* Avoid locking up the system */
		cond_resched();
		if (signal_pending(current)) {
			rc = -EINTR;
			break;
		}
	}

	if (retlen)
		*retlen = pos;
	return rc;
}
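
/* A worked example of the block arithmetic above, assuming a device with
 * a 256-byte write page (block_size == 256): a write starting at 0x1f8
 * is limited to falcon_spi_write_limit(spi, 0x1f8) ==
 * min(FALCON_SPI_MAX_LEN, 256 - 0xf8) == 8 bytes, so the transfer stops
 * at the page boundary instead of wrapping within the device's write
 * page; the next iteration then starts page-aligned at 0x200.
 */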

static int
falcon_spi_slow_wait(struct falcon_mtd_partition *part, bool uninterruptible)
{
	const struct falcon_spi_device *spi = part->spi;
	struct ef4_nic *efx = part->common.mtd.priv;
	u8 status;
	int rc, i;

	/* Wait up to 4s for flash/EEPROM to finish a slow operation. */
	for (i = 0; i < 40; i++) {
		__set_current_state(uninterruptible ?
				    TASK_UNINTERRUPTIBLE : TASK_INTERRUPTIBLE);
		schedule_timeout(HZ / 10);
		rc = falcon_spi_cmd(efx, spi, SPI_RDSR, -1, NULL,
				    &status, sizeof(status));
		if (rc)
			return rc;
		if (!(status & SPI_STATUS_NRDY))
			return 0;
		if (signal_pending(current))
			return -EINTR;
	}
	pr_err("%s: timed out waiting for %s\n",
	       part->common.name, part->common.dev_type_name);
	return -ETIMEDOUT;
}

static int
falcon_spi_unlock(struct ef4_nic *efx, const struct falcon_spi_device *spi)
{
	const u8 unlock_mask = (SPI_STATUS_BP2 | SPI_STATUS_BP1 |
				SPI_STATUS_BP0);
	u8 status;
	int rc;

	rc = falcon_spi_cmd(efx, spi, SPI_RDSR, -1, NULL,
			    &status, sizeof(status));
	if (rc)
		return rc;

	if (!(status & unlock_mask))
		return 0; /* already unlocked */

	rc = falcon_spi_cmd(efx, spi, SPI_WREN, -1, NULL, NULL, 0);
	if (rc)
		return rc;
	rc = falcon_spi_cmd(efx, spi, SPI_SST_EWSR, -1, NULL, NULL, 0);
	if (rc)
		return rc;

	status &= ~unlock_mask;
	rc = falcon_spi_cmd(efx, spi, SPI_WRSR, -1, &status,
			    NULL, sizeof(status));
	if (rc)
		return rc;
	rc = falcon_spi_wait_write(efx, spi);
	if (rc)
		return rc;

	return 0;
}

#define FALCON_SPI_VERIFY_BUF_LEN 16

static int
falcon_spi_erase(struct falcon_mtd_partition *part, loff_t start, size_t len)
{
	const struct falcon_spi_device *spi = part->spi;
	struct ef4_nic *efx = part->common.mtd.priv;
	unsigned pos, block_len;
	u8 empty[FALCON_SPI_VERIFY_BUF_LEN];
	u8 buffer[FALCON_SPI_VERIFY_BUF_LEN];
	int rc;

	if (len != spi->erase_size)
		return -EINVAL;

	if (spi->erase_command == 0)
		return -EOPNOTSUPP;

	rc = falcon_spi_unlock(efx, spi);
	if (rc)
		return rc;
	rc = falcon_spi_cmd(efx, spi, SPI_WREN, -1, NULL, NULL, 0);
	if (rc)
		return rc;
	rc = falcon_spi_cmd(efx, spi, spi->erase_command, start, NULL,
			    NULL, 0);
	if (rc)
		return rc;
	rc = falcon_spi_slow_wait(part, false);

	/* Verify the entire region has been wiped */
	memset(empty, 0xff, sizeof(empty));
	for (pos = 0; pos < len; pos += block_len) {
		block_len = min(len - pos, sizeof(buffer));
		rc = falcon_spi_read(efx, spi, start + pos, block_len,
				     NULL, buffer);
		if (rc)
			return rc;
		if (memcmp(empty, buffer, block_len))
			return -EIO;

		/* Avoid locking up the system */
		cond_resched();
		if (signal_pending(current))
			return -EINTR;
	}

	return rc;
}

static void falcon_mtd_rename(struct ef4_mtd_partition *part)
{
	struct ef4_nic *efx = part->mtd.priv;

	snprintf(part->name, sizeof(part->name), "%s %s",
		 efx->name, part->type_name);
}

static int falcon_mtd_read(struct mtd_info *mtd, loff_t start,
			   size_t len, size_t *retlen, u8 *buffer)
{
	struct falcon_mtd_partition *part = to_falcon_mtd_partition(mtd);
	struct ef4_nic *efx = mtd->priv;
	struct falcon_nic_data *nic_data = efx->nic_data;
	int rc;

	rc = mutex_lock_interruptible(&nic_data->spi_lock);
	if (rc)
		return rc;
	rc = falcon_spi_read(efx, part->spi, part->offset + start,
			     len, retlen, buffer);
	mutex_unlock(&nic_data->spi_lock);
	return rc;
}

static int falcon_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len)
{
	struct falcon_mtd_partition *part = to_falcon_mtd_partition(mtd);
	struct ef4_nic *efx = mtd->priv;
	struct falcon_nic_data *nic_data = efx->nic_data;
	int rc;

	rc = mutex_lock_interruptible(&nic_data->spi_lock);
	if (rc)
		return rc;
	rc = falcon_spi_erase(part, part->offset + start, len);
	mutex_unlock(&nic_data->spi_lock);
	return rc;
}

static int falcon_mtd_write(struct mtd_info *mtd, loff_t start,
			    size_t len, size_t *retlen, const u8 *buffer)
{
	struct falcon_mtd_partition *part = to_falcon_mtd_partition(mtd);
	struct ef4_nic *efx = mtd->priv;
	struct falcon_nic_data *nic_data = efx->nic_data;
	int rc;

	rc = mutex_lock_interruptible(&nic_data->spi_lock);
	if (rc)
		return rc;
	rc = falcon_spi_write(efx, part->spi, part->offset + start,
			      len, retlen, buffer);
	mutex_unlock(&nic_data->spi_lock);
	return rc;
}

static int falcon_mtd_sync(struct mtd_info *mtd)
{
	struct falcon_mtd_partition *part = to_falcon_mtd_partition(mtd);
	struct ef4_nic *efx = mtd->priv;
	struct falcon_nic_data *nic_data = efx->nic_data;
	int rc;

	mutex_lock(&nic_data->spi_lock);
	rc = falcon_spi_slow_wait(part, true);
	mutex_unlock(&nic_data->spi_lock);
	return rc;
}

static int falcon_mtd_probe(struct ef4_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	struct falcon_mtd_partition *parts;
	struct falcon_spi_device *spi;
	size_t n_parts;
	int rc = -ENODEV;

	ASSERT_RTNL();

	/* Allocate space for maximum number of partitions */
	parts = kcalloc(2, sizeof(*parts), GFP_KERNEL);
	if (!parts)
		return -ENOMEM;
	n_parts = 0;

	spi = &nic_data->spi_flash;
	if (falcon_spi_present(spi) && spi->size > FALCON_FLASH_BOOTCODE_START) {
		parts[n_parts].spi = spi;
		parts[n_parts].offset = FALCON_FLASH_BOOTCODE_START;
		parts[n_parts].common.dev_type_name = "flash";
		parts[n_parts].common.type_name = "sfc_flash_bootrom";
		parts[n_parts].common.mtd.type = MTD_NORFLASH;
		parts[n_parts].common.mtd.flags = MTD_CAP_NORFLASH;
		parts[n_parts].common.mtd.size =
			spi->size - FALCON_FLASH_BOOTCODE_START;
		parts[n_parts].common.mtd.erasesize = spi->erase_size;
		n_parts++;
	}

	spi = &nic_data->spi_eeprom;
	if (falcon_spi_present(spi) && spi->size > FALCON_EEPROM_BOOTCONFIG_START) {
		parts[n_parts].spi = spi;
		parts[n_parts].offset = FALCON_EEPROM_BOOTCONFIG_START;
		parts[n_parts].common.dev_type_name = "EEPROM";
		parts[n_parts].common.type_name = "sfc_bootconfig";
		parts[n_parts].common.mtd.type = MTD_RAM;
		parts[n_parts].common.mtd.flags = MTD_CAP_RAM;
		parts[n_parts].common.mtd.size =
			min(spi->size, FALCON_EEPROM_BOOTCONFIG_END) -
			FALCON_EEPROM_BOOTCONFIG_START;
		parts[n_parts].common.mtd.erasesize = spi->erase_size;
		n_parts++;
	}

	rc = ef4_mtd_add(efx, &parts[0].common, n_parts, sizeof(*parts));
	if (rc)
		kfree(parts);
	return rc;
}

#endif /* CONFIG_SFC_FALCON_MTD */

/**************************************************************************
 *
 * XMAC operations
 *
 **************************************************************************
 */

/* Configure the XAUI driver that is an output from Falcon */
static void falcon_setup_xaui(struct ef4_nic *efx)
{
	ef4_oword_t sdctl, txdrv;

	/* Move the XAUI into low power, unless there is no PHY, in
	 * which case the XAUI will have to drive a cable.
	 */
	if (efx->phy_type == PHY_TYPE_NONE)
		return;

	ef4_reado(efx, &sdctl, FR_AB_XX_SD_CTL);
	EF4_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVD, FFE_AB_XX_SD_CTL_DRV_DEF);
	EF4_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVD, FFE_AB_XX_SD_CTL_DRV_DEF);
	EF4_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVC, FFE_AB_XX_SD_CTL_DRV_DEF);
	EF4_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVC, FFE_AB_XX_SD_CTL_DRV_DEF);
	EF4_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVB, FFE_AB_XX_SD_CTL_DRV_DEF);
	EF4_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVB, FFE_AB_XX_SD_CTL_DRV_DEF);
	EF4_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVA, FFE_AB_XX_SD_CTL_DRV_DEF);
	EF4_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVA, FFE_AB_XX_SD_CTL_DRV_DEF);
	ef4_writeo(efx, &sdctl, FR_AB_XX_SD_CTL);

	EF4_POPULATE_OWORD_8(txdrv,
			     FRF_AB_XX_DEQD, FFE_AB_XX_TXDRV_DEQ_DEF,
			     FRF_AB_XX_DEQC, FFE_AB_XX_TXDRV_DEQ_DEF,
			     FRF_AB_XX_DEQB, FFE_AB_XX_TXDRV_DEQ_DEF,
			     FRF_AB_XX_DEQA, FFE_AB_XX_TXDRV_DEQ_DEF,
			     FRF_AB_XX_DTXD, FFE_AB_XX_TXDRV_DTX_DEF,
			     FRF_AB_XX_DTXC, FFE_AB_XX_TXDRV_DTX_DEF,
			     FRF_AB_XX_DTXB, FFE_AB_XX_TXDRV_DTX_DEF,
			     FRF_AB_XX_DTXA, FFE_AB_XX_TXDRV_DTX_DEF);
	ef4_writeo(efx, &txdrv, FR_AB_XX_TXDRV_CTL);
}

int falcon_reset_xaui(struct ef4_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	ef4_oword_t reg;
	int count;

	/* Don't fetch MAC statistics over an XMAC reset */
	WARN_ON(nic_data->stats_disable_count == 0);

	/* Start reset sequence */
	EF4_POPULATE_OWORD_1(reg, FRF_AB_XX_RST_XX_EN, 1);
	ef4_writeo(efx, &reg, FR_AB_XX_PWR_RST);

	/* Wait up to 10 ms for completion, then reinitialise */
	for (count = 0; count < 1000; count++) {
		ef4_reado(efx, &reg, FR_AB_XX_PWR_RST);
		if (EF4_OWORD_FIELD(reg, FRF_AB_XX_RST_XX_EN) == 0 &&
		    EF4_OWORD_FIELD(reg, FRF_AB_XX_SD_RST_ACT) == 0) {
			falcon_setup_xaui(efx);
			return 0;
		}
		udelay(10);
	}
	netif_err(efx, hw, efx->net_dev,
		  "timed out waiting for XAUI/XGXS reset\n");
	return -ETIMEDOUT;
}

static void falcon_ack_status_intr(struct ef4_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	ef4_oword_t reg;

	if ((ef4_nic_rev(efx) != EF4_REV_FALCON_B0) || LOOPBACK_INTERNAL(efx))
		return;

	/* We expect xgmii faults if the wireside link is down */
	if (!efx->link_state.up)
		return;

	/* We can only use this interrupt to signal the negative edge of
	 * xaui_align [we have to poll the positive edge].
	 */
	if (nic_data->xmac_poll_required)
		return;

	ef4_reado(efx, &reg, FR_AB_XM_MGT_INT_MSK);
}

static bool falcon_xgxs_link_ok(struct ef4_nic *efx)
{
	ef4_oword_t reg;
	bool align_done, link_ok = false;
	int sync_status;

	/* Read link status */
	ef4_reado(efx, &reg, FR_AB_XX_CORE_STAT);

	align_done = EF4_OWORD_FIELD(reg, FRF_AB_XX_ALIGN_DONE);
	sync_status = EF4_OWORD_FIELD(reg, FRF_AB_XX_SYNC_STAT);
	if (align_done && (sync_status == FFE_AB_XX_STAT_ALL_LANES))
		link_ok = true;

	/* Clear link status ready for next read */
	EF4_SET_OWORD_FIELD(reg, FRF_AB_XX_COMMA_DET, FFE_AB_XX_STAT_ALL_LANES);
	EF4_SET_OWORD_FIELD(reg, FRF_AB_XX_CHAR_ERR, FFE_AB_XX_STAT_ALL_LANES);
	EF4_SET_OWORD_FIELD(reg, FRF_AB_XX_DISPERR, FFE_AB_XX_STAT_ALL_LANES);
	ef4_writeo(efx, &reg, FR_AB_XX_CORE_STAT);

	return link_ok;
}

static bool falcon_xmac_link_ok(struct ef4_nic *efx)
{
	/*
	 * Check MAC's XGXS link status except when using XGMII loopback
	 * which bypasses the XGXS block.
	 * If possible, check PHY's XGXS link status except when using
	 * MAC loopback.
	 */
	return (efx->loopback_mode == LOOPBACK_XGMII ||
		falcon_xgxs_link_ok(efx)) &&
		(!(efx->mdio.mmds & (1 << MDIO_MMD_PHYXS)) ||
		 LOOPBACK_INTERNAL(efx) ||
		 ef4_mdio_phyxgxs_lane_sync(efx));
}

static void falcon_reconfigure_xmac_core(struct ef4_nic *efx)
{
	unsigned int max_frame_len;
	ef4_oword_t reg;
	bool rx_fc = !!(efx->link_state.fc & EF4_FC_RX);
	bool tx_fc = !!(efx->link_state.fc & EF4_FC_TX);

	/* Configure MAC - cut-thru mode is hard wired on */
	EF4_POPULATE_OWORD_3(reg,
			     FRF_AB_XM_RX_JUMBO_MODE, 1,
			     FRF_AB_XM_TX_STAT_EN, 1,
			     FRF_AB_XM_RX_STAT_EN, 1);
	ef4_writeo(efx, &reg, FR_AB_XM_GLB_CFG);

	/* Configure TX */
	EF4_POPULATE_OWORD_6(reg,
			     FRF_AB_XM_TXEN, 1,
			     FRF_AB_XM_TX_PRMBL, 1,
			     FRF_AB_XM_AUTO_PAD, 1,
			     FRF_AB_XM_TXCRC, 1,
			     FRF_AB_XM_FCNTL, tx_fc,
			     FRF_AB_XM_IPG, 0x3);
	ef4_writeo(efx, &reg, FR_AB_XM_TX_CFG);

	/* Configure RX */
	EF4_POPULATE_OWORD_5(reg,
			     FRF_AB_XM_RXEN, 1,
			     FRF_AB_XM_AUTO_DEPAD, 0,
			     FRF_AB_XM_ACPT_ALL_MCAST, 1,
			     FRF_AB_XM_ACPT_ALL_UCAST, !efx->unicast_filter,
			     FRF_AB_XM_PASS_CRC_ERR, 1);
	ef4_writeo(efx, &reg, FR_AB_XM_RX_CFG);

	/* Set frame length */
	max_frame_len = EF4_MAX_FRAME_LEN(efx->net_dev->mtu);
	EF4_POPULATE_OWORD_1(reg, FRF_AB_XM_MAX_RX_FRM_SIZE, max_frame_len);
	ef4_writeo(efx, &reg, FR_AB_XM_RX_PARAM);
	EF4_POPULATE_OWORD_2(reg,
			     FRF_AB_XM_MAX_TX_FRM_SIZE, max_frame_len,
			     FRF_AB_XM_TX_JUMBO_MODE, 1);
	ef4_writeo(efx, &reg, FR_AB_XM_TX_PARAM);

	EF4_POPULATE_OWORD_2(reg,
			     FRF_AB_XM_PAUSE_TIME, 0xfffe, /* MAX PAUSE TIME */
			     FRF_AB_XM_DIS_FCNTL, !rx_fc);
	ef4_writeo(efx, &reg, FR_AB_XM_FC);

	/* Set MAC address */
	memcpy(&reg, &efx->net_dev->dev_addr[0], 4);
	ef4_writeo(efx, &reg, FR_AB_XM_ADR_LO);
	memcpy(&reg, &efx->net_dev->dev_addr[4], 2);
	ef4_writeo(efx, &reg, FR_AB_XM_ADR_HI);
}
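
/* A worked example of the address packing above (not an additional
 * register definition): for a hypothetical MAC address 00:0f:53:01:02:03,
 * the first memcpy() places bytes 0-3 in XM_ADR_LO (0x01530f00 when read
 * as a 32-bit value, assuming the little-endian register layout this
 * hardware uses) and the second places bytes 4-5 (02:03) in the low 16
 * bits of XM_ADR_HI.
 */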

static void falcon_reconfigure_xgxs_core(struct ef4_nic *efx)
{
	ef4_oword_t reg;
	bool xgxs_loopback = (efx->loopback_mode == LOOPBACK_XGXS);
	bool xaui_loopback = (efx->loopback_mode == LOOPBACK_XAUI);
	bool xgmii_loopback = (efx->loopback_mode == LOOPBACK_XGMII);
	bool old_xgmii_loopback, old_xgxs_loopback, old_xaui_loopback;

	/* The XGXS block is flaky and will need to be reset if moving
	 * into or out of XGMII, XGXS or XAUI loopbacks. */
	ef4_reado(efx, &reg, FR_AB_XX_CORE_STAT);
	old_xgxs_loopback = EF4_OWORD_FIELD(reg, FRF_AB_XX_XGXS_LB_EN);
	old_xgmii_loopback = EF4_OWORD_FIELD(reg, FRF_AB_XX_XGMII_LB_EN);

	ef4_reado(efx, &reg, FR_AB_XX_SD_CTL);
	old_xaui_loopback = EF4_OWORD_FIELD(reg, FRF_AB_XX_LPBKA);

	/* The PHY driver may have turned XAUI off */
	if ((xgxs_loopback != old_xgxs_loopback) ||
	    (xaui_loopback != old_xaui_loopback) ||
	    (xgmii_loopback != old_xgmii_loopback))
		falcon_reset_xaui(efx);

	ef4_reado(efx, &reg, FR_AB_XX_CORE_STAT);
	EF4_SET_OWORD_FIELD(reg, FRF_AB_XX_FORCE_SIG,
			    (xgxs_loopback || xaui_loopback) ?
			    FFE_AB_XX_FORCE_SIG_ALL_LANES : 0);
	EF4_SET_OWORD_FIELD(reg, FRF_AB_XX_XGXS_LB_EN, xgxs_loopback);
	EF4_SET_OWORD_FIELD(reg, FRF_AB_XX_XGMII_LB_EN, xgmii_loopback);
	ef4_writeo(efx, &reg, FR_AB_XX_CORE_STAT);

	ef4_reado(efx, &reg, FR_AB_XX_SD_CTL);
	EF4_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKD, xaui_loopback);
	EF4_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKC, xaui_loopback);
	EF4_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKB, xaui_loopback);
	EF4_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKA, xaui_loopback);
	ef4_writeo(efx, &reg, FR_AB_XX_SD_CTL);
}

/* Try to bring up the Falcon side of the Falcon-PHY XAUI link */
static bool falcon_xmac_link_ok_retry(struct ef4_nic *efx, int tries)
{
	bool mac_up = falcon_xmac_link_ok(efx);

	if (LOOPBACK_MASK(efx) & LOOPBACKS_EXTERNAL(efx) & LOOPBACKS_WS ||
	    ef4_phy_mode_disabled(efx->phy_mode))
		/* XAUI link is expected to be down */
		return mac_up;

	falcon_stop_nic_stats(efx);

	while (!mac_up && tries) {
		netif_dbg(efx, hw, efx->net_dev, "bashing xaui\n");
		falcon_reset_xaui(efx);
		udelay(200);

		mac_up = falcon_xmac_link_ok(efx);
		--tries;
	}

	falcon_start_nic_stats(efx);

	return mac_up;
}

static bool falcon_xmac_check_fault(struct ef4_nic *efx)
{
	return !falcon_xmac_link_ok_retry(efx, 5);
}

static int falcon_reconfigure_xmac(struct ef4_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;

	ef4_farch_filter_sync_rx_mode(efx);

	falcon_reconfigure_xgxs_core(efx);
	falcon_reconfigure_xmac_core(efx);

	falcon_reconfigure_mac_wrapper(efx);

	nic_data->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 5);
	falcon_ack_status_intr(efx);

	return 0;
}

static void falcon_poll_xmac(struct ef4_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;

	/* We expect xgmii faults if the wireside link is down */
	if (!efx->link_state.up || !nic_data->xmac_poll_required)
		return;

	nic_data->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 1);
	falcon_ack_status_intr(efx);
}

/**************************************************************************
 *
 * MAC wrapper
 *
 **************************************************************************
 */

static void falcon_push_multicast_hash(struct ef4_nic *efx)
{
	union ef4_multicast_hash *mc_hash = &efx->multicast_hash;

	WARN_ON(!mutex_is_locked(&efx->mac_lock));

	ef4_writeo(efx, &mc_hash->oword[0], FR_AB_MAC_MC_HASH_REG0);
	ef4_writeo(efx, &mc_hash->oword[1], FR_AB_MAC_MC_HASH_REG1);
}
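
/* The multicast hash is a 256-bit bucket vector split across two 128-bit
 * registers: buckets 0-127 in MAC_MC_HASH_REG0 and 128-255 in
 * MAC_MC_HASH_REG1.  The bucket for a given address is computed in the
 * generic filter code elsewhere in the driver; this function only pushes
 * the current vector to the MAC, which is why it must run under mac_lock.
 */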

static void falcon_reset_macs(struct ef4_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	ef4_oword_t reg, mac_ctrl;
	int count;

	if (ef4_nic_rev(efx) < EF4_REV_FALCON_B0) {
		/* It's not safe to use GLB_CTL_REG to reset the
		 * MACs, so instead use the internal MAC resets
		 */
		EF4_POPULATE_OWORD_1(reg, FRF_AB_XM_CORE_RST, 1);
		ef4_writeo(efx, &reg, FR_AB_XM_GLB_CFG);

		for (count = 0; count < 10000; count++) {
			ef4_reado(efx, &reg, FR_AB_XM_GLB_CFG);
			if (EF4_OWORD_FIELD(reg, FRF_AB_XM_CORE_RST) == 0)
				return;
			udelay(10);
		}

		netif_err(efx, hw, efx->net_dev,
			  "timed out waiting for XMAC core reset\n");
	}

	/* MAC stats will fail whilst the TX FIFO is draining */
	WARN_ON(nic_data->stats_disable_count == 0);

	ef4_reado(efx, &mac_ctrl, FR_AB_MAC_CTRL);
	EF4_SET_OWORD_FIELD(mac_ctrl, FRF_BB_TXFIFO_DRAIN_EN, 1);
	ef4_writeo(efx, &mac_ctrl, FR_AB_MAC_CTRL);

	ef4_reado(efx, &reg, FR_AB_GLB_CTL);
	EF4_SET_OWORD_FIELD(reg, FRF_AB_RST_XGTX, 1);
	EF4_SET_OWORD_FIELD(reg, FRF_AB_RST_XGRX, 1);
	EF4_SET_OWORD_FIELD(reg, FRF_AB_RST_EM, 1);
	ef4_writeo(efx, &reg, FR_AB_GLB_CTL);

	count = 0;
	while (1) {
		ef4_reado(efx, &reg, FR_AB_GLB_CTL);
		if (!EF4_OWORD_FIELD(reg, FRF_AB_RST_XGTX) &&
		    !EF4_OWORD_FIELD(reg, FRF_AB_RST_XGRX) &&
		    !EF4_OWORD_FIELD(reg, FRF_AB_RST_EM)) {
			netif_dbg(efx, hw, efx->net_dev,
				  "Completed MAC reset after %d loops\n",
				  count);
			break;
		}
		if (count > 20) {
			netif_err(efx, hw, efx->net_dev, "MAC reset failed\n");
			break;
		}
		count++;
		udelay(10);
	}

	/* Ensure the correct MAC is selected before statistics
	 * are re-enabled by the caller */
	ef4_writeo(efx, &mac_ctrl, FR_AB_MAC_CTRL);

	falcon_setup_xaui(efx);
}

static void falcon_drain_tx_fifo(struct ef4_nic *efx)
{
	ef4_oword_t reg;

	if ((ef4_nic_rev(efx) < EF4_REV_FALCON_B0) ||
	    (efx->loopback_mode != LOOPBACK_NONE))
		return;

	ef4_reado(efx, &reg, FR_AB_MAC_CTRL);
	/* There is no point in draining more than once */
	if (EF4_OWORD_FIELD(reg, FRF_BB_TXFIFO_DRAIN_EN))
		return;

	falcon_reset_macs(efx);
}

static void falcon_deconfigure_mac_wrapper(struct ef4_nic *efx)
{
	ef4_oword_t reg;

	if (ef4_nic_rev(efx) < EF4_REV_FALCON_B0)
		return;

	/* Isolate the MAC -> RX */
	ef4_reado(efx, &reg, FR_AZ_RX_CFG);
	EF4_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, 0);
	ef4_writeo(efx, &reg, FR_AZ_RX_CFG);

	/* Isolate TX -> MAC */
	falcon_drain_tx_fifo(efx);
}

static void falcon_reconfigure_mac_wrapper(struct ef4_nic *efx)
{
	struct ef4_link_state *link_state = &efx->link_state;
	ef4_oword_t reg;
	int link_speed, isolate;

	isolate = !!ACCESS_ONCE(efx->reset_pending);

	switch (link_state->speed) {
	case 10000: link_speed = 3; break;
	case 1000:  link_speed = 2; break;
	case 100:   link_speed = 1; break;
	default:    link_speed = 0; break;
	}

	/* MAC_LINK_STATUS controls MAC backpressure but doesn't work
	 * as advertised.  Disable to ensure packets are not
	 * indefinitely held and the TX queue can be flushed at any
	 * point while the link is down.
	 */
	EF4_POPULATE_OWORD_5(reg,
			     FRF_AB_MAC_XOFF_VAL, 0xffff /* max pause time */,
			     FRF_AB_MAC_BCAD_ACPT, 1,
			     FRF_AB_MAC_UC_PROM, !efx->unicast_filter,
			     FRF_AB_MAC_LINK_STATUS, 1, /* always set */
			     FRF_AB_MAC_SPEED, link_speed);
	/* On B0, MAC backpressure can be disabled and packets get
	 * discarded. */
	if (ef4_nic_rev(efx) >= EF4_REV_FALCON_B0) {
		EF4_SET_OWORD_FIELD(reg, FRF_BB_TXFIFO_DRAIN_EN,
				    !link_state->up || isolate);
	}

	ef4_writeo(efx, &reg, FR_AB_MAC_CTRL);

	/* Restore the multicast hash registers. */
	falcon_push_multicast_hash(efx);

	ef4_reado(efx, &reg, FR_AZ_RX_CFG);
	/* Enable XOFF signal from RX FIFO (we enabled it during NIC
	 * initialisation but it may read back as 0) */
	EF4_SET_OWORD_FIELD(reg, FRF_AZ_RX_XOFF_MAC_EN, 1);
	/* Unisolate the MAC -> RX */
	if (ef4_nic_rev(efx) >= EF4_REV_FALCON_B0)
		EF4_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, !isolate);
	ef4_writeo(efx, &reg, FR_AZ_RX_CFG);
}

static void falcon_stats_request(struct ef4_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	ef4_oword_t reg;

	WARN_ON(nic_data->stats_pending);
	WARN_ON(nic_data->stats_disable_count);

	FALCON_XMAC_STATS_DMA_FLAG(efx) = 0;
	nic_data->stats_pending = true;
	wmb(); /* ensure done flag is clear */

	/* Initiate DMA transfer of stats */
	EF4_POPULATE_OWORD_2(reg,
			     FRF_AB_MAC_STAT_DMA_CMD, 1,
			     FRF_AB_MAC_STAT_DMA_ADR,
			     efx->stats_buffer.dma_addr);
	ef4_writeo(efx, &reg, FR_AB_MAC_STAT_DMA);

	mod_timer(&nic_data->stats_timer, round_jiffies_up(jiffies + HZ / 2));
}

static void falcon_stats_complete(struct ef4_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;

	if (!nic_data->stats_pending)
		return;

	nic_data->stats_pending = false;
	if (FALCON_XMAC_STATS_DMA_FLAG(efx)) {
		rmb(); /* read the done flag before the stats */
		ef4_nic_update_stats(falcon_stat_desc, FALCON_STAT_COUNT,
				     falcon_stat_mask, nic_data->stats,
				     efx->stats_buffer.addr, true);
	} else {
		netif_err(efx, hw, efx->net_dev,
			  "timed out waiting for statistics\n");
	}
}

static void falcon_stats_timer_func(unsigned long context)
{
	struct ef4_nic *efx = (struct ef4_nic *)context;
	struct falcon_nic_data *nic_data = efx->nic_data;

	spin_lock(&efx->stats_lock);

	falcon_stats_complete(efx);
	if (nic_data->stats_disable_count == 0)
		falcon_stats_request(efx);

	spin_unlock(&efx->stats_lock);
}
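
/* The done-flag protocol used above: falcon_stats_request() clears the
 * XgDmaDone word in the DMA buffer before ringing the doorbell (the wmb()
 * orders the clear against the doorbell write), and the MAC sets that
 * word once it has written the statistics block.  falcon_stats_complete()
 * therefore only tests that one word, with an rmb() so the counters are
 * not read ahead of the flag.  A hedged polling sketch of the pairing:
 *
 *	falcon_stats_request(efx);
 *	while (!FALCON_XMAC_STATS_DMA_FLAG(efx))
 *		cpu_relax();
 *	falcon_stats_complete(efx);
 *
 * (The driver does not spin like this; it polls from stats_timer with a
 * half-second deadline, as above.)
 */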

static bool falcon_loopback_link_poll(struct ef4_nic *efx)
{
	struct ef4_link_state old_state = efx->link_state;

	WARN_ON(!mutex_is_locked(&efx->mac_lock));
	WARN_ON(!LOOPBACK_INTERNAL(efx));

	efx->link_state.fd = true;
	efx->link_state.fc = efx->wanted_fc;
	efx->link_state.up = true;
	efx->link_state.speed = 10000;

	return !ef4_link_state_equal(&efx->link_state, &old_state);
}

static int falcon_reconfigure_port(struct ef4_nic *efx)
{
	int rc;

	WARN_ON(ef4_nic_rev(efx) > EF4_REV_FALCON_B0);

	/* Poll the PHY link state *before* reconfiguring it.  This means we
	 * will pick up the correct speed (in loopback) to select the correct
	 * MAC.
	 */
	if (LOOPBACK_INTERNAL(efx))
		falcon_loopback_link_poll(efx);
	else
		efx->phy_op->poll(efx);

	falcon_stop_nic_stats(efx);
	falcon_deconfigure_mac_wrapper(efx);

	falcon_reset_macs(efx);

	efx->phy_op->reconfigure(efx);
	rc = falcon_reconfigure_xmac(efx);
	BUG_ON(rc);

	falcon_start_nic_stats(efx);

	/* Synchronise efx->link_state with the kernel */
	ef4_link_status_changed(efx);

	return 0;
}

/* TX flow control may automatically turn itself off if the link
 * partner (intermittently) stops responding to pause frames.  There
 * isn't any indication that this has happened, so the best we can do
 * is leave it up to the user to spot this and fix it by cycling
 * transmit flow control on this end.
 */

static void falcon_a1_prepare_enable_fc_tx(struct ef4_nic *efx)
{
	/* Schedule a reset to recover */
	ef4_schedule_reset(efx, RESET_TYPE_INVISIBLE);
}

static void falcon_b0_prepare_enable_fc_tx(struct ef4_nic *efx)
{
	/* Recover by resetting the EM block */
	falcon_stop_nic_stats(efx);
	falcon_drain_tx_fifo(efx);
	falcon_reconfigure_xmac(efx);
	falcon_start_nic_stats(efx);
}

/**************************************************************************
 *
 * PHY access via GMII
 *
 **************************************************************************
 */

/* Wait for GMII access to complete */
static int falcon_gmii_wait(struct ef4_nic *efx)
{
	ef4_oword_t md_stat;
	int count;

	/* wait up to 50 ms - taken max from datasheet */
	for (count = 0; count < 5000; count++) {
		ef4_reado(efx, &md_stat, FR_AB_MD_STAT);
		if (EF4_OWORD_FIELD(md_stat, FRF_AB_MD_BSY) == 0) {
			if (EF4_OWORD_FIELD(md_stat, FRF_AB_MD_LNFL) != 0 ||
			    EF4_OWORD_FIELD(md_stat, FRF_AB_MD_BSERR) != 0) {
				netif_err(efx, hw, efx->net_dev,
					  "error from GMII access "
					  EF4_OWORD_FMT"\n",
					  EF4_OWORD_VAL(md_stat));
				return -EIO;
			}
			return 0;
		}
		udelay(10);
	}
	netif_err(efx, hw, efx->net_dev, "timed out waiting for GMII\n");
	return -ETIMEDOUT;
}

/* Write an MDIO register of a PHY connected to Falcon. */
static int falcon_mdio_write(struct net_device *net_dev,
			     int prtad, int devad, u16 addr, u16 value)
{
	struct ef4_nic *efx = netdev_priv(net_dev);
	struct falcon_nic_data *nic_data = efx->nic_data;
	ef4_oword_t reg;
	int rc;

	netif_vdbg(efx, hw, efx->net_dev,
		   "writing MDIO %d register %d.%d with 0x%04x\n",
		   prtad, devad, addr, value);

	mutex_lock(&nic_data->mdio_lock);

	/* Check MDIO not currently being accessed */
	rc = falcon_gmii_wait(efx);
	if (rc)
		goto out;

	/* Write the address/ID register */
	EF4_POPULATE_OWORD_1(reg, FRF_AB_MD_PHY_ADR, addr);
	ef4_writeo(efx, &reg, FR_AB_MD_PHY_ADR);

	EF4_POPULATE_OWORD_2(reg, FRF_AB_MD_PRT_ADR, prtad,
			     FRF_AB_MD_DEV_ADR, devad);
	ef4_writeo(efx, &reg, FR_AB_MD_ID);

	/* Write data */
	EF4_POPULATE_OWORD_1(reg, FRF_AB_MD_TXD, value);
	ef4_writeo(efx, &reg, FR_AB_MD_TXD);

	EF4_POPULATE_OWORD_2(reg,
			     FRF_AB_MD_WRC, 1,
			     FRF_AB_MD_GC, 0);
	ef4_writeo(efx, &reg, FR_AB_MD_CS);

	/* Wait for data to be written */
	rc = falcon_gmii_wait(efx);
	if (rc) {
		/* Abort the write operation */
		EF4_POPULATE_OWORD_2(reg,
				     FRF_AB_MD_WRC, 0,
				     FRF_AB_MD_GC, 1);
		ef4_writeo(efx, &reg, FR_AB_MD_CS);
		udelay(10);
	}

out:
	mutex_unlock(&nic_data->mdio_lock);
	return rc;
}

/* Read an MDIO register of a PHY connected to Falcon. */
static int falcon_mdio_read(struct net_device *net_dev,
			    int prtad, int devad, u16 addr)
{
	struct ef4_nic *efx = netdev_priv(net_dev);
	struct falcon_nic_data *nic_data = efx->nic_data;
	ef4_oword_t reg;
	int rc;

	mutex_lock(&nic_data->mdio_lock);

	/* Check MDIO not currently being accessed */
	rc = falcon_gmii_wait(efx);
	if (rc)
		goto out;

	EF4_POPULATE_OWORD_1(reg, FRF_AB_MD_PHY_ADR, addr);
	ef4_writeo(efx, &reg, FR_AB_MD_PHY_ADR);

	EF4_POPULATE_OWORD_2(reg, FRF_AB_MD_PRT_ADR, prtad,
			     FRF_AB_MD_DEV_ADR, devad);
	ef4_writeo(efx, &reg, FR_AB_MD_ID);

	/* Request data to be read */
	EF4_POPULATE_OWORD_2(reg, FRF_AB_MD_RDC, 1, FRF_AB_MD_GC, 0);
	ef4_writeo(efx, &reg, FR_AB_MD_CS);

	/* Wait for data to become available */
	rc = falcon_gmii_wait(efx);
	if (rc == 0) {
		ef4_reado(efx, &reg, FR_AB_MD_RXD);
		rc = EF4_OWORD_FIELD(reg, FRF_AB_MD_RXD);
		netif_vdbg(efx, hw, efx->net_dev,
			   "read from MDIO %d register %d.%d, got %04x\n",
			   prtad, devad, addr, rc);
	} else {
		/* Abort the read operation */
		EF4_POPULATE_OWORD_2(reg,
				     FRF_AB_MD_RIC, 0,
				     FRF_AB_MD_GC, 1);
		ef4_writeo(efx, &reg, FR_AB_MD_CS);

		netif_dbg(efx, hw, efx->net_dev,
			  "read from MDIO %d register %d.%d, got error %d\n",
			  prtad, devad, addr, rc);
	}

out:
	mutex_unlock(&nic_data->mdio_lock);
	return rc;
}
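
/* These two hooks are exported through the standard mdio45 interface
 * (efx->mdio.mdio_read/mdio_write, wired up in falcon_probe_port()
 * below), so PHY code performs clause-45 accesses with a call like this
 * hedged sketch:
 *
 *	int stat = efx->mdio.mdio_read(efx->net_dev, efx->mdio.prtad,
 *				       MDIO_MMD_PHYXS, MDIO_STAT2);
 *
 * which lands in falcon_mdio_read() with prtad/devad/addr filled in.
 */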

/* This call is responsible for hooking in the MAC and PHY operations */
static int falcon_probe_port(struct ef4_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	int rc;

	switch (efx->phy_type) {
	case PHY_TYPE_SFX7101:
		efx->phy_op = &falcon_sfx7101_phy_ops;
		break;
	case PHY_TYPE_QT2022C2:
	case PHY_TYPE_QT2025C:
		efx->phy_op = &falcon_qt202x_phy_ops;
		break;
	case PHY_TYPE_TXC43128:
		efx->phy_op = &falcon_txc_phy_ops;
		break;
	default:
		netif_err(efx, probe, efx->net_dev, "Unknown PHY type %d\n",
			  efx->phy_type);
		return -ENODEV;
	}

	/* Fill out MDIO structure and loopback modes */
	mutex_init(&nic_data->mdio_lock);
	efx->mdio.mdio_read = falcon_mdio_read;
	efx->mdio.mdio_write = falcon_mdio_write;
	rc = efx->phy_op->probe(efx);
	if (rc != 0)
		return rc;

	/* Initial assumption */
	efx->link_state.speed = 10000;
	efx->link_state.fd = true;

	/* Hardware flow ctrl. FalconA RX FIFO too small for pause generation */
	if (ef4_nic_rev(efx) >= EF4_REV_FALCON_B0)
		efx->wanted_fc = EF4_FC_RX | EF4_FC_TX;
	else
		efx->wanted_fc = EF4_FC_RX;
	if (efx->mdio.mmds & MDIO_DEVS_AN)
		efx->wanted_fc |= EF4_FC_AUTO;

	/* Allocate buffer for stats */
	rc = ef4_nic_alloc_buffer(efx, &efx->stats_buffer,
				  FALCON_MAC_STATS_SIZE, GFP_KERNEL);
	if (rc)
		return rc;
	netif_dbg(efx, probe, efx->net_dev,
		  "stats buffer at %llx (virt %p phys %llx)\n",
		  (u64)efx->stats_buffer.dma_addr,
		  efx->stats_buffer.addr,
		  (u64)virt_to_phys(efx->stats_buffer.addr));

	return 0;
}

static void falcon_remove_port(struct ef4_nic *efx)
{
	efx->phy_op->remove(efx);
	ef4_nic_free_buffer(efx, &efx->stats_buffer);
}

/* Global events are basically PHY events */
static bool
falcon_handle_global_event(struct ef4_channel *channel, ef4_qword_t *event)
{
	struct ef4_nic *efx = channel->efx;
	struct falcon_nic_data *nic_data = efx->nic_data;

	if (EF4_QWORD_FIELD(*event, FSF_AB_GLB_EV_G_PHY0_INTR) ||
	    EF4_QWORD_FIELD(*event, FSF_AB_GLB_EV_XG_PHY0_INTR) ||
	    EF4_QWORD_FIELD(*event, FSF_AB_GLB_EV_XFP_PHY0_INTR))
		/* Ignored */
		return true;

	if ((ef4_nic_rev(efx) == EF4_REV_FALCON_B0) &&
	    EF4_QWORD_FIELD(*event, FSF_BB_GLB_EV_XG_MGT_INTR)) {
		nic_data->xmac_poll_required = true;
		return true;
	}

	if (ef4_nic_rev(efx) <= EF4_REV_FALCON_A1 ?
	    EF4_QWORD_FIELD(*event, FSF_AA_GLB_EV_RX_RECOVERY) :
	    EF4_QWORD_FIELD(*event, FSF_BB_GLB_EV_RX_RECOVERY)) {
		netif_err(efx, rx_err, efx->net_dev,
			  "channel %d seen global RX_RESET event. Resetting.\n",
			  channel->channel);

		atomic_inc(&efx->rx_reset);
		ef4_schedule_reset(efx, EF4_WORKAROUND_6555(efx) ?
				   RESET_TYPE_RX_RECOVERY :
				   RESET_TYPE_DISABLE);
		return true;
	}

	return false;
}
/**************************************************************************
 *
 * Falcon test code
 *
 **************************************************************************/

static int
falcon_read_nvram(struct ef4_nic *efx, struct falcon_nvconfig *nvconfig_out)
{
        struct falcon_nic_data *nic_data = efx->nic_data;
        struct falcon_nvconfig *nvconfig;
        struct falcon_spi_device *spi;
        void *region;
        int rc, magic_num, struct_ver;
        __le16 *word, *limit;
        u32 csum;

        if (falcon_spi_present(&nic_data->spi_flash))
                spi = &nic_data->spi_flash;
        else if (falcon_spi_present(&nic_data->spi_eeprom))
                spi = &nic_data->spi_eeprom;
        else
                return -EINVAL;

        region = kmalloc(FALCON_NVCONFIG_END, GFP_KERNEL);
        if (!region)
                return -ENOMEM;
        nvconfig = region + FALCON_NVCONFIG_OFFSET;

        mutex_lock(&nic_data->spi_lock);
        rc = falcon_spi_read(efx, spi, 0, FALCON_NVCONFIG_END, NULL, region);
        mutex_unlock(&nic_data->spi_lock);
        if (rc) {
                netif_err(efx, hw, efx->net_dev, "Failed to read %s\n",
                          falcon_spi_present(&nic_data->spi_flash) ?
                          "flash" : "EEPROM");
                rc = -EIO;
                goto out;
        }

        magic_num = le16_to_cpu(nvconfig->board_magic_num);
        struct_ver = le16_to_cpu(nvconfig->board_struct_ver);

        rc = -EINVAL;
        if (magic_num != FALCON_NVCONFIG_BOARD_MAGIC_NUM) {
                netif_err(efx, hw, efx->net_dev,
                          "NVRAM bad magic 0x%x\n", magic_num);
                goto out;
        }
        if (struct_ver < 2) {
                netif_err(efx, hw, efx->net_dev,
                          "NVRAM has ancient version 0x%x\n", struct_ver);
                goto out;
        } else if (struct_ver < 4) {
                word = &nvconfig->board_magic_num;
                limit = (__le16 *) (nvconfig + 1);
        } else {
                word = region;
                limit = region + FALCON_NVCONFIG_END;
        }
        for (csum = 0; word < limit; ++word)
                csum += le16_to_cpu(*word);

        if (~csum & 0xffff) {
                netif_err(efx, hw, efx->net_dev,
                          "NVRAM has incorrect checksum\n");
                goto out;
        }

        rc = 0;
        if (nvconfig_out)
                memcpy(nvconfig_out, nvconfig, sizeof(*nvconfig));

out:
        kfree(region);
        return rc;
}

static int falcon_test_nvram(struct ef4_nic *efx)
{
        return falcon_read_nvram(efx, NULL);
}
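/* The NVRAM checksum above is a plain arithmetic sum: every little-endian
 * 16-bit word in the checked region (including the stored checksum word)
 * is accumulated into a u32, and the image is valid only when the low 16
 * bits of the total are all ones, i.e. (~csum & 0xffff) == 0.  As an
 * illustration with made-up values: if the only payload words were
 * 0x1234 and 0x0042, the stored checksum word would have to be 0xed89,
 * since 0x1234 + 0x0042 + 0xed89 == 0xffff.
 */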
static const struct ef4_farch_register_test falcon_b0_register_tests[] = {
        { FR_AZ_ADR_REGION,
          EF4_OWORD32(0x0003FFFF, 0x0003FFFF, 0x0003FFFF, 0x0003FFFF) },
        { FR_AZ_RX_CFG,
          EF4_OWORD32(0xFFFFFFFE, 0x00017FFF, 0x00000000, 0x00000000) },
        { FR_AZ_TX_CFG,
          EF4_OWORD32(0x7FFF0037, 0x00000000, 0x00000000, 0x00000000) },
        { FR_AZ_TX_RESERVED,
          EF4_OWORD32(0xFFFEFE80, 0x1FFFFFFF, 0x020000FE, 0x007FFFFF) },
        { FR_AB_MAC_CTRL,
          EF4_OWORD32(0xFFFF0000, 0x00000000, 0x00000000, 0x00000000) },
        { FR_AZ_SRM_TX_DC_CFG,
          EF4_OWORD32(0x001FFFFF, 0x00000000, 0x00000000, 0x00000000) },
        { FR_AZ_RX_DC_CFG,
          EF4_OWORD32(0x0000000F, 0x00000000, 0x00000000, 0x00000000) },
        { FR_AZ_RX_DC_PF_WM,
          EF4_OWORD32(0x000003FF, 0x00000000, 0x00000000, 0x00000000) },
        { FR_BZ_DP_CTRL,
          EF4_OWORD32(0x00000FFF, 0x00000000, 0x00000000, 0x00000000) },
        { FR_AB_GM_CFG2,
          EF4_OWORD32(0x00007337, 0x00000000, 0x00000000, 0x00000000) },
        { FR_AB_GMF_CFG0,
          EF4_OWORD32(0x00001F1F, 0x00000000, 0x00000000, 0x00000000) },
        { FR_AB_XM_GLB_CFG,
          EF4_OWORD32(0x00000C68, 0x00000000, 0x00000000, 0x00000000) },
        { FR_AB_XM_TX_CFG,
          EF4_OWORD32(0x00080164, 0x00000000, 0x00000000, 0x00000000) },
        { FR_AB_XM_RX_CFG,
          EF4_OWORD32(0x07100A0C, 0x00000000, 0x00000000, 0x00000000) },
        { FR_AB_XM_RX_PARAM,
          EF4_OWORD32(0x00001FF8, 0x00000000, 0x00000000, 0x00000000) },
        { FR_AB_XM_FC,
          EF4_OWORD32(0xFFFF0001, 0x00000000, 0x00000000, 0x00000000) },
        { FR_AB_XM_ADR_LO,
          EF4_OWORD32(0xFFFFFFFF, 0x00000000, 0x00000000, 0x00000000) },
        { FR_AB_XX_SD_CTL,
          EF4_OWORD32(0x0003FF0F, 0x00000000, 0x00000000, 0x00000000) },
};

static int
falcon_b0_test_chip(struct ef4_nic *efx, struct ef4_self_tests *tests)
{
        enum reset_type reset_method = RESET_TYPE_INVISIBLE;
        int rc, rc2;

        mutex_lock(&efx->mac_lock);
        if (efx->loopback_modes) {
                /* We need the 312.5 MHz clock from the PHY to test the
                 * XMAC registers, so move into XGMII loopback if
                 * available */
                if (efx->loopback_modes & (1 << LOOPBACK_XGMII))
                        efx->loopback_mode = LOOPBACK_XGMII;
                else
                        efx->loopback_mode = __ffs(efx->loopback_modes);
        }
        __ef4_reconfigure_port(efx);
        mutex_unlock(&efx->mac_lock);

        ef4_reset_down(efx, reset_method);

        tests->registers =
                ef4_farch_test_registers(efx, falcon_b0_register_tests,
                                         ARRAY_SIZE(falcon_b0_register_tests))
                ? -1 : 1;

        rc = falcon_reset_hw(efx, reset_method);
        rc2 = ef4_reset_up(efx, reset_method, rc == 0);
        return rc ? rc : rc2;
}
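/* Each falcon_b0_register_tests entry pairs a register with a mask of
 * the bits that are safe to flip; the farch test helper, roughly
 * speaking, writes test patterns restricted to the masked bits,
 * verifies they read back, and restores the original value.  A value of
 * -1 in tests->registers therefore means at least one writable bit did
 * not hold its value.
 */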
/**************************************************************************
 *
 * Device reset
 *
 **************************************************************************
 */

static enum reset_type falcon_map_reset_reason(enum reset_type reason)
{
        switch (reason) {
        case RESET_TYPE_RX_RECOVERY:
        case RESET_TYPE_DMA_ERROR:
        case RESET_TYPE_TX_SKIP:
                /* These can occasionally occur due to hardware bugs.
                 * We try to reset without disrupting the link.
                 */
                return RESET_TYPE_INVISIBLE;
        default:
                return RESET_TYPE_ALL;
        }
}

static int falcon_map_reset_flags(u32 *flags)
{
        enum {
                FALCON_RESET_INVISIBLE = (ETH_RESET_DMA | ETH_RESET_FILTER |
                                          ETH_RESET_OFFLOAD | ETH_RESET_MAC),
                FALCON_RESET_ALL = FALCON_RESET_INVISIBLE | ETH_RESET_PHY,
                FALCON_RESET_WORLD = FALCON_RESET_ALL | ETH_RESET_IRQ,
        };

        if ((*flags & FALCON_RESET_WORLD) == FALCON_RESET_WORLD) {
                *flags &= ~FALCON_RESET_WORLD;
                return RESET_TYPE_WORLD;
        }

        if ((*flags & FALCON_RESET_ALL) == FALCON_RESET_ALL) {
                *flags &= ~FALCON_RESET_ALL;
                return RESET_TYPE_ALL;
        }

        if ((*flags & FALCON_RESET_INVISIBLE) == FALCON_RESET_INVISIBLE) {
                *flags &= ~FALCON_RESET_INVISIBLE;
                return RESET_TYPE_INVISIBLE;
        }

        return -EINVAL;
}
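/* falcon_map_reset_flags() consumes ethtool reset flags greedily: the
 * widest reset scope whose component flags are all present is chosen,
 * and exactly those flags are cleared.  For example (illustrative
 * values only):
 *
 *      u32 flags = ETH_RESET_DMA | ETH_RESET_FILTER |
 *                  ETH_RESET_OFFLOAD | ETH_RESET_MAC | ETH_RESET_PHY;
 *      falcon_map_reset_flags(&flags);  // returns RESET_TYPE_ALL,
 *                                       // leaves flags == 0
 *
 * Any flags outside FALCON_RESET_WORLD are left set, so the caller can
 * tell which requested components will not be reset.
 */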
"reported fault" : "failed"); 2082 efx->phy_mode |= PHY_MODE_LOW_POWER; 2083 rc = __ef4_reconfigure_port(efx); 2084 WARN_ON(rc); 2085 } 2086 2087 if (LOOPBACK_INTERNAL(efx)) 2088 link_changed = falcon_loopback_link_poll(efx); 2089 else 2090 link_changed = efx->phy_op->poll(efx); 2091 2092 if (link_changed) { 2093 falcon_stop_nic_stats(efx); 2094 falcon_deconfigure_mac_wrapper(efx); 2095 2096 falcon_reset_macs(efx); 2097 rc = falcon_reconfigure_xmac(efx); 2098 BUG_ON(rc); 2099 2100 falcon_start_nic_stats(efx); 2101 2102 ef4_link_status_changed(efx); 2103 } 2104 2105 falcon_poll_xmac(efx); 2106 } 2107 2108 /* Zeroes out the SRAM contents. This routine must be called in 2109 * process context and is allowed to sleep. 2110 */ 2111 static int falcon_reset_sram(struct ef4_nic *efx) 2112 { 2113 ef4_oword_t srm_cfg_reg_ker, gpio_cfg_reg_ker; 2114 int count; 2115 2116 /* Set the SRAM wake/sleep GPIO appropriately. */ 2117 ef4_reado(efx, &gpio_cfg_reg_ker, FR_AB_GPIO_CTL); 2118 EF4_SET_OWORD_FIELD(gpio_cfg_reg_ker, FRF_AB_GPIO1_OEN, 1); 2119 EF4_SET_OWORD_FIELD(gpio_cfg_reg_ker, FRF_AB_GPIO1_OUT, 1); 2120 ef4_writeo(efx, &gpio_cfg_reg_ker, FR_AB_GPIO_CTL); 2121 2122 /* Initiate SRAM reset */ 2123 EF4_POPULATE_OWORD_2(srm_cfg_reg_ker, 2124 FRF_AZ_SRM_INIT_EN, 1, 2125 FRF_AZ_SRM_NB_SZ, 0); 2126 ef4_writeo(efx, &srm_cfg_reg_ker, FR_AZ_SRM_CFG); 2127 2128 /* Wait for SRAM reset to complete */ 2129 count = 0; 2130 do { 2131 netif_dbg(efx, hw, efx->net_dev, 2132 "waiting for SRAM reset (attempt %d)...\n", count); 2133 2134 /* SRAM reset is slow; expect around 16ms */ 2135 schedule_timeout_uninterruptible(HZ / 50); 2136 2137 /* Check for reset complete */ 2138 ef4_reado(efx, &srm_cfg_reg_ker, FR_AZ_SRM_CFG); 2139 if (!EF4_OWORD_FIELD(srm_cfg_reg_ker, FRF_AZ_SRM_INIT_EN)) { 2140 netif_dbg(efx, hw, efx->net_dev, 2141 "SRAM reset complete\n"); 2142 2143 return 0; 2144 } 2145 } while (++count < 20); /* wait up to 0.4 sec */ 2146 2147 netif_err(efx, hw, efx->net_dev, "timed out waiting for SRAM reset\n"); 2148 return -ETIMEDOUT; 2149 } 2150 2151 static void falcon_spi_device_init(struct ef4_nic *efx, 2152 struct falcon_spi_device *spi_device, 2153 unsigned int device_id, u32 device_type) 2154 { 2155 if (device_type != 0) { 2156 spi_device->device_id = device_id; 2157 spi_device->size = 2158 1 << SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_SIZE); 2159 spi_device->addr_len = 2160 SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_ADDR_LEN); 2161 spi_device->munge_address = (spi_device->size == 1 << 9 && 2162 spi_device->addr_len == 1); 2163 spi_device->erase_command = 2164 SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_ERASE_CMD); 2165 spi_device->erase_size = 2166 1 << SPI_DEV_TYPE_FIELD(device_type, 2167 SPI_DEV_TYPE_ERASE_SIZE); 2168 spi_device->block_size = 2169 1 << SPI_DEV_TYPE_FIELD(device_type, 2170 SPI_DEV_TYPE_BLOCK_SIZE); 2171 } else { 2172 spi_device->size = 0; 2173 } 2174 } 2175 2176 /* Extract non-volatile configuration */ 2177 static int falcon_probe_nvconfig(struct ef4_nic *efx) 2178 { 2179 struct falcon_nic_data *nic_data = efx->nic_data; 2180 struct falcon_nvconfig *nvconfig; 2181 int rc; 2182 2183 nvconfig = kmalloc(sizeof(*nvconfig), GFP_KERNEL); 2184 if (!nvconfig) 2185 return -ENOMEM; 2186 2187 rc = falcon_read_nvram(efx, nvconfig); 2188 if (rc) 2189 goto out; 2190 2191 efx->phy_type = nvconfig->board_v2.port0_phy_type; 2192 efx->mdio.prtad = nvconfig->board_v2.port0_phy_addr; 2193 2194 if (le16_to_cpu(nvconfig->board_struct_ver) >= 3) { 2195 falcon_spi_device_init( 2196 efx, &nic_data->spi_flash, 
static void falcon_monitor(struct ef4_nic *efx)
{
        bool link_changed;
        int rc;

        BUG_ON(!mutex_is_locked(&efx->mac_lock));

        rc = falcon_board(efx)->type->monitor(efx);
        if (rc) {
                netif_err(efx, hw, efx->net_dev,
                          "Board sensor %s; shutting down PHY\n",
                          (rc == -ERANGE) ? "reported fault" : "failed");
                efx->phy_mode |= PHY_MODE_LOW_POWER;
                rc = __ef4_reconfigure_port(efx);
                WARN_ON(rc);
        }

        if (LOOPBACK_INTERNAL(efx))
                link_changed = falcon_loopback_link_poll(efx);
        else
                link_changed = efx->phy_op->poll(efx);

        if (link_changed) {
                falcon_stop_nic_stats(efx);
                falcon_deconfigure_mac_wrapper(efx);

                falcon_reset_macs(efx);
                rc = falcon_reconfigure_xmac(efx);
                BUG_ON(rc);

                falcon_start_nic_stats(efx);

                ef4_link_status_changed(efx);
        }

        falcon_poll_xmac(efx);
}

/* Zeroes out the SRAM contents.  This routine must be called in
 * process context and is allowed to sleep.
 */
static int falcon_reset_sram(struct ef4_nic *efx)
{
        ef4_oword_t srm_cfg_reg_ker, gpio_cfg_reg_ker;
        int count;

        /* Set the SRAM wake/sleep GPIO appropriately. */
        ef4_reado(efx, &gpio_cfg_reg_ker, FR_AB_GPIO_CTL);
        EF4_SET_OWORD_FIELD(gpio_cfg_reg_ker, FRF_AB_GPIO1_OEN, 1);
        EF4_SET_OWORD_FIELD(gpio_cfg_reg_ker, FRF_AB_GPIO1_OUT, 1);
        ef4_writeo(efx, &gpio_cfg_reg_ker, FR_AB_GPIO_CTL);

        /* Initiate SRAM reset */
        EF4_POPULATE_OWORD_2(srm_cfg_reg_ker,
                             FRF_AZ_SRM_INIT_EN, 1,
                             FRF_AZ_SRM_NB_SZ, 0);
        ef4_writeo(efx, &srm_cfg_reg_ker, FR_AZ_SRM_CFG);

        /* Wait for SRAM reset to complete */
        count = 0;
        do {
                netif_dbg(efx, hw, efx->net_dev,
                          "waiting for SRAM reset (attempt %d)...\n", count);

                /* SRAM reset is slow; expect around 16ms */
                schedule_timeout_uninterruptible(HZ / 50);

                /* Check for reset complete */
                ef4_reado(efx, &srm_cfg_reg_ker, FR_AZ_SRM_CFG);
                if (!EF4_OWORD_FIELD(srm_cfg_reg_ker, FRF_AZ_SRM_INIT_EN)) {
                        netif_dbg(efx, hw, efx->net_dev,
                                  "SRAM reset complete\n");

                        return 0;
                }
        } while (++count < 20);        /* wait up to 0.4 sec */

        netif_err(efx, hw, efx->net_dev, "timed out waiting for SRAM reset\n");
        return -ETIMEDOUT;
}

static void falcon_spi_device_init(struct ef4_nic *efx,
                                   struct falcon_spi_device *spi_device,
                                   unsigned int device_id, u32 device_type)
{
        if (device_type != 0) {
                spi_device->device_id = device_id;
                spi_device->size =
                        1 << SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_SIZE);
                spi_device->addr_len =
                        SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_ADDR_LEN);
                spi_device->munge_address = (spi_device->size == 1 << 9 &&
                                             spi_device->addr_len == 1);
                spi_device->erase_command =
                        SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_ERASE_CMD);
                spi_device->erase_size =
                        1 << SPI_DEV_TYPE_FIELD(device_type,
                                                SPI_DEV_TYPE_ERASE_SIZE);
                spi_device->block_size =
                        1 << SPI_DEV_TYPE_FIELD(device_type,
                                                SPI_DEV_TYPE_BLOCK_SIZE);
        } else {
                spi_device->size = 0;
        }
}
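/* The device_type word decoded above packs several log2-encoded fields,
 * each extracted with SPI_DEV_TYPE_FIELD().  As an illustration, a part
 * reporting size 1 << 9 = 512 bytes with a one-byte address gets
 * munge_address set; small EEPROMs of that shape carry the ninth
 * address bit inside the command byte rather than in the address
 * itself, and the SPI access code compensates when the flag is set.
 */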
/* Extract non-volatile configuration */
static int falcon_probe_nvconfig(struct ef4_nic *efx)
{
        struct falcon_nic_data *nic_data = efx->nic_data;
        struct falcon_nvconfig *nvconfig;
        int rc;

        nvconfig = kmalloc(sizeof(*nvconfig), GFP_KERNEL);
        if (!nvconfig)
                return -ENOMEM;

        rc = falcon_read_nvram(efx, nvconfig);
        if (rc)
                goto out;

        efx->phy_type = nvconfig->board_v2.port0_phy_type;
        efx->mdio.prtad = nvconfig->board_v2.port0_phy_addr;

        if (le16_to_cpu(nvconfig->board_struct_ver) >= 3) {
                falcon_spi_device_init(
                        efx, &nic_data->spi_flash, FFE_AB_SPI_DEVICE_FLASH,
                        le32_to_cpu(nvconfig->board_v3
                                    .spi_device_type[FFE_AB_SPI_DEVICE_FLASH]));
                falcon_spi_device_init(
                        efx, &nic_data->spi_eeprom, FFE_AB_SPI_DEVICE_EEPROM,
                        le32_to_cpu(nvconfig->board_v3
                                    .spi_device_type[FFE_AB_SPI_DEVICE_EEPROM]));
        }

        /* Read the MAC addresses */
        ether_addr_copy(efx->net_dev->perm_addr, nvconfig->mac_address[0]);

        netif_dbg(efx, probe, efx->net_dev, "PHY is %d phy_id %d\n",
                  efx->phy_type, efx->mdio.prtad);

        rc = falcon_probe_board(efx,
                                le16_to_cpu(nvconfig->board_v2.board_revision));
out:
        kfree(nvconfig);
        return rc;
}

static int falcon_dimension_resources(struct ef4_nic *efx)
{
        efx->rx_dc_base = 0x20000;
        efx->tx_dc_base = 0x26000;
        return 0;
}

/* Probe all SPI devices on the NIC */
static void falcon_probe_spi_devices(struct ef4_nic *efx)
{
        struct falcon_nic_data *nic_data = efx->nic_data;
        ef4_oword_t nic_stat, gpio_ctl, ee_vpd_cfg;
        int boot_dev;

        ef4_reado(efx, &gpio_ctl, FR_AB_GPIO_CTL);
        ef4_reado(efx, &nic_stat, FR_AB_NIC_STAT);
        ef4_reado(efx, &ee_vpd_cfg, FR_AB_EE_VPD_CFG0);

        if (EF4_OWORD_FIELD(gpio_ctl, FRF_AB_GPIO3_PWRUP_VALUE)) {
                boot_dev = (EF4_OWORD_FIELD(nic_stat, FRF_AB_SF_PRST) ?
                            FFE_AB_SPI_DEVICE_FLASH : FFE_AB_SPI_DEVICE_EEPROM);
                netif_dbg(efx, probe, efx->net_dev, "Booted from %s\n",
                          boot_dev == FFE_AB_SPI_DEVICE_FLASH ?
                          "flash" : "EEPROM");
        } else {
                /* Disable VPD and set clock dividers to safe
                 * values for initial programming. */
                boot_dev = -1;
                netif_dbg(efx, probe, efx->net_dev,
                          "Booted from internal ASIC settings;"
                          " setting SPI config\n");
                EF4_POPULATE_OWORD_3(ee_vpd_cfg, FRF_AB_EE_VPD_EN, 0,
                                     /* 125 MHz / 7 ~= 20 MHz */
                                     FRF_AB_EE_SF_CLOCK_DIV, 7,
                                     /* 125 MHz / 63 ~= 2 MHz */
                                     FRF_AB_EE_EE_CLOCK_DIV, 63);
                ef4_writeo(efx, &ee_vpd_cfg, FR_AB_EE_VPD_CFG0);
        }

        mutex_init(&nic_data->spi_lock);

        if (boot_dev == FFE_AB_SPI_DEVICE_FLASH)
                falcon_spi_device_init(efx, &nic_data->spi_flash,
                                       FFE_AB_SPI_DEVICE_FLASH,
                                       default_flash_type);
        if (boot_dev == FFE_AB_SPI_DEVICE_EEPROM)
                falcon_spi_device_init(efx, &nic_data->spi_eeprom,
                                       FFE_AB_SPI_DEVICE_EEPROM,
                                       large_eeprom_type);
}

static unsigned int falcon_a1_mem_map_size(struct ef4_nic *efx)
{
        return 0x20000;
}

static unsigned int falcon_b0_mem_map_size(struct ef4_nic *efx)
{
        /* Map everything up to and including the RSS indirection table.
         * The PCI core takes care of mapping the MSI-X tables.
         */
        return FR_BZ_RX_INDIRECTION_TBL +
               FR_BZ_RX_INDIRECTION_TBL_STEP * FR_BZ_RX_INDIRECTION_TBL_ROWS;
}
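/* For Falcon B0 the mapped region thus ends exactly after the last row
 * of the RSS indirection table: base FR_BZ_RX_INDIRECTION_TBL plus
 * FR_BZ_RX_INDIRECTION_TBL_STEP * FR_BZ_RX_INDIRECTION_TBL_ROWS bytes.
 * Registers beyond that point (notably the MSI-X vector tables) are
 * mapped separately by the PCI core, keeping the driver's own BAR
 * mapping small.
 */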
static int falcon_probe_nic(struct ef4_nic *efx)
{
        struct falcon_nic_data *nic_data;
        struct falcon_board *board;
        int rc;

        efx->primary = efx; /* only one usable function per controller */

        /* Allocate storage for hardware specific data */
        nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL);
        if (!nic_data)
                return -ENOMEM;
        efx->nic_data = nic_data;

        rc = -ENODEV;

        if (ef4_farch_fpga_ver(efx) != 0) {
                netif_err(efx, probe, efx->net_dev,
                          "Falcon FPGA not supported\n");
                goto fail1;
        }

        if (ef4_nic_rev(efx) <= EF4_REV_FALCON_A1) {
                ef4_oword_t nic_stat;
                struct pci_dev *dev;
                u8 pci_rev = efx->pci_dev->revision;

                if ((pci_rev == 0xff) || (pci_rev == 0)) {
                        netif_err(efx, probe, efx->net_dev,
                                  "Falcon rev A0 not supported\n");
                        goto fail1;
                }
                ef4_reado(efx, &nic_stat, FR_AB_NIC_STAT);
                if (EF4_OWORD_FIELD(nic_stat, FRF_AB_STRAP_10G) == 0) {
                        netif_err(efx, probe, efx->net_dev,
                                  "Falcon rev A1 1G not supported\n");
                        goto fail1;
                }
                if (EF4_OWORD_FIELD(nic_stat, FRF_AA_STRAP_PCIE) == 0) {
                        netif_err(efx, probe, efx->net_dev,
                                  "Falcon rev A1 PCI-X not supported\n");
                        goto fail1;
                }

                dev = pci_dev_get(efx->pci_dev);
                while ((dev = pci_get_device(PCI_VENDOR_ID_SOLARFLARE,
                                             PCI_DEVICE_ID_SOLARFLARE_SFC4000A_1,
                                             dev))) {
                        if (dev->bus == efx->pci_dev->bus &&
                            dev->devfn == efx->pci_dev->devfn + 1) {
                                nic_data->pci_dev2 = dev;
                                break;
                        }
                }
                if (!nic_data->pci_dev2) {
                        netif_err(efx, probe, efx->net_dev,
                                  "failed to find secondary function\n");
                        rc = -ENODEV;
                        goto fail2;
                }
        }

        /* Now we can reset the NIC */
        rc = __falcon_reset_hw(efx, RESET_TYPE_ALL);
        if (rc) {
                netif_err(efx, probe, efx->net_dev, "failed to reset NIC\n");
                goto fail3;
        }

        /* Allocate memory for INT_KER */
        rc = ef4_nic_alloc_buffer(efx, &efx->irq_status, sizeof(ef4_oword_t),
                                  GFP_KERNEL);
        if (rc)
                goto fail4;
        BUG_ON(efx->irq_status.dma_addr & 0x0f);

        netif_dbg(efx, probe, efx->net_dev,
                  "INT_KER at %llx (virt %p phys %llx)\n",
                  (u64)efx->irq_status.dma_addr,
                  efx->irq_status.addr,
                  (u64)virt_to_phys(efx->irq_status.addr));

        falcon_probe_spi_devices(efx);

        /* Read in the non-volatile configuration */
        rc = falcon_probe_nvconfig(efx);
        if (rc) {
                if (rc == -EINVAL)
                        netif_err(efx, probe, efx->net_dev, "NVRAM is invalid\n");
                goto fail5;
        }

        efx->max_channels = (ef4_nic_rev(efx) <= EF4_REV_FALCON_A1 ? 4 :
                             EF4_MAX_CHANNELS);
        efx->max_tx_channels = efx->max_channels;
        efx->timer_quantum_ns = 4968; /* 621 cycles */
        efx->timer_max_ns = efx->type->timer_period_max *
                            efx->timer_quantum_ns;

        /* Initialise I2C adapter */
        board = falcon_board(efx);
        board->i2c_adap.owner = THIS_MODULE;
        board->i2c_data = falcon_i2c_bit_operations;
        board->i2c_data.data = efx;
        board->i2c_adap.algo_data = &board->i2c_data;
        board->i2c_adap.dev.parent = &efx->pci_dev->dev;
        strlcpy(board->i2c_adap.name, "SFC4000 GPIO",
                sizeof(board->i2c_adap.name));
        rc = i2c_bit_add_bus(&board->i2c_adap);
        if (rc)
                goto fail5;

        rc = falcon_board(efx)->type->init(efx);
        if (rc) {
                netif_err(efx, probe, efx->net_dev,
                          "failed to initialise board\n");
                goto fail6;
        }

        nic_data->stats_disable_count = 1;
        setup_timer(&nic_data->stats_timer, &falcon_stats_timer_func,
                    (unsigned long)efx);

        return 0;

fail6:
        i2c_del_adapter(&board->i2c_adap);
        memset(&board->i2c_adap, 0, sizeof(board->i2c_adap));
fail5:
        ef4_nic_free_buffer(efx, &efx->irq_status);
fail4:
fail3:
        if (nic_data->pci_dev2) {
                pci_dev_put(nic_data->pci_dev2);
                nic_data->pci_dev2 = NULL;
        }
fail2:
fail1:
        kfree(efx->nic_data);
        return rc;
}

static void falcon_init_rx_cfg(struct ef4_nic *efx)
{
        /* RX control FIFO thresholds (32 entries) */
        const unsigned ctrl_xon_thr = 20;
        const unsigned ctrl_xoff_thr = 25;
        ef4_oword_t reg;

        ef4_reado(efx, &reg, FR_AZ_RX_CFG);
        if (ef4_nic_rev(efx) <= EF4_REV_FALCON_A1) {
                /* Data FIFO size is 5.5K.  The RX DMA engine only
                 * supports scattering for user-mode queues, but will
                 * split DMA writes at intervals of RX_USR_BUF_SIZE
                 * (32-byte units) even for kernel-mode queues.  We
                 * set it to be so large that this never happens.
                 */
                EF4_SET_OWORD_FIELD(reg, FRF_AA_RX_DESC_PUSH_EN, 0);
                EF4_SET_OWORD_FIELD(reg, FRF_AA_RX_USR_BUF_SIZE,
                                    (3 * 4096) >> 5);
                EF4_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_MAC_TH, 512 >> 8);
                EF4_SET_OWORD_FIELD(reg, FRF_AA_RX_XOFF_MAC_TH, 2048 >> 8);
                EF4_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_TX_TH, ctrl_xon_thr);
                EF4_SET_OWORD_FIELD(reg, FRF_AA_RX_XOFF_TX_TH, ctrl_xoff_thr);
        } else {
                /* Data FIFO size is 80K; register fields moved */
                EF4_SET_OWORD_FIELD(reg, FRF_BZ_RX_DESC_PUSH_EN, 0);
                EF4_SET_OWORD_FIELD(reg, FRF_BZ_RX_USR_BUF_SIZE,
                                    EF4_RX_USR_BUF_SIZE >> 5);
                /* Send XON and XOFF at ~3 * max MTU away from empty/full */
                EF4_SET_OWORD_FIELD(reg, FRF_BZ_RX_XON_MAC_TH, 27648 >> 8);
                EF4_SET_OWORD_FIELD(reg, FRF_BZ_RX_XOFF_MAC_TH, 54272 >> 8);
                EF4_SET_OWORD_FIELD(reg, FRF_BZ_RX_XON_TX_TH, ctrl_xon_thr);
                EF4_SET_OWORD_FIELD(reg, FRF_BZ_RX_XOFF_TX_TH, ctrl_xoff_thr);
                EF4_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, 1);

                /* Enable hash insertion.  This is broken for the
                 * 'Falcon' hash so also select Toeplitz TCP/IPv4 and
                 * IPv4 hashes. */
                EF4_SET_OWORD_FIELD(reg, FRF_BZ_RX_HASH_INSRT_HDR, 1);
                EF4_SET_OWORD_FIELD(reg, FRF_BZ_RX_HASH_ALG, 1);
                EF4_SET_OWORD_FIELD(reg, FRF_BZ_RX_IP_HASH, 1);
        }
        /* Always enable XOFF signal from RX FIFO.  We enable
         * or disable transmission of pause frames at the MAC.
         */
        EF4_SET_OWORD_FIELD(reg, FRF_AZ_RX_XOFF_MAC_EN, 1);
        ef4_writeo(efx, &reg, FR_AZ_RX_CFG);
}
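/* The B0 MAC thresholds above are in units of 256 bytes (hence the
 * ">> 8").  With an 80KB (81920-byte) data FIFO, XON at 27648 bytes and
 * XOFF at 54272 bytes mean pause frames start when the FIFO is within
 * 81920 - 54272 = 27648 bytes of full and stop once it drains back to
 * 27648 bytes; in both cases that is about three maximum-length
 * (roughly 9216-byte) frames, matching the "~3 * max MTU away from
 * empty/full" comment.
 */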
/* This call performs hardware-specific global initialisation, such as
 * defining the descriptor cache sizes and number of RSS channels.
 * It does not set up any buffers, descriptor rings or event queues.
 */
static int falcon_init_nic(struct ef4_nic *efx)
{
        ef4_oword_t temp;
        int rc;

        /* Use on-chip SRAM */
        ef4_reado(efx, &temp, FR_AB_NIC_STAT);
        EF4_SET_OWORD_FIELD(temp, FRF_AB_ONCHIP_SRAM, 1);
        ef4_writeo(efx, &temp, FR_AB_NIC_STAT);

        rc = falcon_reset_sram(efx);
        if (rc)
                return rc;

        /* Clear the parity enables on the TX data fifos as
         * they produce false parity errors because of timing issues
         */
        if (EF4_WORKAROUND_5129(efx)) {
                ef4_reado(efx, &temp, FR_AZ_CSR_SPARE);
                EF4_SET_OWORD_FIELD(temp, FRF_AB_MEM_PERR_EN_TX_DATA, 0);
                ef4_writeo(efx, &temp, FR_AZ_CSR_SPARE);
        }

        if (EF4_WORKAROUND_7244(efx)) {
                ef4_reado(efx, &temp, FR_BZ_RX_FILTER_CTL);
                EF4_SET_OWORD_FIELD(temp, FRF_BZ_UDP_FULL_SRCH_LIMIT, 8);
                EF4_SET_OWORD_FIELD(temp, FRF_BZ_UDP_WILD_SRCH_LIMIT, 8);
                EF4_SET_OWORD_FIELD(temp, FRF_BZ_TCP_FULL_SRCH_LIMIT, 8);
                EF4_SET_OWORD_FIELD(temp, FRF_BZ_TCP_WILD_SRCH_LIMIT, 8);
                ef4_writeo(efx, &temp, FR_BZ_RX_FILTER_CTL);
        }

        /* XXX This is documented only for Falcon A0/A1 */
        /* Set up RX.  "Wait for descriptor" mode is broken and must be
         * disabled.  RXDP recovery shouldn't be needed, but is.
         */
        ef4_reado(efx, &temp, FR_AA_RX_SELF_RST);
        EF4_SET_OWORD_FIELD(temp, FRF_AA_RX_NODESC_WAIT_DIS, 1);
        EF4_SET_OWORD_FIELD(temp, FRF_AA_RX_SELF_RST_EN, 1);
        if (EF4_WORKAROUND_5583(efx))
                EF4_SET_OWORD_FIELD(temp, FRF_AA_RX_ISCSI_DIS, 1);
        ef4_writeo(efx, &temp, FR_AA_RX_SELF_RST);

        /* Do not enable TX_NO_EOP_DISC_EN, since it limits packets to 16
         * descriptors (which is bad).
         */
        ef4_reado(efx, &temp, FR_AZ_TX_CFG);
        EF4_SET_OWORD_FIELD(temp, FRF_AZ_TX_NO_EOP_DISC_EN, 0);
        ef4_writeo(efx, &temp, FR_AZ_TX_CFG);

        falcon_init_rx_cfg(efx);

        if (ef4_nic_rev(efx) >= EF4_REV_FALCON_B0) {
                falcon_b0_rx_push_rss_config(efx, false, efx->rx_indir_table);

                /* Set destination of both TX and RX Flush events */
                EF4_POPULATE_OWORD_1(temp, FRF_BZ_FLS_EVQ_ID, 0);
                ef4_writeo(efx, &temp, FR_BZ_DP_CTRL);
        }

        ef4_farch_init_common(efx);

        return 0;
}
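/* Setting FRF_BZ_FLS_EVQ_ID to 0 above steers TX and RX flush-done
 * events to event queue 0, so, as far as this code is concerned,
 * queue-flush completions during teardown only ever appear on the
 * first channel rather than on every event queue.
 */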
static void falcon_remove_nic(struct ef4_nic *efx)
{
        struct falcon_nic_data *nic_data = efx->nic_data;
        struct falcon_board *board = falcon_board(efx);

        board->type->fini(efx);

        /* Remove I2C adapter and clear it in preparation for a retry */
        i2c_del_adapter(&board->i2c_adap);
        memset(&board->i2c_adap, 0, sizeof(board->i2c_adap));

        ef4_nic_free_buffer(efx, &efx->irq_status);

        __falcon_reset_hw(efx, RESET_TYPE_ALL);

        /* Release the second function after the reset */
        if (nic_data->pci_dev2) {
                pci_dev_put(nic_data->pci_dev2);
                nic_data->pci_dev2 = NULL;
        }

        /* Tear down the private nic state */
        kfree(efx->nic_data);
        efx->nic_data = NULL;
}

static size_t falcon_describe_nic_stats(struct ef4_nic *efx, u8 *names)
{
        return ef4_nic_describe_stats(falcon_stat_desc, FALCON_STAT_COUNT,
                                      falcon_stat_mask, names);
}

static size_t falcon_update_nic_stats(struct ef4_nic *efx, u64 *full_stats,
                                      struct rtnl_link_stats64 *core_stats)
{
        struct falcon_nic_data *nic_data = efx->nic_data;
        u64 *stats = nic_data->stats;
        ef4_oword_t cnt;

        if (!nic_data->stats_disable_count) {
                ef4_reado(efx, &cnt, FR_AZ_RX_NODESC_DROP);
                stats[FALCON_STAT_rx_nodesc_drop_cnt] +=
                        EF4_OWORD_FIELD(cnt, FRF_AB_RX_NODESC_DROP_CNT);

                if (nic_data->stats_pending &&
                    FALCON_XMAC_STATS_DMA_FLAG(efx)) {
                        nic_data->stats_pending = false;
                        rmb(); /* read the done flag before the stats */
                        ef4_nic_update_stats(
                                falcon_stat_desc, FALCON_STAT_COUNT,
                                falcon_stat_mask,
                                stats, efx->stats_buffer.addr, true);
                }

                /* Update derived statistic */
                ef4_update_diff_stat(&stats[FALCON_STAT_rx_bad_bytes],
                                     stats[FALCON_STAT_rx_bytes] -
                                     stats[FALCON_STAT_rx_good_bytes] -
                                     stats[FALCON_STAT_rx_control] * 64);
                ef4_update_sw_stats(efx, stats);
        }

        if (full_stats)
                memcpy(full_stats, stats, sizeof(u64) * FALCON_STAT_COUNT);

        if (core_stats) {
                core_stats->rx_packets = stats[FALCON_STAT_rx_packets];
                core_stats->tx_packets = stats[FALCON_STAT_tx_packets];
                core_stats->rx_bytes = stats[FALCON_STAT_rx_bytes];
                core_stats->tx_bytes = stats[FALCON_STAT_tx_bytes];
                core_stats->rx_dropped = stats[FALCON_STAT_rx_nodesc_drop_cnt] +
                                         stats[GENERIC_STAT_rx_nodesc_trunc] +
                                         stats[GENERIC_STAT_rx_noskb_drops];
                core_stats->multicast = stats[FALCON_STAT_rx_multicast];
                core_stats->rx_length_errors =
                        stats[FALCON_STAT_rx_gtjumbo] +
                        stats[FALCON_STAT_rx_length_error];
                core_stats->rx_crc_errors = stats[FALCON_STAT_rx_bad];
                core_stats->rx_frame_errors = stats[FALCON_STAT_rx_align_error];
                core_stats->rx_fifo_errors = stats[FALCON_STAT_rx_overflow];

                core_stats->rx_errors = (core_stats->rx_length_errors +
                                         core_stats->rx_crc_errors +
                                         core_stats->rx_frame_errors +
                                         stats[FALCON_STAT_rx_symbol_error]);
        }

        return FALCON_STAT_COUNT;
}
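/* rx_bad_bytes is derived rather than counted directly: the MAC reports
 * total and good octet counts, so bad octets are estimated as
 *
 *      rx_bad_bytes = rx_bytes - rx_good_bytes - 64 * rx_control
 *
 * treating control (e.g. pause) frames as minimum-length, 64 octets
 * each, since they are included in rx_bytes but are neither good nor
 * bad data frames.  ef4_update_diff_stat() keeps the derived value from
 * going backwards when the input counters update at slightly different
 * times.
 */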
void falcon_start_nic_stats(struct ef4_nic *efx)
{
        struct falcon_nic_data *nic_data = efx->nic_data;

        spin_lock_bh(&efx->stats_lock);
        if (--nic_data->stats_disable_count == 0)
                falcon_stats_request(efx);
        spin_unlock_bh(&efx->stats_lock);
}

/* We don't actually pull stats on Falcon.  Wait 10ms so that
 * they arrive when we call this just after start_stats
 */
static void falcon_pull_nic_stats(struct ef4_nic *efx)
{
        msleep(10);
}

void falcon_stop_nic_stats(struct ef4_nic *efx)
{
        struct falcon_nic_data *nic_data = efx->nic_data;
        int i;

        might_sleep();

        spin_lock_bh(&efx->stats_lock);
        ++nic_data->stats_disable_count;
        spin_unlock_bh(&efx->stats_lock);

        del_timer_sync(&nic_data->stats_timer);

        /* Wait enough time for the most recent transfer to
         * complete. */
        for (i = 0; i < 4 && nic_data->stats_pending; i++) {
                if (FALCON_XMAC_STATS_DMA_FLAG(efx))
                        break;
                msleep(1);
        }

        spin_lock_bh(&efx->stats_lock);
        falcon_stats_complete(efx);
        spin_unlock_bh(&efx->stats_lock);
}
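/* Stats gathering is reference-counted: each falcon_stop_nic_stats()
 * must be balanced by a falcon_start_nic_stats(), and DMA of the MAC
 * statistics only restarts when stats_disable_count returns to zero.
 * falcon_monitor() above shows the usual pattern around a MAC
 * reconfiguration (sketch):
 *
 *      falcon_stop_nic_stats(efx);
 *      ... reconfigure the MAC ...
 *      falcon_start_nic_stats(efx);
 */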
static void falcon_set_id_led(struct ef4_nic *efx, enum ef4_led_mode mode)
{
        falcon_board(efx)->type->set_id_led(efx, mode);
}

/**************************************************************************
 *
 * Wake on LAN
 *
 **************************************************************************
 */

static void falcon_get_wol(struct ef4_nic *efx, struct ethtool_wolinfo *wol)
{
        wol->supported = 0;
        wol->wolopts = 0;
        memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static int falcon_set_wol(struct ef4_nic *efx, u32 type)
{
        if (type != 0)
                return -EINVAL;
        return 0;
}

/**************************************************************************
 *
 * Revision-dependent attributes used by efx.c and nic.c
 *
 **************************************************************************
 */

const struct ef4_nic_type falcon_a1_nic_type = {
        .mem_bar = EF4_MEM_BAR,
        .mem_map_size = falcon_a1_mem_map_size,
        .probe = falcon_probe_nic,
        .remove = falcon_remove_nic,
        .init = falcon_init_nic,
        .dimension_resources = falcon_dimension_resources,
        .fini = falcon_irq_ack_a1,
        .monitor = falcon_monitor,
        .map_reset_reason = falcon_map_reset_reason,
        .map_reset_flags = falcon_map_reset_flags,
        .reset = falcon_reset_hw,
        .probe_port = falcon_probe_port,
        .remove_port = falcon_remove_port,
        .handle_global_event = falcon_handle_global_event,
        .fini_dmaq = ef4_farch_fini_dmaq,
        .prepare_flush = falcon_prepare_flush,
        .finish_flush = ef4_port_dummy_op_void,
        .prepare_flr = ef4_port_dummy_op_void,
        .finish_flr = ef4_farch_finish_flr,
        .describe_stats = falcon_describe_nic_stats,
        .update_stats = falcon_update_nic_stats,
        .start_stats = falcon_start_nic_stats,
        .pull_stats = falcon_pull_nic_stats,
        .stop_stats = falcon_stop_nic_stats,
        .set_id_led = falcon_set_id_led,
        .push_irq_moderation = falcon_push_irq_moderation,
        .reconfigure_port = falcon_reconfigure_port,
        .prepare_enable_fc_tx = falcon_a1_prepare_enable_fc_tx,
        .reconfigure_mac = falcon_reconfigure_xmac,
        .check_mac_fault = falcon_xmac_check_fault,
        .get_wol = falcon_get_wol,
        .set_wol = falcon_set_wol,
        .resume_wol = ef4_port_dummy_op_void,
        .test_nvram = falcon_test_nvram,
        .irq_enable_master = ef4_farch_irq_enable_master,
        .irq_test_generate = ef4_farch_irq_test_generate,
        .irq_disable_non_ev = ef4_farch_irq_disable_master,
        .irq_handle_msi = ef4_farch_msi_interrupt,
        .irq_handle_legacy = falcon_legacy_interrupt_a1,
        .tx_probe = ef4_farch_tx_probe,
        .tx_init = ef4_farch_tx_init,
        .tx_remove = ef4_farch_tx_remove,
        .tx_write = ef4_farch_tx_write,
        .tx_limit_len = ef4_farch_tx_limit_len,
        .rx_push_rss_config = dummy_rx_push_rss_config,
        .rx_probe = ef4_farch_rx_probe,
        .rx_init = ef4_farch_rx_init,
        .rx_remove = ef4_farch_rx_remove,
        .rx_write = ef4_farch_rx_write,
        .rx_defer_refill = ef4_farch_rx_defer_refill,
        .ev_probe = ef4_farch_ev_probe,
        .ev_init = ef4_farch_ev_init,
        .ev_fini = ef4_farch_ev_fini,
        .ev_remove = ef4_farch_ev_remove,
        .ev_process = ef4_farch_ev_process,
        .ev_read_ack = ef4_farch_ev_read_ack,
        .ev_test_generate = ef4_farch_ev_test_generate,

        /* We don't expose the filter table on Falcon A1 as it is not
         * mapped into function 0, but these implementations still
         * work with a degenerate case of all tables set to size 0.
         */
        .filter_table_probe = ef4_farch_filter_table_probe,
        .filter_table_restore = ef4_farch_filter_table_restore,
        .filter_table_remove = ef4_farch_filter_table_remove,
        .filter_insert = ef4_farch_filter_insert,
        .filter_remove_safe = ef4_farch_filter_remove_safe,
        .filter_get_safe = ef4_farch_filter_get_safe,
        .filter_clear_rx = ef4_farch_filter_clear_rx,
        .filter_count_rx_used = ef4_farch_filter_count_rx_used,
        .filter_get_rx_id_limit = ef4_farch_filter_get_rx_id_limit,
        .filter_get_rx_ids = ef4_farch_filter_get_rx_ids,

#ifdef CONFIG_SFC_FALCON_MTD
        .mtd_probe = falcon_mtd_probe,
        .mtd_rename = falcon_mtd_rename,
        .mtd_read = falcon_mtd_read,
        .mtd_erase = falcon_mtd_erase,
        .mtd_write = falcon_mtd_write,
        .mtd_sync = falcon_mtd_sync,
#endif

        .revision = EF4_REV_FALCON_A1,
        .txd_ptr_tbl_base = FR_AA_TX_DESC_PTR_TBL_KER,
        .rxd_ptr_tbl_base = FR_AA_RX_DESC_PTR_TBL_KER,
        .buf_tbl_base = FR_AA_BUF_FULL_TBL_KER,
        .evq_ptr_tbl_base = FR_AA_EVQ_PTR_TBL_KER,
        .evq_rptr_tbl_base = FR_AA_EVQ_RPTR_KER,
        .max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH),
        .rx_buffer_padding = 0x24,
        .can_rx_scatter = false,
        .max_interrupt_mode = EF4_INT_MODE_MSI,
        .timer_period_max = 1 << FRF_AB_TC_TIMER_VAL_WIDTH,
        .offload_features = NETIF_F_IP_CSUM,
};
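/* The B0 variant below differs from A1 mainly in that it supports MSI-X
 * and the common farch legacy interrupt handler, RX scatter, RX hash
 * insertion (NETIF_F_RXHASH), a usable RX filter table (including ARFS
 * when CONFIG_RFS_ACCEL is set) and the register self-test hook.
 */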
const struct ef4_nic_type falcon_b0_nic_type = {
        .mem_bar = EF4_MEM_BAR,
        .mem_map_size = falcon_b0_mem_map_size,
        .probe = falcon_probe_nic,
        .remove = falcon_remove_nic,
        .init = falcon_init_nic,
        .dimension_resources = falcon_dimension_resources,
        .fini = ef4_port_dummy_op_void,
        .monitor = falcon_monitor,
        .map_reset_reason = falcon_map_reset_reason,
        .map_reset_flags = falcon_map_reset_flags,
        .reset = falcon_reset_hw,
        .probe_port = falcon_probe_port,
        .remove_port = falcon_remove_port,
        .handle_global_event = falcon_handle_global_event,
        .fini_dmaq = ef4_farch_fini_dmaq,
        .prepare_flush = falcon_prepare_flush,
        .finish_flush = ef4_port_dummy_op_void,
        .prepare_flr = ef4_port_dummy_op_void,
        .finish_flr = ef4_farch_finish_flr,
        .describe_stats = falcon_describe_nic_stats,
        .update_stats = falcon_update_nic_stats,
        .start_stats = falcon_start_nic_stats,
        .pull_stats = falcon_pull_nic_stats,
        .stop_stats = falcon_stop_nic_stats,
        .set_id_led = falcon_set_id_led,
        .push_irq_moderation = falcon_push_irq_moderation,
        .reconfigure_port = falcon_reconfigure_port,
        .prepare_enable_fc_tx = falcon_b0_prepare_enable_fc_tx,
        .reconfigure_mac = falcon_reconfigure_xmac,
        .check_mac_fault = falcon_xmac_check_fault,
        .get_wol = falcon_get_wol,
        .set_wol = falcon_set_wol,
        .resume_wol = ef4_port_dummy_op_void,
        .test_chip = falcon_b0_test_chip,
        .test_nvram = falcon_test_nvram,
        .irq_enable_master = ef4_farch_irq_enable_master,
        .irq_test_generate = ef4_farch_irq_test_generate,
        .irq_disable_non_ev = ef4_farch_irq_disable_master,
        .irq_handle_msi = ef4_farch_msi_interrupt,
        .irq_handle_legacy = ef4_farch_legacy_interrupt,
        .tx_probe = ef4_farch_tx_probe,
        .tx_init = ef4_farch_tx_init,
        .tx_remove = ef4_farch_tx_remove,
        .tx_write = ef4_farch_tx_write,
        .tx_limit_len = ef4_farch_tx_limit_len,
        .rx_push_rss_config = falcon_b0_rx_push_rss_config,
        .rx_probe = ef4_farch_rx_probe,
        .rx_init = ef4_farch_rx_init,
        .rx_remove = ef4_farch_rx_remove,
        .rx_write = ef4_farch_rx_write,
        .rx_defer_refill = ef4_farch_rx_defer_refill,
        .ev_probe = ef4_farch_ev_probe,
        .ev_init = ef4_farch_ev_init,
        .ev_fini = ef4_farch_ev_fini,
        .ev_remove = ef4_farch_ev_remove,
        .ev_process = ef4_farch_ev_process,
        .ev_read_ack = ef4_farch_ev_read_ack,
        .ev_test_generate = ef4_farch_ev_test_generate,
        .filter_table_probe = ef4_farch_filter_table_probe,
        .filter_table_restore = ef4_farch_filter_table_restore,
        .filter_table_remove = ef4_farch_filter_table_remove,
        .filter_update_rx_scatter = ef4_farch_filter_update_rx_scatter,
        .filter_insert = ef4_farch_filter_insert,
        .filter_remove_safe = ef4_farch_filter_remove_safe,
        .filter_get_safe = ef4_farch_filter_get_safe,
        .filter_clear_rx = ef4_farch_filter_clear_rx,
        .filter_count_rx_used = ef4_farch_filter_count_rx_used,
        .filter_get_rx_id_limit = ef4_farch_filter_get_rx_id_limit,
        .filter_get_rx_ids = ef4_farch_filter_get_rx_ids,
#ifdef CONFIG_RFS_ACCEL
        .filter_rfs_insert = ef4_farch_filter_rfs_insert,
        .filter_rfs_expire_one = ef4_farch_filter_rfs_expire_one,
#endif
#ifdef CONFIG_SFC_FALCON_MTD
        .mtd_probe = falcon_mtd_probe,
        .mtd_rename = falcon_mtd_rename,
        .mtd_read = falcon_mtd_read,
        .mtd_erase = falcon_mtd_erase,
        .mtd_write = falcon_mtd_write,
        .mtd_sync = falcon_mtd_sync,
#endif

        .revision = EF4_REV_FALCON_B0,
        .txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL,
        .rxd_ptr_tbl_base = FR_BZ_RX_DESC_PTR_TBL,
        .buf_tbl_base = FR_BZ_BUF_FULL_TBL,
        .evq_ptr_tbl_base = FR_BZ_EVQ_PTR_TBL,
        .evq_rptr_tbl_base = FR_BZ_EVQ_RPTR,
        .max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH),
        .rx_prefix_size = FS_BZ_RX_PREFIX_SIZE,
        .rx_hash_offset = FS_BZ_RX_PREFIX_HASH_OFST,
        .rx_buffer_padding = 0,
        .can_rx_scatter = true,
        .max_interrupt_mode = EF4_INT_MODE_MSIX,
        .timer_period_max = 1 << FRF_AB_TC_TIMER_VAL_WIDTH,
        .offload_features = NETIF_F_IP_CSUM | NETIF_F_RXHASH | NETIF_F_NTUPLE,
        .max_rx_ip_filters = FR_BZ_RX_FILTER_TBL0_ROWS,
};