// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 1999 - 2018 Intel Corporation. */

/* ethtool support for ixgbe */

#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/uaccess.h>

#include "ixgbe.h"
#include "ixgbe_phy.h"

#define IXGBE_ALL_RAR_ENTRIES 16

enum {NETDEV_STATS, IXGBE_STATS};

struct ixgbe_stats {
	char stat_string[ETH_GSTRING_LEN];
	int type;
	int sizeof_stat;
	int stat_offset;
};

#define IXGBE_STAT(m)		IXGBE_STATS, \
				sizeof(((struct ixgbe_adapter *)0)->m), \
				offsetof(struct ixgbe_adapter, m)
#define IXGBE_NETDEV_STAT(m)	NETDEV_STATS, \
				sizeof(((struct rtnl_link_stats64 *)0)->m), \
				offsetof(struct rtnl_link_stats64, m)

static const struct ixgbe_stats ixgbe_gstrings_stats[] = {
	{"rx_packets", IXGBE_NETDEV_STAT(rx_packets)},
	{"tx_packets", IXGBE_NETDEV_STAT(tx_packets)},
	{"rx_bytes", IXGBE_NETDEV_STAT(rx_bytes)},
	{"tx_bytes", IXGBE_NETDEV_STAT(tx_bytes)},
	{"rx_pkts_nic", IXGBE_STAT(stats.gprc)},
	{"tx_pkts_nic", IXGBE_STAT(stats.gptc)},
	{"rx_bytes_nic", IXGBE_STAT(stats.gorc)},
	{"tx_bytes_nic", IXGBE_STAT(stats.gotc)},
	{"lsc_int", IXGBE_STAT(lsc_int)},
	{"tx_busy", IXGBE_STAT(tx_busy)},
	{"non_eop_descs", IXGBE_STAT(non_eop_descs)},
	{"rx_errors", IXGBE_NETDEV_STAT(rx_errors)},
	{"tx_errors", IXGBE_NETDEV_STAT(tx_errors)},
	{"rx_dropped", IXGBE_NETDEV_STAT(rx_dropped)},
	{"tx_dropped", IXGBE_NETDEV_STAT(tx_dropped)},
	{"multicast", IXGBE_NETDEV_STAT(multicast)},
	{"broadcast", IXGBE_STAT(stats.bprc)},
	{"rx_no_buffer_count", IXGBE_STAT(stats.rnbc[0])},
	{"collisions", IXGBE_NETDEV_STAT(collisions)},
	{"rx_over_errors", IXGBE_NETDEV_STAT(rx_over_errors)},
	{"rx_crc_errors", IXGBE_NETDEV_STAT(rx_crc_errors)},
	{"rx_frame_errors", IXGBE_NETDEV_STAT(rx_frame_errors)},
	{"hw_rsc_aggregated", IXGBE_STAT(rsc_total_count)},
	{"hw_rsc_flushed", IXGBE_STAT(rsc_total_flush)},
	{"fdir_match", IXGBE_STAT(stats.fdirmatch)},
	{"fdir_miss", IXGBE_STAT(stats.fdirmiss)},
	{"fdir_overflow", IXGBE_STAT(fdir_overflow)},
	{"rx_fifo_errors", IXGBE_NETDEV_STAT(rx_fifo_errors)},
	{"rx_missed_errors", IXGBE_NETDEV_STAT(rx_missed_errors)},
	{"tx_aborted_errors", IXGBE_NETDEV_STAT(tx_aborted_errors)},
	{"tx_carrier_errors", IXGBE_NETDEV_STAT(tx_carrier_errors)},
	{"tx_fifo_errors", IXGBE_NETDEV_STAT(tx_fifo_errors)},
	{"tx_heartbeat_errors", IXGBE_NETDEV_STAT(tx_heartbeat_errors)},
	{"tx_timeout_count", IXGBE_STAT(tx_timeout_count)},
	{"tx_restart_queue", IXGBE_STAT(restart_queue)},
	{"rx_length_errors", IXGBE_STAT(stats.rlec)},
	{"rx_long_length_errors", IXGBE_STAT(stats.roc)},
	{"rx_short_length_errors", IXGBE_STAT(stats.ruc)},
	{"tx_flow_control_xon", IXGBE_STAT(stats.lxontxc)},
	{"rx_flow_control_xon", IXGBE_STAT(stats.lxonrxc)},
	{"tx_flow_control_xoff", IXGBE_STAT(stats.lxofftxc)},
	{"rx_flow_control_xoff", IXGBE_STAT(stats.lxoffrxc)},
	{"rx_csum_offload_errors", IXGBE_STAT(hw_csum_rx_error)},
	{"alloc_rx_page", IXGBE_STAT(alloc_rx_page)},
	{"alloc_rx_page_failed", IXGBE_STAT(alloc_rx_page_failed)},
	{"alloc_rx_buff_failed", IXGBE_STAT(alloc_rx_buff_failed)},
	{"rx_no_dma_resources", IXGBE_STAT(hw_rx_no_dma_resources)},
	{"os2bmc_rx_by_bmc", IXGBE_STAT(stats.o2bgptc)},
	{"os2bmc_tx_by_bmc", IXGBE_STAT(stats.b2ospc)},
	{"os2bmc_tx_by_host", IXGBE_STAT(stats.o2bspc)},
	{"os2bmc_rx_by_host", IXGBE_STAT(stats.b2ogprc)},
	{"tx_hwtstamp_timeouts", IXGBE_STAT(tx_hwtstamp_timeouts)},
	{"tx_hwtstamp_skipped", IXGBE_STAT(tx_hwtstamp_skipped)},
	{"rx_hwtstamp_cleared", IXGBE_STAT(rx_hwtstamp_cleared)},
	{"tx_ipsec", IXGBE_STAT(tx_ipsec)},
	{"rx_ipsec", IXGBE_STAT(rx_ipsec)},
#ifdef IXGBE_FCOE
	{"fcoe_bad_fccrc", IXGBE_STAT(stats.fccrc)},
	{"rx_fcoe_dropped", IXGBE_STAT(stats.fcoerpdc)},
	{"rx_fcoe_packets", IXGBE_STAT(stats.fcoeprc)},
	{"rx_fcoe_dwords", IXGBE_STAT(stats.fcoedwrc)},
	{"fcoe_noddp", IXGBE_STAT(stats.fcoe_noddp)},
	{"fcoe_noddp_ext_buff", IXGBE_STAT(stats.fcoe_noddp_ext_buff)},
	{"tx_fcoe_packets", IXGBE_STAT(stats.fcoeptc)},
	{"tx_fcoe_dwords", IXGBE_STAT(stats.fcoedwtc)},
#endif /* IXGBE_FCOE */
};

/* ixgbe allocates num_tx_queues and num_rx_queues symmetrically, so we
 * let num_rx_queues evaluate to num_tx_queues. This is used because we
 * do not have a good way to get the max number of rx queues with
 * CONFIG_RPS disabled.
 */
#define IXGBE_NUM_RX_QUEUES netdev->num_tx_queues

#define IXGBE_QUEUE_STATS_LEN ( \
	(netdev->num_tx_queues + IXGBE_NUM_RX_QUEUES) * \
	(sizeof(struct ixgbe_queue_stats) / sizeof(u64)))
#define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats)
#define IXGBE_PB_STATS_LEN ( \
	(sizeof(((struct ixgbe_adapter *)0)->stats.pxonrxc) + \
	 sizeof(((struct ixgbe_adapter *)0)->stats.pxontxc) + \
	 sizeof(((struct ixgbe_adapter *)0)->stats.pxoffrxc) + \
	 sizeof(((struct ixgbe_adapter *)0)->stats.pxofftxc)) \
	/ sizeof(u64))
#define IXGBE_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + \
			 IXGBE_PB_STATS_LEN + \
			 IXGBE_QUEUE_STATS_LEN)

static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = {
	"Register test (offline)", "Eeprom test (offline)",
	"Interrupt test (offline)", "Loopback test (offline)",
	"Link test (on/offline)"
};
#define IXGBE_TEST_LEN sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN

static const char ixgbe_priv_flags_strings[][ETH_GSTRING_LEN] = {
#define IXGBE_PRIV_FLAGS_LEGACY_RX	BIT(0)
	"legacy-rx",
};

#define IXGBE_PRIV_FLAGS_STR_LEN ARRAY_SIZE(ixgbe_priv_flags_strings)

/* currently supported speeds for 10G */
#define ADVRTSD_MSK_10G (SUPPORTED_10000baseT_Full | \
			 SUPPORTED_10000baseKX4_Full | \
			 SUPPORTED_10000baseKR_Full)

#define ixgbe_isbackplane(type) ((type) == ixgbe_media_type_backplane)

static u32 ixgbe_get_supported_10gtypes(struct ixgbe_hw *hw)
{
	if (!ixgbe_isbackplane(hw->phy.media_type))
		return SUPPORTED_10000baseT_Full;

	switch (hw->device_id) {
	case IXGBE_DEV_ID_82598:
	case IXGBE_DEV_ID_82599_KX4:
	case IXGBE_DEV_ID_82599_KX4_MEZZ:
	case IXGBE_DEV_ID_X550EM_X_KX4:
		return SUPPORTED_10000baseKX4_Full;
	case IXGBE_DEV_ID_82598_BX:
	case IXGBE_DEV_ID_82599_KR:
	case IXGBE_DEV_ID_X550EM_X_KR:
	case IXGBE_DEV_ID_X550EM_X_XFI:
		return SUPPORTED_10000baseKR_Full;
	default:
		return SUPPORTED_10000baseKX4_Full |
		       SUPPORTED_10000baseKR_Full;
	}
}

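/* ixgbe_get_link_ksettings - report link modes, port type, speed and duplex.
 *
 * Usage sketch (illustrative; "eth0" is a placeholder):
 *
 *	ethtool eth0
 *
 * prints the supported/advertised link modes assembled below and, when
 * the carrier is up, the negotiated speed and duplex.
 */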
static int ixgbe_get_link_ksettings(struct net_device *netdev,
				    struct ethtool_link_ksettings *cmd)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	ixgbe_link_speed supported_link;
	bool autoneg = false;
	u32 supported, advertising;

	ethtool_convert_link_mode_to_legacy_u32(&supported,
						cmd->link_modes.supported);

	hw->mac.ops.get_link_capabilities(hw, &supported_link, &autoneg);

	/* set the supported link speeds */
	if (supported_link & IXGBE_LINK_SPEED_10GB_FULL)
		supported |= ixgbe_get_supported_10gtypes(hw);
	if (supported_link & IXGBE_LINK_SPEED_1GB_FULL)
		supported |= (ixgbe_isbackplane(hw->phy.media_type)) ?
				   SUPPORTED_1000baseKX_Full :
				   SUPPORTED_1000baseT_Full;
	if (supported_link & IXGBE_LINK_SPEED_100_FULL)
		supported |= SUPPORTED_100baseT_Full;
	if (supported_link & IXGBE_LINK_SPEED_10_FULL)
		supported |= SUPPORTED_10baseT_Full;

	/* default advertised speed if phy.autoneg_advertised isn't set */
	advertising = supported;
	/* set the advertised speeds */
	if (hw->phy.autoneg_advertised) {
		advertising = 0;
		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10_FULL)
			advertising |= ADVERTISED_10baseT_Full;
		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL)
			advertising |= ADVERTISED_100baseT_Full;
		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
			advertising |= supported & ADVRTSD_MSK_10G;
		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) {
			if (supported & SUPPORTED_1000baseKX_Full)
				advertising |= ADVERTISED_1000baseKX_Full;
			else
				advertising |= ADVERTISED_1000baseT_Full;
		}
	} else {
		if (hw->phy.multispeed_fiber && !autoneg) {
			if (supported_link & IXGBE_LINK_SPEED_10GB_FULL)
				advertising = ADVERTISED_10000baseT_Full;
		}
	}

	if (autoneg) {
		supported |= SUPPORTED_Autoneg;
		advertising |= ADVERTISED_Autoneg;
		cmd->base.autoneg = AUTONEG_ENABLE;
	} else
		cmd->base.autoneg = AUTONEG_DISABLE;

	/* Determine the remaining settings based on the PHY type. */
	switch (adapter->hw.phy.type) {
	case ixgbe_phy_tn:
	case ixgbe_phy_aq:
	case ixgbe_phy_x550em_ext_t:
	case ixgbe_phy_fw:
	case ixgbe_phy_cu_unknown:
		supported |= SUPPORTED_TP;
		advertising |= ADVERTISED_TP;
		cmd->base.port = PORT_TP;
		break;
	case ixgbe_phy_qt:
		supported |= SUPPORTED_FIBRE;
		advertising |= ADVERTISED_FIBRE;
		cmd->base.port = PORT_FIBRE;
		break;
	case ixgbe_phy_nl:
	case ixgbe_phy_sfp_passive_tyco:
	case ixgbe_phy_sfp_passive_unknown:
	case ixgbe_phy_sfp_ftl:
	case ixgbe_phy_sfp_avago:
	case ixgbe_phy_sfp_intel:
	case ixgbe_phy_sfp_unknown:
	case ixgbe_phy_qsfp_passive_unknown:
	case ixgbe_phy_qsfp_active_unknown:
	case ixgbe_phy_qsfp_intel:
	case ixgbe_phy_qsfp_unknown:
		/* SFP+ devices, further checking needed */
		switch (adapter->hw.phy.sfp_type) {
		case ixgbe_sfp_type_da_cu:
		case ixgbe_sfp_type_da_cu_core0:
		case ixgbe_sfp_type_da_cu_core1:
			supported |= SUPPORTED_FIBRE;
			advertising |= ADVERTISED_FIBRE;
			cmd->base.port = PORT_DA;
			break;
		case ixgbe_sfp_type_sr:
		case ixgbe_sfp_type_lr:
		case ixgbe_sfp_type_srlr_core0:
		case ixgbe_sfp_type_srlr_core1:
		case ixgbe_sfp_type_1g_sx_core0:
		case ixgbe_sfp_type_1g_sx_core1:
		case ixgbe_sfp_type_1g_lx_core0:
		case ixgbe_sfp_type_1g_lx_core1:
			supported |= SUPPORTED_FIBRE;
			advertising |= ADVERTISED_FIBRE;
			cmd->base.port = PORT_FIBRE;
			break;
		case ixgbe_sfp_type_not_present:
			supported |= SUPPORTED_FIBRE;
			advertising |= ADVERTISED_FIBRE;
			cmd->base.port = PORT_NONE;
			break;
		case ixgbe_sfp_type_1g_cu_core0:
		case ixgbe_sfp_type_1g_cu_core1:
			supported |= SUPPORTED_TP;
			advertising |= ADVERTISED_TP;
			cmd->base.port = PORT_TP;
			break;
		case ixgbe_sfp_type_unknown:
		default:
			supported |= SUPPORTED_FIBRE;
			advertising |= ADVERTISED_FIBRE;
			cmd->base.port = PORT_OTHER;
			break;
		}
		break;
	case ixgbe_phy_xaui:
		supported |= SUPPORTED_FIBRE;
		advertising |= ADVERTISED_FIBRE;
		cmd->base.port = PORT_NONE;
		break;
	case ixgbe_phy_unknown:
	case ixgbe_phy_generic:
	case ixgbe_phy_sfp_unsupported:
	default:
		supported |= SUPPORTED_FIBRE;
		advertising |= ADVERTISED_FIBRE;
		cmd->base.port = PORT_OTHER;
		break;
	}

	/* Indicate pause support */
	supported |= SUPPORTED_Pause;

	switch (hw->fc.requested_mode) {
	case ixgbe_fc_full:
		advertising |= ADVERTISED_Pause;
		break;
	case ixgbe_fc_rx_pause:
		advertising |= ADVERTISED_Pause |
			       ADVERTISED_Asym_Pause;
		break;
	case ixgbe_fc_tx_pause:
		advertising |= ADVERTISED_Asym_Pause;
		break;
	default:
		advertising &= ~(ADVERTISED_Pause |
				 ADVERTISED_Asym_Pause);
	}

	if (netif_carrier_ok(netdev)) {
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			cmd->base.speed = SPEED_10000;
			break;
		case IXGBE_LINK_SPEED_5GB_FULL:
			cmd->base.speed = SPEED_5000;
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			cmd->base.speed = SPEED_2500;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			cmd->base.speed = SPEED_1000;
			break;
		case IXGBE_LINK_SPEED_100_FULL:
			cmd->base.speed = SPEED_100;
			break;
		case IXGBE_LINK_SPEED_10_FULL:
			cmd->base.speed = SPEED_10;
			break;
		default:
			break;
		}
		cmd->base.duplex = DUPLEX_FULL;
	} else {
		cmd->base.speed = SPEED_UNKNOWN;
		cmd->base.duplex = DUPLEX_UNKNOWN;
	}

	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						supported);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						advertising);

	return 0;
}

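/* ixgbe_set_link_ksettings - restrict the advertised link modes.
 *
 * Usage sketch (illustrative; "eth0" is a placeholder):
 *
 *	ethtool -s eth0 speed 1000 duplex full autoneg on
 *
 * Duplex forcing is not supported; on copper and multispeed-fiber parts
 * the request only narrows what is advertised for autonegotiation.
 */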
static int ixgbe_set_link_ksettings(struct net_device *netdev,
				    const struct ethtool_link_ksettings *cmd)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 advertised, old;
	s32 err = 0;
	u32 supported, advertising;

	ethtool_convert_link_mode_to_legacy_u32(&supported,
						cmd->link_modes.supported);
	ethtool_convert_link_mode_to_legacy_u32(&advertising,
						cmd->link_modes.advertising);

	if ((hw->phy.media_type == ixgbe_media_type_copper) ||
	    (hw->phy.multispeed_fiber)) {
		/*
		 * this function does not support duplex forcing, but can
		 * limit the advertising of the adapter to the specified speed
		 */
		if (advertising & ~supported)
			return -EINVAL;

		/* only allow one speed at a time if no autoneg */
		if (!cmd->base.autoneg && hw->phy.multispeed_fiber) {
			if (advertising ==
			    (ADVERTISED_10000baseT_Full |
			     ADVERTISED_1000baseT_Full))
				return -EINVAL;
		}

		old = hw->phy.autoneg_advertised;
		advertised = 0;
		if (advertising & ADVERTISED_10000baseT_Full)
			advertised |= IXGBE_LINK_SPEED_10GB_FULL;

		if (advertising & ADVERTISED_1000baseT_Full)
			advertised |= IXGBE_LINK_SPEED_1GB_FULL;

		if (advertising & ADVERTISED_100baseT_Full)
			advertised |= IXGBE_LINK_SPEED_100_FULL;

		if (advertising & ADVERTISED_10baseT_Full)
			advertised |= IXGBE_LINK_SPEED_10_FULL;

		if (old == advertised)
			return err;
		/* this sets the link speed and restarts auto-neg */
		while (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
			usleep_range(1000, 2000);

		hw->mac.autotry_restart = true;
		err = hw->mac.ops.setup_link(hw, advertised, true);
		if (err) {
			e_info(probe, "setup link failed with code %d\n", err);
			hw->mac.ops.setup_link(hw, old, true);
		}
		clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
	} else {
		/* in this case we currently only support 10Gb/FULL */
		u32 speed = cmd->base.speed;

		if ((cmd->base.autoneg == AUTONEG_ENABLE) ||
		    (advertising != ADVERTISED_10000baseT_Full) ||
		    (speed + cmd->base.duplex != SPEED_10000 + DUPLEX_FULL))
			return -EINVAL;
	}

	return err;
}

static void ixgbe_get_pauseparam(struct net_device *netdev,
				 struct ethtool_pauseparam *pause)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	if (ixgbe_device_supports_autoneg_fc(hw) &&
	    !hw->fc.disable_fc_autoneg)
		pause->autoneg = 1;
	else
		pause->autoneg = 0;

	if (hw->fc.current_mode == ixgbe_fc_rx_pause) {
		pause->rx_pause = 1;
	} else if (hw->fc.current_mode == ixgbe_fc_tx_pause) {
		pause->tx_pause = 1;
	} else if (hw->fc.current_mode == ixgbe_fc_full) {
		pause->rx_pause = 1;
		pause->tx_pause = 1;
	}
}

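/* ixgbe_set_pauseparam - configure link flow control.
 *
 * Usage sketch (illustrative; "eth0" is a placeholder):
 *
 *	ethtool -A eth0 autoneg off rx on tx on
 *
 * If the requested mode differs from the current one, the interface is
 * reinitialized (or the hardware reset) so the change takes effect.
 */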
static int ixgbe_set_pauseparam(struct net_device *netdev,
				struct ethtool_pauseparam *pause)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_fc_info fc = hw->fc;

	/* 82598 does not support link flow control with DCB enabled */
	if ((hw->mac.type == ixgbe_mac_82598EB) &&
	    (adapter->flags & IXGBE_FLAG_DCB_ENABLED))
		return -EINVAL;

	/* some devices do not support autoneg of link flow control */
	if ((pause->autoneg == AUTONEG_ENABLE) &&
	    !ixgbe_device_supports_autoneg_fc(hw))
		return -EINVAL;

	fc.disable_fc_autoneg = (pause->autoneg != AUTONEG_ENABLE);

	if ((pause->rx_pause && pause->tx_pause) || pause->autoneg)
		fc.requested_mode = ixgbe_fc_full;
	else if (pause->rx_pause && !pause->tx_pause)
		fc.requested_mode = ixgbe_fc_rx_pause;
	else if (!pause->rx_pause && pause->tx_pause)
		fc.requested_mode = ixgbe_fc_tx_pause;
	else
		fc.requested_mode = ixgbe_fc_none;

	/* if the thing changed then we'll update and use new autoneg */
	if (memcmp(&fc, &hw->fc, sizeof(struct ixgbe_fc_info))) {
		hw->fc = fc;
		if (netif_running(netdev))
			ixgbe_reinit_locked(adapter);
		else
			ixgbe_reset(adapter);
	}

	return 0;
}

static u32 ixgbe_get_msglevel(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	return adapter->msg_enable;
}

static void ixgbe_set_msglevel(struct net_device *netdev, u32 data)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	adapter->msg_enable = data;
}

static int ixgbe_get_regs_len(struct net_device *netdev)
{
#define IXGBE_REGS_LEN	1139
	return IXGBE_REGS_LEN * sizeof(u32);
}

#define IXGBE_GET_STAT(_A_, _R_) _A_->stats._R_

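/* ixgbe_get_regs - snapshot device registers for ethtool.
 *
 * Usage sketch (illustrative; "eth0" is a placeholder):
 *
 *	ethtool -d eth0 raw on > regs.bin
 *
 * The buffer holds IXGBE_REGS_LEN u32 words, filled below in fixed
 * groups (general, NVM, interrupt, flow control, DMA, ...) so offsets
 * stay stable across MAC types.
 */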
static void ixgbe_get_regs(struct net_device *netdev,
			   struct ethtool_regs *regs, void *p)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 *regs_buff = p;
	u8 i;

	memset(p, 0, IXGBE_REGS_LEN * sizeof(u32));

	regs->version = hw->mac.type << 24 | hw->revision_id << 16 |
			hw->device_id;

	/* General Registers */
	regs_buff[0] = IXGBE_READ_REG(hw, IXGBE_CTRL);
	regs_buff[1] = IXGBE_READ_REG(hw, IXGBE_STATUS);
	regs_buff[2] = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
	regs_buff[3] = IXGBE_READ_REG(hw, IXGBE_ESDP);
	regs_buff[4] = IXGBE_READ_REG(hw, IXGBE_EODSDP);
	regs_buff[5] = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
	regs_buff[6] = IXGBE_READ_REG(hw, IXGBE_FRTIMER);
	regs_buff[7] = IXGBE_READ_REG(hw, IXGBE_TCPTIMER);

	/* NVM Register */
	regs_buff[8] = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
	regs_buff[9] = IXGBE_READ_REG(hw, IXGBE_EERD);
	regs_buff[10] = IXGBE_READ_REG(hw, IXGBE_FLA(hw));
	regs_buff[11] = IXGBE_READ_REG(hw, IXGBE_EEMNGCTL);
	regs_buff[12] = IXGBE_READ_REG(hw, IXGBE_EEMNGDATA);
	regs_buff[13] = IXGBE_READ_REG(hw, IXGBE_FLMNGCTL);
	regs_buff[14] = IXGBE_READ_REG(hw, IXGBE_FLMNGDATA);
	regs_buff[15] = IXGBE_READ_REG(hw, IXGBE_FLMNGCNT);
	regs_buff[16] = IXGBE_READ_REG(hw, IXGBE_FLOP);
	regs_buff[17] = IXGBE_READ_REG(hw, IXGBE_GRC(hw));

	/* Interrupt */
	/* don't read EICR because it can clear interrupt causes, instead
	 * read EICS which is a shadow but doesn't clear EICR */
	regs_buff[18] = IXGBE_READ_REG(hw, IXGBE_EICS);
	regs_buff[19] = IXGBE_READ_REG(hw, IXGBE_EICS);
	regs_buff[20] = IXGBE_READ_REG(hw, IXGBE_EIMS);
	regs_buff[21] = IXGBE_READ_REG(hw, IXGBE_EIMC);
	regs_buff[22] = IXGBE_READ_REG(hw, IXGBE_EIAC);
	regs_buff[23] = IXGBE_READ_REG(hw, IXGBE_EIAM);
	regs_buff[24] = IXGBE_READ_REG(hw, IXGBE_EITR(0));
	regs_buff[25] = IXGBE_READ_REG(hw, IXGBE_IVAR(0));
	regs_buff[26] = IXGBE_READ_REG(hw, IXGBE_MSIXT);
	regs_buff[27] = IXGBE_READ_REG(hw, IXGBE_MSIXPBA);
	regs_buff[28] = IXGBE_READ_REG(hw, IXGBE_PBACL(0));
	regs_buff[29] = IXGBE_READ_REG(hw, IXGBE_GPIE);

	/* Flow Control */
	regs_buff[30] = IXGBE_READ_REG(hw, IXGBE_PFCTOP);
	for (i = 0; i < 4; i++)
		regs_buff[31 + i] = IXGBE_READ_REG(hw, IXGBE_FCTTV(i));
	for (i = 0; i < 8; i++) {
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL(i));
			regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH(i));
			break;
		case ixgbe_mac_82599EB:
		case ixgbe_mac_X540:
		case ixgbe_mac_X550:
		case ixgbe_mac_X550EM_x:
		case ixgbe_mac_x550em_a:
			regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL_82599(i));
			regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
			break;
		default:
			break;
		}
	}
	regs_buff[51] = IXGBE_READ_REG(hw, IXGBE_FCRTV);
	regs_buff[52] = IXGBE_READ_REG(hw, IXGBE_TFCS);

	/* Receive DMA */
	for (i = 0; i < 64; i++)
		regs_buff[53 + i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i));
	for (i = 0; i < 64; i++)
		regs_buff[117 + i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i));
	for (i = 0; i < 64; i++)
		regs_buff[181 + i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i));
	for (i = 0; i < 64; i++)
		regs_buff[245 + i] = IXGBE_READ_REG(hw, IXGBE_RDH(i));
	for (i = 0; i < 64; i++)
		regs_buff[309 + i] = IXGBE_READ_REG(hw, IXGBE_RDT(i));
	for (i = 0; i < 64; i++)
		regs_buff[373 + i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
	for (i = 0; i < 16; i++)
		regs_buff[437 + i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
	for (i = 0; i < 16; i++)
		regs_buff[453 + i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
	regs_buff[469] = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
	for (i = 0; i < 8; i++)
		regs_buff[470 + i] = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
	regs_buff[478] = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	regs_buff[479] = IXGBE_READ_REG(hw, IXGBE_DROPEN);

	/* Receive */
	regs_buff[480] = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
	regs_buff[481] = IXGBE_READ_REG(hw, IXGBE_RFCTL);
	for (i = 0; i < 16; i++)
		regs_buff[482 + i] = IXGBE_READ_REG(hw, IXGBE_RAL(i));
	for (i = 0; i < 16; i++)
		regs_buff[498 + i] = IXGBE_READ_REG(hw, IXGBE_RAH(i));
	regs_buff[514] = IXGBE_READ_REG(hw, IXGBE_PSRTYPE(0));
	regs_buff[515] = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	regs_buff[516] = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
	regs_buff[517] = IXGBE_READ_REG(hw, IXGBE_MCSTCTRL);
	regs_buff[518] = IXGBE_READ_REG(hw, IXGBE_MRQC);
	regs_buff[519] = IXGBE_READ_REG(hw, IXGBE_VMD_CTL);
	for (i = 0; i < 8; i++)
		regs_buff[520 + i] = IXGBE_READ_REG(hw, IXGBE_IMIR(i));
	for (i = 0; i < 8; i++)
		regs_buff[528 + i] = IXGBE_READ_REG(hw, IXGBE_IMIREXT(i));
	regs_buff[536] = IXGBE_READ_REG(hw, IXGBE_IMIRVP);

	/* Transmit */
	for (i = 0; i < 32; i++)
		regs_buff[537 + i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i));
	for (i = 0; i < 32; i++)
		regs_buff[569 + i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i));
	for (i = 0; i < 32; i++)
		regs_buff[601 + i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i));
	for (i = 0; i < 32; i++)
		regs_buff[633 + i] = IXGBE_READ_REG(hw, IXGBE_TDH(i));
	for (i = 0; i < 32; i++)
		regs_buff[665 + i] = IXGBE_READ_REG(hw, IXGBE_TDT(i));
	for (i = 0; i < 32; i++)
		regs_buff[697 + i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
	for (i = 0; i < 32; i++)
		regs_buff[729 + i] = IXGBE_READ_REG(hw, IXGBE_TDWBAL(i));
	for (i = 0; i < 32; i++)
		regs_buff[761 + i] = IXGBE_READ_REG(hw, IXGBE_TDWBAH(i));
	regs_buff[793] = IXGBE_READ_REG(hw, IXGBE_DTXCTL);
	for (i = 0; i < 16; i++)
		regs_buff[794 + i] = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
	regs_buff[810] = IXGBE_READ_REG(hw, IXGBE_TIPG);
	for (i = 0; i < 8; i++)
		regs_buff[811 + i] = IXGBE_READ_REG(hw, IXGBE_TXPBSIZE(i));
	regs_buff[819] = IXGBE_READ_REG(hw, IXGBE_MNGTXMAP);

	/* Wake Up */
	regs_buff[820] = IXGBE_READ_REG(hw, IXGBE_WUC);
	regs_buff[821] = IXGBE_READ_REG(hw, IXGBE_WUFC);
	regs_buff[822] = IXGBE_READ_REG(hw, IXGBE_WUS);
	regs_buff[823] = IXGBE_READ_REG(hw, IXGBE_IPAV);
	regs_buff[824] = IXGBE_READ_REG(hw, IXGBE_IP4AT);
	regs_buff[825] = IXGBE_READ_REG(hw, IXGBE_IP6AT);
	regs_buff[826] = IXGBE_READ_REG(hw, IXGBE_WUPL);
	regs_buff[827] = IXGBE_READ_REG(hw, IXGBE_WUPM);
	regs_buff[828] = IXGBE_READ_REG(hw, IXGBE_FHFT(0));

	/* DCB */
	regs_buff[829] = IXGBE_READ_REG(hw, IXGBE_RMCS);   /* same as FCCFG  */
	regs_buff[831] = IXGBE_READ_REG(hw, IXGBE_PDPMCS); /* same as RTTPCS */

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_DPMCS);
		regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RUPPBMR);
		for (i = 0; i < 8; i++)
			regs_buff[833 + i] =
				IXGBE_READ_REG(hw, IXGBE_RT2CR(i));
		for (i = 0; i < 8; i++)
			regs_buff[841 + i] =
				IXGBE_READ_REG(hw, IXGBE_RT2SR(i));
		for (i = 0; i < 8; i++)
			regs_buff[849 + i] =
				IXGBE_READ_REG(hw, IXGBE_TDTQ2TCCR(i));
		for (i = 0; i < 8; i++)
			regs_buff[857 + i] =
				IXGBE_READ_REG(hw, IXGBE_TDTQ2TCSR(i));
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
		regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RTRPCS);
		for (i = 0; i < 8; i++)
			regs_buff[833 + i] =
				IXGBE_READ_REG(hw, IXGBE_RTRPT4C(i));
		for (i = 0; i < 8; i++)
			regs_buff[841 + i] =
				IXGBE_READ_REG(hw, IXGBE_RTRPT4S(i));
		for (i = 0; i < 8; i++)
			regs_buff[849 + i] =
				IXGBE_READ_REG(hw, IXGBE_RTTDT2C(i));
		for (i = 0; i < 8; i++)
			regs_buff[857 + i] =
				IXGBE_READ_REG(hw, IXGBE_RTTDT2S(i));
		break;
	default:
		break;
	}

	for (i = 0; i < 8; i++)
		regs_buff[865 + i] =
		IXGBE_READ_REG(hw, IXGBE_TDPT2TCCR(i)); /* same as RTTPT2C */
	for (i = 0; i < 8; i++)
		regs_buff[873 + i] =
		IXGBE_READ_REG(hw, IXGBE_TDPT2TCSR(i)); /* same as RTTPT2S */

	/* Statistics */
	regs_buff[881] = IXGBE_GET_STAT(adapter, crcerrs);
	regs_buff[882] = IXGBE_GET_STAT(adapter, illerrc);
	regs_buff[883] = IXGBE_GET_STAT(adapter, errbc);
	regs_buff[884] = IXGBE_GET_STAT(adapter, mspdc);
	for (i = 0; i < 8; i++)
		regs_buff[885 + i] = IXGBE_GET_STAT(adapter, mpc[i]);
	regs_buff[893] = IXGBE_GET_STAT(adapter, mlfc);
	regs_buff[894] = IXGBE_GET_STAT(adapter, mrfc);
	regs_buff[895] = IXGBE_GET_STAT(adapter, rlec);
	regs_buff[896] = IXGBE_GET_STAT(adapter, lxontxc);
	regs_buff[897] = IXGBE_GET_STAT(adapter, lxonrxc);
	regs_buff[898] = IXGBE_GET_STAT(adapter, lxofftxc);
	regs_buff[899] = IXGBE_GET_STAT(adapter, lxoffrxc);
	for (i = 0; i < 8; i++)
		regs_buff[900 + i] = IXGBE_GET_STAT(adapter, pxontxc[i]);
	for (i = 0; i < 8; i++)
		regs_buff[908 + i] = IXGBE_GET_STAT(adapter, pxonrxc[i]);
	for (i = 0; i < 8; i++)
		regs_buff[916 + i] = IXGBE_GET_STAT(adapter, pxofftxc[i]);
	for (i = 0; i < 8; i++)
		regs_buff[924 + i] = IXGBE_GET_STAT(adapter, pxoffrxc[i]);
	regs_buff[932] = IXGBE_GET_STAT(adapter, prc64);
	regs_buff[933] = IXGBE_GET_STAT(adapter, prc127);
	regs_buff[934] = IXGBE_GET_STAT(adapter, prc255);
	regs_buff[935] = IXGBE_GET_STAT(adapter, prc511);
	regs_buff[936] = IXGBE_GET_STAT(adapter, prc1023);
	regs_buff[937] = IXGBE_GET_STAT(adapter, prc1522);
	regs_buff[938] = IXGBE_GET_STAT(adapter, gprc);
	regs_buff[939] = IXGBE_GET_STAT(adapter, bprc);
	regs_buff[940] = IXGBE_GET_STAT(adapter, mprc);
	regs_buff[941] = IXGBE_GET_STAT(adapter, gptc);
	regs_buff[942] = (u32)IXGBE_GET_STAT(adapter, gorc);
	regs_buff[943] = (u32)(IXGBE_GET_STAT(adapter, gorc) >> 32);
	regs_buff[944] = (u32)IXGBE_GET_STAT(adapter, gotc);
	regs_buff[945] = (u32)(IXGBE_GET_STAT(adapter, gotc) >> 32);
	for (i = 0; i < 8; i++)
		regs_buff[946 + i] = IXGBE_GET_STAT(adapter, rnbc[i]);
	regs_buff[954] = IXGBE_GET_STAT(adapter, ruc);
	regs_buff[955] = IXGBE_GET_STAT(adapter, rfc);
	regs_buff[956] = IXGBE_GET_STAT(adapter, roc);
	regs_buff[957] = IXGBE_GET_STAT(adapter, rjc);
	regs_buff[958] = IXGBE_GET_STAT(adapter, mngprc);
	regs_buff[959] = IXGBE_GET_STAT(adapter, mngpdc);
	regs_buff[960] = IXGBE_GET_STAT(adapter, mngptc);
	regs_buff[961] = (u32)IXGBE_GET_STAT(adapter, tor);
	regs_buff[962] = (u32)(IXGBE_GET_STAT(adapter, tor) >> 32);
	regs_buff[963] = IXGBE_GET_STAT(adapter, tpr);
	regs_buff[964] = IXGBE_GET_STAT(adapter, tpt);
	regs_buff[965] = IXGBE_GET_STAT(adapter, ptc64);
	regs_buff[966] = IXGBE_GET_STAT(adapter, ptc127);
	regs_buff[967] = IXGBE_GET_STAT(adapter, ptc255);
	regs_buff[968] = IXGBE_GET_STAT(adapter, ptc511);
	regs_buff[969] = IXGBE_GET_STAT(adapter, ptc1023);
	regs_buff[970] = IXGBE_GET_STAT(adapter, ptc1522);
	regs_buff[971] = IXGBE_GET_STAT(adapter, mptc);
	regs_buff[972] = IXGBE_GET_STAT(adapter, bptc);
	regs_buff[973] = IXGBE_GET_STAT(adapter, xec);
	for (i = 0; i < 16; i++)
		regs_buff[974 + i] = IXGBE_GET_STAT(adapter, qprc[i]);
	for (i = 0; i < 16; i++)
		regs_buff[990 + i] = IXGBE_GET_STAT(adapter, qptc[i]);
	for (i = 0; i < 16; i++)
		regs_buff[1006 + i] = IXGBE_GET_STAT(adapter, qbrc[i]);
	for (i = 0; i < 16; i++)
		regs_buff[1022 + i] = IXGBE_GET_STAT(adapter, qbtc[i]);

	/* MAC */
	regs_buff[1038] = IXGBE_READ_REG(hw, IXGBE_PCS1GCFIG);
	regs_buff[1039] = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
	regs_buff[1040] = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
	regs_buff[1041] = IXGBE_READ_REG(hw, IXGBE_PCS1GDBG0);
	regs_buff[1042] = IXGBE_READ_REG(hw, IXGBE_PCS1GDBG1);
	regs_buff[1043] = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
	regs_buff[1044] = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
	regs_buff[1045] = IXGBE_READ_REG(hw, IXGBE_PCS1GANNP);
	regs_buff[1046] = IXGBE_READ_REG(hw, IXGBE_PCS1GANLPNP);
	regs_buff[1047] = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	regs_buff[1048] = IXGBE_READ_REG(hw, IXGBE_HLREG1);
	regs_buff[1049] = IXGBE_READ_REG(hw, IXGBE_PAP);
	regs_buff[1050] = IXGBE_READ_REG(hw, IXGBE_MACA);
	regs_buff[1051] = IXGBE_READ_REG(hw, IXGBE_APAE);
	regs_buff[1052] = IXGBE_READ_REG(hw, IXGBE_ARD);
	regs_buff[1053] = IXGBE_READ_REG(hw, IXGBE_AIS);
	regs_buff[1054] = IXGBE_READ_REG(hw, IXGBE_MSCA);
	regs_buff[1055] = IXGBE_READ_REG(hw, IXGBE_MSRWD);
	regs_buff[1056] = IXGBE_READ_REG(hw, IXGBE_MLADD);
	regs_buff[1057] = IXGBE_READ_REG(hw, IXGBE_MHADD);
	regs_buff[1058] = IXGBE_READ_REG(hw, IXGBE_TREG);
	regs_buff[1059] = IXGBE_READ_REG(hw, IXGBE_PCSS1);
	regs_buff[1060] = IXGBE_READ_REG(hw, IXGBE_PCSS2);
	regs_buff[1061] = IXGBE_READ_REG(hw, IXGBE_XPCSS);
	regs_buff[1062] = IXGBE_READ_REG(hw, IXGBE_SERDESC);
	regs_buff[1063] = IXGBE_READ_REG(hw, IXGBE_MACS);
	regs_buff[1064] = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	regs_buff[1065] = IXGBE_READ_REG(hw, IXGBE_LINKS);
	regs_buff[1066] = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
	regs_buff[1067] = IXGBE_READ_REG(hw, IXGBE_AUTOC3);
	regs_buff[1068] = IXGBE_READ_REG(hw, IXGBE_ANLP1);
	regs_buff[1069] = IXGBE_READ_REG(hw, IXGBE_ANLP2);
	regs_buff[1070] = IXGBE_READ_REG(hw, IXGBE_ATLASCTL);

	/* Diagnostic */
	regs_buff[1071] = IXGBE_READ_REG(hw, IXGBE_RDSTATCTL);
	for (i = 0; i < 8; i++)
		regs_buff[1072 + i] = IXGBE_READ_REG(hw, IXGBE_RDSTAT(i));
	regs_buff[1080] = IXGBE_READ_REG(hw, IXGBE_RDHMPN);
	for (i = 0; i < 4; i++)
		regs_buff[1081 + i] = IXGBE_READ_REG(hw, IXGBE_RIC_DW(i));
	regs_buff[1085] = IXGBE_READ_REG(hw, IXGBE_RDPROBE);
	regs_buff[1086] = IXGBE_READ_REG(hw, IXGBE_TDSTATCTL);
	for (i = 0; i < 8; i++)
		regs_buff[1087 + i] = IXGBE_READ_REG(hw, IXGBE_TDSTAT(i));
	regs_buff[1095] = IXGBE_READ_REG(hw, IXGBE_TDHMPN);
	for (i = 0; i < 4; i++)
		regs_buff[1096 + i] = IXGBE_READ_REG(hw, IXGBE_TIC_DW(i));
	regs_buff[1100] = IXGBE_READ_REG(hw, IXGBE_TDPROBE);
	regs_buff[1101] = IXGBE_READ_REG(hw, IXGBE_TXBUFCTRL);
	for (i = 0; i < 4; i++)
		regs_buff[1102 + i] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA(i));
	regs_buff[1106] = IXGBE_READ_REG(hw, IXGBE_RXBUFCTRL);
	for (i = 0; i < 4; i++)
		regs_buff[1107 + i] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA(i));
	for (i = 0; i < 8; i++)
		regs_buff[1111 + i] = IXGBE_READ_REG(hw, IXGBE_PCIE_DIAG(i));
	regs_buff[1119] = IXGBE_READ_REG(hw, IXGBE_RFVAL);
	regs_buff[1120] = IXGBE_READ_REG(hw, IXGBE_MDFTC1);
	regs_buff[1121] = IXGBE_READ_REG(hw, IXGBE_MDFTC2);
	regs_buff[1122] = IXGBE_READ_REG(hw, IXGBE_MDFTFIFO1);
	regs_buff[1123] = IXGBE_READ_REG(hw, IXGBE_MDFTFIFO2);
	regs_buff[1124] = IXGBE_READ_REG(hw, IXGBE_MDFTS);
	regs_buff[1125] = IXGBE_READ_REG(hw, IXGBE_PCIEECCCTL);
	regs_buff[1126] = IXGBE_READ_REG(hw, IXGBE_PBTXECC);
	regs_buff[1127] = IXGBE_READ_REG(hw, IXGBE_PBRXECC);

	/* 82599 X540 specific registers */
	regs_buff[1128] = IXGBE_READ_REG(hw, IXGBE_MFLCN);

	/* 82599 X540 specific DCB registers */
	regs_buff[1129] = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC);
	regs_buff[1130] = IXGBE_READ_REG(hw, IXGBE_RTTUP2TC);
	for (i = 0; i < 4; i++)
		regs_buff[1131 + i] = IXGBE_READ_REG(hw, IXGBE_TXLLQ(i));
	regs_buff[1135] = IXGBE_READ_REG(hw, IXGBE_RTTBCNRM);
					/* same as RTTQCNRM */
	regs_buff[1136] = IXGBE_READ_REG(hw, IXGBE_RTTBCNRD);
					/* same as RTTQCNRR */

	/* X540 specific DCB registers */
	regs_buff[1137] = IXGBE_READ_REG(hw, IXGBE_RTTQCNCR);
	regs_buff[1138] = IXGBE_READ_REG(hw, IXGBE_RTTQCNTG);
}

static int ixgbe_get_eeprom_len(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	return adapter->hw.eeprom.word_size * 2;
}

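/* EEPROM access.  Usage sketch (illustrative; "eth0" is a placeholder):
 *
 *	ethtool -e eth0			(dump)
 *	ethtool -E eth0 magic 0x... ...	(write)
 *
 * Writes are gated by the magic value, which must match
 * (device_id << 16 | vendor_id) as checked in ixgbe_set_eeprom().  The
 * NVM is word (16-bit) addressable and little-endian, so byte-granular
 * requests are rounded out to whole words below.
 */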
static int ixgbe_get_eeprom(struct net_device *netdev,
			    struct ethtool_eeprom *eeprom, u8 *bytes)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u16 *eeprom_buff;
	int first_word, last_word, eeprom_len;
	int ret_val = 0;
	u16 i;

	if (eeprom->len == 0)
		return -EINVAL;

	eeprom->magic = hw->vendor_id | (hw->device_id << 16);

	first_word = eeprom->offset >> 1;
	last_word = (eeprom->offset + eeprom->len - 1) >> 1;
	eeprom_len = last_word - first_word + 1;

	eeprom_buff = kmalloc_array(eeprom_len, sizeof(u16), GFP_KERNEL);
	if (!eeprom_buff)
		return -ENOMEM;

	ret_val = hw->eeprom.ops.read_buffer(hw, first_word, eeprom_len,
					     eeprom_buff);

	/* Device's eeprom is always little-endian, word addressable */
	for (i = 0; i < eeprom_len; i++)
		le16_to_cpus(&eeprom_buff[i]);

	memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len);
	kfree(eeprom_buff);

	return ret_val;
}

static int ixgbe_set_eeprom(struct net_device *netdev,
			    struct ethtool_eeprom *eeprom, u8 *bytes)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u16 *eeprom_buff;
	void *ptr;
	int max_len, first_word, last_word, ret_val = 0;
	u16 i;

	if (eeprom->len == 0)
		return -EINVAL;

	if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16)))
		return -EINVAL;

	max_len = hw->eeprom.word_size * 2;

	first_word = eeprom->offset >> 1;
	last_word = (eeprom->offset + eeprom->len - 1) >> 1;
	eeprom_buff = kmalloc(max_len, GFP_KERNEL);
	if (!eeprom_buff)
		return -ENOMEM;

	ptr = eeprom_buff;

	if (eeprom->offset & 1) {
		/*
		 * need read/modify/write of first changed EEPROM word
		 * only the second byte of the word is being modified
		 */
		ret_val = hw->eeprom.ops.read(hw, first_word, &eeprom_buff[0]);
		if (ret_val)
			goto err;

		ptr++;
	}
	if ((eeprom->offset + eeprom->len) & 1) {
		/*
		 * need read/modify/write of last changed EEPROM word
		 * only the first byte of the word is being modified
		 */
		ret_val = hw->eeprom.ops.read(hw, last_word,
					      &eeprom_buff[last_word - first_word]);
		if (ret_val)
			goto err;
	}

	/* Device's eeprom is always little-endian, word addressable */
	for (i = 0; i < last_word - first_word + 1; i++)
		le16_to_cpus(&eeprom_buff[i]);

	memcpy(ptr, bytes, eeprom->len);

	for (i = 0; i < last_word - first_word + 1; i++)
		cpu_to_le16s(&eeprom_buff[i]);

	ret_val = hw->eeprom.ops.write_buffer(hw, first_word,
					      last_word - first_word + 1,
					      eeprom_buff);

	/* Update the checksum */
	if (ret_val == 0)
		hw->eeprom.ops.update_checksum(hw);

err:
	kfree(eeprom_buff);
	return ret_val;
}

static void ixgbe_get_drvinfo(struct net_device *netdev,
			      struct ethtool_drvinfo *drvinfo)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	strlcpy(drvinfo->driver, ixgbe_driver_name, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, ixgbe_driver_version,
		sizeof(drvinfo->version));

	strlcpy(drvinfo->fw_version, adapter->eeprom_id,
		sizeof(drvinfo->fw_version));

	strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
		sizeof(drvinfo->bus_info));

	drvinfo->n_priv_flags = IXGBE_PRIV_FLAGS_STR_LEN;
}

static void ixgbe_get_ringparam(struct net_device *netdev,
				struct ethtool_ringparam *ring)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_ring *tx_ring = adapter->tx_ring[0];
	struct ixgbe_ring *rx_ring = adapter->rx_ring[0];

	ring->rx_max_pending = IXGBE_MAX_RXD;
	ring->tx_max_pending = IXGBE_MAX_TXD;
	ring->rx_pending = rx_ring->count;
	ring->tx_pending = tx_ring->count;
}

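/* ixgbe_set_ringparam - resize the Tx/Rx descriptor rings.
 *
 * Usage sketch (illustrative; "eth0" is a placeholder):
 *
 *	ethtool -G eth0 rx 1024 tx 1024
 *
 * Requests are clamped to the IXGBE_MIN/MAX_*XD bounds and aligned to
 * the required descriptor multiple.  While the interface is running,
 * new rings are allocated before the old ones are freed, so a failed
 * allocation leaves the existing rings intact.
 */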
static int ixgbe_set_ringparam(struct net_device *netdev,
			       struct ethtool_ringparam *ring)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_ring *temp_ring;
	int i, j, err = 0;
	u32 new_rx_count, new_tx_count;

	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
		return -EINVAL;

	new_tx_count = clamp_t(u32, ring->tx_pending,
			       IXGBE_MIN_TXD, IXGBE_MAX_TXD);
	new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE);

	new_rx_count = clamp_t(u32, ring->rx_pending,
			       IXGBE_MIN_RXD, IXGBE_MAX_RXD);
	new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE);

	if ((new_tx_count == adapter->tx_ring_count) &&
	    (new_rx_count == adapter->rx_ring_count)) {
		/* nothing to do */
		return 0;
	}

	while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
		usleep_range(1000, 2000);

	if (!netif_running(adapter->netdev)) {
		for (i = 0; i < adapter->num_tx_queues; i++)
			adapter->tx_ring[i]->count = new_tx_count;
		for (i = 0; i < adapter->num_xdp_queues; i++)
			adapter->xdp_ring[i]->count = new_tx_count;
		for (i = 0; i < adapter->num_rx_queues; i++)
			adapter->rx_ring[i]->count = new_rx_count;
		adapter->tx_ring_count = new_tx_count;
		adapter->xdp_ring_count = new_tx_count;
		adapter->rx_ring_count = new_rx_count;
		goto clear_reset;
	}

	/* allocate temporary buffer to store rings in */
	i = max_t(int, adapter->num_tx_queues + adapter->num_xdp_queues,
		  adapter->num_rx_queues);
	temp_ring = vmalloc(i * sizeof(struct ixgbe_ring));

	if (!temp_ring) {
		err = -ENOMEM;
		goto clear_reset;
	}

	ixgbe_down(adapter);

	/*
	 * Setup new Tx resources and free the old Tx resources in that order.
	 * We can then assign the new resources to the rings via a memcpy.
	 * The advantage to this approach is that we are guaranteed to still
	 * have resources even in the case of an allocation failure.
	 */
	if (new_tx_count != adapter->tx_ring_count) {
		for (i = 0; i < adapter->num_tx_queues; i++) {
			memcpy(&temp_ring[i], adapter->tx_ring[i],
			       sizeof(struct ixgbe_ring));

			temp_ring[i].count = new_tx_count;
			err = ixgbe_setup_tx_resources(&temp_ring[i]);
			if (err) {
				while (i) {
					i--;
					ixgbe_free_tx_resources(&temp_ring[i]);
				}
				goto err_setup;
			}
		}

		for (j = 0; j < adapter->num_xdp_queues; j++, i++) {
			memcpy(&temp_ring[i], adapter->xdp_ring[j],
			       sizeof(struct ixgbe_ring));

			temp_ring[i].count = new_tx_count;
			err = ixgbe_setup_tx_resources(&temp_ring[i]);
			if (err) {
				while (i) {
					i--;
					ixgbe_free_tx_resources(&temp_ring[i]);
				}
				goto err_setup;
			}
		}

		for (i = 0; i < adapter->num_tx_queues; i++) {
			ixgbe_free_tx_resources(adapter->tx_ring[i]);

			memcpy(adapter->tx_ring[i], &temp_ring[i],
			       sizeof(struct ixgbe_ring));
		}
		for (j = 0; j < adapter->num_xdp_queues; j++, i++) {
			ixgbe_free_tx_resources(adapter->xdp_ring[j]);

			memcpy(adapter->xdp_ring[j], &temp_ring[i],
			       sizeof(struct ixgbe_ring));
		}

		adapter->tx_ring_count = new_tx_count;
	}

	/* Repeat the process for the Rx rings if needed */
	if (new_rx_count != adapter->rx_ring_count) {
		for (i = 0; i < adapter->num_rx_queues; i++) {
			memcpy(&temp_ring[i], adapter->rx_ring[i],
			       sizeof(struct ixgbe_ring));

			/* Clear copied XDP RX-queue info */
			memset(&temp_ring[i].xdp_rxq, 0,
			       sizeof(temp_ring[i].xdp_rxq));

			temp_ring[i].count = new_rx_count;
			err = ixgbe_setup_rx_resources(adapter, &temp_ring[i]);
			if (err) {
				while (i) {
					i--;
					ixgbe_free_rx_resources(&temp_ring[i]);
				}
				goto err_setup;
			}
		}

		for (i = 0; i < adapter->num_rx_queues; i++) {
			ixgbe_free_rx_resources(adapter->rx_ring[i]);

			memcpy(adapter->rx_ring[i], &temp_ring[i],
			       sizeof(struct ixgbe_ring));
		}

		adapter->rx_ring_count = new_rx_count;
	}

err_setup:
	ixgbe_up(adapter);
	vfree(temp_ring);
clear_reset:
	clear_bit(__IXGBE_RESETTING, &adapter->state);
	return err;
}

static int ixgbe_get_sset_count(struct net_device *netdev, int sset)
{
	switch (sset) {
	case ETH_SS_TEST:
		return IXGBE_TEST_LEN;
	case ETH_SS_STATS:
		return IXGBE_STATS_LEN;
	case ETH_SS_PRIV_FLAGS:
		return IXGBE_PRIV_FLAGS_STR_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

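/* ixgbe_get_ethtool_stats - fill the u64 array behind `ethtool -S`.
 *
 * Usage sketch (illustrative; "eth0" is a placeholder):
 *
 *	ethtool -S eth0
 *
 * The values written here must stay in the same order as the names
 * emitted by ixgbe_get_strings(): global stats, then per-queue
 * packet/byte pairs, then per-packet-buffer XON/XOFF counters.
 */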
static void ixgbe_get_ethtool_stats(struct net_device *netdev,
				    struct ethtool_stats *stats, u64 *data)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct rtnl_link_stats64 temp;
	const struct rtnl_link_stats64 *net_stats;
	unsigned int start;
	struct ixgbe_ring *ring;
	int i, j;
	char *p = NULL;

	ixgbe_update_stats(adapter);
	net_stats = dev_get_stats(netdev, &temp);
	for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
		switch (ixgbe_gstrings_stats[i].type) {
		case NETDEV_STATS:
			p = (char *) net_stats +
					ixgbe_gstrings_stats[i].stat_offset;
			break;
		case IXGBE_STATS:
			p = (char *) adapter +
					ixgbe_gstrings_stats[i].stat_offset;
			break;
		default:
			data[i] = 0;
			continue;
		}

		data[i] = (ixgbe_gstrings_stats[i].sizeof_stat ==
			   sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
	}
	for (j = 0; j < netdev->num_tx_queues; j++) {
		ring = adapter->tx_ring[j];
		if (!ring) {
			data[i] = 0;
			data[i+1] = 0;
			i += 2;
			continue;
		}

		do {
			start = u64_stats_fetch_begin_irq(&ring->syncp);
			data[i] = ring->stats.packets;
			data[i+1] = ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
		i += 2;
	}
	for (j = 0; j < IXGBE_NUM_RX_QUEUES; j++) {
		ring = adapter->rx_ring[j];
		if (!ring) {
			data[i] = 0;
			data[i+1] = 0;
			i += 2;
			continue;
		}

		do {
			start = u64_stats_fetch_begin_irq(&ring->syncp);
			data[i] = ring->stats.packets;
			data[i+1] = ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
		i += 2;
	}

	for (j = 0; j < IXGBE_MAX_PACKET_BUFFERS; j++) {
		data[i++] = adapter->stats.pxontxc[j];
		data[i++] = adapter->stats.pxofftxc[j];
	}
	for (j = 0; j < IXGBE_MAX_PACKET_BUFFERS; j++) {
		data[i++] = adapter->stats.pxonrxc[j];
		data[i++] = adapter->stats.pxoffrxc[j];
	}
}

static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
			      u8 *data)
{
	char *p = (char *)data;
	unsigned int i;

	switch (stringset) {
	case ETH_SS_TEST:
		for (i = 0; i < IXGBE_TEST_LEN; i++) {
			memcpy(data, ixgbe_gstrings_test[i], ETH_GSTRING_LEN);
			data += ETH_GSTRING_LEN;
		}
		break;
	case ETH_SS_STATS:
		for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
			memcpy(p, ixgbe_gstrings_stats[i].stat_string,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		for (i = 0; i < netdev->num_tx_queues; i++) {
			sprintf(p, "tx_queue_%u_packets", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "tx_queue_%u_bytes", i);
			p += ETH_GSTRING_LEN;
		}
		for (i = 0; i < IXGBE_NUM_RX_QUEUES; i++) {
			sprintf(p, "rx_queue_%u_packets", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "rx_queue_%u_bytes", i);
			p += ETH_GSTRING_LEN;
		}
		for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) {
			sprintf(p, "tx_pb_%u_pxon", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "tx_pb_%u_pxoff", i);
			p += ETH_GSTRING_LEN;
		}
		for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) {
			sprintf(p, "rx_pb_%u_pxon", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "rx_pb_%u_pxoff", i);
			p += ETH_GSTRING_LEN;
		}
		/* BUG_ON(p - data != IXGBE_STATS_LEN * ETH_GSTRING_LEN); */
		break;
	case ETH_SS_PRIV_FLAGS:
		memcpy(data, ixgbe_priv_flags_strings,
		       IXGBE_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN);
	}
}

static int ixgbe_link_test(struct ixgbe_adapter *adapter, u64 *data)
{
	struct ixgbe_hw *hw = &adapter->hw;
	bool link_up;
	u32 link_speed = 0;

	if (ixgbe_removed(hw->hw_addr)) {
		*data = 1;
		return 1;
	}
	*data = 0;

	hw->mac.ops.check_link(hw, &link_speed, &link_up, true);
	if (link_up)
		return *data;
	else
		*data = 1;
	return *data;
}

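/* Offline self-test support.  The register, EEPROM, interrupt, loopback
 * and link tests below back the ethtool self-test interface, e.g.
 * (illustrative; "eth0" is a placeholder):
 *
 *	ethtool -t eth0 offline
 *
 * Each test stores a nonzero value in its result slot on failure.
 */
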
/* ethtool register test data */
struct ixgbe_reg_test {
	u16 reg;
	u8  array_len;
	u8  test_type;
	u32 mask;
	u32 write;
};

/* In the hardware, registers are laid out either singly, in arrays
 * spaced 0x40 bytes apart, or in contiguous tables.  We assume
 * most tests take place on arrays or single registers (handled
 * as a single-element array) and special-case the tables.
 * Table tests are always pattern tests.
 *
 * We also make provision for some required setup steps by specifying
 * registers to be written without any read-back testing.
 */

#define PATTERN_TEST	1
#define SET_READ_TEST	2
#define WRITE_NO_TEST	3
#define TABLE32_TEST	4
#define TABLE64_TEST_LO	5
#define TABLE64_TEST_HI	6

/* default 82599 register test */
static const struct ixgbe_reg_test reg_test_82599[] = {
	{ IXGBE_FCRTL_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
	{ IXGBE_FCRTH_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
	{ IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 },
	{ IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 },
	{ IXGBE_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
	{ IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE },
	{ IXGBE_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
	{ IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 },
	{ IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
	{ IXGBE_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
	{ IXGBE_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFF80 },
	{ IXGBE_RXCTRL, 1, SET_READ_TEST, 0x00000001, 0x00000001 },
	{ IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x8001FFFF, 0x800CFFFF },
	{ IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ .reg = 0 }
};

/* default 82598 register test */
static const struct ixgbe_reg_test reg_test_82598[] = {
	{ IXGBE_FCRTL(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
	{ IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
	{ IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 },
	{ IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
	{ IXGBE_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
	/* Enable all four RX queues before testing. */
	{ IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE },
	/* RDH is read-only for 82598, only test RDT. */
	{ IXGBE_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
	{ IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 },
	{ IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
	{ IXGBE_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_TIPG, 1, PATTERN_TEST, 0x000000FF, 0x000000FF },
	{ IXGBE_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
	{ IXGBE_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
	{ IXGBE_RXCTRL, 1, SET_READ_TEST, 0x00000003, 0x00000003 },
	{ IXGBE_DTXCTL, 1, SET_READ_TEST, 0x00000005, 0x00000005 },
	{ IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x800CFFFF, 0x800CFFFF },
	{ IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ .reg = 0 }
};

static bool reg_pattern_test(struct ixgbe_adapter *adapter, u64 *data, int reg,
			     u32 mask, u32 write)
{
	u32 pat, val, before;
	static const u32 test_pattern[] = {
		0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};

	if (ixgbe_removed(adapter->hw.hw_addr)) {
		*data = 1;
		return true;
	}
	for (pat = 0; pat < ARRAY_SIZE(test_pattern); pat++) {
		before = ixgbe_read_reg(&adapter->hw, reg);
		ixgbe_write_reg(&adapter->hw, reg, test_pattern[pat] & write);
		val = ixgbe_read_reg(&adapter->hw, reg);
		if (val != (test_pattern[pat] & write & mask)) {
			e_err(drv, "pattern test reg %04X failed: got 0x%08X expected 0x%08X\n",
			      reg, val, (test_pattern[pat] & write & mask));
			*data = reg;
			ixgbe_write_reg(&adapter->hw, reg, before);
			return true;
		}
		ixgbe_write_reg(&adapter->hw, reg, before);
	}
	return false;
}

static bool reg_set_and_check(struct ixgbe_adapter *adapter, u64 *data, int reg,
			      u32 mask, u32 write)
{
	u32 val, before;

	if (ixgbe_removed(adapter->hw.hw_addr)) {
		*data = 1;
		return true;
	}
	before = ixgbe_read_reg(&adapter->hw, reg);
	ixgbe_write_reg(&adapter->hw, reg, write & mask);
	val = ixgbe_read_reg(&adapter->hw, reg);
	if ((write & mask) != (val & mask)) {
		e_err(drv, "set/check reg %04X test failed: got 0x%08X expected 0x%08X\n",
		      reg, (val & mask), (write & mask));
		*data = reg;
		ixgbe_write_reg(&adapter->hw, reg, before);
		return true;
	}
	ixgbe_write_reg(&adapter->hw, reg, before);
	return false;
}

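/* ixgbe_reg_test - walk the per-MAC register test table.
 *
 * Worked example (illustrative): the PATTERN_TEST entry
 * { IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 } writes
 * each of 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000 and 0xFFFFFFFF (ANDed with
 * the write value) to RDBAL(0)..RDBAL(3), spaced 0x40 bytes apart, and
 * expects to read back (pattern & write & mask) - i.e. the low seven
 * bits of RDBAL must always read as zero.
 */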
static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data)
{
	const struct ixgbe_reg_test *test;
	u32 value, before, after;
	u32 i, toggle;

	if (ixgbe_removed(adapter->hw.hw_addr)) {
		e_err(drv, "Adapter removed - register test blocked\n");
		*data = 1;
		return 1;
	}
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		toggle = 0x7FFFF3FF;
		test = reg_test_82598;
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		toggle = 0x7FFFF30F;
		test = reg_test_82599;
		break;
	default:
		*data = 1;
		return 1;
	}

	/*
	 * Because the status register is such a special case,
	 * we handle it separately from the rest of the register
	 * tests.  Some bits are read-only, some toggle, and some
	 * are writeable on newer MACs.
	 */
	before = ixgbe_read_reg(&adapter->hw, IXGBE_STATUS);
	value = (ixgbe_read_reg(&adapter->hw, IXGBE_STATUS) & toggle);
	ixgbe_write_reg(&adapter->hw, IXGBE_STATUS, toggle);
	after = ixgbe_read_reg(&adapter->hw, IXGBE_STATUS) & toggle;
	if (value != after) {
		e_err(drv, "failed STATUS register test got: 0x%08X expected: 0x%08X\n",
		      after, value);
		*data = 1;
		return 1;
	}
	/* restore previous status */
	ixgbe_write_reg(&adapter->hw, IXGBE_STATUS, before);

	/*
	 * Perform the remainder of the register test, looping through
	 * the test table until we either fail or reach the null entry.
	 */
	while (test->reg) {
		for (i = 0; i < test->array_len; i++) {
			bool b = false;

			switch (test->test_type) {
			case PATTERN_TEST:
				b = reg_pattern_test(adapter, data,
						     test->reg + (i * 0x40),
						     test->mask,
						     test->write);
				break;
			case SET_READ_TEST:
				b = reg_set_and_check(adapter, data,
						      test->reg + (i * 0x40),
						      test->mask,
						      test->write);
				break;
			case WRITE_NO_TEST:
				ixgbe_write_reg(&adapter->hw,
						test->reg + (i * 0x40),
						test->write);
				break;
			case TABLE32_TEST:
				b = reg_pattern_test(adapter, data,
						     test->reg + (i * 4),
						     test->mask,
						     test->write);
				break;
			case TABLE64_TEST_LO:
				b = reg_pattern_test(adapter, data,
						     test->reg + (i * 8),
						     test->mask,
						     test->write);
				break;
			case TABLE64_TEST_HI:
				b = reg_pattern_test(adapter, data,
						     (test->reg + 4) + (i * 8),
						     test->mask,
						     test->write);
				break;
			}
			if (b)
				return 1;
		}
		test++;
	}

	*data = 0;
	return 0;
}

static int ixgbe_eeprom_test(struct ixgbe_adapter *adapter, u64 *data)
{
	struct ixgbe_hw *hw = &adapter->hw;

	if (hw->eeprom.ops.validate_checksum(hw, NULL))
		*data = 1;
	else
		*data = 0;
	return *data;
}

static irqreturn_t ixgbe_test_intr(int irq, void *data)
{
	struct net_device *netdev = (struct net_device *) data;
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	adapter->test_icr |= IXGBE_READ_REG(&adapter->hw, IXGBE_EICR);

	return IRQ_HANDLED;
}

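/* ixgbe_intr_test - verify legacy/MSI interrupt delivery.
 *
 * Sketch of the flow (illustrative): for each cause bit, fire all the
 * other (masked) causes and check that nothing leaks through, then
 * unmask and fire the bit itself and require that it is posted.  MSI-X
 * vectors are not exercised here.
 */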
1606 "shared" : "unshared"); 1607 1608 /* Disable all the interrupts */ 1609 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF); 1610 IXGBE_WRITE_FLUSH(&adapter->hw); 1611 usleep_range(10000, 20000); 1612 1613 /* Test each interrupt */ 1614 for (; i < 10; i++) { 1615 /* Interrupt to test */ 1616 mask = BIT(i); 1617 1618 if (!shared_int) { 1619 /* 1620 * Disable the interrupts to be reported in 1621 * the cause register and then force the same 1622 * interrupt and see if one gets posted. If 1623 * an interrupt was posted to the bus, the 1624 * test failed. 1625 */ 1626 adapter->test_icr = 0; 1627 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 1628 ~mask & 0x00007FFF); 1629 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, 1630 ~mask & 0x00007FFF); 1631 IXGBE_WRITE_FLUSH(&adapter->hw); 1632 usleep_range(10000, 20000); 1633 1634 if (adapter->test_icr & mask) { 1635 *data = 3; 1636 break; 1637 } 1638 } 1639 1640 /* 1641 * Enable the interrupt to be reported in the cause 1642 * register and then force the same interrupt and see 1643 * if one gets posted. If an interrupt was not posted 1644 * to the bus, the test failed. 1645 */ 1646 adapter->test_icr = 0; 1647 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask); 1648 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask); 1649 IXGBE_WRITE_FLUSH(&adapter->hw); 1650 usleep_range(10000, 20000); 1651 1652 if (!(adapter->test_icr & mask)) { 1653 *data = 4; 1654 break; 1655 } 1656 1657 if (!shared_int) { 1658 /* 1659 * Disable the other interrupts to be reported in 1660 * the cause register and then force the other 1661 * interrupts and see if any get posted. If 1662 * an interrupt was posted to the bus, the 1663 * test failed. 1664 */ 1665 adapter->test_icr = 0; 1666 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 1667 ~mask & 0x00007FFF); 1668 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, 1669 ~mask & 0x00007FFF); 1670 IXGBE_WRITE_FLUSH(&adapter->hw); 1671 usleep_range(10000, 20000); 1672 1673 if (adapter->test_icr) { 1674 *data = 5; 1675 break; 1676 } 1677 } 1678 } 1679 1680 /* Disable all the interrupts */ 1681 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF); 1682 IXGBE_WRITE_FLUSH(&adapter->hw); 1683 usleep_range(10000, 20000); 1684 1685 /* Unhook test interrupt handler */ 1686 free_irq(irq, netdev); 1687 1688 return *data; 1689 } 1690 1691 static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter) 1692 { 1693 struct ixgbe_ring *tx_ring = &adapter->test_tx_ring; 1694 struct ixgbe_ring *rx_ring = &adapter->test_rx_ring; 1695 struct ixgbe_hw *hw = &adapter->hw; 1696 u32 reg_ctl; 1697 1698 /* shut down the DMA engines now so they can be reinitialized later */ 1699 1700 /* first Rx */ 1701 hw->mac.ops.disable_rx(hw); 1702 ixgbe_disable_rx_queue(adapter, rx_ring); 1703 1704 /* now Tx */ 1705 reg_ctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx)); 1706 reg_ctl &= ~IXGBE_TXDCTL_ENABLE; 1707 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx), reg_ctl); 1708 1709 switch (hw->mac.type) { 1710 case ixgbe_mac_82599EB: 1711 case ixgbe_mac_X540: 1712 case ixgbe_mac_X550: 1713 case ixgbe_mac_X550EM_x: 1714 case ixgbe_mac_x550em_a: 1715 reg_ctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); 1716 reg_ctl &= ~IXGBE_DMATXCTL_TE; 1717 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_ctl); 1718 break; 1719 default: 1720 break; 1721 } 1722 1723 ixgbe_reset(adapter); 1724 1725 ixgbe_free_tx_resources(&adapter->test_tx_ring); 1726 ixgbe_free_rx_resources(&adapter->test_rx_ring); 1727 } 1728 1729 static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter) 1730 { 1731 struct ixgbe_ring *tx_ring = 
&adapter->test_tx_ring; 1732 struct ixgbe_ring *rx_ring = &adapter->test_rx_ring; 1733 struct ixgbe_hw *hw = &adapter->hw; 1734 u32 rctl, reg_data; 1735 int ret_val; 1736 int err; 1737 1738 /* Setup Tx descriptor ring and Tx buffers */ 1739 tx_ring->count = IXGBE_DEFAULT_TXD; 1740 tx_ring->queue_index = 0; 1741 tx_ring->dev = &adapter->pdev->dev; 1742 tx_ring->netdev = adapter->netdev; 1743 tx_ring->reg_idx = adapter->tx_ring[0]->reg_idx; 1744 1745 err = ixgbe_setup_tx_resources(tx_ring); 1746 if (err) 1747 return 1; 1748 1749 switch (adapter->hw.mac.type) { 1750 case ixgbe_mac_82599EB: 1751 case ixgbe_mac_X540: 1752 case ixgbe_mac_X550: 1753 case ixgbe_mac_X550EM_x: 1754 case ixgbe_mac_x550em_a: 1755 reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_DMATXCTL); 1756 reg_data |= IXGBE_DMATXCTL_TE; 1757 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DMATXCTL, reg_data); 1758 break; 1759 default: 1760 break; 1761 } 1762 1763 ixgbe_configure_tx_ring(adapter, tx_ring); 1764 1765 /* Setup Rx Descriptor ring and Rx buffers */ 1766 rx_ring->count = IXGBE_DEFAULT_RXD; 1767 rx_ring->queue_index = 0; 1768 rx_ring->dev = &adapter->pdev->dev; 1769 rx_ring->netdev = adapter->netdev; 1770 rx_ring->reg_idx = adapter->rx_ring[0]->reg_idx; 1771 1772 err = ixgbe_setup_rx_resources(adapter, rx_ring); 1773 if (err) { 1774 ret_val = 4; 1775 goto err_nomem; 1776 } 1777 1778 hw->mac.ops.disable_rx(hw); 1779 1780 ixgbe_configure_rx_ring(adapter, rx_ring); 1781 1782 rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXCTRL); 1783 rctl |= IXGBE_RXCTRL_DMBYPS; 1784 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rctl); 1785 1786 hw->mac.ops.enable_rx(hw); 1787 1788 return 0; 1789 1790 err_nomem: 1791 ixgbe_free_desc_rings(adapter); 1792 return ret_val; 1793 } 1794 1795 static int ixgbe_setup_loopback_test(struct ixgbe_adapter *adapter) 1796 { 1797 struct ixgbe_hw *hw = &adapter->hw; 1798 u32 reg_data; 1799 1800 1801 /* Setup MAC loopback */ 1802 reg_data = IXGBE_READ_REG(hw, IXGBE_HLREG0); 1803 reg_data |= IXGBE_HLREG0_LPBK; 1804 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_data); 1805 1806 reg_data = IXGBE_READ_REG(hw, IXGBE_FCTRL); 1807 reg_data |= IXGBE_FCTRL_BAM | IXGBE_FCTRL_SBP | IXGBE_FCTRL_MPE; 1808 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg_data); 1809 1810 /* X540 and X550 needs to set the MACC.FLU bit to force link up */ 1811 switch (adapter->hw.mac.type) { 1812 case ixgbe_mac_X540: 1813 case ixgbe_mac_X550: 1814 case ixgbe_mac_X550EM_x: 1815 case ixgbe_mac_x550em_a: 1816 reg_data = IXGBE_READ_REG(hw, IXGBE_MACC); 1817 reg_data |= IXGBE_MACC_FLU; 1818 IXGBE_WRITE_REG(hw, IXGBE_MACC, reg_data); 1819 break; 1820 default: 1821 if (hw->mac.orig_autoc) { 1822 reg_data = hw->mac.orig_autoc | IXGBE_AUTOC_FLU; 1823 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_data); 1824 } else { 1825 return 10; 1826 } 1827 } 1828 IXGBE_WRITE_FLUSH(hw); 1829 usleep_range(10000, 20000); 1830 1831 /* Disable Atlas Tx lanes; re-enabled in reset path */ 1832 if (hw->mac.type == ixgbe_mac_82598EB) { 1833 u8 atlas; 1834 1835 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &atlas); 1836 atlas |= IXGBE_ATLAS_PDN_TX_REG_EN; 1837 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, atlas); 1838 1839 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, &atlas); 1840 atlas |= IXGBE_ATLAS_PDN_TX_10G_QL_ALL; 1841 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, atlas); 1842 1843 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, &atlas); 1844 atlas |= IXGBE_ATLAS_PDN_TX_1G_QL_ALL; 1845 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, atlas); 1846 1847 
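/*
 * Last of the Atlas lane power-downs: quiesce the autoneg (AN) Tx
 * lanes just as the 10G and 1G lanes were quiesced above, so the
 * 82598 PHY cannot drive the wire while the MAC loops frames back
 * internally.  As noted above, the reset path re-enables these lanes.
 */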
hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, &atlas); 1848 atlas |= IXGBE_ATLAS_PDN_TX_AN_QL_ALL; 1849 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, atlas); 1850 } 1851 1852 return 0; 1853 } 1854 1855 static void ixgbe_loopback_cleanup(struct ixgbe_adapter *adapter) 1856 { 1857 u32 reg_data; 1858 1859 reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0); 1860 reg_data &= ~IXGBE_HLREG0_LPBK; 1861 IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, reg_data); 1862 } 1863 1864 static void ixgbe_create_lbtest_frame(struct sk_buff *skb, 1865 unsigned int frame_size) 1866 { 1867 memset(skb->data, 0xFF, frame_size); 1868 frame_size >>= 1; 1869 memset(&skb->data[frame_size], 0xAA, frame_size / 2 - 1); 1870 memset(&skb->data[frame_size + 10], 0xBE, 1); 1871 memset(&skb->data[frame_size + 12], 0xAF, 1); 1872 } 1873 1874 static bool ixgbe_check_lbtest_frame(struct ixgbe_rx_buffer *rx_buffer, 1875 unsigned int frame_size) 1876 { 1877 unsigned char *data; 1878 bool match = true; 1879 1880 frame_size >>= 1; 1881 1882 data = kmap(rx_buffer->page) + rx_buffer->page_offset; 1883 1884 if (data[3] != 0xFF || 1885 data[frame_size + 10] != 0xBE || 1886 data[frame_size + 12] != 0xAF) 1887 match = false; 1888 1889 kunmap(rx_buffer->page); 1890 1891 return match; 1892 } 1893 1894 static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring, 1895 struct ixgbe_ring *tx_ring, 1896 unsigned int size) 1897 { 1898 union ixgbe_adv_rx_desc *rx_desc; 1899 u16 rx_ntc, tx_ntc, count = 0; 1900 1901 /* initialize next to clean and descriptor values */ 1902 rx_ntc = rx_ring->next_to_clean; 1903 tx_ntc = tx_ring->next_to_clean; 1904 rx_desc = IXGBE_RX_DESC(rx_ring, rx_ntc); 1905 1906 while (tx_ntc != tx_ring->next_to_use) { 1907 union ixgbe_adv_tx_desc *tx_desc; 1908 struct ixgbe_tx_buffer *tx_buffer; 1909 1910 tx_desc = IXGBE_TX_DESC(tx_ring, tx_ntc); 1911 1912 /* if DD is not set transmit has not completed */ 1913 if (!(tx_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD))) 1914 return count; 1915 1916 /* unmap buffer on Tx side */ 1917 tx_buffer = &tx_ring->tx_buffer_info[tx_ntc]; 1918 1919 /* Free all the Tx ring sk_buffs */ 1920 dev_kfree_skb_any(tx_buffer->skb); 1921 1922 /* unmap skb header data */ 1923 dma_unmap_single(tx_ring->dev, 1924 dma_unmap_addr(tx_buffer, dma), 1925 dma_unmap_len(tx_buffer, len), 1926 DMA_TO_DEVICE); 1927 dma_unmap_len_set(tx_buffer, len, 0); 1928 1929 /* increment Tx next to clean counter */ 1930 tx_ntc++; 1931 if (tx_ntc == tx_ring->count) 1932 tx_ntc = 0; 1933 } 1934 1935 while (rx_desc->wb.upper.length) { 1936 struct ixgbe_rx_buffer *rx_buffer; 1937 1938 /* check Rx buffer */ 1939 rx_buffer = &rx_ring->rx_buffer_info[rx_ntc]; 1940 1941 /* sync Rx buffer for CPU read */ 1942 dma_sync_single_for_cpu(rx_ring->dev, 1943 rx_buffer->dma, 1944 ixgbe_rx_bufsz(rx_ring), 1945 DMA_FROM_DEVICE); 1946 1947 /* verify contents of skb */ 1948 if (ixgbe_check_lbtest_frame(rx_buffer, size)) 1949 count++; 1950 else 1951 break; 1952 1953 /* sync Rx buffer for device write */ 1954 dma_sync_single_for_device(rx_ring->dev, 1955 rx_buffer->dma, 1956 ixgbe_rx_bufsz(rx_ring), 1957 DMA_FROM_DEVICE); 1958 1959 /* increment Rx next to clean counter */ 1960 rx_ntc++; 1961 if (rx_ntc == rx_ring->count) 1962 rx_ntc = 0; 1963 1964 /* fetch next descriptor */ 1965 rx_desc = IXGBE_RX_DESC(rx_ring, rx_ntc); 1966 } 1967 1968 netdev_tx_reset_queue(txring_txq(tx_ring)); 1969 1970 /* re-map buffers to ring, store next to clean values */ 1971 ixgbe_alloc_rx_buffers(rx_ring, count); 1972 rx_ring->next_to_clean = rx_ntc; 1973 
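/*
 * Restore the Tx side as well; once both next_to_clean values are
 * written back, the test rings are consistent with what the hardware
 * has processed and the next burst of loopback frames can be sent.
 */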
tx_ring->next_to_clean = tx_ntc; 1974 1975 return count; 1976 } 1977 1978 static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter) 1979 { 1980 struct ixgbe_ring *tx_ring = &adapter->test_tx_ring; 1981 struct ixgbe_ring *rx_ring = &adapter->test_rx_ring; 1982 int i, j, lc, good_cnt, ret_val = 0; 1983 unsigned int size = 1024; 1984 netdev_tx_t tx_ret_val; 1985 struct sk_buff *skb; 1986 u32 flags_orig = adapter->flags; 1987 1988 /* DCB can modify the frames on Tx */ 1989 adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED; 1990 1991 /* allocate test skb */ 1992 skb = alloc_skb(size, GFP_KERNEL); 1993 if (!skb) 1994 return 11; 1995 1996 /* place data into test skb */ 1997 ixgbe_create_lbtest_frame(skb, size); 1998 skb_put(skb, size); 1999 2000 /* 2001 * Calculate the loop count based on the largest descriptor ring 2002 * The idea is to wrap the largest ring a number of times using 64 2003 * send/receive pairs during each loop 2004 */ 2005 2006 if (rx_ring->count <= tx_ring->count) 2007 lc = ((tx_ring->count / 64) * 2) + 1; 2008 else 2009 lc = ((rx_ring->count / 64) * 2) + 1; 2010 2011 for (j = 0; j <= lc; j++) { 2012 /* reset count of good packets */ 2013 good_cnt = 0; 2014 2015 /* place 64 packets on the transmit queue*/ 2016 for (i = 0; i < 64; i++) { 2017 skb_get(skb); 2018 tx_ret_val = ixgbe_xmit_frame_ring(skb, 2019 adapter, 2020 tx_ring); 2021 if (tx_ret_val == NETDEV_TX_OK) 2022 good_cnt++; 2023 } 2024 2025 if (good_cnt != 64) { 2026 ret_val = 12; 2027 break; 2028 } 2029 2030 /* allow 200 milliseconds for packets to go from Tx to Rx */ 2031 msleep(200); 2032 2033 good_cnt = ixgbe_clean_test_rings(rx_ring, tx_ring, size); 2034 if (good_cnt != 64) { 2035 ret_val = 13; 2036 break; 2037 } 2038 } 2039 2040 /* free the original skb */ 2041 kfree_skb(skb); 2042 adapter->flags = flags_orig; 2043 2044 return ret_val; 2045 } 2046 2047 static int ixgbe_loopback_test(struct ixgbe_adapter *adapter, u64 *data) 2048 { 2049 *data = ixgbe_setup_desc_rings(adapter); 2050 if (*data) 2051 goto out; 2052 *data = ixgbe_setup_loopback_test(adapter); 2053 if (*data) 2054 goto err_loopback; 2055 *data = ixgbe_run_loopback_test(adapter); 2056 ixgbe_loopback_cleanup(adapter); 2057 2058 err_loopback: 2059 ixgbe_free_desc_rings(adapter); 2060 out: 2061 return *data; 2062 } 2063 2064 static void ixgbe_diag_test(struct net_device *netdev, 2065 struct ethtool_test *eth_test, u64 *data) 2066 { 2067 struct ixgbe_adapter *adapter = netdev_priv(netdev); 2068 bool if_running = netif_running(netdev); 2069 2070 if (ixgbe_removed(adapter->hw.hw_addr)) { 2071 e_err(hw, "Adapter removed - test blocked\n"); 2072 data[0] = 1; 2073 data[1] = 1; 2074 data[2] = 1; 2075 data[3] = 1; 2076 data[4] = 1; 2077 eth_test->flags |= ETH_TEST_FL_FAILED; 2078 return; 2079 } 2080 set_bit(__IXGBE_TESTING, &adapter->state); 2081 if (eth_test->flags == ETH_TEST_FL_OFFLINE) { 2082 struct ixgbe_hw *hw = &adapter->hw; 2083 2084 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { 2085 int i; 2086 for (i = 0; i < adapter->num_vfs; i++) { 2087 if (adapter->vfinfo[i].clear_to_send) { 2088 netdev_warn(netdev, "offline diagnostic is not supported when VFs are present\n"); 2089 data[0] = 1; 2090 data[1] = 1; 2091 data[2] = 1; 2092 data[3] = 1; 2093 data[4] = 1; 2094 eth_test->flags |= ETH_TEST_FL_FAILED; 2095 clear_bit(__IXGBE_TESTING, 2096 &adapter->state); 2097 goto skip_ol_tests; 2098 } 2099 } 2100 } 2101 2102 /* Offline tests */ 2103 e_info(hw, "offline testing starting\n"); 2104 2105 /* Link test performed before hardware reset so autoneg doesn't 2106 * 
interfere with test result 2107 */ 2108 if (ixgbe_link_test(adapter, &data[4])) 2109 eth_test->flags |= ETH_TEST_FL_FAILED; 2110 2111 if (if_running) 2112 /* indicate we're in test mode */ 2113 ixgbe_close(netdev); 2114 else 2115 ixgbe_reset(adapter); 2116 2117 e_info(hw, "register testing starting\n"); 2118 if (ixgbe_reg_test(adapter, &data[0])) 2119 eth_test->flags |= ETH_TEST_FL_FAILED; 2120 2121 ixgbe_reset(adapter); 2122 e_info(hw, "eeprom testing starting\n"); 2123 if (ixgbe_eeprom_test(adapter, &data[1])) 2124 eth_test->flags |= ETH_TEST_FL_FAILED; 2125 2126 ixgbe_reset(adapter); 2127 e_info(hw, "interrupt testing starting\n"); 2128 if (ixgbe_intr_test(adapter, &data[2])) 2129 eth_test->flags |= ETH_TEST_FL_FAILED; 2130 2131 /* If SRIOV or VMDq is enabled then skip MAC 2132 * loopback diagnostic. */ 2133 if (adapter->flags & (IXGBE_FLAG_SRIOV_ENABLED | 2134 IXGBE_FLAG_VMDQ_ENABLED)) { 2135 e_info(hw, "Skip MAC loopback diagnostic in VT mode\n"); 2136 data[3] = 0; 2137 goto skip_loopback; 2138 } 2139 2140 ixgbe_reset(adapter); 2141 e_info(hw, "loopback testing starting\n"); 2142 if (ixgbe_loopback_test(adapter, &data[3])) 2143 eth_test->flags |= ETH_TEST_FL_FAILED; 2144 2145 skip_loopback: 2146 ixgbe_reset(adapter); 2147 2148 /* clear testing bit and return adapter to previous state */ 2149 clear_bit(__IXGBE_TESTING, &adapter->state); 2150 if (if_running) 2151 ixgbe_open(netdev); 2152 else if (hw->mac.ops.disable_tx_laser) 2153 hw->mac.ops.disable_tx_laser(hw); 2154 } else { 2155 e_info(hw, "online testing starting\n"); 2156 2157 /* Online tests */ 2158 if (ixgbe_link_test(adapter, &data[4])) 2159 eth_test->flags |= ETH_TEST_FL_FAILED; 2160 2161 /* Offline tests aren't run; pass by default */ 2162 data[0] = 0; 2163 data[1] = 0; 2164 data[2] = 0; 2165 data[3] = 0; 2166 2167 clear_bit(__IXGBE_TESTING, &adapter->state); 2168 } 2169 2170 skip_ol_tests: 2171 msleep_interruptible(4 * 1000); 2172 } 2173 2174 static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter, 2175 struct ethtool_wolinfo *wol) 2176 { 2177 struct ixgbe_hw *hw = &adapter->hw; 2178 int retval = 0; 2179 2180 /* WOL not supported for all devices */ 2181 if (!ixgbe_wol_supported(adapter, hw->device_id, 2182 hw->subsystem_device_id)) { 2183 retval = 1; 2184 wol->supported = 0; 2185 } 2186 2187 return retval; 2188 } 2189 2190 static void ixgbe_get_wol(struct net_device *netdev, 2191 struct ethtool_wolinfo *wol) 2192 { 2193 struct ixgbe_adapter *adapter = netdev_priv(netdev); 2194 2195 wol->supported = WAKE_UCAST | WAKE_MCAST | 2196 WAKE_BCAST | WAKE_MAGIC; 2197 wol->wolopts = 0; 2198 2199 if (ixgbe_wol_exclusion(adapter, wol) || 2200 !device_can_wakeup(&adapter->pdev->dev)) 2201 return; 2202 2203 if (adapter->wol & IXGBE_WUFC_EX) 2204 wol->wolopts |= WAKE_UCAST; 2205 if (adapter->wol & IXGBE_WUFC_MC) 2206 wol->wolopts |= WAKE_MCAST; 2207 if (adapter->wol & IXGBE_WUFC_BC) 2208 wol->wolopts |= WAKE_BCAST; 2209 if (adapter->wol & IXGBE_WUFC_MAG) 2210 wol->wolopts |= WAKE_MAGIC; 2211 } 2212 2213 static int ixgbe_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) 2214 { 2215 struct ixgbe_adapter *adapter = netdev_priv(netdev); 2216 2217 if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE)) 2218 return -EOPNOTSUPP; 2219 2220 if (ixgbe_wol_exclusion(adapter, wol)) 2221 return wol->wolopts ? 
-EOPNOTSUPP : 0; 2222 2223 adapter->wol = 0; 2224 2225 if (wol->wolopts & WAKE_UCAST) 2226 adapter->wol |= IXGBE_WUFC_EX; 2227 if (wol->wolopts & WAKE_MCAST) 2228 adapter->wol |= IXGBE_WUFC_MC; 2229 if (wol->wolopts & WAKE_BCAST) 2230 adapter->wol |= IXGBE_WUFC_BC; 2231 if (wol->wolopts & WAKE_MAGIC) 2232 adapter->wol |= IXGBE_WUFC_MAG; 2233 2234 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); 2235 2236 return 0; 2237 } 2238 2239 static int ixgbe_nway_reset(struct net_device *netdev) 2240 { 2241 struct ixgbe_adapter *adapter = netdev_priv(netdev); 2242 2243 if (netif_running(netdev)) 2244 ixgbe_reinit_locked(adapter); 2245 2246 return 0; 2247 } 2248 2249 static int ixgbe_set_phys_id(struct net_device *netdev, 2250 enum ethtool_phys_id_state state) 2251 { 2252 struct ixgbe_adapter *adapter = netdev_priv(netdev); 2253 struct ixgbe_hw *hw = &adapter->hw; 2254 2255 if (!hw->mac.ops.led_on || !hw->mac.ops.led_off) 2256 return -EOPNOTSUPP; 2257 2258 switch (state) { 2259 case ETHTOOL_ID_ACTIVE: 2260 adapter->led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); 2261 return 2; 2262 2263 case ETHTOOL_ID_ON: 2264 hw->mac.ops.led_on(hw, hw->mac.led_link_act); 2265 break; 2266 2267 case ETHTOOL_ID_OFF: 2268 hw->mac.ops.led_off(hw, hw->mac.led_link_act); 2269 break; 2270 2271 case ETHTOOL_ID_INACTIVE: 2272 /* Restore LED settings */ 2273 IXGBE_WRITE_REG(&adapter->hw, IXGBE_LEDCTL, adapter->led_reg); 2274 break; 2275 } 2276 2277 return 0; 2278 } 2279 2280 static int ixgbe_get_coalesce(struct net_device *netdev, 2281 struct ethtool_coalesce *ec) 2282 { 2283 struct ixgbe_adapter *adapter = netdev_priv(netdev); 2284 2285 /* only valid if in constant ITR mode */ 2286 if (adapter->rx_itr_setting <= 1) 2287 ec->rx_coalesce_usecs = adapter->rx_itr_setting; 2288 else 2289 ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2; 2290 2291 /* if in mixed tx/rx queues per vector mode, report only rx settings */ 2292 if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count) 2293 return 0; 2294 2295 /* only valid if in constant ITR mode */ 2296 if (adapter->tx_itr_setting <= 1) 2297 ec->tx_coalesce_usecs = adapter->tx_itr_setting; 2298 else 2299 ec->tx_coalesce_usecs = adapter->tx_itr_setting >> 2; 2300 2301 return 0; 2302 } 2303 2304 /* 2305 * this function must be called before setting the new value of 2306 * rx_itr_setting 2307 */ 2308 static bool ixgbe_update_rsc(struct ixgbe_adapter *adapter) 2309 { 2310 struct net_device *netdev = adapter->netdev; 2311 2312 /* nothing to do if LRO or RSC are not enabled */ 2313 if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) || 2314 !(netdev->features & NETIF_F_LRO)) 2315 return false; 2316 2317 /* check the feature flag value and enable RSC if necessary */ 2318 if (adapter->rx_itr_setting == 1 || 2319 adapter->rx_itr_setting > IXGBE_MIN_RSC_ITR) { 2320 if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) { 2321 adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED; 2322 e_info(probe, "rx-usecs value high enough to re-enable RSC\n"); 2323 return true; 2324 } 2325 /* if interrupt rate is too high then disable RSC */ 2326 } else if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) { 2327 adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED; 2328 e_info(probe, "rx-usecs set too low, disabling RSC\n"); 2329 return true; 2330 } 2331 return false; 2332 } 2333 2334 static int ixgbe_set_coalesce(struct net_device *netdev, 2335 struct ethtool_coalesce *ec) 2336 { 2337 struct ixgbe_adapter *adapter = netdev_priv(netdev); 2338 struct ixgbe_q_vector *q_vector; 2339 int i; 2340 u16 tx_itr_param, 
rx_itr_param, tx_itr_prev; 2341 bool need_reset = false; 2342 2343 if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count) { 2344 /* reject Tx specific changes in case of mixed RxTx vectors */ 2345 if (ec->tx_coalesce_usecs) 2346 return -EINVAL; 2347 tx_itr_prev = adapter->rx_itr_setting; 2348 } else { 2349 tx_itr_prev = adapter->tx_itr_setting; 2350 } 2351 2352 if ((ec->rx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)) || 2353 (ec->tx_coalesce_usecs > (IXGBE_MAX_EITR >> 2))) 2354 return -EINVAL; 2355 2356 if (ec->rx_coalesce_usecs > 1) 2357 adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2; 2358 else 2359 adapter->rx_itr_setting = ec->rx_coalesce_usecs; 2360 2361 if (adapter->rx_itr_setting == 1) 2362 rx_itr_param = IXGBE_20K_ITR; 2363 else 2364 rx_itr_param = adapter->rx_itr_setting; 2365 2366 if (ec->tx_coalesce_usecs > 1) 2367 adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2; 2368 else 2369 adapter->tx_itr_setting = ec->tx_coalesce_usecs; 2370 2371 if (adapter->tx_itr_setting == 1) 2372 tx_itr_param = IXGBE_12K_ITR; 2373 else 2374 tx_itr_param = adapter->tx_itr_setting; 2375 2376 /* mixed Rx/Tx */ 2377 if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count) 2378 adapter->tx_itr_setting = adapter->rx_itr_setting; 2379 2380 /* detect ITR changes that require update of TXDCTL.WTHRESH */ 2381 if ((adapter->tx_itr_setting != 1) && 2382 (adapter->tx_itr_setting < IXGBE_100K_ITR)) { 2383 if ((tx_itr_prev == 1) || 2384 (tx_itr_prev >= IXGBE_100K_ITR)) 2385 need_reset = true; 2386 } else { 2387 if ((tx_itr_prev != 1) && 2388 (tx_itr_prev < IXGBE_100K_ITR)) 2389 need_reset = true; 2390 } 2391 2392 /* check the old value and enable RSC if necessary */ 2393 need_reset |= ixgbe_update_rsc(adapter); 2394 2395 for (i = 0; i < adapter->num_q_vectors; i++) { 2396 q_vector = adapter->q_vector[i]; 2397 if (q_vector->tx.count && !q_vector->rx.count) 2398 /* tx only */ 2399 q_vector->itr = tx_itr_param; 2400 else 2401 /* rx only or mixed */ 2402 q_vector->itr = rx_itr_param; 2403 ixgbe_write_eitr(q_vector); 2404 } 2405 2406 /* 2407 * do reset here at the end to make sure EITR==0 case is handled 2408 * correctly w.r.t stopping tx, and changing TXDCTL.WTHRESH settings 2409 * also locks in RSC enable/disable which requires reset 2410 */ 2411 if (need_reset) 2412 ixgbe_do_reset(netdev); 2413 2414 return 0; 2415 } 2416 2417 static int ixgbe_get_ethtool_fdir_entry(struct ixgbe_adapter *adapter, 2418 struct ethtool_rxnfc *cmd) 2419 { 2420 union ixgbe_atr_input *mask = &adapter->fdir_mask; 2421 struct ethtool_rx_flow_spec *fsp = 2422 (struct ethtool_rx_flow_spec *)&cmd->fs; 2423 struct hlist_node *node2; 2424 struct ixgbe_fdir_filter *rule = NULL; 2425 2426 /* report total rule count */ 2427 cmd->data = (1024 << adapter->fdir_pballoc) - 2; 2428 2429 hlist_for_each_entry_safe(rule, node2, 2430 &adapter->fdir_filter_list, fdir_node) { 2431 if (fsp->location <= rule->sw_idx) 2432 break; 2433 } 2434 2435 if (!rule || fsp->location != rule->sw_idx) 2436 return -EINVAL; 2437 2438 /* fill out the flow spec entry */ 2439 2440 /* set flow type field */ 2441 switch (rule->filter.formatted.flow_type) { 2442 case IXGBE_ATR_FLOW_TYPE_TCPV4: 2443 fsp->flow_type = TCP_V4_FLOW; 2444 break; 2445 case IXGBE_ATR_FLOW_TYPE_UDPV4: 2446 fsp->flow_type = UDP_V4_FLOW; 2447 break; 2448 case IXGBE_ATR_FLOW_TYPE_SCTPV4: 2449 fsp->flow_type = SCTP_V4_FLOW; 2450 break; 2451 case IXGBE_ATR_FLOW_TYPE_IPV4: 2452 fsp->flow_type = IP_USER_FLOW; 2453 fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4; 2454 
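/*
 * A raw IPv4 rule matches any L4 protocol, so the protocol value and
 * its mask are reported back to ethtool as zero (fully wildcarded).
 */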
fsp->h_u.usr_ip4_spec.proto = 0; 2455 fsp->m_u.usr_ip4_spec.proto = 0; 2456 break; 2457 default: 2458 return -EINVAL; 2459 } 2460 2461 fsp->h_u.tcp_ip4_spec.psrc = rule->filter.formatted.src_port; 2462 fsp->m_u.tcp_ip4_spec.psrc = mask->formatted.src_port; 2463 fsp->h_u.tcp_ip4_spec.pdst = rule->filter.formatted.dst_port; 2464 fsp->m_u.tcp_ip4_spec.pdst = mask->formatted.dst_port; 2465 fsp->h_u.tcp_ip4_spec.ip4src = rule->filter.formatted.src_ip[0]; 2466 fsp->m_u.tcp_ip4_spec.ip4src = mask->formatted.src_ip[0]; 2467 fsp->h_u.tcp_ip4_spec.ip4dst = rule->filter.formatted.dst_ip[0]; 2468 fsp->m_u.tcp_ip4_spec.ip4dst = mask->formatted.dst_ip[0]; 2469 fsp->h_ext.vlan_tci = rule->filter.formatted.vlan_id; 2470 fsp->m_ext.vlan_tci = mask->formatted.vlan_id; 2471 fsp->h_ext.vlan_etype = rule->filter.formatted.flex_bytes; 2472 fsp->m_ext.vlan_etype = mask->formatted.flex_bytes; 2473 fsp->h_ext.data[1] = htonl(rule->filter.formatted.vm_pool); 2474 fsp->m_ext.data[1] = htonl(mask->formatted.vm_pool); 2475 fsp->flow_type |= FLOW_EXT; 2476 2477 /* record action */ 2478 if (rule->action == IXGBE_FDIR_DROP_QUEUE) 2479 fsp->ring_cookie = RX_CLS_FLOW_DISC; 2480 else 2481 fsp->ring_cookie = rule->action; 2482 2483 return 0; 2484 } 2485 2486 static int ixgbe_get_ethtool_fdir_all(struct ixgbe_adapter *adapter, 2487 struct ethtool_rxnfc *cmd, 2488 u32 *rule_locs) 2489 { 2490 struct hlist_node *node2; 2491 struct ixgbe_fdir_filter *rule; 2492 int cnt = 0; 2493 2494 /* report total rule count */ 2495 cmd->data = (1024 << adapter->fdir_pballoc) - 2; 2496 2497 hlist_for_each_entry_safe(rule, node2, 2498 &adapter->fdir_filter_list, fdir_node) { 2499 if (cnt == cmd->rule_cnt) 2500 return -EMSGSIZE; 2501 rule_locs[cnt] = rule->sw_idx; 2502 cnt++; 2503 } 2504 2505 cmd->rule_cnt = cnt; 2506 2507 return 0; 2508 } 2509 2510 static int ixgbe_get_rss_hash_opts(struct ixgbe_adapter *adapter, 2511 struct ethtool_rxnfc *cmd) 2512 { 2513 cmd->data = 0; 2514 2515 /* Report default options for RSS on ixgbe */ 2516 switch (cmd->flow_type) { 2517 case TCP_V4_FLOW: 2518 cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; 2519 /* fallthrough */ 2520 case UDP_V4_FLOW: 2521 if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP) 2522 cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; 2523 /* fallthrough */ 2524 case SCTP_V4_FLOW: 2525 case AH_ESP_V4_FLOW: 2526 case AH_V4_FLOW: 2527 case ESP_V4_FLOW: 2528 case IPV4_FLOW: 2529 cmd->data |= RXH_IP_SRC | RXH_IP_DST; 2530 break; 2531 case TCP_V6_FLOW: 2532 cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; 2533 /* fallthrough */ 2534 case UDP_V6_FLOW: 2535 if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP) 2536 cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; 2537 /* fallthrough */ 2538 case SCTP_V6_FLOW: 2539 case AH_ESP_V6_FLOW: 2540 case AH_V6_FLOW: 2541 case ESP_V6_FLOW: 2542 case IPV6_FLOW: 2543 cmd->data |= RXH_IP_SRC | RXH_IP_DST; 2544 break; 2545 default: 2546 return -EINVAL; 2547 } 2548 2549 return 0; 2550 } 2551 2552 static int ixgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, 2553 u32 *rule_locs) 2554 { 2555 struct ixgbe_adapter *adapter = netdev_priv(dev); 2556 int ret = -EOPNOTSUPP; 2557 2558 switch (cmd->cmd) { 2559 case ETHTOOL_GRXRINGS: 2560 cmd->data = adapter->num_rx_queues; 2561 ret = 0; 2562 break; 2563 case ETHTOOL_GRXCLSRLCNT: 2564 cmd->rule_cnt = adapter->fdir_filter_count; 2565 ret = 0; 2566 break; 2567 case ETHTOOL_GRXCLSRULE: 2568 ret = ixgbe_get_ethtool_fdir_entry(adapter, cmd); 2569 break; 2570 case ETHTOOL_GRXCLSRLALL: 2571 ret = ixgbe_get_ethtool_fdir_all(adapter, cmd, 
rule_locs); 2572 break; 2573 case ETHTOOL_GRXFH: 2574 ret = ixgbe_get_rss_hash_opts(adapter, cmd); 2575 break; 2576 default: 2577 break; 2578 } 2579 2580 return ret; 2581 } 2582 2583 int ixgbe_update_ethtool_fdir_entry(struct ixgbe_adapter *adapter, 2584 struct ixgbe_fdir_filter *input, 2585 u16 sw_idx) 2586 { 2587 struct ixgbe_hw *hw = &adapter->hw; 2588 struct hlist_node *node2; 2589 struct ixgbe_fdir_filter *rule, *parent; 2590 int err = -EINVAL; 2591 2592 parent = NULL; 2593 rule = NULL; 2594 2595 hlist_for_each_entry_safe(rule, node2, 2596 &adapter->fdir_filter_list, fdir_node) { 2597 /* hash found, or no matching entry */ 2598 if (rule->sw_idx >= sw_idx) 2599 break; 2600 parent = rule; 2601 } 2602 2603 /* if there is an old rule occupying our place remove it */ 2604 if (rule && (rule->sw_idx == sw_idx)) { 2605 if (!input || (rule->filter.formatted.bkt_hash != 2606 input->filter.formatted.bkt_hash)) { 2607 err = ixgbe_fdir_erase_perfect_filter_82599(hw, 2608 &rule->filter, 2609 sw_idx); 2610 } 2611 2612 hlist_del(&rule->fdir_node); 2613 kfree(rule); 2614 adapter->fdir_filter_count--; 2615 } 2616 2617 /* 2618 * If no input this was a delete, err should be 0 if a rule was 2619 * successfully found and removed from the list else -EINVAL 2620 */ 2621 if (!input) 2622 return err; 2623 2624 /* initialize node and set software index */ 2625 INIT_HLIST_NODE(&input->fdir_node); 2626 2627 /* add filter to the list */ 2628 if (parent) 2629 hlist_add_behind(&input->fdir_node, &parent->fdir_node); 2630 else 2631 hlist_add_head(&input->fdir_node, 2632 &adapter->fdir_filter_list); 2633 2634 /* update counts */ 2635 adapter->fdir_filter_count++; 2636 2637 return 0; 2638 } 2639 2640 static int ixgbe_flowspec_to_flow_type(struct ethtool_rx_flow_spec *fsp, 2641 u8 *flow_type) 2642 { 2643 switch (fsp->flow_type & ~FLOW_EXT) { 2644 case TCP_V4_FLOW: 2645 *flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4; 2646 break; 2647 case UDP_V4_FLOW: 2648 *flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4; 2649 break; 2650 case SCTP_V4_FLOW: 2651 *flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4; 2652 break; 2653 case IP_USER_FLOW: 2654 switch (fsp->h_u.usr_ip4_spec.proto) { 2655 case IPPROTO_TCP: 2656 *flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4; 2657 break; 2658 case IPPROTO_UDP: 2659 *flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4; 2660 break; 2661 case IPPROTO_SCTP: 2662 *flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4; 2663 break; 2664 case 0: 2665 if (!fsp->m_u.usr_ip4_spec.proto) { 2666 *flow_type = IXGBE_ATR_FLOW_TYPE_IPV4; 2667 break; 2668 } 2669 /* fall through */ 2670 default: 2671 return 0; 2672 } 2673 break; 2674 default: 2675 return 0; 2676 } 2677 2678 return 1; 2679 } 2680 2681 static int ixgbe_add_ethtool_fdir_entry(struct ixgbe_adapter *adapter, 2682 struct ethtool_rxnfc *cmd) 2683 { 2684 struct ethtool_rx_flow_spec *fsp = 2685 (struct ethtool_rx_flow_spec *)&cmd->fs; 2686 struct ixgbe_hw *hw = &adapter->hw; 2687 struct ixgbe_fdir_filter *input; 2688 union ixgbe_atr_input mask; 2689 u8 queue; 2690 int err; 2691 2692 if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) 2693 return -EOPNOTSUPP; 2694 2695 /* ring_cookie is a masked into a set of queues and ixgbe pools or 2696 * we use the drop index. 
2697 */ 2698 if (fsp->ring_cookie == RX_CLS_FLOW_DISC) { 2699 queue = IXGBE_FDIR_DROP_QUEUE; 2700 } else { 2701 u32 ring = ethtool_get_flow_spec_ring(fsp->ring_cookie); 2702 u8 vf = ethtool_get_flow_spec_ring_vf(fsp->ring_cookie); 2703 2704 if (!vf && (ring >= adapter->num_rx_queues)) 2705 return -EINVAL; 2706 else if (vf && 2707 ((vf > adapter->num_vfs) || 2708 ring >= adapter->num_rx_queues_per_pool)) 2709 return -EINVAL; 2710 2711 /* Map the ring onto the absolute queue index */ 2712 if (!vf) 2713 queue = adapter->rx_ring[ring]->reg_idx; 2714 else 2715 queue = ((vf - 1) * 2716 adapter->num_rx_queues_per_pool) + ring; 2717 } 2718 2719 /* Don't allow indexes to exist outside of available space */ 2720 if (fsp->location >= ((1024 << adapter->fdir_pballoc) - 2)) { 2721 e_err(drv, "Location out of range\n"); 2722 return -EINVAL; 2723 } 2724 2725 input = kzalloc(sizeof(*input), GFP_ATOMIC); 2726 if (!input) 2727 return -ENOMEM; 2728 2729 memset(&mask, 0, sizeof(union ixgbe_atr_input)); 2730 2731 /* set SW index */ 2732 input->sw_idx = fsp->location; 2733 2734 /* record flow type */ 2735 if (!ixgbe_flowspec_to_flow_type(fsp, 2736 &input->filter.formatted.flow_type)) { 2737 e_err(drv, "Unrecognized flow type\n"); 2738 goto err_out; 2739 } 2740 2741 mask.formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK | 2742 IXGBE_ATR_L4TYPE_MASK; 2743 2744 if (input->filter.formatted.flow_type == IXGBE_ATR_FLOW_TYPE_IPV4) 2745 mask.formatted.flow_type &= IXGBE_ATR_L4TYPE_IPV6_MASK; 2746 2747 /* Copy input into formatted structures */ 2748 input->filter.formatted.src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4src; 2749 mask.formatted.src_ip[0] = fsp->m_u.tcp_ip4_spec.ip4src; 2750 input->filter.formatted.dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst; 2751 mask.formatted.dst_ip[0] = fsp->m_u.tcp_ip4_spec.ip4dst; 2752 input->filter.formatted.src_port = fsp->h_u.tcp_ip4_spec.psrc; 2753 mask.formatted.src_port = fsp->m_u.tcp_ip4_spec.psrc; 2754 input->filter.formatted.dst_port = fsp->h_u.tcp_ip4_spec.pdst; 2755 mask.formatted.dst_port = fsp->m_u.tcp_ip4_spec.pdst; 2756 2757 if (fsp->flow_type & FLOW_EXT) { 2758 input->filter.formatted.vm_pool = 2759 (unsigned char)ntohl(fsp->h_ext.data[1]); 2760 mask.formatted.vm_pool = 2761 (unsigned char)ntohl(fsp->m_ext.data[1]); 2762 input->filter.formatted.vlan_id = fsp->h_ext.vlan_tci; 2763 mask.formatted.vlan_id = fsp->m_ext.vlan_tci; 2764 input->filter.formatted.flex_bytes = 2765 fsp->h_ext.vlan_etype; 2766 mask.formatted.flex_bytes = fsp->m_ext.vlan_etype; 2767 } 2768 2769 /* determine if we need to drop or route the packet */ 2770 if (fsp->ring_cookie == RX_CLS_FLOW_DISC) 2771 input->action = IXGBE_FDIR_DROP_QUEUE; 2772 else 2773 input->action = fsp->ring_cookie; 2774 2775 spin_lock(&adapter->fdir_perfect_lock); 2776 2777 if (hlist_empty(&adapter->fdir_filter_list)) { 2778 /* save mask and program input mask into HW */ 2779 memcpy(&adapter->fdir_mask, &mask, sizeof(mask)); 2780 err = ixgbe_fdir_set_input_mask_82599(hw, &mask); 2781 if (err) { 2782 e_err(drv, "Error writing mask\n"); 2783 goto err_out_w_lock; 2784 } 2785 } else if (memcmp(&adapter->fdir_mask, &mask, sizeof(mask))) { 2786 e_err(drv, "Only one mask supported per port\n"); 2787 goto err_out_w_lock; 2788 } 2789 2790 /* apply mask and compute/store hash */ 2791 ixgbe_atr_compute_perfect_hash_82599(&input->filter, &mask); 2792 2793 /* program filters to filter memory */ 2794 err = ixgbe_fdir_write_perfect_filter_82599(hw, 2795 &input->filter, input->sw_idx, queue); 2796 if (err) 2797 goto err_out_w_lock; 2798 2799 
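/*
 * The hardware accepted the filter, so mirror it into the driver's
 * software list (replacing any previous rule at the same sw_idx),
 * where ETHTOOL_GRXCLSRULE and ETHTOOL_SRXCLSRLDEL can find it later.
 * Illustrative ethtool invocation that exercises this path (interface
 * name and field values are examples only):
 *   ethtool -N eth0 flow-type tcp4 src-ip 192.168.0.1 dst-port 80 \
 *           action 4 loc 1
 */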
ixgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx); 2800 2801 spin_unlock(&adapter->fdir_perfect_lock); 2802 2803 return err; 2804 err_out_w_lock: 2805 spin_unlock(&adapter->fdir_perfect_lock); 2806 err_out: 2807 kfree(input); 2808 return -EINVAL; 2809 } 2810 2811 static int ixgbe_del_ethtool_fdir_entry(struct ixgbe_adapter *adapter, 2812 struct ethtool_rxnfc *cmd) 2813 { 2814 struct ethtool_rx_flow_spec *fsp = 2815 (struct ethtool_rx_flow_spec *)&cmd->fs; 2816 int err; 2817 2818 spin_lock(&adapter->fdir_perfect_lock); 2819 err = ixgbe_update_ethtool_fdir_entry(adapter, NULL, fsp->location); 2820 spin_unlock(&adapter->fdir_perfect_lock); 2821 2822 return err; 2823 } 2824 2825 #define UDP_RSS_FLAGS (IXGBE_FLAG2_RSS_FIELD_IPV4_UDP | \ 2826 IXGBE_FLAG2_RSS_FIELD_IPV6_UDP) 2827 static int ixgbe_set_rss_hash_opt(struct ixgbe_adapter *adapter, 2828 struct ethtool_rxnfc *nfc) 2829 { 2830 u32 flags2 = adapter->flags2; 2831 2832 /* 2833 * RSS does not support anything other than hashing 2834 * to queues on src and dst IPs and ports 2835 */ 2836 if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST | 2837 RXH_L4_B_0_1 | RXH_L4_B_2_3)) 2838 return -EINVAL; 2839 2840 switch (nfc->flow_type) { 2841 case TCP_V4_FLOW: 2842 case TCP_V6_FLOW: 2843 if (!(nfc->data & RXH_IP_SRC) || 2844 !(nfc->data & RXH_IP_DST) || 2845 !(nfc->data & RXH_L4_B_0_1) || 2846 !(nfc->data & RXH_L4_B_2_3)) 2847 return -EINVAL; 2848 break; 2849 case UDP_V4_FLOW: 2850 if (!(nfc->data & RXH_IP_SRC) || 2851 !(nfc->data & RXH_IP_DST)) 2852 return -EINVAL; 2853 switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { 2854 case 0: 2855 flags2 &= ~IXGBE_FLAG2_RSS_FIELD_IPV4_UDP; 2856 break; 2857 case (RXH_L4_B_0_1 | RXH_L4_B_2_3): 2858 flags2 |= IXGBE_FLAG2_RSS_FIELD_IPV4_UDP; 2859 break; 2860 default: 2861 return -EINVAL; 2862 } 2863 break; 2864 case UDP_V6_FLOW: 2865 if (!(nfc->data & RXH_IP_SRC) || 2866 !(nfc->data & RXH_IP_DST)) 2867 return -EINVAL; 2868 switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { 2869 case 0: 2870 flags2 &= ~IXGBE_FLAG2_RSS_FIELD_IPV6_UDP; 2871 break; 2872 case (RXH_L4_B_0_1 | RXH_L4_B_2_3): 2873 flags2 |= IXGBE_FLAG2_RSS_FIELD_IPV6_UDP; 2874 break; 2875 default: 2876 return -EINVAL; 2877 } 2878 break; 2879 case AH_ESP_V4_FLOW: 2880 case AH_V4_FLOW: 2881 case ESP_V4_FLOW: 2882 case SCTP_V4_FLOW: 2883 case AH_ESP_V6_FLOW: 2884 case AH_V6_FLOW: 2885 case ESP_V6_FLOW: 2886 case SCTP_V6_FLOW: 2887 if (!(nfc->data & RXH_IP_SRC) || 2888 !(nfc->data & RXH_IP_DST) || 2889 (nfc->data & RXH_L4_B_0_1) || 2890 (nfc->data & RXH_L4_B_2_3)) 2891 return -EINVAL; 2892 break; 2893 default: 2894 return -EINVAL; 2895 } 2896 2897 /* if we changed something we need to update flags */ 2898 if (flags2 != adapter->flags2) { 2899 struct ixgbe_hw *hw = &adapter->hw; 2900 u32 mrqc; 2901 unsigned int pf_pool = adapter->num_vfs; 2902 2903 if ((hw->mac.type >= ixgbe_mac_X550) && 2904 (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) 2905 mrqc = IXGBE_READ_REG(hw, IXGBE_PFVFMRQC(pf_pool)); 2906 else 2907 mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC); 2908 2909 if ((flags2 & UDP_RSS_FLAGS) && 2910 !(adapter->flags2 & UDP_RSS_FLAGS)) 2911 e_warn(drv, "enabling UDP RSS: fragmented packets may arrive out of order to the stack above\n"); 2912 2913 adapter->flags2 = flags2; 2914 2915 /* Perform hash on these packet types */ 2916 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4 2917 | IXGBE_MRQC_RSS_FIELD_IPV4_TCP 2918 | IXGBE_MRQC_RSS_FIELD_IPV6 2919 | IXGBE_MRQC_RSS_FIELD_IPV6_TCP; 2920 2921 mrqc &= ~(IXGBE_MRQC_RSS_FIELD_IPV4_UDP | 2922 IXGBE_MRQC_RSS_FIELD_IPV6_UDP); 2923 
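/*
 * UDP hashing is opt-in: both UDP field-select bits were cleared
 * above and are now set again individually from the updated flags.
 * Illustrative usage (interface name is an example):
 *   ethtool -N eth0 rx-flow-hash udp4 sdfn
 */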
2924 if (flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP) 2925 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP; 2926 2927 if (flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP) 2928 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP; 2929 2930 if ((hw->mac.type >= ixgbe_mac_X550) && 2931 (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) 2932 IXGBE_WRITE_REG(hw, IXGBE_PFVFMRQC(pf_pool), mrqc); 2933 else 2934 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); 2935 } 2936 2937 return 0; 2938 } 2939 2940 static int ixgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) 2941 { 2942 struct ixgbe_adapter *adapter = netdev_priv(dev); 2943 int ret = -EOPNOTSUPP; 2944 2945 switch (cmd->cmd) { 2946 case ETHTOOL_SRXCLSRLINS: 2947 ret = ixgbe_add_ethtool_fdir_entry(adapter, cmd); 2948 break; 2949 case ETHTOOL_SRXCLSRLDEL: 2950 ret = ixgbe_del_ethtool_fdir_entry(adapter, cmd); 2951 break; 2952 case ETHTOOL_SRXFH: 2953 ret = ixgbe_set_rss_hash_opt(adapter, cmd); 2954 break; 2955 default: 2956 break; 2957 } 2958 2959 return ret; 2960 } 2961 2962 static int ixgbe_rss_indir_tbl_max(struct ixgbe_adapter *adapter) 2963 { 2964 if (adapter->hw.mac.type < ixgbe_mac_X550) 2965 return 16; 2966 else 2967 return 64; 2968 } 2969 2970 static u32 ixgbe_get_rxfh_key_size(struct net_device *netdev) 2971 { 2972 return IXGBE_RSS_KEY_SIZE; 2973 } 2974 2975 static u32 ixgbe_rss_indir_size(struct net_device *netdev) 2976 { 2977 struct ixgbe_adapter *adapter = netdev_priv(netdev); 2978 2979 return ixgbe_rss_indir_tbl_entries(adapter); 2980 } 2981 2982 static void ixgbe_get_reta(struct ixgbe_adapter *adapter, u32 *indir) 2983 { 2984 int i, reta_size = ixgbe_rss_indir_tbl_entries(adapter); 2985 u16 rss_m = adapter->ring_feature[RING_F_RSS].mask; 2986 2987 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) 2988 rss_m = adapter->ring_feature[RING_F_RSS].indices - 1; 2989 2990 for (i = 0; i < reta_size; i++) 2991 indir[i] = adapter->rss_indir_tbl[i] & rss_m; 2992 } 2993 2994 static int ixgbe_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, 2995 u8 *hfunc) 2996 { 2997 struct ixgbe_adapter *adapter = netdev_priv(netdev); 2998 2999 if (hfunc) 3000 *hfunc = ETH_RSS_HASH_TOP; 3001 3002 if (indir) 3003 ixgbe_get_reta(adapter, indir); 3004 3005 if (key) 3006 memcpy(key, adapter->rss_key, ixgbe_get_rxfh_key_size(netdev)); 3007 3008 return 0; 3009 } 3010 3011 static int ixgbe_set_rxfh(struct net_device *netdev, const u32 *indir, 3012 const u8 *key, const u8 hfunc) 3013 { 3014 struct ixgbe_adapter *adapter = netdev_priv(netdev); 3015 int i; 3016 u32 reta_entries = ixgbe_rss_indir_tbl_entries(adapter); 3017 3018 if (hfunc) 3019 return -EINVAL; 3020 3021 /* Fill out the redirection table */ 3022 if (indir) { 3023 int max_queues = min_t(int, adapter->num_rx_queues, 3024 ixgbe_rss_indir_tbl_max(adapter)); 3025 3026 /*Allow at least 2 queues w/ SR-IOV.*/ 3027 if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) && 3028 (max_queues < 2)) 3029 max_queues = 2; 3030 3031 /* Verify user input. 
*/ 3032 for (i = 0; i < reta_entries; i++) 3033 if (indir[i] >= max_queues) 3034 return -EINVAL; 3035 3036 for (i = 0; i < reta_entries; i++) 3037 adapter->rss_indir_tbl[i] = indir[i]; 3038 3039 ixgbe_store_reta(adapter); 3040 } 3041 3042 /* Fill out the rss hash key */ 3043 if (key) { 3044 memcpy(adapter->rss_key, key, ixgbe_get_rxfh_key_size(netdev)); 3045 ixgbe_store_key(adapter); 3046 } 3047 3048 return 0; 3049 } 3050 3051 static int ixgbe_get_ts_info(struct net_device *dev, 3052 struct ethtool_ts_info *info) 3053 { 3054 struct ixgbe_adapter *adapter = netdev_priv(dev); 3055 3056 /* we always support timestamping disabled */ 3057 info->rx_filters = BIT(HWTSTAMP_FILTER_NONE); 3058 3059 switch (adapter->hw.mac.type) { 3060 case ixgbe_mac_X550: 3061 case ixgbe_mac_X550EM_x: 3062 case ixgbe_mac_x550em_a: 3063 info->rx_filters |= BIT(HWTSTAMP_FILTER_ALL); 3064 break; 3065 case ixgbe_mac_X540: 3066 case ixgbe_mac_82599EB: 3067 info->rx_filters |= 3068 BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) | 3069 BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) | 3070 BIT(HWTSTAMP_FILTER_PTP_V2_EVENT); 3071 break; 3072 default: 3073 return ethtool_op_get_ts_info(dev, info); 3074 } 3075 3076 info->so_timestamping = 3077 SOF_TIMESTAMPING_TX_SOFTWARE | 3078 SOF_TIMESTAMPING_RX_SOFTWARE | 3079 SOF_TIMESTAMPING_SOFTWARE | 3080 SOF_TIMESTAMPING_TX_HARDWARE | 3081 SOF_TIMESTAMPING_RX_HARDWARE | 3082 SOF_TIMESTAMPING_RAW_HARDWARE; 3083 3084 if (adapter->ptp_clock) 3085 info->phc_index = ptp_clock_index(adapter->ptp_clock); 3086 else 3087 info->phc_index = -1; 3088 3089 info->tx_types = 3090 BIT(HWTSTAMP_TX_OFF) | 3091 BIT(HWTSTAMP_TX_ON); 3092 3093 return 0; 3094 } 3095 3096 static unsigned int ixgbe_max_channels(struct ixgbe_adapter *adapter) 3097 { 3098 unsigned int max_combined; 3099 u8 tcs = adapter->hw_tcs; 3100 3101 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) { 3102 /* We only support one q_vector without MSI-X */ 3103 max_combined = 1; 3104 } else if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { 3105 /* Limit value based on the queue mask */ 3106 max_combined = adapter->ring_feature[RING_F_RSS].mask + 1; 3107 } else if (tcs > 1) { 3108 /* For DCB report channels per traffic class */ 3109 if (adapter->hw.mac.type == ixgbe_mac_82598EB) { 3110 /* 8 TC w/ 4 queues per TC */ 3111 max_combined = 4; 3112 } else if (tcs > 4) { 3113 /* 8 TC w/ 8 queues per TC */ 3114 max_combined = 8; 3115 } else { 3116 /* 4 TC w/ 16 queues per TC */ 3117 max_combined = 16; 3118 } 3119 } else if (adapter->atr_sample_rate) { 3120 /* support up to 64 queues with ATR */ 3121 max_combined = IXGBE_MAX_FDIR_INDICES; 3122 } else { 3123 /* support up to 16 queues with RSS */ 3124 max_combined = ixgbe_max_rss_indices(adapter); 3125 } 3126 3127 return max_combined; 3128 } 3129 3130 static void ixgbe_get_channels(struct net_device *dev, 3131 struct ethtool_channels *ch) 3132 { 3133 struct ixgbe_adapter *adapter = netdev_priv(dev); 3134 3135 /* report maximum channels */ 3136 ch->max_combined = ixgbe_max_channels(adapter); 3137 3138 /* report info for other vector */ 3139 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { 3140 ch->max_other = NON_Q_VECTORS; 3141 ch->other_count = NON_Q_VECTORS; 3142 } 3143 3144 /* record RSS queues */ 3145 ch->combined_count = adapter->ring_feature[RING_F_RSS].indices; 3146 3147 /* nothing else to report if RSS is disabled */ 3148 if (ch->combined_count == 1) 3149 return; 3150 3151 /* we do not support ATR queueing if SR-IOV is enabled */ 3152 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) 3153 return; 3154 3155 /* same 
thing goes for being DCB enabled */ 3156 if (adapter->hw_tcs > 1) 3157 return; 3158 3159 /* if ATR is disabled we can exit */ 3160 if (!adapter->atr_sample_rate) 3161 return; 3162 3163 /* report flow director queues as maximum channels */ 3164 ch->combined_count = adapter->ring_feature[RING_F_FDIR].indices; 3165 } 3166 3167 static int ixgbe_set_channels(struct net_device *dev, 3168 struct ethtool_channels *ch) 3169 { 3170 struct ixgbe_adapter *adapter = netdev_priv(dev); 3171 unsigned int count = ch->combined_count; 3172 u8 max_rss_indices = ixgbe_max_rss_indices(adapter); 3173 3174 /* verify they are not requesting separate vectors */ 3175 if (!count || ch->rx_count || ch->tx_count) 3176 return -EINVAL; 3177 3178 /* verify other_count has not changed */ 3179 if (ch->other_count != NON_Q_VECTORS) 3180 return -EINVAL; 3181 3182 /* verify the number of channels does not exceed hardware limits */ 3183 if (count > ixgbe_max_channels(adapter)) 3184 return -EINVAL; 3185 3186 /* update feature limits from largest to smallest supported values */ 3187 adapter->ring_feature[RING_F_FDIR].limit = count; 3188 3189 /* cap RSS limit */ 3190 if (count > max_rss_indices) 3191 count = max_rss_indices; 3192 adapter->ring_feature[RING_F_RSS].limit = count; 3193 3194 #ifdef IXGBE_FCOE 3195 /* cap FCoE limit at 8 */ 3196 if (count > IXGBE_FCRETA_SIZE) 3197 count = IXGBE_FCRETA_SIZE; 3198 adapter->ring_feature[RING_F_FCOE].limit = count; 3199 3200 #endif 3201 /* use setup TC to update any traffic class queue mapping */ 3202 return ixgbe_setup_tc(dev, adapter->hw_tcs); 3203 } 3204 3205 static int ixgbe_get_module_info(struct net_device *dev, 3206 struct ethtool_modinfo *modinfo) 3207 { 3208 struct ixgbe_adapter *adapter = netdev_priv(dev); 3209 struct ixgbe_hw *hw = &adapter->hw; 3210 s32 status; 3211 u8 sff8472_rev, addr_mode; 3212 bool page_swap = false; 3213 3214 if (hw->phy.type == ixgbe_phy_fw) 3215 return -ENXIO; 3216 3217 /* Check whether we support SFF-8472 or not */ 3218 status = hw->phy.ops.read_i2c_eeprom(hw, 3219 IXGBE_SFF_SFF_8472_COMP, 3220 &sff8472_rev); 3221 if (status) 3222 return -EIO; 3223 3224 /* addressing mode is not supported */ 3225 status = hw->phy.ops.read_i2c_eeprom(hw, 3226 IXGBE_SFF_SFF_8472_SWAP, 3227 &addr_mode); 3228 if (status) 3229 return -EIO; 3230 3231 if (addr_mode & IXGBE_SFF_ADDRESSING_MODE) { 3232 e_err(drv, "Address change required to access page 0xA2, but not supported. Please report the module type to the driver maintainers.\n"); 3233 page_swap = true; 3234 } 3235 3236 if (sff8472_rev == IXGBE_SFF_SFF_8472_UNSUP || page_swap) { 3237 /* We have a SFP, but it does not support SFF-8472 */ 3238 modinfo->type = ETH_MODULE_SFF_8079; 3239 modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN; 3240 } else { 3241 /* We have a SFP which supports a revision of SFF-8472. 
*/ 3242 modinfo->type = ETH_MODULE_SFF_8472; 3243 modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; 3244 } 3245 3246 return 0; 3247 } 3248 3249 static int ixgbe_get_module_eeprom(struct net_device *dev, 3250 struct ethtool_eeprom *ee, 3251 u8 *data) 3252 { 3253 struct ixgbe_adapter *adapter = netdev_priv(dev); 3254 struct ixgbe_hw *hw = &adapter->hw; 3255 s32 status = IXGBE_ERR_PHY_ADDR_INVALID; 3256 u8 databyte = 0xFF; 3257 int i = 0; 3258 3259 if (ee->len == 0) 3260 return -EINVAL; 3261 3262 if (hw->phy.type == ixgbe_phy_fw) 3263 return -ENXIO; 3264 3265 for (i = ee->offset; i < ee->offset + ee->len; i++) { 3266 /* I2C reads can take long time */ 3267 if (test_bit(__IXGBE_IN_SFP_INIT, &adapter->state)) 3268 return -EBUSY; 3269 3270 if (i < ETH_MODULE_SFF_8079_LEN) 3271 status = hw->phy.ops.read_i2c_eeprom(hw, i, &databyte); 3272 else 3273 status = hw->phy.ops.read_i2c_sff8472(hw, i, &databyte); 3274 3275 if (status) 3276 return -EIO; 3277 3278 data[i - ee->offset] = databyte; 3279 } 3280 3281 return 0; 3282 } 3283 3284 static const struct { 3285 ixgbe_link_speed mac_speed; 3286 u32 supported; 3287 } ixgbe_ls_map[] = { 3288 { IXGBE_LINK_SPEED_10_FULL, SUPPORTED_10baseT_Full }, 3289 { IXGBE_LINK_SPEED_100_FULL, SUPPORTED_100baseT_Full }, 3290 { IXGBE_LINK_SPEED_1GB_FULL, SUPPORTED_1000baseT_Full }, 3291 { IXGBE_LINK_SPEED_2_5GB_FULL, SUPPORTED_2500baseX_Full }, 3292 { IXGBE_LINK_SPEED_10GB_FULL, SUPPORTED_10000baseT_Full }, 3293 }; 3294 3295 static const struct { 3296 u32 lp_advertised; 3297 u32 mac_speed; 3298 } ixgbe_lp_map[] = { 3299 { FW_PHY_ACT_UD_2_100M_TX_EEE, SUPPORTED_100baseT_Full }, 3300 { FW_PHY_ACT_UD_2_1G_T_EEE, SUPPORTED_1000baseT_Full }, 3301 { FW_PHY_ACT_UD_2_10G_T_EEE, SUPPORTED_10000baseT_Full }, 3302 { FW_PHY_ACT_UD_2_1G_KX_EEE, SUPPORTED_1000baseKX_Full }, 3303 { FW_PHY_ACT_UD_2_10G_KX4_EEE, SUPPORTED_10000baseKX4_Full }, 3304 { FW_PHY_ACT_UD_2_10G_KR_EEE, SUPPORTED_10000baseKR_Full}, 3305 }; 3306 3307 static int 3308 ixgbe_get_eee_fw(struct ixgbe_adapter *adapter, struct ethtool_eee *edata) 3309 { 3310 u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 }; 3311 struct ixgbe_hw *hw = &adapter->hw; 3312 s32 rc; 3313 u16 i; 3314 3315 rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_UD_2, &info); 3316 if (rc) 3317 return rc; 3318 3319 edata->lp_advertised = 0; 3320 for (i = 0; i < ARRAY_SIZE(ixgbe_lp_map); ++i) { 3321 if (info[0] & ixgbe_lp_map[i].lp_advertised) 3322 edata->lp_advertised |= ixgbe_lp_map[i].mac_speed; 3323 } 3324 3325 edata->supported = 0; 3326 for (i = 0; i < ARRAY_SIZE(ixgbe_ls_map); ++i) { 3327 if (hw->phy.eee_speeds_supported & ixgbe_ls_map[i].mac_speed) 3328 edata->supported |= ixgbe_ls_map[i].supported; 3329 } 3330 3331 edata->advertised = 0; 3332 for (i = 0; i < ARRAY_SIZE(ixgbe_ls_map); ++i) { 3333 if (hw->phy.eee_speeds_advertised & ixgbe_ls_map[i].mac_speed) 3334 edata->advertised |= ixgbe_ls_map[i].supported; 3335 } 3336 3337 edata->eee_enabled = !!edata->advertised; 3338 edata->tx_lpi_enabled = edata->eee_enabled; 3339 if (edata->advertised & edata->lp_advertised) 3340 edata->eee_active = true; 3341 3342 return 0; 3343 } 3344 3345 static int ixgbe_get_eee(struct net_device *netdev, struct ethtool_eee *edata) 3346 { 3347 struct ixgbe_adapter *adapter = netdev_priv(netdev); 3348 struct ixgbe_hw *hw = &adapter->hw; 3349 3350 if (!(adapter->flags2 & IXGBE_FLAG2_EEE_CAPABLE)) 3351 return -EOPNOTSUPP; 3352 3353 if (hw->phy.eee_speeds_supported && hw->phy.type == ixgbe_phy_fw) 3354 return ixgbe_get_eee_fw(adapter, edata); 3355 3356 return -EOPNOTSUPP; 3357 } 3358 3359 static 
int ixgbe_set_eee(struct net_device *netdev, struct ethtool_eee *edata) 3360 { 3361 struct ixgbe_adapter *adapter = netdev_priv(netdev); 3362 struct ixgbe_hw *hw = &adapter->hw; 3363 struct ethtool_eee eee_data; 3364 s32 ret_val; 3365 3366 if (!(adapter->flags2 & IXGBE_FLAG2_EEE_CAPABLE)) 3367 return -EOPNOTSUPP; 3368 3369 memset(&eee_data, 0, sizeof(struct ethtool_eee)); 3370 3371 ret_val = ixgbe_get_eee(netdev, &eee_data); 3372 if (ret_val) 3373 return ret_val; 3374 3375 if (eee_data.eee_enabled && !edata->eee_enabled) { 3376 if (eee_data.tx_lpi_enabled != edata->tx_lpi_enabled) { 3377 e_err(drv, "Setting EEE tx-lpi is not supported\n"); 3378 return -EINVAL; 3379 } 3380 3381 if (eee_data.tx_lpi_timer != edata->tx_lpi_timer) { 3382 e_err(drv, 3383 "Setting EEE Tx LPI timer is not supported\n"); 3384 return -EINVAL; 3385 } 3386 3387 if (eee_data.advertised != edata->advertised) { 3388 e_err(drv, 3389 "Setting EEE advertised speeds is not supported\n"); 3390 return -EINVAL; 3391 } 3392 } 3393 3394 if (eee_data.eee_enabled != edata->eee_enabled) { 3395 if (edata->eee_enabled) { 3396 adapter->flags2 |= IXGBE_FLAG2_EEE_ENABLED; 3397 hw->phy.eee_speeds_advertised = 3398 hw->phy.eee_speeds_supported; 3399 } else { 3400 adapter->flags2 &= ~IXGBE_FLAG2_EEE_ENABLED; 3401 hw->phy.eee_speeds_advertised = 0; 3402 } 3403 3404 /* reset link */ 3405 if (netif_running(netdev)) 3406 ixgbe_reinit_locked(adapter); 3407 else 3408 ixgbe_reset(adapter); 3409 } 3410 3411 return 0; 3412 } 3413 3414 static u32 ixgbe_get_priv_flags(struct net_device *netdev) 3415 { 3416 struct ixgbe_adapter *adapter = netdev_priv(netdev); 3417 u32 priv_flags = 0; 3418 3419 if (adapter->flags2 & IXGBE_FLAG2_RX_LEGACY) 3420 priv_flags |= IXGBE_PRIV_FLAGS_LEGACY_RX; 3421 3422 return priv_flags; 3423 } 3424 3425 static int ixgbe_set_priv_flags(struct net_device *netdev, u32 priv_flags) 3426 { 3427 struct ixgbe_adapter *adapter = netdev_priv(netdev); 3428 unsigned int flags2 = adapter->flags2; 3429 3430 flags2 &= ~IXGBE_FLAG2_RX_LEGACY; 3431 if (priv_flags & IXGBE_PRIV_FLAGS_LEGACY_RX) 3432 flags2 |= IXGBE_FLAG2_RX_LEGACY; 3433 3434 if (flags2 != adapter->flags2) { 3435 adapter->flags2 = flags2; 3436 3437 /* reset interface to repopulate queues */ 3438 if (netif_running(netdev)) 3439 ixgbe_reinit_locked(adapter); 3440 } 3441 3442 return 0; 3443 } 3444 3445 static const struct ethtool_ops ixgbe_ethtool_ops = { 3446 .get_drvinfo = ixgbe_get_drvinfo, 3447 .get_regs_len = ixgbe_get_regs_len, 3448 .get_regs = ixgbe_get_regs, 3449 .get_wol = ixgbe_get_wol, 3450 .set_wol = ixgbe_set_wol, 3451 .nway_reset = ixgbe_nway_reset, 3452 .get_link = ethtool_op_get_link, 3453 .get_eeprom_len = ixgbe_get_eeprom_len, 3454 .get_eeprom = ixgbe_get_eeprom, 3455 .set_eeprom = ixgbe_set_eeprom, 3456 .get_ringparam = ixgbe_get_ringparam, 3457 .set_ringparam = ixgbe_set_ringparam, 3458 .get_pauseparam = ixgbe_get_pauseparam, 3459 .set_pauseparam = ixgbe_set_pauseparam, 3460 .get_msglevel = ixgbe_get_msglevel, 3461 .set_msglevel = ixgbe_set_msglevel, 3462 .self_test = ixgbe_diag_test, 3463 .get_strings = ixgbe_get_strings, 3464 .set_phys_id = ixgbe_set_phys_id, 3465 .get_sset_count = ixgbe_get_sset_count, 3466 .get_ethtool_stats = ixgbe_get_ethtool_stats, 3467 .get_coalesce = ixgbe_get_coalesce, 3468 .set_coalesce = ixgbe_set_coalesce, 3469 .get_rxnfc = ixgbe_get_rxnfc, 3470 .set_rxnfc = ixgbe_set_rxnfc, 3471 .get_rxfh_indir_size = ixgbe_rss_indir_size, 3472 .get_rxfh_key_size = ixgbe_get_rxfh_key_size, 3473 .get_rxfh = ixgbe_get_rxfh, 3474 .set_rxfh = 
ixgbe_set_rxfh, 3475 .get_eee = ixgbe_get_eee, 3476 .set_eee = ixgbe_set_eee, 3477 .get_channels = ixgbe_get_channels, 3478 .set_channels = ixgbe_set_channels, 3479 .get_priv_flags = ixgbe_get_priv_flags, 3480 .set_priv_flags = ixgbe_set_priv_flags, 3481 .get_ts_info = ixgbe_get_ts_info, 3482 .get_module_info = ixgbe_get_module_info, 3483 .get_module_eeprom = ixgbe_get_module_eeprom, 3484 .get_link_ksettings = ixgbe_get_link_ksettings, 3485 .set_link_ksettings = ixgbe_set_link_ksettings, 3486 }; 3487 3488 void ixgbe_set_ethtool_ops(struct net_device *netdev) 3489 { 3490 netdev->ethtool_ops = &ixgbe_ethtool_ops; 3491 } 3492
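/*
 * Quick reference (illustrative, not exhaustive): the callbacks wired
 * up in ixgbe_ethtool_ops above back the common ethtool invocations,
 * shown here with an example interface name:
 *   ethtool -i eth0          -> get_drvinfo
 *   ethtool -t eth0 offline  -> self_test (ixgbe_diag_test)
 *   ethtool -S eth0          -> get_ethtool_stats
 *   ethtool -x / -X eth0     -> get_rxfh / set_rxfh
 *   ethtool -l / -L eth0     -> get_channels / set_channels
 *   ethtool -m eth0          -> get_module_info / get_module_eeprom
 */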