1 /******************************************************************************* 2 3 Intel 10 Gigabit PCI Express Linux driver 4 Copyright(c) 1999 - 2014 Intel Corporation. 5 6 This program is free software; you can redistribute it and/or modify it 7 under the terms and conditions of the GNU General Public License, 8 version 2, as published by the Free Software Foundation. 9 10 This program is distributed in the hope it will be useful, but WITHOUT 11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 13 more details. 14 15 You should have received a copy of the GNU General Public License along with 16 this program; if not, write to the Free Software Foundation, Inc., 17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 18 19 The full GNU General Public License is included in this distribution in 20 the file called "COPYING". 21 22 Contact Information: 23 Linux NICS <linux.nics@intel.com> 24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 26 27 *******************************************************************************/ 28 29 /* ethtool support for ixgbe */ 30 31 #include <linux/interrupt.h> 32 #include <linux/types.h> 33 #include <linux/module.h> 34 #include <linux/slab.h> 35 #include <linux/pci.h> 36 #include <linux/netdevice.h> 37 #include <linux/ethtool.h> 38 #include <linux/vmalloc.h> 39 #include <linux/highmem.h> 40 #include <linux/uaccess.h> 41 42 #include "ixgbe.h" 43 #include "ixgbe_phy.h" 44 45 46 #define IXGBE_ALL_RAR_ENTRIES 16 47 48 enum {NETDEV_STATS, IXGBE_STATS}; 49 50 struct ixgbe_stats { 51 char stat_string[ETH_GSTRING_LEN]; 52 int type; 53 int sizeof_stat; 54 int stat_offset; 55 }; 56 57 #define IXGBE_STAT(m) IXGBE_STATS, \ 58 sizeof(((struct ixgbe_adapter *)0)->m), \ 59 offsetof(struct ixgbe_adapter, m) 60 #define IXGBE_NETDEV_STAT(m) NETDEV_STATS, \ 61 sizeof(((struct rtnl_link_stats64 *)0)->m), \ 62 offsetof(struct rtnl_link_stats64, m) 63 64 static const struct ixgbe_stats ixgbe_gstrings_stats[] = { 65 {"rx_packets", IXGBE_NETDEV_STAT(rx_packets)}, 66 {"tx_packets", IXGBE_NETDEV_STAT(tx_packets)}, 67 {"rx_bytes", IXGBE_NETDEV_STAT(rx_bytes)}, 68 {"tx_bytes", IXGBE_NETDEV_STAT(tx_bytes)}, 69 {"rx_pkts_nic", IXGBE_STAT(stats.gprc)}, 70 {"tx_pkts_nic", IXGBE_STAT(stats.gptc)}, 71 {"rx_bytes_nic", IXGBE_STAT(stats.gorc)}, 72 {"tx_bytes_nic", IXGBE_STAT(stats.gotc)}, 73 {"lsc_int", IXGBE_STAT(lsc_int)}, 74 {"tx_busy", IXGBE_STAT(tx_busy)}, 75 {"non_eop_descs", IXGBE_STAT(non_eop_descs)}, 76 {"rx_errors", IXGBE_NETDEV_STAT(rx_errors)}, 77 {"tx_errors", IXGBE_NETDEV_STAT(tx_errors)}, 78 {"rx_dropped", IXGBE_NETDEV_STAT(rx_dropped)}, 79 {"tx_dropped", IXGBE_NETDEV_STAT(tx_dropped)}, 80 {"multicast", IXGBE_NETDEV_STAT(multicast)}, 81 {"broadcast", IXGBE_STAT(stats.bprc)}, 82 {"rx_no_buffer_count", IXGBE_STAT(stats.rnbc[0]) }, 83 {"collisions", IXGBE_NETDEV_STAT(collisions)}, 84 {"rx_over_errors", IXGBE_NETDEV_STAT(rx_over_errors)}, 85 {"rx_crc_errors", IXGBE_NETDEV_STAT(rx_crc_errors)}, 86 {"rx_frame_errors", IXGBE_NETDEV_STAT(rx_frame_errors)}, 87 {"hw_rsc_aggregated", IXGBE_STAT(rsc_total_count)}, 88 {"hw_rsc_flushed", IXGBE_STAT(rsc_total_flush)}, 89 {"fdir_match", IXGBE_STAT(stats.fdirmatch)}, 90 {"fdir_miss", IXGBE_STAT(stats.fdirmiss)}, 91 {"fdir_overflow", IXGBE_STAT(fdir_overflow)}, 92 {"rx_fifo_errors", IXGBE_NETDEV_STAT(rx_fifo_errors)}, 93 {"rx_missed_errors", IXGBE_NETDEV_STAT(rx_missed_errors)}, 94 {"tx_aborted_errors", IXGBE_NETDEV_STAT(tx_aborted_errors)}, 95 {"tx_carrier_errors", IXGBE_NETDEV_STAT(tx_carrier_errors)}, 96 {"tx_fifo_errors", IXGBE_NETDEV_STAT(tx_fifo_errors)}, 97 {"tx_heartbeat_errors", IXGBE_NETDEV_STAT(tx_heartbeat_errors)}, 98 {"tx_timeout_count", IXGBE_STAT(tx_timeout_count)}, 99 {"tx_restart_queue", IXGBE_STAT(restart_queue)}, 100 {"rx_long_length_errors", IXGBE_STAT(stats.roc)}, 101 {"rx_short_length_errors", IXGBE_STAT(stats.ruc)}, 102 {"tx_flow_control_xon", IXGBE_STAT(stats.lxontxc)}, 103 {"rx_flow_control_xon", IXGBE_STAT(stats.lxonrxc)}, 104 {"tx_flow_control_xoff", IXGBE_STAT(stats.lxofftxc)}, 105 {"rx_flow_control_xoff", IXGBE_STAT(stats.lxoffrxc)}, 106 {"rx_csum_offload_errors", IXGBE_STAT(hw_csum_rx_error)}, 107 {"alloc_rx_page_failed", IXGBE_STAT(alloc_rx_page_failed)}, 108 {"alloc_rx_buff_failed", IXGBE_STAT(alloc_rx_buff_failed)}, 109 {"rx_no_dma_resources", IXGBE_STAT(hw_rx_no_dma_resources)}, 110 {"os2bmc_rx_by_bmc", IXGBE_STAT(stats.o2bgptc)}, 111 {"os2bmc_tx_by_bmc", IXGBE_STAT(stats.b2ospc)}, 112 {"os2bmc_tx_by_host", IXGBE_STAT(stats.o2bspc)}, 113 {"os2bmc_rx_by_host", IXGBE_STAT(stats.b2ogprc)}, 114 #ifdef IXGBE_FCOE 115 {"fcoe_bad_fccrc", IXGBE_STAT(stats.fccrc)}, 116 {"rx_fcoe_dropped", IXGBE_STAT(stats.fcoerpdc)}, 117 {"rx_fcoe_packets", IXGBE_STAT(stats.fcoeprc)}, 118 {"rx_fcoe_dwords", IXGBE_STAT(stats.fcoedwrc)}, 119 {"fcoe_noddp", IXGBE_STAT(stats.fcoe_noddp)}, 120 {"fcoe_noddp_ext_buff", IXGBE_STAT(stats.fcoe_noddp_ext_buff)}, 121 {"tx_fcoe_packets", IXGBE_STAT(stats.fcoeptc)}, 122 {"tx_fcoe_dwords", IXGBE_STAT(stats.fcoedwtc)}, 123 #endif /* IXGBE_FCOE */ 124 }; 125 126 /* ixgbe allocates num_tx_queues and num_rx_queues symmetrically so 127 * we set the num_rx_queues to evaluate to num_tx_queues. This is 128 * used because we do not have a good way to get the max number of 129 * rx queues with CONFIG_RPS disabled. 130 */ 131 #define IXGBE_NUM_RX_QUEUES netdev->num_tx_queues 132 133 #define IXGBE_QUEUE_STATS_LEN ( \ 134 (netdev->num_tx_queues + IXGBE_NUM_RX_QUEUES) * \ 135 (sizeof(struct ixgbe_queue_stats) / sizeof(u64))) 136 #define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats) 137 #define IXGBE_PB_STATS_LEN ( \ 138 (sizeof(((struct ixgbe_adapter *)0)->stats.pxonrxc) + \ 139 sizeof(((struct ixgbe_adapter *)0)->stats.pxontxc) + \ 140 sizeof(((struct ixgbe_adapter *)0)->stats.pxoffrxc) + \ 141 sizeof(((struct ixgbe_adapter *)0)->stats.pxofftxc)) \ 142 / sizeof(u64)) 143 #define IXGBE_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + \ 144 IXGBE_PB_STATS_LEN + \ 145 IXGBE_QUEUE_STATS_LEN) 146 147 static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = { 148 "Register test (offline)", "Eeprom test (offline)", 149 "Interrupt test (offline)", "Loopback test (offline)", 150 "Link test (on/offline)" 151 }; 152 #define IXGBE_TEST_LEN sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN 153 154 static int ixgbe_get_settings(struct net_device *netdev, 155 struct ethtool_cmd *ecmd) 156 { 157 struct ixgbe_adapter *adapter = netdev_priv(netdev); 158 struct ixgbe_hw *hw = &adapter->hw; 159 ixgbe_link_speed supported_link; 160 u32 link_speed = 0; 161 bool autoneg = false; 162 bool link_up; 163 164 hw->mac.ops.get_link_capabilities(hw, &supported_link, &autoneg); 165 166 /* set the supported link speeds */ 167 if (supported_link & IXGBE_LINK_SPEED_10GB_FULL) 168 ecmd->supported |= SUPPORTED_10000baseT_Full; 169 if (supported_link & IXGBE_LINK_SPEED_1GB_FULL) 170 ecmd->supported |= SUPPORTED_1000baseT_Full; 171 if (supported_link & IXGBE_LINK_SPEED_100_FULL) 172 ecmd->supported |= SUPPORTED_100baseT_Full; 173 174 /* set the advertised speeds */ 175 if (hw->phy.autoneg_advertised) { 176 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL) 177 ecmd->advertising |= ADVERTISED_100baseT_Full; 178 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) 179 ecmd->advertising |= ADVERTISED_10000baseT_Full; 180 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) 181 ecmd->advertising |= ADVERTISED_1000baseT_Full; 182 } else { 183 /* default modes in case phy.autoneg_advertised isn't set */ 184 if (supported_link & IXGBE_LINK_SPEED_10GB_FULL) 185 ecmd->advertising |= ADVERTISED_10000baseT_Full; 186 if (supported_link & IXGBE_LINK_SPEED_1GB_FULL) 187 ecmd->advertising |= ADVERTISED_1000baseT_Full; 188 if (supported_link & IXGBE_LINK_SPEED_100_FULL) 189 ecmd->advertising |= ADVERTISED_100baseT_Full; 190 191 if (hw->phy.multispeed_fiber && !autoneg) { 192 if (supported_link & IXGBE_LINK_SPEED_10GB_FULL) 193 ecmd->advertising = ADVERTISED_10000baseT_Full; 194 } 195 } 196 197 if (autoneg) { 198 ecmd->supported |= SUPPORTED_Autoneg; 199 ecmd->advertising |= ADVERTISED_Autoneg; 200 ecmd->autoneg = AUTONEG_ENABLE; 201 } else 202 ecmd->autoneg = AUTONEG_DISABLE; 203 204 ecmd->transceiver = XCVR_EXTERNAL; 205 206 /* Determine the remaining settings based on the PHY type. */ 207 switch (adapter->hw.phy.type) { 208 case ixgbe_phy_tn: 209 case ixgbe_phy_aq: 210 case ixgbe_phy_cu_unknown: 211 ecmd->supported |= SUPPORTED_TP; 212 ecmd->advertising |= ADVERTISED_TP; 213 ecmd->port = PORT_TP; 214 break; 215 case ixgbe_phy_qt: 216 ecmd->supported |= SUPPORTED_FIBRE; 217 ecmd->advertising |= ADVERTISED_FIBRE; 218 ecmd->port = PORT_FIBRE; 219 break; 220 case ixgbe_phy_nl: 221 case ixgbe_phy_sfp_passive_tyco: 222 case ixgbe_phy_sfp_passive_unknown: 223 case ixgbe_phy_sfp_ftl: 224 case ixgbe_phy_sfp_avago: 225 case ixgbe_phy_sfp_intel: 226 case ixgbe_phy_sfp_unknown: 227 /* SFP+ devices, further checking needed */ 228 switch (adapter->hw.phy.sfp_type) { 229 case ixgbe_sfp_type_da_cu: 230 case ixgbe_sfp_type_da_cu_core0: 231 case ixgbe_sfp_type_da_cu_core1: 232 ecmd->supported |= SUPPORTED_FIBRE; 233 ecmd->advertising |= ADVERTISED_FIBRE; 234 ecmd->port = PORT_DA; 235 break; 236 case ixgbe_sfp_type_sr: 237 case ixgbe_sfp_type_lr: 238 case ixgbe_sfp_type_srlr_core0: 239 case ixgbe_sfp_type_srlr_core1: 240 case ixgbe_sfp_type_1g_sx_core0: 241 case ixgbe_sfp_type_1g_sx_core1: 242 case ixgbe_sfp_type_1g_lx_core0: 243 case ixgbe_sfp_type_1g_lx_core1: 244 ecmd->supported |= SUPPORTED_FIBRE; 245 ecmd->advertising |= ADVERTISED_FIBRE; 246 ecmd->port = PORT_FIBRE; 247 break; 248 case ixgbe_sfp_type_not_present: 249 ecmd->supported |= SUPPORTED_FIBRE; 250 ecmd->advertising |= ADVERTISED_FIBRE; 251 ecmd->port = PORT_NONE; 252 break; 253 case ixgbe_sfp_type_1g_cu_core0: 254 case ixgbe_sfp_type_1g_cu_core1: 255 ecmd->supported |= SUPPORTED_TP; 256 ecmd->advertising |= ADVERTISED_TP; 257 ecmd->port = PORT_TP; 258 break; 259 case ixgbe_sfp_type_unknown: 260 default: 261 ecmd->supported |= SUPPORTED_FIBRE; 262 ecmd->advertising |= ADVERTISED_FIBRE; 263 ecmd->port = PORT_OTHER; 264 break; 265 } 266 break; 267 case ixgbe_phy_xaui: 268 ecmd->supported |= SUPPORTED_FIBRE; 269 ecmd->advertising |= ADVERTISED_FIBRE; 270 ecmd->port = PORT_NONE; 271 break; 272 case ixgbe_phy_unknown: 273 case ixgbe_phy_generic: 274 case ixgbe_phy_sfp_unsupported: 275 default: 276 ecmd->supported |= SUPPORTED_FIBRE; 277 ecmd->advertising |= ADVERTISED_FIBRE; 278 ecmd->port = PORT_OTHER; 279 break; 280 } 281 282 hw->mac.ops.check_link(hw, &link_speed, &link_up, false); 283 if (link_up) { 284 switch (link_speed) { 285 case IXGBE_LINK_SPEED_10GB_FULL: 286 ethtool_cmd_speed_set(ecmd, SPEED_10000); 287 break; 288 case IXGBE_LINK_SPEED_1GB_FULL: 289 ethtool_cmd_speed_set(ecmd, SPEED_1000); 290 break; 291 case IXGBE_LINK_SPEED_100_FULL: 292 ethtool_cmd_speed_set(ecmd, SPEED_100); 293 break; 294 default: 295 break; 296 } 297 ecmd->duplex = DUPLEX_FULL; 298 } else { 299 ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN); 300 ecmd->duplex = DUPLEX_UNKNOWN; 301 } 302 303 return 0; 304 } 305 306 static int ixgbe_set_settings(struct net_device *netdev, 307 struct ethtool_cmd *ecmd) 308 { 309 struct ixgbe_adapter *adapter = netdev_priv(netdev); 310 struct ixgbe_hw *hw = &adapter->hw; 311 u32 advertised, old; 312 s32 err = 0; 313 314 if ((hw->phy.media_type == ixgbe_media_type_copper) || 315 (hw->phy.multispeed_fiber)) { 316 /* 317 * this function does not support duplex forcing, but can 318 * limit the advertising of the adapter to the specified speed 319 */ 320 if (ecmd->advertising & ~ecmd->supported) 321 return -EINVAL; 322 323 /* only allow one speed at a time if no autoneg */ 324 if (!ecmd->autoneg && hw->phy.multispeed_fiber) { 325 if (ecmd->advertising == 326 (ADVERTISED_10000baseT_Full | 327 ADVERTISED_1000baseT_Full)) 328 return -EINVAL; 329 } 330 331 old = hw->phy.autoneg_advertised; 332 advertised = 0; 333 if (ecmd->advertising & ADVERTISED_10000baseT_Full) 334 advertised |= IXGBE_LINK_SPEED_10GB_FULL; 335 336 if (ecmd->advertising & ADVERTISED_1000baseT_Full) 337 advertised |= IXGBE_LINK_SPEED_1GB_FULL; 338 339 if (ecmd->advertising & ADVERTISED_100baseT_Full) 340 advertised |= IXGBE_LINK_SPEED_100_FULL; 341 342 if (old == advertised) 343 return err; 344 /* this sets the link speed and restarts auto-neg */ 345 while (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state)) 346 usleep_range(1000, 2000); 347 348 hw->mac.autotry_restart = true; 349 err = hw->mac.ops.setup_link(hw, advertised, true); 350 if (err) { 351 e_info(probe, "setup link failed with code %d\n", err); 352 hw->mac.ops.setup_link(hw, old, true); 353 } 354 clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state); 355 } else { 356 /* in this case we currently only support 10Gb/FULL */ 357 u32 speed = ethtool_cmd_speed(ecmd); 358 if ((ecmd->autoneg == AUTONEG_ENABLE) || 359 (ecmd->advertising != ADVERTISED_10000baseT_Full) || 360 (speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL)) 361 return -EINVAL; 362 } 363 364 return err; 365 } 366 367 static void ixgbe_get_pauseparam(struct net_device *netdev, 368 struct ethtool_pauseparam *pause) 369 { 370 struct ixgbe_adapter *adapter = netdev_priv(netdev); 371 struct ixgbe_hw *hw = &adapter->hw; 372 373 if (ixgbe_device_supports_autoneg_fc(hw) && 374 !hw->fc.disable_fc_autoneg) 375 pause->autoneg = 1; 376 else 377 pause->autoneg = 0; 378 379 if (hw->fc.current_mode == ixgbe_fc_rx_pause) { 380 pause->rx_pause = 1; 381 } else if (hw->fc.current_mode == ixgbe_fc_tx_pause) { 382 pause->tx_pause = 1; 383 } else if (hw->fc.current_mode == ixgbe_fc_full) { 384 pause->rx_pause = 1; 385 pause->tx_pause = 1; 386 } 387 } 388 389 static int ixgbe_set_pauseparam(struct net_device *netdev, 390 struct ethtool_pauseparam *pause) 391 { 392 struct ixgbe_adapter *adapter = netdev_priv(netdev); 393 struct ixgbe_hw *hw = &adapter->hw; 394 struct ixgbe_fc_info fc = hw->fc; 395 396 /* 82598 does no support link flow control with DCB enabled */ 397 if ((hw->mac.type == ixgbe_mac_82598EB) && 398 (adapter->flags & IXGBE_FLAG_DCB_ENABLED)) 399 return -EINVAL; 400 401 /* some devices do not support autoneg of link flow control */ 402 if ((pause->autoneg == AUTONEG_ENABLE) && 403 !ixgbe_device_supports_autoneg_fc(hw)) 404 return -EINVAL; 405 406 fc.disable_fc_autoneg = (pause->autoneg != AUTONEG_ENABLE); 407 408 if ((pause->rx_pause && pause->tx_pause) || pause->autoneg) 409 fc.requested_mode = ixgbe_fc_full; 410 else if (pause->rx_pause && !pause->tx_pause) 411 fc.requested_mode = ixgbe_fc_rx_pause; 412 else if (!pause->rx_pause && pause->tx_pause) 413 fc.requested_mode = ixgbe_fc_tx_pause; 414 else 415 fc.requested_mode = ixgbe_fc_none; 416 417 /* if the thing changed then we'll update and use new autoneg */ 418 if (memcmp(&fc, &hw->fc, sizeof(struct ixgbe_fc_info))) { 419 hw->fc = fc; 420 if (netif_running(netdev)) 421 ixgbe_reinit_locked(adapter); 422 else 423 ixgbe_reset(adapter); 424 } 425 426 return 0; 427 } 428 429 static u32 ixgbe_get_msglevel(struct net_device *netdev) 430 { 431 struct ixgbe_adapter *adapter = netdev_priv(netdev); 432 return adapter->msg_enable; 433 } 434 435 static void ixgbe_set_msglevel(struct net_device *netdev, u32 data) 436 { 437 struct ixgbe_adapter *adapter = netdev_priv(netdev); 438 adapter->msg_enable = data; 439 } 440 441 static int ixgbe_get_regs_len(struct net_device *netdev) 442 { 443 #define IXGBE_REGS_LEN 1139 444 return IXGBE_REGS_LEN * sizeof(u32); 445 } 446 447 #define IXGBE_GET_STAT(_A_, _R_) _A_->stats._R_ 448 449 static void ixgbe_get_regs(struct net_device *netdev, 450 struct ethtool_regs *regs, void *p) 451 { 452 struct ixgbe_adapter *adapter = netdev_priv(netdev); 453 struct ixgbe_hw *hw = &adapter->hw; 454 u32 *regs_buff = p; 455 u8 i; 456 457 memset(p, 0, IXGBE_REGS_LEN * sizeof(u32)); 458 459 regs->version = hw->mac.type << 24 | hw->revision_id << 16 | 460 hw->device_id; 461 462 /* General Registers */ 463 regs_buff[0] = IXGBE_READ_REG(hw, IXGBE_CTRL); 464 regs_buff[1] = IXGBE_READ_REG(hw, IXGBE_STATUS); 465 regs_buff[2] = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); 466 regs_buff[3] = IXGBE_READ_REG(hw, IXGBE_ESDP); 467 regs_buff[4] = IXGBE_READ_REG(hw, IXGBE_EODSDP); 468 regs_buff[5] = IXGBE_READ_REG(hw, IXGBE_LEDCTL); 469 regs_buff[6] = IXGBE_READ_REG(hw, IXGBE_FRTIMER); 470 regs_buff[7] = IXGBE_READ_REG(hw, IXGBE_TCPTIMER); 471 472 /* NVM Register */ 473 regs_buff[8] = IXGBE_READ_REG(hw, IXGBE_EEC); 474 regs_buff[9] = IXGBE_READ_REG(hw, IXGBE_EERD); 475 regs_buff[10] = IXGBE_READ_REG(hw, IXGBE_FLA); 476 regs_buff[11] = IXGBE_READ_REG(hw, IXGBE_EEMNGCTL); 477 regs_buff[12] = IXGBE_READ_REG(hw, IXGBE_EEMNGDATA); 478 regs_buff[13] = IXGBE_READ_REG(hw, IXGBE_FLMNGCTL); 479 regs_buff[14] = IXGBE_READ_REG(hw, IXGBE_FLMNGDATA); 480 regs_buff[15] = IXGBE_READ_REG(hw, IXGBE_FLMNGCNT); 481 regs_buff[16] = IXGBE_READ_REG(hw, IXGBE_FLOP); 482 regs_buff[17] = IXGBE_READ_REG(hw, IXGBE_GRC); 483 484 /* Interrupt */ 485 /* don't read EICR because it can clear interrupt causes, instead 486 * read EICS which is a shadow but doesn't clear EICR */ 487 regs_buff[18] = IXGBE_READ_REG(hw, IXGBE_EICS); 488 regs_buff[19] = IXGBE_READ_REG(hw, IXGBE_EICS); 489 regs_buff[20] = IXGBE_READ_REG(hw, IXGBE_EIMS); 490 regs_buff[21] = IXGBE_READ_REG(hw, IXGBE_EIMC); 491 regs_buff[22] = IXGBE_READ_REG(hw, IXGBE_EIAC); 492 regs_buff[23] = IXGBE_READ_REG(hw, IXGBE_EIAM); 493 regs_buff[24] = IXGBE_READ_REG(hw, IXGBE_EITR(0)); 494 regs_buff[25] = IXGBE_READ_REG(hw, IXGBE_IVAR(0)); 495 regs_buff[26] = IXGBE_READ_REG(hw, IXGBE_MSIXT); 496 regs_buff[27] = IXGBE_READ_REG(hw, IXGBE_MSIXPBA); 497 regs_buff[28] = IXGBE_READ_REG(hw, IXGBE_PBACL(0)); 498 regs_buff[29] = IXGBE_READ_REG(hw, IXGBE_GPIE); 499 500 /* Flow Control */ 501 regs_buff[30] = IXGBE_READ_REG(hw, IXGBE_PFCTOP); 502 regs_buff[31] = IXGBE_READ_REG(hw, IXGBE_FCTTV(0)); 503 regs_buff[32] = IXGBE_READ_REG(hw, IXGBE_FCTTV(1)); 504 regs_buff[33] = IXGBE_READ_REG(hw, IXGBE_FCTTV(2)); 505 regs_buff[34] = IXGBE_READ_REG(hw, IXGBE_FCTTV(3)); 506 for (i = 0; i < 8; i++) { 507 switch (hw->mac.type) { 508 case ixgbe_mac_82598EB: 509 regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL(i)); 510 regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH(i)); 511 break; 512 case ixgbe_mac_82599EB: 513 case ixgbe_mac_X540: 514 regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL_82599(i)); 515 regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i)); 516 break; 517 default: 518 break; 519 } 520 } 521 regs_buff[51] = IXGBE_READ_REG(hw, IXGBE_FCRTV); 522 regs_buff[52] = IXGBE_READ_REG(hw, IXGBE_TFCS); 523 524 /* Receive DMA */ 525 for (i = 0; i < 64; i++) 526 regs_buff[53 + i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i)); 527 for (i = 0; i < 64; i++) 528 regs_buff[117 + i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i)); 529 for (i = 0; i < 64; i++) 530 regs_buff[181 + i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i)); 531 for (i = 0; i < 64; i++) 532 regs_buff[245 + i] = IXGBE_READ_REG(hw, IXGBE_RDH(i)); 533 for (i = 0; i < 64; i++) 534 regs_buff[309 + i] = IXGBE_READ_REG(hw, IXGBE_RDT(i)); 535 for (i = 0; i < 64; i++) 536 regs_buff[373 + i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i)); 537 for (i = 0; i < 16; i++) 538 regs_buff[437 + i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i)); 539 for (i = 0; i < 16; i++) 540 regs_buff[453 + i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i)); 541 regs_buff[469] = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); 542 for (i = 0; i < 8; i++) 543 regs_buff[470 + i] = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)); 544 regs_buff[478] = IXGBE_READ_REG(hw, IXGBE_RXCTRL); 545 regs_buff[479] = IXGBE_READ_REG(hw, IXGBE_DROPEN); 546 547 /* Receive */ 548 regs_buff[480] = IXGBE_READ_REG(hw, IXGBE_RXCSUM); 549 regs_buff[481] = IXGBE_READ_REG(hw, IXGBE_RFCTL); 550 for (i = 0; i < 16; i++) 551 regs_buff[482 + i] = IXGBE_READ_REG(hw, IXGBE_RAL(i)); 552 for (i = 0; i < 16; i++) 553 regs_buff[498 + i] = IXGBE_READ_REG(hw, IXGBE_RAH(i)); 554 regs_buff[514] = IXGBE_READ_REG(hw, IXGBE_PSRTYPE(0)); 555 regs_buff[515] = IXGBE_READ_REG(hw, IXGBE_FCTRL); 556 regs_buff[516] = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 557 regs_buff[517] = IXGBE_READ_REG(hw, IXGBE_MCSTCTRL); 558 regs_buff[518] = IXGBE_READ_REG(hw, IXGBE_MRQC); 559 regs_buff[519] = IXGBE_READ_REG(hw, IXGBE_VMD_CTL); 560 for (i = 0; i < 8; i++) 561 regs_buff[520 + i] = IXGBE_READ_REG(hw, IXGBE_IMIR(i)); 562 for (i = 0; i < 8; i++) 563 regs_buff[528 + i] = IXGBE_READ_REG(hw, IXGBE_IMIREXT(i)); 564 regs_buff[536] = IXGBE_READ_REG(hw, IXGBE_IMIRVP); 565 566 /* Transmit */ 567 for (i = 0; i < 32; i++) 568 regs_buff[537 + i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i)); 569 for (i = 0; i < 32; i++) 570 regs_buff[569 + i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i)); 571 for (i = 0; i < 32; i++) 572 regs_buff[601 + i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i)); 573 for (i = 0; i < 32; i++) 574 regs_buff[633 + i] = IXGBE_READ_REG(hw, IXGBE_TDH(i)); 575 for (i = 0; i < 32; i++) 576 regs_buff[665 + i] = IXGBE_READ_REG(hw, IXGBE_TDT(i)); 577 for (i = 0; i < 32; i++) 578 regs_buff[697 + i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i)); 579 for (i = 0; i < 32; i++) 580 regs_buff[729 + i] = IXGBE_READ_REG(hw, IXGBE_TDWBAL(i)); 581 for (i = 0; i < 32; i++) 582 regs_buff[761 + i] = IXGBE_READ_REG(hw, IXGBE_TDWBAH(i)); 583 regs_buff[793] = IXGBE_READ_REG(hw, IXGBE_DTXCTL); 584 for (i = 0; i < 16; i++) 585 regs_buff[794 + i] = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i)); 586 regs_buff[810] = IXGBE_READ_REG(hw, IXGBE_TIPG); 587 for (i = 0; i < 8; i++) 588 regs_buff[811 + i] = IXGBE_READ_REG(hw, IXGBE_TXPBSIZE(i)); 589 regs_buff[819] = IXGBE_READ_REG(hw, IXGBE_MNGTXMAP); 590 591 /* Wake Up */ 592 regs_buff[820] = IXGBE_READ_REG(hw, IXGBE_WUC); 593 regs_buff[821] = IXGBE_READ_REG(hw, IXGBE_WUFC); 594 regs_buff[822] = IXGBE_READ_REG(hw, IXGBE_WUS); 595 regs_buff[823] = IXGBE_READ_REG(hw, IXGBE_IPAV); 596 regs_buff[824] = IXGBE_READ_REG(hw, IXGBE_IP4AT); 597 regs_buff[825] = IXGBE_READ_REG(hw, IXGBE_IP6AT); 598 regs_buff[826] = IXGBE_READ_REG(hw, IXGBE_WUPL); 599 regs_buff[827] = IXGBE_READ_REG(hw, IXGBE_WUPM); 600 regs_buff[828] = IXGBE_READ_REG(hw, IXGBE_FHFT(0)); 601 602 /* DCB */ 603 regs_buff[829] = IXGBE_READ_REG(hw, IXGBE_RMCS); /* same as FCCFG */ 604 regs_buff[831] = IXGBE_READ_REG(hw, IXGBE_PDPMCS); /* same as RTTPCS */ 605 606 switch (hw->mac.type) { 607 case ixgbe_mac_82598EB: 608 regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_DPMCS); 609 regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RUPPBMR); 610 for (i = 0; i < 8; i++) 611 regs_buff[833 + i] = 612 IXGBE_READ_REG(hw, IXGBE_RT2CR(i)); 613 for (i = 0; i < 8; i++) 614 regs_buff[841 + i] = 615 IXGBE_READ_REG(hw, IXGBE_RT2SR(i)); 616 for (i = 0; i < 8; i++) 617 regs_buff[849 + i] = 618 IXGBE_READ_REG(hw, IXGBE_TDTQ2TCCR(i)); 619 for (i = 0; i < 8; i++) 620 regs_buff[857 + i] = 621 IXGBE_READ_REG(hw, IXGBE_TDTQ2TCSR(i)); 622 break; 623 case ixgbe_mac_82599EB: 624 case ixgbe_mac_X540: 625 regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_RTTDCS); 626 regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RTRPCS); 627 for (i = 0; i < 8; i++) 628 regs_buff[833 + i] = 629 IXGBE_READ_REG(hw, IXGBE_RTRPT4C(i)); 630 for (i = 0; i < 8; i++) 631 regs_buff[841 + i] = 632 IXGBE_READ_REG(hw, IXGBE_RTRPT4S(i)); 633 for (i = 0; i < 8; i++) 634 regs_buff[849 + i] = 635 IXGBE_READ_REG(hw, IXGBE_RTTDT2C(i)); 636 for (i = 0; i < 8; i++) 637 regs_buff[857 + i] = 638 IXGBE_READ_REG(hw, IXGBE_RTTDT2S(i)); 639 break; 640 default: 641 break; 642 } 643 644 for (i = 0; i < 8; i++) 645 regs_buff[865 + i] = 646 IXGBE_READ_REG(hw, IXGBE_TDPT2TCCR(i)); /* same as RTTPT2C */ 647 for (i = 0; i < 8; i++) 648 regs_buff[873 + i] = 649 IXGBE_READ_REG(hw, IXGBE_TDPT2TCSR(i)); /* same as RTTPT2S */ 650 651 /* Statistics */ 652 regs_buff[881] = IXGBE_GET_STAT(adapter, crcerrs); 653 regs_buff[882] = IXGBE_GET_STAT(adapter, illerrc); 654 regs_buff[883] = IXGBE_GET_STAT(adapter, errbc); 655 regs_buff[884] = IXGBE_GET_STAT(adapter, mspdc); 656 for (i = 0; i < 8; i++) 657 regs_buff[885 + i] = IXGBE_GET_STAT(adapter, mpc[i]); 658 regs_buff[893] = IXGBE_GET_STAT(adapter, mlfc); 659 regs_buff[894] = IXGBE_GET_STAT(adapter, mrfc); 660 regs_buff[895] = IXGBE_GET_STAT(adapter, rlec); 661 regs_buff[896] = IXGBE_GET_STAT(adapter, lxontxc); 662 regs_buff[897] = IXGBE_GET_STAT(adapter, lxonrxc); 663 regs_buff[898] = IXGBE_GET_STAT(adapter, lxofftxc); 664 regs_buff[899] = IXGBE_GET_STAT(adapter, lxoffrxc); 665 for (i = 0; i < 8; i++) 666 regs_buff[900 + i] = IXGBE_GET_STAT(adapter, pxontxc[i]); 667 for (i = 0; i < 8; i++) 668 regs_buff[908 + i] = IXGBE_GET_STAT(adapter, pxonrxc[i]); 669 for (i = 0; i < 8; i++) 670 regs_buff[916 + i] = IXGBE_GET_STAT(adapter, pxofftxc[i]); 671 for (i = 0; i < 8; i++) 672 regs_buff[924 + i] = IXGBE_GET_STAT(adapter, pxoffrxc[i]); 673 regs_buff[932] = IXGBE_GET_STAT(adapter, prc64); 674 regs_buff[933] = IXGBE_GET_STAT(adapter, prc127); 675 regs_buff[934] = IXGBE_GET_STAT(adapter, prc255); 676 regs_buff[935] = IXGBE_GET_STAT(adapter, prc511); 677 regs_buff[936] = IXGBE_GET_STAT(adapter, prc1023); 678 regs_buff[937] = IXGBE_GET_STAT(adapter, prc1522); 679 regs_buff[938] = IXGBE_GET_STAT(adapter, gprc); 680 regs_buff[939] = IXGBE_GET_STAT(adapter, bprc); 681 regs_buff[940] = IXGBE_GET_STAT(adapter, mprc); 682 regs_buff[941] = IXGBE_GET_STAT(adapter, gptc); 683 regs_buff[942] = IXGBE_GET_STAT(adapter, gorc); 684 regs_buff[944] = IXGBE_GET_STAT(adapter, gotc); 685 for (i = 0; i < 8; i++) 686 regs_buff[946 + i] = IXGBE_GET_STAT(adapter, rnbc[i]); 687 regs_buff[954] = IXGBE_GET_STAT(adapter, ruc); 688 regs_buff[955] = IXGBE_GET_STAT(adapter, rfc); 689 regs_buff[956] = IXGBE_GET_STAT(adapter, roc); 690 regs_buff[957] = IXGBE_GET_STAT(adapter, rjc); 691 regs_buff[958] = IXGBE_GET_STAT(adapter, mngprc); 692 regs_buff[959] = IXGBE_GET_STAT(adapter, mngpdc); 693 regs_buff[960] = IXGBE_GET_STAT(adapter, mngptc); 694 regs_buff[961] = IXGBE_GET_STAT(adapter, tor); 695 regs_buff[963] = IXGBE_GET_STAT(adapter, tpr); 696 regs_buff[964] = IXGBE_GET_STAT(adapter, tpt); 697 regs_buff[965] = IXGBE_GET_STAT(adapter, ptc64); 698 regs_buff[966] = IXGBE_GET_STAT(adapter, ptc127); 699 regs_buff[967] = IXGBE_GET_STAT(adapter, ptc255); 700 regs_buff[968] = IXGBE_GET_STAT(adapter, ptc511); 701 regs_buff[969] = IXGBE_GET_STAT(adapter, ptc1023); 702 regs_buff[970] = IXGBE_GET_STAT(adapter, ptc1522); 703 regs_buff[971] = IXGBE_GET_STAT(adapter, mptc); 704 regs_buff[972] = IXGBE_GET_STAT(adapter, bptc); 705 regs_buff[973] = IXGBE_GET_STAT(adapter, xec); 706 for (i = 0; i < 16; i++) 707 regs_buff[974 + i] = IXGBE_GET_STAT(adapter, qprc[i]); 708 for (i = 0; i < 16; i++) 709 regs_buff[990 + i] = IXGBE_GET_STAT(adapter, qptc[i]); 710 for (i = 0; i < 16; i++) 711 regs_buff[1006 + i] = IXGBE_GET_STAT(adapter, qbrc[i]); 712 for (i = 0; i < 16; i++) 713 regs_buff[1022 + i] = IXGBE_GET_STAT(adapter, qbtc[i]); 714 715 /* MAC */ 716 regs_buff[1038] = IXGBE_READ_REG(hw, IXGBE_PCS1GCFIG); 717 regs_buff[1039] = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL); 718 regs_buff[1040] = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA); 719 regs_buff[1041] = IXGBE_READ_REG(hw, IXGBE_PCS1GDBG0); 720 regs_buff[1042] = IXGBE_READ_REG(hw, IXGBE_PCS1GDBG1); 721 regs_buff[1043] = IXGBE_READ_REG(hw, IXGBE_PCS1GANA); 722 regs_buff[1044] = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP); 723 regs_buff[1045] = IXGBE_READ_REG(hw, IXGBE_PCS1GANNP); 724 regs_buff[1046] = IXGBE_READ_REG(hw, IXGBE_PCS1GANLPNP); 725 regs_buff[1047] = IXGBE_READ_REG(hw, IXGBE_HLREG0); 726 regs_buff[1048] = IXGBE_READ_REG(hw, IXGBE_HLREG1); 727 regs_buff[1049] = IXGBE_READ_REG(hw, IXGBE_PAP); 728 regs_buff[1050] = IXGBE_READ_REG(hw, IXGBE_MACA); 729 regs_buff[1051] = IXGBE_READ_REG(hw, IXGBE_APAE); 730 regs_buff[1052] = IXGBE_READ_REG(hw, IXGBE_ARD); 731 regs_buff[1053] = IXGBE_READ_REG(hw, IXGBE_AIS); 732 regs_buff[1054] = IXGBE_READ_REG(hw, IXGBE_MSCA); 733 regs_buff[1055] = IXGBE_READ_REG(hw, IXGBE_MSRWD); 734 regs_buff[1056] = IXGBE_READ_REG(hw, IXGBE_MLADD); 735 regs_buff[1057] = IXGBE_READ_REG(hw, IXGBE_MHADD); 736 regs_buff[1058] = IXGBE_READ_REG(hw, IXGBE_TREG); 737 regs_buff[1059] = IXGBE_READ_REG(hw, IXGBE_PCSS1); 738 regs_buff[1060] = IXGBE_READ_REG(hw, IXGBE_PCSS2); 739 regs_buff[1061] = IXGBE_READ_REG(hw, IXGBE_XPCSS); 740 regs_buff[1062] = IXGBE_READ_REG(hw, IXGBE_SERDESC); 741 regs_buff[1063] = IXGBE_READ_REG(hw, IXGBE_MACS); 742 regs_buff[1064] = IXGBE_READ_REG(hw, IXGBE_AUTOC); 743 regs_buff[1065] = IXGBE_READ_REG(hw, IXGBE_LINKS); 744 regs_buff[1066] = IXGBE_READ_REG(hw, IXGBE_AUTOC2); 745 regs_buff[1067] = IXGBE_READ_REG(hw, IXGBE_AUTOC3); 746 regs_buff[1068] = IXGBE_READ_REG(hw, IXGBE_ANLP1); 747 regs_buff[1069] = IXGBE_READ_REG(hw, IXGBE_ANLP2); 748 regs_buff[1070] = IXGBE_READ_REG(hw, IXGBE_ATLASCTL); 749 750 /* Diagnostic */ 751 regs_buff[1071] = IXGBE_READ_REG(hw, IXGBE_RDSTATCTL); 752 for (i = 0; i < 8; i++) 753 regs_buff[1072 + i] = IXGBE_READ_REG(hw, IXGBE_RDSTAT(i)); 754 regs_buff[1080] = IXGBE_READ_REG(hw, IXGBE_RDHMPN); 755 for (i = 0; i < 4; i++) 756 regs_buff[1081 + i] = IXGBE_READ_REG(hw, IXGBE_RIC_DW(i)); 757 regs_buff[1085] = IXGBE_READ_REG(hw, IXGBE_RDPROBE); 758 regs_buff[1086] = IXGBE_READ_REG(hw, IXGBE_TDSTATCTL); 759 for (i = 0; i < 8; i++) 760 regs_buff[1087 + i] = IXGBE_READ_REG(hw, IXGBE_TDSTAT(i)); 761 regs_buff[1095] = IXGBE_READ_REG(hw, IXGBE_TDHMPN); 762 for (i = 0; i < 4; i++) 763 regs_buff[1096 + i] = IXGBE_READ_REG(hw, IXGBE_TIC_DW(i)); 764 regs_buff[1100] = IXGBE_READ_REG(hw, IXGBE_TDPROBE); 765 regs_buff[1101] = IXGBE_READ_REG(hw, IXGBE_TXBUFCTRL); 766 regs_buff[1102] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA0); 767 regs_buff[1103] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA1); 768 regs_buff[1104] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA2); 769 regs_buff[1105] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA3); 770 regs_buff[1106] = IXGBE_READ_REG(hw, IXGBE_RXBUFCTRL); 771 regs_buff[1107] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA0); 772 regs_buff[1108] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA1); 773 regs_buff[1109] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA2); 774 regs_buff[1110] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA3); 775 for (i = 0; i < 8; i++) 776 regs_buff[1111 + i] = IXGBE_READ_REG(hw, IXGBE_PCIE_DIAG(i)); 777 regs_buff[1119] = IXGBE_READ_REG(hw, IXGBE_RFVAL); 778 regs_buff[1120] = IXGBE_READ_REG(hw, IXGBE_MDFTC1); 779 regs_buff[1121] = IXGBE_READ_REG(hw, IXGBE_MDFTC2); 780 regs_buff[1122] = IXGBE_READ_REG(hw, IXGBE_MDFTFIFO1); 781 regs_buff[1123] = IXGBE_READ_REG(hw, IXGBE_MDFTFIFO2); 782 regs_buff[1124] = IXGBE_READ_REG(hw, IXGBE_MDFTS); 783 regs_buff[1125] = IXGBE_READ_REG(hw, IXGBE_PCIEECCCTL); 784 regs_buff[1126] = IXGBE_READ_REG(hw, IXGBE_PBTXECC); 785 regs_buff[1127] = IXGBE_READ_REG(hw, IXGBE_PBRXECC); 786 787 /* 82599 X540 specific registers */ 788 regs_buff[1128] = IXGBE_READ_REG(hw, IXGBE_MFLCN); 789 790 /* 82599 X540 specific DCB registers */ 791 regs_buff[1129] = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC); 792 regs_buff[1130] = IXGBE_READ_REG(hw, IXGBE_RTTUP2TC); 793 for (i = 0; i < 4; i++) 794 regs_buff[1131 + i] = IXGBE_READ_REG(hw, IXGBE_TXLLQ(i)); 795 regs_buff[1135] = IXGBE_READ_REG(hw, IXGBE_RTTBCNRM); 796 /* same as RTTQCNRM */ 797 regs_buff[1136] = IXGBE_READ_REG(hw, IXGBE_RTTBCNRD); 798 /* same as RTTQCNRR */ 799 800 /* X540 specific DCB registers */ 801 regs_buff[1137] = IXGBE_READ_REG(hw, IXGBE_RTTQCNCR); 802 regs_buff[1138] = IXGBE_READ_REG(hw, IXGBE_RTTQCNTG); 803 } 804 805 static int ixgbe_get_eeprom_len(struct net_device *netdev) 806 { 807 struct ixgbe_adapter *adapter = netdev_priv(netdev); 808 return adapter->hw.eeprom.word_size * 2; 809 } 810 811 static int ixgbe_get_eeprom(struct net_device *netdev, 812 struct ethtool_eeprom *eeprom, u8 *bytes) 813 { 814 struct ixgbe_adapter *adapter = netdev_priv(netdev); 815 struct ixgbe_hw *hw = &adapter->hw; 816 u16 *eeprom_buff; 817 int first_word, last_word, eeprom_len; 818 int ret_val = 0; 819 u16 i; 820 821 if (eeprom->len == 0) 822 return -EINVAL; 823 824 eeprom->magic = hw->vendor_id | (hw->device_id << 16); 825 826 first_word = eeprom->offset >> 1; 827 last_word = (eeprom->offset + eeprom->len - 1) >> 1; 828 eeprom_len = last_word - first_word + 1; 829 830 eeprom_buff = kmalloc(sizeof(u16) * eeprom_len, GFP_KERNEL); 831 if (!eeprom_buff) 832 return -ENOMEM; 833 834 ret_val = hw->eeprom.ops.read_buffer(hw, first_word, eeprom_len, 835 eeprom_buff); 836 837 /* Device's eeprom is always little-endian, word addressable */ 838 for (i = 0; i < eeprom_len; i++) 839 le16_to_cpus(&eeprom_buff[i]); 840 841 memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len); 842 kfree(eeprom_buff); 843 844 return ret_val; 845 } 846 847 static int ixgbe_set_eeprom(struct net_device *netdev, 848 struct ethtool_eeprom *eeprom, u8 *bytes) 849 { 850 struct ixgbe_adapter *adapter = netdev_priv(netdev); 851 struct ixgbe_hw *hw = &adapter->hw; 852 u16 *eeprom_buff; 853 void *ptr; 854 int max_len, first_word, last_word, ret_val = 0; 855 u16 i; 856 857 if (eeprom->len == 0) 858 return -EINVAL; 859 860 if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16))) 861 return -EINVAL; 862 863 max_len = hw->eeprom.word_size * 2; 864 865 first_word = eeprom->offset >> 1; 866 last_word = (eeprom->offset + eeprom->len - 1) >> 1; 867 eeprom_buff = kmalloc(max_len, GFP_KERNEL); 868 if (!eeprom_buff) 869 return -ENOMEM; 870 871 ptr = eeprom_buff; 872 873 if (eeprom->offset & 1) { 874 /* 875 * need read/modify/write of first changed EEPROM word 876 * only the second byte of the word is being modified 877 */ 878 ret_val = hw->eeprom.ops.read(hw, first_word, &eeprom_buff[0]); 879 if (ret_val) 880 goto err; 881 882 ptr++; 883 } 884 if ((eeprom->offset + eeprom->len) & 1) { 885 /* 886 * need read/modify/write of last changed EEPROM word 887 * only the first byte of the word is being modified 888 */ 889 ret_val = hw->eeprom.ops.read(hw, last_word, 890 &eeprom_buff[last_word - first_word]); 891 if (ret_val) 892 goto err; 893 } 894 895 /* Device's eeprom is always little-endian, word addressable */ 896 for (i = 0; i < last_word - first_word + 1; i++) 897 le16_to_cpus(&eeprom_buff[i]); 898 899 memcpy(ptr, bytes, eeprom->len); 900 901 for (i = 0; i < last_word - first_word + 1; i++) 902 cpu_to_le16s(&eeprom_buff[i]); 903 904 ret_val = hw->eeprom.ops.write_buffer(hw, first_word, 905 last_word - first_word + 1, 906 eeprom_buff); 907 908 /* Update the checksum */ 909 if (ret_val == 0) 910 hw->eeprom.ops.update_checksum(hw); 911 912 err: 913 kfree(eeprom_buff); 914 return ret_val; 915 } 916 917 static void ixgbe_get_drvinfo(struct net_device *netdev, 918 struct ethtool_drvinfo *drvinfo) 919 { 920 struct ixgbe_adapter *adapter = netdev_priv(netdev); 921 u32 nvm_track_id; 922 923 strlcpy(drvinfo->driver, ixgbe_driver_name, sizeof(drvinfo->driver)); 924 strlcpy(drvinfo->version, ixgbe_driver_version, 925 sizeof(drvinfo->version)); 926 927 nvm_track_id = (adapter->eeprom_verh << 16) | 928 adapter->eeprom_verl; 929 snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "0x%08x", 930 nvm_track_id); 931 932 strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), 933 sizeof(drvinfo->bus_info)); 934 drvinfo->n_stats = IXGBE_STATS_LEN; 935 drvinfo->testinfo_len = IXGBE_TEST_LEN; 936 drvinfo->regdump_len = ixgbe_get_regs_len(netdev); 937 } 938 939 static void ixgbe_get_ringparam(struct net_device *netdev, 940 struct ethtool_ringparam *ring) 941 { 942 struct ixgbe_adapter *adapter = netdev_priv(netdev); 943 struct ixgbe_ring *tx_ring = adapter->tx_ring[0]; 944 struct ixgbe_ring *rx_ring = adapter->rx_ring[0]; 945 946 ring->rx_max_pending = IXGBE_MAX_RXD; 947 ring->tx_max_pending = IXGBE_MAX_TXD; 948 ring->rx_pending = rx_ring->count; 949 ring->tx_pending = tx_ring->count; 950 } 951 952 static int ixgbe_set_ringparam(struct net_device *netdev, 953 struct ethtool_ringparam *ring) 954 { 955 struct ixgbe_adapter *adapter = netdev_priv(netdev); 956 struct ixgbe_ring *temp_ring; 957 int i, err = 0; 958 u32 new_rx_count, new_tx_count; 959 960 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) 961 return -EINVAL; 962 963 new_tx_count = clamp_t(u32, ring->tx_pending, 964 IXGBE_MIN_TXD, IXGBE_MAX_TXD); 965 new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE); 966 967 new_rx_count = clamp_t(u32, ring->rx_pending, 968 IXGBE_MIN_RXD, IXGBE_MAX_RXD); 969 new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE); 970 971 if ((new_tx_count == adapter->tx_ring_count) && 972 (new_rx_count == adapter->rx_ring_count)) { 973 /* nothing to do */ 974 return 0; 975 } 976 977 while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state)) 978 usleep_range(1000, 2000); 979 980 if (!netif_running(adapter->netdev)) { 981 for (i = 0; i < adapter->num_tx_queues; i++) 982 adapter->tx_ring[i]->count = new_tx_count; 983 for (i = 0; i < adapter->num_rx_queues; i++) 984 adapter->rx_ring[i]->count = new_rx_count; 985 adapter->tx_ring_count = new_tx_count; 986 adapter->rx_ring_count = new_rx_count; 987 goto clear_reset; 988 } 989 990 /* allocate temporary buffer to store rings in */ 991 i = max_t(int, adapter->num_tx_queues, adapter->num_rx_queues); 992 temp_ring = vmalloc(i * sizeof(struct ixgbe_ring)); 993 994 if (!temp_ring) { 995 err = -ENOMEM; 996 goto clear_reset; 997 } 998 999 ixgbe_down(adapter); 1000 1001 /* 1002 * Setup new Tx resources and free the old Tx resources in that order. 1003 * We can then assign the new resources to the rings via a memcpy. 1004 * The advantage to this approach is that we are guaranteed to still 1005 * have resources even in the case of an allocation failure. 1006 */ 1007 if (new_tx_count != adapter->tx_ring_count) { 1008 for (i = 0; i < adapter->num_tx_queues; i++) { 1009 memcpy(&temp_ring[i], adapter->tx_ring[i], 1010 sizeof(struct ixgbe_ring)); 1011 1012 temp_ring[i].count = new_tx_count; 1013 err = ixgbe_setup_tx_resources(&temp_ring[i]); 1014 if (err) { 1015 while (i) { 1016 i--; 1017 ixgbe_free_tx_resources(&temp_ring[i]); 1018 } 1019 goto err_setup; 1020 } 1021 } 1022 1023 for (i = 0; i < adapter->num_tx_queues; i++) { 1024 ixgbe_free_tx_resources(adapter->tx_ring[i]); 1025 1026 memcpy(adapter->tx_ring[i], &temp_ring[i], 1027 sizeof(struct ixgbe_ring)); 1028 } 1029 1030 adapter->tx_ring_count = new_tx_count; 1031 } 1032 1033 /* Repeat the process for the Rx rings if needed */ 1034 if (new_rx_count != adapter->rx_ring_count) { 1035 for (i = 0; i < adapter->num_rx_queues; i++) { 1036 memcpy(&temp_ring[i], adapter->rx_ring[i], 1037 sizeof(struct ixgbe_ring)); 1038 1039 temp_ring[i].count = new_rx_count; 1040 err = ixgbe_setup_rx_resources(&temp_ring[i]); 1041 if (err) { 1042 while (i) { 1043 i--; 1044 ixgbe_free_rx_resources(&temp_ring[i]); 1045 } 1046 goto err_setup; 1047 } 1048 1049 } 1050 1051 for (i = 0; i < adapter->num_rx_queues; i++) { 1052 ixgbe_free_rx_resources(adapter->rx_ring[i]); 1053 1054 memcpy(adapter->rx_ring[i], &temp_ring[i], 1055 sizeof(struct ixgbe_ring)); 1056 } 1057 1058 adapter->rx_ring_count = new_rx_count; 1059 } 1060 1061 err_setup: 1062 ixgbe_up(adapter); 1063 vfree(temp_ring); 1064 clear_reset: 1065 clear_bit(__IXGBE_RESETTING, &adapter->state); 1066 return err; 1067 } 1068 1069 static int ixgbe_get_sset_count(struct net_device *netdev, int sset) 1070 { 1071 switch (sset) { 1072 case ETH_SS_TEST: 1073 return IXGBE_TEST_LEN; 1074 case ETH_SS_STATS: 1075 return IXGBE_STATS_LEN; 1076 default: 1077 return -EOPNOTSUPP; 1078 } 1079 } 1080 1081 static void ixgbe_get_ethtool_stats(struct net_device *netdev, 1082 struct ethtool_stats *stats, u64 *data) 1083 { 1084 struct ixgbe_adapter *adapter = netdev_priv(netdev); 1085 struct rtnl_link_stats64 temp; 1086 const struct rtnl_link_stats64 *net_stats; 1087 unsigned int start; 1088 struct ixgbe_ring *ring; 1089 int i, j; 1090 char *p = NULL; 1091 1092 ixgbe_update_stats(adapter); 1093 net_stats = dev_get_stats(netdev, &temp); 1094 for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) { 1095 switch (ixgbe_gstrings_stats[i].type) { 1096 case NETDEV_STATS: 1097 p = (char *) net_stats + 1098 ixgbe_gstrings_stats[i].stat_offset; 1099 break; 1100 case IXGBE_STATS: 1101 p = (char *) adapter + 1102 ixgbe_gstrings_stats[i].stat_offset; 1103 break; 1104 default: 1105 data[i] = 0; 1106 continue; 1107 } 1108 1109 data[i] = (ixgbe_gstrings_stats[i].sizeof_stat == 1110 sizeof(u64)) ? *(u64 *)p : *(u32 *)p; 1111 } 1112 for (j = 0; j < netdev->num_tx_queues; j++) { 1113 ring = adapter->tx_ring[j]; 1114 if (!ring) { 1115 data[i] = 0; 1116 data[i+1] = 0; 1117 i += 2; 1118 #ifdef BP_EXTENDED_STATS 1119 data[i] = 0; 1120 data[i+1] = 0; 1121 data[i+2] = 0; 1122 i += 3; 1123 #endif 1124 continue; 1125 } 1126 1127 do { 1128 start = u64_stats_fetch_begin_irq(&ring->syncp); 1129 data[i] = ring->stats.packets; 1130 data[i+1] = ring->stats.bytes; 1131 } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); 1132 i += 2; 1133 #ifdef BP_EXTENDED_STATS 1134 data[i] = ring->stats.yields; 1135 data[i+1] = ring->stats.misses; 1136 data[i+2] = ring->stats.cleaned; 1137 i += 3; 1138 #endif 1139 } 1140 for (j = 0; j < IXGBE_NUM_RX_QUEUES; j++) { 1141 ring = adapter->rx_ring[j]; 1142 if (!ring) { 1143 data[i] = 0; 1144 data[i+1] = 0; 1145 i += 2; 1146 #ifdef BP_EXTENDED_STATS 1147 data[i] = 0; 1148 data[i+1] = 0; 1149 data[i+2] = 0; 1150 i += 3; 1151 #endif 1152 continue; 1153 } 1154 1155 do { 1156 start = u64_stats_fetch_begin_irq(&ring->syncp); 1157 data[i] = ring->stats.packets; 1158 data[i+1] = ring->stats.bytes; 1159 } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); 1160 i += 2; 1161 #ifdef BP_EXTENDED_STATS 1162 data[i] = ring->stats.yields; 1163 data[i+1] = ring->stats.misses; 1164 data[i+2] = ring->stats.cleaned; 1165 i += 3; 1166 #endif 1167 } 1168 1169 for (j = 0; j < IXGBE_MAX_PACKET_BUFFERS; j++) { 1170 data[i++] = adapter->stats.pxontxc[j]; 1171 data[i++] = adapter->stats.pxofftxc[j]; 1172 } 1173 for (j = 0; j < IXGBE_MAX_PACKET_BUFFERS; j++) { 1174 data[i++] = adapter->stats.pxonrxc[j]; 1175 data[i++] = adapter->stats.pxoffrxc[j]; 1176 } 1177 } 1178 1179 static void ixgbe_get_strings(struct net_device *netdev, u32 stringset, 1180 u8 *data) 1181 { 1182 char *p = (char *)data; 1183 int i; 1184 1185 switch (stringset) { 1186 case ETH_SS_TEST: 1187 for (i = 0; i < IXGBE_TEST_LEN; i++) { 1188 memcpy(data, ixgbe_gstrings_test[i], ETH_GSTRING_LEN); 1189 data += ETH_GSTRING_LEN; 1190 } 1191 break; 1192 case ETH_SS_STATS: 1193 for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) { 1194 memcpy(p, ixgbe_gstrings_stats[i].stat_string, 1195 ETH_GSTRING_LEN); 1196 p += ETH_GSTRING_LEN; 1197 } 1198 for (i = 0; i < netdev->num_tx_queues; i++) { 1199 sprintf(p, "tx_queue_%u_packets", i); 1200 p += ETH_GSTRING_LEN; 1201 sprintf(p, "tx_queue_%u_bytes", i); 1202 p += ETH_GSTRING_LEN; 1203 #ifdef BP_EXTENDED_STATS 1204 sprintf(p, "tx_queue_%u_bp_napi_yield", i); 1205 p += ETH_GSTRING_LEN; 1206 sprintf(p, "tx_queue_%u_bp_misses", i); 1207 p += ETH_GSTRING_LEN; 1208 sprintf(p, "tx_queue_%u_bp_cleaned", i); 1209 p += ETH_GSTRING_LEN; 1210 #endif /* BP_EXTENDED_STATS */ 1211 } 1212 for (i = 0; i < IXGBE_NUM_RX_QUEUES; i++) { 1213 sprintf(p, "rx_queue_%u_packets", i); 1214 p += ETH_GSTRING_LEN; 1215 sprintf(p, "rx_queue_%u_bytes", i); 1216 p += ETH_GSTRING_LEN; 1217 #ifdef BP_EXTENDED_STATS 1218 sprintf(p, "rx_queue_%u_bp_poll_yield", i); 1219 p += ETH_GSTRING_LEN; 1220 sprintf(p, "rx_queue_%u_bp_misses", i); 1221 p += ETH_GSTRING_LEN; 1222 sprintf(p, "rx_queue_%u_bp_cleaned", i); 1223 p += ETH_GSTRING_LEN; 1224 #endif /* BP_EXTENDED_STATS */ 1225 } 1226 for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) { 1227 sprintf(p, "tx_pb_%u_pxon", i); 1228 p += ETH_GSTRING_LEN; 1229 sprintf(p, "tx_pb_%u_pxoff", i); 1230 p += ETH_GSTRING_LEN; 1231 } 1232 for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) { 1233 sprintf(p, "rx_pb_%u_pxon", i); 1234 p += ETH_GSTRING_LEN; 1235 sprintf(p, "rx_pb_%u_pxoff", i); 1236 p += ETH_GSTRING_LEN; 1237 } 1238 /* BUG_ON(p - data != IXGBE_STATS_LEN * ETH_GSTRING_LEN); */ 1239 break; 1240 } 1241 } 1242 1243 static int ixgbe_link_test(struct ixgbe_adapter *adapter, u64 *data) 1244 { 1245 struct ixgbe_hw *hw = &adapter->hw; 1246 bool link_up; 1247 u32 link_speed = 0; 1248 1249 if (ixgbe_removed(hw->hw_addr)) { 1250 *data = 1; 1251 return 1; 1252 } 1253 *data = 0; 1254 1255 hw->mac.ops.check_link(hw, &link_speed, &link_up, true); 1256 if (link_up) 1257 return *data; 1258 else 1259 *data = 1; 1260 return *data; 1261 } 1262 1263 /* ethtool register test data */ 1264 struct ixgbe_reg_test { 1265 u16 reg; 1266 u8 array_len; 1267 u8 test_type; 1268 u32 mask; 1269 u32 write; 1270 }; 1271 1272 /* In the hardware, registers are laid out either singly, in arrays 1273 * spaced 0x40 bytes apart, or in contiguous tables. We assume 1274 * most tests take place on arrays or single registers (handled 1275 * as a single-element array) and special-case the tables. 1276 * Table tests are always pattern tests. 1277 * 1278 * We also make provision for some required setup steps by specifying 1279 * registers to be written without any read-back testing. 1280 */ 1281 1282 #define PATTERN_TEST 1 1283 #define SET_READ_TEST 2 1284 #define WRITE_NO_TEST 3 1285 #define TABLE32_TEST 4 1286 #define TABLE64_TEST_LO 5 1287 #define TABLE64_TEST_HI 6 1288 1289 /* default 82599 register test */ 1290 static const struct ixgbe_reg_test reg_test_82599[] = { 1291 { IXGBE_FCRTL_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, 1292 { IXGBE_FCRTH_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, 1293 { IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 1294 { IXGBE_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 }, 1295 { IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 }, 1296 { IXGBE_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 1297 { IXGBE_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, 1298 { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE }, 1299 { IXGBE_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, 1300 { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 }, 1301 { IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, 1302 { IXGBE_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 1303 { IXGBE_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, 1304 { IXGBE_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 1305 { IXGBE_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFF80 }, 1306 { IXGBE_RXCTRL, 1, SET_READ_TEST, 0x00000001, 0x00000001 }, 1307 { IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF }, 1308 { IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x8001FFFF, 0x800CFFFF }, 1309 { IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 1310 { .reg = 0 } 1311 }; 1312 1313 /* default 82598 register test */ 1314 static const struct ixgbe_reg_test reg_test_82598[] = { 1315 { IXGBE_FCRTL(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, 1316 { IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, 1317 { IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 1318 { IXGBE_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 }, 1319 { IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, 1320 { IXGBE_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 1321 { IXGBE_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, 1322 /* Enable all four RX queues before testing. */ 1323 { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE }, 1324 /* RDH is read-only for 82598, only test RDT. */ 1325 { IXGBE_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, 1326 { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 }, 1327 { IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, 1328 { IXGBE_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 1329 { IXGBE_TIPG, 1, PATTERN_TEST, 0x000000FF, 0x000000FF }, 1330 { IXGBE_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, 1331 { IXGBE_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 1332 { IXGBE_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, 1333 { IXGBE_RXCTRL, 1, SET_READ_TEST, 0x00000003, 0x00000003 }, 1334 { IXGBE_DTXCTL, 1, SET_READ_TEST, 0x00000005, 0x00000005 }, 1335 { IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF }, 1336 { IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x800CFFFF, 0x800CFFFF }, 1337 { IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 1338 { .reg = 0 } 1339 }; 1340 1341 static bool reg_pattern_test(struct ixgbe_adapter *adapter, u64 *data, int reg, 1342 u32 mask, u32 write) 1343 { 1344 u32 pat, val, before; 1345 static const u32 test_pattern[] = { 1346 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; 1347 1348 if (ixgbe_removed(adapter->hw.hw_addr)) { 1349 *data = 1; 1350 return 1; 1351 } 1352 for (pat = 0; pat < ARRAY_SIZE(test_pattern); pat++) { 1353 before = ixgbe_read_reg(&adapter->hw, reg); 1354 ixgbe_write_reg(&adapter->hw, reg, test_pattern[pat] & write); 1355 val = ixgbe_read_reg(&adapter->hw, reg); 1356 if (val != (test_pattern[pat] & write & mask)) { 1357 e_err(drv, "pattern test reg %04X failed: got 0x%08X expected 0x%08X\n", 1358 reg, val, (test_pattern[pat] & write & mask)); 1359 *data = reg; 1360 ixgbe_write_reg(&adapter->hw, reg, before); 1361 return true; 1362 } 1363 ixgbe_write_reg(&adapter->hw, reg, before); 1364 } 1365 return false; 1366 } 1367 1368 static bool reg_set_and_check(struct ixgbe_adapter *adapter, u64 *data, int reg, 1369 u32 mask, u32 write) 1370 { 1371 u32 val, before; 1372 1373 if (ixgbe_removed(adapter->hw.hw_addr)) { 1374 *data = 1; 1375 return 1; 1376 } 1377 before = ixgbe_read_reg(&adapter->hw, reg); 1378 ixgbe_write_reg(&adapter->hw, reg, write & mask); 1379 val = ixgbe_read_reg(&adapter->hw, reg); 1380 if ((write & mask) != (val & mask)) { 1381 e_err(drv, "set/check reg %04X test failed: got 0x%08X expected 0x%08X\n", 1382 reg, (val & mask), (write & mask)); 1383 *data = reg; 1384 ixgbe_write_reg(&adapter->hw, reg, before); 1385 return true; 1386 } 1387 ixgbe_write_reg(&adapter->hw, reg, before); 1388 return false; 1389 } 1390 1391 static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data) 1392 { 1393 const struct ixgbe_reg_test *test; 1394 u32 value, before, after; 1395 u32 i, toggle; 1396 1397 if (ixgbe_removed(adapter->hw.hw_addr)) { 1398 e_err(drv, "Adapter removed - register test blocked\n"); 1399 *data = 1; 1400 return 1; 1401 } 1402 switch (adapter->hw.mac.type) { 1403 case ixgbe_mac_82598EB: 1404 toggle = 0x7FFFF3FF; 1405 test = reg_test_82598; 1406 break; 1407 case ixgbe_mac_82599EB: 1408 case ixgbe_mac_X540: 1409 toggle = 0x7FFFF30F; 1410 test = reg_test_82599; 1411 break; 1412 default: 1413 *data = 1; 1414 return 1; 1415 } 1416 1417 /* 1418 * Because the status register is such a special case, 1419 * we handle it separately from the rest of the register 1420 * tests. Some bits are read-only, some toggle, and some 1421 * are writeable on newer MACs. 1422 */ 1423 before = ixgbe_read_reg(&adapter->hw, IXGBE_STATUS); 1424 value = (ixgbe_read_reg(&adapter->hw, IXGBE_STATUS) & toggle); 1425 ixgbe_write_reg(&adapter->hw, IXGBE_STATUS, toggle); 1426 after = ixgbe_read_reg(&adapter->hw, IXGBE_STATUS) & toggle; 1427 if (value != after) { 1428 e_err(drv, "failed STATUS register test got: 0x%08X expected: 0x%08X\n", 1429 after, value); 1430 *data = 1; 1431 return 1; 1432 } 1433 /* restore previous status */ 1434 ixgbe_write_reg(&adapter->hw, IXGBE_STATUS, before); 1435 1436 /* 1437 * Perform the remainder of the register test, looping through 1438 * the test table until we either fail or reach the null entry. 1439 */ 1440 while (test->reg) { 1441 for (i = 0; i < test->array_len; i++) { 1442 bool b = false; 1443 1444 switch (test->test_type) { 1445 case PATTERN_TEST: 1446 b = reg_pattern_test(adapter, data, 1447 test->reg + (i * 0x40), 1448 test->mask, 1449 test->write); 1450 break; 1451 case SET_READ_TEST: 1452 b = reg_set_and_check(adapter, data, 1453 test->reg + (i * 0x40), 1454 test->mask, 1455 test->write); 1456 break; 1457 case WRITE_NO_TEST: 1458 ixgbe_write_reg(&adapter->hw, 1459 test->reg + (i * 0x40), 1460 test->write); 1461 break; 1462 case TABLE32_TEST: 1463 b = reg_pattern_test(adapter, data, 1464 test->reg + (i * 4), 1465 test->mask, 1466 test->write); 1467 break; 1468 case TABLE64_TEST_LO: 1469 b = reg_pattern_test(adapter, data, 1470 test->reg + (i * 8), 1471 test->mask, 1472 test->write); 1473 break; 1474 case TABLE64_TEST_HI: 1475 b = reg_pattern_test(adapter, data, 1476 (test->reg + 4) + (i * 8), 1477 test->mask, 1478 test->write); 1479 break; 1480 } 1481 if (b) 1482 return 1; 1483 } 1484 test++; 1485 } 1486 1487 *data = 0; 1488 return 0; 1489 } 1490 1491 static int ixgbe_eeprom_test(struct ixgbe_adapter *adapter, u64 *data) 1492 { 1493 struct ixgbe_hw *hw = &adapter->hw; 1494 if (hw->eeprom.ops.validate_checksum(hw, NULL)) 1495 *data = 1; 1496 else 1497 *data = 0; 1498 return *data; 1499 } 1500 1501 static irqreturn_t ixgbe_test_intr(int irq, void *data) 1502 { 1503 struct net_device *netdev = (struct net_device *) data; 1504 struct ixgbe_adapter *adapter = netdev_priv(netdev); 1505 1506 adapter->test_icr |= IXGBE_READ_REG(&adapter->hw, IXGBE_EICR); 1507 1508 return IRQ_HANDLED; 1509 } 1510 1511 static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data) 1512 { 1513 struct net_device *netdev = adapter->netdev; 1514 u32 mask, i = 0, shared_int = true; 1515 u32 irq = adapter->pdev->irq; 1516 1517 *data = 0; 1518 1519 /* Hook up test interrupt handler just for this test */ 1520 if (adapter->msix_entries) { 1521 /* NOTE: we don't test MSI-X interrupts here, yet */ 1522 return 0; 1523 } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) { 1524 shared_int = false; 1525 if (request_irq(irq, ixgbe_test_intr, 0, netdev->name, 1526 netdev)) { 1527 *data = 1; 1528 return -1; 1529 } 1530 } else if (!request_irq(irq, ixgbe_test_intr, IRQF_PROBE_SHARED, 1531 netdev->name, netdev)) { 1532 shared_int = false; 1533 } else if (request_irq(irq, ixgbe_test_intr, IRQF_SHARED, 1534 netdev->name, netdev)) { 1535 *data = 1; 1536 return -1; 1537 } 1538 e_info(hw, "testing %s interrupt\n", shared_int ? 1539 "shared" : "unshared"); 1540 1541 /* Disable all the interrupts */ 1542 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF); 1543 IXGBE_WRITE_FLUSH(&adapter->hw); 1544 usleep_range(10000, 20000); 1545 1546 /* Test each interrupt */ 1547 for (; i < 10; i++) { 1548 /* Interrupt to test */ 1549 mask = 1 << i; 1550 1551 if (!shared_int) { 1552 /* 1553 * Disable the interrupts to be reported in 1554 * the cause register and then force the same 1555 * interrupt and see if one gets posted. If 1556 * an interrupt was posted to the bus, the 1557 * test failed. 1558 */ 1559 adapter->test_icr = 0; 1560 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 1561 ~mask & 0x00007FFF); 1562 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, 1563 ~mask & 0x00007FFF); 1564 IXGBE_WRITE_FLUSH(&adapter->hw); 1565 usleep_range(10000, 20000); 1566 1567 if (adapter->test_icr & mask) { 1568 *data = 3; 1569 break; 1570 } 1571 } 1572 1573 /* 1574 * Enable the interrupt to be reported in the cause 1575 * register and then force the same interrupt and see 1576 * if one gets posted. If an interrupt was not posted 1577 * to the bus, the test failed. 1578 */ 1579 adapter->test_icr = 0; 1580 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask); 1581 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask); 1582 IXGBE_WRITE_FLUSH(&adapter->hw); 1583 usleep_range(10000, 20000); 1584 1585 if (!(adapter->test_icr & mask)) { 1586 *data = 4; 1587 break; 1588 } 1589 1590 if (!shared_int) { 1591 /* 1592 * Disable the other interrupts to be reported in 1593 * the cause register and then force the other 1594 * interrupts and see if any get posted. If 1595 * an interrupt was posted to the bus, the 1596 * test failed. 1597 */ 1598 adapter->test_icr = 0; 1599 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 1600 ~mask & 0x00007FFF); 1601 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, 1602 ~mask & 0x00007FFF); 1603 IXGBE_WRITE_FLUSH(&adapter->hw); 1604 usleep_range(10000, 20000); 1605 1606 if (adapter->test_icr) { 1607 *data = 5; 1608 break; 1609 } 1610 } 1611 } 1612 1613 /* Disable all the interrupts */ 1614 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF); 1615 IXGBE_WRITE_FLUSH(&adapter->hw); 1616 usleep_range(10000, 20000); 1617 1618 /* Unhook test interrupt handler */ 1619 free_irq(irq, netdev); 1620 1621 return *data; 1622 } 1623 1624 static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter) 1625 { 1626 struct ixgbe_ring *tx_ring = &adapter->test_tx_ring; 1627 struct ixgbe_ring *rx_ring = &adapter->test_rx_ring; 1628 struct ixgbe_hw *hw = &adapter->hw; 1629 u32 reg_ctl; 1630 1631 /* shut down the DMA engines now so they can be reinitialized later */ 1632 1633 /* first Rx */ 1634 reg_ctl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); 1635 reg_ctl &= ~IXGBE_RXCTRL_RXEN; 1636 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_ctl); 1637 ixgbe_disable_rx_queue(adapter, rx_ring); 1638 1639 /* now Tx */ 1640 reg_ctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx)); 1641 reg_ctl &= ~IXGBE_TXDCTL_ENABLE; 1642 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx), reg_ctl); 1643 1644 switch (hw->mac.type) { 1645 case ixgbe_mac_82599EB: 1646 case ixgbe_mac_X540: 1647 reg_ctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); 1648 reg_ctl &= ~IXGBE_DMATXCTL_TE; 1649 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_ctl); 1650 break; 1651 default: 1652 break; 1653 } 1654 1655 ixgbe_reset(adapter); 1656 1657 ixgbe_free_tx_resources(&adapter->test_tx_ring); 1658 ixgbe_free_rx_resources(&adapter->test_rx_ring); 1659 } 1660 1661 static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter) 1662 { 1663 struct ixgbe_ring *tx_ring = &adapter->test_tx_ring; 1664 struct ixgbe_ring *rx_ring = &adapter->test_rx_ring; 1665 u32 rctl, reg_data; 1666 int ret_val; 1667 int err; 1668 1669 /* Setup Tx descriptor ring and Tx buffers */ 1670 tx_ring->count = IXGBE_DEFAULT_TXD; 1671 tx_ring->queue_index = 0; 1672 tx_ring->dev = &adapter->pdev->dev; 1673 tx_ring->netdev = adapter->netdev; 1674 tx_ring->reg_idx = adapter->tx_ring[0]->reg_idx; 1675 1676 err = ixgbe_setup_tx_resources(tx_ring); 1677 if (err) 1678 return 1; 1679 1680 switch (adapter->hw.mac.type) { 1681 case ixgbe_mac_82599EB: 1682 case ixgbe_mac_X540: 1683 reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_DMATXCTL); 1684 reg_data |= IXGBE_DMATXCTL_TE; 1685 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DMATXCTL, reg_data); 1686 break; 1687 default: 1688 break; 1689 } 1690 1691 ixgbe_configure_tx_ring(adapter, tx_ring); 1692 1693 /* Setup Rx Descriptor ring and Rx buffers */ 1694 rx_ring->count = IXGBE_DEFAULT_RXD; 1695 rx_ring->queue_index = 0; 1696 rx_ring->dev = &adapter->pdev->dev; 1697 rx_ring->netdev = adapter->netdev; 1698 rx_ring->reg_idx = adapter->rx_ring[0]->reg_idx; 1699 1700 err = ixgbe_setup_rx_resources(rx_ring); 1701 if (err) { 1702 ret_val = 4; 1703 goto err_nomem; 1704 } 1705 1706 rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXCTRL); 1707 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rctl & ~IXGBE_RXCTRL_RXEN); 1708 1709 ixgbe_configure_rx_ring(adapter, rx_ring); 1710 1711 rctl |= IXGBE_RXCTRL_RXEN | IXGBE_RXCTRL_DMBYPS; 1712 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rctl); 1713 1714 return 0; 1715 1716 err_nomem: 1717 ixgbe_free_desc_rings(adapter); 1718 return ret_val; 1719 } 1720 1721 static int ixgbe_setup_loopback_test(struct ixgbe_adapter *adapter) 1722 { 1723 struct ixgbe_hw *hw = &adapter->hw; 1724 u32 reg_data; 1725 1726 1727 /* Setup MAC loopback */ 1728 reg_data = IXGBE_READ_REG(hw, IXGBE_HLREG0); 1729 reg_data |= IXGBE_HLREG0_LPBK; 1730 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_data); 1731 1732 reg_data = IXGBE_READ_REG(hw, IXGBE_FCTRL); 1733 reg_data |= IXGBE_FCTRL_BAM | IXGBE_FCTRL_SBP | IXGBE_FCTRL_MPE; 1734 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg_data); 1735 1736 /* X540 needs to set the MACC.FLU bit to force link up */ 1737 if (adapter->hw.mac.type == ixgbe_mac_X540) { 1738 reg_data = IXGBE_READ_REG(hw, IXGBE_MACC); 1739 reg_data |= IXGBE_MACC_FLU; 1740 IXGBE_WRITE_REG(hw, IXGBE_MACC, reg_data); 1741 } else { 1742 if (hw->mac.orig_autoc) { 1743 reg_data = hw->mac.orig_autoc | IXGBE_AUTOC_FLU; 1744 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_data); 1745 } else { 1746 return 10; 1747 } 1748 } 1749 IXGBE_WRITE_FLUSH(hw); 1750 usleep_range(10000, 20000); 1751 1752 /* Disable Atlas Tx lanes; re-enabled in reset path */ 1753 if (hw->mac.type == ixgbe_mac_82598EB) { 1754 u8 atlas; 1755 1756 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &atlas); 1757 atlas |= IXGBE_ATLAS_PDN_TX_REG_EN; 1758 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, atlas); 1759 1760 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, &atlas); 1761 atlas |= IXGBE_ATLAS_PDN_TX_10G_QL_ALL; 1762 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, atlas); 1763 1764 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, &atlas); 1765 atlas |= IXGBE_ATLAS_PDN_TX_1G_QL_ALL; 1766 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, atlas); 1767 1768 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, &atlas); 1769 atlas |= IXGBE_ATLAS_PDN_TX_AN_QL_ALL; 1770 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, atlas); 1771 } 1772 1773 return 0; 1774 } 1775 1776 static void ixgbe_loopback_cleanup(struct ixgbe_adapter *adapter) 1777 { 1778 u32 reg_data; 1779 1780 reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0); 1781 reg_data &= ~IXGBE_HLREG0_LPBK; 1782 IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, reg_data); 1783 } 1784 1785 static void ixgbe_create_lbtest_frame(struct sk_buff *skb, 1786 unsigned int frame_size) 1787 { 1788 memset(skb->data, 0xFF, frame_size); 1789 frame_size >>= 1; 1790 memset(&skb->data[frame_size], 0xAA, frame_size / 2 - 1); 1791 memset(&skb->data[frame_size + 10], 0xBE, 1); 1792 memset(&skb->data[frame_size + 12], 0xAF, 1); 1793 } 1794 1795 static bool ixgbe_check_lbtest_frame(struct ixgbe_rx_buffer *rx_buffer, 1796 unsigned int frame_size) 1797 { 1798 unsigned char *data; 1799 bool match = true; 1800 1801 frame_size >>= 1; 1802 1803 data = kmap(rx_buffer->page) + rx_buffer->page_offset; 1804 1805 if (data[3] != 0xFF || 1806 data[frame_size + 10] != 0xBE || 1807 data[frame_size + 12] != 0xAF) 1808 match = false; 1809 1810 kunmap(rx_buffer->page); 1811 1812 return match; 1813 } 1814 1815 static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring, 1816 struct ixgbe_ring *tx_ring, 1817 unsigned int size) 1818 { 1819 union ixgbe_adv_rx_desc *rx_desc; 1820 struct ixgbe_rx_buffer *rx_buffer; 1821 struct ixgbe_tx_buffer *tx_buffer; 1822 u16 rx_ntc, tx_ntc, count = 0; 1823 1824 /* initialize next to clean and descriptor values */ 1825 rx_ntc = rx_ring->next_to_clean; 1826 tx_ntc = tx_ring->next_to_clean; 1827 rx_desc = IXGBE_RX_DESC(rx_ring, rx_ntc); 1828 1829 while (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_DD)) { 1830 /* check Rx buffer */ 1831 rx_buffer = &rx_ring->rx_buffer_info[rx_ntc]; 1832 1833 /* sync Rx buffer for CPU read */ 1834 dma_sync_single_for_cpu(rx_ring->dev, 1835 rx_buffer->dma, 1836 ixgbe_rx_bufsz(rx_ring), 1837 DMA_FROM_DEVICE); 1838 1839 /* verify contents of skb */ 1840 if (ixgbe_check_lbtest_frame(rx_buffer, size)) 1841 count++; 1842 1843 /* sync Rx buffer for device write */ 1844 dma_sync_single_for_device(rx_ring->dev, 1845 rx_buffer->dma, 1846 ixgbe_rx_bufsz(rx_ring), 1847 DMA_FROM_DEVICE); 1848 1849 /* unmap buffer on Tx side */ 1850 tx_buffer = &tx_ring->tx_buffer_info[tx_ntc]; 1851 ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer); 1852 1853 /* increment Rx/Tx next to clean counters */ 1854 rx_ntc++; 1855 if (rx_ntc == rx_ring->count) 1856 rx_ntc = 0; 1857 tx_ntc++; 1858 if (tx_ntc == tx_ring->count) 1859 tx_ntc = 0; 1860 1861 /* fetch next descriptor */ 1862 rx_desc = IXGBE_RX_DESC(rx_ring, rx_ntc); 1863 } 1864 1865 netdev_tx_reset_queue(txring_txq(tx_ring)); 1866 1867 /* re-map buffers to ring, store next to clean values */ 1868 ixgbe_alloc_rx_buffers(rx_ring, count); 1869 rx_ring->next_to_clean = rx_ntc; 1870 tx_ring->next_to_clean = tx_ntc; 1871 1872 return count; 1873 } 1874 1875 static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter) 1876 { 1877 struct ixgbe_ring *tx_ring = &adapter->test_tx_ring; 1878 struct ixgbe_ring *rx_ring = &adapter->test_rx_ring; 1879 int i, j, lc, good_cnt, ret_val = 0; 1880 unsigned int size = 1024; 1881 netdev_tx_t tx_ret_val; 1882 struct sk_buff *skb; 1883 u32 flags_orig = adapter->flags; 1884 1885 /* DCB can modify the frames on Tx */ 1886 adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED; 1887 1888 /* allocate test skb */ 1889 skb = alloc_skb(size, GFP_KERNEL); 1890 if (!skb) 1891 return 11; 1892 1893 /* place data into test skb */ 1894 ixgbe_create_lbtest_frame(skb, size); 1895 skb_put(skb, size); 1896 1897 /* 1898 * Calculate the loop count based on the largest descriptor ring 1899 * The idea is to wrap the largest ring a number of times using 64 1900 * send/receive pairs during each loop 1901 */ 1902 1903 if (rx_ring->count <= tx_ring->count) 1904 lc = ((tx_ring->count / 64) * 2) + 1; 1905 else 1906 lc = ((rx_ring->count / 64) * 2) + 1; 1907 1908 for (j = 0; j <= lc; j++) { 1909 /* reset count of good packets */ 1910 good_cnt = 0; 1911 1912 /* place 64 packets on the transmit queue*/ 1913 for (i = 0; i < 64; i++) { 1914 skb_get(skb); 1915 tx_ret_val = ixgbe_xmit_frame_ring(skb, 1916 adapter, 1917 tx_ring); 1918 if (tx_ret_val == NETDEV_TX_OK) 1919 good_cnt++; 1920 } 1921 1922 if (good_cnt != 64) { 1923 ret_val = 12; 1924 break; 1925 } 1926 1927 /* allow 200 milliseconds for packets to go from Tx to Rx */ 1928 msleep(200); 1929 1930 good_cnt = ixgbe_clean_test_rings(rx_ring, tx_ring, size); 1931 if (good_cnt != 64) { 1932 ret_val = 13; 1933 break; 1934 } 1935 } 1936 1937 /* free the original skb */ 1938 kfree_skb(skb); 1939 adapter->flags = flags_orig; 1940 1941 return ret_val; 1942 } 1943 1944 static int ixgbe_loopback_test(struct ixgbe_adapter *adapter, u64 *data) 1945 { 1946 *data = ixgbe_setup_desc_rings(adapter); 1947 if (*data) 1948 goto out; 1949 *data = ixgbe_setup_loopback_test(adapter); 1950 if (*data) 1951 goto err_loopback; 1952 *data = ixgbe_run_loopback_test(adapter); 1953 ixgbe_loopback_cleanup(adapter); 1954 1955 err_loopback: 1956 ixgbe_free_desc_rings(adapter); 1957 out: 1958 return *data; 1959 } 1960 1961 static void ixgbe_diag_test(struct net_device *netdev, 1962 struct ethtool_test *eth_test, u64 *data) 1963 { 1964 struct ixgbe_adapter *adapter = netdev_priv(netdev); 1965 bool if_running = netif_running(netdev); 1966 1967 if (ixgbe_removed(adapter->hw.hw_addr)) { 1968 e_err(hw, "Adapter removed - test blocked\n"); 1969 data[0] = 1; 1970 data[1] = 1; 1971 data[2] = 1; 1972 data[3] = 1; 1973 data[4] = 1; 1974 eth_test->flags |= ETH_TEST_FL_FAILED; 1975 return; 1976 } 1977 set_bit(__IXGBE_TESTING, &adapter->state); 1978 if (eth_test->flags == ETH_TEST_FL_OFFLINE) { 1979 struct ixgbe_hw *hw = &adapter->hw; 1980 1981 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { 1982 int i; 1983 for (i = 0; i < adapter->num_vfs; i++) { 1984 if (adapter->vfinfo[i].clear_to_send) { 1985 netdev_warn(netdev, "offline diagnostic is not supported when VFs are present\n"); 1986 data[0] = 1; 1987 data[1] = 1; 1988 data[2] = 1; 1989 data[3] = 1; 1990 data[4] = 1; 1991 eth_test->flags |= ETH_TEST_FL_FAILED; 1992 clear_bit(__IXGBE_TESTING, 1993 &adapter->state); 1994 goto skip_ol_tests; 1995 } 1996 } 1997 } 1998 1999 /* Offline tests */ 2000 e_info(hw, "offline testing starting\n"); 2001 2002 /* Link test performed before hardware reset so autoneg doesn't 2003 * interfere with test result 2004 */ 2005 if (ixgbe_link_test(adapter, &data[4])) 2006 eth_test->flags |= ETH_TEST_FL_FAILED; 2007 2008 if (if_running) 2009 /* indicate we're in test mode */ 2010 dev_close(netdev); 2011 else 2012 ixgbe_reset(adapter); 2013 2014 e_info(hw, "register testing starting\n"); 2015 if (ixgbe_reg_test(adapter, &data[0])) 2016 eth_test->flags |= ETH_TEST_FL_FAILED; 2017 2018 ixgbe_reset(adapter); 2019 e_info(hw, "eeprom testing starting\n"); 2020 if (ixgbe_eeprom_test(adapter, &data[1])) 2021 eth_test->flags |= ETH_TEST_FL_FAILED; 2022 2023 ixgbe_reset(adapter); 2024 e_info(hw, "interrupt testing starting\n"); 2025 if (ixgbe_intr_test(adapter, &data[2])) 2026 eth_test->flags |= ETH_TEST_FL_FAILED; 2027 2028 /* If SRIOV or VMDq is enabled then skip MAC 2029 * loopback diagnostic. */ 2030 if (adapter->flags & (IXGBE_FLAG_SRIOV_ENABLED | 2031 IXGBE_FLAG_VMDQ_ENABLED)) { 2032 e_info(hw, "Skip MAC loopback diagnostic in VT mode\n"); 2033 data[3] = 0; 2034 goto skip_loopback; 2035 } 2036 2037 ixgbe_reset(adapter); 2038 e_info(hw, "loopback testing starting\n"); 2039 if (ixgbe_loopback_test(adapter, &data[3])) 2040 eth_test->flags |= ETH_TEST_FL_FAILED; 2041 2042 skip_loopback: 2043 ixgbe_reset(adapter); 2044 2045 /* clear testing bit and return adapter to previous state */ 2046 clear_bit(__IXGBE_TESTING, &adapter->state); 2047 if (if_running) 2048 dev_open(netdev); 2049 else if (hw->mac.ops.disable_tx_laser) 2050 hw->mac.ops.disable_tx_laser(hw); 2051 } else { 2052 e_info(hw, "online testing starting\n"); 2053 2054 /* Online tests */ 2055 if (ixgbe_link_test(adapter, &data[4])) 2056 eth_test->flags |= ETH_TEST_FL_FAILED; 2057 2058 /* Offline tests aren't run; pass by default */ 2059 data[0] = 0; 2060 data[1] = 0; 2061 data[2] = 0; 2062 data[3] = 0; 2063 2064 clear_bit(__IXGBE_TESTING, &adapter->state); 2065 } 2066 2067 skip_ol_tests: 2068 msleep_interruptible(4 * 1000); 2069 } 2070 2071 static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter, 2072 struct ethtool_wolinfo *wol) 2073 { 2074 struct ixgbe_hw *hw = &adapter->hw; 2075 int retval = 0; 2076 2077 /* WOL not supported for all devices */ 2078 if (!ixgbe_wol_supported(adapter, hw->device_id, 2079 hw->subsystem_device_id)) { 2080 retval = 1; 2081 wol->supported = 0; 2082 } 2083 2084 return retval; 2085 } 2086 2087 static void ixgbe_get_wol(struct net_device *netdev, 2088 struct ethtool_wolinfo *wol) 2089 { 2090 struct ixgbe_adapter *adapter = netdev_priv(netdev); 2091 2092 wol->supported = WAKE_UCAST | WAKE_MCAST | 2093 WAKE_BCAST | WAKE_MAGIC; 2094 wol->wolopts = 0; 2095 2096 if (ixgbe_wol_exclusion(adapter, wol) || 2097 !device_can_wakeup(&adapter->pdev->dev)) 2098 return; 2099 2100 if (adapter->wol & IXGBE_WUFC_EX) 2101 wol->wolopts |= WAKE_UCAST; 2102 if (adapter->wol & IXGBE_WUFC_MC) 2103 wol->wolopts |= WAKE_MCAST; 2104 if (adapter->wol & IXGBE_WUFC_BC) 2105 wol->wolopts |= WAKE_BCAST; 2106 if (adapter->wol & IXGBE_WUFC_MAG) 2107 wol->wolopts |= WAKE_MAGIC; 2108 } 2109 2110 static int ixgbe_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) 2111 { 2112 struct ixgbe_adapter *adapter = netdev_priv(netdev); 2113 2114 if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE)) 2115 return -EOPNOTSUPP; 2116 2117 if (ixgbe_wol_exclusion(adapter, wol)) 2118 return wol->wolopts ? -EOPNOTSUPP : 0; 2119 2120 adapter->wol = 0; 2121 2122 if (wol->wolopts & WAKE_UCAST) 2123 adapter->wol |= IXGBE_WUFC_EX; 2124 if (wol->wolopts & WAKE_MCAST) 2125 adapter->wol |= IXGBE_WUFC_MC; 2126 if (wol->wolopts & WAKE_BCAST) 2127 adapter->wol |= IXGBE_WUFC_BC; 2128 if (wol->wolopts & WAKE_MAGIC) 2129 adapter->wol |= IXGBE_WUFC_MAG; 2130 2131 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); 2132 2133 return 0; 2134 } 2135 2136 static int ixgbe_nway_reset(struct net_device *netdev) 2137 { 2138 struct ixgbe_adapter *adapter = netdev_priv(netdev); 2139 2140 if (netif_running(netdev)) 2141 ixgbe_reinit_locked(adapter); 2142 2143 return 0; 2144 } 2145 2146 static int ixgbe_set_phys_id(struct net_device *netdev, 2147 enum ethtool_phys_id_state state) 2148 { 2149 struct ixgbe_adapter *adapter = netdev_priv(netdev); 2150 struct ixgbe_hw *hw = &adapter->hw; 2151 2152 switch (state) { 2153 case ETHTOOL_ID_ACTIVE: 2154 adapter->led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); 2155 return 2; 2156 2157 case ETHTOOL_ID_ON: 2158 hw->mac.ops.led_on(hw, IXGBE_LED_ON); 2159 break; 2160 2161 case ETHTOOL_ID_OFF: 2162 hw->mac.ops.led_off(hw, IXGBE_LED_ON); 2163 break; 2164 2165 case ETHTOOL_ID_INACTIVE: 2166 /* Restore LED settings */ 2167 IXGBE_WRITE_REG(&adapter->hw, IXGBE_LEDCTL, adapter->led_reg); 2168 break; 2169 } 2170 2171 return 0; 2172 } 2173 2174 static int ixgbe_get_coalesce(struct net_device *netdev, 2175 struct ethtool_coalesce *ec) 2176 { 2177 struct ixgbe_adapter *adapter = netdev_priv(netdev); 2178 2179 /* only valid if in constant ITR mode */ 2180 if (adapter->rx_itr_setting <= 1) 2181 ec->rx_coalesce_usecs = adapter->rx_itr_setting; 2182 else 2183 ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2; 2184 2185 /* if in mixed tx/rx queues per vector mode, report only rx settings */ 2186 if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count) 2187 return 0; 2188 2189 /* only valid if in constant ITR mode */ 2190 if (adapter->tx_itr_setting <= 1) 2191 ec->tx_coalesce_usecs = adapter->tx_itr_setting; 2192 else 2193 ec->tx_coalesce_usecs = adapter->tx_itr_setting >> 2; 2194 2195 return 0; 2196 } 2197 2198 /* 2199 * this function must be called before setting the new value of 2200 * rx_itr_setting 2201 */ 2202 static bool ixgbe_update_rsc(struct ixgbe_adapter *adapter) 2203 { 2204 struct net_device *netdev = adapter->netdev; 2205 2206 /* nothing to do if LRO or RSC are not enabled */ 2207 if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) || 2208 !(netdev->features & NETIF_F_LRO)) 2209 return false; 2210 2211 /* check the feature flag value and enable RSC if necessary */ 2212 if (adapter->rx_itr_setting == 1 || 2213 adapter->rx_itr_setting > IXGBE_MIN_RSC_ITR) { 2214 if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) { 2215 adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED; 2216 e_info(probe, "rx-usecs value high enough to re-enable RSC\n"); 2217 return true; 2218 } 2219 /* if interrupt rate is too high then disable RSC */ 2220 } else if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) { 2221 adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED; 2222 e_info(probe, "rx-usecs set too low, disabling RSC\n"); 2223 return true; 2224 } 2225 return false; 2226 } 2227 2228 static int ixgbe_set_coalesce(struct net_device *netdev, 2229 struct ethtool_coalesce *ec) 2230 { 2231 struct ixgbe_adapter *adapter = netdev_priv(netdev); 2232 struct ixgbe_q_vector *q_vector; 2233 int i; 2234 u16 tx_itr_param, rx_itr_param, tx_itr_prev; 2235 bool need_reset = false; 2236 2237 if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count) { 2238 /* reject Tx specific changes in case of mixed RxTx vectors */ 2239 if (ec->tx_coalesce_usecs) 2240 return -EINVAL; 2241 tx_itr_prev = adapter->rx_itr_setting; 2242 } else { 2243 tx_itr_prev = adapter->tx_itr_setting; 2244 } 2245 2246 if ((ec->rx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)) || 2247 (ec->tx_coalesce_usecs > (IXGBE_MAX_EITR >> 2))) 2248 return -EINVAL; 2249 2250 if (ec->rx_coalesce_usecs > 1) 2251 adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2; 2252 else 2253 adapter->rx_itr_setting = ec->rx_coalesce_usecs; 2254 2255 if (adapter->rx_itr_setting == 1) 2256 rx_itr_param = IXGBE_20K_ITR; 2257 else 2258 rx_itr_param = adapter->rx_itr_setting; 2259 2260 if (ec->tx_coalesce_usecs > 1) 2261 adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2; 2262 else 2263 adapter->tx_itr_setting = ec->tx_coalesce_usecs; 2264 2265 if (adapter->tx_itr_setting == 1) 2266 tx_itr_param = IXGBE_10K_ITR; 2267 else 2268 tx_itr_param = adapter->tx_itr_setting; 2269 2270 /* mixed Rx/Tx */ 2271 if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count) 2272 adapter->tx_itr_setting = adapter->rx_itr_setting; 2273 2274 /* detect ITR changes that require update of TXDCTL.WTHRESH */ 2275 if ((adapter->tx_itr_setting != 1) && 2276 (adapter->tx_itr_setting < IXGBE_100K_ITR)) { 2277 if ((tx_itr_prev == 1) || 2278 (tx_itr_prev >= IXGBE_100K_ITR)) 2279 need_reset = true; 2280 } else { 2281 if ((tx_itr_prev != 1) && 2282 (tx_itr_prev < IXGBE_100K_ITR)) 2283 need_reset = true; 2284 } 2285 2286 /* check the old value and enable RSC if necessary */ 2287 need_reset |= ixgbe_update_rsc(adapter); 2288 2289 for (i = 0; i < adapter->num_q_vectors; i++) { 2290 q_vector = adapter->q_vector[i]; 2291 if (q_vector->tx.count && !q_vector->rx.count) 2292 /* tx only */ 2293 q_vector->itr = tx_itr_param; 2294 else 2295 /* rx only or mixed */ 2296 q_vector->itr = rx_itr_param; 2297 ixgbe_write_eitr(q_vector); 2298 } 2299 2300 /* 2301 * do reset here at the end to make sure EITR==0 case is handled 2302 * correctly w.r.t stopping tx, and changing TXDCTL.WTHRESH settings 2303 * also locks in RSC enable/disable which requires reset 2304 */ 2305 if (need_reset) 2306 ixgbe_do_reset(netdev); 2307 2308 return 0; 2309 } 2310 2311 static int ixgbe_get_ethtool_fdir_entry(struct ixgbe_adapter *adapter, 2312 struct ethtool_rxnfc *cmd) 2313 { 2314 union ixgbe_atr_input *mask = &adapter->fdir_mask; 2315 struct ethtool_rx_flow_spec *fsp = 2316 (struct ethtool_rx_flow_spec *)&cmd->fs; 2317 struct hlist_node *node2; 2318 struct ixgbe_fdir_filter *rule = NULL; 2319 2320 /* report total rule count */ 2321 cmd->data = (1024 << adapter->fdir_pballoc) - 2; 2322 2323 hlist_for_each_entry_safe(rule, node2, 2324 &adapter->fdir_filter_list, fdir_node) { 2325 if (fsp->location <= rule->sw_idx) 2326 break; 2327 } 2328 2329 if (!rule || fsp->location != rule->sw_idx) 2330 return -EINVAL; 2331 2332 /* fill out the flow spec entry */ 2333 2334 /* set flow type field */ 2335 switch (rule->filter.formatted.flow_type) { 2336 case IXGBE_ATR_FLOW_TYPE_TCPV4: 2337 fsp->flow_type = TCP_V4_FLOW; 2338 break; 2339 case IXGBE_ATR_FLOW_TYPE_UDPV4: 2340 fsp->flow_type = UDP_V4_FLOW; 2341 break; 2342 case IXGBE_ATR_FLOW_TYPE_SCTPV4: 2343 fsp->flow_type = SCTP_V4_FLOW; 2344 break; 2345 case IXGBE_ATR_FLOW_TYPE_IPV4: 2346 fsp->flow_type = IP_USER_FLOW; 2347 fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4; 2348 fsp->h_u.usr_ip4_spec.proto = 0; 2349 fsp->m_u.usr_ip4_spec.proto = 0; 2350 break; 2351 default: 2352 return -EINVAL; 2353 } 2354 2355 fsp->h_u.tcp_ip4_spec.psrc = rule->filter.formatted.src_port; 2356 fsp->m_u.tcp_ip4_spec.psrc = mask->formatted.src_port; 2357 fsp->h_u.tcp_ip4_spec.pdst = rule->filter.formatted.dst_port; 2358 fsp->m_u.tcp_ip4_spec.pdst = mask->formatted.dst_port; 2359 fsp->h_u.tcp_ip4_spec.ip4src = rule->filter.formatted.src_ip[0]; 2360 fsp->m_u.tcp_ip4_spec.ip4src = mask->formatted.src_ip[0]; 2361 fsp->h_u.tcp_ip4_spec.ip4dst = rule->filter.formatted.dst_ip[0]; 2362 fsp->m_u.tcp_ip4_spec.ip4dst = mask->formatted.dst_ip[0]; 2363 fsp->h_ext.vlan_tci = rule->filter.formatted.vlan_id; 2364 fsp->m_ext.vlan_tci = mask->formatted.vlan_id; 2365 fsp->h_ext.vlan_etype = rule->filter.formatted.flex_bytes; 2366 fsp->m_ext.vlan_etype = mask->formatted.flex_bytes; 2367 fsp->h_ext.data[1] = htonl(rule->filter.formatted.vm_pool); 2368 fsp->m_ext.data[1] = htonl(mask->formatted.vm_pool); 2369 fsp->flow_type |= FLOW_EXT; 2370 2371 /* record action */ 2372 if (rule->action == IXGBE_FDIR_DROP_QUEUE) 2373 fsp->ring_cookie = RX_CLS_FLOW_DISC; 2374 else 2375 fsp->ring_cookie = rule->action; 2376 2377 return 0; 2378 } 2379 2380 static int ixgbe_get_ethtool_fdir_all(struct ixgbe_adapter *adapter, 2381 struct ethtool_rxnfc *cmd, 2382 u32 *rule_locs) 2383 { 2384 struct hlist_node *node2; 2385 struct ixgbe_fdir_filter *rule; 2386 int cnt = 0; 2387 2388 /* report total rule count */ 2389 cmd->data = (1024 << adapter->fdir_pballoc) - 2; 2390 2391 hlist_for_each_entry_safe(rule, node2, 2392 &adapter->fdir_filter_list, fdir_node) { 2393 if (cnt == cmd->rule_cnt) 2394 return -EMSGSIZE; 2395 rule_locs[cnt] = rule->sw_idx; 2396 cnt++; 2397 } 2398 2399 cmd->rule_cnt = cnt; 2400 2401 return 0; 2402 } 2403 2404 static int ixgbe_get_rss_hash_opts(struct ixgbe_adapter *adapter, 2405 struct ethtool_rxnfc *cmd) 2406 { 2407 cmd->data = 0; 2408 2409 /* Report default options for RSS on ixgbe */ 2410 switch (cmd->flow_type) { 2411 case TCP_V4_FLOW: 2412 cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; 2413 /* fallthrough */ 2414 case UDP_V4_FLOW: 2415 if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP) 2416 cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; 2417 /* fallthrough */ 2418 case SCTP_V4_FLOW: 2419 case AH_ESP_V4_FLOW: 2420 case AH_V4_FLOW: 2421 case ESP_V4_FLOW: 2422 case IPV4_FLOW: 2423 cmd->data |= RXH_IP_SRC | RXH_IP_DST; 2424 break; 2425 case TCP_V6_FLOW: 2426 cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; 2427 /* fallthrough */ 2428 case UDP_V6_FLOW: 2429 if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP) 2430 cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; 2431 /* fallthrough */ 2432 case SCTP_V6_FLOW: 2433 case AH_ESP_V6_FLOW: 2434 case AH_V6_FLOW: 2435 case ESP_V6_FLOW: 2436 case IPV6_FLOW: 2437 cmd->data |= RXH_IP_SRC | RXH_IP_DST; 2438 break; 2439 default: 2440 return -EINVAL; 2441 } 2442 2443 return 0; 2444 } 2445 2446 static int ixgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, 2447 u32 *rule_locs) 2448 { 2449 struct ixgbe_adapter *adapter = netdev_priv(dev); 2450 int ret = -EOPNOTSUPP; 2451 2452 switch (cmd->cmd) { 2453 case ETHTOOL_GRXRINGS: 2454 cmd->data = adapter->num_rx_queues; 2455 ret = 0; 2456 break; 2457 case ETHTOOL_GRXCLSRLCNT: 2458 cmd->rule_cnt = adapter->fdir_filter_count; 2459 ret = 0; 2460 break; 2461 case ETHTOOL_GRXCLSRULE: 2462 ret = ixgbe_get_ethtool_fdir_entry(adapter, cmd); 2463 break; 2464 case ETHTOOL_GRXCLSRLALL: 2465 ret = ixgbe_get_ethtool_fdir_all(adapter, cmd, rule_locs); 2466 break; 2467 case ETHTOOL_GRXFH: 2468 ret = ixgbe_get_rss_hash_opts(adapter, cmd); 2469 break; 2470 default: 2471 break; 2472 } 2473 2474 return ret; 2475 } 2476 2477 static int ixgbe_update_ethtool_fdir_entry(struct ixgbe_adapter *adapter, 2478 struct ixgbe_fdir_filter *input, 2479 u16 sw_idx) 2480 { 2481 struct ixgbe_hw *hw = &adapter->hw; 2482 struct hlist_node *node2; 2483 struct ixgbe_fdir_filter *rule, *parent; 2484 int err = -EINVAL; 2485 2486 parent = NULL; 2487 rule = NULL; 2488 2489 hlist_for_each_entry_safe(rule, node2, 2490 &adapter->fdir_filter_list, fdir_node) { 2491 /* hash found, or no matching entry */ 2492 if (rule->sw_idx >= sw_idx) 2493 break; 2494 parent = rule; 2495 } 2496 2497 /* if there is an old rule occupying our place remove it */ 2498 if (rule && (rule->sw_idx == sw_idx)) { 2499 if (!input || (rule->filter.formatted.bkt_hash != 2500 input->filter.formatted.bkt_hash)) { 2501 err = ixgbe_fdir_erase_perfect_filter_82599(hw, 2502 &rule->filter, 2503 sw_idx); 2504 } 2505 2506 hlist_del(&rule->fdir_node); 2507 kfree(rule); 2508 adapter->fdir_filter_count--; 2509 } 2510 2511 /* 2512 * If no input this was a delete, err should be 0 if a rule was 2513 * successfully found and removed from the list else -EINVAL 2514 */ 2515 if (!input) 2516 return err; 2517 2518 /* initialize node and set software index */ 2519 INIT_HLIST_NODE(&input->fdir_node); 2520 2521 /* add filter to the list */ 2522 if (parent) 2523 hlist_add_behind(&input->fdir_node, &parent->fdir_node); 2524 else 2525 hlist_add_head(&input->fdir_node, 2526 &adapter->fdir_filter_list); 2527 2528 /* update counts */ 2529 adapter->fdir_filter_count++; 2530 2531 return 0; 2532 } 2533 2534 static int ixgbe_flowspec_to_flow_type(struct ethtool_rx_flow_spec *fsp, 2535 u8 *flow_type) 2536 { 2537 switch (fsp->flow_type & ~FLOW_EXT) { 2538 case TCP_V4_FLOW: 2539 *flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4; 2540 break; 2541 case UDP_V4_FLOW: 2542 *flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4; 2543 break; 2544 case SCTP_V4_FLOW: 2545 *flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4; 2546 break; 2547 case IP_USER_FLOW: 2548 switch (fsp->h_u.usr_ip4_spec.proto) { 2549 case IPPROTO_TCP: 2550 *flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4; 2551 break; 2552 case IPPROTO_UDP: 2553 *flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4; 2554 break; 2555 case IPPROTO_SCTP: 2556 *flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4; 2557 break; 2558 case 0: 2559 if (!fsp->m_u.usr_ip4_spec.proto) { 2560 *flow_type = IXGBE_ATR_FLOW_TYPE_IPV4; 2561 break; 2562 } 2563 default: 2564 return 0; 2565 } 2566 break; 2567 default: 2568 return 0; 2569 } 2570 2571 return 1; 2572 } 2573 2574 static int ixgbe_add_ethtool_fdir_entry(struct ixgbe_adapter *adapter, 2575 struct ethtool_rxnfc *cmd) 2576 { 2577 struct ethtool_rx_flow_spec *fsp = 2578 (struct ethtool_rx_flow_spec *)&cmd->fs; 2579 struct ixgbe_hw *hw = &adapter->hw; 2580 struct ixgbe_fdir_filter *input; 2581 union ixgbe_atr_input mask; 2582 int err; 2583 2584 if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) 2585 return -EOPNOTSUPP; 2586 2587 /* 2588 * Don't allow programming if the action is a queue greater than 2589 * the number of online Rx queues. 2590 */ 2591 if ((fsp->ring_cookie != RX_CLS_FLOW_DISC) && 2592 (fsp->ring_cookie >= adapter->num_rx_queues)) 2593 return -EINVAL; 2594 2595 /* Don't allow indexes to exist outside of available space */ 2596 if (fsp->location >= ((1024 << adapter->fdir_pballoc) - 2)) { 2597 e_err(drv, "Location out of range\n"); 2598 return -EINVAL; 2599 } 2600 2601 input = kzalloc(sizeof(*input), GFP_ATOMIC); 2602 if (!input) 2603 return -ENOMEM; 2604 2605 memset(&mask, 0, sizeof(union ixgbe_atr_input)); 2606 2607 /* set SW index */ 2608 input->sw_idx = fsp->location; 2609 2610 /* record flow type */ 2611 if (!ixgbe_flowspec_to_flow_type(fsp, 2612 &input->filter.formatted.flow_type)) { 2613 e_err(drv, "Unrecognized flow type\n"); 2614 goto err_out; 2615 } 2616 2617 mask.formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK | 2618 IXGBE_ATR_L4TYPE_MASK; 2619 2620 if (input->filter.formatted.flow_type == IXGBE_ATR_FLOW_TYPE_IPV4) 2621 mask.formatted.flow_type &= IXGBE_ATR_L4TYPE_IPV6_MASK; 2622 2623 /* Copy input into formatted structures */ 2624 input->filter.formatted.src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4src; 2625 mask.formatted.src_ip[0] = fsp->m_u.tcp_ip4_spec.ip4src; 2626 input->filter.formatted.dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst; 2627 mask.formatted.dst_ip[0] = fsp->m_u.tcp_ip4_spec.ip4dst; 2628 input->filter.formatted.src_port = fsp->h_u.tcp_ip4_spec.psrc; 2629 mask.formatted.src_port = fsp->m_u.tcp_ip4_spec.psrc; 2630 input->filter.formatted.dst_port = fsp->h_u.tcp_ip4_spec.pdst; 2631 mask.formatted.dst_port = fsp->m_u.tcp_ip4_spec.pdst; 2632 2633 if (fsp->flow_type & FLOW_EXT) { 2634 input->filter.formatted.vm_pool = 2635 (unsigned char)ntohl(fsp->h_ext.data[1]); 2636 mask.formatted.vm_pool = 2637 (unsigned char)ntohl(fsp->m_ext.data[1]); 2638 input->filter.formatted.vlan_id = fsp->h_ext.vlan_tci; 2639 mask.formatted.vlan_id = fsp->m_ext.vlan_tci; 2640 input->filter.formatted.flex_bytes = 2641 fsp->h_ext.vlan_etype; 2642 mask.formatted.flex_bytes = fsp->m_ext.vlan_etype; 2643 } 2644 2645 /* determine if we need to drop or route the packet */ 2646 if (fsp->ring_cookie == RX_CLS_FLOW_DISC) 2647 input->action = IXGBE_FDIR_DROP_QUEUE; 2648 else 2649 input->action = fsp->ring_cookie; 2650 2651 spin_lock(&adapter->fdir_perfect_lock); 2652 2653 if (hlist_empty(&adapter->fdir_filter_list)) { 2654 /* save mask and program input mask into HW */ 2655 memcpy(&adapter->fdir_mask, &mask, sizeof(mask)); 2656 err = ixgbe_fdir_set_input_mask_82599(hw, &mask); 2657 if (err) { 2658 e_err(drv, "Error writing mask\n"); 2659 goto err_out_w_lock; 2660 } 2661 } else if (memcmp(&adapter->fdir_mask, &mask, sizeof(mask))) { 2662 e_err(drv, "Only one mask supported per port\n"); 2663 goto err_out_w_lock; 2664 } 2665 2666 /* apply mask and compute/store hash */ 2667 ixgbe_atr_compute_perfect_hash_82599(&input->filter, &mask); 2668 2669 /* program filters to filter memory */ 2670 err = ixgbe_fdir_write_perfect_filter_82599(hw, 2671 &input->filter, input->sw_idx, 2672 (input->action == IXGBE_FDIR_DROP_QUEUE) ? 2673 IXGBE_FDIR_DROP_QUEUE : 2674 adapter->rx_ring[input->action]->reg_idx); 2675 if (err) 2676 goto err_out_w_lock; 2677 2678 ixgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx); 2679 2680 spin_unlock(&adapter->fdir_perfect_lock); 2681 2682 return err; 2683 err_out_w_lock: 2684 spin_unlock(&adapter->fdir_perfect_lock); 2685 err_out: 2686 kfree(input); 2687 return -EINVAL; 2688 } 2689 2690 static int ixgbe_del_ethtool_fdir_entry(struct ixgbe_adapter *adapter, 2691 struct ethtool_rxnfc *cmd) 2692 { 2693 struct ethtool_rx_flow_spec *fsp = 2694 (struct ethtool_rx_flow_spec *)&cmd->fs; 2695 int err; 2696 2697 spin_lock(&adapter->fdir_perfect_lock); 2698 err = ixgbe_update_ethtool_fdir_entry(adapter, NULL, fsp->location); 2699 spin_unlock(&adapter->fdir_perfect_lock); 2700 2701 return err; 2702 } 2703 2704 #define UDP_RSS_FLAGS (IXGBE_FLAG2_RSS_FIELD_IPV4_UDP | \ 2705 IXGBE_FLAG2_RSS_FIELD_IPV6_UDP) 2706 static int ixgbe_set_rss_hash_opt(struct ixgbe_adapter *adapter, 2707 struct ethtool_rxnfc *nfc) 2708 { 2709 u32 flags2 = adapter->flags2; 2710 2711 /* 2712 * RSS does not support anything other than hashing 2713 * to queues on src and dst IPs and ports 2714 */ 2715 if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST | 2716 RXH_L4_B_0_1 | RXH_L4_B_2_3)) 2717 return -EINVAL; 2718 2719 switch (nfc->flow_type) { 2720 case TCP_V4_FLOW: 2721 case TCP_V6_FLOW: 2722 if (!(nfc->data & RXH_IP_SRC) || 2723 !(nfc->data & RXH_IP_DST) || 2724 !(nfc->data & RXH_L4_B_0_1) || 2725 !(nfc->data & RXH_L4_B_2_3)) 2726 return -EINVAL; 2727 break; 2728 case UDP_V4_FLOW: 2729 if (!(nfc->data & RXH_IP_SRC) || 2730 !(nfc->data & RXH_IP_DST)) 2731 return -EINVAL; 2732 switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { 2733 case 0: 2734 flags2 &= ~IXGBE_FLAG2_RSS_FIELD_IPV4_UDP; 2735 break; 2736 case (RXH_L4_B_0_1 | RXH_L4_B_2_3): 2737 flags2 |= IXGBE_FLAG2_RSS_FIELD_IPV4_UDP; 2738 break; 2739 default: 2740 return -EINVAL; 2741 } 2742 break; 2743 case UDP_V6_FLOW: 2744 if (!(nfc->data & RXH_IP_SRC) || 2745 !(nfc->data & RXH_IP_DST)) 2746 return -EINVAL; 2747 switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { 2748 case 0: 2749 flags2 &= ~IXGBE_FLAG2_RSS_FIELD_IPV6_UDP; 2750 break; 2751 case (RXH_L4_B_0_1 | RXH_L4_B_2_3): 2752 flags2 |= IXGBE_FLAG2_RSS_FIELD_IPV6_UDP; 2753 break; 2754 default: 2755 return -EINVAL; 2756 } 2757 break; 2758 case AH_ESP_V4_FLOW: 2759 case AH_V4_FLOW: 2760 case ESP_V4_FLOW: 2761 case SCTP_V4_FLOW: 2762 case AH_ESP_V6_FLOW: 2763 case AH_V6_FLOW: 2764 case ESP_V6_FLOW: 2765 case SCTP_V6_FLOW: 2766 if (!(nfc->data & RXH_IP_SRC) || 2767 !(nfc->data & RXH_IP_DST) || 2768 (nfc->data & RXH_L4_B_0_1) || 2769 (nfc->data & RXH_L4_B_2_3)) 2770 return -EINVAL; 2771 break; 2772 default: 2773 return -EINVAL; 2774 } 2775 2776 /* if we changed something we need to update flags */ 2777 if (flags2 != adapter->flags2) { 2778 struct ixgbe_hw *hw = &adapter->hw; 2779 u32 mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC); 2780 2781 if ((flags2 & UDP_RSS_FLAGS) && 2782 !(adapter->flags2 & UDP_RSS_FLAGS)) 2783 e_warn(drv, "enabling UDP RSS: fragmented packets may arrive out of order to the stack above\n"); 2784 2785 adapter->flags2 = flags2; 2786 2787 /* Perform hash on these packet types */ 2788 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4 2789 | IXGBE_MRQC_RSS_FIELD_IPV4_TCP 2790 | IXGBE_MRQC_RSS_FIELD_IPV6 2791 | IXGBE_MRQC_RSS_FIELD_IPV6_TCP; 2792 2793 mrqc &= ~(IXGBE_MRQC_RSS_FIELD_IPV4_UDP | 2794 IXGBE_MRQC_RSS_FIELD_IPV6_UDP); 2795 2796 if (flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP) 2797 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP; 2798 2799 if (flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP) 2800 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP; 2801 2802 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); 2803 } 2804 2805 return 0; 2806 } 2807 2808 static int ixgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) 2809 { 2810 struct ixgbe_adapter *adapter = netdev_priv(dev); 2811 int ret = -EOPNOTSUPP; 2812 2813 switch (cmd->cmd) { 2814 case ETHTOOL_SRXCLSRLINS: 2815 ret = ixgbe_add_ethtool_fdir_entry(adapter, cmd); 2816 break; 2817 case ETHTOOL_SRXCLSRLDEL: 2818 ret = ixgbe_del_ethtool_fdir_entry(adapter, cmd); 2819 break; 2820 case ETHTOOL_SRXFH: 2821 ret = ixgbe_set_rss_hash_opt(adapter, cmd); 2822 break; 2823 default: 2824 break; 2825 } 2826 2827 return ret; 2828 } 2829 2830 static int ixgbe_get_ts_info(struct net_device *dev, 2831 struct ethtool_ts_info *info) 2832 { 2833 struct ixgbe_adapter *adapter = netdev_priv(dev); 2834 2835 switch (adapter->hw.mac.type) { 2836 case ixgbe_mac_X540: 2837 case ixgbe_mac_82599EB: 2838 info->so_timestamping = 2839 SOF_TIMESTAMPING_TX_SOFTWARE | 2840 SOF_TIMESTAMPING_RX_SOFTWARE | 2841 SOF_TIMESTAMPING_SOFTWARE | 2842 SOF_TIMESTAMPING_TX_HARDWARE | 2843 SOF_TIMESTAMPING_RX_HARDWARE | 2844 SOF_TIMESTAMPING_RAW_HARDWARE; 2845 2846 if (adapter->ptp_clock) 2847 info->phc_index = ptp_clock_index(adapter->ptp_clock); 2848 else 2849 info->phc_index = -1; 2850 2851 info->tx_types = 2852 (1 << HWTSTAMP_TX_OFF) | 2853 (1 << HWTSTAMP_TX_ON); 2854 2855 info->rx_filters = 2856 (1 << HWTSTAMP_FILTER_NONE) | 2857 (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) | 2858 (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) | 2859 (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) | 2860 (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) | 2861 (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) | 2862 (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) | 2863 (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) | 2864 (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) | 2865 (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) | 2866 (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) | 2867 (1 << HWTSTAMP_FILTER_PTP_V2_EVENT); 2868 break; 2869 default: 2870 return ethtool_op_get_ts_info(dev, info); 2871 } 2872 return 0; 2873 } 2874 2875 static unsigned int ixgbe_max_channels(struct ixgbe_adapter *adapter) 2876 { 2877 unsigned int max_combined; 2878 u8 tcs = netdev_get_num_tc(adapter->netdev); 2879 2880 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) { 2881 /* We only support one q_vector without MSI-X */ 2882 max_combined = 1; 2883 } else if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { 2884 /* SR-IOV currently only allows one queue on the PF */ 2885 max_combined = 1; 2886 } else if (tcs > 1) { 2887 /* For DCB report channels per traffic class */ 2888 if (adapter->hw.mac.type == ixgbe_mac_82598EB) { 2889 /* 8 TC w/ 4 queues per TC */ 2890 max_combined = 4; 2891 } else if (tcs > 4) { 2892 /* 8 TC w/ 8 queues per TC */ 2893 max_combined = 8; 2894 } else { 2895 /* 4 TC w/ 16 queues per TC */ 2896 max_combined = 16; 2897 } 2898 } else if (adapter->atr_sample_rate) { 2899 /* support up to 64 queues with ATR */ 2900 max_combined = IXGBE_MAX_FDIR_INDICES; 2901 } else { 2902 /* support up to 16 queues with RSS */ 2903 max_combined = IXGBE_MAX_RSS_INDICES; 2904 } 2905 2906 return max_combined; 2907 } 2908 2909 static void ixgbe_get_channels(struct net_device *dev, 2910 struct ethtool_channels *ch) 2911 { 2912 struct ixgbe_adapter *adapter = netdev_priv(dev); 2913 2914 /* report maximum channels */ 2915 ch->max_combined = ixgbe_max_channels(adapter); 2916 2917 /* report info for other vector */ 2918 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { 2919 ch->max_other = NON_Q_VECTORS; 2920 ch->other_count = NON_Q_VECTORS; 2921 } 2922 2923 /* record RSS queues */ 2924 ch->combined_count = adapter->ring_feature[RING_F_RSS].indices; 2925 2926 /* nothing else to report if RSS is disabled */ 2927 if (ch->combined_count == 1) 2928 return; 2929 2930 /* we do not support ATR queueing if SR-IOV is enabled */ 2931 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) 2932 return; 2933 2934 /* same thing goes for being DCB enabled */ 2935 if (netdev_get_num_tc(dev) > 1) 2936 return; 2937 2938 /* if ATR is disabled we can exit */ 2939 if (!adapter->atr_sample_rate) 2940 return; 2941 2942 /* report flow director queues as maximum channels */ 2943 ch->combined_count = adapter->ring_feature[RING_F_FDIR].indices; 2944 } 2945 2946 static int ixgbe_set_channels(struct net_device *dev, 2947 struct ethtool_channels *ch) 2948 { 2949 struct ixgbe_adapter *adapter = netdev_priv(dev); 2950 unsigned int count = ch->combined_count; 2951 2952 /* verify they are not requesting separate vectors */ 2953 if (!count || ch->rx_count || ch->tx_count) 2954 return -EINVAL; 2955 2956 /* verify other_count has not changed */ 2957 if (ch->other_count != NON_Q_VECTORS) 2958 return -EINVAL; 2959 2960 /* verify the number of channels does not exceed hardware limits */ 2961 if (count > ixgbe_max_channels(adapter)) 2962 return -EINVAL; 2963 2964 /* update feature limits from largest to smallest supported values */ 2965 adapter->ring_feature[RING_F_FDIR].limit = count; 2966 2967 /* cap RSS limit at 16 */ 2968 if (count > IXGBE_MAX_RSS_INDICES) 2969 count = IXGBE_MAX_RSS_INDICES; 2970 adapter->ring_feature[RING_F_RSS].limit = count; 2971 2972 #ifdef IXGBE_FCOE 2973 /* cap FCoE limit at 8 */ 2974 if (count > IXGBE_FCRETA_SIZE) 2975 count = IXGBE_FCRETA_SIZE; 2976 adapter->ring_feature[RING_F_FCOE].limit = count; 2977 2978 #endif 2979 /* use setup TC to update any traffic class queue mapping */ 2980 return ixgbe_setup_tc(dev, netdev_get_num_tc(dev)); 2981 } 2982 2983 static int ixgbe_get_module_info(struct net_device *dev, 2984 struct ethtool_modinfo *modinfo) 2985 { 2986 struct ixgbe_adapter *adapter = netdev_priv(dev); 2987 struct ixgbe_hw *hw = &adapter->hw; 2988 u32 status; 2989 u8 sff8472_rev, addr_mode; 2990 bool page_swap = false; 2991 2992 /* Check whether we support SFF-8472 or not */ 2993 status = hw->phy.ops.read_i2c_eeprom(hw, 2994 IXGBE_SFF_SFF_8472_COMP, 2995 &sff8472_rev); 2996 if (status != 0) 2997 return -EIO; 2998 2999 /* addressing mode is not supported */ 3000 status = hw->phy.ops.read_i2c_eeprom(hw, 3001 IXGBE_SFF_SFF_8472_SWAP, 3002 &addr_mode); 3003 if (status != 0) 3004 return -EIO; 3005 3006 if (addr_mode & IXGBE_SFF_ADDRESSING_MODE) { 3007 e_err(drv, "Address change required to access page 0xA2, but not supported. Please report the module type to the driver maintainers.\n"); 3008 page_swap = true; 3009 } 3010 3011 if (sff8472_rev == IXGBE_SFF_SFF_8472_UNSUP || page_swap) { 3012 /* We have a SFP, but it does not support SFF-8472 */ 3013 modinfo->type = ETH_MODULE_SFF_8079; 3014 modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN; 3015 } else { 3016 /* We have a SFP which supports a revision of SFF-8472. */ 3017 modinfo->type = ETH_MODULE_SFF_8472; 3018 modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; 3019 } 3020 3021 return 0; 3022 } 3023 3024 static int ixgbe_get_module_eeprom(struct net_device *dev, 3025 struct ethtool_eeprom *ee, 3026 u8 *data) 3027 { 3028 struct ixgbe_adapter *adapter = netdev_priv(dev); 3029 struct ixgbe_hw *hw = &adapter->hw; 3030 u32 status = IXGBE_ERR_PHY_ADDR_INVALID; 3031 u8 databyte = 0xFF; 3032 int i = 0; 3033 3034 if (ee->len == 0) 3035 return -EINVAL; 3036 3037 for (i = ee->offset; i < ee->offset + ee->len; i++) { 3038 /* I2C reads can take long time */ 3039 if (test_bit(__IXGBE_IN_SFP_INIT, &adapter->state)) 3040 return -EBUSY; 3041 3042 if (i < ETH_MODULE_SFF_8079_LEN) 3043 status = hw->phy.ops.read_i2c_eeprom(hw, i, &databyte); 3044 else 3045 status = hw->phy.ops.read_i2c_sff8472(hw, i, &databyte); 3046 3047 if (status != 0) 3048 return -EIO; 3049 3050 data[i - ee->offset] = databyte; 3051 } 3052 3053 return 0; 3054 } 3055 3056 static const struct ethtool_ops ixgbe_ethtool_ops = { 3057 .get_settings = ixgbe_get_settings, 3058 .set_settings = ixgbe_set_settings, 3059 .get_drvinfo = ixgbe_get_drvinfo, 3060 .get_regs_len = ixgbe_get_regs_len, 3061 .get_regs = ixgbe_get_regs, 3062 .get_wol = ixgbe_get_wol, 3063 .set_wol = ixgbe_set_wol, 3064 .nway_reset = ixgbe_nway_reset, 3065 .get_link = ethtool_op_get_link, 3066 .get_eeprom_len = ixgbe_get_eeprom_len, 3067 .get_eeprom = ixgbe_get_eeprom, 3068 .set_eeprom = ixgbe_set_eeprom, 3069 .get_ringparam = ixgbe_get_ringparam, 3070 .set_ringparam = ixgbe_set_ringparam, 3071 .get_pauseparam = ixgbe_get_pauseparam, 3072 .set_pauseparam = ixgbe_set_pauseparam, 3073 .get_msglevel = ixgbe_get_msglevel, 3074 .set_msglevel = ixgbe_set_msglevel, 3075 .self_test = ixgbe_diag_test, 3076 .get_strings = ixgbe_get_strings, 3077 .set_phys_id = ixgbe_set_phys_id, 3078 .get_sset_count = ixgbe_get_sset_count, 3079 .get_ethtool_stats = ixgbe_get_ethtool_stats, 3080 .get_coalesce = ixgbe_get_coalesce, 3081 .set_coalesce = ixgbe_set_coalesce, 3082 .get_rxnfc = ixgbe_get_rxnfc, 3083 .set_rxnfc = ixgbe_set_rxnfc, 3084 .get_channels = ixgbe_get_channels, 3085 .set_channels = ixgbe_set_channels, 3086 .get_ts_info = ixgbe_get_ts_info, 3087 .get_module_info = ixgbe_get_module_info, 3088 .get_module_eeprom = ixgbe_get_module_eeprom, 3089 }; 3090 3091 void ixgbe_set_ethtool_ops(struct net_device *netdev) 3092 { 3093 netdev->ethtool_ops = &ixgbe_ethtool_ops; 3094 } 3095