1 /******************************************************************************* 2 3 Intel 10 Gigabit PCI Express Linux driver 4 Copyright(c) 1999 - 2012 Intel Corporation. 5 6 This program is free software; you can redistribute it and/or modify it 7 under the terms and conditions of the GNU General Public License, 8 version 2, as published by the Free Software Foundation. 9 10 This program is distributed in the hope it will be useful, but WITHOUT 11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 13 more details. 14 15 You should have received a copy of the GNU General Public License along with 16 this program; if not, write to the Free Software Foundation, Inc., 17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 18 19 The full GNU General Public License is included in this distribution in 20 the file called "COPYING". 21 22 Contact Information: 23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 25 26 *******************************************************************************/ 27 28 /* ethtool support for ixgbe */ 29 30 #include <linux/interrupt.h> 31 #include <linux/types.h> 32 #include <linux/module.h> 33 #include <linux/slab.h> 34 #include <linux/pci.h> 35 #include <linux/netdevice.h> 36 #include <linux/ethtool.h> 37 #include <linux/vmalloc.h> 38 #include <linux/highmem.h> 39 #include <linux/uaccess.h> 40 41 #include "ixgbe.h" 42 43 44 #define IXGBE_ALL_RAR_ENTRIES 16 45 46 enum {NETDEV_STATS, IXGBE_STATS}; 47 48 struct ixgbe_stats { 49 char stat_string[ETH_GSTRING_LEN]; 50 int type; 51 int sizeof_stat; 52 int stat_offset; 53 }; 54 55 #define IXGBE_STAT(m) IXGBE_STATS, \ 56 sizeof(((struct ixgbe_adapter *)0)->m), \ 57 offsetof(struct ixgbe_adapter, m) 58 #define IXGBE_NETDEV_STAT(m) NETDEV_STATS, \ 59 sizeof(((struct rtnl_link_stats64 *)0)->m), \ 60 offsetof(struct rtnl_link_stats64, m) 61 62 static const struct ixgbe_stats ixgbe_gstrings_stats[] = { 63 {"rx_packets", IXGBE_NETDEV_STAT(rx_packets)}, 64 {"tx_packets", IXGBE_NETDEV_STAT(tx_packets)}, 65 {"rx_bytes", IXGBE_NETDEV_STAT(rx_bytes)}, 66 {"tx_bytes", IXGBE_NETDEV_STAT(tx_bytes)}, 67 {"rx_pkts_nic", IXGBE_STAT(stats.gprc)}, 68 {"tx_pkts_nic", IXGBE_STAT(stats.gptc)}, 69 {"rx_bytes_nic", IXGBE_STAT(stats.gorc)}, 70 {"tx_bytes_nic", IXGBE_STAT(stats.gotc)}, 71 {"lsc_int", IXGBE_STAT(lsc_int)}, 72 {"tx_busy", IXGBE_STAT(tx_busy)}, 73 {"non_eop_descs", IXGBE_STAT(non_eop_descs)}, 74 {"rx_errors", IXGBE_NETDEV_STAT(rx_errors)}, 75 {"tx_errors", IXGBE_NETDEV_STAT(tx_errors)}, 76 {"rx_dropped", IXGBE_NETDEV_STAT(rx_dropped)}, 77 {"tx_dropped", IXGBE_NETDEV_STAT(tx_dropped)}, 78 {"multicast", IXGBE_NETDEV_STAT(multicast)}, 79 {"broadcast", IXGBE_STAT(stats.bprc)}, 80 {"rx_no_buffer_count", IXGBE_STAT(stats.rnbc[0]) }, 81 {"collisions", IXGBE_NETDEV_STAT(collisions)}, 82 {"rx_over_errors", IXGBE_NETDEV_STAT(rx_over_errors)}, 83 {"rx_crc_errors", IXGBE_NETDEV_STAT(rx_crc_errors)}, 84 {"rx_frame_errors", IXGBE_NETDEV_STAT(rx_frame_errors)}, 85 {"hw_rsc_aggregated", IXGBE_STAT(rsc_total_count)}, 86 {"hw_rsc_flushed", IXGBE_STAT(rsc_total_flush)}, 87 {"fdir_match", IXGBE_STAT(stats.fdirmatch)}, 88 {"fdir_miss", IXGBE_STAT(stats.fdirmiss)}, 89 {"fdir_overflow", IXGBE_STAT(fdir_overflow)}, 90 {"rx_fifo_errors", IXGBE_NETDEV_STAT(rx_fifo_errors)}, 91 {"rx_missed_errors", IXGBE_NETDEV_STAT(rx_missed_errors)}, 92 {"tx_aborted_errors", IXGBE_NETDEV_STAT(tx_aborted_errors)}, 93 {"tx_carrier_errors", IXGBE_NETDEV_STAT(tx_carrier_errors)}, 94 {"tx_fifo_errors", IXGBE_NETDEV_STAT(tx_fifo_errors)}, 95 {"tx_heartbeat_errors", IXGBE_NETDEV_STAT(tx_heartbeat_errors)}, 96 {"tx_timeout_count", IXGBE_STAT(tx_timeout_count)}, 97 {"tx_restart_queue", IXGBE_STAT(restart_queue)}, 98 {"rx_long_length_errors", IXGBE_STAT(stats.roc)}, 99 {"rx_short_length_errors", IXGBE_STAT(stats.ruc)}, 100 {"tx_flow_control_xon", IXGBE_STAT(stats.lxontxc)}, 101 {"rx_flow_control_xon", IXGBE_STAT(stats.lxonrxc)}, 102 {"tx_flow_control_xoff", IXGBE_STAT(stats.lxofftxc)}, 103 {"rx_flow_control_xoff", IXGBE_STAT(stats.lxoffrxc)}, 104 {"rx_csum_offload_errors", IXGBE_STAT(hw_csum_rx_error)}, 105 {"alloc_rx_page_failed", IXGBE_STAT(alloc_rx_page_failed)}, 106 {"alloc_rx_buff_failed", IXGBE_STAT(alloc_rx_buff_failed)}, 107 {"rx_no_dma_resources", IXGBE_STAT(hw_rx_no_dma_resources)}, 108 {"os2bmc_rx_by_bmc", IXGBE_STAT(stats.o2bgptc)}, 109 {"os2bmc_tx_by_bmc", IXGBE_STAT(stats.b2ospc)}, 110 {"os2bmc_tx_by_host", IXGBE_STAT(stats.o2bspc)}, 111 {"os2bmc_rx_by_host", IXGBE_STAT(stats.b2ogprc)}, 112 #ifdef IXGBE_FCOE 113 {"fcoe_bad_fccrc", IXGBE_STAT(stats.fccrc)}, 114 {"rx_fcoe_dropped", IXGBE_STAT(stats.fcoerpdc)}, 115 {"rx_fcoe_packets", IXGBE_STAT(stats.fcoeprc)}, 116 {"rx_fcoe_dwords", IXGBE_STAT(stats.fcoedwrc)}, 117 {"fcoe_noddp", IXGBE_STAT(stats.fcoe_noddp)}, 118 {"fcoe_noddp_ext_buff", IXGBE_STAT(stats.fcoe_noddp_ext_buff)}, 119 {"tx_fcoe_packets", IXGBE_STAT(stats.fcoeptc)}, 120 {"tx_fcoe_dwords", IXGBE_STAT(stats.fcoedwtc)}, 121 #endif /* IXGBE_FCOE */ 122 }; 123 124 /* ixgbe allocates num_tx_queues and num_rx_queues symmetrically so 125 * we set the num_rx_queues to evaluate to num_tx_queues. This is 126 * used because we do not have a good way to get the max number of 127 * rx queues with CONFIG_RPS disabled. 128 */ 129 #define IXGBE_NUM_RX_QUEUES netdev->num_tx_queues 130 131 #define IXGBE_QUEUE_STATS_LEN ( \ 132 (netdev->num_tx_queues + IXGBE_NUM_RX_QUEUES) * \ 133 (sizeof(struct ixgbe_queue_stats) / sizeof(u64))) 134 #define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats) 135 #define IXGBE_PB_STATS_LEN ( \ 136 (sizeof(((struct ixgbe_adapter *)0)->stats.pxonrxc) + \ 137 sizeof(((struct ixgbe_adapter *)0)->stats.pxontxc) + \ 138 sizeof(((struct ixgbe_adapter *)0)->stats.pxoffrxc) + \ 139 sizeof(((struct ixgbe_adapter *)0)->stats.pxofftxc)) \ 140 / sizeof(u64)) 141 #define IXGBE_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + \ 142 IXGBE_PB_STATS_LEN + \ 143 IXGBE_QUEUE_STATS_LEN) 144 145 static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = { 146 "Register test (offline)", "Eeprom test (offline)", 147 "Interrupt test (offline)", "Loopback test (offline)", 148 "Link test (on/offline)" 149 }; 150 #define IXGBE_TEST_LEN sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN 151 152 static int ixgbe_get_settings(struct net_device *netdev, 153 struct ethtool_cmd *ecmd) 154 { 155 struct ixgbe_adapter *adapter = netdev_priv(netdev); 156 struct ixgbe_hw *hw = &adapter->hw; 157 u32 link_speed = 0; 158 bool link_up; 159 160 ecmd->supported = SUPPORTED_10000baseT_Full; 161 ecmd->autoneg = AUTONEG_ENABLE; 162 ecmd->transceiver = XCVR_EXTERNAL; 163 if ((hw->phy.media_type == ixgbe_media_type_copper) || 164 (hw->phy.multispeed_fiber)) { 165 ecmd->supported |= (SUPPORTED_1000baseT_Full | 166 SUPPORTED_Autoneg); 167 168 switch (hw->mac.type) { 169 case ixgbe_mac_X540: 170 ecmd->supported |= SUPPORTED_100baseT_Full; 171 break; 172 default: 173 break; 174 } 175 176 ecmd->advertising = ADVERTISED_Autoneg; 177 if (hw->phy.autoneg_advertised) { 178 if (hw->phy.autoneg_advertised & 179 IXGBE_LINK_SPEED_100_FULL) 180 ecmd->advertising |= ADVERTISED_100baseT_Full; 181 if (hw->phy.autoneg_advertised & 182 IXGBE_LINK_SPEED_10GB_FULL) 183 ecmd->advertising |= ADVERTISED_10000baseT_Full; 184 if (hw->phy.autoneg_advertised & 185 IXGBE_LINK_SPEED_1GB_FULL) 186 ecmd->advertising |= ADVERTISED_1000baseT_Full; 187 } else { 188 /* 189 * Default advertised modes in case 190 * phy.autoneg_advertised isn't set. 191 */ 192 ecmd->advertising |= (ADVERTISED_10000baseT_Full | 193 ADVERTISED_1000baseT_Full); 194 if (hw->mac.type == ixgbe_mac_X540) 195 ecmd->advertising |= ADVERTISED_100baseT_Full; 196 } 197 198 if (hw->phy.media_type == ixgbe_media_type_copper) { 199 ecmd->supported |= SUPPORTED_TP; 200 ecmd->advertising |= ADVERTISED_TP; 201 ecmd->port = PORT_TP; 202 } else { 203 ecmd->supported |= SUPPORTED_FIBRE; 204 ecmd->advertising |= ADVERTISED_FIBRE; 205 ecmd->port = PORT_FIBRE; 206 } 207 } else if (hw->phy.media_type == ixgbe_media_type_backplane) { 208 /* Set as FIBRE until SERDES defined in kernel */ 209 if (hw->device_id == IXGBE_DEV_ID_82598_BX) { 210 ecmd->supported = (SUPPORTED_1000baseT_Full | 211 SUPPORTED_FIBRE); 212 ecmd->advertising = (ADVERTISED_1000baseT_Full | 213 ADVERTISED_FIBRE); 214 ecmd->port = PORT_FIBRE; 215 ecmd->autoneg = AUTONEG_DISABLE; 216 } else if ((hw->device_id == IXGBE_DEV_ID_82599_COMBO_BACKPLANE) || 217 (hw->device_id == IXGBE_DEV_ID_82599_KX4_MEZZ)) { 218 ecmd->supported |= (SUPPORTED_1000baseT_Full | 219 SUPPORTED_Autoneg | 220 SUPPORTED_FIBRE); 221 ecmd->advertising = (ADVERTISED_10000baseT_Full | 222 ADVERTISED_1000baseT_Full | 223 ADVERTISED_Autoneg | 224 ADVERTISED_FIBRE); 225 ecmd->port = PORT_FIBRE; 226 } else { 227 ecmd->supported |= (SUPPORTED_1000baseT_Full | 228 SUPPORTED_FIBRE); 229 ecmd->advertising = (ADVERTISED_10000baseT_Full | 230 ADVERTISED_1000baseT_Full | 231 ADVERTISED_FIBRE); 232 ecmd->port = PORT_FIBRE; 233 } 234 } else { 235 ecmd->supported |= SUPPORTED_FIBRE; 236 ecmd->advertising = (ADVERTISED_10000baseT_Full | 237 ADVERTISED_FIBRE); 238 ecmd->port = PORT_FIBRE; 239 ecmd->autoneg = AUTONEG_DISABLE; 240 } 241 242 /* Get PHY type */ 243 switch (adapter->hw.phy.type) { 244 case ixgbe_phy_tn: 245 case ixgbe_phy_aq: 246 case ixgbe_phy_cu_unknown: 247 /* Copper 10G-BASET */ 248 ecmd->port = PORT_TP; 249 break; 250 case ixgbe_phy_qt: 251 ecmd->port = PORT_FIBRE; 252 break; 253 case ixgbe_phy_nl: 254 case ixgbe_phy_sfp_passive_tyco: 255 case ixgbe_phy_sfp_passive_unknown: 256 case ixgbe_phy_sfp_ftl: 257 case ixgbe_phy_sfp_avago: 258 case ixgbe_phy_sfp_intel: 259 case ixgbe_phy_sfp_unknown: 260 switch (adapter->hw.phy.sfp_type) { 261 /* SFP+ devices, further checking needed */ 262 case ixgbe_sfp_type_da_cu: 263 case ixgbe_sfp_type_da_cu_core0: 264 case ixgbe_sfp_type_da_cu_core1: 265 ecmd->port = PORT_DA; 266 break; 267 case ixgbe_sfp_type_sr: 268 case ixgbe_sfp_type_lr: 269 case ixgbe_sfp_type_srlr_core0: 270 case ixgbe_sfp_type_srlr_core1: 271 ecmd->port = PORT_FIBRE; 272 break; 273 case ixgbe_sfp_type_not_present: 274 ecmd->port = PORT_NONE; 275 break; 276 case ixgbe_sfp_type_1g_cu_core0: 277 case ixgbe_sfp_type_1g_cu_core1: 278 ecmd->port = PORT_TP; 279 ecmd->supported = SUPPORTED_TP; 280 ecmd->advertising = (ADVERTISED_1000baseT_Full | 281 ADVERTISED_TP); 282 break; 283 case ixgbe_sfp_type_unknown: 284 default: 285 ecmd->port = PORT_OTHER; 286 break; 287 } 288 break; 289 case ixgbe_phy_xaui: 290 ecmd->port = PORT_NONE; 291 break; 292 case ixgbe_phy_unknown: 293 case ixgbe_phy_generic: 294 case ixgbe_phy_sfp_unsupported: 295 default: 296 ecmd->port = PORT_OTHER; 297 break; 298 } 299 300 hw->mac.ops.check_link(hw, &link_speed, &link_up, false); 301 if (link_up) { 302 switch (link_speed) { 303 case IXGBE_LINK_SPEED_10GB_FULL: 304 ethtool_cmd_speed_set(ecmd, SPEED_10000); 305 break; 306 case IXGBE_LINK_SPEED_1GB_FULL: 307 ethtool_cmd_speed_set(ecmd, SPEED_1000); 308 break; 309 case IXGBE_LINK_SPEED_100_FULL: 310 ethtool_cmd_speed_set(ecmd, SPEED_100); 311 break; 312 default: 313 break; 314 } 315 ecmd->duplex = DUPLEX_FULL; 316 } else { 317 ethtool_cmd_speed_set(ecmd, -1); 318 ecmd->duplex = -1; 319 } 320 321 return 0; 322 } 323 324 static int ixgbe_set_settings(struct net_device *netdev, 325 struct ethtool_cmd *ecmd) 326 { 327 struct ixgbe_adapter *adapter = netdev_priv(netdev); 328 struct ixgbe_hw *hw = &adapter->hw; 329 u32 advertised, old; 330 s32 err = 0; 331 332 if ((hw->phy.media_type == ixgbe_media_type_copper) || 333 (hw->phy.multispeed_fiber)) { 334 /* 335 * this function does not support duplex forcing, but can 336 * limit the advertising of the adapter to the specified speed 337 */ 338 if (ecmd->autoneg == AUTONEG_DISABLE) 339 return -EINVAL; 340 341 if (ecmd->advertising & ~ecmd->supported) 342 return -EINVAL; 343 344 old = hw->phy.autoneg_advertised; 345 advertised = 0; 346 if (ecmd->advertising & ADVERTISED_10000baseT_Full) 347 advertised |= IXGBE_LINK_SPEED_10GB_FULL; 348 349 if (ecmd->advertising & ADVERTISED_1000baseT_Full) 350 advertised |= IXGBE_LINK_SPEED_1GB_FULL; 351 352 if (ecmd->advertising & ADVERTISED_100baseT_Full) 353 advertised |= IXGBE_LINK_SPEED_100_FULL; 354 355 if (old == advertised) 356 return err; 357 /* this sets the link speed and restarts auto-neg */ 358 hw->mac.autotry_restart = true; 359 err = hw->mac.ops.setup_link(hw, advertised, true, true); 360 if (err) { 361 e_info(probe, "setup link failed with code %d\n", err); 362 hw->mac.ops.setup_link(hw, old, true, true); 363 } 364 } else { 365 /* in this case we currently only support 10Gb/FULL */ 366 u32 speed = ethtool_cmd_speed(ecmd); 367 if ((ecmd->autoneg == AUTONEG_ENABLE) || 368 (ecmd->advertising != ADVERTISED_10000baseT_Full) || 369 (speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL)) 370 return -EINVAL; 371 } 372 373 return err; 374 } 375 376 static void ixgbe_get_pauseparam(struct net_device *netdev, 377 struct ethtool_pauseparam *pause) 378 { 379 struct ixgbe_adapter *adapter = netdev_priv(netdev); 380 struct ixgbe_hw *hw = &adapter->hw; 381 382 if (hw->fc.disable_fc_autoneg) 383 pause->autoneg = 0; 384 else 385 pause->autoneg = 1; 386 387 if (hw->fc.current_mode == ixgbe_fc_rx_pause) { 388 pause->rx_pause = 1; 389 } else if (hw->fc.current_mode == ixgbe_fc_tx_pause) { 390 pause->tx_pause = 1; 391 } else if (hw->fc.current_mode == ixgbe_fc_full) { 392 pause->rx_pause = 1; 393 pause->tx_pause = 1; 394 } 395 } 396 397 static int ixgbe_set_pauseparam(struct net_device *netdev, 398 struct ethtool_pauseparam *pause) 399 { 400 struct ixgbe_adapter *adapter = netdev_priv(netdev); 401 struct ixgbe_hw *hw = &adapter->hw; 402 struct ixgbe_fc_info fc = hw->fc; 403 404 /* 82598 does no support link flow control with DCB enabled */ 405 if ((hw->mac.type == ixgbe_mac_82598EB) && 406 (adapter->flags & IXGBE_FLAG_DCB_ENABLED)) 407 return -EINVAL; 408 409 fc.disable_fc_autoneg = (pause->autoneg != AUTONEG_ENABLE); 410 411 if ((pause->rx_pause && pause->tx_pause) || pause->autoneg) 412 fc.requested_mode = ixgbe_fc_full; 413 else if (pause->rx_pause && !pause->tx_pause) 414 fc.requested_mode = ixgbe_fc_rx_pause; 415 else if (!pause->rx_pause && pause->tx_pause) 416 fc.requested_mode = ixgbe_fc_tx_pause; 417 else 418 fc.requested_mode = ixgbe_fc_none; 419 420 /* if the thing changed then we'll update and use new autoneg */ 421 if (memcmp(&fc, &hw->fc, sizeof(struct ixgbe_fc_info))) { 422 hw->fc = fc; 423 if (netif_running(netdev)) 424 ixgbe_reinit_locked(adapter); 425 else 426 ixgbe_reset(adapter); 427 } 428 429 return 0; 430 } 431 432 static u32 ixgbe_get_msglevel(struct net_device *netdev) 433 { 434 struct ixgbe_adapter *adapter = netdev_priv(netdev); 435 return adapter->msg_enable; 436 } 437 438 static void ixgbe_set_msglevel(struct net_device *netdev, u32 data) 439 { 440 struct ixgbe_adapter *adapter = netdev_priv(netdev); 441 adapter->msg_enable = data; 442 } 443 444 static int ixgbe_get_regs_len(struct net_device *netdev) 445 { 446 #define IXGBE_REGS_LEN 1129 447 return IXGBE_REGS_LEN * sizeof(u32); 448 } 449 450 #define IXGBE_GET_STAT(_A_, _R_) _A_->stats._R_ 451 452 static void ixgbe_get_regs(struct net_device *netdev, 453 struct ethtool_regs *regs, void *p) 454 { 455 struct ixgbe_adapter *adapter = netdev_priv(netdev); 456 struct ixgbe_hw *hw = &adapter->hw; 457 u32 *regs_buff = p; 458 u8 i; 459 460 memset(p, 0, IXGBE_REGS_LEN * sizeof(u32)); 461 462 regs->version = (1 << 24) | hw->revision_id << 16 | hw->device_id; 463 464 /* General Registers */ 465 regs_buff[0] = IXGBE_READ_REG(hw, IXGBE_CTRL); 466 regs_buff[1] = IXGBE_READ_REG(hw, IXGBE_STATUS); 467 regs_buff[2] = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); 468 regs_buff[3] = IXGBE_READ_REG(hw, IXGBE_ESDP); 469 regs_buff[4] = IXGBE_READ_REG(hw, IXGBE_EODSDP); 470 regs_buff[5] = IXGBE_READ_REG(hw, IXGBE_LEDCTL); 471 regs_buff[6] = IXGBE_READ_REG(hw, IXGBE_FRTIMER); 472 regs_buff[7] = IXGBE_READ_REG(hw, IXGBE_TCPTIMER); 473 474 /* NVM Register */ 475 regs_buff[8] = IXGBE_READ_REG(hw, IXGBE_EEC); 476 regs_buff[9] = IXGBE_READ_REG(hw, IXGBE_EERD); 477 regs_buff[10] = IXGBE_READ_REG(hw, IXGBE_FLA); 478 regs_buff[11] = IXGBE_READ_REG(hw, IXGBE_EEMNGCTL); 479 regs_buff[12] = IXGBE_READ_REG(hw, IXGBE_EEMNGDATA); 480 regs_buff[13] = IXGBE_READ_REG(hw, IXGBE_FLMNGCTL); 481 regs_buff[14] = IXGBE_READ_REG(hw, IXGBE_FLMNGDATA); 482 regs_buff[15] = IXGBE_READ_REG(hw, IXGBE_FLMNGCNT); 483 regs_buff[16] = IXGBE_READ_REG(hw, IXGBE_FLOP); 484 regs_buff[17] = IXGBE_READ_REG(hw, IXGBE_GRC); 485 486 /* Interrupt */ 487 /* don't read EICR because it can clear interrupt causes, instead 488 * read EICS which is a shadow but doesn't clear EICR */ 489 regs_buff[18] = IXGBE_READ_REG(hw, IXGBE_EICS); 490 regs_buff[19] = IXGBE_READ_REG(hw, IXGBE_EICS); 491 regs_buff[20] = IXGBE_READ_REG(hw, IXGBE_EIMS); 492 regs_buff[21] = IXGBE_READ_REG(hw, IXGBE_EIMC); 493 regs_buff[22] = IXGBE_READ_REG(hw, IXGBE_EIAC); 494 regs_buff[23] = IXGBE_READ_REG(hw, IXGBE_EIAM); 495 regs_buff[24] = IXGBE_READ_REG(hw, IXGBE_EITR(0)); 496 regs_buff[25] = IXGBE_READ_REG(hw, IXGBE_IVAR(0)); 497 regs_buff[26] = IXGBE_READ_REG(hw, IXGBE_MSIXT); 498 regs_buff[27] = IXGBE_READ_REG(hw, IXGBE_MSIXPBA); 499 regs_buff[28] = IXGBE_READ_REG(hw, IXGBE_PBACL(0)); 500 regs_buff[29] = IXGBE_READ_REG(hw, IXGBE_GPIE); 501 502 /* Flow Control */ 503 regs_buff[30] = IXGBE_READ_REG(hw, IXGBE_PFCTOP); 504 regs_buff[31] = IXGBE_READ_REG(hw, IXGBE_FCTTV(0)); 505 regs_buff[32] = IXGBE_READ_REG(hw, IXGBE_FCTTV(1)); 506 regs_buff[33] = IXGBE_READ_REG(hw, IXGBE_FCTTV(2)); 507 regs_buff[34] = IXGBE_READ_REG(hw, IXGBE_FCTTV(3)); 508 for (i = 0; i < 8; i++) { 509 switch (hw->mac.type) { 510 case ixgbe_mac_82598EB: 511 regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL(i)); 512 regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH(i)); 513 break; 514 case ixgbe_mac_82599EB: 515 case ixgbe_mac_X540: 516 regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL_82599(i)); 517 regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i)); 518 break; 519 default: 520 break; 521 } 522 } 523 regs_buff[51] = IXGBE_READ_REG(hw, IXGBE_FCRTV); 524 regs_buff[52] = IXGBE_READ_REG(hw, IXGBE_TFCS); 525 526 /* Receive DMA */ 527 for (i = 0; i < 64; i++) 528 regs_buff[53 + i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i)); 529 for (i = 0; i < 64; i++) 530 regs_buff[117 + i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i)); 531 for (i = 0; i < 64; i++) 532 regs_buff[181 + i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i)); 533 for (i = 0; i < 64; i++) 534 regs_buff[245 + i] = IXGBE_READ_REG(hw, IXGBE_RDH(i)); 535 for (i = 0; i < 64; i++) 536 regs_buff[309 + i] = IXGBE_READ_REG(hw, IXGBE_RDT(i)); 537 for (i = 0; i < 64; i++) 538 regs_buff[373 + i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i)); 539 for (i = 0; i < 16; i++) 540 regs_buff[437 + i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i)); 541 for (i = 0; i < 16; i++) 542 regs_buff[453 + i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i)); 543 regs_buff[469] = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); 544 for (i = 0; i < 8; i++) 545 regs_buff[470 + i] = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)); 546 regs_buff[478] = IXGBE_READ_REG(hw, IXGBE_RXCTRL); 547 regs_buff[479] = IXGBE_READ_REG(hw, IXGBE_DROPEN); 548 549 /* Receive */ 550 regs_buff[480] = IXGBE_READ_REG(hw, IXGBE_RXCSUM); 551 regs_buff[481] = IXGBE_READ_REG(hw, IXGBE_RFCTL); 552 for (i = 0; i < 16; i++) 553 regs_buff[482 + i] = IXGBE_READ_REG(hw, IXGBE_RAL(i)); 554 for (i = 0; i < 16; i++) 555 regs_buff[498 + i] = IXGBE_READ_REG(hw, IXGBE_RAH(i)); 556 regs_buff[514] = IXGBE_READ_REG(hw, IXGBE_PSRTYPE(0)); 557 regs_buff[515] = IXGBE_READ_REG(hw, IXGBE_FCTRL); 558 regs_buff[516] = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 559 regs_buff[517] = IXGBE_READ_REG(hw, IXGBE_MCSTCTRL); 560 regs_buff[518] = IXGBE_READ_REG(hw, IXGBE_MRQC); 561 regs_buff[519] = IXGBE_READ_REG(hw, IXGBE_VMD_CTL); 562 for (i = 0; i < 8; i++) 563 regs_buff[520 + i] = IXGBE_READ_REG(hw, IXGBE_IMIR(i)); 564 for (i = 0; i < 8; i++) 565 regs_buff[528 + i] = IXGBE_READ_REG(hw, IXGBE_IMIREXT(i)); 566 regs_buff[536] = IXGBE_READ_REG(hw, IXGBE_IMIRVP); 567 568 /* Transmit */ 569 for (i = 0; i < 32; i++) 570 regs_buff[537 + i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i)); 571 for (i = 0; i < 32; i++) 572 regs_buff[569 + i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i)); 573 for (i = 0; i < 32; i++) 574 regs_buff[601 + i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i)); 575 for (i = 0; i < 32; i++) 576 regs_buff[633 + i] = IXGBE_READ_REG(hw, IXGBE_TDH(i)); 577 for (i = 0; i < 32; i++) 578 regs_buff[665 + i] = IXGBE_READ_REG(hw, IXGBE_TDT(i)); 579 for (i = 0; i < 32; i++) 580 regs_buff[697 + i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i)); 581 for (i = 0; i < 32; i++) 582 regs_buff[729 + i] = IXGBE_READ_REG(hw, IXGBE_TDWBAL(i)); 583 for (i = 0; i < 32; i++) 584 regs_buff[761 + i] = IXGBE_READ_REG(hw, IXGBE_TDWBAH(i)); 585 regs_buff[793] = IXGBE_READ_REG(hw, IXGBE_DTXCTL); 586 for (i = 0; i < 16; i++) 587 regs_buff[794 + i] = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i)); 588 regs_buff[810] = IXGBE_READ_REG(hw, IXGBE_TIPG); 589 for (i = 0; i < 8; i++) 590 regs_buff[811 + i] = IXGBE_READ_REG(hw, IXGBE_TXPBSIZE(i)); 591 regs_buff[819] = IXGBE_READ_REG(hw, IXGBE_MNGTXMAP); 592 593 /* Wake Up */ 594 regs_buff[820] = IXGBE_READ_REG(hw, IXGBE_WUC); 595 regs_buff[821] = IXGBE_READ_REG(hw, IXGBE_WUFC); 596 regs_buff[822] = IXGBE_READ_REG(hw, IXGBE_WUS); 597 regs_buff[823] = IXGBE_READ_REG(hw, IXGBE_IPAV); 598 regs_buff[824] = IXGBE_READ_REG(hw, IXGBE_IP4AT); 599 regs_buff[825] = IXGBE_READ_REG(hw, IXGBE_IP6AT); 600 regs_buff[826] = IXGBE_READ_REG(hw, IXGBE_WUPL); 601 regs_buff[827] = IXGBE_READ_REG(hw, IXGBE_WUPM); 602 regs_buff[828] = IXGBE_READ_REG(hw, IXGBE_FHFT(0)); 603 604 /* DCB */ 605 regs_buff[829] = IXGBE_READ_REG(hw, IXGBE_RMCS); 606 regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_DPMCS); 607 regs_buff[831] = IXGBE_READ_REG(hw, IXGBE_PDPMCS); 608 regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RUPPBMR); 609 for (i = 0; i < 8; i++) 610 regs_buff[833 + i] = IXGBE_READ_REG(hw, IXGBE_RT2CR(i)); 611 for (i = 0; i < 8; i++) 612 regs_buff[841 + i] = IXGBE_READ_REG(hw, IXGBE_RT2SR(i)); 613 for (i = 0; i < 8; i++) 614 regs_buff[849 + i] = IXGBE_READ_REG(hw, IXGBE_TDTQ2TCCR(i)); 615 for (i = 0; i < 8; i++) 616 regs_buff[857 + i] = IXGBE_READ_REG(hw, IXGBE_TDTQ2TCSR(i)); 617 for (i = 0; i < 8; i++) 618 regs_buff[865 + i] = IXGBE_READ_REG(hw, IXGBE_TDPT2TCCR(i)); 619 for (i = 0; i < 8; i++) 620 regs_buff[873 + i] = IXGBE_READ_REG(hw, IXGBE_TDPT2TCSR(i)); 621 622 /* Statistics */ 623 regs_buff[881] = IXGBE_GET_STAT(adapter, crcerrs); 624 regs_buff[882] = IXGBE_GET_STAT(adapter, illerrc); 625 regs_buff[883] = IXGBE_GET_STAT(adapter, errbc); 626 regs_buff[884] = IXGBE_GET_STAT(adapter, mspdc); 627 for (i = 0; i < 8; i++) 628 regs_buff[885 + i] = IXGBE_GET_STAT(adapter, mpc[i]); 629 regs_buff[893] = IXGBE_GET_STAT(adapter, mlfc); 630 regs_buff[894] = IXGBE_GET_STAT(adapter, mrfc); 631 regs_buff[895] = IXGBE_GET_STAT(adapter, rlec); 632 regs_buff[896] = IXGBE_GET_STAT(adapter, lxontxc); 633 regs_buff[897] = IXGBE_GET_STAT(adapter, lxonrxc); 634 regs_buff[898] = IXGBE_GET_STAT(adapter, lxofftxc); 635 regs_buff[899] = IXGBE_GET_STAT(adapter, lxoffrxc); 636 for (i = 0; i < 8; i++) 637 regs_buff[900 + i] = IXGBE_GET_STAT(adapter, pxontxc[i]); 638 for (i = 0; i < 8; i++) 639 regs_buff[908 + i] = IXGBE_GET_STAT(adapter, pxonrxc[i]); 640 for (i = 0; i < 8; i++) 641 regs_buff[916 + i] = IXGBE_GET_STAT(adapter, pxofftxc[i]); 642 for (i = 0; i < 8; i++) 643 regs_buff[924 + i] = IXGBE_GET_STAT(adapter, pxoffrxc[i]); 644 regs_buff[932] = IXGBE_GET_STAT(adapter, prc64); 645 regs_buff[933] = IXGBE_GET_STAT(adapter, prc127); 646 regs_buff[934] = IXGBE_GET_STAT(adapter, prc255); 647 regs_buff[935] = IXGBE_GET_STAT(adapter, prc511); 648 regs_buff[936] = IXGBE_GET_STAT(adapter, prc1023); 649 regs_buff[937] = IXGBE_GET_STAT(adapter, prc1522); 650 regs_buff[938] = IXGBE_GET_STAT(adapter, gprc); 651 regs_buff[939] = IXGBE_GET_STAT(adapter, bprc); 652 regs_buff[940] = IXGBE_GET_STAT(adapter, mprc); 653 regs_buff[941] = IXGBE_GET_STAT(adapter, gptc); 654 regs_buff[942] = IXGBE_GET_STAT(adapter, gorc); 655 regs_buff[944] = IXGBE_GET_STAT(adapter, gotc); 656 for (i = 0; i < 8; i++) 657 regs_buff[946 + i] = IXGBE_GET_STAT(adapter, rnbc[i]); 658 regs_buff[954] = IXGBE_GET_STAT(adapter, ruc); 659 regs_buff[955] = IXGBE_GET_STAT(adapter, rfc); 660 regs_buff[956] = IXGBE_GET_STAT(adapter, roc); 661 regs_buff[957] = IXGBE_GET_STAT(adapter, rjc); 662 regs_buff[958] = IXGBE_GET_STAT(adapter, mngprc); 663 regs_buff[959] = IXGBE_GET_STAT(adapter, mngpdc); 664 regs_buff[960] = IXGBE_GET_STAT(adapter, mngptc); 665 regs_buff[961] = IXGBE_GET_STAT(adapter, tor); 666 regs_buff[963] = IXGBE_GET_STAT(adapter, tpr); 667 regs_buff[964] = IXGBE_GET_STAT(adapter, tpt); 668 regs_buff[965] = IXGBE_GET_STAT(adapter, ptc64); 669 regs_buff[966] = IXGBE_GET_STAT(adapter, ptc127); 670 regs_buff[967] = IXGBE_GET_STAT(adapter, ptc255); 671 regs_buff[968] = IXGBE_GET_STAT(adapter, ptc511); 672 regs_buff[969] = IXGBE_GET_STAT(adapter, ptc1023); 673 regs_buff[970] = IXGBE_GET_STAT(adapter, ptc1522); 674 regs_buff[971] = IXGBE_GET_STAT(adapter, mptc); 675 regs_buff[972] = IXGBE_GET_STAT(adapter, bptc); 676 regs_buff[973] = IXGBE_GET_STAT(adapter, xec); 677 for (i = 0; i < 16; i++) 678 regs_buff[974 + i] = IXGBE_GET_STAT(adapter, qprc[i]); 679 for (i = 0; i < 16; i++) 680 regs_buff[990 + i] = IXGBE_GET_STAT(adapter, qptc[i]); 681 for (i = 0; i < 16; i++) 682 regs_buff[1006 + i] = IXGBE_GET_STAT(adapter, qbrc[i]); 683 for (i = 0; i < 16; i++) 684 regs_buff[1022 + i] = IXGBE_GET_STAT(adapter, qbtc[i]); 685 686 /* MAC */ 687 regs_buff[1038] = IXGBE_READ_REG(hw, IXGBE_PCS1GCFIG); 688 regs_buff[1039] = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL); 689 regs_buff[1040] = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA); 690 regs_buff[1041] = IXGBE_READ_REG(hw, IXGBE_PCS1GDBG0); 691 regs_buff[1042] = IXGBE_READ_REG(hw, IXGBE_PCS1GDBG1); 692 regs_buff[1043] = IXGBE_READ_REG(hw, IXGBE_PCS1GANA); 693 regs_buff[1044] = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP); 694 regs_buff[1045] = IXGBE_READ_REG(hw, IXGBE_PCS1GANNP); 695 regs_buff[1046] = IXGBE_READ_REG(hw, IXGBE_PCS1GANLPNP); 696 regs_buff[1047] = IXGBE_READ_REG(hw, IXGBE_HLREG0); 697 regs_buff[1048] = IXGBE_READ_REG(hw, IXGBE_HLREG1); 698 regs_buff[1049] = IXGBE_READ_REG(hw, IXGBE_PAP); 699 regs_buff[1050] = IXGBE_READ_REG(hw, IXGBE_MACA); 700 regs_buff[1051] = IXGBE_READ_REG(hw, IXGBE_APAE); 701 regs_buff[1052] = IXGBE_READ_REG(hw, IXGBE_ARD); 702 regs_buff[1053] = IXGBE_READ_REG(hw, IXGBE_AIS); 703 regs_buff[1054] = IXGBE_READ_REG(hw, IXGBE_MSCA); 704 regs_buff[1055] = IXGBE_READ_REG(hw, IXGBE_MSRWD); 705 regs_buff[1056] = IXGBE_READ_REG(hw, IXGBE_MLADD); 706 regs_buff[1057] = IXGBE_READ_REG(hw, IXGBE_MHADD); 707 regs_buff[1058] = IXGBE_READ_REG(hw, IXGBE_TREG); 708 regs_buff[1059] = IXGBE_READ_REG(hw, IXGBE_PCSS1); 709 regs_buff[1060] = IXGBE_READ_REG(hw, IXGBE_PCSS2); 710 regs_buff[1061] = IXGBE_READ_REG(hw, IXGBE_XPCSS); 711 regs_buff[1062] = IXGBE_READ_REG(hw, IXGBE_SERDESC); 712 regs_buff[1063] = IXGBE_READ_REG(hw, IXGBE_MACS); 713 regs_buff[1064] = IXGBE_READ_REG(hw, IXGBE_AUTOC); 714 regs_buff[1065] = IXGBE_READ_REG(hw, IXGBE_LINKS); 715 regs_buff[1066] = IXGBE_READ_REG(hw, IXGBE_AUTOC2); 716 regs_buff[1067] = IXGBE_READ_REG(hw, IXGBE_AUTOC3); 717 regs_buff[1068] = IXGBE_READ_REG(hw, IXGBE_ANLP1); 718 regs_buff[1069] = IXGBE_READ_REG(hw, IXGBE_ANLP2); 719 regs_buff[1070] = IXGBE_READ_REG(hw, IXGBE_ATLASCTL); 720 721 /* Diagnostic */ 722 regs_buff[1071] = IXGBE_READ_REG(hw, IXGBE_RDSTATCTL); 723 for (i = 0; i < 8; i++) 724 regs_buff[1072 + i] = IXGBE_READ_REG(hw, IXGBE_RDSTAT(i)); 725 regs_buff[1080] = IXGBE_READ_REG(hw, IXGBE_RDHMPN); 726 for (i = 0; i < 4; i++) 727 regs_buff[1081 + i] = IXGBE_READ_REG(hw, IXGBE_RIC_DW(i)); 728 regs_buff[1085] = IXGBE_READ_REG(hw, IXGBE_RDPROBE); 729 regs_buff[1086] = IXGBE_READ_REG(hw, IXGBE_TDSTATCTL); 730 for (i = 0; i < 8; i++) 731 regs_buff[1087 + i] = IXGBE_READ_REG(hw, IXGBE_TDSTAT(i)); 732 regs_buff[1095] = IXGBE_READ_REG(hw, IXGBE_TDHMPN); 733 for (i = 0; i < 4; i++) 734 regs_buff[1096 + i] = IXGBE_READ_REG(hw, IXGBE_TIC_DW(i)); 735 regs_buff[1100] = IXGBE_READ_REG(hw, IXGBE_TDPROBE); 736 regs_buff[1101] = IXGBE_READ_REG(hw, IXGBE_TXBUFCTRL); 737 regs_buff[1102] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA0); 738 regs_buff[1103] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA1); 739 regs_buff[1104] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA2); 740 regs_buff[1105] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA3); 741 regs_buff[1106] = IXGBE_READ_REG(hw, IXGBE_RXBUFCTRL); 742 regs_buff[1107] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA0); 743 regs_buff[1108] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA1); 744 regs_buff[1109] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA2); 745 regs_buff[1110] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA3); 746 for (i = 0; i < 8; i++) 747 regs_buff[1111 + i] = IXGBE_READ_REG(hw, IXGBE_PCIE_DIAG(i)); 748 regs_buff[1119] = IXGBE_READ_REG(hw, IXGBE_RFVAL); 749 regs_buff[1120] = IXGBE_READ_REG(hw, IXGBE_MDFTC1); 750 regs_buff[1121] = IXGBE_READ_REG(hw, IXGBE_MDFTC2); 751 regs_buff[1122] = IXGBE_READ_REG(hw, IXGBE_MDFTFIFO1); 752 regs_buff[1123] = IXGBE_READ_REG(hw, IXGBE_MDFTFIFO2); 753 regs_buff[1124] = IXGBE_READ_REG(hw, IXGBE_MDFTS); 754 regs_buff[1125] = IXGBE_READ_REG(hw, IXGBE_PCIEECCCTL); 755 regs_buff[1126] = IXGBE_READ_REG(hw, IXGBE_PBTXECC); 756 regs_buff[1127] = IXGBE_READ_REG(hw, IXGBE_PBRXECC); 757 758 /* 82599 X540 specific registers */ 759 regs_buff[1128] = IXGBE_READ_REG(hw, IXGBE_MFLCN); 760 } 761 762 static int ixgbe_get_eeprom_len(struct net_device *netdev) 763 { 764 struct ixgbe_adapter *adapter = netdev_priv(netdev); 765 return adapter->hw.eeprom.word_size * 2; 766 } 767 768 static int ixgbe_get_eeprom(struct net_device *netdev, 769 struct ethtool_eeprom *eeprom, u8 *bytes) 770 { 771 struct ixgbe_adapter *adapter = netdev_priv(netdev); 772 struct ixgbe_hw *hw = &adapter->hw; 773 u16 *eeprom_buff; 774 int first_word, last_word, eeprom_len; 775 int ret_val = 0; 776 u16 i; 777 778 if (eeprom->len == 0) 779 return -EINVAL; 780 781 eeprom->magic = hw->vendor_id | (hw->device_id << 16); 782 783 first_word = eeprom->offset >> 1; 784 last_word = (eeprom->offset + eeprom->len - 1) >> 1; 785 eeprom_len = last_word - first_word + 1; 786 787 eeprom_buff = kmalloc(sizeof(u16) * eeprom_len, GFP_KERNEL); 788 if (!eeprom_buff) 789 return -ENOMEM; 790 791 ret_val = hw->eeprom.ops.read_buffer(hw, first_word, eeprom_len, 792 eeprom_buff); 793 794 /* Device's eeprom is always little-endian, word addressable */ 795 for (i = 0; i < eeprom_len; i++) 796 le16_to_cpus(&eeprom_buff[i]); 797 798 memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len); 799 kfree(eeprom_buff); 800 801 return ret_val; 802 } 803 804 static int ixgbe_set_eeprom(struct net_device *netdev, 805 struct ethtool_eeprom *eeprom, u8 *bytes) 806 { 807 struct ixgbe_adapter *adapter = netdev_priv(netdev); 808 struct ixgbe_hw *hw = &adapter->hw; 809 u16 *eeprom_buff; 810 void *ptr; 811 int max_len, first_word, last_word, ret_val = 0; 812 u16 i; 813 814 if (eeprom->len == 0) 815 return -EINVAL; 816 817 if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16))) 818 return -EINVAL; 819 820 max_len = hw->eeprom.word_size * 2; 821 822 first_word = eeprom->offset >> 1; 823 last_word = (eeprom->offset + eeprom->len - 1) >> 1; 824 eeprom_buff = kmalloc(max_len, GFP_KERNEL); 825 if (!eeprom_buff) 826 return -ENOMEM; 827 828 ptr = eeprom_buff; 829 830 if (eeprom->offset & 1) { 831 /* 832 * need read/modify/write of first changed EEPROM word 833 * only the second byte of the word is being modified 834 */ 835 ret_val = hw->eeprom.ops.read(hw, first_word, &eeprom_buff[0]); 836 if (ret_val) 837 goto err; 838 839 ptr++; 840 } 841 if ((eeprom->offset + eeprom->len) & 1) { 842 /* 843 * need read/modify/write of last changed EEPROM word 844 * only the first byte of the word is being modified 845 */ 846 ret_val = hw->eeprom.ops.read(hw, last_word, 847 &eeprom_buff[last_word - first_word]); 848 if (ret_val) 849 goto err; 850 } 851 852 /* Device's eeprom is always little-endian, word addressable */ 853 for (i = 0; i < last_word - first_word + 1; i++) 854 le16_to_cpus(&eeprom_buff[i]); 855 856 memcpy(ptr, bytes, eeprom->len); 857 858 for (i = 0; i < last_word - first_word + 1; i++) 859 cpu_to_le16s(&eeprom_buff[i]); 860 861 ret_val = hw->eeprom.ops.write_buffer(hw, first_word, 862 last_word - first_word + 1, 863 eeprom_buff); 864 865 /* Update the checksum */ 866 if (ret_val == 0) 867 hw->eeprom.ops.update_checksum(hw); 868 869 err: 870 kfree(eeprom_buff); 871 return ret_val; 872 } 873 874 static void ixgbe_get_drvinfo(struct net_device *netdev, 875 struct ethtool_drvinfo *drvinfo) 876 { 877 struct ixgbe_adapter *adapter = netdev_priv(netdev); 878 u32 nvm_track_id; 879 880 strlcpy(drvinfo->driver, ixgbe_driver_name, sizeof(drvinfo->driver)); 881 strlcpy(drvinfo->version, ixgbe_driver_version, 882 sizeof(drvinfo->version)); 883 884 nvm_track_id = (adapter->eeprom_verh << 16) | 885 adapter->eeprom_verl; 886 snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "0x%08x", 887 nvm_track_id); 888 889 strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), 890 sizeof(drvinfo->bus_info)); 891 drvinfo->n_stats = IXGBE_STATS_LEN; 892 drvinfo->testinfo_len = IXGBE_TEST_LEN; 893 drvinfo->regdump_len = ixgbe_get_regs_len(netdev); 894 } 895 896 static void ixgbe_get_ringparam(struct net_device *netdev, 897 struct ethtool_ringparam *ring) 898 { 899 struct ixgbe_adapter *adapter = netdev_priv(netdev); 900 struct ixgbe_ring *tx_ring = adapter->tx_ring[0]; 901 struct ixgbe_ring *rx_ring = adapter->rx_ring[0]; 902 903 ring->rx_max_pending = IXGBE_MAX_RXD; 904 ring->tx_max_pending = IXGBE_MAX_TXD; 905 ring->rx_pending = rx_ring->count; 906 ring->tx_pending = tx_ring->count; 907 } 908 909 static int ixgbe_set_ringparam(struct net_device *netdev, 910 struct ethtool_ringparam *ring) 911 { 912 struct ixgbe_adapter *adapter = netdev_priv(netdev); 913 struct ixgbe_ring *temp_tx_ring, *temp_rx_ring; 914 int i, err = 0; 915 u32 new_rx_count, new_tx_count; 916 bool need_update = false; 917 918 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) 919 return -EINVAL; 920 921 new_rx_count = max_t(u32, ring->rx_pending, IXGBE_MIN_RXD); 922 new_rx_count = min_t(u32, new_rx_count, IXGBE_MAX_RXD); 923 new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE); 924 925 new_tx_count = max_t(u32, ring->tx_pending, IXGBE_MIN_TXD); 926 new_tx_count = min_t(u32, new_tx_count, IXGBE_MAX_TXD); 927 new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE); 928 929 if ((new_tx_count == adapter->tx_ring[0]->count) && 930 (new_rx_count == adapter->rx_ring[0]->count)) { 931 /* nothing to do */ 932 return 0; 933 } 934 935 while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state)) 936 usleep_range(1000, 2000); 937 938 if (!netif_running(adapter->netdev)) { 939 for (i = 0; i < adapter->num_tx_queues; i++) 940 adapter->tx_ring[i]->count = new_tx_count; 941 for (i = 0; i < adapter->num_rx_queues; i++) 942 adapter->rx_ring[i]->count = new_rx_count; 943 adapter->tx_ring_count = new_tx_count; 944 adapter->rx_ring_count = new_rx_count; 945 goto clear_reset; 946 } 947 948 temp_tx_ring = vmalloc(adapter->num_tx_queues * sizeof(struct ixgbe_ring)); 949 if (!temp_tx_ring) { 950 err = -ENOMEM; 951 goto clear_reset; 952 } 953 954 if (new_tx_count != adapter->tx_ring_count) { 955 for (i = 0; i < adapter->num_tx_queues; i++) { 956 memcpy(&temp_tx_ring[i], adapter->tx_ring[i], 957 sizeof(struct ixgbe_ring)); 958 temp_tx_ring[i].count = new_tx_count; 959 err = ixgbe_setup_tx_resources(&temp_tx_ring[i]); 960 if (err) { 961 while (i) { 962 i--; 963 ixgbe_free_tx_resources(&temp_tx_ring[i]); 964 } 965 goto clear_reset; 966 } 967 } 968 need_update = true; 969 } 970 971 temp_rx_ring = vmalloc(adapter->num_rx_queues * sizeof(struct ixgbe_ring)); 972 if (!temp_rx_ring) { 973 err = -ENOMEM; 974 goto err_setup; 975 } 976 977 if (new_rx_count != adapter->rx_ring_count) { 978 for (i = 0; i < adapter->num_rx_queues; i++) { 979 memcpy(&temp_rx_ring[i], adapter->rx_ring[i], 980 sizeof(struct ixgbe_ring)); 981 temp_rx_ring[i].count = new_rx_count; 982 err = ixgbe_setup_rx_resources(&temp_rx_ring[i]); 983 if (err) { 984 while (i) { 985 i--; 986 ixgbe_free_rx_resources(&temp_rx_ring[i]); 987 } 988 goto err_setup; 989 } 990 } 991 need_update = true; 992 } 993 994 /* if rings need to be updated, here's the place to do it in one shot */ 995 if (need_update) { 996 ixgbe_down(adapter); 997 998 /* tx */ 999 if (new_tx_count != adapter->tx_ring_count) { 1000 for (i = 0; i < adapter->num_tx_queues; i++) { 1001 ixgbe_free_tx_resources(adapter->tx_ring[i]); 1002 memcpy(adapter->tx_ring[i], &temp_tx_ring[i], 1003 sizeof(struct ixgbe_ring)); 1004 } 1005 adapter->tx_ring_count = new_tx_count; 1006 } 1007 1008 /* rx */ 1009 if (new_rx_count != adapter->rx_ring_count) { 1010 for (i = 0; i < adapter->num_rx_queues; i++) { 1011 ixgbe_free_rx_resources(adapter->rx_ring[i]); 1012 memcpy(adapter->rx_ring[i], &temp_rx_ring[i], 1013 sizeof(struct ixgbe_ring)); 1014 } 1015 adapter->rx_ring_count = new_rx_count; 1016 } 1017 ixgbe_up(adapter); 1018 } 1019 1020 vfree(temp_rx_ring); 1021 err_setup: 1022 vfree(temp_tx_ring); 1023 clear_reset: 1024 clear_bit(__IXGBE_RESETTING, &adapter->state); 1025 return err; 1026 } 1027 1028 static int ixgbe_get_sset_count(struct net_device *netdev, int sset) 1029 { 1030 switch (sset) { 1031 case ETH_SS_TEST: 1032 return IXGBE_TEST_LEN; 1033 case ETH_SS_STATS: 1034 return IXGBE_STATS_LEN; 1035 default: 1036 return -EOPNOTSUPP; 1037 } 1038 } 1039 1040 static void ixgbe_get_ethtool_stats(struct net_device *netdev, 1041 struct ethtool_stats *stats, u64 *data) 1042 { 1043 struct ixgbe_adapter *adapter = netdev_priv(netdev); 1044 struct rtnl_link_stats64 temp; 1045 const struct rtnl_link_stats64 *net_stats; 1046 unsigned int start; 1047 struct ixgbe_ring *ring; 1048 int i, j; 1049 char *p = NULL; 1050 1051 ixgbe_update_stats(adapter); 1052 net_stats = dev_get_stats(netdev, &temp); 1053 for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) { 1054 switch (ixgbe_gstrings_stats[i].type) { 1055 case NETDEV_STATS: 1056 p = (char *) net_stats + 1057 ixgbe_gstrings_stats[i].stat_offset; 1058 break; 1059 case IXGBE_STATS: 1060 p = (char *) adapter + 1061 ixgbe_gstrings_stats[i].stat_offset; 1062 break; 1063 } 1064 1065 data[i] = (ixgbe_gstrings_stats[i].sizeof_stat == 1066 sizeof(u64)) ? *(u64 *)p : *(u32 *)p; 1067 } 1068 for (j = 0; j < IXGBE_NUM_RX_QUEUES; j++) { 1069 ring = adapter->tx_ring[j]; 1070 if (!ring) { 1071 data[i] = 0; 1072 data[i+1] = 0; 1073 i += 2; 1074 continue; 1075 } 1076 1077 do { 1078 start = u64_stats_fetch_begin_bh(&ring->syncp); 1079 data[i] = ring->stats.packets; 1080 data[i+1] = ring->stats.bytes; 1081 } while (u64_stats_fetch_retry_bh(&ring->syncp, start)); 1082 i += 2; 1083 } 1084 for (j = 0; j < IXGBE_NUM_RX_QUEUES; j++) { 1085 ring = adapter->rx_ring[j]; 1086 if (!ring) { 1087 data[i] = 0; 1088 data[i+1] = 0; 1089 i += 2; 1090 continue; 1091 } 1092 1093 do { 1094 start = u64_stats_fetch_begin_bh(&ring->syncp); 1095 data[i] = ring->stats.packets; 1096 data[i+1] = ring->stats.bytes; 1097 } while (u64_stats_fetch_retry_bh(&ring->syncp, start)); 1098 i += 2; 1099 } 1100 1101 for (j = 0; j < IXGBE_MAX_PACKET_BUFFERS; j++) { 1102 data[i++] = adapter->stats.pxontxc[j]; 1103 data[i++] = adapter->stats.pxofftxc[j]; 1104 } 1105 for (j = 0; j < IXGBE_MAX_PACKET_BUFFERS; j++) { 1106 data[i++] = adapter->stats.pxonrxc[j]; 1107 data[i++] = adapter->stats.pxoffrxc[j]; 1108 } 1109 } 1110 1111 static void ixgbe_get_strings(struct net_device *netdev, u32 stringset, 1112 u8 *data) 1113 { 1114 char *p = (char *)data; 1115 int i; 1116 1117 switch (stringset) { 1118 case ETH_SS_TEST: 1119 memcpy(data, *ixgbe_gstrings_test, 1120 IXGBE_TEST_LEN * ETH_GSTRING_LEN); 1121 break; 1122 case ETH_SS_STATS: 1123 for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) { 1124 memcpy(p, ixgbe_gstrings_stats[i].stat_string, 1125 ETH_GSTRING_LEN); 1126 p += ETH_GSTRING_LEN; 1127 } 1128 for (i = 0; i < netdev->num_tx_queues; i++) { 1129 sprintf(p, "tx_queue_%u_packets", i); 1130 p += ETH_GSTRING_LEN; 1131 sprintf(p, "tx_queue_%u_bytes", i); 1132 p += ETH_GSTRING_LEN; 1133 } 1134 for (i = 0; i < IXGBE_NUM_RX_QUEUES; i++) { 1135 sprintf(p, "rx_queue_%u_packets", i); 1136 p += ETH_GSTRING_LEN; 1137 sprintf(p, "rx_queue_%u_bytes", i); 1138 p += ETH_GSTRING_LEN; 1139 } 1140 for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) { 1141 sprintf(p, "tx_pb_%u_pxon", i); 1142 p += ETH_GSTRING_LEN; 1143 sprintf(p, "tx_pb_%u_pxoff", i); 1144 p += ETH_GSTRING_LEN; 1145 } 1146 for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) { 1147 sprintf(p, "rx_pb_%u_pxon", i); 1148 p += ETH_GSTRING_LEN; 1149 sprintf(p, "rx_pb_%u_pxoff", i); 1150 p += ETH_GSTRING_LEN; 1151 } 1152 /* BUG_ON(p - data != IXGBE_STATS_LEN * ETH_GSTRING_LEN); */ 1153 break; 1154 } 1155 } 1156 1157 static int ixgbe_link_test(struct ixgbe_adapter *adapter, u64 *data) 1158 { 1159 struct ixgbe_hw *hw = &adapter->hw; 1160 bool link_up; 1161 u32 link_speed = 0; 1162 *data = 0; 1163 1164 hw->mac.ops.check_link(hw, &link_speed, &link_up, true); 1165 if (link_up) 1166 return *data; 1167 else 1168 *data = 1; 1169 return *data; 1170 } 1171 1172 /* ethtool register test data */ 1173 struct ixgbe_reg_test { 1174 u16 reg; 1175 u8 array_len; 1176 u8 test_type; 1177 u32 mask; 1178 u32 write; 1179 }; 1180 1181 /* In the hardware, registers are laid out either singly, in arrays 1182 * spaced 0x40 bytes apart, or in contiguous tables. We assume 1183 * most tests take place on arrays or single registers (handled 1184 * as a single-element array) and special-case the tables. 1185 * Table tests are always pattern tests. 1186 * 1187 * We also make provision for some required setup steps by specifying 1188 * registers to be written without any read-back testing. 1189 */ 1190 1191 #define PATTERN_TEST 1 1192 #define SET_READ_TEST 2 1193 #define WRITE_NO_TEST 3 1194 #define TABLE32_TEST 4 1195 #define TABLE64_TEST_LO 5 1196 #define TABLE64_TEST_HI 6 1197 1198 /* default 82599 register test */ 1199 static const struct ixgbe_reg_test reg_test_82599[] = { 1200 { IXGBE_FCRTL_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, 1201 { IXGBE_FCRTH_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, 1202 { IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 1203 { IXGBE_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 }, 1204 { IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 }, 1205 { IXGBE_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 1206 { IXGBE_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, 1207 { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE }, 1208 { IXGBE_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, 1209 { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 }, 1210 { IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, 1211 { IXGBE_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 1212 { IXGBE_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, 1213 { IXGBE_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 1214 { IXGBE_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFF80 }, 1215 { IXGBE_RXCTRL, 1, SET_READ_TEST, 0x00000001, 0x00000001 }, 1216 { IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF }, 1217 { IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x8001FFFF, 0x800CFFFF }, 1218 { IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 1219 { 0, 0, 0, 0 } 1220 }; 1221 1222 /* default 82598 register test */ 1223 static const struct ixgbe_reg_test reg_test_82598[] = { 1224 { IXGBE_FCRTL(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, 1225 { IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, 1226 { IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 1227 { IXGBE_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 }, 1228 { IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, 1229 { IXGBE_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 1230 { IXGBE_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, 1231 /* Enable all four RX queues before testing. */ 1232 { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE }, 1233 /* RDH is read-only for 82598, only test RDT. */ 1234 { IXGBE_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, 1235 { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 }, 1236 { IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, 1237 { IXGBE_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 1238 { IXGBE_TIPG, 1, PATTERN_TEST, 0x000000FF, 0x000000FF }, 1239 { IXGBE_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, 1240 { IXGBE_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 1241 { IXGBE_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, 1242 { IXGBE_RXCTRL, 1, SET_READ_TEST, 0x00000003, 0x00000003 }, 1243 { IXGBE_DTXCTL, 1, SET_READ_TEST, 0x00000005, 0x00000005 }, 1244 { IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF }, 1245 { IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x800CFFFF, 0x800CFFFF }, 1246 { IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 1247 { 0, 0, 0, 0 } 1248 }; 1249 1250 static bool reg_pattern_test(struct ixgbe_adapter *adapter, u64 *data, int reg, 1251 u32 mask, u32 write) 1252 { 1253 u32 pat, val, before; 1254 static const u32 test_pattern[] = { 1255 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; 1256 1257 for (pat = 0; pat < ARRAY_SIZE(test_pattern); pat++) { 1258 before = readl(adapter->hw.hw_addr + reg); 1259 writel((test_pattern[pat] & write), 1260 (adapter->hw.hw_addr + reg)); 1261 val = readl(adapter->hw.hw_addr + reg); 1262 if (val != (test_pattern[pat] & write & mask)) { 1263 e_err(drv, "pattern test reg %04X failed: got " 1264 "0x%08X expected 0x%08X\n", 1265 reg, val, (test_pattern[pat] & write & mask)); 1266 *data = reg; 1267 writel(before, adapter->hw.hw_addr + reg); 1268 return 1; 1269 } 1270 writel(before, adapter->hw.hw_addr + reg); 1271 } 1272 return 0; 1273 } 1274 1275 static bool reg_set_and_check(struct ixgbe_adapter *adapter, u64 *data, int reg, 1276 u32 mask, u32 write) 1277 { 1278 u32 val, before; 1279 before = readl(adapter->hw.hw_addr + reg); 1280 writel((write & mask), (adapter->hw.hw_addr + reg)); 1281 val = readl(adapter->hw.hw_addr + reg); 1282 if ((write & mask) != (val & mask)) { 1283 e_err(drv, "set/check reg %04X test failed: got 0x%08X " 1284 "expected 0x%08X\n", reg, (val & mask), (write & mask)); 1285 *data = reg; 1286 writel(before, (adapter->hw.hw_addr + reg)); 1287 return 1; 1288 } 1289 writel(before, (adapter->hw.hw_addr + reg)); 1290 return 0; 1291 } 1292 1293 #define REG_PATTERN_TEST(reg, mask, write) \ 1294 do { \ 1295 if (reg_pattern_test(adapter, data, reg, mask, write)) \ 1296 return 1; \ 1297 } while (0) \ 1298 1299 1300 #define REG_SET_AND_CHECK(reg, mask, write) \ 1301 do { \ 1302 if (reg_set_and_check(adapter, data, reg, mask, write)) \ 1303 return 1; \ 1304 } while (0) \ 1305 1306 static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data) 1307 { 1308 const struct ixgbe_reg_test *test; 1309 u32 value, before, after; 1310 u32 i, toggle; 1311 1312 switch (adapter->hw.mac.type) { 1313 case ixgbe_mac_82598EB: 1314 toggle = 0x7FFFF3FF; 1315 test = reg_test_82598; 1316 break; 1317 case ixgbe_mac_82599EB: 1318 case ixgbe_mac_X540: 1319 toggle = 0x7FFFF30F; 1320 test = reg_test_82599; 1321 break; 1322 default: 1323 *data = 1; 1324 return 1; 1325 break; 1326 } 1327 1328 /* 1329 * Because the status register is such a special case, 1330 * we handle it separately from the rest of the register 1331 * tests. Some bits are read-only, some toggle, and some 1332 * are writeable on newer MACs. 1333 */ 1334 before = IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS); 1335 value = (IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS) & toggle); 1336 IXGBE_WRITE_REG(&adapter->hw, IXGBE_STATUS, toggle); 1337 after = IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS) & toggle; 1338 if (value != after) { 1339 e_err(drv, "failed STATUS register test got: 0x%08X " 1340 "expected: 0x%08X\n", after, value); 1341 *data = 1; 1342 return 1; 1343 } 1344 /* restore previous status */ 1345 IXGBE_WRITE_REG(&adapter->hw, IXGBE_STATUS, before); 1346 1347 /* 1348 * Perform the remainder of the register test, looping through 1349 * the test table until we either fail or reach the null entry. 1350 */ 1351 while (test->reg) { 1352 for (i = 0; i < test->array_len; i++) { 1353 switch (test->test_type) { 1354 case PATTERN_TEST: 1355 REG_PATTERN_TEST(test->reg + (i * 0x40), 1356 test->mask, 1357 test->write); 1358 break; 1359 case SET_READ_TEST: 1360 REG_SET_AND_CHECK(test->reg + (i * 0x40), 1361 test->mask, 1362 test->write); 1363 break; 1364 case WRITE_NO_TEST: 1365 writel(test->write, 1366 (adapter->hw.hw_addr + test->reg) 1367 + (i * 0x40)); 1368 break; 1369 case TABLE32_TEST: 1370 REG_PATTERN_TEST(test->reg + (i * 4), 1371 test->mask, 1372 test->write); 1373 break; 1374 case TABLE64_TEST_LO: 1375 REG_PATTERN_TEST(test->reg + (i * 8), 1376 test->mask, 1377 test->write); 1378 break; 1379 case TABLE64_TEST_HI: 1380 REG_PATTERN_TEST((test->reg + 4) + (i * 8), 1381 test->mask, 1382 test->write); 1383 break; 1384 } 1385 } 1386 test++; 1387 } 1388 1389 *data = 0; 1390 return 0; 1391 } 1392 1393 static int ixgbe_eeprom_test(struct ixgbe_adapter *adapter, u64 *data) 1394 { 1395 struct ixgbe_hw *hw = &adapter->hw; 1396 if (hw->eeprom.ops.validate_checksum(hw, NULL)) 1397 *data = 1; 1398 else 1399 *data = 0; 1400 return *data; 1401 } 1402 1403 static irqreturn_t ixgbe_test_intr(int irq, void *data) 1404 { 1405 struct net_device *netdev = (struct net_device *) data; 1406 struct ixgbe_adapter *adapter = netdev_priv(netdev); 1407 1408 adapter->test_icr |= IXGBE_READ_REG(&adapter->hw, IXGBE_EICR); 1409 1410 return IRQ_HANDLED; 1411 } 1412 1413 static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data) 1414 { 1415 struct net_device *netdev = adapter->netdev; 1416 u32 mask, i = 0, shared_int = true; 1417 u32 irq = adapter->pdev->irq; 1418 1419 *data = 0; 1420 1421 /* Hook up test interrupt handler just for this test */ 1422 if (adapter->msix_entries) { 1423 /* NOTE: we don't test MSI-X interrupts here, yet */ 1424 return 0; 1425 } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) { 1426 shared_int = false; 1427 if (request_irq(irq, ixgbe_test_intr, 0, netdev->name, 1428 netdev)) { 1429 *data = 1; 1430 return -1; 1431 } 1432 } else if (!request_irq(irq, ixgbe_test_intr, IRQF_PROBE_SHARED, 1433 netdev->name, netdev)) { 1434 shared_int = false; 1435 } else if (request_irq(irq, ixgbe_test_intr, IRQF_SHARED, 1436 netdev->name, netdev)) { 1437 *data = 1; 1438 return -1; 1439 } 1440 e_info(hw, "testing %s interrupt\n", shared_int ? 1441 "shared" : "unshared"); 1442 1443 /* Disable all the interrupts */ 1444 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF); 1445 IXGBE_WRITE_FLUSH(&adapter->hw); 1446 usleep_range(10000, 20000); 1447 1448 /* Test each interrupt */ 1449 for (; i < 10; i++) { 1450 /* Interrupt to test */ 1451 mask = 1 << i; 1452 1453 if (!shared_int) { 1454 /* 1455 * Disable the interrupts to be reported in 1456 * the cause register and then force the same 1457 * interrupt and see if one gets posted. If 1458 * an interrupt was posted to the bus, the 1459 * test failed. 1460 */ 1461 adapter->test_icr = 0; 1462 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 1463 ~mask & 0x00007FFF); 1464 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, 1465 ~mask & 0x00007FFF); 1466 IXGBE_WRITE_FLUSH(&adapter->hw); 1467 usleep_range(10000, 20000); 1468 1469 if (adapter->test_icr & mask) { 1470 *data = 3; 1471 break; 1472 } 1473 } 1474 1475 /* 1476 * Enable the interrupt to be reported in the cause 1477 * register and then force the same interrupt and see 1478 * if one gets posted. If an interrupt was not posted 1479 * to the bus, the test failed. 1480 */ 1481 adapter->test_icr = 0; 1482 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask); 1483 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask); 1484 IXGBE_WRITE_FLUSH(&adapter->hw); 1485 usleep_range(10000, 20000); 1486 1487 if (!(adapter->test_icr &mask)) { 1488 *data = 4; 1489 break; 1490 } 1491 1492 if (!shared_int) { 1493 /* 1494 * Disable the other interrupts to be reported in 1495 * the cause register and then force the other 1496 * interrupts and see if any get posted. If 1497 * an interrupt was posted to the bus, the 1498 * test failed. 1499 */ 1500 adapter->test_icr = 0; 1501 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 1502 ~mask & 0x00007FFF); 1503 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, 1504 ~mask & 0x00007FFF); 1505 IXGBE_WRITE_FLUSH(&adapter->hw); 1506 usleep_range(10000, 20000); 1507 1508 if (adapter->test_icr) { 1509 *data = 5; 1510 break; 1511 } 1512 } 1513 } 1514 1515 /* Disable all the interrupts */ 1516 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF); 1517 IXGBE_WRITE_FLUSH(&adapter->hw); 1518 usleep_range(10000, 20000); 1519 1520 /* Unhook test interrupt handler */ 1521 free_irq(irq, netdev); 1522 1523 return *data; 1524 } 1525 1526 static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter) 1527 { 1528 struct ixgbe_ring *tx_ring = &adapter->test_tx_ring; 1529 struct ixgbe_ring *rx_ring = &adapter->test_rx_ring; 1530 struct ixgbe_hw *hw = &adapter->hw; 1531 u32 reg_ctl; 1532 1533 /* shut down the DMA engines now so they can be reinitialized later */ 1534 1535 /* first Rx */ 1536 reg_ctl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); 1537 reg_ctl &= ~IXGBE_RXCTRL_RXEN; 1538 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_ctl); 1539 ixgbe_disable_rx_queue(adapter, rx_ring); 1540 1541 /* now Tx */ 1542 reg_ctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx)); 1543 reg_ctl &= ~IXGBE_TXDCTL_ENABLE; 1544 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx), reg_ctl); 1545 1546 switch (hw->mac.type) { 1547 case ixgbe_mac_82599EB: 1548 case ixgbe_mac_X540: 1549 reg_ctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); 1550 reg_ctl &= ~IXGBE_DMATXCTL_TE; 1551 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_ctl); 1552 break; 1553 default: 1554 break; 1555 } 1556 1557 ixgbe_reset(adapter); 1558 1559 ixgbe_free_tx_resources(&adapter->test_tx_ring); 1560 ixgbe_free_rx_resources(&adapter->test_rx_ring); 1561 } 1562 1563 static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter) 1564 { 1565 struct ixgbe_ring *tx_ring = &adapter->test_tx_ring; 1566 struct ixgbe_ring *rx_ring = &adapter->test_rx_ring; 1567 u32 rctl, reg_data; 1568 int ret_val; 1569 int err; 1570 1571 /* Setup Tx descriptor ring and Tx buffers */ 1572 tx_ring->count = IXGBE_DEFAULT_TXD; 1573 tx_ring->queue_index = 0; 1574 tx_ring->dev = &adapter->pdev->dev; 1575 tx_ring->netdev = adapter->netdev; 1576 tx_ring->reg_idx = adapter->tx_ring[0]->reg_idx; 1577 1578 err = ixgbe_setup_tx_resources(tx_ring); 1579 if (err) 1580 return 1; 1581 1582 switch (adapter->hw.mac.type) { 1583 case ixgbe_mac_82599EB: 1584 case ixgbe_mac_X540: 1585 reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_DMATXCTL); 1586 reg_data |= IXGBE_DMATXCTL_TE; 1587 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DMATXCTL, reg_data); 1588 break; 1589 default: 1590 break; 1591 } 1592 1593 ixgbe_configure_tx_ring(adapter, tx_ring); 1594 1595 /* Setup Rx Descriptor ring and Rx buffers */ 1596 rx_ring->count = IXGBE_DEFAULT_RXD; 1597 rx_ring->queue_index = 0; 1598 rx_ring->dev = &adapter->pdev->dev; 1599 rx_ring->netdev = adapter->netdev; 1600 rx_ring->reg_idx = adapter->rx_ring[0]->reg_idx; 1601 1602 err = ixgbe_setup_rx_resources(rx_ring); 1603 if (err) { 1604 ret_val = 4; 1605 goto err_nomem; 1606 } 1607 1608 rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXCTRL); 1609 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rctl & ~IXGBE_RXCTRL_RXEN); 1610 1611 ixgbe_configure_rx_ring(adapter, rx_ring); 1612 1613 rctl |= IXGBE_RXCTRL_RXEN | IXGBE_RXCTRL_DMBYPS; 1614 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rctl); 1615 1616 return 0; 1617 1618 err_nomem: 1619 ixgbe_free_desc_rings(adapter); 1620 return ret_val; 1621 } 1622 1623 static int ixgbe_setup_loopback_test(struct ixgbe_adapter *adapter) 1624 { 1625 struct ixgbe_hw *hw = &adapter->hw; 1626 u32 reg_data; 1627 1628 /* X540 needs to set the MACC.FLU bit to force link up */ 1629 if (adapter->hw.mac.type == ixgbe_mac_X540) { 1630 reg_data = IXGBE_READ_REG(hw, IXGBE_MACC); 1631 reg_data |= IXGBE_MACC_FLU; 1632 IXGBE_WRITE_REG(hw, IXGBE_MACC, reg_data); 1633 } 1634 1635 /* right now we only support MAC loopback in the driver */ 1636 reg_data = IXGBE_READ_REG(hw, IXGBE_HLREG0); 1637 /* Setup MAC loopback */ 1638 reg_data |= IXGBE_HLREG0_LPBK; 1639 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_data); 1640 1641 reg_data = IXGBE_READ_REG(hw, IXGBE_FCTRL); 1642 reg_data |= IXGBE_FCTRL_BAM | IXGBE_FCTRL_SBP | IXGBE_FCTRL_MPE; 1643 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg_data); 1644 1645 reg_data = IXGBE_READ_REG(hw, IXGBE_AUTOC); 1646 reg_data &= ~IXGBE_AUTOC_LMS_MASK; 1647 reg_data |= IXGBE_AUTOC_LMS_10G_LINK_NO_AN | IXGBE_AUTOC_FLU; 1648 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_data); 1649 IXGBE_WRITE_FLUSH(hw); 1650 usleep_range(10000, 20000); 1651 1652 /* Disable Atlas Tx lanes; re-enabled in reset path */ 1653 if (hw->mac.type == ixgbe_mac_82598EB) { 1654 u8 atlas; 1655 1656 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &atlas); 1657 atlas |= IXGBE_ATLAS_PDN_TX_REG_EN; 1658 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, atlas); 1659 1660 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, &atlas); 1661 atlas |= IXGBE_ATLAS_PDN_TX_10G_QL_ALL; 1662 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, atlas); 1663 1664 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, &atlas); 1665 atlas |= IXGBE_ATLAS_PDN_TX_1G_QL_ALL; 1666 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, atlas); 1667 1668 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, &atlas); 1669 atlas |= IXGBE_ATLAS_PDN_TX_AN_QL_ALL; 1670 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, atlas); 1671 } 1672 1673 return 0; 1674 } 1675 1676 static void ixgbe_loopback_cleanup(struct ixgbe_adapter *adapter) 1677 { 1678 u32 reg_data; 1679 1680 reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0); 1681 reg_data &= ~IXGBE_HLREG0_LPBK; 1682 IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, reg_data); 1683 } 1684 1685 static void ixgbe_create_lbtest_frame(struct sk_buff *skb, 1686 unsigned int frame_size) 1687 { 1688 memset(skb->data, 0xFF, frame_size); 1689 frame_size >>= 1; 1690 memset(&skb->data[frame_size], 0xAA, frame_size / 2 - 1); 1691 memset(&skb->data[frame_size + 10], 0xBE, 1); 1692 memset(&skb->data[frame_size + 12], 0xAF, 1); 1693 } 1694 1695 static bool ixgbe_check_lbtest_frame(struct ixgbe_rx_buffer *rx_buffer, 1696 unsigned int frame_size) 1697 { 1698 unsigned char *data; 1699 bool match = true; 1700 1701 frame_size >>= 1; 1702 1703 data = kmap(rx_buffer->page) + rx_buffer->page_offset; 1704 1705 if (data[3] != 0xFF || 1706 data[frame_size + 10] != 0xBE || 1707 data[frame_size + 12] != 0xAF) 1708 match = false; 1709 1710 kunmap(rx_buffer->page); 1711 1712 return match; 1713 } 1714 1715 static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring, 1716 struct ixgbe_ring *tx_ring, 1717 unsigned int size) 1718 { 1719 union ixgbe_adv_rx_desc *rx_desc; 1720 struct ixgbe_rx_buffer *rx_buffer; 1721 struct ixgbe_tx_buffer *tx_buffer; 1722 u16 rx_ntc, tx_ntc, count = 0; 1723 1724 /* initialize next to clean and descriptor values */ 1725 rx_ntc = rx_ring->next_to_clean; 1726 tx_ntc = tx_ring->next_to_clean; 1727 rx_desc = IXGBE_RX_DESC(rx_ring, rx_ntc); 1728 1729 while (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_DD)) { 1730 /* check Rx buffer */ 1731 rx_buffer = &rx_ring->rx_buffer_info[rx_ntc]; 1732 1733 /* sync Rx buffer for CPU read */ 1734 dma_sync_single_for_cpu(rx_ring->dev, 1735 rx_buffer->dma, 1736 ixgbe_rx_bufsz(rx_ring), 1737 DMA_FROM_DEVICE); 1738 1739 /* verify contents of skb */ 1740 if (ixgbe_check_lbtest_frame(rx_buffer, size)) 1741 count++; 1742 1743 /* sync Rx buffer for device write */ 1744 dma_sync_single_for_device(rx_ring->dev, 1745 rx_buffer->dma, 1746 ixgbe_rx_bufsz(rx_ring), 1747 DMA_FROM_DEVICE); 1748 1749 /* unmap buffer on Tx side */ 1750 tx_buffer = &tx_ring->tx_buffer_info[tx_ntc]; 1751 ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer); 1752 1753 /* increment Rx/Tx next to clean counters */ 1754 rx_ntc++; 1755 if (rx_ntc == rx_ring->count) 1756 rx_ntc = 0; 1757 tx_ntc++; 1758 if (tx_ntc == tx_ring->count) 1759 tx_ntc = 0; 1760 1761 /* fetch next descriptor */ 1762 rx_desc = IXGBE_RX_DESC(rx_ring, rx_ntc); 1763 } 1764 1765 netdev_tx_reset_queue(txring_txq(tx_ring)); 1766 1767 /* re-map buffers to ring, store next to clean values */ 1768 ixgbe_alloc_rx_buffers(rx_ring, count); 1769 rx_ring->next_to_clean = rx_ntc; 1770 tx_ring->next_to_clean = tx_ntc; 1771 1772 return count; 1773 } 1774 1775 static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter) 1776 { 1777 struct ixgbe_ring *tx_ring = &adapter->test_tx_ring; 1778 struct ixgbe_ring *rx_ring = &adapter->test_rx_ring; 1779 int i, j, lc, good_cnt, ret_val = 0; 1780 unsigned int size = 1024; 1781 netdev_tx_t tx_ret_val; 1782 struct sk_buff *skb; 1783 1784 /* allocate test skb */ 1785 skb = alloc_skb(size, GFP_KERNEL); 1786 if (!skb) 1787 return 11; 1788 1789 /* place data into test skb */ 1790 ixgbe_create_lbtest_frame(skb, size); 1791 skb_put(skb, size); 1792 1793 /* 1794 * Calculate the loop count based on the largest descriptor ring 1795 * The idea is to wrap the largest ring a number of times using 64 1796 * send/receive pairs during each loop 1797 */ 1798 1799 if (rx_ring->count <= tx_ring->count) 1800 lc = ((tx_ring->count / 64) * 2) + 1; 1801 else 1802 lc = ((rx_ring->count / 64) * 2) + 1; 1803 1804 for (j = 0; j <= lc; j++) { 1805 /* reset count of good packets */ 1806 good_cnt = 0; 1807 1808 /* place 64 packets on the transmit queue*/ 1809 for (i = 0; i < 64; i++) { 1810 skb_get(skb); 1811 tx_ret_val = ixgbe_xmit_frame_ring(skb, 1812 adapter, 1813 tx_ring); 1814 if (tx_ret_val == NETDEV_TX_OK) 1815 good_cnt++; 1816 } 1817 1818 if (good_cnt != 64) { 1819 ret_val = 12; 1820 break; 1821 } 1822 1823 /* allow 200 milliseconds for packets to go from Tx to Rx */ 1824 msleep(200); 1825 1826 good_cnt = ixgbe_clean_test_rings(rx_ring, tx_ring, size); 1827 if (good_cnt != 64) { 1828 ret_val = 13; 1829 break; 1830 } 1831 } 1832 1833 /* free the original skb */ 1834 kfree_skb(skb); 1835 1836 return ret_val; 1837 } 1838 1839 static int ixgbe_loopback_test(struct ixgbe_adapter *adapter, u64 *data) 1840 { 1841 *data = ixgbe_setup_desc_rings(adapter); 1842 if (*data) 1843 goto out; 1844 *data = ixgbe_setup_loopback_test(adapter); 1845 if (*data) 1846 goto err_loopback; 1847 *data = ixgbe_run_loopback_test(adapter); 1848 ixgbe_loopback_cleanup(adapter); 1849 1850 err_loopback: 1851 ixgbe_free_desc_rings(adapter); 1852 out: 1853 return *data; 1854 } 1855 1856 static void ixgbe_diag_test(struct net_device *netdev, 1857 struct ethtool_test *eth_test, u64 *data) 1858 { 1859 struct ixgbe_adapter *adapter = netdev_priv(netdev); 1860 bool if_running = netif_running(netdev); 1861 1862 set_bit(__IXGBE_TESTING, &adapter->state); 1863 if (eth_test->flags == ETH_TEST_FL_OFFLINE) { 1864 /* Offline tests */ 1865 1866 e_info(hw, "offline testing starting\n"); 1867 1868 /* Link test performed before hardware reset so autoneg doesn't 1869 * interfere with test result */ 1870 if (ixgbe_link_test(adapter, &data[4])) 1871 eth_test->flags |= ETH_TEST_FL_FAILED; 1872 1873 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { 1874 int i; 1875 for (i = 0; i < adapter->num_vfs; i++) { 1876 if (adapter->vfinfo[i].clear_to_send) { 1877 netdev_warn(netdev, "%s", 1878 "offline diagnostic is not " 1879 "supported when VFs are " 1880 "present\n"); 1881 data[0] = 1; 1882 data[1] = 1; 1883 data[2] = 1; 1884 data[3] = 1; 1885 eth_test->flags |= ETH_TEST_FL_FAILED; 1886 clear_bit(__IXGBE_TESTING, 1887 &adapter->state); 1888 goto skip_ol_tests; 1889 } 1890 } 1891 } 1892 1893 if (if_running) 1894 /* indicate we're in test mode */ 1895 dev_close(netdev); 1896 else 1897 ixgbe_reset(adapter); 1898 1899 e_info(hw, "register testing starting\n"); 1900 if (ixgbe_reg_test(adapter, &data[0])) 1901 eth_test->flags |= ETH_TEST_FL_FAILED; 1902 1903 ixgbe_reset(adapter); 1904 e_info(hw, "eeprom testing starting\n"); 1905 if (ixgbe_eeprom_test(adapter, &data[1])) 1906 eth_test->flags |= ETH_TEST_FL_FAILED; 1907 1908 ixgbe_reset(adapter); 1909 e_info(hw, "interrupt testing starting\n"); 1910 if (ixgbe_intr_test(adapter, &data[2])) 1911 eth_test->flags |= ETH_TEST_FL_FAILED; 1912 1913 /* If SRIOV or VMDq is enabled then skip MAC 1914 * loopback diagnostic. */ 1915 if (adapter->flags & (IXGBE_FLAG_SRIOV_ENABLED | 1916 IXGBE_FLAG_VMDQ_ENABLED)) { 1917 e_info(hw, "Skip MAC loopback diagnostic in VT " 1918 "mode\n"); 1919 data[3] = 0; 1920 goto skip_loopback; 1921 } 1922 1923 ixgbe_reset(adapter); 1924 e_info(hw, "loopback testing starting\n"); 1925 if (ixgbe_loopback_test(adapter, &data[3])) 1926 eth_test->flags |= ETH_TEST_FL_FAILED; 1927 1928 skip_loopback: 1929 ixgbe_reset(adapter); 1930 1931 clear_bit(__IXGBE_TESTING, &adapter->state); 1932 if (if_running) 1933 dev_open(netdev); 1934 } else { 1935 e_info(hw, "online testing starting\n"); 1936 /* Online tests */ 1937 if (ixgbe_link_test(adapter, &data[4])) 1938 eth_test->flags |= ETH_TEST_FL_FAILED; 1939 1940 /* Online tests aren't run; pass by default */ 1941 data[0] = 0; 1942 data[1] = 0; 1943 data[2] = 0; 1944 data[3] = 0; 1945 1946 clear_bit(__IXGBE_TESTING, &adapter->state); 1947 } 1948 skip_ol_tests: 1949 msleep_interruptible(4 * 1000); 1950 } 1951 1952 static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter, 1953 struct ethtool_wolinfo *wol) 1954 { 1955 struct ixgbe_hw *hw = &adapter->hw; 1956 int retval = 0; 1957 1958 /* WOL not supported for all devices */ 1959 if (!ixgbe_wol_supported(adapter, hw->device_id, 1960 hw->subsystem_device_id)) { 1961 retval = 1; 1962 wol->supported = 0; 1963 } 1964 1965 return retval; 1966 } 1967 1968 static void ixgbe_get_wol(struct net_device *netdev, 1969 struct ethtool_wolinfo *wol) 1970 { 1971 struct ixgbe_adapter *adapter = netdev_priv(netdev); 1972 1973 wol->supported = WAKE_UCAST | WAKE_MCAST | 1974 WAKE_BCAST | WAKE_MAGIC; 1975 wol->wolopts = 0; 1976 1977 if (ixgbe_wol_exclusion(adapter, wol) || 1978 !device_can_wakeup(&adapter->pdev->dev)) 1979 return; 1980 1981 if (adapter->wol & IXGBE_WUFC_EX) 1982 wol->wolopts |= WAKE_UCAST; 1983 if (adapter->wol & IXGBE_WUFC_MC) 1984 wol->wolopts |= WAKE_MCAST; 1985 if (adapter->wol & IXGBE_WUFC_BC) 1986 wol->wolopts |= WAKE_BCAST; 1987 if (adapter->wol & IXGBE_WUFC_MAG) 1988 wol->wolopts |= WAKE_MAGIC; 1989 } 1990 1991 static int ixgbe_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) 1992 { 1993 struct ixgbe_adapter *adapter = netdev_priv(netdev); 1994 1995 if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE)) 1996 return -EOPNOTSUPP; 1997 1998 if (ixgbe_wol_exclusion(adapter, wol)) 1999 return wol->wolopts ? -EOPNOTSUPP : 0; 2000 2001 adapter->wol = 0; 2002 2003 if (wol->wolopts & WAKE_UCAST) 2004 adapter->wol |= IXGBE_WUFC_EX; 2005 if (wol->wolopts & WAKE_MCAST) 2006 adapter->wol |= IXGBE_WUFC_MC; 2007 if (wol->wolopts & WAKE_BCAST) 2008 adapter->wol |= IXGBE_WUFC_BC; 2009 if (wol->wolopts & WAKE_MAGIC) 2010 adapter->wol |= IXGBE_WUFC_MAG; 2011 2012 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); 2013 2014 return 0; 2015 } 2016 2017 static int ixgbe_nway_reset(struct net_device *netdev) 2018 { 2019 struct ixgbe_adapter *adapter = netdev_priv(netdev); 2020 2021 if (netif_running(netdev)) 2022 ixgbe_reinit_locked(adapter); 2023 2024 return 0; 2025 } 2026 2027 static int ixgbe_set_phys_id(struct net_device *netdev, 2028 enum ethtool_phys_id_state state) 2029 { 2030 struct ixgbe_adapter *adapter = netdev_priv(netdev); 2031 struct ixgbe_hw *hw = &adapter->hw; 2032 2033 switch (state) { 2034 case ETHTOOL_ID_ACTIVE: 2035 adapter->led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); 2036 return 2; 2037 2038 case ETHTOOL_ID_ON: 2039 hw->mac.ops.led_on(hw, IXGBE_LED_ON); 2040 break; 2041 2042 case ETHTOOL_ID_OFF: 2043 hw->mac.ops.led_off(hw, IXGBE_LED_ON); 2044 break; 2045 2046 case ETHTOOL_ID_INACTIVE: 2047 /* Restore LED settings */ 2048 IXGBE_WRITE_REG(&adapter->hw, IXGBE_LEDCTL, adapter->led_reg); 2049 break; 2050 } 2051 2052 return 0; 2053 } 2054 2055 static int ixgbe_get_coalesce(struct net_device *netdev, 2056 struct ethtool_coalesce *ec) 2057 { 2058 struct ixgbe_adapter *adapter = netdev_priv(netdev); 2059 2060 /* only valid if in constant ITR mode */ 2061 if (adapter->rx_itr_setting <= 1) 2062 ec->rx_coalesce_usecs = adapter->rx_itr_setting; 2063 else 2064 ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2; 2065 2066 /* if in mixed tx/rx queues per vector mode, report only rx settings */ 2067 if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count) 2068 return 0; 2069 2070 /* only valid if in constant ITR mode */ 2071 if (adapter->tx_itr_setting <= 1) 2072 ec->tx_coalesce_usecs = adapter->tx_itr_setting; 2073 else 2074 ec->tx_coalesce_usecs = adapter->tx_itr_setting >> 2; 2075 2076 return 0; 2077 } 2078 2079 /* 2080 * this function must be called before setting the new value of 2081 * rx_itr_setting 2082 */ 2083 static bool ixgbe_update_rsc(struct ixgbe_adapter *adapter) 2084 { 2085 struct net_device *netdev = adapter->netdev; 2086 2087 /* nothing to do if LRO or RSC are not enabled */ 2088 if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) || 2089 !(netdev->features & NETIF_F_LRO)) 2090 return false; 2091 2092 /* check the feature flag value and enable RSC if necessary */ 2093 if (adapter->rx_itr_setting == 1 || 2094 adapter->rx_itr_setting > IXGBE_MIN_RSC_ITR) { 2095 if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) { 2096 adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED; 2097 e_info(probe, "rx-usecs value high enough " 2098 "to re-enable RSC\n"); 2099 return true; 2100 } 2101 /* if interrupt rate is too high then disable RSC */ 2102 } else if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) { 2103 adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED; 2104 e_info(probe, "rx-usecs set too low, disabling RSC\n"); 2105 return true; 2106 } 2107 return false; 2108 } 2109 2110 static int ixgbe_set_coalesce(struct net_device *netdev, 2111 struct ethtool_coalesce *ec) 2112 { 2113 struct ixgbe_adapter *adapter = netdev_priv(netdev); 2114 struct ixgbe_q_vector *q_vector; 2115 int i; 2116 int num_vectors; 2117 u16 tx_itr_param, rx_itr_param; 2118 bool need_reset = false; 2119 2120 /* don't accept tx specific changes if we've got mixed RxTx vectors */ 2121 if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count 2122 && ec->tx_coalesce_usecs) 2123 return -EINVAL; 2124 2125 if ((ec->rx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)) || 2126 (ec->tx_coalesce_usecs > (IXGBE_MAX_EITR >> 2))) 2127 return -EINVAL; 2128 2129 if (ec->rx_coalesce_usecs > 1) 2130 adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2; 2131 else 2132 adapter->rx_itr_setting = ec->rx_coalesce_usecs; 2133 2134 if (adapter->rx_itr_setting == 1) 2135 rx_itr_param = IXGBE_20K_ITR; 2136 else 2137 rx_itr_param = adapter->rx_itr_setting; 2138 2139 if (ec->tx_coalesce_usecs > 1) 2140 adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2; 2141 else 2142 adapter->tx_itr_setting = ec->tx_coalesce_usecs; 2143 2144 if (adapter->tx_itr_setting == 1) 2145 tx_itr_param = IXGBE_10K_ITR; 2146 else 2147 tx_itr_param = adapter->tx_itr_setting; 2148 2149 /* check the old value and enable RSC if necessary */ 2150 need_reset = ixgbe_update_rsc(adapter); 2151 2152 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) 2153 num_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 2154 else 2155 num_vectors = 1; 2156 2157 for (i = 0; i < num_vectors; i++) { 2158 q_vector = adapter->q_vector[i]; 2159 if (q_vector->tx.count && !q_vector->rx.count) 2160 /* tx only */ 2161 q_vector->itr = tx_itr_param; 2162 else 2163 /* rx only or mixed */ 2164 q_vector->itr = rx_itr_param; 2165 ixgbe_write_eitr(q_vector); 2166 } 2167 2168 /* 2169 * do reset here at the end to make sure EITR==0 case is handled 2170 * correctly w.r.t stopping tx, and changing TXDCTL.WTHRESH settings 2171 * also locks in RSC enable/disable which requires reset 2172 */ 2173 if (need_reset) 2174 ixgbe_do_reset(netdev); 2175 2176 return 0; 2177 } 2178 2179 static int ixgbe_get_ethtool_fdir_entry(struct ixgbe_adapter *adapter, 2180 struct ethtool_rxnfc *cmd) 2181 { 2182 union ixgbe_atr_input *mask = &adapter->fdir_mask; 2183 struct ethtool_rx_flow_spec *fsp = 2184 (struct ethtool_rx_flow_spec *)&cmd->fs; 2185 struct hlist_node *node, *node2; 2186 struct ixgbe_fdir_filter *rule = NULL; 2187 2188 /* report total rule count */ 2189 cmd->data = (1024 << adapter->fdir_pballoc) - 2; 2190 2191 hlist_for_each_entry_safe(rule, node, node2, 2192 &adapter->fdir_filter_list, fdir_node) { 2193 if (fsp->location <= rule->sw_idx) 2194 break; 2195 } 2196 2197 if (!rule || fsp->location != rule->sw_idx) 2198 return -EINVAL; 2199 2200 /* fill out the flow spec entry */ 2201 2202 /* set flow type field */ 2203 switch (rule->filter.formatted.flow_type) { 2204 case IXGBE_ATR_FLOW_TYPE_TCPV4: 2205 fsp->flow_type = TCP_V4_FLOW; 2206 break; 2207 case IXGBE_ATR_FLOW_TYPE_UDPV4: 2208 fsp->flow_type = UDP_V4_FLOW; 2209 break; 2210 case IXGBE_ATR_FLOW_TYPE_SCTPV4: 2211 fsp->flow_type = SCTP_V4_FLOW; 2212 break; 2213 case IXGBE_ATR_FLOW_TYPE_IPV4: 2214 fsp->flow_type = IP_USER_FLOW; 2215 fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4; 2216 fsp->h_u.usr_ip4_spec.proto = 0; 2217 fsp->m_u.usr_ip4_spec.proto = 0; 2218 break; 2219 default: 2220 return -EINVAL; 2221 } 2222 2223 fsp->h_u.tcp_ip4_spec.psrc = rule->filter.formatted.src_port; 2224 fsp->m_u.tcp_ip4_spec.psrc = mask->formatted.src_port; 2225 fsp->h_u.tcp_ip4_spec.pdst = rule->filter.formatted.dst_port; 2226 fsp->m_u.tcp_ip4_spec.pdst = mask->formatted.dst_port; 2227 fsp->h_u.tcp_ip4_spec.ip4src = rule->filter.formatted.src_ip[0]; 2228 fsp->m_u.tcp_ip4_spec.ip4src = mask->formatted.src_ip[0]; 2229 fsp->h_u.tcp_ip4_spec.ip4dst = rule->filter.formatted.dst_ip[0]; 2230 fsp->m_u.tcp_ip4_spec.ip4dst = mask->formatted.dst_ip[0]; 2231 fsp->h_ext.vlan_tci = rule->filter.formatted.vlan_id; 2232 fsp->m_ext.vlan_tci = mask->formatted.vlan_id; 2233 fsp->h_ext.vlan_etype = rule->filter.formatted.flex_bytes; 2234 fsp->m_ext.vlan_etype = mask->formatted.flex_bytes; 2235 fsp->h_ext.data[1] = htonl(rule->filter.formatted.vm_pool); 2236 fsp->m_ext.data[1] = htonl(mask->formatted.vm_pool); 2237 fsp->flow_type |= FLOW_EXT; 2238 2239 /* record action */ 2240 if (rule->action == IXGBE_FDIR_DROP_QUEUE) 2241 fsp->ring_cookie = RX_CLS_FLOW_DISC; 2242 else 2243 fsp->ring_cookie = rule->action; 2244 2245 return 0; 2246 } 2247 2248 static int ixgbe_get_ethtool_fdir_all(struct ixgbe_adapter *adapter, 2249 struct ethtool_rxnfc *cmd, 2250 u32 *rule_locs) 2251 { 2252 struct hlist_node *node, *node2; 2253 struct ixgbe_fdir_filter *rule; 2254 int cnt = 0; 2255 2256 /* report total rule count */ 2257 cmd->data = (1024 << adapter->fdir_pballoc) - 2; 2258 2259 hlist_for_each_entry_safe(rule, node, node2, 2260 &adapter->fdir_filter_list, fdir_node) { 2261 if (cnt == cmd->rule_cnt) 2262 return -EMSGSIZE; 2263 rule_locs[cnt] = rule->sw_idx; 2264 cnt++; 2265 } 2266 2267 cmd->rule_cnt = cnt; 2268 2269 return 0; 2270 } 2271 2272 static int ixgbe_get_rss_hash_opts(struct ixgbe_adapter *adapter, 2273 struct ethtool_rxnfc *cmd) 2274 { 2275 cmd->data = 0; 2276 2277 /* if RSS is disabled then report no hashing */ 2278 if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED)) 2279 return 0; 2280 2281 /* Report default options for RSS on ixgbe */ 2282 switch (cmd->flow_type) { 2283 case TCP_V4_FLOW: 2284 cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; 2285 case UDP_V4_FLOW: 2286 if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP) 2287 cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; 2288 case SCTP_V4_FLOW: 2289 case AH_ESP_V4_FLOW: 2290 case AH_V4_FLOW: 2291 case ESP_V4_FLOW: 2292 case IPV4_FLOW: 2293 cmd->data |= RXH_IP_SRC | RXH_IP_DST; 2294 break; 2295 case TCP_V6_FLOW: 2296 cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; 2297 case UDP_V6_FLOW: 2298 if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP) 2299 cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; 2300 case SCTP_V6_FLOW: 2301 case AH_ESP_V6_FLOW: 2302 case AH_V6_FLOW: 2303 case ESP_V6_FLOW: 2304 case IPV6_FLOW: 2305 cmd->data |= RXH_IP_SRC | RXH_IP_DST; 2306 break; 2307 default: 2308 return -EINVAL; 2309 } 2310 2311 return 0; 2312 } 2313 2314 static int ixgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, 2315 u32 *rule_locs) 2316 { 2317 struct ixgbe_adapter *adapter = netdev_priv(dev); 2318 int ret = -EOPNOTSUPP; 2319 2320 switch (cmd->cmd) { 2321 case ETHTOOL_GRXRINGS: 2322 cmd->data = adapter->num_rx_queues; 2323 ret = 0; 2324 break; 2325 case ETHTOOL_GRXCLSRLCNT: 2326 cmd->rule_cnt = adapter->fdir_filter_count; 2327 ret = 0; 2328 break; 2329 case ETHTOOL_GRXCLSRULE: 2330 ret = ixgbe_get_ethtool_fdir_entry(adapter, cmd); 2331 break; 2332 case ETHTOOL_GRXCLSRLALL: 2333 ret = ixgbe_get_ethtool_fdir_all(adapter, cmd, rule_locs); 2334 break; 2335 case ETHTOOL_GRXFH: 2336 ret = ixgbe_get_rss_hash_opts(adapter, cmd); 2337 break; 2338 default: 2339 break; 2340 } 2341 2342 return ret; 2343 } 2344 2345 static int ixgbe_update_ethtool_fdir_entry(struct ixgbe_adapter *adapter, 2346 struct ixgbe_fdir_filter *input, 2347 u16 sw_idx) 2348 { 2349 struct ixgbe_hw *hw = &adapter->hw; 2350 struct hlist_node *node, *node2, *parent; 2351 struct ixgbe_fdir_filter *rule; 2352 int err = -EINVAL; 2353 2354 parent = NULL; 2355 rule = NULL; 2356 2357 hlist_for_each_entry_safe(rule, node, node2, 2358 &adapter->fdir_filter_list, fdir_node) { 2359 /* hash found, or no matching entry */ 2360 if (rule->sw_idx >= sw_idx) 2361 break; 2362 parent = node; 2363 } 2364 2365 /* if there is an old rule occupying our place remove it */ 2366 if (rule && (rule->sw_idx == sw_idx)) { 2367 if (!input || (rule->filter.formatted.bkt_hash != 2368 input->filter.formatted.bkt_hash)) { 2369 err = ixgbe_fdir_erase_perfect_filter_82599(hw, 2370 &rule->filter, 2371 sw_idx); 2372 } 2373 2374 hlist_del(&rule->fdir_node); 2375 kfree(rule); 2376 adapter->fdir_filter_count--; 2377 } 2378 2379 /* 2380 * If no input this was a delete, err should be 0 if a rule was 2381 * successfully found and removed from the list else -EINVAL 2382 */ 2383 if (!input) 2384 return err; 2385 2386 /* initialize node and set software index */ 2387 INIT_HLIST_NODE(&input->fdir_node); 2388 2389 /* add filter to the list */ 2390 if (parent) 2391 hlist_add_after(parent, &input->fdir_node); 2392 else 2393 hlist_add_head(&input->fdir_node, 2394 &adapter->fdir_filter_list); 2395 2396 /* update counts */ 2397 adapter->fdir_filter_count++; 2398 2399 return 0; 2400 } 2401 2402 static int ixgbe_flowspec_to_flow_type(struct ethtool_rx_flow_spec *fsp, 2403 u8 *flow_type) 2404 { 2405 switch (fsp->flow_type & ~FLOW_EXT) { 2406 case TCP_V4_FLOW: 2407 *flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4; 2408 break; 2409 case UDP_V4_FLOW: 2410 *flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4; 2411 break; 2412 case SCTP_V4_FLOW: 2413 *flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4; 2414 break; 2415 case IP_USER_FLOW: 2416 switch (fsp->h_u.usr_ip4_spec.proto) { 2417 case IPPROTO_TCP: 2418 *flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4; 2419 break; 2420 case IPPROTO_UDP: 2421 *flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4; 2422 break; 2423 case IPPROTO_SCTP: 2424 *flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4; 2425 break; 2426 case 0: 2427 if (!fsp->m_u.usr_ip4_spec.proto) { 2428 *flow_type = IXGBE_ATR_FLOW_TYPE_IPV4; 2429 break; 2430 } 2431 default: 2432 return 0; 2433 } 2434 break; 2435 default: 2436 return 0; 2437 } 2438 2439 return 1; 2440 } 2441 2442 static int ixgbe_add_ethtool_fdir_entry(struct ixgbe_adapter *adapter, 2443 struct ethtool_rxnfc *cmd) 2444 { 2445 struct ethtool_rx_flow_spec *fsp = 2446 (struct ethtool_rx_flow_spec *)&cmd->fs; 2447 struct ixgbe_hw *hw = &adapter->hw; 2448 struct ixgbe_fdir_filter *input; 2449 union ixgbe_atr_input mask; 2450 int err; 2451 2452 if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) 2453 return -EOPNOTSUPP; 2454 2455 /* 2456 * Don't allow programming if the action is a queue greater than 2457 * the number of online Rx queues. 2458 */ 2459 if ((fsp->ring_cookie != RX_CLS_FLOW_DISC) && 2460 (fsp->ring_cookie >= adapter->num_rx_queues)) 2461 return -EINVAL; 2462 2463 /* Don't allow indexes to exist outside of available space */ 2464 if (fsp->location >= ((1024 << adapter->fdir_pballoc) - 2)) { 2465 e_err(drv, "Location out of range\n"); 2466 return -EINVAL; 2467 } 2468 2469 input = kzalloc(sizeof(*input), GFP_ATOMIC); 2470 if (!input) 2471 return -ENOMEM; 2472 2473 memset(&mask, 0, sizeof(union ixgbe_atr_input)); 2474 2475 /* set SW index */ 2476 input->sw_idx = fsp->location; 2477 2478 /* record flow type */ 2479 if (!ixgbe_flowspec_to_flow_type(fsp, 2480 &input->filter.formatted.flow_type)) { 2481 e_err(drv, "Unrecognized flow type\n"); 2482 goto err_out; 2483 } 2484 2485 mask.formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK | 2486 IXGBE_ATR_L4TYPE_MASK; 2487 2488 if (input->filter.formatted.flow_type == IXGBE_ATR_FLOW_TYPE_IPV4) 2489 mask.formatted.flow_type &= IXGBE_ATR_L4TYPE_IPV6_MASK; 2490 2491 /* Copy input into formatted structures */ 2492 input->filter.formatted.src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4src; 2493 mask.formatted.src_ip[0] = fsp->m_u.tcp_ip4_spec.ip4src; 2494 input->filter.formatted.dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst; 2495 mask.formatted.dst_ip[0] = fsp->m_u.tcp_ip4_spec.ip4dst; 2496 input->filter.formatted.src_port = fsp->h_u.tcp_ip4_spec.psrc; 2497 mask.formatted.src_port = fsp->m_u.tcp_ip4_spec.psrc; 2498 input->filter.formatted.dst_port = fsp->h_u.tcp_ip4_spec.pdst; 2499 mask.formatted.dst_port = fsp->m_u.tcp_ip4_spec.pdst; 2500 2501 if (fsp->flow_type & FLOW_EXT) { 2502 input->filter.formatted.vm_pool = 2503 (unsigned char)ntohl(fsp->h_ext.data[1]); 2504 mask.formatted.vm_pool = 2505 (unsigned char)ntohl(fsp->m_ext.data[1]); 2506 input->filter.formatted.vlan_id = fsp->h_ext.vlan_tci; 2507 mask.formatted.vlan_id = fsp->m_ext.vlan_tci; 2508 input->filter.formatted.flex_bytes = 2509 fsp->h_ext.vlan_etype; 2510 mask.formatted.flex_bytes = fsp->m_ext.vlan_etype; 2511 } 2512 2513 /* determine if we need to drop or route the packet */ 2514 if (fsp->ring_cookie == RX_CLS_FLOW_DISC) 2515 input->action = IXGBE_FDIR_DROP_QUEUE; 2516 else 2517 input->action = fsp->ring_cookie; 2518 2519 spin_lock(&adapter->fdir_perfect_lock); 2520 2521 if (hlist_empty(&adapter->fdir_filter_list)) { 2522 /* save mask and program input mask into HW */ 2523 memcpy(&adapter->fdir_mask, &mask, sizeof(mask)); 2524 err = ixgbe_fdir_set_input_mask_82599(hw, &mask); 2525 if (err) { 2526 e_err(drv, "Error writing mask\n"); 2527 goto err_out_w_lock; 2528 } 2529 } else if (memcmp(&adapter->fdir_mask, &mask, sizeof(mask))) { 2530 e_err(drv, "Only one mask supported per port\n"); 2531 goto err_out_w_lock; 2532 } 2533 2534 /* apply mask and compute/store hash */ 2535 ixgbe_atr_compute_perfect_hash_82599(&input->filter, &mask); 2536 2537 /* program filters to filter memory */ 2538 err = ixgbe_fdir_write_perfect_filter_82599(hw, 2539 &input->filter, input->sw_idx, 2540 (input->action == IXGBE_FDIR_DROP_QUEUE) ? 2541 IXGBE_FDIR_DROP_QUEUE : 2542 adapter->rx_ring[input->action]->reg_idx); 2543 if (err) 2544 goto err_out_w_lock; 2545 2546 ixgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx); 2547 2548 spin_unlock(&adapter->fdir_perfect_lock); 2549 2550 return err; 2551 err_out_w_lock: 2552 spin_unlock(&adapter->fdir_perfect_lock); 2553 err_out: 2554 kfree(input); 2555 return -EINVAL; 2556 } 2557 2558 static int ixgbe_del_ethtool_fdir_entry(struct ixgbe_adapter *adapter, 2559 struct ethtool_rxnfc *cmd) 2560 { 2561 struct ethtool_rx_flow_spec *fsp = 2562 (struct ethtool_rx_flow_spec *)&cmd->fs; 2563 int err; 2564 2565 spin_lock(&adapter->fdir_perfect_lock); 2566 err = ixgbe_update_ethtool_fdir_entry(adapter, NULL, fsp->location); 2567 spin_unlock(&adapter->fdir_perfect_lock); 2568 2569 return err; 2570 } 2571 2572 #define UDP_RSS_FLAGS (IXGBE_FLAG2_RSS_FIELD_IPV4_UDP | \ 2573 IXGBE_FLAG2_RSS_FIELD_IPV6_UDP) 2574 static int ixgbe_set_rss_hash_opt(struct ixgbe_adapter *adapter, 2575 struct ethtool_rxnfc *nfc) 2576 { 2577 u32 flags2 = adapter->flags2; 2578 2579 /* 2580 * RSS does not support anything other than hashing 2581 * to queues on src and dst IPs and ports 2582 */ 2583 if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST | 2584 RXH_L4_B_0_1 | RXH_L4_B_2_3)) 2585 return -EINVAL; 2586 2587 switch (nfc->flow_type) { 2588 case TCP_V4_FLOW: 2589 case TCP_V6_FLOW: 2590 if (!(nfc->data & RXH_IP_SRC) || 2591 !(nfc->data & RXH_IP_DST) || 2592 !(nfc->data & RXH_L4_B_0_1) || 2593 !(nfc->data & RXH_L4_B_2_3)) 2594 return -EINVAL; 2595 break; 2596 case UDP_V4_FLOW: 2597 if (!(nfc->data & RXH_IP_SRC) || 2598 !(nfc->data & RXH_IP_DST)) 2599 return -EINVAL; 2600 switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { 2601 case 0: 2602 flags2 &= ~IXGBE_FLAG2_RSS_FIELD_IPV4_UDP; 2603 break; 2604 case (RXH_L4_B_0_1 | RXH_L4_B_2_3): 2605 flags2 |= IXGBE_FLAG2_RSS_FIELD_IPV4_UDP; 2606 break; 2607 default: 2608 return -EINVAL; 2609 } 2610 break; 2611 case UDP_V6_FLOW: 2612 if (!(nfc->data & RXH_IP_SRC) || 2613 !(nfc->data & RXH_IP_DST)) 2614 return -EINVAL; 2615 switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { 2616 case 0: 2617 flags2 &= ~IXGBE_FLAG2_RSS_FIELD_IPV6_UDP; 2618 break; 2619 case (RXH_L4_B_0_1 | RXH_L4_B_2_3): 2620 flags2 |= IXGBE_FLAG2_RSS_FIELD_IPV6_UDP; 2621 break; 2622 default: 2623 return -EINVAL; 2624 } 2625 break; 2626 case AH_ESP_V4_FLOW: 2627 case AH_V4_FLOW: 2628 case ESP_V4_FLOW: 2629 case SCTP_V4_FLOW: 2630 case AH_ESP_V6_FLOW: 2631 case AH_V6_FLOW: 2632 case ESP_V6_FLOW: 2633 case SCTP_V6_FLOW: 2634 if (!(nfc->data & RXH_IP_SRC) || 2635 !(nfc->data & RXH_IP_DST) || 2636 (nfc->data & RXH_L4_B_0_1) || 2637 (nfc->data & RXH_L4_B_2_3)) 2638 return -EINVAL; 2639 break; 2640 default: 2641 return -EINVAL; 2642 } 2643 2644 /* if we changed something we need to update flags */ 2645 if (flags2 != adapter->flags2) { 2646 struct ixgbe_hw *hw = &adapter->hw; 2647 u32 mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC); 2648 2649 if ((flags2 & UDP_RSS_FLAGS) && 2650 !(adapter->flags2 & UDP_RSS_FLAGS)) 2651 e_warn(drv, "enabling UDP RSS: fragmented packets" 2652 " may arrive out of order to the stack above\n"); 2653 2654 adapter->flags2 = flags2; 2655 2656 /* Perform hash on these packet types */ 2657 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4 2658 | IXGBE_MRQC_RSS_FIELD_IPV4_TCP 2659 | IXGBE_MRQC_RSS_FIELD_IPV6 2660 | IXGBE_MRQC_RSS_FIELD_IPV6_TCP; 2661 2662 mrqc &= ~(IXGBE_MRQC_RSS_FIELD_IPV4_UDP | 2663 IXGBE_MRQC_RSS_FIELD_IPV6_UDP); 2664 2665 if (flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP) 2666 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP; 2667 2668 if (flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP) 2669 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP; 2670 2671 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); 2672 } 2673 2674 return 0; 2675 } 2676 2677 static int ixgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) 2678 { 2679 struct ixgbe_adapter *adapter = netdev_priv(dev); 2680 int ret = -EOPNOTSUPP; 2681 2682 switch (cmd->cmd) { 2683 case ETHTOOL_SRXCLSRLINS: 2684 ret = ixgbe_add_ethtool_fdir_entry(adapter, cmd); 2685 break; 2686 case ETHTOOL_SRXCLSRLDEL: 2687 ret = ixgbe_del_ethtool_fdir_entry(adapter, cmd); 2688 break; 2689 case ETHTOOL_SRXFH: 2690 ret = ixgbe_set_rss_hash_opt(adapter, cmd); 2691 break; 2692 default: 2693 break; 2694 } 2695 2696 return ret; 2697 } 2698 2699 static int ixgbe_get_ts_info(struct net_device *dev, 2700 struct ethtool_ts_info *info) 2701 { 2702 struct ixgbe_adapter *adapter = netdev_priv(dev); 2703 2704 switch (adapter->hw.mac.type) { 2705 #ifdef CONFIG_IXGBE_PTP 2706 case ixgbe_mac_X540: 2707 case ixgbe_mac_82599EB: 2708 info->so_timestamping = 2709 SOF_TIMESTAMPING_TX_HARDWARE | 2710 SOF_TIMESTAMPING_RX_HARDWARE | 2711 SOF_TIMESTAMPING_RAW_HARDWARE; 2712 2713 if (adapter->ptp_clock) 2714 info->phc_index = ptp_clock_index(adapter->ptp_clock); 2715 else 2716 info->phc_index = -1; 2717 2718 info->tx_types = 2719 (1 << HWTSTAMP_TX_OFF) | 2720 (1 << HWTSTAMP_TX_ON); 2721 2722 info->rx_filters = 2723 (1 << HWTSTAMP_FILTER_NONE) | 2724 (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) | 2725 (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) | 2726 (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) | 2727 (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) | 2728 (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) | 2729 (1 << HWTSTAMP_FILTER_SOME); 2730 break; 2731 #endif /* CONFIG_IXGBE_PTP */ 2732 default: 2733 return ethtool_op_get_ts_info(dev, info); 2734 break; 2735 } 2736 return 0; 2737 } 2738 2739 static const struct ethtool_ops ixgbe_ethtool_ops = { 2740 .get_settings = ixgbe_get_settings, 2741 .set_settings = ixgbe_set_settings, 2742 .get_drvinfo = ixgbe_get_drvinfo, 2743 .get_regs_len = ixgbe_get_regs_len, 2744 .get_regs = ixgbe_get_regs, 2745 .get_wol = ixgbe_get_wol, 2746 .set_wol = ixgbe_set_wol, 2747 .nway_reset = ixgbe_nway_reset, 2748 .get_link = ethtool_op_get_link, 2749 .get_eeprom_len = ixgbe_get_eeprom_len, 2750 .get_eeprom = ixgbe_get_eeprom, 2751 .set_eeprom = ixgbe_set_eeprom, 2752 .get_ringparam = ixgbe_get_ringparam, 2753 .set_ringparam = ixgbe_set_ringparam, 2754 .get_pauseparam = ixgbe_get_pauseparam, 2755 .set_pauseparam = ixgbe_set_pauseparam, 2756 .get_msglevel = ixgbe_get_msglevel, 2757 .set_msglevel = ixgbe_set_msglevel, 2758 .self_test = ixgbe_diag_test, 2759 .get_strings = ixgbe_get_strings, 2760 .set_phys_id = ixgbe_set_phys_id, 2761 .get_sset_count = ixgbe_get_sset_count, 2762 .get_ethtool_stats = ixgbe_get_ethtool_stats, 2763 .get_coalesce = ixgbe_get_coalesce, 2764 .set_coalesce = ixgbe_set_coalesce, 2765 .get_rxnfc = ixgbe_get_rxnfc, 2766 .set_rxnfc = ixgbe_set_rxnfc, 2767 .get_ts_info = ixgbe_get_ts_info, 2768 }; 2769 2770 void ixgbe_set_ethtool_ops(struct net_device *netdev) 2771 { 2772 SET_ETHTOOL_OPS(netdev, &ixgbe_ethtool_ops); 2773 } 2774