1 /******************************************************************************* 2 3 Intel 10 Gigabit PCI Express Linux driver 4 Copyright(c) 1999 - 2012 Intel Corporation. 5 6 This program is free software; you can redistribute it and/or modify it 7 under the terms and conditions of the GNU General Public License, 8 version 2, as published by the Free Software Foundation. 9 10 This program is distributed in the hope it will be useful, but WITHOUT 11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 13 more details. 14 15 You should have received a copy of the GNU General Public License along with 16 this program; if not, write to the Free Software Foundation, Inc., 17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 18 19 The full GNU General Public License is included in this distribution in 20 the file called "COPYING". 21 22 Contact Information: 23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 25 26 *******************************************************************************/ 27 28 /* ethtool support for ixgbe */ 29 30 #include <linux/interrupt.h> 31 #include <linux/types.h> 32 #include <linux/module.h> 33 #include <linux/slab.h> 34 #include <linux/pci.h> 35 #include <linux/netdevice.h> 36 #include <linux/ethtool.h> 37 #include <linux/vmalloc.h> 38 #include <linux/highmem.h> 39 #include <linux/uaccess.h> 40 41 #include "ixgbe.h" 42 43 44 #define IXGBE_ALL_RAR_ENTRIES 16 45 46 enum {NETDEV_STATS, IXGBE_STATS}; 47 48 struct ixgbe_stats { 49 char stat_string[ETH_GSTRING_LEN]; 50 int type; 51 int sizeof_stat; 52 int stat_offset; 53 }; 54 55 #define IXGBE_STAT(m) IXGBE_STATS, \ 56 sizeof(((struct ixgbe_adapter *)0)->m), \ 57 offsetof(struct ixgbe_adapter, m) 58 #define IXGBE_NETDEV_STAT(m) NETDEV_STATS, \ 59 sizeof(((struct rtnl_link_stats64 *)0)->m), \ 60 offsetof(struct rtnl_link_stats64, m) 61 62 static const struct ixgbe_stats ixgbe_gstrings_stats[] = { 63 {"rx_packets", IXGBE_NETDEV_STAT(rx_packets)}, 64 {"tx_packets", IXGBE_NETDEV_STAT(tx_packets)}, 65 {"rx_bytes", IXGBE_NETDEV_STAT(rx_bytes)}, 66 {"tx_bytes", IXGBE_NETDEV_STAT(tx_bytes)}, 67 {"rx_pkts_nic", IXGBE_STAT(stats.gprc)}, 68 {"tx_pkts_nic", IXGBE_STAT(stats.gptc)}, 69 {"rx_bytes_nic", IXGBE_STAT(stats.gorc)}, 70 {"tx_bytes_nic", IXGBE_STAT(stats.gotc)}, 71 {"lsc_int", IXGBE_STAT(lsc_int)}, 72 {"tx_busy", IXGBE_STAT(tx_busy)}, 73 {"non_eop_descs", IXGBE_STAT(non_eop_descs)}, 74 {"rx_errors", IXGBE_NETDEV_STAT(rx_errors)}, 75 {"tx_errors", IXGBE_NETDEV_STAT(tx_errors)}, 76 {"rx_dropped", IXGBE_NETDEV_STAT(rx_dropped)}, 77 {"tx_dropped", IXGBE_NETDEV_STAT(tx_dropped)}, 78 {"multicast", IXGBE_NETDEV_STAT(multicast)}, 79 {"broadcast", IXGBE_STAT(stats.bprc)}, 80 {"rx_no_buffer_count", IXGBE_STAT(stats.rnbc[0]) }, 81 {"collisions", IXGBE_NETDEV_STAT(collisions)}, 82 {"rx_over_errors", IXGBE_NETDEV_STAT(rx_over_errors)}, 83 {"rx_crc_errors", IXGBE_NETDEV_STAT(rx_crc_errors)}, 84 {"rx_frame_errors", IXGBE_NETDEV_STAT(rx_frame_errors)}, 85 {"hw_rsc_aggregated", IXGBE_STAT(rsc_total_count)}, 86 {"hw_rsc_flushed", IXGBE_STAT(rsc_total_flush)}, 87 {"fdir_match", IXGBE_STAT(stats.fdirmatch)}, 88 {"fdir_miss", IXGBE_STAT(stats.fdirmiss)}, 89 {"fdir_overflow", IXGBE_STAT(fdir_overflow)}, 90 {"rx_fifo_errors", IXGBE_NETDEV_STAT(rx_fifo_errors)}, 91 {"rx_missed_errors", IXGBE_NETDEV_STAT(rx_missed_errors)}, 92 {"tx_aborted_errors", IXGBE_NETDEV_STAT(tx_aborted_errors)}, 93 {"tx_carrier_errors", IXGBE_NETDEV_STAT(tx_carrier_errors)}, 94 {"tx_fifo_errors", IXGBE_NETDEV_STAT(tx_fifo_errors)}, 95 {"tx_heartbeat_errors", IXGBE_NETDEV_STAT(tx_heartbeat_errors)}, 96 {"tx_timeout_count", IXGBE_STAT(tx_timeout_count)}, 97 {"tx_restart_queue", IXGBE_STAT(restart_queue)}, 98 {"rx_long_length_errors", IXGBE_STAT(stats.roc)}, 99 {"rx_short_length_errors", IXGBE_STAT(stats.ruc)}, 100 {"tx_flow_control_xon", IXGBE_STAT(stats.lxontxc)}, 101 {"rx_flow_control_xon", IXGBE_STAT(stats.lxonrxc)}, 102 {"tx_flow_control_xoff", IXGBE_STAT(stats.lxofftxc)}, 103 {"rx_flow_control_xoff", IXGBE_STAT(stats.lxoffrxc)}, 104 {"rx_csum_offload_errors", IXGBE_STAT(hw_csum_rx_error)}, 105 {"alloc_rx_page_failed", IXGBE_STAT(alloc_rx_page_failed)}, 106 {"alloc_rx_buff_failed", IXGBE_STAT(alloc_rx_buff_failed)}, 107 {"rx_no_dma_resources", IXGBE_STAT(hw_rx_no_dma_resources)}, 108 {"os2bmc_rx_by_bmc", IXGBE_STAT(stats.o2bgptc)}, 109 {"os2bmc_tx_by_bmc", IXGBE_STAT(stats.b2ospc)}, 110 {"os2bmc_tx_by_host", IXGBE_STAT(stats.o2bspc)}, 111 {"os2bmc_rx_by_host", IXGBE_STAT(stats.b2ogprc)}, 112 #ifdef IXGBE_FCOE 113 {"fcoe_bad_fccrc", IXGBE_STAT(stats.fccrc)}, 114 {"rx_fcoe_dropped", IXGBE_STAT(stats.fcoerpdc)}, 115 {"rx_fcoe_packets", IXGBE_STAT(stats.fcoeprc)}, 116 {"rx_fcoe_dwords", IXGBE_STAT(stats.fcoedwrc)}, 117 {"fcoe_noddp", IXGBE_STAT(stats.fcoe_noddp)}, 118 {"fcoe_noddp_ext_buff", IXGBE_STAT(stats.fcoe_noddp_ext_buff)}, 119 {"tx_fcoe_packets", IXGBE_STAT(stats.fcoeptc)}, 120 {"tx_fcoe_dwords", IXGBE_STAT(stats.fcoedwtc)}, 121 #endif /* IXGBE_FCOE */ 122 }; 123 124 /* ixgbe allocates num_tx_queues and num_rx_queues symmetrically so 125 * we set the num_rx_queues to evaluate to num_tx_queues. This is 126 * used because we do not have a good way to get the max number of 127 * rx queues with CONFIG_RPS disabled. 128 */ 129 #define IXGBE_NUM_RX_QUEUES netdev->num_tx_queues 130 131 #define IXGBE_QUEUE_STATS_LEN ( \ 132 (netdev->num_tx_queues + IXGBE_NUM_RX_QUEUES) * \ 133 (sizeof(struct ixgbe_queue_stats) / sizeof(u64))) 134 #define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats) 135 #define IXGBE_PB_STATS_LEN ( \ 136 (sizeof(((struct ixgbe_adapter *)0)->stats.pxonrxc) + \ 137 sizeof(((struct ixgbe_adapter *)0)->stats.pxontxc) + \ 138 sizeof(((struct ixgbe_adapter *)0)->stats.pxoffrxc) + \ 139 sizeof(((struct ixgbe_adapter *)0)->stats.pxofftxc)) \ 140 / sizeof(u64)) 141 #define IXGBE_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + \ 142 IXGBE_PB_STATS_LEN + \ 143 IXGBE_QUEUE_STATS_LEN) 144 145 static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = { 146 "Register test (offline)", "Eeprom test (offline)", 147 "Interrupt test (offline)", "Loopback test (offline)", 148 "Link test (on/offline)" 149 }; 150 #define IXGBE_TEST_LEN sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN 151 152 static int ixgbe_get_settings(struct net_device *netdev, 153 struct ethtool_cmd *ecmd) 154 { 155 struct ixgbe_adapter *adapter = netdev_priv(netdev); 156 struct ixgbe_hw *hw = &adapter->hw; 157 u32 link_speed = 0; 158 bool link_up; 159 160 ecmd->supported = SUPPORTED_10000baseT_Full; 161 ecmd->autoneg = AUTONEG_ENABLE; 162 ecmd->transceiver = XCVR_EXTERNAL; 163 if ((hw->phy.media_type == ixgbe_media_type_copper) || 164 (hw->phy.multispeed_fiber)) { 165 ecmd->supported |= (SUPPORTED_1000baseT_Full | 166 SUPPORTED_Autoneg); 167 168 switch (hw->mac.type) { 169 case ixgbe_mac_X540: 170 ecmd->supported |= SUPPORTED_100baseT_Full; 171 break; 172 default: 173 break; 174 } 175 176 ecmd->advertising = ADVERTISED_Autoneg; 177 if (hw->phy.autoneg_advertised) { 178 if (hw->phy.autoneg_advertised & 179 IXGBE_LINK_SPEED_100_FULL) 180 ecmd->advertising |= ADVERTISED_100baseT_Full; 181 if (hw->phy.autoneg_advertised & 182 IXGBE_LINK_SPEED_10GB_FULL) 183 ecmd->advertising |= ADVERTISED_10000baseT_Full; 184 if (hw->phy.autoneg_advertised & 185 IXGBE_LINK_SPEED_1GB_FULL) 186 ecmd->advertising |= ADVERTISED_1000baseT_Full; 187 } else { 188 /* 189 * Default advertised modes in case 190 * phy.autoneg_advertised isn't set. 191 */ 192 ecmd->advertising |= (ADVERTISED_10000baseT_Full | 193 ADVERTISED_1000baseT_Full); 194 if (hw->mac.type == ixgbe_mac_X540) 195 ecmd->advertising |= ADVERTISED_100baseT_Full; 196 } 197 198 if (hw->phy.media_type == ixgbe_media_type_copper) { 199 ecmd->supported |= SUPPORTED_TP; 200 ecmd->advertising |= ADVERTISED_TP; 201 ecmd->port = PORT_TP; 202 } else { 203 ecmd->supported |= SUPPORTED_FIBRE; 204 ecmd->advertising |= ADVERTISED_FIBRE; 205 ecmd->port = PORT_FIBRE; 206 } 207 } else if (hw->phy.media_type == ixgbe_media_type_backplane) { 208 /* Set as FIBRE until SERDES defined in kernel */ 209 if (hw->device_id == IXGBE_DEV_ID_82598_BX) { 210 ecmd->supported = (SUPPORTED_1000baseT_Full | 211 SUPPORTED_FIBRE); 212 ecmd->advertising = (ADVERTISED_1000baseT_Full | 213 ADVERTISED_FIBRE); 214 ecmd->port = PORT_FIBRE; 215 ecmd->autoneg = AUTONEG_DISABLE; 216 } else if ((hw->device_id == IXGBE_DEV_ID_82599_COMBO_BACKPLANE) || 217 (hw->device_id == IXGBE_DEV_ID_82599_KX4_MEZZ)) { 218 ecmd->supported |= (SUPPORTED_1000baseT_Full | 219 SUPPORTED_Autoneg | 220 SUPPORTED_FIBRE); 221 ecmd->advertising = (ADVERTISED_10000baseT_Full | 222 ADVERTISED_1000baseT_Full | 223 ADVERTISED_Autoneg | 224 ADVERTISED_FIBRE); 225 ecmd->port = PORT_FIBRE; 226 } else { 227 ecmd->supported |= (SUPPORTED_1000baseT_Full | 228 SUPPORTED_FIBRE); 229 ecmd->advertising = (ADVERTISED_10000baseT_Full | 230 ADVERTISED_1000baseT_Full | 231 ADVERTISED_FIBRE); 232 ecmd->port = PORT_FIBRE; 233 } 234 } else { 235 ecmd->supported |= SUPPORTED_FIBRE; 236 ecmd->advertising = (ADVERTISED_10000baseT_Full | 237 ADVERTISED_FIBRE); 238 ecmd->port = PORT_FIBRE; 239 ecmd->autoneg = AUTONEG_DISABLE; 240 } 241 242 /* Get PHY type */ 243 switch (adapter->hw.phy.type) { 244 case ixgbe_phy_tn: 245 case ixgbe_phy_aq: 246 case ixgbe_phy_cu_unknown: 247 /* Copper 10G-BASET */ 248 ecmd->port = PORT_TP; 249 break; 250 case ixgbe_phy_qt: 251 ecmd->port = PORT_FIBRE; 252 break; 253 case ixgbe_phy_nl: 254 case ixgbe_phy_sfp_passive_tyco: 255 case ixgbe_phy_sfp_passive_unknown: 256 case ixgbe_phy_sfp_ftl: 257 case ixgbe_phy_sfp_avago: 258 case ixgbe_phy_sfp_intel: 259 case ixgbe_phy_sfp_unknown: 260 switch (adapter->hw.phy.sfp_type) { 261 /* SFP+ devices, further checking needed */ 262 case ixgbe_sfp_type_da_cu: 263 case ixgbe_sfp_type_da_cu_core0: 264 case ixgbe_sfp_type_da_cu_core1: 265 ecmd->port = PORT_DA; 266 break; 267 case ixgbe_sfp_type_sr: 268 case ixgbe_sfp_type_lr: 269 case ixgbe_sfp_type_srlr_core0: 270 case ixgbe_sfp_type_srlr_core1: 271 ecmd->port = PORT_FIBRE; 272 break; 273 case ixgbe_sfp_type_not_present: 274 ecmd->port = PORT_NONE; 275 break; 276 case ixgbe_sfp_type_1g_cu_core0: 277 case ixgbe_sfp_type_1g_cu_core1: 278 ecmd->port = PORT_TP; 279 ecmd->supported = SUPPORTED_TP; 280 ecmd->advertising = (ADVERTISED_1000baseT_Full | 281 ADVERTISED_TP); 282 break; 283 case ixgbe_sfp_type_unknown: 284 default: 285 ecmd->port = PORT_OTHER; 286 break; 287 } 288 break; 289 case ixgbe_phy_xaui: 290 ecmd->port = PORT_NONE; 291 break; 292 case ixgbe_phy_unknown: 293 case ixgbe_phy_generic: 294 case ixgbe_phy_sfp_unsupported: 295 default: 296 ecmd->port = PORT_OTHER; 297 break; 298 } 299 300 hw->mac.ops.check_link(hw, &link_speed, &link_up, false); 301 if (link_up) { 302 switch (link_speed) { 303 case IXGBE_LINK_SPEED_10GB_FULL: 304 ethtool_cmd_speed_set(ecmd, SPEED_10000); 305 break; 306 case IXGBE_LINK_SPEED_1GB_FULL: 307 ethtool_cmd_speed_set(ecmd, SPEED_1000); 308 break; 309 case IXGBE_LINK_SPEED_100_FULL: 310 ethtool_cmd_speed_set(ecmd, SPEED_100); 311 break; 312 default: 313 break; 314 } 315 ecmd->duplex = DUPLEX_FULL; 316 } else { 317 ethtool_cmd_speed_set(ecmd, -1); 318 ecmd->duplex = -1; 319 } 320 321 return 0; 322 } 323 324 static int ixgbe_set_settings(struct net_device *netdev, 325 struct ethtool_cmd *ecmd) 326 { 327 struct ixgbe_adapter *adapter = netdev_priv(netdev); 328 struct ixgbe_hw *hw = &adapter->hw; 329 u32 advertised, old; 330 s32 err = 0; 331 332 if ((hw->phy.media_type == ixgbe_media_type_copper) || 333 (hw->phy.multispeed_fiber)) { 334 /* 335 * this function does not support duplex forcing, but can 336 * limit the advertising of the adapter to the specified speed 337 */ 338 if (ecmd->autoneg == AUTONEG_DISABLE) 339 return -EINVAL; 340 341 if (ecmd->advertising & ~ecmd->supported) 342 return -EINVAL; 343 344 old = hw->phy.autoneg_advertised; 345 advertised = 0; 346 if (ecmd->advertising & ADVERTISED_10000baseT_Full) 347 advertised |= IXGBE_LINK_SPEED_10GB_FULL; 348 349 if (ecmd->advertising & ADVERTISED_1000baseT_Full) 350 advertised |= IXGBE_LINK_SPEED_1GB_FULL; 351 352 if (ecmd->advertising & ADVERTISED_100baseT_Full) 353 advertised |= IXGBE_LINK_SPEED_100_FULL; 354 355 if (old == advertised) 356 return err; 357 /* this sets the link speed and restarts auto-neg */ 358 hw->mac.autotry_restart = true; 359 err = hw->mac.ops.setup_link(hw, advertised, true, true); 360 if (err) { 361 e_info(probe, "setup link failed with code %d\n", err); 362 hw->mac.ops.setup_link(hw, old, true, true); 363 } 364 } else { 365 /* in this case we currently only support 10Gb/FULL */ 366 u32 speed = ethtool_cmd_speed(ecmd); 367 if ((ecmd->autoneg == AUTONEG_ENABLE) || 368 (ecmd->advertising != ADVERTISED_10000baseT_Full) || 369 (speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL)) 370 return -EINVAL; 371 } 372 373 return err; 374 } 375 376 static void ixgbe_get_pauseparam(struct net_device *netdev, 377 struct ethtool_pauseparam *pause) 378 { 379 struct ixgbe_adapter *adapter = netdev_priv(netdev); 380 struct ixgbe_hw *hw = &adapter->hw; 381 382 if (hw->fc.disable_fc_autoneg) 383 pause->autoneg = 0; 384 else 385 pause->autoneg = 1; 386 387 if (hw->fc.current_mode == ixgbe_fc_rx_pause) { 388 pause->rx_pause = 1; 389 } else if (hw->fc.current_mode == ixgbe_fc_tx_pause) { 390 pause->tx_pause = 1; 391 } else if (hw->fc.current_mode == ixgbe_fc_full) { 392 pause->rx_pause = 1; 393 pause->tx_pause = 1; 394 #ifdef CONFIG_DCB 395 } else if (hw->fc.current_mode == ixgbe_fc_pfc) { 396 pause->rx_pause = 0; 397 pause->tx_pause = 0; 398 #endif 399 } 400 } 401 402 static int ixgbe_set_pauseparam(struct net_device *netdev, 403 struct ethtool_pauseparam *pause) 404 { 405 struct ixgbe_adapter *adapter = netdev_priv(netdev); 406 struct ixgbe_hw *hw = &adapter->hw; 407 struct ixgbe_fc_info fc; 408 409 #ifdef CONFIG_DCB 410 if (adapter->dcb_cfg.pfc_mode_enable || 411 ((hw->mac.type == ixgbe_mac_82598EB) && 412 (adapter->flags & IXGBE_FLAG_DCB_ENABLED))) 413 return -EINVAL; 414 415 #endif 416 fc = hw->fc; 417 418 if (pause->autoneg != AUTONEG_ENABLE) 419 fc.disable_fc_autoneg = true; 420 else 421 fc.disable_fc_autoneg = false; 422 423 if ((pause->rx_pause && pause->tx_pause) || pause->autoneg) 424 fc.requested_mode = ixgbe_fc_full; 425 else if (pause->rx_pause && !pause->tx_pause) 426 fc.requested_mode = ixgbe_fc_rx_pause; 427 else if (!pause->rx_pause && pause->tx_pause) 428 fc.requested_mode = ixgbe_fc_tx_pause; 429 else if (!pause->rx_pause && !pause->tx_pause) 430 fc.requested_mode = ixgbe_fc_none; 431 else 432 return -EINVAL; 433 434 #ifdef CONFIG_DCB 435 adapter->last_lfc_mode = fc.requested_mode; 436 #endif 437 438 /* if the thing changed then we'll update and use new autoneg */ 439 if (memcmp(&fc, &hw->fc, sizeof(struct ixgbe_fc_info))) { 440 hw->fc = fc; 441 if (netif_running(netdev)) 442 ixgbe_reinit_locked(adapter); 443 else 444 ixgbe_reset(adapter); 445 } 446 447 return 0; 448 } 449 450 static u32 ixgbe_get_msglevel(struct net_device *netdev) 451 { 452 struct ixgbe_adapter *adapter = netdev_priv(netdev); 453 return adapter->msg_enable; 454 } 455 456 static void ixgbe_set_msglevel(struct net_device *netdev, u32 data) 457 { 458 struct ixgbe_adapter *adapter = netdev_priv(netdev); 459 adapter->msg_enable = data; 460 } 461 462 static int ixgbe_get_regs_len(struct net_device *netdev) 463 { 464 #define IXGBE_REGS_LEN 1129 465 return IXGBE_REGS_LEN * sizeof(u32); 466 } 467 468 #define IXGBE_GET_STAT(_A_, _R_) _A_->stats._R_ 469 470 static void ixgbe_get_regs(struct net_device *netdev, 471 struct ethtool_regs *regs, void *p) 472 { 473 struct ixgbe_adapter *adapter = netdev_priv(netdev); 474 struct ixgbe_hw *hw = &adapter->hw; 475 u32 *regs_buff = p; 476 u8 i; 477 478 memset(p, 0, IXGBE_REGS_LEN * sizeof(u32)); 479 480 regs->version = (1 << 24) | hw->revision_id << 16 | hw->device_id; 481 482 /* General Registers */ 483 regs_buff[0] = IXGBE_READ_REG(hw, IXGBE_CTRL); 484 regs_buff[1] = IXGBE_READ_REG(hw, IXGBE_STATUS); 485 regs_buff[2] = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); 486 regs_buff[3] = IXGBE_READ_REG(hw, IXGBE_ESDP); 487 regs_buff[4] = IXGBE_READ_REG(hw, IXGBE_EODSDP); 488 regs_buff[5] = IXGBE_READ_REG(hw, IXGBE_LEDCTL); 489 regs_buff[6] = IXGBE_READ_REG(hw, IXGBE_FRTIMER); 490 regs_buff[7] = IXGBE_READ_REG(hw, IXGBE_TCPTIMER); 491 492 /* NVM Register */ 493 regs_buff[8] = IXGBE_READ_REG(hw, IXGBE_EEC); 494 regs_buff[9] = IXGBE_READ_REG(hw, IXGBE_EERD); 495 regs_buff[10] = IXGBE_READ_REG(hw, IXGBE_FLA); 496 regs_buff[11] = IXGBE_READ_REG(hw, IXGBE_EEMNGCTL); 497 regs_buff[12] = IXGBE_READ_REG(hw, IXGBE_EEMNGDATA); 498 regs_buff[13] = IXGBE_READ_REG(hw, IXGBE_FLMNGCTL); 499 regs_buff[14] = IXGBE_READ_REG(hw, IXGBE_FLMNGDATA); 500 regs_buff[15] = IXGBE_READ_REG(hw, IXGBE_FLMNGCNT); 501 regs_buff[16] = IXGBE_READ_REG(hw, IXGBE_FLOP); 502 regs_buff[17] = IXGBE_READ_REG(hw, IXGBE_GRC); 503 504 /* Interrupt */ 505 /* don't read EICR because it can clear interrupt causes, instead 506 * read EICS which is a shadow but doesn't clear EICR */ 507 regs_buff[18] = IXGBE_READ_REG(hw, IXGBE_EICS); 508 regs_buff[19] = IXGBE_READ_REG(hw, IXGBE_EICS); 509 regs_buff[20] = IXGBE_READ_REG(hw, IXGBE_EIMS); 510 regs_buff[21] = IXGBE_READ_REG(hw, IXGBE_EIMC); 511 regs_buff[22] = IXGBE_READ_REG(hw, IXGBE_EIAC); 512 regs_buff[23] = IXGBE_READ_REG(hw, IXGBE_EIAM); 513 regs_buff[24] = IXGBE_READ_REG(hw, IXGBE_EITR(0)); 514 regs_buff[25] = IXGBE_READ_REG(hw, IXGBE_IVAR(0)); 515 regs_buff[26] = IXGBE_READ_REG(hw, IXGBE_MSIXT); 516 regs_buff[27] = IXGBE_READ_REG(hw, IXGBE_MSIXPBA); 517 regs_buff[28] = IXGBE_READ_REG(hw, IXGBE_PBACL(0)); 518 regs_buff[29] = IXGBE_READ_REG(hw, IXGBE_GPIE); 519 520 /* Flow Control */ 521 regs_buff[30] = IXGBE_READ_REG(hw, IXGBE_PFCTOP); 522 regs_buff[31] = IXGBE_READ_REG(hw, IXGBE_FCTTV(0)); 523 regs_buff[32] = IXGBE_READ_REG(hw, IXGBE_FCTTV(1)); 524 regs_buff[33] = IXGBE_READ_REG(hw, IXGBE_FCTTV(2)); 525 regs_buff[34] = IXGBE_READ_REG(hw, IXGBE_FCTTV(3)); 526 for (i = 0; i < 8; i++) { 527 switch (hw->mac.type) { 528 case ixgbe_mac_82598EB: 529 regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL(i)); 530 regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH(i)); 531 break; 532 case ixgbe_mac_82599EB: 533 case ixgbe_mac_X540: 534 regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL_82599(i)); 535 regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i)); 536 break; 537 default: 538 break; 539 } 540 } 541 regs_buff[51] = IXGBE_READ_REG(hw, IXGBE_FCRTV); 542 regs_buff[52] = IXGBE_READ_REG(hw, IXGBE_TFCS); 543 544 /* Receive DMA */ 545 for (i = 0; i < 64; i++) 546 regs_buff[53 + i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i)); 547 for (i = 0; i < 64; i++) 548 regs_buff[117 + i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i)); 549 for (i = 0; i < 64; i++) 550 regs_buff[181 + i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i)); 551 for (i = 0; i < 64; i++) 552 regs_buff[245 + i] = IXGBE_READ_REG(hw, IXGBE_RDH(i)); 553 for (i = 0; i < 64; i++) 554 regs_buff[309 + i] = IXGBE_READ_REG(hw, IXGBE_RDT(i)); 555 for (i = 0; i < 64; i++) 556 regs_buff[373 + i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i)); 557 for (i = 0; i < 16; i++) 558 regs_buff[437 + i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i)); 559 for (i = 0; i < 16; i++) 560 regs_buff[453 + i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i)); 561 regs_buff[469] = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); 562 for (i = 0; i < 8; i++) 563 regs_buff[470 + i] = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)); 564 regs_buff[478] = IXGBE_READ_REG(hw, IXGBE_RXCTRL); 565 regs_buff[479] = IXGBE_READ_REG(hw, IXGBE_DROPEN); 566 567 /* Receive */ 568 regs_buff[480] = IXGBE_READ_REG(hw, IXGBE_RXCSUM); 569 regs_buff[481] = IXGBE_READ_REG(hw, IXGBE_RFCTL); 570 for (i = 0; i < 16; i++) 571 regs_buff[482 + i] = IXGBE_READ_REG(hw, IXGBE_RAL(i)); 572 for (i = 0; i < 16; i++) 573 regs_buff[498 + i] = IXGBE_READ_REG(hw, IXGBE_RAH(i)); 574 regs_buff[514] = IXGBE_READ_REG(hw, IXGBE_PSRTYPE(0)); 575 regs_buff[515] = IXGBE_READ_REG(hw, IXGBE_FCTRL); 576 regs_buff[516] = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 577 regs_buff[517] = IXGBE_READ_REG(hw, IXGBE_MCSTCTRL); 578 regs_buff[518] = IXGBE_READ_REG(hw, IXGBE_MRQC); 579 regs_buff[519] = IXGBE_READ_REG(hw, IXGBE_VMD_CTL); 580 for (i = 0; i < 8; i++) 581 regs_buff[520 + i] = IXGBE_READ_REG(hw, IXGBE_IMIR(i)); 582 for (i = 0; i < 8; i++) 583 regs_buff[528 + i] = IXGBE_READ_REG(hw, IXGBE_IMIREXT(i)); 584 regs_buff[536] = IXGBE_READ_REG(hw, IXGBE_IMIRVP); 585 586 /* Transmit */ 587 for (i = 0; i < 32; i++) 588 regs_buff[537 + i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i)); 589 for (i = 0; i < 32; i++) 590 regs_buff[569 + i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i)); 591 for (i = 0; i < 32; i++) 592 regs_buff[601 + i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i)); 593 for (i = 0; i < 32; i++) 594 regs_buff[633 + i] = IXGBE_READ_REG(hw, IXGBE_TDH(i)); 595 for (i = 0; i < 32; i++) 596 regs_buff[665 + i] = IXGBE_READ_REG(hw, IXGBE_TDT(i)); 597 for (i = 0; i < 32; i++) 598 regs_buff[697 + i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i)); 599 for (i = 0; i < 32; i++) 600 regs_buff[729 + i] = IXGBE_READ_REG(hw, IXGBE_TDWBAL(i)); 601 for (i = 0; i < 32; i++) 602 regs_buff[761 + i] = IXGBE_READ_REG(hw, IXGBE_TDWBAH(i)); 603 regs_buff[793] = IXGBE_READ_REG(hw, IXGBE_DTXCTL); 604 for (i = 0; i < 16; i++) 605 regs_buff[794 + i] = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i)); 606 regs_buff[810] = IXGBE_READ_REG(hw, IXGBE_TIPG); 607 for (i = 0; i < 8; i++) 608 regs_buff[811 + i] = IXGBE_READ_REG(hw, IXGBE_TXPBSIZE(i)); 609 regs_buff[819] = IXGBE_READ_REG(hw, IXGBE_MNGTXMAP); 610 611 /* Wake Up */ 612 regs_buff[820] = IXGBE_READ_REG(hw, IXGBE_WUC); 613 regs_buff[821] = IXGBE_READ_REG(hw, IXGBE_WUFC); 614 regs_buff[822] = IXGBE_READ_REG(hw, IXGBE_WUS); 615 regs_buff[823] = IXGBE_READ_REG(hw, IXGBE_IPAV); 616 regs_buff[824] = IXGBE_READ_REG(hw, IXGBE_IP4AT); 617 regs_buff[825] = IXGBE_READ_REG(hw, IXGBE_IP6AT); 618 regs_buff[826] = IXGBE_READ_REG(hw, IXGBE_WUPL); 619 regs_buff[827] = IXGBE_READ_REG(hw, IXGBE_WUPM); 620 regs_buff[828] = IXGBE_READ_REG(hw, IXGBE_FHFT(0)); 621 622 /* DCB */ 623 regs_buff[829] = IXGBE_READ_REG(hw, IXGBE_RMCS); 624 regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_DPMCS); 625 regs_buff[831] = IXGBE_READ_REG(hw, IXGBE_PDPMCS); 626 regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RUPPBMR); 627 for (i = 0; i < 8; i++) 628 regs_buff[833 + i] = IXGBE_READ_REG(hw, IXGBE_RT2CR(i)); 629 for (i = 0; i < 8; i++) 630 regs_buff[841 + i] = IXGBE_READ_REG(hw, IXGBE_RT2SR(i)); 631 for (i = 0; i < 8; i++) 632 regs_buff[849 + i] = IXGBE_READ_REG(hw, IXGBE_TDTQ2TCCR(i)); 633 for (i = 0; i < 8; i++) 634 regs_buff[857 + i] = IXGBE_READ_REG(hw, IXGBE_TDTQ2TCSR(i)); 635 for (i = 0; i < 8; i++) 636 regs_buff[865 + i] = IXGBE_READ_REG(hw, IXGBE_TDPT2TCCR(i)); 637 for (i = 0; i < 8; i++) 638 regs_buff[873 + i] = IXGBE_READ_REG(hw, IXGBE_TDPT2TCSR(i)); 639 640 /* Statistics */ 641 regs_buff[881] = IXGBE_GET_STAT(adapter, crcerrs); 642 regs_buff[882] = IXGBE_GET_STAT(adapter, illerrc); 643 regs_buff[883] = IXGBE_GET_STAT(adapter, errbc); 644 regs_buff[884] = IXGBE_GET_STAT(adapter, mspdc); 645 for (i = 0; i < 8; i++) 646 regs_buff[885 + i] = IXGBE_GET_STAT(adapter, mpc[i]); 647 regs_buff[893] = IXGBE_GET_STAT(adapter, mlfc); 648 regs_buff[894] = IXGBE_GET_STAT(adapter, mrfc); 649 regs_buff[895] = IXGBE_GET_STAT(adapter, rlec); 650 regs_buff[896] = IXGBE_GET_STAT(adapter, lxontxc); 651 regs_buff[897] = IXGBE_GET_STAT(adapter, lxonrxc); 652 regs_buff[898] = IXGBE_GET_STAT(adapter, lxofftxc); 653 regs_buff[899] = IXGBE_GET_STAT(adapter, lxoffrxc); 654 for (i = 0; i < 8; i++) 655 regs_buff[900 + i] = IXGBE_GET_STAT(adapter, pxontxc[i]); 656 for (i = 0; i < 8; i++) 657 regs_buff[908 + i] = IXGBE_GET_STAT(adapter, pxonrxc[i]); 658 for (i = 0; i < 8; i++) 659 regs_buff[916 + i] = IXGBE_GET_STAT(adapter, pxofftxc[i]); 660 for (i = 0; i < 8; i++) 661 regs_buff[924 + i] = IXGBE_GET_STAT(adapter, pxoffrxc[i]); 662 regs_buff[932] = IXGBE_GET_STAT(adapter, prc64); 663 regs_buff[933] = IXGBE_GET_STAT(adapter, prc127); 664 regs_buff[934] = IXGBE_GET_STAT(adapter, prc255); 665 regs_buff[935] = IXGBE_GET_STAT(adapter, prc511); 666 regs_buff[936] = IXGBE_GET_STAT(adapter, prc1023); 667 regs_buff[937] = IXGBE_GET_STAT(adapter, prc1522); 668 regs_buff[938] = IXGBE_GET_STAT(adapter, gprc); 669 regs_buff[939] = IXGBE_GET_STAT(adapter, bprc); 670 regs_buff[940] = IXGBE_GET_STAT(adapter, mprc); 671 regs_buff[941] = IXGBE_GET_STAT(adapter, gptc); 672 regs_buff[942] = IXGBE_GET_STAT(adapter, gorc); 673 regs_buff[944] = IXGBE_GET_STAT(adapter, gotc); 674 for (i = 0; i < 8; i++) 675 regs_buff[946 + i] = IXGBE_GET_STAT(adapter, rnbc[i]); 676 regs_buff[954] = IXGBE_GET_STAT(adapter, ruc); 677 regs_buff[955] = IXGBE_GET_STAT(adapter, rfc); 678 regs_buff[956] = IXGBE_GET_STAT(adapter, roc); 679 regs_buff[957] = IXGBE_GET_STAT(adapter, rjc); 680 regs_buff[958] = IXGBE_GET_STAT(adapter, mngprc); 681 regs_buff[959] = IXGBE_GET_STAT(adapter, mngpdc); 682 regs_buff[960] = IXGBE_GET_STAT(adapter, mngptc); 683 regs_buff[961] = IXGBE_GET_STAT(adapter, tor); 684 regs_buff[963] = IXGBE_GET_STAT(adapter, tpr); 685 regs_buff[964] = IXGBE_GET_STAT(adapter, tpt); 686 regs_buff[965] = IXGBE_GET_STAT(adapter, ptc64); 687 regs_buff[966] = IXGBE_GET_STAT(adapter, ptc127); 688 regs_buff[967] = IXGBE_GET_STAT(adapter, ptc255); 689 regs_buff[968] = IXGBE_GET_STAT(adapter, ptc511); 690 regs_buff[969] = IXGBE_GET_STAT(adapter, ptc1023); 691 regs_buff[970] = IXGBE_GET_STAT(adapter, ptc1522); 692 regs_buff[971] = IXGBE_GET_STAT(adapter, mptc); 693 regs_buff[972] = IXGBE_GET_STAT(adapter, bptc); 694 regs_buff[973] = IXGBE_GET_STAT(adapter, xec); 695 for (i = 0; i < 16; i++) 696 regs_buff[974 + i] = IXGBE_GET_STAT(adapter, qprc[i]); 697 for (i = 0; i < 16; i++) 698 regs_buff[990 + i] = IXGBE_GET_STAT(adapter, qptc[i]); 699 for (i = 0; i < 16; i++) 700 regs_buff[1006 + i] = IXGBE_GET_STAT(adapter, qbrc[i]); 701 for (i = 0; i < 16; i++) 702 regs_buff[1022 + i] = IXGBE_GET_STAT(adapter, qbtc[i]); 703 704 /* MAC */ 705 regs_buff[1038] = IXGBE_READ_REG(hw, IXGBE_PCS1GCFIG); 706 regs_buff[1039] = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL); 707 regs_buff[1040] = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA); 708 regs_buff[1041] = IXGBE_READ_REG(hw, IXGBE_PCS1GDBG0); 709 regs_buff[1042] = IXGBE_READ_REG(hw, IXGBE_PCS1GDBG1); 710 regs_buff[1043] = IXGBE_READ_REG(hw, IXGBE_PCS1GANA); 711 regs_buff[1044] = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP); 712 regs_buff[1045] = IXGBE_READ_REG(hw, IXGBE_PCS1GANNP); 713 regs_buff[1046] = IXGBE_READ_REG(hw, IXGBE_PCS1GANLPNP); 714 regs_buff[1047] = IXGBE_READ_REG(hw, IXGBE_HLREG0); 715 regs_buff[1048] = IXGBE_READ_REG(hw, IXGBE_HLREG1); 716 regs_buff[1049] = IXGBE_READ_REG(hw, IXGBE_PAP); 717 regs_buff[1050] = IXGBE_READ_REG(hw, IXGBE_MACA); 718 regs_buff[1051] = IXGBE_READ_REG(hw, IXGBE_APAE); 719 regs_buff[1052] = IXGBE_READ_REG(hw, IXGBE_ARD); 720 regs_buff[1053] = IXGBE_READ_REG(hw, IXGBE_AIS); 721 regs_buff[1054] = IXGBE_READ_REG(hw, IXGBE_MSCA); 722 regs_buff[1055] = IXGBE_READ_REG(hw, IXGBE_MSRWD); 723 regs_buff[1056] = IXGBE_READ_REG(hw, IXGBE_MLADD); 724 regs_buff[1057] = IXGBE_READ_REG(hw, IXGBE_MHADD); 725 regs_buff[1058] = IXGBE_READ_REG(hw, IXGBE_TREG); 726 regs_buff[1059] = IXGBE_READ_REG(hw, IXGBE_PCSS1); 727 regs_buff[1060] = IXGBE_READ_REG(hw, IXGBE_PCSS2); 728 regs_buff[1061] = IXGBE_READ_REG(hw, IXGBE_XPCSS); 729 regs_buff[1062] = IXGBE_READ_REG(hw, IXGBE_SERDESC); 730 regs_buff[1063] = IXGBE_READ_REG(hw, IXGBE_MACS); 731 regs_buff[1064] = IXGBE_READ_REG(hw, IXGBE_AUTOC); 732 regs_buff[1065] = IXGBE_READ_REG(hw, IXGBE_LINKS); 733 regs_buff[1066] = IXGBE_READ_REG(hw, IXGBE_AUTOC2); 734 regs_buff[1067] = IXGBE_READ_REG(hw, IXGBE_AUTOC3); 735 regs_buff[1068] = IXGBE_READ_REG(hw, IXGBE_ANLP1); 736 regs_buff[1069] = IXGBE_READ_REG(hw, IXGBE_ANLP2); 737 regs_buff[1070] = IXGBE_READ_REG(hw, IXGBE_ATLASCTL); 738 739 /* Diagnostic */ 740 regs_buff[1071] = IXGBE_READ_REG(hw, IXGBE_RDSTATCTL); 741 for (i = 0; i < 8; i++) 742 regs_buff[1072 + i] = IXGBE_READ_REG(hw, IXGBE_RDSTAT(i)); 743 regs_buff[1080] = IXGBE_READ_REG(hw, IXGBE_RDHMPN); 744 for (i = 0; i < 4; i++) 745 regs_buff[1081 + i] = IXGBE_READ_REG(hw, IXGBE_RIC_DW(i)); 746 regs_buff[1085] = IXGBE_READ_REG(hw, IXGBE_RDPROBE); 747 regs_buff[1086] = IXGBE_READ_REG(hw, IXGBE_TDSTATCTL); 748 for (i = 0; i < 8; i++) 749 regs_buff[1087 + i] = IXGBE_READ_REG(hw, IXGBE_TDSTAT(i)); 750 regs_buff[1095] = IXGBE_READ_REG(hw, IXGBE_TDHMPN); 751 for (i = 0; i < 4; i++) 752 regs_buff[1096 + i] = IXGBE_READ_REG(hw, IXGBE_TIC_DW(i)); 753 regs_buff[1100] = IXGBE_READ_REG(hw, IXGBE_TDPROBE); 754 regs_buff[1101] = IXGBE_READ_REG(hw, IXGBE_TXBUFCTRL); 755 regs_buff[1102] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA0); 756 regs_buff[1103] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA1); 757 regs_buff[1104] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA2); 758 regs_buff[1105] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA3); 759 regs_buff[1106] = IXGBE_READ_REG(hw, IXGBE_RXBUFCTRL); 760 regs_buff[1107] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA0); 761 regs_buff[1108] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA1); 762 regs_buff[1109] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA2); 763 regs_buff[1110] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA3); 764 for (i = 0; i < 8; i++) 765 regs_buff[1111 + i] = IXGBE_READ_REG(hw, IXGBE_PCIE_DIAG(i)); 766 regs_buff[1119] = IXGBE_READ_REG(hw, IXGBE_RFVAL); 767 regs_buff[1120] = IXGBE_READ_REG(hw, IXGBE_MDFTC1); 768 regs_buff[1121] = IXGBE_READ_REG(hw, IXGBE_MDFTC2); 769 regs_buff[1122] = IXGBE_READ_REG(hw, IXGBE_MDFTFIFO1); 770 regs_buff[1123] = IXGBE_READ_REG(hw, IXGBE_MDFTFIFO2); 771 regs_buff[1124] = IXGBE_READ_REG(hw, IXGBE_MDFTS); 772 regs_buff[1125] = IXGBE_READ_REG(hw, IXGBE_PCIEECCCTL); 773 regs_buff[1126] = IXGBE_READ_REG(hw, IXGBE_PBTXECC); 774 regs_buff[1127] = IXGBE_READ_REG(hw, IXGBE_PBRXECC); 775 776 /* 82599 X540 specific registers */ 777 regs_buff[1128] = IXGBE_READ_REG(hw, IXGBE_MFLCN); 778 } 779 780 static int ixgbe_get_eeprom_len(struct net_device *netdev) 781 { 782 struct ixgbe_adapter *adapter = netdev_priv(netdev); 783 return adapter->hw.eeprom.word_size * 2; 784 } 785 786 static int ixgbe_get_eeprom(struct net_device *netdev, 787 struct ethtool_eeprom *eeprom, u8 *bytes) 788 { 789 struct ixgbe_adapter *adapter = netdev_priv(netdev); 790 struct ixgbe_hw *hw = &adapter->hw; 791 u16 *eeprom_buff; 792 int first_word, last_word, eeprom_len; 793 int ret_val = 0; 794 u16 i; 795 796 if (eeprom->len == 0) 797 return -EINVAL; 798 799 eeprom->magic = hw->vendor_id | (hw->device_id << 16); 800 801 first_word = eeprom->offset >> 1; 802 last_word = (eeprom->offset + eeprom->len - 1) >> 1; 803 eeprom_len = last_word - first_word + 1; 804 805 eeprom_buff = kmalloc(sizeof(u16) * eeprom_len, GFP_KERNEL); 806 if (!eeprom_buff) 807 return -ENOMEM; 808 809 ret_val = hw->eeprom.ops.read_buffer(hw, first_word, eeprom_len, 810 eeprom_buff); 811 812 /* Device's eeprom is always little-endian, word addressable */ 813 for (i = 0; i < eeprom_len; i++) 814 le16_to_cpus(&eeprom_buff[i]); 815 816 memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len); 817 kfree(eeprom_buff); 818 819 return ret_val; 820 } 821 822 static int ixgbe_set_eeprom(struct net_device *netdev, 823 struct ethtool_eeprom *eeprom, u8 *bytes) 824 { 825 struct ixgbe_adapter *adapter = netdev_priv(netdev); 826 struct ixgbe_hw *hw = &adapter->hw; 827 u16 *eeprom_buff; 828 void *ptr; 829 int max_len, first_word, last_word, ret_val = 0; 830 u16 i; 831 832 if (eeprom->len == 0) 833 return -EINVAL; 834 835 if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16))) 836 return -EINVAL; 837 838 max_len = hw->eeprom.word_size * 2; 839 840 first_word = eeprom->offset >> 1; 841 last_word = (eeprom->offset + eeprom->len - 1) >> 1; 842 eeprom_buff = kmalloc(max_len, GFP_KERNEL); 843 if (!eeprom_buff) 844 return -ENOMEM; 845 846 ptr = eeprom_buff; 847 848 if (eeprom->offset & 1) { 849 /* 850 * need read/modify/write of first changed EEPROM word 851 * only the second byte of the word is being modified 852 */ 853 ret_val = hw->eeprom.ops.read(hw, first_word, &eeprom_buff[0]); 854 if (ret_val) 855 goto err; 856 857 ptr++; 858 } 859 if ((eeprom->offset + eeprom->len) & 1) { 860 /* 861 * need read/modify/write of last changed EEPROM word 862 * only the first byte of the word is being modified 863 */ 864 ret_val = hw->eeprom.ops.read(hw, last_word, 865 &eeprom_buff[last_word - first_word]); 866 if (ret_val) 867 goto err; 868 } 869 870 /* Device's eeprom is always little-endian, word addressable */ 871 for (i = 0; i < last_word - first_word + 1; i++) 872 le16_to_cpus(&eeprom_buff[i]); 873 874 memcpy(ptr, bytes, eeprom->len); 875 876 for (i = 0; i < last_word - first_word + 1; i++) 877 cpu_to_le16s(&eeprom_buff[i]); 878 879 ret_val = hw->eeprom.ops.write_buffer(hw, first_word, 880 last_word - first_word + 1, 881 eeprom_buff); 882 883 /* Update the checksum */ 884 if (ret_val == 0) 885 hw->eeprom.ops.update_checksum(hw); 886 887 err: 888 kfree(eeprom_buff); 889 return ret_val; 890 } 891 892 static void ixgbe_get_drvinfo(struct net_device *netdev, 893 struct ethtool_drvinfo *drvinfo) 894 { 895 struct ixgbe_adapter *adapter = netdev_priv(netdev); 896 u32 nvm_track_id; 897 898 strlcpy(drvinfo->driver, ixgbe_driver_name, sizeof(drvinfo->driver)); 899 strlcpy(drvinfo->version, ixgbe_driver_version, 900 sizeof(drvinfo->version)); 901 902 nvm_track_id = (adapter->eeprom_verh << 16) | 903 adapter->eeprom_verl; 904 snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "0x%08x", 905 nvm_track_id); 906 907 strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), 908 sizeof(drvinfo->bus_info)); 909 drvinfo->n_stats = IXGBE_STATS_LEN; 910 drvinfo->testinfo_len = IXGBE_TEST_LEN; 911 drvinfo->regdump_len = ixgbe_get_regs_len(netdev); 912 } 913 914 static void ixgbe_get_ringparam(struct net_device *netdev, 915 struct ethtool_ringparam *ring) 916 { 917 struct ixgbe_adapter *adapter = netdev_priv(netdev); 918 struct ixgbe_ring *tx_ring = adapter->tx_ring[0]; 919 struct ixgbe_ring *rx_ring = adapter->rx_ring[0]; 920 921 ring->rx_max_pending = IXGBE_MAX_RXD; 922 ring->tx_max_pending = IXGBE_MAX_TXD; 923 ring->rx_pending = rx_ring->count; 924 ring->tx_pending = tx_ring->count; 925 } 926 927 static int ixgbe_set_ringparam(struct net_device *netdev, 928 struct ethtool_ringparam *ring) 929 { 930 struct ixgbe_adapter *adapter = netdev_priv(netdev); 931 struct ixgbe_ring *temp_tx_ring, *temp_rx_ring; 932 int i, err = 0; 933 u32 new_rx_count, new_tx_count; 934 bool need_update = false; 935 936 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) 937 return -EINVAL; 938 939 new_rx_count = max_t(u32, ring->rx_pending, IXGBE_MIN_RXD); 940 new_rx_count = min_t(u32, new_rx_count, IXGBE_MAX_RXD); 941 new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE); 942 943 new_tx_count = max_t(u32, ring->tx_pending, IXGBE_MIN_TXD); 944 new_tx_count = min_t(u32, new_tx_count, IXGBE_MAX_TXD); 945 new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE); 946 947 if ((new_tx_count == adapter->tx_ring[0]->count) && 948 (new_rx_count == adapter->rx_ring[0]->count)) { 949 /* nothing to do */ 950 return 0; 951 } 952 953 while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state)) 954 usleep_range(1000, 2000); 955 956 if (!netif_running(adapter->netdev)) { 957 for (i = 0; i < adapter->num_tx_queues; i++) 958 adapter->tx_ring[i]->count = new_tx_count; 959 for (i = 0; i < adapter->num_rx_queues; i++) 960 adapter->rx_ring[i]->count = new_rx_count; 961 adapter->tx_ring_count = new_tx_count; 962 adapter->rx_ring_count = new_rx_count; 963 goto clear_reset; 964 } 965 966 temp_tx_ring = vmalloc(adapter->num_tx_queues * sizeof(struct ixgbe_ring)); 967 if (!temp_tx_ring) { 968 err = -ENOMEM; 969 goto clear_reset; 970 } 971 972 if (new_tx_count != adapter->tx_ring_count) { 973 for (i = 0; i < adapter->num_tx_queues; i++) { 974 memcpy(&temp_tx_ring[i], adapter->tx_ring[i], 975 sizeof(struct ixgbe_ring)); 976 temp_tx_ring[i].count = new_tx_count; 977 err = ixgbe_setup_tx_resources(&temp_tx_ring[i]); 978 if (err) { 979 while (i) { 980 i--; 981 ixgbe_free_tx_resources(&temp_tx_ring[i]); 982 } 983 goto clear_reset; 984 } 985 } 986 need_update = true; 987 } 988 989 temp_rx_ring = vmalloc(adapter->num_rx_queues * sizeof(struct ixgbe_ring)); 990 if (!temp_rx_ring) { 991 err = -ENOMEM; 992 goto err_setup; 993 } 994 995 if (new_rx_count != adapter->rx_ring_count) { 996 for (i = 0; i < adapter->num_rx_queues; i++) { 997 memcpy(&temp_rx_ring[i], adapter->rx_ring[i], 998 sizeof(struct ixgbe_ring)); 999 temp_rx_ring[i].count = new_rx_count; 1000 err = ixgbe_setup_rx_resources(&temp_rx_ring[i]); 1001 if (err) { 1002 while (i) { 1003 i--; 1004 ixgbe_free_rx_resources(&temp_rx_ring[i]); 1005 } 1006 goto err_setup; 1007 } 1008 } 1009 need_update = true; 1010 } 1011 1012 /* if rings need to be updated, here's the place to do it in one shot */ 1013 if (need_update) { 1014 ixgbe_down(adapter); 1015 1016 /* tx */ 1017 if (new_tx_count != adapter->tx_ring_count) { 1018 for (i = 0; i < adapter->num_tx_queues; i++) { 1019 ixgbe_free_tx_resources(adapter->tx_ring[i]); 1020 memcpy(adapter->tx_ring[i], &temp_tx_ring[i], 1021 sizeof(struct ixgbe_ring)); 1022 } 1023 adapter->tx_ring_count = new_tx_count; 1024 } 1025 1026 /* rx */ 1027 if (new_rx_count != adapter->rx_ring_count) { 1028 for (i = 0; i < adapter->num_rx_queues; i++) { 1029 ixgbe_free_rx_resources(adapter->rx_ring[i]); 1030 memcpy(adapter->rx_ring[i], &temp_rx_ring[i], 1031 sizeof(struct ixgbe_ring)); 1032 } 1033 adapter->rx_ring_count = new_rx_count; 1034 } 1035 ixgbe_up(adapter); 1036 } 1037 1038 vfree(temp_rx_ring); 1039 err_setup: 1040 vfree(temp_tx_ring); 1041 clear_reset: 1042 clear_bit(__IXGBE_RESETTING, &adapter->state); 1043 return err; 1044 } 1045 1046 static int ixgbe_get_sset_count(struct net_device *netdev, int sset) 1047 { 1048 switch (sset) { 1049 case ETH_SS_TEST: 1050 return IXGBE_TEST_LEN; 1051 case ETH_SS_STATS: 1052 return IXGBE_STATS_LEN; 1053 default: 1054 return -EOPNOTSUPP; 1055 } 1056 } 1057 1058 static void ixgbe_get_ethtool_stats(struct net_device *netdev, 1059 struct ethtool_stats *stats, u64 *data) 1060 { 1061 struct ixgbe_adapter *adapter = netdev_priv(netdev); 1062 struct rtnl_link_stats64 temp; 1063 const struct rtnl_link_stats64 *net_stats; 1064 unsigned int start; 1065 struct ixgbe_ring *ring; 1066 int i, j; 1067 char *p = NULL; 1068 1069 ixgbe_update_stats(adapter); 1070 net_stats = dev_get_stats(netdev, &temp); 1071 for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) { 1072 switch (ixgbe_gstrings_stats[i].type) { 1073 case NETDEV_STATS: 1074 p = (char *) net_stats + 1075 ixgbe_gstrings_stats[i].stat_offset; 1076 break; 1077 case IXGBE_STATS: 1078 p = (char *) adapter + 1079 ixgbe_gstrings_stats[i].stat_offset; 1080 break; 1081 } 1082 1083 data[i] = (ixgbe_gstrings_stats[i].sizeof_stat == 1084 sizeof(u64)) ? *(u64 *)p : *(u32 *)p; 1085 } 1086 for (j = 0; j < IXGBE_NUM_RX_QUEUES; j++) { 1087 ring = adapter->tx_ring[j]; 1088 if (!ring) { 1089 data[i] = 0; 1090 data[i+1] = 0; 1091 i += 2; 1092 continue; 1093 } 1094 1095 do { 1096 start = u64_stats_fetch_begin_bh(&ring->syncp); 1097 data[i] = ring->stats.packets; 1098 data[i+1] = ring->stats.bytes; 1099 } while (u64_stats_fetch_retry_bh(&ring->syncp, start)); 1100 i += 2; 1101 } 1102 for (j = 0; j < IXGBE_NUM_RX_QUEUES; j++) { 1103 ring = adapter->rx_ring[j]; 1104 if (!ring) { 1105 data[i] = 0; 1106 data[i+1] = 0; 1107 i += 2; 1108 continue; 1109 } 1110 1111 do { 1112 start = u64_stats_fetch_begin_bh(&ring->syncp); 1113 data[i] = ring->stats.packets; 1114 data[i+1] = ring->stats.bytes; 1115 } while (u64_stats_fetch_retry_bh(&ring->syncp, start)); 1116 i += 2; 1117 } 1118 1119 for (j = 0; j < IXGBE_MAX_PACKET_BUFFERS; j++) { 1120 data[i++] = adapter->stats.pxontxc[j]; 1121 data[i++] = adapter->stats.pxofftxc[j]; 1122 } 1123 for (j = 0; j < IXGBE_MAX_PACKET_BUFFERS; j++) { 1124 data[i++] = adapter->stats.pxonrxc[j]; 1125 data[i++] = adapter->stats.pxoffrxc[j]; 1126 } 1127 } 1128 1129 static void ixgbe_get_strings(struct net_device *netdev, u32 stringset, 1130 u8 *data) 1131 { 1132 char *p = (char *)data; 1133 int i; 1134 1135 switch (stringset) { 1136 case ETH_SS_TEST: 1137 memcpy(data, *ixgbe_gstrings_test, 1138 IXGBE_TEST_LEN * ETH_GSTRING_LEN); 1139 break; 1140 case ETH_SS_STATS: 1141 for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) { 1142 memcpy(p, ixgbe_gstrings_stats[i].stat_string, 1143 ETH_GSTRING_LEN); 1144 p += ETH_GSTRING_LEN; 1145 } 1146 for (i = 0; i < netdev->num_tx_queues; i++) { 1147 sprintf(p, "tx_queue_%u_packets", i); 1148 p += ETH_GSTRING_LEN; 1149 sprintf(p, "tx_queue_%u_bytes", i); 1150 p += ETH_GSTRING_LEN; 1151 } 1152 for (i = 0; i < IXGBE_NUM_RX_QUEUES; i++) { 1153 sprintf(p, "rx_queue_%u_packets", i); 1154 p += ETH_GSTRING_LEN; 1155 sprintf(p, "rx_queue_%u_bytes", i); 1156 p += ETH_GSTRING_LEN; 1157 } 1158 for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) { 1159 sprintf(p, "tx_pb_%u_pxon", i); 1160 p += ETH_GSTRING_LEN; 1161 sprintf(p, "tx_pb_%u_pxoff", i); 1162 p += ETH_GSTRING_LEN; 1163 } 1164 for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) { 1165 sprintf(p, "rx_pb_%u_pxon", i); 1166 p += ETH_GSTRING_LEN; 1167 sprintf(p, "rx_pb_%u_pxoff", i); 1168 p += ETH_GSTRING_LEN; 1169 } 1170 /* BUG_ON(p - data != IXGBE_STATS_LEN * ETH_GSTRING_LEN); */ 1171 break; 1172 } 1173 } 1174 1175 static int ixgbe_link_test(struct ixgbe_adapter *adapter, u64 *data) 1176 { 1177 struct ixgbe_hw *hw = &adapter->hw; 1178 bool link_up; 1179 u32 link_speed = 0; 1180 *data = 0; 1181 1182 hw->mac.ops.check_link(hw, &link_speed, &link_up, true); 1183 if (link_up) 1184 return *data; 1185 else 1186 *data = 1; 1187 return *data; 1188 } 1189 1190 /* ethtool register test data */ 1191 struct ixgbe_reg_test { 1192 u16 reg; 1193 u8 array_len; 1194 u8 test_type; 1195 u32 mask; 1196 u32 write; 1197 }; 1198 1199 /* In the hardware, registers are laid out either singly, in arrays 1200 * spaced 0x40 bytes apart, or in contiguous tables. We assume 1201 * most tests take place on arrays or single registers (handled 1202 * as a single-element array) and special-case the tables. 1203 * Table tests are always pattern tests. 1204 * 1205 * We also make provision for some required setup steps by specifying 1206 * registers to be written without any read-back testing. 1207 */ 1208 1209 #define PATTERN_TEST 1 1210 #define SET_READ_TEST 2 1211 #define WRITE_NO_TEST 3 1212 #define TABLE32_TEST 4 1213 #define TABLE64_TEST_LO 5 1214 #define TABLE64_TEST_HI 6 1215 1216 /* default 82599 register test */ 1217 static const struct ixgbe_reg_test reg_test_82599[] = { 1218 { IXGBE_FCRTL_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, 1219 { IXGBE_FCRTH_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, 1220 { IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 1221 { IXGBE_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 }, 1222 { IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 }, 1223 { IXGBE_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 1224 { IXGBE_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, 1225 { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE }, 1226 { IXGBE_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, 1227 { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 }, 1228 { IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, 1229 { IXGBE_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 1230 { IXGBE_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, 1231 { IXGBE_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 1232 { IXGBE_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFF80 }, 1233 { IXGBE_RXCTRL, 1, SET_READ_TEST, 0x00000001, 0x00000001 }, 1234 { IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF }, 1235 { IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x8001FFFF, 0x800CFFFF }, 1236 { IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 1237 { 0, 0, 0, 0 } 1238 }; 1239 1240 /* default 82598 register test */ 1241 static const struct ixgbe_reg_test reg_test_82598[] = { 1242 { IXGBE_FCRTL(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, 1243 { IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, 1244 { IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 1245 { IXGBE_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 }, 1246 { IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, 1247 { IXGBE_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 1248 { IXGBE_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, 1249 /* Enable all four RX queues before testing. */ 1250 { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE }, 1251 /* RDH is read-only for 82598, only test RDT. */ 1252 { IXGBE_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, 1253 { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 }, 1254 { IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, 1255 { IXGBE_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 1256 { IXGBE_TIPG, 1, PATTERN_TEST, 0x000000FF, 0x000000FF }, 1257 { IXGBE_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, 1258 { IXGBE_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 1259 { IXGBE_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, 1260 { IXGBE_RXCTRL, 1, SET_READ_TEST, 0x00000003, 0x00000003 }, 1261 { IXGBE_DTXCTL, 1, SET_READ_TEST, 0x00000005, 0x00000005 }, 1262 { IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF }, 1263 { IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x800CFFFF, 0x800CFFFF }, 1264 { IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 1265 { 0, 0, 0, 0 } 1266 }; 1267 1268 static bool reg_pattern_test(struct ixgbe_adapter *adapter, u64 *data, int reg, 1269 u32 mask, u32 write) 1270 { 1271 u32 pat, val, before; 1272 static const u32 test_pattern[] = { 1273 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; 1274 1275 for (pat = 0; pat < ARRAY_SIZE(test_pattern); pat++) { 1276 before = readl(adapter->hw.hw_addr + reg); 1277 writel((test_pattern[pat] & write), 1278 (adapter->hw.hw_addr + reg)); 1279 val = readl(adapter->hw.hw_addr + reg); 1280 if (val != (test_pattern[pat] & write & mask)) { 1281 e_err(drv, "pattern test reg %04X failed: got " 1282 "0x%08X expected 0x%08X\n", 1283 reg, val, (test_pattern[pat] & write & mask)); 1284 *data = reg; 1285 writel(before, adapter->hw.hw_addr + reg); 1286 return 1; 1287 } 1288 writel(before, adapter->hw.hw_addr + reg); 1289 } 1290 return 0; 1291 } 1292 1293 static bool reg_set_and_check(struct ixgbe_adapter *adapter, u64 *data, int reg, 1294 u32 mask, u32 write) 1295 { 1296 u32 val, before; 1297 before = readl(adapter->hw.hw_addr + reg); 1298 writel((write & mask), (adapter->hw.hw_addr + reg)); 1299 val = readl(adapter->hw.hw_addr + reg); 1300 if ((write & mask) != (val & mask)) { 1301 e_err(drv, "set/check reg %04X test failed: got 0x%08X " 1302 "expected 0x%08X\n", reg, (val & mask), (write & mask)); 1303 *data = reg; 1304 writel(before, (adapter->hw.hw_addr + reg)); 1305 return 1; 1306 } 1307 writel(before, (adapter->hw.hw_addr + reg)); 1308 return 0; 1309 } 1310 1311 #define REG_PATTERN_TEST(reg, mask, write) \ 1312 do { \ 1313 if (reg_pattern_test(adapter, data, reg, mask, write)) \ 1314 return 1; \ 1315 } while (0) \ 1316 1317 1318 #define REG_SET_AND_CHECK(reg, mask, write) \ 1319 do { \ 1320 if (reg_set_and_check(adapter, data, reg, mask, write)) \ 1321 return 1; \ 1322 } while (0) \ 1323 1324 static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data) 1325 { 1326 const struct ixgbe_reg_test *test; 1327 u32 value, before, after; 1328 u32 i, toggle; 1329 1330 switch (adapter->hw.mac.type) { 1331 case ixgbe_mac_82598EB: 1332 toggle = 0x7FFFF3FF; 1333 test = reg_test_82598; 1334 break; 1335 case ixgbe_mac_82599EB: 1336 case ixgbe_mac_X540: 1337 toggle = 0x7FFFF30F; 1338 test = reg_test_82599; 1339 break; 1340 default: 1341 *data = 1; 1342 return 1; 1343 break; 1344 } 1345 1346 /* 1347 * Because the status register is such a special case, 1348 * we handle it separately from the rest of the register 1349 * tests. Some bits are read-only, some toggle, and some 1350 * are writeable on newer MACs. 1351 */ 1352 before = IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS); 1353 value = (IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS) & toggle); 1354 IXGBE_WRITE_REG(&adapter->hw, IXGBE_STATUS, toggle); 1355 after = IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS) & toggle; 1356 if (value != after) { 1357 e_err(drv, "failed STATUS register test got: 0x%08X " 1358 "expected: 0x%08X\n", after, value); 1359 *data = 1; 1360 return 1; 1361 } 1362 /* restore previous status */ 1363 IXGBE_WRITE_REG(&adapter->hw, IXGBE_STATUS, before); 1364 1365 /* 1366 * Perform the remainder of the register test, looping through 1367 * the test table until we either fail or reach the null entry. 1368 */ 1369 while (test->reg) { 1370 for (i = 0; i < test->array_len; i++) { 1371 switch (test->test_type) { 1372 case PATTERN_TEST: 1373 REG_PATTERN_TEST(test->reg + (i * 0x40), 1374 test->mask, 1375 test->write); 1376 break; 1377 case SET_READ_TEST: 1378 REG_SET_AND_CHECK(test->reg + (i * 0x40), 1379 test->mask, 1380 test->write); 1381 break; 1382 case WRITE_NO_TEST: 1383 writel(test->write, 1384 (adapter->hw.hw_addr + test->reg) 1385 + (i * 0x40)); 1386 break; 1387 case TABLE32_TEST: 1388 REG_PATTERN_TEST(test->reg + (i * 4), 1389 test->mask, 1390 test->write); 1391 break; 1392 case TABLE64_TEST_LO: 1393 REG_PATTERN_TEST(test->reg + (i * 8), 1394 test->mask, 1395 test->write); 1396 break; 1397 case TABLE64_TEST_HI: 1398 REG_PATTERN_TEST((test->reg + 4) + (i * 8), 1399 test->mask, 1400 test->write); 1401 break; 1402 } 1403 } 1404 test++; 1405 } 1406 1407 *data = 0; 1408 return 0; 1409 } 1410 1411 static int ixgbe_eeprom_test(struct ixgbe_adapter *adapter, u64 *data) 1412 { 1413 struct ixgbe_hw *hw = &adapter->hw; 1414 if (hw->eeprom.ops.validate_checksum(hw, NULL)) 1415 *data = 1; 1416 else 1417 *data = 0; 1418 return *data; 1419 } 1420 1421 static irqreturn_t ixgbe_test_intr(int irq, void *data) 1422 { 1423 struct net_device *netdev = (struct net_device *) data; 1424 struct ixgbe_adapter *adapter = netdev_priv(netdev); 1425 1426 adapter->test_icr |= IXGBE_READ_REG(&adapter->hw, IXGBE_EICR); 1427 1428 return IRQ_HANDLED; 1429 } 1430 1431 static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data) 1432 { 1433 struct net_device *netdev = adapter->netdev; 1434 u32 mask, i = 0, shared_int = true; 1435 u32 irq = adapter->pdev->irq; 1436 1437 *data = 0; 1438 1439 /* Hook up test interrupt handler just for this test */ 1440 if (adapter->msix_entries) { 1441 /* NOTE: we don't test MSI-X interrupts here, yet */ 1442 return 0; 1443 } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) { 1444 shared_int = false; 1445 if (request_irq(irq, ixgbe_test_intr, 0, netdev->name, 1446 netdev)) { 1447 *data = 1; 1448 return -1; 1449 } 1450 } else if (!request_irq(irq, ixgbe_test_intr, IRQF_PROBE_SHARED, 1451 netdev->name, netdev)) { 1452 shared_int = false; 1453 } else if (request_irq(irq, ixgbe_test_intr, IRQF_SHARED, 1454 netdev->name, netdev)) { 1455 *data = 1; 1456 return -1; 1457 } 1458 e_info(hw, "testing %s interrupt\n", shared_int ? 1459 "shared" : "unshared"); 1460 1461 /* Disable all the interrupts */ 1462 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF); 1463 IXGBE_WRITE_FLUSH(&adapter->hw); 1464 usleep_range(10000, 20000); 1465 1466 /* Test each interrupt */ 1467 for (; i < 10; i++) { 1468 /* Interrupt to test */ 1469 mask = 1 << i; 1470 1471 if (!shared_int) { 1472 /* 1473 * Disable the interrupts to be reported in 1474 * the cause register and then force the same 1475 * interrupt and see if one gets posted. If 1476 * an interrupt was posted to the bus, the 1477 * test failed. 1478 */ 1479 adapter->test_icr = 0; 1480 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 1481 ~mask & 0x00007FFF); 1482 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, 1483 ~mask & 0x00007FFF); 1484 IXGBE_WRITE_FLUSH(&adapter->hw); 1485 usleep_range(10000, 20000); 1486 1487 if (adapter->test_icr & mask) { 1488 *data = 3; 1489 break; 1490 } 1491 } 1492 1493 /* 1494 * Enable the interrupt to be reported in the cause 1495 * register and then force the same interrupt and see 1496 * if one gets posted. If an interrupt was not posted 1497 * to the bus, the test failed. 1498 */ 1499 adapter->test_icr = 0; 1500 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask); 1501 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask); 1502 IXGBE_WRITE_FLUSH(&adapter->hw); 1503 usleep_range(10000, 20000); 1504 1505 if (!(adapter->test_icr &mask)) { 1506 *data = 4; 1507 break; 1508 } 1509 1510 if (!shared_int) { 1511 /* 1512 * Disable the other interrupts to be reported in 1513 * the cause register and then force the other 1514 * interrupts and see if any get posted. If 1515 * an interrupt was posted to the bus, the 1516 * test failed. 1517 */ 1518 adapter->test_icr = 0; 1519 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 1520 ~mask & 0x00007FFF); 1521 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, 1522 ~mask & 0x00007FFF); 1523 IXGBE_WRITE_FLUSH(&adapter->hw); 1524 usleep_range(10000, 20000); 1525 1526 if (adapter->test_icr) { 1527 *data = 5; 1528 break; 1529 } 1530 } 1531 } 1532 1533 /* Disable all the interrupts */ 1534 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF); 1535 IXGBE_WRITE_FLUSH(&adapter->hw); 1536 usleep_range(10000, 20000); 1537 1538 /* Unhook test interrupt handler */ 1539 free_irq(irq, netdev); 1540 1541 return *data; 1542 } 1543 1544 static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter) 1545 { 1546 struct ixgbe_ring *tx_ring = &adapter->test_tx_ring; 1547 struct ixgbe_ring *rx_ring = &adapter->test_rx_ring; 1548 struct ixgbe_hw *hw = &adapter->hw; 1549 u32 reg_ctl; 1550 1551 /* shut down the DMA engines now so they can be reinitialized later */ 1552 1553 /* first Rx */ 1554 reg_ctl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); 1555 reg_ctl &= ~IXGBE_RXCTRL_RXEN; 1556 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_ctl); 1557 ixgbe_disable_rx_queue(adapter, rx_ring); 1558 1559 /* now Tx */ 1560 reg_ctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx)); 1561 reg_ctl &= ~IXGBE_TXDCTL_ENABLE; 1562 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx), reg_ctl); 1563 1564 switch (hw->mac.type) { 1565 case ixgbe_mac_82599EB: 1566 case ixgbe_mac_X540: 1567 reg_ctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); 1568 reg_ctl &= ~IXGBE_DMATXCTL_TE; 1569 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_ctl); 1570 break; 1571 default: 1572 break; 1573 } 1574 1575 ixgbe_reset(adapter); 1576 1577 ixgbe_free_tx_resources(&adapter->test_tx_ring); 1578 ixgbe_free_rx_resources(&adapter->test_rx_ring); 1579 } 1580 1581 static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter) 1582 { 1583 struct ixgbe_ring *tx_ring = &adapter->test_tx_ring; 1584 struct ixgbe_ring *rx_ring = &adapter->test_rx_ring; 1585 u32 rctl, reg_data; 1586 int ret_val; 1587 int err; 1588 1589 /* Setup Tx descriptor ring and Tx buffers */ 1590 tx_ring->count = IXGBE_DEFAULT_TXD; 1591 tx_ring->queue_index = 0; 1592 tx_ring->dev = &adapter->pdev->dev; 1593 tx_ring->netdev = adapter->netdev; 1594 tx_ring->reg_idx = adapter->tx_ring[0]->reg_idx; 1595 1596 err = ixgbe_setup_tx_resources(tx_ring); 1597 if (err) 1598 return 1; 1599 1600 switch (adapter->hw.mac.type) { 1601 case ixgbe_mac_82599EB: 1602 case ixgbe_mac_X540: 1603 reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_DMATXCTL); 1604 reg_data |= IXGBE_DMATXCTL_TE; 1605 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DMATXCTL, reg_data); 1606 break; 1607 default: 1608 break; 1609 } 1610 1611 ixgbe_configure_tx_ring(adapter, tx_ring); 1612 1613 /* Setup Rx Descriptor ring and Rx buffers */ 1614 rx_ring->count = IXGBE_DEFAULT_RXD; 1615 rx_ring->queue_index = 0; 1616 rx_ring->dev = &adapter->pdev->dev; 1617 rx_ring->netdev = adapter->netdev; 1618 rx_ring->reg_idx = adapter->rx_ring[0]->reg_idx; 1619 1620 err = ixgbe_setup_rx_resources(rx_ring); 1621 if (err) { 1622 ret_val = 4; 1623 goto err_nomem; 1624 } 1625 1626 rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXCTRL); 1627 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rctl & ~IXGBE_RXCTRL_RXEN); 1628 1629 ixgbe_configure_rx_ring(adapter, rx_ring); 1630 1631 rctl |= IXGBE_RXCTRL_RXEN | IXGBE_RXCTRL_DMBYPS; 1632 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rctl); 1633 1634 return 0; 1635 1636 err_nomem: 1637 ixgbe_free_desc_rings(adapter); 1638 return ret_val; 1639 } 1640 1641 static int ixgbe_setup_loopback_test(struct ixgbe_adapter *adapter) 1642 { 1643 struct ixgbe_hw *hw = &adapter->hw; 1644 u32 reg_data; 1645 1646 /* X540 needs to set the MACC.FLU bit to force link up */ 1647 if (adapter->hw.mac.type == ixgbe_mac_X540) { 1648 reg_data = IXGBE_READ_REG(hw, IXGBE_MACC); 1649 reg_data |= IXGBE_MACC_FLU; 1650 IXGBE_WRITE_REG(hw, IXGBE_MACC, reg_data); 1651 } 1652 1653 /* right now we only support MAC loopback in the driver */ 1654 reg_data = IXGBE_READ_REG(hw, IXGBE_HLREG0); 1655 /* Setup MAC loopback */ 1656 reg_data |= IXGBE_HLREG0_LPBK; 1657 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_data); 1658 1659 reg_data = IXGBE_READ_REG(hw, IXGBE_FCTRL); 1660 reg_data |= IXGBE_FCTRL_BAM | IXGBE_FCTRL_SBP | IXGBE_FCTRL_MPE; 1661 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg_data); 1662 1663 reg_data = IXGBE_READ_REG(hw, IXGBE_AUTOC); 1664 reg_data &= ~IXGBE_AUTOC_LMS_MASK; 1665 reg_data |= IXGBE_AUTOC_LMS_10G_LINK_NO_AN | IXGBE_AUTOC_FLU; 1666 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_data); 1667 IXGBE_WRITE_FLUSH(hw); 1668 usleep_range(10000, 20000); 1669 1670 /* Disable Atlas Tx lanes; re-enabled in reset path */ 1671 if (hw->mac.type == ixgbe_mac_82598EB) { 1672 u8 atlas; 1673 1674 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &atlas); 1675 atlas |= IXGBE_ATLAS_PDN_TX_REG_EN; 1676 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, atlas); 1677 1678 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, &atlas); 1679 atlas |= IXGBE_ATLAS_PDN_TX_10G_QL_ALL; 1680 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, atlas); 1681 1682 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, &atlas); 1683 atlas |= IXGBE_ATLAS_PDN_TX_1G_QL_ALL; 1684 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, atlas); 1685 1686 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, &atlas); 1687 atlas |= IXGBE_ATLAS_PDN_TX_AN_QL_ALL; 1688 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, atlas); 1689 } 1690 1691 return 0; 1692 } 1693 1694 static void ixgbe_loopback_cleanup(struct ixgbe_adapter *adapter) 1695 { 1696 u32 reg_data; 1697 1698 reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0); 1699 reg_data &= ~IXGBE_HLREG0_LPBK; 1700 IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, reg_data); 1701 } 1702 1703 static void ixgbe_create_lbtest_frame(struct sk_buff *skb, 1704 unsigned int frame_size) 1705 { 1706 memset(skb->data, 0xFF, frame_size); 1707 frame_size >>= 1; 1708 memset(&skb->data[frame_size], 0xAA, frame_size / 2 - 1); 1709 memset(&skb->data[frame_size + 10], 0xBE, 1); 1710 memset(&skb->data[frame_size + 12], 0xAF, 1); 1711 } 1712 1713 static bool ixgbe_check_lbtest_frame(struct ixgbe_rx_buffer *rx_buffer, 1714 unsigned int frame_size) 1715 { 1716 unsigned char *data; 1717 bool match = true; 1718 1719 frame_size >>= 1; 1720 1721 data = kmap(rx_buffer->page) + rx_buffer->page_offset; 1722 1723 if (data[3] != 0xFF || 1724 data[frame_size + 10] != 0xBE || 1725 data[frame_size + 12] != 0xAF) 1726 match = false; 1727 1728 kunmap(rx_buffer->page); 1729 1730 return match; 1731 } 1732 1733 static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring, 1734 struct ixgbe_ring *tx_ring, 1735 unsigned int size) 1736 { 1737 union ixgbe_adv_rx_desc *rx_desc; 1738 struct ixgbe_rx_buffer *rx_buffer; 1739 struct ixgbe_tx_buffer *tx_buffer; 1740 u16 rx_ntc, tx_ntc, count = 0; 1741 1742 /* initialize next to clean and descriptor values */ 1743 rx_ntc = rx_ring->next_to_clean; 1744 tx_ntc = tx_ring->next_to_clean; 1745 rx_desc = IXGBE_RX_DESC(rx_ring, rx_ntc); 1746 1747 while (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_DD)) { 1748 /* check Rx buffer */ 1749 rx_buffer = &rx_ring->rx_buffer_info[rx_ntc]; 1750 1751 /* sync Rx buffer for CPU read */ 1752 dma_sync_single_for_cpu(rx_ring->dev, 1753 rx_buffer->dma, 1754 ixgbe_rx_bufsz(rx_ring), 1755 DMA_FROM_DEVICE); 1756 1757 /* verify contents of skb */ 1758 if (ixgbe_check_lbtest_frame(rx_buffer, size)) 1759 count++; 1760 1761 /* sync Rx buffer for device write */ 1762 dma_sync_single_for_device(rx_ring->dev, 1763 rx_buffer->dma, 1764 ixgbe_rx_bufsz(rx_ring), 1765 DMA_FROM_DEVICE); 1766 1767 /* unmap buffer on Tx side */ 1768 tx_buffer = &tx_ring->tx_buffer_info[tx_ntc]; 1769 ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer); 1770 1771 /* increment Rx/Tx next to clean counters */ 1772 rx_ntc++; 1773 if (rx_ntc == rx_ring->count) 1774 rx_ntc = 0; 1775 tx_ntc++; 1776 if (tx_ntc == tx_ring->count) 1777 tx_ntc = 0; 1778 1779 /* fetch next descriptor */ 1780 rx_desc = IXGBE_RX_DESC(rx_ring, rx_ntc); 1781 } 1782 1783 netdev_tx_reset_queue(txring_txq(tx_ring)); 1784 1785 /* re-map buffers to ring, store next to clean values */ 1786 ixgbe_alloc_rx_buffers(rx_ring, count); 1787 rx_ring->next_to_clean = rx_ntc; 1788 tx_ring->next_to_clean = tx_ntc; 1789 1790 return count; 1791 } 1792 1793 static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter) 1794 { 1795 struct ixgbe_ring *tx_ring = &adapter->test_tx_ring; 1796 struct ixgbe_ring *rx_ring = &adapter->test_rx_ring; 1797 int i, j, lc, good_cnt, ret_val = 0; 1798 unsigned int size = 1024; 1799 netdev_tx_t tx_ret_val; 1800 struct sk_buff *skb; 1801 1802 /* allocate test skb */ 1803 skb = alloc_skb(size, GFP_KERNEL); 1804 if (!skb) 1805 return 11; 1806 1807 /* place data into test skb */ 1808 ixgbe_create_lbtest_frame(skb, size); 1809 skb_put(skb, size); 1810 1811 /* 1812 * Calculate the loop count based on the largest descriptor ring 1813 * The idea is to wrap the largest ring a number of times using 64 1814 * send/receive pairs during each loop 1815 */ 1816 1817 if (rx_ring->count <= tx_ring->count) 1818 lc = ((tx_ring->count / 64) * 2) + 1; 1819 else 1820 lc = ((rx_ring->count / 64) * 2) + 1; 1821 1822 for (j = 0; j <= lc; j++) { 1823 /* reset count of good packets */ 1824 good_cnt = 0; 1825 1826 /* place 64 packets on the transmit queue*/ 1827 for (i = 0; i < 64; i++) { 1828 skb_get(skb); 1829 tx_ret_val = ixgbe_xmit_frame_ring(skb, 1830 adapter, 1831 tx_ring); 1832 if (tx_ret_val == NETDEV_TX_OK) 1833 good_cnt++; 1834 } 1835 1836 if (good_cnt != 64) { 1837 ret_val = 12; 1838 break; 1839 } 1840 1841 /* allow 200 milliseconds for packets to go from Tx to Rx */ 1842 msleep(200); 1843 1844 good_cnt = ixgbe_clean_test_rings(rx_ring, tx_ring, size); 1845 if (good_cnt != 64) { 1846 ret_val = 13; 1847 break; 1848 } 1849 } 1850 1851 /* free the original skb */ 1852 kfree_skb(skb); 1853 1854 return ret_val; 1855 } 1856 1857 static int ixgbe_loopback_test(struct ixgbe_adapter *adapter, u64 *data) 1858 { 1859 *data = ixgbe_setup_desc_rings(adapter); 1860 if (*data) 1861 goto out; 1862 *data = ixgbe_setup_loopback_test(adapter); 1863 if (*data) 1864 goto err_loopback; 1865 *data = ixgbe_run_loopback_test(adapter); 1866 ixgbe_loopback_cleanup(adapter); 1867 1868 err_loopback: 1869 ixgbe_free_desc_rings(adapter); 1870 out: 1871 return *data; 1872 } 1873 1874 static void ixgbe_diag_test(struct net_device *netdev, 1875 struct ethtool_test *eth_test, u64 *data) 1876 { 1877 struct ixgbe_adapter *adapter = netdev_priv(netdev); 1878 bool if_running = netif_running(netdev); 1879 1880 set_bit(__IXGBE_TESTING, &adapter->state); 1881 if (eth_test->flags == ETH_TEST_FL_OFFLINE) { 1882 /* Offline tests */ 1883 1884 e_info(hw, "offline testing starting\n"); 1885 1886 /* Link test performed before hardware reset so autoneg doesn't 1887 * interfere with test result */ 1888 if (ixgbe_link_test(adapter, &data[4])) 1889 eth_test->flags |= ETH_TEST_FL_FAILED; 1890 1891 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { 1892 int i; 1893 for (i = 0; i < adapter->num_vfs; i++) { 1894 if (adapter->vfinfo[i].clear_to_send) { 1895 netdev_warn(netdev, "%s", 1896 "offline diagnostic is not " 1897 "supported when VFs are " 1898 "present\n"); 1899 data[0] = 1; 1900 data[1] = 1; 1901 data[2] = 1; 1902 data[3] = 1; 1903 eth_test->flags |= ETH_TEST_FL_FAILED; 1904 clear_bit(__IXGBE_TESTING, 1905 &adapter->state); 1906 goto skip_ol_tests; 1907 } 1908 } 1909 } 1910 1911 if (if_running) 1912 /* indicate we're in test mode */ 1913 dev_close(netdev); 1914 else 1915 ixgbe_reset(adapter); 1916 1917 e_info(hw, "register testing starting\n"); 1918 if (ixgbe_reg_test(adapter, &data[0])) 1919 eth_test->flags |= ETH_TEST_FL_FAILED; 1920 1921 ixgbe_reset(adapter); 1922 e_info(hw, "eeprom testing starting\n"); 1923 if (ixgbe_eeprom_test(adapter, &data[1])) 1924 eth_test->flags |= ETH_TEST_FL_FAILED; 1925 1926 ixgbe_reset(adapter); 1927 e_info(hw, "interrupt testing starting\n"); 1928 if (ixgbe_intr_test(adapter, &data[2])) 1929 eth_test->flags |= ETH_TEST_FL_FAILED; 1930 1931 /* If SRIOV or VMDq is enabled then skip MAC 1932 * loopback diagnostic. */ 1933 if (adapter->flags & (IXGBE_FLAG_SRIOV_ENABLED | 1934 IXGBE_FLAG_VMDQ_ENABLED)) { 1935 e_info(hw, "Skip MAC loopback diagnostic in VT " 1936 "mode\n"); 1937 data[3] = 0; 1938 goto skip_loopback; 1939 } 1940 1941 ixgbe_reset(adapter); 1942 e_info(hw, "loopback testing starting\n"); 1943 if (ixgbe_loopback_test(adapter, &data[3])) 1944 eth_test->flags |= ETH_TEST_FL_FAILED; 1945 1946 skip_loopback: 1947 ixgbe_reset(adapter); 1948 1949 clear_bit(__IXGBE_TESTING, &adapter->state); 1950 if (if_running) 1951 dev_open(netdev); 1952 } else { 1953 e_info(hw, "online testing starting\n"); 1954 /* Online tests */ 1955 if (ixgbe_link_test(adapter, &data[4])) 1956 eth_test->flags |= ETH_TEST_FL_FAILED; 1957 1958 /* Online tests aren't run; pass by default */ 1959 data[0] = 0; 1960 data[1] = 0; 1961 data[2] = 0; 1962 data[3] = 0; 1963 1964 clear_bit(__IXGBE_TESTING, &adapter->state); 1965 } 1966 skip_ol_tests: 1967 msleep_interruptible(4 * 1000); 1968 } 1969 1970 static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter, 1971 struct ethtool_wolinfo *wol) 1972 { 1973 struct ixgbe_hw *hw = &adapter->hw; 1974 int retval = 1; 1975 u16 wol_cap = adapter->eeprom_cap & IXGBE_DEVICE_CAPS_WOL_MASK; 1976 1977 /* WOL not supported except for the following */ 1978 switch(hw->device_id) { 1979 case IXGBE_DEV_ID_82599_SFP: 1980 /* Only these subdevices could supports WOL */ 1981 switch (hw->subsystem_device_id) { 1982 case IXGBE_SUBDEV_ID_82599_560FLR: 1983 /* only support first port */ 1984 if (hw->bus.func != 0) { 1985 wol->supported = 0; 1986 break; 1987 } 1988 case IXGBE_SUBDEV_ID_82599_SFP: 1989 retval = 0; 1990 break; 1991 default: 1992 wol->supported = 0; 1993 break; 1994 } 1995 break; 1996 case IXGBE_DEV_ID_82599_COMBO_BACKPLANE: 1997 /* All except this subdevice support WOL */ 1998 if (hw->subsystem_device_id == 1999 IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ) { 2000 wol->supported = 0; 2001 break; 2002 } 2003 retval = 0; 2004 break; 2005 case IXGBE_DEV_ID_82599_KX4: 2006 retval = 0; 2007 break; 2008 case IXGBE_DEV_ID_X540T: 2009 /* check eeprom to see if enabled wol */ 2010 if ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0_1) || 2011 ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0) && 2012 (hw->bus.func == 0))) { 2013 retval = 0; 2014 break; 2015 } 2016 2017 /* All others not supported */ 2018 wol->supported = 0; 2019 break; 2020 default: 2021 wol->supported = 0; 2022 } 2023 2024 return retval; 2025 } 2026 2027 static void ixgbe_get_wol(struct net_device *netdev, 2028 struct ethtool_wolinfo *wol) 2029 { 2030 struct ixgbe_adapter *adapter = netdev_priv(netdev); 2031 2032 wol->supported = WAKE_UCAST | WAKE_MCAST | 2033 WAKE_BCAST | WAKE_MAGIC; 2034 wol->wolopts = 0; 2035 2036 if (ixgbe_wol_exclusion(adapter, wol) || 2037 !device_can_wakeup(&adapter->pdev->dev)) 2038 return; 2039 2040 if (adapter->wol & IXGBE_WUFC_EX) 2041 wol->wolopts |= WAKE_UCAST; 2042 if (adapter->wol & IXGBE_WUFC_MC) 2043 wol->wolopts |= WAKE_MCAST; 2044 if (adapter->wol & IXGBE_WUFC_BC) 2045 wol->wolopts |= WAKE_BCAST; 2046 if (adapter->wol & IXGBE_WUFC_MAG) 2047 wol->wolopts |= WAKE_MAGIC; 2048 } 2049 2050 static int ixgbe_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) 2051 { 2052 struct ixgbe_adapter *adapter = netdev_priv(netdev); 2053 2054 if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE)) 2055 return -EOPNOTSUPP; 2056 2057 if (ixgbe_wol_exclusion(adapter, wol)) 2058 return wol->wolopts ? -EOPNOTSUPP : 0; 2059 2060 adapter->wol = 0; 2061 2062 if (wol->wolopts & WAKE_UCAST) 2063 adapter->wol |= IXGBE_WUFC_EX; 2064 if (wol->wolopts & WAKE_MCAST) 2065 adapter->wol |= IXGBE_WUFC_MC; 2066 if (wol->wolopts & WAKE_BCAST) 2067 adapter->wol |= IXGBE_WUFC_BC; 2068 if (wol->wolopts & WAKE_MAGIC) 2069 adapter->wol |= IXGBE_WUFC_MAG; 2070 2071 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); 2072 2073 return 0; 2074 } 2075 2076 static int ixgbe_nway_reset(struct net_device *netdev) 2077 { 2078 struct ixgbe_adapter *adapter = netdev_priv(netdev); 2079 2080 if (netif_running(netdev)) 2081 ixgbe_reinit_locked(adapter); 2082 2083 return 0; 2084 } 2085 2086 static int ixgbe_set_phys_id(struct net_device *netdev, 2087 enum ethtool_phys_id_state state) 2088 { 2089 struct ixgbe_adapter *adapter = netdev_priv(netdev); 2090 struct ixgbe_hw *hw = &adapter->hw; 2091 2092 switch (state) { 2093 case ETHTOOL_ID_ACTIVE: 2094 adapter->led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); 2095 return 2; 2096 2097 case ETHTOOL_ID_ON: 2098 hw->mac.ops.led_on(hw, IXGBE_LED_ON); 2099 break; 2100 2101 case ETHTOOL_ID_OFF: 2102 hw->mac.ops.led_off(hw, IXGBE_LED_ON); 2103 break; 2104 2105 case ETHTOOL_ID_INACTIVE: 2106 /* Restore LED settings */ 2107 IXGBE_WRITE_REG(&adapter->hw, IXGBE_LEDCTL, adapter->led_reg); 2108 break; 2109 } 2110 2111 return 0; 2112 } 2113 2114 static int ixgbe_get_coalesce(struct net_device *netdev, 2115 struct ethtool_coalesce *ec) 2116 { 2117 struct ixgbe_adapter *adapter = netdev_priv(netdev); 2118 2119 /* only valid if in constant ITR mode */ 2120 if (adapter->rx_itr_setting <= 1) 2121 ec->rx_coalesce_usecs = adapter->rx_itr_setting; 2122 else 2123 ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2; 2124 2125 /* if in mixed tx/rx queues per vector mode, report only rx settings */ 2126 if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count) 2127 return 0; 2128 2129 /* only valid if in constant ITR mode */ 2130 if (adapter->tx_itr_setting <= 1) 2131 ec->tx_coalesce_usecs = adapter->tx_itr_setting; 2132 else 2133 ec->tx_coalesce_usecs = adapter->tx_itr_setting >> 2; 2134 2135 return 0; 2136 } 2137 2138 /* 2139 * this function must be called before setting the new value of 2140 * rx_itr_setting 2141 */ 2142 static bool ixgbe_update_rsc(struct ixgbe_adapter *adapter) 2143 { 2144 struct net_device *netdev = adapter->netdev; 2145 2146 /* nothing to do if LRO or RSC are not enabled */ 2147 if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) || 2148 !(netdev->features & NETIF_F_LRO)) 2149 return false; 2150 2151 /* check the feature flag value and enable RSC if necessary */ 2152 if (adapter->rx_itr_setting == 1 || 2153 adapter->rx_itr_setting > IXGBE_MIN_RSC_ITR) { 2154 if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) { 2155 adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED; 2156 e_info(probe, "rx-usecs value high enough " 2157 "to re-enable RSC\n"); 2158 return true; 2159 } 2160 /* if interrupt rate is too high then disable RSC */ 2161 } else if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) { 2162 adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED; 2163 e_info(probe, "rx-usecs set too low, disabling RSC\n"); 2164 return true; 2165 } 2166 return false; 2167 } 2168 2169 static int ixgbe_set_coalesce(struct net_device *netdev, 2170 struct ethtool_coalesce *ec) 2171 { 2172 struct ixgbe_adapter *adapter = netdev_priv(netdev); 2173 struct ixgbe_q_vector *q_vector; 2174 int i; 2175 int num_vectors; 2176 u16 tx_itr_param, rx_itr_param; 2177 bool need_reset = false; 2178 2179 /* don't accept tx specific changes if we've got mixed RxTx vectors */ 2180 if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count 2181 && ec->tx_coalesce_usecs) 2182 return -EINVAL; 2183 2184 if ((ec->rx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)) || 2185 (ec->tx_coalesce_usecs > (IXGBE_MAX_EITR >> 2))) 2186 return -EINVAL; 2187 2188 if (ec->rx_coalesce_usecs > 1) 2189 adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2; 2190 else 2191 adapter->rx_itr_setting = ec->rx_coalesce_usecs; 2192 2193 if (adapter->rx_itr_setting == 1) 2194 rx_itr_param = IXGBE_20K_ITR; 2195 else 2196 rx_itr_param = adapter->rx_itr_setting; 2197 2198 if (ec->tx_coalesce_usecs > 1) 2199 adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2; 2200 else 2201 adapter->tx_itr_setting = ec->tx_coalesce_usecs; 2202 2203 if (adapter->tx_itr_setting == 1) 2204 tx_itr_param = IXGBE_10K_ITR; 2205 else 2206 tx_itr_param = adapter->tx_itr_setting; 2207 2208 /* check the old value and enable RSC if necessary */ 2209 need_reset = ixgbe_update_rsc(adapter); 2210 2211 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) 2212 num_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 2213 else 2214 num_vectors = 1; 2215 2216 for (i = 0; i < num_vectors; i++) { 2217 q_vector = adapter->q_vector[i]; 2218 if (q_vector->tx.count && !q_vector->rx.count) 2219 /* tx only */ 2220 q_vector->itr = tx_itr_param; 2221 else 2222 /* rx only or mixed */ 2223 q_vector->itr = rx_itr_param; 2224 ixgbe_write_eitr(q_vector); 2225 } 2226 2227 /* 2228 * do reset here at the end to make sure EITR==0 case is handled 2229 * correctly w.r.t stopping tx, and changing TXDCTL.WTHRESH settings 2230 * also locks in RSC enable/disable which requires reset 2231 */ 2232 if (need_reset) 2233 ixgbe_do_reset(netdev); 2234 2235 return 0; 2236 } 2237 2238 static int ixgbe_get_ethtool_fdir_entry(struct ixgbe_adapter *adapter, 2239 struct ethtool_rxnfc *cmd) 2240 { 2241 union ixgbe_atr_input *mask = &adapter->fdir_mask; 2242 struct ethtool_rx_flow_spec *fsp = 2243 (struct ethtool_rx_flow_spec *)&cmd->fs; 2244 struct hlist_node *node, *node2; 2245 struct ixgbe_fdir_filter *rule = NULL; 2246 2247 /* report total rule count */ 2248 cmd->data = (1024 << adapter->fdir_pballoc) - 2; 2249 2250 hlist_for_each_entry_safe(rule, node, node2, 2251 &adapter->fdir_filter_list, fdir_node) { 2252 if (fsp->location <= rule->sw_idx) 2253 break; 2254 } 2255 2256 if (!rule || fsp->location != rule->sw_idx) 2257 return -EINVAL; 2258 2259 /* fill out the flow spec entry */ 2260 2261 /* set flow type field */ 2262 switch (rule->filter.formatted.flow_type) { 2263 case IXGBE_ATR_FLOW_TYPE_TCPV4: 2264 fsp->flow_type = TCP_V4_FLOW; 2265 break; 2266 case IXGBE_ATR_FLOW_TYPE_UDPV4: 2267 fsp->flow_type = UDP_V4_FLOW; 2268 break; 2269 case IXGBE_ATR_FLOW_TYPE_SCTPV4: 2270 fsp->flow_type = SCTP_V4_FLOW; 2271 break; 2272 case IXGBE_ATR_FLOW_TYPE_IPV4: 2273 fsp->flow_type = IP_USER_FLOW; 2274 fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4; 2275 fsp->h_u.usr_ip4_spec.proto = 0; 2276 fsp->m_u.usr_ip4_spec.proto = 0; 2277 break; 2278 default: 2279 return -EINVAL; 2280 } 2281 2282 fsp->h_u.tcp_ip4_spec.psrc = rule->filter.formatted.src_port; 2283 fsp->m_u.tcp_ip4_spec.psrc = mask->formatted.src_port; 2284 fsp->h_u.tcp_ip4_spec.pdst = rule->filter.formatted.dst_port; 2285 fsp->m_u.tcp_ip4_spec.pdst = mask->formatted.dst_port; 2286 fsp->h_u.tcp_ip4_spec.ip4src = rule->filter.formatted.src_ip[0]; 2287 fsp->m_u.tcp_ip4_spec.ip4src = mask->formatted.src_ip[0]; 2288 fsp->h_u.tcp_ip4_spec.ip4dst = rule->filter.formatted.dst_ip[0]; 2289 fsp->m_u.tcp_ip4_spec.ip4dst = mask->formatted.dst_ip[0]; 2290 fsp->h_ext.vlan_tci = rule->filter.formatted.vlan_id; 2291 fsp->m_ext.vlan_tci = mask->formatted.vlan_id; 2292 fsp->h_ext.vlan_etype = rule->filter.formatted.flex_bytes; 2293 fsp->m_ext.vlan_etype = mask->formatted.flex_bytes; 2294 fsp->h_ext.data[1] = htonl(rule->filter.formatted.vm_pool); 2295 fsp->m_ext.data[1] = htonl(mask->formatted.vm_pool); 2296 fsp->flow_type |= FLOW_EXT; 2297 2298 /* record action */ 2299 if (rule->action == IXGBE_FDIR_DROP_QUEUE) 2300 fsp->ring_cookie = RX_CLS_FLOW_DISC; 2301 else 2302 fsp->ring_cookie = rule->action; 2303 2304 return 0; 2305 } 2306 2307 static int ixgbe_get_ethtool_fdir_all(struct ixgbe_adapter *adapter, 2308 struct ethtool_rxnfc *cmd, 2309 u32 *rule_locs) 2310 { 2311 struct hlist_node *node, *node2; 2312 struct ixgbe_fdir_filter *rule; 2313 int cnt = 0; 2314 2315 /* report total rule count */ 2316 cmd->data = (1024 << adapter->fdir_pballoc) - 2; 2317 2318 hlist_for_each_entry_safe(rule, node, node2, 2319 &adapter->fdir_filter_list, fdir_node) { 2320 if (cnt == cmd->rule_cnt) 2321 return -EMSGSIZE; 2322 rule_locs[cnt] = rule->sw_idx; 2323 cnt++; 2324 } 2325 2326 cmd->rule_cnt = cnt; 2327 2328 return 0; 2329 } 2330 2331 static int ixgbe_get_rss_hash_opts(struct ixgbe_adapter *adapter, 2332 struct ethtool_rxnfc *cmd) 2333 { 2334 cmd->data = 0; 2335 2336 /* if RSS is disabled then report no hashing */ 2337 if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED)) 2338 return 0; 2339 2340 /* Report default options for RSS on ixgbe */ 2341 switch (cmd->flow_type) { 2342 case TCP_V4_FLOW: 2343 cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; 2344 case UDP_V4_FLOW: 2345 if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP) 2346 cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; 2347 case SCTP_V4_FLOW: 2348 case AH_ESP_V4_FLOW: 2349 case AH_V4_FLOW: 2350 case ESP_V4_FLOW: 2351 case IPV4_FLOW: 2352 cmd->data |= RXH_IP_SRC | RXH_IP_DST; 2353 break; 2354 case TCP_V6_FLOW: 2355 cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; 2356 case UDP_V6_FLOW: 2357 if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP) 2358 cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; 2359 case SCTP_V6_FLOW: 2360 case AH_ESP_V6_FLOW: 2361 case AH_V6_FLOW: 2362 case ESP_V6_FLOW: 2363 case IPV6_FLOW: 2364 cmd->data |= RXH_IP_SRC | RXH_IP_DST; 2365 break; 2366 default: 2367 return -EINVAL; 2368 } 2369 2370 return 0; 2371 } 2372 2373 static int ixgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, 2374 u32 *rule_locs) 2375 { 2376 struct ixgbe_adapter *adapter = netdev_priv(dev); 2377 int ret = -EOPNOTSUPP; 2378 2379 switch (cmd->cmd) { 2380 case ETHTOOL_GRXRINGS: 2381 cmd->data = adapter->num_rx_queues; 2382 ret = 0; 2383 break; 2384 case ETHTOOL_GRXCLSRLCNT: 2385 cmd->rule_cnt = adapter->fdir_filter_count; 2386 ret = 0; 2387 break; 2388 case ETHTOOL_GRXCLSRULE: 2389 ret = ixgbe_get_ethtool_fdir_entry(adapter, cmd); 2390 break; 2391 case ETHTOOL_GRXCLSRLALL: 2392 ret = ixgbe_get_ethtool_fdir_all(adapter, cmd, rule_locs); 2393 break; 2394 case ETHTOOL_GRXFH: 2395 ret = ixgbe_get_rss_hash_opts(adapter, cmd); 2396 break; 2397 default: 2398 break; 2399 } 2400 2401 return ret; 2402 } 2403 2404 static int ixgbe_update_ethtool_fdir_entry(struct ixgbe_adapter *adapter, 2405 struct ixgbe_fdir_filter *input, 2406 u16 sw_idx) 2407 { 2408 struct ixgbe_hw *hw = &adapter->hw; 2409 struct hlist_node *node, *node2, *parent; 2410 struct ixgbe_fdir_filter *rule; 2411 int err = -EINVAL; 2412 2413 parent = NULL; 2414 rule = NULL; 2415 2416 hlist_for_each_entry_safe(rule, node, node2, 2417 &adapter->fdir_filter_list, fdir_node) { 2418 /* hash found, or no matching entry */ 2419 if (rule->sw_idx >= sw_idx) 2420 break; 2421 parent = node; 2422 } 2423 2424 /* if there is an old rule occupying our place remove it */ 2425 if (rule && (rule->sw_idx == sw_idx)) { 2426 if (!input || (rule->filter.formatted.bkt_hash != 2427 input->filter.formatted.bkt_hash)) { 2428 err = ixgbe_fdir_erase_perfect_filter_82599(hw, 2429 &rule->filter, 2430 sw_idx); 2431 } 2432 2433 hlist_del(&rule->fdir_node); 2434 kfree(rule); 2435 adapter->fdir_filter_count--; 2436 } 2437 2438 /* 2439 * If no input this was a delete, err should be 0 if a rule was 2440 * successfully found and removed from the list else -EINVAL 2441 */ 2442 if (!input) 2443 return err; 2444 2445 /* initialize node and set software index */ 2446 INIT_HLIST_NODE(&input->fdir_node); 2447 2448 /* add filter to the list */ 2449 if (parent) 2450 hlist_add_after(parent, &input->fdir_node); 2451 else 2452 hlist_add_head(&input->fdir_node, 2453 &adapter->fdir_filter_list); 2454 2455 /* update counts */ 2456 adapter->fdir_filter_count++; 2457 2458 return 0; 2459 } 2460 2461 static int ixgbe_flowspec_to_flow_type(struct ethtool_rx_flow_spec *fsp, 2462 u8 *flow_type) 2463 { 2464 switch (fsp->flow_type & ~FLOW_EXT) { 2465 case TCP_V4_FLOW: 2466 *flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4; 2467 break; 2468 case UDP_V4_FLOW: 2469 *flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4; 2470 break; 2471 case SCTP_V4_FLOW: 2472 *flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4; 2473 break; 2474 case IP_USER_FLOW: 2475 switch (fsp->h_u.usr_ip4_spec.proto) { 2476 case IPPROTO_TCP: 2477 *flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4; 2478 break; 2479 case IPPROTO_UDP: 2480 *flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4; 2481 break; 2482 case IPPROTO_SCTP: 2483 *flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4; 2484 break; 2485 case 0: 2486 if (!fsp->m_u.usr_ip4_spec.proto) { 2487 *flow_type = IXGBE_ATR_FLOW_TYPE_IPV4; 2488 break; 2489 } 2490 default: 2491 return 0; 2492 } 2493 break; 2494 default: 2495 return 0; 2496 } 2497 2498 return 1; 2499 } 2500 2501 static int ixgbe_add_ethtool_fdir_entry(struct ixgbe_adapter *adapter, 2502 struct ethtool_rxnfc *cmd) 2503 { 2504 struct ethtool_rx_flow_spec *fsp = 2505 (struct ethtool_rx_flow_spec *)&cmd->fs; 2506 struct ixgbe_hw *hw = &adapter->hw; 2507 struct ixgbe_fdir_filter *input; 2508 union ixgbe_atr_input mask; 2509 int err; 2510 2511 if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) 2512 return -EOPNOTSUPP; 2513 2514 /* 2515 * Don't allow programming if the action is a queue greater than 2516 * the number of online Rx queues. 2517 */ 2518 if ((fsp->ring_cookie != RX_CLS_FLOW_DISC) && 2519 (fsp->ring_cookie >= adapter->num_rx_queues)) 2520 return -EINVAL; 2521 2522 /* Don't allow indexes to exist outside of available space */ 2523 if (fsp->location >= ((1024 << adapter->fdir_pballoc) - 2)) { 2524 e_err(drv, "Location out of range\n"); 2525 return -EINVAL; 2526 } 2527 2528 input = kzalloc(sizeof(*input), GFP_ATOMIC); 2529 if (!input) 2530 return -ENOMEM; 2531 2532 memset(&mask, 0, sizeof(union ixgbe_atr_input)); 2533 2534 /* set SW index */ 2535 input->sw_idx = fsp->location; 2536 2537 /* record flow type */ 2538 if (!ixgbe_flowspec_to_flow_type(fsp, 2539 &input->filter.formatted.flow_type)) { 2540 e_err(drv, "Unrecognized flow type\n"); 2541 goto err_out; 2542 } 2543 2544 mask.formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK | 2545 IXGBE_ATR_L4TYPE_MASK; 2546 2547 if (input->filter.formatted.flow_type == IXGBE_ATR_FLOW_TYPE_IPV4) 2548 mask.formatted.flow_type &= IXGBE_ATR_L4TYPE_IPV6_MASK; 2549 2550 /* Copy input into formatted structures */ 2551 input->filter.formatted.src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4src; 2552 mask.formatted.src_ip[0] = fsp->m_u.tcp_ip4_spec.ip4src; 2553 input->filter.formatted.dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst; 2554 mask.formatted.dst_ip[0] = fsp->m_u.tcp_ip4_spec.ip4dst; 2555 input->filter.formatted.src_port = fsp->h_u.tcp_ip4_spec.psrc; 2556 mask.formatted.src_port = fsp->m_u.tcp_ip4_spec.psrc; 2557 input->filter.formatted.dst_port = fsp->h_u.tcp_ip4_spec.pdst; 2558 mask.formatted.dst_port = fsp->m_u.tcp_ip4_spec.pdst; 2559 2560 if (fsp->flow_type & FLOW_EXT) { 2561 input->filter.formatted.vm_pool = 2562 (unsigned char)ntohl(fsp->h_ext.data[1]); 2563 mask.formatted.vm_pool = 2564 (unsigned char)ntohl(fsp->m_ext.data[1]); 2565 input->filter.formatted.vlan_id = fsp->h_ext.vlan_tci; 2566 mask.formatted.vlan_id = fsp->m_ext.vlan_tci; 2567 input->filter.formatted.flex_bytes = 2568 fsp->h_ext.vlan_etype; 2569 mask.formatted.flex_bytes = fsp->m_ext.vlan_etype; 2570 } 2571 2572 /* determine if we need to drop or route the packet */ 2573 if (fsp->ring_cookie == RX_CLS_FLOW_DISC) 2574 input->action = IXGBE_FDIR_DROP_QUEUE; 2575 else 2576 input->action = fsp->ring_cookie; 2577 2578 spin_lock(&adapter->fdir_perfect_lock); 2579 2580 if (hlist_empty(&adapter->fdir_filter_list)) { 2581 /* save mask and program input mask into HW */ 2582 memcpy(&adapter->fdir_mask, &mask, sizeof(mask)); 2583 err = ixgbe_fdir_set_input_mask_82599(hw, &mask); 2584 if (err) { 2585 e_err(drv, "Error writing mask\n"); 2586 goto err_out_w_lock; 2587 } 2588 } else if (memcmp(&adapter->fdir_mask, &mask, sizeof(mask))) { 2589 e_err(drv, "Only one mask supported per port\n"); 2590 goto err_out_w_lock; 2591 } 2592 2593 /* apply mask and compute/store hash */ 2594 ixgbe_atr_compute_perfect_hash_82599(&input->filter, &mask); 2595 2596 /* program filters to filter memory */ 2597 err = ixgbe_fdir_write_perfect_filter_82599(hw, 2598 &input->filter, input->sw_idx, 2599 (input->action == IXGBE_FDIR_DROP_QUEUE) ? 2600 IXGBE_FDIR_DROP_QUEUE : 2601 adapter->rx_ring[input->action]->reg_idx); 2602 if (err) 2603 goto err_out_w_lock; 2604 2605 ixgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx); 2606 2607 spin_unlock(&adapter->fdir_perfect_lock); 2608 2609 return err; 2610 err_out_w_lock: 2611 spin_unlock(&adapter->fdir_perfect_lock); 2612 err_out: 2613 kfree(input); 2614 return -EINVAL; 2615 } 2616 2617 static int ixgbe_del_ethtool_fdir_entry(struct ixgbe_adapter *adapter, 2618 struct ethtool_rxnfc *cmd) 2619 { 2620 struct ethtool_rx_flow_spec *fsp = 2621 (struct ethtool_rx_flow_spec *)&cmd->fs; 2622 int err; 2623 2624 spin_lock(&adapter->fdir_perfect_lock); 2625 err = ixgbe_update_ethtool_fdir_entry(adapter, NULL, fsp->location); 2626 spin_unlock(&adapter->fdir_perfect_lock); 2627 2628 return err; 2629 } 2630 2631 #define UDP_RSS_FLAGS (IXGBE_FLAG2_RSS_FIELD_IPV4_UDP | \ 2632 IXGBE_FLAG2_RSS_FIELD_IPV6_UDP) 2633 static int ixgbe_set_rss_hash_opt(struct ixgbe_adapter *adapter, 2634 struct ethtool_rxnfc *nfc) 2635 { 2636 u32 flags2 = adapter->flags2; 2637 2638 /* 2639 * RSS does not support anything other than hashing 2640 * to queues on src and dst IPs and ports 2641 */ 2642 if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST | 2643 RXH_L4_B_0_1 | RXH_L4_B_2_3)) 2644 return -EINVAL; 2645 2646 switch (nfc->flow_type) { 2647 case TCP_V4_FLOW: 2648 case TCP_V6_FLOW: 2649 if (!(nfc->data & RXH_IP_SRC) || 2650 !(nfc->data & RXH_IP_DST) || 2651 !(nfc->data & RXH_L4_B_0_1) || 2652 !(nfc->data & RXH_L4_B_2_3)) 2653 return -EINVAL; 2654 break; 2655 case UDP_V4_FLOW: 2656 if (!(nfc->data & RXH_IP_SRC) || 2657 !(nfc->data & RXH_IP_DST)) 2658 return -EINVAL; 2659 switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { 2660 case 0: 2661 flags2 &= ~IXGBE_FLAG2_RSS_FIELD_IPV4_UDP; 2662 break; 2663 case (RXH_L4_B_0_1 | RXH_L4_B_2_3): 2664 flags2 |= IXGBE_FLAG2_RSS_FIELD_IPV4_UDP; 2665 break; 2666 default: 2667 return -EINVAL; 2668 } 2669 break; 2670 case UDP_V6_FLOW: 2671 if (!(nfc->data & RXH_IP_SRC) || 2672 !(nfc->data & RXH_IP_DST)) 2673 return -EINVAL; 2674 switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { 2675 case 0: 2676 flags2 &= ~IXGBE_FLAG2_RSS_FIELD_IPV6_UDP; 2677 break; 2678 case (RXH_L4_B_0_1 | RXH_L4_B_2_3): 2679 flags2 |= IXGBE_FLAG2_RSS_FIELD_IPV6_UDP; 2680 break; 2681 default: 2682 return -EINVAL; 2683 } 2684 break; 2685 case AH_ESP_V4_FLOW: 2686 case AH_V4_FLOW: 2687 case ESP_V4_FLOW: 2688 case SCTP_V4_FLOW: 2689 case AH_ESP_V6_FLOW: 2690 case AH_V6_FLOW: 2691 case ESP_V6_FLOW: 2692 case SCTP_V6_FLOW: 2693 if (!(nfc->data & RXH_IP_SRC) || 2694 !(nfc->data & RXH_IP_DST) || 2695 (nfc->data & RXH_L4_B_0_1) || 2696 (nfc->data & RXH_L4_B_2_3)) 2697 return -EINVAL; 2698 break; 2699 default: 2700 return -EINVAL; 2701 } 2702 2703 /* if we changed something we need to update flags */ 2704 if (flags2 != adapter->flags2) { 2705 struct ixgbe_hw *hw = &adapter->hw; 2706 u32 mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC); 2707 2708 if ((flags2 & UDP_RSS_FLAGS) && 2709 !(adapter->flags2 & UDP_RSS_FLAGS)) 2710 e_warn(drv, "enabling UDP RSS: fragmented packets" 2711 " may arrive out of order to the stack above\n"); 2712 2713 adapter->flags2 = flags2; 2714 2715 /* Perform hash on these packet types */ 2716 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4 2717 | IXGBE_MRQC_RSS_FIELD_IPV4_TCP 2718 | IXGBE_MRQC_RSS_FIELD_IPV6 2719 | IXGBE_MRQC_RSS_FIELD_IPV6_TCP; 2720 2721 mrqc &= ~(IXGBE_MRQC_RSS_FIELD_IPV4_UDP | 2722 IXGBE_MRQC_RSS_FIELD_IPV6_UDP); 2723 2724 if (flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP) 2725 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP; 2726 2727 if (flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP) 2728 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP; 2729 2730 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); 2731 } 2732 2733 return 0; 2734 } 2735 2736 static int ixgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) 2737 { 2738 struct ixgbe_adapter *adapter = netdev_priv(dev); 2739 int ret = -EOPNOTSUPP; 2740 2741 switch (cmd->cmd) { 2742 case ETHTOOL_SRXCLSRLINS: 2743 ret = ixgbe_add_ethtool_fdir_entry(adapter, cmd); 2744 break; 2745 case ETHTOOL_SRXCLSRLDEL: 2746 ret = ixgbe_del_ethtool_fdir_entry(adapter, cmd); 2747 break; 2748 case ETHTOOL_SRXFH: 2749 ret = ixgbe_set_rss_hash_opt(adapter, cmd); 2750 break; 2751 default: 2752 break; 2753 } 2754 2755 return ret; 2756 } 2757 2758 static const struct ethtool_ops ixgbe_ethtool_ops = { 2759 .get_settings = ixgbe_get_settings, 2760 .set_settings = ixgbe_set_settings, 2761 .get_drvinfo = ixgbe_get_drvinfo, 2762 .get_regs_len = ixgbe_get_regs_len, 2763 .get_regs = ixgbe_get_regs, 2764 .get_wol = ixgbe_get_wol, 2765 .set_wol = ixgbe_set_wol, 2766 .nway_reset = ixgbe_nway_reset, 2767 .get_link = ethtool_op_get_link, 2768 .get_eeprom_len = ixgbe_get_eeprom_len, 2769 .get_eeprom = ixgbe_get_eeprom, 2770 .set_eeprom = ixgbe_set_eeprom, 2771 .get_ringparam = ixgbe_get_ringparam, 2772 .set_ringparam = ixgbe_set_ringparam, 2773 .get_pauseparam = ixgbe_get_pauseparam, 2774 .set_pauseparam = ixgbe_set_pauseparam, 2775 .get_msglevel = ixgbe_get_msglevel, 2776 .set_msglevel = ixgbe_set_msglevel, 2777 .self_test = ixgbe_diag_test, 2778 .get_strings = ixgbe_get_strings, 2779 .set_phys_id = ixgbe_set_phys_id, 2780 .get_sset_count = ixgbe_get_sset_count, 2781 .get_ethtool_stats = ixgbe_get_ethtool_stats, 2782 .get_coalesce = ixgbe_get_coalesce, 2783 .set_coalesce = ixgbe_set_coalesce, 2784 .get_rxnfc = ixgbe_get_rxnfc, 2785 .set_rxnfc = ixgbe_set_rxnfc, 2786 }; 2787 2788 void ixgbe_set_ethtool_ops(struct net_device *netdev) 2789 { 2790 SET_ETHTOOL_OPS(netdev, &ixgbe_ethtool_ops); 2791 } 2792