/*******************************************************************************

  Intel 82599 Virtual Function driver
  Copyright(c) 1999 - 2014 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

/* ethtool support for ixgbevf */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/vmalloc.h>
#include <linux/if_vlan.h>
#include <linux/uaccess.h>

#include "ixgbevf.h"

#define IXGBE_ALL_RAR_ENTRIES 16

struct ixgbe_stats {
	char stat_string[ETH_GSTRING_LEN];
	struct {
		int sizeof_stat;
		int stat_offset;
		int base_stat_offset;
		int saved_reset_offset;
	};
};

#define IXGBEVF_STAT(m, b, r) { \
	.sizeof_stat = FIELD_SIZEOF(struct ixgbevf_adapter, m), \
	.stat_offset = offsetof(struct ixgbevf_adapter, m), \
	.base_stat_offset = offsetof(struct ixgbevf_adapter, b), \
	.saved_reset_offset = offsetof(struct ixgbevf_adapter, r) \
}

#define IXGBEVF_ZSTAT(m) { \
	.sizeof_stat = FIELD_SIZEOF(struct ixgbevf_adapter, m), \
	.stat_offset = offsetof(struct ixgbevf_adapter, m), \
	.base_stat_offset = -1, \
	.saved_reset_offset = -1 \
}

static const struct ixgbe_stats ixgbe_gstrings_stats[] = {
	{"rx_packets", IXGBEVF_STAT(stats.vfgprc, stats.base_vfgprc,
				    stats.saved_reset_vfgprc)},
	{"tx_packets", IXGBEVF_STAT(stats.vfgptc, stats.base_vfgptc,
				    stats.saved_reset_vfgptc)},
	{"rx_bytes", IXGBEVF_STAT(stats.vfgorc, stats.base_vfgorc,
				  stats.saved_reset_vfgorc)},
	{"tx_bytes", IXGBEVF_STAT(stats.vfgotc, stats.base_vfgotc,
				  stats.saved_reset_vfgotc)},
	{"tx_busy", IXGBEVF_ZSTAT(tx_busy)},
	{"tx_restart_queue", IXGBEVF_ZSTAT(restart_queue)},
	{"tx_timeout_count", IXGBEVF_ZSTAT(tx_timeout_count)},
	{"multicast", IXGBEVF_STAT(stats.vfmprc, stats.base_vfmprc,
				   stats.saved_reset_vfmprc)},
	{"rx_csum_offload_errors", IXGBEVF_ZSTAT(hw_csum_rx_error)},
#ifdef BP_EXTENDED_STATS
	{"rx_bp_poll_yield", IXGBEVF_ZSTAT(bp_rx_yields)},
	{"rx_bp_cleaned", IXGBEVF_ZSTAT(bp_rx_cleaned)},
	{"rx_bp_misses", IXGBEVF_ZSTAT(bp_rx_missed)},
	{"tx_bp_napi_yield", IXGBEVF_ZSTAT(bp_tx_yields)},
	{"tx_bp_cleaned", IXGBEVF_ZSTAT(bp_tx_cleaned)},
	{"tx_bp_misses", IXGBEVF_ZSTAT(bp_tx_missed)},
#endif
};

#define IXGBE_QUEUE_STATS_LEN 0
#define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats)

#define IXGBEVF_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + IXGBE_QUEUE_STATS_LEN)
static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = {
	"Register test (offline)",
	"Link test (on/offline)"
};
#define IXGBE_TEST_LEN (sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN)

static int ixgbevf_get_settings(struct net_device *netdev,
				struct ethtool_cmd *ecmd)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 link_speed = 0;
	bool link_up;

	ecmd->supported = SUPPORTED_10000baseT_Full;
	ecmd->autoneg = AUTONEG_DISABLE;
	ecmd->transceiver = XCVR_DUMMY1;
	ecmd->port = -1;

	hw->mac.get_link_status = 1;
	hw->mac.ops.check_link(hw, &link_speed, &link_up, false);

	if (link_up) {
		__u32 speed = SPEED_10000;
		switch (link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			speed = SPEED_10000;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			speed = SPEED_1000;
			break;
		case IXGBE_LINK_SPEED_100_FULL:
			speed = SPEED_100;
			break;
		}

		ethtool_cmd_speed_set(ecmd, speed);
		ecmd->duplex = DUPLEX_FULL;
	} else {
		ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
		ecmd->duplex = DUPLEX_UNKNOWN;
	}

	return 0;
}

static u32 ixgbevf_get_msglevel(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	return adapter->msg_enable;
}

static void ixgbevf_set_msglevel(struct net_device *netdev, u32 data)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	adapter->msg_enable = data;
}

#define IXGBE_GET_STAT(_A_, _R_) (_A_->stats._R_)

static int ixgbevf_get_regs_len(struct net_device *netdev)
{
#define IXGBE_REGS_LEN 45
	return IXGBE_REGS_LEN * sizeof(u32);
}
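
/* Register dump for ethtool -d: IXGBE_REGS_LEN (45) words covering the VF's
 * general, interrupt, Rx DMA, Rx and Tx register blocks, with the
 * queue-indexed registers dumped for the first two queues only.
 */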
static void ixgbevf_get_regs(struct net_device *netdev,
			     struct ethtool_regs *regs,
			     void *p)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 *regs_buff = p;
	u32 regs_len = ixgbevf_get_regs_len(netdev);
	u8 i;

	memset(p, 0, regs_len);

	regs->version = (1 << 24) | hw->revision_id << 16 | hw->device_id;

	/* General Registers */
	regs_buff[0] = IXGBE_READ_REG(hw, IXGBE_VFCTRL);
	regs_buff[1] = IXGBE_READ_REG(hw, IXGBE_VFSTATUS);
	regs_buff[2] = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
	regs_buff[3] = IXGBE_READ_REG(hw, IXGBE_VFRXMEMWRAP);
	regs_buff[4] = IXGBE_READ_REG(hw, IXGBE_VFFRTIMER);

	/* Interrupt */
	/* don't read EICR because it can clear interrupt causes, instead
	 * read EICS which is a shadow but doesn't clear EICR */
	regs_buff[5] = IXGBE_READ_REG(hw, IXGBE_VTEICS);
	regs_buff[6] = IXGBE_READ_REG(hw, IXGBE_VTEICS);
	regs_buff[7] = IXGBE_READ_REG(hw, IXGBE_VTEIMS);
	regs_buff[8] = IXGBE_READ_REG(hw, IXGBE_VTEIMC);
	regs_buff[9] = IXGBE_READ_REG(hw, IXGBE_VTEIAC);
	regs_buff[10] = IXGBE_READ_REG(hw, IXGBE_VTEIAM);
	regs_buff[11] = IXGBE_READ_REG(hw, IXGBE_VTEITR(0));
	regs_buff[12] = IXGBE_READ_REG(hw, IXGBE_VTIVAR(0));
	regs_buff[13] = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);

	/* Receive DMA */
	for (i = 0; i < 2; i++)
		regs_buff[14 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDBAL(i));
	for (i = 0; i < 2; i++)
		regs_buff[16 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDBAH(i));
	for (i = 0; i < 2; i++)
		regs_buff[18 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDLEN(i));
	for (i = 0; i < 2; i++)
		regs_buff[20 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDH(i));
	for (i = 0; i < 2; i++)
		regs_buff[22 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDT(i));
	for (i = 0; i < 2; i++)
		regs_buff[24 + i] = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
	for (i = 0; i < 2; i++)
		regs_buff[26 + i] = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i));

	/* Receive */
	regs_buff[28] = IXGBE_READ_REG(hw, IXGBE_VFPSRTYPE);

	/* Transmit */
	for (i = 0; i < 2; i++)
		regs_buff[29 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDBAL(i));
	for (i = 0; i < 2; i++)
		regs_buff[31 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDBAH(i));
	for (i = 0; i < 2; i++)
		regs_buff[33 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDLEN(i));
	for (i = 0; i < 2; i++)
		regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDH(i));
	for (i = 0; i < 2; i++)
		regs_buff[37 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDT(i));
	for (i = 0; i < 2; i++)
		regs_buff[39 + i] = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
	for (i = 0; i < 2; i++)
		regs_buff[41 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDWBAL(i));
	for (i = 0; i < 2; i++)
		regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDWBAH(i));
}

static void ixgbevf_get_drvinfo(struct net_device *netdev,
				struct ethtool_drvinfo *drvinfo)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	strlcpy(drvinfo->driver, ixgbevf_driver_name, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, ixgbevf_driver_version,
		sizeof(drvinfo->version));
	strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
		sizeof(drvinfo->bus_info));
}

static void ixgbevf_get_ringparam(struct net_device *netdev,
				  struct ethtool_ringparam *ring)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	ring->rx_max_pending = IXGBEVF_MAX_RXD;
	ring->tx_max_pending = IXGBEVF_MAX_TXD;
	ring->rx_pending = adapter->rx_ring_count;
	ring->tx_pending = adapter->tx_ring_count;
}
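
/* Resize the descriptor rings.  New rings are allocated and set up first;
 * only once every allocation has succeeded is the interface brought down
 * and the old rings swapped out, so a failed resize leaves the running
 * configuration untouched.  If the interface is down, only the ring counts
 * are updated and allocation is deferred until the next open.
 */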
static int ixgbevf_set_ringparam(struct net_device *netdev,
				 struct ethtool_ringparam *ring)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbevf_ring *tx_ring = NULL, *rx_ring = NULL;
	u32 new_rx_count, new_tx_count;
	int i, err = 0;

	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
		return -EINVAL;

	new_tx_count = max_t(u32, ring->tx_pending, IXGBEVF_MIN_TXD);
	new_tx_count = min_t(u32, new_tx_count, IXGBEVF_MAX_TXD);
	new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE);

	new_rx_count = max_t(u32, ring->rx_pending, IXGBEVF_MIN_RXD);
	new_rx_count = min_t(u32, new_rx_count, IXGBEVF_MAX_RXD);
	new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE);

	/* if nothing to do return success */
	if ((new_tx_count == adapter->tx_ring_count) &&
	    (new_rx_count == adapter->rx_ring_count))
		return 0;

	while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
		usleep_range(1000, 2000);

	if (!netif_running(adapter->netdev)) {
		for (i = 0; i < adapter->num_tx_queues; i++)
			adapter->tx_ring[i]->count = new_tx_count;
		for (i = 0; i < adapter->num_rx_queues; i++)
			adapter->rx_ring[i]->count = new_rx_count;
		adapter->tx_ring_count = new_tx_count;
		adapter->rx_ring_count = new_rx_count;
		goto clear_reset;
	}

	if (new_tx_count != adapter->tx_ring_count) {
		tx_ring = vmalloc(adapter->num_tx_queues * sizeof(*tx_ring));
		if (!tx_ring) {
			err = -ENOMEM;
			goto clear_reset;
		}

		for (i = 0; i < adapter->num_tx_queues; i++) {
			/* clone ring and setup updated count */
			tx_ring[i] = *adapter->tx_ring[i];
			tx_ring[i].count = new_tx_count;
			err = ixgbevf_setup_tx_resources(&tx_ring[i]);
			if (err) {
				while (i) {
					i--;
					ixgbevf_free_tx_resources(&tx_ring[i]);
				}

				vfree(tx_ring);
				tx_ring = NULL;

				goto clear_reset;
			}
		}
	}

	if (new_rx_count != adapter->rx_ring_count) {
		rx_ring = vmalloc(adapter->num_rx_queues * sizeof(*rx_ring));
		if (!rx_ring) {
			err = -ENOMEM;
			goto clear_reset;
		}

		for (i = 0; i < adapter->num_rx_queues; i++) {
			/* clone ring and setup updated count */
			rx_ring[i] = *adapter->rx_ring[i];
			rx_ring[i].count = new_rx_count;
			err = ixgbevf_setup_rx_resources(&rx_ring[i]);
			if (err) {
				while (i) {
					i--;
					ixgbevf_free_rx_resources(&rx_ring[i]);
				}

				vfree(rx_ring);
				rx_ring = NULL;

				goto clear_reset;
			}
		}
	}

	/* bring interface down to prepare for update */
	ixgbevf_down(adapter);

	/* Tx */
	if (tx_ring) {
		for (i = 0; i < adapter->num_tx_queues; i++) {
			ixgbevf_free_tx_resources(adapter->tx_ring[i]);
			*adapter->tx_ring[i] = tx_ring[i];
		}
		adapter->tx_ring_count = new_tx_count;

		vfree(tx_ring);
		tx_ring = NULL;
	}

	/* Rx */
	if (rx_ring) {
		for (i = 0; i < adapter->num_rx_queues; i++) {
			ixgbevf_free_rx_resources(adapter->rx_ring[i]);
			*adapter->rx_ring[i] = rx_ring[i];
		}
		adapter->rx_ring_count = new_rx_count;

		vfree(rx_ring);
		rx_ring = NULL;
	}

	/* restore interface using new values */
	ixgbevf_up(adapter);

clear_reset:
	/* free Tx resources if Rx error is encountered */
	if (tx_ring) {
		for (i = 0; i < adapter->num_tx_queues; i++)
			ixgbevf_free_tx_resources(&tx_ring[i]);
		vfree(tx_ring);
	}

	clear_bit(__IXGBEVF_RESETTING, &adapter->state);
	return err;
}

static int ixgbevf_get_sset_count(struct net_device *dev, int stringset)
{
	switch (stringset) {
	case ETH_SS_TEST:
		return IXGBE_TEST_LEN;
	case ETH_SS_STATS:
		return IXGBE_GLOBAL_STATS_LEN;
	default:
		return -EINVAL;
	}
}

static void ixgbevf_get_ethtool_stats(struct net_device *netdev,
				      struct ethtool_stats *stats, u64 *data)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	char *base = (char *) adapter;
	int i;
#ifdef BP_EXTENDED_STATS
	u64 rx_yields = 0, rx_cleaned = 0, rx_missed = 0,
	    tx_yields = 0, tx_cleaned = 0, tx_missed = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		rx_yields += adapter->rx_ring[i]->stats.yields;
		rx_cleaned += adapter->rx_ring[i]->stats.cleaned;
		rx_missed += adapter->rx_ring[i]->stats.misses;
	}

	for (i = 0; i < adapter->num_tx_queues; i++) {
		tx_yields += adapter->tx_ring[i]->stats.yields;
		tx_cleaned += adapter->tx_ring[i]->stats.cleaned;
		tx_missed += adapter->tx_ring[i]->stats.misses;
	}

	adapter->bp_rx_yields = rx_yields;
	adapter->bp_rx_cleaned = rx_cleaned;
	adapter->bp_rx_missed = rx_missed;

	adapter->bp_tx_yields = tx_yields;
	adapter->bp_tx_cleaned = tx_cleaned;
	adapter->bp_tx_missed = tx_missed;
#endif

	ixgbevf_update_stats(adapter);
	for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
		char *p = base + ixgbe_gstrings_stats[i].stat_offset;
		char *b = base + ixgbe_gstrings_stats[i].base_stat_offset;
		char *r = base + ixgbe_gstrings_stats[i].saved_reset_offset;

		/* stats with a base/saved_reset offset are hardware counters:
		 * report current - base + the value saved across the last
		 * reset.  A base_stat_offset of -1 marks a software counter,
		 * which is reported as-is.
		 */
		if (ixgbe_gstrings_stats[i].sizeof_stat == sizeof(u64)) {
			if (ixgbe_gstrings_stats[i].base_stat_offset >= 0)
				data[i] = *(u64 *)p - *(u64 *)b + *(u64 *)r;
			else
				data[i] = *(u64 *)p;
		} else {
			if (ixgbe_gstrings_stats[i].base_stat_offset >= 0)
				data[i] = *(u32 *)p - *(u32 *)b + *(u32 *)r;
			else
				data[i] = *(u32 *)p;
		}
	}
}
static void ixgbevf_get_strings(struct net_device *netdev, u32 stringset,
				u8 *data)
{
	char *p = (char *)data;
	int i;

	switch (stringset) {
	case ETH_SS_TEST:
		memcpy(data, *ixgbe_gstrings_test,
		       IXGBE_TEST_LEN * ETH_GSTRING_LEN);
		break;
	case ETH_SS_STATS:
		for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
			memcpy(p, ixgbe_gstrings_stats[i].stat_string,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		break;
	}
}

static int ixgbevf_link_test(struct ixgbevf_adapter *adapter, u64 *data)
{
	struct ixgbe_hw *hw = &adapter->hw;
	bool link_up;
	u32 link_speed = 0;
	*data = 0;

	hw->mac.ops.check_link(hw, &link_speed, &link_up, true);
	if (!link_up)
		*data = 1;

	return *data;
}

/* ethtool register test data */
struct ixgbevf_reg_test {
	u16 reg;
	u8  array_len;
	u8  test_type;
	u32 mask;
	u32 write;
};

/* In the hardware, registers are laid out either singly, in arrays
 * spaced 0x40 bytes apart, or in contiguous tables.  We assume
 * most tests take place on arrays or single registers (handled
 * as a single-element array) and special-case the tables.
 * Table tests are always pattern tests.
 *
 * We also make provision for some required setup steps by specifying
 * registers to be written without any read-back testing.
 */

#define PATTERN_TEST	1
#define SET_READ_TEST	2
#define WRITE_NO_TEST	3
#define TABLE32_TEST	4
#define TABLE64_TEST_LO	5
#define TABLE64_TEST_HI	6

/* default VF register test */
static const struct ixgbevf_reg_test reg_test_vf[] = {
	{ IXGBE_VFRDBAL(0), 2, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 },
	{ IXGBE_VFRDBAH(0), 2, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_VFRDLEN(0), 2, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
	{ IXGBE_VFRXDCTL(0), 2, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE },
	{ IXGBE_VFRDT(0), 2, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
	{ IXGBE_VFRXDCTL(0), 2, WRITE_NO_TEST, 0, 0 },
	{ IXGBE_VFTDBAL(0), 2, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
	{ IXGBE_VFTDBAH(0), 2, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_VFTDLEN(0), 2, PATTERN_TEST, 0x000FFF80, 0x000FFF80 },
	{ .reg = 0 }
};

static const u32 register_test_patterns[] = {
	0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF
};
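
/* Write each test pattern (masked by 'write') to the register, read it back
 * and compare under 'mask', restoring the original register value afterwards.
 * On a mismatch the offending register offset is recorded in *data and the
 * test is aborted.
 */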
static bool reg_pattern_test(struct ixgbevf_adapter *adapter, u64 *data,
			     int reg, u32 mask, u32 write)
{
	u32 pat, val, before;

	if (IXGBE_REMOVED(adapter->hw.hw_addr)) {
		*data = 1;
		return true;
	}
	for (pat = 0; pat < ARRAY_SIZE(register_test_patterns); pat++) {
		before = ixgbevf_read_reg(&adapter->hw, reg);
		ixgbe_write_reg(&adapter->hw, reg,
				register_test_patterns[pat] & write);
		val = ixgbevf_read_reg(&adapter->hw, reg);
		if (val != (register_test_patterns[pat] & write & mask)) {
			hw_dbg(&adapter->hw,
			       "pattern test reg %04X failed: got 0x%08X expected 0x%08X\n",
			       reg, val,
			       register_test_patterns[pat] & write & mask);
			*data = reg;
			ixgbe_write_reg(&adapter->hw, reg, before);
			return true;
		}
		ixgbe_write_reg(&adapter->hw, reg, before);
	}
	return false;
}

static bool reg_set_and_check(struct ixgbevf_adapter *adapter, u64 *data,
			      int reg, u32 mask, u32 write)
{
	u32 val, before;

	if (IXGBE_REMOVED(adapter->hw.hw_addr)) {
		*data = 1;
		return true;
	}
	before = ixgbevf_read_reg(&adapter->hw, reg);
	ixgbe_write_reg(&adapter->hw, reg, write & mask);
	val = ixgbevf_read_reg(&adapter->hw, reg);
	if ((write & mask) != (val & mask)) {
		pr_err("set/check reg %04X test failed: got 0x%08X expected 0x%08X\n",
		       reg, (val & mask), write & mask);
		*data = reg;
		ixgbe_write_reg(&adapter->hw, reg, before);
		return true;
	}
	ixgbe_write_reg(&adapter->hw, reg, before);
	return false;
}

static int ixgbevf_reg_test(struct ixgbevf_adapter *adapter, u64 *data)
{
	const struct ixgbevf_reg_test *test;
	u32 i;

	if (IXGBE_REMOVED(adapter->hw.hw_addr)) {
		dev_err(&adapter->pdev->dev,
			"Adapter removed - register test blocked\n");
		*data = 1;
		return 1;
	}
	test = reg_test_vf;

	/*
	 * Perform the register test, looping through the test table
	 * until we either fail or reach the null entry.
	 */
	while (test->reg) {
		for (i = 0; i < test->array_len; i++) {
			bool b = false;

			switch (test->test_type) {
			case PATTERN_TEST:
				b = reg_pattern_test(adapter, data,
						     test->reg + (i * 0x40),
						     test->mask,
						     test->write);
				break;
			case SET_READ_TEST:
				b = reg_set_and_check(adapter, data,
						      test->reg + (i * 0x40),
						      test->mask,
						      test->write);
				break;
			case WRITE_NO_TEST:
				ixgbe_write_reg(&adapter->hw,
						test->reg + (i * 0x40),
						test->write);
				break;
			case TABLE32_TEST:
				b = reg_pattern_test(adapter, data,
						     test->reg + (i * 4),
						     test->mask,
						     test->write);
				break;
			case TABLE64_TEST_LO:
				b = reg_pattern_test(adapter, data,
						     test->reg + (i * 8),
						     test->mask,
						     test->write);
				break;
			case TABLE64_TEST_HI:
				b = reg_pattern_test(adapter, data,
						     test->reg + 4 + (i * 8),
						     test->mask,
						     test->write);
				break;
			}
			if (b)
				return 1;
		}
		test++;
	}

	*data = 0;
	return *data;
}
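
/* ethtool self-test entry point.  Offline testing runs the link test first
 * (before any reset, so autoneg state is not disturbed), then closes the
 * interface (or resets the hardware if it was already down), runs the
 * register test, resets again and re-opens the interface if it had been
 * running.  Online testing checks link only; the register test result is
 * reported as passing.
 */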
static void ixgbevf_diag_test(struct net_device *netdev,
			      struct ethtool_test *eth_test, u64 *data)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	bool if_running = netif_running(netdev);

	if (IXGBE_REMOVED(adapter->hw.hw_addr)) {
		dev_err(&adapter->pdev->dev,
			"Adapter removed - test blocked\n");
		data[0] = 1;
		data[1] = 1;
		eth_test->flags |= ETH_TEST_FL_FAILED;
		return;
	}
	set_bit(__IXGBEVF_TESTING, &adapter->state);
	if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
		/* Offline tests */

		hw_dbg(&adapter->hw, "offline testing starting\n");

		/* Link test performed before hardware reset so autoneg doesn't
		 * interfere with test result */
		if (ixgbevf_link_test(adapter, &data[1]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		if (if_running)
			/* indicate we're in test mode */
			dev_close(netdev);
		else
			ixgbevf_reset(adapter);

		hw_dbg(&adapter->hw, "register testing starting\n");
		if (ixgbevf_reg_test(adapter, &data[0]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		ixgbevf_reset(adapter);

		clear_bit(__IXGBEVF_TESTING, &adapter->state);
		if (if_running)
			dev_open(netdev);
	} else {
		hw_dbg(&adapter->hw, "online testing starting\n");
		/* Online tests */
		if (ixgbevf_link_test(adapter, &data[1]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		/* Online tests aren't run; pass by default */
		data[0] = 0;

		clear_bit(__IXGBEVF_TESTING, &adapter->state);
	}
	msleep_interruptible(4 * 1000);
}

static int ixgbevf_nway_reset(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev))
		ixgbevf_reinit_locked(adapter);

	return 0;
}

static int ixgbevf_get_coalesce(struct net_device *netdev,
				struct ethtool_coalesce *ec)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	/* only valid if in constant ITR mode */
	if (adapter->rx_itr_setting <= 1)
		ec->rx_coalesce_usecs = adapter->rx_itr_setting;
	else
		ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2;

	/* if in mixed tx/rx queues per vector mode, report only rx settings */
	if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count)
		return 0;

	/* only valid if in constant ITR mode */
	if (adapter->tx_itr_setting <= 1)
		ec->tx_coalesce_usecs = adapter->tx_itr_setting;
	else
		ec->tx_coalesce_usecs = adapter->tx_itr_setting >> 2;

	return 0;
}

static int ixgbevf_set_coalesce(struct net_device *netdev,
				struct ethtool_coalesce *ec)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbevf_q_vector *q_vector;
	int num_vectors, i;
	u16 tx_itr_param, rx_itr_param;

	/* don't accept tx specific changes if we've got mixed RxTx vectors */
	if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count
	    && ec->tx_coalesce_usecs)
		return -EINVAL;

	if ((ec->rx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)) ||
	    (ec->tx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)))
		return -EINVAL;

	if (ec->rx_coalesce_usecs > 1)
		adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2;
	else
		adapter->rx_itr_setting = ec->rx_coalesce_usecs;

	if (adapter->rx_itr_setting == 1)
		rx_itr_param = IXGBE_20K_ITR;
	else
		rx_itr_param = adapter->rx_itr_setting;

	if (ec->tx_coalesce_usecs > 1)
		adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2;
	else
		adapter->tx_itr_setting = ec->tx_coalesce_usecs;

	if (adapter->tx_itr_setting == 1)
		tx_itr_param = IXGBE_10K_ITR;
	else
		tx_itr_param = adapter->tx_itr_setting;

	num_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (i = 0; i < num_vectors; i++) {
		q_vector = adapter->q_vector[i];
		if (q_vector->tx.count && !q_vector->rx.count)
			/* tx only */
			q_vector->itr = tx_itr_param;
		else
			/* rx only or mixed */
			q_vector->itr = rx_itr_param;
		ixgbevf_write_eitr(q_vector);
	}

	return 0;
}
static const struct ethtool_ops ixgbevf_ethtool_ops = {
	.get_settings		= ixgbevf_get_settings,
	.get_drvinfo		= ixgbevf_get_drvinfo,
	.get_regs_len		= ixgbevf_get_regs_len,
	.get_regs		= ixgbevf_get_regs,
	.nway_reset		= ixgbevf_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_ringparam		= ixgbevf_get_ringparam,
	.set_ringparam		= ixgbevf_set_ringparam,
	.get_msglevel		= ixgbevf_get_msglevel,
	.set_msglevel		= ixgbevf_set_msglevel,
	.self_test		= ixgbevf_diag_test,
	.get_sset_count		= ixgbevf_get_sset_count,
	.get_strings		= ixgbevf_get_strings,
	.get_ethtool_stats	= ixgbevf_get_ethtool_stats,
	.get_coalesce		= ixgbevf_get_coalesce,
	.set_coalesce		= ixgbevf_set_coalesce,
};

void ixgbevf_set_ethtool_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &ixgbevf_ethtool_ops;
}
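
/* For reference (illustrative, not exhaustive): these handlers back the usual
 * ethtool invocations on a VF interface, e.g.
 *
 *   ethtool -S <vfdev>           -> ixgbevf_get_ethtool_stats()
 *   ethtool -d <vfdev>           -> ixgbevf_get_regs()
 *   ethtool -t <vfdev> offline   -> ixgbevf_diag_test()
 *   ethtool -g / -G <vfdev>      -> ixgbevf_get_ringparam() / ixgbevf_set_ringparam()
 *   ethtool -c / -C <vfdev>      -> ixgbevf_get_coalesce() / ixgbevf_set_coalesce()
 */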