/*******************************************************************************

  Intel 82599 Virtual Function driver
  Copyright(c) 1999 - 2015 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, see <http://www.gnu.org/licenses/>.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

/* ethtool support for ixgbevf */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/vmalloc.h>
#include <linux/if_vlan.h>
#include <linux/uaccess.h>

#include "ixgbevf.h"

#define IXGBE_ALL_RAR_ENTRIES 16

enum {NETDEV_STATS, IXGBEVF_STATS};

struct ixgbe_stats {
	char stat_string[ETH_GSTRING_LEN];
	int type;
	int sizeof_stat;
	int stat_offset;
};

#define IXGBEVF_STAT(_name, _stat) { \
	.stat_string = _name, \
	.type = IXGBEVF_STATS, \
	.sizeof_stat = FIELD_SIZEOF(struct ixgbevf_adapter, _stat), \
	.stat_offset = offsetof(struct ixgbevf_adapter, _stat) \
}

#define IXGBEVF_NETDEV_STAT(_net_stat) { \
	.stat_string = #_net_stat, \
	.type = NETDEV_STATS, \
	.sizeof_stat = FIELD_SIZEOF(struct net_device_stats, _net_stat), \
	.stat_offset = offsetof(struct net_device_stats, _net_stat) \
}

static struct ixgbe_stats ixgbevf_gstrings_stats[] = {
	IXGBEVF_NETDEV_STAT(rx_packets),
	IXGBEVF_NETDEV_STAT(tx_packets),
	IXGBEVF_NETDEV_STAT(rx_bytes),
	IXGBEVF_NETDEV_STAT(tx_bytes),
	IXGBEVF_STAT("tx_busy", tx_busy),
	IXGBEVF_STAT("tx_restart_queue", restart_queue),
	IXGBEVF_STAT("tx_timeout_count", tx_timeout_count),
	IXGBEVF_NETDEV_STAT(multicast),
	IXGBEVF_STAT("rx_csum_offload_errors", hw_csum_rx_error),
};

#define IXGBEVF_QUEUE_STATS_LEN ( \
	(((struct ixgbevf_adapter *)netdev_priv(netdev))->num_tx_queues + \
	 ((struct ixgbevf_adapter *)netdev_priv(netdev))->num_rx_queues) * \
	 (sizeof(struct ixgbevf_stats) / sizeof(u64)))
#define IXGBEVF_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbevf_gstrings_stats)

#define IXGBEVF_STATS_LEN (IXGBEVF_GLOBAL_STATS_LEN + IXGBEVF_QUEUE_STATS_LEN)
static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = {
	"Register test (offline)",
	"Link test (on/offline)"
};

#define IXGBEVF_TEST_LEN (sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN)

static int ixgbevf_get_link_ksettings(struct net_device *netdev,
				      struct ethtool_link_ksettings *cmd)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 link_speed = 0;
	bool link_up;

	ethtool_link_ksettings_zero_link_mode(cmd, supported);
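	/* The VF reports a fixed set of modes: only 10GbE full duplex is
	 * advertised as supported and autonegotiation is shown as disabled.
	 * The actual link speed is read back below via mac.ops.check_link().
	 */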
	ethtool_link_ksettings_add_link_mode(cmd, supported, 10000baseT_Full);
	cmd->base.autoneg = AUTONEG_DISABLE;
	cmd->base.port = -1;

	hw->mac.get_link_status = 1;
	hw->mac.ops.check_link(hw, &link_speed, &link_up, false);

	if (link_up) {
		__u32 speed = SPEED_10000;

		switch (link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			speed = SPEED_10000;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			speed = SPEED_1000;
			break;
		case IXGBE_LINK_SPEED_100_FULL:
			speed = SPEED_100;
			break;
		}

		cmd->base.speed = speed;
		cmd->base.duplex = DUPLEX_FULL;
	} else {
		cmd->base.speed = SPEED_UNKNOWN;
		cmd->base.duplex = DUPLEX_UNKNOWN;
	}

	return 0;
}

static u32 ixgbevf_get_msglevel(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	return adapter->msg_enable;
}

static void ixgbevf_set_msglevel(struct net_device *netdev, u32 data)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	adapter->msg_enable = data;
}

#define IXGBE_GET_STAT(_A_, _R_) (_A_->stats._R_)

static int ixgbevf_get_regs_len(struct net_device *netdev)
{
#define IXGBE_REGS_LEN 45
	return IXGBE_REGS_LEN * sizeof(u32);
}

static void ixgbevf_get_regs(struct net_device *netdev,
			     struct ethtool_regs *regs,
			     void *p)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 *regs_buff = p;
	u32 regs_len = ixgbevf_get_regs_len(netdev);
	u8 i;

	memset(p, 0, regs_len);

	/* generate a number suitable for ethtool's register version */
	regs->version = (1u << 24) | (hw->revision_id << 16) | hw->device_id;

	/* General Registers */
	regs_buff[0] = IXGBE_READ_REG(hw, IXGBE_VFCTRL);
	regs_buff[1] = IXGBE_READ_REG(hw, IXGBE_VFSTATUS);
	regs_buff[2] = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
	regs_buff[3] = IXGBE_READ_REG(hw, IXGBE_VFRXMEMWRAP);
	regs_buff[4] = IXGBE_READ_REG(hw, IXGBE_VFFRTIMER);

	/* Interrupt */
	/* don't read EICR because it can clear interrupt causes, instead
	 * read EICS which is a shadow but doesn't clear EICR
	 */
	regs_buff[5] = IXGBE_READ_REG(hw, IXGBE_VTEICS);
	regs_buff[6] = IXGBE_READ_REG(hw, IXGBE_VTEICS);
	regs_buff[7] = IXGBE_READ_REG(hw, IXGBE_VTEIMS);
	regs_buff[8] = IXGBE_READ_REG(hw, IXGBE_VTEIMC);
	regs_buff[9] = IXGBE_READ_REG(hw, IXGBE_VTEIAC);
	regs_buff[10] = IXGBE_READ_REG(hw, IXGBE_VTEIAM);
	regs_buff[11] = IXGBE_READ_REG(hw, IXGBE_VTEITR(0));
	regs_buff[12] = IXGBE_READ_REG(hw, IXGBE_VTIVAR(0));
	regs_buff[13] = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);

	/* Receive DMA */
	for (i = 0; i < 2; i++)
		regs_buff[14 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDBAL(i));
	for (i = 0; i < 2; i++)
		regs_buff[16 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDBAH(i));
	for (i = 0; i < 2; i++)
		regs_buff[18 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDLEN(i));
	for (i = 0; i < 2; i++)
		regs_buff[20 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDH(i));
	for (i = 0; i < 2; i++)
		regs_buff[22 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDT(i));
	for (i = 0; i < 2; i++)
		regs_buff[24 + i] = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
	for (i = 0; i < 2; i++)
		regs_buff[26 + i] = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i));

	/* Receive */
	regs_buff[28] = IXGBE_READ_REG(hw, IXGBE_VFPSRTYPE);
	/* Transmit */
	for (i = 0; i < 2; i++)
		regs_buff[29 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDBAL(i));
	for (i = 0; i < 2; i++)
		regs_buff[31 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDBAH(i));
	for (i = 0; i < 2; i++)
		regs_buff[33 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDLEN(i));
	for (i = 0; i < 2; i++)
		regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDH(i));
	for (i = 0; i < 2; i++)
		regs_buff[37 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDT(i));
	for (i = 0; i < 2; i++)
		regs_buff[39 + i] = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
	for (i = 0; i < 2; i++)
		regs_buff[41 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDWBAL(i));
	for (i = 0; i < 2; i++)
		regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDWBAH(i));
}

static void ixgbevf_get_drvinfo(struct net_device *netdev,
				struct ethtool_drvinfo *drvinfo)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	strlcpy(drvinfo->driver, ixgbevf_driver_name, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, ixgbevf_driver_version,
		sizeof(drvinfo->version));
	strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
		sizeof(drvinfo->bus_info));
}

static void ixgbevf_get_ringparam(struct net_device *netdev,
				  struct ethtool_ringparam *ring)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	ring->rx_max_pending = IXGBEVF_MAX_RXD;
	ring->tx_max_pending = IXGBEVF_MAX_TXD;
	ring->rx_pending = adapter->rx_ring_count;
	ring->tx_pending = adapter->tx_ring_count;
}

static int ixgbevf_set_ringparam(struct net_device *netdev,
				 struct ethtool_ringparam *ring)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbevf_ring *tx_ring = NULL, *rx_ring = NULL;
	u32 new_rx_count, new_tx_count;
	int i, err = 0;

	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
		return -EINVAL;

	new_tx_count = max_t(u32, ring->tx_pending, IXGBEVF_MIN_TXD);
	new_tx_count = min_t(u32, new_tx_count, IXGBEVF_MAX_TXD);
	new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE);

	new_rx_count = max_t(u32, ring->rx_pending, IXGBEVF_MIN_RXD);
	new_rx_count = min_t(u32, new_rx_count, IXGBEVF_MAX_RXD);
	new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE);

	/* if nothing to do return success */
	if ((new_tx_count == adapter->tx_ring_count) &&
	    (new_rx_count == adapter->rx_ring_count))
		return 0;

	while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
		usleep_range(1000, 2000);

	if (!netif_running(adapter->netdev)) {
		for (i = 0; i < adapter->num_tx_queues; i++)
			adapter->tx_ring[i]->count = new_tx_count;
		for (i = 0; i < adapter->num_rx_queues; i++)
			adapter->rx_ring[i]->count = new_rx_count;
		adapter->tx_ring_count = new_tx_count;
		adapter->rx_ring_count = new_rx_count;
		goto clear_reset;
	}

	if (new_tx_count != adapter->tx_ring_count) {
		tx_ring = vmalloc(adapter->num_tx_queues * sizeof(*tx_ring));
		if (!tx_ring) {
			err = -ENOMEM;
			goto clear_reset;
		}

		for (i = 0; i < adapter->num_tx_queues; i++) {
			/* clone ring and setup updated count */
			tx_ring[i] = *adapter->tx_ring[i];
			tx_ring[i].count = new_tx_count;
			err = ixgbevf_setup_tx_resources(&tx_ring[i]);
			if (err) {
				while (i) {
					i--;
					ixgbevf_free_tx_resources(&tx_ring[i]);
				}

				vfree(tx_ring);
				tx_ring = NULL;

				goto clear_reset;
			}
		}
	}

	if (new_rx_count != adapter->rx_ring_count) {
		rx_ring = vmalloc(adapter->num_rx_queues * sizeof(*rx_ring));
		if (!rx_ring) {
			err = -ENOMEM;
			goto clear_reset;
		}

		for (i = 0; i < adapter->num_rx_queues; i++) {
			/* clone ring and setup updated count */
			rx_ring[i] = *adapter->rx_ring[i];
			rx_ring[i].count = new_rx_count;
			err = ixgbevf_setup_rx_resources(&rx_ring[i]);
			if (err) {
				while (i) {
					i--;
					ixgbevf_free_rx_resources(&rx_ring[i]);
				}

				vfree(rx_ring);
				rx_ring = NULL;

				goto clear_reset;
			}
		}
	}

	/* bring interface down to prepare for update */
	ixgbevf_down(adapter);

	/* Tx */
	if (tx_ring) {
		for (i = 0; i < adapter->num_tx_queues; i++) {
			ixgbevf_free_tx_resources(adapter->tx_ring[i]);
			*adapter->tx_ring[i] = tx_ring[i];
		}
		adapter->tx_ring_count = new_tx_count;

		vfree(tx_ring);
		tx_ring = NULL;
	}

	/* Rx */
	if (rx_ring) {
		for (i = 0; i < adapter->num_rx_queues; i++) {
			ixgbevf_free_rx_resources(adapter->rx_ring[i]);
			*adapter->rx_ring[i] = rx_ring[i];
		}
		adapter->rx_ring_count = new_rx_count;

		vfree(rx_ring);
		rx_ring = NULL;
	}

	/* restore interface using new values */
	ixgbevf_up(adapter);

clear_reset:
	/* free Tx resources if Rx error is encountered */
	if (tx_ring) {
		for (i = 0; i < adapter->num_tx_queues; i++)
			ixgbevf_free_tx_resources(&tx_ring[i]);
		vfree(tx_ring);
	}

	clear_bit(__IXGBEVF_RESETTING, &adapter->state);
	return err;
}

static int ixgbevf_get_sset_count(struct net_device *netdev, int stringset)
{
	switch (stringset) {
	case ETH_SS_TEST:
		return IXGBEVF_TEST_LEN;
	case ETH_SS_STATS:
		return IXGBEVF_STATS_LEN;
	default:
		return -EINVAL;
	}
}

static void ixgbevf_get_ethtool_stats(struct net_device *netdev,
				      struct ethtool_stats *stats, u64 *data)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct rtnl_link_stats64 temp;
	const struct rtnl_link_stats64 *net_stats;
	unsigned int start;
	struct ixgbevf_ring *ring;
	int i, j;
	char *p;

	ixgbevf_update_stats(adapter);
	net_stats = dev_get_stats(netdev, &temp);
	for (i = 0; i < IXGBEVF_GLOBAL_STATS_LEN; i++) {
		switch (ixgbevf_gstrings_stats[i].type) {
		case NETDEV_STATS:
			p = (char *)net_stats +
			    ixgbevf_gstrings_stats[i].stat_offset;
			break;
		case IXGBEVF_STATS:
			p = (char *)adapter +
			    ixgbevf_gstrings_stats[i].stat_offset;
			break;
		default:
			data[i] = 0;
			continue;
		}

		data[i] = (ixgbevf_gstrings_stats[i].sizeof_stat ==
			   sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
	}

	/* populate Tx queue data */
	for (j = 0; j < adapter->num_tx_queues; j++) {
		ring = adapter->tx_ring[j];
		if (!ring) {
			data[i++] = 0;
			data[i++] = 0;
			continue;
		}

		do {
			start = u64_stats_fetch_begin_irq(&ring->syncp);
			data[i] = ring->stats.packets;
			data[i + 1] = ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
		i += 2;
	}

	/* populate Rx queue data */
	for (j = 0; j < adapter->num_rx_queues; j++) {
		ring = adapter->rx_ring[j];
		if (!ring) {
			data[i++] = 0;
			data[i++] = 0;
			continue;
		}

		do {
			start = u64_stats_fetch_begin_irq(&ring->syncp);
			data[i] = ring->stats.packets;
			data[i + 1] = ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
		i += 2;
	}
}

static void ixgbevf_get_strings(struct net_device *netdev, u32 stringset,
				u8 *data)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	char *p = (char *)data;
	int i;

	switch (stringset) {
	case ETH_SS_TEST:
		memcpy(data, *ixgbe_gstrings_test,
		       IXGBEVF_TEST_LEN * ETH_GSTRING_LEN);
		break;
	case ETH_SS_STATS:
		for (i = 0; i < IXGBEVF_GLOBAL_STATS_LEN; i++) {
			memcpy(p, ixgbevf_gstrings_stats[i].stat_string,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}

		for (i = 0; i < adapter->num_tx_queues; i++) {
			sprintf(p, "tx_queue_%u_packets", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "tx_queue_%u_bytes", i);
			p += ETH_GSTRING_LEN;
		}
		for (i = 0; i < adapter->num_rx_queues; i++) {
			sprintf(p, "rx_queue_%u_packets", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "rx_queue_%u_bytes", i);
			p += ETH_GSTRING_LEN;
		}
		break;
	}
}

static int ixgbevf_link_test(struct ixgbevf_adapter *adapter, u64 *data)
{
	struct ixgbe_hw *hw = &adapter->hw;
	bool link_up;
	u32 link_speed = 0;
	*data = 0;

	hw->mac.ops.check_link(hw, &link_speed, &link_up, true);
	if (!link_up)
		*data = 1;

	return *data;
}

/* ethtool register test data */
struct ixgbevf_reg_test {
	u16 reg;
	u8 array_len;
	u8 test_type;
	u32 mask;
	u32 write;
};
/* In the hardware, registers are laid out either singly, in arrays
 * spaced 0x40 bytes apart, or in contiguous tables. We assume
 * most tests take place on arrays or single registers (handled
 * as a single-element array) and special-case the tables.
 * Table tests are always pattern tests.
 *
 * We also make provision for some required setup steps by specifying
 * registers to be written without any read-back testing.
 */

#define PATTERN_TEST	1
#define SET_READ_TEST	2
#define WRITE_NO_TEST	3
#define TABLE32_TEST	4
#define TABLE64_TEST_LO	5
#define TABLE64_TEST_HI	6

/* default VF register test */
static const struct ixgbevf_reg_test reg_test_vf[] = {
	{ IXGBE_VFRDBAL(0), 2, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 },
	{ IXGBE_VFRDBAH(0), 2, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_VFRDLEN(0), 2, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
	{ IXGBE_VFRXDCTL(0), 2, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE },
	{ IXGBE_VFRDT(0), 2, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
	{ IXGBE_VFRXDCTL(0), 2, WRITE_NO_TEST, 0, 0 },
	{ IXGBE_VFTDBAL(0), 2, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
	{ IXGBE_VFTDBAH(0), 2, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_VFTDLEN(0), 2, PATTERN_TEST, 0x000FFF80, 0x000FFF80 },
	{ .reg = 0 }
};

static const u32 register_test_patterns[] = {
	0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF
};

static bool reg_pattern_test(struct ixgbevf_adapter *adapter, u64 *data,
			     int reg, u32 mask, u32 write)
{
	u32 pat, val, before;

	if (IXGBE_REMOVED(adapter->hw.hw_addr)) {
		*data = 1;
		return true;
	}
	for (pat = 0; pat < ARRAY_SIZE(register_test_patterns); pat++) {
		before = ixgbevf_read_reg(&adapter->hw, reg);
		ixgbe_write_reg(&adapter->hw, reg,
				register_test_patterns[pat] & write);
		val = ixgbevf_read_reg(&adapter->hw, reg);
		if (val != (register_test_patterns[pat] & write & mask)) {
			hw_dbg(&adapter->hw,
			       "pattern test reg %04X failed: got 0x%08X expected 0x%08X\n",
			       reg, val,
			       register_test_patterns[pat] & write & mask);
			*data = reg;
			ixgbe_write_reg(&adapter->hw, reg, before);
			return true;
		}
		ixgbe_write_reg(&adapter->hw, reg, before);
	}
	return false;
}

static bool reg_set_and_check(struct ixgbevf_adapter *adapter, u64 *data,
			      int reg, u32 mask, u32 write)
{
	u32 val, before;

	if (IXGBE_REMOVED(adapter->hw.hw_addr)) {
		*data = 1;
		return true;
	}
	before = ixgbevf_read_reg(&adapter->hw, reg);
	ixgbe_write_reg(&adapter->hw, reg, write & mask);
	val = ixgbevf_read_reg(&adapter->hw, reg);
	if ((write & mask) != (val & mask)) {
		pr_err("set/check reg %04X test failed: got 0x%08X expected 0x%08X\n",
		       reg, (val & mask), write & mask);
		*data = reg;
		ixgbe_write_reg(&adapter->hw, reg, before);
		return true;
	}
	ixgbe_write_reg(&adapter->hw, reg, before);
	return false;
}

static int ixgbevf_reg_test(struct ixgbevf_adapter *adapter, u64 *data)
{
	const struct ixgbevf_reg_test *test;
	u32 i;

	if (IXGBE_REMOVED(adapter->hw.hw_addr)) {
		dev_err(&adapter->pdev->dev,
			"Adapter removed - register test blocked\n");
		*data = 1;
		return 1;
	}
	test = reg_test_vf;
	/* Perform the register test, looping through the test table
	 * until we either fail or reach the null entry.
	 */
	while (test->reg) {
		for (i = 0; i < test->array_len; i++) {
			bool b = false;

			switch (test->test_type) {
			case PATTERN_TEST:
				b = reg_pattern_test(adapter, data,
						     test->reg + (i * 0x40),
						     test->mask,
						     test->write);
				break;
			case SET_READ_TEST:
				b = reg_set_and_check(adapter, data,
						      test->reg + (i * 0x40),
						      test->mask,
						      test->write);
				break;
			case WRITE_NO_TEST:
				ixgbe_write_reg(&adapter->hw,
						test->reg + (i * 0x40),
						test->write);
				break;
			case TABLE32_TEST:
				b = reg_pattern_test(adapter, data,
						     test->reg + (i * 4),
						     test->mask,
						     test->write);
				break;
			case TABLE64_TEST_LO:
				b = reg_pattern_test(adapter, data,
						     test->reg + (i * 8),
						     test->mask,
						     test->write);
				break;
			case TABLE64_TEST_HI:
				b = reg_pattern_test(adapter, data,
						     test->reg + 4 + (i * 8),
						     test->mask,
						     test->write);
				break;
			}
			if (b)
				return 1;
		}
		test++;
	}

	*data = 0;
	return *data;
}

static void ixgbevf_diag_test(struct net_device *netdev,
			      struct ethtool_test *eth_test, u64 *data)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	bool if_running = netif_running(netdev);

	if (IXGBE_REMOVED(adapter->hw.hw_addr)) {
		dev_err(&adapter->pdev->dev,
			"Adapter removed - test blocked\n");
		data[0] = 1;
		data[1] = 1;
		eth_test->flags |= ETH_TEST_FL_FAILED;
		return;
	}
	set_bit(__IXGBEVF_TESTING, &adapter->state);
	if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
		/* Offline tests */

		hw_dbg(&adapter->hw, "offline testing starting\n");

		/* Link test performed before hardware reset so autoneg doesn't
		 * interfere with test result
		 */
		if (ixgbevf_link_test(adapter, &data[1]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		if (if_running)
			/* indicate we're in test mode */
			ixgbevf_close(netdev);
		else
			ixgbevf_reset(adapter);

		hw_dbg(&adapter->hw, "register testing starting\n");
		if (ixgbevf_reg_test(adapter, &data[0]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		ixgbevf_reset(adapter);

		clear_bit(__IXGBEVF_TESTING, &adapter->state);
		if (if_running)
			ixgbevf_open(netdev);
	} else {
		hw_dbg(&adapter->hw, "online testing starting\n");
		/* Online tests */
		if (ixgbevf_link_test(adapter, &data[1]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		/* Online tests aren't run; pass by default */
		data[0] = 0;

		clear_bit(__IXGBEVF_TESTING, &adapter->state);
	}
	msleep_interruptible(4 * 1000);
}

static int ixgbevf_nway_reset(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev))
		ixgbevf_reinit_locked(adapter);

	return 0;
}

static int ixgbevf_get_coalesce(struct net_device *netdev,
				struct ethtool_coalesce *ec)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	/* only valid if in constant ITR mode */
	if (adapter->rx_itr_setting <= 1)
		ec->rx_coalesce_usecs = adapter->rx_itr_setting;
	else
		ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2;

	/* if in mixed Tx/Rx queues per vector mode, report only Rx settings */
	if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count)
		return 0;
	/* only valid if in constant ITR mode */
	if (adapter->tx_itr_setting <= 1)
		ec->tx_coalesce_usecs = adapter->tx_itr_setting;
	else
		ec->tx_coalesce_usecs = adapter->tx_itr_setting >> 2;

	return 0;
}

static int ixgbevf_set_coalesce(struct net_device *netdev,
				struct ethtool_coalesce *ec)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbevf_q_vector *q_vector;
	int num_vectors, i;
	u16 tx_itr_param, rx_itr_param;

	/* don't accept Tx specific changes if we've got mixed RxTx vectors */
	if (adapter->q_vector[0]->tx.count &&
	    adapter->q_vector[0]->rx.count && ec->tx_coalesce_usecs)
		return -EINVAL;

	if ((ec->rx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)) ||
	    (ec->tx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)))
		return -EINVAL;

	if (ec->rx_coalesce_usecs > 1)
		adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2;
	else
		adapter->rx_itr_setting = ec->rx_coalesce_usecs;

	if (adapter->rx_itr_setting == 1)
		rx_itr_param = IXGBE_20K_ITR;
	else
		rx_itr_param = adapter->rx_itr_setting;

	if (ec->tx_coalesce_usecs > 1)
		adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2;
	else
		adapter->tx_itr_setting = ec->tx_coalesce_usecs;

	if (adapter->tx_itr_setting == 1)
		tx_itr_param = IXGBE_12K_ITR;
	else
		tx_itr_param = adapter->tx_itr_setting;

	num_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (i = 0; i < num_vectors; i++) {
		q_vector = adapter->q_vector[i];
		if (q_vector->tx.count && !q_vector->rx.count)
			/* Tx only */
			q_vector->itr = tx_itr_param;
		else
			/* Rx only or mixed */
			q_vector->itr = rx_itr_param;
		ixgbevf_write_eitr(q_vector);
	}

	return 0;
}

static int ixgbevf_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
			     u32 *rules __always_unused)
{
	struct ixgbevf_adapter *adapter = netdev_priv(dev);

	switch (info->cmd) {
	case ETHTOOL_GRXRINGS:
		info->data = adapter->num_rx_queues;
		return 0;
	default:
		hw_dbg(&adapter->hw, "Command parameters not supported\n");
		return -EOPNOTSUPP;
	}
}

static u32 ixgbevf_get_rxfh_indir_size(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	if (adapter->hw.mac.type >= ixgbe_mac_X550_vf)
		return IXGBEVF_X550_VFRETA_SIZE;

	return IXGBEVF_82599_RETA_SIZE;
}

static u32 ixgbevf_get_rxfh_key_size(struct net_device *netdev)
{
	return IXGBEVF_RSS_HASH_KEY_SIZE;
}

static int ixgbevf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
			    u8 *hfunc)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	int err = 0;

	if (hfunc)
		*hfunc = ETH_RSS_HASH_TOP;

	if (adapter->hw.mac.type >= ixgbe_mac_X550_vf) {
		if (key)
			memcpy(key, adapter->rss_key,
			       ixgbevf_get_rxfh_key_size(netdev));

		if (indir) {
			int i;

			for (i = 0; i < IXGBEVF_X550_VFRETA_SIZE; i++)
				indir[i] = adapter->rss_indir_tbl[i];
		}
	} else {
		/* If neither indirection table nor hash key was requested
		 *  - just return a success avoiding taking any locks.
		 */
		if (!indir && !key)
			return 0;

		spin_lock_bh(&adapter->mbx_lock);
		if (indir)
			err = ixgbevf_get_reta_locked(&adapter->hw, indir,
						      adapter->num_rx_queues);

		if (!err && key)
			err = ixgbevf_get_rss_key_locked(&adapter->hw, key);

		spin_unlock_bh(&adapter->mbx_lock);
	}

	return err;
}

static const struct ethtool_ops ixgbevf_ethtool_ops = {
	.get_drvinfo		= ixgbevf_get_drvinfo,
	.get_regs_len		= ixgbevf_get_regs_len,
	.get_regs		= ixgbevf_get_regs,
	.nway_reset		= ixgbevf_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_ringparam		= ixgbevf_get_ringparam,
	.set_ringparam		= ixgbevf_set_ringparam,
	.get_msglevel		= ixgbevf_get_msglevel,
	.set_msglevel		= ixgbevf_set_msglevel,
	.self_test		= ixgbevf_diag_test,
	.get_sset_count		= ixgbevf_get_sset_count,
	.get_strings		= ixgbevf_get_strings,
	.get_ethtool_stats	= ixgbevf_get_ethtool_stats,
	.get_coalesce		= ixgbevf_get_coalesce,
	.set_coalesce		= ixgbevf_set_coalesce,
	.get_rxnfc		= ixgbevf_get_rxnfc,
	.get_rxfh_indir_size	= ixgbevf_get_rxfh_indir_size,
	.get_rxfh_key_size	= ixgbevf_get_rxfh_key_size,
	.get_rxfh		= ixgbevf_get_rxfh,
	.get_link_ksettings	= ixgbevf_get_link_ksettings,
};

void ixgbevf_set_ethtool_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &ixgbevf_ethtool_ops;
}