1 /******************************************************************************* 2 3 Intel(R) Gigabit Ethernet Linux driver 4 Copyright(c) 2007-2012 Intel Corporation. 5 6 This program is free software; you can redistribute it and/or modify it 7 under the terms and conditions of the GNU General Public License, 8 version 2, as published by the Free Software Foundation. 9 10 This program is distributed in the hope it will be useful, but WITHOUT 11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 13 more details. 14 15 You should have received a copy of the GNU General Public License along with 16 this program; if not, write to the Free Software Foundation, Inc., 17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 18 19 The full GNU General Public License is included in this distribution in 20 the file called "COPYING". 21 22 Contact Information: 23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 25 26 *******************************************************************************/ 27 28 /* ethtool support for igb */ 29 30 #include <linux/vmalloc.h> 31 #include <linux/netdevice.h> 32 #include <linux/pci.h> 33 #include <linux/delay.h> 34 #include <linux/interrupt.h> 35 #include <linux/if_ether.h> 36 #include <linux/ethtool.h> 37 #include <linux/sched.h> 38 #include <linux/slab.h> 39 #include <linux/pm_runtime.h> 40 41 #include "igb.h" 42 43 struct igb_stats { 44 char stat_string[ETH_GSTRING_LEN]; 45 int sizeof_stat; 46 int stat_offset; 47 }; 48 49 #define IGB_STAT(_name, _stat) { \ 50 .stat_string = _name, \ 51 .sizeof_stat = FIELD_SIZEOF(struct igb_adapter, _stat), \ 52 .stat_offset = offsetof(struct igb_adapter, _stat) \ 53 } 54 static const struct igb_stats igb_gstrings_stats[] = { 55 IGB_STAT("rx_packets", stats.gprc), 56 IGB_STAT("tx_packets", stats.gptc), 57 IGB_STAT("rx_bytes", stats.gorc), 58 IGB_STAT("tx_bytes", stats.gotc), 59 IGB_STAT("rx_broadcast", stats.bprc), 60 IGB_STAT("tx_broadcast", stats.bptc), 61 IGB_STAT("rx_multicast", stats.mprc), 62 IGB_STAT("tx_multicast", stats.mptc), 63 IGB_STAT("multicast", stats.mprc), 64 IGB_STAT("collisions", stats.colc), 65 IGB_STAT("rx_crc_errors", stats.crcerrs), 66 IGB_STAT("rx_no_buffer_count", stats.rnbc), 67 IGB_STAT("rx_missed_errors", stats.mpc), 68 IGB_STAT("tx_aborted_errors", stats.ecol), 69 IGB_STAT("tx_carrier_errors", stats.tncrs), 70 IGB_STAT("tx_window_errors", stats.latecol), 71 IGB_STAT("tx_abort_late_coll", stats.latecol), 72 IGB_STAT("tx_deferred_ok", stats.dc), 73 IGB_STAT("tx_single_coll_ok", stats.scc), 74 IGB_STAT("tx_multi_coll_ok", stats.mcc), 75 IGB_STAT("tx_timeout_count", tx_timeout_count), 76 IGB_STAT("rx_long_length_errors", stats.roc), 77 IGB_STAT("rx_short_length_errors", stats.ruc), 78 IGB_STAT("rx_align_errors", stats.algnerrc), 79 IGB_STAT("tx_tcp_seg_good", stats.tsctc), 80 IGB_STAT("tx_tcp_seg_failed", stats.tsctfc), 81 IGB_STAT("rx_flow_control_xon", stats.xonrxc), 82 IGB_STAT("rx_flow_control_xoff", stats.xoffrxc), 83 IGB_STAT("tx_flow_control_xon", stats.xontxc), 84 IGB_STAT("tx_flow_control_xoff", stats.xofftxc), 85 IGB_STAT("rx_long_byte_count", stats.gorc), 86 IGB_STAT("tx_dma_out_of_sync", stats.doosync), 87 IGB_STAT("tx_smbus", stats.mgptc), 88 IGB_STAT("rx_smbus", stats.mgprc), 89 IGB_STAT("dropped_smbus", stats.mgpdc), 90 IGB_STAT("os2bmc_rx_by_bmc", stats.o2bgptc), 91 IGB_STAT("os2bmc_tx_by_bmc", stats.b2ospc), 92 IGB_STAT("os2bmc_tx_by_host", stats.o2bspc), 93 IGB_STAT("os2bmc_rx_by_host", stats.b2ogprc), 94 }; 95 96 #define IGB_NETDEV_STAT(_net_stat) { \ 97 .stat_string = __stringify(_net_stat), \ 98 .sizeof_stat = FIELD_SIZEOF(struct rtnl_link_stats64, _net_stat), \ 99 .stat_offset = offsetof(struct rtnl_link_stats64, _net_stat) \ 100 } 101 static const struct igb_stats igb_gstrings_net_stats[] = { 102 IGB_NETDEV_STAT(rx_errors), 103 IGB_NETDEV_STAT(tx_errors), 104 IGB_NETDEV_STAT(tx_dropped), 105 IGB_NETDEV_STAT(rx_length_errors), 106 IGB_NETDEV_STAT(rx_over_errors), 107 IGB_NETDEV_STAT(rx_frame_errors), 108 IGB_NETDEV_STAT(rx_fifo_errors), 109 IGB_NETDEV_STAT(tx_fifo_errors), 110 IGB_NETDEV_STAT(tx_heartbeat_errors) 111 }; 112 113 #define IGB_GLOBAL_STATS_LEN \ 114 (sizeof(igb_gstrings_stats) / sizeof(struct igb_stats)) 115 #define IGB_NETDEV_STATS_LEN \ 116 (sizeof(igb_gstrings_net_stats) / sizeof(struct igb_stats)) 117 #define IGB_RX_QUEUE_STATS_LEN \ 118 (sizeof(struct igb_rx_queue_stats) / sizeof(u64)) 119 120 #define IGB_TX_QUEUE_STATS_LEN 3 /* packets, bytes, restart_queue */ 121 122 #define IGB_QUEUE_STATS_LEN \ 123 ((((struct igb_adapter *)netdev_priv(netdev))->num_rx_queues * \ 124 IGB_RX_QUEUE_STATS_LEN) + \ 125 (((struct igb_adapter *)netdev_priv(netdev))->num_tx_queues * \ 126 IGB_TX_QUEUE_STATS_LEN)) 127 #define IGB_STATS_LEN \ 128 (IGB_GLOBAL_STATS_LEN + IGB_NETDEV_STATS_LEN + IGB_QUEUE_STATS_LEN) 129 130 static const char igb_gstrings_test[][ETH_GSTRING_LEN] = { 131 "Register test (offline)", "Eeprom test (offline)", 132 "Interrupt test (offline)", "Loopback test (offline)", 133 "Link test (on/offline)" 134 }; 135 #define IGB_TEST_LEN (sizeof(igb_gstrings_test) / ETH_GSTRING_LEN) 136 137 static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) 138 { 139 struct igb_adapter *adapter = netdev_priv(netdev); 140 struct e1000_hw *hw = &adapter->hw; 141 u32 status; 142 143 if (hw->phy.media_type == e1000_media_type_copper) { 144 145 ecmd->supported = (SUPPORTED_10baseT_Half | 146 SUPPORTED_10baseT_Full | 147 SUPPORTED_100baseT_Half | 148 SUPPORTED_100baseT_Full | 149 SUPPORTED_1000baseT_Full| 150 SUPPORTED_Autoneg | 151 SUPPORTED_TP); 152 ecmd->advertising = (ADVERTISED_TP | 153 ADVERTISED_Pause); 154 155 if (hw->mac.autoneg == 1) { 156 ecmd->advertising |= ADVERTISED_Autoneg; 157 /* the e1000 autoneg seems to match ethtool nicely */ 158 ecmd->advertising |= hw->phy.autoneg_advertised; 159 } 160 161 ecmd->port = PORT_TP; 162 ecmd->phy_address = hw->phy.addr; 163 } else { 164 ecmd->supported = (SUPPORTED_1000baseT_Full | 165 SUPPORTED_FIBRE | 166 SUPPORTED_Autoneg); 167 168 ecmd->advertising = (ADVERTISED_1000baseT_Full | 169 ADVERTISED_FIBRE | 170 ADVERTISED_Autoneg | 171 ADVERTISED_Pause); 172 173 ecmd->port = PORT_FIBRE; 174 } 175 176 ecmd->transceiver = XCVR_INTERNAL; 177 178 status = rd32(E1000_STATUS); 179 180 if (status & E1000_STATUS_LU) { 181 182 if ((status & E1000_STATUS_SPEED_1000) || 183 hw->phy.media_type != e1000_media_type_copper) 184 ethtool_cmd_speed_set(ecmd, SPEED_1000); 185 else if (status & E1000_STATUS_SPEED_100) 186 ethtool_cmd_speed_set(ecmd, SPEED_100); 187 else 188 ethtool_cmd_speed_set(ecmd, SPEED_10); 189 190 if ((status & E1000_STATUS_FD) || 191 hw->phy.media_type != e1000_media_type_copper) 192 ecmd->duplex = DUPLEX_FULL; 193 else 194 ecmd->duplex = DUPLEX_HALF; 195 } else { 196 ethtool_cmd_speed_set(ecmd, -1); 197 ecmd->duplex = -1; 198 } 199 200 ecmd->autoneg = hw->mac.autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE; 201 return 0; 202 } 203 204 static int igb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) 205 { 206 struct igb_adapter *adapter = netdev_priv(netdev); 207 struct e1000_hw *hw = &adapter->hw; 208 209 /* When SoL/IDER sessions are active, autoneg/speed/duplex 210 * cannot be changed */ 211 if (igb_check_reset_block(hw)) { 212 dev_err(&adapter->pdev->dev, "Cannot change link " 213 "characteristics when SoL/IDER is active.\n"); 214 return -EINVAL; 215 } 216 217 while (test_and_set_bit(__IGB_RESETTING, &adapter->state)) 218 msleep(1); 219 220 if (ecmd->autoneg == AUTONEG_ENABLE) { 221 hw->mac.autoneg = 1; 222 hw->phy.autoneg_advertised = ecmd->advertising | 223 ADVERTISED_TP | 224 ADVERTISED_Autoneg; 225 ecmd->advertising = hw->phy.autoneg_advertised; 226 if (adapter->fc_autoneg) 227 hw->fc.requested_mode = e1000_fc_default; 228 } else { 229 u32 speed = ethtool_cmd_speed(ecmd); 230 if (igb_set_spd_dplx(adapter, speed, ecmd->duplex)) { 231 clear_bit(__IGB_RESETTING, &adapter->state); 232 return -EINVAL; 233 } 234 } 235 236 /* reset the link */ 237 if (netif_running(adapter->netdev)) { 238 igb_down(adapter); 239 igb_up(adapter); 240 } else 241 igb_reset(adapter); 242 243 clear_bit(__IGB_RESETTING, &adapter->state); 244 return 0; 245 } 246 247 static u32 igb_get_link(struct net_device *netdev) 248 { 249 struct igb_adapter *adapter = netdev_priv(netdev); 250 struct e1000_mac_info *mac = &adapter->hw.mac; 251 252 /* 253 * If the link is not reported up to netdev, interrupts are disabled, 254 * and so the physical link state may have changed since we last 255 * looked. Set get_link_status to make sure that the true link 256 * state is interrogated, rather than pulling a cached and possibly 257 * stale link state from the driver. 258 */ 259 if (!netif_carrier_ok(netdev)) 260 mac->get_link_status = 1; 261 262 return igb_has_link(adapter); 263 } 264 265 static void igb_get_pauseparam(struct net_device *netdev, 266 struct ethtool_pauseparam *pause) 267 { 268 struct igb_adapter *adapter = netdev_priv(netdev); 269 struct e1000_hw *hw = &adapter->hw; 270 271 pause->autoneg = 272 (adapter->fc_autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE); 273 274 if (hw->fc.current_mode == e1000_fc_rx_pause) 275 pause->rx_pause = 1; 276 else if (hw->fc.current_mode == e1000_fc_tx_pause) 277 pause->tx_pause = 1; 278 else if (hw->fc.current_mode == e1000_fc_full) { 279 pause->rx_pause = 1; 280 pause->tx_pause = 1; 281 } 282 } 283 284 static int igb_set_pauseparam(struct net_device *netdev, 285 struct ethtool_pauseparam *pause) 286 { 287 struct igb_adapter *adapter = netdev_priv(netdev); 288 struct e1000_hw *hw = &adapter->hw; 289 int retval = 0; 290 291 adapter->fc_autoneg = pause->autoneg; 292 293 while (test_and_set_bit(__IGB_RESETTING, &adapter->state)) 294 msleep(1); 295 296 if (adapter->fc_autoneg == AUTONEG_ENABLE) { 297 hw->fc.requested_mode = e1000_fc_default; 298 if (netif_running(adapter->netdev)) { 299 igb_down(adapter); 300 igb_up(adapter); 301 } else { 302 igb_reset(adapter); 303 } 304 } else { 305 if (pause->rx_pause && pause->tx_pause) 306 hw->fc.requested_mode = e1000_fc_full; 307 else if (pause->rx_pause && !pause->tx_pause) 308 hw->fc.requested_mode = e1000_fc_rx_pause; 309 else if (!pause->rx_pause && pause->tx_pause) 310 hw->fc.requested_mode = e1000_fc_tx_pause; 311 else if (!pause->rx_pause && !pause->tx_pause) 312 hw->fc.requested_mode = e1000_fc_none; 313 314 hw->fc.current_mode = hw->fc.requested_mode; 315 316 retval = ((hw->phy.media_type == e1000_media_type_copper) ? 317 igb_force_mac_fc(hw) : igb_setup_link(hw)); 318 } 319 320 clear_bit(__IGB_RESETTING, &adapter->state); 321 return retval; 322 } 323 324 static u32 igb_get_msglevel(struct net_device *netdev) 325 { 326 struct igb_adapter *adapter = netdev_priv(netdev); 327 return adapter->msg_enable; 328 } 329 330 static void igb_set_msglevel(struct net_device *netdev, u32 data) 331 { 332 struct igb_adapter *adapter = netdev_priv(netdev); 333 adapter->msg_enable = data; 334 } 335 336 static int igb_get_regs_len(struct net_device *netdev) 337 { 338 #define IGB_REGS_LEN 551 339 return IGB_REGS_LEN * sizeof(u32); 340 } 341 342 static void igb_get_regs(struct net_device *netdev, 343 struct ethtool_regs *regs, void *p) 344 { 345 struct igb_adapter *adapter = netdev_priv(netdev); 346 struct e1000_hw *hw = &adapter->hw; 347 u32 *regs_buff = p; 348 u8 i; 349 350 memset(p, 0, IGB_REGS_LEN * sizeof(u32)); 351 352 regs->version = (1 << 24) | (hw->revision_id << 16) | hw->device_id; 353 354 /* General Registers */ 355 regs_buff[0] = rd32(E1000_CTRL); 356 regs_buff[1] = rd32(E1000_STATUS); 357 regs_buff[2] = rd32(E1000_CTRL_EXT); 358 regs_buff[3] = rd32(E1000_MDIC); 359 regs_buff[4] = rd32(E1000_SCTL); 360 regs_buff[5] = rd32(E1000_CONNSW); 361 regs_buff[6] = rd32(E1000_VET); 362 regs_buff[7] = rd32(E1000_LEDCTL); 363 regs_buff[8] = rd32(E1000_PBA); 364 regs_buff[9] = rd32(E1000_PBS); 365 regs_buff[10] = rd32(E1000_FRTIMER); 366 regs_buff[11] = rd32(E1000_TCPTIMER); 367 368 /* NVM Register */ 369 regs_buff[12] = rd32(E1000_EECD); 370 371 /* Interrupt */ 372 /* Reading EICS for EICR because they read the 373 * same but EICS does not clear on read */ 374 regs_buff[13] = rd32(E1000_EICS); 375 regs_buff[14] = rd32(E1000_EICS); 376 regs_buff[15] = rd32(E1000_EIMS); 377 regs_buff[16] = rd32(E1000_EIMC); 378 regs_buff[17] = rd32(E1000_EIAC); 379 regs_buff[18] = rd32(E1000_EIAM); 380 /* Reading ICS for ICR because they read the 381 * same but ICS does not clear on read */ 382 regs_buff[19] = rd32(E1000_ICS); 383 regs_buff[20] = rd32(E1000_ICS); 384 regs_buff[21] = rd32(E1000_IMS); 385 regs_buff[22] = rd32(E1000_IMC); 386 regs_buff[23] = rd32(E1000_IAC); 387 regs_buff[24] = rd32(E1000_IAM); 388 regs_buff[25] = rd32(E1000_IMIRVP); 389 390 /* Flow Control */ 391 regs_buff[26] = rd32(E1000_FCAL); 392 regs_buff[27] = rd32(E1000_FCAH); 393 regs_buff[28] = rd32(E1000_FCTTV); 394 regs_buff[29] = rd32(E1000_FCRTL); 395 regs_buff[30] = rd32(E1000_FCRTH); 396 regs_buff[31] = rd32(E1000_FCRTV); 397 398 /* Receive */ 399 regs_buff[32] = rd32(E1000_RCTL); 400 regs_buff[33] = rd32(E1000_RXCSUM); 401 regs_buff[34] = rd32(E1000_RLPML); 402 regs_buff[35] = rd32(E1000_RFCTL); 403 regs_buff[36] = rd32(E1000_MRQC); 404 regs_buff[37] = rd32(E1000_VT_CTL); 405 406 /* Transmit */ 407 regs_buff[38] = rd32(E1000_TCTL); 408 regs_buff[39] = rd32(E1000_TCTL_EXT); 409 regs_buff[40] = rd32(E1000_TIPG); 410 regs_buff[41] = rd32(E1000_DTXCTL); 411 412 /* Wake Up */ 413 regs_buff[42] = rd32(E1000_WUC); 414 regs_buff[43] = rd32(E1000_WUFC); 415 regs_buff[44] = rd32(E1000_WUS); 416 regs_buff[45] = rd32(E1000_IPAV); 417 regs_buff[46] = rd32(E1000_WUPL); 418 419 /* MAC */ 420 regs_buff[47] = rd32(E1000_PCS_CFG0); 421 regs_buff[48] = rd32(E1000_PCS_LCTL); 422 regs_buff[49] = rd32(E1000_PCS_LSTAT); 423 regs_buff[50] = rd32(E1000_PCS_ANADV); 424 regs_buff[51] = rd32(E1000_PCS_LPAB); 425 regs_buff[52] = rd32(E1000_PCS_NPTX); 426 regs_buff[53] = rd32(E1000_PCS_LPABNP); 427 428 /* Statistics */ 429 regs_buff[54] = adapter->stats.crcerrs; 430 regs_buff[55] = adapter->stats.algnerrc; 431 regs_buff[56] = adapter->stats.symerrs; 432 regs_buff[57] = adapter->stats.rxerrc; 433 regs_buff[58] = adapter->stats.mpc; 434 regs_buff[59] = adapter->stats.scc; 435 regs_buff[60] = adapter->stats.ecol; 436 regs_buff[61] = adapter->stats.mcc; 437 regs_buff[62] = adapter->stats.latecol; 438 regs_buff[63] = adapter->stats.colc; 439 regs_buff[64] = adapter->stats.dc; 440 regs_buff[65] = adapter->stats.tncrs; 441 regs_buff[66] = adapter->stats.sec; 442 regs_buff[67] = adapter->stats.htdpmc; 443 regs_buff[68] = adapter->stats.rlec; 444 regs_buff[69] = adapter->stats.xonrxc; 445 regs_buff[70] = adapter->stats.xontxc; 446 regs_buff[71] = adapter->stats.xoffrxc; 447 regs_buff[72] = adapter->stats.xofftxc; 448 regs_buff[73] = adapter->stats.fcruc; 449 regs_buff[74] = adapter->stats.prc64; 450 regs_buff[75] = adapter->stats.prc127; 451 regs_buff[76] = adapter->stats.prc255; 452 regs_buff[77] = adapter->stats.prc511; 453 regs_buff[78] = adapter->stats.prc1023; 454 regs_buff[79] = adapter->stats.prc1522; 455 regs_buff[80] = adapter->stats.gprc; 456 regs_buff[81] = adapter->stats.bprc; 457 regs_buff[82] = adapter->stats.mprc; 458 regs_buff[83] = adapter->stats.gptc; 459 regs_buff[84] = adapter->stats.gorc; 460 regs_buff[86] = adapter->stats.gotc; 461 regs_buff[88] = adapter->stats.rnbc; 462 regs_buff[89] = adapter->stats.ruc; 463 regs_buff[90] = adapter->stats.rfc; 464 regs_buff[91] = adapter->stats.roc; 465 regs_buff[92] = adapter->stats.rjc; 466 regs_buff[93] = adapter->stats.mgprc; 467 regs_buff[94] = adapter->stats.mgpdc; 468 regs_buff[95] = adapter->stats.mgptc; 469 regs_buff[96] = adapter->stats.tor; 470 regs_buff[98] = adapter->stats.tot; 471 regs_buff[100] = adapter->stats.tpr; 472 regs_buff[101] = adapter->stats.tpt; 473 regs_buff[102] = adapter->stats.ptc64; 474 regs_buff[103] = adapter->stats.ptc127; 475 regs_buff[104] = adapter->stats.ptc255; 476 regs_buff[105] = adapter->stats.ptc511; 477 regs_buff[106] = adapter->stats.ptc1023; 478 regs_buff[107] = adapter->stats.ptc1522; 479 regs_buff[108] = adapter->stats.mptc; 480 regs_buff[109] = adapter->stats.bptc; 481 regs_buff[110] = adapter->stats.tsctc; 482 regs_buff[111] = adapter->stats.iac; 483 regs_buff[112] = adapter->stats.rpthc; 484 regs_buff[113] = adapter->stats.hgptc; 485 regs_buff[114] = adapter->stats.hgorc; 486 regs_buff[116] = adapter->stats.hgotc; 487 regs_buff[118] = adapter->stats.lenerrs; 488 regs_buff[119] = adapter->stats.scvpc; 489 regs_buff[120] = adapter->stats.hrmpc; 490 491 for (i = 0; i < 4; i++) 492 regs_buff[121 + i] = rd32(E1000_SRRCTL(i)); 493 for (i = 0; i < 4; i++) 494 regs_buff[125 + i] = rd32(E1000_PSRTYPE(i)); 495 for (i = 0; i < 4; i++) 496 regs_buff[129 + i] = rd32(E1000_RDBAL(i)); 497 for (i = 0; i < 4; i++) 498 regs_buff[133 + i] = rd32(E1000_RDBAH(i)); 499 for (i = 0; i < 4; i++) 500 regs_buff[137 + i] = rd32(E1000_RDLEN(i)); 501 for (i = 0; i < 4; i++) 502 regs_buff[141 + i] = rd32(E1000_RDH(i)); 503 for (i = 0; i < 4; i++) 504 regs_buff[145 + i] = rd32(E1000_RDT(i)); 505 for (i = 0; i < 4; i++) 506 regs_buff[149 + i] = rd32(E1000_RXDCTL(i)); 507 508 for (i = 0; i < 10; i++) 509 regs_buff[153 + i] = rd32(E1000_EITR(i)); 510 for (i = 0; i < 8; i++) 511 regs_buff[163 + i] = rd32(E1000_IMIR(i)); 512 for (i = 0; i < 8; i++) 513 regs_buff[171 + i] = rd32(E1000_IMIREXT(i)); 514 for (i = 0; i < 16; i++) 515 regs_buff[179 + i] = rd32(E1000_RAL(i)); 516 for (i = 0; i < 16; i++) 517 regs_buff[195 + i] = rd32(E1000_RAH(i)); 518 519 for (i = 0; i < 4; i++) 520 regs_buff[211 + i] = rd32(E1000_TDBAL(i)); 521 for (i = 0; i < 4; i++) 522 regs_buff[215 + i] = rd32(E1000_TDBAH(i)); 523 for (i = 0; i < 4; i++) 524 regs_buff[219 + i] = rd32(E1000_TDLEN(i)); 525 for (i = 0; i < 4; i++) 526 regs_buff[223 + i] = rd32(E1000_TDH(i)); 527 for (i = 0; i < 4; i++) 528 regs_buff[227 + i] = rd32(E1000_TDT(i)); 529 for (i = 0; i < 4; i++) 530 regs_buff[231 + i] = rd32(E1000_TXDCTL(i)); 531 for (i = 0; i < 4; i++) 532 regs_buff[235 + i] = rd32(E1000_TDWBAL(i)); 533 for (i = 0; i < 4; i++) 534 regs_buff[239 + i] = rd32(E1000_TDWBAH(i)); 535 for (i = 0; i < 4; i++) 536 regs_buff[243 + i] = rd32(E1000_DCA_TXCTRL(i)); 537 538 for (i = 0; i < 4; i++) 539 regs_buff[247 + i] = rd32(E1000_IP4AT_REG(i)); 540 for (i = 0; i < 4; i++) 541 regs_buff[251 + i] = rd32(E1000_IP6AT_REG(i)); 542 for (i = 0; i < 32; i++) 543 regs_buff[255 + i] = rd32(E1000_WUPM_REG(i)); 544 for (i = 0; i < 128; i++) 545 regs_buff[287 + i] = rd32(E1000_FFMT_REG(i)); 546 for (i = 0; i < 128; i++) 547 regs_buff[415 + i] = rd32(E1000_FFVT_REG(i)); 548 for (i = 0; i < 4; i++) 549 regs_buff[543 + i] = rd32(E1000_FFLT_REG(i)); 550 551 regs_buff[547] = rd32(E1000_TDFH); 552 regs_buff[548] = rd32(E1000_TDFT); 553 regs_buff[549] = rd32(E1000_TDFHS); 554 regs_buff[550] = rd32(E1000_TDFPC); 555 regs_buff[551] = adapter->stats.o2bgptc; 556 regs_buff[552] = adapter->stats.b2ospc; 557 regs_buff[553] = adapter->stats.o2bspc; 558 regs_buff[554] = adapter->stats.b2ogprc; 559 } 560 561 static int igb_get_eeprom_len(struct net_device *netdev) 562 { 563 struct igb_adapter *adapter = netdev_priv(netdev); 564 return adapter->hw.nvm.word_size * 2; 565 } 566 567 static int igb_get_eeprom(struct net_device *netdev, 568 struct ethtool_eeprom *eeprom, u8 *bytes) 569 { 570 struct igb_adapter *adapter = netdev_priv(netdev); 571 struct e1000_hw *hw = &adapter->hw; 572 u16 *eeprom_buff; 573 int first_word, last_word; 574 int ret_val = 0; 575 u16 i; 576 577 if (eeprom->len == 0) 578 return -EINVAL; 579 580 eeprom->magic = hw->vendor_id | (hw->device_id << 16); 581 582 first_word = eeprom->offset >> 1; 583 last_word = (eeprom->offset + eeprom->len - 1) >> 1; 584 585 eeprom_buff = kmalloc(sizeof(u16) * 586 (last_word - first_word + 1), GFP_KERNEL); 587 if (!eeprom_buff) 588 return -ENOMEM; 589 590 if (hw->nvm.type == e1000_nvm_eeprom_spi) 591 ret_val = hw->nvm.ops.read(hw, first_word, 592 last_word - first_word + 1, 593 eeprom_buff); 594 else { 595 for (i = 0; i < last_word - first_word + 1; i++) { 596 ret_val = hw->nvm.ops.read(hw, first_word + i, 1, 597 &eeprom_buff[i]); 598 if (ret_val) 599 break; 600 } 601 } 602 603 /* Device's eeprom is always little-endian, word addressable */ 604 for (i = 0; i < last_word - first_word + 1; i++) 605 le16_to_cpus(&eeprom_buff[i]); 606 607 memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), 608 eeprom->len); 609 kfree(eeprom_buff); 610 611 return ret_val; 612 } 613 614 static int igb_set_eeprom(struct net_device *netdev, 615 struct ethtool_eeprom *eeprom, u8 *bytes) 616 { 617 struct igb_adapter *adapter = netdev_priv(netdev); 618 struct e1000_hw *hw = &adapter->hw; 619 u16 *eeprom_buff; 620 void *ptr; 621 int max_len, first_word, last_word, ret_val = 0; 622 u16 i; 623 624 if (eeprom->len == 0) 625 return -EOPNOTSUPP; 626 627 if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16))) 628 return -EFAULT; 629 630 max_len = hw->nvm.word_size * 2; 631 632 first_word = eeprom->offset >> 1; 633 last_word = (eeprom->offset + eeprom->len - 1) >> 1; 634 eeprom_buff = kmalloc(max_len, GFP_KERNEL); 635 if (!eeprom_buff) 636 return -ENOMEM; 637 638 ptr = (void *)eeprom_buff; 639 640 if (eeprom->offset & 1) { 641 /* need read/modify/write of first changed EEPROM word */ 642 /* only the second byte of the word is being modified */ 643 ret_val = hw->nvm.ops.read(hw, first_word, 1, 644 &eeprom_buff[0]); 645 ptr++; 646 } 647 if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) { 648 /* need read/modify/write of last changed EEPROM word */ 649 /* only the first byte of the word is being modified */ 650 ret_val = hw->nvm.ops.read(hw, last_word, 1, 651 &eeprom_buff[last_word - first_word]); 652 } 653 654 /* Device's eeprom is always little-endian, word addressable */ 655 for (i = 0; i < last_word - first_word + 1; i++) 656 le16_to_cpus(&eeprom_buff[i]); 657 658 memcpy(ptr, bytes, eeprom->len); 659 660 for (i = 0; i < last_word - first_word + 1; i++) 661 eeprom_buff[i] = cpu_to_le16(eeprom_buff[i]); 662 663 ret_val = hw->nvm.ops.write(hw, first_word, 664 last_word - first_word + 1, eeprom_buff); 665 666 /* Update the checksum over the first part of the EEPROM if needed 667 * and flush shadow RAM for 82573 controllers */ 668 if ((ret_val == 0) && ((first_word <= NVM_CHECKSUM_REG))) 669 hw->nvm.ops.update(hw); 670 671 kfree(eeprom_buff); 672 return ret_val; 673 } 674 675 static void igb_get_drvinfo(struct net_device *netdev, 676 struct ethtool_drvinfo *drvinfo) 677 { 678 struct igb_adapter *adapter = netdev_priv(netdev); 679 u16 eeprom_data; 680 681 strlcpy(drvinfo->driver, igb_driver_name, sizeof(drvinfo->driver)); 682 strlcpy(drvinfo->version, igb_driver_version, sizeof(drvinfo->version)); 683 684 /* EEPROM image version # is reported as firmware version # for 685 * 82575 controllers */ 686 adapter->hw.nvm.ops.read(&adapter->hw, 5, 1, &eeprom_data); 687 snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), 688 "%d.%d-%d", 689 (eeprom_data & 0xF000) >> 12, 690 (eeprom_data & 0x0FF0) >> 4, 691 eeprom_data & 0x000F); 692 693 strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), 694 sizeof(drvinfo->bus_info)); 695 drvinfo->n_stats = IGB_STATS_LEN; 696 drvinfo->testinfo_len = IGB_TEST_LEN; 697 drvinfo->regdump_len = igb_get_regs_len(netdev); 698 drvinfo->eedump_len = igb_get_eeprom_len(netdev); 699 } 700 701 static void igb_get_ringparam(struct net_device *netdev, 702 struct ethtool_ringparam *ring) 703 { 704 struct igb_adapter *adapter = netdev_priv(netdev); 705 706 ring->rx_max_pending = IGB_MAX_RXD; 707 ring->tx_max_pending = IGB_MAX_TXD; 708 ring->rx_pending = adapter->rx_ring_count; 709 ring->tx_pending = adapter->tx_ring_count; 710 } 711 712 static int igb_set_ringparam(struct net_device *netdev, 713 struct ethtool_ringparam *ring) 714 { 715 struct igb_adapter *adapter = netdev_priv(netdev); 716 struct igb_ring *temp_ring; 717 int i, err = 0; 718 u16 new_rx_count, new_tx_count; 719 720 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) 721 return -EINVAL; 722 723 new_rx_count = min_t(u32, ring->rx_pending, IGB_MAX_RXD); 724 new_rx_count = max_t(u16, new_rx_count, IGB_MIN_RXD); 725 new_rx_count = ALIGN(new_rx_count, REQ_RX_DESCRIPTOR_MULTIPLE); 726 727 new_tx_count = min_t(u32, ring->tx_pending, IGB_MAX_TXD); 728 new_tx_count = max_t(u16, new_tx_count, IGB_MIN_TXD); 729 new_tx_count = ALIGN(new_tx_count, REQ_TX_DESCRIPTOR_MULTIPLE); 730 731 if ((new_tx_count == adapter->tx_ring_count) && 732 (new_rx_count == adapter->rx_ring_count)) { 733 /* nothing to do */ 734 return 0; 735 } 736 737 while (test_and_set_bit(__IGB_RESETTING, &adapter->state)) 738 msleep(1); 739 740 if (!netif_running(adapter->netdev)) { 741 for (i = 0; i < adapter->num_tx_queues; i++) 742 adapter->tx_ring[i]->count = new_tx_count; 743 for (i = 0; i < adapter->num_rx_queues; i++) 744 adapter->rx_ring[i]->count = new_rx_count; 745 adapter->tx_ring_count = new_tx_count; 746 adapter->rx_ring_count = new_rx_count; 747 goto clear_reset; 748 } 749 750 if (adapter->num_tx_queues > adapter->num_rx_queues) 751 temp_ring = vmalloc(adapter->num_tx_queues * sizeof(struct igb_ring)); 752 else 753 temp_ring = vmalloc(adapter->num_rx_queues * sizeof(struct igb_ring)); 754 755 if (!temp_ring) { 756 err = -ENOMEM; 757 goto clear_reset; 758 } 759 760 igb_down(adapter); 761 762 /* 763 * We can't just free everything and then setup again, 764 * because the ISRs in MSI-X mode get passed pointers 765 * to the tx and rx ring structs. 766 */ 767 if (new_tx_count != adapter->tx_ring_count) { 768 for (i = 0; i < adapter->num_tx_queues; i++) { 769 memcpy(&temp_ring[i], adapter->tx_ring[i], 770 sizeof(struct igb_ring)); 771 772 temp_ring[i].count = new_tx_count; 773 err = igb_setup_tx_resources(&temp_ring[i]); 774 if (err) { 775 while (i) { 776 i--; 777 igb_free_tx_resources(&temp_ring[i]); 778 } 779 goto err_setup; 780 } 781 } 782 783 for (i = 0; i < adapter->num_tx_queues; i++) { 784 igb_free_tx_resources(adapter->tx_ring[i]); 785 786 memcpy(adapter->tx_ring[i], &temp_ring[i], 787 sizeof(struct igb_ring)); 788 } 789 790 adapter->tx_ring_count = new_tx_count; 791 } 792 793 if (new_rx_count != adapter->rx_ring_count) { 794 for (i = 0; i < adapter->num_rx_queues; i++) { 795 memcpy(&temp_ring[i], adapter->rx_ring[i], 796 sizeof(struct igb_ring)); 797 798 temp_ring[i].count = new_rx_count; 799 err = igb_setup_rx_resources(&temp_ring[i]); 800 if (err) { 801 while (i) { 802 i--; 803 igb_free_rx_resources(&temp_ring[i]); 804 } 805 goto err_setup; 806 } 807 808 } 809 810 for (i = 0; i < adapter->num_rx_queues; i++) { 811 igb_free_rx_resources(adapter->rx_ring[i]); 812 813 memcpy(adapter->rx_ring[i], &temp_ring[i], 814 sizeof(struct igb_ring)); 815 } 816 817 adapter->rx_ring_count = new_rx_count; 818 } 819 err_setup: 820 igb_up(adapter); 821 vfree(temp_ring); 822 clear_reset: 823 clear_bit(__IGB_RESETTING, &adapter->state); 824 return err; 825 } 826 827 /* ethtool register test data */ 828 struct igb_reg_test { 829 u16 reg; 830 u16 reg_offset; 831 u16 array_len; 832 u16 test_type; 833 u32 mask; 834 u32 write; 835 }; 836 837 /* In the hardware, registers are laid out either singly, in arrays 838 * spaced 0x100 bytes apart, or in contiguous tables. We assume 839 * most tests take place on arrays or single registers (handled 840 * as a single-element array) and special-case the tables. 841 * Table tests are always pattern tests. 842 * 843 * We also make provision for some required setup steps by specifying 844 * registers to be written without any read-back testing. 845 */ 846 847 #define PATTERN_TEST 1 848 #define SET_READ_TEST 2 849 #define WRITE_NO_TEST 3 850 #define TABLE32_TEST 4 851 #define TABLE64_TEST_LO 5 852 #define TABLE64_TEST_HI 6 853 854 /* i350 reg test */ 855 static struct igb_reg_test reg_test_i350[] = { 856 { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 857 { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, 858 { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, 859 { E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFF0000, 0xFFFF0000 }, 860 { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, 861 { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 862 { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, 863 { E1000_RDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, 864 { E1000_RDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 865 { E1000_RDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, 866 /* RDH is read-only for i350, only test RDT. */ 867 { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, 868 { E1000_RDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, 869 { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 }, 870 { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, 871 { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF }, 872 { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, 873 { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 874 { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, 875 { E1000_TDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, 876 { E1000_TDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 877 { E1000_TDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, 878 { E1000_TDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, 879 { E1000_TDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, 880 { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, 881 { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB }, 882 { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF }, 883 { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, 884 { E1000_RA, 0, 16, TABLE64_TEST_LO, 885 0xFFFFFFFF, 0xFFFFFFFF }, 886 { E1000_RA, 0, 16, TABLE64_TEST_HI, 887 0xC3FFFFFF, 0xFFFFFFFF }, 888 { E1000_RA2, 0, 16, TABLE64_TEST_LO, 889 0xFFFFFFFF, 0xFFFFFFFF }, 890 { E1000_RA2, 0, 16, TABLE64_TEST_HI, 891 0xC3FFFFFF, 0xFFFFFFFF }, 892 { E1000_MTA, 0, 128, TABLE32_TEST, 893 0xFFFFFFFF, 0xFFFFFFFF }, 894 { 0, 0, 0, 0 } 895 }; 896 897 /* 82580 reg test */ 898 static struct igb_reg_test reg_test_82580[] = { 899 { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 900 { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, 901 { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, 902 { E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 903 { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, 904 { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 905 { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, 906 { E1000_RDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, 907 { E1000_RDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 908 { E1000_RDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, 909 /* RDH is read-only for 82580, only test RDT. */ 910 { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, 911 { E1000_RDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, 912 { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 }, 913 { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, 914 { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF }, 915 { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, 916 { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 917 { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, 918 { E1000_TDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, 919 { E1000_TDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 920 { E1000_TDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, 921 { E1000_TDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, 922 { E1000_TDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, 923 { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, 924 { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB }, 925 { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF }, 926 { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, 927 { E1000_RA, 0, 16, TABLE64_TEST_LO, 928 0xFFFFFFFF, 0xFFFFFFFF }, 929 { E1000_RA, 0, 16, TABLE64_TEST_HI, 930 0x83FFFFFF, 0xFFFFFFFF }, 931 { E1000_RA2, 0, 8, TABLE64_TEST_LO, 932 0xFFFFFFFF, 0xFFFFFFFF }, 933 { E1000_RA2, 0, 8, TABLE64_TEST_HI, 934 0x83FFFFFF, 0xFFFFFFFF }, 935 { E1000_MTA, 0, 128, TABLE32_TEST, 936 0xFFFFFFFF, 0xFFFFFFFF }, 937 { 0, 0, 0, 0 } 938 }; 939 940 /* 82576 reg test */ 941 static struct igb_reg_test reg_test_82576[] = { 942 { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 943 { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, 944 { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, 945 { E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 946 { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, 947 { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 948 { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, 949 { E1000_RDBAL(4), 0x40, 12, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, 950 { E1000_RDBAH(4), 0x40, 12, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 951 { E1000_RDLEN(4), 0x40, 12, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, 952 /* Enable all RX queues before testing. */ 953 { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, E1000_RXDCTL_QUEUE_ENABLE }, 954 { E1000_RXDCTL(4), 0x40, 12, WRITE_NO_TEST, 0, E1000_RXDCTL_QUEUE_ENABLE }, 955 /* RDH is read-only for 82576, only test RDT. */ 956 { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, 957 { E1000_RDT(4), 0x40, 12, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, 958 { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, 0 }, 959 { E1000_RXDCTL(4), 0x40, 12, WRITE_NO_TEST, 0, 0 }, 960 { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 }, 961 { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, 962 { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF }, 963 { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, 964 { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 965 { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, 966 { E1000_TDBAL(4), 0x40, 12, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, 967 { E1000_TDBAH(4), 0x40, 12, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 968 { E1000_TDLEN(4), 0x40, 12, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, 969 { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, 970 { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB }, 971 { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF }, 972 { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, 973 { E1000_RA, 0, 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF }, 974 { E1000_RA, 0, 16, TABLE64_TEST_HI, 0x83FFFFFF, 0xFFFFFFFF }, 975 { E1000_RA2, 0, 8, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF }, 976 { E1000_RA2, 0, 8, TABLE64_TEST_HI, 0x83FFFFFF, 0xFFFFFFFF }, 977 { E1000_MTA, 0, 128,TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 978 { 0, 0, 0, 0 } 979 }; 980 981 /* 82575 register test */ 982 static struct igb_reg_test reg_test_82575[] = { 983 { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 984 { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, 985 { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, 986 { E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 987 { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, 988 { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 989 { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, 990 /* Enable all four RX queues before testing. */ 991 { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, E1000_RXDCTL_QUEUE_ENABLE }, 992 /* RDH is read-only for 82575, only test RDT. */ 993 { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, 994 { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, 0 }, 995 { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 }, 996 { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, 997 { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF }, 998 { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, 999 { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 1000 { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, 1001 { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, 1002 { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB3FE, 0x003FFFFB }, 1003 { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB3FE, 0xFFFFFFFF }, 1004 { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, 1005 { E1000_TXCW, 0x100, 1, PATTERN_TEST, 0xC000FFFF, 0x0000FFFF }, 1006 { E1000_RA, 0, 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF }, 1007 { E1000_RA, 0, 16, TABLE64_TEST_HI, 0x800FFFFF, 0xFFFFFFFF }, 1008 { E1000_MTA, 0, 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 1009 { 0, 0, 0, 0 } 1010 }; 1011 1012 static bool reg_pattern_test(struct igb_adapter *adapter, u64 *data, 1013 int reg, u32 mask, u32 write) 1014 { 1015 struct e1000_hw *hw = &adapter->hw; 1016 u32 pat, val; 1017 static const u32 _test[] = 1018 {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; 1019 for (pat = 0; pat < ARRAY_SIZE(_test); pat++) { 1020 wr32(reg, (_test[pat] & write)); 1021 val = rd32(reg) & mask; 1022 if (val != (_test[pat] & write & mask)) { 1023 dev_err(&adapter->pdev->dev, "pattern test reg %04X " 1024 "failed: got 0x%08X expected 0x%08X\n", 1025 reg, val, (_test[pat] & write & mask)); 1026 *data = reg; 1027 return 1; 1028 } 1029 } 1030 1031 return 0; 1032 } 1033 1034 static bool reg_set_and_check(struct igb_adapter *adapter, u64 *data, 1035 int reg, u32 mask, u32 write) 1036 { 1037 struct e1000_hw *hw = &adapter->hw; 1038 u32 val; 1039 wr32(reg, write & mask); 1040 val = rd32(reg); 1041 if ((write & mask) != (val & mask)) { 1042 dev_err(&adapter->pdev->dev, "set/check reg %04X test failed:" 1043 " got 0x%08X expected 0x%08X\n", reg, 1044 (val & mask), (write & mask)); 1045 *data = reg; 1046 return 1; 1047 } 1048 1049 return 0; 1050 } 1051 1052 #define REG_PATTERN_TEST(reg, mask, write) \ 1053 do { \ 1054 if (reg_pattern_test(adapter, data, reg, mask, write)) \ 1055 return 1; \ 1056 } while (0) 1057 1058 #define REG_SET_AND_CHECK(reg, mask, write) \ 1059 do { \ 1060 if (reg_set_and_check(adapter, data, reg, mask, write)) \ 1061 return 1; \ 1062 } while (0) 1063 1064 static int igb_reg_test(struct igb_adapter *adapter, u64 *data) 1065 { 1066 struct e1000_hw *hw = &adapter->hw; 1067 struct igb_reg_test *test; 1068 u32 value, before, after; 1069 u32 i, toggle; 1070 1071 switch (adapter->hw.mac.type) { 1072 case e1000_i350: 1073 test = reg_test_i350; 1074 toggle = 0x7FEFF3FF; 1075 break; 1076 case e1000_82580: 1077 test = reg_test_82580; 1078 toggle = 0x7FEFF3FF; 1079 break; 1080 case e1000_82576: 1081 test = reg_test_82576; 1082 toggle = 0x7FFFF3FF; 1083 break; 1084 default: 1085 test = reg_test_82575; 1086 toggle = 0x7FFFF3FF; 1087 break; 1088 } 1089 1090 /* Because the status register is such a special case, 1091 * we handle it separately from the rest of the register 1092 * tests. Some bits are read-only, some toggle, and some 1093 * are writable on newer MACs. 1094 */ 1095 before = rd32(E1000_STATUS); 1096 value = (rd32(E1000_STATUS) & toggle); 1097 wr32(E1000_STATUS, toggle); 1098 after = rd32(E1000_STATUS) & toggle; 1099 if (value != after) { 1100 dev_err(&adapter->pdev->dev, "failed STATUS register test " 1101 "got: 0x%08X expected: 0x%08X\n", after, value); 1102 *data = 1; 1103 return 1; 1104 } 1105 /* restore previous status */ 1106 wr32(E1000_STATUS, before); 1107 1108 /* Perform the remainder of the register test, looping through 1109 * the test table until we either fail or reach the null entry. 1110 */ 1111 while (test->reg) { 1112 for (i = 0; i < test->array_len; i++) { 1113 switch (test->test_type) { 1114 case PATTERN_TEST: 1115 REG_PATTERN_TEST(test->reg + 1116 (i * test->reg_offset), 1117 test->mask, 1118 test->write); 1119 break; 1120 case SET_READ_TEST: 1121 REG_SET_AND_CHECK(test->reg + 1122 (i * test->reg_offset), 1123 test->mask, 1124 test->write); 1125 break; 1126 case WRITE_NO_TEST: 1127 writel(test->write, 1128 (adapter->hw.hw_addr + test->reg) 1129 + (i * test->reg_offset)); 1130 break; 1131 case TABLE32_TEST: 1132 REG_PATTERN_TEST(test->reg + (i * 4), 1133 test->mask, 1134 test->write); 1135 break; 1136 case TABLE64_TEST_LO: 1137 REG_PATTERN_TEST(test->reg + (i * 8), 1138 test->mask, 1139 test->write); 1140 break; 1141 case TABLE64_TEST_HI: 1142 REG_PATTERN_TEST((test->reg + 4) + (i * 8), 1143 test->mask, 1144 test->write); 1145 break; 1146 } 1147 } 1148 test++; 1149 } 1150 1151 *data = 0; 1152 return 0; 1153 } 1154 1155 static int igb_eeprom_test(struct igb_adapter *adapter, u64 *data) 1156 { 1157 u16 temp; 1158 u16 checksum = 0; 1159 u16 i; 1160 1161 *data = 0; 1162 /* Read and add up the contents of the EEPROM */ 1163 for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) { 1164 if ((adapter->hw.nvm.ops.read(&adapter->hw, i, 1, &temp)) < 0) { 1165 *data = 1; 1166 break; 1167 } 1168 checksum += temp; 1169 } 1170 1171 /* If Checksum is not Correct return error else test passed */ 1172 if ((checksum != (u16) NVM_SUM) && !(*data)) 1173 *data = 2; 1174 1175 return *data; 1176 } 1177 1178 static irqreturn_t igb_test_intr(int irq, void *data) 1179 { 1180 struct igb_adapter *adapter = (struct igb_adapter *) data; 1181 struct e1000_hw *hw = &adapter->hw; 1182 1183 adapter->test_icr |= rd32(E1000_ICR); 1184 1185 return IRQ_HANDLED; 1186 } 1187 1188 static int igb_intr_test(struct igb_adapter *adapter, u64 *data) 1189 { 1190 struct e1000_hw *hw = &adapter->hw; 1191 struct net_device *netdev = adapter->netdev; 1192 u32 mask, ics_mask, i = 0, shared_int = true; 1193 u32 irq = adapter->pdev->irq; 1194 1195 *data = 0; 1196 1197 /* Hook up test interrupt handler just for this test */ 1198 if (adapter->msix_entries) { 1199 if (request_irq(adapter->msix_entries[0].vector, 1200 igb_test_intr, 0, netdev->name, adapter)) { 1201 *data = 1; 1202 return -1; 1203 } 1204 } else if (adapter->flags & IGB_FLAG_HAS_MSI) { 1205 shared_int = false; 1206 if (request_irq(irq, 1207 igb_test_intr, 0, netdev->name, adapter)) { 1208 *data = 1; 1209 return -1; 1210 } 1211 } else if (!request_irq(irq, igb_test_intr, IRQF_PROBE_SHARED, 1212 netdev->name, adapter)) { 1213 shared_int = false; 1214 } else if (request_irq(irq, igb_test_intr, IRQF_SHARED, 1215 netdev->name, adapter)) { 1216 *data = 1; 1217 return -1; 1218 } 1219 dev_info(&adapter->pdev->dev, "testing %s interrupt\n", 1220 (shared_int ? "shared" : "unshared")); 1221 1222 /* Disable all the interrupts */ 1223 wr32(E1000_IMC, ~0); 1224 wrfl(); 1225 msleep(10); 1226 1227 /* Define all writable bits for ICS */ 1228 switch (hw->mac.type) { 1229 case e1000_82575: 1230 ics_mask = 0x37F47EDD; 1231 break; 1232 case e1000_82576: 1233 ics_mask = 0x77D4FBFD; 1234 break; 1235 case e1000_82580: 1236 ics_mask = 0x77DCFED5; 1237 break; 1238 case e1000_i350: 1239 ics_mask = 0x77DCFED5; 1240 break; 1241 default: 1242 ics_mask = 0x7FFFFFFF; 1243 break; 1244 } 1245 1246 /* Test each interrupt */ 1247 for (; i < 31; i++) { 1248 /* Interrupt to test */ 1249 mask = 1 << i; 1250 1251 if (!(mask & ics_mask)) 1252 continue; 1253 1254 if (!shared_int) { 1255 /* Disable the interrupt to be reported in 1256 * the cause register and then force the same 1257 * interrupt and see if one gets posted. If 1258 * an interrupt was posted to the bus, the 1259 * test failed. 1260 */ 1261 adapter->test_icr = 0; 1262 1263 /* Flush any pending interrupts */ 1264 wr32(E1000_ICR, ~0); 1265 1266 wr32(E1000_IMC, mask); 1267 wr32(E1000_ICS, mask); 1268 wrfl(); 1269 msleep(10); 1270 1271 if (adapter->test_icr & mask) { 1272 *data = 3; 1273 break; 1274 } 1275 } 1276 1277 /* Enable the interrupt to be reported in 1278 * the cause register and then force the same 1279 * interrupt and see if one gets posted. If 1280 * an interrupt was not posted to the bus, the 1281 * test failed. 1282 */ 1283 adapter->test_icr = 0; 1284 1285 /* Flush any pending interrupts */ 1286 wr32(E1000_ICR, ~0); 1287 1288 wr32(E1000_IMS, mask); 1289 wr32(E1000_ICS, mask); 1290 wrfl(); 1291 msleep(10); 1292 1293 if (!(adapter->test_icr & mask)) { 1294 *data = 4; 1295 break; 1296 } 1297 1298 if (!shared_int) { 1299 /* Disable the other interrupts to be reported in 1300 * the cause register and then force the other 1301 * interrupts and see if any get posted. If 1302 * an interrupt was posted to the bus, the 1303 * test failed. 1304 */ 1305 adapter->test_icr = 0; 1306 1307 /* Flush any pending interrupts */ 1308 wr32(E1000_ICR, ~0); 1309 1310 wr32(E1000_IMC, ~mask); 1311 wr32(E1000_ICS, ~mask); 1312 wrfl(); 1313 msleep(10); 1314 1315 if (adapter->test_icr & mask) { 1316 *data = 5; 1317 break; 1318 } 1319 } 1320 } 1321 1322 /* Disable all the interrupts */ 1323 wr32(E1000_IMC, ~0); 1324 wrfl(); 1325 msleep(10); 1326 1327 /* Unhook test interrupt handler */ 1328 if (adapter->msix_entries) 1329 free_irq(adapter->msix_entries[0].vector, adapter); 1330 else 1331 free_irq(irq, adapter); 1332 1333 return *data; 1334 } 1335 1336 static void igb_free_desc_rings(struct igb_adapter *adapter) 1337 { 1338 igb_free_tx_resources(&adapter->test_tx_ring); 1339 igb_free_rx_resources(&adapter->test_rx_ring); 1340 } 1341 1342 static int igb_setup_desc_rings(struct igb_adapter *adapter) 1343 { 1344 struct igb_ring *tx_ring = &adapter->test_tx_ring; 1345 struct igb_ring *rx_ring = &adapter->test_rx_ring; 1346 struct e1000_hw *hw = &adapter->hw; 1347 int ret_val; 1348 1349 /* Setup Tx descriptor ring and Tx buffers */ 1350 tx_ring->count = IGB_DEFAULT_TXD; 1351 tx_ring->dev = &adapter->pdev->dev; 1352 tx_ring->netdev = adapter->netdev; 1353 tx_ring->reg_idx = adapter->vfs_allocated_count; 1354 1355 if (igb_setup_tx_resources(tx_ring)) { 1356 ret_val = 1; 1357 goto err_nomem; 1358 } 1359 1360 igb_setup_tctl(adapter); 1361 igb_configure_tx_ring(adapter, tx_ring); 1362 1363 /* Setup Rx descriptor ring and Rx buffers */ 1364 rx_ring->count = IGB_DEFAULT_RXD; 1365 rx_ring->dev = &adapter->pdev->dev; 1366 rx_ring->netdev = adapter->netdev; 1367 rx_ring->reg_idx = adapter->vfs_allocated_count; 1368 1369 if (igb_setup_rx_resources(rx_ring)) { 1370 ret_val = 3; 1371 goto err_nomem; 1372 } 1373 1374 /* set the default queue to queue 0 of PF */ 1375 wr32(E1000_MRQC, adapter->vfs_allocated_count << 3); 1376 1377 /* enable receive ring */ 1378 igb_setup_rctl(adapter); 1379 igb_configure_rx_ring(adapter, rx_ring); 1380 1381 igb_alloc_rx_buffers(rx_ring, igb_desc_unused(rx_ring)); 1382 1383 return 0; 1384 1385 err_nomem: 1386 igb_free_desc_rings(adapter); 1387 return ret_val; 1388 } 1389 1390 static void igb_phy_disable_receiver(struct igb_adapter *adapter) 1391 { 1392 struct e1000_hw *hw = &adapter->hw; 1393 1394 /* Write out to PHY registers 29 and 30 to disable the Receiver. */ 1395 igb_write_phy_reg(hw, 29, 0x001F); 1396 igb_write_phy_reg(hw, 30, 0x8FFC); 1397 igb_write_phy_reg(hw, 29, 0x001A); 1398 igb_write_phy_reg(hw, 30, 0x8FF0); 1399 } 1400 1401 static int igb_integrated_phy_loopback(struct igb_adapter *adapter) 1402 { 1403 struct e1000_hw *hw = &adapter->hw; 1404 u32 ctrl_reg = 0; 1405 1406 hw->mac.autoneg = false; 1407 1408 if (hw->phy.type == e1000_phy_m88) { 1409 /* Auto-MDI/MDIX Off */ 1410 igb_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, 0x0808); 1411 /* reset to update Auto-MDI/MDIX */ 1412 igb_write_phy_reg(hw, PHY_CONTROL, 0x9140); 1413 /* autoneg off */ 1414 igb_write_phy_reg(hw, PHY_CONTROL, 0x8140); 1415 } else if (hw->phy.type == e1000_phy_82580) { 1416 /* enable MII loopback */ 1417 igb_write_phy_reg(hw, I82580_PHY_LBK_CTRL, 0x8041); 1418 } 1419 1420 ctrl_reg = rd32(E1000_CTRL); 1421 1422 /* force 1000, set loopback */ 1423 igb_write_phy_reg(hw, PHY_CONTROL, 0x4140); 1424 1425 /* Now set up the MAC to the same speed/duplex as the PHY. */ 1426 ctrl_reg = rd32(E1000_CTRL); 1427 ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */ 1428 ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */ 1429 E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */ 1430 E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */ 1431 E1000_CTRL_FD | /* Force Duplex to FULL */ 1432 E1000_CTRL_SLU); /* Set link up enable bit */ 1433 1434 if (hw->phy.type == e1000_phy_m88) 1435 ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */ 1436 1437 wr32(E1000_CTRL, ctrl_reg); 1438 1439 /* Disable the receiver on the PHY so when a cable is plugged in, the 1440 * PHY does not begin to autoneg when a cable is reconnected to the NIC. 1441 */ 1442 if (hw->phy.type == e1000_phy_m88) 1443 igb_phy_disable_receiver(adapter); 1444 1445 udelay(500); 1446 1447 return 0; 1448 } 1449 1450 static int igb_set_phy_loopback(struct igb_adapter *adapter) 1451 { 1452 return igb_integrated_phy_loopback(adapter); 1453 } 1454 1455 static int igb_setup_loopback_test(struct igb_adapter *adapter) 1456 { 1457 struct e1000_hw *hw = &adapter->hw; 1458 u32 reg; 1459 1460 reg = rd32(E1000_CTRL_EXT); 1461 1462 /* use CTRL_EXT to identify link type as SGMII can appear as copper */ 1463 if (reg & E1000_CTRL_EXT_LINK_MODE_MASK) { 1464 if ((hw->device_id == E1000_DEV_ID_DH89XXCC_SGMII) || 1465 (hw->device_id == E1000_DEV_ID_DH89XXCC_SERDES) || 1466 (hw->device_id == E1000_DEV_ID_DH89XXCC_BACKPLANE) || 1467 (hw->device_id == E1000_DEV_ID_DH89XXCC_SFP)) { 1468 1469 /* Enable DH89xxCC MPHY for near end loopback */ 1470 reg = rd32(E1000_MPHY_ADDR_CTL); 1471 reg = (reg & E1000_MPHY_ADDR_CTL_OFFSET_MASK) | 1472 E1000_MPHY_PCS_CLK_REG_OFFSET; 1473 wr32(E1000_MPHY_ADDR_CTL, reg); 1474 1475 reg = rd32(E1000_MPHY_DATA); 1476 reg |= E1000_MPHY_PCS_CLK_REG_DIGINELBEN; 1477 wr32(E1000_MPHY_DATA, reg); 1478 } 1479 1480 reg = rd32(E1000_RCTL); 1481 reg |= E1000_RCTL_LBM_TCVR; 1482 wr32(E1000_RCTL, reg); 1483 1484 wr32(E1000_SCTL, E1000_ENABLE_SERDES_LOOPBACK); 1485 1486 reg = rd32(E1000_CTRL); 1487 reg &= ~(E1000_CTRL_RFCE | 1488 E1000_CTRL_TFCE | 1489 E1000_CTRL_LRST); 1490 reg |= E1000_CTRL_SLU | 1491 E1000_CTRL_FD; 1492 wr32(E1000_CTRL, reg); 1493 1494 /* Unset switch control to serdes energy detect */ 1495 reg = rd32(E1000_CONNSW); 1496 reg &= ~E1000_CONNSW_ENRGSRC; 1497 wr32(E1000_CONNSW, reg); 1498 1499 /* Set PCS register for forced speed */ 1500 reg = rd32(E1000_PCS_LCTL); 1501 reg &= ~E1000_PCS_LCTL_AN_ENABLE; /* Disable Autoneg*/ 1502 reg |= E1000_PCS_LCTL_FLV_LINK_UP | /* Force link up */ 1503 E1000_PCS_LCTL_FSV_1000 | /* Force 1000 */ 1504 E1000_PCS_LCTL_FDV_FULL | /* SerDes Full duplex */ 1505 E1000_PCS_LCTL_FSD | /* Force Speed */ 1506 E1000_PCS_LCTL_FORCE_LINK; /* Force Link */ 1507 wr32(E1000_PCS_LCTL, reg); 1508 1509 return 0; 1510 } 1511 1512 return igb_set_phy_loopback(adapter); 1513 } 1514 1515 static void igb_loopback_cleanup(struct igb_adapter *adapter) 1516 { 1517 struct e1000_hw *hw = &adapter->hw; 1518 u32 rctl; 1519 u16 phy_reg; 1520 1521 if ((hw->device_id == E1000_DEV_ID_DH89XXCC_SGMII) || 1522 (hw->device_id == E1000_DEV_ID_DH89XXCC_SERDES) || 1523 (hw->device_id == E1000_DEV_ID_DH89XXCC_BACKPLANE) || 1524 (hw->device_id == E1000_DEV_ID_DH89XXCC_SFP)) { 1525 u32 reg; 1526 1527 /* Disable near end loopback on DH89xxCC */ 1528 reg = rd32(E1000_MPHY_ADDR_CTL); 1529 reg = (reg & E1000_MPHY_ADDR_CTL_OFFSET_MASK) | 1530 E1000_MPHY_PCS_CLK_REG_OFFSET; 1531 wr32(E1000_MPHY_ADDR_CTL, reg); 1532 1533 reg = rd32(E1000_MPHY_DATA); 1534 reg &= ~E1000_MPHY_PCS_CLK_REG_DIGINELBEN; 1535 wr32(E1000_MPHY_DATA, reg); 1536 } 1537 1538 rctl = rd32(E1000_RCTL); 1539 rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC); 1540 wr32(E1000_RCTL, rctl); 1541 1542 hw->mac.autoneg = true; 1543 igb_read_phy_reg(hw, PHY_CONTROL, &phy_reg); 1544 if (phy_reg & MII_CR_LOOPBACK) { 1545 phy_reg &= ~MII_CR_LOOPBACK; 1546 igb_write_phy_reg(hw, PHY_CONTROL, phy_reg); 1547 igb_phy_sw_reset(hw); 1548 } 1549 } 1550 1551 static void igb_create_lbtest_frame(struct sk_buff *skb, 1552 unsigned int frame_size) 1553 { 1554 memset(skb->data, 0xFF, frame_size); 1555 frame_size /= 2; 1556 memset(&skb->data[frame_size], 0xAA, frame_size - 1); 1557 memset(&skb->data[frame_size + 10], 0xBE, 1); 1558 memset(&skb->data[frame_size + 12], 0xAF, 1); 1559 } 1560 1561 static int igb_check_lbtest_frame(struct sk_buff *skb, unsigned int frame_size) 1562 { 1563 frame_size /= 2; 1564 if (*(skb->data + 3) == 0xFF) { 1565 if ((*(skb->data + frame_size + 10) == 0xBE) && 1566 (*(skb->data + frame_size + 12) == 0xAF)) { 1567 return 0; 1568 } 1569 } 1570 return 13; 1571 } 1572 1573 static int igb_clean_test_rings(struct igb_ring *rx_ring, 1574 struct igb_ring *tx_ring, 1575 unsigned int size) 1576 { 1577 union e1000_adv_rx_desc *rx_desc; 1578 struct igb_rx_buffer *rx_buffer_info; 1579 struct igb_tx_buffer *tx_buffer_info; 1580 struct netdev_queue *txq; 1581 u16 rx_ntc, tx_ntc, count = 0; 1582 unsigned int total_bytes = 0, total_packets = 0; 1583 1584 /* initialize next to clean and descriptor values */ 1585 rx_ntc = rx_ring->next_to_clean; 1586 tx_ntc = tx_ring->next_to_clean; 1587 rx_desc = IGB_RX_DESC(rx_ring, rx_ntc); 1588 1589 while (igb_test_staterr(rx_desc, E1000_RXD_STAT_DD)) { 1590 /* check rx buffer */ 1591 rx_buffer_info = &rx_ring->rx_buffer_info[rx_ntc]; 1592 1593 /* unmap rx buffer, will be remapped by alloc_rx_buffers */ 1594 dma_unmap_single(rx_ring->dev, 1595 rx_buffer_info->dma, 1596 IGB_RX_HDR_LEN, 1597 DMA_FROM_DEVICE); 1598 rx_buffer_info->dma = 0; 1599 1600 /* verify contents of skb */ 1601 if (!igb_check_lbtest_frame(rx_buffer_info->skb, size)) 1602 count++; 1603 1604 /* unmap buffer on tx side */ 1605 tx_buffer_info = &tx_ring->tx_buffer_info[tx_ntc]; 1606 total_bytes += tx_buffer_info->bytecount; 1607 total_packets += tx_buffer_info->gso_segs; 1608 igb_unmap_and_free_tx_resource(tx_ring, tx_buffer_info); 1609 1610 /* increment rx/tx next to clean counters */ 1611 rx_ntc++; 1612 if (rx_ntc == rx_ring->count) 1613 rx_ntc = 0; 1614 tx_ntc++; 1615 if (tx_ntc == tx_ring->count) 1616 tx_ntc = 0; 1617 1618 /* fetch next descriptor */ 1619 rx_desc = IGB_RX_DESC(rx_ring, rx_ntc); 1620 } 1621 1622 txq = netdev_get_tx_queue(tx_ring->netdev, tx_ring->queue_index); 1623 netdev_tx_completed_queue(txq, total_packets, total_bytes); 1624 1625 /* re-map buffers to ring, store next to clean values */ 1626 igb_alloc_rx_buffers(rx_ring, count); 1627 rx_ring->next_to_clean = rx_ntc; 1628 tx_ring->next_to_clean = tx_ntc; 1629 1630 return count; 1631 } 1632 1633 static int igb_run_loopback_test(struct igb_adapter *adapter) 1634 { 1635 struct igb_ring *tx_ring = &adapter->test_tx_ring; 1636 struct igb_ring *rx_ring = &adapter->test_rx_ring; 1637 u16 i, j, lc, good_cnt; 1638 int ret_val = 0; 1639 unsigned int size = IGB_RX_HDR_LEN; 1640 netdev_tx_t tx_ret_val; 1641 struct sk_buff *skb; 1642 1643 /* allocate test skb */ 1644 skb = alloc_skb(size, GFP_KERNEL); 1645 if (!skb) 1646 return 11; 1647 1648 /* place data into test skb */ 1649 igb_create_lbtest_frame(skb, size); 1650 skb_put(skb, size); 1651 1652 /* 1653 * Calculate the loop count based on the largest descriptor ring 1654 * The idea is to wrap the largest ring a number of times using 64 1655 * send/receive pairs during each loop 1656 */ 1657 1658 if (rx_ring->count <= tx_ring->count) 1659 lc = ((tx_ring->count / 64) * 2) + 1; 1660 else 1661 lc = ((rx_ring->count / 64) * 2) + 1; 1662 1663 for (j = 0; j <= lc; j++) { /* loop count loop */ 1664 /* reset count of good packets */ 1665 good_cnt = 0; 1666 1667 /* place 64 packets on the transmit queue*/ 1668 for (i = 0; i < 64; i++) { 1669 skb_get(skb); 1670 tx_ret_val = igb_xmit_frame_ring(skb, tx_ring); 1671 if (tx_ret_val == NETDEV_TX_OK) 1672 good_cnt++; 1673 } 1674 1675 if (good_cnt != 64) { 1676 ret_val = 12; 1677 break; 1678 } 1679 1680 /* allow 200 milliseconds for packets to go from tx to rx */ 1681 msleep(200); 1682 1683 good_cnt = igb_clean_test_rings(rx_ring, tx_ring, size); 1684 if (good_cnt != 64) { 1685 ret_val = 13; 1686 break; 1687 } 1688 } /* end loop count loop */ 1689 1690 /* free the original skb */ 1691 kfree_skb(skb); 1692 1693 return ret_val; 1694 } 1695 1696 static int igb_loopback_test(struct igb_adapter *adapter, u64 *data) 1697 { 1698 /* PHY loopback cannot be performed if SoL/IDER 1699 * sessions are active */ 1700 if (igb_check_reset_block(&adapter->hw)) { 1701 dev_err(&adapter->pdev->dev, 1702 "Cannot do PHY loopback test " 1703 "when SoL/IDER is active.\n"); 1704 *data = 0; 1705 goto out; 1706 } 1707 *data = igb_setup_desc_rings(adapter); 1708 if (*data) 1709 goto out; 1710 *data = igb_setup_loopback_test(adapter); 1711 if (*data) 1712 goto err_loopback; 1713 *data = igb_run_loopback_test(adapter); 1714 igb_loopback_cleanup(adapter); 1715 1716 err_loopback: 1717 igb_free_desc_rings(adapter); 1718 out: 1719 return *data; 1720 } 1721 1722 static int igb_link_test(struct igb_adapter *adapter, u64 *data) 1723 { 1724 struct e1000_hw *hw = &adapter->hw; 1725 *data = 0; 1726 if (hw->phy.media_type == e1000_media_type_internal_serdes) { 1727 int i = 0; 1728 hw->mac.serdes_has_link = false; 1729 1730 /* On some blade server designs, link establishment 1731 * could take as long as 2-3 minutes */ 1732 do { 1733 hw->mac.ops.check_for_link(&adapter->hw); 1734 if (hw->mac.serdes_has_link) 1735 return *data; 1736 msleep(20); 1737 } while (i++ < 3750); 1738 1739 *data = 1; 1740 } else { 1741 hw->mac.ops.check_for_link(&adapter->hw); 1742 if (hw->mac.autoneg) 1743 msleep(4000); 1744 1745 if (!(rd32(E1000_STATUS) & E1000_STATUS_LU)) 1746 *data = 1; 1747 } 1748 return *data; 1749 } 1750 1751 static void igb_diag_test(struct net_device *netdev, 1752 struct ethtool_test *eth_test, u64 *data) 1753 { 1754 struct igb_adapter *adapter = netdev_priv(netdev); 1755 u16 autoneg_advertised; 1756 u8 forced_speed_duplex, autoneg; 1757 bool if_running = netif_running(netdev); 1758 1759 set_bit(__IGB_TESTING, &adapter->state); 1760 if (eth_test->flags == ETH_TEST_FL_OFFLINE) { 1761 /* Offline tests */ 1762 1763 /* save speed, duplex, autoneg settings */ 1764 autoneg_advertised = adapter->hw.phy.autoneg_advertised; 1765 forced_speed_duplex = adapter->hw.mac.forced_speed_duplex; 1766 autoneg = adapter->hw.mac.autoneg; 1767 1768 dev_info(&adapter->pdev->dev, "offline testing starting\n"); 1769 1770 /* power up link for link test */ 1771 igb_power_up_link(adapter); 1772 1773 /* Link test performed before hardware reset so autoneg doesn't 1774 * interfere with test result */ 1775 if (igb_link_test(adapter, &data[4])) 1776 eth_test->flags |= ETH_TEST_FL_FAILED; 1777 1778 if (if_running) 1779 /* indicate we're in test mode */ 1780 dev_close(netdev); 1781 else 1782 igb_reset(adapter); 1783 1784 if (igb_reg_test(adapter, &data[0])) 1785 eth_test->flags |= ETH_TEST_FL_FAILED; 1786 1787 igb_reset(adapter); 1788 if (igb_eeprom_test(adapter, &data[1])) 1789 eth_test->flags |= ETH_TEST_FL_FAILED; 1790 1791 igb_reset(adapter); 1792 if (igb_intr_test(adapter, &data[2])) 1793 eth_test->flags |= ETH_TEST_FL_FAILED; 1794 1795 igb_reset(adapter); 1796 /* power up link for loopback test */ 1797 igb_power_up_link(adapter); 1798 if (igb_loopback_test(adapter, &data[3])) 1799 eth_test->flags |= ETH_TEST_FL_FAILED; 1800 1801 /* restore speed, duplex, autoneg settings */ 1802 adapter->hw.phy.autoneg_advertised = autoneg_advertised; 1803 adapter->hw.mac.forced_speed_duplex = forced_speed_duplex; 1804 adapter->hw.mac.autoneg = autoneg; 1805 1806 /* force this routine to wait until autoneg complete/timeout */ 1807 adapter->hw.phy.autoneg_wait_to_complete = true; 1808 igb_reset(adapter); 1809 adapter->hw.phy.autoneg_wait_to_complete = false; 1810 1811 clear_bit(__IGB_TESTING, &adapter->state); 1812 if (if_running) 1813 dev_open(netdev); 1814 } else { 1815 dev_info(&adapter->pdev->dev, "online testing starting\n"); 1816 1817 /* PHY is powered down when interface is down */ 1818 if (if_running && igb_link_test(adapter, &data[4])) 1819 eth_test->flags |= ETH_TEST_FL_FAILED; 1820 else 1821 data[4] = 0; 1822 1823 /* Online tests aren't run; pass by default */ 1824 data[0] = 0; 1825 data[1] = 0; 1826 data[2] = 0; 1827 data[3] = 0; 1828 1829 clear_bit(__IGB_TESTING, &adapter->state); 1830 } 1831 msleep_interruptible(4 * 1000); 1832 } 1833 1834 static int igb_wol_exclusion(struct igb_adapter *adapter, 1835 struct ethtool_wolinfo *wol) 1836 { 1837 struct e1000_hw *hw = &adapter->hw; 1838 int retval = 1; /* fail by default */ 1839 1840 switch (hw->device_id) { 1841 case E1000_DEV_ID_82575GB_QUAD_COPPER: 1842 /* WoL not supported */ 1843 wol->supported = 0; 1844 break; 1845 case E1000_DEV_ID_82575EB_FIBER_SERDES: 1846 case E1000_DEV_ID_82576_FIBER: 1847 case E1000_DEV_ID_82576_SERDES: 1848 /* Wake events not supported on port B */ 1849 if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1) { 1850 wol->supported = 0; 1851 break; 1852 } 1853 /* return success for non excluded adapter ports */ 1854 retval = 0; 1855 break; 1856 case E1000_DEV_ID_82576_QUAD_COPPER: 1857 case E1000_DEV_ID_82576_QUAD_COPPER_ET2: 1858 /* quad port adapters only support WoL on port A */ 1859 if (!(adapter->flags & IGB_FLAG_QUAD_PORT_A)) { 1860 wol->supported = 0; 1861 break; 1862 } 1863 /* return success for non excluded adapter ports */ 1864 retval = 0; 1865 break; 1866 default: 1867 /* dual port cards only support WoL on port A from now on 1868 * unless it was enabled in the eeprom for port B 1869 * so exclude FUNC_1 ports from having WoL enabled */ 1870 if ((rd32(E1000_STATUS) & E1000_STATUS_FUNC_MASK) && 1871 !adapter->eeprom_wol) { 1872 wol->supported = 0; 1873 break; 1874 } 1875 1876 retval = 0; 1877 } 1878 1879 return retval; 1880 } 1881 1882 static void igb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) 1883 { 1884 struct igb_adapter *adapter = netdev_priv(netdev); 1885 1886 wol->supported = WAKE_UCAST | WAKE_MCAST | 1887 WAKE_BCAST | WAKE_MAGIC | 1888 WAKE_PHY; 1889 wol->wolopts = 0; 1890 1891 /* this function will set ->supported = 0 and return 1 if wol is not 1892 * supported by this hardware */ 1893 if (igb_wol_exclusion(adapter, wol) || 1894 !device_can_wakeup(&adapter->pdev->dev)) 1895 return; 1896 1897 /* apply any specific unsupported masks here */ 1898 switch (adapter->hw.device_id) { 1899 default: 1900 break; 1901 } 1902 1903 if (adapter->wol & E1000_WUFC_EX) 1904 wol->wolopts |= WAKE_UCAST; 1905 if (adapter->wol & E1000_WUFC_MC) 1906 wol->wolopts |= WAKE_MCAST; 1907 if (adapter->wol & E1000_WUFC_BC) 1908 wol->wolopts |= WAKE_BCAST; 1909 if (adapter->wol & E1000_WUFC_MAG) 1910 wol->wolopts |= WAKE_MAGIC; 1911 if (adapter->wol & E1000_WUFC_LNKC) 1912 wol->wolopts |= WAKE_PHY; 1913 } 1914 1915 static int igb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) 1916 { 1917 struct igb_adapter *adapter = netdev_priv(netdev); 1918 1919 if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE)) 1920 return -EOPNOTSUPP; 1921 1922 if (igb_wol_exclusion(adapter, wol) || 1923 !device_can_wakeup(&adapter->pdev->dev)) 1924 return wol->wolopts ? -EOPNOTSUPP : 0; 1925 1926 /* these settings will always override what we currently have */ 1927 adapter->wol = 0; 1928 1929 if (wol->wolopts & WAKE_UCAST) 1930 adapter->wol |= E1000_WUFC_EX; 1931 if (wol->wolopts & WAKE_MCAST) 1932 adapter->wol |= E1000_WUFC_MC; 1933 if (wol->wolopts & WAKE_BCAST) 1934 adapter->wol |= E1000_WUFC_BC; 1935 if (wol->wolopts & WAKE_MAGIC) 1936 adapter->wol |= E1000_WUFC_MAG; 1937 if (wol->wolopts & WAKE_PHY) 1938 adapter->wol |= E1000_WUFC_LNKC; 1939 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); 1940 1941 return 0; 1942 } 1943 1944 /* bit defines for adapter->led_status */ 1945 #define IGB_LED_ON 0 1946 1947 static int igb_set_phys_id(struct net_device *netdev, 1948 enum ethtool_phys_id_state state) 1949 { 1950 struct igb_adapter *adapter = netdev_priv(netdev); 1951 struct e1000_hw *hw = &adapter->hw; 1952 1953 switch (state) { 1954 case ETHTOOL_ID_ACTIVE: 1955 igb_blink_led(hw); 1956 return 2; 1957 case ETHTOOL_ID_ON: 1958 igb_blink_led(hw); 1959 break; 1960 case ETHTOOL_ID_OFF: 1961 igb_led_off(hw); 1962 break; 1963 case ETHTOOL_ID_INACTIVE: 1964 igb_led_off(hw); 1965 clear_bit(IGB_LED_ON, &adapter->led_status); 1966 igb_cleanup_led(hw); 1967 break; 1968 } 1969 1970 return 0; 1971 } 1972 1973 static int igb_set_coalesce(struct net_device *netdev, 1974 struct ethtool_coalesce *ec) 1975 { 1976 struct igb_adapter *adapter = netdev_priv(netdev); 1977 int i; 1978 1979 if ((ec->rx_coalesce_usecs > IGB_MAX_ITR_USECS) || 1980 ((ec->rx_coalesce_usecs > 3) && 1981 (ec->rx_coalesce_usecs < IGB_MIN_ITR_USECS)) || 1982 (ec->rx_coalesce_usecs == 2)) 1983 return -EINVAL; 1984 1985 if ((ec->tx_coalesce_usecs > IGB_MAX_ITR_USECS) || 1986 ((ec->tx_coalesce_usecs > 3) && 1987 (ec->tx_coalesce_usecs < IGB_MIN_ITR_USECS)) || 1988 (ec->tx_coalesce_usecs == 2)) 1989 return -EINVAL; 1990 1991 if ((adapter->flags & IGB_FLAG_QUEUE_PAIRS) && ec->tx_coalesce_usecs) 1992 return -EINVAL; 1993 1994 /* If ITR is disabled, disable DMAC */ 1995 if (ec->rx_coalesce_usecs == 0) { 1996 if (adapter->flags & IGB_FLAG_DMAC) 1997 adapter->flags &= ~IGB_FLAG_DMAC; 1998 } 1999 2000 /* convert to rate of irq's per second */ 2001 if (ec->rx_coalesce_usecs && ec->rx_coalesce_usecs <= 3) 2002 adapter->rx_itr_setting = ec->rx_coalesce_usecs; 2003 else 2004 adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2; 2005 2006 /* convert to rate of irq's per second */ 2007 if (adapter->flags & IGB_FLAG_QUEUE_PAIRS) 2008 adapter->tx_itr_setting = adapter->rx_itr_setting; 2009 else if (ec->tx_coalesce_usecs && ec->tx_coalesce_usecs <= 3) 2010 adapter->tx_itr_setting = ec->tx_coalesce_usecs; 2011 else 2012 adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2; 2013 2014 for (i = 0; i < adapter->num_q_vectors; i++) { 2015 struct igb_q_vector *q_vector = adapter->q_vector[i]; 2016 q_vector->tx.work_limit = adapter->tx_work_limit; 2017 if (q_vector->rx.ring) 2018 q_vector->itr_val = adapter->rx_itr_setting; 2019 else 2020 q_vector->itr_val = adapter->tx_itr_setting; 2021 if (q_vector->itr_val && q_vector->itr_val <= 3) 2022 q_vector->itr_val = IGB_START_ITR; 2023 q_vector->set_itr = 1; 2024 } 2025 2026 return 0; 2027 } 2028 2029 static int igb_get_coalesce(struct net_device *netdev, 2030 struct ethtool_coalesce *ec) 2031 { 2032 struct igb_adapter *adapter = netdev_priv(netdev); 2033 2034 if (adapter->rx_itr_setting <= 3) 2035 ec->rx_coalesce_usecs = adapter->rx_itr_setting; 2036 else 2037 ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2; 2038 2039 if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS)) { 2040 if (adapter->tx_itr_setting <= 3) 2041 ec->tx_coalesce_usecs = adapter->tx_itr_setting; 2042 else 2043 ec->tx_coalesce_usecs = adapter->tx_itr_setting >> 2; 2044 } 2045 2046 return 0; 2047 } 2048 2049 static int igb_nway_reset(struct net_device *netdev) 2050 { 2051 struct igb_adapter *adapter = netdev_priv(netdev); 2052 if (netif_running(netdev)) 2053 igb_reinit_locked(adapter); 2054 return 0; 2055 } 2056 2057 static int igb_get_sset_count(struct net_device *netdev, int sset) 2058 { 2059 switch (sset) { 2060 case ETH_SS_STATS: 2061 return IGB_STATS_LEN; 2062 case ETH_SS_TEST: 2063 return IGB_TEST_LEN; 2064 default: 2065 return -ENOTSUPP; 2066 } 2067 } 2068 2069 static void igb_get_ethtool_stats(struct net_device *netdev, 2070 struct ethtool_stats *stats, u64 *data) 2071 { 2072 struct igb_adapter *adapter = netdev_priv(netdev); 2073 struct rtnl_link_stats64 *net_stats = &adapter->stats64; 2074 unsigned int start; 2075 struct igb_ring *ring; 2076 int i, j; 2077 char *p; 2078 2079 spin_lock(&adapter->stats64_lock); 2080 igb_update_stats(adapter, net_stats); 2081 2082 for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) { 2083 p = (char *)adapter + igb_gstrings_stats[i].stat_offset; 2084 data[i] = (igb_gstrings_stats[i].sizeof_stat == 2085 sizeof(u64)) ? *(u64 *)p : *(u32 *)p; 2086 } 2087 for (j = 0; j < IGB_NETDEV_STATS_LEN; j++, i++) { 2088 p = (char *)net_stats + igb_gstrings_net_stats[j].stat_offset; 2089 data[i] = (igb_gstrings_net_stats[j].sizeof_stat == 2090 sizeof(u64)) ? *(u64 *)p : *(u32 *)p; 2091 } 2092 for (j = 0; j < adapter->num_tx_queues; j++) { 2093 u64 restart2; 2094 2095 ring = adapter->tx_ring[j]; 2096 do { 2097 start = u64_stats_fetch_begin_bh(&ring->tx_syncp); 2098 data[i] = ring->tx_stats.packets; 2099 data[i+1] = ring->tx_stats.bytes; 2100 data[i+2] = ring->tx_stats.restart_queue; 2101 } while (u64_stats_fetch_retry_bh(&ring->tx_syncp, start)); 2102 do { 2103 start = u64_stats_fetch_begin_bh(&ring->tx_syncp2); 2104 restart2 = ring->tx_stats.restart_queue2; 2105 } while (u64_stats_fetch_retry_bh(&ring->tx_syncp2, start)); 2106 data[i+2] += restart2; 2107 2108 i += IGB_TX_QUEUE_STATS_LEN; 2109 } 2110 for (j = 0; j < adapter->num_rx_queues; j++) { 2111 ring = adapter->rx_ring[j]; 2112 do { 2113 start = u64_stats_fetch_begin_bh(&ring->rx_syncp); 2114 data[i] = ring->rx_stats.packets; 2115 data[i+1] = ring->rx_stats.bytes; 2116 data[i+2] = ring->rx_stats.drops; 2117 data[i+3] = ring->rx_stats.csum_err; 2118 data[i+4] = ring->rx_stats.alloc_failed; 2119 } while (u64_stats_fetch_retry_bh(&ring->rx_syncp, start)); 2120 i += IGB_RX_QUEUE_STATS_LEN; 2121 } 2122 spin_unlock(&adapter->stats64_lock); 2123 } 2124 2125 static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data) 2126 { 2127 struct igb_adapter *adapter = netdev_priv(netdev); 2128 u8 *p = data; 2129 int i; 2130 2131 switch (stringset) { 2132 case ETH_SS_TEST: 2133 memcpy(data, *igb_gstrings_test, 2134 IGB_TEST_LEN*ETH_GSTRING_LEN); 2135 break; 2136 case ETH_SS_STATS: 2137 for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) { 2138 memcpy(p, igb_gstrings_stats[i].stat_string, 2139 ETH_GSTRING_LEN); 2140 p += ETH_GSTRING_LEN; 2141 } 2142 for (i = 0; i < IGB_NETDEV_STATS_LEN; i++) { 2143 memcpy(p, igb_gstrings_net_stats[i].stat_string, 2144 ETH_GSTRING_LEN); 2145 p += ETH_GSTRING_LEN; 2146 } 2147 for (i = 0; i < adapter->num_tx_queues; i++) { 2148 sprintf(p, "tx_queue_%u_packets", i); 2149 p += ETH_GSTRING_LEN; 2150 sprintf(p, "tx_queue_%u_bytes", i); 2151 p += ETH_GSTRING_LEN; 2152 sprintf(p, "tx_queue_%u_restart", i); 2153 p += ETH_GSTRING_LEN; 2154 } 2155 for (i = 0; i < adapter->num_rx_queues; i++) { 2156 sprintf(p, "rx_queue_%u_packets", i); 2157 p += ETH_GSTRING_LEN; 2158 sprintf(p, "rx_queue_%u_bytes", i); 2159 p += ETH_GSTRING_LEN; 2160 sprintf(p, "rx_queue_%u_drops", i); 2161 p += ETH_GSTRING_LEN; 2162 sprintf(p, "rx_queue_%u_csum_err", i); 2163 p += ETH_GSTRING_LEN; 2164 sprintf(p, "rx_queue_%u_alloc_failed", i); 2165 p += ETH_GSTRING_LEN; 2166 } 2167 /* BUG_ON(p - data != IGB_STATS_LEN * ETH_GSTRING_LEN); */ 2168 break; 2169 } 2170 } 2171 2172 static int igb_ethtool_begin(struct net_device *netdev) 2173 { 2174 struct igb_adapter *adapter = netdev_priv(netdev); 2175 pm_runtime_get_sync(&adapter->pdev->dev); 2176 return 0; 2177 } 2178 2179 static void igb_ethtool_complete(struct net_device *netdev) 2180 { 2181 struct igb_adapter *adapter = netdev_priv(netdev); 2182 pm_runtime_put(&adapter->pdev->dev); 2183 } 2184 2185 static const struct ethtool_ops igb_ethtool_ops = { 2186 .get_settings = igb_get_settings, 2187 .set_settings = igb_set_settings, 2188 .get_drvinfo = igb_get_drvinfo, 2189 .get_regs_len = igb_get_regs_len, 2190 .get_regs = igb_get_regs, 2191 .get_wol = igb_get_wol, 2192 .set_wol = igb_set_wol, 2193 .get_msglevel = igb_get_msglevel, 2194 .set_msglevel = igb_set_msglevel, 2195 .nway_reset = igb_nway_reset, 2196 .get_link = igb_get_link, 2197 .get_eeprom_len = igb_get_eeprom_len, 2198 .get_eeprom = igb_get_eeprom, 2199 .set_eeprom = igb_set_eeprom, 2200 .get_ringparam = igb_get_ringparam, 2201 .set_ringparam = igb_set_ringparam, 2202 .get_pauseparam = igb_get_pauseparam, 2203 .set_pauseparam = igb_set_pauseparam, 2204 .self_test = igb_diag_test, 2205 .get_strings = igb_get_strings, 2206 .set_phys_id = igb_set_phys_id, 2207 .get_sset_count = igb_get_sset_count, 2208 .get_ethtool_stats = igb_get_ethtool_stats, 2209 .get_coalesce = igb_get_coalesce, 2210 .set_coalesce = igb_set_coalesce, 2211 .begin = igb_ethtool_begin, 2212 .complete = igb_ethtool_complete, 2213 }; 2214 2215 void igb_set_ethtool_ops(struct net_device *netdev) 2216 { 2217 SET_ETHTOOL_OPS(netdev, &igb_ethtool_ops); 2218 } 2219