// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018 Intel Corporation */

/* ethtool support for igc */
#include <linux/pm_runtime.h>

#include "igc.h"

static const char igc_priv_flags_strings[][ETH_GSTRING_LEN] = {
#define IGC_PRIV_FLAGS_LEGACY_RX	BIT(0)
	"legacy-rx",
};

#define IGC_PRIV_FLAGS_STR_LEN ARRAY_SIZE(igc_priv_flags_strings)

static void igc_get_drvinfo(struct net_device *netdev,
			    struct ethtool_drvinfo *drvinfo)
{
	struct igc_adapter *adapter = netdev_priv(netdev);

	strlcpy(drvinfo->driver, igc_driver_name, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, igc_driver_version, sizeof(drvinfo->version));

	/* add fw_version here */
	strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
		sizeof(drvinfo->bus_info));

	drvinfo->n_priv_flags = IGC_PRIV_FLAGS_STR_LEN;
}

static int igc_get_regs_len(struct net_device *netdev)
{
	return IGC_REGS_LEN * sizeof(u32);
}

static void igc_get_regs(struct net_device *netdev,
			 struct ethtool_regs *regs, void *p)
{
	struct igc_adapter *adapter = netdev_priv(netdev);
	struct igc_hw *hw = &adapter->hw;
	u32 *regs_buff = p;
	u8 i;

	memset(p, 0, IGC_REGS_LEN * sizeof(u32));

	regs->version = (1u << 24) | (hw->revision_id << 16) | hw->device_id;

	/* General Registers */
	regs_buff[0] = rd32(IGC_CTRL);
	regs_buff[1] = rd32(IGC_STATUS);
	regs_buff[2] = rd32(IGC_CTRL_EXT);
	regs_buff[3] = rd32(IGC_MDIC);
	regs_buff[4] = rd32(IGC_CONNSW);

	/* NVM Register */
	regs_buff[5] = rd32(IGC_EECD);

	/* Interrupt */
	/* Reading EICS for EICR because they read the
	 * same but EICS does not clear on read
	 */
	regs_buff[6] = rd32(IGC_EICS);
	regs_buff[7] = rd32(IGC_EICS);
	regs_buff[8] = rd32(IGC_EIMS);
	regs_buff[9] = rd32(IGC_EIMC);
	regs_buff[10] = rd32(IGC_EIAC);
	regs_buff[11] = rd32(IGC_EIAM);
	/* Reading ICS for ICR because they read the
	 * same but ICS does not clear on read
	 */
	regs_buff[12] = rd32(IGC_ICS);
	regs_buff[13] = rd32(IGC_ICS);
	regs_buff[14] = rd32(IGC_IMS);
	regs_buff[15] = rd32(IGC_IMC);
	regs_buff[16] = rd32(IGC_IAC);
	regs_buff[17] = rd32(IGC_IAM);

	/* Flow Control */
	regs_buff[18] = rd32(IGC_FCAL);
	regs_buff[19] = rd32(IGC_FCAH);
	regs_buff[20] = rd32(IGC_FCTTV);
	regs_buff[21] = rd32(IGC_FCRTL);
	regs_buff[22] = rd32(IGC_FCRTH);
	regs_buff[23] = rd32(IGC_FCRTV);

	/* Receive */
	regs_buff[24] = rd32(IGC_RCTL);
	regs_buff[25] = rd32(IGC_RXCSUM);
	regs_buff[26] = rd32(IGC_RLPML);
	regs_buff[27] = rd32(IGC_RFCTL);

	/* Transmit */
	regs_buff[28] = rd32(IGC_TCTL);
	regs_buff[29] = rd32(IGC_TIPG);

	/* Wake Up */

	/* MAC */

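	/* The statistics slots below are copied from the driver's software
	 * counters in adapter->stats rather than read from hardware, so they
	 * reflect whatever the driver has accumulated so far.
	 */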
	/* Statistics */
	regs_buff[30] = adapter->stats.crcerrs;
	regs_buff[31] = adapter->stats.algnerrc;
	regs_buff[32] = adapter->stats.symerrs;
	regs_buff[33] = adapter->stats.rxerrc;
	regs_buff[34] = adapter->stats.mpc;
	regs_buff[35] = adapter->stats.scc;
	regs_buff[36] = adapter->stats.ecol;
	regs_buff[37] = adapter->stats.mcc;
	regs_buff[38] = adapter->stats.latecol;
	regs_buff[39] = adapter->stats.colc;
	regs_buff[40] = adapter->stats.dc;
	regs_buff[41] = adapter->stats.tncrs;
	regs_buff[42] = adapter->stats.sec;
	regs_buff[43] = adapter->stats.htdpmc;
	regs_buff[44] = adapter->stats.rlec;
	regs_buff[45] = adapter->stats.xonrxc;
	regs_buff[46] = adapter->stats.xontxc;
	regs_buff[47] = adapter->stats.xoffrxc;
	regs_buff[48] = adapter->stats.xofftxc;
	regs_buff[49] = adapter->stats.fcruc;
	regs_buff[50] = adapter->stats.prc64;
	regs_buff[51] = adapter->stats.prc127;
	regs_buff[52] = adapter->stats.prc255;
	regs_buff[53] = adapter->stats.prc511;
	regs_buff[54] = adapter->stats.prc1023;
	regs_buff[55] = adapter->stats.prc1522;
	regs_buff[56] = adapter->stats.gprc;
	regs_buff[57] = adapter->stats.bprc;
	regs_buff[58] = adapter->stats.mprc;
	regs_buff[59] = adapter->stats.gptc;
	regs_buff[60] = adapter->stats.gorc;
	regs_buff[61] = adapter->stats.gotc;
	regs_buff[62] = adapter->stats.rnbc;
	regs_buff[63] = adapter->stats.ruc;
	regs_buff[64] = adapter->stats.rfc;
	regs_buff[65] = adapter->stats.roc;
	regs_buff[66] = adapter->stats.rjc;
	regs_buff[67] = adapter->stats.mgprc;
	regs_buff[68] = adapter->stats.mgpdc;
	regs_buff[69] = adapter->stats.mgptc;
	regs_buff[70] = adapter->stats.tor;
	regs_buff[71] = adapter->stats.tot;
	regs_buff[72] = adapter->stats.tpr;
	regs_buff[73] = adapter->stats.tpt;
	regs_buff[74] = adapter->stats.ptc64;
	regs_buff[75] = adapter->stats.ptc127;
	regs_buff[76] = adapter->stats.ptc255;
	regs_buff[77] = adapter->stats.ptc511;
	regs_buff[78] = adapter->stats.ptc1023;
	regs_buff[79] = adapter->stats.ptc1522;
	regs_buff[80] = adapter->stats.mptc;
	regs_buff[81] = adapter->stats.bptc;
	regs_buff[82] = adapter->stats.tsctc;
	regs_buff[83] = adapter->stats.iac;
	regs_buff[84] = adapter->stats.rpthc;
	regs_buff[85] = adapter->stats.hgptc;
	regs_buff[86] = adapter->stats.hgorc;
	regs_buff[87] = adapter->stats.hgotc;
	regs_buff[88] = adapter->stats.lenerrs;
	regs_buff[89] = adapter->stats.scvpc;
	regs_buff[90] = adapter->stats.hrmpc;

	for (i = 0; i < 4; i++)
		regs_buff[91 + i] = rd32(IGC_SRRCTL(i));
	for (i = 0; i < 4; i++)
		regs_buff[95 + i] = rd32(IGC_PSRTYPE(i));
	for (i = 0; i < 4; i++)
		regs_buff[99 + i] = rd32(IGC_RDBAL(i));
	for (i = 0; i < 4; i++)
		regs_buff[103 + i] = rd32(IGC_RDBAH(i));
	for (i = 0; i < 4; i++)
		regs_buff[107 + i] = rd32(IGC_RDLEN(i));
	for (i = 0; i < 4; i++)
		regs_buff[111 + i] = rd32(IGC_RDH(i));
	for (i = 0; i < 4; i++)
		regs_buff[115 + i] = rd32(IGC_RDT(i));
	for (i = 0; i < 4; i++)
		regs_buff[119 + i] = rd32(IGC_RXDCTL(i));

	for (i = 0; i < 10; i++)
		regs_buff[123 + i] = rd32(IGC_EITR(i));
	for (i = 0; i < 16; i++)
		regs_buff[139 + i] = rd32(IGC_RAL(i));
	for (i = 0; i < 16; i++)
		regs_buff[145 + i] = rd32(IGC_RAH(i));

	for (i = 0; i < 4; i++)
		regs_buff[149 + i] = rd32(IGC_TDBAL(i));
	for (i = 0; i < 4; i++)
		regs_buff[152 + i] = rd32(IGC_TDBAH(i));
	for (i = 0; i < 4; i++)
		regs_buff[156 + i] = rd32(IGC_TDLEN(i));
	for (i = 0; i < 4; i++)
		regs_buff[160 + i] = rd32(IGC_TDH(i));
	for (i = 0; i < 4; i++)
		regs_buff[164 + i] = rd32(IGC_TDT(i));
	for (i = 0; i < 4; i++)
		regs_buff[168 + i] = rd32(IGC_TXDCTL(i));
}

static u32 igc_get_msglevel(struct net_device *netdev)
{
	struct igc_adapter *adapter = netdev_priv(netdev);

	return adapter->msg_enable;
}

static void igc_set_msglevel(struct net_device *netdev, u32 data)
{
	struct igc_adapter *adapter = netdev_priv(netdev);

	adapter->msg_enable = data;
}

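/* ETHTOOL_NWAY_RST ("ethtool -r"): restart autonegotiation by reinitializing
 * the interface if it is currently running.
 */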
static int igc_nway_reset(struct net_device *netdev)
{
	struct igc_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev))
		igc_reinit_locked(adapter);
	return 0;
}

static u32 igc_get_link(struct net_device *netdev)
{
	struct igc_adapter *adapter = netdev_priv(netdev);
	struct igc_mac_info *mac = &adapter->hw.mac;

	/* If the link is not reported up to netdev, interrupts are disabled,
	 * and so the physical link state may have changed since we last
	 * looked. Set get_link_status to make sure that the true link
	 * state is interrogated, rather than pulling a cached and possibly
	 * stale link state from the driver.
	 */
	if (!netif_carrier_ok(netdev))
		mac->get_link_status = 1;

	return igc_has_link(adapter);
}

static int igc_get_eeprom_len(struct net_device *netdev)
{
	struct igc_adapter *adapter = netdev_priv(netdev);

	return adapter->hw.nvm.word_size * 2;
}

static int igc_get_eeprom(struct net_device *netdev,
			  struct ethtool_eeprom *eeprom, u8 *bytes)
{
	struct igc_adapter *adapter = netdev_priv(netdev);
	struct igc_hw *hw = &adapter->hw;
	int first_word, last_word;
	u16 *eeprom_buff;
	int ret_val = 0;
	u16 i;

	if (eeprom->len == 0)
		return -EINVAL;

	eeprom->magic = hw->vendor_id | (hw->device_id << 16);

	first_word = eeprom->offset >> 1;
	last_word = (eeprom->offset + eeprom->len - 1) >> 1;

	eeprom_buff = kmalloc_array(last_word - first_word + 1, sizeof(u16),
				    GFP_KERNEL);
	if (!eeprom_buff)
		return -ENOMEM;

	if (hw->nvm.type == igc_nvm_eeprom_spi) {
		ret_val = hw->nvm.ops.read(hw, first_word,
					   last_word - first_word + 1,
					   eeprom_buff);
	} else {
		for (i = 0; i < last_word - first_word + 1; i++) {
			ret_val = hw->nvm.ops.read(hw, first_word + i, 1,
						   &eeprom_buff[i]);
			if (ret_val)
				break;
		}
	}

	/* Device's eeprom is always little-endian, word addressable */
	for (i = 0; i < last_word - first_word + 1; i++)
		le16_to_cpus(&eeprom_buff[i]);

	memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1),
	       eeprom->len);
	kfree(eeprom_buff);

	return ret_val;
}

static int igc_set_eeprom(struct net_device *netdev,
			  struct ethtool_eeprom *eeprom, u8 *bytes)
{
	struct igc_adapter *adapter = netdev_priv(netdev);
	struct igc_hw *hw = &adapter->hw;
	int max_len, first_word, last_word, ret_val = 0;
	u16 *eeprom_buff;
	void *ptr;
	u16 i;

	if (eeprom->len == 0)
		return -EOPNOTSUPP;

	if (hw->mac.type >= igc_i225 &&
	    !igc_get_flash_presence_i225(hw)) {
		return -EOPNOTSUPP;
	}

	if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16)))
		return -EFAULT;

	max_len = hw->nvm.word_size * 2;

	first_word = eeprom->offset >> 1;
	last_word = (eeprom->offset + eeprom->len - 1) >> 1;
	eeprom_buff = kmalloc(max_len, GFP_KERNEL);
	if (!eeprom_buff)
		return -ENOMEM;

	ptr = (void *)eeprom_buff;

	if (eeprom->offset & 1) {
		/* need read/modify/write of first changed EEPROM word
		 * only the second byte of the word is being modified
		 */
		ret_val = hw->nvm.ops.read(hw, first_word, 1,
					   &eeprom_buff[0]);
		ptr++;
	}
	if (((eeprom->offset + eeprom->len) & 1) && ret_val == 0) {
		/* need read/modify/write of last changed EEPROM word
		 * only the first byte of the word is being modified
		 */
		ret_val = hw->nvm.ops.read(hw, last_word, 1,
					   &eeprom_buff[last_word - first_word]);
	}

	/* Device's eeprom is always little-endian, word addressable */
	for (i = 0; i < last_word - first_word + 1; i++)
		le16_to_cpus(&eeprom_buff[i]);

	memcpy(ptr, bytes, eeprom->len);

	for (i = 0; i < last_word - first_word + 1; i++)
		eeprom_buff[i] = cpu_to_le16(eeprom_buff[i]);

	ret_val = hw->nvm.ops.write(hw, first_word,
				    last_word - first_word + 1, eeprom_buff);

	/* Update the checksum if nvm write succeeded */
	if (ret_val == 0)
		hw->nvm.ops.update(hw);

	/* check if need: igc_set_fw_version(adapter); */
	kfree(eeprom_buff);
	return ret_val;
}

static void igc_get_ringparam(struct net_device *netdev,
			      struct ethtool_ringparam *ring)
{
	struct igc_adapter *adapter = netdev_priv(netdev);

	ring->rx_max_pending = IGC_MAX_RXD;
	ring->tx_max_pending = IGC_MAX_TXD;
	ring->rx_pending = adapter->rx_ring_count;
	ring->tx_pending = adapter->tx_ring_count;
}

static int igc_set_ringparam(struct net_device *netdev,
			     struct ethtool_ringparam *ring)
{
	struct igc_adapter *adapter = netdev_priv(netdev);
	struct igc_ring *temp_ring;
	u16 new_rx_count, new_tx_count;
	int i, err = 0;

	if (ring->rx_mini_pending || ring->rx_jumbo_pending)
		return -EINVAL;

	new_rx_count = min_t(u32, ring->rx_pending, IGC_MAX_RXD);
	new_rx_count = max_t(u16, new_rx_count, IGC_MIN_RXD);
	new_rx_count = ALIGN(new_rx_count, REQ_RX_DESCRIPTOR_MULTIPLE);

	new_tx_count = min_t(u32, ring->tx_pending, IGC_MAX_TXD);
	new_tx_count = max_t(u16, new_tx_count, IGC_MIN_TXD);
	new_tx_count = ALIGN(new_tx_count, REQ_TX_DESCRIPTOR_MULTIPLE);

	if (new_tx_count == adapter->tx_ring_count &&
	    new_rx_count == adapter->rx_ring_count) {
		/* nothing to do */
		return 0;
	}

	while (test_and_set_bit(__IGC_RESETTING, &adapter->state))
		usleep_range(1000, 2000);

	if (!netif_running(adapter->netdev)) {
		for (i = 0; i < adapter->num_tx_queues; i++)
			adapter->tx_ring[i]->count = new_tx_count;
		for (i = 0; i < adapter->num_rx_queues; i++)
			adapter->rx_ring[i]->count = new_rx_count;
		adapter->tx_ring_count = new_tx_count;
		adapter->rx_ring_count = new_rx_count;
		goto clear_reset;
	}

	if (adapter->num_tx_queues > adapter->num_rx_queues)
		temp_ring = vmalloc(array_size(sizeof(struct igc_ring),
					       adapter->num_tx_queues));
	else
		temp_ring = vmalloc(array_size(sizeof(struct igc_ring),
					       adapter->num_rx_queues));

	if (!temp_ring) {
		err = -ENOMEM;
		goto clear_reset;
	}

	igc_down(adapter);

	/* We can't just free everything and then setup again,
	 * because the ISRs in MSI-X mode get passed pointers
	 * to the Tx and Rx ring structs.
	 */
	if (new_tx_count != adapter->tx_ring_count) {
		for (i = 0; i < adapter->num_tx_queues; i++) {
			memcpy(&temp_ring[i], adapter->tx_ring[i],
			       sizeof(struct igc_ring));

			temp_ring[i].count = new_tx_count;
			err = igc_setup_tx_resources(&temp_ring[i]);
			if (err) {
				while (i) {
					i--;
					igc_free_tx_resources(&temp_ring[i]);
				}
				goto err_setup;
			}
		}

		for (i = 0; i < adapter->num_tx_queues; i++) {
			igc_free_tx_resources(adapter->tx_ring[i]);

			memcpy(adapter->tx_ring[i], &temp_ring[i],
			       sizeof(struct igc_ring));
		}

		adapter->tx_ring_count = new_tx_count;
	}

	if (new_rx_count != adapter->rx_ring_count) {
		for (i = 0; i < adapter->num_rx_queues; i++) {
			memcpy(&temp_ring[i], adapter->rx_ring[i],
			       sizeof(struct igc_ring));

			temp_ring[i].count = new_rx_count;
			err = igc_setup_rx_resources(&temp_ring[i]);
			if (err) {
				while (i) {
					i--;
					igc_free_rx_resources(&temp_ring[i]);
				}
				goto err_setup;
			}
		}

		for (i = 0; i < adapter->num_rx_queues; i++) {
			igc_free_rx_resources(adapter->rx_ring[i]);

			memcpy(adapter->rx_ring[i], &temp_ring[i],
			       sizeof(struct igc_ring));
		}

		adapter->rx_ring_count = new_rx_count;
	}
err_setup:
	igc_up(adapter);
	vfree(temp_ring);
clear_reset:
	clear_bit(__IGC_RESETTING, &adapter->state);
	return err;
}

static void igc_get_pauseparam(struct net_device *netdev,
			       struct ethtool_pauseparam *pause)
{
	struct igc_adapter *adapter = netdev_priv(netdev);
	struct igc_hw *hw = &adapter->hw;

	pause->autoneg =
		(adapter->fc_autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE);

	if (hw->fc.current_mode == igc_fc_rx_pause) {
		pause->rx_pause = 1;
	} else if (hw->fc.current_mode == igc_fc_tx_pause) {
		pause->tx_pause = 1;
	} else if (hw->fc.current_mode == igc_fc_full) {
		pause->rx_pause = 1;
		pause->tx_pause = 1;
	}
}

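/* With pause autoneg enabled, the new flow-control mode is obtained by
 * renegotiating the link (down/up or reset); otherwise the requested mode
 * is forced directly on the MAC.
 */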
static int igc_set_pauseparam(struct net_device *netdev,
			      struct ethtool_pauseparam *pause)
{
	struct igc_adapter *adapter = netdev_priv(netdev);
	struct igc_hw *hw = &adapter->hw;
	int retval = 0;

	adapter->fc_autoneg = pause->autoneg;

	while (test_and_set_bit(__IGC_RESETTING, &adapter->state))
		usleep_range(1000, 2000);

	if (adapter->fc_autoneg == AUTONEG_ENABLE) {
		hw->fc.requested_mode = igc_fc_default;
		if (netif_running(adapter->netdev)) {
			igc_down(adapter);
			igc_up(adapter);
		} else {
			igc_reset(adapter);
		}
	} else {
		if (pause->rx_pause && pause->tx_pause)
			hw->fc.requested_mode = igc_fc_full;
		else if (pause->rx_pause && !pause->tx_pause)
			hw->fc.requested_mode = igc_fc_rx_pause;
		else if (!pause->rx_pause && pause->tx_pause)
			hw->fc.requested_mode = igc_fc_tx_pause;
		else if (!pause->rx_pause && !pause->tx_pause)
			hw->fc.requested_mode = igc_fc_none;

		hw->fc.current_mode = hw->fc.requested_mode;

		retval = ((hw->phy.media_type == igc_media_type_copper) ?
			  igc_force_mac_fc(hw) : igc_setup_link(hw));
	}

	clear_bit(__IGC_RESETTING, &adapter->state);
	return retval;
}

static int igc_get_coalesce(struct net_device *netdev,
			    struct ethtool_coalesce *ec)
{
	struct igc_adapter *adapter = netdev_priv(netdev);

	if (adapter->rx_itr_setting <= 3)
		ec->rx_coalesce_usecs = adapter->rx_itr_setting;
	else
		ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2;

	if (!(adapter->flags & IGC_FLAG_QUEUE_PAIRS)) {
		if (adapter->tx_itr_setting <= 3)
			ec->tx_coalesce_usecs = adapter->tx_itr_setting;
		else
			ec->tx_coalesce_usecs = adapter->tx_itr_setting >> 2;
	}

	return 0;
}

static int igc_set_coalesce(struct net_device *netdev,
			    struct ethtool_coalesce *ec)
{
	struct igc_adapter *adapter = netdev_priv(netdev);
	int i;

	if (ec->rx_max_coalesced_frames ||
	    ec->rx_coalesce_usecs_irq ||
	    ec->rx_max_coalesced_frames_irq ||
	    ec->tx_max_coalesced_frames ||
	    ec->tx_coalesce_usecs_irq ||
	    ec->stats_block_coalesce_usecs ||
	    ec->use_adaptive_rx_coalesce ||
	    ec->use_adaptive_tx_coalesce ||
	    ec->pkt_rate_low ||
	    ec->rx_coalesce_usecs_low ||
	    ec->rx_max_coalesced_frames_low ||
	    ec->tx_coalesce_usecs_low ||
	    ec->tx_max_coalesced_frames_low ||
	    ec->pkt_rate_high ||
	    ec->rx_coalesce_usecs_high ||
	    ec->rx_max_coalesced_frames_high ||
	    ec->tx_coalesce_usecs_high ||
	    ec->tx_max_coalesced_frames_high ||
	    ec->rate_sample_interval)
		return -ENOTSUPP;

	if (ec->rx_coalesce_usecs > IGC_MAX_ITR_USECS ||
	    (ec->rx_coalesce_usecs > 3 &&
	     ec->rx_coalesce_usecs < IGC_MIN_ITR_USECS) ||
	    ec->rx_coalesce_usecs == 2)
		return -EINVAL;

	if (ec->tx_coalesce_usecs > IGC_MAX_ITR_USECS ||
	    (ec->tx_coalesce_usecs > 3 &&
	     ec->tx_coalesce_usecs < IGC_MIN_ITR_USECS) ||
	    ec->tx_coalesce_usecs == 2)
		return -EINVAL;

	if ((adapter->flags & IGC_FLAG_QUEUE_PAIRS) && ec->tx_coalesce_usecs)
		return -EINVAL;

	/* If ITR is disabled, disable DMAC */
	if (ec->rx_coalesce_usecs == 0) {
		if (adapter->flags & IGC_FLAG_DMAC)
			adapter->flags &= ~IGC_FLAG_DMAC;
	}

	/* convert to rate of irq's per second */
	if (ec->rx_coalesce_usecs && ec->rx_coalesce_usecs <= 3)
		adapter->rx_itr_setting = ec->rx_coalesce_usecs;
	else
		adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2;

	/* convert to rate of irq's per second */
	if (adapter->flags & IGC_FLAG_QUEUE_PAIRS)
		adapter->tx_itr_setting = adapter->rx_itr_setting;
	else if (ec->tx_coalesce_usecs && ec->tx_coalesce_usecs <= 3)
		adapter->tx_itr_setting = ec->tx_coalesce_usecs;
	else
		adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2;

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igc_q_vector *q_vector = adapter->q_vector[i];

		q_vector->tx.work_limit = adapter->tx_work_limit;
		if (q_vector->rx.ring)
			q_vector->itr_val = adapter->rx_itr_setting;
		else
			q_vector->itr_val = adapter->tx_itr_setting;
		if (q_vector->itr_val && q_vector->itr_val <= 3)
			q_vector->itr_val = IGC_START_ITR;
		q_vector->set_itr = 1;
	}

	return 0;
}

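/* Each 32-bit RETA register packs four one-byte redirection-table entries,
 * lowest table index in the least-significant byte, so the table is written
 * four entries per register.
 */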
void igc_write_rss_indir_tbl(struct igc_adapter *adapter)
{
	struct igc_hw *hw = &adapter->hw;
	u32 reg = IGC_RETA(0);
	u32 shift = 0;
	int i = 0;

	while (i < IGC_RETA_SIZE) {
		u32 val = 0;
		int j;

		for (j = 3; j >= 0; j--) {
			val <<= 8;
			val |= adapter->rss_indir_tbl[i + j];
		}

		wr32(reg, val << shift);
		reg += 4;
		i += 4;
	}
}

static u32 igc_get_rxfh_indir_size(struct net_device *netdev)
{
	return IGC_RETA_SIZE;
}

static int igc_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
			u8 *hfunc)
{
	struct igc_adapter *adapter = netdev_priv(netdev);
	int i;

	if (hfunc)
		*hfunc = ETH_RSS_HASH_TOP;
	if (!indir)
		return 0;
	for (i = 0; i < IGC_RETA_SIZE; i++)
		indir[i] = adapter->rss_indir_tbl[i];

	return 0;
}

static int igc_set_rxfh(struct net_device *netdev, const u32 *indir,
			const u8 *key, const u8 hfunc)
{
	struct igc_adapter *adapter = netdev_priv(netdev);
	u32 num_queues;
	int i;

	/* We do not allow change in unsupported parameters */
	if (key ||
	    (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
		return -EOPNOTSUPP;
	if (!indir)
		return 0;

	num_queues = adapter->rss_queues;

	/* Verify user input. */
	for (i = 0; i < IGC_RETA_SIZE; i++)
		if (indir[i] >= num_queues)
			return -EINVAL;

	for (i = 0; i < IGC_RETA_SIZE; i++)
		adapter->rss_indir_tbl[i] = indir[i];

	igc_write_rss_indir_tbl(adapter);

	return 0;
}

static unsigned int igc_max_channels(struct igc_adapter *adapter)
{
	return igc_get_max_rss_queues(adapter);
}

static void igc_get_channels(struct net_device *netdev,
			     struct ethtool_channels *ch)
{
	struct igc_adapter *adapter = netdev_priv(netdev);

	/* Report maximum channels */
	ch->max_combined = igc_max_channels(adapter);

	/* Report info for other vector */
	if (adapter->flags & IGC_FLAG_HAS_MSIX) {
		ch->max_other = NON_Q_VECTORS;
		ch->other_count = NON_Q_VECTORS;
	}

	ch->combined_count = adapter->rss_queues;
}

static int igc_set_channels(struct net_device *netdev,
			    struct ethtool_channels *ch)
{
	struct igc_adapter *adapter = netdev_priv(netdev);
	unsigned int count = ch->combined_count;
	unsigned int max_combined = 0;

	/* Verify they are not requesting separate vectors */
	if (!count || ch->rx_count || ch->tx_count)
		return -EINVAL;

	/* Verify other_count is valid and has not been changed */
	if (ch->other_count != NON_Q_VECTORS)
		return -EINVAL;

	/* Verify the number of channels doesn't exceed hw limits */
	max_combined = igc_max_channels(adapter);
	if (count > max_combined)
		return -EINVAL;

	if (count != adapter->rss_queues) {
		adapter->rss_queues = count;
		igc_set_flag_queue_pairs(adapter, max_combined);

		/* Hardware has to reinitialize queues and interrupts to
		 * match the new configuration.
		 */
		return igc_reinit_queues(adapter);
	}

	return 0;
}

static u32 igc_get_priv_flags(struct net_device *netdev)
{
	struct igc_adapter *adapter = netdev_priv(netdev);
	u32 priv_flags = 0;

	if (adapter->flags & IGC_FLAG_RX_LEGACY)
		priv_flags |= IGC_PRIV_FLAGS_LEGACY_RX;

	return priv_flags;
}

static int igc_set_priv_flags(struct net_device *netdev, u32 priv_flags)
{
	struct igc_adapter *adapter = netdev_priv(netdev);
	unsigned int flags = adapter->flags;

	flags &= ~IGC_FLAG_RX_LEGACY;
	if (priv_flags & IGC_PRIV_FLAGS_LEGACY_RX)
		flags |= IGC_FLAG_RX_LEGACY;

	if (flags != adapter->flags) {
		adapter->flags = flags;

		/* reset interface to repopulate queues */
		if (netif_running(netdev))
			igc_reinit_locked(adapter);
	}

	return 0;
}

static int igc_ethtool_begin(struct net_device *netdev)
{
	struct igc_adapter *adapter = netdev_priv(netdev);

	pm_runtime_get_sync(&adapter->pdev->dev);
	return 0;
}

static void igc_ethtool_complete(struct net_device *netdev)
{
	struct igc_adapter *adapter = netdev_priv(netdev);

	pm_runtime_put(&adapter->pdev->dev);
}

static int igc_get_link_ksettings(struct net_device *netdev,
				  struct ethtool_link_ksettings *cmd)
{
	struct igc_adapter *adapter = netdev_priv(netdev);
	struct igc_hw *hw = &adapter->hw;
	u32 status;
	u32 speed;

	ethtool_link_ksettings_zero_link_mode(cmd, supported);
	ethtool_link_ksettings_zero_link_mode(cmd, advertising);

	/* supported link modes */
	ethtool_link_ksettings_add_link_mode(cmd, supported, 10baseT_Half);
	ethtool_link_ksettings_add_link_mode(cmd, supported, 10baseT_Full);
	ethtool_link_ksettings_add_link_mode(cmd, supported, 100baseT_Half);
	ethtool_link_ksettings_add_link_mode(cmd, supported, 100baseT_Full);
	ethtool_link_ksettings_add_link_mode(cmd, supported, 1000baseT_Full);
	ethtool_link_ksettings_add_link_mode(cmd, supported, 2500baseT_Full);

	/* twisted pair */
	cmd->base.port = PORT_TP;
	cmd->base.phy_address = hw->phy.addr;

	/* advertising link modes */
	ethtool_link_ksettings_add_link_mode(cmd, advertising, 10baseT_Half);
	ethtool_link_ksettings_add_link_mode(cmd, advertising, 10baseT_Full);
	ethtool_link_ksettings_add_link_mode(cmd, advertising, 100baseT_Half);
	ethtool_link_ksettings_add_link_mode(cmd, advertising, 100baseT_Full);
	ethtool_link_ksettings_add_link_mode(cmd, advertising, 1000baseT_Full);
	ethtool_link_ksettings_add_link_mode(cmd, advertising, 2500baseT_Full);

	/* set autoneg settings */
	if (hw->mac.autoneg == 1) {
		ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     Autoneg);
	}

	switch (hw->fc.requested_mode) {
	case igc_fc_full:
		ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause);
		break;
	case igc_fc_rx_pause:
		ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause);
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     Asym_Pause);
		break;
	case igc_fc_tx_pause:
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     Asym_Pause);
		break;
	default:
		ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause);
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     Asym_Pause);
	}

	status = rd32(IGC_STATUS);

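	/* Decode the current speed and duplex from the STATUS register while
	 * the link is up; otherwise report them as unknown.
	 */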
	if (status & IGC_STATUS_LU) {
		if (status & IGC_STATUS_SPEED_1000) {
			/* For I225, STATUS will indicate 1G speed in both
			 * 1 Gbps and 2.5 Gbps link modes.
			 * An additional bit is used
			 * to differentiate between 1 Gbps and 2.5 Gbps.
			 */
			if (hw->mac.type == igc_i225 &&
			    (status & IGC_STATUS_SPEED_2500)) {
				speed = SPEED_2500;
				hw_dbg("2500 Mbps, ");
			} else {
				speed = SPEED_1000;
				hw_dbg("1000 Mbps, ");
			}
		} else if (status & IGC_STATUS_SPEED_100) {
			speed = SPEED_100;
			hw_dbg("100 Mbps, ");
		} else {
			speed = SPEED_10;
			hw_dbg("10 Mbps, ");
		}
		if ((status & IGC_STATUS_FD) ||
		    hw->phy.media_type != igc_media_type_copper)
			cmd->base.duplex = DUPLEX_FULL;
		else
			cmd->base.duplex = DUPLEX_HALF;
	} else {
		speed = SPEED_UNKNOWN;
		cmd->base.duplex = DUPLEX_UNKNOWN;
	}
	cmd->base.speed = speed;
	if (hw->mac.autoneg)
		cmd->base.autoneg = AUTONEG_ENABLE;
	else
		cmd->base.autoneg = AUTONEG_DISABLE;

	/* MDI-X => 2; MDI => 1; Invalid => 0 */
	if (hw->phy.media_type == igc_media_type_copper)
		cmd->base.eth_tp_mdix = hw->phy.is_mdix ? ETH_TP_MDI_X :
							  ETH_TP_MDI;
	else
		cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID;

	if (hw->phy.mdix == AUTO_ALL_MODES)
		cmd->base.eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
	else
		cmd->base.eth_tp_mdix_ctrl = hw->phy.mdix;

	return 0;
}

static int igc_set_link_ksettings(struct net_device *netdev,
				  const struct ethtool_link_ksettings *cmd)
{
	struct igc_adapter *adapter = netdev_priv(netdev);
	struct igc_hw *hw = &adapter->hw;
	u32 advertising;

	/* When the adapter is in resetting mode, autoneg/speed/duplex
	 * cannot be changed
	 */
	if (igc_check_reset_block(hw)) {
		dev_err(&adapter->pdev->dev,
			"Cannot change link characteristics when reset is active.\n");
		return -EINVAL;
	}

	/* MDI setting is only allowed when autoneg enabled because
	 * some hardware doesn't allow MDI setting when speed or
	 * duplex is forced.
	 */
	if (cmd->base.eth_tp_mdix_ctrl) {
		if (cmd->base.eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO &&
		    cmd->base.autoneg != AUTONEG_ENABLE) {
			dev_err(&adapter->pdev->dev, "forcing MDI/MDI-X state is not supported when link speed and/or duplex are forced\n");
			return -EINVAL;
		}
	}

	while (test_and_set_bit(__IGC_RESETTING, &adapter->state))
		usleep_range(1000, 2000);

	ethtool_convert_link_mode_to_legacy_u32(&advertising,
						cmd->link_modes.advertising);

	if (cmd->base.autoneg == AUTONEG_ENABLE) {
		hw->mac.autoneg = 1;
		hw->phy.autoneg_advertised = advertising;
		if (adapter->fc_autoneg)
			hw->fc.requested_mode = igc_fc_default;
	} else {
		/* calling this overrides forced MDI setting */
		dev_info(&adapter->pdev->dev,
			 "Force mode currently not supported\n");
	}

	/* MDI-X => 2; MDI => 1; Auto => 3 */
	if (cmd->base.eth_tp_mdix_ctrl) {
		/* fix up the value for auto (3 => 0) as zero is mapped
		 * internally to auto
		 */
		if (cmd->base.eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO)
			hw->phy.mdix = AUTO_ALL_MODES;
		else
			hw->phy.mdix = cmd->base.eth_tp_mdix_ctrl;
	}

	/* reset the link */
	if (netif_running(adapter->netdev)) {
		igc_down(adapter);
		igc_up(adapter);
	} else {
		igc_reset(adapter);
	}

	clear_bit(__IGC_RESETTING, &adapter->state);

	return 0;
}

static const struct ethtool_ops igc_ethtool_ops = {
	.get_drvinfo		= igc_get_drvinfo,
	.get_regs_len		= igc_get_regs_len,
	.get_regs		= igc_get_regs,
	.get_msglevel		= igc_get_msglevel,
	.set_msglevel		= igc_set_msglevel,
	.nway_reset		= igc_nway_reset,
	.get_link		= igc_get_link,
	.get_eeprom_len		= igc_get_eeprom_len,
	.get_eeprom		= igc_get_eeprom,
	.set_eeprom		= igc_set_eeprom,
	.get_ringparam		= igc_get_ringparam,
	.set_ringparam		= igc_set_ringparam,
	.get_pauseparam		= igc_get_pauseparam,
	.set_pauseparam		= igc_set_pauseparam,
	.get_coalesce		= igc_get_coalesce,
	.set_coalesce		= igc_set_coalesce,
	.get_rxfh_indir_size	= igc_get_rxfh_indir_size,
	.get_rxfh		= igc_get_rxfh,
	.set_rxfh		= igc_set_rxfh,
	.get_channels		= igc_get_channels,
	.set_channels		= igc_set_channels,
	.get_priv_flags		= igc_get_priv_flags,
	.set_priv_flags		= igc_set_priv_flags,
	.begin			= igc_ethtool_begin,
	.complete		= igc_ethtool_complete,
	.get_link_ksettings	= igc_get_link_ksettings,
	.set_link_ksettings	= igc_set_link_ksettings,
};

void igc_set_ethtool_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &igc_ethtool_ops;
}