// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

/* ethtool support for ice */

#include "ice.h"
#include "ice_flow.h"
#include "ice_lib.h"
#include "ice_dcb_lib.h"

/**
 * struct ice_stats - descriptor for one statistic reported via "ethtool -S"
 * @stat_string: name shown to userspace
 * @sizeof_stat: size in bytes of the counter field
 * @stat_offset: byte offset of the counter within the containing structure
 */
struct ice_stats {
	char stat_string[ETH_GSTRING_LEN];
	int sizeof_stat;
	int stat_offset;
};

/* Build an ice_stats entry from a containing type and a member path */
#define ICE_STAT(_type, _name, _stat) { \
	.stat_string = _name, \
	.sizeof_stat = sizeof_field(_type, _stat), \
	.stat_offset = offsetof(_type, _stat) \
}

#define ICE_VSI_STAT(_name, _stat) \
	ICE_STAT(struct ice_vsi, _name, _stat)
#define ICE_PF_STAT(_name, _stat) \
	ICE_STAT(struct ice_pf, _name, _stat)

/**
 * ice_q_stats_len - number of per-queue u64 stats for a netdev
 * @netdev: network interface device structure
 *
 * Each allocated Tx and Rx queue contributes one entry per u64 member
 * of struct ice_q_stats (i.e. packets and bytes).
 */
static int ice_q_stats_len(struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);

	return ((np->vsi->alloc_txq + np->vsi->alloc_rxq) *
		(sizeof(struct ice_q_stats) / sizeof(u64)));
}

#define ICE_PF_STATS_LEN ARRAY_SIZE(ice_gstrings_pf_stats)
#define ICE_VSI_STATS_LEN ARRAY_SIZE(ice_gstrings_vsi_stats)

/* Number of u64 PFC counters: xon/xoff arrays for both Rx and Tx */
#define ICE_PFC_STATS_LEN ( \
		(sizeof_field(struct ice_pf, stats.priority_xoff_rx) + \
		 sizeof_field(struct ice_pf, stats.priority_xon_rx) + \
		 sizeof_field(struct ice_pf, stats.priority_xoff_tx) + \
		 sizeof_field(struct ice_pf, stats.priority_xon_tx)) \
		 / sizeof(u64))
/* Total stat count reported for the PF netdev n */
#define ICE_ALL_STATS_LEN(n)	(ICE_PF_STATS_LEN + ICE_PFC_STATS_LEN + \
				 ICE_VSI_STATS_LEN + ice_q_stats_len(n))

/* Per-VSI statistics, offsets relative to struct ice_vsi */
static const struct ice_stats ice_gstrings_vsi_stats[] = {
	ICE_VSI_STAT("rx_unicast", eth_stats.rx_unicast),
	ICE_VSI_STAT("tx_unicast", eth_stats.tx_unicast),
	ICE_VSI_STAT("rx_multicast", eth_stats.rx_multicast),
	ICE_VSI_STAT("tx_multicast", eth_stats.tx_multicast),
	ICE_VSI_STAT("rx_broadcast", eth_stats.rx_broadcast),
	ICE_VSI_STAT("tx_broadcast", eth_stats.tx_broadcast),
	ICE_VSI_STAT("rx_bytes", eth_stats.rx_bytes),
	ICE_VSI_STAT("tx_bytes", eth_stats.tx_bytes),
	ICE_VSI_STAT("rx_dropped", eth_stats.rx_discards),
	ICE_VSI_STAT("rx_unknown_protocol", eth_stats.rx_unknown_protocol),
	ICE_VSI_STAT("rx_alloc_fail", rx_buf_failed),
	ICE_VSI_STAT("rx_pg_alloc_fail", rx_page_failed),
	ICE_VSI_STAT("tx_errors", eth_stats.tx_errors),
	ICE_VSI_STAT("tx_linearize", tx_linearize),
};

/* Indices into the self-test result array filled by ice_self_test() */
enum ice_ethtool_test_id {
	ICE_ETH_TEST_REG = 0,
	ICE_ETH_TEST_EEPROM,
	ICE_ETH_TEST_INTR,
	ICE_ETH_TEST_LOOP,
	ICE_ETH_TEST_LINK,
};

static const char ice_gstrings_test[][ETH_GSTRING_LEN] = {
	"Register test  (offline)",
	"EEPROM test    (offline)",
	"Interrupt test (offline)",
	"Loopback test  (offline)",
	"Link test   (on/offline)",
};

#define ICE_TEST_LEN (sizeof(ice_gstrings_test) / ETH_GSTRING_LEN)

/* These PF_STATs might look like duplicates of some NETDEV_STATs,
 * but they aren't. This device is capable of supporting multiple
 * VSIs/netdevs on a single PF. The NETDEV_STATs are for individual
 * netdevs whereas the PF_STATs are for the physical function that's
 * hosting these netdevs.
 *
 * The PF_STATs are appended to the netdev stats only when ethtool -S
 * is queried on the base PF netdev.
 */
/* Per-PF (whole port) statistics, offsets relative to struct ice_pf */
static const struct ice_stats ice_gstrings_pf_stats[] = {
	ICE_PF_STAT("rx_bytes.nic", stats.eth.rx_bytes),
	ICE_PF_STAT("tx_bytes.nic", stats.eth.tx_bytes),
	ICE_PF_STAT("rx_unicast.nic", stats.eth.rx_unicast),
	ICE_PF_STAT("tx_unicast.nic", stats.eth.tx_unicast),
	ICE_PF_STAT("rx_multicast.nic", stats.eth.rx_multicast),
	ICE_PF_STAT("tx_multicast.nic", stats.eth.tx_multicast),
	ICE_PF_STAT("rx_broadcast.nic", stats.eth.rx_broadcast),
	ICE_PF_STAT("tx_broadcast.nic", stats.eth.tx_broadcast),
	ICE_PF_STAT("tx_errors.nic", stats.eth.tx_errors),
	ICE_PF_STAT("rx_size_64.nic", stats.rx_size_64),
	ICE_PF_STAT("tx_size_64.nic", stats.tx_size_64),
	ICE_PF_STAT("rx_size_127.nic", stats.rx_size_127),
	ICE_PF_STAT("tx_size_127.nic", stats.tx_size_127),
	ICE_PF_STAT("rx_size_255.nic", stats.rx_size_255),
	ICE_PF_STAT("tx_size_255.nic", stats.tx_size_255),
	ICE_PF_STAT("rx_size_511.nic", stats.rx_size_511),
	ICE_PF_STAT("tx_size_511.nic", stats.tx_size_511),
	ICE_PF_STAT("rx_size_1023.nic", stats.rx_size_1023),
	ICE_PF_STAT("tx_size_1023.nic", stats.tx_size_1023),
	ICE_PF_STAT("rx_size_1522.nic", stats.rx_size_1522),
	ICE_PF_STAT("tx_size_1522.nic", stats.tx_size_1522),
	ICE_PF_STAT("rx_size_big.nic", stats.rx_size_big),
	ICE_PF_STAT("tx_size_big.nic", stats.tx_size_big),
	ICE_PF_STAT("link_xon_rx.nic", stats.link_xon_rx),
	ICE_PF_STAT("link_xon_tx.nic", stats.link_xon_tx),
	ICE_PF_STAT("link_xoff_rx.nic", stats.link_xoff_rx),
	ICE_PF_STAT("link_xoff_tx.nic", stats.link_xoff_tx),
	ICE_PF_STAT("tx_dropped_link_down.nic", stats.tx_dropped_link_down),
	ICE_PF_STAT("rx_undersize.nic", stats.rx_undersize),
	ICE_PF_STAT("rx_fragments.nic", stats.rx_fragments),
	ICE_PF_STAT("rx_oversize.nic", stats.rx_oversize),
	ICE_PF_STAT("rx_jabber.nic", stats.rx_jabber),
	ICE_PF_STAT("rx_csum_bad.nic", hw_csum_rx_error),
	ICE_PF_STAT("rx_length_errors.nic", stats.rx_len_errors),
	ICE_PF_STAT("rx_dropped.nic", stats.eth.rx_discards),
	ICE_PF_STAT("rx_crc_errors.nic", stats.crc_errors),
	ICE_PF_STAT("illegal_bytes.nic", stats.illegal_bytes),
	ICE_PF_STAT("mac_local_faults.nic", stats.mac_local_faults),
	ICE_PF_STAT("mac_remote_faults.nic", stats.mac_remote_faults),
};

/* Registers returned by "ethtool -d" (see ice_get_regs()) */
static const u32 ice_regs_dump_list[] = {
	PFGEN_STATE,
	PRTGEN_STATUS,
	QRX_CTRL(0),
	QINT_TQCTL(0),
	QINT_RQCTL(0),
	PFINT_OICR_ENA,
	QRX_ITR(0),
	PF0INT_ITR_0(0),
	PF0INT_ITR_1(0),
	PF0INT_ITR_2(0),
};

/**
 * struct ice_priv_flag - maps an ethtool private flag to a PF flag bit
 * @name: flag name shown by "ethtool --show-priv-flags"
 * @bitno: bit position in pf->flags
 */
struct ice_priv_flag {
	char name[ETH_GSTRING_LEN];
	u32 bitno;			/* bit position in pf->flags */
};

#define ICE_PRIV_FLAG(_name, _bitno) { \
	.name = _name, \
	.bitno = _bitno, \
}

static const struct ice_priv_flag ice_gstrings_priv_flags[] = {
	ICE_PRIV_FLAG("link-down-on-close", ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA),
	ICE_PRIV_FLAG("fw-lldp-agent", ICE_FLAG_FW_LLDP_AGENT),
	ICE_PRIV_FLAG("legacy-rx", ICE_FLAG_LEGACY_RX),
};

#define ICE_PRIV_FLAG_ARRAY_SIZE	ARRAY_SIZE(ice_gstrings_priv_flags)

/**
 * ice_get_drvinfo - report driver, firmware and bus information
 * @netdev: network interface device structure
 * @drvinfo: structure filled in for ethtool -i
 */
static void
ice_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_pf *pf = vsi->back;

	strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, ice_drv_ver, sizeof(drvinfo->version));
	strlcpy(drvinfo->fw_version, ice_nvm_version_str(&pf->hw),
		sizeof(drvinfo->fw_version));
	strlcpy(drvinfo->bus_info, pci_name(pf->pdev),
		sizeof(drvinfo->bus_info));
	drvinfo->n_priv_flags = ICE_PRIV_FLAG_ARRAY_SIZE;
}

/* Size in bytes of the register dump buffer for ethtool -d */
static int ice_get_regs_len(struct net_device __always_unused *netdev)
{
	return sizeof(ice_regs_dump_list);
}

/* Fill @p with the raw values of the registers in ice_regs_dump_list */
static void
ice_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)
{
	struct ice_netdev_priv *np =
netdev_priv(netdev);
	struct ice_pf *pf = np->vsi->back;
	struct ice_hw *hw = &pf->hw;
	u32 *regs_buf = (u32 *)p;
	int i;

	regs->version = 1;

	for (i = 0; i < ARRAY_SIZE(ice_regs_dump_list); ++i)
		regs_buf[i] = rd32(hw, ice_regs_dump_list[i]);
}

/**
 * ice_get_msglevel - report the current driver message level
 * @netdev: network interface device structure
 *
 * Returns pf->msg_enable; without dynamic debug it also prints the
 * HW debug mask as a side effect when one is set.
 */
static u32 ice_get_msglevel(struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_pf *pf = np->vsi->back;

#ifndef CONFIG_DYNAMIC_DEBUG
	if (pf->hw.debug_mask)
		netdev_info(netdev, "hw debug_mask: 0x%llX\n",
			    pf->hw.debug_mask);
#endif /* !CONFIG_DYNAMIC_DEBUG */

	return pf->msg_enable;
}

/**
 * ice_set_msglevel - set the driver message level
 * @netdev: network interface device structure
 * @data: new message level
 *
 * Without dynamic debug, values containing ICE_DBG_USER go to the HW
 * debug mask instead of pf->msg_enable.
 */
static void ice_set_msglevel(struct net_device *netdev, u32 data)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_pf *pf = np->vsi->back;

#ifndef CONFIG_DYNAMIC_DEBUG
	if (ICE_DBG_USER & data)
		pf->hw.debug_mask = data;
	else
		pf->msg_enable = data;
#else
	pf->msg_enable = data;
#endif /* !CONFIG_DYNAMIC_DEBUG */
}

/* Size in bytes of the NVM shadow RAM, as reported to ethtool -e */
static int ice_get_eeprom_len(struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_pf *pf = np->vsi->back;

	return (int)(pf->hw.nvm.sr_words * sizeof(u16));
}

/**
 * ice_get_eeprom - read a range of the NVM shadow RAM for ethtool -e
 * @netdev: network interface device structure
 * @eeprom: offset/length of the requested range
 * @bytes: destination buffer for the raw bytes
 *
 * The shadow RAM is word (u16) addressed, so the byte range is widened
 * to whole words for the read and re-sliced on copy-out.
 */
static int
ice_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
	       u8 *bytes)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	u16 first_word, last_word, nwords;
	struct ice_vsi *vsi = np->vsi;
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	enum ice_status status;
	struct device *dev;
	int ret = 0;
	u16 *buf;

	dev = ice_pf_to_dev(pf);

	eeprom->magic = hw->vendor_id | (hw->device_id << 16);

	/* convert the byte range to an inclusive word range */
	first_word = eeprom->offset >> 1;
	last_word = (eeprom->offset + eeprom->len - 1) >> 1;
	nwords = last_word - first_word + 1;

	buf = devm_kcalloc(dev, nwords, sizeof(u16), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	status = ice_read_sr_buf(hw, first_word, &nwords, buf);
	if (status) {
		dev_err(dev, "ice_read_sr_buf failed, err %d aq_err %d\n",
			status, hw->adminq.sq_last_status);
		/* report how much was actually read (in bytes) */
		eeprom->len = sizeof(u16) * nwords;
		ret = -EIO;
		goto out;
	}

	/* skip the leading pad byte when the request started mid-word */
	memcpy(bytes, (u8 *)buf + (eeprom->offset & 1), eeprom->len);
out:
	devm_kfree(dev, buf);
	return ret;
}

/**
 * ice_active_vfs - check if there are any active VFs
 * @pf: board private structure
 *
 * Returns true if an active VF is found, otherwise returns false
 */
static bool ice_active_vfs(struct ice_pf *pf)
{
	int i;

	ice_for_each_vf(pf, i) {
		struct ice_vf *vf = &pf->vf[i];

		if (test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
			return true;
	}

	return false;
}

/**
 * ice_link_test - perform a link test on a given net_device
 * @netdev: network interface device structure
 *
 * This function performs one of the self-tests required by ethtool.
 * Returns 0 on success, non-zero on failure.
 */
static u64 ice_link_test(struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	enum ice_status status;
	bool link_up = false;

	netdev_info(netdev, "link test\n");
	status = ice_get_link_status(np->vsi->port_info, &link_up);
	if (status) {
		netdev_err(netdev, "link query error, status = %d\n", status);
		return 1;
	}

	if (!link_up)
		return 2;

	return 0;
}

/**
 * ice_eeprom_test - perform an EEPROM test on a given net_device
 * @netdev: network interface device structure
 *
 * This function performs one of the self-tests required by ethtool.
 * Returns 0 on success, non-zero on failure.
331 */ 332 static u64 ice_eeprom_test(struct net_device *netdev) 333 { 334 struct ice_netdev_priv *np = netdev_priv(netdev); 335 struct ice_pf *pf = np->vsi->back; 336 337 netdev_info(netdev, "EEPROM test\n"); 338 return !!(ice_nvm_validate_checksum(&pf->hw)); 339 } 340 341 /** 342 * ice_reg_pattern_test 343 * @hw: pointer to the HW struct 344 * @reg: reg to be tested 345 * @mask: bits to be touched 346 */ 347 static int ice_reg_pattern_test(struct ice_hw *hw, u32 reg, u32 mask) 348 { 349 struct ice_pf *pf = (struct ice_pf *)hw->back; 350 struct device *dev = ice_pf_to_dev(pf); 351 static const u32 patterns[] = { 352 0x5A5A5A5A, 0xA5A5A5A5, 353 0x00000000, 0xFFFFFFFF 354 }; 355 u32 val, orig_val; 356 int i; 357 358 orig_val = rd32(hw, reg); 359 for (i = 0; i < ARRAY_SIZE(patterns); ++i) { 360 u32 pattern = patterns[i] & mask; 361 362 wr32(hw, reg, pattern); 363 val = rd32(hw, reg); 364 if (val == pattern) 365 continue; 366 dev_err(dev, 367 "%s: reg pattern test failed - reg 0x%08x pat 0x%08x val 0x%08x\n" 368 , __func__, reg, pattern, val); 369 return 1; 370 } 371 372 wr32(hw, reg, orig_val); 373 val = rd32(hw, reg); 374 if (val != orig_val) { 375 dev_err(dev, 376 "%s: reg restore test failed - reg 0x%08x orig 0x%08x val 0x%08x\n" 377 , __func__, reg, orig_val, val); 378 return 1; 379 } 380 381 return 0; 382 } 383 384 /** 385 * ice_reg_test - perform a register test on a given net_device 386 * @netdev: network interface device structure 387 * 388 * This function performs one of the self-tests required by ethtool. 389 * Returns 0 on success, non-zero on failure. 390 */ 391 static u64 ice_reg_test(struct net_device *netdev) 392 { 393 struct ice_netdev_priv *np = netdev_priv(netdev); 394 struct ice_hw *hw = np->vsi->port_info->hw; 395 u32 int_elements = hw->func_caps.common_cap.num_msix_vectors ? 
396 hw->func_caps.common_cap.num_msix_vectors - 1 : 1; 397 struct ice_diag_reg_test_info { 398 u32 address; 399 u32 mask; 400 u32 elem_num; 401 u32 elem_size; 402 } ice_reg_list[] = { 403 {GLINT_ITR(0, 0), 0x00000fff, int_elements, 404 GLINT_ITR(0, 1) - GLINT_ITR(0, 0)}, 405 {GLINT_ITR(1, 0), 0x00000fff, int_elements, 406 GLINT_ITR(1, 1) - GLINT_ITR(1, 0)}, 407 {GLINT_ITR(0, 0), 0x00000fff, int_elements, 408 GLINT_ITR(2, 1) - GLINT_ITR(2, 0)}, 409 {GLINT_CTL, 0xffff0001, 1, 0} 410 }; 411 int i; 412 413 netdev_dbg(netdev, "Register test\n"); 414 for (i = 0; i < ARRAY_SIZE(ice_reg_list); ++i) { 415 u32 j; 416 417 for (j = 0; j < ice_reg_list[i].elem_num; ++j) { 418 u32 mask = ice_reg_list[i].mask; 419 u32 reg = ice_reg_list[i].address + 420 (j * ice_reg_list[i].elem_size); 421 422 /* bail on failure (non-zero return) */ 423 if (ice_reg_pattern_test(hw, reg, mask)) 424 return 1; 425 } 426 } 427 428 return 0; 429 } 430 431 /** 432 * ice_lbtest_prepare_rings - configure Tx/Rx test rings 433 * @vsi: pointer to the VSI structure 434 * 435 * Function configures rings of a VSI for loopback test without 436 * enabling interrupts or informing the kernel about new queues. 437 * 438 * Returns 0 on success, negative on failure. 
439 */ 440 static int ice_lbtest_prepare_rings(struct ice_vsi *vsi) 441 { 442 int status; 443 444 status = ice_vsi_setup_tx_rings(vsi); 445 if (status) 446 goto err_setup_tx_ring; 447 448 status = ice_vsi_setup_rx_rings(vsi); 449 if (status) 450 goto err_setup_rx_ring; 451 452 status = ice_vsi_cfg(vsi); 453 if (status) 454 goto err_setup_rx_ring; 455 456 status = ice_vsi_start_rx_rings(vsi); 457 if (status) 458 goto err_start_rx_ring; 459 460 return status; 461 462 err_start_rx_ring: 463 ice_vsi_free_rx_rings(vsi); 464 err_setup_rx_ring: 465 ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, 0); 466 err_setup_tx_ring: 467 ice_vsi_free_tx_rings(vsi); 468 469 return status; 470 } 471 472 /** 473 * ice_lbtest_disable_rings - disable Tx/Rx test rings after loopback test 474 * @vsi: pointer to the VSI structure 475 * 476 * Function stops and frees VSI rings after a loopback test. 477 * Returns 0 on success, negative on failure. 478 */ 479 static int ice_lbtest_disable_rings(struct ice_vsi *vsi) 480 { 481 int status; 482 483 status = ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, 0); 484 if (status) 485 netdev_err(vsi->netdev, "Failed to stop Tx rings, VSI %d error %d\n", 486 vsi->vsi_num, status); 487 488 status = ice_vsi_stop_rx_rings(vsi); 489 if (status) 490 netdev_err(vsi->netdev, "Failed to stop Rx rings, VSI %d error %d\n", 491 vsi->vsi_num, status); 492 493 ice_vsi_free_tx_rings(vsi); 494 ice_vsi_free_rx_rings(vsi); 495 496 return status; 497 } 498 499 /** 500 * ice_lbtest_create_frame - create test packet 501 * @pf: pointer to the PF structure 502 * @ret_data: allocated frame buffer 503 * @size: size of the packet data 504 * 505 * Function allocates a frame with a test pattern on specific offsets. 506 * Returns 0 on success, non-zero on failure. 
507 */ 508 static int ice_lbtest_create_frame(struct ice_pf *pf, u8 **ret_data, u16 size) 509 { 510 u8 *data; 511 512 if (!pf) 513 return -EINVAL; 514 515 data = devm_kzalloc(ice_pf_to_dev(pf), size, GFP_KERNEL); 516 if (!data) 517 return -ENOMEM; 518 519 /* Since the ethernet test frame should always be at least 520 * 64 bytes long, fill some octets in the payload with test data. 521 */ 522 memset(data, 0xFF, size); 523 data[32] = 0xDE; 524 data[42] = 0xAD; 525 data[44] = 0xBE; 526 data[46] = 0xEF; 527 528 *ret_data = data; 529 530 return 0; 531 } 532 533 /** 534 * ice_lbtest_check_frame - verify received loopback frame 535 * @frame: pointer to the raw packet data 536 * 537 * Function verifies received test frame with a pattern. 538 * Returns true if frame matches the pattern, false otherwise. 539 */ 540 static bool ice_lbtest_check_frame(u8 *frame) 541 { 542 /* Validate bytes of a frame under offsets chosen earlier */ 543 if (frame[32] == 0xDE && 544 frame[42] == 0xAD && 545 frame[44] == 0xBE && 546 frame[46] == 0xEF && 547 frame[48] == 0xFF) 548 return true; 549 550 return false; 551 } 552 553 /** 554 * ice_diag_send - send test frames to the test ring 555 * @tx_ring: pointer to the transmit ring 556 * @data: pointer to the raw packet data 557 * @size: size of the packet to send 558 * 559 * Function sends loopback packets on a test Tx ring. 
 */
static int ice_diag_send(struct ice_ring *tx_ring, u8 *data, u16 size)
{
	struct ice_tx_desc *tx_desc;
	struct ice_tx_buf *tx_buf;
	dma_addr_t dma;
	u64 td_cmd;

	tx_desc = ICE_TX_DESC(tx_ring, tx_ring->next_to_use);
	tx_buf = &tx_ring->tx_buf[tx_ring->next_to_use];

	dma = dma_map_single(tx_ring->dev, data, size, DMA_TO_DEVICE);
	if (dma_mapping_error(tx_ring->dev, dma))
		return -EINVAL;

	tx_desc->buf_addr = cpu_to_le64(dma);

	/* These flags are required for a descriptor to be pushed out */
	td_cmd = (u64)(ICE_TX_DESC_CMD_EOP | ICE_TX_DESC_CMD_RS);
	tx_desc->cmd_type_offset_bsz =
		cpu_to_le64(ICE_TX_DESC_DTYPE_DATA |
			    (td_cmd << ICE_TXD_QW1_CMD_S) |
			    ((u64)0 << ICE_TXD_QW1_OFFSET_S) |
			    ((u64)size << ICE_TXD_QW1_TX_BUF_SZ_S) |
			    ((u64)0 << ICE_TXD_QW1_L2TAG1_S));

	tx_buf->next_to_watch = tx_desc;

	/* Force memory write to complete before letting h/w know
	 * there are new descriptors to fetch.
	 */
	wmb();

	/* advance the tail, wrapping at ring size */
	tx_ring->next_to_use++;
	if (tx_ring->next_to_use >= tx_ring->count)
		tx_ring->next_to_use = 0;

	writel_relaxed(tx_ring->next_to_use, tx_ring->tail);

	/* Wait until the packets get transmitted to the receive queue. */
	usleep_range(1000, 2000);
	dma_unmap_single(tx_ring->dev, dma, size, DMA_TO_DEVICE);

	return 0;
}

#define ICE_LB_FRAME_SIZE 64
/**
 * ice_lbtest_receive_frames - receive and verify test frames
 * @rx_ring: pointer to the receive ring
 *
 * Function receives loopback packets and verify their correctness.
 * Returns number of received valid frames.
 */
static int ice_lbtest_receive_frames(struct ice_ring *rx_ring)
{
	struct ice_rx_buf *rx_buf;
	int valid_frames, i;
	u8 *received_buf;

	valid_frames = 0;

	for (i = 0; i < rx_ring->count; i++) {
		union ice_32b_rx_flex_desc *rx_desc;

		rx_desc = ICE_RX_DESC(rx_ring, i);

		/* NOTE(review): the mask reuses Tx descriptor cmd bits on an
		 * Rx flex descriptor status word (bits 0/1) — confirm these
		 * line up with the intended DD/EOF status bits.
		 */
		if (!(rx_desc->wb.status_error0 &
		    cpu_to_le16(ICE_TX_DESC_CMD_EOP | ICE_TX_DESC_CMD_RS)))
			continue;

		rx_buf = &rx_ring->rx_buf[i];
		received_buf = page_address(rx_buf->page) + rx_buf->page_offset;

		if (ice_lbtest_check_frame(received_buf))
			valid_frames++;
	}

	return valid_frames;
}

/**
 * ice_loopback_test - perform a loopback test on a given net_device
 * @netdev: network interface device structure
 *
 * This function performs one of the self-tests required by ethtool.
 * Returns 0 on success, non-zero on failure (each failure site has a
 * distinct code 1..10 identifying the failing step).
 */
static u64 ice_loopback_test(struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *orig_vsi = np->vsi, *test_vsi;
	struct ice_pf *pf = orig_vsi->back;
	struct ice_ring *tx_ring, *rx_ring;
	u8 broadcast[ETH_ALEN], ret = 0;
	int num_frames, valid_frames;
	LIST_HEAD(tmp_list);
	struct device *dev;
	u8 *tx_frame;
	int i;

	dev = ice_pf_to_dev(pf);
	netdev_info(netdev, "loopback test\n");

	test_vsi = ice_lb_vsi_setup(pf, pf->hw.port_info);
	if (!test_vsi) {
		netdev_err(netdev, "Failed to create a VSI for the loopback test");
		return 1;
	}

	test_vsi->netdev = netdev;
	tx_ring = test_vsi->tx_rings[0];
	rx_ring = test_vsi->rx_rings[0];

	if (ice_lbtest_prepare_rings(test_vsi)) {
		ret = 2;
		goto lbtest_vsi_close;
	}

	if (ice_alloc_rx_bufs(rx_ring, rx_ring->count)) {
		ret = 3;
		goto lbtest_rings_dis;
	}

	/* Enable MAC loopback in firmware */
	if (ice_aq_set_mac_loopback(&pf->hw, true, NULL)) {
		ret = 4;
		/* NOTE(review): jumps to the label that disables loopback
		 * even though enabling failed — appears to be an intentional
		 * best-effort cleanup; confirm it is benign on this HW.
		 */
		goto lbtest_mac_dis;
	}

	/* Test VSI needs to receive broadcast packets */
	eth_broadcast_addr(broadcast);
	if (ice_add_mac_to_list(test_vsi, &tmp_list, broadcast)) {
		ret = 5;
		goto lbtest_mac_dis;
	}

	if (ice_add_mac(&pf->hw, &tmp_list)) {
		ret = 6;
		goto free_mac_list;
	}

	if (ice_lbtest_create_frame(pf, &tx_frame, ICE_LB_FRAME_SIZE)) {
		ret = 7;
		goto remove_mac_filters;
	}

	/* send the same frame up to 32 times, bounded by the ring size */
	num_frames = min_t(int, tx_ring->count, 32);
	for (i = 0; i < num_frames; i++) {
		if (ice_diag_send(tx_ring, tx_frame, ICE_LB_FRAME_SIZE)) {
			ret = 8;
			goto lbtest_free_frame;
		}
	}

	valid_frames = ice_lbtest_receive_frames(rx_ring);
	if (!valid_frames)
		ret = 9;
	else if (valid_frames != num_frames)
		ret = 10;

	/* teardown is the setup sequence in reverse; every step runs */
lbtest_free_frame:
	devm_kfree(dev, tx_frame);
remove_mac_filters:
	if (ice_remove_mac(&pf->hw, &tmp_list))
		netdev_err(netdev, "Could not remove MAC filter for the test VSI");
free_mac_list:
	ice_free_fltr_list(dev, &tmp_list);
lbtest_mac_dis:
	/* Disable MAC loopback after the test is completed. */
	if (ice_aq_set_mac_loopback(&pf->hw, false, NULL))
		netdev_err(netdev, "Could not disable MAC loopback\n");
lbtest_rings_dis:
	if (ice_lbtest_disable_rings(test_vsi))
		netdev_err(netdev, "Could not disable test rings\n");
lbtest_vsi_close:
	test_vsi->netdev = NULL;
	if (ice_vsi_release(test_vsi))
		netdev_err(netdev, "Failed to remove the test VSI");

	return ret;
}

/**
 * ice_intr_test - perform an interrupt test on a given net_device
 * @netdev: network interface device structure
 *
 * This function performs one of the self-tests required by ethtool.
 * Returns 0 on success, non-zero on failure.
 */
static u64 ice_intr_test(struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_pf *pf = np->vsi->back;
	u16 swic_old = pf->sw_int_count;

	netdev_info(netdev, "interrupt test\n");

	/* fire a software interrupt on the other-cause vector */
	wr32(&pf->hw, GLINT_DYN_CTL(pf->oicr_idx),
	     GLINT_DYN_CTL_SW_ITR_INDX_M |
	     GLINT_DYN_CTL_INTENA_MSK_M |
	     GLINT_DYN_CTL_SWINT_TRIG_M);

	usleep_range(1000, 2000);
	/* fail (return 1) if the SW interrupt counter did not advance */
	return (swic_old == pf->sw_int_count);
}

/**
 * ice_self_test - handler function for performing a self-test by ethtool
 * @netdev: network interface device structure
 * @eth_test: ethtool_test structure
 * @data: required by ethtool.self_test
 *
 * This function is called after invoking 'ethtool -t devname' command where
 * devname is the name of the network device on which ethtool should operate.
 * It performs a set of self-tests to check if a device works properly.
 */
static void
ice_self_test(struct net_device *netdev, struct ethtool_test *eth_test,
	      u64 *data)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	bool if_running = netif_running(netdev);
	struct ice_pf *pf = np->vsi->back;
	struct device *dev;

	dev = ice_pf_to_dev(pf);

	if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
		netdev_info(netdev, "offline testing starting\n");

		set_bit(__ICE_TESTING, pf->state);

		/* offline tests disturb the HW; refuse while VFs are active */
		if (ice_active_vfs(pf)) {
			dev_warn(dev,
				 "Please take active VFs and Netqueues offline and restart the adapter before running NIC diagnostics\n");
			data[ICE_ETH_TEST_REG] = 1;
			data[ICE_ETH_TEST_EEPROM] = 1;
			data[ICE_ETH_TEST_INTR] = 1;
			data[ICE_ETH_TEST_LOOP] = 1;
			data[ICE_ETH_TEST_LINK] = 1;
			eth_test->flags |= ETH_TEST_FL_FAILED;
			clear_bit(__ICE_TESTING, pf->state);
			goto skip_ol_tests;
		}
		/* If the device is online then take it offline */
		if (if_running)
			/* indicate we're in test mode */
			ice_stop(netdev);

		data[ICE_ETH_TEST_LINK] = ice_link_test(netdev);
		data[ICE_ETH_TEST_EEPROM] = ice_eeprom_test(netdev);
		data[ICE_ETH_TEST_INTR] = ice_intr_test(netdev);
		data[ICE_ETH_TEST_LOOP] = ice_loopback_test(netdev);
		data[ICE_ETH_TEST_REG] = ice_reg_test(netdev);

		if (data[ICE_ETH_TEST_LINK] ||
		    data[ICE_ETH_TEST_EEPROM] ||
		    data[ICE_ETH_TEST_LOOP] ||
		    data[ICE_ETH_TEST_INTR] ||
		    data[ICE_ETH_TEST_REG])
			eth_test->flags |= ETH_TEST_FL_FAILED;

		clear_bit(__ICE_TESTING, pf->state);

		/* bring the interface back up if we took it down */
		if (if_running) {
			int status = ice_open(netdev);

			if (status) {
				dev_err(dev, "Could not open device %s, err %d",
					pf->int_name, status);
			}
		}
	} else {
		/* Online tests */
		netdev_info(netdev, "online testing starting\n");

		data[ICE_ETH_TEST_LINK] = ice_link_test(netdev);
		if (data[ICE_ETH_TEST_LINK])
			eth_test->flags |= ETH_TEST_FL_FAILED;

		/* Offline only tests, not run in online; pass by default */
		data[ICE_ETH_TEST_REG] = 0;
		data[ICE_ETH_TEST_EEPROM] = 0;
		data[ICE_ETH_TEST_INTR] = 0;
		data[ICE_ETH_TEST_LOOP] = 0;
	}

skip_ol_tests:
	netdev_info(netdev, "testing finished\n");
}

/**
 * ice_get_strings - fill the string buffers for ethtool string sets
 * @netdev: network interface device structure
 * @stringset: which set (stats, self-test names, or private flags)
 * @data: destination buffer; entries are ETH_GSTRING_LEN apart
 *
 * Ordering here must match ice_get_ethtool_stats()/the stat tables.
 */
static void ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	char *p = (char *)data;
	unsigned int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < ICE_VSI_STATS_LEN; i++) {
			snprintf(p, ETH_GSTRING_LEN, "%s",
				 ice_gstrings_vsi_stats[i].stat_string);
			p += ETH_GSTRING_LEN;
		}

		ice_for_each_alloc_txq(vsi, i) {
			snprintf(p, ETH_GSTRING_LEN,
				 "tx_queue_%u_packets", i);
			p += ETH_GSTRING_LEN;
			snprintf(p, ETH_GSTRING_LEN, "tx_queue_%u_bytes", i);
			p += ETH_GSTRING_LEN;
		}

		ice_for_each_alloc_rxq(vsi, i) {
			snprintf(p, ETH_GSTRING_LEN,
				 "rx_queue_%u_packets", i);
			p += ETH_GSTRING_LEN;
			snprintf(p, ETH_GSTRING_LEN, "rx_queue_%u_bytes", i);
			p += ETH_GSTRING_LEN;
		}

		/* PF-wide stats are appended only on the base PF netdev */
		if (vsi->type != ICE_VSI_PF)
			return;

		for (i = 0; i < ICE_PF_STATS_LEN; i++) {
			snprintf(p, ETH_GSTRING_LEN, "%s",
				 ice_gstrings_pf_stats[i].stat_string);
			p += ETH_GSTRING_LEN;
		}

		for (i = 0; i < ICE_MAX_USER_PRIORITY; i++) {
			snprintf(p, ETH_GSTRING_LEN,
				 "tx_priority_%u_xon.nic", i);
			p += ETH_GSTRING_LEN;
			snprintf(p, ETH_GSTRING_LEN,
				 "tx_priority_%u_xoff.nic", i);
			p += ETH_GSTRING_LEN;
		}
		for (i = 0; i < ICE_MAX_USER_PRIORITY; i++) {
			snprintf(p, ETH_GSTRING_LEN,
				 "rx_priority_%u_xon.nic", i);
			p += ETH_GSTRING_LEN;
			snprintf(p, ETH_GSTRING_LEN,
				 "rx_priority_%u_xoff.nic", i);
			p += ETH_GSTRING_LEN;
		}
		break;
	case ETH_SS_TEST:
		memcpy(data, ice_gstrings_test, ICE_TEST_LEN * ETH_GSTRING_LEN);
		break;
	case ETH_SS_PRIV_FLAGS:
		for (i = 0; i < ICE_PRIV_FLAG_ARRAY_SIZE; i++) {
			snprintf(p, ETH_GSTRING_LEN, "%s",
				 ice_gstrings_priv_flags[i].name);
			p += ETH_GSTRING_LEN;
		}
		break;
	default:
		break;
	}
}

/**
 * ice_set_phys_id - blink the port identification LED
 * @netdev: network interface device structure
 * @state: requested LED state from ethtool
 *
 * The AQ call takes an "original mode" flag, hence the inverted
 * led_active argument. Returns 0 on success, -EINVAL for unsupported
 * states, -EIO on AQ failure.
 */
static int
ice_set_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	bool led_active;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		led_active = true;
		break;
	case ETHTOOL_ID_INACTIVE:
		led_active = false;
		break;
	default:
		return -EINVAL;
	}

	if (ice_aq_set_port_id_led(np->vsi->port_info, !led_active, NULL))
		return -EIO;

	return 0;
}

/**
 * ice_set_fec_cfg - Set link FEC options
 * @netdev: network interface device structure
 * @req_fec: FEC mode to configure
 */
static int ice_set_fec_cfg(struct net_device *netdev, enum ice_fec_mode req_fec)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_aqc_set_phy_cfg_data config = { 0 };
	struct
ice_aqc_get_phy_caps_data *caps;
	struct ice_vsi *vsi = np->vsi;
	u8 sw_cfg_caps, sw_cfg_fec;
	struct ice_port_info *pi;
	enum ice_status status;
	int err = 0;

	pi = vsi->port_info;
	if (!pi)
		return -EOPNOTSUPP;

	/* Changing the FEC parameters is not supported if not the PF VSI */
	if (vsi->type != ICE_VSI_PF) {
		netdev_info(netdev, "Changing FEC parameters only supported for PF VSI\n");
		return -EOPNOTSUPP;
	}

	/* Get last SW configuration */
	caps = kzalloc(sizeof(*caps), GFP_KERNEL);
	if (!caps)
		return -ENOMEM;

	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG,
				     caps, NULL);
	if (status) {
		err = -EAGAIN;
		goto done;
	}

	/* Copy SW configuration returned from PHY caps to PHY config */
	ice_copy_phy_caps_to_cfg(caps, &config);
	sw_cfg_caps = caps->caps;
	sw_cfg_fec = caps->link_fec_options;

	/* Get topology caps, then copy PHY FEC topology caps to PHY config */
	memset(caps, 0, sizeof(*caps));

	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP,
				     caps, NULL);
	if (status) {
		err = -EAGAIN;
		goto done;
	}

	config.caps |= (caps->caps & ICE_AQC_PHY_EN_AUTO_FEC);
	config.link_fec_opt = caps->link_fec_options;

	ice_cfg_phy_fec(&config, req_fec);

	/* If FEC mode has changed, then set PHY configuration and enable AN.
	 * NOTE(review): ICE_AQ_PHY_ENA_AUTO_FEC (set-cfg side) is compared
	 * against ICE_AQC_PHY_EN_AUTO_FEC (caps side) — these are distinct
	 * defines; confirm they denote the same bit position.
	 */
	if ((config.caps & ICE_AQ_PHY_ENA_AUTO_FEC) !=
	    (sw_cfg_caps & ICE_AQC_PHY_EN_AUTO_FEC) ||
	    config.link_fec_opt != sw_cfg_fec) {
		if (caps->caps & ICE_AQC_PHY_AN_MODE)
			config.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;

		status = ice_aq_set_phy_cfg(pi->hw, pi->lport, &config, NULL);

		if (status)
			err = -EAGAIN;
	}

done:
	kfree(caps);
	return err;
}

/**
 * ice_set_fecparam - Set FEC link options
 * @netdev: network interface device structure
 * @fecparam: Ethtool structure to retrieve FEC parameters
 */
static int
ice_set_fecparam(struct net_device *netdev, struct ethtool_fecparam *fecparam)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	enum ice_fec_mode fec;

	/* map the ethtool FEC request onto the driver's FEC modes */
	switch (fecparam->fec) {
	case ETHTOOL_FEC_AUTO:
		fec = ICE_FEC_AUTO;
		break;
	case ETHTOOL_FEC_RS:
		fec = ICE_FEC_RS;
		break;
	case ETHTOOL_FEC_BASER:
		fec = ICE_FEC_BASER;
		break;
	case ETHTOOL_FEC_OFF:
	case ETHTOOL_FEC_NONE:
		fec = ICE_FEC_NONE;
		break;
	default:
		dev_warn(&vsi->back->pdev->dev, "Unsupported FEC mode: %d\n",
			 fecparam->fec);
		return -EINVAL;
	}

	return ice_set_fec_cfg(netdev, fec);
}

/**
 * ice_get_fecparam - Get link FEC options
 * @netdev: network interface device structure
 * @fecparam: Ethtool structure to retrieve FEC parameters
 */
static int
ice_get_fecparam(struct net_device *netdev, struct ethtool_fecparam *fecparam)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_aqc_get_phy_caps_data *caps;
	struct ice_link_status *link_info;
	struct ice_vsi *vsi = np->vsi;
	struct ice_port_info *pi;
	enum ice_status status;
	int err = 0;

	pi = vsi->port_info;

	if (!pi)
		return -EOPNOTSUPP;
	link_info = &pi->phy.link_info;

	/* Set FEC mode based on negotiated link info */
	switch (link_info->fec_info) {
	case ICE_AQ_LINK_25G_KR_FEC_EN:
		fecparam->active_fec = ETHTOOL_FEC_BASER;
		break;
	case ICE_AQ_LINK_25G_RS_528_FEC_EN:
		/* fall through */
	case ICE_AQ_LINK_25G_RS_544_FEC_EN:
		fecparam->active_fec = ETHTOOL_FEC_RS;
		break;
	default:
		fecparam->active_fec = ETHTOOL_FEC_OFF;
		break;
	}

	caps = kzalloc(sizeof(*caps), GFP_KERNEL);
	if (!caps)
		return -ENOMEM;

	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP,
				     caps, NULL);
	if (status) {
		err = -EAGAIN;
		goto done;
	}

	/* Set supported/configured FEC modes based on PHY capability */
	if (caps->caps & ICE_AQC_PHY_EN_AUTO_FEC)
		fecparam->fec |= ETHTOOL_FEC_AUTO;
	if (caps->link_fec_options & ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN ||
	    caps->link_fec_options & ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ ||
	    caps->link_fec_options & ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN ||
	    caps->link_fec_options & ICE_AQC_PHY_FEC_25G_KR_REQ)
		fecparam->fec |= ETHTOOL_FEC_BASER;
	if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_528_REQ ||
	    caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_544_REQ ||
	    caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN)
		fecparam->fec |= ETHTOOL_FEC_RS;
	if (caps->link_fec_options == 0)
		fecparam->fec |= ETHTOOL_FEC_OFF;

done:
	kfree(caps);
	return err;
}

/**
 * ice_get_priv_flags - report device private flags
 * @netdev: network interface device structure
 *
 * The get string set count and the string set should be matched for each
 * flag returned. Add new strings for each flag to the ice_gstrings_priv_flags
 * array.
 *
 * Returns a u32 bitmap of flags.
1135 */ 1136 static u32 ice_get_priv_flags(struct net_device *netdev) 1137 { 1138 struct ice_netdev_priv *np = netdev_priv(netdev); 1139 struct ice_vsi *vsi = np->vsi; 1140 struct ice_pf *pf = vsi->back; 1141 u32 i, ret_flags = 0; 1142 1143 for (i = 0; i < ICE_PRIV_FLAG_ARRAY_SIZE; i++) { 1144 const struct ice_priv_flag *priv_flag; 1145 1146 priv_flag = &ice_gstrings_priv_flags[i]; 1147 1148 if (test_bit(priv_flag->bitno, pf->flags)) 1149 ret_flags |= BIT(i); 1150 } 1151 1152 return ret_flags; 1153 } 1154 1155 /** 1156 * ice_set_priv_flags - set private flags 1157 * @netdev: network interface device structure 1158 * @flags: bit flags to be set 1159 */ 1160 static int ice_set_priv_flags(struct net_device *netdev, u32 flags) 1161 { 1162 struct ice_netdev_priv *np = netdev_priv(netdev); 1163 DECLARE_BITMAP(change_flags, ICE_PF_FLAGS_NBITS); 1164 DECLARE_BITMAP(orig_flags, ICE_PF_FLAGS_NBITS); 1165 struct ice_vsi *vsi = np->vsi; 1166 struct ice_pf *pf = vsi->back; 1167 struct device *dev; 1168 int ret = 0; 1169 u32 i; 1170 1171 if (flags > BIT(ICE_PRIV_FLAG_ARRAY_SIZE)) 1172 return -EINVAL; 1173 1174 dev = ice_pf_to_dev(pf); 1175 set_bit(ICE_FLAG_ETHTOOL_CTXT, pf->flags); 1176 1177 bitmap_copy(orig_flags, pf->flags, ICE_PF_FLAGS_NBITS); 1178 for (i = 0; i < ICE_PRIV_FLAG_ARRAY_SIZE; i++) { 1179 const struct ice_priv_flag *priv_flag; 1180 1181 priv_flag = &ice_gstrings_priv_flags[i]; 1182 1183 if (flags & BIT(i)) 1184 set_bit(priv_flag->bitno, pf->flags); 1185 else 1186 clear_bit(priv_flag->bitno, pf->flags); 1187 } 1188 1189 bitmap_xor(change_flags, pf->flags, orig_flags, ICE_PF_FLAGS_NBITS); 1190 1191 if (test_bit(ICE_FLAG_FW_LLDP_AGENT, change_flags)) { 1192 if (!test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags)) { 1193 enum ice_status status; 1194 1195 /* Disable FW LLDP engine */ 1196 status = ice_cfg_lldp_mib_change(&pf->hw, false); 1197 1198 /* If unregistering for LLDP events fails, this is 1199 * not an error state, as there shouldn't be any 1200 * events to respond 
to. 1201 */ 1202 if (status) 1203 dev_info(dev, 1204 "Failed to unreg for LLDP events\n"); 1205 1206 /* The AQ call to stop the FW LLDP agent will generate 1207 * an error if the agent is already stopped. 1208 */ 1209 status = ice_aq_stop_lldp(&pf->hw, true, true, NULL); 1210 if (status) 1211 dev_warn(dev, "Fail to stop LLDP agent\n"); 1212 /* Use case for having the FW LLDP agent stopped 1213 * will likely not need DCB, so failure to init is 1214 * not a concern of ethtool 1215 */ 1216 status = ice_init_pf_dcb(pf, true); 1217 if (status) 1218 dev_warn(dev, "Fail to init DCB\n"); 1219 } else { 1220 enum ice_status status; 1221 bool dcbx_agent_status; 1222 1223 /* AQ command to start FW LLDP agent will return an 1224 * error if the agent is already started 1225 */ 1226 status = ice_aq_start_lldp(&pf->hw, true, NULL); 1227 if (status) 1228 dev_warn(dev, "Fail to start LLDP Agent\n"); 1229 1230 /* AQ command to start FW DCBX agent will fail if 1231 * the agent is already started 1232 */ 1233 status = ice_aq_start_stop_dcbx(&pf->hw, true, 1234 &dcbx_agent_status, 1235 NULL); 1236 if (status) 1237 dev_dbg(dev, "Failed to start FW DCBX\n"); 1238 1239 dev_info(dev, "FW DCBX agent is %s\n", 1240 dcbx_agent_status ? "ACTIVE" : "DISABLED"); 1241 1242 /* Failure to configure MIB change or init DCB is not 1243 * relevant to ethtool. Print notification that 1244 * registration/init failed but do not return error 1245 * state to ethtool 1246 */ 1247 status = ice_init_pf_dcb(pf, true); 1248 if (status) 1249 dev_dbg(dev, "Fail to init DCB\n"); 1250 1251 /* Remove rule to direct LLDP packets to default VSI. 1252 * The FW LLDP engine will now be consuming them. 
1253 */ 1254 ice_cfg_sw_lldp(vsi, false, false); 1255 1256 /* Register for MIB change events */ 1257 status = ice_cfg_lldp_mib_change(&pf->hw, true); 1258 if (status) 1259 dev_dbg(dev, 1260 "Fail to enable MIB change events\n"); 1261 } 1262 } 1263 if (test_bit(ICE_FLAG_LEGACY_RX, change_flags)) { 1264 /* down and up VSI so that changes of Rx cfg are reflected. */ 1265 ice_down(vsi); 1266 ice_up(vsi); 1267 } 1268 clear_bit(ICE_FLAG_ETHTOOL_CTXT, pf->flags); 1269 return ret; 1270 } 1271 1272 static int ice_get_sset_count(struct net_device *netdev, int sset) 1273 { 1274 switch (sset) { 1275 case ETH_SS_STATS: 1276 /* The number (and order) of strings reported *must* remain 1277 * constant for a given netdevice. This function must not 1278 * report a different number based on run time parameters 1279 * (such as the number of queues in use, or the setting of 1280 * a private ethtool flag). This is due to the nature of the 1281 * ethtool stats API. 1282 * 1283 * Userspace programs such as ethtool must make 3 separate 1284 * ioctl requests, one for size, one for the strings, and 1285 * finally one for the stats. Since these cross into 1286 * userspace, changes to the number or size could result in 1287 * undefined memory access or incorrect string<->value 1288 * correlations for statistics. 1289 * 1290 * Even if it appears to be safe, changes to the size or 1291 * order of strings will suffer from race conditions and are 1292 * not safe. 
 */
		return ICE_ALL_STATS_LEN(netdev);
	case ETH_SS_TEST:
		return ICE_TEST_LEN;
	case ETH_SS_PRIV_FLAGS:
		return ICE_PRIV_FLAG_ARRAY_SIZE;
	default:
		return -EOPNOTSUPP;
	}
}

/* Fill @data with VSI, per-queue, PF, and PFC statistics in the exact
 * order/count promised by ice_get_sset_count(ETH_SS_STATS).
 */
static void
ice_get_ethtool_stats(struct net_device *netdev,
		      struct ethtool_stats __always_unused *stats, u64 *data)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_pf *pf = vsi->back;
	struct ice_ring *ring;
	unsigned int j;
	int i = 0;
	char *p;

	/* Refresh counters before reporting them */
	ice_update_pf_stats(pf);
	ice_update_vsi_stats(vsi);

	for (j = 0; j < ICE_VSI_STATS_LEN; j++) {
		p = (char *)vsi + ice_gstrings_vsi_stats[j].stat_offset;
		/* Widen u32 stats to the u64 slots ethtool expects */
		data[i++] = (ice_gstrings_vsi_stats[j].sizeof_stat ==
			     sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
	}

	/* populate per queue stats */
	rcu_read_lock();

	ice_for_each_alloc_txq(vsi, j) {
		ring = READ_ONCE(vsi->tx_rings[j]);
		if (ring) {
			data[i++] = ring->stats.pkts;
			data[i++] = ring->stats.bytes;
		} else {
			/* Unallocated queues still report zeros so the stat
			 * count stays constant (see ice_get_sset_count).
			 */
			data[i++] = 0;
			data[i++] = 0;
		}
	}

	ice_for_each_alloc_rxq(vsi, j) {
		ring = READ_ONCE(vsi->rx_rings[j]);
		if (ring) {
			data[i++] = ring->stats.pkts;
			data[i++] = ring->stats.bytes;
		} else {
			data[i++] = 0;
			data[i++] = 0;
		}
	}

	rcu_read_unlock();

	/* PF-wide stats are only appended for the base PF netdev's VSI */
	if (vsi->type != ICE_VSI_PF)
		return;

	for (j = 0; j < ICE_PF_STATS_LEN; j++) {
		p = (char *)pf + ice_gstrings_pf_stats[j].stat_offset;
		data[i++] = (ice_gstrings_pf_stats[j].sizeof_stat ==
			     sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
	}

	for (j = 0; j < ICE_MAX_USER_PRIORITY; j++) {
		data[i++] = pf->stats.priority_xon_tx[j];
		data[i++] = pf->stats.priority_xoff_tx[j];
	}

	for (j = 0; j < ICE_MAX_USER_PRIORITY; j++) {
		data[i++] = pf->stats.priority_xon_rx[j];
		data[i++] = pf->stats.priority_xoff_rx[j];
	}
}

/**
 * ice_phy_type_to_ethtool - convert the phy_types to ethtool link modes
 * @netdev: network interface device structure
 * @ks: ethtool link ksettings struct to fill out
 */
static void
ice_phy_type_to_ethtool(struct net_device *netdev,
			struct ethtool_link_ksettings *ks)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_link_status *hw_link_info;
	bool need_add_adv_mode = false;
	struct ice_vsi *vsi = np->vsi;
	u64 phy_types_high;
	u64 phy_types_low;

	hw_link_info = &vsi->port_info->phy.link_info;
	phy_types_low = vsi->port_info->phy.phy_type_low;
	phy_types_high = vsi->port_info->phy.phy_type_high;

	ethtool_link_ksettings_zero_link_mode(ks, supported);
	ethtool_link_ksettings_zero_link_mode(ks, advertising);

	/* For each speed: a PHY type bit adds the "supported" mode; the mode
	 * is also advertised when no speed was explicitly requested or the
	 * requested speeds include this one.
	 */
	if (phy_types_low & ICE_PHY_TYPE_LOW_100BASE_TX ||
	    phy_types_low & ICE_PHY_TYPE_LOW_100M_SGMII) {
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     100baseT_Full);
		if (!hw_link_info->req_speeds ||
		    hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_100MB)
			ethtool_link_ksettings_add_link_mode(ks, advertising,
							     100baseT_Full);
	}
	if (phy_types_low & ICE_PHY_TYPE_LOW_1000BASE_T ||
	    phy_types_low & ICE_PHY_TYPE_LOW_1G_SGMII) {
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     1000baseT_Full);
		if (!hw_link_info->req_speeds ||
		    hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_1000MB)
			ethtool_link_ksettings_add_link_mode(ks, advertising,
							     1000baseT_Full);
	}
	if (phy_types_low & ICE_PHY_TYPE_LOW_1000BASE_KX) {
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     1000baseKX_Full);
		if (!hw_link_info->req_speeds ||
		    hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_1000MB)
			ethtool_link_ksettings_add_link_mode(ks, advertising,
							     1000baseKX_Full);
	}
	if (phy_types_low & ICE_PHY_TYPE_LOW_1000BASE_SX ||
	    phy_types_low & ICE_PHY_TYPE_LOW_1000BASE_LX) {
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     1000baseX_Full);
		if (!hw_link_info->req_speeds ||
		    hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_1000MB)
			ethtool_link_ksettings_add_link_mode(ks, advertising,
							     1000baseX_Full);
	}
	if (phy_types_low & ICE_PHY_TYPE_LOW_2500BASE_T) {
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     2500baseT_Full);
		if (!hw_link_info->req_speeds ||
		    hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_2500MB)
			ethtool_link_ksettings_add_link_mode(ks, advertising,
							     2500baseT_Full);
	}
	if (phy_types_low & ICE_PHY_TYPE_LOW_2500BASE_X ||
	    phy_types_low & ICE_PHY_TYPE_LOW_2500BASE_KX) {
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     2500baseX_Full);
		if (!hw_link_info->req_speeds ||
		    hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_2500MB)
			ethtool_link_ksettings_add_link_mode(ks, advertising,
							     2500baseX_Full);
	}
	if (phy_types_low & ICE_PHY_TYPE_LOW_5GBASE_T ||
	    phy_types_low & ICE_PHY_TYPE_LOW_5GBASE_KR) {
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     5000baseT_Full);
		if (!hw_link_info->req_speeds ||
		    hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_5GB)
			ethtool_link_ksettings_add_link_mode(ks, advertising,
							     5000baseT_Full);
	}
	if (phy_types_low & ICE_PHY_TYPE_LOW_10GBASE_T ||
	    phy_types_low & ICE_PHY_TYPE_LOW_10G_SFI_DA ||
	    phy_types_low & ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC ||
	    phy_types_low & ICE_PHY_TYPE_LOW_10G_SFI_C2C) {
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     10000baseT_Full);
		if (!hw_link_info->req_speeds ||
		    hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_10GB)
			ethtool_link_ksettings_add_link_mode(ks, advertising,
							     10000baseT_Full);
	}
	if (phy_types_low & ICE_PHY_TYPE_LOW_10GBASE_KR_CR1) {
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     10000baseKR_Full);
		if (!hw_link_info->req_speeds ||
		    hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_10GB)
			ethtool_link_ksettings_add_link_mode(ks, advertising,
							     10000baseKR_Full);
	}
	if (phy_types_low & ICE_PHY_TYPE_LOW_10GBASE_SR) {
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     10000baseSR_Full);
		if (!hw_link_info->req_speeds ||
		    hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_10GB)
			ethtool_link_ksettings_add_link_mode(ks, advertising,
							     10000baseSR_Full);
	}
	if (phy_types_low & ICE_PHY_TYPE_LOW_10GBASE_LR) {
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     10000baseLR_Full);
		if (!hw_link_info->req_speeds ||
		    hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_10GB)
			ethtool_link_ksettings_add_link_mode(ks, advertising,
							     10000baseLR_Full);
	}
	if (phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_T ||
	    phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_CR ||
	    phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_CR_S ||
	    phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_CR1 ||
	    phy_types_low & ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC ||
	    phy_types_low & ICE_PHY_TYPE_LOW_25G_AUI_C2C) {
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     25000baseCR_Full);
		if (!hw_link_info->req_speeds ||
		    hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_25GB)
			ethtool_link_ksettings_add_link_mode(ks, advertising,
							     25000baseCR_Full);
	}
	if (phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_SR ||
	    phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_LR) {
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     25000baseSR_Full);
		if (!hw_link_info->req_speeds ||
		    hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_25GB)
			ethtool_link_ksettings_add_link_mode(ks, advertising,
							     25000baseSR_Full);
	}
	if (phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_KR ||
	    phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_KR_S ||
	    phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_KR1) {
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     25000baseKR_Full);
		if (!hw_link_info->req_speeds ||
		    hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_25GB)
			ethtool_link_ksettings_add_link_mode(ks, advertising,
							     25000baseKR_Full);
	}
	if (phy_types_low & ICE_PHY_TYPE_LOW_40GBASE_KR4) {
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     40000baseKR4_Full);
		if (!hw_link_info->req_speeds ||
		    hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_40GB)
			ethtool_link_ksettings_add_link_mode(ks, advertising,
							     40000baseKR4_Full);
	}
	if (phy_types_low & ICE_PHY_TYPE_LOW_40GBASE_CR4 ||
	    phy_types_low & ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC ||
	    phy_types_low & ICE_PHY_TYPE_LOW_40G_XLAUI) {
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     40000baseCR4_Full);
		if (!hw_link_info->req_speeds ||
		    hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_40GB)
			ethtool_link_ksettings_add_link_mode(ks, advertising,
							     40000baseCR4_Full);
	}
	if (phy_types_low & ICE_PHY_TYPE_LOW_40GBASE_SR4) {
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     40000baseSR4_Full);
		if (!hw_link_info->req_speeds ||
		    hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_40GB)
			ethtool_link_ksettings_add_link_mode(ks, advertising,
							     40000baseSR4_Full);
	}
	if (phy_types_low & ICE_PHY_TYPE_LOW_40GBASE_LR4) {
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     40000baseLR4_Full);
		if (!hw_link_info->req_speeds ||
		    hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_40GB)
			ethtool_link_ksettings_add_link_mode(ks, advertising,
							     40000baseLR4_Full);
	}
	if (phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_CR2 ||
	    phy_types_low & ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC ||
	    phy_types_low & ICE_PHY_TYPE_LOW_50G_LAUI2 ||
	    phy_types_low & ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC ||
	    phy_types_low & ICE_PHY_TYPE_LOW_50G_AUI2 ||
	    phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_CP ||
	    phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_SR ||
	    phy_types_low & ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC ||
	    phy_types_low & ICE_PHY_TYPE_LOW_50G_AUI1) {
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     50000baseCR2_Full);
		if (!hw_link_info->req_speeds ||
		    hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_50GB)
			ethtool_link_ksettings_add_link_mode(ks, advertising,
							     50000baseCR2_Full);
	}
	if (phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_KR2 ||
	    phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4) {
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     50000baseKR2_Full);
		if (!hw_link_info->req_speeds ||
		    hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_50GB)
			ethtool_link_ksettings_add_link_mode(ks, advertising,
							     50000baseKR2_Full);
	}
	if (phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_SR2 ||
	    phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_LR2 ||
	    phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_FR ||
	    phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_LR) {
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     50000baseSR2_Full);
		if (!hw_link_info->req_speeds ||
		    hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_50GB)
			ethtool_link_ksettings_add_link_mode(ks, advertising,
							     50000baseSR2_Full);
	}
	/* The 100G blocks defer adding the advertising bit via
	 * need_add_adv_mode; the flag is consumed (and cleared) right after
	 * each block.
	 */
	if (phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_CR4 ||
	    phy_types_low & ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC ||
	    phy_types_low & ICE_PHY_TYPE_LOW_100G_CAUI4 ||
	    phy_types_low & ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC ||
	    phy_types_low & ICE_PHY_TYPE_LOW_100G_AUI4 ||
	    phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4 ||
	    phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_CP2 ||
	    phy_types_high & ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC ||
	    phy_types_high & ICE_PHY_TYPE_HIGH_100G_CAUI2 ||
	    phy_types_high & ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC ||
	    phy_types_high & ICE_PHY_TYPE_HIGH_100G_AUI2) {
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     100000baseCR4_Full);
		if (!hw_link_info->req_speeds ||
		    hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_100GB)
			need_add_adv_mode = true;
	}
	if (need_add_adv_mode) {
		need_add_adv_mode = false;
		ethtool_link_ksettings_add_link_mode(ks, advertising,
						     100000baseCR4_Full);
	}
	if (phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_SR4 ||
	    phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_SR2) {
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     100000baseSR4_Full);
		if (!hw_link_info->req_speeds ||
		    hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_100GB)
			need_add_adv_mode = true;
	}
	if (need_add_adv_mode) {
		need_add_adv_mode = false;
		ethtool_link_ksettings_add_link_mode(ks, advertising,
						     100000baseSR4_Full);
	}
	if (phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_LR4 ||
	    phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_DR) {
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     100000baseLR4_ER4_Full);
		if (!hw_link_info->req_speeds ||
		    hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_100GB)
			need_add_adv_mode = true;
	}
	if (need_add_adv_mode) {
		need_add_adv_mode = false;
		ethtool_link_ksettings_add_link_mode(ks, advertising,
						     100000baseLR4_ER4_Full);
	}
	if (phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_KR4 ||
	    phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4 ||
	    phy_types_high & ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4) {
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     100000baseKR4_Full);
		if (!hw_link_info->req_speeds ||
		    hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_100GB)
			need_add_adv_mode = true;
	}
	if (need_add_adv_mode)
		ethtool_link_ksettings_add_link_mode(ks, advertising,
						     100000baseKR4_Full);

	/* Autoneg PHY types */
	if (phy_types_low & ICE_PHY_TYPE_LOW_100BASE_TX ||
	    phy_types_low & ICE_PHY_TYPE_LOW_1000BASE_T ||
	    phy_types_low & ICE_PHY_TYPE_LOW_1000BASE_KX ||
	    phy_types_low & ICE_PHY_TYPE_LOW_2500BASE_T ||
	    phy_types_low & ICE_PHY_TYPE_LOW_2500BASE_KX ||
	    phy_types_low & ICE_PHY_TYPE_LOW_5GBASE_T ||
	    phy_types_low & ICE_PHY_TYPE_LOW_5GBASE_KR ||
	    phy_types_low & ICE_PHY_TYPE_LOW_10GBASE_T ||
	    phy_types_low & ICE_PHY_TYPE_LOW_10GBASE_KR_CR1 ||
	    phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_T ||
	    phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_CR ||
	    phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_CR_S ||
	    phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_CR1 ||
	    phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_KR ||
	    phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_KR_S ||
	    phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_KR1 ||
	    phy_types_low & ICE_PHY_TYPE_LOW_40GBASE_CR4 ||
	    phy_types_low & ICE_PHY_TYPE_LOW_40GBASE_KR4) {
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     Autoneg);
		ethtool_link_ksettings_add_link_mode(ks, advertising,
						     Autoneg);
	}
	if (phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_CR2 ||
	    phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_KR2 ||
	    phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_CP ||
	    phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4) {
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     Autoneg);
		ethtool_link_ksettings_add_link_mode(ks, advertising,
						     Autoneg);
	}
	if (phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_CR4 ||
	    phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_KR4 ||
	    phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4 ||
	    phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_CP2) {
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     Autoneg);
		ethtool_link_ksettings_add_link_mode(ks, advertising,
						     Autoneg);
	}
}

#define \
TEST_SET_BITS_TIMEOUT	50
#define TEST_SET_BITS_SLEEP_MAX	2000
#define TEST_SET_BITS_SLEEP_MIN	1000

/**
 * ice_get_settings_link_up - Get Link settings for when link is up
 * @ks: ethtool ksettings to fill in
 * @netdev: network interface device structure
 *
 * Maps the negotiated phy_type_low/phy_type_high to ethtool link modes,
 * intersects them with what the NVM reports as supported, then fills in
 * speed, duplex, autoneg and pause information.
 */
static void
ice_get_settings_link_up(struct ethtool_link_ksettings *ks,
			 struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_port_info *pi = np->vsi->port_info;
	struct ethtool_link_ksettings cap_ksettings;
	struct ice_link_status *link_info;
	struct ice_vsi *vsi = np->vsi;
	bool unrecog_phy_high = false;
	bool unrecog_phy_low = false;

	link_info = &vsi->port_info->phy.link_info;

	/* Initialize supported and advertised settings based on PHY settings */
	switch (link_info->phy_type_low) {
	case ICE_PHY_TYPE_LOW_100BASE_TX:
		ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     100baseT_Full);
		ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
		ethtool_link_ksettings_add_link_mode(ks, advertising,
						     100baseT_Full);
		break;
	case ICE_PHY_TYPE_LOW_100M_SGMII:
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     100baseT_Full);
		break;
	case ICE_PHY_TYPE_LOW_1000BASE_T:
		ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     1000baseT_Full);
		ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
		ethtool_link_ksettings_add_link_mode(ks, advertising,
						     1000baseT_Full);
		break;
	case ICE_PHY_TYPE_LOW_1G_SGMII:
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     1000baseT_Full);
		break;
	case ICE_PHY_TYPE_LOW_1000BASE_SX:
	case ICE_PHY_TYPE_LOW_1000BASE_LX:
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     1000baseX_Full);
		break;
	case ICE_PHY_TYPE_LOW_1000BASE_KX:
		ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     1000baseKX_Full);
		ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
		ethtool_link_ksettings_add_link_mode(ks, advertising,
						     1000baseKX_Full);
		break;
	case ICE_PHY_TYPE_LOW_2500BASE_T:
		ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
		ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     2500baseT_Full);
		ethtool_link_ksettings_add_link_mode(ks, advertising,
						     2500baseT_Full);
		break;
	case ICE_PHY_TYPE_LOW_2500BASE_X:
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     2500baseX_Full);
		break;
	case ICE_PHY_TYPE_LOW_2500BASE_KX:
		ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     2500baseX_Full);
		ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
		ethtool_link_ksettings_add_link_mode(ks, advertising,
						     2500baseX_Full);
		break;
	case ICE_PHY_TYPE_LOW_5GBASE_T:
	case ICE_PHY_TYPE_LOW_5GBASE_KR:
		ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     5000baseT_Full);
		ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
		ethtool_link_ksettings_add_link_mode(ks, advertising,
						     5000baseT_Full);
		break;
	case ICE_PHY_TYPE_LOW_10GBASE_T:
		ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     10000baseT_Full);
		ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
		ethtool_link_ksettings_add_link_mode(ks, advertising,
						     10000baseT_Full);
		break;
	case ICE_PHY_TYPE_LOW_10G_SFI_DA:
	case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
	case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     10000baseT_Full);
		break;
	case ICE_PHY_TYPE_LOW_10GBASE_SR:
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     10000baseSR_Full);
		break;
	case ICE_PHY_TYPE_LOW_10GBASE_LR:
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     10000baseLR_Full);
		break;
	case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
		ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     10000baseKR_Full);
		ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
		ethtool_link_ksettings_add_link_mode(ks, advertising,
						     10000baseKR_Full);
		break;
	case ICE_PHY_TYPE_LOW_25GBASE_T:
	case ICE_PHY_TYPE_LOW_25GBASE_CR:
	case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
	case ICE_PHY_TYPE_LOW_25GBASE_CR1:
		ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     25000baseCR_Full);
		ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
		ethtool_link_ksettings_add_link_mode(ks, advertising,
						     25000baseCR_Full);
		break;
	case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
	case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     25000baseCR_Full);
		break;
	case ICE_PHY_TYPE_LOW_25GBASE_SR:
	case ICE_PHY_TYPE_LOW_25GBASE_LR:
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     25000baseSR_Full);
		break;
	case ICE_PHY_TYPE_LOW_25GBASE_KR:
	case ICE_PHY_TYPE_LOW_25GBASE_KR1:
	case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
		ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     25000baseKR_Full);
		ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
		ethtool_link_ksettings_add_link_mode(ks, advertising,
						     25000baseKR_Full);
		break;
	case ICE_PHY_TYPE_LOW_40GBASE_CR4:
		ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     40000baseCR4_Full);
		ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
		ethtool_link_ksettings_add_link_mode(ks, advertising,
						     40000baseCR4_Full);
		break;
	case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
	case ICE_PHY_TYPE_LOW_40G_XLAUI:
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     40000baseCR4_Full);
		break;
	case ICE_PHY_TYPE_LOW_40GBASE_SR4:
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     40000baseSR4_Full);
		break;
	case ICE_PHY_TYPE_LOW_40GBASE_LR4:
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     40000baseLR4_Full);
		break;
	case ICE_PHY_TYPE_LOW_40GBASE_KR4:
		ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     40000baseKR4_Full);
		ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
		ethtool_link_ksettings_add_link_mode(ks, advertising,
						     40000baseKR4_Full);
		break;
	case ICE_PHY_TYPE_LOW_50GBASE_CR2:
	case ICE_PHY_TYPE_LOW_50GBASE_CP:
		ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     50000baseCR2_Full);
		ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
		ethtool_link_ksettings_add_link_mode(ks, advertising,
						     50000baseCR2_Full);
		break;
	case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
	case ICE_PHY_TYPE_LOW_50G_LAUI2:
	case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
	case ICE_PHY_TYPE_LOW_50G_AUI2:
	case ICE_PHY_TYPE_LOW_50GBASE_SR:
	case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
	case ICE_PHY_TYPE_LOW_50G_AUI1:
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     50000baseCR2_Full);
		break;
	case ICE_PHY_TYPE_LOW_50GBASE_KR2:
	case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
		ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     50000baseKR2_Full);
		ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
		ethtool_link_ksettings_add_link_mode(ks, advertising,
						     50000baseKR2_Full);
		break;
	case ICE_PHY_TYPE_LOW_50GBASE_SR2:
	case ICE_PHY_TYPE_LOW_50GBASE_LR2:
	case ICE_PHY_TYPE_LOW_50GBASE_FR:
	case ICE_PHY_TYPE_LOW_50GBASE_LR:
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     50000baseSR2_Full);
		break;
	case ICE_PHY_TYPE_LOW_100GBASE_CR4:
		ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     100000baseCR4_Full);
		ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
		ethtool_link_ksettings_add_link_mode(ks, advertising,
						     100000baseCR4_Full);
		break;
	case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
	case ICE_PHY_TYPE_LOW_100G_CAUI4:
	case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
	case ICE_PHY_TYPE_LOW_100G_AUI4:
	case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     100000baseCR4_Full);
		break;
	case ICE_PHY_TYPE_LOW_100GBASE_CP2:
		ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     100000baseCR4_Full);
		ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
		ethtool_link_ksettings_add_link_mode(ks, advertising,
						     100000baseCR4_Full);
		break;
	case ICE_PHY_TYPE_LOW_100GBASE_SR4:
	case ICE_PHY_TYPE_LOW_100GBASE_SR2:
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     100000baseSR4_Full);
		break;
	case ICE_PHY_TYPE_LOW_100GBASE_LR4:
	case ICE_PHY_TYPE_LOW_100GBASE_DR:
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     100000baseLR4_ER4_Full);
		break;
	case ICE_PHY_TYPE_LOW_100GBASE_KR4:
	case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
		ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     100000baseKR4_Full);
		ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
		ethtool_link_ksettings_add_link_mode(ks, advertising,
						     100000baseKR4_Full);
		break;
	default:
		unrecog_phy_low = true;
	}

	switch (link_info->phy_type_high) {
	case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
		ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     100000baseKR4_Full);
		ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
		ethtool_link_ksettings_add_link_mode(ks, advertising,
						     100000baseKR4_Full);
		break;
	case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
	case ICE_PHY_TYPE_HIGH_100G_CAUI2:
	case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
	case ICE_PHY_TYPE_HIGH_100G_AUI2:
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     100000baseCR4_Full);
		break;
	default:
		unrecog_phy_high = true;
	}

	if (unrecog_phy_low && unrecog_phy_high) {
		/* if we got here and link is up something bad is afoot */
		netdev_info(netdev,
			    "WARNING: Unrecognized PHY_Low (0x%llx).\n",
			    (u64)link_info->phy_type_low);
		netdev_info(netdev,
			    "WARNING: Unrecognized PHY_High (0x%llx).\n",
			    (u64)link_info->phy_type_high);
	}

	/* Now that we've worked out everything that could be supported by the
	 * current PHY type, get what is supported by the NVM and intersect
	 * them to get what is truly supported
	 */
	memset(&cap_ksettings, 0, sizeof(cap_ksettings));
	ice_phy_type_to_ethtool(netdev, &cap_ksettings);
	ethtool_intersect_link_masks(ks, &cap_ksettings);

	/* Translate the AQ link speed code into the ethtool speed value */
	switch (link_info->link_speed) {
	case ICE_AQ_LINK_SPEED_100GB:
		ks->base.speed = SPEED_100000;
		break;
	case ICE_AQ_LINK_SPEED_50GB:
		ks->base.speed = SPEED_50000;
		break;
	case ICE_AQ_LINK_SPEED_40GB:
		ks->base.speed = SPEED_40000;
		break;
	case ICE_AQ_LINK_SPEED_25GB:
		ks->base.speed = SPEED_25000;
		break;
	case ICE_AQ_LINK_SPEED_20GB:
		ks->base.speed = SPEED_20000;
		break;
	case ICE_AQ_LINK_SPEED_10GB:
		ks->base.speed = SPEED_10000;
		break;
	case ICE_AQ_LINK_SPEED_5GB:
		ks->base.speed = SPEED_5000;
		break;
	case ICE_AQ_LINK_SPEED_2500MB:
		ks->base.speed = SPEED_2500;
		break;
	case ICE_AQ_LINK_SPEED_1000MB:
		ks->base.speed = SPEED_1000;
		break;
	case ICE_AQ_LINK_SPEED_100MB:
		ks->base.speed = SPEED_100;
		break;
	default:
		netdev_info(netdev,
			    "WARNING: Unrecognized link_speed (0x%x).\n",
			    link_info->link_speed);
		break;
	}
	ks->base.duplex = DUPLEX_FULL;

	if (link_info->an_info & ICE_AQ_AN_COMPLETED)
		ethtool_link_ksettings_add_link_mode(ks, lp_advertising,
						     Autoneg);

	/* Set flow control negotiated Rx/Tx pause */
	switch (pi->fc.current_mode) {
	case ICE_FC_FULL:
		ethtool_link_ksettings_add_link_mode(ks, lp_advertising, Pause);
		break;
	case ICE_FC_TX_PAUSE:
		ethtool_link_ksettings_add_link_mode(ks, lp_advertising, Pause);
		ethtool_link_ksettings_add_link_mode(ks, lp_advertising,
						     Asym_Pause);
		break;
	case ICE_FC_RX_PAUSE:
		ethtool_link_ksettings_add_link_mode(ks, lp_advertising,
						     Asym_Pause);
		break;
	case ICE_FC_PFC:
		/* fall through */
	default:
		ethtool_link_ksettings_del_link_mode(ks, lp_advertising, Pause);
		ethtool_link_ksettings_del_link_mode(ks, lp_advertising,
						     Asym_Pause);
		break;
	}
}

/**
 * ice_get_settings_link_down - Get the Link settings when link is down
 * @ks: ethtool ksettings to fill in
 * @netdev:
network interface device structure
 *
 * Reports link settings that can be determined when link is down
 */
static void
ice_get_settings_link_down(struct ethtool_link_ksettings *ks,
			   struct net_device *netdev)
{
	/* link is down and the driver needs to fall back on
	 * supported PHY types to figure out what info to display
	 */
	ice_phy_type_to_ethtool(netdev, ks);

	/* With no link, speed and duplex are unknown */
	ks->base.speed = SPEED_UNKNOWN;
	ks->base.duplex = DUPLEX_UNKNOWN;
}

/**
 * ice_get_link_ksettings - Get Link Speed and Duplex settings
 * @netdev: network interface device structure
 * @ks: ethtool ksettings
 *
 * Reports speed/duplex settings based on media_type
 *
 * Returns 0 on success, -ENOMEM on allocation failure, -EIO if the
 * Get PHY Capabilities AQ command fails.
 */
static int
ice_get_link_ksettings(struct net_device *netdev,
		       struct ethtool_link_ksettings *ks)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_aqc_get_phy_caps_data *caps;
	struct ice_link_status *hw_link_info;
	struct ice_vsi *vsi = np->vsi;
	enum ice_status status;
	int err = 0;

	ethtool_link_ksettings_zero_link_mode(ks, supported);
	ethtool_link_ksettings_zero_link_mode(ks, advertising);
	ethtool_link_ksettings_zero_link_mode(ks, lp_advertising);
	hw_link_info = &vsi->port_info->phy.link_info;

	/* set speed and duplex */
	if (hw_link_info->link_info & ICE_AQ_LINK_UP)
		ice_get_settings_link_up(ks, netdev);
	else
		ice_get_settings_link_down(ks, netdev);

	/* set autoneg settings */
	ks->base.autoneg = (hw_link_info->an_info & ICE_AQ_AN_COMPLETED) ?
		AUTONEG_ENABLE : AUTONEG_DISABLE;

	/* set media type settings */
	switch (vsi->port_info->phy.media_type) {
	case ICE_MEDIA_FIBER:
		ethtool_link_ksettings_add_link_mode(ks, supported, FIBRE);
		ks->base.port = PORT_FIBRE;
		break;
	case ICE_MEDIA_BASET:
		ethtool_link_ksettings_add_link_mode(ks, supported, TP);
		ethtool_link_ksettings_add_link_mode(ks, advertising, TP);
		ks->base.port = PORT_TP;
		break;
	case ICE_MEDIA_BACKPLANE:
		ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
		ethtool_link_ksettings_add_link_mode(ks, supported, Backplane);
		ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
		ethtool_link_ksettings_add_link_mode(ks, advertising,
						     Backplane);
		ks->base.port = PORT_NONE;
		break;
	case ICE_MEDIA_DA:
		ethtool_link_ksettings_add_link_mode(ks, supported, FIBRE);
		ethtool_link_ksettings_add_link_mode(ks, advertising, FIBRE);
		ks->base.port = PORT_DA;
		break;
	default:
		ks->base.port = PORT_OTHER;
		break;
	}

	/* flow control is symmetric and always supported */
	ethtool_link_ksettings_add_link_mode(ks, supported, Pause);

	caps = kzalloc(sizeof(*caps), GFP_KERNEL);
	if (!caps)
		return -ENOMEM;

	/* SW_CFG report reflects the currently configured PHY settings */
	status = ice_aq_get_phy_caps(vsi->port_info, false,
				     ICE_AQC_REPORT_SW_CFG, caps, NULL);
	if (status) {
		err = -EIO;
		goto done;
	}

	/* Set the advertised flow control based on the PHY capability */
	if ((caps->caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE) &&
	    (caps->caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)) {
		ethtool_link_ksettings_add_link_mode(ks, advertising, Pause);
		ethtool_link_ksettings_add_link_mode(ks, advertising,
						     Asym_Pause);
	} else if (caps->caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE) {
		ethtool_link_ksettings_add_link_mode(ks, advertising,
						     Asym_Pause);
	} else if (caps->caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE) {
		ethtool_link_ksettings_add_link_mode(ks, advertising, Pause);
		ethtool_link_ksettings_add_link_mode(ks, advertising,
						     Asym_Pause);
	} else {
		ethtool_link_ksettings_del_link_mode(ks, advertising, Pause);
		ethtool_link_ksettings_del_link_mode(ks, advertising,
						     Asym_Pause);
	}

	/* Set advertised FEC modes based on PHY capability */
	ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_NONE);

	if (caps->link_fec_options & ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ ||
	    caps->link_fec_options & ICE_AQC_PHY_FEC_25G_KR_REQ)
		ethtool_link_ksettings_add_link_mode(ks, advertising,
						     FEC_BASER);
	if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_528_REQ ||
	    caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_544_REQ)
		ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_RS);

	/* Query again with TOPO_CAP to derive supported (rather than
	 * currently advertised) FEC modes; caps buffer is reused.
	 */
	status = ice_aq_get_phy_caps(vsi->port_info, false,
				     ICE_AQC_REPORT_TOPO_CAP, caps, NULL);
	if (status) {
		err = -EIO;
		goto done;
	}

	/* Set supported FEC modes based on PHY capability */
	ethtool_link_ksettings_add_link_mode(ks, supported, FEC_NONE);

	if (caps->link_fec_options & ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN ||
	    caps->link_fec_options & ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN)
		ethtool_link_ksettings_add_link_mode(ks, supported, FEC_BASER);
	if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN)
		ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS);

done:
	/* caps is freed on both success and error paths */
	kfree(caps);
	return err;
}

/**
 * ice_ksettings_find_adv_link_speed - Find advertising link speed
 * @ks: ethtool ksettings
 *
 * Translates the advertised ethtool link modes into the driver's
 * ICE_AQ_LINK_SPEED_* bitmask (ORing together every speed for which at
 * least one mode is advertised).
 */
static u16
ice_ksettings_find_adv_link_speed(const struct ethtool_link_ksettings *ks)
{
	u16 adv_link_speed = 0;

	if (ethtool_link_ksettings_test_link_mode(ks, advertising,
						  100baseT_Full))
		adv_link_speed |= ICE_AQ_LINK_SPEED_100MB;
	if (ethtool_link_ksettings_test_link_mode(ks, advertising,
						  1000baseX_Full))
		adv_link_speed |= ICE_AQ_LINK_SPEED_1000MB;
	if (ethtool_link_ksettings_test_link_mode(ks, advertising,
						  1000baseT_Full) ||
	    ethtool_link_ksettings_test_link_mode(ks, advertising,
						  1000baseKX_Full))
		adv_link_speed |= ICE_AQ_LINK_SPEED_1000MB;
	if (ethtool_link_ksettings_test_link_mode(ks, advertising,
						  2500baseT_Full))
		adv_link_speed |= ICE_AQ_LINK_SPEED_2500MB;
	if (ethtool_link_ksettings_test_link_mode(ks, advertising,
						  2500baseX_Full))
		adv_link_speed |= ICE_AQ_LINK_SPEED_2500MB;
	if (ethtool_link_ksettings_test_link_mode(ks, advertising,
						  5000baseT_Full))
		adv_link_speed |= ICE_AQ_LINK_SPEED_5GB;
	if (ethtool_link_ksettings_test_link_mode(ks, advertising,
						  10000baseT_Full) ||
	    ethtool_link_ksettings_test_link_mode(ks, advertising,
						  10000baseKR_Full))
		adv_link_speed |= ICE_AQ_LINK_SPEED_10GB;
	if (ethtool_link_ksettings_test_link_mode(ks, advertising,
						  10000baseSR_Full) ||
	    ethtool_link_ksettings_test_link_mode(ks, advertising,
						  10000baseLR_Full))
		adv_link_speed |= ICE_AQ_LINK_SPEED_10GB;
	if (ethtool_link_ksettings_test_link_mode(ks, advertising,
						  25000baseCR_Full) ||
	    ethtool_link_ksettings_test_link_mode(ks, advertising,
						  25000baseSR_Full) ||
	    ethtool_link_ksettings_test_link_mode(ks, advertising,
						  25000baseKR_Full))
		adv_link_speed |= ICE_AQ_LINK_SPEED_25GB;
	if (ethtool_link_ksettings_test_link_mode(ks, advertising,
						  40000baseCR4_Full) ||
	    ethtool_link_ksettings_test_link_mode(ks, advertising,
						  40000baseSR4_Full) ||
	    ethtool_link_ksettings_test_link_mode(ks, advertising,
						  40000baseLR4_Full) ||
	    ethtool_link_ksettings_test_link_mode(ks, advertising,
						  40000baseKR4_Full))
		adv_link_speed |= ICE_AQ_LINK_SPEED_40GB;
	if (ethtool_link_ksettings_test_link_mode(ks, advertising,
						  50000baseCR2_Full) ||
	    ethtool_link_ksettings_test_link_mode(ks, advertising,
						  50000baseKR2_Full))
		adv_link_speed |= ICE_AQ_LINK_SPEED_50GB;
	if (ethtool_link_ksettings_test_link_mode(ks, advertising,
						  50000baseSR2_Full))
		adv_link_speed |= ICE_AQ_LINK_SPEED_50GB;
	if (ethtool_link_ksettings_test_link_mode(ks, advertising,
						  100000baseCR4_Full) ||
	    ethtool_link_ksettings_test_link_mode(ks, advertising,
						  100000baseSR4_Full) ||
	    ethtool_link_ksettings_test_link_mode(ks, advertising,
						  100000baseLR4_ER4_Full) ||
	    ethtool_link_ksettings_test_link_mode(ks, advertising,
						  100000baseKR4_Full))
		adv_link_speed |= ICE_AQ_LINK_SPEED_100GB;

	return adv_link_speed;
}

/**
 * ice_setup_autoneg
 * @p: port info
 * @ks: ethtool_link_ksettings
 * @config: configuration that will be sent down to FW
 * @autoneg_enabled: autonegotiation is enabled or not
 * @autoneg_changed: whether there will be a change in autonegotiation
 * @netdev: network interface device structure
 *
 * Setup PHY autonegotiation feature
 */
static int
ice_setup_autoneg(struct ice_port_info *p, struct ethtool_link_ksettings *ks,
		  struct ice_aqc_set_phy_cfg_data *config,
		  u8 autoneg_enabled, u8 *autoneg_changed,
		  struct net_device *netdev)
{
	int err = 0;

	*autoneg_changed = 0;

	/* Check autoneg */
	if (autoneg_enabled == AUTONEG_ENABLE) {
		/* If autoneg was not already enabled */
		if (!(p->phy.link_info.an_info & ICE_AQ_AN_COMPLETED)) {
			/* If autoneg is not supported, return error */
			if (!ethtool_link_ksettings_test_link_mode(ks,
								   supported,
								   Autoneg)) {
				netdev_info(netdev, "Autoneg not supported on this phy.\n");
				err = -EINVAL;
			} else {
				/* Autoneg is allowed to change */
				config->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
				*autoneg_changed = 1;
			}
		}
	} else {
		/* If autoneg is currently enabled */
		if (p->phy.link_info.an_info & ICE_AQ_AN_COMPLETED)
{ 2330 /* If autoneg is supported 10GBASE_T is the only PHY 2331 * that can disable it, so otherwise return error 2332 */ 2333 if (ethtool_link_ksettings_test_link_mode(ks, 2334 supported, 2335 Autoneg)) { 2336 netdev_info(netdev, "Autoneg cannot be disabled on this phy\n"); 2337 err = -EINVAL; 2338 } else { 2339 /* Autoneg is allowed to change */ 2340 config->caps &= ~ICE_AQ_PHY_ENA_AUTO_LINK_UPDT; 2341 *autoneg_changed = 1; 2342 } 2343 } 2344 } 2345 2346 return err; 2347 } 2348 2349 /** 2350 * ice_set_link_ksettings - Set Speed and Duplex 2351 * @netdev: network interface device structure 2352 * @ks: ethtool ksettings 2353 * 2354 * Set speed/duplex per media_types advertised/forced 2355 */ 2356 static int 2357 ice_set_link_ksettings(struct net_device *netdev, 2358 const struct ethtool_link_ksettings *ks) 2359 { 2360 u8 autoneg, timeout = TEST_SET_BITS_TIMEOUT, lport = 0; 2361 struct ice_netdev_priv *np = netdev_priv(netdev); 2362 struct ethtool_link_ksettings safe_ks, copy_ks; 2363 struct ice_aqc_get_phy_caps_data *abilities; 2364 u16 adv_link_speed, curr_link_speed, idx; 2365 struct ice_aqc_set_phy_cfg_data config; 2366 struct ice_pf *pf = np->vsi->back; 2367 struct ice_port_info *p; 2368 u8 autoneg_changed = 0; 2369 enum ice_status status; 2370 u64 phy_type_high; 2371 u64 phy_type_low; 2372 int err = 0; 2373 bool linkup; 2374 2375 p = np->vsi->port_info; 2376 2377 if (!p) 2378 return -EOPNOTSUPP; 2379 2380 /* Check if this is LAN VSI */ 2381 ice_for_each_vsi(pf, idx) 2382 if (pf->vsi[idx]->type == ICE_VSI_PF) { 2383 if (np->vsi != pf->vsi[idx]) 2384 return -EOPNOTSUPP; 2385 break; 2386 } 2387 2388 if (p->phy.media_type != ICE_MEDIA_BASET && 2389 p->phy.media_type != ICE_MEDIA_FIBER && 2390 p->phy.media_type != ICE_MEDIA_BACKPLANE && 2391 p->phy.media_type != ICE_MEDIA_DA && 2392 p->phy.link_info.link_info & ICE_AQ_LINK_UP) 2393 return -EOPNOTSUPP; 2394 2395 /* copy the ksettings to copy_ks to avoid modifying the original */ 2396 memcpy(©_ks, ks, 
sizeof(copy_ks)); 2397 2398 /* save autoneg out of ksettings */ 2399 autoneg = copy_ks.base.autoneg; 2400 2401 memset(&safe_ks, 0, sizeof(safe_ks)); 2402 2403 /* Get link modes supported by hardware.*/ 2404 ice_phy_type_to_ethtool(netdev, &safe_ks); 2405 2406 /* and check against modes requested by user. 2407 * Return an error if unsupported mode was set. 2408 */ 2409 if (!bitmap_subset(copy_ks.link_modes.advertising, 2410 safe_ks.link_modes.supported, 2411 __ETHTOOL_LINK_MODE_MASK_NBITS)) 2412 return -EINVAL; 2413 2414 /* get our own copy of the bits to check against */ 2415 memset(&safe_ks, 0, sizeof(safe_ks)); 2416 safe_ks.base.cmd = copy_ks.base.cmd; 2417 safe_ks.base.link_mode_masks_nwords = 2418 copy_ks.base.link_mode_masks_nwords; 2419 ice_get_link_ksettings(netdev, &safe_ks); 2420 2421 /* set autoneg back to what it currently is */ 2422 copy_ks.base.autoneg = safe_ks.base.autoneg; 2423 /* we don't compare the speed */ 2424 copy_ks.base.speed = safe_ks.base.speed; 2425 2426 /* If copy_ks.base and safe_ks.base are not the same now, then they are 2427 * trying to set something that we do not support. 
2428 */ 2429 if (memcmp(©_ks.base, &safe_ks.base, sizeof(copy_ks.base))) 2430 return -EOPNOTSUPP; 2431 2432 while (test_and_set_bit(__ICE_CFG_BUSY, pf->state)) { 2433 timeout--; 2434 if (!timeout) 2435 return -EBUSY; 2436 usleep_range(TEST_SET_BITS_SLEEP_MIN, TEST_SET_BITS_SLEEP_MAX); 2437 } 2438 2439 abilities = kzalloc(sizeof(*abilities), GFP_KERNEL); 2440 if (!abilities) 2441 return -ENOMEM; 2442 2443 /* Get the current PHY config */ 2444 status = ice_aq_get_phy_caps(p, false, ICE_AQC_REPORT_SW_CFG, abilities, 2445 NULL); 2446 if (status) { 2447 err = -EAGAIN; 2448 goto done; 2449 } 2450 2451 /* Copy abilities to config in case autoneg is not set below */ 2452 memset(&config, 0, sizeof(config)); 2453 config.caps = abilities->caps & ~ICE_AQC_PHY_AN_MODE; 2454 if (abilities->caps & ICE_AQC_PHY_AN_MODE) 2455 config.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT; 2456 2457 /* Check autoneg */ 2458 err = ice_setup_autoneg(p, &safe_ks, &config, autoneg, &autoneg_changed, 2459 netdev); 2460 2461 if (err) 2462 goto done; 2463 2464 /* Call to get the current link speed */ 2465 p->phy.get_link_info = true; 2466 status = ice_get_link_status(p, &linkup); 2467 if (status) { 2468 err = -EAGAIN; 2469 goto done; 2470 } 2471 2472 curr_link_speed = p->phy.link_info.link_speed; 2473 adv_link_speed = ice_ksettings_find_adv_link_speed(ks); 2474 2475 /* If speed didn't get set, set it to what it currently is. 2476 * This is needed because if advertise is 0 (as it is when autoneg 2477 * is disabled) then speed won't get set. 
2478 */ 2479 if (!adv_link_speed) 2480 adv_link_speed = curr_link_speed; 2481 2482 /* Convert the advertise link speeds to their corresponded PHY_TYPE */ 2483 ice_update_phy_type(&phy_type_low, &phy_type_high, adv_link_speed); 2484 2485 if (!autoneg_changed && adv_link_speed == curr_link_speed) { 2486 netdev_info(netdev, "Nothing changed, exiting without setting anything.\n"); 2487 goto done; 2488 } 2489 2490 /* copy over the rest of the abilities */ 2491 config.low_power_ctrl = abilities->low_power_ctrl; 2492 config.eee_cap = abilities->eee_cap; 2493 config.eeer_value = abilities->eeer_value; 2494 config.link_fec_opt = abilities->link_fec_options; 2495 2496 /* save the requested speeds */ 2497 p->phy.link_info.req_speeds = adv_link_speed; 2498 2499 /* set link and auto negotiation so changes take effect */ 2500 config.caps |= ICE_AQ_PHY_ENA_LINK; 2501 2502 if (phy_type_low || phy_type_high) { 2503 config.phy_type_high = cpu_to_le64(phy_type_high) & 2504 abilities->phy_type_high; 2505 config.phy_type_low = cpu_to_le64(phy_type_low) & 2506 abilities->phy_type_low; 2507 } else { 2508 err = -EAGAIN; 2509 netdev_info(netdev, "Nothing changed. 
No PHY_TYPE is corresponded to advertised link speed.\n"); 2510 goto done; 2511 } 2512 2513 /* If link is up put link down */ 2514 if (p->phy.link_info.link_info & ICE_AQ_LINK_UP) { 2515 /* Tell the OS link is going down, the link will go 2516 * back up when fw says it is ready asynchronously 2517 */ 2518 ice_print_link_msg(np->vsi, false); 2519 netif_carrier_off(netdev); 2520 netif_tx_stop_all_queues(netdev); 2521 } 2522 2523 /* make the aq call */ 2524 status = ice_aq_set_phy_cfg(&pf->hw, lport, &config, NULL); 2525 if (status) { 2526 netdev_info(netdev, "Set phy config failed,\n"); 2527 err = -EAGAIN; 2528 } 2529 2530 done: 2531 kfree(abilities); 2532 clear_bit(__ICE_CFG_BUSY, pf->state); 2533 2534 return err; 2535 } 2536 2537 /** 2538 * ice_parse_hdrs - parses headers from RSS hash input 2539 * @nfc: ethtool rxnfc command 2540 * 2541 * This function parses the rxnfc command and returns intended 2542 * header types for RSS configuration 2543 */ 2544 static u32 ice_parse_hdrs(struct ethtool_rxnfc *nfc) 2545 { 2546 u32 hdrs = ICE_FLOW_SEG_HDR_NONE; 2547 2548 switch (nfc->flow_type) { 2549 case TCP_V4_FLOW: 2550 hdrs |= ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV4; 2551 break; 2552 case UDP_V4_FLOW: 2553 hdrs |= ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV4; 2554 break; 2555 case SCTP_V4_FLOW: 2556 hdrs |= ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV4; 2557 break; 2558 case TCP_V6_FLOW: 2559 hdrs |= ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV6; 2560 break; 2561 case UDP_V6_FLOW: 2562 hdrs |= ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV6; 2563 break; 2564 case SCTP_V6_FLOW: 2565 hdrs |= ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV6; 2566 break; 2567 default: 2568 break; 2569 } 2570 return hdrs; 2571 } 2572 2573 #define ICE_FLOW_HASH_FLD_IPV4_SA BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) 2574 #define ICE_FLOW_HASH_FLD_IPV6_SA BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA) 2575 #define ICE_FLOW_HASH_FLD_IPV4_DA BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA) 2576 #define ICE_FLOW_HASH_FLD_IPV6_DA 
BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA) 2577 #define ICE_FLOW_HASH_FLD_TCP_SRC_PORT BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT) 2578 #define ICE_FLOW_HASH_FLD_TCP_DST_PORT BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT) 2579 #define ICE_FLOW_HASH_FLD_UDP_SRC_PORT BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_SRC_PORT) 2580 #define ICE_FLOW_HASH_FLD_UDP_DST_PORT BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_DST_PORT) 2581 #define ICE_FLOW_HASH_FLD_SCTP_SRC_PORT \ 2582 BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT) 2583 #define ICE_FLOW_HASH_FLD_SCTP_DST_PORT \ 2584 BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_DST_PORT) 2585 2586 /** 2587 * ice_parse_hash_flds - parses hash fields from RSS hash input 2588 * @nfc: ethtool rxnfc command 2589 * 2590 * This function parses the rxnfc command and returns intended 2591 * hash fields for RSS configuration 2592 */ 2593 static u64 ice_parse_hash_flds(struct ethtool_rxnfc *nfc) 2594 { 2595 u64 hfld = ICE_HASH_INVALID; 2596 2597 if (nfc->data & RXH_IP_SRC || nfc->data & RXH_IP_DST) { 2598 switch (nfc->flow_type) { 2599 case TCP_V4_FLOW: 2600 case UDP_V4_FLOW: 2601 case SCTP_V4_FLOW: 2602 if (nfc->data & RXH_IP_SRC) 2603 hfld |= ICE_FLOW_HASH_FLD_IPV4_SA; 2604 if (nfc->data & RXH_IP_DST) 2605 hfld |= ICE_FLOW_HASH_FLD_IPV4_DA; 2606 break; 2607 case TCP_V6_FLOW: 2608 case UDP_V6_FLOW: 2609 case SCTP_V6_FLOW: 2610 if (nfc->data & RXH_IP_SRC) 2611 hfld |= ICE_FLOW_HASH_FLD_IPV6_SA; 2612 if (nfc->data & RXH_IP_DST) 2613 hfld |= ICE_FLOW_HASH_FLD_IPV6_DA; 2614 break; 2615 default: 2616 break; 2617 } 2618 } 2619 2620 if (nfc->data & RXH_L4_B_0_1 || nfc->data & RXH_L4_B_2_3) { 2621 switch (nfc->flow_type) { 2622 case TCP_V4_FLOW: 2623 case TCP_V6_FLOW: 2624 if (nfc->data & RXH_L4_B_0_1) 2625 hfld |= ICE_FLOW_HASH_FLD_TCP_SRC_PORT; 2626 if (nfc->data & RXH_L4_B_2_3) 2627 hfld |= ICE_FLOW_HASH_FLD_TCP_DST_PORT; 2628 break; 2629 case UDP_V4_FLOW: 2630 case UDP_V6_FLOW: 2631 if (nfc->data & RXH_L4_B_0_1) 2632 hfld |= ICE_FLOW_HASH_FLD_UDP_SRC_PORT; 2633 if (nfc->data & RXH_L4_B_2_3) 2634 hfld |= 
ICE_FLOW_HASH_FLD_UDP_DST_PORT;
			break;
		case SCTP_V4_FLOW:
		case SCTP_V6_FLOW:
			if (nfc->data & RXH_L4_B_0_1)
				hfld |= ICE_FLOW_HASH_FLD_SCTP_SRC_PORT;
			if (nfc->data & RXH_L4_B_2_3)
				hfld |= ICE_FLOW_HASH_FLD_SCTP_DST_PORT;
			break;
		default:
			break;
		}
	}

	return hfld;
}

/**
 * ice_set_rss_hash_opt - Enable/Disable flow types for RSS hash
 * @vsi: the VSI being configured
 * @nfc: ethtool rxnfc command
 *
 * Returns Success if the flow input set is supported.
 */
static int
ice_set_rss_hash_opt(struct ice_vsi *vsi, struct ethtool_rxnfc *nfc)
{
	struct ice_pf *pf = vsi->back;
	enum ice_status status;
	struct device *dev;
	u64 hashed_flds;
	u32 hdrs;

	dev = ice_pf_to_dev(pf);
	/* advanced RSS is unavailable when the PF runs in safe mode */
	if (ice_is_safe_mode(pf)) {
		dev_dbg(dev, "Advanced RSS disabled. Package download failed, vsi num = %d\n",
			vsi->vsi_num);
		return -EINVAL;
	}

	hashed_flds = ice_parse_hash_flds(nfc);
	if (hashed_flds == ICE_HASH_INVALID) {
		dev_dbg(dev, "Invalid hash fields, vsi num = %d\n",
			vsi->vsi_num);
		return -EINVAL;
	}

	hdrs = ice_parse_hdrs(nfc);
	if (hdrs == ICE_FLOW_SEG_HDR_NONE) {
		dev_dbg(dev, "Header type is not valid, vsi num = %d\n",
			vsi->vsi_num);
		return -EINVAL;
	}

	status = ice_add_rss_cfg(&pf->hw, vsi->idx, hashed_flds, hdrs);
	if (status) {
		dev_dbg(dev, "ice_add_rss_cfg failed, vsi num = %d, error = %d\n",
			vsi->vsi_num, status);
		return -EINVAL;
	}

	return 0;
}

/**
 * ice_get_rss_hash_opt - Retrieve hash fields for a given flow-type
 * @vsi: the VSI being configured
 * @nfc: ethtool rxnfc command
 *
 * Fills nfc->data with the RXH_* flags matching the currently
 * configured hash fields; leaves it 0 on any lookup failure.
 */
static void
ice_get_rss_hash_opt(struct ice_vsi *vsi, struct ethtool_rxnfc *nfc)
{
	struct ice_pf *pf = vsi->back;
	struct device *dev;
	u64 hash_flds;
	u32 hdrs;

	dev = ice_pf_to_dev(pf);

	nfc->data = 0;
	if (ice_is_safe_mode(pf)) {
		dev_dbg(dev, "Advanced RSS disabled. Package download failed, vsi num = %d\n",
			vsi->vsi_num);
		return;
	}

	hdrs = ice_parse_hdrs(nfc);
	if (hdrs == ICE_FLOW_SEG_HDR_NONE) {
		dev_dbg(dev, "Header type is not valid, vsi num = %d\n",
			vsi->vsi_num);
		return;
	}

	hash_flds = ice_get_rss_cfg(&pf->hw, vsi->idx, hdrs);
	if (hash_flds == ICE_HASH_INVALID) {
		dev_dbg(dev, "No hash fields found for the given header type, vsi num = %d\n",
			vsi->vsi_num);
		return;
	}

	/* translate driver hash-field bits back to ethtool RXH_* flags */
	if (hash_flds & ICE_FLOW_HASH_FLD_IPV4_SA ||
	    hash_flds & ICE_FLOW_HASH_FLD_IPV6_SA)
		nfc->data |= (u64)RXH_IP_SRC;

	if (hash_flds & ICE_FLOW_HASH_FLD_IPV4_DA ||
	    hash_flds & ICE_FLOW_HASH_FLD_IPV6_DA)
		nfc->data |= (u64)RXH_IP_DST;

	if (hash_flds & ICE_FLOW_HASH_FLD_TCP_SRC_PORT ||
	    hash_flds & ICE_FLOW_HASH_FLD_UDP_SRC_PORT ||
	    hash_flds & ICE_FLOW_HASH_FLD_SCTP_SRC_PORT)
		nfc->data |= (u64)RXH_L4_B_0_1;

	if (hash_flds & ICE_FLOW_HASH_FLD_TCP_DST_PORT ||
	    hash_flds & ICE_FLOW_HASH_FLD_UDP_DST_PORT ||
	    hash_flds & ICE_FLOW_HASH_FLD_SCTP_DST_PORT)
		nfc->data |= (u64)RXH_L4_B_2_3;
}

/**
 * ice_set_rxnfc - command to set Rx flow rules.
 * @netdev: network interface device structure
 * @cmd: ethtool rxnfc command
 *
 * Returns 0 for success and negative values for errors
 */
static int ice_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;

	switch (cmd->cmd) {
	case ETHTOOL_SRXFH:
		return ice_set_rss_hash_opt(vsi, cmd);
	default:
		break;
	}
	return -EOPNOTSUPP;
}

/**
 * ice_get_rxnfc - command to get Rx flow classification rules
 * @netdev: network interface device structure
 * @cmd: ethtool rxnfc command
 * @rule_locs: buffer to return Rx flow classification rules
 *
 * Returns Success if the command is supported.
 */
static int
ice_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
	      u32 __always_unused *rule_locs)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	int ret = -EOPNOTSUPP;

	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		cmd->data = vsi->rss_size;
		ret = 0;
		break;
	case ETHTOOL_GRXFH:
		ice_get_rss_hash_opt(vsi, cmd);
		ret = 0;
		break;
	default:
		break;
	}

	return ret;
}

/* Report current and maximum descriptor counts (taken from ring 0) */
static void
ice_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;

	ring->rx_max_pending = ICE_MAX_NUM_DESC;
	ring->tx_max_pending = ICE_MAX_NUM_DESC;
	ring->rx_pending = vsi->rx_rings[0]->count;
	ring->tx_pending = vsi->tx_rings[0]->count;

	/* Rx mini and jumbo rings are not supported */
	ring->rx_mini_max_pending = 0;
	ring->rx_jumbo_max_pending = 0;
	ring->rx_mini_pending = 0;
	ring->rx_jumbo_pending = 0;
}

static int
ice_set_ringparam(struct net_device *netdev,
struct ethtool_ringparam *ring) 2826 { 2827 struct ice_ring *tx_rings = NULL, *rx_rings = NULL; 2828 struct ice_netdev_priv *np = netdev_priv(netdev); 2829 struct ice_ring *xdp_rings = NULL; 2830 struct ice_vsi *vsi = np->vsi; 2831 struct ice_pf *pf = vsi->back; 2832 int i, timeout = 50, err = 0; 2833 u32 new_rx_cnt, new_tx_cnt; 2834 2835 if (ring->tx_pending > ICE_MAX_NUM_DESC || 2836 ring->tx_pending < ICE_MIN_NUM_DESC || 2837 ring->rx_pending > ICE_MAX_NUM_DESC || 2838 ring->rx_pending < ICE_MIN_NUM_DESC) { 2839 netdev_err(netdev, "Descriptors requested (Tx: %d / Rx: %d) out of range [%d-%d] (increment %d)\n", 2840 ring->tx_pending, ring->rx_pending, 2841 ICE_MIN_NUM_DESC, ICE_MAX_NUM_DESC, 2842 ICE_REQ_DESC_MULTIPLE); 2843 return -EINVAL; 2844 } 2845 2846 new_tx_cnt = ALIGN(ring->tx_pending, ICE_REQ_DESC_MULTIPLE); 2847 if (new_tx_cnt != ring->tx_pending) 2848 netdev_info(netdev, 2849 "Requested Tx descriptor count rounded up to %d\n", 2850 new_tx_cnt); 2851 new_rx_cnt = ALIGN(ring->rx_pending, ICE_REQ_DESC_MULTIPLE); 2852 if (new_rx_cnt != ring->rx_pending) 2853 netdev_info(netdev, 2854 "Requested Rx descriptor count rounded up to %d\n", 2855 new_rx_cnt); 2856 2857 /* if nothing to do return success */ 2858 if (new_tx_cnt == vsi->tx_rings[0]->count && 2859 new_rx_cnt == vsi->rx_rings[0]->count) { 2860 netdev_dbg(netdev, "Nothing to change, descriptor count is same as requested\n"); 2861 return 0; 2862 } 2863 2864 /* If there is a AF_XDP UMEM attached to any of Rx rings, 2865 * disallow changing the number of descriptors -- regardless 2866 * if the netdev is running or not. 
2867 */ 2868 if (ice_xsk_any_rx_ring_ena(vsi)) 2869 return -EBUSY; 2870 2871 while (test_and_set_bit(__ICE_CFG_BUSY, pf->state)) { 2872 timeout--; 2873 if (!timeout) 2874 return -EBUSY; 2875 usleep_range(1000, 2000); 2876 } 2877 2878 /* set for the next time the netdev is started */ 2879 if (!netif_running(vsi->netdev)) { 2880 for (i = 0; i < vsi->alloc_txq; i++) 2881 vsi->tx_rings[i]->count = new_tx_cnt; 2882 for (i = 0; i < vsi->alloc_rxq; i++) 2883 vsi->rx_rings[i]->count = new_rx_cnt; 2884 if (ice_is_xdp_ena_vsi(vsi)) 2885 for (i = 0; i < vsi->num_xdp_txq; i++) 2886 vsi->xdp_rings[i]->count = new_tx_cnt; 2887 vsi->num_tx_desc = new_tx_cnt; 2888 vsi->num_rx_desc = new_rx_cnt; 2889 netdev_dbg(netdev, "Link is down, descriptor count change happens when link is brought up\n"); 2890 goto done; 2891 } 2892 2893 if (new_tx_cnt == vsi->tx_rings[0]->count) 2894 goto process_rx; 2895 2896 /* alloc updated Tx resources */ 2897 netdev_info(netdev, "Changing Tx descriptor count from %d to %d\n", 2898 vsi->tx_rings[0]->count, new_tx_cnt); 2899 2900 tx_rings = kcalloc(vsi->num_txq, sizeof(*tx_rings), GFP_KERNEL); 2901 if (!tx_rings) { 2902 err = -ENOMEM; 2903 goto done; 2904 } 2905 2906 ice_for_each_txq(vsi, i) { 2907 /* clone ring and setup updated count */ 2908 tx_rings[i] = *vsi->tx_rings[i]; 2909 tx_rings[i].count = new_tx_cnt; 2910 tx_rings[i].desc = NULL; 2911 tx_rings[i].tx_buf = NULL; 2912 err = ice_setup_tx_ring(&tx_rings[i]); 2913 if (err) { 2914 while (i--) 2915 ice_clean_tx_ring(&tx_rings[i]); 2916 kfree(tx_rings); 2917 goto done; 2918 } 2919 } 2920 2921 if (!ice_is_xdp_ena_vsi(vsi)) 2922 goto process_rx; 2923 2924 /* alloc updated XDP resources */ 2925 netdev_info(netdev, "Changing XDP descriptor count from %d to %d\n", 2926 vsi->xdp_rings[0]->count, new_tx_cnt); 2927 2928 xdp_rings = kcalloc(vsi->num_xdp_txq, sizeof(*xdp_rings), GFP_KERNEL); 2929 if (!xdp_rings) { 2930 err = -ENOMEM; 2931 goto free_tx; 2932 } 2933 2934 for (i = 0; i < vsi->num_xdp_txq; i++) { 
2935 /* clone ring and setup updated count */ 2936 xdp_rings[i] = *vsi->xdp_rings[i]; 2937 xdp_rings[i].count = new_tx_cnt; 2938 xdp_rings[i].desc = NULL; 2939 xdp_rings[i].tx_buf = NULL; 2940 err = ice_setup_tx_ring(&xdp_rings[i]); 2941 if (err) { 2942 while (i--) 2943 ice_clean_tx_ring(&xdp_rings[i]); 2944 kfree(xdp_rings); 2945 goto free_tx; 2946 } 2947 ice_set_ring_xdp(&xdp_rings[i]); 2948 } 2949 2950 process_rx: 2951 if (new_rx_cnt == vsi->rx_rings[0]->count) 2952 goto process_link; 2953 2954 /* alloc updated Rx resources */ 2955 netdev_info(netdev, "Changing Rx descriptor count from %d to %d\n", 2956 vsi->rx_rings[0]->count, new_rx_cnt); 2957 2958 rx_rings = kcalloc(vsi->num_rxq, sizeof(*rx_rings), GFP_KERNEL); 2959 if (!rx_rings) { 2960 err = -ENOMEM; 2961 goto done; 2962 } 2963 2964 ice_for_each_rxq(vsi, i) { 2965 /* clone ring and setup updated count */ 2966 rx_rings[i] = *vsi->rx_rings[i]; 2967 rx_rings[i].count = new_rx_cnt; 2968 rx_rings[i].desc = NULL; 2969 rx_rings[i].rx_buf = NULL; 2970 /* this is to allow wr32 to have something to write to 2971 * during early allocation of Rx buffers 2972 */ 2973 rx_rings[i].tail = vsi->back->hw.hw_addr + PRTGEN_STATUS; 2974 2975 err = ice_setup_rx_ring(&rx_rings[i]); 2976 if (err) 2977 goto rx_unwind; 2978 2979 /* allocate Rx buffers */ 2980 err = ice_alloc_rx_bufs(&rx_rings[i], 2981 ICE_DESC_UNUSED(&rx_rings[i])); 2982 rx_unwind: 2983 if (err) { 2984 while (i) { 2985 i--; 2986 ice_free_rx_ring(&rx_rings[i]); 2987 } 2988 kfree(rx_rings); 2989 err = -ENOMEM; 2990 goto free_tx; 2991 } 2992 } 2993 2994 process_link: 2995 /* Bring interface down, copy in the new ring info, then restore the 2996 * interface. 
if VSI is up, bring it down and then back up 2997 */ 2998 if (!test_and_set_bit(__ICE_DOWN, vsi->state)) { 2999 ice_down(vsi); 3000 3001 if (tx_rings) { 3002 ice_for_each_txq(vsi, i) { 3003 ice_free_tx_ring(vsi->tx_rings[i]); 3004 *vsi->tx_rings[i] = tx_rings[i]; 3005 } 3006 kfree(tx_rings); 3007 } 3008 3009 if (rx_rings) { 3010 ice_for_each_rxq(vsi, i) { 3011 ice_free_rx_ring(vsi->rx_rings[i]); 3012 /* copy the real tail offset */ 3013 rx_rings[i].tail = vsi->rx_rings[i]->tail; 3014 /* this is to fake out the allocation routine 3015 * into thinking it has to realloc everything 3016 * but the recycling logic will let us re-use 3017 * the buffers allocated above 3018 */ 3019 rx_rings[i].next_to_use = 0; 3020 rx_rings[i].next_to_clean = 0; 3021 rx_rings[i].next_to_alloc = 0; 3022 *vsi->rx_rings[i] = rx_rings[i]; 3023 } 3024 kfree(rx_rings); 3025 } 3026 3027 if (xdp_rings) { 3028 for (i = 0; i < vsi->num_xdp_txq; i++) { 3029 ice_free_tx_ring(vsi->xdp_rings[i]); 3030 *vsi->xdp_rings[i] = xdp_rings[i]; 3031 } 3032 kfree(xdp_rings); 3033 } 3034 3035 vsi->num_tx_desc = new_tx_cnt; 3036 vsi->num_rx_desc = new_rx_cnt; 3037 ice_up(vsi); 3038 } 3039 goto done; 3040 3041 free_tx: 3042 /* error cleanup if the Rx allocations failed after getting Tx */ 3043 if (tx_rings) { 3044 ice_for_each_txq(vsi, i) 3045 ice_free_tx_ring(&tx_rings[i]); 3046 kfree(tx_rings); 3047 } 3048 3049 done: 3050 clear_bit(__ICE_CFG_BUSY, pf->state); 3051 return err; 3052 } 3053 3054 static int ice_nway_reset(struct net_device *netdev) 3055 { 3056 /* restart autonegotiation */ 3057 struct ice_netdev_priv *np = netdev_priv(netdev); 3058 struct ice_vsi *vsi = np->vsi; 3059 struct ice_port_info *pi; 3060 enum ice_status status; 3061 3062 pi = vsi->port_info; 3063 /* If VSI state is up, then restart autoneg with link up */ 3064 if (!test_bit(__ICE_DOWN, vsi->back->state)) 3065 status = ice_aq_set_link_restart_an(pi, true, NULL); 3066 else 3067 status = ice_aq_set_link_restart_an(pi, false, NULL); 3068 3069 if 
	    (status) {
		netdev_info(netdev, "link restart failed, err %d aq_err %d\n",
			    status, pi->hw->adminq.sq_last_status);
		return -EIO;
	}

	return 0;
}

/**
 * ice_get_pauseparam - Get Flow Control status
 * @netdev: network interface device structure
 * @pause: ethernet pause (flow control) parameters
 *
 * Get requested flow control status from PHY capability.
 * If autoneg is true, then ethtool will send the ETHTOOL_GSET ioctl which
 * is handled by ice_get_link_ksettings. ice_get_link_ksettings will report
 * the negotiated Rx/Tx pause via lp_advertising.
 */
static void
ice_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_port_info *pi = np->vsi->port_info;
	struct ice_aqc_get_phy_caps_data *pcaps;
	struct ice_dcbx_cfg *dcbx_cfg;
	enum ice_status status;

	/* Initialize pause params */
	pause->rx_pause = 0;
	pause->tx_pause = 0;

	dcbx_cfg = &pi->local_dcbx_cfg;

	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
	if (!pcaps)
		return;

	/* Get current PHY config */
	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps,
				     NULL);
	if (status)
		goto out;

	pause->autoneg = ((pcaps->caps & ICE_AQC_PHY_AN_MODE) ?
			AUTONEG_ENABLE : AUTONEG_DISABLE);

	if (dcbx_cfg->pfc.pfcena)
		/* PFC enabled so report LFC as off */
		goto out;

	if (pcaps->caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE)
		pause->tx_pause = 1;
	if (pcaps->caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)
		pause->rx_pause = 1;

out:
	kfree(pcaps);
}

/**
 * ice_set_pauseparam - Set Flow Control parameter
 * @netdev: network interface device structure
 * @pause: return Tx/Rx flow control status
 */
static int
ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_aqc_get_phy_caps_data *pcaps;
	struct ice_link_status *hw_link_info;
	struct ice_pf *pf = np->vsi->back;
	struct ice_dcbx_cfg *dcbx_cfg;
	struct ice_vsi *vsi = np->vsi;
	struct ice_hw *hw = &pf->hw;
	struct ice_port_info *pi;
	enum ice_status status;
	u8 aq_failures;
	bool link_up;
	int err = 0;
	u32 is_an;

	pi = vsi->port_info;
	hw_link_info = &pi->phy.link_info;
	dcbx_cfg = &pi->local_dcbx_cfg;
	link_up = hw_link_info->link_info & ICE_AQ_LINK_UP;

	/* Changing the port's flow control is not supported if this isn't the
	 * PF VSI
	 */
	if (vsi->type != ICE_VSI_PF) {
		netdev_info(netdev, "Changing flow control parameters only supported for PF VSI\n");
		return -EOPNOTSUPP;
	}

	/* Get pause param reports configured and negotiated flow control pause
	 * when ETHTOOL_GLINKSETTINGS is defined. Since ETHTOOL_GLINKSETTINGS is
	 * defined get pause param pause->autoneg reports SW configured setting,
	 * so compare pause->autoneg with SW configured to prevent the user from
	 * using set pause param to change autoneg.
	 */
	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
	if (!pcaps)
		return -ENOMEM;

	/* Get current PHY config */
	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps,
				     NULL);
	if (status) {
		kfree(pcaps);
		return -EIO;
	}

	is_an = ((pcaps->caps & ICE_AQC_PHY_AN_MODE) ?
		 AUTONEG_ENABLE : AUTONEG_DISABLE);

	kfree(pcaps);

	if (pause->autoneg != is_an) {
		netdev_info(netdev, "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
		return -EOPNOTSUPP;
	}

	/* If we have link and don't have autoneg */
	if (!test_bit(__ICE_DOWN, pf->state) &&
	    !(hw_link_info->an_info & ICE_AQ_AN_COMPLETED)) {
		/* Send message that it might not necessarily work */
		netdev_info(netdev, "Autoneg did not complete so changing settings may not result in an actual change.\n");
	}

	if (dcbx_cfg->pfc.pfcena) {
		netdev_info(netdev, "Priority flow control enabled. Cannot set link flow control.\n");
		return -EOPNOTSUPP;
	}

	/* map the requested Rx/Tx pause combination to a FC request mode */
	if (pause->rx_pause && pause->tx_pause)
		pi->fc.req_mode = ICE_FC_FULL;
	else if (pause->rx_pause && !pause->tx_pause)
		pi->fc.req_mode = ICE_FC_RX_PAUSE;
	else if (!pause->rx_pause && pause->tx_pause)
		pi->fc.req_mode = ICE_FC_TX_PAUSE;
	else if (!pause->rx_pause && !pause->tx_pause)
		pi->fc.req_mode = ICE_FC_NONE;
	else
		return -EINVAL;

	/* Tell the OS link is going down, the link will go back up when fw
	 * says it is ready asynchronously
	 */
	ice_print_link_msg(vsi, false);
	netif_carrier_off(netdev);
	netif_tx_stop_all_queues(netdev);

	/* Set the FC mode and only restart AN if link is up */
	status = ice_set_fc(pi, &aq_failures, link_up);

	if (aq_failures & ICE_SET_FC_AQ_FAIL_GET) {
		netdev_info(netdev, "Set fc failed on the get_phy_capabilities call with err %d aq_err %d\n",
			    status, hw->adminq.sq_last_status);
		err = -EAGAIN;
	} else if (aq_failures & ICE_SET_FC_AQ_FAIL_SET) {
		netdev_info(netdev, "Set fc failed on the set_phy_config call with err %d aq_err %d\n",
			    status, hw->adminq.sq_last_status);
		err = -EAGAIN;
	} else if (aq_failures & ICE_SET_FC_AQ_FAIL_UPDATE) {
		netdev_info(netdev, "Set fc failed on the get_link_info call with err %d aq_err %d\n",
			    status, hw->adminq.sq_last_status);
		err = -EAGAIN;
	}

	if (!test_bit(__ICE_DOWN, pf->state)) {
		/* Give it a little more time to try to come back. If still
		 * down, restart autoneg link or reinitialize the interface.
		 */
		msleep(75);
		if (!test_bit(__ICE_DOWN, pf->state))
			return ice_nway_reset(netdev);

		ice_down(vsi);
		ice_up(vsi);
	}

	return err;
}

/**
 * ice_get_rxfh_key_size - get the RSS hash key size
 * @netdev: network interface device structure
 *
 * Returns the hash key size.
 */
static u32 ice_get_rxfh_key_size(struct net_device __always_unused *netdev)
{
	return ICE_VSIQF_HKEY_ARRAY_SIZE;
}

/**
 * ice_get_rxfh_indir_size - get the Rx flow hash indirection table size
 * @netdev: network interface device structure
 *
 * Returns the table size.
 */
static u32 ice_get_rxfh_indir_size(struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);

	return np->vsi->rss_table_size;
}

/**
 * ice_get_rxfh - get the Rx flow hash indirection table
 * @netdev: network interface device structure
 * @indir: indirection table
 * @key: hash key
 * @hfunc: hash function
 *
 * Reads the indirection table directly from the hardware.
 */
static int
ice_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_pf *pf = vsi->back;
	int ret = 0, i;
	u8 *lut;

	if (hfunc)
		*hfunc = ETH_RSS_HASH_TOP;

	if (!indir)
		return 0;

	if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
		/* RSS not supported return error here */
		netdev_warn(netdev, "RSS is not configured on this VSI!\n");
		return -EIO;
	}

	lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
	if (!lut)
		return -ENOMEM;

	if (ice_get_rss(vsi, key, lut, vsi->rss_table_size)) {
		ret = -EIO;
		goto out;
	}

	/* widen each byte-sized LUT entry into the user's u32 buffer */
	for (i = 0; i < vsi->rss_table_size; i++)
		indir[i] = (u32)(lut[i]);

out:
	kfree(lut);
	return ret;
}

/**
 * ice_set_rxfh - set the Rx flow hash indirection table
 * @netdev: network interface device structure
 * @indir: indirection table
 * @key: hash key
 * @hfunc: hash function
 *
 * Returns -EINVAL if the table specifies an invalid queue ID, otherwise
 * returns 0 after programming the table.
 */
static int
ice_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key,
	     const u8 hfunc)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_pf *pf = vsi->back;
	struct device *dev;
	u8 *seed = NULL;

	dev = ice_pf_to_dev(pf);
	/* only the Toeplitz hash function (ETH_RSS_HASH_TOP) is supported */
	if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
		return -EOPNOTSUPP;

	if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
		/* RSS not supported return error here */
		netdev_warn(netdev, "RSS is not configured on this VSI!\n");
		return -EIO;
	}

	if (key) {
		/* cache the user-supplied key so it survives VSI rebuilds */
		if (!vsi->rss_hkey_user) {
			vsi->rss_hkey_user =
				devm_kzalloc(dev, ICE_VSIQF_HKEY_ARRAY_SIZE,
					     GFP_KERNEL);
			if (!vsi->rss_hkey_user)
				return -ENOMEM;
		}
		memcpy(vsi->rss_hkey_user, key, ICE_VSIQF_HKEY_ARRAY_SIZE);
		seed = vsi->rss_hkey_user;
	}

	if (!vsi->rss_lut_user) {
		vsi->rss_lut_user = devm_kzalloc(dev, vsi->rss_table_size,
						 GFP_KERNEL);
		if (!vsi->rss_lut_user)
			return -ENOMEM;
	}

	/* Each 32 bits pointed by 'indir' is stored with a lut entry */
	if (indir) {
		int i;

		for (i = 0; i < vsi->rss_table_size; i++)
			vsi->rss_lut_user[i] = (u8)(indir[i]);
	} else {
		/* no table supplied: fall back to the default spread */
		ice_fill_rss_lut(vsi->rss_lut_user, vsi->rss_table_size,
				 vsi->rss_size);
	}

	if (ice_set_rss(vsi, seed, vsi->rss_lut_user, vsi->rss_table_size))
		return -EIO;

	return 0;
}

/**
 * ice_get_max_txq - return the maximum number of Tx queues for in a PF
 * @pf: PF structure
 */
static int ice_get_max_txq(struct ice_pf *pf)
{
	return min_t(int, num_online_cpus(),
		     pf->hw.func_caps.common_cap.num_txq);
}

/**
 * ice_get_max_rxq - return the maximum number of Rx queues for in a PF
 * @pf: PF structure
 */
static int ice_get_max_rxq(struct
			   ice_pf *pf)
{
	return min_t(int, num_online_cpus(),
		     pf->hw.func_caps.common_cap.num_rxq);
}

/**
 * ice_get_combined_cnt - return the current number of combined channels
 * @vsi: PF VSI pointer
 *
 * Go through all queue vectors and count ones that have both Rx and Tx ring
 * attached
 */
static u32 ice_get_combined_cnt(struct ice_vsi *vsi)
{
	u32 combined = 0;
	int q_idx;

	ice_for_each_q_vector(vsi, q_idx) {
		struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];

		if (q_vector->rx.ring && q_vector->tx.ring)
			combined++;
	}

	return combined;
}

/**
 * ice_get_channels - get the current and max supported channels
 * @dev: network interface device structure
 * @ch: ethtool channel data structure
 */
static void
ice_get_channels(struct net_device *dev, struct ethtool_channels *ch)
{
	struct ice_netdev_priv *np = netdev_priv(dev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_pf *pf = vsi->back;

	/* check to see if VSI is active */
	if (test_bit(__ICE_DOWN, vsi->state))
		return;

	/* report maximum channels */
	ch->max_rx = ice_get_max_rxq(pf);
	ch->max_tx = ice_get_max_txq(pf);
	ch->max_combined = min_t(int, ch->max_rx, ch->max_tx);

	/* report current channels: rx/tx counts are the dedicated
	 * (non-combined) queues
	 */
	ch->combined_count = ice_get_combined_cnt(vsi);
	ch->rx_count = vsi->num_rxq - ch->combined_count;
	ch->tx_count = vsi->num_txq - ch->combined_count;
}

/**
 * ice_vsi_set_dflt_rss_lut - set default RSS LUT with requested RSS size
 * @vsi: VSI to reconfigure RSS LUT on
 * @req_rss_size: requested range of queue numbers for hashing
 *
 * Set the VSI's RSS parameters, configure the RSS LUT based on these.
 */
static int ice_vsi_set_dflt_rss_lut(struct ice_vsi *vsi, int req_rss_size)
{
	struct ice_pf *pf = vsi->back;
	enum ice_status status;
	struct device *dev;
	struct ice_hw *hw;
	int err = 0;
	u8 *lut;

	dev = ice_pf_to_dev(pf);
	hw = &pf->hw;

	if (!req_rss_size)
		return -EINVAL;

	lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
	if (!lut)
		return -ENOMEM;

	/* set RSS LUT parameters */
	if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
		vsi->rss_size = 1;
	} else {
		struct ice_hw_common_caps *caps = &hw->func_caps.common_cap;

		/* cap the RSS size at what a LUT entry can address */
		vsi->rss_size = min_t(int, req_rss_size,
				      BIT(caps->rss_table_entry_width));
	}

	/* create/set RSS LUT */
	ice_fill_rss_lut(lut, vsi->rss_table_size, vsi->rss_size);
	status = ice_aq_set_rss_lut(hw, vsi->idx, vsi->rss_lut_type, lut,
				    vsi->rss_table_size);
	if (status) {
		/* NOTE(review): this reports rq_last_status while other AQ
		 * error paths in this file report sq_last_status -- verify
		 * which queue's status applies here
		 */
		dev_err(dev, "Cannot set RSS lut, err %d aq_err %d\n",
			status, hw->adminq.rq_last_status);
		err = -EIO;
	}

	kfree(lut);
	return err;
}

/**
 * ice_set_channels - set the number channels
 * @dev: network interface device structure
 * @ch: ethtool channel data structure
 */
static int ice_set_channels(struct net_device *dev, struct ethtool_channels *ch)
{
	struct ice_netdev_priv *np = netdev_priv(dev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_pf *pf = vsi->back;
	int new_rx = 0, new_tx = 0;
	u32 curr_combined;

	/* do not support changing channels in Safe Mode */
	if (ice_is_safe_mode(pf)) {
		netdev_err(dev, "Changing channel in Safe Mode is not supported\n");
		return -EOPNOTSUPP;
	}
	/* do not support changing other_count */
	if (ch->other_count)
		return -EINVAL;

	curr_combined = ice_get_combined_cnt(vsi);

	/* these checks are for cases where user didn't specify a particular
	 * value on cmd line but we get non-zero value anyway via
	 * get_channels(); look at ethtool.c in ethtool repository (the user
	 * space part), particularly, do_schannels() routine
	 */
	if (ch->rx_count == vsi->num_rxq - curr_combined)
		ch->rx_count = 0;
	if (ch->tx_count == vsi->num_txq - curr_combined)
		ch->tx_count = 0;
	if (ch->combined_count == curr_combined)
		ch->combined_count = 0;

	if (!(ch->combined_count || (ch->rx_count && ch->tx_count))) {
		netdev_err(dev, "Please specify at least 1 Rx and 1 Tx channel\n");
		return -EINVAL;
	}

	new_rx = ch->combined_count + ch->rx_count;
	new_tx = ch->combined_count + ch->tx_count;

	if (new_rx > ice_get_max_rxq(pf)) {
		netdev_err(dev, "Maximum allowed Rx channels is %d\n",
			   ice_get_max_rxq(pf));
		return -EINVAL;
	}
	if (new_tx > ice_get_max_txq(pf)) {
		netdev_err(dev, "Maximum allowed Tx channels is %d\n",
			   ice_get_max_txq(pf));
		return -EINVAL;
	}

	ice_vsi_recfg_qs(vsi, new_rx, new_tx);

	/* unless the user pinned a LUT via ethtool, rebuild the default LUT
	 * so hashing spreads over the new queue count
	 */
	if (new_rx && !netif_is_rxfh_configured(dev))
		return ice_vsi_set_dflt_rss_lut(vsi, new_rx);

	return 0;
}

/* identifies whether a ring container carries Rx or Tx ITR settings */
enum ice_container_type {
	ICE_RX_CONTAINER,
	ICE_TX_CONTAINER,
};

/**
 * ice_get_rc_coalesce - get ITR values for specific ring container
 * @ec: ethtool structure to fill with driver's coalesce settings
 * @c_type: container type, Rx or Tx
 * @rc: ring container that the ITR values will come from
 *
 * Query the device for ice_ring_container specific ITR values. This is
 * done per ice_ring_container because each q_vector can have 1 or more rings
 * and all of said ring(s) will have the same ITR values.
 *
 * Returns 0 on success, negative otherwise.
 */
static int
ice_get_rc_coalesce(struct ethtool_coalesce *ec, enum ice_container_type c_type,
		    struct ice_ring_container *rc)
{
	struct ice_pf *pf;

	if (!rc->ring)
		return -EINVAL;

	pf = rc->ring->vsi->back;

	switch (c_type) {
	case ICE_RX_CONTAINER:
		ec->use_adaptive_rx_coalesce = ITR_IS_DYNAMIC(rc->itr_setting);
		/* mask off the dynamic flag to expose the raw usec value */
		ec->rx_coalesce_usecs = rc->itr_setting & ~ICE_ITR_DYNAMIC;
		ec->rx_coalesce_usecs_high = rc->ring->q_vector->intrl;
		break;
	case ICE_TX_CONTAINER:
		ec->use_adaptive_tx_coalesce = ITR_IS_DYNAMIC(rc->itr_setting);
		ec->tx_coalesce_usecs = rc->itr_setting & ~ICE_ITR_DYNAMIC;
		break;
	default:
		dev_dbg(ice_pf_to_dev(pf), "Invalid c_type %d\n", c_type);
		return -EINVAL;
	}

	return 0;
}

/**
 * ice_get_q_coalesce - get a queue's ITR/INTRL (coalesce) settings
 * @vsi: VSI associated to the queue for getting ITR/INTRL (coalesce) settings
 * @ec: coalesce settings to program the device with
 * @q_num: update ITR/INTRL (coalesce) settings for this queue number/index
 *
 * Return 0 on success, and negative under the following conditions:
 * 1. Getting Tx or Rx ITR/INTRL (coalesce) settings failed.
 * 2. The q_num passed in is not a valid number/index for Tx and Rx rings.
 */
static int
ice_get_q_coalesce(struct ice_vsi *vsi, struct ethtool_coalesce *ec, int q_num)
{
	/* a queue index can be valid for Rx only, Tx only, or both */
	if (q_num < vsi->num_rxq && q_num < vsi->num_txq) {
		if (ice_get_rc_coalesce(ec, ICE_RX_CONTAINER,
					&vsi->rx_rings[q_num]->q_vector->rx))
			return -EINVAL;
		if (ice_get_rc_coalesce(ec, ICE_TX_CONTAINER,
					&vsi->tx_rings[q_num]->q_vector->tx))
			return -EINVAL;
	} else if (q_num < vsi->num_rxq) {
		if (ice_get_rc_coalesce(ec, ICE_RX_CONTAINER,
					&vsi->rx_rings[q_num]->q_vector->rx))
			return -EINVAL;
	} else if (q_num < vsi->num_txq) {
		if (ice_get_rc_coalesce(ec, ICE_TX_CONTAINER,
					&vsi->tx_rings[q_num]->q_vector->tx))
			return -EINVAL;
	} else {
		return -EINVAL;
	}

	return 0;
}

/**
 * __ice_get_coalesce - get ITR/INTRL values for the device
 * @netdev: pointer to the netdev associated with this query
 * @ec: ethtool structure to fill with driver's coalesce settings
 * @q_num: queue number to get the coalesce settings for
 *
 * If the caller passes in a negative q_num then we return coalesce settings
 * based on queue number 0, else use the actual q_num passed in.
 */
static int
__ice_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec,
		   int q_num)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;

	if (q_num < 0)
		q_num = 0;

	if (ice_get_q_coalesce(vsi, ec, q_num))
		return -EINVAL;

	return 0;
}

/* ethtool .get_coalesce callback: reports queue 0's settings */
static int
ice_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec)
{
	return __ice_get_coalesce(netdev, ec, -1);
}

/* ethtool .get_per_queue_coalesce callback */
static int
ice_get_per_q_coalesce(struct net_device *netdev, u32 q_num,
		       struct ethtool_coalesce *ec)
{
	return __ice_get_coalesce(netdev, ec, q_num);
}

/**
 * ice_set_rc_coalesce - set ITR values for specific ring container
 * @c_type: container type, Rx or Tx
 * @ec: ethtool structure from user to update ITR settings
 * @rc: ring container that the ITR values will come from
 * @vsi: VSI associated to the ring container
 *
 * Set specific ITR values. This is done per ice_ring_container because each
 * q_vector can have 1 or more rings and all of said ring(s) will have the same
 * ITR values.
 *
 * Returns 0 on success, negative otherwise.
 */
static int
ice_set_rc_coalesce(enum ice_container_type c_type, struct ethtool_coalesce *ec,
		    struct ice_ring_container *rc, struct ice_vsi *vsi)
{
	const char *c_type_str = (c_type == ICE_RX_CONTAINER) ?
"rx" : "tx"; 3709 u32 use_adaptive_coalesce, coalesce_usecs; 3710 struct ice_pf *pf = vsi->back; 3711 u16 itr_setting; 3712 3713 if (!rc->ring) 3714 return -EINVAL; 3715 3716 switch (c_type) { 3717 case ICE_RX_CONTAINER: 3718 if (ec->rx_coalesce_usecs_high > ICE_MAX_INTRL || 3719 (ec->rx_coalesce_usecs_high && 3720 ec->rx_coalesce_usecs_high < pf->hw.intrl_gran)) { 3721 netdev_info(vsi->netdev, 3722 "Invalid value, %s-usecs-high valid values are 0 (disabled), %d-%d\n", 3723 c_type_str, pf->hw.intrl_gran, 3724 ICE_MAX_INTRL); 3725 return -EINVAL; 3726 } 3727 if (ec->rx_coalesce_usecs_high != rc->ring->q_vector->intrl) { 3728 rc->ring->q_vector->intrl = ec->rx_coalesce_usecs_high; 3729 wr32(&pf->hw, GLINT_RATE(rc->ring->q_vector->reg_idx), 3730 ice_intrl_usec_to_reg(ec->rx_coalesce_usecs_high, 3731 pf->hw.intrl_gran)); 3732 } 3733 3734 use_adaptive_coalesce = ec->use_adaptive_rx_coalesce; 3735 coalesce_usecs = ec->rx_coalesce_usecs; 3736 3737 break; 3738 case ICE_TX_CONTAINER: 3739 if (ec->tx_coalesce_usecs_high) { 3740 netdev_info(vsi->netdev, 3741 "setting %s-usecs-high is not supported\n", 3742 c_type_str); 3743 return -EINVAL; 3744 } 3745 3746 use_adaptive_coalesce = ec->use_adaptive_tx_coalesce; 3747 coalesce_usecs = ec->tx_coalesce_usecs; 3748 3749 break; 3750 default: 3751 dev_dbg(ice_pf_to_dev(pf), "Invalid container type %d\n", 3752 c_type); 3753 return -EINVAL; 3754 } 3755 3756 itr_setting = rc->itr_setting & ~ICE_ITR_DYNAMIC; 3757 if (coalesce_usecs != itr_setting && use_adaptive_coalesce) { 3758 netdev_info(vsi->netdev, 3759 "%s interrupt throttling cannot be changed if adaptive-%s is enabled\n", 3760 c_type_str, c_type_str); 3761 return -EINVAL; 3762 } 3763 3764 if (coalesce_usecs > ICE_ITR_MAX) { 3765 netdev_info(vsi->netdev, 3766 "Invalid value, %s-usecs range is 0-%d\n", 3767 c_type_str, ICE_ITR_MAX); 3768 return -EINVAL; 3769 } 3770 3771 /* hardware only supports an ITR granularity of 2us */ 3772 if (coalesce_usecs % 2 != 0) { 3773 
netdev_info(vsi->netdev, 3774 "Invalid value, %s-usecs must be even\n", 3775 c_type_str); 3776 return -EINVAL; 3777 } 3778 3779 if (use_adaptive_coalesce) { 3780 rc->itr_setting |= ICE_ITR_DYNAMIC; 3781 } else { 3782 /* store user facing value how it was set */ 3783 rc->itr_setting = coalesce_usecs; 3784 /* set to static and convert to value HW understands */ 3785 rc->target_itr = 3786 ITR_TO_REG(ITR_REG_ALIGN(rc->itr_setting)); 3787 } 3788 3789 return 0; 3790 } 3791 3792 /** 3793 * ice_set_q_coalesce - set a queue's ITR/INTRL (coalesce) settings 3794 * @vsi: VSI associated to the queue that need updating 3795 * @ec: coalesce settings to program the device with 3796 * @q_num: update ITR/INTRL (coalesce) settings for this queue number/index 3797 * 3798 * Return 0 on success, and negative under the following conditions: 3799 * 1. Setting Tx or Rx ITR/INTRL (coalesce) settings failed. 3800 * 2. The q_num passed in is not a valid number/index for Tx and Rx rings. 3801 */ 3802 static int 3803 ice_set_q_coalesce(struct ice_vsi *vsi, struct ethtool_coalesce *ec, int q_num) 3804 { 3805 if (q_num < vsi->num_rxq && q_num < vsi->num_txq) { 3806 if (ice_set_rc_coalesce(ICE_RX_CONTAINER, ec, 3807 &vsi->rx_rings[q_num]->q_vector->rx, 3808 vsi)) 3809 return -EINVAL; 3810 3811 if (ice_set_rc_coalesce(ICE_TX_CONTAINER, ec, 3812 &vsi->tx_rings[q_num]->q_vector->tx, 3813 vsi)) 3814 return -EINVAL; 3815 } else if (q_num < vsi->num_rxq) { 3816 if (ice_set_rc_coalesce(ICE_RX_CONTAINER, ec, 3817 &vsi->rx_rings[q_num]->q_vector->rx, 3818 vsi)) 3819 return -EINVAL; 3820 } else if (q_num < vsi->num_txq) { 3821 if (ice_set_rc_coalesce(ICE_TX_CONTAINER, ec, 3822 &vsi->tx_rings[q_num]->q_vector->tx, 3823 vsi)) 3824 return -EINVAL; 3825 } else { 3826 return -EINVAL; 3827 } 3828 3829 return 0; 3830 } 3831 3832 /** 3833 * ice_is_coalesce_param_invalid - check for unsupported coalesce parameters 3834 * @netdev: pointer to the netdev associated with this query 3835 * @ec: ethtool structure to fill 
with driver's coalesce settings 3836 * 3837 * Print netdev info if driver doesn't support one of the parameters 3838 * and return error. When any parameters will be implemented, remove only 3839 * this parameter from param array. 3840 */ 3841 static int 3842 ice_is_coalesce_param_invalid(struct net_device *netdev, 3843 struct ethtool_coalesce *ec) 3844 { 3845 struct ice_ethtool_not_used { 3846 u32 value; 3847 const char *name; 3848 } param[] = { 3849 {ec->stats_block_coalesce_usecs, "stats-block-usecs"}, 3850 {ec->rate_sample_interval, "sample-interval"}, 3851 {ec->pkt_rate_low, "pkt-rate-low"}, 3852 {ec->pkt_rate_high, "pkt-rate-high"}, 3853 {ec->rx_max_coalesced_frames, "rx-frames"}, 3854 {ec->rx_coalesce_usecs_irq, "rx-usecs-irq"}, 3855 {ec->rx_max_coalesced_frames_irq, "rx-frames-irq"}, 3856 {ec->tx_max_coalesced_frames, "tx-frames"}, 3857 {ec->tx_coalesce_usecs_irq, "tx-usecs-irq"}, 3858 {ec->tx_max_coalesced_frames_irq, "tx-frames-irq"}, 3859 {ec->rx_coalesce_usecs_low, "rx-usecs-low"}, 3860 {ec->rx_max_coalesced_frames_low, "rx-frames-low"}, 3861 {ec->tx_coalesce_usecs_low, "tx-usecs-low"}, 3862 {ec->tx_max_coalesced_frames_low, "tx-frames-low"}, 3863 {ec->rx_max_coalesced_frames_high, "rx-frames-high"}, 3864 {ec->tx_max_coalesced_frames_high, "tx-frames-high"} 3865 }; 3866 int i; 3867 3868 for (i = 0; i < ARRAY_SIZE(param); i++) { 3869 if (param[i].value) { 3870 netdev_info(netdev, "Setting %s not supported\n", 3871 param[i].name); 3872 return -EINVAL; 3873 } 3874 } 3875 3876 return 0; 3877 } 3878 3879 /** 3880 * __ice_set_coalesce - set ITR/INTRL values for the device 3881 * @netdev: pointer to the netdev associated with this query 3882 * @ec: ethtool structure to fill with driver's coalesce settings 3883 * @q_num: queue number to get the coalesce settings for 3884 * 3885 * If the caller passes in a negative q_num then we set the coalesce settings 3886 * for all Tx/Rx queues, else use the actual q_num passed in. 
3887 */ 3888 static int 3889 __ice_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec, 3890 int q_num) 3891 { 3892 struct ice_netdev_priv *np = netdev_priv(netdev); 3893 struct ice_vsi *vsi = np->vsi; 3894 3895 if (ice_is_coalesce_param_invalid(netdev, ec)) 3896 return -EINVAL; 3897 3898 if (q_num < 0) { 3899 int v_idx; 3900 3901 ice_for_each_q_vector(vsi, v_idx) { 3902 /* In some cases if DCB is configured the num_[rx|tx]q 3903 * can be less than vsi->num_q_vectors. This check 3904 * accounts for that so we don't report a false failure 3905 */ 3906 if (v_idx >= vsi->num_rxq && v_idx >= vsi->num_txq) 3907 goto set_complete; 3908 3909 if (ice_set_q_coalesce(vsi, ec, v_idx)) 3910 return -EINVAL; 3911 } 3912 goto set_complete; 3913 } 3914 3915 if (ice_set_q_coalesce(vsi, ec, q_num)) 3916 return -EINVAL; 3917 3918 set_complete: 3919 3920 return 0; 3921 } 3922 3923 static int 3924 ice_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec) 3925 { 3926 return __ice_set_coalesce(netdev, ec, -1); 3927 } 3928 3929 static int 3930 ice_set_per_q_coalesce(struct net_device *netdev, u32 q_num, 3931 struct ethtool_coalesce *ec) 3932 { 3933 return __ice_set_coalesce(netdev, ec, q_num); 3934 } 3935 3936 #define ICE_I2C_EEPROM_DEV_ADDR 0xA0 3937 #define ICE_I2C_EEPROM_DEV_ADDR2 0xA2 3938 #define ICE_MODULE_TYPE_SFP 0x03 3939 #define ICE_MODULE_TYPE_QSFP_PLUS 0x0D 3940 #define ICE_MODULE_TYPE_QSFP28 0x11 3941 #define ICE_MODULE_SFF_ADDR_MODE 0x04 3942 #define ICE_MODULE_SFF_DIAG_CAPAB 0x40 3943 #define ICE_MODULE_REVISION_ADDR 0x01 3944 #define ICE_MODULE_SFF_8472_COMP 0x5E 3945 #define ICE_MODULE_SFF_8472_SWAP 0x5C 3946 #define ICE_MODULE_QSFP_MAX_LEN 640 3947 3948 /** 3949 * ice_get_module_info - get SFF module type and revision information 3950 * @netdev: network interface device structure 3951 * @modinfo: module EEPROM size and layout information structure 3952 */ 3953 static int 3954 ice_get_module_info(struct net_device *netdev, 3955 struct 
ethtool_modinfo *modinfo)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	enum ice_status status;
	u8 sff8472_comp = 0;
	u8 sff8472_swap = 0;
	u8 sff8636_rev = 0;
	u8 value = 0;

	/* read the module identifier byte at offset 0x00 of device address
	 * 0xA0 to determine the module type
	 */
	status = ice_aq_sff_eeprom(hw, 0, ICE_I2C_EEPROM_DEV_ADDR, 0x00, 0x00,
				   0, &value, 1, 0, NULL);
	if (status)
		return -EIO;

	switch (value) {
	case ICE_MODULE_TYPE_SFP:
		/* read the SFF-8472 compliance and diagnostic-monitoring
		 * bytes to decide how much of the EEPROM is exposed
		 */
		status = ice_aq_sff_eeprom(hw, 0, ICE_I2C_EEPROM_DEV_ADDR,
					   ICE_MODULE_SFF_8472_COMP, 0x00, 0,
					   &sff8472_comp, 1, 0, NULL);
		if (status)
			return -EIO;
		status = ice_aq_sff_eeprom(hw, 0, ICE_I2C_EEPROM_DEV_ADDR,
					   ICE_MODULE_SFF_8472_SWAP, 0x00, 0,
					   &sff8472_swap, 1, 0, NULL);
		if (status)
			return -EIO;

		if (sff8472_swap & ICE_MODULE_SFF_ADDR_MODE) {
			/* address-change mode set: only report the 8079 page */
			modinfo->type = ETH_MODULE_SFF_8079;
			modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
		} else if (sff8472_comp &&
			   (sff8472_swap & ICE_MODULE_SFF_DIAG_CAPAB)) {
			/* diagnostics implemented: full 8472 layout available */
			modinfo->type = ETH_MODULE_SFF_8472;
			modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
		} else {
			/* fall back to the basic 8079 layout */
			modinfo->type = ETH_MODULE_SFF_8079;
			modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
		}
		break;
	case ICE_MODULE_TYPE_QSFP_PLUS:
	case ICE_MODULE_TYPE_QSFP28:
		/* read the revision byte to distinguish 8436 from 8636 */
		status = ice_aq_sff_eeprom(hw, 0, ICE_I2C_EEPROM_DEV_ADDR,
					   ICE_MODULE_REVISION_ADDR, 0x00, 0,
					   &sff8636_rev, 1, 0, NULL);
		if (status)
			return -EIO;
		/* Check revision compliance */
		if (sff8636_rev > 0x02) {
			/* Module is SFF-8636 compliant */
			modinfo->type = ETH_MODULE_SFF_8636;
			modinfo->eeprom_len = ICE_MODULE_QSFP_MAX_LEN;
		} else {
			modinfo->type = ETH_MODULE_SFF_8436;
			modinfo->eeprom_len = ICE_MODULE_QSFP_MAX_LEN;
		}
		break;
	default:
		/* unknown identifier byte: refuse rather than guess a layout */
		netdev_warn(netdev,
			    "SFF Module Type not recognized.\n");
		return -EINVAL;
} 4019 return 0; 4020 } 4021 4022 /** 4023 * ice_get_module_eeprom - fill buffer with SFF EEPROM contents 4024 * @netdev: network interface device structure 4025 * @ee: EEPROM dump request structure 4026 * @data: buffer to be filled with EEPROM contents 4027 */ 4028 static int 4029 ice_get_module_eeprom(struct net_device *netdev, 4030 struct ethtool_eeprom *ee, u8 *data) 4031 { 4032 struct ice_netdev_priv *np = netdev_priv(netdev); 4033 u8 addr = ICE_I2C_EEPROM_DEV_ADDR; 4034 struct ice_vsi *vsi = np->vsi; 4035 struct ice_pf *pf = vsi->back; 4036 struct ice_hw *hw = &pf->hw; 4037 enum ice_status status; 4038 bool is_sfp = false; 4039 u16 offset = 0; 4040 u8 value = 0; 4041 u8 page = 0; 4042 int i; 4043 4044 status = ice_aq_sff_eeprom(hw, 0, addr, offset, page, 0, 4045 &value, 1, 0, NULL); 4046 if (status) 4047 return -EIO; 4048 4049 if (!ee || !ee->len || !data) 4050 return -EINVAL; 4051 4052 if (value == ICE_MODULE_TYPE_SFP) 4053 is_sfp = true; 4054 4055 for (i = 0; i < ee->len; i++) { 4056 offset = i + ee->offset; 4057 4058 /* Check if we need to access the other memory page */ 4059 if (is_sfp) { 4060 if (offset >= ETH_MODULE_SFF_8079_LEN) { 4061 offset -= ETH_MODULE_SFF_8079_LEN; 4062 addr = ICE_I2C_EEPROM_DEV_ADDR2; 4063 } 4064 } else { 4065 while (offset >= ETH_MODULE_SFF_8436_LEN) { 4066 /* Compute memory page number and offset. 
				 */
				offset -= ETH_MODULE_SFF_8436_LEN / 2;
				page++;
			}
		}

		/* set_page flag (!is_sfp) is only used for QSFP modules */
		status = ice_aq_sff_eeprom(hw, 0, addr, offset, page, !is_sfp,
					   &value, 1, 0, NULL);
		if (status)
			value = 0; /* a failed byte read is reported as zero */
		data[i] = value;
	}
	return 0;
}

/* full ethtool ops table, installed by ice_set_ethtool_ops() */
static const struct ethtool_ops ice_ethtool_ops = {
	.get_link_ksettings = ice_get_link_ksettings,
	.set_link_ksettings = ice_set_link_ksettings,
	.get_drvinfo = ice_get_drvinfo,
	.get_regs_len = ice_get_regs_len,
	.get_regs = ice_get_regs,
	.get_msglevel = ice_get_msglevel,
	.set_msglevel = ice_set_msglevel,
	.self_test = ice_self_test,
	.get_link = ethtool_op_get_link,
	.get_eeprom_len = ice_get_eeprom_len,
	.get_eeprom = ice_get_eeprom,
	.get_coalesce = ice_get_coalesce,
	.set_coalesce = ice_set_coalesce,
	.get_strings = ice_get_strings,
	.set_phys_id = ice_set_phys_id,
	.get_ethtool_stats = ice_get_ethtool_stats,
	.get_priv_flags = ice_get_priv_flags,
	.set_priv_flags = ice_set_priv_flags,
	.get_sset_count = ice_get_sset_count,
	.get_rxnfc = ice_get_rxnfc,
	.set_rxnfc = ice_set_rxnfc,
	.get_ringparam = ice_get_ringparam,
	.set_ringparam = ice_set_ringparam,
	.nway_reset = ice_nway_reset,
	.get_pauseparam = ice_get_pauseparam,
	.set_pauseparam = ice_set_pauseparam,
	.get_rxfh_key_size = ice_get_rxfh_key_size,
	.get_rxfh_indir_size = ice_get_rxfh_indir_size,
	.get_rxfh = ice_get_rxfh,
	.set_rxfh = ice_set_rxfh,
	.get_channels = ice_get_channels,
	.set_channels = ice_set_channels,
	.get_ts_info = ethtool_op_get_ts_info,
	.get_per_queue_coalesce = ice_get_per_q_coalesce,
	.set_per_queue_coalesce = ice_set_per_q_coalesce,
	.get_fecparam = ice_get_fecparam,
	.set_fecparam = ice_set_fecparam,
	.get_module_info = ice_get_module_info,
	.get_module_eeprom = ice_get_module_eeprom,
};

/* reduced ops table for safe mode; a subset of ice_ethtool_ops above */
static const struct ethtool_ops ice_ethtool_safe_mode_ops =
{
	.get_link_ksettings = ice_get_link_ksettings,
	.set_link_ksettings = ice_set_link_ksettings,
	.get_drvinfo = ice_get_drvinfo,
	.get_regs_len = ice_get_regs_len,
	.get_regs = ice_get_regs,
	.get_msglevel = ice_get_msglevel,
	.set_msglevel = ice_set_msglevel,
	.get_eeprom_len = ice_get_eeprom_len,
	.get_eeprom = ice_get_eeprom,
	.get_strings = ice_get_strings,
	.get_ethtool_stats = ice_get_ethtool_stats,
	.get_sset_count = ice_get_sset_count,
	.get_ringparam = ice_get_ringparam,
	.set_ringparam = ice_set_ringparam,
	.nway_reset = ice_nway_reset,
	.get_channels = ice_get_channels,
};

/**
 * ice_set_ethtool_safe_mode_ops - setup safe mode ethtool ops
 * @netdev: network interface device structure
 *
 * Install the reduced safe-mode ethtool ops table on @netdev.
 */
void ice_set_ethtool_safe_mode_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &ice_ethtool_safe_mode_ops;
}

/**
 * ice_set_ethtool_ops - setup netdev ethtool ops
 * @netdev: network interface device structure
 *
 * setup netdev ethtool ops with ice specific ops
 */
void ice_set_ethtool_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &ice_ethtool_ops;
}