1 // SPDX-License-Identifier: GPL-2.0 2 /* Copyright (c) 2018, Intel Corporation. */ 3 4 /* ethtool support for ice */ 5 6 #include "ice.h" 7 8 struct ice_stats { 9 char stat_string[ETH_GSTRING_LEN]; 10 int sizeof_stat; 11 int stat_offset; 12 }; 13 14 #define ICE_STAT(_type, _name, _stat) { \ 15 .stat_string = _name, \ 16 .sizeof_stat = FIELD_SIZEOF(_type, _stat), \ 17 .stat_offset = offsetof(_type, _stat) \ 18 } 19 20 #define ICE_VSI_STAT(_name, _stat) \ 21 ICE_STAT(struct ice_vsi, _name, _stat) 22 #define ICE_PF_STAT(_name, _stat) \ 23 ICE_STAT(struct ice_pf, _name, _stat) 24 25 static int ice_q_stats_len(struct net_device *netdev) 26 { 27 struct ice_netdev_priv *np = netdev_priv(netdev); 28 29 return ((np->vsi->alloc_txq + np->vsi->alloc_rxq) * 30 (sizeof(struct ice_q_stats) / sizeof(u64))); 31 } 32 33 #define ICE_PF_STATS_LEN ARRAY_SIZE(ice_gstrings_pf_stats) 34 #define ICE_VSI_STATS_LEN ARRAY_SIZE(ice_gstrings_vsi_stats) 35 36 #define ICE_ALL_STATS_LEN(n) (ICE_PF_STATS_LEN + ICE_VSI_STATS_LEN + \ 37 ice_q_stats_len(n)) 38 39 static const struct ice_stats ice_gstrings_vsi_stats[] = { 40 ICE_VSI_STAT("tx_unicast", eth_stats.tx_unicast), 41 ICE_VSI_STAT("rx_unicast", eth_stats.rx_unicast), 42 ICE_VSI_STAT("tx_multicast", eth_stats.tx_multicast), 43 ICE_VSI_STAT("rx_multicast", eth_stats.rx_multicast), 44 ICE_VSI_STAT("tx_broadcast", eth_stats.tx_broadcast), 45 ICE_VSI_STAT("rx_broadcast", eth_stats.rx_broadcast), 46 ICE_VSI_STAT("tx_bytes", eth_stats.tx_bytes), 47 ICE_VSI_STAT("rx_bytes", eth_stats.rx_bytes), 48 ICE_VSI_STAT("rx_discards", eth_stats.rx_discards), 49 ICE_VSI_STAT("tx_errors", eth_stats.tx_errors), 50 ICE_VSI_STAT("tx_linearize", tx_linearize), 51 ICE_VSI_STAT("rx_unknown_protocol", eth_stats.rx_unknown_protocol), 52 ICE_VSI_STAT("rx_alloc_fail", rx_buf_failed), 53 ICE_VSI_STAT("rx_pg_alloc_fail", rx_page_failed), 54 }; 55 56 /* These PF_STATs might look like duplicates of some NETDEV_STATs, 57 * but they aren't. 
This device is capable of supporting multiple 58 * VSIs/netdevs on a single PF. The NETDEV_STATs are for individual 59 * netdevs whereas the PF_STATs are for the physical function that's 60 * hosting these netdevs. 61 * 62 * The PF_STATs are appended to the netdev stats only when ethtool -S 63 * is queried on the base PF netdev. 64 */ 65 static const struct ice_stats ice_gstrings_pf_stats[] = { 66 ICE_PF_STAT("tx_bytes", stats.eth.tx_bytes), 67 ICE_PF_STAT("rx_bytes", stats.eth.rx_bytes), 68 ICE_PF_STAT("tx_unicast", stats.eth.tx_unicast), 69 ICE_PF_STAT("rx_unicast", stats.eth.rx_unicast), 70 ICE_PF_STAT("tx_multicast", stats.eth.tx_multicast), 71 ICE_PF_STAT("rx_multicast", stats.eth.rx_multicast), 72 ICE_PF_STAT("tx_broadcast", stats.eth.tx_broadcast), 73 ICE_PF_STAT("rx_broadcast", stats.eth.rx_broadcast), 74 ICE_PF_STAT("tx_errors", stats.eth.tx_errors), 75 ICE_PF_STAT("tx_size_64", stats.tx_size_64), 76 ICE_PF_STAT("rx_size_64", stats.rx_size_64), 77 ICE_PF_STAT("tx_size_127", stats.tx_size_127), 78 ICE_PF_STAT("rx_size_127", stats.rx_size_127), 79 ICE_PF_STAT("tx_size_255", stats.tx_size_255), 80 ICE_PF_STAT("rx_size_255", stats.rx_size_255), 81 ICE_PF_STAT("tx_size_511", stats.tx_size_511), 82 ICE_PF_STAT("rx_size_511", stats.rx_size_511), 83 ICE_PF_STAT("tx_size_1023", stats.tx_size_1023), 84 ICE_PF_STAT("rx_size_1023", stats.rx_size_1023), 85 ICE_PF_STAT("tx_size_1522", stats.tx_size_1522), 86 ICE_PF_STAT("rx_size_1522", stats.rx_size_1522), 87 ICE_PF_STAT("tx_size_big", stats.tx_size_big), 88 ICE_PF_STAT("rx_size_big", stats.rx_size_big), 89 ICE_PF_STAT("link_xon_tx", stats.link_xon_tx), 90 ICE_PF_STAT("link_xon_rx", stats.link_xon_rx), 91 ICE_PF_STAT("link_xoff_tx", stats.link_xoff_tx), 92 ICE_PF_STAT("link_xoff_rx", stats.link_xoff_rx), 93 ICE_PF_STAT("tx_dropped_link_down", stats.tx_dropped_link_down), 94 ICE_PF_STAT("rx_undersize", stats.rx_undersize), 95 ICE_PF_STAT("rx_fragments", stats.rx_fragments), 96 ICE_PF_STAT("rx_oversize", 
stats.rx_oversize), 97 ICE_PF_STAT("rx_jabber", stats.rx_jabber), 98 ICE_PF_STAT("rx_csum_bad", hw_csum_rx_error), 99 ICE_PF_STAT("rx_length_errors", stats.rx_len_errors), 100 ICE_PF_STAT("rx_dropped", stats.eth.rx_discards), 101 ICE_PF_STAT("rx_crc_errors", stats.crc_errors), 102 ICE_PF_STAT("illegal_bytes", stats.illegal_bytes), 103 ICE_PF_STAT("mac_local_faults", stats.mac_local_faults), 104 ICE_PF_STAT("mac_remote_faults", stats.mac_remote_faults), 105 }; 106 107 static const u32 ice_regs_dump_list[] = { 108 PFGEN_STATE, 109 PRTGEN_STATUS, 110 QRX_CTRL(0), 111 QINT_TQCTL(0), 112 QINT_RQCTL(0), 113 PFINT_OICR_ENA, 114 QRX_ITR(0), 115 }; 116 117 struct ice_priv_flag { 118 char name[ETH_GSTRING_LEN]; 119 u32 bitno; /* bit position in pf->flags */ 120 }; 121 122 #define ICE_PRIV_FLAG(_name, _bitno) { \ 123 .name = _name, \ 124 .bitno = _bitno, \ 125 } 126 127 static const struct ice_priv_flag ice_gstrings_priv_flags[] = { 128 ICE_PRIV_FLAG("link-down-on-close", ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA), 129 }; 130 131 #define ICE_PRIV_FLAG_ARRAY_SIZE ARRAY_SIZE(ice_gstrings_priv_flags) 132 133 /** 134 * ice_nvm_version_str - format the NVM version strings 135 * @hw: ptr to the hardware info 136 */ 137 static char *ice_nvm_version_str(struct ice_hw *hw) 138 { 139 static char buf[ICE_ETHTOOL_FWVER_LEN]; 140 u8 ver, patch; 141 u32 full_ver; 142 u16 build; 143 144 full_ver = hw->nvm.oem_ver; 145 ver = (u8)((full_ver & ICE_OEM_VER_MASK) >> ICE_OEM_VER_SHIFT); 146 build = (u16)((full_ver & ICE_OEM_VER_BUILD_MASK) >> 147 ICE_OEM_VER_BUILD_SHIFT); 148 patch = (u8)(full_ver & ICE_OEM_VER_PATCH_MASK); 149 150 snprintf(buf, sizeof(buf), "%x.%02x 0x%x %d.%d.%d", 151 (hw->nvm.ver & ICE_NVM_VER_HI_MASK) >> ICE_NVM_VER_HI_SHIFT, 152 (hw->nvm.ver & ICE_NVM_VER_LO_MASK) >> ICE_NVM_VER_LO_SHIFT, 153 hw->nvm.eetrack, ver, build, patch); 154 155 return buf; 156 } 157 158 static void 159 ice_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) 160 { 161 struct ice_netdev_priv 
*np = netdev_priv(netdev); 162 struct ice_vsi *vsi = np->vsi; 163 struct ice_pf *pf = vsi->back; 164 165 strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver)); 166 strlcpy(drvinfo->version, ice_drv_ver, sizeof(drvinfo->version)); 167 strlcpy(drvinfo->fw_version, ice_nvm_version_str(&pf->hw), 168 sizeof(drvinfo->fw_version)); 169 strlcpy(drvinfo->bus_info, pci_name(pf->pdev), 170 sizeof(drvinfo->bus_info)); 171 drvinfo->n_priv_flags = ICE_PRIV_FLAG_ARRAY_SIZE; 172 } 173 174 static int ice_get_regs_len(struct net_device __always_unused *netdev) 175 { 176 return sizeof(ice_regs_dump_list); 177 } 178 179 static void 180 ice_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p) 181 { 182 struct ice_netdev_priv *np = netdev_priv(netdev); 183 struct ice_pf *pf = np->vsi->back; 184 struct ice_hw *hw = &pf->hw; 185 u32 *regs_buf = (u32 *)p; 186 int i; 187 188 regs->version = 1; 189 190 for (i = 0; i < ARRAY_SIZE(ice_regs_dump_list); ++i) 191 regs_buf[i] = rd32(hw, ice_regs_dump_list[i]); 192 } 193 194 static u32 ice_get_msglevel(struct net_device *netdev) 195 { 196 struct ice_netdev_priv *np = netdev_priv(netdev); 197 struct ice_pf *pf = np->vsi->back; 198 199 #ifndef CONFIG_DYNAMIC_DEBUG 200 if (pf->hw.debug_mask) 201 netdev_info(netdev, "hw debug_mask: 0x%llX\n", 202 pf->hw.debug_mask); 203 #endif /* !CONFIG_DYNAMIC_DEBUG */ 204 205 return pf->msg_enable; 206 } 207 208 static void ice_set_msglevel(struct net_device *netdev, u32 data) 209 { 210 struct ice_netdev_priv *np = netdev_priv(netdev); 211 struct ice_pf *pf = np->vsi->back; 212 213 #ifndef CONFIG_DYNAMIC_DEBUG 214 if (ICE_DBG_USER & data) 215 pf->hw.debug_mask = data; 216 else 217 pf->msg_enable = data; 218 #else 219 pf->msg_enable = data; 220 #endif /* !CONFIG_DYNAMIC_DEBUG */ 221 } 222 223 static int ice_get_eeprom_len(struct net_device *netdev) 224 { 225 struct ice_netdev_priv *np = netdev_priv(netdev); 226 struct ice_pf *pf = np->vsi->back; 227 228 return (int)(pf->hw.nvm.sr_words 
* sizeof(u16)); 229 } 230 231 static int 232 ice_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom, 233 u8 *bytes) 234 { 235 struct ice_netdev_priv *np = netdev_priv(netdev); 236 u16 first_word, last_word, nwords; 237 struct ice_vsi *vsi = np->vsi; 238 struct ice_pf *pf = vsi->back; 239 struct ice_hw *hw = &pf->hw; 240 enum ice_status status; 241 struct device *dev; 242 int ret = 0; 243 u16 *buf; 244 245 dev = &pf->pdev->dev; 246 247 eeprom->magic = hw->vendor_id | (hw->device_id << 16); 248 249 first_word = eeprom->offset >> 1; 250 last_word = (eeprom->offset + eeprom->len - 1) >> 1; 251 nwords = last_word - first_word + 1; 252 253 buf = devm_kcalloc(dev, nwords, sizeof(u16), GFP_KERNEL); 254 if (!buf) 255 return -ENOMEM; 256 257 status = ice_read_sr_buf(hw, first_word, &nwords, buf); 258 if (status) { 259 dev_err(dev, "ice_read_sr_buf failed, err %d aq_err %d\n", 260 status, hw->adminq.sq_last_status); 261 eeprom->len = sizeof(u16) * nwords; 262 ret = -EIO; 263 goto out; 264 } 265 266 memcpy(bytes, (u8 *)buf + (eeprom->offset & 1), eeprom->len); 267 out: 268 devm_kfree(dev, buf); 269 return ret; 270 } 271 272 static void ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data) 273 { 274 struct ice_netdev_priv *np = netdev_priv(netdev); 275 struct ice_vsi *vsi = np->vsi; 276 char *p = (char *)data; 277 unsigned int i; 278 279 switch (stringset) { 280 case ETH_SS_STATS: 281 for (i = 0; i < ICE_VSI_STATS_LEN; i++) { 282 snprintf(p, ETH_GSTRING_LEN, "%s", 283 ice_gstrings_vsi_stats[i].stat_string); 284 p += ETH_GSTRING_LEN; 285 } 286 287 ice_for_each_alloc_txq(vsi, i) { 288 snprintf(p, ETH_GSTRING_LEN, 289 "tx-queue-%u.tx_packets", i); 290 p += ETH_GSTRING_LEN; 291 snprintf(p, ETH_GSTRING_LEN, "tx-queue-%u.tx_bytes", i); 292 p += ETH_GSTRING_LEN; 293 } 294 295 ice_for_each_alloc_rxq(vsi, i) { 296 snprintf(p, ETH_GSTRING_LEN, 297 "rx-queue-%u.rx_packets", i); 298 p += ETH_GSTRING_LEN; 299 snprintf(p, ETH_GSTRING_LEN, 
"rx-queue-%u.rx_bytes", i); 300 p += ETH_GSTRING_LEN; 301 } 302 303 if (vsi->type != ICE_VSI_PF) 304 return; 305 306 for (i = 0; i < ICE_PF_STATS_LEN; i++) { 307 snprintf(p, ETH_GSTRING_LEN, "port.%s", 308 ice_gstrings_pf_stats[i].stat_string); 309 p += ETH_GSTRING_LEN; 310 } 311 312 break; 313 case ETH_SS_PRIV_FLAGS: 314 for (i = 0; i < ICE_PRIV_FLAG_ARRAY_SIZE; i++) { 315 snprintf(p, ETH_GSTRING_LEN, "%s", 316 ice_gstrings_priv_flags[i].name); 317 p += ETH_GSTRING_LEN; 318 } 319 break; 320 default: 321 break; 322 } 323 } 324 325 static int 326 ice_set_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state) 327 { 328 struct ice_netdev_priv *np = netdev_priv(netdev); 329 bool led_active; 330 331 switch (state) { 332 case ETHTOOL_ID_ACTIVE: 333 led_active = true; 334 break; 335 case ETHTOOL_ID_INACTIVE: 336 led_active = false; 337 break; 338 default: 339 return -EINVAL; 340 } 341 342 if (ice_aq_set_port_id_led(np->vsi->port_info, !led_active, NULL)) 343 return -EIO; 344 345 return 0; 346 } 347 348 /** 349 * ice_get_priv_flags - report device private flags 350 * @netdev: network interface device structure 351 * 352 * The get string set count and the string set should be matched for each 353 * flag returned. Add new strings for each flag to the ice_gstrings_priv_flags 354 * array. 355 * 356 * Returns a u32 bitmap of flags. 
357 */ 358 static u32 ice_get_priv_flags(struct net_device *netdev) 359 { 360 struct ice_netdev_priv *np = netdev_priv(netdev); 361 struct ice_vsi *vsi = np->vsi; 362 struct ice_pf *pf = vsi->back; 363 u32 i, ret_flags = 0; 364 365 for (i = 0; i < ICE_PRIV_FLAG_ARRAY_SIZE; i++) { 366 const struct ice_priv_flag *priv_flag; 367 368 priv_flag = &ice_gstrings_priv_flags[i]; 369 370 if (test_bit(priv_flag->bitno, pf->flags)) 371 ret_flags |= BIT(i); 372 } 373 374 return ret_flags; 375 } 376 377 /** 378 * ice_set_priv_flags - set private flags 379 * @netdev: network interface device structure 380 * @flags: bit flags to be set 381 */ 382 static int ice_set_priv_flags(struct net_device *netdev, u32 flags) 383 { 384 struct ice_netdev_priv *np = netdev_priv(netdev); 385 struct ice_vsi *vsi = np->vsi; 386 struct ice_pf *pf = vsi->back; 387 u32 i; 388 389 if (flags > BIT(ICE_PRIV_FLAG_ARRAY_SIZE)) 390 return -EINVAL; 391 392 for (i = 0; i < ICE_PRIV_FLAG_ARRAY_SIZE; i++) { 393 const struct ice_priv_flag *priv_flag; 394 395 priv_flag = &ice_gstrings_priv_flags[i]; 396 397 if (flags & BIT(i)) 398 set_bit(priv_flag->bitno, pf->flags); 399 else 400 clear_bit(priv_flag->bitno, pf->flags); 401 } 402 403 return 0; 404 } 405 406 static int ice_get_sset_count(struct net_device *netdev, int sset) 407 { 408 switch (sset) { 409 case ETH_SS_STATS: 410 /* The number (and order) of strings reported *must* remain 411 * constant for a given netdevice. This function must not 412 * report a different number based on run time parameters 413 * (such as the number of queues in use, or the setting of 414 * a private ethtool flag). This is due to the nature of the 415 * ethtool stats API. 416 * 417 * Userspace programs such as ethtool must make 3 separate 418 * ioctl requests, one for size, one for the strings, and 419 * finally one for the stats. 
Since these cross into 420 * userspace, changes to the number or size could result in 421 * undefined memory access or incorrect string<->value 422 * correlations for statistics. 423 * 424 * Even if it appears to be safe, changes to the size or 425 * order of strings will suffer from race conditions and are 426 * not safe. 427 */ 428 return ICE_ALL_STATS_LEN(netdev); 429 case ETH_SS_PRIV_FLAGS: 430 return ICE_PRIV_FLAG_ARRAY_SIZE; 431 default: 432 return -EOPNOTSUPP; 433 } 434 } 435 436 static void 437 ice_get_ethtool_stats(struct net_device *netdev, 438 struct ethtool_stats __always_unused *stats, u64 *data) 439 { 440 struct ice_netdev_priv *np = netdev_priv(netdev); 441 struct ice_vsi *vsi = np->vsi; 442 struct ice_pf *pf = vsi->back; 443 struct ice_ring *ring; 444 unsigned int j = 0; 445 int i = 0; 446 char *p; 447 448 for (j = 0; j < ICE_VSI_STATS_LEN; j++) { 449 p = (char *)vsi + ice_gstrings_vsi_stats[j].stat_offset; 450 data[i++] = (ice_gstrings_vsi_stats[j].sizeof_stat == 451 sizeof(u64)) ? *(u64 *)p : *(u32 *)p; 452 } 453 454 /* populate per queue stats */ 455 rcu_read_lock(); 456 457 ice_for_each_alloc_txq(vsi, j) { 458 ring = READ_ONCE(vsi->tx_rings[j]); 459 if (ring) { 460 data[i++] = ring->stats.pkts; 461 data[i++] = ring->stats.bytes; 462 } else { 463 data[i++] = 0; 464 data[i++] = 0; 465 } 466 } 467 468 ice_for_each_alloc_rxq(vsi, j) { 469 ring = READ_ONCE(vsi->rx_rings[j]); 470 if (ring) { 471 data[i++] = ring->stats.pkts; 472 data[i++] = ring->stats.bytes; 473 } else { 474 data[i++] = 0; 475 data[i++] = 0; 476 } 477 } 478 479 rcu_read_unlock(); 480 481 if (vsi->type != ICE_VSI_PF) 482 return; 483 484 for (j = 0; j < ICE_PF_STATS_LEN; j++) { 485 p = (char *)pf + ice_gstrings_pf_stats[j].stat_offset; 486 data[i++] = (ice_gstrings_pf_stats[j].sizeof_stat == 487 sizeof(u64)) ? 
*(u64 *)p : *(u32 *)p; 488 } 489 } 490 491 /** 492 * ice_phy_type_to_ethtool - convert the phy_types to ethtool link modes 493 * @netdev: network interface device structure 494 * @ks: ethtool link ksettings struct to fill out 495 */ 496 static void 497 ice_phy_type_to_ethtool(struct net_device *netdev, 498 struct ethtool_link_ksettings *ks) 499 { 500 struct ice_netdev_priv *np = netdev_priv(netdev); 501 struct ice_link_status *hw_link_info; 502 bool need_add_adv_mode = false; 503 struct ice_vsi *vsi = np->vsi; 504 u64 phy_types_high; 505 u64 phy_types_low; 506 507 hw_link_info = &vsi->port_info->phy.link_info; 508 phy_types_low = vsi->port_info->phy.phy_type_low; 509 phy_types_high = vsi->port_info->phy.phy_type_high; 510 511 ethtool_link_ksettings_zero_link_mode(ks, supported); 512 ethtool_link_ksettings_zero_link_mode(ks, advertising); 513 514 if (phy_types_low & ICE_PHY_TYPE_LOW_100BASE_TX || 515 phy_types_low & ICE_PHY_TYPE_LOW_100M_SGMII) { 516 ethtool_link_ksettings_add_link_mode(ks, supported, 517 100baseT_Full); 518 if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_100MB) 519 ethtool_link_ksettings_add_link_mode(ks, advertising, 520 100baseT_Full); 521 } 522 if (phy_types_low & ICE_PHY_TYPE_LOW_1000BASE_T || 523 phy_types_low & ICE_PHY_TYPE_LOW_1G_SGMII) { 524 ethtool_link_ksettings_add_link_mode(ks, supported, 525 1000baseT_Full); 526 if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_1000MB) 527 ethtool_link_ksettings_add_link_mode(ks, advertising, 528 1000baseT_Full); 529 } 530 if (phy_types_low & ICE_PHY_TYPE_LOW_1000BASE_KX) { 531 ethtool_link_ksettings_add_link_mode(ks, supported, 532 1000baseKX_Full); 533 if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_1000MB) 534 ethtool_link_ksettings_add_link_mode(ks, advertising, 535 1000baseKX_Full); 536 } 537 if (phy_types_low & ICE_PHY_TYPE_LOW_1000BASE_SX || 538 phy_types_low & ICE_PHY_TYPE_LOW_1000BASE_LX) { 539 ethtool_link_ksettings_add_link_mode(ks, supported, 540 1000baseX_Full); 541 if 
(hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_1000MB) 542 ethtool_link_ksettings_add_link_mode(ks, advertising, 543 1000baseX_Full); 544 } 545 if (phy_types_low & ICE_PHY_TYPE_LOW_2500BASE_T) { 546 ethtool_link_ksettings_add_link_mode(ks, supported, 547 2500baseT_Full); 548 if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_2500MB) 549 ethtool_link_ksettings_add_link_mode(ks, advertising, 550 2500baseT_Full); 551 } 552 if (phy_types_low & ICE_PHY_TYPE_LOW_2500BASE_X || 553 phy_types_low & ICE_PHY_TYPE_LOW_2500BASE_KX) { 554 ethtool_link_ksettings_add_link_mode(ks, supported, 555 2500baseX_Full); 556 if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_2500MB) 557 ethtool_link_ksettings_add_link_mode(ks, advertising, 558 2500baseX_Full); 559 } 560 if (phy_types_low & ICE_PHY_TYPE_LOW_5GBASE_T || 561 phy_types_low & ICE_PHY_TYPE_LOW_5GBASE_KR) { 562 ethtool_link_ksettings_add_link_mode(ks, supported, 563 5000baseT_Full); 564 if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_5GB) 565 ethtool_link_ksettings_add_link_mode(ks, advertising, 566 5000baseT_Full); 567 } 568 if (phy_types_low & ICE_PHY_TYPE_LOW_10GBASE_T || 569 phy_types_low & ICE_PHY_TYPE_LOW_10G_SFI_DA || 570 phy_types_low & ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC || 571 phy_types_low & ICE_PHY_TYPE_LOW_10G_SFI_C2C) { 572 ethtool_link_ksettings_add_link_mode(ks, supported, 573 10000baseT_Full); 574 if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_10GB) 575 ethtool_link_ksettings_add_link_mode(ks, advertising, 576 10000baseT_Full); 577 } 578 if (phy_types_low & ICE_PHY_TYPE_LOW_10GBASE_KR_CR1) { 579 ethtool_link_ksettings_add_link_mode(ks, supported, 580 10000baseKR_Full); 581 if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_10GB) 582 ethtool_link_ksettings_add_link_mode(ks, advertising, 583 10000baseKR_Full); 584 } 585 if (phy_types_low & ICE_PHY_TYPE_LOW_10GBASE_SR) { 586 ethtool_link_ksettings_add_link_mode(ks, supported, 587 10000baseSR_Full); 588 if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_10GB) 589 
ethtool_link_ksettings_add_link_mode(ks, advertising, 590 10000baseSR_Full); 591 } 592 if (phy_types_low & ICE_PHY_TYPE_LOW_10GBASE_LR) { 593 ethtool_link_ksettings_add_link_mode(ks, supported, 594 10000baseLR_Full); 595 if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_10GB) 596 ethtool_link_ksettings_add_link_mode(ks, advertising, 597 10000baseLR_Full); 598 } 599 if (phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_T || 600 phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_CR || 601 phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_CR_S || 602 phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_CR1 || 603 phy_types_low & ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC || 604 phy_types_low & ICE_PHY_TYPE_LOW_25G_AUI_C2C) { 605 ethtool_link_ksettings_add_link_mode(ks, supported, 606 25000baseCR_Full); 607 if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_25GB) 608 ethtool_link_ksettings_add_link_mode(ks, advertising, 609 25000baseCR_Full); 610 } 611 if (phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_SR || 612 phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_LR) { 613 ethtool_link_ksettings_add_link_mode(ks, supported, 614 25000baseSR_Full); 615 if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_25GB) 616 ethtool_link_ksettings_add_link_mode(ks, advertising, 617 25000baseSR_Full); 618 } 619 if (phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_KR || 620 phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_KR_S || 621 phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_KR1) { 622 ethtool_link_ksettings_add_link_mode(ks, supported, 623 25000baseKR_Full); 624 if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_25GB) 625 ethtool_link_ksettings_add_link_mode(ks, advertising, 626 25000baseKR_Full); 627 } 628 if (phy_types_low & ICE_PHY_TYPE_LOW_40GBASE_KR4) { 629 ethtool_link_ksettings_add_link_mode(ks, supported, 630 40000baseKR4_Full); 631 if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_40GB) 632 ethtool_link_ksettings_add_link_mode(ks, advertising, 633 40000baseKR4_Full); 634 } 635 if (phy_types_low & ICE_PHY_TYPE_LOW_40GBASE_CR4 || 636 phy_types_low & 
ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC || 637 phy_types_low & ICE_PHY_TYPE_LOW_40G_XLAUI) { 638 ethtool_link_ksettings_add_link_mode(ks, supported, 639 40000baseCR4_Full); 640 if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_40GB) 641 ethtool_link_ksettings_add_link_mode(ks, advertising, 642 40000baseCR4_Full); 643 } 644 if (phy_types_low & ICE_PHY_TYPE_LOW_40GBASE_SR4) { 645 ethtool_link_ksettings_add_link_mode(ks, supported, 646 40000baseSR4_Full); 647 if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_40GB) 648 ethtool_link_ksettings_add_link_mode(ks, advertising, 649 40000baseSR4_Full); 650 } 651 if (phy_types_low & ICE_PHY_TYPE_LOW_40GBASE_LR4) { 652 ethtool_link_ksettings_add_link_mode(ks, supported, 653 40000baseLR4_Full); 654 if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_40GB) 655 ethtool_link_ksettings_add_link_mode(ks, advertising, 656 40000baseLR4_Full); 657 } 658 if (phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_CR2 || 659 phy_types_low & ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC || 660 phy_types_low & ICE_PHY_TYPE_LOW_50G_LAUI2 || 661 phy_types_low & ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC || 662 phy_types_low & ICE_PHY_TYPE_LOW_50G_AUI2 || 663 phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_CP || 664 phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_SR || 665 phy_types_low & ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC || 666 phy_types_low & ICE_PHY_TYPE_LOW_50G_AUI1) { 667 ethtool_link_ksettings_add_link_mode(ks, supported, 668 50000baseCR2_Full); 669 if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_50GB) 670 ethtool_link_ksettings_add_link_mode(ks, advertising, 671 50000baseCR2_Full); 672 } 673 if (phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_KR2 || 674 phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4) { 675 ethtool_link_ksettings_add_link_mode(ks, supported, 676 50000baseKR2_Full); 677 if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_50GB) 678 ethtool_link_ksettings_add_link_mode(ks, advertising, 679 50000baseKR2_Full); 680 } 681 if (phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_SR2 || 682 phy_types_low 
& ICE_PHY_TYPE_LOW_50GBASE_LR2 || 683 phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_FR || 684 phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_LR) { 685 ethtool_link_ksettings_add_link_mode(ks, supported, 686 50000baseSR2_Full); 687 if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_50GB) 688 ethtool_link_ksettings_add_link_mode(ks, advertising, 689 50000baseSR2_Full); 690 } 691 if (phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_CR4 || 692 phy_types_low & ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC || 693 phy_types_low & ICE_PHY_TYPE_LOW_100G_CAUI4 || 694 phy_types_low & ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC || 695 phy_types_low & ICE_PHY_TYPE_LOW_100G_AUI4 || 696 phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4 || 697 phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_CP2 || 698 phy_types_high & ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC || 699 phy_types_high & ICE_PHY_TYPE_HIGH_100G_CAUI2 || 700 phy_types_high & ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC || 701 phy_types_high & ICE_PHY_TYPE_HIGH_100G_AUI2) { 702 ethtool_link_ksettings_add_link_mode(ks, supported, 703 100000baseCR4_Full); 704 if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_100GB) 705 need_add_adv_mode = true; 706 } 707 if (need_add_adv_mode) { 708 need_add_adv_mode = false; 709 ethtool_link_ksettings_add_link_mode(ks, advertising, 710 100000baseCR4_Full); 711 } 712 if (phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_SR4 || 713 phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_SR2) { 714 ethtool_link_ksettings_add_link_mode(ks, supported, 715 100000baseSR4_Full); 716 if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_100GB) 717 need_add_adv_mode = true; 718 } 719 if (need_add_adv_mode) { 720 need_add_adv_mode = false; 721 ethtool_link_ksettings_add_link_mode(ks, advertising, 722 100000baseSR4_Full); 723 } 724 if (phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_LR4 || 725 phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_DR) { 726 ethtool_link_ksettings_add_link_mode(ks, supported, 727 100000baseLR4_ER4_Full); 728 if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_100GB) 729 
need_add_adv_mode = true; 730 } 731 if (need_add_adv_mode) { 732 need_add_adv_mode = false; 733 ethtool_link_ksettings_add_link_mode(ks, advertising, 734 100000baseLR4_ER4_Full); 735 } 736 if (phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_KR4 || 737 phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4 || 738 phy_types_high & ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4) { 739 ethtool_link_ksettings_add_link_mode(ks, supported, 740 100000baseKR4_Full); 741 if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_100GB) 742 need_add_adv_mode = true; 743 } 744 if (need_add_adv_mode) 745 ethtool_link_ksettings_add_link_mode(ks, advertising, 746 100000baseKR4_Full); 747 748 /* Autoneg PHY types */ 749 if (phy_types_low & ICE_PHY_TYPE_LOW_100BASE_TX || 750 phy_types_low & ICE_PHY_TYPE_LOW_1000BASE_T || 751 phy_types_low & ICE_PHY_TYPE_LOW_1000BASE_KX || 752 phy_types_low & ICE_PHY_TYPE_LOW_2500BASE_T || 753 phy_types_low & ICE_PHY_TYPE_LOW_2500BASE_KX || 754 phy_types_low & ICE_PHY_TYPE_LOW_5GBASE_T || 755 phy_types_low & ICE_PHY_TYPE_LOW_5GBASE_KR || 756 phy_types_low & ICE_PHY_TYPE_LOW_10GBASE_T || 757 phy_types_low & ICE_PHY_TYPE_LOW_10GBASE_KR_CR1 || 758 phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_T || 759 phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_CR || 760 phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_CR_S || 761 phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_CR1 || 762 phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_KR || 763 phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_KR_S || 764 phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_KR1 || 765 phy_types_low & ICE_PHY_TYPE_LOW_40GBASE_CR4 || 766 phy_types_low & ICE_PHY_TYPE_LOW_40GBASE_KR4) { 767 ethtool_link_ksettings_add_link_mode(ks, supported, 768 Autoneg); 769 ethtool_link_ksettings_add_link_mode(ks, advertising, 770 Autoneg); 771 } 772 if (phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_CR2 || 773 phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_KR2 || 774 phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_CP || 775 phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4) { 776 
ethtool_link_ksettings_add_link_mode(ks, supported, 777 Autoneg); 778 ethtool_link_ksettings_add_link_mode(ks, advertising, 779 Autoneg); 780 } 781 if (phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_CR4 || 782 phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_KR4 || 783 phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4 || 784 phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_CP2) { 785 ethtool_link_ksettings_add_link_mode(ks, supported, 786 Autoneg); 787 ethtool_link_ksettings_add_link_mode(ks, advertising, 788 Autoneg); 789 } 790 } 791 792 #define TEST_SET_BITS_TIMEOUT 50 793 #define TEST_SET_BITS_SLEEP_MAX 2000 794 #define TEST_SET_BITS_SLEEP_MIN 1000 795 796 /** 797 * ice_get_settings_link_up - Get Link settings for when link is up 798 * @ks: ethtool ksettings to fill in 799 * @netdev: network interface device structure 800 */ 801 static void 802 ice_get_settings_link_up(struct ethtool_link_ksettings *ks, 803 struct net_device *netdev) 804 { 805 struct ice_netdev_priv *np = netdev_priv(netdev); 806 struct ethtool_link_ksettings cap_ksettings; 807 struct ice_link_status *link_info; 808 struct ice_vsi *vsi = np->vsi; 809 bool unrecog_phy_high = false; 810 bool unrecog_phy_low = false; 811 812 link_info = &vsi->port_info->phy.link_info; 813 814 /* Initialize supported and advertised settings based on phy settings */ 815 switch (link_info->phy_type_low) { 816 case ICE_PHY_TYPE_LOW_100BASE_TX: 817 ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); 818 ethtool_link_ksettings_add_link_mode(ks, supported, 819 100baseT_Full); 820 ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg); 821 ethtool_link_ksettings_add_link_mode(ks, advertising, 822 100baseT_Full); 823 break; 824 case ICE_PHY_TYPE_LOW_100M_SGMII: 825 ethtool_link_ksettings_add_link_mode(ks, supported, 826 100baseT_Full); 827 break; 828 case ICE_PHY_TYPE_LOW_1000BASE_T: 829 ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); 830 ethtool_link_ksettings_add_link_mode(ks, supported, 831 1000baseT_Full); 
832 ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg); 833 ethtool_link_ksettings_add_link_mode(ks, advertising, 834 1000baseT_Full); 835 break; 836 case ICE_PHY_TYPE_LOW_1G_SGMII: 837 ethtool_link_ksettings_add_link_mode(ks, supported, 838 1000baseT_Full); 839 break; 840 case ICE_PHY_TYPE_LOW_1000BASE_SX: 841 case ICE_PHY_TYPE_LOW_1000BASE_LX: 842 ethtool_link_ksettings_add_link_mode(ks, supported, 843 1000baseX_Full); 844 break; 845 case ICE_PHY_TYPE_LOW_1000BASE_KX: 846 ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); 847 ethtool_link_ksettings_add_link_mode(ks, supported, 848 1000baseKX_Full); 849 ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg); 850 ethtool_link_ksettings_add_link_mode(ks, advertising, 851 1000baseKX_Full); 852 break; 853 case ICE_PHY_TYPE_LOW_2500BASE_T: 854 ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); 855 ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg); 856 ethtool_link_ksettings_add_link_mode(ks, supported, 857 2500baseT_Full); 858 ethtool_link_ksettings_add_link_mode(ks, advertising, 859 2500baseT_Full); 860 break; 861 case ICE_PHY_TYPE_LOW_2500BASE_X: 862 ethtool_link_ksettings_add_link_mode(ks, supported, 863 2500baseX_Full); 864 break; 865 case ICE_PHY_TYPE_LOW_2500BASE_KX: 866 ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); 867 ethtool_link_ksettings_add_link_mode(ks, supported, 868 2500baseX_Full); 869 ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg); 870 ethtool_link_ksettings_add_link_mode(ks, advertising, 871 2500baseX_Full); 872 break; 873 case ICE_PHY_TYPE_LOW_5GBASE_T: 874 case ICE_PHY_TYPE_LOW_5GBASE_KR: 875 ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); 876 ethtool_link_ksettings_add_link_mode(ks, supported, 877 5000baseT_Full); 878 ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg); 879 ethtool_link_ksettings_add_link_mode(ks, advertising, 880 5000baseT_Full); 881 break; 882 case 
ICE_PHY_TYPE_LOW_10GBASE_T: 883 ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); 884 ethtool_link_ksettings_add_link_mode(ks, supported, 885 10000baseT_Full); 886 ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg); 887 ethtool_link_ksettings_add_link_mode(ks, advertising, 888 10000baseT_Full); 889 break; 890 case ICE_PHY_TYPE_LOW_10G_SFI_DA: 891 case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC: 892 case ICE_PHY_TYPE_LOW_10G_SFI_C2C: 893 ethtool_link_ksettings_add_link_mode(ks, supported, 894 10000baseT_Full); 895 break; 896 case ICE_PHY_TYPE_LOW_10GBASE_SR: 897 ethtool_link_ksettings_add_link_mode(ks, supported, 898 10000baseSR_Full); 899 break; 900 case ICE_PHY_TYPE_LOW_10GBASE_LR: 901 ethtool_link_ksettings_add_link_mode(ks, supported, 902 10000baseLR_Full); 903 break; 904 case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1: 905 ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); 906 ethtool_link_ksettings_add_link_mode(ks, supported, 907 10000baseKR_Full); 908 ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg); 909 ethtool_link_ksettings_add_link_mode(ks, advertising, 910 10000baseKR_Full); 911 break; 912 case ICE_PHY_TYPE_LOW_25GBASE_T: 913 case ICE_PHY_TYPE_LOW_25GBASE_CR: 914 case ICE_PHY_TYPE_LOW_25GBASE_CR_S: 915 case ICE_PHY_TYPE_LOW_25GBASE_CR1: 916 ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); 917 ethtool_link_ksettings_add_link_mode(ks, supported, 918 25000baseCR_Full); 919 ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg); 920 ethtool_link_ksettings_add_link_mode(ks, advertising, 921 25000baseCR_Full); 922 break; 923 case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC: 924 ethtool_link_ksettings_add_link_mode(ks, supported, 925 25000baseCR_Full); 926 break; 927 case ICE_PHY_TYPE_LOW_25GBASE_SR: 928 case ICE_PHY_TYPE_LOW_25GBASE_LR: 929 ethtool_link_ksettings_add_link_mode(ks, supported, 930 25000baseSR_Full); 931 break; 932 case ICE_PHY_TYPE_LOW_25GBASE_KR: 933 case ICE_PHY_TYPE_LOW_25GBASE_KR1: 934 case 
ICE_PHY_TYPE_LOW_25GBASE_KR_S: 935 ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); 936 ethtool_link_ksettings_add_link_mode(ks, supported, 937 25000baseKR_Full); 938 ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg); 939 ethtool_link_ksettings_add_link_mode(ks, advertising, 940 25000baseKR_Full); 941 break; 942 case ICE_PHY_TYPE_LOW_40GBASE_CR4: 943 ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); 944 ethtool_link_ksettings_add_link_mode(ks, supported, 945 40000baseCR4_Full); 946 ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg); 947 ethtool_link_ksettings_add_link_mode(ks, advertising, 948 40000baseCR4_Full); 949 break; 950 case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC: 951 case ICE_PHY_TYPE_LOW_40G_XLAUI: 952 ethtool_link_ksettings_add_link_mode(ks, supported, 953 40000baseCR4_Full); 954 break; 955 case ICE_PHY_TYPE_LOW_40GBASE_SR4: 956 ethtool_link_ksettings_add_link_mode(ks, supported, 957 40000baseSR4_Full); 958 break; 959 case ICE_PHY_TYPE_LOW_40GBASE_LR4: 960 ethtool_link_ksettings_add_link_mode(ks, supported, 961 40000baseLR4_Full); 962 break; 963 case ICE_PHY_TYPE_LOW_40GBASE_KR4: 964 ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); 965 ethtool_link_ksettings_add_link_mode(ks, supported, 966 40000baseKR4_Full); 967 ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg); 968 ethtool_link_ksettings_add_link_mode(ks, advertising, 969 40000baseKR4_Full); 970 break; 971 case ICE_PHY_TYPE_LOW_50GBASE_CR2: 972 case ICE_PHY_TYPE_LOW_50GBASE_CP: 973 ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); 974 ethtool_link_ksettings_add_link_mode(ks, supported, 975 50000baseCR2_Full); 976 ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg); 977 ethtool_link_ksettings_add_link_mode(ks, advertising, 978 50000baseCR2_Full); 979 break; 980 case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC: 981 case ICE_PHY_TYPE_LOW_50G_LAUI2: 982 case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC: 983 case 
ICE_PHY_TYPE_LOW_50G_AUI2: 984 case ICE_PHY_TYPE_LOW_50GBASE_SR: 985 case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC: 986 case ICE_PHY_TYPE_LOW_50G_AUI1: 987 ethtool_link_ksettings_add_link_mode(ks, supported, 988 50000baseCR2_Full); 989 break; 990 case ICE_PHY_TYPE_LOW_50GBASE_KR2: 991 case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4: 992 ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); 993 ethtool_link_ksettings_add_link_mode(ks, supported, 994 50000baseKR2_Full); 995 ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg); 996 ethtool_link_ksettings_add_link_mode(ks, advertising, 997 50000baseKR2_Full); 998 break; 999 case ICE_PHY_TYPE_LOW_50GBASE_SR2: 1000 case ICE_PHY_TYPE_LOW_50GBASE_LR2: 1001 case ICE_PHY_TYPE_LOW_50GBASE_FR: 1002 case ICE_PHY_TYPE_LOW_50GBASE_LR: 1003 ethtool_link_ksettings_add_link_mode(ks, supported, 1004 50000baseSR2_Full); 1005 break; 1006 case ICE_PHY_TYPE_LOW_100GBASE_CR4: 1007 ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); 1008 ethtool_link_ksettings_add_link_mode(ks, supported, 1009 100000baseCR4_Full); 1010 ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg); 1011 ethtool_link_ksettings_add_link_mode(ks, advertising, 1012 100000baseCR4_Full); 1013 break; 1014 case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC: 1015 case ICE_PHY_TYPE_LOW_100G_CAUI4: 1016 case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC: 1017 case ICE_PHY_TYPE_LOW_100G_AUI4: 1018 case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4: 1019 ethtool_link_ksettings_add_link_mode(ks, supported, 1020 100000baseCR4_Full); 1021 break; 1022 case ICE_PHY_TYPE_LOW_100GBASE_CP2: 1023 ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); 1024 ethtool_link_ksettings_add_link_mode(ks, supported, 1025 100000baseCR4_Full); 1026 ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg); 1027 ethtool_link_ksettings_add_link_mode(ks, advertising, 1028 100000baseCR4_Full); 1029 break; 1030 case ICE_PHY_TYPE_LOW_100GBASE_SR4: 1031 case ICE_PHY_TYPE_LOW_100GBASE_SR2: 1032 
ethtool_link_ksettings_add_link_mode(ks, supported, 1033 100000baseSR4_Full); 1034 break; 1035 case ICE_PHY_TYPE_LOW_100GBASE_LR4: 1036 case ICE_PHY_TYPE_LOW_100GBASE_DR: 1037 ethtool_link_ksettings_add_link_mode(ks, supported, 1038 100000baseLR4_ER4_Full); 1039 break; 1040 case ICE_PHY_TYPE_LOW_100GBASE_KR4: 1041 case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4: 1042 ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); 1043 ethtool_link_ksettings_add_link_mode(ks, supported, 1044 100000baseKR4_Full); 1045 ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg); 1046 ethtool_link_ksettings_add_link_mode(ks, advertising, 1047 100000baseKR4_Full); 1048 break; 1049 default: 1050 unrecog_phy_low = true; 1051 } 1052 1053 switch (link_info->phy_type_high) { 1054 case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4: 1055 ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); 1056 ethtool_link_ksettings_add_link_mode(ks, supported, 1057 100000baseKR4_Full); 1058 ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg); 1059 ethtool_link_ksettings_add_link_mode(ks, advertising, 1060 100000baseKR4_Full); 1061 break; 1062 case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC: 1063 case ICE_PHY_TYPE_HIGH_100G_CAUI2: 1064 case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC: 1065 case ICE_PHY_TYPE_HIGH_100G_AUI2: 1066 ethtool_link_ksettings_add_link_mode(ks, supported, 1067 100000baseCR4_Full); 1068 break; 1069 default: 1070 unrecog_phy_high = true; 1071 } 1072 1073 if (unrecog_phy_low && unrecog_phy_high) { 1074 /* if we got here and link is up something bad is afoot */ 1075 netdev_info(netdev, 1076 "WARNING: Unrecognized PHY_Low (0x%llx).\n", 1077 (u64)link_info->phy_type_low); 1078 netdev_info(netdev, 1079 "WARNING: Unrecognized PHY_High (0x%llx).\n", 1080 (u64)link_info->phy_type_high); 1081 } 1082 1083 /* Now that we've worked out everything that could be supported by the 1084 * current PHY type, get what is supported by the NVM and intersect 1085 * them to get what is truly supported 1086 
*/
	memset(&cap_ksettings, 0, sizeof(struct ethtool_link_ksettings));
	ice_phy_type_to_ethtool(netdev, &cap_ksettings);
	ethtool_intersect_link_masks(ks, &cap_ksettings);

	/* Map the firmware-reported link speed onto the ethtool SPEED_*
	 * constant; anything the driver does not recognize is logged.
	 */
	switch (link_info->link_speed) {
	case ICE_AQ_LINK_SPEED_100GB:
		ks->base.speed = SPEED_100000;
		break;
	case ICE_AQ_LINK_SPEED_50GB:
		ks->base.speed = SPEED_50000;
		break;
	case ICE_AQ_LINK_SPEED_40GB:
		ks->base.speed = SPEED_40000;
		break;
	case ICE_AQ_LINK_SPEED_25GB:
		ks->base.speed = SPEED_25000;
		break;
	case ICE_AQ_LINK_SPEED_20GB:
		ks->base.speed = SPEED_20000;
		break;
	case ICE_AQ_LINK_SPEED_10GB:
		ks->base.speed = SPEED_10000;
		break;
	case ICE_AQ_LINK_SPEED_5GB:
		ks->base.speed = SPEED_5000;
		break;
	case ICE_AQ_LINK_SPEED_2500MB:
		ks->base.speed = SPEED_2500;
		break;
	case ICE_AQ_LINK_SPEED_1000MB:
		ks->base.speed = SPEED_1000;
		break;
	case ICE_AQ_LINK_SPEED_100MB:
		ks->base.speed = SPEED_100;
		break;
	default:
		netdev_info(netdev,
			    "WARNING: Unrecognized link_speed (0x%x).\n",
			    link_info->link_speed);
		break;
	}
	/* this hardware only reports full duplex when link is up */
	ks->base.duplex = DUPLEX_FULL;
}

/**
 * ice_get_settings_link_down - Get the Link settings when link is down
 * @ks: ethtool ksettings to fill in
 * @netdev: network interface device structure
 *
 * Reports link settings that can be determined when link is down
 */
static void
ice_get_settings_link_down(struct ethtool_link_ksettings *ks,
			   struct net_device __always_unused *netdev)
{
	/* link is down and the driver needs to fall back on
	 * supported phy types to figure out what info to display
	 */
	ice_phy_type_to_ethtool(netdev, ks);

	/* With no link, speed and duplex are unknown */
	ks->base.speed = SPEED_UNKNOWN;
	ks->base.duplex = DUPLEX_UNKNOWN;
}

/**
 * ice_get_link_ksettings - Get Link Speed and Duplex settings
 * @netdev: network interface device structure
 * @ks: ethtool ksettings
 *
 * Reports speed/duplex settings based on media_type
 */
static int ice_get_link_ksettings(struct net_device *netdev,
				  struct ethtool_link_ksettings *ks)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_link_status *hw_link_info;
	struct ice_vsi *vsi = np->vsi;

	ethtool_link_ksettings_zero_link_mode(ks, supported);
	ethtool_link_ksettings_zero_link_mode(ks, advertising);
	hw_link_info = &vsi->port_info->phy.link_info;

	/* set speed and duplex */
	if (hw_link_info->link_info & ICE_AQ_LINK_UP)
		ice_get_settings_link_up(ks, netdev);
	else
		ice_get_settings_link_down(ks, netdev);

	/* set autoneg settings: "enabled" is derived from autoneg having
	 * completed on the current link
	 */
	ks->base.autoneg = (hw_link_info->an_info & ICE_AQ_AN_COMPLETED) ?
		AUTONEG_ENABLE : AUTONEG_DISABLE;

	/* set media type settings */
	switch (vsi->port_info->phy.media_type) {
	case ICE_MEDIA_FIBER:
		ethtool_link_ksettings_add_link_mode(ks, supported, FIBRE);
		ks->base.port = PORT_FIBRE;
		break;
	case ICE_MEDIA_BASET:
		ethtool_link_ksettings_add_link_mode(ks, supported, TP);
		ethtool_link_ksettings_add_link_mode(ks, advertising, TP);
		ks->base.port = PORT_TP;
		break;
	case ICE_MEDIA_BACKPLANE:
		ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
		ethtool_link_ksettings_add_link_mode(ks, supported, Backplane);
		ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
		ethtool_link_ksettings_add_link_mode(ks, advertising,
						     Backplane);
		ks->base.port = PORT_NONE;
		break;
	case ICE_MEDIA_DA:
		ethtool_link_ksettings_add_link_mode(ks, supported, FIBRE);
		ethtool_link_ksettings_add_link_mode(ks, advertising, FIBRE);
		ks->base.port = PORT_DA;
		break;
	default:
		ks->base.port = PORT_OTHER;
		break;
	}

	/* flow control is symmetric and always supported */
	ethtool_link_ksettings_add_link_mode(ks, supported, Pause);

	/* NOTE(review): the advertised pause bits below reflect the
	 * *requested* FC mode (fc.req_mode), not the negotiated one —
	 * confirm this is the intended reporting semantic.
	 */
	switch (vsi->port_info->fc.req_mode) {
	case ICE_FC_FULL:
		ethtool_link_ksettings_add_link_mode(ks, advertising, Pause);
		break;
	case ICE_FC_TX_PAUSE:
		ethtool_link_ksettings_add_link_mode(ks, advertising,
						     Asym_Pause);
		break;
	case ICE_FC_RX_PAUSE:
		ethtool_link_ksettings_add_link_mode(ks, advertising, Pause);
		ethtool_link_ksettings_add_link_mode(ks, advertising,
						     Asym_Pause);
		break;
	case ICE_FC_PFC:
	default:
		ethtool_link_ksettings_del_link_mode(ks, advertising, Pause);
		ethtool_link_ksettings_del_link_mode(ks, advertising,
						     Asym_Pause);
		break;
	}

	return 0;
}

/**
 * ice_ksettings_find_adv_link_speed - Find advertising link speed
 * @ks: ethtool ksettings
 *
 * Collapses every advertised ethtool link mode into the driver's
 * ICE_AQ_LINK_SPEED_* bitmask.
 */
static u16
ice_ksettings_find_adv_link_speed(const struct ethtool_link_ksettings *ks)
{
	u16 adv_link_speed = 0;

	if (ethtool_link_ksettings_test_link_mode(ks, advertising,
						  100baseT_Full))
		adv_link_speed |= ICE_AQ_LINK_SPEED_100MB;
	if (ethtool_link_ksettings_test_link_mode(ks, advertising,
						  1000baseX_Full))
		adv_link_speed |= ICE_AQ_LINK_SPEED_1000MB;
	if (ethtool_link_ksettings_test_link_mode(ks, advertising,
						  1000baseT_Full) ||
	    ethtool_link_ksettings_test_link_mode(ks, advertising,
						  1000baseKX_Full))
		adv_link_speed |= ICE_AQ_LINK_SPEED_1000MB;
	if (ethtool_link_ksettings_test_link_mode(ks, advertising,
						  2500baseT_Full))
		adv_link_speed |= ICE_AQ_LINK_SPEED_2500MB;
	if (ethtool_link_ksettings_test_link_mode(ks, advertising,
						  2500baseX_Full))
		adv_link_speed |= ICE_AQ_LINK_SPEED_2500MB;
	if (ethtool_link_ksettings_test_link_mode(ks, advertising,
						  5000baseT_Full))
		adv_link_speed |= ICE_AQ_LINK_SPEED_5GB;
	if (ethtool_link_ksettings_test_link_mode(ks, advertising,
						  10000baseT_Full) ||
	    ethtool_link_ksettings_test_link_mode(ks, advertising,
						  10000baseKR_Full))
		adv_link_speed |= ICE_AQ_LINK_SPEED_10GB;
	if (ethtool_link_ksettings_test_link_mode(ks, advertising,
						  10000baseSR_Full) ||
	    ethtool_link_ksettings_test_link_mode(ks, advertising,
						  10000baseLR_Full))
		adv_link_speed |= ICE_AQ_LINK_SPEED_10GB;
	if (ethtool_link_ksettings_test_link_mode(ks, advertising,
						  25000baseCR_Full) ||
	    ethtool_link_ksettings_test_link_mode(ks, advertising,
						  25000baseSR_Full) ||
	    ethtool_link_ksettings_test_link_mode(ks, advertising,
						  25000baseKR_Full))
		adv_link_speed |= ICE_AQ_LINK_SPEED_25GB;
	if (ethtool_link_ksettings_test_link_mode(ks, advertising,
						  40000baseCR4_Full) ||
	    ethtool_link_ksettings_test_link_mode(ks, advertising,
						  40000baseSR4_Full) ||
	    ethtool_link_ksettings_test_link_mode(ks, advertising,
						  40000baseLR4_Full) ||
	    ethtool_link_ksettings_test_link_mode(ks, advertising,
						  40000baseKR4_Full))
		adv_link_speed |= ICE_AQ_LINK_SPEED_40GB;
	if (ethtool_link_ksettings_test_link_mode(ks, advertising,
						  50000baseCR2_Full) ||
	    ethtool_link_ksettings_test_link_mode(ks, advertising,
						  50000baseKR2_Full))
		adv_link_speed |= ICE_AQ_LINK_SPEED_50GB;
	if (ethtool_link_ksettings_test_link_mode(ks, advertising,
						  50000baseSR2_Full))
		adv_link_speed |= ICE_AQ_LINK_SPEED_50GB;
	if (ethtool_link_ksettings_test_link_mode(ks, advertising,
						  100000baseCR4_Full) ||
	    ethtool_link_ksettings_test_link_mode(ks, advertising,
						  100000baseSR4_Full) ||
	    ethtool_link_ksettings_test_link_mode(ks, advertising,
						  100000baseLR4_ER4_Full) ||
	    ethtool_link_ksettings_test_link_mode(ks, advertising,
						  100000baseKR4_Full))
		adv_link_speed |= ICE_AQ_LINK_SPEED_100GB;

	return adv_link_speed;
}

/**
 * ice_setup_autoneg
 * @p: port info
 * @ks: ethtool_link_ksettings
 * @config:
configuration that will be sent down to FW 1317 * @autoneg_enabled: autonegotiation is enabled or not 1318 * @autoneg_changed: will there a change in autonegotiation 1319 * @netdev: network interface device structure 1320 * 1321 * Setup PHY autonegotiation feature 1322 */ 1323 static int 1324 ice_setup_autoneg(struct ice_port_info *p, struct ethtool_link_ksettings *ks, 1325 struct ice_aqc_set_phy_cfg_data *config, 1326 u8 autoneg_enabled, u8 *autoneg_changed, 1327 struct net_device *netdev) 1328 { 1329 int err = 0; 1330 1331 *autoneg_changed = 0; 1332 1333 /* Check autoneg */ 1334 if (autoneg_enabled == AUTONEG_ENABLE) { 1335 /* If autoneg was not already enabled */ 1336 if (!(p->phy.link_info.an_info & ICE_AQ_AN_COMPLETED)) { 1337 /* If autoneg is not supported, return error */ 1338 if (!ethtool_link_ksettings_test_link_mode(ks, 1339 supported, 1340 Autoneg)) { 1341 netdev_info(netdev, "Autoneg not supported on this phy.\n"); 1342 err = -EINVAL; 1343 } else { 1344 /* Autoneg is allowed to change */ 1345 config->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT; 1346 *autoneg_changed = 1; 1347 } 1348 } 1349 } else { 1350 /* If autoneg is currently enabled */ 1351 if (p->phy.link_info.an_info & ICE_AQ_AN_COMPLETED) { 1352 /* If autoneg is supported 10GBASE_T is the only phy 1353 * that can disable it, so otherwise return error 1354 */ 1355 if (ethtool_link_ksettings_test_link_mode(ks, 1356 supported, 1357 Autoneg)) { 1358 netdev_info(netdev, "Autoneg cannot be disabled on this phy\n"); 1359 err = -EINVAL; 1360 } else { 1361 /* Autoneg is allowed to change */ 1362 config->caps &= ~ICE_AQ_PHY_ENA_AUTO_LINK_UPDT; 1363 *autoneg_changed = 1; 1364 } 1365 } 1366 } 1367 1368 return err; 1369 } 1370 1371 /** 1372 * ice_set_link_ksettings - Set Speed and Duplex 1373 * @netdev: network interface device structure 1374 * @ks: ethtool ksettings 1375 * 1376 * Set speed/duplex per media_types advertised/forced 1377 */ 1378 static int 1379 ice_set_link_ksettings(struct net_device *netdev, 1380 
const struct ethtool_link_ksettings *ks) 1381 { 1382 u8 autoneg, timeout = TEST_SET_BITS_TIMEOUT, lport = 0; 1383 struct ice_netdev_priv *np = netdev_priv(netdev); 1384 struct ethtool_link_ksettings safe_ks, copy_ks; 1385 struct ice_aqc_get_phy_caps_data *abilities; 1386 u16 adv_link_speed, curr_link_speed, idx; 1387 struct ice_aqc_set_phy_cfg_data config; 1388 struct ice_pf *pf = np->vsi->back; 1389 struct ice_port_info *p; 1390 u8 autoneg_changed = 0; 1391 enum ice_status status; 1392 u64 phy_type_high; 1393 u64 phy_type_low; 1394 int err = 0; 1395 bool linkup; 1396 1397 p = np->vsi->port_info; 1398 1399 if (!p) 1400 return -EOPNOTSUPP; 1401 1402 /* Check if this is lan vsi */ 1403 for (idx = 0 ; idx < pf->num_alloc_vsi ; idx++) { 1404 if (pf->vsi[idx]->type == ICE_VSI_PF) { 1405 if (np->vsi != pf->vsi[idx]) 1406 return -EOPNOTSUPP; 1407 break; 1408 } 1409 } 1410 1411 if (p->phy.media_type != ICE_MEDIA_BASET && 1412 p->phy.media_type != ICE_MEDIA_FIBER && 1413 p->phy.media_type != ICE_MEDIA_BACKPLANE && 1414 p->phy.media_type != ICE_MEDIA_DA && 1415 p->phy.link_info.link_info & ICE_AQ_LINK_UP) 1416 return -EOPNOTSUPP; 1417 1418 /* copy the ksettings to copy_ks to avoid modifying the original */ 1419 memcpy(©_ks, ks, sizeof(struct ethtool_link_ksettings)); 1420 1421 /* save autoneg out of ksettings */ 1422 autoneg = copy_ks.base.autoneg; 1423 1424 memset(&safe_ks, 0, sizeof(safe_ks)); 1425 1426 /* Get link modes supported by hardware.*/ 1427 ice_phy_type_to_ethtool(netdev, &safe_ks); 1428 1429 /* and check against modes requested by user. 1430 * Return an error if unsupported mode was set. 
1431 */ 1432 if (!bitmap_subset(copy_ks.link_modes.advertising, 1433 safe_ks.link_modes.supported, 1434 __ETHTOOL_LINK_MODE_MASK_NBITS)) 1435 return -EINVAL; 1436 1437 /* get our own copy of the bits to check against */ 1438 memset(&safe_ks, 0, sizeof(struct ethtool_link_ksettings)); 1439 safe_ks.base.cmd = copy_ks.base.cmd; 1440 safe_ks.base.link_mode_masks_nwords = 1441 copy_ks.base.link_mode_masks_nwords; 1442 ice_get_link_ksettings(netdev, &safe_ks); 1443 1444 /* set autoneg back to what it currently is */ 1445 copy_ks.base.autoneg = safe_ks.base.autoneg; 1446 /* we don't compare the speed */ 1447 copy_ks.base.speed = safe_ks.base.speed; 1448 1449 /* If copy_ks.base and safe_ks.base are not the same now, then they are 1450 * trying to set something that we do not support. 1451 */ 1452 if (memcmp(©_ks.base, &safe_ks.base, 1453 sizeof(struct ethtool_link_settings))) 1454 return -EOPNOTSUPP; 1455 1456 while (test_and_set_bit(__ICE_CFG_BUSY, pf->state)) { 1457 timeout--; 1458 if (!timeout) 1459 return -EBUSY; 1460 usleep_range(TEST_SET_BITS_SLEEP_MIN, TEST_SET_BITS_SLEEP_MAX); 1461 } 1462 1463 abilities = devm_kzalloc(&pf->pdev->dev, sizeof(*abilities), 1464 GFP_KERNEL); 1465 if (!abilities) 1466 return -ENOMEM; 1467 1468 /* Get the current phy config */ 1469 status = ice_aq_get_phy_caps(p, false, ICE_AQC_REPORT_SW_CFG, abilities, 1470 NULL); 1471 if (status) { 1472 err = -EAGAIN; 1473 goto done; 1474 } 1475 1476 /* Copy abilities to config in case autoneg is not set below */ 1477 memset(&config, 0, sizeof(struct ice_aqc_set_phy_cfg_data)); 1478 config.caps = abilities->caps & ~ICE_AQC_PHY_AN_MODE; 1479 if (abilities->caps & ICE_AQC_PHY_AN_MODE) 1480 config.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT; 1481 1482 /* Check autoneg */ 1483 err = ice_setup_autoneg(p, &safe_ks, &config, autoneg, &autoneg_changed, 1484 netdev); 1485 1486 if (err) 1487 goto done; 1488 1489 /* Call to get the current link speed */ 1490 p->phy.get_link_info = true; 1491 status = 
ice_get_link_status(p, &linkup); 1492 if (status) { 1493 err = -EAGAIN; 1494 goto done; 1495 } 1496 1497 curr_link_speed = p->phy.link_info.link_speed; 1498 adv_link_speed = ice_ksettings_find_adv_link_speed(ks); 1499 1500 /* If speed didn't get set, set it to what it currently is. 1501 * This is needed because if advertise is 0 (as it is when autoneg 1502 * is disabled) then speed won't get set. 1503 */ 1504 if (!adv_link_speed) 1505 adv_link_speed = curr_link_speed; 1506 1507 /* Convert the advertise link speeds to their corresponded PHY_TYPE */ 1508 ice_update_phy_type(&phy_type_low, &phy_type_high, adv_link_speed); 1509 1510 if (!autoneg_changed && adv_link_speed == curr_link_speed) { 1511 netdev_info(netdev, "Nothing changed, exiting without setting anything.\n"); 1512 goto done; 1513 } 1514 1515 /* copy over the rest of the abilities */ 1516 config.low_power_ctrl = abilities->low_power_ctrl; 1517 config.eee_cap = abilities->eee_cap; 1518 config.eeer_value = abilities->eeer_value; 1519 config.link_fec_opt = abilities->link_fec_options; 1520 1521 /* save the requested speeds */ 1522 p->phy.link_info.req_speeds = adv_link_speed; 1523 1524 /* set link and auto negotiation so changes take effect */ 1525 config.caps |= ICE_AQ_PHY_ENA_LINK; 1526 1527 if (phy_type_low || phy_type_high) { 1528 config.phy_type_high = cpu_to_le64(phy_type_high) & 1529 abilities->phy_type_high; 1530 config.phy_type_low = cpu_to_le64(phy_type_low) & 1531 abilities->phy_type_low; 1532 } else { 1533 err = -EAGAIN; 1534 netdev_info(netdev, "Nothing changed. 
No PHY_TYPE is corresponded to advertised link speed.\n"); 1535 goto done; 1536 } 1537 1538 /* If link is up put link down */ 1539 if (p->phy.link_info.link_info & ICE_AQ_LINK_UP) { 1540 /* Tell the OS link is going down, the link will go 1541 * back up when fw says it is ready asynchronously 1542 */ 1543 ice_print_link_msg(np->vsi, false); 1544 netif_carrier_off(netdev); 1545 netif_tx_stop_all_queues(netdev); 1546 } 1547 1548 /* make the aq call */ 1549 status = ice_aq_set_phy_cfg(&pf->hw, lport, &config, NULL); 1550 if (status) { 1551 netdev_info(netdev, "Set phy config failed,\n"); 1552 err = -EAGAIN; 1553 } 1554 1555 done: 1556 devm_kfree(&pf->pdev->dev, abilities); 1557 clear_bit(__ICE_CFG_BUSY, pf->state); 1558 1559 return err; 1560 } 1561 1562 /** 1563 * ice_get_rxnfc - command to get RX flow classification rules 1564 * @netdev: network interface device structure 1565 * @cmd: ethtool rxnfc command 1566 * @rule_locs: buffer to rturn Rx flow classification rules 1567 * 1568 * Returns Success if the command is supported. 
*/
static int ice_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
			 u32 __always_unused *rule_locs)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	int ret = -EOPNOTSUPP;

	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		/* report the number of RSS queues */
		cmd->data = vsi->rss_size;
		ret = 0;
		break;
	default:
		break;
	}

	return ret;
}

/* report current and maximum descriptor ring sizes to ethtool -g */
static void
ice_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;

	ring->rx_max_pending = ICE_MAX_NUM_DESC;
	ring->tx_max_pending = ICE_MAX_NUM_DESC;
	/* all rings share the same count, so ring 0 is representative */
	ring->rx_pending = vsi->rx_rings[0]->count;
	ring->tx_pending = vsi->tx_rings[0]->count;

	/* Rx mini and jumbo rings are not supported */
	ring->rx_mini_max_pending = 0;
	ring->rx_jumbo_max_pending = 0;
	ring->rx_mini_pending = 0;
	ring->rx_jumbo_pending = 0;
}

/* ethtool -G handler: resize Tx/Rx descriptor rings. New rings are
 * allocated and populated first; only when everything succeeded is the
 * interface brought down, the rings swapped in, and the interface
 * brought back up, so a failed resize leaves the old rings untouched.
 */
static int
ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
{
	struct ice_ring *tx_rings = NULL, *rx_rings = NULL;
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_pf *pf = vsi->back;
	int i, timeout = 50, err = 0;
	u32 new_rx_cnt, new_tx_cnt;

	if (ring->tx_pending > ICE_MAX_NUM_DESC ||
	    ring->tx_pending < ICE_MIN_NUM_DESC ||
	    ring->rx_pending > ICE_MAX_NUM_DESC ||
	    ring->rx_pending < ICE_MIN_NUM_DESC) {
		netdev_err(netdev, "Descriptors requested (Tx: %d / Rx: %d) out of range [%d-%d] (increment %d)\n",
			   ring->tx_pending, ring->rx_pending,
			   ICE_MIN_NUM_DESC, ICE_MAX_NUM_DESC,
			   ICE_REQ_DESC_MULTIPLE);
		return -EINVAL;
	}

	/* round the requested counts up to the hardware's multiple */
	new_tx_cnt = ALIGN(ring->tx_pending, ICE_REQ_DESC_MULTIPLE);
	if (new_tx_cnt != ring->tx_pending)
		netdev_info(netdev,
			    "Requested Tx descriptor count rounded up to %d\n",
			    new_tx_cnt);
	new_rx_cnt = ALIGN(ring->rx_pending, ICE_REQ_DESC_MULTIPLE);
	if (new_rx_cnt != ring->rx_pending)
		netdev_info(netdev,
			    "Requested Rx descriptor count rounded up to %d\n",
			    new_rx_cnt);

	/* if nothing to do return success */
	if (new_tx_cnt == vsi->tx_rings[0]->count &&
	    new_rx_cnt == vsi->rx_rings[0]->count) {
		netdev_dbg(netdev, "Nothing to change, descriptor count is same as requested\n");
		return 0;
	}

	while (test_and_set_bit(__ICE_CFG_BUSY, pf->state)) {
		timeout--;
		if (!timeout)
			return -EBUSY;
		usleep_range(1000, 2000);
	}

	/* set for the next time the netdev is started */
	if (!netif_running(vsi->netdev)) {
		for (i = 0; i < vsi->alloc_txq; i++)
			vsi->tx_rings[i]->count = new_tx_cnt;
		for (i = 0; i < vsi->alloc_rxq; i++)
			vsi->rx_rings[i]->count = new_rx_cnt;
		netdev_dbg(netdev, "Link is down, descriptor count change happens when link is brought up\n");
		goto done;
	}

	if (new_tx_cnt == vsi->tx_rings[0]->count)
		goto process_rx;

	/* alloc updated Tx resources */
	netdev_info(netdev, "Changing Tx descriptor count from %d to %d\n",
		    vsi->tx_rings[0]->count, new_tx_cnt);

	tx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_txq,
				sizeof(struct ice_ring), GFP_KERNEL);
	if (!tx_rings) {
		err = -ENOMEM;
		goto done;
	}

	for (i = 0; i < vsi->alloc_txq; i++) {
		/* clone ring and setup updated count */
		tx_rings[i] = *vsi->tx_rings[i];
		tx_rings[i].count = new_tx_cnt;
		tx_rings[i].desc = NULL;
		tx_rings[i].tx_buf = NULL;
		err = ice_setup_tx_ring(&tx_rings[i]);
		if (err) {
			/* unwind rings already set up before bailing out */
			while (i) {
				i--;
				ice_clean_tx_ring(&tx_rings[i]);
			}
			devm_kfree(&pf->pdev->dev, tx_rings);
			goto done;
		}
	}

process_rx:
	if (new_rx_cnt == vsi->rx_rings[0]->count)
		goto process_link;

	/* alloc updated Rx resources */
	netdev_info(netdev, "Changing Rx descriptor count from %d to %d\n",
		    vsi->rx_rings[0]->count, new_rx_cnt);

	rx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_rxq,
				sizeof(struct ice_ring), GFP_KERNEL);
	if (!rx_rings) {
		err = -ENOMEM;
		goto done;
	}

	for (i = 0; i < vsi->alloc_rxq; i++) {
		/* clone ring and setup updated count */
		rx_rings[i] = *vsi->rx_rings[i];
		rx_rings[i].count = new_rx_cnt;
		rx_rings[i].desc = NULL;
		rx_rings[i].rx_buf = NULL;
		/* this is to allow wr32 to have something to write to
		 * during early allocation of Rx buffers
		 */
		rx_rings[i].tail = vsi->back->hw.hw_addr + PRTGEN_STATUS;

		err = ice_setup_rx_ring(&rx_rings[i]);
		if (err)
			goto rx_unwind;

		/* allocate Rx buffers */
		err = ice_alloc_rx_bufs(&rx_rings[i],
					ICE_DESC_UNUSED(&rx_rings[i]));
rx_unwind:
		/* the label sits inside the loop on purpose: both the
		 * setup failure and the buffer-allocation failure unwind
		 * every ring built so far, including ring i itself for
		 * the alloc_rx_bufs case (i has not been incremented yet
		 * when err is from setup, so the while (i) loop skips it).
		 * NOTE(review): the original err from setup/alloc is
		 * overwritten with -ENOMEM here — confirm that is intended.
		 */
		if (err) {
			while (i) {
				i--;
				ice_free_rx_ring(&rx_rings[i]);
			}
			devm_kfree(&pf->pdev->dev, rx_rings);
			err = -ENOMEM;
			goto free_tx;
		}
	}

process_link:
	/* Bring interface down, copy in the new ring info, then restore the
	 * interface. if VSI is up, bring it down and then back up
	 */
	if (!test_and_set_bit(__ICE_DOWN, vsi->state)) {
		ice_down(vsi);

		if (tx_rings) {
			for (i = 0; i < vsi->alloc_txq; i++) {
				ice_free_tx_ring(vsi->tx_rings[i]);
				*vsi->tx_rings[i] = tx_rings[i];
			}
			devm_kfree(&pf->pdev->dev, tx_rings);
		}

		if (rx_rings) {
			for (i = 0; i < vsi->alloc_rxq; i++) {
				ice_free_rx_ring(vsi->rx_rings[i]);
				/* copy the real tail offset */
				rx_rings[i].tail = vsi->rx_rings[i]->tail;
				/* this is to fake out the allocation routine
				 * into thinking it has to realloc everything
				 * but the recycling logic will let us re-use
				 * the buffers allocated above
				 */
				rx_rings[i].next_to_use = 0;
				rx_rings[i].next_to_clean = 0;
				rx_rings[i].next_to_alloc = 0;
				*vsi->rx_rings[i] = rx_rings[i];
			}
			devm_kfree(&pf->pdev->dev, rx_rings);
		}

		ice_up(vsi);
	}
	goto done;

free_tx:
	/* error cleanup if the Rx allocations failed after getting Tx */
	if (tx_rings) {
		for (i = 0; i < vsi->alloc_txq; i++)
			ice_free_tx_ring(&tx_rings[i]);
		devm_kfree(&pf->pdev->dev, tx_rings);
	}

done:
	clear_bit(__ICE_CFG_BUSY, pf->state);
	return err;
}

/* ethtool nway_reset: kick firmware to restart autonegotiation; link is
 * only restarted (brought up) when the interface itself is up
 */
static int ice_nway_reset(struct net_device *netdev)
{
	/* restart autonegotiation */
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_port_info *pi;
	enum ice_status status;

	pi = vsi->port_info;
	/* If VSI state is up, then restart autoneg with link up */
	if (!test_bit(__ICE_DOWN, vsi->back->state))
		status = ice_aq_set_link_restart_an(pi, true, NULL);
	else
		status = ice_aq_set_link_restart_an(pi, false, NULL);

	if (status) {
		netdev_info(netdev, "link restart failed, err %d aq_err %d\n",
			    status, pi->hw->adminq.sq_last_status);
		return -EIO;
	}

return 0;
}

/**
 * ice_get_pauseparam - Get Flow Control status
 * @netdev: network interface device structure
 * @pause: ethernet pause (flow control) parameters
 */
static void
ice_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_port_info *pi;

	pi = np->vsi->port_info;
	pause->autoneg =
		((pi->phy.link_info.an_info & ICE_AQ_AN_COMPLETED) ?
		 AUTONEG_ENABLE : AUTONEG_DISABLE);

	/* report the currently negotiated flow-control mode */
	if (pi->fc.current_mode == ICE_FC_RX_PAUSE) {
		pause->rx_pause = 1;
	} else if (pi->fc.current_mode == ICE_FC_TX_PAUSE) {
		pause->tx_pause = 1;
	} else if (pi->fc.current_mode == ICE_FC_FULL) {
		pause->rx_pause = 1;
		pause->tx_pause = 1;
	}
}

/**
 * ice_set_pauseparam - Set Flow Control parameter
 * @netdev: network interface device structure
 * @pause: return Tx/Rx flow control status
 */
static int
ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_link_status *hw_link_info;
	struct ice_pf *pf = np->vsi->back;
	struct ice_vsi *vsi = np->vsi;
	struct ice_hw *hw = &pf->hw;
	struct ice_port_info *pi;
	enum ice_status status;
	u8 aq_failures;
	bool link_up;
	int err = 0;

	pi = vsi->port_info;
	hw_link_info = &pi->phy.link_info;
	link_up = hw_link_info->link_info & ICE_AQ_LINK_UP;

	/* Changing the port's flow control is not supported if this isn't the
	 * PF VSI
	 */
	if (vsi->type != ICE_VSI_PF) {
		netdev_info(netdev, "Changing flow control parameters only supported for PF VSI\n");
		return -EOPNOTSUPP;
	}

	/* NOTE(review): pause->autoneg is AUTONEG_DISABLE/ENABLE (0/1) but
	 * the right side is the raw masked an_info bit — this comparison is
	 * only correct if ICE_AQ_AN_COMPLETED is BIT(0); confirm against the
	 * adminq definitions.
	 */
	if (pause->autoneg != (hw_link_info->an_info & ICE_AQ_AN_COMPLETED)) {
		netdev_info(netdev, "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
		return -EOPNOTSUPP;
	}

	/* If we have link and don't have autoneg */
	if (!test_bit(__ICE_DOWN, pf->state) &&
	    !(hw_link_info->an_info & ICE_AQ_AN_COMPLETED)) {
		/* Send message that it might not necessarily work*/
		netdev_info(netdev, "Autoneg did not complete so changing settings may not result in an actual change.\n");
	}

	/* map the rx/tx pause request onto the driver FC mode; the four
	 * branches cover every combination, so the final else is never
	 * reached in practice
	 */
	if (pause->rx_pause && pause->tx_pause)
		pi->fc.req_mode = ICE_FC_FULL;
	else if (pause->rx_pause && !pause->tx_pause)
		pi->fc.req_mode = ICE_FC_RX_PAUSE;
	else if (!pause->rx_pause && pause->tx_pause)
		pi->fc.req_mode = ICE_FC_TX_PAUSE;
	else if (!pause->rx_pause && !pause->tx_pause)
		pi->fc.req_mode = ICE_FC_NONE;
	else
		return -EINVAL;

	/* Tell the OS link is going down, the link will go back up when fw
	 * says it is ready asynchronously
	 */
	ice_print_link_msg(vsi, false);
	netif_carrier_off(netdev);
	netif_tx_stop_all_queues(netdev);

	/* Set the FC mode and only restart AN if link is up */
	status = ice_set_fc(pi, &aq_failures, link_up);

	if (aq_failures & ICE_SET_FC_AQ_FAIL_GET) {
		netdev_info(netdev, "Set fc failed on the get_phy_capabilities call with err %d aq_err %d\n",
			    status, hw->adminq.sq_last_status);
		err = -EAGAIN;
	} else if (aq_failures & ICE_SET_FC_AQ_FAIL_SET) {
		netdev_info(netdev, "Set fc failed on the set_phy_config call with err %d aq_err %d\n",
			    status, hw->adminq.sq_last_status);
		err = -EAGAIN;
	} else if (aq_failures & ICE_SET_FC_AQ_FAIL_UPDATE) {
		netdev_info(netdev, "Set fc failed on the get_link_info call with err %d aq_err %d\n",
			    status, hw->adminq.sq_last_status);
		err = -EAGAIN;
	}

	if (!test_bit(__ICE_DOWN, pf->state)) {
		/* Give it a little more time to try to come back. If still
		 * down, restart autoneg link or reinitialize the interface.
		 */
		msleep(75);
		if (!test_bit(__ICE_DOWN, pf->state))
			return ice_nway_reset(netdev);

		ice_down(vsi);
		ice_up(vsi);
	}

	return err;
}

/**
 * ice_get_rxfh_key_size - get the RSS hash key size
 * @netdev: network interface device structure
 *
 * Returns the RSS hash key size in bytes.
 */
static u32 ice_get_rxfh_key_size(struct net_device __always_unused *netdev)
{
	return ICE_VSIQF_HKEY_ARRAY_SIZE;
}

/**
 * ice_get_rxfh_indir_size - get the Rx flow hash indirection table size
 * @netdev: network interface device structure
 *
 * Returns the table size.
 */
static u32 ice_get_rxfh_indir_size(struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);

	return np->vsi->rss_table_size;
}

/**
 * ice_get_rxfh - get the Rx flow hash indirection table
 * @netdev: network interface device structure
 * @indir: indirection table
 * @key: hash key
 * @hfunc: hash function
 *
 * Reads the indirection table directly from the hardware.
1964 */ 1965 static int 1966 ice_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc) 1967 { 1968 struct ice_netdev_priv *np = netdev_priv(netdev); 1969 struct ice_vsi *vsi = np->vsi; 1970 struct ice_pf *pf = vsi->back; 1971 int ret = 0, i; 1972 u8 *lut; 1973 1974 if (hfunc) 1975 *hfunc = ETH_RSS_HASH_TOP; 1976 1977 if (!indir) 1978 return 0; 1979 1980 if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) { 1981 /* RSS not supported return error here */ 1982 netdev_warn(netdev, "RSS is not configured on this VSI!\n"); 1983 return -EIO; 1984 } 1985 1986 lut = devm_kzalloc(&pf->pdev->dev, vsi->rss_table_size, GFP_KERNEL); 1987 if (!lut) 1988 return -ENOMEM; 1989 1990 if (ice_get_rss(vsi, key, lut, vsi->rss_table_size)) { 1991 ret = -EIO; 1992 goto out; 1993 } 1994 1995 for (i = 0; i < vsi->rss_table_size; i++) 1996 indir[i] = (u32)(lut[i]); 1997 1998 out: 1999 devm_kfree(&pf->pdev->dev, lut); 2000 return ret; 2001 } 2002 2003 /** 2004 * ice_set_rxfh - set the Rx flow hash indirection table 2005 * @netdev: network interface device structure 2006 * @indir: indirection table 2007 * @key: hash key 2008 * @hfunc: hash function 2009 * 2010 * Returns -EINVAL if the table specifies an invalid queue id, otherwise 2011 * returns 0 after programming the table. 
2012 */ 2013 static int ice_set_rxfh(struct net_device *netdev, const u32 *indir, 2014 const u8 *key, const u8 hfunc) 2015 { 2016 struct ice_netdev_priv *np = netdev_priv(netdev); 2017 struct ice_vsi *vsi = np->vsi; 2018 struct ice_pf *pf = vsi->back; 2019 u8 *seed = NULL; 2020 2021 if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) 2022 return -EOPNOTSUPP; 2023 2024 if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) { 2025 /* RSS not supported return error here */ 2026 netdev_warn(netdev, "RSS is not configured on this VSI!\n"); 2027 return -EIO; 2028 } 2029 2030 if (key) { 2031 if (!vsi->rss_hkey_user) { 2032 vsi->rss_hkey_user = 2033 devm_kzalloc(&pf->pdev->dev, 2034 ICE_VSIQF_HKEY_ARRAY_SIZE, 2035 GFP_KERNEL); 2036 if (!vsi->rss_hkey_user) 2037 return -ENOMEM; 2038 } 2039 memcpy(vsi->rss_hkey_user, key, ICE_VSIQF_HKEY_ARRAY_SIZE); 2040 seed = vsi->rss_hkey_user; 2041 } 2042 2043 if (!vsi->rss_lut_user) { 2044 vsi->rss_lut_user = devm_kzalloc(&pf->pdev->dev, 2045 vsi->rss_table_size, 2046 GFP_KERNEL); 2047 if (!vsi->rss_lut_user) 2048 return -ENOMEM; 2049 } 2050 2051 /* Each 32 bits pointed by 'indir' is stored with a lut entry */ 2052 if (indir) { 2053 int i; 2054 2055 for (i = 0; i < vsi->rss_table_size; i++) 2056 vsi->rss_lut_user[i] = (u8)(indir[i]); 2057 } else { 2058 ice_fill_rss_lut(vsi->rss_lut_user, vsi->rss_table_size, 2059 vsi->rss_size); 2060 } 2061 2062 if (ice_set_rss(vsi, seed, vsi->rss_lut_user, vsi->rss_table_size)) 2063 return -EIO; 2064 2065 return 0; 2066 } 2067 2068 enum ice_container_type { 2069 ICE_RX_CONTAINER, 2070 ICE_TX_CONTAINER, 2071 }; 2072 2073 /** 2074 * ice_get_rc_coalesce - get ITR values for specific ring container 2075 * @ec: ethtool structure to fill with driver's coalesce settings 2076 * @c_type: container type, RX or TX 2077 * @rc: ring container that the ITR values will come from 2078 * 2079 * Query the device for ice_ring_container specific ITR values. 
 * This is
 * done per ice_ring_container because each q_vector can have 1 or more rings
 * and all of said ring(s) will have the same ITR values.
 *
 * Returns 0 on success, negative otherwise.
 */
static int
ice_get_rc_coalesce(struct ethtool_coalesce *ec, enum ice_container_type c_type,
		    struct ice_ring_container *rc)
{
	struct ice_pf *pf = rc->ring->vsi->back;

	switch (c_type) {
	case ICE_RX_CONTAINER:
		/* adaptive (dynamic) mode is flagged inside itr_setting;
		 * report the usec value with that flag masked off
		 */
		ec->use_adaptive_rx_coalesce = ITR_IS_DYNAMIC(rc->itr_setting);
		ec->rx_coalesce_usecs = rc->itr_setting & ~ICE_ITR_DYNAMIC;
		break;
	case ICE_TX_CONTAINER:
		ec->use_adaptive_tx_coalesce = ITR_IS_DYNAMIC(rc->itr_setting);
		ec->tx_coalesce_usecs = rc->itr_setting & ~ICE_ITR_DYNAMIC;
		break;
	default:
		dev_dbg(&pf->pdev->dev, "Invalid c_type %d\n", c_type);
		return -EINVAL;
	}

	return 0;
}

/**
 * __ice_get_coalesce - get ITR/INTRL values for the device
 * @netdev: pointer to the netdev associated with this query
 * @ec: ethtool structure to fill with driver's coalesce settings
 * @q_num: queue number to get the coalesce settings for, or a negative
 *	   value to answer a device-wide query from queue 0's vectors
 *
 * Returns 0 on success, -EINVAL when @q_num is not a valid Rx or Tx queue.
 */
static int
__ice_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec,
		   int q_num)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	/* -EINVAL sentinels: a value left untouched below means that
	 * direction was never queried
	 */
	int tx = -EINVAL, rx = -EINVAL;
	struct ice_vsi *vsi = np->vsi;

	if (q_num < 0) {
		/* device-wide query: report queue 0's settings */
		rx = ice_get_rc_coalesce(ec, ICE_RX_CONTAINER,
					 &vsi->rx_rings[0]->q_vector->rx);
		tx = ice_get_rc_coalesce(ec, ICE_TX_CONTAINER,
					 &vsi->tx_rings[0]->q_vector->tx);

		goto update_coalesced_frames;
	}

	if (q_num < vsi->num_rxq && q_num < vsi->num_txq) {
		rx = ice_get_rc_coalesce(ec, ICE_RX_CONTAINER,
					 &vsi->rx_rings[q_num]->q_vector->rx);
		tx = ice_get_rc_coalesce(ec, ICE_TX_CONTAINER,
					 &vsi->tx_rings[q_num]->q_vector->tx);
	} else if (q_num < vsi->num_rxq) {
		rx = ice_get_rc_coalesce(ec, ICE_RX_CONTAINER,
					 &vsi->rx_rings[q_num]->q_vector->rx);
	} else if (q_num < vsi->num_txq) {
		tx = ice_get_rc_coalesce(ec, ICE_TX_CONTAINER,
					 &vsi->tx_rings[q_num]->q_vector->tx);
	} else {
		/* q_num is invalid for both Rx and Tx queues */
		return -EINVAL;
	}

update_coalesced_frames:
	/* either q_num is invalid for both Rx and Tx queues or setting coalesce
	 * failed completely.
	 * NOTE(review): if exactly one of the two queries above failed this
	 * still returns 0 -- benign today because ice_get_rc_coalesce() can
	 * only fail on an invalid container type, but worth tightening.
	 */
	if (tx && rx)
		return -EINVAL;

	/* with q_num < 0 both bounds checks pass, so the device-wide query
	 * also reports the work limit here
	 */
	if (q_num < vsi->num_txq)
		ec->tx_max_coalesced_frames_irq = vsi->work_lmt;

	if (q_num < vsi->num_rxq)
		ec->rx_max_coalesced_frames_irq = vsi->work_lmt;

	return 0;
}

/**
 * ice_get_coalesce - ethtool callback for device-wide coalesce settings
 * @netdev: netdev being queried
 * @ec: ethtool coalesce structure to fill
 */
static int
ice_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec)
{
	return __ice_get_coalesce(netdev, ec, -1);
}

/**
 * ice_get_per_q_coalesce - ethtool callback for one queue's coalesce settings
 * @netdev: netdev being queried
 * @q_num: queue to report
 * @ec: ethtool coalesce structure to fill
 */
static int ice_get_per_q_coalesce(struct net_device *netdev, u32 q_num,
				  struct ethtool_coalesce *ec)
{
	return __ice_get_coalesce(netdev, ec, q_num);
}

/**
 * ice_set_rc_coalesce - set ITR values for specific ring container
 * @c_type: container type, RX or TX
 * @ec: ethtool structure from user to update ITR settings
 * @rc: ring container that the ITR values will come from
 * @vsi: VSI associated to the ring container
 *
 * Set specific ITR values. This is done per ice_ring_container because each
 * q_vector can have 1 or more rings and all of said ring(s) will have the same
 * ITR values.
 *
 * Returns 0 on success, negative otherwise.
2187 */ 2188 static int 2189 ice_set_rc_coalesce(enum ice_container_type c_type, struct ethtool_coalesce *ec, 2190 struct ice_ring_container *rc, struct ice_vsi *vsi) 2191 { 2192 struct ice_pf *pf = vsi->back; 2193 u16 itr_setting; 2194 2195 if (!rc->ring) 2196 return -EINVAL; 2197 2198 itr_setting = rc->itr_setting & ~ICE_ITR_DYNAMIC; 2199 2200 switch (c_type) { 2201 case ICE_RX_CONTAINER: 2202 if (ec->rx_coalesce_usecs != itr_setting && 2203 ec->use_adaptive_rx_coalesce) { 2204 netdev_info(vsi->netdev, 2205 "Rx interrupt throttling cannot be changed if adaptive-rx is enabled\n"); 2206 return -EINVAL; 2207 } 2208 2209 if (ec->rx_coalesce_usecs > ICE_ITR_MAX) { 2210 netdev_info(vsi->netdev, 2211 "Invalid value, rx-usecs range is 0-%d\n", 2212 ICE_ITR_MAX); 2213 return -EINVAL; 2214 } 2215 2216 if (ec->use_adaptive_rx_coalesce) { 2217 rc->itr_setting |= ICE_ITR_DYNAMIC; 2218 } else { 2219 rc->itr_setting = ITR_REG_ALIGN(ec->rx_coalesce_usecs); 2220 rc->target_itr = ITR_TO_REG(rc->itr_setting); 2221 } 2222 break; 2223 case ICE_TX_CONTAINER: 2224 if (ec->tx_coalesce_usecs != itr_setting && 2225 ec->use_adaptive_tx_coalesce) { 2226 netdev_info(vsi->netdev, 2227 "Tx interrupt throttling cannot be changed if adaptive-tx is enabled\n"); 2228 return -EINVAL; 2229 } 2230 2231 if (ec->tx_coalesce_usecs > ICE_ITR_MAX) { 2232 netdev_info(vsi->netdev, 2233 "Invalid value, tx-usecs range is 0-%d\n", 2234 ICE_ITR_MAX); 2235 return -EINVAL; 2236 } 2237 2238 if (ec->use_adaptive_tx_coalesce) { 2239 rc->itr_setting |= ICE_ITR_DYNAMIC; 2240 } else { 2241 rc->itr_setting = ITR_REG_ALIGN(ec->tx_coalesce_usecs); 2242 rc->target_itr = ITR_TO_REG(rc->itr_setting); 2243 } 2244 break; 2245 default: 2246 dev_dbg(&pf->pdev->dev, "Invalid container type %d\n", c_type); 2247 return -EINVAL; 2248 } 2249 2250 return 0; 2251 } 2252 2253 static int 2254 __ice_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec, 2255 int q_num) 2256 { 2257 struct ice_netdev_priv *np = 
netdev_priv(netdev); 2258 int rx = -EINVAL, tx = -EINVAL; 2259 struct ice_vsi *vsi = np->vsi; 2260 2261 if (q_num < 0) { 2262 int i; 2263 2264 ice_for_each_q_vector(vsi, i) { 2265 struct ice_q_vector *q_vector = vsi->q_vectors[i]; 2266 2267 if (ice_set_rc_coalesce(ICE_RX_CONTAINER, ec, 2268 &q_vector->rx, vsi) || 2269 ice_set_rc_coalesce(ICE_TX_CONTAINER, ec, 2270 &q_vector->tx, vsi)) 2271 return -EINVAL; 2272 } 2273 2274 goto set_work_lmt; 2275 } 2276 2277 if (q_num < vsi->num_rxq && q_num < vsi->num_txq) { 2278 rx = ice_set_rc_coalesce(ICE_RX_CONTAINER, ec, 2279 &vsi->rx_rings[q_num]->q_vector->rx, 2280 vsi); 2281 tx = ice_set_rc_coalesce(ICE_TX_CONTAINER, ec, 2282 &vsi->tx_rings[q_num]->q_vector->tx, 2283 vsi); 2284 } else if (q_num < vsi->num_rxq) { 2285 rx = ice_set_rc_coalesce(ICE_RX_CONTAINER, ec, 2286 &vsi->rx_rings[q_num]->q_vector->rx, 2287 vsi); 2288 } else if (q_num < vsi->num_txq) { 2289 tx = ice_set_rc_coalesce(ICE_TX_CONTAINER, ec, 2290 &vsi->tx_rings[q_num]->q_vector->tx, 2291 vsi); 2292 } 2293 2294 /* either q_num is invalid for both Rx and Tx queues or setting coalesce 2295 * failed completely 2296 */ 2297 if (rx && tx) 2298 return -EINVAL; 2299 2300 set_work_lmt: 2301 if (ec->tx_max_coalesced_frames_irq || ec->rx_max_coalesced_frames_irq) 2302 vsi->work_lmt = max(ec->tx_max_coalesced_frames_irq, 2303 ec->rx_max_coalesced_frames_irq); 2304 2305 return 0; 2306 } 2307 2308 static int 2309 ice_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec) 2310 { 2311 return __ice_set_coalesce(netdev, ec, -1); 2312 } 2313 2314 static int ice_set_per_q_coalesce(struct net_device *netdev, u32 q_num, 2315 struct ethtool_coalesce *ec) 2316 { 2317 return __ice_set_coalesce(netdev, ec, q_num); 2318 } 2319 2320 static const struct ethtool_ops ice_ethtool_ops = { 2321 .get_link_ksettings = ice_get_link_ksettings, 2322 .set_link_ksettings = ice_set_link_ksettings, 2323 .get_drvinfo = ice_get_drvinfo, 2324 .get_regs_len = ice_get_regs_len, 2325 .get_regs 
= ice_get_regs,
	.get_msglevel = ice_get_msglevel,
	.set_msglevel = ice_set_msglevel,
	.get_link = ethtool_op_get_link,
	.get_eeprom_len = ice_get_eeprom_len,
	.get_eeprom = ice_get_eeprom,
	.get_coalesce = ice_get_coalesce,
	.set_coalesce = ice_set_coalesce,
	.get_strings = ice_get_strings,
	.set_phys_id = ice_set_phys_id,
	.get_ethtool_stats = ice_get_ethtool_stats,
	.get_priv_flags = ice_get_priv_flags,
	.set_priv_flags = ice_set_priv_flags,
	.get_sset_count = ice_get_sset_count,
	.get_rxnfc = ice_get_rxnfc,
	.get_ringparam = ice_get_ringparam,
	.set_ringparam = ice_set_ringparam,
	.nway_reset = ice_nway_reset,
	.get_pauseparam = ice_get_pauseparam,
	.set_pauseparam = ice_set_pauseparam,
	.get_rxfh_key_size = ice_get_rxfh_key_size,
	.get_rxfh_indir_size = ice_get_rxfh_indir_size,
	.get_rxfh = ice_get_rxfh,
	.set_rxfh = ice_set_rxfh,
	/* generic timestamp info: no PTP support is advertised here */
	.get_ts_info = ethtool_op_get_ts_info,
	.get_per_queue_coalesce = ice_get_per_q_coalesce,
	.set_per_queue_coalesce = ice_set_per_q_coalesce,
};

/**
 * ice_set_ethtool_ops - setup netdev ethtool ops
 * @netdev: network interface device structure
 *
 * Attach the driver's ethtool operations table to @netdev.
 */
void ice_set_ethtool_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &ice_ethtool_ops;
}