// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

#include "ice_common.h"
#include "ice_sched.h"
#include "ice_adminq_cmd.h"
#include "ice_flow.h"
#include "ice_ptp_hw.h"

#define ICE_PF_RESET_WAIT_COUNT	300

static const char * const ice_link_mode_str_low[] = {
	[0] = "100BASE_TX",
	[1] = "100M_SGMII",
	[2] = "1000BASE_T",
	[3] = "1000BASE_SX",
	[4] = "1000BASE_LX",
	[5] = "1000BASE_KX",
	[6] = "1G_SGMII",
	[7] = "2500BASE_T",
	[8] = "2500BASE_X",
	[9] = "2500BASE_KX",
	[10] = "5GBASE_T",
	[11] = "5GBASE_KR",
	[12] = "10GBASE_T",
	[13] = "10G_SFI_DA",
	[14] = "10GBASE_SR",
	[15] = "10GBASE_LR",
	[16] = "10GBASE_KR_CR1",
	[17] = "10G_SFI_AOC_ACC",
	[18] = "10G_SFI_C2C",
	[19] = "25GBASE_T",
	[20] = "25GBASE_CR",
	[21] = "25GBASE_CR_S",
	[22] = "25GBASE_CR1",
	[23] = "25GBASE_SR",
	[24] = "25GBASE_LR",
	[25] = "25GBASE_KR",
	[26] = "25GBASE_KR_S",
	[27] = "25GBASE_KR1",
	[28] = "25G_AUI_AOC_ACC",
	[29] = "25G_AUI_C2C",
	[30] = "40GBASE_CR4",
	[31] = "40GBASE_SR4",
	[32] = "40GBASE_LR4",
	[33] = "40GBASE_KR4",
	[34] = "40G_XLAUI_AOC_ACC",
	[35] = "40G_XLAUI",
	[36] = "50GBASE_CR2",
	[37] = "50GBASE_SR2",
	[38] = "50GBASE_LR2",
	[39] = "50GBASE_KR2",
	[40] = "50G_LAUI2_AOC_ACC",
	[41] = "50G_LAUI2",
	[42] = "50G_AUI2_AOC_ACC",
	[43] = "50G_AUI2",
	[44] = "50GBASE_CP",
	[45] = "50GBASE_SR",
	[46] = "50GBASE_FR",
	[47] = "50GBASE_LR",
	[48] = "50GBASE_KR_PAM4",
	[49] = "50G_AUI1_AOC_ACC",
	[50] = "50G_AUI1",
	[51] = "100GBASE_CR4",
	[52] = "100GBASE_SR4",
	[53] = "100GBASE_LR4",
	[54] = "100GBASE_KR4",
	[55] = "100G_CAUI4_AOC_ACC",
	[56] = "100G_CAUI4",
	[57] = "100G_AUI4_AOC_ACC",
	[58] = "100G_AUI4",
	[59] = "100GBASE_CR_PAM4",
	[60] = "100GBASE_KR_PAM4",
	[61] = "100GBASE_CP2",
	[62] = "100GBASE_SR2",
	[63] = "100GBASE_DR",
};

static const char * const ice_link_mode_str_high[] = {
	[0] = "100GBASE_KR2_PAM4",
	[1] = "100G_CAUI2_AOC_ACC",
	[2] = "100G_CAUI2",
	[3] = "100G_AUI2_AOC_ACC",
	[4] = "100G_AUI2",
};

/**
 * ice_dump_phy_type - helper function to dump phy_type
 * @hw: pointer to the HW structure
 * @low: 64 bit value for phy_type_low
 * @high: 64 bit value for phy_type_high
 * @prefix: prefix string to differentiate multiple dumps
 */
static void
ice_dump_phy_type(struct ice_hw *hw, u64 low, u64 high, const char *prefix)
{
	ice_debug(hw, ICE_DBG_PHY, "%s: phy_type_low: 0x%016llx\n", prefix, low);

	for (u32 i = 0; i < BITS_PER_TYPE(typeof(low)); i++) {
		if (low & BIT_ULL(i))
			ice_debug(hw, ICE_DBG_PHY, "%s: bit(%d): %s\n",
				  prefix, i, ice_link_mode_str_low[i]);
	}

	ice_debug(hw, ICE_DBG_PHY, "%s: phy_type_high: 0x%016llx\n", prefix, high);

	for (u32 i = 0; i < BITS_PER_TYPE(typeof(high)); i++) {
		if (high & BIT_ULL(i))
			ice_debug(hw, ICE_DBG_PHY, "%s: bit(%d): %s\n",
				  prefix, i, ice_link_mode_str_high[i]);
	}
}
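/* Usage sketch (illustrative only, not driver code): bit positions in the
 * phy_type words index the string tables above, so a phy_type_low of
 * BIT_ULL(12) | BIT_ULL(16) would log "10GBASE_T" for bit 12 and
 * "10GBASE_KR_CR1" for bit 16:
 *
 *	ice_dump_phy_type(hw, BIT_ULL(12) | BIT_ULL(16), 0, "example");
 */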
/**
 * ice_set_mac_type - Sets MAC type
 * @hw: pointer to the HW structure
 *
 * This function sets the MAC type of the adapter based on the
 * vendor ID and device ID stored in the HW structure.
 */
static int ice_set_mac_type(struct ice_hw *hw)
{
	if (hw->vendor_id != PCI_VENDOR_ID_INTEL)
		return -ENODEV;

	switch (hw->device_id) {
	case ICE_DEV_ID_E810C_BACKPLANE:
	case ICE_DEV_ID_E810C_QSFP:
	case ICE_DEV_ID_E810C_SFP:
	case ICE_DEV_ID_E810_XXV_BACKPLANE:
	case ICE_DEV_ID_E810_XXV_QSFP:
	case ICE_DEV_ID_E810_XXV_SFP:
		hw->mac_type = ICE_MAC_E810;
		break;
	case ICE_DEV_ID_E823C_10G_BASE_T:
	case ICE_DEV_ID_E823C_BACKPLANE:
	case ICE_DEV_ID_E823C_QSFP:
	case ICE_DEV_ID_E823C_SFP:
	case ICE_DEV_ID_E823C_SGMII:
	case ICE_DEV_ID_E822C_10G_BASE_T:
	case ICE_DEV_ID_E822C_BACKPLANE:
	case ICE_DEV_ID_E822C_QSFP:
	case ICE_DEV_ID_E822C_SFP:
	case ICE_DEV_ID_E822C_SGMII:
	case ICE_DEV_ID_E822L_10G_BASE_T:
	case ICE_DEV_ID_E822L_BACKPLANE:
	case ICE_DEV_ID_E822L_SFP:
	case ICE_DEV_ID_E822L_SGMII:
	case ICE_DEV_ID_E823L_10G_BASE_T:
	case ICE_DEV_ID_E823L_1GBE:
	case ICE_DEV_ID_E823L_BACKPLANE:
	case ICE_DEV_ID_E823L_QSFP:
	case ICE_DEV_ID_E823L_SFP:
		hw->mac_type = ICE_MAC_GENERIC;
		break;
	default:
		hw->mac_type = ICE_MAC_UNKNOWN;
		break;
	}

	ice_debug(hw, ICE_DBG_INIT, "mac_type: %d\n", hw->mac_type);
	return 0;
}

/**
 * ice_is_e810
 * @hw: pointer to the hardware structure
 *
 * Returns true if the device is E810 based, false if not.
 */
bool ice_is_e810(struct ice_hw *hw)
{
	return hw->mac_type == ICE_MAC_E810;
}

/**
 * ice_is_e810t
 * @hw: pointer to the hardware structure
 *
 * Returns true if the device is E810T based, false if not.
 */
bool ice_is_e810t(struct ice_hw *hw)
{
	switch (hw->device_id) {
	case ICE_DEV_ID_E810C_SFP:
		switch (hw->subsystem_device_id) {
		case ICE_SUBDEV_ID_E810T:
		case ICE_SUBDEV_ID_E810T2:
		case ICE_SUBDEV_ID_E810T3:
		case ICE_SUBDEV_ID_E810T4:
		case ICE_SUBDEV_ID_E810T6:
		case ICE_SUBDEV_ID_E810T7:
			return true;
		}
		break;
	case ICE_DEV_ID_E810C_QSFP:
		switch (hw->subsystem_device_id) {
		case ICE_SUBDEV_ID_E810T2:
		case ICE_SUBDEV_ID_E810T3:
		case ICE_SUBDEV_ID_E810T5:
			return true;
		}
		break;
	default:
		break;
	}

	return false;
}

/**
 * ice_is_e823
 * @hw: pointer to the hardware structure
 *
 * Returns true if the device is E823-L or E823-C based, false if not.
 */
bool ice_is_e823(struct ice_hw *hw)
{
	switch (hw->device_id) {
	case ICE_DEV_ID_E823L_BACKPLANE:
	case ICE_DEV_ID_E823L_SFP:
	case ICE_DEV_ID_E823L_10G_BASE_T:
	case ICE_DEV_ID_E823L_1GBE:
	case ICE_DEV_ID_E823L_QSFP:
	case ICE_DEV_ID_E823C_BACKPLANE:
	case ICE_DEV_ID_E823C_QSFP:
	case ICE_DEV_ID_E823C_SFP:
	case ICE_DEV_ID_E823C_10G_BASE_T:
	case ICE_DEV_ID_E823C_SGMII:
		return true;
	default:
		return false;
	}
}
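/* Usage sketch (illustrative only, not driver code): the helpers above are
 * typically used to gate hardware-family specific paths. The function name
 * here is hypothetical:
 *
 *	static void example_cfg(struct ice_hw *hw)
 *	{
 *		if (ice_is_e810(hw))
 *			// E810-specific programming
 *		else
 *			// generic (E822/E823) programming
 *	}
 */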
/**
 * ice_clear_pf_cfg - Clear PF configuration
 * @hw: pointer to the hardware structure
 *
 * Clears any existing PF configuration (VSIs, VSI lists, switch rules, port
 * configuration, flow director filters, etc.).
 */
int ice_clear_pf_cfg(struct ice_hw *hw)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pf_cfg);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_aq_manage_mac_read - manage MAC address read command
 * @hw: pointer to the HW struct
 * @buf: a virtual buffer to hold the manage MAC read response
 * @buf_size: Size of the virtual buffer
 * @cd: pointer to command details structure or NULL
 *
 * This function is used to return the per PF station MAC address (0x0107).
 * NOTE: Upon successful completion of this command, MAC address information
 * is returned in the user specified buffer; interpret the buffer as a
 * "manage_mac_read" response. The reported MAC addresses are also stored in
 * the HW struct (port.mac). ice_discover_dev_caps is expected to be called
 * before this function is called.
 */
static int
ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
		       struct ice_sq_cd *cd)
{
	struct ice_aqc_manage_mac_read_resp *resp;
	struct ice_aqc_manage_mac_read *cmd;
	struct ice_aq_desc desc;
	int status;
	u16 flags;
	u8 i;

	cmd = &desc.params.mac_read;

	if (buf_size < sizeof(*resp))
		return -EINVAL;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_read);

	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (status)
		return status;

	resp = buf;
	flags = le16_to_cpu(cmd->flags) & ICE_AQC_MAN_MAC_READ_M;

	if (!(flags & ICE_AQC_MAN_MAC_LAN_ADDR_VALID)) {
		ice_debug(hw, ICE_DBG_LAN, "got invalid MAC address\n");
		return -EIO;
	}

	/* A single port can report up to two (LAN and WoL) addresses */
	for (i = 0; i < cmd->num_addr; i++)
		if (resp[i].addr_type == ICE_AQC_MAN_MAC_ADDR_TYPE_LAN) {
			ether_addr_copy(hw->port_info->mac.lan_addr,
					resp[i].mac_addr);
			ether_addr_copy(hw->port_info->mac.perm_addr,
					resp[i].mac_addr);
			break;
		}

	return 0;
}
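/* Usage sketch (illustrative only, not driver code): because a port can
 * report both a LAN and a WoL address, callers size the response buffer for
 * two entries, as ice_init_hw() does later in this file:
 *
 *	u16 len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);
 *	void *buf = devm_kcalloc(ice_hw_to_dev(hw), 2,
 *				 sizeof(struct ice_aqc_manage_mac_read_resp),
 *				 GFP_KERNEL);
 *
 *	if (buf)
 *		status = ice_aq_manage_mac_read(hw, buf, len, NULL);
 */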
/**
 * ice_aq_get_phy_caps - returns PHY capabilities
 * @pi: port information structure
 * @qual_mods: report qualified modules
 * @report_mode: report mode capabilities
 * @pcaps: structure for PHY capabilities to be filled
 * @cd: pointer to command details structure or NULL
 *
 * Returns the various PHY capabilities supported on the Port (0x0600)
 */
int
ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
		    struct ice_aqc_get_phy_caps_data *pcaps,
		    struct ice_sq_cd *cd)
{
	struct ice_aqc_get_phy_caps *cmd;
	u16 pcaps_size = sizeof(*pcaps);
	struct ice_aq_desc desc;
	const char *prefix;
	struct ice_hw *hw;
	int status;

	cmd = &desc.params.get_phy;

	if (!pcaps || (report_mode & ~ICE_AQC_REPORT_MODE_M) || !pi)
		return -EINVAL;
	hw = pi->hw;

	if (report_mode == ICE_AQC_REPORT_DFLT_CFG &&
	    !ice_fw_supports_report_dflt_cfg(hw))
		return -EINVAL;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_caps);

	if (qual_mods)
		cmd->param0 |= cpu_to_le16(ICE_AQC_GET_PHY_RQM);

	cmd->param0 |= cpu_to_le16(report_mode);
	status = ice_aq_send_cmd(hw, &desc, pcaps, pcaps_size, cd);

	ice_debug(hw, ICE_DBG_LINK, "get phy caps dump\n");

	switch (report_mode) {
	case ICE_AQC_REPORT_TOPO_CAP_MEDIA:
		prefix = "phy_caps_media";
		break;
	case ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA:
		prefix = "phy_caps_no_media";
		break;
	case ICE_AQC_REPORT_ACTIVE_CFG:
		prefix = "phy_caps_active";
		break;
	case ICE_AQC_REPORT_DFLT_CFG:
		prefix = "phy_caps_default";
		break;
	default:
		prefix = "phy_caps_invalid";
	}

	ice_dump_phy_type(hw, le64_to_cpu(pcaps->phy_type_low),
			  le64_to_cpu(pcaps->phy_type_high), prefix);

	ice_debug(hw, ICE_DBG_LINK, "%s: report_mode = 0x%x\n",
		  prefix, report_mode);
	ice_debug(hw, ICE_DBG_LINK, "%s: caps = 0x%x\n", prefix, pcaps->caps);
	ice_debug(hw, ICE_DBG_LINK, "%s: low_power_ctrl_an = 0x%x\n", prefix,
		  pcaps->low_power_ctrl_an);
	ice_debug(hw, ICE_DBG_LINK, "%s: eee_cap = 0x%x\n", prefix,
		  pcaps->eee_cap);
	ice_debug(hw, ICE_DBG_LINK, "%s: eeer_value = 0x%x\n", prefix,
		  pcaps->eeer_value);
	ice_debug(hw, ICE_DBG_LINK, "%s: link_fec_options = 0x%x\n", prefix,
		  pcaps->link_fec_options);
	ice_debug(hw, ICE_DBG_LINK, "%s: module_compliance_enforcement = 0x%x\n",
		  prefix, pcaps->module_compliance_enforcement);
	ice_debug(hw, ICE_DBG_LINK, "%s: extended_compliance_code = 0x%x\n",
		  prefix, pcaps->extended_compliance_code);
	ice_debug(hw, ICE_DBG_LINK, "%s: module_type[0] = 0x%x\n", prefix,
		  pcaps->module_type[0]);
	ice_debug(hw, ICE_DBG_LINK, "%s: module_type[1] = 0x%x\n", prefix,
		  pcaps->module_type[1]);
	ice_debug(hw, ICE_DBG_LINK, "%s: module_type[2] = 0x%x\n", prefix,
		  pcaps->module_type[2]);

	if (!status && report_mode == ICE_AQC_REPORT_TOPO_CAP_MEDIA) {
		pi->phy.phy_type_low = le64_to_cpu(pcaps->phy_type_low);
		pi->phy.phy_type_high = le64_to_cpu(pcaps->phy_type_high);
		memcpy(pi->phy.link_info.module_type, &pcaps->module_type,
		       sizeof(pi->phy.link_info.module_type));
	}

	return status;
}
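/* Usage sketch (illustrative only, not driver code): callers allocate a
 * ice_aqc_get_phy_caps_data buffer and pick a report mode. Here the active
 * configuration is queried and the autoneg capability bit is tested;
 * ICE_AQC_PHY_AN_MODE is the caps flag assumed for this sketch:
 *
 *	struct ice_aqc_get_phy_caps_data *pcaps;
 *
 *	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
 *	if (pcaps &&
 *	    !ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG,
 *				 pcaps, NULL) &&
 *	    (pcaps->caps & ICE_AQC_PHY_AN_MODE))
 *		// autoneg is enabled in the active PHY configuration
 *	kfree(pcaps);
 */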
/**
 * ice_aq_get_link_topo_handle - get link topology node return status
 * @pi: port information structure
 * @node_type: requested node type
 * @cd: pointer to command details structure or NULL
 *
 * Get link topology node return status for specified node type (0x06E0)
 *
 * Node type cage can be used to determine if cage is present. If AQC
 * returns error (ENOENT), then no cage present. If no cage present, then
 * connection type is backplane or BASE-T.
 */
static int
ice_aq_get_link_topo_handle(struct ice_port_info *pi, u8 node_type,
			    struct ice_sq_cd *cd)
{
	struct ice_aqc_get_link_topo *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.get_link_topo;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);

	cmd->addr.topo_params.node_type_ctx =
		(ICE_AQC_LINK_TOPO_NODE_CTX_PORT <<
		 ICE_AQC_LINK_TOPO_NODE_CTX_S);

	/* set node type */
	cmd->addr.topo_params.node_type_ctx |=
		(ICE_AQC_LINK_TOPO_NODE_TYPE_M & node_type);

	return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
}

/**
 * ice_is_media_cage_present
 * @pi: port information structure
 *
 * Returns true if media cage is present, else false. If no cage, then
 * media type is backplane or BASE-T.
 */
static bool ice_is_media_cage_present(struct ice_port_info *pi)
{
	/* Node type cage can be used to determine if cage is present. If AQC
	 * returns error (ENOENT), then no cage present. If no cage present then
	 * connection type is backplane or BASE-T.
	 */
	return !ice_aq_get_link_topo_handle(pi,
					    ICE_AQC_LINK_TOPO_NODE_TYPE_CAGE,
					    NULL);
}
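/* Summary of the mapping implemented by ice_get_media_type() below
 * (informational, derived from the switch statements that follow):
 *
 *	SR/LR/FR/DR and AOC/ACC PHY types	-> ICE_MEDIA_FIBER
 *	BASE-T PHY types			-> ICE_MEDIA_BASET
 *	CR/CP and SFI_DA PHY types		-> ICE_MEDIA_DA
 *	KX/KR backplane PHY types		-> ICE_MEDIA_BACKPLANE
 *	AUI/LAUI/CAUI/XLAUI C2C PHY types	-> ICE_MEDIA_DA if a media
 *						   cage is present, else
 *						   ICE_MEDIA_BACKPLANE
 */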
/**
 * ice_get_media_type - Gets media type
 * @pi: port information structure
 */
static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
{
	struct ice_link_status *hw_link_info;

	if (!pi)
		return ICE_MEDIA_UNKNOWN;

	hw_link_info = &pi->phy.link_info;
	if (hw_link_info->phy_type_low && hw_link_info->phy_type_high)
		/* If more than one media type is selected, report unknown */
		return ICE_MEDIA_UNKNOWN;

	if (hw_link_info->phy_type_low) {
		/* 1G SGMII is a special case where some DA cable PHYs
		 * may show this as an option when it really shouldn't
		 * be since SGMII is meant to be between a MAC and a PHY
		 * in a backplane. Try to detect this case and handle it
		 */
		if (hw_link_info->phy_type_low == ICE_PHY_TYPE_LOW_1G_SGMII &&
		    (hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] ==
		     ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE ||
		     hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] ==
		     ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE))
			return ICE_MEDIA_DA;

		switch (hw_link_info->phy_type_low) {
		case ICE_PHY_TYPE_LOW_1000BASE_SX:
		case ICE_PHY_TYPE_LOW_1000BASE_LX:
		case ICE_PHY_TYPE_LOW_10GBASE_SR:
		case ICE_PHY_TYPE_LOW_10GBASE_LR:
		case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
		case ICE_PHY_TYPE_LOW_25GBASE_SR:
		case ICE_PHY_TYPE_LOW_25GBASE_LR:
		case ICE_PHY_TYPE_LOW_40GBASE_SR4:
		case ICE_PHY_TYPE_LOW_40GBASE_LR4:
		case ICE_PHY_TYPE_LOW_50GBASE_SR2:
		case ICE_PHY_TYPE_LOW_50GBASE_LR2:
		case ICE_PHY_TYPE_LOW_50GBASE_SR:
		case ICE_PHY_TYPE_LOW_50GBASE_FR:
		case ICE_PHY_TYPE_LOW_50GBASE_LR:
		case ICE_PHY_TYPE_LOW_100GBASE_SR4:
		case ICE_PHY_TYPE_LOW_100GBASE_LR4:
		case ICE_PHY_TYPE_LOW_100GBASE_SR2:
		case ICE_PHY_TYPE_LOW_100GBASE_DR:
		case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
		case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
		case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
		case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
		case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
		case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
		case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
		case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
			return ICE_MEDIA_FIBER;
		case ICE_PHY_TYPE_LOW_100BASE_TX:
		case ICE_PHY_TYPE_LOW_1000BASE_T:
		case ICE_PHY_TYPE_LOW_2500BASE_T:
		case ICE_PHY_TYPE_LOW_5GBASE_T:
		case ICE_PHY_TYPE_LOW_10GBASE_T:
		case ICE_PHY_TYPE_LOW_25GBASE_T:
			return ICE_MEDIA_BASET;
		case ICE_PHY_TYPE_LOW_10G_SFI_DA:
		case ICE_PHY_TYPE_LOW_25GBASE_CR:
		case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
		case ICE_PHY_TYPE_LOW_25GBASE_CR1:
		case ICE_PHY_TYPE_LOW_40GBASE_CR4:
		case ICE_PHY_TYPE_LOW_50GBASE_CR2:
		case ICE_PHY_TYPE_LOW_50GBASE_CP:
		case ICE_PHY_TYPE_LOW_100GBASE_CR4:
		case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
		case ICE_PHY_TYPE_LOW_100GBASE_CP2:
			return ICE_MEDIA_DA;
		case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
		case ICE_PHY_TYPE_LOW_40G_XLAUI:
		case ICE_PHY_TYPE_LOW_50G_LAUI2:
		case ICE_PHY_TYPE_LOW_50G_AUI2:
		case ICE_PHY_TYPE_LOW_50G_AUI1:
		case ICE_PHY_TYPE_LOW_100G_AUI4:
		case ICE_PHY_TYPE_LOW_100G_CAUI4:
			if (ice_is_media_cage_present(pi))
				return ICE_MEDIA_DA;
			fallthrough;
		case ICE_PHY_TYPE_LOW_1000BASE_KX:
		case ICE_PHY_TYPE_LOW_2500BASE_KX:
		case ICE_PHY_TYPE_LOW_2500BASE_X:
		case ICE_PHY_TYPE_LOW_5GBASE_KR:
		case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
		case ICE_PHY_TYPE_LOW_25GBASE_KR:
		case ICE_PHY_TYPE_LOW_25GBASE_KR1:
		case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
		case ICE_PHY_TYPE_LOW_40GBASE_KR4:
		case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
		case ICE_PHY_TYPE_LOW_50GBASE_KR2:
		case ICE_PHY_TYPE_LOW_100GBASE_KR4:
		case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
			return ICE_MEDIA_BACKPLANE;
		}
	} else {
		switch (hw_link_info->phy_type_high) {
		case ICE_PHY_TYPE_HIGH_100G_AUI2:
		case ICE_PHY_TYPE_HIGH_100G_CAUI2:
			if (ice_is_media_cage_present(pi))
				return ICE_MEDIA_DA;
			fallthrough;
		case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
			return ICE_MEDIA_BACKPLANE;
		case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
		case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
			return ICE_MEDIA_FIBER;
		}
	}
	return ICE_MEDIA_UNKNOWN;
}
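/* Usage sketch (illustrative only, not driver code) for ice_aq_get_link_info()
 * defined below: refresh the cached link status and test the link-up flag.
 * ICE_AQ_LINK_UP is the link_info bit assumed here:
 *
 *	if (!ice_aq_get_link_info(pi, false, NULL, NULL) &&
 *	    (pi->phy.link_info.link_info & ICE_AQ_LINK_UP))
 *		// link is up; speed is in pi->phy.link_info.link_speed
 */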
/**
 * ice_aq_get_link_info
 * @pi: port information structure
 * @ena_lse: enable/disable LinkStatusEvent reporting
 * @link: pointer to link status structure - optional
 * @cd: pointer to command details structure or NULL
 *
 * Get Link Status (0x0607). Returns the link status of the adapter.
 */
int
ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
		     struct ice_link_status *link, struct ice_sq_cd *cd)
{
	struct ice_aqc_get_link_status_data link_data = { 0 };
	struct ice_aqc_get_link_status *resp;
	struct ice_link_status *li_old, *li;
	enum ice_media_type *hw_media_type;
	struct ice_fc_info *hw_fc_info;
	bool tx_pause, rx_pause;
	struct ice_aq_desc desc;
	struct ice_hw *hw;
	u16 cmd_flags;
	int status;

	if (!pi)
		return -EINVAL;
	hw = pi->hw;
	li_old = &pi->phy.link_info_old;
	hw_media_type = &pi->phy.media_type;
	li = &pi->phy.link_info;
	hw_fc_info = &pi->fc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_status);
	cmd_flags = (ena_lse) ? ICE_AQ_LSE_ENA : ICE_AQ_LSE_DIS;
	resp = &desc.params.get_link_status;
	resp->cmd_flags = cpu_to_le16(cmd_flags);
	resp->lport_num = pi->lport;

	status = ice_aq_send_cmd(hw, &desc, &link_data, sizeof(link_data), cd);
	if (status)
		return status;

	/* save off old link status information */
	*li_old = *li;

	/* update current link status information */
	li->link_speed = le16_to_cpu(link_data.link_speed);
	li->phy_type_low = le64_to_cpu(link_data.phy_type_low);
	li->phy_type_high = le64_to_cpu(link_data.phy_type_high);
	*hw_media_type = ice_get_media_type(pi);
	li->link_info = link_data.link_info;
	li->link_cfg_err = link_data.link_cfg_err;
	li->an_info = link_data.an_info;
	li->ext_info = link_data.ext_info;
	li->max_frame_size = le16_to_cpu(link_data.max_frame_size);
	li->fec_info = link_data.cfg & ICE_AQ_FEC_MASK;
	li->topo_media_conflict = link_data.topo_media_conflict;
	li->pacing = link_data.cfg & (ICE_AQ_CFG_PACING_M |
				      ICE_AQ_CFG_PACING_TYPE_M);

	/* update fc info */
	tx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_TX);
	rx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_RX);
	if (tx_pause && rx_pause)
		hw_fc_info->current_mode = ICE_FC_FULL;
	else if (tx_pause)
		hw_fc_info->current_mode = ICE_FC_TX_PAUSE;
	else if (rx_pause)
		hw_fc_info->current_mode = ICE_FC_RX_PAUSE;
	else
		hw_fc_info->current_mode = ICE_FC_NONE;

	li->lse_ena = !!(resp->cmd_flags & cpu_to_le16(ICE_AQ_LSE_IS_ENABLED));

	ice_debug(hw, ICE_DBG_LINK, "get link info\n");
	ice_debug(hw, ICE_DBG_LINK, "	link_speed = 0x%x\n", li->link_speed);
	ice_debug(hw, ICE_DBG_LINK, "	phy_type_low = 0x%llx\n",
		  (unsigned long long)li->phy_type_low);
	ice_debug(hw, ICE_DBG_LINK, "	phy_type_high = 0x%llx\n",
		  (unsigned long long)li->phy_type_high);
	ice_debug(hw, ICE_DBG_LINK, "	media_type = 0x%x\n", *hw_media_type);
	ice_debug(hw, ICE_DBG_LINK, "	link_info = 0x%x\n", li->link_info);
	ice_debug(hw, ICE_DBG_LINK, "	link_cfg_err = 0x%x\n", li->link_cfg_err);
	ice_debug(hw, ICE_DBG_LINK, "	an_info = 0x%x\n", li->an_info);
	ice_debug(hw, ICE_DBG_LINK, "	ext_info = 0x%x\n", li->ext_info);
	ice_debug(hw, ICE_DBG_LINK, "	fec_info = 0x%x\n", li->fec_info);
	ice_debug(hw, ICE_DBG_LINK, "	lse_ena = 0x%x\n", li->lse_ena);
	ice_debug(hw, ICE_DBG_LINK, "	max_frame = 0x%x\n",
		  li->max_frame_size);
	ice_debug(hw, ICE_DBG_LINK, "	pacing = 0x%x\n", li->pacing);

	/* save link status information */
	if (link)
		*link = *li;

	/* flag cleared so calling functions don't call AQ again */
	pi->phy.get_link_info = false;

	return 0;
}

/**
 * ice_fill_tx_timer_and_fc_thresh
 * @hw: pointer to the HW struct
 * @cmd: pointer to MAC cfg structure
 *
 * Add Tx timer and FC refresh threshold info to Set MAC Config AQ command
 * descriptor
 */
static void
ice_fill_tx_timer_and_fc_thresh(struct ice_hw *hw,
				struct ice_aqc_set_mac_cfg *cmd)
{
	u16 fc_thres_val, tx_timer_val;
	u32 val;

	/* We read back the transmit timer and FC threshold value of
	 * LFC. Thus, we will use index =
	 * PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX.
	 *
	 * Also, because we are operating on transmit timer and FC
	 * threshold of LFC, we don't turn on any bit in tx_tmr_priority
	 */
#define IDX_OF_LFC PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX

	/* Retrieve the transmit timer */
	val = rd32(hw, PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(IDX_OF_LFC));
	tx_timer_val = val &
		PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_M;
	cmd->tx_tmr_value = cpu_to_le16(tx_timer_val);

	/* Retrieve the FC threshold */
	val = rd32(hw, PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(IDX_OF_LFC));
	fc_thres_val = val & PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_M;

	cmd->fc_refresh_threshold = cpu_to_le16(fc_thres_val);
}

/**
 * ice_aq_set_mac_cfg
 * @hw: pointer to the HW struct
 * @max_frame_size: Maximum Frame Size to be supported
 * @cd: pointer to command details structure or NULL
 *
 * Set MAC configuration (0x0603)
 */
int
ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd)
{
	struct ice_aqc_set_mac_cfg *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.set_mac_cfg;

	if (max_frame_size == 0)
		return -EINVAL;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_cfg);

	cmd->max_frame_size = cpu_to_le16(max_frame_size);

	ice_fill_tx_timer_and_fc_thresh(hw, cmd);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_init_fltr_mgmt_struct - initializes filter management list and locks
 * @hw: pointer to the HW struct
 */
static int ice_init_fltr_mgmt_struct(struct ice_hw *hw)
{
	struct ice_switch_info *sw;
	int status;

	hw->switch_info = devm_kzalloc(ice_hw_to_dev(hw),
				       sizeof(*hw->switch_info), GFP_KERNEL);
	sw = hw->switch_info;

	if (!sw)
		return -ENOMEM;

	INIT_LIST_HEAD(&sw->vsi_list_map_head);
	sw->prof_res_bm_init = 0;

	status = ice_init_def_sw_recp(hw);
	if (status) {
		devm_kfree(ice_hw_to_dev(hw), hw->switch_info);
		return status;
	}
	return 0;
}
/**
 * ice_cleanup_fltr_mgmt_struct - cleanup filter management list and locks
 * @hw: pointer to the HW struct
 */
static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_vsi_list_map_info *v_pos_map;
	struct ice_vsi_list_map_info *v_tmp_map;
	struct ice_sw_recipe *recps;
	u8 i;

	list_for_each_entry_safe(v_pos_map, v_tmp_map, &sw->vsi_list_map_head,
				 list_entry) {
		list_del(&v_pos_map->list_entry);
		devm_kfree(ice_hw_to_dev(hw), v_pos_map);
	}
	recps = sw->recp_list;
	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
		struct ice_recp_grp_entry *rg_entry, *tmprg_entry;

		recps[i].root_rid = i;
		list_for_each_entry_safe(rg_entry, tmprg_entry,
					 &recps[i].rg_list, l_entry) {
			list_del(&rg_entry->l_entry);
			devm_kfree(ice_hw_to_dev(hw), rg_entry);
		}

		if (recps[i].adv_rule) {
			struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
			struct ice_adv_fltr_mgmt_list_entry *lst_itr;

			mutex_destroy(&recps[i].filt_rule_lock);
			list_for_each_entry_safe(lst_itr, tmp_entry,
						 &recps[i].filt_rules,
						 list_entry) {
				list_del(&lst_itr->list_entry);
				devm_kfree(ice_hw_to_dev(hw), lst_itr->lkups);
				devm_kfree(ice_hw_to_dev(hw), lst_itr);
			}
		} else {
			struct ice_fltr_mgmt_list_entry *lst_itr, *tmp_entry;

			mutex_destroy(&recps[i].filt_rule_lock);
			list_for_each_entry_safe(lst_itr, tmp_entry,
						 &recps[i].filt_rules,
						 list_entry) {
				list_del(&lst_itr->list_entry);
				devm_kfree(ice_hw_to_dev(hw), lst_itr);
			}
		}
		devm_kfree(ice_hw_to_dev(hw), recps[i].root_buf);
	}
	ice_rm_all_sw_replay_rule_info(hw);
	devm_kfree(ice_hw_to_dev(hw), sw->recp_list);
	devm_kfree(ice_hw_to_dev(hw), sw);
}

/**
 * ice_get_fw_log_cfg - get FW logging configuration
 * @hw: pointer to the HW struct
 */
static int ice_get_fw_log_cfg(struct ice_hw *hw)
{
	struct ice_aq_desc desc;
	__le16 *config;
	int status;
	u16 size;

	size = sizeof(*config) * ICE_AQC_FW_LOG_ID_MAX;
	config = kzalloc(size, GFP_KERNEL);
	if (!config)
		return -ENOMEM;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging_info);

	status = ice_aq_send_cmd(hw, &desc, config, size, NULL);
	if (!status) {
		u16 i;

		/* Save FW logging information into the HW structure */
		for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) {
			u16 v, m, flgs;

			v = le16_to_cpu(config[i]);
			m = (v & ICE_AQC_FW_LOG_ID_M) >> ICE_AQC_FW_LOG_ID_S;
			flgs = (v & ICE_AQC_FW_LOG_EN_M) >> ICE_AQC_FW_LOG_EN_S;

			if (m < ICE_AQC_FW_LOG_ID_MAX)
				hw->fw_log.evnts[m].cur = flgs;
		}
	}

	kfree(config);

	return status;
}
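/* Worked example (informational, assuming the ID mask covers bits 11:0 and
 * the enable mask bits 15:12, which matches how the shifts are used above):
 * a raw config word of 0x1003 would decode to module ID m = 0x003 with
 * enable flags flgs = 0x1, i.e. hw->fw_log.evnts[3].cur = 0x1.
 */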
/**
 * ice_cfg_fw_log - configure FW logging
 * @hw: pointer to the HW struct
 * @enable: enable certain FW logging events if true, disable all if false
 *
 * This function enables/disables the FW logging via Rx CQ events and a UART
 * port based on predetermined configurations. FW logging via the Rx CQ can be
 * enabled/disabled for individual PFs. However, FW logging via the UART can
 * only be enabled/disabled for all PFs on the same device.
 *
 * To enable overall FW logging, the "cq_en" and "uart_en" enable bits in
 * hw->fw_log need to be set accordingly, e.g. based on user-provided input,
 * before initializing the device.
 *
 * When re/configuring FW logging, callers need to update the "cfg" elements of
 * the hw->fw_log.evnts array with the desired logging event configurations for
 * modules of interest. When disabling FW logging completely, the callers can
 * just pass false in the "enable" parameter. On completion, the function will
 * update the "cur" element of the hw->fw_log.evnts array with the resulting
 * logging event configurations of the modules that are being re/configured. FW
 * logging modules that are not part of a reconfiguration operation retain their
 * previous states.
 *
 * Before resetting the device, it is recommended that the driver disables FW
 * logging before shutting down the control queue. When disabling FW logging
 * ("enable" = false), the latest configurations of FW logging events stored in
 * hw->fw_log.evnts[] are not overridden to allow them to be reconfigured after
 * a device reset.
 *
 * When enabling FW logging to emit log messages via the Rx CQ during the
 * device's initialization phase, a mechanism alternative to interrupt handlers
 * needs to be used to extract FW log messages from the Rx CQ periodically and
 * to prevent the Rx CQ from being full and stalling other types of control
 * messages from FW to SW. Interrupts are typically disabled during the device's
 * initialization phase.
 */
static int ice_cfg_fw_log(struct ice_hw *hw, bool enable)
{
	struct ice_aqc_fw_logging *cmd;
	u16 i, chgs = 0, len = 0;
	struct ice_aq_desc desc;
	__le16 *data = NULL;
	u8 actv_evnts = 0;
	void *buf = NULL;
	int status = 0;

	if (!hw->fw_log.cq_en && !hw->fw_log.uart_en)
		return 0;

	/* Disable FW logging only when the control queue is still responsive */
	if (!enable &&
	    (!hw->fw_log.actv_evnts || !ice_check_sq_alive(hw, &hw->adminq)))
		return 0;

	/* Get current FW log settings */
	status = ice_get_fw_log_cfg(hw);
	if (status)
		return status;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging);
	cmd = &desc.params.fw_logging;

	/* Indicate which controls are valid */
	if (hw->fw_log.cq_en)
		cmd->log_ctrl_valid |= ICE_AQC_FW_LOG_AQ_VALID;

	if (hw->fw_log.uart_en)
		cmd->log_ctrl_valid |= ICE_AQC_FW_LOG_UART_VALID;

	if (enable) {
		/* Fill in an array of entries with FW logging modules and
		 * logging events being reconfigured.
		 */
		for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) {
			u16 val;

			/* Keep track of enabled event types */
			actv_evnts |= hw->fw_log.evnts[i].cfg;

			if (hw->fw_log.evnts[i].cfg == hw->fw_log.evnts[i].cur)
				continue;

			if (!data) {
				data = devm_kcalloc(ice_hw_to_dev(hw),
						    ICE_AQC_FW_LOG_ID_MAX,
						    sizeof(*data),
						    GFP_KERNEL);
				if (!data)
					return -ENOMEM;
			}

			val = i << ICE_AQC_FW_LOG_ID_S;
			val |= hw->fw_log.evnts[i].cfg << ICE_AQC_FW_LOG_EN_S;
			data[chgs++] = cpu_to_le16(val);
		}

		/* Only enable FW logging if at least one module is specified.
		 * If FW logging is currently enabled but all modules are not
		 * enabled to emit log messages, disable FW logging altogether.
		 */
		if (actv_evnts) {
			/* Leave if there is effectively no change */
			if (!chgs)
				goto out;

			if (hw->fw_log.cq_en)
				cmd->log_ctrl |= ICE_AQC_FW_LOG_AQ_EN;

			if (hw->fw_log.uart_en)
				cmd->log_ctrl |= ICE_AQC_FW_LOG_UART_EN;

			buf = data;
			len = sizeof(*data) * chgs;
			desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
		}
	}

	status = ice_aq_send_cmd(hw, &desc, buf, len, NULL);
	if (!status) {
		/* Update the current configuration to reflect events enabled.
		 * hw->fw_log.cq_en and hw->fw_log.uart_en indicate if the FW
		 * logging mode is enabled for the device. They do not reflect
		 * actual modules being enabled to emit log messages. So, their
		 * values remain unchanged even when all modules are disabled.
		 */
		u16 cnt = enable ? chgs : (u16)ICE_AQC_FW_LOG_ID_MAX;

		hw->fw_log.actv_evnts = actv_evnts;
		for (i = 0; i < cnt; i++) {
			u16 v, m;

			if (!enable) {
				/* When disabling all FW logging events as part
				 * of device's de-initialization, the original
				 * configurations are retained, and can be used
				 * to reconfigure FW logging later if the device
				 * is re-initialized.
				 */
				hw->fw_log.evnts[i].cur = 0;
				continue;
			}

			v = le16_to_cpu(data[i]);
			m = (v & ICE_AQC_FW_LOG_ID_M) >> ICE_AQC_FW_LOG_ID_S;
			hw->fw_log.evnts[m].cur = hw->fw_log.evnts[m].cfg;
		}
	}

out:
	devm_kfree(ice_hw_to_dev(hw), data);

	return status;
}
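/* Usage sketch (illustrative only, not driver code), following the kernel-doc
 * above: enable Rx CQ logging for one module before device init. The module
 * index and event-flags value shown are hypothetical:
 *
 *	hw->fw_log.cq_en = true;
 *	hw->fw_log.evnts[EXAMPLE_MODULE_ID].cfg = EXAMPLE_EVNT_FLAGS;
 *	status = ice_cfg_fw_log(hw, true);
 */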
/**
 * ice_output_fw_log
 * @hw: pointer to the HW struct
 * @desc: pointer to the AQ message descriptor
 * @buf: pointer to the buffer accompanying the AQ message
 *
 * Formats a FW Log message and outputs it via the standard driver logs.
 */
void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf)
{
	ice_debug(hw, ICE_DBG_FW_LOG, "[ FW Log Msg Start ]\n");
	ice_debug_array(hw, ICE_DBG_FW_LOG, 16, 1, (u8 *)buf,
			le16_to_cpu(desc->datalen));
	ice_debug(hw, ICE_DBG_FW_LOG, "[ FW Log Msg End ]\n");
}

/**
 * ice_get_itr_intrl_gran
 * @hw: pointer to the HW struct
 *
 * Determines the ITR/INTRL granularities based on the maximum aggregate
 * bandwidth according to the device's configuration during power-on.
 */
static void ice_get_itr_intrl_gran(struct ice_hw *hw)
{
	u8 max_agg_bw = (rd32(hw, GL_PWR_MODE_CTL) &
			 GL_PWR_MODE_CTL_CAR_MAX_BW_M) >>
			GL_PWR_MODE_CTL_CAR_MAX_BW_S;

	switch (max_agg_bw) {
	case ICE_MAX_AGG_BW_200G:
	case ICE_MAX_AGG_BW_100G:
	case ICE_MAX_AGG_BW_50G:
		hw->itr_gran = ICE_ITR_GRAN_ABOVE_25;
		hw->intrl_gran = ICE_INTRL_GRAN_ABOVE_25;
		break;
	case ICE_MAX_AGG_BW_25G:
		hw->itr_gran = ICE_ITR_GRAN_MAX_25;
		hw->intrl_gran = ICE_INTRL_GRAN_MAX_25;
		break;
	}
}
/**
 * ice_init_hw - main hardware initialization routine
 * @hw: pointer to the hardware structure
 */
int ice_init_hw(struct ice_hw *hw)
{
	struct ice_aqc_get_phy_caps_data *pcaps;
	u16 mac_buf_len;
	void *mac_buf;
	int status;

	/* Set MAC type based on DeviceID */
	status = ice_set_mac_type(hw);
	if (status)
		return status;

	hw->pf_id = (u8)(rd32(hw, PF_FUNC_RID) &
			 PF_FUNC_RID_FUNC_NUM_M) >>
		PF_FUNC_RID_FUNC_NUM_S;

	status = ice_reset(hw, ICE_RESET_PFR);
	if (status)
		return status;

	ice_get_itr_intrl_gran(hw);

	status = ice_create_all_ctrlq(hw);
	if (status)
		goto err_unroll_cqinit;

	/* Enable FW logging. Not fatal if this fails. */
	status = ice_cfg_fw_log(hw, true);
	if (status)
		ice_debug(hw, ICE_DBG_INIT, "Failed to enable FW logging.\n");

	status = ice_clear_pf_cfg(hw);
	if (status)
		goto err_unroll_cqinit;

	/* Set bit to enable Flow Director filters */
	wr32(hw, PFQF_FD_ENA, PFQF_FD_ENA_FD_ENA_M);
	INIT_LIST_HEAD(&hw->fdir_list_head);

	ice_clear_pxe_mode(hw);

	status = ice_init_nvm(hw);
	if (status)
		goto err_unroll_cqinit;

	status = ice_get_caps(hw);
	if (status)
		goto err_unroll_cqinit;

	if (!hw->port_info)
		hw->port_info = devm_kzalloc(ice_hw_to_dev(hw),
					     sizeof(*hw->port_info),
					     GFP_KERNEL);
	if (!hw->port_info) {
		status = -ENOMEM;
		goto err_unroll_cqinit;
	}

	/* set the back pointer to HW */
	hw->port_info->hw = hw;

	/* Initialize port_info struct with switch configuration data */
	status = ice_get_initial_sw_cfg(hw);
	if (status)
		goto err_unroll_alloc;

	hw->evb_veb = true;

	/* init xarray for identifying scheduling nodes uniquely */
	xa_init_flags(&hw->port_info->sched_node_ids, XA_FLAGS_ALLOC);

	/* Query the allocated resources for Tx scheduler */
	status = ice_sched_query_res_alloc(hw);
	if (status) {
		ice_debug(hw, ICE_DBG_SCHED, "Failed to get scheduler allocated resources\n");
		goto err_unroll_alloc;
	}
	ice_sched_get_psm_clk_freq(hw);

	/* Initialize port_info struct with scheduler data */
	status = ice_sched_init_port(hw->port_info);
	if (status)
		goto err_unroll_sched;

	pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
	if (!pcaps) {
		status = -ENOMEM;
		goto err_unroll_sched;
	}

	/* Initialize port_info struct with PHY capabilities */
	status = ice_aq_get_phy_caps(hw->port_info, false,
				     ICE_AQC_REPORT_TOPO_CAP_MEDIA, pcaps,
				     NULL);
	devm_kfree(ice_hw_to_dev(hw), pcaps);
	if (status)
		dev_warn(ice_hw_to_dev(hw), "Get PHY capabilities failed status = %d, continuing anyway\n",
			 status);

	/* Initialize port_info struct with link information */
	status = ice_aq_get_link_info(hw->port_info, false, NULL, NULL);
	if (status)
		goto err_unroll_sched;

	/* need a valid SW entry point to build a Tx tree */
	if (!hw->sw_entry_point_layer) {
		ice_debug(hw, ICE_DBG_SCHED, "invalid sw entry point\n");
		status = -EIO;
		goto err_unroll_sched;
	}
	INIT_LIST_HEAD(&hw->agg_list);
	/* Initialize max burst size */
	if (!hw->max_burst_size)
		ice_cfg_rl_burst_size(hw, ICE_SCHED_DFLT_BURST_SIZE);

	status = ice_init_fltr_mgmt_struct(hw);
	if (status)
		goto err_unroll_sched;

	/* Get MAC information */
	/* A single port can report up to two (LAN and WoL) addresses */
	mac_buf = devm_kcalloc(ice_hw_to_dev(hw), 2,
			       sizeof(struct ice_aqc_manage_mac_read_resp),
			       GFP_KERNEL);
	mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);

	if (!mac_buf) {
		status = -ENOMEM;
		goto err_unroll_fltr_mgmt_struct;
	}

	status = ice_aq_manage_mac_read(hw, mac_buf, mac_buf_len, NULL);
	devm_kfree(ice_hw_to_dev(hw), mac_buf);

	if (status)
		goto err_unroll_fltr_mgmt_struct;
	/* enable jumbo frame support at MAC level */
	status = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL);
	if (status)
		goto err_unroll_fltr_mgmt_struct;
	/* Obtain counter base index which would be used by flow director */
	status = ice_alloc_fd_res_cntr(hw, &hw->fd_ctr_base);
	if (status)
		goto err_unroll_fltr_mgmt_struct;
	status = ice_init_hw_tbls(hw);
	if (status)
		goto err_unroll_fltr_mgmt_struct;
	mutex_init(&hw->tnl_lock);
	return 0;

err_unroll_fltr_mgmt_struct:
	ice_cleanup_fltr_mgmt_struct(hw);
err_unroll_sched:
	ice_sched_cleanup_all(hw);
err_unroll_alloc:
	devm_kfree(ice_hw_to_dev(hw), hw->port_info);
err_unroll_cqinit:
	ice_destroy_all_ctrlq(hw);
	return status;
}
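/* Usage sketch (illustrative only, not driver code): ice_init_hw() above and
 * ice_deinit_hw() below are paired across the device lifetime, roughly:
 *
 *	// in the PCI probe path
 *	status = ice_init_hw(hw);
 *	if (status)
 *		return status;	// ice_init_hw() already unrolled itself
 *
 *	// in the PCI remove path, during nominal operation
 *	ice_deinit_hw(hw);
 */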
/**
 * ice_deinit_hw - unroll initialization operations done by ice_init_hw
 * @hw: pointer to the hardware structure
 *
 * This should be called only during nominal operation, not as a result of
 * ice_init_hw() failing since ice_init_hw() will take care of unrolling
 * applicable initializations if it fails for any reason.
 */
void ice_deinit_hw(struct ice_hw *hw)
{
	ice_free_fd_res_cntr(hw, hw->fd_ctr_base);
	ice_cleanup_fltr_mgmt_struct(hw);

	ice_sched_cleanup_all(hw);
	ice_sched_clear_agg(hw);
	ice_free_seg(hw);
	ice_free_hw_tbls(hw);
	mutex_destroy(&hw->tnl_lock);

	/* Attempt to disable FW logging before shutting down control queues */
	ice_cfg_fw_log(hw, false);
	ice_destroy_all_ctrlq(hw);

	/* Clear VSI contexts if not already cleared */
	ice_clear_all_vsi_ctx(hw);
}

/**
 * ice_check_reset - Check to see if a global reset is complete
 * @hw: pointer to the hardware structure
 */
int ice_check_reset(struct ice_hw *hw)
{
	u32 cnt, reg = 0, grst_timeout, uld_mask;

	/* Poll for Device Active state in case a recent CORER, GLOBR,
	 * or EMPR has occurred. The grst delay value is in 100ms units.
	 * Add 1sec for outstanding AQ commands that can take a long time.
	 */
	grst_timeout = ((rd32(hw, GLGEN_RSTCTL) & GLGEN_RSTCTL_GRSTDEL_M) >>
			GLGEN_RSTCTL_GRSTDEL_S) + 10;

	for (cnt = 0; cnt < grst_timeout; cnt++) {
		mdelay(100);
		reg = rd32(hw, GLGEN_RSTAT);
		if (!(reg & GLGEN_RSTAT_DEVSTATE_M))
			break;
	}

	if (cnt == grst_timeout) {
		ice_debug(hw, ICE_DBG_INIT, "Global reset polling failed to complete.\n");
		return -EIO;
	}

#define ICE_RESET_DONE_MASK	(GLNVM_ULD_PCIER_DONE_M |\
				 GLNVM_ULD_PCIER_DONE_1_M |\
				 GLNVM_ULD_CORER_DONE_M |\
				 GLNVM_ULD_GLOBR_DONE_M |\
				 GLNVM_ULD_POR_DONE_M |\
				 GLNVM_ULD_POR_DONE_1_M |\
				 GLNVM_ULD_PCIER_DONE_2_M)

	uld_mask = ICE_RESET_DONE_MASK | (hw->func_caps.common_cap.rdma ?
					  GLNVM_ULD_PE_DONE_M : 0);

	/* Device is Active; check Global Reset processes are done */
	for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
		reg = rd32(hw, GLNVM_ULD) & uld_mask;
		if (reg == uld_mask) {
			ice_debug(hw, ICE_DBG_INIT, "Global reset processes done. %d\n", cnt);
			break;
		}
		mdelay(10);
	}

	if (cnt == ICE_PF_RESET_WAIT_COUNT) {
		ice_debug(hw, ICE_DBG_INIT, "Wait for Reset Done timed out. GLNVM_ULD = 0x%x\n",
			  reg);
		return -EIO;
	}

	return 0;
}
/**
 * ice_pf_reset - Reset the PF
 * @hw: pointer to the hardware structure
 *
 * If a global reset has been triggered, this function checks
 * for its completion and then issues the PF reset
 */
static int ice_pf_reset(struct ice_hw *hw)
{
	u32 cnt, reg;

	/* If at function entry a global reset was already in progress, i.e.
	 * state is not 'device active' or any of the reset done bits are not
	 * set in GLNVM_ULD, there is no need for a PF Reset; poll until the
	 * global reset is done.
	 */
	if ((rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_DEVSTATE_M) ||
	    (rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK) ^ ICE_RESET_DONE_MASK) {
		/* poll on global reset currently in progress until done */
		if (ice_check_reset(hw))
			return -EIO;

		return 0;
	}

	/* Reset the PF */
	reg = rd32(hw, PFGEN_CTRL);

	wr32(hw, PFGEN_CTRL, (reg | PFGEN_CTRL_PFSWR_M));

	/* Wait for the PFR to complete. The wait time is the global config lock
	 * timeout plus the PFR timeout which will account for a possible reset
	 * that is occurring during a download package operation.
	 */
	for (cnt = 0; cnt < ICE_GLOBAL_CFG_LOCK_TIMEOUT +
	     ICE_PF_RESET_WAIT_COUNT; cnt++) {
		reg = rd32(hw, PFGEN_CTRL);
		if (!(reg & PFGEN_CTRL_PFSWR_M))
			break;

		mdelay(1);
	}

	if (cnt == ICE_GLOBAL_CFG_LOCK_TIMEOUT + ICE_PF_RESET_WAIT_COUNT) {
		ice_debug(hw, ICE_DBG_INIT, "PF reset polling failed to complete.\n");
		return -EIO;
	}

	return 0;
}
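/* Usage sketch (illustrative only, not driver code) for ice_reset() below:
 * request a PF reset and fall back to a CoreR if it fails. The fallback
 * policy shown is hypothetical:
 *
 *	if (ice_reset(hw, ICE_RESET_PFR))
 *		status = ice_reset(hw, ICE_RESET_CORER);
 */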
/**
 * ice_reset - Perform different types of reset
 * @hw: pointer to the hardware structure
 * @req: reset request
 *
 * This function triggers a reset as specified by the req parameter.
 *
 * Note:
 * If anything other than a PF reset is triggered, PXE mode is restored.
 * This has to be cleared using ice_clear_pxe_mode again, once the AQ
 * interface has been restored in the rebuild flow.
 */
int ice_reset(struct ice_hw *hw, enum ice_reset_req req)
{
	u32 val = 0;

	switch (req) {
	case ICE_RESET_PFR:
		return ice_pf_reset(hw);
	case ICE_RESET_CORER:
		ice_debug(hw, ICE_DBG_INIT, "CoreR requested\n");
		val = GLGEN_RTRIG_CORER_M;
		break;
	case ICE_RESET_GLOBR:
		ice_debug(hw, ICE_DBG_INIT, "GlobalR requested\n");
		val = GLGEN_RTRIG_GLOBR_M;
		break;
	default:
		return -EINVAL;
	}

	val |= rd32(hw, GLGEN_RTRIG);
	wr32(hw, GLGEN_RTRIG, val);
	ice_flush(hw);

	/* wait for the FW to be ready */
	return ice_check_reset(hw);
}

/**
 * ice_copy_rxq_ctx_to_hw
 * @hw: pointer to the hardware structure
 * @ice_rxq_ctx: pointer to the rxq context
 * @rxq_index: the index of the Rx queue
 *
 * Copies rxq context from dense structure to HW register space
 */
static int
ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
{
	u8 i;

	if (!ice_rxq_ctx)
		return -EINVAL;

	if (rxq_index > QRX_CTRL_MAX_INDEX)
		return -EINVAL;

	/* Copy each dword separately to HW */
	for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
		wr32(hw, QRX_CONTEXT(i, rxq_index),
		     *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));

		ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i,
			  *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
	}

	return 0;
}

/* LAN Rx Queue Context */
static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
	/* Field		Width	LSB */
	ICE_CTX_STORE(ice_rlan_ctx, head,		13,	0),
	ICE_CTX_STORE(ice_rlan_ctx, cpuid,		8,	13),
	ICE_CTX_STORE(ice_rlan_ctx, base,		57,	32),
	ICE_CTX_STORE(ice_rlan_ctx, qlen,		13,	89),
	ICE_CTX_STORE(ice_rlan_ctx, dbuf,		7,	102),
	ICE_CTX_STORE(ice_rlan_ctx, hbuf,		5,	109),
	ICE_CTX_STORE(ice_rlan_ctx, dtype,		2,	114),
	ICE_CTX_STORE(ice_rlan_ctx, dsize,		1,	116),
	ICE_CTX_STORE(ice_rlan_ctx, crcstrip,		1,	117),
	ICE_CTX_STORE(ice_rlan_ctx, l2tsel,		1,	119),
	ICE_CTX_STORE(ice_rlan_ctx, hsplit_0,		4,	120),
	ICE_CTX_STORE(ice_rlan_ctx, hsplit_1,		2,	124),
	ICE_CTX_STORE(ice_rlan_ctx, showiv,		1,	127),
	ICE_CTX_STORE(ice_rlan_ctx, rxmax,		14,	174),
	ICE_CTX_STORE(ice_rlan_ctx, tphrdesc_ena,	1,	193),
	ICE_CTX_STORE(ice_rlan_ctx, tphwdesc_ena,	1,	194),
	ICE_CTX_STORE(ice_rlan_ctx, tphdata_ena,	1,	195),
	ICE_CTX_STORE(ice_rlan_ctx, tphhead_ena,	1,	196),
	ICE_CTX_STORE(ice_rlan_ctx, lrxqthresh,		3,	198),
	ICE_CTX_STORE(ice_rlan_ctx, prefena,		1,	201),
	{ 0 }
};

/**
 * ice_write_rxq_ctx
 * @hw: pointer to the hardware structure
 * @rlan_ctx: pointer to the rxq context
 * @rxq_index: the index of the Rx queue
 *
 * Converts rxq context from sparse to dense structure and then writes
 * it to HW register space and enables the hardware to prefetch descriptors
 * instead of only fetching them on demand
 */
int
ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
		  u32 rxq_index)
{
	u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 };

	if (!rlan_ctx)
		return -EINVAL;

	rlan_ctx->prefena = 1;

	ice_set_ctx(hw, (u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info);
	return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index);
}
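/* Usage sketch (illustrative only, not driver code): fill the sparse Rx
 * queue context and let ice_write_rxq_ctx() pack and program it. The 128-byte
 * units mirror how the ring setup code uses these fields; the ring variables
 * here are assumptions:
 *
 *	struct ice_rlan_ctx rlan_ctx = { 0 };
 *
 *	rlan_ctx.base = ring_dma >> 7;		// base in 128-byte units
 *	rlan_ctx.qlen = ring_count;
 *	rlan_ctx.dbuf = rx_buf_len >> 7;	// data buffer, 128-byte units
 *	status = ice_write_rxq_ctx(hw, &rlan_ctx, rxq_index);
 */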
/* LAN Tx Queue Context */
const struct ice_ctx_ele ice_tlan_ctx_info[] = {
	/* Field			Width	LSB */
	ICE_CTX_STORE(ice_tlan_ctx, base,			57,	0),
	ICE_CTX_STORE(ice_tlan_ctx, port_num,			3,	57),
	ICE_CTX_STORE(ice_tlan_ctx, cgd_num,			5,	60),
	ICE_CTX_STORE(ice_tlan_ctx, pf_num,			3,	65),
	ICE_CTX_STORE(ice_tlan_ctx, vmvf_num,			10,	68),
	ICE_CTX_STORE(ice_tlan_ctx, vmvf_type,			2,	78),
	ICE_CTX_STORE(ice_tlan_ctx, src_vsi,			10,	80),
	ICE_CTX_STORE(ice_tlan_ctx, tsyn_ena,			1,	90),
	ICE_CTX_STORE(ice_tlan_ctx, internal_usage_flag,	1,	91),
	ICE_CTX_STORE(ice_tlan_ctx, alt_vlan,			1,	92),
	ICE_CTX_STORE(ice_tlan_ctx, cpuid,			8,	93),
	ICE_CTX_STORE(ice_tlan_ctx, wb_mode,			1,	101),
	ICE_CTX_STORE(ice_tlan_ctx, tphrd_desc,			1,	102),
	ICE_CTX_STORE(ice_tlan_ctx, tphrd,			1,	103),
	ICE_CTX_STORE(ice_tlan_ctx, tphwr_desc,			1,	104),
	ICE_CTX_STORE(ice_tlan_ctx, cmpq_id,			9,	105),
	ICE_CTX_STORE(ice_tlan_ctx, qnum_in_func,		14,	114),
	ICE_CTX_STORE(ice_tlan_ctx, itr_notification_mode,	1,	128),
	ICE_CTX_STORE(ice_tlan_ctx, adjust_prof_id,		6,	129),
	ICE_CTX_STORE(ice_tlan_ctx, qlen,			13,	135),
	ICE_CTX_STORE(ice_tlan_ctx, quanta_prof_idx,		4,	148),
	ICE_CTX_STORE(ice_tlan_ctx, tso_ena,			1,	152),
	ICE_CTX_STORE(ice_tlan_ctx, tso_qnum,			11,	153),
	ICE_CTX_STORE(ice_tlan_ctx, legacy_int,			1,	164),
	ICE_CTX_STORE(ice_tlan_ctx, drop_ena,			1,	165),
	ICE_CTX_STORE(ice_tlan_ctx, cache_prof_idx,		2,	166),
	ICE_CTX_STORE(ice_tlan_ctx, pkt_shaper_prof_idx,	3,	168),
	ICE_CTX_STORE(ice_tlan_ctx, int_q_state,		122,	171),
	{ 0 }
};

/* Sideband Queue command wrappers */

/**
 * ice_sbq_send_cmd - send Sideband Queue command to Sideband Queue
 * @hw: pointer to the HW struct
 * @desc: descriptor describing the command
 * @buf: buffer to use for indirect commands (NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (0 for direct commands)
 * @cd: pointer to command details structure
 */
static int
ice_sbq_send_cmd(struct ice_hw *hw, struct ice_sbq_cmd_desc *desc,
		 void *buf, u16 buf_size, struct ice_sq_cd *cd)
{
	return ice_sq_send_cmd(hw, ice_get_sbq(hw),
			       (struct ice_aq_desc *)desc, buf, buf_size, cd);
}

/**
 * ice_sbq_rw_reg - Fill Sideband Queue command
 * @hw: pointer to the HW struct
 * @in: message info to be filled in descriptor
 */
int ice_sbq_rw_reg(struct ice_hw *hw, struct ice_sbq_msg_input *in)
{
	struct ice_sbq_cmd_desc desc = {0};
	struct ice_sbq_msg_req msg = {0};
	u16 msg_len;
	int status;

	msg_len = sizeof(msg);

	msg.dest_dev = in->dest_dev;
	msg.opcode = in->opcode;
	msg.flags = ICE_SBQ_MSG_FLAGS;
	msg.sbe_fbe = ICE_SBQ_MSG_SBE_FBE;
	msg.msg_addr_low = cpu_to_le16(in->msg_addr_low);
	msg.msg_addr_high = cpu_to_le32(in->msg_addr_high);

	if (in->opcode)
		msg.data = cpu_to_le32(in->data);
	else
		/* data read comes back in completion, so shorten the struct by
		 * sizeof(msg.data)
		 */
		msg_len -= sizeof(msg.data);

	desc.flags = cpu_to_le16(ICE_AQ_FLAG_RD);
	desc.opcode = cpu_to_le16(ice_sbq_opc_neigh_dev_req);
	desc.param0.cmd_len = cpu_to_le16(msg_len);
	status = ice_sbq_send_cmd(hw, &desc, &msg, msg_len, NULL);
	if (!status && !in->opcode)
		in->data = le32_to_cpu
			(((struct ice_sbq_msg_cmpl *)&msg)->data);
	return status;
}

/* FW Admin Queue command wrappers */

/* Software lock/mutex that is meant to be held while the Global Config Lock
 * in firmware is acquired by the software to prevent most (but not all) types
 * of AQ commands from being sent to FW
 */
DEFINE_MUTEX(ice_global_cfg_lock_sw);

/**
 * ice_should_retry_sq_send_cmd
 * @opcode: AQ opcode
 *
 * Decide if we should retry the send command routine for the ATQ, depending
 * on the opcode.
 */
static bool ice_should_retry_sq_send_cmd(u16 opcode)
{
	switch (opcode) {
	case ice_aqc_opc_get_link_topo:
	case ice_aqc_opc_lldp_stop:
	case ice_aqc_opc_lldp_start:
	case ice_aqc_opc_lldp_filter_ctrl:
		return true;
	}

	return false;
}

/**
 * ice_sq_send_cmd_retry - send command to Control Queue (ATQ)
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 * @desc: prefilled descriptor describing the command
 * @buf: buffer to use for indirect commands (or NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (or 0 for direct commands)
 * @cd: pointer to command details structure
 *
 * Retry sending the FW Admin Queue command, multiple times, to the FW Admin
 * Queue if the EBUSY AQ error is returned.
 */
static int
ice_sq_send_cmd_retry(struct ice_hw *hw, struct ice_ctl_q_info *cq,
		      struct ice_aq_desc *desc, void *buf, u16 buf_size,
		      struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc_cpy;
	bool is_cmd_for_retry;
	u8 idx = 0;
	u16 opcode;
	int status;

	opcode = le16_to_cpu(desc->opcode);
	is_cmd_for_retry = ice_should_retry_sq_send_cmd(opcode);
	memset(&desc_cpy, 0, sizeof(desc_cpy));

	if (is_cmd_for_retry) {
		/* All retryable cmds are direct, without buf. */
		WARN_ON(buf);

		memcpy(&desc_cpy, desc, sizeof(desc_cpy));
	}

	do {
		status = ice_sq_send_cmd(hw, cq, desc, buf, buf_size, cd);

		if (!is_cmd_for_retry || !status ||
		    hw->adminq.sq_last_status != ICE_AQ_RC_EBUSY)
			break;

		memcpy(desc, &desc_cpy, sizeof(desc_cpy));

		msleep(ICE_SQ_SEND_DELAY_TIME_MS);

	} while (++idx < ICE_SQ_SEND_MAX_EXECUTE);

	return status;
}
/**
 * ice_aq_send_cmd - send FW Admin Queue command to FW Admin Queue
 * @hw: pointer to the HW struct
 * @desc: descriptor describing the command
 * @buf: buffer to use for indirect commands (NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (0 for direct commands)
 * @cd: pointer to command details structure
 *
 * Helper function to send FW Admin Queue commands to the FW Admin Queue.
 */
int
ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
		u16 buf_size, struct ice_sq_cd *cd)
{
	struct ice_aqc_req_res *cmd = &desc->params.res_owner;
	bool lock_acquired = false;
	int status;

	/* When a package download is in process (i.e. when the firmware's
	 * Global Configuration Lock resource is held), only the Download
	 * Package, Get Version, Get Package Info List, Upload Section,
	 * Update Package, Set Port Parameters, Get/Set VLAN Mode Parameters,
	 * Add Recipe, Set Recipes to Profile Association, Get Recipe, Get
	 * Recipes to Profile Association, and Release Resource (with resource
	 * ID set to Global Config Lock) AdminQ commands are allowed; all others
	 * must block until the package download completes and the Global Config
	 * Lock is released. See also ice_acquire_global_cfg_lock().
	 */
	switch (le16_to_cpu(desc->opcode)) {
	case ice_aqc_opc_download_pkg:
	case ice_aqc_opc_get_pkg_info_list:
	case ice_aqc_opc_get_ver:
	case ice_aqc_opc_upload_section:
	case ice_aqc_opc_update_pkg:
	case ice_aqc_opc_set_port_params:
	case ice_aqc_opc_get_vlan_mode_parameters:
	case ice_aqc_opc_set_vlan_mode_parameters:
	case ice_aqc_opc_add_recipe:
	case ice_aqc_opc_recipe_to_profile:
	case ice_aqc_opc_get_recipe:
	case ice_aqc_opc_get_recipe_to_profile:
		break;
	case ice_aqc_opc_release_res:
		if (le16_to_cpu(cmd->res_id) == ICE_AQC_RES_ID_GLBL_LOCK)
			break;
		fallthrough;
	default:
		mutex_lock(&ice_global_cfg_lock_sw);
		lock_acquired = true;
		break;
	}

	status = ice_sq_send_cmd_retry(hw, &hw->adminq, desc, buf, buf_size, cd);
	if (lock_acquired)
		mutex_unlock(&ice_global_cfg_lock_sw);

	return status;
}

/**
 * ice_aq_get_fw_ver
 * @hw: pointer to the HW struct
 * @cd: pointer to command details structure or NULL
 *
 * Get the firmware version (0x0001) from the admin queue commands
 */
int ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd)
{
	struct ice_aqc_get_ver *resp;
	struct ice_aq_desc desc;
	int status;

	resp = &desc.params.get_ver;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver);

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);

	if (!status) {
		hw->fw_branch = resp->fw_branch;
		hw->fw_maj_ver = resp->fw_major;
		hw->fw_min_ver = resp->fw_minor;
		hw->fw_patch = resp->fw_patch;
		hw->fw_build = le32_to_cpu(resp->fw_build);
		hw->api_branch = resp->api_branch;
		hw->api_maj_ver = resp->api_major;
		hw->api_min_ver = resp->api_minor;
		hw->api_patch = resp->api_patch;
	}

	return status;
}

/**
 * ice_aq_send_driver_ver
 * @hw: pointer to the HW struct
 * @dv: driver's major, minor version
 * @cd: pointer to command details structure or NULL
 *
 * Send the driver version (0x0002) to the firmware
 */
int
ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv,
		       struct ice_sq_cd *cd)
{
	struct ice_aqc_driver_ver *cmd;
	struct ice_aq_desc desc;
	u16 len;

	cmd = &desc.params.driver_ver;

	if (!dv)
		return -EINVAL;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_ver);

	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
	cmd->major_ver = dv->major_ver;
	cmd->minor_ver = dv->minor_ver;
	cmd->build_ver = dv->build_ver;
	cmd->subbuild_ver = dv->subbuild_ver;

	len = 0;
	while (len < sizeof(dv->driver_string) &&
	       isascii(dv->driver_string[len]) && dv->driver_string[len])
		len++;

	return ice_aq_send_cmd(hw, &desc, dv->driver_string, len, cd);
}
1789 */ 1790 int ice_aq_q_shutdown(struct ice_hw *hw, bool unloading) 1791 { 1792 struct ice_aqc_q_shutdown *cmd; 1793 struct ice_aq_desc desc; 1794 1795 cmd = &desc.params.q_shutdown; 1796 1797 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_q_shutdown); 1798 1799 if (unloading) 1800 cmd->driver_unloading = ICE_AQC_DRIVER_UNLOADING; 1801 1802 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 1803 } 1804 1805 /** 1806 * ice_aq_req_res 1807 * @hw: pointer to the HW struct 1808 * @res: resource ID 1809 * @access: access type 1810 * @sdp_number: resource number 1811 * @timeout: the maximum time in ms that the driver may hold the resource 1812 * @cd: pointer to command details structure or NULL 1813 * 1814 * Requests common resource using the admin queue commands (0x0008). 1815 * When attempting to acquire the Global Config Lock, the driver can 1816 * learn of three states: 1817 * 1) 0 - acquired lock, and can perform download package 1818 * 2) -EIO - did not get lock, driver should fail to load 1819 * 3) -EALREADY - did not get lock, but another driver has 1820 * successfully downloaded the package; the driver does 1821 * not have to download the package and can continue 1822 * loading 1823 * 1824 * Note that if the caller is in an acquire lock, perform action, release lock 1825 * phase of operation, it is possible that the FW may detect a timeout and issue 1826 * a CORER. In this case, the driver will receive a CORER interrupt and will 1827 * have to determine its cause. The calling thread that is handling this flow 1828 * will likely get an error propagated back to it indicating the Download 1829 * Package, Update Package or the Release Resource AQ commands timed out. 1830 */ 1831 static int 1832 ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res, 1833 enum ice_aq_res_access_type access, u8 sdp_number, u32 *timeout, 1834 struct ice_sq_cd *cd) 1835 { 1836 struct ice_aqc_req_res *cmd_resp; 1837 struct ice_aq_desc desc; 1838 int status; 1839 1840 cmd_resp = &desc.params.res_owner; 1841 1842 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_req_res); 1843 1844 cmd_resp->res_id = cpu_to_le16(res); 1845 cmd_resp->access_type = cpu_to_le16(access); 1846 cmd_resp->res_number = cpu_to_le32(sdp_number); 1847 cmd_resp->timeout = cpu_to_le32(*timeout); 1848 *timeout = 0; 1849 1850 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 1851 1852 /* The completion specifies the maximum time in ms that the driver 1853 * may hold the resource in the Timeout field. 1854 */ 1855 1856 /* Global config lock response utilizes an additional status field. 1857 * 1858 * If the Global config lock resource is held by some other driver, the 1859 * command completes with ICE_AQ_RES_GLBL_IN_PROG in the status field 1860 * and the timeout field indicates the maximum time the current owner 1861 * of the resource has to free it. 
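	 *
	 * As an illustrative sketch (hypothetical caller, not the driver's
	 * real package download path), the three outcomes of acquiring the
	 * Global Config Lock via ice_acquire_res() map to caller actions
	 * like so:
	 *
	 *	status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID,
	 *				 ICE_RES_WRITE,
	 *				 ICE_GLOBAL_CFG_LOCK_TIMEOUT);
	 *	if (!status)
	 *		(download the package, then ice_release_res())
	 *	else if (status == -EALREADY)
	 *		(another PF already downloaded it; continue loading)
	 *	else
	 *		(-EIO: fail the driver load)
	 *
	 * The timeout define is assumed here for illustration.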
	 */
	if (res == ICE_GLOBAL_CFG_LOCK_RES_ID) {
		if (le16_to_cpu(cmd_resp->status) == ICE_AQ_RES_GLBL_SUCCESS) {
			*timeout = le32_to_cpu(cmd_resp->timeout);
			return 0;
		} else if (le16_to_cpu(cmd_resp->status) ==
			   ICE_AQ_RES_GLBL_IN_PROG) {
			*timeout = le32_to_cpu(cmd_resp->timeout);
			return -EIO;
		} else if (le16_to_cpu(cmd_resp->status) ==
			   ICE_AQ_RES_GLBL_DONE) {
			return -EALREADY;
		}

		/* invalid FW response, force a timeout immediately */
		*timeout = 0;
		return -EIO;
	}

	/* If the resource is held by some other driver, the command completes
	 * with a busy return value and the timeout field indicates the maximum
	 * time the current owner of the resource has to free it.
	 */
	if (!status || hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY)
		*timeout = le32_to_cpu(cmd_resp->timeout);

	return status;
}

/**
 * ice_aq_release_res
 * @hw: pointer to the HW struct
 * @res: resource ID
 * @sdp_number: resource number
 * @cd: pointer to command details structure or NULL
 *
 * Release a common resource using the admin queue commands (0x0009).
 */
static int
ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number,
		   struct ice_sq_cd *cd)
{
	struct ice_aqc_req_res *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.res_owner;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_release_res);

	cmd->res_id = cpu_to_le16(res);
	cmd->res_number = cpu_to_le32(sdp_number);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_acquire_res
 * @hw: pointer to the HW structure
 * @res: resource ID
 * @access: access type (read or write)
 * @timeout: timeout in milliseconds
 *
 * This function will attempt to acquire the ownership of a resource.
 */
int
ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
		enum ice_aq_res_access_type access, u32 timeout)
{
#define ICE_RES_POLLING_DELAY_MS	10
	u32 delay = ICE_RES_POLLING_DELAY_MS;
	u32 time_left = timeout;
	int status;

	status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);

	/* A return code of -EALREADY means that another driver has
	 * previously acquired the resource and performed any necessary updates;
	 * in this case the caller does not obtain the resource and has no
	 * further work to do.
	 */
	if (status == -EALREADY)
		goto ice_acquire_res_exit;

	if (status)
		ice_debug(hw, ICE_DBG_RES, "resource %d acquire type %d failed.\n", res, access);

	/* If necessary, poll until the current lock owner times out */
	timeout = time_left;
	while (status && timeout && time_left) {
		mdelay(delay);
		timeout = (timeout > delay) ?
timeout - delay : 0; 1953 status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL); 1954 1955 if (status == -EALREADY) 1956 /* lock free, but no work to do */ 1957 break; 1958 1959 if (!status) 1960 /* lock acquired */ 1961 break; 1962 } 1963 if (status && status != -EALREADY) 1964 ice_debug(hw, ICE_DBG_RES, "resource acquire timed out.\n"); 1965 1966 ice_acquire_res_exit: 1967 if (status == -EALREADY) { 1968 if (access == ICE_RES_WRITE) 1969 ice_debug(hw, ICE_DBG_RES, "resource indicates no work to do.\n"); 1970 else 1971 ice_debug(hw, ICE_DBG_RES, "Warning: -EALREADY not expected\n"); 1972 } 1973 return status; 1974 } 1975 1976 /** 1977 * ice_release_res 1978 * @hw: pointer to the HW structure 1979 * @res: resource ID 1980 * 1981 * This function will release a resource using the proper Admin Command. 1982 */ 1983 void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res) 1984 { 1985 unsigned long timeout; 1986 int status; 1987 1988 /* there are some rare cases when trying to release the resource 1989 * results in an admin queue timeout, so handle them correctly 1990 */ 1991 timeout = jiffies + 10 * ICE_CTL_Q_SQ_CMD_TIMEOUT; 1992 do { 1993 status = ice_aq_release_res(hw, res, 0, NULL); 1994 if (status != -EIO) 1995 break; 1996 usleep_range(1000, 2000); 1997 } while (time_before(jiffies, timeout)); 1998 } 1999 2000 /** 2001 * ice_aq_alloc_free_res - command to allocate/free resources 2002 * @hw: pointer to the HW struct 2003 * @num_entries: number of resource entries in buffer 2004 * @buf: Indirect buffer to hold data parameters and response 2005 * @buf_size: size of buffer for indirect commands 2006 * @opc: pass in the command opcode 2007 * @cd: pointer to command details structure or NULL 2008 * 2009 * Helper function to allocate/free resources using the admin queue commands 2010 */ 2011 int 2012 ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries, 2013 struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size, 2014 enum ice_adminq_opc opc, struct ice_sq_cd *cd) 2015 { 2016 struct ice_aqc_alloc_free_res_cmd *cmd; 2017 struct ice_aq_desc desc; 2018 2019 cmd = &desc.params.sw_res_ctrl; 2020 2021 if (!buf) 2022 return -EINVAL; 2023 2024 if (buf_size < flex_array_size(buf, elem, num_entries)) 2025 return -EINVAL; 2026 2027 ice_fill_dflt_direct_cmd_desc(&desc, opc); 2028 2029 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 2030 2031 cmd->num_entries = cpu_to_le16(num_entries); 2032 2033 return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); 2034 } 2035 2036 /** 2037 * ice_alloc_hw_res - allocate resource 2038 * @hw: pointer to the HW struct 2039 * @type: type of resource 2040 * @num: number of resources to allocate 2041 * @btm: allocate from bottom 2042 * @res: pointer to array that will receive the resources 2043 */ 2044 int 2045 ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res) 2046 { 2047 struct ice_aqc_alloc_free_res_elem *buf; 2048 u16 buf_len; 2049 int status; 2050 2051 buf_len = struct_size(buf, elem, num); 2052 buf = kzalloc(buf_len, GFP_KERNEL); 2053 if (!buf) 2054 return -ENOMEM; 2055 2056 /* Prepare buffer to allocate resource. 
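	 * On successful completion, firmware writes the IDs of the newly
	 * allocated resources back into buf->elem, which is what the
	 * memcpy() to @res below picks up.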
*/ 2057 buf->num_elems = cpu_to_le16(num); 2058 buf->res_type = cpu_to_le16(type | ICE_AQC_RES_TYPE_FLAG_DEDICATED | 2059 ICE_AQC_RES_TYPE_FLAG_IGNORE_INDEX); 2060 if (btm) 2061 buf->res_type |= cpu_to_le16(ICE_AQC_RES_TYPE_FLAG_SCAN_BOTTOM); 2062 2063 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len, 2064 ice_aqc_opc_alloc_res, NULL); 2065 if (status) 2066 goto ice_alloc_res_exit; 2067 2068 memcpy(res, buf->elem, sizeof(*buf->elem) * num); 2069 2070 ice_alloc_res_exit: 2071 kfree(buf); 2072 return status; 2073 } 2074 2075 /** 2076 * ice_free_hw_res - free allocated HW resource 2077 * @hw: pointer to the HW struct 2078 * @type: type of resource to free 2079 * @num: number of resources 2080 * @res: pointer to array that contains the resources to free 2081 */ 2082 int ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res) 2083 { 2084 struct ice_aqc_alloc_free_res_elem *buf; 2085 u16 buf_len; 2086 int status; 2087 2088 buf_len = struct_size(buf, elem, num); 2089 buf = kzalloc(buf_len, GFP_KERNEL); 2090 if (!buf) 2091 return -ENOMEM; 2092 2093 /* Prepare buffer to free resource. */ 2094 buf->num_elems = cpu_to_le16(num); 2095 buf->res_type = cpu_to_le16(type); 2096 memcpy(buf->elem, res, sizeof(*buf->elem) * num); 2097 2098 status = ice_aq_alloc_free_res(hw, num, buf, buf_len, 2099 ice_aqc_opc_free_res, NULL); 2100 if (status) 2101 ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n"); 2102 2103 kfree(buf); 2104 return status; 2105 } 2106 2107 /** 2108 * ice_get_num_per_func - determine number of resources per PF 2109 * @hw: pointer to the HW structure 2110 * @max: value to be evenly split between each PF 2111 * 2112 * Determine the number of valid functions by going through the bitmap returned 2113 * from parsing capabilities and use this to calculate the number of resources 2114 * per PF based on the max value passed in. 2115 */ 2116 static u32 ice_get_num_per_func(struct ice_hw *hw, u32 max) 2117 { 2118 u8 funcs; 2119 2120 #define ICE_CAPS_VALID_FUNCS_M 0xFF 2121 funcs = hweight8(hw->dev_caps.common_cap.valid_functions & 2122 ICE_CAPS_VALID_FUNCS_M); 2123 2124 if (!funcs) 2125 return 0; 2126 2127 return max / funcs; 2128 } 2129 2130 /** 2131 * ice_parse_common_caps - parse common device/function capabilities 2132 * @hw: pointer to the HW struct 2133 * @caps: pointer to common capabilities structure 2134 * @elem: the capability element to parse 2135 * @prefix: message prefix for tracing capabilities 2136 * 2137 * Given a capability element, extract relevant details into the common 2138 * capability structure. 2139 * 2140 * Returns: true if the capability matches one of the common capability ids, 2141 * false otherwise. 
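 *
 * For example (illustrative values only), an ICE_AQC_CAPS_RSS element with
 * number = 512 and logical_id = 9 is recorded as:
 *
 *	caps->rss_table_size = 512;
 *	caps->rss_table_entry_width = 9;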
2142 */ 2143 static bool 2144 ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps, 2145 struct ice_aqc_list_caps_elem *elem, const char *prefix) 2146 { 2147 u32 logical_id = le32_to_cpu(elem->logical_id); 2148 u32 phys_id = le32_to_cpu(elem->phys_id); 2149 u32 number = le32_to_cpu(elem->number); 2150 u16 cap = le16_to_cpu(elem->cap); 2151 bool found = true; 2152 2153 switch (cap) { 2154 case ICE_AQC_CAPS_VALID_FUNCTIONS: 2155 caps->valid_functions = number; 2156 ice_debug(hw, ICE_DBG_INIT, "%s: valid_functions (bitmap) = %d\n", prefix, 2157 caps->valid_functions); 2158 break; 2159 case ICE_AQC_CAPS_SRIOV: 2160 caps->sr_iov_1_1 = (number == 1); 2161 ice_debug(hw, ICE_DBG_INIT, "%s: sr_iov_1_1 = %d\n", prefix, 2162 caps->sr_iov_1_1); 2163 break; 2164 case ICE_AQC_CAPS_DCB: 2165 caps->dcb = (number == 1); 2166 caps->active_tc_bitmap = logical_id; 2167 caps->maxtc = phys_id; 2168 ice_debug(hw, ICE_DBG_INIT, "%s: dcb = %d\n", prefix, caps->dcb); 2169 ice_debug(hw, ICE_DBG_INIT, "%s: active_tc_bitmap = %d\n", prefix, 2170 caps->active_tc_bitmap); 2171 ice_debug(hw, ICE_DBG_INIT, "%s: maxtc = %d\n", prefix, caps->maxtc); 2172 break; 2173 case ICE_AQC_CAPS_RSS: 2174 caps->rss_table_size = number; 2175 caps->rss_table_entry_width = logical_id; 2176 ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_size = %d\n", prefix, 2177 caps->rss_table_size); 2178 ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_entry_width = %d\n", prefix, 2179 caps->rss_table_entry_width); 2180 break; 2181 case ICE_AQC_CAPS_RXQS: 2182 caps->num_rxq = number; 2183 caps->rxq_first_id = phys_id; 2184 ice_debug(hw, ICE_DBG_INIT, "%s: num_rxq = %d\n", prefix, 2185 caps->num_rxq); 2186 ice_debug(hw, ICE_DBG_INIT, "%s: rxq_first_id = %d\n", prefix, 2187 caps->rxq_first_id); 2188 break; 2189 case ICE_AQC_CAPS_TXQS: 2190 caps->num_txq = number; 2191 caps->txq_first_id = phys_id; 2192 ice_debug(hw, ICE_DBG_INIT, "%s: num_txq = %d\n", prefix, 2193 caps->num_txq); 2194 ice_debug(hw, ICE_DBG_INIT, "%s: txq_first_id = %d\n", prefix, 2195 caps->txq_first_id); 2196 break; 2197 case ICE_AQC_CAPS_MSIX: 2198 caps->num_msix_vectors = number; 2199 caps->msix_vector_first_id = phys_id; 2200 ice_debug(hw, ICE_DBG_INIT, "%s: num_msix_vectors = %d\n", prefix, 2201 caps->num_msix_vectors); 2202 ice_debug(hw, ICE_DBG_INIT, "%s: msix_vector_first_id = %d\n", prefix, 2203 caps->msix_vector_first_id); 2204 break; 2205 case ICE_AQC_CAPS_PENDING_NVM_VER: 2206 caps->nvm_update_pending_nvm = true; 2207 ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_nvm\n", prefix); 2208 break; 2209 case ICE_AQC_CAPS_PENDING_OROM_VER: 2210 caps->nvm_update_pending_orom = true; 2211 ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_orom\n", prefix); 2212 break; 2213 case ICE_AQC_CAPS_PENDING_NET_VER: 2214 caps->nvm_update_pending_netlist = true; 2215 ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_netlist\n", prefix); 2216 break; 2217 case ICE_AQC_CAPS_NVM_MGMT: 2218 caps->nvm_unified_update = 2219 (number & ICE_NVM_MGMT_UNIFIED_UPD_SUPPORT) ? 
2220 true : false; 2221 ice_debug(hw, ICE_DBG_INIT, "%s: nvm_unified_update = %d\n", prefix, 2222 caps->nvm_unified_update); 2223 break; 2224 case ICE_AQC_CAPS_RDMA: 2225 caps->rdma = (number == 1); 2226 ice_debug(hw, ICE_DBG_INIT, "%s: rdma = %d\n", prefix, caps->rdma); 2227 break; 2228 case ICE_AQC_CAPS_MAX_MTU: 2229 caps->max_mtu = number; 2230 ice_debug(hw, ICE_DBG_INIT, "%s: max_mtu = %d\n", 2231 prefix, caps->max_mtu); 2232 break; 2233 case ICE_AQC_CAPS_PCIE_RESET_AVOIDANCE: 2234 caps->pcie_reset_avoidance = (number > 0); 2235 ice_debug(hw, ICE_DBG_INIT, 2236 "%s: pcie_reset_avoidance = %d\n", prefix, 2237 caps->pcie_reset_avoidance); 2238 break; 2239 case ICE_AQC_CAPS_POST_UPDATE_RESET_RESTRICT: 2240 caps->reset_restrict_support = (number == 1); 2241 ice_debug(hw, ICE_DBG_INIT, 2242 "%s: reset_restrict_support = %d\n", prefix, 2243 caps->reset_restrict_support); 2244 break; 2245 case ICE_AQC_CAPS_FW_LAG_SUPPORT: 2246 caps->roce_lag = !!(number & ICE_AQC_BIT_ROCEV2_LAG); 2247 ice_debug(hw, ICE_DBG_INIT, "%s: roce_lag = %u\n", 2248 prefix, caps->roce_lag); 2249 caps->sriov_lag = !!(number & ICE_AQC_BIT_SRIOV_LAG); 2250 ice_debug(hw, ICE_DBG_INIT, "%s: sriov_lag = %u\n", 2251 prefix, caps->sriov_lag); 2252 break; 2253 default: 2254 /* Not one of the recognized common capabilities */ 2255 found = false; 2256 } 2257 2258 return found; 2259 } 2260 2261 /** 2262 * ice_recalc_port_limited_caps - Recalculate port limited capabilities 2263 * @hw: pointer to the HW structure 2264 * @caps: pointer to capabilities structure to fix 2265 * 2266 * Re-calculate the capabilities that are dependent on the number of physical 2267 * ports; i.e. some features are not supported or function differently on 2268 * devices with more than 4 ports. 2269 */ 2270 static void 2271 ice_recalc_port_limited_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps) 2272 { 2273 /* This assumes device capabilities are always scanned before function 2274 * capabilities during the initialization flow. 2275 */ 2276 if (hw->dev_caps.num_funcs > 4) { 2277 /* Max 4 TCs per port */ 2278 caps->maxtc = 4; 2279 ice_debug(hw, ICE_DBG_INIT, "reducing maxtc to %d (based on #ports)\n", 2280 caps->maxtc); 2281 if (caps->rdma) { 2282 ice_debug(hw, ICE_DBG_INIT, "forcing RDMA off\n"); 2283 caps->rdma = 0; 2284 } 2285 2286 /* print message only when processing device capabilities 2287 * during initialization. 2288 */ 2289 if (caps == &hw->dev_caps.common_cap) 2290 dev_info(ice_hw_to_dev(hw), "RDMA functionality is not available with the current device configuration.\n"); 2291 } 2292 } 2293 2294 /** 2295 * ice_parse_vf_func_caps - Parse ICE_AQC_CAPS_VF function caps 2296 * @hw: pointer to the HW struct 2297 * @func_p: pointer to function capabilities structure 2298 * @cap: pointer to the capability element to parse 2299 * 2300 * Extract function capabilities for ICE_AQC_CAPS_VF. 
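 *
 * For example (illustrative values only), an element with number = 16 and
 * logical_id = 64 is recorded as:
 *
 *	func_p->num_allocd_vfs = 16;
 *	func_p->vf_base_id = 64;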
2301 */ 2302 static void 2303 ice_parse_vf_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p, 2304 struct ice_aqc_list_caps_elem *cap) 2305 { 2306 u32 logical_id = le32_to_cpu(cap->logical_id); 2307 u32 number = le32_to_cpu(cap->number); 2308 2309 func_p->num_allocd_vfs = number; 2310 func_p->vf_base_id = logical_id; 2311 ice_debug(hw, ICE_DBG_INIT, "func caps: num_allocd_vfs = %d\n", 2312 func_p->num_allocd_vfs); 2313 ice_debug(hw, ICE_DBG_INIT, "func caps: vf_base_id = %d\n", 2314 func_p->vf_base_id); 2315 } 2316 2317 /** 2318 * ice_parse_vsi_func_caps - Parse ICE_AQC_CAPS_VSI function caps 2319 * @hw: pointer to the HW struct 2320 * @func_p: pointer to function capabilities structure 2321 * @cap: pointer to the capability element to parse 2322 * 2323 * Extract function capabilities for ICE_AQC_CAPS_VSI. 2324 */ 2325 static void 2326 ice_parse_vsi_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p, 2327 struct ice_aqc_list_caps_elem *cap) 2328 { 2329 func_p->guar_num_vsi = ice_get_num_per_func(hw, ICE_MAX_VSI); 2330 ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi (fw) = %d\n", 2331 le32_to_cpu(cap->number)); 2332 ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi = %d\n", 2333 func_p->guar_num_vsi); 2334 } 2335 2336 /** 2337 * ice_parse_1588_func_caps - Parse ICE_AQC_CAPS_1588 function caps 2338 * @hw: pointer to the HW struct 2339 * @func_p: pointer to function capabilities structure 2340 * @cap: pointer to the capability element to parse 2341 * 2342 * Extract function capabilities for ICE_AQC_CAPS_1588. 2343 */ 2344 static void 2345 ice_parse_1588_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p, 2346 struct ice_aqc_list_caps_elem *cap) 2347 { 2348 struct ice_ts_func_info *info = &func_p->ts_func_info; 2349 u32 number = le32_to_cpu(cap->number); 2350 2351 info->ena = ((number & ICE_TS_FUNC_ENA_M) != 0); 2352 func_p->common_cap.ieee_1588 = info->ena; 2353 2354 info->src_tmr_owned = ((number & ICE_TS_SRC_TMR_OWND_M) != 0); 2355 info->tmr_ena = ((number & ICE_TS_TMR_ENA_M) != 0); 2356 info->tmr_index_owned = ((number & ICE_TS_TMR_IDX_OWND_M) != 0); 2357 info->tmr_index_assoc = ((number & ICE_TS_TMR_IDX_ASSOC_M) != 0); 2358 2359 info->clk_freq = (number & ICE_TS_CLK_FREQ_M) >> ICE_TS_CLK_FREQ_S; 2360 info->clk_src = ((number & ICE_TS_CLK_SRC_M) != 0); 2361 2362 if (info->clk_freq < NUM_ICE_TIME_REF_FREQ) { 2363 info->time_ref = (enum ice_time_ref_freq)info->clk_freq; 2364 } else { 2365 /* Unknown clock frequency, so assume a (probably incorrect) 2366 * default to avoid out-of-bounds look ups of frequency 2367 * related information. 
		 */
		ice_debug(hw, ICE_DBG_INIT, "1588 func caps: unknown clock frequency %u\n",
			  info->clk_freq);
		info->time_ref = ICE_TIME_REF_FREQ_25_000;
	}

	ice_debug(hw, ICE_DBG_INIT, "func caps: ieee_1588 = %u\n",
		  func_p->common_cap.ieee_1588);
	ice_debug(hw, ICE_DBG_INIT, "func caps: src_tmr_owned = %u\n",
		  info->src_tmr_owned);
	ice_debug(hw, ICE_DBG_INIT, "func caps: tmr_ena = %u\n",
		  info->tmr_ena);
	ice_debug(hw, ICE_DBG_INIT, "func caps: tmr_index_owned = %u\n",
		  info->tmr_index_owned);
	ice_debug(hw, ICE_DBG_INIT, "func caps: tmr_index_assoc = %u\n",
		  info->tmr_index_assoc);
	ice_debug(hw, ICE_DBG_INIT, "func caps: clk_freq = %u\n",
		  info->clk_freq);
	ice_debug(hw, ICE_DBG_INIT, "func caps: clk_src = %u\n",
		  info->clk_src);
}

/**
 * ice_parse_fdir_func_caps - Parse ICE_AQC_CAPS_FD function caps
 * @hw: pointer to the HW struct
 * @func_p: pointer to function capabilities structure
 *
 * Extract function capabilities for ICE_AQC_CAPS_FD.
 */
static void
ice_parse_fdir_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p)
{
	u32 reg_val, val;

	reg_val = rd32(hw, GLQF_FD_SIZE);
	val = (reg_val & GLQF_FD_SIZE_FD_GSIZE_M) >>
		GLQF_FD_SIZE_FD_GSIZE_S;
	func_p->fd_fltr_guar =
		ice_get_num_per_func(hw, val);
	val = (reg_val & GLQF_FD_SIZE_FD_BSIZE_M) >>
		GLQF_FD_SIZE_FD_BSIZE_S;
	func_p->fd_fltr_best_effort = val;

	ice_debug(hw, ICE_DBG_INIT, "func caps: fd_fltr_guar = %d\n",
		  func_p->fd_fltr_guar);
	ice_debug(hw, ICE_DBG_INIT, "func caps: fd_fltr_best_effort = %d\n",
		  func_p->fd_fltr_best_effort);
}

/**
 * ice_parse_func_caps - Parse function capabilities
 * @hw: pointer to the HW struct
 * @func_p: pointer to function capabilities structure
 * @buf: buffer containing the function capability records
 * @cap_count: the number of capabilities
 *
 * Helper function to parse function (0x000A) capabilities list. For
 * capabilities shared between device and function, this relies on
 * ice_parse_common_caps.
 *
 * Loop through the list of provided capabilities and extract the relevant
 * data into the function capabilities structure.
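 *
 * Capability IDs that are neither common nor handled in the switch below
 * are only logged as unknown; they are not treated as errors.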
2430 */ 2431 static void 2432 ice_parse_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p, 2433 void *buf, u32 cap_count) 2434 { 2435 struct ice_aqc_list_caps_elem *cap_resp; 2436 u32 i; 2437 2438 cap_resp = buf; 2439 2440 memset(func_p, 0, sizeof(*func_p)); 2441 2442 for (i = 0; i < cap_count; i++) { 2443 u16 cap = le16_to_cpu(cap_resp[i].cap); 2444 bool found; 2445 2446 found = ice_parse_common_caps(hw, &func_p->common_cap, 2447 &cap_resp[i], "func caps"); 2448 2449 switch (cap) { 2450 case ICE_AQC_CAPS_VF: 2451 ice_parse_vf_func_caps(hw, func_p, &cap_resp[i]); 2452 break; 2453 case ICE_AQC_CAPS_VSI: 2454 ice_parse_vsi_func_caps(hw, func_p, &cap_resp[i]); 2455 break; 2456 case ICE_AQC_CAPS_1588: 2457 ice_parse_1588_func_caps(hw, func_p, &cap_resp[i]); 2458 break; 2459 case ICE_AQC_CAPS_FD: 2460 ice_parse_fdir_func_caps(hw, func_p); 2461 break; 2462 default: 2463 /* Don't list common capabilities as unknown */ 2464 if (!found) 2465 ice_debug(hw, ICE_DBG_INIT, "func caps: unknown capability[%d]: 0x%x\n", 2466 i, cap); 2467 break; 2468 } 2469 } 2470 2471 ice_recalc_port_limited_caps(hw, &func_p->common_cap); 2472 } 2473 2474 /** 2475 * ice_parse_valid_functions_cap - Parse ICE_AQC_CAPS_VALID_FUNCTIONS caps 2476 * @hw: pointer to the HW struct 2477 * @dev_p: pointer to device capabilities structure 2478 * @cap: capability element to parse 2479 * 2480 * Parse ICE_AQC_CAPS_VALID_FUNCTIONS for device capabilities. 2481 */ 2482 static void 2483 ice_parse_valid_functions_cap(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, 2484 struct ice_aqc_list_caps_elem *cap) 2485 { 2486 u32 number = le32_to_cpu(cap->number); 2487 2488 dev_p->num_funcs = hweight32(number); 2489 ice_debug(hw, ICE_DBG_INIT, "dev caps: num_funcs = %d\n", 2490 dev_p->num_funcs); 2491 } 2492 2493 /** 2494 * ice_parse_vf_dev_caps - Parse ICE_AQC_CAPS_VF device caps 2495 * @hw: pointer to the HW struct 2496 * @dev_p: pointer to device capabilities structure 2497 * @cap: capability element to parse 2498 * 2499 * Parse ICE_AQC_CAPS_VF for device capabilities. 2500 */ 2501 static void 2502 ice_parse_vf_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, 2503 struct ice_aqc_list_caps_elem *cap) 2504 { 2505 u32 number = le32_to_cpu(cap->number); 2506 2507 dev_p->num_vfs_exposed = number; 2508 ice_debug(hw, ICE_DBG_INIT, "dev_caps: num_vfs_exposed = %d\n", 2509 dev_p->num_vfs_exposed); 2510 } 2511 2512 /** 2513 * ice_parse_vsi_dev_caps - Parse ICE_AQC_CAPS_VSI device caps 2514 * @hw: pointer to the HW struct 2515 * @dev_p: pointer to device capabilities structure 2516 * @cap: capability element to parse 2517 * 2518 * Parse ICE_AQC_CAPS_VSI for device capabilities. 2519 */ 2520 static void 2521 ice_parse_vsi_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, 2522 struct ice_aqc_list_caps_elem *cap) 2523 { 2524 u32 number = le32_to_cpu(cap->number); 2525 2526 dev_p->num_vsi_allocd_to_host = number; 2527 ice_debug(hw, ICE_DBG_INIT, "dev caps: num_vsi_allocd_to_host = %d\n", 2528 dev_p->num_vsi_allocd_to_host); 2529 } 2530 2531 /** 2532 * ice_parse_1588_dev_caps - Parse ICE_AQC_CAPS_1588 device caps 2533 * @hw: pointer to the HW struct 2534 * @dev_p: pointer to device capabilities structure 2535 * @cap: capability element to parse 2536 * 2537 * Parse ICE_AQC_CAPS_1588 for device capabilities. 
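 *
 * The enable/ownership bits for the two device timers are unpacked from
 * @cap->number below; ena_ports and tmr_own_map come from the element's
 * logical_id and phys_id words, respectively.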
 */
static void
ice_parse_1588_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
			struct ice_aqc_list_caps_elem *cap)
{
	struct ice_ts_dev_info *info = &dev_p->ts_dev_info;
	u32 logical_id = le32_to_cpu(cap->logical_id);
	u32 phys_id = le32_to_cpu(cap->phys_id);
	u32 number = le32_to_cpu(cap->number);

	info->ena = ((number & ICE_TS_DEV_ENA_M) != 0);
	dev_p->common_cap.ieee_1588 = info->ena;

	info->tmr0_owner = number & ICE_TS_TMR0_OWNR_M;
	info->tmr0_owned = ((number & ICE_TS_TMR0_OWND_M) != 0);
	info->tmr0_ena = ((number & ICE_TS_TMR0_ENA_M) != 0);

	info->tmr1_owner = (number & ICE_TS_TMR1_OWNR_M) >> ICE_TS_TMR1_OWNR_S;
	info->tmr1_owned = ((number & ICE_TS_TMR1_OWND_M) != 0);
	info->tmr1_ena = ((number & ICE_TS_TMR1_ENA_M) != 0);

	info->ts_ll_read = ((number & ICE_TS_LL_TX_TS_READ_M) != 0);

	info->ena_ports = logical_id;
	info->tmr_own_map = phys_id;

	ice_debug(hw, ICE_DBG_INIT, "dev caps: ieee_1588 = %u\n",
		  dev_p->common_cap.ieee_1588);
	ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr0_owner = %u\n",
		  info->tmr0_owner);
	ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr0_owned = %u\n",
		  info->tmr0_owned);
	ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr0_ena = %u\n",
		  info->tmr0_ena);
	ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_owner = %u\n",
		  info->tmr1_owner);
	ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_owned = %u\n",
		  info->tmr1_owned);
	ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_ena = %u\n",
		  info->tmr1_ena);
	ice_debug(hw, ICE_DBG_INIT, "dev caps: ts_ll_read = %u\n",
		  info->ts_ll_read);
	ice_debug(hw, ICE_DBG_INIT, "dev caps: ieee_1588 ena_ports = %u\n",
		  info->ena_ports);
	ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr_own_map = %u\n",
		  info->tmr_own_map);
}

/**
 * ice_parse_fdir_dev_caps - Parse ICE_AQC_CAPS_FD device caps
 * @hw: pointer to the HW struct
 * @dev_p: pointer to device capabilities structure
 * @cap: capability element to parse
 *
 * Parse ICE_AQC_CAPS_FD for device capabilities.
 */
static void
ice_parse_fdir_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
			struct ice_aqc_list_caps_elem *cap)
{
	u32 number = le32_to_cpu(cap->number);

	dev_p->num_flow_director_fltr = number;
	ice_debug(hw, ICE_DBG_INIT, "dev caps: num_flow_director_fltr = %d\n",
		  dev_p->num_flow_director_fltr);
}

/**
 * ice_parse_dev_caps - Parse device capabilities
 * @hw: pointer to the HW struct
 * @dev_p: pointer to device capabilities structure
 * @buf: buffer containing the device capability records
 * @cap_count: the number of capabilities
 *
 * Helper function to parse the device (0x000B) capabilities list. For
 * capabilities shared between device and function, this relies on
 * ice_parse_common_caps.
 *
 * Loop through the list of provided capabilities and extract the relevant
 * data into the device capabilities structure.
2618 */ 2619 static void 2620 ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, 2621 void *buf, u32 cap_count) 2622 { 2623 struct ice_aqc_list_caps_elem *cap_resp; 2624 u32 i; 2625 2626 cap_resp = buf; 2627 2628 memset(dev_p, 0, sizeof(*dev_p)); 2629 2630 for (i = 0; i < cap_count; i++) { 2631 u16 cap = le16_to_cpu(cap_resp[i].cap); 2632 bool found; 2633 2634 found = ice_parse_common_caps(hw, &dev_p->common_cap, 2635 &cap_resp[i], "dev caps"); 2636 2637 switch (cap) { 2638 case ICE_AQC_CAPS_VALID_FUNCTIONS: 2639 ice_parse_valid_functions_cap(hw, dev_p, &cap_resp[i]); 2640 break; 2641 case ICE_AQC_CAPS_VF: 2642 ice_parse_vf_dev_caps(hw, dev_p, &cap_resp[i]); 2643 break; 2644 case ICE_AQC_CAPS_VSI: 2645 ice_parse_vsi_dev_caps(hw, dev_p, &cap_resp[i]); 2646 break; 2647 case ICE_AQC_CAPS_1588: 2648 ice_parse_1588_dev_caps(hw, dev_p, &cap_resp[i]); 2649 break; 2650 case ICE_AQC_CAPS_FD: 2651 ice_parse_fdir_dev_caps(hw, dev_p, &cap_resp[i]); 2652 break; 2653 default: 2654 /* Don't list common capabilities as unknown */ 2655 if (!found) 2656 ice_debug(hw, ICE_DBG_INIT, "dev caps: unknown capability[%d]: 0x%x\n", 2657 i, cap); 2658 break; 2659 } 2660 } 2661 2662 ice_recalc_port_limited_caps(hw, &dev_p->common_cap); 2663 } 2664 2665 /** 2666 * ice_aq_get_netlist_node 2667 * @hw: pointer to the hw struct 2668 * @cmd: get_link_topo AQ structure 2669 * @node_part_number: output node part number if node found 2670 * @node_handle: output node handle parameter if node found 2671 */ 2672 static int 2673 ice_aq_get_netlist_node(struct ice_hw *hw, struct ice_aqc_get_link_topo *cmd, 2674 u8 *node_part_number, u16 *node_handle) 2675 { 2676 struct ice_aq_desc desc; 2677 2678 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo); 2679 desc.params.get_link_topo = *cmd; 2680 2681 if (ice_aq_send_cmd(hw, &desc, NULL, 0, NULL)) 2682 return -EIO; 2683 2684 if (node_handle) 2685 *node_handle = le16_to_cpu(desc.params.get_link_topo.addr.handle); 2686 if (node_part_number) 2687 *node_part_number = desc.params.get_link_topo.node_part_num; 2688 2689 return 0; 2690 } 2691 2692 /** 2693 * ice_is_pf_c827 - check if pf contains c827 phy 2694 * @hw: pointer to the hw struct 2695 */ 2696 bool ice_is_pf_c827(struct ice_hw *hw) 2697 { 2698 struct ice_aqc_get_link_topo cmd = {}; 2699 u8 node_part_number; 2700 u16 node_handle; 2701 int status; 2702 2703 if (hw->mac_type != ICE_MAC_E810) 2704 return false; 2705 2706 if (hw->device_id != ICE_DEV_ID_E810C_QSFP) 2707 return true; 2708 2709 cmd.addr.topo_params.node_type_ctx = 2710 FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_TYPE_M, ICE_AQC_LINK_TOPO_NODE_TYPE_PHY) | 2711 FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_CTX_M, ICE_AQC_LINK_TOPO_NODE_CTX_PORT); 2712 cmd.addr.topo_params.index = 0; 2713 2714 status = ice_aq_get_netlist_node(hw, &cmd, &node_part_number, 2715 &node_handle); 2716 2717 if (status || node_part_number != ICE_AQC_GET_LINK_TOPO_NODE_NR_C827) 2718 return false; 2719 2720 if (node_handle == E810C_QSFP_C827_0_HANDLE || node_handle == E810C_QSFP_C827_1_HANDLE) 2721 return true; 2722 2723 return false; 2724 } 2725 2726 /** 2727 * ice_aq_list_caps - query function/device capabilities 2728 * @hw: pointer to the HW struct 2729 * @buf: a buffer to hold the capabilities 2730 * @buf_size: size of the buffer 2731 * @cap_count: if not NULL, set to the number of capabilities reported 2732 * @opc: capabilities type to discover, device or function 2733 * @cd: pointer to command details structure or NULL 2734 * 2735 * Get the function (0x000A) or device (0x000B) capabilities 
description from 2736 * firmware and store it in the buffer. 2737 * 2738 * If the cap_count pointer is not NULL, then it is set to the number of 2739 * capabilities firmware will report. Note that if the buffer size is too 2740 * small, it is possible the command will return ICE_AQ_ERR_ENOMEM. The 2741 * cap_count will still be updated in this case. It is recommended that the 2742 * buffer size be set to ICE_AQ_MAX_BUF_LEN (the largest possible buffer that 2743 * firmware could return) to avoid this. 2744 */ 2745 int 2746 ice_aq_list_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count, 2747 enum ice_adminq_opc opc, struct ice_sq_cd *cd) 2748 { 2749 struct ice_aqc_list_caps *cmd; 2750 struct ice_aq_desc desc; 2751 int status; 2752 2753 cmd = &desc.params.get_cap; 2754 2755 if (opc != ice_aqc_opc_list_func_caps && 2756 opc != ice_aqc_opc_list_dev_caps) 2757 return -EINVAL; 2758 2759 ice_fill_dflt_direct_cmd_desc(&desc, opc); 2760 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); 2761 2762 if (cap_count) 2763 *cap_count = le32_to_cpu(cmd->count); 2764 2765 return status; 2766 } 2767 2768 /** 2769 * ice_discover_dev_caps - Read and extract device capabilities 2770 * @hw: pointer to the hardware structure 2771 * @dev_caps: pointer to device capabilities structure 2772 * 2773 * Read the device capabilities and extract them into the dev_caps structure 2774 * for later use. 2775 */ 2776 int 2777 ice_discover_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_caps) 2778 { 2779 u32 cap_count = 0; 2780 void *cbuf; 2781 int status; 2782 2783 cbuf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL); 2784 if (!cbuf) 2785 return -ENOMEM; 2786 2787 /* Although the driver doesn't know the number of capabilities the 2788 * device will return, we can simply send a 4KB buffer, the maximum 2789 * possible size that firmware can return. 2790 */ 2791 cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem); 2792 2793 status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count, 2794 ice_aqc_opc_list_dev_caps, NULL); 2795 if (!status) 2796 ice_parse_dev_caps(hw, dev_caps, cbuf, cap_count); 2797 kfree(cbuf); 2798 2799 return status; 2800 } 2801 2802 /** 2803 * ice_discover_func_caps - Read and extract function capabilities 2804 * @hw: pointer to the hardware structure 2805 * @func_caps: pointer to function capabilities structure 2806 * 2807 * Read the function capabilities and extract them into the func_caps structure 2808 * for later use. 2809 */ 2810 static int 2811 ice_discover_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_caps) 2812 { 2813 u32 cap_count = 0; 2814 void *cbuf; 2815 int status; 2816 2817 cbuf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL); 2818 if (!cbuf) 2819 return -ENOMEM; 2820 2821 /* Although the driver doesn't know the number of capabilities the 2822 * device will return, we can simply send a 4KB buffer, the maximum 2823 * possible size that firmware can return. 
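	 * With a 4096-byte buffer and an (assumed) 32-byte
	 * struct ice_aqc_list_caps_elem, the cap_count computed below
	 * allows for up to 128 capability records.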
2824 */ 2825 cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem); 2826 2827 status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count, 2828 ice_aqc_opc_list_func_caps, NULL); 2829 if (!status) 2830 ice_parse_func_caps(hw, func_caps, cbuf, cap_count); 2831 kfree(cbuf); 2832 2833 return status; 2834 } 2835 2836 /** 2837 * ice_set_safe_mode_caps - Override dev/func capabilities when in safe mode 2838 * @hw: pointer to the hardware structure 2839 */ 2840 void ice_set_safe_mode_caps(struct ice_hw *hw) 2841 { 2842 struct ice_hw_func_caps *func_caps = &hw->func_caps; 2843 struct ice_hw_dev_caps *dev_caps = &hw->dev_caps; 2844 struct ice_hw_common_caps cached_caps; 2845 u32 num_funcs; 2846 2847 /* cache some func_caps values that should be restored after memset */ 2848 cached_caps = func_caps->common_cap; 2849 2850 /* unset func capabilities */ 2851 memset(func_caps, 0, sizeof(*func_caps)); 2852 2853 #define ICE_RESTORE_FUNC_CAP(name) \ 2854 func_caps->common_cap.name = cached_caps.name 2855 2856 /* restore cached values */ 2857 ICE_RESTORE_FUNC_CAP(valid_functions); 2858 ICE_RESTORE_FUNC_CAP(txq_first_id); 2859 ICE_RESTORE_FUNC_CAP(rxq_first_id); 2860 ICE_RESTORE_FUNC_CAP(msix_vector_first_id); 2861 ICE_RESTORE_FUNC_CAP(max_mtu); 2862 ICE_RESTORE_FUNC_CAP(nvm_unified_update); 2863 ICE_RESTORE_FUNC_CAP(nvm_update_pending_nvm); 2864 ICE_RESTORE_FUNC_CAP(nvm_update_pending_orom); 2865 ICE_RESTORE_FUNC_CAP(nvm_update_pending_netlist); 2866 2867 /* one Tx and one Rx queue in safe mode */ 2868 func_caps->common_cap.num_rxq = 1; 2869 func_caps->common_cap.num_txq = 1; 2870 2871 /* two MSIX vectors, one for traffic and one for misc causes */ 2872 func_caps->common_cap.num_msix_vectors = 2; 2873 func_caps->guar_num_vsi = 1; 2874 2875 /* cache some dev_caps values that should be restored after memset */ 2876 cached_caps = dev_caps->common_cap; 2877 num_funcs = dev_caps->num_funcs; 2878 2879 /* unset dev capabilities */ 2880 memset(dev_caps, 0, sizeof(*dev_caps)); 2881 2882 #define ICE_RESTORE_DEV_CAP(name) \ 2883 dev_caps->common_cap.name = cached_caps.name 2884 2885 /* restore cached values */ 2886 ICE_RESTORE_DEV_CAP(valid_functions); 2887 ICE_RESTORE_DEV_CAP(txq_first_id); 2888 ICE_RESTORE_DEV_CAP(rxq_first_id); 2889 ICE_RESTORE_DEV_CAP(msix_vector_first_id); 2890 ICE_RESTORE_DEV_CAP(max_mtu); 2891 ICE_RESTORE_DEV_CAP(nvm_unified_update); 2892 ICE_RESTORE_DEV_CAP(nvm_update_pending_nvm); 2893 ICE_RESTORE_DEV_CAP(nvm_update_pending_orom); 2894 ICE_RESTORE_DEV_CAP(nvm_update_pending_netlist); 2895 dev_caps->num_funcs = num_funcs; 2896 2897 /* one Tx and one Rx queue per function in safe mode */ 2898 dev_caps->common_cap.num_rxq = num_funcs; 2899 dev_caps->common_cap.num_txq = num_funcs; 2900 2901 /* two MSIX vectors per function */ 2902 dev_caps->common_cap.num_msix_vectors = 2 * num_funcs; 2903 } 2904 2905 /** 2906 * ice_get_caps - get info about the HW 2907 * @hw: pointer to the hardware structure 2908 */ 2909 int ice_get_caps(struct ice_hw *hw) 2910 { 2911 int status; 2912 2913 status = ice_discover_dev_caps(hw, &hw->dev_caps); 2914 if (status) 2915 return status; 2916 2917 return ice_discover_func_caps(hw, &hw->func_caps); 2918 } 2919 2920 /** 2921 * ice_aq_manage_mac_write - manage MAC address write command 2922 * @hw: pointer to the HW struct 2923 * @mac_addr: MAC address to be written as LAA/LAA+WoL/Port address 2924 * @flags: flags to control write behavior 2925 * @cd: pointer to command details structure or NULL 2926 * 2927 * This function is used to write MAC address 
to the NVM (0x0108). 2928 */ 2929 int 2930 ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags, 2931 struct ice_sq_cd *cd) 2932 { 2933 struct ice_aqc_manage_mac_write *cmd; 2934 struct ice_aq_desc desc; 2935 2936 cmd = &desc.params.mac_write; 2937 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_write); 2938 2939 cmd->flags = flags; 2940 ether_addr_copy(cmd->mac_addr, mac_addr); 2941 2942 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 2943 } 2944 2945 /** 2946 * ice_aq_clear_pxe_mode 2947 * @hw: pointer to the HW struct 2948 * 2949 * Tell the firmware that the driver is taking over from PXE (0x0110). 2950 */ 2951 static int ice_aq_clear_pxe_mode(struct ice_hw *hw) 2952 { 2953 struct ice_aq_desc desc; 2954 2955 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pxe_mode); 2956 desc.params.clear_pxe.rx_cnt = ICE_AQC_CLEAR_PXE_RX_CNT; 2957 2958 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 2959 } 2960 2961 /** 2962 * ice_clear_pxe_mode - clear pxe operations mode 2963 * @hw: pointer to the HW struct 2964 * 2965 * Make sure all PXE mode settings are cleared, including things 2966 * like descriptor fetch/write-back mode. 2967 */ 2968 void ice_clear_pxe_mode(struct ice_hw *hw) 2969 { 2970 if (ice_check_sq_alive(hw, &hw->adminq)) 2971 ice_aq_clear_pxe_mode(hw); 2972 } 2973 2974 /** 2975 * ice_aq_set_port_params - set physical port parameters. 2976 * @pi: pointer to the port info struct 2977 * @double_vlan: if set double VLAN is enabled 2978 * @cd: pointer to command details structure or NULL 2979 * 2980 * Set Physical port parameters (0x0203) 2981 */ 2982 int 2983 ice_aq_set_port_params(struct ice_port_info *pi, bool double_vlan, 2984 struct ice_sq_cd *cd) 2985 2986 { 2987 struct ice_aqc_set_port_params *cmd; 2988 struct ice_hw *hw = pi->hw; 2989 struct ice_aq_desc desc; 2990 u16 cmd_flags = 0; 2991 2992 cmd = &desc.params.set_port_params; 2993 2994 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_params); 2995 if (double_vlan) 2996 cmd_flags |= ICE_AQC_SET_P_PARAMS_DOUBLE_VLAN_ENA; 2997 cmd->cmd_flags = cpu_to_le16(cmd_flags); 2998 2999 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 3000 } 3001 3002 /** 3003 * ice_is_100m_speed_supported 3004 * @hw: pointer to the HW struct 3005 * 3006 * returns true if 100M speeds are supported by the device, 3007 * false otherwise. 3008 */ 3009 bool ice_is_100m_speed_supported(struct ice_hw *hw) 3010 { 3011 switch (hw->device_id) { 3012 case ICE_DEV_ID_E822C_SGMII: 3013 case ICE_DEV_ID_E822L_SGMII: 3014 case ICE_DEV_ID_E823L_1GBE: 3015 case ICE_DEV_ID_E823C_SGMII: 3016 return true; 3017 default: 3018 return false; 3019 } 3020 } 3021 3022 /** 3023 * ice_get_link_speed_based_on_phy_type - returns link speed 3024 * @phy_type_low: lower part of phy_type 3025 * @phy_type_high: higher part of phy_type 3026 * 3027 * This helper function will convert an entry in PHY type structure 3028 * [phy_type_low, phy_type_high] to its corresponding link speed. 3029 * Note: In the structure of [phy_type_low, phy_type_high], there should 3030 * be one bit set, as this function will convert one PHY type to its 3031 * speed. 
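 * For example (illustrative), phy_type_low = ICE_PHY_TYPE_LOW_25GBASE_SR
 * (a single bit) maps to ICE_AQ_LINK_SPEED_25GB.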
3032 * If no bit gets set, ICE_AQ_LINK_SPEED_UNKNOWN will be returned 3033 * If more than one bit gets set, ICE_AQ_LINK_SPEED_UNKNOWN will be returned 3034 */ 3035 static u16 3036 ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high) 3037 { 3038 u16 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN; 3039 u16 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN; 3040 3041 switch (phy_type_low) { 3042 case ICE_PHY_TYPE_LOW_100BASE_TX: 3043 case ICE_PHY_TYPE_LOW_100M_SGMII: 3044 speed_phy_type_low = ICE_AQ_LINK_SPEED_100MB; 3045 break; 3046 case ICE_PHY_TYPE_LOW_1000BASE_T: 3047 case ICE_PHY_TYPE_LOW_1000BASE_SX: 3048 case ICE_PHY_TYPE_LOW_1000BASE_LX: 3049 case ICE_PHY_TYPE_LOW_1000BASE_KX: 3050 case ICE_PHY_TYPE_LOW_1G_SGMII: 3051 speed_phy_type_low = ICE_AQ_LINK_SPEED_1000MB; 3052 break; 3053 case ICE_PHY_TYPE_LOW_2500BASE_T: 3054 case ICE_PHY_TYPE_LOW_2500BASE_X: 3055 case ICE_PHY_TYPE_LOW_2500BASE_KX: 3056 speed_phy_type_low = ICE_AQ_LINK_SPEED_2500MB; 3057 break; 3058 case ICE_PHY_TYPE_LOW_5GBASE_T: 3059 case ICE_PHY_TYPE_LOW_5GBASE_KR: 3060 speed_phy_type_low = ICE_AQ_LINK_SPEED_5GB; 3061 break; 3062 case ICE_PHY_TYPE_LOW_10GBASE_T: 3063 case ICE_PHY_TYPE_LOW_10G_SFI_DA: 3064 case ICE_PHY_TYPE_LOW_10GBASE_SR: 3065 case ICE_PHY_TYPE_LOW_10GBASE_LR: 3066 case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1: 3067 case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC: 3068 case ICE_PHY_TYPE_LOW_10G_SFI_C2C: 3069 speed_phy_type_low = ICE_AQ_LINK_SPEED_10GB; 3070 break; 3071 case ICE_PHY_TYPE_LOW_25GBASE_T: 3072 case ICE_PHY_TYPE_LOW_25GBASE_CR: 3073 case ICE_PHY_TYPE_LOW_25GBASE_CR_S: 3074 case ICE_PHY_TYPE_LOW_25GBASE_CR1: 3075 case ICE_PHY_TYPE_LOW_25GBASE_SR: 3076 case ICE_PHY_TYPE_LOW_25GBASE_LR: 3077 case ICE_PHY_TYPE_LOW_25GBASE_KR: 3078 case ICE_PHY_TYPE_LOW_25GBASE_KR_S: 3079 case ICE_PHY_TYPE_LOW_25GBASE_KR1: 3080 case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC: 3081 case ICE_PHY_TYPE_LOW_25G_AUI_C2C: 3082 speed_phy_type_low = ICE_AQ_LINK_SPEED_25GB; 3083 break; 3084 case ICE_PHY_TYPE_LOW_40GBASE_CR4: 3085 case ICE_PHY_TYPE_LOW_40GBASE_SR4: 3086 case ICE_PHY_TYPE_LOW_40GBASE_LR4: 3087 case ICE_PHY_TYPE_LOW_40GBASE_KR4: 3088 case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC: 3089 case ICE_PHY_TYPE_LOW_40G_XLAUI: 3090 speed_phy_type_low = ICE_AQ_LINK_SPEED_40GB; 3091 break; 3092 case ICE_PHY_TYPE_LOW_50GBASE_CR2: 3093 case ICE_PHY_TYPE_LOW_50GBASE_SR2: 3094 case ICE_PHY_TYPE_LOW_50GBASE_LR2: 3095 case ICE_PHY_TYPE_LOW_50GBASE_KR2: 3096 case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC: 3097 case ICE_PHY_TYPE_LOW_50G_LAUI2: 3098 case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC: 3099 case ICE_PHY_TYPE_LOW_50G_AUI2: 3100 case ICE_PHY_TYPE_LOW_50GBASE_CP: 3101 case ICE_PHY_TYPE_LOW_50GBASE_SR: 3102 case ICE_PHY_TYPE_LOW_50GBASE_FR: 3103 case ICE_PHY_TYPE_LOW_50GBASE_LR: 3104 case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4: 3105 case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC: 3106 case ICE_PHY_TYPE_LOW_50G_AUI1: 3107 speed_phy_type_low = ICE_AQ_LINK_SPEED_50GB; 3108 break; 3109 case ICE_PHY_TYPE_LOW_100GBASE_CR4: 3110 case ICE_PHY_TYPE_LOW_100GBASE_SR4: 3111 case ICE_PHY_TYPE_LOW_100GBASE_LR4: 3112 case ICE_PHY_TYPE_LOW_100GBASE_KR4: 3113 case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC: 3114 case ICE_PHY_TYPE_LOW_100G_CAUI4: 3115 case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC: 3116 case ICE_PHY_TYPE_LOW_100G_AUI4: 3117 case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4: 3118 case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4: 3119 case ICE_PHY_TYPE_LOW_100GBASE_CP2: 3120 case ICE_PHY_TYPE_LOW_100GBASE_SR2: 3121 case ICE_PHY_TYPE_LOW_100GBASE_DR: 3122 speed_phy_type_low = ICE_AQ_LINK_SPEED_100GB; 3123 
		break;
	default:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
		break;
	}

	switch (phy_type_high) {
	case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
	case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
	case ICE_PHY_TYPE_HIGH_100G_CAUI2:
	case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
	case ICE_PHY_TYPE_HIGH_100G_AUI2:
		speed_phy_type_high = ICE_AQ_LINK_SPEED_100GB;
		break;
	default:
		speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
		break;
	}

	if (speed_phy_type_low == ICE_AQ_LINK_SPEED_UNKNOWN &&
	    speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
		return ICE_AQ_LINK_SPEED_UNKNOWN;
	else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
		 speed_phy_type_high != ICE_AQ_LINK_SPEED_UNKNOWN)
		return ICE_AQ_LINK_SPEED_UNKNOWN;
	else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
		 speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
		return speed_phy_type_low;
	else
		return speed_phy_type_high;
}

/**
 * ice_update_phy_type
 * @phy_type_low: pointer to the lower part of phy_type
 * @phy_type_high: pointer to the higher part of phy_type
 * @link_speeds_bitmap: targeted link speeds bitmap
 *
 * Note: For the link_speeds_bitmap layout, see
 * ice_aqc_get_link_status->link_speed. The caller can pass in a
 * link_speeds_bitmap that includes multiple speeds.
 *
 * Each entry in the [phy_type_low, phy_type_high] structure represents a
 * certain link speed. This helper function will turn on bits in
 * [phy_type_low, phy_type_high] structure based on the value of
 * link_speeds_bitmap input parameter.
 */
void
ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high,
		    u16 link_speeds_bitmap)
{
	u64 pt_high;
	u64 pt_low;
	int index;
	u16 speed;

	/* We first check with low part of phy_type */
	for (index = 0; index <= ICE_PHY_TYPE_LOW_MAX_INDEX; index++) {
		pt_low = BIT_ULL(index);
		speed = ice_get_link_speed_based_on_phy_type(pt_low, 0);

		if (link_speeds_bitmap & speed)
			*phy_type_low |= BIT_ULL(index);
	}

	/* We then check with high part of phy_type */
	for (index = 0; index <= ICE_PHY_TYPE_HIGH_MAX_INDEX; index++) {
		pt_high = BIT_ULL(index);
		speed = ice_get_link_speed_based_on_phy_type(0, pt_high);

		if (link_speeds_bitmap & speed)
			*phy_type_high |= BIT_ULL(index);
	}
}

/**
 * ice_aq_set_phy_cfg
 * @hw: pointer to the HW struct
 * @pi: port info structure of the interested logical port
 * @cfg: structure with PHY configuration data to be set
 * @cd: pointer to command details structure or NULL
 *
 * Set the various PHY configuration parameters supported on the Port.
 * One or more of the Set PHY config parameters may be ignored in MFP
 * mode as the PF may not have the privilege to set some of the PHY Config
 * parameters. This status will be indicated by the command response (0x0601).
 */
int
ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi,
		   struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc;
	int status;

	if (!cfg)
		return -EINVAL;

	/* Ensure that only valid bits of cfg->caps can be turned on.
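	 * Stray bits are logged and masked off below rather than failing
	 * the call.
	 *
	 * An illustrative sketch of a typical caller (hypothetical; real
	 * callers derive @cfg from ice_aq_get_phy_caps() data):
	 *
	 *	ice_copy_phy_caps_to_cfg(pi, pcaps, &cfg);
	 *	cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
	 *	status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL);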
*/ 3221 if (cfg->caps & ~ICE_AQ_PHY_ENA_VALID_MASK) { 3222 ice_debug(hw, ICE_DBG_PHY, "Invalid bit is set in ice_aqc_set_phy_cfg_data->caps : 0x%x\n", 3223 cfg->caps); 3224 3225 cfg->caps &= ICE_AQ_PHY_ENA_VALID_MASK; 3226 } 3227 3228 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_cfg); 3229 desc.params.set_phy.lport_num = pi->lport; 3230 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 3231 3232 ice_debug(hw, ICE_DBG_LINK, "set phy cfg\n"); 3233 ice_debug(hw, ICE_DBG_LINK, " phy_type_low = 0x%llx\n", 3234 (unsigned long long)le64_to_cpu(cfg->phy_type_low)); 3235 ice_debug(hw, ICE_DBG_LINK, " phy_type_high = 0x%llx\n", 3236 (unsigned long long)le64_to_cpu(cfg->phy_type_high)); 3237 ice_debug(hw, ICE_DBG_LINK, " caps = 0x%x\n", cfg->caps); 3238 ice_debug(hw, ICE_DBG_LINK, " low_power_ctrl_an = 0x%x\n", 3239 cfg->low_power_ctrl_an); 3240 ice_debug(hw, ICE_DBG_LINK, " eee_cap = 0x%x\n", cfg->eee_cap); 3241 ice_debug(hw, ICE_DBG_LINK, " eeer_value = 0x%x\n", cfg->eeer_value); 3242 ice_debug(hw, ICE_DBG_LINK, " link_fec_opt = 0x%x\n", 3243 cfg->link_fec_opt); 3244 3245 status = ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd); 3246 if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE) 3247 status = 0; 3248 3249 if (!status) 3250 pi->phy.curr_user_phy_cfg = *cfg; 3251 3252 return status; 3253 } 3254 3255 /** 3256 * ice_update_link_info - update status of the HW network link 3257 * @pi: port info structure of the interested logical port 3258 */ 3259 int ice_update_link_info(struct ice_port_info *pi) 3260 { 3261 struct ice_link_status *li; 3262 int status; 3263 3264 if (!pi) 3265 return -EINVAL; 3266 3267 li = &pi->phy.link_info; 3268 3269 status = ice_aq_get_link_info(pi, true, NULL, NULL); 3270 if (status) 3271 return status; 3272 3273 if (li->link_info & ICE_AQ_MEDIA_AVAILABLE) { 3274 struct ice_aqc_get_phy_caps_data *pcaps; 3275 struct ice_hw *hw; 3276 3277 hw = pi->hw; 3278 pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), 3279 GFP_KERNEL); 3280 if (!pcaps) 3281 return -ENOMEM; 3282 3283 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA, 3284 pcaps, NULL); 3285 3286 devm_kfree(ice_hw_to_dev(hw), pcaps); 3287 } 3288 3289 return status; 3290 } 3291 3292 /** 3293 * ice_cache_phy_user_req 3294 * @pi: port information structure 3295 * @cache_data: PHY logging data 3296 * @cache_mode: PHY logging mode 3297 * 3298 * Log the user request on (FC, FEC, SPEED) for later use. 
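 *
 * For example (illustrative), caching a user flow control request:
 *
 *	cache_data.data.curr_user_fc_req = ICE_FC_RX_PAUSE;
 *	ice_cache_phy_user_req(pi, cache_data, ICE_FC_MODE);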
3299 */ 3300 static void 3301 ice_cache_phy_user_req(struct ice_port_info *pi, 3302 struct ice_phy_cache_mode_data cache_data, 3303 enum ice_phy_cache_mode cache_mode) 3304 { 3305 if (!pi) 3306 return; 3307 3308 switch (cache_mode) { 3309 case ICE_FC_MODE: 3310 pi->phy.curr_user_fc_req = cache_data.data.curr_user_fc_req; 3311 break; 3312 case ICE_SPEED_MODE: 3313 pi->phy.curr_user_speed_req = 3314 cache_data.data.curr_user_speed_req; 3315 break; 3316 case ICE_FEC_MODE: 3317 pi->phy.curr_user_fec_req = cache_data.data.curr_user_fec_req; 3318 break; 3319 default: 3320 break; 3321 } 3322 } 3323 3324 /** 3325 * ice_caps_to_fc_mode 3326 * @caps: PHY capabilities 3327 * 3328 * Convert PHY FC capabilities to ice FC mode 3329 */ 3330 enum ice_fc_mode ice_caps_to_fc_mode(u8 caps) 3331 { 3332 if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE && 3333 caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE) 3334 return ICE_FC_FULL; 3335 3336 if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE) 3337 return ICE_FC_TX_PAUSE; 3338 3339 if (caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE) 3340 return ICE_FC_RX_PAUSE; 3341 3342 return ICE_FC_NONE; 3343 } 3344 3345 /** 3346 * ice_caps_to_fec_mode 3347 * @caps: PHY capabilities 3348 * @fec_options: Link FEC options 3349 * 3350 * Convert PHY FEC capabilities to ice FEC mode 3351 */ 3352 enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options) 3353 { 3354 if (caps & ICE_AQC_PHY_EN_AUTO_FEC) 3355 return ICE_FEC_AUTO; 3356 3357 if (fec_options & (ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN | 3358 ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ | 3359 ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN | 3360 ICE_AQC_PHY_FEC_25G_KR_REQ)) 3361 return ICE_FEC_BASER; 3362 3363 if (fec_options & (ICE_AQC_PHY_FEC_25G_RS_528_REQ | 3364 ICE_AQC_PHY_FEC_25G_RS_544_REQ | 3365 ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN)) 3366 return ICE_FEC_RS; 3367 3368 return ICE_FEC_NONE; 3369 } 3370 3371 /** 3372 * ice_cfg_phy_fc - Configure PHY FC data based on FC mode 3373 * @pi: port information structure 3374 * @cfg: PHY configuration data to set FC mode 3375 * @req_mode: FC mode to configure 3376 */ 3377 int 3378 ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg, 3379 enum ice_fc_mode req_mode) 3380 { 3381 struct ice_phy_cache_mode_data cache_data; 3382 u8 pause_mask = 0x0; 3383 3384 if (!pi || !cfg) 3385 return -EINVAL; 3386 3387 switch (req_mode) { 3388 case ICE_FC_FULL: 3389 pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE; 3390 pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE; 3391 break; 3392 case ICE_FC_RX_PAUSE: 3393 pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE; 3394 break; 3395 case ICE_FC_TX_PAUSE: 3396 pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE; 3397 break; 3398 default: 3399 break; 3400 } 3401 3402 /* clear the old pause settings */ 3403 cfg->caps &= ~(ICE_AQC_PHY_EN_TX_LINK_PAUSE | 3404 ICE_AQC_PHY_EN_RX_LINK_PAUSE); 3405 3406 /* set the new capabilities */ 3407 cfg->caps |= pause_mask; 3408 3409 /* Cache user FC request */ 3410 cache_data.data.curr_user_fc_req = req_mode; 3411 ice_cache_phy_user_req(pi, cache_data, ICE_FC_MODE); 3412 3413 return 0; 3414 } 3415 3416 /** 3417 * ice_set_fc 3418 * @pi: port information structure 3419 * @aq_failures: pointer to status code, specific to ice_set_fc routine 3420 * @ena_auto_link_update: enable automatic link update 3421 * 3422 * Set the requested flow control mode. 
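 *
 * A minimal sketch of a hypothetical caller inspecting @aq_failures
 * (for illustration only):
 *
 *	pi->fc.req_mode = ICE_FC_FULL;
 *	status = ice_set_fc(pi, &aq_fail, true);
 *	if (status && aq_fail == ICE_SET_FC_AQ_FAIL_GET)
 *		(the Get PHY capabilities AQ command failed)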
 */
int
ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
{
	struct ice_aqc_set_phy_cfg_data cfg = { 0 };
	struct ice_aqc_get_phy_caps_data *pcaps;
	struct ice_hw *hw;
	int status;

	if (!pi || !aq_failures)
		return -EINVAL;

	*aq_failures = 0;
	hw = pi->hw;

	pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
	if (!pcaps)
		return -ENOMEM;

	/* Get the current PHY config */
	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG,
				     pcaps, NULL);
	if (status) {
		*aq_failures = ICE_SET_FC_AQ_FAIL_GET;
		goto out;
	}

	ice_copy_phy_caps_to_cfg(pi, pcaps, &cfg);

	/* Configure the set PHY data */
	status = ice_cfg_phy_fc(pi, &cfg, pi->fc.req_mode);
	if (status)
		goto out;

	/* If the capabilities have changed, then set the new config */
	if (cfg.caps != pcaps->caps) {
		int retry_count, retry_max = 10;

		/* Auto restart link so settings take effect */
		if (ena_auto_link_update)
			cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;

		status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL);
		if (status) {
			*aq_failures = ICE_SET_FC_AQ_FAIL_SET;
			goto out;
		}

		/* Update the link info
		 * It sometimes takes a really long time for link to
		 * come back from the atomic reset. Thus, we wait a
		 * little bit.
		 */
		for (retry_count = 0; retry_count < retry_max; retry_count++) {
			status = ice_update_link_info(pi);

			if (!status)
				break;

			mdelay(100);
		}

		if (status)
			*aq_failures = ICE_SET_FC_AQ_FAIL_UPDATE;
	}

out:
	devm_kfree(ice_hw_to_dev(hw), pcaps);
	return status;
}

/**
 * ice_phy_caps_equals_cfg
 * @phy_caps: PHY capabilities
 * @phy_cfg: PHY configuration
 *
 * Helper function to determine if the PHY capabilities match the PHY
 * configuration
 */
bool
ice_phy_caps_equals_cfg(struct ice_aqc_get_phy_caps_data *phy_caps,
			struct ice_aqc_set_phy_cfg_data *phy_cfg)
{
	u8 caps_mask, cfg_mask;

	if (!phy_caps || !phy_cfg)
		return false;

	/* These bits are not common between capabilities and configuration.
	 * Do not use them to determine equality.
	 */
	caps_mask = ICE_AQC_PHY_CAPS_MASK & ~(ICE_AQC_PHY_AN_MODE |
					      ICE_AQC_GET_PHY_EN_MOD_QUAL);
	cfg_mask = ICE_AQ_PHY_ENA_VALID_MASK & ~ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;

	if (phy_caps->phy_type_low != phy_cfg->phy_type_low ||
	    phy_caps->phy_type_high != phy_cfg->phy_type_high ||
	    ((phy_caps->caps & caps_mask) != (phy_cfg->caps & cfg_mask)) ||
	    phy_caps->low_power_ctrl_an != phy_cfg->low_power_ctrl_an ||
	    phy_caps->eee_cap != phy_cfg->eee_cap ||
	    phy_caps->eeer_value != phy_cfg->eeer_value ||
	    phy_caps->link_fec_options != phy_cfg->link_fec_opt)
		return false;

	return true;
}

/**
 * ice_copy_phy_caps_to_cfg - Copy PHY ability data to configuration data
 * @pi: port information structure
 * @caps: PHY ability structure to copy data from
 * @cfg: PHY configuration structure to copy data to
 *
 * Helper function to copy AQC PHY get ability data to PHY set configuration
 * data structure
 */
void
ice_copy_phy_caps_to_cfg(struct ice_port_info *pi,
			 struct ice_aqc_get_phy_caps_data *caps,
			 struct ice_aqc_set_phy_cfg_data *cfg)
{
	if (!pi || !caps || !cfg)
		return;

	memset(cfg, 0, sizeof(*cfg));
	cfg->phy_type_low = caps->phy_type_low;
	cfg->phy_type_high = caps->phy_type_high;
	cfg->caps = caps->caps;
	cfg->low_power_ctrl_an = caps->low_power_ctrl_an;
	cfg->eee_cap = caps->eee_cap;
	cfg->eeer_value = caps->eeer_value;
	cfg->link_fec_opt = caps->link_fec_options;
	cfg->module_compliance_enforcement =
		caps->module_compliance_enforcement;
}

/**
 * ice_cfg_phy_fec - Configure PHY FEC data based on FEC mode
 * @pi: port information structure
 * @cfg: PHY configuration data to set FEC mode
 * @fec: FEC mode to configure
 */
int
ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
		enum ice_fec_mode fec)
{
	struct ice_aqc_get_phy_caps_data *pcaps;
	struct ice_hw *hw;
	int status;

	if (!pi || !cfg)
		return -EINVAL;

	hw = pi->hw;

	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
	if (!pcaps)
		return -ENOMEM;

	status = ice_aq_get_phy_caps(pi, false,
				     (ice_fw_supports_report_dflt_cfg(hw) ?
				      ICE_AQC_REPORT_DFLT_CFG :
				      ICE_AQC_REPORT_TOPO_CAP_MEDIA), pcaps, NULL);
	if (status)
		goto out;

	cfg->caps |= pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC;
	cfg->link_fec_opt = pcaps->link_fec_options;

	switch (fec) {
	case ICE_FEC_BASER:
		/* Clear the RS bits; AND in the BASE-R ability bits and OR
		 * in the request bits.
		 */
		cfg->link_fec_opt &= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
			ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN;
		cfg->link_fec_opt |= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
			ICE_AQC_PHY_FEC_25G_KR_REQ;
		break;
	case ICE_FEC_RS:
		/* Clear the BASE-R bits; AND in the RS ability bits and OR
		 * in the request bits.
		 */
		cfg->link_fec_opt &= ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN;
		cfg->link_fec_opt |= ICE_AQC_PHY_FEC_25G_RS_528_REQ |
			ICE_AQC_PHY_FEC_25G_RS_544_REQ;
		break;
	case ICE_FEC_NONE:
		/* Clear all FEC option bits. */
		cfg->link_fec_opt &= ~ICE_AQC_PHY_FEC_MASK;
		break;
	case ICE_FEC_AUTO:
		/* AND the auto FEC bit and all capability bits,
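		 * i.e. keep the PHY's auto-FEC ability and advertise every
		 * FEC option it reports, letting the link negotiate the
		 * final FEC.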
		 */
		cfg->caps &= ICE_AQC_PHY_CAPS_MASK;
		cfg->link_fec_opt |= pcaps->link_fec_options;
		break;
	default:
		status = -EINVAL;
		break;
	}

	if (fec == ICE_FEC_AUTO && ice_fw_supports_link_override(hw) &&
	    !ice_fw_supports_report_dflt_cfg(hw)) {
		struct ice_link_default_override_tlv tlv = { 0 };

		status = ice_get_link_default_override(&tlv, pi);
		if (status)
			goto out;

		if (!(tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE) &&
		    (tlv.options & ICE_LINK_OVERRIDE_EN))
			cfg->link_fec_opt = tlv.fec_options;
	}

out:
	kfree(pcaps);

	return status;
}

/**
 * ice_get_link_status - get status of the HW network link
 * @pi: port information structure
 * @link_up: pointer to bool (true/false = linkup/linkdown)
 *
 * The variable link_up is true if the link is up and false if it is down.
 * Its value is invalid if the return status is non-zero. As a result of
 * this call, link status reporting becomes enabled.
 */
int ice_get_link_status(struct ice_port_info *pi, bool *link_up)
{
	struct ice_phy_info *phy_info;
	int status = 0;

	if (!pi || !link_up)
		return -EINVAL;

	phy_info = &pi->phy;

	if (phy_info->get_link_info) {
		status = ice_update_link_info(pi);

		if (status)
			ice_debug(pi->hw, ICE_DBG_LINK, "get link status error, status = %d\n",
				  status);
	}

	*link_up = phy_info->link_info.link_info & ICE_AQ_LINK_UP;

	return status;
}

/**
 * ice_aq_set_link_restart_an
 * @pi: pointer to the port information structure
 * @ena_link: if true: enable link, if false: disable link
 * @cd: pointer to command details structure or NULL
 *
 * Sets up the link and restarts Auto-Negotiation over the link.
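 *
 * Illustrative sketch (not part of the driver): after changing the PHY
 * configuration, a caller would typically bounce the link so the new
 * settings take effect:
 *
 *	int err = ice_aq_set_link_restart_an(pi, true, NULL);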
 */
int
ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
			   struct ice_sq_cd *cd)
{
	struct ice_aqc_restart_an *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.restart_an;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_restart_an);

	cmd->cmd_flags = ICE_AQC_RESTART_AN_LINK_RESTART;
	cmd->lport_num = pi->lport;
	if (ena_link)
		cmd->cmd_flags |= ICE_AQC_RESTART_AN_LINK_ENABLE;
	else
		cmd->cmd_flags &= ~ICE_AQC_RESTART_AN_LINK_ENABLE;

	return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_set_event_mask
 * @hw: pointer to the HW struct
 * @port_num: port number of the physical function
 * @mask: event mask to be set
 * @cd: pointer to command details structure or NULL
 *
 * Set event mask (0x0613)
 */
int
ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
		      struct ice_sq_cd *cd)
{
	struct ice_aqc_set_event_mask *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.set_event_mask;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_event_mask);

	cmd->lport_num = port_num;

	cmd->event_mask = cpu_to_le16(mask);
	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_set_mac_loopback
 * @hw: pointer to the HW struct
 * @ena_lpbk: Enable or Disable loopback
 * @cd: pointer to command details structure or NULL
 *
 * Enable/disable loopback on a given port
 */
int
ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd)
{
	struct ice_aqc_set_mac_lb *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.set_mac_lb;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_lb);
	if (ena_lpbk)
		cmd->lb_mode = ICE_AQ_MAC_LB_EN;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_set_port_id_led
 * @pi: pointer to the port information
 * @is_orig_mode: is this LED set to original mode (by the net-list)
 * @cd: pointer to command details structure or NULL
 *
 * Set LED value for the given port (0x06e9)
 */
int
ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode,
		       struct ice_sq_cd *cd)
{
	struct ice_aqc_set_port_id_led *cmd;
	struct ice_hw *hw = pi->hw;
	struct ice_aq_desc desc;

	cmd = &desc.params.set_port_id_led;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_id_led);

	if (is_orig_mode)
		cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_ORIG;
	else
		cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_BLINK;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_get_port_options
 * @hw: pointer to the HW struct
 * @options: buffer for the resultant port options
 * @option_count: input - size of the buffer in port options structures,
 *		  output - number of returned port options
 * @lport: logical port to call the command with (optional)
 * @lport_valid: when false, FW uses the port owned by the PF instead of
 *		 lport; it must be true when the PF owns more than one port
 * @active_option_idx: index of active port option in returned buffer
 * @active_option_valid: active option in returned buffer is valid
 * @pending_option_idx: index of pending port option in returned buffer
 * @pending_option_valid: pending option in returned buffer is valid
 *
 * Calls Get Port Options AQC (0x06ea) and verifies result.
 */
int
ice_aq_get_port_options(struct ice_hw *hw,
			struct ice_aqc_get_port_options_elem *options,
			u8 *option_count, u8 lport, bool lport_valid,
			u8 *active_option_idx, bool *active_option_valid,
			u8 *pending_option_idx, bool *pending_option_valid)
{
	struct ice_aqc_get_port_options *cmd;
	struct ice_aq_desc desc;
	int status;
	u8 i;

	/* options buffer shall be able to hold max returned options */
	if (*option_count < ICE_AQC_PORT_OPT_COUNT_M)
		return -EINVAL;

	cmd = &desc.params.get_port_options;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_port_options);

	if (lport_valid)
		cmd->lport_num = lport;
	cmd->lport_num_valid = lport_valid;

	status = ice_aq_send_cmd(hw, &desc, options,
				 *option_count * sizeof(*options), NULL);
	if (status)
		return status;

	/* verify direct FW response & set output parameters */
	*option_count = FIELD_GET(ICE_AQC_PORT_OPT_COUNT_M,
				  cmd->port_options_count);
	ice_debug(hw, ICE_DBG_PHY, "options: %x\n", *option_count);
	*active_option_valid = FIELD_GET(ICE_AQC_PORT_OPT_VALID,
					 cmd->port_options);
	if (*active_option_valid) {
		*active_option_idx = FIELD_GET(ICE_AQC_PORT_OPT_ACTIVE_M,
					       cmd->port_options);
		if (*active_option_idx > (*option_count - 1))
			return -EIO;
		ice_debug(hw, ICE_DBG_PHY, "active idx: %x\n",
			  *active_option_idx);
	}

	*pending_option_valid = FIELD_GET(ICE_AQC_PENDING_PORT_OPT_VALID,
					  cmd->pending_port_option_status);
	if (*pending_option_valid) {
		*pending_option_idx = FIELD_GET(ICE_AQC_PENDING_PORT_OPT_IDX_M,
						cmd->pending_port_option_status);
		if (*pending_option_idx > (*option_count - 1))
			return -EIO;
		ice_debug(hw, ICE_DBG_PHY, "pending idx: %x\n",
			  *pending_option_idx);
	}

	/* mask output options fields */
	for (i = 0; i < *option_count; i++) {
		options[i].pmd = FIELD_GET(ICE_AQC_PORT_OPT_PMD_COUNT_M,
					   options[i].pmd);
		options[i].max_lane_speed = FIELD_GET(ICE_AQC_PORT_OPT_MAX_LANE_M,
						      options[i].max_lane_speed);
		ice_debug(hw, ICE_DBG_PHY, "pmds: %x max speed: %x\n",
			  options[i].pmd, options[i].max_lane_speed);
	}

	return 0;
}

/**
 * ice_aq_set_port_option
 * @hw: pointer to the HW struct
 * @lport: logical port to call the command with
 * @lport_valid: when false, FW uses the port owned by the PF instead of
 *		 lport; it must be true when the PF owns more than one port
 * @new_option: new port option to be written
 *
 * Calls Set Port Options AQC (0x06eb).
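 *
 * Illustrative sketch (not part of the driver; "pending_idx" is assumed to
 * come from a prior ice_aq_get_port_options() call):
 *
 *	err = ice_aq_set_port_option(hw, 0, true, pending_idx);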
 */
int
ice_aq_set_port_option(struct ice_hw *hw, u8 lport, u8 lport_valid,
		       u8 new_option)
{
	struct ice_aqc_set_port_option *cmd;
	struct ice_aq_desc desc;

	if (new_option > ICE_AQC_PORT_OPT_COUNT_M)
		return -EINVAL;

	cmd = &desc.params.set_port_option;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_option);

	if (lport_valid)
		cmd->lport_num = lport;

	cmd->lport_num_valid = lport_valid;
	cmd->selected_port_option = new_option;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_aq_sff_eeprom
 * @hw: pointer to the HW struct
 * @lport: bits [7:0] = logical port, bit [8] = logical port valid
 * @bus_addr: I2C bus address of the eeprom (typically 0xA0, 0=topo default)
 * @mem_addr: I2C offset. The lower 8 bits are the address; the upper 8 bits
 *	      must be zero.
 * @page: QSFP page
 * @set_page: set or ignore the page
 * @data: pointer to data buffer to be read/written to the I2C device.
 * @length: 1-16 for read, 1 for write.
 * @write: 0 for read, 1 for write.
 * @cd: pointer to command details structure or NULL
 *
 * Read/Write SFF EEPROM (0x06EE)
 */
int
ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
		  u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length,
		  bool write, struct ice_sq_cd *cd)
{
	struct ice_aqc_sff_eeprom *cmd;
	struct ice_aq_desc desc;

	if (!data || (mem_addr & 0xff00))
		return -EINVAL;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_sff_eeprom);
	cmd = &desc.params.read_write_sff_param;
	desc.flags = cpu_to_le16(ICE_AQ_FLAG_RD);
	cmd->lport_num = (u8)(lport & 0xff);
	cmd->lport_num_valid = (u8)((lport >> 8) & 0x01);
	cmd->i2c_bus_addr = cpu_to_le16(((bus_addr >> 1) &
					 ICE_AQC_SFF_I2CBUS_7BIT_M) |
					((set_page <<
					  ICE_AQC_SFF_SET_EEPROM_PAGE_S) &
					 ICE_AQC_SFF_SET_EEPROM_PAGE_M));
	cmd->i2c_mem_addr = cpu_to_le16(mem_addr & 0xff);
	cmd->eeprom_page = cpu_to_le16((u16)page << ICE_AQC_SFF_EEPROM_PAGE_S);
	if (write)
		cmd->i2c_bus_addr |= cpu_to_le16(ICE_AQC_SFF_IS_WRITE);

	return ice_aq_send_cmd(hw, &desc, data, length, cd);
}

static enum ice_lut_size ice_lut_type_to_size(enum ice_lut_type type)
{
	switch (type) {
	case ICE_LUT_VSI:
		return ICE_LUT_VSI_SIZE;
	case ICE_LUT_GLOBAL:
		return ICE_LUT_GLOBAL_SIZE;
	case ICE_LUT_PF:
		return ICE_LUT_PF_SIZE;
	}
	WARN_ONCE(1, "incorrect type passed");
	return ICE_LUT_VSI_SIZE;
}

static enum ice_aqc_lut_flags ice_lut_size_to_flag(enum ice_lut_size size)
{
	switch (size) {
	case ICE_LUT_VSI_SIZE:
		return ICE_AQC_LUT_SIZE_SMALL;
	case ICE_LUT_GLOBAL_SIZE:
		return ICE_AQC_LUT_SIZE_512;
	case ICE_LUT_PF_SIZE:
		return ICE_AQC_LUT_SIZE_2K;
	}
	WARN_ONCE(1, "incorrect size passed");
	return 0;
}

/**
 * __ice_aq_get_set_rss_lut
 * @hw: pointer to the hardware structure
 * @params: RSS LUT parameters
 * @set: set true to set the table, false to get the table
 *
 * Internal function to get (0x0B05) or set (0x0B03) the RSS lookup table
 */
static int
__ice_aq_get_set_rss_lut(struct ice_hw *hw,
			 struct ice_aq_get_set_rss_lut_params *params, bool set)
{
	u16 opcode, vsi_id, vsi_handle = params->vsi_handle, glob_lut_idx = 0;
	enum ice_lut_type lut_type = params->lut_type;
	struct ice_aqc_get_set_rss_lut *desc_params;
	enum ice_aqc_lut_flags flags;
	enum ice_lut_size lut_size;
	struct ice_aq_desc desc;
	u8 *lut = params->lut;

	if (!lut || !ice_is_vsi_valid(hw, vsi_handle))
		return -EINVAL;

	lut_size = ice_lut_type_to_size(lut_type);
	if (lut_size > params->lut_size ||
	    (set && lut_size != params->lut_size))
		return -EINVAL;

	opcode = set ? ice_aqc_opc_set_rss_lut : ice_aqc_opc_get_rss_lut;
	ice_fill_dflt_direct_cmd_desc(&desc, opcode);
	if (set)
		desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	desc_params = &desc.params.get_set_rss_lut;
	vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
	desc_params->vsi_id = cpu_to_le16(vsi_id | ICE_AQC_RSS_VSI_VALID);

	if (lut_type == ICE_LUT_GLOBAL)
		glob_lut_idx = FIELD_PREP(ICE_AQC_LUT_GLOBAL_IDX,
					  params->global_lut_id);

	flags = lut_type | glob_lut_idx | ice_lut_size_to_flag(lut_size);
	desc_params->flags = cpu_to_le16(flags);

	return ice_aq_send_cmd(hw, &desc, lut, lut_size, NULL);
}

/**
 * ice_aq_get_rss_lut
 * @hw: pointer to the hardware structure
 * @get_params: RSS LUT parameters used to specify which RSS LUT to get
 *
 * Get the RSS lookup table (VSI, PF, or global type)
 */
int
ice_aq_get_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *get_params)
{
	return __ice_aq_get_set_rss_lut(hw, get_params, false);
}

/**
 * ice_aq_set_rss_lut
 * @hw: pointer to the hardware structure
 * @set_params: RSS LUT parameters used to specify how to set the RSS LUT
 *
 * Set the RSS lookup table (VSI, PF, or global type)
 */
int
ice_aq_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *set_params)
{
	return __ice_aq_get_set_rss_lut(hw, set_params, true);
}

/**
 * __ice_aq_get_set_rss_key
 * @hw: pointer to the HW struct
 * @vsi_id: VSI FW index
 * @key: pointer to key info struct
 * @set: set true to set the key, false to get the key
 *
 * Get (0x0B04) or set (0x0B02) the RSS key per VSI
 */
static int
__ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id,
			 struct ice_aqc_get_set_rss_keys *key, bool set)
{
	struct ice_aqc_get_set_rss_key *desc_params;
	u16 key_size = sizeof(*key);
	struct ice_aq_desc desc;

	if (set) {
		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_key);
		desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
	} else {
		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_key);
	}

	desc_params = &desc.params.get_set_rss_key;
	desc_params->vsi_id = cpu_to_le16(vsi_id | ICE_AQC_RSS_VSI_VALID);

	return ice_aq_send_cmd(hw, &desc, key, key_size, NULL);
}

/**
 * ice_aq_get_rss_key
 * @hw: pointer to the HW struct
 * @vsi_handle: software VSI handle
 * @key: pointer to key info struct
 *
 * Get the RSS key per VSI
 */
int
ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle,
		   struct ice_aqc_get_set_rss_keys *key)
{
	if (!ice_is_vsi_valid(hw, vsi_handle) || !key)
		return -EINVAL;

	return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
					key, false);
}

/**
 * ice_aq_set_rss_key
 * @hw: pointer to the HW struct
 * @vsi_handle: software VSI handle
 * @keys: pointer to key info struct
 *
 * Set the RSS key per VSI
 */
int
ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle,
		   struct ice_aqc_get_set_rss_keys *keys)
{
	if (!ice_is_vsi_valid(hw, vsi_handle) || !keys)
		return -EINVAL;

	return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
					keys, true);
}

/**
 * ice_aq_add_lan_txq
 * @hw: pointer to the hardware structure
 * @num_qgrps: Number of added queue groups
 * @qg_list: list of queue groups to be added
 * @buf_size: size of buffer for indirect command
 * @cd: pointer to command details structure or NULL
 *
 * Add Tx LAN queue (0x0C30)
 *
 * NOTE:
 * Prior to calling add Tx LAN queue, initialize the following as part of
 * the Tx queue context: the completion queue ID (if the queue uses a
 * completion queue), the quanta profile, the cache profile, and the packet
 * shaper profile.
 *
 * After the add Tx LAN queue AQ command completes, interrupts should be
 * associated with specific queues. Association of a Tx queue to a doorbell
 * queue is not part of the add LAN Tx queue flow.
 */
static int
ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
		   struct ice_aqc_add_tx_qgrp *qg_list, u16 buf_size,
		   struct ice_sq_cd *cd)
{
	struct ice_aqc_add_tx_qgrp *list;
	struct ice_aqc_add_txqs *cmd;
	struct ice_aq_desc desc;
	u16 i, sum_size = 0;

	cmd = &desc.params.add_txqs;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_txqs);

	if (!qg_list)
		return -EINVAL;

	if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
		return -EINVAL;

	for (i = 0, list = qg_list; i < num_qgrps; i++) {
		sum_size += struct_size(list, txqs, list->num_txqs);
		list = (struct ice_aqc_add_tx_qgrp *)(list->txqs +
						      list->num_txqs);
	}

	if (buf_size != sum_size)
		return -EINVAL;

	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	cmd->num_qgrps = num_qgrps;

	return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
}

/**
 * ice_aq_dis_lan_txq
 * @hw: pointer to the hardware structure
 * @num_qgrps: number of groups in the list
 * @qg_list: the list of groups to disable
 * @buf_size: the total size of the qg_list buffer in bytes
 * @rst_src: if called due to reset, specifies the reset source
 * @vmvf_num: the relative VM or VF number that is undergoing the reset
 * @cd: pointer to command details structure or NULL
 *
 * Disable LAN Tx queue (0x0C31)
 */
static int
ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
		   struct ice_aqc_dis_txq_item *qg_list, u16 buf_size,
		   enum ice_disq_rst_src rst_src, u16 vmvf_num,
		   struct ice_sq_cd *cd)
{
	struct ice_aqc_dis_txq_item *item;
	struct ice_aqc_dis_txqs *cmd;
	struct ice_aq_desc desc;
	u16 i, sz = 0;
	int status;

	cmd = &desc.params.dis_txqs;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs);

	/* qg_list can be NULL only in VM/VF reset flow */
	if (!qg_list && !rst_src)
		return -EINVAL;

	if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
		return -EINVAL;

	cmd->num_entries = num_qgrps;

	cmd->vmvf_and_timeout = cpu_to_le16((5 << ICE_AQC_Q_DIS_TIMEOUT_S) &
					    ICE_AQC_Q_DIS_TIMEOUT_M);

	switch (rst_src) {
	case ICE_VM_RESET:
		cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VM_RESET;
		cmd->vmvf_and_timeout |=
			cpu_to_le16(vmvf_num & ICE_AQC_Q_DIS_VMVF_NUM_M);
		break;
	case ICE_VF_RESET:
		cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VF_RESET;
		/* In this case, FW expects vmvf_num to be absolute VF ID */
		cmd->vmvf_and_timeout |=
			cpu_to_le16((vmvf_num + hw->func_caps.vf_base_id) &
				    ICE_AQC_Q_DIS_VMVF_NUM_M);
		break;
	case ICE_NO_RESET:
	default:
		break;
	}

	/* flush pipe on timeout */
	cmd->cmd_type |= ICE_AQC_Q_DIS_CMD_FLUSH_PIPE;
	/* If no queue group info, we are in a reset flow. Issue the AQ */
	if (!qg_list)
		goto do_aq;

	/* set RD bit to indicate that command buffer is provided by the driver
	 * and it needs to be read by the firmware
	 */
	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	for (i = 0, item = qg_list; i < num_qgrps; i++) {
		u16 item_size = struct_size(item, q_id, item->num_qs);

		/* If the number of queues is even, add 2 bytes of padding */
		if ((item->num_qs % 2) == 0)
			item_size += 2;

		sz += item_size;

		item = (struct ice_aqc_dis_txq_item *)((u8 *)item + item_size);
	}

	if (buf_size != sz)
		return -EINVAL;

do_aq:
	status = ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
	if (status) {
		if (!qg_list)
			ice_debug(hw, ICE_DBG_SCHED, "VM%d disable failed %d\n",
				  vmvf_num, hw->adminq.sq_last_status);
		else
			ice_debug(hw, ICE_DBG_SCHED, "disable queue %d failed %d\n",
				  le16_to_cpu(qg_list[0].q_id[0]),
				  hw->adminq.sq_last_status);
	}
	return status;
}

/**
 * ice_aq_cfg_lan_txq
 * @hw: pointer to the hardware structure
 * @buf: buffer for command
 * @buf_size: size of buffer in bytes
 * @num_qs: number of queues being configured
 * @oldport: origination lport
 * @newport: destination lport
 * @cd: pointer to command details structure or NULL
 *
 * Move/Configure LAN Tx queue (0x0C32)
 *
 * There is a better AQ command to use for moving nodes, so this one is only
 * implemented for configuring the node.
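 *
 * Illustrative sketch (not part of the driver; "buf" is assumed to be a
 * caller-built ice_aqc_cfg_txqs_buf describing num_qs queues):
 *
 *	err = ice_aq_cfg_lan_txq(hw, buf, buf_size, num_qs,
 *				 port, port, NULL);
 *
 * Passing the same lport for @oldport and @newport is how a pure
 * reconfiguration (no move) would be expressed.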
4281 */ 4282 int 4283 ice_aq_cfg_lan_txq(struct ice_hw *hw, struct ice_aqc_cfg_txqs_buf *buf, 4284 u16 buf_size, u16 num_qs, u8 oldport, u8 newport, 4285 struct ice_sq_cd *cd) 4286 { 4287 struct ice_aqc_cfg_txqs *cmd; 4288 struct ice_aq_desc desc; 4289 int status; 4290 4291 cmd = &desc.params.cfg_txqs; 4292 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_cfg_txqs); 4293 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 4294 4295 if (!buf) 4296 return -EINVAL; 4297 4298 cmd->cmd_type = ICE_AQC_Q_CFG_TC_CHNG; 4299 cmd->num_qs = num_qs; 4300 cmd->port_num_chng = (oldport & ICE_AQC_Q_CFG_SRC_PRT_M); 4301 cmd->port_num_chng |= (newport << ICE_AQC_Q_CFG_DST_PRT_S) & 4302 ICE_AQC_Q_CFG_DST_PRT_M; 4303 cmd->time_out = (5 << ICE_AQC_Q_CFG_TIMEOUT_S) & 4304 ICE_AQC_Q_CFG_TIMEOUT_M; 4305 cmd->blocked_cgds = 0; 4306 4307 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); 4308 if (status) 4309 ice_debug(hw, ICE_DBG_SCHED, "Failed to reconfigure nodes %d\n", 4310 hw->adminq.sq_last_status); 4311 return status; 4312 } 4313 4314 /** 4315 * ice_aq_add_rdma_qsets 4316 * @hw: pointer to the hardware structure 4317 * @num_qset_grps: Number of RDMA Qset groups 4318 * @qset_list: list of Qset groups to be added 4319 * @buf_size: size of buffer for indirect command 4320 * @cd: pointer to command details structure or NULL 4321 * 4322 * Add Tx RDMA Qsets (0x0C33) 4323 */ 4324 static int 4325 ice_aq_add_rdma_qsets(struct ice_hw *hw, u8 num_qset_grps, 4326 struct ice_aqc_add_rdma_qset_data *qset_list, 4327 u16 buf_size, struct ice_sq_cd *cd) 4328 { 4329 struct ice_aqc_add_rdma_qset_data *list; 4330 struct ice_aqc_add_rdma_qset *cmd; 4331 struct ice_aq_desc desc; 4332 u16 i, sum_size = 0; 4333 4334 cmd = &desc.params.add_rdma_qset; 4335 4336 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_rdma_qset); 4337 4338 if (num_qset_grps > ICE_LAN_TXQ_MAX_QGRPS) 4339 return -EINVAL; 4340 4341 for (i = 0, list = qset_list; i < num_qset_grps; i++) { 4342 u16 num_qsets = le16_to_cpu(list->num_qsets); 4343 4344 sum_size += struct_size(list, rdma_qsets, num_qsets); 4345 list = (struct ice_aqc_add_rdma_qset_data *)(list->rdma_qsets + 4346 num_qsets); 4347 } 4348 4349 if (buf_size != sum_size) 4350 return -EINVAL; 4351 4352 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 4353 4354 cmd->num_qset_grps = num_qset_grps; 4355 4356 return ice_aq_send_cmd(hw, &desc, qset_list, buf_size, cd); 4357 } 4358 4359 /* End of FW Admin Queue command wrappers */ 4360 4361 /** 4362 * ice_write_byte - write a byte to a packed context structure 4363 * @src_ctx: the context structure to read from 4364 * @dest_ctx: the context to be written to 4365 * @ce_info: a description of the struct to be filled 4366 */ 4367 static void 4368 ice_write_byte(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info) 4369 { 4370 u8 src_byte, dest_byte, mask; 4371 u8 *from, *dest; 4372 u16 shift_width; 4373 4374 /* copy from the next struct field */ 4375 from = src_ctx + ce_info->offset; 4376 4377 /* prepare the bits and mask */ 4378 shift_width = ce_info->lsb % 8; 4379 mask = (u8)(BIT(ce_info->width) - 1); 4380 4381 src_byte = *from; 4382 src_byte &= mask; 4383 4384 /* shift to correct alignment */ 4385 mask <<= shift_width; 4386 src_byte <<= shift_width; 4387 4388 /* get the current bits from the target bit string */ 4389 dest = dest_ctx + (ce_info->lsb / 8); 4390 4391 memcpy(&dest_byte, dest, sizeof(dest_byte)); 4392 4393 dest_byte &= ~mask; /* get the bits not changing */ 4394 dest_byte |= src_byte; /* add in the new bits */ 4395 4396 /* put it all back */ 4397 
memcpy(dest, &dest_byte, sizeof(dest_byte)); 4398 } 4399 4400 /** 4401 * ice_write_word - write a word to a packed context structure 4402 * @src_ctx: the context structure to read from 4403 * @dest_ctx: the context to be written to 4404 * @ce_info: a description of the struct to be filled 4405 */ 4406 static void 4407 ice_write_word(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info) 4408 { 4409 u16 src_word, mask; 4410 __le16 dest_word; 4411 u8 *from, *dest; 4412 u16 shift_width; 4413 4414 /* copy from the next struct field */ 4415 from = src_ctx + ce_info->offset; 4416 4417 /* prepare the bits and mask */ 4418 shift_width = ce_info->lsb % 8; 4419 mask = BIT(ce_info->width) - 1; 4420 4421 /* don't swizzle the bits until after the mask because the mask bits 4422 * will be in a different bit position on big endian machines 4423 */ 4424 src_word = *(u16 *)from; 4425 src_word &= mask; 4426 4427 /* shift to correct alignment */ 4428 mask <<= shift_width; 4429 src_word <<= shift_width; 4430 4431 /* get the current bits from the target bit string */ 4432 dest = dest_ctx + (ce_info->lsb / 8); 4433 4434 memcpy(&dest_word, dest, sizeof(dest_word)); 4435 4436 dest_word &= ~(cpu_to_le16(mask)); /* get the bits not changing */ 4437 dest_word |= cpu_to_le16(src_word); /* add in the new bits */ 4438 4439 /* put it all back */ 4440 memcpy(dest, &dest_word, sizeof(dest_word)); 4441 } 4442 4443 /** 4444 * ice_write_dword - write a dword to a packed context structure 4445 * @src_ctx: the context structure to read from 4446 * @dest_ctx: the context to be written to 4447 * @ce_info: a description of the struct to be filled 4448 */ 4449 static void 4450 ice_write_dword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info) 4451 { 4452 u32 src_dword, mask; 4453 __le32 dest_dword; 4454 u8 *from, *dest; 4455 u16 shift_width; 4456 4457 /* copy from the next struct field */ 4458 from = src_ctx + ce_info->offset; 4459 4460 /* prepare the bits and mask */ 4461 shift_width = ce_info->lsb % 8; 4462 4463 /* if the field width is exactly 32 on an x86 machine, then the shift 4464 * operation will not work because the SHL instructions count is masked 4465 * to 5 bits so the shift will do nothing 4466 */ 4467 if (ce_info->width < 32) 4468 mask = BIT(ce_info->width) - 1; 4469 else 4470 mask = (u32)~0; 4471 4472 /* don't swizzle the bits until after the mask because the mask bits 4473 * will be in a different bit position on big endian machines 4474 */ 4475 src_dword = *(u32 *)from; 4476 src_dword &= mask; 4477 4478 /* shift to correct alignment */ 4479 mask <<= shift_width; 4480 src_dword <<= shift_width; 4481 4482 /* get the current bits from the target bit string */ 4483 dest = dest_ctx + (ce_info->lsb / 8); 4484 4485 memcpy(&dest_dword, dest, sizeof(dest_dword)); 4486 4487 dest_dword &= ~(cpu_to_le32(mask)); /* get the bits not changing */ 4488 dest_dword |= cpu_to_le32(src_dword); /* add in the new bits */ 4489 4490 /* put it all back */ 4491 memcpy(dest, &dest_dword, sizeof(dest_dword)); 4492 } 4493 4494 /** 4495 * ice_write_qword - write a qword to a packed context structure 4496 * @src_ctx: the context structure to read from 4497 * @dest_ctx: the context to be written to 4498 * @ce_info: a description of the struct to be filled 4499 */ 4500 static void 4501 ice_write_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info) 4502 { 4503 u64 src_qword, mask; 4504 __le64 dest_qword; 4505 u8 *from, *dest; 4506 u16 shift_width; 4507 4508 /* copy from the next struct field */ 4509 from = src_ctx + 
ce_info->offset; 4510 4511 /* prepare the bits and mask */ 4512 shift_width = ce_info->lsb % 8; 4513 4514 /* if the field width is exactly 64 on an x86 machine, then the shift 4515 * operation will not work because the SHL instructions count is masked 4516 * to 6 bits so the shift will do nothing 4517 */ 4518 if (ce_info->width < 64) 4519 mask = BIT_ULL(ce_info->width) - 1; 4520 else 4521 mask = (u64)~0; 4522 4523 /* don't swizzle the bits until after the mask because the mask bits 4524 * will be in a different bit position on big endian machines 4525 */ 4526 src_qword = *(u64 *)from; 4527 src_qword &= mask; 4528 4529 /* shift to correct alignment */ 4530 mask <<= shift_width; 4531 src_qword <<= shift_width; 4532 4533 /* get the current bits from the target bit string */ 4534 dest = dest_ctx + (ce_info->lsb / 8); 4535 4536 memcpy(&dest_qword, dest, sizeof(dest_qword)); 4537 4538 dest_qword &= ~(cpu_to_le64(mask)); /* get the bits not changing */ 4539 dest_qword |= cpu_to_le64(src_qword); /* add in the new bits */ 4540 4541 /* put it all back */ 4542 memcpy(dest, &dest_qword, sizeof(dest_qword)); 4543 } 4544 4545 /** 4546 * ice_set_ctx - set context bits in packed structure 4547 * @hw: pointer to the hardware structure 4548 * @src_ctx: pointer to a generic non-packed context structure 4549 * @dest_ctx: pointer to memory for the packed structure 4550 * @ce_info: a description of the structure to be transformed 4551 */ 4552 int 4553 ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx, 4554 const struct ice_ctx_ele *ce_info) 4555 { 4556 int f; 4557 4558 for (f = 0; ce_info[f].width; f++) { 4559 /* We have to deal with each element of the FW response 4560 * using the correct size so that we are correct regardless 4561 * of the endianness of the machine. 4562 */ 4563 if (ce_info[f].width > (ce_info[f].size_of * BITS_PER_BYTE)) { 4564 ice_debug(hw, ICE_DBG_QCTX, "Field %d width of %d bits larger than size of %d byte(s) ... 
skipping write\n",
				  f, ce_info[f].width, ce_info[f].size_of);
			continue;
		}
		switch (ce_info[f].size_of) {
		case sizeof(u8):
			ice_write_byte(src_ctx, dest_ctx, &ce_info[f]);
			break;
		case sizeof(u16):
			ice_write_word(src_ctx, dest_ctx, &ce_info[f]);
			break;
		case sizeof(u32):
			ice_write_dword(src_ctx, dest_ctx, &ce_info[f]);
			break;
		case sizeof(u64):
			ice_write_qword(src_ctx, dest_ctx, &ce_info[f]);
			break;
		default:
			return -EINVAL;
		}
	}

	return 0;
}

/**
 * ice_get_lan_q_ctx - get the LAN queue context for the given VSI and TC
 * @hw: pointer to the HW struct
 * @vsi_handle: software VSI handle
 * @tc: TC number
 * @q_handle: software queue handle
 */
struct ice_q_ctx *
ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle)
{
	struct ice_vsi_ctx *vsi;
	struct ice_q_ctx *q_ctx;

	vsi = ice_get_vsi_ctx(hw, vsi_handle);
	if (!vsi)
		return NULL;
	if (q_handle >= vsi->num_lan_q_entries[tc])
		return NULL;
	if (!vsi->lan_q_ctx[tc])
		return NULL;
	q_ctx = vsi->lan_q_ctx[tc];
	return &q_ctx[q_handle];
}

/**
 * ice_ena_vsi_txq
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc: TC number
 * @q_handle: software queue handle
 * @num_qgrps: Number of added queue groups
 * @buf: list of queue groups to be added
 * @buf_size: size of buffer for indirect command
 * @cd: pointer to command details structure or NULL
 *
 * This function adds one LAN queue
 */
int
ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
		u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
		struct ice_sq_cd *cd)
{
	struct ice_aqc_txsched_elem_data node = { 0 };
	struct ice_sched_node *parent;
	struct ice_q_ctx *q_ctx;
	struct ice_hw *hw;
	int status;

	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return -EIO;

	if (num_qgrps > 1 || buf->num_txqs > 1)
		return -ENOSPC;

	hw = pi->hw;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return -EINVAL;

	mutex_lock(&pi->sched_lock);

	q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handle);
	if (!q_ctx) {
		ice_debug(hw, ICE_DBG_SCHED, "Enaq: invalid queue handle %d\n",
			  q_handle);
		status = -EINVAL;
		goto ena_txq_exit;
	}

	/* find a parent node */
	parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
					    ICE_SCHED_NODE_OWNER_LAN);
	if (!parent) {
		status = -EINVAL;
		goto ena_txq_exit;
	}

	buf->parent_teid = parent->info.node_teid;
	node.parent_teid = parent->info.node_teid;
	/* Mark the values in the "generic" section as valid. The default
	 * value in the "generic" section is zero. This means that:
	 * - Scheduling mode is Bytes Per Second (BPS), indicated by Bit 0.
	 * - 0 priority among siblings, indicated by Bits 1-3.
	 * - WFQ, indicated by Bit 4.
	 * - 0 Adjustment value is used in the PSM credit update flow,
	 *   indicated by Bits 5-6.
	 * - Bit 7 is reserved.
	 * Without setting the generic section as valid in valid_sections, the
	 * Admin queue command will fail with error code ICE_AQ_RC_EINVAL.
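	 * With generic left at zero, those defaults therefore apply: BPS
	 * scheduling, sibling priority 0, WFQ, and a zero PSM credit
	 * adjustment.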
	 */
	buf->txqs[0].info.valid_sections =
		ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR |
		ICE_AQC_ELEM_VALID_EIR;
	buf->txqs[0].info.generic = 0;
	buf->txqs[0].info.cir_bw.bw_profile_idx =
		cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
	buf->txqs[0].info.cir_bw.bw_alloc =
		cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
	buf->txqs[0].info.eir_bw.bw_profile_idx =
		cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
	buf->txqs[0].info.eir_bw.bw_alloc =
		cpu_to_le16(ICE_SCHED_DFLT_BW_WT);

	/* add the LAN queue */
	status = ice_aq_add_lan_txq(hw, num_qgrps, buf, buf_size, cd);
	if (status) {
		ice_debug(hw, ICE_DBG_SCHED, "enable queue %d failed %d\n",
			  le16_to_cpu(buf->txqs[0].txq_id),
			  hw->adminq.sq_last_status);
		goto ena_txq_exit;
	}

	node.node_teid = buf->txqs[0].q_teid;
	node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
	q_ctx->q_handle = q_handle;
	q_ctx->q_teid = le32_to_cpu(node.node_teid);

	/* add a leaf node into scheduler tree queue layer */
	status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node, NULL);
	if (!status)
		status = ice_sched_replay_q_bw(pi, q_ctx);

ena_txq_exit:
	mutex_unlock(&pi->sched_lock);
	return status;
}

/**
 * ice_dis_vsi_txq
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc: TC number
 * @num_queues: number of queues
 * @q_handles: pointer to software queue handle array
 * @q_ids: pointer to the q_id array
 * @q_teids: pointer to queue node teids
 * @rst_src: if called due to reset, specifies the reset source
 * @vmvf_num: the relative VM or VF number that is undergoing the reset
 * @cd: pointer to command details structure or NULL
 *
 * This function removes queues and their corresponding nodes in SW DB
 */
int
ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
		u16 *q_handles, u16 *q_ids, u32 *q_teids,
		enum ice_disq_rst_src rst_src, u16 vmvf_num,
		struct ice_sq_cd *cd)
{
	struct ice_aqc_dis_txq_item *qg_list;
	struct ice_q_ctx *q_ctx;
	int status = -ENOENT;
	struct ice_hw *hw;
	u16 i, buf_size;

	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return -EIO;

	hw = pi->hw;

	if (!num_queues) {
		/* if the queue is already disabled but the disable queue
		 * command still has to be sent to complete the VF reset,
		 * then call ice_aq_dis_lan_txq without any queue information
		 */
		if (rst_src)
			return ice_aq_dis_lan_txq(hw, 0, NULL, 0, rst_src,
						  vmvf_num, NULL);
		return -EIO;
	}

	buf_size = struct_size(qg_list, q_id, 1);
	qg_list = kzalloc(buf_size, GFP_KERNEL);
	if (!qg_list)
		return -ENOMEM;

	mutex_lock(&pi->sched_lock);

	for (i = 0; i < num_queues; i++) {
		struct ice_sched_node *node;

		node = ice_sched_find_node_by_teid(pi->root, q_teids[i]);
		if (!node)
			continue;
		q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handles[i]);
		if (!q_ctx) {
			ice_debug(hw, ICE_DBG_SCHED, "invalid queue handle %d\n",
				  q_handles[i]);
			continue;
		}
		if (q_ctx->q_handle != q_handles[i]) {
			ice_debug(hw, ICE_DBG_SCHED, "Err: handles %d %d\n",
				  q_ctx->q_handle, q_handles[i]);
			continue;
		}
		qg_list->parent_teid = node->info.parent_teid;
		qg_list->num_qs = 1;
		qg_list->q_id[0] = cpu_to_le16(q_ids[i]);
		status
= ice_aq_dis_lan_txq(hw, 1, qg_list, buf_size, rst_src, 4787 vmvf_num, cd); 4788 4789 if (status) 4790 break; 4791 ice_free_sched_node(pi, node); 4792 q_ctx->q_handle = ICE_INVAL_Q_HANDLE; 4793 q_ctx->q_teid = ICE_INVAL_TEID; 4794 } 4795 mutex_unlock(&pi->sched_lock); 4796 kfree(qg_list); 4797 return status; 4798 } 4799 4800 /** 4801 * ice_cfg_vsi_qs - configure the new/existing VSI queues 4802 * @pi: port information structure 4803 * @vsi_handle: software VSI handle 4804 * @tc_bitmap: TC bitmap 4805 * @maxqs: max queues array per TC 4806 * @owner: LAN or RDMA 4807 * 4808 * This function adds/updates the VSI queues per TC. 4809 */ 4810 static int 4811 ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap, 4812 u16 *maxqs, u8 owner) 4813 { 4814 int status = 0; 4815 u8 i; 4816 4817 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY) 4818 return -EIO; 4819 4820 if (!ice_is_vsi_valid(pi->hw, vsi_handle)) 4821 return -EINVAL; 4822 4823 mutex_lock(&pi->sched_lock); 4824 4825 ice_for_each_traffic_class(i) { 4826 /* configuration is possible only if TC node is present */ 4827 if (!ice_sched_get_tc_node(pi, i)) 4828 continue; 4829 4830 status = ice_sched_cfg_vsi(pi, vsi_handle, i, maxqs[i], owner, 4831 ice_is_tc_ena(tc_bitmap, i)); 4832 if (status) 4833 break; 4834 } 4835 4836 mutex_unlock(&pi->sched_lock); 4837 return status; 4838 } 4839 4840 /** 4841 * ice_cfg_vsi_lan - configure VSI LAN queues 4842 * @pi: port information structure 4843 * @vsi_handle: software VSI handle 4844 * @tc_bitmap: TC bitmap 4845 * @max_lanqs: max LAN queues array per TC 4846 * 4847 * This function adds/updates the VSI LAN queues per TC. 4848 */ 4849 int 4850 ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap, 4851 u16 *max_lanqs) 4852 { 4853 return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_lanqs, 4854 ICE_SCHED_NODE_OWNER_LAN); 4855 } 4856 4857 /** 4858 * ice_cfg_vsi_rdma - configure the VSI RDMA queues 4859 * @pi: port information structure 4860 * @vsi_handle: software VSI handle 4861 * @tc_bitmap: TC bitmap 4862 * @max_rdmaqs: max RDMA queues array per TC 4863 * 4864 * This function adds/updates the VSI RDMA queues per TC. 
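 *
 * Illustrative sketch (not part of the driver; max_rdmaqs is assumed to be
 * a caller-filled per-TC array):
 *
 *	u16 max_rdmaqs[ICE_MAX_TRAFFIC_CLASS] = { ... };
 *	int err;
 *
 *	err = ice_cfg_vsi_rdma(pi, vsi_handle, tc_bitmap, max_rdmaqs);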
4865 */ 4866 int 4867 ice_cfg_vsi_rdma(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap, 4868 u16 *max_rdmaqs) 4869 { 4870 return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_rdmaqs, 4871 ICE_SCHED_NODE_OWNER_RDMA); 4872 } 4873 4874 /** 4875 * ice_ena_vsi_rdma_qset 4876 * @pi: port information structure 4877 * @vsi_handle: software VSI handle 4878 * @tc: TC number 4879 * @rdma_qset: pointer to RDMA Qset 4880 * @num_qsets: number of RDMA Qsets 4881 * @qset_teid: pointer to Qset node TEIDs 4882 * 4883 * This function adds RDMA Qset 4884 */ 4885 int 4886 ice_ena_vsi_rdma_qset(struct ice_port_info *pi, u16 vsi_handle, u8 tc, 4887 u16 *rdma_qset, u16 num_qsets, u32 *qset_teid) 4888 { 4889 struct ice_aqc_txsched_elem_data node = { 0 }; 4890 struct ice_aqc_add_rdma_qset_data *buf; 4891 struct ice_sched_node *parent; 4892 struct ice_hw *hw; 4893 u16 i, buf_size; 4894 int ret; 4895 4896 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY) 4897 return -EIO; 4898 hw = pi->hw; 4899 4900 if (!ice_is_vsi_valid(hw, vsi_handle)) 4901 return -EINVAL; 4902 4903 buf_size = struct_size(buf, rdma_qsets, num_qsets); 4904 buf = kzalloc(buf_size, GFP_KERNEL); 4905 if (!buf) 4906 return -ENOMEM; 4907 mutex_lock(&pi->sched_lock); 4908 4909 parent = ice_sched_get_free_qparent(pi, vsi_handle, tc, 4910 ICE_SCHED_NODE_OWNER_RDMA); 4911 if (!parent) { 4912 ret = -EINVAL; 4913 goto rdma_error_exit; 4914 } 4915 buf->parent_teid = parent->info.node_teid; 4916 node.parent_teid = parent->info.node_teid; 4917 4918 buf->num_qsets = cpu_to_le16(num_qsets); 4919 for (i = 0; i < num_qsets; i++) { 4920 buf->rdma_qsets[i].tx_qset_id = cpu_to_le16(rdma_qset[i]); 4921 buf->rdma_qsets[i].info.valid_sections = 4922 ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR | 4923 ICE_AQC_ELEM_VALID_EIR; 4924 buf->rdma_qsets[i].info.generic = 0; 4925 buf->rdma_qsets[i].info.cir_bw.bw_profile_idx = 4926 cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID); 4927 buf->rdma_qsets[i].info.cir_bw.bw_alloc = 4928 cpu_to_le16(ICE_SCHED_DFLT_BW_WT); 4929 buf->rdma_qsets[i].info.eir_bw.bw_profile_idx = 4930 cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID); 4931 buf->rdma_qsets[i].info.eir_bw.bw_alloc = 4932 cpu_to_le16(ICE_SCHED_DFLT_BW_WT); 4933 } 4934 ret = ice_aq_add_rdma_qsets(hw, 1, buf, buf_size, NULL); 4935 if (ret) { 4936 ice_debug(hw, ICE_DBG_RDMA, "add RDMA qset failed\n"); 4937 goto rdma_error_exit; 4938 } 4939 node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF; 4940 for (i = 0; i < num_qsets; i++) { 4941 node.node_teid = buf->rdma_qsets[i].qset_teid; 4942 ret = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, 4943 &node, NULL); 4944 if (ret) 4945 break; 4946 qset_teid[i] = le32_to_cpu(node.node_teid); 4947 } 4948 rdma_error_exit: 4949 mutex_unlock(&pi->sched_lock); 4950 kfree(buf); 4951 return ret; 4952 } 4953 4954 /** 4955 * ice_dis_vsi_rdma_qset - free RDMA resources 4956 * @pi: port_info struct 4957 * @count: number of RDMA Qsets to free 4958 * @qset_teid: TEID of Qset node 4959 * @q_id: list of queue IDs being disabled 4960 */ 4961 int 4962 ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid, 4963 u16 *q_id) 4964 { 4965 struct ice_aqc_dis_txq_item *qg_list; 4966 struct ice_hw *hw; 4967 int status = 0; 4968 u16 qg_size; 4969 int i; 4970 4971 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY) 4972 return -EIO; 4973 4974 hw = pi->hw; 4975 4976 qg_size = struct_size(qg_list, q_id, 1); 4977 qg_list = kzalloc(qg_size, GFP_KERNEL); 4978 if (!qg_list) 4979 return -ENOMEM; 4980 4981 mutex_lock(&pi->sched_lock); 4982 4983 for (i = 0; 
i < count; i++) {
		struct ice_sched_node *node;

		node = ice_sched_find_node_by_teid(pi->root, qset_teid[i]);
		if (!node)
			continue;

		qg_list->parent_teid = node->info.parent_teid;
		qg_list->num_qs = 1;
		qg_list->q_id[0] =
			cpu_to_le16(q_id[i] |
				    ICE_AQC_Q_DIS_BUF_ELEM_TYPE_RDMA_QSET);

		status = ice_aq_dis_lan_txq(hw, 1, qg_list, qg_size,
					    ICE_NO_RESET, 0, NULL);
		if (status)
			break;

		ice_free_sched_node(pi, node);
	}

	mutex_unlock(&pi->sched_lock);
	kfree(qg_list);
	return status;
}

/**
 * ice_replay_pre_init - replay pre initialization
 * @hw: pointer to the HW struct
 *
 * Initializes required config data for VSI, FD, ACL, and RSS before replay.
 */
static int ice_replay_pre_init(struct ice_hw *hw)
{
	struct ice_switch_info *sw = hw->switch_info;
	u8 i;

	/* Delete old entries from replay filter list head if there are any */
	ice_rm_all_sw_replay_rule_info(hw);
	/* At the start of replay, move entries into the replay_rules list;
	 * this allows adding rule entries back to the filt_rules list, which
	 * is the operational list.
	 */
	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++)
		list_replace_init(&sw->recp_list[i].filt_rules,
				  &sw->recp_list[i].filt_replay_rules);
	ice_sched_replay_agg_vsi_preinit(hw);

	return 0;
}

/**
 * ice_replay_vsi - replay VSI configuration
 * @hw: pointer to the HW struct
 * @vsi_handle: driver VSI handle
 *
 * Restore all VSI configuration after reset. It is required to call this
 * function with the main VSI first.
 */
int ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle)
{
	int status;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return -EINVAL;

	/* Replay pre-initialization if there is any */
	if (vsi_handle == ICE_MAIN_VSI_HANDLE) {
		status = ice_replay_pre_init(hw);
		if (status)
			return status;
	}
	/* Replay per VSI all RSS configurations */
	status = ice_replay_rss_cfg(hw, vsi_handle);
	if (status)
		return status;
	/* Replay per VSI all filters */
	status = ice_replay_vsi_all_fltr(hw, vsi_handle);
	if (!status)
		status = ice_replay_vsi_agg(hw, vsi_handle);
	return status;
}

/**
 * ice_replay_post - post replay configuration cleanup
 * @hw: pointer to the HW struct
 *
 * Post replay cleanup.
 */
void ice_replay_post(struct ice_hw *hw)
{
	/* Delete old entries from replay filter list head */
	ice_rm_all_sw_replay_rule_info(hw);
	ice_sched_replay_agg(hw);
}

/**
 * ice_stat_update40 - read 40 bit stat from the chip and update stat values
 * @hw: ptr to the hardware info
 * @reg: offset of 64 bit HW register to read from
 * @prev_stat_loaded: bool to specify if previous stats are loaded
 * @prev_stat: ptr to previous loaded stat value
 * @cur_stat: ptr to current stat value
 */
void
ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
		  u64 *prev_stat, u64 *cur_stat)
{
	u64 new_data = rd64(hw, reg) & (BIT_ULL(40) - 1);

	/* device stats are not reset at PFR; they likely will not be zeroed
	 * when the driver starts. Thus, save the value from the first read
	 * without adding to the statistic value so that we report stats which
	 * count up from zero.
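	 * For example, if prev_stat is 2^40 - 2 and the next read returns 3,
	 * the 40-bit counter has rolled over and the increment below works
	 * out to (3 + 2^40) - (2^40 - 2) = 5.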
	 */
	if (!prev_stat_loaded) {
		*prev_stat = new_data;
		return;
	}

	/* Calculate the difference between the new and old values, and then
	 * add it to the software stat value.
	 */
	if (new_data >= *prev_stat)
		*cur_stat += new_data - *prev_stat;
	else
		/* to manage the potential roll-over */
		*cur_stat += (new_data + BIT_ULL(40)) - *prev_stat;

	/* Update the previously stored value to prepare for next read */
	*prev_stat = new_data;
}

/**
 * ice_stat_update32 - read 32 bit stat from the chip and update stat values
 * @hw: ptr to the hardware info
 * @reg: offset of HW register to read from
 * @prev_stat_loaded: bool to specify if previous stats are loaded
 * @prev_stat: ptr to previous loaded stat value
 * @cur_stat: ptr to current stat value
 */
void
ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
		  u64 *prev_stat, u64 *cur_stat)
{
	u32 new_data;

	new_data = rd32(hw, reg);

	/* device stats are not reset at PFR; they likely will not be zeroed
	 * when the driver starts. Thus, save the value from the first read
	 * without adding to the statistic value so that we report stats which
	 * count up from zero.
	 */
	if (!prev_stat_loaded) {
		*prev_stat = new_data;
		return;
	}

	/* Calculate the difference between the new and old values, and then
	 * add it to the software stat value.
	 */
	if (new_data >= *prev_stat)
		*cur_stat += new_data - *prev_stat;
	else
		/* to manage the potential roll-over */
		*cur_stat += (new_data + BIT_ULL(32)) - *prev_stat;

	/* Update the previously stored value to prepare for next read */
	*prev_stat = new_data;
}

/**
 * ice_sched_query_elem - query element information from HW
 * @hw: pointer to the HW struct
 * @node_teid: node TEID to be queried
 * @buf: buffer to store element information
 *
 * This function queries HW element information
 */
int
ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
		     struct ice_aqc_txsched_elem_data *buf)
{
	u16 buf_size, num_elem_ret = 0;
	int status;

	buf_size = sizeof(*buf);
	memset(buf, 0, buf_size);
	buf->node_teid = cpu_to_le32(node_teid);
	status = ice_aq_query_sched_elems(hw, 1, buf, buf_size, &num_elem_ret,
					  NULL);
	if (status || num_elem_ret != 1)
		ice_debug(hw, ICE_DBG_SCHED, "query element failed\n");
	return status;
}

/**
 * ice_aq_read_i2c
 * @hw: pointer to the hw struct
 * @topo_addr: topology address for a device to communicate with
 * @bus_addr: 7-bit I2C bus address
 * @addr: I2C memory address (I2C offset) with up to 16 bits
 * @params: I2C parameters: bit [7] - Repeated start,
 *	    bits [6:5] - data offset size,
 *	    bit [4] - I2C address type,
 *	    bits [3:0] - data size to read (0-16 bytes)
 * @data: pointer to data (0 to 16 bytes) to be read from the I2C device
 * @cd: pointer to command details structure or NULL
 *
 * Read I2C (0x06E2)
 */
int
ice_aq_read_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
		u16 bus_addr, __le16 addr, u8 params, u8 *data,
		struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc = { 0 };
	struct ice_aqc_i2c *cmd;
	u8 data_size;
	int status;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_read_i2c);
	cmd = &desc.params.read_write_i2c;

	if (!data)
		return -EINVAL;

	data_size = FIELD_GET(ICE_AQC_I2C_DATA_SIZE_M, params);

	cmd->i2c_bus_addr = cpu_to_le16(bus_addr);
	cmd->topo_addr = topo_addr;
	cmd->i2c_params = params;
	cmd->i2c_addr = addr;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
	if (!status) {
		struct ice_aqc_read_i2c_resp *resp;
		u8 i;

		resp = &desc.params.read_i2c_resp;
		for (i = 0; i < data_size; i++) {
			*data = resp->i2c_data[i];
			data++;
		}
	}

	return status;
}

/**
 * ice_aq_write_i2c
 * @hw: pointer to the hw struct
 * @topo_addr: topology address for a device to communicate with
 * @bus_addr: 7-bit I2C bus address
 * @addr: I2C memory address (I2C offset) with up to 16 bits
 * @params: I2C parameters: bit [4] - I2C address type, bits [3:0] - data size to write (0-7 bytes)
 * @data: pointer to data (0 to 4 bytes) to be written to the I2C device
 * @cd: pointer to command details structure or NULL
 *
 * Write I2C (0x06E3)
 *
 * Return:
 * * 0       - Successful write to the I2C device
 * * -EINVAL - Data size greater than 4 bytes
 * * -EIO    - FW error
 */
int
ice_aq_write_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
		 u16 bus_addr, __le16 addr, u8 params, const u8 *data,
		 struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc = { 0 };
	struct ice_aqc_i2c *cmd;
	u8 data_size;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_write_i2c);
	cmd = &desc.params.read_write_i2c;

	data_size = FIELD_GET(ICE_AQC_I2C_DATA_SIZE_M, params);

	/* data_size limited to 4 */
	if (data_size > 4)
		return -EINVAL;

	cmd->i2c_bus_addr = cpu_to_le16(bus_addr);
	cmd->topo_addr = topo_addr;
	cmd->i2c_params = params;
	cmd->i2c_addr = addr;

	memcpy(cmd->i2c_data, data, data_size);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_set_driver_param - Set driver parameter to share via firmware
 * @hw: pointer to the HW struct
 * @idx: parameter index to set
 * @value: the value to set the parameter to
 * @cd: pointer to command details structure or NULL
 *
 * Set the value of one of the software defined parameters. All PFs connected
 * to this device can read the value using ice_aq_get_driver_param.
 *
 * Note that firmware provides no synchronization or locking, and will not
 * save the parameter value during a device reset. It is expected that
 * a single PF will write the parameter value, while all other PFs will only
 * read it.
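 *
 * Illustrative sketch (not part of the driver; "idx" is assumed to be a
 * valid ice_aqc_driver_params index agreed on by all PFs):
 *
 *	err = ice_aq_set_driver_param(hw, idx, value, NULL);
 *	...
 *	err = ice_aq_get_driver_param(hw, idx, &value, NULL);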
 */
int
ice_aq_set_driver_param(struct ice_hw *hw, enum ice_aqc_driver_params idx,
			u32 value, struct ice_sq_cd *cd)
{
	struct ice_aqc_driver_shared_params *cmd;
	struct ice_aq_desc desc;

	if (idx >= ICE_AQC_DRIVER_PARAM_MAX)
		return -EIO;

	cmd = &desc.params.drv_shared_params;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_shared_params);

	cmd->set_or_get_op = ICE_AQC_DRIVER_PARAM_SET;
	cmd->param_indx = idx;
	cmd->param_val = cpu_to_le32(value);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_get_driver_param - Get driver parameter shared via firmware
 * @hw: pointer to the HW struct
 * @idx: parameter index to retrieve
 * @value: storage to return the shared parameter
 * @cd: pointer to command details structure or NULL
 *
 * Get the value of one of the software defined parameters.
 *
 * Note that firmware provides no synchronization or locking. It is expected
 * that only a single PF will write a given parameter.
 */
int
ice_aq_get_driver_param(struct ice_hw *hw, enum ice_aqc_driver_params idx,
			u32 *value, struct ice_sq_cd *cd)
{
	struct ice_aqc_driver_shared_params *cmd;
	struct ice_aq_desc desc;
	int status;

	if (idx >= ICE_AQC_DRIVER_PARAM_MAX)
		return -EIO;

	cmd = &desc.params.drv_shared_params;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_shared_params);

	cmd->set_or_get_op = ICE_AQC_DRIVER_PARAM_GET;
	cmd->param_indx = idx;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
	if (status)
		return status;

	*value = le32_to_cpu(cmd->param_val);

	return 0;
}

/**
 * ice_aq_set_gpio
 * @hw: pointer to the hw struct
 * @gpio_ctrl_handle: GPIO controller node handle
 * @pin_idx: IO number of the GPIO that needs to be set
 * @value: SW provided IO value to set in the LSB
 * @cd: pointer to command details structure or NULL
 *
 * Sends 0x06EC AQ command to set the GPIO pin state that's part of the topology
 */
int
ice_aq_set_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, bool value,
		struct ice_sq_cd *cd)
{
	struct ice_aqc_gpio *cmd;
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_gpio);
	cmd = &desc.params.read_write_gpio;
	cmd->gpio_ctrl_handle = cpu_to_le16(gpio_ctrl_handle);
	cmd->gpio_num = pin_idx;
	cmd->gpio_val = value ? 1 : 0;
/**
 * ice_aq_set_gpio
 * @hw: pointer to the hw struct
 * @gpio_ctrl_handle: GPIO controller node handle
 * @pin_idx: IO number of the GPIO that needs to be set
 * @value: SW provided IO value to set in the LSB
 * @cd: pointer to command details structure or NULL
 *
 * Sends 0x06EC AQ command to set the GPIO pin state that's part of the topology
 */
int
ice_aq_set_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, bool value,
		struct ice_sq_cd *cd)
{
	struct ice_aqc_gpio *cmd;
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_gpio);
	cmd = &desc.params.read_write_gpio;
	cmd->gpio_ctrl_handle = cpu_to_le16(gpio_ctrl_handle);
	cmd->gpio_num = pin_idx;
	cmd->gpio_val = value ? 1 : 0;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_get_gpio
 * @hw: pointer to the hw struct
 * @gpio_ctrl_handle: GPIO controller node handle
 * @pin_idx: IO number of the GPIO that needs to be read
 * @value: IO value read
 * @cd: pointer to command details structure or NULL
 *
 * Sends 0x06ED AQ command to get the value of a GPIO signal which is part of
 * the topology
 */
int
ice_aq_get_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx,
		bool *value, struct ice_sq_cd *cd)
{
	struct ice_aqc_gpio *cmd;
	struct ice_aq_desc desc;
	int status;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_gpio);
	cmd = &desc.params.read_write_gpio;
	cmd->gpio_ctrl_handle = cpu_to_le16(gpio_ctrl_handle);
	cmd->gpio_num = pin_idx;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
	if (status)
		return status;

	*value = !!cmd->gpio_val;
	return 0;
}
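/*
 * Usage sketch (illustrative only, not part of the driver): driving a
 * topology GPIO high and reading the level back. The controller handle and
 * pin index are hypothetical placeholders; in practice both come from the
 * link topology.
 *
 *	bool level;
 *	int err;
 *
 *	err = ice_aq_set_gpio(hw, gpio_handle, 2, true, NULL);
 *	if (!err)
 *		err = ice_aq_get_gpio(hw, gpio_handle, 2, &level, NULL);
 */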
/**
 * ice_is_fw_api_min_ver
 * @hw: pointer to the hardware structure
 * @maj: major version
 * @min: minor version
 * @patch: patch version
 *
 * Checks if the firmware API version is at least maj.min.patch
 */
static bool ice_is_fw_api_min_ver(struct ice_hw *hw, u8 maj, u8 min, u8 patch)
{
	if (hw->api_maj_ver == maj) {
		if (hw->api_min_ver > min)
			return true;
		if (hw->api_min_ver == min && hw->api_patch >= patch)
			return true;
	} else if (hw->api_maj_ver > maj) {
		return true;
	}

	return false;
}

/**
 * ice_fw_supports_link_override
 * @hw: pointer to the hardware structure
 *
 * Checks if the firmware supports link override
 */
bool ice_fw_supports_link_override(struct ice_hw *hw)
{
	return ice_is_fw_api_min_ver(hw, ICE_FW_API_LINK_OVERRIDE_MAJ,
				     ICE_FW_API_LINK_OVERRIDE_MIN,
				     ICE_FW_API_LINK_OVERRIDE_PATCH);
}

/**
 * ice_get_link_default_override
 * @ldo: pointer to the link default override struct
 * @pi: pointer to the port info struct
 *
 * Gets the link default override for a port
 */
int
ice_get_link_default_override(struct ice_link_default_override_tlv *ldo,
			      struct ice_port_info *pi)
{
	u16 i, tlv, tlv_len, tlv_start, buf, offset;
	struct ice_hw *hw = pi->hw;
	int status;

	status = ice_get_pfa_module_tlv(hw, &tlv, &tlv_len,
					ICE_SR_LINK_DEFAULT_OVERRIDE_PTR);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to read link override TLV.\n");
		return status;
	}

	/* Each port has its own config; calculate for our port */
	tlv_start = tlv + pi->lport * ICE_SR_PFA_LINK_OVERRIDE_WORDS +
		ICE_SR_PFA_LINK_OVERRIDE_OFFSET;

	/* link options first */
	status = ice_read_sr_word(hw, tlv_start, &buf);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n");
		return status;
	}
	ldo->options = buf & ICE_LINK_OVERRIDE_OPT_M;
	ldo->phy_config = (buf & ICE_LINK_OVERRIDE_PHY_CFG_M) >>
		ICE_LINK_OVERRIDE_PHY_CFG_S;

	/* link PHY config */
	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_FEC_OFFSET;
	status = ice_read_sr_word(hw, offset, &buf);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to read override phy config.\n");
		return status;
	}
	ldo->fec_options = buf & ICE_LINK_OVERRIDE_FEC_OPT_M;

	/* PHY types low */
	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET;
	for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
		status = ice_read_sr_word(hw, (offset + i), &buf);
		if (status) {
			ice_debug(hw, ICE_DBG_INIT, "Failed to read override phy_type_low.\n");
			return status;
		}
		/* shift 16 bits at a time to fill 64 bits */
		ldo->phy_type_low |= ((u64)buf << (i * 16));
	}

	/* PHY types high */
	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET +
		ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS;
	for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
		status = ice_read_sr_word(hw, (offset + i), &buf);
		if (status) {
			ice_debug(hw, ICE_DBG_INIT, "Failed to read override phy_type_high.\n");
			return status;
		}
		/* shift 16 bits at a time to fill 64 bits */
		ldo->phy_type_high |= ((u64)buf << (i * 16));
	}

	return status;
}

/**
 * ice_is_phy_caps_an_enabled - check if PHY capabilities autoneg is enabled
 * @caps: get PHY capability data
 */
bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps)
{
	if (caps->caps & ICE_AQC_PHY_AN_MODE ||
	    caps->low_power_ctrl_an & (ICE_AQC_PHY_AN_EN_CLAUSE28 |
				       ICE_AQC_PHY_AN_EN_CLAUSE73 |
				       ICE_AQC_PHY_AN_EN_CLAUSE37))
		return true;

	return false;
}

/**
 * ice_aq_set_lldp_mib - Set the LLDP MIB
 * @hw: pointer to the HW struct
 * @mib_type: Local, Remote or both Local and Remote MIBs
 * @buf: pointer to the caller-supplied buffer holding the MIB block to set
 * @buf_size: size of the buffer (in bytes)
 * @cd: pointer to command details structure or NULL
 *
 * Set the LLDP MIB. (0x0A08)
 */
int
ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
		    struct ice_sq_cd *cd)
{
	struct ice_aqc_lldp_set_local_mib *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.lldp_set_mib;

	if (buf_size == 0 || !buf)
		return -EINVAL;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_set_local_mib);

	desc.flags |= cpu_to_le16((u16)ICE_AQ_FLAG_RD);
	desc.datalen = cpu_to_le16(buf_size);

	cmd->type = mib_type;
	cmd->length = cpu_to_le16(buf_size);

	return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
}

/**
 * ice_fw_supports_lldp_fltr_ctrl - check if FW version supports lldp_fltr_ctrl
 * @hw: pointer to HW struct
 */
bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw)
{
	if (hw->mac_type != ICE_MAC_E810)
		return false;

	return ice_is_fw_api_min_ver(hw, ICE_FW_API_LLDP_FLTR_MAJ,
				     ICE_FW_API_LLDP_FLTR_MIN,
				     ICE_FW_API_LLDP_FLTR_PATCH);
}
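/*
 * Usage sketch (illustrative only, not part of the driver): optional
 * features are gated on a minimum AQ API version before use, e.g. reading
 * the link default override only when the running firmware supports it:
 *
 *	struct ice_link_default_override_tlv ldo = {};
 *
 *	if (ice_fw_supports_link_override(hw) &&
 *	    !ice_get_link_default_override(&ldo, pi)) {
 *		// ldo.options / ldo.fec_options are now valid
 *	}
 */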
/**
 * ice_lldp_fltr_add_remove - add or remove a LLDP Rx switch filter
 * @hw: pointer to HW struct
 * @vsi_num: absolute HW index for VSI
 * @add: true to add a filter, false to remove it
 */
int
ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add)
{
	struct ice_aqc_lldp_filter_ctrl *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.lldp_filter_ctrl;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_filter_ctrl);

	if (add)
		cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_ADD;
	else
		cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_DELETE;

	cmd->vsi_num = cpu_to_le16(vsi_num);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_lldp_execute_pending_mib - execute LLDP pending MIB request
 * @hw: pointer to HW struct
 */
int ice_lldp_execute_pending_mib(struct ice_hw *hw)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_execute_pending_mib);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_fw_supports_report_dflt_cfg
 * @hw: pointer to the hardware structure
 *
 * Checks if the firmware supports report default configuration
 */
bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw)
{
	return ice_is_fw_api_min_ver(hw, ICE_FW_API_REPORT_DFLT_CFG_MAJ,
				     ICE_FW_API_REPORT_DFLT_CFG_MIN,
				     ICE_FW_API_REPORT_DFLT_CFG_PATCH);
}

/* Each index into the following array matches a speed bit in the link_speed
 * value returned by firmware, in the range ICE_AQ_LINK_SPEED_10MB ..
 * ICE_AQ_LINK_SPEED_100GB. ICE_AQ_LINK_SPEED_UNKNOWN (BIT(15)) is excluded.
 * The link_speed returned by the firmware is a 16 bit value indexed here by
 * [fls(speed) - 1]; indexes without an entry are rejected by the ARRAY_SIZE
 * bounds check in ice_get_link_speed().
 */
static const u32 ice_aq_to_link_speed[] = {
	SPEED_10,	/* BIT(0) */
	SPEED_100,
	SPEED_1000,
	SPEED_2500,
	SPEED_5000,
	SPEED_10000,
	SPEED_20000,
	SPEED_25000,
	SPEED_40000,
	SPEED_50000,
	SPEED_100000,	/* BIT(10) */
};

/**
 * ice_get_link_speed - get integer speed from table
 * @index: array index from fls(aq speed) - 1
 *
 * Returns: u32 value containing integer speed
 */
u32 ice_get_link_speed(u16 index)
{
	if (index >= ARRAY_SIZE(ice_aq_to_link_speed))
		return 0;

	return ice_aq_to_link_speed[index];
}
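/*
 * Usage sketch (illustrative only, not part of the driver): converting the
 * AQ link_speed bitfield into an integer Mbps value. With
 * link_speed == ICE_AQ_LINK_SPEED_25GB (BIT(7)), fls() returns 8, so index
 * 7 selects SPEED_25000. A link_speed of 0 yields index 0xFFFF after the
 * u16 conversion, which the bounds check turns into a 0 Mbps result.
 *
 *	u16 aq_speed = pi->phy.link_info.link_speed;
 *	u32 mbps = ice_get_link_speed(fls(aq_speed) - 1);
 */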