// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

#include "ice_common.h"
#include "ice_sched.h"
#include "ice_adminq_cmd.h"
#include "ice_flow.h"

#define ICE_PF_RESET_WAIT_COUNT	300

static const char * const ice_link_mode_str_low[] = {
	[0] = "100BASE_TX",
	[1] = "100M_SGMII",
	[2] = "1000BASE_T",
	[3] = "1000BASE_SX",
	[4] = "1000BASE_LX",
	[5] = "1000BASE_KX",
	[6] = "1G_SGMII",
	[7] = "2500BASE_T",
	[8] = "2500BASE_X",
	[9] = "2500BASE_KX",
	[10] = "5GBASE_T",
	[11] = "5GBASE_KR",
	[12] = "10GBASE_T",
	[13] = "10G_SFI_DA",
	[14] = "10GBASE_SR",
	[15] = "10GBASE_LR",
	[16] = "10GBASE_KR_CR1",
	[17] = "10G_SFI_AOC_ACC",
	[18] = "10G_SFI_C2C",
	[19] = "25GBASE_T",
	[20] = "25GBASE_CR",
	[21] = "25GBASE_CR_S",
	[22] = "25GBASE_CR1",
	[23] = "25GBASE_SR",
	[24] = "25GBASE_LR",
	[25] = "25GBASE_KR",
	[26] = "25GBASE_KR_S",
	[27] = "25GBASE_KR1",
	[28] = "25G_AUI_AOC_ACC",
	[29] = "25G_AUI_C2C",
	[30] = "40GBASE_CR4",
	[31] = "40GBASE_SR4",
	[32] = "40GBASE_LR4",
	[33] = "40GBASE_KR4",
	[34] = "40G_XLAUI_AOC_ACC",
	[35] = "40G_XLAUI",
	[36] = "50GBASE_CR2",
	[37] = "50GBASE_SR2",
	[38] = "50GBASE_LR2",
	[39] = "50GBASE_KR2",
	[40] = "50G_LAUI2_AOC_ACC",
	[41] = "50G_LAUI2",
	[42] = "50G_AUI2_AOC_ACC",
	[43] = "50G_AUI2",
	[44] = "50GBASE_CP",
	[45] = "50GBASE_SR",
	[46] = "50GBASE_FR",
	[47] = "50GBASE_LR",
	[48] = "50GBASE_KR_PAM4",
	[49] = "50G_AUI1_AOC_ACC",
	[50] = "50G_AUI1",
	[51] = "100GBASE_CR4",
	[52] = "100GBASE_SR4",
	[53] = "100GBASE_LR4",
	[54] = "100GBASE_KR4",
	[55] = "100G_CAUI4_AOC_ACC",
	[56] = "100G_CAUI4",
	[57] = "100G_AUI4_AOC_ACC",
	[58] = "100G_AUI4",
	[59] = "100GBASE_CR_PAM4",
	[60] = "100GBASE_KR_PAM4",
	[61] = "100GBASE_CP2",
	[62] = "100GBASE_SR2",
	[63] = "100GBASE_DR",
};

static const char * const ice_link_mode_str_high[] = {
	[0] = "100GBASE_KR2_PAM4",
	[1] = "100G_CAUI2_AOC_ACC",
	[2] = "100G_CAUI2",
	[3] = "100G_AUI2_AOC_ACC",
	[4] = "100G_AUI2",
};

/**
 * ice_dump_phy_type - helper function to dump phy_type
 * @hw: pointer to the HW structure
 * @low: 64 bit value for phy_type_low
 * @high: 64 bit value for phy_type_high
 * @prefix: prefix string to differentiate multiple dumps
 */
static void
ice_dump_phy_type(struct ice_hw *hw, u64 low, u64 high, const char *prefix)
{
	ice_debug(hw, ICE_DBG_PHY, "%s: phy_type_low: 0x%016llx\n", prefix, low);

	for (u32 i = 0; i < BITS_PER_TYPE(typeof(low)); i++) {
		if (low & BIT_ULL(i))
			ice_debug(hw, ICE_DBG_PHY, "%s: bit(%d): %s\n",
				  prefix, i, ice_link_mode_str_low[i]);
	}

	ice_debug(hw, ICE_DBG_PHY, "%s: phy_type_high: 0x%016llx\n", prefix, high);

	for (u32 i = 0; i < BITS_PER_TYPE(typeof(high)); i++) {
		if (high & BIT_ULL(i))
			ice_debug(hw, ICE_DBG_PHY, "%s: bit(%d): %s\n",
				  prefix, i, ice_link_mode_str_high[i]);
	}
}
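/* Usage sketch (illustrative; mirrors the call made from ice_aq_get_phy_caps()
 * below): each set bit in the two 64-bit phy_type words maps to one entry of
 * the string tables above, so
 *
 *	ice_dump_phy_type(hw, BIT_ULL(2), 0, "example");
 *
 * would log "example: bit(2): 1000BASE_T". The "example" prefix is a
 * hypothetical tag, not one used elsewhere in this file.
 */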
/**
 * ice_set_mac_type - Sets MAC type
 * @hw: pointer to the HW structure
 *
 * This function sets the MAC type of the adapter based on the
 * vendor ID and device ID stored in the HW structure.
 */
static int ice_set_mac_type(struct ice_hw *hw)
{
	if (hw->vendor_id != PCI_VENDOR_ID_INTEL)
		return -ENODEV;

	switch (hw->device_id) {
	case ICE_DEV_ID_E810C_BACKPLANE:
	case ICE_DEV_ID_E810C_QSFP:
	case ICE_DEV_ID_E810C_SFP:
	case ICE_DEV_ID_E810_XXV_BACKPLANE:
	case ICE_DEV_ID_E810_XXV_QSFP:
	case ICE_DEV_ID_E810_XXV_SFP:
		hw->mac_type = ICE_MAC_E810;
		break;
	case ICE_DEV_ID_E823C_10G_BASE_T:
	case ICE_DEV_ID_E823C_BACKPLANE:
	case ICE_DEV_ID_E823C_QSFP:
	case ICE_DEV_ID_E823C_SFP:
	case ICE_DEV_ID_E823C_SGMII:
	case ICE_DEV_ID_E822C_10G_BASE_T:
	case ICE_DEV_ID_E822C_BACKPLANE:
	case ICE_DEV_ID_E822C_QSFP:
	case ICE_DEV_ID_E822C_SFP:
	case ICE_DEV_ID_E822C_SGMII:
	case ICE_DEV_ID_E822L_10G_BASE_T:
	case ICE_DEV_ID_E822L_BACKPLANE:
	case ICE_DEV_ID_E822L_SFP:
	case ICE_DEV_ID_E822L_SGMII:
	case ICE_DEV_ID_E823L_10G_BASE_T:
	case ICE_DEV_ID_E823L_1GBE:
	case ICE_DEV_ID_E823L_BACKPLANE:
	case ICE_DEV_ID_E823L_QSFP:
	case ICE_DEV_ID_E823L_SFP:
		hw->mac_type = ICE_MAC_GENERIC;
		break;
	default:
		hw->mac_type = ICE_MAC_UNKNOWN;
		break;
	}

	ice_debug(hw, ICE_DBG_INIT, "mac_type: %d\n", hw->mac_type);
	return 0;
}

/**
 * ice_is_e810
 * @hw: pointer to the hardware structure
 *
 * returns true if the device is E810 based, false if not.
 */
bool ice_is_e810(struct ice_hw *hw)
{
	return hw->mac_type == ICE_MAC_E810;
}

/**
 * ice_is_e810t
 * @hw: pointer to the hardware structure
 *
 * returns true if the device is E810T based, false if not.
 */
bool ice_is_e810t(struct ice_hw *hw)
{
	switch (hw->device_id) {
	case ICE_DEV_ID_E810C_SFP:
		switch (hw->subsystem_device_id) {
		case ICE_SUBDEV_ID_E810T:
		case ICE_SUBDEV_ID_E810T2:
		case ICE_SUBDEV_ID_E810T3:
		case ICE_SUBDEV_ID_E810T4:
		case ICE_SUBDEV_ID_E810T6:
		case ICE_SUBDEV_ID_E810T7:
			return true;
		}
		break;
	case ICE_DEV_ID_E810C_QSFP:
		switch (hw->subsystem_device_id) {
		case ICE_SUBDEV_ID_E810T2:
		case ICE_SUBDEV_ID_E810T3:
		case ICE_SUBDEV_ID_E810T5:
			return true;
		}
		break;
	default:
		break;
	}

	return false;
}

/**
 * ice_is_e823
 * @hw: pointer to the hardware structure
 *
 * returns true if the device is E823-L or E823-C based, false if not.
 */
bool ice_is_e823(struct ice_hw *hw)
{
	switch (hw->device_id) {
	case ICE_DEV_ID_E823L_BACKPLANE:
	case ICE_DEV_ID_E823L_SFP:
	case ICE_DEV_ID_E823L_10G_BASE_T:
	case ICE_DEV_ID_E823L_1GBE:
	case ICE_DEV_ID_E823L_QSFP:
	case ICE_DEV_ID_E823C_BACKPLANE:
	case ICE_DEV_ID_E823C_QSFP:
	case ICE_DEV_ID_E823C_SFP:
	case ICE_DEV_ID_E823C_10G_BASE_T:
	case ICE_DEV_ID_E823C_SGMII:
		return true;
	default:
		return false;
	}
}

/**
 * ice_clear_pf_cfg - Clear PF configuration
 * @hw: pointer to the hardware structure
 *
 * Clears any existing PF configuration (VSIs, VSI lists, switch rules, port
 * configuration, flow director filters, etc.).
 */
int ice_clear_pf_cfg(struct ice_hw *hw)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pf_cfg);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_aq_manage_mac_read - manage MAC address read command
 * @hw: pointer to the HW struct
 * @buf: a virtual buffer to hold the manage MAC read response
 * @buf_size: Size of the virtual buffer
 * @cd: pointer to command details structure or NULL
 *
 * This function is used to return the per PF station MAC address (0x0107).
 * NOTE: Upon successful completion of this command, MAC address information
 * is returned in the user specified buffer. Interpret the user specified
 * buffer as a "manage_mac_read" response.
 * Responses, such as the various MAC addresses, are stored in the HW struct
 * (port.mac). ice_discover_dev_caps is expected to be called before this
 * function is called.
 */
static int
ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
		       struct ice_sq_cd *cd)
{
	struct ice_aqc_manage_mac_read_resp *resp;
	struct ice_aqc_manage_mac_read *cmd;
	struct ice_aq_desc desc;
	int status;
	u16 flags;
	u8 i;

	cmd = &desc.params.mac_read;

	if (buf_size < sizeof(*resp))
		return -EINVAL;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_read);

	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (status)
		return status;

	resp = buf;
	flags = le16_to_cpu(cmd->flags) & ICE_AQC_MAN_MAC_READ_M;

	if (!(flags & ICE_AQC_MAN_MAC_LAN_ADDR_VALID)) {
		ice_debug(hw, ICE_DBG_LAN, "got invalid MAC address\n");
		return -EIO;
	}

	/* A single port can report up to two (LAN and WoL) addresses */
	for (i = 0; i < cmd->num_addr; i++)
		if (resp[i].addr_type == ICE_AQC_MAN_MAC_ADDR_TYPE_LAN) {
			ether_addr_copy(hw->port_info->mac.lan_addr,
					resp[i].mac_addr);
			ether_addr_copy(hw->port_info->mac.perm_addr,
					resp[i].mac_addr);
			break;
		}

	return 0;
}

/**
 * ice_aq_get_phy_caps - returns PHY capabilities
 * @pi: port information structure
 * @qual_mods: report qualified modules
 * @report_mode: report mode capabilities
 * @pcaps: structure for PHY capabilities to be filled
 * @cd: pointer to command details structure or NULL
 *
 * Returns the various PHY capabilities supported on the Port (0x0600)
 */
int
ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
		    struct ice_aqc_get_phy_caps_data *pcaps,
		    struct ice_sq_cd *cd)
{
	struct ice_aqc_get_phy_caps *cmd;
	u16 pcaps_size = sizeof(*pcaps);
	struct ice_aq_desc desc;
	const char *prefix;
	struct ice_hw *hw;
	int status;

	cmd = &desc.params.get_phy;

	if (!pcaps || (report_mode & ~ICE_AQC_REPORT_MODE_M) || !pi)
		return -EINVAL;
	hw = pi->hw;

	if (report_mode == ICE_AQC_REPORT_DFLT_CFG &&
	    !ice_fw_supports_report_dflt_cfg(hw))
		return -EINVAL;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_caps);

	if (qual_mods)
		cmd->param0 |= cpu_to_le16(ICE_AQC_GET_PHY_RQM);

	cmd->param0 |= cpu_to_le16(report_mode);
	status = ice_aq_send_cmd(hw, &desc, pcaps, pcaps_size, cd);

	ice_debug(hw, ICE_DBG_LINK, "get phy caps dump\n");

	switch (report_mode) {
	case ICE_AQC_REPORT_TOPO_CAP_MEDIA:
		prefix = "phy_caps_media";
		break;
	case ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA:
		prefix = "phy_caps_no_media";
		break;
	case ICE_AQC_REPORT_ACTIVE_CFG:
		prefix = "phy_caps_active";
		break;
	case ICE_AQC_REPORT_DFLT_CFG:
		prefix = "phy_caps_default";
		break;
	default:
		prefix = "phy_caps_invalid";
	}

	ice_dump_phy_type(hw, le64_to_cpu(pcaps->phy_type_low),
			  le64_to_cpu(pcaps->phy_type_high), prefix);

	ice_debug(hw, ICE_DBG_LINK, "%s: report_mode = 0x%x\n",
		  prefix, report_mode);
	ice_debug(hw, ICE_DBG_LINK, "%s: caps = 0x%x\n", prefix, pcaps->caps);
	ice_debug(hw, ICE_DBG_LINK, "%s: low_power_ctrl_an = 0x%x\n", prefix,
		  pcaps->low_power_ctrl_an);
	ice_debug(hw, ICE_DBG_LINK, "%s: eee_cap = 0x%x\n", prefix,
		  pcaps->eee_cap);
	ice_debug(hw, ICE_DBG_LINK, "%s: eeer_value = 0x%x\n", prefix,
		  pcaps->eeer_value);
	ice_debug(hw, ICE_DBG_LINK, "%s: link_fec_options = 0x%x\n", prefix,
		  pcaps->link_fec_options);
	ice_debug(hw, ICE_DBG_LINK, "%s: module_compliance_enforcement = 0x%x\n",
		  prefix, pcaps->module_compliance_enforcement);
	ice_debug(hw, ICE_DBG_LINK, "%s: extended_compliance_code = 0x%x\n",
		  prefix, pcaps->extended_compliance_code);
	ice_debug(hw, ICE_DBG_LINK, "%s: module_type[0] = 0x%x\n", prefix,
		  pcaps->module_type[0]);
	ice_debug(hw, ICE_DBG_LINK, "%s: module_type[1] = 0x%x\n", prefix,
		  pcaps->module_type[1]);
	ice_debug(hw, ICE_DBG_LINK, "%s: module_type[2] = 0x%x\n", prefix,
		  pcaps->module_type[2]);

	if (!status && report_mode == ICE_AQC_REPORT_TOPO_CAP_MEDIA) {
		pi->phy.phy_type_low = le64_to_cpu(pcaps->phy_type_low);
		pi->phy.phy_type_high = le64_to_cpu(pcaps->phy_type_high);
		memcpy(pi->phy.link_info.module_type, &pcaps->module_type,
		       sizeof(pi->phy.link_info.module_type));
	}

	return status;
}
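/* Caller sketch (hypothetical, for illustration only): querying the active
 * PHY configuration. The kzalloc()/kfree() handling shown is an assumption
 * about the caller's context, not a pattern fixed by this file;
 * ice_init_hw() below uses devm_kzalloc() instead.
 *
 *	struct ice_aqc_get_phy_caps_data *pcaps;
 *	int status;
 *
 *	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
 *	if (!pcaps)
 *		return -ENOMEM;
 *	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG,
 *				     pcaps, NULL);
 *	if (!status)
 *		... inspect pcaps->caps, pcaps->phy_type_low, ... ;
 *	kfree(pcaps);
 */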
/**
 * ice_aq_get_link_topo_handle - get link topology node return status
 * @pi: port information structure
 * @node_type: requested node type
 * @cd: pointer to command details structure or NULL
 *
 * Get link topology node return status for specified node type (0x06E0)
 *
 * Node type cage can be used to determine if cage is present. If AQC
 * returns error (ENOENT), then no cage present. If no cage present, then
 * connection type is backplane or BASE-T.
 */
static int
ice_aq_get_link_topo_handle(struct ice_port_info *pi, u8 node_type,
			    struct ice_sq_cd *cd)
{
	struct ice_aqc_get_link_topo *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.get_link_topo;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);

	cmd->addr.topo_params.node_type_ctx =
		(ICE_AQC_LINK_TOPO_NODE_CTX_PORT <<
		 ICE_AQC_LINK_TOPO_NODE_CTX_S);

	/* set node type */
	cmd->addr.topo_params.node_type_ctx |=
		(ICE_AQC_LINK_TOPO_NODE_TYPE_M & node_type);

	return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
}

/**
 * ice_is_media_cage_present
 * @pi: port information structure
 *
 * Returns true if media cage is present, else false. If no cage, then
 * media type is backplane or BASE-T.
 */
static bool ice_is_media_cage_present(struct ice_port_info *pi)
{
	/* Node type cage can be used to determine if cage is present. If AQC
	 * returns error (ENOENT), then no cage present. If no cage present
	 * then connection type is backplane or BASE-T.
	 */
	return !ice_aq_get_link_topo_handle(pi,
					    ICE_AQC_LINK_TOPO_NODE_TYPE_CAGE,
					    NULL);
}

/**
 * ice_get_media_type - Gets media type
 * @pi: port information structure
 */
static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
{
	struct ice_link_status *hw_link_info;

	if (!pi)
		return ICE_MEDIA_UNKNOWN;

	hw_link_info = &pi->phy.link_info;
	if (hw_link_info->phy_type_low && hw_link_info->phy_type_high)
		/* If more than one media type is selected, report unknown */
		return ICE_MEDIA_UNKNOWN;

	if (hw_link_info->phy_type_low) {
		/* 1G SGMII is a special case where some DA cable PHYs
		 * may show this as an option when it really shouldn't
		 * be since SGMII is meant to be between a MAC and a PHY
		 * in a backplane. Try to detect this case and handle it
		 */
		if (hw_link_info->phy_type_low == ICE_PHY_TYPE_LOW_1G_SGMII &&
		    (hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] ==
		     ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE ||
		     hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] ==
		     ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE))
			return ICE_MEDIA_DA;

		switch (hw_link_info->phy_type_low) {
		case ICE_PHY_TYPE_LOW_1000BASE_SX:
		case ICE_PHY_TYPE_LOW_1000BASE_LX:
		case ICE_PHY_TYPE_LOW_10GBASE_SR:
		case ICE_PHY_TYPE_LOW_10GBASE_LR:
		case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
		case ICE_PHY_TYPE_LOW_25GBASE_SR:
		case ICE_PHY_TYPE_LOW_25GBASE_LR:
		case ICE_PHY_TYPE_LOW_40GBASE_SR4:
		case ICE_PHY_TYPE_LOW_40GBASE_LR4:
		case ICE_PHY_TYPE_LOW_50GBASE_SR2:
		case ICE_PHY_TYPE_LOW_50GBASE_LR2:
		case ICE_PHY_TYPE_LOW_50GBASE_SR:
		case ICE_PHY_TYPE_LOW_50GBASE_FR:
		case ICE_PHY_TYPE_LOW_50GBASE_LR:
		case ICE_PHY_TYPE_LOW_100GBASE_SR4:
		case ICE_PHY_TYPE_LOW_100GBASE_LR4:
		case ICE_PHY_TYPE_LOW_100GBASE_SR2:
		case ICE_PHY_TYPE_LOW_100GBASE_DR:
		case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
		case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
		case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
		case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
		case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
		case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
		case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
		case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
			return ICE_MEDIA_FIBER;
		case ICE_PHY_TYPE_LOW_100BASE_TX:
		case ICE_PHY_TYPE_LOW_1000BASE_T:
		case ICE_PHY_TYPE_LOW_2500BASE_T:
		case ICE_PHY_TYPE_LOW_5GBASE_T:
		case ICE_PHY_TYPE_LOW_10GBASE_T:
		case ICE_PHY_TYPE_LOW_25GBASE_T:
			return ICE_MEDIA_BASET;
		case ICE_PHY_TYPE_LOW_10G_SFI_DA:
		case ICE_PHY_TYPE_LOW_25GBASE_CR:
		case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
		case ICE_PHY_TYPE_LOW_25GBASE_CR1:
		case ICE_PHY_TYPE_LOW_40GBASE_CR4:
		case ICE_PHY_TYPE_LOW_50GBASE_CR2:
		case ICE_PHY_TYPE_LOW_50GBASE_CP:
		case ICE_PHY_TYPE_LOW_100GBASE_CR4:
		case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
		case ICE_PHY_TYPE_LOW_100GBASE_CP2:
			return ICE_MEDIA_DA;
		case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
		case ICE_PHY_TYPE_LOW_40G_XLAUI:
		case ICE_PHY_TYPE_LOW_50G_LAUI2:
		case ICE_PHY_TYPE_LOW_50G_AUI2:
		case ICE_PHY_TYPE_LOW_50G_AUI1:
		case ICE_PHY_TYPE_LOW_100G_AUI4:
		case ICE_PHY_TYPE_LOW_100G_CAUI4:
			if (ice_is_media_cage_present(pi))
				return ICE_MEDIA_DA;
			fallthrough;
		case ICE_PHY_TYPE_LOW_1000BASE_KX:
		case ICE_PHY_TYPE_LOW_2500BASE_KX:
		case ICE_PHY_TYPE_LOW_2500BASE_X:
		case ICE_PHY_TYPE_LOW_5GBASE_KR:
		case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
		case ICE_PHY_TYPE_LOW_25GBASE_KR:
		case ICE_PHY_TYPE_LOW_25GBASE_KR1:
		case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
		case ICE_PHY_TYPE_LOW_40GBASE_KR4:
		case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
		case ICE_PHY_TYPE_LOW_50GBASE_KR2:
		case ICE_PHY_TYPE_LOW_100GBASE_KR4:
		case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
			return ICE_MEDIA_BACKPLANE;
		}
	} else {
		switch (hw_link_info->phy_type_high) {
		case ICE_PHY_TYPE_HIGH_100G_AUI2:
		case ICE_PHY_TYPE_HIGH_100G_CAUI2:
			if (ice_is_media_cage_present(pi))
				return ICE_MEDIA_DA;
			fallthrough;
		case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
			return ICE_MEDIA_BACKPLANE;
		case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
		case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
			return ICE_MEDIA_FIBER;
		}
	}
	return ICE_MEDIA_UNKNOWN;
}

/**
 * ice_aq_get_link_info
 * @pi: port information structure
 * @ena_lse: enable/disable LinkStatusEvent reporting
 * @link: pointer to link status structure - optional
 * @cd: pointer to command details structure or NULL
 *
 * Get Link Status (0x607). Returns the link status of the adapter.
 */
int
ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
		     struct ice_link_status *link, struct ice_sq_cd *cd)
{
	struct ice_aqc_get_link_status_data link_data = { 0 };
	struct ice_aqc_get_link_status *resp;
	struct ice_link_status *li_old, *li;
	enum ice_media_type *hw_media_type;
	struct ice_fc_info *hw_fc_info;
	bool tx_pause, rx_pause;
	struct ice_aq_desc desc;
	struct ice_hw *hw;
	u16 cmd_flags;
	int status;

	if (!pi)
		return -EINVAL;
	hw = pi->hw;
	li_old = &pi->phy.link_info_old;
	hw_media_type = &pi->phy.media_type;
	li = &pi->phy.link_info;
	hw_fc_info = &pi->fc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_status);
	cmd_flags = (ena_lse) ? ICE_AQ_LSE_ENA : ICE_AQ_LSE_DIS;
	resp = &desc.params.get_link_status;
	resp->cmd_flags = cpu_to_le16(cmd_flags);
	resp->lport_num = pi->lport;

	status = ice_aq_send_cmd(hw, &desc, &link_data, sizeof(link_data), cd);

	if (status)
		return status;

	/* save off old link status information */
	*li_old = *li;

	/* update current link status information */
	li->link_speed = le16_to_cpu(link_data.link_speed);
	li->phy_type_low = le64_to_cpu(link_data.phy_type_low);
	li->phy_type_high = le64_to_cpu(link_data.phy_type_high);
	*hw_media_type = ice_get_media_type(pi);
	li->link_info = link_data.link_info;
	li->link_cfg_err = link_data.link_cfg_err;
	li->an_info = link_data.an_info;
	li->ext_info = link_data.ext_info;
	li->max_frame_size = le16_to_cpu(link_data.max_frame_size);
	li->fec_info = link_data.cfg & ICE_AQ_FEC_MASK;
	li->topo_media_conflict = link_data.topo_media_conflict;
	li->pacing = link_data.cfg & (ICE_AQ_CFG_PACING_M |
				      ICE_AQ_CFG_PACING_TYPE_M);

	/* update fc info */
	tx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_TX);
	rx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_RX);
	if (tx_pause && rx_pause)
		hw_fc_info->current_mode = ICE_FC_FULL;
	else if (tx_pause)
		hw_fc_info->current_mode = ICE_FC_TX_PAUSE;
	else if (rx_pause)
		hw_fc_info->current_mode = ICE_FC_RX_PAUSE;
	else
		hw_fc_info->current_mode = ICE_FC_NONE;

	li->lse_ena = !!(resp->cmd_flags & cpu_to_le16(ICE_AQ_LSE_IS_ENABLED));

	ice_debug(hw, ICE_DBG_LINK, "get link info\n");
	ice_debug(hw, ICE_DBG_LINK, " link_speed = 0x%x\n", li->link_speed);
	ice_debug(hw, ICE_DBG_LINK, " phy_type_low = 0x%llx\n",
		  (unsigned long long)li->phy_type_low);
	ice_debug(hw, ICE_DBG_LINK, " phy_type_high = 0x%llx\n",
		  (unsigned long long)li->phy_type_high);
	ice_debug(hw, ICE_DBG_LINK, " media_type = 0x%x\n", *hw_media_type);
	ice_debug(hw, ICE_DBG_LINK, " link_info = 0x%x\n", li->link_info);
	ice_debug(hw, ICE_DBG_LINK, " link_cfg_err = 0x%x\n", li->link_cfg_err);
	ice_debug(hw, ICE_DBG_LINK, " an_info = 0x%x\n", li->an_info);
	ice_debug(hw, ICE_DBG_LINK, " ext_info = 0x%x\n", li->ext_info);
	ice_debug(hw, ICE_DBG_LINK, " fec_info = 0x%x\n", li->fec_info);
	ice_debug(hw, ICE_DBG_LINK, " lse_ena = 0x%x\n", li->lse_ena);
	ice_debug(hw, ICE_DBG_LINK, " max_frame = 0x%x\n",
		  li->max_frame_size);
	ice_debug(hw, ICE_DBG_LINK, " pacing = 0x%x\n", li->pacing);

	/* save link status information */
	if (link)
		*link = *li;

	/* flag cleared so calling functions don't call AQ again */
	pi->phy.get_link_info = false;

	return 0;
}
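/* Caller sketch (hypothetical): refresh the link status without enabling
 * Link Status Events, then test for link-up. ICE_AQ_LINK_UP is the
 * link_info bit reported by the Get Link Status command.
 *
 *	if (!ice_aq_get_link_info(pi, false, NULL, NULL) &&
 *	    (pi->phy.link_info.link_info & ICE_AQ_LINK_UP))
 *		... link is up; pi->phy.media_type is also current ... ;
 */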
/**
 * ice_fill_tx_timer_and_fc_thresh
 * @hw: pointer to the HW struct
 * @cmd: pointer to MAC cfg structure
 *
 * Add Tx timer and FC refresh threshold info to Set MAC Config AQ command
 * descriptor
 */
static void
ice_fill_tx_timer_and_fc_thresh(struct ice_hw *hw,
				struct ice_aqc_set_mac_cfg *cmd)
{
	u16 fc_thres_val, tx_timer_val;
	u32 val;

	/* We read back the transmit timer and FC threshold value of
	 * LFC. Thus, we will use index =
	 * PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX.
	 *
	 * Also, because we are operating on transmit timer and FC
	 * threshold of LFC, we don't turn on any bit in tx_tmr_priority
	 */
#define IDX_OF_LFC PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX

	/* Retrieve the transmit timer */
	val = rd32(hw, PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(IDX_OF_LFC));
	tx_timer_val = val &
		PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_M;
	cmd->tx_tmr_value = cpu_to_le16(tx_timer_val);

	/* Retrieve the FC threshold */
	val = rd32(hw, PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(IDX_OF_LFC));
	fc_thres_val = val & PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_M;

	cmd->fc_refresh_threshold = cpu_to_le16(fc_thres_val);
}

/**
 * ice_aq_set_mac_cfg
 * @hw: pointer to the HW struct
 * @max_frame_size: Maximum Frame Size to be supported
 * @cd: pointer to command details structure or NULL
 *
 * Set MAC configuration (0x0603)
 */
int
ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd)
{
	struct ice_aqc_set_mac_cfg *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.set_mac_cfg;

	if (max_frame_size == 0)
		return -EINVAL;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_cfg);

	cmd->max_frame_size = cpu_to_le16(max_frame_size);

	ice_fill_tx_timer_and_fc_thresh(hw, cmd);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_init_fltr_mgmt_struct - initializes filter management list and locks
 * @hw: pointer to the HW struct
 */
static int ice_init_fltr_mgmt_struct(struct ice_hw *hw)
{
	struct ice_switch_info *sw;
	int status;

	hw->switch_info = devm_kzalloc(ice_hw_to_dev(hw),
				       sizeof(*hw->switch_info), GFP_KERNEL);
	sw = hw->switch_info;

	if (!sw)
		return -ENOMEM;

	INIT_LIST_HEAD(&sw->vsi_list_map_head);
	sw->prof_res_bm_init = 0;

	status = ice_init_def_sw_recp(hw);
	if (status) {
		devm_kfree(ice_hw_to_dev(hw), hw->switch_info);
		return status;
	}
	return 0;
}

/**
 * ice_cleanup_fltr_mgmt_struct - cleanup filter management list and locks
 * @hw: pointer to the HW struct
 */
static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_vsi_list_map_info *v_pos_map;
	struct ice_vsi_list_map_info *v_tmp_map;
	struct ice_sw_recipe *recps;
	u8 i;

	list_for_each_entry_safe(v_pos_map, v_tmp_map, &sw->vsi_list_map_head,
				 list_entry) {
		list_del(&v_pos_map->list_entry);
		devm_kfree(ice_hw_to_dev(hw), v_pos_map);
	}
	recps = sw->recp_list;
	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
		struct ice_recp_grp_entry *rg_entry, *tmprg_entry;

		recps[i].root_rid = i;
		list_for_each_entry_safe(rg_entry, tmprg_entry,
					 &recps[i].rg_list, l_entry) {
			list_del(&rg_entry->l_entry);
			devm_kfree(ice_hw_to_dev(hw), rg_entry);
		}

		if (recps[i].adv_rule) {
			struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
			struct ice_adv_fltr_mgmt_list_entry *lst_itr;

			mutex_destroy(&recps[i].filt_rule_lock);
			list_for_each_entry_safe(lst_itr, tmp_entry,
						 &recps[i].filt_rules,
						 list_entry) {
				list_del(&lst_itr->list_entry);
				devm_kfree(ice_hw_to_dev(hw), lst_itr->lkups);
				devm_kfree(ice_hw_to_dev(hw), lst_itr);
			}
		} else {
			struct ice_fltr_mgmt_list_entry *lst_itr, *tmp_entry;

			mutex_destroy(&recps[i].filt_rule_lock);
			list_for_each_entry_safe(lst_itr, tmp_entry,
						 &recps[i].filt_rules,
						 list_entry) {
				list_del(&lst_itr->list_entry);
				devm_kfree(ice_hw_to_dev(hw), lst_itr);
			}
		}
		if (recps[i].root_buf)
			devm_kfree(ice_hw_to_dev(hw), recps[i].root_buf);
	}
	ice_rm_all_sw_replay_rule_info(hw);
	devm_kfree(ice_hw_to_dev(hw), sw->recp_list);
	devm_kfree(ice_hw_to_dev(hw), sw);
}

/**
 * ice_get_fw_log_cfg - get FW logging configuration
 * @hw: pointer to the HW struct
 */
static int ice_get_fw_log_cfg(struct ice_hw *hw)
{
	struct ice_aq_desc desc;
	__le16 *config;
	int status;
	u16 size;

	size = sizeof(*config) * ICE_AQC_FW_LOG_ID_MAX;
	config = devm_kzalloc(ice_hw_to_dev(hw), size, GFP_KERNEL);
	if (!config)
		return -ENOMEM;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging_info);

	status = ice_aq_send_cmd(hw, &desc, config, size, NULL);
	if (!status) {
		u16 i;

		/* Save FW logging information into the HW structure */
		for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) {
			u16 v, m, flgs;

			v = le16_to_cpu(config[i]);
			m = (v & ICE_AQC_FW_LOG_ID_M) >> ICE_AQC_FW_LOG_ID_S;
			flgs = (v & ICE_AQC_FW_LOG_EN_M) >> ICE_AQC_FW_LOG_EN_S;

			if (m < ICE_AQC_FW_LOG_ID_MAX)
				hw->fw_log.evnts[m].cur = flgs;
		}
	}

	devm_kfree(ice_hw_to_dev(hw), config);

	return status;
}

/**
 * ice_cfg_fw_log - configure FW logging
 * @hw: pointer to the HW struct
 * @enable: enable certain FW logging events if true, disable all if false
 *
 * This function enables/disables the FW logging via Rx CQ events and a UART
 * port based on predetermined configurations. FW logging via the Rx CQ can be
 * enabled/disabled for individual PFs. However, FW logging via the UART can
 * only be enabled/disabled for all PFs on the same device.
 *
 * To enable overall FW logging, the "cq_en" and "uart_en" enable bits in
 * hw->fw_log need to be set accordingly, e.g. based on user-provided input,
 * before initializing the device.
 *
 * When re/configuring FW logging, callers need to update the "cfg" elements of
 * the hw->fw_log.evnts array with the desired logging event configurations for
 * modules of interest. When disabling FW logging completely, the callers can
 * just pass false in the "enable" parameter. On completion, the function will
 * update the "cur" element of the hw->fw_log.evnts array with the resulting
 * logging event configurations of the modules that are being re/configured. FW
 * logging modules that are not part of a reconfiguration operation retain their
 * previous states.
 *
 * Before resetting the device, it is recommended that the driver disables FW
 * logging before shutting down the control queue. When disabling FW logging
 * ("enable" = false), the latest configurations of FW logging events stored in
 * hw->fw_log.evnts[] are not overridden to allow them to be reconfigured after
 * a device reset.
 *
 * When enabling FW logging to emit log messages via the Rx CQ during the
 * device's initialization phase, a mechanism alternative to interrupt handlers
 * needs to be used to extract FW log messages from the Rx CQ periodically and
 * to prevent the Rx CQ from being full and stalling other types of control
 * messages from FW to SW. Interrupts are typically disabled during the device's
 * initialization phase.
 */
static int ice_cfg_fw_log(struct ice_hw *hw, bool enable)
{
	struct ice_aqc_fw_logging *cmd;
	u16 i, chgs = 0, len = 0;
	struct ice_aq_desc desc;
	__le16 *data = NULL;
	u8 actv_evnts = 0;
	void *buf = NULL;
	int status = 0;

	if (!hw->fw_log.cq_en && !hw->fw_log.uart_en)
		return 0;

	/* Disable FW logging only when the control queue is still responsive */
	if (!enable &&
	    (!hw->fw_log.actv_evnts || !ice_check_sq_alive(hw, &hw->adminq)))
		return 0;

	/* Get current FW log settings */
	status = ice_get_fw_log_cfg(hw);
	if (status)
		return status;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging);
	cmd = &desc.params.fw_logging;

	/* Indicate which controls are valid */
	if (hw->fw_log.cq_en)
		cmd->log_ctrl_valid |= ICE_AQC_FW_LOG_AQ_VALID;

	if (hw->fw_log.uart_en)
		cmd->log_ctrl_valid |= ICE_AQC_FW_LOG_UART_VALID;

	if (enable) {
		/* Fill in an array of entries with FW logging modules and
		 * logging events being reconfigured.
		 */
		for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) {
			u16 val;

			/* Keep track of enabled event types */
			actv_evnts |= hw->fw_log.evnts[i].cfg;

			if (hw->fw_log.evnts[i].cfg == hw->fw_log.evnts[i].cur)
				continue;

			if (!data) {
				data = devm_kcalloc(ice_hw_to_dev(hw),
						    ICE_AQC_FW_LOG_ID_MAX,
						    sizeof(*data),
						    GFP_KERNEL);
				if (!data)
					return -ENOMEM;
			}

			val = i << ICE_AQC_FW_LOG_ID_S;
			val |= hw->fw_log.evnts[i].cfg << ICE_AQC_FW_LOG_EN_S;
			data[chgs++] = cpu_to_le16(val);
		}

		/* Only enable FW logging if at least one module is specified.
		 * If FW logging is currently enabled but all modules are not
		 * enabled to emit log messages, disable FW logging altogether.
		 */
		if (actv_evnts) {
			/* Leave if there is effectively no change */
			if (!chgs)
				goto out;

			if (hw->fw_log.cq_en)
				cmd->log_ctrl |= ICE_AQC_FW_LOG_AQ_EN;

			if (hw->fw_log.uart_en)
				cmd->log_ctrl |= ICE_AQC_FW_LOG_UART_EN;

			buf = data;
			len = sizeof(*data) * chgs;
			desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
		}
	}

	status = ice_aq_send_cmd(hw, &desc, buf, len, NULL);
	if (!status) {
		/* Update the current configuration to reflect events enabled.
		 * hw->fw_log.cq_en and hw->fw_log.uart_en indicate if the FW
		 * logging mode is enabled for the device. They do not reflect
		 * actual modules being enabled to emit log messages. So, their
		 * values remain unchanged even when all modules are disabled.
		 */
		u16 cnt = enable ? chgs : (u16)ICE_AQC_FW_LOG_ID_MAX;

		hw->fw_log.actv_evnts = actv_evnts;
		for (i = 0; i < cnt; i++) {
			u16 v, m;

			if (!enable) {
				/* When disabling all FW logging events as part
				 * of device's de-initialization, the original
				 * configurations are retained, and can be used
				 * to reconfigure FW logging later if the device
				 * is re-initialized.
				 */
				hw->fw_log.evnts[i].cur = 0;
				continue;
			}

			v = le16_to_cpu(data[i]);
			m = (v & ICE_AQC_FW_LOG_ID_M) >> ICE_AQC_FW_LOG_ID_S;
			hw->fw_log.evnts[m].cur = hw->fw_log.evnts[m].cfg;
		}
	}

out:
	if (data)
		devm_kfree(ice_hw_to_dev(hw), data);

	return status;
}
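/* Configuration sketch (hypothetical caller, per the kernel-doc above):
 * before ice_init_hw() runs, a user-driven path could opt in to Rx CQ
 * logging and request events for one module, e.g.
 *
 *	hw->fw_log.cq_en = true;
 *	hw->fw_log.evnts[ICE_AQC_FW_LOG_ID_LINK].cfg = events;
 *
 * after which ice_cfg_fw_log(hw, true) (called from ice_init_hw() below)
 * pushes the change to firmware. ICE_AQC_FW_LOG_ID_LINK stands in for any
 * ice_aqc_fw_logging_mod ID, and "events" for a bitmask of the desired
 * event types; both are illustrative placeholders here.
 */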
/**
 * ice_output_fw_log
 * @hw: pointer to the HW struct
 * @desc: pointer to the AQ message descriptor
 * @buf: pointer to the buffer accompanying the AQ message
 *
 * Formats a FW Log message and outputs it via the standard driver logs.
 */
void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf)
{
	ice_debug(hw, ICE_DBG_FW_LOG, "[ FW Log Msg Start ]\n");
	ice_debug_array(hw, ICE_DBG_FW_LOG, 16, 1, (u8 *)buf,
			le16_to_cpu(desc->datalen));
	ice_debug(hw, ICE_DBG_FW_LOG, "[ FW Log Msg End ]\n");
}

/**
 * ice_get_itr_intrl_gran
 * @hw: pointer to the HW struct
 *
 * Determines the ITR/INTRL granularities based on the maximum aggregate
 * bandwidth according to the device's configuration during power-on.
 */
static void ice_get_itr_intrl_gran(struct ice_hw *hw)
{
	u8 max_agg_bw = (rd32(hw, GL_PWR_MODE_CTL) &
			 GL_PWR_MODE_CTL_CAR_MAX_BW_M) >>
			GL_PWR_MODE_CTL_CAR_MAX_BW_S;

	switch (max_agg_bw) {
	case ICE_MAX_AGG_BW_200G:
	case ICE_MAX_AGG_BW_100G:
	case ICE_MAX_AGG_BW_50G:
		hw->itr_gran = ICE_ITR_GRAN_ABOVE_25;
		hw->intrl_gran = ICE_INTRL_GRAN_ABOVE_25;
		break;
	case ICE_MAX_AGG_BW_25G:
		hw->itr_gran = ICE_ITR_GRAN_MAX_25;
		hw->intrl_gran = ICE_INTRL_GRAN_MAX_25;
		break;
	}
}

/**
 * ice_init_hw - main hardware initialization routine
 * @hw: pointer to the hardware structure
 */
int ice_init_hw(struct ice_hw *hw)
{
	struct ice_aqc_get_phy_caps_data *pcaps;
	u16 mac_buf_len;
	void *mac_buf;
	int status;

	/* Set MAC type based on DeviceID */
	status = ice_set_mac_type(hw);
	if (status)
		return status;

	hw->pf_id = (u8)(rd32(hw, PF_FUNC_RID) &
			 PF_FUNC_RID_FUNC_NUM_M) >>
		PF_FUNC_RID_FUNC_NUM_S;

	status = ice_reset(hw, ICE_RESET_PFR);
	if (status)
		return status;

	ice_get_itr_intrl_gran(hw);

	status = ice_create_all_ctrlq(hw);
	if (status)
		goto err_unroll_cqinit;

	/* Enable FW logging. Not fatal if this fails. */
	status = ice_cfg_fw_log(hw, true);
	if (status)
		ice_debug(hw, ICE_DBG_INIT, "Failed to enable FW logging.\n");

	status = ice_clear_pf_cfg(hw);
	if (status)
		goto err_unroll_cqinit;

	/* Set bit to enable Flow Director filters */
	wr32(hw, PFQF_FD_ENA, PFQF_FD_ENA_FD_ENA_M);
	INIT_LIST_HEAD(&hw->fdir_list_head);

	ice_clear_pxe_mode(hw);

	status = ice_init_nvm(hw);
	if (status)
		goto err_unroll_cqinit;

	status = ice_get_caps(hw);
	if (status)
		goto err_unroll_cqinit;

	if (!hw->port_info)
		hw->port_info = devm_kzalloc(ice_hw_to_dev(hw),
					     sizeof(*hw->port_info),
					     GFP_KERNEL);
	if (!hw->port_info) {
		status = -ENOMEM;
		goto err_unroll_cqinit;
	}

	/* set the back pointer to HW */
	hw->port_info->hw = hw;

	/* Initialize port_info struct with switch configuration data */
	status = ice_get_initial_sw_cfg(hw);
	if (status)
		goto err_unroll_alloc;

	hw->evb_veb = true;

	/* init xarray for identifying scheduling nodes uniquely */
	xa_init_flags(&hw->port_info->sched_node_ids, XA_FLAGS_ALLOC);

	/* Query the allocated resources for Tx scheduler */
	status = ice_sched_query_res_alloc(hw);
	if (status) {
		ice_debug(hw, ICE_DBG_SCHED, "Failed to get scheduler allocated resources\n");
		goto err_unroll_alloc;
	}
	ice_sched_get_psm_clk_freq(hw);

	/* Initialize port_info struct with scheduler data */
	status = ice_sched_init_port(hw->port_info);
	if (status)
		goto err_unroll_sched;

	pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
	if (!pcaps) {
		status = -ENOMEM;
		goto err_unroll_sched;
	}

	/* Initialize port_info struct with PHY capabilities */
	status = ice_aq_get_phy_caps(hw->port_info, false,
				     ICE_AQC_REPORT_TOPO_CAP_MEDIA, pcaps,
				     NULL);
	devm_kfree(ice_hw_to_dev(hw), pcaps);
	if (status)
		dev_warn(ice_hw_to_dev(hw), "Get PHY capabilities failed status = %d, continuing anyway\n",
			 status);

	/* Initialize port_info struct with link information */
	status = ice_aq_get_link_info(hw->port_info, false, NULL, NULL);
	if (status)
		goto err_unroll_sched;

	/* need a valid SW entry point to build a Tx tree */
	if (!hw->sw_entry_point_layer) {
		ice_debug(hw, ICE_DBG_SCHED, "invalid sw entry point\n");
		status = -EIO;
		goto err_unroll_sched;
	}
	INIT_LIST_HEAD(&hw->agg_list);
	/* Initialize max burst size */
	if (!hw->max_burst_size)
		ice_cfg_rl_burst_size(hw, ICE_SCHED_DFLT_BURST_SIZE);

	status = ice_init_fltr_mgmt_struct(hw);
	if (status)
		goto err_unroll_sched;

	/* Get MAC information */
	/* A single port can report up to two (LAN and WoL) addresses */
	mac_buf = devm_kcalloc(ice_hw_to_dev(hw), 2,
			       sizeof(struct ice_aqc_manage_mac_read_resp),
			       GFP_KERNEL);
	mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);

	if (!mac_buf) {
		status = -ENOMEM;
		goto err_unroll_fltr_mgmt_struct;
	}

	status = ice_aq_manage_mac_read(hw, mac_buf, mac_buf_len, NULL);
	devm_kfree(ice_hw_to_dev(hw), mac_buf);

	if (status)
		goto err_unroll_fltr_mgmt_struct;
	/* enable jumbo frame support at MAC level */
	status = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL);
	if (status)
		goto err_unroll_fltr_mgmt_struct;
	/* Obtain counter base index which would be used by flow director */
	status = ice_alloc_fd_res_cntr(hw, &hw->fd_ctr_base);
	if (status)
		goto err_unroll_fltr_mgmt_struct;
	status = ice_init_hw_tbls(hw);
	if (status)
		goto err_unroll_fltr_mgmt_struct;
	mutex_init(&hw->tnl_lock);
	return 0;

err_unroll_fltr_mgmt_struct:
	ice_cleanup_fltr_mgmt_struct(hw);
err_unroll_sched:
	ice_sched_cleanup_all(hw);
err_unroll_alloc:
	devm_kfree(ice_hw_to_dev(hw), hw->port_info);
err_unroll_cqinit:
	ice_destroy_all_ctrlq(hw);
	return status;
}

/**
 * ice_deinit_hw - unroll initialization operations done by ice_init_hw
 * @hw: pointer to the hardware structure
 *
 * This should be called only during nominal operation, not as a result of
 * ice_init_hw() failing since ice_init_hw() will take care of unrolling
 * applicable initializations if it fails for any reason.
 */
void ice_deinit_hw(struct ice_hw *hw)
{
	ice_free_fd_res_cntr(hw, hw->fd_ctr_base);
	ice_cleanup_fltr_mgmt_struct(hw);

	ice_sched_cleanup_all(hw);
	ice_sched_clear_agg(hw);
	ice_free_seg(hw);
	ice_free_hw_tbls(hw);
	mutex_destroy(&hw->tnl_lock);

	/* Attempt to disable FW logging before shutting down control queues */
	ice_cfg_fw_log(hw, false);
	ice_destroy_all_ctrlq(hw);

	/* Clear VSI contexts if not already cleared */
	ice_clear_all_vsi_ctx(hw);
}

/**
 * ice_check_reset - Check to see if a global reset is complete
 * @hw: pointer to the hardware structure
 */
int ice_check_reset(struct ice_hw *hw)
{
	u32 cnt, reg = 0, grst_timeout, uld_mask;

	/* Poll for Device Active state in case a recent CORER, GLOBR,
	 * or EMPR has occurred. The grst delay value is in 100ms units.
	 * Add 1sec for outstanding AQ commands that can take a long time.
	 */
	grst_timeout = ((rd32(hw, GLGEN_RSTCTL) & GLGEN_RSTCTL_GRSTDEL_M) >>
			GLGEN_RSTCTL_GRSTDEL_S) + 10;

	for (cnt = 0; cnt < grst_timeout; cnt++) {
		mdelay(100);
		reg = rd32(hw, GLGEN_RSTAT);
		if (!(reg & GLGEN_RSTAT_DEVSTATE_M))
			break;
	}

	if (cnt == grst_timeout) {
		ice_debug(hw, ICE_DBG_INIT, "Global reset polling failed to complete.\n");
		return -EIO;
	}

#define ICE_RESET_DONE_MASK	(GLNVM_ULD_PCIER_DONE_M |\
				 GLNVM_ULD_PCIER_DONE_1_M |\
				 GLNVM_ULD_CORER_DONE_M |\
				 GLNVM_ULD_GLOBR_DONE_M |\
				 GLNVM_ULD_POR_DONE_M |\
				 GLNVM_ULD_POR_DONE_1_M |\
				 GLNVM_ULD_PCIER_DONE_2_M)

	uld_mask = ICE_RESET_DONE_MASK | (hw->func_caps.common_cap.rdma ?
					  GLNVM_ULD_PE_DONE_M : 0);

	/* Device is Active; check Global Reset processes are done */
	for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
		reg = rd32(hw, GLNVM_ULD) & uld_mask;
		if (reg == uld_mask) {
			ice_debug(hw, ICE_DBG_INIT, "Global reset processes done. %d\n", cnt);
			break;
		}
		mdelay(10);
	}

	if (cnt == ICE_PF_RESET_WAIT_COUNT) {
		ice_debug(hw, ICE_DBG_INIT, "Wait for Reset Done timed out. GLNVM_ULD = 0x%x\n",
			  reg);
		return -EIO;
	}

	return 0;
}

/**
 * ice_pf_reset - Reset the PF
 * @hw: pointer to the hardware structure
 *
 * If a global reset has been triggered, this function checks
 * for its completion and then issues the PF reset
 */
static int ice_pf_reset(struct ice_hw *hw)
{
	u32 cnt, reg;

	/* If at function entry a global reset was already in progress, i.e.
	 * state is not 'device active' or any of the reset done bits are not
	 * set in GLNVM_ULD, there is no need for a PF Reset; poll until the
	 * global reset is done.
	 */
	if ((rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_DEVSTATE_M) ||
	    (rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK) ^ ICE_RESET_DONE_MASK) {
		/* poll on global reset currently in progress until done */
		if (ice_check_reset(hw))
			return -EIO;

		return 0;
	}

	/* Reset the PF */
	reg = rd32(hw, PFGEN_CTRL);

	wr32(hw, PFGEN_CTRL, (reg | PFGEN_CTRL_PFSWR_M));

	/* Wait for the PFR to complete. The wait time is the global config lock
	 * timeout plus the PFR timeout which will account for a possible reset
	 * that is occurring during a download package operation.
	 */
	for (cnt = 0; cnt < ICE_GLOBAL_CFG_LOCK_TIMEOUT +
	     ICE_PF_RESET_WAIT_COUNT; cnt++) {
		reg = rd32(hw, PFGEN_CTRL);
		if (!(reg & PFGEN_CTRL_PFSWR_M))
			break;

		mdelay(1);
	}

	if (cnt == ICE_GLOBAL_CFG_LOCK_TIMEOUT + ICE_PF_RESET_WAIT_COUNT) {
		ice_debug(hw, ICE_DBG_INIT, "PF reset polling failed to complete.\n");
		return -EIO;
	}

	return 0;
}

/**
 * ice_reset - Perform different types of reset
 * @hw: pointer to the hardware structure
 * @req: reset request
 *
 * This function triggers a reset as specified by the req parameter.
 *
 * Note:
 * If anything other than a PF reset is triggered, PXE mode is restored.
 * This has to be cleared using ice_clear_pxe_mode again, once the AQ
 * interface has been restored in the rebuild flow.
 */
int ice_reset(struct ice_hw *hw, enum ice_reset_req req)
{
	u32 val = 0;

	switch (req) {
	case ICE_RESET_PFR:
		return ice_pf_reset(hw);
	case ICE_RESET_CORER:
		ice_debug(hw, ICE_DBG_INIT, "CoreR requested\n");
		val = GLGEN_RTRIG_CORER_M;
		break;
	case ICE_RESET_GLOBR:
		ice_debug(hw, ICE_DBG_INIT, "GlobalR requested\n");
		val = GLGEN_RTRIG_GLOBR_M;
		break;
	default:
		return -EINVAL;
	}

	val |= rd32(hw, GLGEN_RTRIG);
	wr32(hw, GLGEN_RTRIG, val);
	ice_flush(hw);

	/* wait for the FW to be ready */
	return ice_check_reset(hw);
}

/**
 * ice_copy_rxq_ctx_to_hw
 * @hw: pointer to the hardware structure
 * @ice_rxq_ctx: pointer to the rxq context
 * @rxq_index: the index of the Rx queue
 *
 * Copies rxq context from dense structure to HW register space
 */
static int
ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
{
	u8 i;

	if (!ice_rxq_ctx)
		return -EINVAL;

	if (rxq_index > QRX_CTRL_MAX_INDEX)
		return -EINVAL;

	/* Copy each dword separately to HW */
	for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
		wr32(hw, QRX_CONTEXT(i, rxq_index),
		     *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));

		ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i,
			  *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
	}

	return 0;
}

/* LAN Rx Queue Context */
static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
	/* Field		Width	LSB */
	ICE_CTX_STORE(ice_rlan_ctx, head,		13,	0),
	ICE_CTX_STORE(ice_rlan_ctx, cpuid,		8,	13),
	ICE_CTX_STORE(ice_rlan_ctx, base,		57,	32),
	ICE_CTX_STORE(ice_rlan_ctx, qlen,		13,	89),
	ICE_CTX_STORE(ice_rlan_ctx, dbuf,		7,	102),
	ICE_CTX_STORE(ice_rlan_ctx, hbuf,		5,	109),
	ICE_CTX_STORE(ice_rlan_ctx, dtype,		2,	114),
	ICE_CTX_STORE(ice_rlan_ctx, dsize,		1,	116),
	ICE_CTX_STORE(ice_rlan_ctx, crcstrip,		1,	117),
	ICE_CTX_STORE(ice_rlan_ctx, l2tsel,		1,	119),
	ICE_CTX_STORE(ice_rlan_ctx, hsplit_0,		4,	120),
	ICE_CTX_STORE(ice_rlan_ctx, hsplit_1,		2,	124),
	ICE_CTX_STORE(ice_rlan_ctx, showiv,		1,	127),
	ICE_CTX_STORE(ice_rlan_ctx, rxmax,		14,	174),
	ICE_CTX_STORE(ice_rlan_ctx, tphrdesc_ena,	1,	193),
	ICE_CTX_STORE(ice_rlan_ctx, tphwdesc_ena,	1,	194),
	ICE_CTX_STORE(ice_rlan_ctx, tphdata_ena,	1,	195),
	ICE_CTX_STORE(ice_rlan_ctx, tphhead_ena,	1,	196),
	ICE_CTX_STORE(ice_rlan_ctx, lrxqthresh,		3,	198),
	ICE_CTX_STORE(ice_rlan_ctx, prefena,		1,	201),
	{ 0 }
};

/**
 * ice_write_rxq_ctx
 * @hw: pointer to the hardware structure
 * @rlan_ctx: pointer to the rxq context
 * @rxq_index: the index of the Rx queue
 *
 * Converts rxq context from sparse to dense structure and then writes
 * it to HW register space and enables the hardware to prefetch descriptors
 * instead of only fetching them on demand
 */
int
ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
		  u32 rxq_index)
{
	u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 };

	if (!rlan_ctx)
		return -EINVAL;

	rlan_ctx->prefena = 1;

	ice_set_ctx(hw, (u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info);
	return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index);
}
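/* Packing sketch: each ice_ctx_ele row above gives a field's bit width and
 * LSB position in the dense context image, so e.g. qlen lands in bits
 * 89..101 of ctx_buf. A hypothetical caller (values illustrative) would do:
 *
 *	struct ice_rlan_ctx rlan_ctx = { 0 };
 *
 *	rlan_ctx.base = ring_dma >> 7;		// base in 128-byte units
 *	rlan_ctx.qlen = ring_count;
 *	rlan_ctx.dbuf = rx_buf_len >> 7;	// data buffer, 128-byte units
 *	status = ice_write_rxq_ctx(hw, &rlan_ctx, rxq_index);
 *
 * The >> 7 scaling matches how the driver expresses the base address and
 * buffer sizes elsewhere, but treat it as an assumption in this sketch.
 */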
/* LAN Tx Queue Context */
const struct ice_ctx_ele ice_tlan_ctx_info[] = {
	/* Field				Width	LSB */
	ICE_CTX_STORE(ice_tlan_ctx, base,			57,	0),
	ICE_CTX_STORE(ice_tlan_ctx, port_num,			3,	57),
	ICE_CTX_STORE(ice_tlan_ctx, cgd_num,			5,	60),
	ICE_CTX_STORE(ice_tlan_ctx, pf_num,			3,	65),
	ICE_CTX_STORE(ice_tlan_ctx, vmvf_num,			10,	68),
	ICE_CTX_STORE(ice_tlan_ctx, vmvf_type,			2,	78),
	ICE_CTX_STORE(ice_tlan_ctx, src_vsi,			10,	80),
	ICE_CTX_STORE(ice_tlan_ctx, tsyn_ena,			1,	90),
	ICE_CTX_STORE(ice_tlan_ctx, internal_usage_flag,	1,	91),
	ICE_CTX_STORE(ice_tlan_ctx, alt_vlan,			1,	92),
	ICE_CTX_STORE(ice_tlan_ctx, cpuid,			8,	93),
	ICE_CTX_STORE(ice_tlan_ctx, wb_mode,			1,	101),
	ICE_CTX_STORE(ice_tlan_ctx, tphrd_desc,			1,	102),
	ICE_CTX_STORE(ice_tlan_ctx, tphrd,			1,	103),
	ICE_CTX_STORE(ice_tlan_ctx, tphwr_desc,			1,	104),
	ICE_CTX_STORE(ice_tlan_ctx, cmpq_id,			9,	105),
	ICE_CTX_STORE(ice_tlan_ctx, qnum_in_func,		14,	114),
	ICE_CTX_STORE(ice_tlan_ctx, itr_notification_mode,	1,	128),
	ICE_CTX_STORE(ice_tlan_ctx, adjust_prof_id,		6,	129),
	ICE_CTX_STORE(ice_tlan_ctx, qlen,			13,	135),
	ICE_CTX_STORE(ice_tlan_ctx, quanta_prof_idx,		4,	148),
	ICE_CTX_STORE(ice_tlan_ctx, tso_ena,			1,	152),
	ICE_CTX_STORE(ice_tlan_ctx, tso_qnum,			11,	153),
	ICE_CTX_STORE(ice_tlan_ctx, legacy_int,			1,	164),
	ICE_CTX_STORE(ice_tlan_ctx, drop_ena,			1,	165),
	ICE_CTX_STORE(ice_tlan_ctx, cache_prof_idx,		2,	166),
	ICE_CTX_STORE(ice_tlan_ctx, pkt_shaper_prof_idx,	3,	168),
	ICE_CTX_STORE(ice_tlan_ctx, int_q_state,		122,	171),
	{ 0 }
};

/* Sideband Queue command wrappers */

/**
 * ice_sbq_send_cmd - send Sideband Queue command to Sideband Queue
 * @hw: pointer to the HW struct
 * @desc: descriptor describing the command
 * @buf: buffer to use for indirect commands (NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (0 for direct commands)
 * @cd: pointer to command details structure
 */
static int
ice_sbq_send_cmd(struct ice_hw *hw, struct ice_sbq_cmd_desc *desc,
		 void *buf, u16 buf_size, struct ice_sq_cd *cd)
{
	return ice_sq_send_cmd(hw, ice_get_sbq(hw),
			       (struct ice_aq_desc *)desc, buf, buf_size, cd);
}

/**
 * ice_sbq_rw_reg - Fill Sideband Queue command
 * @hw: pointer to the HW struct
 * @in: message info to be filled in descriptor
 */
int ice_sbq_rw_reg(struct ice_hw *hw, struct ice_sbq_msg_input *in)
{
	struct ice_sbq_cmd_desc desc = {0};
	struct ice_sbq_msg_req msg = {0};
	u16 msg_len;
	int status;

	msg_len = sizeof(msg);

	msg.dest_dev = in->dest_dev;
	msg.opcode = in->opcode;
	msg.flags = ICE_SBQ_MSG_FLAGS;
	msg.sbe_fbe = ICE_SBQ_MSG_SBE_FBE;
	msg.msg_addr_low = cpu_to_le16(in->msg_addr_low);
	msg.msg_addr_high = cpu_to_le32(in->msg_addr_high);

	if (in->opcode)
		msg.data = cpu_to_le32(in->data);
	else
		/* data read comes back in completion, so shorten the struct by
		 * sizeof(msg.data)
		 */
		msg_len -= sizeof(msg.data);

	desc.flags = cpu_to_le16(ICE_AQ_FLAG_RD);
	desc.opcode = cpu_to_le16(ice_sbq_opc_neigh_dev_req);
	desc.param0.cmd_len = cpu_to_le16(msg_len);
	status = ice_sbq_send_cmd(hw, &desc, &msg, msg_len, NULL);
	if (!status && !in->opcode)
		in->data = le32_to_cpu
			(((struct ice_sbq_msg_cmpl *)&msg)->data);
	return status;
}
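/* Usage sketch (hypothetical): as the branches above imply, a zero opcode
 * is a register read (the data word is returned in the completion) while a
 * non-zero opcode carries in->data as a write. A read could look like:
 *
 *	struct ice_sbq_msg_input in = { 0 };
 *
 *	in.dest_dev = dest;		// target sideband device
 *	in.msg_addr_low = lo;		// register address, low 16 bits
 *	in.msg_addr_high = hi;		// register address, high 32 bits
 *	if (!ice_sbq_rw_reg(hw, &in))
 *		... in.data now holds the value read ... ;
 *
 * dest/lo/hi are placeholders; the opcode convention is inferred from
 * ice_sbq_rw_reg() itself.
 */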
/* FW Admin Queue command wrappers */

/* Software lock/mutex that is meant to be held while the Global Config Lock
 * in firmware is acquired by the software to prevent most (but not all) types
 * of AQ commands from being sent to FW
 */
DEFINE_MUTEX(ice_global_cfg_lock_sw);

/**
 * ice_should_retry_sq_send_cmd
 * @opcode: AQ opcode
 *
 * Decide if we should retry the send command routine for the ATQ, depending
 * on the opcode.
 */
static bool ice_should_retry_sq_send_cmd(u16 opcode)
{
	switch (opcode) {
	case ice_aqc_opc_get_link_topo:
	case ice_aqc_opc_lldp_stop:
	case ice_aqc_opc_lldp_start:
	case ice_aqc_opc_lldp_filter_ctrl:
		return true;
	}

	return false;
}

/**
 * ice_sq_send_cmd_retry - send command to Control Queue (ATQ)
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 * @desc: prefilled descriptor describing the command
 * @buf: buffer to use for indirect commands (or NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (or 0 for direct commands)
 * @cd: pointer to command details structure
 *
 * Retry sending the FW Admin Queue command, multiple times, to the FW Admin
 * Queue if the EBUSY AQ error is returned.
 */
static int
ice_sq_send_cmd_retry(struct ice_hw *hw, struct ice_ctl_q_info *cq,
		      struct ice_aq_desc *desc, void *buf, u16 buf_size,
		      struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc_cpy;
	bool is_cmd_for_retry;
	u8 idx = 0;
	u16 opcode;
	int status;

	opcode = le16_to_cpu(desc->opcode);
	is_cmd_for_retry = ice_should_retry_sq_send_cmd(opcode);
	memset(&desc_cpy, 0, sizeof(desc_cpy));

	if (is_cmd_for_retry) {
		/* All retryable cmds are direct, without buf. */
		WARN_ON(buf);

		memcpy(&desc_cpy, desc, sizeof(desc_cpy));
	}

	do {
		status = ice_sq_send_cmd(hw, cq, desc, buf, buf_size, cd);

		if (!is_cmd_for_retry || !status ||
		    hw->adminq.sq_last_status != ICE_AQ_RC_EBUSY)
			break;

		memcpy(desc, &desc_cpy, sizeof(desc_cpy));

		msleep(ICE_SQ_SEND_DELAY_TIME_MS);

	} while (++idx < ICE_SQ_SEND_MAX_EXECUTE);

	return status;
}

/**
 * ice_aq_send_cmd - send FW Admin Queue command to FW Admin Queue
 * @hw: pointer to the HW struct
 * @desc: descriptor describing the command
 * @buf: buffer to use for indirect commands (NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (0 for direct commands)
 * @cd: pointer to command details structure
 *
 * Helper function to send FW Admin Queue commands to the FW Admin Queue.
 */
int
ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
		u16 buf_size, struct ice_sq_cd *cd)
{
	struct ice_aqc_req_res *cmd = &desc->params.res_owner;
	bool lock_acquired = false;
	int status;

	/* When a package download is in process (i.e. when the firmware's
	 * Global Configuration Lock resource is held), only the Download
	 * Package, Get Version, Get Package Info List, Upload Section,
	 * Update Package, Set Port Parameters, Get/Set VLAN Mode Parameters,
	 * Add Recipe, Set Recipes to Profile Association, Get Recipe, Get
	 * Recipes to Profile Association, and Release Resource (with resource
	 * ID set to Global Config Lock) AdminQ commands are allowed; all others
	 * must block until the package download completes and the Global Config
	 * Lock is released. See also ice_acquire_global_cfg_lock().
	 */
	switch (le16_to_cpu(desc->opcode)) {
	case ice_aqc_opc_download_pkg:
	case ice_aqc_opc_get_pkg_info_list:
	case ice_aqc_opc_get_ver:
	case ice_aqc_opc_upload_section:
	case ice_aqc_opc_update_pkg:
	case ice_aqc_opc_set_port_params:
	case ice_aqc_opc_get_vlan_mode_parameters:
	case ice_aqc_opc_set_vlan_mode_parameters:
	case ice_aqc_opc_add_recipe:
	case ice_aqc_opc_recipe_to_profile:
	case ice_aqc_opc_get_recipe:
	case ice_aqc_opc_get_recipe_to_profile:
		break;
	case ice_aqc_opc_release_res:
		if (le16_to_cpu(cmd->res_id) == ICE_AQC_RES_ID_GLBL_LOCK)
			break;
		fallthrough;
	default:
		mutex_lock(&ice_global_cfg_lock_sw);
		lock_acquired = true;
		break;
	}

	status = ice_sq_send_cmd_retry(hw, &hw->adminq, desc, buf, buf_size, cd);
	if (lock_acquired)
		mutex_unlock(&ice_global_cfg_lock_sw);

	return status;
}

/**
 * ice_aq_get_fw_ver
 * @hw: pointer to the HW struct
 * @cd: pointer to command details structure or NULL
 *
 * Get the firmware version (0x0001) from the admin queue commands
 */
int ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd)
{
	struct ice_aqc_get_ver *resp;
	struct ice_aq_desc desc;
	int status;

	resp = &desc.params.get_ver;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver);

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);

	if (!status) {
		hw->fw_branch = resp->fw_branch;
		hw->fw_maj_ver = resp->fw_major;
		hw->fw_min_ver = resp->fw_minor;
		hw->fw_patch = resp->fw_patch;
		hw->fw_build = le32_to_cpu(resp->fw_build);
		hw->api_branch = resp->api_branch;
		hw->api_maj_ver = resp->api_major;
		hw->api_min_ver = resp->api_minor;
		hw->api_patch = resp->api_patch;
	}

	return status;
}

/**
 * ice_aq_send_driver_ver
 * @hw: pointer to the HW struct
 * @dv: driver's major, minor version
 * @cd: pointer to command details structure or NULL
 *
 * Send the driver version (0x0002) to the firmware
 */
int
ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv,
		       struct ice_sq_cd *cd)
{
	struct ice_aqc_driver_ver *cmd;
	struct ice_aq_desc desc;
	u16 len;

	cmd = &desc.params.driver_ver;

	if (!dv)
		return -EINVAL;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_ver);

	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
	cmd->major_ver = dv->major_ver;
	cmd->minor_ver = dv->minor_ver;
	cmd->build_ver = dv->build_ver;
	cmd->subbuild_ver = dv->subbuild_ver;

	len = 0;
	while (len < sizeof(dv->driver_string) &&
	       isascii(dv->driver_string[len]) && dv->driver_string[len])
		len++;

	return ice_aq_send_cmd(hw, &desc, dv->driver_string, len, cd);
}
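/* Caller sketch (hypothetical probe-time use): report the driver version to
 * firmware. The version numbers and string below are placeholders, not
 * values mandated by this file.
 *
 *	struct ice_driver_ver dv = {
 *		.major_ver = 1, .minor_ver = 0,
 *		.build_ver = 0, .subbuild_ver = 0,
 *	};
 *
 *	strscpy((char *)dv.driver_string, "example-driver",
 *		sizeof(dv.driver_string));
 *	ice_aq_send_driver_ver(hw, &dv, NULL);
 */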
1790 */ 1791 int ice_aq_q_shutdown(struct ice_hw *hw, bool unloading) 1792 { 1793 struct ice_aqc_q_shutdown *cmd; 1794 struct ice_aq_desc desc; 1795 1796 cmd = &desc.params.q_shutdown; 1797 1798 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_q_shutdown); 1799 1800 if (unloading) 1801 cmd->driver_unloading = ICE_AQC_DRIVER_UNLOADING; 1802 1803 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 1804 } 1805 1806 /** 1807 * ice_aq_req_res 1808 * @hw: pointer to the HW struct 1809 * @res: resource ID 1810 * @access: access type 1811 * @sdp_number: resource number 1812 * @timeout: the maximum time in ms that the driver may hold the resource 1813 * @cd: pointer to command details structure or NULL 1814 * 1815 * Requests common resource using the admin queue commands (0x0008). 1816 * When attempting to acquire the Global Config Lock, the driver can 1817 * learn of three states: 1818 * 1) 0 - acquired lock, and can perform download package 1819 * 2) -EIO - did not get lock, driver should fail to load 1820 * 3) -EALREADY - did not get lock, but another driver has 1821 * successfully downloaded the package; the driver does 1822 * not have to download the package and can continue 1823 * loading 1824 * 1825 * Note that if the caller is in an acquire lock, perform action, release lock 1826 * phase of operation, it is possible that the FW may detect a timeout and issue 1827 * a CORER. In this case, the driver will receive a CORER interrupt and will 1828 * have to determine its cause. The calling thread that is handling this flow 1829 * will likely get an error propagated back to it indicating the Download 1830 * Package, Update Package or the Release Resource AQ commands timed out. 1831 */ 1832 static int 1833 ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res, 1834 enum ice_aq_res_access_type access, u8 sdp_number, u32 *timeout, 1835 struct ice_sq_cd *cd) 1836 { 1837 struct ice_aqc_req_res *cmd_resp; 1838 struct ice_aq_desc desc; 1839 int status; 1840 1841 cmd_resp = &desc.params.res_owner; 1842 1843 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_req_res); 1844 1845 cmd_resp->res_id = cpu_to_le16(res); 1846 cmd_resp->access_type = cpu_to_le16(access); 1847 cmd_resp->res_number = cpu_to_le32(sdp_number); 1848 cmd_resp->timeout = cpu_to_le32(*timeout); 1849 *timeout = 0; 1850 1851 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 1852 1853 /* The completion specifies the maximum time in ms that the driver 1854 * may hold the resource in the Timeout field. 1855 */ 1856 1857 /* Global config lock response utilizes an additional status field. 1858 * 1859 * If the Global config lock resource is held by some other driver, the 1860 * command completes with ICE_AQ_RES_GLBL_IN_PROG in the status field 1861 * and the timeout field indicates the maximum time the current owner 1862 * of the resource has to free it. 
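 * A worked example of the mapping below (illustrative values): a status of
 * ICE_AQ_RES_GLBL_IN_PROG with timeout == 3000 tells the caller that the
 * present owner has up to 3000 ms left and is returned as -EIO, so that
 * ice_acquire_res() keeps polling; ICE_AQ_RES_GLBL_DONE is returned as
 * -EALREADY, i.e. there is nothing left to download.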
1863 	 */
1864 	if (res == ICE_GLOBAL_CFG_LOCK_RES_ID) {
1865 		if (le16_to_cpu(cmd_resp->status) == ICE_AQ_RES_GLBL_SUCCESS) {
1866 			*timeout = le32_to_cpu(cmd_resp->timeout);
1867 			return 0;
1868 		} else if (le16_to_cpu(cmd_resp->status) ==
1869 			   ICE_AQ_RES_GLBL_IN_PROG) {
1870 			*timeout = le32_to_cpu(cmd_resp->timeout);
1871 			return -EIO;
1872 		} else if (le16_to_cpu(cmd_resp->status) ==
1873 			   ICE_AQ_RES_GLBL_DONE) {
1874 			return -EALREADY;
1875 		}
1876
1877 		/* invalid FW response, force a timeout immediately */
1878 		*timeout = 0;
1879 		return -EIO;
1880 	}
1881
1882 	/* If the resource is held by some other driver, the command completes
1883 	 * with a busy return value and the timeout field indicates the maximum
1884 	 * time the current owner of the resource has to free it.
1885 	 */
1886 	if (!status || hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY)
1887 		*timeout = le32_to_cpu(cmd_resp->timeout);
1888
1889 	return status;
1890 }
1891
1892 /**
1893  * ice_aq_release_res
1894  * @hw: pointer to the HW struct
1895  * @res: resource ID
1896  * @sdp_number: resource number
1897  * @cd: pointer to command details structure or NULL
1898  *
1899  * Release a common resource using the admin queue command (0x0009).
1900  */
1901 static int
1902 ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number,
1903 		   struct ice_sq_cd *cd)
1904 {
1905 	struct ice_aqc_req_res *cmd;
1906 	struct ice_aq_desc desc;
1907
1908 	cmd = &desc.params.res_owner;
1909
1910 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_release_res);
1911
1912 	cmd->res_id = cpu_to_le16(res);
1913 	cmd->res_number = cpu_to_le32(sdp_number);
1914
1915 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1916 }
1917
1918 /**
1919  * ice_acquire_res
1920  * @hw: pointer to the HW structure
1921  * @res: resource ID
1922  * @access: access type (read or write)
1923  * @timeout: timeout in milliseconds
1924  *
1925  * This function will attempt to acquire the ownership of a resource.
1926  */
1927 int
1928 ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
1929 		enum ice_aq_res_access_type access, u32 timeout)
1930 {
1931 #define ICE_RES_POLLING_DELAY_MS 10
1932 	u32 delay = ICE_RES_POLLING_DELAY_MS;
1933 	u32 time_left = timeout;
1934 	int status;
1935
1936 	status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);
1937
1938 	/* A return code of -EALREADY means that another driver has
1939 	 * previously acquired the resource and performed any necessary updates;
1940 	 * in this case the caller does not obtain the resource and has no
1941 	 * further work to do.
1942 	 */
1943 	if (status == -EALREADY)
1944 		goto ice_acquire_res_exit;
1945
1946 	if (status)
1947 		ice_debug(hw, ICE_DBG_RES, "resource %d acquire type %d failed.\n", res, access);
1948
1949 	/* If necessary, poll until the current lock owner times out */
1950 	timeout = time_left;
1951 	while (status && timeout && time_left) {
1952 		mdelay(delay);
1953 		timeout = (timeout > delay) ?
timeout - delay : 0; 1954 status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL); 1955 1956 if (status == -EALREADY) 1957 /* lock free, but no work to do */ 1958 break; 1959 1960 if (!status) 1961 /* lock acquired */ 1962 break; 1963 } 1964 if (status && status != -EALREADY) 1965 ice_debug(hw, ICE_DBG_RES, "resource acquire timed out.\n"); 1966 1967 ice_acquire_res_exit: 1968 if (status == -EALREADY) { 1969 if (access == ICE_RES_WRITE) 1970 ice_debug(hw, ICE_DBG_RES, "resource indicates no work to do.\n"); 1971 else 1972 ice_debug(hw, ICE_DBG_RES, "Warning: -EALREADY not expected\n"); 1973 } 1974 return status; 1975 } 1976 1977 /** 1978 * ice_release_res 1979 * @hw: pointer to the HW structure 1980 * @res: resource ID 1981 * 1982 * This function will release a resource using the proper Admin Command. 1983 */ 1984 void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res) 1985 { 1986 unsigned long timeout; 1987 int status; 1988 1989 /* there are some rare cases when trying to release the resource 1990 * results in an admin queue timeout, so handle them correctly 1991 */ 1992 timeout = jiffies + 10 * ICE_CTL_Q_SQ_CMD_TIMEOUT; 1993 do { 1994 status = ice_aq_release_res(hw, res, 0, NULL); 1995 if (status != -EIO) 1996 break; 1997 usleep_range(1000, 2000); 1998 } while (time_before(jiffies, timeout)); 1999 } 2000 2001 /** 2002 * ice_aq_alloc_free_res - command to allocate/free resources 2003 * @hw: pointer to the HW struct 2004 * @num_entries: number of resource entries in buffer 2005 * @buf: Indirect buffer to hold data parameters and response 2006 * @buf_size: size of buffer for indirect commands 2007 * @opc: pass in the command opcode 2008 * @cd: pointer to command details structure or NULL 2009 * 2010 * Helper function to allocate/free resources using the admin queue commands 2011 */ 2012 int 2013 ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries, 2014 struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size, 2015 enum ice_adminq_opc opc, struct ice_sq_cd *cd) 2016 { 2017 struct ice_aqc_alloc_free_res_cmd *cmd; 2018 struct ice_aq_desc desc; 2019 2020 cmd = &desc.params.sw_res_ctrl; 2021 2022 if (!buf) 2023 return -EINVAL; 2024 2025 if (buf_size < flex_array_size(buf, elem, num_entries)) 2026 return -EINVAL; 2027 2028 ice_fill_dflt_direct_cmd_desc(&desc, opc); 2029 2030 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 2031 2032 cmd->num_entries = cpu_to_le16(num_entries); 2033 2034 return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); 2035 } 2036 2037 /** 2038 * ice_alloc_hw_res - allocate resource 2039 * @hw: pointer to the HW struct 2040 * @type: type of resource 2041 * @num: number of resources to allocate 2042 * @btm: allocate from bottom 2043 * @res: pointer to array that will receive the resources 2044 */ 2045 int 2046 ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res) 2047 { 2048 struct ice_aqc_alloc_free_res_elem *buf; 2049 u16 buf_len; 2050 int status; 2051 2052 buf_len = struct_size(buf, elem, num); 2053 buf = kzalloc(buf_len, GFP_KERNEL); 2054 if (!buf) 2055 return -ENOMEM; 2056 2057 /* Prepare buffer to allocate resource. 
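	 * The admin queue reads the element count and a res_type word from
	 * the first element; the DEDICATED flag requests a non-shared
	 * allocation, and IGNORE_INDEX asks firmware to pick the resource
	 * indices itself (a summary of the flag usage just below).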
*/ 2058 buf->num_elems = cpu_to_le16(num); 2059 buf->res_type = cpu_to_le16(type | ICE_AQC_RES_TYPE_FLAG_DEDICATED | 2060 ICE_AQC_RES_TYPE_FLAG_IGNORE_INDEX); 2061 if (btm) 2062 buf->res_type |= cpu_to_le16(ICE_AQC_RES_TYPE_FLAG_SCAN_BOTTOM); 2063 2064 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len, 2065 ice_aqc_opc_alloc_res, NULL); 2066 if (status) 2067 goto ice_alloc_res_exit; 2068 2069 memcpy(res, buf->elem, sizeof(*buf->elem) * num); 2070 2071 ice_alloc_res_exit: 2072 kfree(buf); 2073 return status; 2074 } 2075 2076 /** 2077 * ice_free_hw_res - free allocated HW resource 2078 * @hw: pointer to the HW struct 2079 * @type: type of resource to free 2080 * @num: number of resources 2081 * @res: pointer to array that contains the resources to free 2082 */ 2083 int ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res) 2084 { 2085 struct ice_aqc_alloc_free_res_elem *buf; 2086 u16 buf_len; 2087 int status; 2088 2089 buf_len = struct_size(buf, elem, num); 2090 buf = kzalloc(buf_len, GFP_KERNEL); 2091 if (!buf) 2092 return -ENOMEM; 2093 2094 /* Prepare buffer to free resource. */ 2095 buf->num_elems = cpu_to_le16(num); 2096 buf->res_type = cpu_to_le16(type); 2097 memcpy(buf->elem, res, sizeof(*buf->elem) * num); 2098 2099 status = ice_aq_alloc_free_res(hw, num, buf, buf_len, 2100 ice_aqc_opc_free_res, NULL); 2101 if (status) 2102 ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n"); 2103 2104 kfree(buf); 2105 return status; 2106 } 2107 2108 /** 2109 * ice_get_num_per_func - determine number of resources per PF 2110 * @hw: pointer to the HW structure 2111 * @max: value to be evenly split between each PF 2112 * 2113 * Determine the number of valid functions by going through the bitmap returned 2114 * from parsing capabilities and use this to calculate the number of resources 2115 * per PF based on the max value passed in. 2116 */ 2117 static u32 ice_get_num_per_func(struct ice_hw *hw, u32 max) 2118 { 2119 u8 funcs; 2120 2121 #define ICE_CAPS_VALID_FUNCS_M 0xFF 2122 funcs = hweight8(hw->dev_caps.common_cap.valid_functions & 2123 ICE_CAPS_VALID_FUNCS_M); 2124 2125 if (!funcs) 2126 return 0; 2127 2128 return max / funcs; 2129 } 2130 2131 /** 2132 * ice_parse_common_caps - parse common device/function capabilities 2133 * @hw: pointer to the HW struct 2134 * @caps: pointer to common capabilities structure 2135 * @elem: the capability element to parse 2136 * @prefix: message prefix for tracing capabilities 2137 * 2138 * Given a capability element, extract relevant details into the common 2139 * capability structure. 2140 * 2141 * Returns: true if the capability matches one of the common capability ids, 2142 * false otherwise. 
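 *
 * For example (an illustrative element, not one captured from firmware):
 * cap == ICE_AQC_CAPS_MSIX with number == 64 and phys_id == 0 records a
 * function that owns 64 MSI-X vectors starting at vector index 0.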
2143 */ 2144 static bool 2145 ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps, 2146 struct ice_aqc_list_caps_elem *elem, const char *prefix) 2147 { 2148 u32 logical_id = le32_to_cpu(elem->logical_id); 2149 u32 phys_id = le32_to_cpu(elem->phys_id); 2150 u32 number = le32_to_cpu(elem->number); 2151 u16 cap = le16_to_cpu(elem->cap); 2152 bool found = true; 2153 2154 switch (cap) { 2155 case ICE_AQC_CAPS_VALID_FUNCTIONS: 2156 caps->valid_functions = number; 2157 ice_debug(hw, ICE_DBG_INIT, "%s: valid_functions (bitmap) = %d\n", prefix, 2158 caps->valid_functions); 2159 break; 2160 case ICE_AQC_CAPS_SRIOV: 2161 caps->sr_iov_1_1 = (number == 1); 2162 ice_debug(hw, ICE_DBG_INIT, "%s: sr_iov_1_1 = %d\n", prefix, 2163 caps->sr_iov_1_1); 2164 break; 2165 case ICE_AQC_CAPS_DCB: 2166 caps->dcb = (number == 1); 2167 caps->active_tc_bitmap = logical_id; 2168 caps->maxtc = phys_id; 2169 ice_debug(hw, ICE_DBG_INIT, "%s: dcb = %d\n", prefix, caps->dcb); 2170 ice_debug(hw, ICE_DBG_INIT, "%s: active_tc_bitmap = %d\n", prefix, 2171 caps->active_tc_bitmap); 2172 ice_debug(hw, ICE_DBG_INIT, "%s: maxtc = %d\n", prefix, caps->maxtc); 2173 break; 2174 case ICE_AQC_CAPS_RSS: 2175 caps->rss_table_size = number; 2176 caps->rss_table_entry_width = logical_id; 2177 ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_size = %d\n", prefix, 2178 caps->rss_table_size); 2179 ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_entry_width = %d\n", prefix, 2180 caps->rss_table_entry_width); 2181 break; 2182 case ICE_AQC_CAPS_RXQS: 2183 caps->num_rxq = number; 2184 caps->rxq_first_id = phys_id; 2185 ice_debug(hw, ICE_DBG_INIT, "%s: num_rxq = %d\n", prefix, 2186 caps->num_rxq); 2187 ice_debug(hw, ICE_DBG_INIT, "%s: rxq_first_id = %d\n", prefix, 2188 caps->rxq_first_id); 2189 break; 2190 case ICE_AQC_CAPS_TXQS: 2191 caps->num_txq = number; 2192 caps->txq_first_id = phys_id; 2193 ice_debug(hw, ICE_DBG_INIT, "%s: num_txq = %d\n", prefix, 2194 caps->num_txq); 2195 ice_debug(hw, ICE_DBG_INIT, "%s: txq_first_id = %d\n", prefix, 2196 caps->txq_first_id); 2197 break; 2198 case ICE_AQC_CAPS_MSIX: 2199 caps->num_msix_vectors = number; 2200 caps->msix_vector_first_id = phys_id; 2201 ice_debug(hw, ICE_DBG_INIT, "%s: num_msix_vectors = %d\n", prefix, 2202 caps->num_msix_vectors); 2203 ice_debug(hw, ICE_DBG_INIT, "%s: msix_vector_first_id = %d\n", prefix, 2204 caps->msix_vector_first_id); 2205 break; 2206 case ICE_AQC_CAPS_PENDING_NVM_VER: 2207 caps->nvm_update_pending_nvm = true; 2208 ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_nvm\n", prefix); 2209 break; 2210 case ICE_AQC_CAPS_PENDING_OROM_VER: 2211 caps->nvm_update_pending_orom = true; 2212 ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_orom\n", prefix); 2213 break; 2214 case ICE_AQC_CAPS_PENDING_NET_VER: 2215 caps->nvm_update_pending_netlist = true; 2216 ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_netlist\n", prefix); 2217 break; 2218 case ICE_AQC_CAPS_NVM_MGMT: 2219 caps->nvm_unified_update = 2220 (number & ICE_NVM_MGMT_UNIFIED_UPD_SUPPORT) ? 
2221 true : false; 2222 ice_debug(hw, ICE_DBG_INIT, "%s: nvm_unified_update = %d\n", prefix, 2223 caps->nvm_unified_update); 2224 break; 2225 case ICE_AQC_CAPS_RDMA: 2226 caps->rdma = (number == 1); 2227 ice_debug(hw, ICE_DBG_INIT, "%s: rdma = %d\n", prefix, caps->rdma); 2228 break; 2229 case ICE_AQC_CAPS_MAX_MTU: 2230 caps->max_mtu = number; 2231 ice_debug(hw, ICE_DBG_INIT, "%s: max_mtu = %d\n", 2232 prefix, caps->max_mtu); 2233 break; 2234 case ICE_AQC_CAPS_PCIE_RESET_AVOIDANCE: 2235 caps->pcie_reset_avoidance = (number > 0); 2236 ice_debug(hw, ICE_DBG_INIT, 2237 "%s: pcie_reset_avoidance = %d\n", prefix, 2238 caps->pcie_reset_avoidance); 2239 break; 2240 case ICE_AQC_CAPS_POST_UPDATE_RESET_RESTRICT: 2241 caps->reset_restrict_support = (number == 1); 2242 ice_debug(hw, ICE_DBG_INIT, 2243 "%s: reset_restrict_support = %d\n", prefix, 2244 caps->reset_restrict_support); 2245 break; 2246 default: 2247 /* Not one of the recognized common capabilities */ 2248 found = false; 2249 } 2250 2251 return found; 2252 } 2253 2254 /** 2255 * ice_recalc_port_limited_caps - Recalculate port limited capabilities 2256 * @hw: pointer to the HW structure 2257 * @caps: pointer to capabilities structure to fix 2258 * 2259 * Re-calculate the capabilities that are dependent on the number of physical 2260 * ports; i.e. some features are not supported or function differently on 2261 * devices with more than 4 ports. 2262 */ 2263 static void 2264 ice_recalc_port_limited_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps) 2265 { 2266 /* This assumes device capabilities are always scanned before function 2267 * capabilities during the initialization flow. 2268 */ 2269 if (hw->dev_caps.num_funcs > 4) { 2270 /* Max 4 TCs per port */ 2271 caps->maxtc = 4; 2272 ice_debug(hw, ICE_DBG_INIT, "reducing maxtc to %d (based on #ports)\n", 2273 caps->maxtc); 2274 if (caps->rdma) { 2275 ice_debug(hw, ICE_DBG_INIT, "forcing RDMA off\n"); 2276 caps->rdma = 0; 2277 } 2278 2279 /* print message only when processing device capabilities 2280 * during initialization. 2281 */ 2282 if (caps == &hw->dev_caps.common_cap) 2283 dev_info(ice_hw_to_dev(hw), "RDMA functionality is not available with the current device configuration.\n"); 2284 } 2285 } 2286 2287 /** 2288 * ice_parse_vf_func_caps - Parse ICE_AQC_CAPS_VF function caps 2289 * @hw: pointer to the HW struct 2290 * @func_p: pointer to function capabilities structure 2291 * @cap: pointer to the capability element to parse 2292 * 2293 * Extract function capabilities for ICE_AQC_CAPS_VF. 2294 */ 2295 static void 2296 ice_parse_vf_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p, 2297 struct ice_aqc_list_caps_elem *cap) 2298 { 2299 u32 logical_id = le32_to_cpu(cap->logical_id); 2300 u32 number = le32_to_cpu(cap->number); 2301 2302 func_p->num_allocd_vfs = number; 2303 func_p->vf_base_id = logical_id; 2304 ice_debug(hw, ICE_DBG_INIT, "func caps: num_allocd_vfs = %d\n", 2305 func_p->num_allocd_vfs); 2306 ice_debug(hw, ICE_DBG_INIT, "func caps: vf_base_id = %d\n", 2307 func_p->vf_base_id); 2308 } 2309 2310 /** 2311 * ice_parse_vsi_func_caps - Parse ICE_AQC_CAPS_VSI function caps 2312 * @hw: pointer to the HW struct 2313 * @func_p: pointer to function capabilities structure 2314 * @cap: pointer to the capability element to parse 2315 * 2316 * Extract function capabilities for ICE_AQC_CAPS_VSI. 
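 *
 * For example (illustrative numbers): with ICE_MAX_VSI == 768 and eight
 * PFs set in the valid-functions bitmap, ice_get_num_per_func() yields a
 * guar_num_vsi of 768 / 8 == 96 for each function.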
2317 */ 2318 static void 2319 ice_parse_vsi_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p, 2320 struct ice_aqc_list_caps_elem *cap) 2321 { 2322 func_p->guar_num_vsi = ice_get_num_per_func(hw, ICE_MAX_VSI); 2323 ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi (fw) = %d\n", 2324 le32_to_cpu(cap->number)); 2325 ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi = %d\n", 2326 func_p->guar_num_vsi); 2327 } 2328 2329 /** 2330 * ice_parse_1588_func_caps - Parse ICE_AQC_CAPS_1588 function caps 2331 * @hw: pointer to the HW struct 2332 * @func_p: pointer to function capabilities structure 2333 * @cap: pointer to the capability element to parse 2334 * 2335 * Extract function capabilities for ICE_AQC_CAPS_1588. 2336 */ 2337 static void 2338 ice_parse_1588_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p, 2339 struct ice_aqc_list_caps_elem *cap) 2340 { 2341 struct ice_ts_func_info *info = &func_p->ts_func_info; 2342 u32 number = le32_to_cpu(cap->number); 2343 2344 info->ena = ((number & ICE_TS_FUNC_ENA_M) != 0); 2345 func_p->common_cap.ieee_1588 = info->ena; 2346 2347 info->src_tmr_owned = ((number & ICE_TS_SRC_TMR_OWND_M) != 0); 2348 info->tmr_ena = ((number & ICE_TS_TMR_ENA_M) != 0); 2349 info->tmr_index_owned = ((number & ICE_TS_TMR_IDX_OWND_M) != 0); 2350 info->tmr_index_assoc = ((number & ICE_TS_TMR_IDX_ASSOC_M) != 0); 2351 2352 info->clk_freq = (number & ICE_TS_CLK_FREQ_M) >> ICE_TS_CLK_FREQ_S; 2353 info->clk_src = ((number & ICE_TS_CLK_SRC_M) != 0); 2354 2355 if (info->clk_freq < NUM_ICE_TIME_REF_FREQ) { 2356 info->time_ref = (enum ice_time_ref_freq)info->clk_freq; 2357 } else { 2358 /* Unknown clock frequency, so assume a (probably incorrect) 2359 * default to avoid out-of-bounds look ups of frequency 2360 * related information. 2361 */ 2362 ice_debug(hw, ICE_DBG_INIT, "1588 func caps: unknown clock frequency %u\n", 2363 info->clk_freq); 2364 info->time_ref = ICE_TIME_REF_FREQ_25_000; 2365 } 2366 2367 ice_debug(hw, ICE_DBG_INIT, "func caps: ieee_1588 = %u\n", 2368 func_p->common_cap.ieee_1588); 2369 ice_debug(hw, ICE_DBG_INIT, "func caps: src_tmr_owned = %u\n", 2370 info->src_tmr_owned); 2371 ice_debug(hw, ICE_DBG_INIT, "func caps: tmr_ena = %u\n", 2372 info->tmr_ena); 2373 ice_debug(hw, ICE_DBG_INIT, "func caps: tmr_index_owned = %u\n", 2374 info->tmr_index_owned); 2375 ice_debug(hw, ICE_DBG_INIT, "func caps: tmr_index_assoc = %u\n", 2376 info->tmr_index_assoc); 2377 ice_debug(hw, ICE_DBG_INIT, "func caps: clk_freq = %u\n", 2378 info->clk_freq); 2379 ice_debug(hw, ICE_DBG_INIT, "func caps: clk_src = %u\n", 2380 info->clk_src); 2381 } 2382 2383 /** 2384 * ice_parse_fdir_func_caps - Parse ICE_AQC_CAPS_FD function caps 2385 * @hw: pointer to the HW struct 2386 * @func_p: pointer to function capabilities structure 2387 * 2388 * Extract function capabilities for ICE_AQC_CAPS_FD. 
2389 */ 2390 static void 2391 ice_parse_fdir_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p) 2392 { 2393 u32 reg_val, val; 2394 2395 reg_val = rd32(hw, GLQF_FD_SIZE); 2396 val = (reg_val & GLQF_FD_SIZE_FD_GSIZE_M) >> 2397 GLQF_FD_SIZE_FD_GSIZE_S; 2398 func_p->fd_fltr_guar = 2399 ice_get_num_per_func(hw, val); 2400 val = (reg_val & GLQF_FD_SIZE_FD_BSIZE_M) >> 2401 GLQF_FD_SIZE_FD_BSIZE_S; 2402 func_p->fd_fltr_best_effort = val; 2403 2404 ice_debug(hw, ICE_DBG_INIT, "func caps: fd_fltr_guar = %d\n", 2405 func_p->fd_fltr_guar); 2406 ice_debug(hw, ICE_DBG_INIT, "func caps: fd_fltr_best_effort = %d\n", 2407 func_p->fd_fltr_best_effort); 2408 } 2409 2410 /** 2411 * ice_parse_func_caps - Parse function capabilities 2412 * @hw: pointer to the HW struct 2413 * @func_p: pointer to function capabilities structure 2414 * @buf: buffer containing the function capability records 2415 * @cap_count: the number of capabilities 2416 * 2417 * Helper function to parse function (0x000A) capabilities list. For 2418 * capabilities shared between device and function, this relies on 2419 * ice_parse_common_caps. 2420 * 2421 * Loop through the list of provided capabilities and extract the relevant 2422 * data into the function capabilities structured. 2423 */ 2424 static void 2425 ice_parse_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p, 2426 void *buf, u32 cap_count) 2427 { 2428 struct ice_aqc_list_caps_elem *cap_resp; 2429 u32 i; 2430 2431 cap_resp = buf; 2432 2433 memset(func_p, 0, sizeof(*func_p)); 2434 2435 for (i = 0; i < cap_count; i++) { 2436 u16 cap = le16_to_cpu(cap_resp[i].cap); 2437 bool found; 2438 2439 found = ice_parse_common_caps(hw, &func_p->common_cap, 2440 &cap_resp[i], "func caps"); 2441 2442 switch (cap) { 2443 case ICE_AQC_CAPS_VF: 2444 ice_parse_vf_func_caps(hw, func_p, &cap_resp[i]); 2445 break; 2446 case ICE_AQC_CAPS_VSI: 2447 ice_parse_vsi_func_caps(hw, func_p, &cap_resp[i]); 2448 break; 2449 case ICE_AQC_CAPS_1588: 2450 ice_parse_1588_func_caps(hw, func_p, &cap_resp[i]); 2451 break; 2452 case ICE_AQC_CAPS_FD: 2453 ice_parse_fdir_func_caps(hw, func_p); 2454 break; 2455 default: 2456 /* Don't list common capabilities as unknown */ 2457 if (!found) 2458 ice_debug(hw, ICE_DBG_INIT, "func caps: unknown capability[%d]: 0x%x\n", 2459 i, cap); 2460 break; 2461 } 2462 } 2463 2464 ice_recalc_port_limited_caps(hw, &func_p->common_cap); 2465 } 2466 2467 /** 2468 * ice_parse_valid_functions_cap - Parse ICE_AQC_CAPS_VALID_FUNCTIONS caps 2469 * @hw: pointer to the HW struct 2470 * @dev_p: pointer to device capabilities structure 2471 * @cap: capability element to parse 2472 * 2473 * Parse ICE_AQC_CAPS_VALID_FUNCTIONS for device capabilities. 2474 */ 2475 static void 2476 ice_parse_valid_functions_cap(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, 2477 struct ice_aqc_list_caps_elem *cap) 2478 { 2479 u32 number = le32_to_cpu(cap->number); 2480 2481 dev_p->num_funcs = hweight32(number); 2482 ice_debug(hw, ICE_DBG_INIT, "dev caps: num_funcs = %d\n", 2483 dev_p->num_funcs); 2484 } 2485 2486 /** 2487 * ice_parse_vf_dev_caps - Parse ICE_AQC_CAPS_VF device caps 2488 * @hw: pointer to the HW struct 2489 * @dev_p: pointer to device capabilities structure 2490 * @cap: capability element to parse 2491 * 2492 * Parse ICE_AQC_CAPS_VF for device capabilities. 
2493 */ 2494 static void 2495 ice_parse_vf_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, 2496 struct ice_aqc_list_caps_elem *cap) 2497 { 2498 u32 number = le32_to_cpu(cap->number); 2499 2500 dev_p->num_vfs_exposed = number; 2501 ice_debug(hw, ICE_DBG_INIT, "dev_caps: num_vfs_exposed = %d\n", 2502 dev_p->num_vfs_exposed); 2503 } 2504 2505 /** 2506 * ice_parse_vsi_dev_caps - Parse ICE_AQC_CAPS_VSI device caps 2507 * @hw: pointer to the HW struct 2508 * @dev_p: pointer to device capabilities structure 2509 * @cap: capability element to parse 2510 * 2511 * Parse ICE_AQC_CAPS_VSI for device capabilities. 2512 */ 2513 static void 2514 ice_parse_vsi_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, 2515 struct ice_aqc_list_caps_elem *cap) 2516 { 2517 u32 number = le32_to_cpu(cap->number); 2518 2519 dev_p->num_vsi_allocd_to_host = number; 2520 ice_debug(hw, ICE_DBG_INIT, "dev caps: num_vsi_allocd_to_host = %d\n", 2521 dev_p->num_vsi_allocd_to_host); 2522 } 2523 2524 /** 2525 * ice_parse_1588_dev_caps - Parse ICE_AQC_CAPS_1588 device caps 2526 * @hw: pointer to the HW struct 2527 * @dev_p: pointer to device capabilities structure 2528 * @cap: capability element to parse 2529 * 2530 * Parse ICE_AQC_CAPS_1588 for device capabilities. 2531 */ 2532 static void 2533 ice_parse_1588_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, 2534 struct ice_aqc_list_caps_elem *cap) 2535 { 2536 struct ice_ts_dev_info *info = &dev_p->ts_dev_info; 2537 u32 logical_id = le32_to_cpu(cap->logical_id); 2538 u32 phys_id = le32_to_cpu(cap->phys_id); 2539 u32 number = le32_to_cpu(cap->number); 2540 2541 info->ena = ((number & ICE_TS_DEV_ENA_M) != 0); 2542 dev_p->common_cap.ieee_1588 = info->ena; 2543 2544 info->tmr0_owner = number & ICE_TS_TMR0_OWNR_M; 2545 info->tmr0_owned = ((number & ICE_TS_TMR0_OWND_M) != 0); 2546 info->tmr0_ena = ((number & ICE_TS_TMR0_ENA_M) != 0); 2547 2548 info->tmr1_owner = (number & ICE_TS_TMR1_OWNR_M) >> ICE_TS_TMR1_OWNR_S; 2549 info->tmr1_owned = ((number & ICE_TS_TMR1_OWND_M) != 0); 2550 info->tmr1_ena = ((number & ICE_TS_TMR1_ENA_M) != 0); 2551 2552 info->ts_ll_read = ((number & ICE_TS_LL_TX_TS_READ_M) != 0); 2553 2554 info->ena_ports = logical_id; 2555 info->tmr_own_map = phys_id; 2556 2557 ice_debug(hw, ICE_DBG_INIT, "dev caps: ieee_1588 = %u\n", 2558 dev_p->common_cap.ieee_1588); 2559 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr0_owner = %u\n", 2560 info->tmr0_owner); 2561 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr0_owned = %u\n", 2562 info->tmr0_owned); 2563 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr0_ena = %u\n", 2564 info->tmr0_ena); 2565 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_owner = %u\n", 2566 info->tmr1_owner); 2567 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_owned = %u\n", 2568 info->tmr1_owned); 2569 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_ena = %u\n", 2570 info->tmr1_ena); 2571 ice_debug(hw, ICE_DBG_INIT, "dev caps: ts_ll_read = %u\n", 2572 info->ts_ll_read); 2573 ice_debug(hw, ICE_DBG_INIT, "dev caps: ieee_1588 ena_ports = %u\n", 2574 info->ena_ports); 2575 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr_own_map = %u\n", 2576 info->tmr_own_map); 2577 } 2578 2579 /** 2580 * ice_parse_fdir_dev_caps - Parse ICE_AQC_CAPS_FD device caps 2581 * @hw: pointer to the HW struct 2582 * @dev_p: pointer to device capabilities structure 2583 * @cap: capability element to parse 2584 * 2585 * Parse ICE_AQC_CAPS_FD for device capabilities. 
2586 */ 2587 static void 2588 ice_parse_fdir_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, 2589 struct ice_aqc_list_caps_elem *cap) 2590 { 2591 u32 number = le32_to_cpu(cap->number); 2592 2593 dev_p->num_flow_director_fltr = number; 2594 ice_debug(hw, ICE_DBG_INIT, "dev caps: num_flow_director_fltr = %d\n", 2595 dev_p->num_flow_director_fltr); 2596 } 2597 2598 /** 2599 * ice_parse_dev_caps - Parse device capabilities 2600 * @hw: pointer to the HW struct 2601 * @dev_p: pointer to device capabilities structure 2602 * @buf: buffer containing the device capability records 2603 * @cap_count: the number of capabilities 2604 * 2605 * Helper device to parse device (0x000B) capabilities list. For 2606 * capabilities shared between device and function, this relies on 2607 * ice_parse_common_caps. 2608 * 2609 * Loop through the list of provided capabilities and extract the relevant 2610 * data into the device capabilities structured. 2611 */ 2612 static void 2613 ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, 2614 void *buf, u32 cap_count) 2615 { 2616 struct ice_aqc_list_caps_elem *cap_resp; 2617 u32 i; 2618 2619 cap_resp = buf; 2620 2621 memset(dev_p, 0, sizeof(*dev_p)); 2622 2623 for (i = 0; i < cap_count; i++) { 2624 u16 cap = le16_to_cpu(cap_resp[i].cap); 2625 bool found; 2626 2627 found = ice_parse_common_caps(hw, &dev_p->common_cap, 2628 &cap_resp[i], "dev caps"); 2629 2630 switch (cap) { 2631 case ICE_AQC_CAPS_VALID_FUNCTIONS: 2632 ice_parse_valid_functions_cap(hw, dev_p, &cap_resp[i]); 2633 break; 2634 case ICE_AQC_CAPS_VF: 2635 ice_parse_vf_dev_caps(hw, dev_p, &cap_resp[i]); 2636 break; 2637 case ICE_AQC_CAPS_VSI: 2638 ice_parse_vsi_dev_caps(hw, dev_p, &cap_resp[i]); 2639 break; 2640 case ICE_AQC_CAPS_1588: 2641 ice_parse_1588_dev_caps(hw, dev_p, &cap_resp[i]); 2642 break; 2643 case ICE_AQC_CAPS_FD: 2644 ice_parse_fdir_dev_caps(hw, dev_p, &cap_resp[i]); 2645 break; 2646 default: 2647 /* Don't list common capabilities as unknown */ 2648 if (!found) 2649 ice_debug(hw, ICE_DBG_INIT, "dev caps: unknown capability[%d]: 0x%x\n", 2650 i, cap); 2651 break; 2652 } 2653 } 2654 2655 ice_recalc_port_limited_caps(hw, &dev_p->common_cap); 2656 } 2657 2658 /** 2659 * ice_aq_list_caps - query function/device capabilities 2660 * @hw: pointer to the HW struct 2661 * @buf: a buffer to hold the capabilities 2662 * @buf_size: size of the buffer 2663 * @cap_count: if not NULL, set to the number of capabilities reported 2664 * @opc: capabilities type to discover, device or function 2665 * @cd: pointer to command details structure or NULL 2666 * 2667 * Get the function (0x000A) or device (0x000B) capabilities description from 2668 * firmware and store it in the buffer. 2669 * 2670 * If the cap_count pointer is not NULL, then it is set to the number of 2671 * capabilities firmware will report. Note that if the buffer size is too 2672 * small, it is possible the command will return ICE_AQ_ERR_ENOMEM. The 2673 * cap_count will still be updated in this case. It is recommended that the 2674 * buffer size be set to ICE_AQ_MAX_BUF_LEN (the largest possible buffer that 2675 * firmware could return) to avoid this. 
2676 */ 2677 int 2678 ice_aq_list_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count, 2679 enum ice_adminq_opc opc, struct ice_sq_cd *cd) 2680 { 2681 struct ice_aqc_list_caps *cmd; 2682 struct ice_aq_desc desc; 2683 int status; 2684 2685 cmd = &desc.params.get_cap; 2686 2687 if (opc != ice_aqc_opc_list_func_caps && 2688 opc != ice_aqc_opc_list_dev_caps) 2689 return -EINVAL; 2690 2691 ice_fill_dflt_direct_cmd_desc(&desc, opc); 2692 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); 2693 2694 if (cap_count) 2695 *cap_count = le32_to_cpu(cmd->count); 2696 2697 return status; 2698 } 2699 2700 /** 2701 * ice_discover_dev_caps - Read and extract device capabilities 2702 * @hw: pointer to the hardware structure 2703 * @dev_caps: pointer to device capabilities structure 2704 * 2705 * Read the device capabilities and extract them into the dev_caps structure 2706 * for later use. 2707 */ 2708 int 2709 ice_discover_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_caps) 2710 { 2711 u32 cap_count = 0; 2712 void *cbuf; 2713 int status; 2714 2715 cbuf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL); 2716 if (!cbuf) 2717 return -ENOMEM; 2718 2719 /* Although the driver doesn't know the number of capabilities the 2720 * device will return, we can simply send a 4KB buffer, the maximum 2721 * possible size that firmware can return. 2722 */ 2723 cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem); 2724 2725 status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count, 2726 ice_aqc_opc_list_dev_caps, NULL); 2727 if (!status) 2728 ice_parse_dev_caps(hw, dev_caps, cbuf, cap_count); 2729 kfree(cbuf); 2730 2731 return status; 2732 } 2733 2734 /** 2735 * ice_discover_func_caps - Read and extract function capabilities 2736 * @hw: pointer to the hardware structure 2737 * @func_caps: pointer to function capabilities structure 2738 * 2739 * Read the function capabilities and extract them into the func_caps structure 2740 * for later use. 2741 */ 2742 static int 2743 ice_discover_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_caps) 2744 { 2745 u32 cap_count = 0; 2746 void *cbuf; 2747 int status; 2748 2749 cbuf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL); 2750 if (!cbuf) 2751 return -ENOMEM; 2752 2753 /* Although the driver doesn't know the number of capabilities the 2754 * device will return, we can simply send a 4KB buffer, the maximum 2755 * possible size that firmware can return. 
2756 */ 2757 cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem); 2758 2759 status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count, 2760 ice_aqc_opc_list_func_caps, NULL); 2761 if (!status) 2762 ice_parse_func_caps(hw, func_caps, cbuf, cap_count); 2763 kfree(cbuf); 2764 2765 return status; 2766 } 2767 2768 /** 2769 * ice_set_safe_mode_caps - Override dev/func capabilities when in safe mode 2770 * @hw: pointer to the hardware structure 2771 */ 2772 void ice_set_safe_mode_caps(struct ice_hw *hw) 2773 { 2774 struct ice_hw_func_caps *func_caps = &hw->func_caps; 2775 struct ice_hw_dev_caps *dev_caps = &hw->dev_caps; 2776 struct ice_hw_common_caps cached_caps; 2777 u32 num_funcs; 2778 2779 /* cache some func_caps values that should be restored after memset */ 2780 cached_caps = func_caps->common_cap; 2781 2782 /* unset func capabilities */ 2783 memset(func_caps, 0, sizeof(*func_caps)); 2784 2785 #define ICE_RESTORE_FUNC_CAP(name) \ 2786 func_caps->common_cap.name = cached_caps.name 2787 2788 /* restore cached values */ 2789 ICE_RESTORE_FUNC_CAP(valid_functions); 2790 ICE_RESTORE_FUNC_CAP(txq_first_id); 2791 ICE_RESTORE_FUNC_CAP(rxq_first_id); 2792 ICE_RESTORE_FUNC_CAP(msix_vector_first_id); 2793 ICE_RESTORE_FUNC_CAP(max_mtu); 2794 ICE_RESTORE_FUNC_CAP(nvm_unified_update); 2795 ICE_RESTORE_FUNC_CAP(nvm_update_pending_nvm); 2796 ICE_RESTORE_FUNC_CAP(nvm_update_pending_orom); 2797 ICE_RESTORE_FUNC_CAP(nvm_update_pending_netlist); 2798 2799 /* one Tx and one Rx queue in safe mode */ 2800 func_caps->common_cap.num_rxq = 1; 2801 func_caps->common_cap.num_txq = 1; 2802 2803 /* two MSIX vectors, one for traffic and one for misc causes */ 2804 func_caps->common_cap.num_msix_vectors = 2; 2805 func_caps->guar_num_vsi = 1; 2806 2807 /* cache some dev_caps values that should be restored after memset */ 2808 cached_caps = dev_caps->common_cap; 2809 num_funcs = dev_caps->num_funcs; 2810 2811 /* unset dev capabilities */ 2812 memset(dev_caps, 0, sizeof(*dev_caps)); 2813 2814 #define ICE_RESTORE_DEV_CAP(name) \ 2815 dev_caps->common_cap.name = cached_caps.name 2816 2817 /* restore cached values */ 2818 ICE_RESTORE_DEV_CAP(valid_functions); 2819 ICE_RESTORE_DEV_CAP(txq_first_id); 2820 ICE_RESTORE_DEV_CAP(rxq_first_id); 2821 ICE_RESTORE_DEV_CAP(msix_vector_first_id); 2822 ICE_RESTORE_DEV_CAP(max_mtu); 2823 ICE_RESTORE_DEV_CAP(nvm_unified_update); 2824 ICE_RESTORE_DEV_CAP(nvm_update_pending_nvm); 2825 ICE_RESTORE_DEV_CAP(nvm_update_pending_orom); 2826 ICE_RESTORE_DEV_CAP(nvm_update_pending_netlist); 2827 dev_caps->num_funcs = num_funcs; 2828 2829 /* one Tx and one Rx queue per function in safe mode */ 2830 dev_caps->common_cap.num_rxq = num_funcs; 2831 dev_caps->common_cap.num_txq = num_funcs; 2832 2833 /* two MSIX vectors per function */ 2834 dev_caps->common_cap.num_msix_vectors = 2 * num_funcs; 2835 } 2836 2837 /** 2838 * ice_get_caps - get info about the HW 2839 * @hw: pointer to the hardware structure 2840 */ 2841 int ice_get_caps(struct ice_hw *hw) 2842 { 2843 int status; 2844 2845 status = ice_discover_dev_caps(hw, &hw->dev_caps); 2846 if (status) 2847 return status; 2848 2849 return ice_discover_func_caps(hw, &hw->func_caps); 2850 } 2851 2852 /** 2853 * ice_aq_manage_mac_write - manage MAC address write command 2854 * @hw: pointer to the HW struct 2855 * @mac_addr: MAC address to be written as LAA/LAA+WoL/Port address 2856 * @flags: flags to control write behavior 2857 * @cd: pointer to command details structure or NULL 2858 * 2859 * This function is used to write MAC address 
to the NVM (0x0108). 2860 */ 2861 int 2862 ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags, 2863 struct ice_sq_cd *cd) 2864 { 2865 struct ice_aqc_manage_mac_write *cmd; 2866 struct ice_aq_desc desc; 2867 2868 cmd = &desc.params.mac_write; 2869 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_write); 2870 2871 cmd->flags = flags; 2872 ether_addr_copy(cmd->mac_addr, mac_addr); 2873 2874 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 2875 } 2876 2877 /** 2878 * ice_aq_clear_pxe_mode 2879 * @hw: pointer to the HW struct 2880 * 2881 * Tell the firmware that the driver is taking over from PXE (0x0110). 2882 */ 2883 static int ice_aq_clear_pxe_mode(struct ice_hw *hw) 2884 { 2885 struct ice_aq_desc desc; 2886 2887 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pxe_mode); 2888 desc.params.clear_pxe.rx_cnt = ICE_AQC_CLEAR_PXE_RX_CNT; 2889 2890 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 2891 } 2892 2893 /** 2894 * ice_clear_pxe_mode - clear pxe operations mode 2895 * @hw: pointer to the HW struct 2896 * 2897 * Make sure all PXE mode settings are cleared, including things 2898 * like descriptor fetch/write-back mode. 2899 */ 2900 void ice_clear_pxe_mode(struct ice_hw *hw) 2901 { 2902 if (ice_check_sq_alive(hw, &hw->adminq)) 2903 ice_aq_clear_pxe_mode(hw); 2904 } 2905 2906 /** 2907 * ice_aq_set_port_params - set physical port parameters. 2908 * @pi: pointer to the port info struct 2909 * @double_vlan: if set double VLAN is enabled 2910 * @cd: pointer to command details structure or NULL 2911 * 2912 * Set Physical port parameters (0x0203) 2913 */ 2914 int 2915 ice_aq_set_port_params(struct ice_port_info *pi, bool double_vlan, 2916 struct ice_sq_cd *cd) 2917 2918 { 2919 struct ice_aqc_set_port_params *cmd; 2920 struct ice_hw *hw = pi->hw; 2921 struct ice_aq_desc desc; 2922 u16 cmd_flags = 0; 2923 2924 cmd = &desc.params.set_port_params; 2925 2926 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_params); 2927 if (double_vlan) 2928 cmd_flags |= ICE_AQC_SET_P_PARAMS_DOUBLE_VLAN_ENA; 2929 cmd->cmd_flags = cpu_to_le16(cmd_flags); 2930 2931 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 2932 } 2933 2934 /** 2935 * ice_is_100m_speed_supported 2936 * @hw: pointer to the HW struct 2937 * 2938 * returns true if 100M speeds are supported by the device, 2939 * false otherwise. 2940 */ 2941 bool ice_is_100m_speed_supported(struct ice_hw *hw) 2942 { 2943 switch (hw->device_id) { 2944 case ICE_DEV_ID_E822C_SGMII: 2945 case ICE_DEV_ID_E822L_SGMII: 2946 case ICE_DEV_ID_E823L_1GBE: 2947 case ICE_DEV_ID_E823C_SGMII: 2948 return true; 2949 default: 2950 return false; 2951 } 2952 } 2953 2954 /** 2955 * ice_get_link_speed_based_on_phy_type - returns link speed 2956 * @phy_type_low: lower part of phy_type 2957 * @phy_type_high: higher part of phy_type 2958 * 2959 * This helper function will convert an entry in PHY type structure 2960 * [phy_type_low, phy_type_high] to its corresponding link speed. 2961 * Note: In the structure of [phy_type_low, phy_type_high], there should 2962 * be one bit set, as this function will convert one PHY type to its 2963 * speed. 
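 * For example, phy_type_low == ICE_PHY_TYPE_LOW_25GBASE_SR with
 * phy_type_high == 0 maps to ICE_AQ_LINK_SPEED_25GB.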
2964 * If no bit gets set, ICE_AQ_LINK_SPEED_UNKNOWN will be returned 2965 * If more than one bit gets set, ICE_AQ_LINK_SPEED_UNKNOWN will be returned 2966 */ 2967 static u16 2968 ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high) 2969 { 2970 u16 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN; 2971 u16 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN; 2972 2973 switch (phy_type_low) { 2974 case ICE_PHY_TYPE_LOW_100BASE_TX: 2975 case ICE_PHY_TYPE_LOW_100M_SGMII: 2976 speed_phy_type_low = ICE_AQ_LINK_SPEED_100MB; 2977 break; 2978 case ICE_PHY_TYPE_LOW_1000BASE_T: 2979 case ICE_PHY_TYPE_LOW_1000BASE_SX: 2980 case ICE_PHY_TYPE_LOW_1000BASE_LX: 2981 case ICE_PHY_TYPE_LOW_1000BASE_KX: 2982 case ICE_PHY_TYPE_LOW_1G_SGMII: 2983 speed_phy_type_low = ICE_AQ_LINK_SPEED_1000MB; 2984 break; 2985 case ICE_PHY_TYPE_LOW_2500BASE_T: 2986 case ICE_PHY_TYPE_LOW_2500BASE_X: 2987 case ICE_PHY_TYPE_LOW_2500BASE_KX: 2988 speed_phy_type_low = ICE_AQ_LINK_SPEED_2500MB; 2989 break; 2990 case ICE_PHY_TYPE_LOW_5GBASE_T: 2991 case ICE_PHY_TYPE_LOW_5GBASE_KR: 2992 speed_phy_type_low = ICE_AQ_LINK_SPEED_5GB; 2993 break; 2994 case ICE_PHY_TYPE_LOW_10GBASE_T: 2995 case ICE_PHY_TYPE_LOW_10G_SFI_DA: 2996 case ICE_PHY_TYPE_LOW_10GBASE_SR: 2997 case ICE_PHY_TYPE_LOW_10GBASE_LR: 2998 case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1: 2999 case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC: 3000 case ICE_PHY_TYPE_LOW_10G_SFI_C2C: 3001 speed_phy_type_low = ICE_AQ_LINK_SPEED_10GB; 3002 break; 3003 case ICE_PHY_TYPE_LOW_25GBASE_T: 3004 case ICE_PHY_TYPE_LOW_25GBASE_CR: 3005 case ICE_PHY_TYPE_LOW_25GBASE_CR_S: 3006 case ICE_PHY_TYPE_LOW_25GBASE_CR1: 3007 case ICE_PHY_TYPE_LOW_25GBASE_SR: 3008 case ICE_PHY_TYPE_LOW_25GBASE_LR: 3009 case ICE_PHY_TYPE_LOW_25GBASE_KR: 3010 case ICE_PHY_TYPE_LOW_25GBASE_KR_S: 3011 case ICE_PHY_TYPE_LOW_25GBASE_KR1: 3012 case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC: 3013 case ICE_PHY_TYPE_LOW_25G_AUI_C2C: 3014 speed_phy_type_low = ICE_AQ_LINK_SPEED_25GB; 3015 break; 3016 case ICE_PHY_TYPE_LOW_40GBASE_CR4: 3017 case ICE_PHY_TYPE_LOW_40GBASE_SR4: 3018 case ICE_PHY_TYPE_LOW_40GBASE_LR4: 3019 case ICE_PHY_TYPE_LOW_40GBASE_KR4: 3020 case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC: 3021 case ICE_PHY_TYPE_LOW_40G_XLAUI: 3022 speed_phy_type_low = ICE_AQ_LINK_SPEED_40GB; 3023 break; 3024 case ICE_PHY_TYPE_LOW_50GBASE_CR2: 3025 case ICE_PHY_TYPE_LOW_50GBASE_SR2: 3026 case ICE_PHY_TYPE_LOW_50GBASE_LR2: 3027 case ICE_PHY_TYPE_LOW_50GBASE_KR2: 3028 case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC: 3029 case ICE_PHY_TYPE_LOW_50G_LAUI2: 3030 case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC: 3031 case ICE_PHY_TYPE_LOW_50G_AUI2: 3032 case ICE_PHY_TYPE_LOW_50GBASE_CP: 3033 case ICE_PHY_TYPE_LOW_50GBASE_SR: 3034 case ICE_PHY_TYPE_LOW_50GBASE_FR: 3035 case ICE_PHY_TYPE_LOW_50GBASE_LR: 3036 case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4: 3037 case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC: 3038 case ICE_PHY_TYPE_LOW_50G_AUI1: 3039 speed_phy_type_low = ICE_AQ_LINK_SPEED_50GB; 3040 break; 3041 case ICE_PHY_TYPE_LOW_100GBASE_CR4: 3042 case ICE_PHY_TYPE_LOW_100GBASE_SR4: 3043 case ICE_PHY_TYPE_LOW_100GBASE_LR4: 3044 case ICE_PHY_TYPE_LOW_100GBASE_KR4: 3045 case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC: 3046 case ICE_PHY_TYPE_LOW_100G_CAUI4: 3047 case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC: 3048 case ICE_PHY_TYPE_LOW_100G_AUI4: 3049 case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4: 3050 case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4: 3051 case ICE_PHY_TYPE_LOW_100GBASE_CP2: 3052 case ICE_PHY_TYPE_LOW_100GBASE_SR2: 3053 case ICE_PHY_TYPE_LOW_100GBASE_DR: 3054 speed_phy_type_low = ICE_AQ_LINK_SPEED_100GB; 3055 
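		/* every 100G entry in phy_type_low maps to the same AQ speed
		 * code; the two-lane 100G PHY types that live in
		 * phy_type_high are handled by the second switch below
		 */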
		break;
3056 	default:
3057 		speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
3058 		break;
3059 	}
3060
3061 	switch (phy_type_high) {
3062 	case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
3063 	case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
3064 	case ICE_PHY_TYPE_HIGH_100G_CAUI2:
3065 	case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
3066 	case ICE_PHY_TYPE_HIGH_100G_AUI2:
3067 		speed_phy_type_high = ICE_AQ_LINK_SPEED_100GB;
3068 		break;
3069 	default:
3070 		speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
3071 		break;
3072 	}
3073
3074 	if (speed_phy_type_low == ICE_AQ_LINK_SPEED_UNKNOWN &&
3075 	    speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
3076 		return ICE_AQ_LINK_SPEED_UNKNOWN;
3077 	else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
3078 		 speed_phy_type_high != ICE_AQ_LINK_SPEED_UNKNOWN)
3079 		return ICE_AQ_LINK_SPEED_UNKNOWN;
3080 	else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
3081 		 speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
3082 		return speed_phy_type_low;
3083 	else
3084 		return speed_phy_type_high;
3085 }
3086
3087 /**
3088  * ice_update_phy_type
3089  * @phy_type_low: pointer to the lower part of phy_type
3090  * @phy_type_high: pointer to the higher part of phy_type
3091  * @link_speeds_bitmap: targeted link speeds bitmap
3092  *
3093  * Note: the link_speeds_bitmap encoding matches
3094  * [ice_aqc_get_link_status->link_speed]. The caller may pass in a
3095  * link_speeds_bitmap that includes multiple speeds.
3096  *
3097  * Each bit in the [phy_type_low, phy_type_high] pair represents a
3098  * certain link speed. This helper function turns on the bits in that
3099  * pair whose corresponding speed is set in the link_speeds_bitmap
3100  * input parameter.
3101  */
3102 void
3103 ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high,
3104 		    u16 link_speeds_bitmap)
3105 {
3106 	u64 pt_high;
3107 	u64 pt_low;
3108 	int index;
3109 	u16 speed;
3110
3111 	/* We first check with low part of phy_type */
3112 	for (index = 0; index <= ICE_PHY_TYPE_LOW_MAX_INDEX; index++) {
3113 		pt_low = BIT_ULL(index);
3114 		speed = ice_get_link_speed_based_on_phy_type(pt_low, 0);
3115
3116 		if (link_speeds_bitmap & speed)
3117 			*phy_type_low |= BIT_ULL(index);
3118 	}
3119
3120 	/* We then check with high part of phy_type */
3121 	for (index = 0; index <= ICE_PHY_TYPE_HIGH_MAX_INDEX; index++) {
3122 		pt_high = BIT_ULL(index);
3123 		speed = ice_get_link_speed_based_on_phy_type(0, pt_high);
3124
3125 		if (link_speeds_bitmap & speed)
3126 			*phy_type_high |= BIT_ULL(index);
3127 	}
3128 }
3129
3130 /**
3131  * ice_aq_set_phy_cfg
3132  * @hw: pointer to the HW struct
3133  * @pi: port info structure of the interested logical port
3134  * @cfg: structure with PHY configuration data to be set
3135  * @cd: pointer to command details structure or NULL
3136  *
3137  * Set the various PHY configuration parameters supported on the Port.
3138  * One or more of the Set PHY config parameters may be ignored in an MFP
3139  * mode as the PF may not have the privilege to set some of the PHY Config
3140  * parameters. This status will be indicated by the command response (0x0601).
3141  */
3142 int
3143 ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi,
3144 		   struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd)
3145 {
3146 	struct ice_aq_desc desc;
3147 	int status;
3148
3149 	if (!cfg)
3150 		return -EINVAL;
3151
3152 	/* Ensure that only valid bits of cfg->caps can be turned on.
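	 * Undefined bits are cleared rather than failing the call; the
	 * clamp is reported via ice_debug() below, so a misbehaving caller
	 * still shows up in the PHY debug log.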
*/ 3153 if (cfg->caps & ~ICE_AQ_PHY_ENA_VALID_MASK) { 3154 ice_debug(hw, ICE_DBG_PHY, "Invalid bit is set in ice_aqc_set_phy_cfg_data->caps : 0x%x\n", 3155 cfg->caps); 3156 3157 cfg->caps &= ICE_AQ_PHY_ENA_VALID_MASK; 3158 } 3159 3160 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_cfg); 3161 desc.params.set_phy.lport_num = pi->lport; 3162 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 3163 3164 ice_debug(hw, ICE_DBG_LINK, "set phy cfg\n"); 3165 ice_debug(hw, ICE_DBG_LINK, " phy_type_low = 0x%llx\n", 3166 (unsigned long long)le64_to_cpu(cfg->phy_type_low)); 3167 ice_debug(hw, ICE_DBG_LINK, " phy_type_high = 0x%llx\n", 3168 (unsigned long long)le64_to_cpu(cfg->phy_type_high)); 3169 ice_debug(hw, ICE_DBG_LINK, " caps = 0x%x\n", cfg->caps); 3170 ice_debug(hw, ICE_DBG_LINK, " low_power_ctrl_an = 0x%x\n", 3171 cfg->low_power_ctrl_an); 3172 ice_debug(hw, ICE_DBG_LINK, " eee_cap = 0x%x\n", cfg->eee_cap); 3173 ice_debug(hw, ICE_DBG_LINK, " eeer_value = 0x%x\n", cfg->eeer_value); 3174 ice_debug(hw, ICE_DBG_LINK, " link_fec_opt = 0x%x\n", 3175 cfg->link_fec_opt); 3176 3177 status = ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd); 3178 if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE) 3179 status = 0; 3180 3181 if (!status) 3182 pi->phy.curr_user_phy_cfg = *cfg; 3183 3184 return status; 3185 } 3186 3187 /** 3188 * ice_update_link_info - update status of the HW network link 3189 * @pi: port info structure of the interested logical port 3190 */ 3191 int ice_update_link_info(struct ice_port_info *pi) 3192 { 3193 struct ice_link_status *li; 3194 int status; 3195 3196 if (!pi) 3197 return -EINVAL; 3198 3199 li = &pi->phy.link_info; 3200 3201 status = ice_aq_get_link_info(pi, true, NULL, NULL); 3202 if (status) 3203 return status; 3204 3205 if (li->link_info & ICE_AQ_MEDIA_AVAILABLE) { 3206 struct ice_aqc_get_phy_caps_data *pcaps; 3207 struct ice_hw *hw; 3208 3209 hw = pi->hw; 3210 pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), 3211 GFP_KERNEL); 3212 if (!pcaps) 3213 return -ENOMEM; 3214 3215 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA, 3216 pcaps, NULL); 3217 3218 devm_kfree(ice_hw_to_dev(hw), pcaps); 3219 } 3220 3221 return status; 3222 } 3223 3224 /** 3225 * ice_cache_phy_user_req 3226 * @pi: port information structure 3227 * @cache_data: PHY logging data 3228 * @cache_mode: PHY logging mode 3229 * 3230 * Log the user request on (FC, FEC, SPEED) for later use. 
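 *
 * Illustrative call, as made from ice_cfg_phy_fc() further down:
 *
 *	cache_data.data.curr_user_fc_req = ICE_FC_FULL;
 *	ice_cache_phy_user_req(pi, cache_data, ICE_FC_MODE);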
3231 */ 3232 static void 3233 ice_cache_phy_user_req(struct ice_port_info *pi, 3234 struct ice_phy_cache_mode_data cache_data, 3235 enum ice_phy_cache_mode cache_mode) 3236 { 3237 if (!pi) 3238 return; 3239 3240 switch (cache_mode) { 3241 case ICE_FC_MODE: 3242 pi->phy.curr_user_fc_req = cache_data.data.curr_user_fc_req; 3243 break; 3244 case ICE_SPEED_MODE: 3245 pi->phy.curr_user_speed_req = 3246 cache_data.data.curr_user_speed_req; 3247 break; 3248 case ICE_FEC_MODE: 3249 pi->phy.curr_user_fec_req = cache_data.data.curr_user_fec_req; 3250 break; 3251 default: 3252 break; 3253 } 3254 } 3255 3256 /** 3257 * ice_caps_to_fc_mode 3258 * @caps: PHY capabilities 3259 * 3260 * Convert PHY FC capabilities to ice FC mode 3261 */ 3262 enum ice_fc_mode ice_caps_to_fc_mode(u8 caps) 3263 { 3264 if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE && 3265 caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE) 3266 return ICE_FC_FULL; 3267 3268 if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE) 3269 return ICE_FC_TX_PAUSE; 3270 3271 if (caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE) 3272 return ICE_FC_RX_PAUSE; 3273 3274 return ICE_FC_NONE; 3275 } 3276 3277 /** 3278 * ice_caps_to_fec_mode 3279 * @caps: PHY capabilities 3280 * @fec_options: Link FEC options 3281 * 3282 * Convert PHY FEC capabilities to ice FEC mode 3283 */ 3284 enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options) 3285 { 3286 if (caps & ICE_AQC_PHY_EN_AUTO_FEC) 3287 return ICE_FEC_AUTO; 3288 3289 if (fec_options & (ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN | 3290 ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ | 3291 ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN | 3292 ICE_AQC_PHY_FEC_25G_KR_REQ)) 3293 return ICE_FEC_BASER; 3294 3295 if (fec_options & (ICE_AQC_PHY_FEC_25G_RS_528_REQ | 3296 ICE_AQC_PHY_FEC_25G_RS_544_REQ | 3297 ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN)) 3298 return ICE_FEC_RS; 3299 3300 return ICE_FEC_NONE; 3301 } 3302 3303 /** 3304 * ice_cfg_phy_fc - Configure PHY FC data based on FC mode 3305 * @pi: port information structure 3306 * @cfg: PHY configuration data to set FC mode 3307 * @req_mode: FC mode to configure 3308 */ 3309 int 3310 ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg, 3311 enum ice_fc_mode req_mode) 3312 { 3313 struct ice_phy_cache_mode_data cache_data; 3314 u8 pause_mask = 0x0; 3315 3316 if (!pi || !cfg) 3317 return -EINVAL; 3318 3319 switch (req_mode) { 3320 case ICE_FC_FULL: 3321 pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE; 3322 pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE; 3323 break; 3324 case ICE_FC_RX_PAUSE: 3325 pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE; 3326 break; 3327 case ICE_FC_TX_PAUSE: 3328 pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE; 3329 break; 3330 default: 3331 break; 3332 } 3333 3334 /* clear the old pause settings */ 3335 cfg->caps &= ~(ICE_AQC_PHY_EN_TX_LINK_PAUSE | 3336 ICE_AQC_PHY_EN_RX_LINK_PAUSE); 3337 3338 /* set the new capabilities */ 3339 cfg->caps |= pause_mask; 3340 3341 /* Cache user FC request */ 3342 cache_data.data.curr_user_fc_req = req_mode; 3343 ice_cache_phy_user_req(pi, cache_data, ICE_FC_MODE); 3344 3345 return 0; 3346 } 3347 3348 /** 3349 * ice_set_fc 3350 * @pi: port information structure 3351 * @aq_failures: pointer to status code, specific to ice_set_fc routine 3352 * @ena_auto_link_update: enable automatic link update 3353 * 3354 * Set the requested flow control mode. 
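 *
 * A hypothetical caller requesting full pause with link auto-restart:
 *
 *	u8 aq_failures = 0;
 *
 *	pi->fc.req_mode = ICE_FC_FULL;
 *	status = ice_set_fc(pi, &aq_failures, true);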
3355  */
3356 int
3357 ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
3358 {
3359 	struct ice_aqc_set_phy_cfg_data cfg = { 0 };
3360 	struct ice_aqc_get_phy_caps_data *pcaps;
3361 	struct ice_hw *hw;
3362 	int status;
3363
3364 	if (!pi || !aq_failures)
3365 		return -EINVAL;
3366
3367 	*aq_failures = 0;
3368 	hw = pi->hw;
3369
3370 	pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
3371 	if (!pcaps)
3372 		return -ENOMEM;
3373
3374 	/* Get the current PHY config */
3375 	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG,
3376 				     pcaps, NULL);
3377 	if (status) {
3378 		*aq_failures = ICE_SET_FC_AQ_FAIL_GET;
3379 		goto out;
3380 	}
3381
3382 	ice_copy_phy_caps_to_cfg(pi, pcaps, &cfg);
3383
3384 	/* Configure the set PHY data */
3385 	status = ice_cfg_phy_fc(pi, &cfg, pi->fc.req_mode);
3386 	if (status)
3387 		goto out;
3388
3389 	/* If the capabilities have changed, then set the new config */
3390 	if (cfg.caps != pcaps->caps) {
3391 		int retry_count, retry_max = 10;
3392
3393 		/* Auto restart link so settings take effect */
3394 		if (ena_auto_link_update)
3395 			cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
3396
3397 		status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL);
3398 		if (status) {
3399 			*aq_failures = ICE_SET_FC_AQ_FAIL_SET;
3400 			goto out;
3401 		}
3402
3403 		/* Update the link info.
3404 		 * It sometimes takes a really long time for link to
3405 		 * come back from the atomic reset. Thus, we wait a
3406 		 * little bit.
3407 		 */
3408 		for (retry_count = 0; retry_count < retry_max; retry_count++) {
3409 			status = ice_update_link_info(pi);
3410
3411 			if (!status)
3412 				break;
3413
3414 			mdelay(100);
3415 		}
3416
3417 		if (status)
3418 			*aq_failures = ICE_SET_FC_AQ_FAIL_UPDATE;
3419 	}
3420
3421 out:
3422 	devm_kfree(ice_hw_to_dev(hw), pcaps);
3423 	return status;
3424 }
3425
3426 /**
3427  * ice_phy_caps_equals_cfg
3428  * @phy_caps: PHY capabilities
3429  * @phy_cfg: PHY configuration
3430  *
3431  * Helper function to determine if the PHY capabilities match the PHY
3432  * configuration.
3433  */
3434 bool
3435 ice_phy_caps_equals_cfg(struct ice_aqc_get_phy_caps_data *phy_caps,
3436 			struct ice_aqc_set_phy_cfg_data *phy_cfg)
3437 {
3438 	u8 caps_mask, cfg_mask;
3439
3440 	if (!phy_caps || !phy_cfg)
3441 		return false;
3442
3443 	/* These bits are not common between capabilities and configuration.
3444 	 * Do not use them to determine equality.
3445 */ 3446 caps_mask = ICE_AQC_PHY_CAPS_MASK & ~(ICE_AQC_PHY_AN_MODE | 3447 ICE_AQC_GET_PHY_EN_MOD_QUAL); 3448 cfg_mask = ICE_AQ_PHY_ENA_VALID_MASK & ~ICE_AQ_PHY_ENA_AUTO_LINK_UPDT; 3449 3450 if (phy_caps->phy_type_low != phy_cfg->phy_type_low || 3451 phy_caps->phy_type_high != phy_cfg->phy_type_high || 3452 ((phy_caps->caps & caps_mask) != (phy_cfg->caps & cfg_mask)) || 3453 phy_caps->low_power_ctrl_an != phy_cfg->low_power_ctrl_an || 3454 phy_caps->eee_cap != phy_cfg->eee_cap || 3455 phy_caps->eeer_value != phy_cfg->eeer_value || 3456 phy_caps->link_fec_options != phy_cfg->link_fec_opt) 3457 return false; 3458 3459 return true; 3460 } 3461 3462 /** 3463 * ice_copy_phy_caps_to_cfg - Copy PHY ability data to configuration data 3464 * @pi: port information structure 3465 * @caps: PHY ability structure to copy date from 3466 * @cfg: PHY configuration structure to copy data to 3467 * 3468 * Helper function to copy AQC PHY get ability data to PHY set configuration 3469 * data structure 3470 */ 3471 void 3472 ice_copy_phy_caps_to_cfg(struct ice_port_info *pi, 3473 struct ice_aqc_get_phy_caps_data *caps, 3474 struct ice_aqc_set_phy_cfg_data *cfg) 3475 { 3476 if (!pi || !caps || !cfg) 3477 return; 3478 3479 memset(cfg, 0, sizeof(*cfg)); 3480 cfg->phy_type_low = caps->phy_type_low; 3481 cfg->phy_type_high = caps->phy_type_high; 3482 cfg->caps = caps->caps; 3483 cfg->low_power_ctrl_an = caps->low_power_ctrl_an; 3484 cfg->eee_cap = caps->eee_cap; 3485 cfg->eeer_value = caps->eeer_value; 3486 cfg->link_fec_opt = caps->link_fec_options; 3487 cfg->module_compliance_enforcement = 3488 caps->module_compliance_enforcement; 3489 } 3490 3491 /** 3492 * ice_cfg_phy_fec - Configure PHY FEC data based on FEC mode 3493 * @pi: port information structure 3494 * @cfg: PHY configuration data to set FEC mode 3495 * @fec: FEC mode to configure 3496 */ 3497 int 3498 ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg, 3499 enum ice_fec_mode fec) 3500 { 3501 struct ice_aqc_get_phy_caps_data *pcaps; 3502 struct ice_hw *hw; 3503 int status; 3504 3505 if (!pi || !cfg) 3506 return -EINVAL; 3507 3508 hw = pi->hw; 3509 3510 pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL); 3511 if (!pcaps) 3512 return -ENOMEM; 3513 3514 status = ice_aq_get_phy_caps(pi, false, 3515 (ice_fw_supports_report_dflt_cfg(hw) ? 3516 ICE_AQC_REPORT_DFLT_CFG : 3517 ICE_AQC_REPORT_TOPO_CAP_MEDIA), pcaps, NULL); 3518 if (status) 3519 goto out; 3520 3521 cfg->caps |= pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC; 3522 cfg->link_fec_opt = pcaps->link_fec_options; 3523 3524 switch (fec) { 3525 case ICE_FEC_BASER: 3526 /* Clear RS bits, and AND BASE-R ability 3527 * bits and OR request bits. 3528 */ 3529 cfg->link_fec_opt &= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN | 3530 ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN; 3531 cfg->link_fec_opt |= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ | 3532 ICE_AQC_PHY_FEC_25G_KR_REQ; 3533 break; 3534 case ICE_FEC_RS: 3535 /* Clear BASE-R bits, and AND RS ability 3536 * bits and OR request bits. 3537 */ 3538 cfg->link_fec_opt &= ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN; 3539 cfg->link_fec_opt |= ICE_AQC_PHY_FEC_25G_RS_528_REQ | 3540 ICE_AQC_PHY_FEC_25G_RS_544_REQ; 3541 break; 3542 case ICE_FEC_NONE: 3543 /* Clear all FEC option bits. */ 3544 cfg->link_fec_opt &= ~ICE_AQC_PHY_FEC_MASK; 3545 break; 3546 case ICE_FEC_AUTO: 3547 /* AND auto FEC bit, and all caps bits. 
*/ 3548 cfg->caps &= ICE_AQC_PHY_CAPS_MASK; 3549 cfg->link_fec_opt |= pcaps->link_fec_options; 3550 break; 3551 default: 3552 status = -EINVAL; 3553 break; 3554 } 3555 3556 if (fec == ICE_FEC_AUTO && ice_fw_supports_link_override(hw) && 3557 !ice_fw_supports_report_dflt_cfg(hw)) { 3558 struct ice_link_default_override_tlv tlv = { 0 }; 3559 3560 status = ice_get_link_default_override(&tlv, pi); 3561 if (status) 3562 goto out; 3563 3564 if (!(tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE) && 3565 (tlv.options & ICE_LINK_OVERRIDE_EN)) 3566 cfg->link_fec_opt = tlv.fec_options; 3567 } 3568 3569 out: 3570 kfree(pcaps); 3571 3572 return status; 3573 } 3574 3575 /** 3576 * ice_get_link_status - get status of the HW network link 3577 * @pi: port information structure 3578 * @link_up: pointer to bool (true/false = linkup/linkdown) 3579 * 3580 * Variable link_up is true if link is up, false if link is down. 3581 * The variable link_up is invalid if status is non zero. As a 3582 * result of this call, link status reporting becomes enabled 3583 */ 3584 int ice_get_link_status(struct ice_port_info *pi, bool *link_up) 3585 { 3586 struct ice_phy_info *phy_info; 3587 int status = 0; 3588 3589 if (!pi || !link_up) 3590 return -EINVAL; 3591 3592 phy_info = &pi->phy; 3593 3594 if (phy_info->get_link_info) { 3595 status = ice_update_link_info(pi); 3596 3597 if (status) 3598 ice_debug(pi->hw, ICE_DBG_LINK, "get link status error, status = %d\n", 3599 status); 3600 } 3601 3602 *link_up = phy_info->link_info.link_info & ICE_AQ_LINK_UP; 3603 3604 return status; 3605 } 3606 3607 /** 3608 * ice_aq_set_link_restart_an 3609 * @pi: pointer to the port information structure 3610 * @ena_link: if true: enable link, if false: disable link 3611 * @cd: pointer to command details structure or NULL 3612 * 3613 * Sets up the link and restarts the Auto-Negotiation over the link. 
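 *
 * Illustrative call (a sketch, not taken from the driver; assumes a valid
 * port_info "pi"):
 *
 *	int err = ice_aq_set_link_restart_an(pi, true, NULL);
 *	if (err)
 *		ice_debug(pi->hw, ICE_DBG_LINK, "restart AN failed %d\n", err);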
3614 */ 3615 int 3616 ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link, 3617 struct ice_sq_cd *cd) 3618 { 3619 struct ice_aqc_restart_an *cmd; 3620 struct ice_aq_desc desc; 3621 3622 cmd = &desc.params.restart_an; 3623 3624 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_restart_an); 3625 3626 cmd->cmd_flags = ICE_AQC_RESTART_AN_LINK_RESTART; 3627 cmd->lport_num = pi->lport; 3628 if (ena_link) 3629 cmd->cmd_flags |= ICE_AQC_RESTART_AN_LINK_ENABLE; 3630 else 3631 cmd->cmd_flags &= ~ICE_AQC_RESTART_AN_LINK_ENABLE; 3632 3633 return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd); 3634 } 3635 3636 /** 3637 * ice_aq_set_event_mask 3638 * @hw: pointer to the HW struct 3639 * @port_num: port number of the physical function 3640 * @mask: event mask to be set 3641 * @cd: pointer to command details structure or NULL 3642 * 3643 * Set event mask (0x0613) 3644 */ 3645 int 3646 ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask, 3647 struct ice_sq_cd *cd) 3648 { 3649 struct ice_aqc_set_event_mask *cmd; 3650 struct ice_aq_desc desc; 3651 3652 cmd = &desc.params.set_event_mask; 3653 3654 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_event_mask); 3655 3656 cmd->lport_num = port_num; 3657 3658 cmd->event_mask = cpu_to_le16(mask); 3659 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 3660 } 3661 3662 /** 3663 * ice_aq_set_mac_loopback 3664 * @hw: pointer to the HW struct 3665 * @ena_lpbk: Enable or Disable loopback 3666 * @cd: pointer to command details structure or NULL 3667 * 3668 * Enable/disable loopback on a given port 3669 */ 3670 int 3671 ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd) 3672 { 3673 struct ice_aqc_set_mac_lb *cmd; 3674 struct ice_aq_desc desc; 3675 3676 cmd = &desc.params.set_mac_lb; 3677 3678 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_lb); 3679 if (ena_lpbk) 3680 cmd->lb_mode = ICE_AQ_MAC_LB_EN; 3681 3682 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 3683 } 3684 3685 /** 3686 * ice_aq_set_port_id_led 3687 * @pi: pointer to the port information 3688 * @is_orig_mode: is this LED set to original mode (by the net-list) 3689 * @cd: pointer to command details structure or NULL 3690 * 3691 * Set LED value for the given port (0x06e9) 3692 */ 3693 int 3694 ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode, 3695 struct ice_sq_cd *cd) 3696 { 3697 struct ice_aqc_set_port_id_led *cmd; 3698 struct ice_hw *hw = pi->hw; 3699 struct ice_aq_desc desc; 3700 3701 cmd = &desc.params.set_port_id_led; 3702 3703 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_id_led); 3704 3705 if (is_orig_mode) 3706 cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_ORIG; 3707 else 3708 cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_BLINK; 3709 3710 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 3711 } 3712 3713 /** 3714 * ice_aq_get_port_options 3715 * @hw: pointer to the HW struct 3716 * @options: buffer for the resultant port options 3717 * @option_count: input - size of the buffer in port options structures, 3718 * output - number of returned port options 3719 * @lport: logical port to call the command with (optional) 3720 * @lport_valid: when false, FW uses port owned by the PF instead of lport, 3721 * when PF owns more than 1 port it must be true 3722 * @active_option_idx: index of active port option in returned buffer 3723 * @active_option_valid: active option in returned buffer is valid 3724 * @pending_option_idx: index of pending port option in returned buffer 3725 * @pending_option_valid: pending option in returned buffer 
is valid 3726 * 3727 * Calls Get Port Options AQC (0x06ea) and verifies result. 3728 */ 3729 int 3730 ice_aq_get_port_options(struct ice_hw *hw, 3731 struct ice_aqc_get_port_options_elem *options, 3732 u8 *option_count, u8 lport, bool lport_valid, 3733 u8 *active_option_idx, bool *active_option_valid, 3734 u8 *pending_option_idx, bool *pending_option_valid) 3735 { 3736 struct ice_aqc_get_port_options *cmd; 3737 struct ice_aq_desc desc; 3738 int status; 3739 u8 i; 3740 3741 /* options buffer shall be able to hold max returned options */ 3742 if (*option_count < ICE_AQC_PORT_OPT_COUNT_M) 3743 return -EINVAL; 3744 3745 cmd = &desc.params.get_port_options; 3746 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_port_options); 3747 3748 if (lport_valid) 3749 cmd->lport_num = lport; 3750 cmd->lport_num_valid = lport_valid; 3751 3752 status = ice_aq_send_cmd(hw, &desc, options, 3753 *option_count * sizeof(*options), NULL); 3754 if (status) 3755 return status; 3756 3757 /* verify direct FW response & set output parameters */ 3758 *option_count = FIELD_GET(ICE_AQC_PORT_OPT_COUNT_M, 3759 cmd->port_options_count); 3760 ice_debug(hw, ICE_DBG_PHY, "options: %x\n", *option_count); 3761 *active_option_valid = FIELD_GET(ICE_AQC_PORT_OPT_VALID, 3762 cmd->port_options); 3763 if (*active_option_valid) { 3764 *active_option_idx = FIELD_GET(ICE_AQC_PORT_OPT_ACTIVE_M, 3765 cmd->port_options); 3766 if (*active_option_idx > (*option_count - 1)) 3767 return -EIO; 3768 ice_debug(hw, ICE_DBG_PHY, "active idx: %x\n", 3769 *active_option_idx); 3770 } 3771 3772 *pending_option_valid = FIELD_GET(ICE_AQC_PENDING_PORT_OPT_VALID, 3773 cmd->pending_port_option_status); 3774 if (*pending_option_valid) { 3775 *pending_option_idx = FIELD_GET(ICE_AQC_PENDING_PORT_OPT_IDX_M, 3776 cmd->pending_port_option_status); 3777 if (*pending_option_idx > (*option_count - 1)) 3778 return -EIO; 3779 ice_debug(hw, ICE_DBG_PHY, "pending idx: %x\n", 3780 *pending_option_idx); 3781 } 3782 3783 /* mask output options fields */ 3784 for (i = 0; i < *option_count; i++) { 3785 options[i].pmd = FIELD_GET(ICE_AQC_PORT_OPT_PMD_COUNT_M, 3786 options[i].pmd); 3787 options[i].max_lane_speed = FIELD_GET(ICE_AQC_PORT_OPT_MAX_LANE_M, 3788 options[i].max_lane_speed); 3789 ice_debug(hw, ICE_DBG_PHY, "pmds: %x max speed: %x\n", 3790 options[i].pmd, options[i].max_lane_speed); 3791 } 3792 3793 return 0; 3794 } 3795 3796 /** 3797 * ice_aq_set_port_option 3798 * @hw: pointer to the HW struct 3799 * @lport: logical port to call the command with 3800 * @lport_valid: when false, FW uses port owned by the PF instead of lport, 3801 * when PF owns more than 1 port it must be true 3802 * @new_option: new port option to be written 3803 * 3804 * Calls Set Port Options AQC (0x06eb). 
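 *
 * Illustrative pairing with ice_aq_get_port_options() (a sketch; choosing
 * option index 1 is hypothetical and error handling is omitted):
 *
 *	struct ice_aqc_get_port_options_elem buf[ICE_AQC_PORT_OPT_MAX];
 *	u8 count = ICE_AQC_PORT_OPT_MAX, active, pending;
 *	bool active_valid, pending_valid;
 *
 *	ice_aq_get_port_options(hw, buf, &count, 0, false, &active,
 *				&active_valid, &pending, &pending_valid);
 *	ice_aq_set_port_option(hw, 0, false, 1);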
3805 */ 3806 int 3807 ice_aq_set_port_option(struct ice_hw *hw, u8 lport, u8 lport_valid, 3808 u8 new_option) 3809 { 3810 struct ice_aqc_set_port_option *cmd; 3811 struct ice_aq_desc desc; 3812 3813 if (new_option > ICE_AQC_PORT_OPT_COUNT_M) 3814 return -EINVAL; 3815 3816 cmd = &desc.params.set_port_option; 3817 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_option); 3818 3819 if (lport_valid) 3820 cmd->lport_num = lport; 3821 3822 cmd->lport_num_valid = lport_valid; 3823 cmd->selected_port_option = new_option; 3824 3825 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 3826 } 3827 3828 /** 3829 * ice_aq_sff_eeprom 3830 * @hw: pointer to the HW struct 3831 * @lport: bits [7:0] = logical port, bit [8] = logical port valid 3832 * @bus_addr: I2C bus address of the eeprom (typically 0xA0, 0=topo default) 3833 * @mem_addr: I2C offset. lower 8 bits for address, 8 upper bits zero padding. 3834 * @page: QSFP page 3835 * @set_page: set or ignore the page 3836 * @data: pointer to data buffer to be read/written to the I2C device. 3837 * @length: 1-16 for read, 1 for write. 3838 * @write: 0 read, 1 for write. 3839 * @cd: pointer to command details structure or NULL 3840 * 3841 * Read/Write SFF EEPROM (0x06EE) 3842 */ 3843 int 3844 ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr, 3845 u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length, 3846 bool write, struct ice_sq_cd *cd) 3847 { 3848 struct ice_aqc_sff_eeprom *cmd; 3849 struct ice_aq_desc desc; 3850 int status; 3851 3852 if (!data || (mem_addr & 0xff00)) 3853 return -EINVAL; 3854 3855 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_sff_eeprom); 3856 cmd = &desc.params.read_write_sff_param; 3857 desc.flags = cpu_to_le16(ICE_AQ_FLAG_RD); 3858 cmd->lport_num = (u8)(lport & 0xff); 3859 cmd->lport_num_valid = (u8)((lport >> 8) & 0x01); 3860 cmd->i2c_bus_addr = cpu_to_le16(((bus_addr >> 1) & 3861 ICE_AQC_SFF_I2CBUS_7BIT_M) | 3862 ((set_page << 3863 ICE_AQC_SFF_SET_EEPROM_PAGE_S) & 3864 ICE_AQC_SFF_SET_EEPROM_PAGE_M)); 3865 cmd->i2c_mem_addr = cpu_to_le16(mem_addr & 0xff); 3866 cmd->eeprom_page = cpu_to_le16((u16)page << ICE_AQC_SFF_EEPROM_PAGE_S); 3867 if (write) 3868 cmd->i2c_bus_addr |= cpu_to_le16(ICE_AQC_SFF_IS_WRITE); 3869 3870 status = ice_aq_send_cmd(hw, &desc, data, length, cd); 3871 return status; 3872 } 3873 3874 /** 3875 * __ice_aq_get_set_rss_lut 3876 * @hw: pointer to the hardware structure 3877 * @params: RSS LUT parameters 3878 * @set: set true to set the table, false to get the table 3879 * 3880 * Internal function to get (0x0B05) or set (0x0B03) RSS look up table 3881 */ 3882 static int 3883 __ice_aq_get_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *params, bool set) 3884 { 3885 u16 flags = 0, vsi_id, lut_type, lut_size, glob_lut_idx, vsi_handle; 3886 struct ice_aqc_get_set_rss_lut *cmd_resp; 3887 struct ice_aq_desc desc; 3888 int status; 3889 u8 *lut; 3890 3891 if (!params) 3892 return -EINVAL; 3893 3894 vsi_handle = params->vsi_handle; 3895 lut = params->lut; 3896 3897 if (!ice_is_vsi_valid(hw, vsi_handle) || !lut) 3898 return -EINVAL; 3899 3900 lut_size = params->lut_size; 3901 lut_type = params->lut_type; 3902 glob_lut_idx = params->global_lut_id; 3903 vsi_id = ice_get_hw_vsi_num(hw, vsi_handle); 3904 3905 cmd_resp = &desc.params.get_set_rss_lut; 3906 3907 if (set) { 3908 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_lut); 3909 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 3910 } else { 3911 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_lut); 3912 } 3913 3914 
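	/* encode the absolute HW VSI number and mark the VSI ID field valid */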
cmd_resp->vsi_id = cpu_to_le16(((vsi_id << 3915 ICE_AQC_GSET_RSS_LUT_VSI_ID_S) & 3916 ICE_AQC_GSET_RSS_LUT_VSI_ID_M) | 3917 ICE_AQC_GSET_RSS_LUT_VSI_VALID); 3918 3919 switch (lut_type) { 3920 case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI: 3921 case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF: 3922 case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL: 3923 flags |= ((lut_type << ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_S) & 3924 ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_M); 3925 break; 3926 default: 3927 status = -EINVAL; 3928 goto ice_aq_get_set_rss_lut_exit; 3929 } 3930 3931 if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL) { 3932 flags |= ((glob_lut_idx << ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_S) & 3933 ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_M); 3934 3935 if (!set) 3936 goto ice_aq_get_set_rss_lut_send; 3937 } else if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) { 3938 if (!set) 3939 goto ice_aq_get_set_rss_lut_send; 3940 } else { 3941 goto ice_aq_get_set_rss_lut_send; 3942 } 3943 3944 /* LUT size is only valid for Global and PF table types */ 3945 switch (lut_size) { 3946 case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128: 3947 break; 3948 case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512: 3949 flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512_FLAG << 3950 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) & 3951 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M; 3952 break; 3953 case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K: 3954 if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) { 3955 flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG << 3956 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) & 3957 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M; 3958 break; 3959 } 3960 fallthrough; 3961 default: 3962 status = -EINVAL; 3963 goto ice_aq_get_set_rss_lut_exit; 3964 } 3965 3966 ice_aq_get_set_rss_lut_send: 3967 cmd_resp->flags = cpu_to_le16(flags); 3968 status = ice_aq_send_cmd(hw, &desc, lut, lut_size, NULL); 3969 3970 ice_aq_get_set_rss_lut_exit: 3971 return status; 3972 } 3973 3974 /** 3975 * ice_aq_get_rss_lut 3976 * @hw: pointer to the hardware structure 3977 * @get_params: RSS LUT parameters used to specify which RSS LUT to get 3978 * 3979 * get the RSS lookup table, PF or VSI type 3980 */ 3981 int 3982 ice_aq_get_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *get_params) 3983 { 3984 return __ice_aq_get_set_rss_lut(hw, get_params, false); 3985 } 3986 3987 /** 3988 * ice_aq_set_rss_lut 3989 * @hw: pointer to the hardware structure 3990 * @set_params: RSS LUT parameters used to specify how to set the RSS LUT 3991 * 3992 * set the RSS lookup table, PF or VSI type 3993 */ 3994 int 3995 ice_aq_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *set_params) 3996 { 3997 return __ice_aq_get_set_rss_lut(hw, set_params, true); 3998 } 3999 4000 /** 4001 * __ice_aq_get_set_rss_key 4002 * @hw: pointer to the HW struct 4003 * @vsi_id: VSI FW index 4004 * @key: pointer to key info struct 4005 * @set: set true to set the key, false to get the key 4006 * 4007 * get (0x0B04) or set (0x0B02) the RSS key per VSI 4008 */ 4009 static int 4010 __ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id, 4011 struct ice_aqc_get_set_rss_keys *key, bool set) 4012 { 4013 struct ice_aqc_get_set_rss_key *cmd_resp; 4014 u16 key_size = sizeof(*key); 4015 struct ice_aq_desc desc; 4016 4017 cmd_resp = &desc.params.get_set_rss_key; 4018 4019 if (set) { 4020 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_key); 4021 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 4022 } else { 4023 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_key); 4024 } 4025 4026 cmd_resp->vsi_id = cpu_to_le16(((vsi_id << 4027 
ICE_AQC_GSET_RSS_KEY_VSI_ID_S) & 4028 ICE_AQC_GSET_RSS_KEY_VSI_ID_M) | 4029 ICE_AQC_GSET_RSS_KEY_VSI_VALID); 4030 4031 return ice_aq_send_cmd(hw, &desc, key, key_size, NULL); 4032 } 4033 4034 /** 4035 * ice_aq_get_rss_key 4036 * @hw: pointer to the HW struct 4037 * @vsi_handle: software VSI handle 4038 * @key: pointer to key info struct 4039 * 4040 * get the RSS key per VSI 4041 */ 4042 int 4043 ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle, 4044 struct ice_aqc_get_set_rss_keys *key) 4045 { 4046 if (!ice_is_vsi_valid(hw, vsi_handle) || !key) 4047 return -EINVAL; 4048 4049 return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle), 4050 key, false); 4051 } 4052 4053 /** 4054 * ice_aq_set_rss_key 4055 * @hw: pointer to the HW struct 4056 * @vsi_handle: software VSI handle 4057 * @keys: pointer to key info struct 4058 * 4059 * set the RSS key per VSI 4060 */ 4061 int 4062 ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle, 4063 struct ice_aqc_get_set_rss_keys *keys) 4064 { 4065 if (!ice_is_vsi_valid(hw, vsi_handle) || !keys) 4066 return -EINVAL; 4067 4068 return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle), 4069 keys, true); 4070 } 4071 4072 /** 4073 * ice_aq_add_lan_txq 4074 * @hw: pointer to the hardware structure 4075 * @num_qgrps: Number of added queue groups 4076 * @qg_list: list of queue groups to be added 4077 * @buf_size: size of buffer for indirect command 4078 * @cd: pointer to command details structure or NULL 4079 * 4080 * Add Tx LAN queue (0x0C30) 4081 * 4082 * NOTE: 4083 * Prior to calling add Tx LAN queue: 4084 * Initialize the following as part of the Tx queue context: 4085 * Completion queue ID if the queue uses Completion queue, Quanta profile, 4086 * Cache profile and Packet shaper profile. 4087 * 4088 * After add Tx LAN queue AQ command is completed: 4089 * Interrupts should be associated with specific queues, 4090 * Association of Tx queue to Doorbell queue is not part of Add LAN Tx queue 4091 * flow. 
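 *
 * The qg_list buffer must be sized to exactly match its contents; a single
 * queue group carrying one queue, for example, is
 * struct_size(qg_list, txqs, 1) bytes (illustrative note; any other
 * buf_size is rejected with -EINVAL).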
4092 */ 4093 static int 4094 ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps, 4095 struct ice_aqc_add_tx_qgrp *qg_list, u16 buf_size, 4096 struct ice_sq_cd *cd) 4097 { 4098 struct ice_aqc_add_tx_qgrp *list; 4099 struct ice_aqc_add_txqs *cmd; 4100 struct ice_aq_desc desc; 4101 u16 i, sum_size = 0; 4102 4103 cmd = &desc.params.add_txqs; 4104 4105 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_txqs); 4106 4107 if (!qg_list) 4108 return -EINVAL; 4109 4110 if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS) 4111 return -EINVAL; 4112 4113 for (i = 0, list = qg_list; i < num_qgrps; i++) { 4114 sum_size += struct_size(list, txqs, list->num_txqs); 4115 list = (struct ice_aqc_add_tx_qgrp *)(list->txqs + 4116 list->num_txqs); 4117 } 4118 4119 if (buf_size != sum_size) 4120 return -EINVAL; 4121 4122 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 4123 4124 cmd->num_qgrps = num_qgrps; 4125 4126 return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd); 4127 } 4128 4129 /** 4130 * ice_aq_dis_lan_txq 4131 * @hw: pointer to the hardware structure 4132 * @num_qgrps: number of groups in the list 4133 * @qg_list: the list of groups to disable 4134 * @buf_size: the total size of the qg_list buffer in bytes 4135 * @rst_src: if called due to reset, specifies the reset source 4136 * @vmvf_num: the relative VM or VF number that is undergoing the reset 4137 * @cd: pointer to command details structure or NULL 4138 * 4139 * Disable LAN Tx queue (0x0C31) 4140 */ 4141 static int 4142 ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps, 4143 struct ice_aqc_dis_txq_item *qg_list, u16 buf_size, 4144 enum ice_disq_rst_src rst_src, u16 vmvf_num, 4145 struct ice_sq_cd *cd) 4146 { 4147 struct ice_aqc_dis_txq_item *item; 4148 struct ice_aqc_dis_txqs *cmd; 4149 struct ice_aq_desc desc; 4150 u16 i, sz = 0; 4151 int status; 4152 4153 cmd = &desc.params.dis_txqs; 4154 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs); 4155 4156 /* qg_list can be NULL only in VM/VF reset flow */ 4157 if (!qg_list && !rst_src) 4158 return -EINVAL; 4159 4160 if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS) 4161 return -EINVAL; 4162 4163 cmd->num_entries = num_qgrps; 4164 4165 cmd->vmvf_and_timeout = cpu_to_le16((5 << ICE_AQC_Q_DIS_TIMEOUT_S) & 4166 ICE_AQC_Q_DIS_TIMEOUT_M); 4167 4168 switch (rst_src) { 4169 case ICE_VM_RESET: 4170 cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VM_RESET; 4171 cmd->vmvf_and_timeout |= 4172 cpu_to_le16(vmvf_num & ICE_AQC_Q_DIS_VMVF_NUM_M); 4173 break; 4174 case ICE_VF_RESET: 4175 cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VF_RESET; 4176 /* In this case, FW expects vmvf_num to be absolute VF ID */ 4177 cmd->vmvf_and_timeout |= 4178 cpu_to_le16((vmvf_num + hw->func_caps.vf_base_id) & 4179 ICE_AQC_Q_DIS_VMVF_NUM_M); 4180 break; 4181 case ICE_NO_RESET: 4182 default: 4183 break; 4184 } 4185 4186 /* flush pipe on time out */ 4187 cmd->cmd_type |= ICE_AQC_Q_DIS_CMD_FLUSH_PIPE; 4188 /* If no queue group info, we are in a reset flow. 
Issue the AQ */ 4189 if (!qg_list) 4190 goto do_aq; 4191 4192 /* set RD bit to indicate that command buffer is provided by the driver 4193 * and it needs to be read by the firmware 4194 */ 4195 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 4196 4197 for (i = 0, item = qg_list; i < num_qgrps; i++) { 4198 u16 item_size = struct_size(item, q_id, item->num_qs); 4199 4200 /* If the num of queues is even, add 2 bytes of padding */ 4201 if ((item->num_qs % 2) == 0) 4202 item_size += 2; 4203 4204 sz += item_size; 4205 4206 item = (struct ice_aqc_dis_txq_item *)((u8 *)item + item_size); 4207 } 4208 4209 if (buf_size != sz) 4210 return -EINVAL; 4211 4212 do_aq: 4213 status = ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd); 4214 if (status) { 4215 if (!qg_list) 4216 ice_debug(hw, ICE_DBG_SCHED, "VM%d disable failed %d\n", 4217 vmvf_num, hw->adminq.sq_last_status); 4218 else 4219 ice_debug(hw, ICE_DBG_SCHED, "disable queue %d failed %d\n", 4220 le16_to_cpu(qg_list[0].q_id[0]), 4221 hw->adminq.sq_last_status); 4222 } 4223 return status; 4224 } 4225 4226 /** 4227 * ice_aq_add_rdma_qsets 4228 * @hw: pointer to the hardware structure 4229 * @num_qset_grps: Number of RDMA Qset groups 4230 * @qset_list: list of Qset groups to be added 4231 * @buf_size: size of buffer for indirect command 4232 * @cd: pointer to command details structure or NULL 4233 * 4234 * Add Tx RDMA Qsets (0x0C33) 4235 */ 4236 static int 4237 ice_aq_add_rdma_qsets(struct ice_hw *hw, u8 num_qset_grps, 4238 struct ice_aqc_add_rdma_qset_data *qset_list, 4239 u16 buf_size, struct ice_sq_cd *cd) 4240 { 4241 struct ice_aqc_add_rdma_qset_data *list; 4242 struct ice_aqc_add_rdma_qset *cmd; 4243 struct ice_aq_desc desc; 4244 u16 i, sum_size = 0; 4245 4246 cmd = &desc.params.add_rdma_qset; 4247 4248 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_rdma_qset); 4249 4250 if (num_qset_grps > ICE_LAN_TXQ_MAX_QGRPS) 4251 return -EINVAL; 4252 4253 for (i = 0, list = qset_list; i < num_qset_grps; i++) { 4254 u16 num_qsets = le16_to_cpu(list->num_qsets); 4255 4256 sum_size += struct_size(list, rdma_qsets, num_qsets); 4257 list = (struct ice_aqc_add_rdma_qset_data *)(list->rdma_qsets + 4258 num_qsets); 4259 } 4260 4261 if (buf_size != sum_size) 4262 return -EINVAL; 4263 4264 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 4265 4266 cmd->num_qset_grps = num_qset_grps; 4267 4268 return ice_aq_send_cmd(hw, &desc, qset_list, buf_size, cd); 4269 } 4270 4271 /* End of FW Admin Queue command wrappers */ 4272 4273 /** 4274 * ice_write_byte - write a byte to a packed context structure 4275 * @src_ctx: the context structure to read from 4276 * @dest_ctx: the context to be written to 4277 * @ce_info: a description of the struct to be filled 4278 */ 4279 static void 4280 ice_write_byte(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info) 4281 { 4282 u8 src_byte, dest_byte, mask; 4283 u8 *from, *dest; 4284 u16 shift_width; 4285 4286 /* copy from the next struct field */ 4287 from = src_ctx + ce_info->offset; 4288 4289 /* prepare the bits and mask */ 4290 shift_width = ce_info->lsb % 8; 4291 mask = (u8)(BIT(ce_info->width) - 1); 4292 4293 src_byte = *from; 4294 src_byte &= mask; 4295 4296 /* shift to correct alignment */ 4297 mask <<= shift_width; 4298 src_byte <<= shift_width; 4299 4300 /* get the current bits from the target bit string */ 4301 dest = dest_ctx + (ce_info->lsb / 8); 4302 4303 memcpy(&dest_byte, dest, sizeof(dest_byte)); 4304 4305 dest_byte &= ~mask; /* get the bits not changing */ 4306 dest_byte |= src_byte; /* add in the new bits */ 4307 4308 /* 
put it all back */ 4309 memcpy(dest, &dest_byte, sizeof(dest_byte)); 4310 } 4311 4312 /** 4313 * ice_write_word - write a word to a packed context structure 4314 * @src_ctx: the context structure to read from 4315 * @dest_ctx: the context to be written to 4316 * @ce_info: a description of the struct to be filled 4317 */ 4318 static void 4319 ice_write_word(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info) 4320 { 4321 u16 src_word, mask; 4322 __le16 dest_word; 4323 u8 *from, *dest; 4324 u16 shift_width; 4325 4326 /* copy from the next struct field */ 4327 from = src_ctx + ce_info->offset; 4328 4329 /* prepare the bits and mask */ 4330 shift_width = ce_info->lsb % 8; 4331 mask = BIT(ce_info->width) - 1; 4332 4333 /* don't swizzle the bits until after the mask because the mask bits 4334 * will be in a different bit position on big endian machines 4335 */ 4336 src_word = *(u16 *)from; 4337 src_word &= mask; 4338 4339 /* shift to correct alignment */ 4340 mask <<= shift_width; 4341 src_word <<= shift_width; 4342 4343 /* get the current bits from the target bit string */ 4344 dest = dest_ctx + (ce_info->lsb / 8); 4345 4346 memcpy(&dest_word, dest, sizeof(dest_word)); 4347 4348 dest_word &= ~(cpu_to_le16(mask)); /* get the bits not changing */ 4349 dest_word |= cpu_to_le16(src_word); /* add in the new bits */ 4350 4351 /* put it all back */ 4352 memcpy(dest, &dest_word, sizeof(dest_word)); 4353 } 4354 4355 /** 4356 * ice_write_dword - write a dword to a packed context structure 4357 * @src_ctx: the context structure to read from 4358 * @dest_ctx: the context to be written to 4359 * @ce_info: a description of the struct to be filled 4360 */ 4361 static void 4362 ice_write_dword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info) 4363 { 4364 u32 src_dword, mask; 4365 __le32 dest_dword; 4366 u8 *from, *dest; 4367 u16 shift_width; 4368 4369 /* copy from the next struct field */ 4370 from = src_ctx + ce_info->offset; 4371 4372 /* prepare the bits and mask */ 4373 shift_width = ce_info->lsb % 8; 4374 4375 /* if the field width is exactly 32 on an x86 machine, then the shift 4376 * operation will not work because the SHL instructions count is masked 4377 * to 5 bits so the shift will do nothing 4378 */ 4379 if (ce_info->width < 32) 4380 mask = BIT(ce_info->width) - 1; 4381 else 4382 mask = (u32)~0; 4383 4384 /* don't swizzle the bits until after the mask because the mask bits 4385 * will be in a different bit position on big endian machines 4386 */ 4387 src_dword = *(u32 *)from; 4388 src_dword &= mask; 4389 4390 /* shift to correct alignment */ 4391 mask <<= shift_width; 4392 src_dword <<= shift_width; 4393 4394 /* get the current bits from the target bit string */ 4395 dest = dest_ctx + (ce_info->lsb / 8); 4396 4397 memcpy(&dest_dword, dest, sizeof(dest_dword)); 4398 4399 dest_dword &= ~(cpu_to_le32(mask)); /* get the bits not changing */ 4400 dest_dword |= cpu_to_le32(src_dword); /* add in the new bits */ 4401 4402 /* put it all back */ 4403 memcpy(dest, &dest_dword, sizeof(dest_dword)); 4404 } 4405 4406 /** 4407 * ice_write_qword - write a qword to a packed context structure 4408 * @src_ctx: the context structure to read from 4409 * @dest_ctx: the context to be written to 4410 * @ce_info: a description of the struct to be filled 4411 */ 4412 static void 4413 ice_write_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info) 4414 { 4415 u64 src_qword, mask; 4416 __le64 dest_qword; 4417 u8 *from, *dest; 4418 u16 shift_width; 4419 4420 /* copy from the next struct field 
*/ 4421 from = src_ctx + ce_info->offset; 4422 4423 /* prepare the bits and mask */ 4424 shift_width = ce_info->lsb % 8; 4425 4426 /* if the field width is exactly 64 on an x86 machine, then the shift 4427 * operation will not work because the SHL instructions count is masked 4428 * to 6 bits so the shift will do nothing 4429 */ 4430 if (ce_info->width < 64) 4431 mask = BIT_ULL(ce_info->width) - 1; 4432 else 4433 mask = (u64)~0; 4434 4435 /* don't swizzle the bits until after the mask because the mask bits 4436 * will be in a different bit position on big endian machines 4437 */ 4438 src_qword = *(u64 *)from; 4439 src_qword &= mask; 4440 4441 /* shift to correct alignment */ 4442 mask <<= shift_width; 4443 src_qword <<= shift_width; 4444 4445 /* get the current bits from the target bit string */ 4446 dest = dest_ctx + (ce_info->lsb / 8); 4447 4448 memcpy(&dest_qword, dest, sizeof(dest_qword)); 4449 4450 dest_qword &= ~(cpu_to_le64(mask)); /* get the bits not changing */ 4451 dest_qword |= cpu_to_le64(src_qword); /* add in the new bits */ 4452 4453 /* put it all back */ 4454 memcpy(dest, &dest_qword, sizeof(dest_qword)); 4455 } 4456 4457 /** 4458 * ice_set_ctx - set context bits in packed structure 4459 * @hw: pointer to the hardware structure 4460 * @src_ctx: pointer to a generic non-packed context structure 4461 * @dest_ctx: pointer to memory for the packed structure 4462 * @ce_info: a description of the structure to be transformed 4463 */ 4464 int 4465 ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx, 4466 const struct ice_ctx_ele *ce_info) 4467 { 4468 int f; 4469 4470 for (f = 0; ce_info[f].width; f++) { 4471 /* We have to deal with each element of the FW response 4472 * using the correct size so that we are correct regardless 4473 * of the endianness of the machine. 4474 */ 4475 if (ce_info[f].width > (ce_info[f].size_of * BITS_PER_BYTE)) { 4476 ice_debug(hw, ICE_DBG_QCTX, "Field %d width of %d bits larger than size of %d byte(s) ... 
skipping write\n", 4477 f, ce_info[f].width, ce_info[f].size_of); 4478 continue; 4479 } 4480 switch (ce_info[f].size_of) { 4481 case sizeof(u8): 4482 ice_write_byte(src_ctx, dest_ctx, &ce_info[f]); 4483 break; 4484 case sizeof(u16): 4485 ice_write_word(src_ctx, dest_ctx, &ce_info[f]); 4486 break; 4487 case sizeof(u32): 4488 ice_write_dword(src_ctx, dest_ctx, &ce_info[f]); 4489 break; 4490 case sizeof(u64): 4491 ice_write_qword(src_ctx, dest_ctx, &ce_info[f]); 4492 break; 4493 default: 4494 return -EINVAL; 4495 } 4496 } 4497 4498 return 0; 4499 } 4500 4501 /** 4502 * ice_get_lan_q_ctx - get the LAN queue context for the given VSI and TC 4503 * @hw: pointer to the HW struct 4504 * @vsi_handle: software VSI handle 4505 * @tc: TC number 4506 * @q_handle: software queue handle 4507 */ 4508 struct ice_q_ctx * 4509 ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle) 4510 { 4511 struct ice_vsi_ctx *vsi; 4512 struct ice_q_ctx *q_ctx; 4513 4514 vsi = ice_get_vsi_ctx(hw, vsi_handle); 4515 if (!vsi) 4516 return NULL; 4517 if (q_handle >= vsi->num_lan_q_entries[tc]) 4518 return NULL; 4519 if (!vsi->lan_q_ctx[tc]) 4520 return NULL; 4521 q_ctx = vsi->lan_q_ctx[tc]; 4522 return &q_ctx[q_handle]; 4523 } 4524 4525 /** 4526 * ice_ena_vsi_txq 4527 * @pi: port information structure 4528 * @vsi_handle: software VSI handle 4529 * @tc: TC number 4530 * @q_handle: software queue handle 4531 * @num_qgrps: Number of added queue groups 4532 * @buf: list of queue groups to be added 4533 * @buf_size: size of buffer for indirect command 4534 * @cd: pointer to command details structure or NULL 4535 * 4536 * This function adds one LAN queue 4537 */ 4538 int 4539 ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle, 4540 u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size, 4541 struct ice_sq_cd *cd) 4542 { 4543 struct ice_aqc_txsched_elem_data node = { 0 }; 4544 struct ice_sched_node *parent; 4545 struct ice_q_ctx *q_ctx; 4546 struct ice_hw *hw; 4547 int status; 4548 4549 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY) 4550 return -EIO; 4551 4552 if (num_qgrps > 1 || buf->num_txqs > 1) 4553 return -ENOSPC; 4554 4555 hw = pi->hw; 4556 4557 if (!ice_is_vsi_valid(hw, vsi_handle)) 4558 return -EINVAL; 4559 4560 mutex_lock(&pi->sched_lock); 4561 4562 q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handle); 4563 if (!q_ctx) { 4564 ice_debug(hw, ICE_DBG_SCHED, "Enaq: invalid queue handle %d\n", 4565 q_handle); 4566 status = -EINVAL; 4567 goto ena_txq_exit; 4568 } 4569 4570 /* find a parent node */ 4571 parent = ice_sched_get_free_qparent(pi, vsi_handle, tc, 4572 ICE_SCHED_NODE_OWNER_LAN); 4573 if (!parent) { 4574 status = -EINVAL; 4575 goto ena_txq_exit; 4576 } 4577 4578 buf->parent_teid = parent->info.node_teid; 4579 node.parent_teid = parent->info.node_teid; 4580 /* Mark the values in the "generic" section as valid. The default 4581 * value in the "generic" section is zero. This means that: 4582 * - Scheduling mode is Bytes Per Second (BPS), indicated by Bit 0. 4583 * - 0 priority among siblings, indicated by Bits 1-3. 4584 * - WFQ, indicated by Bit 4. 4585 * - 0 Adjustment value is used in PSM credit update flow, indicated by 4586 * Bits 5-6. 4587 * - Bit 7 is reserved. 4588 * Without setting the generic section as valid in valid_sections, the 4589 * Admin queue command will fail with error code ICE_AQ_RC_EINVAL.
4590 */ 4591 buf->txqs[0].info.valid_sections = 4592 ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR | 4593 ICE_AQC_ELEM_VALID_EIR; 4594 buf->txqs[0].info.generic = 0; 4595 buf->txqs[0].info.cir_bw.bw_profile_idx = 4596 cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID); 4597 buf->txqs[0].info.cir_bw.bw_alloc = 4598 cpu_to_le16(ICE_SCHED_DFLT_BW_WT); 4599 buf->txqs[0].info.eir_bw.bw_profile_idx = 4600 cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID); 4601 buf->txqs[0].info.eir_bw.bw_alloc = 4602 cpu_to_le16(ICE_SCHED_DFLT_BW_WT); 4603 4604 /* add the LAN queue */ 4605 status = ice_aq_add_lan_txq(hw, num_qgrps, buf, buf_size, cd); 4606 if (status) { 4607 ice_debug(hw, ICE_DBG_SCHED, "enable queue %d failed %d\n", 4608 le16_to_cpu(buf->txqs[0].txq_id), 4609 hw->adminq.sq_last_status); 4610 goto ena_txq_exit; 4611 } 4612 4613 node.node_teid = buf->txqs[0].q_teid; 4614 node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF; 4615 q_ctx->q_handle = q_handle; 4616 q_ctx->q_teid = le32_to_cpu(node.node_teid); 4617 4618 /* add a leaf node into scheduler tree queue layer */ 4619 status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node, NULL); 4620 if (!status) 4621 status = ice_sched_replay_q_bw(pi, q_ctx); 4622 4623 ena_txq_exit: 4624 mutex_unlock(&pi->sched_lock); 4625 return status; 4626 } 4627 4628 /** 4629 * ice_dis_vsi_txq 4630 * @pi: port information structure 4631 * @vsi_handle: software VSI handle 4632 * @tc: TC number 4633 * @num_queues: number of queues 4634 * @q_handles: pointer to software queue handle array 4635 * @q_ids: pointer to the q_id array 4636 * @q_teids: pointer to queue node teids 4637 * @rst_src: if called due to reset, specifies the reset source 4638 * @vmvf_num: the relative VM or VF number that is undergoing the reset 4639 * @cd: pointer to command details structure or NULL 4640 * 4641 * This function removes queues and their corresponding nodes in SW DB 4642 */ 4643 int 4644 ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues, 4645 u16 *q_handles, u16 *q_ids, u32 *q_teids, 4646 enum ice_disq_rst_src rst_src, u16 vmvf_num, 4647 struct ice_sq_cd *cd) 4648 { 4649 struct ice_aqc_dis_txq_item *qg_list; 4650 struct ice_q_ctx *q_ctx; 4651 int status = -ENOENT; 4652 struct ice_hw *hw; 4653 u16 i, buf_size; 4654 4655 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY) 4656 return -EIO; 4657 4658 hw = pi->hw; 4659 4660 if (!num_queues) { 4661 /* if the queue is already disabled but the disable queue 4662 * command still has to be sent to complete the VF reset, then call 4663 * ice_aq_dis_lan_txq without any queue information 4664 */ 4665 if (rst_src) 4666 return ice_aq_dis_lan_txq(hw, 0, NULL, 0, rst_src, 4667 vmvf_num, NULL); 4668 return -EIO; 4669 } 4670 4671 buf_size = struct_size(qg_list, q_id, 1); 4672 qg_list = kzalloc(buf_size, GFP_KERNEL); 4673 if (!qg_list) 4674 return -ENOMEM; 4675 4676 mutex_lock(&pi->sched_lock); 4677 4678 for (i = 0; i < num_queues; i++) { 4679 struct ice_sched_node *node; 4680 4681 node = ice_sched_find_node_by_teid(pi->root, q_teids[i]); 4682 if (!node) 4683 continue; 4684 q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handles[i]); 4685 if (!q_ctx) { 4686 ice_debug(hw, ICE_DBG_SCHED, "invalid queue handle %d\n", 4687 q_handles[i]); 4688 continue; 4689 } 4690 if (q_ctx->q_handle != q_handles[i]) { 4691 ice_debug(hw, ICE_DBG_SCHED, "Err:handles %d %d\n", 4692 q_ctx->q_handle, q_handles[i]); 4693 continue; 4694 } 4695 qg_list->parent_teid = node->info.parent_teid; 4696 qg_list->num_qs = 1; 4697 qg_list->q_id[0] = cpu_to_le16(q_ids[i]); 4698 status
= ice_aq_dis_lan_txq(hw, 1, qg_list, buf_size, rst_src, 4699 vmvf_num, cd); 4700 4701 if (status) 4702 break; 4703 ice_free_sched_node(pi, node); 4704 q_ctx->q_handle = ICE_INVAL_Q_HANDLE; 4705 } 4706 mutex_unlock(&pi->sched_lock); 4707 kfree(qg_list); 4708 return status; 4709 } 4710 4711 /** 4712 * ice_cfg_vsi_qs - configure the new/existing VSI queues 4713 * @pi: port information structure 4714 * @vsi_handle: software VSI handle 4715 * @tc_bitmap: TC bitmap 4716 * @maxqs: max queues array per TC 4717 * @owner: LAN or RDMA 4718 * 4719 * This function adds/updates the VSI queues per TC. 4720 */ 4721 static int 4722 ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap, 4723 u16 *maxqs, u8 owner) 4724 { 4725 int status = 0; 4726 u8 i; 4727 4728 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY) 4729 return -EIO; 4730 4731 if (!ice_is_vsi_valid(pi->hw, vsi_handle)) 4732 return -EINVAL; 4733 4734 mutex_lock(&pi->sched_lock); 4735 4736 ice_for_each_traffic_class(i) { 4737 /* configuration is possible only if TC node is present */ 4738 if (!ice_sched_get_tc_node(pi, i)) 4739 continue; 4740 4741 status = ice_sched_cfg_vsi(pi, vsi_handle, i, maxqs[i], owner, 4742 ice_is_tc_ena(tc_bitmap, i)); 4743 if (status) 4744 break; 4745 } 4746 4747 mutex_unlock(&pi->sched_lock); 4748 return status; 4749 } 4750 4751 /** 4752 * ice_cfg_vsi_lan - configure VSI LAN queues 4753 * @pi: port information structure 4754 * @vsi_handle: software VSI handle 4755 * @tc_bitmap: TC bitmap 4756 * @max_lanqs: max LAN queues array per TC 4757 * 4758 * This function adds/updates the VSI LAN queues per TC. 4759 */ 4760 int 4761 ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap, 4762 u16 *max_lanqs) 4763 { 4764 return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_lanqs, 4765 ICE_SCHED_NODE_OWNER_LAN); 4766 } 4767 4768 /** 4769 * ice_cfg_vsi_rdma - configure the VSI RDMA queues 4770 * @pi: port information structure 4771 * @vsi_handle: software VSI handle 4772 * @tc_bitmap: TC bitmap 4773 * @max_rdmaqs: max RDMA queues array per TC 4774 * 4775 * This function adds/updates the VSI RDMA queues per TC. 
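 *
 * Illustrative call (a sketch; one RDMA queue set per TC, values
 * hypothetical):
 *
 *	u16 max_rdmaqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
 *	u8 i;
 *
 *	ice_for_each_traffic_class(i)
 *		max_rdmaqs[i] = 1;
 *	err = ice_cfg_vsi_rdma(pi, vsi_handle, tc_bitmap, max_rdmaqs);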
4776 */ 4777 int 4778 ice_cfg_vsi_rdma(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap, 4779 u16 *max_rdmaqs) 4780 { 4781 return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_rdmaqs, 4782 ICE_SCHED_NODE_OWNER_RDMA); 4783 } 4784 4785 /** 4786 * ice_ena_vsi_rdma_qset 4787 * @pi: port information structure 4788 * @vsi_handle: software VSI handle 4789 * @tc: TC number 4790 * @rdma_qset: pointer to RDMA Qset 4791 * @num_qsets: number of RDMA Qsets 4792 * @qset_teid: pointer to Qset node TEIDs 4793 * 4794 * This function adds RDMA Qset 4795 */ 4796 int 4797 ice_ena_vsi_rdma_qset(struct ice_port_info *pi, u16 vsi_handle, u8 tc, 4798 u16 *rdma_qset, u16 num_qsets, u32 *qset_teid) 4799 { 4800 struct ice_aqc_txsched_elem_data node = { 0 }; 4801 struct ice_aqc_add_rdma_qset_data *buf; 4802 struct ice_sched_node *parent; 4803 struct ice_hw *hw; 4804 u16 i, buf_size; 4805 int ret; 4806 4807 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY) 4808 return -EIO; 4809 hw = pi->hw; 4810 4811 if (!ice_is_vsi_valid(hw, vsi_handle)) 4812 return -EINVAL; 4813 4814 buf_size = struct_size(buf, rdma_qsets, num_qsets); 4815 buf = kzalloc(buf_size, GFP_KERNEL); 4816 if (!buf) 4817 return -ENOMEM; 4818 mutex_lock(&pi->sched_lock); 4819 4820 parent = ice_sched_get_free_qparent(pi, vsi_handle, tc, 4821 ICE_SCHED_NODE_OWNER_RDMA); 4822 if (!parent) { 4823 ret = -EINVAL; 4824 goto rdma_error_exit; 4825 } 4826 buf->parent_teid = parent->info.node_teid; 4827 node.parent_teid = parent->info.node_teid; 4828 4829 buf->num_qsets = cpu_to_le16(num_qsets); 4830 for (i = 0; i < num_qsets; i++) { 4831 buf->rdma_qsets[i].tx_qset_id = cpu_to_le16(rdma_qset[i]); 4832 buf->rdma_qsets[i].info.valid_sections = 4833 ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR | 4834 ICE_AQC_ELEM_VALID_EIR; 4835 buf->rdma_qsets[i].info.generic = 0; 4836 buf->rdma_qsets[i].info.cir_bw.bw_profile_idx = 4837 cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID); 4838 buf->rdma_qsets[i].info.cir_bw.bw_alloc = 4839 cpu_to_le16(ICE_SCHED_DFLT_BW_WT); 4840 buf->rdma_qsets[i].info.eir_bw.bw_profile_idx = 4841 cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID); 4842 buf->rdma_qsets[i].info.eir_bw.bw_alloc = 4843 cpu_to_le16(ICE_SCHED_DFLT_BW_WT); 4844 } 4845 ret = ice_aq_add_rdma_qsets(hw, 1, buf, buf_size, NULL); 4846 if (ret) { 4847 ice_debug(hw, ICE_DBG_RDMA, "add RDMA qset failed\n"); 4848 goto rdma_error_exit; 4849 } 4850 node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF; 4851 for (i = 0; i < num_qsets; i++) { 4852 node.node_teid = buf->rdma_qsets[i].qset_teid; 4853 ret = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, 4854 &node, NULL); 4855 if (ret) 4856 break; 4857 qset_teid[i] = le32_to_cpu(node.node_teid); 4858 } 4859 rdma_error_exit: 4860 mutex_unlock(&pi->sched_lock); 4861 kfree(buf); 4862 return ret; 4863 } 4864 4865 /** 4866 * ice_dis_vsi_rdma_qset - free RDMA resources 4867 * @pi: port_info struct 4868 * @count: number of RDMA Qsets to free 4869 * @qset_teid: TEID of Qset node 4870 * @q_id: list of queue IDs being disabled 4871 */ 4872 int 4873 ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid, 4874 u16 *q_id) 4875 { 4876 struct ice_aqc_dis_txq_item *qg_list; 4877 struct ice_hw *hw; 4878 int status = 0; 4879 u16 qg_size; 4880 int i; 4881 4882 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY) 4883 return -EIO; 4884 4885 hw = pi->hw; 4886 4887 qg_size = struct_size(qg_list, q_id, 1); 4888 qg_list = kzalloc(qg_size, GFP_KERNEL); 4889 if (!qg_list) 4890 return -ENOMEM; 4891 4892 mutex_lock(&pi->sched_lock); 4893 4894 for (i = 0; 
i < count; i++) { 4895 struct ice_sched_node *node; 4896 4897 node = ice_sched_find_node_by_teid(pi->root, qset_teid[i]); 4898 if (!node) 4899 continue; 4900 4901 qg_list->parent_teid = node->info.parent_teid; 4902 qg_list->num_qs = 1; 4903 qg_list->q_id[0] = 4904 cpu_to_le16(q_id[i] | 4905 ICE_AQC_Q_DIS_BUF_ELEM_TYPE_RDMA_QSET); 4906 4907 status = ice_aq_dis_lan_txq(hw, 1, qg_list, qg_size, 4908 ICE_NO_RESET, 0, NULL); 4909 if (status) 4910 break; 4911 4912 ice_free_sched_node(pi, node); 4913 } 4914 4915 mutex_unlock(&pi->sched_lock); 4916 kfree(qg_list); 4917 return status; 4918 } 4919 4920 /** 4921 * ice_replay_pre_init - replay pre-initialization 4922 * @hw: pointer to the HW struct 4923 * 4924 * Initializes required config data for VSI, FD, ACL, and RSS before replay. 4925 */ 4926 static int ice_replay_pre_init(struct ice_hw *hw) 4927 { 4928 struct ice_switch_info *sw = hw->switch_info; 4929 u8 i; 4930 4931 /* Delete old entries from replay filter list head if there is any */ 4932 ice_rm_all_sw_replay_rule_info(hw); 4933 /* At the start of replay, move entries into the replay_rules list; 4934 * this allows adding rule entries back to the filt_rules list, 4935 * which is the operational list. 4936 */ 4937 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) 4938 list_replace_init(&sw->recp_list[i].filt_rules, 4939 &sw->recp_list[i].filt_replay_rules); 4940 ice_sched_replay_agg_vsi_preinit(hw); 4941 4942 return 0; 4943 } 4944 4945 /** 4946 * ice_replay_vsi - replay VSI configuration 4947 * @hw: pointer to the HW struct 4948 * @vsi_handle: driver VSI handle 4949 * 4950 * Restore all VSI configuration after reset. It is required to call this 4951 * function with the main VSI first. 4952 */ 4953 int ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle) 4954 { 4955 int status; 4956 4957 if (!ice_is_vsi_valid(hw, vsi_handle)) 4958 return -EINVAL; 4959 4960 /* Replay pre-initialization if there is any */ 4961 if (vsi_handle == ICE_MAIN_VSI_HANDLE) { 4962 status = ice_replay_pre_init(hw); 4963 if (status) 4964 return status; 4965 } 4966 /* Replay per VSI all RSS configurations */ 4967 status = ice_replay_rss_cfg(hw, vsi_handle); 4968 if (status) 4969 return status; 4970 /* Replay per VSI all filters */ 4971 status = ice_replay_vsi_all_fltr(hw, vsi_handle); 4972 if (!status) 4973 status = ice_replay_vsi_agg(hw, vsi_handle); 4974 return status; 4975 } 4976 4977 /** 4978 * ice_replay_post - post replay configuration cleanup 4979 * @hw: pointer to the HW struct 4980 * 4981 * Post replay cleanup. 4982 */ 4983 void ice_replay_post(struct ice_hw *hw) 4984 { 4985 /* Delete old entries from replay filter list head */ 4986 ice_rm_all_sw_replay_rule_info(hw); 4987 ice_sched_replay_agg(hw); 4988 } 4989 4990 /** 4991 * ice_stat_update40 - read 40 bit stat from the chip and update stat values 4992 * @hw: ptr to the hardware info 4993 * @reg: offset of 64 bit HW register to read from 4994 * @prev_stat_loaded: bool to specify if previous stats are loaded 4995 * @prev_stat: ptr to previous loaded stat value 4996 * @cur_stat: ptr to current stat value 4997 */ 4998 void 4999 ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded, 5000 u64 *prev_stat, u64 *cur_stat) 5001 { 5002 u64 new_data = rd64(hw, reg) & (BIT_ULL(40) - 1); 5003 5004 /* device stats are not reset at PFR, so they likely will not be zeroed 5005 * when the driver starts. Thus, save the value from the first read 5006 * without adding to the statistic value so that we report stats which 5007 * count up from zero.
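	 *
	 * The rollover handling further below can be checked with a small
	 * example (hypothetical 40-bit readings): if the previous read was
	 * 0xFFFFFFFFF0 and the new read is 0x10, the counter wrapped, and
	 * the true delta is (0x10 + BIT_ULL(40)) - 0xFFFFFFFFF0 = 0x20.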
5008 */ 5009 if (!prev_stat_loaded) { 5010 *prev_stat = new_data; 5011 return; 5012 } 5013 5014 /* Calculate the difference between the new and old values, and then 5015 * add it to the software stat value. 5016 */ 5017 if (new_data >= *prev_stat) 5018 *cur_stat += new_data - *prev_stat; 5019 else 5020 /* to manage the potential roll-over */ 5021 *cur_stat += (new_data + BIT_ULL(40)) - *prev_stat; 5022 5023 /* Update the previously stored value to prepare for next read */ 5024 *prev_stat = new_data; 5025 } 5026 5027 /** 5028 * ice_stat_update32 - read 32 bit stat from the chip and update stat values 5029 * @hw: ptr to the hardware info 5030 * @reg: offset of HW register to read from 5031 * @prev_stat_loaded: bool to specify if previous stats are loaded 5032 * @prev_stat: ptr to previous loaded stat value 5033 * @cur_stat: ptr to current stat value 5034 */ 5035 void 5036 ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded, 5037 u64 *prev_stat, u64 *cur_stat) 5038 { 5039 u32 new_data; 5040 5041 new_data = rd32(hw, reg); 5042 5043 /* device stats are not reset at PFR, they likely will not be zeroed 5044 * when the driver starts. Thus, save the value from the first read 5045 * without adding to the statistic value so that we report stats which 5046 * count up from zero. 5047 */ 5048 if (!prev_stat_loaded) { 5049 *prev_stat = new_data; 5050 return; 5051 } 5052 5053 /* Calculate the difference between the new and old values, and then 5054 * add it to the software stat value. 5055 */ 5056 if (new_data >= *prev_stat) 5057 *cur_stat += new_data - *prev_stat; 5058 else 5059 /* to manage the potential roll-over */ 5060 *cur_stat += (new_data + BIT_ULL(32)) - *prev_stat; 5061 5062 /* Update the previously stored value to prepare for next read */ 5063 *prev_stat = new_data; 5064 } 5065 5066 /** 5067 * ice_sched_query_elem - query element information from HW 5068 * @hw: pointer to the HW struct 5069 * @node_teid: node TEID to be queried 5070 * @buf: buffer to element information 5071 * 5072 * This function queries HW element information 5073 */ 5074 int 5075 ice_sched_query_elem(struct ice_hw *hw, u32 node_teid, 5076 struct ice_aqc_txsched_elem_data *buf) 5077 { 5078 u16 buf_size, num_elem_ret = 0; 5079 int status; 5080 5081 buf_size = sizeof(*buf); 5082 memset(buf, 0, buf_size); 5083 buf->node_teid = cpu_to_le32(node_teid); 5084 status = ice_aq_query_sched_elems(hw, 1, buf, buf_size, &num_elem_ret, 5085 NULL); 5086 if (status || num_elem_ret != 1) 5087 ice_debug(hw, ICE_DBG_SCHED, "query element failed\n"); 5088 return status; 5089 } 5090 5091 /** 5092 * ice_aq_read_i2c 5093 * @hw: pointer to the hw struct 5094 * @topo_addr: topology address for a device to communicate with 5095 * @bus_addr: 7-bit I2C bus address 5096 * @addr: I2C memory address (I2C offset) with up to 16 bits 5097 * @params: I2C parameters: bit [7] - Repeated start, 5098 * bits [6:5] data offset size, 5099 * bit [4] - I2C address type, 5100 * bits [3:0] - data size to read (0-16 bytes) 5101 * @data: pointer to data (0 to 16 bytes) to be read from the I2C device 5102 * @cd: pointer to command details structure or NULL 5103 * 5104 * Read I2C (0x06E2) 5105 */ 5106 int 5107 ice_aq_read_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr, 5108 u16 bus_addr, __le16 addr, u8 params, u8 *data, 5109 struct ice_sq_cd *cd) 5110 { 5111 struct ice_aq_desc desc = { 0 }; 5112 struct ice_aqc_i2c *cmd; 5113 u8 data_size; 5114 int status; 5115 5116 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_read_i2c); 5117 cmd = 
&desc.params.read_write_i2c; 5118 5119 if (!data) 5120 return -EINVAL; 5121 5122 data_size = FIELD_GET(ICE_AQC_I2C_DATA_SIZE_M, params); 5123 5124 cmd->i2c_bus_addr = cpu_to_le16(bus_addr); 5125 cmd->topo_addr = topo_addr; 5126 cmd->i2c_params = params; 5127 cmd->i2c_addr = addr; 5128 5129 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 5130 if (!status) { 5131 struct ice_aqc_read_i2c_resp *resp; 5132 u8 i; 5133 5134 resp = &desc.params.read_i2c_resp; 5135 for (i = 0; i < data_size; i++) { 5136 *data = resp->i2c_data[i]; 5137 data++; 5138 } 5139 } 5140 5141 return status; 5142 } 5143 5144 /** 5145 * ice_aq_write_i2c 5146 * @hw: pointer to the hw struct 5147 * @topo_addr: topology address for a device to communicate with 5148 * @bus_addr: 7-bit I2C bus address 5149 * @addr: I2C memory address (I2C offset) with up to 16 bits 5150 * @params: I2C parameters: bit [4] - I2C address type, bits [3:0] - data size to write (0-7 bytes) 5151 * @data: pointer to data (0 to 4 bytes) to be written to the I2C device 5152 * @cd: pointer to command details structure or NULL 5153 * 5154 * Write I2C (0x06E3) 5155 * 5156 * Return: 5157 * * 0 - Successful write to the i2c device 5158 * * -EINVAL - Data size greater than 4 bytes 5159 * * -EIO - FW error 5160 */ 5161 int 5162 ice_aq_write_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr, 5163 u16 bus_addr, __le16 addr, u8 params, u8 *data, 5164 struct ice_sq_cd *cd) 5165 { 5166 struct ice_aq_desc desc = { 0 }; 5167 struct ice_aqc_i2c *cmd; 5168 u8 data_size; 5169 5170 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_write_i2c); 5171 cmd = &desc.params.read_write_i2c; 5172 5173 data_size = FIELD_GET(ICE_AQC_I2C_DATA_SIZE_M, params); 5174 5175 /* data_size limited to 4 */ 5176 if (data_size > 4) 5177 return -EINVAL; 5178 5179 cmd->i2c_bus_addr = cpu_to_le16(bus_addr); 5180 cmd->topo_addr = topo_addr; 5181 cmd->i2c_params = params; 5182 cmd->i2c_addr = addr; 5183 5184 memcpy(cmd->i2c_data, data, data_size); 5185 5186 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 5187 } 5188 5189 /** 5190 * ice_aq_set_driver_param - Set driver parameter to share via firmware 5191 * @hw: pointer to the HW struct 5192 * @idx: parameter index to set 5193 * @value: the value to set the parameter to 5194 * @cd: pointer to command details structure or NULL 5195 * 5196 * Set the value of one of the software defined parameters. All PFs connected 5197 * to this device can read the value using ice_aq_get_driver_param. 5198 * 5199 * Note that firmware provides no synchronization or locking, and will not 5200 * save the parameter value during a device reset. It is expected that 5201 * a single PF will write the parameter value, while all other PFs will only 5202 * read it.
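 *
 * Illustrative usage (a sketch; parameter index 0 and the written value are
 * hypothetical):
 *
 *	u32 val;
 *
 *	ice_aq_set_driver_param(hw, 0, 0x1234, NULL);
 *	ice_aq_get_driver_param(hw, 0, &val, NULL);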
5203 */ 5204 int 5205 ice_aq_set_driver_param(struct ice_hw *hw, enum ice_aqc_driver_params idx, 5206 u32 value, struct ice_sq_cd *cd) 5207 { 5208 struct ice_aqc_driver_shared_params *cmd; 5209 struct ice_aq_desc desc; 5210 5211 if (idx >= ICE_AQC_DRIVER_PARAM_MAX) 5212 return -EIO; 5213 5214 cmd = &desc.params.drv_shared_params; 5215 5216 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_shared_params); 5217 5218 cmd->set_or_get_op = ICE_AQC_DRIVER_PARAM_SET; 5219 cmd->param_indx = idx; 5220 cmd->param_val = cpu_to_le32(value); 5221 5222 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 5223 } 5224 5225 /** 5226 * ice_aq_get_driver_param - Get driver parameter shared via firmware 5227 * @hw: pointer to the HW struct 5228 * @idx: parameter index to get 5229 * @value: storage to return the shared parameter 5230 * @cd: pointer to command details structure or NULL 5231 * 5232 * Get the value of one of the software defined parameters. 5233 * 5234 * Note that firmware provides no synchronization or locking. It is expected 5235 * that only a single PF will write a given parameter. 5236 */ 5237 int 5238 ice_aq_get_driver_param(struct ice_hw *hw, enum ice_aqc_driver_params idx, 5239 u32 *value, struct ice_sq_cd *cd) 5240 { 5241 struct ice_aqc_driver_shared_params *cmd; 5242 struct ice_aq_desc desc; 5243 int status; 5244 5245 if (idx >= ICE_AQC_DRIVER_PARAM_MAX) 5246 return -EIO; 5247 5248 cmd = &desc.params.drv_shared_params; 5249 5250 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_shared_params); 5251 5252 cmd->set_or_get_op = ICE_AQC_DRIVER_PARAM_GET; 5253 cmd->param_indx = idx; 5254 5255 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 5256 if (status) 5257 return status; 5258 5259 *value = le32_to_cpu(cmd->param_val); 5260 5261 return 0; 5262 } 5263 5264 /** 5265 * ice_aq_set_gpio 5266 * @hw: pointer to the hw struct 5267 * @gpio_ctrl_handle: GPIO controller node handle 5268 * @pin_idx: IO Number of the GPIO that needs to be set 5269 * @value: SW-provided IO value to set in the LSB 5270 * @cd: pointer to command details structure or NULL 5271 * 5272 * Sends 0x06EC AQ command to set the GPIO pin state that's part of the topology 5273 */ 5274 int 5275 ice_aq_set_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, bool value, 5276 struct ice_sq_cd *cd) 5277 { 5278 struct ice_aqc_gpio *cmd; 5279 struct ice_aq_desc desc; 5280 5281 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_gpio); 5282 cmd = &desc.params.read_write_gpio; 5283 cmd->gpio_ctrl_handle = cpu_to_le16(gpio_ctrl_handle); 5284 cmd->gpio_num = pin_idx; 5285 cmd->gpio_val = value ?
1 : 0; 5286 5287 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 5288 } 5289 5290 /** 5291 * ice_aq_get_gpio 5292 * @hw: pointer to the hw struct 5293 * @gpio_ctrl_handle: GPIO controller node handle 5294 * @pin_idx: IO Number of the GPIO that is to be read 5295 * @value: IO value read 5296 * @cd: pointer to command details structure or NULL 5297 * 5298 * Sends 0x06ED AQ command to get the value of a GPIO signal which is part of 5299 * the topology 5300 */ 5301 int 5302 ice_aq_get_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, 5303 bool *value, struct ice_sq_cd *cd) 5304 { 5305 struct ice_aqc_gpio *cmd; 5306 struct ice_aq_desc desc; 5307 int status; 5308 5309 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_gpio); 5310 cmd = &desc.params.read_write_gpio; 5311 cmd->gpio_ctrl_handle = cpu_to_le16(gpio_ctrl_handle); 5312 cmd->gpio_num = pin_idx; 5313 5314 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 5315 if (status) 5316 return status; 5317 5318 *value = !!cmd->gpio_val; 5319 return 0; 5320 } 5321 5322 /** 5323 * ice_is_fw_api_min_ver 5324 * @hw: pointer to the hardware structure 5325 * @maj: major version 5326 * @min: minor version 5327 * @patch: patch version 5328 * 5329 * Checks if the firmware API is at least the given version 5330 */ 5331 static bool ice_is_fw_api_min_ver(struct ice_hw *hw, u8 maj, u8 min, u8 patch) 5332 { 5333 if (hw->api_maj_ver == maj) { 5334 if (hw->api_min_ver > min) 5335 return true; 5336 if (hw->api_min_ver == min && hw->api_patch >= patch) 5337 return true; 5338 } else if (hw->api_maj_ver > maj) { 5339 return true; 5340 } 5341 5342 return false; 5343 } 5344 5345 /** 5346 * ice_fw_supports_link_override 5347 * @hw: pointer to the hardware structure 5348 * 5349 * Checks if the firmware supports link override 5350 */ 5351 bool ice_fw_supports_link_override(struct ice_hw *hw) 5352 { 5353 return ice_is_fw_api_min_ver(hw, ICE_FW_API_LINK_OVERRIDE_MAJ, 5354 ICE_FW_API_LINK_OVERRIDE_MIN, 5355 ICE_FW_API_LINK_OVERRIDE_PATCH); 5356 } 5357 5358 /** 5359 * ice_get_link_default_override 5360 * @ldo: pointer to the link default override struct 5361 * @pi: pointer to the port info struct 5362 * 5363 * Gets the link default override for a port 5364 */ 5365 int 5366 ice_get_link_default_override(struct ice_link_default_override_tlv *ldo, 5367 struct ice_port_info *pi) 5368 { 5369 u16 i, tlv, tlv_len, tlv_start, buf, offset; 5370 struct ice_hw *hw = pi->hw; 5371 int status; 5372 5373 status = ice_get_pfa_module_tlv(hw, &tlv, &tlv_len, 5374 ICE_SR_LINK_DEFAULT_OVERRIDE_PTR); 5375 if (status) { 5376 ice_debug(hw, ICE_DBG_INIT, "Failed to read link override TLV.\n"); 5377 return status; 5378 } 5379 5380 /* Each port has its own config; calculate for our port */ 5381 tlv_start = tlv + pi->lport * ICE_SR_PFA_LINK_OVERRIDE_WORDS + 5382 ICE_SR_PFA_LINK_OVERRIDE_OFFSET; 5383 5384 /* link options first */ 5385 status = ice_read_sr_word(hw, tlv_start, &buf); 5386 if (status) { 5387 ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n"); 5388 return status; 5389 } 5390 ldo->options = buf & ICE_LINK_OVERRIDE_OPT_M; 5391 ldo->phy_config = (buf & ICE_LINK_OVERRIDE_PHY_CFG_M) >> 5392 ICE_LINK_OVERRIDE_PHY_CFG_S; 5393 5394 /* link PHY config */ 5395 offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_FEC_OFFSET; 5396 status = ice_read_sr_word(hw, offset, &buf); 5397 if (status) { 5398 ice_debug(hw, ICE_DBG_INIT, "Failed to read override phy config.\n"); 5399 return status; 5400 } 5401 ldo->fec_options = buf & ICE_LINK_OVERRIDE_FEC_OPT_M; 5402 5403 /* PHY types low */ 5404
/**
 * ice_is_fw_api_min_ver
 * @hw: pointer to the hardware structure
 * @maj: major version
 * @min: minor version
 * @patch: patch version
 *
 * Checks if the firmware API is at least the given version
 */
static bool ice_is_fw_api_min_ver(struct ice_hw *hw, u8 maj, u8 min, u8 patch)
{
	if (hw->api_maj_ver == maj) {
		if (hw->api_min_ver > min)
			return true;
		if (hw->api_min_ver == min && hw->api_patch >= patch)
			return true;
	} else if (hw->api_maj_ver > maj) {
		return true;
	}

	return false;
}

/**
 * ice_fw_supports_link_override
 * @hw: pointer to the hardware structure
 *
 * Checks if the firmware supports link override
 */
bool ice_fw_supports_link_override(struct ice_hw *hw)
{
	return ice_is_fw_api_min_ver(hw, ICE_FW_API_LINK_OVERRIDE_MAJ,
				     ICE_FW_API_LINK_OVERRIDE_MIN,
				     ICE_FW_API_LINK_OVERRIDE_PATCH);
}

/**
 * ice_get_link_default_override
 * @ldo: pointer to the link default override struct
 * @pi: pointer to the port info struct
 *
 * Gets the link default override for a port
 */
int
ice_get_link_default_override(struct ice_link_default_override_tlv *ldo,
			      struct ice_port_info *pi)
{
	u16 i, tlv, tlv_len, tlv_start, buf, offset;
	struct ice_hw *hw = pi->hw;
	int status;

	status = ice_get_pfa_module_tlv(hw, &tlv, &tlv_len,
					ICE_SR_LINK_DEFAULT_OVERRIDE_PTR);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to read link override TLV.\n");
		return status;
	}

	/* Each port has its own config; calculate for our port */
	tlv_start = tlv + pi->lport * ICE_SR_PFA_LINK_OVERRIDE_WORDS +
		ICE_SR_PFA_LINK_OVERRIDE_OFFSET;

	/* link options first */
	status = ice_read_sr_word(hw, tlv_start, &buf);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n");
		return status;
	}
	ldo->options = buf & ICE_LINK_OVERRIDE_OPT_M;
	ldo->phy_config = (buf & ICE_LINK_OVERRIDE_PHY_CFG_M) >>
		ICE_LINK_OVERRIDE_PHY_CFG_S;

	/* link PHY config */
	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_FEC_OFFSET;
	status = ice_read_sr_word(hw, offset, &buf);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to read override phy config.\n");
		return status;
	}
	ldo->fec_options = buf & ICE_LINK_OVERRIDE_FEC_OPT_M;

	/* PHY types low */
	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET;
	for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
		status = ice_read_sr_word(hw, (offset + i), &buf);
		if (status) {
			ice_debug(hw, ICE_DBG_INIT, "Failed to read override phy_type_low.\n");
			return status;
		}
		/* shift 16 bits at a time to fill 64 bits */
		ldo->phy_type_low |= ((u64)buf << (i * 16));
	}

	/* PHY types high */
	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET +
		ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS;
	for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
		status = ice_read_sr_word(hw, (offset + i), &buf);
		if (status) {
			ice_debug(hw, ICE_DBG_INIT, "Failed to read override phy_type_high.\n");
			return status;
		}
		/* shift 16 bits at a time to fill 64 bits */
		ldo->phy_type_high |= ((u64)buf << (i * 16));
	}

	return status;
}

/**
 * ice_is_phy_caps_an_enabled - check if PHY capabilities autoneg is enabled
 * @caps: get PHY capability data
 */
bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps)
{
	if (caps->caps & ICE_AQC_PHY_AN_MODE ||
	    caps->low_power_ctrl_an & (ICE_AQC_PHY_AN_EN_CLAUSE28 |
				       ICE_AQC_PHY_AN_EN_CLAUSE73 |
				       ICE_AQC_PHY_AN_EN_CLAUSE37))
		return true;

	return false;
}

/**
 * ice_aq_set_lldp_mib - Set the LLDP MIB
 * @hw: pointer to the HW struct
 * @mib_type: Local, Remote or both Local and Remote MIBs
 * @buf: pointer to the caller-supplied buffer holding the MIB block to set
 * @buf_size: size of the buffer (in bytes)
 * @cd: pointer to command details structure or NULL
 *
 * Set the LLDP MIB. (0x0A08)
 */
int
ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
		    struct ice_sq_cd *cd)
{
	struct ice_aqc_lldp_set_local_mib *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.lldp_set_mib;

	if (buf_size == 0 || !buf)
		return -EINVAL;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_set_local_mib);

	desc.flags |= cpu_to_le16((u16)ICE_AQ_FLAG_RD);
	desc.datalen = cpu_to_le16(buf_size);

	cmd->type = mib_type;
	cmd->length = cpu_to_le16(buf_size);

	return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
}

/**
 * ice_fw_supports_lldp_fltr_ctrl - check NVM version supports lldp_fltr_ctrl
 * @hw: pointer to HW struct
 */
bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw)
{
	if (hw->mac_type != ICE_MAC_E810)
		return false;

	return ice_is_fw_api_min_ver(hw, ICE_FW_API_LLDP_FLTR_MAJ,
				     ICE_FW_API_LLDP_FLTR_MIN,
				     ICE_FW_API_LLDP_FLTR_PATCH);
}

/**
 * ice_lldp_fltr_add_remove - add or remove an LLDP Rx switch filter
 * @hw: pointer to HW struct
 * @vsi_num: absolute HW index for VSI
 * @add: true to add the filter, false to remove it
 */
int
ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add)
{
	struct ice_aqc_lldp_filter_ctrl *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.lldp_filter_ctrl;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_filter_ctrl);

	if (add)
		cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_ADD;
	else
		cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_DELETE;

	cmd->vsi_num = cpu_to_le16(vsi_num);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}
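/*
 * Usage sketch (illustrative only): install the LLDP Rx filter on a VSI,
 * guarded by the firmware capability check above. vsi_num is a hypothetical
 * absolute HW VSI index.
 *
 *	if (ice_fw_supports_lldp_fltr_ctrl(hw)) {
 *		int err = ice_lldp_fltr_add_remove(hw, vsi_num, true);
 *
 *		if (err)
 *			ice_debug(hw, ICE_DBG_INIT, "LLDP filter add failed\n");
 *	}
 */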
/**
 * ice_lldp_execute_pending_mib - execute LLDP pending MIB request
 * @hw: pointer to HW struct
 */
int ice_lldp_execute_pending_mib(struct ice_hw *hw)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_execute_pending_mib);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_fw_supports_report_dflt_cfg
 * @hw: pointer to the hardware structure
 *
 * Checks if the firmware supports report default configuration
 */
bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw)
{
	return ice_is_fw_api_min_ver(hw, ICE_FW_API_REPORT_DFLT_CFG_MAJ,
				     ICE_FW_API_REPORT_DFLT_CFG_MIN,
				     ICE_FW_API_REPORT_DFLT_CFG_PATCH);
}

/* Each index into the following array matches a speed bit from the set of AQ
 * returned link speeds, ICE_AQ_LINK_SPEED_10MB .. ICE_AQ_LINK_SPEED_100GB,
 * excluding ICE_AQ_LINK_SPEED_UNKNOWN (BIT(15)). The link_speed returned by
 * the firmware is a 16 bit value, and the array is indexed by
 * [fls(speed) - 1].
 */
static const u32 ice_aq_to_link_speed[] = {
	SPEED_10,	/* BIT(0) */
	SPEED_100,
	SPEED_1000,
	SPEED_2500,
	SPEED_5000,
	SPEED_10000,
	SPEED_20000,
	SPEED_25000,
	SPEED_40000,
	SPEED_50000,
	SPEED_100000,	/* BIT(10) */
};

/**
 * ice_get_link_speed - get integer speed from table
 * @index: array index from fls(aq speed) - 1
 *
 * Returns: u32 value containing integer speed
 */
u32 ice_get_link_speed(u16 index)
{
	if (index >= ARRAY_SIZE(ice_aq_to_link_speed))
		return 0;

	return ice_aq_to_link_speed[index];
}
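/*
 * Usage sketch (illustrative only): convert an AQ link speed bit to an
 * integer Mb/s value, assuming ICE_AQ_LINK_SPEED_25GB is BIT(7), consistent
 * with the BIT(0)..BIT(10) annotations on the table above.
 *
 *	u16 aq_speed = ICE_AQ_LINK_SPEED_25GB;
 *	u32 mbps = ice_get_link_speed(fls(aq_speed) - 1);
 *
 * Here fls(BIT(7)) is 8, so index 7 selects SPEED_25000 from the table.
 */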