// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

#include "ice_common.h"
#include "ice_sched.h"
#include "ice_adminq_cmd.h"
#include "ice_flow.h"

#define ICE_PF_RESET_WAIT_COUNT	300

static const char * const ice_link_mode_str_low[] = {
	[0] = "100BASE_TX",
	[1] = "100M_SGMII",
	[2] = "1000BASE_T",
	[3] = "1000BASE_SX",
	[4] = "1000BASE_LX",
	[5] = "1000BASE_KX",
	[6] = "1G_SGMII",
	[7] = "2500BASE_T",
	[8] = "2500BASE_X",
	[9] = "2500BASE_KX",
	[10] = "5GBASE_T",
	[11] = "5GBASE_KR",
	[12] = "10GBASE_T",
	[13] = "10G_SFI_DA",
	[14] = "10GBASE_SR",
	[15] = "10GBASE_LR",
	[16] = "10GBASE_KR_CR1",
	[17] = "10G_SFI_AOC_ACC",
	[18] = "10G_SFI_C2C",
	[19] = "25GBASE_T",
	[20] = "25GBASE_CR",
	[21] = "25GBASE_CR_S",
	[22] = "25GBASE_CR1",
	[23] = "25GBASE_SR",
	[24] = "25GBASE_LR",
	[25] = "25GBASE_KR",
	[26] = "25GBASE_KR_S",
	[27] = "25GBASE_KR1",
	[28] = "25G_AUI_AOC_ACC",
	[29] = "25G_AUI_C2C",
	[30] = "40GBASE_CR4",
	[31] = "40GBASE_SR4",
	[32] = "40GBASE_LR4",
	[33] = "40GBASE_KR4",
	[34] = "40G_XLAUI_AOC_ACC",
	[35] = "40G_XLAUI",
	[36] = "50GBASE_CR2",
	[37] = "50GBASE_SR2",
	[38] = "50GBASE_LR2",
	[39] = "50GBASE_KR2",
	[40] = "50G_LAUI2_AOC_ACC",
	[41] = "50G_LAUI2",
	[42] = "50G_AUI2_AOC_ACC",
	[43] = "50G_AUI2",
	[44] = "50GBASE_CP",
	[45] = "50GBASE_SR",
	[46] = "50GBASE_FR",
	[47] = "50GBASE_LR",
	[48] = "50GBASE_KR_PAM4",
	[49] = "50G_AUI1_AOC_ACC",
	[50] = "50G_AUI1",
	[51] = "100GBASE_CR4",
	[52] = "100GBASE_SR4",
	[53] = "100GBASE_LR4",
	[54] = "100GBASE_KR4",
	[55] = "100G_CAUI4_AOC_ACC",
	[56] = "100G_CAUI4",
	[57] = "100G_AUI4_AOC_ACC",
	[58] = "100G_AUI4",
	[59] = "100GBASE_CR_PAM4",
	[60] = "100GBASE_KR_PAM4",
	[61] = "100GBASE_CP2",
	[62] = "100GBASE_SR2",
	[63] = "100GBASE_DR",
};

static const char * const ice_link_mode_str_high[] = {
	[0] = "100GBASE_KR2_PAM4",
	[1] = "100G_CAUI2_AOC_ACC",
	[2] = "100G_CAUI2",
	[3] = "100G_AUI2_AOC_ACC",
	[4] = "100G_AUI2",
};

/**
 * ice_dump_phy_type - helper function to dump phy_type
 * @hw: pointer to the HW structure
 * @low: 64 bit value for phy_type_low
 * @high: 64 bit value for phy_type_high
 * @prefix: prefix string to differentiate multiple dumps
 */
static void
ice_dump_phy_type(struct ice_hw *hw, u64 low, u64 high, const char *prefix)
{
	ice_debug(hw, ICE_DBG_PHY, "%s: phy_type_low: 0x%016llx\n", prefix, low);

	for (u32 i = 0; i < BITS_PER_TYPE(typeof(low)); i++) {
		if (low & BIT_ULL(i))
			ice_debug(hw, ICE_DBG_PHY, "%s: bit(%d): %s\n",
				  prefix, i, ice_link_mode_str_low[i]);
	}

	ice_debug(hw, ICE_DBG_PHY, "%s: phy_type_high: 0x%016llx\n", prefix, high);

	for (u32 i = 0; i < BITS_PER_TYPE(typeof(high)); i++) {
		if (high & BIT_ULL(i))
			ice_debug(hw, ICE_DBG_PHY, "%s: bit(%d): %s\n",
				  prefix, i, ice_link_mode_str_high[i]);
	}
}
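
/*
 * Illustrative note (an editorial sketch, not part of the upstream file):
 * with, e.g., phy_type_low = 0x1000 only bit 12 is set, so
 * ice_dump_phy_type() logs "bit(12): 10GBASE_T" under the caller's prefix.
 * The two string tables above are indexed by bit position and must stay in
 * sync with the ICE_PHY_TYPE_LOW_* and ICE_PHY_TYPE_HIGH_* bit definitions;
 * ice_link_mode_str_high only covers the five high bits currently defined.
 */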

/**
 * ice_set_mac_type - Sets MAC type
 * @hw: pointer to the HW structure
 *
 * This function sets the MAC type of the adapter based on the
 * vendor ID and device ID stored in the HW structure.
 */
static int ice_set_mac_type(struct ice_hw *hw)
{
	if (hw->vendor_id != PCI_VENDOR_ID_INTEL)
		return -ENODEV;

	switch (hw->device_id) {
	case ICE_DEV_ID_E810C_BACKPLANE:
	case ICE_DEV_ID_E810C_QSFP:
	case ICE_DEV_ID_E810C_SFP:
	case ICE_DEV_ID_E810_XXV_BACKPLANE:
	case ICE_DEV_ID_E810_XXV_QSFP:
	case ICE_DEV_ID_E810_XXV_SFP:
		hw->mac_type = ICE_MAC_E810;
		break;
	case ICE_DEV_ID_E823C_10G_BASE_T:
	case ICE_DEV_ID_E823C_BACKPLANE:
	case ICE_DEV_ID_E823C_QSFP:
	case ICE_DEV_ID_E823C_SFP:
	case ICE_DEV_ID_E823C_SGMII:
	case ICE_DEV_ID_E822C_10G_BASE_T:
	case ICE_DEV_ID_E822C_BACKPLANE:
	case ICE_DEV_ID_E822C_QSFP:
	case ICE_DEV_ID_E822C_SFP:
	case ICE_DEV_ID_E822C_SGMII:
	case ICE_DEV_ID_E822L_10G_BASE_T:
	case ICE_DEV_ID_E822L_BACKPLANE:
	case ICE_DEV_ID_E822L_SFP:
	case ICE_DEV_ID_E822L_SGMII:
	case ICE_DEV_ID_E823L_10G_BASE_T:
	case ICE_DEV_ID_E823L_1GBE:
	case ICE_DEV_ID_E823L_BACKPLANE:
	case ICE_DEV_ID_E823L_QSFP:
	case ICE_DEV_ID_E823L_SFP:
		hw->mac_type = ICE_MAC_GENERIC;
		break;
	default:
		hw->mac_type = ICE_MAC_UNKNOWN;
		break;
	}

	ice_debug(hw, ICE_DBG_INIT, "mac_type: %d\n", hw->mac_type);
	return 0;
}

/**
 * ice_is_e810
 * @hw: pointer to the hardware structure
 *
 * returns true if the device is E810 based, false if not.
 */
bool ice_is_e810(struct ice_hw *hw)
{
	return hw->mac_type == ICE_MAC_E810;
}

/**
 * ice_is_e810t
 * @hw: pointer to the hardware structure
 *
 * returns true if the device is E810T based, false if not.
 */
bool ice_is_e810t(struct ice_hw *hw)
{
	switch (hw->device_id) {
	case ICE_DEV_ID_E810C_SFP:
		switch (hw->subsystem_device_id) {
		case ICE_SUBDEV_ID_E810T:
		case ICE_SUBDEV_ID_E810T2:
		case ICE_SUBDEV_ID_E810T3:
		case ICE_SUBDEV_ID_E810T4:
		case ICE_SUBDEV_ID_E810T6:
		case ICE_SUBDEV_ID_E810T7:
			return true;
		}
		break;
	case ICE_DEV_ID_E810C_QSFP:
		switch (hw->subsystem_device_id) {
		case ICE_SUBDEV_ID_E810T2:
		case ICE_SUBDEV_ID_E810T3:
		case ICE_SUBDEV_ID_E810T5:
			return true;
		}
		break;
	default:
		break;
	}

	return false;
}

/**
 * ice_clear_pf_cfg - Clear PF configuration
 * @hw: pointer to the hardware structure
 *
 * Clears any existing PF configuration (VSIs, VSI lists, switch rules, port
 * configuration, flow director filters, etc.).
 */
int ice_clear_pf_cfg(struct ice_hw *hw)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pf_cfg);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_aq_manage_mac_read - manage MAC address read command
 * @hw: pointer to the HW struct
 * @buf: a virtual buffer to hold the manage MAC read response
 * @buf_size: Size of the virtual buffer
 * @cd: pointer to command details structure or NULL
 *
 * This function is used to return per PF station MAC address (0x0107).
 * NOTE: Upon successful completion of this command, MAC address information
 * is returned in user specified buffer. Please interpret user specified
 * buffer as "manage_mac_read" response.
 * Response such as various MAC addresses are stored in HW struct (port.mac)
 * ice_discover_dev_caps is expected to be called before this function is
 * called.
 */
static int
ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
		       struct ice_sq_cd *cd)
{
	struct ice_aqc_manage_mac_read_resp *resp;
	struct ice_aqc_manage_mac_read *cmd;
	struct ice_aq_desc desc;
	int status;
	u16 flags;
	u8 i;

	cmd = &desc.params.mac_read;

	if (buf_size < sizeof(*resp))
		return -EINVAL;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_read);

	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (status)
		return status;

	resp = buf;
	flags = le16_to_cpu(cmd->flags) & ICE_AQC_MAN_MAC_READ_M;

	if (!(flags & ICE_AQC_MAN_MAC_LAN_ADDR_VALID)) {
		ice_debug(hw, ICE_DBG_LAN, "got invalid MAC address\n");
		return -EIO;
	}

	/* A single port can report up to two (LAN and WoL) addresses */
	for (i = 0; i < cmd->num_addr; i++)
		if (resp[i].addr_type == ICE_AQC_MAN_MAC_ADDR_TYPE_LAN) {
			ether_addr_copy(hw->port_info->mac.lan_addr,
					resp[i].mac_addr);
			ether_addr_copy(hw->port_info->mac.perm_addr,
					resp[i].mac_addr);
			break;
		}

	return 0;
}

/**
 * ice_aq_get_phy_caps - returns PHY capabilities
 * @pi: port information structure
 * @qual_mods: report qualified modules
 * @report_mode: report mode capabilities
 * @pcaps: structure for PHY capabilities to be filled
 * @cd: pointer to command details structure or NULL
 *
 * Returns the various PHY capabilities supported on the Port (0x0600)
 */
int
ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
		    struct ice_aqc_get_phy_caps_data *pcaps,
		    struct ice_sq_cd *cd)
{
	struct ice_aqc_get_phy_caps *cmd;
	u16 pcaps_size = sizeof(*pcaps);
	struct ice_aq_desc desc;
	const char *prefix;
	struct ice_hw *hw;
	int status;

	cmd = &desc.params.get_phy;

	if (!pcaps || (report_mode & ~ICE_AQC_REPORT_MODE_M) || !pi)
		return -EINVAL;
	hw = pi->hw;

	if (report_mode == ICE_AQC_REPORT_DFLT_CFG &&
	    !ice_fw_supports_report_dflt_cfg(hw))
		return -EINVAL;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_caps);

	if (qual_mods)
		cmd->param0 |= cpu_to_le16(ICE_AQC_GET_PHY_RQM);

	cmd->param0 |= cpu_to_le16(report_mode);
	status = ice_aq_send_cmd(hw, &desc, pcaps, pcaps_size, cd);

	ice_debug(hw, ICE_DBG_LINK, "get phy caps dump\n");

	switch (report_mode) {
	case ICE_AQC_REPORT_TOPO_CAP_MEDIA:
		prefix = "phy_caps_media";
		break;
	case ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA:
		prefix = "phy_caps_no_media";
		break;
	case ICE_AQC_REPORT_ACTIVE_CFG:
		prefix = "phy_caps_active";
		break;
	case ICE_AQC_REPORT_DFLT_CFG:
		prefix = "phy_caps_default";
		break;
	default:
		prefix = "phy_caps_invalid";
	}

	ice_dump_phy_type(hw, le64_to_cpu(pcaps->phy_type_low),
			  le64_to_cpu(pcaps->phy_type_high), prefix);

	ice_debug(hw, ICE_DBG_LINK, "%s: report_mode = 0x%x\n",
		  prefix, report_mode);
	ice_debug(hw, ICE_DBG_LINK, "%s: caps = 0x%x\n", prefix, pcaps->caps);
	ice_debug(hw, ICE_DBG_LINK, "%s: low_power_ctrl_an = 0x%x\n", prefix,
		  pcaps->low_power_ctrl_an);
	ice_debug(hw, ICE_DBG_LINK, "%s: eee_cap = 0x%x\n", prefix,
		  pcaps->eee_cap);
	ice_debug(hw, ICE_DBG_LINK, "%s: eeer_value = 0x%x\n", prefix,
		  pcaps->eeer_value);
	ice_debug(hw, ICE_DBG_LINK, "%s: link_fec_options = 0x%x\n", prefix,
		  pcaps->link_fec_options);
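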
	ice_debug(hw, ICE_DBG_LINK, "%s: module_compliance_enforcement = 0x%x\n",
		  prefix, pcaps->module_compliance_enforcement);
	ice_debug(hw, ICE_DBG_LINK, "%s: extended_compliance_code = 0x%x\n",
		  prefix, pcaps->extended_compliance_code);
	ice_debug(hw, ICE_DBG_LINK, "%s: module_type[0] = 0x%x\n", prefix,
		  pcaps->module_type[0]);
	ice_debug(hw, ICE_DBG_LINK, "%s: module_type[1] = 0x%x\n", prefix,
		  pcaps->module_type[1]);
	ice_debug(hw, ICE_DBG_LINK, "%s: module_type[2] = 0x%x\n", prefix,
		  pcaps->module_type[2]);

	if (!status && report_mode == ICE_AQC_REPORT_TOPO_CAP_MEDIA) {
		pi->phy.phy_type_low = le64_to_cpu(pcaps->phy_type_low);
		pi->phy.phy_type_high = le64_to_cpu(pcaps->phy_type_high);
		memcpy(pi->phy.link_info.module_type, &pcaps->module_type,
		       sizeof(pi->phy.link_info.module_type));
	}

	return status;
}

/*
 * Illustrative usage sketch (an editorial example mirroring the call that
 * ice_init_hw() below makes; the error handling shown is an assumption):
 *
 *	struct ice_aqc_get_phy_caps_data pcaps = {};
 *	int err;
 *
 *	err = ice_aq_get_phy_caps(hw->port_info, false,
 *				  ICE_AQC_REPORT_TOPO_CAP_MEDIA, &pcaps, NULL);
 *	if (!err)
 *		... pcaps.phy_type_low / pcaps.caps now describe the media ...
 */

/**
 * ice_aq_get_link_topo_handle - get link topology node return status
 * @pi: port information structure
 * @node_type: requested node type
 * @cd: pointer to command details structure or NULL
 *
 * Get link topology node return status for specified node type (0x06E0)
 *
 * Node type cage can be used to determine if cage is present. If AQC
 * returns error (ENOENT), then no cage present. If no cage present, then
 * connection type is backplane or BASE-T.
 */
static int
ice_aq_get_link_topo_handle(struct ice_port_info *pi, u8 node_type,
			    struct ice_sq_cd *cd)
{
	struct ice_aqc_get_link_topo *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.get_link_topo;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);

	cmd->addr.topo_params.node_type_ctx =
		(ICE_AQC_LINK_TOPO_NODE_CTX_PORT <<
		 ICE_AQC_LINK_TOPO_NODE_CTX_S);

	/* set node type */
	cmd->addr.topo_params.node_type_ctx |=
		(ICE_AQC_LINK_TOPO_NODE_TYPE_M & node_type);

	return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
}

/**
 * ice_is_media_cage_present
 * @pi: port information structure
 *
 * Returns true if media cage is present, else false. If no cage, then
 * media type is backplane or BASE-T.
 */
static bool ice_is_media_cage_present(struct ice_port_info *pi)
{
	/* Node type cage can be used to determine if cage is present. If AQC
	 * returns error (ENOENT), then no cage present. If no cage present then
	 * connection type is backplane or BASE-T.
	 */
	return !ice_aq_get_link_topo_handle(pi,
					    ICE_AQC_LINK_TOPO_NODE_TYPE_CAGE,
					    NULL);
}

/**
 * ice_get_media_type - Gets media type
 * @pi: port information structure
 */
static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
{
	struct ice_link_status *hw_link_info;

	if (!pi)
		return ICE_MEDIA_UNKNOWN;

	hw_link_info = &pi->phy.link_info;
	if (hw_link_info->phy_type_low && hw_link_info->phy_type_high)
		/* If more than one media type is selected, report unknown */
		return ICE_MEDIA_UNKNOWN;

	if (hw_link_info->phy_type_low) {
		/* 1G SGMII is a special case where some DA cable PHYs
		 * may show this as an option when it really shouldn't
		 * be since SGMII is meant to be between a MAC and a PHY
		 * in a backplane. Try to detect this case and handle it
		 */
		if (hw_link_info->phy_type_low == ICE_PHY_TYPE_LOW_1G_SGMII &&
		    (hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] ==
		     ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE ||
		     hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] ==
		     ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE))
			return ICE_MEDIA_DA;

		switch (hw_link_info->phy_type_low) {
		case ICE_PHY_TYPE_LOW_1000BASE_SX:
		case ICE_PHY_TYPE_LOW_1000BASE_LX:
		case ICE_PHY_TYPE_LOW_10GBASE_SR:
		case ICE_PHY_TYPE_LOW_10GBASE_LR:
		case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
		case ICE_PHY_TYPE_LOW_25GBASE_SR:
		case ICE_PHY_TYPE_LOW_25GBASE_LR:
		case ICE_PHY_TYPE_LOW_40GBASE_SR4:
		case ICE_PHY_TYPE_LOW_40GBASE_LR4:
		case ICE_PHY_TYPE_LOW_50GBASE_SR2:
		case ICE_PHY_TYPE_LOW_50GBASE_LR2:
		case ICE_PHY_TYPE_LOW_50GBASE_SR:
		case ICE_PHY_TYPE_LOW_50GBASE_FR:
		case ICE_PHY_TYPE_LOW_50GBASE_LR:
		case ICE_PHY_TYPE_LOW_100GBASE_SR4:
		case ICE_PHY_TYPE_LOW_100GBASE_LR4:
		case ICE_PHY_TYPE_LOW_100GBASE_SR2:
		case ICE_PHY_TYPE_LOW_100GBASE_DR:
		case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
		case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
		case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
		case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
		case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
		case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
		case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
		case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
			return ICE_MEDIA_FIBER;
		case ICE_PHY_TYPE_LOW_100BASE_TX:
		case ICE_PHY_TYPE_LOW_1000BASE_T:
		case ICE_PHY_TYPE_LOW_2500BASE_T:
		case ICE_PHY_TYPE_LOW_5GBASE_T:
		case ICE_PHY_TYPE_LOW_10GBASE_T:
		case ICE_PHY_TYPE_LOW_25GBASE_T:
			return ICE_MEDIA_BASET;
		case ICE_PHY_TYPE_LOW_10G_SFI_DA:
		case ICE_PHY_TYPE_LOW_25GBASE_CR:
		case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
		case ICE_PHY_TYPE_LOW_25GBASE_CR1:
		case ICE_PHY_TYPE_LOW_40GBASE_CR4:
		case ICE_PHY_TYPE_LOW_50GBASE_CR2:
		case ICE_PHY_TYPE_LOW_50GBASE_CP:
		case ICE_PHY_TYPE_LOW_100GBASE_CR4:
		case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
		case ICE_PHY_TYPE_LOW_100GBASE_CP2:
			return ICE_MEDIA_DA;
		case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
		case ICE_PHY_TYPE_LOW_40G_XLAUI:
		case ICE_PHY_TYPE_LOW_50G_LAUI2:
		case ICE_PHY_TYPE_LOW_50G_AUI2:
		case ICE_PHY_TYPE_LOW_50G_AUI1:
		case ICE_PHY_TYPE_LOW_100G_AUI4:
		case ICE_PHY_TYPE_LOW_100G_CAUI4:
			if (ice_is_media_cage_present(pi))
				return ICE_MEDIA_DA;
			fallthrough;
		case ICE_PHY_TYPE_LOW_1000BASE_KX:
		case ICE_PHY_TYPE_LOW_2500BASE_KX:
		case ICE_PHY_TYPE_LOW_2500BASE_X:
		case ICE_PHY_TYPE_LOW_5GBASE_KR:
		case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
		case ICE_PHY_TYPE_LOW_25GBASE_KR:
		case ICE_PHY_TYPE_LOW_25GBASE_KR1:
		case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
		case ICE_PHY_TYPE_LOW_40GBASE_KR4:
		case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
		case ICE_PHY_TYPE_LOW_50GBASE_KR2:
		case ICE_PHY_TYPE_LOW_100GBASE_KR4:
		case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
			return ICE_MEDIA_BACKPLANE;
		}
	} else {
		switch (hw_link_info->phy_type_high) {
		case ICE_PHY_TYPE_HIGH_100G_AUI2:
		case ICE_PHY_TYPE_HIGH_100G_CAUI2:
			if (ice_is_media_cage_present(pi))
				return ICE_MEDIA_DA;
			fallthrough;
		case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
			return ICE_MEDIA_BACKPLANE;
		case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
		case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
			return ICE_MEDIA_FIBER;
		}
	}
	return ICE_MEDIA_UNKNOWN;
}

/**
 * ice_aq_get_link_info
 * @pi: port information structure
 * @ena_lse: enable/disable LinkStatusEvent reporting
 * @link: pointer to link status structure - optional
 * @cd: pointer to command details structure or NULL
 *
 * Get Link Status (0x0607). Returns the link status of the adapter.
 */
int
ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
		     struct ice_link_status *link, struct ice_sq_cd *cd)
{
	struct ice_aqc_get_link_status_data link_data = { 0 };
	struct ice_aqc_get_link_status *resp;
	struct ice_link_status *li_old, *li;
	enum ice_media_type *hw_media_type;
	struct ice_fc_info *hw_fc_info;
	bool tx_pause, rx_pause;
	struct ice_aq_desc desc;
	struct ice_hw *hw;
	u16 cmd_flags;
	int status;

	if (!pi)
		return -EINVAL;
	hw = pi->hw;
	li_old = &pi->phy.link_info_old;
	hw_media_type = &pi->phy.media_type;
	li = &pi->phy.link_info;
	hw_fc_info = &pi->fc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_status);
	cmd_flags = (ena_lse) ? ICE_AQ_LSE_ENA : ICE_AQ_LSE_DIS;
	resp = &desc.params.get_link_status;
	resp->cmd_flags = cpu_to_le16(cmd_flags);
	resp->lport_num = pi->lport;

	status = ice_aq_send_cmd(hw, &desc, &link_data, sizeof(link_data), cd);
	if (status)
		return status;

	/* save off old link status information */
	*li_old = *li;

	/* update current link status information */
	li->link_speed = le16_to_cpu(link_data.link_speed);
	li->phy_type_low = le64_to_cpu(link_data.phy_type_low);
	li->phy_type_high = le64_to_cpu(link_data.phy_type_high);
	*hw_media_type = ice_get_media_type(pi);
	li->link_info = link_data.link_info;
	li->link_cfg_err = link_data.link_cfg_err;
	li->an_info = link_data.an_info;
	li->ext_info = link_data.ext_info;
	li->max_frame_size = le16_to_cpu(link_data.max_frame_size);
	li->fec_info = link_data.cfg & ICE_AQ_FEC_MASK;
	li->topo_media_conflict = link_data.topo_media_conflict;
	li->pacing = link_data.cfg & (ICE_AQ_CFG_PACING_M |
				      ICE_AQ_CFG_PACING_TYPE_M);

	/* update fc info */
	tx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_TX);
	rx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_RX);
	if (tx_pause && rx_pause)
		hw_fc_info->current_mode = ICE_FC_FULL;
	else if (tx_pause)
		hw_fc_info->current_mode = ICE_FC_TX_PAUSE;
	else if (rx_pause)
		hw_fc_info->current_mode = ICE_FC_RX_PAUSE;
	else
		hw_fc_info->current_mode = ICE_FC_NONE;

	li->lse_ena = !!(resp->cmd_flags & cpu_to_le16(ICE_AQ_LSE_IS_ENABLED));

	ice_debug(hw, ICE_DBG_LINK, "get link info\n");
	ice_debug(hw, ICE_DBG_LINK, " link_speed = 0x%x\n", li->link_speed);
	ice_debug(hw, ICE_DBG_LINK, " phy_type_low = 0x%llx\n",
		  (unsigned long long)li->phy_type_low);
	ice_debug(hw, ICE_DBG_LINK, " phy_type_high = 0x%llx\n",
		  (unsigned long long)li->phy_type_high);
	ice_debug(hw, ICE_DBG_LINK, " media_type = 0x%x\n", *hw_media_type);
	ice_debug(hw, ICE_DBG_LINK, " link_info = 0x%x\n", li->link_info);
	ice_debug(hw, ICE_DBG_LINK, " link_cfg_err = 0x%x\n", li->link_cfg_err);
	ice_debug(hw, ICE_DBG_LINK, " an_info = 0x%x\n", li->an_info);
	ice_debug(hw, ICE_DBG_LINK, " ext_info = 0x%x\n", li->ext_info);
	ice_debug(hw, ICE_DBG_LINK, " fec_info = 0x%x\n", li->fec_info);
	ice_debug(hw, ICE_DBG_LINK, " lse_ena = 0x%x\n", li->lse_ena);
	ice_debug(hw, ICE_DBG_LINK, " max_frame = 0x%x\n",
		  li->max_frame_size);
	ice_debug(hw, ICE_DBG_LINK, " pacing = 0x%x\n", li->pacing);

	/* save link status information */
	if (link)
		*link = *li;

	/* flag cleared so calling functions don't call AQ again */
	pi->phy.get_link_info = false;

	return 0;
}
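
/*
 * Illustrative usage sketch (an editorial assumption, modeled on the
 * pi->phy.get_link_info flag that the function above clears):
 *
 *	if (pi->phy.get_link_info) {
 *		int err = ice_aq_get_link_info(pi, false, NULL, NULL);
 *
 *		if (err)
 *			return err;
 *	}
 *	... pi->phy.link_info is now current ...
 */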

/**
 * ice_fill_tx_timer_and_fc_thresh
 * @hw: pointer to the HW struct
 * @cmd: pointer to MAC cfg structure
 *
 * Add Tx timer and FC refresh threshold info to Set MAC Config AQ command
 * descriptor
 */
static void
ice_fill_tx_timer_and_fc_thresh(struct ice_hw *hw,
				struct ice_aqc_set_mac_cfg *cmd)
{
	u16 fc_thres_val, tx_timer_val;
	u32 val;

	/* We read back the transmit timer and FC threshold value of
	 * LFC. Thus, we will use index =
	 * PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX.
	 *
	 * Also, because we are operating on transmit timer and FC
	 * threshold of LFC, we don't turn on any bit in tx_tmr_priority
	 */
#define IDX_OF_LFC PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX

	/* Retrieve the transmit timer */
	val = rd32(hw, PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(IDX_OF_LFC));
	tx_timer_val = val &
		PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_M;
	cmd->tx_tmr_value = cpu_to_le16(tx_timer_val);

	/* Retrieve the FC threshold */
	val = rd32(hw, PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(IDX_OF_LFC));
	fc_thres_val = val & PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_M;

	cmd->fc_refresh_threshold = cpu_to_le16(fc_thres_val);
}

/**
 * ice_aq_set_mac_cfg
 * @hw: pointer to the HW struct
 * @max_frame_size: Maximum Frame Size to be supported
 * @cd: pointer to command details structure or NULL
 *
 * Set MAC configuration (0x0603)
 */
int
ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd)
{
	struct ice_aqc_set_mac_cfg *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.set_mac_cfg;

	if (max_frame_size == 0)
		return -EINVAL;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_cfg);

	cmd->max_frame_size = cpu_to_le16(max_frame_size);

	ice_fill_tx_timer_and_fc_thresh(hw, cmd);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_init_fltr_mgmt_struct - initializes filter management list and locks
 * @hw: pointer to the HW struct
 */
static int ice_init_fltr_mgmt_struct(struct ice_hw *hw)
{
	struct ice_switch_info *sw;
	int status;

	hw->switch_info = devm_kzalloc(ice_hw_to_dev(hw),
				       sizeof(*hw->switch_info), GFP_KERNEL);
	sw = hw->switch_info;

	if (!sw)
		return -ENOMEM;

	INIT_LIST_HEAD(&sw->vsi_list_map_head);
	sw->prof_res_bm_init = 0;

	status = ice_init_def_sw_recp(hw);
	if (status) {
		devm_kfree(ice_hw_to_dev(hw), hw->switch_info);
		return status;
	}
	return 0;
}

/**
 * ice_cleanup_fltr_mgmt_struct - cleanup filter management list and locks
 * @hw: pointer to the HW struct
 */
static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_vsi_list_map_info *v_pos_map;
	struct ice_vsi_list_map_info *v_tmp_map;
	struct ice_sw_recipe *recps;
	u8 i;

	list_for_each_entry_safe(v_pos_map, v_tmp_map, &sw->vsi_list_map_head,
				 list_entry) {
		list_del(&v_pos_map->list_entry);
		devm_kfree(ice_hw_to_dev(hw), v_pos_map);
	}
	recps = sw->recp_list;
	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
		struct ice_recp_grp_entry *rg_entry, *tmprg_entry;

		recps[i].root_rid = i;
		list_for_each_entry_safe(rg_entry, tmprg_entry,
					 &recps[i].rg_list, l_entry) {
			list_del(&rg_entry->l_entry);
			devm_kfree(ice_hw_to_dev(hw), rg_entry);
		}

		if (recps[i].adv_rule) {
			struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
			struct ice_adv_fltr_mgmt_list_entry *lst_itr;

			mutex_destroy(&recps[i].filt_rule_lock);
			list_for_each_entry_safe(lst_itr, tmp_entry,
						 &recps[i].filt_rules,
						 list_entry) {
				list_del(&lst_itr->list_entry);
				devm_kfree(ice_hw_to_dev(hw), lst_itr->lkups);
				devm_kfree(ice_hw_to_dev(hw), lst_itr);
			}
		} else {
			struct ice_fltr_mgmt_list_entry *lst_itr, *tmp_entry;

			mutex_destroy(&recps[i].filt_rule_lock);
			list_for_each_entry_safe(lst_itr, tmp_entry,
						 &recps[i].filt_rules,
						 list_entry) {
				list_del(&lst_itr->list_entry);
				devm_kfree(ice_hw_to_dev(hw), lst_itr);
			}
		}
		if (recps[i].root_buf)
			devm_kfree(ice_hw_to_dev(hw), recps[i].root_buf);
	}
	ice_rm_all_sw_replay_rule_info(hw);
	devm_kfree(ice_hw_to_dev(hw), sw->recp_list);
	devm_kfree(ice_hw_to_dev(hw), sw);
}

/**
 * ice_get_fw_log_cfg - get FW logging configuration
 * @hw: pointer to the HW struct
 */
static int ice_get_fw_log_cfg(struct ice_hw *hw)
{
	struct ice_aq_desc desc;
	__le16 *config;
	int status;
	u16 size;

	size = sizeof(*config) * ICE_AQC_FW_LOG_ID_MAX;
	config = devm_kzalloc(ice_hw_to_dev(hw), size, GFP_KERNEL);
	if (!config)
		return -ENOMEM;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging_info);

	status = ice_aq_send_cmd(hw, &desc, config, size, NULL);
	if (!status) {
		u16 i;

		/* Save FW logging information into the HW structure */
		for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) {
			u16 v, m, flgs;

			v = le16_to_cpu(config[i]);
			m = (v & ICE_AQC_FW_LOG_ID_M) >> ICE_AQC_FW_LOG_ID_S;
			flgs = (v & ICE_AQC_FW_LOG_EN_M) >> ICE_AQC_FW_LOG_EN_S;

			if (m < ICE_AQC_FW_LOG_ID_MAX)
				hw->fw_log.evnts[m].cur = flgs;
		}
	}

	devm_kfree(ice_hw_to_dev(hw), config);

	return status;
}

/**
 * ice_cfg_fw_log - configure FW logging
 * @hw: pointer to the HW struct
 * @enable: enable certain FW logging events if true, disable all if false
 *
 * This function enables/disables the FW logging via Rx CQ events and a UART
 * port based on predetermined configurations. FW logging via the Rx CQ can be
 * enabled/disabled for individual PFs. However, FW logging via the UART can
 * only be enabled/disabled for all PFs on the same device.
 *
 * To enable overall FW logging, the "cq_en" and "uart_en" enable bits in
 * hw->fw_log need to be set accordingly, e.g. based on user-provided input,
 * before initializing the device.
 *
 * When re/configuring FW logging, callers need to update the "cfg" elements of
 * the hw->fw_log.evnts array with the desired logging event configurations for
 * modules of interest. When disabling FW logging completely, the callers can
 * just pass false in the "enable" parameter. On completion, the function will
 * update the "cur" element of the hw->fw_log.evnts array with the resulting
 * logging event configurations of the modules that are being re/configured. FW
 * logging modules that are not part of a reconfiguration operation retain their
 * previous states.
 *
 * Before resetting the device, it is recommended that the driver disables FW
 * logging before shutting down the control queue. When disabling FW logging
 * ("enable" = false), the latest configurations of FW logging events stored in
 * hw->fw_log.evnts[] are not overridden to allow them to be reconfigured after
 * a device reset.
 *
 * When enabling FW logging to emit log messages via the Rx CQ during the
 * device's initialization phase, a mechanism alternative to interrupt handlers
 * needs to be used to extract FW log messages from the Rx CQ periodically and
 * to prevent the Rx CQ from being full and stalling other types of control
 * messages from FW to SW. Interrupts are typically disabled during the device's
 * initialization phase.
 */
static int ice_cfg_fw_log(struct ice_hw *hw, bool enable)
{
	struct ice_aqc_fw_logging *cmd;
	u16 i, chgs = 0, len = 0;
	struct ice_aq_desc desc;
	__le16 *data = NULL;
	u8 actv_evnts = 0;
	void *buf = NULL;
	int status = 0;

	if (!hw->fw_log.cq_en && !hw->fw_log.uart_en)
		return 0;

	/* Disable FW logging only when the control queue is still responsive */
	if (!enable &&
	    (!hw->fw_log.actv_evnts || !ice_check_sq_alive(hw, &hw->adminq)))
		return 0;

	/* Get current FW log settings */
	status = ice_get_fw_log_cfg(hw);
	if (status)
		return status;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging);
	cmd = &desc.params.fw_logging;

	/* Indicate which controls are valid */
	if (hw->fw_log.cq_en)
		cmd->log_ctrl_valid |= ICE_AQC_FW_LOG_AQ_VALID;

	if (hw->fw_log.uart_en)
		cmd->log_ctrl_valid |= ICE_AQC_FW_LOG_UART_VALID;

	if (enable) {
		/* Fill in an array of entries with FW logging modules and
		 * logging events being reconfigured.
		 */
		for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) {
			u16 val;

			/* Keep track of enabled event types */
			actv_evnts |= hw->fw_log.evnts[i].cfg;

			if (hw->fw_log.evnts[i].cfg == hw->fw_log.evnts[i].cur)
				continue;

			if (!data) {
				data = devm_kcalloc(ice_hw_to_dev(hw),
						    ICE_AQC_FW_LOG_ID_MAX,
						    sizeof(*data),
						    GFP_KERNEL);
				if (!data)
					return -ENOMEM;
			}

			val = i << ICE_AQC_FW_LOG_ID_S;
			val |= hw->fw_log.evnts[i].cfg << ICE_AQC_FW_LOG_EN_S;
			data[chgs++] = cpu_to_le16(val);
		}

		/* Only enable FW logging if at least one module is specified.
		 * If FW logging is currently enabled but all modules are not
		 * enabled to emit log messages, disable FW logging altogether.
		 */
		if (actv_evnts) {
			/* Leave if there is effectively no change */
			if (!chgs)
				goto out;

			if (hw->fw_log.cq_en)
				cmd->log_ctrl |= ICE_AQC_FW_LOG_AQ_EN;

			if (hw->fw_log.uart_en)
				cmd->log_ctrl |= ICE_AQC_FW_LOG_UART_EN;

			buf = data;
			len = sizeof(*data) * chgs;
			desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
		}
	}

	status = ice_aq_send_cmd(hw, &desc, buf, len, NULL);
	if (!status) {
		/* Update the current configuration to reflect events enabled.
		 * hw->fw_log.cq_en and hw->fw_log.uart_en indicate if the FW
		 * logging mode is enabled for the device. They do not reflect
		 * actual modules being enabled to emit log messages. So, their
		 * values remain unchanged even when all modules are disabled.
		 */
		u16 cnt = enable ? chgs : (u16)ICE_AQC_FW_LOG_ID_MAX;

		hw->fw_log.actv_evnts = actv_evnts;
		for (i = 0; i < cnt; i++) {
			u16 v, m;

			if (!enable) {
				/* When disabling all FW logging events as part
				 * of device's de-initialization, the original
				 * configurations are retained, and can be used
				 * to reconfigure FW logging later if the device
				 * is re-initialized.
				 */
				hw->fw_log.evnts[i].cur = 0;
				continue;
			}

			v = le16_to_cpu(data[i]);
			m = (v & ICE_AQC_FW_LOG_ID_M) >> ICE_AQC_FW_LOG_ID_S;
			hw->fw_log.evnts[m].cur = hw->fw_log.evnts[m].cfg;
		}
	}

out:
	if (data)
		devm_kfree(ice_hw_to_dev(hw), data);

	return status;
}
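
/*
 * Illustrative configuration sketch (an editorial example; the module ID
 * and event bits chosen are assumptions): to enable FW logging on the Rx CQ
 * for one module, a caller would set the controls described above before
 * the call, e.g.
 *
 *	hw->fw_log.cq_en = true;
 *	hw->fw_log.evnts[ICE_AQC_FW_LOG_ID_GENERAL].cfg = events;
 *	status = ice_cfg_fw_log(hw, true);
 *
 * where "events" holds the desired event-severity bits for that module.
 * Calling ice_cfg_fw_log(hw, false) during teardown leaves the .cfg values
 * intact for a later re-enable.
 */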

/**
 * ice_output_fw_log
 * @hw: pointer to the HW struct
 * @desc: pointer to the AQ message descriptor
 * @buf: pointer to the buffer accompanying the AQ message
 *
 * Formats a FW Log message and outputs it via the standard driver logs.
 */
void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf)
{
	ice_debug(hw, ICE_DBG_FW_LOG, "[ FW Log Msg Start ]\n");
	ice_debug_array(hw, ICE_DBG_FW_LOG, 16, 1, (u8 *)buf,
			le16_to_cpu(desc->datalen));
	ice_debug(hw, ICE_DBG_FW_LOG, "[ FW Log Msg End ]\n");
}

/**
 * ice_get_itr_intrl_gran
 * @hw: pointer to the HW struct
 *
 * Determines the ITR/INTRL granularities based on the maximum aggregate
 * bandwidth according to the device's configuration during power-on.
 */
static void ice_get_itr_intrl_gran(struct ice_hw *hw)
{
	u8 max_agg_bw = (rd32(hw, GL_PWR_MODE_CTL) &
			 GL_PWR_MODE_CTL_CAR_MAX_BW_M) >>
			GL_PWR_MODE_CTL_CAR_MAX_BW_S;

	switch (max_agg_bw) {
	case ICE_MAX_AGG_BW_200G:
	case ICE_MAX_AGG_BW_100G:
	case ICE_MAX_AGG_BW_50G:
		hw->itr_gran = ICE_ITR_GRAN_ABOVE_25;
		hw->intrl_gran = ICE_INTRL_GRAN_ABOVE_25;
		break;
	case ICE_MAX_AGG_BW_25G:
		hw->itr_gran = ICE_ITR_GRAN_MAX_25;
		hw->intrl_gran = ICE_INTRL_GRAN_MAX_25;
		break;
	}
}

/**
 * ice_init_hw - main hardware initialization routine
 * @hw: pointer to the hardware structure
 */
int ice_init_hw(struct ice_hw *hw)
{
	struct ice_aqc_get_phy_caps_data *pcaps;
	u16 mac_buf_len;
	void *mac_buf;
	int status;

	/* Set MAC type based on DeviceID */
	status = ice_set_mac_type(hw);
	if (status)
		return status;

	hw->pf_id = (u8)(rd32(hw, PF_FUNC_RID) &
			 PF_FUNC_RID_FUNC_NUM_M) >>
		PF_FUNC_RID_FUNC_NUM_S;

	status = ice_reset(hw, ICE_RESET_PFR);
	if (status)
		return status;

	ice_get_itr_intrl_gran(hw);

	status = ice_create_all_ctrlq(hw);
	if (status)
		goto err_unroll_cqinit;

	/* Enable FW logging. Not fatal if this fails. */
	status = ice_cfg_fw_log(hw, true);
	if (status)
		ice_debug(hw, ICE_DBG_INIT, "Failed to enable FW logging.\n");

	status = ice_clear_pf_cfg(hw);
	if (status)
		goto err_unroll_cqinit;

	/* Set bit to enable Flow Director filters */
	wr32(hw, PFQF_FD_ENA, PFQF_FD_ENA_FD_ENA_M);
	INIT_LIST_HEAD(&hw->fdir_list_head);

	ice_clear_pxe_mode(hw);

	status = ice_init_nvm(hw);
	if (status)
		goto err_unroll_cqinit;

	status = ice_get_caps(hw);
	if (status)
		goto err_unroll_cqinit;

	hw->port_info = devm_kzalloc(ice_hw_to_dev(hw),
				     sizeof(*hw->port_info), GFP_KERNEL);
	if (!hw->port_info) {
		status = -ENOMEM;
		goto err_unroll_cqinit;
	}

	/* set the back pointer to HW */
	hw->port_info->hw = hw;

	/* Initialize port_info struct with switch configuration data */
	status = ice_get_initial_sw_cfg(hw);
	if (status)
		goto err_unroll_alloc;

	hw->evb_veb = true;

	/* init xarray for identifying scheduling nodes uniquely */
	xa_init_flags(&hw->port_info->sched_node_ids, XA_FLAGS_ALLOC);

	/* Query the allocated resources for Tx scheduler */
	status = ice_sched_query_res_alloc(hw);
	if (status) {
		ice_debug(hw, ICE_DBG_SCHED, "Failed to get scheduler allocated resources\n");
		goto err_unroll_alloc;
	}
	ice_sched_get_psm_clk_freq(hw);

	/* Initialize port_info struct with scheduler data */
	status = ice_sched_init_port(hw->port_info);
	if (status)
		goto err_unroll_sched;

	pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
	if (!pcaps) {
		status = -ENOMEM;
		goto err_unroll_sched;
	}

	/* Initialize port_info struct with PHY capabilities */
	status = ice_aq_get_phy_caps(hw->port_info, false,
				     ICE_AQC_REPORT_TOPO_CAP_MEDIA, pcaps,
				     NULL);
	devm_kfree(ice_hw_to_dev(hw), pcaps);
	if (status)
		dev_warn(ice_hw_to_dev(hw), "Get PHY capabilities failed status = %d, continuing anyway\n",
			 status);

	/* Initialize port_info struct with link information */
	status = ice_aq_get_link_info(hw->port_info, false, NULL, NULL);
	if (status)
		goto err_unroll_sched;

	/* need a valid SW entry point to build a Tx tree */
	if (!hw->sw_entry_point_layer) {
		ice_debug(hw, ICE_DBG_SCHED, "invalid sw entry point\n");
		status = -EIO;
		goto err_unroll_sched;
	}
	INIT_LIST_HEAD(&hw->agg_list);
	/* Initialize max burst size */
	if (!hw->max_burst_size)
		ice_cfg_rl_burst_size(hw, ICE_SCHED_DFLT_BURST_SIZE);

	status = ice_init_fltr_mgmt_struct(hw);
	if (status)
		goto err_unroll_sched;

	/* Get MAC information */
	/* A single port can report up to two (LAN and WoL) addresses */
	mac_buf = devm_kcalloc(ice_hw_to_dev(hw), 2,
			       sizeof(struct ice_aqc_manage_mac_read_resp),
			       GFP_KERNEL);
	mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);

	if (!mac_buf) {
		status = -ENOMEM;
		goto err_unroll_fltr_mgmt_struct;
	}

	status = ice_aq_manage_mac_read(hw, mac_buf, mac_buf_len, NULL);
	devm_kfree(ice_hw_to_dev(hw), mac_buf);

	if (status)
		goto err_unroll_fltr_mgmt_struct;
	/* enable jumbo frame support at MAC level */
	status = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL);
	if (status)
		goto err_unroll_fltr_mgmt_struct;
	/* Obtain counter base index which would be used by flow director */
	status = ice_alloc_fd_res_cntr(hw, &hw->fd_ctr_base);
	if (status)
		goto err_unroll_fltr_mgmt_struct;
	status = ice_init_hw_tbls(hw);
	if (status)
		goto err_unroll_fltr_mgmt_struct;
	mutex_init(&hw->tnl_lock);
	return 0;

err_unroll_fltr_mgmt_struct:
	ice_cleanup_fltr_mgmt_struct(hw);
err_unroll_sched:
	ice_sched_cleanup_all(hw);
err_unroll_alloc:
	devm_kfree(ice_hw_to_dev(hw), hw->port_info);
err_unroll_cqinit:
	ice_destroy_all_ctrlq(hw);
	return status;
}

/**
 * ice_deinit_hw - unroll initialization operations done by ice_init_hw
 * @hw: pointer to the hardware structure
 *
 * This should be called only during nominal operation, not as a result of
 * ice_init_hw() failing since ice_init_hw() will take care of unrolling
 * applicable initializations if it fails for any reason.
 */
void ice_deinit_hw(struct ice_hw *hw)
{
	ice_free_fd_res_cntr(hw, hw->fd_ctr_base);
	ice_cleanup_fltr_mgmt_struct(hw);

	ice_sched_cleanup_all(hw);
	ice_sched_clear_agg(hw);
	ice_free_seg(hw);
	ice_free_hw_tbls(hw);
	mutex_destroy(&hw->tnl_lock);

	if (hw->port_info) {
		devm_kfree(ice_hw_to_dev(hw), hw->port_info);
		hw->port_info = NULL;
	}

	/* Attempt to disable FW logging before shutting down control queues */
	ice_cfg_fw_log(hw, false);
	ice_destroy_all_ctrlq(hw);

	/* Clear VSI contexts if not already cleared */
	ice_clear_all_vsi_ctx(hw);
}
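
/*
 * Illustrative pairing note (an editorial assumption): a PF driver typically
 * calls ice_init_hw(hw) once during probe and, when that succeeded, undoes
 * it with ice_deinit_hw(hw) during remove. A failed ice_init_hw() already
 * unrolled its own work, so ice_deinit_hw() must not be called in that case.
 */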

/**
 * ice_check_reset - Check to see if a global reset is complete
 * @hw: pointer to the hardware structure
 */
int ice_check_reset(struct ice_hw *hw)
{
	u32 cnt, reg = 0, grst_timeout, uld_mask;

	/* Poll for Device Active state in case a recent CORER, GLOBR,
	 * or EMPR has occurred. The grst delay value is in 100ms units.
	 * Add 1sec for outstanding AQ commands that can take a long time.
	 */
	grst_timeout = ((rd32(hw, GLGEN_RSTCTL) & GLGEN_RSTCTL_GRSTDEL_M) >>
			GLGEN_RSTCTL_GRSTDEL_S) + 10;

	for (cnt = 0; cnt < grst_timeout; cnt++) {
		mdelay(100);
		reg = rd32(hw, GLGEN_RSTAT);
		if (!(reg & GLGEN_RSTAT_DEVSTATE_M))
			break;
	}

	if (cnt == grst_timeout) {
		ice_debug(hw, ICE_DBG_INIT, "Global reset polling failed to complete.\n");
		return -EIO;
	}

#define ICE_RESET_DONE_MASK	(GLNVM_ULD_PCIER_DONE_M |\
				 GLNVM_ULD_PCIER_DONE_1_M |\
				 GLNVM_ULD_CORER_DONE_M |\
				 GLNVM_ULD_GLOBR_DONE_M |\
				 GLNVM_ULD_POR_DONE_M |\
				 GLNVM_ULD_POR_DONE_1_M |\
				 GLNVM_ULD_PCIER_DONE_2_M)

	uld_mask = ICE_RESET_DONE_MASK | (hw->func_caps.common_cap.rdma ?
					  GLNVM_ULD_PE_DONE_M : 0);

	/* Device is Active; check Global Reset processes are done */
	for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
		reg = rd32(hw, GLNVM_ULD) & uld_mask;
		if (reg == uld_mask) {
			ice_debug(hw, ICE_DBG_INIT, "Global reset processes done. %d\n", cnt);
			break;
		}
		mdelay(10);
	}

	if (cnt == ICE_PF_RESET_WAIT_COUNT) {
		ice_debug(hw, ICE_DBG_INIT, "Wait for Reset Done timed out. GLNVM_ULD = 0x%x\n",
			  reg);
		return -EIO;
	}

	return 0;
}

/**
 * ice_pf_reset - Reset the PF
 * @hw: pointer to the hardware structure
 *
 * If a global reset has been triggered, this function checks
 * for its completion and then issues the PF reset
 */
static int ice_pf_reset(struct ice_hw *hw)
{
	u32 cnt, reg;

	/* If at function entry a global reset was already in progress, i.e.
	 * state is not 'device active' or any of the reset done bits are not
	 * set in GLNVM_ULD, there is no need for a PF Reset; poll until the
	 * global reset is done.
	 */
	if ((rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_DEVSTATE_M) ||
	    (rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK) ^ ICE_RESET_DONE_MASK) {
		/* poll on global reset currently in progress until done */
		if (ice_check_reset(hw))
			return -EIO;

		return 0;
	}

	/* Reset the PF */
	reg = rd32(hw, PFGEN_CTRL);

	wr32(hw, PFGEN_CTRL, (reg | PFGEN_CTRL_PFSWR_M));

	/* Wait for the PFR to complete. The wait time is the global config lock
	 * timeout plus the PFR timeout which will account for a possible reset
	 * that is occurring during a download package operation.
	 */
	for (cnt = 0; cnt < ICE_GLOBAL_CFG_LOCK_TIMEOUT +
	     ICE_PF_RESET_WAIT_COUNT; cnt++) {
		reg = rd32(hw, PFGEN_CTRL);
		if (!(reg & PFGEN_CTRL_PFSWR_M))
			break;

		mdelay(1);
	}

	if (cnt == ICE_GLOBAL_CFG_LOCK_TIMEOUT + ICE_PF_RESET_WAIT_COUNT) {
		ice_debug(hw, ICE_DBG_INIT, "PF reset polling failed to complete.\n");
		return -EIO;
	}

	return 0;
}

/**
 * ice_reset - Perform different types of reset
 * @hw: pointer to the hardware structure
 * @req: reset request
 *
 * This function triggers a reset as specified by the req parameter.
 *
 * Note:
 * If anything other than a PF reset is triggered, PXE mode is restored.
 * This has to be cleared using ice_clear_pxe_mode again, once the AQ
 * interface has been restored in the rebuild flow.
 */
int ice_reset(struct ice_hw *hw, enum ice_reset_req req)
{
	u32 val = 0;

	switch (req) {
	case ICE_RESET_PFR:
		return ice_pf_reset(hw);
	case ICE_RESET_CORER:
		ice_debug(hw, ICE_DBG_INIT, "CoreR requested\n");
		val = GLGEN_RTRIG_CORER_M;
		break;
	case ICE_RESET_GLOBR:
		ice_debug(hw, ICE_DBG_INIT, "GlobalR requested\n");
		val = GLGEN_RTRIG_GLOBR_M;
		break;
	default:
		return -EINVAL;
	}

	val |= rd32(hw, GLGEN_RTRIG);
	wr32(hw, GLGEN_RTRIG, val);
	ice_flush(hw);

	/* wait for the FW to be ready */
	return ice_check_reset(hw);
}

/**
 * ice_copy_rxq_ctx_to_hw
 * @hw: pointer to the hardware structure
 * @ice_rxq_ctx: pointer to the rxq context
 * @rxq_index: the index of the Rx queue
 *
 * Copies rxq context from dense structure to HW register space
 */
static int
ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
{
	u8 i;

	if (!ice_rxq_ctx)
		return -EINVAL;

	if (rxq_index > QRX_CTRL_MAX_INDEX)
		return -EINVAL;

	/* Copy each dword separately to HW */
	for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
		wr32(hw, QRX_CONTEXT(i, rxq_index),
		     *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));

		ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i,
			  *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
	}

	return 0;
}

/* LAN Rx Queue Context */
static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
	/* Field		Width	LSB */
	ICE_CTX_STORE(ice_rlan_ctx, head,		13,	0),
	ICE_CTX_STORE(ice_rlan_ctx, cpuid,		8,	13),
	ICE_CTX_STORE(ice_rlan_ctx, base,		57,	32),
	ICE_CTX_STORE(ice_rlan_ctx, qlen,		13,	89),
	ICE_CTX_STORE(ice_rlan_ctx, dbuf,		7,	102),
	ICE_CTX_STORE(ice_rlan_ctx, hbuf,		5,	109),
	ICE_CTX_STORE(ice_rlan_ctx, dtype,		2,	114),
	ICE_CTX_STORE(ice_rlan_ctx, dsize,		1,	116),
	ICE_CTX_STORE(ice_rlan_ctx, crcstrip,		1,	117),
	ICE_CTX_STORE(ice_rlan_ctx, l2tsel,		1,	119),
	ICE_CTX_STORE(ice_rlan_ctx, hsplit_0,		4,	120),
	ICE_CTX_STORE(ice_rlan_ctx, hsplit_1,		2,	124),
	ICE_CTX_STORE(ice_rlan_ctx, showiv,		1,	127),
	ICE_CTX_STORE(ice_rlan_ctx, rxmax,		14,	174),
	ICE_CTX_STORE(ice_rlan_ctx, tphrdesc_ena,	1,	193),
	ICE_CTX_STORE(ice_rlan_ctx, tphwdesc_ena,	1,	194),
	ICE_CTX_STORE(ice_rlan_ctx, tphdata_ena,	1,	195),
	ICE_CTX_STORE(ice_rlan_ctx, tphhead_ena,	1,	196),
	ICE_CTX_STORE(ice_rlan_ctx, lrxqthresh,		3,	198),
	ICE_CTX_STORE(ice_rlan_ctx, prefena,		1,	201),
	{ 0 }
};

/**
 * ice_write_rxq_ctx
 * @hw: pointer to the hardware structure
 * @rlan_ctx: pointer to the rxq context
 * @rxq_index: the index of the Rx queue
 *
 * Converts rxq context from sparse to dense structure and then writes
 * it to HW register space and enables the hardware to prefetch descriptors
 * instead of only fetching them on demand
 */
int
ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
		  u32 rxq_index)
{
	u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 };

	if (!rlan_ctx)
		return -EINVAL;

	rlan_ctx->prefena = 1;

	ice_set_ctx(hw, (u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info);
	return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index);
}
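
/*
 * Illustrative usage sketch (an editorial example modeled on how the Rx
 * setup path programs a queue; ICE_RLAN_BASE_S and the field values are
 * assumptions for the example). Each ICE_CTX_STORE entry above gives a
 * field's bit width and LSB position in the dense context, e.g. qlen
 * occupies bits 89..101.
 *
 *	struct ice_rlan_ctx rlan_ctx = {};
 *
 *	rlan_ctx.base = ring_dma >> ICE_RLAN_BASE_S;	(128-byte aligned base)
 *	rlan_ctx.qlen = ring_count;
 *	rlan_ctx.dbuf = rx_buf_len >> 7;		(in 128-byte units)
 *	err = ice_write_rxq_ctx(hw, &rlan_ctx, pf_q);
 */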

/* LAN Tx Queue Context */
const struct ice_ctx_ele ice_tlan_ctx_info[] = {
	/* Field			Width	LSB */
	ICE_CTX_STORE(ice_tlan_ctx, base,			57,	0),
	ICE_CTX_STORE(ice_tlan_ctx, port_num,			3,	57),
	ICE_CTX_STORE(ice_tlan_ctx, cgd_num,			5,	60),
	ICE_CTX_STORE(ice_tlan_ctx, pf_num,			3,	65),
	ICE_CTX_STORE(ice_tlan_ctx, vmvf_num,			10,	68),
	ICE_CTX_STORE(ice_tlan_ctx, vmvf_type,			2,	78),
	ICE_CTX_STORE(ice_tlan_ctx, src_vsi,			10,	80),
	ICE_CTX_STORE(ice_tlan_ctx, tsyn_ena,			1,	90),
	ICE_CTX_STORE(ice_tlan_ctx, internal_usage_flag,	1,	91),
	ICE_CTX_STORE(ice_tlan_ctx, alt_vlan,			1,	92),
	ICE_CTX_STORE(ice_tlan_ctx, cpuid,			8,	93),
	ICE_CTX_STORE(ice_tlan_ctx, wb_mode,			1,	101),
	ICE_CTX_STORE(ice_tlan_ctx, tphrd_desc,			1,	102),
	ICE_CTX_STORE(ice_tlan_ctx, tphrd,			1,	103),
	ICE_CTX_STORE(ice_tlan_ctx, tphwr_desc,			1,	104),
	ICE_CTX_STORE(ice_tlan_ctx, cmpq_id,			9,	105),
	ICE_CTX_STORE(ice_tlan_ctx, qnum_in_func,		14,	114),
	ICE_CTX_STORE(ice_tlan_ctx, itr_notification_mode,	1,	128),
	ICE_CTX_STORE(ice_tlan_ctx, adjust_prof_id,		6,	129),
	ICE_CTX_STORE(ice_tlan_ctx, qlen,			13,	135),
	ICE_CTX_STORE(ice_tlan_ctx, quanta_prof_idx,		4,	148),
	ICE_CTX_STORE(ice_tlan_ctx, tso_ena,			1,	152),
	ICE_CTX_STORE(ice_tlan_ctx, tso_qnum,			11,	153),
	ICE_CTX_STORE(ice_tlan_ctx, legacy_int,			1,	164),
	ICE_CTX_STORE(ice_tlan_ctx, drop_ena,			1,	165),
	ICE_CTX_STORE(ice_tlan_ctx, cache_prof_idx,		2,	166),
	ICE_CTX_STORE(ice_tlan_ctx, pkt_shaper_prof_idx,	3,	168),
	ICE_CTX_STORE(ice_tlan_ctx, int_q_state,		122,	171),
	{ 0 }
};

/* Sideband Queue command wrappers */

/**
 * ice_sbq_send_cmd - send Sideband Queue command to Sideband Queue
 * @hw: pointer to the HW struct
 * @desc: descriptor describing the command
 * @buf: buffer to use for indirect commands (NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (0 for direct commands)
 * @cd: pointer to command details structure
 */
static int
ice_sbq_send_cmd(struct ice_hw *hw, struct ice_sbq_cmd_desc *desc,
		 void *buf, u16 buf_size, struct ice_sq_cd *cd)
{
	return ice_sq_send_cmd(hw, ice_get_sbq(hw),
			       (struct ice_aq_desc *)desc, buf, buf_size, cd);
}

/**
 * ice_sbq_rw_reg - Fill Sideband Queue command
 * @hw: pointer to the HW struct
 * @in: message info to be filled in descriptor
 */
int ice_sbq_rw_reg(struct ice_hw *hw, struct ice_sbq_msg_input *in)
{
	struct ice_sbq_cmd_desc desc = {0};
	struct ice_sbq_msg_req msg = {0};
	u16 msg_len;
	int status;

	msg_len = sizeof(msg);

	msg.dest_dev = in->dest_dev;
	msg.opcode = in->opcode;
	msg.flags = ICE_SBQ_MSG_FLAGS;
	msg.sbe_fbe = ICE_SBQ_MSG_SBE_FBE;
	msg.msg_addr_low = cpu_to_le16(in->msg_addr_low);
	msg.msg_addr_high = cpu_to_le32(in->msg_addr_high);

	if (in->opcode)
		msg.data = cpu_to_le32(in->data);
	else
		/* data read comes back in completion, so shorten the struct by
		 * sizeof(msg.data)
		 */
		msg_len -= sizeof(msg.data);

	desc.flags = cpu_to_le16(ICE_AQ_FLAG_RD);
	desc.opcode = cpu_to_le16(ice_sbq_opc_neigh_dev_req);
	desc.param0.cmd_len = cpu_to_le16(msg_len);
	status = ice_sbq_send_cmd(hw, &desc, &msg, msg_len, NULL);
	if (!status && !in->opcode)
		in->data = le32_to_cpu
			(((struct ice_sbq_msg_cmpl *)&msg)->data);
	return status;
}
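
/*
 * Illustrative read sketch (an editorial example; the destination device
 * and register address are assumptions). A zero opcode requests a read, so
 * the result comes back in in->data:
 *
 *	struct ice_sbq_msg_input in = {};
 *
 *	in.dest_dev = rmn_0;
 *	in.opcode = ice_sbq_msg_rd;
 *	in.msg_addr_low = lower_16_bits(reg_addr);
 *	in.msg_addr_high = upper_16_bits(reg_addr);
 *	err = ice_sbq_rw_reg(hw, &in);
 *	if (!err)
 *		val = in.data;
 */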

/* FW Admin Queue command wrappers */

/* Software lock/mutex that is meant to be held while the Global Config Lock
 * in firmware is acquired by the software to prevent most (but not all) types
 * of AQ commands from being sent to FW
 */
DEFINE_MUTEX(ice_global_cfg_lock_sw);

/**
 * ice_should_retry_sq_send_cmd
 * @opcode: AQ opcode
 *
 * Decide if we should retry the send command routine for the ATQ, depending
 * on the opcode.
 */
static bool ice_should_retry_sq_send_cmd(u16 opcode)
{
	switch (opcode) {
	case ice_aqc_opc_get_link_topo:
	case ice_aqc_opc_lldp_stop:
	case ice_aqc_opc_lldp_start:
	case ice_aqc_opc_lldp_filter_ctrl:
		return true;
	}

	return false;
}

/**
 * ice_sq_send_cmd_retry - send command to Control Queue (ATQ)
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 * @desc: prefilled descriptor describing the command
 * @buf: buffer to use for indirect commands (or NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (or 0 for direct commands)
 * @cd: pointer to command details structure
 *
 * Retry sending the FW Admin Queue command, multiple times, to the FW Admin
 * Queue if the EBUSY AQ error is returned.
 */
static int
ice_sq_send_cmd_retry(struct ice_hw *hw, struct ice_ctl_q_info *cq,
		      struct ice_aq_desc *desc, void *buf, u16 buf_size,
		      struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc_cpy;
	bool is_cmd_for_retry;
	u8 *buf_cpy = NULL;
	u8 idx = 0;
	u16 opcode;
	int status;

	opcode = le16_to_cpu(desc->opcode);
	is_cmd_for_retry = ice_should_retry_sq_send_cmd(opcode);
	memset(&desc_cpy, 0, sizeof(desc_cpy));

	if (is_cmd_for_retry) {
		if (buf) {
			buf_cpy = kzalloc(buf_size, GFP_KERNEL);
			if (!buf_cpy)
				return -ENOMEM;
		}

		memcpy(&desc_cpy, desc, sizeof(desc_cpy));
	}

	do {
		status = ice_sq_send_cmd(hw, cq, desc, buf, buf_size, cd);

		if (!is_cmd_for_retry || !status ||
		    hw->adminq.sq_last_status != ICE_AQ_RC_EBUSY)
			break;

		if (buf_cpy)
			memcpy(buf, buf_cpy, buf_size);

		memcpy(desc, &desc_cpy, sizeof(desc_cpy));

		mdelay(ICE_SQ_SEND_DELAY_TIME_MS);

	} while (++idx < ICE_SQ_SEND_MAX_EXECUTE);

	kfree(buf_cpy);

	return status;
}

/**
 * ice_aq_send_cmd - send FW Admin Queue command to FW Admin Queue
 * @hw: pointer to the HW struct
 * @desc: descriptor describing the command
 * @buf: buffer to use for indirect commands (NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (0 for direct commands)
 * @cd: pointer to command details structure
 *
 * Helper function to send FW Admin Queue commands to the FW Admin Queue.
 */
int
ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
		u16 buf_size, struct ice_sq_cd *cd)
{
	struct ice_aqc_req_res *cmd = &desc->params.res_owner;
	bool lock_acquired = false;
	int status;

	/* When a package download is in process (i.e. when the firmware's
	 * Global Configuration Lock resource is held), only the Download
	 * Package, Get Version, Get Package Info List, Upload Section,
	 * Update Package, Set Port Parameters, Get/Set VLAN Mode Parameters,
	 * Add Recipe, Set Recipes to Profile Association, Get Recipe, Get
	 * Recipes to Profile Association, and Release Resource (with resource
	 * ID set to Global Config Lock) AdminQ commands are allowed; all others
	 * must block until the package download completes and the Global Config
	 * Lock is released. See also ice_acquire_global_cfg_lock().
	 */
	switch (le16_to_cpu(desc->opcode)) {
	case ice_aqc_opc_download_pkg:
	case ice_aqc_opc_get_pkg_info_list:
	case ice_aqc_opc_get_ver:
	case ice_aqc_opc_upload_section:
	case ice_aqc_opc_update_pkg:
	case ice_aqc_opc_set_port_params:
	case ice_aqc_opc_get_vlan_mode_parameters:
	case ice_aqc_opc_set_vlan_mode_parameters:
	case ice_aqc_opc_add_recipe:
	case ice_aqc_opc_recipe_to_profile:
	case ice_aqc_opc_get_recipe:
	case ice_aqc_opc_get_recipe_to_profile:
		break;
	case ice_aqc_opc_release_res:
		if (le16_to_cpu(cmd->res_id) == ICE_AQC_RES_ID_GLBL_LOCK)
			break;
		fallthrough;
	default:
		mutex_lock(&ice_global_cfg_lock_sw);
		lock_acquired = true;
		break;
	}

	status = ice_sq_send_cmd_retry(hw, &hw->adminq, desc, buf, buf_size, cd);
	if (lock_acquired)
		mutex_unlock(&ice_global_cfg_lock_sw);

	return status;
}

/**
 * ice_aq_get_fw_ver
 * @hw: pointer to the HW struct
 * @cd: pointer to command details structure or NULL
 *
 * Get the firmware version (0x0001) from the admin queue commands
 */
int ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd)
{
	struct ice_aqc_get_ver *resp;
	struct ice_aq_desc desc;
	int status;

	resp = &desc.params.get_ver;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver);

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);

	if (!status) {
		hw->fw_branch = resp->fw_branch;
		hw->fw_maj_ver = resp->fw_major;
		hw->fw_min_ver = resp->fw_minor;
		hw->fw_patch = resp->fw_patch;
		hw->fw_build = le32_to_cpu(resp->fw_build);
		hw->api_branch = resp->api_branch;
		hw->api_maj_ver = resp->api_major;
		hw->api_min_ver = resp->api_minor;
		hw->api_patch = resp->api_patch;
	}

	return status;
}

/**
 * ice_aq_send_driver_ver
 * @hw: pointer to the HW struct
 * @dv: driver's major, minor version
 * @cd: pointer to command details structure or NULL
 *
 * Send the driver version (0x0002) to the firmware
 */
int
ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv,
		       struct ice_sq_cd *cd)
{
	struct ice_aqc_driver_ver *cmd;
	struct ice_aq_desc desc;
	u16 len;

	cmd = &desc.params.driver_ver;

	if (!dv)
		return -EINVAL;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_ver);

	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
	cmd->major_ver = dv->major_ver;
	cmd->minor_ver = dv->minor_ver;
	cmd->build_ver = dv->build_ver;
	cmd->subbuild_ver = dv->subbuild_ver;

	len = 0;
	while (len < sizeof(dv->driver_string) &&
	       isascii(dv->driver_string[len]) && dv->driver_string[len])
		len++;

	return ice_aq_send_cmd(hw, &desc, dv->driver_string, len, cd);
}

/**
 * ice_aq_q_shutdown
 * @hw: pointer to the HW struct
 * @unloading: is the driver unloading itself
 *
 * Tell the Firmware that we're shutting down the AdminQ and whether
 * or not the driver is unloading as well (0x0003).
 */
int ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
{
	struct ice_aqc_q_shutdown *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.q_shutdown;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_q_shutdown);

	if (unloading)
		cmd->driver_unloading = ICE_AQC_DRIVER_UNLOADING;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_aq_req_res
 * @hw: pointer to the HW struct
 * @res: resource ID
 * @access: access type
 * @sdp_number: resource number
 * @timeout: the maximum time in ms that the driver may hold the resource
 * @cd: pointer to command details structure or NULL
 *
 * Requests common resource using the admin queue commands (0x0008).
 * When attempting to acquire the Global Config Lock, the driver can
 * learn of three states:
 * 1) 0 -         acquired lock, and can perform download package
 * 2) -EIO -      did not get lock, driver should fail to load
 * 3) -EALREADY - did not get lock, but another driver has
 *                successfully downloaded the package; the driver does
 *                not have to download the package and can continue
 *                loading
 *
 * Note that if the caller is in an acquire lock, perform action, release lock
 * phase of operation, it is possible that the FW may detect a timeout and issue
 * a CORER. In this case, the driver will receive a CORER interrupt and will
 * have to determine its cause. The calling thread that is handling this flow
 * will likely get an error propagated back to it indicating the Download
 * Package, Update Package or the Release Resource AQ commands timed out.
 */
static int
ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
	       enum ice_aq_res_access_type access, u8 sdp_number, u32 *timeout,
	       struct ice_sq_cd *cd)
{
	struct ice_aqc_req_res *cmd_resp;
	struct ice_aq_desc desc;
	int status;

	cmd_resp = &desc.params.res_owner;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_req_res);

	cmd_resp->res_id = cpu_to_le16(res);
	cmd_resp->access_type = cpu_to_le16(access);
	cmd_resp->res_number = cpu_to_le32(sdp_number);
	cmd_resp->timeout = cpu_to_le32(*timeout);
	*timeout = 0;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);

	/* The completion specifies the maximum time in ms that the driver
	 * may hold the resource in the Timeout field.
	 */

	/* Global config lock response utilizes an additional status field.
	 *
	 * If the Global config lock resource is held by some other driver, the
	 * command completes with ICE_AQ_RES_GLBL_IN_PROG in the status field
	 * and the timeout field indicates the maximum time the current owner
	 * of the resource has to free it.
	 */
	if (res == ICE_GLOBAL_CFG_LOCK_RES_ID) {
		if (le16_to_cpu(cmd_resp->status) == ICE_AQ_RES_GLBL_SUCCESS) {
			*timeout = le32_to_cpu(cmd_resp->timeout);
			return 0;
		} else if (le16_to_cpu(cmd_resp->status) ==
			   ICE_AQ_RES_GLBL_IN_PROG) {
			*timeout = le32_to_cpu(cmd_resp->timeout);
			return -EIO;
		} else if (le16_to_cpu(cmd_resp->status) ==
			   ICE_AQ_RES_GLBL_DONE) {
			return -EALREADY;
		}

		/* invalid FW response, force a timeout immediately */
		*timeout = 0;
		return -EIO;
	}

	/* If the resource is held by some other driver, the command completes
	 * with a busy return value and the timeout field indicates the maximum
	 * time the current owner of the resource has to free it.
	 */
	if (!status || hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY)
		*timeout = le32_to_cpu(cmd_resp->timeout);

	return status;
}
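
/*
 * Illustrative handling sketch for the three Global Config Lock outcomes
 * described above, as seen through ice_acquire_res() below (an editorial
 * example modeled on the package download flow; not upstream code):
 *
 *	status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID, ICE_RES_WRITE,
 *				 ICE_GLOBAL_CFG_LOCK_TIMEOUT);
 *	if (!status)
 *		... lock held: download the package, then release the lock ...
 *	else if (status == -EALREADY)
 *		... another PF already downloaded it: continue loading ...
 *	else
 *		... treat as fatal: the lock could not be acquired ...
 */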
1850 */
1851 if (res == ICE_GLOBAL_CFG_LOCK_RES_ID) {
1852 if (le16_to_cpu(cmd_resp->status) == ICE_AQ_RES_GLBL_SUCCESS) {
1853 *timeout = le32_to_cpu(cmd_resp->timeout);
1854 return 0;
1855 } else if (le16_to_cpu(cmd_resp->status) ==
1856 ICE_AQ_RES_GLBL_IN_PROG) {
1857 *timeout = le32_to_cpu(cmd_resp->timeout);
1858 return -EIO;
1859 } else if (le16_to_cpu(cmd_resp->status) ==
1860 ICE_AQ_RES_GLBL_DONE) {
1861 return -EALREADY;
1862 }
1863 
1864 /* invalid FW response, force a timeout immediately */
1865 *timeout = 0;
1866 return -EIO;
1867 }
1868 
1869 /* If the resource is held by some other driver, the command completes
1870 * with a busy return value and the timeout field indicates the maximum
1871 * time the current owner of the resource has to free it.
1872 */
1873 if (!status || hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY)
1874 *timeout = le32_to_cpu(cmd_resp->timeout);
1875 
1876 return status;
1877 }
1878 
1879 /**
1880 * ice_aq_release_res
1881 * @hw: pointer to the HW struct
1882 * @res: resource ID
1883 * @sdp_number: resource number
1884 * @cd: pointer to command details structure or NULL
1885 *
1886 * release common resource using the admin queue commands (0x0009)
1887 */
1888 static int
1889 ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number,
1890 struct ice_sq_cd *cd)
1891 {
1892 struct ice_aqc_req_res *cmd;
1893 struct ice_aq_desc desc;
1894 
1895 cmd = &desc.params.res_owner;
1896 
1897 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_release_res);
1898 
1899 cmd->res_id = cpu_to_le16(res);
1900 cmd->res_number = cpu_to_le32(sdp_number);
1901 
1902 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1903 }
1904 
1905 /**
1906 * ice_acquire_res
1907 * @hw: pointer to the HW structure
1908 * @res: resource ID
1909 * @access: access type (read or write)
1910 * @timeout: timeout in milliseconds
1911 *
1912 * This function will attempt to acquire the ownership of a resource.
1913 */
1914 int
1915 ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
1916 enum ice_aq_res_access_type access, u32 timeout)
1917 {
1918 #define ICE_RES_POLLING_DELAY_MS 10
1919 u32 delay = ICE_RES_POLLING_DELAY_MS;
1920 u32 time_left = timeout;
1921 int status;
1922 
1923 status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);
1924 
1925 /* A return code of -EALREADY means that another driver has
1926 * previously acquired the resource and performed any necessary updates;
1927 * in this case the caller does not obtain the resource and has no
1928 * further work to do.
1929 */
1930 if (status == -EALREADY)
1931 goto ice_acquire_res_exit;
1932 
1933 if (status)
1934 ice_debug(hw, ICE_DBG_RES, "resource %d acquire type %d failed.\n", res, access);
1935 
1936 /* If necessary, poll until the current lock owner times out */
1937 timeout = time_left;
1938 while (status && timeout && time_left) {
1939 mdelay(delay);
1940 timeout = (timeout > delay) ?
timeout - delay : 0; 1941 status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL); 1942 1943 if (status == -EALREADY) 1944 /* lock free, but no work to do */ 1945 break; 1946 1947 if (!status) 1948 /* lock acquired */ 1949 break; 1950 } 1951 if (status && status != -EALREADY) 1952 ice_debug(hw, ICE_DBG_RES, "resource acquire timed out.\n"); 1953 1954 ice_acquire_res_exit: 1955 if (status == -EALREADY) { 1956 if (access == ICE_RES_WRITE) 1957 ice_debug(hw, ICE_DBG_RES, "resource indicates no work to do.\n"); 1958 else 1959 ice_debug(hw, ICE_DBG_RES, "Warning: -EALREADY not expected\n"); 1960 } 1961 return status; 1962 } 1963 1964 /** 1965 * ice_release_res 1966 * @hw: pointer to the HW structure 1967 * @res: resource ID 1968 * 1969 * This function will release a resource using the proper Admin Command. 1970 */ 1971 void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res) 1972 { 1973 u32 total_delay = 0; 1974 int status; 1975 1976 status = ice_aq_release_res(hw, res, 0, NULL); 1977 1978 /* there are some rare cases when trying to release the resource 1979 * results in an admin queue timeout, so handle them correctly 1980 */ 1981 while ((status == -EIO) && (total_delay < hw->adminq.sq_cmd_timeout)) { 1982 mdelay(1); 1983 status = ice_aq_release_res(hw, res, 0, NULL); 1984 total_delay++; 1985 } 1986 } 1987 1988 /** 1989 * ice_aq_alloc_free_res - command to allocate/free resources 1990 * @hw: pointer to the HW struct 1991 * @num_entries: number of resource entries in buffer 1992 * @buf: Indirect buffer to hold data parameters and response 1993 * @buf_size: size of buffer for indirect commands 1994 * @opc: pass in the command opcode 1995 * @cd: pointer to command details structure or NULL 1996 * 1997 * Helper function to allocate/free resources using the admin queue commands 1998 */ 1999 int 2000 ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries, 2001 struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size, 2002 enum ice_adminq_opc opc, struct ice_sq_cd *cd) 2003 { 2004 struct ice_aqc_alloc_free_res_cmd *cmd; 2005 struct ice_aq_desc desc; 2006 2007 cmd = &desc.params.sw_res_ctrl; 2008 2009 if (!buf) 2010 return -EINVAL; 2011 2012 if (buf_size < flex_array_size(buf, elem, num_entries)) 2013 return -EINVAL; 2014 2015 ice_fill_dflt_direct_cmd_desc(&desc, opc); 2016 2017 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 2018 2019 cmd->num_entries = cpu_to_le16(num_entries); 2020 2021 return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); 2022 } 2023 2024 /** 2025 * ice_alloc_hw_res - allocate resource 2026 * @hw: pointer to the HW struct 2027 * @type: type of resource 2028 * @num: number of resources to allocate 2029 * @btm: allocate from bottom 2030 * @res: pointer to array that will receive the resources 2031 */ 2032 int 2033 ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res) 2034 { 2035 struct ice_aqc_alloc_free_res_elem *buf; 2036 u16 buf_len; 2037 int status; 2038 2039 buf_len = struct_size(buf, elem, num); 2040 buf = kzalloc(buf_len, GFP_KERNEL); 2041 if (!buf) 2042 return -ENOMEM; 2043 2044 /* Prepare buffer to allocate resource. 
*/ 2045 buf->num_elems = cpu_to_le16(num); 2046 buf->res_type = cpu_to_le16(type | ICE_AQC_RES_TYPE_FLAG_DEDICATED | 2047 ICE_AQC_RES_TYPE_FLAG_IGNORE_INDEX); 2048 if (btm) 2049 buf->res_type |= cpu_to_le16(ICE_AQC_RES_TYPE_FLAG_SCAN_BOTTOM); 2050 2051 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len, 2052 ice_aqc_opc_alloc_res, NULL); 2053 if (status) 2054 goto ice_alloc_res_exit; 2055 2056 memcpy(res, buf->elem, sizeof(*buf->elem) * num); 2057 2058 ice_alloc_res_exit: 2059 kfree(buf); 2060 return status; 2061 } 2062 2063 /** 2064 * ice_free_hw_res - free allocated HW resource 2065 * @hw: pointer to the HW struct 2066 * @type: type of resource to free 2067 * @num: number of resources 2068 * @res: pointer to array that contains the resources to free 2069 */ 2070 int ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res) 2071 { 2072 struct ice_aqc_alloc_free_res_elem *buf; 2073 u16 buf_len; 2074 int status; 2075 2076 buf_len = struct_size(buf, elem, num); 2077 buf = kzalloc(buf_len, GFP_KERNEL); 2078 if (!buf) 2079 return -ENOMEM; 2080 2081 /* Prepare buffer to free resource. */ 2082 buf->num_elems = cpu_to_le16(num); 2083 buf->res_type = cpu_to_le16(type); 2084 memcpy(buf->elem, res, sizeof(*buf->elem) * num); 2085 2086 status = ice_aq_alloc_free_res(hw, num, buf, buf_len, 2087 ice_aqc_opc_free_res, NULL); 2088 if (status) 2089 ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n"); 2090 2091 kfree(buf); 2092 return status; 2093 } 2094 2095 /** 2096 * ice_get_num_per_func - determine number of resources per PF 2097 * @hw: pointer to the HW structure 2098 * @max: value to be evenly split between each PF 2099 * 2100 * Determine the number of valid functions by going through the bitmap returned 2101 * from parsing capabilities and use this to calculate the number of resources 2102 * per PF based on the max value passed in. 2103 */ 2104 static u32 ice_get_num_per_func(struct ice_hw *hw, u32 max) 2105 { 2106 u8 funcs; 2107 2108 #define ICE_CAPS_VALID_FUNCS_M 0xFF 2109 funcs = hweight8(hw->dev_caps.common_cap.valid_functions & 2110 ICE_CAPS_VALID_FUNCS_M); 2111 2112 if (!funcs) 2113 return 0; 2114 2115 return max / funcs; 2116 } 2117 2118 /** 2119 * ice_parse_common_caps - parse common device/function capabilities 2120 * @hw: pointer to the HW struct 2121 * @caps: pointer to common capabilities structure 2122 * @elem: the capability element to parse 2123 * @prefix: message prefix for tracing capabilities 2124 * 2125 * Given a capability element, extract relevant details into the common 2126 * capability structure. 2127 * 2128 * Returns: true if the capability matches one of the common capability ids, 2129 * false otherwise. 
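 *
 * Illustrative example (values hypothetical): an ICE_AQC_CAPS_RSS element
 * carrying number = 512 and logical_id = 7 is decoded below as
 * caps->rss_table_size = 512 and caps->rss_table_entry_width = 7.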
2130 */ 2131 static bool 2132 ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps, 2133 struct ice_aqc_list_caps_elem *elem, const char *prefix) 2134 { 2135 u32 logical_id = le32_to_cpu(elem->logical_id); 2136 u32 phys_id = le32_to_cpu(elem->phys_id); 2137 u32 number = le32_to_cpu(elem->number); 2138 u16 cap = le16_to_cpu(elem->cap); 2139 bool found = true; 2140 2141 switch (cap) { 2142 case ICE_AQC_CAPS_VALID_FUNCTIONS: 2143 caps->valid_functions = number; 2144 ice_debug(hw, ICE_DBG_INIT, "%s: valid_functions (bitmap) = %d\n", prefix, 2145 caps->valid_functions); 2146 break; 2147 case ICE_AQC_CAPS_SRIOV: 2148 caps->sr_iov_1_1 = (number == 1); 2149 ice_debug(hw, ICE_DBG_INIT, "%s: sr_iov_1_1 = %d\n", prefix, 2150 caps->sr_iov_1_1); 2151 break; 2152 case ICE_AQC_CAPS_DCB: 2153 caps->dcb = (number == 1); 2154 caps->active_tc_bitmap = logical_id; 2155 caps->maxtc = phys_id; 2156 ice_debug(hw, ICE_DBG_INIT, "%s: dcb = %d\n", prefix, caps->dcb); 2157 ice_debug(hw, ICE_DBG_INIT, "%s: active_tc_bitmap = %d\n", prefix, 2158 caps->active_tc_bitmap); 2159 ice_debug(hw, ICE_DBG_INIT, "%s: maxtc = %d\n", prefix, caps->maxtc); 2160 break; 2161 case ICE_AQC_CAPS_RSS: 2162 caps->rss_table_size = number; 2163 caps->rss_table_entry_width = logical_id; 2164 ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_size = %d\n", prefix, 2165 caps->rss_table_size); 2166 ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_entry_width = %d\n", prefix, 2167 caps->rss_table_entry_width); 2168 break; 2169 case ICE_AQC_CAPS_RXQS: 2170 caps->num_rxq = number; 2171 caps->rxq_first_id = phys_id; 2172 ice_debug(hw, ICE_DBG_INIT, "%s: num_rxq = %d\n", prefix, 2173 caps->num_rxq); 2174 ice_debug(hw, ICE_DBG_INIT, "%s: rxq_first_id = %d\n", prefix, 2175 caps->rxq_first_id); 2176 break; 2177 case ICE_AQC_CAPS_TXQS: 2178 caps->num_txq = number; 2179 caps->txq_first_id = phys_id; 2180 ice_debug(hw, ICE_DBG_INIT, "%s: num_txq = %d\n", prefix, 2181 caps->num_txq); 2182 ice_debug(hw, ICE_DBG_INIT, "%s: txq_first_id = %d\n", prefix, 2183 caps->txq_first_id); 2184 break; 2185 case ICE_AQC_CAPS_MSIX: 2186 caps->num_msix_vectors = number; 2187 caps->msix_vector_first_id = phys_id; 2188 ice_debug(hw, ICE_DBG_INIT, "%s: num_msix_vectors = %d\n", prefix, 2189 caps->num_msix_vectors); 2190 ice_debug(hw, ICE_DBG_INIT, "%s: msix_vector_first_id = %d\n", prefix, 2191 caps->msix_vector_first_id); 2192 break; 2193 case ICE_AQC_CAPS_PENDING_NVM_VER: 2194 caps->nvm_update_pending_nvm = true; 2195 ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_nvm\n", prefix); 2196 break; 2197 case ICE_AQC_CAPS_PENDING_OROM_VER: 2198 caps->nvm_update_pending_orom = true; 2199 ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_orom\n", prefix); 2200 break; 2201 case ICE_AQC_CAPS_PENDING_NET_VER: 2202 caps->nvm_update_pending_netlist = true; 2203 ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_netlist\n", prefix); 2204 break; 2205 case ICE_AQC_CAPS_NVM_MGMT: 2206 caps->nvm_unified_update = 2207 (number & ICE_NVM_MGMT_UNIFIED_UPD_SUPPORT) ? 
2208 true : false; 2209 ice_debug(hw, ICE_DBG_INIT, "%s: nvm_unified_update = %d\n", prefix, 2210 caps->nvm_unified_update); 2211 break; 2212 case ICE_AQC_CAPS_RDMA: 2213 caps->rdma = (number == 1); 2214 ice_debug(hw, ICE_DBG_INIT, "%s: rdma = %d\n", prefix, caps->rdma); 2215 break; 2216 case ICE_AQC_CAPS_MAX_MTU: 2217 caps->max_mtu = number; 2218 ice_debug(hw, ICE_DBG_INIT, "%s: max_mtu = %d\n", 2219 prefix, caps->max_mtu); 2220 break; 2221 case ICE_AQC_CAPS_PCIE_RESET_AVOIDANCE: 2222 caps->pcie_reset_avoidance = (number > 0); 2223 ice_debug(hw, ICE_DBG_INIT, 2224 "%s: pcie_reset_avoidance = %d\n", prefix, 2225 caps->pcie_reset_avoidance); 2226 break; 2227 case ICE_AQC_CAPS_POST_UPDATE_RESET_RESTRICT: 2228 caps->reset_restrict_support = (number == 1); 2229 ice_debug(hw, ICE_DBG_INIT, 2230 "%s: reset_restrict_support = %d\n", prefix, 2231 caps->reset_restrict_support); 2232 break; 2233 default: 2234 /* Not one of the recognized common capabilities */ 2235 found = false; 2236 } 2237 2238 return found; 2239 } 2240 2241 /** 2242 * ice_recalc_port_limited_caps - Recalculate port limited capabilities 2243 * @hw: pointer to the HW structure 2244 * @caps: pointer to capabilities structure to fix 2245 * 2246 * Re-calculate the capabilities that are dependent on the number of physical 2247 * ports; i.e. some features are not supported or function differently on 2248 * devices with more than 4 ports. 2249 */ 2250 static void 2251 ice_recalc_port_limited_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps) 2252 { 2253 /* This assumes device capabilities are always scanned before function 2254 * capabilities during the initialization flow. 2255 */ 2256 if (hw->dev_caps.num_funcs > 4) { 2257 /* Max 4 TCs per port */ 2258 caps->maxtc = 4; 2259 ice_debug(hw, ICE_DBG_INIT, "reducing maxtc to %d (based on #ports)\n", 2260 caps->maxtc); 2261 if (caps->rdma) { 2262 ice_debug(hw, ICE_DBG_INIT, "forcing RDMA off\n"); 2263 caps->rdma = 0; 2264 } 2265 2266 /* print message only when processing device capabilities 2267 * during initialization. 2268 */ 2269 if (caps == &hw->dev_caps.common_cap) 2270 dev_info(ice_hw_to_dev(hw), "RDMA functionality is not available with the current device configuration.\n"); 2271 } 2272 } 2273 2274 /** 2275 * ice_parse_vf_func_caps - Parse ICE_AQC_CAPS_VF function caps 2276 * @hw: pointer to the HW struct 2277 * @func_p: pointer to function capabilities structure 2278 * @cap: pointer to the capability element to parse 2279 * 2280 * Extract function capabilities for ICE_AQC_CAPS_VF. 2281 */ 2282 static void 2283 ice_parse_vf_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p, 2284 struct ice_aqc_list_caps_elem *cap) 2285 { 2286 u32 logical_id = le32_to_cpu(cap->logical_id); 2287 u32 number = le32_to_cpu(cap->number); 2288 2289 func_p->num_allocd_vfs = number; 2290 func_p->vf_base_id = logical_id; 2291 ice_debug(hw, ICE_DBG_INIT, "func caps: num_allocd_vfs = %d\n", 2292 func_p->num_allocd_vfs); 2293 ice_debug(hw, ICE_DBG_INIT, "func caps: vf_base_id = %d\n", 2294 func_p->vf_base_id); 2295 } 2296 2297 /** 2298 * ice_parse_vsi_func_caps - Parse ICE_AQC_CAPS_VSI function caps 2299 * @hw: pointer to the HW struct 2300 * @func_p: pointer to function capabilities structure 2301 * @cap: pointer to the capability element to parse 2302 * 2303 * Extract function capabilities for ICE_AQC_CAPS_VSI. 
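 *
 * Illustrative arithmetic (counts hypothetical): ice_get_num_per_func()
 * splits ICE_MAX_VSI evenly across the PFs set in the valid_functions
 * bitmap, so with 4 active PFs and an ICE_MAX_VSI of, say, 768, each PF
 * would be guaranteed 768 / 4 = 192 VSIs.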
2304 */ 2305 static void 2306 ice_parse_vsi_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p, 2307 struct ice_aqc_list_caps_elem *cap) 2308 { 2309 func_p->guar_num_vsi = ice_get_num_per_func(hw, ICE_MAX_VSI); 2310 ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi (fw) = %d\n", 2311 le32_to_cpu(cap->number)); 2312 ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi = %d\n", 2313 func_p->guar_num_vsi); 2314 } 2315 2316 /** 2317 * ice_parse_1588_func_caps - Parse ICE_AQC_CAPS_1588 function caps 2318 * @hw: pointer to the HW struct 2319 * @func_p: pointer to function capabilities structure 2320 * @cap: pointer to the capability element to parse 2321 * 2322 * Extract function capabilities for ICE_AQC_CAPS_1588. 2323 */ 2324 static void 2325 ice_parse_1588_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p, 2326 struct ice_aqc_list_caps_elem *cap) 2327 { 2328 struct ice_ts_func_info *info = &func_p->ts_func_info; 2329 u32 number = le32_to_cpu(cap->number); 2330 2331 info->ena = ((number & ICE_TS_FUNC_ENA_M) != 0); 2332 func_p->common_cap.ieee_1588 = info->ena; 2333 2334 info->src_tmr_owned = ((number & ICE_TS_SRC_TMR_OWND_M) != 0); 2335 info->tmr_ena = ((number & ICE_TS_TMR_ENA_M) != 0); 2336 info->tmr_index_owned = ((number & ICE_TS_TMR_IDX_OWND_M) != 0); 2337 info->tmr_index_assoc = ((number & ICE_TS_TMR_IDX_ASSOC_M) != 0); 2338 2339 info->clk_freq = (number & ICE_TS_CLK_FREQ_M) >> ICE_TS_CLK_FREQ_S; 2340 info->clk_src = ((number & ICE_TS_CLK_SRC_M) != 0); 2341 2342 if (info->clk_freq < NUM_ICE_TIME_REF_FREQ) { 2343 info->time_ref = (enum ice_time_ref_freq)info->clk_freq; 2344 } else { 2345 /* Unknown clock frequency, so assume a (probably incorrect) 2346 * default to avoid out-of-bounds look ups of frequency 2347 * related information. 2348 */ 2349 ice_debug(hw, ICE_DBG_INIT, "1588 func caps: unknown clock frequency %u\n", 2350 info->clk_freq); 2351 info->time_ref = ICE_TIME_REF_FREQ_25_000; 2352 } 2353 2354 ice_debug(hw, ICE_DBG_INIT, "func caps: ieee_1588 = %u\n", 2355 func_p->common_cap.ieee_1588); 2356 ice_debug(hw, ICE_DBG_INIT, "func caps: src_tmr_owned = %u\n", 2357 info->src_tmr_owned); 2358 ice_debug(hw, ICE_DBG_INIT, "func caps: tmr_ena = %u\n", 2359 info->tmr_ena); 2360 ice_debug(hw, ICE_DBG_INIT, "func caps: tmr_index_owned = %u\n", 2361 info->tmr_index_owned); 2362 ice_debug(hw, ICE_DBG_INIT, "func caps: tmr_index_assoc = %u\n", 2363 info->tmr_index_assoc); 2364 ice_debug(hw, ICE_DBG_INIT, "func caps: clk_freq = %u\n", 2365 info->clk_freq); 2366 ice_debug(hw, ICE_DBG_INIT, "func caps: clk_src = %u\n", 2367 info->clk_src); 2368 } 2369 2370 /** 2371 * ice_parse_fdir_func_caps - Parse ICE_AQC_CAPS_FD function caps 2372 * @hw: pointer to the HW struct 2373 * @func_p: pointer to function capabilities structure 2374 * 2375 * Extract function capabilities for ICE_AQC_CAPS_FD. 
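 *
 * Illustrative arithmetic (register values hypothetical): if GLQF_FD_SIZE
 * reports a guaranteed pool of 8192 filters and a best-effort pool of 4096
 * on a 4-PF device, each PF gets fd_fltr_guar = 8192 / 4 = 2048, while
 * fd_fltr_best_effort = 4096 describes the shared best-effort pool.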
2376 */
2377 static void
2378 ice_parse_fdir_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p)
2379 {
2380 u32 reg_val, val;
2381 
2382 reg_val = rd32(hw, GLQF_FD_SIZE);
2383 val = (reg_val & GLQF_FD_SIZE_FD_GSIZE_M) >>
2384 GLQF_FD_SIZE_FD_GSIZE_S;
2385 func_p->fd_fltr_guar =
2386 ice_get_num_per_func(hw, val);
2387 val = (reg_val & GLQF_FD_SIZE_FD_BSIZE_M) >>
2388 GLQF_FD_SIZE_FD_BSIZE_S;
2389 func_p->fd_fltr_best_effort = val;
2390 
2391 ice_debug(hw, ICE_DBG_INIT, "func caps: fd_fltr_guar = %d\n",
2392 func_p->fd_fltr_guar);
2393 ice_debug(hw, ICE_DBG_INIT, "func caps: fd_fltr_best_effort = %d\n",
2394 func_p->fd_fltr_best_effort);
2395 }
2396 
2397 /**
2398 * ice_parse_func_caps - Parse function capabilities
2399 * @hw: pointer to the HW struct
2400 * @func_p: pointer to function capabilities structure
2401 * @buf: buffer containing the function capability records
2402 * @cap_count: the number of capabilities
2403 *
2404 * Helper function to parse function (0x000A) capabilities list. For
2405 * capabilities shared between device and function, this relies on
2406 * ice_parse_common_caps.
2407 *
2408 * Loop through the list of provided capabilities and extract the relevant
2409 * data into the function capabilities structure.
2410 */
2411 static void
2412 ice_parse_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
2413 void *buf, u32 cap_count)
2414 {
2415 struct ice_aqc_list_caps_elem *cap_resp;
2416 u32 i;
2417 
2418 cap_resp = buf;
2419 
2420 memset(func_p, 0, sizeof(*func_p));
2421 
2422 for (i = 0; i < cap_count; i++) {
2423 u16 cap = le16_to_cpu(cap_resp[i].cap);
2424 bool found;
2425 
2426 found = ice_parse_common_caps(hw, &func_p->common_cap,
2427 &cap_resp[i], "func caps");
2428 
2429 switch (cap) {
2430 case ICE_AQC_CAPS_VF:
2431 ice_parse_vf_func_caps(hw, func_p, &cap_resp[i]);
2432 break;
2433 case ICE_AQC_CAPS_VSI:
2434 ice_parse_vsi_func_caps(hw, func_p, &cap_resp[i]);
2435 break;
2436 case ICE_AQC_CAPS_1588:
2437 ice_parse_1588_func_caps(hw, func_p, &cap_resp[i]);
2438 break;
2439 case ICE_AQC_CAPS_FD:
2440 ice_parse_fdir_func_caps(hw, func_p);
2441 break;
2442 default:
2443 /* Don't list common capabilities as unknown */
2444 if (!found)
2445 ice_debug(hw, ICE_DBG_INIT, "func caps: unknown capability[%d]: 0x%x\n",
2446 i, cap);
2447 break;
2448 }
2449 }
2450 
2451 ice_recalc_port_limited_caps(hw, &func_p->common_cap);
2452 }
2453 
2454 /**
2455 * ice_parse_valid_functions_cap - Parse ICE_AQC_CAPS_VALID_FUNCTIONS caps
2456 * @hw: pointer to the HW struct
2457 * @dev_p: pointer to device capabilities structure
2458 * @cap: capability element to parse
2459 *
2460 * Parse ICE_AQC_CAPS_VALID_FUNCTIONS for device capabilities.
2461 */
2462 static void
2463 ice_parse_valid_functions_cap(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2464 struct ice_aqc_list_caps_elem *cap)
2465 {
2466 u32 number = le32_to_cpu(cap->number);
2467 
2468 dev_p->num_funcs = hweight32(number);
2469 ice_debug(hw, ICE_DBG_INIT, "dev caps: num_funcs = %d\n",
2470 dev_p->num_funcs);
2471 }
2472 
2473 /**
2474 * ice_parse_vf_dev_caps - Parse ICE_AQC_CAPS_VF device caps
2475 * @hw: pointer to the HW struct
2476 * @dev_p: pointer to device capabilities structure
2477 * @cap: capability element to parse
2478 *
2479 * Parse ICE_AQC_CAPS_VF for device capabilities.
2480 */ 2481 static void 2482 ice_parse_vf_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, 2483 struct ice_aqc_list_caps_elem *cap) 2484 { 2485 u32 number = le32_to_cpu(cap->number); 2486 2487 dev_p->num_vfs_exposed = number; 2488 ice_debug(hw, ICE_DBG_INIT, "dev_caps: num_vfs_exposed = %d\n", 2489 dev_p->num_vfs_exposed); 2490 } 2491 2492 /** 2493 * ice_parse_vsi_dev_caps - Parse ICE_AQC_CAPS_VSI device caps 2494 * @hw: pointer to the HW struct 2495 * @dev_p: pointer to device capabilities structure 2496 * @cap: capability element to parse 2497 * 2498 * Parse ICE_AQC_CAPS_VSI for device capabilities. 2499 */ 2500 static void 2501 ice_parse_vsi_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, 2502 struct ice_aqc_list_caps_elem *cap) 2503 { 2504 u32 number = le32_to_cpu(cap->number); 2505 2506 dev_p->num_vsi_allocd_to_host = number; 2507 ice_debug(hw, ICE_DBG_INIT, "dev caps: num_vsi_allocd_to_host = %d\n", 2508 dev_p->num_vsi_allocd_to_host); 2509 } 2510 2511 /** 2512 * ice_parse_1588_dev_caps - Parse ICE_AQC_CAPS_1588 device caps 2513 * @hw: pointer to the HW struct 2514 * @dev_p: pointer to device capabilities structure 2515 * @cap: capability element to parse 2516 * 2517 * Parse ICE_AQC_CAPS_1588 for device capabilities. 2518 */ 2519 static void 2520 ice_parse_1588_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, 2521 struct ice_aqc_list_caps_elem *cap) 2522 { 2523 struct ice_ts_dev_info *info = &dev_p->ts_dev_info; 2524 u32 logical_id = le32_to_cpu(cap->logical_id); 2525 u32 phys_id = le32_to_cpu(cap->phys_id); 2526 u32 number = le32_to_cpu(cap->number); 2527 2528 info->ena = ((number & ICE_TS_DEV_ENA_M) != 0); 2529 dev_p->common_cap.ieee_1588 = info->ena; 2530 2531 info->tmr0_owner = number & ICE_TS_TMR0_OWNR_M; 2532 info->tmr0_owned = ((number & ICE_TS_TMR0_OWND_M) != 0); 2533 info->tmr0_ena = ((number & ICE_TS_TMR0_ENA_M) != 0); 2534 2535 info->tmr1_owner = (number & ICE_TS_TMR1_OWNR_M) >> ICE_TS_TMR1_OWNR_S; 2536 info->tmr1_owned = ((number & ICE_TS_TMR1_OWND_M) != 0); 2537 info->tmr1_ena = ((number & ICE_TS_TMR1_ENA_M) != 0); 2538 2539 info->ts_ll_read = ((number & ICE_TS_LL_TX_TS_READ_M) != 0); 2540 2541 info->ena_ports = logical_id; 2542 info->tmr_own_map = phys_id; 2543 2544 ice_debug(hw, ICE_DBG_INIT, "dev caps: ieee_1588 = %u\n", 2545 dev_p->common_cap.ieee_1588); 2546 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr0_owner = %u\n", 2547 info->tmr0_owner); 2548 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr0_owned = %u\n", 2549 info->tmr0_owned); 2550 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr0_ena = %u\n", 2551 info->tmr0_ena); 2552 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_owner = %u\n", 2553 info->tmr1_owner); 2554 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_owned = %u\n", 2555 info->tmr1_owned); 2556 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_ena = %u\n", 2557 info->tmr1_ena); 2558 ice_debug(hw, ICE_DBG_INIT, "dev caps: ts_ll_read = %u\n", 2559 info->ts_ll_read); 2560 ice_debug(hw, ICE_DBG_INIT, "dev caps: ieee_1588 ena_ports = %u\n", 2561 info->ena_ports); 2562 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr_own_map = %u\n", 2563 info->tmr_own_map); 2564 } 2565 2566 /** 2567 * ice_parse_fdir_dev_caps - Parse ICE_AQC_CAPS_FD device caps 2568 * @hw: pointer to the HW struct 2569 * @dev_p: pointer to device capabilities structure 2570 * @cap: capability element to parse 2571 * 2572 * Parse ICE_AQC_CAPS_FD for device capabilities. 
2573 */
2574 static void
2575 ice_parse_fdir_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2576 struct ice_aqc_list_caps_elem *cap)
2577 {
2578 u32 number = le32_to_cpu(cap->number);
2579 
2580 dev_p->num_flow_director_fltr = number;
2581 ice_debug(hw, ICE_DBG_INIT, "dev caps: num_flow_director_fltr = %d\n",
2582 dev_p->num_flow_director_fltr);
2583 }
2584 
2585 /**
2586 * ice_parse_dev_caps - Parse device capabilities
2587 * @hw: pointer to the HW struct
2588 * @dev_p: pointer to device capabilities structure
2589 * @buf: buffer containing the device capability records
2590 * @cap_count: the number of capabilities
2591 *
2592 * Helper function to parse device (0x000B) capabilities list. For
2593 * capabilities shared between device and function, this relies on
2594 * ice_parse_common_caps.
2595 *
2596 * Loop through the list of provided capabilities and extract the relevant
2597 * data into the device capabilities structure.
2598 */
2599 static void
2600 ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2601 void *buf, u32 cap_count)
2602 {
2603 struct ice_aqc_list_caps_elem *cap_resp;
2604 u32 i;
2605 
2606 cap_resp = buf;
2607 
2608 memset(dev_p, 0, sizeof(*dev_p));
2609 
2610 for (i = 0; i < cap_count; i++) {
2611 u16 cap = le16_to_cpu(cap_resp[i].cap);
2612 bool found;
2613 
2614 found = ice_parse_common_caps(hw, &dev_p->common_cap,
2615 &cap_resp[i], "dev caps");
2616 
2617 switch (cap) {
2618 case ICE_AQC_CAPS_VALID_FUNCTIONS:
2619 ice_parse_valid_functions_cap(hw, dev_p, &cap_resp[i]);
2620 break;
2621 case ICE_AQC_CAPS_VF:
2622 ice_parse_vf_dev_caps(hw, dev_p, &cap_resp[i]);
2623 break;
2624 case ICE_AQC_CAPS_VSI:
2625 ice_parse_vsi_dev_caps(hw, dev_p, &cap_resp[i]);
2626 break;
2627 case ICE_AQC_CAPS_1588:
2628 ice_parse_1588_dev_caps(hw, dev_p, &cap_resp[i]);
2629 break;
2630 case ICE_AQC_CAPS_FD:
2631 ice_parse_fdir_dev_caps(hw, dev_p, &cap_resp[i]);
2632 break;
2633 default:
2634 /* Don't list common capabilities as unknown */
2635 if (!found)
2636 ice_debug(hw, ICE_DBG_INIT, "dev caps: unknown capability[%d]: 0x%x\n",
2637 i, cap);
2638 break;
2639 }
2640 }
2641 
2642 ice_recalc_port_limited_caps(hw, &dev_p->common_cap);
2643 }
2644 
2645 /**
2646 * ice_aq_list_caps - query function/device capabilities
2647 * @hw: pointer to the HW struct
2648 * @buf: a buffer to hold the capabilities
2649 * @buf_size: size of the buffer
2650 * @cap_count: if not NULL, set to the number of capabilities reported
2651 * @opc: capabilities type to discover, device or function
2652 * @cd: pointer to command details structure or NULL
2653 *
2654 * Get the function (0x000A) or device (0x000B) capabilities description from
2655 * firmware and store it in the buffer.
2656 *
2657 * If the cap_count pointer is not NULL, then it is set to the number of
2658 * capabilities firmware will report. Note that if the buffer size is too
2659 * small, it is possible the command will return ICE_AQ_ERR_ENOMEM. The
2660 * cap_count will still be updated in this case. It is recommended that the
2661 * buffer size be set to ICE_AQ_MAX_BUF_LEN (the largest possible buffer that
2662 * firmware could return) to avoid this.
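 *
 * Illustrative call pattern following the recommendation above (error
 * handling abbreviated; see ice_discover_dev_caps() below for the real
 * flow):
 *
 *	buf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	err = ice_aq_list_caps(hw, buf, ICE_AQ_MAX_BUF_LEN, &cap_count,
 *			       ice_aqc_opc_list_dev_caps, NULL);
 *	kfree(buf);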
2663 */ 2664 int 2665 ice_aq_list_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count, 2666 enum ice_adminq_opc opc, struct ice_sq_cd *cd) 2667 { 2668 struct ice_aqc_list_caps *cmd; 2669 struct ice_aq_desc desc; 2670 int status; 2671 2672 cmd = &desc.params.get_cap; 2673 2674 if (opc != ice_aqc_opc_list_func_caps && 2675 opc != ice_aqc_opc_list_dev_caps) 2676 return -EINVAL; 2677 2678 ice_fill_dflt_direct_cmd_desc(&desc, opc); 2679 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); 2680 2681 if (cap_count) 2682 *cap_count = le32_to_cpu(cmd->count); 2683 2684 return status; 2685 } 2686 2687 /** 2688 * ice_discover_dev_caps - Read and extract device capabilities 2689 * @hw: pointer to the hardware structure 2690 * @dev_caps: pointer to device capabilities structure 2691 * 2692 * Read the device capabilities and extract them into the dev_caps structure 2693 * for later use. 2694 */ 2695 int 2696 ice_discover_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_caps) 2697 { 2698 u32 cap_count = 0; 2699 void *cbuf; 2700 int status; 2701 2702 cbuf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL); 2703 if (!cbuf) 2704 return -ENOMEM; 2705 2706 /* Although the driver doesn't know the number of capabilities the 2707 * device will return, we can simply send a 4KB buffer, the maximum 2708 * possible size that firmware can return. 2709 */ 2710 cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem); 2711 2712 status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count, 2713 ice_aqc_opc_list_dev_caps, NULL); 2714 if (!status) 2715 ice_parse_dev_caps(hw, dev_caps, cbuf, cap_count); 2716 kfree(cbuf); 2717 2718 return status; 2719 } 2720 2721 /** 2722 * ice_discover_func_caps - Read and extract function capabilities 2723 * @hw: pointer to the hardware structure 2724 * @func_caps: pointer to function capabilities structure 2725 * 2726 * Read the function capabilities and extract them into the func_caps structure 2727 * for later use. 2728 */ 2729 static int 2730 ice_discover_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_caps) 2731 { 2732 u32 cap_count = 0; 2733 void *cbuf; 2734 int status; 2735 2736 cbuf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL); 2737 if (!cbuf) 2738 return -ENOMEM; 2739 2740 /* Although the driver doesn't know the number of capabilities the 2741 * device will return, we can simply send a 4KB buffer, the maximum 2742 * possible size that firmware can return. 
2743 */ 2744 cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem); 2745 2746 status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count, 2747 ice_aqc_opc_list_func_caps, NULL); 2748 if (!status) 2749 ice_parse_func_caps(hw, func_caps, cbuf, cap_count); 2750 kfree(cbuf); 2751 2752 return status; 2753 } 2754 2755 /** 2756 * ice_set_safe_mode_caps - Override dev/func capabilities when in safe mode 2757 * @hw: pointer to the hardware structure 2758 */ 2759 void ice_set_safe_mode_caps(struct ice_hw *hw) 2760 { 2761 struct ice_hw_func_caps *func_caps = &hw->func_caps; 2762 struct ice_hw_dev_caps *dev_caps = &hw->dev_caps; 2763 struct ice_hw_common_caps cached_caps; 2764 u32 num_funcs; 2765 2766 /* cache some func_caps values that should be restored after memset */ 2767 cached_caps = func_caps->common_cap; 2768 2769 /* unset func capabilities */ 2770 memset(func_caps, 0, sizeof(*func_caps)); 2771 2772 #define ICE_RESTORE_FUNC_CAP(name) \ 2773 func_caps->common_cap.name = cached_caps.name 2774 2775 /* restore cached values */ 2776 ICE_RESTORE_FUNC_CAP(valid_functions); 2777 ICE_RESTORE_FUNC_CAP(txq_first_id); 2778 ICE_RESTORE_FUNC_CAP(rxq_first_id); 2779 ICE_RESTORE_FUNC_CAP(msix_vector_first_id); 2780 ICE_RESTORE_FUNC_CAP(max_mtu); 2781 ICE_RESTORE_FUNC_CAP(nvm_unified_update); 2782 ICE_RESTORE_FUNC_CAP(nvm_update_pending_nvm); 2783 ICE_RESTORE_FUNC_CAP(nvm_update_pending_orom); 2784 ICE_RESTORE_FUNC_CAP(nvm_update_pending_netlist); 2785 2786 /* one Tx and one Rx queue in safe mode */ 2787 func_caps->common_cap.num_rxq = 1; 2788 func_caps->common_cap.num_txq = 1; 2789 2790 /* two MSIX vectors, one for traffic and one for misc causes */ 2791 func_caps->common_cap.num_msix_vectors = 2; 2792 func_caps->guar_num_vsi = 1; 2793 2794 /* cache some dev_caps values that should be restored after memset */ 2795 cached_caps = dev_caps->common_cap; 2796 num_funcs = dev_caps->num_funcs; 2797 2798 /* unset dev capabilities */ 2799 memset(dev_caps, 0, sizeof(*dev_caps)); 2800 2801 #define ICE_RESTORE_DEV_CAP(name) \ 2802 dev_caps->common_cap.name = cached_caps.name 2803 2804 /* restore cached values */ 2805 ICE_RESTORE_DEV_CAP(valid_functions); 2806 ICE_RESTORE_DEV_CAP(txq_first_id); 2807 ICE_RESTORE_DEV_CAP(rxq_first_id); 2808 ICE_RESTORE_DEV_CAP(msix_vector_first_id); 2809 ICE_RESTORE_DEV_CAP(max_mtu); 2810 ICE_RESTORE_DEV_CAP(nvm_unified_update); 2811 ICE_RESTORE_DEV_CAP(nvm_update_pending_nvm); 2812 ICE_RESTORE_DEV_CAP(nvm_update_pending_orom); 2813 ICE_RESTORE_DEV_CAP(nvm_update_pending_netlist); 2814 dev_caps->num_funcs = num_funcs; 2815 2816 /* one Tx and one Rx queue per function in safe mode */ 2817 dev_caps->common_cap.num_rxq = num_funcs; 2818 dev_caps->common_cap.num_txq = num_funcs; 2819 2820 /* two MSIX vectors per function */ 2821 dev_caps->common_cap.num_msix_vectors = 2 * num_funcs; 2822 } 2823 2824 /** 2825 * ice_get_caps - get info about the HW 2826 * @hw: pointer to the hardware structure 2827 */ 2828 int ice_get_caps(struct ice_hw *hw) 2829 { 2830 int status; 2831 2832 status = ice_discover_dev_caps(hw, &hw->dev_caps); 2833 if (status) 2834 return status; 2835 2836 return ice_discover_func_caps(hw, &hw->func_caps); 2837 } 2838 2839 /** 2840 * ice_aq_manage_mac_write - manage MAC address write command 2841 * @hw: pointer to the HW struct 2842 * @mac_addr: MAC address to be written as LAA/LAA+WoL/Port address 2843 * @flags: flags to control write behavior 2844 * @cd: pointer to command details structure or NULL 2845 * 2846 * This function is used to write MAC address 
to the NVM (0x0108). 2847 */ 2848 int 2849 ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags, 2850 struct ice_sq_cd *cd) 2851 { 2852 struct ice_aqc_manage_mac_write *cmd; 2853 struct ice_aq_desc desc; 2854 2855 cmd = &desc.params.mac_write; 2856 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_write); 2857 2858 cmd->flags = flags; 2859 ether_addr_copy(cmd->mac_addr, mac_addr); 2860 2861 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 2862 } 2863 2864 /** 2865 * ice_aq_clear_pxe_mode 2866 * @hw: pointer to the HW struct 2867 * 2868 * Tell the firmware that the driver is taking over from PXE (0x0110). 2869 */ 2870 static int ice_aq_clear_pxe_mode(struct ice_hw *hw) 2871 { 2872 struct ice_aq_desc desc; 2873 2874 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pxe_mode); 2875 desc.params.clear_pxe.rx_cnt = ICE_AQC_CLEAR_PXE_RX_CNT; 2876 2877 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 2878 } 2879 2880 /** 2881 * ice_clear_pxe_mode - clear pxe operations mode 2882 * @hw: pointer to the HW struct 2883 * 2884 * Make sure all PXE mode settings are cleared, including things 2885 * like descriptor fetch/write-back mode. 2886 */ 2887 void ice_clear_pxe_mode(struct ice_hw *hw) 2888 { 2889 if (ice_check_sq_alive(hw, &hw->adminq)) 2890 ice_aq_clear_pxe_mode(hw); 2891 } 2892 2893 /** 2894 * ice_aq_set_port_params - set physical port parameters. 2895 * @pi: pointer to the port info struct 2896 * @double_vlan: if set double VLAN is enabled 2897 * @cd: pointer to command details structure or NULL 2898 * 2899 * Set Physical port parameters (0x0203) 2900 */ 2901 int 2902 ice_aq_set_port_params(struct ice_port_info *pi, bool double_vlan, 2903 struct ice_sq_cd *cd) 2904 2905 { 2906 struct ice_aqc_set_port_params *cmd; 2907 struct ice_hw *hw = pi->hw; 2908 struct ice_aq_desc desc; 2909 u16 cmd_flags = 0; 2910 2911 cmd = &desc.params.set_port_params; 2912 2913 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_params); 2914 if (double_vlan) 2915 cmd_flags |= ICE_AQC_SET_P_PARAMS_DOUBLE_VLAN_ENA; 2916 cmd->cmd_flags = cpu_to_le16(cmd_flags); 2917 2918 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 2919 } 2920 2921 /** 2922 * ice_is_100m_speed_supported 2923 * @hw: pointer to the HW struct 2924 * 2925 * returns true if 100M speeds are supported by the device, 2926 * false otherwise. 2927 */ 2928 bool ice_is_100m_speed_supported(struct ice_hw *hw) 2929 { 2930 switch (hw->device_id) { 2931 case ICE_DEV_ID_E822C_SGMII: 2932 case ICE_DEV_ID_E822L_SGMII: 2933 case ICE_DEV_ID_E823L_1GBE: 2934 case ICE_DEV_ID_E823C_SGMII: 2935 return true; 2936 default: 2937 return false; 2938 } 2939 } 2940 2941 /** 2942 * ice_get_link_speed_based_on_phy_type - returns link speed 2943 * @phy_type_low: lower part of phy_type 2944 * @phy_type_high: higher part of phy_type 2945 * 2946 * This helper function will convert an entry in PHY type structure 2947 * [phy_type_low, phy_type_high] to its corresponding link speed. 2948 * Note: In the structure of [phy_type_low, phy_type_high], there should 2949 * be one bit set, as this function will convert one PHY type to its 2950 * speed. 
2951 * If no bit gets set, ICE_AQ_LINK_SPEED_UNKNOWN will be returned 2952 * If more than one bit gets set, ICE_AQ_LINK_SPEED_UNKNOWN will be returned 2953 */ 2954 static u16 2955 ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high) 2956 { 2957 u16 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN; 2958 u16 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN; 2959 2960 switch (phy_type_low) { 2961 case ICE_PHY_TYPE_LOW_100BASE_TX: 2962 case ICE_PHY_TYPE_LOW_100M_SGMII: 2963 speed_phy_type_low = ICE_AQ_LINK_SPEED_100MB; 2964 break; 2965 case ICE_PHY_TYPE_LOW_1000BASE_T: 2966 case ICE_PHY_TYPE_LOW_1000BASE_SX: 2967 case ICE_PHY_TYPE_LOW_1000BASE_LX: 2968 case ICE_PHY_TYPE_LOW_1000BASE_KX: 2969 case ICE_PHY_TYPE_LOW_1G_SGMII: 2970 speed_phy_type_low = ICE_AQ_LINK_SPEED_1000MB; 2971 break; 2972 case ICE_PHY_TYPE_LOW_2500BASE_T: 2973 case ICE_PHY_TYPE_LOW_2500BASE_X: 2974 case ICE_PHY_TYPE_LOW_2500BASE_KX: 2975 speed_phy_type_low = ICE_AQ_LINK_SPEED_2500MB; 2976 break; 2977 case ICE_PHY_TYPE_LOW_5GBASE_T: 2978 case ICE_PHY_TYPE_LOW_5GBASE_KR: 2979 speed_phy_type_low = ICE_AQ_LINK_SPEED_5GB; 2980 break; 2981 case ICE_PHY_TYPE_LOW_10GBASE_T: 2982 case ICE_PHY_TYPE_LOW_10G_SFI_DA: 2983 case ICE_PHY_TYPE_LOW_10GBASE_SR: 2984 case ICE_PHY_TYPE_LOW_10GBASE_LR: 2985 case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1: 2986 case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC: 2987 case ICE_PHY_TYPE_LOW_10G_SFI_C2C: 2988 speed_phy_type_low = ICE_AQ_LINK_SPEED_10GB; 2989 break; 2990 case ICE_PHY_TYPE_LOW_25GBASE_T: 2991 case ICE_PHY_TYPE_LOW_25GBASE_CR: 2992 case ICE_PHY_TYPE_LOW_25GBASE_CR_S: 2993 case ICE_PHY_TYPE_LOW_25GBASE_CR1: 2994 case ICE_PHY_TYPE_LOW_25GBASE_SR: 2995 case ICE_PHY_TYPE_LOW_25GBASE_LR: 2996 case ICE_PHY_TYPE_LOW_25GBASE_KR: 2997 case ICE_PHY_TYPE_LOW_25GBASE_KR_S: 2998 case ICE_PHY_TYPE_LOW_25GBASE_KR1: 2999 case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC: 3000 case ICE_PHY_TYPE_LOW_25G_AUI_C2C: 3001 speed_phy_type_low = ICE_AQ_LINK_SPEED_25GB; 3002 break; 3003 case ICE_PHY_TYPE_LOW_40GBASE_CR4: 3004 case ICE_PHY_TYPE_LOW_40GBASE_SR4: 3005 case ICE_PHY_TYPE_LOW_40GBASE_LR4: 3006 case ICE_PHY_TYPE_LOW_40GBASE_KR4: 3007 case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC: 3008 case ICE_PHY_TYPE_LOW_40G_XLAUI: 3009 speed_phy_type_low = ICE_AQ_LINK_SPEED_40GB; 3010 break; 3011 case ICE_PHY_TYPE_LOW_50GBASE_CR2: 3012 case ICE_PHY_TYPE_LOW_50GBASE_SR2: 3013 case ICE_PHY_TYPE_LOW_50GBASE_LR2: 3014 case ICE_PHY_TYPE_LOW_50GBASE_KR2: 3015 case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC: 3016 case ICE_PHY_TYPE_LOW_50G_LAUI2: 3017 case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC: 3018 case ICE_PHY_TYPE_LOW_50G_AUI2: 3019 case ICE_PHY_TYPE_LOW_50GBASE_CP: 3020 case ICE_PHY_TYPE_LOW_50GBASE_SR: 3021 case ICE_PHY_TYPE_LOW_50GBASE_FR: 3022 case ICE_PHY_TYPE_LOW_50GBASE_LR: 3023 case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4: 3024 case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC: 3025 case ICE_PHY_TYPE_LOW_50G_AUI1: 3026 speed_phy_type_low = ICE_AQ_LINK_SPEED_50GB; 3027 break; 3028 case ICE_PHY_TYPE_LOW_100GBASE_CR4: 3029 case ICE_PHY_TYPE_LOW_100GBASE_SR4: 3030 case ICE_PHY_TYPE_LOW_100GBASE_LR4: 3031 case ICE_PHY_TYPE_LOW_100GBASE_KR4: 3032 case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC: 3033 case ICE_PHY_TYPE_LOW_100G_CAUI4: 3034 case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC: 3035 case ICE_PHY_TYPE_LOW_100G_AUI4: 3036 case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4: 3037 case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4: 3038 case ICE_PHY_TYPE_LOW_100GBASE_CP2: 3039 case ICE_PHY_TYPE_LOW_100GBASE_SR2: 3040 case ICE_PHY_TYPE_LOW_100GBASE_DR: 3041 speed_phy_type_low = ICE_AQ_LINK_SPEED_100GB; 3042 
break;
3043 default:
3044 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
3045 break;
3046 }
3047 
3048 switch (phy_type_high) {
3049 case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
3050 case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
3051 case ICE_PHY_TYPE_HIGH_100G_CAUI2:
3052 case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
3053 case ICE_PHY_TYPE_HIGH_100G_AUI2:
3054 speed_phy_type_high = ICE_AQ_LINK_SPEED_100GB;
3055 break;
3056 default:
3057 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
3058 break;
3059 }
3060 
3061 if (speed_phy_type_low == ICE_AQ_LINK_SPEED_UNKNOWN &&
3062 speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
3063 return ICE_AQ_LINK_SPEED_UNKNOWN;
3064 else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
3065 speed_phy_type_high != ICE_AQ_LINK_SPEED_UNKNOWN)
3066 return ICE_AQ_LINK_SPEED_UNKNOWN;
3067 else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
3068 speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
3069 return speed_phy_type_low;
3070 else
3071 return speed_phy_type_high;
3072 }
3073 
3074 /**
3075 * ice_update_phy_type
3076 * @phy_type_low: pointer to the lower part of phy_type
3077 * @phy_type_high: pointer to the higher part of phy_type
3078 * @link_speeds_bitmap: targeted link speeds bitmap
3079 *
3080 * Note: For the link_speeds_bitmap structure, you can check it at
3081 * [ice_aqc_get_link_status->link_speed]. The caller can pass in a
3082 * link_speeds_bitmap that includes multiple speeds.
3083 *
3084 * Each entry in this [phy_type_low, phy_type_high] structure will
3085 * represent a certain link speed. This helper function will turn on bits
3086 * in [phy_type_low, phy_type_high] structure based on the value of
3087 * link_speeds_bitmap input parameter.
3088 */
3089 void
3090 ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high,
3091 u16 link_speeds_bitmap)
3092 {
3093 u64 pt_high;
3094 u64 pt_low;
3095 int index;
3096 u16 speed;
3097 
3098 /* We first check with low part of phy_type */
3099 for (index = 0; index <= ICE_PHY_TYPE_LOW_MAX_INDEX; index++) {
3100 pt_low = BIT_ULL(index);
3101 speed = ice_get_link_speed_based_on_phy_type(pt_low, 0);
3102 
3103 if (link_speeds_bitmap & speed)
3104 *phy_type_low |= BIT_ULL(index);
3105 }
3106 
3107 /* We then check with high part of phy_type */
3108 for (index = 0; index <= ICE_PHY_TYPE_HIGH_MAX_INDEX; index++) {
3109 pt_high = BIT_ULL(index);
3110 speed = ice_get_link_speed_based_on_phy_type(0, pt_high);
3111 
3112 if (link_speeds_bitmap & speed)
3113 *phy_type_high |= BIT_ULL(index);
3114 }
3115 }
3116 
3117 /**
3118 * ice_aq_set_phy_cfg
3119 * @hw: pointer to the HW struct
3120 * @pi: port info structure of the interested logical port
3121 * @cfg: structure with PHY configuration data to be set
3122 * @cd: pointer to command details structure or NULL
3123 *
3124 * Set the various PHY configuration parameters supported on the Port.
3125 * One or more of the Set PHY config parameters may be ignored in an MFP
3126 * mode as the PF may not have the privilege to set some of the PHY Config
3127 * parameters. This status will be indicated by the command response (0x0601).
3128 */
3129 int
3130 ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi,
3131 struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd)
3132 {
3133 struct ice_aq_desc desc;
3134 int status;
3135 
3136 if (!cfg)
3137 return -EINVAL;
3138 
3139 /* Ensure that only valid bits of cfg->caps can be turned on.
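 * Invalid bits are cleared rather than rejected: the command is still
 * issued, just with cfg->caps masked down to ICE_AQ_PHY_ENA_VALID_MASK.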
*/ 3140 if (cfg->caps & ~ICE_AQ_PHY_ENA_VALID_MASK) { 3141 ice_debug(hw, ICE_DBG_PHY, "Invalid bit is set in ice_aqc_set_phy_cfg_data->caps : 0x%x\n", 3142 cfg->caps); 3143 3144 cfg->caps &= ICE_AQ_PHY_ENA_VALID_MASK; 3145 } 3146 3147 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_cfg); 3148 desc.params.set_phy.lport_num = pi->lport; 3149 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 3150 3151 ice_debug(hw, ICE_DBG_LINK, "set phy cfg\n"); 3152 ice_debug(hw, ICE_DBG_LINK, " phy_type_low = 0x%llx\n", 3153 (unsigned long long)le64_to_cpu(cfg->phy_type_low)); 3154 ice_debug(hw, ICE_DBG_LINK, " phy_type_high = 0x%llx\n", 3155 (unsigned long long)le64_to_cpu(cfg->phy_type_high)); 3156 ice_debug(hw, ICE_DBG_LINK, " caps = 0x%x\n", cfg->caps); 3157 ice_debug(hw, ICE_DBG_LINK, " low_power_ctrl_an = 0x%x\n", 3158 cfg->low_power_ctrl_an); 3159 ice_debug(hw, ICE_DBG_LINK, " eee_cap = 0x%x\n", cfg->eee_cap); 3160 ice_debug(hw, ICE_DBG_LINK, " eeer_value = 0x%x\n", cfg->eeer_value); 3161 ice_debug(hw, ICE_DBG_LINK, " link_fec_opt = 0x%x\n", 3162 cfg->link_fec_opt); 3163 3164 status = ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd); 3165 if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE) 3166 status = 0; 3167 3168 if (!status) 3169 pi->phy.curr_user_phy_cfg = *cfg; 3170 3171 return status; 3172 } 3173 3174 /** 3175 * ice_update_link_info - update status of the HW network link 3176 * @pi: port info structure of the interested logical port 3177 */ 3178 int ice_update_link_info(struct ice_port_info *pi) 3179 { 3180 struct ice_link_status *li; 3181 int status; 3182 3183 if (!pi) 3184 return -EINVAL; 3185 3186 li = &pi->phy.link_info; 3187 3188 status = ice_aq_get_link_info(pi, true, NULL, NULL); 3189 if (status) 3190 return status; 3191 3192 if (li->link_info & ICE_AQ_MEDIA_AVAILABLE) { 3193 struct ice_aqc_get_phy_caps_data *pcaps; 3194 struct ice_hw *hw; 3195 3196 hw = pi->hw; 3197 pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), 3198 GFP_KERNEL); 3199 if (!pcaps) 3200 return -ENOMEM; 3201 3202 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA, 3203 pcaps, NULL); 3204 3205 devm_kfree(ice_hw_to_dev(hw), pcaps); 3206 } 3207 3208 return status; 3209 } 3210 3211 /** 3212 * ice_cache_phy_user_req 3213 * @pi: port information structure 3214 * @cache_data: PHY logging data 3215 * @cache_mode: PHY logging mode 3216 * 3217 * Log the user request on (FC, FEC, SPEED) for later use. 
3218 */ 3219 static void 3220 ice_cache_phy_user_req(struct ice_port_info *pi, 3221 struct ice_phy_cache_mode_data cache_data, 3222 enum ice_phy_cache_mode cache_mode) 3223 { 3224 if (!pi) 3225 return; 3226 3227 switch (cache_mode) { 3228 case ICE_FC_MODE: 3229 pi->phy.curr_user_fc_req = cache_data.data.curr_user_fc_req; 3230 break; 3231 case ICE_SPEED_MODE: 3232 pi->phy.curr_user_speed_req = 3233 cache_data.data.curr_user_speed_req; 3234 break; 3235 case ICE_FEC_MODE: 3236 pi->phy.curr_user_fec_req = cache_data.data.curr_user_fec_req; 3237 break; 3238 default: 3239 break; 3240 } 3241 } 3242 3243 /** 3244 * ice_caps_to_fc_mode 3245 * @caps: PHY capabilities 3246 * 3247 * Convert PHY FC capabilities to ice FC mode 3248 */ 3249 enum ice_fc_mode ice_caps_to_fc_mode(u8 caps) 3250 { 3251 if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE && 3252 caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE) 3253 return ICE_FC_FULL; 3254 3255 if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE) 3256 return ICE_FC_TX_PAUSE; 3257 3258 if (caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE) 3259 return ICE_FC_RX_PAUSE; 3260 3261 return ICE_FC_NONE; 3262 } 3263 3264 /** 3265 * ice_caps_to_fec_mode 3266 * @caps: PHY capabilities 3267 * @fec_options: Link FEC options 3268 * 3269 * Convert PHY FEC capabilities to ice FEC mode 3270 */ 3271 enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options) 3272 { 3273 if (caps & ICE_AQC_PHY_EN_AUTO_FEC) 3274 return ICE_FEC_AUTO; 3275 3276 if (fec_options & (ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN | 3277 ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ | 3278 ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN | 3279 ICE_AQC_PHY_FEC_25G_KR_REQ)) 3280 return ICE_FEC_BASER; 3281 3282 if (fec_options & (ICE_AQC_PHY_FEC_25G_RS_528_REQ | 3283 ICE_AQC_PHY_FEC_25G_RS_544_REQ | 3284 ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN)) 3285 return ICE_FEC_RS; 3286 3287 return ICE_FEC_NONE; 3288 } 3289 3290 /** 3291 * ice_cfg_phy_fc - Configure PHY FC data based on FC mode 3292 * @pi: port information structure 3293 * @cfg: PHY configuration data to set FC mode 3294 * @req_mode: FC mode to configure 3295 */ 3296 int 3297 ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg, 3298 enum ice_fc_mode req_mode) 3299 { 3300 struct ice_phy_cache_mode_data cache_data; 3301 u8 pause_mask = 0x0; 3302 3303 if (!pi || !cfg) 3304 return -EINVAL; 3305 3306 switch (req_mode) { 3307 case ICE_FC_FULL: 3308 pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE; 3309 pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE; 3310 break; 3311 case ICE_FC_RX_PAUSE: 3312 pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE; 3313 break; 3314 case ICE_FC_TX_PAUSE: 3315 pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE; 3316 break; 3317 default: 3318 break; 3319 } 3320 3321 /* clear the old pause settings */ 3322 cfg->caps &= ~(ICE_AQC_PHY_EN_TX_LINK_PAUSE | 3323 ICE_AQC_PHY_EN_RX_LINK_PAUSE); 3324 3325 /* set the new capabilities */ 3326 cfg->caps |= pause_mask; 3327 3328 /* Cache user FC request */ 3329 cache_data.data.curr_user_fc_req = req_mode; 3330 ice_cache_phy_user_req(pi, cache_data, ICE_FC_MODE); 3331 3332 return 0; 3333 } 3334 3335 /** 3336 * ice_set_fc 3337 * @pi: port information structure 3338 * @aq_failures: pointer to status code, specific to ice_set_fc routine 3339 * @ena_auto_link_update: enable automatic link update 3340 * 3341 * Set the requested flow control mode. 
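 *
 * Usage sketch (illustrative; dev and the chosen mode are assumptions made
 * for the example):
 *
 *	u8 aq_failures;
 *	int err;
 *
 *	pi->fc.req_mode = ICE_FC_FULL;
 *	err = ice_set_fc(pi, &aq_failures, true);
 *	if (err)
 *		dev_dbg(dev, "set fc failed, aq_failures = 0x%x\n",
 *			aq_failures);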
3342 */ 3343 int 3344 ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update) 3345 { 3346 struct ice_aqc_set_phy_cfg_data cfg = { 0 }; 3347 struct ice_aqc_get_phy_caps_data *pcaps; 3348 struct ice_hw *hw; 3349 int status; 3350 3351 if (!pi || !aq_failures) 3352 return -EINVAL; 3353 3354 *aq_failures = 0; 3355 hw = pi->hw; 3356 3357 pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL); 3358 if (!pcaps) 3359 return -ENOMEM; 3360 3361 /* Get the current PHY config */ 3362 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, 3363 pcaps, NULL); 3364 if (status) { 3365 *aq_failures = ICE_SET_FC_AQ_FAIL_GET; 3366 goto out; 3367 } 3368 3369 ice_copy_phy_caps_to_cfg(pi, pcaps, &cfg); 3370 3371 /* Configure the set PHY data */ 3372 status = ice_cfg_phy_fc(pi, &cfg, pi->fc.req_mode); 3373 if (status) 3374 goto out; 3375 3376 /* If the capabilities have changed, then set the new config */ 3377 if (cfg.caps != pcaps->caps) { 3378 int retry_count, retry_max = 10; 3379 3380 /* Auto restart link so settings take effect */ 3381 if (ena_auto_link_update) 3382 cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT; 3383 3384 status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL); 3385 if (status) { 3386 *aq_failures = ICE_SET_FC_AQ_FAIL_SET; 3387 goto out; 3388 } 3389 3390 /* Update the link info 3391 * It sometimes takes a really long time for link to 3392 * come back from the atomic reset. Thus, we wait a 3393 * little bit. 3394 */ 3395 for (retry_count = 0; retry_count < retry_max; retry_count++) { 3396 status = ice_update_link_info(pi); 3397 3398 if (!status) 3399 break; 3400 3401 mdelay(100); 3402 } 3403 3404 if (status) 3405 *aq_failures = ICE_SET_FC_AQ_FAIL_UPDATE; 3406 } 3407 3408 out: 3409 devm_kfree(ice_hw_to_dev(hw), pcaps); 3410 return status; 3411 } 3412 3413 /** 3414 * ice_phy_caps_equals_cfg 3415 * @phy_caps: PHY capabilities 3416 * @phy_cfg: PHY configuration 3417 * 3418 * Helper function to determine if PHY capabilities matches PHY 3419 * configuration 3420 */ 3421 bool 3422 ice_phy_caps_equals_cfg(struct ice_aqc_get_phy_caps_data *phy_caps, 3423 struct ice_aqc_set_phy_cfg_data *phy_cfg) 3424 { 3425 u8 caps_mask, cfg_mask; 3426 3427 if (!phy_caps || !phy_cfg) 3428 return false; 3429 3430 /* These bits are not common between capabilities and configuration. 3431 * Do not use them to determine equality. 
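 * The masks below drop ICE_AQC_PHY_AN_MODE and ICE_AQC_GET_PHY_EN_MOD_QUAL
 * on the capabilities side and ICE_AQ_PHY_ENA_AUTO_LINK_UPDT on the
 * configuration side before the caps fields are compared.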
3432 */
3433 caps_mask = ICE_AQC_PHY_CAPS_MASK & ~(ICE_AQC_PHY_AN_MODE |
3434 ICE_AQC_GET_PHY_EN_MOD_QUAL);
3435 cfg_mask = ICE_AQ_PHY_ENA_VALID_MASK & ~ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
3436 
3437 if (phy_caps->phy_type_low != phy_cfg->phy_type_low ||
3438 phy_caps->phy_type_high != phy_cfg->phy_type_high ||
3439 ((phy_caps->caps & caps_mask) != (phy_cfg->caps & cfg_mask)) ||
3440 phy_caps->low_power_ctrl_an != phy_cfg->low_power_ctrl_an ||
3441 phy_caps->eee_cap != phy_cfg->eee_cap ||
3442 phy_caps->eeer_value != phy_cfg->eeer_value ||
3443 phy_caps->link_fec_options != phy_cfg->link_fec_opt)
3444 return false;
3445 
3446 return true;
3447 }
3448 
3449 /**
3450 * ice_copy_phy_caps_to_cfg - Copy PHY ability data to configuration data
3451 * @pi: port information structure
3452 * @caps: PHY ability structure to copy data from
3453 * @cfg: PHY configuration structure to copy data to
3454 *
3455 * Helper function to copy AQC PHY get ability data to PHY set configuration
3456 * data structure
3457 */
3458 void
3459 ice_copy_phy_caps_to_cfg(struct ice_port_info *pi,
3460 struct ice_aqc_get_phy_caps_data *caps,
3461 struct ice_aqc_set_phy_cfg_data *cfg)
3462 {
3463 if (!pi || !caps || !cfg)
3464 return;
3465 
3466 memset(cfg, 0, sizeof(*cfg));
3467 cfg->phy_type_low = caps->phy_type_low;
3468 cfg->phy_type_high = caps->phy_type_high;
3469 cfg->caps = caps->caps;
3470 cfg->low_power_ctrl_an = caps->low_power_ctrl_an;
3471 cfg->eee_cap = caps->eee_cap;
3472 cfg->eeer_value = caps->eeer_value;
3473 cfg->link_fec_opt = caps->link_fec_options;
3474 cfg->module_compliance_enforcement =
3475 caps->module_compliance_enforcement;
3476 }
3477 
3478 /**
3479 * ice_cfg_phy_fec - Configure PHY FEC data based on FEC mode
3480 * @pi: port information structure
3481 * @cfg: PHY configuration data to set FEC mode
3482 * @fec: FEC mode to configure
3483 */
3484 int
3485 ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
3486 enum ice_fec_mode fec)
3487 {
3488 struct ice_aqc_get_phy_caps_data *pcaps;
3489 struct ice_hw *hw;
3490 int status;
3491 
3492 if (!pi || !cfg)
3493 return -EINVAL;
3494 
3495 hw = pi->hw;
3496 
3497 pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
3498 if (!pcaps)
3499 return -ENOMEM;
3500 
3501 status = ice_aq_get_phy_caps(pi, false,
3502 (ice_fw_supports_report_dflt_cfg(hw) ?
3503 ICE_AQC_REPORT_DFLT_CFG :
3504 ICE_AQC_REPORT_TOPO_CAP_MEDIA), pcaps, NULL);
3505 if (status)
3506 goto out;
3507 
3508 cfg->caps |= pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC;
3509 cfg->link_fec_opt = pcaps->link_fec_options;
3510 
3511 switch (fec) {
3512 case ICE_FEC_BASER:
3513 /* Clear RS bits, and AND BASE-R ability
3514 * bits and OR request bits.
3515 */
3516 cfg->link_fec_opt &= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
3517 ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN;
3518 cfg->link_fec_opt |= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
3519 ICE_AQC_PHY_FEC_25G_KR_REQ;
3520 break;
3521 case ICE_FEC_RS:
3522 /* Clear BASE-R bits, and AND RS ability
3523 * bits and OR request bits.
3524 */
3525 cfg->link_fec_opt &= ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN;
3526 cfg->link_fec_opt |= ICE_AQC_PHY_FEC_25G_RS_528_REQ |
3527 ICE_AQC_PHY_FEC_25G_RS_544_REQ;
3528 break;
3529 case ICE_FEC_NONE:
3530 /* Clear all FEC option bits. */
3531 cfg->link_fec_opt &= ~ICE_AQC_PHY_FEC_MASK;
3532 break;
3533 case ICE_FEC_AUTO:
3534 /* AND auto FEC bit, and all caps bits.
*/
3535 cfg->caps &= ICE_AQC_PHY_CAPS_MASK;
3536 cfg->link_fec_opt |= pcaps->link_fec_options;
3537 break;
3538 default:
3539 status = -EINVAL;
3540 break;
3541 }
3542 
3543 if (fec == ICE_FEC_AUTO && ice_fw_supports_link_override(hw) &&
3544 !ice_fw_supports_report_dflt_cfg(hw)) {
3545 struct ice_link_default_override_tlv tlv = { 0 };
3546 
3547 status = ice_get_link_default_override(&tlv, pi);
3548 if (status)
3549 goto out;
3550 
3551 if (!(tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE) &&
3552 (tlv.options & ICE_LINK_OVERRIDE_EN))
3553 cfg->link_fec_opt = tlv.fec_options;
3554 }
3555 
3556 out:
3557 kfree(pcaps);
3558 
3559 return status;
3560 }
3561 
3562 /**
3563 * ice_get_link_status - get status of the HW network link
3564 * @pi: port information structure
3565 * @link_up: pointer to bool (true/false = linkup/linkdown)
3566 *
3567 * Variable link_up is true if link is up, false if link is down.
3568 * The variable link_up is invalid if status is non-zero. As a
3569 * result of this call, link status reporting becomes enabled.
3570 */
3571 int ice_get_link_status(struct ice_port_info *pi, bool *link_up)
3572 {
3573 struct ice_phy_info *phy_info;
3574 int status = 0;
3575 
3576 if (!pi || !link_up)
3577 return -EINVAL;
3578 
3579 phy_info = &pi->phy;
3580 
3581 if (phy_info->get_link_info) {
3582 status = ice_update_link_info(pi);
3583 
3584 if (status)
3585 ice_debug(pi->hw, ICE_DBG_LINK, "get link status error, status = %d\n",
3586 status);
3587 }
3588 
3589 *link_up = phy_info->link_info.link_info & ICE_AQ_LINK_UP;
3590 
3591 return status;
3592 }
3593 
3594 /**
3595 * ice_aq_set_link_restart_an
3596 * @pi: pointer to the port information structure
3597 * @ena_link: if true: enable link, if false: disable link
3598 * @cd: pointer to command details structure or NULL
3599 *
3600 * Sets up the link and restarts the Auto-Negotiation over the link.
3601 */ 3602 int 3603 ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link, 3604 struct ice_sq_cd *cd) 3605 { 3606 struct ice_aqc_restart_an *cmd; 3607 struct ice_aq_desc desc; 3608 3609 cmd = &desc.params.restart_an; 3610 3611 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_restart_an); 3612 3613 cmd->cmd_flags = ICE_AQC_RESTART_AN_LINK_RESTART; 3614 cmd->lport_num = pi->lport; 3615 if (ena_link) 3616 cmd->cmd_flags |= ICE_AQC_RESTART_AN_LINK_ENABLE; 3617 else 3618 cmd->cmd_flags &= ~ICE_AQC_RESTART_AN_LINK_ENABLE; 3619 3620 return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd); 3621 } 3622 3623 /** 3624 * ice_aq_set_event_mask 3625 * @hw: pointer to the HW struct 3626 * @port_num: port number of the physical function 3627 * @mask: event mask to be set 3628 * @cd: pointer to command details structure or NULL 3629 * 3630 * Set event mask (0x0613) 3631 */ 3632 int 3633 ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask, 3634 struct ice_sq_cd *cd) 3635 { 3636 struct ice_aqc_set_event_mask *cmd; 3637 struct ice_aq_desc desc; 3638 3639 cmd = &desc.params.set_event_mask; 3640 3641 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_event_mask); 3642 3643 cmd->lport_num = port_num; 3644 3645 cmd->event_mask = cpu_to_le16(mask); 3646 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 3647 } 3648 3649 /** 3650 * ice_aq_set_mac_loopback 3651 * @hw: pointer to the HW struct 3652 * @ena_lpbk: Enable or Disable loopback 3653 * @cd: pointer to command details structure or NULL 3654 * 3655 * Enable/disable loopback on a given port 3656 */ 3657 int 3658 ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd) 3659 { 3660 struct ice_aqc_set_mac_lb *cmd; 3661 struct ice_aq_desc desc; 3662 3663 cmd = &desc.params.set_mac_lb; 3664 3665 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_lb); 3666 if (ena_lpbk) 3667 cmd->lb_mode = ICE_AQ_MAC_LB_EN; 3668 3669 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 3670 } 3671 3672 /** 3673 * ice_aq_set_port_id_led 3674 * @pi: pointer to the port information 3675 * @is_orig_mode: is this LED set to original mode (by the net-list) 3676 * @cd: pointer to command details structure or NULL 3677 * 3678 * Set LED value for the given port (0x06e9) 3679 */ 3680 int 3681 ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode, 3682 struct ice_sq_cd *cd) 3683 { 3684 struct ice_aqc_set_port_id_led *cmd; 3685 struct ice_hw *hw = pi->hw; 3686 struct ice_aq_desc desc; 3687 3688 cmd = &desc.params.set_port_id_led; 3689 3690 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_id_led); 3691 3692 if (is_orig_mode) 3693 cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_ORIG; 3694 else 3695 cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_BLINK; 3696 3697 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 3698 } 3699 3700 /** 3701 * ice_aq_get_port_options 3702 * @hw: pointer to the HW struct 3703 * @options: buffer for the resultant port options 3704 * @option_count: input - size of the buffer in port options structures, 3705 * output - number of returned port options 3706 * @lport: logical port to call the command with (optional) 3707 * @lport_valid: when false, FW uses port owned by the PF instead of lport, 3708 * when PF owns more than 1 port it must be true 3709 * @active_option_idx: index of active port option in returned buffer 3710 * @active_option_valid: active option in returned buffer is valid 3711 * @pending_option_idx: index of pending port option in returned buffer 3712 * @pending_option_valid: pending option in returned buffer 
is valid 3713 * 3714 * Calls Get Port Options AQC (0x06ea) and verifies result. 3715 */ 3716 int 3717 ice_aq_get_port_options(struct ice_hw *hw, 3718 struct ice_aqc_get_port_options_elem *options, 3719 u8 *option_count, u8 lport, bool lport_valid, 3720 u8 *active_option_idx, bool *active_option_valid, 3721 u8 *pending_option_idx, bool *pending_option_valid) 3722 { 3723 struct ice_aqc_get_port_options *cmd; 3724 struct ice_aq_desc desc; 3725 int status; 3726 u8 i; 3727 3728 /* options buffer shall be able to hold max returned options */ 3729 if (*option_count < ICE_AQC_PORT_OPT_COUNT_M) 3730 return -EINVAL; 3731 3732 cmd = &desc.params.get_port_options; 3733 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_port_options); 3734 3735 if (lport_valid) 3736 cmd->lport_num = lport; 3737 cmd->lport_num_valid = lport_valid; 3738 3739 status = ice_aq_send_cmd(hw, &desc, options, 3740 *option_count * sizeof(*options), NULL); 3741 if (status) 3742 return status; 3743 3744 /* verify direct FW response & set output parameters */ 3745 *option_count = FIELD_GET(ICE_AQC_PORT_OPT_COUNT_M, 3746 cmd->port_options_count); 3747 ice_debug(hw, ICE_DBG_PHY, "options: %x\n", *option_count); 3748 *active_option_valid = FIELD_GET(ICE_AQC_PORT_OPT_VALID, 3749 cmd->port_options); 3750 if (*active_option_valid) { 3751 *active_option_idx = FIELD_GET(ICE_AQC_PORT_OPT_ACTIVE_M, 3752 cmd->port_options); 3753 if (*active_option_idx > (*option_count - 1)) 3754 return -EIO; 3755 ice_debug(hw, ICE_DBG_PHY, "active idx: %x\n", 3756 *active_option_idx); 3757 } 3758 3759 *pending_option_valid = FIELD_GET(ICE_AQC_PENDING_PORT_OPT_VALID, 3760 cmd->pending_port_option_status); 3761 if (*pending_option_valid) { 3762 *pending_option_idx = FIELD_GET(ICE_AQC_PENDING_PORT_OPT_IDX_M, 3763 cmd->pending_port_option_status); 3764 if (*pending_option_idx > (*option_count - 1)) 3765 return -EIO; 3766 ice_debug(hw, ICE_DBG_PHY, "pending idx: %x\n", 3767 *pending_option_idx); 3768 } 3769 3770 /* mask output options fields */ 3771 for (i = 0; i < *option_count; i++) { 3772 options[i].pmd = FIELD_GET(ICE_AQC_PORT_OPT_PMD_COUNT_M, 3773 options[i].pmd); 3774 options[i].max_lane_speed = FIELD_GET(ICE_AQC_PORT_OPT_MAX_LANE_M, 3775 options[i].max_lane_speed); 3776 ice_debug(hw, ICE_DBG_PHY, "pmds: %x max speed: %x\n", 3777 options[i].pmd, options[i].max_lane_speed); 3778 } 3779 3780 return 0; 3781 } 3782 3783 /** 3784 * ice_aq_set_port_option 3785 * @hw: pointer to the HW struct 3786 * @lport: logical port to call the command with 3787 * @lport_valid: when false, FW uses port owned by the PF instead of lport, 3788 * when PF owns more than 1 port it must be true 3789 * @new_option: new port option to be written 3790 * 3791 * Calls Set Port Options AQC (0x06eb). 
3792 */ 3793 int 3794 ice_aq_set_port_option(struct ice_hw *hw, u8 lport, u8 lport_valid, 3795 u8 new_option) 3796 { 3797 struct ice_aqc_set_port_option *cmd; 3798 struct ice_aq_desc desc; 3799 3800 if (new_option > ICE_AQC_PORT_OPT_COUNT_M) 3801 return -EINVAL; 3802 3803 cmd = &desc.params.set_port_option; 3804 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_option); 3805 3806 if (lport_valid) 3807 cmd->lport_num = lport; 3808 3809 cmd->lport_num_valid = lport_valid; 3810 cmd->selected_port_option = new_option; 3811 3812 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 3813 } 3814 3815 /** 3816 * ice_aq_sff_eeprom 3817 * @hw: pointer to the HW struct 3818 * @lport: bits [7:0] = logical port, bit [8] = logical port valid 3819 * @bus_addr: I2C bus address of the eeprom (typically 0xA0, 0=topo default) 3820 * @mem_addr: I2C offset. lower 8 bits for address, 8 upper bits zero padding. 3821 * @page: QSFP page 3822 * @set_page: set or ignore the page 3823 * @data: pointer to data buffer to be read/written to the I2C device. 3824 * @length: 1-16 for read, 1 for write. 3825 * @write: 0 read, 1 for write. 3826 * @cd: pointer to command details structure or NULL 3827 * 3828 * Read/Write SFF EEPROM (0x06EE) 3829 */ 3830 int 3831 ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr, 3832 u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length, 3833 bool write, struct ice_sq_cd *cd) 3834 { 3835 struct ice_aqc_sff_eeprom *cmd; 3836 struct ice_aq_desc desc; 3837 int status; 3838 3839 if (!data || (mem_addr & 0xff00)) 3840 return -EINVAL; 3841 3842 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_sff_eeprom); 3843 cmd = &desc.params.read_write_sff_param; 3844 desc.flags = cpu_to_le16(ICE_AQ_FLAG_RD); 3845 cmd->lport_num = (u8)(lport & 0xff); 3846 cmd->lport_num_valid = (u8)((lport >> 8) & 0x01); 3847 cmd->i2c_bus_addr = cpu_to_le16(((bus_addr >> 1) & 3848 ICE_AQC_SFF_I2CBUS_7BIT_M) | 3849 ((set_page << 3850 ICE_AQC_SFF_SET_EEPROM_PAGE_S) & 3851 ICE_AQC_SFF_SET_EEPROM_PAGE_M)); 3852 cmd->i2c_mem_addr = cpu_to_le16(mem_addr & 0xff); 3853 cmd->eeprom_page = cpu_to_le16((u16)page << ICE_AQC_SFF_EEPROM_PAGE_S); 3854 if (write) 3855 cmd->i2c_bus_addr |= cpu_to_le16(ICE_AQC_SFF_IS_WRITE); 3856 3857 status = ice_aq_send_cmd(hw, &desc, data, length, cd); 3858 return status; 3859 } 3860 3861 /** 3862 * __ice_aq_get_set_rss_lut 3863 * @hw: pointer to the hardware structure 3864 * @params: RSS LUT parameters 3865 * @set: set true to set the table, false to get the table 3866 * 3867 * Internal function to get (0x0B05) or set (0x0B03) RSS look up table 3868 */ 3869 static int 3870 __ice_aq_get_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *params, bool set) 3871 { 3872 u16 flags = 0, vsi_id, lut_type, lut_size, glob_lut_idx, vsi_handle; 3873 struct ice_aqc_get_set_rss_lut *cmd_resp; 3874 struct ice_aq_desc desc; 3875 int status; 3876 u8 *lut; 3877 3878 if (!params) 3879 return -EINVAL; 3880 3881 vsi_handle = params->vsi_handle; 3882 lut = params->lut; 3883 3884 if (!ice_is_vsi_valid(hw, vsi_handle) || !lut) 3885 return -EINVAL; 3886 3887 lut_size = params->lut_size; 3888 lut_type = params->lut_type; 3889 glob_lut_idx = params->global_lut_id; 3890 vsi_id = ice_get_hw_vsi_num(hw, vsi_handle); 3891 3892 cmd_resp = &desc.params.get_set_rss_lut; 3893 3894 if (set) { 3895 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_lut); 3896 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 3897 } else { 3898 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_lut); 3899 } 3900 3901 
cmd_resp->vsi_id = cpu_to_le16(((vsi_id << 3902 ICE_AQC_GSET_RSS_LUT_VSI_ID_S) & 3903 ICE_AQC_GSET_RSS_LUT_VSI_ID_M) | 3904 ICE_AQC_GSET_RSS_LUT_VSI_VALID); 3905 3906 switch (lut_type) { 3907 case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI: 3908 case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF: 3909 case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL: 3910 flags |= ((lut_type << ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_S) & 3911 ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_M); 3912 break; 3913 default: 3914 status = -EINVAL; 3915 goto ice_aq_get_set_rss_lut_exit; 3916 } 3917 3918 if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL) { 3919 flags |= ((glob_lut_idx << ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_S) & 3920 ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_M); 3921 3922 if (!set) 3923 goto ice_aq_get_set_rss_lut_send; 3924 } else if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) { 3925 if (!set) 3926 goto ice_aq_get_set_rss_lut_send; 3927 } else { 3928 goto ice_aq_get_set_rss_lut_send; 3929 } 3930 3931 /* LUT size is only valid for Global and PF table types */ 3932 switch (lut_size) { 3933 case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128: 3934 break; 3935 case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512: 3936 flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512_FLAG << 3937 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) & 3938 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M; 3939 break; 3940 case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K: 3941 if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) { 3942 flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG << 3943 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) & 3944 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M; 3945 break; 3946 } 3947 fallthrough; 3948 default: 3949 status = -EINVAL; 3950 goto ice_aq_get_set_rss_lut_exit; 3951 } 3952 3953 ice_aq_get_set_rss_lut_send: 3954 cmd_resp->flags = cpu_to_le16(flags); 3955 status = ice_aq_send_cmd(hw, &desc, lut, lut_size, NULL); 3956 3957 ice_aq_get_set_rss_lut_exit: 3958 return status; 3959 } 3960 3961 /** 3962 * ice_aq_get_rss_lut 3963 * @hw: pointer to the hardware structure 3964 * @get_params: RSS LUT parameters used to specify which RSS LUT to get 3965 * 3966 * get the RSS lookup table, PF or VSI type 3967 */ 3968 int 3969 ice_aq_get_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *get_params) 3970 { 3971 return __ice_aq_get_set_rss_lut(hw, get_params, false); 3972 } 3973 3974 /** 3975 * ice_aq_set_rss_lut 3976 * @hw: pointer to the hardware structure 3977 * @set_params: RSS LUT parameters used to specify how to set the RSS LUT 3978 * 3979 * set the RSS lookup table, PF or VSI type 3980 */ 3981 int 3982 ice_aq_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *set_params) 3983 { 3984 return __ice_aq_get_set_rss_lut(hw, set_params, true); 3985 } 3986 3987 /** 3988 * __ice_aq_get_set_rss_key 3989 * @hw: pointer to the HW struct 3990 * @vsi_id: VSI FW index 3991 * @key: pointer to key info struct 3992 * @set: set true to set the key, false to get the key 3993 * 3994 * get (0x0B04) or set (0x0B02) the RSS key per VSI 3995 */ 3996 static int 3997 __ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id, 3998 struct ice_aqc_get_set_rss_keys *key, bool set) 3999 { 4000 struct ice_aqc_get_set_rss_key *cmd_resp; 4001 u16 key_size = sizeof(*key); 4002 struct ice_aq_desc desc; 4003 4004 cmd_resp = &desc.params.get_set_rss_key; 4005 4006 if (set) { 4007 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_key); 4008 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 4009 } else { 4010 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_key); 4011 } 4012 4013 cmd_resp->vsi_id = cpu_to_le16(((vsi_id << 4014 
ICE_AQC_GSET_RSS_KEY_VSI_ID_S) & 4015 ICE_AQC_GSET_RSS_KEY_VSI_ID_M) | 4016 ICE_AQC_GSET_RSS_KEY_VSI_VALID); 4017 4018 return ice_aq_send_cmd(hw, &desc, key, key_size, NULL); 4019 } 4020 4021 /** 4022 * ice_aq_get_rss_key 4023 * @hw: pointer to the HW struct 4024 * @vsi_handle: software VSI handle 4025 * @key: pointer to key info struct 4026 * 4027 * get the RSS key per VSI 4028 */ 4029 int 4030 ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle, 4031 struct ice_aqc_get_set_rss_keys *key) 4032 { 4033 if (!ice_is_vsi_valid(hw, vsi_handle) || !key) 4034 return -EINVAL; 4035 4036 return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle), 4037 key, false); 4038 } 4039 4040 /** 4041 * ice_aq_set_rss_key 4042 * @hw: pointer to the HW struct 4043 * @vsi_handle: software VSI handle 4044 * @keys: pointer to key info struct 4045 * 4046 * set the RSS key per VSI 4047 */ 4048 int 4049 ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle, 4050 struct ice_aqc_get_set_rss_keys *keys) 4051 { 4052 if (!ice_is_vsi_valid(hw, vsi_handle) || !keys) 4053 return -EINVAL; 4054 4055 return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle), 4056 keys, true); 4057 } 4058 4059 /** 4060 * ice_aq_add_lan_txq 4061 * @hw: pointer to the hardware structure 4062 * @num_qgrps: Number of added queue groups 4063 * @qg_list: list of queue groups to be added 4064 * @buf_size: size of buffer for indirect command 4065 * @cd: pointer to command details structure or NULL 4066 * 4067 * Add Tx LAN queue (0x0C30) 4068 * 4069 * NOTE: 4070 * Prior to calling add Tx LAN queue: 4071 * Initialize the following as part of the Tx queue context: 4072 * Completion queue ID if the queue uses Completion queue, Quanta profile, 4073 * Cache profile and Packet shaper profile. 4074 * 4075 * After add Tx LAN queue AQ command is completed: 4076 * Interrupts should be associated with specific queues, 4077 * Association of Tx queue to Doorbell queue is not part of Add LAN Tx queue 4078 * flow. 
4079 */ 4080 static int 4081 ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps, 4082 struct ice_aqc_add_tx_qgrp *qg_list, u16 buf_size, 4083 struct ice_sq_cd *cd) 4084 { 4085 struct ice_aqc_add_tx_qgrp *list; 4086 struct ice_aqc_add_txqs *cmd; 4087 struct ice_aq_desc desc; 4088 u16 i, sum_size = 0; 4089 4090 cmd = &desc.params.add_txqs; 4091 4092 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_txqs); 4093 4094 if (!qg_list) 4095 return -EINVAL; 4096 4097 if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS) 4098 return -EINVAL; 4099 4100 for (i = 0, list = qg_list; i < num_qgrps; i++) { 4101 sum_size += struct_size(list, txqs, list->num_txqs); 4102 list = (struct ice_aqc_add_tx_qgrp *)(list->txqs + 4103 list->num_txqs); 4104 } 4105 4106 if (buf_size != sum_size) 4107 return -EINVAL; 4108 4109 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 4110 4111 cmd->num_qgrps = num_qgrps; 4112 4113 return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd); 4114 } 4115 4116 /** 4117 * ice_aq_dis_lan_txq 4118 * @hw: pointer to the hardware structure 4119 * @num_qgrps: number of groups in the list 4120 * @qg_list: the list of groups to disable 4121 * @buf_size: the total size of the qg_list buffer in bytes 4122 * @rst_src: if called due to reset, specifies the reset source 4123 * @vmvf_num: the relative VM or VF number that is undergoing the reset 4124 * @cd: pointer to command details structure or NULL 4125 * 4126 * Disable LAN Tx queue (0x0C31) 4127 */ 4128 static int 4129 ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps, 4130 struct ice_aqc_dis_txq_item *qg_list, u16 buf_size, 4131 enum ice_disq_rst_src rst_src, u16 vmvf_num, 4132 struct ice_sq_cd *cd) 4133 { 4134 struct ice_aqc_dis_txq_item *item; 4135 struct ice_aqc_dis_txqs *cmd; 4136 struct ice_aq_desc desc; 4137 u16 i, sz = 0; 4138 int status; 4139 4140 cmd = &desc.params.dis_txqs; 4141 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs); 4142 4143 /* qg_list can be NULL only in VM/VF reset flow */ 4144 if (!qg_list && !rst_src) 4145 return -EINVAL; 4146 4147 if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS) 4148 return -EINVAL; 4149 4150 cmd->num_entries = num_qgrps; 4151 4152 cmd->vmvf_and_timeout = cpu_to_le16((5 << ICE_AQC_Q_DIS_TIMEOUT_S) & 4153 ICE_AQC_Q_DIS_TIMEOUT_M); 4154 4155 switch (rst_src) { 4156 case ICE_VM_RESET: 4157 cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VM_RESET; 4158 cmd->vmvf_and_timeout |= 4159 cpu_to_le16(vmvf_num & ICE_AQC_Q_DIS_VMVF_NUM_M); 4160 break; 4161 case ICE_VF_RESET: 4162 cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VF_RESET; 4163 /* In this case, FW expects vmvf_num to be absolute VF ID */ 4164 cmd->vmvf_and_timeout |= 4165 cpu_to_le16((vmvf_num + hw->func_caps.vf_base_id) & 4166 ICE_AQC_Q_DIS_VMVF_NUM_M); 4167 break; 4168 case ICE_NO_RESET: 4169 default: 4170 break; 4171 } 4172 4173 /* flush pipe on time out */ 4174 cmd->cmd_type |= ICE_AQC_Q_DIS_CMD_FLUSH_PIPE; 4175 /* If no queue group info, we are in a reset flow. 
Issue the AQ */ 4176 if (!qg_list) 4177 goto do_aq; 4178 4179 /* set RD bit to indicate that command buffer is provided by the driver 4180 * and it needs to be read by the firmware 4181 */ 4182 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 4183 4184 for (i = 0, item = qg_list; i < num_qgrps; i++) { 4185 u16 item_size = struct_size(item, q_id, item->num_qs); 4186 4187 /* If the num of queues is even, add 2 bytes of padding */ 4188 if ((item->num_qs % 2) == 0) 4189 item_size += 2; 4190 4191 sz += item_size; 4192 4193 item = (struct ice_aqc_dis_txq_item *)((u8 *)item + item_size); 4194 } 4195 4196 if (buf_size != sz) 4197 return -EINVAL; 4198 4199 do_aq: 4200 status = ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd); 4201 if (status) { 4202 if (!qg_list) 4203 ice_debug(hw, ICE_DBG_SCHED, "VM%d disable failed %d\n", 4204 vmvf_num, hw->adminq.sq_last_status); 4205 else 4206 ice_debug(hw, ICE_DBG_SCHED, "disable queue %d failed %d\n", 4207 le16_to_cpu(qg_list[0].q_id[0]), 4208 hw->adminq.sq_last_status); 4209 } 4210 return status; 4211 } 4212 4213 /** 4214 * ice_aq_add_rdma_qsets 4215 * @hw: pointer to the hardware structure 4216 * @num_qset_grps: Number of RDMA Qset groups 4217 * @qset_list: list of Qset groups to be added 4218 * @buf_size: size of buffer for indirect command 4219 * @cd: pointer to command details structure or NULL 4220 * 4221 * Add Tx RDMA Qsets (0x0C33) 4222 */ 4223 static int 4224 ice_aq_add_rdma_qsets(struct ice_hw *hw, u8 num_qset_grps, 4225 struct ice_aqc_add_rdma_qset_data *qset_list, 4226 u16 buf_size, struct ice_sq_cd *cd) 4227 { 4228 struct ice_aqc_add_rdma_qset_data *list; 4229 struct ice_aqc_add_rdma_qset *cmd; 4230 struct ice_aq_desc desc; 4231 u16 i, sum_size = 0; 4232 4233 cmd = &desc.params.add_rdma_qset; 4234 4235 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_rdma_qset); 4236 4237 if (num_qset_grps > ICE_LAN_TXQ_MAX_QGRPS) 4238 return -EINVAL; 4239 4240 for (i = 0, list = qset_list; i < num_qset_grps; i++) { 4241 u16 num_qsets = le16_to_cpu(list->num_qsets); 4242 4243 sum_size += struct_size(list, rdma_qsets, num_qsets); 4244 list = (struct ice_aqc_add_rdma_qset_data *)(list->rdma_qsets + 4245 num_qsets); 4246 } 4247 4248 if (buf_size != sum_size) 4249 return -EINVAL; 4250 4251 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 4252 4253 cmd->num_qset_grps = num_qset_grps; 4254 4255 return ice_aq_send_cmd(hw, &desc, qset_list, buf_size, cd); 4256 } 4257 4258 /* End of FW Admin Queue command wrappers */ 4259 4260 /** 4261 * ice_write_byte - write a byte to a packed context structure 4262 * @src_ctx: the context structure to read from 4263 * @dest_ctx: the context to be written to 4264 * @ce_info: a description of the struct to be filled 4265 */ 4266 static void 4267 ice_write_byte(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info) 4268 { 4269 u8 src_byte, dest_byte, mask; 4270 u8 *from, *dest; 4271 u16 shift_width; 4272 4273 /* copy from the next struct field */ 4274 from = src_ctx + ce_info->offset; 4275 4276 /* prepare the bits and mask */ 4277 shift_width = ce_info->lsb % 8; 4278 mask = (u8)(BIT(ce_info->width) - 1); 4279 4280 src_byte = *from; 4281 src_byte &= mask; 4282 4283 /* shift to correct alignment */ 4284 mask <<= shift_width; 4285 src_byte <<= shift_width; 4286 4287 /* get the current bits from the target bit string */ 4288 dest = dest_ctx + (ce_info->lsb / 8); 4289 4290 memcpy(&dest_byte, dest, sizeof(dest_byte)); 4291 4292 dest_byte &= ~mask; /* get the bits not changing */ 4293 dest_byte |= src_byte; /* add in the new bits */ 4294 4295 /* 
put it all back */ 4296 memcpy(dest, &dest_byte, sizeof(dest_byte)); 4297 } 4298 4299 /** 4300 * ice_write_word - write a word to a packed context structure 4301 * @src_ctx: the context structure to read from 4302 * @dest_ctx: the context to be written to 4303 * @ce_info: a description of the struct to be filled 4304 */ 4305 static void 4306 ice_write_word(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info) 4307 { 4308 u16 src_word, mask; 4309 __le16 dest_word; 4310 u8 *from, *dest; 4311 u16 shift_width; 4312 4313 /* copy from the next struct field */ 4314 from = src_ctx + ce_info->offset; 4315 4316 /* prepare the bits and mask */ 4317 shift_width = ce_info->lsb % 8; 4318 mask = BIT(ce_info->width) - 1; 4319 4320 /* don't swizzle the bits until after the mask because the mask bits 4321 * will be in a different bit position on big endian machines 4322 */ 4323 src_word = *(u16 *)from; 4324 src_word &= mask; 4325 4326 /* shift to correct alignment */ 4327 mask <<= shift_width; 4328 src_word <<= shift_width; 4329 4330 /* get the current bits from the target bit string */ 4331 dest = dest_ctx + (ce_info->lsb / 8); 4332 4333 memcpy(&dest_word, dest, sizeof(dest_word)); 4334 4335 dest_word &= ~(cpu_to_le16(mask)); /* get the bits not changing */ 4336 dest_word |= cpu_to_le16(src_word); /* add in the new bits */ 4337 4338 /* put it all back */ 4339 memcpy(dest, &dest_word, sizeof(dest_word)); 4340 } 4341 4342 /** 4343 * ice_write_dword - write a dword to a packed context structure 4344 * @src_ctx: the context structure to read from 4345 * @dest_ctx: the context to be written to 4346 * @ce_info: a description of the struct to be filled 4347 */ 4348 static void 4349 ice_write_dword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info) 4350 { 4351 u32 src_dword, mask; 4352 __le32 dest_dword; 4353 u8 *from, *dest; 4354 u16 shift_width; 4355 4356 /* copy from the next struct field */ 4357 from = src_ctx + ce_info->offset; 4358 4359 /* prepare the bits and mask */ 4360 shift_width = ce_info->lsb % 8; 4361 4362 /* if the field width is exactly 32 on an x86 machine, then the shift 4363 * operation will not work because the SHL instructions count is masked 4364 * to 5 bits so the shift will do nothing 4365 */ 4366 if (ce_info->width < 32) 4367 mask = BIT(ce_info->width) - 1; 4368 else 4369 mask = (u32)~0; 4370 4371 /* don't swizzle the bits until after the mask because the mask bits 4372 * will be in a different bit position on big endian machines 4373 */ 4374 src_dword = *(u32 *)from; 4375 src_dword &= mask; 4376 4377 /* shift to correct alignment */ 4378 mask <<= shift_width; 4379 src_dword <<= shift_width; 4380 4381 /* get the current bits from the target bit string */ 4382 dest = dest_ctx + (ce_info->lsb / 8); 4383 4384 memcpy(&dest_dword, dest, sizeof(dest_dword)); 4385 4386 dest_dword &= ~(cpu_to_le32(mask)); /* get the bits not changing */ 4387 dest_dword |= cpu_to_le32(src_dword); /* add in the new bits */ 4388 4389 /* put it all back */ 4390 memcpy(dest, &dest_dword, sizeof(dest_dword)); 4391 } 4392 4393 /** 4394 * ice_write_qword - write a qword to a packed context structure 4395 * @src_ctx: the context structure to read from 4396 * @dest_ctx: the context to be written to 4397 * @ce_info: a description of the struct to be filled 4398 */ 4399 static void 4400 ice_write_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info) 4401 { 4402 u64 src_qword, mask; 4403 __le64 dest_qword; 4404 u8 *from, *dest; 4405 u16 shift_width; 4406 4407 /* copy from the next struct field 
*/ 4408 from = src_ctx + ce_info->offset; 4409 4410 /* prepare the bits and mask */ 4411 shift_width = ce_info->lsb % 8; 4412 4413 /* if the field width is exactly 64 on an x86 machine, then the shift 4414 * operation will not work because the SHL instructions count is masked 4415 * to 6 bits so the shift will do nothing 4416 */ 4417 if (ce_info->width < 64) 4418 mask = BIT_ULL(ce_info->width) - 1; 4419 else 4420 mask = (u64)~0; 4421 4422 /* don't swizzle the bits until after the mask because the mask bits 4423 * will be in a different bit position on big endian machines 4424 */ 4425 src_qword = *(u64 *)from; 4426 src_qword &= mask; 4427 4428 /* shift to correct alignment */ 4429 mask <<= shift_width; 4430 src_qword <<= shift_width; 4431 4432 /* get the current bits from the target bit string */ 4433 dest = dest_ctx + (ce_info->lsb / 8); 4434 4435 memcpy(&dest_qword, dest, sizeof(dest_qword)); 4436 4437 dest_qword &= ~(cpu_to_le64(mask)); /* get the bits not changing */ 4438 dest_qword |= cpu_to_le64(src_qword); /* add in the new bits */ 4439 4440 /* put it all back */ 4441 memcpy(dest, &dest_qword, sizeof(dest_qword)); 4442 } 4443 4444 /** 4445 * ice_set_ctx - set context bits in packed structure 4446 * @hw: pointer to the hardware structure 4447 * @src_ctx: pointer to a generic non-packed context structure 4448 * @dest_ctx: pointer to memory for the packed structure 4449 * @ce_info: a description of the structure to be transformed 4450 */ 4451 int 4452 ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx, 4453 const struct ice_ctx_ele *ce_info) 4454 { 4455 int f; 4456 4457 for (f = 0; ce_info[f].width; f++) { 4458 /* We have to deal with each element of the FW response 4459 * using the correct size so that we are correct regardless 4460 * of the endianness of the machine. 4461 */ 4462 if (ce_info[f].width > (ce_info[f].size_of * BITS_PER_BYTE)) { 4463 ice_debug(hw, ICE_DBG_QCTX, "Field %d width of %d bits larger than size of %d byte(s) ... 
skipping write\n", 4464 f, ce_info[f].width, ce_info[f].size_of); 4465 continue; 4466 } 4467 switch (ce_info[f].size_of) { 4468 case sizeof(u8): 4469 ice_write_byte(src_ctx, dest_ctx, &ce_info[f]); 4470 break; 4471 case sizeof(u16): 4472 ice_write_word(src_ctx, dest_ctx, &ce_info[f]); 4473 break; 4474 case sizeof(u32): 4475 ice_write_dword(src_ctx, dest_ctx, &ce_info[f]); 4476 break; 4477 case sizeof(u64): 4478 ice_write_qword(src_ctx, dest_ctx, &ce_info[f]); 4479 break; 4480 default: 4481 return -EINVAL; 4482 } 4483 } 4484 4485 return 0; 4486 } 4487 4488 /** 4489 * ice_get_lan_q_ctx - get the LAN queue context for the given VSI and TC 4490 * @hw: pointer to the HW struct 4491 * @vsi_handle: software VSI handle 4492 * @tc: TC number 4493 * @q_handle: software queue handle 4494 */ 4495 struct ice_q_ctx * 4496 ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle) 4497 { 4498 struct ice_vsi_ctx *vsi; 4499 struct ice_q_ctx *q_ctx; 4500 4501 vsi = ice_get_vsi_ctx(hw, vsi_handle); 4502 if (!vsi) 4503 return NULL; 4504 if (q_handle >= vsi->num_lan_q_entries[tc]) 4505 return NULL; 4506 if (!vsi->lan_q_ctx[tc]) 4507 return NULL; 4508 q_ctx = vsi->lan_q_ctx[tc]; 4509 return &q_ctx[q_handle]; 4510 } 4511 4512 /** 4513 * ice_ena_vsi_txq 4514 * @pi: port information structure 4515 * @vsi_handle: software VSI handle 4516 * @tc: TC number 4517 * @q_handle: software queue handle 4518 * @num_qgrps: Number of added queue groups 4519 * @buf: list of queue groups to be added 4520 * @buf_size: size of buffer for indirect command 4521 * @cd: pointer to command details structure or NULL 4522 * 4523 * This function adds one LAN queue 4524 */ 4525 int 4526 ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle, 4527 u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size, 4528 struct ice_sq_cd *cd) 4529 { 4530 struct ice_aqc_txsched_elem_data node = { 0 }; 4531 struct ice_sched_node *parent; 4532 struct ice_q_ctx *q_ctx; 4533 struct ice_hw *hw; 4534 int status; 4535 4536 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY) 4537 return -EIO; 4538 4539 if (num_qgrps > 1 || buf->num_txqs > 1) 4540 return -ENOSPC; 4541 4542 hw = pi->hw; 4543 4544 if (!ice_is_vsi_valid(hw, vsi_handle)) 4545 return -EINVAL; 4546 4547 mutex_lock(&pi->sched_lock); 4548 4549 q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handle); 4550 if (!q_ctx) { 4551 ice_debug(hw, ICE_DBG_SCHED, "Enaq: invalid queue handle %d\n", 4552 q_handle); 4553 status = -EINVAL; 4554 goto ena_txq_exit; 4555 } 4556 4557 /* find a parent node */ 4558 parent = ice_sched_get_free_qparent(pi, vsi_handle, tc, 4559 ICE_SCHED_NODE_OWNER_LAN); 4560 if (!parent) { 4561 status = -EINVAL; 4562 goto ena_txq_exit; 4563 } 4564 4565 buf->parent_teid = parent->info.node_teid; 4566 node.parent_teid = parent->info.node_teid; 4567 /* Mark that the values in the "generic" section as valid. The default 4568 * value in the "generic" section is zero. This means that : 4569 * - Scheduling mode is Bytes Per Second (BPS), indicated by Bit 0. 4570 * - 0 priority among siblings, indicated by Bit 1-3. 4571 * - WFQ, indicated by Bit 4. 4572 * - 0 Adjustment value is used in PSM credit update flow, indicated by 4573 * Bit 5-6. 4574 * - Bit 7 is reserved. 4575 * Without setting the generic section as valid in valid_sections, the 4576 * Admin queue command will fail with error code ICE_AQ_RC_EINVAL. 
4577 */ 4578 buf->txqs[0].info.valid_sections = 4579 ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR | 4580 ICE_AQC_ELEM_VALID_EIR; 4581 buf->txqs[0].info.generic = 0; 4582 buf->txqs[0].info.cir_bw.bw_profile_idx = 4583 cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID); 4584 buf->txqs[0].info.cir_bw.bw_alloc = 4585 cpu_to_le16(ICE_SCHED_DFLT_BW_WT); 4586 buf->txqs[0].info.eir_bw.bw_profile_idx = 4587 cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID); 4588 buf->txqs[0].info.eir_bw.bw_alloc = 4589 cpu_to_le16(ICE_SCHED_DFLT_BW_WT); 4590 4591 /* add the LAN queue */ 4592 status = ice_aq_add_lan_txq(hw, num_qgrps, buf, buf_size, cd); 4593 if (status) { 4594 ice_debug(hw, ICE_DBG_SCHED, "enable queue %d failed %d\n", 4595 le16_to_cpu(buf->txqs[0].txq_id), 4596 hw->adminq.sq_last_status); 4597 goto ena_txq_exit; 4598 } 4599 4600 node.node_teid = buf->txqs[0].q_teid; 4601 node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF; 4602 q_ctx->q_handle = q_handle; 4603 q_ctx->q_teid = le32_to_cpu(node.node_teid); 4604 4605 /* add a leaf node into scheduler tree queue layer */ 4606 status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node, NULL); 4607 if (!status) 4608 status = ice_sched_replay_q_bw(pi, q_ctx); 4609 4610 ena_txq_exit: 4611 mutex_unlock(&pi->sched_lock); 4612 return status; 4613 } 4614 4615 /** 4616 * ice_dis_vsi_txq 4617 * @pi: port information structure 4618 * @vsi_handle: software VSI handle 4619 * @tc: TC number 4620 * @num_queues: number of queues 4621 * @q_handles: pointer to software queue handle array 4622 * @q_ids: pointer to the q_id array 4623 * @q_teids: pointer to queue node teids 4624 * @rst_src: if called due to reset, specifies the reset source 4625 * @vmvf_num: the relative VM or VF number that is undergoing the reset 4626 * @cd: pointer to command details structure or NULL 4627 * 4628 * This function removes queues and their corresponding nodes in SW DB 4629 */ 4630 int 4631 ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues, 4632 u16 *q_handles, u16 *q_ids, u32 *q_teids, 4633 enum ice_disq_rst_src rst_src, u16 vmvf_num, 4634 struct ice_sq_cd *cd) 4635 { 4636 struct ice_aqc_dis_txq_item *qg_list; 4637 struct ice_q_ctx *q_ctx; 4638 int status = -ENOENT; 4639 struct ice_hw *hw; 4640 u16 i, buf_size; 4641 4642 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY) 4643 return -EIO; 4644 4645 hw = pi->hw; 4646 4647 if (!num_queues) { 4648 /* if queue is disabled already yet the disable queue command 4649 * has to be sent to complete the VF reset, then call 4650 * ice_aq_dis_lan_txq without any queue information 4651 */ 4652 if (rst_src) 4653 return ice_aq_dis_lan_txq(hw, 0, NULL, 0, rst_src, 4654 vmvf_num, NULL); 4655 return -EIO; 4656 } 4657 4658 buf_size = struct_size(qg_list, q_id, 1); 4659 qg_list = kzalloc(buf_size, GFP_KERNEL); 4660 if (!qg_list) 4661 return -ENOMEM; 4662 4663 mutex_lock(&pi->sched_lock); 4664 4665 for (i = 0; i < num_queues; i++) { 4666 struct ice_sched_node *node; 4667 4668 node = ice_sched_find_node_by_teid(pi->root, q_teids[i]); 4669 if (!node) 4670 continue; 4671 q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handles[i]); 4672 if (!q_ctx) { 4673 ice_debug(hw, ICE_DBG_SCHED, "invalid queue handle%d\n", 4674 q_handles[i]); 4675 continue; 4676 } 4677 if (q_ctx->q_handle != q_handles[i]) { 4678 ice_debug(hw, ICE_DBG_SCHED, "Err:handles %d %d\n", 4679 q_ctx->q_handle, q_handles[i]); 4680 continue; 4681 } 4682 qg_list->parent_teid = node->info.parent_teid; 4683 qg_list->num_qs = 1; 4684 qg_list->q_id[0] = cpu_to_le16(q_ids[i]); 4685 status 
= ice_aq_dis_lan_txq(hw, 1, qg_list, buf_size, rst_src, 4686 vmvf_num, cd); 4687 4688 if (status) 4689 break; 4690 ice_free_sched_node(pi, node); 4691 q_ctx->q_handle = ICE_INVAL_Q_HANDLE; 4692 } 4693 mutex_unlock(&pi->sched_lock); 4694 kfree(qg_list); 4695 return status; 4696 } 4697 4698 /** 4699 * ice_cfg_vsi_qs - configure the new/existing VSI queues 4700 * @pi: port information structure 4701 * @vsi_handle: software VSI handle 4702 * @tc_bitmap: TC bitmap 4703 * @maxqs: max queues array per TC 4704 * @owner: LAN or RDMA 4705 * 4706 * This function adds/updates the VSI queues per TC. 4707 */ 4708 static int 4709 ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap, 4710 u16 *maxqs, u8 owner) 4711 { 4712 int status = 0; 4713 u8 i; 4714 4715 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY) 4716 return -EIO; 4717 4718 if (!ice_is_vsi_valid(pi->hw, vsi_handle)) 4719 return -EINVAL; 4720 4721 mutex_lock(&pi->sched_lock); 4722 4723 ice_for_each_traffic_class(i) { 4724 /* configuration is possible only if TC node is present */ 4725 if (!ice_sched_get_tc_node(pi, i)) 4726 continue; 4727 4728 status = ice_sched_cfg_vsi(pi, vsi_handle, i, maxqs[i], owner, 4729 ice_is_tc_ena(tc_bitmap, i)); 4730 if (status) 4731 break; 4732 } 4733 4734 mutex_unlock(&pi->sched_lock); 4735 return status; 4736 } 4737 4738 /** 4739 * ice_cfg_vsi_lan - configure VSI LAN queues 4740 * @pi: port information structure 4741 * @vsi_handle: software VSI handle 4742 * @tc_bitmap: TC bitmap 4743 * @max_lanqs: max LAN queues array per TC 4744 * 4745 * This function adds/updates the VSI LAN queues per TC. 4746 */ 4747 int 4748 ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap, 4749 u16 *max_lanqs) 4750 { 4751 return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_lanqs, 4752 ICE_SCHED_NODE_OWNER_LAN); 4753 } 4754 4755 /** 4756 * ice_cfg_vsi_rdma - configure the VSI RDMA queues 4757 * @pi: port information structure 4758 * @vsi_handle: software VSI handle 4759 * @tc_bitmap: TC bitmap 4760 * @max_rdmaqs: max RDMA queues array per TC 4761 * 4762 * This function adds/updates the VSI RDMA queues per TC. 
4763 */ 4764 int 4765 ice_cfg_vsi_rdma(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap, 4766 u16 *max_rdmaqs) 4767 { 4768 return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_rdmaqs, 4769 ICE_SCHED_NODE_OWNER_RDMA); 4770 } 4771 4772 /** 4773 * ice_ena_vsi_rdma_qset 4774 * @pi: port information structure 4775 * @vsi_handle: software VSI handle 4776 * @tc: TC number 4777 * @rdma_qset: pointer to RDMA Qset 4778 * @num_qsets: number of RDMA Qsets 4779 * @qset_teid: pointer to Qset node TEIDs 4780 * 4781 * This function adds RDMA Qset 4782 */ 4783 int 4784 ice_ena_vsi_rdma_qset(struct ice_port_info *pi, u16 vsi_handle, u8 tc, 4785 u16 *rdma_qset, u16 num_qsets, u32 *qset_teid) 4786 { 4787 struct ice_aqc_txsched_elem_data node = { 0 }; 4788 struct ice_aqc_add_rdma_qset_data *buf; 4789 struct ice_sched_node *parent; 4790 struct ice_hw *hw; 4791 u16 i, buf_size; 4792 int ret; 4793 4794 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY) 4795 return -EIO; 4796 hw = pi->hw; 4797 4798 if (!ice_is_vsi_valid(hw, vsi_handle)) 4799 return -EINVAL; 4800 4801 buf_size = struct_size(buf, rdma_qsets, num_qsets); 4802 buf = kzalloc(buf_size, GFP_KERNEL); 4803 if (!buf) 4804 return -ENOMEM; 4805 mutex_lock(&pi->sched_lock); 4806 4807 parent = ice_sched_get_free_qparent(pi, vsi_handle, tc, 4808 ICE_SCHED_NODE_OWNER_RDMA); 4809 if (!parent) { 4810 ret = -EINVAL; 4811 goto rdma_error_exit; 4812 } 4813 buf->parent_teid = parent->info.node_teid; 4814 node.parent_teid = parent->info.node_teid; 4815 4816 buf->num_qsets = cpu_to_le16(num_qsets); 4817 for (i = 0; i < num_qsets; i++) { 4818 buf->rdma_qsets[i].tx_qset_id = cpu_to_le16(rdma_qset[i]); 4819 buf->rdma_qsets[i].info.valid_sections = 4820 ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR | 4821 ICE_AQC_ELEM_VALID_EIR; 4822 buf->rdma_qsets[i].info.generic = 0; 4823 buf->rdma_qsets[i].info.cir_bw.bw_profile_idx = 4824 cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID); 4825 buf->rdma_qsets[i].info.cir_bw.bw_alloc = 4826 cpu_to_le16(ICE_SCHED_DFLT_BW_WT); 4827 buf->rdma_qsets[i].info.eir_bw.bw_profile_idx = 4828 cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID); 4829 buf->rdma_qsets[i].info.eir_bw.bw_alloc = 4830 cpu_to_le16(ICE_SCHED_DFLT_BW_WT); 4831 } 4832 ret = ice_aq_add_rdma_qsets(hw, 1, buf, buf_size, NULL); 4833 if (ret) { 4834 ice_debug(hw, ICE_DBG_RDMA, "add RDMA qset failed\n"); 4835 goto rdma_error_exit; 4836 } 4837 node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF; 4838 for (i = 0; i < num_qsets; i++) { 4839 node.node_teid = buf->rdma_qsets[i].qset_teid; 4840 ret = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, 4841 &node, NULL); 4842 if (ret) 4843 break; 4844 qset_teid[i] = le32_to_cpu(node.node_teid); 4845 } 4846 rdma_error_exit: 4847 mutex_unlock(&pi->sched_lock); 4848 kfree(buf); 4849 return ret; 4850 } 4851 4852 /** 4853 * ice_dis_vsi_rdma_qset - free RDMA resources 4854 * @pi: port_info struct 4855 * @count: number of RDMA Qsets to free 4856 * @qset_teid: TEID of Qset node 4857 * @q_id: list of queue IDs being disabled 4858 */ 4859 int 4860 ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid, 4861 u16 *q_id) 4862 { 4863 struct ice_aqc_dis_txq_item *qg_list; 4864 struct ice_hw *hw; 4865 int status = 0; 4866 u16 qg_size; 4867 int i; 4868 4869 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY) 4870 return -EIO; 4871 4872 hw = pi->hw; 4873 4874 qg_size = struct_size(qg_list, q_id, 1); 4875 qg_list = kzalloc(qg_size, GFP_KERNEL); 4876 if (!qg_list) 4877 return -ENOMEM; 4878 4879 mutex_lock(&pi->sched_lock); 4880 4881 for (i = 0; 
i < count; i++) { 4882 struct ice_sched_node *node; 4883 4884 node = ice_sched_find_node_by_teid(pi->root, qset_teid[i]); 4885 if (!node) 4886 continue; 4887 4888 qg_list->parent_teid = node->info.parent_teid; 4889 qg_list->num_qs = 1; 4890 qg_list->q_id[0] = 4891 cpu_to_le16(q_id[i] | 4892 ICE_AQC_Q_DIS_BUF_ELEM_TYPE_RDMA_QSET); 4893 4894 status = ice_aq_dis_lan_txq(hw, 1, qg_list, qg_size, 4895 ICE_NO_RESET, 0, NULL); 4896 if (status) 4897 break; 4898 4899 ice_free_sched_node(pi, node); 4900 } 4901 4902 mutex_unlock(&pi->sched_lock); 4903 kfree(qg_list); 4904 return status; 4905 } 4906 4907 /** 4908 * ice_replay_pre_init - replay pre initialization 4909 * @hw: pointer to the HW struct 4910 * 4911 * Initializes required config data for VSI, FD, ACL, and RSS before replay. 4912 */ 4913 static int ice_replay_pre_init(struct ice_hw *hw) 4914 { 4915 struct ice_switch_info *sw = hw->switch_info; 4916 u8 i; 4917 4918 /* Delete old entries from replay filter list head if there is any */ 4919 ice_rm_all_sw_replay_rule_info(hw); 4920 /* In start of replay, move entries into replay_rules list, it 4921 * will allow adding rules entries back to filt_rules list, 4922 * which is operational list. 4923 */ 4924 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) 4925 list_replace_init(&sw->recp_list[i].filt_rules, 4926 &sw->recp_list[i].filt_replay_rules); 4927 ice_sched_replay_agg_vsi_preinit(hw); 4928 4929 return 0; 4930 } 4931 4932 /** 4933 * ice_replay_vsi - replay VSI configuration 4934 * @hw: pointer to the HW struct 4935 * @vsi_handle: driver VSI handle 4936 * 4937 * Restore all VSI configuration after reset. It is required to call this 4938 * function with main VSI first. 4939 */ 4940 int ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle) 4941 { 4942 int status; 4943 4944 if (!ice_is_vsi_valid(hw, vsi_handle)) 4945 return -EINVAL; 4946 4947 /* Replay pre-initialization if there is any */ 4948 if (vsi_handle == ICE_MAIN_VSI_HANDLE) { 4949 status = ice_replay_pre_init(hw); 4950 if (status) 4951 return status; 4952 } 4953 /* Replay per VSI all RSS configurations */ 4954 status = ice_replay_rss_cfg(hw, vsi_handle); 4955 if (status) 4956 return status; 4957 /* Replay per VSI all filters */ 4958 status = ice_replay_vsi_all_fltr(hw, vsi_handle); 4959 if (!status) 4960 status = ice_replay_vsi_agg(hw, vsi_handle); 4961 return status; 4962 } 4963 4964 /** 4965 * ice_replay_post - post replay configuration cleanup 4966 * @hw: pointer to the HW struct 4967 * 4968 * Post replay cleanup. 4969 */ 4970 void ice_replay_post(struct ice_hw *hw) 4971 { 4972 /* Delete old entries from replay filter list head */ 4973 ice_rm_all_sw_replay_rule_info(hw); 4974 ice_sched_replay_agg(hw); 4975 } 4976 4977 /** 4978 * ice_stat_update40 - read 40 bit stat from the chip and update stat values 4979 * @hw: ptr to the hardware info 4980 * @reg: offset of 64 bit HW register to read from 4981 * @prev_stat_loaded: bool to specify if previous stats are loaded 4982 * @prev_stat: ptr to previous loaded stat value 4983 * @cur_stat: ptr to current stat value 4984 */ 4985 void 4986 ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded, 4987 u64 *prev_stat, u64 *cur_stat) 4988 { 4989 u64 new_data = rd64(hw, reg) & (BIT_ULL(40) - 1); 4990 4991 /* device stats are not reset at PFR, they likely will not be zeroed 4992 * when the driver starts. Thus, save the value from the first read 4993 * without adding to the statistic value so that we report stats which 4994 * count up from zero. 
4995 */ 4996 if (!prev_stat_loaded) { 4997 *prev_stat = new_data; 4998 return; 4999 } 5000 5001 /* Calculate the difference between the new and old values, and then 5002 * add it to the software stat value. 5003 */ 5004 if (new_data >= *prev_stat) 5005 *cur_stat += new_data - *prev_stat; 5006 else 5007 /* to manage the potential roll-over */ 5008 *cur_stat += (new_data + BIT_ULL(40)) - *prev_stat; 5009 5010 /* Update the previously stored value to prepare for next read */ 5011 *prev_stat = new_data; 5012 } 5013 5014 /** 5015 * ice_stat_update32 - read 32 bit stat from the chip and update stat values 5016 * @hw: ptr to the hardware info 5017 * @reg: offset of HW register to read from 5018 * @prev_stat_loaded: bool to specify if previous stats are loaded 5019 * @prev_stat: ptr to previous loaded stat value 5020 * @cur_stat: ptr to current stat value 5021 */ 5022 void 5023 ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded, 5024 u64 *prev_stat, u64 *cur_stat) 5025 { 5026 u32 new_data; 5027 5028 new_data = rd32(hw, reg); 5029 5030 /* device stats are not reset at PFR, they likely will not be zeroed 5031 * when the driver starts. Thus, save the value from the first read 5032 * without adding to the statistic value so that we report stats which 5033 * count up from zero. 5034 */ 5035 if (!prev_stat_loaded) { 5036 *prev_stat = new_data; 5037 return; 5038 } 5039 5040 /* Calculate the difference between the new and old values, and then 5041 * add it to the software stat value. 5042 */ 5043 if (new_data >= *prev_stat) 5044 *cur_stat += new_data - *prev_stat; 5045 else 5046 /* to manage the potential roll-over */ 5047 *cur_stat += (new_data + BIT_ULL(32)) - *prev_stat; 5048 5049 /* Update the previously stored value to prepare for next read */ 5050 *prev_stat = new_data; 5051 } 5052 5053 /** 5054 * ice_sched_query_elem - query element information from HW 5055 * @hw: pointer to the HW struct 5056 * @node_teid: node TEID to be queried 5057 * @buf: buffer to element information 5058 * 5059 * This function queries HW element information 5060 */ 5061 int 5062 ice_sched_query_elem(struct ice_hw *hw, u32 node_teid, 5063 struct ice_aqc_txsched_elem_data *buf) 5064 { 5065 u16 buf_size, num_elem_ret = 0; 5066 int status; 5067 5068 buf_size = sizeof(*buf); 5069 memset(buf, 0, buf_size); 5070 buf->node_teid = cpu_to_le32(node_teid); 5071 status = ice_aq_query_sched_elems(hw, 1, buf, buf_size, &num_elem_ret, 5072 NULL); 5073 if (status || num_elem_ret != 1) 5074 ice_debug(hw, ICE_DBG_SCHED, "query element failed\n"); 5075 return status; 5076 } 5077 5078 /** 5079 * ice_aq_read_i2c 5080 * @hw: pointer to the hw struct 5081 * @topo_addr: topology address for a device to communicate with 5082 * @bus_addr: 7-bit I2C bus address 5083 * @addr: I2C memory address (I2C offset) with up to 16 bits 5084 * @params: I2C parameters: bit [7] - Repeated start, 5085 * bits [6:5] data offset size, 5086 * bit [4] - I2C address type, 5087 * bits [3:0] - data size to read (0-16 bytes) 5088 * @data: pointer to data (0 to 16 bytes) to be read from the I2C device 5089 * @cd: pointer to command details structure or NULL 5090 * 5091 * Read I2C (0x06E2) 5092 */ 5093 int 5094 ice_aq_read_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr, 5095 u16 bus_addr, __le16 addr, u8 params, u8 *data, 5096 struct ice_sq_cd *cd) 5097 { 5098 struct ice_aq_desc desc = { 0 }; 5099 struct ice_aqc_i2c *cmd; 5100 u8 data_size; 5101 int status; 5102 5103 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_read_i2c); 5104 cmd = 
&desc.params.read_write_i2c; 5105 5106 if (!data) 5107 return -EINVAL; 5108 5109 data_size = FIELD_GET(ICE_AQC_I2C_DATA_SIZE_M, params); 5110 5111 cmd->i2c_bus_addr = cpu_to_le16(bus_addr); 5112 cmd->topo_addr = topo_addr; 5113 cmd->i2c_params = params; 5114 cmd->i2c_addr = addr; 5115 5116 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 5117 if (!status) { 5118 struct ice_aqc_read_i2c_resp *resp; 5119 u8 i; 5120 5121 resp = &desc.params.read_i2c_resp; 5122 for (i = 0; i < data_size; i++) { 5123 *data = resp->i2c_data[i]; 5124 data++; 5125 } 5126 } 5127 5128 return status; 5129 } 5130 5131 /** 5132 * ice_aq_write_i2c 5133 * @hw: pointer to the hw struct 5134 * @topo_addr: topology address for a device to communicate with 5135 * @bus_addr: 7-bit I2C bus address 5136 * @addr: I2C memory address (I2C offset) with up to 16 bits 5137 * @params: I2C parameters: bit [4] - I2C address type, bits [3:0] - data size to write (0-7 bytes) 5138 * @data: pointer to data (0 to 4 bytes) to be written to the I2C device 5139 * @cd: pointer to command details structure or NULL 5140 * 5141 * Write I2C (0x06E3) 5142 * 5143 * * Return: 5144 * * 0 - Successful write to the i2c device 5145 * * -EINVAL - Data size greater than 4 bytes 5146 * * -EIO - FW error 5147 */ 5148 int 5149 ice_aq_write_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr, 5150 u16 bus_addr, __le16 addr, u8 params, u8 *data, 5151 struct ice_sq_cd *cd) 5152 { 5153 struct ice_aq_desc desc = { 0 }; 5154 struct ice_aqc_i2c *cmd; 5155 u8 data_size; 5156 5157 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_write_i2c); 5158 cmd = &desc.params.read_write_i2c; 5159 5160 data_size = FIELD_GET(ICE_AQC_I2C_DATA_SIZE_M, params); 5161 5162 /* data_size limited to 4 */ 5163 if (data_size > 4) 5164 return -EINVAL; 5165 5166 cmd->i2c_bus_addr = cpu_to_le16(bus_addr); 5167 cmd->topo_addr = topo_addr; 5168 cmd->i2c_params = params; 5169 cmd->i2c_addr = addr; 5170 5171 memcpy(cmd->i2c_data, data, data_size); 5172 5173 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 5174 } 5175 5176 /** 5177 * ice_aq_set_driver_param - Set driver parameter to share via firmware 5178 * @hw: pointer to the HW struct 5179 * @idx: parameter index to set 5180 * @value: the value to set the parameter to 5181 * @cd: pointer to command details structure or NULL 5182 * 5183 * Set the value of one of the software defined parameters. All PFs connected 5184 * to this device can read the value using ice_aq_get_driver_param. 5185 * 5186 * Note that firmware provides no synchronization or locking, and will not 5187 * save the parameter value during a device reset. It is expected that 5188 * a single PF will write the parameter value, while all other PFs will only 5189 * read it. 
5190 */ 5191 int 5192 ice_aq_set_driver_param(struct ice_hw *hw, enum ice_aqc_driver_params idx, 5193 u32 value, struct ice_sq_cd *cd) 5194 { 5195 struct ice_aqc_driver_shared_params *cmd; 5196 struct ice_aq_desc desc; 5197 5198 if (idx >= ICE_AQC_DRIVER_PARAM_MAX) 5199 return -EIO; 5200 5201 cmd = &desc.params.drv_shared_params; 5202 5203 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_shared_params); 5204 5205 cmd->set_or_get_op = ICE_AQC_DRIVER_PARAM_SET; 5206 cmd->param_indx = idx; 5207 cmd->param_val = cpu_to_le32(value); 5208 5209 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 5210 } 5211 5212 /** 5213 * ice_aq_get_driver_param - Get driver parameter shared via firmware 5214 * @hw: pointer to the HW struct 5215 * @idx: parameter index to set 5216 * @value: storage to return the shared parameter 5217 * @cd: pointer to command details structure or NULL 5218 * 5219 * Get the value of one of the software defined parameters. 5220 * 5221 * Note that firmware provides no synchronization or locking. It is expected 5222 * that only a single PF will write a given parameter. 5223 */ 5224 int 5225 ice_aq_get_driver_param(struct ice_hw *hw, enum ice_aqc_driver_params idx, 5226 u32 *value, struct ice_sq_cd *cd) 5227 { 5228 struct ice_aqc_driver_shared_params *cmd; 5229 struct ice_aq_desc desc; 5230 int status; 5231 5232 if (idx >= ICE_AQC_DRIVER_PARAM_MAX) 5233 return -EIO; 5234 5235 cmd = &desc.params.drv_shared_params; 5236 5237 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_shared_params); 5238 5239 cmd->set_or_get_op = ICE_AQC_DRIVER_PARAM_GET; 5240 cmd->param_indx = idx; 5241 5242 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 5243 if (status) 5244 return status; 5245 5246 *value = le32_to_cpu(cmd->param_val); 5247 5248 return 0; 5249 } 5250 5251 /** 5252 * ice_aq_set_gpio 5253 * @hw: pointer to the hw struct 5254 * @gpio_ctrl_handle: GPIO controller node handle 5255 * @pin_idx: IO Number of the GPIO that needs to be set 5256 * @value: SW provide IO value to set in the LSB 5257 * @cd: pointer to command details structure or NULL 5258 * 5259 * Sends 0x06EC AQ command to set the GPIO pin state that's part of the topology 5260 */ 5261 int 5262 ice_aq_set_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, bool value, 5263 struct ice_sq_cd *cd) 5264 { 5265 struct ice_aqc_gpio *cmd; 5266 struct ice_aq_desc desc; 5267 5268 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_gpio); 5269 cmd = &desc.params.read_write_gpio; 5270 cmd->gpio_ctrl_handle = cpu_to_le16(gpio_ctrl_handle); 5271 cmd->gpio_num = pin_idx; 5272 cmd->gpio_val = value ? 
1 : 0; 5273 5274 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 5275 } 5276 5277 /** 5278 * ice_aq_get_gpio 5279 * @hw: pointer to the hw struct 5280 * @gpio_ctrl_handle: GPIO controller node handle 5281 * @pin_idx: IO Number of the GPIO that needs to be set 5282 * @value: IO value read 5283 * @cd: pointer to command details structure or NULL 5284 * 5285 * Sends 0x06ED AQ command to get the value of a GPIO signal which is part of 5286 * the topology 5287 */ 5288 int 5289 ice_aq_get_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, 5290 bool *value, struct ice_sq_cd *cd) 5291 { 5292 struct ice_aqc_gpio *cmd; 5293 struct ice_aq_desc desc; 5294 int status; 5295 5296 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_gpio); 5297 cmd = &desc.params.read_write_gpio; 5298 cmd->gpio_ctrl_handle = cpu_to_le16(gpio_ctrl_handle); 5299 cmd->gpio_num = pin_idx; 5300 5301 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 5302 if (status) 5303 return status; 5304 5305 *value = !!cmd->gpio_val; 5306 return 0; 5307 } 5308 5309 /** 5310 * ice_is_fw_api_min_ver 5311 * @hw: pointer to the hardware structure 5312 * @maj: major version 5313 * @min: minor version 5314 * @patch: patch version 5315 * 5316 * Checks if the firmware API is minimum version 5317 */ 5318 static bool ice_is_fw_api_min_ver(struct ice_hw *hw, u8 maj, u8 min, u8 patch) 5319 { 5320 if (hw->api_maj_ver == maj) { 5321 if (hw->api_min_ver > min) 5322 return true; 5323 if (hw->api_min_ver == min && hw->api_patch >= patch) 5324 return true; 5325 } else if (hw->api_maj_ver > maj) { 5326 return true; 5327 } 5328 5329 return false; 5330 } 5331 5332 /** 5333 * ice_fw_supports_link_override 5334 * @hw: pointer to the hardware structure 5335 * 5336 * Checks if the firmware supports link override 5337 */ 5338 bool ice_fw_supports_link_override(struct ice_hw *hw) 5339 { 5340 return ice_is_fw_api_min_ver(hw, ICE_FW_API_LINK_OVERRIDE_MAJ, 5341 ICE_FW_API_LINK_OVERRIDE_MIN, 5342 ICE_FW_API_LINK_OVERRIDE_PATCH); 5343 } 5344 5345 /** 5346 * ice_get_link_default_override 5347 * @ldo: pointer to the link default override struct 5348 * @pi: pointer to the port info struct 5349 * 5350 * Gets the link default override for a port 5351 */ 5352 int 5353 ice_get_link_default_override(struct ice_link_default_override_tlv *ldo, 5354 struct ice_port_info *pi) 5355 { 5356 u16 i, tlv, tlv_len, tlv_start, buf, offset; 5357 struct ice_hw *hw = pi->hw; 5358 int status; 5359 5360 status = ice_get_pfa_module_tlv(hw, &tlv, &tlv_len, 5361 ICE_SR_LINK_DEFAULT_OVERRIDE_PTR); 5362 if (status) { 5363 ice_debug(hw, ICE_DBG_INIT, "Failed to read link override TLV.\n"); 5364 return status; 5365 } 5366 5367 /* Each port has its own config; calculate for our port */ 5368 tlv_start = tlv + pi->lport * ICE_SR_PFA_LINK_OVERRIDE_WORDS + 5369 ICE_SR_PFA_LINK_OVERRIDE_OFFSET; 5370 5371 /* link options first */ 5372 status = ice_read_sr_word(hw, tlv_start, &buf); 5373 if (status) { 5374 ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n"); 5375 return status; 5376 } 5377 ldo->options = buf & ICE_LINK_OVERRIDE_OPT_M; 5378 ldo->phy_config = (buf & ICE_LINK_OVERRIDE_PHY_CFG_M) >> 5379 ICE_LINK_OVERRIDE_PHY_CFG_S; 5380 5381 /* link PHY config */ 5382 offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_FEC_OFFSET; 5383 status = ice_read_sr_word(hw, offset, &buf); 5384 if (status) { 5385 ice_debug(hw, ICE_DBG_INIT, "Failed to read override phy config.\n"); 5386 return status; 5387 } 5388 ldo->fec_options = buf & ICE_LINK_OVERRIDE_FEC_OPT_M; 5389 5390 /* PHY types low */ 5391 
offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET; 5392 for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) { 5393 status = ice_read_sr_word(hw, (offset + i), &buf); 5394 if (status) { 5395 ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n"); 5396 return status; 5397 } 5398 /* shift 16 bits at a time to fill 64 bits */ 5399 ldo->phy_type_low |= ((u64)buf << (i * 16)); 5400 } 5401 5402 /* PHY types high */ 5403 offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET + 5404 ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; 5405 for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) { 5406 status = ice_read_sr_word(hw, (offset + i), &buf); 5407 if (status) { 5408 ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n"); 5409 return status; 5410 } 5411 /* shift 16 bits at a time to fill 64 bits */ 5412 ldo->phy_type_high |= ((u64)buf << (i * 16)); 5413 } 5414 5415 return status; 5416 } 5417 5418 /** 5419 * ice_is_phy_caps_an_enabled - check if PHY capabilities autoneg is enabled 5420 * @caps: get PHY capability data 5421 */ 5422 bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps) 5423 { 5424 if (caps->caps & ICE_AQC_PHY_AN_MODE || 5425 caps->low_power_ctrl_an & (ICE_AQC_PHY_AN_EN_CLAUSE28 | 5426 ICE_AQC_PHY_AN_EN_CLAUSE73 | 5427 ICE_AQC_PHY_AN_EN_CLAUSE37)) 5428 return true; 5429 5430 return false; 5431 } 5432 5433 /** 5434 * ice_aq_set_lldp_mib - Set the LLDP MIB 5435 * @hw: pointer to the HW struct 5436 * @mib_type: Local, Remote or both Local and Remote MIBs 5437 * @buf: pointer to the caller-supplied buffer to store the MIB block 5438 * @buf_size: size of the buffer (in bytes) 5439 * @cd: pointer to command details structure or NULL 5440 * 5441 * Set the LLDP MIB. (0x0A08) 5442 */ 5443 int 5444 ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size, 5445 struct ice_sq_cd *cd) 5446 { 5447 struct ice_aqc_lldp_set_local_mib *cmd; 5448 struct ice_aq_desc desc; 5449 5450 cmd = &desc.params.lldp_set_mib; 5451 5452 if (buf_size == 0 || !buf) 5453 return -EINVAL; 5454 5455 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_set_local_mib); 5456 5457 desc.flags |= cpu_to_le16((u16)ICE_AQ_FLAG_RD); 5458 desc.datalen = cpu_to_le16(buf_size); 5459 5460 cmd->type = mib_type; 5461 cmd->length = cpu_to_le16(buf_size); 5462 5463 return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); 5464 } 5465 5466 /** 5467 * ice_fw_supports_lldp_fltr_ctrl - check NVM version supports lldp_fltr_ctrl 5468 * @hw: pointer to HW struct 5469 */ 5470 bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw) 5471 { 5472 if (hw->mac_type != ICE_MAC_E810) 5473 return false; 5474 5475 return ice_is_fw_api_min_ver(hw, ICE_FW_API_LLDP_FLTR_MAJ, 5476 ICE_FW_API_LLDP_FLTR_MIN, 5477 ICE_FW_API_LLDP_FLTR_PATCH); 5478 } 5479 5480 /** 5481 * ice_lldp_fltr_add_remove - add or remove a LLDP Rx switch filter 5482 * @hw: pointer to HW struct 5483 * @vsi_num: absolute HW index for VSI 5484 * @add: boolean for if adding or removing a filter 5485 */ 5486 int 5487 ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add) 5488 { 5489 struct ice_aqc_lldp_filter_ctrl *cmd; 5490 struct ice_aq_desc desc; 5491 5492 cmd = &desc.params.lldp_filter_ctrl; 5493 5494 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_filter_ctrl); 5495 5496 if (add) 5497 cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_ADD; 5498 else 5499 cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_DELETE; 5500 5501 cmd->vsi_num = cpu_to_le16(vsi_num); 5502 5503 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 
5504 } 5505 5506 /** 5507 * ice_fw_supports_report_dflt_cfg 5508 * @hw: pointer to the hardware structure 5509 * 5510 * Checks if the firmware supports report default configuration 5511 */ 5512 bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw) 5513 { 5514 return ice_is_fw_api_min_ver(hw, ICE_FW_API_REPORT_DFLT_CFG_MAJ, 5515 ICE_FW_API_REPORT_DFLT_CFG_MIN, 5516 ICE_FW_API_REPORT_DFLT_CFG_PATCH); 5517 } 5518 5519 /* each of the indexes into the following array match the speed of a return 5520 * value from the list of AQ returned speeds like the range: 5521 * ICE_AQ_LINK_SPEED_10MB .. ICE_AQ_LINK_SPEED_100GB excluding 5522 * ICE_AQ_LINK_SPEED_UNKNOWN which is BIT(15) and maps to BIT(14) in this 5523 * array. The array is defined as 15 elements long because the link_speed 5524 * returned by the firmware is a 16 bit * value, but is indexed 5525 * by [fls(speed) - 1] 5526 */ 5527 static const u32 ice_aq_to_link_speed[15] = { 5528 SPEED_10, /* BIT(0) */ 5529 SPEED_100, 5530 SPEED_1000, 5531 SPEED_2500, 5532 SPEED_5000, 5533 SPEED_10000, 5534 SPEED_20000, 5535 SPEED_25000, 5536 SPEED_40000, 5537 SPEED_50000, 5538 SPEED_100000, /* BIT(10) */ 5539 0, 5540 0, 5541 0, 5542 0 /* BIT(14) */ 5543 }; 5544 5545 /** 5546 * ice_get_link_speed - get integer speed from table 5547 * @index: array index from fls(aq speed) - 1 5548 * 5549 * Returns: u32 value containing integer speed 5550 */ 5551 u32 ice_get_link_speed(u16 index) 5552 { 5553 return ice_aq_to_link_speed[index]; 5554 } 5555