// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

#include "ice_common.h"
#include "ice_lib.h"
#include "ice_sched.h"
#include "ice_adminq_cmd.h"
#include "ice_flow.h"

#define ICE_PF_RESET_WAIT_COUNT	300

/**
 * ice_set_mac_type - Sets MAC type
 * @hw: pointer to the HW structure
 *
 * This function sets the MAC type of the adapter based on the
 * vendor ID and device ID stored in the HW structure.
 */
static enum ice_status ice_set_mac_type(struct ice_hw *hw)
{
	if (hw->vendor_id != PCI_VENDOR_ID_INTEL)
		return ICE_ERR_DEVICE_NOT_SUPPORTED;

	switch (hw->device_id) {
	case ICE_DEV_ID_E810C_BACKPLANE:
	case ICE_DEV_ID_E810C_QSFP:
	case ICE_DEV_ID_E810C_SFP:
	case ICE_DEV_ID_E810_XXV_SFP:
		hw->mac_type = ICE_MAC_E810;
		break;
	case ICE_DEV_ID_E823C_10G_BASE_T:
	case ICE_DEV_ID_E823C_BACKPLANE:
	case ICE_DEV_ID_E823C_QSFP:
	case ICE_DEV_ID_E823C_SFP:
	case ICE_DEV_ID_E823C_SGMII:
	case ICE_DEV_ID_E822C_10G_BASE_T:
	case ICE_DEV_ID_E822C_BACKPLANE:
	case ICE_DEV_ID_E822C_QSFP:
	case ICE_DEV_ID_E822C_SFP:
	case ICE_DEV_ID_E822C_SGMII:
	case ICE_DEV_ID_E822L_10G_BASE_T:
	case ICE_DEV_ID_E822L_BACKPLANE:
	case ICE_DEV_ID_E822L_SFP:
	case ICE_DEV_ID_E822L_SGMII:
	case ICE_DEV_ID_E823L_10G_BASE_T:
	case ICE_DEV_ID_E823L_1GBE:
	case ICE_DEV_ID_E823L_BACKPLANE:
	case ICE_DEV_ID_E823L_QSFP:
	case ICE_DEV_ID_E823L_SFP:
		hw->mac_type = ICE_MAC_GENERIC;
		break;
	default:
		hw->mac_type = ICE_MAC_UNKNOWN;
		break;
	}

	ice_debug(hw, ICE_DBG_INIT, "mac_type: %d\n", hw->mac_type);
	return 0;
}

/**
 * ice_is_e810
 * @hw: pointer to the hardware structure
 *
 * returns true if the device is E810 based, false if not.
 */
bool ice_is_e810(struct ice_hw *hw)
{
	return hw->mac_type == ICE_MAC_E810;
}

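/* Usage sketch (illustrative, not from the original sources): the MAC type
 * set above is typically consumed through ice_is_e810() to guard
 * device-specific paths:
 *
 *	if (ice_is_e810(hw))
 *		...E810-only handling...
 *	else
 *		...generic E822/E823 handling...
 */
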
/**
 * ice_clear_pf_cfg - Clear PF configuration
 * @hw: pointer to the hardware structure
 *
 * Clears any existing PF configuration (VSIs, VSI lists, switch rules, port
 * configuration, flow director filters, etc.).
 */
enum ice_status ice_clear_pf_cfg(struct ice_hw *hw)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pf_cfg);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_aq_manage_mac_read - manage MAC address read command
 * @hw: pointer to the HW struct
 * @buf: a virtual buffer to hold the manage MAC read response
 * @buf_size: Size of the virtual buffer
 * @cd: pointer to command details structure or NULL
 *
 * This function is used to return the per-PF station MAC address (0x0107).
 * NOTE: Upon successful completion of this command, MAC address information
 * is returned in the user-specified buffer, which should be interpreted as a
 * "manage_mac_read" response. The MAC addresses in the response are also
 * saved in the port's MAC info (hw->port_info->mac).
 * ice_discover_dev_caps is expected to be called before this function.
 */
static enum ice_status
ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
		       struct ice_sq_cd *cd)
{
	struct ice_aqc_manage_mac_read_resp *resp;
	struct ice_aqc_manage_mac_read *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;
	u16 flags;
	u8 i;

	cmd = &desc.params.mac_read;

	if (buf_size < sizeof(*resp))
		return ICE_ERR_BUF_TOO_SHORT;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_read);

	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (status)
		return status;

	resp = buf;
	flags = le16_to_cpu(cmd->flags) & ICE_AQC_MAN_MAC_READ_M;

	if (!(flags & ICE_AQC_MAN_MAC_LAN_ADDR_VALID)) {
		ice_debug(hw, ICE_DBG_LAN, "got invalid MAC address\n");
		return ICE_ERR_CFG;
	}

	/* A single port can report up to two (LAN and WoL) addresses */
	for (i = 0; i < cmd->num_addr; i++)
		if (resp[i].addr_type == ICE_AQC_MAN_MAC_ADDR_TYPE_LAN) {
			ether_addr_copy(hw->port_info->mac.lan_addr,
					resp[i].mac_addr);
			ether_addr_copy(hw->port_info->mac.perm_addr,
					resp[i].mac_addr);
			break;
		}

	return 0;
}

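/* Usage sketch (illustrative): a port can report up to two (LAN and WoL)
 * addresses, so callers size the buffer for two response elements, as
 * ice_init_hw() does later in this file:
 *
 *	struct ice_aqc_manage_mac_read_resp buf[2] = {};
 *	enum ice_status status;
 *
 *	status = ice_aq_manage_mac_read(hw, buf, sizeof(buf), NULL);
 */
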
/**
 * ice_aq_get_phy_caps - returns PHY capabilities
 * @pi: port information structure
 * @qual_mods: report qualified modules
 * @report_mode: report mode capabilities
 * @pcaps: structure for PHY capabilities to be filled
 * @cd: pointer to command details structure or NULL
 *
 * Returns the various PHY capabilities supported on the Port (0x0600)
 */
enum ice_status
ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
		    struct ice_aqc_get_phy_caps_data *pcaps,
		    struct ice_sq_cd *cd)
{
	struct ice_aqc_get_phy_caps *cmd;
	u16 pcaps_size = sizeof(*pcaps);
	struct ice_aq_desc desc;
	enum ice_status status;
	struct ice_hw *hw;

	cmd = &desc.params.get_phy;

	if (!pcaps || (report_mode & ~ICE_AQC_REPORT_MODE_M) || !pi)
		return ICE_ERR_PARAM;
	hw = pi->hw;

	if (report_mode == ICE_AQC_REPORT_DFLT_CFG &&
	    !ice_fw_supports_report_dflt_cfg(hw))
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_caps);

	if (qual_mods)
		cmd->param0 |= cpu_to_le16(ICE_AQC_GET_PHY_RQM);

	cmd->param0 |= cpu_to_le16(report_mode);
	status = ice_aq_send_cmd(hw, &desc, pcaps, pcaps_size, cd);

	ice_debug(hw, ICE_DBG_LINK, "get phy caps - report_mode = 0x%x\n",
		  report_mode);
	ice_debug(hw, ICE_DBG_LINK, " phy_type_low = 0x%llx\n",
		  (unsigned long long)le64_to_cpu(pcaps->phy_type_low));
	ice_debug(hw, ICE_DBG_LINK, " phy_type_high = 0x%llx\n",
		  (unsigned long long)le64_to_cpu(pcaps->phy_type_high));
	ice_debug(hw, ICE_DBG_LINK, " caps = 0x%x\n", pcaps->caps);
	ice_debug(hw, ICE_DBG_LINK, " low_power_ctrl_an = 0x%x\n",
		  pcaps->low_power_ctrl_an);
	ice_debug(hw, ICE_DBG_LINK, " eee_cap = 0x%x\n", pcaps->eee_cap);
	ice_debug(hw, ICE_DBG_LINK, " eeer_value = 0x%x\n",
		  pcaps->eeer_value);
	ice_debug(hw, ICE_DBG_LINK, " link_fec_options = 0x%x\n",
		  pcaps->link_fec_options);
	ice_debug(hw, ICE_DBG_LINK, " module_compliance_enforcement = 0x%x\n",
		  pcaps->module_compliance_enforcement);
	ice_debug(hw, ICE_DBG_LINK, " extended_compliance_code = 0x%x\n",
		  pcaps->extended_compliance_code);
	ice_debug(hw, ICE_DBG_LINK, " module_type[0] = 0x%x\n",
		  pcaps->module_type[0]);
	ice_debug(hw, ICE_DBG_LINK, " module_type[1] = 0x%x\n",
		  pcaps->module_type[1]);
	ice_debug(hw, ICE_DBG_LINK, " module_type[2] = 0x%x\n",
		  pcaps->module_type[2]);

	if (!status && report_mode == ICE_AQC_REPORT_TOPO_CAP_MEDIA) {
		pi->phy.phy_type_low = le64_to_cpu(pcaps->phy_type_low);
		pi->phy.phy_type_high = le64_to_cpu(pcaps->phy_type_high);
		memcpy(pi->phy.link_info.module_type, &pcaps->module_type,
		       sizeof(pi->phy.link_info.module_type));
	}

	return status;
}

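/* Usage sketch (illustrative), assuming "pi" is an initialized port_info:
 *
 *	struct ice_aqc_get_phy_caps_data pcaps = {};
 *	enum ice_status status;
 *
 *	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
 *				     &pcaps, NULL);
 *	if (!status)
 *		...pcaps.phy_type_low/high now describe the available media...
 */
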
/**
 * ice_aq_get_link_topo_handle - get link topology node return status
 * @pi: port information structure
 * @node_type: requested node type
 * @cd: pointer to command details structure or NULL
 *
 * Get link topology node return status for specified node type (0x06E0)
 *
 * Node type cage can be used to determine if cage is present. If AQC
 * returns error (ENOENT), then no cage present. If no cage present, then
 * connection type is backplane or BASE-T.
 */
static enum ice_status
ice_aq_get_link_topo_handle(struct ice_port_info *pi, u8 node_type,
			    struct ice_sq_cd *cd)
{
	struct ice_aqc_get_link_topo *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.get_link_topo;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);

	cmd->addr.node_type_ctx = (ICE_AQC_LINK_TOPO_NODE_CTX_PORT <<
				   ICE_AQC_LINK_TOPO_NODE_CTX_S);

	/* set node type */
	cmd->addr.node_type_ctx |= (ICE_AQC_LINK_TOPO_NODE_TYPE_M & node_type);

	return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
}

/**
 * ice_is_media_cage_present
 * @pi: port information structure
 *
 * Returns true if media cage is present, else false. If no cage, then
 * media type is backplane or BASE-T.
 */
static bool ice_is_media_cage_present(struct ice_port_info *pi)
{
	/* Node type cage can be used to determine if cage is present. If AQC
	 * returns error (ENOENT), then no cage present. If no cage present then
	 * connection type is backplane or BASE-T.
	 */
	return !ice_aq_get_link_topo_handle(pi,
					    ICE_AQC_LINK_TOPO_NODE_TYPE_CAGE,
					    NULL);
}

/**
 * ice_get_media_type - Gets media type
 * @pi: port information structure
 */
static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
{
	struct ice_link_status *hw_link_info;

	if (!pi)
		return ICE_MEDIA_UNKNOWN;

	hw_link_info = &pi->phy.link_info;
	if (hw_link_info->phy_type_low && hw_link_info->phy_type_high)
		/* If more than one media type is selected, report unknown */
		return ICE_MEDIA_UNKNOWN;

	if (hw_link_info->phy_type_low) {
		/* 1G SGMII is a special case where some DA cable PHYs
		 * may show this as an option when it really shouldn't
		 * be since SGMII is meant to be between a MAC and a PHY
		 * in a backplane. Try to detect this case and handle it
		 */
		if (hw_link_info->phy_type_low == ICE_PHY_TYPE_LOW_1G_SGMII &&
		    (hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] ==
		     ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE ||
		     hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] ==
		     ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE))
			return ICE_MEDIA_DA;

		switch (hw_link_info->phy_type_low) {
		case ICE_PHY_TYPE_LOW_1000BASE_SX:
		case ICE_PHY_TYPE_LOW_1000BASE_LX:
		case ICE_PHY_TYPE_LOW_10GBASE_SR:
		case ICE_PHY_TYPE_LOW_10GBASE_LR:
		case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
		case ICE_PHY_TYPE_LOW_25GBASE_SR:
		case ICE_PHY_TYPE_LOW_25GBASE_LR:
		case ICE_PHY_TYPE_LOW_40GBASE_SR4:
		case ICE_PHY_TYPE_LOW_40GBASE_LR4:
		case ICE_PHY_TYPE_LOW_50GBASE_SR2:
		case ICE_PHY_TYPE_LOW_50GBASE_LR2:
		case ICE_PHY_TYPE_LOW_50GBASE_SR:
		case ICE_PHY_TYPE_LOW_50GBASE_FR:
		case ICE_PHY_TYPE_LOW_50GBASE_LR:
		case ICE_PHY_TYPE_LOW_100GBASE_SR4:
		case ICE_PHY_TYPE_LOW_100GBASE_LR4:
		case ICE_PHY_TYPE_LOW_100GBASE_SR2:
		case ICE_PHY_TYPE_LOW_100GBASE_DR:
		case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
		case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
		case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
		case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
		case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
		case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
		case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
		case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
			return ICE_MEDIA_FIBER;
		case ICE_PHY_TYPE_LOW_100BASE_TX:
		case ICE_PHY_TYPE_LOW_1000BASE_T:
		case ICE_PHY_TYPE_LOW_2500BASE_T:
		case ICE_PHY_TYPE_LOW_5GBASE_T:
		case ICE_PHY_TYPE_LOW_10GBASE_T:
		case ICE_PHY_TYPE_LOW_25GBASE_T:
			return ICE_MEDIA_BASET;
		case ICE_PHY_TYPE_LOW_10G_SFI_DA:
		case ICE_PHY_TYPE_LOW_25GBASE_CR:
		case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
		case ICE_PHY_TYPE_LOW_25GBASE_CR1:
		case ICE_PHY_TYPE_LOW_40GBASE_CR4:
		case ICE_PHY_TYPE_LOW_50GBASE_CR2:
		case ICE_PHY_TYPE_LOW_50GBASE_CP:
		case ICE_PHY_TYPE_LOW_100GBASE_CR4:
		case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
		case ICE_PHY_TYPE_LOW_100GBASE_CP2:
			return ICE_MEDIA_DA;
		case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
		case ICE_PHY_TYPE_LOW_40G_XLAUI:
		case ICE_PHY_TYPE_LOW_50G_LAUI2:
		case ICE_PHY_TYPE_LOW_50G_AUI2:
		case ICE_PHY_TYPE_LOW_50G_AUI1:
		case ICE_PHY_TYPE_LOW_100G_AUI4:
		case ICE_PHY_TYPE_LOW_100G_CAUI4:
			if (ice_is_media_cage_present(pi))
				return ICE_MEDIA_DA;
			fallthrough;
		case ICE_PHY_TYPE_LOW_1000BASE_KX:
		case ICE_PHY_TYPE_LOW_2500BASE_KX:
		case ICE_PHY_TYPE_LOW_2500BASE_X:
		case ICE_PHY_TYPE_LOW_5GBASE_KR:
		case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
		case ICE_PHY_TYPE_LOW_25GBASE_KR:
		case ICE_PHY_TYPE_LOW_25GBASE_KR1:
		case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
		case ICE_PHY_TYPE_LOW_40GBASE_KR4:
		case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
		case ICE_PHY_TYPE_LOW_50GBASE_KR2:
		case ICE_PHY_TYPE_LOW_100GBASE_KR4:
		case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
			return ICE_MEDIA_BACKPLANE;
		}
	} else {
		switch (hw_link_info->phy_type_high) {
		case ICE_PHY_TYPE_HIGH_100G_AUI2:
		case ICE_PHY_TYPE_HIGH_100G_CAUI2:
			if (ice_is_media_cage_present(pi))
				return ICE_MEDIA_DA;
			fallthrough;
		case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
			return ICE_MEDIA_BACKPLANE;
		case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
		case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
			return ICE_MEDIA_FIBER;
		}
	}
	return ICE_MEDIA_UNKNOWN;
}

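/* Usage sketch (illustrative): the classification above is typically
 * consumed as a simple comparison, e.g. to special-case direct-attach
 * copper:
 *
 *	if (ice_get_media_type(pi) == ICE_MEDIA_DA)
 *		...DA-specific handling...
 */
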
/**
 * ice_aq_get_link_info
 * @pi: port information structure
 * @ena_lse: enable/disable LinkStatusEvent reporting
 * @link: pointer to link status structure - optional
 * @cd: pointer to command details structure or NULL
 *
 * Get Link Status (0x607). Returns the link status of the adapter.
 */
enum ice_status
ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
		     struct ice_link_status *link, struct ice_sq_cd *cd)
{
	struct ice_aqc_get_link_status_data link_data = { 0 };
	struct ice_aqc_get_link_status *resp;
	struct ice_link_status *li_old, *li;
	enum ice_media_type *hw_media_type;
	struct ice_fc_info *hw_fc_info;
	bool tx_pause, rx_pause;
	struct ice_aq_desc desc;
	enum ice_status status;
	struct ice_hw *hw;
	u16 cmd_flags;

	if (!pi)
		return ICE_ERR_PARAM;
	hw = pi->hw;
	li_old = &pi->phy.link_info_old;
	hw_media_type = &pi->phy.media_type;
	li = &pi->phy.link_info;
	hw_fc_info = &pi->fc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_status);
	cmd_flags = (ena_lse) ? ICE_AQ_LSE_ENA : ICE_AQ_LSE_DIS;
	resp = &desc.params.get_link_status;
	resp->cmd_flags = cpu_to_le16(cmd_flags);
	resp->lport_num = pi->lport;

	status = ice_aq_send_cmd(hw, &desc, &link_data, sizeof(link_data), cd);

	if (status)
		return status;

	/* save off old link status information */
	*li_old = *li;

	/* update current link status information */
	li->link_speed = le16_to_cpu(link_data.link_speed);
	li->phy_type_low = le64_to_cpu(link_data.phy_type_low);
	li->phy_type_high = le64_to_cpu(link_data.phy_type_high);
	*hw_media_type = ice_get_media_type(pi);
	li->link_info = link_data.link_info;
	li->link_cfg_err = link_data.link_cfg_err;
	li->an_info = link_data.an_info;
	li->ext_info = link_data.ext_info;
	li->max_frame_size = le16_to_cpu(link_data.max_frame_size);
	li->fec_info = link_data.cfg & ICE_AQ_FEC_MASK;
	li->topo_media_conflict = link_data.topo_media_conflict;
	li->pacing = link_data.cfg & (ICE_AQ_CFG_PACING_M |
				      ICE_AQ_CFG_PACING_TYPE_M);

	/* update fc info */
	tx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_TX);
	rx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_RX);
	if (tx_pause && rx_pause)
		hw_fc_info->current_mode = ICE_FC_FULL;
	else if (tx_pause)
		hw_fc_info->current_mode = ICE_FC_TX_PAUSE;
	else if (rx_pause)
		hw_fc_info->current_mode = ICE_FC_RX_PAUSE;
	else
		hw_fc_info->current_mode = ICE_FC_NONE;

	li->lse_ena = !!(resp->cmd_flags & cpu_to_le16(ICE_AQ_LSE_IS_ENABLED));

	ice_debug(hw, ICE_DBG_LINK, "get link info\n");
	ice_debug(hw, ICE_DBG_LINK, " link_speed = 0x%x\n", li->link_speed);
	ice_debug(hw, ICE_DBG_LINK, " phy_type_low = 0x%llx\n",
		  (unsigned long long)li->phy_type_low);
	ice_debug(hw, ICE_DBG_LINK, " phy_type_high = 0x%llx\n",
		  (unsigned long long)li->phy_type_high);
	ice_debug(hw, ICE_DBG_LINK, " media_type = 0x%x\n", *hw_media_type);
	ice_debug(hw, ICE_DBG_LINK, " link_info = 0x%x\n", li->link_info);
	ice_debug(hw, ICE_DBG_LINK, " link_cfg_err = 0x%x\n", li->link_cfg_err);
	ice_debug(hw, ICE_DBG_LINK, " an_info = 0x%x\n", li->an_info);
	ice_debug(hw, ICE_DBG_LINK, " ext_info = 0x%x\n", li->ext_info);
	ice_debug(hw, ICE_DBG_LINK, " fec_info = 0x%x\n", li->fec_info);
	ice_debug(hw, ICE_DBG_LINK, " lse_ena = 0x%x\n", li->lse_ena);
	ice_debug(hw, ICE_DBG_LINK, " max_frame = 0x%x\n",
		  li->max_frame_size);
	ice_debug(hw, ICE_DBG_LINK, " pacing = 0x%x\n", li->pacing);

	/* save link status information */
	if (link)
		*link = *li;

	/* flag cleared so calling functions don't call AQ again */
	pi->phy.get_link_info = false;

	return 0;
}

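/* Usage sketch (illustrative), assuming "pi" is an initialized port_info:
 *
 *	status = ice_aq_get_link_info(pi, false, NULL, NULL);
 *	if (!status && (pi->phy.link_info.link_info & ICE_AQ_LINK_UP))
 *		...link is up; pi->phy.link_info.link_speed is valid...
 */
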
/**
 * ice_fill_tx_timer_and_fc_thresh
 * @hw: pointer to the HW struct
 * @cmd: pointer to MAC cfg structure
 *
 * Add Tx timer and FC refresh threshold info to Set MAC Config AQ command
 * descriptor
 */
static void
ice_fill_tx_timer_and_fc_thresh(struct ice_hw *hw,
				struct ice_aqc_set_mac_cfg *cmd)
{
	u16 fc_thres_val, tx_timer_val;
	u32 val;

	/* We read back the transmit timer and FC threshold value of
	 * LFC. Thus, we will use index =
	 * PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX.
	 *
	 * Also, because we are operating on transmit timer and FC
	 * threshold of LFC, we don't turn on any bit in tx_tmr_priority
	 */
#define IDX_OF_LFC PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX

	/* Retrieve the transmit timer */
	val = rd32(hw, PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(IDX_OF_LFC));
	tx_timer_val = val &
		PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_M;
	cmd->tx_tmr_value = cpu_to_le16(tx_timer_val);

	/* Retrieve the FC threshold */
	val = rd32(hw, PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(IDX_OF_LFC));
	fc_thres_val = val & PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_M;

	cmd->fc_refresh_threshold = cpu_to_le16(fc_thres_val);
}

/**
 * ice_aq_set_mac_cfg
 * @hw: pointer to the HW struct
 * @max_frame_size: Maximum Frame Size to be supported
 * @cd: pointer to command details structure or NULL
 *
 * Set MAC configuration (0x0603)
 */
enum ice_status
ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd)
{
	struct ice_aqc_set_mac_cfg *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.set_mac_cfg;

	if (max_frame_size == 0)
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_cfg);

	cmd->max_frame_size = cpu_to_le16(max_frame_size);

	ice_fill_tx_timer_and_fc_thresh(hw, cmd);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

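/* Usage sketch (illustrative): ice_init_hw() later in this file enables
 * jumbo frame support at the MAC with the maximum supported frame size:
 *
 *	status = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL);
 */
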
/**
 * ice_init_fltr_mgmt_struct - initializes filter management list and locks
 * @hw: pointer to the HW struct
 */
static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw)
{
	struct ice_switch_info *sw;
	enum ice_status status;

	hw->switch_info = devm_kzalloc(ice_hw_to_dev(hw),
				       sizeof(*hw->switch_info), GFP_KERNEL);
	sw = hw->switch_info;

	if (!sw)
		return ICE_ERR_NO_MEMORY;

	INIT_LIST_HEAD(&sw->vsi_list_map_head);

	status = ice_init_def_sw_recp(hw);
	if (status) {
		devm_kfree(ice_hw_to_dev(hw), hw->switch_info);
		return status;
	}
	return 0;
}

/**
 * ice_cleanup_fltr_mgmt_struct - cleanup filter management list and locks
 * @hw: pointer to the HW struct
 */
static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_vsi_list_map_info *v_pos_map;
	struct ice_vsi_list_map_info *v_tmp_map;
	struct ice_sw_recipe *recps;
	u8 i;

	list_for_each_entry_safe(v_pos_map, v_tmp_map, &sw->vsi_list_map_head,
				 list_entry) {
		list_del(&v_pos_map->list_entry);
		devm_kfree(ice_hw_to_dev(hw), v_pos_map);
	}
	recps = hw->switch_info->recp_list;
	for (i = 0; i < ICE_SW_LKUP_LAST; i++) {
		struct ice_fltr_mgmt_list_entry *lst_itr, *tmp_entry;

		recps[i].root_rid = i;
		mutex_destroy(&recps[i].filt_rule_lock);
		list_for_each_entry_safe(lst_itr, tmp_entry,
					 &recps[i].filt_rules, list_entry) {
			list_del(&lst_itr->list_entry);
			devm_kfree(ice_hw_to_dev(hw), lst_itr);
		}
	}
	ice_rm_all_sw_replay_rule_info(hw);
	devm_kfree(ice_hw_to_dev(hw), sw->recp_list);
	devm_kfree(ice_hw_to_dev(hw), sw);
}

/**
 * ice_get_fw_log_cfg - get FW logging configuration
 * @hw: pointer to the HW struct
 */
static enum ice_status ice_get_fw_log_cfg(struct ice_hw *hw)
{
	struct ice_aq_desc desc;
	enum ice_status status;
	__le16 *config;
	u16 size;

	size = sizeof(*config) * ICE_AQC_FW_LOG_ID_MAX;
	config = devm_kzalloc(ice_hw_to_dev(hw), size, GFP_KERNEL);
	if (!config)
		return ICE_ERR_NO_MEMORY;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging_info);

	status = ice_aq_send_cmd(hw, &desc, config, size, NULL);
	if (!status) {
		u16 i;

		/* Save FW logging information into the HW structure */
		for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) {
			u16 v, m, flgs;

			v = le16_to_cpu(config[i]);
			m = (v & ICE_AQC_FW_LOG_ID_M) >> ICE_AQC_FW_LOG_ID_S;
			flgs = (v & ICE_AQC_FW_LOG_EN_M) >> ICE_AQC_FW_LOG_EN_S;

			if (m < ICE_AQC_FW_LOG_ID_MAX)
				hw->fw_log.evnts[m].cur = flgs;
		}
	}

	devm_kfree(ice_hw_to_dev(hw), config);

	return status;
}

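/* Decoding sketch (illustrative): each __le16 word read back above packs a
 * module ID and its enabled-event flags, using the same masks and shifts as
 * the loop in ice_get_fw_log_cfg():
 *
 *	module = (word & ICE_AQC_FW_LOG_ID_M) >> ICE_AQC_FW_LOG_ID_S;
 *	flags  = (word & ICE_AQC_FW_LOG_EN_M) >> ICE_AQC_FW_LOG_EN_S;
 */
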
/**
 * ice_cfg_fw_log - configure FW logging
 * @hw: pointer to the HW struct
 * @enable: enable certain FW logging events if true, disable all if false
 *
 * This function enables/disables the FW logging via Rx CQ events and a UART
 * port based on predetermined configurations. FW logging via the Rx CQ can be
 * enabled/disabled for individual PFs. However, FW logging via the UART can
 * only be enabled/disabled for all PFs on the same device.
 *
 * To enable overall FW logging, the "cq_en" and "uart_en" enable bits in
 * hw->fw_log need to be set accordingly, e.g. based on user-provided input,
 * before initializing the device.
 *
 * When re/configuring FW logging, callers need to update the "cfg" elements of
 * the hw->fw_log.evnts array with the desired logging event configurations for
 * modules of interest. When disabling FW logging completely, the callers can
 * just pass false in the "enable" parameter. On completion, the function will
 * update the "cur" element of the hw->fw_log.evnts array with the resulting
 * logging event configurations of the modules that are being re/configured. FW
 * logging modules that are not part of a reconfiguration operation retain their
 * previous states.
 *
 * Before resetting the device, it is recommended that the driver disables FW
 * logging before shutting down the control queue. When disabling FW logging
 * ("enable" = false), the latest configurations of FW logging events stored in
 * hw->fw_log.evnts[] are not overridden to allow them to be reconfigured after
 * a device reset.
 *
 * When enabling FW logging to emit log messages via the Rx CQ during the
 * device's initialization phase, a mechanism alternative to interrupt handlers
 * needs to be used to extract FW log messages from the Rx CQ periodically and
 * to prevent the Rx CQ from being full and stalling other types of control
 * messages from FW to SW. Interrupts are typically disabled during the device's
 * initialization phase.
 */
static enum ice_status ice_cfg_fw_log(struct ice_hw *hw, bool enable)
{
	struct ice_aqc_fw_logging *cmd;
	enum ice_status status = 0;
	u16 i, chgs = 0, len = 0;
	struct ice_aq_desc desc;
	__le16 *data = NULL;
	u8 actv_evnts = 0;
	void *buf = NULL;

	if (!hw->fw_log.cq_en && !hw->fw_log.uart_en)
		return 0;

	/* Disable FW logging only when the control queue is still responsive */
	if (!enable &&
	    (!hw->fw_log.actv_evnts || !ice_check_sq_alive(hw, &hw->adminq)))
		return 0;

	/* Get current FW log settings */
	status = ice_get_fw_log_cfg(hw);
	if (status)
		return status;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging);
	cmd = &desc.params.fw_logging;

	/* Indicate which controls are valid */
	if (hw->fw_log.cq_en)
		cmd->log_ctrl_valid |= ICE_AQC_FW_LOG_AQ_VALID;

	if (hw->fw_log.uart_en)
		cmd->log_ctrl_valid |= ICE_AQC_FW_LOG_UART_VALID;

	if (enable) {
		/* Fill in an array of entries with FW logging modules and
		 * logging events being reconfigured.
		 */
		for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) {
			u16 val;

			/* Keep track of enabled event types */
			actv_evnts |= hw->fw_log.evnts[i].cfg;

			if (hw->fw_log.evnts[i].cfg == hw->fw_log.evnts[i].cur)
				continue;

			if (!data) {
				data = devm_kcalloc(ice_hw_to_dev(hw),
						    ICE_AQC_FW_LOG_ID_MAX,
						    sizeof(*data),
						    GFP_KERNEL);
				if (!data)
					return ICE_ERR_NO_MEMORY;
			}

			val = i << ICE_AQC_FW_LOG_ID_S;
			val |= hw->fw_log.evnts[i].cfg << ICE_AQC_FW_LOG_EN_S;
			data[chgs++] = cpu_to_le16(val);
		}

		/* Only enable FW logging if at least one module is specified.
		 * If FW logging is currently enabled but all modules are not
		 * enabled to emit log messages, disable FW logging altogether.
		 */
		if (actv_evnts) {
			/* Leave if there is effectively no change */
			if (!chgs)
				goto out;

			if (hw->fw_log.cq_en)
				cmd->log_ctrl |= ICE_AQC_FW_LOG_AQ_EN;

			if (hw->fw_log.uart_en)
				cmd->log_ctrl |= ICE_AQC_FW_LOG_UART_EN;

			buf = data;
			len = sizeof(*data) * chgs;
			desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
		}
	}

	status = ice_aq_send_cmd(hw, &desc, buf, len, NULL);
	if (!status) {
		/* Update the current configuration to reflect events enabled.
		 * hw->fw_log.cq_en and hw->fw_log.uart_en indicate if the FW
		 * logging mode is enabled for the device. They do not reflect
		 * actual modules being enabled to emit log messages. So, their
		 * values remain unchanged even when all modules are disabled.
		 */
		u16 cnt = enable ? chgs : (u16)ICE_AQC_FW_LOG_ID_MAX;

		hw->fw_log.actv_evnts = actv_evnts;
		for (i = 0; i < cnt; i++) {
			u16 v, m;

			if (!enable) {
				/* When disabling all FW logging events as part
				 * of device's de-initialization, the original
				 * configurations are retained, and can be used
				 * to reconfigure FW logging later if the device
				 * is re-initialized.
				 */
				hw->fw_log.evnts[i].cur = 0;
				continue;
			}

			v = le16_to_cpu(data[i]);
			m = (v & ICE_AQC_FW_LOG_ID_M) >> ICE_AQC_FW_LOG_ID_S;
			hw->fw_log.evnts[m].cur = hw->fw_log.evnts[m].cfg;
		}
	}

out:
	if (data)
		devm_kfree(ice_hw_to_dev(hw), data);

	return status;
}

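/* Configuration sketch (illustrative; the event index "i" and the flag value
 * are placeholders): per the comment above, callers set the enable bits and
 * the per-module "cfg" elements before device init, then let ice_init_hw()
 * call this function:
 *
 *	hw->fw_log.cq_en = true;
 *	hw->fw_log.evnts[i].cfg = <desired event flags>;
 *	...
 *	status = ice_cfg_fw_log(hw, true);
 */
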
/**
 * ice_output_fw_log
 * @hw: pointer to the HW struct
 * @desc: pointer to the AQ message descriptor
 * @buf: pointer to the buffer accompanying the AQ message
 *
 * Formats a FW Log message and outputs it via the standard driver logs.
 */
void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf)
{
	ice_debug(hw, ICE_DBG_FW_LOG, "[ FW Log Msg Start ]\n");
	ice_debug_array(hw, ICE_DBG_FW_LOG, 16, 1, (u8 *)buf,
			le16_to_cpu(desc->datalen));
	ice_debug(hw, ICE_DBG_FW_LOG, "[ FW Log Msg End ]\n");
}

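/* Note (illustrative assumption, based on the Rx CQ discussion above): this
 * helper is expected to be invoked from the driver's AdminQ event handler
 * when a FW logging event arrives, passing the event's descriptor and
 * message buffer.
 */
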
/**
 * ice_get_itr_intrl_gran
 * @hw: pointer to the HW struct
 *
 * Determines the ITR/INTRL granularities based on the maximum aggregate
 * bandwidth according to the device's configuration during power-on.
 */
static void ice_get_itr_intrl_gran(struct ice_hw *hw)
{
	u8 max_agg_bw = (rd32(hw, GL_PWR_MODE_CTL) &
			 GL_PWR_MODE_CTL_CAR_MAX_BW_M) >>
		GL_PWR_MODE_CTL_CAR_MAX_BW_S;

	switch (max_agg_bw) {
	case ICE_MAX_AGG_BW_200G:
	case ICE_MAX_AGG_BW_100G:
	case ICE_MAX_AGG_BW_50G:
		hw->itr_gran = ICE_ITR_GRAN_ABOVE_25;
		hw->intrl_gran = ICE_INTRL_GRAN_ABOVE_25;
		break;
	case ICE_MAX_AGG_BW_25G:
		hw->itr_gran = ICE_ITR_GRAN_MAX_25;
		hw->intrl_gran = ICE_INTRL_GRAN_MAX_25;
		break;
	}
}

/**
 * ice_init_hw - main hardware initialization routine
 * @hw: pointer to the hardware structure
 */
enum ice_status ice_init_hw(struct ice_hw *hw)
{
	struct ice_aqc_get_phy_caps_data *pcaps;
	enum ice_status status;
	u16 mac_buf_len;
	void *mac_buf;

	/* Set MAC type based on DeviceID */
	status = ice_set_mac_type(hw);
	if (status)
		return status;

	hw->pf_id = (u8)(rd32(hw, PF_FUNC_RID) & PF_FUNC_RID_FUNC_NUM_M) >>
		PF_FUNC_RID_FUNC_NUM_S;

	status = ice_reset(hw, ICE_RESET_PFR);
	if (status)
		return status;

	ice_get_itr_intrl_gran(hw);

	status = ice_create_all_ctrlq(hw);
	if (status)
		goto err_unroll_cqinit;

	/* Enable FW logging. Not fatal if this fails. */
	status = ice_cfg_fw_log(hw, true);
	if (status)
		ice_debug(hw, ICE_DBG_INIT, "Failed to enable FW logging.\n");

	status = ice_clear_pf_cfg(hw);
	if (status)
		goto err_unroll_cqinit;

	/* Set bit to enable Flow Director filters */
	wr32(hw, PFQF_FD_ENA, PFQF_FD_ENA_FD_ENA_M);
	INIT_LIST_HEAD(&hw->fdir_list_head);

	ice_clear_pxe_mode(hw);

	status = ice_init_nvm(hw);
	if (status)
		goto err_unroll_cqinit;

	status = ice_get_caps(hw);
	if (status)
		goto err_unroll_cqinit;

	hw->port_info = devm_kzalloc(ice_hw_to_dev(hw),
				     sizeof(*hw->port_info), GFP_KERNEL);
	if (!hw->port_info) {
		status = ICE_ERR_NO_MEMORY;
		goto err_unroll_cqinit;
	}

	/* set the back pointer to HW */
	hw->port_info->hw = hw;

	/* Initialize port_info struct with switch configuration data */
	status = ice_get_initial_sw_cfg(hw);
	if (status)
		goto err_unroll_alloc;

	hw->evb_veb = true;

	/* Query the allocated resources for Tx scheduler */
	status = ice_sched_query_res_alloc(hw);
	if (status) {
		ice_debug(hw, ICE_DBG_SCHED, "Failed to get scheduler allocated resources\n");
		goto err_unroll_alloc;
	}
	ice_sched_get_psm_clk_freq(hw);

	/* Initialize port_info struct with scheduler data */
	status = ice_sched_init_port(hw->port_info);
	if (status)
		goto err_unroll_sched;

	pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
	if (!pcaps) {
		status = ICE_ERR_NO_MEMORY;
		goto err_unroll_sched;
	}

	/* Initialize port_info struct with PHY capabilities */
	status = ice_aq_get_phy_caps(hw->port_info, false,
				     ICE_AQC_REPORT_TOPO_CAP_MEDIA, pcaps,
				     NULL);
	devm_kfree(ice_hw_to_dev(hw), pcaps);
	if (status)
		dev_warn(ice_hw_to_dev(hw), "Get PHY capabilities failed status = %d, continuing anyway\n",
			 status);

	/* Initialize port_info struct with link information */
	status = ice_aq_get_link_info(hw->port_info, false, NULL, NULL);
	if (status)
		goto err_unroll_sched;

	/* need a valid SW entry point to build a Tx tree */
	if (!hw->sw_entry_point_layer) {
		ice_debug(hw, ICE_DBG_SCHED, "invalid sw entry point\n");
		status = ICE_ERR_CFG;
		goto err_unroll_sched;
	}
	INIT_LIST_HEAD(&hw->agg_list);
	/* Initialize max burst size */
	if (!hw->max_burst_size)
		ice_cfg_rl_burst_size(hw, ICE_SCHED_DFLT_BURST_SIZE);

	status = ice_init_fltr_mgmt_struct(hw);
	if (status)
		goto err_unroll_sched;

	/* Get MAC information */
	/* A single port can report up to two (LAN and WoL) addresses */
	mac_buf = devm_kcalloc(ice_hw_to_dev(hw), 2,
			       sizeof(struct ice_aqc_manage_mac_read_resp),
			       GFP_KERNEL);
	mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);

	if (!mac_buf) {
		status = ICE_ERR_NO_MEMORY;
		goto err_unroll_fltr_mgmt_struct;
	}

	status = ice_aq_manage_mac_read(hw, mac_buf, mac_buf_len, NULL);
	devm_kfree(ice_hw_to_dev(hw), mac_buf);

	if (status)
		goto err_unroll_fltr_mgmt_struct;
	/* enable jumbo frame support at MAC level */
	status = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL);
	if (status)
		goto err_unroll_fltr_mgmt_struct;
	/* Obtain counter base index which would be used by flow director */
	status = ice_alloc_fd_res_cntr(hw, &hw->fd_ctr_base);
	if (status)
		goto err_unroll_fltr_mgmt_struct;
	status = ice_init_hw_tbls(hw);
	if (status)
		goto err_unroll_fltr_mgmt_struct;
	mutex_init(&hw->tnl_lock);
	return 0;

err_unroll_fltr_mgmt_struct:
	ice_cleanup_fltr_mgmt_struct(hw);
err_unroll_sched:
	ice_sched_cleanup_all(hw);
err_unroll_alloc:
	devm_kfree(ice_hw_to_dev(hw), hw->port_info);
err_unroll_cqinit:
	ice_destroy_all_ctrlq(hw);
	return status;
}

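/* Usage sketch (illustrative): ice_init_hw() above and ice_deinit_hw() below
 * are meant to bracket nominal operation:
 *
 *	status = ice_init_hw(hw);
 *	if (status)
 *		return status;
 *	...nominal operation...
 *	ice_deinit_hw(hw);
 */
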
/**
 * ice_deinit_hw - unroll initialization operations done by ice_init_hw
 * @hw: pointer to the hardware structure
 *
 * This should be called only during nominal operation, not as a result of
 * ice_init_hw() failing since ice_init_hw() will take care of unrolling
 * applicable initializations if it fails for any reason.
 */
void ice_deinit_hw(struct ice_hw *hw)
{
	ice_free_fd_res_cntr(hw, hw->fd_ctr_base);
	ice_cleanup_fltr_mgmt_struct(hw);

	ice_sched_cleanup_all(hw);
	ice_sched_clear_agg(hw);
	ice_free_seg(hw);
	ice_free_hw_tbls(hw);
	mutex_destroy(&hw->tnl_lock);

	if (hw->port_info) {
		devm_kfree(ice_hw_to_dev(hw), hw->port_info);
		hw->port_info = NULL;
	}

	/* Attempt to disable FW logging before shutting down control queues */
	ice_cfg_fw_log(hw, false);
	ice_destroy_all_ctrlq(hw);

	/* Clear VSI contexts if not already cleared */
	ice_clear_all_vsi_ctx(hw);
}

/**
 * ice_check_reset - Check to see if a global reset is complete
 * @hw: pointer to the hardware structure
 */
enum ice_status ice_check_reset(struct ice_hw *hw)
{
	u32 cnt, reg = 0, grst_timeout, uld_mask;

	/* Poll for Device Active state in case a recent CORER, GLOBR,
	 * or EMPR has occurred. The grst delay value is in 100ms units.
	 * Add 1sec for outstanding AQ commands that can take a long time.
	 */
	grst_timeout = ((rd32(hw, GLGEN_RSTCTL) & GLGEN_RSTCTL_GRSTDEL_M) >>
			GLGEN_RSTCTL_GRSTDEL_S) + 10;

	for (cnt = 0; cnt < grst_timeout; cnt++) {
		mdelay(100);
		reg = rd32(hw, GLGEN_RSTAT);
		if (!(reg & GLGEN_RSTAT_DEVSTATE_M))
			break;
	}

	if (cnt == grst_timeout) {
		ice_debug(hw, ICE_DBG_INIT, "Global reset polling failed to complete.\n");
		return ICE_ERR_RESET_FAILED;
	}

#define ICE_RESET_DONE_MASK	(GLNVM_ULD_PCIER_DONE_M |\
				 GLNVM_ULD_PCIER_DONE_1_M |\
				 GLNVM_ULD_CORER_DONE_M |\
				 GLNVM_ULD_GLOBR_DONE_M |\
				 GLNVM_ULD_POR_DONE_M |\
				 GLNVM_ULD_POR_DONE_1_M |\
				 GLNVM_ULD_PCIER_DONE_2_M)

	uld_mask = ICE_RESET_DONE_MASK | (hw->func_caps.common_cap.rdma ?
					  GLNVM_ULD_PE_DONE_M : 0);

	/* Device is Active; check Global Reset processes are done */
	for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
		reg = rd32(hw, GLNVM_ULD) & uld_mask;
		if (reg == uld_mask) {
			ice_debug(hw, ICE_DBG_INIT, "Global reset processes done. %d\n", cnt);
			break;
		}
		mdelay(10);
	}

	if (cnt == ICE_PF_RESET_WAIT_COUNT) {
		ice_debug(hw, ICE_DBG_INIT, "Wait for Reset Done timed out. GLNVM_ULD = 0x%x\n",
			  reg);
		return ICE_ERR_RESET_FAILED;
	}

	return 0;
}

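/* Worked example (illustrative): GLGEN_RSTCTL_GRSTDEL is in 100 ms units and
 * the "+ 10" above adds 1 s of headroom for outstanding AQ commands. A
 * GRSTDEL value of 35 therefore yields grst_timeout = 45 iterations of
 * mdelay(100), i.e. a 4.5 s poll budget.
 */
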
/**
 * ice_pf_reset - Reset the PF
 * @hw: pointer to the hardware structure
 *
 * If a global reset has been triggered, this function checks
 * for its completion and then issues the PF reset
 */
static enum ice_status ice_pf_reset(struct ice_hw *hw)
{
	u32 cnt, reg;

	/* If at function entry a global reset was already in progress, i.e.
	 * state is not 'device active' or any of the reset done bits are not
	 * set in GLNVM_ULD, there is no need for a PF Reset; poll until the
	 * global reset is done.
	 */
	if ((rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_DEVSTATE_M) ||
	    (rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK) ^ ICE_RESET_DONE_MASK) {
		/* poll on global reset currently in progress until done */
		if (ice_check_reset(hw))
			return ICE_ERR_RESET_FAILED;

		return 0;
	}

	/* Reset the PF */
	reg = rd32(hw, PFGEN_CTRL);

	wr32(hw, PFGEN_CTRL, (reg | PFGEN_CTRL_PFSWR_M));

	/* Wait for the PFR to complete. The wait time is the global config lock
	 * timeout plus the PFR timeout which will account for a possible reset
	 * that is occurring during a download package operation.
	 */
	for (cnt = 0; cnt < ICE_GLOBAL_CFG_LOCK_TIMEOUT +
	     ICE_PF_RESET_WAIT_COUNT; cnt++) {
		reg = rd32(hw, PFGEN_CTRL);
		if (!(reg & PFGEN_CTRL_PFSWR_M))
			break;

		mdelay(1);
	}

	if (cnt == ICE_GLOBAL_CFG_LOCK_TIMEOUT + ICE_PF_RESET_WAIT_COUNT) {
		ice_debug(hw, ICE_DBG_INIT, "PF reset polling failed to complete.\n");
		return ICE_ERR_RESET_FAILED;
	}

	return 0;
}

/**
 * ice_reset - Perform different types of reset
 * @hw: pointer to the hardware structure
 * @req: reset request
 *
 * This function triggers a reset as specified by the req parameter.
 *
 * Note:
 * If anything other than a PF reset is triggered, PXE mode is restored.
 * This has to be cleared using ice_clear_pxe_mode again, once the AQ
 * interface has been restored in the rebuild flow.
 */
enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req)
{
	u32 val = 0;

	switch (req) {
	case ICE_RESET_PFR:
		return ice_pf_reset(hw);
	case ICE_RESET_CORER:
		ice_debug(hw, ICE_DBG_INIT, "CoreR requested\n");
		val = GLGEN_RTRIG_CORER_M;
		break;
	case ICE_RESET_GLOBR:
		ice_debug(hw, ICE_DBG_INIT, "GlobalR requested\n");
		val = GLGEN_RTRIG_GLOBR_M;
		break;
	default:
		return ICE_ERR_PARAM;
	}

	val |= rd32(hw, GLGEN_RTRIG);
	wr32(hw, GLGEN_RTRIG, val);
	ice_flush(hw);

	/* wait for the FW to be ready */
	return ice_check_reset(hw);
}

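/* Usage sketch (illustrative): ice_init_hw() earlier in this file issues a
 * PF reset through this entry point before touching the control queues:
 *
 *	status = ice_reset(hw, ICE_RESET_PFR);
 */
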
/**
 * ice_copy_rxq_ctx_to_hw
 * @hw: pointer to the hardware structure
 * @ice_rxq_ctx: pointer to the rxq context
 * @rxq_index: the index of the Rx queue
 *
 * Copies rxq context from dense structure to HW register space
 */
static enum ice_status
ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
{
	u8 i;

	if (!ice_rxq_ctx)
		return ICE_ERR_BAD_PTR;

	if (rxq_index > QRX_CTRL_MAX_INDEX)
		return ICE_ERR_PARAM;

	/* Copy each dword separately to HW */
	for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
		wr32(hw, QRX_CONTEXT(i, rxq_index),
		     *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));

		ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i,
			  *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
	}

	return 0;
}

/* LAN Rx Queue Context */
static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
	/* Field		Width	LSB */
	ICE_CTX_STORE(ice_rlan_ctx, head,		13,	0),
	ICE_CTX_STORE(ice_rlan_ctx, cpuid,		8,	13),
	ICE_CTX_STORE(ice_rlan_ctx, base,		57,	32),
	ICE_CTX_STORE(ice_rlan_ctx, qlen,		13,	89),
	ICE_CTX_STORE(ice_rlan_ctx, dbuf,		7,	102),
	ICE_CTX_STORE(ice_rlan_ctx, hbuf,		5,	109),
	ICE_CTX_STORE(ice_rlan_ctx, dtype,		2,	114),
	ICE_CTX_STORE(ice_rlan_ctx, dsize,		1,	116),
	ICE_CTX_STORE(ice_rlan_ctx, crcstrip,		1,	117),
	ICE_CTX_STORE(ice_rlan_ctx, l2tsel,		1,	119),
	ICE_CTX_STORE(ice_rlan_ctx, hsplit_0,		4,	120),
	ICE_CTX_STORE(ice_rlan_ctx, hsplit_1,		2,	124),
	ICE_CTX_STORE(ice_rlan_ctx, showiv,		1,	127),
	ICE_CTX_STORE(ice_rlan_ctx, rxmax,		14,	174),
	ICE_CTX_STORE(ice_rlan_ctx, tphrdesc_ena,	1,	193),
	ICE_CTX_STORE(ice_rlan_ctx, tphwdesc_ena,	1,	194),
	ICE_CTX_STORE(ice_rlan_ctx, tphdata_ena,	1,	195),
	ICE_CTX_STORE(ice_rlan_ctx, tphhead_ena,	1,	196),
	ICE_CTX_STORE(ice_rlan_ctx, lrxqthresh,		3,	198),
	ICE_CTX_STORE(ice_rlan_ctx, prefena,		1,	201),
	{ 0 }
};

/**
 * ice_write_rxq_ctx
 * @hw: pointer to the hardware structure
 * @rlan_ctx: pointer to the rxq context
 * @rxq_index: the index of the Rx queue
 *
 * Converts rxq context from sparse to dense structure and then writes
 * it to HW register space and enables the hardware to prefetch descriptors
 * instead of only fetching them on demand
 */
enum ice_status
ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
		  u32 rxq_index)
{
	u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 };

	if (!rlan_ctx)
		return ICE_ERR_BAD_PTR;

	rlan_ctx->prefena = 1;

	ice_set_ctx(hw, (u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info);
	return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index);
}

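/* Usage sketch (illustrative; the ring variables and shift macro are
 * assumptions about the caller, not defined here): a caller fills the sparse
 * context and lets ice_write_rxq_ctx() pack and program it:
 *
 *	struct ice_rlan_ctx rlan_ctx = {};
 *
 *	rlan_ctx.base = ring_dma >> ICE_RLAN_BASE_S;
 *	rlan_ctx.qlen = ring_count;
 *	status = ice_write_rxq_ctx(hw, &rlan_ctx, rxq_index);
 */
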
/* LAN Tx Queue Context */
const struct ice_ctx_ele ice_tlan_ctx_info[] = {
	/* Field		Width	LSB */
	ICE_CTX_STORE(ice_tlan_ctx, base,			57,	0),
	ICE_CTX_STORE(ice_tlan_ctx, port_num,			3,	57),
	ICE_CTX_STORE(ice_tlan_ctx, cgd_num,			5,	60),
	ICE_CTX_STORE(ice_tlan_ctx, pf_num,			3,	65),
	ICE_CTX_STORE(ice_tlan_ctx, vmvf_num,			10,	68),
	ICE_CTX_STORE(ice_tlan_ctx, vmvf_type,			2,	78),
	ICE_CTX_STORE(ice_tlan_ctx, src_vsi,			10,	80),
	ICE_CTX_STORE(ice_tlan_ctx, tsyn_ena,			1,	90),
	ICE_CTX_STORE(ice_tlan_ctx, internal_usage_flag,	1,	91),
	ICE_CTX_STORE(ice_tlan_ctx, alt_vlan,			1,	92),
	ICE_CTX_STORE(ice_tlan_ctx, cpuid,			8,	93),
	ICE_CTX_STORE(ice_tlan_ctx, wb_mode,			1,	101),
	ICE_CTX_STORE(ice_tlan_ctx, tphrd_desc,			1,	102),
	ICE_CTX_STORE(ice_tlan_ctx, tphrd,			1,	103),
	ICE_CTX_STORE(ice_tlan_ctx, tphwr_desc,			1,	104),
	ICE_CTX_STORE(ice_tlan_ctx, cmpq_id,			9,	105),
	ICE_CTX_STORE(ice_tlan_ctx, qnum_in_func,		14,	114),
	ICE_CTX_STORE(ice_tlan_ctx, itr_notification_mode,	1,	128),
	ICE_CTX_STORE(ice_tlan_ctx, adjust_prof_id,		6,	129),
	ICE_CTX_STORE(ice_tlan_ctx, qlen,			13,	135),
	ICE_CTX_STORE(ice_tlan_ctx, quanta_prof_idx,		4,	148),
	ICE_CTX_STORE(ice_tlan_ctx, tso_ena,			1,	152),
	ICE_CTX_STORE(ice_tlan_ctx, tso_qnum,			11,	153),
	ICE_CTX_STORE(ice_tlan_ctx, legacy_int,			1,	164),
	ICE_CTX_STORE(ice_tlan_ctx, drop_ena,			1,	165),
	ICE_CTX_STORE(ice_tlan_ctx, cache_prof_idx,		2,	166),
	ICE_CTX_STORE(ice_tlan_ctx, pkt_shaper_prof_idx,	3,	168),
	ICE_CTX_STORE(ice_tlan_ctx, int_q_state,		122,	171),
	{ 0 }
};

/* Sideband Queue command wrappers */

/**
 * ice_sbq_send_cmd - send Sideband Queue command to Sideband Queue
 * @hw: pointer to the HW struct
 * @desc: descriptor describing the command
 * @buf: buffer to use for indirect commands (NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (0 for direct commands)
 * @cd: pointer to command details structure
 */
static int
ice_sbq_send_cmd(struct ice_hw *hw, struct ice_sbq_cmd_desc *desc,
		 void *buf, u16 buf_size, struct ice_sq_cd *cd)
{
	return ice_status_to_errno(ice_sq_send_cmd(hw, ice_get_sbq(hw),
						   (struct ice_aq_desc *)desc,
						   buf, buf_size, cd));
}

/**
 * ice_sbq_rw_reg - Fill Sideband Queue command
 * @hw: pointer to the HW struct
 * @in: message info to be filled in descriptor
 */
int ice_sbq_rw_reg(struct ice_hw *hw, struct ice_sbq_msg_input *in)
{
	struct ice_sbq_cmd_desc desc = {0};
	struct ice_sbq_msg_req msg = {0};
	u16 msg_len;
	int status;

	msg_len = sizeof(msg);

	msg.dest_dev = in->dest_dev;
	msg.opcode = in->opcode;
	msg.flags = ICE_SBQ_MSG_FLAGS;
	msg.sbe_fbe = ICE_SBQ_MSG_SBE_FBE;
	msg.msg_addr_low = cpu_to_le16(in->msg_addr_low);
	msg.msg_addr_high = cpu_to_le32(in->msg_addr_high);

	if (in->opcode)
		msg.data = cpu_to_le32(in->data);
	else
		/* data read comes back in completion, so shorten the struct by
		 * sizeof(msg.data)
		 */
		msg_len -= sizeof(msg.data);

	desc.flags = cpu_to_le16(ICE_AQ_FLAG_RD);
	desc.opcode = cpu_to_le16(ice_sbq_opc_neigh_dev_req);
	desc.param0.cmd_len = cpu_to_le16(msg_len);
	status = ice_sbq_send_cmd(hw, &desc, &msg, msg_len, NULL);
	if (!status && !in->opcode)
		in->data = le32_to_cpu
			(((struct ice_sbq_msg_cmpl *)&msg)->data);
	return status;
}

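/* Usage sketch (illustrative; destination and address values are
 * placeholders): per the code above, in->opcode selects a write (non-zero,
 * in->data is sent) versus a read (zero, in->data is filled from the
 * completion):
 *
 *	struct ice_sbq_msg_input in = {};
 *	int err;
 *
 *	in.dest_dev = <target device>;
 *	in.msg_addr_low = <register address>;
 *	in.opcode = 0;
 *	err = ice_sbq_rw_reg(hw, &in);
 *	if (!err)
 *		...in.data holds the value read...
 */
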
/* FW Admin Queue command wrappers */

/* Software lock/mutex that is meant to be held while the Global Config Lock
 * in firmware is acquired by the software to prevent most (but not all) types
 * of AQ commands from being sent to FW
 */
DEFINE_MUTEX(ice_global_cfg_lock_sw);

/**
 * ice_should_retry_sq_send_cmd
 * @opcode: AQ opcode
 *
 * Decide if we should retry the send command routine for the ATQ, depending
 * on the opcode.
 */
static bool ice_should_retry_sq_send_cmd(u16 opcode)
{
	switch (opcode) {
	case ice_aqc_opc_get_link_topo:
	case ice_aqc_opc_lldp_stop:
	case ice_aqc_opc_lldp_start:
	case ice_aqc_opc_lldp_filter_ctrl:
		return true;
	}

	return false;
}

/**
 * ice_sq_send_cmd_retry - send command to Control Queue (ATQ)
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 * @desc: prefilled descriptor describing the command
 * @buf: buffer to use for indirect commands (or NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (or 0 for direct commands)
 * @cd: pointer to command details structure
 *
 * Retry sending the FW Admin Queue command, multiple times, to the FW Admin
 * Queue if the EBUSY AQ error is returned.
 */
static enum ice_status
ice_sq_send_cmd_retry(struct ice_hw *hw, struct ice_ctl_q_info *cq,
		      struct ice_aq_desc *desc, void *buf, u16 buf_size,
		      struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc_cpy;
	enum ice_status status;
	bool is_cmd_for_retry;
	u8 *buf_cpy = NULL;
	u8 idx = 0;
	u16 opcode;

	opcode = le16_to_cpu(desc->opcode);
	is_cmd_for_retry = ice_should_retry_sq_send_cmd(opcode);
	memset(&desc_cpy, 0, sizeof(desc_cpy));

	if (is_cmd_for_retry) {
		if (buf) {
			buf_cpy = kzalloc(buf_size, GFP_KERNEL);
			if (!buf_cpy)
				return ICE_ERR_NO_MEMORY;
		}

		memcpy(&desc_cpy, desc, sizeof(desc_cpy));
	}

	do {
		status = ice_sq_send_cmd(hw, cq, desc, buf, buf_size, cd);

		if (!is_cmd_for_retry || !status ||
		    hw->adminq.sq_last_status != ICE_AQ_RC_EBUSY)
			break;

		if (buf_cpy)
			memcpy(buf, buf_cpy, buf_size);

		memcpy(desc, &desc_cpy, sizeof(desc_cpy));

		mdelay(ICE_SQ_SEND_DELAY_TIME_MS);

	} while (++idx < ICE_SQ_SEND_MAX_EXECUTE);

	kfree(buf_cpy);

	return status;
}

/**
 * ice_aq_send_cmd - send FW Admin Queue command to FW Admin Queue
 * @hw: pointer to the HW struct
 * @desc: descriptor describing the command
 * @buf: buffer to use for indirect commands (NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (0 for direct commands)
 * @cd: pointer to command details structure
 *
 * Helper function to send FW Admin Queue commands to the FW Admin Queue.
 */
enum ice_status
ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
		u16 buf_size, struct ice_sq_cd *cd)
{
	struct ice_aqc_req_res *cmd = &desc->params.res_owner;
	bool lock_acquired = false;
	enum ice_status status;

	/* When a package download is in process (i.e. when the firmware's
	 * Global Configuration Lock resource is held), only the Download
	 * Package, Get Version, Get Package Info List and Release Resource
	 * (with resource ID set to Global Config Lock) AdminQ commands are
	 * allowed; all others must block until the package download completes
	 * and the Global Config Lock is released. See also
	 * ice_acquire_global_cfg_lock().
	 */
	switch (le16_to_cpu(desc->opcode)) {
	case ice_aqc_opc_download_pkg:
	case ice_aqc_opc_get_pkg_info_list:
	case ice_aqc_opc_get_ver:
		break;
	case ice_aqc_opc_release_res:
		if (le16_to_cpu(cmd->res_id) == ICE_AQC_RES_ID_GLBL_LOCK)
			break;
		fallthrough;
	default:
		mutex_lock(&ice_global_cfg_lock_sw);
		lock_acquired = true;
		break;
	}

	status = ice_sq_send_cmd_retry(hw, &hw->adminq, desc, buf, buf_size, cd);
	if (lock_acquired)
		mutex_unlock(&ice_global_cfg_lock_sw);

	return status;
}

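/* Usage sketch (illustrative): the common pattern for a direct (no-buffer)
 * AdminQ command, used throughout this file, e.g. by ice_clear_pf_cfg():
 *
 *	struct ice_aq_desc desc;
 *
 *	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pf_cfg);
 *	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
 */
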
/**
 * ice_aq_get_fw_ver
 * @hw: pointer to the HW struct
 * @cd: pointer to command details structure or NULL
 *
 * Get the firmware version (0x0001) from the admin queue commands
 */
enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd)
{
	struct ice_aqc_get_ver *resp;
	struct ice_aq_desc desc;
	enum ice_status status;

	resp = &desc.params.get_ver;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver);

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);

	if (!status) {
		hw->fw_branch = resp->fw_branch;
		hw->fw_maj_ver = resp->fw_major;
		hw->fw_min_ver = resp->fw_minor;
		hw->fw_patch = resp->fw_patch;
		hw->fw_build = le32_to_cpu(resp->fw_build);
		hw->api_branch = resp->api_branch;
		hw->api_maj_ver = resp->api_major;
		hw->api_min_ver = resp->api_minor;
		hw->api_patch = resp->api_patch;
	}

	return status;
}

/**
 * ice_aq_send_driver_ver
 * @hw: pointer to the HW struct
 * @dv: driver's major, minor version
 * @cd: pointer to command details structure or NULL
 *
 * Send the driver version (0x0002) to the firmware
 */
enum ice_status
ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv,
		       struct ice_sq_cd *cd)
{
	struct ice_aqc_driver_ver *cmd;
	struct ice_aq_desc desc;
	u16 len;

	cmd = &desc.params.driver_ver;

	if (!dv)
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_ver);

	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
	cmd->major_ver = dv->major_ver;
	cmd->minor_ver = dv->minor_ver;
	cmd->build_ver = dv->build_ver;
	cmd->subbuild_ver = dv->subbuild_ver;

	len = 0;
	while (len < sizeof(dv->driver_string) &&
	       isascii(dv->driver_string[len]) && dv->driver_string[len])
		len++;

	return ice_aq_send_cmd(hw, &desc, dv->driver_string, len, cd);
}

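/* Worked example (illustrative): the length loop above attaches only the
 * ASCII driver string, not the whole field; a driver_string of "ice-1.2.3"
 * results in a 9-byte indirect buffer.
 */
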
/**
 * ice_aq_q_shutdown
 * @hw: pointer to the HW struct
 * @unloading: is the driver unloading itself
 *
 * Tell the Firmware that we're shutting down the AdminQ and whether
 * or not the driver is unloading as well (0x0003).
 */
enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
{
	struct ice_aqc_q_shutdown *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.q_shutdown;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_q_shutdown);

	if (unloading)
		cmd->driver_unloading = ICE_AQC_DRIVER_UNLOADING;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_aq_req_res
 * @hw: pointer to the HW struct
 * @res: resource ID
 * @access: access type
 * @sdp_number: resource number
 * @timeout: the maximum time in ms that the driver may hold the resource
 * @cd: pointer to command details structure or NULL
 *
 * Requests common resource using the admin queue commands (0x0008).
 * When attempting to acquire the Global Config Lock, the driver can
 * learn of three states:
 * 1) ICE_SUCCESS -        acquired lock, and can perform download package
 * 2) ICE_ERR_AQ_ERROR -   did not get lock, driver should fail to load
 * 3) ICE_ERR_AQ_NO_WORK - did not get lock, but another driver has
 *                         successfully downloaded the package; the driver does
 *                         not have to download the package and can continue
 *                         loading
 *
 * Note that if the caller is in an acquire-lock, perform-action, release-lock
 * phase of operation, it is possible that the FW may detect a timeout and issue
 * a CORER. In this case, the driver will receive a CORER interrupt and will
 * have to determine its cause. The calling thread that is handling this flow
 * will likely get an error propagated back to it indicating the Download
 * Package, Update Package or the Release Resource AQ commands timed out.
 */
static enum ice_status
ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
	       enum ice_aq_res_access_type access, u8 sdp_number, u32 *timeout,
	       struct ice_sq_cd *cd)
{
	struct ice_aqc_req_res *cmd_resp;
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd_resp = &desc.params.res_owner;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_req_res);

	cmd_resp->res_id = cpu_to_le16(res);
	cmd_resp->access_type = cpu_to_le16(access);
	cmd_resp->res_number = cpu_to_le32(sdp_number);
	cmd_resp->timeout = cpu_to_le32(*timeout);
	*timeout = 0;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);

	/* The completion specifies the maximum time in ms that the driver
	 * may hold the resource in the Timeout field.
	 */

	/* Global config lock response utilizes an additional status field.
	 *
	 * If the Global config lock resource is held by some other driver, the
	 * command completes with ICE_AQ_RES_GLBL_IN_PROG in the status field
	 * and the timeout field indicates the maximum time the current owner
	 * of the resource has to free it.
	 */
	if (res == ICE_GLOBAL_CFG_LOCK_RES_ID) {
		if (le16_to_cpu(cmd_resp->status) == ICE_AQ_RES_GLBL_SUCCESS) {
			*timeout = le32_to_cpu(cmd_resp->timeout);
			return 0;
		} else if (le16_to_cpu(cmd_resp->status) ==
			   ICE_AQ_RES_GLBL_IN_PROG) {
			*timeout = le32_to_cpu(cmd_resp->timeout);
			return ICE_ERR_AQ_ERROR;
		} else if (le16_to_cpu(cmd_resp->status) ==
			   ICE_AQ_RES_GLBL_DONE) {
			return ICE_ERR_AQ_NO_WORK;
		}

		/* invalid FW response, force a timeout immediately */
		*timeout = 0;
		return ICE_ERR_AQ_ERROR;
	}

	/* If the resource is held by some other driver, the command completes
	 * with a busy return value and the timeout field indicates the maximum
	 * time the current owner of the resource has to free it.
	 */
	if (!status || hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY)
		*timeout = le32_to_cpu(cmd_resp->timeout);

	return status;
}

/**
 * ice_aq_release_res
 * @hw: pointer to the HW struct
 * @res: resource ID
 * @sdp_number: resource number
 * @cd: pointer to command details structure or NULL
 *
 * release common resource using the admin queue commands (0x0009)
 */
static enum ice_status
ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number,
		   struct ice_sq_cd *cd)
{
	struct ice_aqc_req_res *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.res_owner;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_release_res);

	cmd->res_id = cpu_to_le16(res);
	cmd->res_number = cpu_to_le32(sdp_number);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_acquire_res
 * @hw: pointer to the HW structure
 * @res: resource ID
 * @access: access type (read or write)
 * @timeout: timeout in milliseconds
 *
 * This function will attempt to acquire the ownership of a resource.
 */
enum ice_status
ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
		enum ice_aq_res_access_type access, u32 timeout)
{
#define ICE_RES_POLLING_DELAY_MS	10
	u32 delay = ICE_RES_POLLING_DELAY_MS;
	u32 time_left = timeout;
	enum ice_status status;

	status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);

	/* A return code of ICE_ERR_AQ_NO_WORK means that another driver has
	 * previously acquired the resource and performed any necessary updates;
	 * in this case the caller does not obtain the resource and has no
	 * further work to do.
	 */
	if (status == ICE_ERR_AQ_NO_WORK)
		goto ice_acquire_res_exit;

	if (status)
		ice_debug(hw, ICE_DBG_RES, "resource %d acquire type %d failed.\n", res, access);

	/* If necessary, poll until the current lock owner times out */
	timeout = time_left;
	while (status && timeout && time_left) {
		mdelay(delay);
		timeout = (timeout > delay) ? timeout - delay : 0;
		status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);

		if (status == ICE_ERR_AQ_NO_WORK)
			/* lock free, but no work to do */
			break;

		if (!status)
			/* lock acquired */
			break;
	}
	if (status && status != ICE_ERR_AQ_NO_WORK)
		ice_debug(hw, ICE_DBG_RES, "resource acquire timed out.\n");

ice_acquire_res_exit:
	if (status == ICE_ERR_AQ_NO_WORK) {
		if (access == ICE_RES_WRITE)
			ice_debug(hw, ICE_DBG_RES, "resource indicates no work to do.\n");
		else
			ice_debug(hw, ICE_DBG_RES, "Warning: ICE_ERR_AQ_NO_WORK not expected\n");
	}
	return status;
}

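/* Usage sketch (illustrative): the typical acquire/act/release bracket, e.g.
 * for the Global Config Lock around a package download:
 *
 *	status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID,
 *				 ICE_RES_WRITE, ICE_GLOBAL_CFG_LOCK_TIMEOUT);
 *	if (!status) {
 *		...download package...
 *		ice_release_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID);
 *	} else if (status == ICE_ERR_AQ_NO_WORK) {
 *		...another PF already downloaded the package; continue...
 *	}
 */
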
timeout - delay : 0; 1742 status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL); 1743 1744 if (status == ICE_ERR_AQ_NO_WORK) 1745 /* lock free, but no work to do */ 1746 break; 1747 1748 if (!status) 1749 /* lock acquired */ 1750 break; 1751 } 1752 if (status && status != ICE_ERR_AQ_NO_WORK) 1753 ice_debug(hw, ICE_DBG_RES, "resource acquire timed out.\n"); 1754 1755 ice_acquire_res_exit: 1756 if (status == ICE_ERR_AQ_NO_WORK) { 1757 if (access == ICE_RES_WRITE) 1758 ice_debug(hw, ICE_DBG_RES, "resource indicates no work to do.\n"); 1759 else 1760 ice_debug(hw, ICE_DBG_RES, "Warning: ICE_ERR_AQ_NO_WORK not expected\n"); 1761 } 1762 return status; 1763 } 1764 1765 /** 1766 * ice_release_res 1767 * @hw: pointer to the HW structure 1768 * @res: resource ID 1769 * 1770 * This function will release a resource using the proper Admin Command. 1771 */ 1772 void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res) 1773 { 1774 enum ice_status status; 1775 u32 total_delay = 0; 1776 1777 status = ice_aq_release_res(hw, res, 0, NULL); 1778 1779 /* there are some rare cases when trying to release the resource 1780 * results in an admin queue timeout, so handle them correctly 1781 */ 1782 while ((status == ICE_ERR_AQ_TIMEOUT) && 1783 (total_delay < hw->adminq.sq_cmd_timeout)) { 1784 mdelay(1); 1785 status = ice_aq_release_res(hw, res, 0, NULL); 1786 total_delay++; 1787 } 1788 } 1789 1790 /** 1791 * ice_aq_alloc_free_res - command to allocate/free resources 1792 * @hw: pointer to the HW struct 1793 * @num_entries: number of resource entries in buffer 1794 * @buf: Indirect buffer to hold data parameters and response 1795 * @buf_size: size of buffer for indirect commands 1796 * @opc: pass in the command opcode 1797 * @cd: pointer to command details structure or NULL 1798 * 1799 * Helper function to allocate/free resources using the admin queue commands 1800 */ 1801 enum ice_status 1802 ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries, 1803 struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size, 1804 enum ice_adminq_opc opc, struct ice_sq_cd *cd) 1805 { 1806 struct ice_aqc_alloc_free_res_cmd *cmd; 1807 struct ice_aq_desc desc; 1808 1809 cmd = &desc.params.sw_res_ctrl; 1810 1811 if (!buf) 1812 return ICE_ERR_PARAM; 1813 1814 if (buf_size < flex_array_size(buf, elem, num_entries)) 1815 return ICE_ERR_PARAM; 1816 1817 ice_fill_dflt_direct_cmd_desc(&desc, opc); 1818 1819 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 1820 1821 cmd->num_entries = cpu_to_le16(num_entries); 1822 1823 return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); 1824 } 1825 1826 /** 1827 * ice_alloc_hw_res - allocate resource 1828 * @hw: pointer to the HW struct 1829 * @type: type of resource 1830 * @num: number of resources to allocate 1831 * @btm: allocate from bottom 1832 * @res: pointer to array that will receive the resources 1833 */ 1834 enum ice_status 1835 ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res) 1836 { 1837 struct ice_aqc_alloc_free_res_elem *buf; 1838 enum ice_status status; 1839 u16 buf_len; 1840 1841 buf_len = struct_size(buf, elem, num); 1842 buf = kzalloc(buf_len, GFP_KERNEL); 1843 if (!buf) 1844 return ICE_ERR_NO_MEMORY; 1845 1846 /* Prepare buffer to allocate resource. 
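	 * On success, firmware writes the IDs of the allocated resources back
	 * into buf->elem, which is what the memcpy() below hands to the
	 * caller's 'res' array.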
*/ 1847 buf->num_elems = cpu_to_le16(num); 1848 buf->res_type = cpu_to_le16(type | ICE_AQC_RES_TYPE_FLAG_DEDICATED | 1849 ICE_AQC_RES_TYPE_FLAG_IGNORE_INDEX); 1850 if (btm) 1851 buf->res_type |= cpu_to_le16(ICE_AQC_RES_TYPE_FLAG_SCAN_BOTTOM); 1852 1853 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len, 1854 ice_aqc_opc_alloc_res, NULL); 1855 if (status) 1856 goto ice_alloc_res_exit; 1857 1858 memcpy(res, buf->elem, sizeof(*buf->elem) * num); 1859 1860 ice_alloc_res_exit: 1861 kfree(buf); 1862 return status; 1863 } 1864 1865 /** 1866 * ice_free_hw_res - free allocated HW resource 1867 * @hw: pointer to the HW struct 1868 * @type: type of resource to free 1869 * @num: number of resources 1870 * @res: pointer to array that contains the resources to free 1871 */ 1872 enum ice_status ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res) 1873 { 1874 struct ice_aqc_alloc_free_res_elem *buf; 1875 enum ice_status status; 1876 u16 buf_len; 1877 1878 buf_len = struct_size(buf, elem, num); 1879 buf = kzalloc(buf_len, GFP_KERNEL); 1880 if (!buf) 1881 return ICE_ERR_NO_MEMORY; 1882 1883 /* Prepare buffer to free resource. */ 1884 buf->num_elems = cpu_to_le16(num); 1885 buf->res_type = cpu_to_le16(type); 1886 memcpy(buf->elem, res, sizeof(*buf->elem) * num); 1887 1888 status = ice_aq_alloc_free_res(hw, num, buf, buf_len, 1889 ice_aqc_opc_free_res, NULL); 1890 if (status) 1891 ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n"); 1892 1893 kfree(buf); 1894 return status; 1895 } 1896 1897 /** 1898 * ice_get_num_per_func - determine number of resources per PF 1899 * @hw: pointer to the HW structure 1900 * @max: value to be evenly split between each PF 1901 * 1902 * Determine the number of valid functions by going through the bitmap returned 1903 * from parsing capabilities and use this to calculate the number of resources 1904 * per PF based on the max value passed in. 1905 */ 1906 static u32 ice_get_num_per_func(struct ice_hw *hw, u32 max) 1907 { 1908 u8 funcs; 1909 1910 #define ICE_CAPS_VALID_FUNCS_M 0xFF 1911 funcs = hweight8(hw->dev_caps.common_cap.valid_functions & 1912 ICE_CAPS_VALID_FUNCS_M); 1913 1914 if (!funcs) 1915 return 0; 1916 1917 return max / funcs; 1918 } 1919 1920 /** 1921 * ice_parse_common_caps - parse common device/function capabilities 1922 * @hw: pointer to the HW struct 1923 * @caps: pointer to common capabilities structure 1924 * @elem: the capability element to parse 1925 * @prefix: message prefix for tracing capabilities 1926 * 1927 * Given a capability element, extract relevant details into the common 1928 * capability structure. 1929 * 1930 * Returns: true if the capability matches one of the common capability ids, 1931 * false otherwise. 
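 *
 * For example, an ICE_AQC_CAPS_MSIX element carries the vector count in
 * 'number' and the first vector index in 'phys_id'; the corresponding case
 * below stores them in caps->num_msix_vectors and caps->msix_vector_first_id.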
1932 */ 1933 static bool 1934 ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps, 1935 struct ice_aqc_list_caps_elem *elem, const char *prefix) 1936 { 1937 u32 logical_id = le32_to_cpu(elem->logical_id); 1938 u32 phys_id = le32_to_cpu(elem->phys_id); 1939 u32 number = le32_to_cpu(elem->number); 1940 u16 cap = le16_to_cpu(elem->cap); 1941 bool found = true; 1942 1943 switch (cap) { 1944 case ICE_AQC_CAPS_VALID_FUNCTIONS: 1945 caps->valid_functions = number; 1946 ice_debug(hw, ICE_DBG_INIT, "%s: valid_functions (bitmap) = %d\n", prefix, 1947 caps->valid_functions); 1948 break; 1949 case ICE_AQC_CAPS_SRIOV: 1950 caps->sr_iov_1_1 = (number == 1); 1951 ice_debug(hw, ICE_DBG_INIT, "%s: sr_iov_1_1 = %d\n", prefix, 1952 caps->sr_iov_1_1); 1953 break; 1954 case ICE_AQC_CAPS_DCB: 1955 caps->dcb = (number == 1); 1956 caps->active_tc_bitmap = logical_id; 1957 caps->maxtc = phys_id; 1958 ice_debug(hw, ICE_DBG_INIT, "%s: dcb = %d\n", prefix, caps->dcb); 1959 ice_debug(hw, ICE_DBG_INIT, "%s: active_tc_bitmap = %d\n", prefix, 1960 caps->active_tc_bitmap); 1961 ice_debug(hw, ICE_DBG_INIT, "%s: maxtc = %d\n", prefix, caps->maxtc); 1962 break; 1963 case ICE_AQC_CAPS_RSS: 1964 caps->rss_table_size = number; 1965 caps->rss_table_entry_width = logical_id; 1966 ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_size = %d\n", prefix, 1967 caps->rss_table_size); 1968 ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_entry_width = %d\n", prefix, 1969 caps->rss_table_entry_width); 1970 break; 1971 case ICE_AQC_CAPS_RXQS: 1972 caps->num_rxq = number; 1973 caps->rxq_first_id = phys_id; 1974 ice_debug(hw, ICE_DBG_INIT, "%s: num_rxq = %d\n", prefix, 1975 caps->num_rxq); 1976 ice_debug(hw, ICE_DBG_INIT, "%s: rxq_first_id = %d\n", prefix, 1977 caps->rxq_first_id); 1978 break; 1979 case ICE_AQC_CAPS_TXQS: 1980 caps->num_txq = number; 1981 caps->txq_first_id = phys_id; 1982 ice_debug(hw, ICE_DBG_INIT, "%s: num_txq = %d\n", prefix, 1983 caps->num_txq); 1984 ice_debug(hw, ICE_DBG_INIT, "%s: txq_first_id = %d\n", prefix, 1985 caps->txq_first_id); 1986 break; 1987 case ICE_AQC_CAPS_MSIX: 1988 caps->num_msix_vectors = number; 1989 caps->msix_vector_first_id = phys_id; 1990 ice_debug(hw, ICE_DBG_INIT, "%s: num_msix_vectors = %d\n", prefix, 1991 caps->num_msix_vectors); 1992 ice_debug(hw, ICE_DBG_INIT, "%s: msix_vector_first_id = %d\n", prefix, 1993 caps->msix_vector_first_id); 1994 break; 1995 case ICE_AQC_CAPS_PENDING_NVM_VER: 1996 caps->nvm_update_pending_nvm = true; 1997 ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_nvm\n", prefix); 1998 break; 1999 case ICE_AQC_CAPS_PENDING_OROM_VER: 2000 caps->nvm_update_pending_orom = true; 2001 ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_orom\n", prefix); 2002 break; 2003 case ICE_AQC_CAPS_PENDING_NET_VER: 2004 caps->nvm_update_pending_netlist = true; 2005 ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_netlist\n", prefix); 2006 break; 2007 case ICE_AQC_CAPS_NVM_MGMT: 2008 caps->nvm_unified_update = 2009 (number & ICE_NVM_MGMT_UNIFIED_UPD_SUPPORT) ? 
2010 true : false; 2011 ice_debug(hw, ICE_DBG_INIT, "%s: nvm_unified_update = %d\n", prefix, 2012 caps->nvm_unified_update); 2013 break; 2014 case ICE_AQC_CAPS_RDMA: 2015 caps->rdma = (number == 1); 2016 ice_debug(hw, ICE_DBG_INIT, "%s: rdma = %d\n", prefix, caps->rdma); 2017 break; 2018 case ICE_AQC_CAPS_MAX_MTU: 2019 caps->max_mtu = number; 2020 ice_debug(hw, ICE_DBG_INIT, "%s: max_mtu = %d\n", 2021 prefix, caps->max_mtu); 2022 break; 2023 default: 2024 /* Not one of the recognized common capabilities */ 2025 found = false; 2026 } 2027 2028 return found; 2029 } 2030 2031 /** 2032 * ice_recalc_port_limited_caps - Recalculate port limited capabilities 2033 * @hw: pointer to the HW structure 2034 * @caps: pointer to capabilities structure to fix 2035 * 2036 * Re-calculate the capabilities that are dependent on the number of physical 2037 * ports; i.e. some features are not supported or function differently on 2038 * devices with more than 4 ports. 2039 */ 2040 static void 2041 ice_recalc_port_limited_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps) 2042 { 2043 /* This assumes device capabilities are always scanned before function 2044 * capabilities during the initialization flow. 2045 */ 2046 if (hw->dev_caps.num_funcs > 4) { 2047 /* Max 4 TCs per port */ 2048 caps->maxtc = 4; 2049 ice_debug(hw, ICE_DBG_INIT, "reducing maxtc to %d (based on #ports)\n", 2050 caps->maxtc); 2051 if (caps->rdma) { 2052 ice_debug(hw, ICE_DBG_INIT, "forcing RDMA off\n"); 2053 caps->rdma = 0; 2054 } 2055 2056 /* print message only when processing device capabilities 2057 * during initialization. 2058 */ 2059 if (caps == &hw->dev_caps.common_cap) 2060 dev_info(ice_hw_to_dev(hw), "RDMA functionality is not available with the current device configuration.\n"); 2061 } 2062 } 2063 2064 /** 2065 * ice_parse_vf_func_caps - Parse ICE_AQC_CAPS_VF function caps 2066 * @hw: pointer to the HW struct 2067 * @func_p: pointer to function capabilities structure 2068 * @cap: pointer to the capability element to parse 2069 * 2070 * Extract function capabilities for ICE_AQC_CAPS_VF. 2071 */ 2072 static void 2073 ice_parse_vf_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p, 2074 struct ice_aqc_list_caps_elem *cap) 2075 { 2076 u32 logical_id = le32_to_cpu(cap->logical_id); 2077 u32 number = le32_to_cpu(cap->number); 2078 2079 func_p->num_allocd_vfs = number; 2080 func_p->vf_base_id = logical_id; 2081 ice_debug(hw, ICE_DBG_INIT, "func caps: num_allocd_vfs = %d\n", 2082 func_p->num_allocd_vfs); 2083 ice_debug(hw, ICE_DBG_INIT, "func caps: vf_base_id = %d\n", 2084 func_p->vf_base_id); 2085 } 2086 2087 /** 2088 * ice_parse_vsi_func_caps - Parse ICE_AQC_CAPS_VSI function caps 2089 * @hw: pointer to the HW struct 2090 * @func_p: pointer to function capabilities structure 2091 * @cap: pointer to the capability element to parse 2092 * 2093 * Extract function capabilities for ICE_AQC_CAPS_VSI. 
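 *
 * Note that the guaranteed VSI count is not taken from cap->number
 * directly; it is derived by splitting ICE_MAX_VSI evenly across the valid
 * PFs via ice_get_num_per_func(), and the firmware-reported value is only
 * logged for debugging.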
2094 */ 2095 static void 2096 ice_parse_vsi_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p, 2097 struct ice_aqc_list_caps_elem *cap) 2098 { 2099 func_p->guar_num_vsi = ice_get_num_per_func(hw, ICE_MAX_VSI); 2100 ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi (fw) = %d\n", 2101 le32_to_cpu(cap->number)); 2102 ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi = %d\n", 2103 func_p->guar_num_vsi); 2104 } 2105 2106 /** 2107 * ice_parse_1588_func_caps - Parse ICE_AQC_CAPS_1588 function caps 2108 * @hw: pointer to the HW struct 2109 * @func_p: pointer to function capabilities structure 2110 * @cap: pointer to the capability element to parse 2111 * 2112 * Extract function capabilities for ICE_AQC_CAPS_1588. 2113 */ 2114 static void 2115 ice_parse_1588_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p, 2116 struct ice_aqc_list_caps_elem *cap) 2117 { 2118 struct ice_ts_func_info *info = &func_p->ts_func_info; 2119 u32 number = le32_to_cpu(cap->number); 2120 2121 info->ena = ((number & ICE_TS_FUNC_ENA_M) != 0); 2122 func_p->common_cap.ieee_1588 = info->ena; 2123 2124 info->src_tmr_owned = ((number & ICE_TS_SRC_TMR_OWND_M) != 0); 2125 info->tmr_ena = ((number & ICE_TS_TMR_ENA_M) != 0); 2126 info->tmr_index_owned = ((number & ICE_TS_TMR_IDX_OWND_M) != 0); 2127 info->tmr_index_assoc = ((number & ICE_TS_TMR_IDX_ASSOC_M) != 0); 2128 2129 info->clk_freq = (number & ICE_TS_CLK_FREQ_M) >> ICE_TS_CLK_FREQ_S; 2130 info->clk_src = ((number & ICE_TS_CLK_SRC_M) != 0); 2131 2132 ice_debug(hw, ICE_DBG_INIT, "func caps: ieee_1588 = %u\n", 2133 func_p->common_cap.ieee_1588); 2134 ice_debug(hw, ICE_DBG_INIT, "func caps: src_tmr_owned = %u\n", 2135 info->src_tmr_owned); 2136 ice_debug(hw, ICE_DBG_INIT, "func caps: tmr_ena = %u\n", 2137 info->tmr_ena); 2138 ice_debug(hw, ICE_DBG_INIT, "func caps: tmr_index_owned = %u\n", 2139 info->tmr_index_owned); 2140 ice_debug(hw, ICE_DBG_INIT, "func caps: tmr_index_assoc = %u\n", 2141 info->tmr_index_assoc); 2142 ice_debug(hw, ICE_DBG_INIT, "func caps: clk_freq = %u\n", 2143 info->clk_freq); 2144 ice_debug(hw, ICE_DBG_INIT, "func caps: clk_src = %u\n", 2145 info->clk_src); 2146 } 2147 2148 /** 2149 * ice_parse_fdir_func_caps - Parse ICE_AQC_CAPS_FD function caps 2150 * @hw: pointer to the HW struct 2151 * @func_p: pointer to function capabilities structure 2152 * 2153 * Extract function capabilities for ICE_AQC_CAPS_FD. 2154 */ 2155 static void 2156 ice_parse_fdir_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p) 2157 { 2158 u32 reg_val, val; 2159 2160 reg_val = rd32(hw, GLQF_FD_SIZE); 2161 val = (reg_val & GLQF_FD_SIZE_FD_GSIZE_M) >> 2162 GLQF_FD_SIZE_FD_GSIZE_S; 2163 func_p->fd_fltr_guar = 2164 ice_get_num_per_func(hw, val); 2165 val = (reg_val & GLQF_FD_SIZE_FD_BSIZE_M) >> 2166 GLQF_FD_SIZE_FD_BSIZE_S; 2167 func_p->fd_fltr_best_effort = val; 2168 2169 ice_debug(hw, ICE_DBG_INIT, "func caps: fd_fltr_guar = %d\n", 2170 func_p->fd_fltr_guar); 2171 ice_debug(hw, ICE_DBG_INIT, "func caps: fd_fltr_best_effort = %d\n", 2172 func_p->fd_fltr_best_effort); 2173 } 2174 2175 /** 2176 * ice_parse_func_caps - Parse function capabilities 2177 * @hw: pointer to the HW struct 2178 * @func_p: pointer to function capabilities structure 2179 * @buf: buffer containing the function capability records 2180 * @cap_count: the number of capabilities 2181 * 2182 * Helper function to parse function (0x000A) capabilities list. For 2183 * capabilities shared between device and function, this relies on 2184 * ice_parse_common_caps. 
2185  *
2186  * Loop through the list of provided capabilities and extract the relevant
2187  * data into the function capabilities structure.
2188  */
2189 static void
2190 ice_parse_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
2191 		    void *buf, u32 cap_count)
2192 {
2193 	struct ice_aqc_list_caps_elem *cap_resp;
2194 	u32 i;
2195 
2196 	cap_resp = buf;
2197 
2198 	memset(func_p, 0, sizeof(*func_p));
2199 
2200 	for (i = 0; i < cap_count; i++) {
2201 		u16 cap = le16_to_cpu(cap_resp[i].cap);
2202 		bool found;
2203 
2204 		found = ice_parse_common_caps(hw, &func_p->common_cap,
2205 					      &cap_resp[i], "func caps");
2206 
2207 		switch (cap) {
2208 		case ICE_AQC_CAPS_VF:
2209 			ice_parse_vf_func_caps(hw, func_p, &cap_resp[i]);
2210 			break;
2211 		case ICE_AQC_CAPS_VSI:
2212 			ice_parse_vsi_func_caps(hw, func_p, &cap_resp[i]);
2213 			break;
2214 		case ICE_AQC_CAPS_1588:
2215 			ice_parse_1588_func_caps(hw, func_p, &cap_resp[i]);
2216 			break;
2217 		case ICE_AQC_CAPS_FD:
2218 			ice_parse_fdir_func_caps(hw, func_p);
2219 			break;
2220 		default:
2221 			/* Don't list common capabilities as unknown */
2222 			if (!found)
2223 				ice_debug(hw, ICE_DBG_INIT, "func caps: unknown capability[%d]: 0x%x\n",
2224 					  i, cap);
2225 			break;
2226 		}
2227 	}
2228 
2229 	ice_recalc_port_limited_caps(hw, &func_p->common_cap);
2230 }
2231 
2232 /**
2233  * ice_parse_valid_functions_cap - Parse ICE_AQC_CAPS_VALID_FUNCTIONS caps
2234  * @hw: pointer to the HW struct
2235  * @dev_p: pointer to device capabilities structure
2236  * @cap: capability element to parse
2237  *
2238  * Parse ICE_AQC_CAPS_VALID_FUNCTIONS for device capabilities.
2239  */
2240 static void
2241 ice_parse_valid_functions_cap(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2242 			      struct ice_aqc_list_caps_elem *cap)
2243 {
2244 	u32 number = le32_to_cpu(cap->number);
2245 
2246 	dev_p->num_funcs = hweight32(number);
2247 	ice_debug(hw, ICE_DBG_INIT, "dev caps: num_funcs = %d\n",
2248 		  dev_p->num_funcs);
2249 }
2250 
2251 /**
2252  * ice_parse_vf_dev_caps - Parse ICE_AQC_CAPS_VF device caps
2253  * @hw: pointer to the HW struct
2254  * @dev_p: pointer to device capabilities structure
2255  * @cap: capability element to parse
2256  *
2257  * Parse ICE_AQC_CAPS_VF for device capabilities.
2258  */
2259 static void
2260 ice_parse_vf_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2261 		      struct ice_aqc_list_caps_elem *cap)
2262 {
2263 	u32 number = le32_to_cpu(cap->number);
2264 
2265 	dev_p->num_vfs_exposed = number;
2266 	ice_debug(hw, ICE_DBG_INIT, "dev_caps: num_vfs_exposed = %d\n",
2267 		  dev_p->num_vfs_exposed);
2268 }
2269 
2270 /**
2271  * ice_parse_vsi_dev_caps - Parse ICE_AQC_CAPS_VSI device caps
2272  * @hw: pointer to the HW struct
2273  * @dev_p: pointer to device capabilities structure
2274  * @cap: capability element to parse
2275  *
2276  * Parse ICE_AQC_CAPS_VSI for device capabilities.
2277  */
2278 static void
2279 ice_parse_vsi_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2280 		       struct ice_aqc_list_caps_elem *cap)
2281 {
2282 	u32 number = le32_to_cpu(cap->number);
2283 
2284 	dev_p->num_vsi_allocd_to_host = number;
2285 	ice_debug(hw, ICE_DBG_INIT, "dev caps: num_vsi_allocd_to_host = %d\n",
2286 		  dev_p->num_vsi_allocd_to_host);
2287 }
2288 
2289 /**
2290  * ice_parse_1588_dev_caps - Parse ICE_AQC_CAPS_1588 device caps
2291  * @hw: pointer to the HW struct
2292  * @dev_p: pointer to device capabilities structure
2293  * @cap: capability element to parse
2294  *
2295  * Parse ICE_AQC_CAPS_1588 for device capabilities.
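 *
 * The 'number' word carries the device enable and timer ownership bits,
 * while 'logical_id' reports the ports with 1588 enabled and 'phys_id'
 * the timer ownership map, as decoded below.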
2296  */
2297 static void
2298 ice_parse_1588_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2299 			struct ice_aqc_list_caps_elem *cap)
2300 {
2301 	struct ice_ts_dev_info *info = &dev_p->ts_dev_info;
2302 	u32 logical_id = le32_to_cpu(cap->logical_id);
2303 	u32 phys_id = le32_to_cpu(cap->phys_id);
2304 	u32 number = le32_to_cpu(cap->number);
2305 
2306 	info->ena = ((number & ICE_TS_DEV_ENA_M) != 0);
2307 	dev_p->common_cap.ieee_1588 = info->ena;
2308 
2309 	info->tmr0_owner = number & ICE_TS_TMR0_OWNR_M;
2310 	info->tmr0_owned = ((number & ICE_TS_TMR0_OWND_M) != 0);
2311 	info->tmr0_ena = ((number & ICE_TS_TMR0_ENA_M) != 0);
2312 
2313 	info->tmr1_owner = (number & ICE_TS_TMR1_OWNR_M) >> ICE_TS_TMR1_OWNR_S;
2314 	info->tmr1_owned = ((number & ICE_TS_TMR1_OWND_M) != 0);
2315 	info->tmr1_ena = ((number & ICE_TS_TMR1_ENA_M) != 0);
2316 
2317 	info->ena_ports = logical_id;
2318 	info->tmr_own_map = phys_id;
2319 
2320 	ice_debug(hw, ICE_DBG_INIT, "dev caps: ieee_1588 = %u\n",
2321 		  dev_p->common_cap.ieee_1588);
2322 	ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr0_owner = %u\n",
2323 		  info->tmr0_owner);
2324 	ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr0_owned = %u\n",
2325 		  info->tmr0_owned);
2326 	ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr0_ena = %u\n",
2327 		  info->tmr0_ena);
2328 	ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_owner = %u\n",
2329 		  info->tmr1_owner);
2330 	ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_owned = %u\n",
2331 		  info->tmr1_owned);
2332 	ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_ena = %u\n",
2333 		  info->tmr1_ena);
2334 	ice_debug(hw, ICE_DBG_INIT, "dev caps: ieee_1588 ena_ports = %u\n",
2335 		  info->ena_ports);
2336 	ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr_own_map = %u\n",
2337 		  info->tmr_own_map);
2338 }
2339 
2340 /**
2341  * ice_parse_fdir_dev_caps - Parse ICE_AQC_CAPS_FD device caps
2342  * @hw: pointer to the HW struct
2343  * @dev_p: pointer to device capabilities structure
2344  * @cap: capability element to parse
2345  *
2346  * Parse ICE_AQC_CAPS_FD for device capabilities.
2347  */
2348 static void
2349 ice_parse_fdir_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2350 			struct ice_aqc_list_caps_elem *cap)
2351 {
2352 	u32 number = le32_to_cpu(cap->number);
2353 
2354 	dev_p->num_flow_director_fltr = number;
2355 	ice_debug(hw, ICE_DBG_INIT, "dev caps: num_flow_director_fltr = %d\n",
2356 		  dev_p->num_flow_director_fltr);
2357 }
2358 
2359 /**
2360  * ice_parse_dev_caps - Parse device capabilities
2361  * @hw: pointer to the HW struct
2362  * @dev_p: pointer to device capabilities structure
2363  * @buf: buffer containing the device capability records
2364  * @cap_count: the number of capabilities
2365  *
2366  * Helper function to parse device (0x000B) capabilities list. For
2367  * capabilities shared between device and function, this relies on
2368  * ice_parse_common_caps.
2369  *
2370  * Loop through the list of provided capabilities and extract the relevant
2371  * data into the device capabilities structure.
2372 */ 2373 static void 2374 ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, 2375 void *buf, u32 cap_count) 2376 { 2377 struct ice_aqc_list_caps_elem *cap_resp; 2378 u32 i; 2379 2380 cap_resp = buf; 2381 2382 memset(dev_p, 0, sizeof(*dev_p)); 2383 2384 for (i = 0; i < cap_count; i++) { 2385 u16 cap = le16_to_cpu(cap_resp[i].cap); 2386 bool found; 2387 2388 found = ice_parse_common_caps(hw, &dev_p->common_cap, 2389 &cap_resp[i], "dev caps"); 2390 2391 switch (cap) { 2392 case ICE_AQC_CAPS_VALID_FUNCTIONS: 2393 ice_parse_valid_functions_cap(hw, dev_p, &cap_resp[i]); 2394 break; 2395 case ICE_AQC_CAPS_VF: 2396 ice_parse_vf_dev_caps(hw, dev_p, &cap_resp[i]); 2397 break; 2398 case ICE_AQC_CAPS_VSI: 2399 ice_parse_vsi_dev_caps(hw, dev_p, &cap_resp[i]); 2400 break; 2401 case ICE_AQC_CAPS_1588: 2402 ice_parse_1588_dev_caps(hw, dev_p, &cap_resp[i]); 2403 break; 2404 case ICE_AQC_CAPS_FD: 2405 ice_parse_fdir_dev_caps(hw, dev_p, &cap_resp[i]); 2406 break; 2407 default: 2408 /* Don't list common capabilities as unknown */ 2409 if (!found) 2410 ice_debug(hw, ICE_DBG_INIT, "dev caps: unknown capability[%d]: 0x%x\n", 2411 i, cap); 2412 break; 2413 } 2414 } 2415 2416 ice_recalc_port_limited_caps(hw, &dev_p->common_cap); 2417 } 2418 2419 /** 2420 * ice_aq_list_caps - query function/device capabilities 2421 * @hw: pointer to the HW struct 2422 * @buf: a buffer to hold the capabilities 2423 * @buf_size: size of the buffer 2424 * @cap_count: if not NULL, set to the number of capabilities reported 2425 * @opc: capabilities type to discover, device or function 2426 * @cd: pointer to command details structure or NULL 2427 * 2428 * Get the function (0x000A) or device (0x000B) capabilities description from 2429 * firmware and store it in the buffer. 2430 * 2431 * If the cap_count pointer is not NULL, then it is set to the number of 2432 * capabilities firmware will report. Note that if the buffer size is too 2433 * small, it is possible the command will return ICE_AQ_ERR_ENOMEM. The 2434 * cap_count will still be updated in this case. It is recommended that the 2435 * buffer size be set to ICE_AQ_MAX_BUF_LEN (the largest possible buffer that 2436 * firmware could return) to avoid this. 2437 */ 2438 enum ice_status 2439 ice_aq_list_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count, 2440 enum ice_adminq_opc opc, struct ice_sq_cd *cd) 2441 { 2442 struct ice_aqc_list_caps *cmd; 2443 struct ice_aq_desc desc; 2444 enum ice_status status; 2445 2446 cmd = &desc.params.get_cap; 2447 2448 if (opc != ice_aqc_opc_list_func_caps && 2449 opc != ice_aqc_opc_list_dev_caps) 2450 return ICE_ERR_PARAM; 2451 2452 ice_fill_dflt_direct_cmd_desc(&desc, opc); 2453 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); 2454 2455 if (cap_count) 2456 *cap_count = le32_to_cpu(cmd->count); 2457 2458 return status; 2459 } 2460 2461 /** 2462 * ice_discover_dev_caps - Read and extract device capabilities 2463 * @hw: pointer to the hardware structure 2464 * @dev_caps: pointer to device capabilities structure 2465 * 2466 * Read the device capabilities and extract them into the dev_caps structure 2467 * for later use. 
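 *
 * A minimal illustrative caller (hypothetical; error handling elided)
 * might look like:
 *
 *	struct ice_hw_dev_caps dev_caps;
 *
 *	if (!ice_discover_dev_caps(hw, &dev_caps))
 *		dev_info(ice_hw_to_dev(hw), "device has %u PFs\n",
 *			 dev_caps.num_funcs);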
2468 */ 2469 enum ice_status 2470 ice_discover_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_caps) 2471 { 2472 enum ice_status status; 2473 u32 cap_count = 0; 2474 void *cbuf; 2475 2476 cbuf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL); 2477 if (!cbuf) 2478 return ICE_ERR_NO_MEMORY; 2479 2480 /* Although the driver doesn't know the number of capabilities the 2481 * device will return, we can simply send a 4KB buffer, the maximum 2482 * possible size that firmware can return. 2483 */ 2484 cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem); 2485 2486 status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count, 2487 ice_aqc_opc_list_dev_caps, NULL); 2488 if (!status) 2489 ice_parse_dev_caps(hw, dev_caps, cbuf, cap_count); 2490 kfree(cbuf); 2491 2492 return status; 2493 } 2494 2495 /** 2496 * ice_discover_func_caps - Read and extract function capabilities 2497 * @hw: pointer to the hardware structure 2498 * @func_caps: pointer to function capabilities structure 2499 * 2500 * Read the function capabilities and extract them into the func_caps structure 2501 * for later use. 2502 */ 2503 static enum ice_status 2504 ice_discover_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_caps) 2505 { 2506 enum ice_status status; 2507 u32 cap_count = 0; 2508 void *cbuf; 2509 2510 cbuf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL); 2511 if (!cbuf) 2512 return ICE_ERR_NO_MEMORY; 2513 2514 /* Although the driver doesn't know the number of capabilities the 2515 * device will return, we can simply send a 4KB buffer, the maximum 2516 * possible size that firmware can return. 2517 */ 2518 cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem); 2519 2520 status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count, 2521 ice_aqc_opc_list_func_caps, NULL); 2522 if (!status) 2523 ice_parse_func_caps(hw, func_caps, cbuf, cap_count); 2524 kfree(cbuf); 2525 2526 return status; 2527 } 2528 2529 /** 2530 * ice_set_safe_mode_caps - Override dev/func capabilities when in safe mode 2531 * @hw: pointer to the hardware structure 2532 */ 2533 void ice_set_safe_mode_caps(struct ice_hw *hw) 2534 { 2535 struct ice_hw_func_caps *func_caps = &hw->func_caps; 2536 struct ice_hw_dev_caps *dev_caps = &hw->dev_caps; 2537 struct ice_hw_common_caps cached_caps; 2538 u32 num_funcs; 2539 2540 /* cache some func_caps values that should be restored after memset */ 2541 cached_caps = func_caps->common_cap; 2542 2543 /* unset func capabilities */ 2544 memset(func_caps, 0, sizeof(*func_caps)); 2545 2546 #define ICE_RESTORE_FUNC_CAP(name) \ 2547 func_caps->common_cap.name = cached_caps.name 2548 2549 /* restore cached values */ 2550 ICE_RESTORE_FUNC_CAP(valid_functions); 2551 ICE_RESTORE_FUNC_CAP(txq_first_id); 2552 ICE_RESTORE_FUNC_CAP(rxq_first_id); 2553 ICE_RESTORE_FUNC_CAP(msix_vector_first_id); 2554 ICE_RESTORE_FUNC_CAP(max_mtu); 2555 ICE_RESTORE_FUNC_CAP(nvm_unified_update); 2556 ICE_RESTORE_FUNC_CAP(nvm_update_pending_nvm); 2557 ICE_RESTORE_FUNC_CAP(nvm_update_pending_orom); 2558 ICE_RESTORE_FUNC_CAP(nvm_update_pending_netlist); 2559 2560 /* one Tx and one Rx queue in safe mode */ 2561 func_caps->common_cap.num_rxq = 1; 2562 func_caps->common_cap.num_txq = 1; 2563 2564 /* two MSIX vectors, one for traffic and one for misc causes */ 2565 func_caps->common_cap.num_msix_vectors = 2; 2566 func_caps->guar_num_vsi = 1; 2567 2568 /* cache some dev_caps values that should be restored after memset */ 2569 cached_caps = dev_caps->common_cap; 2570 num_funcs = dev_caps->num_funcs; 2571 2572 /* 
unset dev capabilities */ 2573 memset(dev_caps, 0, sizeof(*dev_caps)); 2574 2575 #define ICE_RESTORE_DEV_CAP(name) \ 2576 dev_caps->common_cap.name = cached_caps.name 2577 2578 /* restore cached values */ 2579 ICE_RESTORE_DEV_CAP(valid_functions); 2580 ICE_RESTORE_DEV_CAP(txq_first_id); 2581 ICE_RESTORE_DEV_CAP(rxq_first_id); 2582 ICE_RESTORE_DEV_CAP(msix_vector_first_id); 2583 ICE_RESTORE_DEV_CAP(max_mtu); 2584 ICE_RESTORE_DEV_CAP(nvm_unified_update); 2585 ICE_RESTORE_DEV_CAP(nvm_update_pending_nvm); 2586 ICE_RESTORE_DEV_CAP(nvm_update_pending_orom); 2587 ICE_RESTORE_DEV_CAP(nvm_update_pending_netlist); 2588 dev_caps->num_funcs = num_funcs; 2589 2590 /* one Tx and one Rx queue per function in safe mode */ 2591 dev_caps->common_cap.num_rxq = num_funcs; 2592 dev_caps->common_cap.num_txq = num_funcs; 2593 2594 /* two MSIX vectors per function */ 2595 dev_caps->common_cap.num_msix_vectors = 2 * num_funcs; 2596 } 2597 2598 /** 2599 * ice_get_caps - get info about the HW 2600 * @hw: pointer to the hardware structure 2601 */ 2602 enum ice_status ice_get_caps(struct ice_hw *hw) 2603 { 2604 enum ice_status status; 2605 2606 status = ice_discover_dev_caps(hw, &hw->dev_caps); 2607 if (status) 2608 return status; 2609 2610 return ice_discover_func_caps(hw, &hw->func_caps); 2611 } 2612 2613 /** 2614 * ice_aq_manage_mac_write - manage MAC address write command 2615 * @hw: pointer to the HW struct 2616 * @mac_addr: MAC address to be written as LAA/LAA+WoL/Port address 2617 * @flags: flags to control write behavior 2618 * @cd: pointer to command details structure or NULL 2619 * 2620 * This function is used to write MAC address to the NVM (0x0108). 2621 */ 2622 enum ice_status 2623 ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags, 2624 struct ice_sq_cd *cd) 2625 { 2626 struct ice_aqc_manage_mac_write *cmd; 2627 struct ice_aq_desc desc; 2628 2629 cmd = &desc.params.mac_write; 2630 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_write); 2631 2632 cmd->flags = flags; 2633 ether_addr_copy(cmd->mac_addr, mac_addr); 2634 2635 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 2636 } 2637 2638 /** 2639 * ice_aq_clear_pxe_mode 2640 * @hw: pointer to the HW struct 2641 * 2642 * Tell the firmware that the driver is taking over from PXE (0x0110). 2643 */ 2644 static enum ice_status ice_aq_clear_pxe_mode(struct ice_hw *hw) 2645 { 2646 struct ice_aq_desc desc; 2647 2648 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pxe_mode); 2649 desc.params.clear_pxe.rx_cnt = ICE_AQC_CLEAR_PXE_RX_CNT; 2650 2651 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 2652 } 2653 2654 /** 2655 * ice_clear_pxe_mode - clear pxe operations mode 2656 * @hw: pointer to the HW struct 2657 * 2658 * Make sure all PXE mode settings are cleared, including things 2659 * like descriptor fetch/write-back mode. 2660 */ 2661 void ice_clear_pxe_mode(struct ice_hw *hw) 2662 { 2663 if (ice_check_sq_alive(hw, &hw->adminq)) 2664 ice_aq_clear_pxe_mode(hw); 2665 } 2666 2667 /** 2668 * ice_get_link_speed_based_on_phy_type - returns link speed 2669 * @phy_type_low: lower part of phy_type 2670 * @phy_type_high: higher part of phy_type 2671 * 2672 * This helper function will convert an entry in PHY type structure 2673 * [phy_type_low, phy_type_high] to its corresponding link speed. 2674 * Note: In the structure of [phy_type_low, phy_type_high], there should 2675 * be one bit set, as this function will convert one PHY type to its 2676 * speed. 
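 * For example, a phy_type_low of ICE_PHY_TYPE_LOW_25GBASE_SR maps to
 * ICE_AQ_LINK_SPEED_25GB, per the switch statement below.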
2677 * If no bit gets set, ICE_LINK_SPEED_UNKNOWN will be returned 2678 * If more than one bit gets set, ICE_LINK_SPEED_UNKNOWN will be returned 2679 */ 2680 static u16 2681 ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high) 2682 { 2683 u16 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN; 2684 u16 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN; 2685 2686 switch (phy_type_low) { 2687 case ICE_PHY_TYPE_LOW_100BASE_TX: 2688 case ICE_PHY_TYPE_LOW_100M_SGMII: 2689 speed_phy_type_low = ICE_AQ_LINK_SPEED_100MB; 2690 break; 2691 case ICE_PHY_TYPE_LOW_1000BASE_T: 2692 case ICE_PHY_TYPE_LOW_1000BASE_SX: 2693 case ICE_PHY_TYPE_LOW_1000BASE_LX: 2694 case ICE_PHY_TYPE_LOW_1000BASE_KX: 2695 case ICE_PHY_TYPE_LOW_1G_SGMII: 2696 speed_phy_type_low = ICE_AQ_LINK_SPEED_1000MB; 2697 break; 2698 case ICE_PHY_TYPE_LOW_2500BASE_T: 2699 case ICE_PHY_TYPE_LOW_2500BASE_X: 2700 case ICE_PHY_TYPE_LOW_2500BASE_KX: 2701 speed_phy_type_low = ICE_AQ_LINK_SPEED_2500MB; 2702 break; 2703 case ICE_PHY_TYPE_LOW_5GBASE_T: 2704 case ICE_PHY_TYPE_LOW_5GBASE_KR: 2705 speed_phy_type_low = ICE_AQ_LINK_SPEED_5GB; 2706 break; 2707 case ICE_PHY_TYPE_LOW_10GBASE_T: 2708 case ICE_PHY_TYPE_LOW_10G_SFI_DA: 2709 case ICE_PHY_TYPE_LOW_10GBASE_SR: 2710 case ICE_PHY_TYPE_LOW_10GBASE_LR: 2711 case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1: 2712 case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC: 2713 case ICE_PHY_TYPE_LOW_10G_SFI_C2C: 2714 speed_phy_type_low = ICE_AQ_LINK_SPEED_10GB; 2715 break; 2716 case ICE_PHY_TYPE_LOW_25GBASE_T: 2717 case ICE_PHY_TYPE_LOW_25GBASE_CR: 2718 case ICE_PHY_TYPE_LOW_25GBASE_CR_S: 2719 case ICE_PHY_TYPE_LOW_25GBASE_CR1: 2720 case ICE_PHY_TYPE_LOW_25GBASE_SR: 2721 case ICE_PHY_TYPE_LOW_25GBASE_LR: 2722 case ICE_PHY_TYPE_LOW_25GBASE_KR: 2723 case ICE_PHY_TYPE_LOW_25GBASE_KR_S: 2724 case ICE_PHY_TYPE_LOW_25GBASE_KR1: 2725 case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC: 2726 case ICE_PHY_TYPE_LOW_25G_AUI_C2C: 2727 speed_phy_type_low = ICE_AQ_LINK_SPEED_25GB; 2728 break; 2729 case ICE_PHY_TYPE_LOW_40GBASE_CR4: 2730 case ICE_PHY_TYPE_LOW_40GBASE_SR4: 2731 case ICE_PHY_TYPE_LOW_40GBASE_LR4: 2732 case ICE_PHY_TYPE_LOW_40GBASE_KR4: 2733 case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC: 2734 case ICE_PHY_TYPE_LOW_40G_XLAUI: 2735 speed_phy_type_low = ICE_AQ_LINK_SPEED_40GB; 2736 break; 2737 case ICE_PHY_TYPE_LOW_50GBASE_CR2: 2738 case ICE_PHY_TYPE_LOW_50GBASE_SR2: 2739 case ICE_PHY_TYPE_LOW_50GBASE_LR2: 2740 case ICE_PHY_TYPE_LOW_50GBASE_KR2: 2741 case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC: 2742 case ICE_PHY_TYPE_LOW_50G_LAUI2: 2743 case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC: 2744 case ICE_PHY_TYPE_LOW_50G_AUI2: 2745 case ICE_PHY_TYPE_LOW_50GBASE_CP: 2746 case ICE_PHY_TYPE_LOW_50GBASE_SR: 2747 case ICE_PHY_TYPE_LOW_50GBASE_FR: 2748 case ICE_PHY_TYPE_LOW_50GBASE_LR: 2749 case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4: 2750 case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC: 2751 case ICE_PHY_TYPE_LOW_50G_AUI1: 2752 speed_phy_type_low = ICE_AQ_LINK_SPEED_50GB; 2753 break; 2754 case ICE_PHY_TYPE_LOW_100GBASE_CR4: 2755 case ICE_PHY_TYPE_LOW_100GBASE_SR4: 2756 case ICE_PHY_TYPE_LOW_100GBASE_LR4: 2757 case ICE_PHY_TYPE_LOW_100GBASE_KR4: 2758 case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC: 2759 case ICE_PHY_TYPE_LOW_100G_CAUI4: 2760 case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC: 2761 case ICE_PHY_TYPE_LOW_100G_AUI4: 2762 case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4: 2763 case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4: 2764 case ICE_PHY_TYPE_LOW_100GBASE_CP2: 2765 case ICE_PHY_TYPE_LOW_100GBASE_SR2: 2766 case ICE_PHY_TYPE_LOW_100GBASE_DR: 2767 speed_phy_type_low = ICE_AQ_LINK_SPEED_100GB; 2768 break; 
2769 	default:
2770 		speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
2771 		break;
2772 	}
2773 
2774 	switch (phy_type_high) {
2775 	case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
2776 	case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
2777 	case ICE_PHY_TYPE_HIGH_100G_CAUI2:
2778 	case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
2779 	case ICE_PHY_TYPE_HIGH_100G_AUI2:
2780 		speed_phy_type_high = ICE_AQ_LINK_SPEED_100GB;
2781 		break;
2782 	default:
2783 		speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
2784 		break;
2785 	}
2786 
2787 	if (speed_phy_type_low == ICE_AQ_LINK_SPEED_UNKNOWN &&
2788 	    speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
2789 		return ICE_AQ_LINK_SPEED_UNKNOWN;
2790 	else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
2791 		 speed_phy_type_high != ICE_AQ_LINK_SPEED_UNKNOWN)
2792 		return ICE_AQ_LINK_SPEED_UNKNOWN;
2793 	else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
2794 		 speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
2795 		return speed_phy_type_low;
2796 	else
2797 		return speed_phy_type_high;
2798 }
2799 
2800 /**
2801  * ice_update_phy_type
2802  * @phy_type_low: pointer to the lower part of phy_type
2803  * @phy_type_high: pointer to the higher part of phy_type
2804  * @link_speeds_bitmap: targeted link speeds bitmap
2805  *
2806  * Note: For the format of link_speeds_bitmap, see the link_speed field of
2807  * struct ice_aqc_get_link_status. The caller can pass in a
2808  * link_speeds_bitmap that includes multiple speeds.
2809  *
2810  * Each entry in this [phy_type_low, phy_type_high] structure will
2811  * represent a certain link speed. This helper function will turn on bits
2812  * in [phy_type_low, phy_type_high] structure based on the value of
2813  * link_speeds_bitmap input parameter.
2814  */
2815 void
2816 ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high,
2817 		    u16 link_speeds_bitmap)
2818 {
2819 	u64 pt_high;
2820 	u64 pt_low;
2821 	int index;
2822 	u16 speed;
2823 
2824 	/* We first check with low part of phy_type */
2825 	for (index = 0; index <= ICE_PHY_TYPE_LOW_MAX_INDEX; index++) {
2826 		pt_low = BIT_ULL(index);
2827 		speed = ice_get_link_speed_based_on_phy_type(pt_low, 0);
2828 
2829 		if (link_speeds_bitmap & speed)
2830 			*phy_type_low |= BIT_ULL(index);
2831 	}
2832 
2833 	/* We then check with high part of phy_type */
2834 	for (index = 0; index <= ICE_PHY_TYPE_HIGH_MAX_INDEX; index++) {
2835 		pt_high = BIT_ULL(index);
2836 		speed = ice_get_link_speed_based_on_phy_type(0, pt_high);
2837 
2838 		if (link_speeds_bitmap & speed)
2839 			*phy_type_high |= BIT_ULL(index);
2840 	}
2841 }
2842 
2843 /**
2844  * ice_aq_set_phy_cfg
2845  * @hw: pointer to the HW struct
2846  * @pi: port info structure of the interested logical port
2847  * @cfg: structure with PHY configuration data to be set
2848  * @cd: pointer to command details structure or NULL
2849  *
2850  * Set the various PHY configuration parameters supported on the Port.
2851  * One or more of the Set PHY config parameters may be ignored in an MFP
2852  * mode as the PF may not have the privilege to set some of the PHY Config
2853  * parameters. This status will be indicated by the command response (0x0601).
2854  */
2855 enum ice_status
2856 ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi,
2857 		   struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd)
2858 {
2859 	struct ice_aq_desc desc;
2860 	enum ice_status status;
2861 
2862 	if (!cfg)
2863 		return ICE_ERR_PARAM;
2864 
2865 	/* Ensure that only valid bits of cfg->caps can be turned on.
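	 * Invalid bits are logged and masked off below rather than failing
	 * the command.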
*/ 2866 if (cfg->caps & ~ICE_AQ_PHY_ENA_VALID_MASK) { 2867 ice_debug(hw, ICE_DBG_PHY, "Invalid bit is set in ice_aqc_set_phy_cfg_data->caps : 0x%x\n", 2868 cfg->caps); 2869 2870 cfg->caps &= ICE_AQ_PHY_ENA_VALID_MASK; 2871 } 2872 2873 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_cfg); 2874 desc.params.set_phy.lport_num = pi->lport; 2875 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 2876 2877 ice_debug(hw, ICE_DBG_LINK, "set phy cfg\n"); 2878 ice_debug(hw, ICE_DBG_LINK, " phy_type_low = 0x%llx\n", 2879 (unsigned long long)le64_to_cpu(cfg->phy_type_low)); 2880 ice_debug(hw, ICE_DBG_LINK, " phy_type_high = 0x%llx\n", 2881 (unsigned long long)le64_to_cpu(cfg->phy_type_high)); 2882 ice_debug(hw, ICE_DBG_LINK, " caps = 0x%x\n", cfg->caps); 2883 ice_debug(hw, ICE_DBG_LINK, " low_power_ctrl_an = 0x%x\n", 2884 cfg->low_power_ctrl_an); 2885 ice_debug(hw, ICE_DBG_LINK, " eee_cap = 0x%x\n", cfg->eee_cap); 2886 ice_debug(hw, ICE_DBG_LINK, " eeer_value = 0x%x\n", cfg->eeer_value); 2887 ice_debug(hw, ICE_DBG_LINK, " link_fec_opt = 0x%x\n", 2888 cfg->link_fec_opt); 2889 2890 status = ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd); 2891 if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE) 2892 status = 0; 2893 2894 if (!status) 2895 pi->phy.curr_user_phy_cfg = *cfg; 2896 2897 return status; 2898 } 2899 2900 /** 2901 * ice_update_link_info - update status of the HW network link 2902 * @pi: port info structure of the interested logical port 2903 */ 2904 enum ice_status ice_update_link_info(struct ice_port_info *pi) 2905 { 2906 struct ice_link_status *li; 2907 enum ice_status status; 2908 2909 if (!pi) 2910 return ICE_ERR_PARAM; 2911 2912 li = &pi->phy.link_info; 2913 2914 status = ice_aq_get_link_info(pi, true, NULL, NULL); 2915 if (status) 2916 return status; 2917 2918 if (li->link_info & ICE_AQ_MEDIA_AVAILABLE) { 2919 struct ice_aqc_get_phy_caps_data *pcaps; 2920 struct ice_hw *hw; 2921 2922 hw = pi->hw; 2923 pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), 2924 GFP_KERNEL); 2925 if (!pcaps) 2926 return ICE_ERR_NO_MEMORY; 2927 2928 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA, 2929 pcaps, NULL); 2930 2931 devm_kfree(ice_hw_to_dev(hw), pcaps); 2932 } 2933 2934 return status; 2935 } 2936 2937 /** 2938 * ice_cache_phy_user_req 2939 * @pi: port information structure 2940 * @cache_data: PHY logging data 2941 * @cache_mode: PHY logging mode 2942 * 2943 * Log the user request on (FC, FEC, SPEED) for later use. 
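 *
 * Only the cache_data field selected by cache_mode is consumed; the other
 * fields are ignored.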
2944 */ 2945 static void 2946 ice_cache_phy_user_req(struct ice_port_info *pi, 2947 struct ice_phy_cache_mode_data cache_data, 2948 enum ice_phy_cache_mode cache_mode) 2949 { 2950 if (!pi) 2951 return; 2952 2953 switch (cache_mode) { 2954 case ICE_FC_MODE: 2955 pi->phy.curr_user_fc_req = cache_data.data.curr_user_fc_req; 2956 break; 2957 case ICE_SPEED_MODE: 2958 pi->phy.curr_user_speed_req = 2959 cache_data.data.curr_user_speed_req; 2960 break; 2961 case ICE_FEC_MODE: 2962 pi->phy.curr_user_fec_req = cache_data.data.curr_user_fec_req; 2963 break; 2964 default: 2965 break; 2966 } 2967 } 2968 2969 /** 2970 * ice_caps_to_fc_mode 2971 * @caps: PHY capabilities 2972 * 2973 * Convert PHY FC capabilities to ice FC mode 2974 */ 2975 enum ice_fc_mode ice_caps_to_fc_mode(u8 caps) 2976 { 2977 if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE && 2978 caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE) 2979 return ICE_FC_FULL; 2980 2981 if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE) 2982 return ICE_FC_TX_PAUSE; 2983 2984 if (caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE) 2985 return ICE_FC_RX_PAUSE; 2986 2987 return ICE_FC_NONE; 2988 } 2989 2990 /** 2991 * ice_caps_to_fec_mode 2992 * @caps: PHY capabilities 2993 * @fec_options: Link FEC options 2994 * 2995 * Convert PHY FEC capabilities to ice FEC mode 2996 */ 2997 enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options) 2998 { 2999 if (caps & ICE_AQC_PHY_EN_AUTO_FEC) 3000 return ICE_FEC_AUTO; 3001 3002 if (fec_options & (ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN | 3003 ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ | 3004 ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN | 3005 ICE_AQC_PHY_FEC_25G_KR_REQ)) 3006 return ICE_FEC_BASER; 3007 3008 if (fec_options & (ICE_AQC_PHY_FEC_25G_RS_528_REQ | 3009 ICE_AQC_PHY_FEC_25G_RS_544_REQ | 3010 ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN)) 3011 return ICE_FEC_RS; 3012 3013 return ICE_FEC_NONE; 3014 } 3015 3016 /** 3017 * ice_cfg_phy_fc - Configure PHY FC data based on FC mode 3018 * @pi: port information structure 3019 * @cfg: PHY configuration data to set FC mode 3020 * @req_mode: FC mode to configure 3021 */ 3022 enum ice_status 3023 ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg, 3024 enum ice_fc_mode req_mode) 3025 { 3026 struct ice_phy_cache_mode_data cache_data; 3027 u8 pause_mask = 0x0; 3028 3029 if (!pi || !cfg) 3030 return ICE_ERR_BAD_PTR; 3031 3032 switch (req_mode) { 3033 case ICE_FC_FULL: 3034 pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE; 3035 pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE; 3036 break; 3037 case ICE_FC_RX_PAUSE: 3038 pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE; 3039 break; 3040 case ICE_FC_TX_PAUSE: 3041 pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE; 3042 break; 3043 default: 3044 break; 3045 } 3046 3047 /* clear the old pause settings */ 3048 cfg->caps &= ~(ICE_AQC_PHY_EN_TX_LINK_PAUSE | 3049 ICE_AQC_PHY_EN_RX_LINK_PAUSE); 3050 3051 /* set the new capabilities */ 3052 cfg->caps |= pause_mask; 3053 3054 /* Cache user FC request */ 3055 cache_data.data.curr_user_fc_req = req_mode; 3056 ice_cache_phy_user_req(pi, cache_data, ICE_FC_MODE); 3057 3058 return 0; 3059 } 3060 3061 /** 3062 * ice_set_fc 3063 * @pi: port information structure 3064 * @aq_failures: pointer to status code, specific to ice_set_fc routine 3065 * @ena_auto_link_update: enable automatic link update 3066 * 3067 * Set the requested flow control mode. 
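 *
 * An illustrative (hypothetical) caller that requests full flow control
 * and lets firmware restart the link:
 *
 *	u8 aq_fail;
 *
 *	pi->fc.req_mode = ICE_FC_FULL;
 *	if (ice_set_fc(pi, &aq_fail, true))
 *		ice_debug(pi->hw, ICE_DBG_LINK, "set_fc failed: %d\n",
 *			  aq_fail);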
3068  */
3069 enum ice_status
3070 ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
3071 {
3072 	struct ice_aqc_set_phy_cfg_data cfg = { 0 };
3073 	struct ice_aqc_get_phy_caps_data *pcaps;
3074 	enum ice_status status;
3075 	struct ice_hw *hw;
3076 
3077 	if (!pi || !aq_failures)
3078 		return ICE_ERR_BAD_PTR;
3079 
3080 	*aq_failures = 0;
3081 	hw = pi->hw;
3082 
3083 	pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
3084 	if (!pcaps)
3085 		return ICE_ERR_NO_MEMORY;
3086 
3087 	/* Get the current PHY config */
3088 	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG,
3089 				     pcaps, NULL);
3090 	if (status) {
3091 		*aq_failures = ICE_SET_FC_AQ_FAIL_GET;
3092 		goto out;
3093 	}
3094 
3095 	ice_copy_phy_caps_to_cfg(pi, pcaps, &cfg);
3096 
3097 	/* Configure the set PHY data */
3098 	status = ice_cfg_phy_fc(pi, &cfg, pi->fc.req_mode);
3099 	if (status)
3100 		goto out;
3101 
3102 	/* If the capabilities have changed, then set the new config */
3103 	if (cfg.caps != pcaps->caps) {
3104 		int retry_count, retry_max = 10;
3105 
3106 		/* Auto restart link so settings take effect */
3107 		if (ena_auto_link_update)
3108 			cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
3109 
3110 		status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL);
3111 		if (status) {
3112 			*aq_failures = ICE_SET_FC_AQ_FAIL_SET;
3113 			goto out;
3114 		}
3115 
3116 		/* Update the link info
3117 		 * It sometimes takes a really long time for link to
3118 		 * come back from the atomic reset. Thus, we wait a
3119 		 * little bit.
3120 		 */
3121 		for (retry_count = 0; retry_count < retry_max; retry_count++) {
3122 			status = ice_update_link_info(pi);
3123 
3124 			if (!status)
3125 				break;
3126 
3127 			mdelay(100);
3128 		}
3129 
3130 		if (status)
3131 			*aq_failures = ICE_SET_FC_AQ_FAIL_UPDATE;
3132 	}
3133 
3134 out:
3135 	devm_kfree(ice_hw_to_dev(hw), pcaps);
3136 	return status;
3137 }
3138 
3139 /**
3140  * ice_phy_caps_equals_cfg
3141  * @phy_caps: PHY capabilities
3142  * @phy_cfg: PHY configuration
3143  *
3144  * Helper function to determine if PHY capabilities match PHY
3145  * configuration
3146  */
3147 bool
3148 ice_phy_caps_equals_cfg(struct ice_aqc_get_phy_caps_data *phy_caps,
3149 			struct ice_aqc_set_phy_cfg_data *phy_cfg)
3150 {
3151 	u8 caps_mask, cfg_mask;
3152 
3153 	if (!phy_caps || !phy_cfg)
3154 		return false;
3155 
3156 	/* These bits are not common between capabilities and configuration.
3157 	 * Do not use them to determine equality.
3158 	 */
3159 	caps_mask = ICE_AQC_PHY_CAPS_MASK & ~(ICE_AQC_PHY_AN_MODE |
3160 					      ICE_AQC_GET_PHY_EN_MOD_QUAL);
3161 	cfg_mask = ICE_AQ_PHY_ENA_VALID_MASK & ~ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
3162 
3163 	if (phy_caps->phy_type_low != phy_cfg->phy_type_low ||
3164 	    phy_caps->phy_type_high != phy_cfg->phy_type_high ||
3165 	    ((phy_caps->caps & caps_mask) != (phy_cfg->caps & cfg_mask)) ||
3166 	    phy_caps->low_power_ctrl_an != phy_cfg->low_power_ctrl_an ||
3167 	    phy_caps->eee_cap != phy_cfg->eee_cap ||
3168 	    phy_caps->eeer_value != phy_cfg->eeer_value ||
3169 	    phy_caps->link_fec_options != phy_cfg->link_fec_opt)
3170 		return false;
3171 
3172 	return true;
3173 }
3174 
3175 /**
3176  * ice_copy_phy_caps_to_cfg - Copy PHY ability data to configuration data
3177  * @pi: port information structure
3178  * @caps: PHY ability structure to copy data from
3179  * @cfg: PHY configuration structure to copy data to
3180  *
3181  * Helper function to copy AQC PHY get ability data to PHY set configuration
3182  * data structure
3183  */
3184 void
3185 ice_copy_phy_caps_to_cfg(struct ice_port_info *pi,
3186 			 struct ice_aqc_get_phy_caps_data *caps,
3187 			 struct ice_aqc_set_phy_cfg_data *cfg)
3188 {
3189 	if (!pi || !caps || !cfg)
3190 		return;
3191 
3192 	memset(cfg, 0, sizeof(*cfg));
3193 	cfg->phy_type_low = caps->phy_type_low;
3194 	cfg->phy_type_high = caps->phy_type_high;
3195 	cfg->caps = caps->caps;
3196 	cfg->low_power_ctrl_an = caps->low_power_ctrl_an;
3197 	cfg->eee_cap = caps->eee_cap;
3198 	cfg->eeer_value = caps->eeer_value;
3199 	cfg->link_fec_opt = caps->link_fec_options;
3200 	cfg->module_compliance_enforcement =
3201 		caps->module_compliance_enforcement;
3202 }
3203 
3204 /**
3205  * ice_cfg_phy_fec - Configure PHY FEC data based on FEC mode
3206  * @pi: port information structure
3207  * @cfg: PHY configuration data to set FEC mode
3208  * @fec: FEC mode to configure
3209  */
3210 enum ice_status
3211 ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
3212 		enum ice_fec_mode fec)
3213 {
3214 	struct ice_aqc_get_phy_caps_data *pcaps;
3215 	enum ice_status status;
3216 	struct ice_hw *hw;
3217 
3218 	if (!pi || !cfg)
3219 		return ICE_ERR_BAD_PTR;
3220 
3221 	hw = pi->hw;
3222 
3223 	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
3224 	if (!pcaps)
3225 		return ICE_ERR_NO_MEMORY;
3226 
3227 	status = ice_aq_get_phy_caps(pi, false,
3228 				     (ice_fw_supports_report_dflt_cfg(hw) ?
3229 				      ICE_AQC_REPORT_DFLT_CFG :
3230 				      ICE_AQC_REPORT_TOPO_CAP_MEDIA), pcaps, NULL);
3231 	if (status)
3232 		goto out;
3233 
3234 	cfg->caps |= pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC;
3235 	cfg->link_fec_opt = pcaps->link_fec_options;
3236 
3237 	switch (fec) {
3238 	case ICE_FEC_BASER:
3239 		/* Clear RS bits, and AND BASE-R ability
3240 		 * bits and OR request bits.
3241 		 */
3242 		cfg->link_fec_opt &= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
3243 			ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN;
3244 		cfg->link_fec_opt |= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
3245 			ICE_AQC_PHY_FEC_25G_KR_REQ;
3246 		break;
3247 	case ICE_FEC_RS:
3248 		/* Clear BASE-R bits, and AND RS ability
3249 		 * bits and OR request bits.
3250 		 */
3251 		cfg->link_fec_opt &= ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN;
3252 		cfg->link_fec_opt |= ICE_AQC_PHY_FEC_25G_RS_528_REQ |
3253 			ICE_AQC_PHY_FEC_25G_RS_544_REQ;
3254 		break;
3255 	case ICE_FEC_NONE:
3256 		/* Clear all FEC option bits. */
3257 		cfg->link_fec_opt &= ~ICE_AQC_PHY_FEC_MASK;
3258 		break;
3259 	case ICE_FEC_AUTO:
3260 		/* AND auto FEC bit, and all caps bits.
*/ 3261 cfg->caps &= ICE_AQC_PHY_CAPS_MASK; 3262 cfg->link_fec_opt |= pcaps->link_fec_options; 3263 break; 3264 default: 3265 status = ICE_ERR_PARAM; 3266 break; 3267 } 3268 3269 if (fec == ICE_FEC_AUTO && ice_fw_supports_link_override(hw) && 3270 !ice_fw_supports_report_dflt_cfg(hw)) { 3271 struct ice_link_default_override_tlv tlv; 3272 3273 if (ice_get_link_default_override(&tlv, pi)) 3274 goto out; 3275 3276 if (!(tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE) && 3277 (tlv.options & ICE_LINK_OVERRIDE_EN)) 3278 cfg->link_fec_opt = tlv.fec_options; 3279 } 3280 3281 out: 3282 kfree(pcaps); 3283 3284 return status; 3285 } 3286 3287 /** 3288 * ice_get_link_status - get status of the HW network link 3289 * @pi: port information structure 3290 * @link_up: pointer to bool (true/false = linkup/linkdown) 3291 * 3292 * Variable link_up is true if link is up, false if link is down. 3293 * The variable link_up is invalid if status is non zero. As a 3294 * result of this call, link status reporting becomes enabled 3295 */ 3296 enum ice_status ice_get_link_status(struct ice_port_info *pi, bool *link_up) 3297 { 3298 struct ice_phy_info *phy_info; 3299 enum ice_status status = 0; 3300 3301 if (!pi || !link_up) 3302 return ICE_ERR_PARAM; 3303 3304 phy_info = &pi->phy; 3305 3306 if (phy_info->get_link_info) { 3307 status = ice_update_link_info(pi); 3308 3309 if (status) 3310 ice_debug(pi->hw, ICE_DBG_LINK, "get link status error, status = %d\n", 3311 status); 3312 } 3313 3314 *link_up = phy_info->link_info.link_info & ICE_AQ_LINK_UP; 3315 3316 return status; 3317 } 3318 3319 /** 3320 * ice_aq_set_link_restart_an 3321 * @pi: pointer to the port information structure 3322 * @ena_link: if true: enable link, if false: disable link 3323 * @cd: pointer to command details structure or NULL 3324 * 3325 * Sets up the link and restarts the Auto-Negotiation over the link. 
3326 */ 3327 enum ice_status 3328 ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link, 3329 struct ice_sq_cd *cd) 3330 { 3331 struct ice_aqc_restart_an *cmd; 3332 struct ice_aq_desc desc; 3333 3334 cmd = &desc.params.restart_an; 3335 3336 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_restart_an); 3337 3338 cmd->cmd_flags = ICE_AQC_RESTART_AN_LINK_RESTART; 3339 cmd->lport_num = pi->lport; 3340 if (ena_link) 3341 cmd->cmd_flags |= ICE_AQC_RESTART_AN_LINK_ENABLE; 3342 else 3343 cmd->cmd_flags &= ~ICE_AQC_RESTART_AN_LINK_ENABLE; 3344 3345 return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd); 3346 } 3347 3348 /** 3349 * ice_aq_set_event_mask 3350 * @hw: pointer to the HW struct 3351 * @port_num: port number of the physical function 3352 * @mask: event mask to be set 3353 * @cd: pointer to command details structure or NULL 3354 * 3355 * Set event mask (0x0613) 3356 */ 3357 enum ice_status 3358 ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask, 3359 struct ice_sq_cd *cd) 3360 { 3361 struct ice_aqc_set_event_mask *cmd; 3362 struct ice_aq_desc desc; 3363 3364 cmd = &desc.params.set_event_mask; 3365 3366 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_event_mask); 3367 3368 cmd->lport_num = port_num; 3369 3370 cmd->event_mask = cpu_to_le16(mask); 3371 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 3372 } 3373 3374 /** 3375 * ice_aq_set_mac_loopback 3376 * @hw: pointer to the HW struct 3377 * @ena_lpbk: Enable or Disable loopback 3378 * @cd: pointer to command details structure or NULL 3379 * 3380 * Enable/disable loopback on a given port 3381 */ 3382 enum ice_status 3383 ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd) 3384 { 3385 struct ice_aqc_set_mac_lb *cmd; 3386 struct ice_aq_desc desc; 3387 3388 cmd = &desc.params.set_mac_lb; 3389 3390 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_lb); 3391 if (ena_lpbk) 3392 cmd->lb_mode = ICE_AQ_MAC_LB_EN; 3393 3394 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 3395 } 3396 3397 /** 3398 * ice_aq_set_port_id_led 3399 * @pi: pointer to the port information 3400 * @is_orig_mode: is this LED set to original mode (by the net-list) 3401 * @cd: pointer to command details structure or NULL 3402 * 3403 * Set LED value for the given port (0x06e9) 3404 */ 3405 enum ice_status 3406 ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode, 3407 struct ice_sq_cd *cd) 3408 { 3409 struct ice_aqc_set_port_id_led *cmd; 3410 struct ice_hw *hw = pi->hw; 3411 struct ice_aq_desc desc; 3412 3413 cmd = &desc.params.set_port_id_led; 3414 3415 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_id_led); 3416 3417 if (is_orig_mode) 3418 cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_ORIG; 3419 else 3420 cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_BLINK; 3421 3422 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 3423 } 3424 3425 /** 3426 * ice_aq_sff_eeprom 3427 * @hw: pointer to the HW struct 3428 * @lport: bits [7:0] = logical port, bit [8] = logical port valid 3429 * @bus_addr: I2C bus address of the eeprom (typically 0xA0, 0=topo default) 3430 * @mem_addr: I2C offset. lower 8 bits for address, 8 upper bits zero padding. 3431 * @page: QSFP page 3432 * @set_page: set or ignore the page 3433 * @data: pointer to data buffer to be read/written to the I2C device. 3434 * @length: 1-16 for read, 1 for write. 3435 * @write: 0 read, 1 for write. 
3436 * @cd: pointer to command details structure or NULL 3437 * 3438 * Read/Write SFF EEPROM (0x06EE) 3439 */ 3440 enum ice_status 3441 ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr, 3442 u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length, 3443 bool write, struct ice_sq_cd *cd) 3444 { 3445 struct ice_aqc_sff_eeprom *cmd; 3446 struct ice_aq_desc desc; 3447 enum ice_status status; 3448 3449 if (!data || (mem_addr & 0xff00)) 3450 return ICE_ERR_PARAM; 3451 3452 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_sff_eeprom); 3453 cmd = &desc.params.read_write_sff_param; 3454 desc.flags = cpu_to_le16(ICE_AQ_FLAG_RD); 3455 cmd->lport_num = (u8)(lport & 0xff); 3456 cmd->lport_num_valid = (u8)((lport >> 8) & 0x01); 3457 cmd->i2c_bus_addr = cpu_to_le16(((bus_addr >> 1) & 3458 ICE_AQC_SFF_I2CBUS_7BIT_M) | 3459 ((set_page << 3460 ICE_AQC_SFF_SET_EEPROM_PAGE_S) & 3461 ICE_AQC_SFF_SET_EEPROM_PAGE_M)); 3462 cmd->i2c_mem_addr = cpu_to_le16(mem_addr & 0xff); 3463 cmd->eeprom_page = cpu_to_le16((u16)page << ICE_AQC_SFF_EEPROM_PAGE_S); 3464 if (write) 3465 cmd->i2c_bus_addr |= cpu_to_le16(ICE_AQC_SFF_IS_WRITE); 3466 3467 status = ice_aq_send_cmd(hw, &desc, data, length, cd); 3468 return status; 3469 } 3470 3471 /** 3472 * __ice_aq_get_set_rss_lut 3473 * @hw: pointer to the hardware structure 3474 * @params: RSS LUT parameters 3475 * @set: set true to set the table, false to get the table 3476 * 3477 * Internal function to get (0x0B05) or set (0x0B03) RSS look up table 3478 */ 3479 static enum ice_status 3480 __ice_aq_get_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *params, bool set) 3481 { 3482 u16 flags = 0, vsi_id, lut_type, lut_size, glob_lut_idx, vsi_handle; 3483 struct ice_aqc_get_set_rss_lut *cmd_resp; 3484 struct ice_aq_desc desc; 3485 enum ice_status status; 3486 u8 *lut; 3487 3488 if (!params) 3489 return ICE_ERR_PARAM; 3490 3491 vsi_handle = params->vsi_handle; 3492 lut = params->lut; 3493 3494 if (!ice_is_vsi_valid(hw, vsi_handle) || !lut) 3495 return ICE_ERR_PARAM; 3496 3497 lut_size = params->lut_size; 3498 lut_type = params->lut_type; 3499 glob_lut_idx = params->global_lut_id; 3500 vsi_id = ice_get_hw_vsi_num(hw, vsi_handle); 3501 3502 cmd_resp = &desc.params.get_set_rss_lut; 3503 3504 if (set) { 3505 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_lut); 3506 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 3507 } else { 3508 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_lut); 3509 } 3510 3511 cmd_resp->vsi_id = cpu_to_le16(((vsi_id << 3512 ICE_AQC_GSET_RSS_LUT_VSI_ID_S) & 3513 ICE_AQC_GSET_RSS_LUT_VSI_ID_M) | 3514 ICE_AQC_GSET_RSS_LUT_VSI_VALID); 3515 3516 switch (lut_type) { 3517 case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI: 3518 case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF: 3519 case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL: 3520 flags |= ((lut_type << ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_S) & 3521 ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_M); 3522 break; 3523 default: 3524 status = ICE_ERR_PARAM; 3525 goto ice_aq_get_set_rss_lut_exit; 3526 } 3527 3528 if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL) { 3529 flags |= ((glob_lut_idx << ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_S) & 3530 ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_M); 3531 3532 if (!set) 3533 goto ice_aq_get_set_rss_lut_send; 3534 } else if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) { 3535 if (!set) 3536 goto ice_aq_get_set_rss_lut_send; 3537 } else { 3538 goto ice_aq_get_set_rss_lut_send; 3539 } 3540 3541 /* LUT size is only valid for Global and PF table types */ 3542 switch (lut_size) { 3543 case 
/**
 * __ice_aq_get_set_rss_lut
 * @hw: pointer to the hardware structure
 * @params: RSS LUT parameters
 * @set: set true to set the table, false to get the table
 *
 * Internal function to get (0x0B05) or set (0x0B03) the RSS lookup table
 */
static enum ice_status
__ice_aq_get_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *params, bool set)
{
	u16 flags = 0, vsi_id, lut_type, lut_size, glob_lut_idx, vsi_handle;
	struct ice_aqc_get_set_rss_lut *cmd_resp;
	struct ice_aq_desc desc;
	enum ice_status status;
	u8 *lut;

	if (!params)
		return ICE_ERR_PARAM;

	vsi_handle = params->vsi_handle;
	lut = params->lut;

	if (!ice_is_vsi_valid(hw, vsi_handle) || !lut)
		return ICE_ERR_PARAM;

	lut_size = params->lut_size;
	lut_type = params->lut_type;
	glob_lut_idx = params->global_lut_id;
	vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);

	cmd_resp = &desc.params.get_set_rss_lut;

	if (set) {
		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_lut);
		desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
	} else {
		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_lut);
	}

	cmd_resp->vsi_id = cpu_to_le16(((vsi_id <<
					 ICE_AQC_GSET_RSS_LUT_VSI_ID_S) &
					ICE_AQC_GSET_RSS_LUT_VSI_ID_M) |
				       ICE_AQC_GSET_RSS_LUT_VSI_VALID);

	switch (lut_type) {
	case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI:
	case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF:
	case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL:
		flags |= ((lut_type << ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_S) &
			  ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_M);
		break;
	default:
		status = ICE_ERR_PARAM;
		goto ice_aq_get_set_rss_lut_exit;
	}

	if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL) {
		flags |= ((glob_lut_idx << ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_S) &
			  ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_M);

		if (!set)
			goto ice_aq_get_set_rss_lut_send;
	} else if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
		if (!set)
			goto ice_aq_get_set_rss_lut_send;
	} else {
		goto ice_aq_get_set_rss_lut_send;
	}

	/* LUT size is only valid for Global and PF table types */
	switch (lut_size) {
	case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128:
		break;
	case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512:
		flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512_FLAG <<
			  ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
			 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
		break;
	case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K:
		if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
			flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG <<
				  ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
				 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
			break;
		}
		fallthrough;
	default:
		status = ICE_ERR_PARAM;
		goto ice_aq_get_set_rss_lut_exit;
	}

ice_aq_get_set_rss_lut_send:
	cmd_resp->flags = cpu_to_le16(flags);
	status = ice_aq_send_cmd(hw, &desc, lut, lut_size, NULL);

ice_aq_get_set_rss_lut_exit:
	return status;
}

/**
 * ice_aq_get_rss_lut
 * @hw: pointer to the hardware structure
 * @get_params: RSS LUT parameters used to specify which RSS LUT to get
 *
 * get the RSS lookup table, PF or VSI type
 */
enum ice_status
ice_aq_get_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *get_params)
{
	return __ice_aq_get_set_rss_lut(hw, get_params, false);
}

/**
 * ice_aq_set_rss_lut
 * @hw: pointer to the hardware structure
 * @set_params: RSS LUT parameters used to specify how to set the RSS LUT
 *
 * set the RSS lookup table, PF or VSI type
 */
enum ice_status
ice_aq_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *set_params)
{
	return __ice_aq_get_set_rss_lut(hw, set_params, true);
}

/**
 * __ice_aq_get_set_rss_key
 * @hw: pointer to the HW struct
 * @vsi_id: VSI FW index
 * @key: pointer to key info struct
 * @set: set true to set the key, false to get the key
 *
 * get (0x0B04) or set (0x0B02) the RSS key per VSI
 */
static enum ice_status
__ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id,
			 struct ice_aqc_get_set_rss_keys *key,
			 bool set)
{
	struct ice_aqc_get_set_rss_key *cmd_resp;
	u16 key_size = sizeof(*key);
	struct ice_aq_desc desc;

	cmd_resp = &desc.params.get_set_rss_key;

	if (set) {
		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_key);
		desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
	} else {
		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_key);
	}

	cmd_resp->vsi_id = cpu_to_le16(((vsi_id <<
					 ICE_AQC_GSET_RSS_KEY_VSI_ID_S) &
					ICE_AQC_GSET_RSS_KEY_VSI_ID_M) |
				       ICE_AQC_GSET_RSS_KEY_VSI_VALID);

	return ice_aq_send_cmd(hw, &desc, key, key_size, NULL);
}

/**
 * ice_aq_get_rss_key
 * @hw: pointer to the HW struct
 * @vsi_handle: software VSI handle
 * @key: pointer to key info struct
 *
 * get the RSS key per VSI
 */
enum ice_status
ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle,
		   struct ice_aqc_get_set_rss_keys *key)
{
	if (!ice_is_vsi_valid(hw, vsi_handle) || !key)
		return ICE_ERR_PARAM;

	return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
					key, false);
}

/**
 * ice_aq_set_rss_key
 * @hw: pointer to the HW struct
 * @vsi_handle: software VSI handle
 * @keys: pointer to key info struct
 *
 * set the RSS key per VSI
 */
enum ice_status
ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle,
		   struct ice_aqc_get_set_rss_keys *keys)
{
	if (!ice_is_vsi_valid(hw, vsi_handle) || !keys)
		return ICE_ERR_PARAM;

	return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
					keys, true);
}
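/*
 * Illustrative sketch only: programming a VSI's RSS hash key from a caller-
 * provided seed via ice_aq_set_rss_key() above. The helper name is
 * hypothetical; in-tree callers fill struct ice_aqc_get_set_rss_keys from
 * their own key material.
 */
static inline enum ice_status
ice_example_set_rss_seed(struct ice_hw *hw, u16 vsi_handle, const u8 *seed)
{
	struct ice_aqc_get_set_rss_keys keys = {};

	/* seed must provide at least sizeof(keys.standard_rss_key) bytes */
	memcpy(keys.standard_rss_key, seed, sizeof(keys.standard_rss_key));
	return ice_aq_set_rss_key(hw, vsi_handle, &keys);
}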
/**
 * ice_aq_add_lan_txq
 * @hw: pointer to the hardware structure
 * @num_qgrps: Number of added queue groups
 * @qg_list: list of queue groups to be added
 * @buf_size: size of buffer for indirect command
 * @cd: pointer to command details structure or NULL
 *
 * Add Tx LAN queue (0x0C30)
 *
 * NOTE:
 * Prior to calling add Tx LAN queue:
 * Initialize the following as part of the Tx queue context:
 * Completion queue ID if the queue uses Completion queue, Quanta profile,
 * Cache profile and Packet shaper profile.
 *
 * After add Tx LAN queue AQ command is completed:
 * Interrupts should be associated with specific queues,
 * Association of Tx queue to Doorbell queue is not part of Add LAN Tx queue
 * flow.
 */
static enum ice_status
ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
		   struct ice_aqc_add_tx_qgrp *qg_list, u16 buf_size,
		   struct ice_sq_cd *cd)
{
	struct ice_aqc_add_tx_qgrp *list;
	struct ice_aqc_add_txqs *cmd;
	struct ice_aq_desc desc;
	u16 i, sum_size = 0;

	cmd = &desc.params.add_txqs;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_txqs);

	if (!qg_list)
		return ICE_ERR_PARAM;

	if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
		return ICE_ERR_PARAM;

	for (i = 0, list = qg_list; i < num_qgrps; i++) {
		sum_size += struct_size(list, txqs, list->num_txqs);
		list = (struct ice_aqc_add_tx_qgrp *)(list->txqs +
						      list->num_txqs);
	}

	if (buf_size != sum_size)
		return ICE_ERR_PARAM;

	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	cmd->num_qgrps = num_qgrps;

	return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
}

/**
 * ice_aq_dis_lan_txq
 * @hw: pointer to the hardware structure
 * @num_qgrps: number of groups in the list
 * @qg_list: the list of groups to disable
 * @buf_size: the total size of the qg_list buffer in bytes
 * @rst_src: if called due to reset, specifies the reset source
 * @vmvf_num: the relative VM or VF number that is undergoing the reset
 * @cd: pointer to command details structure or NULL
 *
 * Disable LAN Tx queue (0x0C31)
 */
static enum ice_status
ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
		   struct ice_aqc_dis_txq_item *qg_list, u16 buf_size,
		   enum ice_disq_rst_src rst_src, u16 vmvf_num,
		   struct ice_sq_cd *cd)
{
	struct ice_aqc_dis_txq_item *item;
	struct ice_aqc_dis_txqs *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;
	u16 i, sz = 0;

	cmd = &desc.params.dis_txqs;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs);

	/* qg_list can be NULL only in VM/VF reset flow */
	if (!qg_list && !rst_src)
		return ICE_ERR_PARAM;

	if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
		return ICE_ERR_PARAM;

	cmd->num_entries = num_qgrps;

	cmd->vmvf_and_timeout = cpu_to_le16((5 << ICE_AQC_Q_DIS_TIMEOUT_S) &
					    ICE_AQC_Q_DIS_TIMEOUT_M);

	switch (rst_src) {
	case ICE_VM_RESET:
		cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VM_RESET;
		cmd->vmvf_and_timeout |=
			cpu_to_le16(vmvf_num & ICE_AQC_Q_DIS_VMVF_NUM_M);
		break;
	case ICE_VF_RESET:
		cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VF_RESET;
		/* In this case, FW expects vmvf_num to be absolute VF ID */
		cmd->vmvf_and_timeout |=
			cpu_to_le16((vmvf_num + hw->func_caps.vf_base_id) &
				    ICE_AQC_Q_DIS_VMVF_NUM_M);
		break;
	case ICE_NO_RESET:
	default:
		break;
	}

	/* flush pipe on time out */
	cmd->cmd_type |= ICE_AQC_Q_DIS_CMD_FLUSH_PIPE;
	/* If no queue group info, we are in a reset flow. Issue the AQ */
	if (!qg_list)
		goto do_aq;

	/* set RD bit to indicate that command buffer is provided by the driver
	 * and it needs to be read by the firmware
	 */
	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	for (i = 0, item = qg_list; i < num_qgrps; i++) {
		u16 item_size = struct_size(item, q_id, item->num_qs);

		/* If the num of queues is even, add 2 bytes of padding */
		if ((item->num_qs % 2) == 0)
			item_size += 2;

		sz += item_size;

		item = (struct ice_aqc_dis_txq_item *)((u8 *)item + item_size);
	}

	if (buf_size != sz)
		return ICE_ERR_PARAM;

do_aq:
	status = ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
	if (status) {
		if (!qg_list)
			ice_debug(hw, ICE_DBG_SCHED, "VM%d disable failed %d\n",
				  vmvf_num, hw->adminq.sq_last_status);
		else
			ice_debug(hw, ICE_DBG_SCHED, "disable queue %d failed %d\n",
				  le16_to_cpu(qg_list[0].q_id[0]),
				  hw->adminq.sq_last_status);
	}
	return status;
}
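/*
 * Illustrative sketch only: both wrappers above insist that @buf_size equal
 * the exact sum of the per-group struct_size() values. For the common
 * single-group case, the buffer is sized like this (hypothetical helper;
 * struct_size() is the kernel's flexible-array sizing macro):
 */
static inline size_t ice_example_txq_buf_size(u8 num_txqs)
{
	struct ice_aqc_add_tx_qgrp *qg = NULL;

	/* fixed header plus num_txqs flexible-array elements */
	return struct_size(qg, txqs, num_txqs);
}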
/**
 * ice_aq_add_rdma_qsets
 * @hw: pointer to the hardware structure
 * @num_qset_grps: Number of RDMA Qset groups
 * @qset_list: list of Qset groups to be added
 * @buf_size: size of buffer for indirect command
 * @cd: pointer to command details structure or NULL
 *
 * Add Tx RDMA Qsets (0x0C33)
 */
static int
ice_aq_add_rdma_qsets(struct ice_hw *hw, u8 num_qset_grps,
		      struct ice_aqc_add_rdma_qset_data *qset_list,
		      u16 buf_size, struct ice_sq_cd *cd)
{
	struct ice_aqc_add_rdma_qset_data *list;
	struct ice_aqc_add_rdma_qset *cmd;
	struct ice_aq_desc desc;
	u16 i, sum_size = 0;

	cmd = &desc.params.add_rdma_qset;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_rdma_qset);

	if (num_qset_grps > ICE_LAN_TXQ_MAX_QGRPS)
		return -EINVAL;

	for (i = 0, list = qset_list; i < num_qset_grps; i++) {
		u16 num_qsets = le16_to_cpu(list->num_qsets);

		sum_size += struct_size(list, rdma_qsets, num_qsets);
		list = (struct ice_aqc_add_rdma_qset_data *)(list->rdma_qsets +
							     num_qsets);
	}

	if (buf_size != sum_size)
		return -EINVAL;

	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	cmd->num_qset_grps = num_qset_grps;

	return ice_status_to_errno(ice_aq_send_cmd(hw, &desc, qset_list,
						   buf_size, cd));
}

/* End of FW Admin Queue command wrappers */

/**
 * ice_write_byte - write a byte to a packed context structure
 * @src_ctx: the context structure to read from
 * @dest_ctx: the context to be written to
 * @ce_info: a description of the struct to be filled
 */
static void
ice_write_byte(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
{
	u8 src_byte, dest_byte, mask;
	u8 *from, *dest;
	u16 shift_width;

	/* copy from the next struct field */
	from = src_ctx + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;
	mask = (u8)(BIT(ce_info->width) - 1);

	src_byte = *from;
	src_byte &= mask;

	/* shift to correct alignment */
	mask <<= shift_width;
	src_byte <<= shift_width;

	/* get the current bits from the target bit string */
	dest = dest_ctx + (ce_info->lsb / 8);

	memcpy(&dest_byte, dest, sizeof(dest_byte));

	dest_byte &= ~mask;	/* get the bits not changing */
	dest_byte |= src_byte;	/* add in the new bits */

	/* put it all back */
	memcpy(dest, &dest_byte, sizeof(dest_byte));
}
/**
 * ice_write_word - write a word to a packed context structure
 * @src_ctx: the context structure to read from
 * @dest_ctx: the context to be written to
 * @ce_info: a description of the struct to be filled
 */
static void
ice_write_word(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
{
	u16 src_word, mask;
	__le16 dest_word;
	u8 *from, *dest;
	u16 shift_width;

	/* copy from the next struct field */
	from = src_ctx + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;
	mask = BIT(ce_info->width) - 1;

	/* don't swizzle the bits until after the mask because the mask bits
	 * will be in a different bit position on big endian machines
	 */
	src_word = *(u16 *)from;
	src_word &= mask;

	/* shift to correct alignment */
	mask <<= shift_width;
	src_word <<= shift_width;

	/* get the current bits from the target bit string */
	dest = dest_ctx + (ce_info->lsb / 8);

	memcpy(&dest_word, dest, sizeof(dest_word));

	dest_word &= ~(cpu_to_le16(mask));	/* get the bits not changing */
	dest_word |= cpu_to_le16(src_word);	/* add in the new bits */

	/* put it all back */
	memcpy(dest, &dest_word, sizeof(dest_word));
}

/**
 * ice_write_dword - write a dword to a packed context structure
 * @src_ctx: the context structure to read from
 * @dest_ctx: the context to be written to
 * @ce_info: a description of the struct to be filled
 */
static void
ice_write_dword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
{
	u32 src_dword, mask;
	__le32 dest_dword;
	u8 *from, *dest;
	u16 shift_width;

	/* copy from the next struct field */
	from = src_ctx + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;

	/* if the field width is exactly 32 on an x86 machine, then the shift
	 * operation will not work because the SHL instructions count is masked
	 * to 5 bits so the shift will do nothing
	 */
	if (ce_info->width < 32)
		mask = BIT(ce_info->width) - 1;
	else
		mask = (u32)~0;

	/* don't swizzle the bits until after the mask because the mask bits
	 * will be in a different bit position on big endian machines
	 */
	src_dword = *(u32 *)from;
	src_dword &= mask;

	/* shift to correct alignment */
	mask <<= shift_width;
	src_dword <<= shift_width;

	/* get the current bits from the target bit string */
	dest = dest_ctx + (ce_info->lsb / 8);

	memcpy(&dest_dword, dest, sizeof(dest_dword));

	dest_dword &= ~(cpu_to_le32(mask));	/* get the bits not changing */
	dest_dword |= cpu_to_le32(src_dword);	/* add in the new bits */

	/* put it all back */
	memcpy(dest, &dest_dword, sizeof(dest_dword));
}
/**
 * ice_write_qword - write a qword to a packed context structure
 * @src_ctx: the context structure to read from
 * @dest_ctx: the context to be written to
 * @ce_info: a description of the struct to be filled
 */
static void
ice_write_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
{
	u64 src_qword, mask;
	__le64 dest_qword;
	u8 *from, *dest;
	u16 shift_width;

	/* copy from the next struct field */
	from = src_ctx + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;

	/* if the field width is exactly 64 on an x86 machine, then the shift
	 * operation will not work because the SHL instructions count is masked
	 * to 6 bits so the shift will do nothing
	 */
	if (ce_info->width < 64)
		mask = BIT_ULL(ce_info->width) - 1;
	else
		mask = (u64)~0;

	/* don't swizzle the bits until after the mask because the mask bits
	 * will be in a different bit position on big endian machines
	 */
	src_qword = *(u64 *)from;
	src_qword &= mask;

	/* shift to correct alignment */
	mask <<= shift_width;
	src_qword <<= shift_width;

	/* get the current bits from the target bit string */
	dest = dest_ctx + (ce_info->lsb / 8);

	memcpy(&dest_qword, dest, sizeof(dest_qword));

	dest_qword &= ~(cpu_to_le64(mask));	/* get the bits not changing */
	dest_qword |= cpu_to_le64(src_qword);	/* add in the new bits */

	/* put it all back */
	memcpy(dest, &dest_qword, sizeof(dest_qword));
}

/**
 * ice_set_ctx - set context bits in packed structure
 * @hw: pointer to the hardware structure
 * @src_ctx: pointer to a generic non-packed context structure
 * @dest_ctx: pointer to memory for the packed structure
 * @ce_info: a description of the structure to be transformed
 */
enum ice_status
ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
	    const struct ice_ctx_ele *ce_info)
{
	int f;

	for (f = 0; ce_info[f].width; f++) {
		/* We have to deal with each element of the FW response
		 * using the correct size so that we are correct regardless
		 * of the endianness of the machine.
		 */
		if (ce_info[f].width > (ce_info[f].size_of * BITS_PER_BYTE)) {
			ice_debug(hw, ICE_DBG_QCTX, "Field %d width of %d bits larger than size of %d byte(s) ... skipping write\n",
				  f, ce_info[f].width, ce_info[f].size_of);
			continue;
		}
		switch (ce_info[f].size_of) {
		case sizeof(u8):
			ice_write_byte(src_ctx, dest_ctx, &ce_info[f]);
			break;
		case sizeof(u16):
			ice_write_word(src_ctx, dest_ctx, &ce_info[f]);
			break;
		case sizeof(u32):
			ice_write_dword(src_ctx, dest_ctx, &ce_info[f]);
			break;
		case sizeof(u64):
			ice_write_qword(src_ctx, dest_ctx, &ce_info[f]);
			break;
		default:
			return ICE_ERR_INVAL_SIZE;
		}
	}

	return 0;
}
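/*
 * Illustrative sketch only: the shape of a context descriptor table as
 * consumed by ice_set_ctx(). The struct and table below are hypothetical;
 * the real tables (e.g. ice_tlan_ctx_info for the Tx queue context) are
 * built with the same ICE_CTX_STORE() initializer from ice_common.h.
 */
struct ice_example_ctx {
	u16 head;	/* only the low 13 bits are significant */
};

/* each entry: source field, width in bits, LSB position in the packed dest */
static const struct ice_ctx_ele ice_example_ctx_info[] __maybe_unused = {
	ICE_CTX_STORE(ice_example_ctx, head, 13, 0),
	{ 0 }
};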
skipping write\n", 4076 f, ce_info[f].width, ce_info[f].size_of); 4077 continue; 4078 } 4079 switch (ce_info[f].size_of) { 4080 case sizeof(u8): 4081 ice_write_byte(src_ctx, dest_ctx, &ce_info[f]); 4082 break; 4083 case sizeof(u16): 4084 ice_write_word(src_ctx, dest_ctx, &ce_info[f]); 4085 break; 4086 case sizeof(u32): 4087 ice_write_dword(src_ctx, dest_ctx, &ce_info[f]); 4088 break; 4089 case sizeof(u64): 4090 ice_write_qword(src_ctx, dest_ctx, &ce_info[f]); 4091 break; 4092 default: 4093 return ICE_ERR_INVAL_SIZE; 4094 } 4095 } 4096 4097 return 0; 4098 } 4099 4100 /** 4101 * ice_get_lan_q_ctx - get the LAN queue context for the given VSI and TC 4102 * @hw: pointer to the HW struct 4103 * @vsi_handle: software VSI handle 4104 * @tc: TC number 4105 * @q_handle: software queue handle 4106 */ 4107 struct ice_q_ctx * 4108 ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle) 4109 { 4110 struct ice_vsi_ctx *vsi; 4111 struct ice_q_ctx *q_ctx; 4112 4113 vsi = ice_get_vsi_ctx(hw, vsi_handle); 4114 if (!vsi) 4115 return NULL; 4116 if (q_handle >= vsi->num_lan_q_entries[tc]) 4117 return NULL; 4118 if (!vsi->lan_q_ctx[tc]) 4119 return NULL; 4120 q_ctx = vsi->lan_q_ctx[tc]; 4121 return &q_ctx[q_handle]; 4122 } 4123 4124 /** 4125 * ice_ena_vsi_txq 4126 * @pi: port information structure 4127 * @vsi_handle: software VSI handle 4128 * @tc: TC number 4129 * @q_handle: software queue handle 4130 * @num_qgrps: Number of added queue groups 4131 * @buf: list of queue groups to be added 4132 * @buf_size: size of buffer for indirect command 4133 * @cd: pointer to command details structure or NULL 4134 * 4135 * This function adds one LAN queue 4136 */ 4137 enum ice_status 4138 ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle, 4139 u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size, 4140 struct ice_sq_cd *cd) 4141 { 4142 struct ice_aqc_txsched_elem_data node = { 0 }; 4143 struct ice_sched_node *parent; 4144 struct ice_q_ctx *q_ctx; 4145 enum ice_status status; 4146 struct ice_hw *hw; 4147 4148 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY) 4149 return ICE_ERR_CFG; 4150 4151 if (num_qgrps > 1 || buf->num_txqs > 1) 4152 return ICE_ERR_MAX_LIMIT; 4153 4154 hw = pi->hw; 4155 4156 if (!ice_is_vsi_valid(hw, vsi_handle)) 4157 return ICE_ERR_PARAM; 4158 4159 mutex_lock(&pi->sched_lock); 4160 4161 q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handle); 4162 if (!q_ctx) { 4163 ice_debug(hw, ICE_DBG_SCHED, "Enaq: invalid queue handle %d\n", 4164 q_handle); 4165 status = ICE_ERR_PARAM; 4166 goto ena_txq_exit; 4167 } 4168 4169 /* find a parent node */ 4170 parent = ice_sched_get_free_qparent(pi, vsi_handle, tc, 4171 ICE_SCHED_NODE_OWNER_LAN); 4172 if (!parent) { 4173 status = ICE_ERR_PARAM; 4174 goto ena_txq_exit; 4175 } 4176 4177 buf->parent_teid = parent->info.node_teid; 4178 node.parent_teid = parent->info.node_teid; 4179 /* Mark that the values in the "generic" section as valid. The default 4180 * value in the "generic" section is zero. This means that : 4181 * - Scheduling mode is Bytes Per Second (BPS), indicated by Bit 0. 4182 * - 0 priority among siblings, indicated by Bit 1-3. 4183 * - WFQ, indicated by Bit 4. 4184 * - 0 Adjustment value is used in PSM credit update flow, indicated by 4185 * Bit 5-6. 4186 * - Bit 7 is reserved. 4187 * Without setting the generic section as valid in valid_sections, the 4188 * Admin queue command will fail with error code ICE_AQ_RC_EINVAL. 
/**
 * ice_dis_vsi_txq
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc: TC number
 * @num_queues: number of queues
 * @q_handles: pointer to software queue handle array
 * @q_ids: pointer to the q_id array
 * @q_teids: pointer to queue node teids
 * @rst_src: if called due to reset, specifies the reset source
 * @vmvf_num: the relative VM or VF number that is undergoing the reset
 * @cd: pointer to command details structure or NULL
 *
 * This function removes queues and their corresponding nodes in SW DB
 */
enum ice_status
ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
		u16 *q_handles, u16 *q_ids, u32 *q_teids,
		enum ice_disq_rst_src rst_src, u16 vmvf_num,
		struct ice_sq_cd *cd)
{
	enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
	struct ice_aqc_dis_txq_item *qg_list;
	struct ice_q_ctx *q_ctx;
	struct ice_hw *hw;
	u16 i, buf_size;

	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return ICE_ERR_CFG;

	hw = pi->hw;

	if (!num_queues) {
		/* if the queues are already disabled but the disable queue
		 * command still has to be sent to complete the VF reset,
		 * call ice_aq_dis_lan_txq without any queue information
		 */
		if (rst_src)
			return ice_aq_dis_lan_txq(hw, 0, NULL, 0, rst_src,
						  vmvf_num, NULL);
		return ICE_ERR_CFG;
	}

	buf_size = struct_size(qg_list, q_id, 1);
	qg_list = kzalloc(buf_size, GFP_KERNEL);
	if (!qg_list)
		return ICE_ERR_NO_MEMORY;

	mutex_lock(&pi->sched_lock);

	for (i = 0; i < num_queues; i++) {
		struct ice_sched_node *node;

		node = ice_sched_find_node_by_teid(pi->root, q_teids[i]);
		if (!node)
			continue;
		q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handles[i]);
		if (!q_ctx) {
			ice_debug(hw, ICE_DBG_SCHED, "invalid queue handle %d\n",
				  q_handles[i]);
			continue;
		}
		if (q_ctx->q_handle != q_handles[i]) {
			ice_debug(hw, ICE_DBG_SCHED, "Err: handles %d %d\n",
				  q_ctx->q_handle, q_handles[i]);
			continue;
		}
		qg_list->parent_teid = node->info.parent_teid;
		qg_list->num_qs = 1;
		qg_list->q_id[0] = cpu_to_le16(q_ids[i]);
		status = ice_aq_dis_lan_txq(hw, 1, qg_list, buf_size, rst_src,
					    vmvf_num, cd);

		if (status)
			break;
		ice_free_sched_node(pi, node);
		q_ctx->q_handle = ICE_INVAL_Q_HANDLE;
	}
	mutex_unlock(&pi->sched_lock);
	kfree(qg_list);
	return status;
}
/**
 * ice_cfg_vsi_qs - configure the new/existing VSI queues
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc_bitmap: TC bitmap
 * @maxqs: max queues array per TC
 * @owner: LAN or RDMA
 *
 * This function adds/updates the VSI queues per TC.
 */
static enum ice_status
ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
	       u16 *maxqs, u8 owner)
{
	enum ice_status status = 0;
	u8 i;

	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return ICE_ERR_CFG;

	if (!ice_is_vsi_valid(pi->hw, vsi_handle))
		return ICE_ERR_PARAM;

	mutex_lock(&pi->sched_lock);

	ice_for_each_traffic_class(i) {
		/* configuration is possible only if TC node is present */
		if (!ice_sched_get_tc_node(pi, i))
			continue;

		status = ice_sched_cfg_vsi(pi, vsi_handle, i, maxqs[i], owner,
					   ice_is_tc_ena(tc_bitmap, i));
		if (status)
			break;
	}

	mutex_unlock(&pi->sched_lock);
	return status;
}

/**
 * ice_cfg_vsi_lan - configure VSI LAN queues
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc_bitmap: TC bitmap
 * @max_lanqs: max LAN queues array per TC
 *
 * This function adds/updates the VSI LAN queues per TC.
 */
enum ice_status
ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
		u16 *max_lanqs)
{
	return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_lanqs,
			      ICE_SCHED_NODE_OWNER_LAN);
}

/**
 * ice_cfg_vsi_rdma - configure the VSI RDMA queues
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc_bitmap: TC bitmap
 * @max_rdmaqs: max RDMA queues array per TC
 *
 * This function adds/updates the VSI RDMA queues per TC.
 */
int
ice_cfg_vsi_rdma(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap,
		 u16 *max_rdmaqs)
{
	return ice_status_to_errno(ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap,
						  max_rdmaqs,
						  ICE_SCHED_NODE_OWNER_RDMA));
}
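/*
 * Illustrative sketch only: configuring LAN queues for a VSI that uses a
 * single traffic class via ice_cfg_vsi_lan() above. The per-TC array is
 * indexed by TC number; ice_example_cfg_single_tc() is a hypothetical
 * helper.
 */
static inline enum ice_status
ice_example_cfg_single_tc(struct ice_port_info *pi, u16 vsi_handle,
			  u16 num_lanqs)
{
	u16 max_lanqs[ICE_MAX_TRAFFIC_CLASS] = { [0] = num_lanqs };

	/* tc_bitmap BIT(0): only TC 0 is enabled */
	return ice_cfg_vsi_lan(pi, vsi_handle, BIT(0), max_lanqs);
}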
/**
 * ice_ena_vsi_rdma_qset
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc: TC number
 * @rdma_qset: pointer to RDMA Qset
 * @num_qsets: number of RDMA Qsets
 * @qset_teid: pointer to Qset node TEIDs
 *
 * This function adds RDMA Qsets
 */
int
ice_ena_vsi_rdma_qset(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
		      u16 *rdma_qset, u16 num_qsets, u32 *qset_teid)
{
	struct ice_aqc_txsched_elem_data node = { 0 };
	struct ice_aqc_add_rdma_qset_data *buf;
	struct ice_sched_node *parent;
	enum ice_status status;
	struct ice_hw *hw;
	u16 i, buf_size;
	int ret;

	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return -EIO;
	hw = pi->hw;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return -EINVAL;

	buf_size = struct_size(buf, rdma_qsets, num_qsets);
	buf = kzalloc(buf_size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	mutex_lock(&pi->sched_lock);

	parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
					    ICE_SCHED_NODE_OWNER_RDMA);
	if (!parent) {
		ret = -EINVAL;
		goto rdma_error_exit;
	}
	buf->parent_teid = parent->info.node_teid;
	node.parent_teid = parent->info.node_teid;

	buf->num_qsets = cpu_to_le16(num_qsets);
	for (i = 0; i < num_qsets; i++) {
		buf->rdma_qsets[i].tx_qset_id = cpu_to_le16(rdma_qset[i]);
		buf->rdma_qsets[i].info.valid_sections =
			ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR |
			ICE_AQC_ELEM_VALID_EIR;
		buf->rdma_qsets[i].info.generic = 0;
		buf->rdma_qsets[i].info.cir_bw.bw_profile_idx =
			cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
		buf->rdma_qsets[i].info.cir_bw.bw_alloc =
			cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
		buf->rdma_qsets[i].info.eir_bw.bw_profile_idx =
			cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
		buf->rdma_qsets[i].info.eir_bw.bw_alloc =
			cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
	}
	ret = ice_aq_add_rdma_qsets(hw, 1, buf, buf_size, NULL);
	if (ret) {
		ice_debug(hw, ICE_DBG_RDMA, "add RDMA qset failed\n");
		goto rdma_error_exit;
	}
	node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
	for (i = 0; i < num_qsets; i++) {
		node.node_teid = buf->rdma_qsets[i].qset_teid;
		status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1,
					    &node);
		if (status) {
			ret = ice_status_to_errno(status);
			break;
		}
		qset_teid[i] = le32_to_cpu(node.node_teid);
	}
rdma_error_exit:
	mutex_unlock(&pi->sched_lock);
	kfree(buf);
	return ret;
}

/**
 * ice_dis_vsi_rdma_qset - free RDMA resources
 * @pi: port_info struct
 * @count: number of RDMA Qsets to free
 * @qset_teid: TEID of Qset node
 * @q_id: list of queue IDs being disabled
 */
int
ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid,
		      u16 *q_id)
{
	struct ice_aqc_dis_txq_item *qg_list;
	enum ice_status status = 0;
	struct ice_hw *hw;
	u16 qg_size;
	int i;

	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return -EIO;

	hw = pi->hw;

	qg_size = struct_size(qg_list, q_id, 1);
	qg_list = kzalloc(qg_size, GFP_KERNEL);
	if (!qg_list)
		return -ENOMEM;

	mutex_lock(&pi->sched_lock);

	for (i = 0; i < count; i++) {
		struct ice_sched_node *node;

		node = ice_sched_find_node_by_teid(pi->root, qset_teid[i]);
		if (!node)
			continue;

		qg_list->parent_teid = node->info.parent_teid;
		qg_list->num_qs = 1;
		qg_list->q_id[0] =
			cpu_to_le16(q_id[i] |
				    ICE_AQC_Q_DIS_BUF_ELEM_TYPE_RDMA_QSET);

		status = ice_aq_dis_lan_txq(hw, 1, qg_list, qg_size,
					    ICE_NO_RESET, 0, NULL);
		if (status)
			break;

		ice_free_sched_node(pi, node);
	}

	mutex_unlock(&pi->sched_lock);
	kfree(qg_list);
	return ice_status_to_errno(status);
}
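/*
 * Illustrative sketch only: the enable/disable pairing for a single RDMA
 * Qset on TC 0. The TEID returned by the enable call is what the disable
 * call later uses to locate the scheduler node; the helper name is
 * hypothetical.
 */
static inline int
ice_example_rdma_qset_cycle(struct ice_port_info *pi, u16 vsi_handle,
			    u16 qset_id)
{
	u32 teid;
	int err;

	err = ice_ena_vsi_rdma_qset(pi, vsi_handle, 0, &qset_id, 1, &teid);
	if (err)
		return err;

	return ice_dis_vsi_rdma_qset(pi, 1, &teid, &qset_id);
}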
/**
 * ice_replay_pre_init - replay pre initialization
 * @hw: pointer to the HW struct
 *
 * Initializes required config data for VSI, FD, ACL, and RSS before replay.
 */
static enum ice_status ice_replay_pre_init(struct ice_hw *hw)
{
	struct ice_switch_info *sw = hw->switch_info;
	u8 i;

	/* Delete old entries from replay filter list head if there is any */
	ice_rm_all_sw_replay_rule_info(hw);
	/* At the start of replay, move entries into the replay_rules list;
	 * this allows rule entries to be added back to the filt_rules list,
	 * which is the operational list.
	 */
	for (i = 0; i < ICE_SW_LKUP_LAST; i++)
		list_replace_init(&sw->recp_list[i].filt_rules,
				  &sw->recp_list[i].filt_replay_rules);
	ice_sched_replay_agg_vsi_preinit(hw);

	return 0;
}

/**
 * ice_replay_vsi - replay VSI configuration
 * @hw: pointer to the HW struct
 * @vsi_handle: driver VSI handle
 *
 * Restore all VSI configuration after reset. It is required to call this
 * function with main VSI first.
 */
enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle)
{
	enum ice_status status;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	/* Replay pre-initialization if there is any */
	if (vsi_handle == ICE_MAIN_VSI_HANDLE) {
		status = ice_replay_pre_init(hw);
		if (status)
			return status;
	}
	/* Replay per VSI all RSS configurations */
	status = ice_replay_rss_cfg(hw, vsi_handle);
	if (status)
		return status;
	/* Replay per VSI all filters */
	status = ice_replay_vsi_all_fltr(hw, vsi_handle);
	if (!status)
		status = ice_replay_vsi_agg(hw, vsi_handle);
	return status;
}

/**
 * ice_replay_post - post replay configuration cleanup
 * @hw: pointer to the HW struct
 *
 * Post replay cleanup.
 */
void ice_replay_post(struct ice_hw *hw)
{
	/* Delete old entries from replay filter list head */
	ice_rm_all_sw_replay_rule_info(hw);
	ice_sched_replay_agg(hw);
}

/**
 * ice_stat_update40 - read 40 bit stat from the chip and update stat values
 * @hw: ptr to the hardware info
 * @reg: offset of 64 bit HW register to read from
 * @prev_stat_loaded: bool to specify if previous stats are loaded
 * @prev_stat: ptr to previous loaded stat value
 * @cur_stat: ptr to current stat value
 */
void
ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
		  u64 *prev_stat, u64 *cur_stat)
{
	u64 new_data = rd64(hw, reg) & (BIT_ULL(40) - 1);

	/* Device stats are not reset at PFR; they likely will not be zeroed
	 * when the driver starts. Thus, save the value from the first read
	 * without adding it to the statistic value so that we report stats
	 * that count up from zero.
	 */
	if (!prev_stat_loaded) {
		*prev_stat = new_data;
		return;
	}

	/* Calculate the difference between the new and old values, and then
	 * add it to the software stat value.
	 */
	if (new_data >= *prev_stat)
		*cur_stat += new_data - *prev_stat;
	else
		/* to manage the potential roll-over */
		*cur_stat += (new_data + BIT_ULL(40)) - *prev_stat;

	/* Update the previously stored value to prepare for next read */
	*prev_stat = new_data;
}
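/*
 * Illustrative sketch only: a stats refresh path drives the helper above
 * once per counter. GLPRT_GORCL() (Rx good octets, low register) is assumed
 * to come from ice_hw_autogen.h; prev_stat_loaded is false only on the
 * first refresh after load or reset. The helper name is hypothetical.
 */
static inline void
ice_example_refresh_rx_bytes(struct ice_hw *hw, u8 port, bool loaded,
			     u64 *prev, u64 *cur)
{
	ice_stat_update40(hw, GLPRT_GORCL(port), loaded, prev, cur);
}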
/**
 * ice_stat_update32 - read 32 bit stat from the chip and update stat values
 * @hw: ptr to the hardware info
 * @reg: offset of HW register to read from
 * @prev_stat_loaded: bool to specify if previous stats are loaded
 * @prev_stat: ptr to previous loaded stat value
 * @cur_stat: ptr to current stat value
 */
void
ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
		  u64 *prev_stat, u64 *cur_stat)
{
	u32 new_data;

	new_data = rd32(hw, reg);

	/* Device stats are not reset at PFR; they likely will not be zeroed
	 * when the driver starts. Thus, save the value from the first read
	 * without adding it to the statistic value so that we report stats
	 * that count up from zero.
	 */
	if (!prev_stat_loaded) {
		*prev_stat = new_data;
		return;
	}

	/* Calculate the difference between the new and old values, and then
	 * add it to the software stat value.
	 */
	if (new_data >= *prev_stat)
		*cur_stat += new_data - *prev_stat;
	else
		/* to manage the potential roll-over */
		*cur_stat += (new_data + BIT_ULL(32)) - *prev_stat;

	/* Update the previously stored value to prepare for next read */
	*prev_stat = new_data;
}

/**
 * ice_sched_query_elem - query element information from HW
 * @hw: pointer to the HW struct
 * @node_teid: node TEID to be queried
 * @buf: buffer to element information
 *
 * This function queries HW element information
 */
enum ice_status
ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
		     struct ice_aqc_txsched_elem_data *buf)
{
	u16 buf_size, num_elem_ret = 0;
	enum ice_status status;

	buf_size = sizeof(*buf);
	memset(buf, 0, buf_size);
	buf->node_teid = cpu_to_le32(node_teid);
	status = ice_aq_query_sched_elems(hw, 1, buf, buf_size, &num_elem_ret,
					  NULL);
	if (status || num_elem_ret != 1)
		ice_debug(hw, ICE_DBG_SCHED, "query element failed\n");
	return status;
}

/**
 * ice_aq_set_driver_param - Set driver parameter to share via firmware
 * @hw: pointer to the HW struct
 * @idx: parameter index to set
 * @value: the value to set the parameter to
 * @cd: pointer to command details structure or NULL
 *
 * Set the value of one of the software defined parameters. All PFs connected
 * to this device can read the value using ice_aq_get_driver_param.
 *
 * Note that firmware provides no synchronization or locking, and will not
 * save the parameter value during a device reset. It is expected that
 * a single PF will write the parameter value, while all other PFs will only
 * read it.
 */
int
ice_aq_set_driver_param(struct ice_hw *hw, enum ice_aqc_driver_params idx,
			u32 value, struct ice_sq_cd *cd)
{
	struct ice_aqc_driver_shared_params *cmd;
	struct ice_aq_desc desc;

	if (idx >= ICE_AQC_DRIVER_PARAM_MAX)
		return -EIO;

	cmd = &desc.params.drv_shared_params;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_shared_params);

	cmd->set_or_get_op = ICE_AQC_DRIVER_PARAM_SET;
	cmd->param_indx = idx;
	cmd->param_val = cpu_to_le32(value);

	return ice_status_to_errno(ice_aq_send_cmd(hw, &desc, NULL, 0, cd));
}
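/*
 * Illustrative sketch only: one PF publishing a value for the other PFs on
 * the device, in the write-once/read-many pattern the comment above
 * describes. ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR0 is assumed to be one of the
 * defined parameter indices; the helper name is hypothetical.
 */
static inline int ice_example_publish_clk_idx(struct ice_hw *hw, u32 clk_idx)
{
	return ice_aq_set_driver_param(hw, ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR0,
				       clk_idx, NULL);
}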
/**
 * ice_aq_get_driver_param - Get driver parameter shared via firmware
 * @hw: pointer to the HW struct
 * @idx: parameter index to get
 * @value: storage to return the shared parameter
 * @cd: pointer to command details structure or NULL
 *
 * Get the value of one of the software defined parameters.
 *
 * Note that firmware provides no synchronization or locking. It is expected
 * that only a single PF will write a given parameter.
 */
int
ice_aq_get_driver_param(struct ice_hw *hw, enum ice_aqc_driver_params idx,
			u32 *value, struct ice_sq_cd *cd)
{
	struct ice_aqc_driver_shared_params *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	if (idx >= ICE_AQC_DRIVER_PARAM_MAX)
		return -EIO;

	cmd = &desc.params.drv_shared_params;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_shared_params);

	cmd->set_or_get_op = ICE_AQC_DRIVER_PARAM_GET;
	cmd->param_indx = idx;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
	if (status)
		return ice_status_to_errno(status);

	*value = le32_to_cpu(cmd->param_val);

	return 0;
}

/**
 * ice_fw_supports_link_override
 * @hw: pointer to the hardware structure
 *
 * Checks if the firmware supports link override
 */
bool ice_fw_supports_link_override(struct ice_hw *hw)
{
	if (hw->api_maj_ver == ICE_FW_API_LINK_OVERRIDE_MAJ) {
		if (hw->api_min_ver > ICE_FW_API_LINK_OVERRIDE_MIN)
			return true;
		if (hw->api_min_ver == ICE_FW_API_LINK_OVERRIDE_MIN &&
		    hw->api_patch >= ICE_FW_API_LINK_OVERRIDE_PATCH)
			return true;
	} else if (hw->api_maj_ver > ICE_FW_API_LINK_OVERRIDE_MAJ) {
		return true;
	}

	return false;
}

/**
 * ice_get_link_default_override
 * @ldo: pointer to the link default override struct
 * @pi: pointer to the port info struct
 *
 * Gets the link default override for a port
 */
enum ice_status
ice_get_link_default_override(struct ice_link_default_override_tlv *ldo,
			      struct ice_port_info *pi)
{
	u16 i, tlv, tlv_len, tlv_start, buf, offset;
	struct ice_hw *hw = pi->hw;
	enum ice_status status;

	status = ice_get_pfa_module_tlv(hw, &tlv, &tlv_len,
					ICE_SR_LINK_DEFAULT_OVERRIDE_PTR);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to read link override TLV.\n");
		return status;
	}

	/* Each port has its own config; calculate for our port */
	tlv_start = tlv + pi->lport * ICE_SR_PFA_LINK_OVERRIDE_WORDS +
		ICE_SR_PFA_LINK_OVERRIDE_OFFSET;

	/* link options first */
	status = ice_read_sr_word(hw, tlv_start, &buf);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n");
		return status;
	}
	ldo->options = buf & ICE_LINK_OVERRIDE_OPT_M;
	ldo->phy_config = (buf & ICE_LINK_OVERRIDE_PHY_CFG_M) >>
		ICE_LINK_OVERRIDE_PHY_CFG_S;

	/* link PHY config */
	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_FEC_OFFSET;
	status = ice_read_sr_word(hw, offset, &buf);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to read override phy config.\n");
		return status;
	}
	ldo->fec_options = buf & ICE_LINK_OVERRIDE_FEC_OPT_M;

	/* PHY types low */
	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET;
	for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
		status = ice_read_sr_word(hw, (offset + i), &buf);
		if (status) {
			ice_debug(hw, ICE_DBG_INIT, "Failed to read override phy_type_low.\n");
			return status;
		}
		/* shift 16 bits at a time to fill 64 bits */
		ldo->phy_type_low |= ((u64)buf << (i * 16));
	}

	/* PHY types high */
	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET +
		ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS;
	for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
		status = ice_read_sr_word(hw, (offset + i), &buf);
		if (status) {
			ice_debug(hw, ICE_DBG_INIT, "Failed to read override phy_type_high.\n");
			return status;
		}
		/* shift 16 bits at a time to fill 64 bits */
		ldo->phy_type_high |= ((u64)buf << (i * 16));
	}

	return status;
}
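/*
 * Illustrative sketch only: the three-way (major, minor, patch) comparison
 * used by ice_fw_supports_link_override() above and the other
 * ice_fw_supports_*() helpers below, written once as a generic predicate.
 * The helper name is hypothetical.
 */
static inline bool
ice_example_fw_api_at_least(struct ice_hw *hw, u8 maj, u8 min, u8 patch)
{
	if (hw->api_maj_ver > maj)
		return true;
	if (hw->api_maj_ver == maj) {
		if (hw->api_min_ver > min)
			return true;
		if (hw->api_min_ver == min && hw->api_patch >= patch)
			return true;
	}
	return false;
}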
/**
 * ice_is_phy_caps_an_enabled - check if PHY capabilities autoneg is enabled
 * @caps: get PHY capability data
 */
bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps)
{
	if (caps->caps & ICE_AQC_PHY_AN_MODE ||
	    caps->low_power_ctrl_an & (ICE_AQC_PHY_AN_EN_CLAUSE28 |
				       ICE_AQC_PHY_AN_EN_CLAUSE73 |
				       ICE_AQC_PHY_AN_EN_CLAUSE37))
		return true;

	return false;
}

/**
 * ice_aq_set_lldp_mib - Set the LLDP MIB
 * @hw: pointer to the HW struct
 * @mib_type: Local, Remote or both Local and Remote MIBs
 * @buf: pointer to the caller-supplied buffer to store the MIB block
 * @buf_size: size of the buffer (in bytes)
 * @cd: pointer to command details structure or NULL
 *
 * Set the LLDP MIB (0x0A08)
 */
enum ice_status
ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
		    struct ice_sq_cd *cd)
{
	struct ice_aqc_lldp_set_local_mib *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.lldp_set_mib;

	if (buf_size == 0 || !buf)
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_set_local_mib);

	desc.flags |= cpu_to_le16((u16)ICE_AQ_FLAG_RD);
	desc.datalen = cpu_to_le16(buf_size);

	cmd->type = mib_type;
	cmd->length = cpu_to_le16(buf_size);

	return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
}
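/*
 * Illustrative sketch only: pushing a caller-built LLDP MIB block to FW.
 * The in-tree DCBX code builds @buf from TLVs before calling the wrapper
 * above; mib_type 0 is assumed to select the local MIB in that flow. The
 * helper name is hypothetical.
 */
static inline enum ice_status
ice_example_push_local_mib(struct ice_hw *hw, void *buf, u16 len)
{
	return ice_aq_set_lldp_mib(hw, 0, buf, len, NULL);
}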
(0x0A08) 4887 */ 4888 enum ice_status 4889 ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size, 4890 struct ice_sq_cd *cd) 4891 { 4892 struct ice_aqc_lldp_set_local_mib *cmd; 4893 struct ice_aq_desc desc; 4894 4895 cmd = &desc.params.lldp_set_mib; 4896 4897 if (buf_size == 0 || !buf) 4898 return ICE_ERR_PARAM; 4899 4900 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_set_local_mib); 4901 4902 desc.flags |= cpu_to_le16((u16)ICE_AQ_FLAG_RD); 4903 desc.datalen = cpu_to_le16(buf_size); 4904 4905 cmd->type = mib_type; 4906 cmd->length = cpu_to_le16(buf_size); 4907 4908 return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); 4909 } 4910 4911 /** 4912 * ice_fw_supports_lldp_fltr_ctrl - check NVM version supports lldp_fltr_ctrl 4913 * @hw: pointer to HW struct 4914 */ 4915 bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw) 4916 { 4917 if (hw->mac_type != ICE_MAC_E810) 4918 return false; 4919 4920 if (hw->api_maj_ver == ICE_FW_API_LLDP_FLTR_MAJ) { 4921 if (hw->api_min_ver > ICE_FW_API_LLDP_FLTR_MIN) 4922 return true; 4923 if (hw->api_min_ver == ICE_FW_API_LLDP_FLTR_MIN && 4924 hw->api_patch >= ICE_FW_API_LLDP_FLTR_PATCH) 4925 return true; 4926 } else if (hw->api_maj_ver > ICE_FW_API_LLDP_FLTR_MAJ) { 4927 return true; 4928 } 4929 return false; 4930 } 4931 4932 /** 4933 * ice_lldp_fltr_add_remove - add or remove a LLDP Rx switch filter 4934 * @hw: pointer to HW struct 4935 * @vsi_num: absolute HW index for VSI 4936 * @add: boolean for if adding or removing a filter 4937 */ 4938 enum ice_status 4939 ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add) 4940 { 4941 struct ice_aqc_lldp_filter_ctrl *cmd; 4942 struct ice_aq_desc desc; 4943 4944 cmd = &desc.params.lldp_filter_ctrl; 4945 4946 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_filter_ctrl); 4947 4948 if (add) 4949 cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_ADD; 4950 else 4951 cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_DELETE; 4952 4953 cmd->vsi_num = cpu_to_le16(vsi_num); 4954 4955 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 4956 } 4957 4958 /** 4959 * ice_fw_supports_report_dflt_cfg 4960 * @hw: pointer to the hardware structure 4961 * 4962 * Checks if the firmware supports report default configuration 4963 */ 4964 bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw) 4965 { 4966 if (hw->api_maj_ver == ICE_FW_API_REPORT_DFLT_CFG_MAJ) { 4967 if (hw->api_min_ver > ICE_FW_API_REPORT_DFLT_CFG_MIN) 4968 return true; 4969 if (hw->api_min_ver == ICE_FW_API_REPORT_DFLT_CFG_MIN && 4970 hw->api_patch >= ICE_FW_API_REPORT_DFLT_CFG_PATCH) 4971 return true; 4972 } else if (hw->api_maj_ver > ICE_FW_API_REPORT_DFLT_CFG_MAJ) { 4973 return true; 4974 } 4975 return false; 4976 } 4977