// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

#include "ice_common.h"
#include "ice_sched.h"
#include "ice_adminq_cmd.h"
#include "ice_flow.h"

#define ICE_PF_RESET_WAIT_COUNT	300

/**
 * ice_set_mac_type - Sets MAC type
 * @hw: pointer to the HW structure
 *
 * This function sets the MAC type of the adapter based on the
 * vendor ID and device ID stored in the HW structure.
 */
static enum ice_status ice_set_mac_type(struct ice_hw *hw)
{
	if (hw->vendor_id != PCI_VENDOR_ID_INTEL)
		return ICE_ERR_DEVICE_NOT_SUPPORTED;

	switch (hw->device_id) {
	case ICE_DEV_ID_E810C_BACKPLANE:
	case ICE_DEV_ID_E810C_QSFP:
	case ICE_DEV_ID_E810C_SFP:
	case ICE_DEV_ID_E810_XXV_SFP:
		hw->mac_type = ICE_MAC_E810;
		break;
	case ICE_DEV_ID_E823C_10G_BASE_T:
	case ICE_DEV_ID_E823C_BACKPLANE:
	case ICE_DEV_ID_E823C_QSFP:
	case ICE_DEV_ID_E823C_SFP:
	case ICE_DEV_ID_E823C_SGMII:
	case ICE_DEV_ID_E822C_10G_BASE_T:
	case ICE_DEV_ID_E822C_BACKPLANE:
	case ICE_DEV_ID_E822C_QSFP:
	case ICE_DEV_ID_E822C_SFP:
	case ICE_DEV_ID_E822C_SGMII:
	case ICE_DEV_ID_E822L_10G_BASE_T:
	case ICE_DEV_ID_E822L_BACKPLANE:
	case ICE_DEV_ID_E822L_SFP:
	case ICE_DEV_ID_E822L_SGMII:
	case ICE_DEV_ID_E823L_10G_BASE_T:
	case ICE_DEV_ID_E823L_1GBE:
	case ICE_DEV_ID_E823L_BACKPLANE:
	case ICE_DEV_ID_E823L_QSFP:
	case ICE_DEV_ID_E823L_SFP:
		hw->mac_type = ICE_MAC_GENERIC;
		break;
	default:
		hw->mac_type = ICE_MAC_UNKNOWN;
		break;
	}

	ice_debug(hw, ICE_DBG_INIT, "mac_type: %d\n", hw->mac_type);
	return 0;
}

/**
 * ice_clear_pf_cfg - Clear PF configuration
 * @hw: pointer to the hardware structure
 *
 * Clears any existing PF configuration (VSIs, VSI lists, switch rules, port
 * configuration, flow director filters, etc.).
 */
enum ice_status ice_clear_pf_cfg(struct ice_hw *hw)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pf_cfg);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_aq_manage_mac_read - manage MAC address read command
 * @hw: pointer to the HW struct
 * @buf: a virtual buffer to hold the manage MAC read response
 * @buf_size: Size of the virtual buffer
 * @cd: pointer to command details structure or NULL
 *
 * This function is used to return per PF station MAC address (0x0107).
 * NOTE: Upon successful completion of this command, MAC address information
 * is returned in user specified buffer. Please interpret user specified
 * buffer as "manage_mac_read" response.
 * Response such as various MAC addresses are stored in HW struct (port.mac)
 * ice_discover_dev_caps is expected to be called before this function is
 * called.
 */
static enum ice_status
ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
		       struct ice_sq_cd *cd)
{
	struct ice_aqc_manage_mac_read_resp *resp;
	struct ice_aqc_manage_mac_read *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;
	u16 flags;
	u8 i;

	cmd = &desc.params.mac_read;

	if (buf_size < sizeof(*resp))
		return ICE_ERR_BUF_TOO_SHORT;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_read);

	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (status)
		return status;

	resp = (struct ice_aqc_manage_mac_read_resp *)buf;
	flags = le16_to_cpu(cmd->flags) & ICE_AQC_MAN_MAC_READ_M;

	if (!(flags & ICE_AQC_MAN_MAC_LAN_ADDR_VALID)) {
		ice_debug(hw, ICE_DBG_LAN, "got invalid MAC address\n");
		return ICE_ERR_CFG;
	}

	/* A single port can report up to two (LAN and WoL) addresses */
	for (i = 0; i < cmd->num_addr; i++)
		if (resp[i].addr_type == ICE_AQC_MAN_MAC_ADDR_TYPE_LAN) {
			ether_addr_copy(hw->port_info->mac.lan_addr,
					resp[i].mac_addr);
			ether_addr_copy(hw->port_info->mac.perm_addr,
					resp[i].mac_addr);
			break;
		}

	return 0;
}

/**
 * ice_aq_get_phy_caps - returns PHY capabilities
 * @pi: port information structure
 * @qual_mods: report qualified modules
 * @report_mode: report mode capabilities
 * @pcaps: structure for PHY capabilities to be filled
 * @cd: pointer to command details structure or NULL
 *
 * Returns the various PHY capabilities supported on the Port (0x0600)
 */
enum ice_status
ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
		    struct ice_aqc_get_phy_caps_data *pcaps,
		    struct ice_sq_cd *cd)
{
	struct ice_aqc_get_phy_caps *cmd;
	u16 pcaps_size = sizeof(*pcaps);
	struct ice_aq_desc desc;
	enum ice_status status;
	struct ice_hw *hw;

	cmd = &desc.params.get_phy;

	if (!pcaps || (report_mode & ~ICE_AQC_REPORT_MODE_M) || !pi)
		return ICE_ERR_PARAM;
	hw = pi->hw;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_caps);

	if (qual_mods)
		cmd->param0 |= cpu_to_le16(ICE_AQC_GET_PHY_RQM);

	cmd->param0 |= cpu_to_le16(report_mode);
	status = ice_aq_send_cmd(hw, &desc, pcaps, pcaps_size, cd);

	ice_debug(hw, ICE_DBG_LINK, "get phy caps - report_mode = 0x%x\n",
		  report_mode);
	ice_debug(hw, ICE_DBG_LINK, " phy_type_low = 0x%llx\n",
		  (unsigned long long)le64_to_cpu(pcaps->phy_type_low));
	ice_debug(hw, ICE_DBG_LINK, " phy_type_high = 0x%llx\n",
		  (unsigned long long)le64_to_cpu(pcaps->phy_type_high));
	ice_debug(hw, ICE_DBG_LINK, " caps = 0x%x\n", pcaps->caps);
	ice_debug(hw, ICE_DBG_LINK, " low_power_ctrl_an = 0x%x\n",
		  pcaps->low_power_ctrl_an);
	ice_debug(hw, ICE_DBG_LINK, " eee_cap = 0x%x\n", pcaps->eee_cap);
	ice_debug(hw, ICE_DBG_LINK, " eeer_value = 0x%x\n",
		  pcaps->eeer_value);
	ice_debug(hw, ICE_DBG_LINK, " link_fec_options = 0x%x\n",
		  pcaps->link_fec_options);
	ice_debug(hw, ICE_DBG_LINK, " module_compliance_enforcement = 0x%x\n",
		  pcaps->module_compliance_enforcement);
	ice_debug(hw, ICE_DBG_LINK, " extended_compliance_code = 0x%x\n",
		  pcaps->extended_compliance_code);
	ice_debug(hw, ICE_DBG_LINK, " module_type[0] = 0x%x\n",
		  pcaps->module_type[0]);
	ice_debug(hw, ICE_DBG_LINK, " module_type[1] = 0x%x\n",
		  pcaps->module_type[1]);
	ice_debug(hw, ICE_DBG_LINK, " module_type[2] = 0x%x\n",
		  pcaps->module_type[2]);

	if (!status && report_mode == ICE_AQC_REPORT_TOPO_CAP) {
		pi->phy.phy_type_low = le64_to_cpu(pcaps->phy_type_low);
		pi->phy.phy_type_high = le64_to_cpu(pcaps->phy_type_high);
		memcpy(pi->phy.link_info.module_type, &pcaps->module_type,
		       sizeof(pi->phy.link_info.module_type));
	}

	return status;
}

/**
 * ice_aq_get_link_topo_handle - get link topology node return status
 * @pi: port information structure
 * @node_type: requested node type
 * @cd: pointer to command details structure or NULL
 *
 * Get link topology node return status for specified node type (0x06E0)
 *
 * Node type cage can be used to determine if cage is present. If AQC
 * returns error (ENOENT), then no cage present. If no cage present, then
 * connection type is backplane or BASE-T.
 */
static enum ice_status
ice_aq_get_link_topo_handle(struct ice_port_info *pi, u8 node_type,
			    struct ice_sq_cd *cd)
{
	struct ice_aqc_get_link_topo *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.get_link_topo;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);

	cmd->addr.node_type_ctx = (ICE_AQC_LINK_TOPO_NODE_CTX_PORT <<
				   ICE_AQC_LINK_TOPO_NODE_CTX_S);

	/* set node type */
	cmd->addr.node_type_ctx |= (ICE_AQC_LINK_TOPO_NODE_TYPE_M & node_type);

	return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
}

/**
 * ice_is_media_cage_present
 * @pi: port information structure
 *
 * Returns true if media cage is present, else false. If no cage, then
 * media type is backplane or BASE-T.
 */
static bool ice_is_media_cage_present(struct ice_port_info *pi)
{
	/* Node type cage can be used to determine if cage is present. If AQC
	 * returns error (ENOENT), then no cage present. If no cage present then
	 * connection type is backplane or BASE-T.
	 */
	return !ice_aq_get_link_topo_handle(pi,
					    ICE_AQC_LINK_TOPO_NODE_TYPE_CAGE,
					    NULL);
}

/**
 * ice_get_media_type - Gets media type
 * @pi: port information structure
 */
static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
{
	struct ice_link_status *hw_link_info;

	if (!pi)
		return ICE_MEDIA_UNKNOWN;

	hw_link_info = &pi->phy.link_info;
	if (hw_link_info->phy_type_low && hw_link_info->phy_type_high)
		/* If more than one media type is selected, report unknown */
		return ICE_MEDIA_UNKNOWN;

	if (hw_link_info->phy_type_low) {
		/* 1G SGMII is a special case where some DA cable PHYs
		 * may show this as an option when it really shouldn't
		 * be since SGMII is meant to be between a MAC and a PHY
		 * in a backplane. Try to detect this case and handle it
		 */
		if (hw_link_info->phy_type_low == ICE_PHY_TYPE_LOW_1G_SGMII &&
		    (hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] ==
		     ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE ||
		     hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] ==
		     ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE))
			return ICE_MEDIA_DA;

		switch (hw_link_info->phy_type_low) {
		case ICE_PHY_TYPE_LOW_1000BASE_SX:
		case ICE_PHY_TYPE_LOW_1000BASE_LX:
		case ICE_PHY_TYPE_LOW_10GBASE_SR:
		case ICE_PHY_TYPE_LOW_10GBASE_LR:
		case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
		case ICE_PHY_TYPE_LOW_25GBASE_SR:
		case ICE_PHY_TYPE_LOW_25GBASE_LR:
		case ICE_PHY_TYPE_LOW_40GBASE_SR4:
		case ICE_PHY_TYPE_LOW_40GBASE_LR4:
		case ICE_PHY_TYPE_LOW_50GBASE_SR2:
		case ICE_PHY_TYPE_LOW_50GBASE_LR2:
		case ICE_PHY_TYPE_LOW_50GBASE_SR:
		case ICE_PHY_TYPE_LOW_50GBASE_FR:
		case ICE_PHY_TYPE_LOW_50GBASE_LR:
		case ICE_PHY_TYPE_LOW_100GBASE_SR4:
		case ICE_PHY_TYPE_LOW_100GBASE_LR4:
		case ICE_PHY_TYPE_LOW_100GBASE_SR2:
		case ICE_PHY_TYPE_LOW_100GBASE_DR:
		case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
		case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
		case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
		case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
		case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
		case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
		case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
		case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
			return ICE_MEDIA_FIBER;
		case ICE_PHY_TYPE_LOW_100BASE_TX:
		case ICE_PHY_TYPE_LOW_1000BASE_T:
		case ICE_PHY_TYPE_LOW_2500BASE_T:
		case ICE_PHY_TYPE_LOW_5GBASE_T:
		case ICE_PHY_TYPE_LOW_10GBASE_T:
		case ICE_PHY_TYPE_LOW_25GBASE_T:
			return ICE_MEDIA_BASET;
		case ICE_PHY_TYPE_LOW_10G_SFI_DA:
		case ICE_PHY_TYPE_LOW_25GBASE_CR:
		case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
		case ICE_PHY_TYPE_LOW_25GBASE_CR1:
		case ICE_PHY_TYPE_LOW_40GBASE_CR4:
		case ICE_PHY_TYPE_LOW_50GBASE_CR2:
		case ICE_PHY_TYPE_LOW_50GBASE_CP:
		case ICE_PHY_TYPE_LOW_100GBASE_CR4:
		case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
		case ICE_PHY_TYPE_LOW_100GBASE_CP2:
			return ICE_MEDIA_DA;
		case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
		case ICE_PHY_TYPE_LOW_40G_XLAUI:
		case ICE_PHY_TYPE_LOW_50G_LAUI2:
		case ICE_PHY_TYPE_LOW_50G_AUI2:
		case ICE_PHY_TYPE_LOW_50G_AUI1:
		case ICE_PHY_TYPE_LOW_100G_AUI4:
		case ICE_PHY_TYPE_LOW_100G_CAUI4:
			if (ice_is_media_cage_present(pi))
				return ICE_MEDIA_DA;
			fallthrough;
		case ICE_PHY_TYPE_LOW_1000BASE_KX:
		case ICE_PHY_TYPE_LOW_2500BASE_KX:
		case ICE_PHY_TYPE_LOW_2500BASE_X:
		case ICE_PHY_TYPE_LOW_5GBASE_KR:
		case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
		case ICE_PHY_TYPE_LOW_25GBASE_KR:
		case ICE_PHY_TYPE_LOW_25GBASE_KR1:
		case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
		case ICE_PHY_TYPE_LOW_40GBASE_KR4:
		case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
		case ICE_PHY_TYPE_LOW_50GBASE_KR2:
		case ICE_PHY_TYPE_LOW_100GBASE_KR4:
		case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
			return ICE_MEDIA_BACKPLANE;
		}
	} else {
		switch (hw_link_info->phy_type_high) {
		case ICE_PHY_TYPE_HIGH_100G_AUI2:
		case ICE_PHY_TYPE_HIGH_100G_CAUI2:
			if (ice_is_media_cage_present(pi))
				return ICE_MEDIA_DA;
			fallthrough;
		case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
			return ICE_MEDIA_BACKPLANE;
		case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
		case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
			return ICE_MEDIA_FIBER;
		}
	}
	return ICE_MEDIA_UNKNOWN;
}

/**
 * ice_aq_get_link_info
 * @pi: port information structure
 * @ena_lse: enable/disable LinkStatusEvent reporting
 * @link: pointer to link status structure - optional
 * @cd: pointer to command details structure or NULL
 *
 * Get Link Status (0x607). Returns the link status of the adapter.
 */
enum ice_status
ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
		     struct ice_link_status *link, struct ice_sq_cd *cd)
{
	struct ice_aqc_get_link_status_data link_data = { 0 };
	struct ice_aqc_get_link_status *resp;
	struct ice_link_status *li_old, *li;
	enum ice_media_type *hw_media_type;
	struct ice_fc_info *hw_fc_info;
	bool tx_pause, rx_pause;
	struct ice_aq_desc desc;
	enum ice_status status;
	struct ice_hw *hw;
	u16 cmd_flags;

	if (!pi)
		return ICE_ERR_PARAM;
	hw = pi->hw;
	li_old = &pi->phy.link_info_old;
	hw_media_type = &pi->phy.media_type;
	li = &pi->phy.link_info;
	hw_fc_info = &pi->fc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_status);
	cmd_flags = (ena_lse) ? ICE_AQ_LSE_ENA : ICE_AQ_LSE_DIS;
	resp = &desc.params.get_link_status;
	resp->cmd_flags = cpu_to_le16(cmd_flags);
	resp->lport_num = pi->lport;

	status = ice_aq_send_cmd(hw, &desc, &link_data, sizeof(link_data), cd);

	if (status)
		return status;

	/* save off old link status information */
	*li_old = *li;

	/* update current link status information */
	li->link_speed = le16_to_cpu(link_data.link_speed);
	li->phy_type_low = le64_to_cpu(link_data.phy_type_low);
	li->phy_type_high = le64_to_cpu(link_data.phy_type_high);
	*hw_media_type = ice_get_media_type(pi);
	li->link_info = link_data.link_info;
	li->an_info = link_data.an_info;
	li->ext_info = link_data.ext_info;
	li->max_frame_size = le16_to_cpu(link_data.max_frame_size);
	li->fec_info = link_data.cfg & ICE_AQ_FEC_MASK;
	li->topo_media_conflict = link_data.topo_media_conflict;
	li->pacing = link_data.cfg & (ICE_AQ_CFG_PACING_M |
				      ICE_AQ_CFG_PACING_TYPE_M);

	/* update fc info */
	tx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_TX);
	rx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_RX);
	if (tx_pause && rx_pause)
		hw_fc_info->current_mode = ICE_FC_FULL;
	else if (tx_pause)
		hw_fc_info->current_mode = ICE_FC_TX_PAUSE;
	else if (rx_pause)
		hw_fc_info->current_mode = ICE_FC_RX_PAUSE;
	else
		hw_fc_info->current_mode = ICE_FC_NONE;

	li->lse_ena = !!(resp->cmd_flags & cpu_to_le16(ICE_AQ_LSE_IS_ENABLED));

	ice_debug(hw, ICE_DBG_LINK, "get link info\n");
	ice_debug(hw, ICE_DBG_LINK, " link_speed = 0x%x\n", li->link_speed);
	ice_debug(hw, ICE_DBG_LINK, " phy_type_low = 0x%llx\n",
		  (unsigned long long)li->phy_type_low);
	ice_debug(hw, ICE_DBG_LINK, " phy_type_high = 0x%llx\n",
		  (unsigned long long)li->phy_type_high);
	ice_debug(hw, ICE_DBG_LINK, " media_type = 0x%x\n", *hw_media_type);
	ice_debug(hw, ICE_DBG_LINK, " link_info = 0x%x\n", li->link_info);
	ice_debug(hw, ICE_DBG_LINK, " an_info = 0x%x\n", li->an_info);
	ice_debug(hw, ICE_DBG_LINK, " ext_info = 0x%x\n", li->ext_info);
	ice_debug(hw, ICE_DBG_LINK, " fec_info = 0x%x\n", li->fec_info);
	ice_debug(hw, ICE_DBG_LINK, " lse_ena = 0x%x\n", li->lse_ena);
	ice_debug(hw, ICE_DBG_LINK, " max_frame = 0x%x\n",
		  li->max_frame_size);
	ice_debug(hw, ICE_DBG_LINK, " pacing = 0x%x\n", li->pacing);

	/* save link status information */
	if (link)
		*link = *li;

	/* flag cleared so calling functions don't call AQ again */
	pi->phy.get_link_info = false;

	return 0;
}

/**
 * ice_fill_tx_timer_and_fc_thresh
 * @hw: pointer to the HW struct
 * @cmd: pointer to MAC cfg structure
 *
 * Add Tx timer and FC refresh threshold info to Set MAC Config AQ command
 * descriptor
 */
static void
ice_fill_tx_timer_and_fc_thresh(struct ice_hw *hw,
				struct ice_aqc_set_mac_cfg *cmd)
{
	u16 fc_thres_val, tx_timer_val;
	u32 val;

	/* We read back the transmit timer and FC threshold value of
	 * LFC. Thus, we will use index =
	 * PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX.
	 *
	 * Also, because we are operating on transmit timer and FC
	 * threshold of LFC, we don't turn on any bit in tx_tmr_priority
	 */
#define IDX_OF_LFC PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX

	/* Retrieve the transmit timer */
	val = rd32(hw, PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(IDX_OF_LFC));
	tx_timer_val = val &
		PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_M;
	cmd->tx_tmr_value = cpu_to_le16(tx_timer_val);

	/* Retrieve the FC threshold */
	val = rd32(hw, PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(IDX_OF_LFC));
	fc_thres_val = val & PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_M;

	cmd->fc_refresh_threshold = cpu_to_le16(fc_thres_val);
}

/**
 * ice_aq_set_mac_cfg
 * @hw: pointer to the HW struct
 * @max_frame_size: Maximum Frame Size to be supported
 * @cd: pointer to command details structure or NULL
 *
 * Set MAC configuration (0x0603)
 */
enum ice_status
ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd)
{
	struct ice_aqc_set_mac_cfg *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.set_mac_cfg;

	if (max_frame_size == 0)
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_cfg);

	cmd->max_frame_size = cpu_to_le16(max_frame_size);

	ice_fill_tx_timer_and_fc_thresh(hw, cmd);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_init_fltr_mgmt_struct - initializes filter management list and locks
 * @hw: pointer to the HW struct
 */
static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw)
{
	struct ice_switch_info *sw;
	enum ice_status status;

	hw->switch_info = devm_kzalloc(ice_hw_to_dev(hw),
				       sizeof(*hw->switch_info), GFP_KERNEL);
	sw = hw->switch_info;

	if (!sw)
		return ICE_ERR_NO_MEMORY;

	INIT_LIST_HEAD(&sw->vsi_list_map_head);

	status = ice_init_def_sw_recp(hw);
	if (status) {
		devm_kfree(ice_hw_to_dev(hw), hw->switch_info);
		return status;
	}
	return 0;
}

/**
 * ice_cleanup_fltr_mgmt_struct - cleanup filter management list and locks
 * @hw: pointer to the HW struct
 */
static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_vsi_list_map_info *v_pos_map;
	struct ice_vsi_list_map_info *v_tmp_map;
	struct ice_sw_recipe *recps;
	u8 i;

	list_for_each_entry_safe(v_pos_map, v_tmp_map, &sw->vsi_list_map_head,
				 list_entry) {
		list_del(&v_pos_map->list_entry);
		devm_kfree(ice_hw_to_dev(hw), v_pos_map);
	}
	recps = hw->switch_info->recp_list;
	for (i = 0; i < ICE_SW_LKUP_LAST; i++) {
		struct ice_fltr_mgmt_list_entry *lst_itr, *tmp_entry;

		recps[i].root_rid = i;
		mutex_destroy(&recps[i].filt_rule_lock);
		list_for_each_entry_safe(lst_itr, tmp_entry,
					 &recps[i].filt_rules, list_entry) {
			list_del(&lst_itr->list_entry);
			devm_kfree(ice_hw_to_dev(hw), lst_itr);
		}
	}
	ice_rm_all_sw_replay_rule_info(hw);
	devm_kfree(ice_hw_to_dev(hw), sw->recp_list);
	devm_kfree(ice_hw_to_dev(hw), sw);
}

/**
 * ice_get_fw_log_cfg - get FW logging configuration
 * @hw: pointer to the HW struct
 */
static enum ice_status ice_get_fw_log_cfg(struct ice_hw *hw)
{
	struct ice_aq_desc desc;
	enum ice_status status;
	__le16 *config;
	u16 size;

	size = sizeof(*config) * ICE_AQC_FW_LOG_ID_MAX;
	config = devm_kzalloc(ice_hw_to_dev(hw), size, GFP_KERNEL);
	if (!config)
		return ICE_ERR_NO_MEMORY;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging_info);

	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	status = ice_aq_send_cmd(hw, &desc, config, size, NULL);
	if (!status) {
		u16 i;

		/* Save FW logging information into the HW structure */
		for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) {
			u16 v, m, flgs;

			v = le16_to_cpu(config[i]);
			m = (v & ICE_AQC_FW_LOG_ID_M) >> ICE_AQC_FW_LOG_ID_S;
			flgs = (v & ICE_AQC_FW_LOG_EN_M) >> ICE_AQC_FW_LOG_EN_S;

			if (m < ICE_AQC_FW_LOG_ID_MAX)
				hw->fw_log.evnts[m].cur = flgs;
		}
	}

	devm_kfree(ice_hw_to_dev(hw), config);

	return status;
}

/**
 * ice_cfg_fw_log - configure FW logging
 * @hw: pointer to the HW struct
 * @enable: enable certain FW logging events if true, disable all if false
 *
 * This function enables/disables the FW logging via Rx CQ events and a UART
 * port based on predetermined configurations. FW logging via the Rx CQ can be
 * enabled/disabled for individual PF's. However, FW logging via the UART can
 * only be enabled/disabled for all PFs on the same device.
 *
 * To enable overall FW logging, the "cq_en" and "uart_en" enable bits in
 * hw->fw_log need to be set accordingly, e.g. based on user-provided input,
 * before initializing the device.
 *
 * When re/configuring FW logging, callers need to update the "cfg" elements of
 * the hw->fw_log.evnts array with the desired logging event configurations for
 * modules of interest. When disabling FW logging completely, the callers can
 * just pass false in the "enable" parameter. On completion, the function will
 * update the "cur" element of the hw->fw_log.evnts array with the resulting
 * logging event configurations of the modules that are being re/configured. FW
 * logging modules that are not part of a reconfiguration operation retain their
 * previous states.
 *
 * Before resetting the device, it is recommended that the driver disables FW
 * logging before shutting down the control queue. When disabling FW logging
 * ("enable" = false), the latest configurations of FW logging events stored in
 * hw->fw_log.evnts[] are not overridden to allow them to be reconfigured after
 * a device reset.
 *
 * When enabling FW logging to emit log messages via the Rx CQ during the
 * device's initialization phase, a mechanism alternative to interrupt handlers
 * needs to be used to extract FW log messages from the Rx CQ periodically and
 * to prevent the Rx CQ from being full and stalling other types of control
 * messages from FW to SW. Interrupts are typically disabled during the device's
 * initialization phase.
 */
static enum ice_status ice_cfg_fw_log(struct ice_hw *hw, bool enable)
{
	struct ice_aqc_fw_logging *cmd;
	enum ice_status status = 0;
	u16 i, chgs = 0, len = 0;
	struct ice_aq_desc desc;
	__le16 *data = NULL;
	u8 actv_evnts = 0;
	void *buf = NULL;

	if (!hw->fw_log.cq_en && !hw->fw_log.uart_en)
		return 0;

	/* Disable FW logging only when the control queue is still responsive */
	if (!enable &&
	    (!hw->fw_log.actv_evnts || !ice_check_sq_alive(hw, &hw->adminq)))
		return 0;

	/* Get current FW log settings */
	status = ice_get_fw_log_cfg(hw);
	if (status)
		return status;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging);
	cmd = &desc.params.fw_logging;

	/* Indicate which controls are valid */
	if (hw->fw_log.cq_en)
		cmd->log_ctrl_valid |= ICE_AQC_FW_LOG_AQ_VALID;

	if (hw->fw_log.uart_en)
		cmd->log_ctrl_valid |= ICE_AQC_FW_LOG_UART_VALID;

	if (enable) {
		/* Fill in an array of entries with FW logging modules and
		 * logging events being reconfigured.
		 */
		for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) {
			u16 val;

			/* Keep track of enabled event types */
			actv_evnts |= hw->fw_log.evnts[i].cfg;

			if (hw->fw_log.evnts[i].cfg == hw->fw_log.evnts[i].cur)
				continue;

			if (!data) {
				data = devm_kcalloc(ice_hw_to_dev(hw),
						    sizeof(*data),
						    ICE_AQC_FW_LOG_ID_MAX,
						    GFP_KERNEL);
				if (!data)
					return ICE_ERR_NO_MEMORY;
			}

			val = i << ICE_AQC_FW_LOG_ID_S;
			val |= hw->fw_log.evnts[i].cfg << ICE_AQC_FW_LOG_EN_S;
			data[chgs++] = cpu_to_le16(val);
		}

		/* Only enable FW logging if at least one module is specified.
		 * If FW logging is currently enabled but all modules are not
		 * enabled to emit log messages, disable FW logging altogether.
		 */
		if (actv_evnts) {
			/* Leave if there is effectively no change */
			if (!chgs)
				goto out;

			if (hw->fw_log.cq_en)
				cmd->log_ctrl |= ICE_AQC_FW_LOG_AQ_EN;

			if (hw->fw_log.uart_en)
				cmd->log_ctrl |= ICE_AQC_FW_LOG_UART_EN;

			buf = data;
			len = sizeof(*data) * chgs;
			desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
		}
	}

	status = ice_aq_send_cmd(hw, &desc, buf, len, NULL);
	if (!status) {
		/* Update the current configuration to reflect events enabled.
		 * hw->fw_log.cq_en and hw->fw_log.uart_en indicate if the FW
		 * logging mode is enabled for the device. They do not reflect
		 * actual modules being enabled to emit log messages. So, their
		 * values remain unchanged even when all modules are disabled.
		 */
		u16 cnt = enable ? chgs : (u16)ICE_AQC_FW_LOG_ID_MAX;

		hw->fw_log.actv_evnts = actv_evnts;
		for (i = 0; i < cnt; i++) {
			u16 v, m;

			if (!enable) {
				/* When disabling all FW logging events as part
				 * of device's de-initialization, the original
				 * configurations are retained, and can be used
				 * to reconfigure FW logging later if the device
				 * is re-initialized.
				 */
				hw->fw_log.evnts[i].cur = 0;
				continue;
			}

			v = le16_to_cpu(data[i]);
			m = (v & ICE_AQC_FW_LOG_ID_M) >> ICE_AQC_FW_LOG_ID_S;
			hw->fw_log.evnts[m].cur = hw->fw_log.evnts[m].cfg;
		}
	}

out:
	if (data)
		devm_kfree(ice_hw_to_dev(hw), data);

	return status;
}

/**
 * ice_output_fw_log
 * @hw: pointer to the HW struct
 * @desc: pointer to the AQ message descriptor
 * @buf: pointer to the buffer accompanying the AQ message
 *
 * Formats a FW Log message and outputs it via the standard driver logs.
 */
void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf)
{
	ice_debug(hw, ICE_DBG_FW_LOG, "[ FW Log Msg Start ]\n");
	ice_debug_array(hw, ICE_DBG_FW_LOG, 16, 1, (u8 *)buf,
			le16_to_cpu(desc->datalen));
	ice_debug(hw, ICE_DBG_FW_LOG, "[ FW Log Msg End ]\n");
}

/**
 * ice_get_itr_intrl_gran
 * @hw: pointer to the HW struct
 *
 * Determines the ITR/INTRL granularities based on the maximum aggregate
 * bandwidth according to the device's configuration during power-on.
 */
static void ice_get_itr_intrl_gran(struct ice_hw *hw)
{
	u8 max_agg_bw = (rd32(hw, GL_PWR_MODE_CTL) &
			 GL_PWR_MODE_CTL_CAR_MAX_BW_M) >>
			GL_PWR_MODE_CTL_CAR_MAX_BW_S;

	switch (max_agg_bw) {
	case ICE_MAX_AGG_BW_200G:
	case ICE_MAX_AGG_BW_100G:
	case ICE_MAX_AGG_BW_50G:
		hw->itr_gran = ICE_ITR_GRAN_ABOVE_25;
		hw->intrl_gran = ICE_INTRL_GRAN_ABOVE_25;
		break;
	case ICE_MAX_AGG_BW_25G:
		hw->itr_gran = ICE_ITR_GRAN_MAX_25;
		hw->intrl_gran = ICE_INTRL_GRAN_MAX_25;
		break;
	}
}

/**
 * ice_init_hw - main hardware initialization routine
 * @hw: pointer to the hardware structure
 */
enum ice_status ice_init_hw(struct ice_hw *hw)
{
	struct ice_aqc_get_phy_caps_data *pcaps;
	enum ice_status status;
	u16 mac_buf_len;
	void *mac_buf;

	/* Set MAC type based on DeviceID */
	status = ice_set_mac_type(hw);
	if (status)
		return status;

	hw->pf_id = (u8)(rd32(hw, PF_FUNC_RID) &
			 PF_FUNC_RID_FUNC_NUM_M) >>
		PF_FUNC_RID_FUNC_NUM_S;

	status = ice_reset(hw, ICE_RESET_PFR);
	if (status)
		return status;

	ice_get_itr_intrl_gran(hw);

	status = ice_create_all_ctrlq(hw);
	if (status)
		goto err_unroll_cqinit;

	/* Enable FW logging. Not fatal if this fails. */
	status = ice_cfg_fw_log(hw, true);
	if (status)
		ice_debug(hw, ICE_DBG_INIT, "Failed to enable FW logging.\n");

	status = ice_clear_pf_cfg(hw);
	if (status)
		goto err_unroll_cqinit;

	/* Set bit to enable Flow Director filters */
	wr32(hw, PFQF_FD_ENA, PFQF_FD_ENA_FD_ENA_M);
	INIT_LIST_HEAD(&hw->fdir_list_head);

	ice_clear_pxe_mode(hw);

	status = ice_init_nvm(hw);
	if (status)
		goto err_unroll_cqinit;

	status = ice_get_caps(hw);
	if (status)
		goto err_unroll_cqinit;

	hw->port_info = devm_kzalloc(ice_hw_to_dev(hw),
				     sizeof(*hw->port_info), GFP_KERNEL);
	if (!hw->port_info) {
		status = ICE_ERR_NO_MEMORY;
		goto err_unroll_cqinit;
	}

	/* set the back pointer to HW */
	hw->port_info->hw = hw;

	/* Initialize port_info struct with switch configuration data */
	status = ice_get_initial_sw_cfg(hw);
	if (status)
		goto err_unroll_alloc;

	hw->evb_veb = true;

	/* Query the allocated resources for Tx scheduler */
	status = ice_sched_query_res_alloc(hw);
	if (status) {
		ice_debug(hw, ICE_DBG_SCHED,
			  "Failed to get scheduler allocated resources\n");
		goto err_unroll_alloc;
	}

	/* Initialize port_info struct with scheduler data */
	status = ice_sched_init_port(hw->port_info);
	if (status)
		goto err_unroll_sched;

	pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
	if (!pcaps) {
		status = ICE_ERR_NO_MEMORY;
		goto err_unroll_sched;
	}

	/* Initialize port_info struct with PHY capabilities */
	status = ice_aq_get_phy_caps(hw->port_info, false,
				     ICE_AQC_REPORT_TOPO_CAP, pcaps, NULL);
	devm_kfree(ice_hw_to_dev(hw), pcaps);
	if (status)
		goto err_unroll_sched;

	/* Initialize port_info struct with link information */
	status = ice_aq_get_link_info(hw->port_info, false, NULL, NULL);
	if (status)
		goto err_unroll_sched;

	/* need a valid SW entry point to build a Tx tree */
	if (!hw->sw_entry_point_layer) {
		ice_debug(hw, ICE_DBG_SCHED, "invalid sw entry point\n");
		status = ICE_ERR_CFG;
		goto err_unroll_sched;
	}
	INIT_LIST_HEAD(&hw->agg_list);
	/* Initialize max burst size */
	if (!hw->max_burst_size)
		ice_cfg_rl_burst_size(hw, ICE_SCHED_DFLT_BURST_SIZE);

	status = ice_init_fltr_mgmt_struct(hw);
	if (status)
		goto err_unroll_sched;

	/* Get MAC information */
	/* A single port can report up to two (LAN and WoL) addresses */
	mac_buf = devm_kcalloc(ice_hw_to_dev(hw), 2,
			       sizeof(struct ice_aqc_manage_mac_read_resp),
			       GFP_KERNEL);
	mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);

	if (!mac_buf) {
		status = ICE_ERR_NO_MEMORY;
		goto err_unroll_fltr_mgmt_struct;
	}

	status = ice_aq_manage_mac_read(hw, mac_buf, mac_buf_len, NULL);
	devm_kfree(ice_hw_to_dev(hw), mac_buf);

	if (status)
		goto err_unroll_fltr_mgmt_struct;
	/* enable jumbo frame support at MAC level */
	status = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL);
	if (status)
		goto err_unroll_fltr_mgmt_struct;
	/* Obtain counter base index which would be used by flow director */
	status = ice_alloc_fd_res_cntr(hw, &hw->fd_ctr_base);
	if (status)
		goto err_unroll_fltr_mgmt_struct;
	status = ice_init_hw_tbls(hw);
	if (status)
		goto err_unroll_fltr_mgmt_struct;
	mutex_init(&hw->tnl_lock);
	return 0;

err_unroll_fltr_mgmt_struct:
	ice_cleanup_fltr_mgmt_struct(hw);
err_unroll_sched:
	ice_sched_cleanup_all(hw);
err_unroll_alloc:
	devm_kfree(ice_hw_to_dev(hw), hw->port_info);
err_unroll_cqinit:
	ice_destroy_all_ctrlq(hw);
	return status;
}

/**
 * ice_deinit_hw - unroll initialization operations done by ice_init_hw
 * @hw: pointer to the hardware structure
 *
 * This should be called only during nominal operation, not as a result of
 * ice_init_hw() failing since ice_init_hw() will take care of unrolling
 * applicable initializations if it fails for any reason.
 */
void ice_deinit_hw(struct ice_hw *hw)
{
	ice_free_fd_res_cntr(hw, hw->fd_ctr_base);
	ice_cleanup_fltr_mgmt_struct(hw);

	ice_sched_cleanup_all(hw);
	ice_sched_clear_agg(hw);
	ice_free_seg(hw);
	ice_free_hw_tbls(hw);
	mutex_destroy(&hw->tnl_lock);

	if (hw->port_info) {
		devm_kfree(ice_hw_to_dev(hw), hw->port_info);
		hw->port_info = NULL;
	}

	/* Attempt to disable FW logging before shutting down control queues */
	ice_cfg_fw_log(hw, false);
	ice_destroy_all_ctrlq(hw);

	/* Clear VSI contexts if not already cleared */
	ice_clear_all_vsi_ctx(hw);
}

/**
 * ice_check_reset - Check to see if a global reset is complete
 * @hw: pointer to the hardware structure
 */
enum ice_status ice_check_reset(struct ice_hw *hw)
{
	u32 cnt, reg = 0, grst_delay, uld_mask;

	/* Poll for Device Active state in case a recent CORER, GLOBR,
	 * or EMPR has occurred. The grst delay value is in 100ms units.
	 * Add 1sec for outstanding AQ commands that can take a long time.
	 */
	grst_delay = ((rd32(hw, GLGEN_RSTCTL) & GLGEN_RSTCTL_GRSTDEL_M) >>
		      GLGEN_RSTCTL_GRSTDEL_S) + 10;

	for (cnt = 0; cnt < grst_delay; cnt++) {
		mdelay(100);
		reg = rd32(hw, GLGEN_RSTAT);
		if (!(reg & GLGEN_RSTAT_DEVSTATE_M))
			break;
	}

	if (cnt == grst_delay) {
		ice_debug(hw, ICE_DBG_INIT,
			  "Global reset polling failed to complete.\n");
		return ICE_ERR_RESET_FAILED;
	}

#define ICE_RESET_DONE_MASK	(GLNVM_ULD_PCIER_DONE_M |\
				 GLNVM_ULD_PCIER_DONE_1_M |\
				 GLNVM_ULD_CORER_DONE_M |\
				 GLNVM_ULD_GLOBR_DONE_M |\
				 GLNVM_ULD_POR_DONE_M |\
				 GLNVM_ULD_POR_DONE_1_M |\
				 GLNVM_ULD_PCIER_DONE_2_M)

	uld_mask = ICE_RESET_DONE_MASK;

	/* Device is Active; check Global Reset processes are done */
	for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
		reg = rd32(hw, GLNVM_ULD) & uld_mask;
		if (reg == uld_mask) {
			ice_debug(hw, ICE_DBG_INIT,
				  "Global reset processes done. %d\n", cnt);
			break;
		}
		mdelay(10);
	}

	if (cnt == ICE_PF_RESET_WAIT_COUNT) {
		ice_debug(hw, ICE_DBG_INIT,
			  "Wait for Reset Done timed out. GLNVM_ULD = 0x%x\n",
			  reg);
		return ICE_ERR_RESET_FAILED;
	}

	return 0;
}

/**
 * ice_pf_reset - Reset the PF
 * @hw: pointer to the hardware structure
 *
 * If a global reset has been triggered, this function checks
 * for its completion and then issues the PF reset
 */
static enum ice_status ice_pf_reset(struct ice_hw *hw)
{
	u32 cnt, reg;

	/* If at function entry a global reset was already in progress, i.e.
	 * state is not 'device active' or any of the reset done bits are not
	 * set in GLNVM_ULD, there is no need for a PF Reset; poll until the
	 * global reset is done.
	 */
	if ((rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_DEVSTATE_M) ||
	    (rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK) ^ ICE_RESET_DONE_MASK) {
		/* poll on global reset currently in progress until done */
		if (ice_check_reset(hw))
			return ICE_ERR_RESET_FAILED;

		return 0;
	}

	/* Reset the PF */
	reg = rd32(hw, PFGEN_CTRL);

	wr32(hw, PFGEN_CTRL, (reg | PFGEN_CTRL_PFSWR_M));

	/* Wait for the PFR to complete. The wait time is the global config lock
	 * timeout plus the PFR timeout which will account for a possible reset
	 * that is occurring during a download package operation.
	 */
	for (cnt = 0; cnt < ICE_GLOBAL_CFG_LOCK_TIMEOUT +
	     ICE_PF_RESET_WAIT_COUNT; cnt++) {
		reg = rd32(hw, PFGEN_CTRL);
		if (!(reg & PFGEN_CTRL_PFSWR_M))
			break;

		mdelay(1);
	}

	if (cnt == ICE_PF_RESET_WAIT_COUNT) {
		ice_debug(hw, ICE_DBG_INIT,
			  "PF reset polling failed to complete.\n");
		return ICE_ERR_RESET_FAILED;
	}

	return 0;
}

/**
 * ice_reset - Perform different types of reset
 * @hw: pointer to the hardware structure
 * @req: reset request
 *
 * This function triggers a reset as specified by the req parameter.
 *
 * Note:
 * If anything other than a PF reset is triggered, PXE mode is restored.
 * This has to be cleared using ice_clear_pxe_mode again, once the AQ
 * interface has been restored in the rebuild flow.
 */
enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req)
{
	u32 val = 0;

	switch (req) {
	case ICE_RESET_PFR:
		return ice_pf_reset(hw);
	case ICE_RESET_CORER:
		ice_debug(hw, ICE_DBG_INIT, "CoreR requested\n");
		val = GLGEN_RTRIG_CORER_M;
		break;
	case ICE_RESET_GLOBR:
		ice_debug(hw, ICE_DBG_INIT, "GlobalR requested\n");
		val = GLGEN_RTRIG_GLOBR_M;
		break;
	default:
		return ICE_ERR_PARAM;
	}

	val |= rd32(hw, GLGEN_RTRIG);
	wr32(hw, GLGEN_RTRIG, val);
	ice_flush(hw);

	/* wait for the FW to be ready */
	return ice_check_reset(hw);
}

/**
 * ice_copy_rxq_ctx_to_hw
 * @hw: pointer to the hardware structure
 * @ice_rxq_ctx: pointer to the rxq context
 * @rxq_index: the index of the Rx queue
 *
 * Copies rxq context from dense structure to HW register space
 */
static enum ice_status
ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
{
	u8 i;

	if (!ice_rxq_ctx)
		return ICE_ERR_BAD_PTR;

	if (rxq_index > QRX_CTRL_MAX_INDEX)
		return ICE_ERR_PARAM;

	/* Copy each dword separately to HW */
	for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
		wr32(hw, QRX_CONTEXT(i, rxq_index),
		     *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));

		ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i,
			  *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
	}

	return 0;
}

/* LAN Rx Queue Context */
static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
	/* Field		Width	LSB */
	ICE_CTX_STORE(ice_rlan_ctx, head,		13,	0),
	ICE_CTX_STORE(ice_rlan_ctx, cpuid,		8,	13),
	ICE_CTX_STORE(ice_rlan_ctx, base,		57,	32),
	ICE_CTX_STORE(ice_rlan_ctx, qlen,		13,	89),
	ICE_CTX_STORE(ice_rlan_ctx, dbuf,		7,	102),
	ICE_CTX_STORE(ice_rlan_ctx, hbuf,		5,	109),
	ICE_CTX_STORE(ice_rlan_ctx, dtype,		2,	114),
	ICE_CTX_STORE(ice_rlan_ctx, dsize,		1,	116),
	ICE_CTX_STORE(ice_rlan_ctx, crcstrip,		1,	117),
	ICE_CTX_STORE(ice_rlan_ctx, l2tsel,		1,	119),
	ICE_CTX_STORE(ice_rlan_ctx, hsplit_0,		4,	120),
	ICE_CTX_STORE(ice_rlan_ctx, hsplit_1,		2,	124),
	ICE_CTX_STORE(ice_rlan_ctx, showiv,		1,	127),
	ICE_CTX_STORE(ice_rlan_ctx, rxmax,		14,	174),
	ICE_CTX_STORE(ice_rlan_ctx, tphrdesc_ena,	1,	193),
	ICE_CTX_STORE(ice_rlan_ctx, tphwdesc_ena,	1,	194),
	ICE_CTX_STORE(ice_rlan_ctx, tphdata_ena,	1,	195),
	ICE_CTX_STORE(ice_rlan_ctx, tphhead_ena,	1,	196),
	ICE_CTX_STORE(ice_rlan_ctx, lrxqthresh,		3,	198),
	ICE_CTX_STORE(ice_rlan_ctx, prefena,		1,	201),
	{ 0 }
};

/**
 * ice_write_rxq_ctx
 * @hw: pointer to the hardware structure
 * @rlan_ctx: pointer to the rxq context
 * @rxq_index: the index of the Rx queue
 *
 * Converts rxq context from sparse to dense structure and then writes
 * it to HW register space and enables the hardware to prefetch descriptors
 * instead of only fetching them on demand
 */
enum ice_status
ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
		  u32 rxq_index)
{
	u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 };

	if (!rlan_ctx)
		return ICE_ERR_BAD_PTR;

	rlan_ctx->prefena = 1;

	ice_set_ctx(hw, (u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info);
	return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index);
}

/* LAN Tx Queue Context */
const struct ice_ctx_ele ice_tlan_ctx_info[] = {
				    /* Field			Width	LSB */
	ICE_CTX_STORE(ice_tlan_ctx, base,			57,	0),
	ICE_CTX_STORE(ice_tlan_ctx, port_num,			3,	57),
	ICE_CTX_STORE(ice_tlan_ctx, cgd_num,			5,	60),
	ICE_CTX_STORE(ice_tlan_ctx, pf_num,			3,	65),
	ICE_CTX_STORE(ice_tlan_ctx, vmvf_num,			10,	68),
	ICE_CTX_STORE(ice_tlan_ctx, vmvf_type,			2,	78),
	ICE_CTX_STORE(ice_tlan_ctx, src_vsi,			10,	80),
	ICE_CTX_STORE(ice_tlan_ctx, tsyn_ena,			1,	90),
	ICE_CTX_STORE(ice_tlan_ctx, internal_usage_flag,	1,	91),
	ICE_CTX_STORE(ice_tlan_ctx, alt_vlan,			1,	92),
	ICE_CTX_STORE(ice_tlan_ctx, cpuid,			8,	93),
	ICE_CTX_STORE(ice_tlan_ctx, wb_mode,			1,	101),
	ICE_CTX_STORE(ice_tlan_ctx, tphrd_desc,			1,	102),
	ICE_CTX_STORE(ice_tlan_ctx, tphrd,			1,	103),
	ICE_CTX_STORE(ice_tlan_ctx, tphwr_desc,			1,	104),
	ICE_CTX_STORE(ice_tlan_ctx, cmpq_id,			9,	105),
	ICE_CTX_STORE(ice_tlan_ctx, qnum_in_func,		14,	114),
	ICE_CTX_STORE(ice_tlan_ctx, itr_notification_mode,	1,	128),
	ICE_CTX_STORE(ice_tlan_ctx, adjust_prof_id,		6,	129),
	ICE_CTX_STORE(ice_tlan_ctx, qlen,			13,	135),
	ICE_CTX_STORE(ice_tlan_ctx, quanta_prof_idx,		4,	148),
	ICE_CTX_STORE(ice_tlan_ctx, tso_ena,			1,	152),
	ICE_CTX_STORE(ice_tlan_ctx, tso_qnum,			11,	153),
	ICE_CTX_STORE(ice_tlan_ctx, legacy_int,			1,	164),
	ICE_CTX_STORE(ice_tlan_ctx, drop_ena,			1,	165),
	ICE_CTX_STORE(ice_tlan_ctx, cache_prof_idx,		2,	166),
	ICE_CTX_STORE(ice_tlan_ctx, pkt_shaper_prof_idx,	3,	168),
	ICE_CTX_STORE(ice_tlan_ctx, int_q_state,		122,	171),
	{ 0 }
};

/* FW Admin Queue command wrappers */

/* Software lock/mutex that is meant to be held while the Global Config Lock
 * in firmware is acquired by the software to prevent most (but not all) types
 * of AQ commands from being sent to FW
 */
DEFINE_MUTEX(ice_global_cfg_lock_sw);

/**
 * ice_aq_send_cmd - send FW Admin Queue command to FW Admin Queue
 * @hw: pointer to the HW struct
 * @desc: descriptor describing the command
 * @buf: buffer to use for indirect commands (NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (0 for direct commands)
 * @cd: pointer to command details structure
 *
 * Helper function to send FW Admin Queue commands to the FW Admin Queue.
 */
enum ice_status
ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
		u16 buf_size, struct ice_sq_cd *cd)
{
	struct ice_aqc_req_res *cmd = &desc->params.res_owner;
	bool lock_acquired = false;
	enum ice_status status;

	/* When a package download is in process (i.e. when the firmware's
	 * Global Configuration Lock resource is held), only the Download
	 * Package, Get Version, Get Package Info List and Release Resource
	 * (with resource ID set to Global Config Lock) AdminQ commands are
	 * allowed; all others must block until the package download completes
	 * and the Global Config Lock is released. See also
	 * ice_acquire_global_cfg_lock().
	 */
	switch (le16_to_cpu(desc->opcode)) {
	case ice_aqc_opc_download_pkg:
	case ice_aqc_opc_get_pkg_info_list:
	case ice_aqc_opc_get_ver:
		break;
	case ice_aqc_opc_release_res:
		if (le16_to_cpu(cmd->res_id) == ICE_AQC_RES_ID_GLBL_LOCK)
			break;
		fallthrough;
	default:
		mutex_lock(&ice_global_cfg_lock_sw);
		lock_acquired = true;
		break;
	}

	status = ice_sq_send_cmd(hw, &hw->adminq, desc, buf, buf_size, cd);
	if (lock_acquired)
		mutex_unlock(&ice_global_cfg_lock_sw);

	return status;
}

/**
 * ice_aq_get_fw_ver
 * @hw: pointer to the HW struct
 * @cd: pointer to command details structure or NULL
 *
 * Get the firmware version (0x0001) from the admin queue commands
 */
enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd)
{
	struct ice_aqc_get_ver *resp;
	struct ice_aq_desc desc;
	enum ice_status status;

	resp = &desc.params.get_ver;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver);

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);

	if (!status) {
		hw->fw_branch = resp->fw_branch;
		hw->fw_maj_ver = resp->fw_major;
		hw->fw_min_ver = resp->fw_minor;
		hw->fw_patch = resp->fw_patch;
		hw->fw_build = le32_to_cpu(resp->fw_build);
		hw->api_branch = resp->api_branch;
		hw->api_maj_ver = resp->api_major;
		hw->api_min_ver = resp->api_minor;
		hw->api_patch = resp->api_patch;
	}

	return status;
}

/**
 * ice_aq_send_driver_ver
 * @hw: pointer to the HW struct
 * @dv: driver's major, minor version
 * @cd: pointer to command details structure or NULL
 *
 * Send the driver version (0x0002) to the firmware
 */
enum ice_status
ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv,
		       struct ice_sq_cd *cd)
{
	struct ice_aqc_driver_ver *cmd;
	struct ice_aq_desc desc;
	u16 len;

	cmd = &desc.params.driver_ver;

	if (!dv)
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_ver);

	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
	cmd->major_ver = dv->major_ver;
	cmd->minor_ver = dv->minor_ver;
	cmd->build_ver = dv->build_ver;
	cmd->subbuild_ver = dv->subbuild_ver;

	len = 0;
	while (len < sizeof(dv->driver_string) &&
	       isascii(dv->driver_string[len]) && dv->driver_string[len])
		len++;

	return ice_aq_send_cmd(hw, &desc, dv->driver_string, len, cd);
}

/**
 * ice_aq_q_shutdown
 * @hw: pointer to the HW struct
 * @unloading: is the driver unloading itself
 *
 * Tell the Firmware that we're shutting down the AdminQ and whether
 * or not the driver is unloading as well (0x0003).
 */
enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
{
	struct ice_aqc_q_shutdown *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.q_shutdown;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_q_shutdown);

	if (unloading)
		cmd->driver_unloading = ICE_AQC_DRIVER_UNLOADING;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_aq_req_res
 * @hw: pointer to the HW struct
 * @res: resource ID
 * @access: access type
 * @sdp_number: resource number
 * @timeout: the maximum time in ms that the driver may hold the resource
 * @cd: pointer to command details structure or NULL
 *
 * Requests common resource using the admin queue commands (0x0008).
 * When attempting to acquire the Global Config Lock, the driver can
 * learn of three states:
 * 1) ICE_SUCCESS -        acquired lock, and can perform download package
 * 2) ICE_ERR_AQ_ERROR -   did not get lock, driver should fail to load
 * 3) ICE_ERR_AQ_NO_WORK - did not get lock, but another driver has
 *                         successfully downloaded the package; the driver does
 *                         not have to download the package and can continue
 *                         loading
 *
 * Note that if the caller is in an acquire lock, perform action, release lock
 * phase of operation, it is possible that the FW may detect a timeout and issue
 * a CORER. In this case, the driver will receive a CORER interrupt and will
 * have to determine its cause. The calling thread that is handling this flow
 * will likely get an error propagated back to it indicating the Download
 * Package, Update Package or the Release Resource AQ commands timed out.
 */
static enum ice_status
ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
	       enum ice_aq_res_access_type access, u8 sdp_number, u32 *timeout,
	       struct ice_sq_cd *cd)
{
	struct ice_aqc_req_res *cmd_resp;
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd_resp = &desc.params.res_owner;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_req_res);

	cmd_resp->res_id = cpu_to_le16(res);
	cmd_resp->access_type = cpu_to_le16(access);
	cmd_resp->res_number = cpu_to_le32(sdp_number);
	cmd_resp->timeout = cpu_to_le32(*timeout);
	*timeout = 0;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);

	/* The completion specifies the maximum time in ms that the driver
	 * may hold the resource in the Timeout field.
	 */

	/* Global config lock response utilizes an additional status field.
	 *
	 * If the Global config lock resource is held by some other driver, the
	 * command completes with ICE_AQ_RES_GLBL_IN_PROG in the status field
	 * and the timeout field indicates the maximum time the current owner
	 * of the resource has to free it.
	 */
	if (res == ICE_GLOBAL_CFG_LOCK_RES_ID) {
		if (le16_to_cpu(cmd_resp->status) == ICE_AQ_RES_GLBL_SUCCESS) {
			*timeout = le32_to_cpu(cmd_resp->timeout);
			return 0;
		} else if (le16_to_cpu(cmd_resp->status) ==
			   ICE_AQ_RES_GLBL_IN_PROG) {
			*timeout = le32_to_cpu(cmd_resp->timeout);
			return ICE_ERR_AQ_ERROR;
		} else if (le16_to_cpu(cmd_resp->status) ==
			   ICE_AQ_RES_GLBL_DONE) {
			return ICE_ERR_AQ_NO_WORK;
		}

		/* invalid FW response, force a timeout immediately */
		*timeout = 0;
		return ICE_ERR_AQ_ERROR;
	}

	/* If the resource is held by some other driver, the command completes
	 * with a busy return value and the timeout field indicates the maximum
	 * time the current owner of the resource has to free it.
	 */
	if (!status || hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY)
		*timeout = le32_to_cpu(cmd_resp->timeout);

	return status;
}

/**
 * ice_aq_release_res
 * @hw: pointer to the HW struct
 * @res: resource ID
 * @sdp_number: resource number
 * @cd: pointer to command details structure or NULL
 *
 * release common resource using the admin queue commands (0x0009)
 */
static enum ice_status
ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number,
		   struct ice_sq_cd *cd)
{
	struct ice_aqc_req_res *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.res_owner;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_release_res);

	cmd->res_id = cpu_to_le16(res);
	cmd->res_number = cpu_to_le32(sdp_number);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_acquire_res
 * @hw: pointer to the HW structure
 * @res: resource ID
 * @access: access type (read or write)
 * @timeout: timeout in milliseconds
 *
 * This function will attempt to acquire the ownership of a resource.
 */
enum ice_status
ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
		enum ice_aq_res_access_type access, u32 timeout)
{
#define ICE_RES_POLLING_DELAY_MS	10
	u32 delay = ICE_RES_POLLING_DELAY_MS;
	u32 time_left = timeout;
	enum ice_status status;

	status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);

	/* A return code of ICE_ERR_AQ_NO_WORK means that another driver has
	 * previously acquired the resource and performed any necessary updates;
	 * in this case the caller does not obtain the resource and has no
	 * further work to do.
	 */
	if (status == ICE_ERR_AQ_NO_WORK)
		goto ice_acquire_res_exit;

	if (status)
		ice_debug(hw, ICE_DBG_RES,
			  "resource %d acquire type %d failed.\n", res, access);

	/* If necessary, poll until the current lock owner timeouts */
	timeout = time_left;
	while (status && timeout && time_left) {
		mdelay(delay);
		timeout = (timeout > delay) ? timeout - delay : 0;
		status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);

		if (status == ICE_ERR_AQ_NO_WORK)
			/* lock free, but no work to do */
			break;

		if (!status)
			/* lock acquired */
			break;
	}
	if (status && status != ICE_ERR_AQ_NO_WORK)
		ice_debug(hw, ICE_DBG_RES, "resource acquire timed out.\n");

ice_acquire_res_exit:
	if (status == ICE_ERR_AQ_NO_WORK) {
		if (access == ICE_RES_WRITE)
			ice_debug(hw, ICE_DBG_RES,
				  "resource indicates no work to do.\n");
		else
			ice_debug(hw, ICE_DBG_RES,
				  "Warning: ICE_ERR_AQ_NO_WORK not expected\n");
	}
	return status;
}

/**
 * ice_release_res
 * @hw: pointer to the HW structure
 * @res: resource ID
 *
 * This function will release a resource using the proper Admin Command.
 */
void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
{
	enum ice_status status;
	u32 total_delay = 0;

	status = ice_aq_release_res(hw, res, 0, NULL);

	/* there are some rare cases when trying to release the resource
	 * results in an admin queue timeout, so handle them correctly
	 */
	while ((status == ICE_ERR_AQ_TIMEOUT) &&
	       (total_delay < hw->adminq.sq_cmd_timeout)) {
		mdelay(1);
		status = ice_aq_release_res(hw, res, 0, NULL);
		total_delay++;
	}
}

/**
 * ice_aq_alloc_free_res - command to allocate/free resources
 * @hw: pointer to the HW struct
 * @num_entries: number of resource entries in buffer
 * @buf: Indirect buffer to hold data parameters and response
 * @buf_size: size of buffer for indirect commands
 * @opc: pass in the command opcode
 * @cd: pointer to command details structure or NULL
 *
 * Helper function to allocate/free resources using the admin queue commands
 */
enum ice_status
ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
		      struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size,
		      enum ice_adminq_opc opc, struct ice_sq_cd *cd)
{
	struct ice_aqc_alloc_free_res_cmd *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.sw_res_ctrl;

	if (!buf)
		return ICE_ERR_PARAM;

	if (buf_size < (num_entries * sizeof(buf->elem[0])))
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, opc);

	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	cmd->num_entries = cpu_to_le16(num_entries);

	return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
}

/**
 * ice_alloc_hw_res - allocate resource
 * @hw: pointer to the HW struct
 * @type: type of resource
 * @num: number of resources to allocate
 * @btm: allocate from bottom
 * @res: pointer to array that will receive the resources
 */
enum ice_status
ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res)
{
	struct ice_aqc_alloc_free_res_elem *buf;
	enum ice_status status;
	u16 buf_len;

	buf_len = struct_size(buf, elem, num);
	buf = kzalloc(buf_len, GFP_KERNEL);
	if (!buf)
		return ICE_ERR_NO_MEMORY;

	/* Prepare buffer to allocate resource. */
	buf->num_elems = cpu_to_le16(num);
	buf->res_type = cpu_to_le16(type | ICE_AQC_RES_TYPE_FLAG_DEDICATED |
				    ICE_AQC_RES_TYPE_FLAG_IGNORE_INDEX);
	if (btm)
		buf->res_type |= cpu_to_le16(ICE_AQC_RES_TYPE_FLAG_SCAN_BOTTOM);

	status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
				       ice_aqc_opc_alloc_res, NULL);
	if (status)
		goto ice_alloc_res_exit;

	memcpy(res, buf->elem, sizeof(*buf->elem) * num);

ice_alloc_res_exit:
	kfree(buf);
	return status;
}

/**
 * ice_free_hw_res - free allocated HW resource
 * @hw: pointer to the HW struct
 * @type: type of resource to free
 * @num: number of resources
 * @res: pointer to array that contains the resources to free
 */
enum ice_status
ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res)
{
	struct ice_aqc_alloc_free_res_elem *buf;
	enum ice_status status;
	u16 buf_len;

	buf_len = struct_size(buf, elem, num);
	buf = kzalloc(buf_len, GFP_KERNEL);
	if (!buf)
		return ICE_ERR_NO_MEMORY;

	/* Prepare buffer to free resource. */
	buf->num_elems = cpu_to_le16(num);
	buf->res_type = cpu_to_le16(type);
	memcpy(buf->elem, res, sizeof(*buf->elem) * num);

	status = ice_aq_alloc_free_res(hw, num, buf, buf_len,
				       ice_aqc_opc_free_res, NULL);
	if (status)
		ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");

	kfree(buf);
	return status;
}

/**
 * ice_get_num_per_func - determine number of resources per PF
 * @hw: pointer to the HW structure
 * @max: value to be evenly split between each PF
 *
 * Determine the number of valid functions by going through the bitmap returned
 * from parsing capabilities and use this to calculate the number of resources
 * per PF based on the max value passed in.
 */
static u32 ice_get_num_per_func(struct ice_hw *hw, u32 max)
{
	u8 funcs;

#define ICE_CAPS_VALID_FUNCS_M	0xFF
	funcs = hweight8(hw->dev_caps.common_cap.valid_functions &
			 ICE_CAPS_VALID_FUNCS_M);

	if (!funcs)
		return 0;

	return max / funcs;
}

/**
 * ice_parse_common_caps - parse common device/function capabilities
 * @hw: pointer to the HW struct
 * @caps: pointer to common capabilities structure
 * @elem: the capability element to parse
 * @prefix: message prefix for tracing capabilities
 *
 * Given a capability element, extract relevant details into the common
 * capability structure.
 *
 * Returns: true if the capability matches one of the common capability ids,
 * false otherwise.
1784 */ 1785 static bool 1786 ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps, 1787 struct ice_aqc_list_caps_elem *elem, const char *prefix) 1788 { 1789 u32 logical_id = le32_to_cpu(elem->logical_id); 1790 u32 phys_id = le32_to_cpu(elem->phys_id); 1791 u32 number = le32_to_cpu(elem->number); 1792 u16 cap = le16_to_cpu(elem->cap); 1793 bool found = true; 1794 1795 switch (cap) { 1796 case ICE_AQC_CAPS_VALID_FUNCTIONS: 1797 caps->valid_functions = number; 1798 ice_debug(hw, ICE_DBG_INIT, 1799 "%s: valid_functions (bitmap) = %d\n", prefix, 1800 caps->valid_functions); 1801 break; 1802 case ICE_AQC_CAPS_SRIOV: 1803 caps->sr_iov_1_1 = (number == 1); 1804 ice_debug(hw, ICE_DBG_INIT, 1805 "%s: sr_iov_1_1 = %d\n", prefix, 1806 caps->sr_iov_1_1); 1807 break; 1808 case ICE_AQC_CAPS_DCB: 1809 caps->dcb = (number == 1); 1810 caps->active_tc_bitmap = logical_id; 1811 caps->maxtc = phys_id; 1812 ice_debug(hw, ICE_DBG_INIT, 1813 "%s: dcb = %d\n", prefix, caps->dcb); 1814 ice_debug(hw, ICE_DBG_INIT, 1815 "%s: active_tc_bitmap = %d\n", prefix, 1816 caps->active_tc_bitmap); 1817 ice_debug(hw, ICE_DBG_INIT, 1818 "%s: maxtc = %d\n", prefix, caps->maxtc); 1819 break; 1820 case ICE_AQC_CAPS_RSS: 1821 caps->rss_table_size = number; 1822 caps->rss_table_entry_width = logical_id; 1823 ice_debug(hw, ICE_DBG_INIT, 1824 "%s: rss_table_size = %d\n", prefix, 1825 caps->rss_table_size); 1826 ice_debug(hw, ICE_DBG_INIT, 1827 "%s: rss_table_entry_width = %d\n", prefix, 1828 caps->rss_table_entry_width); 1829 break; 1830 case ICE_AQC_CAPS_RXQS: 1831 caps->num_rxq = number; 1832 caps->rxq_first_id = phys_id; 1833 ice_debug(hw, ICE_DBG_INIT, 1834 "%s: num_rxq = %d\n", prefix, 1835 caps->num_rxq); 1836 ice_debug(hw, ICE_DBG_INIT, 1837 "%s: rxq_first_id = %d\n", prefix, 1838 caps->rxq_first_id); 1839 break; 1840 case ICE_AQC_CAPS_TXQS: 1841 caps->num_txq = number; 1842 caps->txq_first_id = phys_id; 1843 ice_debug(hw, ICE_DBG_INIT, 1844 "%s: num_txq = %d\n", prefix, 1845 caps->num_txq); 1846 ice_debug(hw, ICE_DBG_INIT, 1847 "%s: txq_first_id = %d\n", prefix, 1848 caps->txq_first_id); 1849 break; 1850 case ICE_AQC_CAPS_MSIX: 1851 caps->num_msix_vectors = number; 1852 caps->msix_vector_first_id = phys_id; 1853 ice_debug(hw, ICE_DBG_INIT, 1854 "%s: num_msix_vectors = %d\n", prefix, 1855 caps->num_msix_vectors); 1856 ice_debug(hw, ICE_DBG_INIT, 1857 "%s: msix_vector_first_id = %d\n", prefix, 1858 caps->msix_vector_first_id); 1859 break; 1860 case ICE_AQC_CAPS_MAX_MTU: 1861 caps->max_mtu = number; 1862 ice_debug(hw, ICE_DBG_INIT, "%s: max_mtu = %d\n", 1863 prefix, caps->max_mtu); 1864 break; 1865 default: 1866 /* Not one of the recognized common capabilities */ 1867 found = false; 1868 } 1869 1870 return found; 1871 } 1872 1873 /** 1874 * ice_recalc_port_limited_caps - Recalculate port limited capabilities 1875 * @hw: pointer to the HW structure 1876 * @caps: pointer to capabilities structure to fix 1877 * 1878 * Re-calculate the capabilities that are dependent on the number of physical 1879 * ports; i.e. some features are not supported or function differently on 1880 * devices with more than 4 ports. 1881 */ 1882 static void 1883 ice_recalc_port_limited_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps) 1884 { 1885 /* This assumes device capabilities are always scanned before function 1886 * capabilities during the initialization flow. 
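 * ice_get_caps() below preserves that ordering: device capabilities are
 * discovered before function capabilities, so hw->dev_caps.num_funcs is
 * already valid when this adjustment runs.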
1887 */ 1888 if (hw->dev_caps.num_funcs > 4) { 1889 /* Max 4 TCs per port */ 1890 caps->maxtc = 4; 1891 ice_debug(hw, ICE_DBG_INIT, 1892 "reducing maxtc to %d (based on #ports)\n", 1893 caps->maxtc); 1894 } 1895 } 1896 1897 /** 1898 * ice_parse_vf_func_caps - Parse ICE_AQC_CAPS_VF function caps 1899 * @hw: pointer to the HW struct 1900 * @func_p: pointer to function capabilities structure 1901 * @cap: pointer to the capability element to parse 1902 * 1903 * Extract function capabilities for ICE_AQC_CAPS_VF. 1904 */ 1905 static void 1906 ice_parse_vf_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p, 1907 struct ice_aqc_list_caps_elem *cap) 1908 { 1909 u32 logical_id = le32_to_cpu(cap->logical_id); 1910 u32 number = le32_to_cpu(cap->number); 1911 1912 func_p->num_allocd_vfs = number; 1913 func_p->vf_base_id = logical_id; 1914 ice_debug(hw, ICE_DBG_INIT, "func caps: num_allocd_vfs = %d\n", 1915 func_p->num_allocd_vfs); 1916 ice_debug(hw, ICE_DBG_INIT, "func caps: vf_base_id = %d\n", 1917 func_p->vf_base_id); 1918 } 1919 1920 /** 1921 * ice_parse_vsi_func_caps - Parse ICE_AQC_CAPS_VSI function caps 1922 * @hw: pointer to the HW struct 1923 * @func_p: pointer to function capabilities structure 1924 * @cap: pointer to the capability element to parse 1925 * 1926 * Extract function capabilities for ICE_AQC_CAPS_VSI. 1927 */ 1928 static void 1929 ice_parse_vsi_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p, 1930 struct ice_aqc_list_caps_elem *cap) 1931 { 1932 func_p->guar_num_vsi = ice_get_num_per_func(hw, ICE_MAX_VSI); 1933 ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi (fw) = %d\n", 1934 le32_to_cpu(cap->number)); 1935 ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi = %d\n", 1936 func_p->guar_num_vsi); 1937 } 1938 1939 /** 1940 * ice_parse_fdir_func_caps - Parse ICE_AQC_CAPS_FD function caps 1941 * @hw: pointer to the HW struct 1942 * @func_p: pointer to function capabilities structure 1943 * 1944 * Extract function capabilities for ICE_AQC_CAPS_FD. 1945 */ 1946 static void 1947 ice_parse_fdir_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p) 1948 { 1949 u32 reg_val, val; 1950 1951 reg_val = rd32(hw, GLQF_FD_SIZE); 1952 val = (reg_val & GLQF_FD_SIZE_FD_GSIZE_M) >> 1953 GLQF_FD_SIZE_FD_GSIZE_S; 1954 func_p->fd_fltr_guar = 1955 ice_get_num_per_func(hw, val); 1956 val = (reg_val & GLQF_FD_SIZE_FD_BSIZE_M) >> 1957 GLQF_FD_SIZE_FD_BSIZE_S; 1958 func_p->fd_fltr_best_effort = val; 1959 1960 ice_debug(hw, ICE_DBG_INIT, 1961 "func caps: fd_fltr_guar = %d\n", 1962 func_p->fd_fltr_guar); 1963 ice_debug(hw, ICE_DBG_INIT, 1964 "func caps: fd_fltr_best_effort = %d\n", 1965 func_p->fd_fltr_best_effort); 1966 } 1967 1968 /** 1969 * ice_parse_func_caps - Parse function capabilities 1970 * @hw: pointer to the HW struct 1971 * @func_p: pointer to function capabilities structure 1972 * @buf: buffer containing the function capability records 1973 * @cap_count: the number of capabilities 1974 * 1975 * Helper function to parse function (0x000A) capabilities list. For 1976 * capabilities shared between device and function, this relies on 1977 * ice_parse_common_caps. 1978 * 1979 * Loop through the list of provided capabilities and extract the relevant 1980 * data into the function capabilities structured. 
1981 */ 1982 static void 1983 ice_parse_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p, 1984 void *buf, u32 cap_count) 1985 { 1986 struct ice_aqc_list_caps_elem *cap_resp; 1987 u32 i; 1988 1989 cap_resp = (struct ice_aqc_list_caps_elem *)buf; 1990 1991 memset(func_p, 0, sizeof(*func_p)); 1992 1993 for (i = 0; i < cap_count; i++) { 1994 u16 cap = le16_to_cpu(cap_resp[i].cap); 1995 bool found; 1996 1997 found = ice_parse_common_caps(hw, &func_p->common_cap, 1998 &cap_resp[i], "func caps"); 1999 2000 switch (cap) { 2001 case ICE_AQC_CAPS_VF: 2002 ice_parse_vf_func_caps(hw, func_p, &cap_resp[i]); 2003 break; 2004 case ICE_AQC_CAPS_VSI: 2005 ice_parse_vsi_func_caps(hw, func_p, &cap_resp[i]); 2006 break; 2007 case ICE_AQC_CAPS_FD: 2008 ice_parse_fdir_func_caps(hw, func_p); 2009 break; 2010 default: 2011 /* Don't list common capabilities as unknown */ 2012 if (!found) 2013 ice_debug(hw, ICE_DBG_INIT, 2014 "func caps: unknown capability[%d]: 0x%x\n", 2015 i, cap); 2016 break; 2017 } 2018 } 2019 2020 ice_recalc_port_limited_caps(hw, &func_p->common_cap); 2021 } 2022 2023 /** 2024 * ice_parse_valid_functions_cap - Parse ICE_AQC_CAPS_VALID_FUNCTIONS caps 2025 * @hw: pointer to the HW struct 2026 * @dev_p: pointer to device capabilities structure 2027 * @cap: capability element to parse 2028 * 2029 * Parse ICE_AQC_CAPS_VALID_FUNCTIONS for device capabilities. 2030 */ 2031 static void 2032 ice_parse_valid_functions_cap(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, 2033 struct ice_aqc_list_caps_elem *cap) 2034 { 2035 u32 number = le32_to_cpu(cap->number); 2036 2037 dev_p->num_funcs = hweight32(number); 2038 ice_debug(hw, ICE_DBG_INIT, "dev caps: num_funcs = %d\n", 2039 dev_p->num_funcs); 2040 } 2041 2042 /** 2043 * ice_parse_vf_dev_caps - Parse ICE_AQC_CAPS_VF device caps 2044 * @hw: pointer to the HW struct 2045 * @dev_p: pointer to device capabilities structure 2046 * @cap: capability element to parse 2047 * 2048 * Parse ICE_AQC_CAPS_VF for device capabilities. 2049 */ 2050 static void 2051 ice_parse_vf_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, 2052 struct ice_aqc_list_caps_elem *cap) 2053 { 2054 u32 number = le32_to_cpu(cap->number); 2055 2056 dev_p->num_vfs_exposed = number; 2057 ice_debug(hw, ICE_DBG_INIT, "dev_caps: num_vfs_exposed = %d\n", 2058 dev_p->num_vfs_exposed); 2059 } 2060 2061 /** 2062 * ice_parse_vsi_dev_caps - Parse ICE_AQC_CAPS_VSI device caps 2063 * @hw: pointer to the HW struct 2064 * @dev_p: pointer to device capabilities structure 2065 * @cap: capability element to parse 2066 * 2067 * Parse ICE_AQC_CAPS_VSI for device capabilities. 2068 */ 2069 static void 2070 ice_parse_vsi_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, 2071 struct ice_aqc_list_caps_elem *cap) 2072 { 2073 u32 number = le32_to_cpu(cap->number); 2074 2075 dev_p->num_vsi_allocd_to_host = number; 2076 ice_debug(hw, ICE_DBG_INIT, "dev caps: num_vsi_allocd_to_host = %d\n", 2077 dev_p->num_vsi_allocd_to_host); 2078 } 2079 2080 /** 2081 * ice_parse_fdir_dev_caps - Parse ICE_AQC_CAPS_FD device caps 2082 * @hw: pointer to the HW struct 2083 * @dev_p: pointer to device capabilities structure 2084 * @cap: capability element to parse 2085 * 2086 * Parse ICE_AQC_CAPS_FD for device capabilities. 
*/
2088 static void
2089 ice_parse_fdir_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2090 struct ice_aqc_list_caps_elem *cap)
2091 {
2092 u32 number = le32_to_cpu(cap->number);
2093
2094 dev_p->num_flow_director_fltr = number;
2095 ice_debug(hw, ICE_DBG_INIT, "dev caps: num_flow_director_fltr = %d\n",
2096 dev_p->num_flow_director_fltr);
2097 }
2098
2099 /**
2100 * ice_parse_dev_caps - Parse device capabilities
2101 * @hw: pointer to the HW struct
2102 * @dev_p: pointer to device capabilities structure
2103 * @buf: buffer containing the device capability records
2104 * @cap_count: the number of capabilities
2105 *
2106 * Helper function to parse device (0x000B) capabilities list. For
2107 * capabilities shared between device and function, this relies on
2108 * ice_parse_common_caps.
2109 *
2110 * Loop through the list of provided capabilities and extract the relevant
2111 * data into the device capabilities structure.
2112 */
2113 static void
2114 ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2115 void *buf, u32 cap_count)
2116 {
2117 struct ice_aqc_list_caps_elem *cap_resp;
2118 u32 i;
2119
2120 cap_resp = (struct ice_aqc_list_caps_elem *)buf;
2121
2122 memset(dev_p, 0, sizeof(*dev_p));
2123
2124 for (i = 0; i < cap_count; i++) {
2125 u16 cap = le16_to_cpu(cap_resp[i].cap);
2126 bool found;
2127
2128 found = ice_parse_common_caps(hw, &dev_p->common_cap,
2129 &cap_resp[i], "dev caps");
2130
2131 switch (cap) {
2132 case ICE_AQC_CAPS_VALID_FUNCTIONS:
2133 ice_parse_valid_functions_cap(hw, dev_p, &cap_resp[i]);
2134 break;
2135 case ICE_AQC_CAPS_VF:
2136 ice_parse_vf_dev_caps(hw, dev_p, &cap_resp[i]);
2137 break;
2138 case ICE_AQC_CAPS_VSI:
2139 ice_parse_vsi_dev_caps(hw, dev_p, &cap_resp[i]);
2140 break;
2141 case ICE_AQC_CAPS_FD:
2142 ice_parse_fdir_dev_caps(hw, dev_p, &cap_resp[i]);
2143 break;
2144 default:
2145 /* Don't list common capabilities as unknown */
2146 if (!found)
2147 ice_debug(hw, ICE_DBG_INIT,
2148 "dev caps: unknown capability[%d]: 0x%x\n",
2149 i, cap);
2150 break;
2151 }
2152 }
2153
2154 ice_recalc_port_limited_caps(hw, &dev_p->common_cap);
2155 }
2156
2157 /**
2158 * ice_aq_list_caps - query function/device capabilities
2159 * @hw: pointer to the HW struct
2160 * @buf: a buffer to hold the capabilities
2161 * @buf_size: size of the buffer
2162 * @cap_count: if not NULL, set to the number of capabilities reported
2163 * @opc: capabilities type to discover, device or function
2164 * @cd: pointer to command details structure or NULL
2165 *
2166 * Get the function (0x000A) or device (0x000B) capabilities description from
2167 * firmware and store it in the buffer.
2168 *
2169 * If the cap_count pointer is not NULL, then it is set to the number of
2170 * capabilities firmware will report. Note that if the buffer size is too
2171 * small, it is possible the command will return ICE_AQ_ERR_ENOMEM. The
2172 * cap_count will still be updated in this case. It is recommended that the
2173 * buffer size be set to ICE_AQ_MAX_BUF_LEN (the largest possible buffer that
2174 * firmware could return) to avoid this.
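 *
 * A minimal calling sketch (illustrative only, assuming an initialized
 * struct ice_hw *hw; it mirrors ice_discover_dev_caps() below):
 *
 *	enum ice_status status;
 *	u32 cap_count = 0;
 *	void *cbuf;
 *
 *	cbuf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
 *	if (!cbuf)
 *		return ICE_ERR_NO_MEMORY;
 *	status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count,
 *				  ice_aqc_opc_list_dev_caps, NULL);
 *	kfree(cbuf);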
2175 */ 2176 enum ice_status 2177 ice_aq_list_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count, 2178 enum ice_adminq_opc opc, struct ice_sq_cd *cd) 2179 { 2180 struct ice_aqc_list_caps *cmd; 2181 struct ice_aq_desc desc; 2182 enum ice_status status; 2183 2184 cmd = &desc.params.get_cap; 2185 2186 if (opc != ice_aqc_opc_list_func_caps && 2187 opc != ice_aqc_opc_list_dev_caps) 2188 return ICE_ERR_PARAM; 2189 2190 ice_fill_dflt_direct_cmd_desc(&desc, opc); 2191 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); 2192 2193 if (cap_count) 2194 *cap_count = le32_to_cpu(cmd->count); 2195 2196 return status; 2197 } 2198 2199 /** 2200 * ice_discover_dev_caps - Read and extract device capabilities 2201 * @hw: pointer to the hardware structure 2202 * @dev_caps: pointer to device capabilities structure 2203 * 2204 * Read the device capabilities and extract them into the dev_caps structure 2205 * for later use. 2206 */ 2207 static enum ice_status 2208 ice_discover_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_caps) 2209 { 2210 enum ice_status status; 2211 u32 cap_count = 0; 2212 void *cbuf; 2213 2214 cbuf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL); 2215 if (!cbuf) 2216 return ICE_ERR_NO_MEMORY; 2217 2218 /* Although the driver doesn't know the number of capabilities the 2219 * device will return, we can simply send a 4KB buffer, the maximum 2220 * possible size that firmware can return. 2221 */ 2222 cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem); 2223 2224 status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count, 2225 ice_aqc_opc_list_dev_caps, NULL); 2226 if (!status) 2227 ice_parse_dev_caps(hw, dev_caps, cbuf, cap_count); 2228 kfree(cbuf); 2229 2230 return status; 2231 } 2232 2233 /** 2234 * ice_discover_func_caps - Read and extract function capabilities 2235 * @hw: pointer to the hardware structure 2236 * @func_caps: pointer to function capabilities structure 2237 * 2238 * Read the function capabilities and extract them into the func_caps structure 2239 * for later use. 2240 */ 2241 static enum ice_status 2242 ice_discover_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_caps) 2243 { 2244 enum ice_status status; 2245 u32 cap_count = 0; 2246 void *cbuf; 2247 2248 cbuf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL); 2249 if (!cbuf) 2250 return ICE_ERR_NO_MEMORY; 2251 2252 /* Although the driver doesn't know the number of capabilities the 2253 * device will return, we can simply send a 4KB buffer, the maximum 2254 * possible size that firmware can return. 
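 * The cap_count computed below is simply how many list elements fit in
 * that buffer; ice_aq_list_caps() then overwrites it with the number of
 * capabilities firmware actually reports.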
2255 */ 2256 cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem); 2257 2258 status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count, 2259 ice_aqc_opc_list_func_caps, NULL); 2260 if (!status) 2261 ice_parse_func_caps(hw, func_caps, cbuf, cap_count); 2262 kfree(cbuf); 2263 2264 return status; 2265 } 2266 2267 /** 2268 * ice_set_safe_mode_caps - Override dev/func capabilities when in safe mode 2269 * @hw: pointer to the hardware structure 2270 */ 2271 void ice_set_safe_mode_caps(struct ice_hw *hw) 2272 { 2273 struct ice_hw_func_caps *func_caps = &hw->func_caps; 2274 struct ice_hw_dev_caps *dev_caps = &hw->dev_caps; 2275 u32 valid_func, rxq_first_id, txq_first_id; 2276 u32 msix_vector_first_id, max_mtu; 2277 u32 num_funcs; 2278 2279 /* cache some func_caps values that should be restored after memset */ 2280 valid_func = func_caps->common_cap.valid_functions; 2281 txq_first_id = func_caps->common_cap.txq_first_id; 2282 rxq_first_id = func_caps->common_cap.rxq_first_id; 2283 msix_vector_first_id = func_caps->common_cap.msix_vector_first_id; 2284 max_mtu = func_caps->common_cap.max_mtu; 2285 2286 /* unset func capabilities */ 2287 memset(func_caps, 0, sizeof(*func_caps)); 2288 2289 /* restore cached values */ 2290 func_caps->common_cap.valid_functions = valid_func; 2291 func_caps->common_cap.txq_first_id = txq_first_id; 2292 func_caps->common_cap.rxq_first_id = rxq_first_id; 2293 func_caps->common_cap.msix_vector_first_id = msix_vector_first_id; 2294 func_caps->common_cap.max_mtu = max_mtu; 2295 2296 /* one Tx and one Rx queue in safe mode */ 2297 func_caps->common_cap.num_rxq = 1; 2298 func_caps->common_cap.num_txq = 1; 2299 2300 /* two MSIX vectors, one for traffic and one for misc causes */ 2301 func_caps->common_cap.num_msix_vectors = 2; 2302 func_caps->guar_num_vsi = 1; 2303 2304 /* cache some dev_caps values that should be restored after memset */ 2305 valid_func = dev_caps->common_cap.valid_functions; 2306 txq_first_id = dev_caps->common_cap.txq_first_id; 2307 rxq_first_id = dev_caps->common_cap.rxq_first_id; 2308 msix_vector_first_id = dev_caps->common_cap.msix_vector_first_id; 2309 max_mtu = dev_caps->common_cap.max_mtu; 2310 num_funcs = dev_caps->num_funcs; 2311 2312 /* unset dev capabilities */ 2313 memset(dev_caps, 0, sizeof(*dev_caps)); 2314 2315 /* restore cached values */ 2316 dev_caps->common_cap.valid_functions = valid_func; 2317 dev_caps->common_cap.txq_first_id = txq_first_id; 2318 dev_caps->common_cap.rxq_first_id = rxq_first_id; 2319 dev_caps->common_cap.msix_vector_first_id = msix_vector_first_id; 2320 dev_caps->common_cap.max_mtu = max_mtu; 2321 dev_caps->num_funcs = num_funcs; 2322 2323 /* one Tx and one Rx queue per function in safe mode */ 2324 dev_caps->common_cap.num_rxq = num_funcs; 2325 dev_caps->common_cap.num_txq = num_funcs; 2326 2327 /* two MSIX vectors per function */ 2328 dev_caps->common_cap.num_msix_vectors = 2 * num_funcs; 2329 } 2330 2331 /** 2332 * ice_get_caps - get info about the HW 2333 * @hw: pointer to the hardware structure 2334 */ 2335 enum ice_status ice_get_caps(struct ice_hw *hw) 2336 { 2337 enum ice_status status; 2338 2339 status = ice_discover_dev_caps(hw, &hw->dev_caps); 2340 if (status) 2341 return status; 2342 2343 return ice_discover_func_caps(hw, &hw->func_caps); 2344 } 2345 2346 /** 2347 * ice_aq_manage_mac_write - manage MAC address write command 2348 * @hw: pointer to the HW struct 2349 * @mac_addr: MAC address to be written as LAA/LAA+WoL/Port address 2350 * @flags: flags to control write behavior 2351 * 
@cd: pointer to command details structure or NULL 2352 * 2353 * This function is used to write MAC address to the NVM (0x0108). 2354 */ 2355 enum ice_status 2356 ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags, 2357 struct ice_sq_cd *cd) 2358 { 2359 struct ice_aqc_manage_mac_write *cmd; 2360 struct ice_aq_desc desc; 2361 2362 cmd = &desc.params.mac_write; 2363 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_write); 2364 2365 cmd->flags = flags; 2366 ether_addr_copy(cmd->mac_addr, mac_addr); 2367 2368 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 2369 } 2370 2371 /** 2372 * ice_aq_clear_pxe_mode 2373 * @hw: pointer to the HW struct 2374 * 2375 * Tell the firmware that the driver is taking over from PXE (0x0110). 2376 */ 2377 static enum ice_status ice_aq_clear_pxe_mode(struct ice_hw *hw) 2378 { 2379 struct ice_aq_desc desc; 2380 2381 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pxe_mode); 2382 desc.params.clear_pxe.rx_cnt = ICE_AQC_CLEAR_PXE_RX_CNT; 2383 2384 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 2385 } 2386 2387 /** 2388 * ice_clear_pxe_mode - clear pxe operations mode 2389 * @hw: pointer to the HW struct 2390 * 2391 * Make sure all PXE mode settings are cleared, including things 2392 * like descriptor fetch/write-back mode. 2393 */ 2394 void ice_clear_pxe_mode(struct ice_hw *hw) 2395 { 2396 if (ice_check_sq_alive(hw, &hw->adminq)) 2397 ice_aq_clear_pxe_mode(hw); 2398 } 2399 2400 /** 2401 * ice_get_link_speed_based_on_phy_type - returns link speed 2402 * @phy_type_low: lower part of phy_type 2403 * @phy_type_high: higher part of phy_type 2404 * 2405 * This helper function will convert an entry in PHY type structure 2406 * [phy_type_low, phy_type_high] to its corresponding link speed. 2407 * Note: In the structure of [phy_type_low, phy_type_high], there should 2408 * be one bit set, as this function will convert one PHY type to its 2409 * speed. 
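 * For example, a phy_type_low equal to ICE_PHY_TYPE_LOW_10GBASE_T (a
 * single bit set) is reported as ICE_AQ_LINK_SPEED_10GB.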
2410 * If no bit gets set, ICE_LINK_SPEED_UNKNOWN will be returned 2411 * If more than one bit gets set, ICE_LINK_SPEED_UNKNOWN will be returned 2412 */ 2413 static u16 2414 ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high) 2415 { 2416 u16 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN; 2417 u16 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN; 2418 2419 switch (phy_type_low) { 2420 case ICE_PHY_TYPE_LOW_100BASE_TX: 2421 case ICE_PHY_TYPE_LOW_100M_SGMII: 2422 speed_phy_type_low = ICE_AQ_LINK_SPEED_100MB; 2423 break; 2424 case ICE_PHY_TYPE_LOW_1000BASE_T: 2425 case ICE_PHY_TYPE_LOW_1000BASE_SX: 2426 case ICE_PHY_TYPE_LOW_1000BASE_LX: 2427 case ICE_PHY_TYPE_LOW_1000BASE_KX: 2428 case ICE_PHY_TYPE_LOW_1G_SGMII: 2429 speed_phy_type_low = ICE_AQ_LINK_SPEED_1000MB; 2430 break; 2431 case ICE_PHY_TYPE_LOW_2500BASE_T: 2432 case ICE_PHY_TYPE_LOW_2500BASE_X: 2433 case ICE_PHY_TYPE_LOW_2500BASE_KX: 2434 speed_phy_type_low = ICE_AQ_LINK_SPEED_2500MB; 2435 break; 2436 case ICE_PHY_TYPE_LOW_5GBASE_T: 2437 case ICE_PHY_TYPE_LOW_5GBASE_KR: 2438 speed_phy_type_low = ICE_AQ_LINK_SPEED_5GB; 2439 break; 2440 case ICE_PHY_TYPE_LOW_10GBASE_T: 2441 case ICE_PHY_TYPE_LOW_10G_SFI_DA: 2442 case ICE_PHY_TYPE_LOW_10GBASE_SR: 2443 case ICE_PHY_TYPE_LOW_10GBASE_LR: 2444 case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1: 2445 case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC: 2446 case ICE_PHY_TYPE_LOW_10G_SFI_C2C: 2447 speed_phy_type_low = ICE_AQ_LINK_SPEED_10GB; 2448 break; 2449 case ICE_PHY_TYPE_LOW_25GBASE_T: 2450 case ICE_PHY_TYPE_LOW_25GBASE_CR: 2451 case ICE_PHY_TYPE_LOW_25GBASE_CR_S: 2452 case ICE_PHY_TYPE_LOW_25GBASE_CR1: 2453 case ICE_PHY_TYPE_LOW_25GBASE_SR: 2454 case ICE_PHY_TYPE_LOW_25GBASE_LR: 2455 case ICE_PHY_TYPE_LOW_25GBASE_KR: 2456 case ICE_PHY_TYPE_LOW_25GBASE_KR_S: 2457 case ICE_PHY_TYPE_LOW_25GBASE_KR1: 2458 case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC: 2459 case ICE_PHY_TYPE_LOW_25G_AUI_C2C: 2460 speed_phy_type_low = ICE_AQ_LINK_SPEED_25GB; 2461 break; 2462 case ICE_PHY_TYPE_LOW_40GBASE_CR4: 2463 case ICE_PHY_TYPE_LOW_40GBASE_SR4: 2464 case ICE_PHY_TYPE_LOW_40GBASE_LR4: 2465 case ICE_PHY_TYPE_LOW_40GBASE_KR4: 2466 case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC: 2467 case ICE_PHY_TYPE_LOW_40G_XLAUI: 2468 speed_phy_type_low = ICE_AQ_LINK_SPEED_40GB; 2469 break; 2470 case ICE_PHY_TYPE_LOW_50GBASE_CR2: 2471 case ICE_PHY_TYPE_LOW_50GBASE_SR2: 2472 case ICE_PHY_TYPE_LOW_50GBASE_LR2: 2473 case ICE_PHY_TYPE_LOW_50GBASE_KR2: 2474 case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC: 2475 case ICE_PHY_TYPE_LOW_50G_LAUI2: 2476 case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC: 2477 case ICE_PHY_TYPE_LOW_50G_AUI2: 2478 case ICE_PHY_TYPE_LOW_50GBASE_CP: 2479 case ICE_PHY_TYPE_LOW_50GBASE_SR: 2480 case ICE_PHY_TYPE_LOW_50GBASE_FR: 2481 case ICE_PHY_TYPE_LOW_50GBASE_LR: 2482 case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4: 2483 case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC: 2484 case ICE_PHY_TYPE_LOW_50G_AUI1: 2485 speed_phy_type_low = ICE_AQ_LINK_SPEED_50GB; 2486 break; 2487 case ICE_PHY_TYPE_LOW_100GBASE_CR4: 2488 case ICE_PHY_TYPE_LOW_100GBASE_SR4: 2489 case ICE_PHY_TYPE_LOW_100GBASE_LR4: 2490 case ICE_PHY_TYPE_LOW_100GBASE_KR4: 2491 case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC: 2492 case ICE_PHY_TYPE_LOW_100G_CAUI4: 2493 case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC: 2494 case ICE_PHY_TYPE_LOW_100G_AUI4: 2495 case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4: 2496 case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4: 2497 case ICE_PHY_TYPE_LOW_100GBASE_CP2: 2498 case ICE_PHY_TYPE_LOW_100GBASE_SR2: 2499 case ICE_PHY_TYPE_LOW_100GBASE_DR: 2500 speed_phy_type_low = ICE_AQ_LINK_SPEED_100GB; 2501 break; 
2502 default: 2503 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN; 2504 break; 2505 } 2506 2507 switch (phy_type_high) { 2508 case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4: 2509 case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC: 2510 case ICE_PHY_TYPE_HIGH_100G_CAUI2: 2511 case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC: 2512 case ICE_PHY_TYPE_HIGH_100G_AUI2: 2513 speed_phy_type_high = ICE_AQ_LINK_SPEED_100GB; 2514 break; 2515 default: 2516 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN; 2517 break; 2518 } 2519 2520 if (speed_phy_type_low == ICE_AQ_LINK_SPEED_UNKNOWN && 2521 speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN) 2522 return ICE_AQ_LINK_SPEED_UNKNOWN; 2523 else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN && 2524 speed_phy_type_high != ICE_AQ_LINK_SPEED_UNKNOWN) 2525 return ICE_AQ_LINK_SPEED_UNKNOWN; 2526 else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN && 2527 speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN) 2528 return speed_phy_type_low; 2529 else 2530 return speed_phy_type_high; 2531 } 2532 2533 /** 2534 * ice_update_phy_type 2535 * @phy_type_low: pointer to the lower part of phy_type 2536 * @phy_type_high: pointer to the higher part of phy_type 2537 * @link_speeds_bitmap: targeted link speeds bitmap 2538 * 2539 * Note: For the link_speeds_bitmap structure, you can check it at 2540 * [ice_aqc_get_link_status->link_speed]. Caller can pass in 2541 * link_speeds_bitmap include multiple speeds. 2542 * 2543 * Each entry in this [phy_type_low, phy_type_high] structure will 2544 * present a certain link speed. This helper function will turn on bits 2545 * in [phy_type_low, phy_type_high] structure based on the value of 2546 * link_speeds_bitmap input parameter. 2547 */ 2548 void 2549 ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high, 2550 u16 link_speeds_bitmap) 2551 { 2552 u64 pt_high; 2553 u64 pt_low; 2554 int index; 2555 u16 speed; 2556 2557 /* We first check with low part of phy_type */ 2558 for (index = 0; index <= ICE_PHY_TYPE_LOW_MAX_INDEX; index++) { 2559 pt_low = BIT_ULL(index); 2560 speed = ice_get_link_speed_based_on_phy_type(pt_low, 0); 2561 2562 if (link_speeds_bitmap & speed) 2563 *phy_type_low |= BIT_ULL(index); 2564 } 2565 2566 /* We then check with high part of phy_type */ 2567 for (index = 0; index <= ICE_PHY_TYPE_HIGH_MAX_INDEX; index++) { 2568 pt_high = BIT_ULL(index); 2569 speed = ice_get_link_speed_based_on_phy_type(0, pt_high); 2570 2571 if (link_speeds_bitmap & speed) 2572 *phy_type_high |= BIT_ULL(index); 2573 } 2574 } 2575 2576 /** 2577 * ice_aq_set_phy_cfg 2578 * @hw: pointer to the HW struct 2579 * @pi: port info structure of the interested logical port 2580 * @cfg: structure with PHY configuration data to be set 2581 * @cd: pointer to command details structure or NULL 2582 * 2583 * Set the various PHY configuration parameters supported on the Port. 2584 * One or more of the Set PHY config parameters may be ignored in an MFP 2585 * mode as the PF may not have the privilege to set some of the PHY Config 2586 * parameters. This status will be indicated by the command response (0x0601). 2587 */ 2588 enum ice_status 2589 ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi, 2590 struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd) 2591 { 2592 struct ice_aq_desc desc; 2593 enum ice_status status; 2594 2595 if (!cfg) 2596 return ICE_ERR_PARAM; 2597 2598 /* Ensure that only valid bits of cfg->caps can be turned on. 
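 * Invalid bits are logged and masked off rather than treated as a hard
 * error, so the command is still sent with a sanitized caps value.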
*/ 2599 if (cfg->caps & ~ICE_AQ_PHY_ENA_VALID_MASK) { 2600 ice_debug(hw, ICE_DBG_PHY, 2601 "Invalid bit is set in ice_aqc_set_phy_cfg_data->caps : 0x%x\n", 2602 cfg->caps); 2603 2604 cfg->caps &= ICE_AQ_PHY_ENA_VALID_MASK; 2605 } 2606 2607 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_cfg); 2608 desc.params.set_phy.lport_num = pi->lport; 2609 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 2610 2611 ice_debug(hw, ICE_DBG_LINK, "set phy cfg\n"); 2612 ice_debug(hw, ICE_DBG_LINK, " phy_type_low = 0x%llx\n", 2613 (unsigned long long)le64_to_cpu(cfg->phy_type_low)); 2614 ice_debug(hw, ICE_DBG_LINK, " phy_type_high = 0x%llx\n", 2615 (unsigned long long)le64_to_cpu(cfg->phy_type_high)); 2616 ice_debug(hw, ICE_DBG_LINK, " caps = 0x%x\n", cfg->caps); 2617 ice_debug(hw, ICE_DBG_LINK, " low_power_ctrl_an = 0x%x\n", 2618 cfg->low_power_ctrl_an); 2619 ice_debug(hw, ICE_DBG_LINK, " eee_cap = 0x%x\n", cfg->eee_cap); 2620 ice_debug(hw, ICE_DBG_LINK, " eeer_value = 0x%x\n", cfg->eeer_value); 2621 ice_debug(hw, ICE_DBG_LINK, " link_fec_opt = 0x%x\n", 2622 cfg->link_fec_opt); 2623 2624 status = ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd); 2625 if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE) 2626 status = 0; 2627 2628 if (!status) 2629 pi->phy.curr_user_phy_cfg = *cfg; 2630 2631 return status; 2632 } 2633 2634 /** 2635 * ice_update_link_info - update status of the HW network link 2636 * @pi: port info structure of the interested logical port 2637 */ 2638 enum ice_status ice_update_link_info(struct ice_port_info *pi) 2639 { 2640 struct ice_link_status *li; 2641 enum ice_status status; 2642 2643 if (!pi) 2644 return ICE_ERR_PARAM; 2645 2646 li = &pi->phy.link_info; 2647 2648 status = ice_aq_get_link_info(pi, true, NULL, NULL); 2649 if (status) 2650 return status; 2651 2652 if (li->link_info & ICE_AQ_MEDIA_AVAILABLE) { 2653 struct ice_aqc_get_phy_caps_data *pcaps; 2654 struct ice_hw *hw; 2655 2656 hw = pi->hw; 2657 pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), 2658 GFP_KERNEL); 2659 if (!pcaps) 2660 return ICE_ERR_NO_MEMORY; 2661 2662 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP, 2663 pcaps, NULL); 2664 2665 devm_kfree(ice_hw_to_dev(hw), pcaps); 2666 } 2667 2668 return status; 2669 } 2670 2671 /** 2672 * ice_cache_phy_user_req 2673 * @pi: port information structure 2674 * @cache_data: PHY logging data 2675 * @cache_mode: PHY logging mode 2676 * 2677 * Log the user request on (FC, FEC, SPEED) for later use. 
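 *
 * A typical call, as made from ice_cfg_phy_fc() below when a flow control
 * request is cached (illustrative of the calling pattern only):
 *
 *	struct ice_phy_cache_mode_data cache_data;
 *
 *	cache_data.data.curr_user_fc_req = req_mode;
 *	ice_cache_phy_user_req(pi, cache_data, ICE_FC_MODE);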
2678 */ 2679 static void 2680 ice_cache_phy_user_req(struct ice_port_info *pi, 2681 struct ice_phy_cache_mode_data cache_data, 2682 enum ice_phy_cache_mode cache_mode) 2683 { 2684 if (!pi) 2685 return; 2686 2687 switch (cache_mode) { 2688 case ICE_FC_MODE: 2689 pi->phy.curr_user_fc_req = cache_data.data.curr_user_fc_req; 2690 break; 2691 case ICE_SPEED_MODE: 2692 pi->phy.curr_user_speed_req = 2693 cache_data.data.curr_user_speed_req; 2694 break; 2695 case ICE_FEC_MODE: 2696 pi->phy.curr_user_fec_req = cache_data.data.curr_user_fec_req; 2697 break; 2698 default: 2699 break; 2700 } 2701 } 2702 2703 /** 2704 * ice_caps_to_fc_mode 2705 * @caps: PHY capabilities 2706 * 2707 * Convert PHY FC capabilities to ice FC mode 2708 */ 2709 enum ice_fc_mode ice_caps_to_fc_mode(u8 caps) 2710 { 2711 if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE && 2712 caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE) 2713 return ICE_FC_FULL; 2714 2715 if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE) 2716 return ICE_FC_TX_PAUSE; 2717 2718 if (caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE) 2719 return ICE_FC_RX_PAUSE; 2720 2721 return ICE_FC_NONE; 2722 } 2723 2724 /** 2725 * ice_caps_to_fec_mode 2726 * @caps: PHY capabilities 2727 * @fec_options: Link FEC options 2728 * 2729 * Convert PHY FEC capabilities to ice FEC mode 2730 */ 2731 enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options) 2732 { 2733 if (caps & ICE_AQC_PHY_EN_AUTO_FEC) 2734 return ICE_FEC_AUTO; 2735 2736 if (fec_options & (ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN | 2737 ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ | 2738 ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN | 2739 ICE_AQC_PHY_FEC_25G_KR_REQ)) 2740 return ICE_FEC_BASER; 2741 2742 if (fec_options & (ICE_AQC_PHY_FEC_25G_RS_528_REQ | 2743 ICE_AQC_PHY_FEC_25G_RS_544_REQ | 2744 ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN)) 2745 return ICE_FEC_RS; 2746 2747 return ICE_FEC_NONE; 2748 } 2749 2750 /** 2751 * ice_cfg_phy_fc - Configure PHY FC data based on FC mode 2752 * @pi: port information structure 2753 * @cfg: PHY configuration data to set FC mode 2754 * @req_mode: FC mode to configure 2755 */ 2756 enum ice_status 2757 ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg, 2758 enum ice_fc_mode req_mode) 2759 { 2760 struct ice_phy_cache_mode_data cache_data; 2761 u8 pause_mask = 0x0; 2762 2763 if (!pi || !cfg) 2764 return ICE_ERR_BAD_PTR; 2765 2766 switch (req_mode) { 2767 case ICE_FC_FULL: 2768 pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE; 2769 pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE; 2770 break; 2771 case ICE_FC_RX_PAUSE: 2772 pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE; 2773 break; 2774 case ICE_FC_TX_PAUSE: 2775 pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE; 2776 break; 2777 default: 2778 break; 2779 } 2780 2781 /* clear the old pause settings */ 2782 cfg->caps &= ~(ICE_AQC_PHY_EN_TX_LINK_PAUSE | 2783 ICE_AQC_PHY_EN_RX_LINK_PAUSE); 2784 2785 /* set the new capabilities */ 2786 cfg->caps |= pause_mask; 2787 2788 /* Cache user FC request */ 2789 cache_data.data.curr_user_fc_req = req_mode; 2790 ice_cache_phy_user_req(pi, cache_data, ICE_FC_MODE); 2791 2792 return 0; 2793 } 2794 2795 /** 2796 * ice_set_fc 2797 * @pi: port information structure 2798 * @aq_failures: pointer to status code, specific to ice_set_fc routine 2799 * @ena_auto_link_update: enable automatic link update 2800 * 2801 * Set the requested flow control mode. 
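 *
 * A minimal calling sketch (illustrative only, assuming a valid pi): the
 * requested mode is taken from pi->fc.req_mode, so the caller sets that
 * field before invoking this routine:
 *
 *	enum ice_status status;
 *	u8 aq_failures = 0;
 *
 *	pi->fc.req_mode = ICE_FC_FULL;
 *	status = ice_set_fc(pi, &aq_failures, true);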
2802 */ 2803 enum ice_status 2804 ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update) 2805 { 2806 struct ice_aqc_set_phy_cfg_data cfg = { 0 }; 2807 struct ice_aqc_get_phy_caps_data *pcaps; 2808 enum ice_status status; 2809 struct ice_hw *hw; 2810 2811 if (!pi || !aq_failures) 2812 return ICE_ERR_BAD_PTR; 2813 2814 *aq_failures = 0; 2815 hw = pi->hw; 2816 2817 pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL); 2818 if (!pcaps) 2819 return ICE_ERR_NO_MEMORY; 2820 2821 /* Get the current PHY config */ 2822 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps, 2823 NULL); 2824 if (status) { 2825 *aq_failures = ICE_SET_FC_AQ_FAIL_GET; 2826 goto out; 2827 } 2828 2829 ice_copy_phy_caps_to_cfg(pi, pcaps, &cfg); 2830 2831 /* Configure the set PHY data */ 2832 status = ice_cfg_phy_fc(pi, &cfg, pi->fc.req_mode); 2833 if (status) 2834 goto out; 2835 2836 /* If the capabilities have changed, then set the new config */ 2837 if (cfg.caps != pcaps->caps) { 2838 int retry_count, retry_max = 10; 2839 2840 /* Auto restart link so settings take effect */ 2841 if (ena_auto_link_update) 2842 cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT; 2843 2844 status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL); 2845 if (status) { 2846 *aq_failures = ICE_SET_FC_AQ_FAIL_SET; 2847 goto out; 2848 } 2849 2850 /* Update the link info 2851 * It sometimes takes a really long time for link to 2852 * come back from the atomic reset. Thus, we wait a 2853 * little bit. 2854 */ 2855 for (retry_count = 0; retry_count < retry_max; retry_count++) { 2856 status = ice_update_link_info(pi); 2857 2858 if (!status) 2859 break; 2860 2861 mdelay(100); 2862 } 2863 2864 if (status) 2865 *aq_failures = ICE_SET_FC_AQ_FAIL_UPDATE; 2866 } 2867 2868 out: 2869 devm_kfree(ice_hw_to_dev(hw), pcaps); 2870 return status; 2871 } 2872 2873 /** 2874 * ice_phy_caps_equals_cfg 2875 * @phy_caps: PHY capabilities 2876 * @phy_cfg: PHY configuration 2877 * 2878 * Helper function to determine if PHY capabilities matches PHY 2879 * configuration 2880 */ 2881 bool 2882 ice_phy_caps_equals_cfg(struct ice_aqc_get_phy_caps_data *phy_caps, 2883 struct ice_aqc_set_phy_cfg_data *phy_cfg) 2884 { 2885 u8 caps_mask, cfg_mask; 2886 2887 if (!phy_caps || !phy_cfg) 2888 return false; 2889 2890 /* These bits are not common between capabilities and configuration. 2891 * Do not use them to determine equality. 
2892 */ 2893 caps_mask = ICE_AQC_PHY_CAPS_MASK & ~(ICE_AQC_PHY_AN_MODE | 2894 ICE_AQC_GET_PHY_EN_MOD_QUAL); 2895 cfg_mask = ICE_AQ_PHY_ENA_VALID_MASK & ~ICE_AQ_PHY_ENA_AUTO_LINK_UPDT; 2896 2897 if (phy_caps->phy_type_low != phy_cfg->phy_type_low || 2898 phy_caps->phy_type_high != phy_cfg->phy_type_high || 2899 ((phy_caps->caps & caps_mask) != (phy_cfg->caps & cfg_mask)) || 2900 phy_caps->low_power_ctrl_an != phy_cfg->low_power_ctrl_an || 2901 phy_caps->eee_cap != phy_cfg->eee_cap || 2902 phy_caps->eeer_value != phy_cfg->eeer_value || 2903 phy_caps->link_fec_options != phy_cfg->link_fec_opt) 2904 return false; 2905 2906 return true; 2907 } 2908 2909 /** 2910 * ice_copy_phy_caps_to_cfg - Copy PHY ability data to configuration data 2911 * @pi: port information structure 2912 * @caps: PHY ability structure to copy date from 2913 * @cfg: PHY configuration structure to copy data to 2914 * 2915 * Helper function to copy AQC PHY get ability data to PHY set configuration 2916 * data structure 2917 */ 2918 void 2919 ice_copy_phy_caps_to_cfg(struct ice_port_info *pi, 2920 struct ice_aqc_get_phy_caps_data *caps, 2921 struct ice_aqc_set_phy_cfg_data *cfg) 2922 { 2923 if (!pi || !caps || !cfg) 2924 return; 2925 2926 memset(cfg, 0, sizeof(*cfg)); 2927 cfg->phy_type_low = caps->phy_type_low; 2928 cfg->phy_type_high = caps->phy_type_high; 2929 cfg->caps = caps->caps; 2930 cfg->low_power_ctrl_an = caps->low_power_ctrl_an; 2931 cfg->eee_cap = caps->eee_cap; 2932 cfg->eeer_value = caps->eeer_value; 2933 cfg->link_fec_opt = caps->link_fec_options; 2934 cfg->module_compliance_enforcement = 2935 caps->module_compliance_enforcement; 2936 2937 if (ice_fw_supports_link_override(pi->hw)) { 2938 struct ice_link_default_override_tlv tlv; 2939 2940 if (ice_get_link_default_override(&tlv, pi)) 2941 return; 2942 2943 if (tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE) 2944 cfg->module_compliance_enforcement |= 2945 ICE_LINK_OVERRIDE_STRICT_MODE; 2946 } 2947 } 2948 2949 /** 2950 * ice_cfg_phy_fec - Configure PHY FEC data based on FEC mode 2951 * @pi: port information structure 2952 * @cfg: PHY configuration data to set FEC mode 2953 * @fec: FEC mode to configure 2954 */ 2955 enum ice_status 2956 ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg, 2957 enum ice_fec_mode fec) 2958 { 2959 struct ice_aqc_get_phy_caps_data *pcaps; 2960 enum ice_status status; 2961 2962 if (!pi || !cfg) 2963 return ICE_ERR_BAD_PTR; 2964 2965 pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL); 2966 if (!pcaps) 2967 return ICE_ERR_NO_MEMORY; 2968 2969 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP, pcaps, 2970 NULL); 2971 if (status) 2972 goto out; 2973 2974 cfg->caps |= pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC; 2975 cfg->link_fec_opt = pcaps->link_fec_options; 2976 2977 switch (fec) { 2978 case ICE_FEC_BASER: 2979 /* Clear RS bits, and AND BASE-R ability 2980 * bits and OR request bits. 2981 */ 2982 cfg->link_fec_opt &= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN | 2983 ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN; 2984 cfg->link_fec_opt |= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ | 2985 ICE_AQC_PHY_FEC_25G_KR_REQ; 2986 break; 2987 case ICE_FEC_RS: 2988 /* Clear BASE-R bits, and AND RS ability 2989 * bits and OR request bits. 2990 */ 2991 cfg->link_fec_opt &= ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN; 2992 cfg->link_fec_opt |= ICE_AQC_PHY_FEC_25G_RS_528_REQ | 2993 ICE_AQC_PHY_FEC_25G_RS_544_REQ; 2994 break; 2995 case ICE_FEC_NONE: 2996 /* Clear all FEC option bits. 
*/ 2997 cfg->link_fec_opt &= ~ICE_AQC_PHY_FEC_MASK; 2998 break; 2999 case ICE_FEC_AUTO: 3000 /* AND auto FEC bit, and all caps bits. */ 3001 cfg->caps &= ICE_AQC_PHY_CAPS_MASK; 3002 cfg->link_fec_opt |= pcaps->link_fec_options; 3003 break; 3004 default: 3005 status = ICE_ERR_PARAM; 3006 break; 3007 } 3008 3009 if (fec == ICE_FEC_AUTO && ice_fw_supports_link_override(pi->hw)) { 3010 struct ice_link_default_override_tlv tlv; 3011 3012 if (ice_get_link_default_override(&tlv, pi)) 3013 goto out; 3014 3015 if (!(tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE) && 3016 (tlv.options & ICE_LINK_OVERRIDE_EN)) 3017 cfg->link_fec_opt = tlv.fec_options; 3018 } 3019 3020 out: 3021 kfree(pcaps); 3022 3023 return status; 3024 } 3025 3026 /** 3027 * ice_get_link_status - get status of the HW network link 3028 * @pi: port information structure 3029 * @link_up: pointer to bool (true/false = linkup/linkdown) 3030 * 3031 * Variable link_up is true if link is up, false if link is down. 3032 * The variable link_up is invalid if status is non zero. As a 3033 * result of this call, link status reporting becomes enabled 3034 */ 3035 enum ice_status ice_get_link_status(struct ice_port_info *pi, bool *link_up) 3036 { 3037 struct ice_phy_info *phy_info; 3038 enum ice_status status = 0; 3039 3040 if (!pi || !link_up) 3041 return ICE_ERR_PARAM; 3042 3043 phy_info = &pi->phy; 3044 3045 if (phy_info->get_link_info) { 3046 status = ice_update_link_info(pi); 3047 3048 if (status) 3049 ice_debug(pi->hw, ICE_DBG_LINK, 3050 "get link status error, status = %d\n", 3051 status); 3052 } 3053 3054 *link_up = phy_info->link_info.link_info & ICE_AQ_LINK_UP; 3055 3056 return status; 3057 } 3058 3059 /** 3060 * ice_aq_set_link_restart_an 3061 * @pi: pointer to the port information structure 3062 * @ena_link: if true: enable link, if false: disable link 3063 * @cd: pointer to command details structure or NULL 3064 * 3065 * Sets up the link and restarts the Auto-Negotiation over the link. 
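 * For example, calling this with ena_link set to true re-enables the link
 * and restarts auto-negotiation on pi->lport, while passing false requests
 * that the link be brought down instead.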
3066 */ 3067 enum ice_status 3068 ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link, 3069 struct ice_sq_cd *cd) 3070 { 3071 struct ice_aqc_restart_an *cmd; 3072 struct ice_aq_desc desc; 3073 3074 cmd = &desc.params.restart_an; 3075 3076 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_restart_an); 3077 3078 cmd->cmd_flags = ICE_AQC_RESTART_AN_LINK_RESTART; 3079 cmd->lport_num = pi->lport; 3080 if (ena_link) 3081 cmd->cmd_flags |= ICE_AQC_RESTART_AN_LINK_ENABLE; 3082 else 3083 cmd->cmd_flags &= ~ICE_AQC_RESTART_AN_LINK_ENABLE; 3084 3085 return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd); 3086 } 3087 3088 /** 3089 * ice_aq_set_event_mask 3090 * @hw: pointer to the HW struct 3091 * @port_num: port number of the physical function 3092 * @mask: event mask to be set 3093 * @cd: pointer to command details structure or NULL 3094 * 3095 * Set event mask (0x0613) 3096 */ 3097 enum ice_status 3098 ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask, 3099 struct ice_sq_cd *cd) 3100 { 3101 struct ice_aqc_set_event_mask *cmd; 3102 struct ice_aq_desc desc; 3103 3104 cmd = &desc.params.set_event_mask; 3105 3106 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_event_mask); 3107 3108 cmd->lport_num = port_num; 3109 3110 cmd->event_mask = cpu_to_le16(mask); 3111 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 3112 } 3113 3114 /** 3115 * ice_aq_set_mac_loopback 3116 * @hw: pointer to the HW struct 3117 * @ena_lpbk: Enable or Disable loopback 3118 * @cd: pointer to command details structure or NULL 3119 * 3120 * Enable/disable loopback on a given port 3121 */ 3122 enum ice_status 3123 ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd) 3124 { 3125 struct ice_aqc_set_mac_lb *cmd; 3126 struct ice_aq_desc desc; 3127 3128 cmd = &desc.params.set_mac_lb; 3129 3130 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_lb); 3131 if (ena_lpbk) 3132 cmd->lb_mode = ICE_AQ_MAC_LB_EN; 3133 3134 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 3135 } 3136 3137 /** 3138 * ice_aq_set_port_id_led 3139 * @pi: pointer to the port information 3140 * @is_orig_mode: is this LED set to original mode (by the net-list) 3141 * @cd: pointer to command details structure or NULL 3142 * 3143 * Set LED value for the given port (0x06e9) 3144 */ 3145 enum ice_status 3146 ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode, 3147 struct ice_sq_cd *cd) 3148 { 3149 struct ice_aqc_set_port_id_led *cmd; 3150 struct ice_hw *hw = pi->hw; 3151 struct ice_aq_desc desc; 3152 3153 cmd = &desc.params.set_port_id_led; 3154 3155 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_id_led); 3156 3157 if (is_orig_mode) 3158 cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_ORIG; 3159 else 3160 cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_BLINK; 3161 3162 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 3163 } 3164 3165 /** 3166 * ice_aq_sff_eeprom 3167 * @hw: pointer to the HW struct 3168 * @lport: bits [7:0] = logical port, bit [8] = logical port valid 3169 * @bus_addr: I2C bus address of the eeprom (typically 0xA0, 0=topo default) 3170 * @mem_addr: I2C offset. lower 8 bits for address, 8 upper bits zero padding. 3171 * @page: QSFP page 3172 * @set_page: set or ignore the page 3173 * @data: pointer to data buffer to be read/written to the I2C device. 3174 * @length: 1-16 for read, 1 for write. 3175 * @write: 0 read, 1 for write. 
3176 * @cd: pointer to command details structure or NULL 3177 * 3178 * Read/Write SFF EEPROM (0x06EE) 3179 */ 3180 enum ice_status 3181 ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr, 3182 u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length, 3183 bool write, struct ice_sq_cd *cd) 3184 { 3185 struct ice_aqc_sff_eeprom *cmd; 3186 struct ice_aq_desc desc; 3187 enum ice_status status; 3188 3189 if (!data || (mem_addr & 0xff00)) 3190 return ICE_ERR_PARAM; 3191 3192 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_sff_eeprom); 3193 cmd = &desc.params.read_write_sff_param; 3194 desc.flags = cpu_to_le16(ICE_AQ_FLAG_RD | ICE_AQ_FLAG_BUF); 3195 cmd->lport_num = (u8)(lport & 0xff); 3196 cmd->lport_num_valid = (u8)((lport >> 8) & 0x01); 3197 cmd->i2c_bus_addr = cpu_to_le16(((bus_addr >> 1) & 3198 ICE_AQC_SFF_I2CBUS_7BIT_M) | 3199 ((set_page << 3200 ICE_AQC_SFF_SET_EEPROM_PAGE_S) & 3201 ICE_AQC_SFF_SET_EEPROM_PAGE_M)); 3202 cmd->i2c_mem_addr = cpu_to_le16(mem_addr & 0xff); 3203 cmd->eeprom_page = cpu_to_le16((u16)page << ICE_AQC_SFF_EEPROM_PAGE_S); 3204 if (write) 3205 cmd->i2c_bus_addr |= cpu_to_le16(ICE_AQC_SFF_IS_WRITE); 3206 3207 status = ice_aq_send_cmd(hw, &desc, data, length, cd); 3208 return status; 3209 } 3210 3211 /** 3212 * __ice_aq_get_set_rss_lut 3213 * @hw: pointer to the hardware structure 3214 * @vsi_id: VSI FW index 3215 * @lut_type: LUT table type 3216 * @lut: pointer to the LUT buffer provided by the caller 3217 * @lut_size: size of the LUT buffer 3218 * @glob_lut_idx: global LUT index 3219 * @set: set true to set the table, false to get the table 3220 * 3221 * Internal function to get (0x0B05) or set (0x0B03) RSS look up table 3222 */ 3223 static enum ice_status 3224 __ice_aq_get_set_rss_lut(struct ice_hw *hw, u16 vsi_id, u8 lut_type, u8 *lut, 3225 u16 lut_size, u8 glob_lut_idx, bool set) 3226 { 3227 struct ice_aqc_get_set_rss_lut *cmd_resp; 3228 struct ice_aq_desc desc; 3229 enum ice_status status; 3230 u16 flags = 0; 3231 3232 cmd_resp = &desc.params.get_set_rss_lut; 3233 3234 if (set) { 3235 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_lut); 3236 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 3237 } else { 3238 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_lut); 3239 } 3240 3241 cmd_resp->vsi_id = cpu_to_le16(((vsi_id << 3242 ICE_AQC_GSET_RSS_LUT_VSI_ID_S) & 3243 ICE_AQC_GSET_RSS_LUT_VSI_ID_M) | 3244 ICE_AQC_GSET_RSS_LUT_VSI_VALID); 3245 3246 switch (lut_type) { 3247 case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI: 3248 case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF: 3249 case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL: 3250 flags |= ((lut_type << ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_S) & 3251 ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_M); 3252 break; 3253 default: 3254 status = ICE_ERR_PARAM; 3255 goto ice_aq_get_set_rss_lut_exit; 3256 } 3257 3258 if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL) { 3259 flags |= ((glob_lut_idx << ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_S) & 3260 ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_M); 3261 3262 if (!set) 3263 goto ice_aq_get_set_rss_lut_send; 3264 } else if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) { 3265 if (!set) 3266 goto ice_aq_get_set_rss_lut_send; 3267 } else { 3268 goto ice_aq_get_set_rss_lut_send; 3269 } 3270 3271 /* LUT size is only valid for Global and PF table types */ 3272 switch (lut_size) { 3273 case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128: 3274 break; 3275 case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512: 3276 flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512_FLAG << 3277 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) & 3278 
ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M; 3279 break; 3280 case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K: 3281 if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) { 3282 flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG << 3283 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) & 3284 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M; 3285 break; 3286 } 3287 fallthrough; 3288 default: 3289 status = ICE_ERR_PARAM; 3290 goto ice_aq_get_set_rss_lut_exit; 3291 } 3292 3293 ice_aq_get_set_rss_lut_send: 3294 cmd_resp->flags = cpu_to_le16(flags); 3295 status = ice_aq_send_cmd(hw, &desc, lut, lut_size, NULL); 3296 3297 ice_aq_get_set_rss_lut_exit: 3298 return status; 3299 } 3300 3301 /** 3302 * ice_aq_get_rss_lut 3303 * @hw: pointer to the hardware structure 3304 * @vsi_handle: software VSI handle 3305 * @lut_type: LUT table type 3306 * @lut: pointer to the LUT buffer provided by the caller 3307 * @lut_size: size of the LUT buffer 3308 * 3309 * get the RSS lookup table, PF or VSI type 3310 */ 3311 enum ice_status 3312 ice_aq_get_rss_lut(struct ice_hw *hw, u16 vsi_handle, u8 lut_type, 3313 u8 *lut, u16 lut_size) 3314 { 3315 if (!ice_is_vsi_valid(hw, vsi_handle) || !lut) 3316 return ICE_ERR_PARAM; 3317 3318 return __ice_aq_get_set_rss_lut(hw, ice_get_hw_vsi_num(hw, vsi_handle), 3319 lut_type, lut, lut_size, 0, false); 3320 } 3321 3322 /** 3323 * ice_aq_set_rss_lut 3324 * @hw: pointer to the hardware structure 3325 * @vsi_handle: software VSI handle 3326 * @lut_type: LUT table type 3327 * @lut: pointer to the LUT buffer provided by the caller 3328 * @lut_size: size of the LUT buffer 3329 * 3330 * set the RSS lookup table, PF or VSI type 3331 */ 3332 enum ice_status 3333 ice_aq_set_rss_lut(struct ice_hw *hw, u16 vsi_handle, u8 lut_type, 3334 u8 *lut, u16 lut_size) 3335 { 3336 if (!ice_is_vsi_valid(hw, vsi_handle) || !lut) 3337 return ICE_ERR_PARAM; 3338 3339 return __ice_aq_get_set_rss_lut(hw, ice_get_hw_vsi_num(hw, vsi_handle), 3340 lut_type, lut, lut_size, 0, true); 3341 } 3342 3343 /** 3344 * __ice_aq_get_set_rss_key 3345 * @hw: pointer to the HW struct 3346 * @vsi_id: VSI FW index 3347 * @key: pointer to key info struct 3348 * @set: set true to set the key, false to get the key 3349 * 3350 * get (0x0B04) or set (0x0B02) the RSS key per VSI 3351 */ 3352 static enum 3353 ice_status __ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id, 3354 struct ice_aqc_get_set_rss_keys *key, 3355 bool set) 3356 { 3357 struct ice_aqc_get_set_rss_key *cmd_resp; 3358 u16 key_size = sizeof(*key); 3359 struct ice_aq_desc desc; 3360 3361 cmd_resp = &desc.params.get_set_rss_key; 3362 3363 if (set) { 3364 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_key); 3365 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 3366 } else { 3367 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_key); 3368 } 3369 3370 cmd_resp->vsi_id = cpu_to_le16(((vsi_id << 3371 ICE_AQC_GSET_RSS_KEY_VSI_ID_S) & 3372 ICE_AQC_GSET_RSS_KEY_VSI_ID_M) | 3373 ICE_AQC_GSET_RSS_KEY_VSI_VALID); 3374 3375 return ice_aq_send_cmd(hw, &desc, key, key_size, NULL); 3376 } 3377 3378 /** 3379 * ice_aq_get_rss_key 3380 * @hw: pointer to the HW struct 3381 * @vsi_handle: software VSI handle 3382 * @key: pointer to key info struct 3383 * 3384 * get the RSS key per VSI 3385 */ 3386 enum ice_status 3387 ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle, 3388 struct ice_aqc_get_set_rss_keys *key) 3389 { 3390 if (!ice_is_vsi_valid(hw, vsi_handle) || !key) 3391 return ICE_ERR_PARAM; 3392 3393 return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle), 3394 key, false); 3395 } 3396 3397 /** 3398 
* ice_aq_set_rss_key 3399 * @hw: pointer to the HW struct 3400 * @vsi_handle: software VSI handle 3401 * @keys: pointer to key info struct 3402 * 3403 * set the RSS key per VSI 3404 */ 3405 enum ice_status 3406 ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle, 3407 struct ice_aqc_get_set_rss_keys *keys) 3408 { 3409 if (!ice_is_vsi_valid(hw, vsi_handle) || !keys) 3410 return ICE_ERR_PARAM; 3411 3412 return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle), 3413 keys, true); 3414 } 3415 3416 /** 3417 * ice_aq_add_lan_txq 3418 * @hw: pointer to the hardware structure 3419 * @num_qgrps: Number of added queue groups 3420 * @qg_list: list of queue groups to be added 3421 * @buf_size: size of buffer for indirect command 3422 * @cd: pointer to command details structure or NULL 3423 * 3424 * Add Tx LAN queue (0x0C30) 3425 * 3426 * NOTE: 3427 * Prior to calling add Tx LAN queue: 3428 * Initialize the following as part of the Tx queue context: 3429 * Completion queue ID if the queue uses Completion queue, Quanta profile, 3430 * Cache profile and Packet shaper profile. 3431 * 3432 * After add Tx LAN queue AQ command is completed: 3433 * Interrupts should be associated with specific queues, 3434 * Association of Tx queue to Doorbell queue is not part of Add LAN Tx queue 3435 * flow. 3436 */ 3437 static enum ice_status 3438 ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps, 3439 struct ice_aqc_add_tx_qgrp *qg_list, u16 buf_size, 3440 struct ice_sq_cd *cd) 3441 { 3442 struct ice_aqc_add_tx_qgrp *list; 3443 struct ice_aqc_add_txqs *cmd; 3444 struct ice_aq_desc desc; 3445 u16 i, sum_size = 0; 3446 3447 cmd = &desc.params.add_txqs; 3448 3449 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_txqs); 3450 3451 if (!qg_list) 3452 return ICE_ERR_PARAM; 3453 3454 if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS) 3455 return ICE_ERR_PARAM; 3456 3457 for (i = 0, list = qg_list; i < num_qgrps; i++) { 3458 sum_size += struct_size(list, txqs, list->num_txqs); 3459 list = (struct ice_aqc_add_tx_qgrp *)(list->txqs + 3460 list->num_txqs); 3461 } 3462 3463 if (buf_size != sum_size) 3464 return ICE_ERR_PARAM; 3465 3466 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 3467 3468 cmd->num_qgrps = num_qgrps; 3469 3470 return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd); 3471 } 3472 3473 /** 3474 * ice_aq_dis_lan_txq 3475 * @hw: pointer to the hardware structure 3476 * @num_qgrps: number of groups in the list 3477 * @qg_list: the list of groups to disable 3478 * @buf_size: the total size of the qg_list buffer in bytes 3479 * @rst_src: if called due to reset, specifies the reset source 3480 * @vmvf_num: the relative VM or VF number that is undergoing the reset 3481 * @cd: pointer to command details structure or NULL 3482 * 3483 * Disable LAN Tx queue (0x0C31) 3484 */ 3485 static enum ice_status 3486 ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps, 3487 struct ice_aqc_dis_txq_item *qg_list, u16 buf_size, 3488 enum ice_disq_rst_src rst_src, u16 vmvf_num, 3489 struct ice_sq_cd *cd) 3490 { 3491 struct ice_aqc_dis_txq_item *item; 3492 struct ice_aqc_dis_txqs *cmd; 3493 struct ice_aq_desc desc; 3494 enum ice_status status; 3495 u16 i, sz = 0; 3496 3497 cmd = &desc.params.dis_txqs; 3498 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs); 3499 3500 /* qg_list can be NULL only in VM/VF reset flow */ 3501 if (!qg_list && !rst_src) 3502 return ICE_ERR_PARAM; 3503 3504 if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS) 3505 return ICE_ERR_PARAM; 3506 3507 cmd->num_entries = num_qgrps; 3508 3509 cmd->vmvf_and_timeout = 

/**
 * ice_aq_dis_lan_txq
 * @hw: pointer to the hardware structure
 * @num_qgrps: number of groups in the list
 * @qg_list: the list of groups to disable
 * @buf_size: the total size of the qg_list buffer in bytes
 * @rst_src: if called due to reset, specifies the reset source
 * @vmvf_num: the relative VM or VF number that is undergoing the reset
 * @cd: pointer to command details structure or NULL
 *
 * Disable LAN Tx queue (0x0C31)
 */
static enum ice_status
ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
		   struct ice_aqc_dis_txq_item *qg_list, u16 buf_size,
		   enum ice_disq_rst_src rst_src, u16 vmvf_num,
		   struct ice_sq_cd *cd)
{
	struct ice_aqc_dis_txq_item *item;
	struct ice_aqc_dis_txqs *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;
	u16 i, sz = 0;

	cmd = &desc.params.dis_txqs;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs);

	/* qg_list can be NULL only in VM/VF reset flow */
	if (!qg_list && !rst_src)
		return ICE_ERR_PARAM;

	if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
		return ICE_ERR_PARAM;

	cmd->num_entries = num_qgrps;

	cmd->vmvf_and_timeout = cpu_to_le16((5 << ICE_AQC_Q_DIS_TIMEOUT_S) &
					    ICE_AQC_Q_DIS_TIMEOUT_M);

	switch (rst_src) {
	case ICE_VM_RESET:
		cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VM_RESET;
		cmd->vmvf_and_timeout |=
			cpu_to_le16(vmvf_num & ICE_AQC_Q_DIS_VMVF_NUM_M);
		break;
	case ICE_VF_RESET:
		cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VF_RESET;
		/* In this case, FW expects vmvf_num to be absolute VF ID */
		cmd->vmvf_and_timeout |=
			cpu_to_le16((vmvf_num + hw->func_caps.vf_base_id) &
				    ICE_AQC_Q_DIS_VMVF_NUM_M);
		break;
	case ICE_NO_RESET:
	default:
		break;
	}

	/* flush pipe on timeout */
	cmd->cmd_type |= ICE_AQC_Q_DIS_CMD_FLUSH_PIPE;
	/* If no queue group info, we are in a reset flow. Issue the AQ */
	if (!qg_list)
		goto do_aq;

	/* set RD bit to indicate that command buffer is provided by the driver
	 * and it needs to be read by the firmware
	 */
	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	for (i = 0, item = qg_list; i < num_qgrps; i++) {
		u16 item_size = struct_size(item, q_id, item->num_qs);

		/* If the num of queues is even, add 2 bytes of padding */
		if ((item->num_qs % 2) == 0)
			item_size += 2;

		sz += item_size;

		item = (struct ice_aqc_dis_txq_item *)((u8 *)item + item_size);
	}

	if (buf_size != sz)
		return ICE_ERR_PARAM;

do_aq:
	status = ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
	if (status) {
		if (!qg_list)
			ice_debug(hw, ICE_DBG_SCHED, "VM%d disable failed %d\n",
				  vmvf_num, hw->adminq.sq_last_status);
		else
			ice_debug(hw, ICE_DBG_SCHED, "disable queue %d failed %d\n",
				  le16_to_cpu(qg_list[0].q_id[0]),
				  hw->adminq.sq_last_status);
	}
	return status;
}

/* End of FW Admin Queue command wrappers */

/**
 * ice_write_byte - write a byte to a packed context structure
 * @src_ctx: the context structure to read from
 * @dest_ctx: the context to be written to
 * @ce_info: a description of the struct to be filled
 */
static void
ice_write_byte(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
{
	u8 src_byte, dest_byte, mask;
	u8 *from, *dest;
	u16 shift_width;

	/* copy from the next struct field */
	from = src_ctx + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;
	mask = (u8)(BIT(ce_info->width) - 1);

	src_byte = *from;
	src_byte &= mask;

	/* shift to correct alignment */
	mask <<= shift_width;
	src_byte <<= shift_width;

	/* get the current bits from the target bit string */
	dest = dest_ctx + (ce_info->lsb / 8);

	memcpy(&dest_byte, dest, sizeof(dest_byte));

	dest_byte &= ~mask;	/* get the bits not changing */
	dest_byte |= src_byte;	/* add in the new bits */

	/* put it all back */
	memcpy(dest, &dest_byte, sizeof(dest_byte));
}

/**
 * ice_write_word - write a word to a packed context structure
 * @src_ctx: the context structure to read from
 * @dest_ctx: the context to be written to
 * @ce_info: a description of the struct to be filled
 */
static void
ice_write_word(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
{
	u16 src_word, mask;
	__le16 dest_word;
	u8 *from, *dest;
	u16 shift_width;

	/* copy from the next struct field */
	from = src_ctx + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;
	mask = BIT(ce_info->width) - 1;

	/* don't swizzle the bits until after the mask because the mask bits
	 * will be in a different bit position on big endian machines
	 */
	src_word = *(u16 *)from;
	src_word &= mask;

	/* shift to correct alignment */
	mask <<= shift_width;
	src_word <<= shift_width;

	/* get the current bits from the target bit string */
	dest = dest_ctx + (ce_info->lsb / 8);

	memcpy(&dest_word, dest, sizeof(dest_word));

	dest_word &= ~(cpu_to_le16(mask));	/* get the bits not changing */
	dest_word |= cpu_to_le16(src_word);	/* add in the new bits */

	/* put it all back */
	memcpy(dest, &dest_word, sizeof(dest_word));
}

/**
 * ice_write_dword - write a dword to a packed context structure
 * @src_ctx: the context structure to read from
 * @dest_ctx: the context to be written to
 * @ce_info: a description of the struct to be filled
 */
static void
ice_write_dword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
{
	u32 src_dword, mask;
	__le32 dest_dword;
	u8 *from, *dest;
	u16 shift_width;

	/* copy from the next struct field */
	from = src_ctx + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;

	/* if the field width is exactly 32 on an x86 machine, then the shift
	 * operation will not work because the SHL instruction's count is
	 * masked to 5 bits so the shift will do nothing
	 */
	if (ce_info->width < 32)
		mask = BIT(ce_info->width) - 1;
	else
		mask = (u32)~0;

	/* don't swizzle the bits until after the mask because the mask bits
	 * will be in a different bit position on big endian machines
	 */
	src_dword = *(u32 *)from;
	src_dword &= mask;

	/* shift to correct alignment */
	mask <<= shift_width;
	src_dword <<= shift_width;

	/* get the current bits from the target bit string */
	dest = dest_ctx + (ce_info->lsb / 8);

	memcpy(&dest_dword, dest, sizeof(dest_dword));

	dest_dword &= ~(cpu_to_le32(mask));	/* get the bits not changing */
	dest_dword |= cpu_to_le32(src_dword);	/* add in the new bits */

	/* put it all back */
	memcpy(dest, &dest_dword, sizeof(dest_dword));
}
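
/* Worked example (editor's illustration, not part of the driver): for a
 * hypothetical context element { .lsb = 10, .width = 3 }, the writers above
 * compute
 *
 *	shift_width = 10 % 8 = 2
 *	mask        = (1 << 3) - 1 = 0x07, shifted to 0x1c
 *	dest        = dest_ctx + 10 / 8 = dest_ctx + 1
 *
 * so only bits 2..4 of the second destination byte change and the neighboring
 * bits are preserved. The "width == 32" (and, below, "width == 64") special
 * case exists because a shift by the full operand width is not defined, so a
 * full-width field falls back to the all-ones mask ((u32)~0 or (u64)~0)
 * instead of BIT(width) - 1.
 */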

/**
 * ice_write_qword - write a qword to a packed context structure
 * @src_ctx: the context structure to read from
 * @dest_ctx: the context to be written to
 * @ce_info: a description of the struct to be filled
 */
static void
ice_write_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
{
	u64 src_qword, mask;
	__le64 dest_qword;
	u8 *from, *dest;
	u16 shift_width;

	/* copy from the next struct field */
	from = src_ctx + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;

	/* if the field width is exactly 64 on an x86 machine, then the shift
	 * operation will not work because the SHL instruction's count is
	 * masked to 6 bits so the shift will do nothing
	 */
	if (ce_info->width < 64)
		mask = BIT_ULL(ce_info->width) - 1;
	else
		mask = (u64)~0;

	/* don't swizzle the bits until after the mask because the mask bits
	 * will be in a different bit position on big endian machines
	 */
	src_qword = *(u64 *)from;
	src_qword &= mask;

	/* shift to correct alignment */
	mask <<= shift_width;
	src_qword <<= shift_width;

	/* get the current bits from the target bit string */
	dest = dest_ctx + (ce_info->lsb / 8);

	memcpy(&dest_qword, dest, sizeof(dest_qword));

	dest_qword &= ~(cpu_to_le64(mask));	/* get the bits not changing */
	dest_qword |= cpu_to_le64(src_qword);	/* add in the new bits */

	/* put it all back */
	memcpy(dest, &dest_qword, sizeof(dest_qword));
}

/**
 * ice_set_ctx - set context bits in packed structure
 * @hw: pointer to the hardware structure
 * @src_ctx: pointer to a generic non-packed context structure
 * @dest_ctx: pointer to memory for the packed structure
 * @ce_info: a description of the structure to be transformed
 */
enum ice_status
ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
	    const struct ice_ctx_ele *ce_info)
{
	int f;

	for (f = 0; ce_info[f].width; f++) {
		/* We have to deal with each element of the FW response
		 * using the correct size so that we are correct regardless
		 * of the endianness of the machine.
		 */
		if (ce_info[f].width > (ce_info[f].size_of * BITS_PER_BYTE)) {
			ice_debug(hw, ICE_DBG_QCTX,
				  "Field %d width of %d bits larger than size of %d byte(s) ... skipping write\n",
				  f, ce_info[f].width, ce_info[f].size_of);
			continue;
		}
		switch (ce_info[f].size_of) {
		case sizeof(u8):
			ice_write_byte(src_ctx, dest_ctx, &ce_info[f]);
			break;
		case sizeof(u16):
			ice_write_word(src_ctx, dest_ctx, &ce_info[f]);
			break;
		case sizeof(u32):
			ice_write_dword(src_ctx, dest_ctx, &ce_info[f]);
			break;
		case sizeof(u64):
			ice_write_qword(src_ctx, dest_ctx, &ce_info[f]);
			break;
		default:
			return ICE_ERR_INVAL_SIZE;
		}
	}

	return 0;
}

/**
 * ice_get_lan_q_ctx - get the LAN queue context for the given VSI and TC
 * @hw: pointer to the HW struct
 * @vsi_handle: software VSI handle
 * @tc: TC number
 * @q_handle: software queue handle
 */
struct ice_q_ctx *
ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle)
{
	struct ice_vsi_ctx *vsi;
	struct ice_q_ctx *q_ctx;

	vsi = ice_get_vsi_ctx(hw, vsi_handle);
	if (!vsi)
		return NULL;
	if (q_handle >= vsi->num_lan_q_entries[tc])
		return NULL;
	if (!vsi->lan_q_ctx[tc])
		return NULL;
	q_ctx = vsi->lan_q_ctx[tc];
	return &q_ctx[q_handle];
}
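
/* Usage sketch (editor's illustration; the field layout below is hypothetical
 * and "example_ctx" is not a real hardware context): a caller describes each
 * field of the packed structure with one ice_ctx_ele entry and lets
 * ice_set_ctx() dispatch on size_of to the byte/word/dword/qword writers
 * above, e.g.
 *
 *	static const struct ice_ctx_ele example_ctx_info[] = {
 *		{ .offset = offsetof(struct example_ctx, base),
 *		  .size_of = sizeof_field(struct example_ctx, base),
 *		  .width = 57, .lsb = 0 },
 *		{ .offset = offsetof(struct example_ctx, qlen),
 *		  .size_of = sizeof_field(struct example_ctx, qlen),
 *		  .width = 13, .lsb = 57 },
 *		{ 0 }	-- a zero width entry terminates the walk
 *	};
 *
 *	ice_set_ctx(hw, (u8 *)&unpacked, packed_buf, example_ctx_info);
 */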

/**
 * ice_ena_vsi_txq
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc: TC number
 * @q_handle: software queue handle
 * @num_qgrps: Number of added queue groups
 * @buf: list of queue groups to be added
 * @buf_size: size of buffer for indirect command
 * @cd: pointer to command details structure or NULL
 *
 * This function adds one LAN queue
 */
enum ice_status
ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
		u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
		struct ice_sq_cd *cd)
{
	struct ice_aqc_txsched_elem_data node = { 0 };
	struct ice_sched_node *parent;
	struct ice_q_ctx *q_ctx;
	enum ice_status status;
	struct ice_hw *hw;

	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return ICE_ERR_CFG;

	if (num_qgrps > 1 || buf->num_txqs > 1)
		return ICE_ERR_MAX_LIMIT;

	hw = pi->hw;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	mutex_lock(&pi->sched_lock);

	q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handle);
	if (!q_ctx) {
		ice_debug(hw, ICE_DBG_SCHED, "Enaq: invalid queue handle %d\n",
			  q_handle);
		status = ICE_ERR_PARAM;
		goto ena_txq_exit;
	}

	/* find a parent node */
	parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
					    ICE_SCHED_NODE_OWNER_LAN);
	if (!parent) {
		status = ICE_ERR_PARAM;
		goto ena_txq_exit;
	}

	buf->parent_teid = parent->info.node_teid;
	node.parent_teid = parent->info.node_teid;
	/* Mark the values in the "generic" section as valid. The default
	 * value in the "generic" section is zero. This means that:
	 * - Scheduling mode is Bytes Per Second (BPS), indicated by Bit 0.
	 * - 0 priority among siblings, indicated by Bit 1-3.
	 * - WFQ, indicated by Bit 4.
	 * - 0 Adjustment value is used in PSM credit update flow, indicated by
	 * Bit 5-6.
	 * - Bit 7 is reserved.
	 * Without setting the generic section as valid in valid_sections, the
	 * Admin queue command will fail with error code ICE_AQ_RC_EINVAL.
	 */
	buf->txqs[0].info.valid_sections = ICE_AQC_ELEM_VALID_GENERIC;

	/* add the LAN queue */
	status = ice_aq_add_lan_txq(hw, num_qgrps, buf, buf_size, cd);
	if (status) {
		ice_debug(hw, ICE_DBG_SCHED, "enable queue %d failed %d\n",
			  le16_to_cpu(buf->txqs[0].txq_id),
			  hw->adminq.sq_last_status);
		goto ena_txq_exit;
	}

	node.node_teid = buf->txqs[0].q_teid;
	node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
	q_ctx->q_handle = q_handle;
	q_ctx->q_teid = le32_to_cpu(node.node_teid);

	/* add a leaf node into scheduler tree queue layer */
	status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node);
	if (!status)
		status = ice_sched_replay_q_bw(pi, q_ctx);

ena_txq_exit:
	mutex_unlock(&pi->sched_lock);
	return status;
}
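
/* Editor's note (illustrative): although the prototype takes num_qgrps and a
 * group list, ice_ena_vsi_txq() intentionally handles exactly one queue per
 * call; a typical caller builds a one-entry ice_aqc_add_tx_qgrp sized with
 * struct_size(buf, txqs, 1), fills txqs[0] (queue ID and queue context), and
 * passes num_qgrps = 1. Anything larger is rejected with ICE_ERR_MAX_LIMIT
 * before the admin queue command is issued.
 */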

/**
 * ice_dis_vsi_txq
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc: TC number
 * @num_queues: number of queues
 * @q_handles: pointer to software queue handle array
 * @q_ids: pointer to the q_id array
 * @q_teids: pointer to queue node teids
 * @rst_src: if called due to reset, specifies the reset source
 * @vmvf_num: the relative VM or VF number that is undergoing the reset
 * @cd: pointer to command details structure or NULL
 *
 * This function removes queues and their corresponding nodes in SW DB
 */
enum ice_status
ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
		u16 *q_handles, u16 *q_ids, u32 *q_teids,
		enum ice_disq_rst_src rst_src, u16 vmvf_num,
		struct ice_sq_cd *cd)
{
	enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
	struct ice_aqc_dis_txq_item *qg_list;
	struct ice_q_ctx *q_ctx;
	struct ice_hw *hw;
	u16 i, buf_size;

	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return ICE_ERR_CFG;

	hw = pi->hw;

	if (!num_queues) {
		/* if queue is disabled already yet the disable queue command
		 * has to be sent to complete the VF reset, then call
		 * ice_aq_dis_lan_txq without any queue information
		 */
		if (rst_src)
			return ice_aq_dis_lan_txq(hw, 0, NULL, 0, rst_src,
						  vmvf_num, NULL);
		return ICE_ERR_CFG;
	}

	buf_size = struct_size(qg_list, q_id, 1);
	qg_list = kzalloc(buf_size, GFP_KERNEL);
	if (!qg_list)
		return ICE_ERR_NO_MEMORY;

	mutex_lock(&pi->sched_lock);

	for (i = 0; i < num_queues; i++) {
		struct ice_sched_node *node;

		node = ice_sched_find_node_by_teid(pi->root, q_teids[i]);
		if (!node)
			continue;
		q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handles[i]);
		if (!q_ctx) {
			ice_debug(hw, ICE_DBG_SCHED, "invalid queue handle %d\n",
				  q_handles[i]);
			continue;
		}
		if (q_ctx->q_handle != q_handles[i]) {
			ice_debug(hw, ICE_DBG_SCHED, "Err: handles %d %d\n",
				  q_ctx->q_handle, q_handles[i]);
			continue;
		}
		qg_list->parent_teid = node->info.parent_teid;
		qg_list->num_qs = 1;
		qg_list->q_id[0] = cpu_to_le16(q_ids[i]);
		status = ice_aq_dis_lan_txq(hw, 1, qg_list, buf_size, rst_src,
					    vmvf_num, cd);

		if (status)
			break;
		ice_free_sched_node(pi, node);
		q_ctx->q_handle = ICE_INVAL_Q_HANDLE;
	}
	mutex_unlock(&pi->sched_lock);
	kfree(qg_list);
	return status;
}

/**
 * ice_cfg_vsi_qs - configure the new/existing VSI queues
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc_bitmap: TC bitmap
 * @maxqs: max queues array per TC
 * @owner: LAN or RDMA
 *
 * This function adds/updates the VSI queues per TC.
 */
static enum ice_status
ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
	       u16 *maxqs, u8 owner)
{
	enum ice_status status = 0;
	u8 i;

	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return ICE_ERR_CFG;

	if (!ice_is_vsi_valid(pi->hw, vsi_handle))
		return ICE_ERR_PARAM;

	mutex_lock(&pi->sched_lock);

	ice_for_each_traffic_class(i) {
		/* configuration is possible only if TC node is present */
		if (!ice_sched_get_tc_node(pi, i))
			continue;

		status = ice_sched_cfg_vsi(pi, vsi_handle, i, maxqs[i], owner,
					   ice_is_tc_ena(tc_bitmap, i));
		if (status)
			break;
	}

	mutex_unlock(&pi->sched_lock);
	return status;
}

/**
 * ice_cfg_vsi_lan - configure VSI LAN queues
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc_bitmap: TC bitmap
 * @max_lanqs: max LAN queues array per TC
 *
 * This function adds/updates the VSI LAN queues per TC.
 */
enum ice_status
ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
		u16 *max_lanqs)
{
	return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_lanqs,
			      ICE_SCHED_NODE_OWNER_LAN);
}

/**
 * ice_replay_pre_init - replay pre initialization
 * @hw: pointer to the HW struct
 *
 * Initializes required config data for VSI, FD, ACL, and RSS before replay.
 */
static enum ice_status ice_replay_pre_init(struct ice_hw *hw)
{
	struct ice_switch_info *sw = hw->switch_info;
	u8 i;

	/* Delete old entries from replay filter list head if there is any */
	ice_rm_all_sw_replay_rule_info(hw);
	/* At the start of replay, move entries into the replay_rules list;
	 * this allows rule entries to be added back to the filt_rules list,
	 * which is the operational list.
	 */
	for (i = 0; i < ICE_SW_LKUP_LAST; i++)
		list_replace_init(&sw->recp_list[i].filt_rules,
				  &sw->recp_list[i].filt_replay_rules);

	return 0;
}

/**
 * ice_replay_vsi - replay VSI configuration
 * @hw: pointer to the HW struct
 * @vsi_handle: driver VSI handle
 *
 * Restore all VSI configuration after reset. It is required to call this
 * function with main VSI first.
 */
enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle)
{
	enum ice_status status;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	/* Replay pre-initialization if there is any */
	if (vsi_handle == ICE_MAIN_VSI_HANDLE) {
		status = ice_replay_pre_init(hw);
		if (status)
			return status;
	}
	/* Replay per VSI all RSS configurations */
	status = ice_replay_rss_cfg(hw, vsi_handle);
	if (status)
		return status;
	/* Replay per VSI all filters */
	status = ice_replay_vsi_all_fltr(hw, vsi_handle);
	return status;
}

/**
 * ice_replay_post - post replay configuration cleanup
 * @hw: pointer to the HW struct
 *
 * Post replay cleanup.
 */
void ice_replay_post(struct ice_hw *hw)
{
	/* Delete old entries from replay filter list head */
	ice_rm_all_sw_replay_rule_info(hw);
}

/**
 * ice_stat_update40 - read 40 bit stat from the chip and update stat values
 * @hw: ptr to the hardware info
 * @reg: offset of 64 bit HW register to read from
 * @prev_stat_loaded: bool to specify if previous stats are loaded
 * @prev_stat: ptr to previous loaded stat value
 * @cur_stat: ptr to current stat value
 */
void
ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
		  u64 *prev_stat, u64 *cur_stat)
{
	u64 new_data = rd64(hw, reg) & (BIT_ULL(40) - 1);

	/* device stats are not reset at PFR, they likely will not be zeroed
	 * when the driver starts. Thus, save the value from the first read
	 * without adding to the statistic value so that we report stats which
	 * count up from zero.
	 */
	if (!prev_stat_loaded) {
		*prev_stat = new_data;
		return;
	}

	/* Calculate the difference between the new and old values, and then
	 * add it to the software stat value.
	 */
	if (new_data >= *prev_stat)
		*cur_stat += new_data - *prev_stat;
	else
		/* to manage the potential roll-over */
		*cur_stat += (new_data + BIT_ULL(40)) - *prev_stat;

	/* Update the previously stored value to prepare for next read */
	*prev_stat = new_data;
}

/**
 * ice_stat_update32 - read 32 bit stat from the chip and update stat values
 * @hw: ptr to the hardware info
 * @reg: offset of HW register to read from
 * @prev_stat_loaded: bool to specify if previous stats are loaded
 * @prev_stat: ptr to previous loaded stat value
 * @cur_stat: ptr to current stat value
 */
void
ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
		  u64 *prev_stat, u64 *cur_stat)
{
	u32 new_data;

	new_data = rd32(hw, reg);

	/* device stats are not reset at PFR, they likely will not be zeroed
	 * when the driver starts. Thus, save the value from the first read
	 * without adding to the statistic value so that we report stats which
	 * count up from zero.
	 */
	if (!prev_stat_loaded) {
		*prev_stat = new_data;
		return;
	}

	/* Calculate the difference between the new and old values, and then
	 * add it to the software stat value.
	 */
	if (new_data >= *prev_stat)
		*cur_stat += new_data - *prev_stat;
	else
		/* to manage the potential roll-over */
		*cur_stat += (new_data + BIT_ULL(32)) - *prev_stat;

	/* Update the previously stored value to prepare for next read */
	*prev_stat = new_data;
}

/**
 * ice_sched_query_elem - query element information from HW
 * @hw: pointer to the HW struct
 * @node_teid: node TEID to be queried
 * @buf: buffer to element information
 *
 * This function queries HW element information
 */
enum ice_status
ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
		     struct ice_aqc_txsched_elem_data *buf)
{
	u16 buf_size, num_elem_ret = 0;
	enum ice_status status;

	buf_size = sizeof(*buf);
	memset(buf, 0, buf_size);
	buf->node_teid = cpu_to_le32(node_teid);
	status = ice_aq_query_sched_elems(hw, 1, buf, buf_size, &num_elem_ret,
					  NULL);
	if (status || num_elem_ret != 1)
		ice_debug(hw, ICE_DBG_SCHED, "query element failed\n");
	return status;
}

/**
 * ice_fw_supports_link_override
 * @hw: pointer to the hardware structure
 *
 * Checks if the firmware supports link override
 */
bool ice_fw_supports_link_override(struct ice_hw *hw)
{
	/* Currently, only supported for E810 devices */
	if (hw->mac_type != ICE_MAC_E810)
		return false;

	if (hw->api_maj_ver == ICE_FW_API_LINK_OVERRIDE_MAJ) {
		if (hw->api_min_ver > ICE_FW_API_LINK_OVERRIDE_MIN)
			return true;
		if (hw->api_min_ver == ICE_FW_API_LINK_OVERRIDE_MIN &&
		    hw->api_patch >= ICE_FW_API_LINK_OVERRIDE_PATCH)
			return true;
	} else if (hw->api_maj_ver > ICE_FW_API_LINK_OVERRIDE_MAJ) {
		return true;
	}

	return false;
}
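
/* Editor's note (illustrative): the check above is a conventional
 * major/minor/patch comparison against ICE_FW_API_LINK_OVERRIDE_{MAJ,MIN,PATCH}.
 * Assuming, purely for illustration, a required API version of 1.5.2: firmware
 * reporting 1.5.2, 1.6.0 or 2.0.0 returns true, while 1.5.1 or 1.4.9 returns
 * false (and any non-E810 MAC returns false regardless of version).
 */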

/**
 * ice_get_link_default_override
 * @ldo: pointer to the link default override struct
 * @pi: pointer to the port info struct
 *
 * Gets the link default override for a port
 */
enum ice_status
ice_get_link_default_override(struct ice_link_default_override_tlv *ldo,
			      struct ice_port_info *pi)
{
	u16 i, tlv, tlv_len, tlv_start, buf, offset;
	struct ice_hw *hw = pi->hw;
	enum ice_status status;

	status = ice_get_pfa_module_tlv(hw, &tlv, &tlv_len,
					ICE_SR_LINK_DEFAULT_OVERRIDE_PTR);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT,
			  "Failed to read link override TLV.\n");
		return status;
	}

	/* Each port has its own config; calculate for our port */
	tlv_start = tlv + pi->lport * ICE_SR_PFA_LINK_OVERRIDE_WORDS +
		ICE_SR_PFA_LINK_OVERRIDE_OFFSET;

	/* link options first */
	status = ice_read_sr_word(hw, tlv_start, &buf);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT,
			  "Failed to read override link options.\n");
		return status;
	}
	ldo->options = buf & ICE_LINK_OVERRIDE_OPT_M;
	ldo->phy_config = (buf & ICE_LINK_OVERRIDE_PHY_CFG_M) >>
		ICE_LINK_OVERRIDE_PHY_CFG_S;

	/* link PHY config */
	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_FEC_OFFSET;
	status = ice_read_sr_word(hw, offset, &buf);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT,
			  "Failed to read override phy config.\n");
		return status;
	}
	ldo->fec_options = buf & ICE_LINK_OVERRIDE_FEC_OPT_M;

	/* PHY types low */
	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET;
	for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
		status = ice_read_sr_word(hw, (offset + i), &buf);
		if (status) {
			ice_debug(hw, ICE_DBG_INIT,
				  "Failed to read override link options.\n");
			return status;
		}
		/* shift 16 bits at a time to fill 64 bits */
		ldo->phy_type_low |= ((u64)buf << (i * 16));
	}

	/* PHY types high */
	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET +
		ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS;
	for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
		status = ice_read_sr_word(hw, (offset + i), &buf);
		if (status) {
			ice_debug(hw, ICE_DBG_INIT,
				  "Failed to read override link options.\n");
			return status;
		}
		/* shift 16 bits at a time to fill 64 bits */
		ldo->phy_type_high |= ((u64)buf << (i * 16));
	}

	return status;
}

/**
 * ice_is_phy_caps_an_enabled - check if PHY capabilities autoneg is enabled
 * @caps: get PHY capability data
 */
bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps)
{
	if (caps->caps & ICE_AQC_PHY_AN_MODE ||
	    caps->low_power_ctrl_an & (ICE_AQC_PHY_AN_EN_CLAUSE28 |
				       ICE_AQC_PHY_AN_EN_CLAUSE73 |
				       ICE_AQC_PHY_AN_EN_CLAUSE37))
		return true;

	return false;
}
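
/* Editor's note (illustrative): autoneg is reported as enabled either when the
 * PHY caps word advertises ICE_AQC_PHY_AN_MODE or when any of the clause
 * 28/37/73 autoneg enables is set in low_power_ctrl_an; e.g. caps == 0 with
 * low_power_ctrl_an == ICE_AQC_PHY_AN_EN_CLAUSE73 still returns true.
 */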